Add 'sdk/java-v2/' from commit '55f103e336ca9fb8bf1720d2ef4ee8dd4e221118'
author Peter Amstutz <pamstutz@veritasgenetics.com>
Thu, 14 Mar 2019 14:11:26 +0000 (10:11 -0400)
committer Peter Amstutz <pamstutz@veritasgenetics.com>
Thu, 14 Mar 2019 14:13:05 +0000 (10:13 -0400)
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz@veritasgenetics.com>

git-subtree-dir: sdk/java-v2
git-subtree-mainline: 89c5953f15ff025971e465c86eb6d129ff0a63f9
git-subtree-split: 55f103e336ca9fb8bf1720d2ef4ee8dd4e221118
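
This merge commit is the kind generated by git-subtree. A command along the following lines would produce it; the source repository URL is not recorded in the commit, so <java-sdk-repo> below is a placeholder:

    git subtree add --prefix=sdk/java-v2 <java-sdk-repo> \
        55f103e336ca9fb8bf1720d2ef4ee8dd4e221118

The trailers above are written by git-subtree itself: git-subtree-dir is the prefix the imported content was placed under, git-subtree-mainline is the mainline head before the merge, and git-subtree-split is the commit imported from the subtree repository.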

2927 files changed:
.gitignore
.licenseignore
AUTHORS [new file with mode: 0644]
COPYING
Makefile [new file with mode: 0644]
README.md
apps/workbench/.gitignore [new file with mode: 0644]
apps/workbench/Gemfile [new file with mode: 0644]
apps/workbench/Gemfile.lock [new file with mode: 0644]
apps/workbench/README.textile [new file with mode: 0644]
apps/workbench/Rakefile [new file with mode: 0644]
apps/workbench/app/assets/images/dax.png [new file with mode: 0644]
apps/workbench/app/assets/images/mouse-move.gif [new file with mode: 0644]
apps/workbench/app/assets/images/pipeline-running.gif [new file with mode: 0644]
apps/workbench/app/assets/images/rails.png [new file with mode: 0644]
apps/workbench/app/assets/images/spinner_32px.gif [new file with mode: 0644]
apps/workbench/app/assets/images/trash-icon.png [new file with mode: 0644]
apps/workbench/app/assets/javascripts/add_group.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/add_repository.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/ajax_error.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/angular_shim.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/application.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/arvados_client.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/bootstrap.js.coffee [new file with mode: 0644]
apps/workbench/app/assets/javascripts/collections.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/components/date.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/components/edit_tags.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/components/save_ui_state.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/components/search.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/components/sessions.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/components/test.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/dates.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/edit_collection.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/editable.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/event_log.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/filterable.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/infinite_scroll.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/job_log_graph.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/keep_disks.js.coffee [new file with mode: 0644]
apps/workbench/app/assets/javascripts/link_to_remote.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/list.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/log_viewer.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/mithril_mount.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/modal_pager.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/models/loader.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/models/session_db.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/permission_toggle.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/pipeline_instances.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/report_issue.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/request_shell_access.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/select_modal.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/selection.js.erb [new file with mode: 0644]
apps/workbench/app/assets/javascripts/sizing.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/tab_panes.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/to_tsquery.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/upload_to_collection.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/user_agreements.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/users.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/work_unit_component.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/work_unit_log.js [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/api_client_authorizations.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/application.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/authorized_keys.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/badges.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/cards.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/collections.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/groups.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/humans.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/job_tasks.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/jobs.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/keep_disks.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/links.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/loading.css.scss.erb [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/log_viewer.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/logs.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/nodes.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/pipeline_instances.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/pipeline_templates.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/projects.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/repositories.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/sb-admin.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/scaffolds.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/select_modal.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/sessions.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/specimens.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/traits.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/user_agreements.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/users.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/virtual_machines.css.scss [new file with mode: 0644]
apps/workbench/app/controllers/actions_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/api_client_authorizations_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/application_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/authorized_keys_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/collections_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/container_requests_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/containers_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/groups_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/healthcheck_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/humans_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/job_tasks_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/jobs_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/keep_disks_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/keep_services_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/links_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/logs_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/nodes_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/pipeline_instances_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/pipeline_templates_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/projects_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/repositories_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/search_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/sessions_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/specimens_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/status_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/tests_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/traits_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/trash_items_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/user_agreements_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/users_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/virtual_machines_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/websocket_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/work_unit_templates_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/work_units_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/workflows_controller.rb [new file with mode: 0644]
apps/workbench/app/helpers/application_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/arvados_api_client_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/collections_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/pipeline_components_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/pipeline_instances_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/provenance_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/version_helper.rb [new file with mode: 0644]
apps/workbench/app/mailers/.gitkeep [new file with mode: 0644]
apps/workbench/app/mailers/issue_reporter.rb [new file with mode: 0644]
apps/workbench/app/mailers/request_shell_access_reporter.rb [new file with mode: 0644]
apps/workbench/app/models/.gitkeep [new file with mode: 0644]
apps/workbench/app/models/api_client_authorization.rb [new file with mode: 0644]
apps/workbench/app/models/arvados_api_client.rb [new file with mode: 0644]
apps/workbench/app/models/arvados_base.rb [new file with mode: 0644]
apps/workbench/app/models/arvados_resource_list.rb [new file with mode: 0644]
apps/workbench/app/models/authorized_key.rb [new file with mode: 0644]
apps/workbench/app/models/collection.rb [new file with mode: 0644]
apps/workbench/app/models/container.rb [new file with mode: 0644]
apps/workbench/app/models/container_request.rb [new file with mode: 0644]
apps/workbench/app/models/container_work_unit.rb [new file with mode: 0644]
apps/workbench/app/models/group.rb [new file with mode: 0644]
apps/workbench/app/models/human.rb [new file with mode: 0644]
apps/workbench/app/models/job.rb [new file with mode: 0644]
apps/workbench/app/models/job_task.rb [new file with mode: 0644]
apps/workbench/app/models/job_task_work_unit.rb [new file with mode: 0644]
apps/workbench/app/models/job_work_unit.rb [new file with mode: 0644]
apps/workbench/app/models/keep_disk.rb [new file with mode: 0644]
apps/workbench/app/models/keep_service.rb [new file with mode: 0644]
apps/workbench/app/models/link.rb [new file with mode: 0644]
apps/workbench/app/models/log.rb [new file with mode: 0644]
apps/workbench/app/models/node.rb [new file with mode: 0644]
apps/workbench/app/models/pipeline_instance.rb [new file with mode: 0644]
apps/workbench/app/models/pipeline_instance_work_unit.rb [new file with mode: 0644]
apps/workbench/app/models/pipeline_template.rb [new file with mode: 0644]
apps/workbench/app/models/proxy_work_unit.rb [new file with mode: 0644]
apps/workbench/app/models/repository.rb [new file with mode: 0644]
apps/workbench/app/models/specimen.rb [new file with mode: 0644]
apps/workbench/app/models/trait.rb [new file with mode: 0644]
apps/workbench/app/models/user.rb [new file with mode: 0644]
apps/workbench/app/models/user_agreement.rb [new file with mode: 0644]
apps/workbench/app/models/virtual_machine.rb [new file with mode: 0644]
apps/workbench/app/models/work_unit.rb [new file with mode: 0644]
apps/workbench/app/models/workflow.rb [new file with mode: 0644]
apps/workbench/app/views/api_client_authorizations/_show_help.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/404.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/404.json.erb [new file with mode: 0644]
apps/workbench/app/views/application/_arvados_attr_value.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_arvados_object.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_arvados_object_attr.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_breadcrumb_page_name.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_breadcrumbs.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_browser_unsupported.html [new file with mode: 0644]
apps/workbench/app/views/application/_choose.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_choose.js.erb [new file with mode: 0644]
apps/workbench/app/views/application/_choose_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_content.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_content_layout.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_create_new_object_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_delete_object_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_extra_tab_line_buttons.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_index.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_job_progress.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_loading.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_loading_modal.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_name_and_description.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_object_description.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_object_name.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_paging.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_pipeline_progress.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_pipeline_status_label.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_projects_tree_menu.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_report_error.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_report_issue_popup.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_selection_checkbox.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_advanced.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_advanced_api_response.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_advanced_cli_example.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_advanced_curl_example.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_advanced_metadata.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_advanced_python_example.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_api.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_attributes.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_autoselect_text.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_home_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_object_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_object_description_cell.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_sharing.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_star.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_text_with_locators.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_svg_div.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_tab_line_buttons.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_title_and_buttons.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/api_error.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/api_error.json.erb [new file with mode: 0644]
apps/workbench/app/views/application/destroy.js.erb [new file with mode: 0644]
apps/workbench/app/views/application/error.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/error.json.erb [new file with mode: 0644]
apps/workbench/app/views/application/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/report_issue_popup.js.erb [new file with mode: 0644]
apps/workbench/app/views/application/show.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/star.js.erb [new file with mode: 0644]
apps/workbench/app/views/authorized_keys/create.js.erb [new file with mode: 0644]
apps/workbench/app/views/authorized_keys/edit.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_choose.js.erb [new symlink]
apps/workbench/app/views/collections/_choose_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_create_new_object_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_extra_tab_line_buttons.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_index_tbody.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_sharing_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_chooser_preview.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_files.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_provenance_graph.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_source_summary.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_tags.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_upload.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_used_by.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/graph.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/hash_matches.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/index.js.erb [new file with mode: 0644]
apps/workbench/app/views/collections/sharing_popup.js.erb [new file with mode: 0644]
apps/workbench/app/views/collections/show.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/show_file_links.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/_extra_tab_line_buttons.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/_name_and_description.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/_show_inputs.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/_show_log.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/_show_object_description_cell.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/_show_provenance.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/_show_recent_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/_show_status.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/_state_label.html.erb [new file with mode: 0644]
apps/workbench/app/views/container_requests/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/containers/_show_log.html.erb [new file with mode: 0644]
apps/workbench/app/views/containers/_show_status.html.erb [new file with mode: 0644]
apps/workbench/app/views/getting_started/_getting_started_popup.html.erb [new file with mode: 0644]
apps/workbench/app/views/groups/_choose_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/groups/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/issue_reporter/send_report.text.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_create_new_object_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_rerun_job_with_options_popup.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_details.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_job_buttons.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_log.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_object_description_cell.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_provenance.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_status.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/show.html.erb [new file with mode: 0644]
apps/workbench/app/views/keep_disks/_content_layout.html.erb [new file with mode: 0644]
apps/workbench/app/views/layouts/application.html.erb [new file with mode: 0644]
apps/workbench/app/views/layouts/body.html.erb [new file with mode: 0644]
apps/workbench/app/views/links/_breadcrumb_page_name.html.erb [new file with mode: 0644]
apps/workbench/app/views/notifications/_collections_notification.html.erb [new file with mode: 0644]
apps/workbench/app/views/notifications/_jobs_notification.html.erb [new file with mode: 0644]
apps/workbench/app/views/notifications/_pipelines_notification.html.erb [new file with mode: 0644]
apps/workbench/app/views/notifications/_ssh_key_notification.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_component_labels.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_running_component.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_compare.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_components.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_components_editable.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_components_json.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_components_running.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_graph.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_inputs.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_log.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_object_description_cell.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_recent_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_tab_buttons.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/compare.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/show.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/show.js.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/_choose.js.erb [new symlink]
apps/workbench/app/views/pipeline_templates/_choose_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/_show_attributes.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/_show_chooser_preview.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/_show_components.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/_show_pipelines.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/show.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_choose.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_choose.js.erb [new symlink]
apps/workbench/app/views/projects/_compute_node_status.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_compute_node_summary.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_index_jobs_and_pipelines.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_index_projects.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_contents_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_dashboard.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_data_collections.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_description.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_featured.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_other_objects.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_pipeline_templates.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_pipelines_and_processes.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_processes.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_subprojects.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_tab_contents.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_workflows.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/public.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/remove_items.js.erb [new file with mode: 0644]
apps/workbench/app/views/projects/show.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/tab_counts.js.erb [new file with mode: 0644]
apps/workbench/app/views/repositories/_add_repository_modal.html.erb [new file with mode: 0644]
apps/workbench/app/views/repositories/_repository_breadcrumbs.html.erb [new file with mode: 0644]
apps/workbench/app/views/repositories/_show_help.html.erb [new file with mode: 0644]
apps/workbench/app/views/repositories/_show_repositories.html.erb [new file with mode: 0644]
apps/workbench/app/views/repositories/_show_repositories_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/repositories/show_blob.html.erb [new file with mode: 0644]
apps/workbench/app/views/repositories/show_commit.html.erb [new file with mode: 0644]
apps/workbench/app/views/repositories/show_tree.html.erb [new file with mode: 0644]
apps/workbench/app/views/request_shell_access_reporter/send_request.text.erb [new file with mode: 0644]
apps/workbench/app/views/search/_choose_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/search/index.html [new file with mode: 0644]
apps/workbench/app/views/sessions/index.html [new file with mode: 0644]
apps/workbench/app/views/sessions/logged_out.html.erb [new file with mode: 0644]
apps/workbench/app/views/tests/mithril.html [new file with mode: 0644]
apps/workbench/app/views/trash_items/_create_new_object_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/trash_items/_show_trash_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/trash_items/_show_trashed_collection_rows.html.erb [new symlink]
apps/workbench/app/views/trash_items/_show_trashed_collections.html.erb [new file with mode: 0644]
apps/workbench/app/views/trash_items/_show_trashed_project_rows.html.erb [new symlink]
apps/workbench/app/views/trash_items/_show_trashed_projects.html.erb [new file with mode: 0644]
apps/workbench/app/views/trash_items/_untrash_item.html.erb [new file with mode: 0644]
apps/workbench/app/views/trash_items/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/trash_items/untrash_items.js.erb [new file with mode: 0644]
apps/workbench/app/views/user_agreements/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_add_group_modal.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_add_ssh_key_popup.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_choose_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_create_new_object_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_current_token.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_home.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_setup_popup.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_show_activity.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_show_admin.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_ssh_keys.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_tables.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_virtual_machines.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/activity.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/add_ssh_key.js.erb [new file with mode: 0644]
apps/workbench/app/views/users/add_ssh_key_popup.js.erb [new file with mode: 0644]
apps/workbench/app/views/users/current_token.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/home.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/home.js.erb [new file with mode: 0644]
apps/workbench/app/views/users/inactive.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/link_account.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/profile.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/request_shell_access.js [new file with mode: 0644]
apps/workbench/app/views/users/setup.js.erb [new file with mode: 0644]
apps/workbench/app/views/users/setup_popup.js.erb [new file with mode: 0644]
apps/workbench/app/views/users/ssh_keys.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/storage.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/virtual_machines.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/welcome.html.erb [new file with mode: 0644]
apps/workbench/app/views/virtual_machines/_show_help.html.erb [new file with mode: 0644]
apps/workbench/app/views/virtual_machines/webshell.html.erb [new file with mode: 0644]
apps/workbench/app/views/websocket/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_component_detail.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_progress.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_all_processes.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_all_processes_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_child.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_component.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_log.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_log_link.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_output.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_outputs.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_status.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/_show_table_data.html.erb [new file with mode: 0644]
apps/workbench/app/views/work_units/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/workflows/_show_chooser_preview.html.erb [new file with mode: 0644]
apps/workbench/app/views/workflows/_show_definition.html.erb [new file with mode: 0644]
apps/workbench/app/views/workflows/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/workflows/show.html.erb [new file with mode: 0644]
apps/workbench/config.ru [new file with mode: 0644]
apps/workbench/config/application.default.yml [new file with mode: 0644]
apps/workbench/config/application.rb [new file with mode: 0644]
apps/workbench/config/application.yml.example [new file with mode: 0644]
apps/workbench/config/boot.rb [new file with mode: 0644]
apps/workbench/config/database.yml [new file with mode: 0644]
apps/workbench/config/environment.rb [new file with mode: 0644]
apps/workbench/config/environments/development.rb.example [new file with mode: 0644]
apps/workbench/config/environments/production.rb.example [new file with mode: 0644]
apps/workbench/config/environments/test.rb [new symlink]
apps/workbench/config/environments/test.rb.example [new file with mode: 0644]
apps/workbench/config/initializers/backtrace_silencers.rb [new file with mode: 0644]
apps/workbench/config/initializers/inflections.rb [new file with mode: 0644]
apps/workbench/config/initializers/lograge.rb [new file with mode: 0644]
apps/workbench/config/initializers/mime_types.rb [new file with mode: 0644]
apps/workbench/config/initializers/rack_mini_profile.rb [new file with mode: 0644]
apps/workbench/config/initializers/redcloth.rb [new file with mode: 0644]
apps/workbench/config/initializers/secret_token.rb.example [new file with mode: 0644]
apps/workbench/config/initializers/session_store.rb [new file with mode: 0644]
apps/workbench/config/initializers/time_format.rb [new file with mode: 0644]
apps/workbench/config/initializers/validate_wb2_url_config.rb [new file with mode: 0644]
apps/workbench/config/initializers/wrap_parameters.rb [new file with mode: 0644]
apps/workbench/config/load_config.rb [new file with mode: 0644]
apps/workbench/config/locales/en.bootstrap.yml [new file with mode: 0644]
apps/workbench/config/locales/en.yml [new file with mode: 0644]
apps/workbench/config/piwik.yml.example [new file with mode: 0644]
apps/workbench/config/routes.rb [new file with mode: 0644]
apps/workbench/db/schema.rb [new file with mode: 0644]
apps/workbench/db/seeds.rb [new file with mode: 0644]
apps/workbench/fpm-info.sh [new file with mode: 0644]
apps/workbench/lib/app_version.rb [new file with mode: 0644]
apps/workbench/lib/assets/.gitkeep [new file with mode: 0644]
apps/workbench/lib/config_validators.rb [new file with mode: 0644]
apps/workbench/lib/tasks/.gitkeep [new file with mode: 0644]
apps/workbench/lib/tasks/config_check.rake [new file with mode: 0644]
apps/workbench/lib/tasks/config_dump.rake [new file with mode: 0644]
apps/workbench/log/.gitkeep [new file with mode: 0644]
apps/workbench/npm_packages [new file with mode: 0644]
apps/workbench/public/404.html [new file with mode: 0644]
apps/workbench/public/422.html [new file with mode: 0644]
apps/workbench/public/500.html [new file with mode: 0644]
apps/workbench/public/browser_unsupported.js [new file with mode: 0644]
apps/workbench/public/d3.v3.min.js [new file with mode: 0644]
apps/workbench/public/favicon.ico [new file with mode: 0644]
apps/workbench/public/graph-example.html [new file with mode: 0644]
apps/workbench/public/robots.txt [new file with mode: 0644]
apps/workbench/public/vocabulary-example.json [new file with mode: 0644]
apps/workbench/public/webshell/README [new file with mode: 0644]
apps/workbench/public/webshell/enabled.gif [new file with mode: 0644]
apps/workbench/public/webshell/keyboard.html [new file with mode: 0644]
apps/workbench/public/webshell/keyboard.png [new file with mode: 0644]
apps/workbench/public/webshell/shell_in_a_box.js [new file with mode: 0644]
apps/workbench/public/webshell/styles.css [new file with mode: 0644]
apps/workbench/script/rails [new file with mode: 0755]
apps/workbench/test/controllers/actions_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/api_client_authorizations_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/application_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/authorized_keys_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/collections_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/container_requests_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/containers_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/disabled_api_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/groups_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/healthcheck_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/humans_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/job_tasks_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/jobs_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/keep_disks_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/links_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/logs_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/nodes_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/pipeline_instances_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/pipeline_templates_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/projects_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/repositories_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/search_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/sessions_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/specimens_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/traits_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/trash_items_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/user_agreements_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/users_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/virtual_machines_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/work_units_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/workflows_controller_test.rb [new file with mode: 0644]
apps/workbench/test/diagnostics/container_request_test.rb [new file with mode: 0644]
apps/workbench/test/diagnostics/pipeline_test.rb [new file with mode: 0644]
apps/workbench/test/diagnostics_test_helper.rb [new file with mode: 0644]
apps/workbench/test/fixtures/.gitkeep [new file with mode: 0644]
apps/workbench/test/helpers/collections_helper_test.rb [new file with mode: 0644]
apps/workbench/test/helpers/download_helper.rb [new file with mode: 0644]
apps/workbench/test/helpers/fake_websocket_helper.rb [new file with mode: 0644]
apps/workbench/test/helpers/manifest_examples.rb [new symlink]
apps/workbench/test/helpers/pipeline_instances_helper_test.rb [new file with mode: 0644]
apps/workbench/test/helpers/repository_stub_helper.rb [new file with mode: 0644]
apps/workbench/test/helpers/search_helper_test.rb [new file with mode: 0644]
apps/workbench/test/helpers/share_object_helper.rb [new file with mode: 0644]
apps/workbench/test/helpers/time_block.rb [new symlink]
apps/workbench/test/integration/.gitkeep [new file with mode: 0644]
apps/workbench/test/integration/ajax_errors_test.rb [new file with mode: 0644]
apps/workbench/test/integration/anonymous_access_test.rb [new file with mode: 0644]
apps/workbench/test/integration/application_layout_test.rb [new file with mode: 0644]
apps/workbench/test/integration/browser_unsupported_test.rb [new file with mode: 0644]
apps/workbench/test/integration/collection_upload_test.rb [new file with mode: 0644]
apps/workbench/test/integration/collections_test.rb [new file with mode: 0644]
apps/workbench/test/integration/container_requests_test.rb [new file with mode: 0644]
apps/workbench/test/integration/download_test.rb [new file with mode: 0644]
apps/workbench/test/integration/errors_test.rb [new file with mode: 0644]
apps/workbench/test/integration/filterable_infinite_scroll_test.rb [new file with mode: 0644]
apps/workbench/test/integration/integration_test_utils.rb [new file with mode: 0644]
apps/workbench/test/integration/jobs_test.rb [new file with mode: 0644]
apps/workbench/test/integration/link_account_test.rb [new file with mode: 0644]
apps/workbench/test/integration/logins_test.rb [new file with mode: 0644]
apps/workbench/test/integration/pipeline_instances_test.rb [new file with mode: 0644]
apps/workbench/test/integration/pipeline_templates_test.rb [new file with mode: 0644]
apps/workbench/test/integration/projects_test.rb [new file with mode: 0644]
apps/workbench/test/integration/report_issue_test.rb [new file with mode: 0644]
apps/workbench/test/integration/repositories_browse_test.rb [new file with mode: 0644]
apps/workbench/test/integration/repositories_test.rb [new file with mode: 0644]
apps/workbench/test/integration/search_box_test.rb [new file with mode: 0644]
apps/workbench/test/integration/smoke_test.rb [new file with mode: 0644]
apps/workbench/test/integration/trash_test.rb [new file with mode: 0644]
apps/workbench/test/integration/user_agreements_test.rb [new file with mode: 0644]
apps/workbench/test/integration/user_profile_test.rb [new file with mode: 0644]
apps/workbench/test/integration/user_settings_menu_test.rb [new file with mode: 0644]
apps/workbench/test/integration/users_test.rb [new file with mode: 0644]
apps/workbench/test/integration/virtual_machines_test.rb [new file with mode: 0644]
apps/workbench/test/integration/websockets_test.rb [new file with mode: 0644]
apps/workbench/test/integration/work_units_test.rb [new file with mode: 0644]
apps/workbench/test/integration_helper.rb [new file with mode: 0644]
apps/workbench/test/integration_performance/collection_unit_test.rb [new file with mode: 0644]
apps/workbench/test/integration_performance/collections_controller_test.rb [new file with mode: 0644]
apps/workbench/test/integration_performance/collections_perf_test.rb [new file with mode: 0644]
apps/workbench/test/performance/browsing_test.rb [new file with mode: 0644]
apps/workbench/test/performance_test_helper.rb [new file with mode: 0644]
apps/workbench/test/support/fake_websocket.js [new file with mode: 0644]
apps/workbench/test/support/remove_file_api.js [new file with mode: 0644]
apps/workbench/test/test_helper.rb [new file with mode: 0644]
apps/workbench/test/unit/.gitkeep [new file with mode: 0644]
apps/workbench/test/unit/arvados_api_client_test.rb [new file with mode: 0644]
apps/workbench/test/unit/arvados_base_test.rb [new file with mode: 0644]
apps/workbench/test/unit/arvados_resource_list_test.rb [new file with mode: 0644]
apps/workbench/test/unit/collection_test.rb [new file with mode: 0644]
apps/workbench/test/unit/disabled_api_test.rb [new file with mode: 0644]
apps/workbench/test/unit/group_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/api_client_authorizations_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/authorized_keys_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/collections_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/groups_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/humans_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/job_tasks_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/jobs_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/keep_disks_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/links_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/logs_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/nodes_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/pipeline_instances_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/pipeline_templates_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/projects_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/repositories_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/sessions_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/specimens_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/traits_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/user_agreements_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/users_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/virtual_machines_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/job_test.rb [new file with mode: 0644]
apps/workbench/test/unit/link_test.rb [new file with mode: 0644]
apps/workbench/test/unit/pipeline_instance_test.rb [new file with mode: 0644]
apps/workbench/test/unit/repository_test.rb [new file with mode: 0644]
apps/workbench/test/unit/user_test.rb [new file with mode: 0644]
apps/workbench/test/unit/work_unit_test.rb [new file with mode: 0644]
apps/workbench/vendor/assets/javascripts/.gitkeep [new file with mode: 0644]
apps/workbench/vendor/assets/javascripts/jquery.number.min.js [new file with mode: 0644]
apps/workbench/vendor/assets/stylesheets/.gitkeep [new file with mode: 0644]
apps/workbench/vendor/plugins/.gitkeep [new file with mode: 0644]
build/README [new file with mode: 0644]
build/build-dev-docker-jobs-image.sh [new file with mode: 0755]
build/check-copyright-notices [new file with mode: 0755]
build/create-plot-data-from-log.sh [new file with mode: 0755]
build/go-python-package-scripts/postinst [new file with mode: 0755]
build/go-python-package-scripts/prerm [new file with mode: 0755]
build/libcloud-pin.sh [new file with mode: 0644]
build/package-build-dockerfiles/.gitignore [new file with mode: 0644]
build/package-build-dockerfiles/Makefile [new file with mode: 0644]
build/package-build-dockerfiles/README [new file with mode: 0644]
build/package-build-dockerfiles/build-all-build-containers.sh [new file with mode: 0755]
build/package-build-dockerfiles/centos7/Dockerfile [new file with mode: 0644]
build/package-build-dockerfiles/debian8/Dockerfile [new file with mode: 0644]
build/package-build-dockerfiles/debian9/Dockerfile [new file with mode: 0644]
build/package-build-dockerfiles/ubuntu1404/Dockerfile [new file with mode: 0644]
build/package-build-dockerfiles/ubuntu1604/Dockerfile [new file with mode: 0644]
build/package-build-dockerfiles/ubuntu1804/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/.gitignore [new file with mode: 0644]
build/package-test-dockerfiles/Makefile [new file with mode: 0644]
build/package-test-dockerfiles/README [new file with mode: 0644]
build/package-test-dockerfiles/centos7/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/centos7/localrepo.repo [new file with mode: 0644]
build/package-test-dockerfiles/debian8/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/debian9/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/ubuntu1404/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/ubuntu1604/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/ubuntu1604/etc-apt-preferences.d-arvados [new file with mode: 0644]
build/package-test-dockerfiles/ubuntu1804/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/ubuntu1804/etc-apt-preferences.d-arvados [new file with mode: 0644]
build/package-testing/common-test-packages.sh [new file with mode: 0755]
build/package-testing/common-test-rails-server-package.sh [new file with mode: 0755]
build/package-testing/deb-common-test-packages.sh [new file with mode: 0755]
build/package-testing/rpm-common-test-packages.sh [new file with mode: 0755]
build/package-testing/test-package-arvados-api-server.sh [new symlink]
build/package-testing/test-package-arvados-node-manager.sh [new file with mode: 0755]
build/package-testing/test-package-arvados-sso-server.sh [new symlink]
build/package-testing/test-package-arvados-workbench.sh [new symlink]
build/package-testing/test-package-python-arvados-cwl-runner.sh [new symlink]
build/package-testing/test-package-python-arvados-fuse.sh [new symlink]
build/package-testing/test-package-python-arvados-python-client.sh [new symlink]
build/package-testing/test-package-python-cwltest.sh [new symlink]
build/package-testing/test-package-python27-python-arvados-cwl-runner.sh [new file with mode: 0755]
build/package-testing/test-package-python27-python-arvados-fuse.sh [new file with mode: 0755]
build/package-testing/test-package-python27-python-arvados-python-client.sh [new file with mode: 0755]
build/package-testing/test-package-python27-python-cwltest.sh [new file with mode: 0755]
build/package-testing/test-packages-centos7.sh [new symlink]
build/package-testing/test-packages-debian8.sh [new symlink]
build/package-testing/test-packages-debian9.sh [new symlink]
build/package-testing/test-packages-ubuntu1404.sh [new symlink]
build/package-testing/test-packages-ubuntu1604.sh [new symlink]
build/package-testing/test-packages-ubuntu1804.sh [new symlink]
build/rails-package-scripts/README.md [new file with mode: 0644]
build/rails-package-scripts/arvados-api-server.sh [new file with mode: 0644]
build/rails-package-scripts/arvados-sso-server.sh [new file with mode: 0644]
build/rails-package-scripts/arvados-workbench.sh [new file with mode: 0644]
build/rails-package-scripts/postinst.sh [new file with mode: 0644]
build/rails-package-scripts/postrm.sh [new file with mode: 0644]
build/rails-package-scripts/prerm.sh [new file with mode: 0644]
build/rails-package-scripts/step2.sh [new file with mode: 0644]
build/run-build-docker-images.sh [new file with mode: 0755]
build/run-build-docker-jobs-image.sh [new file with mode: 0755]
build/run-build-packages-all-targets.sh [new file with mode: 0755]
build/run-build-packages-one-target.sh [new file with mode: 0755]
build/run-build-packages-python-and-ruby.sh [new file with mode: 0755]
build/run-build-packages-sso.sh [new file with mode: 0755]
build/run-build-packages.sh [new file with mode: 0755]
build/run-build-test-packages-one-target.sh [new file with mode: 0755]
build/run-library.sh [new file with mode: 0755]
build/run-tests.sh [new file with mode: 0755]
cc-by-sa-3.0.txt [new file with mode: 0644]
cmd/arvados-client/.gitignore [new file with mode: 0644]
cmd/arvados-client/cmd.go [new file with mode: 0644]
cmd/arvados-client/cmd_test.go [new file with mode: 0644]
cmd/arvados-server/arvados-controller.service [new file with mode: 0644]
cmd/arvados-server/arvados-dispatch-cloud.service [new file with mode: 0644]
cmd/arvados-server/cmd.go [new file with mode: 0644]
crunch_scripts/GATK2-VariantFiltration [new file with mode: 0755]
crunch_scripts/GATK2-bqsr [new file with mode: 0755]
crunch_scripts/GATK2-merge-call [new file with mode: 0755]
crunch_scripts/GATK2-realign [new file with mode: 0755]
crunch_scripts/arvados-bcbio-nextgen.py [new file with mode: 0755]
crunch_scripts/arvados_bwa.py [new file with mode: 0644]
crunch_scripts/arvados_gatk2.py [new file with mode: 0644]
crunch_scripts/arvados_ipc.py [new file with mode: 0644]
crunch_scripts/arvados_picard.py [new file with mode: 0644]
crunch_scripts/arvados_samtools.py [new file with mode: 0644]
crunch_scripts/bwa-aln [new file with mode: 0755]
crunch_scripts/bwa-index [new file with mode: 0755]
crunch_scripts/collection-merge [new file with mode: 0755]
crunch_scripts/crunchrunner [new file with mode: 0755]
crunch_scripts/crunchutil/__init__.py [new file with mode: 0644]
crunch_scripts/crunchutil/robust_put.py [new file with mode: 0644]
crunch_scripts/crunchutil/subst.py [new file with mode: 0644]
crunch_scripts/crunchutil/vwd.py [new file with mode: 0644]
crunch_scripts/cwl-runner [new file with mode: 0755]
crunch_scripts/decompress-all.py [new file with mode: 0755]
crunch_scripts/file-select [new file with mode: 0755]
crunch_scripts/grep [new file with mode: 0755]
crunch_scripts/hash [new file with mode: 0755]
crunch_scripts/pgp-survey-import [new file with mode: 0755]
crunch_scripts/pgp-survey-parse [new file with mode: 0755]
crunch_scripts/picard-gatk2-prep [new file with mode: 0755]
crunch_scripts/pyrtg.py [new file with mode: 0644]
crunch_scripts/rtg-fasta2sdf [new file with mode: 0755]
crunch_scripts/rtg-fastq2sdf [new file with mode: 0755]
crunch_scripts/rtg-map [new file with mode: 0755]
crunch_scripts/rtg-snp [new file with mode: 0755]
crunch_scripts/run-command [new file with mode: 0755]
crunch_scripts/split-fastq.py [new file with mode: 0755]
crunch_scripts/test/task_output_dir [new file with mode: 0755]
doc/Gemfile [new file with mode: 0644]
doc/Gemfile.lock [new file with mode: 0644]
doc/README.textile [new file with mode: 0644]
doc/Rakefile [new file with mode: 0644]
doc/_config.yml [new file with mode: 0644]
doc/_includes/_0_filter_py.liquid [new file with mode: 0644]
doc/_includes/_alert-incomplete.liquid [new file with mode: 0644]
doc/_includes/_alert_stub.liquid [new file with mode: 0644]
doc/_includes/_arv_copy_expectations.liquid [new file with mode: 0644]
doc/_includes/_arv_run_redirection.liquid [new file with mode: 0644]
doc/_includes/_compute_ping_rb.liquid [new file with mode: 0644]
doc/_includes/_concurrent_hash_script_py.liquid [new file with mode: 0644]
doc/_includes/_container_runtime_constraints.liquid [new file with mode: 0644]
doc/_includes/_container_scheduling_parameters.liquid [new file with mode: 0644]
doc/_includes/_create_superuser_token.liquid [new file with mode: 0644]
doc/_includes/_crunch1only_begin.liquid [new file with mode: 0644]
doc/_includes/_crunch1only_end.liquid [new file with mode: 0644]
doc/_includes/_example_docker.liquid [new file with mode: 0644]
doc/_includes/_example_sdk_go.liquid [new file with mode: 0644]
doc/_includes/_federated_cwl.liquid [new symlink]
doc/_includes/_install_compute_docker.liquid [new file with mode: 0644]
doc/_includes/_install_compute_fuse.liquid [new file with mode: 0644]
doc/_includes/_install_debian_key.liquid [new file with mode: 0644]
doc/_includes/_install_docker_cleaner.liquid [new file with mode: 0644]
doc/_includes/_install_git.liquid [new file with mode: 0644]
doc/_includes/_install_git_curl.liquid [new file with mode: 0644]
doc/_includes/_install_postgres_database.liquid [new file with mode: 0644]
doc/_includes/_install_rails_command.liquid [new file with mode: 0644]
doc/_includes/_install_rails_reconfigure.liquid [new file with mode: 0644]
doc/_includes/_install_redhat_key.liquid [new file with mode: 0644]
doc/_includes/_install_ruby_and_bundler.liquid [new file with mode: 0644]
doc/_includes/_install_runit.liquid [new file with mode: 0644]
doc/_includes/_mount_types.liquid [new file with mode: 0644]
doc/_includes/_navbar_left.liquid [new file with mode: 0644]
doc/_includes/_navbar_top.liquid [new file with mode: 0644]
doc/_includes/_note_python_sc.liquid [new file with mode: 0644]
doc/_includes/_notebox_begin.liquid [new file with mode: 0644]
doc/_includes/_notebox_begin_warning.liquid [new file with mode: 0644]
doc/_includes/_notebox_end.liquid [new file with mode: 0644]
doc/_includes/_pipeline_deprecation_notice.liquid [new file with mode: 0644]
doc/_includes/_run_command_foreach_example.liquid [new file with mode: 0644]
doc/_includes/_run_command_simple_example.liquid [new file with mode: 0644]
doc/_includes/_run_md5sum_py.liquid [new file with mode: 0644]
doc/_includes/_shards_yml.liquid [new symlink]
doc/_includes/_ssh_addkey.liquid [new file with mode: 0644]
doc/_includes/_ssh_intro.liquid [new file with mode: 0644]
doc/_includes/_tutorial_bwa_sortsam_pipeline.liquid [new file with mode: 0644]
doc/_includes/_tutorial_cluster_name.liquid [new file with mode: 0644]
doc/_includes/_tutorial_expectations.liquid [new file with mode: 0644]
doc/_includes/_tutorial_expectations_workstation.liquid [new file with mode: 0644]
doc/_includes/_tutorial_git_repo_expectations.liquid [new file with mode: 0644]
doc/_includes/_tutorial_hash_script_py.liquid [new file with mode: 0644]
doc/_includes/_tutorial_submit_job.liquid [new file with mode: 0644]
doc/_includes/_webring.liquid [new file with mode: 0644]
doc/_includes/_what_is_cwl.liquid [new file with mode: 0644]
doc/_layouts/default.html.liquid [new file with mode: 0644]
doc/admin/activation.html.textile.liquid [new file with mode: 0644]
doc/admin/collection-versioning.html.textile.liquid [new file with mode: 0644]
doc/admin/federation.html.textile.liquid [new file with mode: 0644]
doc/admin/health-checks.html.textile.liquid [new file with mode: 0644]
doc/admin/index.html.textile.liquid [new file with mode: 0644]
doc/admin/management-token.html.textile.liquid [new file with mode: 0644]
doc/admin/merge-remote-account.html.textile.liquid [new file with mode: 0644]
doc/admin/metrics.html.textile.liquid [new file with mode: 0644]
doc/admin/migrating-providers.html.textile.liquid [new file with mode: 0644]
doc/admin/spot-instances.html.textile.liquid [new file with mode: 0644]
doc/admin/storage-classes.html.textile.liquid [new file with mode: 0644]
doc/admin/upgrade-crunch2.html.textile.liquid [new file with mode: 0644]
doc/admin/upgrading.html.textile.liquid [new file with mode: 0644]
doc/api/crunch-scripts.html.textile.liquid [new file with mode: 0644]
doc/api/execution.html.textile.liquid [new file with mode: 0644]
doc/api/index.html.textile.liquid [new file with mode: 0644]
doc/api/methods.html.textile.liquid [new file with mode: 0644]
doc/api/methods/api_client_authorizations.html.textile.liquid [new file with mode: 0644]
doc/api/methods/api_clients.html.textile.liquid [new file with mode: 0644]
doc/api/methods/authorized_keys.html.textile.liquid [new file with mode: 0644]
doc/api/methods/collections.html.textile.liquid [new file with mode: 0644]
doc/api/methods/container_requests.html.textile.liquid [new file with mode: 0644]
doc/api/methods/containers.html.textile.liquid [new file with mode: 0644]
doc/api/methods/groups.html.textile.liquid [new file with mode: 0644]
doc/api/methods/humans.html.textile.liquid [new file with mode: 0644]
doc/api/methods/job_tasks.html.textile.liquid [new file with mode: 0644]
doc/api/methods/jobs.html.textile.liquid [new file with mode: 0644]
doc/api/methods/keep_disks.html.textile.liquid [new file with mode: 0644]
doc/api/methods/keep_services.html.textile.liquid [new file with mode: 0644]
doc/api/methods/links.html.textile.liquid [new file with mode: 0644]
doc/api/methods/logs.html.textile.liquid [new file with mode: 0644]
doc/api/methods/nodes.html.textile.liquid [new file with mode: 0644]
doc/api/methods/pipeline_instances.html.textile.liquid [new file with mode: 0644]
doc/api/methods/pipeline_templates.html.textile.liquid [new file with mode: 0644]
doc/api/methods/repositories.html.textile.liquid [new file with mode: 0644]
doc/api/methods/specimens.html.textile.liquid [new file with mode: 0644]
doc/api/methods/traits.html.textile.liquid [new file with mode: 0644]
doc/api/methods/users.html.textile.liquid [new file with mode: 0644]
doc/api/methods/virtual_machines.html.textile.liquid [new file with mode: 0644]
doc/api/methods/workflows.html.textile.liquid [new file with mode: 0644]
doc/api/permission-model.html.textile.liquid [new file with mode: 0644]
doc/api/requests.html.textile.liquid [new file with mode: 0644]
doc/api/resources.html.textile.liquid [new file with mode: 0644]
doc/api/storage.html.textile.liquid [new file with mode: 0644]
doc/api/tokens.html.textile.liquid [new file with mode: 0644]
doc/architecture/Arvados_arch.odg [new file with mode: 0644]
doc/architecture/Arvados_federation.odg [new file with mode: 0644]
doc/architecture/federation.html.textile.liquid [new file with mode: 0644]
doc/architecture/index.html.textile.liquid [new file with mode: 0644]
doc/css/badges.css [new file with mode: 0644]
doc/css/bootstrap-theme.css [new file with mode: 0644]
doc/css/bootstrap-theme.css.map [new file with mode: 0644]
doc/css/bootstrap-theme.min.css [new file with mode: 0644]
doc/css/bootstrap.css [new file with mode: 0644]
doc/css/bootstrap.css.map [new file with mode: 0644]
doc/css/bootstrap.min.css [new file with mode: 0644]
doc/css/button-override.css [new file with mode: 0644]
doc/css/carousel-override.css [new file with mode: 0644]
doc/css/code.css [new file with mode: 0644]
doc/css/font-awesome.css [new file with mode: 0644]
doc/css/images.css [new file with mode: 0644]
doc/css/nav-list.css [new file with mode: 0644]
doc/examples/pipeline_templates/gatk-exome-fq-snp.json [new file with mode: 0644]
doc/examples/pipeline_templates/rtg-fq-snp.json [new file with mode: 0644]
doc/examples/ruby/list-active-nodes.rb [new file with mode: 0755]
doc/fonts/FontAwesome.otf [new file with mode: 0644]
doc/fonts/fontawesome-webfont.eot [new file with mode: 0755]
doc/fonts/fontawesome-webfont.svg [new file with mode: 0755]
doc/fonts/fontawesome-webfont.ttf [new file with mode: 0755]
doc/fonts/fontawesome-webfont.woff [new file with mode: 0755]
doc/fonts/glyphicons-halflings-regular.eot [new file with mode: 0644]
doc/fonts/glyphicons-halflings-regular.svg [new file with mode: 0644]
doc/fonts/glyphicons-halflings-regular.ttf [new file with mode: 0644]
doc/fonts/glyphicons-halflings-regular.woff [new file with mode: 0644]
doc/gen_api_method_docs.py [new file with mode: 0755]
doc/gen_api_schema_docs.py [new file with mode: 0755]
doc/images/Arvados_Permissions.svg [new file with mode: 0644]
doc/images/Arvados_arch.svg [new file with mode: 0644]
doc/images/Crunch_dispatch.svg [new file with mode: 0644]
doc/images/Keep_manifests.svg [new file with mode: 0644]
doc/images/Keep_reading_writing_block.svg [new file with mode: 0644]
doc/images/Keep_rendezvous_hashing.svg [new file with mode: 0644]
doc/images/Session_Establishment.svg [new file with mode: 0644]
doc/images/add-new-repository.png [new file with mode: 0644]
doc/images/added-new-repository.png [new file with mode: 0644]
doc/images/api-token-host.png [new file with mode: 0644]
doc/images/arvados_federation.svg [new file with mode: 0644]
doc/images/dax-reading-book.png [new file with mode: 0644]
doc/images/dax.png [new file with mode: 0644]
doc/images/doc-bg.jpg [new file with mode: 0644]
doc/images/download-shared-collection.png [new file with mode: 0644]
doc/images/favicon.ico [new file with mode: 0644]
doc/images/files-uploaded.png [new file with mode: 0644]
doc/images/glyphicons-halflings-white.png [new file with mode: 0644]
doc/images/glyphicons-halflings.png [new file with mode: 0644]
doc/images/keyfeatures/chooseinputs.png [new file with mode: 0644]
doc/images/keyfeatures/collectionpage.png [new file with mode: 0644]
doc/images/keyfeatures/dashboard2.png [new file with mode: 0644]
doc/images/keyfeatures/graph.png [new file with mode: 0644]
doc/images/keyfeatures/log.png [new file with mode: 0644]
doc/images/keyfeatures/provenance.png [new file with mode: 0644]
doc/images/keyfeatures/rerun.png [new file with mode: 0644]
doc/images/keyfeatures/running2.png [new file with mode: 0644]
doc/images/keyfeatures/shared.png [new file with mode: 0644]
doc/images/keyfeatures/webupload.png [new file with mode: 0644]
doc/images/publicproject/collection-files.png [new file with mode: 0644]
doc/images/publicproject/collection-graph.png [new file with mode: 0644]
doc/images/publicproject/collection-show.png [new file with mode: 0644]
doc/images/publicproject/collections.png [new file with mode: 0644]
doc/images/publicproject/description.png [new file with mode: 0644]
doc/images/publicproject/instance-advanced.png [new file with mode: 0644]
doc/images/publicproject/instance-components.png [new file with mode: 0644]
doc/images/publicproject/instance-graph.png [new file with mode: 0644]
doc/images/publicproject/instance-job.png [new file with mode: 0644]
doc/images/publicproject/instance-log.png [new file with mode: 0644]
doc/images/publicproject/instance-show.png [new file with mode: 0644]
doc/images/publicproject/instances.png [new file with mode: 0644]
doc/images/quickstart/1.png [new file with mode: 0644]
doc/images/quickstart/2.png [new file with mode: 0644]
doc/images/quickstart/3.png [new file with mode: 0644]
doc/images/quickstart/4.png [new file with mode: 0644]
doc/images/quickstart/5.png [new file with mode: 0644]
doc/images/quickstart/6.png [new file with mode: 0644]
doc/images/quickstart/7.png [new file with mode: 0644]
doc/images/repositories-panel.png [new file with mode: 0644]
doc/images/shared-collection.png [new file with mode: 0644]
doc/images/ssh-adding-public-key.png [new file with mode: 0644]
doc/images/trash-button-topnav.png [new file with mode: 0644]
doc/images/upload-tab-in-new-collection.png [new file with mode: 0644]
doc/images/upload-using-workbench.png [new file with mode: 0644]
doc/images/uses/choosefiles.png [new file with mode: 0644]
doc/images/uses/gotohome.png [new file with mode: 0644]
doc/images/uses/rename.png [new file with mode: 0644]
doc/images/uses/shared.png [new file with mode: 0644]
doc/images/uses/sharedsubdirs.png [new file with mode: 0644]
doc/images/uses/sharing.png [new file with mode: 0644]
doc/images/uses/uploaddata.png [new file with mode: 0644]
doc/images/uses/uploading.png [new file with mode: 0644]
doc/images/vm-access-with-webshell.png [new file with mode: 0644]
doc/images/workbench-dashboard.png [new file with mode: 0644]
doc/images/workbench-move-selected.png [new file with mode: 0644]
doc/index.html.liquid [new file with mode: 0644]
doc/install/arvados-on-kubernetes-GKE.html.textile.liquid [new file with mode: 0644]
doc/install/arvados-on-kubernetes-minikube.html.textile.liquid [new file with mode: 0644]
doc/install/arvados-on-kubernetes.html.textile.liquid [new file with mode: 0644]
doc/install/arvbox.html.textile.liquid [new file with mode: 0644]
doc/install/cheat_sheet.html.textile.liquid [new file with mode: 0644]
doc/install/client.html.textile.liquid [new file with mode: 0644]
doc/install/configure-azure-blob-storage.html.textile.liquid [new file with mode: 0644]
doc/install/configure-fs-storage.html.textile.liquid [new file with mode: 0644]
doc/install/configure-s3-object-storage.html.textile.liquid [new file with mode: 0644]
doc/install/copy_pipeline_from_curoverse.html.textile.liquid [new file with mode: 0644]
doc/install/crunch2-slurm/install-compute-node.html.textile.liquid [new file with mode: 0644]
doc/install/crunch2-slurm/install-dispatch.html.textile.liquid [new file with mode: 0644]
doc/install/crunch2-slurm/install-prerequisites.html.textile.liquid [new file with mode: 0644]
doc/install/crunch2-slurm/install-slurm.html.textile.liquid [new file with mode: 0644]
doc/install/crunch2-slurm/install-test.html.textile.liquid [new file with mode: 0644]
doc/install/index.html.textile.liquid [new file with mode: 0644]
doc/install/install-api-server.html.textile.liquid [new file with mode: 0644]
doc/install/install-arv-git-httpd.html.textile.liquid [new file with mode: 0644]
doc/install/install-components.html.textile.liquid [new file with mode: 0644]
doc/install/install-composer.html.textile.liquid [new file with mode: 0644]
doc/install/install-compute-node.html.textile.liquid [new file with mode: 0644]
doc/install/install-compute-ping.html.textile.liquid [new file with mode: 0644]
doc/install/install-controller.html.textile.liquid [new file with mode: 0644]
doc/install/install-crunch-dispatch.html.textile.liquid [new file with mode: 0644]
doc/install/install-keep-balance.html.textile.liquid [new file with mode: 0644]
doc/install/install-keep-web.html.textile.liquid [new file with mode: 0644]
doc/install/install-keepproxy.html.textile.liquid [new file with mode: 0644]
doc/install/install-keepstore.html.textile.liquid [new file with mode: 0644]
doc/install/install-manual-overview.html.textile.liquid [new file with mode: 0644]
doc/install/install-manual-prerequisites.html.textile.liquid [new file with mode: 0644]
doc/install/install-nodemanager.html.textile.liquid [new file with mode: 0644]
doc/install/install-postgresql.html.textile.liquid [new file with mode: 0644]
doc/install/install-shell-server.html.textile.liquid [new file with mode: 0644]
doc/install/install-sso.html.textile.liquid [new file with mode: 0644]
doc/install/install-workbench-app.html.textile.liquid [new file with mode: 0644]
doc/install/install-ws.html.textile.liquid [new file with mode: 0644]
doc/install/migrate-docker19.html.textile.liquid [new file with mode: 0644]
doc/install/pre-built-docker.html.textile.liquid [new file with mode: 0644]
doc/js/bootstrap.js [new file with mode: 0644]
doc/js/bootstrap.min.js [new file with mode: 0644]
doc/js/jquery.min.js [new file with mode: 0644]
doc/sdk/cli/index.html.textile.liquid [new file with mode: 0644]
doc/sdk/cli/install.html.textile.liquid [new file with mode: 0644]
doc/sdk/cli/reference.html.textile.liquid [new file with mode: 0644]
doc/sdk/cli/subcommands.html.textile.liquid [new file with mode: 0644]
doc/sdk/go/example.html.textile.liquid [new file with mode: 0644]
doc/sdk/go/index.html.textile.liquid [new file with mode: 0644]
doc/sdk/index.html.textile.liquid [new file with mode: 0644]
doc/sdk/java/example.html.textile.liquid [new file with mode: 0644]
doc/sdk/java/index.html.textile.liquid [new file with mode: 0644]
doc/sdk/perl/example.html.textile.liquid [new file with mode: 0644]
doc/sdk/perl/index.html.textile.liquid [new file with mode: 0644]
doc/sdk/python/arvados-fuse.html.textile.liquid [new file with mode: 0644]
doc/sdk/python/cookbook.html.textile.liquid [new file with mode: 0644]
doc/sdk/python/crunch-utility-libraries.html.textile.liquid [new file with mode: 0644]
doc/sdk/python/events.html.textile.liquid [new file with mode: 0644]
doc/sdk/python/example.html.textile.liquid [new file with mode: 0644]
doc/sdk/python/python.html.textile.liquid [new file with mode: 0644]
doc/sdk/python/sdk-python.html.textile.liquid [new file with mode: 0644]
doc/sdk/ruby/example.html.textile.liquid [new file with mode: 0644]
doc/sdk/ruby/index.html.textile.liquid [new file with mode: 0644]
doc/start/getting_started/firstpipeline.html.textile.liquid [new file with mode: 0644]
doc/start/getting_started/nextsteps.html.textile.liquid [new file with mode: 0644]
doc/start/getting_started/publicproject.html.textile.liquid [new file with mode: 0644]
doc/start/getting_started/sharedata.html.textile.liquid [new file with mode: 0644]
doc/start/index.html.textile.liquid [new file with mode: 0644]
doc/user/composer/c1.png [new file with mode: 0644]
doc/user/composer/c10.png [new file with mode: 0644]
doc/user/composer/c11.png [new file with mode: 0644]
doc/user/composer/c12.png [new file with mode: 0644]
doc/user/composer/c13.png [new file with mode: 0644]
doc/user/composer/c14.png [new file with mode: 0644]
doc/user/composer/c15.png [new file with mode: 0644]
doc/user/composer/c16.png [new file with mode: 0644]
doc/user/composer/c17.png [new file with mode: 0644]
doc/user/composer/c18.png [new file with mode: 0644]
doc/user/composer/c19.png [new file with mode: 0644]
doc/user/composer/c2.png [new file with mode: 0644]
doc/user/composer/c20.png [new file with mode: 0644]
doc/user/composer/c21.png [new file with mode: 0644]
doc/user/composer/c22.png [new file with mode: 0644]
doc/user/composer/c23.png [new file with mode: 0644]
doc/user/composer/c24.png [new file with mode: 0644]
doc/user/composer/c2b.png [new file with mode: 0644]
doc/user/composer/c2c.png [new file with mode: 0644]
doc/user/composer/c3.png [new file with mode: 0644]
doc/user/composer/c4.png [new file with mode: 0644]
doc/user/composer/c5.png [new file with mode: 0644]
doc/user/composer/c6.png [new file with mode: 0644]
doc/user/composer/c7.png [new file with mode: 0644]
doc/user/composer/c8.png [new file with mode: 0644]
doc/user/composer/c9.png [new file with mode: 0644]
doc/user/composer/composer.html.textile.liquid [new file with mode: 0644]
doc/user/copying/LICENSE-2.0.html [new file with mode: 0644]
doc/user/copying/agpl-3.0.html [new file with mode: 0644]
doc/user/copying/by-sa-3.0.html [new file with mode: 0644]
doc/user/copying/copying.html.textile.liquid [new file with mode: 0644]
doc/user/cwl/bwa-mem/bwa-mem-input-local.yml [new file with mode: 0755]
doc/user/cwl/bwa-mem/bwa-mem-input.yml [new file with mode: 0755]
doc/user/cwl/bwa-mem/bwa-mem-template.yml [new file with mode: 0644]
doc/user/cwl/bwa-mem/bwa-mem.cwl [new file with mode: 0755]
doc/user/cwl/cwl-extensions.html.textile.liquid [new file with mode: 0644]
doc/user/cwl/cwl-run-options.html.textile.liquid [new file with mode: 0644]
doc/user/cwl/cwl-runner.html.textile.liquid [new file with mode: 0644]
doc/user/cwl/cwl-style.html.textile.liquid [new file with mode: 0644]
doc/user/cwl/federated-workflow.odg [new file with mode: 0644]
doc/user/cwl/federated-workflow.svg [new file with mode: 0644]
doc/user/cwl/federated-workflows.html.textile.liquid [new file with mode: 0644]
doc/user/cwl/federated/cat.cwl [new file with mode: 0644]
doc/user/cwl/federated/federated.cwl [new file with mode: 0644]
doc/user/cwl/federated/file-on-clsr1.dat [new file with mode: 0644]
doc/user/cwl/federated/file-on-clsr2.dat [new file with mode: 0644]
doc/user/cwl/federated/file-on-clsr3.dat [new file with mode: 0644]
doc/user/cwl/federated/md5sum.cwl [new file with mode: 0644]
doc/user/cwl/federated/shards.yml [new file with mode: 0644]
doc/user/examples/crunch-examples.html.textile.liquid [new file with mode: 0644]
doc/user/getting_started/check-environment.html.textile.liquid [new file with mode: 0644]
doc/user/getting_started/community.html.textile.liquid [new file with mode: 0644]
doc/user/getting_started/ssh-access-unix.html.textile.liquid [new file with mode: 0644]
doc/user/getting_started/ssh-access-windows.html.textile.liquid [new file with mode: 0644]
doc/user/getting_started/vm-login-with-webshell.html.textile.liquid [new file with mode: 0644]
doc/user/getting_started/workbench.html.textile.liquid [new file with mode: 0644]
doc/user/index.html.textile.liquid [new file with mode: 0644]
doc/user/reference/api-tokens.html.textile.liquid [new file with mode: 0644]
doc/user/reference/cookbook.html.textile.liquid [new file with mode: 0644]
doc/user/reference/job-pipeline-ref.html.textile.liquid [new file with mode: 0644]
doc/user/topics/arv-copy.html.textile.liquid [new file with mode: 0644]
doc/user/topics/arv-docker.html.textile.liquid [new file with mode: 0644]
doc/user/topics/arv-run.html.textile.liquid [new file with mode: 0644]
doc/user/topics/arv-web.html.textile.liquid [new file with mode: 0644]
doc/user/topics/arvados-sync-groups.html.textile.liquid [new file with mode: 0644]
doc/user/topics/collection-versioning.html.textile.liquid [new file with mode: 0644]
doc/user/topics/crunch-tools-overview.html.textile.liquid [new file with mode: 0644]
doc/user/topics/keep.html.textile.liquid [new file with mode: 0644]
doc/user/topics/link-accounts.html.textile.liquid [new file with mode: 0644]
doc/user/topics/run-command.html.textile.liquid [new file with mode: 0644]
doc/user/topics/running-pipeline-command-line.html.textile.liquid [new file with mode: 0644]
doc/user/topics/storage-classes.html.textile.liquid [new file with mode: 0644]
doc/user/topics/tutorial-gatk-variantfiltration.html.textile.liquid [new file with mode: 0644]
doc/user/topics/tutorial-job1.html.textile.liquid [new file with mode: 0644]
doc/user/topics/tutorial-parallel.html.textile.liquid [new file with mode: 0644]
doc/user/topics/tutorial-trait-search.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/add-new-repository.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/git-arvados-guide.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/intro-crunch.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/running-external-program.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-firstscript.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-keep-collection-lifecycle.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-keep-get.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-keep-mount-gnu-linux.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-keep-mount-os-x.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-keep-mount-windows.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-keep.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-submit-job.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-workflow-workbench.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/writing-cwl-workflow.html.textile.liquid [new file with mode: 0644]
doc/zenweb-liquid.rb [new file with mode: 0644]
doc/zenweb-textile.rb [new file with mode: 0644]
docker/jobs/1078ECD7.key [new file with mode: 0644]
docker/jobs/Dockerfile [new file with mode: 0644]
docker/jobs/apt.arvados.org-dev.list [new file with mode: 0644]
docker/jobs/apt.arvados.org-stable.list [new file with mode: 0644]
docker/jobs/apt.arvados.org-testing.list [new file with mode: 0644]
docker/migrate-docker19/Dockerfile [new file with mode: 0644]
docker/migrate-docker19/build.sh [new file with mode: 0755]
docker/migrate-docker19/dnd.sh [new file with mode: 0755]
docker/migrate-docker19/migrate.sh [new file with mode: 0755]
lib/cli/external.go [new file with mode: 0644]
lib/cli/flags.go [new file with mode: 0644]
lib/cli/get.go [new file with mode: 0644]
lib/cli/get_test.go [new file with mode: 0644]
lib/cloud/azure/azure.go [new file with mode: 0644]
lib/cloud/azure/azure_test.go [new file with mode: 0644]
lib/cloud/ec2/ec2.go [new file with mode: 0644]
lib/cloud/ec2/ec2_test.go [new file with mode: 0644]
lib/cloud/interfaces.go [new file with mode: 0644]
lib/cmd/cmd.go [new file with mode: 0644]
lib/cmd/cmd_test.go [new file with mode: 0644]
lib/cmdtest/leakcheck.go [new file with mode: 0644]
lib/controller/cmd.go [new file with mode: 0644]
lib/controller/fed_collections.go [new file with mode: 0644]
lib/controller/fed_containers.go [new file with mode: 0644]
lib/controller/fed_generic.go [new file with mode: 0644]
lib/controller/federation.go [new file with mode: 0644]
lib/controller/federation_test.go [new file with mode: 0644]
lib/controller/handler.go [new file with mode: 0644]
lib/controller/handler_test.go [new file with mode: 0644]
lib/controller/proxy.go [new file with mode: 0644]
lib/controller/server_test.go [new file with mode: 0644]
lib/crunchstat/crunchstat.go [new file with mode: 0644]
lib/crunchstat/crunchstat_test.go [new file with mode: 0644]
lib/dispatchcloud/cmd.go [new file with mode: 0644]
lib/dispatchcloud/container/queue.go [new file with mode: 0644]
lib/dispatchcloud/container/queue_test.go [new file with mode: 0644]
lib/dispatchcloud/dispatcher.go [new file with mode: 0644]
lib/dispatchcloud/dispatcher_test.go [new file with mode: 0644]
lib/dispatchcloud/driver.go [new file with mode: 0644]
lib/dispatchcloud/gocheck_test.go [new file with mode: 0644]
lib/dispatchcloud/logger.go [new file with mode: 0644]
lib/dispatchcloud/node_size.go [new file with mode: 0644]
lib/dispatchcloud/node_size_test.go [new file with mode: 0644]
lib/dispatchcloud/readme.go [new file with mode: 0644]
lib/dispatchcloud/readme_states.txt [new file with mode: 0644]
lib/dispatchcloud/scheduler/fix_stale_locks.go [new file with mode: 0644]
lib/dispatchcloud/scheduler/gocheck_test.go [new file with mode: 0644]
lib/dispatchcloud/scheduler/interfaces.go [new file with mode: 0644]
lib/dispatchcloud/scheduler/run_queue.go [new file with mode: 0644]
lib/dispatchcloud/scheduler/run_queue_test.go [new file with mode: 0644]
lib/dispatchcloud/scheduler/scheduler.go [new file with mode: 0644]
lib/dispatchcloud/scheduler/sync.go [new file with mode: 0644]
lib/dispatchcloud/ssh_executor/executor.go [new file with mode: 0644]
lib/dispatchcloud/ssh_executor/executor_test.go [new file with mode: 0644]
lib/dispatchcloud/test/doc.go [new file with mode: 0644]
lib/dispatchcloud/test/fixtures.go [new file with mode: 0644]
lib/dispatchcloud/test/queue.go [new file with mode: 0644]
lib/dispatchcloud/test/ssh_service.go [new file with mode: 0644]
lib/dispatchcloud/test/sshkey_dispatch [new file with mode: 0644]
lib/dispatchcloud/test/sshkey_dispatch.pub [new file with mode: 0644]
lib/dispatchcloud/test/sshkey_vm [new file with mode: 0644]
lib/dispatchcloud/test/sshkey_vm.pub [new file with mode: 0644]
lib/dispatchcloud/test/stub_driver.go [new file with mode: 0644]
lib/dispatchcloud/worker/gocheck_test.go [new file with mode: 0644]
lib/dispatchcloud/worker/pool.go [new file with mode: 0644]
lib/dispatchcloud/worker/pool_test.go [new file with mode: 0644]
lib/dispatchcloud/worker/throttle.go [new file with mode: 0644]
lib/dispatchcloud/worker/throttle_test.go [new file with mode: 0644]
lib/dispatchcloud/worker/verify.go [new file with mode: 0644]
lib/dispatchcloud/worker/worker.go [new file with mode: 0644]
lib/dispatchcloud/worker/worker_test.go [new file with mode: 0644]
lib/service/cmd.go [new file with mode: 0644]
sdk/R/.Rbuildignore [new file with mode: 0644]
sdk/R/ArvadosR.Rproj [new file with mode: 0644]
sdk/R/DESCRIPTION [new file with mode: 0644]
sdk/R/NAMESPACE [new file with mode: 0644]
sdk/R/R/Arvados.R [new file with mode: 0644]
sdk/R/R/ArvadosFile.R [new file with mode: 0644]
sdk/R/R/Collection.R [new file with mode: 0644]
sdk/R/R/CollectionTree.R [new file with mode: 0644]
sdk/R/R/HttpParser.R [new file with mode: 0644]
sdk/R/R/HttpRequest.R [new file with mode: 0644]
sdk/R/R/RESTService.R [new file with mode: 0644]
sdk/R/R/Subcollection.R [new file with mode: 0644]
sdk/R/R/autoGenAPI.R [new file with mode: 0644]
sdk/R/R/util.R [new file with mode: 0644]
sdk/R/R/zzz.R [new file with mode: 0644]
sdk/R/README.Rmd [new file with mode: 0644]
sdk/R/createDoc.R [new file with mode: 0644]
sdk/R/install_deps.R [new file with mode: 0644]
sdk/R/man/Arvados.Rd [new file with mode: 0644]
sdk/R/man/ArvadosFile.Rd [new file with mode: 0644]
sdk/R/man/Collection.Rd [new file with mode: 0644]
sdk/R/man/Subcollection.Rd [new file with mode: 0644]
sdk/R/man/api_client_authorizations.create.Rd [new file with mode: 0644]
sdk/R/man/api_client_authorizations.create_system_auth.Rd [new file with mode: 0644]
sdk/R/man/api_client_authorizations.current.Rd [new file with mode: 0644]
sdk/R/man/api_client_authorizations.delete.Rd [new file with mode: 0644]
sdk/R/man/api_client_authorizations.get.Rd [new file with mode: 0644]
sdk/R/man/api_client_authorizations.list.Rd [new file with mode: 0644]
sdk/R/man/api_client_authorizations.update.Rd [new file with mode: 0644]
sdk/R/man/api_clients.create.Rd [new file with mode: 0644]
sdk/R/man/api_clients.delete.Rd [new file with mode: 0644]
sdk/R/man/api_clients.get.Rd [new file with mode: 0644]
sdk/R/man/api_clients.list.Rd [new file with mode: 0644]
sdk/R/man/api_clients.update.Rd [new file with mode: 0644]
sdk/R/man/authorized_keys.create.Rd [new file with mode: 0644]
sdk/R/man/authorized_keys.delete.Rd [new file with mode: 0644]
sdk/R/man/authorized_keys.get.Rd [new file with mode: 0644]
sdk/R/man/authorized_keys.list.Rd [new file with mode: 0644]
sdk/R/man/authorized_keys.update.Rd [new file with mode: 0644]
sdk/R/man/collections.create.Rd [new file with mode: 0644]
sdk/R/man/collections.delete.Rd [new file with mode: 0644]
sdk/R/man/collections.get.Rd [new file with mode: 0644]
sdk/R/man/collections.list.Rd [new file with mode: 0644]
sdk/R/man/collections.provenance.Rd [new file with mode: 0644]
sdk/R/man/collections.trash.Rd [new file with mode: 0644]
sdk/R/man/collections.untrash.Rd [new file with mode: 0644]
sdk/R/man/collections.update.Rd [new file with mode: 0644]
sdk/R/man/collections.used_by.Rd [new file with mode: 0644]
sdk/R/man/container_requests.create.Rd [new file with mode: 0644]
sdk/R/man/container_requests.delete.Rd [new file with mode: 0644]
sdk/R/man/container_requests.get.Rd [new file with mode: 0644]
sdk/R/man/container_requests.list.Rd [new file with mode: 0644]
sdk/R/man/container_requests.update.Rd [new file with mode: 0644]
sdk/R/man/containers.auth.Rd [new file with mode: 0644]
sdk/R/man/containers.create.Rd [new file with mode: 0644]
sdk/R/man/containers.current.Rd [new file with mode: 0644]
sdk/R/man/containers.delete.Rd [new file with mode: 0644]
sdk/R/man/containers.get.Rd [new file with mode: 0644]
sdk/R/man/containers.list.Rd [new file with mode: 0644]
sdk/R/man/containers.lock.Rd [new file with mode: 0644]
sdk/R/man/containers.secret_mounts.Rd [new file with mode: 0644]
sdk/R/man/containers.unlock.Rd [new file with mode: 0644]
sdk/R/man/containers.update.Rd [new file with mode: 0644]
sdk/R/man/groups.contents.Rd [new file with mode: 0644]
sdk/R/man/groups.create.Rd [new file with mode: 0644]
sdk/R/man/groups.delete.Rd [new file with mode: 0644]
sdk/R/man/groups.get.Rd [new file with mode: 0644]
sdk/R/man/groups.list.Rd [new file with mode: 0644]
sdk/R/man/groups.trash.Rd [new file with mode: 0644]
sdk/R/man/groups.untrash.Rd [new file with mode: 0644]
sdk/R/man/groups.update.Rd [new file with mode: 0644]
sdk/R/man/humans.create.Rd [new file with mode: 0644]
sdk/R/man/humans.delete.Rd [new file with mode: 0644]
sdk/R/man/humans.get.Rd [new file with mode: 0644]
sdk/R/man/humans.list.Rd [new file with mode: 0644]
sdk/R/man/humans.update.Rd [new file with mode: 0644]
sdk/R/man/job_tasks.create.Rd [new file with mode: 0644]
sdk/R/man/job_tasks.delete.Rd [new file with mode: 0644]
sdk/R/man/job_tasks.get.Rd [new file with mode: 0644]
sdk/R/man/job_tasks.list.Rd [new file with mode: 0644]
sdk/R/man/job_tasks.update.Rd [new file with mode: 0644]
sdk/R/man/jobs.cancel.Rd [new file with mode: 0644]
sdk/R/man/jobs.create.Rd [new file with mode: 0644]
sdk/R/man/jobs.delete.Rd [new file with mode: 0644]
sdk/R/man/jobs.get.Rd [new file with mode: 0644]
sdk/R/man/jobs.list.Rd [new file with mode: 0644]
sdk/R/man/jobs.lock.Rd [new file with mode: 0644]
sdk/R/man/jobs.queue.Rd [new file with mode: 0644]
sdk/R/man/jobs.queue_size.Rd [new file with mode: 0644]
sdk/R/man/jobs.update.Rd [new file with mode: 0644]
sdk/R/man/keep_disks.create.Rd [new file with mode: 0644]
sdk/R/man/keep_disks.delete.Rd [new file with mode: 0644]
sdk/R/man/keep_disks.get.Rd [new file with mode: 0644]
sdk/R/man/keep_disks.list.Rd [new file with mode: 0644]
sdk/R/man/keep_disks.ping.Rd [new file with mode: 0644]
sdk/R/man/keep_disks.update.Rd [new file with mode: 0644]
sdk/R/man/keep_services.accessible.Rd [new file with mode: 0644]
sdk/R/man/keep_services.create.Rd [new file with mode: 0644]
sdk/R/man/keep_services.delete.Rd [new file with mode: 0644]
sdk/R/man/keep_services.get.Rd [new file with mode: 0644]
sdk/R/man/keep_services.list.Rd [new file with mode: 0644]
sdk/R/man/keep_services.update.Rd [new file with mode: 0644]
sdk/R/man/links.create.Rd [new file with mode: 0644]
sdk/R/man/links.delete.Rd [new file with mode: 0644]
sdk/R/man/links.get.Rd [new file with mode: 0644]
sdk/R/man/links.get_permissions.Rd [new file with mode: 0644]
sdk/R/man/links.list.Rd [new file with mode: 0644]
sdk/R/man/links.update.Rd [new file with mode: 0644]
sdk/R/man/listAll.Rd [new file with mode: 0644]
sdk/R/man/logs.create.Rd [new file with mode: 0644]
sdk/R/man/logs.delete.Rd [new file with mode: 0644]
sdk/R/man/logs.get.Rd [new file with mode: 0644]
sdk/R/man/logs.list.Rd [new file with mode: 0644]
sdk/R/man/logs.update.Rd [new file with mode: 0644]
sdk/R/man/nodes.create.Rd [new file with mode: 0644]
sdk/R/man/nodes.delete.Rd [new file with mode: 0644]
sdk/R/man/nodes.get.Rd [new file with mode: 0644]
sdk/R/man/nodes.list.Rd [new file with mode: 0644]
sdk/R/man/nodes.ping.Rd [new file with mode: 0644]
sdk/R/man/nodes.update.Rd [new file with mode: 0644]
sdk/R/man/pipeline_instances.cancel.Rd [new file with mode: 0644]
sdk/R/man/pipeline_instances.create.Rd [new file with mode: 0644]
sdk/R/man/pipeline_instances.delete.Rd [new file with mode: 0644]
sdk/R/man/pipeline_instances.get.Rd [new file with mode: 0644]
sdk/R/man/pipeline_instances.list.Rd [new file with mode: 0644]
sdk/R/man/pipeline_instances.update.Rd [new file with mode: 0644]
sdk/R/man/pipeline_templates.create.Rd [new file with mode: 0644]
sdk/R/man/pipeline_templates.delete.Rd [new file with mode: 0644]
sdk/R/man/pipeline_templates.get.Rd [new file with mode: 0644]
sdk/R/man/pipeline_templates.list.Rd [new file with mode: 0644]
sdk/R/man/pipeline_templates.update.Rd [new file with mode: 0644]
sdk/R/man/print.ArvadosFile.Rd [new file with mode: 0644]
sdk/R/man/print.Collection.Rd [new file with mode: 0644]
sdk/R/man/print.Subcollection.Rd [new file with mode: 0644]
sdk/R/man/projects.create.Rd [new file with mode: 0644]
sdk/R/man/projects.delete.Rd [new file with mode: 0644]
sdk/R/man/projects.get.Rd [new file with mode: 0644]
sdk/R/man/projects.list.Rd [new file with mode: 0644]
sdk/R/man/projects.update.Rd [new file with mode: 0644]
sdk/R/man/repositories.create.Rd [new file with mode: 0644]
sdk/R/man/repositories.delete.Rd [new file with mode: 0644]
sdk/R/man/repositories.get.Rd [new file with mode: 0644]
sdk/R/man/repositories.get_all_permissions.Rd [new file with mode: 0644]
sdk/R/man/repositories.list.Rd [new file with mode: 0644]
sdk/R/man/repositories.update.Rd [new file with mode: 0644]
sdk/R/man/specimens.create.Rd [new file with mode: 0644]
sdk/R/man/specimens.delete.Rd [new file with mode: 0644]
sdk/R/man/specimens.get.Rd [new file with mode: 0644]
sdk/R/man/specimens.list.Rd [new file with mode: 0644]
sdk/R/man/specimens.update.Rd [new file with mode: 0644]
sdk/R/man/traits.create.Rd [new file with mode: 0644]
sdk/R/man/traits.delete.Rd [new file with mode: 0644]
sdk/R/man/traits.get.Rd [new file with mode: 0644]
sdk/R/man/traits.list.Rd [new file with mode: 0644]
sdk/R/man/traits.update.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.create.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.delete.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.get.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.list.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.new.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.sign.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.signatures.Rd [new file with mode: 0644]
sdk/R/man/user_agreements.update.Rd [new file with mode: 0644]
sdk/R/man/users.activate.Rd [new file with mode: 0644]
sdk/R/man/users.create.Rd [new file with mode: 0644]
sdk/R/man/users.current.Rd [new file with mode: 0644]
sdk/R/man/users.delete.Rd [new file with mode: 0644]
sdk/R/man/users.get.Rd [new file with mode: 0644]
sdk/R/man/users.list.Rd [new file with mode: 0644]
sdk/R/man/users.merge.Rd [new file with mode: 0644]
sdk/R/man/users.setup.Rd [new file with mode: 0644]
sdk/R/man/users.system.Rd [new file with mode: 0644]
sdk/R/man/users.unsetup.Rd [new file with mode: 0644]
sdk/R/man/users.update.Rd [new file with mode: 0644]
sdk/R/man/users.update_uuid.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.create.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.delete.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.get.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.get_all_logins.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.list.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.logins.Rd [new file with mode: 0644]
sdk/R/man/virtual_machines.update.Rd [new file with mode: 0644]
sdk/R/man/workflows.create.Rd [new file with mode: 0644]
sdk/R/man/workflows.delete.Rd [new file with mode: 0644]
sdk/R/man/workflows.get.Rd [new file with mode: 0644]
sdk/R/man/workflows.list.Rd [new file with mode: 0644]
sdk/R/man/workflows.update.Rd [new file with mode: 0644]
sdk/R/run_test.R [new file with mode: 0644]
sdk/R/tests/testthat.R [new file with mode: 0644]
sdk/R/tests/testthat/fakes/FakeArvados.R [new file with mode: 0644]
sdk/R/tests/testthat/fakes/FakeHttpParser.R [new file with mode: 0644]
sdk/R/tests/testthat/fakes/FakeHttpRequest.R [new file with mode: 0644]
sdk/R/tests/testthat/fakes/FakeRESTService.R [new file with mode: 0644]
sdk/R/tests/testthat/test-ArvadosFile.R [new file with mode: 0644]
sdk/R/tests/testthat/test-Collection.R [new file with mode: 0644]
sdk/R/tests/testthat/test-CollectionTree.R [new file with mode: 0644]
sdk/R/tests/testthat/test-HttpParser.R [new file with mode: 0644]
sdk/R/tests/testthat/test-HttpRequest.R [new file with mode: 0644]
sdk/R/tests/testthat/test-RESTService.R [new file with mode: 0644]
sdk/R/tests/testthat/test-Subcollection.R [new file with mode: 0644]
sdk/R/tests/testthat/test-util.R [new file with mode: 0644]
sdk/cli/.gitignore [new file with mode: 0644]
sdk/cli/Gemfile [new file with mode: 0644]
sdk/cli/LICENSE-2.0.txt [new file with mode: 0644]
sdk/cli/Rakefile [new file with mode: 0644]
sdk/cli/arvados-cli.gemspec [new file with mode: 0644]
sdk/cli/bin/arv [new file with mode: 0755]
sdk/cli/bin/arv-copy [new symlink]
sdk/cli/bin/arv-crunch-job [new file with mode: 0755]
sdk/cli/bin/arv-get [new symlink]
sdk/cli/bin/arv-keepdocker [new symlink]
sdk/cli/bin/arv-ls [new symlink]
sdk/cli/bin/arv-mount [new symlink]
sdk/cli/bin/arv-normalize [new symlink]
sdk/cli/bin/arv-put [new symlink]
sdk/cli/bin/arv-run-pipeline-instance [new file with mode: 0755]
sdk/cli/bin/arv-tag [new file with mode: 0755]
sdk/cli/bin/arv-ws [new symlink]
sdk/cli/bin/crunch-job [new file with mode: 0755]
sdk/cli/test/binstub_arv-mount/arv-mount [new file with mode: 0755]
sdk/cli/test/binstub_clean_fail/arv-mount [new file with mode: 0755]
sdk/cli/test/binstub_docker_noop/docker.io [new file with mode: 0755]
sdk/cli/test/binstub_output_coll_owner/python [new file with mode: 0755]
sdk/cli/test/binstub_sanity_check/docker.io [new file with mode: 0755]
sdk/cli/test/binstub_sanity_check/true [new file with mode: 0755]
sdk/cli/test/test_arv-collection-create.rb [new file with mode: 0644]
sdk/cli/test/test_arv-get.rb [new file with mode: 0644]
sdk/cli/test/test_arv-keep-get.rb [new file with mode: 0644]
sdk/cli/test/test_arv-keep-put.rb [new file with mode: 0644]
sdk/cli/test/test_arv-run-pipeline-instance.rb [new file with mode: 0644]
sdk/cli/test/test_arv-tag.rb [new file with mode: 0644]
sdk/cli/test/test_arv-ws.rb [new file with mode: 0644]
sdk/cli/test/test_crunch-job.rb [new file with mode: 0644]
sdk/cwl/.gitignore [new symlink]
sdk/cwl/LICENSE-2.0.txt [new file with mode: 0644]
sdk/cwl/MANIFEST.in [new file with mode: 0644]
sdk/cwl/README.rst [new file with mode: 0644]
sdk/cwl/arvados_cwl/__init__.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/arv-cwl-schema.yml [new file with mode: 0644]
sdk/cwl/arvados_cwl/arvcontainer.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/arvdocker.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/arvjob.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/arvtool.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/arvworkflow.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/context.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/crunch_script.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/done.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/executor.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/fsaccess.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/http.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/pathmapper.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/perf.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/runner.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/task_queue.py [new file with mode: 0644]
sdk/cwl/arvados_cwl/util.py [new file with mode: 0644]
sdk/cwl/arvados_version.py [new file with mode: 0644]
sdk/cwl/bin/arvados-cwl-runner [new file with mode: 0755]
sdk/cwl/bin/cwl-runner [new file with mode: 0755]
sdk/cwl/fpm-info.sh [new file with mode: 0644]
sdk/cwl/gittaggers.py [new file with mode: 0644]
sdk/cwl/setup.py [new file with mode: 0644]
sdk/cwl/test_with_arvbox.sh [new file with mode: 0755]
sdk/cwl/tests/12213-keepref-expr.cwl [new file with mode: 0644]
sdk/cwl/tests/12213-keepref-job.yml [new file with mode: 0644]
sdk/cwl/tests/12213-keepref-tool.cwl [new file with mode: 0644]
sdk/cwl/tests/12213-keepref-wf.cwl [new file with mode: 0644]
sdk/cwl/tests/12418-glob-empty-collection.cwl [new file with mode: 0644]
sdk/cwl/tests/13931-size-job.yml [new file with mode: 0644]
sdk/cwl/tests/13931-size.cwl [new file with mode: 0644]
sdk/cwl/tests/13976-keepref-wf.cwl [new file with mode: 0644]
sdk/cwl/tests/__init__.py [new file with mode: 0644]
sdk/cwl/tests/arvados-tests.sh [new file with mode: 0755]
sdk/cwl/tests/arvados-tests.yml [new file with mode: 0644]
sdk/cwl/tests/cat.cwl [new file with mode: 0644]
sdk/cwl/tests/collection_per_tool/a.txt [new file with mode: 0644]
sdk/cwl/tests/collection_per_tool/b.txt [new file with mode: 0644]
sdk/cwl/tests/collection_per_tool/c.txt [new file with mode: 0644]
sdk/cwl/tests/collection_per_tool/collection_per_tool.cwl [new file with mode: 0644]
sdk/cwl/tests/collection_per_tool/collection_per_tool_packed.cwl [new file with mode: 0644]
sdk/cwl/tests/collection_per_tool/step1.cwl [new file with mode: 0644]
sdk/cwl/tests/collection_per_tool/step2.cwl [new file with mode: 0644]
sdk/cwl/tests/dir-job.yml [new file with mode: 0644]
sdk/cwl/tests/dir-job2.yml [new file with mode: 0644]
sdk/cwl/tests/federation/README [new file with mode: 0644]
sdk/cwl/tests/federation/arvbox-make-federation.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/arvbox/fed-config.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/arvbox/mkdir.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/arvbox/setup-user.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/arvbox/setup_user.py [new file with mode: 0644]
sdk/cwl/tests/federation/arvbox/start.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/arvbox/stop.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/base-case.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/cat.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/hint-on-tool.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/hint-on-wf.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/md5sum-tool-hint.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/md5sum.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/remote-case.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/rev-input-to-output.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/rev.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/runner-home-step-remote.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/runner-remote-step-home.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/scatter-gather.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/threestep-remote.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/twostep-both-remote.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/twostep-home-to-remote.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/twostep-remote-copy-to-home.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/cases/twostep-remote-to-home.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/data/base-case-input.txt [new file with mode: 0644]
sdk/cwl/tests/federation/data/hint-on-tool.txt [new file with mode: 0644]
sdk/cwl/tests/federation/data/hint-on-wf.txt [new file with mode: 0644]
sdk/cwl/tests/federation/data/remote-case-input.txt [new file with mode: 0644]
sdk/cwl/tests/federation/data/runner-home-step-remote-input.txt [new file with mode: 0644]
sdk/cwl/tests/federation/data/runner-remote-step-home-input.txt [new file with mode: 0644]
sdk/cwl/tests/federation/data/scatter-gather-s1.txt [new file with mode: 0644]
sdk/cwl/tests/federation/data/scatter-gather-s2.txt [new file with mode: 0644]
sdk/cwl/tests/federation/data/scatter-gather-s3.txt [new file with mode: 0644]
sdk/cwl/tests/federation/data/threestep-remote.txt [new file with mode: 0644]
sdk/cwl/tests/federation/data/twostep-both-remote.txt [new file with mode: 0644]
sdk/cwl/tests/federation/data/twostep-home-to-remote.txt [new file with mode: 0644]
sdk/cwl/tests/federation/data/twostep-remote-copy-to-home.txt [new file with mode: 0644]
sdk/cwl/tests/federation/data/twostep-remote-to-home.txt [new file with mode: 0644]
sdk/cwl/tests/federation/framework/check-exist.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/framework/check_exist.py [new file with mode: 0644]
sdk/cwl/tests/federation/framework/dockerbuild.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/framework/prepare.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/framework/prepare.py [new file with mode: 0644]
sdk/cwl/tests/federation/framework/run-acr.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/framework/testcase.cwl [new file with mode: 0644]
sdk/cwl/tests/federation/main.cwl [new file with mode: 0755]
sdk/cwl/tests/hg19/hg19.fa [new file with mode: 0644]
sdk/cwl/tests/hg19/hg19.fa.amb [new file with mode: 0644]
sdk/cwl/tests/hg19/hg19.fa.ann [new file with mode: 0644]
sdk/cwl/tests/hg19/hg19.fa.fai [new file with mode: 0644]
sdk/cwl/tests/hw.py [new file with mode: 0644]
sdk/cwl/tests/input/blorp.txt [new file with mode: 0644]
sdk/cwl/tests/keep-dir-test-input.cwl [new file with mode: 0644]
sdk/cwl/tests/keep-dir-test-input2.cwl [new file with mode: 0644]
sdk/cwl/tests/keep-dir-test-input3.cwl [new file with mode: 0644]
sdk/cwl/tests/listing-job.yml [new file with mode: 0644]
sdk/cwl/tests/makes_intermediates/echo.cwl [new file with mode: 0644]
sdk/cwl/tests/makes_intermediates/hello1.txt [new file with mode: 0644]
sdk/cwl/tests/makes_intermediates/run_in_single.cwl [new file with mode: 0644]
sdk/cwl/tests/makes_intermediates/subwf.cwl [new file with mode: 0644]
sdk/cwl/tests/matcher.py [new file with mode: 0644]
sdk/cwl/tests/mock_discovery.py [new file with mode: 0644]
sdk/cwl/tests/noreuse.cwl [new file with mode: 0644]
sdk/cwl/tests/octo.yml [new file with mode: 0644]
sdk/cwl/tests/octothorpe/item #1.txt [new file with mode: 0644]
sdk/cwl/tests/order/empty_order.json [new file with mode: 0644]
sdk/cwl/tests/order/inputs_test_order.json [new file with mode: 0644]
sdk/cwl/tests/samples/sample1_S01_R1_001.fastq.gz [new file with mode: 0644]
sdk/cwl/tests/samples/sample1_S01_R3_001.fastq.gz [new file with mode: 0644]
sdk/cwl/tests/samples/sample2_S01_R1_001.fastq.gz [new file with mode: 0644]
sdk/cwl/tests/samples/sample2_S01_R3_001.fastq.gz [new file with mode: 0644]
sdk/cwl/tests/secondary/dir/hg19.fa [new file with mode: 0644]
sdk/cwl/tests/secondary/dir/hg19.fa.amb [new file with mode: 0644]
sdk/cwl/tests/secondary/dir/hg19.fa.ann [new file with mode: 0644]
sdk/cwl/tests/secondary/dir/hg19.fa.fai [new file with mode: 0644]
sdk/cwl/tests/secondary/ls.cwl [new file with mode: 0644]
sdk/cwl/tests/secondary/sub.cwl [new file with mode: 0644]
sdk/cwl/tests/secondary/wf-job.yml [new file with mode: 0644]
sdk/cwl/tests/secondary/wf.cwl [new file with mode: 0644]
sdk/cwl/tests/secondaryFiles/example1.cwl [new file with mode: 0644]
sdk/cwl/tests/secondaryFiles/example3.cwl [new file with mode: 0644]
sdk/cwl/tests/secondaryFiles/hello.txt [new file with mode: 0644]
sdk/cwl/tests/secondaryFiles/hello.txt.idx [new file with mode: 0644]
sdk/cwl/tests/secondaryFiles/inp3.yml [new file with mode: 0644]
sdk/cwl/tests/secret_test_job.yml [new file with mode: 0644]
sdk/cwl/tests/stdout.cwl [new file with mode: 0644]
sdk/cwl/tests/submit_test_job.json [new file with mode: 0644]
sdk/cwl/tests/submit_test_job_missing.json [new file with mode: 0644]
sdk/cwl/tests/test_container.py [new file with mode: 0644]
sdk/cwl/tests/test_fsaccess.py [new file with mode: 0644]
sdk/cwl/tests/test_http.py [new file with mode: 0644]
sdk/cwl/tests/test_job.py [new file with mode: 0644]
sdk/cwl/tests/test_make_output.py [new file with mode: 0644]
sdk/cwl/tests/test_pathmapper.py [new file with mode: 0644]
sdk/cwl/tests/test_submit.py [new file with mode: 0644]
sdk/cwl/tests/test_tq.py [new file with mode: 0644]
sdk/cwl/tests/test_urljoin.py [new file with mode: 0644]
sdk/cwl/tests/test_util.py [new file with mode: 0644]
sdk/cwl/tests/testdir/a [new file with mode: 0644]
sdk/cwl/tests/testdir/b [new file with mode: 0644]
sdk/cwl/tests/testdir/c/d [new file with mode: 0644]
sdk/cwl/tests/tmp1/tmp2/tmp3/.gitkeep [new file with mode: 0644]
sdk/cwl/tests/tool/blub.txt [new file with mode: 0644]
sdk/cwl/tests/tool/submit_tool.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir1.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir2.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir3.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir4.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir5.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir6.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir6a.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir7.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/default-dir7a.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/inp1/hello.txt [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf1.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf2.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf3.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf4.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf5.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf6.cwl [new file with mode: 0644]
sdk/cwl/tests/wf-defaults/wf7.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/check_mem.py [new file with mode: 0644]
sdk/cwl/tests/wf/echo-subwf.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/echo-wf.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/echo_a.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/echo_b.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/expect_arvworkflow.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/expect_packed.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/inputs_test.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/listing_deep.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/listing_none.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/listing_shallow.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/runin-reqs-wf.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/runin-reqs-wf2.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/runin-reqs-wf3.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/runin-reqs-wf4.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/runin-wf.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/runin-with-ttl-wf.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/scatter2.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/scatter2_subwf.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/secret_job.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/secret_wf.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/submit_keepref_wf.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/submit_wf.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/submit_wf_no_reuse.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/submit_wf_packed.cwl [new file with mode: 0644]
sdk/cwl/tests/wf/submit_wf_runner_resources.cwl [new file with mode: 0644]
sdk/dev-jobs.dockerfile [new file with mode: 0644]
sdk/go/arvados/api_client_authorization.go [new file with mode: 0644]
sdk/go/arvados/byte_size.go [new file with mode: 0644]
sdk/go/arvados/byte_size_test.go [new file with mode: 0644]
sdk/go/arvados/client.go [new file with mode: 0644]
sdk/go/arvados/client_test.go [new file with mode: 0644]
sdk/go/arvados/collection.go [new file with mode: 0644]
sdk/go/arvados/config.go [new file with mode: 0644]
sdk/go/arvados/config_test.go [new file with mode: 0644]
sdk/go/arvados/container.go [new file with mode: 0644]
sdk/go/arvados/contextgroup.go [new file with mode: 0644]
sdk/go/arvados/doc.go [new file with mode: 0644]
sdk/go/arvados/duration.go [new file with mode: 0644]
sdk/go/arvados/error.go [new file with mode: 0644]
sdk/go/arvados/fs_backend.go [new file with mode: 0644]
sdk/go/arvados/fs_base.go [new file with mode: 0644]
sdk/go/arvados/fs_collection.go [new file with mode: 0644]
sdk/go/arvados/fs_collection_test.go [new file with mode: 0644]
sdk/go/arvados/fs_deferred.go [new file with mode: 0644]
sdk/go/arvados/fs_filehandle.go [new file with mode: 0644]
sdk/go/arvados/fs_getternode.go [new file with mode: 0644]
sdk/go/arvados/fs_lookup.go [new file with mode: 0644]
sdk/go/arvados/fs_project.go [new file with mode: 0644]
sdk/go/arvados/fs_project_test.go [new file with mode: 0644]
sdk/go/arvados/fs_site.go [new file with mode: 0644]
sdk/go/arvados/fs_site_test.go [new file with mode: 0644]
sdk/go/arvados/fs_users.go [new file with mode: 0644]
sdk/go/arvados/group.go [new file with mode: 0644]
sdk/go/arvados/integration_test_cluster.go [new file with mode: 0644]
sdk/go/arvados/keep_block.go [new file with mode: 0644]
sdk/go/arvados/keep_service.go [new file with mode: 0644]
sdk/go/arvados/keep_service_test.go [new file with mode: 0644]
sdk/go/arvados/link.go [new file with mode: 0644]
sdk/go/arvados/log.go [new file with mode: 0644]
sdk/go/arvados/node.go [new file with mode: 0644]
sdk/go/arvados/postgresql.go [new file with mode: 0644]
sdk/go/arvados/resource_list.go [new file with mode: 0644]
sdk/go/arvados/resource_list_test.go [new file with mode: 0644]
sdk/go/arvados/throttle.go [new file with mode: 0644]
sdk/go/arvados/user.go [new file with mode: 0644]
sdk/go/arvados/workflow.go [new file with mode: 0644]
sdk/go/arvadosclient/arvadosclient.go [new file with mode: 0644]
sdk/go/arvadosclient/arvadosclient_test.go [new file with mode: 0644]
sdk/go/arvadosclient/pool.go [new file with mode: 0644]
sdk/go/arvadostest/fixtures.go [new file with mode: 0644]
sdk/go/arvadostest/run_servers.go [new file with mode: 0644]
sdk/go/arvadostest/stub.go [new file with mode: 0644]
sdk/go/asyncbuf/buf.go [new file with mode: 0644]
sdk/go/asyncbuf/buf_test.go [new file with mode: 0644]
sdk/go/auth/auth.go [new file with mode: 0644]
sdk/go/auth/handlers.go [new file with mode: 0644]
sdk/go/auth/handlers_test.go [new file with mode: 0644]
sdk/go/auth/salt.go [new file with mode: 0644]
sdk/go/blockdigest/blockdigest.go [new file with mode: 0644]
sdk/go/blockdigest/blockdigest_test.go [new file with mode: 0644]
sdk/go/blockdigest/testing.go [new file with mode: 0644]
sdk/go/config/dump.go [new file with mode: 0644]
sdk/go/config/load.go [new file with mode: 0644]
sdk/go/crunchrunner/crunchrunner.go [new file with mode: 0644]
sdk/go/crunchrunner/crunchrunner_test.go [new file with mode: 0644]
sdk/go/crunchrunner/upload.go [new file with mode: 0644]
sdk/go/crunchrunner/upload_test.go [new file with mode: 0644]
sdk/go/ctxlog/log.go [new file with mode: 0644]
sdk/go/dispatch/dispatch.go [new file with mode: 0644]
sdk/go/dispatch/dispatch_test.go [new file with mode: 0644]
sdk/go/dispatch/throttle.go [new file with mode: 0644]
sdk/go/dispatch/throttle_test.go [new file with mode: 0644]
sdk/go/health/aggregator.go [new file with mode: 0644]
sdk/go/health/aggregator_test.go [new file with mode: 0644]
sdk/go/health/handler.go [new file with mode: 0644]
sdk/go/health/handler_test.go [new file with mode: 0644]
sdk/go/httpserver/error.go [new file with mode: 0644]
sdk/go/httpserver/httpserver.go [new file with mode: 0644]
sdk/go/httpserver/id_generator.go [new file with mode: 0644]
sdk/go/httpserver/log.go [new file with mode: 0644]
sdk/go/httpserver/logger.go [new file with mode: 0644]
sdk/go/httpserver/logger_test.go [new file with mode: 0644]
sdk/go/httpserver/metrics.go [new file with mode: 0644]
sdk/go/httpserver/request_limiter.go [new file with mode: 0644]
sdk/go/httpserver/request_limiter_test.go [new file with mode: 0644]
sdk/go/httpserver/responsewriter.go [new file with mode: 0644]
sdk/go/keepclient/block_cache.go [new file with mode: 0644]
sdk/go/keepclient/collectionreader.go [new file with mode: 0644]
sdk/go/keepclient/collectionreader_test.go [new file with mode: 0644]
sdk/go/keepclient/discover.go [new file with mode: 0644]
sdk/go/keepclient/discover_test.go [new file with mode: 0644]
sdk/go/keepclient/hashcheck.go [new file with mode: 0644]
sdk/go/keepclient/hashcheck_test.go [new file with mode: 0644]
sdk/go/keepclient/keepclient.go [new file with mode: 0644]
sdk/go/keepclient/keepclient_test.go [new file with mode: 0644]
sdk/go/keepclient/perms.go [new file with mode: 0644]
sdk/go/keepclient/perms_test.go [new file with mode: 0644]
sdk/go/keepclient/root_sorter.go [new file with mode: 0644]
sdk/go/keepclient/root_sorter_test.go [new file with mode: 0644]
sdk/go/keepclient/support.go [new file with mode: 0644]
sdk/go/manifest/manifest.go [new file with mode: 0644]
sdk/go/manifest/manifest_test.go [new file with mode: 0644]
sdk/go/manifest/testdata/long_manifest [new file with mode: 0644]
sdk/go/manifest/testdata/short_manifest [new file with mode: 0644]
sdk/go/stats/duration.go [new file with mode: 0644]
sdk/go/stats/duration_test.go [new file with mode: 0644]
sdk/java-v2/.gitignore [new file with mode: 0644]
sdk/java-v2/.licenseignore [new file with mode: 0644]
sdk/java-v2/COPYING [new file with mode: 0644]
sdk/java-v2/README.md [new file with mode: 0644]
sdk/java-v2/agpl-3.0.txt [new file with mode: 0644]
sdk/java-v2/apache-2.0.txt [new file with mode: 0644]
sdk/java-v2/build.gradle [moved from build.gradle with 100% similarity]
sdk/java-v2/gradle/wrapper/gradle-wrapper.jar [moved from gradle/wrapper/gradle-wrapper.jar with 100% similarity]
sdk/java-v2/gradle/wrapper/gradle-wrapper.properties [moved from gradle/wrapper/gradle-wrapper.properties with 100% similarity]
sdk/java-v2/gradlew [moved from gradlew with 100% similarity]
sdk/java-v2/gradlew.bat [moved from gradlew.bat with 100% similarity]
sdk/java-v2/settings.gradle [moved from settings.gradle with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java [moved from src/main/java/org/arvados/client/api/client/BaseApiClient.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java [moved from src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java [moved from src/main/java/org/arvados/client/api/client/CollectionsApiClient.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/client/CountingFileRequestBody.java [moved from src/main/java/org/arvados/client/api/client/CountingFileRequestBody.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/client/GroupsApiClient.java [moved from src/main/java/org/arvados/client/api/client/GroupsApiClient.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepServerApiClient.java [moved from src/main/java/org/arvados/client/api/client/KeepServerApiClient.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepServicesApiClient.java [moved from src/main/java/org/arvados/client/api/client/KeepServicesApiClient.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java [moved from src/main/java/org/arvados/client/api/client/KeepWebApiClient.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/client/ProgressListener.java [moved from src/main/java/org/arvados/client/api/client/ProgressListener.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/client/UsersApiClient.java [moved from src/main/java/org/arvados/client/api/client/UsersApiClient.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/client/factory/OkHttpClientFactory.java [moved from src/main/java/org/arvados/client/api/client/factory/OkHttpClientFactory.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/ApiError.java [moved from src/main/java/org/arvados/client/api/model/ApiError.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/Collection.java [moved from src/main/java/org/arvados/client/api/model/Collection.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionList.java [moved from src/main/java/org/arvados/client/api/model/CollectionList.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/Group.java [moved from src/main/java/org/arvados/client/api/model/Group.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/GroupList.java [moved from src/main/java/org/arvados/client/api/model/GroupList.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/Item.java [moved from src/main/java/org/arvados/client/api/model/Item.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/ItemList.java [moved from src/main/java/org/arvados/client/api/model/ItemList.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/KeepService.java [moved from src/main/java/org/arvados/client/api/model/KeepService.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/KeepServiceList.java [moved from src/main/java/org/arvados/client/api/model/KeepServiceList.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/RuntimeConstraints.java [moved from src/main/java/org/arvados/client/api/model/RuntimeConstraints.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/User.java [moved from src/main/java/org/arvados/client/api/model/User.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/UserList.java [moved from src/main/java/org/arvados/client/api/model/UserList.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/Argument.java [moved from src/main/java/org/arvados/client/api/model/argument/Argument.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/ContentsGroup.java [moved from src/main/java/org/arvados/client/api/model/argument/ContentsGroup.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/Filter.java [moved from src/main/java/org/arvados/client/api/model/argument/Filter.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/ListArgument.java [moved from src/main/java/org/arvados/client/api/model/argument/ListArgument.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/UntrashGroup.java [moved from src/main/java/org/arvados/client/api/model/argument/UntrashGroup.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/common/Characters.java [moved from src/main/java/org/arvados/client/common/Characters.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/common/Headers.java [moved from src/main/java/org/arvados/client/common/Headers.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/common/Patterns.java [moved from src/main/java/org/arvados/client/common/Patterns.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/config/ConfigProvider.java [moved from src/main/java/org/arvados/client/config/ConfigProvider.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java [moved from src/main/java/org/arvados/client/config/ExternalConfigProvider.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/config/FileConfigProvider.java [moved from src/main/java/org/arvados/client/config/FileConfigProvider.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/exception/ArvadosApiException.java [moved from src/main/java/org/arvados/client/exception/ArvadosApiException.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/exception/ArvadosClientException.java [moved from src/main/java/org/arvados/client/exception/ArvadosClientException.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java [moved from src/main/java/org/arvados/client/facade/ArvadosFacade.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/logic/collection/CollectionFactory.java [moved from src/main/java/org/arvados/client/logic/collection/CollectionFactory.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/logic/collection/FileToken.java [moved from src/main/java/org/arvados/client/logic/collection/FileToken.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/logic/collection/ManifestDecoder.java [moved from src/main/java/org/arvados/client/logic/collection/ManifestDecoder.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/logic/collection/ManifestFactory.java [moved from src/main/java/org/arvados/client/logic/collection/ManifestFactory.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/logic/collection/ManifestStream.java [moved from src/main/java/org/arvados/client/logic/collection/ManifestStream.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java [moved from src/main/java/org/arvados/client/logic/keep/FileDownloader.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileTransferHandler.java [moved from src/main/java/org/arvados/client/logic/keep/FileTransferHandler.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileUploader.java [moved from src/main/java/org/arvados/client/logic/keep/FileUploader.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/KeepClient.java [moved from src/main/java/org/arvados/client/logic/keep/KeepClient.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/KeepLocator.java [moved from src/main/java/org/arvados/client/logic/keep/KeepLocator.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/exception/DownloadFolderAlreadyExistsException.java [moved from src/main/java/org/arvados/client/logic/keep/exception/DownloadFolderAlreadyExistsException.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/exception/FileAlreadyExistsException.java [moved from src/main/java/org/arvados/client/logic/keep/exception/FileAlreadyExistsException.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/utils/FileMerge.java [moved from src/main/java/org/arvados/client/utils/FileMerge.java with 100% similarity]
sdk/java-v2/src/main/java/org/arvados/client/utils/FileSplit.java [moved from src/main/java/org/arvados/client/utils/FileSplit.java with 100% similarity]
sdk/java-v2/src/main/resources/reference.conf [moved from src/main/resources/reference.conf with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/api/client/BaseStandardApiClientTest.java [moved from src/test/java/org/arvados/client/api/client/BaseStandardApiClientTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java [moved from src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/api/client/GroupsApiClientTest.java [moved from src/test/java/org/arvados/client/api/client/GroupsApiClientTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepServerApiClientTest.java [moved from src/test/java/org/arvados/client/api/client/KeepServerApiClientTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepServicesApiClientTest.java [moved from src/test/java/org/arvados/client/api/client/KeepServicesApiClientTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/api/client/UsersApiClientTest.java [moved from src/test/java/org/arvados/client/api/client/UsersApiClientTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/api/client/factory/OkHttpClientFactoryTest.java [moved from src/test/java/org/arvados/client/api/client/factory/OkHttpClientFactoryTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeIntegrationTest.java [moved from src/test/java/org/arvados/client/facade/ArvadosFacadeIntegrationTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeTest.java [moved from src/test/java/org/arvados/client/facade/ArvadosFacadeTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/junit/categories/IntegrationTests.java [moved from src/test/java/org/arvados/client/junit/categories/IntegrationTests.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/logic/collection/FileTokenTest.java [moved from src/test/java/org/arvados/client/logic/collection/FileTokenTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/logic/collection/ManifestDecoderTest.java [moved from src/test/java/org/arvados/client/logic/collection/ManifestDecoderTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/logic/collection/ManifestFactoryTest.java [moved from src/test/java/org/arvados/client/logic/collection/ManifestFactoryTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/logic/collection/ManifestStreamTest.java [moved from src/test/java/org/arvados/client/logic/collection/ManifestStreamTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java [moved from src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/logic/keep/KeepClientTest.java [moved from src/test/java/org/arvados/client/logic/keep/KeepClientTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/logic/keep/KeepLocatorTest.java [moved from src/test/java/org/arvados/client/logic/keep/KeepLocatorTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/test/utils/ApiClientTestUtils.java [moved from src/test/java/org/arvados/client/test/utils/ApiClientTestUtils.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientIntegrationTest.java [moved from src/test/java/org/arvados/client/test/utils/ArvadosClientIntegrationTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientMockedWebServerTest.java [moved from src/test/java/org/arvados/client/test/utils/ArvadosClientMockedWebServerTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientUnitTest.java [moved from src/test/java/org/arvados/client/test/utils/ArvadosClientUnitTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/test/utils/FileTestUtils.java [moved from src/test/java/org/arvados/client/test/utils/FileTestUtils.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/test/utils/RequestMethod.java [moved from src/test/java/org/arvados/client/test/utils/RequestMethod.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/utils/FileMergeTest.java [moved from src/test/java/org/arvados/client/utils/FileMergeTest.java with 100% similarity]
sdk/java-v2/src/test/java/org/arvados/client/utils/FileSplitTest.java [moved from src/test/java/org/arvados/client/utils/FileSplitTest.java with 100% similarity]
sdk/java-v2/src/test/resources/application.conf [moved from src/test/resources/application.conf with 100% similarity]
sdk/java-v2/src/test/resources/integration-tests-application.conf [moved from src/test/resources/integration-tests-application.conf with 100% similarity]
sdk/java-v2/src/test/resources/integration-tests-application.conf.example [moved from src/test/resources/integration-tests-application.conf.example with 100% similarity]
sdk/java-v2/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker [moved from src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-manifest.json [moved from src/test/resources/org/arvados/client/api/client/collections-create-manifest.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-simple.json [moved from src/test/resources/org/arvados/client/api/client/collections-create-simple.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-download-file.json [moved from src/test/resources/org/arvados/client/api/client/collections-download-file.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-get.json [moved from src/test/resources/org/arvados/client/api/client/collections-get.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-list.json [moved from src/test/resources/org/arvados/client/api/client/collections-list.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-get.json [moved from src/test/resources/org/arvados/client/api/client/groups-get.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-list.json [moved from src/test/resources/org/arvados/client/api/client/groups-list.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-client-test-file.txt [moved from src/test/resources/org/arvados/client/api/client/keep-client-test-file.txt with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible-disk-only.json [moved from src/test/resources/org/arvados/client/api/client/keep-services-accessible-disk-only.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible.json [moved from src/test/resources/org/arvados/client/api/client/keep-services-accessible.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-get.json [moved from src/test/resources/org/arvados/client/api/client/keep-services-get.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-list.json [moved from src/test/resources/org/arvados/client/api/client/keep-services-list.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-not-accessible.json [moved from src/test/resources/org/arvados/client/api/client/keep-services-not-accessible.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-create.json [moved from src/test/resources/org/arvados/client/api/client/users-create.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-get.json [moved from src/test/resources/org/arvados/client/api/client/users-get.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-list.json [moved from src/test/resources/org/arvados/client/api/client/users-list.json with 100% similarity]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-system.json [moved from src/test/resources/org/arvados/client/api/client/users-system.json with 100% similarity]
sdk/java-v2/src/test/resources/selfsigned.keystore.jks [moved from src/test/resources/selfsigned.keystore.jks with 100% similarity]
sdk/java-v2/test-in-docker.sh [moved from test-in-docker.sh with 100% similarity]
sdk/java/.classpath [new file with mode: 0644]
sdk/java/.project [new file with mode: 0644]
sdk/java/.settings/org.eclipse.jdt.core.prefs [new file with mode: 0644]
sdk/java/ArvadosSDKJavaExample.java [new file with mode: 0644]
sdk/java/ArvadosSDKJavaExampleWithPrompt.java [new file with mode: 0644]
sdk/java/README [new file with mode: 0644]
sdk/java/pom.xml [new file with mode: 0644]
sdk/java/src/main/java/org/arvados/sdk/Arvados.java [new file with mode: 0644]
sdk/java/src/main/java/org/arvados/sdk/MethodDetails.java [new file with mode: 0644]
sdk/java/src/main/resources/log4j.properties [new file with mode: 0644]
sdk/java/src/test/java/org/arvados/sdk/java/ArvadosTest.java [new file with mode: 0644]
sdk/java/src/test/resources/first_pipeline.json [new file with mode: 0644]
sdk/pam/.dockerignore [new file with mode: 0644]
sdk/pam/.gitignore [new symlink]
sdk/pam/Dockerfile [new file with mode: 0644]
sdk/pam/LICENSE-2.0.txt [new file with mode: 0644]
sdk/pam/MANIFEST.in [new file with mode: 0644]
sdk/pam/README.rst [new file with mode: 0644]
sdk/pam/arvados_pam/__init__.py [new file with mode: 0644]
sdk/pam/arvados_pam/auth_event.py [new file with mode: 0644]
sdk/pam/arvados_version.py [new file with mode: 0644]
sdk/pam/examples/shellinabox [new file with mode: 0644]
sdk/pam/fpm-info.sh [new file with mode: 0644]
sdk/pam/gittaggers.py [new symlink]
sdk/pam/integration_tests/__init__.py [new file with mode: 0644]
sdk/pam/integration_tests/test_pam.py [new file with mode: 0644]
sdk/pam/lib/libpam_arvados.py [new file with mode: 0644]
sdk/pam/pam-configs/arvados [new file with mode: 0644]
sdk/pam/setup.py [new file with mode: 0755]
sdk/pam/tests/__init__.py [new file with mode: 0644]
sdk/pam/tests/integration_test.pl [new file with mode: 0755]
sdk/pam/tests/mocker.py [new file with mode: 0644]
sdk/pam/tests/test_auth_event.py [new file with mode: 0644]
sdk/pam/tests/test_pam_sm.py [new file with mode: 0644]
sdk/perl/.gitignore [new file with mode: 0644]
sdk/perl/Makefile.PL [new file with mode: 0644]
sdk/perl/lib/Arvados.pm [new file with mode: 0644]
sdk/perl/lib/Arvados/Request.pm [new file with mode: 0644]
sdk/perl/lib/Arvados/ResourceAccessor.pm [new file with mode: 0644]
sdk/perl/lib/Arvados/ResourceMethod.pm [new file with mode: 0644]
sdk/perl/lib/Arvados/ResourceProxy.pm [new file with mode: 0644]
sdk/perl/lib/Arvados/ResourceProxyList.pm [new file with mode: 0644]
sdk/python/.gitignore [new file with mode: 0644]
sdk/python/LICENSE-2.0.txt [new file with mode: 0644]
sdk/python/MANIFEST.in [new file with mode: 0644]
sdk/python/README.rst [new file with mode: 0644]
sdk/python/arvados/__init__.py [new file with mode: 0644]
sdk/python/arvados/_normalize_stream.py [new file with mode: 0644]
sdk/python/arvados/_ranges.py [new file with mode: 0644]
sdk/python/arvados/api.py [new file with mode: 0644]
sdk/python/arvados/arvfile.py [new file with mode: 0644]
sdk/python/arvados/cache.py [new file with mode: 0644]
sdk/python/arvados/collection.py [new file with mode: 0644]
sdk/python/arvados/commands/__init__.py [new file with mode: 0644]
sdk/python/arvados/commands/_util.py [new file with mode: 0644]
sdk/python/arvados/commands/arv_copy.py [new file with mode: 0755]
sdk/python/arvados/commands/get.py [new file with mode: 0755]
sdk/python/arvados/commands/keepdocker.py [new file with mode: 0644]
sdk/python/arvados/commands/ls.py [new file with mode: 0644]
sdk/python/arvados/commands/migrate19.py [new file with mode: 0644]
sdk/python/arvados/commands/put.py [new file with mode: 0644]
sdk/python/arvados/commands/run.py [new file with mode: 0644]
sdk/python/arvados/commands/ws.py [new file with mode: 0644]
sdk/python/arvados/config.py [new file with mode: 0644]
sdk/python/arvados/crunch.py [new file with mode: 0644]
sdk/python/arvados/errors.py [new file with mode: 0644]
sdk/python/arvados/events.py [new file with mode: 0644]
sdk/python/arvados/keep.py [new file with mode: 0644]
sdk/python/arvados/retry.py [new file with mode: 0644]
sdk/python/arvados/safeapi.py [new file with mode: 0644]
sdk/python/arvados/stream.py [new file with mode: 0644]
sdk/python/arvados/timer.py [new file with mode: 0644]
sdk/python/arvados/util.py [new file with mode: 0644]
sdk/python/arvados_version.py [new file with mode: 0644]
sdk/python/bin/arv-copy [new file with mode: 0755]
sdk/python/bin/arv-get [new file with mode: 0755]
sdk/python/bin/arv-keepdocker [new file with mode: 0755]
sdk/python/bin/arv-ls [new file with mode: 0755]
sdk/python/bin/arv-migrate-docker19 [new file with mode: 0755]
sdk/python/bin/arv-normalize [new file with mode: 0755]
sdk/python/bin/arv-put [new file with mode: 0755]
sdk/python/bin/arv-run [new file with mode: 0755]
sdk/python/bin/arv-ws [new file with mode: 0755]
sdk/python/fpm-info.sh [new file with mode: 0644]
sdk/python/gittaggers.py [new file with mode: 0644]
sdk/python/setup.py [new file with mode: 0644]
sdk/python/tests/__init__.py [new file with mode: 0644]
sdk/python/tests/arvados_testutil.py [new file with mode: 0644]
sdk/python/tests/data/1000G_ref_manifest [new file with mode: 0644]
sdk/python/tests/data/jlake_manifest [new file with mode: 0644]
sdk/python/tests/keepstub.py [new file with mode: 0644]
sdk/python/tests/manifest_examples.py [new file with mode: 0644]
sdk/python/tests/nginx.conf [new file with mode: 0644]
sdk/python/tests/performance/__init__.py [new file with mode: 0644]
sdk/python/tests/performance/performance_profiler.py [new file with mode: 0644]
sdk/python/tests/performance/test_a_sample.py [new file with mode: 0644]
sdk/python/tests/run_test_server.py [new file with mode: 0644]
sdk/python/tests/slow_test.py [new file with mode: 0644]
sdk/python/tests/test_api.py [new file with mode: 0644]
sdk/python/tests/test_arv_copy.py [new file with mode: 0644]
sdk/python/tests/test_arv_get.py [new file with mode: 0644]
sdk/python/tests/test_arv_keepdocker.py [new file with mode: 0644]
sdk/python/tests/test_arv_ls.py [new file with mode: 0644]
sdk/python/tests/test_arv_normalize.py [new file with mode: 0644]
sdk/python/tests/test_arv_put.py [new file with mode: 0644]
sdk/python/tests/test_arv_run.py [new file with mode: 0644]
sdk/python/tests/test_arv_ws.py [new file with mode: 0644]
sdk/python/tests/test_arvfile.py [new file with mode: 0644]
sdk/python/tests/test_benchmark_collections.py [new file with mode: 0644]
sdk/python/tests/test_cache.py [new file with mode: 0644]
sdk/python/tests/test_collections.py [new file with mode: 0644]
sdk/python/tests/test_crunch.py [new file with mode: 0644]
sdk/python/tests/test_errors.py [new file with mode: 0644]
sdk/python/tests/test_events.py [new file with mode: 0644]
sdk/python/tests/test_keep_client.py [new file with mode: 0644]
sdk/python/tests/test_keep_locator.py [new file with mode: 0644]
sdk/python/tests/test_pipeline_template.py [new file with mode: 0644]
sdk/python/tests/test_retry.py [new file with mode: 0644]
sdk/python/tests/test_retry_job_helpers.py [new file with mode: 0644]
sdk/python/tests/test_sdk.py [new file with mode: 0644]
sdk/python/tests/test_stream.py [new file with mode: 0644]
sdk/python/tests/test_util.py [new file with mode: 0644]
sdk/ruby/.gitignore [new file with mode: 0644]
sdk/ruby/Gemfile [new file with mode: 0644]
sdk/ruby/LICENSE-2.0.txt [new file with mode: 0644]
sdk/ruby/README [new file with mode: 0644]
sdk/ruby/Rakefile [new file with mode: 0644]
sdk/ruby/arvados.gemspec [new file with mode: 0644]
sdk/ruby/lib/arvados.rb [new file with mode: 0644]
sdk/ruby/lib/arvados/collection.rb [new file with mode: 0644]
sdk/ruby/lib/arvados/google_api_client.rb [new file with mode: 0644]
sdk/ruby/lib/arvados/keep.rb [new file with mode: 0644]
sdk/ruby/test/sdk_fixtures.rb [new file with mode: 0644]
sdk/ruby/test/test_big_request.rb [new file with mode: 0644]
sdk/ruby/test/test_collection.rb [new file with mode: 0644]
sdk/ruby/test/test_keep_manifest.rb [new file with mode: 0644]
services/api/.gitignore [new file with mode: 0644]
services/api/Gemfile [new file with mode: 0644]
services/api/Gemfile.lock [new file with mode: 0644]
services/api/README [new file with mode: 0644]
services/api/Rakefile [new file with mode: 0644]
services/api/app/assets/images/logo.png [new file with mode: 0644]
services/api/app/assets/images/rails.png [new file with mode: 0644]
services/api/app/assets/stylesheets/api_client_authorizations.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/api_clients.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/application.css [new file with mode: 0644]
services/api/app/assets/stylesheets/authorized_keys.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/collections.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/commit_ancestors.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/commits.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/groups.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/humans.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/job_tasks.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/jobs.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/keep_disks.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/links.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/logs.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/nodes.css [new file with mode: 0644]
services/api/app/assets/stylesheets/nodes.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/pipeline_instances.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/pipeline_templates.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/repositories.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/scaffolds.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/specimens.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/traits.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/virtual_machines.css.scss [new file with mode: 0644]
services/api/app/controllers/application_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/api_clients_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/authorized_keys_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/collections_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/container_requests_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/containers_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/groups_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/healthcheck_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/humans_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/job_tasks_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/jobs_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/keep_disks_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/keep_services_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/links_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/logs_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/nodes_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/pipeline_instances_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/pipeline_templates_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/repositories_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/schema_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/specimens_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/traits_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/user_agreements_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/users_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/virtual_machines_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/workflows_controller.rb [new file with mode: 0644]
services/api/app/controllers/database_controller.rb [new file with mode: 0644]
services/api/app/controllers/static_controller.rb [new file with mode: 0644]
services/api/app/controllers/user_sessions_controller.rb [new file with mode: 0644]
services/api/app/helpers/api_client_authorizations_helper.rb [new file with mode: 0644]
services/api/app/helpers/api_clients_helper.rb [new file with mode: 0644]
services/api/app/helpers/application_helper.rb [new file with mode: 0644]
services/api/app/helpers/authorized_keys_helper.rb [new file with mode: 0644]
services/api/app/helpers/collections_helper.rb [new file with mode: 0644]
services/api/app/helpers/commit_ancestors_helper.rb [new file with mode: 0644]
services/api/app/helpers/commits_helper.rb [new file with mode: 0644]
services/api/app/helpers/groups_helper.rb [new file with mode: 0644]
services/api/app/helpers/humans_helper.rb [new file with mode: 0644]
services/api/app/helpers/job_tasks_helper.rb [new file with mode: 0644]
services/api/app/helpers/jobs_helper.rb [new file with mode: 0644]
services/api/app/helpers/keep_disks_helper.rb [new file with mode: 0644]
services/api/app/helpers/links_helper.rb [new file with mode: 0644]
services/api/app/helpers/logs_helper.rb [new file with mode: 0644]
services/api/app/helpers/nodes_helper.rb [new file with mode: 0644]
services/api/app/helpers/pipeline_instances_helper.rb [new file with mode: 0644]
services/api/app/helpers/pipeline_templates_helper.rb [new file with mode: 0644]
services/api/app/helpers/repositories_helper.rb [new file with mode: 0644]
services/api/app/helpers/specimens_helper.rb [new file with mode: 0644]
services/api/app/helpers/traits_helper.rb [new file with mode: 0644]
services/api/app/helpers/virtual_machines_helper.rb [new file with mode: 0644]
services/api/app/mailers/.gitkeep [new file with mode: 0644]
services/api/app/mailers/admin_notifier.rb [new file with mode: 0644]
services/api/app/mailers/profile_notifier.rb [new file with mode: 0644]
services/api/app/mailers/user_notifier.rb [new file with mode: 0644]
services/api/app/middlewares/arvados_api_token.rb [new file with mode: 0644]
services/api/app/middlewares/rack_socket.rb [new file with mode: 0644]
services/api/app/models/.gitkeep [new file with mode: 0644]
services/api/app/models/api_client.rb [new file with mode: 0644]
services/api/app/models/api_client_authorization.rb [new file with mode: 0644]
services/api/app/models/arvados_model.rb [new file with mode: 0644]
services/api/app/models/authorized_key.rb [new file with mode: 0644]
services/api/app/models/blob.rb [new file with mode: 0644]
services/api/app/models/collection.rb [new file with mode: 0644]
services/api/app/models/commit.rb [new file with mode: 0644]
services/api/app/models/commit_ancestor.rb [new file with mode: 0644]
services/api/app/models/container.rb [new file with mode: 0644]
services/api/app/models/container_request.rb [new file with mode: 0644]
services/api/app/models/database_seeds.rb [new file with mode: 0644]
services/api/app/models/group.rb [new file with mode: 0644]
services/api/app/models/human.rb [new file with mode: 0644]
services/api/app/models/job.rb [new file with mode: 0644]
services/api/app/models/job_task.rb [new file with mode: 0644]
services/api/app/models/keep_disk.rb [new file with mode: 0644]
services/api/app/models/keep_service.rb [new file with mode: 0644]
services/api/app/models/link.rb [new file with mode: 0644]
services/api/app/models/log.rb [new file with mode: 0644]
services/api/app/models/node.rb [new file with mode: 0644]
services/api/app/models/pipeline_instance.rb [new file with mode: 0644]
services/api/app/models/pipeline_template.rb [new file with mode: 0644]
services/api/app/models/repository.rb [new file with mode: 0644]
services/api/app/models/specimen.rb [new file with mode: 0644]
services/api/app/models/trait.rb [new file with mode: 0644]
services/api/app/models/user.rb [new file with mode: 0644]
services/api/app/models/user_agreement.rb [new file with mode: 0644]
services/api/app/models/virtual_machine.rb [new file with mode: 0644]
services/api/app/models/workflow.rb [new file with mode: 0644]
services/api/app/views/admin_notifier/new_inactive_user.text.erb [new file with mode: 0644]
services/api/app/views/admin_notifier/new_user.text.erb [new file with mode: 0644]
services/api/app/views/layouts/application.html.erb [new file with mode: 0644]
services/api/app/views/profile_notifier/profile_created.text.erb [new file with mode: 0644]
services/api/app/views/static/intro.html.erb [new file with mode: 0644]
services/api/app/views/static/login_failure.html.erb [new file with mode: 0644]
services/api/app/views/user_notifier/account_is_setup.text.erb [new file with mode: 0644]
services/api/app/views/user_sessions/failure.html.erb [new file with mode: 0644]
services/api/config.ru [new file with mode: 0644]
services/api/config/application.default.yml [new file with mode: 0644]
services/api/config/application.rb [new file with mode: 0644]
services/api/config/application.yml.example [new file with mode: 0644]
services/api/config/boot.rb [new file with mode: 0644]
services/api/config/database.yml.example [new file with mode: 0644]
services/api/config/environment.rb [new file with mode: 0644]
services/api/config/environments/development.rb.example [new file with mode: 0644]
services/api/config/environments/production.rb.example [new file with mode: 0644]
services/api/config/environments/test.rb.example [new file with mode: 0644]
services/api/config/initializers/andand.rb [new file with mode: 0644]
services/api/config/initializers/app_version.rb [new file with mode: 0644]
services/api/config/initializers/authorization.rb [new file with mode: 0644]
services/api/config/initializers/backtrace_silencers.rb [new file with mode: 0644]
services/api/config/initializers/common_api_template.rb [new file with mode: 0644]
services/api/config/initializers/current_api_client.rb [new file with mode: 0644]
services/api/config/initializers/db_current_time.rb [new file with mode: 0644]
services/api/config/initializers/eventbus.rb [new file with mode: 0644]
services/api/config/initializers/fix_www_decode.rb [new file with mode: 0644]
services/api/config/initializers/inflections.rb [new file with mode: 0644]
services/api/config/initializers/kind_and_etag.rb [new file with mode: 0644]
services/api/config/initializers/legacy_jobs_api.rb [new file with mode: 0644]
services/api/config/initializers/load_config.rb [new file with mode: 0644]
services/api/config/initializers/lograge.rb [new file with mode: 0644]
services/api/config/initializers/mime_types.rb [new file with mode: 0644]
services/api/config/initializers/net_http.rb [new file with mode: 0644]
services/api/config/initializers/oj_mimic_json.rb [new file with mode: 0644]
services/api/config/initializers/omniauth_init.rb [new file with mode: 0644]
services/api/config/initializers/permit_all_parameters.rb [new file with mode: 0644]
services/api/config/initializers/preload_all_models.rb [new file with mode: 0644]
services/api/config/initializers/schema_discovery_cache.rb [new file with mode: 0644]
services/api/config/initializers/session_store.rb [new file with mode: 0644]
services/api/config/initializers/time_format.rb [new file with mode: 0644]
services/api/config/initializers/wrap_parameters.rb [new file with mode: 0644]
services/api/config/locales/en.yml [new file with mode: 0644]
services/api/config/routes.rb [new file with mode: 0644]
services/api/config/unbound.template [new file with mode: 0644]
services/api/db/migrate/20121016005009_create_collections.rb [new file with mode: 0644]
services/api/db/migrate/20130105203021_create_metadata.rb [new file with mode: 0644]
services/api/db/migrate/20130105224358_rename_metadata_class.rb [new file with mode: 0644]
services/api/db/migrate/20130105224618_rename_collection_created_by_client.rb [new file with mode: 0644]
services/api/db/migrate/20130107181109_add_uuid_to_collections.rb [new file with mode: 0644]
services/api/db/migrate/20130107212832_create_nodes.rb [new file with mode: 0644]
services/api/db/migrate/20130109175700_create_pipelines.rb [new file with mode: 0644]
services/api/db/migrate/20130109220548_create_pipeline_invocations.rb [new file with mode: 0644]
services/api/db/migrate/20130113214204_add_index_to_collections_and_metadata.rb [new file with mode: 0644]
services/api/db/migrate/20130116024233_create_specimens.rb [new file with mode: 0644]
services/api/db/migrate/20130116215213_create_projects.rb [new file with mode: 0644]
services/api/db/migrate/20130118002239_rename_metadata_attributes.rb [new file with mode: 0644]
services/api/db/migrate/20130122020042_create_users.rb [new file with mode: 0644]
services/api/db/migrate/20130122201442_create_logs.rb [new file with mode: 0644]
services/api/db/migrate/20130122221616_add_modified_at_to_logs.rb [new file with mode: 0644]
services/api/db/migrate/20130123174514_add_uuid_index_to_users.rb [new file with mode: 0644]
services/api/db/migrate/20130123180224_create_api_clients.rb [new file with mode: 0644]
services/api/db/migrate/20130123180228_create_api_client_authorizations.rb [new file with mode: 0644]
services/api/db/migrate/20130125220425_rename_created_by_to_owner.rb [new file with mode: 0644]
services/api/db/migrate/20130128202518_rename_metadata_to_links.rb [new file with mode: 0644]
services/api/db/migrate/20130128231343_add_properties_to_specimen.rb [new file with mode: 0644]
services/api/db/migrate/20130130205749_add_manifest_text_to_collection.rb [new file with mode: 0644]
services/api/db/migrate/20130203104818_create_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20130203104824_create_job_steps.rb [new file with mode: 0644]
services/api/db/migrate/20130203115329_add_priority_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20130207195855_add_index_on_timestamps.rb [new file with mode: 0644]
services/api/db/migrate/20130218181504_add_properties_to_pipeline_invocations.rb [new file with mode: 0644]
services/api/db/migrate/20130226170000_remove_native_target_from_links.rb [new file with mode: 0644]
services/api/db/migrate/20130313175417_rename_projects_to_groups.rb [new file with mode: 0644]
services/api/db/migrate/20130315155820_add_is_locked_by_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20130315183626_add_log_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20130315213205_add_tasks_summary_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20130318002138_add_resource_limits_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20130319165853_rename_job_command_to_script.rb [new file with mode: 0644]
services/api/db/migrate/20130319180730_rename_pipeline_invocation_to_pipeline_instance.rb [new file with mode: 0644]
services/api/db/migrate/20130319194637_rename_pipelines_to_pipeline_templates.rb [new file with mode: 0644]
services/api/db/migrate/20130319201431_rename_job_steps_to_job_tasks.rb [new file with mode: 0644]
services/api/db/migrate/20130319235957_add_default_owner_to_users.rb [new file with mode: 0644]
services/api/db/migrate/20130320000107_add_default_owner_to_api_client_authorizations.rb [new file with mode: 0644]
services/api/db/migrate/20130326173804_create_commits.rb [new file with mode: 0644]
services/api/db/migrate/20130326182917_create_commit_ancestors.rb [new file with mode: 0644]
services/api/db/migrate/20130415020241_rename_orvos_to_arvados.rb [new file with mode: 0644]
services/api/db/migrate/20130425024459_create_keep_disks.rb [new file with mode: 0644]
services/api/db/migrate/20130425214427_add_service_host_and_service_port_and_service_ssl_flag_to_keep_disks.rb [new file with mode: 0644]
services/api/db/migrate/20130523060112_add_created_by_job_task_to_job_tasks.rb [new file with mode: 0644]
services/api/db/migrate/20130523060213_add_qsequence_to_job_tasks.rb [new file with mode: 0644]
services/api/db/migrate/20130524042319_fix_job_task_qsequence_type.rb [new file with mode: 0644]
services/api/db/migrate/20130528134100_update_nodes_index.rb [new file with mode: 0644]
services/api/db/migrate/20130606183519_create_authorized_keys.rb [new file with mode: 0644]
services/api/db/migrate/20130608053730_create_virtual_machines.rb [new file with mode: 0644]
services/api/db/migrate/20130610202538_create_repositories.rb [new file with mode: 0644]
services/api/db/migrate/20130611163736_rename_authorized_key_authorized_user_to_authorized_user_uuid.rb [new file with mode: 0644]
services/api/db/migrate/20130612042554_add_name_unique_index_to_repositories.rb [new file with mode: 0644]
services/api/db/migrate/20130617150007_add_is_trusted_to_api_clients.rb [new file with mode: 0644]
services/api/db/migrate/20130626002829_add_is_active_to_users.rb [new file with mode: 0644]
services/api/db/migrate/20130626022810_activate_all_admins.rb [new file with mode: 0644]
services/api/db/migrate/20130627154537_create_traits.rb [new file with mode: 0644]
services/api/db/migrate/20130627184333_create_humans.rb [new file with mode: 0644]
services/api/db/migrate/20130708163414_rename_foreign_uuid_attributes.rb [new file with mode: 0644]
services/api/db/migrate/20130708182912_rename_job_foreign_uuid_attributes.rb [new file with mode: 0644]
services/api/db/migrate/20130708185153_rename_user_default_owner.rb [new file with mode: 0644]
services/api/db/migrate/20130724153034_add_scopes_to_api_client_authorizations.rb [new file with mode: 0644]
services/api/db/migrate/20131007180607_rename_resource_limits_to_runtime_constraints.rb [new file with mode: 0644]
services/api/db/migrate/20140117231056_normalize_collection_uuid.rb [new file with mode: 0644]
services/api/db/migrate/20140124222114_fix_link_kind_underscores.rb [new file with mode: 0644]
services/api/db/migrate/20140129184311_normalize_collection_uuids_in_script_parameters.rb [new file with mode: 0644]
services/api/db/migrate/20140317135600_add_nondeterministic_column_to_job.rb [new file with mode: 0644]
services/api/db/migrate/20140319160547_separate_repository_from_script_version.rb [new file with mode: 0644]
services/api/db/migrate/20140321191343_add_repository_column_to_job.rb [new file with mode: 0644]
services/api/db/migrate/20140324024606_add_output_is_persistent_to_job.rb [new file with mode: 0644]
services/api/db/migrate/20140325175653_remove_kind_columns.rb [new file with mode: 0644]
services/api/db/migrate/20140402001908_add_system_group.rb [new file with mode: 0644]
services/api/db/migrate/20140407184311_rename_log_info_to_properties.rb [new file with mode: 0644]
services/api/db/migrate/20140421140924_add_group_class_to_groups.rb [new file with mode: 0644]
services/api/db/migrate/20140421151939_rename_auth_keys_user_index.rb [new file with mode: 0644]
services/api/db/migrate/20140421151940_timestamps_not_null.rb [new file with mode: 0644]
services/api/db/migrate/20140422011506_pipeline_instance_state.rb [new file with mode: 0644]
services/api/db/migrate/20140423132913_add_object_owner_to_logs.rb [new file with mode: 0644]
services/api/db/migrate/20140423133559_new_scope_format.rb [new file with mode: 0644]
services/api/db/migrate/20140501165548_add_unique_name_index_to_links.rb [new file with mode: 0644]
services/api/db/migrate/20140519205916_create_keep_services.rb [new file with mode: 0644]
services/api/db/migrate/20140527152921_add_description_to_pipeline_templates.rb [new file with mode: 0644]
services/api/db/migrate/20140530200539_add_supplied_script_version.rb [new file with mode: 0644]
services/api/db/migrate/20140601022548_remove_name_from_collections.rb [new file with mode: 0644]
services/api/db/migrate/20140602143352_remove_active_and_success_from_pipeline_instances.rb [new file with mode: 0644]
services/api/db/migrate/20140607150616_rename_folder_to_project.rb [new file with mode: 0644]
services/api/db/migrate/20140611173003_add_docker_locator_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20140627210837_anonymous_group.rb [new file with mode: 0644]
services/api/db/migrate/20140709172343_job_task_serial_qsequence.rb [new file with mode: 0644]
services/api/db/migrate/20140714184006_empty_collection.rb [new file with mode: 0644]
services/api/db/migrate/20140811184643_collection_use_regular_uuids.rb [new file with mode: 0644]
services/api/db/migrate/20140817035914_add_unique_name_constraints.rb [new file with mode: 0644]
services/api/db/migrate/20140818125735_add_not_null_constraint_to_group_name.rb [new file with mode: 0644]
services/api/db/migrate/20140826180337_remove_output_is_persistent_column.rb [new file with mode: 0644]
services/api/db/migrate/20140828141043_job_priority_fixup.rb [new file with mode: 0644]
services/api/db/migrate/20140909183946_add_start_finish_time_to_tasks_and_pipelines.rb [new file with mode: 0644]
services/api/db/migrate/20140911221252_add_description_to_pipeline_instances_and_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20140918141529_change_user_owner_uuid_not_null.rb [new file with mode: 0644]
services/api/db/migrate/20140918153541_add_properties_to_node.rb [new file with mode: 0644]
services/api/db/migrate/20140918153705_add_state_to_job.rb [new file with mode: 0644]
services/api/db/migrate/20140924091559_add_job_uuid_to_nodes.rb [new file with mode: 0644]
services/api/db/migrate/20141111133038_add_arvados_sdk_version_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20141208164553_owner_uuid_index.rb [new file with mode: 0644]
services/api/db/migrate/20141208174553_descriptions_are_strings.rb [new file with mode: 0644]
services/api/db/migrate/20141208174653_collection_file_names.rb [new file with mode: 0644]
services/api/db/migrate/20141208185217_search_index.rb [new file with mode: 0644]
services/api/db/migrate/20150122175935_no_description_in_search_index.rb [new file with mode: 0644]
services/api/db/migrate/20150123142953_full_text_search.rb [new file with mode: 0644]
services/api/db/migrate/20150203180223_set_group_class_on_anonymous_group.rb [new file with mode: 0644]
services/api/db/migrate/20150206210804_all_users_can_read_anonymous_group.rb [new file with mode: 0644]
services/api/db/migrate/20150206230342_rename_replication_attributes.rb [new file with mode: 0644]
services/api/db/migrate/20150216193428_collection_name_owner_unique_only_non_expired.rb [new file with mode: 0644]
services/api/db/migrate/20150303210106_fix_collection_portable_data_hash_with_hinted_manifest.rb [new file with mode: 0644]
services/api/db/migrate/20150312151136_change_collection_expires_at_to_datetime.rb [new file with mode: 0644]
services/api/db/migrate/20150317132720_add_username_to_users.rb [new file with mode: 0644]
services/api/db/migrate/20150324152204_backward_compatibility_for_user_repositories.rb [new file with mode: 0644]
services/api/db/migrate/20150423145759_no_filenames_in_collection_search_index.rb [new file with mode: 0644]
services/api/db/migrate/20150512193020_read_only_on_keep_services.rb [new file with mode: 0644]
services/api/db/migrate/20150526180251_leading_space_on_full_text_index.rb [new file with mode: 0644]
services/api/db/migrate/20151202151426_create_containers_and_requests.rb [new file with mode: 0644]
services/api/db/migrate/20151215134304_fix_containers_index.rb [new file with mode: 0644]
services/api/db/migrate/20151229214707_add_exit_code_to_containers.rb [new file with mode: 0644]
services/api/db/migrate/20160208210629_add_uuid_to_api_client_authorization.rb [new file with mode: 0644]
services/api/db/migrate/20160209155729_add_uuid_to_api_token_search_index.rb [new file with mode: 0644]
services/api/db/migrate/20160324144017_add_components_to_job.rb [new file with mode: 0644]
services/api/db/migrate/20160506175108_add_auths_to_container.rb [new file with mode: 0644]
services/api/db/migrate/20160509143250_add_auth_and_lock_to_container_index.rb [new file with mode: 0644]
services/api/db/migrate/20160808151559_create_workflows.rb [new file with mode: 0644]
services/api/db/migrate/20160819195557_add_script_parameters_digest_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20160819195725_populate_script_parameters_digest.rb [new file with mode: 0644]
services/api/db/migrate/20160901210110_repair_script_parameters_digest.rb [new file with mode: 0644]
services/api/db/migrate/20160909181442_rename_workflow_to_definition.rb [new file with mode: 0644]
services/api/db/migrate/20160926194129_add_container_count.rb [new file with mode: 0644]
services/api/db/migrate/20161019171346_add_use_existing_to_container_requests.rb [new file with mode: 0644]
services/api/db/migrate/20161111143147_add_scheduling_parameters_to_container.rb [new file with mode: 0644]
services/api/db/migrate/20161115171221_add_output_and_log_uuid_to_container_request.rb [new file with mode: 0644]
services/api/db/migrate/20161115174218_add_output_and_log_uuids_to_container_request_search_index.rb [new file with mode: 0644]
services/api/db/migrate/20161213172944_full_text_search_indexes.rb [new file with mode: 0644]
services/api/db/migrate/20161222153434_split_expiry_to_trash_and_delete.rb [new file with mode: 0644]
services/api/db/migrate/20161223090712_add_output_name_to_container_requests.rb [new file with mode: 0644]
services/api/db/migrate/20170102153111_add_output_name_to_container_request_search_index.rb [new file with mode: 0644]
services/api/db/migrate/20170105160301_add_output_name_to_cr_fts_index.rb [new file with mode: 0644]
services/api/db/migrate/20170105160302_set_finished_at_on_finished_pipeline_instances.rb [new file with mode: 0644]
services/api/db/migrate/20170216170823_no_cr_mounts_and_workflow_def_in_full_text_search_index.rb [new file with mode: 0644]
services/api/db/migrate/20170301225558_no_downgrade_after_json.rb [new file with mode: 0644]
services/api/db/migrate/20170319063406_serialized_columns_accept_null.rb [new file with mode: 0644]
services/api/db/migrate/20170328215436_add_portable_data_hash_index_to_collections.rb [new file with mode: 0644]
services/api/db/migrate/20170330012505_add_output_ttl_to_container_requests.rb [new file with mode: 0644]
services/api/db/migrate/20170419173031_add_created_by_job_task_index_to_job_tasks.rb [new file with mode: 0644]
services/api/db/migrate/20170419173712_add_object_owner_index_to_logs.rb [new file with mode: 0644]
services/api/db/migrate/20170419175801_add_requesting_container_index_to_container_requests.rb [new file with mode: 0644]
services/api/db/migrate/20170628185847_jobs_yaml_to_json.rb [new file with mode: 0644]
services/api/db/migrate/20170704160233_yaml_to_json.rb [new file with mode: 0644]
services/api/db/migrate/20170706141334_json_collection_properties.rb [new file with mode: 0644]
services/api/db/migrate/20170824202826_trashable_groups.rb [new file with mode: 0644]
services/api/db/migrate/20170906224040_materialized_permission_view.rb [new file with mode: 0644]
services/api/db/migrate/20171027183824_add_index_to_containers.rb [new file with mode: 0644]
services/api/db/migrate/20171208203841_fix_trash_flag_follow.rb [new file with mode: 0644]
services/api/db/migrate/20171212153352_add_gin_index_to_collection_properties.rb [new file with mode: 0644]
services/api/db/migrate/20180216203422_add_storage_classes_to_collections.rb [new file with mode: 0644]
services/api/db/migrate/20180228220311_add_secret_mounts_to_containers.rb [new file with mode: 0644]
services/api/db/migrate/20180313180114_change_container_priority_bigint.rb [new file with mode: 0644]
services/api/db/migrate/20180501182859_add_redirect_to_user_uuid_to_users.rb [new file with mode: 0644]
services/api/db/migrate/20180514135529_add_container_auth_uuid_index.rb [new file with mode: 0644]
services/api/db/migrate/20180607175050_properties_to_jsonb.rb [new file with mode: 0644]
services/api/db/migrate/20180608123145_add_properties_to_groups.rb [new file with mode: 0644]
services/api/db/migrate/20180806133039_index_all_filenames.rb [new file with mode: 0644]
services/api/db/migrate/20180820130357_add_pdh_and_trash_index_to_collections.rb [new file with mode: 0644]
services/api/db/migrate/20180820132617_add_lock_index_to_containers.rb [new file with mode: 0644]
services/api/db/migrate/20180820135808_drop_pdh_index_from_collections.rb [new file with mode: 0644]
services/api/db/migrate/20180824152014_add_md5_index_to_containers.rb [new file with mode: 0644]
services/api/db/migrate/20180824155207_add_queue_index_to_containers.rb [new file with mode: 0644]
services/api/db/migrate/20180904110712_add_runtime_status_to_containers.rb [new file with mode: 0644]
services/api/db/migrate/20180913175443_add_version_info_to_collections.rb [new file with mode: 0644]
services/api/db/migrate/20180915155335_set_current_version_uuid_on_collections.rb [new file with mode: 0644]
services/api/db/migrate/20180917200000_replace_full_text_indexes.rb [new file with mode: 0644]
services/api/db/migrate/20180917205609_recompute_file_names_index.rb [new file with mode: 0644]
services/api/db/migrate/20180919001158_recreate_collection_unique_name_index.rb [new file with mode: 0644]
services/api/db/migrate/20181001175023_add_preserve_version_to_collections.rb [new file with mode: 0644]
services/api/db/migrate/20181004131141_add_current_version_uuid_to_collection_search_index.rb [new file with mode: 0644]
services/api/db/migrate/20181005192222_add_container_runtime_token.rb [new file with mode: 0644]
services/api/db/migrate/20181011184200_add_runtime_token_to_container.rb [new file with mode: 0644]
services/api/db/migrate/20181213183234_add_expression_index_to_links.rb [new file with mode: 0644]
services/api/db/migrate/20190214214814_add_container_lock_count.rb [new file with mode: 0644]
services/api/db/seeds.rb [new file with mode: 0644]
services/api/db/structure.sql [new file with mode: 0644]
services/api/fpm-info.sh [new file with mode: 0644]
services/api/lib/app_version.rb [new file with mode: 0644]
services/api/lib/arvados_model_updates.rb [new file with mode: 0644]
services/api/lib/assets/.gitkeep [new file with mode: 0644]
services/api/lib/audit_logs.rb [new file with mode: 0644]
services/api/lib/can_be_an_owner.rb [new file with mode: 0644]
services/api/lib/common_api_template.rb [new file with mode: 0644]
services/api/lib/create_ancestor_view.sql [new file with mode: 0644]
services/api/lib/create_permission_view.sql [new file with mode: 0644]
services/api/lib/create_superuser_token.rb [new file with mode: 0755]
services/api/lib/crunch_dispatch.rb [new file with mode: 0644]
services/api/lib/current_api_client.rb [new file with mode: 0644]
services/api/lib/db_current_time.rb [new file with mode: 0644]
services/api/lib/enable_jobs_api.rb [new file with mode: 0644]
services/api/lib/has_uuid.rb [new file with mode: 0644]
services/api/lib/josh_id.rb [new file with mode: 0644]
services/api/lib/kind_and_etag.rb [new file with mode: 0644]
services/api/lib/load_param.rb [new file with mode: 0644]
services/api/lib/log_reuse_info.rb [new file with mode: 0644]
services/api/lib/migrate_yaml_to_json.rb [new file with mode: 0644]
services/api/lib/record_filters.rb [new file with mode: 0644]
services/api/lib/refresh_permission_view.rb [new file with mode: 0644]
services/api/lib/request_error.rb [new file with mode: 0644]
services/api/lib/safe_json.rb [new file with mode: 0644]
services/api/lib/safer_file_store.rb [new file with mode: 0644]
services/api/lib/salvage_collection.rb [new file with mode: 0755]
services/api/lib/serializers.rb [new file with mode: 0644]
services/api/lib/simulate_job_log.rb [new file with mode: 0644]
services/api/lib/sweep_trashed_objects.rb [new file with mode: 0644]
services/api/lib/tasks/.gitkeep [new file with mode: 0644]
services/api/lib/tasks/config_check.rake [new file with mode: 0644]
services/api/lib/tasks/config_dump.rake [new file with mode: 0644]
services/api/lib/tasks/delete_old_container_logs.rake [new file with mode: 0644]
services/api/lib/tasks/delete_old_job_logs.rake [new file with mode: 0644]
services/api/lib/tasks/replay_job_log.rake [new file with mode: 0644]
services/api/lib/tasks/test_tasks.rake [new file with mode: 0644]
services/api/lib/trashable.rb [new file with mode: 0644]
services/api/lib/update_priority.rb [new file with mode: 0644]
services/api/lib/whitelist_update.rb [new file with mode: 0644]
services/api/public/404.html [new file with mode: 0644]
services/api/public/422.html [new file with mode: 0644]
services/api/public/500.html [new file with mode: 0644]
services/api/public/favicon.ico [new file with mode: 0644]
services/api/public/robots.txt [new file with mode: 0644]
services/api/script/arvados-git-sync.rb [new file with mode: 0755]
services/api/script/create_superuser_token.rb [new file with mode: 0755]
services/api/script/crunch-dispatch.rb [new file with mode: 0755]
services/api/script/crunch_failure_report.py [new file with mode: 0755]
services/api/script/fail-jobs.rb [new file with mode: 0755]
services/api/script/get_anonymous_user_token.rb [new file with mode: 0755]
services/api/script/migrate-gitolite-to-uuid-storage.rb [new file with mode: 0755]
services/api/script/rails [new file with mode: 0755]
services/api/script/rake_test.sh [new file with mode: 0755]
services/api/script/restart-dns-server [new file with mode: 0755]
services/api/script/salvage_collection.rb [new file with mode: 0755]
services/api/script/setup-new-user.rb [new file with mode: 0755]
services/api/test/factories/api_client.rb [new file with mode: 0644]
services/api/test/factories/api_client_authorization.rb [new file with mode: 0644]
services/api/test/factories/group.rb [new file with mode: 0644]
services/api/test/factories/link.rb [new file with mode: 0644]
services/api/test/factories/user.rb [new file with mode: 0644]
services/api/test/fixtures/.gitkeep [new file with mode: 0644]
services/api/test/fixtures/api_client_authorizations.yml [new file with mode: 0644]
services/api/test/fixtures/api_clients.yml [new file with mode: 0644]
services/api/test/fixtures/authorized_keys.yml [new file with mode: 0644]
services/api/test/fixtures/collections.yml [new file with mode: 0644]
services/api/test/fixtures/container_requests.yml [new file with mode: 0644]
services/api/test/fixtures/containers.yml [new file with mode: 0644]
services/api/test/fixtures/files/proc_stat [new file with mode: 0644]
services/api/test/fixtures/groups.yml [new file with mode: 0644]
services/api/test/fixtures/humans.yml [new file with mode: 0644]
services/api/test/fixtures/job_tasks.yml [new file with mode: 0644]
services/api/test/fixtures/jobs.yml [new file with mode: 0644]
services/api/test/fixtures/keep_disks.yml [new file with mode: 0644]
services/api/test/fixtures/keep_services.yml [new file with mode: 0644]
services/api/test/fixtures/links.yml [new file with mode: 0644]
services/api/test/fixtures/logs.yml [new file with mode: 0644]
services/api/test/fixtures/nodes.yml [new file with mode: 0644]
services/api/test/fixtures/pipeline_instances.yml [new file with mode: 0644]
services/api/test/fixtures/pipeline_templates.yml [new file with mode: 0644]
services/api/test/fixtures/repositories.yml [new file with mode: 0644]
services/api/test/fixtures/specimens.yml [new file with mode: 0644]
services/api/test/fixtures/traits.yml [new file with mode: 0644]
services/api/test/fixtures/users.yml [new file with mode: 0644]
services/api/test/fixtures/virtual_machines.yml [new file with mode: 0644]
services/api/test/fixtures/workflows.yml [new file with mode: 0644]
services/api/test/functional/.gitkeep [new file with mode: 0644]
services/api/test/functional/application_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/authorized_keys_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/collections_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/commits_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/container_requests_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/containers_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/filters_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/groups_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/healthcheck_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/humans_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/job_reuse_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/job_tasks_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/jobs_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/keep_disks_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/keep_services_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/links_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/logs_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/nodes_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/pipeline_instances_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/pipeline_templates_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/query_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/repositories_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/schema_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/specimens_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/traits_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/user_agreements_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/users_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/virtual_machines_controller_test.rb [new file with mode: 0644]
services/api/test/functional/database_controller_test.rb [new file with mode: 0644]
services/api/test/functional/user_sessions_controller_test.rb [new file with mode: 0644]
services/api/test/helpers/container_test_helper.rb [new file with mode: 0644]
services/api/test/helpers/docker_migration_helper.rb [new file with mode: 0644]
services/api/test/helpers/git_test_helper.rb [new file with mode: 0644]
services/api/test/helpers/manifest_examples.rb [new file with mode: 0644]
services/api/test/helpers/time_block.rb [new file with mode: 0644]
services/api/test/helpers/users_test_helper.rb [new file with mode: 0644]
services/api/test/integration/.gitkeep [new file with mode: 0644]
services/api/test/integration/api_client_authorizations_api_test.rb [new file with mode: 0644]
services/api/test/integration/api_client_authorizations_scopes_test.rb [new file with mode: 0644]
services/api/test/integration/collections_api_test.rb [new file with mode: 0644]
services/api/test/integration/collections_performance_test.rb [new file with mode: 0644]
services/api/test/integration/container_auth_test.rb [new file with mode: 0644]
services/api/test/integration/cross_origin_test.rb [new file with mode: 0644]
services/api/test/integration/crunch_dispatch_test.rb [new file with mode: 0644]
services/api/test/integration/database_reset_test.rb [new file with mode: 0644]
services/api/test/integration/errors_test.rb [new file with mode: 0644]
services/api/test/integration/groups_test.rb [new file with mode: 0644]
services/api/test/integration/jobs_api_test.rb [new file with mode: 0644]
services/api/test/integration/keep_proxy_test.rb [new file with mode: 0644]
services/api/test/integration/login_workflow_test.rb [new file with mode: 0644]
services/api/test/integration/noop_deep_munge_test.rb [new file with mode: 0644]
services/api/test/integration/permissions_test.rb [new file with mode: 0644]
services/api/test/integration/pipeline_test.rb [new file with mode: 0644]
services/api/test/integration/reader_tokens_test.rb [new file with mode: 0644]
services/api/test/integration/remote_user_test.rb [new file with mode: 0644]
services/api/test/integration/select_test.rb [new file with mode: 0644]
services/api/test/integration/serialized_encoding_test.rb [new file with mode: 0644]
services/api/test/integration/user_sessions_test.rb [new file with mode: 0644]
services/api/test/integration/users_test.rb [new file with mode: 0644]
services/api/test/integration/valid_links_test.rb [new file with mode: 0644]
services/api/test/job_logs/crunchstatshort.log [new file with mode: 0644]
services/api/test/performance/links_index_test.rb [new file with mode: 0644]
services/api/test/performance/permission_test.rb [new file with mode: 0644]
services/api/test/tasks/delete_old_container_logs_test.rb [new file with mode: 0644]
services/api/test/tasks/delete_old_job_logs_test.rb [new file with mode: 0644]
services/api/test/test.git.tar [new file with mode: 0644]
services/api/test/test_helper.rb [new file with mode: 0644]
services/api/test/unit/.gitkeep [new file with mode: 0644]
services/api/test/unit/api_client_authorization_test.rb [new file with mode: 0644]
services/api/test/unit/api_client_test.rb [new file with mode: 0644]
services/api/test/unit/app_version_test.rb [new file with mode: 0644]
services/api/test/unit/application_test.rb [new file with mode: 0644]
services/api/test/unit/arvados_model_test.rb [new file with mode: 0644]
services/api/test/unit/authorized_key_test.rb [new file with mode: 0644]
services/api/test/unit/blob_test.rb [new file with mode: 0644]
services/api/test/unit/collection_performance_test.rb [new file with mode: 0644]
services/api/test/unit/collection_test.rb [new file with mode: 0644]
services/api/test/unit/commit_ancestor_test.rb [new file with mode: 0644]
services/api/test/unit/commit_test.rb [new file with mode: 0644]
services/api/test/unit/container_request_test.rb [new file with mode: 0644]
services/api/test/unit/container_test.rb [new file with mode: 0644]
services/api/test/unit/create_superuser_token_test.rb [new file with mode: 0644]
services/api/test/unit/crunch_dispatch_test.rb [new file with mode: 0644]
services/api/test/unit/fail_jobs_test.rb [new file with mode: 0644]
services/api/test/unit/group_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/api_client_authorizations_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/api_clients_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/authorized_keys_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/collections_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/commit_ancestors_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/commits_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/groups_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/humans_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/job_tasks_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/jobs_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/keep_disks_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/links_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/logs_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/nodes_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/pipeline_instances_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/pipeline_templates_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/repositories_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/specimens_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/traits_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/virtual_machines_helper_test.rb [new file with mode: 0644]
services/api/test/unit/human_test.rb [new file with mode: 0644]
services/api/test/unit/job_task_test.rb [new file with mode: 0644]
services/api/test/unit/job_test.rb [new file with mode: 0644]
services/api/test/unit/keep_disk_test.rb [new file with mode: 0644]
services/api/test/unit/keep_service_test.rb [new file with mode: 0644]
services/api/test/unit/link_test.rb [new file with mode: 0644]
services/api/test/unit/log_test.rb [new file with mode: 0644]
services/api/test/unit/node_test.rb [new file with mode: 0644]
services/api/test/unit/owner_test.rb [new file with mode: 0644]
services/api/test/unit/permission_test.rb [new file with mode: 0644]
services/api/test/unit/pipeline_instance_test.rb [new file with mode: 0644]
services/api/test/unit/pipeline_template_test.rb [new file with mode: 0644]
services/api/test/unit/repository_test.rb [new file with mode: 0644]
services/api/test/unit/salvage_collection_test.rb [new file with mode: 0644]
services/api/test/unit/seralizer_test.rb [new file with mode: 0644]
services/api/test/unit/specimen_test.rb [new file with mode: 0644]
services/api/test/unit/trait_test.rb [new file with mode: 0644]
services/api/test/unit/update_priority_test.rb [new file with mode: 0644]
services/api/test/unit/user_notifier_test.rb [new file with mode: 0644]
services/api/test/unit/user_test.rb [new file with mode: 0644]
services/api/test/unit/virtual_machine_test.rb [new file with mode: 0644]
services/api/test/unit/workflow_test.rb [new file with mode: 0644]
services/api/vendor/assets/stylesheets/.gitkeep [new file with mode: 0644]
services/api/vendor/plugins/.gitkeep [new file with mode: 0644]
services/arv-git-httpd/.gitignore [new file with mode: 0644]
services/arv-git-httpd/arvados-git-httpd.service [new file with mode: 0644]
services/arv-git-httpd/auth_handler.go [new file with mode: 0644]
services/arv-git-httpd/auth_handler_test.go [new file with mode: 0644]
services/arv-git-httpd/git_handler.go [new file with mode: 0644]
services/arv-git-httpd/git_handler_test.go [new file with mode: 0644]
services/arv-git-httpd/gitolite_test.go [new file with mode: 0644]
services/arv-git-httpd/integration_test.go [new file with mode: 0644]
services/arv-git-httpd/main.go [new file with mode: 0644]
services/arv-git-httpd/server.go [new file with mode: 0644]
services/arv-git-httpd/server_test.go [new file with mode: 0644]
services/arv-git-httpd/usage.go [new file with mode: 0644]
services/arv-web/README [new file with mode: 0644]
services/arv-web/arv-web.py [new file with mode: 0755]
services/arv-web/sample-cgi-app/docker_image [new file with mode: 0644]
services/arv-web/sample-cgi-app/public/.htaccess [new file with mode: 0644]
services/arv-web/sample-cgi-app/public/index.cgi [new file with mode: 0755]
services/arv-web/sample-cgi-app/tmp/.keepkeep [new file with mode: 0644]
services/arv-web/sample-rack-app/config.ru [new file with mode: 0644]
services/arv-web/sample-rack-app/docker_image [new file with mode: 0644]
services/arv-web/sample-rack-app/public/.keepkeep [new file with mode: 0644]
services/arv-web/sample-rack-app/tmp/.keepkeep [new file with mode: 0644]
services/arv-web/sample-static-page/docker_image [new file with mode: 0644]
services/arv-web/sample-static-page/public/index.html [new file with mode: 0644]
services/arv-web/sample-static-page/tmp/.keepkeep [new file with mode: 0644]
services/arv-web/sample-wsgi-app/docker_image [new file with mode: 0644]
services/arv-web/sample-wsgi-app/passenger_wsgi.py [new file with mode: 0644]
services/arv-web/sample-wsgi-app/public/.keepkeep [new file with mode: 0644]
services/arv-web/sample-wsgi-app/tmp/.keepkeep [new file with mode: 0644]
services/crunch-dispatch-local/.gitignore [new file with mode: 0644]
services/crunch-dispatch-local/crunch-dispatch-local.go [new file with mode: 0644]
services/crunch-dispatch-local/crunch-dispatch-local_test.go [new file with mode: 0644]
services/crunch-dispatch-slurm/crunch-dispatch-slurm.go [new file with mode: 0644]
services/crunch-dispatch-slurm/crunch-dispatch-slurm.service [new file with mode: 0644]
services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go [new file with mode: 0644]
services/crunch-dispatch-slurm/node_type.go [new file with mode: 0644]
services/crunch-dispatch-slurm/priority.go [new file with mode: 0644]
services/crunch-dispatch-slurm/priority_test.go [new file with mode: 0644]
services/crunch-dispatch-slurm/script.go [new file with mode: 0644]
services/crunch-dispatch-slurm/script_test.go [new file with mode: 0644]
services/crunch-dispatch-slurm/slurm.go [new file with mode: 0644]
services/crunch-dispatch-slurm/squeue.go [new file with mode: 0644]
services/crunch-dispatch-slurm/squeue_test.go [new file with mode: 0644]
services/crunch-dispatch-slurm/usage.go [new file with mode: 0644]
services/crunch-run/background.go [new file with mode: 0644]
services/crunch-run/cgroup.go [new file with mode: 0644]
services/crunch-run/cgroup_test.go [new file with mode: 0644]
services/crunch-run/copier.go [new file with mode: 0644]
services/crunch-run/copier_test.go [new file with mode: 0644]
services/crunch-run/crunchrun.go [new file with mode: 0644]
services/crunch-run/crunchrun_test.go [new file with mode: 0644]
services/crunch-run/git_mount.go [new file with mode: 0644]
services/crunch-run/git_mount_test.go [new file with mode: 0644]
services/crunch-run/logging.go [new file with mode: 0644]
services/crunch-run/logging_test.go [new file with mode: 0644]
services/crunch/crunch-job [new symlink]
services/crunchstat/.gitignore [new file with mode: 0644]
services/crunchstat/crunchstat.go [new file with mode: 0644]
services/crunchstat/crunchstat_test.go [new file with mode: 0644]
services/dockercleaner/.gitignore [new symlink]
services/dockercleaner/MANIFEST.in [new file with mode: 0644]
services/dockercleaner/README.rst [new file with mode: 0644]
services/dockercleaner/agpl-3.0.txt [new file with mode: 0644]
services/dockercleaner/arvados-docker-cleaner.service [new file with mode: 0644]
services/dockercleaner/arvados_docker/__init__.py [new file with mode: 0644]
services/dockercleaner/arvados_docker/cleaner.py [new file with mode: 0755]
services/dockercleaner/arvados_version.py [new file with mode: 0644]
services/dockercleaner/bin/arvados-docker-cleaner [new file with mode: 0755]
services/dockercleaner/gittaggers.py [new symlink]
services/dockercleaner/setup.py [new file with mode: 0644]
services/dockercleaner/tests/__init__.py [new file with mode: 0644]
services/dockercleaner/tests/test_cleaner.py [new file with mode: 0644]
services/fuse/.gitignore [new symlink]
services/fuse/MANIFEST.in [new file with mode: 0644]
services/fuse/README.rst [new file with mode: 0644]
services/fuse/agpl-3.0.txt [new file with mode: 0644]
services/fuse/arvados_fuse/__init__.py [new file with mode: 0644]
services/fuse/arvados_fuse/command.py [new file with mode: 0644]
services/fuse/arvados_fuse/crunchstat.py [new file with mode: 0644]
services/fuse/arvados_fuse/fresh.py [new file with mode: 0644]
services/fuse/arvados_fuse/fusedir.py [new file with mode: 0644]
services/fuse/arvados_fuse/fusefile.py [new file with mode: 0644]
services/fuse/arvados_fuse/unmount.py [new file with mode: 0644]
services/fuse/arvados_version.py [new file with mode: 0644]
services/fuse/bin/arv-mount [new file with mode: 0755]
services/fuse/fpm-info.sh [new file with mode: 0644]
services/fuse/gittaggers.py [new symlink]
services/fuse/setup.py [new file with mode: 0644]
services/fuse/tests/__init__.py [new file with mode: 0644]
services/fuse/tests/fstest.py [new file with mode: 0644]
services/fuse/tests/integration_test.py [new file with mode: 0644]
services/fuse/tests/mount_test_base.py [new file with mode: 0644]
services/fuse/tests/performance/__init__.py [new file with mode: 0644]
services/fuse/tests/performance/performance_profiler.py [new symlink]
services/fuse/tests/performance/test_collection_performance.py [new file with mode: 0644]
services/fuse/tests/prof.py [new file with mode: 0644]
services/fuse/tests/run_test_server.py [new symlink]
services/fuse/tests/slow_test.py [new symlink]
services/fuse/tests/test_cache.py [new file with mode: 0644]
services/fuse/tests/test_command_args.py [new file with mode: 0644]
services/fuse/tests/test_crunchstat.py [new file with mode: 0644]
services/fuse/tests/test_exec.py [new file with mode: 0644]
services/fuse/tests/test_inodes.py [new file with mode: 0644]
services/fuse/tests/test_mount.py [new file with mode: 0644]
services/fuse/tests/test_mount_type.py [new file with mode: 0644]
services/fuse/tests/test_retry.py [new file with mode: 0644]
services/fuse/tests/test_tmp_collection.py [new file with mode: 0644]
services/fuse/tests/test_token_expiry.py [new file with mode: 0644]
services/fuse/tests/test_unmount.py [new file with mode: 0644]
services/health/arvados-health.service [new file with mode: 0644]
services/health/main.go [new file with mode: 0644]
services/keep-balance/balance.go [new file with mode: 0644]
services/keep-balance/balance_run_test.go [new file with mode: 0644]
services/keep-balance/balance_test.go [new file with mode: 0644]
services/keep-balance/block_state.go [new file with mode: 0644]
services/keep-balance/change_set.go [new file with mode: 0644]
services/keep-balance/change_set_test.go [new file with mode: 0644]
services/keep-balance/collection.go [new file with mode: 0644]
services/keep-balance/collection_test.go [new file with mode: 0644]
services/keep-balance/integration_test.go [new file with mode: 0644]
services/keep-balance/keep-balance.service [new file with mode: 0644]
services/keep-balance/keep_service.go [new file with mode: 0644]
services/keep-balance/main.go [new file with mode: 0644]
services/keep-balance/main_test.go [new file with mode: 0644]
services/keep-balance/metrics.go [new file with mode: 0644]
services/keep-balance/server.go [new file with mode: 0644]
services/keep-balance/usage.go [new file with mode: 0644]
services/keep-web/.gitignore [new file with mode: 0644]
services/keep-web/cache.go [new file with mode: 0644]
services/keep-web/cache_test.go [new file with mode: 0644]
services/keep-web/cadaver_test.go [new file with mode: 0644]
services/keep-web/doc.go [new file with mode: 0644]
services/keep-web/handler.go [new file with mode: 0644]
services/keep-web/handler_test.go [new file with mode: 0644]
services/keep-web/keep-web.service [new file with mode: 0644]
services/keep-web/main.go [new file with mode: 0644]
services/keep-web/ranges_test.go [new file with mode: 0644]
services/keep-web/server.go [new file with mode: 0644]
services/keep-web/server_test.go [new file with mode: 0644]
services/keep-web/status_test.go [new file with mode: 0644]
services/keep-web/usage.go [new file with mode: 0644]
services/keep-web/webdav.go [new file with mode: 0644]
services/keep-web/webdav_test.go [new file with mode: 0644]
services/keep/tools/traffic_test.py [new file with mode: 0755]
services/keepproxy/.gitignore [new file with mode: 0644]
services/keepproxy/keepproxy.go [new file with mode: 0644]
services/keepproxy/keepproxy.service [new file with mode: 0644]
services/keepproxy/keepproxy_test.go [new file with mode: 0644]
services/keepproxy/pkg-extras/etc/default/keepproxy [new file with mode: 0644]
services/keepproxy/pkg-extras/etc/init.d/keepproxy [new file with mode: 0755]
services/keepproxy/proxy_client.go [new file with mode: 0644]
services/keepproxy/usage.go [new file with mode: 0644]
services/keepstore/.gitignore [new file with mode: 0644]
services/keepstore/azure_blob_volume.go [new file with mode: 0644]
services/keepstore/azure_blob_volume_test.go [new file with mode: 0644]
services/keepstore/bufferpool.go [new file with mode: 0644]
services/keepstore/bufferpool_test.go [new file with mode: 0644]
services/keepstore/collision.go [new file with mode: 0644]
services/keepstore/collision_test.go [new file with mode: 0644]
services/keepstore/config.go [new file with mode: 0644]
services/keepstore/config_test.go [new file with mode: 0644]
services/keepstore/count.go [new file with mode: 0644]
services/keepstore/deprecated.go [new file with mode: 0644]
services/keepstore/gocheck_test.go [new file with mode: 0644]
services/keepstore/handler_test.go [new file with mode: 0644]
services/keepstore/handlers.go [new file with mode: 0644]
services/keepstore/handlers_with_generic_volume_test.go [new file with mode: 0644]
services/keepstore/keepstore.go [new file with mode: 0644]
services/keepstore/keepstore.service [new file with mode: 0644]
services/keepstore/keepstore_test.go [new file with mode: 0644]
services/keepstore/metrics.go [new file with mode: 0644]
services/keepstore/mock_mutex_for_test.go [new file with mode: 0644]
services/keepstore/mounts_test.go [new file with mode: 0644]
services/keepstore/perms.go [new file with mode: 0644]
services/keepstore/perms_test.go [new file with mode: 0644]
services/keepstore/pipe_adapters.go [new file with mode: 0644]
services/keepstore/proxy_remote.go [new file with mode: 0644]
services/keepstore/proxy_remote_test.go [new file with mode: 0644]
services/keepstore/pull_worker.go [new file with mode: 0644]
services/keepstore/pull_worker_integration_test.go [new file with mode: 0644]
services/keepstore/pull_worker_test.go [new file with mode: 0644]
services/keepstore/s3_volume.go [new file with mode: 0644]
services/keepstore/s3_volume_test.go [new file with mode: 0644]
services/keepstore/server.go [new file with mode: 0644]
services/keepstore/server_test.go [new file with mode: 0644]
services/keepstore/stats_ticker.go [new file with mode: 0644]
services/keepstore/status_test.go [new file with mode: 0644]
services/keepstore/trash_worker.go [new file with mode: 0644]
services/keepstore/trash_worker_test.go [new file with mode: 0644]
services/keepstore/unix_volume.go [new file with mode: 0644]
services/keepstore/unix_volume_test.go [new file with mode: 0644]
services/keepstore/usage.go [new file with mode: 0644]
services/keepstore/volume.go [new file with mode: 0644]
services/keepstore/volume_generic_test.go [new file with mode: 0644]
services/keepstore/volume_test.go [new file with mode: 0644]
services/keepstore/work_queue.go [new file with mode: 0644]
services/keepstore/work_queue_test.go [new file with mode: 0644]
services/login-sync/.gitignore [new file with mode: 0644]
services/login-sync/Gemfile [new file with mode: 0644]
services/login-sync/Rakefile [new file with mode: 0644]
services/login-sync/agpl-3.0.txt [new file with mode: 0644]
services/login-sync/arvados-login-sync.gemspec [new file with mode: 0644]
services/login-sync/bin/arvados-login-sync [new file with mode: 0755]
services/login-sync/test/binstub_new_user/useradd [new file with mode: 0755]
services/login-sync/test/stubs.rb [new file with mode: 0644]
services/login-sync/test/test_add_user.rb [new file with mode: 0644]
services/nodemanager/.gitignore [new symlink]
services/nodemanager/MANIFEST.in [new file with mode: 0644]
services/nodemanager/README.rst [new file with mode: 0644]
services/nodemanager/agpl-3.0.txt [new file with mode: 0644]
services/nodemanager/arvados-node-manager.service [new file with mode: 0644]
services/nodemanager/arvados_version.py [new file with mode: 0644]
services/nodemanager/arvnodeman/__init__.py [new file with mode: 0644]
services/nodemanager/arvnodeman/baseactor.py [new file with mode: 0644]
services/nodemanager/arvnodeman/clientactor.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/__init__.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/dispatch/__init__.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/dispatch/slurm.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/dispatch/transitions.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/driver/__init__.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/driver/azure.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/driver/dummy.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/driver/ec2.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/driver/gce.py [new file with mode: 0644]
services/nodemanager/arvnodeman/config.py [new file with mode: 0644]
services/nodemanager/arvnodeman/daemon.py [new file with mode: 0644]
services/nodemanager/arvnodeman/jobqueue.py [new file with mode: 0644]
services/nodemanager/arvnodeman/launcher.py [new file with mode: 0644]
services/nodemanager/arvnodeman/nodelist.py [new file with mode: 0644]
services/nodemanager/arvnodeman/status.py [new file with mode: 0644]
services/nodemanager/arvnodeman/test/__init__.py [new file with mode: 0644]
services/nodemanager/arvnodeman/test/fake_driver.py [new file with mode: 0644]
services/nodemanager/arvnodeman/timedcallback.py [new file with mode: 0644]
services/nodemanager/bin/arvados-node-manager [new file with mode: 0755]
services/nodemanager/doc/azure.example.cfg [new file with mode: 0644]
services/nodemanager/doc/ec2.example.cfg [new file with mode: 0644]
services/nodemanager/doc/gce.example.cfg [new file with mode: 0644]
services/nodemanager/doc/local.example.cfg [new file with mode: 0644]
services/nodemanager/fpm-info.sh [new file with mode: 0644]
services/nodemanager/gittaggers.py [new symlink]
services/nodemanager/setup.py [new file with mode: 0644]
services/nodemanager/tests/__init__.py [new file with mode: 0644]
services/nodemanager/tests/fake_azure.cfg.template [new file with mode: 0644]
services/nodemanager/tests/fake_ec2.cfg.template [new file with mode: 0644]
services/nodemanager/tests/fake_gce.cfg.template [new file with mode: 0644]
services/nodemanager/tests/integration_test.py [new file with mode: 0755]
services/nodemanager/tests/stress_test.cwl [new file with mode: 0644]
services/nodemanager/tests/test_arguments.py [new file with mode: 0644]
services/nodemanager/tests/test_clientactor.py [new file with mode: 0644]
services/nodemanager/tests/test_computenode.py [new file with mode: 0644]
services/nodemanager/tests/test_computenode_dispatch.py [new file with mode: 0644]
services/nodemanager/tests/test_computenode_dispatch_slurm.py [new file with mode: 0644]
services/nodemanager/tests/test_computenode_driver.py [new file with mode: 0644]
services/nodemanager/tests/test_computenode_driver_azure.py [new file with mode: 0644]
services/nodemanager/tests/test_computenode_driver_ec2.py [new file with mode: 0644]
services/nodemanager/tests/test_computenode_driver_gce.py [new file with mode: 0644]
services/nodemanager/tests/test_config.py [new file with mode: 0644]
services/nodemanager/tests/test_daemon.py [new file with mode: 0644]
services/nodemanager/tests/test_failure.py [new file with mode: 0644]
services/nodemanager/tests/test_jobqueue.py [new file with mode: 0644]
services/nodemanager/tests/test_nodelist.py [new file with mode: 0644]
services/nodemanager/tests/test_status.py [new file with mode: 0644]
services/nodemanager/tests/test_timedcallback.py [new file with mode: 0644]
services/nodemanager/tests/testutil.py [new file with mode: 0644]
services/ws/arvados-ws.service [new file with mode: 0644]
services/ws/config.go [new file with mode: 0644]
services/ws/doc.go [new file with mode: 0644]
services/ws/event.go [new file with mode: 0644]
services/ws/event_source.go [new file with mode: 0644]
services/ws/event_source_test.go [new file with mode: 0644]
services/ws/event_test.go [new file with mode: 0644]
services/ws/gocheck_test.go [new file with mode: 0644]
services/ws/handler.go [new file with mode: 0644]
services/ws/main.go [new file with mode: 0644]
services/ws/permission.go [new file with mode: 0644]
services/ws/permission_test.go [new file with mode: 0644]
services/ws/router.go [new file with mode: 0644]
services/ws/server.go [new file with mode: 0644]
services/ws/server_test.go [new file with mode: 0644]
services/ws/session.go [new file with mode: 0644]
services/ws/session_v0.go [new file with mode: 0644]
services/ws/session_v0_test.go [new file with mode: 0644]
services/ws/session_v1.go [new file with mode: 0644]
tools/arvbash/arvbash.sh [new file with mode: 0755]
tools/arvbox/bin/arvbox [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/58118E89F3A912897C070ADBF76221572C52609D.asc [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/Dockerfile.base [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/Dockerfile.demo [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/Dockerfile.dev [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/api-setup.sh [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/common.sh [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/createusers.sh [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/crunch-setup.sh [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/daemon.json [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/fuse.conf [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/gitolite.rc [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/go-setup.sh [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/keep-setup.sh [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/logger [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/runit-docker/.gitignore [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/runit-docker/LICENSE [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/runit-docker/Makefile [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/runit-docker/README.md [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/runit-docker/debian/changelog [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/runit-docker/debian/compat [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/runit-docker/debian/control [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/runit-docker/debian/copyright [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/runit-docker/debian/docs [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/runit-docker/debian/rules [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/runit-docker/debian/source/format [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/runit-docker/runit-docker [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/runit-docker/runit-docker.c [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/runit/1 [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/runit/2 [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/runit/3 [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/runit/ctrlaltdel [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/runsu.sh [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/api/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/api/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/api/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/api/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/certificate/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/certificate/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/certificate/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/composer/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/composer/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/composer/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/composer/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/controller/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/controller/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/controller/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch0/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch0/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch0/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch0/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch1/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch1/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch1/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch1/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/doc/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/doc/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/doc/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/doc/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/docker/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/docker/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/docker/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/gitolite/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/gitolite/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/gitolite/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/gitolite/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/keep-web/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/keep-web/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/keep-web/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/keep-web/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/keepproxy/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/keepproxy/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/keepproxy/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/keepproxy/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/keepstore0/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/keepstore0/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/keepstore0/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/keepstore0/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/keepstore1/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/keepstore1/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/keepstore1/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/keepstore1/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/nginx/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/nginx/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/nginx/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/nginx/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/postgres/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/postgres/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/postgres/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/postgres/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/ready/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/ready/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/sdk/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/sdk/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/sdk/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/sdk/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/slurmctld/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/slurmctld/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/slurmctld/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/slurmd/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/slurmd/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/slurmd/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/ssh/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/ssh/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/ssh/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/sso/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/sso/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/sso/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/sso/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/vm/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/vm/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/vm/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/vm/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/websockets/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/websockets/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/websockets/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/websockets/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/workbench/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/workbench/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/workbench/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/workbench/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/workbench2/log/main/.gitstub [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/service/workbench2/log/run [new symlink]
tools/arvbox/lib/arvbox/docker/service/workbench2/run [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/service/workbench2/run-service [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/waitforpostgres.sh [new file with mode: 0755]
tools/arvbox/lib/arvbox/docker/yml_override.py [new file with mode: 0755]
tools/crunchstat-summary/.gitignore [new file with mode: 0644]
tools/crunchstat-summary/MANIFEST.in [new file with mode: 0644]
tools/crunchstat-summary/README.rst [new file with mode: 0644]
tools/crunchstat-summary/agpl-3.0.txt [new file with mode: 0644]
tools/crunchstat-summary/arvados_version.py [new file with mode: 0644]
tools/crunchstat-summary/bin/crunchstat-summary [new file with mode: 0755]
tools/crunchstat-summary/crunchstat_summary/__init__.py [new file with mode: 0644]
tools/crunchstat-summary/crunchstat_summary/command.py [new file with mode: 0644]
tools/crunchstat-summary/crunchstat_summary/dygraphs.js [new file with mode: 0644]
tools/crunchstat-summary/crunchstat_summary/dygraphs.py [new file with mode: 0644]
tools/crunchstat-summary/crunchstat_summary/reader.py [new file with mode: 0644]
tools/crunchstat-summary/crunchstat_summary/summarizer.py [new file with mode: 0644]
tools/crunchstat-summary/crunchstat_summary/synchronizer.js [new file with mode: 0644]
tools/crunchstat-summary/crunchstat_summary/webchart.py [new file with mode: 0644]
tools/crunchstat-summary/fpm-info.sh [new file with mode: 0644]
tools/crunchstat-summary/gittaggers.py [new symlink]
tools/crunchstat-summary/setup.py [new file with mode: 0755]
tools/crunchstat-summary/tests/__init__.py [new file with mode: 0644]
tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-arv-mount.txt.gz [new file with mode: 0644]
tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-arv-mount.txt.gz.report [new file with mode: 0644]
tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-crunchstat.txt.gz [new file with mode: 0644]
tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-crunchstat.txt.gz.report [new file with mode: 0644]
tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk.txt.gz.report [new file with mode: 0644]
tools/crunchstat-summary/tests/crunchstat_error_messages.txt [new file with mode: 0644]
tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz [new file with mode: 0644]
tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz.report [new file with mode: 0644]
tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz [new file with mode: 0644]
tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz.report [new file with mode: 0644]
tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz [new file with mode: 0644]
tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz.report [new file with mode: 0644]
tools/crunchstat-summary/tests/test_examples.py [new file with mode: 0644]
tools/keep-block-check/.gitignore [new file with mode: 0644]
tools/keep-block-check/keep-block-check.go [new file with mode: 0644]
tools/keep-block-check/keep-block-check_test.go [new file with mode: 0644]
tools/keep-exercise/.gitignore [new file with mode: 0644]
tools/keep-exercise/keep-exercise.go [new file with mode: 0644]
tools/keep-rsync/.gitignore [new file with mode: 0644]
tools/keep-rsync/keep-rsync.go [new file with mode: 0644]
tools/keep-rsync/keep-rsync_test.go [new file with mode: 0644]
tools/sync-groups/.gitignore [new file with mode: 0644]
tools/sync-groups/sync-groups.go [new file with mode: 0644]
tools/sync-groups/sync-groups_test.go [new file with mode: 0644]
vendor/.gitignore [new file with mode: 0644]
vendor/vendor.json [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
index c928081f782ae33cf44829ecad7679d2aa617571..db3020ae82d13231817872355b04dd09849f87a0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,9 +1,33 @@
-/.gradle/
-/bin/
-/build/
-.project
-.classpath
-/.settings/
+.bundle
+.rvmrc
+*~
+*.pyc
+*.egg
+*.egg-info
+.eggs
+*.gem
+*.rpm
+*.deb
+docker/*/generated
+docker/config.yml
+doc/.site
+doc/sdk/python/arvados
+doc/sdk/R/arvados
+sdk/perl/MYMETA.*
+sdk/perl/Makefile
+sdk/perl/blib
+sdk/perl/pm_to_blib
+*/vendor
+*/*/vendor
+sdk/java/target
+*.class
+sdk/java/log
+tmp
+sdk/cli/binstubs/
+services/api/config/arvados-clients.yml
+*#*
 .DS_Store
-/.idea/
-/out/
+.vscode
+.Rproj.user
+_version.py
+*.bak
diff --git a/.licenseignore b/.licenseignore
index ecee9c720a67c7a00bd5c58c07047e7e71e85194..45028bf888ff6a40f910f29197aaac1a8d29516f 100644
--- a/.licenseignore
+++ b/.licenseignore
@@ -1,4 +1,76 @@
-.licenseignore
-agpl-3.0.txt
+*agpl-3.0.html
+*agpl-3.0.txt
 apache-2.0.txt
-COPYING
\ No newline at end of file
+apps/workbench/app/assets/javascripts/list.js
+apps/workbench/public/webshell/*
+AUTHORS
+*/bootstrap.css
+*/bootstrap.js
+*bootstrap-theme.css
+build/package-test-dockerfiles/centos7/localrepo.repo
+build/package-test-dockerfiles/ubuntu1604/etc-apt-preferences.d-arvados
+*by-sa-3.0.html
+*by-sa-3.0.txt
+*COPYING
+doc/fonts/*
+doc/user/cwl/federated/*
+*/docker_image
+docker/jobs/apt.arvados.org*.list
+docker/jobs/1078ECD7.key
+*/en.bootstrap.yml
+*font-awesome.css
+*.gif
+.gitignore
+*/.gitignore
+*/.gitkeep
+*/.gitstub
+*.gz
+*.gz.report
+*.ico
+*.jpg
+*.svg
+*.odg
+*.json
+*LICENSE*.html
+.licenseignore
+*LICENSE*.txt
+*.lock
+*.log
+*.map
+*.min.css
+*.min.js
+*.png
+*/proc_stat
+*/README
+*/robots.txt
+*/runit-docker/*
+*/sb-admin.css.scss
+*/script/rails
+sdk/cwl/tests/input/blorp.txt
+sdk/cwl/tests/tool/blub.txt
+sdk/cwl/tests/federation/data/*
+sdk/go/manifest/testdata/*_manifest
+sdk/java/.classpath
+sdk/java/pom.xml
+sdk/java/.project
+sdk/java/.settings/org.eclipse.jdt.core.prefs
+sdk/java/src/main/resources/log4j.properties
+sdk/pam/examples/shellinabox
+sdk/pam/pam-configs/arvados
+sdk/python/tests/data/*
+services/api/config/unbound.template
+services/arv-web/sample-cgi-app/public/.htaccess
+services/arv-web/sample-cgi-app/public/index.cgi
+services/keepproxy/pkg-extras/etc/default/keepproxy
+*.tar
+tools/crunchstat-summary/tests/crunchstat_error_messages.txt
+tools/crunchstat-summary/crunchstat_summary/synchronizer.js
+build/package-build-dockerfiles/debian9/D39DC0E3.asc
+build/package-test-dockerfiles/debian9/D39DC0E3.asc
+sdk/R/DESCRIPTION
+sdk/R/NAMESPACE
+sdk/R/.Rbuildignore
+sdk/R/ArvadosR.Rproj
+*.Rd
+lib/dispatchcloud/test/sshkey_*
+*.asc
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..9a861a6
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,20 @@
+# Names should be added to this file with this pattern:
+#
+# For individuals:
+#   Name <email address>
+#
+# For organizations:
+#   Organization <fnmatch pattern>
+#
+# See python fnmatch module documentation for more information.
+
+Curoverse, Inc. <*@curoverse.com>
+Adam Savitzky <adam.savitzky@gmail.com>
+Colin Nolan <colin.nolan@sanger.ac.uk>
+David <davide.fiorentino.loregio@gmail.com>
+Guillermo Carrasco <guille.ch.88@gmail.com>
+Joshua Randall <joshua.randall@sanger.ac.uk>
+President and Fellows of Harvard College <*@harvard.edu>
+Thomas Mooney <tmooney@genome.wustl.edu>
+Chen Chen <aflyhorse@gmail.com>
+Veritas Genetics, Inc. <*@veritasgenetics.com>
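The fnmatch patterns in the AUTHORS entries above map commit email addresses to individuals or organizations. A minimal sketch of that matching, using Python's fnmatch module as the file's comment suggests (the addresses below are illustrative, not taken from commit history):

```python
# How an organization entry from AUTHORS matches an author address,
# per the fnmatch semantics referenced in the file's comment.
import fnmatch

org_pattern = "*@veritasgenetics.com"   # organization entry from AUTHORS
print(fnmatch.fnmatch("pamstutz@veritasgenetics.com", org_pattern))  # True
print(fnmatch.fnmatch("someone@example.org", org_pattern))           # False
```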
diff --git a/COPYING b/COPYING
index 27d8c813593c47b91e1d87df194fff3533bf1079..61c31397a00534ef01dee6aa9ef3e1aa4b33f6c0 100644
--- a/COPYING
+++ b/COPYING
@@ -1,10 +1,13 @@
 Unless indicated otherwise in the header of the file, the files in this
-repository are dual-licensed AGPL-3.0 and Apache-2.0
+repository are distributed under one of three different licenses: AGPL-3.0,
+Apache-2.0 or CC-BY-SA-3.0.
 
 Individual files contain an SPDX tag that indicates the license for the file.
-dual-licensed files use the following tag:
+These are the three tags in use:
 
-    SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+    SPDX-License-Identifier: AGPL-3.0
+    SPDX-License-Identifier: Apache-2.0
+    SPDX-License-Identifier: CC-BY-SA-3.0
 
 This enables machine processing of license information based on the SPDX
 License Identifiers that are available here: http://spdx.org/licenses/
@@ -13,3 +16,4 @@ The full license text for each license is available in this directory:
 
   AGPL-3.0:     agpl-3.0.txt
   Apache-2.0:   apache-2.0.txt
+  CC-BY-SA-3.0: cc-by-sa-3.0.txt
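Because every file carries one of these three SPDX tags, license auditing can be automated, as COPYING notes. A rough sketch (not part of this commit) that scans file headers for the identifiers listed above:

```python
# Scan a source tree for the SPDX-License-Identifier tags described in
# COPYING and report which of the three licenses each file declares.
import os
import re

SPDX = re.compile(r"SPDX-License-Identifier:\s*(AGPL-3\.0|Apache-2\.0|CC-BY-SA-3\.0)")

def spdx_tags(root="."):
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            path = os.path.join(dirpath, name)
            try:
                with open(path, errors="ignore") as f:
                    header = f.read(1024)   # SPDX tags sit in the file header
            except OSError:
                continue
            match = SPDX.search(header)
            yield path, match.group(1) if match else None

for path, tag in spdx_tags():
    print(tag or "NO SPDX TAG", path)
```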
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..8159f5f
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+export WORKSPACE?=$(shell pwd)
+help:
+       @echo >&2
+       @echo >&2 "There is no default make target here.  Did you mean 'make test'?"
+       @echo >&2
+       @echo >&2 "More info:"
+       @echo >&2 "  Installing              --> http://doc.arvados.org/install"
+       @echo >&2 "  Developing/contributing --> https://dev.arvados.org"
+       @echo >&2 "  Project home            --> https://arvados.org"
+       @echo >&2
+       @false
+test:
+       build/run-tests.sh ${TEST_FLAGS}
+packages:
+       build/run-build-packages-all-targets.sh ${PACKAGES_FLAGS}
+test-packages:
+       build/run-build-packages-all-targets.sh --test-packages ${PACKAGES_FLAGS}
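The targets above are thin wrappers: `make test` and `make packages` forward TEST_FLAGS and PACKAGES_FLAGS to the corresponding scripts under build/. A hypothetical invocation (the flag values shown are placeholders, not documented options of those scripts):

```
$ make test TEST_FLAGS="--only sdk/java-v2"
$ make packages PACKAGES_FLAGS="--debug"
```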
diff --git a/README.md b/README.md
index ca5aef91c1a6c8a9f82995c9e8123e505b204994..5843bb84da97eb737c7d8ef319d76a35433cea6d 100644
--- a/README.md
+++ b/README.md
-```
-Copyright (C) The Arvados Authors. All rights reserved.
-SPDX-License-Identifier: CC-BY-SA-3.0
-```
-
-# Arvados Java SDK
-
-##### About
-Arvados Java Client allows to access Arvados servers and uses two APIs:
-* lower level [Keep Server API](https://doc.arvados.org/api/index.html)
-* higher level [Keep-Web API](https://godoc.org/github.com/curoverse/arvados/services/keep-web) (when needed)
-
-##### Required Java version
-This SDK requires Java 8+
-
-##### Logging
+[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
+[comment]: # ()
+[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)
 
-SLF4J is used for logging. Concrete logging framework and configuration must be provided by a client.
+[Arvados](https://arvados.org) is a free software distributed computing platform
+for bioinformatics, data science, and high throughput analysis of massive data
+sets.  Arvados supports a variety of cloud, cluster and HPC environments.
 
-##### Configuration
+Arvados consists of:
 
-[TypeSafe Configuration](https://github.com/lightbend/config) is used for configuring this library.
+* *Keep*: a petabyte-scale content-addressed distributed storage system for managing and
+  storing collections of files, accessible via HTTP and FUSE mount.
 
-Please, have a look at java/resources/reference.conf for default values provided with this library.
+* *Crunch*: a Docker-based cluster and HPC workflow engine designed to provide
+  strong versioning, reproducibility, and provenance of computations.
 
-* **keepweb-host** - change to host of your Keep-Web installation
-* **keepweb-port** - change to port of your Keep-Web installation
-* **host** - change to host of your Arvados installation
-* **port** - change to port of your Arvados installation
-* **token** - authenticates registered user, one must provide
-  [token obtained from Arvados Workbench](https://doc.arvados.org/user/reference/api-tokens.html)
-* **protocol** - don't change to unless really needed
-* **host-insecure** - insecure communication with Arvados (ignores SSL certificate verification), 
-  don't change to *true* unless really needed
-* **split-size** - size of chunk files in megabytes
-* **temp-dir** - temporary chunk files storage
-* **copies** - amount of chunk files duplicates per Keep server
-* **retries** - in case of chunk files send failure this should allow to repeat send 
-  (*NOTE*: this parameter is not used at the moment but was left for future improvements)
+* Related services and components including a web workbench for managing files
+  and compute jobs, REST APIs, SDKs, and other tools.
 
-In order to override default settings one can create application.conf file in an application.
-Example: src/test/resources/application.conf.
+## Quick start
 
-Alternatively ExternalConfigProvider class can be used to pass configuration via code. 
-ExternalConfigProvider comes with a builder and all of the above values must be provided in order for it to work properly.
+Veritas Genetics maintains a public installation of Arvados for evaluation and trial use, the [Arvados Playground](https://playground.arvados.org). A Google account is required to log in.
 
-ArvadosFacade has two constructors, one without arguments that uses values from reference.conf and second one 
-taking ExternalConfigProvider as an argument.
+To try out Arvados on your local workstation, you can use Arvbox, which
+provides Arvados components pre-installed in a Docker container (requires
+Docker 1.9+).  After cloning the Arvados git repository:
 
-##### API clients
+```
+$ cd arvados/tools/arvbox/bin
+$ ./arvbox start localdemo
+```
 
-All API clients inherit from BaseStandardApiClient. This class contains implementation of all 
-common methods as described in http://doc.arvados.org/api/methods.html.
+In this mode you will only be able to connect to Arvbox from the same host.  To
+configure Arvbox to be accessible over a network, and for other options, see
+http://doc.arvados.org/install/arvbox.html.
 
-Parameters provided to common or specific methods are String UUID or fields wrapped in Java objects. For example:
+## Documentation
 
-```java
-String uuid = "ardev-4zz18-rxcql7qwyakg1r1";
+Complete documentation, including a User Guide, Installation documentation, and
+API documentation, is available at http://doc.arvados.org/
 
-Collection actual = client.get(uuid);
-```
+If you wish to build the Arvados documentation from a local git clone, see
+doc/README.textile for instructions.
 
-```java
-ListArgument listArgument = ListArgument.builder()
-        .filters(Arrays.asList(
-                Filter.of("owner_uuid", Operator.LIKE, "ardev%"),
-                Filter.of("name", Operator.LIKE, "Super%"),
-                Filter.of("portable_data_hash", Operator.IN, Lists.newArrayList("54f6d9f59065d3c009d4306660989379+65")
-            )))
-        .build();
+## Community
 
-CollectionList actual = client.list(listArgument);
-```
+[![Join the chat at https://gitter.im/curoverse/arvados](https://badges.gitter.im/curoverse/arvados.svg)](https://gitter.im/curoverse/arvados?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
 
-Non-standard API clients must inherit from BaseApiClient. 
-For example: KeepServerApiClient communicates directly with Keep servers using exclusively non-common methods.
+The [curoverse/arvados](https://gitter.im/curoverse/arvados)
+channel at [gitter.im](https://gitter.im) is available for live
+discussion and support.
 
-##### Business logic
+The
+[Arvados user mailing list](http://lists.arvados.org/mailman/listinfo/arvados)
+is a forum for general discussion, questions, and news about Arvados
+development.  The
+[Arvados developer mailing list](http://lists.arvados.org/mailman/listinfo/arvados-dev)
+is a forum for more technical discussion, intended for developers and
+contributors to Arvados.
 
-More advanced API data handling could be implemented as *Facade* classes. 
-In current version functionalities provided by SDK are handled by *ArvadosFacade*.
-They include:
-* **downloading single file from collection** - using Keep-Web
-* **downloading whole collection** - using Keep-Web or Keep Server API
-* **listing file info from certain collection** - information is returned as list of *FileTokens* providing file details
-* **uploading single file** - to either new or existing collection
-* **uploading list of files** - to either new or existing collection
-* **creating an empty collection**
-* **getting current user info**
-* **listing current user's collections**
-* **creating new project**
-* **deleting certain collection**
+## Development
 
-##### Note regarding Keep-Web
+[![Build Status](https://ci.curoverse.com/buildStatus/icon?job=run-tests)](https://ci.curoverse.com/job/run-tests/)
+[![Go Report Card](https://goreportcard.com/badge/github.com/curoverse/arvados)](https://goreportcard.com/report/github.com/curoverse/arvados)
 
-Current version requires both Keep Web and standard Keep Server API configured in order to use Keep-Web functionalities.
+The Arvados public bug tracker is located at https://dev.arvados.org/projects/arvados/issues
 
-##### Integration tests
+Continuous integration is hosted at https://ci.curoverse.com/
 
-In order to run integration tests all fields within following configuration file must be provided: 
-```java
-src/test/resources/integration-test-appliation.conf 
-```
-Parameter **integration-tests.project-uuid** should contain UUID of one project available to user,
-whose token was provided within configuration file. 
+Instructions for setting up a development environment and working on specific
+components can be found on the
+["Hacking Arvados" page of the Arvados wiki](https://dev.arvados.org/projects/arvados/wiki/Hacking).
 
-Integration tests require connection to real Arvados server.
+## Contributing
 
-##### Note regarding file naming
+When making a pull request, please ensure *every git commit message* includes a one-line [Developer Certificate of Origin](https://dev.arvados.org/projects/arvados/wiki/Developer_Certificate_Of_Origin). If you have already made commits without it, fix them with `git commit --amend` or `git rebase`.
 
-While uploading via this SDK all uploaded files within single collection must have different names.
-This applies also to uploading files to already existing collection. 
-Renaming files with duplicate names is not implemented in current version.
+## Licensing
 
+Arvados is Free Software.  See COPYING for information about Arvados Free
+Software licenses.
diff --git a/apps/workbench/.gitignore b/apps/workbench/.gitignore
new file mode 100644 (file)
index 0000000..25c7c3e
--- /dev/null
@@ -0,0 +1,49 @@
+# Ignore the default SQLite database.
+/db/*.sqlite3
+
+# Ignore all logfiles and tempfiles.
+/log/*.log
+/log/*.log.gz
+/tmp
+.byebug_history
+
+/config/.secret_token
+/config/initializers/secret_token.rb
+
+/public/assets
+
+/config/environments/development.rb
+/config/environments/production.rb
+/config/application.yml
+
+# Workbench doesn't need one anyway, so this shouldn't come up, but...
+/config/database.yml
+
+/config/piwik.yml
+
+# Capistrano files are coming from another repo
+/Capfile*
+/config/deploy*
+
+# Themes are coming from another repo
+/themes/*
+
+# This can be a symlink to ../../../doc/.site in dev setups
+/public/doc
+
+# SimpleCov reports
+/coverage
+
+# Dev/test SSL certificates
+/self-signed.key
+/self-signed.pem
+
+# Generated git-commit.version file
+/git-commit.version
+
+# npm-rails
+/node_modules
+/npm-debug.log
+
+# Generated when building distribution packages
+/package-build.version
diff --git a/apps/workbench/Gemfile b/apps/workbench/Gemfile
new file mode 100644 (file)
index 0000000..7150faa
--- /dev/null
@@ -0,0 +1,109 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+source 'https://rubygems.org'
+
+gem 'rails', '~> 4.2.0'
+gem 'arvados', '>= 0.1.20150511150219'
+
+gem 'activerecord-nulldb-adapter'
+gem 'multi_json'
+gem 'oj'
+gem 'sass'
+gem 'mime-types'
+gem 'responders', '~> 2.0'
+
+# Note: keeping this out of the "group :assets" section "may" allow us
+# to use CoffeeScript for UJS responses. It also prevents a
+# warning/problem when running tests: "WARN: tilt autoloading
+# 'coffee_script' in a non thread-safe way; explicit require
+# 'coffee_script' suggested."
+gem 'coffee-rails'
+
+# Gems used only for assets and not required
+# in production environments by default.
+group :assets do
+  gem 'sass-rails'
+  gem 'uglifier', '~> 2.0'
+
+  # See https://github.com/sstephenson/execjs#readme for more supported runtimes
+  gem 'therubyracer', :platforms => :ruby
+end
+
+group :development do
+  gem 'byebug'
+  gem 'ruby-debug-passenger'
+  gem 'rack-mini-profiler', require: false
+  gem 'flamegraph', require: false
+  #gem 'web-console', '~> 2.0'
+end
+
+group :test, :diagnostics, :performance do
+  gem 'minitest', '~> 5.10.3'
+  gem 'selenium-webdriver', '~> 3'
+  gem 'capybara', '~> 2.5.0'
+  gem 'poltergeist', '~> 1.5.1'
+  gem 'headless', '~> 1.0.2'
+end
+
+group :test, :performance do
+  gem 'rails-perftest'
+  gem 'ruby-prof'
+  gem 'rvm-capistrano'
+  # Note: "require: false" here tells bunder not to automatically
+  # 'require' the packages during application startup. Installation is
+  # still mandatory.
+  gem 'simplecov', '~> 0.7', require: false
+  gem 'simplecov-rcov', require: false
+  gem 'mocha', require: false
+end
+
+gem 'jquery-rails'
+gem 'bootstrap-sass', '~> 3.1.0'
+gem 'bootstrap-x-editable-rails'
+gem 'bootstrap-tab-history-rails'
+
+gem 'angularjs-rails', '~> 1.3.8'
+
+gem 'less'
+gem 'less-rails'
+gem 'wiselinks'
+gem 'sshkey'
+
+# To use ActiveModel has_secure_password
+# gem 'bcrypt-ruby', '~> 3.0.0'
+
+# To use Jbuilder templates for JSON
+# gem 'jbuilder'
+
+# Use unicorn as the app server
+# gem 'unicorn'
+
+# Deploy with Capistrano
+# gem 'capistrano'
+
+# To use debugger
+#gem 'byebug'
+
+gem 'passenger', :group => :production
+gem 'andand'
+gem 'RedCloth'
+
+gem 'piwik_analytics'
+gem 'httpclient', '~> 2.5'
+
+# This fork has Rails 4 compatible routes
+gem 'themes_for_rails', git: 'https://github.com/curoverse/themes_for_rails'
+
+gem "deep_merge", :require => 'deep_merge/rails_compat'
+
+gem 'morrisjs-rails'
+gem 'raphael-rails'
+
+gem 'lograge'
+gem 'logstash-event'
+
+gem 'safe_yaml'
+
+gem 'npm-rails'
diff --git a/apps/workbench/Gemfile.lock b/apps/workbench/Gemfile.lock
new file mode 100644 (file)
index 0000000..cc45ca6
--- /dev/null
@@ -0,0 +1,359 @@
+GIT
+  remote: https://github.com/curoverse/themes_for_rails
+  revision: 61154877047d2346890bda0b7be5827cf51a6a76
+  specs:
+    themes_for_rails (0.5.1)
+      rails (>= 3.0.0)
+
+GEM
+  remote: https://rubygems.org/
+  specs:
+    RedCloth (4.3.2)
+    actionmailer (4.2.11)
+      actionpack (= 4.2.11)
+      actionview (= 4.2.11)
+      activejob (= 4.2.11)
+      mail (~> 2.5, >= 2.5.4)
+      rails-dom-testing (~> 1.0, >= 1.0.5)
+    actionpack (4.2.11)
+      actionview (= 4.2.11)
+      activesupport (= 4.2.11)
+      rack (~> 1.6)
+      rack-test (~> 0.6.2)
+      rails-dom-testing (~> 1.0, >= 1.0.5)
+      rails-html-sanitizer (~> 1.0, >= 1.0.2)
+    actionview (4.2.11)
+      activesupport (= 4.2.11)
+      builder (~> 3.1)
+      erubis (~> 2.7.0)
+      rails-dom-testing (~> 1.0, >= 1.0.5)
+      rails-html-sanitizer (~> 1.0, >= 1.0.3)
+    activejob (4.2.11)
+      activesupport (= 4.2.11)
+      globalid (>= 0.3.0)
+    activemodel (4.2.11)
+      activesupport (= 4.2.11)
+      builder (~> 3.1)
+    activerecord (4.2.11)
+      activemodel (= 4.2.11)
+      activesupport (= 4.2.11)
+      arel (~> 6.0)
+    activerecord-nulldb-adapter (0.3.8)
+      activerecord (>= 2.0.0)
+    activesupport (4.2.11)
+      i18n (~> 0.7)
+      minitest (~> 5.1)
+      thread_safe (~> 0.3, >= 0.3.4)
+      tzinfo (~> 1.1)
+    addressable (2.5.2)
+      public_suffix (>= 2.0.2, < 4.0)
+    andand (1.3.3)
+    angularjs-rails (1.3.15)
+    arel (6.0.4)
+    arvados (0.1.20180302192246)
+      activesupport (>= 3)
+      andand (~> 1.3, >= 1.3.3)
+      google-api-client (>= 0.7, < 0.8.9)
+      i18n (~> 0)
+      json (>= 1.7.7, < 3)
+      jwt (>= 0.1.5, < 2)
+    autoparse (0.3.3)
+      addressable (>= 2.3.1)
+      extlib (>= 0.9.15)
+      multi_json (>= 1.0.0)
+    bootstrap-sass (3.1.1.1)
+      sass (~> 3.2)
+    bootstrap-tab-history-rails (0.1.0)
+      railties (>= 3.1)
+    bootstrap-x-editable-rails (1.5.1.1)
+      railties (>= 3.0)
+    builder (3.2.3)
+    byebug (10.0.0)
+    capistrano (2.15.9)
+      highline
+      net-scp (>= 1.0.0)
+      net-sftp (>= 2.0.0)
+      net-ssh (>= 2.0.14)
+      net-ssh-gateway (>= 1.1.0)
+    capybara (2.5.0)
+      mime-types (>= 1.16)
+      nokogiri (>= 1.3.3)
+      rack (>= 1.0.0)
+      rack-test (>= 0.5.4)
+      xpath (~> 2.0)
+    childprocess (0.9.0)
+      ffi (~> 1.0, >= 1.0.11)
+    cliver (0.3.2)
+    coffee-rails (4.2.2)
+      coffee-script (>= 2.2.0)
+      railties (>= 4.0.0)
+    coffee-script (2.4.1)
+      coffee-script-source
+      execjs
+    coffee-script-source (1.12.2)
+    commonjs (0.2.7)
+    concurrent-ruby (1.1.4)
+    crass (1.0.4)
+    deep_merge (1.2.1)
+    docile (1.1.5)
+    erubis (2.7.0)
+    execjs (2.7.0)
+    extlib (0.9.16)
+    faraday (0.14.0)
+      multipart-post (>= 1.2, < 3)
+    ffi (1.9.25)
+    flamegraph (0.9.5)
+    globalid (0.4.1)
+      activesupport (>= 4.2.0)
+    google-api-client (0.8.7)
+      activesupport (>= 3.2, < 5.0)
+      addressable (~> 2.3)
+      autoparse (~> 0.3)
+      extlib (~> 0.9)
+      faraday (~> 0.9)
+      googleauth (~> 0.3)
+      launchy (~> 2.4)
+      multi_json (~> 1.10)
+      retriable (~> 1.4)
+      signet (~> 0.6)
+    googleauth (0.6.2)
+      faraday (~> 0.12)
+      jwt (>= 1.4, < 3.0)
+      logging (~> 2.0)
+      memoist (~> 0.12)
+      multi_json (~> 1.11)
+      os (~> 0.9)
+      signet (~> 0.7)
+    grease (0.3.1)
+    headless (1.0.2)
+    highline (1.7.10)
+    httpclient (2.8.3)
+    i18n (0.9.5)
+      concurrent-ruby (~> 1.0)
+    jquery-rails (3.1.4)
+      railties (>= 3.0, < 5.0)
+      thor (>= 0.14, < 2.0)
+    json (2.1.0)
+    jwt (1.5.6)
+    launchy (2.4.3)
+      addressable (~> 2.3)
+    less (2.6.0)
+      commonjs (~> 0.2.7)
+    less-rails (3.0.0)
+      actionpack (>= 4.0)
+      grease
+      less (~> 2.6.0)
+      sprockets (> 2, < 4)
+      tilt
+    libv8 (3.16.14.19)
+    little-plugger (1.1.4)
+    logging (2.2.2)
+      little-plugger (~> 1.1)
+      multi_json (~> 1.10)
+    lograge (0.9.0)
+      actionpack (>= 4)
+      activesupport (>= 4)
+      railties (>= 4)
+      request_store (~> 1.0)
+    logstash-event (1.2.02)
+    loofah (2.2.3)
+      crass (~> 1.0.2)
+      nokogiri (>= 1.5.9)
+    mail (2.7.1)
+      mini_mime (>= 0.1.1)
+    memoist (0.16.0)
+    metaclass (0.0.4)
+    mime-types (3.1)
+      mime-types-data (~> 3.2015)
+    mime-types-data (3.2016.0521)
+    mini_mime (1.0.1)
+    mini_portile2 (2.4.0)
+    minitest (5.10.3)
+    mocha (1.3.0)
+      metaclass (~> 0.0.1)
+    morrisjs-rails (0.5.1.2)
+      railties (> 3.1, < 6)
+    multi_json (1.13.1)
+    multipart-post (2.0.0)
+    net-scp (1.2.1)
+      net-ssh (>= 2.6.5)
+    net-sftp (2.1.2)
+      net-ssh (>= 2.6.5)
+    net-ssh (4.2.0)
+    net-ssh-gateway (2.0.0)
+      net-ssh (>= 4.0.0)
+    nokogiri (1.9.1)
+      mini_portile2 (~> 2.4.0)
+    npm-rails (0.2.1)
+      rails (>= 3.2)
+    oj (3.6.4)
+    os (0.9.6)
+    passenger (5.2.1)
+      rack
+      rake (>= 0.8.1)
+    piwik_analytics (1.0.2)
+      actionpack
+      activesupport
+      rails (>= 3.0.0)
+    poltergeist (1.5.1)
+      capybara (~> 2.1)
+      cliver (~> 0.3.1)
+      multi_json (~> 1.0)
+      websocket-driver (>= 0.2.0)
+    public_suffix (3.0.2)
+    rack (1.6.11)
+    rack-mini-profiler (0.10.7)
+      rack (>= 1.2.0)
+    rack-test (0.6.3)
+      rack (>= 1.0)
+    rails (4.2.11)
+      actionmailer (= 4.2.11)
+      actionpack (= 4.2.11)
+      actionview (= 4.2.11)
+      activejob (= 4.2.11)
+      activemodel (= 4.2.11)
+      activerecord (= 4.2.11)
+      activesupport (= 4.2.11)
+      bundler (>= 1.3.0, < 2.0)
+      railties (= 4.2.11)
+      sprockets-rails
+    rails-deprecated_sanitizer (1.0.3)
+      activesupport (>= 4.2.0.alpha)
+    rails-dom-testing (1.0.9)
+      activesupport (>= 4.2.0, < 5.0)
+      nokogiri (~> 1.6)
+      rails-deprecated_sanitizer (>= 1.0.1)
+    rails-html-sanitizer (1.0.4)
+      loofah (~> 2.2, >= 2.2.2)
+    rails-perftest (0.0.7)
+    railties (4.2.11)
+      actionpack (= 4.2.11)
+      activesupport (= 4.2.11)
+      rake (>= 0.8.7)
+      thor (>= 0.18.1, < 2.0)
+    rake (12.3.2)
+    raphael-rails (2.1.2)
+    rb-fsevent (0.10.3)
+    rb-inotify (0.9.10)
+      ffi (>= 0.5.0, < 2)
+    ref (2.0.0)
+    request_store (1.4.0)
+      rack (>= 1.4)
+    responders (2.4.0)
+      actionpack (>= 4.2.0, < 5.3)
+      railties (>= 4.2.0, < 5.3)
+    retriable (1.4.1)
+    ruby-debug-passenger (0.2.0)
+    ruby-prof (0.17.0)
+    rubyzip (1.2.2)
+    rvm-capistrano (1.5.6)
+      capistrano (~> 2.15.4)
+    safe_yaml (1.0.4)
+    sass (3.5.5)
+      sass-listen (~> 4.0.0)
+    sass-listen (4.0.0)
+      rb-fsevent (~> 0.9, >= 0.9.4)
+      rb-inotify (~> 0.9, >= 0.9.7)
+    sass-rails (5.0.7)
+      railties (>= 4.0.0, < 6)
+      sass (~> 3.1)
+      sprockets (>= 2.8, < 4.0)
+      sprockets-rails (>= 2.0, < 4.0)
+      tilt (>= 1.1, < 3)
+    selenium-webdriver (3.14.1)
+      childprocess (~> 0.5)
+      rubyzip (~> 1.2, >= 1.2.2)
+    signet (0.8.1)
+      addressable (~> 2.3)
+      faraday (~> 0.9)
+      jwt (>= 1.5, < 3.0)
+      multi_json (~> 1.10)
+    simplecov (0.15.1)
+      docile (~> 1.1.0)
+      json (>= 1.8, < 3)
+      simplecov-html (~> 0.10.0)
+    simplecov-html (0.10.2)
+    simplecov-rcov (0.2.3)
+      simplecov (>= 0.4.1)
+    sprockets (3.7.2)
+      concurrent-ruby (~> 1.0)
+      rack (> 1, < 3)
+    sprockets-rails (3.2.1)
+      actionpack (>= 4.0)
+      activesupport (>= 4.0)
+      sprockets (>= 3.0.0)
+    sshkey (1.9.0)
+    therubyracer (0.12.3)
+      libv8 (~> 3.16.14.15)
+      ref
+    thor (0.20.3)
+    thread_safe (0.3.6)
+    tilt (2.0.8)
+    tzinfo (1.2.5)
+      thread_safe (~> 0.1)
+    uglifier (2.7.2)
+      execjs (>= 0.3.0)
+      json (>= 1.8.0)
+    websocket-driver (0.7.0)
+      websocket-extensions (>= 0.1.0)
+    websocket-extensions (0.1.3)
+    wiselinks (1.2.1)
+    xpath (2.1.0)
+      nokogiri (~> 1.3)
+
+PLATFORMS
+  ruby
+
+DEPENDENCIES
+  RedCloth
+  activerecord-nulldb-adapter
+  andand
+  angularjs-rails (~> 1.3.8)
+  arvados (>= 0.1.20150511150219)
+  bootstrap-sass (~> 3.1.0)
+  bootstrap-tab-history-rails
+  bootstrap-x-editable-rails
+  byebug
+  capybara (~> 2.5.0)
+  coffee-rails
+  deep_merge
+  flamegraph
+  headless (~> 1.0.2)
+  httpclient (~> 2.5)
+  jquery-rails
+  less
+  less-rails
+  lograge
+  logstash-event
+  mime-types
+  minitest (~> 5.10.3)
+  mocha
+  morrisjs-rails
+  multi_json
+  npm-rails
+  oj
+  passenger
+  piwik_analytics
+  poltergeist (~> 1.5.1)
+  rack-mini-profiler
+  rails (~> 4.2.0)
+  rails-perftest
+  raphael-rails
+  responders (~> 2.0)
+  ruby-debug-passenger
+  ruby-prof
+  rvm-capistrano
+  safe_yaml
+  sass
+  sass-rails
+  selenium-webdriver (~> 3)
+  simplecov (~> 0.7)
+  simplecov-rcov
+  sshkey
+  themes_for_rails!
+  therubyracer
+  uglifier (~> 2.0)
+  wiselinks
+
+BUNDLED WITH
+   1.17.2
diff --git a/apps/workbench/README.textile b/apps/workbench/README.textile
new file mode 100644 (file)
index 0000000..18380ac
--- /dev/null
@@ -0,0 +1,27 @@
+###. Copyright (C) The Arvados Authors. All rights reserved.
+....
+.... SPDX-License-Identifier: AGPL-3.0
+
+h1. Developing Workbench
+
+This document includes information to help developers who would like to contribute to Workbench.  If you just want to install it, please refer to our "Workbench installation guide":http://doc.arvados.org/install/install-workbench-app.html.
+
+h2. Running tests
+
+The Workbench application includes a series of integration tests.  Running them starts the API server in a test environment, with all of its fixtures loaded, then starts the Workbench server and tests it by making requests against it.
+
+In order for this to work, you must have Firefox installed (or Iceweasel, if you're running Debian), as well as Xvfb, the X virtual framebuffer.
+
+<pre>
+$ sudo apt-get install iceweasel xvfb
+</pre>
+
+If you install the Workbench Bundle in deployment mode, you must also install the API server Bundle in deployment mode, and vice versa.  If your Bundle installs have mismatched modes, the integration tests will fail with "Gem not found" errors.
+
+h2. Writing tests
+
+Integration tests are written with Capybara, which drives a fully-featured Web browser to interact with Workbench exactly as a user would.
+
+If your test requires JavaScript support, your test method should start with the line @Capybara.current_driver = Capybara.javascript_driver@.  Otherwise, Capybara defaults to a simpler browser for speed.
+
+In most tests, you can directly call "Capybara's Session methods":http://rubydoc.info/github/jnicklas/capybara/Capybara/Session to drive the browser and check its state.  If you need finer-grained control, refer to the "full Capybara documentation":http://rubydoc.info/github/jnicklas/capybara/Capybara.
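+
+Here is a minimal sketch of a JavaScript-driven integration test (the helper, class name, and expected page text are illustrative assumptions, not code from this tree):
+
+<pre>
+require 'integration_helper'
+
+class ExampleSketchTest < ActionDispatch::IntegrationTest
+  test "home page renders under the JavaScript driver" do
+    Capybara.current_driver = Capybara.javascript_driver
+    visit '/'
+    assert page.has_text?('Arvados')
+  end
+end
+</pre>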
diff --git a/apps/workbench/Rakefile b/apps/workbench/Rakefile
new file mode 100644 (file)
index 0000000..037f901
--- /dev/null
@@ -0,0 +1,11 @@
+#!/usr/bin/env rake
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Add your own tasks in files placed in lib/tasks ending in .rake,
+# for example lib/tasks/capistrano.rake, and they will automatically be available to Rake.
+
+require File.expand_path('../config/application', __FILE__)
+
+ArvadosWorkbench::Application.load_tasks
diff --git a/apps/workbench/app/assets/images/dax.png b/apps/workbench/app/assets/images/dax.png
new file mode 100644 (file)
index 0000000..c511f0e
Binary files /dev/null and b/apps/workbench/app/assets/images/dax.png differ
diff --git a/apps/workbench/app/assets/images/mouse-move.gif b/apps/workbench/app/assets/images/mouse-move.gif
new file mode 100644 (file)
index 0000000..497b159
Binary files /dev/null and b/apps/workbench/app/assets/images/mouse-move.gif differ
diff --git a/apps/workbench/app/assets/images/pipeline-running.gif b/apps/workbench/app/assets/images/pipeline-running.gif
new file mode 100644 (file)
index 0000000..64e9009
Binary files /dev/null and b/apps/workbench/app/assets/images/pipeline-running.gif differ
diff --git a/apps/workbench/app/assets/images/rails.png b/apps/workbench/app/assets/images/rails.png
new file mode 100644 (file)
index 0000000..d5edc04
Binary files /dev/null and b/apps/workbench/app/assets/images/rails.png differ
diff --git a/apps/workbench/app/assets/images/spinner_32px.gif b/apps/workbench/app/assets/images/spinner_32px.gif
new file mode 100644 (file)
index 0000000..3288d10
Binary files /dev/null and b/apps/workbench/app/assets/images/spinner_32px.gif differ
diff --git a/apps/workbench/app/assets/images/trash-icon.png b/apps/workbench/app/assets/images/trash-icon.png
new file mode 100644 (file)
index 0000000..5c26c24
Binary files /dev/null and b/apps/workbench/app/assets/images/trash-icon.png differ
diff --git a/apps/workbench/app/assets/javascripts/add_group.js b/apps/workbench/app/assets/javascripts/add_group.js
new file mode 100644 (file)
index 0000000..23de53d
--- /dev/null
@@ -0,0 +1,48 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+$(document).on('shown.bs.modal', '#add-group-modal', function(event) {
+    // Disable the submit button on modal loading
+    var $submit = $('#add-group-submit');
+    $submit.prop('disabled', true);
+
+    $('input[type=text]', event.target).val('');
+    $('#add-group-error', event.target).hide();
+}).on('input propertychange', '#group_name_input', function(event) {
+    var group_name = $(event.target).val();
+    var $submit = $('#add-group-submit');
+    $submit.prop('disabled', (group_name === null || group_name === ""));
+}).on('submit', '#add-group-form', function(event) {
+    var $form = $(event.target),
+    $submit = $(':submit', $form),
+    $error = $('#add-group-error', $form),
+    group_name = $('input[name="group_name_input"]', $form).val();
+
+    $submit.prop('disabled', true);
+
+    $error.hide();
+    $.ajax('/groups',
+           {method: 'POST',
+            dataType: 'json',
+            data: {group: {name: group_name, group_class: 'role'}},
+            context: $form}).
+        done(function(data, status, jqxhr) {
+            location.reload();
+        }).
+        fail(function(jqxhr, status, error) {
+            var errlist = jqxhr.responseJSON.errors;
+            var errmsg;
+            if (Array.isArray(errlist)) {
+                errmsg = errlist.join();
+            } else {
+                errmsg = ("The server returned an error when creating " +
+                          "this group (status " + jqxhr.status +
+                          ": " + errlist + ").");
+            }
+            $error.text(errmsg);
+            $error.show();
+            $submit.prop('disabled', false);
+        });
+    return false;
+});
diff --git a/apps/workbench/app/assets/javascripts/add_repository.js b/apps/workbench/app/assets/javascripts/add_repository.js
new file mode 100644 (file)
index 0000000..efcd19d
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+$(document).on('shown.bs.modal', '#add-repository-modal', function(event) {
+    $('input[type=text]', event.target).val('');
+    $('#add-repository-error', event.target).hide();
+}).on('submit', '#add-repository-form', function(event) {
+    var $form = $(event.target),
+    $submit = $(':submit', $form),
+    $error = $('#add-repository-error', $form),
+    repo_owner_uuid = $('input[name="add_repo_owner_uuid"]', $form).val(),
+    repo_prefix = $('input[name="add_repo_prefix"]', $form).val(),
+    repo_basename = $('input[name="add_repo_basename"]', $form).val();
+
+    $submit.prop('disabled', true);
+    $error.hide();
+    $.ajax('/repositories',
+           {method: 'POST',
+            dataType: 'json',
+            data: {repository: {owner_uuid: repo_owner_uuid,
+                                name: repo_prefix + repo_basename}},
+            context: $form}).
+        done(function(data, status, jqxhr) {
+            location.reload();
+        }).
+        fail(function(jqxhr, status, error) {
+            var errlist = jqxhr.responseJSON.errors;
+            var errmsg;
+            if (Array.isArray(errlist)) {
+                errmsg = errlist.join();
+            } else {
+                errmsg = ("The server returned an error when making " +
+                          "this repository (status " + jqxhr.status +
+                          ": " + errlist + ").");
+            }
+            $error.text(errmsg);
+            $error.show();
+            $submit.prop('disabled', false);
+        });
+    return false;
+});
diff --git a/apps/workbench/app/assets/javascripts/ajax_error.js b/apps/workbench/app/assets/javascripts/ajax_error.js
new file mode 100644 (file)
index 0000000..dd31cc6
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+$(document).on('ajax:error', function(e, xhr, status, error) {
+    var errorMessage = '' + status + ': ' + error;
+    // $btn is the element (button/link) that initiated the failed request.
+    var $btn = $(e.target);
+    // Populate some elements with the error text (e.g., a <p> in an alert div)
+    $($btn.attr('data-on-error-write')).text(errorMessage);
+    // Show some elements (e.g., an alert div)
+    $($btn.attr('data-on-error-show')).show();
+    // Hide some elements (e.g., a success/normal div)
+    $($btn.attr('data-on-error-hide')).hide();
+}).on('ajax:success', function(e) {
+    var $btn = $(e.target);
+    $($btn.attr('data-on-success-show')).show();
+    $($btn.attr('data-on-success-hide')).hide();
+});
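+
+// Usage sketch (markup and selectors are illustrative): a trigger
+// element declares, via data attributes, where error text is written
+// and which panels are shown or hidden, e.g.
+//   <a data-remote href="/some/action"
+//      data-on-error-write="#error-msg"
+//      data-on-error-show="#error-div"
+//      data-on-error-hide="#success-div">Do it</a>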
diff --git a/apps/workbench/app/assets/javascripts/angular_shim.js b/apps/workbench/app/assets/javascripts/angular_shim.js
new file mode 100644 (file)
index 0000000..5da6728
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Compile any new HTML content that was loaded via jQuery.ajax().
+// Currently this only works for tabs, and only because they emit an
+// arv:pane:loaded event after updating the DOM.
+
+$(document).on('arv:pane:loaded', function(event, $updatedElement) {
+    if (angular && $updatedElement && angular.element($updatedElement).injector()) {
+        angular.element($updatedElement).injector().invoke([
+            '$compile', function($compile) {
+                var scope = angular.element($updatedElement).scope();
+                $compile($updatedElement)(scope);
+            }]);
+    }
+});
diff --git a/apps/workbench/app/assets/javascripts/application.js b/apps/workbench/app/assets/javascripts/application.js
new file mode 100644 (file)
index 0000000..270a4c7
--- /dev/null
@@ -0,0 +1,262 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+//
+// This is a manifest file that'll be compiled into application.js, which will include all the files
+// listed below.
+//
+// Any JavaScript/Coffee file within this directory, lib/assets/javascripts, vendor/assets/javascripts,
+// or vendor/assets/javascripts of plugins, if any, can be referenced here using a relative path.
+//
+// It's not advisable to add code directly here, but if you do, it'll appear at
+// the bottom of the compiled file.
+//
+// WARNING: THE FIRST BLANK LINE MARKS THE END OF WHAT'S TO BE PROCESSED, ANY BLANK LINE SHOULD
+// GO AFTER THE REQUIRES BELOW.
+//
+//= require jquery
+//= require jquery_ujs
+//= require bootstrap
+//= require bootstrap/dropdown
+//= require bootstrap/tab
+//= require bootstrap/tooltip
+//= require bootstrap/popover
+//= require bootstrap/collapse
+//= require bootstrap/modal
+//= require bootstrap/button
+//= require bootstrap3-editable/bootstrap-editable
+//= require bootstrap-tab-history
+//= require wiselinks
+//= require angular
+//= require raphael
+//= require morris
+//= require jquery.number.min
+//= require npm-dependencies
+//= require mithril/stream/stream
+//= require awesomplete
+//= require jssha
+//= require_tree .
+
+Es6ObjectAssign.polyfill()
+window.m = Object.assign(window.Mithril, {stream: window.m.stream})
+
+jQuery(function($){
+    $(document).ajaxStart(function(){
+      $('.modal-with-loading-spinner .spinner').show();
+    }).ajaxStop(function(){
+      $('.modal-with-loading-spinner .spinner').hide();
+    });
+
+    $('[data-toggle=tooltip]').tooltip();
+
+    $('.expand-collapse-row').on('click', function(event) {
+        var targets = $('#' + $(this).attr('data-id'));
+        if (targets.css('display') == 'none') {
+            $(this).addClass('icon-minus-sign');
+            $(this).removeClass('icon-plus-sign');
+        } else {
+            $(this).addClass('icon-plus-sign');
+            $(this).removeClass('icon-minus-sign');
+        }
+        targets.fadeToggle(200);
+    });
+
+    var ajaxCount = 0;
+
+    $(document).
+        on('ajax:send', function(e, xhr) {
+            ajaxCount += 1;
+            if (ajaxCount == 1) {
+                $('.loading').fadeTo('fast', 1);
+            }
+        }).
+        on('ajax:complete', function(e, status) {
+            ajaxCount -= 1;
+            if (ajaxCount == 0) {
+                $('.loading').fadeOut('fast', 0);
+            }
+        }).
+        on('ajaxSend', function(e, xhr) {
+            // jQuery triggers 'ajaxSend' event when starting an ajax call, but
+            // rails-generated ajax triggers generate 'ajax:send'.  Workbench
+            // event listeners currently expect 'ajax:send', so trigger the
+            // rails event in response to the jQuery one.
+            $(document).trigger('ajax:send');
+        }).
+        on('ajaxComplete', function(e, xhr) {
+            // See comment above about ajaxSend/ajax:send
+            $(document).trigger('ajax:complete');
+        }).
+        on('click', '.removable-tag a', function(e) {
+            var tag_span = $(this).parents('[data-tag-link-uuid]').eq(0)
+            tag_span.fadeTo('fast', 0.2);
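+            // Rails interprets the _method parameter as an HTTP method
+            // override, so this POST performs a DELETE on the tag link.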
+            $.ajax('/links/' + tag_span.attr('data-tag-link-uuid'),
+                   {dataType: 'json',
+                    type: 'POST',
+                    data: { '_method': 'DELETE' },
+                    context: tag_span}).
+                done(function(data, status, jqxhr) {
+                    this.remove();
+                }).
+                fail(function(jqxhr, status, error) {
+                    this.addClass('label-danger').fadeTo('fast', '1');
+                });
+            return false;
+        }).
+        on('click', 'a.add-tag-button', function(e) {
+            var jqxhr;
+            var new_tag_uuid = 'new-tag-' + Math.random();
+            var tag_head_uuid = $(this).parents('tr').attr('data-object-uuid');
+            var new_tag = window.prompt("Add tag for collection "+
+                                    tag_head_uuid,
+                                    "");
+            if (new_tag == null)
+                return false;
+            var new_tag_span =
+                $('<span class="label label-info removable-tag"></span>').
+                attr('data-tag-link-uuid', new_tag_uuid).
+                text(new_tag).
+                css('opacity', '0.2').
+                append('&nbsp;<span class="removable-tag"><a title="Delete tag"><i class="fa fa-fw fa-trash-o"></i></a></span>');
+            $(this).
+                parent().
+                find('>span').
+                append(new_tag_span).
+                append(' ');
+            $.ajax($(this).attr('data-remote-href'),
+                           {dataType: 'json',
+                            type: $(this).attr('data-remote-method'),
+                            data: {
+                                'link[head_uuid]': tag_head_uuid,
+                                'link[link_class]': 'tag',
+                                'link[name]': new_tag
+                            },
+                            context: new_tag_span}).
+                done(function(data, status, jqxhr) {
+                    this.attr('data-tag-link-uuid', data.uuid).
+                        fadeTo('fast', '1');
+                }).
+                fail(function(jqxhr, status, error) {
+                    this.addClass('label-danger').fadeTo('fast', '1');
+                });
+            return false;
+        }).
+        on('click focusin', 'input.select-on-focus', function(event) {
+            event.target.select();
+        });
+
+    $(document).
+        on('ajax:complete ready', function() {
+            // See http://getbootstrap.com/javascript/#buttons
+            $('.btn').button();
+        }).
+        on('ready ajax:complete', function() {
+            $('[data-toggle~=tooltip]').tooltip({container:'body'});
+        }).
+        on('ready ajax:complete', function() {
+            // This makes the dialog close on Esc key, obviously.
+            $('.modal').attr('tabindex', '-1')
+        }).
+        on('ready', function() {
+            // Need this to trigger input validation/synchronization callbacks because some browsers
+            // auto-fill form fields (e.g., when navigating "back" to a page where some text
+            // had been entered in a search box) without triggering a change or input event.
+            $('input').each(function(i, el) {
+                $(el).trigger($.Event('input', {currentTarget: el}));
+            });
+        });
+
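+    // HeaderRowFixer keeps a fixed table header aligned with its body:
+    // duplicateTheadTr() clones the <thead> row into <tbody> as an
+    // invisible spacer, and fixThead() copies the body's column widths
+    // back onto the visible header cells.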
+    var HeaderRowFixer = function(selector) {
+        this.duplicateTheadTr = function() {
+            $(selector).each(function() {
+                var the_table = this;
+                if ($('>tbody>tr:first>th', the_table).length > 0)
+                    return;
+                $('>tbody', the_table).
+                    prepend($('>thead>tr', the_table).
+                            clone().
+                            css('opacity', 0));
+            });
+        }
+        this.fixThead = function() {
+            $(selector).each(function() {
+                var widths = [];
+                $('> tbody > tr:eq(1) > td', this).each( function(i,v){
+                    widths.push($(v).width());
+                });
+                for(i=0;i<widths.length;i++) {
+                    $('thead th:eq('+i+')', this).width(widths[i]);
+                }
+            });
+        }
+    }
+
+    var fixer = new HeaderRowFixer('.table-fixed-header-row');
+    fixer.duplicateTheadTr();
+    fixer.fixThead();
+    $(window).resize(function(){
+        fixer.fixThead();
+    });
+    $(document).on('ajax:complete', function(e, status) {
+        fixer.duplicateTheadTr();
+        fixer.fixThead();
+    });
+
+    $(document).ready(function() {
+        /* When wiselinks is initialized, selection.js is not working. Since we want to stop
+           using selection.js in the near future, let's not initialize wiselinks for now. */
+
+        // window.wiselinks = new Wiselinks();
+
+        $(document).off('page:loading').on('page:loading', function(event, $target, render, url){
+            $("#page-wrapper").fadeOut(200);
+        });
+
+        $(document).off('page:redirected').on('page:redirected', function(event, $target, render, url){
+        });
+
+        $(document).off('page:always').on('page:always', function(event, xhr, settings){
+            $("#page-wrapper").fadeIn(200);
+        });
+
+        $(document).off('page:done').on('page:done', function(event, $target, status, url, data){
+        });
+
+        $(document).off('page:fail').on('page:fail', function(event, $target, status, url, error, code){
+        });
+    });
+
+    $(document).on('click', '.compute-detail', function(e) {
+        $(e.target).collapse('hide');
+    });
+
+    $(document).on('click', '.compute-node-summary', function(e) {
+        $(e.target.href).collapse('toggle');
+    });
+
+    $(document).on('click', '.force-cache-reload', function(e) {
+        history.replaceState( { nocache: true }, '' );
+    });
+});
+
+window.addEventListener("DOMContentLoaded", function(e) {
+    if(history.state) {
+        if(history.state.nocache) {
+            showLoadingModal();
+            history.replaceState( {}, '' );
+            location.reload(true);
+        }
+    }
+});
+
+function showLoadingModal() {
+    $('#loading-modal').modal('show');
+}
+
+function hideLoadingModal() {
+    $('#loading-modal').modal('hide');
+}
+
+function hasHTML5History() {
+    return !!(window.history && window.history.pushState);
+}
diff --git a/apps/workbench/app/assets/javascripts/arvados_client.js b/apps/workbench/app/assets/javascripts/arvados_client.js
new file mode 100644 (file)
index 0000000..478dc29
--- /dev/null
@@ -0,0 +1,105 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+angular.
+    module('Arvados', []).
+    service('ArvadosClient', ArvadosClient);
+
+ArvadosClient.$inject = ['arvadosApiToken', 'arvadosDiscoveryUri']
+function ArvadosClient(arvadosApiToken, arvadosDiscoveryUri) {
+    $.extend(this, {
+        apiPromise: apiPromise,
+        uniqueNameForManifest: uniqueNameForManifest
+    });
+    return this;
+    ////////////////////////////////
+
+    var that = this;
+    var promiseDiscovery;
+    var discoveryDoc;
+
+    function apiPromise(controller, action, params) {
+        // Start an API call. Return a promise that will resolve with
+        // the API response.
+        return getDiscoveryDoc().then(function() {
+            var meth = discoveryDoc.resources[controller].methods[action];
+            var data = $.extend({}, params, {_method: meth.httpMethod});
+            $.each(data, function(k, v) {
+                if (typeof(v) === 'object') {
+                    data[k] = JSON.stringify(v);
+                }
+            });
+            var path = meth.path.replace(/{(.*?)}/, function(_, key) {
+                var val = data[key];
+                delete data[key];
+                return encodeURIComponent(val);
+            });
+            return $.ajax({
+                url: discoveryDoc.baseUrl + path,
+                type: 'POST',
+                crossDomain: true,
+                dataType: 'json',
+                data: data,
+                headers: {
+                    Authorization: 'OAuth2 ' + arvadosApiToken
+                }
+            });
+        });
+    }
+
+    function uniqueNameForManifest(manifest, newStreamName, origName) {
+        // Return an (escaped) filename starting with (unescaped)
+        // origName that won't conflict with any existing names in the
+        // manifest if saved under newStreamName. newStreamName must
+        // be exactly as given in the manifest, e.g., "." or "./foo"
+        // or "./foo/bar".
+        //
+        // Example:
+        //
+        // uniqueNameForManifest('./foo [...] 0:0:bar\\040baz.txt\n', '.',
+        //                       'foo/bar baz.txt')
+        // =>
+        // 'foo/bar\\040baz\\040(1).txt'
+        var newName;
+        var nameStub = origName;
+        var suffixInt = null;
+        var ok = false;
+        var lineMatch, linesRe = /(\S+).*/gm;
+        var fileTokenMatch, fileTokensRe = / \d+:\d+:(\S+)/g;
+        while (!ok) {
+            ok = true;
+            // Add ' (N)' before the filename extension, if any.
+            newName = (!suffixInt ? nameStub :
+                       nameStub.replace(/(\.[^.]*)?$/, ' ('+suffixInt+')$1')).
+                replace(/ /g, '\\040');
+            while (ok && null !==
+                   (lineMatch = linesRe.exec(manifest))) {
+                // lineMatch is [theEntireLine, streamName]
+                while (ok && null !==
+                       (fileTokenMatch = fileTokensRe.exec(lineMatch[0]))) {
+                    // fileTokenMatch is [theEntireToken, fileName]
+                    if (lineMatch[1] + '/' + fileTokenMatch[1]
+                        ===
+                        newStreamName + '/' + newName) {
+                        ok = false;
+                    }
+                }
+            }
+            suffixInt = (suffixInt || 0) + 1;
+        }
+        return newName;
+    }
+
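+    // Fetch the API discovery document at most once; the promise is
+    // memoized so concurrent and later callers share a single request.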
+    function getDiscoveryDoc() {
+        if (!promiseDiscovery) {
+            promiseDiscovery = $.ajax({
+                url: arvadosDiscoveryUri,
+                crossDomain: true
+            }).then(function(data, status, xhr) {
+                discoveryDoc = data;
+            });
+        }
+        return promiseDiscovery;
+    }
+}
diff --git a/apps/workbench/app/assets/javascripts/bootstrap.js.coffee b/apps/workbench/app/assets/javascripts/bootstrap.js.coffee
new file mode 100644 (file)
index 0000000..441d77f
--- /dev/null
@@ -0,0 +1,8 @@
+### Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 ###
+
+jQuery ->
+  $("a[rel=popover]").popover()
+  $(".tooltip").tooltip()
+  $("a[rel=tooltip]").tooltip()
diff --git a/apps/workbench/app/assets/javascripts/collections.js b/apps/workbench/app/assets/javascripts/collections.js
new file mode 100644 (file)
index 0000000..0752e05
--- /dev/null
@@ -0,0 +1,59 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+jQuery(function($){
+    $(document).on('click', '.toggle-persist button', function() {
+        var toggle_group = $(this).parents('[data-remote-href]').first();
+        var want_persist = !toggle_group.find('button').hasClass('active');
+        var want_state = want_persist ? 'persistent' : 'cache';
+        toggle_group.find('button').
+            toggleClass('active', want_persist).
+            html(want_persist ? 'Persistent' : 'Cache');
+        $.ajax(toggle_group.attr('data-remote-href'),
+               {dataType: 'json',
+                type: 'POST',
+                data: {
+                    value: want_state
+                },
+                context: {
+                    toggle_group: toggle_group,
+                    want_state: want_state,
+                    button: this
+                }
+               }).
+            done(function(data, status, jqxhr) {
+                var context = this;
+                // Remove "danger" status in case a previous action failed
+                $('.btn-danger', context.toggle_group).
+                    addClass('btn-info').
+                    removeClass('btn-danger');
+                // Update last-saved-state
+                context.toggle_group.
+                    attr('data-persistent-state', context.want_state);
+            }).
+            fail(function(jqxhr, status, error) {
+                var context = this;
+                var saved_state;
+                // Add a visual indication that something failed
+                $(context.button).
+                    addClass('btn-danger').
+                    removeClass('btn-info');
+                // Change to the last-saved-state
+                saved_state = context.toggle_group.attr('data-persistent-state');
+                $(context.button).
+                    toggleClass('active', saved_state == 'persistent').
+                    html(saved_state == 'persistent' ? 'Persistent' : 'Cache');
+
+                if (jqxhr.readyState == 0 || jqxhr.status == 0) {
+                    // Request cancelled due to page reload.
+                    // Displaying an alert would be rather annoying.
+                } else if (jqxhr.responseJSON && jqxhr.responseJSON.errors) {
+                    window.alert("Request failed: " +
+                                 jqxhr.responseJSON.errors.join("; "));
+                } else {
+                    window.alert("Request failed.");
+                }
+            });
+    });
+});
diff --git a/apps/workbench/app/assets/javascripts/components/date.js b/apps/workbench/app/assets/javascripts/components/date.js
new file mode 100644 (file)
index 0000000..62eacc3
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+window.LocalizedDateTime = {
+    view: function(vnode) {
+        return m('span', new Date(Date.parse(vnode.attrs.parse)).toLocaleString())
+    },
+}
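+
+// Usage sketch (the timestamp is illustrative):
+//   m(LocalizedDateTime, {parse: '2019-03-14T14:11:26Z'})
+// renders the parsed timestamp in the browser's locale.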
diff --git a/apps/workbench/app/assets/javascripts/components/edit_tags.js b/apps/workbench/app/assets/javascripts/components/edit_tags.js
new file mode 100644 (file)
index 0000000..1fddb26
--- /dev/null
@@ -0,0 +1,265 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
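+// Note: in the components below, vnode.attrs.value (and .name) are
+// Mithril streams: calling value() reads the current value, and
+// value(x) stores x and notifies any mapped listeners.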
+window.SimpleInput = {
+    view: function(vnode) {
+        return m('input.form-control', {
+            style: {
+                width: '100%',
+            },
+            type: 'text',
+            placeholder: 'Add ' + vnode.attrs.placeholder,
+            value: vnode.attrs.value,
+            onchange: function() {
+                if (this.value != '') {
+                    vnode.attrs.value(this.value)
+                }
+            },
+        }, vnode.attrs.value)
+    },
+}
+
+window.SelectOrAutocomplete = {
+    view: function(vnode) {
+        return m('input.form-control', {
+            style: {
+                width: '100%'
+            },
+            type: 'text',
+            value: vnode.attrs.value,
+            placeholder: (vnode.attrs.create ? 'Add or select ': 'Select ') + vnode.attrs.placeholder,
+        }, vnode.attrs.value)
+    },
+    oncreate: function(vnode) {
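+        // Wrap the rendered <input> in an Awesomplete autocomplete
+        // widget; minChars: 0 allows the full suggestion list to open
+        // even when the field is empty.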
+        vnode.state.awesomplete = new Awesomplete(vnode.dom, {
+            list: vnode.attrs.options,
+            minChars: 0,
+            maxItems: 1000000,
+            autoFirst: true,
+            sort: false,
+        })
+        vnode.state.create = vnode.attrs.create
+        vnode.state.options = vnode.attrs.options
+        // Option is selected from the list.
+        $(vnode.dom).on('awesomplete-selectcomplete', function(event) {
+            vnode.attrs.value(this.value)
+        })
+        $(vnode.dom).on('change', function(event) {
+            if (!vnode.state.create && vnode.state.options.indexOf(this.value) === -1) {
+                this.value = vnode.attrs.value()
+            } else {
+                if (vnode.attrs.value() !== this.value) {
+                    vnode.attrs.value(this.value)
+                }
+            }
+        })
+        $(vnode.dom).on('focusin', function(event) {
+            if (this.value === '') {
+                vnode.state.awesomplete.evaluate()
+                vnode.state.awesomplete.open()
+            }
+        })
+    },
+    onupdate: function(vnode) {
+        vnode.state.awesomplete.list = vnode.attrs.options
+        vnode.state.create = vnode.attrs.create
+        vnode.state.options = vnode.attrs.options
+    },
+}
+
+window.TagEditorRow = {
+    view: function(vnode) {
+        var nameOpts = Object.keys(vnode.attrs.vocabulary().tags)
+        var valueOpts = []
+        var inputComponent = SelectOrAutocomplete
+        if (nameOpts.length === 0) {
+            // If there's no vocabulary defined, switch to a simple input field.
+            inputComponent = SimpleInput
+        } else {
+            // Name options list
+            if (vnode.attrs.name() != '' && !(vnode.attrs.name() in vnode.attrs.vocabulary().tags)) {
+                nameOpts.push(vnode.attrs.name())
+            }
+            // Value options list
+            if (vnode.attrs.name() in vnode.attrs.vocabulary().tags &&
+                'values' in vnode.attrs.vocabulary().tags[vnode.attrs.name()]) {
+                    valueOpts = vnode.attrs.vocabulary().tags[vnode.attrs.name()].values
+            }
+        }
+        return m('tr', [
+            // Erase tag
+            m('td', [
+                vnode.attrs.editMode &&
+                m('div.text-center', m('a.btn.btn-default.btn-sm', {
+                    style: {
+                        align: 'center'
+                    },
+                    onclick: function(e) { vnode.attrs.removeTag() }
+                }, m('i.fa.fa-fw.fa-trash-o')))
+            ]),
+            // Tag key
+            m('td', [
+                vnode.attrs.editMode ?
+                m('div', {key: 'key'}, [
+                    m(inputComponent, {
+                        options: nameOpts,
+                        value: vnode.attrs.name,
+                        // Allow any tag name unless 'strict' is set to true.
+                        create: !vnode.attrs.vocabulary().strict,
+                        placeholder: 'key',
+                    })
+                ])
+                : vnode.attrs.name
+            ]),
+            // Tag value
+            m('td', [
+                vnode.attrs.editMode ?
+                m('div', {key: 'value'}, [
+                    m(inputComponent, {
+                        options: valueOpts,
+                        value: vnode.attrs.value,
+                        placeholder: 'value',
+                        // Allow any value on tags not listed in the vocabulary,
+                        // on tags without predefined values, and on tags that
+                        // aren't explicitly declared to be strict.
+                        create: !(vnode.attrs.name() in vnode.attrs.vocabulary().tags)
+                            || !vnode.attrs.vocabulary().tags[vnode.attrs.name()].values
+                            || vnode.attrs.vocabulary().tags[vnode.attrs.name()].values.length === 0
+                            || !vnode.attrs.vocabulary().tags[vnode.attrs.name()].strict,
+                    })
+                ])
+                : vnode.attrs.value
+            ])
+        ])
+    }
+}
+
+window.TagEditorTable = {
+    view: function(vnode) {
+        return m('table.table.table-condensed.table-justforlayout', [
+            m('colgroup', [
+                m('col', {width:'5%'}),
+                m('col', {width:'25%'}),
+                m('col', {width:'70%'}),
+            ]),
+            m('thead', [
+                m('tr', [
+                    m('th'),
+                    m('th', 'Key'),
+                    m('th', 'Value'),
+                ])
+            ]),
+            m('tbody', [
+                vnode.attrs.tags.length > 0
+                ? vnode.attrs.tags.map(function(tag, idx) {
+                    return m(TagEditorRow, {
+                        key: tag.rowKey,
+                        removeTag: function() {
+                            vnode.attrs.tags.splice(idx, 1)
+                            vnode.attrs.dirty(true)
+                        },
+                        editMode: vnode.attrs.editMode,
+                        name: tag.name,
+                        value: tag.value,
+                        vocabulary: vnode.attrs.vocabulary
+                    })
+                })
+                : m('tr', m('td[colspan=3]', m('center', 'Loading tags...')))
+            ]),
+        ])
+    }
+}
+
+var uniqueID = 1
+
+window.TagEditorApp = {
+    appendTag: function(vnode, name, value) {
+        var tag = {name: m.stream(name), value: m.stream(value), rowKey: uniqueID++}
+        vnode.state.tags.push(tag)
+        // Mark the editor dirty whenever the tag's name or value changes.
+        tag.name.map(function() { vnode.state.dirty(true) })
+        tag.value.map(function() { vnode.state.dirty(true) })
+        tag.name.map(m.redraw)
+    },
+    oninit: function(vnode) {
+        vnode.state.sessionDB = new SessionDB()
+        // Get vocabulary
+        vnode.state.vocabulary = m.stream({'strict':false, 'tags':{}})
+        var vocabularyTimestamp = parseInt(Date.now() / 300000) // Bust cache every 5 minutes
+        m.request('/vocabulary.json?v=' + vocabularyTimestamp).then(vnode.state.vocabulary)
+        vnode.state.editMode = vnode.attrs.targetEditable
+        vnode.state.tags = []
+        vnode.state.dirty = m.stream(false)
+        vnode.state.dirty.map(m.redraw)
+        vnode.state.objPath = 'arvados/v1/' + vnode.attrs.targetController + '/' + vnode.attrs.targetUuid
+        // Get tags
+        vnode.state.sessionDB.request(
+            vnode.state.sessionDB.loadLocal(),
+            'arvados/v1/' + vnode.attrs.targetController,
+            {
+                data: {
+                    filters: JSON.stringify([['uuid', '=', vnode.attrs.targetUuid]]),
+                    select: JSON.stringify(['properties'])
+                },
+            }).then(function(obj) {
+                if (obj.items.length == 1) {
+                    o = obj.items[0]
+                    Object.keys(o.properties).forEach(function(k) {
+                        vnode.state.appendTag(vnode, k, o.properties[k])
+                    })
+                    if (vnode.state.editMode) {
+                        vnode.state.appendTag(vnode, '', '')
+                    }
+                    // Data synced with server, so dirty state should be false
+                    vnode.state.dirty(false)
+                    // Add new tag row when the last one is completed
+                    vnode.state.dirty.map(function() {
+                        if (!vnode.state.editMode) { return }
+                        var lastTag = vnode.state.tags.slice(-1).pop()
+                        if (lastTag === undefined || (lastTag.name() !== '' || lastTag.value() !== '')) {
+                            vnode.state.appendTag(vnode, '', '')
+                        }
+                    })
+                }
+            }
+        )
+    },
+    view: function(vnode) {
+        return [
+            vnode.state.editMode &&
+            m('div.pull-left', [
+                m('a.btn.btn-primary.btn-sm' + (vnode.state.dirty() ? '' : '.disabled'), {
+                    style: {
+                        margin: '10px 0px'
+                    },
+                    onclick: function(e) {
+                        var tags = {}
+                        vnode.state.tags.forEach(function(t) {
+                            // Skip tags with an empty key; save all others
+                            if (t.name() != '') {
+                                tags[t.name()] = t.value()
+                            }
+                        })
+                        vnode.state.sessionDB.request(
+                            vnode.state.sessionDB.loadLocal(),
+                            vnode.state.objPath, {
+                                method: 'PUT',
+                                data: {properties: JSON.stringify(tags)}
+                            }
+                        ).then(function(v) {
+                            vnode.state.dirty(false)
+                        })
+                    }
+                }, vnode.state.dirty() ? ' Save changes ' : ' Saved ')
+            ]),
+            // Tags table
+            m(TagEditorTable, {
+                editMode: vnode.state.editMode,
+                tags: vnode.state.tags,
+                vocabulary: vnode.state.vocabulary,
+                dirty: vnode.state.dirty
+            })
+        ]
+    },
+}
diff --git a/apps/workbench/app/assets/javascripts/components/save_ui_state.js b/apps/workbench/app/assets/javascripts/components/save_ui_state.js
new file mode 100644 (file)
index 0000000..3aece31
--- /dev/null
@@ -0,0 +1,90 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// SaveUIState avoids losing scroll position due to navigation
+// events, and saves/restores other caller-specified UI state.
+//
+// It does not display any content itself: do not pass any children.
+//
+// Use of multiple SaveUIState components on the same page is not
+// (yet) supported.
+//
+// The problem being solved:
+//
+// Page 1 loads some content dynamically (e.g., via infinite scroll)
+// after the initial render. User scrolls down, clicks a link, and
+// lands on page 2. User clicks the Back button, and lands on page
+// 1. Page 1 renders its initial content while waiting for AJAX.
+//
+// But (without SaveUIState) the document body is small now, so the
+// browser resets scroll position to the top of the page. Even if we
+// end up displaying the same dynamic content, the user's place on the
+// page has been lost.
+//
+// SaveUIState fixes this by stashing the current body height when
+// navigating away from page 1. When navigating back, it restores the
+// body height even before the page has loaded, so the browser does
+// not reset the scroll position.
+//
+// SaveUIState also saves/restores arbitrary UI state (like text typed
+// in a search box) in response to navigation events.
+//
+// See CollectionsSearch for an example.
+//
+// Attributes:
+//
+// {getter-setter} currentState: the current UI state
+//
+// {any} defaultState: value to initialize currentState with, if
+// nothing is stashed in browser history.
+//
+// {boolean} forgetSavedHeight: the body height loaded from the
+// browser history (if any) is outdated; we should let the browser
+// determine the correct body height from the current page
+// content. Set this when dynamic content has been reset.
+//
+// {boolean} saveBodyHeight: save/restore body height as described
+// above.
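+//
+// A minimal usage sketch (the stream name is illustrative):
+//
+//   var searchText = m.stream('')
+//   m(SaveUIState, {
+//       defaultState: '',
+//       currentState: searchText,
+//       saveBodyHeight: true,
+//   })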
+window.SaveUIState = {
+    saveState: function() {
+        var state = history.state || {}
+        state.bodyHeight = window.getComputedStyle(document.body)['height']
+        state.currentState = this.currentState()
+        history.replaceState(state, '')
+    },
+    oninit: function(vnode) {
+        vnode.state.currentState = vnode.attrs.currentState
+        var hstate = history.state || {}
+
+        if (vnode.attrs.saveBodyHeight && hstate.bodyHeight) {
+            document.body.style['min-height'] = hstate.bodyHeight
+            delete hstate.bodyHeight
+        }
+
+        if (hstate.currentState) {
+            vnode.attrs.currentState(hstate.currentState)
+            delete hstate.currentState
+        } else {
+            vnode.attrs.currentState(vnode.attrs.defaultState)
+        }
+
+        history.replaceState(hstate, '')
+    },
+    oncreate: function(vnode) {
+        vnode.state.saveState = vnode.state.saveState.bind(vnode.state)
+        window.addEventListener('beforeunload', vnode.state.saveState)
+        vnode.state.onupdate(vnode)
+    },
+    onupdate: function(vnode) {
+        if (vnode.attrs.saveBodyHeight && vnode.attrs.forgetSavedHeight) {
+            document.body.style['min-height'] = null
+        }
+    },
+    onremove: function(vnode) {
+        window.removeEventListener('beforeunload', vnode.state.saveState)
+    },
+    view: function(vnode) {
+        return null
+    },
+}
diff --git a/apps/workbench/app/assets/javascripts/components/search.js b/apps/workbench/app/assets/javascripts/components/search.js
new file mode 100644 (file)
index 0000000..04572ec
--- /dev/null
@@ -0,0 +1,216 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+window.SearchResultsTable = {
+    maybeLoadMore: function(dom) {
+        var loader = this.loader
+        if (loader.state != loader.READY)
+            // Can't start getting more items anyway: no point in
+            // checking anything else.
+            return
+        var contentRect = dom.getBoundingClientRect()
+        var scroller = window // TODO: use dom's nearest ancestor with scrollbars
+        if (contentRect.bottom < 2 * scroller.innerHeight) {
+            // We have less than 1 page worth of content available
+            // below the visible area. Load more.
+            loader.loadMore()
+            // Indicate loading is in progress.
+            window.requestAnimationFrame(m.redraw)
+        }
+    },
+    oncreate: function(vnode) {
+        vnode.state.maybeLoadMore = vnode.state.maybeLoadMore.bind(vnode.state, vnode.dom)
+        window.addEventListener('scroll', vnode.state.maybeLoadMore)
+        window.addEventListener('resize', vnode.state.maybeLoadMore)
+        vnode.state.timer = window.setInterval(vnode.state.maybeLoadMore, 200)
+        vnode.state.loader = vnode.attrs.loader
+        vnode.state.onupdate(vnode)
+    },
+    onupdate: function(vnode) {
+        vnode.state.loader = vnode.attrs.loader
+    },
+    onremove: function(vnode) {
+        window.clearInterval(vnode.state.timer)
+        window.removeEventListener('scroll', vnode.state.maybeLoadMore)
+        window.removeEventListener('resize', vnode.state.maybeLoadMore)
+    },
+    view: function(vnode) {
+        var loader = vnode.attrs.loader
+        var iconsMap = {
+            collections: m('i.fa.fa-fw.fa-archive'),
+            projects: m('i.fa.fa-fw.fa-folder'),
+        }
+        var db = new SessionDB()
+        var sessions = db.loadActive()
+        return m('table.table.table-condensed', [
+            m('thead', m('tr', [
+                m('th'),
+                m('th', 'uuid'),
+                m('th', 'name'),
+                m('th', 'last modified'),
+            ])),
+            m('tbody', [
+                loader.items().map(function(item) {
+                    var session = sessions[item.uuid.slice(0,5)]
+                    var tokenParam = ''
+                    // Add the salted token to search result links from federated
+                    // remote hosts.
+                    if (!session.isFromRails && session.token.indexOf('v2/') == 0) {
+                        tokenParam = session.token
+                    }
+                    return m('tr', [
+                        m('td', m('form', {
+                            action: item.workbenchBaseURL() + '/' + item.objectType.wb_path + '/' + item.uuid,
+                            method: 'GET'
+                        }, [
+                            tokenParam !== '' &&
+                                m('input[type=hidden][name=api_token]', {value: tokenParam}),
+                            item.workbenchBaseURL() &&
+                                m('button.btn.btn-xs.btn-default[type=submit]', {
+                                    'data-original-title': 'show '+item.objectType.description,
+                                    'data-placement': 'top',
+                                    'data-toggle': 'tooltip',
+                                    // Bootstrap's tooltip feature
+                                    oncreate: function(vnode) { $(vnode.dom).tooltip() },
+                                }, iconsMap[item.objectType.wb_path]),
+                        ])),
+                        m('td.arvados-uuid', item.uuid),
+                        m('td', item.name || '(unnamed)'),
+                        m('td', m(LocalizedDateTime, {parse: item.modified_at})),
+                    ])
+                }),
+            ]),
+            loader.state == loader.DONE ? null : m('tfoot', m('tr', [
+                m('th[colspan=4]', m('button.btn.btn-xs', {
+                    className: loader.state == loader.LOADING ? 'btn-default' : 'btn-primary',
+                    style: {
+                        display: 'block',
+                        width: '12em',
+                        marginLeft: 'auto',
+                        marginRight: 'auto',
+                    },
+                    disabled: loader.state == loader.LOADING,
+                    onclick: function() {
+                        loader.loadMore()
+                        return false
+                    },
+                }, loader.state == loader.LOADING ? '(loading)' : 'Load more')),
+            ])),
+        ])
+    },
+}
+
+window.Search = {
+    oninit: function(vnode) {
+        vnode.state.sessionDB = new SessionDB()
+        vnode.state.sessionDB.autoRedirectToHomeCluster('/search')
+        vnode.state.searchEntered = m.stream()
+        vnode.state.searchActive = m.stream()
+        // When searchActive changes (e.g., when restoring state
+        // after navigation), update the text field too.
+        vnode.state.searchActive.map(vnode.state.searchEntered)
+        // When searchActive changes, create a new loader that filters
+        // with the given search term.
+        vnode.state.searchActive.map(function(q) {
+            var sessions = vnode.state.sessionDB.loadActive()
+            vnode.state.loader = new MergingLoader({
+                children: Object.keys(sessions).map(function(key) {
+                    var session = sessions[key]
+                    var workbenchBaseURL = function() {
+                        return vnode.state.sessionDB.workbenchBaseURL(session)
+                    }
+                    var searchable_objects = [
+                        {
+                            wb_path: 'projects',
+                            api_path: 'arvados/v1/groups',
+                            filters: [['group_class', '=', 'project']],
+                            description: 'project',
+                        },
+                        {
+                            wb_path: 'collections',
+                            api_path: 'arvados/v1/collections',
+                            filters: [],
+                            description: 'collection',
+                        },
+                    ]
+                    return new MergingLoader({
+                        sessionKey: key,
+                        // For every session, search for every object type
+                        children: searchable_objects.map(function(obj_type) {
+                            return new MultipageLoader({
+                                sessionKey: key,
+                                loadFunc: function(filters) {
+                                    // Apply additional type-dependent filters
+                                    filters = filters.concat(obj_type.filters)
+                                    var tsquery = to_tsquery(q)
+                                    if (tsquery) {
+                                        filters.push(['any', '@@', tsquery])
+                                    }
+                                    return vnode.state.sessionDB.request(session, obj_type.api_path, {
+                                        data: {
+                                            filters: JSON.stringify(filters),
+                                            count: 'none',
+                                        },
+                                    }).then(function(resp) {
+                                        resp.items.map(function(item) {
+                                            item.workbenchBaseURL = workbenchBaseURL
+                                            item.objectType = obj_type
+                                        })
+                                        return resp
+                                    })
+                                },
+                            })
+                        }),
+                    })
+                }),
+            })
+        })
+    },
+    view: function(vnode) {
+        return m('form', {
+            onsubmit: function() {
+                vnode.state.searchActive(vnode.state.searchEntered())
+                vnode.state.forgetSavedHeight = true
+                return false
+            },
+        }, [
+            m(SaveUIState, {
+                defaultState: '',
+                currentState: vnode.state.searchActive,
+                forgetSavedHeight: vnode.state.forgetSavedHeight,
+                saveBodyHeight: true,
+            }),
+            vnode.state.loader && [
+                m('.row', [
+                    m('.col-md-6', [
+                        m('.input-group', [
+                            m('input#search.form-control[placeholder=Search collections and projects]', {
+                                oninput: m.withAttr('value', vnode.state.searchEntered),
+                                value: vnode.state.searchEntered(),
+                            }),
+                            m('.input-group-btn', [
+                                m('input.btn.btn-primary[type=submit][value="Search"]'),
+                            ]),
+                        ]),
+                    ]),
+                    m('.col-md-6', [
+                        'Searching sites: ',
+                        vnode.state.loader.children.length == 0
+                            ? m('span.label.label-xs.label-danger', 'none')
+                            : vnode.state.loader.children.map(function(child) {
+                                return [m('span.label.label-xs', {
+                                    className: child.state == child.LOADING ? 'label-warning' : 'label-success',
+                                }, child.sessionKey), ' ']
+                            }),
+                        ' ',
+                        m('a[href="/sessions"]', 'Add/remove sites'),
+                    ]),
+                ]),
+                m(SearchResultsTable, {
+                    loader: vnode.state.loader,
+                }),
+            ],
+        ])
+    },
+}
diff --git a/apps/workbench/app/assets/javascripts/components/sessions.js b/apps/workbench/app/assets/javascripts/components/sessions.js
new file mode 100644 (file)
index 0000000..04ca6ac
--- /dev/null
@@ -0,0 +1,108 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+$(document).on('ready', function() {
+    var db = new SessionDB();
+    db.checkForNewToken();
+    db.fillMissingUUIDs();
+    db.autoLoadRemoteHosts();
+});
+
+window.SessionsTable = {
+    oninit: function(vnode) {
+        vnode.state.db = new SessionDB();
+        vnode.state.db.autoRedirectToHomeCluster('/sessions');
+        vnode.state.db.migrateNonFederatedSessions();
+        vnode.state.hostToAdd = m.stream('');
+        vnode.state.error = m.stream();
+        vnode.state.checking = m.stream();
+    },
+    view: function(vnode) {
+        var db = vnode.state.db;
+        var sessions = db.loadAll();
+        return m('.container', [
+            m('p', [
+                'You can log in to multiple Arvados sites here, then use the ',
+                m('a[href="/search"]', 'multi-site search'),
+                ' page to search collections and projects on all sites at once.'
+            ]),
+            m('table.table.table-condensed.table-hover', [
+                m('thead', m('tr', [
+                    m('th', 'status'),
+                    m('th', 'cluster ID'),
+                    m('th', 'username'),
+                    m('th', 'email'),
+                    m('th', 'actions'),
+                    m('th')
+                ])),
+                m('tbody', [
+                    Object.keys(sessions).map(function(uuidPrefix) {
+                        var session = sessions[uuidPrefix];
+                        return m('tr', [
+                            session.token && session.user ? [
+                                m('td', session.user.is_active ?
+                                    m('span.label.label-success', 'logged in') :
+                                    m('span.label.label-warning', 'inactive')),
+                                m('td', {title: session.baseURL}, uuidPrefix),
+                                m('td', session.user.username),
+                                m('td', session.user.email),
+                                m('td', session.isFromRails ? null : m('button.btn.btn-xs.btn-default', {
+                                    uuidPrefix: uuidPrefix,
+                                    onclick: m.withAttr('uuidPrefix', db.logout),
+                                }, session.listedHost ? 'Disable ':'Log out ', m('span.glyphicon.glyphicon-log-out')))
+                            ] : [
+                                m('td', m('span.label.label-default', 'logged out')),
+                                m('td', {title: session.baseURL}, uuidPrefix),
+                                m('td'),
+                                m('td'),
+                                m('td', m('a.btn.btn-xs.btn-primary', {
+                                    uuidPrefix: uuidPrefix,
+                                    onclick: db.login.bind(db, session.baseURL),
+                                }, session.listedHost ? 'Enable ':'Log in ', m('span.glyphicon.glyphicon-log-in')))
+                            ],
+                            m('td', (session.isFromRails || session.listedHost) ? null :
+                                m('button.btn.btn-xs.btn-default', {
+                                    uuidPrefix: uuidPrefix,
+                                    onclick: m.withAttr('uuidPrefix', db.trash),
+                                }, 'Remove ', m('span.glyphicon.glyphicon-trash'))
+                            ),
+                        ])
+                    }),
+                ]),
+            ]),
+            m('.row', m('.col-md-6', [
+                m('form', {
+                    onsubmit: function() {
+                        vnode.state.error(null)
+                        vnode.state.checking(true)
+                        db.findAPI(vnode.state.hostToAdd())
+                            .then(db.login)
+                            .catch(function() {
+                                vnode.state.error(true)
+                            })
+                            .then(vnode.state.checking.bind(null, null))
+                        return false
+                    },
+                }, [
+                    m('p', [
+                        'To add a remote Arvados site, paste the remote site\'s host here (see "ARVADOS_API_HOST" on the "current token" page).',
+                    ]),
+                    m('.input-group', { className: vnode.state.error() && 'has-error' }, [
+                        m('input.form-control[type=text][name=apiHost][placeholder="zzzzz.arvadosapi.com"]', {
+                            oninput: m.withAttr('value', vnode.state.hostToAdd),
+                        }),
+                        m('.input-group-btn', [
+                            m('input.btn.btn-primary[type=submit][value="Log in"]', {
+                                disabled: !vnode.state.hostToAdd(),
+                            }),
+                        ]),
+                    ]),
+                ]),
+                m('p'),
+                vnode.state.error() && m('p.alert.alert-danger', 'Request failed. Make sure this is a working API server address.'),
+                vnode.state.checking() && m('p.alert.alert-info', 'Checking...'),
+            ])),
+        ])
+    },
+}
diff --git a/apps/workbench/app/assets/javascripts/components/test.js b/apps/workbench/app/assets/javascripts/components/test.js
new file mode 100644 (file)
index 0000000..4893544
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+window.TestComponent = {
+    view: function(vnode) {
+        return m('div.mithril-test-component', [
+            m('p', {
+                onclick: m.withAttr('zzz', function(){}),
+            }, [
+                'mithril is working; rendered at t=',
+                (new Date()).getTime(),
+                'ms (click to re-render)',
+            ]),
+        ])
+    },
+}
diff --git a/apps/workbench/app/assets/javascripts/dates.js b/apps/workbench/app/assets/javascripts/dates.js
new file mode 100644 (file)
index 0000000..ed5f284
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+jQuery(function($){
+$(document).on('ajax:complete arv:pane:loaded ready', function() {
+    $('[data-utc-date]').each(function(i, elm) {
+        // Try matching the date using a couple of different formats.
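+        // e.g. "2019-03-14 14:11:26 UTC" or "2019-03-14T14:11:26Z"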
+        var v = $(elm).attr('data-utc-date').match(/(\d\d\d\d)-(\d\d)-(\d\d) (\d\d):(\d\d):(\d\d) UTC/);
+        if (!v) {
+            v = $(elm).attr('data-utc-date').match(/(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)Z/);
+        }
+
+        if (v) {
+            // Create a new date object from the timestamp so the browser can
+            // render the date based on the locale/timezone.
+            var ts = new Date(Date.UTC(v[1], v[2]-1, v[3], v[4], v[5], v[6]));
+            if ($(elm).attr('data-utc-date-opts') && $(elm).attr('data-utc-date-opts').match(/noseconds/)) {
+                // 12-hour clock: hours 0 and 12 both display as 12
+                $(elm).text((ts.getHours() % 12 || 12)
+                            + ":" + (ts.getMinutes() < 10 ? '0' : '') + ts.getMinutes()
+                            + (ts.getHours() >= 12 ? " PM " : " AM ")
+                            + ts.toLocaleDateString());
+            } else {
+                $(elm).text(ts.toLocaleTimeString() + " " + ts.toLocaleDateString());
+            }
+        }
+    });
+});
+});
diff --git a/apps/workbench/app/assets/javascripts/edit_collection.js b/apps/workbench/app/assets/javascripts/edit_collection.js
new file mode 100644 (file)
index 0000000..9220ac3
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// When a collection page loads, enable the "lock" button and
+// disable all file modification controls (upload, rename, delete).
+$(document).
+    ready(function(event) {
+        $(".btn-collection-file-control").addClass("disabled");
+        $(".btn-collection-rename-file-span").attr("title", "Unlock collection to rename file");
+        $(".btn-collection-remove-file-span").attr("title", "Unlock collection to remove file");
+        $(".btn-remove-selected-files").attr("title", "Unlock collection to remove selected files");
+        $(".tab-pane-Upload").addClass("disabled");
+        $(".tab-pane-Upload").attr("title", "Unlock collection to upload files");
+        $("#Upload-tab").attr("data-toggle", "disabled");
+    }).
+    on('click', '.lock-collection-btn', function(event) {
+        var classes = $(event.target).attr('class')
+
+        if (classes.indexOf("fa-lock") != -1) {
+            // About to unlock; warn and get confirmation from user
+            if (confirm("Adding, renaming, and deleting files changes the portable data hash. Are you sure you want to unlock the collection?")) {
+                $(".lock-collection-btn").removeClass("fa-lock");
+                $(".lock-collection-btn").addClass("fa-unlock");
+                $(".lock-collection-btn").attr("title", "Lock collection to prevent editing files");
+                $(".btn-collection-rename-file-span").attr("title", "");
+                $(".btn-collection-remove-file-span").attr("title", "");
+                $(".btn-collection-file-control").removeClass("disabled");
+                $(".btn-remove-selected-files").attr("title", "");
+                $(".tab-pane-Upload").removeClass("disabled");
+                $(".tab-pane-Upload").attr("data-original-title", "");
+                $("#Upload-tab").attr("data-toggle", "tab");
+            } else {
+                // User clicked "no" and so do not unlock
+            }
+        } else {
+            // Lock it back
+            $(".lock-collection-btn").removeClass("fa-unlock");
+            $(".lock-collection-btn").addClass("fa-lock");
+            $(".lock-collection-btn").attr("title", "Unlock collection to edit files");
+            $(".btn-collection-rename-file-span").attr("title", "Unlock collection to rename file");
+            $(".btn-collection-remove-file-span").attr("title", "Unlock collection to remove file");
+            $(".btn-collection-file-control").addClass("disabled");
+            $(".btn-remove-selected-files").attr("title", "Unlock collection to remove selected files");
+            $(".tab-pane-Upload").addClass("disabled");
+            $(".tab-pane-Upload").attr("data-original-title", "Unlock collection to upload files");
+            $("#Upload-tab").attr("data-toggle", "disabled");
+        }
+    });
diff --git a/apps/workbench/app/assets/javascripts/editable.js b/apps/workbench/app/assets/javascripts/editable.js
new file mode 100644 (file)
index 0000000..939506c
--- /dev/null
@@ -0,0 +1,121 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+$.fn.editable.defaults.ajaxOptions = {type: 'post', dataType: 'json'};
+$.fn.editable.defaults.send = 'always';
+
+// Default for editing is popup.  I experimented with inline which is a little
+// nicer in that it shows up right under the mouse instead of nearby.  However,
+// the inline box is taller than the regular content, which causes the page
+// layout to shift unless we make the table rows tall, which leaves a lot of
+// wasted space when not editing.  Also, inline can get cut off if the page
+// is too narrow, whereas the popup box just moves to stay visible.
+//$.fn.editable.defaults.mode = 'inline';
+
+$.fn.editable.defaults.success = function (response, newValue) {
+    $(document).trigger('editable:success', [this, response, newValue]);
+};
+
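+// Build the request params for an x-editable save. For example
+// (illustrative), editing the "name" field of an object whose pk.key is
+// "collection" produces roughly:
+//   {id: "<uuid>", collection: {name: "<new value>"}, _method: "put"}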
+$.fn.editable.defaults.params = function (params) {
+    var a = {};
+    var key = params.pk.key;
+    a.id = $(this).attr('data-object-uuid') || params.pk.id;
+    a[key] = params.pk.defaults || {};
+    // Remove null values. Otherwise they get transmitted as empty
+    // strings in request params.
+    for (var i in a[key]) {
+        if (a[key][i] == null)
+            delete a[key][i];
+    }
+    a[key][params.name] = params.value;
+    if (!a.id) {
+        a['_method'] = 'post';
+    } else {
+        a['_method'] = 'put';
+    }
+    return a;
+};
+
+$.fn.editable.defaults.validate = function (value) {
+    if (value == "***invalid***") {
+        return "Invalid selection";
+    }
+}
+
+$(document).
+    on('ready ajax:complete', function() {
+        $('.editable').
+            not('.editable-done-setup').
+            addClass('editable-done-setup').
+            editable({
+                success: function(response, newValue) {
+                    // If we just created a new object, stash its UUID
+                    // so we edit it next time instead of creating
+                    // another new object.
+                    if (!$(this).attr('data-object-uuid') && response.uuid) {
+                        $(this).attr('data-object-uuid', response.uuid);
+                    }
+                    if (response.href) {
+                        $(this).editable('option', 'url', response.href);
+                    }
+                    if ($(this).attr('data-name')) {
+                        var textileAttr = $(this).attr('data-name') + 'Textile';
+                        if (response[textileAttr]) {
+                            $(this).attr('data-textile', response[textileAttr]);
+                        }
+                    }
+                    return;
+                },
+                error: function(response, newValue) {
+                    var errlist = response.responseJSON.errors;
+                    var errmsg;
+                    if (Array.isArray(errlist)) {
+                        errmsg = errlist.join();
+                    } else {
+                        errmsg = ("The server returned an error when making " +
+                                  "this update (status " + response.status +
+                                  ": " + errlist + ").");
+                    }
+                    return errmsg;
+                }
+            }).
+            on('hidden', function(e, reason) {
+                // After saving a new attribute, update the same
+                // information if it appears elsewhere on the page.
+                if (reason != 'save') return;
+                var html = $(this).html();
+                if( $(this).attr('data-textile') ) {
+                    html = $(this).attr('data-textile');
+                    $(this).html(html);
+                }
+                var uuid = $(this).attr('data-object-uuid');
+                var attr = $(this).attr('data-name');
+                var edited = this;
+                if (uuid && attr) {
+                    $("[data-object-uuid='" + uuid + "']" +
+                      "[data-name='" + attr + "']").each(function() {
+                          if (this != edited)
+                              $(this).html(html);
+                      });
+                }
+            });
+    }).
+    on('ready ajax:complete', function() {
+        $("[data-toggle~='x-editable']").
+            not('.editable-done-setup').
+            addClass('editable-done-setup').
+            click(function(e) {
+                e.stopPropagation();
+                $($(this).attr('data-toggle-selector')).editable('toggle');
+            });
+    });
+
+$.fn.editabletypes.text.defaults.tpl = '<input type="text" name="editable-text">'
+
+$.fn.editableform.buttons = '\
+<button type="submit" class="btn btn-primary btn-sm editable-submit" \
+  id="editable-submit"><i class="glyphicon glyphicon-ok"></i></button>\
+<button type="button" class="btn btn-default btn-sm editable-cancel" \
+  id="editable-cancel"><i class="glyphicon glyphicon-remove"></i></button>\
+'
diff --git a/apps/workbench/app/assets/javascripts/event_log.js b/apps/workbench/app/assets/javascripts/event_log.js
new file mode 100644 (file)
index 0000000..e576ba9
--- /dev/null
@@ -0,0 +1,62 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+/*
+ * This js establishes a websockets connection with the API Server.
+ */
+
+/* Subscribe to websockets event log.  Do nothing if already connected. */
+function subscribeToEventLog () {
+    // If websockets are not supported by the browser, do not subscribe.
+    var websocketsSupported = ('WebSocket' in window);
+    if (!websocketsSupported) {
+        return;
+    }
+
+    // check if websocket connection is already stored on the window
+    var event_log_disp = $(window).data("arv-websocket");
+    if (event_log_disp == null) {
+        // need to create new websocket and event log dispatcher
+        var websocket_url = $('meta[name=arv-websocket-url]').attr("content");
+        if (websocket_url == null)
+            return;
+
+        event_log_disp = new WebSocket(websocket_url);
+
+        event_log_disp.onopen = onEventLogDispatcherOpen;
+        event_log_disp.onmessage = onEventLogDispatcherMessage;
+
+        // store websocket in window to allow reuse when multiple divs subscribe for events
+        $(window).data("arv-websocket", event_log_disp);
+    }
+}
+
+/* Send a subscribe message to the websockets server.  Without any filter
+   arguments, this subscribes to all events. */
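+/* A filtered subscription would be a sketch like:
+   {"method":"subscribe","filters":[["object_uuid","=","<uuid>"]]}
+   (not used here; this code always subscribes to everything). */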
+function onEventLogDispatcherOpen(event) {
+    this.send('{"method":"subscribe"}');
+}
+
+/* Trigger event for all applicable elements waiting for this event */
+function onEventLogDispatcherMessage(event) {
+    var parsedData = JSON.parse(event.data);
+    var object_uuid = parsedData.object_uuid;
+
+    if (!object_uuid) {
+        return;
+    }
+
+    // if there are any listeners for this object uuid or "all", trigger the event
+    matches = ".arv-log-event-listener[data-object-uuid=\"" + object_uuid + "\"],.arv-log-event-listener[data-object-uuids~=\"" + object_uuid + "\"],.arv-log-event-listener[data-object-uuid=\"all\"],.arv-log-event-listener[data-object-kind=\"" + parsedData.object_kind + "\"]";
+    $(matches).trigger('arv-log-event', parsedData);
+}
+
+/* Automatically connect if there are any elements on the page that want to
+   receive event log events. */
+$(document).on('ajax:complete ready', function() {
+    var a = $('.arv-log-event-listener');
+    if (a.length > 0) {
+        subscribeToEventLog();
+    }
+});
diff --git a/apps/workbench/app/assets/javascripts/filterable.js b/apps/workbench/app/assets/javascripts/filterable.js
new file mode 100644 (file)
index 0000000..e571e32
--- /dev/null
@@ -0,0 +1,208 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// filterable.js shows/hides content when the user operates
+// search/select widgets. For "infinite scroll" content, it passes the
+// filters to the server and retrieves new content. For other content,
+// it filters the existing DOM elements using jQuery show/hide.
+//
+// Usage:
+//
+// 1. Add the "filterable" class to each filterable content item.
+// Typically, each item is a 'tr' or a 'div class="row"'.
+//
+// <div id="results">
+//   <div class="filterable row">First row</div>
+//   <div class="filterable row">Second row</div>
+// </div>
+//
+// 2. Add the "filterable-control" class to each search/select widget.
+// Also add a data-filterable-target attribute with a jQuery selector
+// for an ancestor of the filterable items, i.e., the container in
+// which this widget should apply filtering.
+//
+// <input class="filterable-control" data-filterable-target="#results"
+//        type="text" />
+//
+// Supported widgets:
+//
+// <input type="text" ... />
+//
+// The input value is used as a regular expression. Rows with content
+// matching the regular expression are shown.
+//
+// <select ... data-filterable-attribute="data-example-attr">
+//  <option value="foo">Foo</option>
+//  <option value="">Show all</option>
+// </select>
+//
+// When the user selects the "Foo" option, rows with
+// data-example-attr="foo" are shown, and all others are hidden. When
+// the user selects the "Show all" option, all rows are shown.
+//
+// <input type="checkbox" data-on-value="{}" data-off-value="{}" ... />
+//
+// Merges on- or off-value with other params in query. Only works with
+// infinite-scroll.
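+//
+// For example (attribute values are illustrative), a checkbox that adds
+// {"include_trash": true} to the request params while checked:
+//
+// <input type="checkbox" id="include-trash" class="filterable-control"
+//        data-filterable-target="#results"
+//        data-on-value='{"include_trash":true}' data-off-value='{}' />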
+//
+// Notes:
+//
+// When multiple filterable-control widgets operate on the same
+// data-filterable-target, items must pass _all_ filters in order to
+// be shown.
+//
+// If one data-filterable-target is the parent of another
+// data-filterable-target, results are undefined. Don't do this.
+//
+// Combining "select" filterable-controls with infinite-scroll is not
+// yet supported.
+
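+// Apply the pending query string (data-filterable-query-new) to $target:
+// convert it to a tsquery filter, store it in the container's
+// infinite-content params, and clear the stale preview pane.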
+function updateFilterableQueryNow($target) {
+    var newquery = $target.data('filterable-query-new');
+    var params = $target.data('infinite-content-params-filterable') || {};
+    var tsquery = to_tsquery(newquery);
+    if (tsquery == null) {
+        params.filters = [];
+    } else {
+        params.filters = [['any', '@@', tsquery]];
+    }
+    $(".modal-dialog-preview-pane").html("");
+    $target.data('infinite-content-params-filterable', params);
+    $target.data('filterable-query', newquery);
+}
+
+$(document).
+    on('ready ajax:success', function() {
+        // Copy any initial input values into
+        // data-filterable-query[-new].
+        $('input[type=text].filterable-control').each(function() {
+            var $this = $(this);
+            var $target = $($this.attr('data-filterable-target'));
+            if ($target.data('filterable-query-new') === undefined) {
+                $target.data('filterable-query', $this.val());
+                $target.data('filterable-query-new', $this.val());
+                updateFilterableQueryNow($target);
+            }
+        });
+        $('[data-infinite-scroller]').on('refresh-content', '[data-filterable-query]', function(e) {
+            // If some other event causes a refresh-content event while a new
+            // query is waiting out its cooloff period, use the new query
+            // right away -- otherwise we'd launch an extra ajax request that
+            // would have to be reloaded as soon as the cooloff period ends.
+            if (this != e.target)
+                return;
+            if ($(this).data('filterable-query') == $(this).data('filterable-query-new'))
+                return;
+            updateFilterableQueryNow($(this));
+        });
+    }).
+    on('change', 'input[type=checkbox].filterable-control', function(e) {
+        if (this != e.target) return;
+        var $target = $($(this).attr('data-filterable-target'));
+        var currentquery = $target.data('filterable-query');
+        if (currentquery === undefined) currentquery = '';
+        if ($target.is('[data-infinite-scroller]')) {
+            var datakey = 'infiniteContentParamsFrom'+this.id;
+            var whichvalue = $(this).is(':checked') ? 'on-value' : 'off-value';
+            if (JSON.stringify($target.data(datakey)) == JSON.stringify($(this).data(whichvalue)))
+                return;
+            $target.data(datakey, $(this).data(whichvalue));
+            updateFilterableQueryNow($target);
+            $target.trigger('refresh-content');
+        }
+    }).
+    on('paste keyup input', 'input[type=text].filterable-control', function(e) {
+        var regexp;
+        if (this != e.target) return;
+        var $target = $($(this).attr('data-filterable-target'));
+        var currentquery = $target.data('filterable-query');
+        if (currentquery === undefined) currentquery = '';
+        if ($target.is('[data-infinite-scroller]')) {
+            // We already know how to load content dynamically, so we
+            // can do all filtering on the server side.
+
+            if ($target.data('infinite-cooloff-timer') > 0) {
+                // Clear a stale refresh-after-delay timer.
+                clearTimeout($target.data('infinite-cooloff-timer'));
+            }
+            // Stash the new query string in the filterable container.
+            $target.data('filterable-query-new', $(this).val());
+            if (currentquery == $(this).val()) {
+                // Don't mess with existing results or queries in
+                // progress.
+                return;
+            }
+            $target.data('infinite-cooloff-timer', setTimeout(function() {
+                // If the user doesn't do any query-changing actions
+                // in the next 1/4 second (like type or erase
+                // characters in the search box), hide the stale
+                // content and ask the server for new results.
+                updateFilterableQueryNow($target);
+                $target.trigger('refresh-content');
+            }, 250));
+        } else {
+            // Target does not have infinite-scroll capability. Just
+            // filter the rows in the browser using a RegExp.
+            regexp = undefined;
+            try {
+                regexp = new RegExp($(this).val(), 'i');
+            } catch(e) {
+                if (e instanceof SyntaxError) {
+                    // Invalid/partial regexp. See 'has-error' below.
+                } else {
+                    throw e;
+                }
+            }
+            $target.
+                toggleClass('has-error', regexp === undefined).
+                addClass('filterable-container').
+                data('q', regexp).
+                trigger('refresh');
+        }
+    }).on('refresh', '.filterable-container', function() {
+        var $container = $(this);
+        var q = $(this).data('q');
+        var filters = $(this).data('filters');
+        $('.filterable', this).hide().filter(function() {
+            var $row = $(this);
+            var pass = true;
+            if (q && !$row.text().match(q))
+                return false;
+            if (filters) {
+                $.each(filters, function(filterby, val) {
+                    if (!val) return;
+                    if (!pass) return;
+                    pass = false;
+                    $.each(val.split(" "), function(i, e) {
+                        if ($row.attr(filterby) == e)
+                            pass = true;
+                    });
+                });
+            }
+            return pass;
+        }).show();
+
+        // Show/hide each section heading depending on whether any
+        // content rows are visible in that section.
+        $('.row[data-section-heading]', this).each(function(){
+            $(this).toggle($('.row.filterable[data-section-name="' +
+                             $(this).attr('data-section-name') +
+                             '"]:visible').length > 0);
+        });
+
+        // Load more content if the last result is showing.
+        $('.infinite-scroller').add(window).trigger('scroll');
+    }).on('change', 'select.filterable-control', function() {
+        var val = $(this).val();
+        var filterby = $(this).attr('data-filterable-attribute');
+        var $target = $($(this).attr('data-filterable-target')).
+            addClass('filterable-container');
+        var filters = $target.data('filters') || {};
+        filters[filterby] = val;
+        $target.
+            data('filters', filters).
+            trigger('refresh');
+    }).on('ajax:complete', function() {
+        $('.filterable-control').trigger('input');
+    });
diff --git a/apps/workbench/app/assets/javascripts/infinite_scroll.js b/apps/workbench/app/assets/javascripts/infinite_scroll.js
new file mode 100644 (file)
index 0000000..3e63858
--- /dev/null
@@ -0,0 +1,309 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// infinite_scroll.js loads and displays more of a tab's content
+// automatically when the user scrolls to the bottom of the page and there
+// is more data to load.
+//
+// Usage:
+//
+// 1. Adding infinite scrolling to a tab pane using "show" method
+//
+//  The steps below describe adding scrolling to the project#show action.
+//
+//  a. In the "app/views/projects/" folder add a file for your tab
+//      (ex: _show_jobs_and_pipelines.html.erb)
+//    In this file, add a div or tbody with data-infinite-scroller.
+//      Note: This page uses _show_tab_contents.html.erb so that
+//            several tabs can reuse this implementation.
+//    Also add the filters to be used for loading the tab content.
+//
+//  b. Add a file named "_show_contents_rows.html.erb" that loads
+//    the data (by invoking get_objects_and_names from the controller).
+//
+//  c. In the "app/controllers/projects_controller.rb,
+//    Update the show method to add a block for "params[:partial]"
+//      that loads the show_contents_rows partial.
+//    Optionally, add a "tab_counts" method that loads the total number
+//      of objects count to be displayed for this tab.
+//
+// 2. Adding infinite scrolling to the "Recent" tab in "index" page
+//  The steps below describe adding scrolling to the pipeline_instances index page.
+//
+//  a. In the "app/views/pipeline_instances/_show_recent.html.erb/" file
+//      add a div or tbody with data-infinite-scroller.
+//
+//  b. Add the partial "_show_recent_rows.html.erb" that displays the
+//      page contents on scroll using @objects (see the example below).
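+//
+// A container set up for infinite scrolling might look like this sketch
+// (attribute values are illustrative, not taken from an actual view):
+//
+// <tbody data-infinite-scroller=".infinite-scroller"
+//        data-infinite-content-href="/projects/<uuid>?partial=true"
+//        data-infinite-content-params-tab-pane='{"filters": [["state","=","Complete"]]}'>
+// </tbody>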
+
+function maybe_load_more_content(event) {
+    var scroller = this;
+    var $container = $(event.data.container);
+    var src;                     // url for retrieving content
+    var scrollHeight;
+    var spinner, colspan;
+    var serial = Date.now();
+    var params;
+    scrollHeight = scroller.scrollHeight || $('body')[0].scrollHeight;
+    if ($(scroller).scrollTop() + $(scroller).height()
+        >
+        scrollHeight - 50)
+    {
+        if (!$container.attr('data-infinite-content-href0')) {
+            // Remember the first page source url, so we can refresh
+            // from page 1 later.
+            $container.attr('data-infinite-content-href0',
+                            $container.attr('data-infinite-content-href'));
+        }
+        src = $container.attr('data-infinite-content-href');
+        if (!src || !$container.is(':visible'))
+            // Finished
+            return;
+
+        // Don't start another request until this one finishes
+        $container.attr('data-infinite-content-href', null);
+        spinner = '<div class="spinner spinner-32px spinner-h-center"></div>';
+        if ($container.is('table,tbody,thead,tfoot')) {
+            // Hack to determine how many columns a new tr should have
+            // in order to reach full width.
+            colspan = $container.closest('table').
+                find('tr').eq(0).find('td,th').length;
+            if (colspan == 0)
+                colspan = '*';
+            spinner = ('<tr class="spinner"><td colspan="' + colspan + '">' +
+                       spinner +
+                       '</td></tr>');
+        }
+        $container.find(".spinner").detach();
+        $container.append(spinner);
+        $container.data('data-infinite-serial', serial);
+
+        if (src == $container.attr('data-infinite-content-href0')) {
+            // If we're loading the first page, collect filters from
+            // various sources.
+            params = mergeInfiniteContentParams($container);
+            $.each(params, function(k,v) {
+                if (v instanceof Object) {
+                    params[k] = JSON.stringify(v);
+                }
+            });
+        } else {
+            // If we're loading page >1, ignore other filtering
+            // mechanisms and just use the "next page" URI from the
+            // previous page's response. Aside from avoiding race
+            // conditions (where page 2 could have different filters
+            // than page 1), this allows the server to use filters in
+            // the "next page" URI to achieve paging. (To apply any
+            // new filters effectively, we need to load page 1 again
+            // anyway.)
+            params = {};
+        }
+
+        $.ajax(src,
+               {dataType: 'json',
+                type: 'GET',
+                data: params,
+                context: {container: $container, src: src, serial: serial}}).
+            fail(function(jqxhr, status, error) {
+                var $faildiv;
+                var $container = this.container;
+                var message;
+                if ($container.data('data-infinite-serial') != this.serial) {
+                    // A newer request is already in progress.
+                    return;
+                }
+                if (jqxhr.readyState == 0 || jqxhr.status == 0) {
+                    message = "Cancelled.";
+                } else if (jqxhr.responseJSON && jqxhr.responseJSON.errors) {
+                    message = jqxhr.responseJSON.errors.join("; ");
+                } else {
+                    message = "Request failed.";
+                }
+                // TODO: report the message to the user.
+                console.log(message);
+                $faildiv = $('<div />').
+                    attr('data-infinite-content-href', this.src).
+                    addClass('infinite-retry').
+                    append('<span class="fa fa-warning" /> Oops, request failed. <button class="btn btn-xs btn-primary">Retry</button>');
+                $container.find('div.spinner').replaceWith($faildiv);
+            }).
+            done(function(data, status, jqxhr) {
+                if ($container.data('data-infinite-serial') != this.serial) {
+                    // A newer request is already in progress.
+                    return;
+                }
+                $container.find(".spinner").detach();
+                $container.append(data.content);
+                $container.attr('data-infinite-content-href', data.next_page_href);
+                ping_all_scrollers();
+            });
+     }
+}
+
+function ping_all_scrollers() {
+    // Send a scroll event to all scroll listeners that might need
+    // updating. Adding infinite-scroller class to the window element
+    // doesn't work, so we add it explicitly here.
+    $('.infinite-scroller').add(window).trigger('scroll');
+}
+
+function mergeInfiniteContentParams($container) {
+    var params = {};
+    // Combine infiniteContentParams from multiple sources. This
+    // mechanism allows each of several components to set and
+    // update its own set of filters, without having to worry
+    // about stomping on some other component's filters.
+    //
+    // For example, filterable.js writes filters in
+    // infiniteContentParamsFilterable ("search for text foo")
+    // without worrying about clobbering the filters set up by the
+    // tab pane ("only show container requests and pipeline instances
+    // in this tab").
+    $.each($container.data(), function(datakey, datavalue) {
+        // Note: We attach these data to DOM elements using
+        // <element data-foo-bar="baz">. We store/retrieve them
+        // using $('element').data('foo-bar'), although
+        // .data('fooBar') would also work. The "all data" hash
+        // returned by $('element').data(), however, always has
+        // keys like 'fooBar'. In other words, where we have a
+        // choice, we stick with the 'foo-bar' style to be
+        // consistent with HTML. Here, our only option is
+        // 'fooBar'.
+        if (/^infiniteContentParams/.exec(datakey)) {
+            if (datavalue instanceof Object) {
+                $.each(datavalue, function(hkey, hvalue) {
+                    if (hvalue instanceof Array) {
+                        params[hkey] = (params[hkey] || []).
+                            concat(hvalue);
+                    } else if (hvalue instanceof Object) {
+                        $.extend(params[hkey], hvalue);
+                    } else {
+                        params[hkey] = hvalue;
+                    }
+                });
+            }
+        }
+    });
+    return params;
+}
+
+function setColumnSort( $container, $header, direction ) {
+    // $container should be the tbody or whatever has all the infinite table data attributes
+    // $header should be the th with a preset data-sort-order attribute
+    // direction should be "asc" or "desc"
+    // This function returns the order by clause for this column header as a string
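+    // For example, a header with data-sort-order="created_at,uuid" and
+    // direction "desc" produces "created_at desc, uuid desc".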
+
+    // First reset all sort directions
+    $('th[data-sort-order]').removeData('sort-order-direction');
+    // set the current one
+    $header.data('sort-order-direction', direction);
+    // change the ordering parameter
+    var paramsAttr = 'infinite-content-params-' + $container.data('infinite-content-params-attr');
+    var params = $container.data(paramsAttr) || {};
+    params.order = $header.data('sort-order').split(",").join( ' ' + direction + ', ' ) + ' ' + direction;
+    $container.data(paramsAttr, params);
+    // show the correct icon next to the column header
+    $container.trigger('sort-icons');
+
+    return params.order;
+}
+
+$(document).
+    on('click', 'div.infinite-retry button', function() {
+        var $retry_div = $(this).closest('.infinite-retry');
+        var $container = $(this).closest('.infinite-scroller-ready')
+        $container.attr('data-infinite-content-href',
+                        $retry_div.attr('data-infinite-content-href'));
+        $retry_div.
+            replaceWith('<div class="spinner spinner-32px spinner-h-center" />');
+        ping_all_scrollers();
+    }).
+    on('refresh-content', '[data-infinite-scroller]', function() {
+        // Clear all rows, reset source href to initial state, and
+        // (if the container is visible) start loading content.
+        var first_page_href = $(this).attr('data-infinite-content-href0');
+        if (!first_page_href)
+            first_page_href = $(this).attr('data-infinite-content-href');
+        $(this).
+            html('').
+            attr('data-infinite-content-href', first_page_href);
+        ping_all_scrollers();
+    }).
+    on('ready ajax:complete', function() {
+        $('[data-infinite-scroller]').each(function() {
+            if ($(this).hasClass('infinite-scroller-ready'))
+                return;
+            $(this).addClass('infinite-scroller-ready');
+
+            // deal with sorting if there is any, and if it was set on this page for this tab already
+            if( $('th[data-sort-order]').length ) {
+                var tabId = $(this).closest('div.tab-pane').attr('id');
+                if( hasHTML5History() &&
+                    history.state !== undefined && history.state !== null &&
+                    history.state.order !== undefined &&
+                    history.state.order[tabId] !== undefined ) {
+                    // we will use the list of one or more table columns associated with this header to find the right element
+                    // see sortable_columns as it is passed to render_pane in the various tab .erbs (e.g. _show_jobs_and_pipelines.html.erb)
+                    var strippedColumns = history.state.order[tabId].replace(/\s|\basc\b|\bdesc\b/g,'');
+                    var sortDirection = history.state.order[tabId].split(" ")[1].replace(/,/,'');
+                    var $columnHeader = $(this).closest('table').find('[data-sort-order="'+ strippedColumns +'"]');
+                    setColumnSort( $(this), $columnHeader, sortDirection );
+                } else {
+                    // otherwise just reset the sort icons
+                    $(this).trigger('sort-icons');
+                }
+            }
+
+            // $scroller is the DOM element that hears "scroll"
+            // events: sometimes it's a div, sometimes it's
+            // window. Here, "this" is the DOM element containing the
+            // result rows. We pass it to maybe_load_more_content in
+            // event.data.
+            var $scroller = $($(this).attr('data-infinite-scroller'));
+            if (!$scroller.hasClass('smart-scroll') &&
+                'scroll' != $scroller.css('overflow-y'))
+                $scroller = $(window);
+            $scroller.
+                addClass('infinite-scroller').
+                on('scroll resize', { container: this }, maybe_load_more_content).
+                trigger('scroll');
+        });
+    }).
+    on('shown.bs.tab', 'a[data-toggle="tab"]', function(event) {
+        $(event.target.getAttribute('href') + ' [data-infinite-scroller]').
+            trigger('scroll');
+    }).
+    on('click', 'th[data-sort-order]', function() {
+        var direction = $(this).data('sort-order-direction');
+        // reverse the current direction, or do ascending if none
+        if( direction === undefined || direction === 'desc' ) {
+            direction = 'asc';
+        } else {
+            direction = 'desc';
+        }
+
+        var $container = $(this).closest('table').find('[data-infinite-content-params-attr]');
+
+        var order = setColumnSort( $container, $(this), direction );
+
+        // put it in the browser history state if browser allows it
+        if( hasHTML5History() ) {
+            var tabId = $(this).closest('div.tab-pane').attr('id');
+            var state =  history.state || {};
+            if( state.order === undefined ) {
+                state.order = {};
+            }
+            state.order[tabId] = order;
+            history.replaceState( state, null, null );
+        }
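+        // The saved state has the (illustrative) shape:
+        //   { order: { "Jobs": "created_at desc", "Pipelines": "name asc" } }
+        // and is read back by the 'ready ajax:complete' handler above to
+        // restore each tab's sort.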
+
+        $container.trigger('refresh-content');
+    }).
+    on('sort-icons', function() {
+        // set or reset the icon next to each sortable column header according to the current direction attribute
+        $('th[data-sort-order]').each(function() {
+            $(this).find('i').remove();
+            var direction = $(this).data('sort-order-direction');
+            if( direction !== undefined ) {
+                $(this).append('<i class="fa fa-sort-' + direction + '"/>');
+            } else {
+                $(this).append('<i class="fa fa-sort"/>');
+            }
+        });
+    });
diff --git a/apps/workbench/app/assets/javascripts/job_log_graph.js b/apps/workbench/app/assets/javascripts/job_log_graph.js
new file mode 100644 (file)
index 0000000..f47f4f1
--- /dev/null
@@ -0,0 +1,339 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+/* Assumes existence of:
+  window.jobGraphData = [];
+  window.jobGraphSeries = [];
+  window.jobGraphSortedSeries = [];
+  window.jobGraphMaxima = {};
+ */
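+// Example log lines this parser accepts (all values below are made up):
+//   2014-03-20_19:30:01 qr1hi-8i9sb-0123456789abcde 12345 1 stderr crunchstat: cpu 3.9000 user 1.1000 sys 4 cpus -- interval 10.0000 seconds 2.0000 user 0.6000 sys
+//   2014-03-20_19:30:01 qr1hi-8i9sb-0123456789abcde 12345 1 stderr crunchstat: mem 60375040 cache 0 swap 2 pgmajfault 24151040 rss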
+function processLogLineForChart( logLine ) {
+    try {
+        var match = logLine.match(/^(\S+) (\S+) (\S+) (\S+) stderr crunchstat: (\S+) (.*)/);
+        if( !match ) {
+            match = logLine.match(/^((?:Sun|Mon|Tue|Wed|Thu|Fri|Sat) (?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{1,2} \d\d:\d\d:\d\d \d{4}) (\S+) (\S+) (\S+) stderr crunchstat: (\S+) (.*)/);
+            if( match ) {
+                match[1] = (new Date(match[1] + ' UTC')).toISOString().replace('Z','');
+            }
+        }
+        if( match ) {
+            var rawDetailData = '';
+            var datum = null;
+
+            // the timestamp comes first
+            var timestamp = match[1].replace('_','T') + 'Z';
+
+            // we are interested in "-- interval" recordings
+            var intervalMatch = match[6].match(/(.*) -- interval (.*)/);
+            if( intervalMatch ) {
+                var intervalData = intervalMatch[2].trim().split(' ');
+                var dt = parseFloat(intervalData[0]);
+                var dsum = 0.0;
+                for(var i=2; i < intervalData.length; i += 2 ) {
+                    dsum += parseFloat(intervalData[i]);
+                }
+                datum = dsum/dt;
+
+                if( datum < 0 ) {
+                    // not interested in negative deltas
+                    return;
+                }
+
+                rawDetailData = intervalMatch[2];
+
+                // for the series name use the task number (4th term) and then the first word after 'crunchstat:'
+                var series = 'T' + match[4] + '-' + match[5];
+
+                // special calculation for cpus
+                if( /-cpu$/.test(series) ) {
+                    // divide the stat by the number of cpus unless the time count is less than the interval length
+                    if( parseFloat(dsum.toFixed(1)) > parseFloat(dt.toFixed(1)) ) {
+                        var cpuCountMatch = intervalMatch[1].match(/(\d+) cpus/);
+                        if( cpuCountMatch ) {
+                            datum = datum / cpuCountMatch[1];
+                        }
+                    }
+                }
+
+                addJobGraphDatum( timestamp, datum, series, rawDetailData );
+            } else {
+                // we are also interested in memory ("mem") recordings
+                var memoryMatch = match[6].match(/(\d+) cache (\d+) swap (\d+) pgmajfault (\d+) rss/);
+                if( memoryMatch ) {
+                    rawDetailData = match[6];
+                    // one datapoint for rss and one for swap - only show the rawDetailData for rss
+                    addJobGraphDatum( timestamp, parseInt(memoryMatch[4]), 'T' + match[4] + "-rss", rawDetailData );
+                    addJobGraphDatum( timestamp, parseInt(memoryMatch[2]), 'T' + match[4] + "-swap", '' );
+                } else {
+                    // not interested
+                    return;
+                }
+            }
+
+            window.redraw = true;
+        }
+    } catch( err ) {
+        console.log( 'Ignoring error trying to process log line: ' + err);
+    }
+}
+
+function addJobGraphDatum(timestamp, datum, series, rawDetailData) {
+    // check for new series
+    if( $.inArray( series, jobGraphSeries ) < 0 ) {
+        var newIndex = jobGraphSeries.push(series) - 1;
+        jobGraphSortedSeries.push(newIndex);
+        jobGraphSortedSeries.sort( function(a,b) {
+            var matchA = jobGraphSeries[a].match(/^T(\d+)-(.*)/);
+            var matchB = jobGraphSeries[b].match(/^T(\d+)-(.*)/);
+            var termA = ('000000' + matchA[1]).slice(-6) + matchA[2];
+            var termB = ('000000' + matchB[1]).slice(-6) + matchB[2];
+            return termA > termB ? 1 : -1;
+        });
+        jobGraphMaxima[series] = null;
+        window.recreate = true;
+    }
+
+    if( datum !== 0 && ( jobGraphMaxima[series] === null || jobGraphMaxima[series] < datum ) ) {
+        if( isJobSeriesRescalable(series) ) {
+            // use old maximum to get a scale conversion
+            var scaleConversion = jobGraphMaxima[series]/datum;
+            // set new maximum and rescale the series
+            jobGraphMaxima[series] = datum;
+            rescaleJobGraphSeries( series, scaleConversion );
+        }
+    }
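+    // Worked example (made-up numbers): if the stored maximum was 50 and a
+    // datum of 200 arrives, scaleConversion = 50/200 = 0.25; every stored
+    // point (previously value/50) is multiplied by 0.25, re-expressing it as
+    // value/200, so the new point plots at 1.0.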
+
+    // scale
+    var scaledDatum = null;
+    if( isJobSeriesRescalable(series) && jobGraphMaxima[series] !== null && jobGraphMaxima[series] !== 0 ) {
+        scaledDatum = datum/jobGraphMaxima[series];
+    } else {
+        scaledDatum = datum;
+    }
+    // identify x axis point, searching from the end of the array (most recent)
+    var found = false;
+    for( var i = jobGraphData.length - 1; i >= 0; i-- ) {
+        if( jobGraphData[i]['t'] === timestamp ) {
+            found = true;
+            jobGraphData[i][series] = scaledDatum;
+            jobGraphData[i]['raw-'+series] = rawDetailData;
+            break;
+        } else if( jobGraphData[i]['t'] < timestamp  ) {
+            // we've gone far enough back in time and this data is supposed to be sorted
+            break;
+        }
+    }
+    // at this point i indexes the newest entry older than this timestamp
+    // (or -1 if none), so any new entry belongs just after it
+    var insertAt = i+1;
+    if(!found) {
+        // create a new x point for this previously unrecorded timestamp
+        var entry = { 't': timestamp };
+        entry[series] = scaledDatum;
+        entry['raw-'+series] = rawDetailData;
+        jobGraphData.splice( insertAt, 0, entry );
+        var shifted = [];
+        // now let's see about "scrolling" the graph, dropping entries that are too old (>10 minutes)
+        while( jobGraphData.length > 0
+                 && (Date.parse( jobGraphData[0]['t'] ) + 10*60000 < Date.parse( jobGraphData[jobGraphData.length-1]['t'] )) ) {
+            shifted.push(jobGraphData.shift());
+        }
+        if( shifted.length > 0 ) {
+            // from those that we dropped, were any of them maxima? if so we need to rescale
+            jobGraphSeries.forEach( function(series) {
+                // test that every shifted entry in this series was either not a number (in which case we don't care)
+                // or else approximately (to 2 decimal places) smaller than the scaled maximum (i.e. 1),
+                // because otherwise we just scrolled off something that was a maximum point
+                // and so we need to recalculate a new maximum point by looking at all remaining displayed points in the series
+                if( isJobSeriesRescalable(series) && jobGraphMaxima[series] !== null
+                      && !shifted.every( function(e) { return( !$.isNumeric(e[series]) || e[series].toFixed(2) < 1.0 ) } ) ) {
+                    // check the remaining displayed points and find the new (scaled) maximum
+                    var seriesMax = null;
+                    jobGraphData.forEach( function(entry) {
+                        if( $.isNumeric(entry[series]) && (seriesMax === null || entry[series] > seriesMax)) {
+                            seriesMax = entry[series];
+                        }
+                    });
+                    if( seriesMax !== null && seriesMax !== 0 ) {
+                        // set the new actual maximum and rescale the series by the new conversion factor
+                        jobGraphMaxima[series] *= seriesMax;
+                        var scaleConversion = 1/seriesMax;
+                        rescaleJobGraphSeries( series, scaleConversion );
+                    }
+                    else {
+                        // we no longer have any data points displaying for this series
+                        jobGraphMaxima[series] = null;
+                    }
+                }
+            });
+        }
+        // add a 10 minute old null data point to keep the chart honest if the oldest point is less than 9.9 minutes old
+        if( jobGraphData.length > 0 ) {
+            var earliestTimestamp = jobGraphData[0]['t'];
+            var mostRecentTimestamp = jobGraphData[jobGraphData.length-1]['t'];
+            if( (Date.parse( earliestTimestamp ) + 9.9*60000 > Date.parse( mostRecentTimestamp )) ) {
+                var tenMinutesBefore = (new Date(Date.parse( mostRecentTimestamp ) - 600*1000)).toISOString();
+                jobGraphData.unshift( { 't': tenMinutesBefore } );
+            }
+        }
+    }
+
+}
+
+function createJobGraph(elementName) {
+    delete window.jobGraph;
+    var emptyGraph = false;
+    if( jobGraphData.length === 0 ) {
+        // If there is no data we still want to show an empty graph,
+        // so add an empty datum and placeholder series to fool it
+        // into displaying itself.  Note that when finally a new
+        // series is added, the graph will be recreated anyway.
+        jobGraphData.push( {} );
+        jobGraphSeries.push( '' );
+        emptyGraph = true;
+    }
+    var graphteristics = {
+        element: elementName,
+        data: jobGraphData,
+        ymax: 1.0,
+        yLabelFormat: function () { return ''; },
+        xkey: 't',
+        ykeys: jobGraphSeries,
+        labels: jobGraphSeries,
+        resize: true,
+        hideHover: 'auto',
+        parseTime: true,
+        hoverCallback: function(index, options, content) {
+            var s = '';
+            for (var i=0; i < jobGraphSortedSeries.length; i++) {
+                var sortedIndex = jobGraphSortedSeries[i];
+                var series = options.ykeys[sortedIndex];
+                var datum = options.data[index][series];
+                var point = '';
+                point += "<div class='morris-hover-point' style='color: ";
+                point += options.lineColors[sortedIndex % options.lineColors.length];
+                point += "'>";
+                var labelMatch = options.labels[sortedIndex].match(/^T(\d+)-(.*)/);
+                point += 'Task ' + labelMatch[1] + ' ' + labelMatch[2];
+                point += ": ";
+                if ( datum !== undefined ) {
+                    if( isJobSeriesRescalable( series ) ) {
+                        datum *= jobGraphMaxima[series];
+                    }
+                    if( parseFloat(datum) !== 0 ) {
+                        if( /-cpu$/.test(series) ){
+                            datum = $.number(datum * 100, 1) + '%';
+                        } else if( datum < 10 ) {
+                            datum = $.number(datum, 2);
+                        } else {
+                            datum = $.number(datum);
+                        }
+                        if(options.data[index]['raw-'+series]) {
+                            datum += ' (' + options.data[index]['raw-'+series] + ')';
+                        }
+                    }
+                    point += datum;
+                } else {
+                    continue;
+                }
+                point += "</div> ";
+                s += point;
+            }
+            if (s === '') {
+                // No Y coordinates? This isn't a real data point,
+                // it's just the placeholder we use to make sure the
+                // graph can render when empty. Don't show a tooltip.
+                return '';
+            }
+            return ("<div class='morris-hover-row-label'>" +
+                    options.data[index][options.xkey] +
+                    "</div> " + s);
+        }
+    };
+    if( emptyGraph ) {
+        graphteristics['axes'] = false;
+        graphteristics['parseTime'] = false;
+        graphteristics['hideHover'] = 'always';
+    }
+    $('#' + elementName).html('');
+    window.jobGraph = Morris.Line( graphteristics );
+    if( emptyGraph ) {
+        jobGraphData = [];
+        jobGraphSeries = [];
+    }
+}
+
+function rescaleJobGraphSeries( series, scaleConversion ) {
+    if( isJobSeriesRescalable(series) ) {
+        $.each( jobGraphData, function( i, entry ) {
+            if( entry[series] !== null && entry[series] !== undefined ) {
+                entry[series] *= scaleConversion;
+            }
+        });
+    }
+}
+
+// Note: series ending in '-cpu' are never rescaled, since CPU load is
+// already a fraction between 0 and 1.
+function isJobSeriesRescalable( series ) {
+    return !/-cpu$/.test(series);
+}
+
+function processLogEventForGraph(event, eventData) {
+    if( eventData.properties.text ) {
+        eventData.properties.text.split('\n').forEach( function( logLine ) {
+            processLogLineForChart( logLine );
+        } );
+    }
+}
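+
+// eventData is expected to carry raw log text, e.g. (illustrative):
+//   { properties: { text: "2014-03-20_19:30:01 ... crunchstat: cpu ... -- interval ...\n..." } }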
+
+$(document).on('arv-log-event', '#log_graph_div', function(event, eventData) {
+    processLogEventForGraph(event, eventData);
+    if (!window.jobGraphShown) {
+        // Draw immediately, instead of waiting for the 5-second
+        // timer.
+        redrawIfNeeded.call(window, this);
+    }
+});
+
+function redrawIfNeeded(graph_div) {
+    if (!window.redraw) {
+        return;
+    }
+    window.redraw = false;
+
+    if (window.recreate) {
+        // Series have changed: we need to draw an entirely new graph.
+        // Running createJobGraph in a show() callback ensures the div
+        // is fully shown when morris uses it to size its svg element.
+        $(graph_div).show(0, createJobGraph.bind(window, $(graph_div).attr('id')));
+        window.jobGraphShown = true;
+        window.recreate = false;
+    } else {
+        window.jobGraph.setData(window.jobGraphData);
+    }
+}
+
+$(document).on('ready ajax:complete', function() {
+    $('#log_graph_div').not('.graph-is-setup').addClass('graph-is-setup').each( function( index, graph_div ) {
+        window.jobGraphShown = false;
+        window.jobGraphData = [];
+        window.jobGraphSeries = [];
+        window.jobGraphSortedSeries = [];
+        window.jobGraphMaxima = {};
+        window.recreate = false;
+        window.redraw = false;
+
+        $.get('/jobs/' + $(graph_div).data('object-uuid') + '/logs.json', function(data) {
+            data.forEach( function( entry ) {
+                processLogEventForGraph({}, entry);
+            });
+            // Update the graph now to show the recent data points
+            // received via /logs.json (along with any new data points
+            // we received via websockets while waiting for /logs.json
+            // to respond).
+            redrawIfNeeded(graph_div);
+        });
+
+        setInterval(redrawIfNeeded.bind(window, graph_div), 5000);
+    });
+});
diff --git a/apps/workbench/app/assets/javascripts/keep_disks.js.coffee b/apps/workbench/app/assets/javascripts/keep_disks.js.coffee
new file mode 100644 (file)
index 0000000..d33312d
--- /dev/null
@@ -0,0 +1,32 @@
+### Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 ###
+
+cache_age_in_days = (milliseconds_age) ->
+  ONE_DAY = 1000 * 60 * 60 * 24
+  milliseconds_age / ONE_DAY
+
+cache_age_hover = (milliseconds_age) ->
+  'Cache age ' + cache_age_in_days(milliseconds_age).toFixed(1) + ' days.'
+
+cache_age_axis_label = (milliseconds_age) ->
+  cache_age_in_days(milliseconds_age).toFixed(0) + ' days'
+
+float_as_percentage = (proportion) ->
+  (100 * proportion).toFixed(2) + '%'
+
+$.renderHistogram = (histogram_data) ->
+  Morris.Area({
+    element: 'cache-age-vs-disk-histogram',
+    pointSize: 0,
+    lineWidth: 0,
+    data: histogram_data,
+    xkey: 'age',
+    ykeys: ['persisted', 'cache'],
+    labels: ['Persisted Storage Disk Utilization', 'Cached Storage Disk Utilization'],
+    ymax: 1,
+    ymin: 0,
+    xLabelFormat: cache_age_axis_label,
+    yLabelFormat: float_as_percentage,
+    dateFormat: cache_age_hover
+  })
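+
+# Example (hypothetical) histogram_data entry passed to $.renderHistogram:
+#   { age: 86400000, persisted: 0.25, cache: 0.5 }
+# i.e. blocks about one day old fill 25% of disk as persisted storage and 50%
+# as cache.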
diff --git a/apps/workbench/app/assets/javascripts/link_to_remote.js b/apps/workbench/app/assets/javascripts/link_to_remote.js
new file mode 100644 (file)
index 0000000..8610ac6
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+$.rails.href = function(element) {
+    if (element.is('a')) {
+        // data-remote=true links must put their remote targets in
+        // data-remote-href="..." instead of href="...".  This helps
+        // us avoid accidentally using the same href="..." in both the
+        // remote (Rails UJS) and non-remote (native browser) handlers
+        // -- which differ greatly in how they use that value -- and
+        // forgetting to test any non-remote cases like "open in new
+        // tab". If you really want copy-link-address/open-in-new-tab
+        // to work on a data-remote=true link, supply the
+        // copy-and-pastable URI in href in addition to the AJAX URI
+        // in data-remote-href.
+        //
+        // (Currently, the only places we make any remote links are
+        // link_to() in ApplicationHelper, which renames href="..." to
+        // data-remote-href="...", and select_modal, which builds a
+        // data-remote=true link on the client side.)
+        return element.data('remote-href');
+    } else {
+        // Normal rails-ujs behavior.
+        return element.attr('href');
+    }
+}
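+
+// Illustrative markup (hypothetical, not from this file):
+//   <a data-remote="true" data-method="get" href="#"
+//      data-remote-href="/path/to/ajax/endpoint">Open</a>
+// Rails UJS fetches data-remote-href via AJAX; right-click/copy-link-address
+// sees only the plain href.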
diff --git a/apps/workbench/app/assets/javascripts/list.js b/apps/workbench/app/assets/javascripts/list.js
new file mode 100644 (file)
index 0000000..d8ea7ba
--- /dev/null
@@ -0,0 +1,1474 @@
+;(function(){
+
+/**
+ * Require the given path.
+ *
+ * @param {String} path
+ * @return {Object} exports
+ * @api public
+ */
+
+function require(path, parent, orig) {
+  var resolved = require.resolve(path);
+
+  // lookup failed
+  if (null == resolved) {
+    orig = orig || path;
+    parent = parent || 'root';
+    var err = new Error('Failed to require "' + orig + '" from "' + parent + '"');
+    err.path = orig;
+    err.parent = parent;
+    err.require = true;
+    throw err;
+  }
+
+  var module = require.modules[resolved];
+
+  // perform real require()
+  // by invoking the module's
+  // registered function
+  if (!module._resolving && !module.exports) {
+    var mod = {};
+    mod.exports = {};
+    mod.client = mod.component = true;
+    module._resolving = true;
+    module.call(this, mod.exports, require.relative(resolved), mod);
+    delete module._resolving;
+    module.exports = mod.exports;
+  }
+
+  return module.exports;
+}
+
+/**
+ * Registered modules.
+ */
+
+require.modules = {};
+
+/**
+ * Registered aliases.
+ */
+
+require.aliases = {};
+
+/**
+ * Resolve `path`.
+ *
+ * Lookup:
+ *
+ *   - PATH/index.js
+ *   - PATH.js
+ *   - PATH
+ *
+ * @param {String} path
+ * @return {String} path or null
+ * @api private
+ */
+
+require.resolve = function(path) {
+  if (path.charAt(0) === '/') path = path.slice(1);
+
+  var paths = [
+    path,
+    path + '.js',
+    path + '.json',
+    path + '/index.js',
+    path + '/index.json'
+  ];
+
+  for (var i = 0; i < paths.length; i++) {
+    var path = paths[i];
+    if (require.modules.hasOwnProperty(path)) return path;
+    if (require.aliases.hasOwnProperty(path)) return require.aliases[path];
+  }
+};
+
+/**
+ * Normalize `path` relative to the current path.
+ *
+ * @param {String} curr
+ * @param {String} path
+ * @return {String}
+ * @api private
+ */
+
+require.normalize = function(curr, path) {
+  var segs = [];
+
+  if ('.' != path.charAt(0)) return path;
+
+  curr = curr.split('/');
+  path = path.split('/');
+
+  for (var i = 0; i < path.length; ++i) {
+    if ('..' == path[i]) {
+      curr.pop();
+    } else if ('.' != path[i] && '' != path[i]) {
+      segs.push(path[i]);
+    }
+  }
+
+  return curr.concat(segs).join('/');
+};
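+// e.g. require.normalize('list.js/src', './add-async') -> 'list.js/src/add-async'
+//      require.normalize('list.js/src', '..')          -> 'list.js'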
+
+/**
+ * Register module at `path` with callback `definition`.
+ *
+ * @param {String} path
+ * @param {Function} definition
+ * @api private
+ */
+
+require.register = function(path, definition) {
+  require.modules[path] = definition;
+};
+
+/**
+ * Alias a module definition.
+ *
+ * @param {String} from
+ * @param {String} to
+ * @api private
+ */
+
+require.alias = function(from, to) {
+  if (!require.modules.hasOwnProperty(from)) {
+    throw new Error('Failed to alias "' + from + '", it does not exist');
+  }
+  require.aliases[to] = from;
+};
+
+/**
+ * Return a require function relative to the `parent` path.
+ *
+ * @param {String} parent
+ * @return {Function}
+ * @api private
+ */
+
+require.relative = function(parent) {
+  var p = require.normalize(parent, '..');
+
+  /**
+   * lastIndexOf helper.
+   */
+
+  function lastIndexOf(arr, obj) {
+    var i = arr.length;
+    while (i--) {
+      if (arr[i] === obj) return i;
+    }
+    return -1;
+  }
+
+  /**
+   * The relative require() itself.
+   */
+
+  function localRequire(path) {
+    var resolved = localRequire.resolve(path);
+    return require(resolved, parent, path);
+  }
+
+  /**
+   * Resolve relative to the parent.
+   */
+
+  localRequire.resolve = function(path) {
+    var c = path.charAt(0);
+    if ('/' == c) return path.slice(1);
+    if ('.' == c) return require.normalize(p, path);
+
+    // resolve deps by returning
+    // the dep in the nearest "deps"
+    // directory
+    var segs = parent.split('/');
+    var i = lastIndexOf(segs, 'deps') + 1;
+    if (!i) i = 0;
+    path = segs.slice(0, i + 1).join('/') + '/deps/' + path;
+    return path;
+  };
+
+  /**
+   * Check if module is defined at `path`.
+   */
+
+  localRequire.exists = function(path) {
+    return require.modules.hasOwnProperty(localRequire.resolve(path));
+  };
+
+  return localRequire;
+};
+require.register("component-classes/index.js", function(exports, require, module){
+/**
+ * Module dependencies.
+ */
+
+var index = require('indexof');
+
+/**
+ * Whitespace regexp.
+ */
+
+var re = /\s+/;
+
+/**
+ * toString reference.
+ */
+
+var toString = Object.prototype.toString;
+
+/**
+ * Wrap `el` in a `ClassList`.
+ *
+ * @param {Element} el
+ * @return {ClassList}
+ * @api public
+ */
+
+module.exports = function(el){
+  return new ClassList(el);
+};
+
+/**
+ * Initialize a new ClassList for `el`.
+ *
+ * @param {Element} el
+ * @api private
+ */
+
+function ClassList(el) {
+  if (!el) throw new Error('A DOM element reference is required');
+  this.el = el;
+  this.list = el.classList;
+}
+
+/**
+ * Add class `name` if not already present.
+ *
+ * @param {String} name
+ * @return {ClassList}
+ * @api public
+ */
+
+ClassList.prototype.add = function(name){
+  // classList
+  if (this.list) {
+    this.list.add(name);
+    return this;
+  }
+
+  // fallback
+  var arr = this.array();
+  var i = index(arr, name);
+  if (!~i) arr.push(name);
+  this.el.className = arr.join(' ');
+  return this;
+};
+
+/**
+ * Remove class `name` when present, or
+ * pass a regular expression to remove
+ * any which match.
+ *
+ * @param {String|RegExp} name
+ * @return {ClassList}
+ * @api public
+ */
+
+ClassList.prototype.remove = function(name){
+  if ('[object RegExp]' == toString.call(name)) {
+    return this.removeMatching(name);
+  }
+
+  // classList
+  if (this.list) {
+    this.list.remove(name);
+    return this;
+  }
+
+  // fallback
+  var arr = this.array();
+  var i = index(arr, name);
+  if (~i) arr.splice(i, 1);
+  this.el.className = arr.join(' ');
+  return this;
+};
+
+/**
+ * Remove all classes matching `re`.
+ *
+ * @param {RegExp} re
+ * @return {ClassList}
+ * @api private
+ */
+
+ClassList.prototype.removeMatching = function(re){
+  var arr = this.array();
+  for (var i = 0; i < arr.length; i++) {
+    if (re.test(arr[i])) {
+      this.remove(arr[i]);
+    }
+  }
+  return this;
+};
+
+/**
+ * Toggle class `name`, can force state via `force`.
+ *
+ * For browsers that support classList, but do not support `force` yet,
+ * the mistake will be detected and corrected.
+ *
+ * @param {String} name
+ * @param {Boolean} force
+ * @return {ClassList}
+ * @api public
+ */
+
+ClassList.prototype.toggle = function(name, force){
+  // classList
+  if (this.list) {
+    if ("undefined" !== typeof force) {
+      if (force !== this.list.toggle(name, force)) {
+        this.list.toggle(name); // toggle again to correct
+      }
+    } else {
+      this.list.toggle(name);
+    }
+    return this;
+  }
+
+  // fallback
+  if ("undefined" !== typeof force) {
+    if (!force) {
+      this.remove(name);
+    } else {
+      this.add(name);
+    }
+  } else {
+    if (this.has(name)) {
+      this.remove(name);
+    } else {
+      this.add(name);
+    }
+  }
+
+  return this;
+};
+
+/**
+ * Return an array of classes.
+ *
+ * @return {Array}
+ * @api public
+ */
+
+ClassList.prototype.array = function(){
+  var str = this.el.className.replace(/^\s+|\s+$/g, '');
+  var arr = str.split(re);
+  if ('' === arr[0]) arr.shift();
+  return arr;
+};
+
+/**
+ * Check if class `name` is present.
+ *
+ * @param {String} name
+ * @return {ClassList}
+ * @api public
+ */
+
+ClassList.prototype.has =
+ClassList.prototype.contains = function(name){
+  return this.list
+    ? this.list.contains(name)
+    : !! ~index(this.array(), name);
+};
+
+});
+require.register("segmentio-extend/index.js", function(exports, require, module){
+
+module.exports = function extend (object) {
+    // Takes an unlimited number of extenders.
+    var args = Array.prototype.slice.call(arguments, 1);
+
+    // For each extender, copy their properties on our object.
+    for (var i = 0, source; source = args[i]; i++) {
+        if (!source) continue;
+        for (var property in source) {
+            object[property] = source[property];
+        }
+    }
+
+    return object;
+};
+});
+require.register("component-indexof/index.js", function(exports, require, module){
+module.exports = function(arr, obj){
+  if (arr.indexOf) return arr.indexOf(obj);
+  for (var i = 0; i < arr.length; ++i) {
+    if (arr[i] === obj) return i;
+  }
+  return -1;
+};
+});
+require.register("component-event/index.js", function(exports, require, module){
+var bind = window.addEventListener ? 'addEventListener' : 'attachEvent',
+    unbind = window.removeEventListener ? 'removeEventListener' : 'detachEvent',
+    prefix = bind !== 'addEventListener' ? 'on' : '';
+
+/**
+ * Bind `el` event `type` to `fn`.
+ *
+ * @param {Element} el
+ * @param {String} type
+ * @param {Function} fn
+ * @param {Boolean} capture
+ * @return {Function}
+ * @api public
+ */
+
+exports.bind = function(el, type, fn, capture){
+  el[bind](prefix + type, fn, capture || false);
+  return fn;
+};
+
+/**
+ * Unbind `el` event `type`'s callback `fn`.
+ *
+ * @param {Element} el
+ * @param {String} type
+ * @param {Function} fn
+ * @param {Boolean} capture
+ * @return {Function}
+ * @api public
+ */
+
+exports.unbind = function(el, type, fn, capture){
+  el[unbind](prefix + type, fn, capture || false);
+  return fn;
+};
+});
+require.register("timoxley-to-array/index.js", function(exports, require, module){
+/**
+ * Convert an array-like object into an `Array`.
+ * If `collection` is already an `Array`, then will return a clone of `collection`.
+ *
+ * @param {Array | Mixed} collection An `Array` or array-like object to convert e.g. `arguments` or `NodeList`
+ * @return {Array} Naive conversion of `collection` to a new `Array`.
+ * @api public
+ */
+
+module.exports = function toArray(collection) {
+  if (typeof collection === 'undefined') return []
+  if (collection === null) return [null]
+  if (collection === window) return [window]
+  if (typeof collection === 'string') return [collection]
+  if (isArray(collection)) return collection
+  if (typeof collection.length != 'number') return [collection]
+  if (typeof collection === 'function' && collection instanceof Function) return [collection]
+
+  var arr = []
+  for (var i = 0; i < collection.length; i++) {
+    if (Object.prototype.hasOwnProperty.call(collection, i) || i in collection) {
+      arr.push(collection[i])
+    }
+  }
+  if (!arr.length) return []
+  return arr
+}
+
+function isArray(arr) {
+  return Object.prototype.toString.call(arr) === "[object Array]";
+}
+
+});
+require.register("javve-events/index.js", function(exports, require, module){
+var events = require('event'),
+  toArray = require('to-array');
+
+/**
+ * Bind `el` event `type` to `fn`.
+ *
+ * @param {Element} el, NodeList, HTMLCollection or Array
+ * @param {String} type
+ * @param {Function} fn
+ * @param {Boolean} capture
+ * @api public
+ */
+
+exports.bind = function(el, type, fn, capture){
+  el = toArray(el);
+  for ( var i = 0; i < el.length; i++ ) {
+    events.bind(el[i], type, fn, capture);
+  }
+};
+
+/**
+ * Unbind `el` event `type`'s callback `fn`.
+ *
+ * @param {Element} el, NodeList, HTMLCollection or Array
+ * @param {String} type
+ * @param {Function} fn
+ * @param {Boolean} capture
+ * @api public
+ */
+
+exports.unbind = function(el, type, fn, capture){
+  el = toArray(el);
+  for ( var i = 0; i < el.length; i++ ) {
+    events.unbind(el[i], type, fn, capture);
+  }
+};
+
+});
+require.register("javve-get-by-class/index.js", function(exports, require, module){
+/**
+ * Find all elements with class `className` inside `container`.
+ * Use `single = true` to increase performance in older browsers
+ * when only one element is needed.
+ *
+ * @param {String} className
+ * @param {Element} container
+ * @param {Boolean} single
+ * @api public
+ */
+
+module.exports = (function() {
+  if (document.getElementsByClassName) {
+    return function(container, className, single) {
+      if (single) {
+        return container.getElementsByClassName(className)[0];
+      } else {
+        return container.getElementsByClassName(className);
+      }
+    };
+  } else if (document.querySelector) {
+    return function(container, className, single) {
+      className = '.' + className;
+      if (single) {
+        return container.querySelector(className);
+      } else {
+        return container.querySelectorAll(className);
+      }
+    };
+  } else {
+    return function(container, className, single) {
+      var classElements = [],
+        tag = '*';
+      if (container == null) {
+        container = document;
+      }
+      var els = container.getElementsByTagName(tag);
+      var elsLen = els.length;
+      var pattern = new RegExp("(^|\\s)"+className+"(\\s|$)");
+      for (var i = 0, j = 0; i < elsLen; i++) {
+        if ( pattern.test(els[i].className) ) {
+          if (single) {
+            return els[i];
+          } else {
+            classElements[j] = els[i];
+            j++;
+          }
+        }
+      }
+      return classElements;
+    };
+  }
+})();
+
+});
+require.register("javve-get-attribute/index.js", function(exports, require, module){
+/**
+ * Return the value for `attr` at `element`.
+ *
+ * @param {Element} el
+ * @param {String} attr
+ * @api public
+ */
+
+module.exports = function(el, attr) {
+  var result = (el.getAttribute && el.getAttribute(attr)) || null;
+  if( !result ) {
+    var attrs = el.attributes;
+    var length = attrs.length;
+    for(var i = 0; i < length; i++) {
+      if (attrs[i] !== undefined) {
+        if(attrs[i].nodeName === attr) {
+          result = attrs[i].nodeValue;
+        }
+      }
+    }
+  }
+  return result;
+}
+});
+require.register("javve-natural-sort/index.js", function(exports, require, module){
+/*
+ * Natural Sort algorithm for Javascript - Version 0.7 - Released under MIT license
+ * Author: Jim Palmer (based on chunking idea from Dave Koelle)
+ */
+
+module.exports = function(a, b, options) {
+  var re = /(^-?[0-9]+(\.?[0-9]*)[df]?e?[0-9]?$|^0x[0-9a-f]+$|[0-9]+)/gi,
+    sre = /(^[ ]*|[ ]*$)/g,
+    dre = /(^([\w ]+,?[\w ]+)?[\w ]+,?[\w ]+\d+:\d+(:\d+)?[\w ]?|^\d{1,4}[\/\-]\d{1,4}[\/\-]\d{1,4}|^\w+, \w+ \d+, \d{4})/,
+    hre = /^0x[0-9a-f]+$/i,
+    ore = /^0/,
+    options = options || {},
+    i = function(s) { return options.insensitive && (''+s).toLowerCase() || ''+s },
+    // convert all to strings strip whitespace
+    x = i(a).replace(sre, '') || '',
+    y = i(b).replace(sre, '') || '',
+    // chunk/tokenize
+    xN = x.replace(re, '\0$1\0').replace(/\0$/,'').replace(/^\0/,'').split('\0'),
+    yN = y.replace(re, '\0$1\0').replace(/\0$/,'').replace(/^\0/,'').split('\0'),
+    // numeric, hex or date detection
+    xD = parseInt(x.match(hre)) || (xN.length != 1 && x.match(dre) && Date.parse(x)),
+    yD = parseInt(y.match(hre)) || xD && y.match(dre) && Date.parse(y) || null,
+    oFxNcL, oFyNcL,
+    mult = options.desc ? -1 : 1;
+  // first try and sort Hex codes or Dates
+  if (yD)
+    if ( xD < yD ) return -1 * mult;
+    else if ( xD > yD ) return 1 * mult;
+  // natural sorting through split numeric strings and default strings
+  for(var cLoc=0, numS=Math.max(xN.length, yN.length); cLoc < numS; cLoc++) {
+    // find floats not starting with '0', string or 0 if not defined (Clint Priest)
+    oFxNcL = !(xN[cLoc] || '').match(ore) && parseFloat(xN[cLoc]) || xN[cLoc] || 0;
+    oFyNcL = !(yN[cLoc] || '').match(ore) && parseFloat(yN[cLoc]) || yN[cLoc] || 0;
+    // handle numeric vs string comparison - number < string - (Kyle Adams)
+    if (isNaN(oFxNcL) !== isNaN(oFyNcL)) { return (isNaN(oFxNcL)) ? 1 : -1; }
+    // rely on string comparison if different types - i.e. '02' < 2 != '02' < '2'
+    else if (typeof oFxNcL !== typeof oFyNcL) {
+      oFxNcL += '';
+      oFyNcL += '';
+    }
+    if (oFxNcL < oFyNcL) return -1 * mult;
+    if (oFxNcL > oFyNcL) return 1 * mult;
+  }
+  return 0;
+};
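+
+// Example (illustrative): sorting with this comparator keeps embedded
+// numbers in numeric order:
+//   ['img12', 'img10', 'img2'].sort(module.exports)
+//   -> ['img2', 'img10', 'img12']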
+
+/*
+var defaultSort = getSortFunction();
+
+module.exports = function(a, b, options) {
+  if (arguments.length == 1) {
+    options = a;
+    return getSortFunction(options);
+  } else {
+    return defaultSort(a,b);
+  }
+}
+*/
+});
+require.register("javve-to-string/index.js", function(exports, require, module){
+module.exports = function(s) {
+    s = (s === undefined) ? "" : s;
+    s = (s === null) ? "" : s;
+    s = s.toString();
+    return s;
+};
+
+});
+require.register("component-type/index.js", function(exports, require, module){
+/**
+ * toString ref.
+ */
+
+var toString = Object.prototype.toString;
+
+/**
+ * Return the type of `val`.
+ *
+ * @param {Mixed} val
+ * @return {String}
+ * @api public
+ */
+
+module.exports = function(val){
+  switch (toString.call(val)) {
+    case '[object Date]': return 'date';
+    case '[object RegExp]': return 'regexp';
+    case '[object Arguments]': return 'arguments';
+    case '[object Array]': return 'array';
+    case '[object Error]': return 'error';
+  }
+
+  if (val === null) return 'null';
+  if (val === undefined) return 'undefined';
+  if (val !== val) return 'nan';
+  if (val && val.nodeType === 1) return 'element';
+
+  return typeof val.valueOf();
+};
+
+});
+require.register("list.js/index.js", function(exports, require, module){
+/*
+List.js beta 1.0.0
+By Jonny Strömberg (www.jonnystromberg.com, www.listjs.com)
+*/
+(function( window, undefined ) {
+"use strict";
+
+var document = window.document,
+    getByClass = require('get-by-class'),
+    extend = require('extend'),
+    indexOf = require('indexof');
+
+var List = function(id, options, values) {
+
+    var self = this,
+        init,
+        Item = require('./src/item')(self),
+        addAsync = require('./src/add-async')(self),
+        parse = require('./src/parse')(self);
+
+    init = {
+        start: function() {
+            self.listClass      = "list";
+            self.searchClass    = "search";
+            self.sortClass      = "sort";
+            self.page           = 200;
+            self.i              = 1;
+            self.items          = [];
+            self.visibleItems   = [];
+            self.matchingItems  = [];
+            self.searched       = false;
+            self.filtered       = false;
+            self.handlers       = { 'updated': [] };
+            self.plugins        = {};
+            self.helpers        = {
+                getByClass: getByClass,
+                extend: extend,
+                indexOf: indexOf
+            };
+
+            extend(self, options);
+
+            self.listContainer = (typeof(id) === 'string') ? document.getElementById(id) : id;
+            if (!self.listContainer) { return; }
+            self.list           = getByClass(self.listContainer, self.listClass, true);
+
+            self.templater      = require('./src/templater')(self);
+            self.search         = require('./src/search')(self);
+            self.filter         = require('./src/filter')(self);
+            self.sort           = require('./src/sort')(self);
+
+            this.items();
+            self.update();
+            this.plugins();
+        },
+        items: function() {
+            parse(self.list);
+            if (values !== undefined) {
+                self.add(values);
+            }
+        },
+        plugins: function() {
+            for (var i = 0; i < self.plugins.length; i++) {
+                var plugin = self.plugins[i];
+                self[plugin.name] = plugin;
+                plugin.init(self);
+            }
+        }
+    };
+
+
+    /*
+    * Add object to list
+    */
+    this.add = function(values, callback) {
+        if (callback) {
+            addAsync(values, callback);
+            return;
+        }
+        var added = [],
+            notCreate = false;
+        if (values[0] === undefined){
+            values = [values];
+        }
+        for (var i = 0, il = values.length; i < il; i++) {
+            var item = null;
+            if (values[i] instanceof Item) {
+                item = values[i];
+                item.reload();
+            } else {
+                notCreate = (self.items.length > self.page) ? true : false;
+                item = new Item(values[i], undefined, notCreate);
+            }
+            self.items.push(item);
+            added.push(item);
+        }
+        self.update();
+        return added;
+    };
+
+    this.show = function(i, page) {
+        this.i = i;
+        this.page = page;
+        self.update();
+        return self;
+    };
+
+    /* Removes object from list.
+    * Loops through the list and removes objects where
+    * property "valuename" === value
+    */
+    this.remove = function(valueName, value, options) {
+        var found = 0;
+        for (var i = 0, il = self.items.length; i < il; i++) {
+            if (self.items[i].values()[valueName] == value) {
+                self.templater.remove(self.items[i], options);
+                self.items.splice(i,1);
+                il--;
+                i--;
+                found++;
+            }
+        }
+        self.update();
+        return found;
+    };
+
+    /* Gets the objects in the list which
+    * property "valueName" === value
+    */
+    this.get = function(valueName, value) {
+        var matchedItems = [];
+        for (var i = 0, il = self.items.length; i < il; i++) {
+            var item = self.items[i];
+            if (item.values()[valueName] == value) {
+                matchedItems.push(item);
+            }
+        }
+        return matchedItems;
+    };
+
+    /*
+    * Get size of the list
+    */
+    this.size = function() {
+        return self.items.length;
+    };
+
+    /*
+    * Removes all items from the list
+    */
+    this.clear = function() {
+        self.templater.clear();
+        self.items = [];
+        return self;
+    };
+
+    this.on = function(event, callback) {
+        self.handlers[event].push(callback);
+        return self;
+    };
+
+    this.off = function(event, callback) {
+        var e = self.handlers[event];
+        var index = indexOf(e, callback);
+        if (index > -1) {
+            e.splice(index, 1);
+        }
+        return self;
+    };
+
+    this.trigger = function(event) {
+        var i = self.handlers[event].length;
+        while(i--) {
+            self.handlers[event][i](self);
+        }
+        return self;
+    };
+
+    this.reset = {
+        filter: function() {
+            var is = self.items,
+                il = is.length;
+            while (il--) {
+                is[il].filtered = false;
+            }
+            return self;
+        },
+        search: function() {
+            var is = self.items,
+                il = is.length;
+            while (il--) {
+                is[il].found = false;
+            }
+            return self;
+        }
+    };
+
+    this.update = function() {
+        var is = self.items,
+            il = is.length;
+
+        self.visibleItems = [];
+        self.matchingItems = [];
+        self.templater.clear();
+        for (var i = 0; i < il; i++) {
+            if (is[i].matching() && ((self.matchingItems.length+1) >= self.i && self.visibleItems.length < self.page)) {
+                is[i].show();
+                self.visibleItems.push(is[i]);
+                self.matchingItems.push(is[i]);
+            } else if (is[i].matching()) {
+                self.matchingItems.push(is[i]);
+                is[i].hide();
+            } else {
+                is[i].hide();
+            }
+        }
+        self.trigger('updated');
+        return self;
+    };
+
+    init.start();
+};
+
+module.exports = List;
+
+})(window);
+
+});
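+
+// Typical usage of the List constructor (illustrative; ids and valueNames
+// are made up):
+//   var userList = new List('users', { valueNames: ['name', 'email'] });
+//   userList.add({ name: 'Ada', email: 'ada@example.com' });
+//   userList.sort('name', { order: 'asc' });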
+require.register("list.js/src/search.js", function(exports, require, module){
+var events = require('events'),
+    getByClass = require('get-by-class'),
+    toString = require('to-string');
+
+module.exports = function(list) {
+    var item,
+        text,
+        columns,
+        searchString,
+        customSearch;
+
+    var prepare = {
+        resetList: function() {
+            list.i = 1;
+            list.templater.clear();
+            customSearch = undefined;
+        },
+        setOptions: function(args) {
+            if (args.length == 2 && args[1] instanceof Array) {
+                columns = args[1];
+            } else if (args.length == 2 && typeof(args[1]) == "function") {
+                customSearch = args[1];
+            } else if (args.length == 3) {
+                columns = args[1];
+                customSearch = args[2];
+            }
+        },
+        setColumns: function() {
+            columns = (columns === undefined) ? prepare.toArray(list.items[0].values()) : columns;
+        },
+        setSearchString: function(s) {
+            s = toString(s).toLowerCase();
+            s = s.replace(/[-[\]{}()*+?.,\\^$|#]/g, "\\$&"); // Escape regular expression characters
+            searchString = s;
+        },
+        toArray: function(values) {
+            var tmpColumn = [];
+            for (var name in values) {
+                tmpColumn.push(name);
+            }
+            return tmpColumn;
+        }
+    };
+    var search = {
+        list: function() {
+            for (var k = 0, kl = list.items.length; k < kl; k++) {
+                search.item(list.items[k]);
+            }
+        },
+        item: function(item) {
+            item.found = false;
+            for (var j = 0, jl = columns.length; j < jl; j++) {
+                if (search.values(item.values(), columns[j])) {
+                    item.found = true;
+                    return;
+                }
+            }
+        },
+        values: function(values, column) {
+            if (values.hasOwnProperty(column)) {
+                text = toString(values[column]).toLowerCase();
+                if ((searchString !== "") && (text.search(searchString) > -1)) {
+                    return true;
+                }
+            }
+            return false;
+        },
+        reset: function() {
+            list.reset.search();
+            list.searched = false;
+        }
+    };
+
+    var searchMethod = function(str) {
+        list.trigger('searchStart');
+
+        prepare.resetList();
+        prepare.setSearchString(str);
+        prepare.setOptions(arguments); // str, cols|searchFunction, searchFunction
+        prepare.setColumns();
+
+        if (searchString === "" ) {
+            search.reset();
+        } else {
+            list.searched = true;
+            if (customSearch) {
+                customSearch(searchString, columns);
+            } else {
+                search.list();
+            }
+        }
+
+        list.update();
+        list.trigger('searchComplete');
+        return list.visibleItems;
+    };
+
+    list.handlers.searchStart = list.handlers.searchStart || [];
+    list.handlers.searchComplete = list.handlers.searchComplete || [];
+
+    events.bind(getByClass(list.listContainer, list.searchClass), 'keyup', function(e) {
+        var target = e.target || e.srcElement, // IE have srcElement
+            alreadyCleared = (target.value === "" && !list.searched);
+        if (!alreadyCleared) { // If oninput already have resetted the list, do nothing
+            searchMethod(target.value);
+        }
+    });
+
+    // Used to detect click on HTML5 clear button
+    events.bind(getByClass(list.listContainer, list.searchClass), 'input', function(e) {
+        var target = e.target || e.srcElement;
+        if (target.value === "") {
+            searchMethod('');
+        }
+    });
+
+    list.helpers.toString = toString;
+    return searchMethod;
+};
+
+});
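+
+// e.g. (illustrative): myList.search('ada', ['name']) re-renders the list to
+// show only items whose "name" value contains "ada", and returns them.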
+require.register("list.js/src/sort.js", function(exports, require, module){
+var naturalSort = require('natural-sort'),
+    classes = require('classes'),
+    events = require('events'),
+    getByClass = require('get-by-class'),
+    getAttribute = require('get-attribute');
+
+module.exports = function(list) {
+    list.sortFunction = list.sortFunction || function(itemA, itemB, options) {
+        options.desc = options.order == "desc" ? true : false; // Natural sort uses this format
+        return naturalSort(itemA.values()[options.valueName], itemB.values()[options.valueName], options);
+    };
+
+    var buttons = {
+        els: undefined,
+        clear: function() {
+            for (var i = 0, il = buttons.els.length; i < il; i++) {
+                classes(buttons.els[i]).remove('asc');
+                classes(buttons.els[i]).remove('desc');
+            }
+        },
+        getOrder: function(btn) {
+            var predefinedOrder = getAttribute(btn, 'data-order');
+            if (predefinedOrder == "asc" || predefinedOrder == "desc") {
+                return predefinedOrder;
+            } else if (classes(btn).has('desc')) {
+                return "asc";
+            } else if (classes(btn).has('asc')) {
+                return "desc";
+            } else {
+                return "asc";
+            }
+        },
+        getInSensitive: function(btn, options) {
+            var insensitive = getAttribute(btn, 'data-insensitive');
+            if (insensitive === "true") {
+                options.insensitive = true;
+            } else {
+                options.insensitive = false;
+            }
+        },
+        setOrder: function(options) {
+            for (var i = 0, il = buttons.els.length; i < il; i++) {
+                var btn = buttons.els[i];
+                if (getAttribute(btn, 'data-sort') !== options.valueName) {
+                    continue;
+                }
+                var predefinedOrder = getAttribute(btn, 'data-order');
+                if (predefinedOrder == "asc" || predefinedOrder == "desc") {
+                    if (predefinedOrder == options.order) {
+                        classes(btn).add(options.order);
+                    }
+                } else {
+                    classes(btn).add(options.order);
+                }
+            }
+        }
+    };
+    var sort = function() {
+        list.trigger('sortStart');
+        var options = {};
+
+        var target = arguments[0].currentTarget || arguments[0].srcElement || undefined;
+
+        if (target) {
+            options.valueName = getAttribute(target, 'data-sort');
+            buttons.getInSensitive(target, options);
+            options.order = buttons.getOrder(target);
+        } else {
+            options = arguments[1] || options;
+            options.valueName = arguments[0];
+            options.order = options.order || "asc";
+            options.insensitive = (typeof options.insensitive == "undefined") ? true : options.insensitive;
+        }
+        buttons.clear();
+        buttons.setOrder(options);
+
+        options.sortFunction = options.sortFunction || list.sortFunction;
+        list.items.sort(function(a, b) {
+            return options.sortFunction(a, b, options);
+        });
+        list.update();
+        list.trigger('sortComplete');
+    };
+
+    // Add handlers
+    list.handlers.sortStart = list.handlers.sortStart || [];
+    list.handlers.sortComplete = list.handlers.sortComplete || [];
+
+    buttons.els = getByClass(list.listContainer, list.sortClass);
+    events.bind(buttons.els, 'click', sort);
+    list.on('searchStart', buttons.clear);
+    list.on('filterStart', buttons.clear);
+
+    // Helpers
+    list.helpers.classes = classes;
+    list.helpers.naturalSort = naturalSort;
+    list.helpers.events = events;
+    list.helpers.getAttribute = getAttribute;
+
+    return sort;
+};
+
+});
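+
+// Markup example (illustrative): a button that sorts the "name" column,
+// case-insensitively:
+//   <span class="sort" data-sort="name" data-insensitive="true">Name</span>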
+require.register("list.js/src/item.js", function(exports, require, module){
+module.exports = function(list) {
+    return function(initValues, element, notCreate) {
+        var item = this;
+
+        this._values = {};
+
+        this.found = false; // Show if list.searched == true and this.found == true
+        this.filtered = false;// Show if list.filtered == true and this.filtered == true
+
+        var init = function(initValues, element, notCreate) {
+            if (element === undefined) {
+                if (notCreate) {
+                    item.values(initValues, notCreate);
+                } else {
+                    item.values(initValues);
+                }
+            } else {
+                item.elm = element;
+                var values = list.templater.get(item, initValues);
+                item.values(values);
+            }
+        };
+        this.values = function(newValues, notCreate) {
+            if (newValues !== undefined) {
+                for(var name in newValues) {
+                    item._values[name] = newValues[name];
+                }
+                if (notCreate !== true) {
+                    list.templater.set(item, item.values());
+                }
+            } else {
+                return item._values;
+            }
+        };
+        this.show = function() {
+            list.templater.show(item);
+        };
+        this.hide = function() {
+            list.templater.hide(item);
+        };
+        this.matching = function() {
+            return (
+                (list.filtered && list.searched && item.found && item.filtered) ||
+                (list.filtered && !list.searched && item.filtered) ||
+                (!list.filtered && list.searched && item.found) ||
+                (!list.filtered && !list.searched)
+            );
+        };
+        this.visible = function() {
+            return (item.elm.parentNode == list.list) ? true : false;
+        };
+        init(initValues, element, notCreate);
+    };
+};
+
+});
+require.register("list.js/src/templater.js", function(exports, require, module){
+var getByClass = require('get-by-class');
+
+var Templater = function(list) {
+    var itemSource = getItemSource(list.item),
+        templater = this;
+
+    function getItemSource(item) {
+        if (item === undefined) {
+            var nodes = list.list.childNodes,
+                items = [];
+
+            for (var i = 0, il = nodes.length; i < il; i++) {
+                // Only textnodes have a data attribute
+                if (nodes[i].data === undefined) {
+                    return nodes[i];
+                }
+            }
+            return null;
+        } else if (item.indexOf("<") !== -1) { // Try create html element of list, do not work for tables!!
+            var div = document.createElement('div');
+            div.innerHTML = item;
+            return div.firstChild;
+        } else {
+            return document.getElementById(list.item);
+        }
+    }
+
+    /* Get values from element */
+    this.get = function(item, valueNames) {
+        templater.create(item);
+        var values = {};
+        for(var i = 0, il = valueNames.length; i < il; i++) {
+            var elm = getByClass(item.elm, valueNames[i], true);
+            values[valueNames[i]] = elm ? elm.innerHTML : "";
+        }
+        return values;
+    };
+
+    /* Sets values at element */
+    this.set = function(item, values) {
+        if (!templater.create(item)) {
+            for(var v in values) {
+                if (values.hasOwnProperty(v)) {
+                    // TODO speed up if possible
+                    var elm = getByClass(item.elm, v, true);
+                    if (elm) {
+                        /* src attribute for image tag & text for other tags */
+                        if (elm.tagName === "IMG" && values[v] !== "") {
+                            elm.src = values[v];
+                        } else {
+                            elm.innerHTML = values[v];
+                        }
+                    }
+                }
+            }
+        }
+    };
+
+    this.create = function(item) {
+        if (item.elm !== undefined) {
+            return false;
+        }
+        /* If item source does not exists, use the first item in list as
+        source for new items */
+        var newItem = itemSource.cloneNode(true);
+        newItem.removeAttribute('id');
+        item.elm = newItem;
+        templater.set(item, item.values());
+        return true;
+    };
+    this.remove = function(item) {
+        list.list.removeChild(item.elm);
+    };
+    this.show = function(item) {
+        templater.create(item);
+        list.list.appendChild(item.elm);
+    };
+    this.hide = function(item) {
+        if (item.elm !== undefined && item.elm.parentNode === list.list) {
+            list.list.removeChild(item.elm);
+        }
+    };
+    this.clear = function() {
+        /* .innerHTML = ''; breaks IE */
+        if (list.list.hasChildNodes()) {
+            while (list.list.childNodes.length >= 1)
+            {
+                list.list.removeChild(list.list.firstChild);
+            }
+        }
+    };
+};
+
+module.exports = function(list) {
+    return new Templater(list);
+};
+
+});
+require.register("list.js/src/filter.js", function(exports, require, module){
+module.exports = function(list) {
+
+    // Add handlers
+    list.handlers.filterStart = list.handlers.filterStart || [];
+    list.handlers.filterComplete = list.handlers.filterComplete || [];
+
+    return function(filterFunction) {
+        list.trigger('filterStart');
+        list.i = 1; // Reset paging
+        list.reset.filter();
+        if (filterFunction === undefined) {
+            list.filtered = false;
+        } else {
+            list.filtered = true;
+            var is = list.items;
+            for (var i = 0, il = is.length; i < il; i++) {
+                var item = is[i];
+                if (filterFunction(item)) {
+                    item.filtered = true;
+                } else {
+                    item.filtered = false;
+                }
+            }
+        }
+        list.update();
+        list.trigger('filterComplete');
+        return list.visibleItems;
+    };
+};
+
+});
+require.register("list.js/src/add-async.js", function(exports, require, module){
+module.exports = function(list) {
+    var addAsync = function(values, callback, items) {
+        var valuesToAdd = values.splice(0, 100);
+        items = items || [];
+        items = items.concat(list.add(valuesToAdd));
+        if (values.length > 0) {
+            setTimeout(function() {
+                addAsync(values, callback, items);
+            }, 10);
+        } else {
+            list.update();
+            callback(items);
+        }
+    };
+    return addAsync;
+};
+});
+require.register("list.js/src/parse.js", function(exports, require, module){
+module.exports = function(list) {
+
+    var Item = require('./item')(list);
+
+    var getChildren = function(parent) {
+        var nodes = parent.childNodes,
+            items = [];
+        for (var i = 0, il = nodes.length; i < il; i++) {
+            // Only textnodes have a data attribute
+            if (nodes[i].data === undefined) {
+                items.push(nodes[i]);
+            }
+        }
+        return items;
+    };
+
+    var parse = function(itemElements, valueNames) {
+        for (var i = 0, il = itemElements.length; i < il; i++) {
+            list.items.push(new Item(valueNames, itemElements[i]));
+        }
+    };
+    var parseAsync = function(itemElements, valueNames) {
+        var itemsToIndex = itemElements.splice(0, 100); // TODO: If < 100 items, what happens in IE etc?
+        parse(itemsToIndex, valueNames);
+        if (itemElements.length > 0) {
+            setTimeout(function() {
+                parseAsync(itemElements, valueNames);
+            }, 10);
+        } else {
+            list.update();
+            // TODO: Add indexed callback
+        }
+    };
+
+    return function() {
+        var itemsToIndex = getChildren(list.list),
+            valueNames = list.valueNames;
+
+        if (list.indexAsync) {
+            parseAsync(itemsToIndex, valueNames);
+        } else {
+            parse(itemsToIndex, valueNames);
+        }
+    };
+};
+
+});
+
+
+require.alias("component-classes/index.js", "list.js/deps/classes/index.js");
+require.alias("component-classes/index.js", "classes/index.js");
+require.alias("component-indexof/index.js", "component-classes/deps/indexof/index.js");
+
+require.alias("segmentio-extend/index.js", "list.js/deps/extend/index.js");
+require.alias("segmentio-extend/index.js", "extend/index.js");
+
+require.alias("component-indexof/index.js", "list.js/deps/indexof/index.js");
+require.alias("component-indexof/index.js", "indexof/index.js");
+
+require.alias("javve-events/index.js", "list.js/deps/events/index.js");
+require.alias("javve-events/index.js", "events/index.js");
+require.alias("component-event/index.js", "javve-events/deps/event/index.js");
+
+require.alias("timoxley-to-array/index.js", "javve-events/deps/to-array/index.js");
+
+require.alias("javve-get-by-class/index.js", "list.js/deps/get-by-class/index.js");
+require.alias("javve-get-by-class/index.js", "get-by-class/index.js");
+
+require.alias("javve-get-attribute/index.js", "list.js/deps/get-attribute/index.js");
+require.alias("javve-get-attribute/index.js", "get-attribute/index.js");
+
+require.alias("javve-natural-sort/index.js", "list.js/deps/natural-sort/index.js");
+require.alias("javve-natural-sort/index.js", "natural-sort/index.js");
+
+require.alias("javve-to-string/index.js", "list.js/deps/to-string/index.js");
+require.alias("javve-to-string/index.js", "list.js/deps/to-string/index.js");
+require.alias("javve-to-string/index.js", "to-string/index.js");
+require.alias("javve-to-string/index.js", "javve-to-string/index.js");
+require.alias("component-type/index.js", "list.js/deps/type/index.js");
+require.alias("component-type/index.js", "type/index.js");
+if (typeof exports == "object") {
+  module.exports = require("list.js");
+} else if (typeof define == "function" && define.amd) {
+  define(function(){ return require("list.js"); });
+} else {
+  this["List"] = require("list.js");
+}})();
\ No newline at end of file
diff --git a/apps/workbench/app/assets/javascripts/log_viewer.js b/apps/workbench/app/assets/javascripts/log_viewer.js
new file mode 100644 (file)
index 0000000..b201ed7
--- /dev/null
@@ -0,0 +1,286 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+function newTaskState() {
+    return {"complete_count": 0,
+            "failure_count": 0,
+            "task_count": 0,
+            "incomplete_count": 0,
+            "nodes": []};
+}
+
+function addToLogViewer(logViewer, lines, taskState) {
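+    // Matches crunch job log lines of the form (a hypothetical example):
+    //   2014-03-10_14:02:31 zzzzz-8i9sb-0vsrcqi7whchuil 6473 5 stderr crunchstat: ...
+    // Groups: 2-4 date, 6-8 time, 9 job uuid, 10 pid, 11 optional task
+    // sequence number, 12 message.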
+    var re = /((\d\d\d\d)-(\d\d)-(\d\d))_((\d\d):(\d\d):(\d\d)) ([a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}) (\d+) (\d+)? (.*)/;
+
+    var items = [];
+    var count = logViewer.items.length;
+    for (var a in lines) {
+        var v = lines[a].match(re);
+        if (v != null) {
+
+            var ts = new Date(Date.UTC(v[2], v[3]-1, v[4], v[6], v[7], v[8]));
+
+            var v11 = v[11];
+            if (typeof v[11] === 'undefined') {
+                v11 = "";
+            } else {
+                v11 = Number(v11);
+            }
+
+            var message = v[12];
+            var type = "";
+            var node = "";
+            var slot = "";
+            if (v11 !== "") {
+                if (!taskState.hasOwnProperty(v11)) {
+                    taskState[v11] = {};
+                    taskState.task_count += 1;
+                }
+
+                if (/^stderr /.test(message)) {
+                    message = message.substr(7);
+                    if (/^crunchstat: /.test(message)) {
+                        type = "crunchstat";
+                        message = message.substr(12);
+                    } else if (/^srun: /.test(message) || /^slurmd/.test(message)) {
+                        type = "task-dispatch";
+                    } else {
+                        type = "task-print";
+                    }
+                } else {
+                    var m;
+                    if (m = /^success in (\d+) second/.exec(message)) {
+                        taskState[v11].outcome = "success";
+                        taskState[v11].runtime = Number(m[1]);
+                        taskState.complete_count += 1;
+                    }
+                    else if (m = /^failure \(\#\d+, (temporary|permanent)\) after (\d+) second/.exec(message)) {
+                        taskState[v11].outcome = "failure";
+                        taskState[v11].runtime = Number(m[2]);
+                        taskState.failure_count += 1;
+                        if (m[1] == "permanent") {
+                            taskState.incomplete_count += 1;
+                        }
+                    }
+                    else if (m = /^child \d+ started on ([^.]*)\.(\d+)/.exec(message)) {
+                        taskState[v11].node = m[1];
+                        taskState[v11].slot = m[2];
+                        if (taskState.nodes.indexOf(m[1], 0) == -1) {
+                            taskState.nodes.push(m[1]);
+                        }
+                        for (var i in items) {
+                            if (i > 0) {
+                                if (items[i].taskid === v11) {
+                                    items[i].node = m[1];
+                                    items[i].slot = m[2];
+                                }
+                            }
+                        }
+                    }
+                    type = "task-dispatch";
+                }
+                node = taskState[v11].node;
+                slot = taskState[v11].slot;
+            } else {
+                type = "crunch";
+            }
+
+            items.push({
+                id: count,
+                ts: ts,
+                timestamp: ts.toLocaleDateString() + " " + ts.toLocaleTimeString(),
+                taskid: v11,
+                node: node,
+                slot: slot,
+                message: message.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;'),
+                type: type
+            });
+            count += 1;
+        } else {
+            console.log("Did not parse line " + a + ": " + lines[a]);
+        }
+    }
+    logViewer.add(items);
+}
+
+function sortById(a, b, opt) {
+    a = a.values();
+    b = b.values();
+
+    if (a["id"] > b["id"]) {
+        return 1;
+    }
+    if (a["id"] < b["id"]) {
+        return -1;
+    }
+    return 0;
+}
+
+function sortByTask(a, b, opt) {
+    var aa = a.values();
+    var bb = b.values();
+
+    if (aa["taskid"] === "" && bb["taskid"] !== "") {
+        return -1;
+    }
+    if (aa["taskid"] !== "" && bb["taskid"] === "") {
+        return 1;
+    }
+
+    if (aa["taskid"] !== "" && bb["taskid"] !== "") {
+        if (aa["taskid"] > bb["taskid"]) {
+            return 1;
+        }
+        if (aa["taskid"] < bb["taskid"]) {
+            return -1;
+        }
+    }
+
+    return sortById(a, b, opt);
+}
+
+function sortByNode(a, b, opt) {
+    var aa = a.values();
+    var bb = b.values();
+
+    if (aa["node"] === "" && bb["node"] !== "") {
+        return -1;
+    }
+    if (aa["node"] !== "" && bb["node"] === "") {
+        return 1;
+    }
+
+    if (aa["node"] !== "" && bb["node"] !== "") {
+        if (aa["node"] > bb["node"]) {
+            return 1;
+        }
+        if (aa["node"] < bb["node"]) {
+            return -1;
+        }
+    }
+
+    if (aa["slot"] !== "" && bb["slot"] !== "") {
+        if (aa["slot"] > bb["slot"]) {
+            return 1;
+        }
+        if (aa["slot"] < bb["slot"]) {
+            return -1;
+        }
+    }
+
+    return sortById(a, b, opt);
+}
+
+
+function dumbPluralize(n, s, p) {
+    if (typeof p === 'undefined') {
+        p = "s";
+    }
+    if (n == 0 || n > 1) {
+        return n + " " + (s + p);
+    } else {
+        return n + " " + s;
+    }
+}
+
+function generateJobOverview(id, logViewer, taskState) {
+    var html = "";
+
+    if (logViewer.items.length > 2) {
+        var first = logViewer.items[1];
+        var last = logViewer.items[logViewer.items.length-1];
+        var duration = (last.values().ts.getTime() - first.values().ts.getTime()) / 1000;
+
+        var hours = 0;
+        var minutes = 0;
+        var seconds;
+
+        if (duration >= 3600) {
+            hours = Math.floor(duration / 3600);
+            duration -= (hours * 3600);
+        }
+        if (duration >= 60) {
+            minutes = Math.floor(duration / 60);
+            duration -= (minutes * 60);
+        }
+        seconds = duration;
+
+        var tcount = taskState.task_count;
+
+        html += "<p>";
+        html += "Started at " + first.values().timestamp + ".  ";
+        html += "Ran " + dumbPluralize(tcount, " task") + " over ";
+        if (hours > 0) {
+            html += dumbPluralize(hours, " hour");
+        }
+        if (minutes > 0) {
+            html += " " + dumbPluralize(minutes, " minute");
+        }
+        if (seconds > 0) {
+            html += " " + dumbPluralize(seconds, " second");
+        }
+
+        html += " using " + dumbPluralize(taskState.nodes.length, " node");
+
+        html += ".  " + dumbPluralize(taskState.complete_count, "task") + " completed";
+        html += ",  " + dumbPluralize(taskState.incomplete_count, "task") +  " incomplete";
+        html += " (" + dumbPluralize(taskState.failure_count, " failure") + ")";
+
+        html += ".  Finished at " + last.values().timestamp + ".";
+        html += "</p>";
+    } else {
+       html = "<p>Job log is empty or failed to load.</p>";
+    }
+
+    $(id).html(html);
+}
+
+function gotoPage(n, logViewer, page, id) {
+    if (n < 0) { return; }
+    if (n*page > logViewer.matchingItems.length) { return; }
+    logViewer.page_offset = n;
+    logViewer.show(n*page, page);
+}
+
+function updatePaging(id, logViewer, page) {
+    var p = "";
+    var i = logViewer.matchingItems.length;
+    var n;
+    for (n = 0; (n*page) < i; n += 1) {
+        if (n == logViewer.page_offset) {
+            p += "<span class='log-viewer-page-num'>" + (n+1) + "</span> ";
+        } else {
+            p += "<a href=\"#\" class='log-viewer-page-num log-viewer-page-" + n + "'>" + (n+1) + "</a> ";
+        }
+    }
+    $(id).html(p);
+    for (n = 0; (n*page) < i; n += 1) {
+        (function(n) {
+            $(".log-viewer-page-" + n).on("click", function() {
+                gotoPage(n, logViewer, page, id);
+                return false;
+            });
+        })(n);
+    }
+
+    if (logViewer.page_offset == 0) {
+        $(".log-viewer-page-up").addClass("text-muted");
+    } else {
+        $(".log-viewer-page-up").removeClass("text-muted");
+    }
+
+    if (logViewer.page_offset == (n-1)) {
+        $(".log-viewer-page-down").addClass("text-muted");
+    } else {
+        $(".log-viewer-page-down").removeClass("text-muted");
+    }
+}
+
+function nextPage(logViewer, page, id) {
+    gotoPage(logViewer.page_offset+1, logViewer, page, id);
+}
+
+function prevPage(logViewer, page, id) {
+    gotoPage(logViewer.page_offset-1, logViewer, page, id);
+}
diff --git a/apps/workbench/app/assets/javascripts/mithril_mount.js b/apps/workbench/app/assets/javascripts/mithril_mount.js
new file mode 100644 (file)
index 0000000..7995ffe
--- /dev/null
@@ -0,0 +1,10 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
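+// Mount a Mithril component on each element that names one in its
+// data-mount-mithril attribute; the element's remaining data attributes
+// are passed to the component as attrs. E.g. (hypothetical component):
+//   <div data-mount-mithril="SessionsTable"></div>
+// mounts window.SessionsTable here.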
+$(document).on('ready arv:pane:loaded', function() {
+    $('[data-mount-mithril]').each(function() {
+        var data = $(this).data()
+        m.mount(this, {view: function () {return m(window[data.mountMithril], data)}})
+    })
+})
diff --git a/apps/workbench/app/assets/javascripts/modal_pager.js b/apps/workbench/app/assets/javascripts/modal_pager.js
new file mode 100644 (file)
index 0000000..ffa45ee
--- /dev/null
@@ -0,0 +1,48 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Usage:
+//
+// 1. Add some buttons to your modal, one with class="pager-next" and
+// one with class="pager-prev".
+//
+// 2. Put multiple .modal-body sections in your modal.
+//
+// 3. Add a "pager-count" div where page count is shown.
+// For ex: "1 of 10" when showing first page of 10 pages.
+
+$(document).on('click', '.modal .pager-next', function() {
+    var $modal = $(this).parents('.modal');
+    $modal.data('page', ($modal.data('page') || 0) + 1).trigger('pager:render');
+    return false;
+}).on('click', '.modal .pager-prev', function() {
+    var $modal = $(this).parents('.modal');
+    $modal.data('page', ($modal.data('page') || 1) - 1).trigger('pager:render');
+    return false;
+}).on('ready ajax:success', function() {
+    $('.modal').trigger('pager:render');
+}).on('pager:render', '.modal', function() {
+    var $modal = $(this);
+    var page = $modal.data('page') || 0;
+    var $panes = $('.modal-body', $modal);
+    if (page >= $panes.length) {
+        // Somehow moved past end
+        page = $panes.length - 1;
+        $modal.data('page', page);
+    } else if (page < 0) {
+        page = 0;
+    }
+
+    var $pager_count = $('.pager-count', $modal);
+    $pager_count.text((page+1) + " of " + $panes.length);
+
+    $panes.hide().eq(page).show();
+    enableButton($('.pager-prev', $modal), page > 0);
+    enableButton($('.pager-next', $modal), page < $panes.length - 1);
+    function enableButton(btn, ok) {
+        btn.prop('disabled', !ok).
+            toggleClass('btn-primary', ok).
+            toggleClass('btn-default', !ok);
+    }
+});
diff --git a/apps/workbench/app/assets/javascripts/models/loader.js b/apps/workbench/app/assets/javascripts/models/loader.js
new file mode 100644 (file)
index 0000000..0b29de6
--- /dev/null
@@ -0,0 +1,159 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// MultipageLoader retrieves a multi-page result set from the
+// server. The constructor initiates the first page load.
+//
+// config.loadFunc is a function that accepts an array of
+// paging-related filters, and returns a promise for the API
+// response. loadFunc() must retrieve results in "modified_at desc"
+// order.
+//
+// state is:
+// * 'loading' if a network request is in progress;
+// * 'done' if there are no more items to load;
+// * 'ready' otherwise.
+//
+// items is a stream that resolves to an array of all items retrieved so far.
+//
+// loadMore() loads the next page, if any.
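+//
+// Usage sketch ("session" here stands for any object carrying baseURL
+// and token, as used elsewhere in this tree):
+//
+//   var loader = new MultipageLoader({
+//       loadFunc: function(filters) {
+//           return m.request(session.baseURL + 'arvados/v1/collections', {
+//               headers: {authorization: 'OAuth2 ' + session.token},
+//               data: {
+//                   filters: JSON.stringify(filters),
+//                   order: 'modified_at desc',
+//               },
+//           })
+//       },
+//   })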
+window.MultipageLoader = function(config) {
+    var loader = this
+    Object.assign(loader, config, {
+        state: 'ready',
+        DONE: 'done',
+        LOADING: 'loading',
+        READY: 'ready',
+
+        items: m.stream([]),
+        thresholdItem: null,
+        loadMore: function() {
+            if (loader.state == loader.DONE || loader.state == loader.LOADING)
+                return
+            var filters = loader.thresholdItem ? [
+                ["modified_at", "<=", loader.thresholdItem.modified_at],
+                ["uuid", "!=", loader.thresholdItem.uuid],
+            ] : []
+            loader.state = loader.LOADING
+            loader.loadFunc(filters).then(function(resp) {
+                var items = loader.items()
+                Array.prototype.push.apply(items, resp.items)
+                if (resp.items.length == 0) {
+                    loader.state = loader.DONE
+                } else {
+                    loader.thresholdItem = resp.items[resp.items.length-1]
+                    loader.state = loader.READY
+                }
+                loader.items(items)
+            }).catch(function(err) {
+                loader.err = err
+                loader.state = loader.READY
+            })
+        },
+    })
+    loader.loadMore()
+}
+
+// MergingLoader merges results from multiple loaders (given in the
+// config.children array) into a single result set.
+//
+// new MergingLoader({children: [loader, loader, ...]})
+//
+// The children must retrieve results in "modified_at desc" order.
+window.MergingLoader = function(config) {
+    var loader = this
+    Object.assign(loader, config, {
+        // Sorted items ready to display, merged from all children.
+        items: m.stream([]),
+        state: 'ready',
+        DONE: 'done',
+        LOADING: 'loading',
+        READY: 'ready',
+        loadable: function() {
+            // Return an array of children that we could call
+            // loadMore() on. Update loader.state.
+            loader.state = loader.DONE
+            return loader.children.filter(function(child) {
+                if (child.state == child.DONE)
+                    return false
+                if (child.state == child.LOADING) {
+                    loader.state = loader.LOADING
+                    return false
+                }
+                if (loader.state == loader.DONE)
+                    loader.state = loader.READY
+                return true
+            })
+        },
+        loadMore: function() {
+            // Call loadMore() on children that have reached
+            // lowWaterMark.
+            loader.loadable().map(function(child) {
+                if (child.items().length - child.itemsDisplayed < loader.lowWaterMark) {
+                    loader.state = loader.LOADING
+                    child.loadMore()
+                }
+            })
+        },
+        mergeItems: function() {
+            // We want to avoid moving items around on the screen once
+            // they're displayed.
+            //
+            // To this end, here we find the last safely displayable
+            // item ("cutoff") by getting the last item from each
+            // unfinished child, and taking the topmost (most recent)
+            // one of those.
+            //
+            // (If we were to display an item below that cutoff, the
+            // next page of results from an unfinished child could
+            // include items that get inserted above the cutoff,
+            // causing the cutoff item to move down.)
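+            //
+            // Worked example (hypothetical timestamps): child A has
+            // loaded items with modified_at 10,8,5 and child B has
+            // 9,7, both unfinished. cutoff = max(5,7) = 7, so A shows
+            // 10,8 (5 is withheld) and B shows 9,7; later pages can
+            // then only append rows at or below the bottom of the
+            // merged list 10,9,8,7.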
+            var cutoff
+            var cutoffUnknown = false
+            loader.children.forEach(function(child) {
+                if (child.state == child.DONE)
+                    return
+                var items = child.items()
+                if (items.length == 0) {
+                    // No idea what's coming in the next page.
+                    cutoffUnknown = true
+                    return
+                }
+                var last = items[items.length-1].modified_at
+                if (!cutoff || cutoff < last)
+                    cutoff = last
+            })
+            if (cutoffUnknown)
+                return
+            var combined = []
+            loader.children.forEach(function(child) {
+                child.itemsDisplayed = 0
+                child.items().every(function(item) {
+                    if (cutoff && item.modified_at < cutoff)
+                        // Don't display this item or anything after
+                        // it (see "cutoff" comment above).
+                        return false
+                    combined.push(item)
+                    child.itemsDisplayed++
+                    return true // continue
+                })
+            })
+            loader.items(combined.sort(function(a, b) {
+                return a.modified_at < b.modified_at ? 1 : -1
+            }))
+        },
+        // Number of undisplayed items to keep on hand for each result
+        // set. When hitting "load more", if a result set already has
+        // this many additional results available, we don't bother
+        // fetching a new page. This is the _minimum_ number of rows
+        // that will be added to loader.items in each "load more"
+        // event (except for the case where all items are displayed).
+        lowWaterMark: 23,
+    })
+    var childrenReady = m.stream.merge(loader.children.map(function(child) {
+        return child.items
+    }))
+    childrenReady.map(loader.loadable)
+    childrenReady.map(loader.mergeItems)
+}
diff --git a/apps/workbench/app/assets/javascripts/models/session_db.js b/apps/workbench/app/assets/javascripts/models/session_db.js
new file mode 100644 (file)
index 0000000..fd1cdfe
--- /dev/null
@@ -0,0 +1,357 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+window.SessionDB = function() {
+    var db = this;
+    Object.assign(db, {
+        discoveryCache: {},
+        tokenUUIDCache: null,
+        loadFromLocalStorage: function() {
+            try {
+                return JSON.parse(window.localStorage.getItem('sessions')) || {};
+            } catch(e) {}
+            return {};
+        },
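+        // The 'sessions' entry in localStorage maps a cluster's 5-char
+        // uuid prefix (or, until fillMissingUUIDs() runs, a baseURL
+        // placeholder) to a session object, e.g. (hypothetical):
+        //   {"zzzzz": {"baseURL": "https://zzzzz.example/", "token": "...", "user": {...}}}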
+        loadAll: function() {
+            var all = db.loadFromLocalStorage();
+            if (window.defaultSession) {
+                window.defaultSession.isFromRails = true;
+                all[window.defaultSession.user.uuid.slice(0, 5)] = window.defaultSession;
+            }
+            return all;
+        },
+        loadActive: function() {
+            var sessions = db.loadAll();
+            Object.keys(sessions).forEach(function(key) {
+                if (!sessions[key].token || (sessions[key].user && !sessions[key].user.is_active)) {
+                    delete sessions[key];
+                }
+            });
+            return sessions;
+        },
+        loadLocal: function() {
+            var sessions = db.loadActive();
+            var s = false;
+            Object.keys(sessions).forEach(function(key) {
+                if (sessions[key].isFromRails) {
+                    s = sessions[key];
+                    return;
+                }
+            });
+            return s;
+        },
+        save: function(k, v) {
+            var sessions = db.loadAll();
+            sessions[k] = v;
+            Object.keys(sessions).forEach(function(key) {
+                if (sessions[key].isFromRails) {
+                    delete sessions[key];
+                }
+            });
+            window.localStorage.setItem('sessions', JSON.stringify(sessions));
+        },
+        trash: function(k) {
+            var sessions = db.loadAll();
+            delete sessions[k];
+            window.localStorage.setItem('sessions', JSON.stringify(sessions));
+        },
+        findAPI: function(url) {
+            // Given a Workbench or API host or URL, return a promise
+            // for the corresponding API server's base URL.  Typical
+            // use:
+            // sessionDB.findAPI('https://workbench.example/foo').then(sessionDB.login)
+            if (url.length === 5 && url.indexOf('.') < 0) {
+                url += '.arvadosapi.com';
+            }
+            if (url.indexOf('://') < 0) {
+                url = 'https://' + url;
+            }
+            url = new URL(url);
+            return m.request(url.origin + '/discovery/v1/apis/arvados/v1/rest').then(function() {
+                return url.origin + '/';
+            }).catch(function(err) {
+                // If url is a Workbench site (and isn't too old),
+                // /status.json will tell us its API host.
+                return m.request(url.origin + '/status.json').then(function(resp) {
+                    if (!resp.apiBaseURL) {
+                        throw 'no apiBaseURL in status response';
+                    }
+                    return resp.apiBaseURL;
+                });
+            });
+        },
+        login: function(baseURL, fallbackLogin) {
+            // Initiate login procedure with given API base URL (e.g.,
+            // "http://api.example/").
+            //
+            // Any page that has a button that invokes login() must
+            // also call checkForNewToken() on (at least) its first
+            // render. Otherwise, the login procedure can't be
+            // completed.
+            if (fallbackLogin === undefined) {
+                fallbackLogin = true;
+            }
+            var session = db.loadLocal();
+            var apiHostname = new URL(session.baseURL).hostname;
+            db.discoveryDoc(session).map(function(localDD) {
+                var uuidPrefix = localDD.uuidPrefix;
+                db.discoveryDoc({baseURL: baseURL}).map(function(dd) {
+                    if (uuidPrefix in dd.remoteHosts ||
+                        (dd.remoteHostsViaDNS && apiHostname.endsWith('.arvadosapi.com'))) {
+                        // Federated identity login via salted token
+                        db.saltedToken(dd.uuidPrefix).then(function(token) {
+                            m.request(baseURL+'arvados/v1/users/current', {
+                                headers: {
+                                    authorization: 'Bearer '+token
+                                }
+                            }).then(function(user) {
+                                // Federated login successful.
+                                var remoteSession = {
+                                    user: user,
+                                    baseURL: baseURL,
+                                    token: token,
+                                    listedHost: (dd.uuidPrefix in localDD.remoteHosts)
+                                };
+                                db.save(dd.uuidPrefix, remoteSession);
+                            }).catch(function(e) {
+                                if (dd.uuidPrefix in localDD.remoteHosts) {
+                                    // If the remote system is configured to allow federated
+                                    // logins from this cluster, but rejected the salted
+                                    // token, save as a logged out session anyways.
+                                    var remoteSession = {
+                                        baseURL: baseURL,
+                                        listedHost: true
+                                    };
+                                    db.save(dd.uuidPrefix, remoteSession);
+                                } else if (fallbackLogin) {
+                                    // Remote cluster not listed as a remote host and rejecting
+                                    // the salted token, try classic login.
+                                    db.loginClassic(baseURL);
+                                }
+                            });
+                        });
+                    } else if (fallbackLogin) {
+                        // Classic login will be used when the remote system doesn't list this
+                        // cluster as part of the federation.
+                        db.loginClassic(baseURL);
+                    }
+                });
+            });
+            return false;
+        },
+        loginClassic: function(baseURL) {
+            document.location = baseURL + 'login?return_to=' + encodeURIComponent(document.location.href.replace(/\?.*/, '')+'?baseURL='+encodeURIComponent(baseURL));
+        },
+        logout: function(k) {
+            // Forget the token, but leave the other info in the db so
+            // the user can log in again without providing the login
+            // host again.
+            var sessions = db.loadAll();
+            delete sessions[k].token;
+            db.save(k, sessions[k]);
+        },
+        saltedToken: function(uuid_prefix) {
+            // Takes a cluster UUID prefix and returns a salted token that
+            // allows logging into that cluster via federated identity.
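+            // The result has the form (hypothetical values):
+            //   v2/zzzzz-gj3su-0123456789abcde/<hex HMAC-SHA1 of uuid_prefix, keyed with the token secret>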
+            var session = db.loadLocal();
+            return db.tokenUUID().then(function(token_uuid) {
+                var shaObj = new jsSHA("SHA-1", "TEXT");
+                var secret = session.token;
+                if (session.token.startsWith("v2/")) {
+                    secret = session.token.split("/")[2];
+                }
+                shaObj.setHMACKey(secret, "TEXT");
+                shaObj.update(uuid_prefix);
+                var hmac = shaObj.getHMAC("HEX");
+                return 'v2/' + token_uuid + '/' + hmac;
+            });
+        },
+        checkForNewToken: function() {
+            // If there's a token and baseURL in the location bar (i.e.,
+            // we just landed here after a successful login), save it and
+            // scrub the location bar.
+            if (document.location.search[0] != '?') { return; }
+            var params = {};
+            document.location.search.slice(1).split('&').map(function(kv) {
+                var e = kv.indexOf('=');
+                if (e < 0) {
+                    return;
+                }
+                params[decodeURIComponent(kv.slice(0, e))] = decodeURIComponent(kv.slice(e+1));
+            });
+            if (!params.baseURL || !params.api_token) {
+                // Have a query string, but it's not a login callback.
+                return;
+            }
+            params.token = params.api_token;
+            delete params.api_token;
+            db.save(params.baseURL, params);
+            history.replaceState({}, '', document.location.origin + document.location.pathname);
+        },
+        fillMissingUUIDs: function() {
+            var sessions = db.loadAll();
+            Object.keys(sessions).map(function(key) {
+                if (key.indexOf('://') < 0) {
+                    return;
+                }
+                // key is the baseURL placeholder. We need to get our user
+                // record to find out the cluster's real uuid prefix.
+                var session = sessions[key];
+                m.request(session.baseURL+'arvados/v1/users/current', {
+                    headers: {
+                        authorization: 'OAuth2 '+session.token
+                    }
+                }).then(function(user) {
+                    session.user = user;
+                    db.save(user.owner_uuid.slice(0, 5), session);
+                    db.trash(key);
+                });
+            });
+        },
+        // Return the Workbench base URL advertised by the session's
+        // API server, or a reasonable guess, or (if neither strategy
+        // works out) null.
+        workbenchBaseURL: function(session) {
+            var dd = db.discoveryDoc(session)();
+            if (!dd) {
+                // Don't fall back to guessing until we receive the discovery doc
+                return null;
+            }
+            if (dd.workbenchUrl) {
+                return dd.workbenchUrl;
+            }
+            // Guess workbench.{apihostport} is a Workbench... unless
+            // the host part of apihostport is an IPv4 or [IPv6]
+            // address.
+            if (!session.baseURL.match('://(\\[|\\d+\\.\\d+\\.\\d+\\.\\d+[:/])')) {
+                var wbUrl = session.baseURL.replace('://', '://workbench.');
+                // Remove the trailing slash, if it's there.
+                return wbUrl.slice(-1) === '/' ? wbUrl.slice(0, -1) : wbUrl;
+            }
+            return null;
+        },
+        // Return a m.stream that will get fulfilled with the
+        // discovery doc from a session's API server.
+        discoveryDoc: function(session) {
+            var cache = db.discoveryCache[session.baseURL];
+            if (!cache && session) {
+                db.discoveryCache[session.baseURL] = cache = m.stream();
+                var baseURL = session.baseURL;
+                if (baseURL[baseURL.length - 1] !== '/') {
+                    baseURL += '/';
+                }
+                m.request(baseURL+'discovery/v1/apis/arvados/v1/rest')
+                    .then(function (dd) {
+                        // Just in case we're talking with an old API server.
+                        dd.remoteHosts = dd.remoteHosts || {};
+                        if (dd.remoteHostsViaDNS === undefined) {
+                            dd.remoteHostsViaDNS = false;
+                        }
+                        return dd;
+                    })
+                    .then(cache);
+            }
+            return cache;
+        },
+        // Return a promise with the local session token's UUID from the API server.
+        tokenUUID: function() {
+            var cache = db.tokenUUIDCache;
+            if (!cache) {
+                var session = db.loadLocal();
+                if (session.token.startsWith("v2/")) {
+                    var uuid = session.token.split("/")[1]
+                    db.tokenUUIDCache = uuid;
+                    return new Promise(function(resolve, reject) {
+                        resolve(uuid);
+                    });
+                }
+                return db.request(session, 'arvados/v1/api_client_authorizations', {
+                    data: {
+                        filters: JSON.stringify([['api_token', '=', session.token]])
+                    }
+                }).then(function(resp) {
+                    var uuid = resp.items[0].uuid;
+                    db.tokenUUIDCache = uuid;
+                    return uuid;
+                });
+            } else {
+                return new Promise(function(resolve, reject) {
+                    resolve(cache);
+                });
+            }
+        },
+        request: function(session, path, opts) {
+            opts = opts || {};
+            opts.headers = opts.headers || {};
+            opts.headers.authorization = 'OAuth2 '+ session.token;
+            return m.request(session.baseURL + path, opts);
+        },
+        // Check whether active, non-federated remote sessions should be
+        // migrated to salted tokens.
+        migrateNonFederatedSessions: function() {
+            var sessions = db.loadActive();
+            Object.keys(sessions).map(function(uuidPrefix) {
+                var session = sessions[uuidPrefix];
+                if (!session.isFromRails && session.token) {
+                    db.saltedToken(uuidPrefix).then(function(saltedToken) {
+                        if (session.token != saltedToken) {
+                            // Only try the federated login
+                            db.login(session.baseURL, false);
+                        }
+                    });
+                }
+            });
+        },
+        // If remoteHosts is populated in the local API discovery doc,
+        // add a session for any listed cluster that is still missing one.
+        autoLoadRemoteHosts: function() {
+            var sessions = db.loadAll();
+            var doc = db.discoveryDoc(db.loadLocal());
+            if (doc === undefined) { return; }
+            doc.map(function(d) {
+                Object.keys(d.remoteHosts).map(function(uuidPrefix) {
+                    if (!(sessions[uuidPrefix])) {
+                        db.findAPI(d.remoteHosts[uuidPrefix]).then(function(baseURL) {
+                            db.login(baseURL, false);
+                        });
+                    }
+                });
+            });
+        },
+        // If the currently logged in account is from a remote federated
+        // cluster, redirect the user to their home cluster's workbench.
+        // This avoids confusion when the user clicks through a search
+        // result on the home cluster's multi-site search page, lands on
+        // the remote workbench, and later tries to start another search
+        // by clicking the multi-site search button instead of going back
+        // with the browser.
+        autoRedirectToHomeCluster: function(path) {
+            path = path || '/';
+            var session = db.loadLocal();
+            var userUUIDPrefix = session.user.uuid.slice(0, 5);
+            // If the current user is local to the cluster, do nothing.
+            if (userUUIDPrefix === session.user.owner_uuid.slice(0, 5)) {
+                return;
+            }
+            db.discoveryDoc(session).map(function (d) {
+                // Guess the remote host from the local discovery doc settings
+                var rHost = null;
+                if (d.remoteHosts[userUUIDPrefix]) {
+                    rHost = d.remoteHosts[userUUIDPrefix];
+                } else if (d.remoteHostsViaDNS) {
+                    rHost = userUUIDPrefix + '.arvadosapi.com';
+                } else {
+                    // This should not happen: a remote user whose uuid prefix
+                    // isn't listed in remoteHosts while the DNS mechanism is deactivated.
+                    return;
+                }
+                // Get the remote cluster workbench url & redirect there.
+                db.findAPI(rHost).then(function (apiUrl) {
+                    db.discoveryDoc({baseURL: apiUrl}).map(function (d) {
+                        document.location = d.workbenchUrl + path;
+                    });
+                });
+            });
+        }
+    });
+};
diff --git a/apps/workbench/app/assets/javascripts/permission_toggle.js b/apps/workbench/app/assets/javascripts/permission_toggle.js
new file mode 100644 (file)
index 0000000..007a25b
--- /dev/null
@@ -0,0 +1,59 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
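+// Toggle a permission link when its checkbox is clicked. Expected markup
+// (a hypothetical sketch; the handler looks up each data-permission-*
+// attribute on the checkbox's closest matching ancestor):
+//
+//   <div data-permission-uuid="..." data-permission-name="can_read"
+//        data-permission-head="..." data-permission-tail="...">
+//     <span data-toggle-permission="true"><input type="checkbox"></span>
+//   </div>
+//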
+$(document).
+    on('click', '[data-toggle-permission] input[type=checkbox]', function() {
+        var data = {};
+        var keys = ['data-permission-uuid',
+                    'data-permission-name',
+                    'data-permission-head',
+                    'data-permission-tail'];
+        var attr;
+        for(var i in keys) {
+            attr = keys[i];
+            data[attr] = $(this).closest('[' + attr + ']').attr(attr);
+            if (data[attr] === undefined) {
+                console.log(["Error: no " + attr + " established here.", this]);
+                return;
+            }
+        }
+        var is_checked = $(this).prop('checked');
+
+        if (is_checked) {
+            $.ajax('/links',
+                   {dataType: 'json',
+                    type: 'POST',
+                    data: {'link[tail_uuid]': data['data-permission-tail'],
+                           'link[head_uuid]': data['data-permission-head'],
+                           'link[link_class]': 'permission',
+                           'link[name]': data['data-permission-name']},
+                    context: this}).
+                fail(function(jqxhr, status, error) {
+                    $(this).prop('checked', false);
+                }).
+                done(function(data, status, jqxhr) {
+                    $(this).attr('data-permission-uuid', data['uuid']);
+                }).
+                always(function() {
+                    $(this).prop('disabled', false);
+                });
+        }
+        else {
+            $.ajax('/links/' + data['data-permission-uuid'],
+                   {dataType: 'json',
+                    type: 'POST',
+                    data: {'_method': 'DELETE'},
+                    context: this}).
+                fail(function(jqxhr, status, error) {
+                    $(this).prop('checked', true);
+                }).
+                done(function(data, status, jqxhr) {
+                    $(this).attr('data-permission-uuid', 'x');
+                }).
+                always(function() {
+                    $(this).prop('disabled', false);
+                });
+        }
+        $(this).prop('disabled', true);
+    });
diff --git a/apps/workbench/app/assets/javascripts/pipeline_instances.js b/apps/workbench/app/assets/javascripts/pipeline_instances.js
new file mode 100644 (file)
index 0000000..7570b2f
--- /dev/null
@@ -0,0 +1,124 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+function run_pipeline_button_state() {
+    var a = $('a.editable.required.editable-empty,input.form-control.required[value=""]');
+    if ((a.length > 0) || ($('.unreadable-inputs-present').length)) {
+        $(".run-pipeline-button").addClass("disabled");
+    }
+    else {
+        $(".run-pipeline-button").removeClass("disabled");
+    }
+}
+
+$(document).on('editable:success', function(event, tag, response, newValue) {
+    var $tag = $(tag);
+    if ($('.run-pipeline-button').length == 0)
+        return;
+    if ($tag.hasClass("required")) {
+        if (newValue && newValue.trim() != "") {
+            $tag.removeClass("editable-empty");
+            $tag.parent().css("background-color", "");
+            $tag.parent().prev().css("background-color", "");
+        }
+        else {
+            $tag.addClass("editable-empty");
+            $tag.parent().css("background-color", "#ffdddd");
+            $tag.parent().prev().css("background-color", "#ffdddd");
+        }
+    }
+    if ($tag.attr('data-name')) {
+        // Update other inputs representing the same piece of data
+        $('.editable[data-name="' + $tag.attr('data-name') + '"]').
+            editable('setValue', newValue);
+    }
+    run_pipeline_button_state();
+});
+
+$(document).on('ready ajax:complete', function() {
+    $('a.editable.required').each(function() {
+        var $tag = $(this);
+        if ($tag.hasClass("editable-empty")) {
+            $tag.parent().css("background-color", "#ffdddd");
+            $tag.parent().prev().css("background-color", "#ffdddd");
+        }
+        else {
+            $tag.parent().css("background-color", "");
+            $tag.parent().prev().css("background-color", "");
+        }
+    });
+    $('input.required').each(function() {
+        var $tag = $(this);
+        if ($tag.hasClass("unreadable-input")) {
+            $tag.parent().parent().css("background-color", "#ffdddd");
+            $tag.parent().parent().prev().css("background-color", "#ffdddd");
+        }
+        else {
+            $tag.parent().parent().css("background-color", "");
+            $tag.parent().parent().prev().css("background-color", "");
+        }
+    });
+    run_pipeline_button_state();
+});
+
+$(document).on('arv-log-event', '.arv-refresh-on-state-change', function(event, eventData) {
+    if (this != event.target) {
+        // Not interested in events sent to child nodes.
+        return;
+    }
+    if (eventData.event_type == "update" &&
+        eventData.properties.old_attributes.state != eventData.properties.new_attributes.state)
+    {
+        $(event.target).trigger('arv:pane:reload');
+    }
+});
+
+$(document).on('arv-log-event', '.arv-log-event-subscribe-to-pipeline-job-uuids', function(event, eventData){
+    if (this != event.target) {
+        // Not interested in events sent to child nodes.
+        return;
+    }
+    if (!((eventData.object_kind == 'arvados#pipelineInstance') &&
+          (eventData.event_type == "create" ||
+           eventData.event_type == "update") &&
+         eventData.properties &&
+         eventData.properties.new_attributes &&
+         eventData.properties.new_attributes.components)) {
+        return;
+    }
+    var objs = "";
+    var components = eventData.properties.new_attributes.components;
+    for (var a in components) {
+        if (components[a].job && components[a].job.uuid) {
+            objs += " " + components[a].job.uuid;
+        }
+    }
+    $(event.target).attr("data-object-uuids", eventData.object_uuid + objs);
+});
+
+$(document).on('ready ajax:success', function() {
+    $('.arv-log-refresh-control').each(function() {
+        var uuids = $(this).attr('data-object-uuids');
+        var $pane = $(this).closest('[data-pane-content-url]');
+        $pane.attr('data-object-uuids', uuids);
+    });
+});
+
+// Set up all events for the pipeline instances compare button.
+(function() {
+    var compare_form = '#compare';
+    var compare_inputs = '#comparedInstances :checkbox[name="uuids[]"]';
+    var update_button = function(event) {
+        var $form = $(compare_form);
+        var $checked_inputs = $(compare_inputs).filter(':checked');
+        $(':submit', $form).prop('disabled', (($checked_inputs.length < 2) ||
+                                              ($checked_inputs.length > 3)));
+        $('input[name="uuids[]"]', $form).remove();
+        $form.append($checked_inputs.clone()
+                     .removeAttr('id').attr('type', 'hidden'));
+    };
+    $(document)
+        .on('ready ajax:success', compare_form, update_button)
+        .on('change', compare_inputs, update_button);
+})();
diff --git a/apps/workbench/app/assets/javascripts/report_issue.js b/apps/workbench/app/assets/javascripts/report_issue.js
new file mode 100644 (file)
index 0000000..0285693
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+$(document).
+  on('click', "#report-issue-submit", function(e){
+    $(this).html('Sending');
+    $(this).prop('disabled', true);
+    var $cancelButton = $('#report-issue-cancel');
+    if ($cancelButton.length) {
+      $cancelButton.html('Close');
+    }
+    $('div').remove('.modal-footer-status');
+
+    $.ajax('/report_issue', {
+        type: 'POST',
+        data: $(this).parents('form').serialize()
+    }).success(function(data, status, jqxhr) {
+        var $sendButton = $('#report-issue-submit');
+        $sendButton.html('Report sent');
+        $('div').remove('.modal-footer-status');
+        $('.modal-footer').append('<div><br/></div><div class="modal-footer-status alert alert-success"><p class="contain-align-left">Thanks for reporting this issue!</p></div>');
+    }).fail(function(jqxhr, status, error) {
+        var $sendButton = $('#report-issue-submit');
+        if ($sendButton.length && $sendButton.prop('disabled')) {
+          $('div').remove('.modal-footer-status');
+          $('.modal-footer').append('<div><br/></div><div class="modal-footer-status alert alert-danger"><p class="contain-align-left">We are sorry. We could not submit your report! We really want this to work, though -- please try again.</p></div>');
+          $sendButton.html('Send problem report');
+          $sendButton.prop('disabled', false);
+        }
+        var $cancelButton = $('#report-issue-cancel');
+        $cancelButton.html('Cancel');
+    });
+    return false;
+  });
diff --git a/apps/workbench/app/assets/javascripts/request_shell_access.js b/apps/workbench/app/assets/javascripts/request_shell_access.js
new file mode 100644 (file)
index 0000000..eb4fbc3
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+$(document).on('ready ajax:success storage', function() {
+    // Update the "shell access requested" info box according to the
+    // current state of localStorage.
+    var msg = localStorage.getItem('request_shell_access');
+    var $noShellAccessDiv = $('#no_shell_access');
+    if ($noShellAccessDiv.length > 0) {
+        $('.alert-success p', $noShellAccessDiv).text(msg);
+        $('.alert-success', $noShellAccessDiv).toggle(!!msg);
+    }
+});
diff --git a/apps/workbench/app/assets/javascripts/select_modal.js b/apps/workbench/app/assets/javascripts/select_modal.js
new file mode 100644 (file)
index 0000000..19cf3cd
--- /dev/null
@@ -0,0 +1,185 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+$(document).on('click', '.selectable', function() {
+    var any;
+    var $this = $(this);
+    var $container = $(this).closest('.selectable-container');
+    if (!$container.hasClass('multiple')) {
+        $container.
+            find('.selectable').
+            removeClass('active');
+    }
+    $this.toggleClass('active');
+
+    if (!$this.hasClass('use-preview-selection')) {
+      any = ($container.
+           find('.selectable.active').length > 0)
+    }
+
+    if (!$container.hasClass('preview-selectable-container')) {
+      $this.
+        closest('.modal').
+        find('[data-enable-if-selection]').
+        prop('disabled', !any);
+
+      if ($this.hasClass('active')) {
+        var no_preview_available = '<div class="spinner-h-center spinner-v-center"><center>(No preview available)</center></div>';
+        if (!$this.attr('data-preview-href')) {
+            $(".modal-dialog-preview-pane").html(no_preview_available);
+            return;
+        }
+        $(".modal-dialog-preview-pane").html('<div class="spinner spinner-32px spinner-h-center spinner-v-center"></div>');
+        $.ajax($this.attr('data-preview-href'),
+               {dataType: "html"}).
+            done(function(data, status, jqxhr) {
+                $(".modal-dialog-preview-pane").html(data);
+            }).
+            fail(function(data, status, jqxhr) {
+                $(".modal-dialog-preview-pane").html(no_preview_available);
+            });
+      }
+    } else {
+      any = ($container.
+           find('.preview-selectable.active').length > 0)
+      $(this).
+          closest('.modal').
+          find('[data-enable-if-selection]').
+          prop('disabled', !any);
+    }
+
+}).on('click', '.modal button[data-action-href]', function() {
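+    // The clicked button is expected to carry (hypothetical values):
+    //   data-action-href="/actions" data-method="post"
+    //   data-action-data='{"selection_param": "selection[]", "success": "page-refresh"}'
+    // Active selections are submitted under selection_param, and
+    // action_data.success names the event triggered on success.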
+    var selection = [];
+    var data = [];
+    var $modal = $(this).closest('.modal');
+    var http_method = $(this).attr('data-method').toUpperCase();
+    var action_data = $(this).data('action-data');
+    var action_data_from_params = $(this).data('action-data-from-params');
+    var selection_param = action_data.selection_param;
+    $modal.find('.modal-error').removeClass('hide').hide();
+
+    var $preview_selections = $modal.find('.preview-selectable.active');
+    if ($preview_selections.length > 0) {
+      data.push({name: selection_param, value: $preview_selections.first().attr('href')});
+    }
+
+    if (data.length == 0) {   // not using preview selection option
+      $modal.find('.selectable.active[data-object-uuid]').each(function() {
+        var val = $(this).attr('data-object-uuid');
+        data.push({name: selection_param, value: val});
+      });
+    }
+    $.each($.extend({}, action_data, action_data_from_params),
+           function(key, value) {
+               if (value instanceof Array && key.slice(-1) != ']') {
+                   for (var i in value) {
+                       data.push({name: key + '[]', value: value[i]});
+                   }
+               } else {
+                   data.push({name: key, value: value});
+               }
+           });
+    if (http_method === 'PATCH') {
+        // Some user agents do not support HTTP PATCH (notably,
+        // phantomjs silently ignores our "data" and sends an empty
+        // request body) so we use POST instead, and supply a
+        // _method=PATCH param to tell Rails what we really want.
+        data.push({name: '_method', value: http_method});
+        http_method = 'POST';
+    }
+    $.ajax($(this).attr('data-action-href'),
+           {dataType: 'json',
+            type: http_method,
+            data: data,
+            traditional: false,
+            context: {modal: $modal, action_data: action_data}}).
+        fail(function(jqxhr, status, error) {
+            var message;
+            if (jqxhr.readyState == 0 || jqxhr.status == 0) {
+                message = "Cancelled.";
+            } else if (jqxhr.responseJSON && jqxhr.responseJSON.errors) {
+                message = jqxhr.responseJSON.errors.join("; ");
+            } else {
+                message = "Request failed.";
+            }
+            this.modal.find('.modal-error').
+                html('<div class="alert alert-danger"></div>').
+                show().
+                children().text(message);
+        }).
+        done(function(data, status, jqxhr) {
+            var event_name = this.action_data.success;
+            this.modal.find('.modal-error').hide();
+            $(document).trigger(event_name!=null ? event_name : 'page-refresh',
+                                [data, status, jqxhr, this.action_data]);
+        });
+}).on('click', '.chooser-show-project', function() {
+    var params = {};
+    var project_uuid = $(this).attr('data-project-uuid');
+    $(this).attr('href', '#');  // Skip normal click handler
+    if (project_uuid) {
+        params = {'filters': [['owner_uuid',
+                               '=',
+                               project_uuid]],
+                  'project_uuid': project_uuid
+                 };
+    }
+    $(".modal-dialog-preview-pane").html("");
+    // Use current selection as dropdown button label
+    $(this).
+        closest('.dropdown-menu').
+        prev('button').
+        html($(this).text() + ' <span class="caret"></span>');
+    // Set (or unset) filter params and refresh filterable rows
+    $($(this).closest('[data-filterable-target]').attr('data-filterable-target')).
+        data('infinite-content-params-from-project-dropdown', params).
+        trigger('refresh-content');
+}).on('ready', function() {
+    $('form[data-search-modal] a').on('click', function() {
+        $(this).closest('form').submit();
+        return false;
+    });
+    $('form[data-search-modal]').on('submit', function() {
+        // Ask the server for a Search modal. When it arrives, copy
+        // the search string from the top nav input into the modal's
+        // search query field.
+        var $form = $(this);
+        var searchq = $form.find('input').val();
+        var is_a_uuid = /^([0-9a-f]{32}(\+\S+)?|[0-9a-z]{5}-[0-9a-z]{5}-[0-9a-z]{15})$/;
+        if (searchq.trim().match(is_a_uuid)) {
+            window.location = '/actions?uuid=' + encodeURIComponent(searchq.trim());
+            // Show the "loading" indicator. TODO: better page transition hook
+            $(document).trigger('ajax:send');
+            return false;
+        }
+        if ($form.find('a[data-remote]').length > 0) {
+            // A search dialog is already loading.
+            return false;
+        }
+        $('<a />').
+            attr('data-remote-href', $form.attr('data-search-modal')).
+            attr('data-remote', 'true').
+            attr('data-method', 'GET').
+            hide().
+            appendTo($form).
+            on('ajax:success', function(data, status, xhr) {
+                $('body > .modal-container input[type=text]').
+                    val($form.find('input').val()).
+                    focus();
+                $form.find('input').val('');
+            }).on('ajax:complete', function() {
+                $(this).detach();
+            }).
+            click();
+        return false;
+    });
+}).on('page-refresh', function(event, data, status, jqxhr, action_data) {
+    window.location.reload();
+}).on('tab-refresh', function(event, data, status, jqxhr, action_data) {
+    $(document).trigger('arv:pane:reload:all');
+    $('body > .modal-container .modal').modal('hide');
+}).on('redirect-to-created-object', function(event, data, status, jqxhr, action_data) {
+    window.location.href = data.href.replace(/^[^\/]*\/\/[^\/]*/, '');
+}).on('shown.bs.modal', 'body > .modal-container .modal', function() {
+    $('.focus-on-display', this).focus();
+});
diff --git a/apps/workbench/app/assets/javascripts/selection.js.erb b/apps/workbench/app/assets/javascripts/selection.js.erb
new file mode 100644 (file)
index 0000000..e8f21ee
--- /dev/null
@@ -0,0 +1,111 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+//= require jquery
+//= require jquery_ujs
+
+/** JavaScript for selection. */
+
+jQuery(function($){
+    $(document).
+        on('change', '.persistent-selection:checkbox', function(e) {
+            $(document).trigger('selections-updated');
+        });
+});
+
+function dispatch_selection_action() {
+    /* When the user clicks a selection action link, build a form to perform
+       the action on the selected data, and submit it.
+       This is based on handleMethod from rails-ujs, extended to add the
+       selections to the submitted form.
+       Copyright (c) 2007-2010 Contributors at http://github.com/rails/jquery-ujs/contributors
+       */
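+    // For example (values are hypothetical): with
+    // data-selection-param-name="selection[]" and two checked rows, the
+    // generated form carries
+    //   <input type="hidden" name="selection[]" value="zzzzz-4zz18-...1">
+    //   <input type="hidden" name="selection[]" value="zzzzz-4zz18-...2">
+    // in addition to the _method and CSRF inputs built below.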
+    var $container = $(this);
+    if ($container.closest('.disabled').length) {
+        return false;
+    }
+    $container.closest('.dropdown-menu').dropdown('toggle');
+
+    var href = $container.data('href'),
+    method = $container.data('method') || 'GET',
+    paramName = $container.data('selection-param-name'),
+    csrfToken = $('meta[name=csrf-token]').attr('content'),
+    csrfParam = $('meta[name=csrf-param]').attr('content'),
+    form = $('<form method="post" action="' + href + '"></form>'),
+    metadataInput = ('<input name="_method" value="' + method +
+                     '" type="hidden" />');
+
+    if (csrfParam !== undefined && csrfToken !== undefined) {
+        metadataInput += ('<input type="hidden" name="' + csrfParam +
+                          '" value="' + csrfToken + '" />');
+    }
+    $container.
+        closest('.selection-action-container').
+        find(':checkbox:checked:visible').
+        each(function(index, elem) {
+            metadataInput += ('<input type="hidden" name="' + paramName +
+                              '" value="' + elem.value + '" />');
+        });
+
+    form.data('remote', $container.data('remote'));
+    form.hide().append(metadataInput).appendTo('body');
+    form.submit();
+    return false;
+}
+
+function enable_disable_selection_actions() {
+    var $container = $(this);
+    var $checked = $('.persistent-selection:checkbox:checked', $container);
+    var collection_lock_classes = $('.lock-collection-btn').attr('class');
+
+    $('[data-selection-action]', $container).
+        closest('div.btn-group-sm').
+        find('ul li').
+        toggleClass('disabled', ($checked.length == 0));
+    $('[data-selection-action=compare]', $container).
+        closest('li').
+        toggleClass('disabled',
+                    ($checked.filter('[value*=-d1hrv-]').length < 2) ||
+                    ($checked.not('[value*=-d1hrv-]').length > 0));
+    <% unless Group.copies_to_projects? %>
+        $('[data-selection-action=copy]', $container).
+            closest('li').
+            toggleClass('disabled',
+                        ($checked.filter('[value*=-j7d0g-]').length > 0) ||
+                        ($checked.length < 1));
+    <% end %>
+    $('[data-selection-action=combine-project-contents]', $container).
+        closest('li').
+        toggleClass('disabled',
+                    ($checked.filter('[value*=-4zz18-]').length < 1) ||
+                    ($checked.length != $checked.filter('[value*=-4zz18-]').length));
+    $('[data-selection-action=remove-selected-files]', $container).
+        closest('li').
+        toggleClass('disabled',
+                    !($checked.length > 0 && collection_lock_classes && collection_lock_classes.indexOf("fa-unlock") != -1));
+    $('[data-selection-action=untrash-selected-items]', $container).
+        closest('li').
+        toggleClass('disabled',
+                    ($checked.length < 1));
+}
+
+$(document).
+    on('selections-updated', function() {
+        $('.selection-action-container').each(enable_disable_selection_actions);
+    }).
+    on('ready ajax:complete', function() {
+        $('[data-selection-action]').
+            off('click', dispatch_selection_action).
+            on('click', dispatch_selection_action);
+        $(this).trigger('selections-updated');
+    });
+
+function select_all_items() {
+  $(".arv-selectable-items :checkbox").filter(":visible").prop("checked", true).trigger("change");
+}
+
+function unselect_all_items() {
+  $(".arv-selectable-items :checkbox").filter(":visible").prop("checked", false).trigger("change");
+}
diff --git a/apps/workbench/app/assets/javascripts/sizing.js b/apps/workbench/app/assets/javascripts/sizing.js
new file mode 100644 (file)
index 0000000..569956f
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+function graph_zoom(divId, svgId, scale) {
+    var pg = document.getElementById(divId);
+    var vcenter = (pg.scrollTop + (pg.scrollHeight - pg.scrollTopMax)/2.0) / pg.scrollHeight;
+    var hcenter = (pg.scrollLeft + (pg.scrollWidth - pg.scrollLeftMax)/2.0) / pg.scrollWidth;
+    var g = document.getElementById(svgId);
+    g.setAttribute("height", parseFloat(g.getAttribute("height")) * scale);
+    g.setAttribute("width", parseFloat(g.getAttribute("width")) * scale);
+    pg.scrollTop = (vcenter * pg.scrollHeight) - (pg.scrollHeight - pg.scrollTopMax)/2.0;
+    pg.scrollLeft = (hcenter * pg.scrollWidth) - (pg.scrollWidth - pg.scrollLeftMax)/2.0;
+    smart_scroll_fixup();
+}
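+
+// Usage sketch (element ids are hypothetical): zoom a graph in or out in
+// steps around the currently visible center point, e.g. from +/- buttons:
+//
+//   graph_zoom('provenance_graph_div', 'provenance_svg', 1.25);  // enlarge
+//   graph_zoom('provenance_graph_div', 'provenance_svg', 0.8);   // shrink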
+
+function smart_scroll_fixup(s) {
+    if (s != null && s.type == 'shown.bs.tab') {
+        s = [s.target];
+    } else {
+        s = $(".smart-scroll");
+    }
+
+    s.each(function(i, a) {
+        a = $(a);
+        var h = window.innerHeight - a.offset().top -
+            (parseInt(a.attr("data-smart-scroll-padding-bottom"), 10) || 0);
+        var height = String(h) + "px";
+        a.css('max-height', height);
+    });
+}
+
+$(window).on('load ready resize scroll ajax:complete', smart_scroll_fixup);
+$(document).on('shown.bs.tab', 'ul.nav-tabs > li > a', smart_scroll_fixup);
diff --git a/apps/workbench/app/assets/javascripts/tab_panes.js b/apps/workbench/app/assets/javascripts/tab_panes.js
new file mode 100644 (file)
index 0000000..b19a277
--- /dev/null
@@ -0,0 +1,217 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Load tab panes on demand. See app/views/application/_content.html.erb
+
+// Fire when a tab is selected/clicked.
+$(document).on('shown.bs.tab', '[data-toggle="tab"]', function(event) {
+    // reload the pane (unless it's already loaded)
+    $($(event.target).attr('href')).
+        not('.pane-loaded').
+        trigger('arv:pane:reload');
+});
+
+// Ask a refreshable pane to reload via ajax.
+//
+// Target of this event is the DOM element to be updated. A reload
+// consists of an AJAX call to load the "data-pane-content-url" and
+// replace the content of the target element with the retrieved HTML.
+//
+// There are four CSS classes set on the element to indicate its state:
+// pane-loading, pane-stale, pane-loaded, pane-reload-pending
+//
+// There are five states based on the presence or absence of css classes:
+//
+// 1. Absence of any pane-* states means the pane is empty, and should
+// be loaded as soon as it becomes visible.
+//
+// 2. "pane-loading" means an AJAX call has been made to reload the
+// pane and we are waiting on a result.
+//
+// 3. "pane-loading pane-stale" means the pane is loading, but has
+// already been invalidated and should schedule a reload as soon as
+// possible after the current load completes. (This happens when there
+// is a cluster of events, where the reload is triggered by the first
+// event, but we want to ensure that we eventually load the final
+// quiescent state).
+//
+// 4. "pane-loaded" means the pane is up to date.
+//
+// 5. "pane-loaded pane-reload-pending" means a reload is needed, and
+// has been scheduled, but has not started because the pane's
+// minimum-time-between-reloads throttle has not yet been reached.
+//
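+// A pane can be told to refresh from anywhere by triggering the event on
+// it directly, e.g. (sketch):
+//
+//   $('[data-pane-content-url]').first().trigger('arv:pane:reload');
+//
+// or every pane at once via the 'arv:pane:reload:all' handler below.
+//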
+$(document).on('arv:pane:reload', '[data-pane-content-url]', function(e) {
+    if (this != e.target) {
+        // An arv:pane:reload event was sent to an element (e.target)
+        // which happens to have an ancestor (this) matching the above
+        // '[data-pane-content-url]' selector. This happens because
+        // events bubble up the DOM on their way to document. However,
+        // here we only care about events delivered directly to _this_
+        // selected element (i.e., this==e.target), not ones delivered
+        // to its children. The event "e" is uninteresting here.
+        return;
+    }
+
+    // $pane, the event target, is an element whose content is to be
+    // replaced. Pseudoclasses on $pane (pane-loading, etc) encode the
+    // current loading state.
+    var $pane = $(this);
+
+    if ($pane.hasClass('pane-loading')) {
+        // Already loading, mark stale to schedule a reload after this one.
+        $pane.addClass('pane-stale');
+        return;
+    }
+
+    // The default throttle (minimum milliseconds between refreshes)
+    // can be overridden by an .arv-log-refresh-control element inside
+    // the pane -- or, failing that, the pane element itself -- with a
+    // data-load-throttle attribute. This allows the server to adjust
+    // the throttle depending on the pane content.
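+    //
+    // For example (hypothetical markup):
+    //   <div data-pane-content-url="/collections/xyz/tab" data-load-throttle="5000">
+    // would be refreshed at most once every 5 seconds.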
+    var throttle =
+        $pane.find('.arv-log-refresh-control').attr('data-load-throttle') ||
+        $pane.attr('data-load-throttle') ||
+        15000;
+    var now = (new Date()).getTime();
+    var loaded_at = $pane.attr('data-loaded-at');
+    var since_last_load = now - loaded_at;
+    if (loaded_at && (since_last_load < throttle)) {
+        if (!$pane.hasClass('pane-reload-pending')) {
+            $pane.addClass('pane-reload-pending');
+            setTimeout((function() {
+                $pane.trigger('arv:pane:reload');
+            }), throttle - since_last_load);
+        }
+        return;
+    }
+
+    // We know this doesn't have 'pane-loading' because we tested for it above
+    $pane.removeClass('pane-reload-pending');
+    $pane.removeClass('pane-loaded');
+    $pane.removeClass('pane-stale');
+
+    if (!$pane.hasClass('active') &&
+        $pane.parent().hasClass('tab-content')) {
+        // $pane is one of the content areas in a bootstrap tabs
+        // widget, and it isn't the currently selected tab. If and
+        // when the user does select the corresponding tab, it will
+        // get a shown.bs.tab event, which will invoke this reload
+        // function again (see handler above). For now, we just insert
+        // a spinner, which will be displayed while the new content is
+        // loading.
+        $pane.html('<div class="spinner spinner-32px spinner-h-center"></div>');
+        return;
+    }
+
+    $pane.addClass('pane-loading');
+
+    var content_url = $pane.attr('data-pane-content-url');
+    $.ajax(content_url, {dataType: 'html', type: 'GET', context: $pane}).
+        done(function(data, status, jqxhr) {
+            var $pane = this;
+            // Preserve collapsed state
+            var collapsable = {};
+            $(".collapse", this).each(function(i, c) {
+                collapsable[c.id] = $(c).hasClass('in');
+            });
+            var tmp = $(data);
+            $(".collapse", tmp).each(function(i, c) {
+                if (collapsable[c.id]) {
+                    $(c).addClass('in');
+                } else {
+                    $(c).removeClass('in');
+                }
+            });
+            $pane.html(tmp);
+            $pane.removeClass('pane-loading');
+            $pane.addClass('pane-loaded');
+            $pane.attr('data-loaded-at', (new Date()).getTime());
+            $pane.trigger('arv:pane:loaded', [$pane]);
+
+            if ($pane.hasClass('pane-stale')) {
+                $pane.trigger('arv:pane:reload');
+            }
+        }).fail(function(jqxhr, status, error) {
+            var $pane = this;
+            var errhtml;
+            var contentType = jqxhr.getResponseHeader('Content-Type');
+            if (jqxhr.readyState == 0 || jqxhr.status == 0) {
+                if ($pane.attr('data-loaded-at') > 0) {
+                    // Stale content is already present. Leave it
+                    // there while loading the next page.
+                    $pane.removeClass('pane-loading');
+                    $pane.addClass('pane-loaded');
+                    // ...but schedule another refresh (after a
+                    // throttle delay) in case the act of navigating
+                    // away gets cancelled itself, leaving this page
+                    // with content that we know is stale.
+                    $pane.addClass('pane-stale');
+                    $pane.attr('data-loaded-at', (new Date()).getTime());
+                    $pane.trigger('arv:pane:reload');
+                    return;
+                }
+                errhtml = "Cancelled.";
+            } else if (contentType && contentType.match(/\btext\/html\b/)) {
+                var $response = $(jqxhr.responseText);
+                var $wrapper = $('div#page-wrapper', $response);
+                if ($wrapper.length) {
+                    errhtml = $wrapper.html();
+                } else {
+                    errhtml = jqxhr.responseText;
+                }
+            } else {
+                errhtml = ("An error occurred: " +
+                           (jqxhr.responseText || status)).
+                    replace(/&/g, '&amp;').
+                    replace(/</g, '&lt;').
+                    replace(/>/g, '&gt;');
+            }
+            $pane.html('<div class="pane-error-display"><p>' +
+                      '<a href="#" class="btn btn-primary tab_reload">' +
+                      '<i class="fa fa-fw fa-refresh"></i> ' +
+                      'Reload tab</a></p><iframe style="width: 100%"></iframe></div>');
+            $('.tab_reload', $pane).click(function() {
+                $(this).
+                    html('<div class="spinner spinner-32px spinner-h-center"></div>').
+                    closest('.pane-loaded').
+                    attr('data-loaded-at', 0).
+                    trigger('arv:pane:reload');
+            });
+            // We want to render the error in an iframe, in order to
+            // avoid conflicts with the main page's element ids, etc.
+            // In order to do that dynamically, we have to set a
+            // timeout on the iframe window to load our HTML *after*
+            // the default source (e.g., about:blank) has loaded.
+            var iframe = $('iframe', $pane)[0];
+            iframe.contentWindow.setTimeout(function() {
+                $('body', iframe.contentDocument).html(errhtml);
+                iframe.height = iframe.contentDocument.body.scrollHeight + "px";
+            }, 1);
+            $pane.removeClass('pane-loading');
+            $pane.addClass('pane-loaded');
+        });
+});
+
+// Mark all panes as stale/dirty. Refresh any 'active' panes.
+$(document).on('arv:pane:reload:all', function() {
+    $('[data-pane-content-url]').trigger('arv:pane:reload');
+});
+
+$(document).on('arv-log-event', '.arv-refresh-on-log-event', function(event) {
+    if (this != event.target) {
+        // Not interested in events sent to child nodes.
+        return;
+    }
+    // Panes marked arv-refresh-on-log-event should be refreshed
+    $(event.target).trigger('arv:pane:reload');
+});
+
+// If the nav-tabs element has a 'tab counts url', fetch it to get javascript that updates the tab counts
+$(document).on('ready count-change', function() {
+    var tabCountsUrl = $('ul.nav-tabs').data('tab-counts-url');
+    if( tabCountsUrl && tabCountsUrl.length ) {
+        $.get( tabCountsUrl );
+    }
+});
diff --git a/apps/workbench/app/assets/javascripts/to_tsquery.js b/apps/workbench/app/assets/javascripts/to_tsquery.js
new file mode 100644 (file)
index 0000000..f2e34d9
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// to_tsquery() converts a user-entered search query to a useful
+// operand for the Arvados API "@@" filter. It returns null if it
+// can't come up with anything valid (e.g., q consists entirely of
+// punctuation).
+//
+// Examples:
+//
+// "foo"     => "foo:*"
+// "foo_bar" => "foo:*&bar:*"
+// "foo.bar" => "foo.bar:*"    // "." is a word char in FT queries
+// "foo/b-r" => "foo/b-r:*"    // "/" and "-", too
+// "foo|bar" => "foo:*&bar:*"
+// " oo|ba " => "oo:*&ba:*"
+// "__ "     => null
+// ""        => null
+// null      => null
+window.to_tsquery = function(q) {
+    q = (q || '').replace(/[^-\w\.\/]+/g, ' ').trim().replace(/ /g, ':*&');
+    if (q == '')
+        return null;
+    return q + ':*';
+};
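+
+// Usage sketch (the filter shape is an assumption based on the comment
+// above; 'any' stands in for a real searchable attribute):
+//
+//   var q = to_tsquery(userInput);
+//   if (q !== null) filters.push(['any', '@@', q]);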
diff --git a/apps/workbench/app/assets/javascripts/upload_to_collection.js b/apps/workbench/app/assets/javascripts/upload_to_collection.js
new file mode 100644 (file)
index 0000000..d66be63
--- /dev/null
@@ -0,0 +1,494 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+var app = angular.module('Workbench', ['Arvados']);
+app.controller('UploadToCollection', UploadToCollection);
+app.directive('arvUuid', arvUuid);
+
+function arvUuid() {
+    // Copy the given uuid into the current $scope.
+    return {
+        restrict: 'A',
+        link: function(scope, element, attributes) {
+            scope.uuid = attributes.arvUuid;
+        }
+    };
+}
+
+UploadToCollection.$inject = ['$scope', '$filter', '$q', '$timeout',
+                              'ArvadosClient', 'arvadosApiToken'];
+function UploadToCollection($scope, $filter, $q, $timeout,
+                            ArvadosClient, arvadosApiToken) {
+    $.extend($scope, {
+        uploadQueue: [],
+        uploader: new QueueUploader(),
+        addFilesToQueue: function(files) {
+            // Angular binding doesn't work its usual magic for file
+            // inputs, so we need to $scope.$apply() this update.
+            $scope.$apply(function(){
+                var i, nItemsTodo;
+                // Add these new files after the items already waiting
+                // in the queue -- but before the items that are
+                // 'Done' and have therefore been pushed to the
+                // bottom.
+                nItemsTodo = 0;
+                while (nItemsTodo < $scope.uploadQueue.length &&
+                       $scope.uploadQueue[nItemsTodo].state !== 'Done') {
+                    nItemsTodo++;
+                }
+                for (i=0; i<files.length; i++) {
+                    $scope.uploadQueue.splice(nItemsTodo+i, 0,
+                        new FileUploader(files[i]));
+                }
+            });
+        },
+        go: function() {
+            $scope.uploader.go();
+        },
+        stop: function() {
+            $scope.uploader.stop();
+        },
+        removeFileFromQueue: function(index) {
+            var wasRunning = $scope.uploader.running;
+            $scope.uploadQueue[index].stop();
+            $scope.uploadQueue.splice(index, 1);
+            if (wasRunning)
+                $scope.go();
+        },
+        countInStates: function(want_states) {
+            var found = 0;
+            $.each($scope.uploadQueue, function() {
+                if (want_states.indexOf(this.state) >= 0) {
+                    ++found;
+                }
+            });
+            return found;
+        }
+    });
+    ////////////////////////////////
+
+    var keepProxy;
+    var defaultErrorMessage = 'A network error occurred: either the server was unreachable, or there is a server configuration problem. Please check your browser debug console for a more specific error message (browser security features prevent us from showing the details here).';
+
+    function SliceReader(_slice) {
+        var that = this;
+        $.extend(this, {
+            go: go
+        });
+        ////////////////////////////////
+        var _deferred;
+        var _reader;
+        function go() {
+            // Return a promise, which will be resolved with the
+            // requested slice data.
+            _deferred = $.Deferred();
+            _reader = new FileReader();
+            _reader.onload = resolve;
+            _reader.onerror = _deferred.reject;
+            _reader.onprogress = _deferred.notify;
+            _reader.readAsArrayBuffer(_slice.blob);
+            return _deferred.promise();
+        }
+        function resolve() {
+            if (_reader.result.byteLength !== _slice.size) {
+                // Sometimes we get an onload event even if the read
+                // did not return the desired number of bytes. We
+                // treat that as a fail.
+                _deferred.reject(
+                    null, "Read error",
+                    "Short read: wanted " + _slice.size +
+                        ", received " + _reader.result.length);
+                return;
+            }
+            return _deferred.resolve(_reader.result);
+        }
+    }
+
+    function SliceUploader(_label, _data, _dataSize) {
+        $.extend(this, {
+            go: go,
+            stop: stop
+        });
+        ////////////////////////////////
+        var that = this;
+        var _deferred;
+        var _failCount = 0;
+        var _failMax = 3;
+        var _jqxhr;
+        function go() {
+            // Send data to the Keep proxy. Retry a few times on
+            // fail. Return a promise that will get resolved with
+            // resolve(locator) when the block is accepted by the
+            // proxy.
+            _deferred = $.Deferred();
+            if (proxyUriBase().match(/^http:/) &&
+                window.location.origin.match(/^https:/)) {
+                // In this case, requests will fail, and no ajax
+                // success/fail handlers will be called (!), which
+                // will leave our status saying "uploading" and the
+                // user waiting for something to happen. Better to
+                // give up now.
+                _deferred.reject({
+                    textStatus: 'error',
+                    err: 'There is a server configuration problem. Proxy ' + proxyUriBase() + ' cannot be used from origin ' + window.location.origin + ' due to the browser\'s mixed-content (https/http) policy.'
+                });
+            } else {
+                goSend();
+            }
+            return _deferred.promise();
+        }
+        function stop() {
+            _failMax = 0;
+            _jqxhr.abort();
+            _deferred.reject({
+                textStatus: 'stopped',
+                err: 'interrupted at slice '+_label
+            });
+        }
+        function goSend() {
+            _jqxhr = $.ajax({
+                url: proxyUriBase(),
+                type: 'POST',
+                crossDomain: true,
+                headers: {
+                    'Authorization': 'OAuth2 '+arvadosApiToken,
+                    'Content-Type': 'application/octet-stream',
+                    'X-Keep-Desired-Replicas': '2'
+                },
+                xhr: function() {
+                    // Make an xhr that reports upload progress
+                    var xhr = $.ajaxSettings.xhr();
+                    if (xhr.upload) {
+                        xhr.upload.onprogress = onSendProgress;
+                    }
+                    return xhr;
+                },
+                processData: false,
+                data: _data
+            });
+            _jqxhr.then(onSendResolve, onSendReject);
+        }
+        function onSendProgress(xhrProgressEvent) {
+            _deferred.notify(xhrProgressEvent.loaded, _dataSize);
+        }
+        function onSendResolve(data, textStatus, jqxhr) {
+            _deferred.resolve(data, _dataSize);
+        }
+        function onSendReject(xhr, textStatus, err) {
+            if (++_failCount < _failMax) {
+                // TODO: it would be nice to tell the user that a retry is happening.
+                console.log('slice ' + _label + ': ' +
+                            textStatus + ', retry ' + _failCount);
+                goSend();
+            } else {
+                _deferred.reject(
+                    {xhr: xhr, textStatus: textStatus, err: err});
+            }
+        }
+        function proxyUriBase() {
+            return ((keepProxy.service_ssl_flag ? 'https' : 'http') +
+                    '://' + keepProxy.service_host + ':' +
+                    keepProxy.service_port + '/');
+        }
+    }
+
+    function FileUploader(file) {
+        $.extend(this, {
+            file: file,
+            locators: [],
+            progress: 0.0,
+            state: 'Queued',    // Queued, Uploading, Paused, Uploaded, Done
+            statistics: null,
+            go: go,
+            stop: stop          // User wants to stop.
+        });
+        ////////////////////////////////
+        var that = this;
+        var _currentUploader;
+        var _currentSlice;
+        var _deferred;
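+        // 2^26 bytes = 64 MiB per slice, matching Keep's maximum block size.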
+        var _maxBlobSize = Math.pow(2,26);
+        var _bytesDone = 0;
+        var _queueTime = Date.now();
+        var _startTime;
+        var _startByte;
+        var _finishTime;
+        var _readPos = 0;       // number of bytes confirmed uploaded
+        function go() {
+            if (_deferred)
+                _deferred.reject({textStatus: 'restarted'});
+            _deferred = $.Deferred();
+            that.state = 'Uploading';
+            _startTime = Date.now();
+            _startByte = _readPos;
+            setProgress(_readPos);
+            goSlice();
+            return _deferred.promise().always(function() { _deferred = null; });
+        }
+        function stop() {
+            if (_deferred) {
+                that.state = 'Paused';
+                _deferred.reject({textStatus: 'stopped', err: 'interrupted'});
+            }
+            if (_currentUploader) {
+                _currentUploader.stop();
+                _currentUploader = null;
+            }
+        }
+        function goSlice() {
+            // Ensure this._deferred gets resolved or rejected --
+            // either right here, or when a new promise arranged right
+            // here is fulfilled.
+            _currentSlice = nextSlice();
+            if (!_currentSlice) {
+                // All slices have been uploaded, but the work won't
+                // be truly Done until the target collection has been
+                // updated by the QueueUploader. This state is called:
+                that.state = 'Uploaded';
+                setProgress(_readPos);
+                _currentUploader = null;
+                _deferred.resolve([that]);
+                return;
+            }
+            _currentUploader = new SliceUploader(
+                _readPos.toString(),
+                _currentSlice.blob,
+                _currentSlice.size);
+            _currentUploader.go().then(
+                onUploaderResolve,
+                onUploaderReject,
+                onUploaderProgress);
+        }
+        function onUploaderResolve(locator, dataSize) {
+            var sizeHint = (''+locator).split('+')[1];
+            if (!locator || parseInt(sizeHint) !== dataSize) {
+                console.log("onUploaderResolve, but locator '" + locator +
+                            "' with size hint '" + sizeHint +
+                            "' does not look right for dataSize=" + dataSize);
+                return onUploaderReject({
+                    textStatus: "error",
+                    err: "Bad response from slice upload"
+                });
+            }
+            that.locators.push(locator);
+            _readPos += dataSize;
+            _currentUploader = null;
+            goSlice();
+        }
+        function onUploaderReject(reason) {
+            that.state = 'Paused';
+            setProgress(_readPos);
+            _currentUploader = null;
+            if (_deferred)
+                _deferred.reject(reason);
+        }
+        function onUploaderProgress(sliceDone, sliceSize) {
+            setProgress(_readPos + sliceDone);
+        }
+        function nextSlice() {
+            var size = Math.min(
+                _maxBlobSize,
+                that.file.size - _readPos);
+            setProgress(_readPos);
+            if (size === 0) {
+                return false;
+            }
+            var blob = that.file.slice(
+                _readPos, _readPos+size,
+                'application/octet-stream; charset=x-user-defined');
+            return {blob: blob, size: size};
+        }
+        function setProgress(bytesDone) {
+            var kBps;
+            if (that.file.size == 0)
+                that.progress = 100;
+            else
+                that.progress = Math.min(100, 100 * bytesDone / that.file.size);
+            if (bytesDone > _startByte) {
+                kBps = (bytesDone - _startByte) /
+                    (Date.now() - _startTime);
+                that.statistics = (
+                    '' + $filter('number')(bytesDone/1024, '0') + ' KiB ' +
+                        'at ~' + $filter('number')(kBps, '0') + ' KiB/s');
+                if (that.state === 'Paused') {
+                    that.statistics += ', paused';
+                } else if (that.state === 'Uploading') {
+                    that.statistics += ', ETA ' +
+                        $filter('date')(
+                            new Date(
+                                Date.now() + (that.file.size - bytesDone) / kBps),
+                            'shortTime');
+                }
+            } else {
+                that.statistics = that.state;
+            }
+            if (that.state === 'Uploaded') {
+                // 'Uploaded' gets reported as 'finished', which is a
+                // little misleading because the collection hasn't
+                // been updated yet. But FileUploader's portion of the
+                // work (and the time when it makes sense to show
+                // speed and ETA) is finished.
+                that.statistics += ', finished ' +
+                    $filter('date')(Date.now(), 'shortTime');
+                _finishTime = Date.now();
+            }
+            if (_deferred)
+                _deferred.notify();
+        }
+    }
+
+    function QueueUploader() {
+        $.extend(this, {
+            state: 'Idle',      // Idle, Running, Stopped, Failed
+            stateReason: null,
+            statusSuccess: null,
+            go: go,
+            stop: stop
+        });
+        ////////////////////////////////
+        var that = this;
+        var _deferred;          // the one we promise to go()'s caller
+        var _deferredAppend;    // tracks current appendToCollection
+        function go() {
+            if (_deferred) return _deferred.promise();
+            if (_deferredAppend) return _deferredAppend.promise();
+            _deferred = $.Deferred();
+            that.state = 'Running';
+            ArvadosClient.apiPromise(
+                'keep_services', 'list',
+                {filters: [['service_type','=','proxy']]}).
+                then(doQueueWithProxy);
+            onQueueProgress();
+            return _deferred.promise().always(function() { _deferred = null; });
+        }
+        function stop() {
+            that.state = 'Stopped';
+            if (_deferred) {
+                _deferred.reject({});
+            }
+            for (var i=0; i<$scope.uploadQueue.length; i++)
+                $scope.uploadQueue[i].stop();
+            onQueueProgress();
+        }
+        function doQueueWithProxy(data) {
+            keepProxy = data.items[0];
+            if (!keepProxy) {
+                that.state = 'Failed';
+                that.stateReason =
+                    'There seems to be no Keep proxy service available.';
+                _deferred.reject(null, 'error', that.stateReason);
+                return;
+            }
+            return doQueueWork();
+        }
+        function doQueueWork() {
+            // If anything is not Done, do it.
+            if ($scope.uploadQueue.length > 0 &&
+                $scope.uploadQueue[0].state !== 'Done') {
+                if (_deferred) {
+                    that.stateReason = null;
+                    return $scope.uploadQueue[0].go().
+                        then(appendToCollection, null, onQueueProgress).
+                        then(doQueueWork, onQueueReject);
+                } else {
+                    // Queue work has been stopped. Just update the
+                    // view.
+                    onQueueProgress();
+                    return;
+                }
+            }
+            // If everything is Done, resolve the promise and clean
+            // up. Note this can happen even after the _deferred
+            // promise has been rejected: specifically, when stop() is
+            // called too late to prevent completion of the last
+            // upload. In that case we want to update state to "Idle",
+            // rather than leave it at "Stopped".
+            onQueueResolve();
+        }
+        function onQueueReject(reason) {
+            if (!_deferred) {
+                // Outcome has already been decided (by stop()).
+                return;
+            }
+
+            that.state = 'Failed';
+            that.stateReason = (
+                (reason.textStatus || 'Error') +
+                    (reason.xhr && reason.xhr.options
+                     ? (' (from ' + reason.xhr.options.url + ')')
+                     : '') +
+                    ': ' +
+                    (reason.err || defaultErrorMessage));
+            if (reason.xhr && reason.xhr.responseText)
+                that.stateReason += ' -- ' + reason.xhr.responseText;
+            _deferred.reject(reason);
+            onQueueProgress();
+        }
+        function onQueueResolve() {
+            that.state = 'Idle';
+            that.stateReason = 'Done!';
+            if (_deferred)
+                _deferred.resolve();
+            onQueueProgress();
+        }
+        function onQueueProgress() {
+            // Ensure updates happen after FileUpload promise callbacks.
+            $timeout(function(){$scope.$apply();});
+        }
+        function appendToCollection(uploads) {
+            _deferredAppend = $.Deferred();
+            ArvadosClient.apiPromise(
+                'collections', 'get',
+                { uuid: $scope.uuid }).
+                then(function(collection) {
+                    var manifestText = '';
+                    $.each(uploads, function(_, upload) {
+                        var locators = upload.locators;
+                        if (locators.length === 0) {
+                            // Every stream must have at least one
+                            // data locator, even if it is zero bytes
+                            // long:
+                            locators = ['d41d8cd98f00b204e9800998ecf8427e+0'];
+                        }
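+                        // Each upload becomes one manifest line of the form
+                        //   . <locator> [<locator>...] 0:<size>:<name>\n
+                        // d41d8cd98f00b204e9800998ecf8427e+0 is the locator
+                        // of the zero-byte block (md5 of the empty string).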
+                        var filename = ArvadosClient.uniqueNameForManifest(
+                            collection.manifest_text,
+                            '.', upload.file.name);
+                        collection.manifest_text += '. ' +
+                            locators.join(' ') +
+                            ' 0:' + upload.file.size.toString() + ':' +
+                            filename +
+                            '\n';
+                    });
+                    return ArvadosClient.apiPromise(
+                        'collections', 'update',
+                        { uuid: $scope.uuid,
+                          collection:
+                          { manifest_text:
+                            collection.manifest_text }
+                        });
+                }).
+                then(function() {
+                    // Mark the completed upload(s) as Done and push
+                    // them to the bottom of the queue.
+                    var i, qLen = $scope.uploadQueue.length;
+                    for (i=0; i<qLen; i++) {
+                        if (uploads.indexOf($scope.uploadQueue[i]) >= 0) {
+                            $scope.uploadQueue[i].state = 'Done';
+                            $scope.uploadQueue.push.apply(
+                                $scope.uploadQueue,
+                                $scope.uploadQueue.splice(i, 1));
+                            --i;
+                            --qLen;
+                        }
+                    }
+                }).
+                then(_deferredAppend.resolve,
+                     _deferredAppend.reject);
+            return _deferredAppend.promise().
+                always(function() {
+                    _deferredAppend = null;
+                });
+        }
+    }
+}
diff --git a/apps/workbench/app/assets/javascripts/user_agreements.js b/apps/workbench/app/assets/javascripts/user_agreements.js
new file mode 100644 (file)
index 0000000..7ce5342
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+function enable_okbutton() {
+    var $div = $('#open_user_agreement');
+    var allchecked = $('input[name="checked[]"]', $div).not(':checked').length == 0;
+    $('input[type=submit]', $div).prop('disabled', !allchecked);
+}
+$(document).on('click keyup input', '#open_user_agreement input', enable_okbutton);
+$(document).on('ready ajax:complete', enable_okbutton);
diff --git a/apps/workbench/app/assets/javascripts/users.js b/apps/workbench/app/assets/javascripts/users.js
new file mode 100644 (file)
index 0000000..565ea9c
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+$(document).
+    on('notifications:recount',
+       function() {
+           var menu = $('.notification-menu');
+           var n = $('.notification', menu).not('.empty').length;
+           $('.notification-count', menu).html(n>0 ? n : '');
+       }).
+    on('ajax:success', 'form.new_authorized_key',
+       function(e, data, status, xhr) {
+           $(e.target).parents('.notification').eq(0).fadeOut('slow', function() {
+               $('<li class="alert alert-success daxalert">SSH key added.</li>').hide().replaceAll(this).fadeIn('slow');
+               $(document).trigger('notifications:recount');
+           });
+       }).
+    on('ajax:complete', 'form.new_authorized_key',
+       function(e, data, status, xhr) {
+           $($('input[name=disable_element]', e.target).val()).
+               fadeTo(200, 1.0);
+       }).
+    on('ajax:error', 'form.new_authorized_key',
+       function(e, xhr, status, error) {
+           var error_div;
+           var response = $.parseJSON(xhr.responseText);
+           error_div = $(e.target).parent().find('div.ajax-errors');
+           if (error_div.length == 0) {
+               $(e.target).parent().append('<div class="alert alert-error ajax-errors"></div>');
+               error_div = $(e.target).parent().find('div.ajax-errors');
+           }
+           if (response.errors) {
+               error_div.html($('<p/>').text(response.errors).html());
+           } else {
+               error_div.html('<p>Sorry, request failed.</p>');
+           }
+           error_div.show();
+           $($('input[name=disable_element]', e.target).val()).
+               fadeTo(200, 1.0);
+       }).
+    on('click', 'form[data-remote] input[type=submit]',
+       function(e) {
+           $(e.target).parents('form').eq(0).parent().find('div.ajax-errors').html('').hide();
+           $($(e.target).
+             parents('form').
+             find('input[name=disable_element]').
+             val()).
+               fadeTo(200, 0.3);
+           return true;
+       });
diff --git a/apps/workbench/app/assets/javascripts/work_unit_component.js b/apps/workbench/app/assets/javascripts/work_unit_component.js
new file mode 100644 (file)
index 0000000..a84a2e7
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+$(document).
+  on('click', '.component-detail-panel', function(event) {
+    var href = $($(event.target).attr('href'));
+    if (href.hasClass("in")) {
+      var content_div = href.find('.work-unit-component-detail-body');
+      content_div.html('<div class="spinner spinner-32px col-sm-1"></div>');
+      var content_url = href.attr('content-url');
+      var action_data = href.attr('action-data');
+      $.ajax(content_url, {dataType: 'html', type: 'POST', data: {action_data: action_data}}).
+        done(function(data, status, jqxhr) {
+          content_div.html(data);
+        }).fail(function(jqxhr, status, error) {
+          content_div.html(error);
+        });
+    }
+  });
diff --git a/apps/workbench/app/assets/javascripts/work_unit_log.js b/apps/workbench/app/assets/javascripts/work_unit_log.js
new file mode 100644 (file)
index 0000000..c43bae0
--- /dev/null
@@ -0,0 +1,69 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+$(document).on('arv-log-event', '.arv-log-event-handler-append-logs', function(event, eventData){
+    var wasatbottom, txt;
+    if (this != event.target) {
+        // Not interested in events sent to child nodes.
+        return;
+    }
+
+    if (!('properties' in eventData)) {
+        return;
+    }
+
+    txt = '';
+    if ('text' in eventData.properties &&
+       eventData.properties.text.length > 0) {
+        txt += eventData.properties.text;
+        if (txt.slice(txt.length-1) != "\n") {
+            txt += "\n";
+        }
+    }
+    if (eventData.event_type == 'update' &&
+        eventData.object_uuid.indexOf("-dz642-") == 5 &&
+        'old_attributes' in eventData.properties &&
+        'new_attributes' in eventData.properties) {
+        // Container update
+        if (eventData.properties.old_attributes.state != eventData.properties.new_attributes.state) {
+            var stamp = eventData.event_at + " ";
+            switch(eventData.properties.new_attributes.state) {
+            case "Queued":
+                txt += stamp + "Container "+eventData.object_uuid+" was returned to the queue\n";
+                break;
+            case "Locked":
+                txt += stamp + "Container "+eventData.object_uuid+" was taken from the queue by a dispatch process\n";
+                break;
+            case "Running":
+                txt += stamp + "Container "+eventData.object_uuid+" started\n";
+                break;
+            case "Complete":
+                txt += stamp + "Container "+eventData.object_uuid+" finished\n";
+                break;
+            case "Cancelled":
+                txt += stamp + "Container "+eventData.object_uuid+" was cancelled\n";
+                break;
+            default:
+                // Unknown state -- unexpected, might as well log it.
+                txt += stamp + "Container "+eventData.object_uuid+" changed state to " +
+                    eventData.properties.new_attributes.state + "\n";
+                break;
+            }
+        }
+    }
+
+    if (txt == '') {
+        return;
+    }
+
+    wasatbottom = (this.scrollTop + this.clientHeight >= this.scrollHeight);
+    if (eventData.prepend) {
+        $(this).prepend(txt);
+    } else {
+        $(this).append(txt);
+    }
+    if (wasatbottom) {
+        this.scrollTop = this.scrollHeight;
+    }
+});
diff --git a/apps/workbench/app/assets/stylesheets/api_client_authorizations.css.scss b/apps/workbench/app/assets/stylesheets/api_client_authorizations.css.scss
new file mode 100644 (file)
index 0000000..ec87eb2
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the ApiClientAuthorizations controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/application.css.scss b/apps/workbench/app/assets/stylesheets/application.css.scss
new file mode 100644 (file)
index 0000000..8822d5c
--- /dev/null
@@ -0,0 +1,353 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+/*
+ * This is a manifest file that'll be compiled into application.css, which will include all the files
+ * listed below.
+ *
+ * Any CSS and SCSS file within this directory, lib/assets/stylesheets, vendor/assets/stylesheets,
+ * or vendor/assets/stylesheets of plugins, if any, can be referenced here using a relative path.
+ *
+ * You're free to add application-wide styles to this file and they'll appear at the top of the
+ * compiled file, but it's generally better to create a new file per style scope.
+ *
+ *= require_self
+ *= require bootstrap
+ *= require bootstrap3-editable/bootstrap-editable
+ *= require morris
+ *= require awesomplete
+ *= require_tree .
+ */
+
+.contain-align-left {
+    text-align: left;
+}
+table.topalign>tbody>tr>td {
+    vertical-align: top;
+}
+table.topalign>thead>tr>td {
+    vertical-align: bottom;
+}
+tr.cell-valign-center>td {
+    vertical-align: middle;
+}
+tr.cell-noborder>td,tr.cell-noborder>th {
+    border: none;
+}
+table.table-justforlayout>tr>td,
+table.table-justforlayout>tr>th,
+table.table-justforlayout>thead>tr>td,
+table.table-justforlayout>thead>tr>th,
+table.table-justforlayout>tbody>tr>td,
+table.table-justforlayout>tbody>tr>th{
+    border: none;
+}
+table.table-justforlayout {
+    margin-bottom: 0;
+}
+.smaller-text {
+    font-size: .8em;
+}
+.deemphasize {
+    font-size: .8em;
+    color: #888;
+}
+.lighten {
+    color: #888;
+}
+.arvados-filename,
+.arvados-uuid {
+    font-size: .8em;
+    font-family: monospace;
+}
+table .data-size, .table .data-size {
+    text-align: right;
+}
+body .editable-empty {
+    color: #999;
+}
+body .editable-empty:hover {
+    color: #0088cc;
+}
+table.arv-index tbody td.arv-object-AuthorizedKey.arv-attr-public_key {
+    overflow-x: hidden;
+    max-width: 120px;
+}
+table.arv-index > thead > tr > th {
+    border-top: none;
+}
+table.table-fixedlayout {
+    white-space: nowrap;
+    table-layout: fixed;
+}
+table.table-fixedlayout td {
+    overflow: hidden;
+    overflow-x: hidden;
+    text-overflow: ellipsis;
+}
+table.table-smallcontent td {
+    font-size: 85%;
+}
+form input.search-mini {
+    padding: 0 6px;
+}
+form.small-form-margin {
+    margin-bottom: 2px;
+}
+.nowrap {
+    white-space: nowrap;
+}
+input.select-on-focus {
+    font-family: monospace;
+    background: inherit;
+    border: thin #ccc solid;
+    border-radius: .2em;
+    padding: .15em .5em;
+}
+input.select-on-focus:focus {
+    border-color: #9bf;
+}
+
+/* top nav */
+$top-nav-bg: #3c163d;
+$top-nav-bg-bottom: #260027;
+nav.navbar-fixed-top .navbar-brand {
+    color: #79537a;
+    letter-spacing: 0.4em;
+}
+nav.navbar-fixed-top {
+    background: $top-nav-bg;
+    background: linear-gradient(to bottom, $top-nav-bg 0%,$top-nav-bg-bottom 100%);
+}
+.navbar.breadcrumbs {
+    line-height: 50px;
+    border-radius: 0;
+    margin-bottom: 0;
+    border-right: 0;
+    border-left: 0;
+}
+.navbar.breadcrumbs .nav > li > a,
+.navbar.breadcrumbs .nav > li {
+    color: #000;
+}
+.navbar.breadcrumbs .nav > li.nav-separator > i {
+    color: #bbb;
+}
+.navbar.breadcrumbs .navbar-form {
+  margin-top: 0px;
+  margin-bottom: 0px;
+}
+.navbar.breadcrumbs .navbar-text {
+  margin-top: 0px;
+  margin-bottom: 0px;
+}
+
+nav.navbar-fixed-top .navbar-nav.navbar-right > li.open > a,
+nav.navbar-fixed-top .navbar-nav.navbar-right > li.open > a:focus,
+nav.navbar-fixed-top .navbar-nav.navbar-right > li.open > a:hover {
+    background: lighten($top-nav-bg, 5%);
+}
+nav.navbar-fixed-top .navbar-nav.navbar-right > li > a,
+nav.navbar-fixed-top .navbar-nav.navbar-right > li > a:focus,
+nav.navbar-fixed-top .navbar-nav.navbar-right > li > a:hover {
+    color: #fff;
+}
+
+.dax {
+    max-width: 10%;
+    margin-right: 1em;
+    float: left
+}
+
+.smart-scroll {
+    overflow: auto;
+    margin-bottom: -15px;
+}
+
+.infinite-scroller .fa-warning {
+    color: #800;
+}
+
+th[data-sort-order] {
+    cursor: pointer;
+}
+
+.inline-progress-container div.progress {
+    margin-bottom: 0;
+}
+
+.inline-progress-container {
+    width: 100%;
+    display:inline-block;
+}
+
+td.add-tag-button {
+    white-space: normal;
+}
+td.add-tag-button .add-tag-button {
+    margin-right: 4px;
+    opacity: 0.2;
+}
+td.add-tag-button .add-tag-button:hover {
+    opacity: 1;
+}
+span.removable-tag-container {
+    line-height: 1.6;
+}
+.label.removable-tag a {
+    color: #fff;
+    cursor: pointer;
+}
+
+li.notification {
+    padding: 10px;
+}
+
+td.trash-project-msg {
+    white-space: normal;
+}
+
+// See HeaderRowFixer in application.js
+table.table-fixed-header-row {
+    width: 100%;
+    border-spacing: 0px;
+    margin:0;
+}
+table.table-fixed-header-row thead {
+    position:fixed;
+    background: #fff;
+}
+table.table-fixed-header-row tbody {
+    position:relative;
+    top:1.5em;
+}
+
+.dropdown-menu {
+    max-height: 30em;
+    overflow-y: auto;
+}
+
+.dropdown-menu a {
+    cursor: pointer;
+}
+
+.row-fill-height, .row-fill-height>div[class*='col-'] {
+    display: flex;
+}
+.row-fill-height>div[class*='col-']>div {
+    width: 100%;
+}
+
+/* Show editable popover above side-nav */
+.editable-popup.popover {
+    z-index:1055;
+}
+
+/* Do not leave space for left-nav */
+div#wrapper {
+  padding-left: 0;
+}
+
+.arv-description-as-subtitle {
+  padding-bottom: 1em;
+}
+.arv-description-in-table {
+  height: 4em;
+  overflow-x: hidden;
+  overflow-y: hidden;
+}
+.arv-description-in-table:hover {
+  overflow-y: auto;
+}
+
+.btn.btn-nodecorate {
+  border: none;
+}
+svg text {
+    font-size: 6pt;
+}
+
+div.pane-content iframe {
+  width: 100%;
+  border: none;
+}
+span.editable-textile {
+  display: inline-block;
+}
+.text-overflow-ellipsis {
+  white-space: nowrap;
+  overflow: hidden;
+  text-overflow: ellipsis;
+}
+.time-label-divider {
+  font-size: 80%;
+  min-width: 1em;
+  padding: 0px 2px 0px 0px;
+}
+.task-summary-status {
+  font-size: 80%;
+}
+#page-wrapper > div > h2 {
+  margin-top: 0px;
+}
+
+.compute-summary-numbers td {
+  font-size: 150%;
+}
+
+.arv-log-refresh-control {
+  display: none;
+}
+
+/* Hide Angular content until Angular is ready */
+[ng\:cloak], [ng-cloak], .ng-cloak {
+    display: none !important;
+}
+
+/* tabs */
+ul.nav.nav-tabs {
+    font-size: 90%
+}
+
+.hover-dropdown:hover .dropdown-menu {
+  display: block;
+}
+
+.arv-description-as-subtitle .editable-inline,
+.arv-description-as-subtitle .editable-inline .form-group,
+.arv-description-as-subtitle .editable-inline .form-group .editable-input,
+.arv-description-as-subtitle .editable-inline .form-group .editable-input textarea {
+    width: 98%!important;
+}
+
+/* Needed for awesomplete to play nice with bootstrap */
+div.awesomplete {
+    display: block;
+}
+/* Makes awesomplete listings to be scrollable */
+.awesomplete > ul {
+    max-height: 410px;
+    overflow-y: auto;
+}
+
+.dropdown-menu > li > form > button {
+    display: block;
+    padding: 3px 20px;
+    clear: both;
+    font-weight: normal;
+    line-height: 1.428571429;
+    color: #333333;
+    white-space: nowrap;
+    cursor: pointer;
+    text-decoration: none;
+    background: transparent;
+    border-style: none;
+}
+
+.dropdown-menu > li > form > button:hover {
+    text-decoration: none;
+    color: #262626;
+    background-color: #f5f5f5;
+}
diff --git a/apps/workbench/app/assets/stylesheets/authorized_keys.css.scss b/apps/workbench/app/assets/stylesheets/authorized_keys.css.scss
new file mode 100644 (file)
index 0000000..73cfd5b
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the AuthorizedKeys controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
+form .table input[type=text] {
+    width: 600px;
+}
+form .table textarea {
+    width: 600px;
+    height: 10em;
+}
diff --git a/apps/workbench/app/assets/stylesheets/badges.css.scss b/apps/workbench/app/assets/stylesheets/badges.css.scss
new file mode 100644 (file)
index 0000000..ddaf5b9
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+/* Colors
+ * Contextual variations of badges
+ * Bootstrap 3.0 removed contexts for badges, we re-introduce them, based on what is done for labels
+ */
+
+.badge.badge-error {
+  background-color: #b94a48;
+}
+
+.badge.badge-warning {
+  background-color: #f89406;
+}
+
+.badge.badge-success {
+  background-color: #468847;
+}
+
+.badge.badge-info {
+  background-color: #3a87ad;
+}
+
+.badge.badge-inverse {
+  background-color: #333333;
+}
+
+.badge.badge-alert {
+    background: red;
+}
diff --git a/apps/workbench/app/assets/stylesheets/cards.css.scss b/apps/workbench/app/assets/stylesheets/cards.css.scss
new file mode 100644 (file)
index 0000000..3cf29c5
--- /dev/null
@@ -0,0 +1,89 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+.card {
+    padding-top: 20px;
+    margin: 10px 0 20px 0;
+    background-color: #ffffff;
+    border: 1px solid #d8d8d8;
+    border-top-width: 0;
+    border-bottom-width: 2px;
+    -webkit-border-radius: 3px;
+    -moz-border-radius: 3px;
+    border-radius: 3px;
+    -webkit-box-shadow: none;
+    -moz-box-shadow: none;
+    box-shadow: none;
+    -webkit-box-sizing: border-box;
+    -moz-box-sizing: border-box;
+    box-sizing: border-box;
+}
+.card.arvados-object {
+    position: relative;
+    display: inline-block;
+    width: 170px;
+    height: 175px;
+    padding-top: 0;
+    margin-left: 20px;
+    overflow: hidden;
+    vertical-align: top;
+}
+.card.arvados-object .card-top.green {
+    background-color: #53a93f;
+}
+.card.arvados-object .card-top.blue {
+    background-color: #427fed;
+}
+.card.arvados-object .card-top {
+    position: absolute;
+    top: 0;
+    left: 0;
+    display: inline-block;
+    width: 170px;
+    height: 25px;
+    background-color: #ffffff;
+}
+.card.arvados-object .card-info {
+    position: absolute;
+    top: 25px;
+    display: inline-block;
+    width: 100%;
+    height: 101px;
+    overflow: hidden;
+    background: #ffffff;
+    -webkit-box-sizing: border-box;
+    -moz-box-sizing: border-box;
+    box-sizing: border-box;
+}
+.card.arvados-object .card-info .title {
+    display: block;
+    margin: 8px 14px 0 14px;
+    overflow: hidden;
+    font-size: 16px;
+    font-weight: bold;
+    line-height: 18px;
+    color: #404040;
+}
+.card.arvados-object .card-info .desc {
+    display: block;
+    margin: 8px 14px 0 14px;
+    overflow: hidden;
+    font-size: 12px;
+    line-height: 16px;
+    color: #737373;
+    text-overflow: ellipsis;
+}
+.card.arvados-object .card-bottom {
+    position: absolute;
+    bottom: 0;
+    left: 0;
+    display: inline-block;
+    width: 100%;
+    padding: 10px 20px;
+    line-height: 29px;
+    text-align: center;
+    -webkit-box-sizing: border-box;
+    -moz-box-sizing: border-box;
+    box-sizing: border-box;
+}
diff --git a/apps/workbench/app/assets/stylesheets/collections.css.scss b/apps/workbench/app/assets/stylesheets/collections.css.scss
new file mode 100644 (file)
index 0000000..c5cc699
--- /dev/null
@@ -0,0 +1,80 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+/* Style for _show_files tree view. */
+
+ul#collection_files {
+  padding: 0 .5em;
+}
+
+ul.collection_files {
+  line-height: 2.5em;
+  list-style-type: none;
+  padding-left: 2.3em;
+}
+
+ul.collection_files li {
+  clear: both;
+}
+
+.collection_files_row {
+  padding: 1px;  /* Replaced by border for :hover */
+}
+
+.collection_files_row:hover {
+  background-color: #D9EDF7;
+  padding: 0px;
+  border: 1px solid #BCE8F1;
+  border-radius: 3px;
+}
+
+.collection_files_inline {
+  clear: both;
+  width: 80%;
+  margin: 0 3em;
+}
+
+.collection_files_inline img {
+  max-height: 15em;
+}
+
+.collection_files_name {
+  padding-left: .5em;
+  white-space: nowrap;
+  overflow: hidden;
+  text-overflow: ellipsis;
+}
+
+.collection_files_name i.fa-fw:first-child {
+  width: 1.6em;
+}
+
+/*
+  "active" and "inactive" colors are too similar for a toggle switch
+  in the default bootstrap theme.
+  */
+
+$inactive-bg: #5bc0de;
+$active-bg: #39b3d7;
+
+.btn-group.toggle-persist .btn {
+    width: 6em;
+}
+.btn-group.toggle-persist .btn-info {
+    background-color: lighten($inactive-bg, 15%);
+}
+
+.btn-group.toggle-persist .btn-info.active {
+    background-color: $active-bg;
+}
+
+.lock-collection-btn {
+    display: inline-block;
+    padding: .5em 2em;
+    margin: 0 1em;
+}
+
+.collection-tag-field * {
+  display: inline-block;
+}
diff --git a/apps/workbench/app/assets/stylesheets/groups.css.scss b/apps/workbench/app/assets/stylesheets/groups.css.scss
new file mode 100644 (file)
index 0000000..905e72a
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Groups controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/humans.css.scss b/apps/workbench/app/assets/stylesheets/humans.css.scss
new file mode 100644 (file)
index 0000000..29668c2
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Humans controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/job_tasks.css.scss b/apps/workbench/app/assets/stylesheets/job_tasks.css.scss
new file mode 100644 (file)
index 0000000..0d4d260
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the JobTasks controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/jobs.css.scss b/apps/workbench/app/assets/stylesheets/jobs.css.scss
new file mode 100644 (file)
index 0000000..9b1ea65
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+.arv-job-log-window {
+    height: 40em;
+    white-space: pre;
+    overflow: scroll;
+    background: black;
+    color: white;
+    font-family: monospace;
+    font-size: .8em;
+    border: 2px solid black;
+}
+
+.morris-hover-point {
+    text-align: left;
+    width: 100%;
+}
\ No newline at end of file
diff --git a/apps/workbench/app/assets/stylesheets/keep_disks.css.scss b/apps/workbench/app/assets/stylesheets/keep_disks.css.scss
new file mode 100644 (file)
index 0000000..0985d8c
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the KeepDisks controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
+
+/* Margin gives us some space between this and the table above. */
+div.graph {
+    margin-top: 20px;
+}
+div.graph h3, div.graph h4 {
+    text-align: center;
+}
diff --git a/apps/workbench/app/assets/stylesheets/links.css.scss b/apps/workbench/app/assets/stylesheets/links.css.scss
new file mode 100644 (file)
index 0000000..cf4c4e7
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Links controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/loading.css.scss.erb b/apps/workbench/app/assets/stylesheets/loading.css.scss.erb
new file mode 100644 (file)
index 0000000..ee6ca34
--- /dev/null
@@ -0,0 +1,72 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+.loading {
+    opacity: 0;
+}
+
+.spinner {
+    /* placeholder for stuff like $.find('.spinner').detach() */
+}
+
+.spinner-32px {
+    background-image: url('<%= asset_path('spinner_32px.gif') %>');
+    background-repeat: no-repeat;
+    width: 32px;
+    height: 32px;
+}
+
+.spinner-h-center {
+    margin-left: auto;
+    margin-right: auto;
+}
+
+.spinner-v-center {
+    position: relative;
+    top: 45%;
+}
+
+.rotating {
+    color: #f00;
+    /* Chrome and Firefox, at least in Linux, render a horrible shaky
+       mess -- better not to bother.
+
+      animation-name: rotateThis;
+      animation-duration: 2s;
+      animation-iteration-count: infinite;
+      animation-timing-function: linear;
+      -moz-animation-name: rotateThis;
+      -moz-animation-duration: 2s;
+      -moz-animation-iteration-count: infinite;
+      -moz-animation-timing-function: linear;
+      -ms-animation-name: rotateThis;
+      -ms-animation-duration: 2s;
+      -ms-animation-iteration-count: infinite;
+      -ms-animation-timing-function: linear;
+      -webkit-animation-name: rotateThis;
+      -webkit-animation-duration: 2s;
+      -webkit-animation-iteration-count: infinite;
+      -webkit-animation-timing-function: linear;
+      */
+}
+
+@keyframes rotateThis {
+  from { transform: rotate( 0deg );   }
+  to   { transform: rotate( 360deg ); }
+}
+
+@-webkit-keyframes rotateThis {
+  from { -webkit-transform: rotate( 0deg );   }
+  to   { -webkit-transform: rotate( 360deg ); }
+}
+
+@-moz-keyframes rotateThis {
+  from { -moz-transform: rotate( 0deg );   }
+  to   { -moz-transform: rotate( 360deg ); }
+}
+
+@-ms-keyframes rotateThis {
+  from { -ms-transform: rotate( 0deg );   }
+  to   { -ms-transform: rotate( 360deg ); }
+}
diff --git a/apps/workbench/app/assets/stylesheets/log_viewer.scss b/apps/workbench/app/assets/stylesheets/log_viewer.scss
new file mode 100644 (file)
index 0000000..c3fa8b9
--- /dev/null
@@ -0,0 +1,68 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+.log-viewer-table {
+ width: 100%;
+ font-family: "Lucida Console", Monaco, monospace;
+ font-size: 11px;
+ table-layout: fixed;
+ thead tr {
+   th {
+     padding-right: 1em;
+   }
+   th.id {
+     display: none;
+   }
+   th.timestamp {
+     width: 15em;
+   }
+   th.type {
+     width: 8em;
+   }
+   th.taskid {
+     width: 4em;
+   }
+   th.node {
+     width: 8em;
+   }
+   th.slot {
+     width: 3em;
+   }
+   th.message {
+     width: auto;
+   }
+ }
+ tbody tr {
+   vertical-align: top;
+   td {
+     padding-right: 1em;
+   }
+   td.id {
+     display: none;
+   }
+   td.taskid {
+     text-align: right;
+   }
+   td.slot {
+     text-align: right;
+   }
+   td.message {
+     word-wrap: break-word;
+   }
+ }
+}
+
+.log-viewer-button {
+  width: 12em;
+}
+
+.log-viewer-paging-div {
+  font-size: 18px;
+  text-align: center;
+}
+
+.log-viewer-page-num {
+  padding-left: .3em;
+  padding-right: .3em;
+}
\ No newline at end of file
diff --git a/apps/workbench/app/assets/stylesheets/logs.css.scss b/apps/workbench/app/assets/stylesheets/logs.css.scss
new file mode 100644 (file)
index 0000000..c8b22f9
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Logs controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/nodes.css.scss b/apps/workbench/app/assets/stylesheets/nodes.css.scss
new file mode 100644 (file)
index 0000000..a7b0861
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Nodes controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/pipeline_instances.css.scss b/apps/workbench/app/assets/stylesheets/pipeline_instances.css.scss
new file mode 100644 (file)
index 0000000..135685c
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the PipelineInstances controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
+
+.pipeline-compare-headrow div {
+    padding-top: .5em;
+    padding-bottom: .5em;
+}
+.pipeline-compare-headrow:first-child {
+    border-bottom: 1px solid black;
+}
+.pipeline-compare-row .notnormal {
+    background: #ffffaa;
+}
+
+.pipeline_color_legend {
+    margin-top: 0.2em;
+    padding: 0.2em 1em;
+    border: 1px solid #000;
+}
+.pipeline_color_legend a {
+    color: #000;
+}
+
+.col-md-1.pipeline-instance-spacing {
+  padding: 0px;
+  margin: 0px;
+}
+
+.col-md-3.pipeline-instance-spacing > .progress {
+  padding: 0px;
+  margin: 0px;
+}
\ No newline at end of file
diff --git a/apps/workbench/app/assets/stylesheets/pipeline_templates.css.scss b/apps/workbench/app/assets/stylesheets/pipeline_templates.css.scss
new file mode 100644 (file)
index 0000000..329f0ed
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the PipelineTemplates controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
+
+.pipeline_color_legend {
+    padding-left: 1em;
+    padding-right: 1em;
+}
+
+table.pipeline-components-table {
+  width: 100%;
+  table-layout: fixed;
+  overflow: hidden;
+}
+
+table.pipeline-components-table thead th {
+  vertical-align: bottom;
+}
+table.pipeline-components-table div.progress {
+  margin-bottom: 0;
+}
+
+table.pipeline-components-table td {
+  overflow: hidden;
+  text-overflow: ellipsis;
+}
+
+td.required {
+  background: #ffdddd;
+}
diff --git a/apps/workbench/app/assets/stylesheets/projects.css.scss b/apps/workbench/app/assets/stylesheets/projects.css.scss
new file mode 100644 (file)
index 0000000..10c2ed0
--- /dev/null
@@ -0,0 +1,71 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+.arv-project-list > .row {
+    padding-top: 5px;
+    padding-bottom: 5px;
+    padding-right: 1em;
+}
+.arv-project-list > .row.project:hover {
+    background: #d9edf7;
+}
+div.scroll-20em {
+    height: 20em;
+    overflow-y: scroll;
+}
+
+.compute-summary {
+    margin: 0.15em 0em 0.15em 0em;
+    display: inline-block;
+}
+
+.compute-summary-head {
+    margin-left: 0.3em;
+}
+
+.compute-detail {
+    border: 1px solid;
+    border-color: #DDD;
+    border-radius: 3px;
+    padding: 0.2em;
+    position: absolute;
+    z-index: 1;
+    background: white;
+}
+
+.compute-detail:hover {
+   cursor: pointer;
+}
+
+.compute-node-summary:hover {
+  cursor: pointer;
+}
+
+.compute-summary-numbers .panel {
+  margin-bottom: 0px;
+}
+
+.compute-summary-numbers table {
+  width: 100%;
+  td,th {
+    text-align: center;
+  }
+}
+
+.compute-summary-nodelist {
+  margin-bottom: 10px;
+}
+
+.dashboard-panel-info-row {
+  padding: .5em;
+  border-radius: .3em;
+}
+
+.dashboard-panel-info-row:hover {
+  background-color: #D9EDF7;
+}
+
+.progress-bar.progress-bar-default {
+  background-color: #999;
+}
\ No newline at end of file
diff --git a/apps/workbench/app/assets/stylesheets/repositories.css.scss b/apps/workbench/app/assets/stylesheets/repositories.css.scss
new file mode 100644 (file)
index 0000000..1dd9a16
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Repositories controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/sb-admin.css.scss b/apps/workbench/app/assets/stylesheets/sb-admin.css.scss
new file mode 100644 (file)
index 0000000..9bae214
--- /dev/null
@@ -0,0 +1,164 @@
+/* 
+Author: Start Bootstrap - http://startbootstrap.com
+'SB Admin' HTML Template by Start Bootstrap
+
+All Start Bootstrap themes are licensed under Apache 2.0. 
+For more info and more free Bootstrap 3 HTML themes, visit http://startbootstrap.com!
+*/
+
+/* ATTN: This is mobile-first CSS - to adjust styles for screens 768px and up, use the media query near the bottom of this file! */
+
+/* Global Styles */
+
+body {
+  margin-top: 50px;
+}
+
+#wrapper {
+  padding-left: 0;
+}
+
+#page-wrapper {
+  width: 100%;
+  padding: 5px 15px;
+}
+
+/* Nav Messages */
+
+.messages-dropdown .dropdown-menu .message-preview .avatar,
+.messages-dropdown .dropdown-menu .message-preview .name,
+.messages-dropdown .dropdown-menu .message-preview .message,
+.messages-dropdown .dropdown-menu .message-preview .time {
+  display: block;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .avatar {
+  float: left;
+  margin-right: 15px;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .name {
+  font-weight: bold;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .message {
+  font-size: 12px;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .time {
+  font-size: 12px;
+}
+
+
+/* Nav Announcements */
+
+.announcement-heading {
+  font-size: 50px;
+  margin: 0;
+}
+
+.announcement-text {
+  margin: 0;
+}
+
+/* Table Headers */
+
+table.tablesorter thead {
+  cursor: pointer;
+}
+
+table.tablesorter thead tr th:hover {
+  background-color: #f5f5f5;
+}
+
+/* Flot Chart Containers */
+
+.flot-chart {
+  display: block;
+  height: 400px;
+}
+
+.flot-chart-content {
+  width: 100%;
+  height: 100%;
+}
+
+/* Edit Below to Customize Widths > 768px */
+@media (min-width:768px) {
+
+  /* Wrappers */
+
+  #wrapper {
+        padding-left: 225px;
+  }
+
+  #page-wrapper {
+        padding: 15px 25px;
+  }
+
+  /* Side Nav */
+
+  .side-nav {
+        margin-left: -225px;
+        left: 225px;
+        width: 225px;
+        position: fixed;
+        top: 50px;
+        height: calc(100% - 50px);
+        border-radius: 0;
+        border: none;
+        background-color: #f8f8f8;
+        overflow-y: auto;
+        overflow-x: hidden; /* no left nav scroll bar */
+  }
+
+  /* Bootstrap Default Overrides - Customized Dropdowns for the Side Nav */
+
+  .side-nav>li.dropdown>ul.dropdown-menu {
+        position: relative;
+        min-width: 225px;
+        margin: 0;
+        padding: 0;
+        border: none;
+        border-radius: 0;
+        background-color: transparent;
+        box-shadow: none;
+        -webkit-box-shadow: none;
+  }
+
+  .side-nav>li.dropdown>ul.dropdown-menu>li>a {
+        color: #777777;
+        padding: 15px 15px 15px 25px;
+  }
+
+  .side-nav>li.dropdown>ul.dropdown-menu>li>a:hover,
+  .side-nav>li.dropdown>ul.dropdown-menu>li>a.active,
+  .side-nav>li.dropdown>ul.dropdown-menu>li>a:focus {
+        background-color: #ffffff;
+  }
+
+  .side-nav>li>a {
+        width: 225px;
+  }
+
+  .navbar-default .navbar-nav.side-nav>li>a:hover,
+  .navbar-default .navbar-nav.side-nav>li>a:focus {
+        background-color: #ffffff;
+  }
+
+  /* Nav Messages */
+
+  .messages-dropdown .dropdown-menu {
+        min-width: 300px;
+  }
+
+  .messages-dropdown .dropdown-menu li a {
+        white-space: normal;
+  }
+
+  .navbar-collapse {
+    padding-left: 15px !important;
+    padding-right: 15px !important;
+  }
+
+}
diff --git a/apps/workbench/app/assets/stylesheets/scaffolds.css.scss b/apps/workbench/app/assets/stylesheets/scaffolds.css.scss
new file mode 100644 (file)
index 0000000..23e0f76
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+/*
+  We don't want the default Rails CSS, so the rules are deleted. This
+  empty file is left here so Rails doesn't re-add it next time it
+  generates a scaffold.
+  */
diff --git a/apps/workbench/app/assets/stylesheets/select_modal.css.scss b/apps/workbench/app/assets/stylesheets/select_modal.css.scss
new file mode 100644 (file)
index 0000000..bd7ff92
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+.selectable-container > .row {
+    padding-top: 5px;
+    padding-bottom: 5px;
+    padding-right: 1em;
+    color: #888;
+}
+.selectable-container > .row.selectable {
+    color: #000;
+}
+.selectable.active, .selectable:hover {
+    background: #d9edf7;
+    cursor: pointer;
+}
+.selectable.active,
+.selectable.active *,
+.selectable.active:hover,
+.selectable.active:hover * {
+    background: #428bca;
+    color: #fff;
+}
+.selectable-container > .row.class-separator {
+    background: #ddd;
+}
diff --git a/apps/workbench/app/assets/stylesheets/sessions.css.scss b/apps/workbench/app/assets/stylesheets/sessions.css.scss
new file mode 100644 (file)
index 0000000..e08b086
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Sessions controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/specimens.css.scss b/apps/workbench/app/assets/stylesheets/specimens.css.scss
new file mode 100644 (file)
index 0000000..60d630c
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Specimens controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/traits.css.scss b/apps/workbench/app/assets/stylesheets/traits.css.scss
new file mode 100644 (file)
index 0000000..7d2f713
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Traits controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/user_agreements.css.scss b/apps/workbench/app/assets/stylesheets/user_agreements.css.scss
new file mode 100644 (file)
index 0000000..d9eb5eb
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the user_agreements controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/users.css.scss b/apps/workbench/app/assets/stylesheets/users.css.scss
new file mode 100644 (file)
index 0000000..a087ca3
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Users controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/virtual_machines.css.scss b/apps/workbench/app/assets/stylesheets/virtual_machines.css.scss
new file mode 100644 (file)
index 0000000..4a94d45
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the VirtualMachines controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/controllers/actions_controller.rb b/apps/workbench/app/controllers/actions_controller.rb
new file mode 100644 (file)
index 0000000..beeae07
--- /dev/null
@@ -0,0 +1,266 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require "arvados/collection"
+
+class ActionsController < ApplicationController
+
+  # Skip require_thread_api_token if this is a show action
+  # for an object uuid that supports anonymous access.
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name and
+    params['uuid'] and
+    model_class.in?([Collection, Group, Job, PipelineInstance, PipelineTemplate])
+  }
+  skip_filter :require_thread_api_token, only: [:report_issue_popup, :report_issue]
+  skip_filter :check_user_agreements, only: [:report_issue_popup, :report_issue]
+
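+  # expose_action registers a method in @@exposed_actions and defines
+  # it on the controller; the generic post() dispatcher below will
+  # only invoke actions registered this way.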
+  @@exposed_actions = {}
+  def self.expose_action method, &block
+    @@exposed_actions[method] = true
+    define_method method, block
+  end
+
+  def model_class
+    ArvadosBase::resource_class_for_uuid(params[:uuid])
+  end
+
+  def show
+    @object = model_class.andand.find(params[:uuid])
+    if @object.is_a? Link and
+        @object.link_class == 'name' and
+        ArvadosBase::resource_class_for_uuid(@object.head_uuid) == Collection
+      redirect_to collection_path(id: @object.uuid)
+    elsif @object.is_a?(Group) and @object.group_class == 'project'
+      redirect_to project_path(id: @object.uuid)
+    elsif @object
+      redirect_to @object
+    else
+      raise ActiveRecord::RecordNotFound
+    end
+  end
+
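+  # Generic POST dispatcher: invoke the first exposed action whose
+  # name appears as a request parameter, else bounce to the referrer.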
+  def post
+    params.keys.collect(&:to_sym).each do |param|
+      if @@exposed_actions[param]
+        return self.send(param)
+      end
+    end
+    redirect_to :back
+  end
+
+  expose_action :copy_selections_into_project do
+    move_or_copy :copy
+  end
+
+  expose_action :move_selections_into_project do
+    move_or_copy :move
+  end
+
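+  # Copy or move each selected object into the destination project
+  # (@object). On API servers where collections have no name column,
+  # a Collection is "added" by creating a name link instead.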
+  def move_or_copy action
+    uuids_to_add = params["selection"]
+    uuids_to_add = [ uuids_to_add ] unless uuids_to_add.is_a? Array
+    resource_classes = uuids_to_add.
+      collect { |x| ArvadosBase::resource_class_for_uuid(x) }.
+      uniq
+    resource_classes.each do |resource_class|
+      resource_class.filter([['uuid','in',uuids_to_add]]).each do |src|
+        if resource_class == Collection and not Collection.attribute_info.include?(:name)
+          dst = Link.new(owner_uuid: @object.uuid,
+                         tail_uuid: @object.uuid,
+                         head_uuid: src.uuid,
+                         link_class: 'name',
+                         name: src.uuid)
+        else
+          case action
+          when :copy
+            dst = src.dup
+            if dst.respond_to? :'name='
+              if dst.name
+                dst.name = "Copy of #{dst.name}"
+              else
+                dst.name = "Copy of unnamed #{dst.class_for_display.downcase}"
+              end
+            end
+            if resource_class == Collection
+              dst.manifest_text = Collection.select([:manifest_text]).where(uuid: src.uuid).first.manifest_text
+            end
+          when :move
+            dst = src
+          else
+            raise ArgumentError.new "Unsupported action #{action}"
+          end
+          dst.owner_uuid = @object.uuid
+          dst.tail_uuid = @object.uuid if dst.class == Link
+        end
+        begin
+          dst.save!
+        rescue
+          dst.name += " (#{Time.now.localtime})" if dst.respond_to? :name=
+          dst.save!
+        end
+      end
+    end
+    if (resource_classes == [Collection] and
+        @object.is_a? Group and
+        @object.group_class == 'project') or
+        @object.is_a? User
+      # In the common case where only collections are copied/moved
+      # into a project, it's polite to land on the collections tab on
+      # the destination project.
+      redirect_to project_url(@object.uuid, anchor: 'Data_collections')
+    else
+      # Otherwise just land on the default (Description) tab.
+      redirect_to @object
+    end
+  end
+
+  expose_action :combine_selected_files_into_collection do
+    uuids, source_paths = selected_collection_files params
+
+    new_coll = Arv::Collection.new
+    Collection.where(uuid: uuids.uniq).
+        select([:uuid, :manifest_text]).each do |coll|
+      src_coll = Arv::Collection.new(coll.manifest_text)
+      src_pathlist = source_paths[coll.uuid]
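+      # A blank path in the selection means "the whole collection":
+      # copy every file, preserving its original stream. Otherwise
+      # the selected files all land in the top-level stream ".".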
+      if src_pathlist.any?(&:blank?)
+        src_pathlist = src_coll.each_file_path
+        destdir = nil
+      else
+        destdir = "."
+      end
+      src_pathlist.each do |src_path|
+        src_path = src_path.sub(/^(\.\/|\/|)/, "./")
+        src_stream, _, basename = src_path.rpartition("/")
+        dst_stream = destdir || src_stream
+        # Generate a unique name by adding (1), (2), etc. to it.
+        # If the filename has a dot that's not at the beginning, insert the
+        # number just before that.  Otherwise, append the number to the name.
+        if match = basename.match(/[^\.]\./)
+          suffix_start = match.begin(0) + 1
+        else
+          suffix_start = basename.size
+        end
+        suffix_size = 0
+        dst_path = nil
+        loop.each_with_index do |_, try_count|
+          dst_path = "#{dst_stream}/#{basename}"
+          break unless new_coll.exist?(dst_path)
+          uniq_suffix = "(#{try_count + 1})"
+          basename[suffix_start, suffix_size] = uniq_suffix
+          suffix_size = uniq_suffix.size
+        end
+        new_coll.cp_r(src_path, dst_path, src_coll)
+      end
+    end
+
+    coll_attrs = {
+      manifest_text: new_coll.manifest_text,
+      name: "Collection created at #{Time.now.localtime}",
+    }
+    flash = {}
+
+    # set owner_uuid to current project, provided it is writable
+    action_data = Oj.load(params['action_data'] || "{}")
+    if action_data['current_project_uuid'] and
+        current_project = Group.find?(action_data['current_project_uuid']) and
+        current_project.writable_by.andand.include?(current_user.uuid)
+      coll_attrs[:owner_uuid] = current_project.uuid
+      flash[:message] =
+        "Created new collection in the project #{current_project.name}."
+    else
+      flash[:message] = "Created new collection in your Home project."
+    end
+
+    newc = Collection.create!(coll_attrs)
+    source_paths.each_key do |src_uuid|
+      unless Link.create({
+                           tail_uuid: src_uuid,
+                           head_uuid: newc.uuid,
+                           link_class: "provenance",
+                           name: "provided",
+                         })
+        flash[:error] = "
+An error occurred when saving provenance information for this collection.
+You can try recreating the collection to get a copy with full provenance data."
+        break
+      end
+    end
+    redirect_to(newc, flash: flash)
+  end
+
+  def report_issue_popup
+    respond_to do |format|
+      format.js
+      format.html
+    end
+  end
+
+  def report_issue
+    logger.warn "report_issue: #{params.inspect}"
+
+    respond_to do |format|
+      IssueReporter.send_report(current_user, params).deliver
+      format.js {render nothing: true}
+    end
+  end
+
+  # star / unstar the current project
+  def star
+    links = Link.where(tail_uuid: current_user.uuid,
+                       head_uuid: @object.uuid,
+                       link_class: 'star')
+
+    if params['status'] == 'create'
+      # create 'star' link if one does not already exist
+      if !links.andand.any?
+        dst = Link.new(owner_uuid: current_user.uuid,
+                       tail_uuid: current_user.uuid,
+                       head_uuid: @object.uuid,
+                       link_class: 'star',
+                       name: @object.uuid)
+        dst.save!
+      end
+    else # delete any existing 'star' links
+      if links.andand.any?
+        links.each do |link|
+          link.destroy
+        end
+      end
+    end
+
+    respond_to do |format|
+      format.js
+    end
+  end
+
+  protected
+
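+  # Append "(1)", "(2)", ... to the first dot-separated segment of
+  # filename until it no longer collides with manifest_files.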
+  def derive_unique_filename filename, manifest_files
+    filename_parts = filename.split('.')
+    filename_part = filename_parts[0]
+    counter = 1
+    loop do
+      return filename if !manifest_files.include? filename
+      filename_parts[0] = filename_part + "(" + counter.to_s + ")"
+      filename = filename_parts.join('.')
+      counter += 1
+    end
+  end
+
+end
diff --git a/apps/workbench/app/controllers/api_client_authorizations_controller.rb b/apps/workbench/app/controllers/api_client_authorizations_controller.rb
new file mode 100644 (file)
index 0000000..c7ff560
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ApiClientAuthorizationsController < ApplicationController
+
+  def index_pane_list
+    %w(Recent Help)
+  end
+
+end
diff --git a/apps/workbench/app/controllers/application_controller.rb b/apps/workbench/app/controllers/application_controller.rb
new file mode 100644 (file)
index 0000000..8d9e857
--- /dev/null
@@ -0,0 +1,1340 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ApplicationController < ActionController::Base
+  include ArvadosApiClientHelper
+  include ApplicationHelper
+
+  respond_to :html, :json, :js
+  protect_from_forgery
+
+  ERROR_ACTIONS = [:render_error, :render_not_found]
+
+  around_filter :thread_clear
+  around_filter :set_current_request_id
+  around_filter :set_thread_api_token
+  # Methods that don't require login should
+  #   skip_around_filter :require_thread_api_token
+  around_filter :require_thread_api_token, except: ERROR_ACTIONS
+  before_filter :ensure_arvados_api_exists, only: [:index, :show]
+  before_filter :set_cache_buster
+  before_filter :accept_uuid_as_id_param, except: ERROR_ACTIONS
+  before_filter :check_user_agreements, except: ERROR_ACTIONS
+  before_filter :check_user_profile, except: ERROR_ACTIONS
+  before_filter :load_filters_and_paging_params, except: ERROR_ACTIONS
+  before_filter :find_object_by_uuid, except: [:create, :index, :choose] + ERROR_ACTIONS
+  theme :select_theme
+
+  begin
+    rescue_from(ActiveRecord::RecordNotFound,
+                ActionController::RoutingError,
+                ActionController::UnknownController,
+                AbstractController::ActionNotFound,
+                with: :render_not_found)
+    rescue_from(Exception,
+                ActionController::UrlGenerationError,
+                with: :render_exception)
+  end
+
+  def set_cache_buster
+    response.headers["Cache-Control"] = "no-cache, no-store, max-age=0, must-revalidate"
+    response.headers["Pragma"] = "no-cache"
+    response.headers["Expires"] = "Fri, 01 Jan 1990 00:00:00 GMT"
+  end
+
+  def unprocessable(message=nil)
+    @errors ||= []
+
+    @errors << message if message
+    render_error status: 422
+  end
+
+  def render_error(opts={})
+    # Helpers can rely on the presence of @errors to know they're
+    # being used in an error page.
+    @errors ||= []
+    opts[:status] ||= 500
+    respond_to do |f|
+      # json must come before html here, so it gets used as the
+      # default format when js is requested by the client. This lets
+      # ajax:error callback parse the response correctly, even though
+      # the browser can't.
+      f.json { render opts.merge(json: {success: false, errors: @errors}) }
+      f.html { render({action: 'error'}.merge(opts)) }
+    end
+  end
+
+  def render_exception(e)
+    logger.error e.inspect
+    logger.error e.backtrace.collect { |x| x + "\n" }.join('') if e.backtrace
+    err_opts = {status: 422}
+    if e.is_a?(ArvadosApiClient::ApiError)
+      err_opts.merge!(action: 'api_error', locals: {api_error: e})
+      @errors = e.api_response[:errors]
+    elsif @object.andand.errors.andand.full_messages.andand.any?
+      @errors = @object.errors.full_messages
+    else
+      @errors = [e.to_s]
+    end
+    # Make user information available on the error page, falling back to the
+    # session cache if the API server is unavailable.
+    begin
+      load_api_token(session[:arvados_api_token])
+    rescue ArvadosApiClient::ApiError
+      unless session[:user].nil?
+        begin
+          Thread.current[:user] = User.new(session[:user])
+        rescue ArvadosApiClient::ApiError
+          # This can happen if User's columns are unavailable.  Nothing to do.
+        end
+      end
+    end
+    # Preload projects trees for the template.  If that's not doable, set empty
+    # trees so error page rendering can proceed.  (It's easier to rescue the
+    # exception here than in a template.)
+    unless current_user.nil?
+      begin
+        my_starred_projects current_user
+        build_my_wanted_projects_tree current_user
+      rescue ArvadosApiClient::ApiError
+        # Fall back to the default-setting code later.
+      end
+    end
+    @starred_projects ||= []
+    @my_wanted_projects_tree ||= []
+    render_error(err_opts)
+  end
+
+  def render_not_found(e=ActionController::RoutingError.new("Path not found"))
+    logger.error e.inspect
+    @errors = ["Path not found"]
+    set_thread_api_token do
+      self.render_error(action: '404', status: 404)
+    end
+  end
+
+  # params[:order]:
+  #
+  # The order can be left empty to allow it to default.
+  # Or it can be a comma-separated list of real database column names, one per model.
+  # Column names should always be qualified by a table name; a direction is optional, defaulting to asc
+  # (e.g. "collections.name" or "collections.name desc").
+  # If a column name is specified, that table will be sorted by that column.
+  # If objects from several models will be shown (such as in the Pipelines and processes tab),
+  # a sort column name can optionally be specified for each model, passed as a comma-separated list (e.g. "jobs.script, pipeline_instances.name").
+  # Currently only one sort column name and direction can be specified for each model.
+  def load_filters_and_paging_params
+    if params[:order].blank?
+      @order = 'created_at desc'
+    elsif params[:order].is_a? Array
+      @order = params[:order]
+    else
+      begin
+        @order = JSON.load(params[:order])
+      rescue
+        @order = params[:order].split(',')
+      end
+    end
+    @order = [@order] unless @order.is_a? Array
+
+    @limit ||= 200
+    if params[:limit]
+      @limit = params[:limit].to_i
+    end
+
+    @offset ||= 0
+    if params[:offset]
+      @offset = params[:offset].to_i
+    end
+
+    @filters ||= []
+    if params[:filters]
+      filters = params[:filters]
+      if filters.is_a? String
+        filters = Oj.load filters
+      elsif filters.is_a? Array
+        filters = filters.collect do |filter|
+          if filter.is_a? String
+            # Accept filters[]=["foo","=","bar"]
+            Oj.load filter
+          else
+            # Accept filters=[["foo","=","bar"]]
+            filter
+          end
+        end
+      end
+      # After this, params[:filters] can be trusted to be an array of arrays:
+      params[:filters] = filters
+      @filters += filters
+    end
+  end
+
+  def find_objects_for_index
+    @objects ||= model_class
+    @objects = @objects.filter(@filters).limit(@limit).offset(@offset)
+    @objects.fetch_multiple_pages(false)
+  end
+
+  def render_index
+    respond_to do |f|
+      f.json {
+        if params[:partial]
+          @next_page_href = next_page_href(partial: params[:partial], filters: @filters.to_json)
+          render json: {
+            content: render_to_string(partial: "show_#{params[:partial]}",
+                                      formats: [:html]),
+            next_page_href: @next_page_href
+          }
+        else
+          render json: @objects
+        end
+      }
+      f.html {
+        if params[:tab_pane]
+          render_pane params[:tab_pane]
+        else
+          render
+        end
+      }
+      f.js { render }
+    end
+  end
+
+  helper_method :render_pane
+  def render_pane tab_pane, opts={}
+    render_opts = {
+      partial: 'show_' + tab_pane.downcase,
+      locals: {
+        comparable: self.respond_to?(:compare),
+        objects: @objects,
+        tab_pane: tab_pane
+      }.merge(opts[:locals] || {})
+    }
+    if opts[:to_string]
+      render_to_string render_opts
+    else
+      render render_opts
+    end
+  end
+
+  def ensure_arvados_api_exists
+    if model_class.is_a?(Class) && model_class < ArvadosBase && !model_class.api_exists?(params['action'].to_sym)
+      @errors = ["#{params['action']} method is not supported for #{params['controller']}"]
+      return render_error(status: 404)
+    end
+  end
+
+  def index
+    find_objects_for_index if !@objects
+    render_index
+  end
+
+  helper_method :next_page_offset
+  def next_page_offset objects=nil
+    if !objects
+      objects = @objects
+    end
+    if objects.respond_to?(:result_offset) and
+        objects.respond_to?(:result_limit)
+      next_offset = objects.result_offset + objects.result_limit
+      if objects.respond_to?(:items_available) and (next_offset < objects.items_available)
+        next_offset
+      elsif @objects.results.size > 0 and (params[:count] == 'none' or
+           (params[:controller] == 'search' and params[:action] == 'choose'))
+        last_object_class = @objects.last.class
+        if params['last_object_class'].nil? or params['last_object_class'] == last_object_class.to_s
+          next_offset
+        else
+          @objects.select{|obj| obj.class == last_object_class}.size
+        end
+      else
+        nil
+      end
+    end
+  end
+
+  helper_method :next_page_href
+  def next_page_href with_params={}
+    if next_page_offset
+      url_for with_params.merge(offset: next_page_offset)
+    end
+  end
+
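+  # Build filters for keyset-style paging: drop any paging filters
+  # left over from the previous page, then pin the next page past the
+  # last seen created_at (using the supplied operator) and exclude the
+  # uuids that shared that timestamp and have already been shown.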
+  helper_method :next_page_filters
+  def next_page_filters nextpage_operator
+    next_page_filters = @filters.reject do |attr, op, val|
+      (attr == 'created_at' and op == nextpage_operator) or
+      (attr == 'uuid' and op == 'not in')
+    end
+
+    if @objects.any?
+      last_created_at = @objects.last.created_at
+
+      last_uuids = []
+      @objects.each do |obj|
+        last_uuids << obj.uuid if obj.created_at.eql?(last_created_at)
+      end
+
+      next_page_filters += [['created_at', nextpage_operator, last_created_at]]
+      next_page_filters += [['uuid', 'not in', last_uuids]]
+    end
+
+    next_page_filters
+  end
+
+  def show
+    if !@object
+      return render_not_found("object not found")
+    end
+    respond_to do |f|
+      f.json do
+        extra_attrs = { href: url_for(action: :show, id: @object) }
+        @object.textile_attributes.each do |textile_attr|
+          extra_attrs.merge!({ "#{textile_attr}Textile" => view_context.render_markup(@object.attributes[textile_attr]) })
+        end
+        render json: @object.attributes.merge(extra_attrs)
+      end
+      f.html {
+        if params['tab_pane']
+          render_pane(if params['tab_pane'].is_a? Hash then params['tab_pane']["name"] else params['tab_pane'] end)
+        elsif request.request_method.in? ['GET', 'HEAD']
+          render
+        else
+          redirect_to (params[:return_to] ||
+                       polymorphic_url(@object,
+                                       anchor: params[:redirect_to_anchor]))
+        end
+      }
+      f.js { render }
+    end
+  end
+
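+  # For ajax requests, respond with the target href as JSON instead
+  # of a 302, so client-side code can handle the navigation itself.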
+  def redirect_to uri, *args
+    if request.xhr?
+      if not uri.is_a? String
+        uri = polymorphic_url(uri)
+      end
+      render json: {href: uri}
+    else
+      super
+    end
+  end
+
+  def choose
+    params[:limit] ||= 40
+    respond_to do |f|
+      if params[:partial]
+        f.json {
+          find_objects_for_index if !@objects
+          render json: {
+            content: render_to_string(partial: "choose_rows.html",
+                                      formats: [:html]),
+            next_page_href: next_page_href(partial: params[:partial])
+          }
+        }
+      end
+      f.js {
+        find_objects_for_index if !@objects
+        render partial: 'choose', locals: {multiple: params[:multiple]}
+      }
+    end
+  end
+
+  def render_content
+    if !@object
+      return render_not_found("object not found")
+    end
+  end
+
+  def new
+    @object = model_class.new
+  end
+
+  def update
+    @updates ||= params[@object.resource_param_name.to_sym]
+    @updates.keys.each do |attr|
+      if @object.send(attr).is_a? Hash
+        if @updates[attr].is_a? String
+          @updates[attr] = Oj.load @updates[attr]
+        end
+        if params[:merge] || params["merge_#{attr}".to_sym]
+          # Merge provided Hash with current Hash, instead of
+          # replacing.
+          @updates[attr] = @object.send(attr).with_indifferent_access.
+            deep_merge(@updates[attr].with_indifferent_access)
+        end
+      end
+    end
+    if @object.update_attributes @updates
+      show
+    else
+      self.render_error status: 422
+    end
+  end
+
+  def create
+    @new_resource_attrs ||= params[model_class.to_s.underscore.singularize]
+    @new_resource_attrs ||= {}
+    @new_resource_attrs.reject! { |k,v| k.to_s == 'uuid' }
+    @object ||= model_class.new @new_resource_attrs, params["options"]
+
+    if @object.save
+      show
+    else
+      render_error status: 422
+    end
+  end
+
+  # Clone the given object, merging any attribute values supplied as
+  # with a create action.
+  def copy
+    @new_resource_attrs ||= params[model_class.to_s.underscore.singularize]
+    @new_resource_attrs ||= {}
+    @object = @object.dup
+    @object.update_attributes @new_resource_attrs
+    if not @new_resource_attrs[:name] and @object.respond_to? :name
+      if @object.name and @object.name != ''
+        @object.name = "Copy of #{@object.name}"
+      else
+        @object.name = ""
+      end
+    end
+    @object.save!
+    show
+  end
+
+  def destroy
+    if @object.destroy
+      respond_to do |f|
+        f.json { render json: @object }
+        f.html {
+          redirect_to(params[:return_to] || :back)
+        }
+        f.js { render }
+      end
+    else
+      self.render_error status: 422
+    end
+  end
+
+  def current_user
+    Thread.current[:user]
+  end
+
+  def model_class
+    controller_name.classify.constantize
+  end
+
+  def breadcrumb_page_name
+    (@breadcrumb_page_name ||
+     (@object.friendly_link_name if @object.respond_to? :friendly_link_name) ||
+     action_name)
+  end
+
+  def index_pane_list
+    %w(Recent)
+  end
+
+  def show_pane_list
+    %w(Attributes Advanced)
+  end
+
+  def set_share_links
+    @user_is_manager = false
+    @share_links = []
+
+    if @object.uuid != current_user.andand.uuid
+      begin
+        @share_links = Link.permissions_for(@object)
+        @user_is_manager = true
+      rescue ArvadosApiClient::AccessForbiddenException,
+        ArvadosApiClient::NotFoundException
+      end
+    end
+  end
+
+  def share_with
+    if not params[:uuids].andand.any?
+      @errors = ["No user/group UUIDs specified to share with."]
+      return render_error(status: 422)
+    end
+    results = {"success" => [], "errors" => []}
+    params[:uuids].each do |shared_uuid|
+      begin
+        Link.create(tail_uuid: shared_uuid, link_class: "permission",
+                    name: "can_read", head_uuid: @object.uuid)
+      rescue ArvadosApiClient::ApiError => error
+        error_list = error.api_response.andand[:errors]
+        if error_list.andand.any?
+          results["errors"] += error_list.map { |e| "#{shared_uuid}: #{e}" }
+        else
+          error_code = error.api_status || "Bad status"
+          results["errors"] << "#{shared_uuid}: #{error_code} response"
+        end
+      else
+        results["success"] << shared_uuid
+      end
+    end
+    if results["errors"].empty?
+      results.delete("errors")
+      status = 200
+    else
+      status = 422
+    end
+    respond_to do |f|
+      f.json { render(json: results, status: status) }
+    end
+  end
+
+  helper_method :is_starred
+  def is_starred
+    links = Link.where(tail_uuid: current_user.uuid,
+               head_uuid: @object.uuid,
+               link_class: 'star')
+
+    return links.andand.any?
+  end
+
+  protected
+
+  helper_method :strip_token_from_path
+  def strip_token_from_path(path)
+    path.sub(/([\?&;])api_token=[^&;]*[&;]?/, '\1')
+  end
+
+  def redirect_to_login
+    if request.xhr? or request.format.json?
+      @errors = ['You are not logged in. Most likely your session has timed out and you need to log in again.']
+      render_error status: 401
+    elsif request.method.in? ['GET', 'HEAD']
+      redirect_to arvados_api_client.arvados_login_url(return_to: strip_token_from_path(request.url))
+    else
+      flash[:error] = "Either you are not logged in, or your session has timed out. I can't automatically log you in and re-attempt this request."
+      redirect_to :back
+    end
+    false  # For convenience to return from callbacks
+  end
+
+  def using_specific_api_token(api_token, opts={})
+    start_values = {}
+    [:arvados_api_token, :user].each do |key|
+      start_values[key] = Thread.current[key]
+    end
+    if opts.fetch(:load_user, true)
+      load_api_token(api_token)
+    else
+      Thread.current[:arvados_api_token] = api_token
+      Thread.current[:user] = nil
+    end
+    begin
+      yield
+    ensure
+      start_values.each_key { |key| Thread.current[key] = start_values[key] }
+    end
+  end
+
+
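+  # A path :id containing any non-digit character is assumed to be a
+  # uuid and is moved to params[:uuid].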
+  def accept_uuid_as_id_param
+    if params[:id] and params[:id].match(/\D/)
+      params[:uuid] = params.delete :id
+    end
+  end
+
+  def find_object_by_uuid
+    begin
+      if not model_class
+        @object = nil
+      elsif params[:uuid].nil? or params[:uuid].empty?
+        @object = nil
+      elsif not params[:uuid].is_a?(String)
+        @object = model_class.where(uuid: params[:uuid]).first
+      elsif (model_class != Link and
+             resource_class_for_uuid(params[:uuid]) == Link)
+        @name_link = Link.find(params[:uuid])
+        @object = model_class.find(@name_link.head_uuid)
+      else
+        @object = model_class.find(params[:uuid])
+        load_preloaded_objects [@object]
+      end
+    rescue ArvadosApiClient::NotFoundException, ArvadosApiClient::NotLoggedInException, RuntimeError => error
+      if error.is_a?(RuntimeError) and (error.message !~ /^argument to find\(/)
+        raise
+      end
+      render_not_found(error)
+      return false
+    end
+  end
+
+  def thread_clear
+    load_api_token(nil)
+    Rails.cache.delete_matched(/^request_#{Thread.current.object_id}_/)
+    yield
+    Rails.cache.delete_matched(/^request_#{Thread.current.object_id}_/)
+  end
+
+  def set_current_request_id
+    response.headers['X-Request-Id'] =
+      Thread.current[:request_id] =
+      "req-" + Random::DEFAULT.rand(2**128).to_s(36)[0..19]
+    yield
+    Thread.current[:request_id] = nil
+  end
+
+  def append_info_to_payload(payload)
+    super
+    payload[:request_id] = response.headers['X-Request-Id']
+  end
+
+  # Set up the thread with the given API token and associated user object.
+  def load_api_token(new_token)
+    Thread.current[:arvados_api_token] = new_token
+    if new_token.nil?
+      Thread.current[:user] = nil
+    else
+      Thread.current[:user] = User.current
+    end
+  end
+
+  # If there's a valid api_token parameter, set up the session with that
+  # user's information.  Return true if the method redirects the request
+  # (usually a post-login redirect); false otherwise.
+  def setup_user_session
+    return false unless params[:api_token]
+    Thread.current[:arvados_api_token] = params[:api_token]
+    begin
+      user = User.current
+    rescue ArvadosApiClient::NotLoggedInException
+      false  # We may redirect to login, or not, based on the current action.
+    else
+      session[:arvados_api_token] = params[:api_token]
+      # If we later have trouble contacting the API server, we still want
+      # to be able to render basic user information in the UI--see
+      # render_exception above.  We store that in the session here.  This is
+      # not intended to be used as a general-purpose cache.  See #2891.
+      session[:user] = {
+        uuid: user.uuid,
+        email: user.email,
+        first_name: user.first_name,
+        last_name: user.last_name,
+        is_active: user.is_active,
+        is_admin: user.is_admin,
+        prefs: user.prefs
+      }
+
+      if !request.format.json? and request.method.in? ['GET', 'HEAD']
+        # Repeat this request with api_token in the (new) session
+        # cookie instead of the query string.  This prevents API
+        # tokens from appearing in (and being inadvisedly copied
+        # and pasted from) browser Location bars.
+        redirect_to strip_token_from_path(request.fullpath)
+        true
+      else
+        false
+      end
+    ensure
+      Thread.current[:arvados_api_token] = nil
+    end
+  end
+
+  # Save the session API token in thread-local storage, and yield.
+  # This method also takes care of session setup if the request
+  # provides a valid api_token parameter.
+  # If a token is unavailable or expired, the block is still run, with
+  # a nil token.
+  def set_thread_api_token
+    if Thread.current[:arvados_api_token]
+      yield   # An API token has already been found - pass it through.
+      return
+    elsif setup_user_session
+      return  # A new session was set up and received a response.
+    end
+
+    begin
+      load_api_token(session[:arvados_api_token])
+      yield
+    rescue ArvadosApiClient::NotLoggedInException
+      # If we got this error with a token, it must've expired.
+      # Retry the request without a token.
+      unless Thread.current[:arvados_api_token].nil?
+        load_api_token(nil)
+        yield
+      end
+    ensure
+      # Remove token in case this Thread is used for anything else.
+      load_api_token(nil)
+    end
+  end
+
+  # Redirect to login/welcome if client provided expired API token (or
+  # none at all)
+  def require_thread_api_token
+    if Thread.current[:arvados_api_token]
+      yield
+    elsif session[:arvados_api_token]
+      # Expired session. Clear it before refreshing login so that,
+      # if this login procedure fails, we end up showing the "please
+      # log in" page instead of getting stuck in a redirect loop.
+      session.delete :arvados_api_token
+      redirect_to_login
+    elsif request.xhr?
+      # If we redirect to the welcome page, the browser will handle
+      # the 302 by itself and the client code will end up rendering
+      # the "welcome" page in some content area where it doesn't make
+      # sense. Instead, we send 401 ("authenticate and try again" or
+      # "display error", depending on how smart the client side is).
+      @errors = ['You are not logged in.']
+      render_error status: 401
+    else
+      redirect_to welcome_users_path(return_to: request.fullpath)
+    end
+  end
+
+  def ensure_current_user_is_admin
+    if not current_user
+      @errors = ['Not logged in']
+      render_error status: 401
+    elsif not current_user.is_admin
+      @errors = ['Permission denied']
+      render_error status: 403
+    end
+  end
+
+  helper_method :unsigned_user_agreements
+  def unsigned_user_agreements
+    @signed_ua_uuids ||= UserAgreement.signatures.map &:head_uuid
+    @unsigned_user_agreements ||= UserAgreement.all.map do |ua|
+      if not @signed_ua_uuids.index ua.uuid
+        Collection.find(ua.uuid)
+      end
+    end.compact
+  end
+
+  def check_user_agreements
+    if current_user && !current_user.is_active
+      if not current_user.is_invited
+        return redirect_to inactive_users_path(return_to: request.fullpath)
+      end
+      if unsigned_user_agreements.empty?
+        # No agreements to sign. Perhaps we just need to ask?
+        current_user.activate
+        if !current_user.is_active
+          logger.warn "#{current_user.uuid.inspect}: " +
+            "No user agreements to sign, but activate failed!"
+        end
+      end
+      if !current_user.is_active
+        redirect_to user_agreements_path(return_to: request.fullpath)
+      end
+    end
+    true
+  end
+
+  def check_user_profile
+    return true if !current_user
+    if request.method.downcase != 'get' || params[:partial] ||
+       params[:tab_pane] || params[:action_method] ||
+       params[:action] == 'setup_popup'
+      return true
+    end
+
+    if missing_required_profile?
+      redirect_to profile_user_path(current_user.uuid, return_to: request.fullpath)
+    end
+    true
+  end
+
+  helper_method :missing_required_profile?
+  def missing_required_profile?
+    missing_required = false
+
+    profile_config = Rails.configuration.user_profile_form_fields
+    if current_user && profile_config
+      current_user_profile = current_user.prefs[:profile]
+      profile_config.kind_of?(Array) && profile_config.andand.each do |entry|
+        if entry['required']
+          if !current_user_profile ||
+             !current_user_profile[entry['key'].to_sym] ||
+             current_user_profile[entry['key'].to_sym].empty?
+            missing_required = true
+            break
+          end
+        end
+      end
+    end
+
+    missing_required
+  end
+
+  def select_theme
+    return Rails.configuration.arvados_theme
+  end
+
+  @@notification_tests = []
+
+  @@notification_tests.push lambda { |controller, current_user|
+    return nil if Rails.configuration.shell_in_a_box_url
+    AuthorizedKey.limit(1).where(authorized_user_uuid: current_user.uuid).each do
+      return nil
+    end
+    return lambda { |view|
+      view.render partial: 'notifications/ssh_key_notification'
+    }
+  }
+
+  @@notification_tests.push lambda { |controller, current_user|
+    Collection.limit(1).where(created_by: current_user.uuid).each do
+      return nil
+    end
+    return lambda { |view|
+      view.render partial: 'notifications/collections_notification'
+    }
+  }
+
+  @@notification_tests.push lambda { |controller, current_user|
+    if PipelineInstance.api_exists?(:index)
+      PipelineInstance.limit(1).where(created_by: current_user.uuid).each do
+        return nil
+      end
+    else
+      return nil
+    end
+    return lambda { |view|
+      view.render partial: 'notifications/pipelines_notification'
+    }
+  }
+
+  helper_method :user_notifications
+  def user_notifications
+    return [] if @errors or not current_user.andand.is_active or not Rails.configuration.show_user_notifications
+    @notifications ||= @@notification_tests.map do |t|
+      t.call(self, current_user)
+    end.compact
+  end
+
+  helper_method :all_projects
+  def all_projects
+    @all_projects ||= Group.
+      filter([['group_class','=','project']]).order('name')
+  end
+
+  helper_method :my_projects
+  def my_projects
+    return @my_projects if @my_projects
+    @my_projects = []
+    root_of = {}
+    all_projects.each do |g|
+      root_of[g.uuid] = g.owner_uuid
+      @my_projects << g
+    end
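+    # Iteratively collapse ownership chains until every project maps
+    # to its top-level root; keep only the projects rooted at the
+    # current user's home.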
+    done = false
+    while not done
+      done = true
+      root_of = root_of.each_with_object({}) do |(child, parent), h|
+        if root_of[parent]
+          h[child] = root_of[parent]
+          done = false
+        else
+          h[child] = parent
+        end
+      end
+    end
+    @my_projects = @my_projects.select do |g|
+      root_of[g.uuid] == current_user.uuid
+    end
+  end
+
+  helper_method :projects_shared_with_me
+  def projects_shared_with_me
+    my_project_uuids = my_projects.collect &:uuid
+    all_projects.reject { |x| x.uuid.in? my_project_uuids }
+  end
+
+  helper_method :recent_jobs_and_pipelines
+  def recent_jobs_and_pipelines
+    (Job.limit(10) |
+     PipelineInstance.limit(10)).
+      sort_by do |x|
+      (x.finished_at || x.started_at rescue nil) || x.modified_at || x.created_at
+    end.reverse
+  end
+
+  helper_method :running_pipelines
+  def running_pipelines
+    pi = PipelineInstance.order(["started_at asc", "created_at asc"]).filter([["state", "in", ["RunningOnServer", "RunningOnClient"]]])
+    jobs = {}
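+    # Collect every job UUID referenced by a pipeline component so the
+    # jobs can be fetched below in one batched filter request instead
+    # of one API call apiece.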
+    pi.each do |pl|
+      pl.components.each do |k,v|
+        if v.is_a? Hash and v[:job]
+          jobs[v[:job][:uuid]] = {}
+        end
+      end
+    end
+
+    if jobs.keys.any?
+      Job.filter([["uuid", "in", jobs.keys]]).each do |j|
+        jobs[j[:uuid]] = j
+      end
+
+      pi.each do |pl|
+        pl.components.each do |k,v|
+          if v.is_a? Hash and v[:job]
+            v[:job] = jobs[v[:job][:uuid]]
+          end
+        end
+      end
+    end
+
+    pi
+  end
+
+  helper_method :recent_processes
+  def recent_processes lim
+    lim = 12 if lim.nil?
+
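+    # Merge the newest pipeline instances (when the legacy pipelines
+    # API is available) with top-level container requests, then keep
+    # the lim most recently created of the combined set.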
+    procs = {}
+    if PipelineInstance.api_exists?(:index)
+      cols = %w(uuid owner_uuid created_at modified_at pipeline_template_uuid name state started_at finished_at)
+      pipelines = PipelineInstance.select(cols).limit(lim).order(["created_at desc"])
+      pipelines.results.each { |pi| procs[pi] = pi.created_at }
+    end
+
+    crs = ContainerRequest.limit(lim).order(["created_at desc"]).filter([["requesting_container_uuid", "=", nil]])
+    crs.results.each { |c| procs[c] = c.created_at }
+
+    Hash[procs.sort_by {|key, value| value}].keys.reverse.first(lim)
+  end
+
+  helper_method :recent_collections
+  def recent_collections lim
+    c = Collection.limit(lim).order(["modified_at desc"]).results
+    own = {}
+    Group.filter([["uuid", "in", c.map(&:owner_uuid)]]).each do |g|
+      own[g[:uuid]] = g
+    end
+    {collections: c, owners: own}
+  end
+
+  helper_method :my_starred_projects
+  def my_starred_projects user
+    return @starred_projects if @starred_projects
+    links = Link.filter([['tail_uuid', '=', user.uuid],
+                         ['link_class', '=', 'star'],
+                         ['head_uuid', 'is_a', 'arvados#group']]).select(%w(head_uuid))
+    uuids = links.collect { |x| x.head_uuid }
+    starred_projects = Group.filter([['uuid', 'in', uuids]]).order('name')
+    @starred_projects = starred_projects.results
+  end
+
+  # If there are more than 200 projects readable by the user, build
+  # the tree using only the first 200+ projects owned by the user,
+  # taken from the top three levels.
+  # That is: get top-level projects under home, get subprojects of
+  # those projects, and so on until we hit the limit.
+  def my_wanted_projects(user, page_size=100)
+    return @my_wanted_projects if @my_wanted_projects
+
+    from_top = []
+    uuids = [user.uuid]
+    depth = 0
+    @too_many_projects = false
+    @reached_level_limit = false
+    while from_top.size <= page_size*2
+      current_level = Group.filter([['group_class','=','project'],
+                                    ['owner_uuid', 'in', uuids]])
+                      .order('name').limit(page_size*2)
+      break if current_level.results.size == 0
+      @too_many_projects = true if current_level.items_available > current_level.results.size
+      from_top.concat current_level.results
+      uuids = current_level.results.collect(&:uuid)
+      depth += 1
+      if depth >= 3
+        @reached_level_limit = true
+        break
+      end
+    end
+    @my_wanted_projects = from_top
+  end
+
+  helper_method :my_wanted_projects_tree
+  def my_wanted_projects_tree(user, page_size=100)
+    build_my_wanted_projects_tree(user, page_size)
+    [@my_wanted_projects_tree, @too_many_projects, @reached_level_limit]
+  end
+
+  def build_my_wanted_projects_tree(user, page_size=100)
+    return @my_wanted_projects_tree if @my_wanted_projects_tree
+
+    parent_of = {user.uuid => 'me'}
+    my_wanted_projects(user, page_size).each do |ob|
+      parent_of[ob.uuid] = ob.owner_uuid
+    end
+    children_of = {false => [], 'me' => [user]}
+    my_wanted_projects(user, page_size).each do |ob|
+      if ob.owner_uuid != user.uuid and
+          not parent_of.has_key? ob.owner_uuid
+        parent_of[ob.uuid] = false
+      end
+      children_of[parent_of[ob.uuid]] ||= []
+      children_of[parent_of[ob.uuid]] << ob
+    end
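+    # buildtree nests each project under its parent; sorted_paths then
+    # flattens the tree depth-first into {object:, depth:} rows,
+    # suitable for rendering as an indented list.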
+    buildtree = lambda do |children_of, root_uuid=false|
+      tree = {}
+      children_of[root_uuid].andand.each do |ob|
+        tree[ob] = buildtree.call(children_of, ob.uuid)
+      end
+      tree
+    end
+    sorted_paths = lambda do |tree, depth=0|
+      paths = []
+      tree.keys.sort_by { |ob|
+        ob.is_a?(String) ? ob : ob.friendly_link_name
+      }.each do |ob|
+        paths << {object: ob, depth: depth}
+        paths += sorted_paths.call tree[ob], depth+1
+      end
+      paths
+    end
+    @my_wanted_projects_tree =
+      sorted_paths.call buildtree.call(children_of, 'me')
+  end
+
+  helper_method :get_object
+  def get_object uuid
+    if @get_object.nil? and @objects
+      @get_object = @objects.each_with_object({}) do |object, h|
+        h[object.uuid] = object
+      end
+    end
+    @get_object ||= {}
+    @get_object[uuid]
+  end
+
+  helper_method :project_breadcrumbs
+  def project_breadcrumbs
+    crumbs = []
+    current = @name_link || @object
+    while current
+      # Halt if a group ownership loop is detected. API should refuse
+      # to produce this state, but it could still arise from a race
+      # condition when group ownership changes between our find()
+      # queries.
+      break if crumbs.collect(&:uuid).include? current.uuid
+
+      if current.is_a?(Group) and current.group_class == 'project'
+        crumbs.prepend current
+      end
+      if current.is_a? Link
+        current = Group.find?(current.tail_uuid)
+      else
+        current = Group.find?(current.owner_uuid)
+      end
+    end
+    crumbs
+  end
+
+  helper_method :current_project_uuid
+  def current_project_uuid
+    if @object.is_a? Group and @object.group_class == 'project'
+      @object.uuid
+    elsif @name_link.andand.tail_uuid
+      @name_link.tail_uuid
+    elsif @object and resource_class_for_uuid(@object.owner_uuid) == Group
+      @object.owner_uuid
+    else
+      nil
+    end
+  end
+
+  # helper method to get links for given object or uuid
+  helper_method :links_for_object
+  def links_for_object object_or_uuid
+    raise ArgumentError, 'No input argument' unless object_or_uuid
+    preload_links_for_objects([object_or_uuid])
+    uuid = object_or_uuid.is_a?(String) ? object_or_uuid : object_or_uuid.uuid
+    @all_links_for[uuid] ||= []
+  end
+
+  # helper method to preload links for given objects and uuids
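+  # A typical (hypothetical) usage pattern: preload once for a whole
+  # page of objects, then read per-object links without extra API
+  # calls, e.g.
+  #   preload_links_for_objects @objects
+  #   tags = links_for_object(obj).select { |l| l.link_class == 'tag' }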
+  helper_method :preload_links_for_objects
+  def preload_links_for_objects objects_and_uuids
+    @all_links_for ||= {}
+
+    raise ArgumentError, 'Argument is not an array' unless objects_and_uuids.is_a? Array
+    return @all_links_for if objects_and_uuids.empty?
+
+    uuids = objects_and_uuids.collect { |x| x.is_a?(String) ? x : x.uuid }
+
+    # if already preloaded for all of these uuids, return
+    if not uuids.select { |x| @all_links_for[x].nil? }.any?
+      return @all_links_for
+    end
+
+    uuids.each do |x|
+      @all_links_for[x] = []
+    end
+
+    # TODO: make sure we get every page of results from API server
+    Link.filter([['head_uuid', 'in', uuids]]).each do |link|
+      @all_links_for[link.head_uuid] << link
+    end
+    @all_links_for
+  end
+
+  # helper method to get a certain number of objects of a specific type
+  # this can be used to replace any uses of: "dataclass.limit(n)"
+  helper_method :get_n_objects_of_class
+  def get_n_objects_of_class dataclass, size
+    @objects_map_for ||= {}
+
+    raise ArgumentError, 'Argument is not a data class' unless dataclass.is_a? Class and dataclass < ArvadosBase
+    raise ArgumentError, 'Argument is not a valid limit size' unless (size && size>0)
+
+    # if the objects_map_for has a value for this dataclass, and the
+    # size used to retrieve those objects is equal, return it
+    size_key = "#{dataclass.name}_size"
+    if @objects_map_for[dataclass.name] && @objects_map_for[size_key] &&
+        (@objects_map_for[size_key] == size)
+      return @objects_map_for[dataclass.name]
+    end
+
+    @objects_map_for[size_key] = size
+    @objects_map_for[dataclass.name] = dataclass.limit(size)
+  end
+
+  # helper method to get collections for the given uuid
+  helper_method :collections_for_object
+  def collections_for_object uuid
+    raise ArgumentError, 'No input argument' unless uuid
+    preload_collections_for_objects([uuid])
+    @all_collections_for[uuid] ||= []
+  end
+
+  # helper method to preload collections for the given uuids
+  helper_method :preload_collections_for_objects
+  def preload_collections_for_objects uuids
+    @all_collections_for ||= {}
+
+    raise ArgumentError, 'Argument is not an array' unless uuids.is_a? Array
+    return @all_collections_for if uuids.empty?
+
+    # if already preloaded for all of these uuids, return
+    if not uuids.select { |x| @all_collections_for[x].nil? }.any?
+      return @all_collections_for
+    end
+
+    uuids.each do |x|
+      @all_collections_for[x] = []
+    end
+
+    # TODO: make sure we get every page of results from API server
+    Collection.where(uuid: uuids).each do |collection|
+      @all_collections_for[collection.uuid] << collection
+    end
+    @all_collections_for
+  end
+
+  # helper method to get log collections for the given log
+  helper_method :log_collections_for_object
+  def log_collections_for_object log
+    raise ArgumentError, 'No input argument' unless log
+
+    preload_log_collections_for_objects([log])
+
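+    # Strip any trailing locator hints so we index by the bare
+    # "hash+size" portable data hash; e.g. a (hypothetical) locator
+    # "d41d8cd98f00b204e9800998ecf8427e+0+K@zzzzz" reduces to
+    # "d41d8cd98f00b204e9800998ecf8427e+0".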
+    uuid = log
+    fixup = /([a-f0-9]{32}\+\d+)(\+?.*)/.match(log)
+    if fixup && fixup.size>1
+      uuid = fixup[1]
+    end
+
+    @all_log_collections_for[uuid] ||= []
+  end
+
+  # helper method to preload log collections for the given logs
+  helper_method :preload_log_collections_for_objects
+  def preload_log_collections_for_objects logs
+    @all_log_collections_for ||= {}
+
+    raise ArgumentError, 'Argument is not an array' unless logs.is_a? Array
+    return @all_log_collections_for if logs.empty?
+
+    uuids = []
+    logs.each do |log|
+      fixup = /([a-f0-9]{32}\+\d+)(\+?.*)/.match(log)
+      if fixup && fixup.size>1
+        uuids << fixup[1]
+      else
+        uuids << log
+      end
+    end
+
+    # if already preloaded for all of these uuids, return
+    if not uuids.select { |x| @all_log_collections_for[x].nil? }.any?
+      return @all_log_collections_for
+    end
+
+    uuids.each do |x|
+      @all_log_collections_for[x] = []
+    end
+
+    # TODO: make sure we get every page of results from API server
+    Collection.where(uuid: uuids).each do |collection|
+      @all_log_collections_for[collection.uuid] << collection
+    end
+    @all_log_collections_for
+  end
+
+  # Helper method to get one collection for the given portable_data_hash
+  # This is used to determine if a pdh is readable by the current_user
+  helper_method :collection_for_pdh
+  def collection_for_pdh pdh
+    raise ArgumentError, 'No input argument' unless pdh
+    preload_for_pdhs([pdh])
+    @all_pdhs_for[pdh] ||= []
+  end
+
+  # Helper method to preload one collection each for the given pdhs
+  # This is used to determine if a pdh is readable by the current_user
+  helper_method :preload_for_pdhs
+  def preload_for_pdhs pdhs
+    @all_pdhs_for ||= {}
+
+    raise ArgumentError, 'Argument is not an array' unless pdhs.is_a? Array
+    return @all_pdhs_for if pdhs.empty?
+
+    # if already preloaded for all of these pdhs, return
+    if not pdhs.select { |x| @all_pdhs_for[x].nil? }.any?
+      return @all_pdhs_for
+    end
+
+    pdhs.each do |x|
+      @all_pdhs_for[x] = []
+    end
+
+    Collection.select(%w(portable_data_hash)).where(portable_data_hash: pdhs).distinct().each do |collection|
+      @all_pdhs_for[collection.portable_data_hash] << collection
+    end
+    @all_pdhs_for
+  end
+
+  # helper method to get object of a given dataclass and uuid
+  helper_method :object_for_dataclass
+  def object_for_dataclass dataclass, uuid, by_attr=nil
+    raise ArgumentError, 'No input argument dataclass' unless (dataclass && uuid)
+    preload_objects_for_dataclass(dataclass, [uuid], by_attr)
+    @objects_for[uuid]
+  end
+
+  # helper method to preload objects for given dataclass and uuids
+  helper_method :preload_objects_for_dataclass
+  def preload_objects_for_dataclass dataclass, uuids, by_attr=nil
+    @objects_for ||= {}
+
+    raise ArgumentError, 'Argument is not a data class' unless dataclass.is_a? Class
+    raise ArgumentError, 'Argument is not an array' unless uuids.is_a? Array
+
+    return @objects_for if uuids.empty?
+
+    # if already preloaded for all of these uuids, return
+    if not uuids.select { |x| !@objects_for.include?(x) }.any?
+      return @objects_for
+    end
+
+    # preset all uuids to nil
+    uuids.each do |x|
+      @objects_for[x] = nil
+    end
+    if by_attr and ![:uuid, :name].include?(by_attr)
+      raise ArgumentError, "Preloading only using lookups by uuid or name are supported: #{by_attr}"
+    elsif by_attr and by_attr == :name
+      dataclass.where(name: uuids).each do |obj|
+        @objects_for[obj.name] = obj
+      end
+    else
+      key_prefix = "request_#{Thread.current.object_id}_#{dataclass.to_s}_"
+      dataclass.where(uuid: uuids).each do |obj|
+        @objects_for[obj.uuid] = obj
+        if dataclass == Collection
+          # The collections#index API defaults to "all attributes except
+          # manifest_text", so this object is incomplete and not suitable
+          # for preloading the find() cache.
+        else
+          Rails.cache.write(key_prefix + obj.uuid, obj.as_json)
+        end
+      end
+    end
+    @objects_for
+  end
+
+  # helper method to register objects fetched elsewhere in the
+  # preload cache, so later lookups don't re-fetch them
+  helper_method :load_preloaded_objects
+  def load_preloaded_objects objs
+    @objects_for ||= {}
+    objs.each do |obj|
+      @objects_for[obj.uuid] = obj
+    end
+  end
+
+  # helper method to get the names of collection files selected
+  helper_method :selected_collection_files
+  def selected_collection_files params
+    link_uuids, coll_ids = params["selection"].partition do |sel_s|
+      ArvadosBase::resource_class_for_uuid(sel_s) == Link
+    end
+
+    unless link_uuids.empty?
+      Link.select([:head_uuid]).where(uuid: link_uuids).each do |link|
+        if ArvadosBase::resource_class_for_uuid(link.head_uuid) == Collection
+          coll_ids << link.head_uuid
+        end
+      end
+    end
+
+    uuids = []
+    pdhs = []
+    source_paths = Hash.new { |hash, key| hash[key] = [] }
+    coll_ids.each do |coll_id|
+      if m = CollectionsHelper.match(coll_id)
+        key = m[1] + m[2]
+        pdhs << key
+        source_paths[key] << m[4]
+      elsif m = CollectionsHelper.match_uuid_with_optional_filepath(coll_id)
+        key = m[1]
+        uuids << key
+        source_paths[key] << m[4]
+      end
+    end
+
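+    # A selection may name a collection by portable data hash; resolve
+    # each PDH to a readable collection uuid so callers always get
+    # back uuids with their per-uuid source paths.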
+    unless pdhs.empty?
+      Collection.where(portable_data_hash: pdhs.uniq).
+          select([:uuid, :portable_data_hash]).each do |coll|
+        unless source_paths[coll.portable_data_hash].empty?
+          uuids << coll.uuid
+          source_paths[coll.uuid] = source_paths.delete(coll.portable_data_hash)
+        end
+      end
+    end
+
+    [uuids, source_paths]
+  end
+
+  def wiselinks_layout
+    'body'
+  end
+end
diff --git a/apps/workbench/app/controllers/authorized_keys_controller.rb b/apps/workbench/app/controllers/authorized_keys_controller.rb
new file mode 100644 (file)
index 0000000..ac47ce7
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AuthorizedKeysController < ApplicationController
+  def index_pane_list
+    %w(Recent Help)
+  end
+
+  def new
+    super
+    @object.authorized_user_uuid = current_user.uuid if current_user
+    @object.key_type = 'SSH'
+  end
+
+  def create
+    defaults = { authorized_user_uuid: current_user.uuid, key_type: 'SSH' }
+    @object = AuthorizedKey.new defaults.merge(params[:authorized_key] || {})
+    super
+  end
+end
diff --git a/apps/workbench/app/controllers/collections_controller.rb b/apps/workbench/app/controllers/collections_controller.rb
new file mode 100644 (file)
index 0000000..0a7f22b
--- /dev/null
@@ -0,0 +1,389 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require "arvados/keep"
+require "arvados/collection"
+require "uri"
+
+class CollectionsController < ApplicationController
+  include ActionController::Live
+
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+  skip_around_filter(:require_thread_api_token,
+                     only: [:show_file, :show_file_links])
+  skip_before_filter(:find_object_by_uuid,
+                     only: [:provenance, :show_file, :show_file_links])
+  # We depend on show_file to display the user agreement:
+  skip_before_filter :check_user_agreements, only: :show_file
+  skip_before_filter :check_user_profile, only: :show_file
+
+  RELATION_LIMIT = 5
+
+  def show_pane_list
+    panes = %w(Files Upload Tags Provenance_graph Used_by Advanced)
+    panes = panes - %w(Upload) unless (@object.editable? rescue false)
+    panes
+  end
+
+  def set_persistent
+    case params[:value]
+    when 'persistent', 'cache'
+      persist_links = Link.filter([['owner_uuid', '=', current_user.uuid],
+                                   ['link_class', '=', 'resources'],
+                                   ['name', '=', 'wants'],
+                                   ['tail_uuid', '=', current_user.uuid],
+                                   ['head_uuid', '=', @object.uuid]])
+      logger.debug persist_links.inspect
+    else
+      return unprocessable "Invalid value #{value.inspect}"
+    end
+    if params[:value] == 'persistent'
+      if not persist_links.any?
+        Link.create(link_class: 'resources',
+                    name: 'wants',
+                    tail_uuid: current_user.uuid,
+                    head_uuid: @object.uuid)
+      end
+    else
+      persist_links.each do |link|
+        link.destroy || raise
+      end
+    end
+
+    respond_to do |f|
+      f.json { render json: @object }
+    end
+  end
+
+  def index
+    # API server index doesn't return manifest_text by default, but our
+    # callers want it unless otherwise specified.
+    @select ||= Collection.columns.map(&:name)
+    base_search = Collection.select(@select)
+    if params[:search].andand.length.andand > 0
+      tags = Link.where(any: ['contains', params[:search]])
+      @objects = (base_search.where(uuid: tags.collect(&:head_uuid)) |
+                      base_search.where(any: ['contains', params[:search]])).
+        uniq { |c| c.uuid }
+    else
+      if params[:limit]
+        limit = params[:limit].to_i
+      else
+        limit = 100
+      end
+
+      if params[:offset]
+        offset = params[:offset].to_i
+      else
+        offset = 0
+      end
+
+      @objects = base_search.limit(limit).offset(offset)
+    end
+    @links = Link.where(head_uuid: @objects.collect(&:uuid))
+    @collection_info = {}
+    @objects.each do |c|
+      @collection_info[c.uuid] = {
+        tag_links: [],
+        wanted: false,
+        wanted_by_me: false,
+        provenance: [],
+        links: []
+      }
+    end
+    @links.each do |link|
+      @collection_info[link.head_uuid] ||= {}
+      info = @collection_info[link.head_uuid]
+      case link.link_class
+      when 'tag'
+        info[:tag_links] << link
+      when 'resources'
+        info[:wanted] = true
+        info[:wanted_by_me] ||= link.tail_uuid == current_user.uuid
+      when 'provenance'
+        info[:provenance] << link.name
+      end
+      info[:links] << link
+    end
+    @request_url = request.url
+
+    render_index
+  end
+
+  def show_file_links
+    return show_file
+  end
+
+  def show_file
+    # The order of searched tokens is important: because the anonymous user
+    # token is passed along with every API request, we have to check it first.
+    # Otherwise, it's impossible to know whether any other request succeeded
+    # because of the reader token.
+    coll = nil
+    tokens = [(Rails.configuration.anonymous_user_token || nil),
+              params[:reader_token],
+              Thread.current[:arvados_api_token]].compact
+    usable_token = find_usable_token(tokens) do
+      coll = Collection.find(params[:uuid])
+    end
+    if usable_token.nil?
+      # Response already rendered.
+      return
+    end
+
+    opts = {}
+    if usable_token == params[:reader_token]
+      opts[:path_token] = usable_token
+    elsif usable_token == Rails.configuration.anonymous_user_token
+      # Don't pass a token at all
+    else
+      # We pass the current user's real token only if it's necessary
+      # to read the collection.
+      opts[:query_token] = usable_token
+    end
+    opts[:disposition] = params[:disposition] if params[:disposition]
+    return redirect_to keep_web_url(params[:uuid], params[:file], opts)
+  end
+
+  def sharing_scopes
+    ["GET /arvados/v1/collections/#{@object.uuid}", "GET /arvados/v1/collections/#{@object.uuid}/", "GET /arvados/v1/keep_services/accessible"]
+  end
+
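+  # Find existing sharing tokens: API client authorizations whose
+  # scopes match sharing_scopes exactly. Listing tokens may be
+  # forbidden for this user, in which case return nil.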
+  def search_scopes
+    begin
+      ApiClientAuthorization.filter([['scopes', '=', sharing_scopes]]).results
+    rescue ArvadosApiClient::AccessForbiddenException
+      nil
+    end
+  end
+
+  def find_object_by_uuid
+    if not Keep::Locator.parse params[:id]
+      super
+    end
+  end
+
+  def show
+    return super if !@object
+
+    @logs = []
+
+    if params["tab_pane"] == "Provenance_graph"
+      @prov_svg = ProvenanceHelper::create_provenance_graph(@object.provenance, "provenance_svg",
+                                                            {:request => request,
+                                                             :direction => :top_down,
+                                                             :combine_jobs => :script_only}) rescue nil
+    end
+
+    if current_user
+      if Keep::Locator.parse params["uuid"]
+        @same_pdh = Collection.filter([["portable_data_hash", "=", @object.portable_data_hash]]).limit(20)
+        if @same_pdh.results.size == 1
+          redirect_to collection_path(@same_pdh[0]["uuid"])
+          return
+        end
+        owners = @same_pdh.map(&:owner_uuid).to_a.uniq
+        preload_objects_for_dataclass Group, owners
+        preload_objects_for_dataclass User, owners
+        uuids = @same_pdh.map(&:uuid).to_a.uniq
+        preload_links_for_objects uuids
+        render 'hash_matches'
+        return
+      else
+        if Job.api_exists?(:index)
+          jobs_with = lambda do |conds|
+            Job.limit(RELATION_LIMIT).where(conds)
+              .results.sort_by { |j| j.finished_at || j.created_at }
+          end
+          @output_of = jobs_with.call(output: @object.portable_data_hash)
+          @log_of = jobs_with.call(log: @object.portable_data_hash)
+        end
+
+        @project_links = Link.limit(RELATION_LIMIT).order("modified_at DESC")
+          .where(head_uuid: @object.uuid, link_class: 'name').results
+        project_hash = Group.where(uuid: @project_links.map(&:tail_uuid)).to_hash
+        @projects = project_hash.values
+
+        @permissions = Link.limit(RELATION_LIMIT).order("modified_at DESC")
+          .where(head_uuid: @object.uuid, link_class: 'permission',
+                 name: 'can_read').results
+        @search_sharing = search_scopes
+
+        if params["tab_pane"] == "Used_by"
+          @used_by_svg = ProvenanceHelper::create_provenance_graph(@object.used_by, "used_by_svg",
+                                                                   {:request => request,
+                                                                    :direction => :top_down,
+                                                                    :combine_jobs => :script_only,
+                                                                    :pdata_only => true}) rescue nil
+        end
+      end
+    end
+    super
+  end
+
+  def sharing_popup
+    @search_sharing = search_scopes
+    render("sharing_popup.js", content_type: "text/javascript")
+  end
+
+  helper_method :download_link
+
+  def download_link
+    token = @search_sharing.first.api_token
+    keep_web_url(@object.uuid, nil, {path_token: token})
+  end
+
+  def share
+    ApiClientAuthorization.create(scopes: sharing_scopes)
+    sharing_popup
+  end
+
+  def unshare
+    search_scopes.each do |s|
+      s.destroy
+    end
+    sharing_popup
+  end
+
+  def remove_selected_files
+    uuids, source_paths = selected_collection_files params
+
+    arv_coll = Arv::Collection.new(@object.manifest_text)
+    source_paths[uuids[0]].each do |p|
+      arv_coll.rm "."+p
+    end
+
+    if @object.update_attributes manifest_text: arv_coll.manifest_text
+      show
+    else
+      self.render_error status: 422
+    end
+  end
+
+  def update
+    updated_attr = params[:collection].each.select {|a| a[0].andand.start_with? 'rename-file-path:'}
+
+    if updated_attr.size > 0
+      # Is it file rename?
+      file_path = updated_attr[0][0].split('rename-file-path:')[-1]
+
+      new_file_path = updated_attr[0][1]
+      if new_file_path.start_with?('./')
+        # looks good
+      elsif new_file_path.start_with?('/')
+        new_file_path = '.' + new_file_path
+      else
+        new_file_path = './' + new_file_path
+      end
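+      # At this point new_file_path is normalized to a "./"-relative
+      # path: e.g. "foo/bar.txt" and "/foo/bar.txt" both become
+      # "./foo/bar.txt".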
+
+      arv_coll = Arv::Collection.new(@object.manifest_text)
+
+      if arv_coll.exist?(new_file_path)
+        @errors = 'Duplicate file path. Please use a different name.'
+        self.render_error status: 422
+      else
+        arv_coll.rename "./"+file_path, new_file_path
+
+        if @object.update_attributes manifest_text: arv_coll.manifest_text
+          show
+        else
+          self.render_error status: 422
+        end
+      end
+    else
+      # Not a file rename; use default
+      super
+    end
+  end
+
+  protected
+
+  def find_usable_token(token_list)
+    # Iterate over every given token to make it the current token and
+    # yield the given block.
+    # If the block succeeds, return the token it used.
+    # Otherwise, render an error response based on the most specific
+    # error we encounter, and return nil.
+    most_specific_error = [401]
+    token_list.each do |api_token|
+      begin
+        # We can't load the corresponding user, because the token may not
+        # be scoped for that.
+        using_specific_api_token(api_token, load_user: false) do
+          yield
+          return api_token
+        end
+      rescue ArvadosApiClient::ApiError => error
+        if error.api_status >= most_specific_error.first
+          most_specific_error = [error.api_status, error]
+        end
+      end
+    end
+    case most_specific_error.shift
+    when 401, 403
+      redirect_to_login
+    when 404
+      render_not_found(*most_specific_error)
+    end
+    return nil
+  end
+
+  def keep_web_url(uuid_or_pdh, file, opts)
+    munged_id = uuid_or_pdh.sub('+', '-')
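+    # '+' is not valid in a hostname, so a (hypothetical) PDH like
+    # "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+0" is written
+    # "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-0" when substituted into the
+    # URL template.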
+    fmt = {uuid_or_pdh: munged_id}
+
+    tmpl = Rails.configuration.keep_web_url
+    if Rails.configuration.keep_web_download_url and
+        (!tmpl or opts[:disposition] == 'attachment')
+      # Prefer the attachment-only-host when we want an attachment
+      # (and when there is no preview link configured)
+      tmpl = Rails.configuration.keep_web_download_url
+    elsif not Rails.configuration.trust_all_content
+      check_uri = URI.parse(tmpl % fmt)
+      if opts[:query_token] and
+          not check_uri.host.start_with?(munged_id + "--") and
+          not check_uri.host.start_with?(munged_id + ".")
+        # We're about to pass a token in the query string, but
+        # keep-web can't accept that safely at a single-origin URL
+        # template (unless it's -attachment-only-host).
+        tmpl = Rails.configuration.keep_web_download_url
+        if not tmpl
+          raise ArgumentError, "Download precluded by site configuration"
+        end
+        logger.warn("Using download link, even though inline content " \
+                    "was requested: #{check_uri.to_s}")
+      end
+    end
+
+    if tmpl == Rails.configuration.keep_web_download_url
+      # This takes us to keep-web's -attachment-only-host so there is
+      # no need to add ?disposition=attachment.
+      opts.delete :disposition
+    end
+
+    uri = URI.parse(tmpl % fmt)
+    uri.path += '/' unless uri.path.end_with? '/'
+    if opts[:path_token]
+      uri.path += 't=' + opts[:path_token] + '/'
+    end
+    uri.path += '_/'
+    uri.path += URI.escape(file) if file
+
+    query = Hash[URI.decode_www_form(uri.query || '')]
+    { query_token: 'api_token',
+      disposition: 'disposition' }.each do |opt, param|
+      if opts.include? opt
+        query[param] = opts[opt]
+      end
+    end
+    unless query.empty?
+      uri.query = URI.encode_www_form(query)
+    end
+
+    uri.to_s
+  end
+end
diff --git a/apps/workbench/app/controllers/container_requests_controller.rb b/apps/workbench/app/controllers/container_requests_controller.rb
new file mode 100644 (file)
index 0000000..783cafa
--- /dev/null
@@ -0,0 +1,189 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ContainerRequestsController < ApplicationController
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+
+  def generate_provenance(cr)
+    return if params['tab_pane'] != "Provenance"
+
+    nodes = {cr[:uuid] => cr}
+    child_crs = []
+    col_uuids = []
+    col_pdhs = []
+    col_uuids << cr[:output_uuid] if cr[:output_uuid]
+    col_pdhs += ProvenanceHelper::cr_input_pdhs(cr)
+
+    # Search for child CRs
+    if cr[:container_uuid]
+      child_crs = ContainerRequest.where(requesting_container_uuid: cr[:container_uuid])
+
+      child_crs.each do |child|
+        nodes[child[:uuid]] = child
+        col_uuids << child[:output_uuid] if child[:output_uuid]
+        col_pdhs += ProvenanceHelper::cr_input_pdhs(child)
+      end
+    end
+
+    output_cols = {} # Indexed by UUID
+    input_cols = {} # Indexed by PDH
+    output_pdhs = []
+
+    # Batch requests to get all related collections
+    # First fetch output collections by UUID.
+    Collection.filter([['uuid', 'in', col_uuids.uniq]]).each do |c|
+      output_cols[c[:uuid]] = c
+      output_pdhs << c[:portable_data_hash]
+    end
+    # Then, get only input collections by PDH. There could be more than one collection
+    # per PDH: the number of collections is used on the collection node label.
+    Collection.filter(
+      [['portable_data_hash', 'in', col_pdhs - output_pdhs]]).each do |c|
+      if input_cols[c[:portable_data_hash]]
+        input_cols[c[:portable_data_hash]] << c
+      else
+        input_cols[c[:portable_data_hash]] = [c]
+      end
+    end
+
+    @svg = ProvenanceHelper::create_provenance_graph(
+      nodes, "provenance_svg",
+      {
+        :request => request,
+        :direction => :top_down,
+        :output_collections => output_cols,
+        :input_collections => input_cols,
+        :cr_children_of => {
+          cr[:uuid] => child_crs.select{|child| child[:uuid]},
+        },
+      })
+  end
+
+  def show_pane_list
+    panes = %w(Status Log Provenance Advanced)
+    if @object.andand.state == 'Uncommitted'
+      panes = %w(Inputs) + panes - %w(Log Provenance)
+    end
+    panes
+  end
+
+  def show
+    generate_provenance(@object)
+    super
+  end
+
+  def cancel
+    if @object.container_uuid
+      c = Container.select(['state']).where(uuid: @object.container_uuid).first
+      if c && c.state != 'Running'
+        # If the container hasn't started yet, setting priority=0
+        # leaves our request in "Committed" state and doesn't cancel
+        # the container (even if no other requests are giving it
+        # priority). To avoid showing this container request as "on
+        # hold" after hitting the Cancel button, set state=Final too.
+        @object.state = 'Final'
+      end
+    end
+    @object.update_attributes! priority: 0
+    if params[:return_to]
+      redirect_to params[:return_to]
+    else
+      redirect_to @object
+    end
+  end
+
+  def update
+    @updates ||= params[@object.class.to_s.underscore.singularize.to_sym]
+    input_obj = @updates[:mounts].andand[:"/var/lib/cwl/cwl.input.json"].andand[:content]
+    if input_obj
+      workflow = @object.mounts[:"/var/lib/cwl/workflow.json"][:content]
+      get_cwl_inputs(workflow).each do |input_schema|
+        if not input_obj.include? cwl_shortname(input_schema[:id])
+          next
+        end
+        required, primary_type, param_id = cwl_input_info(input_schema)
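+        # Form values arrive as strings; coerce each one to the CWL
+        # parameter's declared primary type before saving the request.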
+        if input_obj[param_id] == ""
+          input_obj[param_id] = nil
+        elsif primary_type == "boolean"
+          input_obj[param_id] = input_obj[param_id] == "true"
+        elsif ["int", "long"].include? primary_type
+          input_obj[param_id] = input_obj[param_id].to_i
+        elsif ["float", "double"].include? primary_type
+          input_obj[param_id] = input_obj[param_id].to_f
+        elsif ["File", "Directory"].include? primary_type
+          re = CollectionsHelper.match_uuid_with_optional_filepath(input_obj[param_id])
+          if re
+            c = Collection.find(re[1])
+            input_obj[param_id] = {"class" => primary_type,
+                                   "location" => "keep:#{c.portable_data_hash}#{re[4]}",
+                                   "arv:collection" => input_obj[param_id]}
+          end
+        end
+      end
+    end
+    params[:merge] = true
+    begin
+      super
+    rescue => e
+      flash[:error] = e.to_s
+      show
+    end
+  end
+
+  def copy
+    src = @object
+
+    @object = ContainerRequest.new
+
+    # By default the copied CR won't be reusing containers, unless use_existing=true
+    # param is passed.
+    command = src.command
+    if params[:use_existing]
+      @object.use_existing = true
+      # Pass the correct argument to arvados-cwl-runner command.
+      if src.command[0] == 'arvados-cwl-runner'
+        command = src.command - ['--disable-reuse']
+        command.insert(1, '--enable-reuse')
+      end
+    else
+      @object.use_existing = false
+      # Pass the correct argument to arvados-cwl-runner command.
+      if src.command[0] == 'arvados-cwl-runner'
+        command = src.command - ['--enable-reuse']
+        command.insert(1, '--disable-reuse')
+      end
+    end
+
+    @object.command = command
+    @object.container_image = src.container_image
+    @object.cwd = src.cwd
+    @object.description = src.description
+    @object.environment = src.environment
+    @object.mounts = src.mounts
+    @object.name = src.name
+    @object.output_path = src.output_path
+    @object.priority = 1
+    @object.properties[:template_uuid] = src.properties[:template_uuid]
+    @object.runtime_constraints = src.runtime_constraints
+    @object.scheduling_parameters = src.scheduling_parameters
+    @object.state = 'Uncommitted'
+
+    # set owner_uuid to that of source, provided it is a project and writable by current user
+    current_project = Group.find(src.owner_uuid) rescue nil
+    if (current_project && current_project.writable_by.andand.include?(current_user.uuid))
+      @object.owner_uuid = src.owner_uuid
+    end
+
+    super
+  end
+
+  def index
+    @limit = 20
+    super
+  end
+
+end
diff --git a/apps/workbench/app/controllers/containers_controller.rb b/apps/workbench/app/controllers/containers_controller.rb
new file mode 100644 (file)
index 0000000..f0e3164
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ContainersController < ApplicationController
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+
+  def show_pane_list
+    %w(Status Log Advanced)
+  end
+end
diff --git a/apps/workbench/app/controllers/groups_controller.rb b/apps/workbench/app/controllers/groups_controller.rb
new file mode 100644 (file)
index 0000000..aa78feb
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class GroupsController < ApplicationController
+  def index
+    @groups = Group.filter [['group_class', '!=', 'project']]
+    @group_uuids = @groups.collect &:uuid
+    @links_from = Link.where link_class: 'permission', tail_uuid: @group_uuids
+    @links_to = Link.where link_class: 'permission', head_uuid: @group_uuids
+    render_index
+  end
+
+  def show
+    if @object.group_class == 'project'
+      redirect_to(project_path(@object))
+    else
+      super
+    end
+  end
+end
diff --git a/apps/workbench/app/controllers/healthcheck_controller.rb b/apps/workbench/app/controllers/healthcheck_controller.rb
new file mode 100644 (file)
index 0000000..60043d9
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class HealthcheckController < ApplicationController
+  skip_around_filter :thread_clear
+  skip_around_filter :set_thread_api_token
+  skip_around_filter :require_thread_api_token
+  skip_before_filter :ensure_arvados_api_exists
+  skip_before_filter :accept_uuid_as_id_param
+  skip_before_filter :check_user_agreements
+  skip_before_filter :check_user_profile
+  skip_before_filter :load_filters_and_paging_params
+  skip_before_filter :find_object_by_uuid
+
+  before_filter :check_auth_header
+
+  def check_auth_header
+    mgmt_token = Rails.configuration.ManagementToken
+    auth_header = request.headers['Authorization']
+
+    if !mgmt_token
+      render :json => {:errors => "disabled"}, :status => 404
+    elsif !auth_header
+      render :json => {:errors => "authorization required"}, :status => 401
+    elsif auth_header != 'Bearer '+mgmt_token
+      render :json => {:errors => "authorization error"}, :status => 403
+    end
+  end
+
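+  # Example check (hypothetical host and token), assuming the
+  # standard /_health/ping route:
+  #   curl -H "Authorization: Bearer $ManagementToken" \
+  #     https://workbench.example.com/_health/ping
+  #   # => {"health":"OK"}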
+  def ping
+    resp = {"health" => "OK"}
+    render json: resp
+  end
+end
diff --git a/apps/workbench/app/controllers/humans_controller.rb b/apps/workbench/app/controllers/humans_controller.rb
new file mode 100644 (file)
index 0000000..dd08b30
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class HumansController < ApplicationController
+end
diff --git a/apps/workbench/app/controllers/job_tasks_controller.rb b/apps/workbench/app/controllers/job_tasks_controller.rb
new file mode 100644 (file)
index 0000000..67b31ad
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class JobTasksController < ApplicationController
+end
diff --git a/apps/workbench/app/controllers/jobs_controller.rb b/apps/workbench/app/controllers/jobs_controller.rb
new file mode 100644 (file)
index 0000000..204dbb7
--- /dev/null
@@ -0,0 +1,88 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class JobsController < ApplicationController
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+
+  def generate_provenance(jobs)
+    return if params['tab_pane'] != "Provenance"
+
+    nodes = {}
+    collections = []
+    hashes = []
+    jobs.each do |j|
+      nodes[j[:uuid]] = j
+      hashes << j[:output]
+      ProvenanceHelper::find_collections(j[:script_parameters]) do |hash, uuid|
+        collections << uuid if uuid
+        hashes << hash if hash
+      end
+      nodes[j[:script_version]] = {:uuid => j[:script_version]}
+    end
+
+    Collection.where(uuid: collections).each do |c|
+      nodes[c[:portable_data_hash]] = c
+    end
+
+    Collection.where(portable_data_hash: hashes).each do |c|
+      nodes[c[:portable_data_hash]] = c
+    end
+
+    @svg = ProvenanceHelper::create_provenance_graph nodes, "provenance_svg", {
+      :request => request,
+      :direction => :top_down,
+      :all_script_parameters => true,
+      :script_version_nodes => true}
+  end
+
+  def index
+    @svg = ""
+    if params[:uuid]
+      @objects = Job.where(uuid: params[:uuid])
+      generate_provenance(@objects)
+      render_index
+    else
+      @limit = 20
+      super
+    end
+  end
+
+  def cancel
+    @object.cancel
+    if params[:return_to]
+      redirect_to params[:return_to]
+    else
+      redirect_to @object
+    end
+  end
+
+  def show
+    generate_provenance([@object])
+    super
+  end
+
+  def logs
+    @logs = @object.
+      stderr_log_query(Rails.configuration.running_job_log_records_to_fetch).
+      map { |e| e.serializable_hash.merge({ 'prepend' => true }) }
+    respond_to do |format|
+      format.json { render json: @logs }
+    end
+  end
+
+  def index_pane_list
+    if params[:uuid]
+      %w(Recent Provenance)
+    else
+      %w(Recent)
+    end
+  end
+
+  def show_pane_list
+    %w(Status Log Details Provenance Advanced)
+  end
+end
diff --git a/apps/workbench/app/controllers/keep_disks_controller.rb b/apps/workbench/app/controllers/keep_disks_controller.rb
new file mode 100644 (file)
index 0000000..c95ebdc
--- /dev/null
@@ -0,0 +1,59 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class KeepDisksController < ApplicationController
+  def create
+    defaults = { is_readable: true, is_writable: true }
+    @object = KeepDisk.new defaults.merge(params[:keep_disk] || {})
+    super
+  end
+
+  def index
+    # Retrieve cache age histogram info from logs.
+
+    # In the logs we expect to find it in an ordered list with entries
+    # of the form (mtime, disk proportion free).
+
+    # An entry of the form (1388747781, 0.52) means that if we deleted
+    # the oldest non-persisted blocks until we had 52% of the disk
+    # free, then all blocks with an mtime greater than 1388747781
+    # would be preserved.
+
+    # The chart we want to produce will tell us how much of the disk
+    # will be free if we use a cache age of x days. Therefore we will
+    # produce output specifying the age, cache and persisted. age is
+    # specified in milliseconds. cache is the size of the cache if we
+    # delete all blocks older than age. persisted is the size of the
+    # persisted blocks. It is constant regardless of age, but it lets
+    # us show a stacked graph.
+
+    # Finally, each entry in cache_age_histogram is a dictionary,
+    # because that's what our charting package wants.
+
+    @cache_age_histogram = []
+    @histogram_pretty_date = nil
+    histogram_log = Log.
+      filter([[:event_type, '=', 'block-age-free-space-histogram']]).
+      order(:created_at => :desc).
+      with_count('none').
+      limit(1)
+    histogram_log.each do |log_entry|
+      # We expect this block to execute at most once, since we
+      # specified limit(1).
+      @cache_age_histogram = log_entry['properties'][:histogram]
+      # JavaScript wants dates in milliseconds.
+      histogram_date_ms = log_entry['event_at'].to_i * 1000
+      @histogram_pretty_date = log_entry['event_at'].strftime('%b %-d, %Y')
+
+      total_free_cache = @cache_age_histogram[-1][1]
+      persisted_storage = 1 - total_free_cache
+      @cache_age_histogram.map! { |x| {:age => histogram_date_ms - x[0]*1000,
+          :cache => total_free_cache - x[1],
+          :persisted => persisted_storage} }
+    end
+
+    # Do the regular control work needed.
+    super
+  end
+end
diff --git a/apps/workbench/app/controllers/keep_services_controller.rb b/apps/workbench/app/controllers/keep_services_controller.rb
new file mode 100644 (file)
index 0000000..361d400
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class KeepServicesController < ApplicationController
+end
diff --git a/apps/workbench/app/controllers/links_controller.rb b/apps/workbench/app/controllers/links_controller.rb
new file mode 100644 (file)
index 0000000..b79fad4
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class LinksController < ApplicationController
+  def show
+    if @object.link_class == 'name' and
+        Collection == ArvadosBase::resource_class_for_uuid(@object.head_uuid)
+      return redirect_to collection_path(@object.uuid)
+    end
+    super
+  end
+end
diff --git a/apps/workbench/app/controllers/logs_controller.rb b/apps/workbench/app/controllers/logs_controller.rb
new file mode 100644 (file)
index 0000000..512f0a3
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class LogsController < ApplicationController
+  before_filter :ensure_current_user_is_admin
+end
diff --git a/apps/workbench/app/controllers/nodes_controller.rb b/apps/workbench/app/controllers/nodes_controller.rb
new file mode 100644 (file)
index 0000000..72bde69
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class NodesController < ApplicationController
+end
diff --git a/apps/workbench/app/controllers/pipeline_instances_controller.rb b/apps/workbench/app/controllers/pipeline_instances_controller.rb
new file mode 100644 (file)
index 0000000..93bb869
--- /dev/null
@@ -0,0 +1,374 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class PipelineInstancesController < ApplicationController
+  skip_before_filter :find_object_by_uuid, only: :compare
+  before_filter :find_objects_by_uuid, only: :compare
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+
+  include PipelineInstancesHelper
+  include PipelineComponentsHelper
+
+  def copy
+    template = PipelineTemplate.find?(@object.pipeline_template_uuid)
+
+    source = @object
+    @object = PipelineInstance.new
+    @object.pipeline_template_uuid = source.pipeline_template_uuid
+
+    if params['components'] == 'use_latest' and template
+      @object.components = template.components.deep_dup
+      @object.components.each do |cname, component|
+        # Go through the script parameters of each component
+        # that are marked as user input and copy them over.
+        # Skip any components that are not present in the
+        # source instance (there's nothing to copy)
+        if source.components.include? cname
+          component[:script_parameters].each do |pname, val|
+            if val.is_a? Hash and val[:dataclass]
+              # this is user-inputtable, so check the value from the source pipeline
+              srcvalue = source.components[cname][:script_parameters][pname]
+              if not srcvalue.nil?
+                component[:script_parameters][pname] = srcvalue
+              end
+            end
+          end
+        end
+      end
+    else
+      @object.components = source.components.deep_dup
+    end
+
+    if params['script'] == 'use_same'
+      # Go through each component and copy the script_version from each job.
+      @object.components.each do |cname, component|
+        if source.components.include? cname and source.components[cname][:job]
+          component[:script_version] = source.components[cname][:job][:script_version]
+        end
+      end
+    end
+
+    @object.components.each do |cname, component|
+      component.delete :job
+    end
+    @object.state = 'New'
+
+    # set owner_uuid to that of source, provided it is a project and writable by current user
+    current_project = Group.find(source.owner_uuid) rescue nil
+    if (current_project && current_project.writable_by.andand.include?(current_user.uuid))
+      @object.owner_uuid = source.owner_uuid
+    end
+
+    super
+  end
+
+  def update
+    @updates ||= params[@object.class.to_s.underscore.singularize.to_sym]
+    if (components = @updates[:components])
+      components.each do |cname, component|
+        if component[:script_parameters]
+          component[:script_parameters].each do |param, value_info|
+            if value_info.is_a? Hash
+              value_info_partitioned = value_info[:value].partition('/') if value_info[:value].andand.class.eql?(String)
+              value_info_value = value_info_partitioned ? value_info_partitioned[0] : value_info[:value]
+              value_info_class = resource_class_for_uuid value_info_value
+              if value_info_class == Link
+                # Use the link target, not the link itself, as script
+                # parameter; but keep the link info around as well.
+                link = Link.find value_info[:value]
+                value_info[:value] = link.head_uuid
+                value_info[:link_uuid] = link.uuid
+                value_info[:link_name] = link.name
+              else
+                # Delete stale link_uuid and link_name data.
+                value_info[:link_uuid] = nil
+                value_info[:link_name] = nil
+              end
+              if value_info_class == Collection
+                # To ensure reproducibility, the script_parameter for a
+                # collection should be the portable_data_hash; keep the
+                # collection name and uuid for human-readability.
+                obj = Collection.find value_info_value
+                if value_info_partitioned
+                  value_info[:value] = obj.portable_data_hash + value_info_partitioned[1] + value_info_partitioned[2]
+                  value_info[:selection_name] = obj.name ? obj.name + value_info_partitioned[1] + value_info_partitioned[2] : obj.name
+                else
+                  value_info[:value] = obj.portable_data_hash
+                  value_info[:selection_name] = obj.name
+                end
+                value_info[:selection_uuid] = obj.uuid
+              end
+            end
+          end
+        end
+      end
+    end
+    super
+  end
+
+  def graph(pipelines)
+    return nil, nil if params['tab_pane'] != "Graph"
+
+    provenance = {}
+    pips = {}
+    n = 1
+
+    # When comparing more than one pipeline, "pips" stores bit fields
+    # that indicate which objects are part of which pipelines.
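+    # e.g. pips[uuid] == 0b101 means the object belongs to the first
+    # and third pipelines being compared.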
+
+    pipelines.each do |p|
+      collections = []
+      hashes = []
+      jobs = []
+
+      p[:components].each do |k, v|
+        provenance["component_#{p[:uuid]}_#{k}"] = v
+
+        collections << v[:output_uuid] if v[:output_uuid]
+        jobs << v[:job][:uuid] if v[:job]
+      end
+
+      jobs = jobs.compact.uniq
+      if jobs.any?
+        Job.where(uuid: jobs).each do |j|
+          job_uuid = j.uuid
+
+          provenance[job_uuid] = j
+          pips[job_uuid] = 0 unless pips[job_uuid] != nil
+          pips[job_uuid] |= n
+
+          hashes << j[:output] if j[:output]
+          ProvenanceHelper::find_collections(j) do |hash, uuid|
+            collections << uuid if uuid
+            hashes << hash if hash
+          end
+
+          if j[:script_version]
+            script_uuid = j[:script_version]
+            provenance[script_uuid] = {:uuid => script_uuid}
+            pips[script_uuid] = 0 unless pips[script_uuid] != nil
+            pips[script_uuid] |= n
+          end
+        end
+      end
+
+      hashes = hashes.compact.uniq
+      if hashes.any?
+        Collection.where(portable_data_hash: hashes).each do |c|
+          hash_uuid = c.portable_data_hash
+          provenance[hash_uuid] = c
+          pips[hash_uuid] = 0 unless pips[hash_uuid] != nil
+          pips[hash_uuid] |= n
+        end
+      end
+
+      collections = collections.compact.uniq
+      if collections.any?
+        Collection.where(uuid: collections).each do |c|
+          collection_uuid = c.uuid
+          provenance[collection_uuid] = c
+          pips[collection_uuid] = 0 unless pips[collection_uuid] != nil
+          pips[collection_uuid] |= n
+        end
+      end
+
+      n = n << 1
+    end
+
+    return provenance, pips
+  end
+
+  def show
+    # the #show action can also be called by #compare, which does its own work to set up @pipelines
+    unless defined? @pipelines
+      @pipelines = [@object]
+    end
+
+    provenance, pips = graph(@pipelines)
+    if provenance
+      @prov_svg = ProvenanceHelper::create_provenance_graph provenance, "provenance_svg", {
+        :request => request,
+        :direction => :top_down,
+        :all_script_parameters => true,
+        :combine_jobs => :script_and_version,
+        :pips => pips,
+        :only_components => true,
+        :no_docker => true,
+        :no_log => true}
+    end
+
+    super
+  end
+
+  def compare
+    @breadcrumb_page_name = 'compare'
+
+    @rows = []          # each is {name: S, components: [...]}
+
+    if params['tab_pane'] == "Compare" or params['tab_pane'].nil?
+      # Build a table: x=pipeline y=component
+      @objects.each_with_index do |pi, pi_index|
+        pipeline_jobs(pi).each do |component|
+          # Find a cell with the same name as this component but no
+          # entry for this pipeline
+          target_row = nil
+          @rows.each_with_index do |row, row_index|
+            if row[:name] == component[:name] and !row[:components][pi_index]
+              target_row = row
+            end
+          end
+          if !target_row
+            target_row = {name: component[:name], components: []}
+            @rows << target_row
+          end
+          target_row[:components][pi_index] = component
+        end
+      end
+
+      @rows.each do |row|
+        # Build a "normal" pseudo-component for this row by picking the
+        # most common value for each attribute. If all values are
+        # equally common, there is no "normal".
+        normal = {}              # attr => most common value
+        highscore = {}           # attr => how common "normal" is
+        score = {}               # attr => { value => how common }
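+        # e.g. if three pipelines have script versions [v1, v1, v2],
+        # score[:script_version] ends up {"v1"=>2, "v2"=>1}, highscore
+        # is 2, and "v1" is recorded as the "normal" value.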
+        row[:components].each do |pj|
+          next if pj.nil?
+          pj.each do |k,v|
+            vstr = for_comparison v
+            score[k] ||= {}
+            score[k][vstr] = (score[k][vstr] || 0) + 1
+            highscore[k] ||= 0
+            if score[k][vstr] == highscore[k]
+              # tie for first place = no "normal"
+              normal.delete k
+            elsif score[k][vstr] == highscore[k] + 1
+              # more pipelines have v than anything else
+              highscore[k] = score[k][vstr]
+              normal[k] = vstr
+            end
+          end
+        end
+
+        # Add a hash in component[:is_normal]: { attr => is_the_value_normal? }
+        row[:components].each do |pj|
+          next if pj.nil?
+          pj[:is_normal] = {}
+          pj.each do |k,v|
+            pj[:is_normal][k] = (normal.has_key?(k) && normal[k] == for_comparison(v))
+          end
+        end
+      end
+    end
+
+    if params['tab_pane'] == "Graph"
+      @pipelines = @objects
+    end
+
+    @object = @objects.first
+
+    show
+  end
+
+  def show_pane_list
+    panes = %w(Components Log Graph Advanced)
+    if @object and @object.state.in? ['New', 'Ready']
+      panes = %w(Inputs) + panes - %w(Log)
+    end
+    if not @object.components.values.any? { |x| x[:job] rescue false }
+      panes -= ['Graph']
+    end
+    panes
+  end
+
+  def compare_pane_list
+    %w(Compare Graph)
+  end
+
+  helper_method :unreadable_inputs_present?
+  def unreadable_inputs_present?
+    unless @unreadable_inputs_present.nil?
+      return @unreadable_inputs_present
+    end
+
+    input_uuids = []
+    input_pdhs = []
+    @object.components.each do |k, component|
+      next if !component
+      component[:script_parameters].andand.each do |p, tv|
+        if (tv.is_a? Hash) and ((tv[:dataclass] == "Collection") || (tv[:dataclass] == "File"))
+          if tv[:value]
+            value = tv[:value]
+          elsif tv[:default]
+            value = tv[:default]
+          else
+            value = ''
+          end
+          if value.present?
+            split = value.split '/'
+            if CollectionsHelper.match(split[0])
+              input_pdhs << split[0]
+            else
+              input_uuids << split[0]
+            end
+          end
+        end
+      end
+    end
+
+    input_pdhs = input_pdhs.uniq
+    input_uuids = input_uuids.uniq
+
+    preload_collections_for_objects input_uuids if input_uuids.any?
+    preload_for_pdhs input_pdhs if input_pdhs.any?
+
+    @unreadable_inputs_present = false
+    input_uuids.each do |uuid|
+      if !collections_for_object(uuid).any?
+        @unreadable_inputs_present = true
+        break
+      end
+    end
+    if !@unreadable_inputs_present
+      input_pdhs.each do |pdh|
+        if !collection_for_pdh(pdh).any?
+          @unreadable_inputs_present = true
+          break
+        end
+      end
+    end
+
+    @unreadable_inputs_present
+  end
+
+  def cancel
+    @object.cancel
+    if params[:return_to]
+      redirect_to params[:return_to]
+    else
+      redirect_to @object
+    end
+  end
+
+  protected
+  def for_comparison v
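+    # Serialize hashes and arrays so that structurally equal values compare
+    # as equal strings; e.g. both {a: 1} and {"a" => 1} become '{"a":1}'.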
+    if v.is_a? Hash or v.is_a? Array
+      v.to_json
+    else
+      v.to_s
+    end
+  end
+
+  def load_filters_and_paging_params
+    params[:limit] = 20
+    super
+  end
+
+  def find_objects_by_uuid
+    @objects = model_class.where(uuid: params[:uuids])
+  end
+end
diff --git a/apps/workbench/app/controllers/pipeline_templates_controller.rb b/apps/workbench/app/controllers/pipeline_templates_controller.rb
new file mode 100644 (file)
index 0000000..7d94e34
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class PipelineTemplatesController < ApplicationController
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+
+  include PipelineComponentsHelper
+
+  def show
+    @objects = PipelineInstance.where(pipeline_template_uuid: @object.uuid)
+    super
+  end
+
+  def show_pane_list
+    %w(Components Pipelines Advanced)
+  end
+end
diff --git a/apps/workbench/app/controllers/projects_controller.rb b/apps/workbench/app/controllers/projects_controller.rb
new file mode 100644 (file)
index 0000000..4a7563a
--- /dev/null
@@ -0,0 +1,324 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ProjectsController < ApplicationController
+  before_filter :set_share_links, if: -> { defined? @object and @object}
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    %w(show tab_counts public).include? ctrl.action_name
+  }
+
+  def model_class
+    Group
+  end
+
+  def find_object_by_uuid
+    if (current_user and params[:uuid] == current_user.uuid) or
+       (resource_class_for_uuid(params[:uuid]) == User)
+      if params[:uuid] != current_user.uuid
+        @object = User.find(params[:uuid])
+      else
+        @object = current_user.dup
+        @object.uuid = current_user.uuid
+      end
+
+      class << @object
+        def name
+          if current_user.uuid == self.uuid
+            'Home'
+          else
+            "Home for #{self.email}"
+          end
+        end
+        def description
+          ''
+        end
+        def attribute_editable? attr, *args
+          case attr
+          when 'description', 'name'
+            false
+          else
+            super
+          end
+        end
+      end
+    else
+      super
+    end
+  end
+
+  def index_pane_list
+    %w(Projects)
+  end
+
+  # Returning an array of hashes instead of an array of strings allows
+  # us to tell the interface to get counts for each pane (using :filters).
+  # These hashes could also be used to configure the contents of the panes.
+  def show_pane_list
+    pane_list = []
+
+    procs = ["arvados#containerRequest"]
+    procs_pane_name = 'Processes'
+    if PipelineInstance.api_exists?(:index)
+      procs << "arvados#pipelineInstance"
+      procs_pane_name = 'Pipelines_and_processes'
+    end
+
+    workflows = ["arvados#workflow"]
+    workflows_pane_name = 'Workflows'
+    if PipelineTemplate.api_exists?(:index)
+      workflows << "arvados#pipelineTemplate"
+      workflows_pane_name = 'Pipeline_templates'
+    end
+
+    if @object.uuid != current_user.andand.uuid
+      pane_list << 'Description'
+    end
+    pane_list <<
+      {
+        :name => 'Data_collections',
+        :filters => [%w(uuid is_a arvados#collection)]
+      }
+    pane_list <<
+      {
+        :name => procs_pane_name,
+        :filters => [%w(uuid is_a) + [procs]]
+      }
+    pane_list <<
+      {
+        :name => workflows_pane_name,
+        :filters => [%w(uuid is_a) + [workflows]]
+      }
+    pane_list <<
+      {
+        :name => 'Subprojects',
+        :filters => [%w(uuid is_a arvados#group)]
+      }
+    pane_list <<
+      {
+        :name => 'Other_objects',
+        :filters => [%w(uuid is_a) + [%w(arvados#human arvados#specimen arvados#trait)]]
+      } if current_user
+    pane_list << { :name => 'Sharing',
+                   :count => @share_links.count } if @user_is_manager
+    pane_list << { :name => 'Advanced' }
+  end
+
+  # Called via AJAX and returns Javascript that populates tab counts into tab titles.
+  # References the #show_pane_list action, which should return an array of hashes,
+  # each with a :name and, optionally, either a :filters to run or a literal :count.
+  #
+  # This action could easily be moved to the ApplicationController to genericize the
+  # tab_counts behaviour, but one or more new routes would have to be created, and
+  # the js.erb would also have to be moved.
+  def tab_counts
+    @tab_counts = {}
+    show_pane_list.each do |pane|
+      if pane.is_a?(Hash)
+        if pane[:count]
+          @tab_counts[pane[:name]] = pane[:count]
+        elsif pane[:filters]
+          @tab_counts[pane[:name]] = @object.contents(filters: pane[:filters]).items_available
+        end
+      end
+    end
+  end
+
+  def remove_item
+    params[:item_uuids] = [params[:item_uuid]]
+    remove_items
+    render template: 'projects/remove_items'
+  end
+
+  def remove_items
+    @removed_uuids = []
+    links = []
+    params[:item_uuids].collect { |uuid| ArvadosBase.find uuid }.each do |item|
+      if item.class == Collection or item.class == Group
+        # Use delete API on collections and projects/groups
+        item.destroy
+        @removed_uuids << item.uuid
+      elsif item.owner_uuid == @object.uuid
+        # Object is owned by this project. Remove it from the project by
+        # changing owner to the current user.
+        begin
+          item.update_attributes owner_uuid: current_user.uuid
+          @removed_uuids << item.uuid
+        rescue ArvadosApiClient::ApiErrorResponseException => e
+          if e.message.include? '_owner_uuid_'
+            rename_to = item.name + ' removed from ' +
+                        (@object.name ? @object.name : @object.uuid) +
+                        ' at ' + Time.now.to_s
+            updates = {}
+            updates[:name] = rename_to
+            updates[:owner_uuid] = current_user.uuid
+            item.update_attributes updates
+            @removed_uuids << item.uuid
+          else
+            raise
+          end
+        end
+      end
+    end
+  end
+
+  def destroy
+    while (objects = Link.filter([['owner_uuid','=',@object.uuid],
+                                  ['tail_uuid','=',@object.uuid]])).any?
+      objects.each do |object|
+        object.destroy
+      end
+    end
+    while (objects = @object.contents).any?
+      objects.each do |object|
+        object.update_attributes! owner_uuid: current_user.uuid
+      end
+    end
+    if ArvadosBase::resource_class_for_uuid(@object.owner_uuid) == Group
+      params[:return_to] ||= group_path(@object.owner_uuid)
+    else
+      params[:return_to] ||= projects_path
+    end
+    super
+  end
+
+  def find_objects_for_index
+    # We can use the all_projects helper, but we have to dup the
+    # result -- otherwise, when we apply our per-request filters and
+    # limits, they will infect the @all_projects cache too (see
+    # #6640).
+    @objects = all_projects.dup
+    super
+  end
+
+  def load_contents_objects kinds=[]
+    kind_filters = @filters.select do |attr,op,val|
+      op == 'is_a' and val.is_a? Array and val.count > 1
+    end
+    if /^created_at\b/ =~ @order[0] and kind_filters.count == 1
+      # If filtering on multiple types and sorting by date: Get the
+      # first page of each type, sort the entire set, truncate to one
+      # page, and use the last item on this page as a filter for
+      # retrieving the next page. Ideally the API would do this for
+      # us, but it doesn't (yet).
+
+      # To avoid losing items that have the same created_at as the
+      # last item on this page, we retrieve an overlapping page with a
+      # "created_at <= last_created_at" filter, then remove duplicates
+      # with a "uuid not in [...]" filter (see below).
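+      # E.g. with @order[0] == "created_at desc", the next page request
+      # carries filters like
+      #   [["created_at", "<=", <last created_at on this page>],
+      #    ["uuid", "not in", [<uuids already shown>]]]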
+      nextpage_operator = /\bdesc$/i =~ @order[0] ? '<=' : '>='
+
+      @objects = []
+      @name_link_for = {}
+      kind_filters.each do |attr,op,val|
+        (val.is_a?(Array) ? val : [val]).each do |type|
+          klass = type.split('#')[-1]
+          klass[0] = klass[0].capitalize
+          next if(!Object.const_get(klass).api_exists?(:index))
+
+          filters = @filters - kind_filters + [['uuid', 'is_a', type]]
+          if type == 'arvados#containerRequest'
+            filters = filters + [['container_requests.requesting_container_uuid', '=', nil]]
+          end
+          objects = @object.contents(order: @order,
+                                     limit: @limit,
+                                     filters: filters,
+                                    )
+          objects.each do |object|
+            @name_link_for[object.andand.uuid] = objects.links_for(object, 'name').first
+          end
+          @objects += objects
+        end
+      end
+      @objects = @objects.to_a.sort_by(&:created_at)
+      @objects.reverse! if nextpage_operator == '<='
+      @objects = @objects[0..@limit-1]
+
+      if @objects.any?
+        @next_page_filters = next_page_filters(nextpage_operator)
+        @next_page_href = url_for(partial: :contents_rows,
+                                  limit: @limit,
+                                  filters: @next_page_filters.to_json)
+      else
+        @next_page_href = nil
+      end
+    else
+      @objects = @object.contents(order: @order,
+                                  limit: @limit,
+                                  filters: @filters,
+                                  offset: @offset)
+      @next_page_href = next_page_href(partial: :contents_rows,
+                                       filters: @filters.to_json,
+                                       order: @order.to_json)
+    end
+
+    preload_links_for_objects(@objects.to_a)
+  end
+
+  def show
+    if !@object
+      return render_not_found("object not found")
+    end
+
+    if params[:partial]
+      load_contents_objects
+      respond_to do |f|
+        f.json {
+          render json: {
+            content: render_to_string(partial: 'show_contents_rows.html',
+                                      formats: [:html]),
+            next_page_href: @next_page_href
+          }
+        }
+      end
+    else
+      @objects = []
+      super
+    end
+  end
+
+  def create
+    @new_resource_attrs = (params['project'] || {}).merge(group_class: 'project')
+    @new_resource_attrs[:name] ||= 'New project'
+    super
+  end
+
+  def update
+    @updates = params['project']
+    super
+  end
+
+  helper_method :get_objects_and_names
+  def get_objects_and_names(objects=nil)
+    objects = @objects if objects.nil?
+    objects_and_names = []
+    objects.each do |object|
+      if objects.respond_to? :links_for and
+          !(name_links = objects.links_for(object, 'name')).empty?
+        name_links.each do |name_link|
+          objects_and_names << [object, name_link]
+        end
+      elsif @name_link_for.andand[object.uuid]
+        objects_and_names << [object, @name_link_for[object.uuid]]
+      elsif object.respond_to? :name
+        objects_and_names << [object, object]
+      else
+        objects_and_names << [object,
+                               Link.new(owner_uuid: @object.uuid,
+                                        tail_uuid: @object.uuid,
+                                        head_uuid: object.uuid,
+                                        link_class: "name",
+                                        name: "")]
+
+      end
+    end
+    objects_and_names
+  end
+
+  def public  # Yes 'public' is the name of the action for public projects
+    return render_not_found if not Rails.configuration.anonymous_user_token or not Rails.configuration.enable_public_projects_page
+    @objects = using_specific_api_token Rails.configuration.anonymous_user_token do
+      Group.where(group_class: 'project').order("modified_at DESC")
+    end
+  end
+end
diff --git a/apps/workbench/app/controllers/repositories_controller.rb b/apps/workbench/app/controllers/repositories_controller.rb
new file mode 100644 (file)
index 0000000..5ca6f22
--- /dev/null
@@ -0,0 +1,107 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RepositoriesController < ApplicationController
+  before_filter :set_share_links, if: -> { defined? @object }
+
+  def index_pane_list
+    %w(repositories help)
+  end
+
+  def show_pane_list
+    panes = super
+    if @user_is_manager
+      panes |= %w(Sharing)
+      panes.insert(panes.length-1, panes.delete_at(panes.index('Advanced'))) if panes.index('Advanced')
+    end
+    panes.delete('Attributes') if !current_user.is_admin
+    panes
+  end
+
+  def show_tree
+    @commit = params[:commit]
+    @path = params[:path] || ''
+    @subtree = @object.ls_subtree @commit, @path.chomp('/')
+  end
+
+  def show_blob
+    @commit = params[:commit]
+    @path = params[:path]
+    @blobdata = @object.cat_file @commit, @path
+  end
+
+  def show_commit
+    @commit = params[:commit]
+  end
+
+  def all_repos
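+    # Pagination happens in two phases: pages of repositories owned by the
+    # current user first, then pages of everyone else's. The presence of an
+    # owner_uuid filter in the incoming request signals that the owned-repo
+    # phase is already finished (see next_page_filters below).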
+    limit = params[:limit].andand.to_i || 100
+    offset = params[:offset].andand.to_i || 0
+    @filters = params[:filters] || []
+
+    if @filters.any?
+      owner_filter = @filters.select do |attr, op, val|
+        (attr == 'owner_uuid')
+      end
+    end
+
+    if !owner_filter.andand.any?
+      filters = @filters + [["owner_uuid", "=", current_user.uuid]]
+      my_repos = Repository.all.order("name ASC").limit(limit).offset(offset).filter(filters).results
+    else      # done fetching all owned repositories
+      my_repos = []
+    end
+
+    if !owner_filter.andand.any?  # no owner_uuid filter yet: still fetching "own" repos, so reset pagination filters for the other-repos query below
+      @filters = @filters.reject do |attr, op, val|
+        (attr == 'owner_uuid') or
+        (attr == 'name') or
+        (attr == 'uuid')
+      end
+    end
+
+    filters = @filters + [["owner_uuid", "!=", current_user.uuid]]
+    other_repos = Repository.all.order("name ASC").limit(limit).offset(offset).filter(filters).results
+
+    @objects = (my_repos + other_repos).first(limit)
+  end
+
+  def find_objects_for_index
+    return if !params[:partial]
+
+    all_repos
+
+    if @objects.any?
+      @next_page_filters = next_page_filters('>=')
+      @next_page_href = url_for(partial: :repositories_rows,
+                                filters: @next_page_filters.to_json)
+    else
+      @next_page_href = nil
+    end
+  end
+
+  def next_page_href with_params={}
+    @next_page_href
+  end
+
+  def next_page_filters nextpage_operator
+    next_page_filters = @filters.reject do |attr, op, val|
+      (attr == 'owner_uuid') or
+      (attr == 'name' and op == nextpage_operator) or
+      (attr == 'uuid' and op == 'not in')
+    end
+
+    if @objects.any?
+      last_obj = @objects.last
+      next_page_filters += [['name', nextpage_operator, last_obj.name]]
+      next_page_filters += [['uuid', 'not in', [last_obj.uuid]]]
+      # If the last object is not owned by the current user, we are done with
+      # owned repos and are now fetching other repos.
+      next_page_filters += [['owner_uuid', '!=', current_user.uuid]] if last_obj.owner_uuid != current_user.uuid
+    end
+
+    next_page_filters
+  end
+end
diff --git a/apps/workbench/app/controllers/search_controller.rb b/apps/workbench/app/controllers/search_controller.rb
new file mode 100644 (file)
index 0000000..3775abd
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SearchController < ApplicationController
+  skip_before_filter :ensure_arvados_api_exists
+
+  def find_objects_for_index
+    search_what = Group
+    if params[:project_uuid]
+      # Special case for "search all things in project":
+      @filters = @filters.select do |attr, operator, operand|
+        not (attr == 'owner_uuid' and operator == '=')
+      end
+      # Special case for project_uuid is a user uuid:
+      if ArvadosBase::resource_class_for_uuid(params[:project_uuid]) == User
+        search_what = User.find params[:project_uuid]
+      else
+        search_what = Group.find params[:project_uuid]
+      end
+    end
+    @objects = search_what.contents(limit: @limit,
+                                    offset: @offset,
+                                    recursive: true,
+                                    count: 'none',
+                                    last_object_class: params["last_object_class"],
+                                    filters: @filters)
+    super
+  end
+
+  def next_page_href with_params={}
+    super with_params.merge(last_object_class: @objects.last.class.to_s,
+                            project_uuid: params[:project_uuid],
+                            recursive: true,
+                            count: 'none',
+                            filters: @filters.to_json)
+  end
+end
diff --git a/apps/workbench/app/controllers/sessions_controller.rb b/apps/workbench/app/controllers/sessions_controller.rb
new file mode 100644 (file)
index 0000000..48fbc6b
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SessionsController < ApplicationController
+  skip_around_filter :require_thread_api_token, :only => [:destroy, :logged_out]
+  skip_around_filter :set_thread_api_token, :only => [:destroy, :logged_out]
+  skip_before_filter :find_object_by_uuid
+  skip_before_filter :find_objects_for_index
+  skip_before_filter :ensure_arvados_api_exists
+
+  def destroy
+    session.clear
+    redirect_to arvados_api_client.arvados_logout_url(return_to: root_url)
+  end
+
+  def logged_out
+    redirect_to root_url if session[:arvados_api_token]
+    render_index
+  end
+
+  def index
+  end
+end
diff --git a/apps/workbench/app/controllers/specimens_controller.rb b/apps/workbench/app/controllers/specimens_controller.rb
new file mode 100644 (file)
index 0000000..76a1271
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SpecimensController < ApplicationController
+end
diff --git a/apps/workbench/app/controllers/status_controller.rb b/apps/workbench/app/controllers/status_controller.rb
new file mode 100644 (file)
index 0000000..90b7be5
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class StatusController < ApplicationController
+  skip_around_filter :require_thread_api_token
+  skip_before_filter :find_object_by_uuid
+  def status
+    # Allow non-credentialed cross-origin requests
+    headers['Access-Control-Allow-Origin'] = '*'
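+    # E.g. responds with something like
+    #   {"apiBaseURL":"https://api.example.org/","version":"abc123"}
+    # (illustrative values).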
+    resp = {
+      apiBaseURL: arvados_api_client.arvados_v1_base.sub(%r{/arvados/v\d+.*}, '/'),
+      version: AppVersion.hash,
+    }
+    render json: resp
+  end
+end
diff --git a/apps/workbench/app/controllers/tests_controller.rb b/apps/workbench/app/controllers/tests_controller.rb
new file mode 100644 (file)
index 0000000..5d2de4e
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class TestsController < ApplicationController
+  skip_before_filter :find_object_by_uuid
+  def mithril
+  end
+end
diff --git a/apps/workbench/app/controllers/traits_controller.rb b/apps/workbench/app/controllers/traits_controller.rb
new file mode 100644 (file)
index 0000000..81bded4
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class TraitsController < ApplicationController
+end
diff --git a/apps/workbench/app/controllers/trash_items_controller.rb b/apps/workbench/app/controllers/trash_items_controller.rb
new file mode 100644 (file)
index 0000000..7d6e143
--- /dev/null
@@ -0,0 +1,147 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class TrashItemsController < ApplicationController
+  def model_class
+    Collection
+  end
+
+  def index_pane_list
+    %w(Trashed_collections Trashed_projects)
+  end
+
+  def find_objects_for_index
+    # If this is not the index rows partial display, just return.
+    # The /index request will be invoked again to display the partial,
+    # at which time the objects found here will be used.
+    return if !params[:partial]
+
+    trashed_items
+
+    if @objects.any?
+      @objects = @objects.sort_by { |obj| obj.modified_at }.reverse
+      @next_page_filters = next_page_filters('<=')
+      @next_page_href = url_for(partial: params[:partial],
+                                filters: @next_page_filters.to_json)
+    else
+      @next_page_href = nil
+    end
+  end
+
+  def next_page_href with_params={}
+    @next_page_href
+  end
+
+  def next_page_filters nextpage_operator
+    next_page_filters = @filters.reject do |attr, op, val|
+      (attr == 'modified_at' and op == nextpage_operator) or
+      (attr == 'uuid' and op == 'not in')
+    end
+
+    if @objects.any?
+      last_modified_at = @objects.last.modified_at
+
+      last_uuids = []
+      @objects.each do |obj|
+        last_uuids << obj.uuid if obj.modified_at.eql?(last_modified_at)
+      end
+
+      next_page_filters += [['modified_at', nextpage_operator, last_modified_at]]
+      next_page_filters += [['uuid', 'not in', last_uuids]]
+    end
+
+    next_page_filters
+  end
+
+  def trashed_items
+    if params[:partial] == "trashed_collection_rows"
+      query_on = Collection
+    elsif params[:partial] == "trashed_project_rows"
+      query_on = Group
+    end
+
+    last_mod_at = nil
+    last_uuids = []
+
+    # API server index doesn't return manifest_text by default, but our
+    # callers want it unless otherwise specified.
+    #@select ||= query_on.columns.map(&:name) - %w(id updated_at)
+    limit = if params[:limit] then params[:limit].to_i else 100 end
+    offset = if params[:offset] then params[:offset].to_i else 0 end
+
+    @objects = []
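+    # Keep fetching pages until at least one object survives the filtering
+    # below; each iteration pages past the last modified_at seen so far.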
+    while !@objects.any?
+      base_search = query_on
+
+      if !last_mod_at.nil?
+        base_search = base_search.filter([["modified_at", "<=", last_mod_at], ["uuid", "not in", last_uuids]])
+      end
+
+      base_search = base_search.include_trash(true).limit(limit).offset(offset)
+
+      if params[:filters].andand.length.andand > 0
+        tags = Link.filter(params[:filters])
+        tagged = []
+        if tags.results.length > 0
+          tagged = query_on.include_trash(true).where(uuid: tags.collect(&:head_uuid))
+        end
+        @objects = (tagged | base_search.filter(params[:filters])).uniq(&:uuid)
+      else
+        @objects = base_search.where(is_trashed: true)
+      end
+
+      if @objects.any?
+        owner_uuids = @objects.collect(&:owner_uuid).uniq
+        @owners = {}
+        @not_trashed = {}
+        Group.filter([["uuid", "in", owner_uuids]]).include_trash(true).each do |grp|
+          @owners[grp.uuid] = grp
+        end
+        User.filter([["uuid", "in", owner_uuids]]).include_trash(true).each do |grp|
+          @owners[grp.uuid] = grp
+          @not_trashed[grp.uuid] = true
+        end
+        Group.filter([["uuid", "in", owner_uuids]]).select([:uuid]).each do |grp|
+          @not_trashed[grp.uuid] = true
+        end
+      else
+        return
+      end
+
+      last_mod_at = @objects.last.modified_at
+      last_uuids = []
+      @objects.each do |obj|
+        last_uuids << obj.uuid if obj.modified_at.eql?(last_mod_at)
+      end
+
+      @objects = @objects.select {|item| item.is_trashed || @not_trashed[item.owner_uuid].nil? }
+    end
+  end
+
+  def untrash_items
+    @untrashed_uuids = []
+
+    updates = {trash_at: nil}
+
+    if params[:selection].is_a? Array
+      klass = resource_class_for_uuid(params[:selection][0])
+    else
+      klass = resource_class_for_uuid(params[:selection])
+    end
+
+    first = nil
+    klass.include_trash(1).where(uuid: params[:selection]).each do |c|
+      first = c
+      c.untrash
+      @untrashed_uuids << c.uuid
+    end
+
+    respond_to do |format|
+      format.js
+      format.html do
+        redirect_to first
+      end
+    end
+  end
+end
diff --git a/apps/workbench/app/controllers/user_agreements_controller.rb b/apps/workbench/app/controllers/user_agreements_controller.rb
new file mode 100644 (file)
index 0000000..2797c4c
--- /dev/null
@@ -0,0 +1,29 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class UserAgreementsController < ApplicationController
+  skip_before_filter :check_user_agreements
+  skip_before_filter :find_object_by_uuid
+  skip_before_filter :check_user_profile
+
+  def index
+    if unsigned_user_agreements.empty?
+      redirect_to(params[:return_to] || :back)
+    end
+  end
+
+  def model_class
+    Collection
+  end
+
+  def sign
+    params[:checked].each do |checked|
+      if (r = CollectionsHelper.match_uuid_with_optional_filepath(checked))
+        UserAgreement.sign uuid: r[1]
+      end
+    end
+    current_user.activate
+    redirect_to(params[:return_to] || :back)
+  end
+end
diff --git a/apps/workbench/app/controllers/users_controller.rb b/apps/workbench/app/controllers/users_controller.rb
new file mode 100644 (file)
index 0000000..c954944
--- /dev/null
@@ -0,0 +1,374 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class UsersController < ApplicationController
+  skip_around_filter :require_thread_api_token, only: :welcome
+  skip_before_filter :check_user_agreements, only: [:welcome, :inactive, :link_account, :merge]
+  skip_before_filter :check_user_profile, only: [:welcome, :inactive, :profile, :link_account, :merge]
+  skip_before_filter :find_object_by_uuid, only: [:welcome, :activity, :storage]
+  before_filter :ensure_current_user_is_admin, only: [:sudo, :unsetup, :setup]
+
+  def show
+    if params[:uuid] == current_user.uuid
+      respond_to do |f|
+        f.html do
+          if request.url.include?("/users/#{current_user.uuid}")
+            super
+          else
+            redirect_to(params[:return_to] || project_path(params[:uuid]))
+          end
+        end
+      end
+    else
+      super
+    end
+  end
+
+  def welcome
+    if current_user
+      redirect_to(params[:return_to] || '/')
+    end
+  end
+
+  def inactive
+    if current_user.andand.is_invited
+      redirect_to(params[:return_to] || '/')
+    end
+  end
+
+  def profile
+    params[:offer_return_to] ||= params[:return_to]
+  end
+
+  def activity
+    @breadcrumb_page_name = nil
+    @users = User.limit(params[:limit])
+    @user_activity = {}
+    @activity = {
+      logins: {},
+      jobs: {},
+      pipeline_instances: {}
+    }
+    @total_activity = {}
+    @spans = [['This week', Time.now.beginning_of_week, Time.now],
+              ['Last week',
+               Time.now.beginning_of_week.advance(weeks:-1),
+               Time.now.beginning_of_week],
+              ['This month', Time.now.beginning_of_month, Time.now],
+              ['Last month',
+               1.month.ago.beginning_of_month,
+               Time.now.beginning_of_month]]
+    @spans.each do |span, threshold_start, threshold_end|
+      @activity[:logins][span] = Log.select(%w(uuid modified_by_user_uuid)).
+        filter([[:event_type, '=', 'login'],
+                [:object_kind, '=', 'arvados#user'],
+                [:created_at, '>=', threshold_start],
+                [:created_at, '<', threshold_end]])
+      @activity[:jobs][span] = Job.select(%w(uuid modified_by_user_uuid)).
+        filter([[:created_at, '>=', threshold_start],
+                [:created_at, '<', threshold_end]])
+      @activity[:pipeline_instances][span] = PipelineInstance.select(%w(uuid modified_by_user_uuid)).
+        filter([[:created_at, '>=', threshold_start],
+                [:created_at, '<', threshold_end]])
+      @activity.each do |type, act|
+        records = act[span]
+        @users.each do |u|
+          @user_activity[u.uuid] ||= {}
+          @user_activity[u.uuid][span + ' ' + type.to_s] ||= 0
+        end
+        records.each do |record|
+          @user_activity[record.modified_by_user_uuid] ||= {}
+          @user_activity[record.modified_by_user_uuid][span + ' ' + type.to_s] ||= 0
+          @user_activity[record.modified_by_user_uuid][span + ' ' + type.to_s] += 1
+          @total_activity[span + ' ' + type.to_s] ||= 0
+          @total_activity[span + ' ' + type.to_s] += 1
+        end
+      end
+    end
+    @users = @users.sort_by do |a|
+      [-@user_activity[a.uuid].values.inject(:+), a.full_name]
+    end
+    # Prepend a "Total" pseudo-user to the sorted list
+    @user_activity[nil] = @total_activity
+    @users = [OpenStruct.new(uuid: nil)] + @users
+  end
+
+  def storage
+    @breadcrumb_page_name = nil
+    @users = User.limit(params[:limit])
+    @user_storage = {}
+    total_storage = {}
+    @log_date = {}
+    @users.each do |u|
+      @user_storage[u.uuid] ||= {}
+      storage_log = Log.
+        filter([[:object_uuid, '=', u.uuid],
+                [:event_type, '=', 'user-storage-report']]).
+        order(:created_at => :desc).
+        with_count('none').
+        limit(1)
+      storage_log.each do |log_entry|
+        # We expect this block to only execute once since we specified limit(1)
+        @user_storage[u.uuid] = log_entry['properties']
+        @log_date[u.uuid] = log_entry['event_at']
+      end
+      total_storage.merge!(@user_storage[u.uuid]) { |k,v1,v2| v1 + v2 }
+    end
+    @users = @users.sort_by { |u|
+      [-@user_storage[u.uuid].values.push(0).inject(:+), u.full_name]}
+    # Prepend a "Total" pseudo-user to the sorted list
+    @users = [OpenStruct.new(uuid: nil)] + @users
+    @user_storage[nil] = total_storage
+  end
+
+  def show_pane_list
+    if current_user.andand.is_admin
+      super | %w(Admin)
+    else
+      super
+    end
+  end
+
+  def index_pane_list
+    if current_user.andand.is_admin
+      super | %w(Activity)
+    else
+      super
+    end
+  end
+
+  def sudo
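+    # Create a new API token owned by the target user, then log in again
+    # using it, in the "v2/<token uuid>/<secret>" token format.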
+    resp = arvados_api_client.api(ApiClientAuthorization, '', {
+                                    api_client_authorization: {
+                                      owner_uuid: @object.uuid
+                                    }
+                                  })
+    redirect_to root_url(api_token: "v2/#{resp[:uuid]}/#{resp[:api_token]}")
+  end
+
+  def home
+    @my_ssh_keys = AuthorizedKey.where(authorized_user_uuid: current_user.uuid)
+    @my_tag_links = {}
+
+    @my_jobs = Job.
+      limit(10).
+      order('created_at desc').
+      where(created_by: current_user.uuid)
+
+    @my_collections = Collection.
+      limit(10).
+      order('created_at desc').
+      where(created_by: current_user.uuid)
+    collection_uuids = @my_collections.collect &:uuid
+
+    @persist_state = {}
+    collection_uuids.each do |uuid|
+      @persist_state[uuid] = 'cache'
+    end
+
+    Link.filter([['head_uuid', 'in', collection_uuids],
+                 ['link_class', 'in', ['tag', 'resources']]]).
+      each do |link|
+      case link.link_class
+      when 'tag'
+        (@my_tag_links[link.head_uuid] ||= []) << link
+      when 'resources'
+        if link.name == 'wants'
+          @persist_state[link.head_uuid] = 'persistent'
+        end
+      end
+    end
+
+    @my_pipelines = PipelineInstance.
+      limit(10).
+      order('created_at desc').
+      where(created_by: current_user.uuid)
+
+    respond_to do |f|
+      f.js { render template: 'users/home.js' }
+      f.html { render template: 'users/home' }
+    end
+  end
+
+  def unsetup
+    if current_user.andand.is_admin
+      @object.unsetup
+    end
+    show
+  end
+
+  def setup
+    respond_to do |format|
+      if current_user.andand.is_admin
+        setup_params = {}
+        setup_params[:send_notification_email] = "#{Rails.configuration.send_user_setup_notification_email}"
+        if params['user_uuid'] && params['user_uuid'].size>0
+          setup_params[:uuid] = params['user_uuid']
+        end
+        if params['email'] && params['email'].size>0
+          user = {email: params['email']}
+          setup_params[:user] = user
+        end
+        if params['openid_prefix'] && params['openid_prefix'].size>0
+          setup_params[:openid_prefix] = params['openid_prefix']
+        end
+        if params['vm_uuid'] && params['vm_uuid'].size>0
+          setup_params[:vm_uuid] = params['vm_uuid']
+        end
+
+        setup_resp = User.setup setup_params
+        if setup_resp
+          vm_link = nil
+          setup_resp[:items].each do |item|
+            if item[:head_kind] == "arvados#virtualMachine"
+              vm_link = item
+              break
+            end
+          end
+          if params[:groups]
+            new_groups = params[:groups].split(',').map(&:strip).select{|i| !i.empty?}
+            if vm_link and new_groups != vm_link[:properties][:groups]
+              vm_login_link = Link.where(uuid: vm_link[:uuid])
+              if vm_login_link.items_available > 0
+                link = vm_login_link.results.first
+                props = link.properties
+                props[:groups] = new_groups
+                link.save!
+              end
+            end
+          end
+
+          format.js
+        else
+          self.render_error status: 422
+        end
+      else
+        self.render_error status: 422
+      end
+    end
+  end
+
+  def setup_popup
+    @vms = VirtualMachine.all.results
+
+    @current_selections = find_current_links @object
+
+    respond_to do |format|
+      format.html
+      format.js
+    end
+  end
+
+  def virtual_machines
+    @my_vm_logins = {}
+    Link.where(tail_uuid: @object.uuid,
+               link_class: 'permission',
+               name: 'can_login').
+          each do |perm_link|
+            if perm_link.properties.andand[:username]
+              @my_vm_logins[perm_link.head_uuid] ||= []
+              @my_vm_logins[perm_link.head_uuid] << perm_link.properties[:username]
+            end
+          end
+    @my_virtual_machines = VirtualMachine.where(uuid: @my_vm_logins.keys)
+  end
+
+  def ssh_keys
+    @my_ssh_keys = AuthorizedKey.where(key_type: 'SSH', owner_uuid: @object.uuid)
+  end
+
+  def add_ssh_key_popup
+    respond_to do |format|
+      format.html
+      format.js
+    end
+  end
+
+  def add_ssh_key
+    respond_to do |format|
+      key_params = {'key_type' => 'SSH'}
+      key_params['authorized_user_uuid'] = current_user.uuid
+
+      if params['name'] && params['name'].size>0
+        key_params['name'] = params['name'].strip
+      end
+      if params['public_key'] && params['public_key'].size>0
+        key_params['public_key'] = params['public_key'].strip
+      end
+
+      if !key_params['name'] && params['public_key'].andand.size>0
+        split_key = key_params['public_key'].split
+        key_params['name'] = split_key[-1] if (split_key.size == 3)
+      end
+
+      new_key = AuthorizedKey.create! key_params
+      if new_key
+        format.js
+      else
+        self.render_error status: 422
+      end
+    end
+  end
+
+  def request_shell_access
+    logger.warn "request_access: #{params.inspect}"
+    params['request_url'] = request.url
+    RequestShellAccessReporter.send_request(current_user, params).deliver
+  end
+
+  def merge
+    User.merge params[:new_user_token], params[:direction]
+    redirect_to "/"
+  end
+
+  protected
+
+  def find_current_links user
+    current_selections = {}
+
+    if !user
+      return current_selections
+    end
+
+    # oid login perm
+    oid_login_perms = Link.where(tail_uuid: user.email,
+                                 head_kind: 'arvados#user',
+                                 link_class: 'permission',
+                                 name: 'can_login')
+
+    if oid_login_perms.any?
+      prefix_properties = oid_login_perms.first.properties
+      current_selections[:identity_url_prefix] = prefix_properties[:identity_url_prefix]
+    end
+
+    # repo perm
+    repo_perms = Link.where(tail_uuid: user.uuid,
+                            head_kind: 'arvados#repository',
+                            link_class: 'permission',
+                            name: 'can_write')
+    if repo_perms.any?
+      repo_uuid = repo_perms.first.head_uuid
+      repos = Repository.where(uuid: repo_uuid)
+      if repos.any?
+        repo_name = repos.first.name
+        current_selections[:repo_name] = repo_name
+      end
+    end
+
+    # vm login perm
+    vm_login_perms = Link.where(tail_uuid: user.uuid,
+                                head_kind: 'arvados#virtualMachine',
+                                link_class: 'permission',
+                                name: 'can_login')
+    if vm_login_perms.any?
+      vm_perm = vm_login_perms.first
+      vm_uuid = vm_perm.head_uuid
+      current_selections[:vm_uuid] = vm_uuid
+      current_selections[:groups] = vm_perm.properties[:groups].andand.join(', ')
+    end
+
+    return current_selections
+  end
+
+end
diff --git a/apps/workbench/app/controllers/virtual_machines_controller.rb b/apps/workbench/app/controllers/virtual_machines_controller.rb
new file mode 100644 (file)
index 0000000..19763b9
--- /dev/null
@@ -0,0 +1,36 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class VirtualMachinesController < ApplicationController
+  def index
+    @objects ||= model_class.all
+    @vm_logins = {}
+    if @objects.andand.first
+      Link.where(tail_uuid: current_user.uuid,
+                 head_uuid: @objects.collect(&:uuid),
+                 link_class: 'permission',
+                 name: 'can_login').
+        each do |perm_link|
+        if perm_link.properties.andand[:username]
+          @vm_logins[perm_link.head_uuid] ||= []
+          @vm_logins[perm_link.head_uuid] << perm_link.properties[:username]
+        end
+      end
+      @objects.each do |vm|
+        vm.current_user_logins = @vm_logins[vm.uuid].andand.compact || []
+      end
+    end
+    super
+  end
+
+  def webshell
+    return render_not_found if not Rails.configuration.shell_in_a_box_url
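+    # shell_in_a_box_url is a format string; a hypothetical value like
+    # "https://%{hostname}.webshell.example.com/" would have the VM's
+    # hostname substituted here.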
+    @webshell_url = Rails.configuration.shell_in_a_box_url % {
+      uuid: @object.uuid,
+      hostname: @object.hostname,
+    }
+    render layout: false
+  end
+
+end
diff --git a/apps/workbench/app/controllers/websocket_controller.rb b/apps/workbench/app/controllers/websocket_controller.rb
new file mode 100644 (file)
index 0000000..e6fa5af
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class WebsocketController < ApplicationController
+  skip_before_filter :find_objects_for_index
+
+  def index
+  end
+
+  def model_class
+    "Websocket"
+  end
+end
diff --git a/apps/workbench/app/controllers/work_unit_templates_controller.rb b/apps/workbench/app/controllers/work_unit_templates_controller.rb
new file mode 100644 (file)
index 0000000..1dba520
--- /dev/null
@@ -0,0 +1,36 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class WorkUnitTemplatesController < ApplicationController
+  def find_objects_for_index
+    return if !params[:partial]
+
+    @limit = 40
+    @filters ||= []
+
+    # get next page of pipeline_templates
+    if PipelineTemplate.api_exists?(:index)
+      filters = @filters + [["uuid", "is_a", ["arvados#pipelineTemplate"]]]
+      pipelines = PipelineTemplate.limit(@limit).order(["created_at desc"]).filter(filters)
+    end
+
+    # get next page of workflows
+    filters = @filters + [["uuid", "is_a", ["arvados#workflow"]]]
+    workflows = Workflow.limit(@limit).order(["created_at desc"]).filter(filters)
+
+    @objects = (pipelines.to_a + workflows.to_a).sort_by(&:created_at).reverse.first(@limit)
+
+    if @objects.any?
+      @next_page_filters = next_page_filters('<=')
+      @next_page_href = url_for(partial: :choose_rows,
+                                filters: @next_page_filters.to_json)
+    else
+      @next_page_href = nil
+    end
+  end
+
+  def next_page_href with_params={}
+    @next_page_href
+  end
+end
diff --git a/apps/workbench/app/controllers/work_units_controller.rb b/apps/workbench/app/controllers/work_units_controller.rb
new file mode 100644 (file)
index 0000000..d3ded86
--- /dev/null
@@ -0,0 +1,219 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class WorkUnitsController < ApplicationController
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show_child_component' == ctrl.action_name
+  }
+
+  def find_objects_for_index
+    # If this is not the index rows partial display, just return.
+    # The /index request will be invoked again to display the partial,
+    # at which time the objects found here will be used.
+    return if !params[:partial]
+
+    @limit = 20
+    @filters ||= []
+
+    pipelines = []
+    jobs = []
+
+    # get next page of pipeline_instances
+    if PipelineInstance.api_exists?(:index)
+      filters = @filters + [["uuid", "is_a", ["arvados#pipelineInstance"]]]
+      pipelines = PipelineInstance.limit(@limit).order(["created_at desc"]).filter(filters)
+    end
+
+    if params[:show_children]
+      # get next page of jobs
+      if Job.api_exists?(:index)
+        filters = @filters + [["uuid", "is_a", ["arvados#job"]]]
+        jobs = Job.limit(@limit).order(["created_at desc"]).filter(filters)
+      end
+    end
+
+    # get next page of container_requests
+    filters = @filters + [["uuid", "is_a", ["arvados#containerRequest"]]]
+    if !params[:show_children]
+      filters << ["requesting_container_uuid", "=", nil]
+    end
+    crs = ContainerRequest.limit(@limit).order(["created_at desc"]).filter(filters)
+    @objects = (jobs.to_a + pipelines.to_a + crs.to_a).sort_by(&:created_at).reverse.first(@limit)
+
+    if @objects.any?
+      @next_page_filters = next_page_filters('<=')
+      @next_page_href = url_for(partial: :all_processes_rows,
+                                filters: @next_page_filters.to_json,
+                                show_children: params[:show_children])
+      preload_links_for_objects(@objects.to_a)
+    else
+      @next_page_href = nil
+    end
+  end
+
+  def next_page_href with_params={}
+    @next_page_href
+  end
+
+  def create
+    template_uuid = params['work_unit']['template_uuid']
+
+    attrs = {}
+    rc = resource_class_for_uuid(template_uuid)
+    if rc == PipelineTemplate
+      model_class = PipelineInstance
+      attrs['pipeline_template_uuid'] = template_uuid
+    elsif rc == Workflow
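+      # Build an Uncommitted container request that runs arvados-cwl-runner
+      # on the workflow definition; the default runtime constraints below
+      # can be overridden by the workflow's WorkflowRunnerResources hints.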
+      # workflow json
+      workflow = Workflow.find? template_uuid
+      if workflow.definition
+        begin
+          wf_json = ActiveSupport::HashWithIndifferentAccess.new YAML::load(workflow.definition)
+        rescue => e
+          logger.error "Error converting definition yaml to json: #{e.message}"
+          raise ArgumentError, "Error converting definition yaml to json: #{e.message}"
+        end
+      end
+
+      model_class = ContainerRequest
+
+      attrs['name'] = "#{workflow['name']} container" if workflow['name'].present?
+      attrs['properties'] = {'template_uuid' => template_uuid}
+      attrs['priority'] = 1
+      attrs['state'] = "Uncommitted"
+
+      # required
+      attrs['container_image'] = "arvados/jobs"
+      attrs['cwd'] = "/var/spool/cwl"
+      attrs['output_path'] = "/var/spool/cwl"
+
+      # runtime constraints
+      runtime_constraints = {
+        "vcpus" => 1,
+        "ram" => 1024 * 1024 * 1024,
+        "API" => true
+      }
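+      # ram is specified in bytes (1 GiB here); coresMin/ramMin hints in
+      # the workflow, if present, replace these defaults below.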
+
+      keep_cache = 256
+      input_defaults = {}
+      if wf_json
+        main = get_cwl_main(wf_json)
+        main[:inputs].each do |input|
+          if input[:default]
+            input_defaults[cwl_shortname(input[:id])] = input[:default]
+          end
+        end
+        if main[:hints]
+          main[:hints].each do |hint|
+            if hint[:class] == "http://arvados.org/cwl#WorkflowRunnerResources"
+              if hint[:coresMin]
+                runtime_constraints["vcpus"] = hint[:coresMin]
+              end
+              if hint[:ramMin]
+                runtime_constraints["ram"] = hint[:ramMin] * 1024 * 1024
+              end
+              if hint[:keep_cache]
+                keep_cache = hint[:keep_cache]
+              end
+            end
+          end
+        end
+      end
+
+      attrs['command'] = ["arvados-cwl-runner",
+                          "--local",
+                          "--api=containers",
+                          "--project-uuid=#{params['work_unit']['owner_uuid']}",
+                          "--collection-cache-size=#{keep_cache}",
+                          "/var/lib/cwl/workflow.json#main",
+                          "/var/lib/cwl/cwl.input.json"]
+
+      # mounts
+      mounts = {
+        "/var/lib/cwl/cwl.input.json" => {
+          "kind" => "json",
+          "content" => input_defaults
+        },
+        "stdout" => {
+          "kind" => "file",
+          "path" => "/var/spool/cwl/cwl.output.json"
+        },
+        "/var/spool/cwl" => {
+          "kind" => "collection",
+          "writable" => true
+        }
+      }
+      if wf_json
+        mounts["/var/lib/cwl/workflow.json"] = {
+          "kind" => "json",
+          "content" => wf_json
+        }
+      end
+      attrs['mounts'] = mounts
+
+      attrs['runtime_constraints'] = runtime_constraints
+    else
+      raise ArgumentError, "Unsupported template uuid: #{template_uuid}"
+    end
+
+    attrs['owner_uuid'] = params['work_unit']['owner_uuid']
+    @object ||= model_class.new attrs
+
+    if @object.save
+      redirect_to @object
+    else
+      render_error status: 422
+    end
+  end
+
+  def find_object_by_uuid
+    if params['object_type']
+      @object = params['object_type'].constantize.find(params['uuid'])
+    else
+      super
+    end
+  end
+
+  def show_child_component
+    data = JSON.load(params[:action_data])
+
+    current_obj = {}
+    current_obj_uuid = data['current_obj_uuid']
+    current_obj_name = data['current_obj_name']
+    current_obj_type = data['current_obj_type']
+    current_obj_parent = data['current_obj_parent']
+    if current_obj_uuid
+      resource_class = resource_class_for_uuid current_obj_uuid
+      obj = object_for_dataclass(resource_class, current_obj_uuid)
+      current_obj = obj if obj
+    end
+
+    if current_obj.is_a?(Hash) and !current_obj.any?
+      if current_obj_parent
+        resource_class = resource_class_for_uuid current_obj_parent
+        parent = object_for_dataclass(resource_class, current_obj_parent)
+        parent_wu = parent.work_unit
+        children = parent_wu.children
+        if current_obj_uuid
+          wu = children.select {|c| c.uuid == current_obj_uuid}.first
+        elsif current_obj_name
+          wu = children.select {|c| c.label.to_s == current_obj_name}.first
+        end
+      end
+    else
+      if current_obj_type == JobWorkUnit.to_s
+        wu = JobWorkUnit.new(current_obj, current_obj_name, current_obj_parent)
+      elsif current_obj_type == PipelineInstanceWorkUnit.to_s
+        wu = PipelineInstanceWorkUnit.new(current_obj, current_obj_name, current_obj_parent)
+      elsif current_obj_type == ContainerWorkUnit.to_s
+        wu = ContainerWorkUnit.new(current_obj, current_obj_name, current_obj_parent)
+      end
+    end
+
+    respond_to do |f|
+      f.html { render(partial: "show_component", locals: {wu: wu}) }
+    end
+  end
+end
diff --git a/apps/workbench/app/controllers/workflows_controller.rb b/apps/workbench/app/controllers/workflows_controller.rb
new file mode 100644 (file)
index 0000000..3b98413
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class WorkflowsController < ApplicationController
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+
+  def show_pane_list
+    %w(Definition Advanced)
+  end
+end
diff --git a/apps/workbench/app/helpers/application_helper.rb b/apps/workbench/app/helpers/application_helper.rb
new file mode 100644 (file)
index 0000000..15bf77f
--- /dev/null
@@ -0,0 +1,691 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module ApplicationHelper
+  def current_user
+    controller.current_user
+  end
+
+  def self.match_uuid(uuid)
+    /^([0-9a-z]{5})-([0-9a-z]{5})-([0-9a-z]{15})$/.match(uuid.to_s)
+  end
+
+  def current_api_host
+    Rails.configuration.arvados_v1_base.gsub /https?:\/\/|\/arvados\/v1/,''
+  end
+
+  def current_uuid_prefix
+    current_api_host[0..4]
+  end
+
+  def render_markup(markup)
+    allowed_tags = Rails::Html::Sanitizer.white_list_sanitizer.allowed_tags + %w(table tbody th tr td col colgroup caption thead tfoot)
+    sanitize(raw(RedCloth.new(markup.to_s).to_html(:refs_arvados, :textile)), tags: allowed_tags) if markup
+  end
+
+  def human_readable_bytes_html(n)
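+    # E.g. human_readable_bytes_html(512) => "512 bytes",
+    #      human_readable_bytes_html(1536) => "1.5 KiB".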
+    return h(n) unless n.is_a? Fixnum
+    return "0 bytes" if (n == 0)
+
+    orders = {
+      1 => "bytes",
+      1024 => "KiB",
+      (1024*1024) => "MiB",
+      (1024*1024*1024) => "GiB",
+      (1024*1024*1024*1024) => "TiB"
+    }
+
+    orders.each do |k, v|
+      sig = (n.to_f/k)
+      if sig >= 1 and sig < 1024
+        if v == 'bytes'
+          return "%i #{v}" % sig
+        else
+          return "%0.1f #{v}" % sig
+        end
+      end
+    end
+
+    return h(n)
+  end
+
+  def resource_class_for_uuid(attrvalue, opts={})
+    ArvadosBase::resource_class_for_uuid(attrvalue, opts)
+  end
+
+  # When using {remote:true}, or using {method:...} to use an HTTP
+  # method other than GET, move the target URI from href to
+  # data-remote-href. Otherwise, browsers offer features like "open in
+  # new window" and "copy link address" which bypass Rails' click
+  # handler and therefore end up at incorrect/nonexistent routes (by
+  # ignoring data-method) and expect to receive pages rather than
+  # javascript responses.
+  #
+  # See assets/javascripts/link_to_remote.js for supporting code.
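+  #
+  # E.g. link_to("Remove", item, method: :delete) renders roughly
+  #   <a data-method="delete" rel="nofollow" data-remote-href="/items/123">Remove</a>
+  # (illustrative; exact attributes depend on Rails' own link_to output).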
+  def link_to *args, &block
+    if (args.last and args.last.is_a? Hash and
+        (args.last[:remote] or
+         (args.last[:method] and
+          args.last[:method].to_s.upcase != 'GET')))
+      if Rails.env.test?
+        # Capybara/phantomjs can't click_link without an href, even if
+        # the click handler means it never gets used.
+        raw super.gsub(' href="', ' href="#" data-remote-href="')
+      else
+        # Regular browsers work as desired: users can click A elements
+        # without hrefs, and click handlers fire; but there's no "copy
+        # link address" option in the right-click menu.
+        raw super.gsub(' href="', ' data-remote-href="')
+      end
+    else
+      super
+    end
+  end
+
+  ##
+  # Returns HTML that links to the Arvados object specified in +attrvalue+
+  # Provides various output control and styling options.
+  #
+  # +attrvalue+ an Arvados model object or uuid
+  #
+  # +opts+ a set of flags to control output:
+  #
+  # [:link_text] the link text to use (may include HTML), overrides everything else
+  #
+  # [:friendly_name] whether to use the "friendly" name in the link text (by
+  # calling #friendly_link_name on the object), otherwise use the uuid
+  #
+  # [:with_class_name] prefix the link text with the class name of the model
+  #
+  # [:no_tags] disable tags in the link text (default is to show tags).
+  # Currently tags are only shown for Collections.
+  #
+  # [:thumbnail] if the object is a collection, show an image thumbnail if the
+  # collection consists of a single image file.
+  #
+  # [:no_link] don't create a link, just return the link text
+  #
+  # +style_opts+ additional HTML properties for the anchor tag, passed to link_to
+  #
+  def link_to_if_arvados_object(attrvalue, opts={}, style_opts={})
+    if (resource_class = resource_class_for_uuid(attrvalue, opts))
+      if attrvalue.is_a? ArvadosBase
+        object = attrvalue
+        link_uuid = attrvalue.uuid
+      else
+        object = nil
+        link_uuid = attrvalue
+      end
+      link_name = opts[:link_text]
+      tags = ""
+      if !link_name
+        link_name = object.andand.default_name || resource_class.default_name
+
+        if opts[:friendly_name]
+          if attrvalue.respond_to? :friendly_link_name
+            link_name = attrvalue.friendly_link_name opts[:lookup]
+          else
+            begin
+              if resource_class.name == 'Collection'
+                if CollectionsHelper.match(link_uuid)
+                  link_name = collection_for_pdh(link_uuid).andand.first.andand.portable_data_hash
+                else
+                  link_name = collections_for_object(link_uuid).andand.first.andand.friendly_link_name
+                end
+              else
+                link_name = object_for_dataclass(resource_class, link_uuid).andand.friendly_link_name
+              end
+            rescue ArvadosApiClient::NotFoundException
+              # If that lookup failed, the link will too. So don't make one.
+              return attrvalue
+            end
+          end
+        end
+        if link_name.nil? or link_name.empty?
+          link_name = attrvalue
+        end
+        if opts[:with_class_name]
+          link_name = "#{resource_class.to_s}: #{link_name}"
+        end
+        if !opts[:no_tags] and resource_class == Collection
+          links_for_object(link_uuid).each do |tag|
+            if tag.link_class.in? ["tag", "identifier"]
+              tags += ' <span class="label label-info">'
+              tags += link_to tag.name, controller: "links", filters: [["link_class", "=", "tag"], ["name", "=", tag.name]].to_json
+              tags += '</span>'
+            end
+          end
+        end
+        if opts[:thumbnail] and resource_class == Collection
+          # add an image thumbnail if the collection consists of a single image file.
+          collections_for_object(link_uuid).each do |c|
+            if c.files.length == 1 and CollectionsHelper::is_image c.files.first[1]
+              link_name += " "
+              link_name += image_tag "#{url_for c}/#{CollectionsHelper::file_path c.files.first}", style: "height: 4em; width: auto"
+            end
+          end
+        end
+      end
+      style_opts[:class] = (style_opts[:class] || '') + ' nowrap'
+      if opts[:no_link] or (resource_class == User && !current_user)
+        raw(link_name)
+      else
+        controller_class = resource_class.to_s.tableize
+        if controller_class.eql?('groups') and object.andand.group_class.eql?('project')
+          controller_class = 'projects'
+        end
+        (link_to raw(link_name), { controller: controller_class, action: 'show', id: ((opts[:name_link].andand.uuid) || link_uuid) }, style_opts) + raw(tags)
+      end
+    else
+      # just return attrvalue if it is not recognizable as an Arvados object or uuid.
+      if attrvalue.nil? or (attrvalue.is_a? String and attrvalue.empty?)
+        "(none)"
+      else
+        attrvalue
+      end
+    end
+  end
+
+  def link_to_arvados_object_if_readable(attrvalue, link_text_if_not_readable, opts={})
+    resource_class = resource_class_for_uuid(attrvalue.split('/')[0]) if attrvalue.is_a?(String)
+    if !resource_class
+      return link_to_if_arvados_object attrvalue, opts
+    end
+
+    readable = object_readable attrvalue, resource_class
+    if readable
+      link_to_if_arvados_object attrvalue, opts
+    elsif opts[:required] and current_user # no need to show this for anonymous user
+      raw('<div><input type="text" style="border:none;width:100%;background:#ffdddd" disabled="disabled" class="required unreadable-input" value="') + link_text_if_not_readable + raw('"></div>')
+    else
+      link_text_if_not_readable
+    end
+  end
+
+  # This method takes advantage of preloaded collections and objects.
+  # Hence you can improve performance by first preloading objects
+  # related to the page context before using this method.
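+  #
+  # Example (illustrative): with the page's collections preloaded,
+  #   object_readable("zzzzz-4zz18-0123456789abcde/foo.txt")
+  # strips the "/foo.txt" part and returns the cached Collection, or
+  # nil if no readable object was found.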
+  def object_readable attrvalue, resource_class=nil
+    # if it is a collection filename, check readable for the locator
+    attrvalue = attrvalue.split('/')[0] if attrvalue
+
+    resource_class = resource_class_for_uuid(attrvalue) if resource_class.nil?
+    return if resource_class.nil?
+
+    return_value = nil
+    if resource_class.to_s == 'Collection'
+      if CollectionsHelper.match(attrvalue)
+        found = collection_for_pdh(attrvalue)
+        return_value = found.first if found.any?
+      else
+        found = collections_for_object(attrvalue)
+        return_value = found.first if found.any?
+      end
+    else
+      return_value = object_for_dataclass(resource_class, attrvalue)
+    end
+    return_value
+  end
+
+  # Render an editable attribute with the attrvalue of the attr.
+  # The htmloptions are added to the editable element's list of attributes.
+  # The nonhtml_options are only used to customize the display of the element.
+  def render_editable_attribute(object, attr, attrvalue=nil, htmloptions={}, nonhtml_options={})
+    attrvalue = object.send(attr) if attrvalue.nil?
+    if not object.attribute_editable?(attr)
+      if attrvalue && attrvalue.length > 0
+        return render_attribute_as_textile( object, attr, attrvalue, false )
+      else
+        return (attr == 'name' and object.andand.default_name) ||
+                '(none)'
+      end
+    end
+
+    attrtype = object.class.attribute_info[attr.to_sym].andand[:type]
+    if attrtype == 'text' or attr == 'description'
+      input_type = 'textarea'
+    elsif attrtype == 'datetime'
+      input_type = 'date'
+    else
+      input_type = 'text'
+    end
+
+    attrvalue = attrvalue.to_json if attrvalue.is_a? Hash or attrvalue.is_a? Array
+    rendervalue = render_attribute_as_textile( object, attr, attrvalue, false )
+
+    ajax_options = {
+      "data-pk" => {
+        id: object.uuid,
+        key: object.class.to_s.underscore
+      }
+    }
+    if object.uuid
+      ajax_options['data-url'] = url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore)
+    else
+      ajax_options['data-url'] = url_for(action: "create", controller: object.class.to_s.pluralize.underscore)
+      ajax_options['data-pk'][:defaults] = object.attributes
+    end
+    ajax_options['data-pk'] = ajax_options['data-pk'].to_json
+    @unique_id ||= (Time.now.to_f*1000000).to_i
+    span_id = object.uuid.to_s + '-' + attr.to_s + '-' + (@unique_id += 1).to_s
+
+    span_tag = content_tag 'span', rendervalue, {
+      "data-emptytext" => '(none)',
+      "data-placement" => "bottom",
+      "data-type" => input_type,
+      "data-title" => "Edit #{attr.to_s.gsub '_', ' '}",
+      "data-name" => htmloptions['selection_name'] || attr,
+      "data-object-uuid" => object.uuid,
+      "data-toggle" => "manual",
+      "data-value" => htmloptions['data-value'] || attrvalue,
+      "id" => span_id,
+      :class => "editable #{is_textile?( object, attr ) ? 'editable-textile' : ''}"
+    }.merge(htmloptions).merge(ajax_options)
+
+    edit_tiptitle = 'edit'
+    edit_tiptitle = 'Warning: do not use hyphens in the repository name as they will be stripped' if (object.class.to_s == 'Repository' and attr == 'name')
+
+    edit_button = raw('<a href="#" class="btn btn-xs btn-' + (nonhtml_options[:btnclass] || 'default') + ' btn-nodecorate" data-toggle="x-editable tooltip" data-toggle-selector="#' + span_id + '" data-placement="top" title="' + (nonhtml_options[:tiptitle] || edit_tiptitle) + '"><i class="fa fa-fw fa-pencil"></i>' + (nonhtml_options[:btntext] || '') + '</a>')
+
+    if nonhtml_options[:btnplacement] == :left
+      edit_button + ' ' + span_tag
+    elsif nonhtml_options[:btnplacement] == :top
+      edit_button + raw('<br/>') + span_tag
+    else
+      span_tag + ' ' + edit_button
+    end
+  end
+
+  def render_pipeline_component_attribute(object, attr, subattr, value_info, htmloptions={})
+    datatype = nil
+    required = true
+    attrvalue = value_info
+
+    if value_info.is_a? Hash
+      if value_info[:output_of]
+        return raw("<span class='label label-default'>#{value_info[:output_of]}</span>")
+      end
+      if value_info[:dataclass]
+        dataclass = value_info[:dataclass]
+      end
+      if value_info[:optional] != nil
+        required = (value_info[:optional] != "true")
+      end
+      if value_info[:required] != nil
+        required = value_info[:required]
+      end
+
+      # Pick a suitable attrvalue to show as the current value (i.e.,
+      # the one that would be used if we ran the pipeline right now).
+      if value_info[:value]
+        attrvalue = value_info[:value]
+      elsif value_info[:default]
+        attrvalue = value_info[:default]
+      else
+        attrvalue = ''
+      end
+      preconfigured_search_str = value_info[:search_for]
+    end
+
+    if not object.andand.attribute_editable?(attr)
+      return link_to_arvados_object_if_readable(attrvalue, attrvalue, {friendly_name: true, required: required})
+    end
+
+    if dataclass
+      begin
+        dataclass = dataclass.constantize
+      rescue NameError
+      end
+    else
+      dataclass = ArvadosBase.resource_class_for_uuid(attrvalue)
+    end
+
+    id = "#{object.uuid}-#{subattr.join('-')}"
+    dn = "[#{attr}]"
+    subattr.each do |a|
+      dn += "[#{a}]"
+    end
+    if value_info.is_a? Hash
+      dn += '[value]'
+    end
+
+    if (dataclass == Collection) or (dataclass == File)
+      selection_param = object.class.to_s.underscore + dn
+      display_value = attrvalue
+      if value_info.is_a?(Hash)
+        if (link = Link.find? value_info[:link_uuid])
+          display_value = link.name
+        elsif value_info[:link_name]
+          display_value = value_info[:link_name]
+        elsif value_info[:selection_name]
+          display_value = value_info[:selection_name]
+        end
+      end
+      if (attr == :components) and (subattr.size > 2)
+        chooser_title = "Choose a #{dataclass == Collection ? 'dataset' : 'file'} for #{object.component_input_title(subattr[0], subattr[2])}:"
+      else
+        chooser_title = "Choose a #{dataclass == Collection ? 'dataset' : 'file'}:"
+      end
+      modal_path = choose_collections_path \
+      ({ title: chooser_title,
+         filters: [['owner_uuid', '=', object.owner_uuid]].to_json,
+         action_name: 'OK',
+         action_href: pipeline_instance_path(id: object.uuid),
+         action_method: 'patch',
+         preconfigured_search_str: (preconfigured_search_str || ""),
+         action_data: {
+           merge: true,
+           use_preview_selection: dataclass == File ? true : nil,
+           selection_param: selection_param,
+           success: 'page-refresh'
+         }.to_json,
+        })
+
+      return content_tag('div', :class => 'input-group') do
+        html = text_field_tag(dn, display_value,
+                              :class =>
+                              "form-control #{'required' if required} #{'unreadable-input' if attrvalue.present? and !object_readable(attrvalue, Collection)}")
+        html + content_tag('span', :class => 'input-group-btn') do
+          link_to('Choose',
+                  modal_path,
+                  { :class => "btn btn-primary",
+                    :remote => true,
+                    :method => 'get',
+                  })
+        end
+      end
+    end
+
+    if attrvalue.is_a? String
+      datatype = 'text'
+    elsif attrvalue.is_a?(Array) or dataclass.andand.is_a?(Class)
+      # TODO: find a way to edit with x-editable
+      return attrvalue
+    end
+
+    # When attrvalue is a scalar (e.g. String or Fixnum), render it as an x-editable link
+    lt = link_to attrvalue, '#', {
+      "data-emptytext" => "none",
+      "data-placement" => "bottom",
+      "data-type" => datatype,
+      "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore, merge: true),
+      "data-title" => "Set value for #{subattr[-1].to_s}",
+      "data-name" => dn,
+      "data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
+      "data-value" => attrvalue,
+      # "clear" button interferes with form-control's up/down arrows
+      "data-clear" => false,
+      :class => "editable #{'required' if required} form-control",
+      :id => id
+    }.merge(htmloptions)
+
+    lt
+  end
+
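+  # A packed CWL document may be a single tool/workflow, or a "$graph"
+  # document containing several; in the latter case the top-level
+  # workflow is the one with id "#main".
+  # Example (illustrative):
+  #   get_cwl_main({:"$graph" => [{id: "#main", inputs: []}]})  # => {id: "#main", inputs: []}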
+  def get_cwl_main(workflow)
+    if workflow[:"$graph"].nil?
+      return workflow
+    else
+      workflow[:"$graph"].each do |tool|
+        if tool[:id] == "#main"
+          return tool
+        end
+      end
+    end
+  end
+
+  def get_cwl_inputs(workflow)
+    get_cwl_main(workflow)[:inputs]
+  end
+
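+  # Example: cwl_shortname("#main/input_fastq")  # => "input_fastq"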
+  def cwl_shortname(id)
+    if id[0] == "#"
+      id = id[1..-1]
+    end
+    return id.split("/")[-1]
+  end
+
+  def cwl_input_info(input_schema)
+    required = !(input_schema[:type].include? "null")
+    if input_schema[:type].is_a? Array
+      primary_type = input_schema[:type].select { |n| n != "null" }[0]
+    elsif input_schema[:type].is_a?(String) or input_schema[:type].is_a?(Hash)
+      primary_type = input_schema[:type]
+    end
+    param_id = cwl_shortname(input_schema[:id])
+    return required, primary_type, param_id
+  end
+
+  def cwl_input_value(object, input_schema, set_attr_path)
+    dn = ""
+    attrvalue = object
+    set_attr_path.each do |a|
+      dn += "[#{a}]"
+      attrvalue = attrvalue[a.to_sym]
+    end
+    return dn, attrvalue
+  end
+
+  def cwl_inputs_required(object, inputs_schema, set_attr_path)
+    r = 0
+    inputs_schema.each do |input|
+      required, primary_type, param_id = cwl_input_info(input)
+      dn, attrvalue = cwl_input_value(object, input, set_attr_path + [param_id])
+      r += 1 if required and attrvalue.nil?
+    end
+    r
+  end
+
+  def render_cwl_input(object, input_schema, set_attr_path, htmloptions={})
+    required, primary_type, param_id = cwl_input_info(input_schema)
+
+    dn, attrvalue = cwl_input_value(object, input_schema, set_attr_path + [param_id])
+    attrvalue = if attrvalue.nil? then "" else attrvalue end
+
+    id = "#{object.uuid}-#{param_id}"
+
+    opt_empty_selection = if required then [] else [{value: "", text: ""}] end
+
+    if ["Directory", "File"].include? primary_type
+      chooser_title = "Choose a #{primary_type == 'Directory' ? 'dataset' : 'file'}:"
+      selection_param = object.class.to_s.underscore + dn
+      if attrvalue.is_a? Hash
+        display_value = attrvalue[:"arv:collection"] || attrvalue[:location]
+        re = CollectionsHelper.match_uuid_with_optional_filepath(display_value)
+        if re
+          if re[4]
+            display_value = "#{Collection.find(re[1]).name} / #{re[4][1..-1]}"
+          else
+            display_value = Collection.find(re[1]).name
+          end
+        end
+      end
+      modal_path = choose_collections_path \
+      ({ title: chooser_title,
+         filters: [['owner_uuid', '=', object.owner_uuid]].to_json,
+         action_name: 'OK',
+         action_href: container_request_path(id: object.uuid),
+         action_method: 'patch',
+         preconfigured_search_str: "",
+         action_data: {
+           merge: true,
+           use_preview_selection: primary_type == 'File' ? true : nil,
+           selection_param: selection_param,
+           success: 'page-refresh'
+         }.to_json,
+        })
+
+      return content_tag('div', :class => 'input-group') do
+        html = text_field_tag(dn, display_value,
+                              :class =>
+                              "form-control #{'required' if required}")
+        html + content_tag('span', :class => 'input-group-btn') do
+          link_to('Choose',
+                  modal_path,
+                  { :class => "btn btn-primary",
+                    :remote => true,
+                    :method => 'get',
+                  })
+        end
+      end
+    elsif "boolean" == primary_type
+      return link_to attrvalue.to_s, '#', {
+                     "data-emptytext" => "none",
+                     "data-placement" => "bottom",
+                     "data-type" => "select",
+                     "data-source" => (opt_empty_selection + [{value: "true", text: "true"}, {value: "false", text: "false"}]).to_json,
+                     "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore, merge: true),
+                     "data-title" => "Set value for #{cwl_shortname(input_schema[:id])}",
+                     "data-name" => dn,
+                     "data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
+                     "data-value" => attrvalue.to_s,
+                     # "clear" button interferes with form-control's up/down arrows
+                     "data-clear" => false,
+                     :class => "editable #{'required' if required} form-control",
+                     :id => id
+                   }.merge(htmloptions)
+    elsif primary_type.is_a? Hash and primary_type[:type] == "enum"
+      return link_to attrvalue, '#', {
+                     "data-emptytext" => "none",
+                     "data-placement" => "bottom",
+                     "data-type" => "select",
+                     "data-source" => (opt_empty_selection + primary_type[:symbols].map {|i| {:value => i, :text => i} }).to_json,
+                     "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore, merge: true),
+                     "data-title" => "Set value for #{cwl_shortname(input_schema[:id])}",
+                     "data-name" => dn,
+                     "data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
+                     "data-value" => attrvalue,
+                     # "clear" button interferes with form-control's up/down arrows
+                     "data-clear" => false,
+                     :class => "editable #{'required' if required} form-control",
+                     :id => id
+                   }.merge(htmloptions)
+    elsif primary_type.is_a? String
+      if ["int", "long"].include? primary_type
+        datatype = "number"
+      else
+        datatype = "text"
+      end
+
+      return link_to attrvalue, '#', {
+                     "data-emptytext" => "none",
+                     "data-placement" => "bottom",
+                     "data-type" => datatype,
+                     "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore, merge: true),
+                     "data-title" => "Set value for #{cwl_shortname(input_schema[:id])}",
+                     "data-name" => dn,
+                     "data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
+                     "data-value" => attrvalue,
+                     # "clear" button interferes with form-control's up/down arrows
+                     "data-clear" => false,
+                     :class => "editable #{'required' if required} form-control",
+                     :id => id
+                     }.merge(htmloptions)
+    else
+      return "Unable to render editing control for parameter type #{primary_type}"
+    end
+  end
+
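+  # Renders up to show_max items of a list via the block, then a button
+  # linking to the full list when more items remain.
+  # Example (illustrative):
+  #   render_arvados_object_list_start(items, 'Show all', url, show_max: 3) { |item| ... }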
+  def render_arvados_object_list_start(list, button_text, button_href,
+                                       params={}, *rest, &block)
+    show_max = params.delete(:show_max) || 3
+    params[:class] ||= 'btn btn-xs btn-default'
+    list[0...show_max].each { |item| yield item }
+    unless list[show_max].nil?
+      link_to(h(button_text) +
+              raw(' &nbsp; <i class="fa fa-fw fa-arrow-circle-right"></i>'),
+              button_href, params, *rest)
+    end
+  end
+
+  def render_controller_partial partial, opts
+    cname = opts.delete :controller_name
+    begin
+      render opts.merge(partial: "#{cname}/#{partial}")
+    rescue ActionView::MissingTemplate
+      render opts.merge(partial: "application/#{partial}")
+    end
+  end
+
+  RESOURCE_CLASS_ICONS = {
+    "Collection" => "fa-archive",
+    "ContainerRequest" => "fa-gears",
+    "Group" => "fa-users",
+    "Human" => "fa-male",  # FIXME: Use a more inclusive icon.
+    "Job" => "fa-gears",
+    "KeepDisk" => "fa-hdd-o",
+    "KeepService" => "fa-exchange",
+    "Link" => "fa-arrows-h",
+    "Node" => "fa-cloud",
+    "PipelineInstance" => "fa-gears",
+    "PipelineTemplate" => "fa-gears",
+    "Repository" => "fa-code-fork",
+    "Specimen" => "fa-flask",
+    "Trait" => "fa-clipboard",
+    "User" => "fa-user",
+    "VirtualMachine" => "fa-terminal",
+    "Workflow" => "fa-gears",
+  }
+  DEFAULT_ICON_CLASS = "fa-cube"
+
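+  # Example: fa_icon_class_for_class(Collection)  # => "fa-archive"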
+  def fa_icon_class_for_class(resource_class, default=DEFAULT_ICON_CLASS)
+    RESOURCE_CLASS_ICONS.fetch(resource_class.to_s, default)
+  end
+
+  def fa_icon_class_for_uuid(uuid, default=DEFAULT_ICON_CLASS)
+    fa_icon_class_for_class(resource_class_for_uuid(uuid), default)
+  end
+
+  def fa_icon_class_for_object(object, default=DEFAULT_ICON_CLASS)
+    case class_name = object.class.to_s
+    when "Group"
+      object.group_class ? 'fa-folder' : 'fa-users'
+    else
+      RESOURCE_CLASS_ICONS.fetch(class_name, default)
+    end
+  end
+
+  def chooser_preview_url_for object, use_preview_selection=false
+    case object.class.to_s
+    when 'Collection'
+      polymorphic_path(object, tab_pane: 'chooser_preview', use_preview_selection: use_preview_selection)
+    else
+      nil
+    end
+  end
+
+  def render_attribute_as_textile( object, attr, attrvalue, truncate )
+    if attrvalue && (is_textile? object, attr)
+      markup = render_markup attrvalue
+      markup = markup[0,markup.index('</p>')+4] if (truncate && markup.index('</p>'))
+      return markup
+    else
+      return attrvalue
+    end
+  end
+
+  def render_localized_date(date, opts="")
+    raw("<span class='utc-date' data-utc-date='#{date}' data-utc-date-opts='noseconds'>#{date}</span>")
+  end
+
+  def render_time duration, use_words, round_to_min=true
+    render_runtime duration, use_words, round_to_min
+  end
+
+  # Keep locators embedded in JSON strings are expected to be of the form "<pdh>[/file_path]"
+  JSON_KEEP_LOCATOR_REGEXP = /([0-9a-f]{32}\+\d+[^'"]*?)(?=['"]|\z|$)/
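+  # Example (illustrative):
+  #   keep_locator_in_json('{"f": "d41d8cd98f00b204e9800998ecf8427e+0/a.txt"}')
+  #   # => ["d41d8cd98f00b204e9800998ecf8427e+0/a.txt"]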
+  def keep_locator_in_json str
+    # Return a list of all matches
+    str.scan(JSON_KEEP_LOCATOR_REGEXP).flatten
+  end
+
+private
+  def is_textile?( object, attr )
+    object.textile_attributes.andand.include?(attr)
+  end
+end
diff --git a/apps/workbench/app/helpers/arvados_api_client_helper.rb b/apps/workbench/app/helpers/arvados_api_client_helper.rb
new file mode 100644 (file)
index 0000000..5901de4
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module ArvadosApiClientHelper
+  def arvados_api_client
+    ArvadosApiClient.new_or_current
+  end
+end
+
+# For the benefit of themes that still expect $arvados_api_client to work:
+class ArvadosClientProxyHack
+  def method_missing *args
+    ArvadosApiClient.new_or_current.send *args
+  end
+end
+$arvados_api_client = ArvadosClientProxyHack.new
diff --git a/apps/workbench/app/helpers/collections_helper.rb b/apps/workbench/app/helpers/collections_helper.rb
new file mode 100644 (file)
index 0000000..f5f5485
--- /dev/null
@@ -0,0 +1,81 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module CollectionsHelper
+  def d3ify_links(links)
+    links.collect do |x|
+      {source: x.tail_uuid, target: x.head_uuid, type: x.name}
+    end
+  end
+
+  ##
+  # Regex match for a collection portable data hash. Returns a MatchData
+  # object with the hash in group 1, the size in group 2, the (optional)
+  # subsequent uuid fields in group 3, and the (optional) file path within
+  # the collection in group 4; returns nil for no match.
+  #
+  # +pdh+ the portable data hash string to match
+  #
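+  # Example (illustrative):
+  #   m = CollectionsHelper.match("d41d8cd98f00b204e9800998ecf8427e+0/foo")
+  #   m[1]  # => "d41d8cd98f00b204e9800998ecf8427e"
+  #   m[4]  # => "/foo"
+  #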
+  def self.match(pdh)
+    /^([a-f0-9]{32})(\+\d+)(\+[^+]+)*?(\/.*)?$/.match(pdh.to_s)
+  end
+
+  ##
+  # Regex match for collection UUIDs. Returns a MatchData object with the
+  # uuid in group 1, empty groups 2 and 3 (for consistency with the match
+  # method above), and the (optional) file path within the collection in
+  # group 4; returns nil for no match.
+  #
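+  # Example (illustrative):
+  #   match_uuid_with_optional_filepath("zzzzz-4zz18-0123456789abcde/a.txt")[4]  # => "/a.txt"
+  #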
+  def self.match_uuid_with_optional_filepath(uuid_with_optional_file)
+    /^([0-9a-z]{5}-4zz18-[0-9a-z]{15})()()(\/.*)?$/.match(uuid_with_optional_file.to_s)
+  end
+
+  ##
+  # Regex match for common image file extensions. Returns a MatchData
+  # object with the matched extension in group 1, or nil for no match.
+  #
+  # +file+ the file string to match
+  #
+  def self.is_image file
+    /\.(jpg|jpeg|gif|png|svg)$/i.match(file)
+  end
+
+  ##
+  # Generates a relative file path that can be appended to the URL of a
+  # collection to get a file download link, without adding a spurious ./
+  # at the beginning for files in the default stream.
+  #
+  # +file+ an entry in the Collection.files list in the form [stream, name, size]
+  #
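+  # Example: file_path(['.', 'foo.txt', 3])      # => "foo.txt"
+  #          file_path(['./bar', 'foo.txt', 3])  # => "bar/foo.txt"
+  #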
+  def self.file_path file
+    f0 = file[0]
+    f0 = '' if f0 == '.'
+    f0 = f0[2..-1] if f0[0..1] == './'
+    f0 += '/' if not f0.empty?
+    file_path = "#{f0}#{file[1]}"
+  end
+
+  ##
+  # Check if collection preview is allowed for the given filename with extension
+  #
+  def preview_allowed_for file_name
+    file_type = MIME::Types.type_for(file_name).first
+    if file_type.nil?
+      if file_name.downcase.end_with?('.cwl') # unknown mime type, but we support preview
+        true
+      else
+        false
+      end
+    elsif (file_type.raw_media_type == "text") || (file_type.raw_media_type == "image")
+      true
+    elsif (file_type.raw_media_type == "application") &&
+          (Rails.configuration.application_mimetypes_with_view_icon.include? (file_type.sub_type))
+      true
+    else
+      false
+    end
+  end
+end
diff --git a/apps/workbench/app/helpers/pipeline_components_helper.rb b/apps/workbench/app/helpers/pipeline_components_helper.rb
new file mode 100644 (file)
index 0000000..702772c
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module PipelineComponentsHelper
+  def render_pipeline_components(template_suffix, fallback=nil, locals={})
+    begin
+      render(partial: "pipeline_instances/show_components_#{template_suffix}",
+             locals: locals)
+    rescue => e
+      logger.error "#{e.inspect}"
+      logger.error "#{e.backtrace.join("\n\t")}"
+      case fallback
+      when :json
+        render(partial: "pipeline_instances/show_components_json",
+               locals: {error_name: e.inspect, backtrace: e.backtrace.join("\n\t")})
+      end
+    end
+  end
+end
diff --git a/apps/workbench/app/helpers/pipeline_instances_helper.rb b/apps/workbench/app/helpers/pipeline_instances_helper.rb
new file mode 100644 (file)
index 0000000..2142375
--- /dev/null
@@ -0,0 +1,319 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module PipelineInstancesHelper
+
+  def pipeline_jobs object=nil
+    object ||= @object
+    if object.components[:steps].is_a? Array
+      pipeline_jobs_oldschool object
+    elsif object.components.is_a? Hash
+      pipeline_jobs_newschool object
+    end
+  end
+
+  def render_pipeline_jobs
+    pipeline_jobs.collect do |pj|
+      render_pipeline_job pj
+    end
+  end
+
+  def render_pipeline_job pj
+    pj[:progress_bar] = render partial: 'job_progress', locals: {:j => pj[:job]}
+    pj[:output_link] = link_to_if_arvados_object pj[:output]
+    pj[:job_link] = link_to_if_arvados_object pj[:job][:uuid] if pj[:job]
+    pj
+  end
+
+  # Merge (started_at, finished_at) time range into the list of time ranges in
+  # timestamps (timestamps must be sorted and non-overlapping).
+  # return the updated timestamps list.
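+  #
+  # Example (illustrative):
+  #   merge_range([[0, 10]], 5, 15)   # => [[0, 15]]
+  #   merge_range([[0, 10]], 20, 30)  # => [[0, 10], [20, 30]]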
+  def merge_range timestamps, started_at, finished_at
+    # in the comments below, 'i' is the entry in the timestamps array and 'j'
+    # is the started_at, finished_at range which is passed in.
+    timestamps.each_index do |i|
+      if started_at
+        if started_at >= timestamps[i][0] and finished_at <= timestamps[i][1]
+          # 'j' started and ended during 'i'
+          return timestamps
+        end
+
+        if started_at < timestamps[i][0] and finished_at >= timestamps[i][0] and finished_at <= timestamps[i][1]
+          # 'j' started before 'i' and finished during 'i'
+          # re-merge range between when 'j' started and 'i' finished
+          finished_at = timestamps[i][1]
+          timestamps.delete_at i
+          return merge_range timestamps, started_at, finished_at
+        end
+
+        if started_at >= timestamps[i][0] and started_at <= timestamps[i][1]
+          # 'j' started during 'i' and finished sometime after
+          # move end time of 'i' back
+          # re-merge range between when 'i' started and 'j' finished
+          started_at = timestamps[i][0]
+          timestamps.delete_at i
+          return merge_range timestamps, started_at, finished_at
+        end
+
+        if finished_at < timestamps[i][0]
+          # 'j' finished before 'i' started, so insert before 'i'
+          timestamps.insert i, [started_at, finished_at]
+          return timestamps
+        end
+      end
+    end
+
+    timestamps << [started_at, finished_at]
+  end
+
+  # Accept a list of objects with [:started_at] and [:finished_at] keys and
+  # merge overlapping ranges to compute the time spent running after periods of
+  # overlapping execution are factored out.
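+  #
+  # Example (illustrative): two jobs spanning (t..t+100) and (t+50..t+150)
+  # yield 150 seconds of runtime, not 200, because the overlap from t+50
+  # to t+100 is only counted once.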
+  def determine_wallclock_runtime jobs
+    timestamps = []
+    jobs.each do |j|
+      started_at = (j.started_at if j.respond_to?(:started_at)) || (j[:started_at] if j.is_a?(Hash))
+      finished_at = (j.finished_at if j.respond_to?(:finished_at)) || (j[:finished_at] if j.is_a?(Hash)) || Time.now
+      if started_at
+        timestamps = merge_range timestamps, started_at, finished_at
+      end
+    end
+    timestamps.map { |t| t[1] - t[0] }.reduce(:+) || 0
+  end
+
+  protected
+
+  def pipeline_jobs_newschool object
+    ret = []
+    i = -1
+
+    jobuuids = object.components.values.map { |c|
+      c[:job][:uuid] if c.is_a?(Hash) and c[:job].is_a?(Hash)
+    }.compact
+    job = {}
+    Job.where(uuid: jobuuids).each do |j|
+      job[j[:uuid]] = j
+    end
+
+    object.components.each do |cname, c|
+      i += 1
+      pj = {index: i, name: cname}
+      if not c.is_a?(Hash)
+        ret << pj
+        next
+      end
+      if c[:job] and c[:job][:uuid] and job[c[:job][:uuid]]
+        pj[:job] = job[c[:job][:uuid]]
+      elsif c[:job].is_a?(Hash)
+        pj[:job] = c[:job]
+        if pj[:job][:started_at].is_a? String
+          pj[:job][:started_at] = Time.parse(pj[:job][:started_at])
+        end
+        if pj[:job][:finished_at].is_a? String
+          pj[:job][:finished_at] = Time.parse(pj[:job][:finished_at])
+        end
+        # If necessary, figure out the state based on the other fields.
+        pj[:job][:state] ||= if pj[:job][:cancelled_at]
+                               "Cancelled"
+                             elsif pj[:job][:success] == false
+                               "Failed"
+                             elsif pj[:job][:success] == true
+                               "Complete"
+                             elsif pj[:job][:running] == true
+                               "Running"
+                             else
+                               "Queued"
+                             end
+      else
+        pj[:job] = {}
+      end
+      pj[:percent_done] = 0
+      pj[:percent_running] = 0
+      if pj[:job][:success]
+        if pj[:job][:output]
+          pj[:progress] = 1.0
+          pj[:percent_done] = 100
+        else
+          pj[:progress] = 0.0
+        end
+      else
+        if pj[:job][:tasks_summary]
+          begin
+            ts = pj[:job][:tasks_summary]
+            denom = ts[:done].to_f + ts[:running].to_f + ts[:todo].to_f
+            pj[:progress] = (ts[:done].to_f + ts[:running].to_f/2) / denom
+            pj[:percent_done] = 100.0 * ts[:done].to_f / denom
+            pj[:percent_running] = 100.0 * ts[:running].to_f / denom
+            pj[:progress_detail] = "#{ts[:done]} done #{ts[:running]} run #{ts[:todo]} todo"
+          rescue
+            pj[:progress] = 0.5
+            pj[:percent_done] = 0.0
+            pj[:percent_running] = 100.0
+          end
+        else
+          pj[:progress] = 0.0
+        end
+      end
+
+      case pj[:job][:state]
+      when 'Complete'
+        pj[:result] = 'complete'
+        pj[:labeltype] = 'success'
+        pj[:complete] = true
+        pj[:progress] = 1.0
+      when 'Failed'
+        pj[:result] = 'failed'
+        pj[:labeltype] = 'danger'
+        pj[:failed] = true
+      when 'Cancelled'
+        pj[:result] = 'cancelled'
+        pj[:labeltype] = 'danger'
+        pj[:failed] = true
+      when 'Running'
+        pj[:result] = 'running'
+        pj[:labeltype] = 'primary'
+      when 'Queued'
+        pj[:result] = 'queued'
+        pj[:labeltype] = 'default'
+      else
+        pj[:result] = 'none'
+        pj[:labeltype] = 'default'
+      end
+
+      pj[:job_id] = pj[:job][:uuid]
+      pj[:script] = pj[:job][:script] || c[:script]
+      pj[:repository] = pj[:job][:repository] || c[:repository]
+      pj[:script_parameters] = pj[:job][:script_parameters] || c[:script_parameters]
+      pj[:script_version] = pj[:job][:script_version] || c[:script_version]
+      pj[:nondeterministic] = pj[:job][:nondeterministic] || c[:nondeterministic]
+      pj[:output] = pj[:job][:output]
+      pj[:output_uuid] = c[:output_uuid]
+      pj[:finished_at] = pj[:job][:finished_at]
+      ret << pj
+    end
+    ret
+  end
+
+  def pipeline_jobs_oldschool object
+    ret = []
+    object.components[:steps].each_with_index do |step, i|
+      pj = {index: i, name: step[:name]}
+      if step[:complete] and step[:complete] != 0
+        if step[:output_data_locator]
+          pj[:progress] = 1.0
+        else
+          pj[:progress] = 0.0
+        end
+      else
+        if step[:progress] and
+            (re = step[:progress].match /^(\d+)\+(\d+)\/(\d+)$/)
+          pj[:progress] = (((re[1].to_f + re[2].to_f/2) / re[3].to_f) rescue 0.5)
+        else
+          pj[:progress] = 0.0
+        end
+        if step[:failed]
+          pj[:result] = 'failed'
+          pj[:failed] = true
+        end
+      end
+      if step[:warehousejob]
+        if step[:complete]
+          pj[:result] = 'complete'
+          pj[:complete] = true
+          pj[:progress] = 1.0
+        elsif step[:warehousejob][:finishtime]
+          pj[:result] = 'failed'
+          pj[:failed] = true
+        elsif step[:warehousejob][:starttime]
+          pj[:result] = 'running'
+        else
+          pj[:result] = 'queued'
+        end
+      end
+      pj[:progress_detail] = (step[:progress] rescue nil)
+      pj[:job_id] = (step[:warehousejob][:id] rescue nil)
+      pj[:job_link] = pj[:job_id]
+      pj[:script] = step[:function]
+      pj[:script_version] = (step[:warehousejob][:revision] rescue nil)
+      pj[:output] = step[:output_data_locator]
+      pj[:finished_at] = (Time.parse(step[:warehousejob][:finishtime]) rescue nil)
+      ret << pj
+    end
+    ret
+  end
+
+  MINUTE = 60
+  HOUR = 60 * MINUTE
+  DAY = 24 * HOUR
+
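+  # Render a duration (in seconds) in a human-readable form.
+  # Example (illustrative): render_runtime(90061, true)  # => "1 day 1 hour 1 minute"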
+  def render_runtime duration, use_words, round_to_min=true
+    days = 0
+    hours = 0
+    minutes = 0
+    seconds = 0
+
+    if duration >= DAY
+      days = (duration / DAY).floor
+      duration -= days * DAY
+    end
+
+    if duration >= HOUR
+      hours = (duration / HOUR).floor
+      duration -= hours * HOUR
+    end
+
+    if duration >= MINUTE
+      minutes = (duration / MINUTE).floor
+      duration -= minutes * MINUTE
+    end
+
+    seconds = duration.floor
+
+    if round_to_min and seconds >= 30
+      minutes += 1
+    end
+
+    if use_words
+      s = []
+      if days > 0 then
+        s << "#{days} day#{'s' if days != 1}"
+      end
+      if hours > 0 then
+        s << "#{hours} hour#{'s' if hours != 1}"
+      end
+      if minutes > 0 then
+        s << "#{minutes} minute#{'s' if minutes != 1}"
+      end
+      if not round_to_min or s.size == 0
+        s << "#{seconds} second#{'s' if seconds != 1}"
+      end
+      s = s * " "
+    else
+      s = ""
+      if days > 0
+        s += "#{days}<span class='time-label-divider'>d</span>"
+      end
+
+      if (hours > 0)
+        s += "#{hours}<span class='time-label-divider'>h</span>"
+      end
+
+      s += "#{minutes}<span class='time-label-divider'>m</span>"
+
+      if not round_to_min or (days == 0 and hours == 0 and minutes == 0)
+        s += "#{seconds}<span class='time-label-divider'>s</span>"
+      end
+    end
+
+    raw(s)
+  end
+
+  def render_unreadable_inputs_present
+    if current_user and controller.class.name.eql?('PipelineInstancesController') and unreadable_inputs_present?
+      raw('<div class="alert alert-danger unreadable-inputs-present">' +
+            '<p>One or more inputs provided are not readable by you. ' +
+              'Please correct these before you can run the pipeline.</p></div>')
+    end
+  end
+end
diff --git a/apps/workbench/app/helpers/provenance_helper.rb b/apps/workbench/app/helpers/provenance_helper.rb
new file mode 100644 (file)
index 0000000..9b4d265
--- /dev/null
@@ -0,0 +1,407 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module ProvenanceHelper
+
+  class GenerateGraph
+    def initialize(pdata, opts)
+      @pdata = pdata
+      @opts = opts
+      @visited = {}
+      @jobs = {}
+      @node_extra = {}
+    end
+
+    def self.collection_uuid(uuid)
+      Keep::Locator.parse(uuid).andand.strip_hints.andand.to_s
+    end
+
+    def url_for u
+      p = { :host => @opts[:request].host,
+        :port => @opts[:request].port,
+        :protocol => @opts[:request].protocol }
+      p.merge! u
+      Rails.application.routes.url_helpers.url_for (p)
+    end
+
+    def determine_fillcolor(n)
+      fillcolor = %w(666666 669966 666699 666666 996666)[n || 0] || '666666'
+      "style=\"filled\",color=\"#ffffff\",fillcolor=\"##{fillcolor}\",fontcolor=\"#ffffff\""
+    end
+
+    def describe_node(uuid, describe_opts={})
+      bgcolor = determine_fillcolor (describe_opts[:pip] || @opts[:pips].andand[uuid])
+
+      rsc = ArvadosBase::resource_class_for_uuid uuid
+
+      if GenerateGraph::collection_uuid(uuid) || rsc == Collection
+        if Collection.is_empty_blob_locator? uuid.to_s
+          # special case
+          return "\"#{uuid}\" [label=\"(empty collection)\"];\n"
+        end
+
+        if describe_opts[:col_uuid]
+          href = url_for ({:controller => Collection.to_s.tableize,
+                           :action => :show,
+                           :id => describe_opts[:col_uuid].to_s })
+        else
+          href = url_for ({:controller => Collection.to_s.tableize,
+                           :action => :show,
+                           :id => uuid.to_s })
+        end
+
+        return "\"#{uuid}\" [label=\"#{encode_quotes(describe_opts[:label] || (@pdata[uuid] and @pdata[uuid][:name]) || uuid)}\",shape=box,href=\"#{href}\",#{bgcolor}];\n"
+      else
+        href = ""
+        if describe_opts[:href]
+          href = ",href=\"#{url_for ({:controller => describe_opts[:href][:controller],
+                            :action => :show,
+                            :id => describe_opts[:href][:id] })}\""
+        end
+        return "\"#{uuid}\" [label=\"#{encode_quotes(describe_opts[:label] || uuid)}\",#{bgcolor},shape=#{describe_opts[:shape] || 'box'}#{href}];\n"
+      end
+    end
+
+    def job_uuid(job)
+      d = Digest::MD5.hexdigest(job[:script_parameters].to_json)
+      if @opts[:combine_jobs] == :script_only
+        uuid = "#{job[:script]}_#{d}"
+      elsif @opts[:combine_jobs] == :script_and_version
+        uuid = "#{job[:script]}_#{job[:script_version]}_#{d}"
+      else
+        uuid = "#{job[:uuid]}"
+      end
+
+      @jobs[uuid] = [] unless @jobs[uuid]
+      @jobs[uuid] << job unless @jobs[uuid].include? job
+
+      uuid
+    end
+
+    def edge(tail, head, extra)
+      if @opts[:direction] == :bottom_up
+        gr = "\"#{encode_quotes head}\" -> \"#{encode_quotes tail}\""
+      else
+        gr = "\"#{encode_quotes tail}\" -> \"#{encode_quotes head}\""
+      end
+
+      if extra.length > 0
+        gr += " ["
+        extra.each do |k, v|
+          gr += "#{k}=\"#{encode_quotes v}\","
+        end
+        gr += "]"
+      end
+      gr += ";\n"
+      gr
+    end
+
+    def script_param_edges(uuid, sp)
+      gr = ""
+
+      sp.each do |k, v|
+        if @opts[:all_script_parameters]
+          if v.is_a? Array or v.is_a? Hash
+            encv = JSON.pretty_generate(v).gsub("\n", "\\l") + "\\l"
+          else
+            encv = v.to_json
+          end
+          gr += "\"#{encode_quotes encv}\" [shape=box];\n"
+          gr += edge(encv, uuid, {:label => k})
+        end
+      end
+      gr
+    end
+
+    def job_edges job, edge_opts={}
+      uuid = job_uuid(job)
+      gr = ""
+
+      ProvenanceHelper::find_collections job[:script_parameters] do |collection_hash, collection_uuid, key|
+        if collection_uuid
+          gr += describe_node(collection_uuid)
+          gr += edge(collection_uuid, uuid, {:label => key})
+        else
+          gr += describe_node(collection_hash)
+          gr += edge(collection_hash, uuid, {:label => key})
+        end
+      end
+
+      if job[:docker_image_locator] and !@opts[:no_docker]
+        gr += describe_node(job[:docker_image_locator], {label: (job[:runtime_constraints].andand[:docker_image] || job[:docker_image_locator])})
+        gr += edge(job[:docker_image_locator], uuid, {label: "docker_image"})
+      end
+
+      if @opts[:script_version_nodes]
+        gr += describe_node(job[:script_version], {:label => "git:#{job[:script_version]}"})
+        gr += edge(job[:script_version], uuid, {:label => "script_version"})
+      end
+
+      if job[:output] and !edge_opts[:no_output]
+        gr += describe_node(job[:output])
+        gr += edge(uuid, job[:output], {label: "output" })
+      end
+
+      if job[:log] and !edge_opts[:no_log]
+        gr += describe_node(job[:log])
+        gr += edge(uuid, job[:log], {label: "log"})
+      end
+
+      gr
+    end
+
+    def generate_provenance_edges(uuid)
+      gr = ""
+      m = GenerateGraph::collection_uuid(uuid)
+      uuid = m if m
+
+      if uuid.nil? or uuid.empty? or @visited[uuid]
+        return ""
+      end
+
+      if @pdata[uuid].nil?
+        return ""
+      else
+        @visited[uuid] = true
+      end
+
+      if uuid.start_with? "component_"
+        # Pipeline component inputs
+        job = @pdata[@pdata[uuid][:job].andand[:uuid]]
+
+        if job
+          gr += describe_node(job_uuid(job), {label: uuid[38..-1], pip: @opts[:pips].andand[job[:uuid]], shape: "oval",
+                                href: {controller: 'jobs', id: job[:uuid]}})
+          gr += job_edges job, {no_output: true, no_log: true}
+        end
+
+        # Pipeline component output
+        outuuid = @pdata[uuid][:output_uuid]
+        if outuuid
+          outcollection = @pdata[outuuid]
+          if outcollection
+            gr += edge(job_uuid(job), outcollection[:portable_data_hash], {label: "output"})
+            gr += describe_node(outcollection[:portable_data_hash], {label: outcollection[:name]})
+          end
+        elsif job and job[:output]
+          gr += describe_node(job[:output])
+          gr += edge(job_uuid(job), job[:output], {label: "output" })
+        end
+      else
+        rsc = ArvadosBase::resource_class_for_uuid uuid
+
+        if rsc == Job
+          job = @pdata[uuid]
+          gr += job_edges job if job
+        elsif rsc == ContainerRequest
+          cr = @pdata[uuid]
+          if cr
+            gr += describe_node(cr[:uuid], {href: {controller: 'container_requests',
+                                                   id: cr[:uuid]},
+                                            label: cr[:name],
+                                            shape: 'oval'})
+            # Connect child CRs
+            children = @opts[:cr_children_of].andand[cr[:uuid]]
+            if children
+              children.each do |child|
+                gr += edge(child[:uuid], cr[:uuid], {label: 'child'})
+              end
+            end
+            # Output collection node
+            if cr[:output_uuid] and @opts[:output_collections][cr[:output_uuid]]
+              c = @opts[:output_collections][cr[:output_uuid]]
+              gr += describe_node(c[:portable_data_hash],
+                                  {
+                                    label: c[:name],
+                                    col_uuid: c[:uuid],
+                                  })
+              gr += edge(cr[:uuid],
+                         c[:portable_data_hash],
+                         {label: 'output'})
+            end
+            # Input collection nodes
+            output_pdhs = @opts[:output_collections].values.collect{|c|
+              c[:portable_data_hash]}
+            ProvenanceHelper::cr_input_pdhs(cr).each do |pdh|
+              if not output_pdhs.include?(pdh)
+                # Search for collections on the same project first
+                cols = @opts[:input_collections][pdh].andand.select{|c|
+                  c[:owner_uuid] == cr[:owner_uuid]}
+                if not cols or cols.empty?
+                  # Search for any collection with this PDH
+                  cols = @opts[:input_collections][pdh]
+                end
+                if cols
+                  names = cols.collect{|x| x[:name]}.uniq
+                else
+                  names = ['(collection not found)']
+                end
+                input_name = names.first
+                if names.length > 1
+                  input_name += " + #{names.length - 1} more"
+                end
+                gr += describe_node(pdh, {label: input_name})
+              end
+              gr += edge(pdh, cr[:uuid], {label: 'input'})
+            end
+          end
+        end
+      end
+
+      @pdata.each do |k, link|
+        if link[:head_uuid] == uuid.to_s and link[:link_class] == "provenance"
+          href = url_for ({:controller => Link.to_s.tableize,
+                            :action => :show,
+                            :id => link[:uuid] })
+
+          gr += describe_node(link[:tail_uuid])
+          gr += edge(link[:head_uuid], link[:tail_uuid], {:label => link[:name], :href => href})
+          gr += generate_provenance_edges(link[:tail_uuid])
+        end
+      end
+
+      gr
+    end
+
+    def describe_jobs
+      gr = ""
+      @jobs.each do |k, v|
+        href = url_for ({:controller => Job.to_s.tableize,
+                          :action => :index })
+
+        gr += "\"#{k}\" [href=\"#{href}?"
+
+        n = 0
+        v.each do |u|
+          gr += ";" unless gr.end_with? "?"
+          gr += "uuid%5b%5d=#{u[:uuid]}"
+          n |= @opts[:pips][u[:uuid]] if @opts[:pips] and @opts[:pips][u[:uuid]]
+        end
+
+        gr += "\",label=\""
+
+        label = "#{v[0][:script]}"
+
+        if label == "run-command" and v[0][:script_parameters][:command].is_a? Array
+          label = v[0][:script_parameters][:command].join(' ')
+        end
+
+        if not @opts[:combine_jobs]
+          label += "\\n#{v[0][:finished_at]}"
+        end
+
+        gr += encode_quotes label
+
+        gr += "\",#{determine_fillcolor n}];\n"
+      end
+      gr
+    end
+
+    def encode_quotes value
+      value.to_s.gsub("\"", "\\\"").gsub("\n", "\\n")
+    end
+  end
+
+  def self.create_provenance_graph(pdata, svgId, opts={})
+    if pdata.is_a? Array or pdata.is_a? ArvadosResourceList
+      p2 = {}
+      pdata.each do |k|
+        p2[k[:uuid]] = k if k[:uuid]
+      end
+      pdata = p2
+    end
+
+    unless pdata.is_a? Hash
+      raise "create_provenance_graph accepts Array or Hash for pdata only, pdata is #{pdata.class}"
+    end
+
+    gr = """strict digraph {
+node [fontsize=10,fontname=\"Helvetica,Arial,sans-serif\"];
+edge [fontsize=10,fontname=\"Helvetica,Arial,sans-serif\"];
+"""
+
+    if opts[:direction] == :bottom_up
+      gr += "edge [dir=back];"
+    end
+
+    begin
+      pdata = pdata.stringify_keys
+
+      g = GenerateGraph.new(pdata, opts)
+
+      pdata.each do |k, v|
+        if !opts[:only_components] or k.start_with? "component_"
+          gr += g.generate_provenance_edges(k)
+        end
+      end
+
+      if !opts[:only_components]
+        gr += g.describe_jobs
+      end
+
+    rescue => e
+      Rails.logger.warn "#{e.inspect}"
+      Rails.logger.warn "#{e.backtrace.join("\n\t")}"
+      raise
+    end
+
+    gr += "}"
+    svg = ""
+
+    require 'open3'
+
+    Open3.popen2("dot", "-Tsvg") do |stdin, stdout, wait_thr|
+      stdin.print(gr)
+      stdin.close
+      svg = stdout.read()
+      wait_thr.value
+      stdout.close()
+    end
+
+    svg = svg.sub(/<\?xml.*?\?>/m, "")
+    svg = svg.sub(/<!DOCTYPE.*?>/m, "")
+    svg = svg.sub(/<svg /, "<svg id=\"#{svgId}\" ")
+  end
+
+  # Walks sp (an ArvadosBase, Hash, Array, or String) and yields
+  # (content_hash, uuid, key) for each Keep locator or collection UUID
+  # found. Exactly one of content_hash and uuid holds a value; the other
+  # is always nil, indicating which kind of identifier matched.
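+  #
+  # Example (illustrative):
+  #   find_collections({"input" => "d41d8cd98f00b204e9800998ecf8427e+0"}) do |pdh, uuid, key|
+  #     # yields pdh="d41d8cd98f00b204e9800998ecf8427e+0", uuid=nil, key="input"
+  #   end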
+  def self.find_collections(sp, key=nil, &b)
+    case sp
+    when ArvadosBase
+      sp.class.columns.each do |c|
+        find_collections(sp[c.name.to_sym], nil, &b)
+      end
+    when Hash
+      sp.each do |k, v|
+        find_collections(v, key || k, &b)
+      end
+    when Array
+      sp.each do |v|
+        find_collections(v, key, &b)
+      end
+    when String
+      if m = /[a-f0-9]{32}\+\d+/.match(sp)
+        yield m[0], nil, key
+      elsif m = /[0-9a-z]{5}-4zz18-[0-9a-z]{15}/.match(sp)
+        yield nil, m[0], key
+      end
+    end
+  end
+
+  def self.cr_input_pdhs cr
+    pdhs = []
+    input_obj = cr[:mounts].andand[:"/var/lib/cwl/cwl.input.json"].andand[:content] || cr[:mounts]
+    if input_obj
+      find_collections input_obj do |col_hash, col_uuid, key|
+        if col_hash
+          pdhs << col_hash
+        end
+      end
+    end
+    pdhs
+  end
+end
diff --git a/apps/workbench/app/helpers/version_helper.rb b/apps/workbench/app/helpers/version_helper.rb
new file mode 100644 (file)
index 0000000..e673c81
--- /dev/null
@@ -0,0 +1,22 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module VersionHelper
+  # Get the source_version given in the API server's discovery
+  # document.
+  def api_source_version
+    arvados_api_client.discovery[:source_version]
+  end
+
+  # Get the packageVersion given in the API server's discovery
+  # document.
+  def api_package_version
+    arvados_api_client.discovery[:packageVersion]
+  end
+
+  # URL for browsing source code for the given version.
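+  # Example: version_link_target("1.3.0-123-gabcdef") links to
+  # .../changes?rev=1.3.0 (everything after the first "-" is dropped).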
+  def version_link_target version
+    "https://arvados.org/projects/arvados/repository/changes?rev=#{version.sub(/-.*/, "")}"
+  end
+end
diff --git a/apps/workbench/app/mailers/.gitkeep b/apps/workbench/app/mailers/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/app/mailers/issue_reporter.rb b/apps/workbench/app/mailers/issue_reporter.rb
new file mode 100644 (file)
index 0000000..de07122
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class IssueReporter < ActionMailer::Base
+  default from: Rails.configuration.issue_reporter_email_from
+  default to: Rails.configuration.issue_reporter_email_to
+
+  def send_report(user, params)
+    @user = user
+    @params = params
+    subject = 'Issue reported'
+    subject += " by #{@user.email}" if @user
+    mail(subject: subject)
+  end
+end
diff --git a/apps/workbench/app/mailers/request_shell_access_reporter.rb b/apps/workbench/app/mailers/request_shell_access_reporter.rb
new file mode 100644 (file)
index 0000000..8615cea
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RequestShellAccessReporter < ActionMailer::Base
+  default from: Rails.configuration.email_from
+  default to: Rails.configuration.support_email_address
+
+  def send_request(user, params)
+    @user = user
+    @params = params
+    subject = "Shell account request from #{user.full_name} (#{user.email}, #{user.uuid})"
+    mail(subject: subject)
+  end
+end
diff --git a/apps/workbench/app/models/.gitkeep b/apps/workbench/app/models/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/app/models/api_client_authorization.rb b/apps/workbench/app/models/api_client_authorization.rb
new file mode 100644 (file)
index 0000000..b78cb28
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ApiClientAuthorization < ArvadosBase
+  def editable_attributes
+    %w(expires_at default_owner_uuid)
+  end
+  def self.creatable?
+    false
+  end
+end
diff --git a/apps/workbench/app/models/arvados_api_client.rb b/apps/workbench/app/models/arvados_api_client.rb
new file mode 100644 (file)
index 0000000..5a8fd51
--- /dev/null
@@ -0,0 +1,283 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'httpclient'
+require 'thread'
+
+class ArvadosApiClient
+  class ApiError < StandardError
+    attr_reader :api_response, :api_response_s, :api_status, :request_url
+
+    def initialize(request_url, errmsg)
+      @request_url = request_url
+      @api_response ||= {}
+      errors = @api_response[:errors]
+      if not errors.is_a?(Array)
+        @api_response[:errors] = [errors || errmsg]
+      end
+      super(errmsg)
+    end
+  end
+
+  class NoApiResponseException < ApiError
+    def initialize(request_url, exception)
+      @api_response_s = exception.to_s
+      super(request_url,
+            "#{exception.class.to_s} error connecting to API server")
+    end
+  end
+
+  class InvalidApiResponseException < ApiError
+    def initialize(request_url, api_response)
+      @api_status = api_response.status_code
+      @api_response_s = api_response.content
+      super(request_url, "Unparseable response from API server")
+    end
+  end
+
+  class ApiErrorResponseException < ApiError
+    def initialize(request_url, api_response)
+      @api_status = api_response.status_code
+      @api_response_s = api_response.content
+      @api_response = Oj.load(@api_response_s, :symbol_keys => true)
+      errors = @api_response[:errors]
+      if errors.respond_to?(:join)
+        errors = errors.join("\n\n")
+      else
+        errors = errors.to_s
+      end
+      super(request_url, "#{errors} [API: #{@api_status}]")
+    end
+  end
+
+  class AccessForbiddenException < ApiErrorResponseException; end
+  class NotFoundException < ApiErrorResponseException; end
+  class NotLoggedInException < ApiErrorResponseException; end
+
+  ERROR_CODE_CLASSES = {
+    401 => NotLoggedInException,
+    403 => AccessForbiddenException,
+    404 => NotFoundException,
+  }
+
+  @@profiling_enabled = Rails.configuration.profiling_enabled
+  @@discovery = nil
+
+  # An API client object suitable for handling API requests on behalf
+  # of the current thread.
+  def self.new_or_current
+    # If this thread doesn't have an API client yet, *or* this model
+    # has been reloaded since the existing client was created, create
+    # a new client. Otherwise, keep using the latest client created in
+    # the current thread.
+    unless Thread.current[:arvados_api_client].andand.class == self
+      Thread.current[:arvados_api_client] = new
+    end
+    Thread.current[:arvados_api_client]
+  end
+
+  def initialize *args
+    @api_client = nil
+    @client_mtx = Mutex.new
+  end
+
+  def api(resources_kind, action, data=nil, tokens={}, include_anon_token=true)
+
+    profile_checkpoint
+
+    if not @api_client
+      @client_mtx.synchronize do
+        @api_client = HTTPClient.new
+        @api_client.ssl_config.timeout = Rails.configuration.api_client_connect_timeout
+        @api_client.connect_timeout = Rails.configuration.api_client_connect_timeout
+        @api_client.receive_timeout = Rails.configuration.api_client_receive_timeout
+        if Rails.configuration.arvados_insecure_https
+          @api_client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
+        else
+          # Use system CA certificates
+          ["/etc/ssl/certs/ca-certificates.crt",
+           "/etc/pki/tls/certs/ca-bundle.crt"]
+            .select { |ca_path| File.readable?(ca_path) }
+            .each { |ca_path| @api_client.ssl_config.add_trust_ca(ca_path) }
+        end
+        if Rails.configuration.api_response_compression
+          @api_client.transparent_gzip_decompression = true
+        end
+      end
+    end
+
+    resources_kind = class_kind(resources_kind).pluralize if resources_kind.is_a? Class
+    url = "#{self.arvados_v1_base}/#{resources_kind}#{action}"
+
+    # Clean up /arvados/v1/../../discovery/v1 to /discovery/v1
+    url.sub! '/arvados/v1/../../', '/'
+
+    query = {
+      'reader_tokens' => ((tokens[:reader_tokens] ||
+                           Thread.current[:reader_tokens] ||
+                           []) +
+                          (include_anon_token ? [Rails.configuration.anonymous_user_token] : [])).to_json,
+    }
+    if !data.nil?
+      data.each do |k,v|
+        if v.is_a? String or v.nil?
+          query[k] = v
+        elsif v == true
+          query[k] = 1
+        elsif v == false
+          query[k] = 0
+        else
+          query[k] = Oj.dump(v, mode: :compat)
+        end
+      end
+    else
+      query["_method"] = "GET"
+    end
+
+    if @@profiling_enabled
+      query["_profile"] = "true"
+    end
+
+    headers = {
+      "Accept" => "application/json",
+      "Authorization" => "OAuth2 " +
+                         (tokens[:arvados_api_token] ||
+                          Thread.current[:arvados_api_token] ||
+                          ''),
+      "X-Request-Id" => Thread.current[:request_id] || '',
+    }
+
+    profile_checkpoint { "Prepare request #{query["_method"] or "POST"} #{url} #{query[:uuid]} #{query.inspect[0,256]}" }
+    msg = @client_mtx.synchronize do
+      begin
+        @api_client.post(url, query, headers)
+      rescue => exception
+        raise NoApiResponseException.new(url, exception)
+      end
+    end
+    profile_checkpoint 'API transaction'
+    if @@profiling_enabled
+      if msg.headers['X-Runtime']
+        Rails.logger.info "API server: #{msg.headers['X-Runtime']} runtime reported"
+      end
+      Rails.logger.info "Content-Encoding #{msg.headers['Content-Encoding'].inspect}, Content-Length #{msg.headers['Content-Length'].inspect}, actual content size #{msg.content.size}"
+    end
+
+    begin
+      resp = Oj.load(msg.content, :symbol_keys => true)
+    rescue Oj::ParseError
+      resp = nil
+    end
+
+    if not resp.is_a? Hash
+      raise InvalidApiResponseException.new(url, msg)
+    elsif msg.status_code != 200
+      error_class = ERROR_CODE_CLASSES.fetch(msg.status_code,
+                                             ApiErrorResponseException)
+      raise error_class.new(url, msg)
+    end
+
+    if resp[:_profile]
+      Rails.logger.info "API client: " \
+      "#{resp.delete(:_profile)[:request_time]} request_time"
+    end
+    profile_checkpoint 'Parse response'
+    resp
+  end
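+
+  # Editor's sketch (not in the original source): how the parameter
+  # serialization above plays out, assuming a Workbench console with a
+  # valid Thread.current[:arvados_api_token]:
+  #
+  #   client = ArvadosApiClient.new_or_current
+  #   client.api(Collection, '', {
+  #     _method: 'GET',                        # String: passed through as-is
+  #     filters: [['name', 'like', '%demo%']], # Array: JSON-encoded via Oj
+  #     include_trash: false,                  # false: sent as 0
+  #   })
+  #   # Everything goes out as a POST; the "_method" parameter (added
+  #   # automatically when data is nil) tells the API server to treat
+  #   # it as a read.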
+
+  def self.patch_paging_vars(ary, items_available, offset, limit, links=nil)
+    if items_available
+      (class << ary; self; end).class_eval { attr_accessor :items_available }
+      ary.items_available = items_available
+    end
+    if offset
+      (class << ary; self; end).class_eval { attr_accessor :offset }
+      ary.offset = offset
+    end
+    if limit
+      (class << ary; self; end).class_eval { attr_accessor :limit }
+      ary.limit = limit
+    end
+    if links
+      (class << ary; self; end).class_eval { attr_accessor :links }
+      ary.links = links
+    end
+    ary
+  end
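+
+  # Editor's note (not in the original source): patch_paging_vars grafts
+  # reader methods onto one specific Array instance via its singleton
+  # class, so pagination metadata rides along with the results without
+  # touching Array itself. A minimal sketch:
+  #
+  #   ary = ['a', 'b']
+  #   ArvadosApiClient.patch_paging_vars(ary, 100, 40, 2)
+  #   ary.items_available              # => 100
+  #   ary.offset                       # => 40
+  #   [].respond_to?(:items_available) # => false; other arrays unaffected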
+
+  def unpack_api_response(j, kind=nil)
+    if j.is_a? Hash and j[:items].is_a? Array and j[:kind].match(/(_list|List)$/)
+      ary = j[:items].collect { |x| unpack_api_response x, x[:kind] }
+      links = ArvadosResourceList.new Link
+      links.results = (j[:links] || []).collect do |x|
+        unpack_api_response x, x[:kind]
+      end
+      self.class.patch_paging_vars(ary, j[:items_available], j[:offset], j[:limit], links)
+    elsif j.is_a? Hash and (kind || j[:kind])
+      oclass = self.kind_class(kind || j[:kind])
+      if oclass
+        j.keys.each do |k|
+          childkind = j["#{k.to_s}_kind".to_sym]
+          if childkind
+            j[k] = self.unpack_api_response(j[k], childkind)
+          end
+        end
+        oclass.new.private_reload(j)
+      else
+        j
+      end
+    else
+      j
+    end
+  end
+
+  def arvados_login_url(params={})
+    if Rails.configuration.respond_to? :arvados_login_base
+      uri = Rails.configuration.arvados_login_base
+    else
+      uri = self.arvados_v1_base.sub(%r{/arvados/v\d+.*}, '/login')
+    end
+    if params.size > 0
+      uri += '?' << params.collect { |k,v|
+        CGI.escape(k.to_s) + '=' + CGI.escape(v.to_s)
+      }.join('&')
+    end
+    uri
+  end
+
+  def arvados_logout_url(params={})
+    arvados_login_url(params).sub('/login','/logout')
+  end
+
+  def arvados_v1_base
+    Rails.configuration.arvados_v1_base
+  end
+
+  def discovery
+    @@discovery ||= api '../../discovery/v1/apis/arvados/v1/rest', ''
+  end
+
+  def kind_class(kind)
+    kind.match(/^arvados\#(.+?)(_list|List)?$/)[1].pluralize.classify.constantize rescue nil
+  end
+
+  def class_kind(resource_class)
+    resource_class.to_s.underscore
+  end
+
+  def self.class_kind(resource_class)
+    resource_class.to_s.underscore
+  end
+
+  protected
+  def profile_checkpoint label=nil
+    return if !@@profiling_enabled
+    label = yield if block_given?
+    t = Time.now
+    if label and @profile_t0
+      Rails.logger.info "API client: #{t - @profile_t0} #{label}"
+    end
+    @profile_t0 = t
+  end
+end
diff --git a/apps/workbench/app/models/arvados_base.rb b/apps/workbench/app/models/arvados_base.rb
new file mode 100644 (file)
index 0000000..d7a65bd
--- /dev/null
@@ -0,0 +1,495 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ArvadosBase < ActiveRecord::Base
+  self.abstract_class = true
+  attr_accessor :attribute_sortkey
+  attr_accessor :create_params
+
+  def self.arvados_api_client
+    ArvadosApiClient.new_or_current
+  end
+
+  def arvados_api_client
+    ArvadosApiClient.new_or_current
+  end
+
+  def self.uuid_infix_object_kind
+    @@uuid_infix_object_kind ||=
+      begin
+        infix_kind = {}
+        arvados_api_client.discovery[:schemas].each do |name, schema|
+          if schema[:uuidPrefix]
+            infix_kind[schema[:uuidPrefix]] =
+              'arvados#' + name.to_s.camelcase(:lower)
+          end
+        end
+
+        # Recognize obsolete types.
+        infix_kind.
+          merge('mxsvm' => 'arvados#pipelineTemplate', # Pipeline
+                'uo14g' => 'arvados#pipelineInstance', # PipelineInvocation
+                'ldvyl' => 'arvados#group') # Project
+      end
+  end
+
+  def initialize raw_params={}, create_params={}
+    super self.class.permit_attribute_params(raw_params)
+    @create_params = create_params
+    @attribute_sortkey ||= {
+      'id' => nil,
+      'name' => '000',
+      'owner_uuid' => '002',
+      'event_type' => '100',
+      'link_class' => '100',
+      'group_class' => '100',
+      'tail_uuid' => '101',
+      'head_uuid' => '102',
+      'object_uuid' => '102',
+      'summary' => '104',
+      'description' => '104',
+      'properties' => '150',
+      'info' => '150',
+      'created_at' => '200',
+      'modified_at' => '201',
+      'modified_by_user_uuid' => '202',
+      'modified_by_client_uuid' => '203',
+      'uuid' => '999',
+    }
+    @loaded_attributes = {}
+  end
+
+  def self.columns
+    return @discovered_columns if @discovered_columns.andand.any?
+    @discovered_columns = []
+    @attribute_info ||= {}
+    schema = arvados_api_client.discovery[:schemas][self.to_s.to_sym]
+    return @discovered_columns if schema.nil?
+    schema[:properties].each do |k, coldef|
+      case k
+      when :etag, :kind
+        attr_reader k
+      else
+        if coldef[:type] == coldef[:type].downcase
+          # boolean, integer, etc.
+          @discovered_columns << column(k, coldef[:type])
+        else
+          # Hash, Array
+          @discovered_columns << column(k, coldef[:type], coldef[:type].constantize.new)
+          serialize k, coldef[:type].constantize
+        end
+        define_method k do
+          unless new_record? or @loaded_attributes.include? k.to_s
+            Rails.logger.debug "BUG: access non-loaded attribute #{k}"
+            # We should...
+            # raise ActiveModel::MissingAttributeError, "missing attribute: #{k}"
+          end
+          super()
+        end
+        @attribute_info[k] = coldef
+      end
+    end
+    @discovered_columns
+  end
+
+  def self.column(name, sql_type = nil, default = nil, null = true)
+    if sql_type == 'datetime'
+      cast_type = "ActiveRecord::Type::DateTime".constantize.new
+    else
+      cast_type = ActiveRecord::Base.connection.lookup_cast_type(sql_type)
+    end
+    ActiveRecord::ConnectionAdapters::Column.new(name.to_s, default, cast_type, sql_type.to_s, null)
+  end
+
+  def self.attribute_info
+    self.columns
+    @attribute_info
+  end
+
+  def self.find(uuid, opts={})
+    if uuid.class != String or uuid.length < 27 then
+      raise 'argument to find() must be a uuid string. Acceptable formats: warehouse locator or string with format xxxxx-xxxxx-xxxxxxxxxxxxxxx'
+    end
+
+    if self == ArvadosBase
+      # Determine type from uuid and defer to the appropriate subclass.
+      return resource_class_for_uuid(uuid).find(uuid, opts)
+    end
+
+    # Only do one lookup on the API side per {class, uuid, workbench
+    # request} unless {cache: false} is given via opts.
+    cache_key = "request_#{Thread.current.object_id}_#{self.to_s}_#{uuid}"
+    if opts[:cache] == false
+      Rails.cache.write cache_key, arvados_api_client.api(self, '/' + uuid)
+    end
+    hash = Rails.cache.fetch cache_key do
+      arvados_api_client.api(self, '/' + uuid)
+    end
+    new.private_reload(hash)
+  end
+
+  def self.find?(*args)
+    find(*args) rescue nil
+  end
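+
+  # Editor's sketch (not in the original source): the per-request cache
+  # above means repeated find() calls for the same uuid within one
+  # Workbench request hit the API server only once. The uuid below is a
+  # hypothetical placeholder:
+  #
+  #   c1 = Collection.find('zzzzz-4zz18-xxxxxxxxxxxxxxx')  # API call
+  #   c2 = Collection.find('zzzzz-4zz18-xxxxxxxxxxxxxxx')  # cache hit
+  #   c3 = Collection.find('zzzzz-4zz18-xxxxxxxxxxxxxxx',
+  #                        cache: false)                   # forced refetch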
+
+  def self.order(*args)
+    ArvadosResourceList.new(self).order(*args)
+  end
+
+  def self.filter(*args)
+    ArvadosResourceList.new(self).filter(*args)
+  end
+
+  def self.where(*args)
+    ArvadosResourceList.new(self).where(*args)
+  end
+
+  def self.limit(*args)
+    ArvadosResourceList.new(self).limit(*args)
+  end
+
+  def self.select(*args)
+    ArvadosResourceList.new(self).select(*args)
+  end
+
+  def self.with_count(*args)
+    ArvadosResourceList.new(self).with_count(*args)
+  end
+
+  def self.distinct(*args)
+    ArvadosResourceList.new(self).distinct(*args)
+  end
+
+  def self.include_trash(*args)
+    ArvadosResourceList.new(self).include_trash(*args)
+  end
+
+  def self.recursive(*args)
+    ArvadosResourceList.new(self).recursive(*args)
+  end
+
+  def self.eager(*args)
+    ArvadosResourceList.new(self).eager(*args)
+  end
+
+  def self.all
+    ArvadosResourceList.new(self)
+  end
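+
+  # Editor's sketch (not in the original source): each class method
+  # above returns an ArvadosResourceList, so queries compose in
+  # ActiveRecord style and only execute when enumerated. 'project' is a
+  # hypothetical Group instance:
+  #
+  #   Collection.where(owner_uuid: project.uuid).
+  #     order('created_at desc').
+  #     limit(20).
+  #     each { |c| puts c.uuid }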
+
+  def self.permit_attribute_params raw_params
+    # strong_parameters does not provide security in Workbench: anyone
+    # who can get this far can just as well do a call directly to our
+    # database (Arvados) with the same credentials we use.
+    #
+    # The following permit! is necessary even with
+    # "ActionController::Parameters.permit_all_parameters = true",
+    # because permit_all does not permit nested attributes.
+    ActionController::Parameters.new(raw_params).permit!
+  end
+
+  def self.create raw_params={}, create_params={}
+    x = super(permit_attribute_params(raw_params))
+    x.create_params = create_params
+    x
+  end
+
+  def update_attributes raw_params={}
+    super(self.class.permit_attribute_params(raw_params))
+  end
+
+  def save
+    obdata = {}
+    self.class.columns.each do |col|
+      # Non-nil serialized values must be sent because we can't tell
+      # whether they've changed. Other than that, any given attribute
+      # is either unchanged (in which case there's no need to send its
+      # old value in the update/create command) or has been added to
+      # #changed by ActiveRecord's #attr= method.
+      if changed.include? col.name or
+          ([Hash, Array].include?(attributes[col.name].class) and
+           @loaded_attributes[col.name])
+        obdata[col.name.to_sym] = self.send col.name
+      end
+    end
+    obdata.delete :id
+    postdata = { self.class.to_s.underscore => obdata }
+    if etag
+      postdata['_method'] = 'PUT'
+      obdata.delete :uuid
+      resp = arvados_api_client.api(self.class, '/' + uuid, postdata)
+    else
+      postdata.merge!(@create_params) if @create_params
+      resp = arvados_api_client.api(self.class, '', postdata)
+    end
+    return false if !resp[:etag] || !resp[:uuid]
+
+    # set read-only non-database attributes
+    @etag = resp[:etag]
+    @kind = resp[:kind]
+
+    # attributes can be modified during "save" -- we should update our copies
+    resp.keys.each do |attr|
+      if self.respond_to? "#{attr}=".to_sym
+        self.send(attr.to_s + '=', resp[attr.to_sym])
+      end
+    end
+
+    changes_applied
+    @new_record = false
+
+    self
+  end
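+
+  # Editor's note (not in the original source): #save above decides
+  # between create and update by the presence of an etag -- an object
+  # fetched from the API server has one, a freshly built Workbench-side
+  # object does not. A minimal sketch:
+  #
+  #   c = Collection.new(name: 'scratch')  # no etag yet
+  #   c.save                               # POST        -> create
+  #   c.name = 'scratch2'
+  #   c.save                               # _method=PUT -> update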
+
+  def save!
+    self.save or raise Exception.new("Save failed")
+  end
+
+  def destroy
+    if etag || uuid
+      postdata = { '_method' => 'DELETE' }
+      resp = arvados_api_client.api(self.class, '/' + uuid, postdata)
+      resp[:etag] && resp[:uuid] && resp
+    else
+      true
+    end
+  end
+
+  def links(*args)
+    o = {}
+    o.merge!(args.pop) if args[-1].is_a? Hash
+    o[:link_class] ||= args.shift
+    o[:name] ||= args.shift
+    o[:tail_uuid] = self.uuid
+    if all_links
+      return all_links.select do |m|
+        ok = true
+        o.each do |k,v|
+          if !v.nil?
+            test_v = m.send(k)
+            if (v.respond_to?(:uuid) ? v.uuid : v.to_s) != (test_v.respond_to?(:uuid) ? test_v.uuid : test_v.to_s)
+              ok = false
+            end
+          end
+        end
+        ok
+      end
+    end
+    @links = arvados_api_client.api Link, '', { _method: 'GET', where: o, eager: true }
+    @links = arvados_api_client.unpack_api_response(@links)
+  end
+
+  def all_links
+    return @all_links if @all_links
+    res = arvados_api_client.api Link, '', {
+      _method: 'GET',
+      where: {
+        tail_kind: self.kind,
+        tail_uuid: self.uuid
+      },
+      eager: true
+    }
+    @all_links = arvados_api_client.unpack_api_response(res)
+  end
+
+  def reload
+    private_reload(self.uuid)
+  end
+
+  def private_reload(uuid_or_hash)
+    raise "No such object" if !uuid_or_hash
+    if uuid_or_hash.is_a? Hash
+      hash = uuid_or_hash
+    else
+      hash = arvados_api_client.api(self.class, '/' + uuid_or_hash)
+    end
+    hash.each do |k,v|
+      @loaded_attributes[k.to_s] = true
+      if self.respond_to?(k.to_s + '=')
+        self.send(k.to_s + '=', v)
+      else
+        # When ArvadosApiClient#schema starts telling us what to expect
+        # in API responses (not just the server side database
+        # columns), this sort of awfulness can be avoided:
+        self.instance_variable_set('@' + k.to_s, v)
+        if !self.respond_to? k
+          singleton = class << self; self end
+          singleton.send :define_method, k, lambda { instance_variable_get('@' + k.to_s) }
+        end
+      end
+    end
+    @all_links = nil
+    changes_applied
+    @new_record = false
+    self
+  end
+
+  def to_param
+    uuid
+  end
+
+  def initialize_copy orig
+    super
+    forget_uuid!
+  end
+
+  def attributes_for_display
+    self.attributes.reject { |k,v|
+      attribute_sortkey.has_key?(k) and !attribute_sortkey[k]
+    }.sort_by { |k,v|
+      attribute_sortkey[k] or k
+    }
+  end
+
+  def class_for_display
+    self.class.to_s.underscore.humanize
+  end
+
+  def self.class_for_display
+    self.to_s.underscore.humanize
+  end
+
+  # Array of strings that are names of attributes that should be rendered as textile.
+  def textile_attributes
+    []
+  end
+
+  def self.creatable?
+    current_user.andand.is_active && api_exists?(:create)
+  end
+
+  def self.goes_in_projects?
+    false
+  end
+
+  # can this class of object be copied into a project?
+  # override to false on individual model classes for which this should not be true
+  def self.copies_to_projects?
+    self.goes_in_projects?
+  end
+
+  def editable?
+    (current_user and current_user.is_active and
+     (current_user.is_admin or
+      current_user.uuid == self.owner_uuid or
+      new_record? or
+      (respond_to?(:writable_by) ?
+       writable_by.include?(current_user.uuid) :
+       (ArvadosBase.find(owner_uuid).writable_by.include? current_user.uuid rescue false)))) or false
+  end
+
+  def deletable?
+    editable?
+  end
+
+  def self.api_exists?(method)
+    arvados_api_client.discovery[:resources][self.to_s.underscore.pluralize.to_sym].andand[:methods].andand[method]
+  end
+
+  # Array of strings that are the names of attributes that can be edited
+  # with X-Editable.
+  def editable_attributes
+    self.class.columns.map(&:name) -
+      %w(created_at modified_at modified_by_user_uuid modified_by_client_uuid updated_at)
+  end
+
+  def attribute_editable?(attr, ever=nil)
+    if not editable_attributes.include?(attr.to_s)
+      false
+    elsif not (current_user.andand.is_active)
+      false
+    elsif attr == 'uuid'
+      current_user.is_admin
+    elsif ever
+      true
+    else
+      editable?
+    end
+  end
+
+  def self.resource_class_for_uuid(uuid, opts={})
+    if uuid.is_a? ArvadosBase
+      return uuid.class
+    end
+    unless uuid.is_a? String
+      return nil
+    end
+    if opts[:class].is_a? Class
+      return opts[:class]
+    end
+    if uuid.match /^[0-9a-f]{32}(\+[^,]+)*(,[0-9a-f]{32}(\+[^,]+)*)*$/
+      return Collection
+    end
+    resource_class = nil
+    uuid.match /^[0-9a-z]{5}-([0-9a-z]{5})-[0-9a-z]{15}$/ do |re|
+      resource_class ||= arvados_api_client.
+        kind_class(self.uuid_infix_object_kind[re[1]])
+    end
+    if opts[:referring_object] and
+        opts[:referring_attr] and
+        opts[:referring_attr].match /_uuid$/
+      resource_class ||= arvados_api_client.
+        kind_class(opts[:referring_object].
+                   attributes[opts[:referring_attr].
+                              sub(/_uuid$/, '_kind')])
+    end
+    resource_class
+  end
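+
+  # Editor's sketch (not in the original source): the two regexps above
+  # distinguish content-addressed Keep locators from object uuids, whose
+  # middle infix selects the class. Hypothetical identifiers; 'j7d0g' is
+  # the Group uuidPrefix on a stock API server:
+  #
+  #   ArvadosBase.resource_class_for_uuid(
+  #     'd41d8cd98f00b204e9800998ecf8427e+0')  # => Collection
+  #   ArvadosBase.resource_class_for_uuid(
+  #     'zzzzz-j7d0g-xxxxxxxxxxxxxxx')         # => Group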
+
+  def resource_param_name
+    self.class.to_s.underscore
+  end
+
+  def friendly_link_name lookup=nil
+    (name if self.respond_to? :name) || default_name
+  end
+
+  def content_summary
+    self.class_for_display
+  end
+
+  def selection_label
+    friendly_link_name
+  end
+
+  def self.default_name
+    self.to_s.underscore.humanize
+  end
+
+  def controller
+    (self.class.to_s.pluralize + 'Controller').constantize
+  end
+
+  def controller_name
+    self.class.to_s.tableize
+  end
+
+  # Placeholder for name when name is missing or empty
+  def default_name
+    if self.respond_to? :name
+      "New #{class_for_display.downcase}"
+    else
+      uuid
+    end
+  end
+
+  def owner
+    ArvadosBase.find(owner_uuid) rescue nil
+  end
+
+  protected
+
+  def forget_uuid!
+    self.uuid = nil
+    @etag = nil
+    self
+  end
+
+  def self.current_user
+    Thread.current[:user] ||= User.current if Thread.current[:arvados_api_token]
+    Thread.current[:user]
+  end
+  def current_user
+    self.class.current_user
+  end
+end
diff --git a/apps/workbench/app/models/arvados_resource_list.rb b/apps/workbench/app/models/arvados_resource_list.rb
new file mode 100644 (file)
index 0000000..9ba61ea
--- /dev/null
@@ -0,0 +1,250 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ArvadosResourceList
+  include ArvadosApiClientHelper
+  include Enumerable
+
+  attr_reader :resource_class
+
+  def initialize resource_class=nil
+    @resource_class = resource_class
+    @fetch_multiple_pages = true
+    @arvados_api_token = Thread.current[:arvados_api_token]
+    @reader_tokens = Thread.current[:reader_tokens]
+  end
+
+  def eager(bool=true)
+    @eager = bool
+    self
+  end
+
+  def distinct(bool=true)
+    @distinct = bool
+    self
+  end
+
+  def include_trash(option=nil)
+    @include_trash = option
+    self
+  end
+
+  def recursive(option=nil)
+    @recursive = option
+    self
+  end
+
+  def limit(max_results)
+    if not max_results.nil? and not max_results.is_a? Integer
+      raise ArgumentError.new("argument to limit() must be an Integer or nil")
+    end
+    @limit = max_results
+    self
+  end
+
+  def offset(skip)
+    @offset = skip
+    self
+  end
+
+  def order(orderby_spec)
+    @orderby_spec = orderby_spec
+    self
+  end
+
+  def select(columns=nil)
+    # If no column arguments were given, invoke Enumerable#select.
+    if columns.nil?
+      super()
+    else
+      @select ||= []
+      @select += columns
+      self
+    end
+  end
+
+  def filter _filters
+    @filters ||= []
+    @filters += _filters
+    self
+  end
+
+  def where(cond)
+    @cond = cond.dup
+    @cond.keys.each do |uuid_key|
+      if @cond[uuid_key] and (@cond[uuid_key].is_a? Array or
+                             @cond[uuid_key].is_a? ArvadosBase)
+        # Coerce cond[uuid_key] to an array of uuid strings.  This
+        # allows caller the convenience of passing an array of real
+        # objects and uuids in cond[uuid_key].
+        if !@cond[uuid_key].is_a? Array
+          @cond[uuid_key] = [@cond[uuid_key]]
+        end
+        @cond[uuid_key] = @cond[uuid_key].collect do |item|
+          if item.is_a? ArvadosBase
+            item.uuid
+          else
+            item
+          end
+        end
+      end
+    end
+    @cond.keys.select { |x| x.match /_kind$/ }.each do |kind_key|
+      if @cond[kind_key].is_a? Class
+        @cond = @cond.merge({ kind_key => 'arvados#' + arvados_api_client.class_kind(@cond[kind_key]) })
+      end
+    end
+    self
+  end
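+
+  # Editor's sketch (not in the original source): the coercions above
+  # let callers mix model objects and uuid strings freely, and pass a
+  # class for *_kind conditions. 'coll' is a hypothetical Collection:
+  #
+  #   Link.where(head_uuid: [coll, 'zzzzz-4zz18-xxxxxxxxxxxxxxx'],
+  #              head_kind: Collection)
+  #   # => where(head_uuid: [coll.uuid, 'zzzzz-4zz18-xxxxxxxxxxxxxxx'],
+  #   #          head_kind: 'arvados#collection')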
+
+  # with_count sets the 'count' parameter to 'exact' or 'none' -- see
+  # https://doc.arvados.org/api/methods.html#index
+  def with_count(count_param='exact')
+    @count = count_param
+    self
+  end
+
+  def fetch_multiple_pages(f)
+    @fetch_multiple_pages = f
+    self
+  end
+
+  def results
+    if !@results
+      @results = []
+      self.each_page do |r|
+        @results.concat r
+      end
+    end
+    @results
+  end
+
+  def results=(r)
+    @results = r
+    @items_available = r.items_available if r.respond_to? :items_available
+    @result_limit = r.limit if r.respond_to? :limit
+    @result_offset = r.offset if r.respond_to? :offset
+    @results
+  end
+
+  def to_ary
+    results
+  end
+
+  def each(&block)
+    if not @results.nil?
+      @results.each &block
+    else
+      self.each_page do |items|
+        items.each do |i|
+          block.call i
+        end
+      end
+    end
+    self
+  end
+
+  def first
+    results.first
+  end
+
+  def last
+    results.last
+  end
+
+  def [](*x)
+    results.send('[]', *x)
+  end
+
+  def |(x)
+    if x.is_a? Hash
+      self.to_hash | x
+    else
+      results | x.to_ary
+    end
+  end
+
+  def to_hash
+    Hash[self.collect { |x| [x.uuid, x] }]
+  end
+
+  def empty?
+    self.first.nil?
+  end
+
+  def items_available
+    results
+    @items_available
+  end
+
+  def result_limit
+    results
+    @result_limit
+  end
+
+  def result_offset
+    results
+    @result_offset
+  end
+
+  # Obsolete method retained during api transition.
+  def links_for item_or_uuid, link_class=false
+    []
+  end
+
+  protected
+
+  def each_page
+    api_params = {
+      _method: 'GET'
+    }
+    api_params[:count] = @count if @count
+    api_params[:where] = @cond if @cond
+    api_params[:eager] = '1' if @eager
+    api_params[:select] = @select if @select
+    api_params[:order] = @orderby_spec if @orderby_spec
+    api_params[:filters] = @filters if @filters
+    api_params[:distinct] = @distinct if @distinct
+    api_params[:include_trash] = @include_trash if @include_trash
+    if @fetch_multiple_pages
+      # Default limit to (effectively) api server's MAX_LIMIT
+      api_params[:limit] = 2**(0.size*8 - 1) - 1
+    end
+
+    item_count = 0
+    offset = @offset || 0
+    @result_limit = nil
+    @result_offset = nil
+
+    begin
+      api_params[:offset] = offset
+      api_params[:limit] = (@limit - item_count) if @limit
+
+      res = arvados_api_client.api(@resource_class, '', api_params,
+                                   arvados_api_token: @arvados_api_token,
+                                   reader_tokens: @reader_tokens)
+      items = arvados_api_client.unpack_api_response res
+
+      @items_available = items.items_available if items.respond_to?(:items_available)
+      @result_limit = items.limit if (@fetch_multiple_pages == false) and items.respond_to?(:limit)
+      @result_offset = items.offset if (@fetch_multiple_pages == false) and items.respond_to?(:offset)
+
+      break if items.nil? or not items.any?
+
+      item_count += items.size
+      if items.respond_to?(:offset)
+        offset = items.offset + items.size
+      else
+        offset = item_count
+      end
+
+      yield items
+
+      break if @limit and item_count >= @limit
+      break if items.respond_to? :items_available and offset >= items.items_available
+    end while @fetch_multiple_pages
+    self
+  end
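+
+  # Editor's note (not in the original source): each_page above keeps
+  # requesting the next window until the server returns an empty page,
+  # the caller's @limit is met, or items_available is exhausted. To
+  # fetch exactly one server-side page instead:
+  #
+  #   list = Collection.limit(50).fetch_multiple_pages(false)
+  #   list.results        # one API call, at most 50 items
+  #   list.result_offset  # offset reported by the server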
+
+end
diff --git a/apps/workbench/app/models/authorized_key.rb b/apps/workbench/app/models/authorized_key.rb
new file mode 100644 (file)
index 0000000..9809eef
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AuthorizedKey < ArvadosBase
+  def attribute_editable?(attr, ever=nil)
+    if (attr.to_s == 'authorized_user_uuid') and (not ever)
+      current_user.andand.is_admin
+    else
+      super
+    end
+  end
+
+  def self.creatable?
+    false
+  end
+end
diff --git a/apps/workbench/app/models/collection.rb b/apps/workbench/app/models/collection.rb
new file mode 100644 (file)
index 0000000..f5aef84
--- /dev/null
@@ -0,0 +1,108 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require "arvados/keep"
+
+class Collection < ArvadosBase
+  MD5_EMPTY = 'd41d8cd98f00b204e9800998ecf8427e'
+
+  def default_name
+    if Collection.is_empty_blob_locator? self.uuid
+      "Empty Collection"
+    else
+      super
+    end
+  end
+
+  # Return true if the given string is the locator of a zero-length blob
+  def self.is_empty_blob_locator? locator
+    !!locator.to_s.match("^#{MD5_EMPTY}(\\+.*)?\$")
+  end
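+
+  # Editor's sketch (not in the original source): MD5_EMPTY is the md5
+  # of the empty string, so any locator for zero bytes of data matches,
+  # with or without hints:
+  #
+  #   Collection.is_empty_blob_locator? MD5_EMPTY         # => true
+  #   Collection.is_empty_blob_locator? MD5_EMPTY + '+0'  # => true
+  #   Collection.is_empty_blob_locator?(
+  #     'acbd18db4cc2f85cedef654fccc4a4d8+3')             # => false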
+
+  def self.goes_in_projects?
+    true
+  end
+
+  def manifest
+    if @manifest.nil? or manifest_text_changed?
+      @manifest = Keep::Manifest.new(manifest_text || "")
+    end
+    @manifest
+  end
+
+  def files
+    # This method provides backwards compatibility for code that relied on
+    # the old files field in API results.  New code should use manifest
+    # methods directly.
+    manifest.files
+  end
+
+  def content_summary
+    if total_bytes > 0
+      ApplicationController.helpers.human_readable_bytes_html(total_bytes) + " " + super
+    else
+      super + " modified at " + modified_at.to_s
+    end
+  end
+
+  def total_bytes
+    manifest.files.inject(0) { |sum, filespec| sum + filespec.last }
+  end
+
+  def files_tree
+    tree = manifest.files.group_by do |file_spec|
+      File.split(file_spec.first)
+    end
+    return [] if tree.empty?
+    # Fill in entries for empty directories.
+    tree.keys.map { |basedir, _| File.split(basedir) }.each do |splitdir|
+      until tree.include?(splitdir)
+        tree[splitdir] = []
+        splitdir = File.split(splitdir.first)
+      end
+    end
+    dir_to_tree = lambda do |dirname|
+      # First list subdirectories, with their files inside.
+      subnodes = tree.keys.select { |bd, td| (bd == dirname) and (td != '.') }
+        .sort.flat_map do |parts|
+        [parts + [nil]] + dir_to_tree.call(File.join(parts))
+      end
+      # Then extend that list with files in this directory, except the empty dir placeholders (0:0:. files).
+      subnodes + tree[File.split(dirname)].reject { |_, basename, size| (basename == '.') and (size == 0) }
+    end
+    dir_to_tree.call('.')
+  end
+
+  def editable_attributes
+    %w(name description manifest_text filename)
+  end
+
+  def provenance
+    arvados_api_client.api "collections/#{self.uuid}/", "provenance"
+  end
+
+  def used_by
+    arvados_api_client.api "collections/#{self.uuid}/", "used_by"
+  end
+
+  def uuid
+    if self[:uuid].nil?
+      return self[:portable_data_hash]
+    else
+      super
+    end
+  end
+
+  def friendly_link_name lookup=nil
+    name || portable_data_hash
+  end
+
+  def textile_attributes
+    [ 'description' ]
+  end
+
+  def untrash
+    arvados_api_client.api(self.class, "/#{self.uuid}/untrash", {"ensure_unique_name" => true})
+  end
+end
diff --git a/apps/workbench/app/models/container.rb b/apps/workbench/app/models/container.rb
new file mode 100644 (file)
index 0000000..8de28ae
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Container < ArvadosBase
+  def self.creatable?
+    false
+  end
+
+  def work_unit(label=nil, child_objects=nil)
+    ContainerWorkUnit.new(self, label, self.uuid, child_objects=child_objects)
+  end
+end
diff --git a/apps/workbench/app/models/container_request.rb b/apps/workbench/app/models/container_request.rb
new file mode 100644 (file)
index 0000000..3c08d94
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ContainerRequest < ArvadosBase
+  def self.creatable?
+    false
+  end
+
+  def textile_attributes
+    [ 'description' ]
+  end
+
+  def self.goes_in_projects?
+    true
+  end
+
+  def work_unit(label=nil, child_objects=nil)
+    ContainerWorkUnit.new(self, label, self.uuid, child_objects=child_objects)
+  end
+end
diff --git a/apps/workbench/app/models/container_work_unit.rb b/apps/workbench/app/models/container_work_unit.rb
new file mode 100644 (file)
index 0000000..ef20a7f
--- /dev/null
@@ -0,0 +1,235 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ContainerWorkUnit < ProxyWorkUnit
+  attr_accessor :container
+  attr_accessor :child_proxies
+
+  def initialize proxied, label, parent, child_objects=nil
+    super proxied, label, parent
+    if @proxied.is_a?(ContainerRequest)
+      container_uuid = get(:container_uuid)
+      if container_uuid
+        @container = Container.find(container_uuid)
+      end
+    end
+    @child_proxies = child_objects
+  end
+
+  def children
+    return @my_children if @my_children
+
+    items = []
+    container_uuid = if @proxied.is_a?(Container) then uuid else get(:container_uuid) end
+    if container_uuid
+      cols = ContainerRequest.columns.map(&:name) - %w(id updated_at mounts secret_mounts runtime_token)
+      my_children = @child_proxies || ContainerRequest.select(cols).where(requesting_container_uuid: container_uuid).results if !my_children
+      my_child_containers = my_children.map(&:container_uuid).compact.uniq
+      grandchildren = {}
+      my_child_containers.each { |c| grandchildren[c] = []} if my_child_containers.any?
+      reqs = ContainerRequest.select(cols).where(requesting_container_uuid: my_child_containers).results if my_child_containers.any?
+      reqs.each {|cr| grandchildren[cr.requesting_container_uuid] << cr} if reqs
+
+      my_children.each do |cr|
+        items << cr.work_unit(cr.name || 'this container', child_objects=grandchildren[cr.container_uuid])
+      end
+    end
+
+    @child_proxies = nil # no longer needed
+    @my_children = items
+  end
+
+  def title
+    "container"
+  end
+
+  def uri
+    uuid = get(:uuid)
+
+    return nil unless uuid
+
+    if @proxied.class.respond_to? :table_name
+      "/#{@proxied.class.table_name}/#{uuid}"
+    else
+      resource_class = ArvadosBase.resource_class_for_uuid(uuid)
+      "#{resource_class.table_name}/#{uuid}" if resource_class
+    end
+  end
+
+  def can_cancel?
+    @proxied.is_a?(ContainerRequest) &&
+      @proxied.state == "Committed" &&
+      (@proxied.priority > 0 || get(:state, @container) != 'Running') &&
+      @proxied.editable?
+  end
+
+  def container_uuid
+    get(:container_uuid)
+  end
+
+  def requesting_container_uuid
+    get(:requesting_container_uuid)
+  end
+
+  def priority
+    @proxied.priority
+  end
+
+  # For the following properties, use the value from @container if it
+  # exists. This applies to a ContainerRequest with a container_uuid.
+
+  def started_at
+    t = get_combined(:started_at)
+    t = Time.parse(t) if (t.is_a? String)
+    t
+  end
+
+  def modified_at
+    t = get_combined(:modified_at)
+    t = Time.parse(t) if (t.is_a? String)
+    t
+  end
+
+  def finished_at
+    t = get_combined(:finished_at)
+    t = Time.parse(t) if (t.is_a? String)
+    t
+  end
+
+  def state_label
+    if get(:state) == 'Final' && get(:state, @container) != 'Complete'
+      # Request was finalized before its container started (or the
+      # container was cancelled)
+      return 'Cancelled'
+    end
+    state = get(:state, @container) || get(:state, @proxied)
+    case state
+    when 'Locked', 'Queued'
+      if priority == 0
+        'On hold'
+      else
+        'Queued'
+      end
+    when 'Complete'
+      if exit_code == 0
+        state
+      else
+        'Failed'
+      end
+    when 'Running'
+      if runtime_status[:error]
+        'Failing'
+      elsif runtime_status[:warning]
+        'Warning'
+      else
+        state
+      end
+    else
+      # Cancelled, or Uncommitted (no container assigned)
+      state
+    end
+  end
+
+  def runtime_status
+    return get(:runtime_status, @container) || get(:runtime_status, @proxied)
+  end
+
+  def state_bootstrap_class
+    case state_label
+    when 'Failing'
+      'danger'
+    when 'Warning'
+      'warning'
+    else
+      super
+    end
+  end
+
+  def exit_code
+    get_combined(:exit_code)
+  end
+
+  def docker_image
+    get_combined(:container_image)
+  end
+
+  def runtime_constraints
+    get_combined(:runtime_constraints)
+  end
+
+  def log_collection
+    if @proxied.is_a?(ContainerRequest)
+      get(:log_uuid)
+    else
+      get(:log)
+    end
+  end
+
+  def outputs
+    items = []
+    if @proxied.is_a?(ContainerRequest)
+      out = get(:output_uuid)
+    else
+      out = get(:output)
+    end
+    items << out if out
+    items
+  end
+
+  def command
+    get_combined(:command)
+  end
+
+  def cwd
+    get_combined(:cwd)
+  end
+
+  def environment
+    env = get_combined(:environment)
+    env = nil if env.andand.empty?
+    env
+  end
+
+  def mounts
+    mnt = get_combined(:mounts)
+    mnt = nil if mnt.andand.empty?
+    mnt
+  end
+
+  def output_path
+    get_combined(:output_path)
+  end
+
+  def log_object_uuids
+    [get(:uuid, @container), get(:uuid, @proxied)].compact
+  end
+
+  def render_log
+    collection = Collection.find(log_collection) rescue nil
+    if collection
+      return {log: collection, partial: 'collections/show_files', locals: {object: collection, no_checkboxes: true}}
+    end
+  end
+
+  def template_uuid
+    properties = get(:properties)
+    if properties
+      properties[:template_uuid]
+    end
+  end
+
+  # End combined properties
+
+  protected
+  def get_combined key
+    from_container = get(key, @container)
+    from_proxied = get(key, @proxied)
+
+    if from_container.is_a? Hash or from_container.is_a? Array
+      if from_container.any? then from_container else from_proxied end
+    else
+      from_container || from_proxied
+    end
+  end
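+
+  # Editor's note (not in the original source): for scalar attributes
+  # the container value wins when present; for Hash/Array attributes an
+  # empty container value falls back to the request. With a
+  # hypothetical request/container pair:
+  #
+  #   container exit_code 1,    request exit_code nil  -> 1
+  #   container environment {}, request {'A' => 'b'}   -> {'A' => 'b'}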
+end
diff --git a/apps/workbench/app/models/group.rb b/apps/workbench/app/models/group.rb
new file mode 100644 (file)
index 0000000..08b13bf
--- /dev/null
@@ -0,0 +1,47 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Group < ArvadosBase
+  def self.goes_in_projects?
+    true
+  end
+
+  def self.copies_to_projects?
+    false
+  end
+
+  def self.contents params={}
+    res = arvados_api_client.api self, "/contents", {
+      _method: 'GET'
+    }.merge(params)
+    ret = ArvadosResourceList.new
+    ret.results = arvados_api_client.unpack_api_response(res)
+    ret
+  end
+
+  def contents params={}
+    res = arvados_api_client.api self.class, "/#{self.uuid}/contents", {
+      _method: 'GET'
+    }.merge(params)
+    ret = ArvadosResourceList.new
+    ret.results = arvados_api_client.unpack_api_response(res)
+    ret
+  end
+
+  def class_for_display
+    group_class == 'project' ? 'Project' : super
+  end
+
+  def textile_attributes
+    [ 'description' ]
+  end
+
+  def self.creatable?
+    false
+  end
+
+  def untrash
+    arvados_api_client.api(self.class, "/#{self.uuid}/untrash", {"ensure_unique_name" => true})
+  end
+end
diff --git a/apps/workbench/app/models/human.rb b/apps/workbench/app/models/human.rb
new file mode 100644 (file)
index 0000000..c1acef5
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Human < ArvadosBase
+  def self.goes_in_projects?
+    true
+  end
+end
diff --git a/apps/workbench/app/models/job.rb b/apps/workbench/app/models/job.rb
new file mode 100644 (file)
index 0000000..7c55d9e
--- /dev/null
@@ -0,0 +1,63 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Job < ArvadosBase
+  def self.goes_in_projects?
+    true
+  end
+
+  def content_summary
+    "#{script} job"
+  end
+
+  def editable_attributes
+    %w(description)
+  end
+
+  def default_name
+    if script
+      x = "\"#{script}\" job"
+    else
+      x = super
+    end
+    if finished_at
+      x += " finished #{finished_at.strftime('%b %-d')}"
+    elsif started_at
+      x += " started #{started_at.strftime('%b %-d')}"
+    elsif created_at
+      x += " submitted #{created_at.strftime('%b %-d')}"
+    end
+  end
+
+  def cancel
+    arvados_api_client.api "jobs/#{self.uuid}/", "cancel", {"cascade" => true}
+  end
+
+  def self.queue_size
+    arvados_api_client.api("jobs/", "queue_size", {"_method"=> "GET"})[:queue_size] rescue 0
+  end
+
+  def self.queue
+    arvados_api_client.unpack_api_response arvados_api_client.api("jobs/", "queue", {"_method"=> "GET"})
+  end
+
+  def textile_attributes
+    [ 'description' ]
+  end
+
+  def stderr_log_query(limit=nil)
+    query = Log.where(object_uuid: self.uuid).order("created_at DESC").with_count('none')
+    query = query.limit(limit) if limit
+    query
+  end
+
+  def stderr_log_lines(limit=2000)
+    stderr_log_query(limit).results.reverse.
+      flat_map { |log| log.properties[:text].split("\n") rescue [] }
+  end
+
+  def work_unit(label=nil)
+    JobWorkUnit.new(self, label, self.uuid)
+  end
+end
diff --git a/apps/workbench/app/models/job_task.rb b/apps/workbench/app/models/job_task.rb
new file mode 100644 (file)
index 0000000..b10a2b0
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class JobTask < ArvadosBase
+  def work_unit(label=nil)
+    JobTaskWorkUnit.new(self, label, self.uuid)
+  end
+end
diff --git a/apps/workbench/app/models/job_task_work_unit.rb b/apps/workbench/app/models/job_task_work_unit.rb
new file mode 100644 (file)
index 0000000..f5cd526
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class JobTaskWorkUnit < ProxyWorkUnit
+  def title
+    "job task"
+  end
+end
diff --git a/apps/workbench/app/models/job_work_unit.rb b/apps/workbench/app/models/job_work_unit.rb
new file mode 100644 (file)
index 0000000..83825a5
--- /dev/null
@@ -0,0 +1,100 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class JobWorkUnit < ProxyWorkUnit
+  def children
+    return @my_children if @my_children
+
+    # Jobs components
+    items = []
+    components = get(:components)
+    uuids = components.andand.collect {|_, v| v}
+    return items if (!uuids or uuids.empty?)
+
+    rcs = {}
+    uuids.each do |u|
+      r = ArvadosBase::resource_class_for_uuid(u)
+      rcs[r] = [] unless rcs[r]
+      rcs[r] << u
+    end
+    rcs.each do |rc, ids|
+      rc.where(uuid: ids).each do |obj|
+        items << obj.work_unit(components.key(obj.uuid))
+      end
+    end
+
+    @my_children = items
+  end
+
+  def child_summary
+    if children.any?
+      super
+    else
+      get(:tasks_summary)
+    end
+  end
+
+  def parameters
+    get(:script_parameters)
+  end
+
+  def repository
+    get(:repository)
+  end
+
+  def script
+    get(:script)
+  end
+
+  def script_version
+    get(:script_version)
+  end
+
+  def supplied_script_version
+    get(:supplied_script_version)
+  end
+
+  def docker_image
+    get(:docker_image_locator)
+  end
+
+  def nondeterministic
+    get(:nondeterministic)
+  end
+
+  def runtime_constraints
+    get(:runtime_constraints)
+  end
+
+  def priority
+    get(:priority)
+  end
+
+  def log_collection
+    get(:log)
+  end
+
+  def outputs
+    items = []
+    items << get(:output) if get(:output)
+    items
+  end
+
+  def can_cancel?
+    state_label.in? ["Queued", "Running"]
+  end
+
+  def confirm_cancellation
+    "All unfinished child jobs and pipelines will also be canceled, even if they are being used in another job or pipeline. Are you sure you want to cancel this job?"
+  end
+
+  def uri
+    uuid = get(:uuid)
+    "/jobs/#{uuid}"
+  end
+
+  def title
+    "job"
+  end
+end
diff --git a/apps/workbench/app/models/keep_disk.rb b/apps/workbench/app/models/keep_disk.rb
new file mode 100644 (file)
index 0000000..f4fea2c
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class KeepDisk < ArvadosBase
+  def self.creatable?
+    false
+  end
+end
diff --git a/apps/workbench/app/models/keep_service.rb b/apps/workbench/app/models/keep_service.rb
new file mode 100644 (file)
index 0000000..2fea18a
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class KeepService < ArvadosBase
+  def self.creatable?
+    false
+  end
+end
diff --git a/apps/workbench/app/models/link.rb b/apps/workbench/app/models/link.rb
new file mode 100644 (file)
index 0000000..920b4bd
--- /dev/null
@@ -0,0 +1,29 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Link < ArvadosBase
+  attr_accessor :head
+  attr_accessor :tail
+  def self.by_tail(t, opts={})
+    where(opts.merge :tail_uuid => t.uuid)
+  end
+
+  def default_name
+    self.class.resource_class_for_uuid(head_uuid).default_name rescue super
+  end
+
+  def self.permissions_for(thing)
+    if thing.respond_to? :uuid
+      uuid = thing.uuid
+    else
+      uuid = thing
+    end
+    result = arvados_api_client.api("permissions", "/#{uuid}")
+    arvados_api_client.unpack_api_response(result)
+  end
+
+  def self.creatable?
+    false
+  end
+end
diff --git a/apps/workbench/app/models/log.rb b/apps/workbench/app/models/log.rb
new file mode 100644 (file)
index 0000000..6bbefa1
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Log < ArvadosBase
+  attr_accessor :object
+  def self.creatable?
+    # Technically yes, but not worth offering: it will be empty, and
+    # you won't be able to edit it.
+    false
+  end
+end
diff --git a/apps/workbench/app/models/node.rb b/apps/workbench/app/models/node.rb
new file mode 100644 (file)
index 0000000..785cc4f
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Node < ArvadosBase
+  def self.creatable?
+    false
+  end
+  def friendly_link_name lookup=nil
+    (hostname && !hostname.empty?) ? hostname : uuid
+  end
+end
diff --git a/apps/workbench/app/models/pipeline_instance.rb b/apps/workbench/app/models/pipeline_instance.rb
new file mode 100644 (file)
index 0000000..dd2cc0a
--- /dev/null
@@ -0,0 +1,153 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require "arvados/keep"
+
+class PipelineInstance < ArvadosBase
+  attr_accessor :pipeline_template
+
+  def self.goes_in_projects?
+    true
+  end
+
+  def friendly_link_name lookup=nil
+    pipeline_name = self.name
+    if pipeline_name.nil? or pipeline_name.empty?
+      template = if lookup and lookup[self.pipeline_template_uuid]
+                   lookup[self.pipeline_template_uuid]
+                 else
+                   PipelineTemplate.find?(self.pipeline_template_uuid) if self.pipeline_template_uuid
+                 end
+      if template
+        template.name
+      else
+        self.uuid
+      end
+    else
+      pipeline_name
+    end
+  end
+
+  def content_summary
+    begin
+      PipelineTemplate.find(pipeline_template_uuid).name
+    rescue
+      super
+    end
+  end
+
+  def update_job_parameters(new_params)
+    self.components[:steps].each_with_index do |step, i|
+      step[:params].each do |param|
+        if new_params.has_key?(new_param_name = "#{i}/#{param[:name]}") or
+            new_params.has_key?(new_param_name = "#{step[:name]}/#{param[:name]}") or
+            new_params.has_key?(new_param_name = param[:name])
+          param_type = :value
+          %w(hash data_locator).collect(&:to_sym).each do |ptype|
+            param_type = ptype if param.has_key? ptype
+          end
+          param[param_type] = new_params[new_param_name]
+        end
+      end
+    end
+  end
+
+  def editable_attributes
+    %w(name description components)
+  end
+
+  def attribute_editable?(name, ever=nil)
+    if name.to_s == "components"
+      (ever or %w(New Ready).include?(state)) and super
+    else
+      super
+    end
+  end
+
+  def attributes_for_display
+    super.reject { |k,v| k == 'components' }
+  end
+
+  def self.creatable?
+    false
+  end
+
+  def component_input_title(component_name, input_name)
+    component = components[component_name]
+    return nil if component.nil?
+    param_info = component[:script_parameters].andand[input_name.to_sym]
+    if param_info.is_a?(Hash) and param_info[:title]
+      param_info[:title]
+    else
+      "\"#{input_name.to_s}\" parameter for #{component[:script]} script in #{component_name} component"
+    end
+  end
+
+  def textile_attributes
+    [ 'description' ]
+  end
+
+  def job_uuids
+    components_map { |cspec| cspec[:job][:uuid] rescue nil }
+  end
+
+  def job_log_ids
+    components_map { |cspec| cspec[:job][:log] rescue nil }
+  end
+
+  def job_ids
+    components_map { |cspec| cspec[:job][:uuid] rescue nil }
+  end
+
+  def stderr_log_object_uuids
+    result = job_uuids.values.compact
+    result << uuid
+  end
+
+  def stderr_log_query(limit=nil)
+    query = Log.
+            with_count('none').
+            where(event_type: "stderr",
+                  object_uuid: stderr_log_object_uuids).
+            order("created_at DESC")
+    unless limit.nil?
+      query = query.limit(limit)
+    end
+    query
+  end
+
+  def stderr_log_lines(limit=2000)
+    stderr_log_query(limit).results.reverse.
+      flat_map { |log| log.properties[:text].split("\n") rescue [] }
+  end
+
+  def has_readable_logs?
+    log_pdhs, log_uuids = job_log_ids.values.compact.partition do |loc_s|
+      Keep::Locator.parse(loc_s)
+    end
+    if log_pdhs.any? and
+        Collection.where(portable_data_hash: log_pdhs).limit(1).results.any?
+      true
+    elsif log_uuids.any? and
+        Collection.where(uuid: log_uuids).limit(1).results.any?
+      true
+    else
+      stderr_log_query(1).results.any?
+    end
+  end
+
+  def work_unit(label=nil)
+    PipelineInstanceWorkUnit.new(self, label || self.name, self.uuid)
+  end
+
+  def cancel
+    arvados_api_client.api "pipeline_instances/#{self.uuid}/", "cancel", {"cascade" => true}
+  end
+
+  private
+
+  def components_map
+    Hash[components.map { |cname, cspec| [cname, yield(cspec)] }]
+  end
+end
diff --git a/apps/workbench/app/models/pipeline_instance_work_unit.rb b/apps/workbench/app/models/pipeline_instance_work_unit.rb
new file mode 100644 (file)
index 0000000..a9bc9cf
--- /dev/null
@@ -0,0 +1,80 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class PipelineInstanceWorkUnit < ProxyWorkUnit
+  def children
+    return @my_children if @my_children
+
+    items = []
+
+    jobs = {}
+    results = Job.where(uuid: @proxied.job_ids.values).results
+    results.each do |j|
+      jobs[j.uuid] = j
+    end
+
+    components = get(:components)
+    components.each do |name, c|
+      if c.is_a?(Hash)
+        job = c[:job]
+        if job
+          if job[:uuid] and jobs[job[:uuid]]
+            items << jobs[job[:uuid]].work_unit(name)
+          else
+            items << JobWorkUnit.new(job, name, uuid)
+          end
+        else
+          items << JobWorkUnit.new(c, name, uuid)
+        end
+      else
+        @unreadable_children = true
+        break
+      end
+    end
+
+    @my_children = items
+  end
+
+  def outputs
+    items = []
+    components = get(:components)
+    components.each do |name, c|
+      if c.is_a?(Hash)
+        items << c[:output_uuid] if c[:output_uuid]
+      end
+    end
+    items
+  end
+
+  def uri
+    uuid = get(:uuid)
+    "/pipeline_instances/#{uuid}"
+  end
+
+  def title
+    "pipeline"
+  end
+
+  def template_uuid
+    get(:pipeline_template_uuid)
+  end
+
+  def state_label
+    if get(:state) != "Failed"
+      return super
+    end
+    if get(:components_summary).andand[:failed].andand > 0
+      return super
+    end
+    # Show "Cancelled" instead of "Failed" if there are no failed
+    # components. #12840
+    get(:components).each do |_, c|
+      jstate = c[:job][:state] rescue nil
+      if jstate == "Failed"
+        return "Failed"
+      end
+    end
+    "Cancelled"
+  end
+end
diff --git a/apps/workbench/app/models/pipeline_template.rb b/apps/workbench/app/models/pipeline_template.rb
new file mode 100644 (file)
index 0000000..bce0f08
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class PipelineTemplate < ArvadosBase
+  def self.goes_in_projects?
+    true
+  end
+
+  def self.creatable?
+    false
+  end
+
+  def textile_attributes
+    [ 'description' ]
+  end
+end
diff --git a/apps/workbench/app/models/proxy_work_unit.rb b/apps/workbench/app/models/proxy_work_unit.rb
new file mode 100644 (file)
index 0000000..adf0bd7
--- /dev/null
@@ -0,0 +1,339 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ProxyWorkUnit < WorkUnit
+  require 'time'
+
+  attr_accessor :lbl
+  attr_accessor :proxied
+  attr_accessor :my_children
+  attr_accessor :unreadable_children
+
+  def initialize proxied, label, parent
+    @lbl = label
+    @proxied = proxied
+    @parent = parent
+  end
+
+  def label
+    @lbl
+  end
+
+  def uuid
+    get(:uuid)
+  end
+
+  def parent
+    @parent
+  end
+
+  def modified_by_user_uuid
+    get(:modified_by_user_uuid)
+  end
+
+  def owner_uuid
+    get(:owner_uuid)
+  end
+
+  def created_at
+    t = get(:created_at)
+    t = Time.parse(t) if (t.is_a? String)
+    t
+  end
+
+  def started_at
+    t = get(:started_at)
+    t = Time.parse(t) if (t.is_a? String)
+    t
+  end
+
+  def modified_at
+    t = get(:modified_at)
+    t = Time.parse(t) if (t.is_a? String)
+    t
+  end
+
+  def finished_at
+    t = get(:finished_at)
+    t = Time.parse(t) if (t.is_a? String)
+    t
+  end
+
+  def state_label
+    state = get(:state)
+    if ["Running", "RunningOnServer", "RunningOnClient"].include? state
+      "Running"
+    elsif state == 'New'
+      "Not started"
+    else
+      state
+    end
+  end
+
+  def state_bootstrap_class
+    state = state_label
+    case state
+    when 'Complete'
+      'success'
+    when 'Failed', 'Cancelled'
+      'danger'
+    when 'Running', 'RunningOnServer', 'RunningOnClient'
+      'info'
+    else
+      'default'
+    end
+  end
+
+  def success?
+    state = state_label
+    if state == 'Complete'
+      true
+    elsif state == 'Failed' or state == 'Cancelled'
+      false
+    else
+      nil
+    end
+  end
+
+  def child_summary
+    done = 0
+    failed = 0
+    todo = 0
+    running = 0
+    children.each do |c|
+      case c.state_label
+      when 'Complete'
+        done = done+1
+      when 'Failed', 'Cancelled'
+        failed = failed+1
+      when 'Running'
+        running = running+1
+      else
+        todo = todo+1
+      end
+    end
+
+    summary = {}
+    summary[:done] = done
+    summary[:failed] = failed
+    summary[:todo] = todo
+    summary[:running] = running
+    summary
+  end
+
+  def child_summary_str
+    summary = child_summary
+    summary_txt = ''
+
+    if state_label == 'Running'
+      done = summary[:done] || 0
+      running = summary[:running] || 0
+      failed = summary[:failed] || 0
+      todo = summary[:todo] || 0
+      total = done + running + failed + todo
+
+      if total > 0
+        summary_txt += "#{summary[:done]} #{'child'.pluralize(summary[:done])} done,"
+        summary_txt += "#{summary[:failed]} failed,"
+        summary_txt += "#{summary[:running]} running,"
+        summary_txt += "#{summary[:todo]} pending"
+      end
+    end
+    summary_txt
+  end
+
+  def progress
+    state = state_label
+    if state == 'Complete'
+      return 1.0
+    elsif state == 'Failed' or state == 'Cancelled'
+      return 0.0
+    end
+
+    summary = child_summary
+    return 0.0 if summary.nil?
+
+    done = summary[:done] || 0
+    running = summary[:running] || 0
+    failed = summary[:failed] || 0
+    todo = summary[:todo] || 0
+    total = done + running + failed + todo
+    if total > 0
+      (done+failed).to_f / total
+    else
+      0.0
+    end
+  end
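+
+  # Editor's note (not in the original source): failed children count as
+  # finished above, so the bar keeps advancing past failures. E.g. 3
+  # done, 1 failed, 2 running, 2 pending gives (3+1)/8 = 0.5.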
+
+  def children
+    []
+  end
+
+  def outputs
+    []
+  end
+
+  def title
+    "process"
+  end
+
+  def has_unreadable_children
+    @unreadable_children
+  end
+
+  def walltime
+    if state_label != "Queued"
+      if started_at
+        ((finished_at || Time.now) - started_at)
+      end
+    end
+  end
+
+  def cputime
+    if children.any?
+      children.map { |c|
+        c.cputime
+      }.reduce(:+) || 0
+    else
+      if started_at
+        (runtime_constraints.andand[:min_nodes] || 1).to_i * ((finished_at || Time.now()) - started_at)
+      else
+        0
+      end
+    end
+  end
+
+  def queuedtime
+    if state_label == "Queued"
+      Time.now - Time.parse(created_at.to_s)
+    end
+  end
+
+  def is_running?
+    state_label == 'Running'
+  end
+
+  def is_paused?
+    state_label == 'Paused'
+  end
+
+  def is_finished?
+    state_label.in? ["Complete", "Failed", "Cancelled"]
+  end
+
+  def is_failed?
+    state_label == 'Failed'
+  end
+
+  def runtime_contributors
+    contributors = []
+    if children.any?
+      children.each{|c| contributors << c.runtime_contributors}
+    else
+      contributors << self
+    end
+    contributors.flatten
+  end
+
+  def runningtime
+    ApplicationController.helpers.determine_wallclock_runtime runtime_contributors
+  end
+
+  def show_runtime
+    walltime = 0
+    running_time = runningtime
+    if started_at
+      walltime = (finished_at || Time.now) - started_at
+    end
+    resp = '<p>'
+
+    if started_at
+      resp << "This #{title} started at "
+      resp << ApplicationController.helpers.render_localized_date(started_at)
+      resp << ". It "
+      if state_label == 'Complete'
+        resp << "completed in "
+      elsif state_label == 'Failed'
+        resp << "failed after "
+      elsif state_label == 'Cancelled'
+        resp << "was cancelled after "
+      else
+        resp << "has been active for "
+      end
+
+      resp << ApplicationController.helpers.render_time(walltime, false)
+
+      if finished_at
+        resp << " at "
+        resp << ApplicationController.helpers.render_localized_date(finished_at)
+      end
+      resp << "."
+    else
+      if state_label
+        resp << "This #{title} is "
+        resp << if state_label == 'Running' then 'active' else state_label.downcase end
+        resp << "."
+      end
+    end
+
+    if is_failed?
+      if runtime_status.andand[:error]
+        resp << " Check the error information below."
+      else
+        resp << " Check the Log tab for more detail about why it failed."
+      end
+    end
+    resp << "</p>"
+
+    resp << "<p>"
+    if state_label
+      resp << "It has runtime of "
+
+      cpu_time = cputime
+
+      resp << ApplicationController.helpers.render_time(running_time, false)
+      if (walltime - running_time) > 0
+        resp << " ("
+        resp << ApplicationController.helpers.render_time(walltime - running_time, false)
+        resp << " queued)"
+      end
+      if cpu_time == 0
+        resp << "."
+      else
+        resp << " and used "
+        resp << ApplicationController.helpers.render_time(cpu_time, false)
+        resp << " of node allocation time ("
+        resp << (cpu_time/running_time).round(1).to_s
+        resp << "&Cross; scaling)."
+      end
+    end
+    resp << "</p>"
+
+    resp
+  end
+
+  def log_object_uuids
+    [uuid]
+  end
+
+  def live_log_lines(limit)
+    Log.where(object_uuid: log_object_uuids).
+      order("created_at DESC").
+      limit(limit).
+      with_count('none').
+      select { |log| log.properties[:text].is_a? String }.
+      reverse.
+      flat_map { |log| log.properties[:text].split("\n") }
+  end
+
+  protected
+
+  def get key, obj=@proxied
+    if obj.respond_to? key
+      obj.send(key)
+    elsif obj.is_a?(Hash)
+      obj[key] || obj[key.to_s]
+    end
+  end
+end
diff --git a/apps/workbench/app/models/repository.rb b/apps/workbench/app/models/repository.rb
new file mode 100644 (file)
index 0000000..6e8b68b
--- /dev/null
@@ -0,0 +1,118 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Repository < ArvadosBase
+  def self.creatable?
+    false
+  end
+  def attributes_for_display
+    super.reject { |x| x[0] == 'fetch_url' }
+  end
+  def editable_attributes
+    if current_user.is_admin
+      super
+    else
+      []
+    end
+  end
+
+  def show commit_sha1
+    refresh
+    run_git 'show', commit_sha1
+  end
+
+  def cat_file commit_sha1, path
+    refresh
+    run_git 'cat-file', 'blob', commit_sha1 + ':' + path
+  end
+
+  def ls_tree_lr commit_sha1
+    refresh
+    run_git 'ls-tree', '-l', '-r', commit_sha1
+  end
+
+  # ls_subtree returns a list of files under the given path at the
+  # specified commit. Results are returned as an array of file nodes,
+  # where each file node is an array [file mode, blob sha1, file size
+  # in bytes, path relative to the given directory]. If the path is
+  # not found, [] is returned.
+  def ls_subtree commit, path
+    path = path.chomp '/'
+    subtree = []
+    ls_tree_lr(commit).each_line do |line|
+      mode, type, sha1, size, filepath = line.split
+      next if type != 'blob'
+      if filepath[0,path.length] == path and
+          (path == '' or filepath[path.length] == '/')
+        subtree << [mode.to_i(8), sha1, size.to_i,
+                    filepath[path.length,filepath.length]]
+      end
+    end
+    subtree
+  end
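+
+  # A hypothetical call (commit, path, and values illustrative only):
+  #   repo.ls_subtree('1234abcd', 'docs')
+  #   # => [[33188, "d670460b4b4aece5915caf5c68d12f560a9fe3e4", 42, "/intro.md"]]
+  # where 33188 is octal file mode 100644 converted to an integer.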
+
+  # http_fetch_url returns the first http:// or https:// url (if any)
+  # in the api response's clone_urls attribute.
+  def http_fetch_url
+    clone_urls.andand.find { |u| /^http/ =~ u }
+  end
+
+  protected
+
+  # refresh fetches the latest repository content into the local
+  # cache. It is a no-op if it has already been run on this object:
+  # this (pretty much) avoids doing more than one remote git operation
+  # per Workbench request.
+  def refresh
+    run_git 'fetch', http_fetch_url, '+*:*' unless @fresh
+    @fresh = true
+  end
+
+  # run_git sets up the ARVADOS_API_TOKEN environment variable,
+  # creates a local git directory for this repository if necessary,
+  # executes "git --git-dir localgitdir {args to run_git}", and
+  # returns the output. It raises GitCommandError if git exits
+  # non-zero.
+  def run_git *gitcmd
+    if not @workdir
+      workdir = File.expand_path uuid+'.git', Rails.configuration.repository_cache
+      if not File.exist? workdir
+        FileUtils.mkdir_p Rails.configuration.repository_cache
+        [['git', 'init', '--bare', workdir],
+        ].each do |cmd|
+          system *cmd
+          raise GitCommandError.new($?.to_s) unless $?.exitstatus == 0
+        end
+      end
+      @workdir = workdir
+    end
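+    # Before running the requested command, configure this cached repo
+    # to authenticate with ARVADOS_API_TOKEN via a one-shot credential
+    # helper (username "none"), and to honor the cluster's TLS
+    # verification setting.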
+    [['git', '--git-dir', @workdir, 'config', '--local',
+      "credential.#{http_fetch_url}.username", 'none'],
+     ['git', '--git-dir', @workdir, 'config', '--local',
+      "credential.#{http_fetch_url}.helper",
+      '!cred(){ cat >/dev/null; if [ "$1" = get ]; then echo password=$ARVADOS_API_TOKEN; fi; };cred'],
+     ['git', '--git-dir', @workdir, 'config', '--local',
+           'http.sslVerify',
+           Rails.configuration.arvados_insecure_https ? 'false' : 'true'],
+     ].each do |cmd|
+      system *cmd
+      raise GitCommandError.new($?.to_s) unless $?.exitstatus == 0
+    end
+    env = {}.
+      merge(ENV).
+      merge('ARVADOS_API_TOKEN' => Thread.current[:arvados_api_token])
+    cmd = ['git', '--git-dir', @workdir] + gitcmd
+    io = IO.popen(env, cmd, err: [:child, :out])
+    output = io.read
+    io.close
+    # "If [io] is opened by IO.popen, close sets $?." --ruby 2.2.1 docs
+    unless $?.exitstatus == 0
+      raise GitCommandError.new("`git #{gitcmd.join ' '}` #{$?}: #{output}")
+    end
+    output
+  end
+
+  class GitCommandError < StandardError
+  end
+end
diff --git a/apps/workbench/app/models/specimen.rb b/apps/workbench/app/models/specimen.rb
new file mode 100644 (file)
index 0000000..4418f7c
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Specimen < ArvadosBase
+  def self.goes_in_projects?
+    true
+  end
+end
diff --git a/apps/workbench/app/models/trait.rb b/apps/workbench/app/models/trait.rb
new file mode 100644 (file)
index 0000000..421a107
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Trait < ArvadosBase
+  def self.goes_in_projects?
+    true
+  end
+end
diff --git a/apps/workbench/app/models/user.rb b/apps/workbench/app/models/user.rb
new file mode 100644 (file)
index 0000000..865ff6e
--- /dev/null
@@ -0,0 +1,115 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class User < ArvadosBase
+  def initialize(*args)
+    super(*args)
+    @attribute_sortkey['first_name'] = '050'
+    @attribute_sortkey['last_name'] = '051'
+  end
+
+  def self.current
+    res = arvados_api_client.api self, '/current', nil, {}, false
+    arvados_api_client.unpack_api_response(res)
+  end
+
+  def self.merge new_user_token, direction
+    # Merge user accounts.
+    #
+    # If the direction is "in", the current user is merged into the
+    # user represented by new_user_token
+    #
+    # If the direction is "out", the user represented by new_user_token
+    # is merged into the current user.
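+    #
+    # A hypothetical call (token value illustrative only):
+    #   User.merge(other_user_token, "in")
+    # merges the current account into the account that owns
+    # other_user_token.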
+
+    if direction == "in"
+      user_a = new_user_token
+      user_b = Thread.current[:arvados_api_token]
+      new_group_name = "Migrated from #{Thread.current[:user].email} (#{Thread.current[:user].uuid})"
+    elsif direction == "out"
+      user_a = Thread.current[:arvados_api_token]
+      user_b = new_user_token
+      res = arvados_api_client.api self, '/current', nil, {:arvados_api_token => user_b}, false
+      user_b_info = arvados_api_client.unpack_api_response(res)
+      new_group_name = "Migrated from #{user_b_info.email} (#{user_b_info.uuid})"
+    else
+      raise "Invalid merge direction, expected 'in' or 'out'"
+    end
+
+    # Create a project owned by user_a to accept everything owned by user_b
+    res = arvados_api_client.api Group, nil, {:group => {
+                                                :name => new_group_name,
+                                                :group_class => "project"},
+                                              :ensure_unique_name => true},
+                                 {:arvados_api_token => user_a}, false
+    target = arvados_api_client.unpack_api_response(res)
+
+    # The merge API merges the "current" user (user_b) into the user
+    # represented by "new_user_token" (user_a).
+    # After merging, the user_b redirects to user_a.
+    res = arvados_api_client.api self, '/merge', {:new_user_token => user_a,
+                                                  :new_owner_uuid => target[:uuid],
+                                                  :redirect_to_new_user => true},
+                                 {:arvados_api_token => user_b}, false
+    arvados_api_client.unpack_api_response(res)
+  end
+
+  def self.system
+    @@arvados_system_user ||= begin
+                                res = arvados_api_client.api self, '/system'
+                                arvados_api_client.unpack_api_response(res)
+                              end
+  end
+
+  def full_name
+    (self.first_name || "") + " " + (self.last_name || "")
+  end
+
+  def activate
+    self.private_reload(arvados_api_client.api(self.class,
+                                               "/#{self.uuid}/activate",
+                                               {}))
+  end
+
+  def contents params={}
+    Group.contents params.merge(uuid: self.uuid)
+  end
+
+  def attributes_for_display
+    super.reject { |k,v| %w(owner_uuid default_owner_uuid identity_url prefs).index k }
+  end
+
+  def attribute_editable?(attr, ever=nil)
+    (ever or not (self.uuid.andand.match(/000000000000000$/) and
+                  self.is_admin)) and super
+  end
+
+  def friendly_link_name lookup=nil
+    [self.first_name, self.last_name].compact.join ' '
+  end
+
+  def unsetup
+    self.private_reload(arvados_api_client.api(self.class,
+                                               "/#{self.uuid}/unsetup",
+                                               {}))
+  end
+
+  def self.setup params
+    arvados_api_client.api(self, "/setup", params)
+  end
+
+  def update_profile params
+    self.private_reload(arvados_api_client.api(self.class,
+                                               "/#{self.uuid}/profile",
+                                               params))
+  end
+
+  def deletable?
+    false
+  end
+
+  def self.creatable?
+    current_user and current_user.is_admin
+  end
+end
diff --git a/apps/workbench/app/models/user_agreement.rb b/apps/workbench/app/models/user_agreement.rb
new file mode 100644 (file)
index 0000000..fbba426
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class UserAgreement < ArvadosBase
+  def self.signatures
+    res = arvados_api_client.api self, '/signatures'
+    arvados_api_client.unpack_api_response(res)
+  end
+  def self.sign(params)
+    res = arvados_api_client.api self, '/sign', params
+    arvados_api_client.unpack_api_response(res)
+  end
+end
diff --git a/apps/workbench/app/models/virtual_machine.rb b/apps/workbench/app/models/virtual_machine.rb
new file mode 100644 (file)
index 0000000..a81d76f
--- /dev/null
@@ -0,0 +1,30 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class VirtualMachine < ArvadosBase
+  attr_accessor :current_user_logins
+
+  def self.creatable?
+    false
+  end
+
+  def attributes_for_display
+    super.append ['current_user_logins', @current_user_logins]
+  end
+
+  def editable_attributes
+    super - %w(current_user_logins)
+  end
+
+  def self.attribute_info
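+    # Deep-merge our extra column metadata into the superclass's
+    # attribute info; the lambda recurses into nested hashes.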
+    merger = ->(k,a,b) { a.merge(b, &merger) }
+    merger [nil,
+            {current_user_logins: {column_heading: "logins", type: 'array'}},
+            super]
+  end
+
+  def friendly_link_name lookup=nil
+    (hostname && !hostname.empty?) ? hostname : uuid
+  end
+end
diff --git a/apps/workbench/app/models/work_unit.rb b/apps/workbench/app/models/work_unit.rb
new file mode 100644 (file)
index 0000000..493dd2f
--- /dev/null
@@ -0,0 +1,218 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class WorkUnit
+  # This is an abstract class that documents the WorkUnit interface
+
+  def label
+    # returns the label that was assigned when creating the work unit
+  end
+
+  def uuid
+    # returns the arvados UUID of the underlying object
+  end
+
+  def parent
+    # returns the parent uuid of this work unit
+  end
+
+  def children
+    # returns an array of child work units
+  end
+
+  def modified_by_user_uuid
+    # returns uuid of the user who modified this work unit most recently
+  end
+
+  def owner_uuid
+    # returns uuid of the owner of this work unit
+  end
+
+  def created_at
+    # returns created_at timestamp
+  end
+
+  def modified_at
+    # returns modified_at timestamp
+  end
+
+  def started_at
+    # returns started_at timestamp for this work unit
+  end
+
+  def finished_at
+    # returns finished_at timestamp
+  end
+
+  def state_label
+    # returns a string representing state of the work unit
+  end
+
+  def exit_code
+    # returns the work unit's execution exit code
+  end
+
+  def state_bootstrap_class
+    # returns a Bootstrap contextual class such as "danger", "success", or "warning" that a view can use directly as a display class
+  end
+
+  def success?
+    # returns true if the work unit finished successfully,
+    # false if it has a permanent failure,
+    # and nil if the final state is not determined.
+  end
+
+  def progress
+    # returns a number between 0 and 1
+  end
+
+  def log_collection
+    # returns uuid or pdh with saved log data, if any
+  end
+
+  def parameters
+    # returns work unit parameters, if any
+  end
+
+  def script
+    # returns script for this work unit, if any
+  end
+
+  def repository
+    # returns this work unit's script repository, if any
+  end
+
+  def script_version
+    # returns this work unit's script_version, if any
+  end
+
+  def supplied_script_version
+    # returns this work unit's supplied_script_version, if any
+  end
+
+  def docker_image
+    # returns this work unit's docker_image, if any
+  end
+
+  def runtime_constraints
+    # returns this work unit's runtime_constraints, if any
+  end
+
+  def priority
+    # returns this work unit's priority, if any
+  end
+
+  def nondeterministic
+    # returns true if this work unit is nondeterministic
+  end
+
+  def outputs
+    # returns array containing uuid or pdh of output data
+  end
+
+  def child_summary
+    # summary status of any children of this work unit
+  end
+
+  def child_summary_str
+    # textual representation of child summary
+  end
+
+  def can_cancel?
+    # returns true if this work unit can be cancelled
+  end
+
+  def confirm_cancellation
+    # returns true if this work unit should prompt for confirmation before cancellation
+  end
+
+  def uri
+    # returns the uri for this work unit
+  end
+
+  def title
+    # title for the work unit
+  end
+
+  def has_unreadable_children
+    # returns true if this work unit has children that the current user cannot read
+  end
+
+  # view helper methods
+  def walltime
+    # return walltime for a running or completed work unit
+  end
+
+  def cputime
+    # return cputime for a running or completed work unit
+  end
+
+  def queuedtime
+    # return queued time if the work unit is queued
+  end
+
+  def is_running?
+    # is the work unit in running state?
+  end
+
+  def is_paused?
+    # is the work unit in paused state?
+  end
+
+  def is_finished?
+    # is the work unit in finished state?
+  end
+
+  def is_failed?
+    # is this work unit in failed state?
+  end
+
+  def command
+    # command to execute
+  end
+
+  def cwd
+    # initial working directory
+  end
+
+  def environment
+    # environment variables
+  end
+
+  def mounts
+    # mounts
+  end
+
+  def output_path
+    # path to a directory or file to save output
+  end
+
+  def container_uuid
+    # container_uuid of a container_request
+  end
+
+  def requesting_container_uuid
+    # requesting_container_uuid of a container_request
+  end
+
+  def log_object_uuids
+    # object uuids for live log
+  end
+
+  def live_log_lines(limit)
+    # fetch log entries from logs table for @proxied
+  end
+
+  def render_log
+    # return partial and locals to be rendered
+  end
+
+  def template_uuid
+    # return the uuid of this work unit's template, if one exists
+  end
+
+  def runtime_status
+    # returns this work unit's runtime_status, if any
+  end
+end
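+
+# A minimal sketch of a conforming implementation (illustrative only;
+# real implementations, like the proxy work unit above, wrap an API
+# object):
+#
+#   class SleepWorkUnit < WorkUnit
+#     def label; "sleep"; end
+#     def state_label; "Complete"; end
+#     def success?; true; end
+#   end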
diff --git a/apps/workbench/app/models/workflow.rb b/apps/workbench/app/models/workflow.rb
new file mode 100644 (file)
index 0000000..31d433e
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Workflow < ArvadosBase
+  def self.goes_in_projects?
+    true
+  end
+
+  def self.creatable?
+    false
+  end
+
+  def textile_attributes
+    [ 'description' ]
+  end
+end
diff --git a/apps/workbench/app/views/api_client_authorizations/_show_help.html.erb b/apps/workbench/app/views/api_client_authorizations/_show_help.html.erb
new file mode 100644 (file)
index 0000000..0118390
--- /dev/null
@@ -0,0 +1,18 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<pre>
+### Pasting the following lines at a shell prompt will allow Arvados SDKs
+### to authenticate to your account, <%= current_user.email %>
+
+read ARVADOS_API_TOKEN &lt;&lt;EOF
+<%= Thread.current[:arvados_api_token] %>
+EOF
+export ARVADOS_API_TOKEN ARVADOS_API_HOST=<%= current_api_host %>
+<% if Rails.configuration.arvados_insecure_https %>
+export ARVADOS_API_HOST_INSECURE=true
+<% else %>
+unset ARVADOS_API_HOST_INSECURE
+<% end %>
+</pre>
diff --git a/apps/workbench/app/views/application/404.html.erb b/apps/workbench/app/views/application/404.html.erb
new file mode 100644 (file)
index 0000000..61cbd67
--- /dev/null
@@ -0,0 +1,107 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+   if (controller.andand.action_name == 'show') and params[:uuid]
+     check_trash = controller.model_class.include_trash(true).where(uuid: params[:uuid])
+     class_name = controller.model_class.to_s.underscore
+     class_name_h = class_name.humanize(capitalize: false)
+     req_item = safe_join([class_name_h, " with UUID ",
+                             raw("<code>"), params[:uuid], raw("</code>")], "")
+     req_item_plain_text = safe_join([class_name_h, " with UUID ", params[:uuid]])
+   else
+     req_item = "page you requested"
+     req_item_plain_text = "page you requested"
+   end
+%>
+
+  <% untrash_object = nil %>
+
+  <% if check_trash.andand.any? %>
+    <% object = check_trash.first %>
+    <% if object.respond_to?(:is_trashed) && object.is_trashed %>
+      <% untrash_object = object %>
+    <% else %>
+      <% owner = object %>
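+      <%# Walk up the ownership chain looking for a trashed ancestor
+          project that explains why the object is unreadable. %>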
+      <% while true %>
+        <% owner = Group.where(uuid: owner.owner_uuid).include_trash(true).first %>
+        <% if owner.nil? %>
+          <% break %>
+        <% end %>
+        <% if owner.is_trashed %>
+          <% untrash_object = owner %>
+          <% break %>
+        <% end %>
+      <% end %>
+    <% end %>
+  <% end %>
+
+  <% if !untrash_object.nil? %>
+    <h2>Trashed</h2>
+
+      <% untrash_name = if !untrash_object.name.blank? then
+                 "'#{untrash_object.name}'"
+                 else
+                 untrash_object.uuid
+               end %>
+
+    <p>The <%= req_item %> is
+      <% if untrash_object == object %>
+        in the trash.
+      <% else %>
+        owned by trashed project <%= untrash_name %> (<code><%= untrash_object.uuid %></code>).
+      <% end %>
+    </p>
+
+    <p>
+      It will be permanently deleted at <%= render_localized_date(untrash_object.delete_at) %>.
+    </p>
+
+  <p>
+    <% if untrash_object != object %>
+      You must untrash the owner project to access this <%= class_name_h %>.
+    <% end %>
+      <% if untrash_object.is_trashed and untrash_object.editable? %>
+        <% msg = "Untrash '#{untrash_name}'?" %>
+        <%= link_to({action: 'untrash_items', selection: [untrash_object.uuid], controller: :trash_items}, remote: true, method: :post,
+        title: "Untrash", style: 'cursor: pointer;') do %>
+
+        <% end %>
+
+        <%= form_tag url_for({action: 'untrash_items', controller: :trash_items}), {method: :post} do %>
+          <%= hidden_field_tag :selection, [untrash_object.uuid] %>
+          <button type="submit">Click here to untrash <%= untrash_name %> <i class="fa fa-fw fa-recycle"></i></button>
+        <% end %>
+      <% end %>
+    </p>
+
+  <% else %>
+
+<h2>Not Found</h2>
+
+<p>The <%= req_item %> was not found.</p>
+
+<% if !current_user %>
+
+  <p>
+    <%= link_to(arvados_api_client.arvados_login_url(return_to: strip_token_from_path(request.url)),
+                {class: "btn btn-primary report-issue-modal-window"}) do %>
+      <i class="fa fa-fw fa-sign-in"></i> Log in
+    <% end %>
+    to view private data.
+  </p>
+
+<% elsif class_name %>
+
+  <p>
+    Perhaps you'd like to <%= link_to("browse all
+    #{class_name_h.pluralize}", action: :index, controller:
+    class_name.tableize) %>?
+  </p>
+
+<% end %>
+
+<% end %>
+
+<% error_message = "The #{req_item_plain_text} was not found." %>
+<%= render :partial => "report_error", :locals => {error_message: error_message, error_type: '404'} %>
diff --git a/apps/workbench/app/views/application/404.json.erb b/apps/workbench/app/views/application/404.json.erb
new file mode 100644 (file)
index 0000000..a697490
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+{"errors":<%= raw @errors.to_json %>}
\ No newline at end of file
diff --git a/apps/workbench/app/views/application/_arvados_attr_value.html.erb b/apps/workbench/app/views/application/_arvados_attr_value.html.erb
new file mode 100644 (file)
index 0000000..98732dc
--- /dev/null
@@ -0,0 +1,26 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if attrvalue.is_a? Array and attrvalue.collect(&:class).uniq.compact == [String] %>
+  <% attrvalue.each do |message| %>
+    <%= message %><br />
+  <% end %>
+<% else %>
+      <% if attr and obj.attribute_editable?(attr) and (!defined?(editable) || editable) %>
+        <% if resource_class_for_uuid(attrvalue, {referring_object: obj, referring_attr: attr}) %>
+          <%= link_to_if_arvados_object attrvalue, {referring_attr: attr, referring_object: obj, with_class_name: true, friendly_name: true} %>
+          <br>
+        <% end %>
+        <%= render_editable_attribute obj, attr %>
+      <% elsif attr == 'uuid' %>
+        <%= link_to_if_arvados_object attrvalue, {referring_attr: attr, referring_object: obj, with_class_name: false, friendly_name: false} %>
+      <% else %>
+        <%= link_to_if_arvados_object attrvalue, {referring_attr: attr, referring_object: obj, with_class_name: true, friendly_name: true, thumbnail: true} %>
+      <% end %>
+      <!--
+      <% if resource_class_for_uuid(attrvalue, {referring_object: obj, referring_attr: attr}) %>
+        <%= link_to_if_arvados_object(attrvalue, { referring_object: obj, link_text: raw('<span class="glyphicon glyphicon-hand-right"></span>'), referring_attr: attr })  %>
+      <% end %>
+      -->
+<% end %>
diff --git a/apps/workbench/app/views/application/_arvados_object.html.erb b/apps/workbench/app/views/application/_arvados_object.html.erb
new file mode 100644 (file)
index 0000000..6d59e0e
--- /dev/null
@@ -0,0 +1,40 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :arvados_object_table do %>
+
+<% end %>
+
+<% if content_for? :page_content %>
+<%= yield :page_content %>
+<% else %>
+<%= yield :arvados_object_table %>
+<% end %>
+
+<div>
+  <ul class="nav nav-tabs">
+    <% if content_for? :page_content %>
+    <li><a href="#arvados-object-table" data-toggle="tab">Table</a></li>
+    <% end %>
+    <li class="active"><a href="#arvados-object-json" data-toggle="tab">API response JSON</a></li>
+    <% if @object.andand.uuid %>
+    <li><a href="#arvados-object-curl" data-toggle="tab">curl update example</a></li>
+    <li><a href="#arvados-object-arv" data-toggle="tab">&ldquo;arv&rdquo; CLI examples</a></li>
+    <li><a href="#arvados-object-python" data-toggle="tab">Python example</a></li>
+    <% end %>
+  </ul>
+
+  <div class="tab-content">
+    <% if content_for? :page_content %>
+    <div id="arvados-object-table" class="tab-pane fade">
+      <%= yield :arvados_object_table %>
+    </div>
+    <% end %>
+    <div id="arvados-object-json" class="tab-pane fade in active">
+
+    </div>
+
+
+  </div>
+</div>
diff --git a/apps/workbench/app/views/application/_arvados_object_attr.html.erb b/apps/workbench/app/views/application/_arvados_object_attr.html.erb
new file mode 100644 (file)
index 0000000..9b9c39f
--- /dev/null
@@ -0,0 +1,21 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% object ||= @object %>
+<% if attrvalue.is_a? Hash then attrvalue.each do |infokey, infocontent| %>
+<tr class="info">
+  <td><%= attr %>[<%= infokey %>]</td>
+  <td>
+    <%= render partial: 'application/arvados_attr_value', locals: { obj: object, attr: nil, attrvalue: infocontent } %>
+  </td>
+</tr>
+<% end %>
+<% elsif attrvalue.is_a? String or attrvalue.respond_to? :to_s %>
+<tr class="<%= 'info' if %w(uuid owner_uuid created_at modified_at modified_by_user_uuid modified_by_client_uuid updated_at).include?(attr.to_s) %>">
+  <td><%= attr %></td>
+  <td>
+    <%= render partial: 'application/arvados_attr_value', locals: { obj: object, attr: attr, attrvalue: attrvalue } %>
+  </td>
+</tr>
+<% end %>
diff --git a/apps/workbench/app/views/application/_breadcrumb_page_name.html.erb b/apps/workbench/app/views/application/_breadcrumb_page_name.html.erb
new file mode 100644 (file)
index 0000000..0ff635b
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+
diff --git a/apps/workbench/app/views/application/_breadcrumbs.html.erb b/apps/workbench/app/views/application/_breadcrumbs.html.erb
new file mode 100644 (file)
index 0000000..fb4a146
--- /dev/null
@@ -0,0 +1,80 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+      <nav class="navbar navbar-default breadcrumbs" role="navigation">
+        <ul class="nav navbar-nav navbar-left">
+          <li>
+            <a href="/">
+              <i class="fa fa-lg fa-fw fa-dashboard"></i>
+              Dashboard
+            </a>
+          </li>
+          <li class="dropdown">
+            <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="projects-menu">
+              Projects
+              <span class="caret"></span>
+            </a>
+            <ul class="dropdown-menu" style="min-width: 20em" role="menu">
+              <li role="menuitem">
+                  <%= link_to(
+                        url_for(
+                          action: 'choose',
+                          controller: 'search',
+                          filters: [['uuid', 'is_a', 'arvados#group']].to_json,
+                          title: 'Search',
+                          action_name: 'Show',
+                          action_href: url_for(controller: :actions, action: :show),
+                          action_method: 'get',
+                          action_data: {selection_param: 'uuid', success: 'redirect-to-created-object'}.to_json),
+                        { remote: true, method: 'get', title: "Search" }) do %>
+                    <i class="glyphicon fa-fw glyphicon-search"></i> Search all projects ...
+                  <% end %>
+               </li>
+              <% if Rails.configuration.anonymous_user_token and Rails.configuration.enable_public_projects_page %>
+                <li role="menuitem"><a href="/projects/public" role="menuitem"><i class="fa fa-fw fa-list"></i> Browse public projects </a>
+                </li>
+              <% end %>
+              <li role="menuitem">
+                <%= link_to projects_path(options: {ensure_unique_name: true}), role: 'menu-item', method: :post do %>
+                  <i class="fa fa-fw fa-plus"></i> Add a new project
+                <% end %>
+              </li>
+              <li role="presentation" class="divider"></li>
+              <%= render partial: "projects_tree_menu", locals: {
+                  :project_link_to => Proc.new do |pnode, &block|
+                    link_to(project_path(pnode[:object].uuid),
+                      data: { 'object-uuid' => pnode[:object].uuid,
+                              'name' => 'name' },
+                      &block)
+                  end,
+              } %>
+            </ul>
+          </li>
+          <% if @name_link or @object %>
+            <li class="nav-separator">
+              <i class="fa fa-lg fa-angle-double-right"></i>
+            </li>
+            <li>
+              <%= link_to project_path(current_user.uuid) do %>
+                Home
+              <% end %>
+            </li>
+            <% project_breadcrumbs.each do |p| %>
+              <li class="nav-separator">
+                <i class="fa fa-lg fa-angle-double-right"></i>
+              </li>
+              <li>
+                <%= link_to(p.name, project_path(p.uuid), data: {object_uuid: p.uuid, name: 'name'}) %>
+              </li>
+            <% end %>
+          <% end %>
+        </ul>
+        <ul class="nav navbar-nav navbar-right">
+          <li>
+            <a href="/trash">
+              <%= image_tag("trash-icon.png", size: "20x20" ) %> Trash
+            </a>
+          </li>
+        </ul>
+      </nav>
diff --git a/apps/workbench/app/views/application/_browser_unsupported.html b/apps/workbench/app/views/application/_browser_unsupported.html
new file mode 100644 (file)
index 0000000..5424aba
--- /dev/null
@@ -0,0 +1,28 @@
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 -->
+
+<!-- googleoff: all -->
+<style type="text/css">
+  #browser-unsupported .alert {
+    margin-left: -100px;
+    margin-right: -100px;
+    padding-left: 120px;
+    padding-right: 120px;
+  }
+</style>
+<div id="browser-unsupported" class="hidden">
+  <div class="alert alert-danger">
+    <p>
+      <b>Hey!</b> Your web browser is missing some of the features we
+      rely on.  Usually this means you are running an old version.
+      Updating your system, or switching to a current version
+      of <a class="alert-link"
+      href="//google.com/search?q=download+Mozilla+Firefox">Firefox</a>
+      or <a class="alert-link"
+      href="//google.com/search?q=download+Google+Chrome">Chrome</a>,
+      should fix this.
+    </p>
+  </div>
+</div>
+<!-- googleon: all -->
diff --git a/apps/workbench/app/views/application/_choose.html.erb b/apps/workbench/app/views/application/_choose.html.erb
new file mode 100644 (file)
index 0000000..e3e2708
--- /dev/null
@@ -0,0 +1,93 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="modal arv-choose modal-with-loading-spinner">
+  <div class="modal-dialog" style="width:80%">
+    <div class="modal-content">
+      <div class="modal-header">
+        <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+        <h4 class="modal-title"><%= params[:title] || "Choose #{@objects.resource_class.andand.class_for_display}" %></h4>
+      </div>
+
+      <div class="modal-body">
+        <% if params[:message].present? %>
+          <p> <%= params[:message] %> </p>
+        <% end %>
+
+        <% project_filters, chooser_filters = (params[:filters] || []).partition do |attr, op, val|
+             attr == "owner_uuid" and op == "="
+           end %>
+        <div class="input-group">
+          <% if params[:by_project].to_s != "false" %>
+            <% if project_filters.empty?
+                 selected_project_name = 'All projects'
+               else
+                 val = project_filters.last.last
+                 if val == current_user.uuid
+                   selected_project_name = "Home"
+                 else
+                   selected_project_name = Group.find(val).name rescue val
+                 end
+               end
+               %>
+            <div class="input-group-btn" data-filterable-target=".modal.arv-choose .selectable-container">
+              <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">
+                <%= selected_project_name %> <span class="caret"></span>
+              </button>
+              <ul class="dropdown-menu" role="menu">
+                <li>
+                  <%= link_to '#', class: 'chooser-show-project' do %>
+                    All projects
+                  <% end %>
+                </li>
+                <li class="divider" />
+                <%= render partial: "projects_tree_menu", locals: {
+                      :project_link_to => Proc.new do |pnode, &block|
+                        link_to "#", {
+                          class: "chooser-show-project",
+                          data: {'project_uuid' => pnode[:object].uuid},
+                        }, &block
+                      end,
+                      :top_button => nil
+                    } %>
+              </ul>
+            </div>
+          <% end %>
+          <input type="text" value="<%=params[:preconfigured_search_str] || ''%>" class="form-control filterable-control focus-on-display" placeholder="Search" data-filterable-target=".modal.arv-choose .selectable-container"/>
+        </div>
+        <div style="height: 1em" />
+
+        <% preview_pane = (params[:preview_pane].to_s != "false") %>
+        <div class="row" style="height: 20em">
+          <div class="<%= 'col-sm-6' if preview_pane %> col-xs-12 arv-filterable-list selectable-container <%= 'multiple' if multiple %>"
+               style="height: 100%; overflow-y: scroll"
+               data-infinite-scroller="#choose-scroll"
+               id="choose-scroll"
+               data-infinite-content-params-from-chooser="<%= {filters: chooser_filters}.to_json %>"
+               <% if project_filters.any? %>
+                 data-infinite-content-params-from-project-dropdown="<%= {filters: project_filters, project_uuid: project_filters.last.last}.to_json %>"
+               <% end %>
+               <%
+                  action_data = JSON.parse params['action_data'] if params['action_data']
+                  use_preview_sel = action_data ? action_data['use_preview_selection'] : false
+                %>
+               data-infinite-content-href="<%= url_for partial: true,
+                                                       use_preview_selection: use_preview_sel %>">
+          </div>
+          <% if preview_pane %>
+            <div class="col-sm-6 col-xs-12 modal-dialog-preview-pane" style="height: 100%; overflow-y: scroll">
+            </div>
+          <% end %>
+        </div>
+
+        <div class="modal-footer">
+          <button class="btn btn-default" data-dismiss="modal" aria-hidden="true">Cancel</button>
+          <button class="btn btn-primary" aria-hidden="true" data-enable-if-selection disabled><%= raw(params[:action_name]) || 'Select' %></button>
+          <div class="modal-error hide" style="text-align: left; margin-top: 1em;">
+          </div>
+        </div>
+      </div>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/application/_choose.js.erb b/apps/workbench/app/views/application/_choose.js.erb
new file mode 100644 (file)
index 0000000..9638028
--- /dev/null
@@ -0,0 +1,31 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+=begin
+
+Parameters received from the caller/requestor of the modal are
+attached to the action button (.btn-primary) as follows:
+
+action_class -- string -- added as a pseudoclass to the action button.
+
+action_href -- string -- will be available at $(btn).attr('data-action-href')
+
+action_data -- json-encoded object -- will be at $(btn).data('action-data')
+
+action_data_form_params -- array -- for each X in this array, the
+value of params[X] during this "show chooser" request will be in
+$(btn).data('action-data-from-params')[X].
+
+=end
+%>
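+
+<%# For example (hypothetical values), a caller might request this
+    chooser with action_name: 'Copy', an action_href pointing at a
+    collection action, and action_data containing
+    {selection_param: 'uuid'}; those values land on the primary
+    button below. %>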
+
+$('body > .modal-container').html("<%= escape_javascript(render partial: 'choose.html', locals: {multiple: multiple}) %>");
+$('body > .modal-container .modal').modal('show');
+$('body > .modal-container .modal .modal-footer .btn-primary').
+    addClass('<%= j params[:action_class] %>').
+    attr('data-action-href', '<%= j params[:action_href] %>').
+    attr('data-method', '<%= j params[:action_method] %>').
+    data('action-data', <%= raw params[:action_data] %>).
+    data('action-data-from-params', <%= raw params.select { |k,v| k.in?(params[:action_data_from_params] || []) }.to_json %>);
diff --git a/apps/workbench/app/views/application/_choose_rows.html.erb b/apps/workbench/app/views/application/_choose_rows.html.erb
new file mode 100644 (file)
index 0000000..371398d
--- /dev/null
@@ -0,0 +1,12 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% @objects.each do |object| %>
+  <div class="row filterable selectable" data-object-uuid="<%= object.uuid %>" data-preview-href="<%= url_for object %>?tab_pane=chooser_preview">
+    <div class="col-sm-12" style="overflow-x:hidden">
+      <i class="fa fa-fw fa-gear"></i>
+      <%= object.name %>
+    </div>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/application/_content.html.erb b/apps/workbench/app/views/application/_content.html.erb
new file mode 100644 (file)
index 0000000..7f35420
--- /dev/null
@@ -0,0 +1,73 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :tab_panes do %>
+
+  <% comparable = controller.respond_to? :compare %>
+
+  <ul class="nav nav-tabs" data-tab-counts-url="<%= url_for(action: :tab_counts) rescue '' %>">
+    <% pane_list.each_with_index do |pane, i| %>
+      <% pane_name = (pane.is_a?(Hash) ? pane[:name] : pane) %>
+
+      <% data_toggle = "tab" %>
+      <% tab_tooltip = "" %>
+      <% link_disabled = "" %>
+
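+      <%# For read-only viewers, disable the Log tab when the
+          underlying log data is not available. %>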
+      <% if (pane_name == "Log") and !(ArvadosBase.find(@object.owner_uuid).writable_by.include?(current_user.andand.uuid) rescue nil)
+          if controller.model_class.to_s == 'Job'
+            if @object.log and !@object.log.empty?
+              logCollection = Collection.find? @object.log
+              if !logCollection
+                data_toggle = "disabled"
+                tab_tooltip = "Log data is not available"
+                link_disabled = "disabled"
+              end
+            end
+          elsif (controller.model_class.to_s == 'PipelineInstance' and
+                 !@object.has_readable_logs?)
+            data_toggle = "disabled"
+            tab_tooltip = "Log data is not available"
+            link_disabled = "disabled"
+          end
+        end
+      %>
+
+      <li class="<%= 'active' if i==0 %> <%= link_disabled %> tab-pane-<%=pane_name%>" data-toggle="tooltip" data-placement="top" title="<%=tab_tooltip%>">
+        <a href="#<%= pane_name %>"
+           id="<%= pane_name %>-tab"
+           data-toggle="<%= data_toggle %>"
+           data-tab-history=true
+           data-tab-history-update-url=true
+           >
+          <%= pane_name.gsub('_', ' ') %> <span id="<%= pane_name %>-count"></span>
+        </a>
+      </li>
+    <% end %>
+  </ul>
+
+  <div class="tab-content">
+    <% pane_list.each_with_index do |pane, i| %>
+      <% pane_name = (pane.is_a?(Hash) ? pane[:name] : pane) %>
+      <div id="<%= pane_name %>"
+           class="tab-pane fade <%= 'in active pane-loaded' if i==0 %> arv-log-event-listener arv-refresh-on-log-event arv-log-event-subscribe-to-pipeline-job-uuids"
+           <% if controller.action_name == "index" %>
+             data-object-kind="arvados#<%= ArvadosApiClient.class_kind controller.model_class %>"
+           <% else %>
+             data-object-uuid="<%= @object.uuid %>"
+           <% end %>
+           data-pane-content-url="<%= url_for(params.merge(tab_pane: pane_name)) %>"
+           style="margin-top:0.5em;"
+           >
+        <div class="pane-content">
+          <% if i == 0 %>
+            <%= render_pane pane_name, to_string: true %>
+          <% else %>
+            <div class="spinner spinner-32px spinner-h-center"></div>
+          <% end %>
+        </div>
+      </div>
+    <% end %>
+  </div>
+
+<% end %>
diff --git a/apps/workbench/app/views/application/_content_layout.html.erb b/apps/workbench/app/views/application/_content_layout.html.erb
new file mode 100644 (file)
index 0000000..4aff081
--- /dev/null
@@ -0,0 +1,14 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="clearfix">
+  <%= content_for :content_top %>
+  <div class="pull-right">
+    <%= content_for :tab_line_buttons %>
+  </div>
+</div>
+
+<%= content_for :tab_panes %>
+
+<%= render :partial => 'loading_modal' %>
diff --git a/apps/workbench/app/views/application/_create_new_object_button.html.erb b/apps/workbench/app/views/application/_create_new_object_button.html.erb
new file mode 100644 (file)
index 0000000..19377ae
--- /dev/null
@@ -0,0 +1,11 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div style="display:inline-block">
+  <%= button_to({action: 'create'}, {class: 'btn btn-sm btn-primary'}) do %>
+    <i class="fa fa-fw fa-plus"></i>
+    Add a new
+    <%= controller.controller_name.singularize.humanize.downcase %>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/application/_delete_object_button.html.erb b/apps/workbench/app/views/application/_delete_object_button.html.erb
new file mode 100644 (file)
index 0000000..4db3aea
--- /dev/null
@@ -0,0 +1,9 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if object.deletable? %>
+  <%= link_to({controller: object.class.table_name, action: 'destroy', id: object.uuid}, method: :delete, remote: true, data: {confirm: "Really delete #{object.class_for_display.downcase} '#{object.friendly_link_name}'?"}) do %>
+    <i class="glyphicon glyphicon-trash"></i>
+  <% end %>
+<% end %>
diff --git a/apps/workbench/app/views/application/_extra_tab_line_buttons.html.erb b/apps/workbench/app/views/application/_extra_tab_line_buttons.html.erb
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/app/views/application/_index.html.erb b/apps/workbench/app/views/application/_index.html.erb
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/app/views/application/_job_progress.html.erb b/apps/workbench/app/views/application/_job_progress.html.erb
new file mode 100644 (file)
index 0000000..9f5ce55
--- /dev/null
@@ -0,0 +1,55 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if (j.andand[:state] == "Running" or defined? scaleby) and (not defined? show_progress_bar or show_progress_bar) %>
+  <%
+    failed = j[:tasks_summary][:failed] || 0 rescue 0
+    done = j[:tasks_summary][:done] || 0 rescue 0
+    running = j[:tasks_summary][:running] || 0 rescue 0
+    todo = j[:tasks_summary][:todo] || 0 rescue 0
+
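+    # Size the bar as the done+failed share of all tasks, scaled by
+    # `scaleby` when this bar is one segment of a pipeline-level bar.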
+    if done + running + failed + todo == 0
+      # No tasks were ever created for this job;
+      # render an empty progress bar.
+      done_percent = 0
+    else
+      percent_total_tasks = 100.0 / (done + running + failed + todo)
+      if defined? scaleby
+        percent_total_tasks *= scaleby
+      end
+      done_percent = (done+failed) * percent_total_tasks
+    end
+    %>
+
+  <% if not defined? scaleby %>
+    <div class="progress" style="margin-bottom: 0px">
+  <% end %>
+
+  <span class="progress-bar <%= if failed == 0 then 'progress-bar-success' else 'progress-bar-warning' end %>" style="width: <%= done_percent %>%;">
+  </span>
+
+  <% if not defined? scaleby %>
+  </div>
+  <% end %>
+
+<% else %>
+
+<% to_label = {
+     "Cancelled" => "danger",
+     "Complete" => "success",
+     "Running" => "info",
+     "Failed" => "danger",
+     "Queued" => "default",
+     nil => "default"
+   } %>
+
+  <span class="label label-<%= to_label[j.andand[:state]] %>">
+    <%= if defined? title
+          title
+        else
+          if j.andand[:state] then j[:state].downcase else "Not ready" end
+        end
+        %></span>
+
+<% end %>
diff --git a/apps/workbench/app/views/application/_loading.html.erb b/apps/workbench/app/views/application/_loading.html.erb
new file mode 100644 (file)
index 0000000..6936efd
--- /dev/null
@@ -0,0 +1,194 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="socket">
+  <div class="gel center-gel">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c1 r1">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c2 r1">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c3 r1">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c4 r1">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c5 r1">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c6 r1">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  
+  <div class="gel c7 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  
+  <div class="gel c8 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c9 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c10 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c11 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c12 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c13 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c14 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c15 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c16 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c17 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c18 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c19 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c20 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c21 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c22 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c23 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c24 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c25 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c26 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c28 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c29 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c30 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c31 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c32 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c33 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c34 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c35 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c36 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c37 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  
+</div>
diff --git a/apps/workbench/app/views/application/_loading_modal.html.erb b/apps/workbench/app/views/application/_loading_modal.html.erb
new file mode 100644 (file)
index 0000000..7d88d14
--- /dev/null
@@ -0,0 +1,16 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div id="loading-modal" class="modal fade">
+  <div class="modal-dialog">
+       <div class="modal-content">
+         <div class="modal-header">
+           <h3>Refreshing...</h3>
+         </div>
+         <div class="modal-body">
+           <p>Content may have changed.</p>
+         </div>
+       </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/application/_name_and_description.html.erb b/apps/workbench/app/views/application/_name_and_description.html.erb
new file mode 100644 (file)
index 0000000..8d6f10b
--- /dev/null
@@ -0,0 +1,6 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: 'object_name' %>
+<%= render partial: 'object_description' %>
diff --git a/apps/workbench/app/views/application/_object_description.html.erb b/apps/workbench/app/views/application/_object_description.html.erb
new file mode 100644 (file)
index 0000000..1dbc11d
--- /dev/null
@@ -0,0 +1,9 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if @object.respond_to? :description %>
+  <div class="arv-description-as-subtitle">
+    <%= render_editable_attribute @object, 'description', nil, { 'data-emptytext' => "(No description provided)", 'data-toggle' => 'manual' } %>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/application/_object_name.html.erb b/apps/workbench/app/views/application/_object_name.html.erb
new file mode 100644 (file)
index 0000000..2bb456c
--- /dev/null
@@ -0,0 +1,9 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if @object.respond_to? :name %>
+  <h2>
+    <%= render_editable_attribute @object, 'name', nil, { 'data-emptytext' => "New #{controller.model_class.to_s.underscore.gsub("_"," ")}" } %>
+  </h2>
+<% end %>
diff --git a/apps/workbench/app/views/application/_paging.html.erb b/apps/workbench/app/views/application/_paging.html.erb
new file mode 100644 (file)
index 0000000..abd6ecb
--- /dev/null
@@ -0,0 +1,132 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :css do %>
+.index-paging {
+text-align: center;
+padding-left: 1em;
+padding-right: 1em;
+background-color: whitesmoke;
+}
+.paging-number {
+display: inline-block;
+min-width: 1.2em;
+}
+<% end %>
+
+<% results.fetch_multiple_pages(false) %>
+
+<% if results.respond_to? :result_offset and
+       results.respond_to? :result_limit and
+       results.respond_to? :items_available and
+       results.result_offset != nil and
+       results.result_limit != nil and
+       results.items_available != nil
+%>
+<div class="index-paging">
+  Displaying <%= results.result_offset+1 %> &ndash;
+  <%= if results.result_offset + results.result_limit > results.items_available
+        results.items_available
+      else
+        results.result_offset + results.result_limit
+      end %>
+ out of <%= results.items_available %>
+</div>
+
+<% if not (results.result_offset == 0 and results.items_available <= results.result_limit) %>
+
+<div class="index-paging">
+
+<% if results.result_offset > 0 %>
+  <% if results.result_offset > results.result_limit %>
+    <% prev_offset = results.result_offset - results.result_limit %>
+  <% else %>
+    <% prev_offset = 0 %>
+  <% end %>
+<% else %>
+  <% prev_offset = nil %>
+<% end %>
+
+<% this_offset = results.result_offset %>
+
+<% if (results.result_offset + results.result_limit) < results.items_available %>
+  <% next_offset = results.result_offset + results.result_limit %>
+<% else %>
+  <% next_offset = nil %>
+<% end %>
+
+<span class="pull-left">
+<% if results.result_offset > 0 %>
+  <%= link_to raw("<span class='glyphicon glyphicon-fast-backward'></span>"), {:id => object, :offset => 0, :limit => results.result_limit}  %>
+<% else %>
+  <span class='glyphicon glyphicon-fast-backward text-muted'></span>
+<% end %>
+
+<% if prev_offset %>
+  <%= link_to raw("<span class='glyphicon glyphicon-step-backward'></span>"), {:id => object, :offset => prev_offset, :limit => results.result_limit}  %>
+<% else %>
+<span class='glyphicon glyphicon-step-backward text-muted'></span>
+<% end %>
+</span>
+
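+<%# Build a window of numbered page links around the current offset,
+    roughly ten pages on either side, clamped so it never extends
+    past the first or last page of results. %>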
+<% first = this_offset - (10 * results.result_limit) %>
+<% last = this_offset + (11 * results.result_limit) %>
+
+<% lastpage_offset = (results.items_available / results.result_limit) * results.result_limit %>
+
+<% if last > results.items_available %>
+  <% first -= (last - lastpage_offset) %>
+  <% last -= (last - results.items_available) %>
+<% end %>
+
+<% if first < 0 %>
+  <% d = -first %>
+  <% first += d %>
+  <% last += d %>
+<% end %>
+
+<% last = results.items_available if last > results.items_available %>
+
+<% i = first %>
+<% n = first / results.result_limit %>
+
+<% if first > 0 %>
+&hellip;
+<% end %>
+
+<% while i < last %>
+<% if i != this_offset %>
+  <%= link_to "#{n+1}", {:id => @object, :offset => i, :limit => results.result_limit}, class: 'paging-number' %>
+<% else %>
+  <span class="paging-number" style="font-weight: bold;"><%= n+1 %></span>
+<% end %>
+<% i += results.result_limit %>
+<% n += 1 %>
+<% end %>
+
+<% if last < results.items_available %>
+&hellip;
+<% end %>
+
+<span class="pull-right">
+<% if next_offset %>
+  <%= link_to raw("<span class='glyphicon glyphicon-step-forward'></span>"), {:id => @object, :offset => next_offset, :limit => results.result_limit}  %>
+<% else %>
+<span class='glyphicon glyphicon-step-forward text-muted'></span>
+<% end %>
+
+<% if (results.items_available - results.result_offset) >= results.result_limit %>
+  <%= link_to raw("<span class='glyphicon glyphicon-fast-forward'></span>"), {:id => @object, :offset => results.items_available - (results.items_available % results.result_limit),
+        :limit => results.result_limit}  %>
+<% else %>
+  <span class='glyphicon glyphicon-fast-forward text-muted'></span>
+<% end %>
+
+</span>
+
+</div>
+
+<% end %>
+
+<% end %>
diff --git a/apps/workbench/app/views/application/_pipeline_progress.html.erb b/apps/workbench/app/views/application/_pipeline_progress.html.erb
new file mode 100644 (file)
index 0000000..7ea2e68
--- /dev/null
@@ -0,0 +1,12 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% component_frac = 1.0 / p.components.length %>
+<div class="progress">
+  <% p.components.each do |k,c| %>
+    <% if c.is_a?(Hash) and c[:job] %>
+      <%= render partial: "job_progress", locals: {:j => c[:job], :scaleby => component_frac } %>
+    <% end %>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/application/_pipeline_status_label.html.erb b/apps/workbench/app/views/application/_pipeline_status_label.html.erb
new file mode 100644 (file)
index 0000000..c057751
--- /dev/null
@@ -0,0 +1,19 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if p.state == 'Complete' %>
+  <span class="label label-success">complete</span>
+<% elsif p.state == 'Failed' %>
+  <span class="label label-danger">failed</span>
+<% elsif p.state == 'RunningOnServer' || p.state == 'RunningOnClient' %>
+  <span class="label label-info">running</span>
+<% elsif p.state == 'Paused'  %>
+  <span class="label label-default">paused</span>
+<% else %>
+  <% if not p.components.values.any? { |c| c[:job] rescue false } %>
+    <span class="label label-default">not started</span>
+  <% else %>
+    <span class="label label-default">not running</span>
+  <% end %>
+<% end %>
diff --git a/apps/workbench/app/views/application/_projects_tree_menu.html.erb b/apps/workbench/app/views/application/_projects_tree_menu.html.erb
new file mode 100644 (file)
index 0000000..08d3b81
--- /dev/null
@@ -0,0 +1,50 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% starred_projects = my_starred_projects current_user %>
+<% if starred_projects.andand.any? %>
+  <li role="presentation" class="dropdown-header">
+    My favorite projects
+  </li>
+  <li>
+    <%= project_link_to.call({object: current_user, depth: 0}) do %>
+      <span style="padding-left: 0">Home</span>
+    <% end %>
+  </li>
+  <% (starred_projects).each do |pnode| %>
+    <li>
+      <%= project_link_to.call({object: pnode, depth: 0}) do %>
+        <span style="padding-left: 0em"></span><%= pnode[:name] %>
+      <% end %>
+    </li>
+  <% end %>
+  <li role="presentation" class="divider"></li>
+<% end %>
+
+<li role="presentation" class="dropdown-header">
+  My projects
+</li>
+<li>
+  <%= project_link_to.call({object: current_user, depth: 0}) do %>
+    <span style="padding-left: 0">Home</span>
+  <% end %>
+</li>
+<% my_tree = my_wanted_projects_tree current_user %>
+<% my_tree[0].each do |pnode| %>
+  <% next if pnode[:object].class != Group %>
+  <li>
+    <%= project_link_to.call pnode do %>
+      <span style="padding-left: <%= pnode[:depth] %>em"></span><%= pnode[:object].name %>
+    <% end %>
+  </li>
+<% end %>
+<% if my_tree[1] or my_tree[0].size > 200 %>
+<li role="presentation" class="dropdown-header">
+  Some projects have been omitted.
+</li>
+<% elsif my_tree[2] %>
+<li role="presentation" class="dropdown-header">
+  Showing top three levels of your projects.
+</li>
+<% end %>
diff --git a/apps/workbench/app/views/application/_report_error.html.erb b/apps/workbench/app/views/application/_report_error.html.erb
new file mode 100644 (file)
index 0000000..ab0fd67
--- /dev/null
@@ -0,0 +1,35 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+   popup_params = {
+     popup_type: 'report',
+     current_location: request.url,
+     current_path: request.fullpath,
+     action_method: 'post',
+   }
+   if error_type == "api"
+     popup_params.merge!(
+       api_error_request_url: api_error.andand.request_url || "",
+       api_error_response: api_error.andand.api_response || "",
+     )
+   else
+     popup_params.merge!(error_message: error_message)
+   end
+%>
+
+<p>
+<%= link_to(report_issue_popup_path(popup_params),
+            {class: 'btn btn-primary report-issue-modal-window', :remote => true, return_to: request.url}) do %>
+  <i class="fa fa-fw fa-support"></i> Report problem
+<% end %>
+
+or
+
+<%= mail_to(Rails.configuration.support_email_address, "email us",
+            subject: "Workbench problem report",
+            body: "Problem while viewing page #{request.url}") %>
+
+if you suspect this is a bug.
+</p>
diff --git a/apps/workbench/app/views/application/_report_issue_popup.html.erb b/apps/workbench/app/views/application/_report_issue_popup.html.erb
new file mode 100644 (file)
index 0000000..8823fdd
--- /dev/null
@@ -0,0 +1,154 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+  generated_at = arvados_api_client.discovery[:generatedAt]
+  arvados_base = Rails.configuration.arvados_v1_base
+  support_email = Rails.configuration.support_email_address
+
+  additional_info = {}
+  additional_info['Current location'] = params[:current_location]
+  additional_info['User UUID'] = current_user.uuid if current_user
+
+  additional_info_str = additional_info.map {|k,v| "#{k}=#{v}"}.join("\n")
+
+  additional_info['api_source_version'] = api_source_version
+  additional_info['api_package_version'] = api_package_version
+  additional_info['generated_at'] = generated_at
+  additional_info['workbench_version'] = AppVersion.hash
+  additional_info['workbench_package_version'] = AppVersion.package_version
+  additional_info['arvados_base'] = arvados_base
+  additional_info['support_email'] = support_email
+  additional_info['error_message'] = params[:error_message] if params[:error_message]
+  additional_info['api_error_request_url'] = params[:api_error_request_url] if params[:api_error_request_url]
+  additional_info['api_error_response'] = params[:api_error_response] if params[:api_error_response]
+%>
+
+<div class="modal">
+ <div class="modal-dialog modal-with-loading-spinner">
+  <div class="modal-content">
+
+    <%= form_tag report_issue_path, {name: 'report-issue-form', method: 'post',
+        class: 'form-horizontal'} do %>
+
+      <%
+        title = 'Version / debugging info'
+        title = 'Report a problem' if params[:popup_type] == 'report'
+      %>
+
+      <div class="modal-header">
+        <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+        <div>
+          <div class="col-sm-8"> <h4 class="modal-title"><%=title%></h4> </div>
+          <div class="spinner spinner-32px spinner-h-center col-sm-1" hidden="true"></div>
+        </div>
+        <br/>
+      </div>
+
+      <div class="modal-body" style="height: 25em; overflow-y: scroll">
+        <div class="form-group">
+          <label for="support_email" class="col-sm-4 control-label"> Support email </label>
+          <div class="col-sm-8">
+            <p class="form-control-static" name="support_version"><a href="mailto:<%=support_email%>?subject=Workbench problem report&amp;body=Problem while viewing page <%=params[:current_location]%>"><%=support_email%></a></p>
+          </div>
+        </div>
+
+        <div class="form-group">
+          <label for="current_page" class="col-sm-4 control-label"> Current page </label>
+          <div class="col-sm-8">
+            <p class="form-control-static text-overflow-ellipsis" name="current_page"><%=params[:current_path]%></a></p>
+          </div>
+        </div>
+
+        <% if params[:popup_type] == 'report' %>
+          <div class="form-group">
+            <label for="report_text_label" class="col-sm-4 control-label"> Describe the problem </label>
+            <div class="col-sm-8">
+              <textarea class="form-control" rows="4" id="report_issue_text" name="report_issue_text" placeholder="Describe the problem"></textarea>
+            </div>
+            <input type="hidden" name="report_additional_info" value="<%=additional_info.to_json%>">
+          </div>
+        <% end %>
+
+        <div class="form-group">
+          <label for="wb_version" class="col-sm-4 control-label"> Workbench version </label>
+          <div class="col-sm-8">
+            <p class="form-control-static" name="wb_version">
+              <%= AppVersion.package_version %> (<%= link_to AppVersion.hash, version_link_target(AppVersion.hash) %>)
+            </p>
+          </div>
+        </div>
+
+        <div class="form-group">
+          <label for="server_version" class="col-sm-4 control-label"> API version </label>
+          <div class="col-sm-8">
+            <p class="form-control-static" name="server_version">
+              <%= api_package_version %> (<%= link_to api_source_version, version_link_target(api_source_version) %>)
+            </p>
+          </div>
+        </div>
+
+        <div class="form-group">
+          <label for="generated_at" class="col-sm-4 control-label"> API startup time </label>
+          <div class="col-sm-8">
+            <p class="form-control-static" name="generated_at"><%=generated_at%></p>
+          </div>
+        </div>
+
+        <div class="form-group">
+          <label for="arvados_base" class="col-sm-4 control-label"> API address </label>
+          <div class="col-sm-8">
+            <p class="form-control-static" name="arvados_base"><%=arvados_base%></p>
+          </div>
+        </div>
+
+        <% if current_user %>
+          <div class="form-group">
+            <label for="user_uuid" class="col-sm-4 control-label"> User UUID </label>
+            <div class="col-sm-8">
+              <p class="form-control-static" name="user_uuid"><%=current_user.uuid%></p>
+            </div>
+          </div>
+        <% end %>
+
+        <% if params[:error_message] %>
+          <div class="form-group">
+            <label for="error_message" class="col-sm-4 control-label"> Error message </label>
+            <div class="col-sm-8">
+              <p class="form-control-static text-overflow-ellipsis" name="error_message"><%=params[:error_message]%></p>
+            </div>
+          </div>
+        <% end %>
+
+        <% if params[:api_error_request_url] %>
+          <div class="form-group">
+            <label for="api_error_url" class="col-sm-4 control-label"> API error request URL </label>
+            <div class="col-sm-8">
+              <p class="form-control-static text-overflow-ellipsis" name="api_error_url"><%=params[:api_error_request_url]%></p>
+            </div>
+          </div>
+        <% end %>
+
+        <% if params[:api_error_response] %>
+          <div class="form-group">
+            <label for="api_error_response" class="col-sm-4 control-label"> API error response </label>
+            <div class="col-sm-8">
+              <p class="form-control-static text-overflow-ellipsis" name="api_error_response"><%=params[:api_error_response]%></p>
+            </div>
+          </div>
+        <% end %>
+      </div>
+
+      <div class="modal-footer">
+        <% if params[:popup_type] == 'report' %>
+          <button class="btn btn-default report-issue-cancel" id="report-issue-cancel" data-dismiss="modal" aria-hidden="true">Cancel</button>
+          <button type="submit" id="report-issue-submit" class="btn btn-primary report-issue-submit" autofocus>Send problem report</button>
+        <% else %>
+          <button class="btn btn-default" data-dismiss="modal" aria-hidden="true">Close</button>
+        <% end %>
+      </div>
+    <% end #form %>
+  </div>
+ </div>
+</div>
diff --git a/apps/workbench/app/views/application/_selection_checkbox.html.erb b/apps/workbench/app/views/application/_selection_checkbox.html.erb
new file mode 100644 (file)
index 0000000..af65a6d
--- /dev/null
@@ -0,0 +1,24 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if object and object.uuid and (object.class.goes_in_projects? or (object.is_a?(Link) and ArvadosBase::resource_class_for_uuid(object.head_uuid).to_s == 'Collection')) %>
+  <% fn = if defined? friendly_name and not friendly_name.nil?
+            friendly_name
+          else
+            link_to_if_arvados_object object, {no_link: true}
+          end
+     %>
+  <% # This 'fn' string may contain embedded HTML which is already marked html_safe.
+     # Since we are putting it into a tag attribute, we need to copy into an
+     # unsafe string so that rails will escape it for us.
+     fn = String.new fn %>
+<%= check_box_tag 'uuids[]', object.uuid, false, {
+      :class => 'persistent-selection',
+      :id => object.uuid,
+      :friendly_type => object.class.name,
+      :friendly_name => fn,
+      :href => "#{url_for controller: object.class.name.tableize, action: 'show', id: object.uuid }",
+      :title => "Click to add this item to your selection list"
+} %>
+<% end %>
diff --git a/apps/workbench/app/views/application/_show_advanced.html.erb b/apps/workbench/app/views/application/_show_advanced.html.erb
new file mode 100644 (file)
index 0000000..d9423c5
--- /dev/null
@@ -0,0 +1,27 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="panel-group" id="arv-adv-accordion">
+  <% ['API response',
+      'Metadata',
+      'Python example',
+      'CLI example',
+      'curl example'].each do |section| %>
+    <% section_id = section.gsub(" ","_").downcase %>
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        <h4 class="panel-title">
+          <a data-toggle="collapse" data-parent="#arv-adv-accordion" href="#advanced_<%=section_id%>">
+            <%= section %>
+          </a>
+        </h4>
+      </div>
+      <div id="advanced_<%=section_id%>" class="panel-collapse collapse <%#= 'in' if section == 'API response'%>">
+        <div class="panel-body">
+          <%= render partial: "show_advanced_#{section_id}", locals: {object: @object} %>
+        </div>
+      </div>
+    </div>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/application/_show_advanced_api_response.html.erb b/apps/workbench/app/views/application/_show_advanced_api_response.html.erb
new file mode 100644 (file)
index 0000000..f856f91
--- /dev/null
@@ -0,0 +1,7 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<pre>
+<%= JSON.pretty_generate(object.attributes.reject { |k,v| k == 'id' }) rescue nil %>
+</pre>
diff --git a/apps/workbench/app/views/application/_show_advanced_cli_example.html.erb b/apps/workbench/app/views/application/_show_advanced_cli_example.html.erb
new file mode 100644 (file)
index 0000000..102cf4a
--- /dev/null
@@ -0,0 +1,16 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+An example arv command to get a <%= object.class.to_s.underscore %> using its uuid:
+<pre>
+arv <%= object.class.to_s.underscore %> get \
+ --uuid <%= object.uuid %>
+</pre>
+
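+<%# attributes.keys[-3] below just picks an arbitrary attribute (third from
+    the end of the attribute hash) to use in the example update command. %>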
+An example arv command to update the "<%= object.attributes.keys[-3] %>" attribute for the current <%= object.class.to_s.underscore %>:
+<pre>
+arv <%= object.class.to_s.underscore %> update \
+ --uuid <%= object.uuid %> \
+ --<%= object.class.to_s.underscore.gsub '_', '-' %> '<%= JSON.generate({object.attributes.keys[-3] => object.attributes.values[-3]}).gsub("'") { "'\\''" } %>'
+</pre>
diff --git a/apps/workbench/app/views/application/_show_advanced_curl_example.html.erb b/apps/workbench/app/views/application/_show_advanced_curl_example.html.erb
new file mode 100644 (file)
index 0000000..c517de3
--- /dev/null
@@ -0,0 +1,14 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+An example curl command to update the "<%= object.attributes.keys[-3] %>" attribute for the current <%= object.class.to_s.underscore %>:
+<pre>
+curl -X PUT \
+ -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+ --data-urlencode <%= object.class.to_s.underscore %>@/dev/stdin \
+ https://$ARVADOS_API_HOST/arvados/v1/<%= object.class.to_s.pluralize.underscore %>/<%= object.uuid %> \
+ &lt;&lt;EOF
+<%= JSON.pretty_generate({object.attributes.keys[-3] => object.attributes.values[-3]}) %>
+EOF
+</pre>
diff --git a/apps/workbench/app/views/application/_show_advanced_metadata.html.erb b/apps/workbench/app/views/application/_show_advanced_metadata.html.erb
new file mode 100644 (file)
index 0000000..062dba9
--- /dev/null
@@ -0,0 +1,60 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% outgoing = Link.where(tail_uuid: @object.uuid) %>
+<% incoming = Link.where(head_uuid: @object.uuid) %>
+
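+<%# Batch-fetch the collections and links these outgoing links point at, so
+    rendering the table below doesn't issue one API request per row. %>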
+<%
+  preload_uuids = []
+  preload_head_uuids = []
+  outgoing.results.each do |link|
+    preload_uuids << link.uuid
+    preload_uuids << link.head_uuid
+    preload_head_uuids << link.head_uuid
+  end
+  preload_collections_for_objects preload_uuids
+  preload_links_for_objects preload_head_uuids
+%>
+
+<% if (outgoing | incoming).any? %>
+<table class="table topalign">
+  <colgroup>
+    <col width="20%" />
+    <col width="10%" />
+    <col width="10%" />
+    <col width="20%" />
+    <col width="20%" />
+    <col width="20%" />
+  </colgroup>
+  <thead>
+    <tr>
+      <th></th>
+      <th>link_class</th>
+      <th>name</th>
+      <th>tail</th>
+      <th>head</th>
+      <th>properties</th>
+    </tr>
+  </thead>
+  <tbody>
+    <% (outgoing | incoming).each do |link| %>
+      <tr>
+        <td>
+          <%= render partial: 'show_object_button', locals: { object: link, size: 'xs' } %>
+          <span class="arvados-uuid"><%= link.uuid %></span>
+        </td>
+        <td><%= link.link_class %></td>
+        <td><%= link.name %></td>
+        <td><%= link.tail_uuid == object.uuid ? 'this' : (render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "tail_uuid", attrvalue: link.tail_uuid, editable: false }) %></td>
+        <td><%= link.head_uuid == object.uuid ? 'this' : (render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "head_uuid", attrvalue: link.head_uuid, editable: false }) %></td>
+        <td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "properties", attrvalue: link.properties, editable: false } %></td>
+      </tr>
+    <% end %>
+  </tbody>
+</table>
+<% else %>
+<span class="deemphasize">
+  (No metadata links found)
+</span>
+<% end %>
diff --git a/apps/workbench/app/views/application/_show_advanced_python_example.html.erb b/apps/workbench/app/views/application/_show_advanced_python_example.html.erb
new file mode 100644 (file)
index 0000000..4ae3945
--- /dev/null
@@ -0,0 +1,10 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+An example python command to get a <%= object.class.to_s.underscore %> using its uuid:
+<pre>
+import arvados
+
+x = arvados.api().<%= object.class.to_s.pluralize.underscore %>().get(uuid='<%= object.uuid %>').execute()
+</pre>
diff --git a/apps/workbench/app/views/application/_show_api.html.erb b/apps/workbench/app/views/application/_show_api.html.erb
new file mode 100644 (file)
index 0000000..72cc363
--- /dev/null
@@ -0,0 +1,46 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if @object.andand.uuid %>
+
+<div class="panel panel-default">
+  <div class="panel-heading">curl</div>
+  <div class="panel-body">
+  <pre>
+curl -X PUT \
+ -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+ --data-urlencode <%= @object.class.to_s.underscore %>@/dev/stdin \
+ https://$ARVADOS_API_HOST/arvados/v1/<%= @object.class.to_s.pluralize.underscore %>/<%= @object.uuid %> \
+ &lt;&lt;EOF
+<%= JSON.pretty_generate({@object.attributes.keys[-3] => @object.attributes.values[-3]}) %>
+EOF
+  </pre>
+  </div>
+</div>
+
+<div class="panel panel-default">
+  <div class="panel-heading"><b>arv</b> command line tool</div>
+  <div class="panel-body">
+  <pre>
+arv <%= @object.class.to_s.underscore %> get \
+ --uuid <%= @object.uuid %>
+
+arv <%= @object.class.to_s.underscore %> update \
+ --uuid <%= @object.uuid %> \
+ --<%= @object.class.to_s.underscore.gsub '_', '-' %> '<%= JSON.generate({@object.attributes.keys[-3] => @object.attributes.values[-3]}).gsub("'") { "'\\''" } %>'
+      </pre>
+  </div>
+</div>
+
+<div class="panel panel-default">
+  <div class="panel-heading"><b>Python</b> SDK</div>
+  <div class="panel-body">
+    <pre>
+import arvados
+
+x = arvados.api().<%= @object.class.to_s.pluralize.underscore %>().get(uuid='<%= @object.uuid %>').execute()
+      </pre>
+  </div>
+</div>
+<% end %>
diff --git a/apps/workbench/app/views/application/_show_attributes.html.erb b/apps/workbench/app/views/application/_show_attributes.html.erb
new file mode 100644 (file)
index 0000000..c48428e
--- /dev/null
@@ -0,0 +1,17 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= form_for @object do |f| %>
+<table class="table topalign">
+  <thead>
+  </thead>
+  <tbody>
+    <% @object.attributes_for_display.each do |attr, attrvalue| %>
+    <%= render partial: 'application/arvados_object_attr', locals: { attr: attr, attrvalue: attrvalue } %>
+    <% end %>
+  </tbody>
+</table>
+
+<% end %>
+
diff --git a/apps/workbench/app/views/application/_show_autoselect_text.html.erb b/apps/workbench/app/views/application/_show_autoselect_text.html.erb
new file mode 100644 (file)
index 0000000..a007a55
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%# Render local variable `text` so the entire text is automatically
+    selected when clicked or focused. %>
+<input class="select-on-focus <%= tagclass %>" type="text" readonly
+       size="<%= text.size %>" value="<%= text %>">
diff --git a/apps/workbench/app/views/application/_show_home_button.html.erb b/apps/workbench/app/views/application/_show_home_button.html.erb
new file mode 100644 (file)
index 0000000..0f87fc8
--- /dev/null
@@ -0,0 +1,7 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if (current_user.andand.is_admin and controller.model_class == User) %>
+  <%= link_to 'Home', "/projects/#{object.uuid}" %>
+<% end %>
diff --git a/apps/workbench/app/views/application/_show_object_button.html.erb b/apps/workbench/app/views/application/_show_object_button.html.erb
new file mode 100644 (file)
index 0000000..3acfdaa
--- /dev/null
@@ -0,0 +1,19 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% htmloptions = {class: ''}.merge(htmloptions || {})
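+   # `size` and `link_text` are optional locals: the rescue and the defined?
+   # check below fall back to defaults when a caller omits them.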
+   htmloptions[:class] += " btn-#{size}" rescue nil
+   link_text = 'Show' unless defined?(link_text) and link_text
+ %>
+<%= link_to_if_arvados_object object, {
+      link_text: raw('<i class="fa fa-fw ' + fa_icon_class_for_object(object) + '"></i> ' + link_text),
+      name_link: (defined?(name_link) && name_link && name_link.uuid) ? name_link : nil
+    }, {
+      data: {
+        toggle: 'tooltip',
+        placement: 'top'
+      },
+      title: 'show ' + object.class_for_display.downcase,
+      class: 'btn btn-default ' + htmloptions[:class],
+    } %>
diff --git a/apps/workbench/app/views/application/_show_object_description_cell.html.erb b/apps/workbench/app/views/application/_show_object_description_cell.html.erb
new file mode 100644 (file)
index 0000000..e681cc2
--- /dev/null
@@ -0,0 +1,6 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= object.content_summary %>
+
diff --git a/apps/workbench/app/views/application/_show_recent.html.erb b/apps/workbench/app/views/application/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..537cce7
--- /dev/null
@@ -0,0 +1,81 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if objects.empty? %>
+<br/>
+<p style="text-align: center">
+  No <%= controller.controller_name.humanize.downcase %> to display.
+</p>
+
+<% else %>
+
+<% attr_blacklist = ' created_at modified_at modified_by_user_uuid modified_by_client_uuid updated_at owner_uuid group_class properties' %>
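+<%# attr_blacklist is one space-separated string; lookups below prepend a
+    space to the attribute name so matches only start at a name boundary. %>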
+
+<%= render partial: "paging", locals: {results: objects, object: @object} %>
+
+<%= form_tag do |f| %>
+
+<table class="table table-condensed arv-index">
+  <thead>
+    <tr>
+      <% if objects.first and objects.first.class.goes_in_projects? %>
+        <th></th>
+      <% end %>
+      <th></th>
+      <% objects.first.attributes_for_display.each do |attr, attrvalue| %>
+      <% next if attr_blacklist.index(" "+attr) %>
+      <th class="arv-attr-<%= attr %>">
+        <%= controller.model_class.attribute_info[attr.to_sym].andand[:column_heading] or attr.sub(/_uuid/, '') %>
+      </th>
+      <% end %>
+      <th>
+        <!-- a column for user's home -->
+      </th>
+      <th>
+        <!-- a column for delete buttons -->
+      </th>
+    </tr>
+  </thead>
+
+  <tbody>
+    <% objects.each do |object| %>
+    <tr data-object-uuid="<%= object.uuid %>">
+      <% if objects.first.class.goes_in_projects? %>
+        <td>
+          <%= render :partial => "selection_checkbox", :locals => {:object => object} %>
+        </td>
+      <% end %>
+      <td>
+        <%= render :partial => "show_object_button", :locals => {object: object, size: 'xs'} %>
+      </td>
+
+      <% object.attributes_for_display.each do |attr, attrvalue| %>
+      <% next if attr_blacklist.index(" "+attr) %>
+      <td class="arv-object-<%= object.class.to_s %> arv-attr-<%= attr %>">
+        <% if attr == 'uuid' %>
+          <span class="arvados-uuid"><%= attrvalue %></span>
+        <% else %>
+          <%= link_to_if_arvados_object attrvalue, {referring_attr: attr, referring_object: object, with_class_name: true, friendly_name: true} %>
+        <% end %>
+      </td>
+      <% end %>
+      <td>
+        <%= render partial: 'show_home_button', locals: {object:object} %>
+      </td>
+      <td>
+        <%= render partial: 'delete_object_button', locals: {object:object} %>
+      </td>
+    </tr>
+    <% end %>
+  </tbody>
+
+  <tfoot>
+  </tfoot>
+</table>
+
+<% end %>
+
+<%= render partial: "paging", locals: {results: objects, object: @object} %>
+
+<% end %>
diff --git a/apps/workbench/app/views/application/_show_sharing.html.erb b/apps/workbench/app/views/application/_show_sharing.html.erb
new file mode 100644 (file)
index 0000000..8403ee0
--- /dev/null
@@ -0,0 +1,136 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+   uuid_map = {}
+   if @share_links
+     [User, Group].each do |type|
+       type
+         .filter([['uuid','in',@share_links.collect(&:tail_uuid)]])
+         .each do |o|
+         uuid_map[o.uuid] = o
+       end
+     end
+   end
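+   # Map permission link names ("can_read") to display labels ("Read") and
+   # back, and build the option list for the access-level dropdown below.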
+   perm_name_desc_map = {}
+   perm_desc_name_map = {}
+   perms_json = []
+   ['Read', 'Write', 'Manage'].each do |link_desc|
+     link_name = "can_#{link_desc.downcase}"
+     perm_name_desc_map[link_name] = link_desc
+     perm_desc_name_map[link_desc] = link_name
+     perms_json << {value: link_name, text: link_desc}
+   end
+   perms_json = perms_json.to_json
+   choose_filters = {
+     "groups" => [["group_class", "=", "role"]],
+   }
+   if not Rails.configuration.anonymous_user_token
+     # It would be ideal to filter out the anonymous group by UUID,
+     # but that's not readily doable.  Workbench can't generate the
+     # UUID for a != filter, because it can't introspect the API
+     # server's UUID prefix.  And we can't say "uuid not like
+     # %-anonymouspublic", because the API server doesn't support a
+     # "not like" filter.
+     choose_filters["groups"] << ["name", "!=", "Anonymous users"]
+   end
+   choose_filters.default = []
+   owner_icon = fa_icon_class_for_uuid(@object.owner_uuid)
+   if owner_icon == "fa-users"
+     owner_icon = "fa-folder"
+     owner_type = "parent project"
+   else
+     owner_type = "owning user"
+   end
+
+   sharing_path = url_for(:controller => params['controller'], :action => 'share_with')
+%>
+
+<div class="pull-right">
+  <% ["users", "groups"].each do |share_class| %>
+
+  <%= link_to(send("choose_#{share_class}_path",
+      title: "Share with #{share_class}",
+      message: "Only #{share_class} you are allowed to access are shown. Please contact your administrator if you need to be added to a specific group.",
+      by_project: false,
+      preview_pane: false,
+      multiple: true,
+      filters: choose_filters[share_class].to_json,
+      action_method: 'post',
+      action_href: sharing_path,
+      action_name: 'Add',
+      action_data: {selection_param: 'uuids[]', success: 'tab-refresh'}.to_json),
+      class: "btn btn-primary btn-sm", remote: true) do %>
+  <i class="fa fa-fw fa-plus"></i> Share with <%= share_class %>&hellip;
+  <% end %>
+
+  <% end %>
+</div>
+
+<p>Permissions for this <%=@object.class_for_display.downcase%> are inherited from the <%= owner_type %>
+  <i class="fa fa-fw <%= owner_icon %>"></i>
+  <%= link_to_if_arvados_object @object.owner_uuid, friendly_name: true %>.
+</p>
+
+<% if @object.is_a? Repository %>
+<p>
+  Please note that changes to git repository sharing may take up to two minutes to take effect.
+</p>
+<% end %>
+
+<table id="object_sharing" class="topalign table" style="clear: both; margin-top: 1em;">
+  <tr>
+    <th>User/Group Name</th>
+    <th>Email Address</th>
+    <th colspan="2"><%=@object.class_for_display%> Access</th>
+  </tr>
+
+  <% @share_links.andand.each do |link|
+       shared_with = uuid_map[link.tail_uuid]
+       if shared_with.nil?
+         link_name = link.tail_uuid
+       elsif shared_with.respond_to?(:full_name)
+         link_name = shared_with.full_name
+       else
+         link_name = shared_with.name
+       end
+       if shared_with && shared_with.respond_to?(:email)
+         email = shared_with.email
+       end
+  %>
+  <tr data-object-uuid="<%= link.uuid %>">
+    <td>
+      <i class="fa fa-fw <%= fa_icon_class_for_uuid(link.tail_uuid) %>"></i>
+      <%= link_to_if_arvados_object(link.tail_uuid, link_text: link_name) %>
+    </td>
+    <td>
+      <%= email %>
+    </td>
+    <td><%= link_to perm_name_desc_map[link.name], '#', {
+      "data-emptytext" => "Read",
+      "data-placement" => "bottom",
+      "data-type" => "select",
+      "data-url" => url_for(action: "update", id: link.uuid, controller: "links", merge: true),
+      "data-title" => "Set #{link_name}'s access level",
+      "data-name" => "[name]",
+      "data-pk" => {id: link.tail_uuid, key: "link"}.to_json,
+      "data-value" => link.name,
+      "data-clear" => false,
+      "data-source" => perms_json,
+      "data-tpl" => "<select id=\"share_change_level\"></select>",
+      "class" => "editable form-control",
+      } %>
+    </td>
+    <td>
+      <%= link_to(
+          {action: 'destroy', id: link.uuid, controller: "links"},
+          {title: 'Revoke', class: 'btn btn-default btn-nodecorate', method: :delete,
+           data: {confirm: "Revoke #{link_name}'s access to this #{@object.class_for_display.downcase}?",
+                  remote: true}}) do %>
+      <i class="fa fa-fw fa-trash-o"></i>
+      <% end %>
+    </td>
+  </tr>
+  <% end %>
+</table>
diff --git a/apps/workbench/app/views/application/_show_star.html.erb b/apps/workbench/app/views/application/_show_star.html.erb
new file mode 100644 (file)
index 0000000..6256eae
--- /dev/null
@@ -0,0 +1,13 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if current_user and is_starred %>
+  <%= link_to(star_path(status: 'delete', id: @object.uuid, action_method: 'get'), style: "color:#D00", class: "btn btn-xs star-unstar", title: "Remove from list of favorites", remote: true) do %>
+            <i class="fa fa-lg fa-star"></i>
+          <% end %>
+<% else %>
+  <%= link_to(star_path(status: 'create', id: @object.uuid, action_method: 'get'), class: "btn btn-xs star-unstar", title: "Add to list of favorites", remote: true) do %>
+            <i class="fa fa-lg fa-star-o"></i>
+          <% end %>
+<% end %>
diff --git a/apps/workbench/app/views/application/_show_text_with_locators.html.erb b/apps/workbench/app/views/application/_show_text_with_locators.html.erb
new file mode 100644 (file)
index 0000000..b34b4ca
--- /dev/null
@@ -0,0 +1,44 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%# The locators in the given text are expected to be of the form JSON_KEEP_LOCATOR_REGEXP %>
+
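+<%# data_height is an optional local; the self-assignment below defaults it
+    to 100 without raising NameError when the caller omits it. %>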
+<% data_height = data_height || 100 %>
+  <div style="max-height:<%=data_height%>px; overflow:auto;">
+    <% text_data.each_line do |line| %>
+      <% matches = keep_locator_in_json line %>
+
+      <% if matches.nil? or matches.empty? %>
+        <span style="white-space: pre-wrap; margin: none;"><%= line %></span>
+      <% else
+        subs = []
+        matches.uniq.each do |loc|
+          pdh, filename = loc.split('/', 2)
+
+          if object_readable(pdh)
+            # Add PDH link
+            replacement = link_to_arvados_object_if_readable(pdh, pdh, friendly_name: true)
+            if filename
+              link_params = {controller: 'collections', action: 'show_file', uuid: pdh, file: filename}
+              if preview_allowed_for(filename)
+                params = {disposition: 'inline'}
+              else
+                params = {disposition: 'attachment'}
+              end
+              file_link = link_to(raw("/"+filename), link_params.merge(params))
+              # Add file link
+              replacement << file_link
+            end
+            # Add link(s) substitution
+            subs << [loc, replacement]
+          end
+        end
+        # Replace all readable locators with links
+        subs.each do |loc, link|
+          line.gsub!(loc, link)
+        end %>
+        <span style="white-space: pre-wrap; margin: none;"><%= raw line %></span>
+      <% end %>
+    <% end %>
+  </div>
diff --git a/apps/workbench/app/views/application/_svg_div.html.erb b/apps/workbench/app/views/application/_svg_div.html.erb
new file mode 100644 (file)
index 0000000..8a417d9
--- /dev/null
@@ -0,0 +1,41 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= content_for :css do %>
+/* Need separate style for each instance of svg div because javascript will manipulate the properties. */
+#<%= divId %> {
+ padding-left: 3px;
+ overflow: auto;
+ border: solid;
+ border-width: 1px;
+ border-color: gray;
+ position: absolute;
+ left: 25px;
+ right: 25px;
+}
+path:hover {
+stroke-width: 5;
+}
+path {
+stroke-linecap: round;
+}
+<% end %>
+
+<%= content_for :js do %>
+    $(window).on('load', function() {
+      $(window).on('load resize scroll', function () { graph_zoom("<%= divId %>","<%=svgId %>", 1) } );
+    });
+<% end %>
+
+<div id="_<%= divId %>_container">
+  <div style="text-align: right">
+    <a style="cursor: pointer"><span class="glyphicon glyphicon-zoom-out" onclick="graph_zoom('<%= divId %>', '<%= svgId %>', .9)"></span></a>
+    <a style="cursor: pointer"><span class="glyphicon glyphicon-zoom-in" onclick="graph_zoom('<%= divId %>', '<%= svgId %>', 1./.9)"></span></a>
+  </div>
+
+  <div id="<%= divId %>" class="smart-scroll">
+    <span id="_<%= divId %>_center" style="padding-left: 0px"></span>
+    <%= raw(svg) %>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/application/_tab_line_buttons.html.erb b/apps/workbench/app/views/application/_tab_line_buttons.html.erb
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/app/views/application/_title_and_buttons.html.erb b/apps/workbench/app/views/application/_title_and_buttons.html.erb
new file mode 100644 (file)
index 0000000..647243a
--- /dev/null
@@ -0,0 +1,75 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% object_class = @object.class_for_display.downcase %>
+<% content_for :page_title do %>
+  <%= (@object.respond_to?(:properties) and !@object.properties.nil? ? @object.properties[:page_title] : nil) ||
+      @name_link.andand.name ||
+      @object.friendly_link_name %>
+<% end %>
+
+<% content_for :content_top do %>
+  <% if !['Group','User', 'Collection'].include? @object.class.to_s # projects and collections handle it themselves %>
+    <%= render partial: 'name_and_description' %>
+  <% end %>
+<% end %>
+
+<% if @object.class.goes_in_projects? && @object.uuid != current_user.andand.uuid # Not the "Home" project %>
+  <% content_for :tab_line_buttons do %>
+    <% if current_user.andand.is_active %>
+      <%= render partial: 'extra_tab_line_buttons' %>
+    <% end %>
+    <% if current_user.andand.is_active && @object.class.copies_to_projects? %>
+      <%= link_to(
+          choose_projects_path(
+           title: "Copy this #{object_class} to:",
+           action_name: 'Copy',
+           action_href: actions_path,
+           action_method: 'post',
+           action_data: {
+             copy_selections_into_project: true,
+             selection: @name_link.andand.uuid || @object.uuid,
+             selection_param: 'uuid',
+             success: 'redirect-to-created-object'
+           }.to_json),
+          { class: "btn btn-sm btn-primary", remote: true, method: 'get',
+            title: "Make a copy of this #{object_class}" }) do %>
+        <i class="fa fa-fw fa-copy"></i> Copy to project...
+      <% end %>
+    <% end %>
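+    <%# Offer "Move" only when the current user can write to the object's
+        parent; the rescue covers owners the user cannot even read. %>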
+    <% if (ArvadosBase.find(@object.owner_uuid).writable_by.include?(current_user.andand.uuid) rescue nil) %>
+      <%= link_to(
+          choose_projects_path(
+           title: "Move this #{object_class} to:",
+           action_name: 'Move',
+           action_href: actions_path,
+           action_method: 'post',
+           action_data: {
+             move_selections_into_project: true,
+             selection: @name_link.andand.uuid || @object.uuid,
+             selection_param: 'uuid',
+             success: 'redirect-to-created-object'
+           }.to_json),
+          { class: "btn btn-sm btn-primary force-cache-reload", remote: true, method: 'get',
+            title: "Move this #{object_class} to a different project"}) do %>
+        <i class="fa fa-fw fa-truck"></i> Move <%=object_class%>...
+      <% end %>
+    <% end %>
+  <% end %>
+<% end %>
+
+<% unless flash["error"].blank? %>
+<div class="flash-message alert alert-danger" role="alert">
+  <p class="contain-align-left"><%= flash["error"] %></p>
+</div>
+<% flash.delete("error") %>
+<% end %>
+
+<% unless flash.empty? %>
+<div class="flash-message alert alert-warning">
+  <% flash.each do |_, msg| %>
+  <p class="contain-align-left"><%= msg %></p>
+  <% end %>
+</div>
+<% end %>
diff --git a/apps/workbench/app/views/application/api_error.html.erb b/apps/workbench/app/views/application/api_error.html.erb
new file mode 100644 (file)
index 0000000..8f3c69b
--- /dev/null
@@ -0,0 +1,29 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<h2>Oh... fiddlesticks.</h2>
+
+<p>An error occurred when Workbench sent a request to the Arvados API server.  Try reloading this page.  If the problem is temporary, your request might go through next time.
+
+<% if not api_error %>
+</p>
+<% else %>
+If that doesn't work, the information below can help system administrators track down the problem.
+</p>
+
+<dl>
+  <dt>API request URL</dt>
+  <dd><code><%= api_error.request_url %></code></dd>
+
+  <% if api_error.api_response.empty? %>
+  <dt>Invalid API response</dt>
+  <dd><%= api_error.api_response_s %></dd>
+  <% else %>
+  <dt>API response</dt>
+  <dd><pre><%= Oj.dump(api_error.api_response, indent: 2) %></pre></dd>
+  <% end %>
+</dl>
+<% end %>
+
+<%= render :partial => "report_error", :locals => {api_error: api_error, error_type: 'api'} %>
diff --git a/apps/workbench/app/views/application/api_error.json.erb b/apps/workbench/app/views/application/api_error.json.erb
new file mode 100644 (file)
index 0000000..a697490
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+{"errors":<%= raw @errors.to_json %>}
\ No newline at end of file
diff --git a/apps/workbench/app/views/application/destroy.js.erb b/apps/workbench/app/views/application/destroy.js.erb
new file mode 100644 (file)
index 0000000..397acdb
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+$(document).trigger('count-change');
+$('[data-object-uuid=<%= @object.uuid %>]').hide('slow', function() {
+    $(this).remove();
+});
diff --git a/apps/workbench/app/views/application/error.html.erb b/apps/workbench/app/views/application/error.html.erb
new file mode 100644 (file)
index 0000000..e0f579e
--- /dev/null
@@ -0,0 +1,13 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<h2>Oh... fiddlesticks.</h2>
+
+<p>Sorry, I had some trouble handling your request.</p>
+
+<ul>
+<% if @errors.is_a? Array then @errors.each do |error| %>
+<li><%= error %></li>
+<% end end %>
+</ul>
diff --git a/apps/workbench/app/views/application/error.json.erb b/apps/workbench/app/views/application/error.json.erb
new file mode 100644 (file)
index 0000000..a697490
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+{"errors":<%= raw @errors.to_json %>}
\ No newline at end of file
diff --git a/apps/workbench/app/views/application/index.html.erb b/apps/workbench/app/views/application/index.html.erb
new file mode 100644 (file)
index 0000000..7db8559
--- /dev/null
@@ -0,0 +1,17 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :page_title do %>
+<%= controller.controller_name.humanize.capitalize %>
+<% end %>
+
+<% content_for :tab_line_buttons do %>
+
+  <% if controller.model_class.creatable? %>
+    <%= render partial: 'create_new_object_button' %>
+  <% end %>
+
+<% end %>
+
+<%= render partial: 'content', layout: 'content_layout', locals: {pane_list: controller.index_pane_list }%>
diff --git a/apps/workbench/app/views/application/report_issue_popup.js.erb b/apps/workbench/app/views/application/report_issue_popup.js.erb
new file mode 100644 (file)
index 0000000..bd11f9e
--- /dev/null
@@ -0,0 +1,16 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+$("#report-issue-modal-window").html("<%= escape_javascript(render partial: 'report_issue_popup') %>");
+$("#report-issue-modal-window .modal").modal('show');
+
+// Disable the submit button on modal loading
+$submit = $('#report-issue-submit');
+$submit.prop('disabled', true);
+
+// capture events to enable submit button when applicable
+$('#report_issue_text').bind('input propertychange', function() {
+  var problem_desc = document.forms["report-issue-form"]["report_issue_text"].value;
+  $submit.prop('disabled', (problem_desc === null) || (problem_desc === ""));
+});
diff --git a/apps/workbench/app/views/application/show.html.erb b/apps/workbench/app/views/application/show.html.erb
new file mode 100644 (file)
index 0000000..15b2f12
--- /dev/null
@@ -0,0 +1,6 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: 'title_and_buttons' %>
+<%= render partial: 'content', layout: 'content_layout', locals: {pane_list: controller.show_pane_list }%>
diff --git a/apps/workbench/app/views/application/star.js.erb b/apps/workbench/app/views/application/star.js.erb
new file mode 100644 (file)
index 0000000..cbb9834
--- /dev/null
@@ -0,0 +1,6 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+$(".star-unstar").html("<%= escape_javascript(render partial: 'show_star') %>");
+$(".breadcrumbs").html("<%= escape_javascript(render partial: 'breadcrumbs') %>");
diff --git a/apps/workbench/app/views/authorized_keys/create.js.erb b/apps/workbench/app/views/authorized_keys/create.js.erb
new file mode 100644 (file)
index 0000000..4c682c8
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+;
diff --git a/apps/workbench/app/views/authorized_keys/edit.html.erb b/apps/workbench/app/views/authorized_keys/edit.html.erb
new file mode 100644 (file)
index 0000000..9b5bd11
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: 'form' %>
diff --git a/apps/workbench/app/views/collections/_choose.js.erb b/apps/workbench/app/views/collections/_choose.js.erb
new file mode 120000 (symlink)
index 0000000..8420a7f
--- /dev/null
@@ -0,0 +1 @@
+../application/_choose.js.erb
\ No newline at end of file
diff --git a/apps/workbench/app/views/collections/_choose_rows.html.erb b/apps/workbench/app/views/collections/_choose_rows.html.erb
new file mode 100644 (file)
index 0000000..50f7ffe
--- /dev/null
@@ -0,0 +1,28 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
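+<%# Pick a display name: the object's name if set; for unnamed collections,
+    the first file plus a count of the rest; otherwise the bare UUID. %>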
+<% @objects.each do |object| %>
+    <div class="row filterable selectable <%= 'use-preview-selection' if params['use_preview_selection']%>" data-object-uuid="<%= object.uuid %>"
+         data-preview-href="<%= chooser_preview_url_for object, params['use_preview_selection'] %>"
+         style="margin-left: 1em; border-bottom-style: solid; border-bottom-width: 1px; border-bottom-color: #DDDDDD">
+      <i class="fa fa-fw fa-archive"></i>
+      <% if object.respond_to? :name %>
+        <% if not (object.name.nil? or object.name.empty?) %>
+          <%= object.name %>
+        <% elsif object.is_a? Collection and object.files.length > 0 %>
+          <%= object.files[0][1] %>
+          <%= "+ #{object.files.length-1} more" if object.files.length > 1 %>
+        <% else %>
+          <%= object.uuid %>
+        <% end %>
+      <% else %>
+        <%= object.uuid %>
+      <% end %>
+      <% links_for_object(object).each do |tag| %>
+        <% if tag.link_class == 'tag' %>
+          <span class="label label-info"><%= tag.name %></span>
+        <% end %>
+      <% end %>
+    </div>
+<% end %>
diff --git a/apps/workbench/app/views/collections/_create_new_object_button.html.erb b/apps/workbench/app/views/collections/_create_new_object_button.html.erb
new file mode 100644 (file)
index 0000000..2e1ca47
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%# "Create a new collection" would work, but the search filter on collections#index breaks the tab_line_buttons layout. %>
diff --git a/apps/workbench/app/views/collections/_extra_tab_line_buttons.html.erb b/apps/workbench/app/views/collections/_extra_tab_line_buttons.html.erb
new file mode 100644 (file)
index 0000000..5664cb2
--- /dev/null
@@ -0,0 +1,7 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if @object.editable? %>
+  <i class="fa fa-fw fa-lock lock-collection-btn btn btn-primary" title="Unlock collection to edit files"></i>
+<% end %>
diff --git a/apps/workbench/app/views/collections/_index_tbody.html.erb b/apps/workbench/app/views/collections/_index_tbody.html.erb
new file mode 100644 (file)
index 0000000..845c92e
--- /dev/null
@@ -0,0 +1,56 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% @objects.each do |c| %>
+
+<tr class="collection" data-object-uuid="<%= c.uuid %>">
+  <td>
+    <%=
+       friendly_name = c.friendly_link_name
+       @collection_info[c.uuid][:tag_links].each do |tag_link|
+         friendly_name += raw(" <span class='label label-info'>#{tag_link.name}</span>")
+       end
+       render partial: "selection_checkbox", locals: {
+         object: c,
+         friendly_name: friendly_name
+       }
+    %>
+
+    <%= render :partial => "show_object_button", :locals => {object: c, size: 'xs'} %>
+  </td>
+  <td>
+    <%= c.uuid %>
+  </td>
+  <td>
+    <% i = 0 %>
+    <% while i < 3 and i < c.files.length %>
+      <% file = c.files[i] %>
+      <% file_path = "#{file[0]}/#{file[1]}" %>
+      <%= link_to file[1], {controller: 'collections', action: 'show_file', uuid: c.uuid, file: file_path, size: file[2], disposition: 'inline'}, {title: 'View in browser'} %><br />
+      <% i += 1 %>
+    <% end %>
+    <% if i < c.files.length %>
+      &vellip;
+    <% end %>
+  </td>
+  <td>
+    <%= c.created_at.to_s if c.created_at %>
+  </td>
+  <td class="add-tag-button">
+    <a class="btn btn-xs btn-info add-tag-button pull-right" data-remote-href="<%= url_for(controller: 'links', action: 'create') %>" data-remote-method="post"><i class="glyphicon glyphicon-plus"></i>&nbsp;Add</a>
+    <span class="removable-tag-container">
+    <% if @collection_info[c.uuid] %>
+      <% @collection_info[c.uuid][:tag_links].each do |tag_link| %>
+        <span class="label label-info removable-tag" data-tag-link-uuid="<%= tag_link.uuid %>"><%= tag_link.name %>
+          <% if tag_link.owner_uuid == current_user.andand.uuid %>
+          &nbsp;<a title="Delete tag"><i class="glyphicon glyphicon-trash"></i></a>
+          <% end %>
+        </span>&nbsp;
+      <% end %>
+    <% end %>
+    </span>
+  </td>
+</tr>
+
+<% end %>
diff --git a/apps/workbench/app/views/collections/_sharing_button.html.erb b/apps/workbench/app/views/collections/_sharing_button.html.erb
new file mode 100644 (file)
index 0000000..3d8ea3f
--- /dev/null
@@ -0,0 +1,21 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% button_attrs = {
+     class: 'btn btn-xs btn-info',
+     remote: true,
+     method: :post,
+   } %>
+<% if @search_sharing.nil? %>
+  <p>Your API token is not authorized to manage collection sharing links.</p>
+<% elsif @search_sharing.empty? %>
+  <%= button_to("Create sharing link", {action: "share"}, button_attrs) %>
+<% else %>
+  <div>
+    <% button_attrs[:class] += " pull-right" %>
+    <%= button_to("Unshare", {action: "unshare"}, button_attrs) %>
+    Shared at:
+    <div class="smaller-text" style="clear: both; word-break: break-all"><%= link_to download_link, download_link %></div>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/collections/_show_chooser_preview.html.erb b/apps/workbench/app/views/collections/_show_chooser_preview.html.erb
new file mode 100644 (file)
index 0000000..77dacc4
--- /dev/null
@@ -0,0 +1,6 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: "show_source_summary" %>
+<%= render partial: "show_files", locals: {no_checkboxes: true, use_preview_selection: params['use_preview_selection']} %>
diff --git a/apps/workbench/app/views/collections/_show_files.html.erb b/apps/workbench/app/views/collections/_show_files.html.erb
new file mode 100644 (file)
index 0000000..96ddf95
--- /dev/null
@@ -0,0 +1,146 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+  preview_selectable_container = ''
+  preview_selectable = ''
+  padding_left = '1em'
+  if params['use_preview_selection'] == 'true'
+    preview_selectable_container = 'preview-selectable-container selectable-container'
+    preview_selectable = 'preview-selectable selectable'
+    padding_left = '0em'
+  end
+%>
+
+<% object = @object unless object %>
+
+<div class="selection-action-container" style="padding-left: <%=padding_left%>">
+  <% if Collection.creatable? and (!defined? no_checkboxes or !no_checkboxes) %>
+    <div class="row">
+      <div class="pull-left">
+        <div class="btn-group btn-group-sm">
+          <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection... <span class="caret"></span></button>
+          <ul class="dropdown-menu" role="menu">
+            <li><%= link_to "Create new collection with selected files", '#',
+                    method: :post,
+                    'data-href' => combine_selected_path(
+                      action_data: {current_project_uuid: object.owner_uuid}.to_json
+                    ),
+                    'data-selection-param-name' => 'selection[]',
+                    'data-selection-action' => 'combine-collections',
+                    'data-toggle' => 'dropdown'
+              %></li>
+            <% if object.editable? %>
+            <li><%= link_to "Remove selected files", '#',
+                    method: :post,
+                    'data-href' => url_for(controller: 'collections', action: :remove_selected_files),
+                    'data-selection-param-name' => 'selection[]',
+                    'data-selection-action' => 'remove-selected-files',
+                    'data-toggle' => 'dropdown',
+                    'class' => 'btn-remove-selected-files'
+              %></li>
+            <% end %>
+          </ul>
+        </div>
+        <div class="btn-group btn-group-sm">
+          <button id="select-all" type="button" class="btn btn-default" onClick="select_all_items()">Select all</button>
+          <button id="unselect-all" type="button" class="btn btn-default" onClick="unselect_all_items()">Unselect all</button>
+        </div>
+      </div>
+      <div class="pull-right">
+        <input class="form-control filterable-control" data-filterable-target="ul#collection_files" id="file_regex" name="file_regex" placeholder="filename regex" type="text"/>
+      </div>
+    </div>
+    <p/>
+  <% end %>
+
+  <% file_tree = object.andand.files_tree %>
+  <% if file_tree.nil? or file_tree.empty? %>
+    <p>This collection is empty.</p>
+  <% else %>
+    <ul id="collection_files" class="collection_files arv-selectable-items <%=preview_selectable_container%>">
+    <% dirstack = [file_tree.first.first] %>
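+    <%# dirstack holds the chain of directories currently open in the list;
+        when the next entry's dirname stops matching, the loop pops levels
+        and emits the matching </ul></li> pairs to close them. %>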
+    <% file_tree.take(10000).each_with_index do |(dirname, filename, size), index| %>
+      <% file_path = CollectionsHelper::file_path([dirname, filename]) %>
+      <% while dirstack.any? and (dirstack.last != dirname) %>
+        <% dirstack.pop %></ul></li>
+      <% end %>
+      <li>
+      <% if size.nil?  # This is a subdirectory. %>
+        <% dirstack.push(File.join(dirname, filename)) %>
+        <div class="collection_files_row">
+         <div class="collection_files_name"><i class="fa fa-fw fa-folder-open"></i> <%= filename %></div>
+        </div>
+        <ul class="collection_files">
+      <% else %>
+        <% link_params = {controller: 'collections', action: 'show_file',
+                          uuid: object.portable_data_hash, file: file_path, size: size} %>
+         <div class="collection_files_row filterable <%=preview_selectable%>" href="<%=object.uuid%>/<%=file_path%>">
+          <div class="collection_files_buttons pull-right">
+            <%= raw(human_readable_bytes_html(size)) %>
+            <%= link_to(raw('<i class="fa fa-search"></i>'),
+                        link_params.merge(disposition: 'inline'),
+                        {title: "View #{file_path}", class: "btn btn-info btn-sm", disabled: !preview_allowed_for(file_path)}) %>
+            <%= link_to(raw('<i class="fa fa-download"></i>'),
+                        link_params.merge(disposition: 'attachment'),
+                        {title: "Download #{file_path}", class: "btn btn-info btn-sm"}) %>
+          </div>
+
+          <div class="collection_files_name">
+            <% if (!defined? no_checkboxes or !no_checkboxes) and current_user %>
+            <%= check_box_tag 'uuids[]', "#{object.uuid}/#{file_path}", false, {
+                  :class => "persistent-selection",
+                  :friendly_type => "File",
+                  :friendly_name => "#{object.uuid}/#{file_path}",
+                  :href => url_for(controller: 'collections', action: 'show_file',
+                                   uuid: object.portable_data_hash, file: file_path),
+                  :title => "Include #{file_path} in your selections",
+                  :id => "#{object.uuid}_file_#{index}",
+                } %>
+            <span>&nbsp;</span>
+            <% end %>
+
+            <% if object.editable? %>
+                <span class="btn-collection-remove-file-span">
+                <%= link_to({controller: 'collections', action: 'remove_selected_files', id: object.uuid, selection: [object.portable_data_hash+'/'+file_path]}, method: :post, remote: true, data: {confirm: "Remove #{file_path}?", toggle: 'tooltip', placement: 'top'}, class: 'btn btn-sm btn-default btn-nodecorate btn-collection-file-control', title: 'Remove this file') do %>
+                  <i class="fa fa-fw fa-trash-o"></i>
+                <% end %>
+                </span>
+            <% end %>
+        <% if CollectionsHelper::is_image(filename) %>
+            <i class="fa fa-fw fa-bar-chart-o"></i>
+              <% if object.editable? %>
+                <span class="btn-collection-rename-file-span">
+                <%= render_editable_attribute object, 'filename', filename, {'data-value' => file_path, 'data-toggle' => 'manual', 'selection_name' => 'rename-file-path:'+file_path}, {tiptitle: 'Edit name or directory or both for this file', btnclass: 'collection-file-control'} %>
+                </span>
+              <% else %>
+                <%= filename %>
+              <% end %>
+            </div>
+          <div class="collection_files_inline">
+            <%= link_to(image_tag("#{url_for object}/#{file_path}"),
+                        link_params.merge(disposition: 'inline'),
+                        {title: file_path}) %>
+          </div>
+         </div>
+        <% else %>
+              <% if object.editable? %>
+                <i class="fa fa-fw fa-file"></i><span class="btn-collection-rename-file-span"><%= render_editable_attribute object, 'filename', filename, {'data-value' => file_path, 'data-toggle' => 'manual', 'selection_name' => 'rename-file-path:'+file_path}, {tiptitle: 'Edit name or directory or both for this file', btnclass: 'collection-file-control'}  %>
+                </span>
+              <% else %>
+                <i class="fa fa-fw fa-file" href="<%=object.uuid%>/<%=file_path%>" ></i> <%= filename %>
+              <% end %>
+            </div>
+         </div>
+        <% end %>
+        </li>
+      <% end  # if file or directory %>
+    <% end  # file_tree.each %>
+    <%= raw(dirstack.map { |_| "</ul>" }.join("</li>")) %>
+  <% end  # if file_tree %>
+</div>
+
+<% content_for :footer_html do %>
+<div id="collection-sharing-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
+<% end %>
diff --git a/apps/workbench/app/views/collections/_show_provenance_graph.html.erb b/apps/workbench/app/views/collections/_show_provenance_graph.html.erb
new file mode 100644 (file)
index 0000000..84ee5bd
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: 'application/svg_div', locals: {
+    divId: "provenance_graph_div", 
+    svgId: "provenance_svg", 
+    svg: @prov_svg } %>
diff --git a/apps/workbench/app/views/collections/_show_recent.html.erb b/apps/workbench/app/views/collections/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..037c0bf
--- /dev/null
@@ -0,0 +1,65 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="selection-action-container" style="padding-left: 1em">
+  <div class="row">
+    <div class="pull-left">
+      <div class="btn-group btn-group-sm">
+        <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection... <span class="caret"></span></button>
+        <ul class="dropdown-menu" role="menu">
+          <li><%= link_to "Create new collection with selected collections", '#',
+                  method: :post,
+                  'data-href' => combine_selected_path,
+                  'data-selection-param-name' => 'selection[]',
+                  'data-selection-action' => 'combine-collections',
+                  'data-toggle' => 'dropdown'
+            %></li>
+        </ul>
+      </div>
+    </div>
+  </div>
+  <p/>
+
+<%= render partial: "paging", locals: {results: @objects, object: @object} %>
+
+<div style="padding-right: 1em">
+
+<%= form_tag do |f| %>
+
+<table id="collections-index" class="topalign table table-condensed table-fixedlayout"> <!-- table-fixed-header-row -->
+  <colgroup>
+    <col width="10%" />
+    <col width="10%" />
+    <col width="40%" />
+    <col width="10%" />
+    <col width="30%" />
+  </colgroup>
+  <thead>
+    <tr class="contain-align-left">
+      <th></th>
+      <th>uuid</th>
+      <th>contents</th>
+      <th>created at</th>
+      <th>tags</th>
+    </tr>
+  </thead>
+  <tbody>
+    <%= render partial: 'index_tbody' %>
+  </tbody>
+</table>
+
+<% end %>
+
+</div>
+
+<%= render partial: "paging", locals: {results: @objects, object: @object} %>
+
+<% content_for :footer_js do %>
+$(document).on('click', 'form[data-remote] input[type=submit]', function() {
+  $('table#collections-index tbody').fadeTo(200, 0.3);
+  return true;
+});
+<% end %>
+
+</div>
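
The "Selection..." dropdown above drives Workbench's selection actions through data attributes: data-href names the endpoint, data-selection-param-name the form parameter, and data-selection-action the client-side handler. A rough sketch of that dispatch in jQuery, assuming the checked persistent-selection checkboxes carry the selected UUIDs as their values (the wiring here is illustrative, not Workbench's actual handler):

    $(document).on('click', '[data-selection-action]', function(e) {
        e.preventDefault();
        var link = $(this);
        // Collect the values of every checked persistent-selection box.
        var params = $(':checkbox.persistent-selection:checked').map(function() {
            return { name: link.data('selection-param-name'), value: this.value };
        }).get();
        // POSTs e.g. selection[]=uuid1&selection[]=uuid2 to the action's href.
        $.post(link.data('href'), $.param(params));
    });
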
diff --git a/apps/workbench/app/views/collections/_show_source_summary.html.erb b/apps/workbench/app/views/collections/_show_source_summary.html.erb
new file mode 100644 (file)
index 0000000..398742e
--- /dev/null
@@ -0,0 +1,43 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<p><i>Content size:</i><br />
+  <%= pluralize(@object.manifest.files_count, "file") %> totalling
+  <%= raw(human_readable_bytes_html(@object.manifest.files_size)) %></p>
+
+<% if not (@output_of.andand.any? or @log_of.andand.any?) %>
+  <p><i>No source information available.</i></p>
+<% end %>
+
+<% if @output_of.andand.any? %>
+  <% pipelines = PipelineInstance.limit(5).filter([["components", "like", "%#{@object.uuid}%"]]) %>
+  <%
+    message = "This collection was the output of the following:"
+    if pipelines.items_available > pipelines.results.size
+      message += ' (' + (pipelines.items_available - pipelines.results.size).to_s + ' more results are not shown)'
+    end
+  %>
+  <p><i><%= message %></i><br />
+    <% pipelines.each do |pipeline| %>
+      <% pipeline.components.each do |cname, c| %>
+        <% if c[:output_uuid] == @object.uuid %>
+          <b><%= cname %></b> component of <b><%= link_to_if_arvados_object(pipeline, friendly_name: true) %></b>
+          <% if c.andand[:job].andand[:finished_at] %>
+            finished at <%= render_localized_date(c[:job][:finished_at]) %>
+          <% end %>
+          <br>
+        <% end %>
+      <% end %>
+    <% end %>
+  </p>
+<% end %>
+
+<% if @log_of.andand.any? %>
+  <p><i>This collection contains log messages from:</i><br />
+    <%= render_arvados_object_list_start(@log_of, 'Show all jobs',
+                                         jobs_path(filters: [['log', '=', @object.portable_data_hash]].to_json)) do |job| %>
+      <%= link_to_if_arvados_object(job, friendly_name: true) %><br />
+    <% end %>
+  </p>
+<% end %>
diff --git a/apps/workbench/app/views/collections/_show_tags.html.erb b/apps/workbench/app/views/collections/_show_tags.html.erb
new file mode 100644 (file)
index 0000000..3e0460a
--- /dev/null
@@ -0,0 +1,12 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+  <div class="arv-log-refresh-control"
+    data-load-throttle="86486400000" <%# 1001 nights (in milliseconds) %>
+    ></div>
+
+  <div class="collection-tags-container" style="padding-left:2em;padding-right:2em;">
+    <div data-mount-mithril="TagEditorApp" data-target-controller="<%= controller_name %>" data-target-uuid="<%= @object.uuid %>" data-target-editable="<%= @object.editable? %>"></div>
+  </div>
\ No newline at end of file
diff --git a/apps/workbench/app/views/collections/_show_upload.html.erb b/apps/workbench/app/views/collections/_show_upload.html.erb
new file mode 100644 (file)
index 0000000..5805fec
--- /dev/null
@@ -0,0 +1,70 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="arv-log-refresh-control"
+     data-load-throttle="86486400000" <%# 1001 nights (in milliseconds) %>
+     ></div>
+<div ng-cloak ng-controller="UploadToCollection" arv-uuid="<%= @object.uuid %>">
+  <div class="panel panel-primary">
+    <div class="panel-body">
+      <div class="row">
+        <div class="col-sm-4">
+          <input type="file" multiple id="file_selector" ng-model="incoming" onchange="angular.element(this).scope().addFilesToQueue(this.files); $(this).val('');">
+          <div class="btn-group btn-group-sm" role="group" style="margin-top: 1.5em">
+            <button type="button" class="btn btn-default" ng-click="stop()" ng-disabled="uploader.state !== 'Running'"><i class="fa fa-fw fa-pause"></i> Pause</button>
+            <button type="button" class="btn btn-primary" ng-click="go()" ng-disabled="uploader.state === 'Running' || countInStates(['Paused', 'Queued']) === 0"><i class="fa fa-fw fa-play"></i> Start</button>
+          </div>
+        </div>
+        <div class="col-sm-8">
+          <div ng-show="uploader.state === 'Running'"
+               class="alert alert-info"
+               ><i class="fa fa-gear"></i>
+            Upload in progress.
+            <span ng-show="countInStates(['Done']) > 0">
+              {{countInStates(['Done'])}} file{{countInStates(['Done'])>1?'s':''}} finished.
+            </span>
+          </div>
+          <div ng-show="uploader.state === 'Idle' && uploader.stateReason"
+               class="alert alert-success"
+               ><i class="fa fa-fw fa-flag-checkered"></i> &nbsp; {{uploader.stateReason}}
+          </div>
+          <div ng-show="uploader.state === 'Failed'"
+               class="alert alert-danger"
+               ><i class="fa fa-fw fa-warning"></i> &nbsp; {{uploader.stateReason}}
+          </div>
+          <div ng-show="uploader.state === 'Stopped'"
+               class="alert alert-info"
+               ><i class="fa fa-fw fa-info"></i> &nbsp; Paused. Click the Start button to resume uploading.
+          </div>
+        </div>
+      </div>
+    </div>
+  </div>
+  <div ng-repeat="upload in uploadQueue" class="row" ng-class="{lighten: upload.state==='Done'}">
+    <div class="col-sm-1">
+      <button class="btn btn-xs btn-default"
+              ng-show="upload.state!=='Done'"
+              ng-click="removeFileFromQueue($index)"
+              title="cancel"><i class="fa fa-fw fa-times"></i></button>
+      <span class="label label-success label-info"
+            ng-show="upload.state==='Done'">finished</span>
+    </div>
+    <div class="col-sm-4 nowrap" style="overflow-x:hidden;text-overflow:ellipsis">
+      <span title="{{upload.file.name}}">
+        {{upload.file.name}}
+      </span>
+    </div>
+    <div class="col-sm-1" style="text-align: right">
+      {{upload.file.size/1024 | number:0}}&nbsp;KiB
+    </div>
+    <div class="col-sm-2">
+      <div class="progress">
+        <span class="progress-bar" style="width: {{upload.progress}}%"></span>
+      </div>
+    </div>
+    <div class="col-sm-4" ng-class="{lighten: upload.state !== 'Uploading'}">
+      {{upload.statistics}}
+    </div>
+  </div>
+</div>
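
The Pause/Start buttons and status alerts above all key off the uploader's state plus a countInStates helper that tallies queue entries by state. A minimal sketch of that helper, assuming each queue entry exposes a state string as the template bindings suggest:

    // Count upload-queue entries whose state matches any of the given
    // states, e.g. countInStates(['Paused', 'Queued']) > 0 enables Start.
    function countInStates(states, uploadQueue) {
        return uploadQueue.filter(function(upload) {
            return states.indexOf(upload.state) >= 0;
        }).length;
    }
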
diff --git a/apps/workbench/app/views/collections/_show_used_by.html.erb b/apps/workbench/app/views/collections/_show_used_by.html.erb
new file mode 100644 (file)
index 0000000..a7ec57d
--- /dev/null
@@ -0,0 +1,9 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: 'application/svg_div', locals: {
+    divId: "used_by_graph", 
+    svgId: "used_by_svg", 
+    svg: @used_by_svg } %>
+
diff --git a/apps/workbench/app/views/collections/graph.html.erb b/apps/workbench/app/views/collections/graph.html.erb
new file mode 100644 (file)
index 0000000..9d8e540
--- /dev/null
@@ -0,0 +1,195 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%#= render :partial => 'nav' %>
+<table class="table table-bordered">
+  <tbody>
+    <tr>
+      <td class="d3">
+      </td>
+    </tr>
+  </tbody>
+</table>
+
+<% content_for :head do %>
+<%= javascript_include_tag '/d3.v3.min.js' %>
+
+    <style type="text/css">
+
+path.link {
+  fill: none;
+  stroke: #666;
+  stroke-width: 1.5px;
+}
+
+path.link.derived_from {
+  stroke: green;
+  stroke-dasharray: 0,4 1;
+}
+
+path.link.can_write {
+  stroke: green;
+}
+
+path.link.member_of {
+  stroke: blue;
+  stroke-dasharray: 0,4 1;
+}
+
+path.link.created {
+  stroke: red;
+}
+
+circle.node {
+  fill: #ccc;
+  stroke: #333;
+  stroke-width: 1.5px;
+}
+
+edgetext {
+  font: 12px sans-serif;
+  pointer-events: none;
+    text-align: center;
+}
+
+text {
+  font: 12px sans-serif;
+  pointer-events: none;
+}
+
+text.shadow {
+  stroke: #fff;
+  stroke-width: 3px;
+  stroke-opacity: .8;
+}
+
+    </style>
+<% end %>
+
+<% content_for :js do %>
+
+jQuery(function($){
+
+    var links = <%= raw d3ify_links(@links).to_json %>;
+
+    var nodes = {};
+
+    // Compute the distinct nodes from the links.
+    links.forEach(function(link) {
+       link.source = nodes[link.source] || (nodes[link.source] = {name: link.source});
+       link.target = nodes[link.target] || (nodes[link.target] = {name: link.target});
+    });
+
+    var fill_for = {'ldvyl': 'green',
+                   'j58dm': 'red',
+                   '4zz18': 'blue'};
+    jQuery.each(nodes, function(i, node) {
+       var m = node.name.match(/-([a-z0-9]{5})-/)
+       if (m)
+           node.fill = fill_for[m[1]] || '#ccc';
+       else if (node.name.match(/^[0-9a-f]{32}/))
+           node.fill = fill_for['4zz18'];
+       else
+           node.fill = '#ccc';
+    });
+
+    var w = 960,
+    h = 600;
+
+    var force = d3.layout.force()
+       .nodes(d3.values(nodes))
+       .links(links)
+       .size([w, h])
+       .linkDistance(150)
+       .charge(-300)
+       .on("tick", tick)
+       .start();
+
+    var svg = d3.select("td.d3").append("svg:svg")
+       .attr("width", w)
+       .attr("height", h);
+
+    // Per-type markers, as they don't inherit styles.
+    svg.append("svg:defs").selectAll("marker")
+       .data(["member_of", "owner", "derived_from"])
+       .enter().append("svg:marker")
+       .attr("id", String)
+       .attr("viewBox", "0 -5 10 10")
+       .attr("refX", 15)
+       .attr("refY", -1.5)
+       .attr("markerWidth", 6)
+       .attr("markerHeight", 6)
+       .attr("orient", "auto")
+       .append("svg:path")
+       .attr("d", "M0,-5L10,0L0,5");
+
+    var path = svg.append("svg:g").selectAll("path")
+       .data(force.links())
+       .enter().append("svg:path")
+       .attr("class", function(d) { return "link " + d.type; })
+       .attr("marker-end", function(d) { return "url(#" + d.type + ")"; });
+
+    var circle = svg.append("svg:g").selectAll("circle")
+       .data(force.nodes())
+       .enter().append("svg:circle")
+       .attr("r", 6)
+       .style("fill", function(d) { return d.fill; })
+       .call(force.drag);
+
+    var text = svg.append("svg:g").selectAll("g")
+       .data(force.nodes())
+       .enter().append("svg:g");
+
+    // A copy of the text with a thick white stroke for legibility.
+    text.append("svg:text")
+       .attr("x", 8)
+       .attr("y", ".31em")
+       .attr("class", "shadow")
+       .text(function(d) { return d.name.replace(/^([0-9a-z]{5}-){2}/,''); });
+
+    text.append("svg:text")
+       .attr("x", 8)
+       .attr("y", ".31em")
+       .text(function(d) { return d.name.replace(/^([0-9a-z]{5}-){2}/,''); });
+
+    var edgetext = svg.append("svg:g").selectAll("g")
+       .data(force.links())
+       .enter().append("svg:g");
+
+    edgetext
+       .append("svg:text")
+       .attr("x","-5em")
+       .attr("y","-0.2em")
+       .text(function(d) { return d.type; });
+
+    // Use elliptical arc path segments to doubly-encode directionality.
+    function tick() {
+       path.attr("d", function(d) {
+           var dx = d.target.x - d.source.x,
+            dy = d.target.y - d.source.y,
+            // dr = Math.sqrt(dx * dx + dy * dy);
+            dr = 0;
+           return "M" + d.source.x + "," + d.source.y + "A" + dr + "," + dr + " 0 0,1 " + d.target.x + "," + d.target.y;
+       });
+
+       circle.attr("transform", function(d) {
+           return "translate(" + d.x + "," + d.y + ")";
+       });
+
+       text.attr("transform", function(d) {
+           return "translate(" + d.x + "," + d.y + ")";
+       });
+
+       edgetext.attr("transform", function(d) {
+           return "translate(" +
+               (d.source.x + d.target.x)/2 + "," +
+               (d.source.y + d.target.y)/2 +
+               ")rotate(" +
+               (Math.atan2(d.target.y - d.source.y, d.target.x - d.source.x) * 180 / Math.PI) +
+               ")";
+       });
+    }
+
+})(jQuery);
+<% end %>
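
The tick handler above emits each link as an SVG arc path, but with dr pinned to 0 the "A" command degenerates to a straight segment. Restoring the commented-out distance computation bends each link into an elliptical arc, which keeps two opposite-direction links between the same pair of nodes visually distinct. A sketch of the curved variant, using the same d.source/d.target shape that d3.layout.force produces:

    function arcPath(d) {
        var dx = d.target.x - d.source.x,
            dy = d.target.y - d.source.y,
            // Radius equal to the node distance gives a gentle, readable bend.
            dr = Math.sqrt(dx * dx + dy * dy);
        return "M" + d.source.x + "," + d.source.y +
               "A" + dr + "," + dr + " 0 0,1 " +
               d.target.x + "," + d.target.y;
    }
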
diff --git a/apps/workbench/app/views/collections/hash_matches.html.erb b/apps/workbench/app/views/collections/hash_matches.html.erb
new file mode 100644 (file)
index 0000000..ba2a443
--- /dev/null
@@ -0,0 +1,33 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+  message = "The following collections have this content:"
+  if @same_pdh.items_available > @same_pdh.results.size
+    message += ' (' + (@same_pdh.items_available - @same_pdh.results.size).to_s + ' more results are not shown)'
+  end
+%>
+<div class="row">
+  <div class="col-md-10 col-md-offset-1">
+    <div class="panel panel-info">
+      <div class="panel-heading">
+        <h3 class="panel-title"><%= params["uuid"] %></h3>
+      </div>
+      <div class="panel-body">
+        <p><i><%= message %></i></p>
+        <% @same_pdh.sort { |a,b| b.created_at <=> a.created_at }.each do |c| %>
+          <div class="row">
+            <div class="col-md-8">
+              <% owner = object_for_dataclass(Group, c.owner_uuid) || object_for_dataclass(User, c.owner_uuid) %>
+              <%= link_to_if_arvados_object owner, {:friendly_name => true} %> / <%= link_to_if_arvados_object c, {:friendly_name => true} %><br>
+            </div>
+            <div class="col-md-4">
+              <%= render_localized_date c.created_at %>
+            </div>
+          </div>
+        <% end %>
+      </div>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/collections/index.html.erb b/apps/workbench/app/views/collections/index.html.erb
new file mode 100644 (file)
index 0000000..e1285e8
--- /dev/null
@@ -0,0 +1,18 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :tab_line_buttons do %>
+ <%= form_tag collections_path, method: 'get', remote: true, class: 'form-search' do %>
+ <div class="input-group">
+   <%= text_field_tag :search, params[:search], class: 'form-control', placeholder: 'Search collections' %>
+   <span class="input-group-btn">
+     <%= button_tag(class: 'btn btn-info') do %>
+     <span class="glyphicon glyphicon-search"></span>
+     <% end %>
+   </span>
+ </div>
+ <% end %>
+<% end %>
+
+<%= render file: 'application/index.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/app/views/collections/index.js.erb b/apps/workbench/app/views/collections/index.js.erb
new file mode 100644 (file)
index 0000000..3e91c01
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+if(history.replaceState)
+    history.replaceState(null, null, "<%= escape_javascript(@request_url) %>");
+$('table#collections-index tbody').html("<%= escape_javascript(render partial: 'index_tbody') %>");
+$('table#collections-index tbody').fadeTo(200, 1.0);
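
Together with the footer_js hook in _show_recent.html.erb, this response completes a fade-and-replace refresh: submitting the remote search form dims the table body, and the JS response swaps in fresh rows, restores full opacity, and rewrites the location bar so the filtered view stays bookmarkable. A condensed sketch of the response side (the function name is illustrative):

    function applySearchResults(rowsHtml, newUrl) {
        if (history.replaceState)
            history.replaceState(null, null, newUrl);  // keep the URL shareable
        $('table#collections-index tbody')
            .html(rowsHtml)      // replace the dimmed rows
            .fadeTo(200, 1.0);   // undo the fade started at submit time
    }
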
diff --git a/apps/workbench/app/views/collections/sharing_popup.js.erb b/apps/workbench/app/views/collections/sharing_popup.js.erb
new file mode 100644 (file)
index 0000000..2975d51
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+$("#sharing-button").html("<%= escape_javascript(render partial: 'sharing_button') %>");
diff --git a/apps/workbench/app/views/collections/show.html.erb b/apps/workbench/app/views/collections/show.html.erb
new file mode 100644 (file)
index 0000000..5671266
--- /dev/null
@@ -0,0 +1,90 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="row row-fill-height">
+  <div class="col-md-7">
+    <div class="panel panel-info">
+      <div class="panel-heading">
+        <h3 class="panel-title">
+          <%= if @object.respond_to? :name
+                render_editable_attribute @object, :name
+              elsif @name_link
+                @name_link.name
+              else
+                @object.uuid
+              end %>
+        </h3>
+      </div>
+      <div class="panel-body">
+        <div class="arv-description-as-subtitle">
+          <%= render_editable_attribute @object, 'description', nil, { 'data-emptytext' => "(No description provided)", 'data-toggle' => 'manual' } %>
+        </div>
+        <img src="/favicon.ico" class="pull-right" alt="" style="opacity: 0.3"/>
+        <p><i>Collection UUID:</i><br />
+          <%= render partial: "show_autoselect_text", locals: {text: @object.uuid, tagclass: "arvados-uuid"} %>
+        </p>
+        <p><i>Content address:</i><br />
+          <%= render partial: "show_autoselect_text", locals: {text: @object.portable_data_hash, tagclass: "arvados-uuid"} %>
+        </p>
+        <%= render partial: "show_source_summary" %>
+      </div>
+    </div>
+  </div>
+  <% if current_user %>
+  <div class="col-md-5">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        <h3 class="panel-title">
+          Sharing and permissions
+        </h3>
+      </div>
+      <div class="panel-body">
+        <!--
+        <input type="text" class="form-control" placeholder="Search"/>
+        -->
+
+        <div id="sharing-button">
+          <%= render partial: 'sharing_button' %>
+        </div>
+
+        <div style="height:0.5em;"></div>
+        <% if @projects.andand.any? %>
+          <p>Included in projects:<br />
+          <%= render_arvados_object_list_start(@projects, 'Show all projects',
+                links_path(filters: [['head_uuid', '=', @object.uuid],
+                                     ['link_class', '=', 'name']].to_json)) do |project| %>
+            <%= link_to_if_arvados_object(project, friendly_name: true) %><br />
+          <% end %>
+          </p>
+        <% end %>
+        <% if @permissions.andand.any? %>
+          <p>Readable by:<br />
+          <%= render_arvados_object_list_start(@permissions, 'Show all permissions',
+                links_path(filters: [['head_uuid', '=', @object.uuid],
+                                    ['link_class', '=', 'permission']].to_json)) do |link| %>
+          <%= link_to_if_arvados_object(link.tail_uuid, friendly_name: true) %><br />
+          <% end %>
+          </p>
+        <% end %>
+
+      </div>
+    </div>
+  </div>
+  <% else %>
+  <div class="col-md-5">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        <h3 class="panel-title">
+          Welcome to Arvados
+        </h3>
+      </div>
+      <div class="panel-body">
+        You are accessing public data.
+      </div>
+    </div>
+  </div>
+  <% end %>
+</div>
+
+<%= render file: 'application/show.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/app/views/collections/show_file_links.html.erb b/apps/workbench/app/views/collections/show_file_links.html.erb
new file mode 100644 (file)
index 0000000..8a2ce6b
--- /dev/null
@@ -0,0 +1,86 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<!DOCTYPE html>
+<html>
+<% coll_name = "Collection #{@object.uuid}" %>
+<% link_opts = {controller: 'collections', action: 'show_file',
+                uuid: @object.uuid, reader_token: params[:reader_token]} %>
+<head>
+  <meta charset="utf-8">
+  <title>
+    <%= coll_name %> / <%= Rails.configuration.site_name %>
+  </title>
+  <meta name="description" content="">
+  <meta name="author" content="">
+  <meta name="robots" content="NOINDEX">
+  <style type="text/css">
+body {
+  margin: 1.5em;
+}
+pre {
+  background-color: #D9EDF7;
+  border-radius: .25em;
+  padding: .75em;
+  overflow: auto;
+}
+.footer {
+  font-size: 82%;
+}
+.footer h2 {
+  font-size: 1.2em;
+}
+  </style>
+</head>
+<body>
+
+<h1><%= coll_name %></h1>
+
+<p>This collection of data files is being shared with you through
+Arvados.  You can download individual files listed below.  To download
+the entire collection with wget, try:</p>
+
+<pre>$ wget --mirror --no-parent --no-host --cut-dirs=3 <%=
+         url_for(link_opts.merge(action: 'show_file_links', only_path: false,
+                                 trailing_slash: true))
+       %></pre>
+
+<h2>File Listing</h2>
+
+<% file_tree = @object.andand.files_tree %>
+<% if file_tree.andand.any? %>
+  <ul id="collection_files" class="collection_files">
+  <% dirstack = [file_tree.first.first] %>
+  <% file_tree.take(10000).each_with_index do |(dirname, filename, size), index| %>
+    <% file_path = CollectionsHelper::file_path([dirname, filename]) %>
+    <% while dirstack.any? and (dirstack.last != dirname) %>
+      <% dirstack.pop %></ul></li>
+    <% end %>
+    <li>
+    <% if size.nil?  # This is a subdirectory. %>
+      <% dirstack.push(File.join(dirname, filename)) %>
+      <%= filename %>
+      <ul class="collection_files">
+    <% else %>
+      <%= link_to(filename,
+                  link_opts.merge(file: file_path),
+                  {title: "Download #{file_path}"}) %>
+      </li>
+    <% end %>
+  <% end %>
+  <%= raw(dirstack.map { |_| "</ul>" }.join("</li>")) %>
+<% else %>
+  <p>No files in this collection.</p>
+<% end %>
+
+<div class="footer">
+<h2>About Arvados</h2>
+
+<p>Arvados is a free and open source bioinformatics platform.
+To learn more, visit arvados.org.
+Arvados is not responsible for the files listed on this page.</p>
+</div>
+
+</body>
+</html>
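
The listing above flattens the collection's files_tree into (dirname, filename, size) tuples and rebuilds the nesting with a directory stack, closing lists whenever the current dirname no longer matches the top of the stack. The same stack walk, sketched in JavaScript under the assumption that directory entries (size === null) precede their contents, as in the ERB:

    // entries: [dirname, filename, sizeOrNull] tuples in tree order.
    function renderTree(entries) {
        if (!entries.length) return '';
        var out = [], dirstack = [entries[0][0]];
        entries.forEach(function(e) {
            var dirname = e[0], filename = e[1], size = e[2];
            // Pop back out of any directories we have finished.
            while (dirstack.length && dirstack[dirstack.length - 1] !== dirname) {
                dirstack.pop();
                out.push('</ul></li>');
            }
            if (size === null) {  // subdirectory: open a nested list
                dirstack.push(dirname + '/' + filename);
                out.push('<li>' + filename + '<ul>');
            } else {
                out.push('<li>' + filename + '</li>');
            }
        });
        dirstack.forEach(function() { out.push('</ul></li>'); });
        return out.join('');
    }
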
diff --git a/apps/workbench/app/views/container_requests/_extra_tab_line_buttons.html.erb b/apps/workbench/app/views/container_requests/_extra_tab_line_buttons.html.erb
new file mode 100644 (file)
index 0000000..b698c93
--- /dev/null
@@ -0,0 +1,48 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if @object.state == 'Final' %>
+<script type="application/javascript">
+  function reset_form_cr_reuse() {
+    $('#use_existing').removeAttr('checked');
+  }
+</script>
+
+  <%= link_to raw('<i class="fa fa-fw fa-play"></i> Re-run...'),
+      "#",
+      {class: 'btn btn-sm btn-primary', 'data-toggle' => 'modal',
+       'data-target' => '#clone-and-edit-modal-window',
+       title: 'This will make a copy and take you there. You can then make any needed changes and run it'}  %>
+
+<div id="clone-and-edit-modal-window" class="modal fade" role="dialog"
+     aria-labelledby="myModalLabel" aria-hidden="true">
+  <div class="modal-dialog">
+    <div class="modal-content">
+
+    <%= form_tag copy_container_request_path do |f| %>
+
+      <div class="modal-header">
+        <button type="button" class="close" onClick="reset_form_cr_reuse()" data-dismiss="modal" aria-hidden="true">&times;</button>
+        <div>
+          <div class="col-sm-6"> <h4 class="modal-title">Re-run container request</h4> </div>
+        </div>
+        <br/>
+      </div>
+
+      <div class="modal-body">
+              <%= check_box_tag(:use_existing, "true", false) %>
+              <%= label_tag(:use_existing, "Enable container reuse") %>
+      </div>
+
+      <div class="modal-footer">
+        <button class="btn btn-default" onClick="reset_form_cr_reuse()" data-dismiss="modal" aria-hidden="true">Cancel</button>
+        <button type="submit" class="btn btn-primary" name="container_request[state]" value="Uncommitted">Copy and edit inputs</button>
+      </div>
+
+    </div>
+    <% end %>
+  </div>
+</div>
+
+<% end %>
diff --git a/apps/workbench/app/views/container_requests/_name_and_description.html.erb b/apps/workbench/app/views/container_requests/_name_and_description.html.erb
new file mode 100644 (file)
index 0000000..085ba83
--- /dev/null
@@ -0,0 +1,25 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+  wu = @object.work_unit
+  template_uuid = wu.template_uuid
+  template = Workflow.find?(template_uuid) if template_uuid
+  div_class = "col-sm-12"
+  div_class = "col-sm-6" if template
+%>
+
+<div class="<%=div_class%>">
+  <%= render partial: 'object_name' %>
+  <%= render partial: 'object_description' %>
+</div>
+
+<% if template %>
+  <div class="alert alert-info <%=div_class%>">
+     This container request was created from the workflow <%= link_to_if_arvados_object template, friendly_name: true %><br />
+     <% if template.modified_at && (template.modified_at > @object.created_at) %>
+        Note: This workflow has been modified since this container request was created.
+     <% end %>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/container_requests/_show_inputs.html.erb b/apps/workbench/app/views/container_requests/_show_inputs.html.erb
new file mode 100644 (file)
index 0000000..fd8e363
--- /dev/null
@@ -0,0 +1,53 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+n_inputs = if @object.mounts[:"/var/lib/cwl/workflow.json"] && @object.mounts[:"/var/lib/cwl/cwl.input.json"]
+             cwl_inputs_required(@object, get_cwl_inputs(@object.mounts[:"/var/lib/cwl/workflow.json"][:content]), [:mounts, :"/var/lib/cwl/cwl.input.json", :content])
+           else
+             0
+           end
+%>
+
+<% content_for :pi_input_form do %>
+<form role="form" style="width:60%">
+  <div class="form-group">
+    <% workflow = @object.mounts[:"/var/lib/cwl/workflow.json"].andand[:content] %>
+    <% if workflow %>
+      <% inputs = get_cwl_inputs(workflow) %>
+      <% inputs.each do |input| %>
+        <label for="#input-<%= cwl_shortname(input[:id]) %>">
+          <%= input[:label] || cwl_shortname(input[:id]) %>
+        </label>
+        <div>
+          <p class="form-control-static">
+            <%= render_cwl_input @object, input, [:mounts, :"/var/lib/cwl/cwl.input.json", :content] %>
+          </p>
+        </div>
+        <p class="help-block">
+          <%= input[:doc] %>
+        </p>
+      <% end %>
+    <% end %>
+  </div>
+</form>
+<% end %>
+
+<% if n_inputs == 0 %>
+  <p><i>This workflow does not need any further inputs specified.  Click the "Run" button at the bottom of the page to start the workflow.</i></p>
+<% else %>
+  <p><i>Provide <%= n_inputs > 1 ? 'values' : 'a value' %> for the following <%= n_inputs > 1 ? 'parameters' : 'parameter' %>, then click the "Run" button to start the workflow.</i></p>
+<% end %>
+
+<% if @object.editable? %>
+  <%= content_for :pi_input_form %>
+  <%= link_to(url_for('container_request[state]' => 'Committed'),
+        class: 'btn btn-primary run-pipeline-button',
+        method: :patch
+        ) do %>
+    Run <i class="fa fa-fw fa-play"></i>
+  <% end %>
+<% end %>
+
+<%= render_unreadable_inputs_present %>
diff --git a/apps/workbench/app/views/container_requests/_show_log.html.erb b/apps/workbench/app/views/container_requests/_show_log.html.erb
new file mode 100644 (file)
index 0000000..ec529aa
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render(partial: 'work_units/show_log', locals: {obj: @object, name: @object[:name] || 'this container'}) %>
diff --git a/apps/workbench/app/views/container_requests/_show_object_description_cell.html.erb b/apps/workbench/app/views/container_requests/_show_object_description_cell.html.erb
new file mode 100644 (file)
index 0000000..2df207a
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="nowrap">
+  <%= object.content_summary %><br />
+  <%= render partial: 'container_requests/state_label', locals: {object: object} %>
+</div>
diff --git a/apps/workbench/app/views/container_requests/_show_provenance.html.erb b/apps/workbench/app/views/container_requests/_show_provenance.html.erb
new file mode 100644 (file)
index 0000000..d9c1273
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: 'application/svg_div', locals: {
+      divId: "provenance_graph",
+      svgId: "provenance_svg",
+      svg: @svg } %>
diff --git a/apps/workbench/app/views/container_requests/_show_recent.html.erb b/apps/workbench/app/views/container_requests/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..6cdd8a4
--- /dev/null
@@ -0,0 +1,41 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= form_tag({}, {id: "containerRequests"}) do |f| %>
+
+<table class="table table-condensed table-fixedlayout arv-recent-container-requests">
+  <colgroup>
+    <col width="10%" />
+    <col width="20%" />
+    <col width="20%" />
+    <col width="15%" />
+    <col width="15%" />
+    <col width="15%" />
+    <col width="5%" />
+  </colgroup>
+  <thead>
+    <tr class="contain-align-left">
+      <th>
+        Status
+      </th><th>
+        Name
+      </th><th>
+        Description
+      </th><th>
+        Workflow
+      </th><th>
+        Owner
+      </th><th>
+        Created at
+      </th><th>
+      </th>
+    </tr>
+  </thead>
+
+  <tbody data-infinite-scroller="#recent-container-requests" id="recent-container-requests"
+         data-infinite-content-href="<%= url_for partial: :recent_rows %>" >
+  </tbody>
+</table>
+
+<% end %>
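
The empty tbody above is filled by Workbench's infinite-scroll machinery: data-infinite-scroller names the scrolling element to watch and data-infinite-content-href the URL that returns the next batch of rows. A minimal sketch of that pattern, assuming jQuery and a server that names the following page in a response header (the header name is an assumption, not Workbench's actual protocol):

    function attachInfiniteScroll($scroller, nextHref) {
        var loading = false;
        $scroller.on('scroll', function() {
            // Fetch more rows when the user nears the bottom.
            if (loading || !nextHref) return;
            if (this.scrollTop + this.clientHeight < this.scrollHeight - 200) return;
            loading = true;
            $.get(nextHref).done(function(rowsHtml, status, jqxhr) {
                $scroller.append(rowsHtml);
                nextHref = jqxhr.getResponseHeader('X-Next-Page-Href');  // assumed convention
                loading = false;
            });
        });
    }
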
diff --git a/apps/workbench/app/views/container_requests/_show_recent_rows.html.erb b/apps/workbench/app/views/container_requests/_show_recent_rows.html.erb
new file mode 100644 (file)
index 0000000..0212162
--- /dev/null
@@ -0,0 +1,40 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+  containers = @objects.map(&:container_uuid).compact.uniq
+  preload_objects_for_dataclass(Container, containers) if containers.any?
+
+  workflows = @objects.collect {|o| o.properties[:template_uuid]}.compact.uniq
+  preload_objects_for_dataclass(Workflow, workflows) if workflows.any?
+
+  owner_uuids = @objects.map(&:owner_uuid).compact.uniq
+  preload_objects_for_dataclass(User, owner_uuids) if owner_uuids.any?
+  preload_objects_for_dataclass(Group, owner_uuids) if owner_uuids.any?
+
+  objs = containers + workflows + owner_uuids
+  preload_links_for_objects objs if objs.any?
+%>
+
+<% @objects.sort_by { |obj| obj.created_at }.reverse.each do |obj| %>
+  <% wu = obj.work_unit obj.name %>
+
+  <tr data-object-uuid="<%= wu.uuid %>" class="cr-<%= wu.uuid %>">
+    <td>
+      <span class="label label-<%= wu.state_bootstrap_class %>"><%= wu.state_label %></span>
+    </td><td>
+      <%= link_to_if_arvados_object obj, friendly_name: true, link_text: if obj.name && !obj.name.empty? then obj.name else obj.uuid end %>
+    </td><td>
+      <%= obj.description || '' %>
+    </td><td>
+      <%= link_to_if_arvados_object wu.template_uuid, friendly_name: true %>
+    </td><td>
+      <%= link_to_if_arvados_object wu.owner_uuid, friendly_name: true %>
+    </td><td>
+      <%= wu.created_at.to_s %>
+    </td><td>
+      <%= render partial: 'delete_object_button', locals: {object:obj} %>
+    </td>
+  </tr>
+<% end %>
diff --git a/apps/workbench/app/views/container_requests/_show_status.html.erb b/apps/workbench/app/views/container_requests/_show_status.html.erb
new file mode 100644 (file)
index 0000000..49dfdcd
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render(partial: 'work_units/show_status', locals: {current_obj: @object, name: @object[:name] || 'this container'}) %>
diff --git a/apps/workbench/app/views/container_requests/_state_label.html.erb b/apps/workbench/app/views/container_requests/_state_label.html.erb
new file mode 100644 (file)
index 0000000..1ddd2b2
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% wu = object.work_unit object.name %>
+<span class="label label-<%=wu.state_bootstrap_class%>">
+  <%=wu.state_label%>
+</span>
diff --git a/apps/workbench/app/views/container_requests/index.html.erb b/apps/workbench/app/views/container_requests/index.html.erb
new file mode 100644 (file)
index 0000000..d4c64f5
--- /dev/null
@@ -0,0 +1,15 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :tab_line_buttons do %>
+  <div class="input-group">
+    <input type="text" class="form-control filterable-control recent-container-requests-filterable-control"
+           placeholder="Search container requests"
+           data-filterable-target="#recent-container-requests"
+           value="<%= params[:search] %>"
+           />
+  </div>
+<% end %>
+
+<%= render file: 'application/index.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/app/views/containers/_show_log.html.erb b/apps/workbench/app/views/containers/_show_log.html.erb
new file mode 100644 (file)
index 0000000..ec529aa
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render(partial: 'work_units/show_log', locals: {obj: @object, name: @object[:name] || 'this container'}) %>
diff --git a/apps/workbench/app/views/containers/_show_status.html.erb b/apps/workbench/app/views/containers/_show_status.html.erb
new file mode 100644 (file)
index 0000000..52d2e87
--- /dev/null
@@ -0,0 +1,21 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render(partial: 'work_units/show_status', locals: {current_obj: @object, name: @object[:name] || 'this container'}) %>
+
+<div class="panel panel-default">
+  <div class="panel-heading">
+    <span class="panel-title">Container requests</span>
+  </div>
+  <div class="panel-body">
+    <% crs = ContainerRequest.order("created_at desc").filter([["container_uuid", "=", @object.uuid]]) %>
+    <% crs.each do |cr| %>
+      <div>
+        <%= link_to_if_arvados_object cr, friendly_name: true %>
+        created at
+        <%= render_localized_date(cr.created_at) %>.
+      </div>
+    <% end %>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/getting_started/_getting_started_popup.html.erb b/apps/workbench/app/views/getting_started/_getting_started_popup.html.erb
new file mode 100644 (file)
index 0000000..fa75ec2
--- /dev/null
@@ -0,0 +1,183 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<style>
+div.figure {
+}
+.style_image1 {
+  border: 10px solid #ddd;
+  display: block;
+  margin-left: auto;
+  margin-right: auto;
+}
+.style_image2 {
+  border: 10px solid #ddd;
+  display: block;
+  margin-left: 1em;
+}
+div.figure p {
+  text-align: center;
+  font-style: italic;
+  text-indent: 0;
+  border-top:-0.3em;
+}
+</style>
+
+<div id="getting-started-modal-window" class="modal">
+  <div class="modal-dialog modal-with-loading-spinner" style="width: 50em">
+    <div class="modal-content">
+      <div class="modal-header" style="text-align: center">
+        <button type="button" class="close" data-dismiss="modal" aria-hidden="true">x</button>
+        <div>
+          <div class="col-sm-8"><h4 class="modal-title" style="text-align: right">Getting Started with Arvados</h4></div>  <%#Todo: center instead of right%>
+          <div class="spinner spinner-32px spinner-h-center col-sm-1" hidden="true"></div>
+        </div>
+        <br/>
+      </div>
+
+      <%#First Page%>
+      <div class="modal-body" style="height: 40em; overflow-y: scroll">
+        <div style="margin-top: -0.5em; margin-left: 0.5em;">
+          <p><div style="font-size: 150%;">Welcome!</div></p>
+          <p>
+            What you're looking at right now is <b>Workbench</b>, the graphical interface to the Arvados system.
+          </p><p>
+            <div class="figure">
+              <p> <%= image_tag "pipeline-running.gif", :class => "style_image1" %></p> <%#Todo: shorter gif%>
+              <p>Running the Pathomap pipeline in Arvados.</p>
+            </div>
+          </p><p>
+            Click the <span class="btn btn-sm btn-primary">Next &gt;</span> button below for a speed tour of Arvados.
+          </p><p style="margin-top:2em;">
+            <em><strong>Note:</strong> You can always come back to this Getting Started guide by clicking the <span class="fa fa-lg fa-question-circle"></span> in the upper-right corner.</em>
+          </p>
+        </div>
+      </div>
+
+      <%#Page Two%>
+      <div class="modal-body" style="height: 40em; overflow-y: scroll">
+        <div style="margin-top: -0.5em; margin-left: 0.5em;">
+          <p><div style="font-size: 150%;">Take It for a Spin</div></p>
+          <p>
+            Run your first pipeline in 3 quick steps:
+          </p>
+          <div style="display: block; margin: 0em 2em; padding-top: 1em; padding-bottom: 1em; border: thin dashed silver;">
+            <p style="margin-left: 1em;">
+              <em>First, <a href="/users/welcome">log in or register</a> with any Google account if you haven't already.</em>
+            </p><p>
+              <ol><li> Go to the <span class="btn btn-sm btn-default"><i class="fa fa-lg fa-fw fa-dashboard"></i> Dashboard</span> &gt; <span class="btn btn-sm btn-primary"><i class="fa fa-fw fa-gear"></i> Run a pipeline...</span>
+                  <p style="margin-top:1em;">
+                    <%= image_tag "mouse-move.gif", :class => "style_image2" %>
+                  </p>
+                </li>
+                <li> <span class="btn btn-sm btn-default"><i class="fa fa-fw fa-gear"></i>Mason Lab -- Ancestry Mapper (public)</span> &gt; <span class="btn btn-sm btn-primary">Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i></span></li><br>
+                <li> <span class="btn btn-sm btn-primary">Run <i class="fa fa-fw fa-play"></i></span></li>
+              </ol>
+          </p></div>
+          <p style="margin-top:1em;">
+            <i class="fa fa-flag fa-flip-horizontal" style="color: green"></i> <i class="fa fa-child"></i>
+            <strong>Voila!</strong> <i class="fa fa-child"></i> <i class="fa fa-flag" style="color: green"></i>
+            Your pipeline is now spooling up and getting ready to run!
+          </p><p>
+            Go ahead, try it for yourself right now. <span class="glyphicon glyphicon-thumbs-up"></span>
+          </p><p>
+            Or click <span class="btn btn-sm btn-primary">Next &gt;</span> below to keep reading!
+          </p>
+        </div>
+      </div>
+
+      <%#Page Three%>
+      <div class="modal-body" style="height: 40em; overflow-y: scroll">
+        <div style="margin-top: -0.5em; margin-left: 0.5em;">
+          <p><div style="font-size: 150%;">Three Useful Terms</div></p>
+          <ol>
+            <li>
+              <strong>Pipeline</strong> — A re-usable series of analysis steps.
+              <ul>
+                <li>
+                  Also known as a “workflow” in other systems
+                </li><li>
+                  A list of well-documented public pipelines can be found in the upper right corner by clicking the <span class="fa fa-lg fa-question-circle"></span> &gt; <a href="<%= Rails.configuration.arvados_public_data_doc_url %>">Public Pipelines and Datasets</a>
+                </li><li>
+                  Pro-tip: A Pipeline contains Jobs which contain Tasks
+                </li><li>
+                  Pipelines can only be shared within a project
+                </li>
+              </ul>
+            </li>
+
+            <li>
+              <strong>Collection </strong>— Like a folder, but better.
+              <ul>
+                <li>
+                  Upload data right in your browser
+                </li><li>
+                  Better than a folder?
+                  <ul><li>
+                      Collections contain the content-address of the data instead of the data itself
+                    </li><li>
+                      Sets of data can be flexibly defined and re-defined without duplicating data
+                    </li>
+                </ul></li><li>
+                  Collections can be shared using the "Sharing and Permissions"  &gt; "Share" button
+                </li>
+              </ul>
+            </li>
+
+            <li>
+              <strong>Projects </strong>— Contain pipeline templates, pipeline instances (individual runs of a pipeline), and collections.
+              <ul><li>
+                  The most useful one is your default "Home" project, under Projects &gt; Home
+                </li><li>
+                  Projects can be shared using the "sharing" tab
+                </li>
+              </ul>
+            </li>
+          </ol>
+
+        </div>
+      </div>
+
+      <%#Page Four%>
+      <div class="modal-body" style="height: 40em; overflow-y: scroll">
+        <div style="margin-top: -0.5em; margin-left: 0.5em;">
+          <p><div style="font-size: 150%;">Six Reasons Arvados is Awesome</div></p>
+          <p>
+            This guide, and in fact all of Workbench, is just a teaser for the full power of Arvados:
+          </p>
+          <ol>
+            <li>
+              <strong>Reproducible analyses</strong>: Enough said.
+            </li><li>
+              <strong>Data provenance</strong>: Every file in Arvados can tell you where it came from.
+            </li><li>
+              <strong>Serious scaling</strong>: Need 500 GB of space? 200 compute hours? Arvados scales and parallelizes your work for you intelligently.
+            </li><li>
+              <strong>Share pipelines or data</strong>: Easily publish your work to the world, just like <a href="http://www.pathomap.org/2015/04/08/run-the-pathomap-human-ancestry-pipeline-on-arvados/">the Pathomap team did</a>.
+            </li><li>
+              <strong>Use existing pipelines</strong>: Use best-practices pipelines on your own data with the click of a button.
+            </li><li>
+              <strong>Open source</strong>: Arvados is completely open source. Check out our <a href="http://dev.arvados.org">developer site</a>.
+            </li>
+          </ol>
+          <p style="margin-top: 1em;">
+            Want to use the command line, or hungry to learn more? Check out the User Guide at <a href="http://doc.arvados.org/">doc.arvados.org</a>.
+          </p><p>
+            Still have questions? Head over to <a href="http://doc.arvados.org/">doc.arvados.org</a> to find mailing-list and contact info for the Arvados community.
+          </p><p>
+            That's all, folks! Click the "x" up top to leave this guide.
+          </p>
+        </div>
+      </div>
+
+      <div class="modal-footer">
+        <div style="text-align:center">
+          <button class="btn btn-default pager-prev"><i class="fa fa-fw fa-chevron-left"></i><span style="font-weight: bold;"> Prev</span></button>
+          <button class="btn btn-default pager-next"><span style="font-weight: bold;">Next </span><i class="fa fa-fw fa-chevron-right"></i></button>
+          <div class="pager-count pull-right"><span style="margin:5px"></span></div>
+        </div>
+      </div>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/groups/_choose_rows.html.erb b/apps/workbench/app/views/groups/_choose_rows.html.erb
new file mode 100644 (file)
index 0000000..9286752
--- /dev/null
@@ -0,0 +1,13 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% icon_class = fa_icon_class_for_class(Group) %>
+<% @objects.each do |object| %>
+  <div class="row filterable selectable" data-object-uuid="<%= object.uuid %>">
+    <div class="col-sm-12" style="overflow-x:hidden">
+      <i class="fa fa-fw <%= icon_class %>"></i>
+      <%= object.name %>
+    </div>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/groups/_show_recent.html.erb b/apps/workbench/app/views/groups/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..3acbfef
--- /dev/null
@@ -0,0 +1,46 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: "paging", locals: {results: @groups, object: @object} %>
+
+<table class="table table-hover">
+  <thead>
+    <tr class="contain-align-left">
+      <th>
+       Group
+      </th><th>
+       Owner
+      </th><th>
+       Incoming permissions
+      </th><th>
+       Outgoing permissions
+      </th><th>
+       <!-- column for delete buttons -->
+      </th>
+    </tr>
+  </thead>
+  <tbody>
+
+    <% @groups.sort_by { |g| g[:created_at] }.reverse.each do |g| %>
+
+    <tr>
+      <td>
+        <%= link_to_if_arvados_object g, friendly_name: true %>
+      </td><td>
+        <%= link_to_if_arvados_object g.owner_uuid, friendly_name: true %>
+      </td><td>
+        <%= @links_to.select { |x| x.head_uuid == g.uuid }.collect(&:tail_uuid).uniq.count %>
+      </td><td>
+        <%= @links_from.select { |x| x.tail_uuid == g.uuid }.collect(&:head_uuid).uniq.count %>
+      </td><td>
+        <%= render partial: 'delete_object_button', locals: {object:g} %>
+      </td>
+    </tr>
+
+    <% end %>
+
+  </tbody>
+</table>
+
+<%= render partial: "paging", locals: {results: @groups, object: @object} %>
diff --git a/apps/workbench/app/views/issue_reporter/send_report.text.erb b/apps/workbench/app/views/issue_reporter/send_report.text.erb
new file mode 100644 (file)
index 0000000..a6108dc
--- /dev/null
@@ -0,0 +1,16 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if @user %>
+Issue reported by user <%=@user.email%>
+<% else %>
+Issue reported
+<% end %>
+
+Details of the report:
+<% if @params['report_additional_info'] %>
+<%  map_to_s = JSON.parse(@params['report_additional_info']).map {|k,v| "#{k}=#{v}"}.join("\n") %>
+<%= map_to_s %>
+<% end %>
+Report text=<%=@params['report_issue_text'] %>
diff --git a/apps/workbench/app/views/jobs/_create_new_object_button.html.erb b/apps/workbench/app/views/jobs/_create_new_object_button.html.erb
new file mode 100644 (file)
index 0000000..33c21e2
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%# There is no UI for context-free "create a new job" %>
diff --git a/apps/workbench/app/views/jobs/_rerun_job_with_options_popup.html.erb b/apps/workbench/app/views/jobs/_rerun_job_with_options_popup.html.erb
new file mode 100644 (file)
index 0000000..ba68106
--- /dev/null
@@ -0,0 +1,59 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% @job = @object %>
+<div id="jobRerunModal" class="modal" role="dialog" aria-labelledby="jobRerunTitle" aria-hidden="true">
+  <div class="modal-dialog">
+    <div class="modal-content">
+      <%= form_for(@job, method: :post, url: {controller: 'jobs', action: 'create'}) do |f| %>
+        <% [:script, :repository, :supplied_script_version, :nondeterministic].each do |field_sym| %>
+          <%= f.hidden_field(field_sym) %>
+        <% end %>
+        <% [:script_parameters, :runtime_constraints].each do |field_sym| %>
+          <%= f.hidden_field(field_sym, value: @job.send(field_sym).to_json) %>
+        <% end %>
+        <div class="modal-header">
+          <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+          <div id="jobRerunTitle">
+            <div class="col-sm-6"> <h4 class="modal-title">Re-run job</h4> </div>
+          </div>
+          <br/>
+        </div>
+
+        <div class="modal-body">
+          <p>
+            If this job is part of a pipeline, that pipeline will not
+            know about the new job you are running.  If you want to
+            update your pipeline results, please re-run the pipeline
+            instead.
+          </p>
+          <p>
+            The inputs and parameters will be the same as the current
+            job.  Thus, the new job will not reflect any changes made
+            to the pipeline that initiated this job.
+          </p>
+          <div style="padding-left: 1em">
+            <% if (@job.supplied_script_version.blank? or
+                   (@job.supplied_script_version == @job.script_version)) %>
+              <%= f.hidden_field(:script_version) %>
+            <% else %>
+              <%= f.radio_button("script_version", @job.script_version) %>
+              <%= f.label(:script_version, "Use same script version as this run", value: @job.script_version) %>
+              <p style="padding-left: 1em"> Use the same script version as the current job.</p>
+
+              <%= f.radio_button(:script_version, @job.supplied_script_version) %>
+              <%= f.label(:script_version, "Use latest script version", value: @job.supplied_script_version) %>
+              <p style="padding-left: 1em"> Use the current commit indicated by '<%= @job.supplied_script_version %>' in the '<%= @job.repository %>' repository.</p>
+            <% end %>
+          </div>
+        </div>
+
+        <div class="modal-footer">
+          <button class="btn btn-default" data-dismiss="modal" aria-hidden="true">Cancel</button>
+          <%= f.submit(value: "Run now", class: "btn btn-primary") %>
+        </div>
+      <% end %>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/jobs/_show_details.html.erb b/apps/workbench/app/views/jobs/_show_details.html.erb
new file mode 100644 (file)
index 0000000..e27cbd2
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: 'application/show_attributes' %>
diff --git a/apps/workbench/app/views/jobs/_show_job_buttons.html.erb b/apps/workbench/app/views/jobs/_show_job_buttons.html.erb
new file mode 100644 (file)
index 0000000..7938a65
--- /dev/null
@@ -0,0 +1,9 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if @object.state != "Running" and Job.creatable? %>
+  <button type="button" class="btn btn-sm btn-primary" data-toggle="modal" data-target="#jobRerunModal">
+    <i class="fa fa-fw fa-gear"></i> Re-run job...
+  </button>
+<% end %>
diff --git a/apps/workbench/app/views/jobs/_show_log.html.erb b/apps/workbench/app/views/jobs/_show_log.html.erb
new file mode 100644 (file)
index 0000000..e84641d
--- /dev/null
@@ -0,0 +1,286 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if !@object.log %>
+
+<div id="log_graph_div"
+     class="arv-log-event-listener"
+     style="display:none"
+     data-object-uuid="<%= @object.uuid %>"></div>
+
+<pre id="event_log_div"
+     class="arv-log-event-listener arv-log-event-handler-append-logs arv-job-log-window"
+     data-object-uuid="<%= @object.uuid %>"
+  ><%= @object.stderr_log_lines(Rails.configuration.running_job_log_records_to_fetch).join("\n") %>
+</pre>
+
+<%# Applying a long throttle suppresses the auto-refresh of this
+    partial that would normally be triggered by arv-log-event. %>
+<div class="arv-log-refresh-control"
+     data-load-throttle="86486400000" <%# 1001 nights %>
+     ></div>
+
+<% else %>
+
+<script>
+(function() {
+var pagesize = 1000;
+var logViewer = new List('log-viewer', {
+  valueNames: [ 'id', 'timestamp', 'taskid', 'message', 'type'],
+  page: pagesize
+});
+
+logViewer.page_offset = 0;
+logViewer.on("updated", function() { updatePaging(".log-viewer-paging", logViewer, pagesize) } );
+$(".log-viewer-page-up").on("click", function() { prevPage(logViewer, pagesize, ".log-viewer-paging"); return false; });
+$(".log-viewer-page-down").on("click", function() { nextPage(logViewer, pagesize, ".log-viewer-paging"); return false; });
+
+var taskState = newTaskState();
+
+var makeFilter = function() {
+  var pass = [];
+  $(".toggle-filter, .radio-filter").each(function(i, e) {
+    if (e.checked) {
+      pass.push(e.id.substr(5));
+    }
+  });
+
+  return (function(item) {
+    var v = false;
+    if (item.values().taskid !== "") {
+      for (var a in pass) {
+        if (pass[a] == "all-tasks") { v = true; }
+        else if (pass[a] == "successful-tasks" && taskState[item.values().taskid].outcome == "success") { v = true; }
+        else if (pass[a] == "failed-tasks" && taskState[item.values().taskid].outcome == "failure") { v = true; }
+      }
+    } else {
+      v = true;
+    }
+    for (var a in pass) {
+      if (pass[a] == item.values().type) { return v; }
+    }
+    return false;
+  });
+}
+
+<% if @object.log and !@object.log.empty? %>
+  <% logcollection = Collection.find @object.log %>
+  <% if logcollection %>
+    var log_size = <%= logcollection.files[0][2] %>
+    var log_maxbytes = <%= Rails.configuration.log_viewer_max_bytes %>;
+    var logcollection_url = '<%=j url_for logcollection %>/<%=j logcollection.files[0][1] %>';
+    $("#log-viewer-download-url").attr('href', logcollection_url);
+    $("#log-viewer-download-pane").show();
+    var headers = {};
+    if (log_size > log_maxbytes) {
+      headers['Range'] = 'bytes=0-' + (log_maxbytes - 1);
+    }
+    var ajax_opts = { dataType: 'text', headers: headers };
+    load_log();
+
+    function load_log() {
+        $.ajax(logcollection_url, ajax_opts).done(done).fail(fail);
+    }
+    function done(data, status, jqxhr) {
+        if (jqxhr.getResponseHeader('Content-Type').indexOf('application/json') === 0) {
+            // The browser won't allow a redirect-with-cookie response
+            // because keep-web isn't same-origin with us. Instead, we
+            // assure keep-web it's OK to respond with the content
+            // immediately by setting the token in the request body
+            // instead and adding disposition=attachment.
+            logcollection_url = JSON.parse(data).href;
+            var queryAt = logcollection_url.indexOf('?api_token=');
+            if (queryAt >= 0) {
+                ajax_opts.method = 'POST';
+                ajax_opts.data = {
+                    api_token: logcollection_url.slice(queryAt+11),
+                    disposition: 'attachment',
+                };
+                logcollection_url = logcollection_url.slice(0, queryAt);
+            }
+            return load_log();
+        }
+        logViewer.filter();
+        addToLogViewer(logViewer, data.split("\n"), taskState);
+        logViewer.filter(makeFilter());
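+        // Detect a truncated fetch: a 206 response whose Content-Range
+        // ("bytes 0-N/total") shows fewer bytes than the total (or "*").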
+        var content_range_hdr = jqxhr.getResponseHeader('Content-Range');
+        var v = content_range_hdr && content_range_hdr.match(/bytes \d+-(\d+)\/(.+)/);
+        var short_log = v && (v[2] == '*' || parseInt(v[1], 10) + 1 < parseInt(v[2], 10));
+        if (jqxhr.status == 206 && short_log) {
+            $("#log-viewer-overview").html(
+                '<p>Showing only ' + data.length + ' bytes of this log.' +
+                    ' Timing information is unavailable since' +
+                    ' the full log was not retrieved.</p>'
+            );
+        } else {
+            generateJobOverview("#log-viewer-overview", logViewer, taskState);
+        }
+        $("#log-viewer .spinner").detach();
+    }
+    function fail(jqxhr, status, error) {
+        // TODO: tell the user about the error
+        console.log('load_log failed: status='+status+' error='+error);
+        $("#log-viewer .spinner").detach();
+    }
+  <% end %>
+<% else %>
+  <%# Live log loading not implemented yet. %>
+<% end %>
+
+$(".toggle-filter, .radio-filter").on("change", function() {
+  logViewer.filter(makeFilter());
+});
+
+$("#filter-all").on("click", function() {
+  $(".toggle-filter").each(function(i, f) { f.checked = true; });
+  logViewer.filter(makeFilter());
+});
+
+$("#filter-none").on("click", function() {
+  $(".toggle-filter").each(function(i, f) { f.checked = false; console.log(f); });
+  logViewer.filter(makeFilter());
+});
+
+$("#sort-by-time").on("change", function() {
+  logViewer.sort("id", {sortFunction: sortById});
+});
+
+$("#sort-by-task").on("change", function() {
+  logViewer.sort("taskid", {sortFunction: sortByTask});
+});
+
+$("#sort-by-node").on("change", function() {
+  logViewer.sort("node", {sortFunction: sortByNode});
+});
+
+$("#set-show-failed-only").on("click", function() {
+  $("#sort-by-task").prop("checked", true);
+  $("#show-failed-tasks").prop("checked", true);
+  $("#show-crunch").prop("checked", false);
+  $("#show-task-dispatch").prop("checked", true);
+  $("#show-script-print").prop("checked", true);
+  $("#show-crunchstat").prop("checked", false);
+  logViewer.filter(makeFilter());
+  logViewer.sort("taskid", {sortFunction: sortByTask});
+});
+
+})();
+
+</script>
+
+<div id="log-viewer">
+
+  <h3>Summary</h3>
+  <p id="log-viewer-overview">
+    <% if !logcollection %>
+      The collection containing the job log was not found.
+    <% end %>
+  </p>
+
+  <p id="log-viewer-download-pane" style="display:none">
+    <a id="log-viewer-download-url" href="">Download the full log</a>
+  </p>
+
+  <div class="h3">Log
+
+    <span class="pull-right">
+      <% if @object.andand.tasks_summary.andand[:failed] and @object.tasks_summary[:failed] > 0 %>
+        <button id="set-show-failed-only" class="btn btn-danger">
+          Show failed task diagnostics only
+        </button>
+      <% end %>
+
+      <button id="filter-all" class="btn">
+        Select all
+      </button>
+      <button id="filter-none" class="btn">
+        Select none
+      </button>
+    </span>
+  </div>
+
+  <input class="search pull-right" style="margin-top: 1em" placeholder="Search" />
+
+  <div>
+    <div class="radio-inline log-viewer-button" style="margin-left: 10px">
+      <label><input id="sort-by-time" type="radio" name="sort-radio" checked> Sort by time</label>
+    </div>
+    <div class="radio-inline log-viewer-button">
+      <label><input id="sort-by-node" type="radio" name="sort-radio" > Sort by node</label>
+    </div>
+
+    <div class="radio-inline log-viewer-button">
+      <label><input id="sort-by-task" type="radio" name="sort-radio" > Sort by task</label>
+    </div>
+  </div>
+
+  <div>
+    <div class="radio-inline log-viewer-button" style="margin-left: 10px">
+      <label><input id="show-all-tasks" type="radio" name="show-tasks-group" checked="true" class="radio-filter"> Show all tasks</label>
+    </div>
+    <div class="radio-inline log-viewer-button">
+      <label><input id="show-successful-tasks" type="radio" name="show-tasks-group" class="radio-filter"> Only successful tasks</label>
+    </div>
+    <div class="radio-inline log-viewer-button">
+      <label><input id="show-failed-tasks" type="radio" name="show-tasks-group" class="radio-filter"> Only failed tasks</label>
+    </div>
+  </div>
+
+  <div>
+    <div class="checkbox-inline log-viewer-button" style="margin-left: 10px">
+      <label><input id="show-crunch" type="checkbox" checked="true" class="toggle-filter"> Show crunch diagnostics</label>
+    </div>
+    <div class="checkbox-inline log-viewer-button">
+      <label><input id="show-task-dispatch" type="checkbox" checked="true" class="toggle-filter"> Show task dispatch</label>
+    </div>
+    <div class="checkbox-inline log-viewer-button">
+      <label><input id="show-task-print" type="checkbox" checked="true" class="toggle-filter"> Show task diagnostics</label>
+    </div>
+    <div class="checkbox-inline log-viewer-button">
+      <label><input id="show-crunchstat" type="checkbox" checked="true" class="toggle-filter"> Show compute usage</label>
+    </div>
+
+  </div>
+
+  <div class="smart-scroll" data-smart-scroll-padding-bottom="50" style="margin-bottom: 0px">
+    <table class="log-viewer-table">
+      <thead>
+        <tr>
+          <th class="id" data-sort="id"></th>
+          <th class="timestamp" data-sort="timestamp">Timestamp</th>
+          <th class="node"  data-sort="node">Node</th>
+          <th class="slot"  data-sort="slot">Slot</th>
+          <th class="type" data-sort="type">Log type</th>
+          <th class="taskid"  data-sort="taskid">Task</th>
+          <th class="message" data-sort="message">Message</th>
+        </tr>
+      </thead>
+      <tbody class="list">
+        <tr>
+          <td class="id"></td>
+          <td class="timestamp"></td>
+          <td class="node"></td>
+          <td class="slot"></td>
+          <td class="type"></td>
+          <td class="taskid"></td>
+          <td class="message"></td>
+        </tr>
+      </tbody>
+    </table>
+
+    <% if @object.log and logcollection %>
+      <div class="spinner spinner-32px"></div>
+    <% end %>
+
+  </div>
+
+  <div class="log-viewer-paging-div" style="margin-bottom: -15px">
+    <a href="#" class="log-viewer-page-up"><span class='glyphicon glyphicon-arrow-up'></span></a>
+    <span class="log-viewer-paging"></span>
+    <a href="#" class="log-viewer-page-down"><span class='glyphicon glyphicon-arrow-down'></span></a>
+  </div>
+
+</div>
+
+<% end %>
diff --git a/apps/workbench/app/views/jobs/_show_object_description_cell.html.erb b/apps/workbench/app/views/jobs/_show_object_description_cell.html.erb
new file mode 100644 (file)
index 0000000..cd58fc6
--- /dev/null
@@ -0,0 +1,19 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="nowrap">
+  <div class="row">
+    <div class="col-sm-2 inline-progress-container">
+      <%= render partial: 'job_progress', locals: {j: object} %>
+    </div>
+    <div class="col-sm-10">
+      <%= object.script %>
+      <span class="deemphasize">
+        job
+        using <%= object.script_version %> commit
+        from <%= object.repository %> repository
+      </span>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/jobs/_show_provenance.html.erb b/apps/workbench/app/views/jobs/_show_provenance.html.erb
new file mode 100644 (file)
index 0000000..fd6fba5
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: 'application/svg_div', locals: {
+      divId: "provenance_graph", 
+      svgId: "provenance_svg", 
+      svg: @svg } %>
diff --git a/apps/workbench/app/views/jobs/_show_recent.html.erb b/apps/workbench/app/views/jobs/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..1dd0c82
--- /dev/null
@@ -0,0 +1,125 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :css do %>
+  table.topalign>tbody>tr>td {
+  vertical-align: top;
+  }
+  table.topalign>thead>tr>td {
+  vertical-align: bottom;
+  }
+<% end %>
+
+<%= render partial: "paging", locals: {results: objects, object: @object} %>
+
+<table class="topalign table">
+  <thead>
+    <tr class="contain-align-left">
+      <th>
+      </th><th>
+       status
+      </th><th>
+       uuid
+      </th><th>
+       script
+      </th><th>
+       version
+      </th><th>
+       output
+      </th>
+    </tr>
+  </thead>
+  <tbody>
+
+    <% @objects.sort_by { |j| j[:created_at] }.reverse.each do |j| %>
+
+    <tr class="cell-noborder">
+      <td>
+        <i class="icon-plus-sign expand-collapse-row" data-id="<%= j.uuid %>" style="cursor: pointer"></i>
+      </td>
+      <td>
+        <div class="inline-progress-container">
+          <%= render partial: 'job_progress', locals: {:j => j} %>
+        </div>
+      </td>
+      <td>
+        <%= link_to_if_arvados_object j %>
+      </td>
+      <td>
+        <%= j.script %>
+      </td>
+      <td>
+        <%= j.script_version.andand[0..8] %>
+      </td>
+      <td>
+        <%= link_to_if_arvados_object j.output %>
+      </td>
+    </tr>
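+    <%# Hidden detail row for this job; toggled by the expand-collapse icon above (matched via its data-id). %>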
+    <tr class="cell-noborder" id="<%= j.uuid %>" style="display:none">
+      <td colspan="7"><table class="table table-justforlayout"><tr>
+      <td style="border-left: 1px solid black">
+        <table class="table table-condensed">
+          <tr>
+            <td>
+              queued
+            </td>
+            <td>
+             &#x2709;&nbsp;<span title="<%= j.created_at %>"><%= raw distance_of_time_in_words(Time.now, j.created_at).sub('about ','~').sub(' ','&nbsp;') + '&nbsp;ago' if j.created_at %></span>
+            </td>
+            <td>
+             <%= raw('for&nbsp;' + distance_of_time_in_words(j.started_at, j.created_at).sub('about ','~').sub(' ','&nbsp;')) if j.created_at and j.started_at %>
+            </td>
+          </tr>
+          <% if j.started_at.is_a? Time %>
+          <tr>
+            <td>
+              started
+            </td>
+            <td>
+             &#x2708;&nbsp;<span title="<%= j.started_at %>"><%= raw distance_of_time_in_words(j.started_at, Time.now).sub('about ','~').sub(' ','&nbsp;') + '&nbsp;ago' if j.started_at %></span>
+            </td>
+            <td>
+              <% if j.finished_at.is_a? Time %>
+             <%= raw('ran&nbsp;' + distance_of_time_in_words(j.finished_at, j.started_at).sub('about ','~').sub(' ','&nbsp;')) %>
+              <% elsif j.state == "Running" %>
+              <span class="badge badge-success" title="tasks finished">&#x2714;&nbsp;<%= j.tasks_summary[:done] %></span>
+              <span class="badge badge-info" title="tasks running">&#x2708;&nbsp;<%= j.tasks_summary[:running] %></span>
+              <span class="badge" title="tasks todo">&#x2709;&nbsp;<%= j.tasks_summary[:todo] %></span>
+              <% if j.tasks_summary[:failed] %>
+              <span class="badge badge-warning" title="task failures">&#x2716;&nbsp;<%= j.tasks_summary[:failed] %></span>
+              <% end %>
+              <% end %>
+            </td>
+          </tr>
+          <% end %>
+        </table>
+      </td><td>
+        <table class="table table-condensed">
+          <tr><td colspan="2">
+              <%= j.script %> <%= j.script_version %>
+          </td></tr>
+          <% j.script_parameters.sort.each do |k,v| %>
+          <tr>
+            <td><%= k %></td><td><%= link_to_if_arvados_object v %></td>
+          </tr>
+          <% end %>
+          <tr>
+            <td>output</td><td><%= link_to_if_arvados_object j.output %></td>
+          </tr>
+        </table>
+      </td><td>
+        <table class="table table-condensed">
+        <% j.runtime_constraints.sort.each do |k,v| %>
+        <tr><td><%= v %></td><td><%= k %></td></tr>
+        <% end %>
+        </table>
+      </td>
+      </tr></table></td>
+    </tr>
+
+    <% end %>
+
+  </tbody>
+</table>
diff --git a/apps/workbench/app/views/jobs/_show_status.html.erb b/apps/workbench/app/views/jobs/_show_status.html.erb
new file mode 100644 (file)
index 0000000..ced5b1e
--- /dev/null
@@ -0,0 +1,59 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render(partial: 'work_units/show_status', locals: {current_obj: @object, name: @object[:name] || 'this job'}) %>
+
+<div class="panel panel-default">
+  <div class="panel-heading">
+    <span class="panel-title">Used in pipelines</span>
+  </div>
+  <div class="panel-body used-in-pipelines">
+    <% pi = PipelineInstance.order("created_at desc").filter([["components", "like", "%#{@object.uuid}%"]]) %>
+
+    <% pi.each do |pipeline| %>
+      <% pipeline.components.each do |k, v| %>
+        <% if v[:job] and v[:job][:uuid] == @object.uuid %>
+          <div>
+            <b><%= k %></b>
+            component of
+            <%= link_to_if_arvados_object pipeline, friendly_name: true %>
+            created at
+            <%= render_localized_date(pipeline.created_at) %>.
+          </div>
+        <% end %>
+      <% end %>
+    <% end %>
+  </div>
+
+  <div class="panel-heading">
+    <span class="panel-title">Used in jobs</span>
+  </div>
+
+  <% jobs = Job.order("created_at desc").filter([["components", "like", "%#{@object.uuid}%"]]).limit(10) %>
+  <%
+     too_many_message = ""
+     if jobs.items_available > jobs.results.size
+       too_many_message = (jobs.items_available - jobs.results.size).to_s + ' more jobs are not listed.'
+     end
+  %>
+  <div class="panel-body used-in-jobs">
+    <% if too_many_message != "" %>
+      <p><i><%= too_many_message %></i></p>
+    <% end %>
+    <% jobs.each do |j| %>
+      <% j.components.each do |k, v| %>
+        <% if v == @object.uuid %>
+          <div>
+            <b><%= k %></b>
+            component of
+            <%= link_to_if_arvados_object j, friendly_name: true %>
+            created at
+            <%= render_localized_date(j.created_at) %>.
+          </div>
+        <% end %>
+      <% end %>
+    <% end %>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/jobs/show.html.erb b/apps/workbench/app/views/jobs/show.html.erb
new file mode 100644 (file)
index 0000000..1bf8065
--- /dev/null
@@ -0,0 +1,16 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :tab_line_buttons do %>
+  <div class="pane-loaded arv-log-event-listener arv-refresh-on-state-change"
+       data-pane-content-url="<%= url_for(params.merge(tab_pane: "job_buttons")) %>"
+       data-object-uuid="<%= @object.uuid %>"
+       style="display: inline">
+  <%= render partial: 'show_job_buttons', locals: {object: @object}%>
+  </div>
+<% end %>
+
+<%= render partial: 'title_and_buttons' %>
+<%= render partial: 'content', layout: 'content_layout', locals: {pane_list: controller.show_pane_list }%>
+<%= render partial: 'rerun_job_with_options_popup' %>
diff --git a/apps/workbench/app/views/keep_disks/_content_layout.html.erb b/apps/workbench/app/views/keep_disks/_content_layout.html.erb
new file mode 100644 (file)
index 0000000..06822e5
--- /dev/null
@@ -0,0 +1,24 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% unless @histogram_pretty_date.nil? %>
+  <% content_for :tab_panes do %>
+  <script type="text/javascript">
+    $(document).ready(function(){
+      $.renderHistogram(<%= raw @cache_age_histogram.to_json %>);
+    });
+  </script>
+  <div class='graph'>
+    <h3>Cache Age vs. Disk Utilization</h3>
+    <h4>circa <%= @histogram_pretty_date %></h4>
+    <div id='cache-age-vs-disk-histogram'>
+    </div>
+  </div>
+  <% end %>
+<% end %>
+<%= content_for :content_top %>
+<div class="pull-right">
+  <%= content_for :tab_line_buttons %>
+</div>
+<%= content_for :tab_panes %>
diff --git a/apps/workbench/app/views/layouts/application.html.erb b/apps/workbench/app/views/layouts/application.html.erb
new file mode 100644 (file)
index 0000000..b59bad4
--- /dev/null
@@ -0,0 +1,79 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<!DOCTYPE html>
+<html ng-app="Workbench">
+<head>
+  <meta charset="utf-8">
+  <title>
+    <% if content_for? :page_title %>
+    <%= yield :page_title %> / <%= Rails.configuration.site_name %>
+    <% else %>
+    <%= Rails.configuration.site_name %>
+    <% end %>
+  </title>
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  <link rel="icon" href="/favicon.ico" type="image/x-icon">
+  <link rel="shortcut icon" href="/favicon.ico" type="image/x-icon">
+  <meta name="description" content="">
+  <meta name="author" content="">
+  <% if current_user %>
+    <% content_for :js do %>
+      window.defaultSession = <%=raw({baseURL: Rails.configuration.arvados_v1_base.sub(/\/arvados\/v1$/, '/'), token: Thread.current[:arvados_api_token], user: current_user}.to_json)%>;
+    <% end %>
+  <% end %>
+  <% if current_user and $arvados_api_client.discovery[:websocketUrl] %>
+  <meta name="arv-websocket-url" content="<%=$arvados_api_client.discovery[:websocketUrl]%>?api_token=<%=Thread.current[:arvados_api_token]%>">
+  <% end %>
+  <meta name="robots" content="NOINDEX, NOFOLLOW">
+
+  <%# Feature #5645: Add open graph meta tags to generate this page's
+      social graph that search engines can use. http://ogp.me/ %>
+  <meta property="og:type" content="article" />
+  <meta property="og:url" content="<%= request.url %>" />
+  <meta property="og:site_name" content="<%= Rails.configuration.site_name %>" />
+  <% if @object %>
+    <% if @object.respond_to?(:name) and @object.name.present? %>
+      <meta property="og:title" content="<%= @object.name%>" />
+    <% end %>
+    <% if (@object.respond_to?(:description) rescue nil) and @object.description.present? %>
+      <meta property="og:description" content="<%= @object.description%>" />
+    <% end %>
+  <% end %>
+  <%# Done adding open graph meta tags %>
+
+  <%= stylesheet_link_tag    "application", :media => "all" %>
+  <%= javascript_include_tag "application" %>
+  <%= csrf_meta_tags %>
+  <%= yield :head %>
+  <%= javascript_tag do %>
+    angular.module('Arvados').value('arvadosApiToken', '<%=Thread.current[:arvados_api_token]%>');
+    angular.module('Arvados').value('arvadosDiscoveryUri', '<%= Rails.configuration.arvados_v1_base.sub '/arvados/v1', '/discovery/v1/apis/arvados/v1/rest' %>');
+  <%= yield :js %>
+  <% end %>
+  <style>
+    <%= yield :css %>
+    body {
+    min-height: 100%;
+    height: 100%;
+    }
+
+    @media (max-width: 979px) { body { padding-top: 0; } }
+
+    @media (max-width: 767px) {
+      .breadcrumbs {
+        padding-top: 0;
+      }
+    }
+  </style>
+  <link href="//netdna.bootstrapcdn.com/font-awesome/4.1.0/css/font-awesome.css" rel="stylesheet">
+  <%= piwik_tracking_tag if (PiwikAnalytics.configuration.url != 'localhost' rescue false) %>
+</head>
+<body>
+<%= render template: 'layouts/body' %>
+<%= javascript_tag do %>
+<%= yield :footer_js %>
+<% end %>
+</body>
+</html>
diff --git a/apps/workbench/app/views/layouts/body.html.erb b/apps/workbench/app/views/layouts/body.html.erb
new file mode 100644 (file)
index 0000000..b2cd097
--- /dev/null
@@ -0,0 +1,279 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+  <div id="wrapper" class="container-fluid">
+    <nav class="navbar navbar-default navbar-fixed-top" role="navigation">
+      <div class="navbar-header">
+        <button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse">
+          <span class="sr-only">Toggle navigation</span>
+          <span class="icon-bar"></span>
+          <span class="icon-bar"></span>
+          <span class="icon-bar"></span>
+        </button>
+        <% site_name = Rails.configuration.site_name.downcase rescue Rails.application.class.parent_name %>
+        <% if current_user %>
+          <a class="navbar-brand" href="/" data-push=true><%= site_name %></a>
+        <% else %>
+          <span class="navbar-brand"><%= site_name %></span>
+        <% end %>
+      </div>
+
+      <div class="collapse navbar-collapse">
+        <ul class="nav navbar-nav navbar-right">
+
+          <li>
+            <a><i class="rotating loading glyphicon glyphicon-refresh"></i></a>
+          </li>
+
+          <% if current_user %>
+            <% if current_user.is_active %>
+              <% if Rails.configuration.multi_site_search %>
+                <li>
+                  <form class="navbar-form">
+                    <%=
+                       target = Rails.configuration.multi_site_search
+                       if target == true
+                         target = {controller: 'search', action: 'index'}
+                       end
+                       link_to("Multi-site search", target, {class: 'btn btn-default'}) %>
+                  </form>
+                </li>
+              <% end %>
+              <li>
+                <form class="navbar-form" role="search"
+                           data-search-modal=
+                           "<%= url_for(
+                            action: 'choose',
+                            controller: 'search',
+                            title: 'Search',
+                            action_name: 'Show',
+                            action_href: url_for(controller: :actions, action: :show),
+                            action_method: 'get',
+                            action_data: {selection_param: 'uuid', success: 'redirect-to-created-object'}.to_json)
+                           %>">
+                  <div class="input-group" style="width: 220px">
+                    <input type="text" class="form-control" placeholder="search this site">
+                    <a class="input-group-addon"><span class="glyphicon glyphicon-search"></span></a>
+                  </div>
+                </form>
+              </li>
+            <% end %>
+
+            <li class="dropdown notification-menu">
+              <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="notifications-menu">
+                <span class="badge badge-alert notification-count"><%= user_notifications.length if user_notifications.any? %></span>
+                <span class="fa fa-lg fa-user"></span>
+                <span class="caret"></span>
+              </a>
+              <ul class="dropdown-menu" role="menu">
+                <li role="presentation" class="dropdown-header">
+                  <%= current_user.email %>
+                </li>
+                <% if current_user.is_active %>
+                <li role="menuitem"><a href="/projects/<%=current_user.uuid%>" role="menuitem"><i class="fa fa-lg fa-home fa-fw"></i> Home project </a></li>
+                  <% if Rails.configuration.composer_url %>
+                    <li role="menuitem">
+                     <form action="<%= Rails.configuration.composer_url %>" method="GET">
+                       <input type="hidden" name="api_token" value="<%= Thread.current[:arvados_api_token] %>" />
+                       <button role="menuitem" type="submit">
+                         <i class="fa fa-lg fa-share-alt fa-fw"></i> Workflow Composer
+                       </button>
+                     </form>
+                    </li>
+                  <% end %>
+                <% if Rails.configuration.workbench2_url %>
+                <li role="menuitem">
+                  <%
+                    wb2_url = Rails.configuration.workbench2_url
+                    wb2_url += '/' if wb2_url[-1] != '/'
+                    wb2_url += 'token'
+                  %>
+                  <form action="<%= wb2_url %>" method="GET">
+                    <input type="hidden" name="api_token" value="<%= Thread.current[:arvados_api_token] %>">
+                    <button role="menuitem" type="submit">
+                      <i class="fa fa-lg fa-share-square fa-fw"></i> Go to Workbench 2
+                    </button>
+                  </form>
+                </li>
+                <% end %>
+                <li role="menuitem">
+                  <%= link_to virtual_machines_user_path(current_user), role: 'menu-item' do %>
+                    <i class="fa fa-lg fa-terminal fa-fw"></i> Virtual machines
+                  <% end %>
+                </li>
+                <% if Rails.configuration.repositories %>
+                <li role="menuitem"><a href="/repositories" role="menuitem"><i class="fa fa-lg fa-code-fork fa-fw"></i> Repositories </a></li>
+                <% end -%>
+                <li role="menuitem"><a href="/current_token" role="menuitem"><i class="fa fa-lg fa-ticket fa-fw"></i> Current token</a></li>
+                <li role="menuitem">
+                  <%= link_to ssh_keys_user_path(current_user), role: 'menu-item' do %>
+                    <i class="fa fa-lg fa-key fa-fw"></i> SSH keys
+                  <% end %>
+                </li>
+                <li role="menuitem"><a href="/users/link_account" role="menuitem"><i class="fa fa-lg fa-link fa-fw"></i> Link account </a></li>
+                <% if Rails.configuration.user_profile_form_fields %>
+                  <li role="menuitem"><a href="/users/<%=current_user.uuid%>/profile" role="menuitem"><i class="fa fa-lg fa-user fa-fw"></i> Manage profile</a></li>
+                <% end %>
+                <% end %>
+                <li role="presentation" class="divider"></li>
+                <li role="menuitem"><a href="<%= logout_path %>" role="menuitem"><i class="fa fa-lg fa-sign-out fa-fw"></i> Log out</a></li>
+                <% if user_notifications.any? %>
+                  <li role="presentation" class="divider"></li>
+                  <% user_notifications.each_with_index do |n, i| %>
+                    <% if i > 0 %><li class="divider"></li><% end %>
+                    <li class="notification"><%= n.call(self) %></li>
+                  <% end %>
+                <% end %>
+              </ul>
+            </li>
+
+            <% if current_user.is_admin %>
+              <li class="dropdown">
+                <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="system-menu">
+                  <span class="fa fa-lg fa-gear"></span>
+                  <span class="caret"></span>
+                </a>
+                <ul class="dropdown-menu" role="menu">
+                  <li role="presentation" class="dropdown-header">
+                    Admin Settings
+                  </li>
+                  <% if Rails.configuration.repositories %>
+                  <li role="menuitem"><a href="/repositories">
+                      <i class="fa fa-lg fa-code-fork fa-fw"></i> Repositories
+                  </a></li>
+                  <% end -%>
+                  <li role="menuitem"><a href="/virtual_machines">
+                      <i class="fa fa-lg fa-terminal fa-fw"></i> Virtual machines
+                  </a></li>
+                  <li role="menuitem"><a href="/authorized_keys">
+                      <i class="fa fa-lg fa-key fa-fw"></i> SSH keys
+                  </a></li>
+                  <li role="menuitem"><a href="/api_client_authorizations">
+                      <i class="fa fa-lg fa-ticket fa-fw"></i> API tokens
+                  </a></li>
+                  <li role="menuitem"><a href="/links">
+                      <i class="fa fa-lg fa-arrows-h fa-fw"></i> Links
+                  </a></li>
+                  <li role="menuitem"><a href="/users">
+                      <i class="fa fa-lg fa-user fa-fw"></i> Users
+                  </a></li>
+                  <li role="menuitem"><a href="/groups">
+                      <i class="fa fa-lg fa-users fa-fw"></i> Groups
+                  </a></li>
+                  <li role="menuitem"><a href="/nodes">
+                      <i class="fa fa-lg fa-cloud fa-fw"></i> Compute nodes
+                  </a></li>
+                  <li role="menuitem"><a href="/keep_services">
+                      <i class="fa fa-lg fa-exchange fa-fw"></i> Keep services
+                  </a></li>
+                  <li role="menuitem"><a href="/keep_disks">
+                      <i class="fa fa-lg fa-hdd-o fa-fw"></i> Keep disks
+                  </a></li>
+                </ul>
+              </li>
+            <% end %>
+          <% else %>
+            <% if Rails.configuration.anonymous_user_token and Rails.configuration.enable_public_projects_page %>
+              <li><%= link_to 'Browse public projects', "/projects/public" %></li>
+            <% end %>
+            <li class="dropdown hover-dropdown login-menu">
+              <a href="<%= arvados_api_client.arvados_login_url(return_to: request.url) %>">Log in</a>
+              <ul class="dropdown-menu">
+                <li>
+                  <a href="<%= arvados_api_client.arvados_login_url(return_to: request.url) %>">
+                    <span class="fa fa-lg fa-sign-in"></span>
+                    <p style="margin-left: 1.6em; margin-top: -1.35em; margin-bottom: 0em; margin-right: 0.5em;">Log in or register with<br/>any Google account</p>
+                  </a>
+                </li>
+              </ul>
+            </li>
+          <% end %>
+
+          <li class="dropdown help-menu">
+            <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="arv-help">
+              <span class="fa fa-lg fa-question-circle"></span>
+              <span class="caret"></span>
+            </a>
+            <ul class="dropdown-menu">
+              <li role="presentation" class="dropdown-header">
+                Help
+              </li>
+              <% if Rails.configuration.enable_getting_started_popup %>
+                <li>
+                <%= link_to raw('<i class="fa fa-fw fa-info"></i> Getting Started ...'), "#",
+                     {'data-toggle' => "modal", 'data-target' => '#getting-started-modal-window'}  %>
+                </li>
+              <% end %>
+              <% if Rails.configuration.arvados_public_data_doc_url %>
+                <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> Public Pipelines and Data sets'), "#{Rails.configuration.arvados_public_data_doc_url}", target: "_blank" %></li>
+              <% end %>
+              <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> Tutorials and User guide'), "#{Rails.configuration.arvados_docsite}/user", target: "_blank" %></li>
+              <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> API Reference'), "#{Rails.configuration.arvados_docsite}/api", target: "_blank" %></li>
+              <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> SDK Reference'), "#{Rails.configuration.arvados_docsite}/sdk", target: "_blank" %></li>
+              <li role="presentation" class="divider"></li>
+              <li> <%= link_to report_issue_popup_path(popup_type: 'version', current_location: request.url, current_path: request.fullpath, action_method: 'post'),
+                      {class: 'report-issue-modal-window', remote: true, return_to: request.url} do %>
+                       <i class="fa fa-fw fa-support"></i> Show version / debugging info ...
+                      <% end %>
+              </li>
+              <li> <%= link_to report_issue_popup_path(popup_type: 'report', current_location: request.url, current_path: request.fullpath, action_method: 'post'),
+                      {class: 'report-issue-modal-window', remote: true, return_to: request.url} do %>
+                       <i class="fa fa-fw fa-support"></i> Report a problem ...
+                      <% end %>
+              </li>
+            </ul>
+          </li>
+        </ul>
+      </div><!-- /.navbar-collapse -->
+    </nav>
+
+    <% if current_user.andand.is_active %>
+      <%= render partial: 'breadcrumbs' %>
+    <% elsif !current_user %>   <%# anonymous %>
+      <% if (@name_link or @object) and (project_breadcrumbs.any?) %>
+        <nav class="navbar navbar-default breadcrumbs" role="navigation">
+          <ul class="nav navbar-nav navbar-left">
+            <li>
+              <a href="/projects/public">Public Projects</a>
+            </li>
+            <% project_breadcrumbs.each do |p| %>
+              <li class="nav-separator">
+                <i class="fa fa-lg fa-angle-double-right"></i>
+              </li>
+              <li>
+                <%= link_to(p.name, project_path(p.uuid), data: {object_uuid: p.uuid, name: 'name'}) %>
+              </li>
+            <% end %>
+          </ul>
+        </nav>
+      <% end %>
+    <% end %>
+
+    <%= render partial: 'browser_unsupported' %><%# requires JS support below %>
+    <%= render partial: 'getting_started/getting_started_popup' %>
+
+    <div id="page-wrapper">
+      <%= yield %>
+    </div>
+  </div>
+
+  <%= yield :footer_html %>
+
+<div class="modal-container"></div>
+<div id="report-issue-modal-window"></div>
+<script src="/browser_unsupported.js"></script>
+
+<%  if Rails.configuration.enable_getting_started_popup and current_user and !current_user.prefs[:getting_started_shown] and
+       !request.url.include?("/profile") and
+       !request.url.include?("/user_agreements") and
+       !request.url.include?("/inactive")%>
+  <script>
+    $("#getting-started-modal-window").modal('show');
+  </script>
+  <%
+    prefs = current_user.prefs
+    prefs[:getting_started_shown] = Time.now
+    current_user.update_attributes prefs: prefs.to_json
+  %>
+<% end %>
diff --git a/apps/workbench/app/views/links/_breadcrumb_page_name.html.erb b/apps/workbench/app/views/links/_breadcrumb_page_name.html.erb
new file mode 100644 (file)
index 0000000..4043908
--- /dev/null
@@ -0,0 +1,12 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if @object %>
+(<%= @object.link_class %>)
+<%= @object.name %>:
+<%= @object.tail_kind.andand.sub 'arvados#', '' %>
+&rarr;
+<%= @object.head_kind.andand.sub 'arvados#', '' %>
+<% end %>
+
diff --git a/apps/workbench/app/views/notifications/_collections_notification.html.erb b/apps/workbench/app/views/notifications/_collections_notification.html.erb
new file mode 100644 (file)
index 0000000..22643bf
--- /dev/null
@@ -0,0 +1,11 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+  <%= image_tag "dax.png", class: "dax" %>
+  <p>
+    Hi, I noticed you haven't uploaded a new collection yet. 
+    <%= link_to "Click here to learn how to upload data to Arvados Keep.", 
+       "#{Rails.configuration.arvados_docsite}/user/tutorials/tutorial-keep.html", 
+       style: "font-weight: bold", target: "_blank" %>
+  </p>
diff --git a/apps/workbench/app/views/notifications/_jobs_notification.html.erb b/apps/workbench/app/views/notifications/_jobs_notification.html.erb
new file mode 100644 (file)
index 0000000..d9cc7a6
--- /dev/null
@@ -0,0 +1,12 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+  <p><%= image_tag "dax.png", class: "dax" %>
+    Hi, I noticed you haven't run a job yet. 
+    <%= link_to "Click here to learn how to run an Arvados Crunch job.", 
+       "#{Rails.configuration.arvados_docsite}/user/tutorials/tutorial-job1.html", 
+       style: "font-weight: bold",
+       target: "_blank" %>
+  </p>
+
diff --git a/apps/workbench/app/views/notifications/_pipelines_notification.html.erb b/apps/workbench/app/views/notifications/_pipelines_notification.html.erb
new file mode 100644 (file)
index 0000000..e70fc59
--- /dev/null
@@ -0,0 +1,11 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+  <p><%= image_tag "dax.png", class: "dax" %>
+    Hi, I noticed you haven't run a pipeline yet.  
+    <%= link_to "Click here to learn how to run an Arvados Crunch pipeline.", 
+       "#{Rails.configuration.arvados_docsite}/user/tutorials/tutorial-pipeline-workbench.html",
+       style: "font-weight: bold",
+       target: "_blank" %>
+  </p>
diff --git a/apps/workbench/app/views/notifications/_ssh_key_notification.html.erb b/apps/workbench/app/views/notifications/_ssh_key_notification.html.erb
new file mode 100644 (file)
index 0000000..a17a451
--- /dev/null
@@ -0,0 +1,11 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+   <%= image_tag "dax.png", class: "dax" %>
+    <div>
+      Hi, I noticed that you have not yet set up an SSH public key for use with Arvados.
+      <%= link_to ssh_keys_user_path(current_user) do %>
+        <b>Click here to set up an SSH public key for use with Arvados.</b>
+      <%end%>
+    </div>
diff --git a/apps/workbench/app/views/pipeline_instances/_component_labels.html.erb b/apps/workbench/app/views/pipeline_instances/_component_labels.html.erb
new file mode 100644 (file)
index 0000000..73154b4
--- /dev/null
@@ -0,0 +1,9 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% pipeline_jobs(object).each do |pj| %>
+  <span class="label label-<%= pj[:labeltype] %>">
+    <%= pj[:name] %>
+  </span>&nbsp;
+<% end %>
diff --git a/apps/workbench/app/views/pipeline_instances/_running_component.html.erb b/apps/workbench/app/views/pipeline_instances/_running_component.html.erb
new file mode 100644 (file)
index 0000000..6e8785a
--- /dev/null
@@ -0,0 +1,205 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% current_job = pj[:job] if pj[:job] != {} and pj[:job][:uuid] %>
+<div class="panel panel-default">
+  <div class="panel-heading">
+    <div class="container-fluid">
+      <div class="row-fluid">
+        <%# column offset 0 %>
+        <div class="col-md-2" style="word-break:break-all;">
+          <h4 class="panel-title">
+            <a data-toggle="collapse" href="#collapse<%= i %>">
+              <%= pj[:name] %> <span class="caret"></span>
+            </a>
+          </h4>
+        </div>
+
+        <%# column offset 2 %>
+        <div class="col-md-2 pipeline-instance-spacing">
+          <%= pj[:progress_bar] %>
+        </div>
+
+        <%# column offset 4 %>
+        <% if not current_job %>
+          <div class="col-md-8"></div>
+        <% else %>
+          <div class="col-md-1">
+            <% if (pipeline_display rescue nil) %>
+              <% if current_job[:state].in? ["Complete", "Failed", "Cancelled"] %>
+                <% if current_job[:log] %>
+                  <% logCollection = Collection.find? current_job[:log] %>
+                  <% if logCollection %>
+                    <%= link_to "Log", job_path(current_job[:uuid], anchor: "Log") %>
+                  <% else %>
+                    Log unavailable
+                  <% end %>
+                <% end %>
+              <% elsif current_job[:state] == "Running" %>
+                <% job = Job.find? current_job[:uuid] %>
+                <% if job %>
+                  <%= link_to "Log", job_path(current_job[:uuid], anchor: "Log") %>
+                <% else %>
+                  Log unavailable
+                <% end %>
+              <% end %>
+            <% end %>
+          </div>
+
+          <%# column offset 5 %>
+          <% if current_job[:state] != "Queued" %>
+          <div class="col-md-3">
+            <% if current_job[:started_at] %>
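+              <%# walltime: elapsed real time; cputime: node allocation time, i.e. min_nodes (default 1) × walltime %>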
+              <% walltime = ((if current_job[:finished_at] then current_job[:finished_at] else Time.now() end) - current_job[:started_at]) %>
+              <% cputime = (current_job[:runtime_constraints].andand[:min_nodes] || 1).to_i *
+                           ((current_job[:finished_at] || Time.now()) - current_job[:started_at]) %>
+              <%= render_runtime(walltime, false) %>
+              <% if cputime > 0 %> / <%= render_runtime(cputime, false) %> (<%= (cputime/walltime).round(1) %>&Cross;)<% end %>
+            <% end %>
+          </div>
+          <% end %>
+
+          <% if current_job[:state] == "Queued" %>
+            <%# column offset 5 %>
+            <div class="col-md-6">
+              <% queuetime = Time.now - Time.parse(current_job[:created_at].to_s) %>
+              Queued for <%= render_runtime(queuetime, false) %>.
+            </div>
+          <% elsif current_job[:state] == "Running" %>
+            <%# column offset 8 %>
+            <div class="col-md-3">
+              <span class="task-summary-status">
+                <%= current_job[:tasks_summary][:done] %>&nbsp;<%= "task".pluralize(current_job[:tasks_summary][:done]) %> done,
+                <%= current_job[:tasks_summary][:failed] %>&nbsp;failed,
+                <%= current_job[:tasks_summary][:running] %>&nbsp;running,
+                <%= current_job[:tasks_summary][:todo] %>&nbsp;pending
+              </span>
+            </div>
+          <% elsif current_job[:state].in? ["Complete", "Failed", "Cancelled"] %>
+            <%# column offset 8 %>
+            <div class="col-md-4 text-overflow-ellipsis">
+              <% if pj[:output_uuid] %>
+                <%= link_to_arvados_object_if_readable(pj[:output_uuid], "#{pj[:output_uuid]} (Unavailable)", friendly_name: true) %>
+              <% elsif current_job[:output] %>
+                <%= link_to_arvados_object_if_readable(current_job[:output], "#{current_job[:output]} (Unavailable)", link_text: "Output of #{pj[:name]}") %>
+              <% else %>
+                No output.
+              <% end %>
+            </div>
+          <% end %>
+
+          <% if current_job[:state].in? ["Queued", "Running"] and @object.editable? %>
+            <%# column offset 11 %>
+            <div class="col-md-1 pipeline-instance-spacing">
+              <%= form_tag "/jobs/#{current_job[:uuid]}/cancel", remote: true, style: "display:inline; padding-left: 1em" do |f| %>
+                <%= hidden_field_tag :return_to, url_for(@object) %>
+                <%= button_tag "Cancel", {class: 'btn btn-xs btn-danger', id: "cancel-job-button"} %>
+              <% end %>
+            </div>
+          <% end %>
+        <% end %>
+      </div>
+    </div>
+  </div>
+
+  <div id="collapse<%= i %>" class="panel-collapse collapse <%= if expanded then 'in' end %>">
+    <div class="panel-body">
+      <div class="container">
+        <% current_component = (if current_job then current_job else pj end) %>
+        <div class="row">
+          <div class="col-md-6">
+            <table>
+              <% # link to repo tree/file only if the repo is readable
+                 # and the commit is a sha1...
+                 repo =
+                 (/^[0-9a-f]{40}$/ =~ current_component[:script_version] and
+                 Repository.where(name: current_component[:repository]).first)
+
+                 # ...and the api server provides an http:// or https:// url
+                 repo = nil unless repo.andand.http_fetch_url
+                 %>
+              <% [:script, :repository, :script_version, :supplied_script_version, :nondeterministic].each do |k| %>
+                <tr>
+                  <td style="padding-right: 1em">
+                    <%= k.to_s %>:
+                  </td>
+                  <td>
+                    <% if current_component[k].nil? %>
+                      (none)
+                    <% elsif repo and k == :repository %>
+                      <%= link_to current_component[k], show_repository_tree_path(id: repo.uuid, commit: current_component[:script_version], path: '/') %>
+                    <% elsif repo and k == :script %>
+                      <%= link_to current_component[k], show_repository_blob_path(id: repo.uuid, commit: current_component[:script_version], path: 'crunch_scripts/'+current_component[:script]) %>
+                    <% elsif repo and k == :script_version %>
+                      <%= link_to current_component[k], show_repository_commit_path(id: repo.uuid, commit: current_component[:script_version]) %>
+                    <% else %>
+                      <%= current_component[k] %>
+                    <% end %>
+                  </td>
+                </tr>
+              <% end %>
+              <% if current_component[:runtime_constraints].andand[:docker_image] and current_component[:docker_image_locator] %>
+                <tr>
+                  <td style="padding-right: 1em">
+                    docker_image:
+                  </td>
+                  <td>
+                    <%= current_component[:runtime_constraints][:docker_image] %>
+                  </td>
+                </tr>
+                <tr>
+                  <td style="padding-right: 1em">
+                    docker_image_locator:
+                  </td>
+                  <td>
+                    <%= link_to_arvados_object_if_readable(current_component[:docker_image_locator],
+                      current_component[:docker_image_locator], friendly_name: true) %>
+                  </td>
+                </tr>
+              <% else %>
+                <tr>
+                  <td style="padding-right: 1em">
+                    docker_image:
+                  </td>
+                  <td>
+                    Not run in Docker
+                  </td>
+                </tr>
+              <% end %>
+            </table>
+          </div>
+          <div class="col-md-5">
+            <table>
+              <% [:uuid, :modified_by_user_uuid, :priority, :created_at, :started_at, :finished_at].each do |k| %>
+                <tr>
+                  <td style="padding-right: 1em">
+                    <%= k.to_s %>:
+                  </td>
+                  <td>
+                    <% if k == :uuid %>
+                      <%= link_to_arvados_object_if_readable(current_component[k], current_component[k], link_text: current_component[k]) %>
+                    <% elsif k.to_s.end_with? 'uuid' %>
+                      <%= link_to_arvados_object_if_readable(current_component[k], current_component[k], friendly_name: true) %>
+                    <% elsif k.to_s.end_with? '_at' %>
+                      <%= render_localized_date(current_component[k]) %>
+                    <% else %>
+                      <%= current_component[k] %>
+                    <% end %>
+                  </td>
+                </tr>
+              <% end %>
+            </table>
+          </div>
+        </div>
+        <div class="row">
+          <div class="col-md-12">
+            <p>script_parameters:</p>
+            <pre><%= JSON.pretty_generate(current_component[:script_parameters]) rescue nil %></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_compare.html.erb b/apps/workbench/app/views/pipeline_instances/_show_compare.html.erb
new file mode 100644 (file)
index 0000000..e730257
--- /dev/null
@@ -0,0 +1,71 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
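+<%# The label column takes 2 of Bootstrap's 12 grid columns; split the remaining 10 evenly among the compared pipelines, at least 1 column each. %>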
+<% pi_span = [(10.0/[@objects.count,1].max).floor,1].max %>
+
+<div class="headrow pipeline-compare-headrow">
+  <div class="row">
+  <div class="col-sm-2">
+    <%# label %>
+  </div>
+  <% @objects.each do |object| %>
+  <div class="col-sm-<%= pi_span %>" style="overflow-x: hidden; text-overflow: ellipsis;">
+    <%= render :partial => "show_object_button", :locals => {object: object, size: 'sm' } %>
+    <%= object.name || "unnamed #{object.class_for_display.downcase}" %>
+    <br />
+    <span class="deemphasize">Template:</span> <%= link_to_if_arvados_object object.pipeline_template_uuid, friendly_name: true %>
+  </div>
+  <% end %>
+  </div>
+</div>
+
+<% @rows.each do |row| %>
+<div class="row pipeline-compare-row">
+  <div class="col-sm-2">
+    <%= row[:name] %>
+  </div>
+  <% @objects.each_with_index do |_, x| %>
+    <div class="col-sm-<%= pi_span %>">
+      <div class="row">
+        <div class="col-sm-12">
+
+        <% if row[:components][x] %>
+          <% pj = render_pipeline_job row[:components][x] %>
+
+          <%= link_to_if_arvados_object pj[:job_id], {friendly_name: true, with_class_name: true}, {class: 'deemphasize'} %>
+          <br />
+
+          <% %w(script script_version script_parameters output).each do |key| %>
+              <% unless key=='output' and pj[:result] != 'complete' %>
+              <% val = pj[key.to_sym] || pj[:job].andand[key.to_sym] %>
+              <% link_name = case
+                 when !val
+                   val = ''
+                 when key == 'script_version' && val.match(/^[0-9a-f]{7,}$/)
+                   val = val[0..7] # TODO: leave val alone, make link_to handle git commits
+                 when key == 'output'
+                   val.sub! /\+K.*$/, ''
+                   val[0..12]
+                 when key == 'script_parameters'
+                   val = val.keys.sort.join(', ')
+                 end
+                 %>
+              <span class="deemphasize"><%= key %>:</span>&nbsp;<span class="<%= 'notnormal' if !pj[:is_normal][key.to_sym] %>"><%= link_to_if_arvados_object val, {friendly_name: true, link_text: link_name} %></span>
+              <% end %>
+            <br />
+          <% end %>
+          <% else %>
+          None
+        <% end %>
+        </div>
+      </div>
+    </div>
+  <% end %>
+</div>
+<div class="row" style="padding: .5em">
+</div>
+<% end %>
+
+
diff --git a/apps/workbench/app/views/pipeline_instances/_show_components.html.erb b/apps/workbench/app/views/pipeline_instances/_show_components.html.erb
new file mode 100644 (file)
index 0000000..3fca07a
--- /dev/null
@@ -0,0 +1,25 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if !@object.state.in? ['New', 'Ready'] %>
+
+  <%
+     job_uuids = @object.components.map { |k,j| j.is_a? Hash and j[:job].andand[:uuid] }.compact
+     throttle = 86486400000 # 1001 nights
+     %>
+  <div class="arv-log-refresh-control"
+       data-load-throttle="<%= throttle %>"
+       data-object-uuids="<%= @object.uuid %> <%= job_uuids.join(' ') %>"
+       ></div>
+
+  <%= render partial: 'work_units/show_component', locals: {wu: @object.work_unit(@object.name)} %>
+
+<% else %>
+  <%# state is either New or Ready %>
+  <%= render_unreadable_inputs_present %>
+
+  <p><i>Here are all of the pipeline's components (jobs that will need to run in order to complete the pipeline). If you know what you're doing (or you're experimenting) you can modify these parameters before starting the pipeline. Usually, you only need to edit the settings presented on the "Inputs" tab above.</i></p>
+
+  <%= render_pipeline_components("editable", :json, editable: true) %>
+<% end %>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_components_editable.html.erb b/apps/workbench/app/views/pipeline_instances/_show_components_editable.html.erb
new file mode 100644 (file)
index 0000000..5311925
--- /dev/null
@@ -0,0 +1,52 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<table class="table pipeline-components-table" style="margin-top: -.1em">
+  <colgroup>
+    <col style="width: 20%" />
+    <col style="width: 20%" />
+    <col style="width: 20%" />
+    <col style="width: 40%" />
+  </colgroup>
+
+  <thead>
+    <tr>
+      <th>
+        component
+      </th><th>
+        script
+      </th><th>
+        parameter
+      </th><th>
+        value
+      </th>
+    </tr>
+  </thead>
+  <tbody>
+    <% @object.components.each do |k, component| %>
+      <% next if !component %>
+      <tr>
+        <td><%= k %></td>
+
+        <td><%= component[:script] %></td>
+
+        <td>script version</td>
+
+        <td>
+          <%= render_pipeline_component_attribute (editable && @object), :components, [k, :script_version], component[:script_version] %>
+        </td>
+      </tr>
+
+      <% component[:script_parameters].andand.each do |p, tv| %>
+        <tr>
+          <td style="border-top: none"></td>
+          <td style="border-top: none"></td>
+
+          <td class="property-edit-row"><%= p %></td>
+          <td class="property-edit-row"><%= render_pipeline_component_attribute (editable && @object), :components, [k, :script_parameters, p.to_sym], tv %></td>
+        </tr>
+      <% end %>
+    <% end %>
+  </tbody>
+</table>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_components_json.html.erb b/apps/workbench/app/views/pipeline_instances/_show_components_json.html.erb
new file mode 100644 (file)
index 0000000..4fdc8fb
--- /dev/null
@@ -0,0 +1,36 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<p>The components of this pipeline are in a format that Workbench does not recognize.</p>
+
+<p>Error encountered: <b><%= error_name %></b></p>
+
+    <div id="components-accordion" class="panel panel-default">
+      <div class="panel-heading">
+        <h4 class="panel-title">
+          <a data-toggle="collapse" data-parent="#components-accordion" href="#components-json">
+            Show components JSON
+          </a>
+        </h4>
+      </div>
+      <div id="components-json" class="panel-collapse collapse">
+        <div class="panel-body">
+          <pre><%= Oj.dump(@object.components, indent: 2) %></pre>
+        </div>
+      </div>
+      <% if backtrace %>
+      <div class="panel-heading">
+        <h4 class="panel-title">
+          <a data-toggle="collapse" data-parent="#components-accordion" href="#components-backtrace">
+            Show backtrace
+          </a>
+        </h4>
+      </div>
+      <div id="components-backtrace" class="panel-collapse collapse">
+        <div class="panel-body">
+          <pre><%= backtrace %></pre>
+        </div>
+      </div>
+      <% end %>
+    </div>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_components_running.html.erb b/apps/workbench/app/views/pipeline_instances/_show_components_running.html.erb
new file mode 100644 (file)
index 0000000..60d4c2a
--- /dev/null
@@ -0,0 +1,109 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%# Summary %>
+
+<div class="pull-right" style="padding-left: 1em">
+  Current state: <span class="badge badge-info" data-pipeline-state="<%= @object.state %>">
+    <% if @object.state == "RunningOnServer" %>
+      Active
+    <% else %>
+      <%= @object.state %>
+    <% end %>
+  </span>&nbsp;
+</div>
+
+<% pipeline_jobs = render_pipeline_jobs %>
+<% job_uuids = pipeline_jobs.map { |j| j[:job].andand[:uuid] }.compact %>
+
+<% if @object.state == 'Paused' %>
+  <p>
+    This pipeline is paused.  Jobs that are
+    already running will continue to run, but no new jobs will be submitted.
+  </p>
+<% end %>
+
+<% runningtime = determine_wallclock_runtime(pipeline_jobs.map {|j| j[:job]}.compact) %>
+
+<p>
+  <% if @object.started_at %>
+    This pipeline started at <%= render_localized_date(@object.started_at) %>.
+    It
+    <% if @object.state == 'Complete' %>
+      completed in
+    <% elsif @object.state == 'Failed' %>
+      failed after
+    <% elsif @object.state == 'Cancelled' %>
+      was cancelled after
+    <% else %>
+      has been active for
+    <% end %>
+
+    <% walltime = if @object.finished_at then
+                    @object.finished_at - @object.started_at
+                  else
+                    Time.now - @object.started_at
+                  end %>
+
+    <%= if walltime > runningtime
+          render_runtime(walltime, false)
+        else
+          render_runtime(runningtime, false)
+        end %><% if @object.finished_at %> at <%= render_localized_date(@object.finished_at) %><% end %>.
+    <% else %>
+      This pipeline is <%= if @object.state.start_with? 'Running' then 'active' else @object.state.downcase end %>.
+        <% walltime = 0%>
+    <% end %>
+
+  <% if @object.state == 'Failed' %>
+    Check the Log tab for more detail about why this pipeline failed.
+  <% end %>
+</p>
+
+<p>
+    This pipeline
+    <% if @object.state.start_with? 'Running' %>
+      has run
+    <% else %>
+      ran
+    <% end %>
+    for
+    <%
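+        # Node allocation time: for each started job, min_nodes (default 1)
+        # multiplied by its elapsed wall time, summed over all jobs.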
+        cputime = pipeline_jobs.map { |j|
+        if j[:job][:started_at]
+          (j[:job][:runtime_constraints].andand[:min_nodes] || 1).to_i * ((j[:job][:finished_at] || Time.now()) - j[:job][:started_at])
+        else
+          0
+        end
+       }.reduce(:+) || 0 %>
+    <%= render_runtime(runningtime, false) %><% if (walltime - runningtime) > 0 %>
+      (<%= render_runtime(walltime - runningtime, false) %> queued)<% end %><% if cputime == 0 %>.<% else %>
+      and used
+    <%= render_runtime(cputime, false) %>
+    of node allocation time (<%= (cputime/runningtime).round(1) %>&Cross; scaling).
+    <% end %>
+</p>
+
+<%# Components %>
+
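+<%# A descriptive note (added by the editor): preload the job records and the
+    output/docker-image collections referenced by the components, so each
+    'running_component' row below can render without issuing its own API
+    requests. Collections may be referenced by UUID or by portable data hash
+    (PDH), which are preloaded through separate helpers. %>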
+<%
+  job_uuids = pipeline_jobs.collect {|j| j[:job][:uuid]}.compact
+  if job_uuids.any?
+    resource_class = resource_class_for_uuid(job_uuids.first, friendly_name: true)
+    preload_objects_for_dataclass resource_class, job_uuids
+  end
+
+  job_collections = pipeline_jobs.collect {|j| j[:job][:output]}.compact
+  job_collections.concat pipeline_jobs.collect {|j| j[:job][:docker_image_locator]}.uniq.compact
+  job_collections_pdhs = job_collections.select {|x| !CollectionsHelper.match(x).nil?}.uniq.compact
+  job_collections_uuids = job_collections - job_collections_pdhs
+  preload_collections_for_objects job_collections_uuids if job_collections_uuids.any?
+  preload_for_pdhs job_collections_pdhs if job_collections_pdhs.any?
+%>
+
+<% pipeline_jobs.each_with_index do |pj, i| %>
+  <%= render partial: 'running_component', locals: {pj: pj, i: i, expanded: false, pipeline_display: true} %>
+<% end %>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_graph.html.erb b/apps/workbench/app/views/pipeline_instances/_show_graph.html.erb
new file mode 100644 (file)
index 0000000..1536591
--- /dev/null
@@ -0,0 +1,19 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if @pipelines.count > 1 %>
+  <div style="text-align: center; padding-top: 0.5em">
+    <span class="pipeline_color_legend" style="background: #aaffaa"><%= link_to_if_arvados_object @pipelines[0], friendly_name: true %></span>
+    <span class="pipeline_color_legend" style="background: #aaaaff"><%= link_to_if_arvados_object @pipelines[1], friendly_name: true %></span>
+    <% if @pipelines.count > 2 %>
+    <span class="pipeline_color_legend" style="background: #ffaaaa"><%= link_to_if_arvados_object @pipelines[2], friendly_name: true %></span>
+    <% end %>
+    <span class="pipeline_color_legend" style="background: #aaaaaa">Common to <%= @pipelines.count > 2 ? 'multiple' : 'both' %> pipelines</span>
+  </div>
+<% end %>
+
+<%= render partial: 'application/svg_div', locals: {
+      divId: "provenance_graph", 
+      svgId: "provenance_svg", 
+      svg: @prov_svg } %>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_inputs.html.erb b/apps/workbench/app/views/pipeline_instances/_show_inputs.html.erb
new file mode 100644 (file)
index 0000000..60d4445
--- /dev/null
@@ -0,0 +1,56 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% n_inputs = 0 %>
+
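+<%# A descriptive note (added by the editor): build a form with one entry per
+    required script parameter across all components, counting them in
+    n_inputs. The form is captured with content_for and only rendered below
+    once n_inputs is known. %>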
+<% content_for :pi_input_form do %>
+<form role="form" style="width:60%">
+  <div class="form-group">
+    <% @object.components.each do |cname, component| %>
+      <% next if !component %>
+      <% component[:script_parameters].andand.each do |pname, pvalue_spec| %>
+        <% if pvalue_spec.is_a? Hash %>
+          <% if pvalue_spec[:description] or
+                pvalue_spec[:required] or pvalue_spec[:optional] == false %>
+            <% n_inputs += 1 %>
+            <label for="<%= "#{cname}-#{pname}" %>">
+              <%= @object.component_input_title(cname, pname) %>
+            </label>
+            <div>
+              <p class="form-control-static">
+                <%= render_pipeline_component_attribute @object, :components, [cname, :script_parameters, pname.to_sym], pvalue_spec %>
+              </p>
+            </div>
+            <p class="help-block">
+              <%= pvalue_spec[:description] %>
+            </p>
+          <% end %>
+        <% end %>
+      <% end %>
+    <% end %>
+  </div>
+</form>
+<% end %>
+
+<% if n_inputs == 0 %>
+  <p>This pipeline does not need any further inputs specified. You can start it by clicking the "Run" button whenever you're ready. (It's not too late to change existing settings, though.)</p>
+<% else %>
+  <%= render_unreadable_inputs_present %>
+
+  <p><i>Provide <%= n_inputs > 1 ? 'values' : 'a value' %> for the following <%= n_inputs > 1 ? 'parameters' : 'parameter' %>, then click the "Run" button to start the pipeline.</i></p>
+  <% if @object.editable? %>
+    <%= content_for :pi_input_form %>
+      <%= link_to(url_for('pipeline_instance[state]' => 'RunningOnServer'),
+          class: 'btn btn-primary run-pipeline-button',
+          method: :patch
+          ) do %>
+        Run <i class="fa fa-fw fa-play"></i>
+    <% end %>
+  <% end %>
+
+<% end %>
+
+<div style="margin-top: 1em;">
+  <p>Click the "Components" tab above to see a full list of pipeline settings.</p>
+</div>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_log.html.erb b/apps/workbench/app/views/pipeline_instances/_show_log.html.erb
new file mode 100644 (file)
index 0000000..24937ba
--- /dev/null
@@ -0,0 +1,49 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
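+<%# A descriptive note (added by the editor): job_log_ids maps each component
+    name to its job's log collection, or nil while the job is still running;
+    partitioning on nil separates components that are still logging from those
+    whose logs are final. %>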
+<% log_ids = @object.job_log_ids
+   job_ids = @object.job_ids
+   still_logging, done_logging = log_ids.keys.partition { |k| log_ids[k].nil? }
+%>
+
+<% unless done_logging.empty? %>
+  <table class="topalign table table-condensed table-fixedlayout">
+    <colgroup>
+      <col width="40%" />
+      <col width="60%" />
+    </colgroup>
+    <thead>
+      <tr>
+        <th>finished component</th>
+        <th>job log</th>
+      </tr>
+    </thead>
+    <tbody>
+      <% done_logging.each do |cname| %>
+      <tr>
+        <td><%= cname %></td>
+        <td><%= link_to("Log for #{cname}",
+                job_path(job_ids[cname], anchor: "Log"))
+                %></td>
+      </tr>
+      <% end %>
+    </tbody>
+  </table>
+<% end %>
+
+<% unless still_logging.empty? %>
+  <h4>Logs in progress</h4>
+
+  <pre id="event_log_div"
+       class="arv-log-event-listener arv-log-event-handler-append-logs arv-log-event-subscribe-to-pipeline-job-uuids arv-job-log-window"
+       data-object-uuids="<%= @object.stderr_log_object_uuids.join(' ') %>"
+       ><%= @object.stderr_log_lines.join("\n") %></pre>
+
+  <%# Applying a long throttle suppresses the auto-refresh of this
+      partial that would normally be triggered by arv-log-event. %>
+  <div class="arv-log-refresh-control"
+       data-load-throttle="86486400000" <%# 1001 nights %>
+       ></div>
+<% end %>
+
diff --git a/apps/workbench/app/views/pipeline_instances/_show_object_description_cell.html.erb b/apps/workbench/app/views/pipeline_instances/_show_object_description_cell.html.erb
new file mode 100644 (file)
index 0000000..60ed93b
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="nowrap">
+  <%= object.content_summary %><br />
+  <%= render partial: 'pipeline_instances/component_labels', locals: {object: object} %>
+</div>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_recent.html.erb b/apps/workbench/app/views/pipeline_instances/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..3aac930
--- /dev/null
@@ -0,0 +1,41 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= form_tag({}, {id: "comparedInstances"}) do |f| %>
+
+<table class="table table-condensed table-fixedlayout arv-recent-pipeline-instances">
+  <colgroup>
+    <col width="5%" />
+    <col width="15%" />
+    <col width="25%" />
+    <col width="20%" />
+    <col width="15%" />
+    <col width="15%" />
+    <col width="5%" />
+  </colgroup>
+  <thead>
+    <tr class="contain-align-left">
+      <th>
+      </th><th>
+       Status
+      </th><th>
+       Instance
+      </th><th>
+       Template
+      </th><th>
+       Owner
+      </th><th>
+       Created at
+      </th><th>
+      </th>
+    </tr>
+  </thead>
+
+  <tbody data-infinite-scroller="#recent-pipeline-instances" id="recent-pipeline-instances"
+         data-infinite-content-href="<%= url_for partial: :recent_rows %>" >
+  </tbody>
+
+</table>
+
+<% end %>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_recent_rows.html.erb b/apps/workbench/app/views/pipeline_instances/_show_recent_rows.html.erb
new file mode 100644 (file)
index 0000000..bcf6b28
--- /dev/null
@@ -0,0 +1,36 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
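+<%# A descriptive note (added by the editor): each pipeline instance is
+    rendered as two table rows -- one with its status, links and delete
+    button, and a de-emphasized second row summarizing the progress of each
+    component job. %>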
+<% @objects.sort_by { |ob| ob.created_at }.reverse.each do |ob| %>
+    <tr data-object-uuid="<%= ob.uuid %>" data-kind="<%= ob.kind %>" >
+      <td>
+        <%= check_box_tag 'uuids[]', ob.uuid, false, :class => 'persistent-selection' %>
+      </td><td>
+        <%= render partial: 'pipeline_status_label', locals: {:p => ob} %>
+      </td><td>
+        <%= link_to_if_arvados_object ob, friendly_name: true %>
+      </td><td>
+        <%= link_to_if_arvados_object ob.pipeline_template_uuid, friendly_name: true %>
+      </td><td>
+        <%= link_to_if_arvados_object ob.owner_uuid, friendly_name: true %>
+      </td><td>
+        <%= ob.created_at.to_s %>
+      </td><td>
+        <%= render partial: 'delete_object_button', locals: {object:ob} %>
+      </td>
+    </tr>
+    <tr data-object-uuid="<%= ob.uuid %>">
+      <td style="border-top: 0;" colspan="2">
+      </td>
+      <td style="border-top: 0; opacity: 0.5;" colspan="6">
+        <% ob.components.each do |cname, c| %>
+          <% if c.is_a?(Hash) and c[:job] %>
+            <%= render partial: "job_progress", locals: {:j => c[:job], :title => cname.to_s, :show_progress_bar => false } %>
+          <% else %>
+            <span class="label label-default"><%= cname.to_s %></span>
+          <% end %>
+        <% end %>
+      </td>
+    </tr>
+<% end %>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_tab_buttons.html.erb b/apps/workbench/app/views/pipeline_instances/_show_tab_buttons.html.erb
new file mode 100644 (file)
index 0000000..ae9e3c7
--- /dev/null
@@ -0,0 +1,52 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
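+<%# A descriptive note (added by the editor): offer state-appropriate actions
+    -- re-run buttons for finished pipelines, Run for new/ready ones, Pause
+    while running, and Resume when paused. %>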
+<% if current_user.andand.is_active %>
+  <% if @object.state.in? ['Complete', 'Failed', 'Cancelled', 'Paused'] %>
+
+  <%= link_to(copy_pipeline_instance_path('id' => @object.uuid, 'script' => "use_latest", "components" => "use_latest", "pipeline_instance[state]" => "RunningOnServer"),
+      class: 'btn btn-primary',
+      title: 'Re-run with latest options',
+      method: :post,
+      ) do %>
+    <i class="fa fa-fw fa-play"></i> Re-run with latest
+  <% end %>
+
+  <%= link_to raw('<i class="fa fa-fw fa-cogs"></i> Re-run options...'),
+      "#",
+      {class: 'btn btn-primary', 'data-toggle' =>  "modal",
+        'data-target' => '#clone-and-edit-modal-window',
+        title: 'Re-run with options'}  %>
+  <% end %>
+
+  <% if @object.state.in? ['New', 'Ready'] %>
+    <%= link_to(url_for('pipeline_instance[state]' => 'RunningOnServer'),
+        class: 'btn btn-primary run-pipeline-button',
+        title: 'Run this pipeline',
+        method: :patch
+        ) do %>
+      <i class="fa fa-fw fa-play"></i> Run
+    <% end %>
+  <% else %>
+    <% if @object.state.in? ['RunningOnClient', 'RunningOnServer'] %>
+      <%= link_to(cancel_pipeline_instance_path,
+          class: 'btn btn-primary run-pipeline-button',
+          title: 'Pause this pipeline',
+          data: {confirm: 'All unfinished child jobs will be cancelled, even if they are being used in another job or pipeline. Are you sure you want to pause this pipeline?'},
+          method: :post
+          ) do %>
+        <i class="fa fa-fw fa-pause"></i> Pause
+      <% end %>
+    <% elsif @object.state == 'Paused' %>
+      <%= link_to(url_for('pipeline_instance[state]' => 'RunningOnServer'),
+          class: 'btn btn-primary run-pipeline-button',
+          title: 'Resume this pipeline',
+          method: :patch
+          ) do %>
+        <i class="fa fa-fw fa-play"></i> Resume
+      <% end %>
+    <% end %>
+  <% end %>
+<% end %>
diff --git a/apps/workbench/app/views/pipeline_instances/compare.html.erb b/apps/workbench/app/views/pipeline_instances/compare.html.erb
new file mode 100644 (file)
index 0000000..960d81d
--- /dev/null
@@ -0,0 +1,19 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if (o = Group.find?(@objects.first.owner_uuid)) %>
+  <% content_for :breadcrumbs do %>
+    <li class="nav-separator"><span class="glyphicon glyphicon-arrow-right"></span></li>
+    <li>
+      <%= link_to(o.name, project_path(o.uuid)) %>
+    </li>
+    <li class="nav-separator">
+      <span class="glyphicon glyphicon-arrow-right"></span>
+    </li>
+    <li>
+      <%= link_to '#' do %>compare pipelines<% end %>
+    </li>
+  <% end %>
+<% end %>
+<%= render partial: 'content', layout: 'content_layout', locals: {pane_list: controller.compare_pane_list }  %>
diff --git a/apps/workbench/app/views/pipeline_instances/index.html.erb b/apps/workbench/app/views/pipeline_instances/index.html.erb
new file mode 100644 (file)
index 0000000..250d51a
--- /dev/null
@@ -0,0 +1,21 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :tab_line_buttons do %>
+  <div class="input-group">
+    <input type="text" class="form-control filterable-control recent-pipeline-instances-filterable-control"
+           placeholder="Search pipeline instances"
+           data-filterable-target="#recent-pipeline-instances"
+           <%# Just for the double-load test in FilterableInfiniteScrollTest: %>
+           value="<%= params[:search] %>"
+           />
+  </div>
+
+  <%= form_tag({action: 'compare', controller: params[:controller], method: 'get'}, {method: 'get', id: 'compare', class: 'pull-right small-form-margin'}) do |f| %>
+    <%= submit_tag 'Compare 2 or 3 selected', {class: 'btn btn-primary', disabled: true} %>
+  <% end rescue nil %>
+
+<% end %>
+
+<%= render file: 'application/index.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/app/views/pipeline_instances/show.html.erb b/apps/workbench/app/views/pipeline_instances/show.html.erb
new file mode 100644 (file)
index 0000000..881d771
--- /dev/null
@@ -0,0 +1,77 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% template = PipelineTemplate.find?(@object.pipeline_template_uuid) %>
+<%= content_for :content_top do %>
+  <div class="row">
+    <div class="col-sm-6">
+      <%= render partial: 'name_and_description' %>
+    </div>
+    <% if template %>
+      <div class="alert alert-info col-sm-6">
+        This pipeline was created from the template <%= link_to_if_arvados_object template, friendly_name: true %><br />
+        <% if template.modified_at && (template.modified_at > @object.created_at) %>
+        Note: This template has been modified since this instance was created.
+        <% end %>
+      </div>
+    <% end %>
+  </div>
+<% end %>
+
+<% content_for :tab_line_buttons do %>
+
+  <div id="pipeline-instance-tab-buttons"
+       class="pane-loaded arv-log-event-listener arv-refresh-on-state-change"
+       data-pane-content-url="<%= url_for(params.merge(tab_pane: "tab_buttons")) %>"
+       data-object-uuid="<%= @object.uuid %>"
+       >
+    <%= render partial: 'show_tab_buttons', locals: {object: @object}%>
+  </div>
+
+<% end %>
+
+<%= render partial: 'content', layout: 'content_layout', locals: {pane_list: controller.show_pane_list }%>
+
+<div id="clone-and-edit-modal-window" class="modal fade" role="dialog"
+     aria-labelledby="myModalLabel" aria-hidden="true">
+  <div class="modal-dialog">
+    <div class="modal-content">
+
+    <%= form_tag copy_pipeline_instance_path do |f| %>
+
+      <div class="modal-header">
+        <button type="button" class="close" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">&times;</button>
+        <div>
+          <div class="col-sm-6"> <h4 class="modal-title">Re-run pipeline</h4> </div>
+        </div>
+        <br/>
+      </div>
+
+      <div class="modal-body">
+              <%= radio_button_tag(:script, "use_latest", true) %>
+              <%= label_tag(:script_use_latest, "Use latest script versions") %>
+              <br>
+              <%= radio_button_tag(:script, "use_same") %>
+              <%= label_tag(:script_use_same, "Use same script versions as this run") %>
+              <br>
+              <% if template %>
+              <br>
+              <%= radio_button_tag(:components, "use_latest", true) %>
+              <%= label_tag(:components_use_latest, "Update components against template") %>
+              <br>
+              <%= radio_button_tag(:components, "use_same") %>
+              <%= label_tag(:components_use_same, "Use same components as this run") %>
+              <% end %>
+      </div>
+
+      <div class="modal-footer">
+        <button class="btn btn-default" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">Cancel</button>
+        <button type="submit" class="btn btn-primary" name="pipeline_instance[state]" value="RunningOnServer">Run now</button>
+        <button type="submit" class="btn btn-primary" name="pipeline_instance[state]" value="New">Copy and edit inputs</button>
+      </div>
+
+    <% end %>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/pipeline_instances/show.js.erb b/apps/workbench/app/views/pipeline_instances/show.js.erb
new file mode 100644 (file)
index 0000000..28a1fdb
--- /dev/null
@@ -0,0 +1,19 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
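+<%# A descriptive note (added by the editor): re-render the show page and swap
+    it into the DOM only if the HTML has actually changed, restoring the
+    previously active tabs afterwards. %>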
+<% self.formats = [:html] %>
+var new_content = "<%= escape_javascript(render template: 'pipeline_instances/show') %>";
+var selected_tab_hrefs = [];
+if ($('div#page-wrapper').html() != new_content) {
+    $('.nav-tabs li.active a').each(function() {
+        selected_tab_hrefs.push($(this).attr('href'));
+    });
+
+    $('div#page-wrapper').html(new_content);
+
+    // Show the same tabs that were active before we rewrote page-wrapper
+    $.each(selected_tab_hrefs, function(i, href) {
+        $('.nav-tabs li a[href="' + href + '"]').tab('show');
+    });
+}
diff --git a/apps/workbench/app/views/pipeline_templates/_choose.js.erb b/apps/workbench/app/views/pipeline_templates/_choose.js.erb
new file mode 120000 (symlink)
index 0000000..8420a7f
--- /dev/null
@@ -0,0 +1 @@
+../application/_choose.js.erb
\ No newline at end of file
diff --git a/apps/workbench/app/views/pipeline_templates/_choose_rows.html.erb b/apps/workbench/app/views/pipeline_templates/_choose_rows.html.erb
new file mode 100644 (file)
index 0000000..371398d
--- /dev/null
@@ -0,0 +1,12 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% @objects.each do |object| %>
+  <div class="row filterable selectable" data-object-uuid="<%= object.uuid %>" data-preview-href="<%= url_for object %>?tab_pane=chooser_preview">
+    <div class="col-sm-12" style="overflow-x:hidden">
+      <i class="fa fa-fw fa-gear"></i>
+      <%= object.name %>
+    </div>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/pipeline_templates/_show_attributes.html.erb b/apps/workbench/app/views/pipeline_templates/_show_attributes.html.erb
new file mode 100644 (file)
index 0000000..1b3557b
--- /dev/null
@@ -0,0 +1,19 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= content_for :content_top do %>
+  <h2>Template '<%= @object.name %>'</h2>
+<% end %>
+
+<table class="table topalign">
+  <thead>
+  </thead>
+  <tbody>
+    <% @object.attributes_for_display.each do |attr, attrvalue| %>
+      <% if attr != 'components' %>
+        <%= render partial: 'application/arvados_object_attr', locals: { attr: attr, attrvalue: attrvalue } %>
+      <% end %>
+    <% end %>
+  </tbody>
+</table>
diff --git a/apps/workbench/app/views/pipeline_templates/_show_chooser_preview.html.erb b/apps/workbench/app/views/pipeline_templates/_show_chooser_preview.html.erb
new file mode 100644 (file)
index 0000000..614ec33
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="col-sm-11 col-sm-push-1 arv-description-in-table">
+  <%= @object.description %>
+</div>
+<%= render partial: 'show_components' %>
diff --git a/apps/workbench/app/views/pipeline_templates/_show_components.html.erb b/apps/workbench/app/views/pipeline_templates/_show_components.html.erb
new file mode 100644 (file)
index 0000000..fd4a0ed
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render_pipeline_components("editable", :json, editable: false) %>
diff --git a/apps/workbench/app/views/pipeline_templates/_show_pipelines.html.erb b/apps/workbench/app/views/pipeline_templates/_show_pipelines.html.erb
new file mode 100644 (file)
index 0000000..3df0296
--- /dev/null
@@ -0,0 +1,6 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: 'pipeline_instances/show_recent' %>
diff --git a/apps/workbench/app/views/pipeline_templates/_show_recent.html.erb b/apps/workbench/app/views/pipeline_templates/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..c708c1f
--- /dev/null
@@ -0,0 +1,72 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: "paging", locals: {results: @objects, object: @object} %>
+
+<table class="table table-condensed arv-index">
+  <colgroup>
+    <col width="8%" />
+    <col width="10%" />
+    <col width="22%" />
+    <col width="45%" />
+    <col width="15%" />
+  </colgroup>
+  <thead>
+    <tr class="contain-align-left">
+      <th>
+      </th><th>
+      </th><th>
+        name
+      </th><th>
+        description/components
+      </th><th>
+        owner
+      </th>
+    </tr>
+  </thead>
+  <tbody>
+
+    <% @objects.sort_by { |ob| ob[:created_at] }.reverse.each do |ob| %>
+
+    <tr>
+      <td>
+        <%= button_to(choose_projects_path(id: "run-pipeline-button",
+                                     title: 'Choose project',
+                                     editable: true,
+                                     action_name: 'Choose',
+                                     action_href: pipeline_instances_path,
+                                     action_method: 'post',
+                                     action_data: {selection_param: 'pipeline_instance[owner_uuid]',
+                                                   'pipeline_instance[pipeline_template_uuid]' => ob.uuid,
+                                                   'pipeline_instance[description]' => "Created at #{Time.now.localtime}" + (ob.name.andand.size.andand > 0 ? " using the pipeline template *#{ob.name}*" : ""),
+                                                   'success' => 'redirect-to-created-object'
+                                                  }.to_json),
+                { class: "btn btn-default btn-xs", title: "Run #{ob.name}", remote: true, method: :get }
+            ) do %>
+               <i class="fa fa-fw fa-play"></i> Run
+              <% end %>
+      </td>
+      <td>
+        <%= render :partial => "show_object_button", :locals => {object: ob, size: 'xs'} %>
+      </td><td>
+        <%= render_editable_attribute ob, 'name' %>
+      </td><td>
+        <% if ob.respond_to?(:description) and ob.description %>
+          <%= render_attribute_as_textile(ob, "description", ob.description, false) %>
+          <br />
+        <% end %>
+        <% ob.components.collect { |k,v| k.to_s }.each do |k| %>
+          <span class="label label-default"><%= k %></span>
+        <% end %>
+      </td><td>
+        <%= link_to_if_arvados_object ob.owner_uuid, friendly_name: true %>
+      </td>
+    </tr>
+
+    <% end %>
+
+  </tbody>
+</table>
+
+<%= render partial: "paging", locals: {results: @objects, object: @object} %>
diff --git a/apps/workbench/app/views/pipeline_templates/show.html.erb b/apps/workbench/app/views/pipeline_templates/show.html.erb
new file mode 100644 (file)
index 0000000..7f07d27
--- /dev/null
@@ -0,0 +1,29 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if @object.editable? %>
+  <% content_for :tab_line_buttons do %>
+    <%= link_to(choose_projects_path(
+        id: "run-pipeline-button",
+        title: 'Choose project',
+        editable: true,
+        action_name: 'Choose',
+        action_href: pipeline_instances_path,
+        action_method: 'post',
+        action_data: {
+          'selection_param' => 'pipeline_instance[owner_uuid]',
+          'pipeline_instance[pipeline_template_uuid]' => @object.uuid,
+          'pipeline_instance[description]' => "Created at #{Time.now.localtime}" + (@object.name.andand.size.andand > 0 ? " using the pipeline template *#{@object.name}*" : ""),
+          'success' => 'redirect-to-created-object',
+        }.to_json), {
+          class: "btn btn-primary btn-sm",
+          remote: true,
+          title: 'Run this pipeline'
+        }) do %>
+      <i class="fa fa-gear"></i> Run this pipeline
+    <% end %>
+  <% end %>
+<% end %>
+
+<%= render file: 'application/show.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/app/views/projects/_choose.html.erb b/apps/workbench/app/views/projects/_choose.html.erb
new file mode 100644 (file)
index 0000000..8e5695e
--- /dev/null
@@ -0,0 +1,61 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="modal modal-with-loading-spinner">
+  <div class="modal-dialog">
+    <div class="modal-content">
+
+      <div class="modal-header">
+        <button type="button" class="close" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">&times;</button>
+        <div>
+          <div class="col-sm-6"> <h4 class="modal-title"><%= params[:title] || 'Choose project' %></h4> </div>
+          <div class="spinner spinner-32px spinner-h-center col-sm-1" hidden="true"></div>
+        </div>
+        <br/>
+      </div>
+
+      <div class="modal-body">
+        <div class="selectable-container" style="height: 15em; overflow-y: scroll">
+          <% starred_projects = my_starred_projects current_user %>
+          <% if starred_projects.andand.any? %>
+            <% writable_projects = starred_projects.select(&:editable?) %>
+            <% writable_projects.each do |projectnode| %>
+              <% row_name = projectnode.friendly_link_name || 'New project' %>
+              <div class="selectable project row"
+                   style="padding-left: 1em; margin-right: 0px"
+                   data-object-uuid="<%= projectnode.uuid %>">
+                <i class="fa fa-fw fa-folder-o"></i> <%= row_name %> <i class="fa fa-fw fa-star"></i>
+              </div>
+            <% end %>
+          <% end %>
+
+          <% my_projects = my_wanted_projects_tree(current_user) %>
+          <% my_projects[0].each do |projectnode| %>
+            <% if projectnode[:object].uuid == current_user.uuid
+                 row_name = "Home"
+               else
+                 row_name = projectnode[:object].friendly_link_name || 'New project'
+               end %>
+            <div class="selectable project row"
+                 style="padding-left: <%= 1 + projectnode[:depth] %>em; margin-right: 0px"
+                 data-object-uuid="<%= projectnode[:object].uuid %>">
+              <i class="fa fa-fw fa-folder-o"></i> <%= row_name %>
+            </div>
+          <% end %>
+        </div>
+
+        <% if my_projects[1] or my_projects[2] or my_projects[0].size > 200 %>
+          <div>Some of your projects are omitted from this list. Add projects of interest to your favorites to see them here.</div>
+        <% end %>
+      </div>
+
+      <div class="modal-footer">
+        <button class="btn btn-default" data-dismiss="modal" aria-hidden="true">Cancel</button>
+        <button class="btn btn-primary" aria-hidden="true" data-enable-if-selection disabled><%= params[:action_name] || 'Select' %></button>
+        <div class="modal-error hide" style="text-align: left; margin-top: 1em;">
+        </div>
+      </div>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/projects/_choose.js.erb b/apps/workbench/app/views/projects/_choose.js.erb
new file mode 120000 (symlink)
index 0000000..8420a7f
--- /dev/null
@@ -0,0 +1 @@
+../application/_choose.js.erb
\ No newline at end of file
diff --git a/apps/workbench/app/views/projects/_compute_node_status.html.erb b/apps/workbench/app/views/projects/_compute_node_status.html.erb
new file mode 100644 (file)
index 0000000..3de2ab6
--- /dev/null
@@ -0,0 +1,20 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<h4>Node status</h4>
+<div class="compute-summary-nodelist">
+    <% nodes.each do |n| %>
+        <div class="compute-summary">
+          <a data-toggle="collapse" href="#detail_<%= n.hostname %>" class="compute-summary-head label label-<%= if n.crunch_worker_state == 'busy' then 'primary' else 'default' end %>">
+            <%= n.hostname %>
+          </a>
+          <div id="detail_<%= n.hostname %>" class="collapse compute-detail">
+            state: <%= n.crunch_worker_state %><br>
+            <% [:total_cpu_cores, :total_ram_mb, :total_scratch_mb].each do |i| %>
+              <%= i.to_s.gsub '_', ' ' %>: <%= n.properties[i] %><br>
+            <% end %>
+          </div>
+        </div>
+    <% end %>
+</div>
diff --git a/apps/workbench/app/views/projects/_compute_node_summary.html.erb b/apps/workbench/app/views/projects/_compute_node_summary.html.erb
new file mode 100644 (file)
index 0000000..40a212e
--- /dev/null
@@ -0,0 +1,20 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="compute-summary-numbers">
+    <table>
+      <colgroup>
+        <col width="50%">
+        <col width="50%">
+      </colgroup>
+      <tr>
+        <td><%= nodes.select {|n| n.crunch_worker_state == "busy" }.size %></td>
+        <td><%= nodes.select {|n| n.crunch_worker_state == "idle" }.size %></td>
+      </tr>
+      <tr>
+        <th>Busy nodes</th>
+        <th>Idle nodes</th>
+      </tr>
+    </table>
+</div>
diff --git a/apps/workbench/app/views/projects/_index_jobs_and_pipelines.html.erb b/apps/workbench/app/views/projects/_index_jobs_and_pipelines.html.erb
new file mode 100644 (file)
index 0000000..d0f36b1
--- /dev/null
@@ -0,0 +1,30 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div>
+  <% any = false %>
+  <% recent_jobs_and_pipelines[0..9].each do |object| %>
+    <% any = true %>
+    <div class="row" style="height: 4.5em">
+      <div class="col-sm-4">
+        <%= render :partial => "show_object_button", :locals => {object: object, size: 'xs'} %>
+        <% if object.respond_to?(:name) %>
+          <%= render_editable_attribute object, 'name', nil, {}, {tiptitle: 'rename'} %>
+        <% else %>
+          <%= object.class_for_display %> <%= object.uuid %>
+        <% end %>
+      </div>
+      <div class="col-sm-8 arv-description-in-table">
+        <%= render_controller_partial(
+            'show_object_description_cell.html',
+            controller_name: object.controller_name,
+            locals: {object: object})
+            %>
+      </div>
+    </div>
+  <% end %>
+  <% if not any %>
+    <span class="deemphasize">No jobs or pipelines to display.</span>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/projects/_index_projects.html.erb b/apps/workbench/app/views/projects/_index_projects.html.erb
new file mode 100644 (file)
index 0000000..e726a46
--- /dev/null
@@ -0,0 +1,36 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
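+<%# A descriptive note (added by the editor): render the project tree as
+    indented rows -- each row's left padding reflects its depth, and the root
+    node (the user's Home, another user, or a plain String heading) is only
+    shown when show_root_node is set. %>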
+<div class="container-fluid arv-project-list">
+  <% tree.each do |projectnode| %>
+    <% rowtype = projectnode[:object].class %>
+    <% next if rowtype != Group and !show_root_node %>
+    <div class="<%= 'project' if rowtype.in?([Group,User]) %> row">
+      <div class="col-md-4" style="padding-left: <%= projectnode[:depth] - (show_root_node ? 0 : 1) %>em;">
+        <% if show_root_node and rowtype == String %>
+          <i class="fa fa-fw fa-share-alt"></i>
+          <%= projectnode[:object] %>
+        <% elsif show_root_node and rowtype == User %>
+          <% if projectnode[:object].uuid == current_user.andand.uuid %>
+            <i class="fa fa-fw fa-folder-o"></i>
+            <%= link_to project_path(id: projectnode[:object].uuid) do %>
+              Home
+            <% end %>
+          <% else %>
+            <i class="fa fa-fw fa-folder-o"></i>
+            <%= projectnode[:object].friendly_link_name %>
+          <% end %>
+        <% elsif rowtype == Group %>
+          <i class="fa fa-fw fa-folder-o"></i>
+          <%= link_to projectnode[:object] do %>
+            <%= projectnode[:object].friendly_link_name %>
+          <% end %>
+        <% end %>
+      </div>
+      <% if projectnode[:object].respond_to?(:description) and not projectnode[:object].description.blank? %>
+        <div class="col-md-8 small"><%= render_attribute_as_textile(projectnode[:object], "description", projectnode[:object].description, true) %></div>
+      <% end %>
+    </div>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/projects/_show_contents_rows.html.erb b/apps/workbench/app/views/projects/_show_contents_rows.html.erb
new file mode 100644 (file)
index 0000000..d440c46
--- /dev/null
@@ -0,0 +1,46 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% get_objects_and_names.each do |object, name_link| %>
+  <% name_object = (object.respond_to?(:name) || !name_link) ? object : name_link %>
+  <tr class="filterable"
+      data-object-uuid="<%= name_object.uuid %>"
+      data-kind="<%= object.kind %>"
+      data-object-created-at="<%= object.created_at %>"
+      >
+    <td>
+      <div style="width:1em; display:inline-block;">
+        <%= render partial: 'selection_checkbox', locals: {object: object, friendly_name: ((name_object.name rescue '') || '')} %>
+      </div>
+    </td>
+
+    <td>
+      <% if @object.editable? %>
+        <%= link_to({action: 'remove_item', id: @object.uuid, item_uuid: ((name_link && name_link.uuid) || object.uuid)}, method: :delete, remote: true, data: {confirm: "Remove #{object.class_for_display.downcase} #{name_object.name rescue object.uuid} from this project?", toggle: 'tooltip', placement: 'top'}, class: 'btn btn-sm btn-default btn-nodecorate', title: 'remove') do %>
+          <i class="fa fa-fw fa-trash-o"></i>
+        <% end %>
+      <% else %>
+        <i class="fa fa-fw"></i><%# placeholder %>
+      <% end %>
+    </td>
+
+    <td>
+      <%= render :partial => "show_object_button", :locals => {object: object, size: 'sm', name_link: name_link} %>
+    </td>
+
+    <td>
+      <% if object.respond_to?(:name) %>
+        <%= render_editable_attribute (name_link || object), 'name', nil, {}, {tiptitle: 'rename'} %>
+      <% end %>
+    </td>
+
+    <td class="arv-description-in-table">
+      <%= render_controller_partial(
+          'show_object_description_cell.html',
+          controller_name: object.controller_name,
+          locals: {object: object})
+          %>
+    </td>
+  </tr>
+<% end %>
diff --git a/apps/workbench/app/views/projects/_show_dashboard.html.erb b/apps/workbench/app/views/projects/_show_dashboard.html.erb
new file mode 100644 (file)
index 0000000..69abf04
--- /dev/null
@@ -0,0 +1,219 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+  recent_procs = recent_processes(12)
+
+  # preload container_uuids of any container requests
+  recent_crs = recent_procs.select {|p| p.is_a?(ContainerRequest)}.uniq
+  recent_cr_containers = recent_crs.map {|cr| cr.container_uuid}.compact.uniq
+  preload_objects_for_dataclass(Container, recent_cr_containers) if recent_cr_containers.andand.any?
+
+  # fetch children of all the active crs in one call, if there are any
+  active_crs = recent_crs.select {|cr| cr.priority.andand > 0 and cr.state != 'Final' and cr.container_uuid}
+
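+  # A descriptive note (added by the editor): build a work unit for each
+  # process and collect all of their outputs so the referenced collections
+  # can be preloaded in one pass below.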
+  wus = {}
+  outputs = []
+  recent_procs.each do |p|
+    wu = p.work_unit
+
+    wus[p] = wu
+    outputs << wu.outputs
+  end
+  outputs = outputs.flatten.uniq
+
+  collection_pdhs = outputs.select {|x| !CollectionsHelper.match(x).nil?}.uniq.compact
+  collection_uuids = outputs - collection_pdhs
+
+  if Rails.configuration.show_recent_collections_on_dashboard
+    recent_cs = recent_collections(8)
+    collection_uuids = collection_uuids + recent_cs[:collections].collect {|c| c.uuid}
+    collection_uuids = collection_uuids.flatten.uniq
+  end
+
+  preload_collections_for_objects collection_uuids if collection_uuids.any?
+  preload_for_pdhs collection_pdhs if collection_pdhs.any?
+  preload_links_for_objects(collection_pdhs + collection_uuids)
+%>
+
+<%
+  recent_procs_panel_width = 6
+  if !PipelineInstance.api_exists?(:index)
+    recent_procs_title = 'Recent processes'
+    run_proc_title = 'Choose a workflow to run:'
+    show_node_status = false
+    # The recent processes panel should take the entire width when it is the
+    # only panel being rendered.
+    if !Rails.configuration.show_recent_collections_on_dashboard
+      recent_procs_panel_width = 12
+    end
+  else
+    recent_procs_title = 'Recent pipelines and processes'
+    run_proc_title = 'Choose a pipeline or workflow to run:'
+    show_node_status = true
+  end
+%>
+
+  <div class="row">
+    <div class="col-md-<%= recent_procs_panel_width %>">
+      <div class="panel panel-default" style="min-height: 10.5em">
+        <div class="panel-heading">
+          <span class="panel-title"><%=recent_procs_title%></span>
+          <% if current_user.andand.is_active %>
+            <span class="pull-right recent-processes-actions">
+              <span>
+                <%= link_to(
+                choose_work_unit_templates_path(
+                  title: run_proc_title,
+                  action_name: 'Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i>',
+                  action_href: work_units_path,
+                  action_method: 'post',
+                  action_data: {'selection_param' => 'work_unit[template_uuid]', 'work_unit[owner_uuid]' => current_user.uuid, 'success' => 'redirect-to-created-object'}.to_json),
+                { class: "btn btn-primary btn-xs", remote: true }) do %>
+                  <i class="fa fa-fw fa-gear"></i> Run a process...
+                <% end %>
+              </span>
+              <span>
+                  <%= link_to all_processes_path, class: 'btn btn-default btn-xs' do %>
+                    All processes <i class="fa fa-fw fa-arrow-circle-right"></i>
+                  <% end %>
+              </span>
+            </span>
+          <% end %>
+        </div>
+
+        <div class="panel-body recent-processes">
+          <% if recent_procs.empty? %>
+            No recent pipelines or processes.
+          <% else %>
+          <% wus.each do |p, wu| %>
+            <%
+            # Set up tooltip containing useful runtime information
+            runtime_status_tooltip = nil
+            if wu.runtime_status
+              if wu.runtime_status[:error]
+                runtime_status_tooltip = "Error: #{wu.runtime_status[:error]}"
+              elsif wu.runtime_status[:warning]
+                runtime_status_tooltip = "Warning: #{wu.runtime_status[:warning]}"
+              end
+            end
+            %>
+            <% if wu.is_finished? %>
+            <div class="dashboard-panel-info-row row-<%=wu.uuid%>" title="<%=sanitize(runtime_status_tooltip)%>">
+              <div class="row">
+                <div class="col-md-6 text-overflow-ellipsis">
+                  <%= link_to_if_arvados_object p, {friendly_name: true} %>
+                </div>
+                <div class="col-md-2">
+                  <span class="label label-<%=wu.state_bootstrap_class%>"><%=wu.state_label%></span>
+                </div>
+                <div class="col-md-4">
+                  <%= render_localized_date(wu.finished_at || wu.modified_at, "noseconds") %>
+                </div>
+              </div>
+              <div class="row">
+                <div class="col-md-12">
+                  <% if wu.started_at and wu.finished_at %>
+                    <% wu_time = wu.finished_at - wu.started_at %>
+                    Active for <%= render_runtime(wu_time, false) %>
+                  <% end %>
+
+                  <%= render partial: 'work_units/show_output', locals: {wu: wu, align: 'pull-right', include_icon: true} %>
+                </div>
+              </div>
+
+            </div>
+            <% else %>
+            <div class="dashboard-panel-info-row row-<%=wu.uuid%>" title="<%=sanitize(runtime_status_tooltip)%>">
+              <div class="row">
+                <div class="col-md-6 text-overflow-ellipsis">
+                  <%= link_to_if_arvados_object p, {friendly_name: true} %>
+                </div>
+                <div class="col-md-2">
+                  <span class="label label-<%=wu.state_bootstrap_class%>"><%=wu.state_label%></span>
+                </div>
+              </div>
+
+              <div class="clearfix">
+                <% if wu.started_at %>
+                  Started at <%= render_localized_date(wu.started_at, "noseconds") %>
+                  Active for <%= render_runtime(Time.now - wu.started_at, false) %>.
+                <% else %>
+                  Created at <%= render_localized_date(wu.created_at, "noseconds") %>.
+                  <% if wu.state_label == 'Queued' %>
+                    Queued for <%= render_runtime(Time.now - wu.created_at, false) %>.
+                  <% end %>
+                <% end %>
+              </div>
+            </div>
+            <% end %>
+          <% end %>
+          <% end %>
+        </div>
+      </div>
+    </div>
+
+    <div class="col-md-6">
+      <% if show_node_status %>
+      <% nodes = Node.filter([["last_ping_at", ">", Time.now - 3600]]).results %>
+      <div class="panel panel-default" style="min-height: 10.5em">
+        <div class="panel-heading"><span class="panel-title">Compute node status</span>
+          <span class="pull-right compute-node-actions">
+            <% if current_user.andand.is_admin %>
+              <span>
+                <%= link_to nodes_path, class: 'btn btn-default btn-xs' do %>
+                  All nodes <i class="fa fa-fw fa-arrow-circle-right"></i>
+                <% end %>
+              </span>
+            <% end %>
+          </span>
+        </div>
+        <div class="panel-body compute-node-summary-pane">
+          <div>
+            <%= render partial: 'compute_node_summary', locals: {nodes: nodes} %>
+            <% active_nodes = [] %>
+            <% nodes.sort_by { |n| n.hostname || "" }.each do |n| %>
+              <% if n.crunch_worker_state.in? ["busy", "idle"] %>
+                <% active_nodes << n %>
+              <% end %>
+            <% end %>
+            <% if active_nodes.any? %>
+              <div style="text-align: center">
+                <a data-toggle="collapse" href="#compute_node_status">Details <span class="caret"></span></a>
+              </div>
+            <% end %>
+          </div>
+          <div id="compute_node_status" class="collapse">
+            <%= render partial: 'compute_node_status', locals: {nodes: active_nodes} %>
+          </div>
+        </div>
+      </div>
+      <% end %>
+      <% if Rails.configuration.show_recent_collections_on_dashboard %>
+      <div class="panel panel-default">
+        <div class="panel-heading"><span class="panel-title">Recent collections</span>
+          <span class="pull-right">
+            <%= link_to collections_path, class: 'btn btn-default btn-xs' do %>
+              All collections <i class="fa fa-fw fa-arrow-circle-right"></i>
+            <% end %>
+          </span>
+        </div>
+        <div class="panel-body">
+          <% recent_cs[:collections].each do |p| %>
+            <div class="dashboard-panel-info-row">
+              <div>
+                <% if recent_cs[:owners][p[:owner_uuid]].is_a?(Group) %>
+                <i class="fa fa-fw fa-folder-o"></i><%= link_to_if_arvados_object recent_cs[:owners][p[:owner_uuid]], friendly_name: true %>/
+                <% end %>
+                <span class="pull-right"><%= render_localized_date(p[:modified_at], "noseconds") %></span>
+              </div>
+              <div class="text-overflow-ellipsis" style="margin-left: 1em; width: 100%"><%= link_to_if_arvados_object p, {friendly_name: true, no_tags: true} %>
+              </div>
+            </div>
+          <% end %>
+        </div>
+      </div>
+      <% end %>
+    </div>
+  </div>
diff --git a/apps/workbench/app/views/projects/_show_data_collections.html.erb b/apps/workbench/app/views/projects/_show_data_collections.html.erb
new file mode 100644 (file)
index 0000000..3a390ff
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render_pane 'tab_contents', to_string: true, locals: {
+    filters: [['uuid', 'is_a', "arvados#collection"]],
+    sortable_columns: { 'name' => 'collections.name', 'description' => 'collections.description' }
+    }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/_show_description.html.erb b/apps/workbench/app/views/projects/_show_description.html.erb
new file mode 100644 (file)
index 0000000..40780f7
--- /dev/null
@@ -0,0 +1,9 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if @object.respond_to? :description %>
+  <div class="arv-description-as-subtitle">
+    <%= render_editable_attribute @object, 'description', nil, { 'data-emptytext' => "(No description provided)", 'data-toggle' => 'manual', 'data-mode' => 'inline', 'data-rows' => 10 }, { btntext: 'Edit', btnclass: 'primary', btnplacement: :top } %>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/projects/_show_featured.html.erb b/apps/workbench/app/views/projects/_show_featured.html.erb
new file mode 100644 (file)
index 0000000..5a788fd
--- /dev/null
@@ -0,0 +1,22 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="row">
+  <% @objects[0..3].each do |object| %>
+  <div class="card arvados-object">
+    <div class="card-top blue">
+      <a href="#">
+        <img src="/favicon.ico" alt=""/>
+      </a>
+    </div>
+    <div class="card-info">
+      <span class="title"><%= @objects.name_for(object) || object.class_for_display %></span>
+      <div class="desc"><%= object.respond_to?(:description) ? object.description : object.uuid %></div>
+    </div>
+    <div class="card-bottom">
+      <%= render :partial => "show_object_button", :locals => {object: object, htmloptions: {class: 'btn-default btn-block'}} %>
+    </div>
+  </div>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/projects/_show_other_objects.html.erb b/apps/workbench/app/views/projects/_show_other_objects.html.erb
new file mode 100644 (file)
index 0000000..f75cf98
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render_pane 'tab_contents', to_string: true, locals: {
+    filters: [['uuid', 'is_a', ["arvados#human", "arvados#specimen", "arvados#trait"]]],
+    sortable_columns: { 'name' => 'humans.uuid, specimens.uuid, traits.name' }
+    }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/_show_pipeline_templates.html.erb b/apps/workbench/app/views/projects/_show_pipeline_templates.html.erb
new file mode 100644 (file)
index 0000000..40ba6bd
--- /dev/null
@@ -0,0 +1,9 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render_pane 'tab_contents', to_string: true, locals: {
+    limit: 50,
+    filters: [['uuid', 'is_a', ["arvados#pipelineTemplate", "arvados#workflow"]]],
+    sortable_columns: { 'name' => 'pipeline_templates.name, workflows.name', 'description' => 'pipeline_templates.description, workflows.description' }
+    }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/_show_pipelines_and_processes.html.erb b/apps/workbench/app/views/projects/_show_pipelines_and_processes.html.erb
new file mode 100644 (file)
index 0000000..1facf53
--- /dev/null
@@ -0,0 +1,9 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render_pane 'tab_contents', to_string: true, locals: {
+      limit: 50,
+      filters: [['uuid', 'is_a', ["arvados#containerRequest", "arvados#pipelineInstance"]]],
+      sortable_columns: { 'name' => 'container_requests.name, pipeline_instances.name', 'description' => 'container_requests.description, pipeline_instances.description' }
+    }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/_show_processes.html.erb b/apps/workbench/app/views/projects/_show_processes.html.erb
new file mode 100644 (file)
index 0000000..eb9c87f
--- /dev/null
@@ -0,0 +1,9 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render_pane 'tab_contents', to_string: true, locals: {
+      limit: 50,
+      filters: [['uuid', 'is_a', ["arvados#containerRequest"]]],
+      sortable_columns: { 'name' => 'container_requests.name', 'description' => 'container_requests.description' }
+    }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/_show_subprojects.html.erb b/apps/workbench/app/views/projects/_show_subprojects.html.erb
new file mode 100644 (file)
index 0000000..652366a
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render_pane 'tab_contents', to_string: true, locals: {
+    filters: [['uuid', 'is_a', ["arvados#group"]]],
+    sortable_columns: { 'name' => 'groups.name', 'description' => 'groups.description' }
+    }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/_show_tab_contents.html.erb b/apps/workbench/app/views/projects/_show_tab_contents.html.erb
new file mode 100644 (file)
index 0000000..2e5c8a3
--- /dev/null
@@ -0,0 +1,118 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% sortable_columns = {} if local_assigns[:sortable_columns].nil? %>
+<div class="selection-action-container">
+  <div class="row">
+    <div class="col-sm-5">
+      <div class="btn-group btn-group-sm">
+        <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection <span class="caret"></span></button>
+        <ul class="dropdown-menu" role="menu">
+          <% if Collection.creatable? %>
+            <li><%= link_to "Create new collection with selected collections", '#',
+                    'data-href' => combine_selected_path(
+                      action_data: {current_project_uuid: @object.uuid}.to_json
+                    ),
+                    'id' => 'combine_selections_button',
+                    method: :post,
+                    'data-selection-param-name' => 'selection[]',
+                    'data-selection-action' => 'combine-project-contents',
+                    'data-toggle' => 'dropdown'
+              %></li>
+          <% end %>
+          <li><%= link_to "Compare selected", '#',
+                  'data-href' => compare_pipeline_instances_path,
+                  'data-selection-param-name' => 'uuids[]',
+                  'data-selection-action' => 'compare',
+                  'data-toggle' => 'dropdown'
+            %></li>
+          <% if Collection.creatable? %>
+            <li><%= link_to "Copy selected...", '#',
+                    'data-href' => choose_projects_path(
+                      title: 'Copy selected items to...',
+                      editable: true,
+                      action_name: 'Copy',
+                      action_href: actions_path,
+                      action_method: 'post',
+                      action_data_from_params: ['selection'],
+                      action_data: {
+                        copy_selections_into_project: true,
+                        selection_param: 'uuid',
+                        success: 'page-refresh'}.to_json),
+                    'data-remote' => true,
+                    'data-selection-param-name' => 'selection[]',
+                    'data-selection-action' => 'copy',
+                    'data-toggle' => 'dropdown'
+              %></li>
+          <% end %>
+          <% if @object.editable? %>
+            <li><%= link_to "Move selected...", '#',
+                    'data-href' => choose_projects_path(
+                      title: 'Move selected items to...',
+                      editable: true,
+                      action_name: 'Move',
+                      action_href: actions_path,
+                      action_method: 'post',
+                      action_data_from_params: ['selection'],
+                      action_data: {
+                        move_selections_into_project: true,
+                        selection_param: 'uuid',
+                        success: 'page-refresh'}.to_json),
+                    'data-remote' => true,
+                    'data-selection-param-name' => 'selection[]',
+                    'data-selection-action' => 'move',
+                    'data-toggle' => 'dropdown'
+              %></li>
+            <li><%= link_to "Remove selected", '#',
+                    method: :delete,
+                    'data-href' => url_for(action: :remove_items),
+                    'data-selection-param-name' => 'item_uuids[]',
+                    'data-selection-action' => 'remove',
+                    'data-remote' => true,
+                    'data-toggle' => 'dropdown'
+              %></li>
+          <% end %>
+        </ul>
+      </div>
+      <div class="btn-group btn-group-sm">
+        <button id="select-all" type="button" class="btn btn-default" onClick="select_all_items()">Select all</button>
+        <button id="unselect-all" type="button" class="btn btn-default" onClick="unselect_all_items()">Unselect all</button>
+      </div>
+    </div>
+    <div class="col-sm-4 pull-right">
+      <input type="text" class="form-control filterable-control" placeholder="Search project contents" data-filterable-target="table.arv-index.arv-project-<%= tab_pane %> tbody"/>
+    </div>
+  </div>
+
+  <table class="table table-condensed arv-index arv-selectable-items arv-project-<%= tab_pane %>">
+    <colgroup>
+      <col width="0*" style="max-width: fit-content;" />
+      <col width="0*" style="max-width: fit-content;" />
+      <col width="0*" style="max-width: fit-content;" />
+      <col width="60%" style="width: 60%;" />
+      <col width="40%" style="width: 40%;" />
+    </colgroup>
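+    <%# A descriptive note (added by the editor): the tbody starts empty and
+        is filled by the infinite scroller, which fetches rows from the
+        contents_rows partial and passes this tab's order/limit/filters along
+        in the "projecttab" params attribute. %>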
+    <tbody data-infinite-scroller="#<%= tab_pane %>-scroll" data-infinite-content-href="<%= url_for partial: :contents_rows %>" data-infinite-content-params-projecttab="<%= local_assigns.select{|k| [:order, :limit, :filters].include? k }.to_json %>" data-infinite-content-params-attr="projecttab">
+    </tbody>
+    <thead>
+      <tr>
+        <th></th>
+        <th></th>
+        <th></th>
+        <% sort_order = sortable_columns['name'].gsub(/\s/,'') if sortable_columns['name'] %>
+        <th <% if !sort_order.nil? %>
+              data-sort-order='<%= sort_order %>'
+            <% end %> >
+          name
+        </th>
+        <% sort_order = sortable_columns['description'].gsub(/\s/,'') if sortable_columns['description'] %>
+        <th <% if !sort_order.nil? %>
+              data-sort-order='<%= sort_order %>'
+            <% end %> >
+          description
+        </th>
+      </tr>
+    </thead>
+  </table>
+</div>
diff --git a/apps/workbench/app/views/projects/_show_workflows.html.erb b/apps/workbench/app/views/projects/_show_workflows.html.erb
new file mode 100644 (file)
index 0000000..6399a44
--- /dev/null
@@ -0,0 +1,9 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render_pane 'tab_contents', to_string: true, locals: {
+    limit: 50,
+    filters: [['uuid', 'is_a', ["arvados#workflow"]]],
+    sortable_columns: { 'name' => 'workflows.name', 'description' => 'workflows.description' }
+    }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/index.html.erb b/apps/workbench/app/views/projects/index.html.erb
new file mode 100644 (file)
index 0000000..14da3e4
--- /dev/null
@@ -0,0 +1,12 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
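+<%# Auto-refresh: listens for log events on any object and reloads the dashboard pane, throttled to at most one reload per data-load-throttle period (15000 ms here). %>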
+<div class="pane-loaded arv-log-event-listener arv-refresh-on-log-event"
+     data-pane-content-url="<%= root_url tab_pane: "dashboard" %>"
+     data-object-uuid="all"
+     data-load-throttle="15000"
+     >
+  <%= render partial: 'show_dashboard' %>
+</div>
diff --git a/apps/workbench/app/views/projects/public.html.erb b/apps/workbench/app/views/projects/public.html.erb
new file mode 100644 (file)
index 0000000..9827d54
--- /dev/null
@@ -0,0 +1,33 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<table class="table">
+  <colgroup>
+    <col width="25%" />
+    <col width="75%" />
+  </colgroup>
+  <thead>
+    <tr class="contain-align-left">
+      <th>
+        Name
+      </th>
+      <th>
+        Description
+      </th>
+    </tr>
+  </thead>
+
+  <tbody>
+  <% @objects.each do |p| %>
+    <tr>
+      <td>
+        <%= link_to_if_arvados_object p, {friendly_name: true} %>
+      </td>
+      <td>
+        <%= render_attribute_as_textile(p, "description", p.description, true) %>
+      </td>
+    </tr>
+  <% end %>
+  </tbody>
+</table>
diff --git a/apps/workbench/app/views/projects/remove_items.js.erb b/apps/workbench/app/views/projects/remove_items.js.erb
new file mode 100644 (file)
index 0000000..1ae95cb
--- /dev/null
@@ -0,0 +1,10 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+$(document).trigger('count-change');
+<% @removed_uuids.each do |uuid| %>
+  $('[data-object-uuid=<%= uuid %>]').hide('slow', function() {
+    $(this).remove();
+  });
+<% end %>
diff --git a/apps/workbench/app/views/projects/show.html.erb b/apps/workbench/app/views/projects/show.html.erb
new file mode 100644 (file)
index 0000000..6066335
--- /dev/null
@@ -0,0 +1,70 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :content_top do %>
+  <h2>
+    <% if @object.uuid == current_user.andand.uuid %>
+      Home
+    <% else %>
+      <%= render partial: "show_star" %>
+      <%= render_editable_attribute @object, 'name', nil, { 'data-emptytext' => "New project" } %>
+    <% end %>
+  </h2>
+<% end %>
+
+<%
+  if !PipelineInstance.api_exists?(:index)
+    run_proc_title = 'Choose a workflow to run:'
+    run_proc_hover = 'Run a workflow in this project'
+  else
+    run_proc_title = 'Choose a pipeline or workflow to run:'
+    run_proc_hover = 'Run a pipeline or workflow in this project'
+  end
+%>
+
+<% content_for :tab_line_buttons do %>
+  <% if @object.editable? %>
+    <div class="btn-group btn-group-sm">
+      <button type="button" class="btn btn-primary dropdown-toggle" data-toggle="dropdown"><i class="fa fa-fw fa-plus"></i> Add data <span class="caret"></span></button>
+      <ul class="dropdown-menu pull-right" role="menu">
+        <li>
+          <%= link_to(
+                choose_collections_path(
+                  title: 'Choose a collection to copy into this project:',
+                  multiple: true,
+                  action_name: 'Copy',
+                  action_href: actions_path(id: @object.uuid),
+                  action_method: 'post',
+                  action_data: {selection_param: 'selection[]', copy_selections_into_project: @object.uuid, success: 'page-refresh'}.to_json),
+                { remote: true, data: {'event-after-select' => 'page-refresh', 'toggle' => 'dropdown'} }) do %>
+            <i class="fa fa-fw fa-clipboard"></i> Copy data from another project
+          <% end %>
+        </li>
+        <li>
+          <%= link_to(collections_path(options: {ensure_unique_name: true}, collection: {manifest_text: "", name: "New collection", owner_uuid: @object.uuid}, redirect_to_anchor: 'Upload'), {
+              method: 'post',
+              data: {toggle: 'dropdown'}}) do %>
+            <i class="fa fa-fw fa-upload"></i> Upload files from my computer
+          <% end %>
+        </li>
+      </ul>
+    </div>
+    <%= link_to(
+          choose_work_unit_templates_path(
+            title: run_proc_title,
+            action_name: 'Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i>',
+            action_href: work_units_path,
+            action_method: 'post',
+            action_data: {'selection_param' => 'work_unit[template_uuid]', 'work_unit[owner_uuid]' => @object.uuid, 'success' => 'redirect-to-created-object'}.to_json),
+          { class: "btn btn-primary btn-sm", remote: true, title: run_proc_hover }) do %>
+      <i class="fa fa-fw fa-gear"></i> Run a process...
+    <% end %>
+    <%= link_to projects_path({'project[owner_uuid]' => @object.uuid, 'options' => {'ensure_unique_name' => true}}), method: :post, title: "Add a subproject to this project", class: 'btn btn-sm btn-primary' do %>
+      <i class="fa fa-fw fa-plus"></i>
+      Add a subproject
+    <% end %>
+  <% end %>
+<% end %>
+
+<%= render file: 'application/show.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/app/views/projects/tab_counts.js.erb b/apps/workbench/app/views/projects/tab_counts.js.erb
new file mode 100644 (file)
index 0000000..8757a82
--- /dev/null
@@ -0,0 +1,7 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% @tab_counts.each do |pane_name, tab_count| %>
+  $('span#<%= pane_name %>-count').html('(<%= tab_count %>)');
+<% end %>
\ No newline at end of file
diff --git a/apps/workbench/app/views/repositories/_add_repository_modal.html.erb b/apps/workbench/app/views/repositories/_add_repository_modal.html.erb
new file mode 100644 (file)
index 0000000..8fe151b
--- /dev/null
@@ -0,0 +1,45 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+   if current_user.uuid.ends_with?("-000000000000000")
+     repo_prefix = ""
+   else
+     repo_prefix = current_user.username + "/"
+   end
+-%>
+<div class="modal" id="add-repository-modal" tabindex="-1" role="dialog" aria-labelledby="add-repository-label" aria-hidden="true">
+  <div class="modal-dialog">
+    <div class="modal-content">
+      <form id="add-repository-form">
+        <input type="hidden" id="add_repo_owner_uuid" name="add_repo_owner_uuid" value="<%= current_user.uuid %>">
+        <input type="hidden" id="add_repo_prefix" name="add_repo_prefix" value="<%= repo_prefix %>">
+        <div class="modal-header">
+          <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+          <h4 class="modal-title" id="add-repository-label">Add new repository</h4>
+        </div>
+        <div class="modal-body form-horizontal">
+          <div class="form-group">
+            <label for="add_repo_basename" class="col-sm-2 control-label">Name</label>
+            <div class="col-sm-10">
+              <div class="input-group arvados-uuid">
+                <% unless repo_prefix.empty? %>
+                  <span class="input-group-addon"><%= repo_prefix %></span>
+                <% end %>
+                <input type="text" class="form-control" id="add_repo_basename" name="add_repo_basename">
+                <span class="input-group-addon">.git</span>
+              </div>
+            </div>
+          </div>
+          <p class="alert alert-info">It may take a minute or two before you can clone your new repository.</p>
+          <p id="add-repository-error" class="alert alert-danger"></p>
+        </div>
+        <div class="modal-footer">
+          <button type="button" class="btn btn-default" data-dismiss="modal">Cancel</button>
+          <input type="submit" class="btn btn-primary" id="add-repository-submit" name="submit" value="Create">
+        </div>
+      </form>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/repositories/_repository_breadcrumbs.html.erb b/apps/workbench/app/views/repositories/_repository_breadcrumbs.html.erb
new file mode 100644 (file)
index 0000000..6d0f990
--- /dev/null
@@ -0,0 +1,18 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="pull-right">
+  <span class="deemphasize">Browsing <%= @object.name %> repository at commit</span>
+  <%= link_to(@commit, show_repository_commit_path(id: @object.uuid, commit: @commit), title: 'show commit message') %>
+</div>
+<p>
+  <%= link_to(@object.name, show_repository_tree_path(id: @object.uuid, commit: @commit, path: ''), title: 'show root directory of source tree') %>
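+  <%# Build one breadcrumb link per path segment, each pointing at the tree view for that cumulative prefix. %>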
+  <% parents = ''
+     (@path || '').split('/').each do |pathpart|
+     parents = parents + pathpart + '/'
+     %>
+    / <%= link_to pathpart, show_repository_tree_path(id: @object.uuid, commit: @commit, path: parents) %>
+  <% end %>
+</p>
diff --git a/apps/workbench/app/views/repositories/_show_help.html.erb b/apps/workbench/app/views/repositories/_show_help.html.erb
new file mode 100644 (file)
index 0000000..5904fb2
--- /dev/null
@@ -0,0 +1,38 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+    filters = @filters + [["owner_uuid", "=", current_user.uuid]]
+    example = Repository.all.order("name ASC").filter(filters).limit(1).results.first
+    example = Repository.all.order("name ASC").limit(1).results.first if !example
+%>
+
+<% if example %>
+
+<p>
+Sample git quick start:
+</p>
+
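+<%# The explicit clone-target directory appears to be appended only when the basename in push_url differs from the repository name. %>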
+<pre>
+git clone <%= example.push_url %> <%= example.name unless example.push_url.match(/:(\S+)\.git$/).andand[1] == example.name %>
+cd <%= example.name %>
+# edit files
+git add the/files/you/changed
+git commit
+git push
+</pre>
+
+<% end %>
+
+<p>
+  See also:
+  <%= link_to raw('Arvados Docs &rarr; User Guide &rarr; SSH access'),
+  "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+      target: "_blank"%> and 
+  <%= link_to raw('Arvados Docs &rarr; User Guide &rarr; Writing a Crunch
+  Script'),
+  "#{Rails.configuration.arvados_docsite}/user/tutorials/tutorial-firstscript.html",
+  target: "_blank"%>.
+</p>
diff --git a/apps/workbench/app/views/repositories/_show_repositories.html.erb b/apps/workbench/app/views/repositories/_show_repositories.html.erb
new file mode 100644 (file)
index 0000000..871ba1d
--- /dev/null
@@ -0,0 +1,46 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: "add_repository_modal" %>
+
+<div class="container" style="width: 100%">
+  <div class="row">
+    <div class="col-md-pull-9 pull-left">
+      <p>
+        When you are using an Arvados virtual machine, clone using the https:// URLs. These authenticate automatically using your API token.
+      </p>
+      <p>
+        To clone git repositories over SSH, <%= link_to ssh_keys_user_path(current_user) do%> add an SSH key to your account<%end%> and clone using the git@ URLs.
+      </p>
+    </div>
+    <div class="col-md-pull-3 pull-right">
+      <%= link_to raw('<i class="fa fa-plus"></i> Add new repository'), "#",
+                      {class: 'btn btn-xs btn-primary', 'data-toggle' => "modal",
+                       'data-target' => '#add-repository-modal'}  %>
+    </div>
+  </div>
+
+  <div>
+    <table class="table table-condensed table-fixedlayout repositories-table">
+      <colgroup>
+        <col style="width: 10%" />
+        <col style="width: 30%" />
+        <col style="width: 55%" />
+        <col style="width: 5%" />
+      </colgroup>
+      <thead>
+        <tr>
+          <th></th>
+          <th> Name </th>
+          <th> URL </th>
+          <th></th>
+        </tr>
+      </thead>
+
+      <tbody data-infinite-scroller="#repositories-rows" id="repositories-rows"
+        data-infinite-content-href="<%= url_for partial: :repositories_rows %>" >
+      </tbody>
+    </table>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/repositories/_show_repositories_rows.html.erb b/apps/workbench/app/views/repositories/_show_repositories_rows.html.erb
new file mode 100644 (file)
index 0000000..fe88608
--- /dev/null
@@ -0,0 +1,23 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% @objects.each do |repo| %>
+  <tr data-object-uuid="<%= repo.uuid %>">
+    <td>
+      <%= render :partial => "show_object_button", :locals => {object: repo, size: 'xs' } %>
+    </td>
+    <td style="word-break:break-all;">
+      <%= repo[:name] %>
+    </td>
+    <td style="word-break:break-all;">
+      <code><%= repo.http_fetch_url %></code><br/>
+      <code><%= repo.editable? ? repo.push_url : repo.fetch_url %></code>
+    </td>
+    <td>
+      <% if repo.editable? %>
+        <%= render partial: 'delete_object_button', locals: {object: repo} %>
+      <% end %>
+    </td>
+  </tr>
+<% end %>
diff --git a/apps/workbench/app/views/repositories/show_blob.html.erb b/apps/workbench/app/views/repositories/show_blob.html.erb
new file mode 100644 (file)
index 0000000..729c9c6
--- /dev/null
@@ -0,0 +1,17 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: 'repository_breadcrumbs' %>
+
+<% if not @blobdata.valid_encoding? %>
+  <div class="alert alert-warning">
+    <p>
+      This file has an invalid text encoding, so it can't be shown
+      here.  (This probably just means it's a binary file, not a text
+      file.)
+    </p>
+  </div>
+<% else %>
+  <pre><%= @blobdata %></pre>
+<% end %>
diff --git a/apps/workbench/app/views/repositories/show_commit.html.erb b/apps/workbench/app/views/repositories/show_commit.html.erb
new file mode 100644 (file)
index 0000000..55e8952
--- /dev/null
@@ -0,0 +1,7 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: 'repository_breadcrumbs' %>
+
+<pre><%= @object.show @commit %></pre>
diff --git a/apps/workbench/app/views/repositories/show_tree.html.erb b/apps/workbench/app/views/repositories/show_tree.html.erb
new file mode 100644 (file)
index 0000000..3545131
--- /dev/null
@@ -0,0 +1,44 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: 'repository_breadcrumbs' %>
+
+<table class="table table-condensed table-hover">
+  <thead>
+    <tr>
+      <th>File</th>
+      <th class="data-size">Size</th>
+    </tr>
+  </thead>
+  <tbody>
+    <% @subtree.each do |mode, sha1, size, subpath| %>
+      <tr>
+        <td>
+          <span style="opacity: 0.6">
+            <% pathparts = subpath.sub(/^\//, '').split('/')
+               basename = pathparts.pop
+               parents = @path
+               pathparts.each do |pathpart| %>
+              <% parents = parents + '/' + pathpart %>
+              <%= link_to pathpart, url_for(path: parents) %>
+              /
+            <% end %>
+          </span>
+          <%= link_to basename, url_for(action: :show_blob, path: parents + '/' + basename) %>
+        </td>
+        <td class="data-size">
+          <%= human_readable_bytes_html(size) %>
+        </td>
+      </tr>
+    <% end %>
+    <% if @subtree.empty? %>
+      <tr>
+        <td>
+          No files found.
+        </td>
+      </tr>
+    <% end %>
+  </tbody>
+  <tfoot></tfoot>
+</table>
diff --git a/apps/workbench/app/views/request_shell_access_reporter/send_request.text.erb b/apps/workbench/app/views/request_shell_access_reporter/send_request.text.erb
new file mode 100644 (file)
index 0000000..ab87517
--- /dev/null
@@ -0,0 +1,11 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+Shell account request from <%=@user.full_name%> (<%=@user.email%>, <%=@user.uuid%>)
+
+Details of the request:
+Full name: <%=@user.full_name%>
+Email address: <%=@user.email%>
+User's UUID: <%=@user.uuid%>
+User setup URL: <%= link_to('setup user', @params['request_url'].gsub('/request_shell_access', '#Admin')) %>
diff --git a/apps/workbench/app/views/search/_choose_rows.html.erb b/apps/workbench/app/views/search/_choose_rows.html.erb
new file mode 100644 (file)
index 0000000..04de426
--- /dev/null
@@ -0,0 +1,29 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+<% current_class = params[:last_object_class] %>
+<% @objects.each do |object| %>
+  <% icon_class = fa_icon_class_for_class(object.class) %>
+  <% if object.class.to_s != current_class %>
+    <% current_class = object.class.to_s %>
+    <div class="row class-separator" data-section-heading="true" data-section-name="<%= object.class.to_s %>">
+      <div class="col-sm-12">
+        <%= object.class_for_display.pluralize.downcase %>
+      </div>
+    </div>
+  <% end %>
+  <div class="row filterable selectable" data-section-name="<%= object.class.to_s %>" data-object-uuid="<%= object.uuid %>" data-preview-href="<%= chooser_preview_url_for object %>">
+    <div class="col-sm-12" style="overflow-x:hidden; white-space: nowrap">
+      <i class="fa fa-fw <%= icon_class %>"></i>
+      <% if (name_link = @objects.links_for(object, 'name').first) %>
+        <%= name_link.name %>
+        <span style="display:none"><%= object.uuid %></span>
+      <% elsif object.respond_to?(:name) and object.name and object.name.length > 0 %>
+        <%= object.name %>
+        <span style="display:none"><%= object.uuid %></span>
+      <% else %>
+        <span class="arvados-uuid"><%= object.uuid %></span>
+      <% end %>
+    </div>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/search/index.html b/apps/workbench/app/views/search/index.html
new file mode 100644 (file)
index 0000000..6bcad0b
--- /dev/null
@@ -0,0 +1,5 @@
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 -->
+
+<div data-mount-mithril="Search"></div>
diff --git a/apps/workbench/app/views/sessions/index.html b/apps/workbench/app/views/sessions/index.html
new file mode 100644 (file)
index 0000000..bf23028
--- /dev/null
@@ -0,0 +1,5 @@
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 -->
+
+<div data-mount-mithril="SessionsTable"></div>
diff --git a/apps/workbench/app/views/sessions/logged_out.html.erb b/apps/workbench/app/views/sessions/logged_out.html.erb
new file mode 100644 (file)
index 0000000..c3bd449
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<p>You have logged out.</p>
diff --git a/apps/workbench/app/views/tests/mithril.html b/apps/workbench/app/views/tests/mithril.html
new file mode 100644 (file)
index 0000000..fac2d88
--- /dev/null
@@ -0,0 +1,5 @@
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 -->
+
+<div data-mount-mithril="TestComponent"></div>
diff --git a/apps/workbench/app/views/trash_items/_create_new_object_button.html.erb b/apps/workbench/app/views/trash_items/_create_new_object_button.html.erb
new file mode 100644 (file)
index 0000000..2d34e36
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%# There is no such thing %>
diff --git a/apps/workbench/app/views/trash_items/_show_trash_rows.html.erb b/apps/workbench/app/views/trash_items/_show_trash_rows.html.erb
new file mode 100644 (file)
index 0000000..dd451b6
--- /dev/null
@@ -0,0 +1,47 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% @objects.each do |obj| %>
+  <tr data-object-uuid="<%= obj.uuid %>" data-kind="<%= obj.kind %>" >
+    <td>
+      <% if obj.editable? and obj.is_trashed %>
+        <%= check_box_tag 'uuids[]', obj.uuid, false, :class => 'persistent-selection', style: 'cursor: pointer;' %>
+      <% end %>
+    </td>
+    <td>
+      <%= obj.name.blank? ? obj.uuid : obj.name %>
+    </td>
+    <% if obj.is_trashed %>
+      <td>
+        <%= link_to_if_arvados_object @owners[obj.owner_uuid], friendly_name: true %>
+      </td>
+
+      <td>
+        <% if obj.trash_at %>
+          <%= render_localized_date(obj.trash_at)  %>
+        <% end %>
+        <br />
+        <% if obj.delete_at %>
+          <%= render_localized_date(obj.delete_at) %>
+        <% end %>
+      </td>
+    <% else %>
+      <td colspan="2" class="trash-project-msg">
+        <%= link_to_if_arvados_object @owners[obj.owner_uuid], friendly_name: true %>
+        <br>
+        This item is contained within a trashed project.
+      </td>
+    <% end %>
+    <td>
+      <%= obj.uuid %>
+      <% if defined? obj.portable_data_hash %>
+        <br /><%= obj.portable_data_hash %>
+      <% end %>
+    </td>
+    <td>
+      <%= render partial: 'untrash_item', locals: {object:obj} %>
+    </td>
+  </tr>
+
+<% end %>
diff --git a/apps/workbench/app/views/trash_items/_show_trashed_collection_rows.html.erb b/apps/workbench/app/views/trash_items/_show_trashed_collection_rows.html.erb
new file mode 120000 (symlink)
index 0000000..6841b57
--- /dev/null
@@ -0,0 +1 @@
+_show_trash_rows.html.erb
\ No newline at end of file
diff --git a/apps/workbench/app/views/trash_items/_show_trashed_collections.html.erb b/apps/workbench/app/views/trash_items/_show_trashed_collections.html.erb
new file mode 100644 (file)
index 0000000..4c5fd3f
--- /dev/null
@@ -0,0 +1,60 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="container selection-action-container" style="width: 100%">
+  <div class="col-md-2 pull-left">
+    <div class="btn-group btn-group-sm">
+      <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection... <span class="caret"></span></button>
+      <ul class="dropdown-menu" role="menu">
+        <li><%= link_to "Un-trash selected items", '#',
+                method: :post,
+                remote: true,
+                'id' => 'untrash_selected_items',
+                'data-href' => untrash_items_trash_items_path,
+                'data-selection-param-name' => 'selection[]',
+                'data-selection-action' => 'untrash-selected-items',
+                'data-toggle' => 'dropdown'
+          %></li>
+      </ul>
+    </div>
+  </div>
+  <div class="col-md-4 pull-right">
+    <input type="text" class="form-control filterable-control recent-trash-items"
+           placeholder="Search trash"
+           data-filterable-target="#recent-collection-trash-items"
+           value="<%= params[:search] %>" />
+  </div>
+
+  <p>
+    <b>Note:</b> Collections which are located within a trashed project are only shown when searching the trash.
+  </p>
+
+  <div>
+    <table id="trash-index" class="topalign table table-condensed table-fixedlayout">
+      <colgroup>
+        <col width="5%" />
+        <col width="16%" />
+        <col width="25%" />
+        <col width="20%" />
+        <col width="29%" />
+        <col width="5%" />
+      </colgroup>
+
+      <thead>
+        <tr class="contain-align-left">
+          <th></th>
+          <th>Name</th>
+          <th>Parent project</th>
+          <th>Date&nbsp;trashed&nbsp;/<br />to&nbsp;be&nbsp;deleted</th>
+          <th>UUID&nbsp;/<br />Content&nbsp;address&nbsp;(PDH)</th>
+          <th></th>
+        </tr>
+      </thead>
+
+      <tbody data-infinite-scroller="#recent-collection-trash-items" id="recent-collection-trash-items"
+        data-infinite-content-href="<%= url_for partial: :trashed_collection_rows %>" >
+      </tbody>
+    </table>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/trash_items/_show_trashed_project_rows.html.erb b/apps/workbench/app/views/trash_items/_show_trashed_project_rows.html.erb
new file mode 120000 (symlink)
index 0000000..6841b57
--- /dev/null
@@ -0,0 +1 @@
+_show_trash_rows.html.erb
\ No newline at end of file
diff --git a/apps/workbench/app/views/trash_items/_show_trashed_projects.html.erb b/apps/workbench/app/views/trash_items/_show_trashed_projects.html.erb
new file mode 100644 (file)
index 0000000..6f1e062
--- /dev/null
@@ -0,0 +1,60 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="container selection-action-container" style="width: 100%">
+  <div class="col-md-2 pull-left">
+    <div class="btn-group btn-group-sm">
+      <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection... <span class="caret"></span></button>
+      <ul class="dropdown-menu" role="menu">
+        <li><%= link_to "Un-trash selected items", '#',
+                method: :post,
+                remote: true,
+                'id' => 'untrash_selected_items',
+                'data-href' => untrash_items_trash_items_path,
+                'data-selection-param-name' => 'selection[]',
+                'data-selection-action' => 'untrash-selected-items',
+                'data-toggle' => 'dropdown'
+          %></li>
+      </ul>
+    </div>
+  </div>
+  <div class="col-md-4 pull-right">
+    <input type="text" class="form-control filterable-control recent-trash-items"
+           placeholder="Search trash"
+           data-filterable-target="#recent-project-trash-items"
+           value="<%= params[:search] %>" />
+  </div>
+
+  <p>
+    <b>Note:</b> Projects which are a subproject of a trashed project are only shown when searching the trash.
+  </p>
+
+  <div>
+    <table id="trash-index" class="topalign table table-condensed table-fixedlayout">
+      <colgroup>
+        <col width="5%" />
+        <col width="16%" />
+        <col width="25%" />
+        <col width="20%" />
+        <col width="29%" />
+        <col width="5%" />
+      </colgroup>
+
+      <thead>
+        <tr class="contain-align-left">
+          <th></th>
+          <th>Name</th>
+          <th>Parent project</th>
+          <th>Date&nbsp;trashed&nbsp;/<br />to&nbsp;be&nbsp;deleted</th>
+          <th>UUID</th>
+          <th></th>
+        </tr>
+      </thead>
+
+      <tbody data-infinite-scroller="#recent-project-trash-items" id="recent-project-trash-items"
+        data-infinite-content-href="<%= url_for partial: :trashed_project_rows %>" >
+      </tbody>
+    </table>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/trash_items/_untrash_item.html.erb b/apps/workbench/app/views/trash_items/_untrash_item.html.erb
new file mode 100644 (file)
index 0000000..50780d9
--- /dev/null
@@ -0,0 +1,9 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if object.editable? %>
+    <%= link_to(url_for(object), {title: "Untrash", style: 'cursor: pointer;'}) do %>
+      <i class="fa fa-fw fa-recycle"></i>
+    <% end %>
+<% end %>
diff --git a/apps/workbench/app/views/trash_items/index.html.erb b/apps/workbench/app/views/trash_items/index.html.erb
new file mode 100644 (file)
index 0000000..1a55d5b
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render file: 'application/index.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/app/views/trash_items/untrash_items.js.erb b/apps/workbench/app/views/trash_items/untrash_items.js.erb
new file mode 100644 (file)
index 0000000..de773f4
--- /dev/null
@@ -0,0 +1,9 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% @untrashed_uuids.each do |uuid| %>
+  $('[data-object-uuid=<%= uuid %>]').hide('slow', function() {
+    $(this).remove();
+  });
+<% end %>
diff --git a/apps/workbench/app/views/user_agreements/index.html.erb b/apps/workbench/app/views/user_agreements/index.html.erb
new file mode 100644 (file)
index 0000000..5f70c47
--- /dev/null
@@ -0,0 +1,46 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :breadcrumbs do raw '<!-- -->' end %>
+
+<% n_files = unsigned_user_agreements.collect(&:files).flatten(1).count %>
+<% content_for :page_title do %>
+<% if n_files == 1 %>
+<%= unsigned_user_agreements.first.files.first[1].sub(/\.[a-z]{3,4}$/,'') %>
+<% else %>
+User agreements
+<% end %>
+<% end %>
+
+<%= form_for(unsigned_user_agreements.first, {url: {action: 'sign', controller: 'user_agreements'}, method: :post}) do |f| %>
+<%= hidden_field_tag :return_to, request.url %>
+<div id="open_user_agreement">
+  <div class="alert alert-info">
+    <strong>Please check <%= n_files > 1 ? 'each' : 'the' %> box below</strong> to indicate that you have read and accepted the user agreement<%= 's' if n_files > 1 %>.
+  </div>
+  <% if n_files == 1 and (Rails.configuration.show_user_agreement_inline rescue false) %>
+  <% ua = unsigned_user_agreements.first; file = ua.files.first %>
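+  <%# Look up the MIME type from the file extension so the browser can render the agreement inline; the rescue falls back to an empty type. %>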
+  <object data="<%= url_for(controller: 'collections', action: 'show_file', uuid: ua.uuid, file: "#{file[0]}/#{file[1]}") %>" type="<%= Rack::Mime::MIME_TYPES[file[1].match(/\.\w+$/)[0]] rescue '' %>" width="100%" height="400px">
+  </object>
+  <% end %>
+  <div>
+    <% unsigned_user_agreements.each do |ua| %>
+    <% ua.files.each do |file| %>
+    <div class="checkbox">
+      <%= f.label 'checked[]' do %>
+      <%= check_box_tag 'checked[]', "#{ua.uuid}/#{file[0]}/#{file[1]}", false %>
+      Accept <%= file[1].sub(/\.[a-z]{3,4}$/,'') %>
+      <%= link_to 'View agreement', {controller: 'collections', action: 'show_file', uuid: ua.uuid, file: "#{file[0]}/#{file[1]}"}, {target: '_blank', class: 'btn btn-xs btn-info'} %>
+      <% end %>
+    </div>
+    <% end %>
+    <% end %>
+  </div>
+  <div style="height: 1em"></div>
+  <div>
+    <%= f.submit 'Continue', {class: 'btn btn-primary'} %>
+  </div>
+</div>
+<% end %>
diff --git a/apps/workbench/app/views/users/_add_group_modal.html.erb b/apps/workbench/app/views/users/_add_group_modal.html.erb
new file mode 100644 (file)
index 0000000..f2ae645
--- /dev/null
@@ -0,0 +1,31 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="modal" id="add-group-modal" tabindex="-1" role="dialog" aria-labelledby="add-group-label" aria-hidden="true">
+  <div class="modal-dialog">
+    <div class="modal-content">
+      <form id="add-group-form">
+        <div class="modal-header">
+          <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+          <h4 class="modal-title" id="add-group-label">Add new group</h4>
+        </div>
+        <div class="modal-body form-horizontal">
+          <div class="form-group">
+            <label for="group_name_input" class="col-sm-1 control-label">Name</label>
+            <div class="col-sm-9">
+              <div class="input-group-name">
+                <input type="text" class="form-control" id="group_name_input" name="group_name_input" placeholder="Enter group name"/>
+              </div>
+            </div>
+          </div>
+          <p id="add-group-error" class="alert alert-danger"></p>
+        </div>
+        <div class="modal-footer">
+          <button type="button" class="btn btn-default" data-dismiss="modal">Cancel</button>
+          <input type="submit" class="btn btn-primary" id="add-group-submit" name="submit" value="Create">
+        </div>
+      </form>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/users/_add_ssh_key_popup.html.erb b/apps/workbench/app/views/users/_add_ssh_key_popup.html.erb
new file mode 100644 (file)
index 0000000..5abaf15
--- /dev/null
@@ -0,0 +1,42 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="modal-dialog modal-with-loading-spinner">
+  <div class="modal-content">
+
+    <%= form_tag add_ssh_key_path, {method: 'get', id: 'add_new_key_form', name: 'add_new_key_form', class: 'form-search, new_authorized_key', remote: true} do %>
+
+      <div class="modal-header">
+        <button type="button" class="close" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">&times;</button>
+        <div>
+          <div class="col-sm-6"> <h4 class="modal-title">Add SSH Key</h4> </div>
+          <div class="spinner spinner-32px spinner-h-center col-sm-1" hidden="true"></div>
+        </div>
+        <br/>
+      </div>
+
+      <div class="modal-body">
+        <div> <%= link_to "Click here to learn about SSH keys in Arvados.",
+                  "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+                  style: "font-weight: bold",
+                  target: "_blank" %>
+        </div>
+        <div class="form-group">
+          <label for="public_key">Public Key</label>
+          <textarea class="form-control" id="public_key" rows="4" name="public_key"></textarea>
+        </div>
+        <div class="form-group">
+          <label for="name">Name</label>
+          <input class="form-control" id="name" maxlength="250" name="name" type="text"/>
+        </div>
+      </div>
+
+      <div class="modal-footer">
+        <button type="button" class="btn btn-default" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">Cancel</button>
+        <button type="submit" class="btn btn-primary" autofocus>Submit</button>
+      </div>
+
+    <% end #form %>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/users/_choose_rows.html.erb b/apps/workbench/app/views/users/_choose_rows.html.erb
new file mode 100644 (file)
index 0000000..862efad
--- /dev/null
@@ -0,0 +1,13 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% icon_class = fa_icon_class_for_class(User) %>
+<% @objects.each do |object| %>
+  <div class="row filterable selectable" data-object-uuid="<%= object.uuid %>">
+    <div class="col-sm-12" style="overflow-x:hidden">
+      <i class="fa fa-fw <%= icon_class %>"></i>
+      <%= object.full_name %> &lt;<%= object.email %>&gt;
+    </div>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/users/_create_new_object_button.html.erb b/apps/workbench/app/views/users/_create_new_object_button.html.erb
new file mode 100644 (file)
index 0000000..fc959e3
--- /dev/null
@@ -0,0 +1,10 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= link_to setup_user_popup_path,
+  {class: 'btn btn-sm btn-primary', :remote => true, 'data-toggle' =>  "modal",
+    'data-target' => '#user-setup-modal-window', return_to: request.url} do %>
+  <i class="fa fa-fw fa-plus"></i> Add a new user
+<% end %>
+<div id="user-setup-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
diff --git a/apps/workbench/app/views/users/_current_token.html.erb b/apps/workbench/app/views/users/_current_token.html.erb
new file mode 100644 (file)
index 0000000..deab2d7
--- /dev/null
@@ -0,0 +1,31 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="panel panel-default">
+  <div class="panel-heading">
+    <h4 class="panel-title">
+      <a data-parent="#arv-adv-accordion" href="/current_token">
+        Current Token
+      </a>
+    </h4>
+  </div>
+
+<div id="manage_current_token" class="panel-body">
+<p>The Arvados API token is a secret key that enables the Arvados SDKs to access Arvados with the proper permissions. For more information see <%= link_to raw('Getting an API token'), "#{Rails.configuration.arvados_docsite}/user/reference/api-tokens.html", target: "_blank"%>.</p>
+<p>Paste the following lines at a shell prompt to set up the necessary environment for Arvados SDKs to authenticate to your <b><%= current_user.username %></b> account.</p>
+
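+<%# The HISTIGNORE line keeps the export of the secret API token out of the user's bash history. %>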
+<pre>
+HISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'
+export ARVADOS_API_TOKEN=<%= Thread.current[:arvados_api_token] %>
+export ARVADOS_API_HOST=<%= current_api_host %>
+<% if Rails.configuration.arvados_insecure_https %>
+export ARVADOS_API_HOST_INSECURE=true
+<% else %>
+unset ARVADOS_API_HOST_INSECURE
+<% end %>
+</pre>
+<p>Arvados<%= link_to virtual_machines_user_path(current_user) do%> virtual machines<%end%> do this for you automatically. This setup is needed only when you use the API remotely (e.g., from your own workstation).</p>
+</div>
+</div>
diff --git a/apps/workbench/app/views/users/_home.html.erb b/apps/workbench/app/views/users/_home.html.erb
new file mode 100644 (file)
index 0000000..96ba627
--- /dev/null
@@ -0,0 +1,38 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :breadcrumbs do raw '<!-- -->' end %>
+<% content_for :css do %>
+      .dash-list {
+        padding: 9px 0;
+      }
+      .dash-list>ul>li>a>span {
+      min-width: 1.5em;
+      margin-left: auto;
+      margin-right: auto;
+      }
+      .centerme {
+      margin-left: auto;
+      margin-right: auto;
+      text-align: center;
+      }
+      .bigfatnumber {
+      font-size: 4em;
+      font-weight: bold;
+      }
+      .dax {
+      max-width: 10%;
+      margin-right: 1em;
+      float: left
+      }
+      .daxalert {
+      overflow: hidden;
+      }
+<% end %>
+
+<div id="home-tables">
+
+    <%= render :partial => 'tables' %>
+
+</div>
diff --git a/apps/workbench/app/views/users/_setup_popup.html.erb b/apps/workbench/app/views/users/_setup_popup.html.erb
new file mode 100644 (file)
index 0000000..3b3794b
--- /dev/null
@@ -0,0 +1,78 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="modal-dialog modal-with-loading-spinner">
+  <div class="modal-content">
+
+    <%= form_tag setup_user_path, {id: 'setup_form', name: 'setup_form', method: 'get',
+        class: 'form-search', remote: true} do %>
+
+    <div class="modal-header">
+      <button type="button" class="close" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">&times;</button>
+      <div>
+        <div class="col-sm-6"> <h4 class="modal-title">Setup Shell Account</h4> </div>
+        <div class="spinner spinner-32px spinner-h-center col-sm-1" hidden="true"></div>
+      </div>
+      <br/>
+    </div>
+
+    <div class="modal-body">
+      <% if @object %>
+        <% uuid = @object.uuid %>
+        <% email = @object.email %>
+      <% end %>
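+      <%# Fields that already have a value from @object or @current_selections are rendered disabled below. %>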
+      <% disable_email = uuid != nil %>
+      <% identity_url_prefix = @current_selections[:identity_url_prefix] %>
+      <% disable_url_prefix = identity_url_prefix != nil %>
+      <% selected_vm = @current_selections[:vm_uuid] %>
+      <% groups = @current_selections[:groups] %>
+
+      <input id="user_uuid" maxlength="250" name="user_uuid" type="hidden" value="<%=uuid%>">
+      <div class="form-group">
+        <label for="email">Email</label>
+        <% if disable_email %>
+        <input class="form-control" id="email" maxlength="250" name="email" type="text" value="<%=email%>" disabled>
+        <% else %>
+        <input class="form-control" id="email" maxlength="250" name="email" type="text">
+        <% end %>
+      </div>
+      <div class="form-group">
+        <label for="openid_prefix">Identity URL Prefix</label>
+        <% if disable_url_prefix %>
+        <input class="form-control" id="openid_prefix" maxlength="250" name="openid_prefix" type="text"
+               value="<%=identity_url_prefix%>" disabled>
+        <% else %>
+        <input class="form-control" id="openid_prefix" maxlength="250" name="openid_prefix" type="text"
+               value="<%= Rails.configuration.default_openid_prefix %>">
+        <% end %>
+      </div>
+      <div class="form-group">
+        <label for="vm_uuid">Virtual Machine</label>
+        <select class="form-control" name="vm_uuid">
+          <option value="" <%= 'selected' unless selected_vm %>>
+            Choose One:
+          </option>
+          <% @vms.each do |vm| %>
+            <option value="<%=vm.uuid%>"
+              <%= 'selected' if selected_vm == vm.uuid %>>
+              <%= vm.hostname %>
+            </option>
+          <% end %>
+        </select>
+      </div>
+      <div class="groups-group">
+        <label for="groups">Groups for virtual machine (comma separated list)</label>
+        <input class="form-control" id="groups" maxlength="250" name="groups" type="text" value="<%=groups%>">
+      </div>
+    </div>
+
+    <div class="modal-footer">
+      <button class="btn btn-default" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">Cancel</button>
+      <button type="submit" id="register" class="btn btn-primary" autofocus>Submit</button>
+    </div>
+
+    <% end #form %>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/users/_show_activity.html.erb b/apps/workbench/app/views/users/_show_activity.html.erb
new file mode 100644 (file)
index 0000000..b1fba61
--- /dev/null
@@ -0,0 +1,8 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<p>
+  As an admin user, you can <%= link_to "view recent user activity", activity_users_url %>.
+</p>
+
diff --git a/apps/workbench/app/views/users/_show_admin.html.erb b/apps/workbench/app/views/users/_show_admin.html.erb
new file mode 100644 (file)
index 0000000..89156aa
--- /dev/null
@@ -0,0 +1,118 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="row">
+  <div class="col-md-6">
+    <p>
+      As an admin, you can log in as this user. When you&rsquo;ve
+      finished, you will need to log out and log in again with your
+      own account.
+    </p>
+
+    <blockquote>
+      <%= button_to "Log in as #{@object.full_name}", sudo_user_url(id: @object.uuid), class: 'btn btn-primary' %>
+    </blockquote>
+
+    <p>
+      As an admin, you can set up a shell account for this user.
+      The login name is automatically generated from the user's e-mail address.
+    </p>
+
+    <blockquote>
+      <%= link_to "Setup shell account #{'for ' if @object.full_name.present?} #{@object.full_name}", setup_popup_user_url(id: @object.uuid),  {class: 'btn btn-primary', :remote => true, 'data-toggle' =>  "modal", 'data-target' => '#user-setup-modal-window'}  %>
+    </blockquote>
+
+    <p>
+      As an admin, you can deactivate and reset this user. This will
+      remove all repository/VM permissions for the user. If you
+      "setup" the user again, the user will have to sign the user
+      agreement again.
+    </p>
+
+    <blockquote>
+      <%= button_to "Deactivate #{@object.full_name}", unsetup_user_url(id: @object.uuid), class: 'btn btn-primary', data: {confirm: "Are you sure you want to deactivate #{@object.full_name}?"} %>
+    </blockquote>
+  </div>
+  <div class="col-md-6">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Group memberships
+
+        <div class="pull-right">
+          <%= link_to raw('<i class="fa fa-plus"></i> Add new group'), "#",
+                       {class: 'btn btn-xs btn-primary', 'data-toggle' => "modal",
+                        'data-target' => '#add-group-modal'}  %>
+        </div>
+      </div>
+      <div class="panel-body">
+        <div class="alert alert-info">
+          <b>Tip:</b> in most cases, you want <i>both permissions at once</i> for a given group.
+          <br/>
+          The user&rarr;group permission is can_manage.
+          <br/>
+          The group&rarr;user permission is can_read.
+        </div>
+        <form>
+          <% permitted_group_perms = {}
+             Link.filter([
+             ['tail_uuid', '=', @object.uuid],
+             ['head_uuid', 'is_a', 'arvados#group'],
+             ['link_class', '=', 'permission'],
+             ]).each do |perm|
+               permitted_group_perms[perm.head_uuid] = perm.uuid
+             end %>
+          <% member_group_perms = {}
+             Link.permissions_for(@object).each do |perm|
+               member_group_perms[perm.tail_uuid] = perm.uuid
+             end %>
+          <% Group.order(['name']).where(group_class: 'role').each do |group| %>
+            <div>
+              <label class="checkbox-inline" data-toggle-permission="true" data-permission-tail="<%= @object.uuid %>" data-permission-name="can_manage">
+                <%= check_box_tag(
+                    'group_uuids[]',
+                    group.uuid,
+                    permitted_group_perms[group.uuid],
+                    disabled: (group.owner_uuid == @object.uuid),
+                    data: {
+                      permission_head: group.uuid,
+                      permission_uuid: permitted_group_perms[group.uuid]}) %>
+                <small>user&rarr;group</small>
+              </label>
+              <label class="checkbox-inline" data-toggle-permission="true" data-permission-head="<%= @object.uuid %>" data-permission-name="can_read">
+                <%= check_box_tag(
+                    'group_uuids[]',
+                    group.uuid,
+                    member_group_perms[group.uuid],
+                    disabled: (group.owner_uuid == @object.uuid),
+                    data: {
+                      permission_tail: group.uuid,
+                      permission_uuid: member_group_perms[group.uuid]}) %>
+                <small>group&rarr;user</small>
+              </label>
+              <label class="checkbox-inline">
+                <%= group.name || '(unnamed)' %> <span class="deemphasize">(owned by <%= User.find?(group.owner_uuid).andand.full_name %>)</span>
+              </label>
+            </div>
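+          <%# .each returns the collection it iterated, so the fallback below renders only when no role groups exist. %>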
+          <% end.empty? and begin %>
+            <div>
+              (No groups defined.)
+            </div>
+          <% end %>
+        </form>
+      </div>
+      <div class="panel-footer">
+        These groups (roles) can also be managed from the command line. For example:
+        <ul>
+          <li><code>arv group create \<br/>--group '{"group_class":"role","name":"New group"}'</code></li>
+          <li><code>arv group list \<br/>--filters '[["group_class","=","role"]]' \<br/>--select '["uuid","name"]'</code></li>
+          <li><code>arv edit <i>uuid</i></code></li>
+        </ul>
+      </div>
+    </div>
+  </div>
+</div>
+
+<div id="user-setup-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
+<%= render partial: "add_group_modal" %>
diff --git a/apps/workbench/app/views/users/_ssh_keys.html.erb b/apps/workbench/app/views/users/_ssh_keys.html.erb
new file mode 100644 (file)
index 0000000..8d2f513
--- /dev/null
@@ -0,0 +1,73 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="panel panel-default">
+  <div class="panel-heading">
+    <div class="pull-right">
+      <%= link_to raw('<i class="fa fa-plus"></i>' " Add new SSH key"), add_ssh_key_popup_url,
+                   {class: 'btn btn-xs btn-primary', :remote => true, 'data-toggle' =>  "modal",
+                    'data-target' => '#add-ssh-key-modal-window'}  %>
+    </div>
+    <h4 class="panel-title">
+      <%= link_to ssh_keys_user_path(current_user) do %>
+        SSH Keys
+      <%end%>
+    </h4>
+  </div>
+
+<div id="manage_ssh_keys" class="panel-body">
+  <% if !@my_ssh_keys.any? %>
+     <p> You have not yet set up an SSH public key for use with Arvados. <%= link_to "Learn more.",
+                  "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+                  style: "font-weight: bold",
+                  target: "_blank" %>
+     </p>
+     <p> When you have an SSH key you would like to use, add it using the <b>Add</b> button. </p>
+  <% else %>
+    <table class="table manage-ssh-keys-table">
+      <colgroup>
+        <col style="width: 35%" />
+        <col style="width: 55%" />
+        <col style="width: 10%" />
+      </colgroup>
+      <thead>
+        <tr>
+          <th> Name </th>
+          <th> Key Fingerprint </th>
+          <th> </th>
+        </tr>
+      </thead>
+      <tbody>
+        <% @my_ssh_keys.andand.each do |key| %>
+          <tr style="word-break:break-all;">
+            <td>
+              <%= key[:name] %>
+            </td>
+            <td style="word-break:break-all;">
+              <% if key[:public_key] && key[:public_key].size > 0 %>
+                <div>
+                  <span title="<%=key[:public_key]%>"> <%=
+                    begin
+                      SSHKey.fingerprint key[:public_key]
+                    rescue
+                      "INVALID KEY: " + key[:public_key]
+                    end
+                   %> </span>
+                </div>
+              <% else %>
+                  <%= key[:public_key] %>
+              <% end %>
+            </td>
+            <td>
+              <%= link_to(authorized_key_path(id: key[:uuid]), method: :delete, class: 'btn btn-sm', data: {confirm: "Really delete key?"}) do %>
+                  <i class="fa fa-fw fa-trash-o"></i>
+              <% end %>
+            </td>
+          </tr>
+        <% end %>
+      </tbody>
+    </table>
+  <% end %>
+</div>
+</div>
diff --git a/apps/workbench/app/views/users/_tables.html.erb b/apps/workbench/app/views/users/_tables.html.erb
new file mode 100644 (file)
index 0000000..5667951
--- /dev/null
@@ -0,0 +1,270 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if current_user.andand.is_active %>
+  <div>
+    <strong>Recent jobs</strong>
+    <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true} %>
+    <%= link_to raw("Show all jobs &rarr;"), jobs_path, class: 'pull-right' %>
+    <% if not current_user.andand.is_active or @my_jobs.empty? %>
+      <p>(None)</p>
+    <% else %>
+      <table class="table table-bordered table-condensed table-fixedlayout">
+        <colgroup>
+          <col width="20%" />
+          <col width="20%" />
+          <col width="20%" />
+          <col width="13%" />
+          <col width="27%" />
+        </colgroup>
+
+        <tr>
+          <th>Script</th>
+          <th>Output</th>
+          <th>Log</th>
+          <th>Created at</th>
+          <th>Status</th>
+        </tr>
+
+        <%# Preload collections, logs, and pipeline instance objects %>
+        <%
+          collection_uuids = []
+          log_uuids = []
+          @my_jobs[0..6].each do |j|
+            collection_uuids << j.output
+            log_uuids << j.log
+          end
+
+          @my_collections[0..6].each do |c|
+            collection_uuids << c.uuid
+          end
+
+          preload_collections_for_objects collection_uuids
+          preload_log_collections_for_objects log_uuids
+
+          pi_uuids = []
+          @my_pipelines[0..6].each do |p|
+            pi_uuids << p.uuid
+          end
+          resource_class = resource_class_for_uuid(pi_uuids.first, friendly_name: true)
+          preload_objects_for_dataclass resource_class, pi_uuids
+        %>
+
+        <% @my_jobs[0..6].each do |j| %>
+          <tr data-object-uuid="<%= j.uuid %>">
+            <td>
+              <small>
+                <%= link_to((j.script.andand[0..31] || j.uuid), job_path(j.uuid)) %>
+              </small>
+            </td>
+
+            <td>
+              <small>
+                <% if j.state == "Complete" and j.output %>
+                  <a href="<%= collection_path(j.output) %>">
+                    <% collections = collections_for_object(j.output) %>
+                      <% if collections && !collections.empty? %>
+                      <% c = collections.first %>
+                      <% c.files.each do |file| %>
+                        <%= file[0] == '.' ? file[1] : "#{file[0]}/#{file[1]}" %>
+                      <% end %>
+                      <% end %>
+                  </a>
+                <% end %>
+              </small>
+            </td>
+
+            <td>
+              <small>
+                <% if j.log %>
+                  <% log_collections = log_collections_for_object(j.log) %>
+                  <% if log_collections && !log_collections.empty? %>
+                    <% c = log_collections.first %>
+                    <% c.files.each do |file| %>
+                      <a href="<%= collection_path(j.log) %>/<%= file[1] %>?disposition=inline&size=<%= file[2] %>">Log</a>
+                    <% end %>
+                  <% end %>
+                <% elsif j.respond_to? :log_buffer and j.log_buffer.is_a? String %>
+                  <% buf = j.log_buffer.strip.split("\n").last %>
+                  <span title="<%= buf %>"><%= buf %></span>
+                <% end %>
+              </small>
+            </td>
+
+            <td>
+              <small>
+                <%= j.created_at.to_s if j.created_at %>
+              </small>
+            </td>
+
+            <td>
+              <div class="inline-progress-container">
+                <%= render partial: 'job_progress', locals: {:j => j} %>
+              </div>
+            </td>
+          </tr>
+        <% end %>
+      </table>
+    <% end %>
+  </div>
+
+<div>
+  <strong>Recent pipeline instances</strong>
+  <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true} %>
+  <%= link_to raw("Show all pipeline instances &rarr;"), pipeline_instances_path, class: 'pull-right' %>
+  <% if not current_user.andand.is_active or @my_pipelines.empty? %>
+    <p>(None)</p>
+  <% else %>
+    <table class="table table-bordered table-condensed table-fixedlayout">
+      <colgroup>
+        <col width="30%" />
+        <col width="30%" />
+        <col width="13%" />
+        <col width="13%" />
+        <col width="20%" />
+      </colgroup>
+
+      <tr>
+        <th>Instance</th>
+        <th>Template</th>
+        <th>Created at</th>
+        <th>Status</th>
+        <th>Progress</th>
+      </tr>
+
+      <% @my_pipelines[0..6].each do |p| %>
+        <tr data-object-uuid="<%= p.uuid %>">
+          <td>
+            <small>
+              <%= link_to_if_arvados_object p.uuid, friendly_name: true %>
+            </small>
+          </td>
+
+          <td>
+            <small>
+              <%= link_to_if_arvados_object p.pipeline_template_uuid, friendly_name: true %>
+            </small>
+          </td>
+
+          <td>
+            <small>
+              <%= p.created_at.to_s if p.created_at %>
+            </small>
+          </td>
+
+          <td>
+            <%= render partial: 'pipeline_status_label', locals: {:p => p} %>
+          </td>
+
+          <td>
+            <div class="inline-progress-container">
+              <%= render partial: 'pipeline_progress', locals: {:p => p} %>
+            </div>
+          </td>
+        </tr>
+      <% end %>
+    </table>
+  <% end %>
+</div>
+
+<div>
+  <strong>Recent collections</strong>
+  <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true} %>
+  <%= link_to raw("Show all collections &rarr;"), collections_path, class: 'pull-right' %>
+  <div class="pull-right" style="padding-right: 1em; width: 30%;">
+    <%= form_tag collections_path,
+          method: 'get',
+          class: 'form-search small-form-margin' do %>
+    <div class="input-group input-group-sm">
+      <%= text_field_tag :search, params[:search], class: 'form-control', placeholder: 'Search' %>
+      <span class="input-group-btn">
+        <%= button_tag(class: 'btn btn-info') do %>
+        <span class="glyphicon glyphicon-search"></span>
+        <% end %>
+      </span>
+    </div>
+    <% end %>
+  </div>
+  <% if not current_user.andand.is_active or @my_collections.empty? %>
+    <p>(None)</p>
+  <% else %>
+    <table class="table table-bordered table-condensed table-fixedlayout">
+      <colgroup>
+        <col width="46%" />
+        <col width="32%" />
+        <col width="10%" />
+        <col width="12%" />
+      </colgroup>
+
+      <tr>
+        <th>Contents</th>
+        <th>Tags</th>
+        <th>Age</th>
+        <th>Storage</th>
+      </tr>
+
+      <% @my_collections[0..6].each do |c| %>
+        <tr data-object-uuid="<%= c.uuid %>">
+          <td>
+            <small>
+              <a href="<%= collection_path(c.uuid) %>">
+                <% c.files.each do |file| %>
+                  <%= file[0] == '.' ? file[1] : "#{file[0]}/#{file[1]}" %>
+                <% end %>
+              </a>
+            </small>
+          </td>
+          <td>
+            <% if @my_tag_links[c.uuid] %>
+            <small>
+              <%= @my_tag_links[c.uuid].collect(&:name).join(", ") %>
+            </small>
+            <% end %>
+          </td>
+          <td>
+            <small>
+              <%= c.created_at.to_s if c.created_at %>
+            </small>
+          </td>
+          <td>
+            <%= render partial: 'collections/toggle_persist', locals: { uuid: c.uuid, current_state: @persist_state[c.uuid] } %>
+          </td>
+        </tr>
+      <% end %>
+    </table>
+  <% end %>
+</div>
+
+<% else %>
+
+  <div class="row-fluid">
+    <div class="col-sm-4">
+      <%= image_tag "dax.png", style: "max-width:100%" %>
+    </div>
+    <div class="col-sm-8">
+      <h2>Welcome to Arvados, <%= current_user.first_name %>!</h2>
+      <div class="well">
+        <p>
+          Your account must be activated by an Arvados administrator.  If this
+          is your first time accessing Arvados and you would like to request
+          access, or if you believe you are seeing this page in error, please
+          <%= link_to "contact us", Rails.configuration.activation_contact_link %>.
+          You should receive an email at the address you used to log in when
+          your account is activated.  In the meantime, you can
+          <%= link_to "learn more about Arvados", "https://arvados.org/projects/arvados/wiki/Introduction_to_Arvados" %>,
+          and <%= link_to "read the Arvados user guide", "http://doc.arvados.org/user" %>.
+        </p>
+        <p style="padding-bottom: 1em">
+          <%= link_to raw('Contact us &#x2709;'),
+              Rails.configuration.activation_contact_link, class: "pull-right btn btn-primary" %></p>
+      </div>
+    </div>
+  </div>
+<% end %>
+
+<% content_for :js do %>
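+// Auto-refresh: click the first "(refresh)" link once a minute so the
+// recent jobs/pipelines/collections tables above reload via AJAX.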
+setInterval(function(){$('a.refresh:eq(0)').click()}, 60000);
+<% end %>
diff --git a/apps/workbench/app/views/users/_virtual_machines.html.erb b/apps/workbench/app/views/users/_virtual_machines.html.erb
new file mode 100644 (file)
index 0000000..c891b0c
--- /dev/null
@@ -0,0 +1,113 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="panel panel-default">
+  <div class="panel-heading">
+    <h4 class="panel-title">
+      <%= link_to virtual_machines_user_path(current_user) do %>
+        Virtual Machines
+      <% end %>
+
+    </h4>
+  </div>
+
+<div id="manage_virtual_machines" class="panel-body">
+  <p>
+    For more information see <%= link_to raw('Arvados Docs &rarr; User Guide &rarr; VM access'),
+  "#{Rails.configuration.arvados_docsite}/user/getting_started/vm-login-with-webshell.html",
+  target: "_blank"%>.
+  </p>
+
+  <% if !@my_virtual_machines.any? %>
+    <div id="no_shell_access" class="no_shell_access">
+      <div class="alert alert-warning clearfix">
+        <p>
+          You do not have access to any virtual machines.  Some
+          Arvados features require using the command line.  You may
+          request access to a hosted virtual machine with a
+          command-line shell.
+        </p>
+        <div class="pull-right">
+          <%= link_to({
+              action: 'request_shell_access',
+              controller: 'users',
+              id: current_user.uuid
+              },
+              method: :post,
+              remote: true,
+              class: 'btn btn-xs btn-primary',
+              data: {
+              disable_with: "Sending request...",
+              on_error_hide: '.no_shell_access .alert-success',
+              on_error_show: '.no_shell_access .alert-danger',
+              on_error_write: '.no_shell_access .alert-danger .error-text',
+              on_success_hide: '.no_shell_access .alert-danger',
+              }) do %>
+            Send request for shell access
+          <% end %>
+        </div>
+      </div>
+      <div class="alert alert-success" style="display:none">
+        <p class="contain-align-left"><%# (see javascripts/request_shell_access.js) %></p>
+      </div>
+      <div class="alert alert-danger" style="display:none">
+        <p class="contain-align-left">Sorry, something went wrong. Please try again. (<span class="error-text"></span>)</p>
+      </div>
+    </div>
+  <% else %>
+    <script> localStorage.removeItem('request_shell_access'); </script>
+    <table class="table virtual-machines-table">
+      <colgroup>
+        <col style="width: 25%" />
+        <col style="width: 25%" />
+        <col style="width: 50%" />
+      </colgroup>
+      <thead>
+        <tr>
+          <th> Host name </th>
+          <th> Login name </th>
+          <th> Command line </th>
+          <% if Rails.configuration.shell_in_a_box_url %>
+            <th> Web shell <span class="label label-info">beta</span></th>
+          <% end %>
+        </tr>
+      </thead>
+      <tbody>
+        <% @my_virtual_machines.andand.each do |vm| %>
+          <tr>
+            <td style="word-break:break-all;">
+              <%= vm[:hostname] %>
+            </td>
+            <td style="word-break:break-all;">
+              <%= @my_vm_logins[vm[:uuid]].andand.compact.andand.join(", ") %>
+            </td>
+            <td style="word-break:break-all;">
+              <% if @my_vm_logins[vm[:uuid]] %>
+                <% @my_vm_logins[vm[:uuid]].each do |login| %>
+                  <code>ssh&nbsp;<%= login %>@<%= vm[:hostname] %>.<%= current_uuid_prefix || 'xyzzy' %></code>
+                <% end %>
+              <% end %>
+            </td>
+            <% if Rails.configuration.shell_in_a_box_url %>
+              <td>
+                <% @my_vm_logins[vm[:uuid]].andand.each do |login| %>
+                  <%= link_to webshell_virtual_machine_path(vm, login: login), title: "Open a terminal session in your browser", class: 'btn btn-xs btn-default', target: "_blank" do %>
+                    Log in as <%= login %><br />
+                  <% end %>
+                <% end %>
+              </td>
+            <% end %>
+          </tr>
+        <% end %>
+      </tbody>
+    </table>
+  <% end %>
+</div>
+</div>
+  <p>To access virtual machines using SSH, <%= link_to ssh_keys_user_path(current_user) do %>add an SSH key to your account<% end %> and add a section like this to your SSH configuration file (<i>~/.ssh/config</i>):</p>
+    <pre>Host *.<%= current_uuid_prefix || 'xyzzy' %>
+      TCPKeepAlive yes
+      ServerAliveInterval 60
+      ProxyCommand ssh -p2222 turnout@switchyard.<%= current_api_host || 'xyzzy.arvadosapi.com' %> -x -a $SSH_PROXY_FLAGS %h
+    </pre>
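+<%# Usage sketch: with the stanza above in ~/.ssh/config and a key
+    registered, a VM listed in the table (say a hypothetical hostname
+    "shell" with login "you") is reached with plain `ssh you@shell.xyzzy`;
+    ProxyCommand relays the session through the switchyard host, so no
+    direct network route to the VM is needed. %>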
diff --git a/apps/workbench/app/views/users/activity.html.erb b/apps/workbench/app/views/users/activity.html.erb
new file mode 100644 (file)
index 0000000..9df4b1f
--- /dev/null
@@ -0,0 +1,76 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :css do %>
+table#users-activity-table th {
+    overflow-x: hidden;
+}
+table#users-activity-table .cell-for-span-This-month,
+table#users-activity-table .cell-for-span-Last-month {
+    background: #eee;
+}
+<% end %>
+<table class="table table-condensed arv-index" id="users-activity-table">
+  <colgroup>
+    <col width="28%" />
+  </colgroup>
+  <% @spans.each do |_| %>
+  <colgroup>
+    <% 3.times do %>
+    <col width="<%= (72 / @spans.count / 3).floor %>%" />
+    <% end %>
+  </colgroup>
+  <% end %>
+
+  <tr>
+    <th rowspan="2">User</th>
+    <% @spans.each do |span, start_at, end_at| %>
+    <th colspan="3" class="cell-for-span-<%= span.gsub ' ','-' %>">
+      <%= span %>
+      <br />
+      <%= start_at.strftime('%b %-d') %>
+      -
+      <%= (end_at-1.second).strftime('%b %-d') %>
+    </th>
+    <% end %>
+  </tr>
+  <tr>
+    <% @spans.each do |span, _| %>
+    <th class="cell-for-span-<%= span.gsub ' ','-' %>">Logins</th>
+    <th class="cell-for-span-<%= span.gsub ' ','-' %>">Jobs</th>
+    <th class="cell-for-span-<%= span.gsub ' ','-' %>">Pipelines</th>
+    <% end %>
+  </tr>
+
+  <% @users.each do |user| %>
+  <tr>
+    <td>
+      <small>
+        <% if user.uuid %>
+        <%= link_to_if_arvados_object user, friendly_name: true %>
+        <% else %>
+        <b>Total</b>
+        <% end %>
+      </small>
+    </td>
+
+    <% @spans.each do |span, _| %>
+    <% ['logins', 'jobs', 'pipeline_instances'].each do |type| %>
+    <td class="cell-for-span-<%= span.gsub ' ','-' %>">
+      <small>
+        <%= @user_activity[user.uuid][span + " " + type].to_s %>
+      </small>
+    </td>
+    <% end %>
+    <% end %>
+  </tr>
+  <% end %>
+</table>
+
+<% content_for :footer_js do %>
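+// De-emphasize zero counts so cells with real activity stand out.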
+$('#users-activity-table td small').each(function(){
+  if ($(this).html().trim() == '0')
+    $(this).css('opacity', '0.3');
+});
+<% end %>
diff --git a/apps/workbench/app/views/users/add_ssh_key.js.erb b/apps/workbench/app/views/users/add_ssh_key.js.erb
new file mode 100644 (file)
index 0000000..42a6252
--- /dev/null
@@ -0,0 +1,6 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+$("#add-ssh-key-modal-window").modal("hide");
+document.location.reload();
diff --git a/apps/workbench/app/views/users/add_ssh_key_popup.js.erb b/apps/workbench/app/views/users/add_ssh_key_popup.js.erb
new file mode 100644 (file)
index 0000000..eba8960
--- /dev/null
@@ -0,0 +1,12 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+$("#add-ssh-key-modal-window").html("<%= escape_javascript(render partial: 'add_ssh_key_popup') %>");
+
+// reset form input fields, for the next time around
+function reset_form() {
+  $('#name').val("");
+  $('#public_key').val("");
+  $('select').val('');
+}
diff --git a/apps/workbench/app/views/users/current_token.html.erb b/apps/workbench/app/views/users/current_token.html.erb
new file mode 100644 (file)
index 0000000..7ee81e3
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render :partial => 'current_token' %>
diff --git a/apps/workbench/app/views/users/home.html.erb b/apps/workbench/app/views/users/home.html.erb
new file mode 100644 (file)
index 0000000..8d212cd
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render :partial => 'home' %>
diff --git a/apps/workbench/app/views/users/home.js.erb b/apps/workbench/app/views/users/home.js.erb
new file mode 100644 (file)
index 0000000..aedf947
--- /dev/null
@@ -0,0 +1,7 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
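+// Replace the tables only when the refreshed markup actually differs,
+// avoiding needless DOM churn on every poll.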
+var new_content = "<%= escape_javascript(render partial: 'tables') %>";
+if ($('div#home-tables').html() != new_content)
+   $('div#home-tables').html(new_content);
diff --git a/apps/workbench/app/views/users/inactive.html.erb b/apps/workbench/app/views/users/inactive.html.erb
new file mode 100644 (file)
index 0000000..f3cb3cf
--- /dev/null
@@ -0,0 +1,35 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :breadcrumbs do raw '<!-- -->' end %>
+
+<div class="row">
+  <div class="col-sm-8 col-sm-push-4" style="margin-top: 1em">
+    <div class="well clearfix">
+      <%= image_tag "dax.png", style: "width: 147px; height: 197px; max-width: 25%; margin-right: 2em", class: 'pull-left' %>
+
+      <h3>Hi! You're logged in, but...</h3>
+
+      <p>
+
+        Your account is inactive.
+
+      </p><p>
+
+        An administrator must activate your account before you can go
+        any further.
+
+      </p><p>
+
+        <%= link_to 'Retry', (params[:return_to] || '/'), class: 'btn btn-primary' %>
+
+      </p>
+
+      <p>
+       Already have an account with a different login?  <a href="/users/link_account">Link this login to your existing account.</a>
+      </p>
+
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/users/link_account.html.erb b/apps/workbench/app/views/users/link_account.html.erb
new file mode 100644 (file)
index 0000000..86a0446
--- /dev/null
@@ -0,0 +1,112 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= javascript_tag do %>
+  function update_visibility() {
+    if (sessionStorage.getItem('link_account_api_token') &&
+      sessionStorage.getItem('link_account_uuid') != '<%= Thread.current[:user].uuid %>')
+    {
+      $("#ready-to-link").css({"display": "inherit"});
+      $("#need-login").css({"display": "none"});
+
+      <% if params[:direction] == "in" %>
+      var user_a = "<b>"+sessionStorage.getItem('link_account_email')+"</b> ("+sessionStorage.getItem('link_account_username')+", "+sessionStorage.getItem('link_account_uuid')+")";
+      var user_b = "<b><%= Thread.current[:user].email %></b> (<%= Thread.current[:user].username%>, <%= Thread.current[:user].uuid%>)";
+      var user_a_is_active = (sessionStorage.getItem('link_account_is_active') == "true");
+      var user_a_is_admin = (sessionStorage.getItem('link_account_is_admin') == "true");
+      var user_b_is_admin = <%=if Thread.current[:user].is_admin then "true" else "false" end %>;
+      <% else %>
+      var user_a = "<b><%= Thread.current[:user].email %></b> (<%= Thread.current[:user].username%>, <%= Thread.current[:user].uuid%>)";
+      var user_b = "<b>"+sessionStorage.getItem('link_account_email')+"</b> ("+sessionStorage.getItem('link_account_username')+", "+sessionStorage.getItem('link_account_uuid')+")";
+      var user_a_is_active = <%= if Thread.current[:user].is_active then "true" else "false" end %>;
+      var user_a_is_admin = <%=if Thread.current[:user].is_admin then "true" else "false" end %>;
+      var user_b_is_admin = (sessionStorage.getItem('link_account_is_admin') == "true");
+      <% end %>
+
+      $("#new-user-token-input").val(sessionStorage.getItem('link_account_api_token'));
+
+      if (!user_a_is_active) {
+        $("#will-link-to").html("<p>Cannot link "+user_b+" to inactive account "+user_a+".</p>");
+        $("#link-account-submit").prop("disabled", true);
+      } else if (user_b_is_admin && !user_a_is_admin) {
+        $("#will-link-to").html("<p>Cannot link admin account "+user_b+" to non-admin account "+user_a+".</p>");
+        $("#link-account-submit").prop("disabled", true);
+      } else {
+        $("#will-link-to").html("<p>Clicking 'Link accounts' will link "+user_b+" created on <%=Thread.current[:user].created_at%> to "+
+          user_a+" created at <b>"+sessionStorage.getItem('link_account_created_at')+"</b>.</p>"+
+          "<p>After linking, logging in as "+user_b+" will log you into the same account as "+user_a+
+          ".</p>  <p>Any objects owned by "+user_b+" will be transferred to "+user_a+".</p>");
+      }
+    } else {
+      $("#ready-to-link").css({"display": "none"});
+      $("#need-login").css({"display": "inherit"});
+    }
+
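+    // The stashed login info is single-use: clear it so a stale token
+    // from an abandoned linking attempt is not reused on a later visit.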
+    sessionStorage.removeItem('link_account_api_token');
+    sessionStorage.removeItem('link_account_uuid');
+    sessionStorage.removeItem('link_account_email');
+    sessionStorage.removeItem('link_account_username');
+    sessionStorage.removeItem('link_account_created_at');
+    sessionStorage.removeItem('link_account_is_active');
+    sessionStorage.removeItem('link_account_is_admin');
+  };
+
+  $(window).on("load", function() {
+    update_visibility();
+  });
+
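+  // Linking flow: stash the current login in sessionStorage, then log
+  // out and back in as the other account; when this page reloads under
+  // the new identity, update_visibility() reads the stash back.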
+  function do_login(dir) {
+    sessionStorage.setItem('link_account_api_token', '<%= Thread.current[:arvados_api_token] %>');
+    sessionStorage.setItem('link_account_email', '<%= Thread.current[:user].email %>');
+    sessionStorage.setItem('link_account_username', '<%= Thread.current[:user].username %>');
+    sessionStorage.setItem('link_account_uuid', '<%= Thread.current[:user].uuid %>');
+    sessionStorage.setItem('link_account_created_at', '<%= Thread.current[:user].created_at %>');
+    sessionStorage.setItem('link_account_is_active', <%= if Thread.current[:user].is_active then "true" else "false" end %>);
+    sessionStorage.setItem('link_account_is_admin', <%= if Thread.current[:user].is_admin then "true" else "false" end %>);
+    window.location.replace('<%=arvados_api_client.arvados_logout_url(return_to: arvados_api_client.arvados_login_url(return_to: "#{strip_token_from_path(request.url)}?direction="))%>'+dir);
+  }
+
+  $(document).on("click", "#link-account-in", function(e) { do_login("in"); });
+  $(document).on("click", "#link-account-out", function(e) { do_login("out"); });
+
+  $(document).on("click", "#cancel-link-accounts", function() {
+    window.location.replace('/users/link_account?api_token='+$("#new-user-token-input").val());
+  });
+<% end %>
+
+<div id="need-login" style="display: none">
+
+  <p>You are currently logged in as <b><%= Thread.current[:user].email %></b> (<%= Thread.current[:user].username%>, <%= Thread.current[:user].uuid %>), created at <b><%= Thread.current[:user].created_at%></b>.</p>
+
+  <p>You can link Arvados accounts.  After linking, either login will take you to the same account.</p>
+
+  <p>
+    <% if Thread.current[:user].is_active %>
+      <button class="btn btn-primary" id="link-account-in" style="margin-right: 1em">
+        <i class="fa fa-fw fa-sign-in"></i> Add another login to this account
+      </button>
+    <% end %>
+    <button class="btn btn-primary" id="link-account-out" style="margin-right: 1em">
+      <i class="fa fa-fw fa-sign-in"></i> Use this login to access another account
+    </button>
+  </p>
+</div>
+
+<div id="ready-to-link" style="display: none">
+
+  <div id="will-link-to"></div>
+
+  <%= button_tag "Cancel", class: "btn btn-cancel pull-left", id: "cancel-link-accounts", style: "margin-right: 1em" %>
+
+  <%= form_tag do |f| %>
+    <input type="hidden" id="new-user-token-input" name="new_user_token" value="" />
+    <input type="hidden" id="new-user-token-input" name="direction" value="<%=params[:direction]%>" />
+    <%= button_tag class: "btn btn-primary", id: "link-account-submit" do %>
+      <i class="fa fa-fw fa-link"></i> Link accounts
+    <% end %>
+  <% end %>
+
+</div>
diff --git a/apps/workbench/app/views/users/profile.html.erb b/apps/workbench/app/views/users/profile.html.erb
new file mode 100644 (file)
index 0000000..26d1f57
--- /dev/null
@@ -0,0 +1,110 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+    profile_config = Rails.configuration.user_profile_form_fields
+    current_user_profile = current_user.prefs[:profile]
+    show_save_button = false
+
+    profile_message = Rails.configuration.user_profile_form_message
+%>
+
+<div>
+    <div class="panel panel-default">
+        <div class="panel-heading">
+          <h4 class="panel-title">
+            Profile
+          </h4>
+        </div>
+        <div class="panel-body">
+          <% if !missing_required_profile? && params[:offer_return_to] %>
+            <div class="alert alert-success">
+              <% if current_user.prefs[:getting_started_shown] %>
+                <p>Thank you for filling in your profile. <%= link_to 'Back to work!', params[:offer_return_to], class: 'btn btn-sm btn-primary' %></p>
+              <% else %>
+                <p>Thank you for filling in your profile. <%= link_to 'Get started', params[:offer_return_to], class: 'btn btn-sm btn-primary' %></p>
+              <% end %>
+            </div>
+          <% else %>
+            <div class="alert alert-info">
+              <p><%=raw(profile_message)%></p>
+            </div>
+          <% end %>
+
+            <%= form_for current_user, html: {id: 'save_profile_form', name: 'save_profile_form', class: 'form-horizontal'} do %>
+              <%= hidden_field_tag :offer_return_to, params[:offer_return_to] %>
+              <%= hidden_field_tag :return_to, profile_user_path(current_user.uuid, offer_return_to: params[:offer_return_to]) %>
+              <div class="form-group">
+                  <label for="email" class="col-sm-3 control-label"> E-mail </label>
+                  <div class="col-sm-8">
+                    <p class="form-control-static" id="email" name="email"><%=current_user.email%></p>
+                  </div>
+              </div>
+              <div class="form-group">
+                  <label for="first_name" class="col-sm-3 control-label"> First Name </label>
+                  <div class="col-sm-8">
+                    <p class="form-control-static" id="first_name" name="first_name"><%=current_user.first_name%></p>
+                  </div>
+              </div>
+              <div class="form-group">
+                  <label for="last_name" class="col-sm-3 control-label"> Last Name </label>
+                  <div class="col-sm-8">
+                    <p class="form-control-static" id="last_name" name="last_name"><%=current_user.last_name%></p>
+                  </div>
+              </div>
+              <div class="form-group">
+                  <label for="identity_url" class="col-sm-3 control-label"> Identity URL </label>
+                  <div class="col-sm-8">
+                    <p class="form-control-static" id="identity_url" name="identity_url"><%=current_user.andand.identity_url%></p>
+                  </div>
+              </div>
+
+              <% profile_config.kind_of?(Array) && profile_config.andand.each do |entry| %>
+                <% if entry['key'] %>
+                  <%
+                      show_save_button = true
+                      label = entry['required'] ? '* ' : ''
+                      label += entry['form_field_title']
+                      value = current_user_profile[entry['key'].to_sym] if current_user_profile
+                  %>
+                  <div class="form-group">
+                    <label for="<%=entry['key']%>"
+                           class="col-sm-3 control-label"
+                           style=<%="color:red" if entry['required']&&(!value||value.empty?)%>> <%=label%>
+                    </label>
+                    <% if entry['type'] == 'select' %>
+                      <div class="col-sm-8">
+                        <select class="form-control" name="user[prefs][profile][<%=entry['key']%>]">
+                          <% entry['options'].each do |option| %>
+                            <option value="<%=option%>" <%='selected' if option==value%>><%=option%></option>
+                          <% end %>
+                        </select>
+                      </div>
+                    <% else %>
+                      <div class="col-sm-8">
+                        <input type="text" class="form-control" name="user[prefs][profile][<%=entry['key']%>]" placeholder="<%=entry['form_field_description']%>" value="<%=value%>" ></input>
+                      </div>
+                    <% end %>
+                  </div>
+                <% end %>
+              <% end %>
+
+              <%# If the user has other prefs, we need to preserve them %>
+              <% current_user.prefs.each do |key, value| %>
+                <% if key != :profile %>
+                  <input type="hidden" name="user[prefs][<%=key%>]" value="<%=value.to_json%>">
+                <% end %>
+              <% end %>
+
+              <% if show_save_button %>
+                <div class="form-group">
+                  <div class="col-sm-offset-3 col-sm-8">
+                    <button type="submit" class="btn btn-primary">Save profile</button>
+                  </div>
+                </div>
+              <% end %>
+            <% end %>
+        </div>
+    </div>
+</div>
diff --git a/apps/workbench/app/views/users/request_shell_access.js b/apps/workbench/app/views/users/request_shell_access.js
new file mode 100644 (file)
index 0000000..9a20ace
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+var timestamp = new Date();
+localStorage.setItem("request_shell_access",
+                     "A request for shell access was sent on " +
+                     timestamp.toLocaleDateString() +
+                     " at " +
+                     timestamp.toLocaleTimeString());
+// The storage event gets triggered automatically in _other_ windows
+// when we hit localStorage, but we also need to fire it manually in
+// _this_ window.
+$(document).trigger('storage');
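+// Elsewhere a handler can read the message back, along these lines
+// (a sketch; selectors are illustrative):
+//
+//   $(document).on('storage', function() {
+//     var msg = localStorage.getItem('request_shell_access');
+//     if (msg) { $('.no_shell_access .alert-success p').text(msg); }
+//   });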
diff --git a/apps/workbench/app/views/users/setup.js.erb b/apps/workbench/app/views/users/setup.js.erb
new file mode 100644 (file)
index 0000000..6032dfd
--- /dev/null
@@ -0,0 +1,6 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+$("#user-setup-modal-window").modal("hide");
+document.location.reload();
diff --git a/apps/workbench/app/views/users/setup_popup.js.erb b/apps/workbench/app/views/users/setup_popup.js.erb
new file mode 100644 (file)
index 0000000..0a98719
--- /dev/null
@@ -0,0 +1,48 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+$("#user-setup-modal-window").html("<%= escape_javascript(render partial: 'setup_popup') %>");
+
+// disable the submit button on load
+var $input = $('input:text'),
+$register = $('#register');
+
+var email_disabled = document.forms["setup_form"]["email"].disabled;
+var email_value = document.forms["setup_form"]["email"].value;
+var prefix_value = document.forms["setup_form"]["openid_prefix"].value;
+if ((email_disabled == false) && (email_value == null || email_value == "" ||
+        prefix_value == null || prefix_value == "")) {
+  $register.prop('disabled', true);
+}
+
+// capture events to enable submit button when applicable
+$input.on('keyup paste mouseleave', function() {
+  var trigger = false;
+
+  var email_disabled = document.forms["setup_form"]["email"].disabled;
+  var email_value = document.forms["setup_form"]["email"].value;
+  var prefix_value = document.forms["setup_form"]["openid_prefix"].value;
+
+  var emailRegExp = /^[\w.-]+@([\w-]+\.)+[\w-]{2,}$/;
+  var validEmail = false;
+
+  if (emailRegExp.test(email_value)) {
+    validEmail = true;
+  }
+
+  if ((email_disabled == false) && (!validEmail || email_value == null ||
+            email_value == "" || prefix_value == null || prefix_value == "")){
+    trigger = true;
+  }
+
+  $register.prop('disabled', trigger);
+});
+
+// reset form input fields, for the next time around
+function reset_form() {
+  $('#email').val("");
+  $('#openid_prefix').val("");
+  $('#repo_name').val("");
+  $('select').val('');
+}
diff --git a/apps/workbench/app/views/users/ssh_keys.html.erb b/apps/workbench/app/views/users/ssh_keys.html.erb
new file mode 100644 (file)
index 0000000..d4a1ba4
--- /dev/null
@@ -0,0 +1,6 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render :partial => 'ssh_keys' %>
+<div id="add-ssh-key-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
diff --git a/apps/workbench/app/views/users/storage.html.erb b/apps/workbench/app/views/users/storage.html.erb
new file mode 100644 (file)
index 0000000..151ea8b
--- /dev/null
@@ -0,0 +1,70 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :css do %>
+table#users-storage-table th {
+    overflow-x: hidden;
+    text-align: center;
+}
+table#users-storage-table .byte-value {
+    text-align: right;
+}
+<% end %>
+<table class="table table-condensed arv-index" id="users-storage-table">
+  <colgroup>
+    <col />
+  </colgroup>
+
+  <tr>
+    <th rowspan="2">User</th>
+    <th colspan="2">
+      Collections Read Size
+    </th>
+    <th colspan="2">
+      Collections Persisted Storage
+    </th>
+    <th rowspan="2">Measured At</th>
+  </tr>
+  <tr>
+    <% 2.times do %>
+    <th class="byte-value">
+      Total (unweighted)
+    </th>
+    <th class="byte-value">
+      Shared (weighted)
+    </th>
+    <% end %>
+  </tr>
+
+  <% @users.each do |user| %>
+  <tr>
+    <td>
+      <% if user.uuid %>
+      <small>
+        <%= link_to_if_arvados_object user, friendly_name: true %>
+      </small>
+      <% else %>
+      <b>Total</b>
+      <% end %>
+    </td>
+    <% [:read_collections_total_bytes, :read_collections_weighted_bytes, :persisted_collections_total_bytes, :persisted_collections_weighted_bytes].each do |key| %>
+    <td class="byte-value">
+      <%= human_readable_bytes_html(@user_storage[user.uuid].fetch(key,0).floor) %>
+    </td>
+    <% end %>
+    <% if @log_date.key?(user.uuid) %>
+    <td class="date" title="<%= @log_date[user.uuid] %>">
+      <%= @log_date[user.uuid].strftime('%F') %>
+    </td>
+    <% else %>
+    <td></td>
+    <% end %>
+  </tr>
+  <% end %>
+</table>
+
+<% content_for :footer_js do %>
+$('#users-storage-table td small').each(function(){
+  if ($(this).html().trim() == '0')
+    $(this).css('opacity', '0.3');
+});
+<% end %>
diff --git a/apps/workbench/app/views/users/virtual_machines.html.erb b/apps/workbench/app/views/users/virtual_machines.html.erb
new file mode 100644 (file)
index 0000000..3133f1b
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render :partial => 'virtual_machines' %>
diff --git a/apps/workbench/app/views/users/welcome.html.erb b/apps/workbench/app/views/users/welcome.html.erb
new file mode 100644 (file)
index 0000000..b10ca8b
--- /dev/null
@@ -0,0 +1,45 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :breadcrumbs do raw '<!-- -->' end %>
+
+<div class="row">
+  <div class="col-sm-8 col-sm-push-4" style="margin-top: 1em">
+    <div class="well clearfix">
+      <%= image_tag "dax.png", style: "width: 112px; height: 150px; margin-right: 2em", class: 'pull-left' %>
+
+      <h3 style="margin-top:0">Please log in.</h3>
+
+      <p>
+
+        The "Log in" button below will show you a Google sign-in page.
+        After you assure Google that you want to log in here with your
+        Google account, you will be redirected back here to
+        <%= Rails.configuration.site_name %>.
+
+      </p><p>
+
+        If you have never used <%= Rails.configuration.site_name %>
+        before, logging in for the first time will automatically
+        create a new account.
+
+      </p><p>
+
+        <i><%= Rails.configuration.site_name %> uses your name and
+          email address only for identification, and does not retrieve
+          any other personal information from Google.</i>
+
+      </p>
+        <%# Todo: add list of external authentications providers to
+            discovery document, then generate the option list here. Right
+            now, don't provide 'auth_provider' to get the default one. %>
+        <div class="pull-right">
+          <%= link_to arvados_api_client.arvados_login_url(return_to: request.url), class: "btn btn-primary" do %>
+          Log in to <%= Rails.configuration.site_name %>
+          <i class="fa fa-fw fa-arrow-circle-right"></i>
+          <% end %>
+        </div>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/virtual_machines/_show_help.html.erb b/apps/workbench/app/views/virtual_machines/_show_help.html.erb
new file mode 100644 (file)
index 0000000..1391657
--- /dev/null
@@ -0,0 +1,30 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<p>
+Sample <code>~/.ssh/config</code> section:
+</p>
+
+<pre>
+Host *.arvados
+  ProxyCommand ssh -p2222 turnout@switchyard.<%= current_api_host || 'xyzzy.arvadosapi.com' %> -x -a $SSH_PROXY_FLAGS %h
+<% if @objects.first.andand.current_user_logins.andand.first %>
+  User <%= @objects.first.current_user_logins.andand.first %>
+<% end %>
+</pre>
+
+<p>
+Sample login command:
+</p>
+
+<pre>
+ssh <%= @objects.first.andand.hostname.andand.sub('.'+current_api_host,'') or 'vm-hostname' %>.arvados
+</pre>
+
+<p>
+  See also:
+  <%= link_to raw('Arvados Docs &rarr; User Guide &rarr; SSH access'),
+  "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+  target: "_blank"%>.
+</p>
diff --git a/apps/workbench/app/views/virtual_machines/webshell.html.erb b/apps/workbench/app/views/virtual_machines/webshell.html.erb
new file mode 100644 (file)
index 0000000..202ae70
--- /dev/null
@@ -0,0 +1,53 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<html>
+  <head>
+    <title><%= @object.hostname %> / <%= Rails.configuration.site_name %></title>
+    <link rel="stylesheet" href="<%= asset_path 'webshell/styles.css' %>" type="text/css">
+    <style type="text/css">
+      body {
+        margin: 0px;
+      }
+    </style>
+    <script type="text/javascript"><!--
+      (function() {
+        // We would like to hide overflowing lines as this can lead to
+        // visually jarring results if the browser substitutes oversized
+        // Unicode characters from different fonts. Unfortunately, a bug
+        // in Firefox prevents it from allowing multi-line text
+        // selections whenever we change the "overflow" style. So, only
+        // do so for non-Netscape browsers.
+        if (typeof navigator.appName == 'undefined' ||
+            navigator.appName != 'Netscape') {
+          document.write('<style type="text/css">' +
+                         '#vt100 #console div, #vt100 #alt_console div {' +
+                         '  overflow: hidden;' +
+                         '}' +
+                         '</style>');
+        }
+      })();
+
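+      // Log in by replaying keystrokes into the terminal: send the login
+      // name, then after a pause send the API token as the password.
+      // The fixed 2s delays are a simple heuristic for prompt readiness.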
+      function login() {
+        var sh = new ShellInABox("<%= j @webshell_url %>");
+        setTimeout(function() {
+          sh.keysPressed("<%= j params[:login] %>\n");
+          setTimeout(function() {
+            sh.keysPressed("<%= j Thread.current[:arvados_api_token] %>\n");
+            sh.vt100('(sent authentication token)\n');
+          }, 2000);
+        }, 2000);
+      }
+    // -->
+</script>
+    <link rel="icon" href="<%= asset_path 'favicon.ico' %>" type="image/x-icon">
+    <script type="text/javascript" src="<%= asset_path 'webshell/shell_in_a_box.js' %>"></script>
+  </head>
+  <!-- Load ShellInABox from a timer as Konqueror sometimes fails to
+       correctly deal with the enclosing frameset (if any), if we do not
+       do this
+   -->
+<body onload="setTimeout(login, 1000)"
+    scroll="no"><noscript>JavaScript must be enabled for ShellInABox</noscript>
+</body>
+</html>
diff --git a/apps/workbench/app/views/websocket/index.html.erb b/apps/workbench/app/views/websocket/index.html.erb
new file mode 100644 (file)
index 0000000..6274fb0
--- /dev/null
@@ -0,0 +1,38 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :page_title do %>
+  Event bus debugging page
+<% end %>
+<h1>Event bus debugging page</h1>
+
+<form>
+<textarea style="width:100%; height: 10em" id="websocket-message-content"></textarea>
+<button type="button" id="send-to-websocket">Send</button>
+</form>
+
+<br>
+
+<p id="PutStuffHere"></p>
+
+<script>
+$(function() {
+putStuffThere = function (content) {
+  $("#PutStuffHere").append(content + "<br>");
+};
+
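+// The browser WebSocket API cannot set request headers, so the Arvados
+// API token is passed as a query parameter instead.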
+var dispatcher = new WebSocket('<%= arvados_api_client.discovery[:websocketUrl] %>?api_token=<%= Thread.current[:arvados_api_token] %>');
+dispatcher.onmessage = function(event) {
+  //putStuffThere(JSON.parse(event.data));
+  putStuffThere(event.data);
+};
+
+sendStuff = function () {
+  dispatcher.send($("#websocket-message-content").val());
+};
+
+$("#send-to-websocket").click(sendStuff);
+});
+
+</script>
diff --git a/apps/workbench/app/views/work_units/_component_detail.html.erb b/apps/workbench/app/views/work_units/_component_detail.html.erb
new file mode 100644 (file)
index 0000000..e48a91e
--- /dev/null
@@ -0,0 +1,220 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+  collections = [current_obj.outputs, current_obj.docker_image].flatten.compact.uniq
+  collections_pdhs = collections.select {|x| !CollectionsHelper.match(x).nil?}.uniq.compact
+  collections_uuids = collections - collections_pdhs
+  preload_collections_for_objects collections_uuids if collections_uuids.any?
+  preload_links_for_objects collections_uuids if collections_uuids.any?
+
+  preload_objects_for_dataclass(Repository, [current_obj.repository], :name) if current_obj.repository
+
+  # if container_X, preload mounted collections
+  if @object.is_a? Container or @object.is_a? ContainerRequest
+    # get any collections in mounts
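+    # A portable data hash locator has the form "<32 hex digits>+<size>",
+    # e.g. d41d8cd98f00b204e9800998ecf8427e+0, hence the scan pattern below.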
+    mounts = current_obj.send(:mounts) if current_obj.respond_to?(:mounts)
+    input_obj = mounts.andand[:"/var/lib/cwl/cwl.input.json"].andand[:content]
+    if input_obj
+      input_obj.to_s.scan(/([0-9a-f]{32}\+\d+)/).each {|cs| collections_pdhs += cs}
+    end
+
+    command = current_obj.send(:command) if current_obj.respond_to?(:command)
+    if command
+      command.to_s.scan(/([0-9a-f]{32}\+\d+)/).each {|cs| collections_pdhs += cs}
+    end
+  end
+
+  collections_pdhs = collections_pdhs.compact.uniq
+  preload_for_pdhs collections_pdhs if collections_pdhs.any?
+  preload_links_for_objects collections_pdhs if collections_pdhs.any?
+%>
+
+      <div class="container">
+        <div class="row">
+          <div class="col-md-6" style="overflow-x: auto">
+            <% if current_obj.uuid.nil? %>
+              No <%= current_obj.title %> has been submitted yet.
+            <% else %>
+            <table class="table table-condensed">
+              <% keys = [:uuid, :modified_by_user_uuid, :created_at, :started_at, :finished_at, :container_uuid] %>
+              <% keys << :log_collection if @object.uuid != current_obj.uuid %>
+              <% keys << :outputs %>
+              <% keys.each do |k| %>
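+                <%# :outputs and :log_collection need special casing: an
+                    empty outputs list hides the row, while a running
+                    container gets a live log link before its log
+                    collection exists. %>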
+                <%
+                  val = current_obj.send(k) if current_obj.respond_to?(k)
+                  if k == :outputs
+                    has_val = val.andand.any?
+                  elsif k == :log_collection and current_obj.state_label == "Running"
+                    has_val = true
+                  else
+                    has_val = val
+                  end
+                %>
+                <% if has_val %>
+                <tr>
+                  <td style="padding-right: 1em">
+                    <%= k.to_s %>:
+                  </td>
+                  <td>
+                    <% if k == :uuid %>
+                      <%= link_to_arvados_object_if_readable(val, val, link_text: val) %>
+                    <% elsif k.to_s.end_with? 'uuid' %>
+                      <%= link_to_arvados_object_if_readable(val, val, friendly_name: true) %>
+                    <% elsif k.to_s.end_with? '_at' %>
+                      <%= render_localized_date(val) %>
+                    <% elsif k == :outputs and val.any? %>
+                      <% if val.size == 1 %>
+                        <%= link_to_arvados_object_if_readable(val[0], "#{val[0]} (Unavailable)", friendly_name: true) %>
+                      <% else %>
+                        <%= render partial: 'work_units/show_outputs', locals: {id: current_obj.uuid, outputs: val, align:""} %>
+                      <% end %>
+                    <% elsif k == :log_collection %>
+                      <%= render partial: 'work_units/show_log_link', locals: {wu: current_obj} %>
+                    <% else %>
+                      <%= val %>
+                    <% end %>
+                  </td>
+                </tr>
+                <% end %>
+              <% end %>
+            </table>
+            <% end %>
+          </div>
+          <div class="col-md-6">
+            <table class="table table-condensed">
+              <% # link to repo tree/file only if the repo is readable and the commit is a sha1
+                 repo = (/^[0-9a-f]{40}$/ =~ current_obj.script_version and
+                         current_obj.repository and
+                         object_for_dataclass(Repository, current_obj.repository, :name))
+                 repo = nil unless repo.andand.http_fetch_url
+                 %>
+              <% [:script, :repository, :script_version, :supplied_script_version, :nondeterministic,
+                  :priority, :runtime_constraints, :requesting_container_uuid].each do |k| %>
+                <% val = current_obj.send(k) if current_obj.respond_to?(k) %>
+                <% if val %>
+                <tr valign="top">
+                  <td style="padding-right: 1em">
+                    <%= k.to_s %>:
+                  </td>
+                  <td>
+                    <% if repo and k == :repository %>
+                      <%= link_to val, show_repository_tree_path(id: repo.uuid, commit: current_obj.script_version, path: '/') %>
+                    <% elsif repo and k == :script %>
+                      <%= link_to val, show_repository_blob_path(id: repo.uuid, commit: current_obj.script_version, path: 'crunch_scripts/'+current_obj.script) %>
+                    <% elsif repo and k == :script_version %>
+                      <%= link_to val, show_repository_commit_path(id: repo.uuid, commit: current_obj.script_version) %>
+                    <% elsif k == :runtime_constraints and val.any? %>
+                      <%= render partial: 'work_units/show_table_data', locals: {id: current_obj.uuid, name: k, data_map: val} %>
+                    <% elsif k.to_s.end_with? 'uuid' %>
+                      <%= link_to_arvados_object_if_readable(val, val, friendly_name: true) %>
+                    <% else %>
+                      <%= val %>
+                    <% end %>
+                  </td>
+                </tr>
+                <% end %>
+              <% end %>
+
+              <%
+                mounts = current_obj.send(:mounts) if current_obj.respond_to?(:mounts)
+                mount_wf = mounts.andand[:"/var/lib/cwl/workflow.json"]
+                mount_wf = mount_wf[5..-1] if mount_wf.andand.is_a?(String) and mount_wf.start_with?('keep:')
+                mount_wf_cls = resource_class_for_uuid(mount_wf) if mount_wf
+              %>
+              <% if mount_wf_cls == Collection %>
+                <tr>
+                  <td style="padding-right: 1em">
+                    workflow.json:
+                  </td>
+                  <td>
+                    <%= link_to_if_arvados_object mount_wf, friendly_name: true %>
+                  </td>
+                </tr>
+              <% end %>
+
+              <% if current_obj.runtime_constraints.andand[:docker_image] and current_obj.docker_image %>
+                <tr>
+                  <td style="padding-right: 1em">
+                    docker_image:
+                  </td>
+                  <td>
+                    <%= current_obj.runtime_constraints[:docker_image] %>
+                  </td>
+                </tr>
+                <tr>
+                  <td style="padding-right: 1em">
+                    docker_image_locator:
+                  </td>
+                  <td>
+                    <%= link_to_arvados_object_if_readable(current_obj.docker_image,
+                      current_obj.docker_image, friendly_name: true) %>
+                  </td>
+                </tr>
+              <% elsif current_obj.docker_image %>
+                <tr>
+                  <td style="padding-right: 1em">
+                    docker_image_locator:
+                  </td>
+                  <td>
+                    <%= link_to_arvados_object_if_readable(current_obj.docker_image,
+                      current_obj.docker_image, friendly_name: true) %>
+                  </td>
+                </tr>
+              <% end %>
+            </table>
+          </div>
+
+          <div class="col-md-12">
+            <table class="table table-condensed" style="table-layout:fixed;">
+              <col width="15%" />
+              <col width="85%" />
+              <% [:command].each do |k| %>
+                <% val = current_obj.send(k) if current_obj.respond_to?(k) %>
+                <% if val %>
+                <tr>
+                  <td valign="top">
+                    <%= k.to_s %>:
+                  </td>
+                  <td style="word-wrap: break-all;">
+                    <% if k == :command %>
+                        <% val = JSON.pretty_generate(val) %>
+                        <%= render partial: 'show_text_with_locators', locals: {data_height: 200, text_data: val} %>
+                    <% else %>
+                      <%= val %>
+                    <% end %>
+                  </td>
+                </tr>
+                <% end %>
+              <% end %>
+
+              <%
+                mounts = current_obj.send(:mounts) if current_obj.respond_to?(:mounts)
+                input_obj = mounts.andand[:"/var/lib/cwl/cwl.input.json"].andand[:content]
+                mnt_inputs = JSON.pretty_generate(input_obj) if input_obj
+              %>
+              <% if mnt_inputs %>
+                <tr>
+                  <td valign="top">
+                    cwl.input.json:
+                  </td>
+                  <td style="word-wrap: break-all;">
+                    <%= render partial: 'show_text_with_locators', locals: {data_height: 400, text_data: mnt_inputs} %>
+                  </td>
+                </tr>
+              <% end %>
+            </table>
+          </div>
+
+        </div>
+
+        <% if current_obj.parameters and !current_obj.parameters.empty? %>
+        <div class="row">
+          <div class="col-md-12">
+            <p>script_parameters:</p>
+            <pre><%= JSON.pretty_generate(current_obj.parameters) rescue nil %></pre>
+          </div>
+        </div>
+        <% end %>
+      </div>
diff --git a/apps/workbench/app/views/work_units/_progress.html.erb b/apps/workbench/app/views/work_units/_progress.html.erb
new file mode 100644 (file)
index 0000000..bfc5100
--- /dev/null
@@ -0,0 +1,16 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if wu.is_running? %>
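+  <%# On the object's own page, a running work unit at 0 progress gets
+      an "Active" badge instead of an empty-looking progress bar. %>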
+  <% if @object.andand.uuid == wu.uuid and wu.progress == 0.0 %>
+    <span class="label label-<%= wu.state_bootstrap_class %>"> Active </span>
+  <% else %>
+    <div class="progress" style="margin-bottom: 0px">
+      <span class="progress-bar progress-bar-<%= wu.state_bootstrap_class %>" style="width: <%= wu.progress*100 %>%;">
+      </span>
+    </div>
+  <% end %>
+<% else %>
+  <span class="label label-<%= wu.state_bootstrap_class %>"><%= wu.state_label %></span>
+<% end %>
diff --git a/apps/workbench/app/views/work_units/_show_all_processes.html.erb b/apps/workbench/app/views/work_units/_show_all_processes.html.erb
new file mode 100644 (file)
index 0000000..0d6d831
--- /dev/null
@@ -0,0 +1,65 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="pull-right">
+  <div class="form-group">
+    <input type="text" class="form-control filterable-control recent-all-processes-filterable-control"
+           placeholder="Search all processes"
+           data-filterable-target="#all-processes-scroll"
+           value="<%= params[:search] %>" size="40" />
+  </div>
+  <div class="checkbox">
+    <label>
+      <input id="IncludeChildProcs" type="checkbox" class="filterable-control"
+            data-on-value="{&quot;show_children&quot;:true}"
+            data-off-value="{}"
+            data-filterable-target="#all-processes-scroll" />
+      Show child processes
+    </label>
+  </div>
+</div>
+
+<div>
+  <div>
+    <div>
+      <table class="table table-condensed table-fixedlayout arv-recent-all-processes">
+        <colgroup>
+          <col width="25%" />
+          <col width="10%" />
+          <col width="20%" />
+          <col width="20%" />
+          <col width="20%" />
+          <col width="5%" />
+        </colgroup>
+
+        <thead>
+          <tr class="contain-align-left">
+            <th>
+              Process
+            </th>
+            <th>
+              Status
+            </th>
+            <th>
+              Owner
+            </th>
+            <th>
+              Created at
+            </th>
+            <th>
+              Output
+            </th>
+            <th>
+            </th>
+          </tr>
+        </thead>
+
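+        <%# Rows load lazily: the tbody doubles as the infinite-scroll
+            container and pulls pages from the all_processes_rows partial. %>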
+        <tbody data-infinite-scroller="#all-processes-scroll" id="all-processes-scroll"
+               data-infinite-content-params-from-exclude-child-procs="{}"
+               data-infinite-content-href="<%= url_for partial: :all_processes_rows %>" >
+        </tbody>
+      </table>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/work_units/_show_all_processes_rows.html.erb b/apps/workbench/app/views/work_units/_show_all_processes_rows.html.erb
new file mode 100644 (file)
index 0000000..b0afb33
--- /dev/null
@@ -0,0 +1,27 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% @objects.each do |obj| %>
+  <% wu = obj.work_unit %>
+  <tr data-object-uuid="<%= wu.uuid %>" >
+    <td>
+      <%= link_to_if_arvados_object obj, friendly_name: true %>
+    </td>
+    <td>
+      <span class="label label-<%= wu.state_bootstrap_class %>"><%= wu.state_label %></span>
+    </td>
+    <td>
+      <%= link_to_if_arvados_object wu.owner_uuid, friendly_name: true %>
+    </td>
+    <td>
+      <%= render_localized_date(wu.created_at) %>
+    </td>
+    <td>
+      <%= render partial: 'work_units/show_output', locals: {wu: wu, align: ''} %>
+    </td>
+    <td>
+      <%= render partial: 'delete_object_button', locals: {object:obj} %>
+    </td>
+  </tr>
+<% end %>
diff --git a/apps/workbench/app/views/work_units/_show_child.html.erb b/apps/workbench/app/views/work_units/_show_child.html.erb
new file mode 100644 (file)
index 0000000..53f3e43
--- /dev/null
@@ -0,0 +1,63 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="panel panel-default">
+  <div class="panel-heading">
+      <div class="row">
+        <div class="col-md-3" style="word-break:break-all;">
+          <h4 class="panel-title">
+            <a class="component-detail-panel" data-toggle="collapse" href="#collapse<%= i %>">
+              <%= current_obj.label %> <span class="caret" href="#collapse<%= i %>"></span>
+            </a>
+          </h4>
+        </div>
+
+        <div class="col-md-2 pipeline-instance-spacing">
+          <%= render partial: 'work_units/progress', locals: {wu: current_obj} %>
+        </div>
+
+        <% if not current_obj %>
+          <div class="col-md-7"></div>
+        <% else %>
+          <% walltime = current_obj.walltime %>
+          <% cputime = current_obj.cputime %>
+          <% runningtime = current_obj.runningtime %>
+          <div class="col-md-3">
+          <% if walltime and cputime %>
+            <%= render_runtime([walltime, runningtime].max, false) %>
+            <% if cputime > 0 %> / <%= render_runtime(cputime, false) %> (<%= (cputime/runningtime).round(1) %>&Cross;)<% end %>
+          <% end %>
+          </div>
+
+          <% queuetime = current_obj.queuedtime %>
+          <% if queuetime %>
+            <div class="col-md-3">
+              Queued for <%= render_runtime(queuetime, false) %>.
+            </div>
+          <% elsif current_obj.is_running? %>
+            <div class="col-md-3">
+              <span class="task-summary-status">
+                <%= current_obj.child_summary_str %>
+              </span>
+            </div>
+          <% end %>
+
+          <div class="col-md-1 pipeline-instance-spacing">
+          <% if current_obj.can_cancel? and @object.editable? %>
+              <%= form_tag "#{current_obj.uri}/cancel", remote: true, style: "display:inline; padding-left: 1em" do |f| %>
+                <%= hidden_field_tag :return_to, url_for(@object) %>
+                <%= button_tag "Cancel", {class: 'btn btn-xs btn-warning', id: "cancel-child-button"} %>
+              <% end %>
+          <% end %>
+          </div>
+        <% end %>
+      </div>
+  </div>
+
+  <% content_url = url_for(controller: :work_units, action: :show_child_component, id: @object.uuid, object_type: @object.class.to_s) %>
+  <div id="collapse<%=i%>" class="work-unit-component-detail panel-collapse collapse <%= if expanded then 'in' end %>" content-url="<%=content_url%>" action-data="<%={current_obj_type: current_obj.class.to_s, current_obj_uuid: current_obj.uuid, current_obj_name: current_obj.label, current_obj_parent: current_obj.parent}.to_json%>">
+    <div class="panel-body work-unit-component-detail-body">
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/work_units/_show_component.html.erb b/apps/workbench/app/views/work_units/_show_component.html.erb
new file mode 100644 (file)
index 0000000..cac263d
--- /dev/null
@@ -0,0 +1,100 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%# Work unit status %>
+
+<div class="row">
+  <div class="col-md-4">
+    <% if wu.is_paused? %>
+      <p>
+        This <%= wu.title %> is paused. Children that were running
+        were cancelled and no new processes will be submitted.
+      </p>
+    <% end %>
+
+    <%= raw(wu.show_runtime) %>
+  </div>
+  <%# Need additional handling for main object display  %>
+  <% if @object.uuid == wu.uuid %>
+    <div class="col-md-3">
+      <% if wu.is_running? and wu.child_summary_str %>
+        <%= wu.child_summary_str %>
+      <% end %>
+    </div>
+    <div class="col-md-3">
+      <%= render partial: 'work_units/progress', locals: {wu: wu} %>
+    </div>
+    <div class="col-md-2">
+      <% if wu.can_cancel? and @object.editable? %>
+        <% confirm = if wu.confirm_cancellation then {confirm: wu.confirm_cancellation} else {} end %>
+        <%= form_tag "#{wu.uri}/cancel", remote: true, style: "display:inline; padding-left: 1em" do |f| %>
+          <%= hidden_field_tag :return_to, url_for(@object) %>
+          <%= button_tag "Cancel", {class: 'btn btn-xs btn-warning', id: "cancel-obj-button", data: confirm} %>
+        <% end %>
+      <% end %>
+    </div>
+  <% end %>
+</div>
+
+<%# Display runtime error information %>
+<% if wu.runtime_status.andand[:error] %>
+<div class="container">
+  <div class="col-md-12">
+    <div class="panel panel-danger">
+      <div class="panel-heading">
+        <h4 class="panel-title">
+          <a class="component-detail-panel" data-toggle="collapse" href="#errorDetail">
+            <span class="caret"></span> Error: <%= sanitize(wu.runtime_status[:error]) %>
+          </a>
+        </h4>
+      </div>
+      <div id="errorDetail" class="panel-body panel-collapse collapse">
+        <% if wu.runtime_status[:errorDetail] %>
+          <pre><%= sanitize(wu.runtime_status[:errorDetail]) %></pre>
+        <% else %>
+          No detailed information available.
+        <% end %>
+      </div>
+    </div>
+  </div>
+</div>
+<% end %>
+
+<%# Display runtime warning message %>
+<% if wu.runtime_status.andand[:warning] %>
+<div class="container">
+  <div class="col-md-12">
+    <div class="panel panel-warning">
+      <div class="panel-heading">
+        <h4 class="panel-title">
+          <a class="component-detail-panel" data-toggle="collapse" href="#warningDetail">
+            <span class="caret"></span> Warning: <%= sanitize(wu.runtime_status[:warning]) %>
+          </a>
+        </h4>
+      </div>
+      <div id="warningDetail" class="panel-body panel-collapse collapse">
+        <% if wu.runtime_status[:warningDetail] %>
+          <pre><%= sanitize(wu.runtime_status[:warningDetail]) %></pre>
+        <% else %>
+          No detailed information available.
+        <% end %>
+      </div>
+    </div>
+  </div>
+</div>
+<% end %>
+
+<p>
+  <%= render(partial: 'work_units/component_detail', locals: {current_obj: wu}) %>
+</p>
+
+<%# Work unit children %>
+<% if wu.has_unreadable_children %>
+  <%= render(partial: "pipeline_instances/show_components_json",
+             locals: {error_name: "Unreadable components", backtrace: nil, wu: wu}) %>
+<% else %>
+  <% wu.children.each do |c| %>
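+    <%# A child without a UUID still needs a unique DOM id for its
+        collapse panel, hence the random token fallback. %>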
+    <%= render(partial: 'work_units/show_child', locals: {current_obj: c, i: (c.uuid || rand(2**128).to_s(36)), expanded: false}) %>
+  <% end %>
+<% end %>
diff --git a/apps/workbench/app/views/work_units/_show_log.html.erb b/apps/workbench/app/views/work_units/_show_log.html.erb
new file mode 100644 (file)
index 0000000..1f643ac
--- /dev/null
@@ -0,0 +1,32 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% wu = obj.work_unit(name) %>
+
+<% render_log = wu.render_log %>
+<% if render_log %>
+  <div>
+    <% log_url = url_for render_log[:log] %>
+    <p> <a href="<%= log_url %>">Download the log</a> </p>
+    <%= render(partial: render_log[:partial], locals: render_log[:locals]) %>
+  </div>
+<% end %>
+
+<% live_log_lines = wu.live_log_lines(Rails.configuration.running_job_log_records_to_fetch).join("\n") %>
+<% if !render_log or (live_log_lines.size > 0) %>
+<%# Still running, or recently finished and logs are still available from logs table %>
+<%# Show recent logs in terminal window %>
+<h4>Recent logs</h4>
+<pre id="event_log_div"
+     class="arv-log-event-listener arv-log-event-handler-append-logs arv-job-log-window"
+     data-object-uuids="<%= wu.log_object_uuids.join(' ') %>"
+  ><%= live_log_lines %>
+</pre>
+
+<%# Applying a long throttle suppresses the auto-refresh of this
+    partial that would normally be triggered by arv-log-event. %>
+<div class="arv-log-refresh-control"
+     data-load-throttle="86486400000" <%# 1001 nights %>>
+</div>
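+<%# Worked out, since the throttle value is a magic number: 1001 days
+    expressed in milliseconds is 1001 * 86,400,000 = 86,486,400,000. %>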
+<% end %>
diff --git a/apps/workbench/app/views/work_units/_show_log_link.html.erb b/apps/workbench/app/views/work_units/_show_log_link.html.erb
new file mode 100644 (file)
index 0000000..a563a13
--- /dev/null
@@ -0,0 +1,18 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if wu.state_label.in? ["Complete", "Failed", "Cancelled"] %>
+  <% lc = wu.log_collection %>
+  <% if lc and object_readable(lc, Collection) and object_readable(wu.uuid) %>
+    <%= link_to("Log", "#{wu.uri}#Log") %>
+  <% else %>
+    Log unavailable
+  <% end %>
+<% elsif wu.state_label == "Running" %>
+  <% if object_readable(wu.uuid) %>
+    <%= link_to("Log", "#{wu.uri}#Log") %>
+  <% else %>
+    Log unavailable
+  <% end %>
+<% end %>
diff --git a/apps/workbench/app/views/work_units/_show_output.html.erb b/apps/workbench/app/views/work_units/_show_output.html.erb
new file mode 100644 (file)
index 0000000..9c76b4f
--- /dev/null
@@ -0,0 +1,17 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<span class="<%=align%> text-overflow-ellipsis" style="max-width: 100%">
+  <% outputs = wu.outputs %>
+  <% if outputs.size == 0 %>
+    No output
+  <% elsif outputs.size == 1 %>
+    <% if defined?(include_icon) && include_icon %>
+      <i class="fa fa-fw fa-archive"></i>
+    <% end %>
+    <%= link_to_if_arvados_object outputs[0], friendly_name: true %>
+  <% else %>
+    <%= render partial: 'work_units/show_outputs', locals: {id: wu.uuid, outputs: outputs, align:align} %>
+  <% end %>
+</span>
diff --git a/apps/workbench/app/views/work_units/_show_outputs.html.erb b/apps/workbench/app/views/work_units/_show_outputs.html.erb
new file mode 100644 (file)
index 0000000..11286ad
--- /dev/null
@@ -0,0 +1,16 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<span class="<%=align%>"><a href="#<%= id %>-outputs" data-toggle="collapse">Outputs <span class="caret"></span></a></span>
+<div class="row collapse" id="<%= id %>-outputs" >
+  <div class="col-md-12">
+    <div class="pull-right" style="max-width: 100%">
+      <% outputs.each do |out| %>
+        <div class="text-overflow-ellipsis">
+          <i class="fa fa-fw fa-archive"></i> <%= link_to_if_arvados_object out, friendly_name: true %>
+        </div>
+      <% end %>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/work_units/_show_status.html.erb b/apps/workbench/app/views/work_units/_show_status.html.erb
new file mode 100644 (file)
index 0000000..0039485
--- /dev/null
@@ -0,0 +1,27 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+    container_uuid = if @object.is_a?(Container) then @object.uuid elsif @object.is_a?(ContainerRequest) then @object.container_uuid end
+    if container_uuid
+      cols = ContainerRequest.columns.map(&:name) - %w(id updated_at mounts runtime_token)
+      reqs = ContainerRequest.select(cols).where(requesting_container_uuid: container_uuid).results
+      load_preloaded_objects(reqs)
+
+      child_cs = reqs.map(&:requesting_container_uuid).uniq
+      child_cs += reqs.map(&:container_uuid).uniq
+      preload_objects_for_dataclass(Container, child_cs)
+
+      wu = current_obj.work_unit(name, child_objects=reqs)
+    else
+      wu = current_obj.work_unit(name)
+    end
+%>
+
+<div class="arv-log-refresh-control"
+     data-load-throttle="86486400000" <%# 1001 nights %>
+     ></div>
+<%=
+   render(partial: 'work_units/show_component', locals: {wu: wu})
+%>
diff --git a/apps/workbench/app/views/work_units/_show_table_data.html.erb b/apps/workbench/app/views/work_units/_show_table_data.html.erb
new file mode 100644 (file)
index 0000000..57b4f99
--- /dev/null
@@ -0,0 +1,18 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="data-table <%=name%>-table" id="<%=name%>-table" style="max-height: 150px; overflow-y: auto;">
+  <table>
+    <% data_map.each do |k, v|%>
+      <tr>
+        <td>
+          <%= k.to_s %>
+        </td>
+        <td style="padding-left: 1em; padding-right: 1em">
+          <%= v %>
+        </td>
+      </tr>
+    <% end %>
+  </table>
+</div>
diff --git a/apps/workbench/app/views/work_units/index.html.erb b/apps/workbench/app/views/work_units/index.html.erb
new file mode 100644 (file)
index 0000000..ae59817
--- /dev/null
@@ -0,0 +1,5 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: 'work_units/show_all_processes' %>
diff --git a/apps/workbench/app/views/workflows/_show_chooser_preview.html.erb b/apps/workbench/app/views/workflows/_show_chooser_preview.html.erb
new file mode 100644 (file)
index 0000000..3ca68a5
--- /dev/null
@@ -0,0 +1,7 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="col-sm-11 col-sm-push-1 arv-description-in-table">
+  <%= (@object.description if @object.description.present?) || 'No description' %>
+</div>
diff --git a/apps/workbench/app/views/workflows/_show_definition.html.erb b/apps/workbench/app/views/workflows/_show_definition.html.erb
new file mode 100644 (file)
index 0000000..f0e01a1
--- /dev/null
@@ -0,0 +1,52 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+  wf_def = ActiveSupport::HashWithIndifferentAccess.new YAML::load(@object.definition) if @object.definition
+  wf_def = wf_def[:"$graph"].andand[0] || wf_def if wf_def
+
+  items = {}
+  baseCommand = wf_def.andand["baseCommand"]
+  items['baseCommand'] = baseCommand if baseCommand
+
+  args = wf_def.andand["arguments"]
+  items['arguments'] = args if args
+
+  hints = wf_def.andand["hints"]
+  items['hints'] = hints if hints
+
+  inputs = wf_def.andand["inputs"]
+  items['inputs'] = inputs if inputs
+
+  outputs = wf_def.andand["outputs"]
+  items['outputs'] = outputs if outputs
+
+  # preload the collections
+  collections_pdhs = []
+  items.each do |k, v|
+    v.to_s.scan(/([0-9a-f]{32}\+\d+)/).each {|l| collections_pdhs += l}
+  end
+  collections_pdhs = collections_pdhs.compact.uniq
+  preload_for_pdhs collections_pdhs if collections_pdhs.any?
+  preload_links_for_objects collections_pdhs if collections_pdhs.any?
+%>
+
+  <div class="col-md-12">
+    <table class="table table-condensed" style="table-layout:fixed;">
+      <col width="15%" />
+      <col width="85%" />
+
+      <% items.each do |k, v| %>
+          <tr>
+            <td valign="top">
+              <%= k %>:
+            </td>
+            <td>
+              <% val = JSON.pretty_generate(v) %>
+              <%= render partial: 'show_text_with_locators', locals: {data_height: 300, text_data: val} %>
+            </td>
+          </tr>
+      <% end %>
+    </table>
+  </div>
diff --git a/apps/workbench/app/views/workflows/_show_recent.html.erb b/apps/workbench/app/views/workflows/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..4acb1e4
--- /dev/null
@@ -0,0 +1,69 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: "paging", locals: {results: @objects, object: @object} %>
+
+<table class="table table-condensed arv-index">
+  <colgroup>
+    <col width="10%" />
+    <col width="10%" />
+    <col width="25%" />
+    <col width="40%" />
+    <col width="15%" />
+  </colgroup>
+
+  <thead>
+    <tr class="contain-align-left">
+      <th></th>
+      <th></th>
+      <th> name </th>
+      <th> description </th>
+      <th> owner </th>
+    </tr>
+  </thead>
+
+  <tbody>
+    <% @objects.sort_by { |ob| ob[:created_at] }.reverse.each do |ob| %>
+      <tr>
+        <td>
+          <%= button_to(choose_projects_path(id: "run-workflow-button",
+                                             title: 'Choose project',
+                                             editable: true,
+                                             action_name: 'Choose',
+                                             action_href: work_units_path,
+                                             action_method: 'post',
+                                             action_data: {'selection_param' => 'work_unit[owner_uuid]',
+                                                           'work_unit[template_uuid]' => ob.uuid,
+                                                           'success' => 'redirect-to-created-object'
+                                                          }.to_json),
+                  { class: "btn btn-default btn-xs", title: "Run #{ob.name}", remote: true, method: :get }
+              ) do %>
+                 <i class="fa fa-fw fa-play"></i> Run
+          <% end %>
+        </td>
+
+        <td>
+          <%= render :partial => "show_object_button", :locals => {object: ob, size: 'xs'} %>
+        </td>
+
+        <td>
+          <%= render_editable_attribute ob, 'name' %>
+        </td>
+
+        <td>
+          <% if ob.description %>
+            <%= render_attribute_as_textile(ob, "description", ob.description, false) %>
+            <br />
+          <% end %>
+        </td>
+
+        <td>
+          <%= link_to_if_arvados_object ob.owner_uuid, friendly_name: true %>
+        </td>
+      </tr>
+    <% end %>
+  </tbody>
+</table>
+
+<%= render partial: "paging", locals: {results: @objects, object: @object} %>
diff --git a/apps/workbench/app/views/workflows/show.html.erb b/apps/workbench/app/views/workflows/show.html.erb
new file mode 100644 (file)
index 0000000..ccb83de
--- /dev/null
@@ -0,0 +1,24 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if current_user.andand.is_active %>
+  <% content_for :tab_line_buttons do %>
+    <%= link_to(choose_projects_path(id: "run-workflow-button",
+                                     title: 'Choose project',
+                                     editable: true,
+                                     action_name: 'Choose',
+                                     action_href: work_units_path,
+                                     action_method: 'post',
+                                     action_data: {'selection_param' => 'work_unit[owner_uuid]',
+                                                   'work_unit[template_uuid]' => @object.uuid,
+                                                   'success' => 'redirect-to-created-object'
+                                                  }.to_json),
+          { class: "btn btn-primary btn-sm", title: "Run #{@object.name}", remote: true }
+        ) do %>
+      <i class="fa fa-fw fa-gear"></i> Run this workflow
+    <% end %>
+  <% end %>
+<% end %>
+
+<%= render file: 'application/show.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/config.ru b/apps/workbench/config.ru
new file mode 100644 (file)
index 0000000..7ee9ab6
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This file is used by Rack-based servers to start the application.
+
+require ::File.expand_path('../config/environment',  __FILE__)
+run ArvadosWorkbench::Application
diff --git a/apps/workbench/config/application.default.yml b/apps/workbench/config/application.default.yml
new file mode 100644 (file)
index 0000000..ccc7e4b
--- /dev/null
@@ -0,0 +1,336 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Do not use this file for site configuration. Create application.yml
+# instead (see application.yml.example).
+
+# Below is a sample setting for diagnostics testing.
+# Configure the workbench URL as "arvados_workbench_url"
+# Configure test user tokens as "user_tokens".
+#   At this time the tests need an "active" user token.
+# Also, configure the pipelines to be executed as "pipelines_to_test".
+# For each pipeline, identified by a name of your choice
+#     ("pipeline_1" and "pipeline_2" in this sample), provide the following:
+#   template_uuid: the uuid of the template to be executed
+#   input_paths: an array of inputs for the pipeline. Use either a collection's "uuid"
+#     or a file's "uuid/file_name" path in this array. If the pipeline does not require
+#     any inputs, this can be omitted.
+#   max_wait_seconds: the maximum time in seconds to wait for the pipeline run to
+#     complete. A default of 30 seconds is used when this value is not provided.
+diagnostics:
+  arvados_workbench_url: https://localhost:3031
+  user_tokens:
+    active: eu33jurqntstmwo05h1jr3eblmi961e802703y6657s8zb14r
+  pipelines_to_test:
+    pipeline_1:
+      template_uuid: zzzzz-p5p6p-rxj8d71854j9idn
+      input_paths: [zzzzz-4zz18-nz98douzhaa3jh2]
+      max_wait_seconds: 10
+    pipeline_2:
+      template_uuid: zzzzz-p5p6p-1xbobfobk94ppbv
+      input_paths: [zzzzz-4zz18-nz98douzhaa3jh2, zzzzz-4zz18-gpw9o5wpcti3nib]
+  container_requests_to_test:
+    container_request_1:
+      workflow_uuid: zzzzz-7fd4e-60e96shgwspt4mw
+      input_paths: []
+      max_wait_seconds: 10
+
+# Below is a sample setting for performance testing.
+# Configure the workbench URL as "arvados_workbench_url"
+# and the test user token as "user_token".
+performance:
+  arvados_workbench_url: https://localhost:3031
+  user_token: eu33jurqntstmwo05h1jr3eblmi961e802703y6657s8zb14r
+
+development:
+  cache_classes: false
+  eager_load: true
+  consider_all_requests_local: true
+  action_controller.perform_caching: false
+  action_mailer.raise_delivery_errors: false
+  active_support.deprecation: :log
+  action_dispatch.best_standards_support: :builtin
+  assets.debug: true
+  profiling_enabled: true
+  site_name: Arvados Workbench (dev)
+
+  # API server configuration
+  arvados_login_base: ~
+  arvados_v1_base: ~
+  arvados_insecure_https: ~
+
+production:
+  force_ssl: true
+  cache_classes: true
+  eager_load: true
+  consider_all_requests_local: false
+  action_controller.perform_caching: true
+  serve_static_files: false
+  assets.compile: false
+  assets.digest: true
+  i18n.fallbacks: true
+  active_support.deprecation: :notify
+  profiling_enabled: false
+  log_level: info
+
+  arvados_insecure_https: false
+
+  data_import_dir: /data/arvados-workbench-upload/data
+  data_export_dir: /data/arvados-workbench-download/data
+
+  # API server configuration
+  arvados_login_base: ~
+  arvados_v1_base: ~
+  arvados_insecure_https: ~
+
+  site_name: Arvados Workbench
+
+test:
+  cache_classes: true
+  eager_load: false
+  serve_static_files: true
+  static_cache_control: public, max-age=3600
+  consider_all_requests_local: true
+  action_controller.perform_caching: false
+  action_dispatch.show_exceptions: false
+  action_controller.allow_forgery_protection: false
+  action_mailer.delivery_method: :test
+  active_support.deprecation: :stderr
+  profiling_enabled: true
+  secret_token: <%= rand(2**256).to_s(36) %>
+  secret_key_base: <%= rand(2**256).to_s(36) %>
+  # This setting allows Workbench to start when running tests; set it to a
+  # correct value when testing relevant features.
+  keep_web_url: http://example.com/c=%{uuid_or_pdh}
+
+  # When you run the Workbench's integration tests, it starts the API
+  # server as a dependency.  These settings should match the API
+  # server's Rails defaults.  If you adjust those, change these
+  # settings in application.yml to match.
+  arvados_login_base: https://localhost:3000/login
+  arvados_v1_base: https://localhost:3000/arvados/v1
+  arvados_insecure_https: true
+
+  site_name: Workbench:test
+
+  # Enable user profile with one required field
+  user_profile_form_fields:
+    - key: organization
+      type: text
+      form_field_title: Institution
+      form_field_description: Your organization
+      required: true
+    - key: role
+      type: select
+      form_field_title: Your role
+      form_field_description: Choose the category that best describes your role in your organization.
+      options:
+        - Bio-informatician
+        - Computational biologist
+        - Biologist or geneticist
+        - Software developer
+        - IT
+        - Other
+
+common:
+  assets.js_compressor: false
+  assets.css_compressor: false
+  data_import_dir: /tmp/arvados-workbench-upload
+  data_export_dir: /tmp/arvados-workbench-download
+  arvados_login_base: https://arvados.local/login
+  arvados_v1_base: https://arvados.local/arvados/v1
+  arvados_insecure_https: true
+  activation_contact_link: mailto:info@arvados.org
+  arvados_docsite: http://doc.arvados.org
+  arvados_public_data_doc_url: http://arvados.org/projects/arvados/wiki/Public_Pipelines_and_Datasets
+  arvados_theme: default
+  show_user_agreement_inline: false
+  secret_token: ~
+  secret_key_base: false
+  default_openid_prefix: https://www.google.com/accounts/o8/id
+  send_user_setup_notification_email: true
+
+  # Scratch directory used by the remote repository browsing
+  # feature. If it doesn't exist, it (and any missing parents) will be
+  # created using mkdir_p.
+  repository_cache: <%= File.expand_path 'tmp/git', Rails.root %>
+
+  # Set user_profile_form_fields to enable and configure the user
+  # profile page. Default is set to false. A commented example with
+  # full description is provided below.
+  user_profile_form_fields: false
+
+  # Below is a sample setting of the user_profile_form_fields config parameter.
+  # This configuration parameter should be set either to false (to disable) or
+  # to an array as shown below.
+  # Configure the list of input fields to be displayed on the profile page
+  # using the attribute "key" for each input field.
+  # This sample shows a configuration with one required and one optional form field.
+  # For each input field:
+  #   You can specify "type" as "text" or "select".
+  #   List the "options" to be displayed for each "select" menu.
+  #   Set "required" to "true" for any field to make it required.
+  # If any required field is missing from the user's profile, the user will be
+  # redirected to the profile page before they can access any Workbench features.
+  #user_profile_form_fields:
+  #  - key: organization
+  #    type: text
+  #    form_field_title: Institution/Company
+  #    form_field_description: Your organization
+  #    required: true
+  #  - key: role
+  #    type: select
+  #    form_field_title: Your role
+  #    form_field_description: Choose the category that best describes your role in your organization.
+  #    options:
+  #      - Bio-informatician
+  #      - Computational biologist
+  #      - Biologist or geneticist
+  #      - Software developer
+  #      - IT
+  #      - Other
+
+  # Use "user_profile_form_message" to configure the message you want to display on
+  # the profile page.
+  user_profile_form_message: Welcome to Arvados. All <span style="color:red">required fields</span> must be completed before you can proceed.
+
+  # Override the automatic version string. With the default value of
+  # false, the version string is read from git-commit.version in
+  # Rails.root (included in vendor packages) or determined by invoking
+  # "git log".
+  source_version: false
+
+  # Override the automatic package string. With the default value of
+  # false, the package string is read from package-build.version in
+  # Rails.root (included in vendor packages).
+  package_version: false
+
+  # report notification to and from addresses
+  issue_reporter_email_from: arvados@example.com
+  issue_reporter_email_to: arvados@example.com
+  support_email_address: arvados@example.com
+
+  # generic issue email from
+  email_from: arvados@example.com
+
+  # Mimetypes of applications for which the view icon is enabled on a
+  # collection's show page. It is sufficient to list only application types
+  # here; there is no need to list text and image types.
+  application_mimetypes_with_view_icon: [cwl, fasta, go, javascript, json, pdf, python, r, rtf, sam, x-sh, vnd.realvnc.bed, xml, xsl]
+
+  # the maximum number of bytes to load in the log viewer
+  log_viewer_max_bytes: 1000000
+
+  # Set anonymous_user_token to enable anonymous user access. You can get
+  # the token by running "bundle exec ./script/get_anonymous_user_token.rb"
+  # in the directory where your API server is running.
+  anonymous_user_token: false
+
+  # when anonymous_user_token is configured, show public projects page
+  enable_public_projects_page: true
+
+  # by default, disable the "Getting Started" popup which is specific to the public beta install
+  enable_getting_started_popup: false
+
+  # Ask Arvados API server to compress its response payloads.
+  api_response_compression: true
+
+  # Timeouts for API requests.
+  api_client_connect_timeout: 120
+  api_client_receive_timeout: 300
+
+  # ShellInABox service endpoint URL for a given VM.  If false, do not
+  # offer web shell logins.
+  #
+  # E.g., using a path-based proxy server to forward connections to shell hosts:
+  # https://webshell.uuid_prefix.arvadosapi.com/%{hostname}
+  #
+  # E.g., using a name-based proxy server to forward connections to shell hosts:
+  # https://%{hostname}.webshell.uuid_prefix.arvadosapi.com/
+  shell_in_a_box_url: false
+
+  # Format of preview links. If false, use keep_web_download_url
+  # instead, and disable inline preview.
+  # If both are false, Workbench won't start; this setting is mandatory.
+  #
+  # Examples:
+  # keep_web_url: https://%{uuid_or_pdh}.collections.uuid_prefix.arvadosapi.com
+  # keep_web_url: https://%{uuid_or_pdh}--collections.uuid_prefix.arvadosapi.com
+  #
+  # Example supporting only public data and collection-sharing links
+  # (other data will be handled as downloads via keep_web_download_url):
+  # keep_web_url: https://collections.uuid_prefix.arvadosapi.com/c=%{uuid_or_pdh}
+  keep_web_url: false
+
+  # Format of download links. If false, use keep_web_url with
+  # disposition=attachment query param.
+  #
+  # The host part of the keep_web_download_url value here must match
+  # the -attachment-only-host argument given to keep-web: if
+  # keep_web_download_url is "https://FOO.EXAMPLE/c=..." then keep-web
+  # must run with "-attachment-only-host=FOO.EXAMPLE".
+  #
+  # If keep_web_download_url is false, and keep_web_url uses a
+  # single-origin form, then Workbench will show an error page
+  # when asked to download or preview private data.
+  #
+  # Example:
+  # keep_web_download_url: https://download.uuid_prefix.arvadosapi.com/c=%{uuid_or_pdh}
+  keep_web_download_url: false
+
+  # In "trust all content" mode, Workbench will redirect download
+  # requests to keep-web, even in the cases when keep-web would have
+  # to expose XSS vulnerabilities in order to handle the redirect.
+  #
+  # When enabling this setting, the -trust-all-content flag on the
+  # keep-web server must also be enabled.  For more detail, see
+  # https://godoc.org/github.com/curoverse/arvados/services/keep-web
+  #
+  # This setting has no effect in the recommended configuration, where
+  # the host part of keep_web_url begins with %{uuid_or_pdh}: in this
+  # case XSS protection is provided by browsers' same-origin policy.
+  #
+  # The default setting (false) is appropriate for a multi-user site.
+  trust_all_content: false
+
+  # Maximum number of historic log records of a running job to fetch
+  # and display in the Log tab, while subscribing to web sockets.
+  running_job_log_records_to_fetch: 2000
+
+  # In systems with many shared projects, loading the dashboard and topnav
+  # can be slow due to collections indexing; use the following parameters
+  # to disable those features.
+  show_recent_collections_on_dashboard: true
+  show_user_notifications: true
+
+  # Token to be included in all healthcheck requests. Disabled by default.
+  # Workbench expects a request header of the form "Authorization: Bearer xxx".
+  ManagementToken: false
+
+  # Enable/disable "multi-site search" in top nav (true/false), or
+  # link it to the multi-site search on a remote Workbench site.
+  #
+  # Example:
+  # multi_site_search: https://workbench.qr1hi.arvadosapi.com/collections/multisite
+  multi_site_search: false
+
+  #
+  # Link to use for Arvados Workflow Composer app, or false if not available.
+  #
+  composer_url: false
+
+  #
+  # Should workbench allow management of local git repositories? Set to false if
+  # the jobs api is disabled and there are no local git repositories.
+  #
+  repositories: true
+
+  #
+  # Add an item to the user menu pointing to workbench2_url, if not false.
+  #
+  # Example:
+  # workbench2_url: https://workbench2.qr1hi.arvadosapi.com
+  #
+  workbench2_url: false
diff --git a/apps/workbench/config/application.rb b/apps/workbench/config/application.rb
new file mode 100644 (file)
index 0000000..891dd43
--- /dev/null
@@ -0,0 +1,63 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require File.expand_path('../boot', __FILE__)
+
+require 'rails/all'
+
+Bundler.require(:default, Rails.env)
+
+module ArvadosWorkbench
+  class Application < Rails::Application
+    # Settings in config/environments/* take precedence over those specified here.
+    # Application configuration should go into files in config/initializers
+    # -- all .rb files in that directory are automatically loaded.
+
+    # Custom directories with classes and modules you want to be autoloadable.
+    # config.autoload_paths += %W(#{config.root}/extras)
+    config.autoload_paths += %W(#{config.root}/lib)
+
+    # Only load the plugins named here, in the order given (default is alphabetical).
+    # :all can be used as a placeholder for all plugins not explicitly named.
+    # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
+
+    # Activate observers that should always be running.
+    # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
+
+    # Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
+    # Run "rake -D time" for a list of tasks for finding time zone names. Default is UTC.
+    # config.time_zone = 'Central Time (US & Canada)'
+
+    # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
+    # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s]
+    # config.i18n.default_locale = :de
+
+    # Configure the default encoding used in templates for Ruby 1.9.
+    config.encoding = "utf-8"
+
+    # Configure sensitive parameters which will be filtered from the log file.
+    config.filter_parameters += [:password]
+
+    # Enable escaping HTML in JSON.
+    config.active_support.escape_html_entities_in_json = true
+
+    # Use SQL instead of Active Record's schema dumper when creating the database.
+    # This is necessary if your schema can't be completely dumped by the schema dumper,
+    # like if you have constraints or database-specific column types
+    # config.active_record.schema_format = :sql
+
+    # Enable the asset pipeline
+    config.assets.enabled = true
+
+    # Version of your assets, change this if you want to expire all your assets
+    config.assets.version = '1.0'
+
+    # npm-rails loads top-level modules like window.Mithril, but we
+    # also pull in some code from node_modules in application.js, like
+    # mithril/stream/stream.
+    config.assets.paths << Rails.root.join('node_modules')
+  end
+end
+
+require File.expand_path('../load_config', __FILE__)
diff --git a/apps/workbench/config/application.yml.example b/apps/workbench/config/application.yml.example
new file mode 100644 (file)
index 0000000..85df228
--- /dev/null
@@ -0,0 +1,41 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Copy this file to application.yml and edit to suit.
+#
+# Consult application.default.yml for the full list of configuration
+# settings.
+#
+# The order of precedence is:
+# 1. config/environments/{RAILS_ENV}.rb (deprecated)
+# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)
+# 3. Section in application.yml called "common"
+# 4. Section in application.default.yml corresponding to RAILS_ENV
+# 5. Section in application.default.yml called "common"
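+#
+# An illustrative walk-through (using the existing site_name key): if
+# application.default.yml sets site_name under "common" and your
+# application.yml sets it under "development", the application.yml
+# "development" value wins whenever RAILS_ENV=development.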
+
+development:
+  # At minimum, you need a nice long randomly generated secret_token here.
+  secret_token: ~
+
+  # You probably also want to point to your API server.
+  arvados_login_base: https://arvados.local:3030/login
+  arvados_v1_base: https://arvados.local:3030/arvados/v1
+  arvados_insecure_https: true
+
+  # You need to configure at least one of these:
+  keep_web_url: false
+  keep_web_download_url: false
+
+production:
+  # At minimum, you need a nice long randomly generated secret_token here.
+  secret_token: ~
+
+  # You probably also want to point to your API server.
+  arvados_login_base: https://arvados.local:3030/login
+  arvados_v1_base: https://arvados.local:3030/arvados/v1
+  arvados_insecure_https: false
+
+  # You need to configure at least one of these:
+  keep_web_url: false
+  keep_web_download_url: false
diff --git a/apps/workbench/config/boot.rb b/apps/workbench/config/boot.rb
new file mode 100644 (file)
index 0000000..8153266
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'rubygems'
+
+# Set up gems listed in the Gemfile.
+ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', __FILE__)
+
+require 'bundler/setup' if File.exists?(ENV['BUNDLE_GEMFILE'])
+
+# Use ARVADOS_API_TOKEN environment variable (if set) in console
+require 'rails'
+module ArvadosApiClientConsoleMode
+  class Railtie < Rails::Railtie
+    console do
+      Thread.current[:arvados_api_token] ||= ENV['ARVADOS_API_TOKEN']
+    end
+  end
+end
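+
+# Illustrative usage (the token value below is a placeholder):
+#   ARVADOS_API_TOKEN=your_token_here bundle exec rails console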
diff --git a/apps/workbench/config/database.yml b/apps/workbench/config/database.yml
new file mode 100644 (file)
index 0000000..5908b03
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Note: The database configuration is not actually used.
+development:
+  adapter: nulldb
+test:
+  adapter: nulldb
+production:
+  adapter: nulldb
+diagnostics:
+  adapter: nulldb
+performance:
+  adapter: nulldb
diff --git a/apps/workbench/config/environment.rb b/apps/workbench/config/environment.rb
new file mode 100644 (file)
index 0000000..d6b6a00
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Load the rails application
+require File.expand_path('../application', __FILE__)
+
+# Initialize the rails application
+ArvadosWorkbench::Application.initialize!
diff --git a/apps/workbench/config/environments/development.rb.example b/apps/workbench/config/environments/development.rb.example
new file mode 100644 (file)
index 0000000..d0b7efa
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+ArvadosWorkbench::Application.configure do
+  # Settings specified here will take precedence over those in config/application.rb
+
+  # In the development environment your application's code is reloaded on
+  # every request. This slows down response time but is perfect for development
+  # since you don't have to restart the web server when you make code changes.
+  config.cache_classes = false
+
+  # Show full error reports and disable caching
+  config.consider_all_requests_local       = true
+  config.action_controller.perform_caching = false
+
+  # Don't care if the mailer can't send
+  config.action_mailer.raise_delivery_errors = false
+
+  # Print deprecation notices to the Rails logger
+  config.active_support.deprecation = :log
+
+  # Only use best-standards-support built into browsers
+  config.action_dispatch.best_standards_support = :builtin
+
+  # Do not compress assets
+  config.assets.js_compressor = false
+
+  # Expands the lines which load the assets
+  config.assets.debug = true
+
+end
diff --git a/apps/workbench/config/environments/production.rb.example b/apps/workbench/config/environments/production.rb.example
new file mode 100644 (file)
index 0000000..8b656c5
--- /dev/null
@@ -0,0 +1,71 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+ArvadosWorkbench::Application.configure do
+  # Settings specified here will take precedence over those in config/application.rb
+
+  # Code is not reloaded between requests
+  config.cache_classes = true
+
+  # Full error reports are disabled and caching is turned on
+  config.consider_all_requests_local       = false
+  config.action_controller.perform_caching = true
+
+  # Disable Rails's static asset server (Apache or nginx will already do this)
+  config.serve_static_files = false
+
+  # Compress JavaScripts and CSS
+  config.assets.js_compressor = :uglifier
+
+  # Don't fallback to assets pipeline if a precompiled asset is missed
+  config.assets.compile = false
+
+  # Generate digests for assets URLs
+  config.assets.digest = true
+
+  # Defaults to nil and saved in location specified by config.assets.prefix
+  # config.assets.manifest = YOUR_PATH
+
+  # Specifies the header that your server uses for sending files
+  # config.action_dispatch.x_sendfile_header = "X-Sendfile" # for apache
+  # config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for nginx
+
+  # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
+  # config.force_ssl = true
+
+  # See everything in the log (default is :info)
+  # config.log_level = :debug
+
+  # Prepend all log lines with the following tags
+  # config.log_tags = [ :subdomain, :uuid ]
+
+  # Use a different logger for distributed setups
+  # config.logger = ActiveSupport::TaggedLogging.new(SyslogLogger.new)
+
+  # Use a different cache store in production
+  # config.cache_store = :mem_cache_store
+
+  # Enable serving of images, stylesheets, and JavaScripts from an asset server
+  # config.action_controller.asset_host = "http://assets.example.com"
+
+  # Precompile additional assets (application.js, application.css, and all non-JS/CSS are already added)
+  # config.assets.precompile += %w( search.js )
+
+  # Disable delivery errors, bad email addresses will be ignored
+  # config.action_mailer.raise_delivery_errors = false
+
+  # Enable threaded mode
+  # config.threadsafe!
+
+  # Enable locale fallbacks for I18n (makes lookups for any locale fall back to
+  # the I18n.default_locale when a translation can not be found)
+  config.i18n.fallbacks = true
+
+  # Send deprecation notices to registered listeners
+  config.active_support.deprecation = :notify
+
+  # Log timing data for API transactions
+  config.profiling_enabled = false
+
+end
diff --git a/apps/workbench/config/environments/test.rb b/apps/workbench/config/environments/test.rb
new file mode 120000 (symlink)
index 0000000..f1e9dbf
--- /dev/null
@@ -0,0 +1 @@
+test.rb.example
\ No newline at end of file
diff --git a/apps/workbench/config/environments/test.rb.example b/apps/workbench/config/environments/test.rb.example
new file mode 100644 (file)
index 0000000..7ce5082
--- /dev/null
@@ -0,0 +1,42 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+ArvadosWorkbench::Application.configure do
+  # Settings specified here will take precedence over those in config/application.rb
+
+  # The test environment is used exclusively to run your application's
+  # test suite. You never need to work with it otherwise. Remember that
+  # your test database is "scratch space" for the test suite and is wiped
+  # and recreated between test runs. Don't rely on the data there!
+  config.cache_classes = true
+
+  # Configure static asset server for tests with Cache-Control for performance
+  config.serve_static_files = true
+  config.static_cache_control = "public, max-age=3600"
+
+  # Show full error reports and disable caching
+  config.consider_all_requests_local       = true
+  config.action_controller.perform_caching = false
+
+  # Raise exceptions instead of rendering exception templates
+  config.action_dispatch.show_exceptions = false
+
+  # Disable request forgery protection in test environment
+  config.action_controller.allow_forgery_protection    = false
+
+  # Tell Action Mailer not to deliver emails to the real world.
+  # The :test delivery method accumulates sent emails in the
+  # ActionMailer::Base.deliveries array.
+  config.action_mailer.delivery_method = :test
+
+  # Print deprecation notices to the stderr
+  config.active_support.deprecation = :stderr
+
+  # Log timing data for API transactions
+  config.profiling_enabled = false
+
+  # Can be :random or :sorted. Rails 5 will use :random by default
+  config.active_support.test_order = :sorted
+
+end
diff --git a/apps/workbench/config/initializers/backtrace_silencers.rb b/apps/workbench/config/initializers/backtrace_silencers.rb
new file mode 100644 (file)
index 0000000..b9c6bce
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# You can add backtrace silencers for libraries that you're using but don't wish to see in your backtraces.
+# Rails.backtrace_cleaner.add_silencer { |line| line =~ /my_noisy_library/ }
+
+# You can also remove all the silencers if you're trying to debug a problem that might stem from framework code.
+# Rails.backtrace_cleaner.remove_silencers!
diff --git a/apps/workbench/config/initializers/inflections.rb b/apps/workbench/config/initializers/inflections.rb
new file mode 100644 (file)
index 0000000..01e7158
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# Add new inflection rules using the following format
+# (all these examples are active by default):
+# ActiveSupport::Inflector.inflections do |inflect|
+#   inflect.plural /^(ox)$/i, '\1en'
+#   inflect.singular /^(ox)en/i, '\1'
+#   inflect.irregular 'person', 'people'
+#   inflect.uncountable %w( fish sheep )
+# end
+#
+# These inflection rules are supported but not enabled by default:
+# ActiveSupport::Inflector.inflections do |inflect|
+#   inflect.acronym 'RESTful'
+# end
+
+ActiveSupport::Inflector.inflections do |inflect|
+  inflect.plural /^([Ss]pecimen)$/i, '\1s'
+  inflect.singular /^([Ss]pecimen)s?/i, '\1'
+  inflect.plural /^([Hh]uman)$/i, '\1s'
+  inflect.singular /^([Hh]uman)s?/i, '\1'
+end
diff --git a/apps/workbench/config/initializers/lograge.rb b/apps/workbench/config/initializers/lograge.rb
new file mode 100644 (file)
index 0000000..6e7f165
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+ArvadosWorkbench::Application.configure do
+  config.lograge.enabled = true
+  config.lograge.formatter = Lograge::Formatters::Logstash.new
+  config.lograge.custom_options = lambda do |event|
+    payload = {
+      request_id: event.payload[:request_id],
+    }
+    # Also log params (minus the pseudo-params added by Rails). But if
+    # params is huge, don't log the whole thing, just hope we get the
+    # most useful bits in truncate(json(params)).
+    exceptions = %w(controller action format id)
+    params = event.payload[:params].except(*exceptions)
+    params_s = Oj.dump(params)
+    if params_s.length > 1000
+      payload[:params_truncated] = params_s[0..1000] + "[...]"
+    else
+      payload[:params] = params
+    end
+    payload
+  end
+end
diff --git a/apps/workbench/config/initializers/mime_types.rb b/apps/workbench/config/initializers/mime_types.rb
new file mode 100644 (file)
index 0000000..69781a1
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# Add new mime types for use in respond_to blocks:
+# Mime::Type.register "text/richtext", :rtf
+# Mime::Type.register_alias "text/html", :iphone
+
+# add new mime types to MIME from mime_types gem
+
+require 'mime/types'
+include MIME
+[
+  %w(fasta fa fas fsa seq),
+  %w(go),
+  %w(r),
+  %w(sam),
+  %w(python py),
+].each do |suffixes|
+  if (MIME::Types.type_for(suffixes[0]).first.nil?)
+    MIME::Types.add(MIME::Type.new(["application/#{suffixes[0]}", suffixes]))
+  end
+end
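+
+# A quick sanity check (a sketch; the exact return shape depends on the
+# mime-types gem version):
+#   MIME::Types.type_for('reads.fa').first
+#   # => the application/fasta type registered by the loop above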
diff --git a/apps/workbench/config/initializers/rack_mini_profile.rb b/apps/workbench/config/initializers/rack_mini_profile.rb
new file mode 100644 (file)
index 0000000..5fedf3f
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+if not Rails.env.production? and ENV['ENABLE_PROFILING']
+  require 'rack-mini-profiler'
+  require 'flamegraph'
+  Rack::MiniProfilerRails.initialize! Rails.application
+end
diff --git a/apps/workbench/config/initializers/redcloth.rb b/apps/workbench/config/initializers/redcloth.rb
new file mode 100644 (file)
index 0000000..e0d6ac4
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module RedClothArvadosLinkExtension
+
+  class RedClothViewBase < ActionView::Base
+    include ApplicationHelper
+    include ActionView::Helpers::UrlHelper
+    include Rails.application.routes.url_helpers
+
+    def helper_link_to_if_arvados_object(link, opts)
+      link_to_if_arvados_object(link, opts)
+    end
+  end
+
+  def refs_arvados(text)
+    text.gsub!(/"(?!\s)([^"]*\S)":(\S+)/) do
+      text, link = $~[1..2]
+      arvados_link = RedClothViewBase.new.helper_link_to_if_arvados_object(link, { :link_text => text })
+      # If it's not an Arvados link, the helper returns the link unprocessed;
+      # reconstruct the textile link string so it can be processed normally.
+      (arvados_link == link) ? "\"#{text}\":#{link}" : arvados_link
+    end
+  end
+end
+
+RedCloth.send(:include, RedClothArvadosLinkExtension)
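+
+# Illustrative behavior (the uuid below is only a sample): refs_arvados
+# turns a textile link such as
+#   "My collection":zzzzz-4zz18-nz98douzhaa3jh2
+# into an Arvados object link when the helper resolves the target, and
+# reconstructs the plain textile link string otherwise.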
diff --git a/apps/workbench/config/initializers/secret_token.rb.example b/apps/workbench/config/initializers/secret_token.rb.example
new file mode 100644 (file)
index 0000000..fa6e816
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# Your secret key for verifying the integrity of signed cookies.
+# If you change this key, all old signed cookies will become invalid!
+# Make sure the secret is at least 30 characters and all random,
+# no regular words or you'll be exposed to dictionary attacks.
+ArvadosWorkbench::Application.config.secret_token ||= rand(2**256).to_s(36)
diff --git a/apps/workbench/config/initializers/session_store.rb b/apps/workbench/config/initializers/session_store.rb
new file mode 100644 (file)
index 0000000..b53e9ef
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+ArvadosWorkbench::Application.config.session_store :cookie_store, key: '_arvados_workbench_session'
+
+# Use the database for sessions instead of the cookie-based default,
+# which shouldn't be used to store highly confidential information
+# (create the session table with "rails generate session_migration")
+# ArvadosWorkbench::Application.config.session_store :active_record_store
diff --git a/apps/workbench/config/initializers/time_format.rb b/apps/workbench/config/initializers/time_format.rb
new file mode 100644 (file)
index 0000000..b0cc6c9
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ActiveSupport::TimeWithZone
+  def as_json *args
+    strftime "%Y-%m-%dT%H:%M:%S.%NZ"
+  end
+end
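+
+# Illustrative effect of the override (the value shown is hypothetical;
+# %N renders nine-digit nanoseconds):
+#   Time.zone.parse("2019-03-14 14:11:26 UTC").as_json
+#   # => "2019-03-14T14:11:26.000000000Z"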
diff --git a/apps/workbench/config/initializers/validate_wb2_url_config.rb b/apps/workbench/config/initializers/validate_wb2_url_config.rb
new file mode 100644 (file)
index 0000000..f909648
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+include ConfigValidators
+
+ConfigValidators::validate_wb2_url_config()
\ No newline at end of file
diff --git a/apps/workbench/config/initializers/wrap_parameters.rb b/apps/workbench/config/initializers/wrap_parameters.rb
new file mode 100644 (file)
index 0000000..6fb9786
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+#
+# This file contains settings for ActionController::ParamsWrapper which
+# is enabled by default.
+
+# Enable parameter wrapping for JSON. You can disable this by setting :format to an empty array.
+ActiveSupport.on_load(:action_controller) do
+  wrap_parameters format: [:json]
+end
+
+# Disable root element in JSON by default.
+ActiveSupport.on_load(:active_record) do
+  self.include_root_in_json = false
+end
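+
+# Illustrative effect (the controller name is hypothetical): a JSON POST of
+#   {"name": "example"}
+# to a UsersController is wrapped so that params also contains
+#   {"user": {"name": "example"}}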
diff --git a/apps/workbench/config/load_config.rb b/apps/workbench/config/load_config.rb
new file mode 100644 (file)
index 0000000..d8d4dff
--- /dev/null
@@ -0,0 +1,71 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This file must be loaded _after_ secret_token.rb if secret_token is
+# defined there instead of in config/application.yml.
+
+$application_config = {}
+
+%w(application.default application).each do |cfgfile|
+  path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
+  if File.exists? path
+    yaml = ERB.new(IO.read path).result(binding)
+    confs = YAML.load(yaml, deserialize_symbols: true)
+    $application_config.merge!(confs['common'] || {})
+    $application_config.merge!(confs[::Rails.env.to_s] || {})
+  end
+end
+
+ArvadosWorkbench::Application.configure do
+  nils = []
+  $application_config.each do |k, v|
+    # "foo.bar: baz" --> { config.foo.bar = baz }
+    cfg = config
+    ks = k.split '.'
+    k = ks.pop
+    ks.each do |kk|
+      cfg = cfg.send(kk)
+    end
+    if v.nil? and cfg.respond_to?(k) and !cfg.send(k).nil?
+      # Config is nil in *.yml, but has been set already in
+      # environments/*.rb (or has a Rails default). Don't overwrite
+      # the default/upstream config with nil.
+      #
+      # After config files have been migrated, this mechanism should
+      # be removed.
+      Rails.logger.warn <<EOS
+DEPRECATED: Inheriting config.#{ks.join '.'} from Rails config.
+            Please move this config into config/application.yml.
+EOS
+    elsif v.nil?
+      # Config variables are not allowed to be nil. Make a "naughty"
+      # list, and present it below.
+      nils << k
+    else
+      cfg.send "#{k}=", v
+    end
+  end
+  if !nils.empty? and not ::Rails.groups.include?('assets')
+    raise <<EOS
+Refusing to start in #{::Rails.env.to_s} mode with missing configuration.
+
+The following configuration settings must be specified in
+config/application.yml:
+* #{nils.join "\n* "}
+
+EOS
+  end
+  # Refuse to start if keep-web isn't configured
+  if not (config.keep_web_url or config.keep_web_download_url) and not ::Rails.groups.include?('assets')
+    raise <<EOS
+Refusing to start in #{::Rails.env.to_s} mode with missing configuration.
+
+Keep-web service must be configured in config/application.yml:
+* keep_web_url
+* keep_web_download_url
+
+EOS
+  end
+end
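+
+# A minimal sketch of the dotted-key mapping above, assuming an
+# application.yml entry "action_controller.perform_caching: false":
+# the loop resolves config.action_controller, then calls
+#   config.action_controller.send("perform_caching=", false)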
diff --git a/apps/workbench/config/locales/en.bootstrap.yml b/apps/workbench/config/locales/en.bootstrap.yml
new file mode 100644 (file)
index 0000000..664de2b
--- /dev/null
@@ -0,0 +1,18 @@
+# Sample localization file for English. Add more files in this directory for other locales.
+# See https://github.com/svenfuchs/rails-i18n/tree/master/rails%2Flocale for starting points.
+
+en:
+  helpers:
+    actions: "Actions"
+    links:
+      back: "Back"
+      cancel: "Cancel"
+      confirm: "Are you sure?"
+      destroy: "Delete"
+      new: "New"
+      edit: "Edit"
+    titles:
+      edit: "Edit"
+      save: "Save"
+      new: "New"
+      delete: "Delete"
diff --git a/apps/workbench/config/locales/en.yml b/apps/workbench/config/locales/en.yml
new file mode 100644 (file)
index 0000000..e6a62cb
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Sample localization file for English. Add more files in this directory for other locales.
+# See https://github.com/svenfuchs/rails-i18n/tree/master/rails%2Flocale for starting points.
+
+en:
+  hello: "Hello world"
diff --git a/apps/workbench/config/piwik.yml.example b/apps/workbench/config/piwik.yml.example
new file mode 100644 (file)
index 0000000..52a1ffb
--- /dev/null
@@ -0,0 +1,37 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Configuration:
+# 
+# disabled
+#   false if tracking tag should be shown
+# use_async
+#   Set to true if you want to use asynchronous tracking
+# url
+#   The url of your piwik instance (e.g., localhost/piwik/)
+# id_site
+#   The id of your website inside Piwik
+#
+production:
+  piwik:
+    id_site: 1
+    url: localhost
+    use_async: false
+    disabled: false
+
+development:
+  piwik:
+    id_site: 1
+    url: localhost
+    disabled: true
+    use_async: false
+    hostname: localhost
+
+test:
+  piwik:
+    id_site: 1
+    url: localhost
+    disabled: true
+    use_async: false
+    hostname: localhost
diff --git a/apps/workbench/config/routes.rb b/apps/workbench/config/routes.rb
new file mode 100644 (file)
index 0000000..718adfd
--- /dev/null
@@ -0,0 +1,142 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+ArvadosWorkbench::Application.routes.draw do
+  themes_for_rails
+
+  resources :keep_disks
+  resources :keep_services
+  resources :user_agreements do
+    post 'sign', on: :collection
+    get 'signatures', on: :collection
+  end
+  get '/user_agreements/signatures' => 'user_agreements#signatures'
+  get "users/setup_popup" => 'users#setup_popup', :as => :setup_user_popup
+  get "users/setup" => 'users#setup', :as => :setup_user
+  get "report_issue_popup" => 'actions#report_issue_popup', :as => :report_issue_popup
+  post "report_issue" => 'actions#report_issue', :as => :report_issue
+  get "star" => 'actions#star', :as => :star
+  get "all_processes" => 'work_units#index', :as => :all_processes
+  get "choose_work_unit_templates" => 'work_unit_templates#choose', :as => :choose_work_unit_templates
+  resources :work_units do
+    post 'show_child_component', :on => :member
+  end
+  resources :nodes
+  resources :humans
+  resources :traits
+  resources :api_client_authorizations
+  resources :virtual_machines
+  resources :containers
+  resources :container_requests do
+    post 'cancel', :on => :member
+    post 'copy', on: :member
+  end
+  get '/virtual_machines/:id/webshell/:login' => 'virtual_machines#webshell', :as => :webshell_virtual_machine
+  resources :authorized_keys
+  resources :job_tasks
+  resources :jobs do
+    post 'cancel', :on => :member
+    get 'logs', :on => :member
+  end
+  resources :repositories do
+    post 'share_with', on: :member
+  end
+  # {format: false} prevents rails from treating "foo.png" as foo?format=png
+  get '/repositories/:id/tree/:commit' => 'repositories#show_tree'
+  get '/repositories/:id/tree/:commit/*path' => 'repositories#show_tree', as: :show_repository_tree, format: false
+  get '/repositories/:id/blob/:commit/*path' => 'repositories#show_blob', as: :show_repository_blob, format: false
+  get '/repositories/:id/commit/:commit' => 'repositories#show_commit', as: :show_repository_commit
+  resources :sessions
+  match '/logout' => 'sessions#destroy', via: [:get, :post]
+  get '/logged_out' => 'sessions#logged_out'
+  resources :users do
+    get 'choose', :on => :collection
+    get 'home', :on => :member
+    get 'welcome', :on => :collection
+    get 'inactive', :on => :collection
+    get 'activity', :on => :collection
+    get 'storage', :on => :collection
+    post 'sudo', :on => :member
+    post 'unsetup', :on => :member
+    get 'setup_popup', :on => :member
+    get 'profile', :on => :member
+    post 'request_shell_access', :on => :member
+    get 'virtual_machines', :on => :member
+    get 'repositories', :on => :member
+    get 'ssh_keys', :on => :member
+    get 'link_account', :on => :collection
+    post 'link_account', :on => :collection, :action => :merge
+  end
+  get '/current_token' => 'users#current_token'
+  get "/add_ssh_key_popup" => 'users#add_ssh_key_popup', :as => :add_ssh_key_popup
+  get "/add_ssh_key" => 'users#add_ssh_key', :as => :add_ssh_key
+  resources :logs
+  resources :factory_jobs
+  resources :uploaded_datasets
+  resources :groups do
+    get 'choose', on: :collection
+  end
+  resources :specimens
+  resources :pipeline_templates do
+    get 'choose', on: :collection
+  end
+  resources :pipeline_instances do
+    post 'cancel', :on => :member
+    get 'compare', on: :collection
+    post 'copy', on: :member
+  end
+  resources :links
+  get '/collections/graph' => 'collections#graph'
+  resources :collections do
+    post 'set_persistent', on: :member
+    get 'sharing_popup', :on => :member
+    post 'share', :on => :member
+    post 'unshare', :on => :member
+    get 'choose', on: :collection
+    post 'remove_selected_files', on: :member
+    get 'tags', on: :member
+    post 'save_tags', on: :member
+    get 'multisite', on: :collection, to: redirect('/search')
+  end
+  get('/collections/download/:uuid/:reader_token/*file' => 'collections#show_file',
+      format: false)
+  get '/collections/download/:uuid/:reader_token' => 'collections#show_file_links'
+  get '/collections/:uuid/*file' => 'collections#show_file', :format => false
+  resources :projects do
+    match 'remove/:item_uuid', on: :member, via: :delete, action: :remove_item
+    match 'remove_items', on: :member, via: :delete, action: :remove_items
+    get 'choose', on: :collection
+    post 'share_with', on: :member
+    get 'tab_counts', on: :member
+    get 'public', on: :collection
+  end
+
+  resources :search do
+    get 'choose', :on => :collection
+  end
+
+  resources :workflows
+
+  get "trash" => 'trash_items#index', :as => :trash
+  resources :trash_items do
+    post 'untrash_items', on: :collection
+  end
+
+  post 'actions' => 'actions#post'
+  get 'actions' => 'actions#show'
+  get 'websockets' => 'websocket#index'
+  post "combine_selected" => 'actions#combine_selected_files_into_collection'
+
+  root :to => 'projects#index'
+
+  match '/_health/ping', to: 'healthcheck#ping', via: [:get]
+
+  get '/tests/mithril', to: 'tests#mithril'
+
+  get '/status', to: 'status#status'
+
+  # Send unroutable requests to an arbitrary controller
+  # (ends up at ApplicationController#render_not_found)
+  match '*a', to: 'links#render_not_found', via: [:get, :post]
+end
diff --git a/apps/workbench/db/schema.rb b/apps/workbench/db/schema.rb
new file mode 100644 (file)
index 0000000..3412ad8
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# encoding: UTF-8
+# This file is auto-generated from the current state of the database. Instead
+# of editing this file, please use the migrations feature of Active Record to
+# incrementally modify your database, and then regenerate this schema definition.
+#
+# Note that this schema.rb definition is the authoritative source for your
+# database schema. If you need to create the application database on another
+# system, you should be using db:schema:load, not running all the migrations
+# from scratch. The latter is a flawed and unsustainable approach (the more
+# migrations you amass, the slower they'll run and the greater the likelihood
+# of issues).
+#
+# It's strongly recommended to check this file into your version control system.
+
+ActiveRecord::Schema.define(:version => 0) do
+
+end
diff --git a/apps/workbench/db/seeds.rb b/apps/workbench/db/seeds.rb
new file mode 100644 (file)
index 0000000..d1ae89d
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This file should contain all the record creation needed to seed the database with its default values.
+# The data can then be loaded with the rake db:seed (or created alongside the db with db:setup).
+#
+# Examples:
+#
+#   cities = City.create([{ name: 'Chicago' }, { name: 'Copenhagen' }])
+#   Mayor.create(name: 'Emanuel', city: cities.first)
diff --git a/apps/workbench/fpm-info.sh b/apps/workbench/fpm-info.sh
new file mode 100644 (file)
index 0000000..22ec1ba
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+case "$TARGET" in
+    centos*)
+        fpm_depends+=(git)
+        ;;
+    debian* | ubuntu*)
+        fpm_depends+=(git g++)
+        ;;
+esac
diff --git a/apps/workbench/lib/app_version.rb b/apps/workbench/lib/app_version.rb
new file mode 100644 (file)
index 0000000..9db76e2
--- /dev/null
@@ -0,0 +1,72 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# If you change this file, you'll probably also want to make the same
+# changes in services/api/lib/app_version.rb.
+
+class AppVersion
+  # Run "git --git-dir .git <args>" from the source tree root (two levels
+  # above Rails.root), discarding stderr.
+  def self.git(*args, &block)
+    IO.popen(["git", "--git-dir", ".git"] + args, "r",
+             chdir: Rails.root.join('../..'),
+             err: "/dev/null",
+             &block)
+  end
+
+  def self.forget
+    @hash = nil
+    @package_version = nil
+  end
+
+  # Return the abbreviated (8-character) commit hash for the current code
+  # version: "abc1234f", or "abc1234f-modified" if there are uncommitted
+  # changes. If present, return the contents of {root}/git-commit.version
+  # instead.
+  def self.hash
+    if (cached = Rails.configuration.source_version || @hash)
+      return cached
+    end
+
+    # Read the version from our package's git-commit.version file, if available.
+    begin
+      @hash = IO.read(Rails.root.join("git-commit.version")).strip
+    rescue Errno::ENOENT
+    end
+
+    if @hash.nil? or @hash.empty?
+      begin
+        local_modified = false
+        git("status", "--porcelain") do |git_pipe|
+          git_pipe.each_line do |_|
+            local_modified = true
+            # Continue reading the pipe so git doesn't get SIGPIPE.
+          end
+        end
+        if $?.success?
+          git("log", "-n1", "--format=%H") do |git_pipe|
+            git_pipe.each_line do |line|
+              @hash = line.chomp[0...8] + (local_modified ? '-modified' : '')
+            end
+          end
+        end
+      rescue SystemCallError
+      end
+    end
+
+    @hash || "unknown"
+  end
+
+  def self.package_version
+    if (cached = Rails.configuration.package_version || @package_version)
+      return cached
+    end
+
+    begin
+      @package_version = IO.read(Rails.root.join("package-build.version")).strip
+    rescue Errno::ENOENT
+      @package_version = "unknown"
+    end
+
+    @package_version
+  end
+end
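AppVersion.hash prefers the git-commit.version file baked into package builds and falls back to asking git, memoizing the result until forget is called. A minimal usage sketch, assuming a Rails console in a source checkout (the hash value is hypothetical):

    AppVersion.hash             #=> "abc1234f", or "abc1234f-modified" with local edits
    AppVersion.package_version  #=> "unknown" outside a package build
    AppVersion.forget           # clear memoized values so the next call re-checks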
diff --git a/apps/workbench/lib/assets/.gitkeep b/apps/workbench/lib/assets/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/lib/config_validators.rb b/apps/workbench/lib/config_validators.rb
new file mode 100644 (file)
index 0000000..ec76916
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'uri'
+
+module ConfigValidators
+    def validate_wb2_url_config
+        if Rails.configuration.workbench2_url
+            begin
+                if !URI.parse(Rails.configuration.workbench2_url).is_a?(URI::HTTP)
+                    Rails.logger.warn("workbench2_url config is not an HTTP URL: #{Rails.configuration.workbench2_url}")
+                    Rails.configuration.workbench2_url = false
+                elsif /.*[\/]{2,}$/.match(Rails.configuration.workbench2_url)
+                    Rails.logger.warn("workbench2_url config shouldn't have multiple trailing slashes: #{Rails.configuration.workbench2_url}")
+                    Rails.configuration.workbench2_url = false
+                else
+                    return true
+                end
+            rescue URI::InvalidURIError
+                Rails.logger.warn("workbench2_url config invalid URL: #{Rails.configuration.workbench2_url}")
+                Rails.configuration.workbench2_url = false
+            end
+        end
+        return false
+    end
+end
+
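Note that URI.parse returns URI::HTTPS for https URLs, and URI::HTTPS is a subclass of URI::HTTP, so both schemes pass the check; a non-HTTP or malformed URL, or one with multiple trailing slashes, logs a warning and disables the setting by resetting workbench2_url to false. A minimal wiring sketch, assuming an initializer (file name hypothetical; assumes lib/ is on the load path):

    # config/initializers/validate_wb2_url.rb (hypothetical)
    require 'config_validators'
    include ConfigValidators    # mixes the validator into main
    validate_wb2_url_config    # true only if workbench2_url survived all checks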
diff --git a/apps/workbench/lib/tasks/.gitkeep b/apps/workbench/lib/tasks/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/lib/tasks/config_check.rake b/apps/workbench/lib/tasks/config_check.rake
new file mode 100644 (file)
index 0000000..9fd5435
--- /dev/null
@@ -0,0 +1,23 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+namespace :config do
+  desc 'Ensure site configuration has all required settings'
+  task check: :environment do
+    $application_config.sort.each do |k, v|
+      if ENV.has_key?('QUIET')
+        # Still evaluate the key so a missing setting raises an error.
+        eval("Rails.configuration.#{k}")
+      else
+        if /(password|secret)/.match(k)
+          # Still evaluate the key, but mask the value when printing.
+          eval("Rails.configuration.#{k}")
+          $stderr.puts "%-32s %s" % [k, '*********']
+        else
+          $stderr.puts "%-32s %s" % [k, eval("Rails.configuration.#{k}")]
+        end
+      end
+    end
+  end
+end
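config:check prints each setting to stderr with a 32-column key, masking any value whose key matches password or secret; with QUIET set it still evaluates every key (so a missing setting raises) but prints nothing. A minimal sketch of invoking the task programmatically, assuming the app's rake tasks are loadable:

    require 'rake'
    Rails.application.load_tasks        # defines config:check and config:dump
    ENV['QUIET'] = '1'                  # evaluate every key without printing values
    Rake::Task['config:check'].invoke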
diff --git a/apps/workbench/lib/tasks/config_dump.rake b/apps/workbench/lib/tasks/config_dump.rake
new file mode 100644 (file)
index 0000000..ed34960
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+namespace :config do
+  desc 'Show site configuration'
+  task dump: :environment do
+    puts $application_config.to_yaml
+  end
+end
diff --git a/apps/workbench/log/.gitkeep b/apps/workbench/log/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/npm_packages b/apps/workbench/npm_packages
new file mode 100644 (file)
index 0000000..64f58ac
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Run "rake npm:install"
+
+# Browserify is required.
+npm 'browserify', require: false
+npm 'jquery'
+npm 'awesomplete'
+npm 'jssha'
+
+npm 'mithril'
+npm 'es6-object-assign'
diff --git a/apps/workbench/public/404.html b/apps/workbench/public/404.html
new file mode 100644 (file)
index 0000000..abb9f80
--- /dev/null
@@ -0,0 +1,30 @@
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 -->
+
+<!DOCTYPE html>
+<html>
+<head>
+  <title>The page you were looking for doesn't exist (404)</title>
+  <style type="text/css">
+    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }
+    div.dialog {
+      width: 25em;
+      padding: 0 4em;
+      margin: 4em auto 0 auto;
+      border: 1px solid #ccc;
+      border-right-color: #999;
+      border-bottom-color: #999;
+    }
+    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }
+  </style>
+</head>
+
+<body>
+  <!-- This file lives in public/404.html -->
+  <div class="dialog">
+    <h1>The page you were looking for doesn't exist.</h1>
+    <p>You may have mistyped the address or the page may have moved.</p>
+  </div>
+</body>
+</html>
diff --git a/apps/workbench/public/422.html b/apps/workbench/public/422.html
new file mode 100644 (file)
index 0000000..faa4a52
--- /dev/null
@@ -0,0 +1,30 @@
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 -->
+
+<!DOCTYPE html>
+<html>
+<head>
+  <title>The change you wanted was rejected (422)</title>
+  <style type="text/css">
+    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }
+    div.dialog {
+      width: 25em;
+      padding: 0 4em;
+      margin: 4em auto 0 auto;
+      border: 1px solid #ccc;
+      border-right-color: #999;
+      border-bottom-color: #999;
+    }
+    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }
+  </style>
+</head>
+
+<body>
+  <!-- This file lives in public/422.html -->
+  <div class="dialog">
+    <h1>The change you wanted was rejected.</h1>
+    <p>Maybe you tried to change something you didn't have access to.</p>
+  </div>
+</body>
+</html>
diff --git a/apps/workbench/public/500.html b/apps/workbench/public/500.html
new file mode 100644 (file)
index 0000000..97e04f3
--- /dev/null
@@ -0,0 +1,29 @@
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 -->
+
+<!DOCTYPE html>
+<html>
+<head>
+  <title>We're sorry, but something went wrong (500)</title>
+  <style type="text/css">
+    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }
+    div.dialog {
+      width: 25em;
+      padding: 0 4em;
+      margin: 4em auto 0 auto;
+      border: 1px solid #ccc;
+      border-right-color: #999;
+      border-bottom-color: #999;
+    }
+    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }
+  </style>
+</head>
+
+<body>
+  <!-- This file lives in public/500.html -->
+  <div class="dialog">
+    <h1>We're sorry, but something went wrong.</h1>
+  </div>
+</body>
+</html>
diff --git a/apps/workbench/public/browser_unsupported.js b/apps/workbench/public/browser_unsupported.js
new file mode 100644 (file)
index 0000000..a972b7f
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+(function() {
+    // Feature-detect the HTML5 APIs Workbench depends on; touching
+    // window.localStorage can throw in some privacy modes, hence the try.
+    var ok = false;
+    try {
+        if (window.Blob &&
+            window.File &&
+            window.FileReader &&
+            window.localStorage &&
+            window.WebSocket) {
+            ok = true;
+        }
+    } catch(err) {}
+    if (!ok) {
+        // Un-hide the "browser unsupported" banner by clearing its class.
+        document.getElementById('browser-unsupported').className = '';
+    }
+})();
diff --git a/apps/workbench/public/d3.v3.min.js b/apps/workbench/public/d3.v3.min.js
new file mode 100644 (file)
index 0000000..cba27c9
--- /dev/null
@@ -0,0 +1,4 @@
[minified D3.js v3.0.4 source (4 long lines), vendored verbatim in d3.v3.min.js; the minified body wraps and is truncated here, so only this placeholder is kept]
r},right:function(n,e,r,i){for(3>arguments.length&&(r=0),4>arguments.length&&(i=n.length);i>r;){var u=r+i>>>1;t.call(n,n[u],u)>e?i=u:r=u+1}return r}}};var Zu=d3.bisector(function(t){return t});d3.bisectLeft=Zu.left,d3.bisect=d3.bisectRight=Zu.right,d3.nest=function(){function t(n,o){if(o>=a.length)return r?r.call(i,n):e?n.sort(e):n;for(var c,l,s,f=-1,h=n.length,d=a[o++],g=new u,p={};h>++f;)(s=g.get(c=d(l=n[f])))?s.push(l):g.set(c,[l]);return g.forEach(function(n,e){p[n]=t(e,o)}),p}function n(t,e){if(e>=a.length)return t;var r,i=[],u=o[e++];for(r in t)i.push({key:r,values:n(t[r],e)});return u&&i.sort(function(t,n){return u(t.key,n.key)}),i}var e,r,i={},a=[],o=[];return i.map=function(n){return t(n,0)},i.entries=function(e){return n(t(e,0),0)},i.key=function(t){return a.push(t),i},i.sortKeys=function(t){return o[a.length-1]=t,i},i.sortValues=function(t){return e=t,i},i.rollup=function(t){return r=t,i},i},d3.keys=function(t){var n=[];for(var e in t)n.push(e);return n},d3.values=function(t){var n=[];for(var e in t)n.push(t[e]);return n},d3.entries=function(t){var n=[];for(var e in t)n.push({key:e,value:t[e]});return n},d3.permute=function(t,n){for(var e=[],r=-1,i=n.length;i>++r;)e[r]=t[n[r]];return e},d3.merge=function(t){return Array.prototype.concat.apply([],t)},d3.range=function(t,n,e){if(3>arguments.length&&(e=1,2>arguments.length&&(n=t,t=0)),1/0===(n-t)/e)throw Error("infinite range");var r,i=[],u=d(Math.abs(e)),a=-1;if(t*=u,n*=u,e*=u,0>e)for(;(r=t+e*++a)>n;)i.push(r/u);else for(;n>(r=t+e*++a);)i.push(r/u);return i},d3.requote=function(t){return t.replace(Bu,"\\$&")};var Bu=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g;d3.round=function(t,n){return n?Math.round(t*(n=Math.pow(10,n)))/n:Math.round(t)},d3.xhr=function(t,n,e){function r(){var t=l.status;!t&&l.responseText||t>=200&&300>t||304===t?u.load.call(i,c.call(i,l)):u.error.call(i,l)}var i={},u=d3.dispatch("progress","load","error"),o={},c=a,l=new(window.XDomainRequest&&/^(http(s)?:)?\/\//.test(t)?XDomainRequest:XMLHttpRequest);return"onload"in l?l.onload=l.onerror=r:l.onreadystatechange=function(){l.readyState>3&&r()},l.onprogress=function(t){var n=d3.event;d3.event=t;try{u.progress.call(i,l)}finally{d3.event=n}},i.header=function(t,n){return t=(t+"").toLowerCase(),2>arguments.length?o[t]:(null==n?delete o[t]:o[t]=n+"",i)},i.mimeType=function(t){return arguments.length?(n=null==t?null:t+"",i):n},i.response=function(t){return c=t,i},["get","post"].forEach(function(t){i[t]=function(){return i.send.apply(i,[t].concat(Yu(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),l.open(e,t,!0),null==n||"accept"in o||(o.accept=n+",*/*"),l.setRequestHeader)for(var a in o)l.setRequestHeader(a,o[a]);return null!=n&&l.overrideMimeType&&l.overrideMimeType(n),null!=u&&i.on("error",u).on("load",function(t){u(null,t)}),l.send(null==r?null:r),i},i.abort=function(){return l.abort(),i},d3.rebind(i,u,"on"),2===arguments.length&&"function"==typeof n&&(e=n,n=null),null==e?i:i.get(g(e))},d3.text=function(){return d3.xhr.apply(d3,arguments).response(p)},d3.json=function(t,n){return d3.xhr(t,"application/json",n).response(m)},d3.html=function(t,n){return d3.xhr(t,"text/html",n).response(v)},d3.xml=function(){return d3.xhr.apply(d3,arguments).response(y)};var $u={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};d3.ns={prefix:$u,qualify:function(t){var n=t.indexOf(":"),e=t;return 
n>=0&&(e=t.substring(0,n),t=t.substring(n+1)),$u.hasOwnProperty(e)?{space:$u[e],local:t}:t}},d3.dispatch=function(){for(var t=new M,n=-1,e=arguments.length;e>++n;)t[arguments[n]]=b(t);return t},M.prototype.on=function(t,n){var e=t.indexOf("."),r="";return e>0&&(r=t.substring(e+1),t=t.substring(0,e)),2>arguments.length?this[t].on(r):this[t].on(r,n)},d3.format=function(t){var n=Ju.exec(t),e=n[1]||" ",r=n[2]||">",i=n[3]||"",u=n[4]||"",a=n[5],o=+n[6],c=n[7],l=n[8],s=n[9],f=1,h="",d=!1;switch(l&&(l=+l.substring(1)),(a||"0"===e&&"="===r)&&(a=e="0",r="=",c&&(o-=Math.floor((o-1)/4))),s){case"n":c=!0,s="g";break;case"%":f=100,h="%",s="f";break;case"p":f=100,h="%",s="r";break;case"b":case"o":case"x":case"X":u&&(u="0"+s.toLowerCase());case"c":case"d":d=!0,l=0;break;case"s":f=-1,s="r"}"#"===u&&(u=""),"r"!=s||l||(s="g"),s=Gu.get(s)||_;var g=a&&c;return function(t){if(d&&t%1)return"";var n=0>t||0===t&&0>1/t?(t=-t,"-"):i;if(0>f){var p=d3.formatPrefix(t,l);t=p.scale(t),h=p.symbol}else t*=f;t=s(t,l),!a&&c&&(t=Ku(t));var m=u.length+t.length+(g?0:n.length),v=o>m?Array(m=o-m+1).join(e):"";return g&&(t=Ku(v+t)),Cu&&t.replace(".",Cu),n+=u,("<"===r?n+t+v:">"===r?v+n+t:"^"===r?v.substring(0,m>>=1)+n+t+v.substring(m):n+(g?t:v+t))+h}};var Ju=/(?:([^{])?([<>=^]))?([+\- ])?(#)?(0)?([0-9]+)?(,)?(\.[0-9]+)?([a-zA-Z%])?/,Gu=d3.map({b:function(t){return t.toString(2)},c:function(t){return String.fromCharCode(t)},o:function(t){return t.toString(8)},x:function(t){return t.toString(16)},X:function(t){return t.toString(16).toUpperCase()},g:function(t,n){return t.toPrecision(n)},e:function(t,n){return t.toExponential(n)},f:function(t,n){return t.toFixed(n)},r:function(t,n){return d3.round(t,n=x(t,n)).toFixed(Math.max(0,Math.min(20,n)))}}),Ku=a;if(Du){var Wu=Du.length;Ku=function(t){for(var n=t.lastIndexOf("."),e=n>=0?"."+t.substring(n+1):(n=t.length,""),r=[],i=0,u=Du[0];n>0&&u>0;)r.push(t.substring(n-=u,n+u)),u=Du[i=(i+1)%Wu];return r.reverse().join(zu||"")+e}}var Qu=["y","z","a","f","p","n","μ","m","","k","M","G","T","P","E","Z","Y"].map(w);d3.formatPrefix=function(t,n){var e=0;return t&&(0>t&&(t*=-1),n&&(t=d3.round(t,x(t,n))),e=1+Math.floor(1e-12+Math.log(t)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Qu[8+e/3]};var ta=function(){return a},na=d3.map({linear:ta,poly:q,quad:function(){return A},cubic:function(){return N},sin:function(){return C},exp:function(){return z},circle:function(){return D},elastic:L,back:F,bounce:function(){return H}}),ea=d3.map({"in":a,out:k,"in-out":E,"out-in":function(t){return E(k(t))}});d3.ease=function(t){var n=t.indexOf("-"),e=n>=0?t.substring(0,n):t,r=n>=0?t.substring(n+1):"in";return e=na.get(e)||ta,r=ea.get(r)||a,S(r(e.apply(null,Array.prototype.slice.call(arguments,1))))},d3.event=null,d3.transform=function(t){var n=document.createElementNS(d3.ns.prefix.svg,"g");return(d3.transform=function(t){n.setAttribute("transform",t);var e=n.transform.baseVal.consolidate();return new O(e?e.matrix:ra)})(t)},O.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var ra={a:1,b:0,c:0,d:1,e:0,f:0};d3.interpolate=function(t,n){for(var e,r=d3.interpolators.length;--r>=0&&!(e=d3.interpolators[r](t,n)););return e},d3.interpolateNumber=function(t,n){return n-=t,function(e){return t+n*e}},d3.interpolateRound=function(t,n){return n-=t,function(e){return Math.round(t+n*e)}},d3.interpolateString=function(t,n){var 
e,r,i,u,a,o=0,c=0,l=[],s=[];for(ia.lastIndex=0,r=0;e=ia.exec(n);++r)e.index&&l.push(n.substring(o,c=e.index)),s.push({i:l.length,x:e[0]}),l.push(null),o=ia.lastIndex;for(n.length>o&&l.push(n.substring(o)),r=0,u=s.length;(e=ia.exec(t))&&u>r;++r)if(a=s[r],a.x==e[0]){if(a.i)if(null==l[a.i+1])for(l[a.i-1]+=a.x,l.splice(a.i,1),i=r+1;u>i;++i)s[i].i--;else for(l[a.i-1]+=a.x+l[a.i+1],l.splice(a.i,2),i=r+1;u>i;++i)s[i].i-=2;else if(null==l[a.i+1])l[a.i]=a.x;else for(l[a.i]=a.x+l[a.i+1],l.splice(a.i+1,1),i=r+1;u>i;++i)s[i].i--;s.splice(r,1),u--,r--}else a.x=d3.interpolateNumber(parseFloat(e[0]),parseFloat(a.x));for(;u>r;)a=s.pop(),null==l[a.i+1]?l[a.i]=a.x:(l[a.i]=a.x+l[a.i+1],l.splice(a.i+1,1)),u--;return 1===l.length?null==l[0]?s[0].x:function(){return n}:function(t){for(r=0;u>r;++r)l[(a=s[r]).i]=a.x(t);return l.join("")}},d3.interpolateTransform=function(t,n){var e,r=[],i=[],u=d3.transform(t),a=d3.transform(n),o=u.translate,c=a.translate,l=u.rotate,s=a.rotate,f=u.skew,h=a.skew,d=u.scale,g=a.scale;return o[0]!=c[0]||o[1]!=c[1]?(r.push("translate(",null,",",null,")"),i.push({i:1,x:d3.interpolateNumber(o[0],c[0])},{i:3,x:d3.interpolateNumber(o[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),l!=s?(l-s>180?s+=360:s-l>180&&(l+=360),i.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:d3.interpolateNumber(l,s)})):s&&r.push(r.pop()+"rotate("+s+")"),f!=h?i.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:d3.interpolateNumber(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),d[0]!=g[0]||d[1]!=g[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),i.push({i:e-4,x:d3.interpolateNumber(d[0],g[0])},{i:e-2,x:d3.interpolateNumber(d[1],g[1])})):(1!=g[0]||1!=g[1])&&r.push(r.pop()+"scale("+g+")"),e=i.length,function(t){for(var n,u=-1;e>++u;)r[(n=i[u]).i]=n.x(t);return r.join("")}},d3.interpolateRgb=function(t,n){t=d3.rgb(t),n=d3.rgb(n);var e=t.r,r=t.g,i=t.b,u=n.r-e,a=n.g-r,o=n.b-i;return function(t){return"#"+G(Math.round(e+u*t))+G(Math.round(r+a*t))+G(Math.round(i+o*t))}},d3.interpolateHsl=function(t,n){t=d3.hsl(t),n=d3.hsl(n);var e=t.h,r=t.s,i=t.l,u=n.h-e,a=n.s-r,o=n.l-i;return u>180?u-=360:-180>u&&(u+=360),function(t){return un(e+u*t,r+a*t,i+o*t)+""}},d3.interpolateLab=function(t,n){t=d3.lab(t),n=d3.lab(n);var e=t.l,r=t.a,i=t.b,u=n.l-e,a=n.a-r,o=n.b-i;return function(t){return fn(e+u*t,r+a*t,i+o*t)+""}},d3.interpolateHcl=function(t,n){t=d3.hcl(t),n=d3.hcl(n);var e=t.h,r=t.c,i=t.l,u=n.h-e,a=n.c-r,o=n.l-i;return u>180?u-=360:-180>u&&(u+=360),function(t){return cn(e+u*t,r+a*t,i+o*t)+""}},d3.interpolateArray=function(t,n){var e,r=[],i=[],u=t.length,a=n.length,o=Math.min(t.length,n.length);for(e=0;o>e;++e)r.push(d3.interpolate(t[e],n[e]));for(;u>e;++e)i[e]=t[e];for(;a>e;++e)i[e]=n[e];return function(t){for(e=0;o>e;++e)i[e]=r[e](t);return i}},d3.interpolateObject=function(t,n){var e,r={},i={};for(e in t)e in n?r[e]=V(e)(t[e],n[e]):i[e]=t[e];for(e in n)e in t||(i[e]=n[e]);return function(t){for(e in r)i[e]=r[e](t);return i}};var ia=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;d3.interpolators=[d3.interpolateObject,function(t,n){return n instanceof Array&&d3.interpolateArray(t,n)},function(t,n){return("string"==typeof t||"string"==typeof n)&&d3.interpolateString(t+"",n+"")},function(t,n){return("string"==typeof n?aa.has(n)||/^(#|rgb\(|hsl\()/.test(n):n instanceof B)&&d3.interpolateRgb(t,n)},function(t,n){return!isNaN(t=+t)&&!isNaN(n=+n)&&d3.interpolateNumber(t,n)}],B.prototype.toString=function(){return this.rgb()+""},d3.rgb=function(t,n,e){return 1===arguments.length?t instanceof 
J?$(t.r,t.g,t.b):K(""+t,$,un):$(~~t,~~n,~~e)};var ua=J.prototype=new B;ua.brighter=function(t){t=Math.pow(.7,arguments.length?t:1);var n=this.r,e=this.g,r=this.b,i=30;return n||e||r?(n&&i>n&&(n=i),e&&i>e&&(e=i),r&&i>r&&(r=i),$(Math.min(255,Math.floor(n/t)),Math.min(255,Math.floor(e/t)),Math.min(255,Math.floor(r/t)))):$(i,i,i)},ua.darker=function(t){return t=Math.pow(.7,arguments.length?t:1),$(Math.floor(t*this.r),Math.floor(t*this.g),Math.floor(t*this.b))
},ua.hsl=function(){return W(this.r,this.g,this.b)},ua.toString=function(){return"#"+G(this.r)+G(this.g)+G(this.b)};var aa=d3.map({aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyan:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400",darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc",ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavender:"#e6e6fa",lavenderblush:"#fff0f5",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a",lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnightblue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",steelblue:"#4682b4",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",tomato:"#ff6347",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32"});aa.forEach(function(t,n){aa.set(t,K(n,$,un))}),d3.hsl=function(t,n,e){return 1===arguments.length?t instanceof rn?en(t.h,t.s,t.l):K(""+t,W,en):en(+t,+n,+e)};var oa=rn.prototype=new B;oa.brighter=function(t){return t=Math.pow(.7,arguments.length?t:1),en(this.h,this.s,this.l/t)},oa.darker=function(t){return t=Math.pow(.7,arguments.length?t:1),en(this.h,this.s,t*this.l)},oa.rgb=function(){return un(this.h,this.s,this.l)},d3.hcl=function(t,n,e){return 1===arguments.length?t instanceof 
on?an(t.h,t.c,t.l):t instanceof sn?hn(t.l,t.a,t.b):hn((t=Q((t=d3.rgb(t)).r,t.g,t.b)).l,t.a,t.b):an(+t,+n,+e)};var ca=on.prototype=new B;ca.brighter=function(t){return an(this.h,this.c,Math.min(100,this.l+la*(arguments.length?t:1)))},ca.darker=function(t){return an(this.h,this.c,Math.max(0,this.l-la*(arguments.length?t:1)))},ca.rgb=function(){return cn(this.h,this.c,this.l).rgb()},d3.lab=function(t,n,e){return 1===arguments.length?t instanceof sn?ln(t.l,t.a,t.b):t instanceof on?cn(t.l,t.c,t.h):Q((t=d3.rgb(t)).r,t.g,t.b):ln(+t,+n,+e)};var la=18,sa=.95047,fa=1,ha=1.08883,da=sn.prototype=new B;da.brighter=function(t){return ln(Math.min(100,this.l+la*(arguments.length?t:1)),this.a,this.b)},da.darker=function(t){return ln(Math.max(0,this.l-la*(arguments.length?t:1)),this.a,this.b)},da.rgb=function(){return fn(this.l,this.a,this.b)};var ga=function(t,n){return n.querySelector(t)},pa=function(t,n){return n.querySelectorAll(t)},ma=document.documentElement,va=ma.matchesSelector||ma.webkitMatchesSelector||ma.mozMatchesSelector||ma.msMatchesSelector||ma.oMatchesSelector,ya=function(t,n){return va.call(t,n)};"function"==typeof Sizzle&&(ga=function(t,n){return Sizzle(t,n)[0]||null},pa=function(t,n){return Sizzle.uniqueSort(Sizzle(t,n))},ya=Sizzle.matchesSelector);var Ma=[];d3.selection=function(){return ba},d3.selection.prototype=Ma,Ma.select=function(t){var n,e,r,i,u=[];"function"!=typeof t&&(t=vn(t));for(var a=-1,o=this.length;o>++a;){u.push(n=[]),n.parentNode=(r=this[a]).parentNode;for(var c=-1,l=r.length;l>++c;)(i=r[c])?(n.push(e=t.call(i,i.__data__,c)),e&&"__data__"in i&&(e.__data__=i.__data__)):n.push(null)}return mn(u)},Ma.selectAll=function(t){var n,e,r=[];"function"!=typeof t&&(t=yn(t));for(var i=-1,u=this.length;u>++i;)for(var a=this[i],o=-1,c=a.length;c>++o;)(e=a[o])&&(r.push(n=Yu(t.call(e,e.__data__,o))),n.parentNode=e);return mn(r)},Ma.attr=function(t,n){if(2>arguments.length){if("string"==typeof t){var e=this.node();return t=d3.ns.qualify(t),t.local?e.getAttributeNS(t.space,t.local):e.getAttribute(t)}for(n in t)this.each(Mn(n,t[n]));return this}return this.each(Mn(t,n))},Ma.classed=function(t,n){if(2>arguments.length){if("string"==typeof t){var e=this.node(),r=(t=t.trim().split(/^|\s+/g)).length,i=-1;if(n=e.classList){for(;r>++i;)if(!n.contains(t[i]))return!1}else for(n=e.className,null!=n.baseVal&&(n=n.baseVal);r>++i;)if(!bn(t[i]).test(n))return!1;return!0}for(n in t)this.each(xn(n,t[n]));return this}return this.each(xn(t,n))},Ma.style=function(t,n,e){var r=arguments.length;if(3>r){if("string"!=typeof t){2>r&&(n="");for(e in t)this.each(wn(e,t[e],n));return this}if(2>r)return getComputedStyle(this.node(),null).getPropertyValue(t);e=""}return this.each(wn(t,n,e))},Ma.property=function(t,n){if(2>arguments.length){if("string"==typeof t)return this.node()[t];for(n in t)this.each(Sn(n,t[n]));return this}return this.each(Sn(t,n))},Ma.text=function(t){return arguments.length?this.each("function"==typeof t?function(){var n=t.apply(this,arguments);this.textContent=null==n?"":n}:null==t?function(){this.textContent=""}:function(){this.textContent=t}):this.node().textContent},Ma.html=function(t){return arguments.length?this.each("function"==typeof t?function(){var n=t.apply(this,arguments);this.innerHTML=null==n?"":n}:null==t?function(){this.innerHTML=""}:function(){this.innerHTML=t}):this.node().innerHTML},Ma.append=function(t){function n(){return this.appendChild(document.createElementNS(this.namespaceURI,t))}function e(){return this.appendChild(document.createElementNS(t.space,t.local))}return 
t=d3.ns.qualify(t),this.select(t.local?e:n)},Ma.insert=function(t,n){function e(){return this.insertBefore(document.createElementNS(this.namespaceURI,t),ga(n,this))}function r(){return this.insertBefore(document.createElementNS(t.space,t.local),ga(n,this))}return t=d3.ns.qualify(t),this.select(t.local?r:e)},Ma.remove=function(){return this.each(function(){var t=this.parentNode;t&&t.removeChild(this)})},Ma.data=function(t,n){function e(t,e){var r,i,a,o=t.length,f=e.length,h=Math.min(o,f),d=Array(f),g=Array(f),p=Array(o);if(n){var m,v=new u,y=new u,M=[];for(r=-1;o>++r;)m=n.call(i=t[r],i.__data__,r),v.has(m)?p[r]=i:v.set(m,i),M.push(m);for(r=-1;f>++r;)m=n.call(e,a=e[r],r),(i=v.get(m))?(d[r]=i,i.__data__=a):y.has(m)||(g[r]=kn(a)),y.set(m,a),v.remove(m);for(r=-1;o>++r;)v.has(M[r])&&(p[r]=t[r])}else{for(r=-1;h>++r;)i=t[r],a=e[r],i?(i.__data__=a,d[r]=i):g[r]=kn(a);for(;f>r;++r)g[r]=kn(e[r]);for(;o>r;++r)p[r]=t[r]}g.update=d,g.parentNode=d.parentNode=p.parentNode=t.parentNode,c.push(g),l.push(d),s.push(p)}var r,i,a=-1,o=this.length;if(!arguments.length){for(t=Array(o=(r=this[0]).length);o>++a;)(i=r[a])&&(t[a]=i.__data__);return t}var c=qn([]),l=mn([]),s=mn([]);if("function"==typeof t)for(;o>++a;)e(r=this[a],t.call(r,r.parentNode.__data__,a));else for(;o>++a;)e(r=this[a],t);return l.enter=function(){return c},l.exit=function(){return s},l},Ma.datum=function(t){return arguments.length?this.property("__data__",t):this.property("__data__")},Ma.filter=function(t){var n,e,r,i=[];"function"!=typeof t&&(t=En(t));for(var u=0,a=this.length;a>u;u++){i.push(n=[]),n.parentNode=(e=this[u]).parentNode;for(var o=0,c=e.length;c>o;o++)(r=e[o])&&t.call(r,r.__data__,o)&&n.push(r)}return mn(i)},Ma.order=function(){for(var t=-1,n=this.length;n>++t;)for(var e,r=this[t],i=r.length-1,u=r[i];--i>=0;)(e=r[i])&&(u&&u!==e.nextSibling&&u.parentNode.insertBefore(e,u),u=e);return this},Ma.sort=function(t){t=An.apply(this,arguments);for(var n=-1,e=this.length;e>++n;)this[n].sort(t);return this.order()},Ma.on=function(t,n,e){var r=arguments.length;if(3>r){if("string"!=typeof t){2>r&&(n=!1);for(e in t)this.each(Nn(e,t[e],n));return this}if(2>r)return(r=this.node()["__on"+t])&&r._;e=!1}return this.each(Nn(t,n,e))},Ma.each=function(t){return Tn(this,function(n,e,r){t.call(n,n.__data__,e,r)})},Ma.call=function(t){var n=Yu(arguments);return t.apply(n[0]=this,n),this},Ma.empty=function(){return!this.node()},Ma.node=function(){for(var t=0,n=this.length;n>t;t++)for(var e=this[t],r=0,i=e.length;i>r;r++){var u=e[r];if(u)return u}return null},Ma.transition=function(){var t,n,e=_a||++Sa,r=[],i=Object.create(ka);i.time=Date.now();for(var u=-1,a=this.length;a>++u;){r.push(t=[]);for(var o=this[u],c=-1,l=o.length;l>++c;)(n=o[c])&&zn(n,c,e,i),t.push(n)}return Cn(r,e)};var ba=mn([[document]]);ba[0].parentNode=ma,d3.select=function(t){return"string"==typeof t?ba.select(t):mn([[t]])},d3.selectAll=function(t){return"string"==typeof t?ba.selectAll(t):mn([Yu(t)])};var xa=[];d3.selection.enter=qn,d3.selection.enter.prototype=xa,xa.append=Ma.append,xa.insert=Ma.insert,xa.empty=Ma.empty,xa.node=Ma.node,xa.select=function(t){for(var n,e,r,i,u,a=[],o=-1,c=this.length;c>++o;){r=(i=this[o]).update,a.push(n=[]),n.parentNode=i.parentNode;for(var l=-1,s=i.length;s>++l;)(u=i[l])?(n.push(r[l]=e=t.call(i.parentNode,u.__data__,l)),e.__data__=u.__data__):n.push(null)}return mn(a)};var _a,wa=[],Sa=0,ka={ease:T,delay:0,duration:250};wa.call=Ma.call,wa.empty=Ma.empty,wa.node=Ma.node,d3.transition=function(t){return 
arguments.length?_a?t.transition():t:ba.transition()},d3.transition.prototype=wa,wa.select=function(t){var n,e,r,i=this.id,u=[];"function"!=typeof t&&(t=vn(t));for(var a=-1,o=this.length;o>++a;){u.push(n=[]);for(var c=this[a],l=-1,s=c.length;s>++l;)(r=c[l])&&(e=t.call(r,r.__data__,l))?("__data__"in r&&(e.__data__=r.__data__),zn(e,l,i,r.__transition__[i]),n.push(e)):n.push(null)}return Cn(u,i)},wa.selectAll=function(t){var n,e,r,i,u,a=this.id,o=[];"function"!=typeof t&&(t=yn(t));for(var c=-1,l=this.length;l>++c;)for(var s=this[c],f=-1,h=s.length;h>++f;)if(r=s[f]){u=r.__transition__[a],e=t.call(r,r.__data__,f),o.push(n=[]);for(var d=-1,g=e.length;g>++d;)zn(i=e[d],d,a,u),n.push(i)}return Cn(o,a)},wa.filter=function(t){var n,e,r,i=[];"function"!=typeof t&&(t=En(t));for(var u=0,a=this.length;a>u;u++){i.push(n=[]);for(var e=this[u],o=0,c=e.length;c>o;o++)(r=e[o])&&t.call(r,r.__data__,o)&&n.push(r)}return Cn(i,this.id,this.time).ease(this.ease())},wa.attr=function(t,n){function e(){this.removeAttribute(u)}function r(){this.removeAttributeNS(u.space,u.local)}if(2>arguments.length){for(n in t)this.attr(n,t[n]);return this}var i=V(t),u=d3.ns.qualify(t);return Ln(this,"attr."+t,n,function(t){function n(){var n,e=this.getAttribute(u);return e!==t&&(n=i(e,t),function(t){this.setAttribute(u,n(t))})}function a(){var n,e=this.getAttributeNS(u.space,u.local);return e!==t&&(n=i(e,t),function(t){this.setAttributeNS(u.space,u.local,n(t))})}return null==t?u.local?r:e:(t+="",u.local?a:n)})},wa.attrTween=function(t,n){function e(t,e){var r=n.call(this,t,e,this.getAttribute(i));return r&&function(t){this.setAttribute(i,r(t))}}function r(t,e){var r=n.call(this,t,e,this.getAttributeNS(i.space,i.local));return r&&function(t){this.setAttributeNS(i.space,i.local,r(t))}}var i=d3.ns.qualify(t);return this.tween("attr."+t,i.local?r:e)},wa.style=function(t,n,e){function r(){this.style.removeProperty(t)}var i=arguments.length;if(3>i){if("string"!=typeof t){2>i&&(n="");for(e in t)this.style(e,t[e],n);return this}e=""}var u=V(t);return Ln(this,"style."+t,n,function(n){function i(){var r,i=getComputedStyle(this,null).getPropertyValue(t);return i!==n&&(r=u(i,n),function(n){this.style.setProperty(t,r(n),e)})}return null==n?r:(n+="",i)})},wa.styleTween=function(t,n,e){return 3>arguments.length&&(e=""),this.tween("style."+t,function(r,i){var u=n.call(this,r,i,getComputedStyle(this,null).getPropertyValue(t));return u&&function(n){this.style.setProperty(t,u(n),e)}})},wa.text=function(t){return Ln(this,"text",t,Dn)},wa.remove=function(){return this.each("end.transition",function(){var t;!this.__transition__&&(t=this.parentNode)&&t.removeChild(this)})},wa.ease=function(t){var n=this.id;return 1>arguments.length?this.node().__transition__[n].ease:("function"!=typeof t&&(t=d3.ease.apply(d3,arguments)),Tn(this,function(e){e.__transition__[n].ease=t}))},wa.delay=function(t){var n=this.id;return Tn(this,"function"==typeof t?function(e,r,i){e.__transition__[n].delay=0|t.call(e,e.__data__,r,i)}:(t|=0,function(e){e.__transition__[n].delay=t}))},wa.duration=function(t){var n=this.id;return Tn(this,"function"==typeof t?function(e,r,i){e.__transition__[n].duration=Math.max(1,0|t.call(e,e.__data__,r,i))}:(t=Math.max(1,0|t),function(e){e.__transition__[n].duration=t}))},wa.each=function(t,n){var e=this.id;if(2>arguments.length){var r=ka,i=_a;_a=e,Tn(this,function(n,r,i){ka=n.__transition__[e],t.call(n,n.__data__,r,i)}),ka=r,_a=i}else Tn(this,function(r){r.__transition__[e].event.on(t,n)});return this},wa.transition=function(){for(var 
t,n,e,r,i=this.id,u=++Sa,a=[],o=0,c=this.length;c>o;o++){a.push(t=[]);for(var n=this[o],l=0,s=n.length;s>l;l++)(e=n[l])&&(r=Object.create(e.__transition__[i]),r.delay+=r.duration,zn(e,l,u,r)),t.push(e)}return Cn(a,u)},wa.tween=function(t,n){var e=this.id;return 2>arguments.length?this.node().__transition__[e].tween.get(t):Tn(this,null==n?function(n){n.__transition__[e].tween.remove(t)}:function(r){r.__transition__[e].tween.set(t,n)})};var Ea,Aa,Na=0,Ta={},qa=null;d3.timer=function(t,n,e){if(3>arguments.length){if(2>arguments.length)n=0;else if(!isFinite(n))return;e=Date.now()}var r=Ta[t.id];r&&r.callback===t?(r.then=e,r.delay=n):Ta[t.id=++Na]=qa={callback:t,then:e,delay:n,next:qa},Ea||(Aa=clearTimeout(Aa),Ea=1,Ca(Fn))},d3.timer.flush=function(){for(var t,n=Date.now(),e=qa;e;)t=n-e.then,e.delay||(e.flush=e.callback(t)),e=e.next;Hn()};var Ca=window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(t){setTimeout(t,17)};d3.mouse=function(t){return Rn(t,P())};var za=/WebKit/.test(navigator.userAgent)?-1:0;d3.touches=function(t,n){return 2>arguments.length&&(n=P().touches),n?Yu(n).map(function(n){var e=Rn(t,n);return e.identifier=n.identifier,e}):[]},d3.scale={},d3.scale.linear=function(){return In([0,1],[0,1],d3.interpolate,!1)},d3.scale.log=function(){return Kn(d3.scale.linear(),Wn)};var Da=d3.format(".0e");Wn.pow=function(t){return Math.pow(10,t)},Qn.pow=function(t){return-Math.pow(10,-t)},d3.scale.pow=function(){return te(d3.scale.linear(),1)},d3.scale.sqrt=function(){return d3.scale.pow().exponent(.5)},d3.scale.ordinal=function(){return ee([],{t:"range",a:[[]]})},d3.scale.category10=function(){return d3.scale.ordinal().range(La)},d3.scale.category20=function(){return d3.scale.ordinal().range(Fa)},d3.scale.category20b=function(){return d3.scale.ordinal().range(Ha)},d3.scale.category20c=function(){return d3.scale.ordinal().range(Ra)};var La=["#1f77b4","#ff7f0e","#2ca02c","#d62728","#9467bd","#8c564b","#e377c2","#7f7f7f","#bcbd22","#17becf"],Fa=["#1f77b4","#aec7e8","#ff7f0e","#ffbb78","#2ca02c","#98df8a","#d62728","#ff9896","#9467bd","#c5b0d5","#8c564b","#c49c94","#e377c2","#f7b6d2","#7f7f7f","#c7c7c7","#bcbd22","#dbdb8d","#17becf","#9edae5"],Ha=["#393b79","#5254a3","#6b6ecf","#9c9ede","#637939","#8ca252","#b5cf6b","#cedb9c","#8c6d31","#bd9e39","#e7ba52","#e7cb94","#843c39","#ad494a","#d6616b","#e7969c","#7b4173","#a55194","#ce6dbd","#de9ed6"],Ra=["#3182bd","#6baed6","#9ecae1","#c6dbef","#e6550d","#fd8d3c","#fdae6b","#fdd0a2","#31a354","#74c476","#a1d99b","#c7e9c0","#756bb1","#9e9ac8","#bcbddc","#dadaeb","#636363","#969696","#bdbdbd","#d9d9d9"];d3.scale.quantile=function(){return re([],[])},d3.scale.quantize=function(){return ie(0,1,[0,1])},d3.scale.threshold=function(){return ue([.5],[0,1])},d3.scale.identity=function(){return ae([0,1])},d3.svg={},d3.svg.arc=function(){function t(){var t=n.apply(this,arguments),u=e.apply(this,arguments),a=r.apply(this,arguments)+Pa,o=i.apply(this,arguments)+Pa,c=(a>o&&(c=a,a=o,o=c),o-a),l=Ru>c?"0":"1",s=Math.cos(a),f=Math.sin(a),h=Math.cos(o),d=Math.sin(o);return c>=ja?t?"M0,"+u+"A"+u+","+u+" 0 1,1 0,"+-u+"A"+u+","+u+" 0 1,1 0,"+u+"M0,"+t+"A"+t+","+t+" 0 1,0 0,"+-t+"A"+t+","+t+" 0 1,0 0,"+t+"Z":"M0,"+u+"A"+u+","+u+" 0 1,1 0,"+-u+"A"+u+","+u+" 0 1,1 0,"+u+"Z":t?"M"+u*s+","+u*f+"A"+u+","+u+" 0 "+l+",1 "+u*h+","+u*d+"L"+t*h+","+t*d+"A"+t+","+t+" 0 "+l+",0 "+t*s+","+t*f+"Z":"M"+u*s+","+u*f+"A"+u+","+u+" 0 "+l+",1 "+u*h+","+u*d+"L0,0"+"Z"}var 
n=oe,e=ce,r=le,i=se;return t.innerRadius=function(e){return arguments.length?(n=c(e),t):n},t.outerRadius=function(n){return arguments.length?(e=c(n),t):e},t.startAngle=function(n){return arguments.length?(r=c(n),t):r},t.endAngle=function(n){return arguments.length?(i=c(n),t):i},t.centroid=function(){var t=(n.apply(this,arguments)+e.apply(this,arguments))/2,u=(r.apply(this,arguments)+i.apply(this,arguments))/2+Pa;return[Math.cos(u)*t,Math.sin(u)*t]},t};var Pa=-Ru/2,ja=2*Ru-1e-6;d3.svg.line=function(){return fe(a)};var Oa=d3.map({linear:ge,"linear-closed":pe,"step-before":me,"step-after":ve,basis:we,"basis-open":Se,"basis-closed":ke,bundle:Ee,cardinal:be,"cardinal-open":ye,"cardinal-closed":Me,monotone:ze});Oa.forEach(function(t,n){n.key=t,n.closed=/-closed$/.test(t)});var Ya=[0,2/3,1/3,0],Ua=[0,1/3,2/3,0],Ia=[0,1/6,2/3,1/6];d3.svg.line.radial=function(){var t=fe(De);return t.radius=t.x,delete t.x,t.angle=t.y,delete t.y,t},me.reverse=ve,ve.reverse=me,d3.svg.area=function(){return Le(a)},d3.svg.area.radial=function(){var t=Le(De);return t.radius=t.x,delete t.x,t.innerRadius=t.x0,delete t.x0,t.outerRadius=t.x1,delete t.x1,t.angle=t.y,delete t.y,t.startAngle=t.y0,delete t.y0,t.endAngle=t.y1,delete t.y1,t},d3.svg.chord=function(){function e(t,n){var e=r(this,o,t,n),c=r(this,l,t,n);return"M"+e.p0+u(e.r,e.p1,e.a1-e.a0)+(i(e,c)?a(e.r,e.p1,e.r,e.p0):a(e.r,e.p1,c.r,c.p0)+u(c.r,c.p1,c.a1-c.a0)+a(c.r,c.p1,e.r,e.p0))+"Z"}function r(t,n,e,r){var i=n.call(t,e,r),u=s.call(t,i,r),a=f.call(t,i,r)+Pa,o=h.call(t,i,r)+Pa;return{r:u,a0:a,a1:o,p0:[u*Math.cos(a),u*Math.sin(a)],p1:[u*Math.cos(o),u*Math.sin(o)]}}function i(t,n){return t.a0==n.a0&&t.a1==n.a1}function u(t,n,e){return"A"+t+","+t+" 0 "+ +(e>Ru)+",1 "+n}function a(t,n,e,r){return"Q 0,0 "+r}var o=n,l=t,s=Fe,f=le,h=se;return e.radius=function(t){return arguments.length?(s=c(t),e):s},e.source=function(t){return arguments.length?(o=c(t),e):o},e.target=function(t){return arguments.length?(l=c(t),e):l},e.startAngle=function(t){return arguments.length?(f=c(t),e):f},e.endAngle=function(t){return arguments.length?(h=c(t),e):h},e},d3.svg.diagonal=function(){function e(t,n){var e=r.call(this,t,n),a=i.call(this,t,n),o=(e.y+a.y)/2,c=[e,{x:e.x,y:o},{x:a.x,y:o},a];return c=c.map(u),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var r=n,i=t,u=He;return e.source=function(t){return arguments.length?(r=c(t),e):r},e.target=function(t){return arguments.length?(i=c(t),e):i},e.projection=function(t){return arguments.length?(u=t,e):u},e},d3.svg.diagonal.radial=function(){var t=d3.svg.diagonal(),n=He,e=t.projection;return t.projection=function(t){return arguments.length?e(Re(n=t)):n},t},d3.svg.symbol=function(){function t(t,r){return(Va.get(n.call(this,t,r))||Oe)(e.call(this,t,r))}var n=je,e=Pe;return t.type=function(e){return arguments.length?(n=c(e),t):n},t.size=function(n){return arguments.length?(e=c(n),t):e},t};var Va=d3.map({circle:Oe,cross:function(t){var n=Math.sqrt(t/5)/2;return"M"+-3*n+","+-n+"H"+-n+"V"+-3*n+"H"+n+"V"+-n+"H"+3*n+"V"+n+"H"+n+"V"+3*n+"H"+-n+"V"+n+"H"+-3*n+"Z"},diamond:function(t){var n=Math.sqrt(t/(2*Za)),e=n*Za;return"M0,"+-n+"L"+e+",0"+" 0,"+n+" "+-e+",0"+"Z"},square:function(t){var n=Math.sqrt(t)/2;return"M"+-n+","+-n+"L"+n+","+-n+" "+n+","+n+" "+-n+","+n+"Z"},"triangle-down":function(t){var n=Math.sqrt(t/Xa),e=n*Xa/2;return"M0,"+e+"L"+n+","+-e+" "+-n+","+-e+"Z"},"triangle-up":function(t){var n=Math.sqrt(t/Xa),e=n*Xa/2;return"M0,"+-e+"L"+n+","+e+" "+-n+","+e+"Z"}});d3.svg.symbolTypes=Va.keys();var 
Xa=Math.sqrt(3),Za=Math.tan(30*ju);d3.svg.axis=function(){function t(t){t.each(function(){var t,f=d3.select(this),h=null==l?e.ticks?e.ticks.apply(e,c):e.domain():l,d=null==n?e.tickFormat?e.tickFormat.apply(e,c):String:n,g=Ie(e,h,s),p=f.selectAll(".minor").data(g,String),m=p.enter().insert("line","g").attr("class","tick minor").style("opacity",1e-6),v=d3.transition(p.exit()).style("opacity",1e-6).remove(),y=d3.transition(p).style("opacity",1),M=f.selectAll("g").data(h,String),b=M.enter().insert("g","path").style("opacity",1e-6),x=d3.transition(M.exit()).style("opacity",1e-6).remove(),_=d3.transition(M).style("opacity",1),w=On(e),S=f.selectAll(".domain").data([0]),k=d3.transition(S),E=e.copy(),A=this.__chart__||E;this.__chart__=E,S.enter().append("path").attr("class","domain"),b.append("line").attr("class","tick"),b.append("text");var N=b.select("line"),T=_.select("line"),q=M.select("text").text(d),C=b.select("text"),z=_.select("text");switch(r){case"bottom":t=Ye,m.attr("y2",u),y.attr("x2",0).attr("y2",u),N.attr("y2",i),C.attr("y",Math.max(i,0)+o),T.attr("x2",0).attr("y2",i),z.attr("x",0).attr("y",Math.max(i,0)+o),q.attr("dy",".71em").style("text-anchor","middle"),k.attr("d","M"+w[0]+","+a+"V0H"+w[1]+"V"+a);break;case"top":t=Ye,m.attr("y2",-u),y.attr("x2",0).attr("y2",-u),N.attr("y2",-i),C.attr("y",-(Math.max(i,0)+o)),T.attr("x2",0).attr("y2",-i),z.attr("x",0).attr("y",-(Math.max(i,0)+o)),q.attr("dy","0em").style("text-anchor","middle"),k.attr("d","M"+w[0]+","+-a+"V0H"+w[1]+"V"+-a);break;case"left":t=Ue,m.attr("x2",-u),y.attr("x2",-u).attr("y2",0),N.attr("x2",-i),C.attr("x",-(Math.max(i,0)+o)),T.attr("x2",-i).attr("y2",0),z.attr("x",-(Math.max(i,0)+o)).attr("y",0),q.attr("dy",".32em").style("text-anchor","end"),k.attr("d","M"+-a+","+w[0]+"H0V"+w[1]+"H"+-a);break;case"right":t=Ue,m.attr("x2",u),y.attr("x2",u).attr("y2",0),N.attr("x2",i),C.attr("x",Math.max(i,0)+o),T.attr("x2",i).attr("y2",0),z.attr("x",Math.max(i,0)+o).attr("y",0),q.attr("dy",".32em").style("text-anchor","start"),k.attr("d","M"+a+","+w[0]+"H0V"+w[1]+"H"+a)}if(e.ticks)b.call(t,A),_.call(t,E),x.call(t,E),m.call(t,A),y.call(t,E),v.call(t,E);else{var D=E.rangeBand()/2,L=function(t){return E(t)+D};b.call(t,L),_.call(t,L)}})}var n,e=d3.scale.linear(),r="bottom",i=6,u=6,a=6,o=3,c=[10],l=null,s=0;return t.scale=function(n){return arguments.length?(e=n,t):e},t.orient=function(n){return arguments.length?(r=n,t):r},t.ticks=function(){return arguments.length?(c=arguments,t):c},t.tickValues=function(n){return arguments.length?(l=n,t):l},t.tickFormat=function(e){return arguments.length?(n=e,t):n},t.tickSize=function(n,e){if(!arguments.length)return i;var r=arguments.length-1;return i=+n,u=r>1?+e:i,a=r>0?+arguments[r]:i,t},t.tickPadding=function(n){return arguments.length?(o=+n,t):o},t.tickSubdivide=function(n){return arguments.length?(s=+n,t):s},t},d3.svg.brush=function(){function t(u){u.each(function(){var u,a=d3.select(this),s=a.selectAll(".background").data([0]),f=a.selectAll(".extent").data([0]),h=a.selectAll(".resize").data(l,String);a.style("pointer-events","all").on("mousedown.brush",i).on("touchstart.brush",i),s.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),f.enter().append("rect").attr("class","extent").style("cursor","move"),h.enter().append("g").attr("class",function(t){return"resize "+t}).style("cursor",function(t){return 
Ba[t]}).append("rect").attr("x",function(t){return/[ew]$/.test(t)?-3:null}).attr("y",function(t){return/^[ns]/.test(t)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),h.style("display",t.empty()?"none":null),h.exit().remove(),o&&(u=On(o),s.attr("x",u[0]).attr("width",u[1]-u[0]),e(a)),c&&(u=On(c),s.attr("y",u[0]).attr("height",u[1]-u[0]),r(a)),n(a)})}function n(t){t.selectAll(".resize").attr("transform",function(t){return"translate("+s[+/e$/.test(t)][0]+","+s[+/^s/.test(t)][1]+")"})}function e(t){t.select(".extent").attr("x",s[0][0]),t.selectAll(".extent,.n>rect,.s>rect").attr("width",s[1][0]-s[0][0])}function r(t){t.select(".extent").attr("y",s[0][1]),t.selectAll(".extent,.e>rect,.w>rect").attr("height",s[1][1]-s[0][1])}function i(){function i(){var t=d3.event.changedTouches;return t?d3.touches(v,t)[0]:d3.mouse(v)}function l(){32==d3.event.keyCode&&(S||(p=null,k[0]-=s[1][0],k[1]-=s[1][1],S=2),R())}function f(){32==d3.event.keyCode&&2==S&&(k[0]+=s[1][0],k[1]+=s[1][1],S=0,R())}function h(){var t=i(),u=!1;m&&(t[0]+=m[0],t[1]+=m[1]),S||(d3.event.altKey?(p||(p=[(s[0][0]+s[1][0])/2,(s[0][1]+s[1][1])/2]),k[0]=s[+(t[0]<p[0])][0],k[1]=s[+(t[1]<p[1])][1]):p=null),_&&d(t,o,0)&&(e(b),u=!0),w&&d(t,c,1)&&(r(b),u=!0),u&&(n(b),M({type:"brush",mode:S?"move":"resize"}))}function d(t,n,e){var r,i,a=On(n),o=a[0],c=a[1],l=k[e],f=s[1][e]-s[0][e];return S&&(o-=l,c-=f+l),r=Math.max(o,Math.min(c,t[e])),S?i=(r+=l)+f:(p&&(l=Math.max(o,Math.min(c,2*p[e]-r))),r>l?(i=r,r=l):i=l),s[0][e]!==r||s[1][e]!==i?(u=null,s[0][e]=r,s[1][e]=i,!0):void 0}function g(){h(),b.style("pointer-events","all").selectAll(".resize").style("display",t.empty()?"none":null),d3.select("body").style("cursor",null),E.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),M({type:"brushend"}),R()}var p,m,v=this,y=d3.select(d3.event.target),M=a.of(v,arguments),b=d3.select(v),x=y.datum(),_=!/^(n|s)$/.test(x)&&o,w=!/^(e|w)$/.test(x)&&c,S=y.classed("extent"),k=i(),E=d3.select(window).on("mousemove.brush",h).on("mouseup.brush",g).on("touchmove.brush",h).on("touchend.brush",g).on("keydown.brush",l).on("keyup.brush",f);if(S)k[0]=s[0][0]-k[0],k[1]=s[0][1]-k[1];else if(x){var A=+/w$/.test(x),N=+/^n/.test(x);m=[s[1-A][0]-k[0],s[1-N][1]-k[1]],k[0]=s[A][0],k[1]=s[N][1]}else d3.event.altKey&&(p=k.slice());b.style("pointer-events","none").selectAll(".resize").style("display",null),d3.select("body").style("cursor",y.style("cursor")),M({type:"brushstart"}),h(),R()}var u,a=j(t,"brushstart","brush","brushend"),o=null,c=null,l=$a[0],s=[[0,0],[0,0]];return t.x=function(n){return arguments.length?(o=n,l=$a[!o<<1|!c],t):o},t.y=function(n){return arguments.length?(c=n,l=$a[!o<<1|!c],t):c},t.extent=function(n){var e,r,i,a,l;return arguments.length?(u=[[0,0],[0,0]],o&&(e=n[0],r=n[1],c&&(e=e[0],r=r[0]),u[0][0]=e,u[1][0]=r,o.invert&&(e=o(e),r=o(r)),e>r&&(l=e,e=r,r=l),s[0][0]=0|e,s[1][0]=0|r),c&&(i=n[0],a=n[1],o&&(i=i[1],a=a[1]),u[0][1]=i,u[1][1]=a,c.invert&&(i=c(i),a=c(a)),i>a&&(l=i,i=a,a=l),s[0][1]=0|i,s[1][1]=0|a),t):(n=u||s,o&&(e=n[0][0],r=n[1][0],u||(e=s[0][0],r=s[1][0],o.invert&&(e=o.invert(e),r=o.invert(r)),e>r&&(l=e,e=r,r=l))),c&&(i=n[0][1],a=n[1][1],u||(i=s[0][1],a=s[1][1],c.invert&&(i=c.invert(i),a=c.invert(a)),i>a&&(l=i,i=a,a=l))),o&&c?[[e,i],[r,a]]:o?[e,r]:c&&[i,a])},t.clear=function(){return u=null,s[0][0]=s[0][1]=s[1][0]=s[1][1]=0,t},t.empty=function(){return o&&s[0][0]===s[1][0]||c&&s[0][1]===s[1][1]},d3.rebind(t,a,"on")};var 
Ba={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},$a=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]];d3.behavior={},d3.behavior.drag=function(){function t(){this.on("mousedown.drag",n).on("touchstart.drag",n)}function n(){function t(){var t=o.parentNode;return null!=s?d3.touches(t).filter(function(t){return t.identifier===s})[0]:d3.mouse(t)}function n(){if(!o.parentNode)return i();var n=t(),e=n[0]-f[0],r=n[1]-f[1];h|=e|r,f=n,R(),c({type:"drag",x:n[0]+a[0],y:n[1]+a[1],dx:e,dy:r})}function i(){c({type:"dragend"}),h&&(R(),d3.event.target===l&&d.on("click.drag",u,!0)),d.on(null!=s?"touchmove.drag-"+s:"mousemove.drag",null).on(null!=s?"touchend.drag-"+s:"mouseup.drag",null)}function u(){R(),d.on("click.drag",null)}var a,o=this,c=e.of(o,arguments),l=d3.event.target,s=d3.event.touches?d3.event.changedTouches[0].identifier:null,f=t(),h=0,d=d3.select(window).on(null!=s?"touchmove.drag-"+s:"mousemove.drag",n).on(null!=s?"touchend.drag-"+s:"mouseup.drag",i,!0);r?(a=r.apply(o,arguments),a=[a.x-f[0],a.y-f[1]]):a=[0,0],null==s&&R(),c({type:"dragstart"})}var e=j(t,"drag","dragstart","dragend"),r=null;return t.origin=function(n){return arguments.length?(r=n,t):r},d3.rebind(t,e,"on")},d3.behavior.zoom=function(){function t(){this.on("mousedown.zoom",o).on("mousewheel.zoom",c).on("mousemove.zoom",l).on("DOMMouseScroll.zoom",c).on("dblclick.zoom",s).on("touchstart.zoom",f).on("touchmove.zoom",h).on("touchend.zoom",f)}function n(t){return[(t[0]-b[0])/x,(t[1]-b[1])/x]}function e(t){return[t[0]*x+b[0],t[1]*x+b[1]]}function r(t){x=Math.max(_[0],Math.min(_[1],t))}function i(t,n){n=e(n),b[0]+=t[0]-n[0],b[1]+=t[1]-n[1]}function u(){m&&m.domain(p.range().map(function(t){return(t-b[0])/x}).map(p.invert)),y&&y.domain(v.range().map(function(t){return(t-b[1])/x}).map(v.invert))}function a(t){u(),d3.event.preventDefault(),t({type:"zoom",scale:x,translate:b})}function o(){function t(){l=1,i(d3.mouse(u),f),a(o)}function e(){l&&R(),s.on("mousemove.zoom",null).on("mouseup.zoom",null),l&&d3.event.target===c&&s.on("click.zoom",r,!0)}function r(){R(),s.on("click.zoom",null)}var u=this,o=w.of(u,arguments),c=d3.event.target,l=0,s=d3.select(window).on("mousemove.zoom",t).on("mouseup.zoom",e),f=n(d3.mouse(u));window.focus(),R()}function c(){d||(d=n(d3.mouse(this))),r(Math.pow(2,.002*Ve())*x),i(d3.mouse(this),d),a(w.of(this,arguments))}function l(){d=null}function s(){var t=d3.mouse(this),e=n(t),u=Math.log(x)/Math.LN2;r(Math.pow(2,d3.event.shiftKey?Math.ceil(u)-1:Math.floor(u)+1)),i(t,e),a(w.of(this,arguments))}function f(){var t=d3.touches(this),e=Date.now();if(g=x,d={},t.forEach(function(t){d[t.identifier]=n(t)}),R(),1===t.length){if(500>e-M){var u=t[0],o=n(t[0]);r(2*x),i(u,o),a(w.of(this,arguments))}M=e}}function h(){var t=d3.touches(this),n=t[0],e=d[n.identifier];if(u=t[1]){var u,o=d[u.identifier];n=[(n[0]+u[0])/2,(n[1]+u[1])/2],e=[(e[0]+o[0])/2,(e[1]+o[1])/2],r(d3.event.scale*g)}i(n,e),M=null,a(w.of(this,arguments))}var d,g,p,m,v,y,M,b=[0,0],x=1,_=Ga,w=j(t,"zoom");return t.translate=function(n){return arguments.length?(b=n.map(Number),u(),t):b},t.scale=function(n){return arguments.length?(x=+n,u(),t):x},t.scaleExtent=function(n){return arguments.length?(_=null==n?Ga:n.map(Number),t):_},t.x=function(n){return arguments.length?(m=n,p=n.copy(),b=[0,0],x=1,t):m},t.y=function(n){return arguments.length?(y=n,v=n.copy(),b=[0,0],x=1,t):y},d3.rebind(t,w,"on")};var Ja,Ga=[0,1/0];d3.layout={},d3.layout.bundle=function(){return function(t){for(var 
n=[],e=-1,r=t.length;r>++e;)n.push(Xe(t[e]));return n}},d3.layout.chord=function(){function t(){var t,l,f,h,d,g={},p=[],m=d3.range(u),v=[];for(e=[],r=[],t=0,h=-1;u>++h;){for(l=0,d=-1;u>++d;)l+=i[h][d];p.push(l),v.push(d3.range(u)),t+=l}for(a&&m.sort(function(t,n){return a(p[t],p[n])}),o&&v.forEach(function(t,n){t.sort(function(t,e){return o(i[n][t],i[n][e])
})}),t=(2*Ru-s*u)/t,l=0,h=-1;u>++h;){for(f=l,d=-1;u>++d;){var y=m[h],M=v[y][d],b=i[y][M],x=l,_=l+=b*t;g[y+"-"+M]={index:y,subindex:M,startAngle:x,endAngle:_,value:b}}r[y]={index:y,startAngle:f,endAngle:l,value:(l-f)/t},l+=s}for(h=-1;u>++h;)for(d=h-1;u>++d;){var w=g[h+"-"+d],S=g[d+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&n()}function n(){e.sort(function(t,n){return c((t.source.value+t.target.value)/2,(n.source.value+n.target.value)/2)})}var e,r,i,u,a,o,c,l={},s=0;return l.matrix=function(t){return arguments.length?(u=(i=t)&&i.length,e=r=null,l):i},l.padding=function(t){return arguments.length?(s=t,e=r=null,l):s},l.sortGroups=function(t){return arguments.length?(a=t,e=r=null,l):a},l.sortSubgroups=function(t){return arguments.length?(o=t,e=null,l):o},l.sortChords=function(t){return arguments.length?(c=t,e&&n(),l):c},l.chords=function(){return e||t(),e},l.groups=function(){return r||t(),r},l},d3.layout.force=function(){function t(t){return function(n,e,r,i){if(n.point!==t){var u=n.cx-t.x,a=n.cy-t.y,o=1/Math.sqrt(u*u+a*a);if(v>(i-e)*o){var c=n.charge*o*o;return t.px-=u*c,t.py-=a*c,!0}if(n.point&&isFinite(o)){var c=n.pointCharge*o*o;t.px-=u*c,t.py-=a*c}}return!n.charge}}function n(t){t.px=d3.event.x,t.py=d3.event.y,l.resume()}var e,r,i,u,o,l={},s=d3.dispatch("start","tick","end"),f=[1,1],h=.9,d=Qe,g=tr,p=-30,m=.1,v=.8,y=[],M=[];return l.tick=function(){if(.005>(r*=.99))return s.end({type:"end",alpha:r=0}),!0;var n,e,a,c,l,d,g,v,b,x=y.length,_=M.length;for(e=0;_>e;++e)a=M[e],c=a.source,l=a.target,v=l.x-c.x,b=l.y-c.y,(d=v*v+b*b)&&(d=r*u[e]*((d=Math.sqrt(d))-i[e])/d,v*=d,b*=d,l.x-=v*(g=c.weight/(l.weight+c.weight)),l.y-=b*g,c.x+=v*(g=1-g),c.y+=b*g);if((g=r*m)&&(v=f[0]/2,b=f[1]/2,e=-1,g))for(;x>++e;)a=y[e],a.x+=(v-a.x)*g,a.y+=(b-a.y)*g;if(p)for(We(n=d3.geom.quadtree(y),r,o),e=-1;x>++e;)(a=y[e]).fixed||n.visit(t(a));for(e=-1;x>++e;)a=y[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*h,a.y-=(a.py-(a.py=a.y))*h);s.tick({type:"tick",alpha:r})},l.nodes=function(t){return arguments.length?(y=t,l):y},l.links=function(t){return arguments.length?(M=t,l):M},l.size=function(t){return arguments.length?(f=t,l):f},l.linkDistance=function(t){return arguments.length?(d=c(t),l):d},l.distance=l.linkDistance,l.linkStrength=function(t){return arguments.length?(g=c(t),l):g},l.friction=function(t){return arguments.length?(h=t,l):h},l.charge=function(t){return arguments.length?(p="function"==typeof t?t:+t,l):p},l.gravity=function(t){return arguments.length?(m=t,l):m},l.theta=function(t){return arguments.length?(v=t,l):v},l.alpha=function(t){return arguments.length?(r?r=t>0?t:0:t>0&&(s.start({type:"start",alpha:r=t}),d3.timer(l.tick)),l):r},l.start=function(){function t(t,r){for(var i,u=n(e),a=-1,o=u.length;o>++a;)if(!isNaN(i=u[a][t]))return i;return Math.random()*r}function n(){if(!a){for(a=[],r=0;s>r;++r)a[r]=[];for(r=0;h>r;++r){var t=M[r];a[t.source.index].push(t.target),a[t.target.index].push(t.source)}}return a[e]}var e,r,a,c,s=y.length,h=M.length,m=f[0],v=f[1];for(e=0;s>e;++e)(c=y[e]).index=e,c.weight=0;for(i=[],u=[],e=0;h>e;++e)c=M[e],"number"==typeof c.source&&(c.source=y[c.source]),"number"==typeof c.target&&(c.target=y[c.target]),i[e]=d.call(this,c,e),u[e]=g.call(this,c,e),++c.source.weight,++c.target.weight;for(e=0;s>e;++e)c=y[e],isNaN(c.x)&&(c.x=t("x",m)),isNaN(c.y)&&(c.y=t("y",v)),isNaN(c.px)&&(c.px=c.x),isNaN(c.py)&&(c.py=c.y);if(o=[],"function"==typeof p)for(e=0;s>e;++e)o[e]=+p.call(this,y[e],e);else for(e=0;s>e;++e)o[e]=p;return 
l.resume()},l.resume=function(){return l.alpha(.1)},l.stop=function(){return l.alpha(0)},l.drag=function(){e||(e=d3.behavior.drag().origin(a).on("dragstart",$e).on("drag",n).on("dragend",Je)),this.on("mouseover.force",Ge).on("mouseout.force",Ke).call(e)},d3.rebind(l,s,"on")},d3.layout.partition=function(){function t(n,e,r,i){var u=n.children;if(n.x=e,n.y=n.depth*i,n.dx=r,n.dy=i,u&&(a=u.length)){var a,o,c,l=-1;for(r=n.value?r/n.value:0;a>++l;)t(o=u[l],e,c=o.value*r,i),e+=c}}function n(t){var e=t.children,r=0;if(e&&(i=e.length))for(var i,u=-1;i>++u;)r=Math.max(r,n(e[u]));return 1+r}function e(e,u){var a=r.call(this,e,u);return t(a[0],0,i[0],i[1]/n(a[0])),a}var r=d3.layout.hierarchy(),i=[1,1];return e.size=function(t){return arguments.length?(i=t,e):i},hr(e,r)},d3.layout.pie=function(){function t(u){var a=u.map(function(e,r){return+n.call(t,e,r)}),o=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof i?i.apply(this,arguments):i)-r)/d3.sum(a),l=d3.range(u.length);null!=e&&l.sort(e===Ka?function(t,n){return a[n]-a[t]}:function(t,n){return e(u[t],u[n])});var s=[];return l.forEach(function(t){var n;s[t]={data:u[t],value:n=a[t],startAngle:o,endAngle:o+=n*c}}),s}var n=Number,e=Ka,r=0,i=2*Ru;return t.value=function(e){return arguments.length?(n=e,t):n},t.sort=function(n){return arguments.length?(e=n,t):e},t.startAngle=function(n){return arguments.length?(r=n,t):r},t.endAngle=function(n){return arguments.length?(i=n,t):i},t};var Ka={};d3.layout.stack=function(){function t(a,c){var l=a.map(function(e,r){return n.call(t,e,r)}),s=l.map(function(n){return n.map(function(n,e){return[u.call(t,n,e),o.call(t,n,e)]})}),f=e.call(t,s,c);l=d3.permute(l,f),s=d3.permute(s,f);var h,d,g,p=r.call(t,s,c),m=l.length,v=l[0].length;for(d=0;v>d;++d)for(i.call(t,l[0][d],g=p[d],s[0][d][1]),h=1;m>h;++h)i.call(t,l[h][d],g+=s[h-1][d][1],s[h][d][1]);return a}var n=a,e=ir,r=ur,i=rr,u=nr,o=er;return t.values=function(e){return arguments.length?(n=e,t):n},t.order=function(n){return arguments.length?(e="function"==typeof n?n:Wa.get(n)||ir,t):e},t.offset=function(n){return arguments.length?(r="function"==typeof n?n:Qa.get(n)||ur,t):r},t.x=function(n){return arguments.length?(u=n,t):u},t.y=function(n){return arguments.length?(o=n,t):o},t.out=function(n){return arguments.length?(i=n,t):i},t};var Wa=d3.map({"inside-out":function(t){var n,e,r=t.length,i=t.map(ar),u=t.map(or),a=d3.range(r).sort(function(t,n){return i[t]-i[n]}),o=0,c=0,l=[],s=[];for(n=0;r>n;++n)e=a[n],c>o?(o+=u[e],l.push(e)):(c+=u[e],s.push(e));return s.reverse().concat(l)},reverse:function(t){return d3.range(t.length).reverse()},"default":ir}),Qa=d3.map({silhouette:function(t){var n,e,r,i=t.length,u=t[0].length,a=[],o=0,c=[];for(e=0;u>e;++e){for(n=0,r=0;i>n;n++)r+=t[n][e][1];r>o&&(o=r),a.push(r)}for(e=0;u>e;++e)c[e]=(o-a[e])/2;return c},wiggle:function(t){var n,e,r,i,u,a,o,c,l,s=t.length,f=t[0],h=f.length,d=[];for(d[0]=c=l=0,e=1;h>e;++e){for(n=0,i=0;s>n;++n)i+=t[n][e][1];for(n=0,u=0,o=f[e][0]-f[e-1][0];s>n;++n){for(r=0,a=(t[n][e][1]-t[n][e-1][1])/(2*o);n>r;++r)a+=(t[r][e][1]-t[r][e-1][1])/o;u+=a*t[n][e][1]}d[e]=c-=i?u/i*o:0,l>c&&(l=c)}for(e=0;h>e;++e)d[e]-=l;return d},expand:function(t){var n,e,r,i=t.length,u=t[0].length,a=1/i,o=[];for(e=0;u>e;++e){for(n=0,r=0;i>n;n++)r+=t[n][e][1];if(r)for(n=0;i>n;n++)t[n][e][1]/=r;else for(n=0;i>n;n++)t[n][e][1]=a}for(e=0;u>e;++e)o[e]=0;return o},zero:ur});d3.layout.histogram=function(){function t(t,u){for(var 
a,o,c=[],l=t.map(e,this),s=r.call(this,l,u),f=i.call(this,s,l,u),u=-1,h=l.length,d=f.length-1,g=n?1:1/h;d>++u;)a=c[u]=[],a.dx=f[u+1]-(a.x=f[u]),a.y=0;if(d>0)for(u=-1;h>++u;)o=l[u],o>=s[0]&&s[1]>=o&&(a=c[d3.bisect(f,o,1,d)-1],a.y+=g,a.push(t[u]));return c}var n=!0,e=Number,r=fr,i=lr;return t.value=function(n){return arguments.length?(e=n,t):e},t.range=function(n){return arguments.length?(r=c(n),t):r},t.bins=function(n){return arguments.length?(i="number"==typeof n?function(t){return sr(t,n)}:c(n),t):i},t.frequency=function(e){return arguments.length?(n=!!e,t):n},t},d3.layout.hierarchy=function(){function t(n,a,o){var c=i.call(e,n,a);if(n.depth=a,o.push(n),c&&(l=c.length)){for(var l,s,f=-1,h=n.children=[],d=0,g=a+1;l>++f;)s=t(c[f],g,o),s.parent=n,h.push(s),d+=s.value;r&&h.sort(r),u&&(n.value=d)}else u&&(n.value=+u.call(e,n,a)||0);return n}function n(t,r){var i=t.children,a=0;if(i&&(o=i.length))for(var o,c=-1,l=r+1;o>++c;)a+=n(i[c],l);else u&&(a=+u.call(e,t,r)||0);return u&&(t.value=a),a}function e(n){var e=[];return t(n,0,e),e}var r=pr,i=dr,u=gr;return e.sort=function(t){return arguments.length?(r=t,e):r},e.children=function(t){return arguments.length?(i=t,e):i},e.value=function(t){return arguments.length?(u=t,e):u},e.revalue=function(t){return n(t,0),t},e},d3.layout.pack=function(){function t(t,i){var u=n.call(this,t,i),a=u[0];a.x=0,a.y=0,Rr(a,function(t){t.r=Math.sqrt(t.value)}),Rr(a,xr);var o=r[0],c=r[1],l=Math.max(2*a.r/o,2*a.r/c);if(e>0){var s=e*l/2;Rr(a,function(t){t.r+=s}),Rr(a,xr),Rr(a,function(t){t.r-=s}),l=Math.max(2*a.r/o,2*a.r/c)}return Sr(a,o/2,c/2,1/l),u}var n=d3.layout.hierarchy().sort(vr),e=0,r=[1,1];return t.size=function(n){return arguments.length?(r=n,t):r},t.padding=function(n){return arguments.length?(e=+n,t):e},hr(t,n)},d3.layout.cluster=function(){function t(t,i){var u,a=n.call(this,t,i),o=a[0],c=0;Rr(o,function(t){var n=t.children;n&&n.length?(t.x=Ar(n),t.y=Er(n)):(t.x=u?c+=e(t,u):0,t.y=0,u=t)});var l=Nr(o),s=Tr(o),f=l.x-e(l,s)/2,h=s.x+e(s,l)/2;return Rr(o,function(t){t.x=(t.x-f)/(h-f)*r[0],t.y=(1-(o.y?t.y/o.y:1))*r[1]}),a}var n=d3.layout.hierarchy().sort(null).value(null),e=qr,r=[1,1];return t.separation=function(n){return arguments.length?(e=n,t):e},t.size=function(n){return arguments.length?(r=n,t):r},hr(t,n)},d3.layout.tree=function(){function t(t,i){function u(t,n){var r=t.children,i=t._tree;if(r&&(a=r.length)){for(var a,c,l,s=r[0],f=s,h=-1;a>++h;)l=r[h],u(l,c),f=o(l,c,f),c=l;Pr(t);var d=.5*(s._tree.prelim+l._tree.prelim);n?(i.prelim=n._tree.prelim+e(t,n),i.mod=i.prelim-d):i.prelim=d}else n&&(i.prelim=n._tree.prelim+e(t,n))}function a(t,n){t.x=t._tree.prelim+n;var e=t.children;if(e&&(r=e.length)){var r,i=-1;for(n+=t._tree.mod;r>++i;)a(e[i],n)}}function o(t,n,r){if(n){for(var i,u=t,a=t,o=n,c=t.parent.children[0],l=u._tree.mod,s=a._tree.mod,f=o._tree.mod,h=c._tree.mod;o=zr(o),u=Cr(u),o&&u;)c=Cr(c),a=zr(a),a._tree.ancestor=t,i=o._tree.prelim+f-u._tree.prelim-l+e(o,u),i>0&&(jr(Or(o,t,r),t,i),l+=i,s+=i),f+=o._tree.mod,l+=u._tree.mod,h+=c._tree.mod,s+=a._tree.mod;o&&!zr(a)&&(a._tree.thread=o,a._tree.mod+=f-s),u&&!Cr(c)&&(c._tree.thread=u,c._tree.mod+=l-h,r=t)}return r}var c=n.call(this,t,i),l=c[0];Rr(l,function(t,n){t._tree={ancestor:t,prelim:0,mod:0,change:0,shift:0,number:n?n._tree.number+1:0}}),u(l),a(l,-l._tree.prelim);var s=Dr(l,Fr),f=Dr(l,Lr),h=Dr(l,Hr),d=s.x-e(s,f)/2,g=f.x+e(f,s)/2,p=h.depth||1;return Rr(l,function(t){t.x=(t.x-d)/(g-d)*r[0],t.y=t.depth/p*r[1],delete t._tree}),c}var n=d3.layout.hierarchy().sort(null).value(null),e=qr,r=[1,1];return 
t.separation=function(n){return arguments.length?(e=n,t):e},t.size=function(n){return arguments.length?(r=n,t):r},hr(t,n)},d3.layout.treemap=function(){function t(t,n){for(var e,r,i=-1,u=t.length;u>++i;)r=(e=t[i]).value*(0>n?0:n),e.area=isNaN(r)||0>=r?0:r}function n(e){var u=e.children;if(u&&u.length){var a,o,c,l=f(e),s=[],h=u.slice(),g=1/0,p="slice"===d?l.dx:"dice"===d?l.dy:"slice-dice"===d?1&e.depth?l.dy:l.dx:Math.min(l.dx,l.dy);for(t(h,l.dx*l.dy/e.value),s.area=0;(c=h.length)>0;)s.push(a=h[c-1]),s.area+=a.area,"squarify"!==d||g>=(o=r(s,p))?(h.pop(),g=o):(s.area-=s.pop().area,i(s,p,l,!1),p=Math.min(l.dx,l.dy),s.length=s.area=0,g=1/0);s.length&&(i(s,p,l,!0),s.length=s.area=0),u.forEach(n)}}function e(n){var r=n.children;if(r&&r.length){var u,a=f(n),o=r.slice(),c=[];for(t(o,a.dx*a.dy/n.value),c.area=0;u=o.pop();)c.push(u),c.area+=u.area,null!=u.z&&(i(c,u.z?a.dx:a.dy,a,!o.length),c.length=c.area=0);r.forEach(e)}}function r(t,n){for(var e,r=t.area,i=0,u=1/0,a=-1,o=t.length;o>++a;)(e=t[a].area)&&(u>e&&(u=e),e>i&&(i=e));return r*=r,n*=n,r?Math.max(n*i*g/r,r/(n*u*g)):1/0}function i(t,n,e,r){var i,u=-1,a=t.length,o=e.x,l=e.y,s=n?c(t.area/n):0;if(n==e.dx){for((r||s>e.dy)&&(s=e.dy);a>++u;)i=t[u],i.x=o,i.y=l,i.dy=s,o+=i.dx=Math.min(e.x+e.dx-o,s?c(i.area/s):0);i.z=!0,i.dx+=e.x+e.dx-o,e.y+=s,e.dy-=s}else{for((r||s>e.dx)&&(s=e.dx);a>++u;)i=t[u],i.x=o,i.y=l,i.dx=s,l+=i.dy=Math.min(e.y+e.dy-l,s?c(i.area/s):0);i.z=!1,i.dy+=e.y+e.dy-l,e.x+=s,e.dx-=s}}function u(r){var i=a||o(r),u=i[0];return u.x=0,u.y=0,u.dx=l[0],u.dy=l[1],a&&o.revalue(u),t([u],u.dx*u.dy/u.value),(a?e:n)(u),h&&(a=i),i}var a,o=d3.layout.hierarchy(),c=Math.round,l=[1,1],s=null,f=Yr,h=!1,d="squarify",g=.5*(1+Math.sqrt(5));return u.size=function(t){return arguments.length?(l=t,u):l},u.padding=function(t){function n(n){var e=t.call(u,n,n.depth);return null==e?Yr(n):Ur(n,"number"==typeof e?[e,e,e,e]:e)}function e(n){return Ur(n,t)}if(!arguments.length)return s;var r;return f=null==(s=t)?Yr:"function"==(r=typeof t)?n:"number"===r?(t=[t,t,t,t],e):e,u},u.round=function(t){return arguments.length?(c=t?Math.round:Number,u):c!=Number},u.sticky=function(t){return arguments.length?(h=t,a=null,u):h},u.ratio=function(t){return arguments.length?(g=t,u):g},u.mode=function(t){return arguments.length?(d=t+"",u):d},hr(u,o)},d3.csv=Ir(",","text/csv"),d3.tsv=Ir("        ","text/tab-separated-values"),d3.geo={},d3.geo.stream=function(t,n){to.hasOwnProperty(t.type)?to[t.type](t,n):Vr(t,n)};var to={Feature:function(t,n){Vr(t.geometry,n)},FeatureCollection:function(t,n){for(var e=t.features,r=-1,i=e.length;i>++r;)Vr(e[r].geometry,n)}},no={Sphere:function(t,n){n.sphere()},Point:function(t,n){var e=t.coordinates;n.point(e[0],e[1])},MultiPoint:function(t,n){for(var e,r=t.coordinates,i=-1,u=r.length;u>++i;)e=r[i],n.point(e[0],e[1])},LineString:function(t,n){Xr(t.coordinates,n,0)},MultiLineString:function(t,n){for(var e=t.coordinates,r=-1,i=e.length;i>++r;)Xr(e[r],n,0)},Polygon:function(t,n){Zr(t.coordinates,n)},MultiPolygon:function(t,n){for(var e=t.coordinates,r=-1,i=e.length;i>++r;)Zr(e[r],n)},GeometryCollection:function(t,n){for(var e=t.geometries,r=-1,i=e.length;i>++r;)Vr(e[r],n)}};d3.geo.albersUsa=function(){function t(t){return n(t)(t)}function n(t){var n=t[0],a=t[1];return a>50?r:-140>n?i:21>a?u:e}var e=d3.geo.albers(),r=d3.geo.albers().rotate([160,0]).center([0,60]).parallels([55,65]),i=d3.geo.albers().rotate([160,0]).center([0,20]).parallels([8,18]),u=d3.geo.albers().rotate([60,0]).center([0,10]).parallels([8,18]);return t.scale=function(n){return 
arguments.length?(e.scale(n),r.scale(.6*n),i.scale(n),u.scale(1.5*n),t.translate(e.translate())):e.scale()},t.translate=function(n){if(!arguments.length)return e.translate();var a=e.scale(),o=n[0],c=n[1];return e.translate(n),r.translate([o-.4*a,c+.17*a]),i.translate([o-.19*a,c+.2*a]),u.translate([o+.58*a,c+.43*a]),t},t.scale(e.scale())},(d3.geo.albers=function(){var t=29.5*ju,n=45.5*ju,e=Pi(ei),r=e(t,n);return r.parallels=function(r){return arguments.length?e(t=r[0]*ju,n=r[1]*ju):[t*Ou,n*Ou]},r.rotate([98,0]).center([0,38]).scale(1e3)}).raw=ei;var eo=Vi(function(t){return Math.sqrt(2/(1+t))},function(t){return 2*Math.asin(t/2)});(d3.geo.azimuthalEqualArea=function(){return Ri(eo)}).raw=eo;var ro=Vi(function(t){var n=Math.acos(t);return n&&n/Math.sin(n)},a);(d3.geo.azimuthalEquidistant=function(){return Ri(ro)}).raw=ro,d3.geo.bounds=ri(a),d3.geo.centroid=function(t){io=uo=ao=oo=co=0,d3.geo.stream(t,lo);var n;return uo&&Math.abs(n=Math.sqrt(ao*ao+oo*oo+co*co))>Pu?[Math.atan2(oo,ao)*Ou,Math.asin(Math.max(-1,Math.min(1,co/n)))*Ou]:void 0};var io,uo,ao,oo,co,lo={sphere:function(){2>io&&(io=2,uo=ao=oo=co=0)},point:ii,lineStart:ai,lineEnd:oi,polygonStart:function(){2>io&&(io=2,uo=ao=oo=co=0),lo.lineStart=ui},polygonEnd:function(){lo.lineStart=ai}};d3.geo.circle=function(){function t(){var t="function"==typeof r?r.apply(this,arguments):r,n=Oi(-t[0]*ju,-t[1]*ju,0).invert,i=[];return e(null,null,1,{point:function(t,e){i.push(t=n(t,e)),t[0]*=Ou,t[1]*=Ou}}),{type:"Polygon",coordinates:[i]}}var n,e,r=[0,0],i=6;return t.origin=function(n){return arguments.length?(r=n,t):r},t.angle=function(r){return arguments.length?(e=ci((n=+r)*ju,i*ju),t):n},t.precision=function(r){return arguments.length?(e=ci(n*ju,(i=+r)*ju),t):i},t.angle(90)};var so=si(o,vi,Mi);(d3.geo.equirectangular=function(){return Ri(_i).scale(250/Ru)}).raw=_i.invert=_i;var fo=Vi(function(t){return 1/t},Math.atan);(d3.geo.gnomonic=function(){return Ri(fo)}).raw=fo,d3.geo.graticule=function(){function t(){return{type:"MultiLineString",coordinates:n()}}function n(){return d3.range(Math.ceil(r/c)*c,e,c).map(a).concat(d3.range(Math.ceil(u/l)*l,i,l).map(o))}var e,r,i,u,a,o,c=22.5,l=c,s=2.5;return t.lines=function(){return n().map(function(t){return{type:"LineString",coordinates:t}})},t.outline=function(){return{type:"Polygon",coordinates:[a(r).concat(o(i).slice(1),a(e).reverse().slice(1),o(u).reverse().slice(1))]}},t.extent=function(n){return arguments.length?(r=+n[0][0],e=+n[1][0],u=+n[0][1],i=+n[1][1],r>e&&(n=r,r=e,e=n),u>i&&(n=u,u=i,i=n),t.precision(s)):[[r,u],[e,i]]},t.step=function(n){return arguments.length?(c=+n[0],l=+n[1],t):[c,l]},t.precision=function(n){return arguments.length?(s=+n,a=wi(u,i,s),o=Si(r,e,s),t):s},t.extent([[-180+Pu,-90+Pu],[180-Pu,90-Pu]])},d3.geo.interpolate=function(t,n){return ki(t[0]*ju,t[1]*ju,n[0]*ju,n[1]*ju)},d3.geo.greatArc=function(){function e(){for(var t=r||a.apply(this,arguments),n=i||o.apply(this,arguments),e=u||d3.geo.interpolate(t,n),l=0,s=c/e.distance,f=[t];1>(l+=s);)f.push(e(l));return f.push(n),{type:"LineString",coordinates:f}}var r,i,u,a=n,o=t,c=6*ju;return e.distance=function(){return(u||d3.geo.interpolate(r||a.apply(this,arguments),i||o.apply(this,arguments))).distance},e.source=function(t){return arguments.length?(a=t,r="function"==typeof t?null:t,u=r&&i?d3.geo.interpolate(r,i):null,e):a},e.target=function(t){return arguments.length?(o=t,i="function"==typeof t?null:t,u=r&&i?d3.geo.interpolate(r,i):null,e):o},e.precision=function(t){return 
arguments.length?(c=t*ju,e):c/ju},e},Ei.invert=function(t,n){return[2*Ru*t,2*Math.atan(Math.exp(2*Ru*n))-Ru/2]},(d3.geo.mercator=function(){return Ri(Ei).scale(500)}).raw=Ei;var ho=Vi(function(){return 1},Math.asin);(d3.geo.orthographic=function(){return Ri(ho)}).raw=ho,d3.geo.path=function(){function t(t){return t&&d3.geo.stream(t,r(i.pointRadius("function"==typeof u?+u.apply(this,arguments):u))),i.result()}var n,e,r,i,u=4.5;return t.area=function(t){return go=0,d3.geo.stream(t,r(mo)),go},t.centroid=function(t){return io=ao=oo=co=0,d3.geo.stream(t,r(vo)),co?[ao/co,oo/co]:void 0},t.bounds=function(t){return ri(r)(t)},t.projection=function(e){return arguments.length?(r=(n=e)?e.stream||Ni(e):a,t):n},t.context=function(n){return arguments.length?(i=null==(e=n)?new Ti:new qi(n),t):e},t.pointRadius=function(n){return arguments.length?(u="function"==typeof n?n:+n,t):u},t.projection(d3.geo.albersUsa()).context(null)};var go,po,mo={point:Pn,lineStart:Pn,lineEnd:Pn,polygonStart:function(){po=0,mo.lineStart=Ci},polygonEnd:function(){mo.lineStart=mo.lineEnd=mo.point=Pn,go+=Math.abs(po/2)}},vo={point:zi,lineStart:Di,lineEnd:Li,polygonStart:function(){vo.lineStart=Fi},polygonEnd:function(){vo.point=zi,vo.lineStart=Di,vo.lineEnd=Li}};d3.geo.area=function(t){return yo=0,d3.geo.stream(t,bo),yo};var yo,Mo,bo={sphere:function(){yo+=4*Ru},point:Pn,lineStart:Pn,lineEnd:Pn,polygonStart:function(){Mo=0,bo.lineStart=Hi},polygonEnd:function(){yo+=0>Mo?4*Ru+Mo:Mo,bo.lineStart=bo.lineEnd=bo.point=Pn}};d3.geo.projection=Ri,d3.geo.projectionMutator=Pi;var xo=Vi(function(t){return 1/(1+t)},function(t){return 2*Math.atan(t)});(d3.geo.stereographic=function(){return Ri(xo)}).raw=xo,d3.geom={},d3.geom.hull=function(t){if(3>t.length)return[];var n,e,r,i,u,a,o,c,l,s,f=t.length,h=f-1,d=[],g=[],p=0;for(n=1;f>n;++n)t[n][1]<t[p][1]?p=n:t[n][1]==t[p][1]&&(p=t[n][0]<t[p][0]?n:p);for(n=0;f>n;++n)n!==p&&(i=t[n][1]-t[p][1],r=t[n][0]-t[p][0],d.push({angle:Math.atan2(i,r),index:n}));for(d.sort(function(t,n){return t.angle-n.angle}),l=d[0].angle,c=d[0].index,o=0,n=1;h>n;++n)e=d[n].index,l==d[n].angle?(r=t[c][0]-t[p][0],i=t[c][1]-t[p][1],u=t[e][0]-t[p][0],a=t[e][1]-t[p][1],r*r+i*i>=u*u+a*a?d[n].index=-1:(d[o].index=-1,l=d[n].angle,o=n,c=e)):(l=d[n].angle,o=n,c=e);for(g.push(p),n=0,e=0;2>n;++e)-1!==d[e].index&&(g.push(d[e].index),n++);for(s=g.length;h>e;++e)if(-1!==d[e].index){for(;!Xi(g[s-2],g[s-1],d[e].index,t);)--s;g[s++]=d[e].index}var m=[];for(n=0;s>n;++n)m.push(t[g[n]]);return m},d3.geom.polygon=function(t){return t.area=function(){for(var n=0,e=t.length,r=t[e-1][1]*t[0][0]-t[e-1][0]*t[0][1];e>++n;)r+=t[n-1][1]*t[n][0]-t[n-1][0]*t[n][1];return.5*r},t.centroid=function(n){var e,r,i=-1,u=t.length,a=0,o=0,c=t[u-1];for(arguments.length||(n=-1/(6*t.area()));u>++i;)e=c,c=t[i],r=e[0]*c[1]-c[0]*e[1],a+=(e[0]+c[0])*r,o+=(e[1]+c[1])*r;return[a*n,o*n]},t.clip=function(n){for(var e,r,i,u,a,o,c=-1,l=t.length,s=t[l-1];l>++c;){for(e=n.slice(),n.length=0,u=t[c],a=e[(i=e.length)-1],r=-1;i>++r;)o=e[r],Zi(o,s,u)?(Zi(a,s,u)||n.push(Bi(a,o,s,u)),n.push(o)):Zi(a,s,u)&&n.push(Bi(a,o,s,u)),a=o;s=u}return n},t},d3.geom.voronoi=function(t){var n=t.map(function(){return[]}),e=1e6;return $i(t,function(t){var r,i,u,a,o,c;1===t.a&&t.b>=0?(r=t.ep.r,i=t.ep.l):(r=t.ep.l,i=t.ep.r),1===t.a?(o=r?r.y:-e,u=t.c-t.b*o,c=i?i.y:e,a=t.c-t.b*c):(u=r?r.x:-e,o=t.c-t.a*u,a=i?i.x:e,c=t.c-t.a*a);var l=[u,o],s=[a,c];n[t.region.l.index].push(l,s),n[t.region.r.index].push(l,s)}),n=n.map(function(n,e){var r=t[e][0],i=t[e][1],u=n.map(function(t){return 
Math.atan2(t[0]-r,t[1]-i)});return d3.range(n.length).sort(function(t,n){return u[t]-u[n]}).filter(function(t,n,e){return!n||u[t]-u[e[n-1]]>Pu}).map(function(t){return n[t]})}),n.forEach(function(n,r){var i=n.length;if(!i)return n.push([-e,-e],[-e,e],[e,e],[e,-e]);if(!(i>2)){var u=t[r],a=n[0],o=n[1],c=u[0],l=u[1],s=a[0],f=a[1],h=o[0],d=o[1],g=Math.abs(h-s),p=d-f;if(Pu>Math.abs(p)){var m=f>l?-e:e;n.push([-e,m],[e,m])}else if(Pu>g){var v=s>c?-e:e;n.push([v,-e],[v,e])}else{var m=(s-c)*(d-f)>(h-s)*(f-l)?e:-e,y=Math.abs(p)-g;Pu>Math.abs(y)?n.push([0>p?m:-m,m]):(y>0&&(m*=-1),n.push([-e,m],[e,m]))}}}),n};var _o={l:"r",r:"l"};d3.geom.delaunay=function(t){var n=t.map(function(){return[]}),e=[];return $i(t,function(e){n[e.region.l.index].push(t[e.region.r.index])}),n.forEach(function(n,r){var i=t[r],u=i[0],a=i[1];n.forEach(function(t){t.angle=Math.atan2(t[0]-u,t[1]-a)}),n.sort(function(t,n){return t.angle-n.angle});for(var o=0,c=n.length-1;c>o;o++)e.push([i,n[o],n[o+1]])}),e},d3.geom.quadtree=function(t,n,e,r,i){function u(t,n,e,r,i,u){if(!isNaN(n.x)&&!isNaN(n.y))if(t.leaf){var o=t.point;o?.01>Math.abs(o.x-n.x)+Math.abs(o.y-n.y)?a(t,n,e,r,i,u):(t.point=null,a(t,o,e,r,i,u),a(t,n,e,r,i,u)):t.point=n}else a(t,n,e,r,i,u)}function a(t,n,e,r,i,a){var o=.5*(e+i),c=.5*(r+a),l=n.x>=o,s=n.y>=c,f=(s<<1)+l;t.leaf=!1,t=t.nodes[f]||(t.nodes[f]=Ji()),l?e=o:i=o,s?r=c:a=c,u(t,n,e,r,i,a)}var o,c=-1,l=t.length;if(5>arguments.length)if(3===arguments.length)i=e,r=n,e=n=0;else for(n=e=1/0,r=i=-1/0;l>++c;)o=t[c],n>o.x&&(n=o.x),e>o.y&&(e=o.y),o.x>r&&(r=o.x),o.y>i&&(i=o.y);var s=r-n,f=i-e;s>f?i=e+s:r=n+f;var h=Ji();return h.add=function(t){u(h,t,n,e,r,i)},h.visit=function(t){Gi(t,h,n,e,r,i)},t.forEach(h.add),h},d3.time={};var wo=Date,So=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"];Ki.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){ko.setUTCDate.apply(this._,arguments)},setDay:function(){ko.setUTCDay.apply(this._,arguments)},setFullYear:function(){ko.setUTCFullYear.apply(this._,arguments)},setHours:function(){ko.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){ko.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){ko.setUTCMinutes.apply(this._,arguments)},setMonth:function(){ko.setUTCMonth.apply(this._,arguments)},setSeconds:function(){ko.setUTCSeconds.apply(this._,arguments)},setTime:function(){ko.setTime.apply(this._,arguments)}};var ko=Date.prototype,Eo="%a %b %e %X %Y",Ao="%m/%d/%Y",No="%H:%M:%S",To=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],qo=["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],Co=["January","February","March","April","May","June","July","August","September","October","November","December"],zo=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];d3.time.format=function(t){function n(n){for(var r,i,u,a=[],o=-1,c=0;e>++o;)37===t.charCodeAt(o)&&(a.push(t.substring(c,o)),null!=(i=jo[r=t.charAt(++o)])&&(r=t.charAt(++o)),(u=Oo[r])&&(r=u(n,null==i?"e"===r?" 
":"0":i)),a.push(r),c=o+1);return a.push(t.substring(c,o)),a.join("")}var e=t.length;return n.parse=function(n){var e={y:1900,m:0,d:1,H:0,M:0,S:0,L:0},r=Wi(e,t,n,0);if(r!=n.length)return null;"p"in e&&(e.H=e.H%12+12*e.p);var i=new wo;return i.setFullYear(e.y,e.m,e.d),i.setHours(e.H,e.M,e.S,e.L),i},n.toString=function(){return t},n};var Do=Qi(To),Lo=Qi(qo),Fo=Qi(Co),Ho=tu(Co),Ro=Qi(zo),Po=tu(zo),jo={"-":"",_:" ",0:"0"},Oo={a:function(t){return qo[t.getDay()]},A:function(t){return To[t.getDay()]},b:function(t){return zo[t.getMonth()]},B:function(t){return Co[t.getMonth()]},c:d3.time.format(Eo),d:function(t,n){return nu(t.getDate(),n,2)},e:function(t,n){return nu(t.getDate(),n,2)},H:function(t,n){return nu(t.getHours(),n,2)},I:function(t,n){return nu(t.getHours()%12||12,n,2)},j:function(t,n){return nu(1+d3.time.dayOfYear(t),n,3)},L:function(t,n){return nu(t.getMilliseconds(),n,3)},m:function(t,n){return nu(t.getMonth()+1,n,2)},M:function(t,n){return nu(t.getMinutes(),n,2)},p:function(t){return t.getHours()>=12?"PM":"AM"},S:function(t,n){return nu(t.getSeconds(),n,2)},U:function(t,n){return nu(d3.time.sundayOfYear(t),n,2)},w:function(t){return t.getDay()},W:function(t,n){return nu(d3.time.mondayOfYear(t),n,2)},x:d3.time.format(Ao),X:d3.time.format(No),y:function(t,n){return nu(t.getFullYear()%100,n,2)},Y:function(t,n){return nu(t.getFullYear()%1e4,n,4)},Z:Mu,"%":function(){return"%"}},Yo={a:eu,A:ru,b:iu,B:uu,c:au,d:du,e:du,H:gu,I:gu,L:vu,m:hu,M:pu,p:yu,S:mu,x:ou,X:cu,y:su,Y:lu},Uo=/^\s*\d+/,Io=d3.map({am:0,pm:1});d3.time.format.utc=function(t){function n(t){try{wo=Ki;var n=new wo;return n._=t,e(n)}finally{wo=Date}}var e=d3.time.format(t);return n.parse=function(t){try{wo=Ki;var n=e.parse(t);return n&&n._}finally{wo=Date}},n.toString=e.toString,n};var Vo=d3.time.format.utc("%Y-%m-%dT%H:%M:%S.%LZ");d3.time.format.iso=Date.prototype.toISOString?bu:Vo,bu.parse=function(t){var n=new Date(t);return isNaN(n)?null:n},bu.toString=Vo.toString,d3.time.second=xu(function(t){return new wo(1e3*Math.floor(t/1e3))},function(t,n){t.setTime(t.getTime()+1e3*Math.floor(n))},function(t){return t.getSeconds()}),d3.time.seconds=d3.time.second.range,d3.time.seconds.utc=d3.time.second.utc.range,d3.time.minute=xu(function(t){return new wo(6e4*Math.floor(t/6e4))},function(t,n){t.setTime(t.getTime()+6e4*Math.floor(n))},function(t){return t.getMinutes()}),d3.time.minutes=d3.time.minute.range,d3.time.minutes.utc=d3.time.minute.utc.range,d3.time.hour=xu(function(t){var n=t.getTimezoneOffset()/60;return new wo(36e5*(Math.floor(t/36e5-n)+n))},function(t,n){t.setTime(t.getTime()+36e5*Math.floor(n))},function(t){return t.getHours()}),d3.time.hours=d3.time.hour.range,d3.time.hours.utc=d3.time.hour.utc.range,d3.time.day=xu(function(t){var n=new wo(1970,0);return n.setFullYear(t.getFullYear(),t.getMonth(),t.getDate()),n},function(t,n){t.setDate(t.getDate()+n)},function(t){return t.getDate()-1}),d3.time.days=d3.time.day.range,d3.time.days.utc=d3.time.day.utc.range,d3.time.dayOfYear=function(t){var n=d3.time.year(t);return Math.floor((t-n-6e4*(t.getTimezoneOffset()-n.getTimezoneOffset()))/864e5)},So.forEach(function(t,n){t=t.toLowerCase(),n=7-n;var e=d3.time[t]=xu(function(t){return(t=d3.time.day(t)).setDate(t.getDate()-(t.getDay()+n)%7),t},function(t,n){t.setDate(t.getDate()+7*Math.floor(n))},function(t){var e=d3.time.year(t).getDay();return Math.floor((d3.time.dayOfYear(t)+(e+n)%7)/7)-(e!==n)});d3.time[t+"s"]=e.range,d3.time[t+"s"].utc=e.utc.range,d3.time[t+"OfYear"]=function(t){var e=d3.time.year(t).getDay();return 
Math.floor((d3.time.dayOfYear(t)+(e+n)%7)/7)}}),d3.time.week=d3.time.sunday,d3.time.weeks=d3.time.sunday.range,d3.time.weeks.utc=d3.time.sunday.utc.range,d3.time.weekOfYear=d3.time.sundayOfYear,d3.time.month=xu(function(t){return t=d3.time.day(t),t.setDate(1),t},function(t,n){t.setMonth(t.getMonth()+n)},function(t){return t.getMonth()}),d3.time.months=d3.time.month.range,d3.time.months.utc=d3.time.month.utc.range,d3.time.year=xu(function(t){return t=d3.time.day(t),t.setMonth(0,1),t},function(t,n){t.setFullYear(t.getFullYear()+n)},function(t){return t.getFullYear()}),d3.time.years=d3.time.year.range,d3.time.years.utc=d3.time.year.utc.range;var Xo=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Zo=[[d3.time.second,1],[d3.time.second,5],[d3.time.second,15],[d3.time.second,30],[d3.time.minute,1],[d3.time.minute,5],[d3.time.minute,15],[d3.time.minute,30],[d3.time.hour,1],[d3.time.hour,3],[d3.time.hour,6],[d3.time.hour,12],[d3.time.day,1],[d3.time.day,2],[d3.time.week,1],[d3.time.month,1],[d3.time.month,3],[d3.time.year,1]],Bo=[[d3.time.format("%Y"),o],[d3.time.format("%B"),function(t){return t.getMonth()}],[d3.time.format("%b %d"),function(t){return 1!=t.getDate()}],[d3.time.format("%a %d"),function(t){return t.getDay()&&1!=t.getDate()}],[d3.time.format("%I %p"),function(t){return t.getHours()}],[d3.time.format("%I:%M"),function(t){return t.getMinutes()}],[d3.time.format(":%S"),function(t){return t.getSeconds()}],[d3.time.format(".%L"),function(t){return t.getMilliseconds()}]],$o=d3.scale.linear(),Jo=Eu(Bo);Zo.year=function(t,n){return $o.domain(t.map(Nu)).ticks(n).map(Au)},d3.time.scale=function(){return wu(d3.scale.linear(),Zo,Jo)};var Go=Zo.map(function(t){return[t[0].utc,t[1]]}),Ko=[[d3.time.format.utc("%Y"),o],[d3.time.format.utc("%B"),function(t){return t.getUTCMonth()}],[d3.time.format.utc("%b %d"),function(t){return 1!=t.getUTCDate()}],[d3.time.format.utc("%a %d"),function(t){return t.getUTCDay()&&1!=t.getUTCDate()}],[d3.time.format.utc("%I %p"),function(t){return t.getUTCHours()}],[d3.time.format.utc("%I:%M"),function(t){return t.getUTCMinutes()}],[d3.time.format.utc(":%S"),function(t){return t.getUTCSeconds()}],[d3.time.format.utc(".%L"),function(t){return t.getUTCMilliseconds()}]],Wo=Eu(Ko);Go.year=function(t,n){return $o.domain(t.map(qu)).ticks(n).map(Tu)},d3.time.scale.utc=function(){return wu(d3.scale.linear(),Go,Wo)}})();
\ No newline at end of file
diff --git a/apps/workbench/public/favicon.ico b/apps/workbench/public/favicon.ico
new file mode 100644 (file)
index 0000000..4c763b6
Binary files /dev/null and b/apps/workbench/public/favicon.ico differ
diff --git a/apps/workbench/public/graph-example.html b/apps/workbench/public/graph-example.html
new file mode 100644 (file)
index 0000000..f593032
--- /dev/null
@@ -0,0 +1,185 @@
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 -->
+
+<!DOCTYPE html>
+<!-- from http://bl.ocks.org/1153292 -->
+<html>
+  <head>
+    <meta http-equiv="Content-type" content="text/html; charset=utf-8">
+    <title>Object graph example</title>
+    <script src="d3.v3.min.js"></script>
+    <style type="text/css">
+
+path.link {
+  fill: none;
+  stroke: #666;
+  stroke-width: 1.5px;
+}
+
+marker#can_read {
+  fill: green;
+}
+
+path.link.can_read {
+  stroke: green;
+  stroke-dasharray: 0,4 1;
+}
+
+path.link.can_write {
+  stroke: green;
+}
+
+path.link.member_of {
+  stroke: blue;
+  stroke-dasharray: 0,4 1;
+}
+
+path.link.created {
+  stroke: red;
+}
+
+circle {
+  fill: #ccc;
+  stroke: #333;
+  stroke-width: 1.5px;
+}
+
+edgetext {
+  font: 12px sans-serif;
+  pointer-events: none;
+  text-align: center;
+}
+
+text {
+  font: 12px sans-serif;
+  pointer-events: none;
+}
+
+text.shadow {
+  stroke: #fff;
+  stroke-width: 3px;
+  stroke-opacity: .8;
+}
+
+    </style>
+  </head>
+  <body>
+    <script type="text/javascript">
+
+var links = [
+  {source: "user: customer", target: "project: customer_project", type: "can_read"},
+  {source: "user: import robot", target: "project: customer_project", type: "can_read"},
+  {source: "user: pipeline robot", target: "project: customer_project", type: "can_read"},
+  {source: "user: uploader", target: "collection: w3anr2hk2wgfpuo", type: "created"},
+  {source: "user: uploader", target: "project: customer_project", type: "created"},
+  {source: "collection: w3anr2hk2wgfpuo", target: "project: customer_project", type: "member_of"}
+];
+
+var nodes = {};
+
+// Compute the distinct nodes from the links.
+links.forEach(function(link) {
+  link.source = nodes[link.source] || (nodes[link.source] = {name: link.source});
+  link.target = nodes[link.target] || (nodes[link.target] = {name: link.target});
+});
+
+var w = 960,
+    h = 500;
+
+var force = d3.layout.force()
+    .nodes(d3.values(nodes))
+    .links(links)
+    .size([w, h])
+    .linkDistance(250)
+    .charge(-300)
+    .on("tick", tick)
+    .start();
+
+var svg = d3.select("body").append("svg:svg")
+    .attr("width", w)
+    .attr("height", h);
+
+// Per-type markers, as they don't inherit styles.
+svg.append("svg:defs").selectAll("marker")
+    .data(["created", "member_of", "can_read", "can_write"])
+  .enter().append("svg:marker")
+    .attr("id", String)
+    .attr("viewBox", "0 -5 10 10")
+    .attr("refX", 15)
+    .attr("refY", -1.5)
+    .attr("markerWidth", 6)
+    .attr("markerHeight", 6)
+    .attr("orient", "auto")
+  .append("svg:path")
+    .attr("d", "M0,-5L10,0L0,5");
+
+var path = svg.append("svg:g").selectAll("path")
+    .data(force.links())
+  .enter().append("svg:path")
+    .attr("class", function(d) { return "link " + d.type; })
+    .attr("marker-end", function(d) { return "url(#" + d.type + ")"; });
+
+var circle = svg.append("svg:g").selectAll("circle")
+    .data(force.nodes())
+  .enter().append("svg:circle")
+    .attr("r", 6)
+    .call(force.drag);
+
+var text = svg.append("svg:g").selectAll("g")
+    .data(force.nodes())
+  .enter().append("svg:g");
+
+// A copy of the text with a thick white stroke for legibility.
+text.append("svg:text")
+    .attr("x", 8)
+    .attr("y", ".31em")
+    .attr("class", "shadow")
+    .text(function(d) { return d.name; });
+
+text.append("svg:text")
+    .attr("x", 8)
+    .attr("y", ".31em")
+    .text(function(d) { return d.name; });
+
+var edgetext = svg.append("svg:g").selectAll("g")
+    .data(force.links())
+    .enter().append("svg:g");
+
+edgetext
+    .append("svg:text")
+    .attr("x",0)
+    .attr("y","-0.2em")
+    .text(function(d) { return d.type; });
+
+// Use elliptical arc path segments to doubly-encode directionality.
+function tick() {
+  path.attr("d", function(d) {
+    var dx = d.target.x - d.source.x,
+        dy = d.target.y - d.source.y,
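+        // Note: dr = 0 below collapses each arc into a straight line; the
+        // commented-out radius restores the curved arcs described above (see
+        // the sketch after this file).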
+        // dr = Math.sqrt(dx * dx + dy * dy);
+        dr = 0;
+    return "M" + d.source.x + "," + d.source.y + "A" + dr + "," + dr + " 0 0,1 " + d.target.x + "," + d.target.y;
+  });
+
+  circle.attr("transform", function(d) {
+    return "translate(" + d.x + "," + d.y + ")";
+  });
+
+  text.attr("transform", function(d) {
+    return "translate(" + d.x + "," + d.y + ")";
+  });
+
+  edgetext.attr("transform", function(d) {
+      return "translate(" +
+         (d.source.x + d.target.x)/2 + "," +
+         (d.source.y + d.target.y)/2 +
+         ")rotate(" +
+         (Math.atan2(d.target.y - d.source.y, d.target.x - d.source.x) * 180 / Math.PI) +
+         ")";
+  });
+}
+
+    </script>
+  </body>
+</html>
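One detail worth noting in graph-example.html: the closing comment promises elliptical arc segments, but tick() sets dr = 0, which collapses every arc into a straight line. A minimal sketch of the curved variant, using the radius that is commented out in the file (illustration only; the page as committed keeps the straight-line form):

    function tickWithArcs() {
      path.attr("d", function(d) {
        var dx = d.target.x - d.source.x,
            dy = d.target.y - d.source.y,
            // A radius equal to the endpoint distance gives a gentle arc.
            dr = Math.sqrt(dx * dx + dy * dy);
        return "M" + d.source.x + "," + d.source.y +
               "A" + dr + "," + dr + " 0 0,1 " +
               d.target.x + "," + d.target.y;
      });
      // The circle, text and edgetext transforms are unchanged from tick().
    }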
diff --git a/apps/workbench/public/robots.txt b/apps/workbench/public/robots.txt
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/public/vocabulary-example.json b/apps/workbench/public/vocabulary-example.json
new file mode 100644 (file)
index 0000000..b227dc2
--- /dev/null
@@ -0,0 +1,32 @@
+{
+    "strict": false,
+    "tags": {
+        "fruit": {
+            "values": ["pineapple", "tomato", "orange", "banana", "advocado", "lemon", "apple", "peach", "strawberry"],
+            "strict": true
+        },
+        "animal": {
+            "values": ["human", "dog", "elephant", "eagle"],
+            "strict": false
+        },
+        "color": {
+            "values": ["yellow", "red", "magenta", "green"],
+            "strict": false
+        },
+        "text": {},
+        "category": {
+            "values": ["experimental", "development", "production"]
+        },
+        "comments": {},
+        "importance": {
+            "values": ["critical", "important", "low priority"]
+        },
+        "size": {
+            "values": ["x-small", "small", "medium", "large", "x-large"]
+        },
+        "country": {
+            "values": ["Afghanistan","Åland Islands","Albania","Algeria","American Samoa","AndorrA","Angola","Anguilla","Antarctica","Antigua and Barbuda","Argentina","Armenia","Aruba","Australia","Austria","Azerbaijan","Bahamas","Bahrain","Bangladesh","Barbados","Belarus","Belgium","Belize","Benin","Bermuda","Bhutan","Bolivia","Bosnia and Herzegovina","Botswana","Bouvet Island","Brazil","British Indian Ocean Territory","Brunei Darussalam","Bulgaria","Burkina Faso","Burundi","Cambodia","Cameroon","Canada","Cape Verde","Cayman Islands","Central African Republic","Chad","Chile","China","Christmas Island","Cocos (Keeling) Islands","Colombia","Comoros","Congo","Congo, The Democratic Republic of the","Cook Islands","Costa Rica","Cote D'Ivoire","Croatia","Cuba","Cyprus","Czech Republic","Denmark","Djibouti","Dominica","Dominican Republic","Ecuador","Egypt","El Salvador","Equatorial Guinea","Eritrea","Estonia","Ethiopia","Falkland Islands (Malvinas)","Faroe Islands","Fiji","Finland","France","French Guiana","French Polynesia","French Southern Territories","Gabon","Gambia","Georgia","Germany","Ghana","Gibraltar","Greece","Greenland","Grenada","Guadeloupe","Guam","Guatemala","Guernsey","Guinea","Guinea-Bissau","Guyana","Haiti","Heard Island and Mcdonald Islands","Holy See (Vatican City State)","Honduras","Hong Kong","Hungary","Iceland","India","Indonesia","Iran, Islamic Republic Of","Iraq","Ireland","Isle of Man","Israel","Italy","Jamaica","Japan","Jersey","Jordan","Kazakhstan","Kenya","Kiribati","Korea, Democratic People'S Republic of","Korea, Republic of","Kuwait","Kyrgyzstan","Lao People'S Democratic Republic","Latvia","Lebanon","Lesotho","Liberia","Libyan Arab Jamahiriya","Liechtenstein","Lithuania","Luxembourg","Macao","Macedonia, The Former Yugoslav Republic of","Madagascar","Malawi","Malaysia","Maldives","Mali","Malta","Marshall Islands","Martinique","Mauritania","Mauritius","Mayotte","Mexico","Micronesia, Federated States of","Moldova, Republic of","Monaco","Mongolia","Montserrat","Morocco","Mozambique","Myanmar","Namibia","Nauru","Nepal","Netherlands","Netherlands Antilles","New Caledonia","New Zealand","Nicaragua","Niger","Nigeria","Niue","Norfolk Island","Northern Mariana Islands","Norway","Oman","Pakistan","Palau","Palestinian Territory, Occupied","Panama","Papua New Guinea","Paraguay","Peru","Philippines","Pitcairn","Poland","Portugal","Puerto Rico","Qatar","Reunion","Romania","Russian Federation","RWANDA","Saint Helena","Saint Kitts and Nevis","Saint Lucia","Saint Pierre and Miquelon","Saint Vincent and the Grenadines","Samoa","San Marino","Sao Tome and Principe","Saudi Arabia","Senegal","Serbia and Montenegro","Seychelles","Sierra Leone","Singapore","Slovakia","Slovenia","Solomon Islands","Somalia","South Africa","South Georgia and the South Sandwich Islands","Spain","Sri Lanka","Sudan","Suriname","Svalbard and Jan Mayen","Swaziland","Sweden","Switzerland","Syrian Arab Republic","Taiwan, Province of China","Tajikistan","Tanzania, United Republic of","Thailand","Timor-Leste","Togo","Tokelau","Tonga","Trinidad and Tobago","Tunisia","Turkey","Turkmenistan","Turks and Caicos Islands","Tuvalu","Uganda","Ukraine","United Arab Emirates","United Kingdom","United States","United States Minor Outlying Islands","Uruguay","Uzbekistan","Vanuatu","Venezuela","Viet Nam","Virgin Islands, British","Virgin Islands, U.S.","Wallis and Futuna","Western Sahara","Yemen","Zambia","Zimbabwe"],
+            "strict": true
+        }
+    }
+}
\ No newline at end of file
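To make the flags in vocabulary-example.json concrete, here is a minimal validation sketch. It assumes (the diff itself does not confirm this) that the top-level "strict" forbids unknown tag keys, that a per-tag "strict" restricts the tag to its listed values, and that tags without a "values" list (such as "text" and "comments") accept free text:

    // Hypothetical helper; `vocab` is the parsed vocabulary JSON above.
    function validateTag(vocab, key, value) {
      var tag = vocab.tags[key];
      if (!tag) {
        return !vocab.strict;      // unknown key: OK only if vocabulary is lax
      }
      if (!tag.values || tag.values.length === 0) {
        return true;               // free-text tag ("text", "comments")
      }
      if (tag.values.indexOf(value) !== -1) {
        return true;               // value is on the controlled list
      }
      return !tag.strict;          // off-list value: OK unless tag is strict
    }

    // validateTag(vocab, "fruit", "mango")  -> false ("fruit" is strict)
    // validateTag(vocab, "animal", "mouse") -> true  ("animal" is not strict)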
diff --git a/apps/workbench/public/webshell/README b/apps/workbench/public/webshell/README
new file mode 100644 (file)
index 0000000..b8920c5
--- /dev/null
@@ -0,0 +1,3 @@
+See also
+* VirtualMachinesController#webshell
+* https://code.google.com/p/shellinabox/source/browse/#git%2Fshellinabox
diff --git a/apps/workbench/public/webshell/enabled.gif b/apps/workbench/public/webshell/enabled.gif
new file mode 100644 (file)
index 0000000..07936e2
Binary files /dev/null and b/apps/workbench/public/webshell/enabled.gif differ
diff --git a/apps/workbench/public/webshell/keyboard.html b/apps/workbench/public/webshell/keyboard.html
new file mode 100644 (file)
index 0000000..6a95f3b
--- /dev/null
@@ -0,0 +1,62 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml" xml:lang="en" lang="en">
+<head>
+</head>
+<body><pre class="box"><div
+  ><i id="27">Esc</i><i id="112">F1</i><i id="113">F2</i><i id="114">F3</i
+  ><i id="115">F4</i><i id="116">F5</i><i id="117">F6</i><i id="118">F7</i
+  ><i id="119">F8</i><i id="120">F9</i><i id="121">F10</i><i id="122">F11</i
+  ><i id="123">F12</i><br
+  /><b><span class="unshifted">`</span><span class="shifted">~</span></b
+    ><b><span class="unshifted">1</span><span class="shifted">!</span></b
+    ><b><span class="unshifted">2</span><span class="shifted">@</span></b
+    ><b><span class="unshifted">3</span><span class="shifted">#</span></b
+    ><b><span class="unshifted">4</span><span class="shifted">&#36;</span></b
+    ><b><span class="unshifted">5</span><span class="shifted">&#37;</span></b
+    ><b><span class="unshifted">6</span><span class="shifted">^</span></b
+    ><b><span class="unshifted">7</span><span class="shifted">&amp;</span></b
+    ><b><span class="unshifted">8</span><span class="shifted">*</span></b
+    ><b><span class="unshifted">9</span><span class="shifted">(</span></b
+    ><b><span class="unshifted">0</span><span class="shifted">)</span></b
+    ><b><span class="unshifted">-</span><span class="shifted">_</span></b
+    ><b><span class="unshifted">=</span><span class="shifted">+</span></b
+    ><i id="8">&nbsp;&larr;&nbsp;</i
+    ><br
+  /><i id="9">Tab</i
+    ><b>Q</b><b>W</b><b>E</b><b>R</b><b>T</b><b>Y</b><b>U</b><b>I</b><b>O</b
+    ><b>P</b
+    ><b><span class="unshifted">[</span><span class="shifted">{</span></b
+    ><b><span class="unshifted">]</span><span class="shifted">}</span></b
+    ><b><span class="unshifted">&#92;</span><span class="shifted">|</span></b
+    ><br
+  /><u>Tab&nbsp;&nbsp;</u
+    ><b>A</b><b>S</b><b>D</b><b>F</b><b>G</b><b>H</b><b>J</b><b>K</b><b>L</b
+    ><b><span class="unshifted">;</span><span class="shifted">:</span></b
+    ><b><span class="unshifted">&#39;</span><span class="shifted">"</span></b
+    ><i id="13">Enter</i
+    ><br
+  /><u>&nbsp;&nbsp;</u
+    ><i id="16">Shift</i
+    ><b>Z</b><b>X</b><b>C</b><b>V</b><b>B</b><b>N</b><b>M</b
+    ><b><span class="unshifted">,</span><span class="shifted">&lt;</span></b
+    ><b><span class="unshifted">.</span><span class="shifted">&gt;</span></b
+    ><b><span class="unshifted">/</span><span class="shifted">?</span></b
+    ><i id="16">Shift</i
+    ><br
+  /><u>XXX</u
+    ><i id="17">Ctrl</i
+    ><i id="18">Alt</i
+    ><i style="width: 25ex">&nbsp</i
+  ></div
+  >&nbsp;&nbsp;&nbsp;<div
+    ><i id="45">Ins</i><i id="46">Del</i><i id="36">Home</i><i id="35">End</i
+    ><br
+    /><u>&nbsp;</u><br
+    /><u>&nbsp;</u><br
+    /><u>Ins</u><s>&nbsp;</s><b id="38">&uarr;</b><s>&nbsp;</s><u>&nbsp;</u
+      ><b id="33">&uArr;</b><br
+    /><u>Ins</u><b id="37">&larr;</b><b id="40">&darr;</b
+      ><b id="39">&rarr;</b><u>&nbsp;</u><b id="34">&dArr;</b
+  ></div
+></pre></body></html>
diff --git a/apps/workbench/public/webshell/keyboard.png b/apps/workbench/public/webshell/keyboard.png
new file mode 100644 (file)
index 0000000..feef519
Binary files /dev/null and b/apps/workbench/public/webshell/keyboard.png differ
diff --git a/apps/workbench/public/webshell/shell_in_a_box.js b/apps/workbench/public/webshell/shell_in_a_box.js
new file mode 100644 (file)
index 0000000..0c7e800
--- /dev/null
@@ -0,0 +1,4835 @@
+// This file contains code from shell_in_a_box.js and vt100.js
+
+
+// ShellInABox.js -- Use XMLHttpRequest to provide an AJAX terminal emulator.
+// Copyright (C) 2008-2010 Markus Gutschke <markus@shellinabox.com>
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License version 2 as
+// published by the Free Software Foundation.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// In addition to these license terms, the author grants the following
+// additional rights:
+//
+// If you modify this program, or any covered work, by linking or
+// combining it with the OpenSSL project's OpenSSL library (or a
+// modified version of that library), containing parts covered by the
+// terms of the OpenSSL or SSLeay licenses, the author
+// grants you additional permission to convey the resulting work.
+// Corresponding Source for a non-source form of such a combination
+// shall include the source code for the parts of OpenSSL used as well
+// as that of the covered work.
+//
+// You may at your option choose to remove this additional permission from
+// the work, or from any part of it.
+//
+// It is possible to build this program in a way that it loads OpenSSL
+// libraries at run-time. If doing so, the following notices are required
+// by the OpenSSL and SSLeay licenses:
+//
+// This product includes software developed by the OpenSSL Project
+// for use in the OpenSSL Toolkit. (http://www.openssl.org/)
+//
+// This product includes cryptographic software written by Eric Young
+// (eay@cryptsoft.com)
+//
+//
+// The most up-to-date version of this program is always available from
+// http://shellinabox.com
+//
+//
+// Notes:
+//
+// The author believes that for the purposes of this license, you meet the
+// requirements for publishing the source code, if your web server publishes
+// the source in unmodified form (i.e. with licensing information, comments,
+// formatting, and identifier names intact). If there are technical reasons
+// that require you to make changes to the source code when serving the
+// JavaScript (e.g to remove pre-processor directives from the source), these
+// changes should be done in a reversible fashion.
+//
+// The author does not consider websites that reference this script in
+// unmodified form, and web servers that serve this script in unmodified form
+// to be derived works. As such, they are believed to be outside of the
+// scope of this license and not subject to the rights or restrictions of the
+// GNU General Public License.
+//
+// If in doubt, consult a legal professional familiar with the laws that
+// apply in your country.
+
+// #define XHR_UNINITIALIZED 0
+// #define XHR_OPEN        1
+// #define XHR_SENT        2
+// #define XHR_RECEIVING   3
+// #define XHR_LOADED      4
+
+// IE does not define XMLHttpRequest by default, so we provide a suitable
+// wrapper.
+if (typeof XMLHttpRequest == 'undefined') {
+  XMLHttpRequest = function() {
+    try { return new ActiveXObject('Msxml2.XMLHTTP.6.0');} catch (e) { }
+    try { return new ActiveXObject('Msxml2.XMLHTTP.3.0');} catch (e) { }
+    try { return new ActiveXObject('Msxml2.XMLHTTP');    } catch (e) { }
+    try { return new ActiveXObject('Microsoft.XMLHTTP'); } catch (e) { }
+    throw new Error('');
+  };
+}
+
+function extend(subClass, baseClass) {
+  function inheritance() { }
+  inheritance.prototype          = baseClass.prototype;
+  subClass.prototype             = new inheritance();
+  subClass.prototype.constructor = subClass;
+  subClass.prototype.superClass  = baseClass.prototype;
+};
+
+function ShellInABox(url, container) {
+  if (url == undefined) {
+    this.rooturl    = document.location.href;
+    this.url        = document.location.href.replace(/[?#].*/, '');
+  } else {
+    this.rooturl    = url;
+    this.url        = url;
+  }
+  if (document.location.hash != '') {
+    var hash        = decodeURIComponent(document.location.hash).
+                      replace(/^#/, '');
+    this.nextUrl    = hash.replace(/,.*/, '');
+    this.session    = hash.replace(/[^,]*,/, '');
+  } else {
+    this.nextUrl    = this.url;
+    this.session    = null;
+  }
+  this.pendingKeys  = '';
+  this.keysInFlight = false;
+  this.connected    = false;
+  this.superClass.constructor.call(this, container);
+
+  // We have to initiate the first XMLHttpRequest from a timer. Otherwise,
+  // Chrome never realizes that the page has loaded.
+  setTimeout(function(shellInABox) {
+               return function() {
+                 shellInABox.sendRequest();
+               };
+             }(this), 1);
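+  // Aside: the function(shellInABox) { ... }(this) wrapper above is the
+  // pre-Function.prototype.bind idiom for capturing `this` in a callback;
+  // a modern equivalent (illustration only) would be:
+  //   setTimeout(function() { this.sendRequest(); }.bind(this), 1);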
+};
+extend(ShellInABox, VT100);
+
+ShellInABox.prototype.sessionClosed = function() {
+  try {
+    this.connected    = false;
+    if (this.session) {
+      this.session    = undefined;
+      if (this.cursorX > 0) {
+        this.vt100('\r\n');
+      }
+      this.vt100('Session closed.');
+    }
+    // Revealing the "reconnect" button is commented out until we hook
+    // up the username+token auto-login mechanism to the new session:
+    //this.showReconnect(true);
+  } catch (e) {
+  }
+};
+
+ShellInABox.prototype.reconnect = function() {
+  this.showReconnect(false);
+  if (!this.session) {
+    if (document.location.hash != '') {
+      // A shellinaboxd daemon launched from a CGI only allows a single
+      // session. In order to reconnect, we must reload the frame definition
+      // and obtain a new port number. As this is a different origin, we
+      // need to get enclosing page to help us.
+      parent.location        = this.nextUrl;
+    } else {
+      if (this.url != this.nextUrl) {
+        document.location.replace(this.nextUrl);
+      } else {
+        this.pendingKeys     = '';
+        this.keysInFlight    = false;
+        this.reset(true);
+        this.sendRequest();
+      }
+    }
+  }
+  return false;
+};
+
+ShellInABox.prototype.sendRequest = function(request) {
+  if (request == undefined) {
+    request                  = new XMLHttpRequest();
+  }
+  request.open('POST', this.url + '?', true);
+  request.setRequestHeader('Cache-Control', 'no-cache');
+  request.setRequestHeader('Content-Type',
+                           'application/x-www-form-urlencoded; charset=utf-8');
+  var content                = 'width=' + this.terminalWidth +
+                               '&height=' + this.terminalHeight +
+                               (this.session ? '&session=' +
+                                encodeURIComponent(this.session) : '&rooturl='+
+                                encodeURIComponent(this.rooturl));
+
+  request.onreadystatechange = function(shellInABox) {
+    return function() {
+             try {
+               return shellInABox.onReadyStateChange(request);
+             } catch (e) {
+               shellInABox.sessionClosed();
+             }
+           }
+    }(this);
+  ShellInABox.lastRequestSent = Date.now();
+  request.send(content);
+};
+
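+// Note: sendRequest()/onReadyStateChange() implement HTTP long polling. The
+// server holds each POST open until it has terminal output, and a new request
+// is issued as soon as a response arrives, so one request is pending at
+// almost all times.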
+ShellInABox.prototype.onReadyStateChange = function(request) {
+  if (request.readyState == 4 /* XHR_LOADED */) {
+    if (request.status == 200) {
+      this.connected = true;
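+      // The server replies with a JSON-style object literal; eval() predates
+      // JSON.parse here and implicitly trusts the server.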
+      var response   = eval('(' + request.responseText + ')');
+      if (response.data) {
+        this.vt100(response.data);
+      }
+
+      if (!response.session ||
+          this.session && this.session != response.session) {
+        this.sessionClosed();
+      } else {
+        this.session = response.session;
+        this.sendRequest(request);
+      }
+    } else if (request.status == 0) {
+        if (ShellInABox.lastRequestSent + 2000 < Date.now()) {
+            // Timeout, try again
+            this.sendRequest(request);
+        } else {
+            this.vt100('\r\n\r\nRequest failed.');
+            this.sessionClosed();
+        }
+    } else {
+      this.sessionClosed();
+    }
+  }
+};
+
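+// Keystrokes are serialized: while one POST is in flight, further keys
+// accumulate in pendingKeys and are flushed in a single follow-up request
+// from keyPressReadyStateChange().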
+ShellInABox.prototype.sendKeys = function(keys) {
+  if (!this.connected) {
+    return;
+  }
+  if (this.keysInFlight || this.session == undefined) {
+    this.pendingKeys          += keys;
+  } else {
+    this.keysInFlight          = true;
+    keys                       = this.pendingKeys + keys;
+    this.pendingKeys           = '';
+    var request                = new XMLHttpRequest();
+    request.open('POST', this.url + '?', true);
+    request.setRequestHeader('Cache-Control', 'no-cache');
+    request.setRequestHeader('Content-Type',
+                           'application/x-www-form-urlencoded; charset=utf-8');
+    var content                = 'width=' + this.terminalWidth +
+                                 '&height=' + this.terminalHeight +
+                                 '&session=' +encodeURIComponent(this.session)+
+                                 '&keys=' + encodeURIComponent(keys);
+    request.onreadystatechange = function(shellInABox) {
+      return function() {
+               try {
+                 return shellInABox.keyPressReadyStateChange(request);
+               } catch (e) {
+               }
+             }
+      }(this);
+    request.send(content);
+  }
+};
+
+ShellInABox.prototype.keyPressReadyStateChange = function(request) {
+  if (request.readyState == 4 /* XHR_LOADED */) {
+    this.keysInFlight = false;
+    if (this.pendingKeys) {
+      this.sendKeys('');
+    }
+  }
+};
+
+ShellInABox.prototype.keysPressed = function(ch) {
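+  // Encode the pressed characters as the hex digits of their UTF-8 byte
+  // sequence; the branches below emit 1-, 2-, 3- and 4-byte encodings.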
+  var hex = '0123456789ABCDEF';
+  var s   = '';
+  for (var i = 0; i < ch.length; i++) {
+    var c = ch.charCodeAt(i);
+    if (c < 128) {
+      s += hex.charAt(c >> 4) + hex.charAt(c & 0xF);
+    } else if (c < 0x800) {
+      s += hex.charAt(0xC +  (c >> 10)       ) +
+           hex.charAt(       (c >>  6) & 0xF ) +
+           hex.charAt(0x8 + ((c >>  4) & 0x3)) +
+           hex.charAt(        c        & 0xF );
+    } else if (c < 0x10000) {
+      s += 'E'                                 +
+           hex.charAt(       (c >> 12)       ) +
+           hex.charAt(0x8 + ((c >> 10) & 0x3)) +
+           hex.charAt(       (c >>  6) & 0xF ) +
+           hex.charAt(0x8 + ((c >>  4) & 0x3)) +
+           hex.charAt(        c        & 0xF );
+    } else if (c < 0x110000) {
+      s += 'F'                                 +
+           hex.charAt(       (c >> 18)       ) +
+           hex.charAt(0x8 + ((c >> 16) & 0x3)) +
+           hex.charAt(       (c >> 12) & 0xF ) +
+           hex.charAt(0x8 + ((c >> 10) & 0x3)) +
+           hex.charAt(       (c >>  6) & 0xF ) +
+           hex.charAt(0x8 + ((c >>  4) & 0x3)) +
+           hex.charAt(        c        & 0xF );
+    }
+  }
+  this.sendKeys(s);
+};
+
+ShellInABox.prototype.resized = function(w, h) {
+  // Do not send a resize request until we are fully initialized.
+  if (this.session) {
+    // sendKeys() always transmits the current terminal size. So, flush all
+    // pending keys.
+    this.sendKeys('');
+  }
+};
+
+ShellInABox.prototype.toggleSSL = function() {
+  if (document.location.hash != '') {
+    if (this.nextUrl.match(/\?plain$/)) {
+      this.nextUrl    = this.nextUrl.replace(/\?plain$/, '');
+    } else {
+      this.nextUrl    = this.nextUrl.replace(/[?#].*/, '') + '?plain';
+    }
+    if (!this.session) {
+      parent.location = this.nextUrl;
+    }
+  } else {
+    this.nextUrl      = this.nextUrl.match(/^https:/)
+           ? this.nextUrl.replace(/^https:/, 'http:').replace(/\/*$/, '/plain')
+           : this.nextUrl.replace(/^http/, 'https').replace(/\/*plain$/, '');
+  }
+  if (this.nextUrl.match(/^[^:]*:\/\/[^/]*$/)) {
+    this.nextUrl     += '/';
+  }
+  if (this.session && this.nextUrl != this.url) {
+    alert('This change will take effect the next time you login.');
+  }
+};
+
+ShellInABox.prototype.extendContextMenu = function(entries, actions) {
+  // Modify the entries and actions in place, adding any locally defined
+  // menu entries.
+  var oldActions            = [ ];
+  for (var i = 0; i < actions.length; i++) {
+    oldActions[i]           = actions[i];
+  }
+  for (var node = entries.firstChild, i = 0, j = 0; node;
+       node = node.nextSibling) {
+    if (node.tagName == 'LI') {
+      actions[i++]          = oldActions[j++];
+      if (node.id == "endconfig") {
+        node.id             = '';
+        if (typeof serverSupportsSSL != 'undefined' && serverSupportsSSL &&
+            !(typeof disableSSLMenu != 'undefined' && disableSSLMenu)) {
+          // If the server supports both SSL and plain text connections,
+          // provide a menu entry to switch between the two.
+          var newNode       = document.createElement('li');
+          var isSecure;
+          if (document.location.hash != '') {
+            isSecure        = !this.nextUrl.match(/\?plain$/);
+          } else {
+            isSecure        =  this.nextUrl.match(/^https:/);
+          }
+          newNode.innerHTML = (isSecure ? '&#10004; ' : '') + 'Secure';
+          if (node.nextSibling) {
+            entries.insertBefore(newNode, node.nextSibling);
+          } else {
+            entries.appendChild(newNode);
+          }
+          actions[i++]      = this.toggleSSL;
+          node              = newNode;
+        }
+        node.id             = 'endconfig';
+      }
+    }
+  }
+  
+};
+
+ShellInABox.prototype.about = function() {
+  alert("Shell In A Box version " + "2.10 (revision 239)" +
+        "\nCopyright 2008-2010 by Markus Gutschke\n" +
+        "For more information check http://shellinabox.com" +
+        (typeof serverSupportsSSL != 'undefined' && serverSupportsSSL ?
+         "\n\n" +
+         "This product includes software developed by the OpenSSL Project\n" +
+         "for use in the OpenSSL Toolkit. (http://www.openssl.org/)\n" +
+         "\n" +
+         "This product includes cryptographic software written by " +
+         "Eric Young\n(eay@cryptsoft.com)" :
+         ""));
+};
+
+
+// VT100.js -- JavaScript based terminal emulator
+// Copyright (C) 2008-2010 Markus Gutschke <markus@shellinabox.com>
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License version 2 as
+// published by the Free Software Foundation.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// In addition to these license terms, the author grants the following
+// additional rights:
+//
+// If you modify this program, or any covered work, by linking or
+// combining it with the OpenSSL project's OpenSSL library (or a
+// modified version of that library), containing parts covered by the
+// terms of the OpenSSL or SSLeay licenses, the author
+// grants you additional permission to convey the resulting work.
+// Corresponding Source for a non-source form of such a combination
+// shall include the source code for the parts of OpenSSL used as well
+// as that of the covered work.
+//
+// You may at your option choose to remove this additional permission from
+// the work, or from any part of it.
+//
+// It is possible to build this program in a way that it loads OpenSSL
+// libraries at run-time. If doing so, the following notices are required
+// by the OpenSSL and SSLeay licenses:
+//
+// This product includes software developed by the OpenSSL Project
+// for use in the OpenSSL Toolkit. (http://www.openssl.org/)
+//
+// This product includes cryptographic software written by Eric Young
+// (eay@cryptsoft.com)
+//
+//
+// The most up-to-date version of this program is always available from
+// http://shellinabox.com
+//
+//
+// Notes:
+//
+// The author believes that for the purposes of this license, you meet the
+// requirements for publishing the source code, if your web server publishes
+// the source in unmodified form (i.e. with licensing information, comments,
+// formatting, and identifier names intact). If there are technical reasons
+// that require you to make changes to the source code when serving the
+// JavaScript (e.g to remove pre-processor directives from the source), these
+// changes should be done in a reversible fashion.
+//
+// The author does not consider websites that reference this script in
+// unmodified form, and web servers that serve this script in unmodified form
+// to be derived works. As such, they are believed to be outside of the
+// scope of this license and not subject to the rights or restrictions of the
+// GNU General Public License.
+//
+// If in doubt, consult a legal professional familiar with the laws that
+// apply in your country.
+
+// #define ESnormal        0
+// #define ESesc           1
+// #define ESsquare        2
+// #define ESgetpars       3
+// #define ESgotpars       4
+// #define ESdeviceattr    5
+// #define ESfunckey       6
+// #define EShash          7
+// #define ESsetG0         8
+// #define ESsetG1         9
+// #define ESsetG2        10
+// #define ESsetG3        11
+// #define ESbang         12
+// #define ESpercent      13
+// #define ESignore       14
+// #define ESnonstd       15
+// #define ESpalette      16
+// #define EStitle        17
+// #define ESss2          18
+// #define ESss3          19
+
+// #define ATTR_DEFAULT   0x00F0
+// #define ATTR_REVERSE   0x0100
+// #define ATTR_UNDERLINE 0x0200
+// #define ATTR_DIM       0x0400
+// #define ATTR_BRIGHT    0x0800
+// #define ATTR_BLINK     0x1000
+
+// #define MOUSE_DOWN     0
+// #define MOUSE_UP       1
+// #define MOUSE_CLICK    2
+
+function VT100(container) {
+  if (typeof linkifyURLs == 'undefined' || linkifyURLs <= 0) {
+    this.urlRE            = null;
+  } else {
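+    // linkifyURLs == 1 recognizes only explicit http/https/ftp URLs and
+    // "mailto:" addresses; linkifyURLs >= 2 also matches scheme-less
+    // hostnames and bare e-mail addresses (see the two ternaries below).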
+    this.urlRE            = new RegExp(
+    // Known URL protocol are "http", "https", and "ftp".
+    '(?:http|https|ftp)://' +
+
+    // Optionally allow username and passwords.
+    '(?:[^:@/ \u00A0]*(?::[^@/ \u00A0]*)?@)?' +
+
+    // Hostname.
+    '(?:[1-9][0-9]{0,2}(?:[.][1-9][0-9]{0,2}){3}|' +
+    '[0-9a-fA-F]{0,4}(?::{1,2}[0-9a-fA-F]{1,4})+|' +
+    '(?!-)[^[!"#$%&\'()*+,/:;<=>?@\\^_`{|}~\u0000- \u007F-\u00A0]+)' +
+
+    // Port
+    '(?::[1-9][0-9]*)?' +
+
+    // Path.
+    '(?:/(?:(?![/ \u00A0]|[,.)}"\u0027!]+[ \u00A0]|[,.)}"\u0027!]+$).)*)*|' +
+
+    (linkifyURLs <= 1 ? '' :
+    // Also support URLs without a protocol (assume "http").
+    // Optional username and password.
+    '(?:[^:@/ \u00A0]*(?::[^@/ \u00A0]*)?@)?' +
+
+    // Hostnames must end with a well-known top-level domain or must be
+    // numeric.
+    '(?:[1-9][0-9]{0,2}(?:[.][1-9][0-9]{0,2}){3}|' +
+    'localhost|' +
+    '(?:(?!-)' +
+        '[^.[!"#$%&\'()*+,/:;<=>?@\\^_`{|}~\u0000- \u007F-\u00A0]+[.]){2,}' +
+    '(?:(?:com|net|org|edu|gov|aero|asia|biz|cat|coop|info|int|jobs|mil|mobi|'+
+    'museum|name|pro|tel|travel|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|' +
+    'au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|' +
+    'ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cu|cv|cx|cy|cz|de|dj|dk|dm|do|' +
+    'dz|ec|ee|eg|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|' +
+    'gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|' +
+    'ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|' +
+    'lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|' +
+    'mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|' +
+    'pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|' +
+    'sj|sk|sl|sm|sn|so|sr|st|su|sv|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|' +
+    'tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|' +
+    'yu|za|zm|zw|arpa)(?![a-zA-Z0-9])|[Xx][Nn]--[-a-zA-Z0-9]+))' +
+
+    // Port
+    '(?::[1-9][0-9]{0,4})?' +
+
+    // Path.
+    '(?:/(?:(?![/ \u00A0]|[,.)}"\u0027!]+[ \u00A0]|[,.)}"\u0027!]+$).)*)*|') +
+
+    // In addition, support e-mail addresses. Optionally, recognize "mailto:".
+    '(?:mailto:)' + (linkifyURLs <= 1 ? '' : '?') +
+
+    // Username:
+    '[-_.+a-zA-Z0-9]+@' +
+
+    // Hostname.
+    '(?!-)[-a-zA-Z0-9]+(?:[.](?!-)[-a-zA-Z0-9]+)?[.]' +
+    '(?:(?:com|net|org|edu|gov|aero|asia|biz|cat|coop|info|int|jobs|mil|mobi|'+
+    'museum|name|pro|tel|travel|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|' +
+    'au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|' +
+    'ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cu|cv|cx|cy|cz|de|dj|dk|dm|do|' +
+    'dz|ec|ee|eg|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|' +
+    'gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|' +
+    'ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|' +
+    'lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|' +
+    'mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|' +
+    'pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|' +
+    'sj|sk|sl|sm|sn|so|sr|st|su|sv|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|' +
+    'tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|' +
+    'yu|za|zm|zw|arpa)(?![a-zA-Z0-9])|[Xx][Nn]--[-a-zA-Z0-9]+)' +
+
+    // Optional arguments
+    '(?:[?](?:(?![ \u00A0]|[,.)}"\u0027!]+[ \u00A0]|[,.)}"\u0027!]+$).)*)?');
+  }
+  this.getUserSettings();
+  this.initializeElements(container);
+  this.maxScrollbackLines = 500;
+  this.npar               = 0;
+  this.par                = [ ];
+  this.isQuestionMark     = false;
+  this.savedX             = [ ];
+  this.savedY             = [ ];
+  this.savedAttr          = [ ];
+  this.savedUseGMap       = 0;
+  this.savedGMap          = [ this.Latin1Map, this.VT100GraphicsMap,
+                              this.CodePage437Map, this.DirectToFontMap ];
+  this.savedValid         = [ ];
+  this.respondString      = '';
+  this.titleString        = '';
+  this.internalClipboard  = undefined;
+  this.reset(true);
+}
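+
+// Illustrative usage sketch (not part of the original file; assumes a
+// page that defines the global "linkifyURLs" and a #vt100 element).
+// With linkifyURLs == 1, this.urlRE matches absolute URLs and explicit
+// "mailto:" addresses; with linkifyURLs >= 2 it also matches
+// protocol-less hostnames and bare e-mail addresses:
+//
+//   linkifyURLs = 1;
+//   var vt = new VT100(document.getElementById('vt100'));
+//   vt.urlRE.test('http://shellinabox.com/');   // true
+//   vt.urlRE.test('mailto:eay@cryptsoft.com');  // true
+//   vt.urlRE.test('shellinabox.com');           // false (needs level 2)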
+
+VT100.prototype.reset = function(clearHistory) {
+  this.isEsc                                         = 0 /* ESnormal */;
+  this.needWrap                                      = false;
+  this.autoWrapMode                                  = true;
+  this.dispCtrl                                      = false;
+  this.toggleMeta                                    = false;
+  this.insertMode                                    = false;
+  this.applKeyMode                                   = false;
+  this.cursorKeyMode                                 = false;
+  this.crLfMode                                      = false;
+  this.offsetMode                                    = false;
+  this.mouseReporting                                = false;
+  this.printing                                      = false;
+  if (typeof this.printWin != 'undefined' &&
+      this.printWin && !this.printWin.closed) {
+    this.printWin.close();
+  }
+  this.printWin                                      = null;
+  this.utfEnabled                                    = this.utfPreferred;
+  this.utfCount                                      = 0;
+  this.utfChar                                       = 0;
+  this.color                                         = 'ansi0 bgAnsi15';
+  this.style                                         = '';
+  this.attr                                          = 0x00F0 /* ATTR_DEFAULT */;
+  this.useGMap                                       = 0;
+  this.GMap                                          = [ this.Latin1Map,
+                                                         this.VT100GraphicsMap,
+                                                         this.CodePage437Map,
+                                                         this.DirectToFontMap];
+  this.translate                                     = this.GMap[this.useGMap];
+  this.top                                           = 0;
+  this.bottom                                        = this.terminalHeight;
+  this.lastCharacter                                 = ' ';
+  this.userTabStop                                   = [ ];
+
+  if (clearHistory) {
+    for (var i = 0; i < 2; i++) {
+      while (this.console[i].firstChild) {
+        this.console[i].removeChild(this.console[i].firstChild);
+      }
+    }
+  }
+
+  this.enableAlternateScreen(false);
+
+  var wasCompressed                                  = false;
+  var transform                                      = this.getTransformName();
+  if (transform) {
+    for (var i = 0; i < 2; ++i) {
+      wasCompressed                  |= this.console[i].style[transform] != '';
+      this.console[i].style[transform]               = '';
+    }
+    this.cursor.style[transform]                     = '';
+    this.space.style[transform]                      = '';
+    if (transform == 'filter') {
+      this.console[this.currentScreen].style.width   = '';
+    }
+  }
+  this.scale                                         = 1.0;
+  if (wasCompressed) {
+    this.resizer();
+  }
+
+  this.gotoXY(0, 0);
+  this.showCursor();
+  this.isInverted                                    = false;
+  this.refreshInvertedState();
+  this.clearRegion(0, 0, this.terminalWidth, this.terminalHeight,
+                   this.color, this.style);
+};
+
+VT100.prototype.addListener = function(elem, event, listener) {
+  try {
+    if (elem.addEventListener) {
+      elem.addEventListener(event, listener, false);
+    } else {
+      elem.attachEvent('on' + event, listener);
+    }
+  } catch (e) {
+  }
+};
+
+VT100.prototype.getUserSettings = function() {
+  // Compute hash signature to identify the entries in the userCSS menu.
+  // If the menu is unchanged from last time, default values can be
+  // looked up in a cookie associated with this page.
+  this.signature            = 3;
+  this.utfPreferred         = true;
+  this.visualBell           = typeof suppressAllAudio != 'undefined' &&
+                              suppressAllAudio;
+  this.autoprint            = true;
+  this.softKeyboard         = false;
+  this.blinkingCursor       = true;
+  if (this.visualBell) {
+    this.signature          = Math.floor(16807*this.signature + 1) %
+                                         ((1 << 31) - 1);
+  }
+  if (typeof userCSSList != 'undefined') {
+    for (var i = 0; i < userCSSList.length; ++i) {
+      var label             = userCSSList[i][0];
+      for (var j = 0; j < label.length; ++j) {
+        this.signature      = Math.floor(16807*this.signature+
+                                         label.charCodeAt(j)) %
+                                         ((1 << 31) - 1);
+      }
+      if (userCSSList[i][1]) {
+        this.signature      = Math.floor(16807*this.signature + 1) %
+                                         ((1 << 31) - 1);
+      }
+    }
+  }
+
+  var key                   = 'shellInABox=' + this.signature + ':';
+  var settings              = document.cookie.indexOf(key);
+  if (settings >= 0) {
+    settings                = document.cookie.substr(settings + key.length).
+                                                   replace(/([0-1]*).*/, "$1");
+    if (settings.length == 5 + (typeof userCSSList == 'undefined' ?
+                                0 : userCSSList.length)) {
+      this.utfPreferred     = settings.charAt(0) != '0';
+      this.visualBell       = settings.charAt(1) != '0';
+      this.autoprint        = settings.charAt(2) != '0';
+      this.softKeyboard     = settings.charAt(3) != '0';
+      this.blinkingCursor   = settings.charAt(4) != '0';
+      if (typeof userCSSList != 'undefined') {
+        for (var i = 0; i < userCSSList.length; ++i) {
+          userCSSList[i][2] = settings.charAt(i + 5) != '0';
+        }
+      }
+    }
+  }
+  this.utfEnabled           = this.utfPreferred;
+};
+
+VT100.prototype.storeUserSettings = function() {
+  var settings  = 'shellInABox=' + this.signature + ':' +
+                  (this.utfEnabled     ? '1' : '0') +
+                  (this.visualBell     ? '1' : '0') +
+                  (this.autoprint      ? '1' : '0') +
+                  (this.softKeyboard   ? '1' : '0') +
+                  (this.blinkingCursor ? '1' : '0');
+  if (typeof userCSSList != 'undefined') {
+    for (var i = 0; i < userCSSList.length; ++i) {
+      settings += userCSSList[i][2] ? '1' : '0';
+    }
+  }
+  var d         = new Date();
+  d.setDate(d.getDate() + 3653);
+  document.cookie = settings + ';expires=' + d.toGMTString();
+};
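+
+// Worked example of the cookie layout produced above: the value is
+// "shellInABox=<signature>:" followed by one '0'/'1' digit each for
+// UTF-8, visual bell, autoprint, soft keyboard, and blinking cursor,
+// plus one digit per userCSSList entry. The signature is accumulated
+// with a Park-Miller style generator, s = (16807*s + c) mod (2^31 - 1).
+// With no userCSSList and all defaults, the stored cookie reads
+//
+//   shellInABox=3:10101;expires=<now + 3653 days>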
+
+VT100.prototype.initializeUserCSSStyles = function() {
+  this.usercssActions                    = [];
+  if (typeof userCSSList != 'undefined') {
+    var menu                             = '';
+    var group                            = '';
+    var wasSingleSel                     = 1;
+    var beginOfGroup                     = 0;
+    for (var i = 0; i <= userCSSList.length; ++i) {
+      if (i < userCSSList.length) {
+        var label                        = userCSSList[i][0];
+        var newGroup                     = userCSSList[i][1];
+        var enabled                      = userCSSList[i][2];
+      
+        // Add user style sheet to document
+        var style                        = document.createElement('link');
+        var id                           = document.createAttribute('id');
+        id.nodeValue                     = 'usercss-' + i;
+        style.setAttributeNode(id);
+        var rel                          = document.createAttribute('rel');
+        rel.nodeValue                    = 'stylesheet';
+        style.setAttributeNode(rel);
+        var href                         = document.createAttribute('href');
+        href.nodeValue                   = 'usercss-' + i + '.css';
+        style.setAttributeNode(href);
+        var type                         = document.createAttribute('type');
+        type.nodeValue                   = 'text/css';
+        style.setAttributeNode(type);
+        document.getElementsByTagName('head')[0].appendChild(style);
+        style.disabled                   = !enabled;
+      }
+    
+      // Add entry to menu
+      if (newGroup || i == userCSSList.length) {
+        if (beginOfGroup != 0 && (i - beginOfGroup > 1 || !wasSingleSel)) {
+          // The last group had multiple mutually exclusive entries, or the
+          // group before it did. In either case, we need to append an
+          // "<hr />" before we can add the last group to the menu.
+          menu                          += '<hr />';
+        }
+        wasSingleSel                     = i - beginOfGroup < 1;
+        menu                            += group;
+        group                            = '';
+
+        for (var j = beginOfGroup; j < i; ++j) {
+          this.usercssActions[this.usercssActions.length] =
+            function(vt100, current, begin, count) {
+
+              // Deselect all other entries in the group, then either select
+              // (for multiple entries in group) or toggle (for on/off entry)
+              // the current entry.
+              return function() {
+                var entry                = vt100.getChildById(vt100.menu,
+                                                              'beginusercss');
+                var i                    = -1;
+                var j                    = -1;
+                for (var c = count; c > 0; ++j) {
+                  if (entry.tagName == 'LI') {
+                    if (++i >= begin) {
+                      --c;
+                      var label          = vt100.usercss.childNodes[j];
+
+                      // Restore label to just the text content
+                      if (typeof label.textContent == 'undefined') {
+                        var s            = label.innerText;
+                        label.innerHTML  = '';
+                        label.appendChild(document.createTextNode(s));
+                      } else {
+                        label.textContent= label.textContent;
+                      }
+
+                      // User style sheets are numbered sequentially
+                      var sheet          = document.getElementById(
+                                                               'usercss-' + i);
+                      if (i == current) {
+                        if (count == 1) {
+                          sheet.disabled = !sheet.disabled;
+                        } else {
+                          sheet.disabled = false;
+                        }
+                        if (!sheet.disabled) {
+                          label.innerHTML= '<img src="/webshell/enabled.gif" />' +
+                                           label.innerHTML;
+                        }
+                      } else {
+                        sheet.disabled   = true;
+                      }
+                      userCSSList[i][2]  = !sheet.disabled;
+                    }
+                  }
+                  entry                  = entry.nextSibling;
+                }
+
+                // If the font size changed, adjust cursor and line dimensions
+                this.cursor.style.cssText= '';
+                this.cursorWidth         = this.cursor.clientWidth;
+                this.cursorHeight        = this.lineheight.clientHeight;
+                for (i = 0; i < this.console.length; ++i) {
+                  for (var line = this.console[i].firstChild; line;
+                       line = line.nextSibling) {
+                    line.style.height    = this.cursorHeight + 'px';
+                  }
+                }
+                vt100.resizer();
+              };
+            }(this, j, beginOfGroup, i - beginOfGroup);
+        }
+
+        if (i == userCSSList.length) {
+          break;
+        }
+
+        beginOfGroup                     = i;
+      }
+      // Collect all entries in a group, before attaching them to the menu.
+      // This is necessary as we don't know whether this is a group of
+      // mutually exclusive options (which should be separated by "<hr />" on
+      // both ends), or whether this is an on/off toggle, which can be grouped
+      // together with other on/off options.
+      group                             +=
+        '<li>' + (enabled ? '<img src="/webshell/enabled.gif" />' : '') +
+                 label +
+        '</li>';
+    }
+    this.usercss.innerHTML               = menu;
+  }
+};
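+
+// Illustrative example of the global userCSSList format consumed above;
+// each entry is [ label, startsNewGroup, initiallyEnabled ], and entry i
+// maps to the style sheet "usercss-<i>.css". Multi-entry groups behave
+// like radio buttons, while single entries toggle on and off:
+//
+//   var userCSSList = [ [ 'Black on White', true,  true  ],
+//                       [ 'White on Black', false, false ],
+//                       [ 'Monochrome',     true,  false ] ];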
+
+VT100.prototype.resetLastSelectedKey = function(e) {
+  var key                          = this.lastSelectedKey;
+  if (!key) {
+    return false;
+  }
+
+  var position                     = this.mousePosition(e);
+
+  // We don't get all the necessary events to reliably reselect a key
+  // if we moved away from it and then back onto it. We approximate the
+  // behavior by remembering the key until either we release the mouse
+  // button (we might never get this event if the mouse has since left
+  // the window), or until we move away too far.
+  var box                          = this.keyboard.firstChild;
+  if (position[0] <  box.offsetLeft + key.offsetWidth ||
+      position[1] <  box.offsetTop + key.offsetHeight ||
+      position[0] >= box.offsetLeft + box.offsetWidth - key.offsetWidth ||
+      position[1] >= box.offsetTop + box.offsetHeight - key.offsetHeight ||
+      position[0] <  box.offsetLeft + key.offsetLeft - key.offsetWidth ||
+      position[1] <  box.offsetTop + key.offsetTop - key.offsetHeight ||
+      position[0] >= box.offsetLeft + key.offsetLeft + 2*key.offsetWidth ||
+      position[1] >= box.offsetTop + key.offsetTop + 2*key.offsetHeight) {
+    if (this.lastSelectedKey.className) console.log('reset: deselecting');
+    this.lastSelectedKey.className = '';
+    this.lastSelectedKey           = undefined;
+  }
+  return false;
+};
+
+VT100.prototype.showShiftState = function(state) {
+  var style              = document.getElementById('shift_state');
+  if (state) {
+    this.setTextContentRaw(style,
+                           '#vt100 #keyboard .shifted {' +
+                             'display: inline }' +
+                           '#vt100 #keyboard .unshifted {' +
+                             'display: none }');
+  } else {
+    this.setTextContentRaw(style, '');
+  }
+  var elems              = this.keyboard.getElementsByTagName('I');
+  for (var i = 0; i < elems.length; ++i) {
+    if (elems[i].id == '16') {
+      elems[i].className = state ? 'selected' : '';
+    }
+  }
+};
+
+VT100.prototype.showCtrlState = function(state) {
+  var ctrl         = this.getChildById(this.keyboard, '17' /* Ctrl */);
+  if (ctrl) {
+    ctrl.className = state ? 'selected' : '';
+  }
+};
+
+VT100.prototype.showAltState = function(state) {
+  var alt         = this.getChildById(this.keyboard, '18' /* Alt */);
+  if (alt) {
+    alt.className = state ? 'selected' : '';
+  }
+};
+
+VT100.prototype.clickedKeyboard = function(e, elem, ch, key, shift, ctrl, alt){
+  var fake      = [ ];
+  fake.charCode = ch;
+  fake.keyCode  = key;
+  fake.ctrlKey  = ctrl;
+  fake.shiftKey = shift;
+  fake.altKey   = alt;
+  fake.metaKey  = alt;
+  return this.handleKey(fake);
+};
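+
+// Example (illustrative): clicking the soft keyboard's "a" key while the
+// sticky Shift modifier is active is forwarded by addKeyBinding() as
+//
+//   this.clickedKeyboard(e, elem, 0x41 /* 'A' */, 0x41, true, false, false);
+//
+// so handleKey() sees a synthesized event with charCode == keyCode == 65.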
+
+VT100.prototype.addKeyBinding = function(elem, ch, key, CH, KEY) {
+  if (elem == undefined) {
+    return;
+  }
+  if (ch == '\u00A0') {
+    // &nbsp; should be treated as a regular space character.
+    ch                                  = ' ';
+  }
+  if (ch != undefined && CH == undefined) {
+    // For letter keys, we automatically compute the uppercase character code
+    // from the lowercase one.
+    CH                                  = ch.toUpperCase();
+  }
+  if (KEY == undefined && key != undefined) {
+  // Most keys have identical key codes for both lowercase and uppercase
+    // keypresses. Normally, only function keys would have distinct key codes,
+    // whereas regular keys have character codes.
+    KEY                                 = key;
+  } else if (KEY == undefined && CH != undefined) {
+    // For regular keys, copy the character code to the key code.
+    KEY                                 = CH.charCodeAt(0);
+  }
+  if (key == undefined && ch != undefined) {
+    // For regular keys, copy the character code to the key code.
+    key                                 = ch.charCodeAt(0);
+  }
+  // Convert characters to numeric character codes. If the character code
+  // is undefined (i.e. this is a function key), set it to zero.
+  ch                                    = ch ? ch.charCodeAt(0) : 0;
+  CH                                    = CH ? CH.charCodeAt(0) : 0;
+
+  // Mouse down events highlight the key. We also set lastSelectedKey; this
+  // is needed so that mouseout/mouseover can keep track of the key that
+  // is currently being clicked.
+  this.addListener(elem, 'mousedown',
+    function(vt100, elem, key) { return function(e) {
+      if ((e.which || e.button) == 1) {
+        if (vt100.lastSelectedKey) {       
+          vt100.lastSelectedKey.className= '';
+        }
+        // Highlight the key while the mouse button is held down.
+        if (key == 16 /* Shift */) {
+          if (!elem.className != vt100.isShift) {
+            vt100.showShiftState(!vt100.isShift);
+          }
+        } else if (key == 17 /* Ctrl */) {
+          if (!elem.className != vt100.isCtrl) {
+            vt100.showCtrlState(!vt100.isCtrl);
+          }
+        } else if (key == 18 /* Alt */) {
+          if (!elem.className != vt100.isAlt) {
+            vt100.showAltState(!vt100.isAlt);
+          }
+        } else {
+          elem.className                  = 'selected';
+        }
+        vt100.lastSelectedKey             = elem;
+      }
+      return false; }; }(this, elem, key));
+  var clicked                           =
+    // Modifier keys update the state of the keyboard, but do not generate
+    // any key clicks that get forwarded to the application.
+    key >= 16 /* Shift */ && key <= 18 /* Alt */ ?
+    function(vt100, elem) { return function(e) {
+      if (elem == vt100.lastSelectedKey) {
+        if (key == 16 /* Shift */) {
+          // The user clicked the Shift key
+          vt100.isShift                 = !vt100.isShift;
+          vt100.showShiftState(vt100.isShift);
+        } else if (key == 17 /* Ctrl */) {
+          vt100.isCtrl                  = !vt100.isCtrl;
+          vt100.showCtrlState(vt100.isCtrl);
+        } else if (key == 18 /* Alt */) {
+          vt100.isAlt                   = !vt100.isAlt;
+          vt100.showAltState(vt100.isAlt);
+        }
+        vt100.lastSelectedKey           = undefined;
+      }
+      if (vt100.lastSelectedKey) {
+        vt100.lastSelectedKey.className = '';
+        vt100.lastSelectedKey           = undefined;
+      }
+      return false; }; }(this, elem) :
+    // Regular keys generate key clicks, when the mouse button is released or
+    // when a mouse click event is received.
+    function(vt100, elem, ch, key, CH, KEY) { return function(e) {
+      if (vt100.lastSelectedKey) {
+        if (elem == vt100.lastSelectedKey) {
+          // The user clicked a key.
+          if (vt100.isShift) {
+            vt100.clickedKeyboard(e, elem, CH, KEY,
+                                  true, vt100.isCtrl, vt100.isAlt);
+          } else {
+            vt100.clickedKeyboard(e, elem, ch, key,
+                                  false, vt100.isCtrl, vt100.isAlt);
+          }
+          vt100.isShift                 = false;
+          vt100.showShiftState(false);
+          vt100.isCtrl                  = false;
+          vt100.showCtrlState(false);
+          vt100.isAlt                   = false;
+          vt100.showAltState(false);
+        }
+        vt100.lastSelectedKey.className = '';
+        vt100.lastSelectedKey           = undefined;
+      }
+      elem.className                    = '';
+      return false; }; }(this, elem, ch, key, CH, KEY);
+  this.addListener(elem, 'mouseup', clicked);
+  this.addListener(elem, 'click', clicked);
+
+  // When moving the mouse away from a key, check if any keys need to be
+  // deselected.
+  this.addListener(elem, 'mouseout',
+    function(vt100, elem, key) { return function(e) {
+      if (key == 16 /* Shift */) {
+        if (!elem.className == vt100.isShift) {
+          vt100.showShiftState(vt100.isShift);
+        }
+      } else if (key == 17 /* Ctrl */) {
+        if (!elem.className == vt100.isCtrl) {
+          vt100.showCtrlState(vt100.isCtrl);
+        }
+      } else if (key == 18 /* Alt */) {
+        if (!elem.className == vt100.isAlt) {
+          vt100.showAltState(vt100.isAlt);
+        }
+      } else if (elem.className) {
+        elem.className                  = '';
+        vt100.lastSelectedKey           = elem;
+      } else if (vt100.lastSelectedKey) {
+        vt100.resetLastSelectedKey(e);
+      }
+      return false; }; }(this, elem, key));
+
+  // When moving the mouse over a key, select it if the user is still holding
+  // the mouse button down (i.e. elem == lastSelectedKey)
+  this.addListener(elem, 'mouseover',
+    function(vt100, elem, key) { return function(e) {
+      if (elem == vt100.lastSelectedKey) {
+        if (key == 16 /* Shift */) {
+          if (!elem.className != vt100.isShift) {
+            vt100.showShiftState(!vt100.isShift);
+          }
+        } else if (key == 17 /* Ctrl */) {
+          if (!elem.className != vt100.isCtrl) {
+            vt100.showCtrlState(!vt100.isCtrl);
+          }
+        } else if (key == 18 /* Alt */) {
+          if (!elem.className != vt100.isAlt) {
+            vt100.showAltState(!vt100.isAlt);
+          }
+        } else if (!elem.className) {
+          elem.className                = 'selected';
+        }
+      } else {
+        vt100.resetLastSelectedKey(e);
+      }
+      return false; }; }(this, elem, key));
+};
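+
+// Illustrative examples of the parameter convention handled above, where
+// omitted values are derived from the ones given:
+//
+//   addKeyBinding(elem, 'a');                 // ch=97, CH=65, key=97, KEY=65
+//   addKeyBinding(elem, undefined, 27);       // Escape: ch=CH=0, key=KEY=27
+//   addKeyBinding(elem, '1', undefined, '!'); // ch=49, CH=33, key=49, KEY=33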
+
+VT100.prototype.initializeKeyBindings = function(elem) {
+  if (elem) {
+    if (elem.nodeName == "I" || elem.nodeName == "B") {
+      if (elem.id) {
+        // Function keys. The JavaScript keycode is part of the "id".
+        var i     = parseInt(elem.id);
+        if (i) {
+          // If the id does not parse as a number, it is not a keycode.
+          this.addKeyBinding(elem, undefined, i);
+        }
+      } else {
+        var child = elem.firstChild;
+        if (child) {
+          if (child.nodeName == "#text") {
+            // If the key only has a text node as a child, then it is a letter.
+            // Automatically compute the lower and upper case version of the
+            // key.
+            var text = this.getTextContent(child) ||
+                       this.getTextContent(elem);
+            this.addKeyBinding(elem, text.toLowerCase());
+          } else if (child.nextSibling) {
+            // If the key has two children, they are the lower and upper case
+            // character code, respectively.
+            this.addKeyBinding(elem, this.getTextContent(child), undefined,
+                               this.getTextContent(child.nextSibling));
+          }
+        }
+      }
+    }
+  }
+  // Recursively parse all other child nodes.
+  for (elem = elem.firstChild; elem; elem = elem.nextSibling) {
+    this.initializeKeyBindings(elem);
+  }
+};
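+
+// The keyboard layout document is expected to mark up keys as <i>/<b>
+// elements following the conventions parsed above (illustrative sketch):
+//
+//   <i>a</i>                  <!-- letter key; upper case is derived     -->
+//   <b id="27">Esc</b>        <!-- function key; the id is the keycode   -->
+//   <i><b>1</b><b>!</b></i>   <!-- unshifted / shifted character pair    -->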
+
+VT100.prototype.initializeKeyboardButton = function() {
+  // Configure mouse event handlers for button that displays/hides keyboard
+  this.addListener(this.keyboardImage, 'click',
+    function(vt100) { return function(e) {
+      if (vt100.keyboard.style.display != '') {
+        if (vt100.reconnectBtn.style.visibility != '') {
+          vt100.initializeKeyboard();
+          vt100.showSoftKeyboard();
+        }
+      } else {
+        vt100.hideSoftKeyboard();
+        vt100.input.focus();
+      }
+      return false; }; }(this));
+
+  // Enable button that displays keyboard
+  if (this.softKeyboard) {
+    this.keyboardImage.style.visibility = 'visible';
+  }
+};
+
+VT100.prototype.initializeKeyboard = function() {
+  // Only need to initialize the keyboard the very first time. When doing so,
+  // copy the keyboard layout from the iframe.
+  if (this.keyboard.firstChild) {
+    return;
+  }
+  this.keyboard.innerHTML               =
+                                    this.layout.contentDocument.body.innerHTML;
+  var box                               = this.keyboard.firstChild;
+  this.hideSoftKeyboard();
+
+  // Configure mouse event handlers for on-screen keyboard
+  this.addListener(this.keyboard, 'click',
+    function(vt100) { return function(e) {
+      vt100.hideSoftKeyboard();
+      vt100.input.focus();
+      return false; }; }(this));
+  this.addListener(this.keyboard, 'selectstart', this.cancelEvent);
+  this.addListener(box, 'click', this.cancelEvent);
+  this.addListener(box, 'mouseup',
+    function(vt100) { return function(e) {
+      if (vt100.lastSelectedKey) {
+        vt100.lastSelectedKey.className = '';
+        vt100.lastSelectedKey           = undefined;
+      }
+      return false; }; }(this));
+  this.addListener(box, 'mouseout',
+    function(vt100) { return function(e) {
+      return vt100.resetLastSelectedKey(e); }; }(this));
+  this.addListener(box, 'mouseover',
+    function(vt100) { return function(e) {
+      return vt100.resetLastSelectedKey(e); }; }(this));
+
+  // Configure SHIFT key behavior
+  var style                             = document.createElement('style');
+  var id                                = document.createAttribute('id');
+  id.nodeValue                          = 'shift_state';
+  style.setAttributeNode(id);
+  var type                              = document.createAttribute('type');
+  type.nodeValue                        = 'text/css';
+  style.setAttributeNode(type);
+  document.getElementsByTagName('head')[0].appendChild(style);
+
+  // Set up key bindings
+  this.initializeKeyBindings(box);
+};
+
+VT100.prototype.initializeElements = function(container) {
+  // If the necessary objects have not already been defined in the HTML
+  // page, create them now.
+  if (container) {
+    this.container             = container;
+  } else if (!(this.container  = document.getElementById('vt100'))) {
+    this.container             = document.createElement('div');
+    this.container.id          = 'vt100';
+    document.body.appendChild(this.container);
+  }
+
+  if (!this.getChildById(this.container, 'reconnect')   ||
+      !this.getChildById(this.container, 'menu')        ||
+      !this.getChildById(this.container, 'keyboard')    ||
+      !this.getChildById(this.container, 'kbd_button')  ||
+      !this.getChildById(this.container, 'kbd_img')     ||
+      !this.getChildById(this.container, 'layout')      ||
+      !this.getChildById(this.container, 'scrollable')  ||
+      !this.getChildById(this.container, 'console')     ||
+      !this.getChildById(this.container, 'alt_console') ||
+      !this.getChildById(this.container, 'ieprobe')     ||
+      !this.getChildById(this.container, 'padding')     ||
+      !this.getChildById(this.container, 'cursor')      ||
+      !this.getChildById(this.container, 'lineheight')  ||
+      !this.getChildById(this.container, 'usercss')     ||
+      !this.getChildById(this.container, 'space')       ||
+      !this.getChildById(this.container, 'input')       ||
+      !this.getChildById(this.container, 'cliphelper')) {
+    // Only enable the "embed" object, if we have a suitable plugin. Otherwise,
+    // we might get a pointless warning that a suitable plugin is not yet
+    // installed. If in doubt, we'd rather just stay silent.
+    var embed                  = '';
+    try {
+      if (typeof navigator.mimeTypes["audio/x-wav"].enabledPlugin.name !=
+          'undefined') {
+        embed                  = typeof suppressAllAudio != 'undefined' &&
+                                 suppressAllAudio ? "" :
+        '<embed classid="clsid:02BF25D5-8C17-4B23-BC80-D3488ABDDC6B" ' +
+                       'id="beep_embed" ' +
+                       'src="beep.wav" ' +
+                       'autostart="false" ' +
+                       'volume="100" ' +
+                       'enablejavascript="true" ' +
+                       'type="audio/x-wav" ' +
+                       'height="16" ' +
+                       'width="200" ' +
+                       'style="position:absolute;left:-1000px;top:-1000px" />';
+      }
+    } catch (e) {
+    }
+
+    this.container.innerHTML   =
+                       '<div id="reconnect" style="visibility: hidden">' +
+                         '<input type="button" value="Connect" ' +
+                                'onsubmit="return false" />' +
+                       '</div>' +
+                       '<div id="cursize" style="visibility: hidden">' +
+                       '</div>' +
+                       '<div id="menu"></div>' +
+                       '<div id="keyboard" unselectable="on">' +
+                       '</div>' +
+                       '<div id="scrollable">' +
+                         '<table id="kbd_button">' +
+                           '<tr><td width="100%">&nbsp;</td>' +
+                           '<td><img id="kbd_img" src="/webshell/keyboard.png" /></td>' +
+                           '<td>&nbsp;&nbsp;&nbsp;&nbsp;</td></tr>' +
+                         '</table>' +
+                         '<pre id="lineheight">&nbsp;</pre>' +
+                         '<pre id="console">' +
+                           '<pre></pre>' +
+                           '<div id="ieprobe"><span>&nbsp;</span></div>' +
+                         '</pre>' +
+                         '<pre id="alt_console" style="display: none"></pre>' +
+                         '<div id="padding"></div>' +
+                         '<pre id="cursor">&nbsp;</pre>' +
+                       '</div>' +
+                       '<div class="hidden">' +
+                         '<div id="usercss"></div>' +
+                         '<pre><div><span id="space"></span></div></pre>' +
+                         '<input type="textfield" id="input" autocorrect="off" autocapitalize="off" />' +
+                         '<input type="textfield" id="cliphelper" />' +
+                         (typeof suppressAllAudio != 'undefined' &&
+                          suppressAllAudio ? "" :
+                         embed + '<bgsound id="beep_bgsound" loop=1 />') +
+                          '<iframe id="layout" src="/webshell/keyboard.html" />' +
+                        '</div>';
+  }
+
+  // Find the object used for playing the "beep" sound, if any.
+  if (typeof suppressAllAudio != 'undefined' && suppressAllAudio) {
+    this.beeper                = undefined;
+  } else {
+    this.beeper                = this.getChildById(this.container,
+                                                   'beep_embed');
+    if (!this.beeper || !this.beeper.Play) {
+      this.beeper              = this.getChildById(this.container,
+                                                   'beep_bgsound');
+      if (!this.beeper || typeof this.beeper.src == 'undefined') {
+        this.beeper            = undefined;
+      }
+    }
+  }
+
+  // Initialize the variables for finding the text console and the
+  // cursor.
+  this.reconnectBtn            = this.getChildById(this.container,'reconnect');
+  this.curSizeBox              = this.getChildById(this.container, 'cursize');
+  this.menu                    = this.getChildById(this.container, 'menu');
+  this.keyboard                = this.getChildById(this.container, 'keyboard');
+  this.keyboardImage           = this.getChildById(this.container, 'kbd_img');
+  this.layout                  = this.getChildById(this.container, 'layout');
+  this.scrollable              = this.getChildById(this.container,
+                                                                 'scrollable');
+  this.lineheight              = this.getChildById(this.container,
+                                                                 'lineheight');
+  this.console                 =
+                          [ this.getChildById(this.container, 'console'),
+                            this.getChildById(this.container, 'alt_console') ];
+  var ieProbe                  = this.getChildById(this.container, 'ieprobe');
+  this.padding                 = this.getChildById(this.container, 'padding');
+  this.cursor                  = this.getChildById(this.container, 'cursor');
+  this.usercss                 = this.getChildById(this.container, 'usercss');
+  this.space                   = this.getChildById(this.container, 'space');
+  this.input                   = this.getChildById(this.container, 'input');
+  this.cliphelper              = this.getChildById(this.container,
+                                                                 'cliphelper');
+
+  // Add any user selectable style sheets to the menu
+  this.initializeUserCSSStyles();
+
+  // Remember the dimensions of a standard character glyph. We would
+  // expect that we could just check cursor.clientWidth/Height at any time,
+  // but it turns out that browsers sometimes invalidate these values
+  // (e.g. while displaying a print preview screen).
+  this.cursorWidth             = this.cursor.clientWidth;
+  this.cursorHeight            = this.lineheight.clientHeight;
+
+  // IE has a slightly different box model that we need to compensate for
+  this.isIE                    = ieProbe.offsetTop > 1;
+  ieProbe                      = undefined;
+  this.console.innerHTML       = '';
+
+  // Determine if the terminal window is positioned at the beginning of the
+  // page, or if it is embedded somewhere else in the page. For full-screen
+  // terminals, automatically resize whenever the browser window changes.
+  var marginTop                = parseInt(this.getCurrentComputedStyle(
+                                          document.body, 'marginTop'));
+  var marginLeft               = parseInt(this.getCurrentComputedStyle(
+                                          document.body, 'marginLeft'));
+  var marginRight              = parseInt(this.getCurrentComputedStyle(
+                                          document.body, 'marginRight'));
+  var x                        = this.container.offsetLeft;
+  var y                        = this.container.offsetTop;
+  for (var parent = this.container; parent = parent.offsetParent; ) {
+    x                         += parent.offsetLeft;
+    y                         += parent.offsetTop;
+  }
+  this.isEmbedded              = marginTop != y ||
+                                 marginLeft != x ||
+                                 (window.innerWidth ||
+                                  document.documentElement.clientWidth ||
+                                  document.body.clientWidth) -
+                                 marginRight != x + this.container.offsetWidth;
+  if (!this.isEmbedded) {
+    // Some browsers generate resize events when the terminal is first
+    // shown. Disable showing the size indicator until a little bit after
+    // the terminal has been rendered the first time.
+    this.indicateSize          = false;
+    setTimeout(function(vt100) {
+      return function() {
+        vt100.indicateSize     = true;
+      };
+    }(this), 100);
+    this.addListener(window, 'resize', 
+                     function(vt100) {
+                       return function() {
+                         vt100.hideContextMenu();
+                         vt100.resizer();
+                         vt100.showCurrentSize();
+                        }
+                      }(this));
+    
+    // Hide extra scrollbars attached to window
+    document.body.style.margin = '0px';
+    try { document.body.style.overflow ='hidden'; } catch (e) { }
+    try { document.body.oncontextmenu = function() {return false;};} catch(e){}
+  }
+
+  // Set up onscreen soft keyboard
+  this.initializeKeyboardButton();
+
+  // Hide context menu
+  this.hideContextMenu();
+
+  // Add listener to reconnect button
+  this.addListener(this.reconnectBtn.firstChild, 'click',
+                   function(vt100) {
+                     return function() {
+                       var rc = vt100.reconnect();
+                       vt100.input.focus();
+                       return rc;
+                     }
+                   }(this));
+
+  // Add input listeners
+  this.addListener(this.input, 'blur',
+                   function(vt100) {
+                     return function() { vt100.blurCursor(); } }(this));
+  this.addListener(this.input, 'focus',
+                   function(vt100) {
+                     return function() { vt100.focusCursor(); } }(this));
+  this.addListener(this.input, 'keydown',
+                   function(vt100) {
+                     return function(e) {
+                       if (!e) e = window.event;
+                       return vt100.keyDown(e); } }(this));
+  this.addListener(this.input, 'keypress',
+                   function(vt100) {
+                     return function(e) {
+                       if (!e) e = window.event;
+                       return vt100.keyPressed(e); } }(this));
+  this.addListener(this.input, 'keyup',
+                   function(vt100) {
+                     return function(e) {
+                       if (!e) e = window.event;
+                       return vt100.keyUp(e); } }(this));
+
+  // Attach listeners that move the focus to the <input> field. This way we
+  // can make sure that we can receive keyboard input.
+  var mouseEvent               = function(vt100, type) {
+    return function(e) {
+      if (!e) e = window.event;
+      return vt100.mouseEvent(e, type);
+    };
+  };
+  this.addListener(this.scrollable,'mousedown',mouseEvent(this, 0 /* MOUSE_DOWN */));
+  this.addListener(this.scrollable,'mouseup',  mouseEvent(this, 1 /* MOUSE_UP */));
+  this.addListener(this.scrollable,'click',    mouseEvent(this, 2 /* MOUSE_CLICK */));
+
+  // Check that browser supports drag and drop
+  if ('draggable' in document.createElement('span')) {
+      var dropEvent            = function (vt100) {
+          return function(e) {
+              if (!e) e = window.event;
+              if (e.preventDefault) e.preventDefault();
+              vt100.keysPressed(e.dataTransfer.getData('Text'));
+              return false;
+          };
+      };
+      // Tell the browser that we *can* drop on this target
+      this.addListener(this.scrollable, 'dragover', cancel);
+      this.addListener(this.scrollable, 'dragenter', cancel);
+
+      // Add a listener for the drop event
+      this.addListener(this.scrollable, 'drop', dropEvent(this));
+  }
+  
+  // Initialize the blank terminal window.
+  this.currentScreen           = 0;
+  this.cursorX                 = 0;
+  this.cursorY                 = 0;
+  this.numScrollbackLines      = 0;
+  this.top                     = 0;
+  this.bottom                  = 0x7FFFFFFF;
+  this.scale                   = 1.0;
+  this.resizer();
+  this.focusCursor();
+  this.input.focus();
+};
+
+function cancel(event) {
+  if (event.preventDefault) {
+    event.preventDefault();
+  }
+  return false;
+}
+
+VT100.prototype.getChildById = function(parent, id) {
+  var nodeList = parent.all || parent.getElementsByTagName('*');
+  if (typeof nodeList.namedItem == 'undefined') {
+    for (var i = 0; i < nodeList.length; i++) {
+      if (nodeList[i].id == id) {
+        return nodeList[i];
+      }
+    }
+    return null;
+  } else {
+    var elem = (parent.all || parent.getElementsByTagName('*')).namedItem(id);
+    return elem ? elem[0] || elem : null;
+  }
+};
+
+VT100.prototype.getCurrentComputedStyle = function(elem, style) {
+  if (typeof elem.currentStyle != 'undefined') {
+    return elem.currentStyle[style];
+  } else {
+    return document.defaultView.getComputedStyle(elem, null)[style];
+  }
+};
+
+VT100.prototype.reconnect = function() {
+  return false;
+};
+
+VT100.prototype.showReconnect = function(state) {
+  if (state) {
+    this.hideSoftKeyboard();
+    this.reconnectBtn.style.visibility = '';
+  } else {
+    this.reconnectBtn.style.visibility = 'hidden';
+  }
+};
+
+VT100.prototype.repairElements = function(console) {
+  for (var line = console.firstChild; line; line = line.nextSibling) {
+    if (!line.clientHeight) {
+      var newLine = document.createElement(line.tagName);
+      newLine.style.cssText       = line.style.cssText;
+      newLine.className           = line.className;
+      if (line.tagName == 'DIV') {
+        for (var span = line.firstChild; span; span = span.nextSibling) {
+          var newSpan             = document.createElement(span.tagName);
+          newSpan.style.cssText   = span.style.cssText;
+          newSpan.className       = span.className;
+          this.setTextContent(newSpan, this.getTextContent(span));
+          newLine.appendChild(newSpan);
+        }
+      } else {
+        this.setTextContent(newLine, this.getTextContent(line));
+      }
+      line.parentNode.replaceChild(newLine, line);
+      line                        = newLine;
+    }
+  }
+};
+
+VT100.prototype.resized = function(w, h) {
+};
+
+VT100.prototype.resizer = function() {
+  // Hide onscreen soft keyboard
+  this.hideSoftKeyboard();
+
+  // The cursor can get corrupted if the print-preview is displayed in Firefox.
+  // Recreating it will repair it.
+  var newCursor                = document.createElement('pre');
+  this.setTextContent(newCursor, ' ');
+  newCursor.id                 = 'cursor';
+  newCursor.style.cssText      = this.cursor.style.cssText;
+  this.cursor.parentNode.insertBefore(newCursor, this.cursor);
+  if (!newCursor.clientHeight) {
+    // Things are broken right now. This is probably because we are
+    // displaying the print-preview. Just don't change any of our settings
+    // until the print dialog is closed again.
+    newCursor.parentNode.removeChild(newCursor);
+    return;
+  } else {
+    // Swap the old broken cursor for the newly created one.
+    this.cursor.parentNode.removeChild(this.cursor);
+    this.cursor                = newCursor;
+  }
+
+  // Really horrible things happen if the contents of the terminal change
+  // while the print-preview is showing. We get HTML elements that show up
+  // in the DOM, but that do not take up any space. Find these elements and
+  // try to fix them.
+  this.repairElements(this.console[0]);
+  this.repairElements(this.console[1]);
+
+  // Lock the cursor size to the size of a normal character. This helps with
+  // characters that are taller/shorter than normal. Unfortunately, we will
+  // still get confused if somebody enters a character that is wider/narrower
+  // than normal. This can happen if the browser tries to substitute a
+  // character from a different font.
+  this.cursor.style.width      = this.cursorWidth  + 'px';
+  this.cursor.style.height     = this.cursorHeight + 'px';
+
+  // Adjust height for one pixel padding of the #vt100 element.
+  // The latter is necessary to properly display the inactive cursor.
+  var console                  = this.console[this.currentScreen];
+  var height                   = (this.isEmbedded ? this.container.clientHeight
+                                  : (window.innerHeight ||
+                                     document.documentElement.clientHeight ||
+                                     document.body.clientHeight))-1;
+  var partial                  = height % this.cursorHeight;
+  this.scrollable.style.height = (height > 0 ? height : 0) + 'px';
+  this.padding.style.height    = (partial > 0 ? partial : 0) + 'px';
+  var oldTerminalHeight        = this.terminalHeight;
+  this.updateWidth();
+  this.updateHeight();
+
+  // Clip the cursor to the visible screen.
+  var cx                       = this.cursorX;
+  var cy                       = this.cursorY + this.numScrollbackLines;
+
+  // The alternate screen never keeps a scroll back buffer.
+  this.updateNumScrollbackLines();
+  while (this.currentScreen && this.numScrollbackLines > 0) {
+    console.removeChild(console.firstChild);
+    this.numScrollbackLines--;
+  }
+  cy                          -= this.numScrollbackLines;
+  if (cx < 0) {
+    cx                         = 0;
+  } else if (cx > this.terminalWidth) {
+    cx                         = this.terminalWidth - 1;
+    if (cx < 0) {
+      cx                       = 0;
+    }
+  }
+  if (cy < 0) {
+    cy                         = 0;
+  } else if (cy > this.terminalHeight) {
+    cy                         = this.terminalHeight - 1;
+    if (cy < 0) {
+      cy                       = 0;
+    }
+  }
+
+  // Clip the scroll region to the visible screen.
+  if (this.bottom > this.terminalHeight ||
+      this.bottom == oldTerminalHeight) {
+    this.bottom                = this.terminalHeight;
+  }
+  if (this.top >= this.bottom) {
+    this.top                   = this.bottom-1;
+    if (this.top < 0) {
+      this.top                 = 0;
+    }
+  }
+
+  // Truncate lines, if necessary. Explicitly reposition cursor (this is
+  // particularly important after changing the screen number), and reset
+  // the scroll region to the default.
+  this.truncateLines(this.terminalWidth);
+  this.putString(cx, cy, '', undefined);
+  this.scrollable.scrollTop    = this.numScrollbackLines *
+                                 this.cursorHeight + 1;
+
+  // Update classNames for lines in the scrollback buffer
+  var line                     = console.firstChild;
+  for (var i = 0; i < this.numScrollbackLines; i++) {
+    line.className             = 'scrollback';
+    line                       = line.nextSibling;
+  }
+  while (line) {
+    line.className             = '';
+    line                       = line.nextSibling;
+  }
+
+  // Reposition the reconnect button
+  this.reconnectBtn.style.left = (this.terminalWidth*this.cursorWidth/
+                                  this.scale -
+                                  this.reconnectBtn.clientWidth)/2 + 'px';
+  this.reconnectBtn.style.top  = (this.terminalHeight*this.cursorHeight-
+                                  this.reconnectBtn.clientHeight)/2 + 'px';
+
+  // Send notification that the window size has been changed
+  this.resized(this.terminalWidth, this.terminalHeight);
+};
+
+VT100.prototype.showCurrentSize = function() {
+  if (!this.indicateSize) {
+    return;
+  }
+  this.curSizeBox.innerHTML             = '' + this.terminalWidth + 'x' +
+                                               this.terminalHeight;
+  this.curSizeBox.style.left            =
+                                      (this.terminalWidth*this.cursorWidth/
+                                       this.scale -
+                                       this.curSizeBox.clientWidth)/2 + 'px';
+  this.curSizeBox.style.top             =
+                                      (this.terminalHeight*this.cursorHeight -
+                                       this.curSizeBox.clientHeight)/2 + 'px';
+  this.curSizeBox.style.visibility      = '';
+  if (this.curSizeTimeout) {
+    clearTimeout(this.curSizeTimeout);
+  }
+
+  // Only show the terminal size for a short amount of time after resizing.
+  // Then hide this information again. Some browsers generate resize events
+  // throughout the entire resize operation. This is nice, and we will show
+  // the terminal size while the user is dragging the window borders.
+  // Other browsers only generate a single event when the user releases the
+  // mouse. In those cases, we can only show the terminal size once at the
+  // end of the resize operation.
+  this.curSizeTimeout                   = setTimeout(function(vt100) {
+    return function() {
+      vt100.curSizeTimeout              = null;
+      vt100.curSizeBox.style.visibility = 'hidden';
+    };
+  }(this), 1000);
+};
+
+VT100.prototype.selection = function() {
+  try {
+    return '' + (window.getSelection && window.getSelection() ||
+                 document.selection && document.selection.type == 'Text' &&
+                 document.selection.createRange().text || '');
+  } catch (e) {
+  }
+  return '';
+};
+
+VT100.prototype.cancelEvent = function(event) {
+  try {
+    // For non-IE browsers
+    event.stopPropagation();
+    event.preventDefault();
+  } catch (e) {
+  }
+  try {
+    // For IE
+    event.cancelBubble = true;
+    event.returnValue  = false;
+    event.button       = 0;
+    event.keyCode      = 0;
+  } catch (e) {
+  }
+  return false;
+};
+
+VT100.prototype.mousePosition = function(event) {
+  var offsetX      = this.container.offsetLeft;
+  var offsetY      = this.container.offsetTop;
+  for (var e = this.container; e = e.offsetParent; ) {
+    offsetX       += e.offsetLeft;
+    offsetY       += e.offsetTop;
+  }
+  return [ event.clientX - offsetX,
+           event.clientY - offsetY ];
+};
+
+VT100.prototype.mouseEvent = function(event, type) {
+  // If any text is currently selected, do not move the focus as that would
+  // invalidate the selection.
+  var selection    = this.selection();
+  if ((type == 1 /* MOUSE_UP */ || type == 2 /* MOUSE_CLICK */) && !selection.length) {
+    this.input.focus();
+  }
+
+  // Compute mouse position in characters.
+  var position     = this.mousePosition(event);
+  var x            = Math.floor(position[0] / this.cursorWidth);
+  var y            = Math.floor((position[1] + this.scrollable.scrollTop) /
+                                this.cursorHeight) - this.numScrollbackLines;
+  var inside       = true;
+  if (x >= this.terminalWidth) {
+    x              = this.terminalWidth - 1;
+    inside         = false;
+  }
+  if (x < 0) {
+    x              = 0;
+    inside         = false;
+  }
+  if (y >= this.terminalHeight) {
+    y              = this.terminalHeight - 1;
+    inside         = false;
+  }
+  if (y < 0) {
+    y              = 0;
+    inside         = false;
+  }
+
+  // Compute button number and modifier keys.
+  var button       = type != 0 /* MOUSE_DOWN */ ? 3 :
+                     typeof event.pageX != 'undefined' ? event.button :
+                     [ undefined, 0, 2, 0, 1, 0, 1, 0  ][event.button];
+  if (button != undefined) {
+    if (event.shiftKey) {
+      button      |= 0x04;
+    }
+    if (event.altKey || event.metaKey) {
+      button      |= 0x08;
+    }
+    if (event.ctrlKey) {
+      button      |= 0x10;
+    }
+  }
+
+  // Report mouse events if they happen inside of the current screen and
+  // with the SHIFT key unpressed. Neither of these restrictions applies
+  // to button releases, as we always want to report those.
+  if (this.mouseReporting && !selection.length &&
+      (type != 0 /* MOUSE_DOWN */ || !event.shiftKey)) {
+    if (inside || type != 0 /* MOUSE_DOWN */) {
+      if (button != undefined) {
+        var report = '\u001B[M' + String.fromCharCode(button + 32) +
+                                  String.fromCharCode(x      + 33) +
+                                  String.fromCharCode(y      + 33);
+        if (type != 2 /* MOUSE_CLICK */) {
+          this.keysPressed(report);
+        }
+
+        // If we reported the event, stop propagating it (not sure if this
+        // actually works on most browsers; blocking the global "oncontextmenu"
+        // event is still necessary).
+        return this.cancelEvent(event);
+      }
+    }
+  }
+
+  // Bring up context menu.
+  if (button == 2 && !event.shiftKey) {
+    if (type == 0 /* MOUSE_DOWN */) {
+      this.showContextMenu(position[0], position[1]);
+    }
+    return this.cancelEvent(event);
+  }
+
+  if (this.mouseReporting) {
+    try {
+      event.shiftKey         = false;
+    } catch (e) {
+    }
+  }
+
+  return true;
+};
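+
+// Worked example of the X10/xterm-style mouse report assembled above: a
+// left-button press (button code 0, no modifiers) in the top-left cell
+// (x == 0, y == 0) is sent as
+//
+//   '\u001B[M' + String.fromCharCode(32)   // button + 32
+//              + String.fromCharCode(33)   // x + 33
+//              + String.fromCharCode(33)   // y + 33
+//
+// i.e. the bytes ESC [ M SP ! !. Holding Ctrl sets bit 0x10 and turns
+// the button byte into String.fromCharCode(48), an ASCII "0".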
+
+VT100.prototype.replaceChar = function(s, ch, repl) {
+  for (var i = -1;;) {
+    i = s.indexOf(ch, i + 1);
+    if (i < 0) {
+      break;
+    }
+    s = s.substr(0, i) + repl + s.substr(i + 1);
+  }
+  return s;
+};
+
+VT100.prototype.htmlEscape = function(s) {
+  return this.replaceChar(this.replaceChar(this.replaceChar(this.replaceChar(
+                s, '&', '&amp;'), '<', '&lt;'), '"', '&quot;'), ' ', '\u00A0');
+};
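+
+// Examples (illustrative): replaceChar() substitutes every occurrence of
+// a single character, and htmlEscape() additionally turns spaces into
+// non-breaking spaces so that runs of blanks survive in HTML:
+//
+//   replaceChar('a b c', ' ', '_')  ->  'a_b_c'
+//   htmlEscape('a < b & "c"')       ->
+//                    'a\u00A0&lt;\u00A0b\u00A0&amp;\u00A0&quot;c&quot;'
+//
+// Note that '>' is not escaped; it is harmless outside of tags.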
+
+VT100.prototype.getTextContent = function(elem) {
+  return elem.textContent ||
+         (typeof elem.textContent == 'undefined' ? elem.innerText : '');
+};
+
+VT100.prototype.setTextContentRaw = function(elem, s) {
+  // Updating the content of an element is an expensive operation. It actually
+  // pays off to first check whether the element is still unchanged.
+  if (typeof elem.textContent == 'undefined') {
+    if (elem.innerText != s) {
+      try {
+        elem.innerText = s;
+      } catch (e) {
+        // Very old versions of IE do not allow setting innerText. Instead,
+        // remove all children, by setting innerHTML and then set the text
+        // using DOM methods.
+        elem.innerHTML = '';
+        elem.appendChild(document.createTextNode(
+                                          this.replaceChar(s, ' ', '\u00A0')));
+      }
+    }
+  } else {
+    if (elem.textContent != s) {
+      elem.textContent = s;
+    }
+  }
+};
+
+VT100.prototype.setTextContent = function(elem, s) {
+  // Check if we find any URLs in the text. If so, automatically convert them
+  // to links.
+  if (this.urlRE && this.urlRE.test(s)) {
+    var inner          = '';
+    for (;;) {
+      var consumed = 0;
+      if (RegExp.leftContext != null) {
+        inner         += this.htmlEscape(RegExp.leftContext);
+        consumed      += RegExp.leftContext.length;
+      }
+      var url          = this.htmlEscape(RegExp.lastMatch);
+      var fullUrl      = url;
+
+      // If no protocol was specified, try to guess a reasonable one.
+      if (url.indexOf('http://') < 0 && url.indexOf('https://') < 0 &&
+          url.indexOf('ftp://')  < 0 && url.indexOf('mailto:')  < 0) {
+        var slash      = url.indexOf('/');
+        var at         = url.indexOf('@');
+        var question   = url.indexOf('?');
+        if (at > 0 &&
+            (at < question || question < 0) &&
+            (slash < 0 || (question > 0 && slash > question))) {
+          fullUrl      = 'mailto:' + url;
+        } else {
+          fullUrl      = (url.indexOf('ftp.') == 0 ? 'ftp://' : 'http://') +
+                          url;
+        }
+      }
+
+      inner           += '<a target="vt100Link" href="' + fullUrl +
+                         '">' + url + '</a>';
+      consumed        += RegExp.lastMatch.length;
+      s                = s.substr(consumed);
+      if (!this.urlRE.test(s)) {
+        if (RegExp.rightContext != null) {
+          inner       += this.htmlEscape(RegExp.rightContext);
+        }
+        break;
+      }
+    }
+    elem.innerHTML     = inner;
+    return;
+  }
+
+  this.setTextContentRaw(elem, s);
+};
+
+VT100.prototype.insertBlankLine = function(y, color, style) {
+  // Insert a blank line at position y. This method ignores the scrollback
+  // buffer. The caller has to add the length of the scrollback buffer to
+  // the position, if necessary.
+  // If the position is larger than the number of current lines, this
+  // method just adds a new line right after the last existing one. It does
+  // not add any missing lines in between. It is the caller's responsibility
+  // to do so.
+  if (!color) {
+    color                = 'ansi0 bgAnsi15';
+  }
+  if (!style) {
+    style                = '';
+  }
+  var line;
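+  // Blank lines in the default style can be represented by a lightweight
+  // <pre>; colored or styled blanks need a <div> containing a styled
+  // <span>.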
+  if (color == 'ansi0 bgAnsi15' && !style) {
+    line                 = document.createElement('pre');
+    this.setTextContent(line, '\n');
+  } else {
+    line                 = document.createElement('div');
+    var span             = document.createElement('span');
+    span.style.cssText   = style;
+    span.className       = color;
+    this.setTextContent(span, this.spaces(this.terminalWidth));
+    line.appendChild(span);
+  }
+  line.style.height      = this.cursorHeight + 'px';
+  var console            = this.console[this.currentScreen];
+  if (console.childNodes.length > y) {
+    console.insertBefore(line, console.childNodes[y]);
+  } else {
+    console.appendChild(line);
+  }
+};
+
+VT100.prototype.updateWidth = function() {
+  this.terminalWidth = Math.floor(this.console[this.currentScreen].offsetWidth/
+                                  this.cursorWidth*this.scale);
+  return this.terminalWidth;
+};
+
+VT100.prototype.updateHeight = function() {
+  // We want to be able to display either a terminal window that fills the
+  // entire browser window, or a terminal window that is contained in a
+  // <div> which is embedded somewhere in the web page.
+  if (this.isEmbedded) {
+    // Embedded terminal. Use size of the containing <div> (id="vt100").
+    this.terminalHeight = Math.floor((this.container.clientHeight-1) /
+                                     this.cursorHeight);
+  } else {
+    // Use the full browser window.
+    this.terminalHeight = Math.floor(((window.innerHeight ||
+                                       document.documentElement.clientHeight ||
+                                       document.body.clientHeight)-1)/
+                                     this.cursorHeight);
+  }
+  return this.terminalHeight;
+};
+
+VT100.prototype.updateNumScrollbackLines = function() {
+  var scrollback          = Math.floor(
+                                this.console[this.currentScreen].offsetHeight /
+                                this.cursorHeight) -
+                            this.terminalHeight;
+  this.numScrollbackLines = scrollback < 0 ? 0 : scrollback;
+  return this.numScrollbackLines;
+};
+
+VT100.prototype.truncateLines = function(width) {
+  if (width < 0) {
+    width             = 0;
+  }
+  for (var line = this.console[this.currentScreen].firstChild; line;
+       line = line.nextSibling) {
+    if (line.tagName == 'DIV') {
+      var x           = 0;
+
+      // Traverse current line and truncate it once we saw "width" characters
+      for (var span = line.firstChild; span;
+           span = span.nextSibling) {
+        var s         = this.getTextContent(span);
+        var l         = s.length;
+        if (x + l > width) {
+          this.setTextContent(span, s.substr(0, width - x));
+          while (span.nextSibling) {
+            line.removeChild(line.lastChild);
+          }
+          break;
+        }
+        x            += l;
+      }
+      // Prune white space from the end of the current line
+      var span       = line.lastChild;
+      while (span &&
+             span.className == 'ansi0 bgAnsi15' &&
+             !span.style.cssText.length) {
+        // Scan backwards looking for first non-space character
+        var s         = this.getTextContent(span);
+        for (var i = s.length; i--; ) {
+          if (s.charAt(i) != ' ' && s.charAt(i) != '\u00A0') {
+            if (i+1 != s.length) {
+              this.setTextContent(span, s.substr(0, i+1));
+            }
+            span      = null;
+            break;
+          }
+        }
+        if (span) {
+          var sibling = span;
+          span        = span.previousSibling;
+          if (span) {
+            // Remove blank <span>'s from end of line
+            line.removeChild(sibling);
+          } else {
+            // Remove entire line (i.e. <div>), if empty
+            var blank = document.createElement('pre');
+            blank.style.height = this.cursorHeight + 'px';
+            this.setTextContent(blank, '\n');
+            line.parentNode.replaceChild(blank, line);
+          }
+        }
+      }
+    }
+  }
+};
+
+VT100.prototype.putString = function(x, y, text, color, style) {
+  if (!color) {
+    color                           = 'ansi0 bgAnsi15';
+  }
+  if (!style) {
+    style                           = '';
+  }
+  var yIdx                          = y + this.numScrollbackLines;
+  var line;
+  var sibling;
+  var s;
+  var span;
+  var xPos                          = 0;
+  var console                       = this.console[this.currentScreen];
+  if (!text.length && (yIdx >= console.childNodes.length ||
+                       console.childNodes[yIdx].tagName != 'DIV')) {
+    // Positioning cursor to a blank location
+    span                            = null;
+  } else {
+    // Create missing blank lines at end of page
+    while (console.childNodes.length <= yIdx) {
+      // In order to simplify lookups, we want to make sure that each line
+      // is represented by exactly one element (and possibly a whole bunch of
+      // children).
+      // For non-blank lines, we can create a <div> containing one or more
+      // <span>s. For blank lines, this fails as browsers tend to optimize them
+      // away. But fortunately, a <pre> tag containing a newline character
+      // appears to work for all browsers (a &nbsp; would also work, but then
+      // copying from the browser window would insert superfluous spaces into
+      // the clipboard).
+      this.insertBlankLine(yIdx);
+    }
+    line                            = console.childNodes[yIdx];
+    
+    // If necessary, promote blank '\n' line to a <div> tag
+    if (line.tagName != 'DIV') {
+      var div                       = document.createElement('div');
+      div.style.height              = this.cursorHeight + 'px';
+      div.innerHTML                 = '<span></span>';
+      console.replaceChild(div, line);
+      line                          = div;
+    }
+
+    // Scan through list of <span>'s until we find the one where our text
+    // starts
+    span                            = line.firstChild;
+    var len;
+    while (span.nextSibling && xPos < x) {
+      len                           = this.getTextContent(span).length;
+      if (xPos + len > x) {
+        break;
+      }
+      xPos                         += len;
+      span                          = span.nextSibling;
+    }
+
+    if (text.length) {
+      // If current <span> is not long enough, pad with spaces or add new
+      // span
+      s                             = this.getTextContent(span);
+      var oldColor                  = span.className;
+      var oldStyle                  = span.style.cssText;
+      if (xPos + s.length < x) {
+        if (oldColor != 'ansi0 bgAnsi15' || oldStyle != '') {
+          span                      = document.createElement('span');
+          line.appendChild(span);
+          span.className            = 'ansi0 bgAnsi15';
+          span.style.cssText        = '';
+          oldColor                  = 'ansi0 bgAnsi15';
+          oldStyle                  = '';
+          xPos                     += s.length;
+          s                         = '';
+        }
+        do {
+          s                        += ' ';
+        } while (xPos + s.length < x);
+      }
+    
+      // If styles do not match, create a new <span>
+      var del                       = text.length - s.length + x - xPos;
+      if (oldColor != color ||
+          (oldStyle != style && (oldStyle || style))) {
+        if (xPos == x) {
+          // Replacing text at beginning of existing <span>
+          if (text.length >= s.length) {
+            // New text is equal or longer than existing text
+            s                       = text;
+          } else {
+            // Insert new <span> before the current one, then remove leading
+            // part of existing <span>, adjust style of new <span>, and finally
+            // set its contents
+            sibling                 = document.createElement('span');
+            line.insertBefore(sibling, span);
+            this.setTextContent(span, s.substr(text.length));
+            span                    = sibling;
+            s                       = text;
+          }
+        } else {
+          // Replacing text some way into the existing <span>
+          var remainder             = s.substr(x + text.length - xPos);
+          this.setTextContent(span, s.substr(0, x - xPos));
+          xPos                      = x;
+          sibling                   = document.createElement('span');
+          if (span.nextSibling) {
+            line.insertBefore(sibling, span.nextSibling);
+            span                    = sibling;
+            if (remainder.length) {
+              sibling               = document.createElement('span');
+              sibling.className     = oldColor;
+              sibling.style.cssText = oldStyle;
+              this.setTextContent(sibling, remainder);
+              line.insertBefore(sibling, span.nextSibling);
+            }
+          } else {
+            line.appendChild(sibling);
+            span                    = sibling;
+            if (remainder.length) {
+              sibling               = document.createElement('span');
+              sibling.className     = oldColor;
+              sibling.style.cssText = oldStyle;
+              this.setTextContent(sibling, remainder);
+              line.appendChild(sibling);
+            }
+          }
+          s                         = text;
+        }
+        span.className              = color;
+        span.style.cssText          = style;
+      } else {
+        // Overwrite (partial) <span> with new text
+        s                           = s.substr(0, x - xPos) +
+          text +
+          s.substr(x + text.length - xPos);
+      }
+      this.setTextContent(span, s);
+
+      
+      // Delete all subsequent <span>'s that have just been overwritten
+      sibling                       = span.nextSibling;
+      while (del > 0 && sibling) {
+        s                           = this.getTextContent(sibling);
+        len                         = s.length;
+        if (len <= del) {
+          line.removeChild(sibling);
+          del                      -= len;
+          sibling                   = span.nextSibling;
+        } else {
+          this.setTextContent(sibling, s.substr(del));
+          break;
+        }
+      }
+      
+      // Merge <span> with next sibling, if styles are identical
+      if (sibling && span.className == sibling.className &&
+          span.style.cssText == sibling.style.cssText) {
+        this.setTextContent(span,
+                            this.getTextContent(span) +
+                            this.getTextContent(sibling));
+        line.removeChild(sibling);
+      }
+    }
+  }
+
+  // Position cursor
+  this.cursorX                      = x + text.length;
+  if (this.cursorX >= this.terminalWidth) {
+    this.cursorX                    = this.terminalWidth - 1;
+    if (this.cursorX < 0) {
+      this.cursorX                  = 0;
+    }
+  }
+  var pixelX                        = -1;
+  var pixelY                        = -1;
+  if (!this.cursor.style.visibility) {
+    var idx                         = this.cursorX - xPos;
+    if (span) {
+      // If we are in a non-empty line, take the cursor Y position from the
+      // other elements in this line. If dealing with broken, non-proportional
+      // fonts, this is likely to yield better results.
+      pixelY                        = span.offsetTop +
+                                      span.offsetParent.offsetTop;
+      s                             = this.getTextContent(span);
+      var nxtIdx                    = idx - s.length;
+      if (nxtIdx < 0) {
+        this.setTextContent(this.cursor, s.charAt(idx));
+        pixelX                      = span.offsetLeft +
+                                      idx*span.offsetWidth / s.length;
+      } else {
+        if (nxtIdx == 0) {
+          pixelX                    = span.offsetLeft + span.offsetWidth;
+        }
+        if (span.nextSibling) {
+          s                         = this.getTextContent(span.nextSibling);
+          this.setTextContent(this.cursor, s.charAt(nxtIdx));
+          if (pixelX < 0) {
+            pixelX                  = span.nextSibling.offsetLeft +
+                                      nxtIdx*span.nextSibling.offsetWidth /
+                                      s.length;
+          }
+        } else {
+          this.setTextContent(this.cursor, ' ');
+        }
+      }
+    } else {
+      this.setTextContent(this.cursor, ' ');
+    }
+  }
+  if (pixelX >= 0) {
+    this.cursor.style.left          = (pixelX + (this.isIE ? 1 : 0))/
+                                      this.scale + 'px';
+  } else {
+    this.setTextContent(this.space, this.spaces(this.cursorX));
+    this.cursor.style.left          = (this.space.offsetWidth +
+                                       console.offsetLeft)/this.scale + 'px';
+  }
+  this.cursorY                      = yIdx - this.numScrollbackLines;
+  if (pixelY >= 0) {
+    this.cursor.style.top           = pixelY + 'px';
+  } else {
+    this.cursor.style.top           = yIdx*this.cursorHeight +
+                                      console.offsetTop + 'px';
+  }
+
+  if (text.length) {
+    // Merge <span> with previous sibling, if styles are identical
+    if ((sibling = span.previousSibling) &&
+        span.className == sibling.className &&
+        span.style.cssText == sibling.style.cssText) {
+      this.setTextContent(span,
+                          this.getTextContent(sibling) +
+                          this.getTextContent(span));
+      line.removeChild(sibling);
+    }
+    
+    // Prune white space from the end of the current line
+    span                            = line.lastChild;
+    while (span &&
+           span.className == 'ansi0 bgAnsi15' &&
+           !span.style.cssText.length) {
+      // Scan backwards looking for first non-space character
+      s                             = this.getTextContent(span);
+      for (var i = s.length; i--; ) {
+        if (s.charAt(i) != ' ' && s.charAt(i) != '\u00A0') {
+          if (i+1 != s.length) {
+            this.setTextContent(span, s.substr(0, i+1));
+          }
+          span                      = null;
+          break;
+        }
+      }
+      if (span) {
+        sibling                     = span;
+        span                        = span.previousSibling;
+        if (span) {
+          // Remove blank <span>'s from end of line
+          line.removeChild(sibling);
+        } else {
+          // Remove entire line (i.e. <div>), if empty
+          var blank                 = document.createElement('pre');
+          blank.style.height        = this.cursorHeight + 'px';
+          this.setTextContent(blank, '\n');
+          line.parentNode.replaceChild(blank, line);
+        }
+      }
+    }
+  }
+};
+
+VT100.prototype.gotoXY = function(x, y) {
+  if (x >= this.terminalWidth) {
+    x           = this.terminalWidth - 1;
+  }
+  if (x < 0) {
+    x           = 0;
+  }
+  var minY, maxY;
+  if (this.offsetMode) {
+    minY        = this.top;
+    maxY        = this.bottom;
+  } else {
+    minY        = 0;
+    maxY        = this.terminalHeight;
+  }
+  if (y >= maxY) {
+    y           = maxY - 1;
+  }
+  if (y < minY) {
+    y           = minY;
+  }
+  this.putString(x, y, '', undefined);
+  this.needWrap = false;
+};
+
+VT100.prototype.gotoXaY = function(x, y) {
+  this.gotoXY(x, this.offsetMode ? (this.top + y) : y);
+};
+
+VT100.prototype.refreshInvertedState = function() {
+  if (this.isInverted) {
+    this.scrollable.className += ' inverted';
+  } else {
+    this.scrollable.className = this.scrollable.className.
+                                                     replace(/ *inverted/, '');
+  }
+};
+
+VT100.prototype.enableAlternateScreen = function(state) {
+  // Don't do anything, if we are already on the desired screen
+  if ((state ? 1 : 0) == this.currentScreen) {
+    // Calling the resizer is not actually necessary. But it is a good way
+    // of resetting state that might have gotten corrupted.
+    this.resizer();
+    return;
+  }
+  
+  // We save the full state of the normal screen, when we switch away from it.
+  // But for the alternate screen, no saving is necessary. We always reset
+  // it when we switch to it.
+  if (state) {
+    this.saveCursor();
+  }
+
+  // Display new screen, and initialize state (the resizer does that for us).
+  this.currentScreen                                 = state ? 1 : 0;
+  this.console[1-this.currentScreen].style.display   = 'none';
+  this.console[this.currentScreen].style.display     = '';
+
+  // Select appropriate character pitch.
+  var transform                                      = this.getTransformName();
+  if (transform) {
+    if (state) {
+      // Upon enabling the alternate screen, we switch to 80 column mode. But
+      // upon returning to the regular screen, we restore the mode that was
+      // in effect previously.
+      this.console[1].style[transform]               = '';
+    }
+    var style                                        =
+                             this.console[this.currentScreen].style[transform];
+    this.cursor.style[transform]                     = style;
+    this.space.style[transform]                      = style;
+    this.scale                                       = style == '' ? 1.0:1.65;
+    if (transform == 'filter') {
+       this.console[this.currentScreen].style.width  = style == '' ? '165%':'';
+    }
+  }
+  this.resizer();
+
+  // If we switched to the alternate screen, reset it completely. Otherwise,
+  // restore the saved state.
+  if (state) {
+    this.gotoXY(0, 0);
+    this.clearRegion(0, 0, this.terminalWidth, this.terminalHeight);
+  } else {
+    this.restoreCursor();
+  }
+};
+
+VT100.prototype.hideCursor = function() {
+  var hidden = this.cursor.style.visibility == 'hidden';
+  if (!hidden) {
+    this.cursor.style.visibility = 'hidden';
+    return true;
+  }
+  return false;
+};
+
+VT100.prototype.showCursor = function(x, y) {
+  if (this.cursor.style.visibility) {
+    this.cursor.style.visibility = '';
+    this.putString(x == undefined ? this.cursorX : x,
+                   y == undefined ? this.cursorY : y,
+                   '', undefined);
+    return true;
+  }
+  return false;
+};
+
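+// Scroll the viewport up (back) or down (fore) by one page, clamping at
+// the top of the scrollback buffer and at the live screen, respectively.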
+VT100.prototype.scrollBack = function() {
+  var i                     = this.scrollable.scrollTop -
+                              this.scrollable.clientHeight;
+  this.scrollable.scrollTop = i < 0 ? 0 : i;
+};
+
+VT100.prototype.scrollFore = function() {
+  var i                     = this.scrollable.scrollTop +
+                              this.scrollable.clientHeight;
+  this.scrollable.scrollTop = i > this.numScrollbackLines *
+                                  this.cursorHeight + 1
+                              ? this.numScrollbackLines *
+                                this.cursorHeight + 1
+                              : i;
+};
+
+VT100.prototype.spaces = function(i) {
+  var s = '';
+  while (i-- > 0) {
+    s += ' ';
+  }
+  return s;
+};
+
+VT100.prototype.clearRegion = function(x, y, w, h, color, style) {
+  w         += x;
+  if (x < 0) {
+    x        = 0;
+  }
+  if (w > this.terminalWidth) {
+    w        = this.terminalWidth;
+  }
+  if ((w    -= x) <= 0) {
+    return;
+  }
+  h         += y;
+  if (y < 0) {
+    y        = 0;
+  }
+  if (h > this.terminalHeight) {
+    h        = this.terminalHeight;
+  }
+  if ((h    -= y) <= 0) {
+    return;
+  }
+
+  // Special case the situation where we clear the entire screen, and we do
+  // not have a scrollback buffer. In that case, we should just remove all
+  // child nodes.
+  if (!this.numScrollbackLines &&
+      w == this.terminalWidth && h == this.terminalHeight &&
+      (color == undefined || color == 'ansi0 bgAnsi15') && !style) {
+    var console = this.console[this.currentScreen];
+    while (console.lastChild) {
+      console.removeChild(console.lastChild);
+    }
+    this.putString(this.cursorX, this.cursorY, '', undefined);
+  } else {
+    var hidden = this.hideCursor();
+    var cx     = this.cursorX;
+    var cy     = this.cursorY;
+    var s      = this.spaces(w);
+    for (var i = y+h; i-- > y; ) {
+      this.putString(x, i, s, color, style);
+    }
+    hidden ? this.showCursor(cx, cy) : this.putString(cx, cy, '', undefined);
+  }
+};
+
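+// Copy a segment of up to "w" characters starting at (sX, sY) to the
+// position (dX, dY), preserving the color class and inline style of each
+// <span> that the segment touches.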
+VT100.prototype.copyLineSegment = function(dX, dY, sX, sY, w) {
+  var text                            = [ ];
+  var className                       = [ ];
+  var style                           = [ ];
+  var console                         = this.console[this.currentScreen];
+  if (sY >= console.childNodes.length) {
+    text[0]                           = this.spaces(w);
+    className[0]                      = undefined;
+    style[0]                          = undefined;
+  } else {
+    var line = console.childNodes[sY];
+    if (line.tagName != 'DIV' || !line.childNodes.length) {
+      text[0]                         = this.spaces(w);
+      className[0]                    = undefined;
+      style[0]                        = undefined;
+    } else {
+      var x                           = 0;
+      for (var span = line.firstChild; span && w > 0; span = span.nextSibling){
+        var s                         = this.getTextContent(span);
+        var len                       = s.length;
+        if (x + len > sX) {
+          var o                       = sX > x ? sX - x : 0;
+          text[text.length]           = s.substr(o, w);
+          className[className.length] = span.className;
+          style[style.length]         = span.style.cssText;
+          w                          -= len - o;
+        }
+        x                            += len;
+      }
+      if (w > 0) {
+        text[text.length]             = this.spaces(w);
+        className[className.length]   = undefined;
+        style[style.length]           = undefined;
+      }
+    }
+  }
+  var hidden                          = this.hideCursor();
+  var cx                              = this.cursorX;
+  var cy                              = this.cursorY;
+  for (var i = 0; i < text.length; i++) {
+    var color;
+    if (className[i]) {
+      color                           = className[i];
+    } else {
+      color                           = 'ansi0 bgAnsi15';
+    }
+    this.putString(dX, dY - this.numScrollbackLines, text[i], color, style[i]);
+    dX                               += text[i].length;
+  }
+  hidden ? this.showCursor(cx, cy) : this.putString(cx, cy, '', undefined);
+};
+
+VT100.prototype.scrollRegion = function(x, y, w, h, incX, incY,
+                                        color, style) {
+  var left             = incX < 0 ? -incX : 0;
+  var right            = incX > 0 ?  incX : 0;
+  var up               = incY < 0 ? -incY : 0;
+  var down             = incY > 0 ?  incY : 0;
+
+  // Clip region against terminal size
+  var dontScroll       = null;
+  w                   += x;
+  if (x < left) {
+    x                  = left;
+  }
+  if (w > this.terminalWidth - right) {
+    w                  = this.terminalWidth - right;
+  }
+  if ((w              -= x) <= 0) {
+    dontScroll         = 1;
+  }
+  h                   += y;
+  if (y < up) {
+    y                  = up;
+  }
+  if (h > this.terminalHeight - down) {
+    h                  = this.terminalHeight - down;
+  }
+  if ((h              -= y) < 0) {
+    dontScroll         = 1;
+  }
+  if (!dontScroll) {
+    if (style && style.indexOf('underline') >= 0) {
+      // Different terminal emulators disagree on the attributes that
+      // are used for scrolling. The consensus seems to be, never to
+      // fill with underlined spaces. N.B. this is different from the
+      // cases when the user blanks a region. User-initiated blanking
+      // always fills with all of the current attributes.
+      style            = style.replace(/text-decoration:underline;/, '');
+    }
+
+    // Compute current scroll position
+    var scrollPos      = this.numScrollbackLines -
+                      (this.scrollable.scrollTop-1) / this.cursorHeight;
+
+    // Determine original cursor position. Hide cursor temporarily to avoid
+    // visual artifacts.
+    var hidden         = this.hideCursor();
+    var cx             = this.cursorX;
+    var cy             = this.cursorY;
+    var console        = this.console[this.currentScreen];
+
+    if (!incX && !x && w == this.terminalWidth) {
+      // Scrolling entire lines
+      if (incY < 0) {
+        // Scrolling up
+        if (!this.currentScreen && y == -incY &&
+            h == this.terminalHeight + incY) {
+          // Scrolling up with adding to the scrollback buffer. This is only
+          // possible if there are at least as many lines in the console,
+          // as the terminal is high
+          while (console.childNodes.length < this.terminalHeight) {
+            this.insertBlankLine(this.terminalHeight);
+          }
+          
+          // Add new lines at bottom in order to force scrolling
+          for (var i = 0; i < y; i++) {
+            this.insertBlankLine(console.childNodes.length, color, style);
+          }
+
+          // Adjust the number of lines in the scrollback buffer by
+          // removing excess entries.
+          this.updateNumScrollbackLines();
+          while (this.numScrollbackLines >
+                 (this.currentScreen ? 0 : this.maxScrollbackLines)) {
+            console.removeChild(console.firstChild);
+            this.numScrollbackLines--;
+          }
+
+          // Mark lines in the scrollback buffer, so that they do not get
+          // printed.
+          for (var i = this.numScrollbackLines, j = -incY;
+               i-- > 0 && j-- > 0; ) {
+            console.childNodes[i].className = 'scrollback';
+          }
+        } else {
+          // Scrolling up without adding to the scrollback buffer.
+          for (var i = -incY;
+               i-- > 0 &&
+               console.childNodes.length >
+               this.numScrollbackLines + y + incY; ) {
+            console.removeChild(console.childNodes[
+                                          this.numScrollbackLines + y + incY]);
+          }
+
+          // If we used to have a scrollback buffer, then we must make sure
+          // that we add back blank lines at the bottom of the terminal.
+          // Similarly, if we are scrolling in the middle of the screen,
+          // we must add blank lines to ensure that the bottom of the screen
+          // does not move up.
+          if (this.numScrollbackLines > 0 ||
+              console.childNodes.length > this.numScrollbackLines+y+h+incY) {
+            for (var i = -incY; i-- > 0; ) {
+              this.insertBlankLine(this.numScrollbackLines + y + h + incY,
+                                   color, style);
+            }
+          }
+        }
+      } else {
+        // Scrolling down
+        for (var i = incY;
+             i-- > 0 &&
+             console.childNodes.length > this.numScrollbackLines + y + h; ) {
+          console.removeChild(console.childNodes[this.numScrollbackLines+y+h]);
+        }
+        for (var i = incY; i--; ) {
+          this.insertBlankLine(this.numScrollbackLines + y, color, style);
+        }
+      }
+    } else {
+      // Scrolling partial lines
+      if (incY <= 0) {
+        // Scrolling up or horizontally within a line
+        for (var i = y + this.numScrollbackLines;
+             i < y + this.numScrollbackLines + h;
+             i++) {
+          this.copyLineSegment(x + incX, i + incY, x, i, w);
+        }
+      } else {
+        // Scrolling down
+        for (var i = y + this.numScrollbackLines + h;
+             i-- > y + this.numScrollbackLines; ) {
+          this.copyLineSegment(x + incX, i + incY, x, i, w);
+        }
+      }
+
+      // Clear blank regions
+      if (incX > 0) {
+        this.clearRegion(x, y, incX, h, color, style);
+      } else if (incX < 0) {
+        this.clearRegion(x + w + incX, y, -incX, h, color, style);
+      }
+      if (incY > 0) {
+        this.clearRegion(x, y, w, incY, color, style);
+      } else if (incY < 0) {
+        this.clearRegion(x, y + h + incY, w, -incY, color, style);
+      }
+    }
+
+    // Reset scroll position
+    this.scrollable.scrollTop = (this.numScrollbackLines-scrollPos) *
+                                this.cursorHeight + 1;
+
+    // Move cursor back to its original position
+    hidden ? this.showCursor(cx, cy) : this.putString(cx, cy, '', undefined);
+  }
+};
+
+VT100.prototype.copy = function(selection) {
+  if (selection == undefined) {
+    selection                = this.selection();
+  }
+  this.internalClipboard     = undefined;
+  if (selection.length) {
+    try {
+      // IE
+      this.cliphelper.value  = selection;
+      this.cliphelper.select();
+      this.cliphelper.createTextRange().execCommand('copy');
+    } catch (e) {
+      this.internalClipboard = selection;
+    }
+    this.cliphelper.value    = '';
+  }
+};
+
+VT100.prototype.copyLast = function() {
+  // Opening the context menu can remove the selection. We try to prevent this
+  // from happening, but that is not possible for all browsers. So, instead,
+  // we compute the selection before showing the menu.
+  this.copy(this.lastSelection);
+};
+
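+// Return a closure that types the current clipboard contents into the
+// terminal, or undefined if no clipboard data is accessible. The context
+// menu uses the return value to decide whether to enable "Paste".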
+VT100.prototype.pasteFnc = function() {
+  var clipboard     = undefined;
+  if (this.internalClipboard != undefined) {
+    clipboard       = this.internalClipboard;
+  } else {
+    try {
+      this.cliphelper.value = '';
+      this.cliphelper.createTextRange().execCommand('paste');
+      clipboard     = this.cliphelper.value;
+    } catch (e) {
+    }
+  }
+  this.cliphelper.value = '';
+  if (clipboard && this.menu.style.visibility == 'hidden') {
+    return function() {
+      this.keysPressed('' + clipboard);
+    };
+  } else {
+    return undefined;
+  }
+};
+
+VT100.prototype.pasteBrowserFnc = function() {
+  var clipboard     = prompt("Paste into this box:","");
+  if (clipboard != undefined) {
+     return this.keysPressed('' + clipboard);
+  }
+};
+
+VT100.prototype.toggleUTF = function() {
+  this.utfEnabled   = !this.utfEnabled;
+
+  // We always persist the last value that the user selected. Not necessarily
+  // the last value that a random program requested.
+  this.utfPreferred = this.utfEnabled;
+};
+
+VT100.prototype.toggleBell = function() {
+  this.visualBell = !this.visualBell;
+};
+
+VT100.prototype.toggleSoftKeyboard = function() {
+  this.softKeyboard = !this.softKeyboard;
+  this.keyboardImage.style.visibility = this.softKeyboard ? 'visible' : '';
+};
+
+VT100.prototype.deselectKeys = function(elem) {
+  if (elem && elem.className == 'selected') {
+    elem.className = '';
+  }
+  for (elem = elem.firstChild; elem; elem = elem.nextSibling) {
+    this.deselectKeys(elem);
+  }
+};
+
+VT100.prototype.showSoftKeyboard = function() {
+  // Make sure no key is currently selected
+  this.lastSelectedKey           = undefined;
+  this.deselectKeys(this.keyboard);
+  this.isShift                   = false;
+  this.showShiftState(false);
+  this.isCtrl                    = false;
+  this.showCtrlState(false);
+  this.isAlt                     = false;
+  this.showAltState(false);
+
+  this.keyboard.style.left       = '0px';
+  this.keyboard.style.top        = '0px';
+  this.keyboard.style.width      = this.container.offsetWidth  + 'px';
+  this.keyboard.style.height     = this.container.offsetHeight + 'px';
+  this.keyboard.style.visibility = 'hidden';
+  this.keyboard.style.display    = '';
+
+  var kbd                        = this.keyboard.firstChild;
+  var scale                      = 1.0;
+  var transform                  = this.getTransformName();
+  if (transform) {
+    kbd.style[transform]         = '';
+    if (kbd.offsetWidth > 0.9 * this.container.offsetWidth) {
+      scale                      = (kbd.offsetWidth/
+                                    this.container.offsetWidth)/0.9;
+    }
+    if (kbd.offsetHeight > 0.9 * this.container.offsetHeight) {
+      scale                      = Math.max(scale,
+                                            (kbd.offsetHeight/
+                                             this.container.offsetHeight)/0.9);
+    }
+    var style                    = this.getTransformStyle(transform,
+                                              scale > 1.0 ? scale : undefined);
+    kbd.style[transform]         = style;
+  }
+  if (transform == 'filter') {
+    scale                        = 1.0;
+  }
+  kbd.style.left                 = ((this.container.offsetWidth -
+                                     kbd.offsetWidth/scale)/2) + 'px';
+  kbd.style.top                  = ((this.container.offsetHeight -
+                                     kbd.offsetHeight/scale)/2) + 'px';
+
+  this.keyboard.style.visibility = 'visible';
+};
+
+VT100.prototype.hideSoftKeyboard = function() {
+  this.keyboard.style.display    = 'none';
+};
+
+VT100.prototype.toggleCursorBlinking = function() {
+  this.blinkingCursor = !this.blinkingCursor;
+};
+
+VT100.prototype.about = function() {
+  alert("VT100 Terminal Emulator " + "2.10 (revision 239)" +
+        "\nCopyright 2008-2010 by Markus Gutschke\n" +
+        "For more information check http://shellinabox.com");
+};
+
+VT100.prototype.hideContextMenu = function() {
+  this.menu.style.visibility = 'hidden';
+  this.menu.style.top        = '-100px';
+  this.menu.style.left       = '-100px';
+  this.menu.style.width      = '0px';
+  this.menu.style.height     = '0px';
+};
+
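+// Hook for subclasses: append additional <li> entries to the context menu
+// and matching handlers to the "actions" array before the menu is shown.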
+VT100.prototype.extendContextMenu = function(entries, actions) {
+};
+
+VT100.prototype.showContextMenu = function(x, y) {
+  this.menu.innerHTML         =
+    '<table class="popup" ' +
+           'cellpadding="0" cellspacing="0">' +
+      '<tr><td>' +
+        '<ul id="menuentries">' +
+          '<li id="beginclipboard">Copy</li>' +
+          '<li id="endclipboard">Paste</li>' +
+          '<li id="browserclipboard">Paste from browser</li>' +
+          '<hr />' +
+          '<li id="reset">Reset</li>' +
+          '<hr />' +
+          '<li id="beginconfig">' +
+             (this.utfEnabled ? '<img src="/webshell/enabled.gif" />' : '') +
+             'Unicode</li>' +
+          '<li>' +
+             (this.visualBell ? '<img src="/webshell/enabled.gif" />' : '') +
+             'Visual Bell</li>'+
+          '<li>' +
+             (this.softKeyboard ? '<img src="/webshell/enabled.gif" />' : '') +
+             'Onscreen Keyboard</li>' +
+          '<li id="endconfig">' +
+             (this.blinkingCursor ? '<img src="/webshell/enabled.gif" />' : '') +
+             'Blinking Cursor</li>'+
+          (this.usercss.firstChild ?
+           '<hr id="beginusercss" />' +
+           this.usercss.innerHTML +
+           '<hr id="endusercss" />' :
+           '<hr />') +
+          '<li id="about">About...</li>' +
+        '</ul>' +
+      '</td></tr>' +
+    '</table>';
+
+  var popup                   = this.menu.firstChild;
+  var menuentries             = this.getChildById(popup, 'menuentries');
+
+  // Determine menu entries that should be disabled
+  this.lastSelection          = this.selection();
+  if (!this.lastSelection.length) {
+    menuentries.firstChild.className
+                              = 'disabled';
+  }
+  var p                       = this.pasteFnc();
+  if (!p) {
+    menuentries.childNodes[1].className
+                              = 'disabled';
+  }
+
+  // Actions for default items
+  var actions                 = [ this.copyLast, p, this.pasteBrowserFnc, this.reset,
+                                  this.toggleUTF, this.toggleBell,
+                                  this.toggleSoftKeyboard,
+                                  this.toggleCursorBlinking ];
+
+  // Actions for user CSS styles (if any)
+  for (var i = 0; i < this.usercssActions.length; ++i) {
+    actions[actions.length]   = this.usercssActions[i];
+  }
+  actions[actions.length]     = this.about;
+
+  // Allow subclasses to dynamically add entries to the context menu
+  this.extendContextMenu(menuentries, actions);
+
+  // Hook up event listeners
+  for (var node = menuentries.firstChild, i = 0; node;
+       node = node.nextSibling) {
+    if (node.tagName == 'LI') {
+      if (node.className != 'disabled') {
+        this.addListener(node, 'mouseover',
+                         function(vt100, node) {
+                           return function() {
+                             node.className = 'hover';
+                           }
+                         }(this, node));
+        this.addListener(node, 'mouseout',
+                         function(vt100, node) {
+                           return function() {
+                             node.className = '';
+                           }
+                         }(this, node));
+        this.addListener(node, 'mousedown',
+                         function(vt100, action) {
+                           return function(event) {
+                             vt100.hideContextMenu();
+                             action.call(vt100);
+                             vt100.storeUserSettings();
+                             return vt100.cancelEvent(event || window.event);
+                           }
+                         }(this, actions[i]));
+        this.addListener(node, 'mouseup',
+                         function(vt100) {
+                           return function(event) {
+                             return vt100.cancelEvent(event || window.event);
+                           }
+                         }(this));
+        this.addListener(node, 'mouseclick',
+                         function(vt100) {
+                           return function(event) {
+                             return vt100.cancelEvent(event || window.event);
+                           }
+                         }(this));
+      }
+      i++;
+    }
+  }
+
+  // Position menu next to the mouse pointer
+  this.menu.style.left        = '0px';
+  this.menu.style.top         = '0px';
+  this.menu.style.width       =  this.container.offsetWidth  + 'px';
+  this.menu.style.height      =  this.container.offsetHeight + 'px';
+  popup.style.left            = '0px';
+  popup.style.top             = '0px';
+  
+  var margin                  = 2;
+  if (x + popup.clientWidth >= this.container.offsetWidth - margin) {
+    x              = this.container.offsetWidth-popup.clientWidth - margin - 1;
+  }
+  if (x < margin) {
+    x                         = margin;
+  }
+  if (y + popup.clientHeight >= this.container.offsetHeight - margin) {
+    y            = this.container.offsetHeight-popup.clientHeight - margin - 1;
+  }
+  if (y < margin) {
+    y                         = margin;
+  }
+  popup.style.left            = x + 'px';
+  popup.style.top             = y + 'px';
+
+  // Block all other interactions with the terminal emulator
+  this.addListener(this.menu, 'click', function(vt100) {
+                                         return function() {
+                                           vt100.hideContextMenu();
+                                         }
+                                       }(this));
+
+  // Show the menu
+  this.menu.style.visibility  = '';
+};
+
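+// Feed a string of characters to the terminal emulator. Control
+// characters that the emulator does not interpret are rendered visibly
+// as "<n>".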
+VT100.prototype.keysPressed = function(ch) {
+  for (var i = 0; i < ch.length; i++) {
+    var c = ch.charCodeAt(i);
+    this.vt100(c >= 7 && c <= 15 ||
+               c == 24 || c == 26 || c == 27 || c >= 32
+               ? String.fromCharCode(c) : '<' + c + '>');
+  }
+};
+
+VT100.prototype.applyModifiers = function(ch, event) {
+  if (ch) {
+    if (event.ctrlKey) {
+      if (ch >= 32 && ch <= 127) {
+        // For historic reasons, some control characters are treated specially
+        switch (ch) {
+        case /* 3 */ 51: ch  =  27; break;
+        case /* 4 */ 52: ch  =  28; break;
+        case /* 5 */ 53: ch  =  29; break;
+        case /* 6 */ 54: ch  =  30; break;
+        case /* 7 */ 55: ch  =  31; break;
+        case /* 8 */ 56: ch  = 127; break;
+        case /* ? */ 63: ch  = 127; break;
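+        // All other characters: keep only the low five bits, mapping
+        // e.g. Ctrl-A..Ctrl-Z to the control codes 1..26.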
+        default:         ch &=  31; break;
+        }
+      }
+    }
+    return String.fromCharCode(ch);
+  } else {
+    return undefined;
+  }
+};
+
+VT100.prototype.handleKey = function(event) {
+  // this.vt100('H: c=' + event.charCode + ', k=' + event.keyCode +
+  //            (event.shiftKey || event.ctrlKey || event.altKey ||
+  //             event.metaKey ? ', ' +
+  //             (event.shiftKey ? 'S' : '') + (event.ctrlKey ? 'C' : '') +
+  //             (event.altKey ? 'A' : '') + (event.metaKey ? 'M' : '') : '') +
+  //            '\r\n');
+  var ch, key;
+  if (typeof event.charCode != 'undefined') {
+    // non-IE keypress events have a translated charCode value. Also, our
+    // fake events generated when receiving keydown events include this data
+    // on all browsers.
+    ch                                = event.charCode;
+    key                               = event.keyCode;
+  } else {
+    // When sending a keypress event, IE includes the translated character
+    // code in the keyCode field.
+    ch                                = event.keyCode;
+    key                               = undefined;
+  }
+
+  // Apply modifier keys (ctrl and shift)
+  if (ch) {
+    key                               = undefined;
+  }
+  ch                                  = this.applyModifiers(ch, event);
+
+  // By this point, "ch" is either defined and contains the character code, or
+  // it is undefined and "key" defines the code of a function key 
+  if (ch != undefined) {
+    this.scrollable.scrollTop         = this.numScrollbackLines *
+                                        this.cursorHeight + 1;
+  } else {
+    if ((event.altKey || event.metaKey) && !event.shiftKey && !event.ctrlKey) {
+      // Many programs have difficulties dealing with parametrized escape
+      // sequences for function keys. Thus, if ALT is the only modifier
+      // key, return Emacs-style keycodes for commonly used keys.
+      switch (key) {
+      case  33: /* Page Up      */ ch = '\u001B<';                      break;
+      case  34: /* Page Down    */ ch = '\u001B>';                      break;
+      case  37: /* Left         */ ch = '\u001Bb';                      break;
+      case  38: /* Up           */ ch = '\u001Bp';                      break;
+      case  39: /* Right        */ ch = '\u001Bf';                      break;
+      case  40: /* Down         */ ch = '\u001Bn';                      break;
+      case  46: /* Delete       */ ch = '\u001Bd';                      break;
+      default:                                                          break;
+      }
+    } else if (event.shiftKey && !event.ctrlKey &&
+               !event.altKey && !event.metaKey) {
+      switch (key) {
+      case  33: /* Page Up      */ this.scrollBack();                   return;
+      case  34: /* Page Down    */ this.scrollFore();                   return;
+      default:                                                          break;
+      }
+    }
+    if (ch == undefined) {
+      switch (key) {
+      case   8: /* Backspace    */ ch = '\u007f';                       break;
+      case   9: /* Tab          */ ch = '\u0009';                       break;
+      case  10: /* Return       */ ch = '\u000A';                       break;
+      case  13: /* Enter        */ ch = this.crLfMode ?
+                                        '\r\n' : '\r';                  break;
+      case  16: /* Shift        */                                      return;
+      case  17: /* Ctrl         */                                      return;
+      case  18: /* Alt          */                                      return;
+      case  19: /* Break        */                                      return;
+      case  20: /* Caps Lock    */                                      return;
+      case  27: /* Escape       */ ch = '\u001B';                       break;
+      case  33: /* Page Up      */ ch = '\u001B[5~';                    break;
+      case  34: /* Page Down    */ ch = '\u001B[6~';                    break;
+      case  35: /* End          */ ch = '\u001BOF';                     break;
+      case  36: /* Home         */ ch = '\u001BOH';                     break;
+      case  37: /* Left         */ ch = this.cursorKeyMode ?
+                             '\u001BOD' : '\u001B[D';                   break;
+      case  38: /* Up           */ ch = this.cursorKeyMode ?
+                             '\u001BOA' : '\u001B[A';                   break;
+      case  39: /* Right        */ ch = this.cursorKeyMode ?
+                             '\u001BOC' : '\u001B[C';                   break;
+      case  40: /* Down         */ ch = this.cursorKeyMode ?
+                             '\u001BOB' : '\u001B[B';                   break;
+      case  45: /* Insert       */ ch = '\u001B[2~';                    break;
+      case  46: /* Delete       */ ch = '\u001B[3~';                    break;
+      case  91: /* Left Window  */                                      return;
+      case  92: /* Right Window */                                      return;
+      case  93: /* Select       */                                      return;
+      case  96: /* 0            */ ch = this.applyModifiers(48, event); break;
+      case  97: /* 1            */ ch = this.applyModifiers(49, event); break;
+      case  98: /* 2            */ ch = this.applyModifiers(50, event); break;
+      case  99: /* 3            */ ch = this.applyModifiers(51, event); break;
+      case 100: /* 4            */ ch = this.applyModifiers(52, event); break;
+      case 101: /* 5            */ ch = this.applyModifiers(53, event); break;
+      case 102: /* 6            */ ch = this.applyModifiers(54, event); break;
+      case 103: /* 7            */ ch = this.applyModifiers(55, event); break;
+      case 104: /* 8            */ ch = this.applyModifiers(56, event); break;
+      case 105: /* 9            */ ch = this.applyModifiers(57, event); break;
+      case 106: /* *            */ ch = this.applyModifiers(42, event); break;
+      case 107: /* +            */ ch = this.applyModifiers(43, event); break;
+      case 109: /* -            */ ch = this.applyModifiers(45, event); break;
+      case 110: /* .            */ ch = this.applyModifiers(46, event); break;
+      case 111: /* /            */ ch = this.applyModifiers(47, event); break;
+      case 112: /* F1           */ ch = '\u001BOP';                     break;
+      case 113: /* F2           */ ch = '\u001BOQ';                     break;
+      case 114: /* F3           */ ch = '\u001BOR';                     break;
+      case 115: /* F4           */ ch = '\u001BOS';                     break;
+      case 116: /* F5           */ ch = '\u001B[15~';                   break;
+      case 117: /* F6           */ ch = '\u001B[17~';                   break;
+      case 118: /* F7           */ ch = '\u001B[18~';                   break;
+      case 119: /* F8           */ ch = '\u001B[19~';                   break;
+      case 120: /* F9           */ ch = '\u001B[20~';                   break;
+      case 121: /* F10          */ ch = '\u001B[21~';                   break;
+      case 122: /* F11          */ ch = '\u001B[23~';                   break;
+      case 123: /* F12          */ ch = '\u001B[24~';                   break;
+      case 144: /* Num Lock     */                                      return;
+      case 145: /* Scroll Lock  */                                      return;
+      case 186: /* ;            */ ch = this.applyModifiers(59, event); break;
+      case 187: /* =            */ ch = this.applyModifiers(61, event); break;
+      case 188: /* ,            */ ch = this.applyModifiers(44, event); break;
+      case 189: /* -            */ ch = this.applyModifiers(45, event); break;
+      case 173: /* -            */ ch = this.applyModifiers(45, event); break; // FF15 Patch
+      case 190: /* .            */ ch = this.applyModifiers(46, event); break;
+      case 191: /* /            */ ch = this.applyModifiers(47, event); break;
+      // Conflicts with dead key " on Swiss keyboards
+      //case 192: /* `            */ ch = this.applyModifiers(96, event); break;
+      // Conflicts with dead key " on Swiss keyboards
+      //case 219: /* [            */ ch = this.applyModifiers(91, event); break;
+      case 220: /* \            */ ch = this.applyModifiers(92, event); break;
+      // Conflicts with dead key ^ and ` on Swiss keyboards
+      //                         ^ and " on French keyboards
+      //case 221: /* ]            */ ch = this.applyModifiers(93, event); break;
+      case 222: /* '            */ ch = this.applyModifiers(39, event); break;
+      default:                                                          return;
+      }
+      this.scrollable.scrollTop       = this.numScrollbackLines *
+                                        this.cursorHeight + 1;
+    }
+  }
+
+  // "ch" now contains the sequence of keycodes to send. But we might still
+  // have to apply the effects of modifier keys.
+  if (event.shiftKey || event.ctrlKey || event.altKey || event.metaKey) {
+    var start, digit, part1, part2;
+    if ((start = ch.substr(0, 2)) == '\u001B[') {
+      for (part1 = start;
+           part1.length < ch.length &&
+             (digit = ch.charCodeAt(part1.length)) >= 48 && digit <= 57; ) {
+        part1                         = ch.substr(0, part1.length + 1);
+      }
+      part2                           = ch.substr(part1.length);
+      if (part1.length > 2) {
+        part1                        += ';';
+      }
+    } else if (start == '\u001BO') {
+      part1                           = start;
+      part2                           = ch.substr(2);
+    }
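+    // Splice the summed modifier state (shift=1, alt/meta=2, ctrl=4) into
+    // the escape sequence as an additional numeric parameter.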
+    if (part1 != undefined) {
+      ch                              = part1                                 +
+                                       ((event.shiftKey             ? 1 : 0)  +
+                                        (event.altKey|event.metaKey ? 2 : 0)  +
+                                        (event.ctrlKey              ? 4 : 0)) +
+                                        part2;
+    } else if (ch.length == 1 && (event.altKey || event.metaKey)) {
+      ch                              = '\u001B' + ch;
+    }
+  }
+
+  if (this.menu.style.visibility == 'hidden') {
+    // this.vt100('R: c=');
+    // for (var i = 0; i < ch.length; i++)
+    //   this.vt100((i != 0 ? ', ' : '') + ch.charCodeAt(i));
+    // this.vt100('\r\n');
+    this.keysPressed(ch);
+  }
+};
+
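+// Debugging helper: format an arbitrary object (one level deep) as plain
+// text that can be printed to the terminal.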
+VT100.prototype.inspect = function(o, d) {
+  if (d == undefined) {
+    d       = 0;
+  }
+  var rc    = '';
+  if (typeof o == 'object' && ++d < 2) {
+    rc      = '[\r\n';
+    for (var i in o) {
+      rc   += this.spaces(d * 2) + i + ' -> ';
+      try {
+        rc += this.inspect(o[i], d);
+      } catch (e) {
+        rc += '?' + '?' + '?\r\n';
+      }
+    }
+    rc     += ']\r\n';
+  } else {
+    rc     += ('' + o).replace(/\n/g, ' ').replace(/ +/g,' ') + '\r\n';
+  }
+  return rc;
+};
+
+VT100.prototype.checkComposedKeys = function(event) {
+  // Composed keys (at least on Linux) do not generate normal events.
+  // Instead, they get entered into the text field. We normally catch
+  // this on the next keyup event.
+  var s              = this.input.value;
+  if (s.length) {
+    this.input.value = '';
+    if (this.menu.style.visibility == 'hidden') {
+      this.keysPressed(s);
+    }
+  }
+};
+
+VT100.prototype.fixEvent = function(event) {
+  // Some browsers report AltGR as a combination of ALT and CTRL. As AltGr
+  // is used as a second-level selector, clear the modifier bits before
+  // handling the event.
+  if (event.ctrlKey && event.altKey) {
+    var fake                = [ ];
+    fake.charCode           = event.charCode;
+    fake.keyCode            = event.keyCode;
+    fake.ctrlKey            = false;
+    fake.shiftKey           = event.shiftKey;
+    fake.altKey             = false;
+    fake.metaKey            = event.metaKey;
+    return fake;
+  }
+
+  // Some browsers fail to translate keys, if both shift and alt/meta is
+  // pressed at the same time. We try to translate those cases, but that
+  // only works for US keyboard layouts.
+  if (event.shiftKey) {
+    var u                   = undefined;
+    var s                   = undefined;
+    switch (this.lastNormalKeyDownEvent.keyCode) {
+    case  39: /* ' -> " */ u = 39; s =  34; break;
+    case  44: /* , -> < */ u = 44; s =  60; break;
+    case  45: /* - -> _ */ u = 45; s =  95; break;
+    case  46: /* . -> > */ u = 46; s =  62; break;
+    case  47: /* / -> ? */ u = 47; s =  63; break;
+
+    case  48: /* 0 -> ) */ u = 48; s =  41; break;
+    case  49: /* 1 -> ! */ u = 49; s =  33; break;
+    case  50: /* 2 -> @ */ u = 50; s =  64; break;
+    case  51: /* 3 -> # */ u = 51; s =  35; break;
+    case  52: /* 4 -> $ */ u = 52; s =  36; break;
+    case  53: /* 5 -> % */ u = 53; s =  37; break;
+    case  54: /* 6 -> ^ */ u = 54; s =  94; break;
+    case  55: /* 7 -> & */ u = 55; s =  38; break;
+    case  56: /* 8 -> * */ u = 56; s =  42; break;
+    case  57: /* 9 -> ( */ u = 57; s =  40; break;
+
+    case  59: /* ; -> : */ u = 59; s =  58; break;
+    case  61: /* = -> + */ u = 61; s =  43; break;
+    case  91: /* [ -> { */ u = 91; s = 123; break;
+    case  92: /* \ -> | */ u = 92; s = 124; break;
+    case  93: /* ] -> } */ u = 93; s = 125; break; 
+    case  96: /* ` -> ~ */ u = 96; s = 126; break;
+
+    case 109: /* - -> _ */ u = 45; s =  95; break;
+    case 111: /* / -> ? */ u = 47; s =  63; break;
+
+    case 186: /* ; -> : */ u = 59; s =  58; break;
+    case 187: /* = -> + */ u = 61; s =  43; break;
+    case 188: /* , -> < */ u = 44; s =  60; break;
+    case 189: /* - -> _ */ u = 45; s =  95; break;
+    case 173: /* - -> _ */ u = 45; s =  95; break; // FF15 Patch
+    case 190: /* . -> > */ u = 46; s =  62; break;
+    case 191: /* / -> ? */ u = 47; s =  63; break;
+    case 192: /* ` -> ~ */ u = 96; s = 126; break;
+    case 219: /* [ -> { */ u = 91; s = 123; break;
+    case 220: /* \ -> | */ u = 92; s = 124; break;
+    case 221: /* ] -> } */ u = 93; s = 125; break; 
+    case 222: /* ' -> " */ u = 39; s =  34; break;
+    default:                                break;
+    }
+    if (s && (event.charCode == u || event.charCode == 0)) {
+      var fake              = [ ];
+      fake.charCode         = s;
+      fake.keyCode          = event.keyCode;
+      fake.ctrlKey          = event.ctrlKey;
+      fake.shiftKey         = event.shiftKey;
+      fake.altKey           = event.altKey;
+      fake.metaKey          = event.metaKey;
+      return fake;
+    }
+  }
+  return event;
+};
+
+VT100.prototype.keyDown = function(event) {
+  // this.vt100('D: c=' + event.charCode + ', k=' + event.keyCode +
+  //            (event.shiftKey || event.ctrlKey || event.altKey ||
+  //             event.metaKey ? ', ' +
+  //             (event.shiftKey ? 'S' : '') + (event.ctrlKey ? 'C' : '') +
+  //             (event.altKey ? 'A' : '') + (event.metaKey ? 'M' : '') : '') +
+  //            '\r\n');
+  this.checkComposedKeys(event);
+  this.lastKeyPressedEvent      = undefined;
+  this.lastKeyDownEvent         = undefined;
+  this.lastNormalKeyDownEvent   = event;
+
+  // Swiss keyboard conflicts:
+  // [ 59
+  // ] 192
+  // ' 219 (dead key)
+  // { 220
+  // ~ 221 (dead key)
+  // } 223
+  // French keyboard conflicts:
+  // ~ 50 (dead key)
+  // } 107
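+  // Classify the key that was pressed. "asciiKey" covers space, digits,
+  // and letters; "alphNumKey" adds the keypad digits and the layout-
+  // dependent punctuation codes listed above; "normalKey" covers the
+  // remaining keys that normally generate a keypress event and can
+  // therefore be left to the keypress handler.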
+  var asciiKey                  =
+    event.keyCode ==  32                         ||
+    event.keyCode >=  48 && event.keyCode <=  57 ||
+    event.keyCode >=  65 && event.keyCode <=  90;
+  var alphNumKey                =
+    asciiKey                                     ||
+    event.keyCode ==  59 ||
+    event.keyCode >=  96 && event.keyCode <= 105 ||
+    event.keyCode == 107 ||
+    event.keyCode == 192 ||
+    event.keyCode >= 219 && event.keyCode <= 221 ||
+    event.keyCode == 223 ||
+    event.keyCode == 226;
+  var normalKey                 =
+    alphNumKey                                   ||
+    event.keyCode ==  61 ||
+    event.keyCode == 106 ||
+    event.keyCode >= 109 && event.keyCode <= 111 ||
+    event.keyCode >= 186 && event.keyCode <= 191 ||
+    event.keyCode == 222 ||
+    event.keyCode == 252;
+  try {
+    if (navigator.appName == 'Konqueror') {
+      normalKey                |= event.keyCode < 128;
+    }
+  } catch (e) {
+  }
+
+  // We normally prefer to look at keypress events, as they perform the
+  // translation from keyCode to charCode. This is important, as the
+  // translation is locale-dependent.
+  // But for some keys, we must intercept them during the keydown event,
+  // as they would otherwise get interpreted by the browser.
+  // Even when doing all of this, there are some keys that we can never
+  // intercept. This applies to some of the menu navigation keys in IE.
+  // In fact, we see them, but we cannot stop IE from seeing them, too.
+  if ((event.charCode || event.keyCode) &&
+      ((alphNumKey && (event.ctrlKey || event.altKey || event.metaKey) &&
+        !event.shiftKey &&
+        // Some browsers signal AltGR as both CTRL and ALT. Do not try to
+        // interpret this sequence ourselves, as some keyboard layouts use
+        // it for second-level layouts.
+        !(event.ctrlKey && event.altKey)) ||
+       this.catchModifiersEarly && normalKey && !alphNumKey &&
+       (event.ctrlKey || event.altKey || event.metaKey) ||
+       !normalKey)) {
+    this.lastKeyDownEvent       = event;
+    var fake                    = [ ];
+    fake.ctrlKey                = event.ctrlKey;
+    fake.shiftKey               = event.shiftKey;
+    fake.altKey                 = event.altKey;
+    fake.metaKey                = event.metaKey;
+    if (asciiKey) {
+      fake.charCode             = event.keyCode;
+      fake.keyCode              = 0;
+    } else {
+      fake.charCode             = 0;
+      fake.keyCode              = event.keyCode;
+      if (!alphNumKey && event.shiftKey) {
+        fake                    = this.fixEvent(fake);
+      }
+    }
+
+    this.handleKey(fake);
+    this.lastNormalKeyDownEvent = undefined;
+
+    try {
+      // For non-IE browsers
+      event.stopPropagation();
+      event.preventDefault();
+    } catch (e) {
+    }
+    try {
+      // For IE
+      event.cancelBubble = true;
+      event.returnValue  = false;
+      event.keyCode      = 0;
+    } catch (e) {
+    }
+
+    return false;
+  }
+  return true;
+};
+
+VT100.prototype.keyPressed = function(event) {
+  // this.vt100('P: c=' + event.charCode + ', k=' + event.keyCode +
+  //            (event.shiftKey || event.ctrlKey || event.altKey ||
+  //             event.metaKey ? ', ' +
+  //             (event.shiftKey ? 'S' : '') + (event.ctrlKey ? 'C' : '') +
+  //             (event.altKey ? 'A' : '') + (event.metaKey ? 'M' : '') : '') +
+  //            '\r\n');
+  if (this.lastKeyDownEvent) {
+    // If we already processed the key on keydown, do not process it
+    // again here. Ideally, the browser should not even have generated a
+    // keypress event in this case. But that does not appear to always work.
+    this.lastKeyDownEvent     = undefined;
+  } else {
+    this.handleKey(event.altKey || event.metaKey
+                   ? this.fixEvent(event) : event);
+  }
+
+  try {
+    // For non-IE browsers
+    event.preventDefault();
+  } catch (e) {
+  }
+
+  try {
+    // For IE
+    event.cancelBubble = true;
+    event.returnValue  = false;
+    event.keyCode      = 0;
+  } catch (e) {
+  }
+
+  this.lastNormalKeyDownEvent = undefined;
+  this.lastKeyPressedEvent    = event;
+  return false;
+};
+
+VT100.prototype.keyUp = function(event) {
+  // this.vt100('U: c=' + event.charCode + ', k=' + event.keyCode +
+  //            (event.shiftKey || event.ctrlKey || event.altKey ||
+  //             event.metaKey ? ', ' +
+  //             (event.shiftKey ? 'S' : '') + (event.ctrlKey ? 'C' : '') +
+  //             (event.altKey ? 'A' : '') + (event.metaKey ? 'M' : '') : '') +
+  //            '\r\n');
+  if (this.lastKeyPressedEvent) {
+    // The compose key on Linux occasionally confuses the browser and keeps
+    // inserting bogus characters into the input field, even if just a regular
+    // key has been pressed. Detect this case and drop the bogus characters.
+    (event.target ||
+     event.srcElement).value      = '';
+  } else {
+    // This is usually where we notice that a key has been composed and
+    // thus failed to generate normal events.
+    this.checkComposedKeys(event);
+
+    // Some browsers don't report keypress events if ctrl or alt is pressed
+    // for non-alphanumerical keys. Patch things up for now, but in the
+    // future we will catch these keys earlier (in the keydown handler).
+    if (this.lastNormalKeyDownEvent) {
+      // this.vt100('ENABLING EARLY CATCHING OF MODIFIER KEYS\r\n');
+      this.catchModifiersEarly    = true;
+      var asciiKey                =
+        event.keyCode ==  32                         ||
+        // Conflicts with dead key ~ (code 50) on French keyboards
+        //event.keyCode >=  48 && event.keyCode <=  57 ||
+        event.keyCode >=  48 && event.keyCode <=  49 ||
+        event.keyCode >=  51 && event.keyCode <=  57 ||
+        event.keyCode >=  65 && event.keyCode <=  90;
+      var alphNumKey              =
+        asciiKey                                     ||
+        event.keyCode ==  50                         ||
+        event.keyCode >=  96 && event.keyCode <= 105;
+      var normalKey               =
+        alphNumKey                                   ||
+        event.keyCode ==  59 || event.keyCode ==  61 ||
+        event.keyCode == 106 || event.keyCode == 107 ||
+        event.keyCode >= 109 && event.keyCode <= 111 ||
+        event.keyCode >= 186 && event.keyCode <= 192 ||
+        event.keyCode >= 219 && event.keyCode <= 223 ||
+        event.keyCode == 252;
+      var fake                    = [ ];
+      fake.ctrlKey                = event.ctrlKey;
+      fake.shiftKey               = event.shiftKey;
+      fake.altKey                 = event.altKey;
+      fake.metaKey                = event.metaKey;
+      if (asciiKey) {
+        fake.charCode             = event.keyCode;
+        fake.keyCode              = 0;
+      } else {
+        fake.charCode             = 0;
+        fake.keyCode              = event.keyCode;
+        if (!alphNumKey && (event.ctrlKey || event.altKey || event.metaKey)) {
+          fake                    = this.fixEvent(fake);
+        }
+      }
+      this.lastNormalKeyDownEvent = undefined;
+      this.handleKey(fake);
+    }
+  }
+
+  try {
+    // For IE
+    event.cancelBubble            = true;
+    event.returnValue             = false;
+    event.keyCode                 = 0;
+  } catch (e) {
+  }
+
+  this.lastKeyDownEvent           = undefined;
+  this.lastKeyPressedEvent        = undefined;
+  return false;
+};
+
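+// The cursor is animated from a 500ms interval timer. The same timer is
+// also used to poll for text that was composed or pasted into the input
+// field (see checkComposedKeys).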
+VT100.prototype.animateCursor = function(inactive) {
+  if (!this.cursorInterval) {
+    this.cursorInterval       = setInterval(
+      function(vt100) {
+        return function() {
+          vt100.animateCursor();
+
+          // Use this opportunity to check whether the user entered a composed
+          // key, or whether somebody pasted text into the textfield.
+          vt100.checkComposedKeys();
+        }
+      }(this), 500);
+  }
+  if (inactive != undefined || this.cursor.className != 'inactive') {
+    if (inactive) {
+      this.cursor.className   = 'inactive';
+    } else {
+      if (this.blinkingCursor) {
+        this.cursor.className = this.cursor.className == 'bright'
+                                ? 'dim' : 'bright';
+      } else {
+        this.cursor.className = 'bright';
+      }
+    }
+  }
+};
+
+VT100.prototype.blurCursor = function() {
+  this.animateCursor(true);
+};
+
+VT100.prototype.focusCursor = function() {
+  this.animateCursor(false);
+};
+
+VT100.prototype.flashScreen = function() {
+  this.isInverted       = !this.isInverted;
+  this.refreshInvertedState();
+  this.isInverted       = !this.isInverted;
+  setTimeout(function(vt100) {
+               return function() {
+                 vt100.refreshInvertedState();
+               };
+             }(this), 100);
+};
+
+VT100.prototype.beep = function() {
+  if (this.visualBell) {
+    this.flashScreen();
+  } else {
+    try {
+      this.beeper.Play();
+    } catch (e) {
+      try {
+        this.beeper.src = 'beep.wav';
+      } catch (e) {
+      }
+    }
+  }
+};
+
+VT100.prototype.bs = function() {
+  if (this.cursorX > 0) {
+    this.gotoXY(this.cursorX - 1, this.cursorY);
+    this.needWrap = false;
+  }
+};
+
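+// Horizontal tab handling: this.userTabStop[] is tri-state. An entry of
+// true is an explicitly set tab stop, false is an explicitly cleared one,
+// and undefined falls back to the default stop at every eighth column.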
+VT100.prototype.ht = function(count) {
+  if (count == undefined) {
+    count        = 1;
+  }
+  var cx         = this.cursorX;
+  while (count-- > 0) {
+    while (cx++ < this.terminalWidth) {
+      var tabState = this.userTabStop[cx];
+      if (tabState == false) {
+        // Explicitly cleared tab stop
+        continue;
+      } else if (tabState) {
+        // Explicitly set tab stop
+        break;
+      } else {
+        // Default tab stop at each eighth column
+        if (cx % 8 == 0) {
+          break;
+        }
+      }
+    }
+  }
+  if (cx > this.terminalWidth - 1) {
+    cx           = this.terminalWidth - 1;
+  }
+  if (cx != this.cursorX) {
+    this.gotoXY(cx, this.cursorY);
+  }
+};
+
+VT100.prototype.rt = function(count) {
+  if (count == undefined) {
+    count          = 1 ;
+  }
+  var cx           = this.cursorX;
+  while (count-- > 0) {
+    while (cx-- > 0) {
+      var tabState = this.userTabStop[cx];
+      if (tabState == false) {
+        // Explicitly cleared tab stop
+        continue;
+      } else if (tabState) {
+        // Explicitly set tab stop
+        break;
+      } else {
+        // Default tab stop at each eighth column
+        if (cx % 8 == 0) {
+          break;
+        }
+      }
+    }
+  }
+  if (cx < 0) {
+    cx             = 0;
+  }
+  if (cx != this.cursorX) {
+    this.gotoXY(cx, this.cursorY);
+  }
+};
+
+VT100.prototype.cr = function() {
+  this.gotoXY(0, this.cursorY);
+  this.needWrap = false;
+};
+
+VT100.prototype.lf = function(count) {
+  if (count == undefined) {
+    count    = 1;
+  } else {
+    if (count > this.terminalHeight) {
+      count  = this.terminalHeight;
+    }
+    if (count < 1) {
+      count  = 1;
+    }
+  }
+  while (count-- > 0) {
+    if (this.cursorY == this.bottom - 1) {
+      this.scrollRegion(0, this.top + 1,
+                        this.terminalWidth, this.bottom - this.top - 1,
+                        0, -1, this.color, this.style);
+    } else if (this.cursorY < this.terminalHeight - 1) {
+      this.gotoXY(this.cursorX, this.cursorY + 1);
+    }
+  }
+};
+
+VT100.prototype.ri = function(count) {
+  if (count == undefined) {
+    count   = 1;
+  } else {
+    if (count > this.terminalHeight) {
+      count = this.terminalHeight;
+    }
+    if (count < 1) {
+      count = 1;
+    }
+  }
+  while (count-- > 0) {
+    if (this.cursorY == this.top) {
+      this.scrollRegion(0, this.top,
+                        this.terminalWidth, this.bottom - this.top - 1,
+                        0, 1, this.color, this.style);
+    } else if (this.cursorY > 0) {
+      this.gotoXY(this.cursorX, this.cursorY - 1);
+    }
+  }
+  this.needWrap = false;
+};
+
+VT100.prototype.respondID = function() {
+  this.respondString += '\u001B[?6c';
+};
+
+VT100.prototype.respondSecondaryDA = function() {
+  this.respondString += '\u001B[>0;0;0c';
+};
+
+
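+// Layout of the attribute word maintained in this.attr (see csim()):
+//   bits 0-3: foreground color    bits 4-7: background color
+//   0x0100: reverse    0x0200: underline    0x0400: dim
+//   0x0800: bright     0x1000: blink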
+VT100.prototype.updateStyle = function() {
+  this.style   = '';
+  if (this.attr & 0x0200 /* ATTR_UNDERLINE */) {
+    this.style = 'text-decoration: underline;';
+  }
+  var bg       = (this.attr >> 4) & 0xF;
+  var fg       =  this.attr       & 0xF;
+  if (this.attr & 0x0100 /* ATTR_REVERSE */) {
+    var tmp    = bg;
+    bg         = fg;
+    fg         = tmp;
+  }
+  if ((this.attr & (0x0100 /* ATTR_REVERSE */ | 0x0400 /* ATTR_DIM */)) == 0x0400 /* ATTR_DIM */) {
+    fg         = 8; // Dark grey
+  } else if (this.attr & 0x0800 /* ATTR_BRIGHT */) {
+    fg        |= 8;
+    this.style = 'font-weight: bold;';
+  }
+  if (this.attr & 0x1000 /* ATTR_BLINK */) {
+    this.style = 'text-decoration: blink;';
+  }
+  this.color   = 'ansi' + fg + ' bgAnsi' + bg;
+};
+
+VT100.prototype.setAttrColors = function(attr) {
+  if (attr != this.attr) {
+    this.attr = attr;
+    this.updateStyle();
+  }
+};
+
+VT100.prototype.saveCursor = function() {
+  this.savedX[this.currentScreen]     = this.cursorX;
+  this.savedY[this.currentScreen]     = this.cursorY;
+  this.savedAttr[this.currentScreen]  = this.attr;
+  this.savedUseGMap                   = this.useGMap;
+  for (var i = 0; i < 4; i++) {
+    this.savedGMap[i]                 = this.GMap[i];
+  }
+  this.savedValid[this.currentScreen] = true;
+};
+
+VT100.prototype.restoreCursor = function() {
+  if (!this.savedValid[this.currentScreen]) {
+    return;
+  }
+  this.attr      = this.savedAttr[this.currentScreen];
+  this.updateStyle();
+  this.useGMap   = this.savedUseGMap;
+  for (var i = 0; i < 4; i++) {
+    this.GMap[i] = this.savedGMap[i];
+  }
+  this.translate = this.GMap[this.useGMap];
+  this.needWrap  = false;
+  this.gotoXY(this.savedX[this.currentScreen],
+              this.savedY[this.currentScreen]);
+};
+
+VT100.prototype.getTransformName = function() {
+  var styles = [ 'transform', 'WebkitTransform', 'MozTransform', 'filter' ];
+  for (var i = 0; i < styles.length; ++i) {
+    if (typeof this.console[0].style[styles[i]] != 'undefined') {
+      return styles[i];
+    }
+  }
+  return undefined;
+};
+
+VT100.prototype.getTransformStyle = function(transform, scale) {
+  return scale && scale != 1.0
+    ? transform == 'filter'
+      ? 'progid:DXImageTransform.Microsoft.Matrix(' +
+                                 'M11=' + (1.0/scale) + ',M12=0,M21=0,M22=1,' +
+                                 "sizingMethod='auto expand')"
+      : 'translateX(-50%) ' +
+        'scaleX(' + (1.0/scale) + ') ' +
+        'translateX(50%)'
+    : '';
+};
+
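+// DECCOLM (80/132 column mode) support: rather than reflowing the DOM,
+// the console is compressed horizontally by 1/1.65 (132/80 = 1.65) with a
+// CSS transform, or a DXImageTransform matrix filter on old IE.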
+VT100.prototype.set80_132Mode = function(state) {
+  var transform                  = this.getTransformName();
+  if (transform) {
+    if ((this.console[this.currentScreen].style[transform] != '') == state) {
+      return;
+    }
+    var style                    = state ?
+                                   this.getTransformStyle(transform, 1.65):'';
+    this.console[this.currentScreen].style[transform] = style;
+    this.cursor.style[transform] = style;
+    this.space.style[transform]  = style;
+    this.scale                   = state ? 1.65 : 1.0;
+    if (transform == 'filter') {
+      this.console[this.currentScreen].style.width = state ? '165%' : '';
+    }
+    this.resizer();
+  }
+};
+
+VT100.prototype.setMode = function(state) {
+  for (var i = 0; i <= this.npar; i++) {
+    if (this.isQuestionMark) {
+      switch (this.par[i]) {
+      case  1: this.cursorKeyMode      = state;                      break;
+      case  3: this.set80_132Mode(state);                            break;
+      case  5: this.isInverted = state; this.refreshInvertedState(); break;
+      case  6: this.offsetMode         = state;                      break;
+      case  7: this.autoWrapMode       = state;                      break;
+      case 1000:
+      case  9: this.mouseReporting     = state;                      break;
+      case 25: this.cursorNeedsShowing = state;
+               if (state) { this.showCursor(); }
+               else       { this.hideCursor(); }                     break;
+      case 1047:
+      case 1049:
+      case 47: this.enableAlternateScreen(state);                    break;
+      default:                                                       break;
+      }
+    } else {
+      switch (this.par[i]) {
+      case  3: this.dispCtrl           = state;                      break;
+      case  4: this.insertMode         = state;                      break;
+      case 20: this.crLfMode           = state;                      break;
+      default:                                                       break;
+      }
+    }
+  }
+};
+
+VT100.prototype.statusReport = function() {
+  // Ready and operational.
+  this.respondString += '\u001B[0n';
+};
+
+VT100.prototype.cursorReport = function() {
+  this.respondString += '\u001B[' +
+                        (this.cursorY + (this.offsetMode ? this.top + 1 : 1)) +
+                        ';' +
+                        (this.cursorX + 1) +
+                        'R';
+};
+
+VT100.prototype.setCursorAttr = function(setAttr, xorAttr) {
+  // Changing of cursor color is not implemented.
+};
+
+VT100.prototype.openPrinterWindow = function() {
+  var rc            = true;
+  try {
+    if (!this.printWin || this.printWin.closed) {
+      this.printWin = window.open('', 'print-output',
+        'width=800,height=600,directories=no,location=no,menubar=yes,' +
+        'status=no,toolbar=no,titlebar=yes,scrollbars=yes,resizable=yes');
+      this.printWin.document.body.innerHTML =
+        '<link rel="stylesheet" href="' +
+          document.location.protocol + '//' + document.location.host +
+          document.location.pathname.replace(/[^/]*$/, '') +
+          'print-styles.css" type="text/css">\n' +
+        '<div id="options"><input id="autoprint" type="checkbox"' +
+          (this.autoprint ? ' checked' : '') + '>' +
+          'Automatically print page(s) when job is ready' +
+        '</input></div>\n' +
+        '<div id="spacer"><input type="checkbox">&nbsp;</input></div>' +
+        '<pre id="print"></pre>\n';
+      var autoprint = this.printWin.document.getElementById('autoprint');
+      this.addListener(autoprint, 'click',
+                       (function(vt100, autoprint) {
+                         return function() {
+                           vt100.autoprint = autoprint.checked;
+                           vt100.storeUserSettings();
+                           return false;
+                         };
+                       })(this, autoprint));
+      this.printWin.document.title = 'ShellInABox Printer Output';
+    }
+  } catch (e) {
+    // Maybe a popup blocker prevented us from working. Better catch the
+    // exception, so that we won't break the entire terminal session. The
+    // user probably needs to disable the blocker first before retrying the
+    // operation.
+    rc              = false;
+  }
+  rc               &= this.printWin && !this.printWin.closed &&
+                      (this.printWin.innerWidth ||
+                       this.printWin.document.documentElement.clientWidth ||
+                       this.printWin.document.body.clientWidth) > 1;
+
+  if (!rc && this.printing == 100) {
+    // Different popup blockers work differently. We try to detect a couple
+    // of common methods, and then check again a short while later, as
+    // false positives are otherwise possible. If we are sure that there is
+    // a popup blocker in effect, we alert the user to it. This is helpful
+    // as some popup blockers have minimal or no UI, and the user might not
+    // notice that they are missing the popup. In any case, we only show at
+    // most one message per print job.
+    this.printing   = true;
+    setTimeout((function(win) {
+                  return function() {
+                    if (!win || win.closed ||
+                        (win.innerWidth ||
+                         win.document.documentElement.clientWidth ||
+                         win.document.body.clientWidth) <= 1) {
+                      alert('Attempted to print, but a popup blocker ' +
+                            'prevented the printer window from opening');
+                    }
+                  };
+                })(this.printWin), 2000);
+  }
+  return rc;
+};
+
+VT100.prototype.sendToPrinter = function(s) {
+  this.openPrinterWindow();
+  try {
+    var doc   = this.printWin.document;
+    var print = doc.getElementById('print');
+    if (print.lastChild && print.lastChild.nodeName == '#text') {
+      print.lastChild.textContent += this.replaceChar(s, ' ', '\u00A0');
+    } else {
+      print.appendChild(doc.createTextNode(this.replaceChar(s, ' ','\u00A0')));
+    }
+  } catch (e) {
+    // There probably was a more aggressive popup blocker that prevented us
+    // from accessing the printer window.
+  }
+};
+
+VT100.prototype.sendControlToPrinter = function(ch) {
+  // We get called whenever doControl() is active. But for the printer, we
+  // only implement a basic line printer that doesn't understand most of
+  // the escape sequences of the VT100 terminal. In fact, the only escape
+  // sequence that we really need to recognize is '^[[4i' for turning the
+  // printer off.
+  try {
+    switch (ch) {
+    case  9:
+      // HT
+      this.openPrinterWindow();
+      var doc                 = this.printWin.document;
+      var print               = doc.getElementById('print');
+      var chars               = print.lastChild &&
+                                print.lastChild.nodeName == '#text' ?
+                                print.lastChild.textContent.length : 0;
+      this.sendToPrinter(this.spaces(8 - (chars % 8)));
+      break;
+    case 10:
+      // LF (ignored; the line break is emitted when the CR arrives)
+      break;
+    case 12:
+      // FF
+      this.openPrinterWindow();
+      var pageBreak           = this.printWin.document.createElement('div');
+      pageBreak.className     = 'pagebreak';
+      pageBreak.innerHTML     = '<hr />';
+      this.printWin.document.getElementById('print').appendChild(pageBreak);
+      break;
+    case 13:
+      // CR (emit a line break)
+      this.openPrinterWindow();
+      var lineBreak           = this.printWin.document.createElement('br');
+      this.printWin.document.getElementById('print').appendChild(lineBreak);
+      break;
+    case 27:
+      // ESC
+      this.isEsc              = 1 /* ESesc */;
+      break;
+    default:
+      switch (this.isEsc) {
+      case 1 /* ESesc */:
+        this.isEsc            = 0 /* ESnormal */;
+        switch (ch) {
+        case 0x5B /*[*/:
+          this.isEsc          = 2 /* ESsquare */;
+          break;
+        default:
+          break;
+        }
+        break;
+      case 2 /* ESsquare */:
+        this.npar             = 0;
+        this.par              = [ 0, 0, 0, 0, 0, 0, 0, 0,
+                                  0, 0, 0, 0, 0, 0, 0, 0 ];
+        this.isEsc            = 3 /* ESgetpars */;
+        this.isQuestionMark   = ch == 0x3F /*?*/;
+        if (this.isQuestionMark) {
+          break;
+        }
+        // Fall through
+      case 3 /* ESgetpars */: 
+        if (ch == 0x3B /*;*/) {
+          this.npar++;
+          break;
+        } else if (ch >= 0x30 /*0*/ && ch <= 0x39 /*9*/) {
+          var par             = this.par[this.npar];
+          if (par == undefined) {
+            par               = 0;
+          }
+          this.par[this.npar] = 10*par + (ch & 0xF);
+          break;
+        } else {
+          this.isEsc          = 4 /* ESgotpars */;
+        }
+        // Fall through
+      case 4 /* ESgotpars */:
+        this.isEsc            = 0 /* ESnormal */;
+        if (this.isQuestionMark) {
+          break;
+        }
+        switch (ch) {
+        case 0x69 /*i*/:
+          this.csii(this.par[0]);
+          break;
+        default:
+          break;
+        }
+        break;
+      default:
+        this.isEsc            = 0 /* ESnormal */;
+        break;
+      }
+      break;
+    }
+  } catch (e) {
+    // There probably was a more aggressive popup blocker that prevented us
+    // from accessing the printer window.
+  }
+};
+
+VT100.prototype.csiAt = function(number) {
+  // Insert spaces
+  if (number == 0) {
+    number      = 1;
+  }
+  if (number > this.terminalWidth - this.cursorX) {
+    number      = this.terminalWidth - this.cursorX;
+  }
+  this.scrollRegion(this.cursorX, this.cursorY,
+                    this.terminalWidth - this.cursorX - number, 1,
+                    number, 0, this.color, this.style);
+  this.needWrap = false;
+};
+
+VT100.prototype.csii = function(number) {
+  // Printer control
+  switch (number) {
+  case 0: // Print Screen
+    window.print();
+    break;
+  case 4: // Stop printing
+    try {
+      if (this.printing && this.printWin && !this.printWin.closed) {
+        var print = this.printWin.document.getElementById('print');
+        while (print.lastChild &&
+               print.lastChild.tagName == 'DIV' &&
+               print.lastChild.className == 'pagebreak') {
+          // Remove trailing blank pages
+          print.removeChild(print.lastChild);
+        }
+        if (this.autoprint) {
+          this.printWin.print();
+        }
+      }
+    } catch (e) {
+    }
+    this.printing = false;
+    break;
+  case 5: // Start printing
+    if (!this.printing && this.printWin && !this.printWin.closed) {
+      this.printWin.document.getElementById('print').innerHTML = '';
+    }
+    this.printing = 100;
+    break;
+  default:
+    break;
+  }
+};
+
+VT100.prototype.csiJ = function(number) {
+  switch (number) {
+  case 0: // Erase from cursor to end of display
+    this.clearRegion(this.cursorX, this.cursorY,
+                     this.terminalWidth - this.cursorX, 1,
+                     this.color, this.style);
+    if (this.cursorY < this.terminalHeight-1) {
+      this.clearRegion(0, this.cursorY+1,
+                       this.terminalWidth, this.terminalHeight-this.cursorY-1,
+                       this.color, this.style);
+    }
+    break;
+  case 1: // Erase from start to cursor
+    if (this.cursorY > 0) {
+      this.clearRegion(0, 0,
+                       this.terminalWidth, this.cursorY,
+                       this.color, this.style);
+    }
+    this.clearRegion(0, this.cursorY, this.cursorX + 1, 1,
+                     this.color, this.style);
+    break;
+  case 2: // Erase whole display
+    this.clearRegion(0, 0, this.terminalWidth, this.terminalHeight,
+                     this.color, this.style);
+    break;
+  default:
+    return;
+  }
+  this.needWrap = false;
+};
+
+VT100.prototype.csiK = function(number) {
+  switch (number) {
+  case 0: // Erase from cursor to end of line
+    this.clearRegion(this.cursorX, this.cursorY,
+                     this.terminalWidth - this.cursorX, 1,
+                     this.color, this.style);
+    break;
+  case 1: // Erase from start of line to cursor
+    this.clearRegion(0, this.cursorY, this.cursorX + 1, 1,
+                     this.color, this.style);
+    break;
+  case 2: // Erase whole line
+    this.clearRegion(0, this.cursorY, this.terminalWidth, 1,
+                     this.color, this.style);
+    break;
+  default:
+    return;
+  }
+  this.needWrap = false;
+};
+
+VT100.prototype.csiL = function(number) {
+  // Open line by inserting blank line(s)
+  if (this.cursorY >= this.bottom) {
+    return;
+  }
+  if (number == 0) {
+    number = 1;
+  }
+  if (number > this.bottom - this.cursorY) {
+    number = this.bottom - this.cursorY;
+  }
+  this.scrollRegion(0, this.cursorY,
+                    this.terminalWidth, this.bottom - this.cursorY - number,
+                    0, number, this.color, this.style);
+  this.needWrap = false;
+};
+
+VT100.prototype.csiM = function(number) {
+  // Delete line(s), scrolling up the bottom of the screen.
+  if (this.cursorY >= this.bottom) {
+    return;
+  }
+  if (number == 0) {
+    number = 1;
+  }
+  if (number > this.bottom - this.cursorY) {
+    number = this.bottom - this.cursorY;
+  }
+  this.scrollRegion(0, this.cursorY + number,
+                    this.terminalWidth, this.bottom - this.cursorY - number,
+                    0, -number, this.color, this.style);
+  this.needWrap = false;
+};
+
+VT100.prototype.csim = function() {
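+  // Process SGR ("Select Graphic Rendition") parameters: 0 resets the
+  // attributes, 1/2 select bright/dim, 4/5/7 set underline/blink/reverse
+  // (21/22/24/25/27 clear them), 30-37 and 40-47 select the foreground
+  // and background colors, and 10-12 switch the character translation map.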
+  for (var i = 0; i <= this.npar; i++) {
+    switch (this.par[i]) {
+    case 0:  this.attr  = 0x00F0 /* ATTR_DEFAULT */;                                break;
+    case 1:  this.attr  = (this.attr & ~0x0400 /* ATTR_DIM */)|0x0800 /* ATTR_BRIGHT */;         break;
+    case 2:  this.attr  = (this.attr & ~0x0800 /* ATTR_BRIGHT */)|0x0400 /* ATTR_DIM */;         break;
+    case 4:  this.attr |= 0x0200 /* ATTR_UNDERLINE */;                              break;
+    case 5:  this.attr |= 0x1000 /* ATTR_BLINK */;                                  break;
+    case 7:  this.attr |= 0x0100 /* ATTR_REVERSE */;                                break;
+    case 10:
+      this.translate    = this.GMap[this.useGMap];
+      this.dispCtrl     = false;
+      this.toggleMeta   = false;
+      break;
+    case 11:
+      this.translate    = this.CodePage437Map;
+      this.dispCtrl     = true;
+      this.toggleMeta   = false;
+      break;
+    case 12:
+      this.translate    = this.CodePage437Map;
+      this.dispCtrl     = true;
+      this.toggleMeta   = true;
+      break;
+    case 21:
+    case 22: this.attr &= ~(0x0800 /* ATTR_BRIGHT */|0x0400 /* ATTR_DIM */);                     break;
+    case 24: this.attr &= ~ 0x0200 /* ATTR_UNDERLINE */;                            break;
+    case 25: this.attr &= ~ 0x1000 /* ATTR_BLINK */;                                break;
+    case 27: this.attr &= ~ 0x0100 /* ATTR_REVERSE */;                              break;
+    case 38: this.attr  = (this.attr & ~(0x0400 /* ATTR_DIM */|0x0800 /* ATTR_BRIGHT */|0x0F))|
+                          0x0200 /* ATTR_UNDERLINE */;                              break;
+    case 39: this.attr &= ~(0x0400 /* ATTR_DIM */|0x0800 /* ATTR_BRIGHT */|0x0200 /* ATTR_UNDERLINE */|0x0F); break;
+    case 49: this.attr |= 0xF0;                                        break;
+    default:
+      if (this.par[i] >= 30 && this.par[i] <= 37) {
+          var fg        = this.par[i] - 30;
+          this.attr     = (this.attr & ~0x0F) | fg;
+      } else if (this.par[i] >= 40 && this.par[i] <= 47) {
+          var bg        = this.par[i] - 40;
+          this.attr     = (this.attr & ~0xF0) | (bg << 4);
+      }
+      break;
+    }
+  }
+  this.updateStyle();
+};
+
+VT100.prototype.csiP = function(number) {
+  // Delete character(s) following cursor
+  if (number == 0) {
+    number = 1;
+  }
+  if (number > this.terminalWidth - this.cursorX) {
+    number = this.terminalWidth - this.cursorX;
+  }
+  this.scrollRegion(this.cursorX + number, this.cursorY,
+                    this.terminalWidth - this.cursorX - number, 1,
+                    -number, 0, this.color, this.style);
+  this.needWrap = false;
+};
+
+VT100.prototype.csiX = function(number) {
+  // Clear characters following cursor
+  if (number == 0) {
+    number++;
+  }
+  if (number > this.terminalWidth - this.cursorX) {
+    number = this.terminalWidth - this.cursorX;
+  }
+  this.clearRegion(this.cursorX, this.cursorY, number, 1,
+                   this.color, this.style);
+  this.needWrap = false;
+};
+
+VT100.prototype.settermCommand = function() {
+  // Setterm commands are not implemented
+};
+
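+// doControl() implements the escape-sequence state machine. this.isEsc
+// holds the current parser state (ESnormal, ESesc, ESsquare, ESgetpars,
+// ...; see the inline /* ... */ annotations), and any string returned
+// here (e.g. the repeated character for CSI b) is fed back through
+// vt100() by the caller.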
+VT100.prototype.doControl = function(ch) {
+  if (this.printing) {
+    this.sendControlToPrinter(ch);
+    return '';
+  }
+  var lineBuf                = '';
+  switch (ch) {
+  case 0x00: /* ignored */                                              break;
+  case 0x08: this.bs();                                                 break;
+  case 0x09: this.ht();                                                 break;
+  case 0x0A:
+  case 0x0B:
+  case 0x0C:
+  case 0x84: this.lf(); if (!this.crLfMode)                             break;
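+             // If CR/LF mode is enabled, fall through to the CR case so
+             // that a line feed also returns the cursor to column 0.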
+  case 0x0D: this.cr();                                                 break;
+  case 0x85: this.cr(); this.lf();                                      break;
+  case 0x0E: this.useGMap     = 1;
+             this.translate   = this.GMap[1];
+             this.dispCtrl    = true;                                   break;
+  case 0x0F: this.useGMap     = 0;
+             this.translate   = this.GMap[0];
+             this.dispCtrl    = false;                                  break;
+  case 0x18:
+  case 0x1A: this.isEsc       = 0 /* ESnormal */;                               break;
+  case 0x1B: this.isEsc       = 1 /* ESesc */;                                  break;
+  case 0x7F: /* ignored */                                              break;
+  case 0x88: this.userTabStop[this.cursorX] = true;                     break;
+  case 0x8D: this.ri();                                                 break;
+  case 0x8E: this.isEsc       = 18 /* ESss2 */;                                  break;
+  case 0x8F: this.isEsc       = 19 /* ESss3 */;                                  break;
+  case 0x9A: this.respondID();                                          break;
+  case 0x9B: this.isEsc       = 2 /* ESsquare */;                               break;
+  case 0x07: if (this.isEsc != 17 /* EStitle */) {
+               this.beep();                                             break;
+             }
+             /* fall thru */
+  default:   switch (this.isEsc) {
+    case 1 /* ESesc */:
+      this.isEsc              = 0 /* ESnormal */;
+      switch (ch) {
+/*%*/ case 0x25: this.isEsc   = 13 /* ESpercent */;                              break;
+/*(*/ case 0x28: this.isEsc   = 8 /* ESsetG0 */;                                break;
+/*-*/ case 0x2D:
+/*)*/ case 0x29: this.isEsc   = 9 /* ESsetG1 */;                                break;
+/*.*/ case 0x2E:
+/***/ case 0x2A: this.isEsc   = 10 /* ESsetG2 */;                                break;
+/*/*/ case 0x2F:
+/*+*/ case 0x2B: this.isEsc   = 11 /* ESsetG3 */;                                break;
+/*#*/ case 0x23: this.isEsc   = 7 /* EShash */;                                 break;
+/*7*/ case 0x37: this.saveCursor();                                     break;
+/*8*/ case 0x38: this.restoreCursor();                                  break;
+/*>*/ case 0x3E: this.applKeyMode = false;                              break;
+/*=*/ case 0x3D: this.applKeyMode = true;                               break;
+/*D*/ case 0x44: this.lf();                                             break;
+/*E*/ case 0x45: this.cr(); this.lf();                                  break;
+/*M*/ case 0x4D: this.ri();                                             break;
+/*N*/ case 0x4E: this.isEsc   = 18 /* ESss2 */;                                  break;
+/*O*/ case 0x4F: this.isEsc   = 19 /* ESss3 */;                                  break;
+/*H*/ case 0x48: this.userTabStop[this.cursorX] = true;                 break;
+/*Z*/ case 0x5A: this.respondID();                                      break;
+/*[*/ case 0x5B: this.isEsc   = 2 /* ESsquare */;                               break;
+/*]*/ case 0x5D: this.isEsc   = 15 /* ESnonstd */;                               break;
+/*c*/ case 0x63: this.reset();                                          break;
+/*g*/ case 0x67: this.flashScreen();                                    break;
+      default:                                                          break;
+      }
+      break;
+    case 15 /* ESnonstd */:
+      switch (ch) {
+/*0*/ case 0x30:
+/*1*/ case 0x31:
+/*2*/ case 0x32: this.isEsc   = 17 /* EStitle */; this.titleString = '';         break;
+/*P*/ case 0x50: this.npar    = 0; this.par = [ 0, 0, 0, 0, 0, 0, 0 ];
+                 this.isEsc   = 16 /* ESpalette */;                              break;
+/*R*/ case 0x52: // Palette support is not implemented
+                 this.isEsc   = 0 /* ESnormal */;                               break;
+      default:   this.isEsc   = 0 /* ESnormal */;                               break;
+      }
+      break;
+    case 16 /* ESpalette */:
+      if ((ch >= 0x30 /*0*/ && ch <= 0x39 /*9*/) ||
+          (ch >= 0x41 /*A*/ && ch <= 0x46 /*F*/) ||
+          (ch >= 0x61 /*a*/ && ch <= 0x66 /*f*/)) {
+        this.par[this.npar++] = ch > 0x39  /*9*/ ? (ch & 0xDF) - 55
+                                                : (ch & 0xF);
+        if (this.npar == 7) {
+          // Palette support is not implemented
+          this.isEsc          = 0 /* ESnormal */;
+        }
+      } else {
+        this.isEsc            = 0 /* ESnormal */;
+      }
+      break;
+    case 2 /* ESsquare */:
+      this.npar               = 0;
+      this.par                = [ 0, 0, 0, 0, 0, 0, 0, 0,
+                                  0, 0, 0, 0, 0, 0, 0, 0 ];
+      this.isEsc              = 3 /* ESgetpars */;
+/*[*/ if (ch == 0x5B) { // Function key
+        this.isEsc            = 6 /* ESfunckey */;
+        break;
+      } else {
+/*?*/   this.isQuestionMark   = ch == 0x3F;
+        if (this.isQuestionMark) {
+          break;
+        }
+      }
+      // Fall through
+    case 5 /* ESdeviceattr */:
+    case 3 /* ESgetpars */: 
+/*;*/ if (ch == 0x3B) {
+        this.npar++;
+        break;
+      } else if (ch >= 0x30 /*0*/ && ch <= 0x39 /*9*/) {
+        var par               = this.par[this.npar];
+        if (par == undefined) {
+          par                 = 0;
+        }
+        this.par[this.npar]   = 10*par + (ch & 0xF);
+        break;
+      } else if (this.isEsc == 5 /* ESdeviceattr */) {
+        switch (ch) {
+/*c*/   case 0x63: if (this.par[0] == 0) this.respondSecondaryDA();     break;
+/*m*/   case 0x6D: /* (re)set key modifier resource values */           break;
+/*n*/   case 0x6E: /* disable key modifier resource values */           break;
+/*p*/   case 0x70: /* set pointer mode resource value */                break;
+        default:                                                        break;
+        }
+        this.isEsc            = 0 /* ESnormal */;
+        break;
+      } else {
+        this.isEsc            = 4 /* ESgotpars */;
+      }
+      // Fall through
+    case 4 /* ESgotpars */:
+      this.isEsc              = 0 /* ESnormal */;
+      if (this.isQuestionMark) {
+        switch (ch) {
+/*h*/   case 0x68: this.setMode(true);                                  break;
+/*l*/   case 0x6C: this.setMode(false);                                 break;
+/*c*/   case 0x63: this.setCursorAttr(this.par[2], this.par[1]);        break;
+        default:                                                        break;
+        }
+        this.isQuestionMark   = false;
+        break;
+      }
+      switch (ch) {
+/*!*/ case 0x21: this.isEsc   = 12 /* ESbang */;                                 break;
+/*>*/ case 0x3E: if (!this.npar) this.isEsc  = 5 /* ESdeviceattr */;            break;
+/*G*/ case 0x47:
+/*`*/ case 0x60: this.gotoXY(this.par[0] - 1, this.cursorY);            break;
+/*A*/ case 0x41: this.gotoXY(this.cursorX,
+                             this.cursorY - (this.par[0] ? this.par[0] : 1));
+                                                                        break;
+/*B*/ case 0x42:
+/*e*/ case 0x65: this.gotoXY(this.cursorX,
+                             this.cursorY + (this.par[0] ? this.par[0] : 1));
+                                                                        break;
+/*C*/ case 0x43:
+/*a*/ case 0x61: this.gotoXY(this.cursorX + (this.par[0] ? this.par[0] : 1),
+                             this.cursorY);                             break;
+/*D*/ case 0x44: this.gotoXY(this.cursorX - (this.par[0] ? this.par[0] : 1),
+                             this.cursorY);                             break;
+/*E*/ case 0x45: this.gotoXY(0, this.cursorY + (this.par[0] ? this.par[0] :1));
+                                                                        break;
+/*F*/ case 0x46: this.gotoXY(0, this.cursorY - (this.par[0] ? this.par[0] :1));
+                                                                        break;
+/*d*/ case 0x64: this.gotoXaY(this.cursorX, this.par[0] - 1);           break;
+/*H*/ case 0x48:
+/*f*/ case 0x66: this.gotoXaY(this.par[1] - 1, this.par[0] - 1);        break;
+/*I*/ case 0x49: this.ht(this.par[0] ? this.par[0] : 1);                break;
+/*@*/ case 0x40: this.csiAt(this.par[0]);                               break;
+/*i*/ case 0x69: this.csii(this.par[0]);                                break;
+/*J*/ case 0x4A: this.csiJ(this.par[0]);                                break;
+/*K*/ case 0x4B: this.csiK(this.par[0]);                                break;
+/*L*/ case 0x4C: this.csiL(this.par[0]);                                break;
+/*M*/ case 0x4D: this.csiM(this.par[0]);                                break;
+/*m*/ case 0x6D: this.csim();                                           break;
+/*P*/ case 0x50: this.csiP(this.par[0]);                                break;
+/*X*/ case 0x58: this.csiX(this.par[0]);                                break;
+/*S*/ case 0x53: this.lf(this.par[0] ? this.par[0] : 1);                break;
+/*T*/ case 0x54: this.ri(this.par[0] ? this.par[0] : 1);                break;
+/*c*/ case 0x63: if (!this.par[0]) this.respondID();                    break;
+/*g*/ case 0x67: if (this.par[0] == 0) {
+                   this.userTabStop[this.cursorX] = false;
+                 } else if (this.par[0] == 2 || this.par[0] == 3) {
+                   this.userTabStop               = [ ];
+                   for (var i = 0; i < this.terminalWidth; i++) {
+                     this.userTabStop[i]          = false;
+                   }
+                 }
+                 break;
+/*h*/ case 0x68: this.setMode(true);                                    break;
+/*l*/ case 0x6C: this.setMode(false);                                   break;
+/*n*/ case 0x6E: switch (this.par[0]) {
+                 case 5: this.statusReport();                           break;
+                 case 6: this.cursorReport();                           break;
+                 default:                                               break;
+                 }
+                 break;
+/*q*/ case 0x71: // LED control not implemented
+                                                                        break;
+/*r*/ case 0x72: var t        = this.par[0] ? this.par[0] : 1;
+                 var b        = this.par[1] ? this.par[1]
+                                            : this.terminalHeight;
+                 if (t < b && b <= this.terminalHeight) {
+                   this.top   = t - 1;
+                   this.bottom= b;
+                   this.gotoXaY(0, 0);
+                 }
+                 break;
+/*b*/ case 0x62: var c        = this.par[0] ? this.par[0] : 1;
+                 if (c > this.terminalWidth * this.terminalHeight) {
+                   c          = this.terminalWidth * this.terminalHeight;
+                 }
+                 while (c-- > 0) {
+                   lineBuf   += this.lastCharacter;
+                 }
+                 break;
+/*s*/ case 0x73: this.saveCursor();                                     break;
+/*u*/ case 0x75: this.restoreCursor();                                  break;
+/*Z*/ case 0x5A: this.rt(this.par[0] ? this.par[0] : 1);                break;
+/*]*/ case 0x5D: this.settermCommand();                                 break;
+      default:                                                          break;
+      }
+      break;
+    case 12 /* ESbang */:
+      if (ch == 'p') {
+        this.reset();
+      }
+      this.isEsc              = 0 /* ESnormal */;
+      break;
+    case 13 /* ESpercent */:
+      this.isEsc              = 0 /* ESnormal */;
+      switch (ch) {
+/*@*/ case 0x40: this.utfEnabled = false;                               break;
+/*G*/ case 0x47:
+/*8*/ case 0x38: this.utfEnabled = true;                                break;
+      default:                                                          break;
+      }
+      break;
+    case 6 /* ESfunckey */:
+      this.isEsc              = 0 /* ESnormal */;                               break;
+    case 7 /* EShash */:
+      this.isEsc              = 0 /* ESnormal */;
+/*8*/ if (ch == 0x38) {
+        // Screen alignment test not implemented
+      }
+      break;
+    case 8 /* ESsetG0 */:
+    case 9 /* ESsetG1 */:
+    case 10 /* ESsetG2 */:
+    case 11 /* ESsetG3 */:
+      var g                   = this.isEsc - 8 /* ESsetG0 */;
+      this.isEsc              = 0 /* ESnormal */;
+      switch (ch) {
+/*0*/ case 0x30: this.GMap[g] = this.VT100GraphicsMap;                  break;
+/*A*/ case 0x41:
+/*B*/ case 0x42: this.GMap[g] = this.Latin1Map;                         break;
+/*U*/ case 0x55: this.GMap[g] = this.CodePage437Map;                    break;
+/*K*/ case 0x4B: this.GMap[g] = this.DirectToFontMap;                   break;
+      default:                                                          break;
+      }
+      if (this.useGMap == g) {
+        this.translate        = this.GMap[g];
+      }
+      break;
+    case 17 /* EStitle */:
+      if (ch == 0x07) {
+        if (this.titleString && this.titleString.charAt(0) == ';') {
+          this.titleString    = this.titleString.substr(1);
+          if (this.titleString != '') {
+            this.titleString += ' - ';
+          }
+          this.titleString += 'Shell In A Box';
+        }
+        try {
+          window.document.title = this.titleString;
+        } catch (e) {
+        }
+        this.isEsc            = 0 /* ESnormal */;
+      } else {
+        this.titleString     += String.fromCharCode(ch);
+      }
+      break;
+    case 18 /* ESss2 */:
+    case 19 /* ESss3 */:
+      if (ch < 256) {
+        ch                    = this.GMap[this.isEsc - 18 /* ESss2 */ + 2]
+                                         [this.toggleMeta ? (ch | 0x80) : ch];
+        if ((ch & 0xFF00) == 0xF000) {
+          ch                  = ch & 0xFF;
+        } else if (ch == 0xFEFF || (ch >= 0x200A && ch <= 0x200F)) {
+          this.isEsc         = 0 /* ESnormal */;                                break;
+        }
+      }
+      this.lastCharacter      = String.fromCharCode(ch);
+      lineBuf                += this.lastCharacter;
+      this.isEsc              = 0 /* ESnormal */;                               break;
+    default:
+      this.isEsc              = 0 /* ESnormal */;                               break;
+    }
+    break;
+  }
+  return lineBuf;
+};
+
+VT100.prototype.renderString = function(s, showCursor) {
+  if (this.printing) {
+    this.sendToPrinter(s);
+    if (showCursor) {
+      this.showCursor();
+    }
+    return;
+  }
+
+  // We try to minimize the number of DOM operations by coalescing individual
+  // characters into strings. This is a significant performance improvement.
+  var incX = s.length;
+  if (incX > this.terminalWidth - this.cursorX) {
+    incX   = this.terminalWidth - this.cursorX;
+    if (incX <= 0) {
+      return;
+    }
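+    // Clip the string to the space remaining on the current row, but keep
+    // its final character so the last column shows the most recent glyph.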
+    s      = s.substr(0, incX - 1) + s.charAt(s.length - 1);
+  }
+  if (showCursor) {
+    // Minimize the number of calls to putString(), by avoiding a direct
+    // call to this.showCursor()
+    this.cursor.style.visibility = '';
+  }
+  this.putString(this.cursorX, this.cursorY, s, this.color, this.style);
+};
+
+VT100.prototype.vt100 = function(s) {
+  this.cursorNeedsShowing = this.hideCursor();
+  this.respondString      = '';
+  var lineBuf             = '';
+  for (var i = 0; i < s.length; i++) {
+    var ch = s.charCodeAt(i);
+    if (this.utfEnabled) {
+      // Decode UTF8 encoded character
+      if (ch > 0x7F) {
+        if (this.utfCount > 0 && (ch & 0xC0) == 0x80) {
+          this.utfChar    = (this.utfChar << 6) | (ch & 0x3F);
+          if (--this.utfCount <= 0) {
+            if (this.utfChar > 0xFFFF || this.utfChar < 0) {
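+              // Code points outside the BMP cannot be stored in a single
+              // UTF-16 unit here; replace them with U+FFFD.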
+              ch = 0xFFFD;
+            } else {
+              ch          = this.utfChar;
+            }
+          } else {
+            continue;
+          }
+        } else {
+          if ((ch & 0xE0) == 0xC0) {
+            this.utfCount = 1;
+            this.utfChar  = ch & 0x1F;
+          } else if ((ch & 0xF0) == 0xE0) {
+            this.utfCount = 2;
+            this.utfChar  = ch & 0x0F;
+          } else if ((ch & 0xF8) == 0xF0) {
+            this.utfCount = 3;
+            this.utfChar  = ch & 0x07;
+          } else if ((ch & 0xFC) == 0xF8) {
+            this.utfCount = 4;
+            this.utfChar  = ch & 0x03;
+          } else if ((ch & 0xFE) == 0xFC) {
+            this.utfCount = 5;
+            this.utfChar  = ch & 0x01;
+          } else {
+            this.utfCount = 0;
+          }
+          continue;
+        }
+      } else {
+        this.utfCount     = 0;
+      }
+    }
+    var isNormalCharacter =
+      (ch >= 32 && ch <= 127 || ch >= 160 ||
+       this.utfEnabled && ch >= 128 ||
+       !(this.dispCtrl ? this.ctrlAlways : this.ctrlAction)[ch & 0x1F]) &&
+      (ch != 0x7F || this.dispCtrl);
+    
+    if (isNormalCharacter && this.isEsc == 0 /* ESnormal */) {
+      if (ch < 256) {
+        ch                = this.translate[this.toggleMeta ? (ch | 0x80) : ch];
+      }
+      if ((ch & 0xFF00) == 0xF000) {
+        ch                = ch & 0xFF;
+      } else if (ch == 0xFEFF || (ch >= 0x200A && ch <= 0x200F)) {
+        continue;
+      }
+      if (!this.printing) {
+        if (this.needWrap || this.insertMode) {
+          if (lineBuf) {
+            this.renderString(lineBuf);
+            lineBuf       = '';
+          }
+        }
+        if (this.needWrap) {
+          this.cr(); this.lf();
+        }
+        if (this.insertMode) {
+          this.scrollRegion(this.cursorX, this.cursorY,
+                            this.terminalWidth - this.cursorX - 1, 1,
+                            1, 0, this.color, this.style);
+        }
+      }
+      this.lastCharacter  = String.fromCharCode(ch);
+      lineBuf            += this.lastCharacter;
+      if (!this.printing &&
+          this.cursorX + lineBuf.length >= this.terminalWidth) {
+        this.needWrap     = this.autoWrapMode;
+      }
+    } else {
+      if (lineBuf) {
+        this.renderString(lineBuf);
+        lineBuf           = '';
+      }
+      var expand          = this.doControl(ch);
+      if (expand.length) {
+        var r             = this.respondString;
+        this.respondString= r + this.vt100(expand);
+      }
+    }
+  }
+  if (lineBuf) {
+    this.renderString(lineBuf, this.cursorNeedsShowing);
+  } else if (this.cursorNeedsShowing) {
+    this.showCursor();
+  }
+  return this.respondString;
+};
+
+VT100.prototype.Latin1Map = [
+0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,
+0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
+0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,
+0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,
+0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
+0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,
+0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,
+0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
+0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F,
+0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
+0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,
+0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
+0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,
+0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7,
+0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,
+0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7,
+0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,
+0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00C6, 0x00C7,
+0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF,
+0x00D0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7,
+0x00D8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00DE, 0x00DF,
+0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7,
+0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF,
+0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7,
+0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF
+];
+
+VT100.prototype.VT100GraphicsMap = [
+0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,
+0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
+0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,
+0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+0x0028, 0x0029, 0x002A, 0x2192, 0x2190, 0x2191, 0x2193, 0x002F,
+0x2588, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
+0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,
+0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x00A0,
+0x25C6, 0x2592, 0x2409, 0x240C, 0x240D, 0x240A, 0x00B0, 0x00B1,
+0x2591, 0x240B, 0x2518, 0x2510, 0x250C, 0x2514, 0x253C, 0xF800,
+0xF801, 0x2500, 0xF803, 0xF804, 0x251C, 0x2524, 0x2534, 0x252C,
+0x2502, 0x2264, 0x2265, 0x03C0, 0x2260, 0x00A3, 0x00B7, 0x007F,
+0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
+0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,
+0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
+0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,
+0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7,
+0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,
+0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7,
+0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,
+0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00C6, 0x00C7,
+0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF,
+0x00D0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7,
+0x00D8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00DE, 0x00DF,
+0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7,
+0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF,
+0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7,
+0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF
+];
+
+VT100.prototype.CodePage437Map = [
+0x0000, 0x263A, 0x263B, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,
+0x25D8, 0x25CB, 0x25D9, 0x2642, 0x2640, 0x266A, 0x266B, 0x263C,
+0x25B6, 0x25C0, 0x2195, 0x203C, 0x00B6, 0x00A7, 0x25AC, 0x21A8,
+0x2191, 0x2193, 0x2192, 0x2190, 0x221F, 0x2194, 0x25B2, 0x25BC,
+0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,
+0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
+0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,
+0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,
+0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
+0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x2302,
+0x00C7, 0x00FC, 0x00E9, 0x00E2, 0x00E4, 0x00E0, 0x00E5, 0x00E7,
+0x00EA, 0x00EB, 0x00E8, 0x00EF, 0x00EE, 0x00EC, 0x00C4, 0x00C5,
+0x00C9, 0x00E6, 0x00C6, 0x00F4, 0x00F6, 0x00F2, 0x00FB, 0x00F9,
+0x00FF, 0x00D6, 0x00DC, 0x00A2, 0x00A3, 0x00A5, 0x20A7, 0x0192,
+0x00E1, 0x00ED, 0x00F3, 0x00FA, 0x00F1, 0x00D1, 0x00AA, 0x00BA,
+0x00BF, 0x2310, 0x00AC, 0x00BD, 0x00BC, 0x00A1, 0x00AB, 0x00BB,
+0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556,
+0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x255B, 0x2510,
+0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F,
+0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567,
+0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B,
+0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580,
+0x03B1, 0x00DF, 0x0393, 0x03C0, 0x03A3, 0x03C3, 0x00B5, 0x03C4,
+0x03A6, 0x0398, 0x03A9, 0x03B4, 0x221E, 0x03C6, 0x03B5, 0x2229,
+0x2261, 0x00B1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00F7, 0x2248,
+0x00B0, 0x2219, 0x00B7, 0x221A, 0x207F, 0x00B2, 0x25A0, 0x00A0
+];
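+
+// CodePage437Map translates legacy IBM PC (code page 437) byte values into
+// their Unicode equivalents. An illustrative lookup (an assumption about
+// how the table is consumed, not original code):
+//   VT100.prototype.CodePage437Map[0xDB]  // 0x2588, full block "█"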
+
+VT100.prototype.DirectToFontMap = [
+0xF000, 0xF001, 0xF002, 0xF003, 0xF004, 0xF005, 0xF006, 0xF007,
+0xF008, 0xF009, 0xF00A, 0xF00B, 0xF00C, 0xF00D, 0xF00E, 0xF00F,
+0xF010, 0xF011, 0xF012, 0xF013, 0xF014, 0xF015, 0xF016, 0xF017,
+0xF018, 0xF019, 0xF01A, 0xF01B, 0xF01C, 0xF01D, 0xF01E, 0xF01F,
+0xF020, 0xF021, 0xF022, 0xF023, 0xF024, 0xF025, 0xF026, 0xF027,
+0xF028, 0xF029, 0xF02A, 0xF02B, 0xF02C, 0xF02D, 0xF02E, 0xF02F,
+0xF030, 0xF031, 0xF032, 0xF033, 0xF034, 0xF035, 0xF036, 0xF037,
+0xF038, 0xF039, 0xF03A, 0xF03B, 0xF03C, 0xF03D, 0xF03E, 0xF03F,
+0xF040, 0xF041, 0xF042, 0xF043, 0xF044, 0xF045, 0xF046, 0xF047,
+0xF048, 0xF049, 0xF04A, 0xF04B, 0xF04C, 0xF04D, 0xF04E, 0xF04F,
+0xF050, 0xF051, 0xF052, 0xF053, 0xF054, 0xF055, 0xF056, 0xF057,
+0xF058, 0xF059, 0xF05A, 0xF05B, 0xF05C, 0xF05D, 0xF05E, 0xF05F,
+0xF060, 0xF061, 0xF062, 0xF063, 0xF064, 0xF065, 0xF066, 0xF067,
+0xF068, 0xF069, 0xF06A, 0xF06B, 0xF06C, 0xF06D, 0xF06E, 0xF06F,
+0xF070, 0xF071, 0xF072, 0xF073, 0xF074, 0xF075, 0xF076, 0xF077,
+0xF078, 0xF079, 0xF07A, 0xF07B, 0xF07C, 0xF07D, 0xF07E, 0xF07F,
+0xF080, 0xF081, 0xF082, 0xF083, 0xF084, 0xF085, 0xF086, 0xF087,
+0xF088, 0xF089, 0xF08A, 0xF08B, 0xF08C, 0xF08D, 0xF08E, 0xF08F,
+0xF090, 0xF091, 0xF092, 0xF093, 0xF094, 0xF095, 0xF096, 0xF097,
+0xF098, 0xF099, 0xF09A, 0xF09B, 0xF09C, 0xF09D, 0xF09E, 0xF09F,
+0xF0A0, 0xF0A1, 0xF0A2, 0xF0A3, 0xF0A4, 0xF0A5, 0xF0A6, 0xF0A7,
+0xF0A8, 0xF0A9, 0xF0AA, 0xF0AB, 0xF0AC, 0xF0AD, 0xF0AE, 0xF0AF,
+0xF0B0, 0xF0B1, 0xF0B2, 0xF0B3, 0xF0B4, 0xF0B5, 0xF0B6, 0xF0B7,
+0xF0B8, 0xF0B9, 0xF0BA, 0xF0BB, 0xF0BC, 0xF0BD, 0xF0BE, 0xF0BF,
+0xF0C0, 0xF0C1, 0xF0C2, 0xF0C3, 0xF0C4, 0xF0C5, 0xF0C6, 0xF0C7,
+0xF0C8, 0xF0C9, 0xF0CA, 0xF0CB, 0xF0CC, 0xF0CD, 0xF0CE, 0xF0CF,
+0xF0D0, 0xF0D1, 0xF0D2, 0xF0D3, 0xF0D4, 0xF0D5, 0xF0D6, 0xF0D7,
+0xF0D8, 0xF0D9, 0xF0DA, 0xF0DB, 0xF0DC, 0xF0DD, 0xF0DE, 0xF0DF,
+0xF0E0, 0xF0E1, 0xF0E2, 0xF0E3, 0xF0E4, 0xF0E5, 0xF0E6, 0xF0E7,
+0xF0E8, 0xF0E9, 0xF0EA, 0xF0EB, 0xF0EC, 0xF0ED, 0xF0EE, 0xF0EF,
+0xF0F0, 0xF0F1, 0xF0F2, 0xF0F3, 0xF0F4, 0xF0F5, 0xF0F6, 0xF0F7,
+0xF0F8, 0xF0F9, 0xF0FA, 0xF0FB, 0xF0FC, 0xF0FD, 0xF0FE, 0xF0FF
+];
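+
+// DirectToFontMap shifts every byte into U+F000..U+F0FF, the private-use
+// range conventionally treated by the Linux console as a "direct to font"
+// zone: the glyph is taken from the font slot matching the raw byte value,
+// bypassing character-set translation.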
+
+VT100.prototype.ctrlAction = [
+  true,  false, false, false, false, false, false, true,
+  true,  true,  true,  true,  true,  true,  true,  true,
+  false, false, false, false, false, false, false, false,
+  true,  false, true,  true,  false, false, false, false
+];
+
+VT100.prototype.ctrlAlways = [
+  true,  false, false, false, false, false, false, false,
+  true,  false, true,  false, true,  true,  true,  true,
+  false, false, false, false, false, false, false, false,
+  false, false, false, true,  false, false, false, false
+];
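+
+// A minimal sketch (an assumption, not original code) of how the two
+// 32-entry tables above apply to the C0 control codes 0x00-0x1F:
+// ctrlAction marks codes the emulator acts on while rendering output;
+// ctrlAlways marks codes that must be honored even inside an escape
+// sequence.
+//   if (ch < 32 && VT100.prototype.ctrlAlways[ch]) {
+//     // e.g. 0x08 BS, 0x0A LF and 0x0D CR are true in both tables
+//   }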
+
+
diff --git a/apps/workbench/public/webshell/styles.css b/apps/workbench/public/webshell/styles.css
new file mode 100644 (file)
index 0000000..3097cb4
--- /dev/null
@@ -0,0 +1,272 @@
+#vt100 a { 
+  text-decoration:      none;
+  color:                inherit;
+}
+
+#vt100 a:hover { 
+  text-decoration:      underline;
+}
+
+#vt100 #reconnect {
+  position:             absolute;
+  z-index:              2;
+}
+
+#vt100 #reconnect input { 
+  padding:              1ex;
+  font-weight:          bold;
+  font-size:            x-large;
+}
+
+#vt100 #cursize {
+  background:           #EEEEEE;
+  border:               1px solid black;
+  font-family:          sans-serif;
+  font-size:            large;
+  font-weight:          bold;
+  padding:              1ex;
+  position:             absolute;
+  z-index:              2;
+}
+
+#vt100 pre { 
+  margin:               0px;
+}
+
+#vt100 pre pre {
+  overflow:             hidden;
+}
+
+#vt100 #scrollable {
+  overflow-x:           hidden;
+  overflow-y:           scroll;
+  position:             relative;
+  padding:              1px;
+}
+
+#vt100 #console, #vt100 #alt_console, #vt100 #cursor, #vt100 #lineheight, #vt100 .hidden pre { 
+  font-family:          "DejaVu Sans Mono", "Everson Mono", FreeMono, "Andale Mono", monospace;
+}
+
+#vt100 #lineheight { 
+  position:             absolute;
+  visibility:           hidden;
+}
+
+#vt100 #cursor {
+  position:             absolute;
+  left:                 0px;
+  top:                  0px;
+  overflow:             hidden;
+  z-index:              1;
+}
+
+#vt100 #cursor.bright {
+  background-color:     black;
+  color:                white;
+}
+
+#vt100 #cursor.dim {
+  visibility:           hidden;
+}
+
+#vt100 #cursor.inactive {
+  border:               1px solid;
+  margin:               -1px;
+}
+
+#vt100 #padding { 
+  visibility:           hidden;
+  width:                1px;
+  height:               0px;
+  overflow:             hidden;
+}
+
+#vt100 .hidden {
+  position:             absolute;
+  top:                  -10000px;
+  left:                 -10000px;
+  width:                0px;
+  height:               0px;
+}
+
+#vt100 #menu { 
+  overflow:             visible;
+  position:             absolute;
+  z-index:              3;
+}
+
+#vt100 #menu .popup {
+  background-color:     #EEEEEE;
+  border:               1px solid black;
+  font-family:          sans-serif;
+  position:             absolute;
+}
+
+#vt100 #menu .popup ul { 
+  list-style-type:      none;
+  padding:              0px;
+  margin:               0px;
+  min-width:            10em;
+}
+
+#vt100 #menu .popup li { 
+  padding:              3px 0.5ex 3px 0.5ex;
+}
+
+#vt100 #menu .popup li.hover {
+  background-color:     #444444;
+  color:                white;
+}
+
+#vt100 #menu .popup li.disabled {
+  color:                #AAAAAA;
+}
+
+#vt100 #menu .popup hr { 
+  margin:               0.5ex 0px 0.5ex 0px;
+}
+
+#vt100 #menu img { 
+  margin-right:         0.5ex;
+  width:                1ex;
+  height:               1ex;
+}
+
+#vt100 #scrollable.inverted {
+  color:                #ffffff;
+  background-color:     #000000;
+}
+
+#vt100 #kbd_button { 
+  float:                left;
+  position:             fixed;
+  z-index:              0;
+  visibility:           hidden;
+}
+
+#vt100 #keyboard {
+  z-index:              3;
+  position:             absolute;
+}
+
+#vt100 #keyboard .box {
+  font-family:          sans-serif;
+  background-color:     #cccccc;
+  padding:              .8em;
+  float:                left;
+  position:             absolute;
+  border-radius:        10px;
+  -moz-border-radius:   10px;
+  box-shadow:           4px 4px 6px #222222;
+  -webkit-box-shadow:   4px 4px 6px #222222;
+  /* Don't set the -moz-box-shadow. It doesn't properly scale when CSS
+   * transforms are in effect. Once Firefox supports box-shadow, it should
+   * automatically do the right thing. Until then, leave shadows disabled
+   * for Firefox.
+   */
+  opacity:              0.85;
+  -moz-opacity:         0.85;
+  filter:               alpha(opacity=85);
+}
+
+#vt100 #keyboard .box * {
+  vertical-align:       top;
+  display:              inline-block;
+}
+
+#vt100 #keyboard b, #vt100 #keyboard i, #vt100 #keyboard s, #vt100 #keyboard u {
+  font-style:           normal;
+  font-weight:          bold;
+  border-radius:        5px;
+  -moz-border-radius:   5px;
+  background-color:     #555555;
+  color:                #eeeeee;
+  box-shadow:           2px 2px 3px #222222;
+  -webkit-box-shadow:   2px 2px 3px #222222;
+  padding:              4px;
+  margin:               2px;
+  height:               2ex;
+  display:              inline-block;
+  text-align:           center;
+  text-decoration:      none;
+}
+
+#vt100 #keyboard b, #vt100 #keyboard s {
+  width:                2ex;
+}
+
+#vt100 #keyboard u, #vt100 #keyboard s {
+  visibility:           hidden;
+}
+
+#vt100 #keyboard .shifted { 
+  display:              none;
+}
+
+#vt100 #keyboard .selected {
+  color:                #888888;
+  background-color:     #eeeeee;
+  box-shadow:           0px 0px 3px #222222;
+  -webkit-box-shadow:   0px 0px 3px #222222;
+  position:             relative;
+  top:                  1px;
+  left:                 1px;
+}
+
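+/* The [if]/[else]/[endif] markers below are not CSS; they are (presumably)
+ * directives for the ShellInABox-style template preprocessor that serves
+ * this file, so the ANSI palette below is emitted only when colors are not
+ * already defined here in styles.css.
+ */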
+[if DEFINES_COLORS]
+/* IE cannot properly handle "inherit" properties. So, the monochrome.css/
+ * color.css style sheets cannot work, if we define colors in styles.css.
+ */
+[else DEFINES_COLORS]
+#vt100 .ansi0               {                            }
+#vt100 .ansi1               { color:            #cd0000; }
+#vt100 .ansi2               { color:            #00cd00; }
+#vt100 .ansi3               { color:            #cdcd00; }
+#vt100 .ansi4               { color:            #0000ee; }
+#vt100 .ansi5               { color:            #cd00cd; }
+#vt100 .ansi6               { color:            #00cdcd; }
+#vt100 .ansi7               { color:            #e5e5e5; }
+#vt100 .ansi8               { color:            #7f7f7f; }
+#vt100 .ansi9               { color:            #ff0000; }
+#vt100 .ansi10              { color:            #00ff00; }
+#vt100 .ansi11              { color:            #e8e800; }
+#vt100 .ansi12              { color:            #5c5cff; }
+#vt100 .ansi13              { color:            #ff00ff; }
+#vt100 .ansi14              { color:            #00ffff; }
+#vt100 .ansi15              { color:            #ffffff; }
+
+#vt100 .bgAnsi0             { background-color: #000000; }
+#vt100 .bgAnsi1             { background-color: #cd0000; }
+#vt100 .bgAnsi2             { background-color: #00cd00; }
+#vt100 .bgAnsi3             { background-color: #cdcd00; }
+#vt100 .bgAnsi4             { background-color: #0000ee; }
+#vt100 .bgAnsi5             { background-color: #cd00cd; }
+#vt100 .bgAnsi6             { background-color: #00cdcd; }
+#vt100 .bgAnsi7             { background-color: #e5e5e5; }
+#vt100 .bgAnsi8             { background-color: #7f7f7f; }
+#vt100 .bgAnsi9             { background-color: #ff0000; }
+#vt100 .bgAnsi10            { background-color: #00ff00; }
+#vt100 .bgAnsi11            { background-color: #e8e800; }
+#vt100 .bgAnsi12            { background-color: #5c5cff; }
+#vt100 .bgAnsi13            { background-color: #ff00ff; }
+#vt100 .bgAnsi14            { background-color: #00ffff; }
+#vt100 .bgAnsi15            {                            }
+[endif DEFINES_COLORS]
+
+@media print {
+  #vt100 .scrollback {
+    display:            none;
+  }
+
+  #vt100 #reconnect, #vt100 #cursor, #vt100 #menu, #vt100 #kbd_button, #vt100 #keyboard { 
+    visibility:         hidden;
+  }
+
+  #vt100 #scrollable { 
+    overflow:           hidden;
+  }
+
+  #vt100 #console, #vt100 #alt_console { 
+    overflow:           hidden;
+    width:              1000000ex;
+  }
+}
diff --git a/apps/workbench/script/rails b/apps/workbench/script/rails
new file mode 100755 (executable)
index 0000000..f8da2cf
--- /dev/null
@@ -0,0 +1,6 @@
+#!/usr/bin/env ruby
+# This command will automatically be run when you run "rails" with Rails 3 gems installed from the root of your application.
+
+APP_PATH = File.expand_path('../../config/application',  __FILE__)
+require File.expand_path('../../config/boot',  __FILE__)
+require 'rails/commands'
diff --git a/apps/workbench/test/controllers/actions_controller_test.rb b/apps/workbench/test/controllers/actions_controller_test.rb
new file mode 100644 (file)
index 0000000..e768c6c
--- /dev/null
@@ -0,0 +1,206 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ActionsControllerTest < ActionController::TestCase
+
+  test "send report" do
+    post :report_issue, {format: 'js'}, session_for(:admin)
+    assert_response :success
+
+    found_email = false
+    ActionMailer::Base.deliveries.andand.each do |email|
+      if email.subject.include? "Issue reported by admin"
+        found_email = true
+        break
+      end
+    end
+    assert_equal true, found_email, 'Expected email after issue reported'
+  end
+
+  test "combine files into new collection" do
+    post(:combine_selected_files_into_collection, {
+           selection: ['zzzzz-4zz18-znfnqtbbv4spc3w/foo',
+                       'zzzzz-4zz18-ehbhgtheo8909or/bar',
+                       'zzzzz-4zz18-y9vne9npefyxh8g/baz',
+                       '7a6ef4c162a5c6413070a8bd0bffc818+150'],
+           format: "json"},
+         session_for(:active))
+
+    assert_response 302   # collection created and redirected to new collection page
+
+    assert_includes(response.headers['Location'], '/collections/')
+    new_collection_uuid = response.headers['Location'].split('/')[-1]
+
+    use_token :active
+    collection = Collection.select([:uuid, :manifest_text]).where(uuid: new_collection_uuid).first
+    manifest_text = collection['manifest_text']
+    assert_includes(manifest_text, "foo")
+    assert_includes(manifest_text, "bar")
+    assert_includes(manifest_text, "baz")
+    assert_includes(manifest_text, "0:0:file1 0:0:file2 0:0:file3")
+    assert_includes(manifest_text, "dir1/subdir")
+    assert_includes(manifest_text, "dir2")
+  end
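+
+  # Note on the selection format used above: an entry may be a collection
+  # uuid, a "uuid/filename" path picking a single file out of a collection,
+  # or a bare portable data hash (md5+size); the action merges all of them
+  # into one new collection.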
+
+  test "combine files  with repeated names into new collection" do
+    post(:combine_selected_files_into_collection, {
+           selection: ['zzzzz-4zz18-znfnqtbbv4spc3w/foo',
+                       'zzzzz-4zz18-00000nonamecoll/foo',
+                       'zzzzz-4zz18-abcd6fx123409f7/foo',
+                       'zzzzz-4zz18-ehbhgtheo8909or/bar',
+                       'zzzzz-4zz18-y9vne9npefyxh8g/baz',
+                       '7a6ef4c162a5c6413070a8bd0bffc818+150'],
+           format: "json"},
+         session_for(:active))
+
+    assert_response 302   # collection created and redirected to new collection page
+
+    assert_includes(response.headers['Location'], '/collections/')
+    new_collection_uuid = response.headers['Location'].split('/')[-1]
+
+    use_token :active
+    collection = Collection.select([:uuid, :manifest_text]).where(uuid: new_collection_uuid).first
+    manifest_text = collection['manifest_text']
+    assert_includes(manifest_text, "foo(1)")
+    assert_includes(manifest_text, "foo(2)")
+    assert_includes(manifest_text, "bar")
+    assert_includes(manifest_text, "baz")
+    assert_includes(manifest_text, "0:0:file1 0:0:file2 0:0:file3")
+    assert_includes(manifest_text, "dir1/subdir")
+    assert_includes(manifest_text, "dir2")
+  end
+
+  test "combine collections with repeated filenames in almost similar directories and expect files with proper suffixes" do
+    post(:combine_selected_files_into_collection, {
+           selection: ['zzzzz-4zz18-duplicatenames1',
+                       'zzzzz-4zz18-duplicatenames2',
+                       'zzzzz-4zz18-znfnqtbbv4spc3w/foo',
+                       'zzzzz-4zz18-00000nonamecoll/foo',],
+           format: "json"},
+         session_for(:active))
+
+    assert_response 302   # collection created and redirected to new collection page
+
+    assert_includes(response.headers['Location'], '/collections/')
+    new_collection_uuid = response.headers['Location'].split('/')[-1]
+
+    use_token :active
+    collection = Collection.select([:uuid, :manifest_text]).where(uuid: new_collection_uuid).first
+    manifest_text = collection['manifest_text']
+
+    assert_includes(manifest_text, 'foo')
+    assert_includes(manifest_text, 'foo(1)')
+
+    streams = manifest_text.split "\n"
+    streams.each do |stream|
+      if stream.start_with? './dir1'
+        # dir1 stream
+        assert_includes(stream, ':alice(1)')
+        assert_includes(stream, ':alice.txt')
+        assert_includes(stream, ':alice(1).txt')
+        assert_includes(stream, ':bob.txt')
+        assert_includes(stream, ':carol.txt')
+      elsif stream.start_with? './dir2'
+        # dir2 stream
+        assert_includes(stream, ':alice.txt')
+        assert_includes(stream, ':alice(1).txt')
+      elsif stream.start_with? '. '
+        # . stream
+        assert_includes(stream, ':foo')
+        assert_includes(stream, ':foo(1)')
+      end
+    end
+  end
+
+  test "combine collections with same filename in two different streams and expect no suffixes for filenames" do
+    post(:combine_selected_files_into_collection, {
+           selection: ['zzzzz-4zz18-znfnqtbbv4spc3w',
+                       'zzzzz-4zz18-foonbarfilesdir'],
+           format: "json"},
+         session_for(:active))
+
+    assert_response 302   # collection created and redirected to new collection page
+
+    assert_includes(response.headers['Location'], '/collections/')
+    new_collection_uuid = response.headers['Location'].split('/')[-1]
+
+    use_token :active
+    collection = Collection.select([:uuid, :manifest_text]).where(uuid: new_collection_uuid).first
+    manifest_text = collection['manifest_text']
+
+    streams = manifest_text.split "\n"
+    assert_equal 2, streams.length
+    streams.each do |stream|
+      if stream.start_with? './dir1'
+        assert_includes(stream, 'foo')
+      elsif stream.start_with? '. '
+        assert_includes(stream, 'foo')
+      end
+    end
+    refute_includes(manifest_text, 'foo(1)')
+  end
+
+  test "combine foo files from two different collection streams and expect proper filename suffixes" do
+    post(:combine_selected_files_into_collection, {
+           selection: ['zzzzz-4zz18-znfnqtbbv4spc3w/foo',
+                       'zzzzz-4zz18-foonbarfilesdir/dir1/foo'],
+           format: "json"},
+         session_for(:active))
+
+    assert_response 302   # collection created and redirected to new collection page
+
+    assert_includes(response.headers['Location'], '/collections/')
+    new_collection_uuid = response.headers['Location'].split('/')[-1]
+
+    use_token :active
+    collection = Collection.select([:uuid, :manifest_text]).where(uuid: new_collection_uuid).first
+    manifest_text = collection['manifest_text']
+
+    streams = manifest_text.split "\n"
+    assert_equal 1, streams.length, "Incorrect number of streams in #{manifest_text}"
+    assert_includes(manifest_text, 'foo')
+    assert_includes(manifest_text, 'foo(1)')
+  end
+
+  [
+    ['collections', 'user_agreement_in_anonymously_accessible_project'],
+    ['groups', 'anonymously_accessible_project'],
+    ['jobs', 'running_job_in_publicly_accessible_project'],
+    ['pipeline_instances', 'pipeline_in_publicly_accessible_project'],
+    ['pipeline_templates', 'pipeline_template_in_publicly_accessible_project'],
+  ].each do |dm, fixture|
+    test "access show method for public #{dm} and expect to see page" do
+      Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+      uuid = api_fixture(dm)[fixture]['uuid']
+      get(:show, {uuid: uuid})
+      assert_response :redirect
+      if dm == 'groups'
+        assert_includes @response.redirect_url, "projects/#{uuid}"
+      else
+        assert_includes @response.redirect_url, "#{dm}/#{uuid}"
+      end
+    end
+  end
+
+  [
+    ['collections', 'foo_collection_in_aproject', 404],
+    ['groups', 'subproject_in_asubproject_with_same_name_as_one_in_active_user_home', 404],
+    ['jobs', 'job_with_latest_version', 404],
+    ['pipeline_instances', 'pipeline_owned_by_active_in_home', 404],
+    ['pipeline_templates', 'template_in_asubproject_with_same_name_as_one_in_active_user_home', 404],
+    ['traits', 'owned_by_aproject_with_no_name', :redirect],
+  ].each do |dm, fixture, expected|
+    test "access show method for non-public #{dm} and expect #{expected}" do
+      Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+      get(:show, {uuid: api_fixture(dm)[fixture]['uuid']})
+      assert_response expected
+      if expected == 404
+        assert_includes @response.inspect, 'Log in'
+      else
+        assert_match /\/users\/welcome/, @response.redirect_url
+      end
+    end
+  end
+end
diff --git a/apps/workbench/test/controllers/api_client_authorizations_controller_test.rb b/apps/workbench/test/controllers/api_client_authorizations_controller_test.rb
new file mode 100644 (file)
index 0000000..a2a5eb6
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ApiClientAuthorizationsControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/application_controller_test.rb b/apps/workbench/test/controllers/application_controller_test.rb
new file mode 100644 (file)
index 0000000..45952ce
--- /dev/null
@@ -0,0 +1,517 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ApplicationControllerTest < ActionController::TestCase
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
+  setup do
+    @user_dataclass = ArvadosBase.resource_class_for_uuid(api_fixture('users')['active']['uuid'])
+  end
+
+  test "links for object" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    link_head_uuid = api_fixture('links')['foo_file_readable_by_active']['head_uuid']
+
+    links = ac.send :links_for_object, link_head_uuid
+
+    assert links, 'Expected links'
+    assert links.is_a?(Array), 'Expected an array'
+    assert links.size > 0, 'Expected at least one link'
+    assert links[0][:uuid], 'Expected uuid for the head_link'
+  end
+
+  test "preload links for objects and uuids" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    link1_head_uuid = api_fixture('links')['foo_file_readable_by_active']['head_uuid']
+    link3_head_uuid = api_fixture('links')['bar_file_readable_by_active']['head_uuid']
+
+    link2_object = User.find(api_fixture('users')['active']['uuid'])
+    link2_object_uuid = link2_object['uuid']
+
+    uuids = [link1_head_uuid, link2_object, link3_head_uuid]
+    links = ac.send :preload_links_for_objects, uuids
+
+    assert links, 'Expected links'
+    assert links.is_a?(Hash), 'Expected a hash'
+    assert links.size == 3, 'Expected three objects in the preloaded links hash'
+    assert links[link1_head_uuid], 'Expected links for the passed in link head_uuid'
+    assert links[link2_object_uuid], 'Expected links for the passed in object uuid'
+    assert links[link3_head_uuid], 'Expected links for the passed in link head_uuid'
+
+    # invoke again for this same input. this time, the preloaded data will be returned
+    links = ac.send :preload_links_for_objects, uuids
+    assert links, 'Expected links'
+    assert links.is_a?(Hash), 'Expected a hash'
+    assert links.size == 3, 'Expected three objects in the preloaded links hash'
+    assert links[link1_head_uuid], 'Expected links for the passed in link head_uuid'
+  end
+
+  [ [:preload_links_for_objects, [] ],
+    [:preload_collections_for_objects, [] ],
+    [:preload_log_collections_for_objects, [] ],
+    [:preload_objects_for_dataclass, [] ],
+    [:preload_for_pdhs, [] ],
+  ].each do |input|
+    test "preload data for empty array input #{input}" do
+      use_token :active
+
+      ac = ApplicationController.new
+
+      if input[0] == :preload_objects_for_dataclass
+        objects = ac.send input[0], @user_dataclass, input[1]
+      else
+        objects = ac.send input[0], input[1]
+      end
+
+      assert objects, 'Expected objects'
+      assert objects.is_a?(Hash), 'Expected a hash'
+      assert objects.size == 0, 'Expected no objects in the preloaded hash'
+    end
+  end
+
+  [ [:preload_links_for_objects, 'input not an array'],
+    [:preload_links_for_objects, nil],
+    [:links_for_object, nil],
+    [:preload_collections_for_objects, 'input not an array'],
+    [:preload_collections_for_objects, nil],
+    [:collections_for_object, nil],
+    [:preload_log_collections_for_objects, 'input not an array'],
+    [:preload_log_collections_for_objects, nil],
+    [:log_collections_for_object, nil],
+    [:preload_objects_for_dataclass, 'input not an array'],
+    [:preload_objects_for_dataclass, nil],
+    [:object_for_dataclass, 'some_dataclass', nil],
+    [:object_for_dataclass, nil, 'some_uuid'],
+    [:preload_for_pdhs, 'input not an array'],
+    [:preload_for_pdhs, nil],
+  ].each do |input|
+    test "preload data for wrong type input #{input}" do
+      use_token :active
+
+      ac = ApplicationController.new
+
+      if input[0] == :object_for_dataclass
+        assert_raise ArgumentError do
+          ac.send input[0], input[1], input[2]
+        end
+      else
+        assert_raise ArgumentError do
+          ac.send input[0], input[1]
+        end
+      end
+    end
+  end
+
+  [ [:links_for_object, 'no-such-uuid' ],
+    [:collections_for_object, 'no-such-uuid' ],
+    [:log_collections_for_object, 'no-such-uuid' ],
+    [:object_for_dataclass, 'no-such-uuid' ],
+    [:collection_for_pdh, 'no-such-pdh' ],
+  ].each do |input|
+    test "get data for no such uuid #{input}" do
+      use_token :active
+
+      ac = ApplicationController.new
+
+      if input[0] == :object_for_dataclass
+        object = ac.send input[0], @user_dataclass, input[1]
+        assert_not object, 'Expected no object'
+      else
+        objects = ac.send input[0], input[1]
+        assert objects, 'Expected objects'
+        assert objects.is_a?(Array), 'Expected an array'
+        assert_empty objects
+      end
+    end
+  end
+
+  test "get 10 objects of data class user" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    objects = ac.send :get_n_objects_of_class, @user_dataclass, 10
+
+    assert objects, 'Expected objects'
+    assert objects.is_a?(ArvadosResourceList), 'Expected an ArvadosResourceList'
+
+    first_object = objects.first
+    assert first_object, 'Expected at least one object'
+    assert_equal 'User', first_object.class.name, 'Expected user object'
+
+    # invoke it again. this time, the preloaded info will be returned
+    objects = ac.send :get_n_objects_of_class, @user_dataclass, 10
+    assert objects, 'Expected objects'
+    assert_equal 'User', objects.first.class.name, 'Expected user object'
+  end
+
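+  # Note: @user_dataclass is referenced below at class-definition time,
+  # where it is still nil, so those entries effectively exercise the
+  # nil-dataclass path; get_n_objects_of_class rejects it with
+  # ArgumentError either way.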
+  [ ['User', 10],
+    [nil, 10],
+    [@user_dataclass, 0],
+    [@user_dataclass, -1],
+    [@user_dataclass, nil] ].each do |input|
+    test "get_n_objects for incorrect input #{input}" do
+      use_token :active
+
+      ac = ApplicationController.new
+
+      assert_raise ArgumentError do
+        ac.send :get_n_objects_of_class, input[0], input[1]
+      end
+    end
+  end
+
+  test "collections for object" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    uuid = api_fixture('collections')['foo_file']['uuid']
+
+    collections = ac.send :collections_for_object, uuid
+
+    assert collections, 'Expected collections'
+    assert collections.is_a?(Array), 'Expected an array'
+    assert collections.size == 1, 'Expected one collection object'
+    assert_equal collections[0][:uuid], uuid, 'Expected uuid not found in collections'
+  end
+
+  test "preload collections for given uuids" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    uuid1 = api_fixture('collections')['foo_file']['uuid']
+    uuid2 = api_fixture('collections')['bar_file']['uuid']
+
+    uuids = [uuid1, uuid2]
+    collections = ac.send :preload_collections_for_objects, uuids
+
+    assert collections, 'Expected collection'
+    assert collections.is_a?(Hash), 'Expected a hash'
+    assert collections.size == 2, 'Expected two objects in the preloaded collection hash'
+    assert collections[uuid1], 'Expected collections for the passed in uuid'
+    assert_equal collections[uuid1].size, 1, 'Expected one collection for the passed in uuid'
+    assert collections[uuid2], 'Expected collections for the passed in uuid'
+    assert_equal collections[uuid2].size, 1, 'Expected one collection for the passed in uuid'
+
+    # invoke again for this same input. this time, the preloaded data will be returned
+    collections = ac.send :preload_collections_for_objects, uuids
+    assert collections, 'Expected collection'
+    assert collections.is_a?(Hash), 'Expected a hash'
+    assert collections.size == 2, 'Expected two objects in the preloaded collection hash'
+    assert collections[uuid1], 'Expected collections for the passed in uuid'
+  end
+
+  test "log collections for object" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    uuid = api_fixture('logs')['system_adds_foo_file']['object_uuid']
+
+    collections = ac.send :log_collections_for_object, uuid
+
+    assert collections, 'Expected collections'
+    assert collections.is_a?(Array), 'Expected an array'
+    assert collections.size == 1, 'Expected one collection object'
+    assert_equal collections[0][:uuid], uuid, 'Expected uuid not found in collections'
+  end
+
+  test "preload log collections for given uuids" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    uuid1 = api_fixture('logs')['system_adds_foo_file']['object_uuid']
+    uuid2 = api_fixture('collections')['bar_file']['uuid']
+
+    uuids = [uuid1, uuid2]
+    collections = ac.send :preload_log_collections_for_objects, uuids
+
+    assert collections, 'Expected collection'
+    assert collections.is_a?(Hash), 'Expected a hash'
+    assert collections.size == 2, 'Expected two objects in the preloaded collection hash'
+    assert collections[uuid1], 'Expected collections for the passed in uuid'
+    assert_equal collections[uuid1].size, 1, 'Expected one collection for the passed in uuid'
+    assert collections[uuid2], 'Expected collections for the passed in uuid'
+    assert_equal collections[uuid2].size, 1, 'Expected one collection for the passed in uuid'
+
+    # invoke again for this same input. this time, the preloaded data will be returned
+    collections = ac.send :preload_log_collections_for_objects, uuids
+    assert collections, 'Expected collection'
+    assert collections.is_a?(Hash), 'Expected a hash'
+    assert collections.size == 2, 'Expected two objects in the preloaded collection hash'
+    assert collections[uuid1], 'Expected collections for the passed in uuid'
+  end
+
+  test "object for dataclass" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    dataclass = ArvadosBase.resource_class_for_uuid(api_fixture('jobs')['running']['uuid'])
+    uuid = api_fixture('jobs')['running']['uuid']
+
+    obj = ac.send :object_for_dataclass, dataclass, uuid
+
+    assert obj, 'Expected object'
+    assert_equal 'Job', obj.class.name
+    assert_equal uuid, obj['uuid'], 'Expected uuid not found'
+    assert_equal api_fixture('jobs')['running']['script_version'], obj['script_version'],
+      'Expected script_version not found'
+  end
+
+  test "preload objects for dataclass" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    dataclass = ArvadosBase.resource_class_for_uuid(api_fixture('jobs')['running']['uuid'])
+
+    uuid1 = api_fixture('jobs')['running']['uuid']
+    uuid2 = api_fixture('jobs')['running_cancelled']['uuid']
+
+    uuids = [uuid1, uuid2]
+    users = ac.send :preload_objects_for_dataclass, dataclass, uuids
+
+    assert users, 'Expected objects'
+    assert users.is_a?(Hash), 'Expected a hash'
+
+    assert users.size == 2, 'Expected two objects in the preloaded hash'
+    assert users[uuid1], 'Expected user object for the passed in uuid'
+    assert users[uuid2], 'Expected user object for the passed in uuid'
+
+    # invoke again for this same input. this time, the preloaded data will be returned
+    users = ac.send :preload_objects_for_dataclass, dataclass, uuids
+    assert users, 'Expected objects'
+    assert users.is_a?(Hash), 'Expected a hash'
+    assert users.size == 2, 'Expected two objects in the preloaded hash'
+
+    # invoke again with one more uuid
+    uuids << api_fixture('jobs')['foobar']['uuid']
+    users = ac.send :preload_objects_for_dataclass, dataclass, uuids
+    assert users, 'Expected objects'
+    assert users.is_a?(Hash), 'Expected a hash'
+    assert users.size == 3, 'Expected three objects in the preloaded hash'
+  end
+
+  test "preload one collection each for given portable_data_hash list" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    pdh1 = api_fixture('collections')['foo_file']['portable_data_hash']
+    pdh2 = api_fixture('collections')['bar_file']['portable_data_hash']
+
+    pdhs = [pdh1, pdh2]
+    collections = ac.send :preload_for_pdhs, pdhs
+
+    assert collections, 'Expected collections map'
+    assert collections.is_a?(Hash), 'Expected a hash'
+    # Each pdh has more than one collection; however, we should get only one for each
+    assert collections.size == 2, 'Expected two objects in the preloaded collection hash'
+    assert collections[pdh1], "Expected collections for the passed in pdh #{pdh1}"
+    assert_equal collections[pdh1].size, 1, "Expected one collection for the passed in pdh #{pdh1}"
+    assert collections[pdh2], "Expected collections for the passed in pdh #{pdh2}"
+    assert_equal collections[pdh2].size, 1, "Expected one collection for the passed in pdh #{pdh2}"
+  end
+
+  test "requesting a nonexistent object returns 404" do
+    # We're really testing ApplicationController's find_object_by_uuid.
+    # It's easiest to do that by instantiating a concrete controller.
+    @controller = NodesController.new
+    get(:show, {id: "zzzzz-zzzzz-zzzzzzzzzzzzzzz"}, session_for(:admin))
+    assert_response 404
+  end
+
+  test "requesting to the API server includes X-Request-Id header" do
+    got_header = nil
+    stub_api_calls
+    stub_api_client.stubs(:post).with do |url, query, header={}|
+      got_header = header
+      true
+    end.returns fake_api_response('{}', 200, {})
+
+    Rails.configuration.anonymous_user_token =
+      api_fixture("api_client_authorizations", "anonymous", "api_token")
+    @controller = ProjectsController.new
+    test_uuid = "zzzzz-j7d0g-zzzzzzzzzzzzzzz"
+    get(:show, {id: test_uuid})
+
+    assert_not_nil got_header
+    assert_includes got_header, 'X-Request-Id'
+    assert_match /^req-[0-9a-zA-Z]{20}$/, got_header["X-Request-Id"]
+  end
+
+  test "current request_id is nil after a request" do
+    @controller = NodesController.new
+    get(:index, {}, session_for(:active))
+    assert_nil Thread.current[:request_id]
+  end
+
+  test "X-Request-Id header" do
+    @controller = NodesController.new
+    get(:index, {}, session_for(:active))
+    assert_match /^req-[0-9a-zA-Z]{20}$/, response.headers['X-Request-Id']
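+    # An illustrative value matching the pattern asserted above (this
+    # particular id is invented): req-0123456789abcdefghij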
+  end
+
+  [".navbar .login-menu a",
+   ".navbar .login-menu .dropdown-menu a"
+  ].each do |css_selector|
+    test "login link at #{css_selector.inspect} includes return_to param" do
+      # Without an anonymous token, we're immediately redirected to login.
+      Rails.configuration.anonymous_user_token =
+        api_fixture("api_client_authorizations", "anonymous", "api_token")
+      @controller = ProjectsController.new
+      test_uuid = "zzzzz-j7d0g-zzzzzzzzzzzzzzz"
+      get(:show, {id: test_uuid})
+      login_link = css_select(css_selector).first
+      assert_not_nil(login_link, "failed to select login link")
+      login_href = URI.unescape(login_link.attributes["href"].value)
+      # The parameter needs to include the full URL to work.
+      assert_includes(login_href, "://")
+      assert_match(/[\?&]return_to=[^&]*\/projects\/#{test_uuid}(&|$)/,
+                   login_href)
+    end
+  end
+
+  test "Workbench returns 4xx when API server is unreachable" do
+    # We're really testing ApplicationController's render_exception.
+    # Our primary concern is that it doesn't raise an error and
+    # return 500.
+    orig_api_server = Rails.configuration.arvados_v1_base
+    begin
+      # The URL should look valid in all respects, and avoid talking over a
+      # network.  100::/64 is the IPv6 discard prefix, so it's perfect.
+      Rails.configuration.arvados_v1_base = "https://[100::f]:1/"
+      @controller = NodesController.new
+      get(:index, {}, session_for(:active))
+      assert_includes(405..422, @response.code.to_i,
+                      "bad response code when API server is unreachable")
+    ensure
+      Rails.configuration.arvados_v1_base = orig_api_server
+    end
+  end
+
+  [
+    [CollectionsController.new, api_fixture('collections')['user_agreement_in_anonymously_accessible_project']],
+    [CollectionsController.new, api_fixture('collections')['user_agreement_in_anonymously_accessible_project'], false],
+    [JobsController.new, api_fixture('jobs')['running_job_in_publicly_accessible_project']],
+    [JobsController.new, api_fixture('jobs')['running_job_in_publicly_accessible_project'], false],
+    [PipelineInstancesController.new, api_fixture('pipeline_instances')['pipeline_in_publicly_accessible_project']],
+    [PipelineInstancesController.new, api_fixture('pipeline_instances')['pipeline_in_publicly_accessible_project'], false],
+    [PipelineTemplatesController.new, api_fixture('pipeline_templates')['pipeline_template_in_publicly_accessible_project']],
+    [PipelineTemplatesController.new, api_fixture('pipeline_templates')['pipeline_template_in_publicly_accessible_project'], false],
+    [ProjectsController.new, api_fixture('groups')['anonymously_accessible_project']],
+    [ProjectsController.new, api_fixture('groups')['anonymously_accessible_project'], false],
+  ].each do |controller, fixture, anon_config=true|
+    test "#{controller} show method with anonymous config #{anon_config ? '' : 'not '}enabled" do
+      if anon_config
+        Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+      else
+        Rails.configuration.anonymous_user_token = false
+      end
+
+      @controller = controller
+
+      get(:show, {id: fixture['uuid']})
+
+      if anon_config
+        assert_response 200
+        if controller.class == JobsController
+          assert_includes @response.inspect, fixture['script']
+        else
+          assert_includes @response.inspect, fixture['name']
+        end
+      else
+        assert_response :redirect
+        assert_match /\/users\/welcome/, @response.redirect_url
+      end
+    end
+  end
+
+  [
+    true,
+    false,
+  ].each do |config|
+    test "invoke show with include_accept_encoding_header config #{config}" do
+      Rails.configuration.include_accept_encoding_header_in_api_requests = config
+
+      @controller = CollectionsController.new
+      get(:show, {id: api_fixture('collections')['foo_file']['uuid']}, session_for(:admin))
+
+      assert_equal([['.', 'foo', 3]], assigns(:object).files)
+    end
+  end
+
+  test 'Edit name and verify that a duplicate is not created' do
+    @controller = ProjectsController.new
+    project = api_fixture("groups")["aproject"]
+    post :update, {
+      id: project["uuid"],
+      project: {
+        name: 'test name'
+      },
+      format: :json
+    }, session_for(:active)
+    assert_includes @response.body, 'test name'
+    updated = assigns(:object)
+    assert_equal updated.uuid, project["uuid"]
+    assert_equal 'test name', updated.name
+  end
+
+  [
+    [VirtualMachinesController.new, 'hostname', false],
+    [UsersController.new, 'first_name', true],
+  ].each do |controller, expect_str, expect_home_link|
+    test "access #{controller.controller_name} index as admin and verify Home link is#{' not' if !expect_home_link} shown" do
+      @controller = controller
+
+      get :index, {}, session_for(:admin)
+
+      assert_response 200
+      assert_includes @response.body, expect_str
+
+      home_link = "/projects/#{api_fixture('users')['active']['uuid']}"
+
+      if expect_home_link
+        refute_empty css_select("[href=\"/projects/#{api_fixture('users')['active']['uuid']}\"]")
+      else
+        assert_empty css_select("[href=\"/projects/#{api_fixture('users')['active']['uuid']}\"]")
+      end
+    end
+  end
+
+  [
+    [VirtualMachinesController.new, 'hostname', true],
+    [UsersController.new, 'first_name', false],
+  ].each do |controller, expect_str, expect_delete_link|
+    test "access #{controller.controller_name} index as admin and verify Delete option is#{' not' if !expect_delete_link} shown" do
+      @controller = controller
+
+      get :index, {}, session_for(:admin)
+
+      assert_response 200
+      assert_includes @response.body, expect_str
+      if expect_delete_link
+        refute_empty css_select('[data-method=delete]')
+      else
+        assert_empty css_select('[data-method=delete]')
+      end
+    end
+  end
+end
diff --git a/apps/workbench/test/controllers/authorized_keys_controller_test.rb b/apps/workbench/test/controllers/authorized_keys_controller_test.rb
new file mode 100644 (file)
index 0000000..fd55bc3
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class AuthorizedKeysControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/collections_controller_test.rb b/apps/workbench/test/controllers/collections_controller_test.rb
new file mode 100644 (file)
index 0000000..3ff02a8
--- /dev/null
@@ -0,0 +1,729 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class CollectionsControllerTest < ActionController::TestCase
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
+  include PipelineInstancesHelper
+
+  NONEXISTENT_COLLECTION = "ffffffffffffffffffffffffffffffff+0"
+
+  def config_anonymous enable
+    Rails.configuration.anonymous_user_token =
+      if enable
+        api_token('anonymous')
+      else
+        false
+      end
+  end
+
+  def collection_params(collection_name, file_name=nil)
+    uuid = api_fixture('collections')[collection_name.to_s]['uuid']
+    params = {uuid: uuid, id: uuid}
+    params[:file] = file_name if file_name
+    params
+  end
+
+  def assert_hash_includes(actual_hash, expected_hash, msg=nil)
+    expected_hash.each do |key, value|
+      assert_equal(value, actual_hash[key], msg)
+    end
+  end
+
+  def assert_no_session
+    assert_hash_includes(session, {arvados_api_token: nil},
+                         "session includes unexpected API token")
+  end
+
+  def assert_session_for_auth(client_auth)
+    api_token = self.api_token(client_auth.to_s)
+    assert_hash_includes(session, {arvados_api_token: api_token},
+                         "session token does not belong to #{client_auth}")
+  end
+
+  def show_collection(params, session={}, response=:success)
+    params = collection_params(params) if not params.is_a? Hash
+    session = session_for(session) if not session.is_a? Hash
+    get(:show, params, session)
+    assert_response response
+  end
+
+  test "viewing a collection" do
+    show_collection(:foo_file, :active)
+    assert_equal([['.', 'foo', 3]], assigns(:object).files)
+  end
+
+  test "viewing a collection with spaces in filename" do
+    show_collection(:w_a_z_file, :active)
+    assert_equal([['.', 'w a z', 5]], assigns(:object).files)
+  end
+
+  test "download a file with spaces in filename" do
+    setup_for_keep_web
+    collection = api_fixture('collections')['w_a_z_file']
+    get :show_file, {
+      uuid: collection['uuid'],
+      file: 'w a z'
+    }, session_for(:active)
+    assert_response :redirect
+    assert_match /w%20a%20z/, response.redirect_url
+  end
+
+  test "viewing a collection fetches related projects" do
+    show_collection({id: api_fixture('collections')["foo_file"]['portable_data_hash']}, :active)
+    assert_includes(assigns(:same_pdh).map(&:owner_uuid),
+                    api_fixture('groups')['aproject']['uuid'],
+                    "controller did not find linked project")
+  end
+
+  test "viewing a collection fetches related permissions" do
+    show_collection(:bar_file, :active)
+    assert_includes(assigns(:permissions).map(&:uuid),
+                    api_fixture('links')['bar_file_readable_by_active']['uuid'],
+                    "controller did not find permission link")
+  end
+
+  test "viewing a collection fetches jobs that output it" do
+    show_collection(:bar_file, :active)
+    assert_includes(assigns(:output_of).map(&:uuid),
+                    api_fixture('jobs')['foobar']['uuid'],
+                    "controller did not find output job")
+  end
+
+  test "viewing a collection fetches jobs that logged it" do
+    show_collection(:baz_file, :active)
+    assert_includes(assigns(:log_of).map(&:uuid),
+                    api_fixture('jobs')['foobar']['uuid'],
+                    "controller did not find logger job")
+  end
+
+  test "sharing auths available to admin" do
+    show_collection("collection_owned_by_active", "admin_trustedclient")
+    assert_not_nil assigns(:search_sharing)
+  end
+
+  test "sharing auths available to owner" do
+    show_collection("collection_owned_by_active", "active_trustedclient")
+    assert_not_nil assigns(:search_sharing)
+  end
+
+  test "sharing auths available to reader" do
+    show_collection("foo_collection_in_aproject",
+                    "project_viewer_trustedclient")
+    assert_not_nil assigns(:search_sharing)
+  end
+
+  test "viewing collection files with a reader token" do
+    params = collection_params(:foo_file)
+    params[:reader_token] = api_token("active_all_collections")
+    get(:show_file_links, params)
+    assert_response :redirect
+    assert_no_session
+  end
+
+  test "fetching collection file with reader token" do
+    setup_for_keep_web
+    params = collection_params(:foo_file, "foo")
+    params[:reader_token] = api_token("active_all_collections")
+    get(:show_file, params)
+    assert_response :redirect
+    assert_match /foo/, response.redirect_url
+    assert_no_session
+  end
+
+  test "reader token Collection links end with trailing slash" do
+    # Testing the fix for #2937.
+    session = session_for(:active_trustedclient)
+    post(:share, collection_params(:foo_file), session)
+    assert(@controller.download_link.ends_with? '/',
+           "Collection share link does not end with slash for wget")
+  end
+
+  test "getting a file from Keep" do
+    setup_for_keep_web
+    params = collection_params(:foo_file, 'foo')
+    sess = session_for(:active)
+    get(:show_file, params, sess)
+    assert_response :redirect
+    assert_match /foo/, response.redirect_url
+  end
+
+  test 'anonymous download' do
+    setup_for_keep_web
+    config_anonymous true
+    get :show_file, {
+      uuid: api_fixture('collections')['user_agreement_in_anonymously_accessible_project']['uuid'],
+      file: 'GNU_General_Public_License,_version_3.pdf',
+    }
+    assert_response :redirect
+    assert_match /GNU_General_Public_License/, response.redirect_url
+  end
+
+  test "can't get a file from Keep without permission" do
+    params = collection_params(:foo_file, 'foo')
+    sess = session_for(:spectator)
+    get(:show_file, params, sess)
+    assert_response 404
+  end
+
+  test "getting a file from Keep with a good reader token" do
+    setup_for_keep_web
+    params = collection_params(:foo_file, 'foo')
+    read_token = api_token('active')
+    params[:reader_token] = read_token
+    get(:show_file, params)
+    assert_response :redirect
+    assert_match /foo/, response.redirect_url
+    assert_not_equal(read_token, session[:arvados_api_token],
+                     "using a reader token set the session's API token")
+  end
+
+  [false, true].each do |anon|
+    test "download a file using a reader token with insufficient scope, anon #{anon}" do
+      config_anonymous anon
+      params = collection_params(:foo_file, 'foo')
+      params[:reader_token] =
+        api_token('active_noscope')
+      get(:show_file, params)
+      if anon
+        # Some files can be shown without a valid token, but not this one.
+        assert_response 404
+      else
+        # No files will ever be shown without a valid token. You
+        # should log in and try again.
+        assert_response :redirect
+      end
+    end
+  end
+
+  test "can get a file with an unpermissioned auth but in-scope reader token" do
+    setup_for_keep_web
+    params = collection_params(:foo_file, 'foo')
+    sess = session_for(:expired)
+    read_token = api_token('active')
+    params[:reader_token] = read_token
+    get(:show_file, params, sess)
+    assert_response :redirect
+    assert_not_equal(read_token, session[:arvados_api_token],
+                     "using a reader token set the session's API token")
+  end
+
+  test "inactive user can retrieve user agreement" do
+    setup_for_keep_web
+    ua_collection = api_fixture('collections')['user_agreement']
+    # Here we don't test whether the agreement can be retrieved from
+    # Keep. We only test that show_file decides to send file content.
+    get :show_file, {
+      uuid: ua_collection['uuid'],
+      file: ua_collection['manifest_text'].match(/ \d+:\d+:(\S+)/)[1]
+    }, session_for(:inactive)
+    assert_nil(assigns(:unsigned_user_agreements),
+               "Did not skip check_user_agreements filter " +
+               "when showing the user agreement.")
+    assert_response :redirect
+  end
+
+  test "requesting nonexistent Collection returns 404" do
+    show_collection({uuid: NONEXISTENT_COLLECTION, id: NONEXISTENT_COLLECTION},
+                    :active, 404)
+  end
+
+  test "show file in a subdirectory of a collection" do
+    setup_for_keep_web
+    params = collection_params(:collection_with_files_in_subdir, 'subdir2/subdir3/subdir4/file1_in_subdir4.txt')
+    get(:show_file, params, session_for(:user1_with_load))
+    assert_response :redirect
+    assert_match /subdir2\/subdir3\/subdir4\/file1_in_subdir4\.txt/, response.redirect_url
+  end
+
+  test 'provenance graph' do
+    use_token 'admin'
+
+    obj = find_fixture Collection, "graph_test_collection3"
+
+    provenance = obj.provenance.stringify_keys
+
+    [obj[:portable_data_hash]].each do |k|
+      assert_not_nil provenance[k], "Expected key #{k} in provenance set"
+    end
+
+    prov_svg = ProvenanceHelper::create_provenance_graph(provenance, "provenance_svg",
+                                                         {:request => RequestDuck,
+                                                           :direction => :bottom_up,
+                                                           :combine_jobs => :script_only})
+
+    stage1 = find_fixture Job, "graph_stage1"
+    stage3 = find_fixture Job, "graph_stage3"
+    previous_job_run = find_fixture Job, "previous_job_run"
+
+    obj_id = obj.portable_data_hash.gsub('+', '\\\+')
+    stage1_out = stage1.output.gsub('+', '\\\+')
+    stage1_id = "#{stage1.script}_#{Digest::MD5.hexdigest(stage1[:script_parameters].to_json)}"
+    stage3_id = "#{stage3.script}_#{Digest::MD5.hexdigest(stage3[:script_parameters].to_json)}"
+
+    assert_match /#{obj_id}&#45;&gt;#{stage3_id}/, prov_svg
+
+    assert_match /#{stage3_id}&#45;&gt;#{stage1_out}/, prov_svg
+
+    assert_match /#{stage1_out}&#45;&gt;#{stage1_id}/, prov_svg
+
+  end
+
+  test 'used_by graph' do
+    use_token 'admin'
+    obj = find_fixture Collection, "graph_test_collection1"
+
+    used_by = obj.used_by.stringify_keys
+
+    used_by_svg = ProvenanceHelper::create_provenance_graph(used_by, "used_by_svg",
+                                                            {:request => RequestDuck,
+                                                              :direction => :top_down,
+                                                              :combine_jobs => :script_only,
+                                                              :pdata_only => true})
+
+    stage2 = find_fixture Job, "graph_stage2"
+    stage3 = find_fixture Job, "graph_stage3"
+
+    stage2_id = "#{stage2.script}_#{Digest::MD5.hexdigest(stage2[:script_parameters].to_json)}"
+    stage3_id = "#{stage3.script}_#{Digest::MD5.hexdigest(stage3[:script_parameters].to_json)}"
+
+    obj_id = obj.portable_data_hash.gsub('+', '\\\+')
+    stage3_out = stage3.output.gsub('+', '\\\+')
+
+    assert_match /#{obj_id}&#45;&gt;#{stage2_id}/, used_by_svg
+
+    assert_match /#{obj_id}&#45;&gt;#{stage3_id}/, used_by_svg
+
+    assert_match /#{stage3_id}&#45;&gt;#{stage3_out}/, used_by_svg
+
+  end
+
+  test "view collection with empty properties" do
+    fixture_name = :collection_with_empty_properties
+    show_collection(fixture_name, :active)
+    assert_equal(api_fixture('collections')[fixture_name.to_s]['name'], assigns(:object).name)
+    assert_not_nil(assigns(:object).properties)
+    assert_empty(assigns(:object).properties)
+  end
+
+  test "view collection with one property" do
+    fixture_name = :collection_with_one_property
+    show_collection(fixture_name, :active)
+    fixture = api_fixture('collections')[fixture_name.to_s]
+    assert_equal(fixture['name'], assigns(:object).name)
+    assert_equal(fixture['properties'][0], assigns(:object).properties[0])
+  end
+
+  test "create collection with properties" do
+    post :create, {
+      collection: {
+        name: 'collection created with properties',
+        manifest_text: '',
+        properties: {
+          property_1: 'value_1'
+        },
+      },
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+    assert_not_nil assigns(:object).uuid
+    assert_equal 'collection created with properties', assigns(:object).name
+    assert_equal 'value_1', assigns(:object).properties[:property_1]
+  end
+
+  test "update description and check manifest_text is not lost" do
+    collection = api_fixture("collections")["multilevel_collection_1"]
+    post :update, {
+      id: collection["uuid"],
+      collection: {
+        description: 'test description update'
+      },
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+    assert_not_nil assigns(:object)
+    # Ensure the Workbench response still has the original manifest_text
+    assert_equal 'test description update', assigns(:object).description
+    assert_equal true, strip_signatures_and_compare(collection['manifest_text'], assigns(:object).manifest_text)
+    # Ensure the API server still has the original manifest_text after
+    # we called arvados.v1.collections.update
+    use_token :active do
+      assert_equal true, strip_signatures_and_compare(Collection.find(collection['uuid']).manifest_text,
+                                                      collection['manifest_text'])
+    end
+  end
+
+  # Since we got the initial collection from a fixture, its manifest_text
+  # carries no signatures. However, a collection retrieved via update or
+  # find will have a signed manifest_text, so compare each line after
+  # stripping the signatures.
+  def strip_signatures_and_compare m1, m2
+    m1_lines = m1.split "\n"
+    m2_lines = m2.split "\n"
+
+    return false if m1_lines.size != m2_lines.size
+
+    m1_lines.each_with_index do |line, i|
+      m1_words = []
+      line.split.each do |word|
+        m1_words << word.split('+A')[0]
+      end
+      m2_words = []
+      m2_lines[i].split.each do |word|
+        m2_words << word.split('+A')[0]
+      end
+      return false if !m1_words.join(' ').eql?(m2_words.join(' '))
+    end
+
+    return true
+  end
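+
+  # An illustrative (assumed) pair of manifest lines that the helper above
+  # treats as equal -- they differ only in the +A... signature hint that the
+  # API server appends to each block locator:
+  #   . acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo
+  #   . acbd18db4cc2f85cedef654fccc4a4d8+3+Adeadbeef@12345678 0:3:foo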
+
+  test "view collection and verify none of the file types listed are disabled" do
+    show_collection(:collection_with_several_supported_file_types, :active)
+
+    files = assigns(:object).files
+    assert files.length > 0, "Expected one or more files in collection"
+
+    disabled = css_select('[disabled="disabled"]')
+    assert_equal 0, disabled.length, "Expected no disabled files in collection viewables list"
+  end
+
+  test "view collection and verify file types listed are all disabled" do
+    show_collection(:collection_with_several_unsupported_file_types, :active)
+
+    files = assigns(:object).files.collect do |_, file, _|
+      file
+    end
+    assert files.length > 0, "Expected one or more files in collection"
+
+    disabled = css_select('[disabled="disabled"]').collect do |el|
+      el.attributes['title'].value.split[-1]
+    end
+
+    assert_equal files.sort, disabled.sort, "Expected to see all collection files in disabled list of files"
+  end
+
+  test "anonymous user accesses collection in shared project" do
+    config_anonymous true
+    collection = api_fixture('collections')['public_text_file']
+    get(:show, {id: collection['uuid']})
+
+    response_object = assigns(:object)
+    assert_equal collection['name'], response_object['name']
+    assert_equal collection['uuid'], response_object['uuid']
+    assert_includes @response.body, 'Hello world'
+    assert_includes @response.body, 'Content address'
+    refute_empty css_select('[href="#Advanced"]')
+  end
+
+  test "can view empty collection" do
+    get :show, {id: 'd41d8cd98f00b204e9800998ecf8427e+0'}, session_for(:active)
+    assert_includes @response.body, 'The following collections have this content'
+  end
+
+  test "collection portable data hash redirect" do
+    di = api_fixture('collections')['docker_image']
+    get :show, {id: di['portable_data_hash']}, session_for(:active)
+    assert_match /\/collections\/#{di['uuid']}/, @response.redirect_url
+  end
+
+  test "collection portable data hash with multiple matches" do
+    pdh = api_fixture('collections')['foo_file']['portable_data_hash']
+    get :show, {id: pdh}, session_for(:admin)
+    matches = api_fixture('collections').select {|k,v| v["portable_data_hash"] == pdh}
+    assert matches.size > 1
+
+    matches.each do |k,v|
+      assert_match /href="\/collections\/#{v['uuid']}">.*#{v['name']}<\/a>/, @response.body
+    end
+
+    assert_includes @response.body, 'The following collections have this content:'
+    assert_not_includes @response.body, 'more results are not shown'
+    assert_not_includes @response.body, 'Activity'
+    assert_not_includes @response.body, 'Sharing and permissions'
+  end
+
+  test "collection page renders name" do
+    collection = api_fixture('collections')['foo_file']
+    get :show, {id: collection['uuid']}, session_for(:active)
+    assert_includes @response.body, collection['name']
+    assert_match /not authorized to manage collection sharing links/, @response.body
+  end
+
+  test "No Upload tab on non-writable collection" do
+    get :show, {id: api_fixture('collections')['user_agreement']['uuid']}, session_for(:active)
+    assert_not_includes @response.body, '<a href="#Upload"'
+  end
+
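+  # Point keep-web preview and download at the given URL templates; the
+  # %{uuid_or_pdh} placeholder is filled with the collection's UUID or
+  # portable data hash ('+' becomes '-' when used in a hostname).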
+  def setup_for_keep_web cfg='https://%{uuid_or_pdh}.example', dl_cfg=false
+    Rails.configuration.keep_web_url = cfg
+    Rails.configuration.keep_web_download_url = dl_cfg
+  end
+
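+  # Run each keep-web redirect test twice: once addressing the collection
+  # by UUID and once by portable data hash.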
+  %w(uuid portable_data_hash).each do |id_type|
+    test "Redirect to keep_web_url via #{id_type}" do
+      setup_for_keep_web
+      tok = api_token('active')
+      id = api_fixture('collections')['w_a_z_file'][id_type]
+      get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+      assert_response :redirect
+      assert_equal "https://#{id.sub '+', '-'}.example/_/w%20a%20z?api_token=#{URI.escape tok, '/'}", @response.redirect_url
+    end
+
+    test "Redirect to keep_web_url via #{id_type} with reader token" do
+      setup_for_keep_web
+      tok = api_token('active')
+      id = api_fixture('collections')['w_a_z_file'][id_type]
+      get :show_file, {uuid: id, file: "w a z", reader_token: tok}, session_for(:expired)
+      assert_response :redirect
+      assert_equal "https://#{id.sub '+', '-'}.example/t=#{URI.escape tok}/_/w%20a%20z", @response.redirect_url
+    end
+
+    test "Redirect to keep_web_url via #{id_type} with no token" do
+      setup_for_keep_web
+      config_anonymous true
+      id = api_fixture('collections')['public_text_file'][id_type]
+      get :show_file, {uuid: id, file: "Hello World.txt"}
+      assert_response :redirect
+      assert_equal "https://#{id.sub '+', '-'}.example/_/Hello%20World.txt", @response.redirect_url
+    end
+
+    test "Redirect to keep_web_url via #{id_type} with disposition param" do
+      setup_for_keep_web
+      config_anonymous true
+      id = api_fixture('collections')['public_text_file'][id_type]
+      get :show_file, {
+        uuid: id,
+        file: "Hello World.txt",
+        disposition: 'attachment',
+      }
+      assert_response :redirect
+      assert_equal "https://#{id.sub '+', '-'}.example/_/Hello%20World.txt?disposition=attachment", @response.redirect_url
+    end
+
+    test "Redirect to keep_web_download_url via #{id_type}" do
+      setup_for_keep_web('https://collections.example/c=%{uuid_or_pdh}',
+                         'https://download.example/c=%{uuid_or_pdh}')
+      tok = api_token('active')
+      id = api_fixture('collections')['w_a_z_file'][id_type]
+      get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+      assert_response :redirect
+      assert_equal "https://download.example/c=#{id.sub '+', '-'}/_/w%20a%20z?api_token=#{URI.escape tok, '/'}", @response.redirect_url
+    end
+
+    test "Redirect to keep_web_url via #{id_type} when trust_all_content enabled" do
+      Rails.configuration.trust_all_content = true
+      setup_for_keep_web('https://collections.example/c=%{uuid_or_pdh}',
+                         'https://download.example/c=%{uuid_or_pdh}')
+      tok = api_token('active')
+      id = api_fixture('collections')['w_a_z_file'][id_type]
+      get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+      assert_response :redirect
+      assert_equal "https://collections.example/c=#{id.sub '+', '-'}/_/w%20a%20z?api_token=#{URI.escape tok, '/'}", @response.redirect_url
+    end
+  end
+
+  [false, true].each do |anon|
+    test "No redirect to keep_web_url if collection not found, anon #{anon}" do
+      setup_for_keep_web
+      config_anonymous anon
+      id = api_fixture('collections')['w_a_z_file']['uuid']
+      get :show_file, {uuid: id, file: "w a z"}, session_for(:spectator)
+      assert_response 404
+    end
+
+    test "Redirect download to keep_web_download_url, anon #{anon}" do
+      config_anonymous anon
+      setup_for_keep_web('https://collections.example/c=%{uuid_or_pdh}',
+                         'https://download.example/c=%{uuid_or_pdh}')
+      tok = api_token('active')
+      id = api_fixture('collections')['public_text_file']['uuid']
+      get :show_file, {
+        uuid: id,
+        file: 'Hello world.txt',
+        disposition: 'attachment',
+      }, session_for(:active)
+      assert_response :redirect
+      expect_url = "https://download.example/c=#{id.sub '+', '-'}/_/Hello%20world.txt"
+      unless anon
+        expect_url += "?api_token=#{URI.escape tok, '/'}"
+      end
+      assert_equal expect_url, @response.redirect_url
+    end
+  end
+
+  test "Error if file is impossible to retrieve from keep_web_url" do
+    # A session token cannot be passed through a single-origin keep-web
+    # URL, and this collection cannot be read without one.
+    setup_for_keep_web 'https://collections.example/c=%{uuid_or_pdh}', false
+    id = api_fixture('collections')['w_a_z_file']['uuid']
+    get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+    assert_response 422
+  end
+
+  [false, true].each do |trust_all_content|
+    test "Redirect preview to keep_web_download_url when preview is disabled and trust_all_content is #{trust_all_content}" do
+      Rails.configuration.trust_all_content = trust_all_content
+      setup_for_keep_web false, 'https://download.example/c=%{uuid_or_pdh}'
+      tok = api_token('active')
+      id = api_fixture('collections')['w_a_z_file']['uuid']
+      get :show_file, {uuid: id, file: "w a z"}, session_for(:active)
+      assert_response :redirect
+      assert_equal "https://download.example/c=#{id.sub '+', '-'}/_/w%20a%20z?api_token=#{URI.escape tok, '/'}", @response.redirect_url
+    end
+  end
+
+  test "remove selected files from collection" do
+    use_token :active
+
+    # create a new collection to test; using existing collections would cause other tests to fail,
+    # and resetting fixtures after each test would make this test file take almost 4 times as long to run.
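+    # Manifest format: "<stream name> <block locators> <position:size:name ...>".
+    # d41d8cd98f00b204e9800998ecf8427e+0 is the locator of the empty block,
+    # so each 0:0:<name> token is a zero-length file.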
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\n./dir1 d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\n"
+
+    collection = Collection.create(manifest_text: manifest_text)
+    assert_includes(collection['manifest_text'], "0:0:file1")
+
+    # now remove all files named 'file1' from the collection
+    post :remove_selected_files, {
+      id: collection['uuid'],
+      selection: ["#{collection['uuid']}/file1",
+                  "#{collection['uuid']}/dir1/file1"],
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+
+    # verify no 'file1' in the updated collection
+    collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
+    assert_not_includes(collection['manifest_text'], "0:0:file1")
+    assert_includes(collection['manifest_text'], "0:0:file2") # but other files still exist
+  end
+
+  test "remove all files from a subdir of a collection" do
+    use_token :active
+
+    # create a new collection to test
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\n./dir1 d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\n"
+
+    collection = Collection.create(manifest_text: manifest_text)
+    assert_includes(collection['manifest_text'], "0:0:file1")
+
+    # now remove all files from "dir1" subdir of the collection
+    post :remove_selected_files, {
+      id: collection['uuid'],
+      selection: ["#{collection['uuid']}/dir1/file1",
+                  "#{collection['uuid']}/dir1/file2"],
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+
+    # verify that "./dir1" no longer exists in this collection's manifest text
+    collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
+    assert_match /\. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1 0:0:file2\n$/, collection['manifest_text']
+    assert_not_includes(collection['manifest_text'], 'dir1')
+  end
+
+  test "rename file in a collection" do
+    use_token :active
+
+    # create a new collection to test
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\n./dir1 d41d8cd98f00b204e9800998ecf8427e+0 0:0:dir1file1 0:0:dir1file2 0:0:dir1imagefile.png\n"
+
+    collection = Collection.create(manifest_text: manifest_text)
+    assert_includes(collection['manifest_text'], "0:0:file1")
+
+    # rename 'file1' as 'file1renamed' and verify
+    post :update, {
+      id: collection['uuid'],
+      collection: {
+        'rename-file-path:file1' => 'file1renamed'
+      },
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+
+    collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
+    assert_match /\. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1renamed 0:0:file2\n\.\/dir1 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file1 0:0:dir1file2 0:0:dir1imagefile.png\n$/, collection['manifest_text']
+
+    # now rename 'file2' such that it is moved into 'dir1'
+    @test_counter = 0
+    post :update, {
+      id: collection['uuid'],
+      collection: {
+        'rename-file-path:file2' => 'dir1/file2'
+      },
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+
+    collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
+    assert_match /\. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1renamed\n\.\/dir1 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file1 0:0:dir1file2 0:0:dir1imagefile.png 0:0:file2\n$/, collection['manifest_text']
+
+    # now rename 'dir1/dir1file1' such that it is moved into a new subdir
+    @test_counter = 0
+    post :update, {
+      id: collection['uuid'],
+      collection: {
+        'rename-file-path:dir1/dir1file1' => 'dir2/dir3/dir1file1moved'
+      },
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+
+    collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
+    assert_match /\. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1renamed\n\.\/dir1 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file2 0:0:dir1imagefile.png 0:0:file2\n\.\/dir2\/dir3 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file1moved\n$/, collection['manifest_text']
+
+    # now rename the image file 'dir1/dir1imagefile.png'
+    @test_counter = 0
+    post :update, {
+      id: collection['uuid'],
+      collection: {
+        'rename-file-path:dir1/dir1imagefile.png' => 'dir1/dir1imagefilerenamed.png'
+      },
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+
+    collection = Collection.select([:uuid, :manifest_text]).where(uuid: collection['uuid']).first
+    assert_match /\. d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:file1renamed\n\.\/dir1 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file2 0:0:dir1imagefilerenamed.png 0:0:file2\n\.\/dir2\/dir3 d41d8cd98f00b204e9800998ecf8427e\+0\+A(.*) 0:0:dir1file1moved\n$/, collection['manifest_text']
+  end
+
+  test "renaming file with a duplicate name in same stream not allowed" do
+    use_token :active
+
+    # rename 'file2' as 'file1' and expect error
+    post :update, {
+      id: 'zzzzz-4zz18-pyw8yp9g3pr7irn',
+      collection: {
+        'rename-file-path:file2' => 'file1'
+      },
+      format: :json
+    }, session_for(:active)
+    assert_response 422
+    assert_includes json_response['errors'], 'Duplicate file path'
+  end
+
+  test "renaming file with a duplicate name as another stream not allowed" do
+    use_token :active
+
+    # rename 'file1' as 'dir1/file1' and expect error
+    post :update, {
+      id: 'zzzzz-4zz18-pyw8yp9g3pr7irn',
+      collection: {
+        'rename-file-path:file1' => 'dir1/file1'
+      },
+      format: :json
+    }, session_for(:active)
+    assert_response 422
+    assert_includes json_response['errors'], 'Duplicate file path'
+  end
+end
diff --git a/apps/workbench/test/controllers/container_requests_controller_test.rb b/apps/workbench/test/controllers/container_requests_controller_test.rb
new file mode 100644 (file)
index 0000000..6e96839
--- /dev/null
@@ -0,0 +1,145 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ContainerRequestsControllerTest < ActionController::TestCase
+  test "visit completed container request log tab" do
+    use_token 'active'
+
+    cr = api_fixture('container_requests')['completed']
+    container_uuid = cr['container_uuid']
+    container = Container.find(container_uuid)
+
+    get :show, {id: cr['uuid'], tab_pane: 'Log'}, session_for(:active)
+    assert_response :success
+
+    assert_select "a", {:href=>"/collections/#{container['log']}", :text=>"Download the log"}
+    assert_select "a", {:href=>"#{container['log']}/baz"}
+    assert_not_includes @response.body, '<pre id="event_log_div"'
+  end
+
+  test "visit running container request log tab" do
+    use_token 'active'
+
+    cr = api_fixture('container_requests')['running']
+    container_uuid = cr['container_uuid']
+    container = Container.find(container_uuid)
+
+    get :show, {id: cr['uuid'], tab_pane: 'Log'}, session_for(:active)
+    assert_response :success
+
+    assert_includes @response.body, '<pre id="event_log_div"'
+    assert_select 'Download the log', false
+  end
+
+  test "completed container request offers re-run option" do
+    use_token 'active'
+
+    uuid = api_fixture('container_requests')['completed']['uuid']
+
+    get :show, {id: uuid}, session_for(:active)
+    assert_response :success
+
+    assert_includes @response.body, "action=\"/container_requests/#{uuid}/copy\""
+  end
+
+  test "cancel request for queued container" do
+    cr_fixture = api_fixture('container_requests')['queued']
+    post :cancel, {id: cr_fixture['uuid']}, session_for(:active)
+    assert_response 302
+
+    use_token 'active'
+    cr = ContainerRequest.find(cr_fixture['uuid'])
+    assert_equal 'Final', cr.state
+    assert_equal 0, cr.priority
+    c = Container.find(cr_fixture['container_uuid'])
+    assert_equal 'Queued', c.state
+    assert_equal 0, c.priority
+  end
+
+  [
+    ['completed', false, false],
+    ['completed', true, false],
+    ['completed-older', false, true],
+    ['completed-older', true, true],
+  ].each do |cr_fixture, reuse_enabled, uses_acr|
+    test "container request #{uses_acr ? '' : 'not'} using arvados-cwl-runner copy #{reuse_enabled ? 'with' : 'without'} reuse enabled" do
+      completed_cr = api_fixture('container_requests')[cr_fixture]
+      # Set up post request params
+      copy_params = {id: completed_cr['uuid']}
+      if reuse_enabled
+        copy_params.merge!({use_existing: true})
+      end
+      post(:copy, copy_params, session_for(:active))
+      assert_response 302
+      copied_cr = assigns(:object)
+      assert_not_nil copied_cr
+      assert_equal 'Uncommitted', copied_cr[:state]
+      assert_equal "Copy of #{completed_cr['name']}", copied_cr['name']
+      assert_equal completed_cr['cmd'], copied_cr['cmd']
+      assert_equal completed_cr['runtime_constraints']['ram'], copied_cr['runtime_constraints'][:ram]
+      if reuse_enabled
+        assert copied_cr[:use_existing]
+      else
+        refute copied_cr[:use_existing]
+      end
+      # If the CR's command is arvados-cwl-runner, the appropriate flag should
+      # be passed to it
+      if uses_acr
+        if reuse_enabled
+          # arvados-cwl-runner's default behavior is to enable reuse
+          assert_includes copied_cr['command'], 'arvados-cwl-runner'
+          assert_not_includes copied_cr['command'], '--disable-reuse'
+        else
+          assert_includes copied_cr['command'], 'arvados-cwl-runner'
+          assert_includes copied_cr['command'], '--disable-reuse'
+          assert_not_includes copied_cr['command'], '--enable-reuse'
+        end
+      else
+        # If no arvados-cwl-runner is being used, the command should be left alone
+        assert_equal completed_cr['command'], copied_cr['command']
+      end
+    end
+  end
+
+  [
+    ['completed', true],
+    ['running', true],
+    ['queued', true],
+    ['uncommitted', false],
+  ].each do |cr_fixture, should_show|
+    test "provenance tab should #{should_show ? '' : 'not'} be shown on #{cr_fixture} container requests" do
+      cr = api_fixture('container_requests')[cr_fixture]
+      assert_not_nil cr
+      get(:show,
+          {id: cr['uuid']},
+          session_for(:active))
+      assert_response :success
+      if should_show
+        assert_includes @response.body, "href=\"#Provenance\""
+      else
+        assert_not_includes @response.body, "href=\"#Provenance\""
+      end
+    end
+  end
+
+  test "container request display" do
+    use_token 'active'
+
+    cr = api_fixture('container_requests')['completed_with_input_mounts']
+
+    get :show, {id: cr['uuid']}, session_for(:active)
+    assert_response :success
+
+    assert_match /hello/, @response.body
+    assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/baz\?" # locator on command
+    assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar\?" # locator on command
+    assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/foo" # mount input1
+    assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/bar" # mount input2
+    assert_includes @response.body, "href=\"\/collections/1fd08fc162a5c6413070a8bd0bffc818+150" # mount workflow
+    assert_includes @response.body, "href=\"#Log\""
+    assert_includes @response.body, "href=\"#Provenance\""
+  end
+end
diff --git a/apps/workbench/test/controllers/containers_controller_test.rb b/apps/workbench/test/controllers/containers_controller_test.rb
new file mode 100644 (file)
index 0000000..a6a2999
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ContainersControllerTest < ActionController::TestCase
+  test "visit container log" do
+    use_token 'active'
+
+    container = api_fixture('containers')['completed']
+
+    get :show, {id: container['uuid'], tab_pane: 'Log'}, session_for(:active)
+    assert_response :success
+
+    assert_select "a", {:href=>"/collections/#{container['log']}", :text=>"Download the log"}
+    assert_select "a", {:href=>"#{container['log']}/baz"}
+  end
+end
diff --git a/apps/workbench/test/controllers/disabled_api_test.rb b/apps/workbench/test/controllers/disabled_api_test.rb
new file mode 100644 (file)
index 0000000..913f2b9
--- /dev/null
@@ -0,0 +1,84 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/share_object_helper'
+
+class DisabledApiTest < ActionController::TestCase
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, false
+
+  test "dashboard recent processes when pipeline_instance index API is disabled" do
+    @controller = ProjectsController.new
+
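+    # Remove the pipeline_instances index method from a copy of the
+    # discovery document, so Workbench behaves as if that API is disabled.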
+    dd = ArvadosApiClient.new_or_current.discovery.deep_dup
+    dd[:resources][:pipeline_instances][:methods].delete(:index)
+    ArvadosApiClient.any_instance.stubs(:discovery).returns(dd)
+
+    get :index, {}, session_for(:active)
+    assert_includes @response.body, "zzzzz-xvhdp-cr4runningcntnr" # expect crs
+    assert_not_includes @response.body, "zzzzz-d1hrv-"   # expect no pipelines
+    assert_includes @response.body, "Run a process"
+  end
+
+  test "dashboard compute node status not shown when pipeline_instance index API is disabled" do
+    @controller = ProjectsController.new
+
+    dd = ArvadosApiClient.new_or_current.discovery.deep_dup
+    dd[:resources][:pipeline_instances][:methods].delete(:index)
+    ArvadosApiClient.any_instance.stubs(:discovery).returns(dd)
+
+    get :index, {}, session_for(:active)
+    assert_not_includes @response.body, "compute-node-summary-pane"
+  end
+
+  [
+    [:jobs, JobsController.new],
+    [:job_tasks, JobTasksController.new],
+    [:pipeline_instances, PipelineInstancesController.new],
+    [:pipeline_templates, PipelineTemplatesController.new],
+  ].each do |ctrl_name, ctrl|
+    test "#{ctrl_name} index page when API is disabled" do
+      @controller = ctrl
+
+      dd = ArvadosApiClient.new_or_current.discovery.deep_dup
+      dd[:resources][ctrl_name][:methods].delete(:index)
+      ArvadosApiClient.any_instance.stubs(:discovery).returns(dd)
+
+      get :index, {}, session_for(:active)
+      assert_response 404
+    end
+  end
+
+  [
+    :admin,
+    :active,
+    nil,
+  ].each do |user|
+    test "project tabs as user #{user} when pipeline related index APIs are disabled" do
+      @controller = ProjectsController.new
+
+      Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+
+      dd = ArvadosApiClient.new_or_current.discovery.deep_dup
+      dd[:resources][:pipeline_templates][:methods].delete(:index)
+      ArvadosApiClient.any_instance.stubs(:discovery).returns(dd)
+
+      proj_uuid = api_fixture('groups')['anonymously_accessible_project']['uuid']
+
+      if user
+        get(:show, {id: proj_uuid}, session_for(user))
+      else
+        get(:show, {id: proj_uuid})
+      end
+
+      resp = @response.body
+      assert_includes resp, "href=\"#Data_collections\""
+      assert_includes resp, "href=\"#Pipelines_and_processes\""
+      assert_includes resp, "href=\"#Workflows\""
+      assert_not_includes resp, "href=\"#Pipeline_templates\""
+      assert_includes @response.body, "Run a process" if user == :admin
+    end
+  end
+end
diff --git a/apps/workbench/test/controllers/groups_controller_test.rb b/apps/workbench/test/controllers/groups_controller_test.rb
new file mode 100644 (file)
index 0000000..83f0c9d
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class GroupsControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/healthcheck_controller_test.rb b/apps/workbench/test/controllers/healthcheck_controller_test.rb
new file mode 100644 (file)
index 0000000..45726e5
--- /dev/null
@@ -0,0 +1,33 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class HealthcheckControllerTest < ActionController::TestCase
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, false
+
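+  # [management token configured, Authorization header, expected HTTP
+  #  status, expected error message or body]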
+  [
+    [false, nil, 404, 'disabled'],
+    [true, nil, 401, 'authorization required'],
+    [true, 'badformatwithnoBearer', 403, 'authorization error'],
+    [true, 'Bearer wrongtoken', 403, 'authorization error'],
+    [true, 'Bearer configuredmanagementtoken', 200, '{"health":"OK"}'],
+  ].each do |enabled, header, error_code, error_msg|
+    test "ping when #{if enabled then 'enabled' else 'disabled' end} with header '#{header}'" do
+      Rails.configuration.ManagementToken = 'configuredmanagementtoken' if enabled
+
+      @request.headers['Authorization'] = header
+      get :ping
+      assert_response error_code
+
+      resp = JSON.parse(@response.body)
+      if error_code == 200
+        assert_equal({'health' => 'OK'}, resp)
+      else
+        assert_equal(error_msg, resp['errors'])
+      end
+    end
+  end
+end
diff --git a/apps/workbench/test/controllers/humans_controller_test.rb b/apps/workbench/test/controllers/humans_controller_test.rb
new file mode 100644 (file)
index 0000000..08553c4
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class HumansControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/job_tasks_controller_test.rb b/apps/workbench/test/controllers/job_tasks_controller_test.rb
new file mode 100644 (file)
index 0000000..faccfdb
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class JobTasksControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/jobs_controller_test.rb b/apps/workbench/test/controllers/jobs_controller_test.rb
new file mode 100644 (file)
index 0000000..f854eaa
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class JobsControllerTest < ActionController::TestCase
+  test "visit jobs index page" do
+    get :index, {}, session_for(:active)
+    assert_response :success
+  end
+
+  test "job page lists pipelines and jobs in which it is used" do
+    get(:show,
+        {id: api_fixture('jobs')['completed_job_in_publicly_accessible_project']['uuid']},
+        session_for(:active))
+    assert_response :success
+
+    assert_select "div.used-in-pipelines" do
+      assert_select "a[href=/pipeline_instances/zzzzz-d1hrv-n68vc490mloy4fi]"
+    end
+
+    assert_select "div.used-in-jobs" do
+      assert_select "a[href=/jobs/zzzzz-8i9sb-with2components]"
+    end
+  end
+end
diff --git a/apps/workbench/test/controllers/keep_disks_controller_test.rb b/apps/workbench/test/controllers/keep_disks_controller_test.rb
new file mode 100644 (file)
index 0000000..b421dd7
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class KeepDisksControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/links_controller_test.rb b/apps/workbench/test/controllers/links_controller_test.rb
new file mode 100644 (file)
index 0000000..7ff5457
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class LinksControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/logs_controller_test.rb b/apps/workbench/test/controllers/logs_controller_test.rb
new file mode 100644 (file)
index 0000000..4699c0d
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class LogsControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/nodes_controller_test.rb b/apps/workbench/test/controllers/nodes_controller_test.rb
new file mode 100644 (file)
index 0000000..c7e4867
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class NodesControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/pipeline_instances_controller_test.rb b/apps/workbench/test/controllers/pipeline_instances_controller_test.rb
new file mode 100644 (file)
index 0000000..6887cac
--- /dev/null
@@ -0,0 +1,336 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PipelineInstancesControllerTest < ActionController::TestCase
+  include PipelineInstancesHelper
+
+  def create_instance_long_enough_to(instance_attrs={})
+    # create 'two_part' pipeline with the given instance attributes
+    pt_fixture = api_fixture('pipeline_templates')['two_part']
+    post :create, {
+      pipeline_instance: instance_attrs.merge({
+        pipeline_template_uuid: pt_fixture['uuid']
+      }),
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+    assert_not_nil assigns(:object)
+    pi_uuid = assigns(:object).uuid
+
+    yield pi_uuid, pt_fixture
+
+    # delete the pipeline instance
+    use_token :active
+    PipelineInstance.where(uuid: pi_uuid).first.destroy
+  end
+
+  test "pipeline instance components populated after create" do
+    create_instance_long_enough_to do |new_instance_uuid, template_fixture|
+      assert_equal(template_fixture['components'].to_json,
+                   assigns(:object).components.to_json)
+    end
+  end
+
+  test "can render pipeline instance with tagged collections" do
+    # Make sure to pass in a tagged collection to test that part of the rendering behavior.
+    get(:show,
+        {id: api_fixture("pipeline_instances")["pipeline_with_tagged_collection_input"]["uuid"]},
+        session_for(:active))
+    assert_response :success
+  end
+
+  test "update script_parameters one at a time using merge param" do
+    template_fixture = api_fixture('pipeline_templates')['two_part']
+    post :update, {
+      id: api_fixture("pipeline_instances")["pipeline_to_merge_params"]["uuid"],
+      pipeline_instance: {
+        components: {
+          "part-two" => {
+            script_parameters: {
+              integer_with_value: {
+                value: 9
+              },
+              plain_string: {
+                value: 'quux'
+              },
+            }
+          }
+        }
+      },
+      merge: true,
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+    assert_not_nil assigns(:object)
+    orig_params = template_fixture['components']['part-two']['script_parameters']
+    new_params = assigns(:object).components[:'part-two'][:script_parameters]
+    orig_params.keys.each do |k|
+      unless %w(integer_with_value plain_string).include?(k)
+        assert_equal orig_params[k].to_json, new_params[k.to_sym].to_json
+      end
+    end
+  end
+
+  test "component rendering copes with unexpected components format" do
+    get(:show,
+        {id: api_fixture("pipeline_instances")["components_is_jobspec"]["uuid"]},
+        session_for(:active))
+    assert_response :success
+  end
+
+  test "dates in JSON components are parsed" do
+    get(:show,
+        {id: api_fixture('pipeline_instances')['has_component_with_completed_jobs']['uuid']},
+        session_for(:active))
+    assert_response :success
+    assert_not_nil assigns(:object)
+    assert_not_nil assigns(:object).components[:foo][:job]
+    start_at = assigns(:object).components[:foo][:job][:started_at]
+    start_at = Time.parse(start_at) if (start_at.andand.class == String)
+    assert start_at.is_a? Time
+    finished_at = assigns(:object).components[:foo][:job][:finished_at]
+    finished_at = Time.parse(finished_at) if (finished_at.andand.class == String)
+    assert finished_at.is_a? Time
+  end
+
+  # The next two tests ensure that a pipeline instance can be copied
+  # when the template has components that do not exist in the
+  # instance (ticket #4000).
+
+  test "copy pipeline instance with components=use_latest" do
+    post(:copy,
+         {
+           id: api_fixture('pipeline_instances')['pipeline_with_newer_template']['uuid'],
+           components: 'use_latest',
+           script: 'use_latest',
+           pipeline_instance: {
+             state: 'RunningOnServer'
+           }
+         },
+         session_for(:active))
+    assert_response 302
+    assert_not_nil assigns(:object)
+
+    # Component 'foo' has script parameters only in the pipeline instance.
+    # Component 'bar' is present only in the pipeline_template.
+    # Test that the copied pipeline instance includes parameters for
+    # component 'foo' from the source instance, and parameters for
+    # component 'bar' from the source template.
+    #
+    assert_not_nil assigns(:object).components[:foo]
+    foo = assigns(:object).components[:foo]
+    assert_not_nil foo[:script_parameters]
+    assert_not_nil foo[:script_parameters][:input]
+    assert_equal 'foo instance input', foo[:script_parameters][:input][:title]
+
+    assert_not_nil assigns(:object).components[:bar]
+    bar = assigns(:object).components[:bar]
+    assert_not_nil bar[:script_parameters]
+    assert_not_nil bar[:script_parameters][:input]
+    assert_equal 'bar template input', bar[:script_parameters][:input][:title]
+  end
+
+  test "copy pipeline instance on newer template works with script=use_same" do
+    post(:copy,
+         {
+           id: api_fixture('pipeline_instances')['pipeline_with_newer_template']['uuid'],
+           components: 'use_latest',
+           script: 'use_same',
+           pipeline_instance: {
+             state: 'RunningOnServer'
+           }
+         },
+         session_for(:active))
+    assert_response 302
+    assert_not_nil assigns(:object)
+
+    # Test that relevant component parameters were copied from both
+    # the source instance and source template, respectively (see
+    # previous test)
+    #
+    assert_not_nil assigns(:object).components[:foo]
+    foo = assigns(:object).components[:foo]
+    assert_not_nil foo[:script_parameters]
+    assert_not_nil foo[:script_parameters][:input]
+    assert_equal 'foo instance input', foo[:script_parameters][:input][:title]
+
+    assert_not_nil assigns(:object).components[:bar]
+    bar = assigns(:object).components[:bar]
+    assert_not_nil bar[:script_parameters]
+    assert_not_nil bar[:script_parameters][:input]
+    assert_equal 'bar template input', bar[:script_parameters][:input][:title]
+  end
+
+  test "generate graph" do
+
+    use_token 'admin'
+
+    pipeline_for_graph = {
+      state: 'Complete',
+      uuid: 'zzzzz-d1hrv-9fm8l10i9z2kqc9',
+      components: {
+        stage1: {
+          repository: 'foo',
+          script: 'hash',
+          script_version: 'master',
+          job: {uuid: 'zzzzz-8i9sb-graphstage10000'},
+          output_uuid: 'zzzzz-4zz18-bv31uwvy3neko22'
+        },
+        stage2: {
+          repository: 'foo',
+          script: 'hash2',
+          script_version: 'master',
+          script_parameters: {
+            input: 'fa7aeb5140e2848d39b416daeef4ffc5+45'
+          },
+          job: {uuid: 'zzzzz-8i9sb-graphstage20000'},
+          output_uuid: 'zzzzz-4zz18-uukreo9rbgwsujx'
+        }
+      }
+    }
+
+    @controller.params['tab_pane'] = "Graph"
+    provenance, pips = @controller.graph([pipeline_for_graph])
+
+    graph_test_collection1 = find_fixture Collection, "graph_test_collection1"
+    stage1 = find_fixture Job, "graph_stage1"
+    stage2 = find_fixture Job, "graph_stage2"
+
+    ['component_zzzzz-d1hrv-9fm8l10i9z2kqc9_stage1',
+     'component_zzzzz-d1hrv-9fm8l10i9z2kqc9_stage2',
+     stage1.uuid,
+     stage2.uuid,
+     stage1.output,
+     stage2.output,
+     pipeline_for_graph[:components][:stage1][:output_uuid],
+     pipeline_for_graph[:components][:stage2][:output_uuid]
+    ].each do |k|
+
+      assert_not_nil provenance[k], "Expected key #{k} in provenance set"
+      assert_equal 1, pips[k], "Expected key #{k} in pips set" if !k.start_with? "component_"
+    end
+
+    prov_svg = ProvenanceHelper::create_provenance_graph provenance, "provenance_svg", {
+        :request => RequestDuck,
+        :all_script_parameters => true,
+        :combine_jobs => :script_and_version,
+        :pips => pips,
+        :only_components => true }
+
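+    # With :combine_jobs => :script_and_version, job nodes are keyed by
+    # script name, script version, and an MD5 digest of the script parameters.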
+    stage1_id = "#{stage1[:script]}_#{stage1[:script_version]}_#{Digest::MD5.hexdigest(stage1[:script_parameters].to_json)}"
+    stage2_id = "#{stage2[:script]}_#{stage2[:script_version]}_#{Digest::MD5.hexdigest(stage2[:script_parameters].to_json)}"
+
+    stage1_out = stage1[:output].gsub('+','\\\+')
+
+    assert_match /#{stage1_id}&#45;&gt;#{stage1_out}/, prov_svg
+
+    assert_match /#{stage1_out}&#45;&gt;#{stage2_id}/, prov_svg
+
+  end
+
+  test "generate graph compare" do
+
+    use_token 'admin'
+
+    pipeline_for_graph1 = {
+      state: 'Complete',
+      uuid: 'zzzzz-d1hrv-9fm8l10i9z2kqc9',
+      components: {
+        stage1: {
+          repository: 'foo',
+          script: 'hash',
+          script_version: 'master',
+          job: {uuid: 'zzzzz-8i9sb-graphstage10000'},
+          output_uuid: 'zzzzz-4zz18-bv31uwvy3neko22'
+        },
+        stage2: {
+          repository: 'foo',
+          script: 'hash2',
+          script_version: 'master',
+          script_parameters: {
+            input: 'fa7aeb5140e2848d39b416daeef4ffc5+45'
+          },
+          job: {uuid: 'zzzzz-8i9sb-graphstage20000'},
+          output_uuid: 'zzzzz-4zz18-uukreo9rbgwsujx'
+        }
+      }
+    }
+
+    pipeline_for_graph2 = {
+      state: 'Complete',
+      uuid: 'zzzzz-d1hrv-9fm8l10i9z2kqc0',
+      components: {
+        stage1: {
+          repository: 'foo',
+          script: 'hash',
+          script_version: 'master',
+          job: {uuid: 'zzzzz-8i9sb-graphstage10000'},
+          output_uuid: 'zzzzz-4zz18-bv31uwvy3neko22'
+        },
+        stage2: {
+          repository: 'foo',
+          script: 'hash2',
+          script_version: 'master',
+          script_parameters: {
+          },
+          job: {uuid: 'zzzzz-8i9sb-graphstage30000'},
+          output_uuid: 'zzzzz-4zz18-uukreo9rbgwsujj'
+        }
+      }
+    }
+
+    @controller.params['tab_pane'] = "Graph"
+    provenance, pips = @controller.graph([pipeline_for_graph1, pipeline_for_graph2])
+
+    collection1 = find_fixture Collection, "graph_test_collection1"
+
+    stage1 = find_fixture Job, "graph_stage1"
+    stage2 = find_fixture Job, "graph_stage2"
+    stage3 = find_fixture Job, "graph_stage3"
+
+    [['component_zzzzz-d1hrv-9fm8l10i9z2kqc9_stage1', nil],
+     ['component_zzzzz-d1hrv-9fm8l10i9z2kqc9_stage2', nil],
+     ['component_zzzzz-d1hrv-9fm8l10i9z2kqc0_stage1', nil],
+     ['component_zzzzz-d1hrv-9fm8l10i9z2kqc0_stage2', nil],
+     [stage1.uuid, 3],
+     [stage2.uuid, 1],
+     [stage3.uuid, 2],
+     [stage1.output, 3],
+     [stage2.output, 1],
+     [stage3.output, 2],
+     [pipeline_for_graph1[:components][:stage1][:output_uuid], 3],
+     [pipeline_for_graph1[:components][:stage2][:output_uuid], 1],
+     [pipeline_for_graph2[:components][:stage2][:output_uuid], 2]
+    ].each do |k|
+      assert_not_nil provenance[k[0]], "Expected key #{k[0]} in provenance set"
+      assert_equal k[1], pips[k[0]], "Expected key #{k} in pips" if !k[0].start_with? "component_"
+    end
+
+    prov_svg = ProvenanceHelper::create_provenance_graph provenance, "provenance_svg", {
+        :request => RequestDuck,
+        :all_script_parameters => true,
+        :combine_jobs => :script_and_version,
+        :pips => pips,
+        :only_components => true }
+
+    collection1_id = collection1.portable_data_hash.gsub('+','\\\+')
+
+    stage2_id = "#{stage2[:script]}_#{stage2[:script_version]}_#{Digest::MD5.hexdigest(stage2[:script_parameters].to_json)}"
+    stage3_id = "#{stage3[:script]}_#{stage3[:script_version]}_#{Digest::MD5.hexdigest(stage3[:script_parameters].to_json)}"
+
+    stage2_out = stage2[:output].gsub('+','\\\+')
+    stage3_out = stage3[:output].gsub('+','\\\+')
+
+    assert_match /#{collection1_id}&#45;&gt;#{stage2_id}/, prov_svg
+    assert_match /#{collection1_id}&#45;&gt;#{stage3_id}/, prov_svg
+
+    assert_match /#{stage2_id}&#45;&gt;#{stage2_out}/, prov_svg
+    assert_match /#{stage3_id}&#45;&gt;#{stage3_out}/, prov_svg
+
+  end
+
+end
diff --git a/apps/workbench/test/controllers/pipeline_templates_controller_test.rb b/apps/workbench/test/controllers/pipeline_templates_controller_test.rb
new file mode 100644 (file)
index 0000000..1f733c4
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PipelineTemplatesControllerTest < ActionController::TestCase
+  test "component rendering copes with unexpeceted components format" do
+    get(:show,
+        {id: api_fixture("pipeline_templates")["components_is_jobspec"]["uuid"]},
+        session_for(:active))
+    assert_response :success
+  end
+end
diff --git a/apps/workbench/test/controllers/projects_controller_test.rb b/apps/workbench/test/controllers/projects_controller_test.rb
new file mode 100644 (file)
index 0000000..21b3361
--- /dev/null
@@ -0,0 +1,599 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/share_object_helper'
+
+class ProjectsControllerTest < ActionController::TestCase
+  include ShareObjectHelper
+
+  test "invited user is asked to sign user agreements on front page" do
+    get :index, {}, session_for(:inactive)
+    assert_response :redirect
+    assert_match(/^#{Regexp.escape(user_agreements_url)}\b/,
+                 @response.redirect_url,
+                 "Inactive user was not redirected to user_agreements page")
+  end
+
+  test "uninvited user is asked to wait for activation" do
+    get :index, {}, session_for(:inactive_uninvited)
+    assert_response :redirect
+    assert_match(/^#{Regexp.escape(inactive_users_url)}\b/,
+                 @response.redirect_url,
+                 "Uninvited user was not redirected to inactive user page")
+  end
+
+  [[:active, true],
+   [:project_viewer, false]].each do |which_user, should_show|
+    test "create subproject button #{'not ' unless should_show} shown to #{which_user}" do
+      readonly_project_uuid = api_fixture('groups')['aproject']['uuid']
+      get :show, {
+        id: readonly_project_uuid
+      }, session_for(which_user)
+      buttons = css_select('[data-method=post]').select do |el|
+        el.attributes['data-remote-href'].value.match /project.*owner_uuid.*#{readonly_project_uuid}/
+      end
+      if should_show
+        assert_not_empty(buttons, "did not offer to create a subproject")
+      else
+        assert_empty(buttons.collect(&:to_s),
+                     "offered to create a subproject in a non-writable project")
+      end
+    end
+  end
+
+  test "sharing a project with a user and group" do
+    uuid_list = [api_fixture("groups")["future_project_viewing_group"]["uuid"],
+                 api_fixture("users")["future_project_user"]["uuid"]]
+    post(:share_with, {
+           id: api_fixture("groups")["asubproject"]["uuid"],
+           uuids: uuid_list,
+           format: "json"},
+         session_for(:active))
+    assert_response :success
+    assert_equal(uuid_list, json_response["success"])
+  end
+
+  test "user with project read permission can't add permissions" do
+    share_uuid = api_fixture("users")["spectator"]["uuid"]
+    post(:share_with, {
+           id: api_fixture("groups")["aproject"]["uuid"],
+           uuids: [share_uuid],
+           format: "json"},
+         session_for(:project_viewer))
+    assert_response 422
+    assert(json_response["errors"].andand.
+             any? { |msg| msg.start_with?("#{share_uuid}: ") },
+           "JSON response missing properly formatted sharing error")
+  end
+
+  test "admin can_manage aproject" do
+    assert user_can_manage(:admin, api_fixture("groups")["aproject"])
+  end
+
+  test "owner can_manage aproject" do
+    assert user_can_manage(:active, api_fixture("groups")["aproject"])
+  end
+
+  test "owner can_manage asubproject" do
+    assert user_can_manage(:active, api_fixture("groups")["asubproject"])
+  end
+
+  test "viewer can't manage aproject" do
+    refute user_can_manage(:project_viewer, api_fixture("groups")["aproject"])
+  end
+
+  test "viewer can't manage asubproject" do
+    refute user_can_manage(:project_viewer, api_fixture("groups")["asubproject"])
+  end
+
+  test "subproject_admin can_manage asubproject" do
+    assert user_can_manage(:subproject_admin, api_fixture("groups")["asubproject"])
+  end
+
+  test "detect ownership loop in project breadcrumbs" do
+    # This test has an arbitrary time limit -- otherwise we'd just sit
+    # here forever instead of reporting that the loop was not
+    # detected. The test passes quickly, but fails slowly.
+    Timeout::timeout 10 do
+      get(:show,
+          { id: api_fixture("groups")["project_owns_itself"]["uuid"] },
+          session_for(:admin))
+    end
+    assert_response :success
+  end
+
+  test "project admin can remove collections from the project" do
+    # Deleting an object that supports 'trash_at' should make it
+    # completely inaccessible to API queries, not simply moved out of
+    # the project.
+    coll_key = "collection_to_remove_from_subproject"
+    coll_uuid = api_fixture("collections")[coll_key]["uuid"]
+    delete(:remove_item,
+           { id: api_fixture("groups")["asubproject"]["uuid"],
+             item_uuid: coll_uuid,
+             format: "js" },
+           session_for(:subproject_admin))
+    assert_response :success
+    assert_match(/\b#{coll_uuid}\b/, @response.body,
+                 "removed object not named in response")
+
+    use_token :subproject_admin
+    assert_raise ArvadosApiClient::NotFoundException do
+      Collection.find(coll_uuid, cache: false)
+    end
+  end
+
+  test "project admin can remove items from project other than collections" do
+    # An object which does not have a trash_at field (e.g. Specimen)
+    # should be implicitly moved to the user's Home project when removed.
+    specimen_uuid = api_fixture('specimens', 'in_asubproject')['uuid']
+    delete(:remove_item,
+           { id: api_fixture('groups', 'asubproject')['uuid'],
+             item_uuid: specimen_uuid,
+             format: 'js' },
+           session_for(:subproject_admin))
+    assert_response :success
+    assert_match(/\b#{specimen_uuid}\b/, @response.body,
+                 "removed object not named in response")
+
+    use_token :subproject_admin
+    new_specimen = Specimen.find(specimen_uuid)
+    assert_equal api_fixture('users', 'subproject_admin')['uuid'], new_specimen.owner_uuid
+  end
+
+  # An object which does not offer an expired_at field but has an xx_owner_uuid_name_unique constraint
+  # will be renamed when it is removed and another object with the same name exists in the user's home project.
+  [
+    ['pipeline_templates', 'template_in_asubproject_with_same_name_as_one_in_active_user_home'],
+  ].each do |dm, fixture|
+    test "removing #{dm} from a subproject results in renaming it when there is another such object with same name in home project" do
+      object = api_fixture(dm, fixture)
+      delete(:remove_item,
+             { id: api_fixture('groups', 'asubproject')['uuid'],
+               item_uuid: object['uuid'],
+               format: 'js' },
+             session_for(:active))
+      assert_response :success
+      assert_match(/\b#{object['uuid']}\b/, @response.body,
+                   "removed object not named in response")
+      use_token :active
+      if dm.eql?('groups')
+        found = Group.find(object['uuid'])
+      else
+        found = PipelineTemplate.find(object['uuid'])
+      end
+      assert_equal api_fixture('users', 'active')['uuid'], found.owner_uuid
+      assert_includes found.name, object['name'] + ' removed from '
+    end
+  end
+
+  test 'projects#show tab infinite scroll partial obeys limit' do
+    get_contents_rows(limit: 1, filters: [['uuid','is_a',['arvados#job']]])
+    assert_response :success
+    assert_equal(1, json_response['content'].scan('<tr').count,
+                 "Did not get exactly one row")
+  end
+
+  ['', ' asc', ' desc'].each do |direction|
+    test "projects#show tab partial orders correctly by #{direction}" do
+      _test_tab_content_order direction
+    end
+  end
+
+  def _test_tab_content_order direction
+    get_contents_rows(limit: 100,
+                      order: "created_at#{direction}",
+                      filters: [['uuid','is_a',['arvados#job',
+                                                'arvados#pipelineInstance']]])
+    assert_response :success
+    not_grouped_by_kind = nil
+    last_timestamp = nil
+    last_kind = nil
+    found_kind = {}
+    json_response['content'].scan /<tr[^>]+>/ do |tr_tag|
+      found_timestamps = 0
+      tr_tag.scan(/\ data-object-created-at=\"(.*?)\"/).each do |t,|
+        if last_timestamp
+          correct_operator = / desc$/ =~ direction ? :>= : :<=
+          assert_operator(last_timestamp, correct_operator, t,
+                          "Rows are not sorted by created_at#{direction}")
+        end
+        last_timestamp = t
+        found_timestamps += 1
+      end
+      assert_equal(1, found_timestamps,
+                   "Content row did not have exactly one timestamp")
+
+      # Confirm that the test for timestamp ordering couldn't have
+      # passed merely because the test fixtures have convenient
+      # timestamps (e.g., there is only one pipeline and one job in
+      # the project being tested, or there are no pipelines at all in
+      # the project being tested):
+      tr_tag.scan /\ data-kind=\"(.*?)\"/ do |kind|
+        if last_kind and last_kind != kind and found_kind[kind]
+          # We saw this kind before, then a different kind, then
+          # this kind again. That means objects are not grouped by
+          # kind.
+          not_grouped_by_kind = true
+        end
+        found_kind[kind] ||= 0
+        found_kind[kind] += 1
+        last_kind = kind
+      end
+    end
+    assert_equal(true, not_grouped_by_kind,
+                 "Could not confirm that results are not grouped by kind")
+  end
+
+  def get_contents_rows params
+    params = {
+      id: api_fixture('users')['active']['uuid'],
+      partial: :contents_rows,
+      format: :json,
+    }.merge(params)
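+    # Array and hash values must be JSON-encoded before being sent as
+    # query parameters.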
+    encoded_params = Hash[params.map { |k,v|
+                            [k, (v.is_a?(Array) || v.is_a?(Hash)) ? v.to_json : v]
+                          }]
+    get :show, encoded_params, session_for(:active)
+  end
+
+  test "visit non-public project as anonymous when anonymous browsing is enabled and expect page not found" do
+    Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+    get(:show, {id: api_fixture('groups')['aproject']['uuid']})
+    assert_response 404
+    assert_match(/log ?in/i, @response.body)
+  end
+
+  test "visit home page as anonymous when anonymous browsing is enabled and expect login" do
+    Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+    get(:index)
+    assert_response :redirect
+    assert_match /\/users\/welcome/, @response.redirect_url
+  end
+
+  [
+    nil,
+    :active,
+  ].each do |user|
+    test "visit public projects page when anon config is enabled, as user #{user}, and expect page" do
+      Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+
+      if user
+        get :public, {}, session_for(user)
+      else
+        get :public
+      end
+
+      assert_response :success
+      assert_not_nil assigns(:objects)
+      project_names = assigns(:objects).collect(&:name)
+      assert_includes project_names, 'Unrestricted public data'
+      assert_not_includes project_names, 'A Project'
+      refute_empty css_select('[href="/projects/public"]')
+    end
+  end
+
+  test "visit public projects page when anon config is not enabled as active user and expect 404" do
+    get :public, {}, session_for(:active)
+    assert_response 404
+  end
+
+  test "visit public projects page when anon config is enabled but public projects page is disabled as active user and expect 404" do
+    Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+    Rails.configuration.enable_public_projects_page = false
+    get :public, {}, session_for(:active)
+    assert_response 404
+  end
+
+  test "visit public projects page when anon config is not enabled as anonymous and expect login page" do
+    get :public
+    assert_response :redirect
+    assert_match /\/users\/welcome/, @response.redirect_url
+    assert_empty css_select('[href="/projects/public"]')
+  end
+
+  test "visit public projects page when anon config is enabled and public projects page is disabled and expect login page" do
+    Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+    Rails.configuration.enable_public_projects_page = false
+    get :index
+    assert_response :redirect
+    assert_match /\/users\/welcome/, @response.redirect_url
+    assert_empty css_select('[href="/projects/public"]')
+  end
+
+  test "visit public projects page when anon config is not enabled and public projects page is enabled and expect login page" do
+    Rails.configuration.enable_public_projects_page = true
+    get :index
+    assert_response :redirect
+    assert_match /\/users\/welcome/, @response.redirect_url
+    assert_empty css_select('[href="/projects/public"]')
+  end
+
+  test "find a project and edit its description" do
+    project = api_fixture('groups')['aproject']
+    use_token :active
+    found = Group.find(project['uuid'])
+    found.description = 'test description update'
+    found.save!
+    get(:show, {id: project['uuid']}, session_for(:active))
+    assert_includes @response.body, 'test description update'
+  end
+
+  test "find a project and edit description to textile description" do
+    project = api_fixture('groups')['aproject']
+    use_token :active
+    found = Group.find(project['uuid'])
+    found.description = '*test bold description for textile formatting*'
+    found.save!
+    get(:show, {id: project['uuid']}, session_for(:active))
+    assert_includes @response.body, '<strong>test bold description for textile formatting</strong>'
+  end
+
+  test "find a project and edit description to html description" do
+    project = api_fixture('groups')['aproject']
+    use_token :active
+    found = Group.find(project['uuid'])
+    found.description = '<b>Textile</b> description with link to home page <a href="/">take me home</a>.'
+    found.save!
+    get(:show, {id: project['uuid']}, session_for(:active))
+    assert_includes @response.body, '<b>Textile</b> description with link to home page <a href="/">take me home</a>.'
+  end
+
+  test "find a project and edit description to unsafe html description" do
+    project = api_fixture('groups')['aproject']
+    use_token :active
+    found = Group.find(project['uuid'])
+    found.description = 'Textile description with unsafe script tag <script language="javascript">alert("Hello there")</script>.'
+    found.save!
+    get(:show, {id: project['uuid']}, session_for(:active))
+    assert_includes @response.body, 'Textile description with unsafe script tag alert("Hello there").'
+  end
+
+  # Tests #14519
+  test "textile table on description renders as table html markup" do
+    use_token :active
+    project = api_fixture('groups')['aproject']
+    textile_table = <<EOT
+table(table table-striped table-condensed).
+|_. First Header |_. Second Header |
+|Content Cell |Content Cell |
+|Content Cell |Content Cell |
+EOT
+    found = Group.find(project['uuid'])
+    found.description = textile_table
+    found.save!
+    get(:show, {id: project['uuid']}, session_for(:active))
+    assert_includes @response.body, '<th>First Header'
+    assert_includes @response.body, '<td>Content Cell'
+  end
+
+  test "find a project and edit description to textile description with link to object" do
+    project = api_fixture('groups')['aproject']
+    use_token :active
+    found = Group.find(project['uuid'])
+
+    # uses 'Link to object' as a hyperlink for the object
+    found.description = '"Link to object":' + api_fixture('groups')['asubproject']['uuid']
+    found.save!
+    get(:show, {id: project['uuid']}, session_for(:active))
+
+    # check that the input was converted to textile rather than left as entered
+    refute_includes @response.body, '"Link to object"'
+    refute_empty css_select('[href="/groups/zzzzz-j7d0g-axqo7eu9pwvna1x"]')
+  end
+
+  test "project viewer can't see project sharing tab" do
+    project = api_fixture('groups')['aproject']
+    get(:show, {id: project['uuid']}, session_for(:project_viewer))
+    refute_includes @response.body, '<div id="Sharing"'
+    assert_includes @response.body, '<div id="Data_collections"'
+  end
+
+  [
+    'admin',
+    'active',
+  ].each do |username|
+    test "#{username} can see project sharing tab" do
+      project = api_fixture('groups')['aproject']
+      get(:show, {id: project['uuid']}, session_for(username))
+      assert_includes @response.body, '<div id="Sharing"'
+      assert_includes @response.body, '<div id="Data_collections"'
+    end
+  end
+
+  [
+    ['admin',true],
+    ['active',true],
+    ['project_viewer',false],
+  ].each do |user, can_move|
+    test "#{user} can move subproject from project #{can_move}" do
+      get(:show, {id: api_fixture('groups')['aproject']['uuid']}, session_for(user))
+      if can_move
+        assert_includes @response.body, 'Move project...'
+      else
+        refute_includes @response.body, 'Move project...'
+      end
+    end
+  end
+
+  [
+    [:admin, true],
+    [:active, false],
+  ].each do |user, expect_all_nodes|
+    test "in dashboard other index page links as #{user}" do
+      get :index, {}, session_for(user)
+
+      [["processes", "/all_processes"],
+       ["collections", "/collections"],
+      ].each do |target, path|
+        assert_includes @response.body, "href=\"#{path}\""
+        assert_includes @response.body, "All #{target}"
+      end
+
+      if expect_all_nodes
+        assert_includes @response.body, "href=\"/nodes\""
+        assert_includes @response.body, "All nodes"
+      else
+        assert_not_includes @response.body, "href=\"/nodes\""
+        assert_not_includes @response.body, "All nodes"
+      end
+    end
+  end
+
+  test "dashboard should show the correct status for processes" do
+    get :index, {}, session_for(:active)
+    assert_select 'div.panel-body.recent-processes' do
+      [
+        {
+          fixture: 'container_requests',
+          state: 'completed',
+          selectors: [['div.progress', false],
+                      ['span.label.label-success', true, 'Complete']]
+        },
+        {
+          fixture: 'container_requests',
+          state: 'uncommitted',
+          selectors: [['div.progress', false],
+                      ['span.label.label-default', true, 'Uncommitted']]
+        },
+        {
+          fixture: 'container_requests',
+          state: 'queued',
+          selectors: [['div.progress', false],
+                      ['span.label.label-default', true, 'Queued']]
+        },
+        {
+          fixture: 'container_requests',
+          state: 'running',
+          selectors: [['.label-info', true, 'Running']]
+        },
+        {
+          fixture: 'pipeline_instances',
+          state: 'new_pipeline',
+          selectors: [['div.progress', false],
+                      ['span.label.label-default', true, 'Not started']]
+        },
+        {
+          fixture: 'pipeline_instances',
+          state: 'pipeline_in_running_state',
+          selectors: [['.label-info', true, 'Running']]
+        },
+      ].each do |c|
+        uuid = api_fixture(c[:fixture])[c[:state]]['uuid']
+        assert_select "div.dashboard-panel-info-row.row-#{uuid}" do
+          if c.include? :selectors
+            c[:selectors].each do |selector, should_show, label|
+              assert_select selector, should_show, "UUID #{uuid} should#{should_show ? '' : ' not'} show '#{selector}'"
+              if should_show and not label.nil?
+                assert_select selector, label, "UUID #{uuid} state label should show #{label}"
+              end
+            end
+          end
+        end
+      end
+    end
+  end
+
+  test "visit a public project and verify the public projects page link exists" do
+    Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+    uuid = api_fixture('groups')['anonymously_accessible_project']['uuid']
+    get :show, {id: uuid}
+    project = assigns(:object)
+    assert_equal uuid, project['uuid']
+    refute_empty css_select("[href=\"/projects/#{project['uuid']}\"]")
+    assert_includes @response.body, "<a href=\"/projects/public\">Public Projects</a>"
+  end
+
+  test 'all_projects unaffected by params after use by ProjectsController (#6640)' do
+    @controller = ProjectsController.new
+    project_uuid = api_fixture('groups')['aproject']['uuid']
+    get :index, {
+      filters: [['uuid', '<', project_uuid]].to_json,
+      limit: 0,
+      offset: 1000,
+    }, session_for(:active)
+    assert_select "#projects-menu + ul li.divider ~ li a[href=/projects/#{project_uuid}]"
+  end
+
+  [
+    ["active", 5, ["aproject", "asubproject"], "anonymously_accessible_project"],
+    ["user1_with_load", 2, ["project_with_10_collections"], "project_with_2_pipelines_and_60_crs"],
+    ["admin", 5, ["anonymously_accessible_project", "subproject_in_anonymous_accessible_project"], "aproject"],
+  ].each do |user, page_size, tree_segment, unexpected|
+    # Note: this test is sensitive to database collation. It passes
+    # with en_US.UTF-8.
+    test "build my projects tree for #{user} user and verify #{unexpected} is omitted" do
+      use_token user
+
+      tree, _, _ = @controller.send(:my_wanted_projects_tree,
+                                    User.current,
+                                    page_size)
+
+      tree_segment_at_depth_1 = api_fixture('groups')[tree_segment[0]]
+      tree_segment_at_depth_2 = api_fixture('groups')[tree_segment[1]] if tree_segment[1]
+
+      node_depth = {}
+      tree.each do |x|
+        node_depth[x[:object]['uuid']] = x[:depth]
+      end
+
+      assert_equal(1, node_depth[tree_segment_at_depth_1['uuid']])
+      assert_equal(2, node_depth[tree_segment_at_depth_2['uuid']]) if tree_segment[1]
+
+      unexpected_project = api_fixture('groups')[unexpected]
+      assert_nil(node_depth[unexpected_project['uuid']], node_depth.inspect)
+    end
+  end
+
+  [
+    ["active", 1],
+    ["project_viewer", 1],
+    ["admin", 0],
+  ].each do |user, size|
+    test "starred projects for #{user}" do
+      use_token user
+      ctrl = ProjectsController.new
+      current_user = User.find(api_fixture('users')[user]['uuid'])
+      my_starred_project = ctrl.send :my_starred_projects, current_user
+      assert_equal(size, my_starred_project.andand.size)
+
+      ctrl2 = ProjectsController.new
+      current_user = User.find(api_fixture('users')[user]['uuid'])
+      my_starred_project = ctrl2.send :my_starred_projects, current_user
+      assert_equal(size, my_starred_project.andand.size)
+    end
+  end
+
+  test "unshare project and verify that it is no longer included in shared user's starred projects" do
+    # remove sharing link
+    use_token :system_user
+    Link.find(api_fixture('links')['share_starred_project_with_project_viewer']['uuid']).destroy
+
+    # verify that project is no longer included in starred projects
+    use_token :project_viewer
+    current_user = User.find(api_fixture('users')['project_viewer']['uuid'])
+    ctrl = ProjectsController.new
+    my_starred_project = ctrl.send :my_starred_projects, current_user
+    assert_equal(0, my_starred_project.andand.size)
+
+    # share it again
+    @controller = LinksController.new
+    post :create, {
+      link: {
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: api_fixture('groups')['starred_and_shared_active_user_project']['uuid'],
+        tail_uuid: api_fixture('users')['project_viewer']['uuid'],
+      },
+      format: :json
+    }, session_for(:system_user)
+
+    # verify that the project is again included in starred projects
+    use_token :project_viewer
+    ctrl = ProjectsController.new
+    my_starred_project = ctrl.send :my_starred_projects, current_user
+    assert_equal(1, my_starred_project.andand.size)
+  end
+end
diff --git a/apps/workbench/test/controllers/repositories_controller_test.rb b/apps/workbench/test/controllers/repositories_controller_test.rb
new file mode 100644 (file)
index 0000000..b81e238
--- /dev/null
@@ -0,0 +1,144 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/repository_stub_helper'
+require 'helpers/share_object_helper'
+
+class RepositoriesControllerTest < ActionController::TestCase
+  include RepositoryStubHelper
+  include ShareObjectHelper
+
+  [
+    :active,  # owner
+    :admin,
+  ].each do |user|
+    test "#{user} shares repository with a user and group" do
+      uuid_list = [api_fixture("groups")["future_project_viewing_group"]["uuid"],
+                   api_fixture("users")["future_project_user"]["uuid"]]
+      post(:share_with, {
+             id: api_fixture("repositories")["foo"]["uuid"],
+             uuids: uuid_list,
+             format: "json"},
+           session_for(user))
+      assert_response :success
+      assert_equal(uuid_list, json_response["success"])
+    end
+  end
+
+  test "user with repository read permission cannot add permissions" do
+    share_uuid = api_fixture("users")["project_viewer"]["uuid"]
+    post(:share_with, {
+           id: api_fixture("repositories")["arvados"]["uuid"],
+           uuids: [share_uuid],
+           format: "json"},
+         session_for(:spectator))
+    assert_response 422
+    assert(json_response["errors"].andand.
+             any? { |msg| msg.start_with?("#{share_uuid}: ") },
+           "JSON response missing properly formatted sharing error")
+  end
+
+  test "admin can_manage repository" do
+    assert user_can_manage(:admin, api_fixture("repositories")["foo"])
+  end
+
+  test "owner can_manage repository" do
+    assert user_can_manage(:active, api_fixture("repositories")["foo"])
+  end
+
+  test "viewer cannot manage repository" do
+    refute user_can_manage(:spectator, api_fixture("repositories")["arvados"])
+  end
+
+  [
+    [:active, ['#Sharing', '#Advanced']],
+    [:admin,  ['#Attributes', '#Sharing', '#Advanced']],
+  ].each do |user, expected_panes|
+    test "#{user} sees panes #{expected_panes}" do
+      get :show, {
+        id: api_fixture('repositories')['foo']['uuid']
+      }, session_for(user)
+      assert_response :success
+
+      css_select('[data-toggle=tab]').each do |pane|
+        pane_name = pane.attributes['href'].value
+        assert_includes expected_panes, pane_name
+      end
+    end
+  end
+
+  ### Browse repository content
+
+  [:active, :spectator].each do |user|
+    test "show tree to #{user}" do
+      reset_api_fixtures_after_test false
+      sha1, _, _ = stub_repo_content
+      get :show_tree, {
+        id: api_fixture('repositories')['foo']['uuid'],
+        commit: sha1,
+      }, session_for(user)
+      assert_response :success
+      assert_select 'tr td a', 'COPYING'
+      assert_select 'tr td', '625 bytes'
+      assert_select 'tr td a', 'apps'
+      assert_select 'tr td a', 'workbench'
+      assert_select 'tr td a', 'Gemfile'
+      assert_select 'tr td', '33.7 KiB'
+    end
+
+    test "show commit to #{user}" do
+      reset_api_fixtures_after_test false
+      sha1, commit, _ = stub_repo_content
+      get :show_commit, {
+        id: api_fixture('repositories')['foo']['uuid'],
+        commit: sha1,
+      }, session_for(user)
+      assert_response :success
+      assert_select 'pre', commit
+    end
+
+    test "show blob to #{user}" do
+      reset_api_fixtures_after_test false
+      sha1, _, filedata = stub_repo_content filename: 'COPYING'
+      get :show_blob, {
+        id: api_fixture('repositories')['foo']['uuid'],
+        commit: sha1,
+        path: 'COPYING',
+      }, session_for(user)
+      assert_response :success
+      assert_select 'pre', filedata
+    end
+  end
+
+  ['', '/'].each do |path|
+    test "show tree with path '#{path}'" do
+      reset_api_fixtures_after_test false
+      sha1, _, _ = stub_repo_content filename: 'COPYING'
+      get :show_tree, {
+        id: api_fixture('repositories')['foo']['uuid'],
+        commit: sha1,
+        path: path,
+      }, session_for(:active)
+      assert_response :success
+      assert_select 'tr td', 'COPYING'
+    end
+  end
+
+  test "get repositories lists linked as well as owned repositories" do
+    params = {
+      partial: :repositories_rows,
+      format: :json,
+    }
+    get :index, params, session_for(:active)
+    assert_response :success
+    repos = assigns(:objects)
+    assert repos
+    assert_not_empty repos, "my_repositories should not be empty"
+    repo_uuids = repos.map(&:uuid)
+    assert_includes repo_uuids, api_fixture('repositories')['repository2']['uuid']  # owned by active
+    assert_includes repo_uuids, api_fixture('repositories')['repository4']['uuid']  # shared with active
+    assert_includes repo_uuids, api_fixture('repositories')['arvados']['uuid']      # shared with all_users
+  end
+end
diff --git a/apps/workbench/test/controllers/search_controller_test.rb b/apps/workbench/test/controllers/search_controller_test.rb
new file mode 100644 (file)
index 0000000..c57d705
--- /dev/null
@@ -0,0 +1,69 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class SearchControllerTest < ActionController::TestCase
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
+  include Rails.application.routes.url_helpers
+
+  test 'Get search dialog' do
+    xhr :get, :choose, {
+      format: :js,
+      title: 'Search',
+      action_name: 'Show',
+      action_href: url_for(host: 'localhost', controller: :actions, action: :show),
+      action_data: {}.to_json,
+    }, session_for(:active)
+    assert_response :success
+  end
+
+  test 'Get search results for all projects' do
+    xhr :get, :choose, {
+      format: :json,
+      partial: true,
+    }, session_for(:active)
+    assert_response :success
+    assert_not_empty(json_response['content'],
+                     'search results for all projects should not be empty')
+  end
+
+  test 'Get search results for empty project' do
+    xhr :get, :choose, {
+      format: :json,
+      partial: true,
+      project_uuid: api_fixture('groups')['empty_project']['uuid'],
+    }, session_for(:active)
+    assert_response :success
+    assert_empty(json_response['content'],
+                 'search results for empty project should be empty')
+  end
+
+  test 'search results for aproject and verify recursive contents' do
+    xhr :get, :choose, {
+      format: :json,
+      partial: true,
+      project_uuid: api_fixture('groups')['aproject']['uuid'],
+    }, session_for(:active)
+    assert_response :success
+    assert_not_empty(json_response['content'],
+                     'search results for aproject should not be empty')
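+    # Collect the data-object-uuid of every div rendered in the results.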
+    items = []
+    json_response['content'].scan(/<div[^>]+>/) do |div_tag|
+      div_tag.scan(/\ data-object-uuid=\"(.*?)\"/).each do |uuid,|
+        items << uuid
+      end
+    end
+
+    assert_includes(items, api_fixture('collections')['collection_to_move_around_in_aproject']['uuid'])
+    assert_includes(items, api_fixture('groups')['asubproject']['uuid'])
+    assert_includes(items, api_fixture('collections')['baz_collection_name_in_asubproject']['uuid'])
+    assert_includes(items,
+      api_fixture('groups')['subproject_in_asubproject_with_same_name_as_one_in_active_user_home']['uuid'])
+  end
+end
diff --git a/apps/workbench/test/controllers/sessions_controller_test.rb b/apps/workbench/test/controllers/sessions_controller_test.rb
new file mode 100644 (file)
index 0000000..bd22cf5
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class SessionsControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/specimens_controller_test.rb b/apps/workbench/test/controllers/specimens_controller_test.rb
new file mode 100644 (file)
index 0000000..596d078
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class SpecimensControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/traits_controller_test.rb b/apps/workbench/test/controllers/traits_controller_test.rb
new file mode 100644 (file)
index 0000000..6c33c2f
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class TraitsControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/trash_items_controller_test.rb b/apps/workbench/test/controllers/trash_items_controller_test.rb
new file mode 100644 (file)
index 0000000..40a017b
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class TrashItemsControllerTest < ActionController::TestCase
+  test "untrash collection with same name as another collection" do
+    collection = api_fixture('collections')['trashed_collection_to_test_name_conflict_on_untrash']
+    items = [collection['uuid']]
+    post :untrash_items, {
+      selection: items,
+      format: :js
+    }, session_for(:active)
+
+    assert_response :success
+  end
+end
diff --git a/apps/workbench/test/controllers/user_agreements_controller_test.rb b/apps/workbench/test/controllers/user_agreements_controller_test.rb
new file mode 100644 (file)
index 0000000..1733058
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class UserAgreementsControllerTest < ActionController::TestCase
+  test 'User agreements page shows form if some user agreements are not signed' do
+    get :index, {}, session_for(:inactive)
+    assert_response 200
+  end
+
+  test 'User agreements page redirects if all user agreements signed' do
+    get :index, {return_to: root_path}, session_for(:active)
+    assert_response :redirect
+    assert_equal(root_url,
+                 @response.redirect_url,
+                 "Active user was not redirected to :return_to param")
+  end
+end
diff --git a/apps/workbench/test/controllers/users_controller_test.rb b/apps/workbench/test/controllers/users_controller_test.rb
new file mode 100644 (file)
index 0000000..393b864
--- /dev/null
@@ -0,0 +1,112 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class UsersControllerTest < ActionController::TestCase
+
+  test "valid token works in controller test" do
+    get :index, {}, session_for(:active)
+    assert_response :success
+  end
+
+  test "ignore previously valid token (for deleted user), don't crash" do
+    get :activity, {}, session_for(:valid_token_deleted_user)
+    assert_response :redirect
+    assert_match /^#{Rails.configuration.arvados_login_base}/, @response.redirect_url
+    assert_nil assigns(:my_jobs)
+    assert_nil assigns(:my_ssh_keys)
+  end
+
+  test "expired token redirects to api server login" do
+    get :show, {
+      id: api_fixture('users')['active']['uuid']
+    }, session_for(:expired_trustedclient)
+    assert_response :redirect
+    assert_match /^#{Rails.configuration.arvados_login_base}/, @response.redirect_url
+    assert_nil assigns(:my_jobs)
+    assert_nil assigns(:my_ssh_keys)
+  end
+
+  test "show welcome page if no token provided" do
+    get :index, {}
+    assert_response :redirect
+    assert_match /\/users\/welcome/, @response.redirect_url
+  end
+
+  test "'log in as user' feature uses a v2 token" do
+    post :sudo, {
+      id: api_fixture('users')['active']['uuid']
+    }, session_for('admin_trustedclient')
+    assert_response :redirect
+    assert_match /api_token=v2%2F/, @response.redirect_url
+  end
+
+  test "request shell access" do
+    user = api_fixture('users')['spectator']
+
+    ActionMailer::Base.deliveries = []
+
+    post :request_shell_access, {
+      id: user['uuid'],
+      format: 'js'
+    }, session_for(:spectator)
+    assert_response :success
+
+    full_name = "#{user['first_name']} #{user['last_name']}"
+    expected = "Shell account request from #{full_name} (#{user['email']}, #{user['uuid']})"
+    found_email = 0
+    ActionMailer::Base.deliveries.each do |email|
+      if email.subject.include?(expected)
+        found_email += 1
+        break
+      end
+    end
+    assert_equal 1, found_email, "Expected 1 email after requesting shell access"
+  end
+
+  [
+    'admin',
+    'active',
+  ].each do |username|
+    test "access users page as #{username} and verify show button is available" do
+      admin_user = api_fixture('users','admin')
+      active_user = api_fixture('users','active')
+      get :index, {}, session_for(username)
+      if username == 'admin'
+        assert_match /<a href="\/projects\/#{admin_user['uuid']}">Home<\/a>/, @response.body
+        assert_match /<a href="\/projects\/#{active_user['uuid']}">Home<\/a>/, @response.body
+        assert_match /href="\/users\/#{admin_user['uuid']}"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
+        assert_match /href="\/users\/#{active_user['uuid']}"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
+        assert_includes @response.body, admin_user['email']
+        assert_includes @response.body, active_user['email']
+      else
+        refute_match /Home<\/a>/, @response.body
+        refute_match /href="\/users\/#{admin_user['uuid']}"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
+        assert_match /href="\/users\/#{active_user['uuid']}"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
+        assert_includes @response.body, active_user['email']
+      end
+    end
+  end
+
+  [
+    'admin',
+    'active',
+  ].each do |username|
+    test "access settings drop down menu as #{username}" do
+      admin_user = api_fixture('users','admin')
+      active_user = api_fixture('users','active')
+      get :show, {
+        id: api_fixture('users')[username]['uuid']
+      }, session_for(username)
+      if username == 'admin'
+        assert_includes @response.body, admin_user['email']
+        refute_empty css_select('[id="system-menu"]')
+      else
+        assert_includes @response.body, active_user['email']
+        assert_empty css_select('[id="system-menu"]')
+      end
+    end
+  end
+end
diff --git a/apps/workbench/test/controllers/virtual_machines_controller_test.rb b/apps/workbench/test/controllers/virtual_machines_controller_test.rb
new file mode 100644 (file)
index 0000000..0f781b9
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class VirtualMachinesControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/work_units_controller_test.rb b/apps/workbench/test/controllers/work_units_controller_test.rb
new file mode 100644 (file)
index 0000000..a698b8d
--- /dev/null
@@ -0,0 +1,72 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class WorkUnitsControllerTest < ActionController::TestCase
+  # These tests don't do state-changing API calls.
+  # Save some time by skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
+  [
+    ['foo', 10, 25,
+      ['/pipeline_instances/zzzzz-d1hrv-1xfj6xkicf2muk2',
+       '/pipeline_instances/zzzzz-d1hrv-jobspeccomponts',
+       '/jobs/zzzzz-8i9sb-grx15v5mjnsyxk7'],
+      ['/pipeline_instances/zzzzz-d1hrv-1yfj61234abcdk3',
+       '/jobs/zzzzz-8i9sb-n7omg50bvt0m1nf',
+       '/container_requests/zzzzz-xvhdp-cr4completedcr2']],
+    ['pipeline_with_tagged_collection_input', 1, 1,
+      ['/pipeline_instances/zzzzz-d1hrv-1yfj61234abcdk3'],
+      ['/pipeline_instances/zzzzz-d1hrv-jobspeccomponts',
+       '/jobs/zzzzz-8i9sb-pshmckwoma9plh7',
+       '/jobs/zzzzz-8i9sb-n7omg50bvt0m1nf',
+       '/container_requests/zzzzz-xvhdp-cr4completedcr2']],
+    ['no_such_match', 0, 0,
+      [],
+      ['/pipeline_instances/zzzzz-d1hrv-jobspeccomponts',
+       '/jobs/zzzzz-8i9sb-pshmckwoma9plh7',
+       '/jobs/zzzzz-8i9sb-n7omg50bvt0m1nf',
+       '/container_requests/zzzzz-xvhdp-cr4completedcr2']],
+  ].each do |search_filter, expected_min, expected_max, expected, not_expected|
+    test "all_processes page for search filter '#{search_filter}'" do
+      work_units_index(filters: [['any','@@', search_filter]], show_children: true)
+      assert_response :success
+
+      # Verify that the expected number of processes is found
+      found_count = json_response['content'].scan('<tr').count
+      if expected_min == expected_max
+        assert_equal(expected_min, found_count,
+          "Did not find the expected number of items. Expected #{expected_min} and found #{found_count}")
+      else
+        assert_operator(found_count, :>=, expected_min,
+          "Found too few items. Expected at least #{expected_min} and found #{found_count}")
+        assert_operator(found_count, :<=, expected_max,
+          "Found too many items. Expected at most #{expected_max} and found #{found_count}")
+      end
+      end
+
+      # verify that all expected uuid links are found
+      expected.each do |link|
+        assert_match /href="#{link}"/, json_response['content']
+      end
+
+      # verify that none of the not_expected uuid links are found
+      not_expected.each do |link|
+        assert_no_match /href="#{link}"/, json_response['content']
+      end
+    end
+  end
+
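+  # Issues the index request the way the UI does: Array and Hash parameters
+  # must arrive as JSON strings, so they are encoded before the GET.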
+  def work_units_index params
+    params = {
+      partial: :all_processes_rows,
+      format: :json,
+    }.merge(params)
+    encoded_params = Hash[params.map { |k,v|
+                            [k, (v.is_a?(Array) || v.is_a?(Hash)) ? v.to_json : v]
+                          }]
+    get :index, encoded_params, session_for(:active)
+  end
+end
diff --git a/apps/workbench/test/controllers/workflows_controller_test.rb b/apps/workbench/test/controllers/workflows_controller_test.rb
new file mode 100644 (file)
index 0000000..d73809a
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class WorkflowsControllerTest < ActionController::TestCase
+  test "index" do
+    get :index, {}, session_for(:active)
+    assert_response :success
+    assert_includes @response.body, 'Valid workflow with no definition yaml'
+  end
+
+  test "show" do
+    use_token 'active'
+
+    wf = api_fixture('workflows')['workflow_with_input_specifications']
+
+    get :show, {id: wf['uuid']}, session_for(:active)
+    assert_response :success
+
+    assert_includes @response.body, "a short label for this parameter (optional)"
+    assert_includes @response.body, "href=\"#Advanced\""
+  end
+end
diff --git a/apps/workbench/test/diagnostics/container_request_test.rb b/apps/workbench/test/diagnostics/container_request_test.rb
new file mode 100644 (file)
index 0000000..47e7f78
--- /dev/null
@@ -0,0 +1,53 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'diagnostics_test_helper'
+
+# This test assumes that the configured workflow_uuid corresponds to a cwl workflow.
+# Ex: configure a workflow using the steps below and use the resulting workflow uuid:
+#   > cd arvados/doc/user/cwl/bwa-mem
+#   > arvados-cwl-runner --create-workflow bwa-mem.cwl bwa-mem-input.yml
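+#
+# Each entry under container_requests_to_test is expected to supply the keys
+# read below: workflow_uuid, input_paths, and max_wait_seconds. A sketch of
+# the expected shape (hypothetical values) in the diagnostics configuration:
+#
+#   container_requests_to_test:
+#     bwa-mem:
+#       workflow_uuid: zzzzz-7fd4e-xxxxxxxxxxxxxxx
+#       input_paths: [zzzzz-4zz18-xxxxxxxxxxxxxxx]
+#       max_wait_seconds: 600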
+
+class ContainerRequestTest < DiagnosticsTest
+  crs_to_test = Rails.configuration.container_requests_to_test.andand.keys
+
+  setup do
+    need_selenium 'to make websockets work'
+  end
+
+  crs_to_test.andand.each do |cr_to_test|
+    test "run container_request: #{cr_to_test}" do
+      cr_config = Rails.configuration.container_requests_to_test[cr_to_test]
+
+      visit_page_with_token 'active'
+
+      find('.btn', text: 'Run a process').click
+
+      within('.modal-dialog') do
+        page.find_field('Search').set cr_config['workflow_uuid']
+        wait_for_ajax
+        find('.selectable', text: 'bwa-mem.cwl').click
+        find('.btn', text: 'Next: choose inputs').click
+      end
+
+      page.assert_selector('a.disabled,button.disabled', text: 'Run') if cr_config['input_paths'].any?
+
+      # Choose input for the workflow
+      cr_config['input_paths'].each do |look_for|
+        select_input look_for
+      end
+      wait_for_ajax
+
+      # All needed inputs are already filled in. Run this workflow now
+      page.assert_no_selector('a.disabled,button.disabled', text: 'Run')
+      find('a,button', text: 'Run').click
+
+      # container_request is running. Run button is no longer available.
+      page.assert_no_selector('a', text: 'Run')
+
+      # Wait for container_request run to complete
+      wait_until_page_has 'completed', cr_config['max_wait_seconds']
+    end
+  end
+end
diff --git a/apps/workbench/test/diagnostics/pipeline_test.rb b/apps/workbench/test/diagnostics/pipeline_test.rb
new file mode 100644 (file)
index 0000000..d90d0cb
--- /dev/null
@@ -0,0 +1,56 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'diagnostics_test_helper'
+
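+# Like container_request_test.rb, this test is configuration-driven: each
+# entry under pipelines_to_test is expected to supply a template_uuid, an
+# input_paths list, and a max_wait_seconds limit.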
+class PipelineTest < DiagnosticsTest
+  pipelines_to_test = Rails.configuration.pipelines_to_test.andand.keys
+
+  setup do
+    need_selenium 'to make websockets work'
+  end
+
+  pipelines_to_test.andand.each do |pipeline_to_test|
+    test "run pipeline: #{pipeline_to_test}" do
+      visit_page_with_token 'active'
+      pipeline_config = Rails.configuration.pipelines_to_test[pipeline_to_test]
+
+      # Search for tutorial template
+      find '.navbar-fixed-top'
+      within('.navbar-fixed-top') do
+        page.find_field('search this site').set pipeline_config['template_uuid']
+        page.find('.glyphicon-search').click
+      end
+
+      # Run the pipeline
+      assert_triggers_dom_event 'shown.bs.modal' do
+        find('a,button', text: 'Run').click
+      end
+
+      # Choose project
+      within('.modal-dialog') do
+        find('.selectable', text: 'Home').click
+        find('button', text: 'Choose').click
+      end
+
+      page.assert_selector('a.disabled,button.disabled', text: 'Run') if pipeline_config['input_paths'].any?
+
+      # Choose input for the pipeline
+      pipeline_config['input_paths'].each do |look_for|
+        select_input look_for
+      end
+      wait_for_ajax
+
+      # All needed inputs are filled in. Run this pipeline now
+      find('a,button', text: 'Components').click
+      find('a,button', text: 'Run').click
+
+      # Pipeline is running. We have a "Pause" button instead now.
+      page.assert_selector 'a,button', text: 'Pause'
+
+      # Wait for pipeline run to complete
+      wait_until_page_has 'completed', pipeline_config['max_wait_seconds']
+    end
+  end
+end
diff --git a/apps/workbench/test/diagnostics_test_helper.rb b/apps/workbench/test/diagnostics_test_helper.rb
new file mode 100644 (file)
index 0000000..d53753d
--- /dev/null
@@ -0,0 +1,82 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+require 'yaml'
+
+# Diagnostics tests are executed when "RAILS_ENV=diagnostics" is used.
+# When "RAILS_ENV=test" is used, tests in the "diagnostics" directory
+# will not be executed.
+
+# Command to run diagnostics tests:
+#   RAILS_ENV=diagnostics bundle exec rake TEST=test/diagnostics/**/*.rb
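+#
+# To run a single diagnostics suite, point TEST at one file, e.g.:
+#   RAILS_ENV=diagnostics bundle exec rake TEST=test/diagnostics/pipeline_test.rb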
+
+class DiagnosticsTest < ActionDispatch::IntegrationTest
+
+  # Prepends workbench URL to the path provided and visits that page
+  # Expects path parameters such as "/collections/<uuid>"
+  def visit_page_with_token token_name, path='/'
+    workbench_url = Rails.configuration.arvados_workbench_url
+    workbench_url = workbench_url.chomp('/')
+    tokens = Rails.configuration.user_tokens
+    visit page_with_token(tokens[token_name], (workbench_url + path))
+  end
+
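+  # Fills in the first pending 'Choose' input in the run dialog. look_for is
+  # either a collection uuid, or 'uuid/path' to select one file within the
+  # collection (e.g. 'zzzzz-4zz18-xxxxxxxxxxxxxxx/input.txt', a hypothetical
+  # value for illustration).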
+  def select_input look_for
+    inputs_needed = page.all('.btn', text: 'Choose')
+    return if inputs_needed.empty?
+
+    look_for_uuid = nil
+    look_for_file = nil
+    if look_for.andand.index('/').andand.>0
+      partitions = look_for.partition('/')
+      look_for_uuid = partitions[0]
+      look_for_file = partitions[2]
+    else
+      look_for_uuid = look_for
+      look_for_file = nil
+    end
+
+    assert_triggers_dom_event 'shown.bs.modal' do
+      inputs_needed[0].click
+    end
+
+    within('.modal-dialog') do
+      if look_for_uuid
+        fill_in('Search', with: look_for_uuid, exact: true)
+        wait_for_ajax
+      end
+
+      page.all('.selectable').first.click
+      wait_for_ajax
+      # The ajax reload wipes out the input selection after search results load, so select again.
+      page.all('.selectable').first.click
+      wait_for_ajax
+
+      if look_for_file
+        wait_for_ajax
+        within('.collection_files_name', text: look_for_file) do
+          find('.fa-file').click
+        end
+      end
+
+      find('button', text: 'OK').click
+      wait_for_ajax
+    end
+  end
+
+  # Polls the page for text_to_look_for, for up to max_time seconds
+  def wait_until_page_has text_to_look_for, max_time=30
+    max_time = 30 if (!max_time || (max_time.to_s != max_time.to_i.to_s))
+    text_found = false
+    Timeout.timeout(max_time) do
+      until text_found do
+        visit_page_with_token 'active', current_path
+        text_found = has_text?(text_to_look_for)
+      end
+    end
+  end
+end
diff --git a/apps/workbench/test/fixtures/.gitkeep b/apps/workbench/test/fixtures/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/test/helpers/collections_helper_test.rb b/apps/workbench/test/helpers/collections_helper_test.rb
new file mode 100644 (file)
index 0000000..e02b2ab
--- /dev/null
@@ -0,0 +1,44 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class CollectionsHelperTest < ActionView::TestCase
+  reset_api_fixtures :after_each_test, false
+
+  [
+    ["filename.csv", true],
+    ["filename.fa", true],
+    ["filename.fasta", true],
+    ["filename.seq", true],   # another fasta extension
+    ["filename.go", true],
+    ["filename.htm", true],
+    ["filename.html", true],
+    ["filename.json", true],
+    ["filename.md", true],
+    ["filename.pdf", true],
+    ["filename.py", true],
+    ["filename.R", true],
+    ["filename.sam", true],
+    ["filename.sh", true],
+    ["filename.txt", true],
+    ["filename.tiff", true],
+    ["filename.tsv", true],
+    ["filename.vcf", true],
+    ["filename.xml", true],
+    ["filename.xsl", true],
+    ["filename.yml", true],
+    ["filename.yaml", true],
+    ["filename.bed", true],
+    ["filename.cwl", true],
+
+    ["filename.bam", false],
+    ["filename.tar", false],
+    ["filename", false],
+  ].each do |file_name, preview_allowed|
+    test "verify '#{file_name}' is allowed for preview #{preview_allowed}" do
+      assert_equal preview_allowed, preview_allowed_for(file_name)
+    end
+  end
+end
diff --git a/apps/workbench/test/helpers/download_helper.rb b/apps/workbench/test/helpers/download_helper.rb
new file mode 100644 (file)
index 0000000..c8b5712
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module DownloadHelper
+  module_function
+
+  def path
+    Rails.root.join 'tmp', 'downloads'
+  end
+
+  def clear
+    if File.exist? path
+      FileUtils.rm_r path
+    end
+    begin
+      Dir.mkdir path
+    rescue Errno::EEXIST
+    end
+  end
+
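+  # Returns finished downloads only, skipping the browser's in-progress
+  # *.part files.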
+  def done
+    Dir[path.join '*'].reject do |f|
+      /\.part$/ =~ f
+    end
+  end
+end
diff --git a/apps/workbench/test/helpers/fake_websocket_helper.rb b/apps/workbench/test/helpers/fake_websocket_helper.rb
new file mode 100644 (file)
index 0000000..a62775c
--- /dev/null
@@ -0,0 +1,22 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module FakeWebsocketHelper
+  def use_fake_websocket_driver
+    Capybara.current_driver = :poltergeist_with_fake_websocket
+  end
+
+  def fake_websocket_event(logdata)
+    stamp = Time.now.utc.in_time_zone.as_json
+    defaults = {
+      owner_uuid: api_fixture('users')['system_user']['uuid'],
+      event_at: stamp,
+      created_at: stamp,
+      updated_at: stamp,
+    }
+    event = {data: Oj.dump(defaults.merge(logdata), mode: :compat)}
+    script = '$(window).data("arv-websocket").onmessage('+Oj.dump(event, mode: :compat)+');'
+    page.evaluate_script(script)
+  end
+end
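+
+# A minimal usage sketch (hypothetical test, not part of this suite): a test
+# that mixes in FakeWebsocketHelper can push a synthetic log event into the
+# page and then assert on the UI's reaction, along these lines:
+#
+#   class FakeLogTest < ActionDispatch::IntegrationTest
+#     include FakeWebsocketHelper
+#
+#     test 'page updates when a fake log event arrives' do
+#       use_fake_websocket_driver
+#       visit page_with_token('admin', '/jobs/zzzzz-8i9sb-xxxxxxxxxxxxxxx')
+#       fake_websocket_event(object_uuid: 'zzzzz-8i9sb-xxxxxxxxxxxxxxx',
+#                            event_type: 'stderr',
+#                            properties: {text: "hello\n"})
+#       assert_text 'hello'
+#     end
+#   end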
diff --git a/apps/workbench/test/helpers/manifest_examples.rb b/apps/workbench/test/helpers/manifest_examples.rb
new file mode 120000 (symlink)
index 0000000..cb908ef
--- /dev/null
@@ -0,0 +1 @@
+../../../../services/api/test/helpers/manifest_examples.rb
\ No newline at end of file
diff --git a/apps/workbench/test/helpers/pipeline_instances_helper_test.rb b/apps/workbench/test/helpers/pipeline_instances_helper_test.rb
new file mode 100644 (file)
index 0000000..413df55
--- /dev/null
@@ -0,0 +1,42 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PipelineInstancesHelperTest < ActionView::TestCase
+  test "one" do
+    r = [{started_at: 1, finished_at: 3}]
+    assert_equal 2, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 5}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 2}, {started_at: 3, finished_at: 5}]
+    assert_equal 3, determine_wallclock_runtime(r)
+
+    r = [{started_at: 3, finished_at: 5}, {started_at: 1, finished_at: 2}]
+    assert_equal 3, determine_wallclock_runtime(r)
+
+    r = [{started_at: 3, finished_at: 5}, {started_at: 1, finished_at: 2},
+         {started_at: 2, finished_at: 4}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 5}, {started_at: 2, finished_at: 3}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 3, finished_at: 5}, {started_at: 1, finished_at: 4}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 4}, {started_at: 3, finished_at: 5}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 4}, {started_at: 3, finished_at: 5},
+         {started_at: 5, finished_at: 8}]
+    assert_equal 7, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 4}, {started_at: 3, finished_at: 5},
+         {started_at: 6, finished_at: 8}]
+    assert_equal 6, determine_wallclock_runtime(r)
+  end
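+
+  # The expectations above treat each {started_at, finished_at} pair as a
+  # time interval and sum the length of the intervals' union. A minimal
+  # sketch of that computation (an assumption about what
+  # determine_wallclock_runtime does, not the helper's actual source):
+  #
+  #   def union_runtime(components)
+  #     total, finish = 0, nil
+  #     components.sort_by { |c| c[:started_at] }.each do |c|
+  #       start = (finish && finish > c[:started_at]) ? finish : c[:started_at]
+  #       total += c[:finished_at] - start if c[:finished_at] > start
+  #       finish = c[:finished_at] if finish.nil? || c[:finished_at] > finish
+  #     end
+  #     total
+  #   end
+  #
+  # e.g. union_runtime([{started_at: 1, finished_at: 4},
+  #                     {started_at: 3, finished_at: 5}]) #=> 4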
+end
diff --git a/apps/workbench/test/helpers/repository_stub_helper.rb b/apps/workbench/test/helpers/repository_stub_helper.rb
new file mode 100644 (file)
index 0000000..419de8c
--- /dev/null
@@ -0,0 +1,37 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module RepositoryStubHelper
+  # Supply some fake git content.
+  def stub_repo_content opts={}
+    fakesha1 = opts[:sha1] || 'abcdefabcdefabcdefabcdefabcdefabcdefabcd'
+    fakefilename = opts[:filename] || 'COPYING'
+    fakefilesrc = File.expand_path('../../../../../'+fakefilename, __FILE__)
+    fakefile = File.read fakefilesrc
+    fakecommit = <<-EOS
+      commit abcdefabcdefabcdefabcdefabcdefabcdefabcd
+      Author: Fake R <fake@example.com>
+      Date:   Wed Apr 1 11:59:59 2015 -0400
+
+          It's a fake commit.
+
+    EOS
+    Repository.any_instance.stubs(:ls_tree_lr).with(fakesha1).returns <<-EOS
+      100644 blob eec475862e6ec2a87554e0fca90697e87f441bf5     226    .gitignore
+      100644 blob acbd7523ed49f01217874965aa3180cccec89d61     625    COPYING
+      100644 blob d645695673349e3947e8e5ae42332d0ac3164cd7   11358    LICENSE-2.0.txt
+      100644 blob c7a36c355b4a2b94dfab45c9748330022a788c91     622    README
+      100644 blob dba13ed2ddf783ee8118c6a581dbf75305f816a3   34520    agpl-3.0.txt
+      100644 blob 9bef02bbfda670595750fd99a4461005ce5b8f12     695    apps/workbench/.gitignore
+      100644 blob b51f674d90f68bfb50d9304068f915e42b04aea4    2249    apps/workbench/Gemfile
+      100755 blob cdd5ebaff27781f93ab85e484410c0ce9e97770f    1012    crunch_scripts/hash
+    EOS
+    Repository.any_instance.
+      stubs(:cat_file).with(fakesha1, fakefilename).returns fakefile
+    Repository.any_instance.
+      stubs(:show).with(fakesha1).returns fakecommit
+    return fakesha1, fakecommit, fakefile
+  end
+end
diff --git a/apps/workbench/test/helpers/search_helper_test.rb b/apps/workbench/test/helpers/search_helper_test.rb
new file mode 100644 (file)
index 0000000..acf7390
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class SearchHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/helpers/share_object_helper.rb b/apps/workbench/test/helpers/share_object_helper.rb
new file mode 100644 (file)
index 0000000..454cb2c
--- /dev/null
@@ -0,0 +1,84 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module ShareObjectHelper
+  def show_object_using(auth_key, type, key, expect)
+    obj_uuid = api_fixture(type)[key]['uuid']
+    visit(page_with_token(auth_key, "/#{type}/#{obj_uuid}"))
+    assert(page.has_text?(expect), "expected string not found: #{expect}")
+  end
+
+  def share_rows
+    find('#object_sharing').all('tr')
+  end
+
+  def add_share_and_check(share_type, name, obj=nil)
+    assert(page.has_no_text?(name), "project is already shared with #{name}")
+    start_share_count = share_rows.size
+    click_on("Share with #{share_type}")
+    within(".modal-container") do
+      # Order is important here: we should find something that appears in the
+      # modal before we make any assertions about what's not in the modal.
+      # Otherwise, the not-included assertions might falsely pass because
+      # the modal hasn't loaded yet.
+      find(".selectable", text: name).click
+      assert_text "Only #{share_type} you are allowed to access are shown"
+      assert(has_no_selector?(".modal-dialog-preview-pane"),
+             "preview pane available in sharing dialog")
+      if share_type == 'users' and obj and obj['email']
+        assert(page.has_text?(obj['email']), "Did not find user's email")
+      end
+      assert_raises(Capybara::ElementNotFound,
+                    "Projects pulldown available from sharing dialog") do
+        click_on "All projects"
+      end
+      click_on "Add"
+    end
+    # The admin case takes many times longer than the normal-user case; the reason is unclear
+    using_wait_time(30) do
+      assert(page.has_link?(name),
+             "new share #{name} was not added to sharing table")
+      assert_equal(start_share_count + 1, share_rows.size,
+                   "new share did not add row to sharing table")
+    end
+  end
+
+  def modify_share_and_check(name)
+    start_rows = share_rows
+    # We assume rows have already been rendered and can be checked quickly
+    link_row = start_rows.select { |row| row.has_text?(name, wait: 0.1) }
+    assert_equal(1, link_row.size, "row with new permission not found")
+    within(link_row.first) do
+      click_on("Read")
+      select("Write", from: "share_change_level")
+      click_on("editable-submit")
+      assert(has_link?("Write"),
+             "failed to change access level on new share")
+      click_on "Revoke"
+      if Capybara.current_driver == :selenium
+        page.driver.browser.switch_to.alert.accept
+      else
+        # poltergeist returns true for confirm(), so we don't need to accept.
+      end
+    end
+    # Ensure revoked permission disappears from page.
+    using_wait_time(Capybara.default_max_wait_time * 3) do
+      assert_no_text name
+      assert_equal(start_rows.size - 1, share_rows.size,
+                   "revoking share did not remove row from sharing table")
+    end
+  end
+
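+  # Fetches the show page as user_sym and returns the controller's
+  # user_is_manager flag; non-managers must not be given share links.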
+  def user_can_manage(user_sym, fixture)
+    get(:show, {id: fixture["uuid"]}, session_for(user_sym))
+    is_manager = assigns(:user_is_manager)
+    assert_not_nil(is_manager, "user_is_manager flag not set")
+    if not is_manager
+      assert_empty(assigns(:share_links),
+                   "non-manager has share links set")
+    end
+    is_manager
+  end
+
+end
diff --git a/apps/workbench/test/helpers/time_block.rb b/apps/workbench/test/helpers/time_block.rb
new file mode 120000 (symlink)
index 0000000..afb43e7
--- /dev/null
@@ -0,0 +1 @@
+../../../../services/api/test/helpers/time_block.rb
\ No newline at end of file
diff --git a/apps/workbench/test/integration/.gitkeep b/apps/workbench/test/integration/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/test/integration/ajax_errors_test.rb b/apps/workbench/test/integration/ajax_errors_test.rb
new file mode 100644 (file)
index 0000000..b3b1f1f
--- /dev/null
@@ -0,0 +1,62 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class AjaxErrorsTest < ActionDispatch::IntegrationTest
+  setup do
+    # Regrettably...
+    need_selenium 'to assert_text in iframe'
+  end
+
+  test 'load pane with deleted session' do
+    skip 'unreliable test'
+    # Simulate loading a page in browser-tab A, hitting "Log out" in
+    # browser-tab B, then returning to browser-tab A and choosing a
+    # different tab. (Automatic tab refreshes will behave similarly.)
+    visit page_with_token('active', '/projects/' + api_fixture('groups')['aproject']['uuid'])
+    ActionDispatch::Request::Session.any_instance.stubs(:[]).returns(nil)
+    click_link "Subprojects"
+    wait_for_ajax
+    assert_no_double_layout
+    assert_selector 'a,button', text: 'Reload tab'
+    assert_selector '.pane-error-display'
+    page.driver.browser.switch_to.frame 0
+    assert_text 'You are not logged in.'
+  end
+
+  test 'load pane with expired token' do
+    skip 'unreliable test'
+    # Similar to 'deleted session'. Here, the session cookie is still
+    # alive, but it contains a token which has expired. This uses a
+    # different code path because Workbench cannot detect that
+    # anything is amiss until it actually uses the token in an API
+    # request.
+    visit page_with_token('active', '/projects/' + api_fixture('groups')['aproject']['uuid'])
+    use_token :active_trustedclient do
+      # Go behind Workbench's back to expire the "active" token.
+      token = api_fixture('api_client_authorizations')['active']['api_token']
+      auth = ApiClientAuthorization.find(token)
+      auth.update_attributes(expires_at: '1999-12-31T23:59:59Z')
+    end
+    click_link "Subprojects"
+    wait_for_ajax
+    assert_no_double_layout
+    assert_selector 'a,button', text: 'Reload tab'
+    assert_selector '.pane-error-display'
+    page.driver.browser.switch_to.frame 0
+    assert_text 'You are not logged in.'
+  end
+
+  protected
+
+  def assert_no_double_layout
+    # Check we're not rendering a full page layout within a tab
+    # pane. Bootstrap responsive layouts require exactly one
+    # div.container-fluid. Checking "body body" would be more generic,
+    # but doesn't work when the browser/driver automatically collapses
+    # syntactically invalid tags.
+    assert_no_selector '.container-fluid .container-fluid'
+  end
+end
diff --git a/apps/workbench/test/integration/anonymous_access_test.rb b/apps/workbench/test/integration/anonymous_access_test.rb
new file mode 100644 (file)
index 0000000..8d772b0
--- /dev/null
@@ -0,0 +1,341 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class AnonymousAccessTest < ActionDispatch::IntegrationTest
+  include KeepWebConfig
+
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
+  setup do
+    need_javascript
+    Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+  end
+
+  PUBLIC_PROJECT = "/projects/#{api_fixture('groups')['anonymously_accessible_project']['uuid']}"
+
+  def verify_site_navigation_anonymous_enabled user, is_active
+    if user
+      if user['is_active']
+        assert_text 'Unrestricted public data'
+        assert_selector 'a', text: 'Projects'
+        page.find("#projects-menu").click
+        within('.dropdown-menu') do
+          assert_selector 'a', text: 'Search all projects'
+          assert_selector "a[href=\"/projects/public\"]", text: 'Browse public projects'
+          assert_selector 'a', text: 'Add a new project'
+          assert_selector 'li[class="dropdown-header"]', text: 'My projects'
+        end
+      else
+        assert_text 'indicate that you have read and accepted the user agreement'
+      end
+      within('.navbar-fixed-top') do
+        assert_selector 'a', text: Rails.configuration.site_name.downcase
+        assert(page.has_link?("notifications-menu"), 'no user menu')
+        page.find("#notifications-menu").click
+        within('.dropdown-menu') do
+          assert_selector 'a', text: 'Log out'
+        end
+      end
+    else  # anonymous
+      assert_text 'Unrestricted public data'
+      within('.navbar-fixed-top') do
+        assert_text Rails.configuration.site_name.downcase
+        assert_no_selector 'a', text: Rails.configuration.site_name.downcase
+        assert_selector 'a', text: 'Log in'
+        assert_selector 'a', text: 'Browse public projects'
+      end
+    end
+  end
+
+  [
+    [nil, nil, false, false],
+    ['inactive', api_fixture('users')['inactive'], false, false],
+    ['active', api_fixture('users')['active'], true, true],
+  ].each do |token, user, is_active|
+    test "visit public project as user #{token.inspect} when anonymous browsing is enabled" do
+      if !token
+        visit PUBLIC_PROJECT
+      else
+        visit page_with_token(token, PUBLIC_PROJECT)
+      end
+
+      verify_site_navigation_anonymous_enabled user, is_active
+    end
+  end
+
+  test "selection actions when anonymous user accesses shared project" do
+    visit PUBLIC_PROJECT
+
+    assert_selector 'a', text: 'Description'
+    assert_selector 'a', text: 'Data collections'
+    assert_selector 'a', text: 'Pipelines and processes'
+    assert_selector 'a', text: 'Pipeline templates'
+    assert_selector 'a', text: 'Subprojects'
+    assert_selector 'a', text: 'Advanced'
+    assert_no_selector 'a', text: 'Other objects'
+    assert_no_selector 'button', text: 'Add data'
+
+    click_link 'Data collections'
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li', text: 'Compare selected'
+      assert_no_selector 'li', text: 'Create new collection with selected collections'
+      assert_no_selector 'li', text: 'Copy selected'
+      assert_no_selector 'li', text: 'Move selected'
+      assert_no_selector 'li', text: 'Remove selected'
+    end
+  end
+
+  test "anonymous user accesses data collections tab in shared project" do
+    visit PUBLIC_PROJECT
+    click_link 'Data collections'
+    collection = api_fixture('collections')['user_agreement_in_anonymously_accessible_project']
+    assert_text 'GNU General Public License'
+
+    assert_selector 'a', text: 'Data collections'
+
+    # click on show collection
+    within "tr[data-object-uuid=\"#{collection['uuid']}\"]" do
+      click_link 'Show'
+    end
+
+    # in collection page
+    assert_no_selector 'input', text: 'Create sharing link'
+    assert_no_text 'Sharing and permissions'
+    assert_no_selector 'a', text: 'Upload'
+    assert_no_selector 'button', text: 'Selection'
+
+    within '#collection_files tr,li', text: 'GNU_General_Public_License,_version_3.pdf' do
+      assert page.has_no_selector?('[value*="GNU_General_Public_License"]')
+      find 'a[title~=View]'
+      find 'a[title~=Download]'
+    end
+  end
+
+  test 'view file' do
+    use_keep_web_config
+
+    magic = rand(2**512).to_s 36
+    owner = api_fixture('groups')['anonymously_accessible_project']['uuid']
+    col = upload_data_and_get_collection(magic, 'admin', "Hello\\040world.txt", owner)
+    visit '/collections/' + col.uuid
+    find('tr,li', text: 'Hello world.txt').
+      find('a[title~=View]').click
+    assert_text magic
+  end
+
+  [
+    'running anonymously accessible cr',
+    'pipelineInstance'
+  ].each do |proc|
+    test "anonymous user accesses pipelines and processes tab in shared project and clicks on '#{proc}'" do
+      visit PUBLIC_PROJECT
+      click_link 'Data collections'
+      assert_text 'GNU General Public License'
+
+      click_link 'Pipelines and processes'
+      assert_text 'Pipeline in publicly accessible project'
+
+      if proc.include? 'pipeline'
+        verify_pipeline_instance_row
+      else
+        verify_container_request_row proc
+      end
+    end
+  end
+
+  def verify_container_request_row look_for
+    within first('tr', text: look_for) do
+      click_link 'Show'
+    end
+    assert_text 'Public Projects Unrestricted public data'
+    assert_text 'command'
+
+    assert_text 'zzzzz-tpzed-xurymjxw79nv3jz' # modified by user
+    assert_no_selector 'a', text: 'zzzzz-tpzed-xurymjxw79nv3jz'
+    assert_no_selector 'button', text: 'Cancel'
+  end
+
+  def verify_pipeline_instance_row
+    within first('tr[data-kind="arvados#pipelineInstance"]') do
+      assert_text 'Pipeline in publicly accessible project'
+      click_link 'Show'
+    end
+
+    # in pipeline instance page
+    assert_text 'Public Projects Unrestricted public data'
+    assert_text 'This pipeline is complete'
+    assert_no_selector 'a', text: 'Re-run with latest'
+    assert_no_selector 'a', text: 'Re-run options'
+  end
+
+  [
+    'pipelineTemplate',
+    'workflow'
+  ].each do |type|
+    test "anonymous user accesses pipeline templates tab in shared project and click on #{type}" do
+      visit PUBLIC_PROJECT
+      click_link 'Data collections'
+      assert_text 'GNU General Public License'
+
+      assert_selector 'a', text: 'Pipeline templates'
+
+      click_link 'Pipeline templates'
+      assert_text 'Pipeline template in publicly accessible project'
+      assert_text 'Workflow with input specifications'
+
+      if type == 'pipelineTemplate'
+        within first('tr[data-kind="arvados#pipelineTemplate"]') do
+          click_link 'Show'
+        end
+
+        # in template page
+        assert_text 'Public Projects Unrestricted public data'
+        assert_text 'script version'
+        assert_no_selector 'a', text: 'Run this pipeline'
+      else
+        within first('tr[data-kind="arvados#workflow"]') do
+          click_link 'Show'
+        end
+
+        # in workflow page
+        assert_text 'Public Projects Unrestricted public data'
+        assert_text 'this workflow has inputs specified'
+      end
+    end
+  end
+
+  test "anonymous user accesses subprojects tab in shared project" do
+    visit PUBLIC_PROJECT + '#Subprojects'
+
+    assert_text 'Subproject in anonymous accessible project'
+
+    within first('tr[data-kind="arvados#group"]') do
+      click_link 'Show'
+    end
+
+    # in subproject
+    assert_text 'Description for subproject in anonymous accessible project'
+  end
+
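+  # Each entry is [fixture name, whether the objects the page links
+  # to are readable, optional user to log in as].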
+  [
+    ['pipeline_in_publicly_accessible_project', true],
+    ['pipeline_in_publicly_accessible_project_but_other_objects_elsewhere', false],
+    ['pipeline_in_publicly_accessible_project_but_other_objects_elsewhere', false, 'spectator'],
+    ['pipeline_in_publicly_accessible_project_but_other_objects_elsewhere', true, 'admin'],
+
+    ['completed_job_in_publicly_accessible_project', true],
+    ['running_job_in_publicly_accessible_project', true],
+    ['job_in_publicly_accessible_project_but_other_objects_elsewhere', false],
+  ].each do |fixture, objects_readable, user=nil|
+    test "access #{fixture} in public project with objects readable=#{objects_readable} with user #{user}" do
+      pipeline_page = fixture.include?('pipeline')
+
+      if pipeline_page
+        object = api_fixture('pipeline_instances')[fixture]
+        page_link = "/pipeline_instances/#{object['uuid']}"
+        expect_log_text = "Log for foo"
+      else      # job
+        object = api_fixture('jobs')[fixture]
+        page_link = "/jobs/#{object['uuid']}"
+        expect_log_text = "stderr crunchstat"
+      end
+
+      if user
+        visit page_with_token user, page_link
+      else
+        visit page_link
+      end
+
+      # click job link, if in pipeline page
+      click_link 'foo' if pipeline_page
+
+      if objects_readable
+        assert_selector 'a[href="#Log"]', text: 'Log'
+        assert_no_selector 'a[data-toggle="disabled"]', text: 'Log'
+        assert_no_text 'zzzzz-4zz18-bv31uwvy3neko21 (Unavailable)'
+        if pipeline_page
+          assert_text 'This pipeline was created from'
+          job_id = object['components']['foo']['job']['uuid']
+          assert_selector 'a', text: job_id
+          assert_selector "a[href=\"/jobs/#{job_id}#Log\"]", text: 'Log'
+
+          # We'd like to test the Log tab on job pages too, but we can't right
+          # now because Poltergeist 1.x doesn't support JavaScript's
+          # Function.prototype.bind, which is used by job_log_graph.js.
+          find(:xpath, "//a[@href='#Log']").click
+          assert_text expect_log_text
+        end
+      else
+        assert_selector 'a[data-toggle="disabled"]', text: 'Log'
+        assert_text 'zzzzz-4zz18-bv31uwvy3neko21 (Unavailable)'
+        assert_text object['job']
+        if pipeline_page
+          assert_no_text 'This pipeline was created from'  # template is not readable
+          assert_no_selector 'a', text: object['components']['foo']['job']['uuid']
+          assert_text 'Log unavailable'
+        end
+        find(:xpath, "//a[@href='#Log']").click
+        assert_text 'zzzzz-4zz18-bv31uwvy3neko21 (Unavailable)'
+        assert_no_text expect_log_text
+      end
+    end
+  end
+
+  [
+    ['new_pipeline_in_publicly_accessible_project', true],
+    ['new_pipeline_in_publicly_accessible_project', true, 'spectator'],
+    ['new_pipeline_in_publicly_accessible_project_but_other_objects_elsewhere', false],
+    ['new_pipeline_in_publicly_accessible_project_but_other_objects_elsewhere', false, 'spectator'],
+    ['new_pipeline_in_publicly_accessible_project_but_other_objects_elsewhere', true, 'admin'],
+    ['new_pipeline_in_publicly_accessible_project_with_dataclass_file_and_other_objects_elsewhere', false],
+    ['new_pipeline_in_publicly_accessible_project_with_dataclass_file_and_other_objects_elsewhere', false, 'spectator'],
+    ['new_pipeline_in_publicly_accessible_project_with_dataclass_file_and_other_objects_elsewhere', true, 'admin'],
+  ].each do |fixture, objects_readable, user=nil|
+    test "access #{fixture} in public project with objects readable=#{objects_readable} with user #{user}" do
+      object = api_fixture('pipeline_instances')[fixture]
+      page = "/pipeline_instances/#{object['uuid']}"
+      if user
+        visit page_with_token user, page
+      else
+        visit page
+      end
+
+      # click Components tab
+      click_link 'Components'
+
+      if objects_readable
+        assert_text 'This pipeline was created from'
+        if user == 'admin'
+          assert_text 'input'
+          assert_selector 'a', text: 'Choose'
+          assert_selector 'a', text: 'Run'
+          assert_no_selector 'a.disabled', text: 'Run'
+        else
+          assert_selector 'a', text: object['components']['foo']['script_parameters']['input']['value']
+          if user
+            assert_selector 'a', text: 'Run'
+          else
+            assert_no_selector 'a', text: 'Run'
+          end
+        end
+      else
+        assert_no_text 'This pipeline was created from'  # template is not readable
+        input = object['components']['foo']['script_parameters']['input']['value']
+        assert_no_selector 'a', text: input
+        if user
+          input = input.gsub('/', '\\/')
+          assert_text "One or more inputs provided are not readable"
+          assert_selector "input[type=text][value=#{input}]"
+          assert_selector 'a.disabled', text: 'Run'
+        else
+          assert_no_text "One or more inputs provided are not readable"
+          assert_text input
+          assert_no_selector 'a', text: 'Run'
+        end
+      end
+    end
+  end
+end
diff --git a/apps/workbench/test/integration/application_layout_test.rb b/apps/workbench/test/integration/application_layout_test.rb
new file mode 100644 (file)
index 0000000..b3f704c
--- /dev/null
@@ -0,0 +1,317 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class ApplicationLayoutTest < ActionDispatch::IntegrationTest
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
+  setup do
+    need_javascript
+  end
+
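+  # Check the homepage as rendered for the given user fixture:
+  # `invited` means the user can sign the user agreement, and
+  # `has_profile` means their user profile has been filled in.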
+  def verify_homepage user, invited, has_profile
+    profile_config = Rails.configuration.user_profile_form_fields
+
+    if !user
+      assert page.has_text?('Please log in'), 'Not found text - Please log in'
+      assert page.has_text?('The "Log in" button below will show you a Google sign-in page'), 'Not found text - google sign in page'
+      assert page.has_no_text?('My projects'), 'Found text - My projects'
+      assert page.has_link?("Log in to #{Rails.configuration.site_name}"), 'Not found text - log in to'
+    elsif user['is_active']
+      if profile_config && !has_profile
+        assert page.has_text?('Save profile'), 'No text - Save profile'
+      else
+        assert page.has_link?("Projects"), 'Not found link - Projects'
+        page.find("#projects-menu").click
+        assert_selector 'a', text: 'Search all projects'
+        assert_no_selector 'a', text: 'Browse public projects'
+        assert_selector 'a', text: 'Add a new project'
+        assert_selector 'li[class="dropdown-header"]', text: 'My projects'
+      end
+    elsif invited
+      assert page.has_text?('Please check the box below to indicate that you have read and accepted the user agreement'), 'Not found text - Please check the box below . . .'
+    else
+      assert page.has_text?('Your account is inactive'), 'Not found text - Your account is inactive'
+    end
+
+    within('.navbar-fixed-top') do
+      if !user
+        assert_text Rails.configuration.site_name.downcase
+        assert_no_selector 'a', text: Rails.configuration.site_name.downcase
+        assert page.has_link?('Log in'), 'Not found link - Log in'
+      else
+        # my account menu
+        assert_selector 'a', text: Rails.configuration.site_name.downcase
+        assert(page.has_link?("notifications-menu"), 'no user menu')
+        page.find("#notifications-menu").click
+        within('.dropdown-menu') do
+          if user['is_active']
+            assert page.has_no_link?('Not active'), 'Found link - Not active'
+            assert page.has_no_link?('Sign agreements'), 'Found link - Sign agreements'
+
+            assert_selector "a[href=\"/projects/#{user['uuid']}\"]", text: 'Home project'
+            assert_selector "a[href=\"/users/#{user['uuid']}/virtual_machines\"]", text: 'Virtual machines'
+            assert_selector "a[href=\"/repositories\"]", text: 'Repositories'
+            assert_selector "a[href=\"/current_token\"]", text: 'Current token'
+            assert_selector "a[href=\"/users/#{user['uuid']}/ssh_keys\"]", text: 'SSH keys'
+
+            if profile_config
+              assert_selector "a[href=\"/users/#{user['uuid']}/profile\"]", text: 'Manage profile'
+            else
+              assert_no_selector "a[href=\"/users/#{user['uuid']}/profile\"]", text: 'Manage profile'
+            end
+          else
+            assert_no_selector 'a', text: 'Home project'
+            assert page.has_no_link?('Virtual machines'), 'Found link - Virtual machines'
+            assert page.has_no_link?('Repositories'), 'Found link - Repositories'
+            assert page.has_no_link?('Current token'), 'Found link - Current token'
+            assert page.has_no_link?('SSH keys'), 'Found link - SSH keys'
+            assert page.has_no_link?('Manage profile'), 'Found link - Manage profile'
+          end
+          assert page.has_link?('Log out'), 'No link - Log out'
+        end
+      end
+    end
+  end
+
+  # test the help menu
+  def check_help_menu
+    within('.navbar-fixed-top') do
+      page.find("#arv-help").click
+      within('.dropdown-menu') do
+        assert_no_selector 'a', text:'Getting Started ...'
+        assert_selector 'a', text:'Public Pipelines and Data sets'
+        assert page.has_link?('Tutorials and User guide'), 'No link - Tutorials and User guide'
+        assert page.has_link?('API Reference'), 'No link - API Reference'
+        assert page.has_link?('SDK Reference'), 'No link - SDK Reference'
+        assert page.has_link?('Show version / debugging info ...'), 'No link - Show version / debugging info'
+        assert page.has_link?('Report a problem ...'), 'No link - Report a problem'
+        # Version info and Report a problem are tested in "report_issue_test.rb"
+      end
+    end
+  end
+
+  def verify_system_menu user
+    if user && user['is_admin']
+      assert page.has_link?('system-menu'), 'No link - system menu'
+      within('.navbar-fixed-top') do
+        page.find("#system-menu").click
+        within('.dropdown-menu') do
+          assert page.has_text?('Groups'), 'No text - Groups'
+          assert page.has_link?('Repositories'), 'No link - Repositories'
+          assert page.has_link?('Virtual machines'), 'No link - Virtual machines'
+          assert page.has_link?('SSH keys'), 'No link - SSH keys'
+          assert page.has_link?('API tokens'), 'No link - API tokens'
+          find('a', text: 'Users').click
+        end
+      end
+      assert page.has_text? 'Add a new user'
+    else
+      assert page.has_no_link?('system-menu'), 'Found link - system menu'
+    end
+  end
+
+  [
+    [nil, nil, false, false],
+    ['inactive', api_fixture('users')['inactive'], true, false],
+    ['inactive_uninvited', api_fixture('users')['inactive_uninvited'], false, false],
+    ['active', api_fixture('users')['active'], true, true],
+    ['admin', api_fixture('users')['admin'], true, true],
+    ['active_no_prefs', api_fixture('users')['active_no_prefs'], true, false],
+    ['active_no_prefs_profile_no_getting_started_shown',
+        api_fixture('users')['active_no_prefs_profile_no_getting_started_shown'], true, false],
+  ].each do |token, user, invited, has_profile|
+
+    test "visit home page for user #{token}" do
+      if !token
+        visit '/'
+      else
+        visit page_with_token(token)
+      end
+
+      check_help_menu
+      verify_homepage user, invited, has_profile
+      verify_system_menu user
+    end
+  end
+
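+  # Each entry is [workbench2_url config value, whether the
+  # "Go to Workbench 2" menu item should appear].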
+  [
+    [false, false],
+    ['http://wb2.example.org//', false],
+    ['ftp://wb2.example.org', false],
+    ['wb2.example.org', false],
+    ['http://wb2.example.org', true],
+    ['https://wb2.example.org', true],
+    ['http://wb2.example.org/', true],
+    ['https://wb2.example.org/', true],
+  ].each do |wb2_url_config, wb2_menu_appear|
+    test "workbench2_url=#{wb2_url_config} should#{wb2_menu_appear ? '' : ' not'} show WB2 menu" do
+      Rails.configuration.workbench2_url = wb2_url_config
+      assert_equal wb2_menu_appear, ConfigValidators.validate_wb2_url_config
+
+      visit page_with_token('active')
+      within('.navbar-fixed-top') do
+        page.find("#notifications-menu").click
+        within('.dropdown-menu') do
+          assert_equal wb2_menu_appear, page.has_text?('Go to Workbench 2')
+        end
+      end
+    end
+  end
+
+  [
+    ['active', true],
+    ['active_with_prefs_profile_no_getting_started_shown', false],
+  ].each do |token, getting_started_shown|
+    test "getting started help menu item #{getting_started_shown}" do
+      Rails.configuration.enable_getting_started_popup = true
+
+      visit page_with_token(token)
+
+      if getting_started_shown
+        within '.navbar-fixed-top' do
+          find('.help-menu > a').click
+          find('.help-menu .dropdown-menu a', text: 'Getting Started ...').click
+        end
+      end
+
+      within '.modal-content' do
+        assert_text 'Getting Started'
+        assert_selector 'button:not([disabled])', text: 'Next'
+        assert_no_selector 'button:not([disabled])', text: 'Prev'
+
+        # Use Next button to enable Prev button
+        click_button 'Next'
+        assert_selector 'button:not([disabled])', text: 'Prev'  # Prev button is now enabled
+        click_button 'Prev'
+        assert_no_selector 'button:not([disabled])', text: 'Prev'  # Prev button is again disabled
+
+        # Click Next until last page is reached and verify that it is disabled
+        (0..20).each do |i|   # currently we only have 4 pages, and don't expect to have more than 20 in future
+          click_button 'Next'
+          begin
+            find('button:not([disabled])', text: 'Next')
+          rescue => e
+            break
+          end
+        end
+        assert_no_selector 'button:not([disabled])', text: 'Next'  # Next button is disabled
+        assert_selector 'button:not([disabled])', text: 'Prev'     # Prev button is enabled
+        click_button 'Prev'
+        assert_selector 'button:not([disabled])', text: 'Next'     # Next button is now enabled
+
+        first('button', text: 'x').click
+      end
+      assert_text 'Recent pipelines and processes' # seeing dashboard now
+    end
+  end
+
+  test "test arvados_public_data_doc_url config unset" do
+    Rails.configuration.arvados_public_data_doc_url = false
+
+    visit page_with_token('active')
+    within '.navbar-fixed-top' do
+      find('.help-menu > a').click
+
+      assert_no_selector 'a', text:'Public Pipelines and Data sets'
+      assert_no_selector 'a', text:'Getting Started ...'
+
+      assert page.has_link?('Tutorials and User guide'), 'No link - Tutorials and User guide'
+      assert page.has_link?('API Reference'), 'No link - API Reference'
+      assert page.has_link?('SDK Reference'), 'No link - SDK Reference'
+      assert page.has_link?('Show version / debugging info ...'), 'No link - Show version / debugging info'
+      assert page.has_link?('Report a problem ...'), 'No link - Report a problem'
+    end
+  end
+
+  test "no SSH public key notification when shell_in_a_box_url is configured" do
+    Rails.configuration.shell_in_a_box_url = 'example.com'
+    visit page_with_token('job_reader')
+    click_link 'notifications-menu'
+    assert_no_selector 'a', text:'Click here to set up an SSH public key for use with Arvados.'
+    assert_selector 'a', text:'Click here to learn how to run an Arvados Crunch pipeline'
+  end
+
+  [
+    ['Repositories', nil, 'active/crunchdispatchtest'],
+    ['Virtual machines', nil, 'testvm.shell'],
+    ['SSH keys', nil, 'public_key'],
+    ['Links', nil, 'link_class'],
+    ['Groups', nil, 'All users'],
+    ['Compute nodes', nil, 'ping_secret'],
+    ['Keep services', nil, 'service_ssl_flag'],
+    ['Keep disks', nil, 'bytes_free'],
+  ].each do |page_name, add_button_text, look_for|
+    test "test system menu #{page_name} link" do
+      visit page_with_token('admin')
+      within('.navbar-fixed-top') do
+        page.find("#system-menu").click
+        within('.dropdown-menu') do
+          assert_selector 'a', text: page_name
+          find('a', text: page_name).click
+        end
+      end
+
+      # click the add button if it exists
+      if add_button_text
+        assert_selector 'button', text: "Add a new #{add_button_text}"
+        find('button', text: "Add a new #{add_button_text}").click
+      else
+        assert_no_selector 'button', text:"Add a new"
+      end
+
+      # look for unique property in the current page
+      assert_text look_for
+    end
+  end
+
+  [
+    ['active', false],
+    ['admin', true],
+  ].each do |token, is_admin|
+    test "visit dashboard as #{token}" do
+      visit page_with_token(token)
+
+      assert_text 'Recent pipelines and processes' # seeing dashboard now
+      within('.recent-processes-actions') do
+        assert page.has_link?('Run a process')
+        assert page.has_link?('All processes')
+      end
+
+      within('.recent-processes') do
+
+        within('.row-zzzzz-xvhdp-cr4runningcntnr') do
+          assert_text 'running'
+        end
+
+        assert_text 'zzzzz-d1hrv-twodonepipeline'
+        within('.row-zzzzz-d1hrv-twodonepipeline') do
+          assert_text 'No output'
+        end
+
+        assert_text 'completed container request'
+        within('.row-zzzzz-xvhdp-cr4completedctr') do
+          assert page.has_link? 'foo_file'
+        end
+      end
+
+      within('.compute-node-actions') do
+        if is_admin
+          assert page.has_link?('All nodes')
+        else
+          assert page.has_no_link?('All nodes')
+        end
+      end
+
+      within('.compute-node-summary-pane') do
+        click_link 'Details'
+        assert_text 'compute0'
+      end
+    end
+  end
+end
diff --git a/apps/workbench/test/integration/browser_unsupported_test.rb b/apps/workbench/test/integration/browser_unsupported_test.rb
new file mode 100644 (file)
index 0000000..2933a04
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class BrowserUnsupported < ActionDispatch::IntegrationTest
+  WARNING_FRAGMENT = 'Your web browser is missing some of the features'
+
+  test 'warning if no File API' do
+    Capybara.current_driver = :poltergeist_without_file_api
+    visit '/'
+    assert_text :visible, WARNING_FRAGMENT
+  end
+
+  test 'no warning if File API' do
+    need_javascript
+    visit '/'
+    assert_no_text :visible, WARNING_FRAGMENT
+  end
+end
diff --git a/apps/workbench/test/integration/collection_upload_test.rb b/apps/workbench/test/integration/collection_upload_test.rb
new file mode 100644 (file)
index 0000000..608cd52
--- /dev/null
@@ -0,0 +1,149 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class CollectionUploadTest < ActionDispatch::IntegrationTest
+  setup do
+    testfiles.each do |filename, content|
+      File.open(testfile_path(filename), 'w') do |io|
+        io.write content
+      end
+    end
+    # Database reset doesn't restore KeepServices; we have to
+    # save/restore manually.
+    use_token :admin do
+      @keep_services = KeepService.all.to_a
+    end
+  end
+
+  teardown do
+    use_token :admin do
+      @keep_services.each do |ks|
+        KeepService.find(ks.uuid).update_attributes(ks.attributes)
+      end
+    end
+    testfiles.each do |filename, _|
+      File.unlink(testfile_path filename)
+    end
+  end
+
+  test "Create new collection using upload button" do
+    need_javascript
+    visit page_with_token 'active', aproject_path
+    find('.btn', text: 'Add data').click
+    click_link 'Upload files from my computer'
+    # Should be looking at a new empty collection.
+    assert_text 'New collection'
+    assert_text ' 0 files'
+    assert_text ' 0 bytes'
+    # The "Upload" tab should be active and loaded.
+    assert_selector 'div#Upload.active div.panel'
+  end
+
+  test "Upload two empty files with the same name" do
+    need_selenium "to make file uploads work"
+    visit page_with_token 'active', sandbox_path
+
+    unlock_collection
+
+    find('.nav-tabs a', text: 'Upload').click
+    attach_file 'file_selector', testfile_path('empty.txt')
+    assert_selector 'div', text: 'empty.txt'
+    attach_file 'file_selector', testfile_path('empty.txt')
+    assert_selector 'div.row div span[title]', text: 'empty.txt', count: 2
+    click_button 'Start'
+    assert_text :visible, 'Done!'
+    visit sandbox_path+'.json'
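+    # In the stored manifest, the second upload of "empty.txt" should
+    # be renamed "empty (1).txt", with the space escaped as \040.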
+    assert_match(/_text":"\. d41d8\S+ 0:0:empty\.txt\\n\. d41d8\S+ 0:0:empty\\\\040\(1\)\.txt\\n"/, body)
+  end
+
+  test "Upload non-empty files" do
+    need_selenium "to make file uploads work"
+    visit page_with_token 'active', sandbox_path
+
+    unlock_collection
+
+    find('.nav-tabs a', text: 'Upload').click
+    attach_file 'file_selector', testfile_path('a')
+    attach_file 'file_selector', testfile_path('foo.txt')
+    assert_selector 'button:not([disabled])', text: 'Start'
+    click_button 'Start'
+    assert_text :visible, 'Done!'
+    visit sandbox_path+'.json'
+    assert_match(/_text":"\. 0cc1\S+ 0:1:a\\n\. acbd\S+ 0:3:foo\.txt\\n"/, body)
+  end
+
+  test "Report mixed-content error" do
+    skip 'Test suite does not use TLS'
+    need_selenium "to make file uploads work"
+    use_token :admin do
+      KeepService.where(service_type: 'proxy').first.
+        update_attributes(service_ssl_flag: false)
+    end
+    visit page_with_token 'active', sandbox_path
+    find('.nav-tabs a', text: 'Upload').click
+    attach_file 'file_selector', testfile_path('foo.txt')
+    assert_selector 'button:not([disabled])', text: 'Start'
+    click_button 'Start'
+    using_wait_time 5 do
+      assert_text :visible, 'server setup problem'
+      assert_text :visible, 'cannot be used from origin'
+    end
+  end
+
+  test "Report network error" do
+    need_selenium "to make file uploads work"
+    use_token :admin do
+      # Even if port 0 is a thing, surely nx.example.net won't
+      # respond
+      KeepService.where(service_type: 'proxy').first.
+        update_attributes(service_host: 'nx.example.net',
+                          service_port: 0)
+    end
+    visit page_with_token 'active', sandbox_path
+
+    unlock_collection
+
+    find('.nav-tabs a', text: 'Upload').click
+    attach_file 'file_selector', testfile_path('foo.txt')
+    assert_selector 'button:not([disabled])', text: 'Start'
+    click_button 'Start'
+    using_wait_time 5 do
+      assert_text :visible, 'network error'
+    end
+  end
+
+  protected
+
+  def aproject_path
+    '/projects/' + api_fixture('groups')['aproject']['uuid']
+  end
+
+  def sandbox_uuid
+    api_fixture('collections')['upload_sandbox']['uuid']
+  end
+
+  def sandbox_path
+    '/collections/' + sandbox_uuid
+  end
+
+  def testfiles
+    {
+      'empty.txt' => '',
+      'a' => 'a',
+      'foo.txt' => 'foo'
+    }
+  end
+
+  def testfile_path filename
+    # Must be an absolute path. https://github.com/jnicklas/capybara/issues/621
+    File.join Dir.getwd, 'tmp', filename
+  end
+
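+  # Collections are initially locked against modification; clicking
+  # the lock button pops up a confirmation alert.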
+  def unlock_collection
+    first('.lock-collection-btn').click
+    accept_alert
+  end
+end
diff --git a/apps/workbench/test/integration/collections_test.rb b/apps/workbench/test/integration/collections_test.rb
new file mode 100644 (file)
index 0000000..6dd3c52
--- /dev/null
@@ -0,0 +1,437 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+require_relative 'integration_test_utils'
+
+class CollectionsTest < ActionDispatch::IntegrationTest
+  include KeepWebConfig
+
+  setup do
+    need_javascript
+  end
+
+  test "Can copy a collection to a project" do
+    collection_uuid = api_fixture('collections')['foo_file']['uuid']
+    collection_name = api_fixture('collections')['foo_file']['name']
+    project_uuid = api_fixture('groups')['aproject']['uuid']
+    project_name = api_fixture('groups')['aproject']['name']
+    visit page_with_token('active', "/collections/#{collection_uuid}")
+    click_link 'Copy to project...'
+    find('.selectable', text: project_name).click
+    find('.modal-footer a,button', text: 'Copy').click
+    # Should navigate to the Data collections tab of the project after copying
+    assert_text project_name
+    assert_text "Copy of #{collection_name}"
+  end
+
+  def check_sharing(want_state, link_regexp)
+    # We specifically want to click buttons.  See #4291.
+    if want_state == :off
+      click_button "Unshare"
+      text_assertion = :assert_no_text
+      link_assertion = :assert_empty
+    else
+      click_button "Create sharing link"
+      text_assertion = :assert_text
+      link_assertion = :refute_empty
+    end
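+    # `text_assertion` and `link_assertion` are Minitest assertion
+    # method names chosen above; dispatch them dynamically. Sharing
+    # state can take a while to appear, so allow 3x the usual wait.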
+    using_wait_time(Capybara.default_max_wait_time * 3) do
+      send(text_assertion, "Shared at:")
+    end
+    send(link_assertion, all("a").select { |a| a[:href] =~ link_regexp })
+  end
+
+  test "creating and uncreating a sharing link" do
+    coll_uuid = api_fixture("collections", "collection_owned_by_active", "uuid")
+    download_link_re =
+      Regexp.new(Regexp.escape("/c=#{coll_uuid}/"))
+    visit page_with_token("active_trustedclient", "/collections/#{coll_uuid}")
+    within "#sharing-button" do
+      check_sharing(:on, download_link_re)
+      check_sharing(:off, download_link_re)
+    end
+  end
+
+  test "can download an entire collection with a reader token" do
+    use_keep_web_config
+
+    token = api_token('active')
+    data = "foo\nfile\n"
+    datablock = `echo -n #{data.shellescape} | ARVADOS_API_TOKEN=#{token.shellescape} arv-put --no-progress --raw -`.strip
+    assert $?.success?, $?
+
+    col = nil
+    use_token 'active' do
+      mtxt = ". #{datablock} 0:#{data.length}:foo\n"
+      col = Collection.create(manifest_text: mtxt)
+    end
+
+    uuid = col.uuid
+    token = api_fixture('api_client_authorizations')['active_all_collections']['api_token']
+    url_head = "/collections/download/#{uuid}/#{token}/"
+    visit url_head
+    # It seems that Capybara can't inspect tags outside the body, so this is
+    # a very blunt approach.
+    assert_no_match(/<\s*meta[^>]+\bnofollow\b/i, page.html,
+                    "wget prohibited from recursing the collection page")
+    # Look at all the links that wget would recurse through using our
+    # recommended options, and check that it's exactly the file list.
+    hrefs = page.all('a').map do |anchor|
+      link = anchor[:href] || ''
+      if link.start_with? url_head
+        link[url_head.size .. -1]
+      elsif link.start_with? '/'
+        nil
+      else
+        link
+      end
+    end
+    assert_equal(['./foo'], hrefs.compact.sort,
+                 "download page did not provide strictly file links")
+    click_link "foo"
+    assert_text "foo\nfile\n"
+  end
+
+  test "combine selected collections into new collection" do
+    foo_collection = api_fixture('collections')['foo_file']
+    bar_collection = api_fixture('collections')['bar_file']
+
+    visit page_with_token('active', "/collections")
+
+    assert(page.has_text?(foo_collection['uuid']), "Collection page did not include foo file")
+    assert(page.has_text?(bar_collection['uuid']), "Collection page did not include bar file")
+
+    within "tr[data-object-uuid=\"#{foo_collection['uuid']}\"]" do
+      find('input[type=checkbox]').click
+    end
+
+    within "tr[data-object-uuid=\"#{bar_collection['uuid']}\"]" do
+      find('input[type=checkbox]').click
+    end
+
+    click_button 'Selection...'
+    within('.selection-action-container') do
+      click_link 'Create new collection with selected collections'
+    end
+
+    # now in the newly created collection page
+    assert(page.has_text?('Copy to project'), "Copy to project text not found in new collection page")
+    assert(page.has_no_text?(foo_collection['name']), "Collection page included foo collection name")
+    assert(page.has_text?('foo'), "Collection page did not include foo file")
+    assert(page.has_no_text?(bar_collection['name']), "Collection page included bar collection name")
+    assert(page.has_text?('bar'), "Collection page did not include bar file")
+    assert(page.has_text?('Created new collection in your Home project'),
+                          'Not found flash message that new collection is created in Home project')
+  end
+
+  [
+    ['active', 'foo_file', false],
+    ['active', 'foo_collection_in_aproject', true],
+    ['project_viewer', 'foo_file', false],
+    ['project_viewer', 'foo_collection_in_aproject', false], #aproject not writable
+  ].each do |user, collection, expect_collection_in_aproject|
+    test "combine selected collection files into new collection #{user} #{collection} #{expect_collection_in_aproject}" do
+      my_collection = api_fixture('collections')[collection]
+
+      visit page_with_token(user, "/collections")
+
+      # choose file from foo collection
+      within('tr', text: my_collection['uuid']) do
+        click_link 'Show'
+      end
+
+      # now in collection page
+      find('input[type=checkbox]').click
+
+      click_button 'Selection...'
+      within('.selection-action-container') do
+        click_link 'Create new collection with selected files'
+      end
+
+      # now in the newly created collection page
+      assert(page.has_text?('Copy to project'), "Copy to project text not found in new collection page")
+      assert(page.has_no_text?(my_collection['name']), "Collection page included the collection name")
+      assert(page.has_text?('foo'), "Collection page did not include foo file")
+      if expect_collection_in_aproject
+        aproject = api_fixture('groups')['aproject']
+        assert page.has_text?("Created new collection in the project #{aproject['name']}"),
+                              'Not found flash message that new collection is created in aproject'
+      else
+        assert page.has_text?("Created new collection in your Home project"),
+                              'Not found flash message that new collection is created in Home project'
+      end
+    end
+  end
+
+  test "combine selected collection files from collection subdirectory" do
+    visit page_with_token('user1_with_load', "/collections/zzzzz-4zz18-filesinsubdir00")
+
+    # now in collection page
+    input_files = page.all('input[type=checkbox]')
+    input_files.each(&:click)
+
+    click_button 'Selection...'
+    within('.selection-action-container') do
+      click_link 'Create new collection with selected files'
+    end
+
+    # now in the newly created collection page
+    assert(page.has_text?('file_in_subdir1'), 'file not found - file_in_subdir1')
+    assert(page.has_text?('file1_in_subdir3.txt'), 'file not found - file1_in_subdir3.txt')
+    assert(page.has_text?('file2_in_subdir3.txt'), 'file not found - file2_in_subdir3.txt')
+    assert(page.has_text?('file1_in_subdir4.txt'), 'file not found - file1_in_subdir4.txt')
+    assert(page.has_text?('file2_in_subdir4.txt'), 'file not found - file2_in_subdir4.txt')
+  end
+
+  test "Collection portable data hash with multiple matches with more than one page of results" do
+    pdh = api_fixture('collections')['baz_file']['portable_data_hash']
+    visit page_with_token('admin', "/collections/#{pdh}")
+
+    assert_selector 'a', text: 'Collection_1'
+
+    assert_text 'The following collections have this content:'
+    assert_text 'more results are not shown'
+    assert_no_text 'Activity'
+    assert_no_text 'Sharing and permissions'
+  end
+
+  test "Filtering collection files by regexp" do
+    col = api_fixture('collections', 'multilevel_collection_1')
+    visit page_with_token('active', "/collections/#{col['uuid']}")
+
+    # Filter file list to some but not all files in the collection
+    page.find_field('file_regex').set('file[12]')
+    assert page.has_text?("file1")
+    assert page.has_text?("file2")
+    assert page.has_no_text?("file3")
+
+    # Filter file list with a regex matching all files
+    page.find_field('file_regex').set('.*')
+    assert page.has_text?("file1")
+    assert page.has_text?("file2")
+    assert page.has_text?("file3")
+
+    # Filter file list to a regex matching no files
+    page.find_field('file_regex').set('file9')
+    assert page.has_no_text?("file1")
+    assert page.has_no_text?("file2")
+    assert page.has_no_text?("file3")
+    # Make sure we are actually looking at the collection page, and
+    # not, e.g., a "fiddlesticks" error page.
+    assert page.has_text?("multilevel_collection_1")
+    assert page.has_text?(col["name"] || col["uuid"])
+
+    # Set filename filter to a syntactically invalid regex
+    # Page loads, but stops filtering after the last valid regex parse
+    page.find_field('file_regex').set('file[2')
+    assert page.has_text?("multilevel_collection_1")
+    assert page.has_text?(col["name"] || col["uuid"])
+    assert page.has_text?("file1")
+    assert page.has_text?("file2")
+    assert page.has_text?("file3")
+
+    # Test the "Select all" button
+
+    # Note: calling .set('') on a Selenium element is not sufficient
+    # to reset the field for this test, as it does not send any key
+    # events to the browser. To clear the field, we must instead send
+    # a backspace character.
+    # See https://selenium.googlecode.com/svn/trunk/docs/api/rb/Selenium/WebDriver/Element.html#clear-instance_method
+    page.find_field('file_regex').set("\b") # backspace
+    find('button#select-all').click
+    assert_checkboxes_state('input[type=checkbox]', true, '"select all" should check all checkboxes')
+
+    # Test the "Unselect all" button
+    page.find_field('file_regex').set("\b") # backspace
+    find('button#unselect-all').click
+    assert_checkboxes_state('input[type=checkbox]', false, '"unselect all" should clear all checkboxes')
+
+    # Filter files, then "select all", then unfilter
+    page.find_field('file_regex').set("\b") # backspace
+    find('button#unselect-all').click
+    page.find_field('file_regex').set('file[12]')
+    find('button#select-all').click
+    page.find_field('file_regex').set("\b") # backspace
+
+    # all "file1" and "file2" checkboxes must be selected
+    # all "file3" checkboxes must be clear
+    assert_checkboxes_state('[value*="file1"]', true, 'checkboxes for file1 should be selected after filtering')
+    assert_checkboxes_state('[value*="file2"]', true, 'checkboxes for file2 should be selected after filtering')
+    assert_checkboxes_state('[value*="file3"]', false, 'checkboxes for file3 should be clear after filtering')
+
+    # Select all files, then filter, then "unselect all", then unfilter
+    page.find_field('file_regex').set("\b") # backspace
+    find('button#select-all').click
+    page.find_field('file_regex').set('file[12]')
+    find('button#unselect-all').click
+    page.find_field('file_regex').set("\b") # backspace
+
+    # all "file1" and "file2" checkboxes must be clear
+    # all "file3" checkboxes must be selected
+    assert_checkboxes_state('[value*="file1"]', false, 'checkboxes for file1 should be clear after filtering')
+    assert_checkboxes_state('[value*="file2"]', false, 'checkboxes for file2 should be clear after filtering')
+    assert_checkboxes_state('[value*="file3"]', true, 'checkboxes for file3 should be selected after filtering')
+  end
+
+  test "Creating collection from list of filtered files" do
+    col = api_fixture('collections', 'collection_with_files_in_subdir')
+    visit page_with_token('user1_with_load', "/collections/#{col['uuid']}")
+    assert page.has_text?('file_in_subdir1'), 'expected file_in_subdir1 not found'
+    assert page.has_text?('file1_in_subdir3'), 'expected file1_in_subdir3 not found'
+    assert page.has_text?('file2_in_subdir3'), 'expected file2_in_subdir3 not found'
+    assert page.has_text?('file1_in_subdir4'), 'expected file1_in_subdir4 not found'
+    assert page.has_text?('file2_in_subdir4'), 'expected file2_in_subdir4 not found'
+
+    # Select all files but then filter them to files in subdir1, subdir2 or subdir3
+    find('button#select-all').click
+    page.find_field('file_regex').set('_in_subdir[123]')
+    assert page.has_text?('file_in_subdir1'), 'expected file_in_subdir1 not in filtered files'
+    assert page.has_text?('file1_in_subdir3'), 'expected file1_in_subdir3 not in filtered files'
+    assert page.has_text?('file2_in_subdir3'), 'expected file2_in_subdir3 not in filtered files'
+    assert page.has_no_text?('file1_in_subdir4'), 'file1_in_subdir4 found in filtered files'
+    assert page.has_no_text?('file2_in_subdir4'), 'file2_in_subdir4 found in filtered files'
+
+    # Create a new collection
+    click_button 'Selection...'
+    within('.selection-action-container') do
+      click_link 'Create new collection with selected files'
+    end
+
+    # now in the newly created collection page
+    # must have files in subdir1 and subdir3 but not subdir4
+    assert page.has_text?('file_in_subdir1'), 'file_in_subdir1 missing from new collection'
+    assert page.has_text?('file1_in_subdir3'), 'file1_in_subdir3 missing from new collection'
+    assert page.has_text?('file2_in_subdir3'), 'file2_in_subdir3 missing from new collection'
+    assert page.has_no_text?('file1_in_subdir4'), 'file1_in_subdir4 found in new collection'
+    assert page.has_no_text?('file2_in_subdir4'), 'file2_in_subdir4 found in new collection'
+
+    # Make sure we're not still on the old collection page.
+    refute_match(%r{/collections/#{col['uuid']}}, page.current_url)
+  end
+
+  test "remove a file from collection using checkbox and dropdown option" do
+    need_selenium 'to confirm unlock'
+
+    visit page_with_token('active', '/collections/zzzzz-4zz18-a21ux3541sxa8sf')
+    assert(page.has_text?('file1'), 'file not found - file1')
+
+    unlock_collection
+
+    # remove first file
+    input_files = page.all('input[type=checkbox]')
+    input_files[0].click
+
+    click_button 'Selection...'
+    within('.selection-action-container') do
+      click_link 'Remove selected files'
+    end
+
+    assert(page.has_no_text?('file1'), 'file found - file1')
+    assert(page.has_text?('file2'), 'file not found - file2')
+  end
+
+  test "remove a file in collection using trash icon" do
+    need_selenium 'to confirm unlock'
+
+    visit page_with_token('active', '/collections/zzzzz-4zz18-a21ux3541sxa8sf')
+    assert(page.has_text?('file1'), 'file not found - file1')
+
+    unlock_collection
+
+    first('.fa-trash-o').click
+    accept_alert
+
+    assert(page.has_no_text?('file1'), 'file found - file1')
+    assert(page.has_text?('file2'), 'file not found - file2')
+  end
+
+  test "rename a file in collection" do
+    need_selenium 'to confirm unlock'
+
+    visit page_with_token('active', '/collections/zzzzz-4zz18-a21ux3541sxa8sf')
+
+    unlock_collection
+
+    within('.collection_files') do
+      first('.fa-pencil').click
+      find('.editable-input input').set('file1renamed')
+      find('.editable-submit').click
+    end
+
+    assert(page.has_text?('file1renamed'), 'file not found - file1renamed')
+  end
+
+  test "remove/rename file options not presented if user cannot update a collection" do
+    # visit a publicly accessible collection as 'spectator'
+    visit page_with_token('spectator', '/collections/zzzzz-4zz18-uukreo9rbgwsujr')
+
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li', text: 'Create new collection with selected files'
+      assert_no_selector 'li', text: 'Remove selected files'
+    end
+
+    within('.collection_files') do
+      assert(page.has_text?('GNU_General_Public_License'), 'file not found - GNU_General_Public_License')
+      assert_nil first('.fa-pencil')
+      assert_nil first('.fa-trash-o')
+    end
+  end
+
+  test "unlock collection to modify files" do
+    need_selenium 'to confirm remove'
+
+    collection = api_fixture('collections')['collection_owned_by_active']
+
+    # On load, collection is locked, and upload tab, rename and remove options are disabled
+    visit page_with_token('active', "/collections/#{collection['uuid']}")
+
+    assert_selector 'a[data-toggle="disabled"]', text: 'Upload'
+
+    within('.collection_files') do
+      file_ctrls = page.all('.btn-collection-file-control')
+      assert_equal 2, file_ctrls.size
+      assert_includes file_ctrls[0]['class'], 'disabled'
+      assert_includes file_ctrls[1]['class'], 'disabled'
+      find('input[type=checkbox]').click
+    end
+
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Remove selected files'
+      assert_selector 'li', text: 'Create new collection with selected files'
+    end
+
+    unlock_collection
+
+    assert_no_selector 'a[data-toggle="disabled"]', text: 'Upload'
+    assert_selector 'a', text: 'Upload'
+
+    within('.collection_files') do
+      file_ctrls = page.all('.btn-collection-file-control')
+      assert_equal 2, file_ctrls.size
+      refute_includes file_ctrls[0]['class'], 'disabled'
+      refute_includes file_ctrls[1]['class'], 'disabled'
+
+      # previous checkbox selection won't result in firing a new event;
+      # undo and redo checkbox to fire the selection event again
+      find('input[type=checkbox]').click
+      find('input[type=checkbox]').click
+    end
+
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_no_selector 'li.disabled', text: 'Remove selected files'
+      assert_selector 'li', text: 'Remove selected files'
+    end
+  end
+
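+  # Collections are initially locked against modification; clicking
+  # the lock button pops up a confirmation alert.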
+  def unlock_collection
+    first('.lock-collection-btn').click
+    accept_alert
+  end
+end
diff --git a/apps/workbench/test/integration/container_requests_test.rb b/apps/workbench/test/integration/container_requests_test.rb
new file mode 100644 (file)
index 0000000..151654b
--- /dev/null
@@ -0,0 +1,161 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class ContainerRequestsTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
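+  # Each entry is [input id, value to type, expected displayed value
+  # (defaults to the typed value)].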
+  [
+    ['ex_string', 'abc'],
+    ['ex_string_opt', 'abc'],
+    ['ex_int', 12],
+    ['ex_int_opt', 12],
+    ['ex_long', 12],
+    ['ex_double', '12.34', 12.34],
+    ['ex_float', '12.34', 12.34],
+  ].each do |input_id, input_value, expected_value|
+    test "set input #{input_id} with #{input_value}" do
+      request_uuid = api_fixture("container_requests", "uncommitted", "uuid")
+      visit page_with_token("active", "/container_requests/#{request_uuid}")
+      selector = ".editable[data-name='[mounts][/var/lib/cwl/cwl.input.json][content][#{input_id}]']"
+      find(selector).click
+      find(".editable-input input").set(input_value)
+      find("#editable-submit").click
+      assert_no_selector(".editable-popup")
+      assert_selector(selector, text: expected_value || input_value)
+    end
+  end
+
+  test "select value for boolean input" do
+    request_uuid = api_fixture("container_requests", "uncommitted", "uuid")
+    visit page_with_token("active", "/container_requests/#{request_uuid}")
+    selector = ".editable[data-name='[mounts][/var/lib/cwl/cwl.input.json][content][ex_boolean]']"
+    find(selector).click
+    within(".editable-input") do
+      select "true"
+    end
+    find("#editable-submit").click
+    assert_no_selector(".editable-popup")
+    assert_selector(selector, text: "true")
+  end
+
+  test "select value for enum typed input" do
+    request_uuid = api_fixture("container_requests", "uncommitted", "uuid")
+    visit page_with_token("active", "/container_requests/#{request_uuid}")
+    selector = ".editable[data-name='[mounts][/var/lib/cwl/cwl.input.json][content][ex_enum]']"
+    find(selector).click
+    within(".editable-input") do
+      select "b"    # second value
+    end
+    find("#editable-submit").click
+    assert_no_selector(".editable-popup")
+    assert_selector(selector, text: "b")
+  end
+
+  [
+    'directory_type',
+    'file_type',
+  ].each do |type|
+    test "select value for #{type} input" do
+      request_uuid = api_fixture("container_requests", "uncommitted-with-directory-input", "uuid")
+      visit page_with_token("active", "/container_requests/#{request_uuid}")
+      assert_text 'Provide a value for the following parameter'
+      click_link 'Choose'
+      within('.modal-dialog') do
+        wait_for_ajax
+        collection = api_fixture('collections', 'collection_with_one_property', 'uuid')
+        find("div[data-object-uuid=#{collection}]").click
+        if type == 'file_type'
+          wait_for_ajax
+          find('.preview-selectable', text: 'bar').click
+        end
+        find('button', text: 'OK').click
+      end
+      page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
+      assert_text 'This workflow does not need any further inputs'
+      click_link "Run"
+      wait_for_ajax
+      assert_text 'This container is queued'
+    end
+  end
+
+  test "Run button enabled once all required inputs are provided" do
+    request_uuid = api_fixture("container_requests", "uncommitted-with-required-and-optional-inputs", "uuid")
+    visit page_with_token("active", "/container_requests/#{request_uuid}")
+    assert_text 'Provide a value for the following parameter'
+
+    page.assert_selector 'a.disabled,button.disabled', text: 'Run'
+
+    selector = ".editable[data-name='[mounts][/var/lib/cwl/cwl.input.json][content][int_required]']"
+    find(selector).click
+    find(".editable-input input").set(2016)
+    find("#editable-submit").click
+
+    page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
+    click_link "Run"
+    wait_for_ajax
+    assert_text 'This container is queued'
+  end
+
+  test "Run button enabled when workflow is empty and no inputs are needed" do
+    visit page_with_token("active")
+
+    find('.btn', text: 'Run a process').click
+    within('.modal-dialog') do
+      find('.selectable', text: 'Valid workflow with no definition yaml').click
+      find('.btn', text: 'Next: choose inputs').click
+    end
+
+    assert_text 'This workflow does not need any further inputs'
+    page.assert_selector 'a', text: 'Run'
+  end
+
+  test "Provenance graph shown on committed container requests" do
+    cr = api_fixture('container_requests', 'completed')
+    visit page_with_token("active", "/container_requests/#{cr['uuid']}")
+    assert page.has_text? 'Provenance'
+    click_link 'Provenance'
+    wait_for_ajax
+    # Check that the provenance graph exists
+    page.assert_selector '#provenance_svg'
+    page.assert_selector 'ellipse+text', text: cr['name'], visible: false
+    page.assert_selector 'g.node>title', text: cr['uuid'], visible: false
+  end
+
+  test "index page" do
+    visit page_with_token("active", "/container_requests")
+
+    within(".arv-recent-container-requests") do
+      page.execute_script "window.scrollBy(0,999000)"
+      wait_for_ajax
+    end
+
+    running_owner_active = api_fixture("container_requests", "requester_for_running")
+    anon_accessible_cr = api_fixture("container_requests", "running_anonymous_accessible")
+
+    # both of these CRs should be accessible to the user
+    assert_selector "a[href=\"/container_requests/#{running_owner_active['uuid']}\"]", text: running_owner_active[:name]
+    assert_selector "a[href=\"/container_requests/#{anon_accessible_cr['uuid']}\"]", text: anon_accessible_cr[:name]
+
+    # user can delete the "running" container_request
+    within(".cr-#{running_owner_active['uuid']}") do
+      assert_not_nil first('.glyphicon-trash')
+    end
+
+    # user cannot delete the anonymously accessible container_request
+    within(".cr-#{anon_accessible_cr['uuid']}") do
+      assert_nil first('.glyphicon-trash')
+    end
+
+    # verify the search box in the page
+    find('.recent-container-requests-filterable-control').set("anonymous")
+    sleep 0.350 # Wait for 250ms debounce timer (see filterable.js)
+    wait_for_ajax
+    assert_no_selector "a[href=\"/container_requests/#{running_owner_active['uuid']}\"]", text: running_owner_active[:name]
+    assert_selector "a[href=\"/container_requests/#{anon_accessible_cr['uuid']}\"]", text: anon_accessible_cr[:name]
+  end
+end
diff --git a/apps/workbench/test/integration/download_test.rb b/apps/workbench/test/integration/download_test.rb
new file mode 100644 (file)
index 0000000..407458b
--- /dev/null
@@ -0,0 +1,97 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+require 'helpers/download_helper'
+
+class DownloadTest < ActionDispatch::IntegrationTest
+  include KeepWebConfig
+
+  @@wrote_test_data = false
+
+  setup do
+    use_keep_web_config
+
+    # Make sure Capybara can download files.
+    need_selenium 'for downloading', :selenium_with_download
+    DownloadHelper.clear
+
+    # Keep data isn't populated by fixtures, so we have to write any
+    # data we expect to read.
+    if !@@wrote_test_data
+      ['foo', 'w a z', "Hello world\n"].each do |data|
+        md5 = `echo -n #{data.shellescape} | arv-put --no-progress --raw -`
+        assert_match(/^#{Digest::MD5.hexdigest(data)}/, md5)
+        assert $?.success?, $?
+      end
+      @@wrote_test_data = true
+    end
+  end
+
+  ['uuid', 'portable_data_hash'].each do |id_type|
+    test "preview from keep-web by #{id_type} using a reader token" do
+      uuid_or_pdh = api_fixture('collections')['foo_file'][id_type]
+      token = api_fixture('api_client_authorizations')['active_all_collections']['api_token']
+      visit "/collections/download/#{uuid_or_pdh}/#{token}/"
+      within 'ul' do
+        click_link 'foo'
+      end
+      assert_no_selector 'a'
+      assert_text 'foo'
+    end
+
+    test "preview anonymous content from keep-web by #{id_type}" do
+      Rails.configuration.anonymous_user_token =
+        api_fixture('api_client_authorizations')['anonymous']['api_token']
+      uuid_or_pdh =
+        api_fixture('collections')['public_text_file'][id_type]
+      visit "/collections/#{uuid_or_pdh}"
+      within "#collection_files" do
+        find('[title~=View]').click
+      end
+      assert_no_selector 'a'
+      assert_text 'Hello world'
+    end
+
+    test "download anonymous content from keep-web by #{id_type}" do
+      Rails.configuration.anonymous_user_token =
+        api_fixture('api_client_authorizations')['anonymous']['api_token']
+      uuid_or_pdh =
+        api_fixture('collections')['public_text_file'][id_type]
+      visit "/collections/#{uuid_or_pdh}"
+      within "#collection_files" do
+        find('[title~=Download]').click
+      end
+      wait_for_download 'Hello world.txt', "Hello world\n"
+    end
+  end
+
+  test "download from keep-web using a session token" do
+    uuid = api_fixture('collections')['w_a_z_file']['uuid']
+    token = api_fixture('api_client_authorizations')['active']['api_token']
+    visit page_with_token('active', "/collections/#{uuid}")
+    within "#collection_files" do
+      find('[title~=Download]').click
+    end
+    wait_for_download 'w a z', 'w a z', timeout: 20
+  end
+
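+  # Poll the download directory until the named file appears with the
+  # expected content, or the timeout (in seconds) elapses.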
+  def wait_for_download filename, expect_data, timeout: 3
+    data = nil
+    tries = 0
+    while tries < timeout*10 && data != expect_data
+      sleep 0.1
+      tries += 1
+      data = File.read(DownloadHelper.path.join filename) rescue nil
+    end
+    assert_equal expect_data, data
+  end
+
+  # TODO(TC): test "view pages hosted by keep-web, using session
+  # token". We might persuade selenium to send
+  # "collection-uuid.dl.example" requests to localhost by configuring
+  # our test nginx server to work as its forward proxy. Until then,
+  # we're relying on the "Redirect to keep_web_url via #{id_type}"
+  # test in CollectionsControllerTest (and keep-web's tests).
+end
diff --git a/apps/workbench/test/integration/errors_test.rb b/apps/workbench/test/integration/errors_test.rb
new file mode 100644 (file)
index 0000000..81d4bbb
--- /dev/null
@@ -0,0 +1,128 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class ErrorsTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  BAD_UUID = "ffffffffffffffffffffffffffffffff+0"
+
+  test "error page renders user navigation" do
+    visit(page_with_token("active", "/collections/#{BAD_UUID}"))
+    assert(page.has_link?("notifications-menu"),
+           "User information missing from error page")
+    assert(page.has_no_text?(/log ?in/i),
+           "Logged in user prompted to log in on error page")
+  end
+
+  test "no user navigation with expired token" do
+    visit(page_with_token("expired", "/collections/#{BAD_UUID}"))
+    assert(page.has_no_link?("notifications-menu"),
+           "Page visited with expired token included user information")
+    assert(page.has_selector?("a", text: /log ?in/i),
+           "Login prompt missing on expired token error page")
+  end
+
+  test "error page renders without login" do
+    visit "/collections/download/#{BAD_UUID}/#{@@API_AUTHS['active']['api_token']}"
+    assert(page.has_no_text?(/\b500\b/),
+           "Error page without login returned 500")
+  end
+
+  test "'object not found' page includes search link" do
+    visit(page_with_token("active", "/collections/#{BAD_UUID}"))
+    assert(all("a").any? { |a| a[:href] =~ %r{/collections/?(\?|$)} },
+           "no search link found on 404 page")
+  end
+
+  def now_timestamp
+    Time.now.utc.to_i
+  end
+
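+  # Error pages embed a token of the form "<unix timestamp>+<8 hex
+  # digits>"; accept any timestamp between test start and now.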
+  def page_has_error_token?(start_stamp)
+    matching_stamps = (start_stamp .. now_timestamp).to_a.join("|")
+    # Check the page HTML because we really don't care how it's presented.
+    # I think it would even be reasonable to put it in a comment.
+    page.html =~ /\b(#{matching_stamps})\+[0-9A-Fa-f]{8}\b/
+  end
+
+  test "showing a bad UUID returns 404" do
+    visit(page_with_token("active", "/pipeline_templates/zzz"))
+    assert(page.has_no_text?(/fiddlesticks/i),
+           "trying to show a bad UUID rendered a fiddlesticks page, not 404")
+  end
+
+  test "404 page includes information about missing object" do
+    visit(page_with_token("active", "/groups/zazazaz"))
+    assert(page.has_text?(/group with UUID zazazaz/i),
+           "name of searched group missing from 404 page")
+  end
+
+  test "unrouted 404 page works" do
+    visit(page_with_token("active", "/__asdf/ghjk/zxcv"))
+    assert(page.has_text?(/not found/i),
+           "unrouted page missing 404 text")
+    assert(page.has_no_text?(/fiddlesticks/i),
+           "unrouted request returned a generic error page, not 404")
+  end
+
+  test "API error page has Report problem button" do
+    # point to a bad api server url to generate fiddlesticks error
+    original_arvados_v1_base = Rails.configuration.arvados_v1_base
+    Rails.configuration.arvados_v1_base = "https://[::1]:1/"
+
+    visit page_with_token("active")
+
+    assert_text 'fiddlesticks'
+
+    # reset the API server base config so the popup can render
+    Rails.configuration.arvados_v1_base = original_arvados_v1_base
+
+    click_link 'Report problem'
+
+    within '.modal-content' do
+      assert_text 'Report a problem'
+      assert_no_text 'Version / debugging info'
+      assert_text 'Describe the problem'
+      assert_text 'Send problem report'
+      # "Send" button should be disabled until text is entered
+      assert_no_selector 'a,button:not([disabled])', text: 'Send problem report'
+      assert_selector 'a,button', text: 'Cancel'
+
+      report = mock
+      report.expects(:deliver).returns true
+      IssueReporter.expects(:send_report).returns report
+
+      # enter report text and click 'Send problem report'
+      find_field('report_issue_text').set 'my test report text'
+      click_button 'Send problem report'
+
+      # on ajax success, the button texts are updated and a footer message is added
+      assert_no_selector 'a,button', text: 'Send problem report'
+      assert_no_selector 'a,button', text: 'Cancel'
+      assert_text 'Report sent'
+      assert_text 'Thanks for reporting this issue'
+      click_button 'Close'
+    end
+
+    # out of the popup now and should be back in the error page
+    assert_text 'fiddlesticks'
+  end
+
+  test "showing a trashed collection UUID gives untrash button" do
+    visit(page_with_token("active", "/collections/zzzzz-4zz18-trashedproj2col"))
+    assert(page.has_text?(/You must untrash the owner project to access this/i),
+           "missing untrash instructions")
+  end
+
+  test "showing a trashed container request gives untrash button" do
+    visit(page_with_token("active", "/container_requests/zzzzz-xvhdp-cr5trashedcontr"))
+    assert(page.has_text?(/You must untrash the owner project to access this/i),
+           "missing untrash instructions")
+  end
+
+end
diff --git a/apps/workbench/test/integration/filterable_infinite_scroll_test.rb b/apps/workbench/test/integration/filterable_infinite_scroll_test.rb
new file mode 100644 (file)
index 0000000..ed23d30
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class FilterableInfiniteScrollTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  # Chrome remembers what you had in the text field when you hit
+  # "back". Here, we simulate the same effect by sending an otherwise
+  # unused ?search=foo param to pre-populate the search field.
+  test 'no double-load if text input has a value at page load time' do
+    visit page_with_token('admin', '/pipeline_instances')
+    assert_text 'pipeline_with_job'
+    visit page_with_token('admin', '/pipeline_instances?search=pipeline_with_tagged')
+    # Horrible hack to ensure the search results can't load correctly
+    # on the second attempt.
+    assert_selector '#recent-pipeline-instances'
+    assert page.evaluate_script('$("#recent-pipeline-instances[data-infinite-content-href0]").attr("data-infinite-content-href0","/give-me-an-error").length == 1')
+    # Wait for the first page of results to appear.
+    assert_text 'pipeline_with_tagged_collection_input'
+    # Make sure the results are filtered.
+    assert_no_text 'pipeline_with_job'
+    # Make sure pipeline_with_job didn't disappear merely because
+    # the results were replaced with an error message.
+    assert_text 'pipeline_with_tagged_collection_input'
+  end
+end
diff --git a/apps/workbench/test/integration/integration_test_utils.rb b/apps/workbench/test/integration/integration_test_utils.rb
new file mode 100644 (file)
index 0000000..336843c
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This file defines methods reusable by two or more integration tests.
+#
+
+# assert_checkboxes_state asserts that the page holds at least one
+# checkbox matching 'selector', and that all matching checkboxes
+# are in state 'checkbox_status' (i.e. checked if true, unchecked otherwise).
+def assert_checkboxes_state(selector, checkbox_status, msg=nil)
+  assert page.has_selector?(selector)
+  page.all(selector).each do |checkbox|
+    assert(checkbox.checked? == checkbox_status, msg)
+  end
+end
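+
+# Example usage (a sketch; the selector and message are illustrative,
+# not taken from any existing test):
+#
+#   assert_checkboxes_state('input[type=checkbox]', true,
+#                           'expected all visible checkboxes to be checked')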
diff --git a/apps/workbench/test/integration/jobs_test.rb b/apps/workbench/test/integration/jobs_test.rb
new file mode 100644 (file)
index 0000000..bf48d88
--- /dev/null
@@ -0,0 +1,184 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'fileutils'
+require 'tmpdir'
+
+require 'integration_helper'
+
+class JobsTest < ActionDispatch::IntegrationTest
+  include KeepWebConfig
+
+  setup do
+    need_javascript
+  end
+
+  def fakepipe_with_log_data
+    content =
+      "2014-01-01_12:00:01 zzzzz-8i9sb-0vsrcqi7whchuil 0  log message 1\n" +
+      "2014-01-01_12:00:02 zzzzz-8i9sb-0vsrcqi7whchuil 0  log message 2\n" +
+      "2014-01-01_12:00:03 zzzzz-8i9sb-0vsrcqi7whchuil 0  log message 3\n"
+    StringIO.new content, 'r'
+  end
+
+  test "add job description" do
+    job = api_fixture('jobs')['nearly_finished_job']
+    visit page_with_token("active", "/jobs/#{job['uuid']}")
+
+    # edit job description
+    within('.arv-description-as-subtitle') do
+      find('.fa-pencil').click
+      find('.editable-input textarea').set('*Textile description for job* - "Go to dashboard":/')
+      find('.editable-submit').click
+    end
+
+    # Verify edited description
+    assert_no_text '*Textile description for job*'
+    assert_text 'Textile description for job'
+    assert_selector 'a[href="/"]', text: 'Go to dashboard'
+  end
+
+  test 'view partial job log' do
+    need_selenium 'to be able to see the CORS response headers (PhantomJS 1.9.8 does not)'
+    use_keep_web_config
+
+    # This config will be restored during teardown by ../test_helper.rb:
+    Rails.configuration.log_viewer_max_bytes = 100
+
+    logdata = fakepipe_with_log_data.read
+    job_uuid = api_fixture('jobs')['running']['uuid']
+    logcollection = upload_data_and_get_collection(logdata, 'active', "#{job_uuid}.log.txt")
+    job = nil
+    use_token 'active' do
+      job = Job.find job_uuid
+      job.update_attributes log: logcollection.portable_data_hash
+    end
+    visit page_with_token 'active', '/jobs/'+job.uuid
+    find('a[href="#Log"]').click
+    wait_for_ajax
+    assert_text 'Showing only 100 bytes of this log'
+  end
+
+  test 'view log via keep-web redirect' do
+    use_keep_web_config
+
+    token = api_token('active')
+    logdata = fakepipe_with_log_data.read
+    logblock = `echo -n #{logdata.shellescape} | ARVADOS_API_TOKEN=#{token.shellescape} arv-put --no-progress --raw -`.strip
+    assert $?.success?, $?
+
+    job = nil
+    use_token 'active' do
+      job = Job.find api_fixture('jobs')['running']['uuid']
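+      # Keep manifest line: "<stream name> <block locator> <offset>:<length>:<filename>\n"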
+      mtxt = ". #{logblock} 0:#{logdata.length}:#{job.uuid}.log.txt\n"
+      logcollection = Collection.create(manifest_text: mtxt)
+      job.update_attributes log: logcollection.portable_data_hash
+    end
+    visit page_with_token 'active', '/jobs/'+job.uuid
+    find('a[href="#Log"]').click
+    assert_text 'log message 1'
+  end
+
+  [
+    ['foobar', false, false],
+    ['job_with_latest_version', true, false],
+    ['job_with_latest_version', true, true],
+  ].each do |job_name, expect_options, use_latest|
+    test "Rerun #{job_name} job, expect options #{expect_options},
+          and use latest version option #{use_latest}" do
+      job = api_fixture('jobs')[job_name]
+      visit page_with_token 'active', '/jobs/'+job['uuid']
+
+      if expect_options
+        assert_text 'supplied_script_version: master'
+      else
+        assert_no_text 'supplied_script_version'
+      end
+
+      assert_triggers_dom_event 'shown.bs.modal' do
+        find('a,button', text: 'Re-run job...').click
+      end
+      within('.modal-dialog') do
+        assert_selector 'a,button', text: 'Cancel'
+        if use_latest
+          page.choose("job_script_version_#{job['supplied_script_version']}")
+        end
+        click_on "Run now"
+      end
+
+      # Re-running jobs doesn't currently work because the test API
+      # server has no git repository to check against.  For now, check
+      # that the error message says something appropriate for that
+      # situation.
+      if expect_options && use_latest
+        assert_text "077ba2ad3ea24a929091a9e6ce545c93199b8e57"
+      else
+        assert_text "Script version #{job['script_version']} does not resolve to a commit"
+      end
+    end
+  end
+
+  [
+    ['active', true],
+    ['job_reader2', false],
+  ].each do |user, readable|
+    test "view job with components as #{user} user" do
+      job = api_fixture('jobs')['running_job_with_components']
+      component1 = api_fixture('jobs')['completed_job_in_publicly_accessible_project']
+      component2 = api_fixture('pipeline_instances')['running_pipeline_with_complete_job']
+      component2_child1 = api_fixture('jobs')['previous_job_run']
+      component2_child2 = api_fixture('jobs')['running']
+
+      visit page_with_token(user, "/jobs/#{job['uuid']}")
+      assert page.has_text? job['script_version']
+      assert page.has_no_text? 'script_parameters'
+
+      # job_reader2 is allowed to read the job, component2, and
+      # component2_child1; component2_child2 is visible only as a
+      # component of the pipeline component2
+      if readable
+        assert page.has_link? 'component1'
+        assert page.has_link? 'component2'
+      else
+        assert page.has_no_link? 'component1'
+        assert page.has_link? 'component2'
+      end
+
+      if readable
+        click_link('component1')
+        within('.panel-collapse') do
+          assert(has_text? component1['uuid'])
+          assert(has_text? component1['script_version'])
+          assert(has_text? 'script_parameters')
+        end
+        click_link('component1')
+      end
+
+      click_link('component2')
+      within('.panel-collapse') do
+        assert(has_text? component2['uuid'])
+        assert(has_text? component2['script_version'])
+        assert(has_no_text? 'script_parameters')
+        assert(has_link? 'previous')
+        assert(has_link? 'running')
+
+        click_link('previous')
+        within('.panel-collapse') do
+          assert(has_text? component2_child1['uuid'])
+          assert(has_text? component2_child1['script_version'])
+        end
+        click_link('previous')
+
+        click_link('running')
+        within('.panel-collapse') do
+          assert(has_text? component2_child2['uuid'])
+          if readable
+            assert(has_text? component2_child2['script_version'])
+          else
+            assert(has_no_text? component2_child2['script_version'])
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/apps/workbench/test/integration/link_account_test.rb b/apps/workbench/test/integration/link_account_test.rb
new file mode 100644 (file)
index 0000000..9c22f5a
--- /dev/null
@@ -0,0 +1,172 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+require 'webrick'
+
+class LinkAccountTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  def start_sso_stub(token)
+    port = available_port('sso_stub')
+
+    s = WEBrick::HTTPServer.new(
+      :Port => port,
+      :BindAddress => 'localhost',
+      :Logger => WEBrick::Log.new('/dev/null', WEBrick::BasicLog::DEBUG),
+      :AccessLog => [nil,nil]
+    )
+
+    s.mount_proc("/login"){|req, res|
+      res.set_redirect(WEBrick::HTTPStatus::TemporaryRedirect, req.query["return_to"] + "&api_token=#{token}")
+      s.shutdown
+    }
+
+    s.mount_proc("/logout"){|req, res|
+      res.set_redirect(WEBrick::HTTPStatus::TemporaryRedirect, req.query["return_to"])
+    }
+
+    Thread.new do
+      s.start
+    end
+
+    "http://localhost:#{port}/"
+  end
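+
+  # The stub implements just enough of the SSO redirect flow for these
+  # tests (summarizing the two endpoints mounted above):
+  #
+  #   GET /login?return_to=URL  -> 307 redirect to URL&api_token=TOKEN,
+  #                                then the stub shuts itself down
+  #   GET /logout?return_to=URL -> 307 redirect to URL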
+
+  test "Add another login to this account" do
+    visit page_with_token('active_trustedclient')
+    stub = start_sso_stub(api_fixture('api_client_authorizations')['project_viewer_trustedclient']['api_token'])
+    Rails.configuration.arvados_login_base = stub + "login"
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+
+    find("a", text: "Link account").click
+    find("button", text: "Add another login to this account").click
+
+    find("#notifications-menu").click
+    assert_text "project-viewer@arvados.local"
+
+    find("button", text: "Link accounts").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+  end
+
+  test "Use this login to access another account" do
+    visit page_with_token('project_viewer_trustedclient')
+    stub = start_sso_stub(api_fixture('api_client_authorizations')['active_trustedclient']['api_token'])
+    Rails.configuration.arvados_login_base = stub + "login"
+
+    find("#notifications-menu").click
+    assert_text "project-viewer@arvados.local"
+
+    find("a", text: "Link account").click
+    find("button", text: "Use this login to access another account").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+
+    find("button", text: "Link accounts").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+  end
+
+  test "Link login of inactive user to this account" do
+    visit page_with_token('active_trustedclient')
+    stub = start_sso_stub(api_fixture('api_client_authorizations')['inactive_uninvited_trustedclient']['api_token'])
+    Rails.configuration.arvados_login_base = stub + "login"
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+
+    find("a", text: "Link account").click
+    find("button", text: "Add another login to this account").click
+
+    find("#notifications-menu").click
+    assert_text "inactive-uninvited-user@arvados.local"
+
+    find("button", text: "Link accounts").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+  end
+
+  test "Cannot link to inactive user" do
+    visit page_with_token('active_trustedclient')
+    stub = start_sso_stub(api_fixture('api_client_authorizations')['inactive_uninvited_trustedclient']['api_token'])
+    Rails.configuration.arvados_login_base = stub + "login"
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+
+    find("a", text: "Link account").click
+    find("button", text: "Use this login to access another account").click
+
+    find("#notifications-menu").click
+    assert_text "inactive-uninvited-user@arvados.local"
+
+    assert_text "Cannot link active-user@arvados.local"
+
+    assert find("#link-account-submit")['disabled']
+
+    find("button", text: "Cancel").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+  end
+
+  test "Inactive user can link to active account" do
+    visit page_with_token('inactive_uninvited_trustedclient')
+    stub = start_sso_stub(api_fixture('api_client_authorizations')['active_trustedclient']['api_token'])
+    Rails.configuration.arvados_login_base = stub + "login"
+
+    find("#notifications-menu").click
+    assert_text "inactive-uninvited-user@arvados.local"
+
+    assert_text "Already have an account with a different login?"
+
+    find("a", text: "Link this login to your existing account").click
+
+    assert_no_text "Add another login to this account"
+
+    find("button", text: "Use this login to access another account").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+
+    find("button", text: "Link accounts").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+  end
+
+  test "Admin cannot link to non-admin" do
+    visit page_with_token('admin_trustedclient')
+    stub = start_sso_stub(api_fixture('api_client_authorizations')['active_trustedclient']['api_token'])
+    Rails.configuration.arvados_login_base = stub + "login"
+
+    find("#notifications-menu").click
+    assert_text "admin@arvados.local"
+
+    find("a", text: "Link account").click
+    find("button", text: "Use this login to access another account").click
+
+    find("#notifications-menu").click
+    assert_text "active-user@arvados.local"
+
+    assert_text "Cannot link admin account admin@arvados.local"
+
+    assert find("#link-account-submit")['disabled']
+
+    find("button", text: "Cancel").click
+
+    find("#notifications-menu").click
+    assert_text "admin@arvados.local"
+  end
+
+end
diff --git a/apps/workbench/test/integration/logins_test.rb b/apps/workbench/test/integration/logins_test.rb
new file mode 100644 (file)
index 0000000..7f2774c
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class LoginsTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  test "login with api_token works after redirect" do
+    visit page_with_token('active_trustedclient')
+    assert page.has_text?('Recent pipelines and processes'), "Missing 'Recent pipelines and processes' from page"
+    assert_no_match(/\bapi_token=/, current_path)
+  end
+
+  test "trying to use expired token redirects to login page" do
+    visit page_with_token('expired_trustedclient')
+    buttons = all("a.btn", text: /Log in/)
+    assert_equal(1, buttons.size, "Failed to find one login button")
+    login_link = buttons.first[:href]
+    assert_match(%r{//[^/]+/login}, login_link)
+    assert_no_match(/\bapi_token=/, login_link)
+  end
+end
diff --git a/apps/workbench/test/integration/pipeline_instances_test.rb b/apps/workbench/test/integration/pipeline_instances_test.rb
new file mode 100644 (file)
index 0000000..47e385a
--- /dev/null
@@ -0,0 +1,588 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class PipelineInstancesTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  def parse_browser_timestamp t
+    # Timestamps are displayed in the browser's time zone (which can
+    # differ from ours) and they come from toLocaleTimeString (which
+    # means they don't necessarily tell us which time zone they're
+    # using). In order to make sense of them, we need to ask the
+    # browser to parse them and generate a timestamp that can be
+    # parsed reliably.
+    #
+    # Note: Even with all this help, phantomjs seems to behave badly
+    # when parsing timestamps on the other side of a DST transition.
+    # See skipped tests below.
+
+    # In some locales (e.g., en_CA.UTF-8) Firefox can't parse what its
+    # own toLocaleString() puts out.
+    t.sub!(/(\d\d\d\d)-(\d\d)-(\d\d)/, '\2/\3/\1')
+
+    if /(\d+:\d+ [AP]M) (\d+\/\d+\/\d+)/ =~ t
+      # Currently dates.js renders timestamps as
+      # '{t.toLocaleTimeString()} {t.toLocaleDateString()}' which even
+      # en_US browsers can't make sense of. First we need to flip it
+      # around so it looks like what toLocaleString() would have made.
+      t = $~[2] + ', ' + $~[1]
+    end
+
+    utc = page.evaluate_script("new Date('#{t}').toUTCString()")
+    DateTime.parse(utc).to_time
+  end
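+
+  # For example (a sketch; the result depends on the browser's time
+  # zone):
+  #
+  #   parse_browser_timestamp('1:59 AM 3/8/2015')
+  #   # => 2015-03-08 06:59:00 UTC when the browser runs in US/Eastern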
+
+  if false
+    # No need to test (or mention) these all the time. If they start
+    # working (without need_selenium) then some real tests might not
+    # need_selenium any more.
+
+    test 'phantomjs DST' do
+      skip '^^'
+      t0s = '3/8/2015, 01:59 AM'
+      t1s = '3/8/2015, 03:01 AM'
+      t0 = parse_browser_timestamp t0s
+      t1 = parse_browser_timestamp t1s
+      assert_equal 120, t1-t0, "'#{t0s}' to '#{t1s}' was reported as #{t1-t0} seconds, should be 120"
+    end
+
+    test 'phantomjs DST 2' do
+      skip '^^'
+      t0s = '2015-03-08T10:43:00Z'
+      t1s = '2015-03-09T03:43:00Z'
+      t0 = parse_browser_timestamp page.evaluate_script("new Date('#{t0s}').toLocaleString()")
+      t1 = parse_browser_timestamp page.evaluate_script("new Date('#{t1s}').toLocaleString()")
+      assert_equal 17*3600, t1-t0, "'#{t0s}' to '#{t1s}' was reported as #{t1-t0} seconds, should be #{17*3600} (17 hours)"
+    end
+  end
+
+  test 'Create and run a pipeline' do
+    visit page_with_token('active_trustedclient', '/pipeline_templates')
+    within('tr', text: 'Two Part Pipeline Template') do
+      find('a,button', text: 'Run').click
+    end
+
+    # project chooser
+    within('.modal-dialog') do # FIXME: source of 1 test error
+      find('.selectable', text: 'A Project').click
+      find('button', text: 'Choose').click
+    end
+
+    # This pipeline needs input. So, Run should be disabled
+    page.assert_selector 'a.disabled,button.disabled', text: 'Run'
+
+    instance_page = current_path
+
+    # Add this collection to the project
+    visit '/projects'
+    find("#projects-menu").click
+    find('.dropdown-menu a,button', text: 'A Project').click
+    find('.btn', text: 'Add data').click
+    find('.dropdown-menu a,button', text: 'Copy data from another project').click
+    within('.modal-dialog') do
+      wait_for_ajax
+      first('span', text: 'foo_tag').click
+      find('.btn', text: 'Copy').click
+    end
+    using_wait_time(Capybara.default_max_wait_time * 3) do
+      wait_for_ajax
+    end
+
+    click_link 'Pipelines and processes'
+    find('tr[data-kind="arvados#pipelineInstance"]', text: '(none)').
+      find('a', text: 'Show').
+      click
+
+    assert find('p', text: 'Provide a value')
+
+    find('div.form-group', text: 'Foo/bar pair').
+      find('.btn', text: 'Choose').
+      click
+
+    within('.modal-dialog') do
+      assert(has_text?("Foo/bar pair"),
+             "pipeline input picker missing name of input")
+      wait_for_ajax
+      first('span', text: 'foo_tag').click
+      find('button', text: 'OK').click
+    end
+    wait_for_ajax
+
+    # The input, after being specified, should still be displayed (#3382)
+    assert find('div.form-group', text: 'Foo/bar pair')
+
+    # The input, after being specified, should still be editable (#3382)
+    find('div.form-group', text: 'Foo/bar pair').
+      find('.btn', text: 'Choose').click
+
+    within('.modal-dialog') do
+      assert(has_text?("Foo/bar pair"),
+             "pipeline input picker missing name of input")
+      wait_for_ajax
+      first('span', text: 'foo_tag').click
+      find('button', text: 'OK').click
+    end
+
+    # For good measure, check one last time that the input, after being specified twice, is still displayed (#3382)
+    assert find('div.form-group', text: 'Foo/bar pair')
+
+    # Ensure that the collection's portable_data_hash, uuid and name
+    # are saved in the desired places. (#4015)
+
+    # foo_collection_in_aproject is the collection tagged with foo_tag.
+    collection = api_fixture('collections', 'foo_collection_in_aproject')
+    click_link 'Advanced'
+    click_link 'API response'
+    api_response = JSON.parse(find('div#advanced_api_response pre').text)
+    input_params = api_response['components']['part-one']['script_parameters']['input']
+    assert_equal input_params['value'], collection['portable_data_hash']
+    assert_equal input_params['selection_name'], collection['name']
+    assert_equal input_params['selection_uuid'], collection['uuid']
+
+    # "Run" button is now enabled
+    page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
+
+    first('a,button', text: 'Run').click
+
+    # Pipeline is running. We have a "Pause" button instead now.
+    page.assert_selector 'a,button', text: 'Pause'
+    find('a,button', text: 'Pause').click
+
+    # Pipeline is stopped. It should now be in the Paused state and runnable again.
+    assert page.has_text? 'Paused'
+    page.assert_no_selector 'a.disabled,button.disabled', text: 'Resume'
+    page.assert_selector 'a,button', text: 'Re-run with latest'
+    page.assert_selector 'a,button', text: 'Re-run options'
+
+    # Since this is the test environment, no jobs are created to run, so the graph is not visible
+    assert page.has_no_text? 'Graph'
+  end
+
+  # Create a pipeline instance from within a project and run
+  test 'Create pipeline inside a project and run' do
+    visit page_with_token('active_trustedclient', '/projects')
+
+    # Add collection to the project using Add data button
+    find("#projects-menu").click
+    find('.dropdown-menu a,button', text: 'A Project').click
+    find('.btn', text: 'Add data').click
+    find('.dropdown-menu a,button', text: 'Copy data from another project').click
+    within('.modal-dialog') do
+      wait_for_ajax
+      first('span', text: 'foo_tag').click
+      find('.btn', text: 'Copy').click
+    end
+    using_wait_time(Capybara.default_max_wait_time * 3) do
+      wait_for_ajax
+    end
+
+    create_and_run_pipeline_in_aproject true, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false
+  end
+
+  # Create a pipeline instance from outside of a project
+  test 'Run a pipeline from dashboard' do
+    visit page_with_token('active_trustedclient')
+    create_and_run_pipeline_in_aproject false, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false
+  end
+
+  test 'view pipeline with job and see graph' do
+    visit page_with_token('active_trustedclient', '/pipeline_instances')
+    assert page.has_text? 'pipeline_with_job'
+
+    find('a', text: 'pipeline_with_job').click
+
+    # since the pipeline component has a job, expect to see the graph
+    assert page.has_text? 'Graph'
+    click_link 'Graph'
+    page.assert_selector "#provenance_graph"
+  end
+
+  test 'pipeline description' do
+    visit page_with_token('active_trustedclient', '/pipeline_instances')
+    assert page.has_text? 'pipeline_with_job'
+
+    find('a', text: 'pipeline_with_job').click
+
+    within('.arv-description-as-subtitle') do
+      find('.fa-pencil').click
+      find('.editable-input textarea').set('*Textile description for pipeline instance*')
+      find('.editable-submit').click
+    end
+    wait_for_ajax
+
+    # verify description
+    assert page.has_no_text? '*Textile description for pipeline instance*'
+    assert page.has_text? 'Textile description for pipeline instance'
+  end
+
+  test "JSON popup available for strange components" do
+    uuid = api_fixture("pipeline_instances")["components_is_jobspec"]["uuid"]
+    visit page_with_token("active", "/pipeline_instances/#{uuid}")
+    click_on "Components"
+    assert(page.has_no_text?("script_parameters"),
+           "components JSON visible without popup")
+    click_on "Show components JSON"
+    assert(page.has_text?("script_parameters"),
+           "components JSON not found")
+  end
+
+  def create_pipeline_from(template_name, project_name="Home")
+    # Visit the named pipeline template and create a pipeline instance from it.
+    # The instance will be created under the named project.
+    template_uuid = api_fixture("pipeline_templates", template_name, "uuid")
+    visit page_with_token("active", "/pipeline_templates/#{template_uuid}")
+    click_on "Run this pipeline"
+    within(".modal-dialog") do # FIXME: source of 3 test errors
+      # Set project for the new pipeline instance
+      find(".selectable", text: project_name).click
+      click_on "Choose"
+    end
+    assert(has_text?("This pipeline was created from the template"),
+           "did not land on pipeline instance page")
+  end
+
+  PROJECT_WITH_SEARCH_COLLECTION = "A Subproject"
+  def check_parameter_search(proj_name)
+    create_pipeline_from("parameter_with_search", proj_name)
+    search_text = api_fixture("pipeline_templates", "parameter_with_search",
+                              "components", "with-search",
+                              "script_parameters", "input", "search_for")
+    first("a.btn,button", text: "Choose").click
+    within(".modal-body") do
+      if proj_name != PROJECT_WITH_SEARCH_COLLECTION
+        # Switch finder modal to Subproject to find the Collection.
+        click_on proj_name
+        click_on PROJECT_WITH_SEARCH_COLLECTION
+      end
+      assert_equal(search_text, first("input").value,
+                   "parameter search not preseeded")
+      assert(has_text?(api_fixture("collections")["baz_collection_name_in_asubproject"]["name"]),
+             "baz Collection not in preseeded search results")
+    end
+  end
+
+  test "Workbench respects search_for parameter in templates" do
+    check_parameter_search(PROJECT_WITH_SEARCH_COLLECTION)
+  end
+
+  test "Workbench preserves search_for parameter after project switch" do
+    check_parameter_search("A Project")
+  end
+
+  test "enter a float for a number pipeline input" do
+    # Poltergeist either does not support the HTML 5 <input
+    # type="number">, or interferes with the associated X-Editable
+    # validation code.  If the input field has type=number (forcing an
+    # integer), this test will yield a false positive under
+    # Poltergeist.  --Brett, 2015-02-05
+    need_selenium "for strict X-Editable input validation"
+    create_pipeline_from("template_with_dataclass_number")
+    INPUT_SELECTOR =
+      ".editable[data-name='[components][work][script_parameters][input][value]']"
+    find(INPUT_SELECTOR).click
+    find(".editable-input input").set("12.34")
+    find("#editable-submit").click
+    assert_no_selector(".editable-popup")
+    assert_selector(INPUT_SELECTOR, text: "12.34")
+  end
+
+  [
+    [true, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false],
+    [false, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false],
+    [true, 'Two Part Template with dataclass File', 'foo_collection_in_aproject', true],
+    [false, 'Two Part Template with dataclass File', 'foo_collection_in_aproject', true],
+    [true, 'Two Part Pipeline Template', 'collection_with_no_name_in_aproject', false],
+  ].each do |in_aproject, template_name, collection, choose_file|
+    test "Run pipeline instance in #{in_aproject} with #{template_name} with #{collection} file #{choose_file}" do
+      if in_aproject
+        visit page_with_token 'active', \
+        '/projects/'+api_fixture('groups')['aproject']['uuid']
+      else
+        visit page_with_token 'active', '/'
+      end
+
+      # need bigger modal size when choosing a file from collection
+      if Capybara.current_driver == :selenium
+        Capybara.current_session.driver.browser.manage.window.resize_to(1200, 800)
+      end
+
+      create_and_run_pipeline_in_aproject in_aproject, template_name, collection, choose_file
+      instance_path = current_path
+
+      # Pause the pipeline
+      find('a,button', text: 'Pause').click
+      assert page.has_text? 'Paused'
+      page.assert_no_selector 'a.disabled,button.disabled', text: 'Resume'
+      page.assert_selector 'a,button', text: 'Re-run with latest'
+      page.assert_selector 'a,button', text: 'Re-run options'
+
+      # Verify that the newly created instance is created in the right project.
+      assert page.has_text? 'Home'
+      if in_aproject
+        assert page.has_text? 'A Project'
+      else
+        assert page.has_no_text? 'A Project'
+      end
+    end
+  end
+
+  [
+    ['active', false, false, false],
+    ['active', false, false, true],
+    ['active', true, false, false],
+    ['active', true, true, false],
+    ['active', true, false, true],
+    ['active', true, true, true],
+    ['project_viewer', false, false, true],
+    ['project_viewer', true, true, true],
+  ].each do |user, with_options, choose_options, in_aproject|
+    test "Rerun pipeline instance as #{user} using options #{with_options} #{choose_options} in #{in_aproject}" do
+      if in_aproject
+        path = '/pipeline_instances/'+api_fixture('pipeline_instances')['pipeline_owned_by_active_in_aproject']['uuid']
+      else
+        path = '/pipeline_instances/'+api_fixture('pipeline_instances')['pipeline_owned_by_active_in_home']['uuid']
+      end
+
+      visit page_with_token(user, path)
+
+      page.assert_selector 'a,button', text: 'Re-run with latest'
+      page.assert_selector 'a,button', text: 'Re-run options'
+
+      if user == 'project_viewer' && in_aproject
+        assert page.has_text? 'A Project'
+      end
+
+      # Now re-run the pipeline
+      if with_options
+        assert_triggers_dom_event 'shown.bs.modal' do
+          find('a,button', text: 'Re-run options').click
+        end
+        within('.modal-dialog') do
+          page.assert_selector 'a,button', text: 'Copy and edit inputs'
+          page.assert_selector 'a,button', text: 'Run now'
+          if choose_options
+            find('button', text: 'Copy and edit inputs').click
+          else
+            find('button', text: 'Run now').click
+          end
+        end
+      else
+        find('a,button', text: 'Re-run with latest').click
+      end
+
+      # Verify that the newly created instance is created in the right
+      # project. For the project_viewer user, who cannot write to the
+      # project, the pipeline should have been created in the user's
+      # Home project.
+      assert_not_equal path, current_path, 'Rerun instance path expected to be different'
+      assert_text 'Home'
+      if in_aproject && (user != 'project_viewer')
+        assert_text 'A Project'
+      else
+        assert_no_text 'A Project'
+      end
+    end
+  end
+
+  # Create and run a pipeline for 'Two Part Pipeline Template' in 'A Project'
+  def create_and_run_pipeline_in_aproject(in_aproject, template_name, collection_fixture, choose_file=false)
+    # collection in aproject to be used as input
+    collection = api_fixture('collections', collection_fixture)
+
+    # create a pipeline instance
+    find('.btn', text: 'Run a process').click
+    within('.modal-dialog') do
+      find('.selectable', text: template_name).click
+      find('.btn', text: 'Next: choose inputs').click
+    end
+
+    assert find('p', text: 'Provide a value')
+
+    find('div.form-group', text: 'Foo/bar pair').
+      find('.btn', text: 'Choose').
+      click
+
+    within('.modal-dialog') do
+      if in_aproject
+        assert_selector 'button.dropdown-toggle', text: 'A Project'
+        wait_for_ajax
+      else
+        assert_selector 'button.dropdown-toggle', text: 'Home'
+        wait_for_ajax
+        click_button "Home"
+        click_link "A Project"
+        wait_for_ajax
+      end
+
+      if collection_fixture == 'foo_collection_in_aproject'
+        first('span', text: 'foo_tag').click
+      elsif collection['name']
+        first('span', text: "#{collection['name']}").click
+      else
+        collection_uuid = collection['uuid']
+        find("div[data-object-uuid=#{collection_uuid}]").click
+      end
+
+      if choose_file
+        wait_for_ajax
+        find('.preview-selectable', text: 'foo').click
+      end
+      find('button', text: 'OK').click
+    end
+
+    # The input, after being specified, should still be displayed (#3382)
+    assert find('div.form-group', text: 'Foo/bar pair')
+
+    # Ensure that the collection's portable_data_hash, uuid and name
+    # are saved in the desired places. (#4015)
+    click_link 'Advanced'
+    click_link 'API response'
+
+    api_response = JSON.parse(find('div#advanced_api_response pre').text)
+    input_params = api_response['components']['part-one']['script_parameters']['input']
+    assert_equal(input_params['selection_uuid'], collection['uuid'], "Expected input param uuid not found")
+    if choose_file
+      assert_equal(input_params['value'], collection['portable_data_hash']+'/foo', "Expected input file param value not found")
+      assert_equal(input_params['selection_name'], collection['name']+'/foo', "Expected input file param name not found")
+    else
+      assert_equal(input_params['value'], collection['portable_data_hash'], "Expected input param value not found")
+      assert_equal(input_params['selection_name'], collection['name'], "Expected input selection name not found")
+    end
+
+    # "Run" button present and enabled
+    page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
+    first('a,button', text: 'Run').click
+
+    # Pipeline is running. We have a "Pause" button instead now.
+    page.assert_no_selector 'a,button', text: 'Run'
+    page.assert_no_selector 'a.disabled,button.disabled', text: 'Resume'
+    page.assert_selector 'a,button', text: 'Pause'
+
+    # Since this is the test environment, no jobs are created to run, so the graph is not visible
+    assert page.has_no_text? 'Graph'
+  end
+
+  [
+    ['user1_with_load', 'zzzzz-d1hrv-10pipelines0001', 0], # run time 0 minutes
+    ['user1_with_load', 'zzzzz-d1hrv-10pipelines0010', 17*60*60 + 51*60], # run time 17 hours and 51 minutes
+    ['active', 'zzzzz-d1hrv-runningpipeline', nil], # state = running
+  ].each do |user, uuid, run_time|
+    test "pipeline start and finish time display for #{uuid}" do
+      need_selenium 'to parse timestamps correctly across DST boundaries'
+      visit page_with_token(user, "/pipeline_instances/#{uuid}")
+
+      regexp = "This pipeline started at (.+?)\\. "
+      if run_time
+        regexp += "It failed after (.+?) at (.+?)\\. Check the Log"
+      else
+        regexp += "It has been active for \\d"
+      end
+      assert_match(/#{regexp}/, page.text)
+
+      return unless run_time
+
+      # match again to capture the (.+?) groups
+      _, started, duration, finished = *(/#{regexp}/.match(page.text))
+      assert_equal(
+        run_time,
+        parse_browser_timestamp(finished) - parse_browser_timestamp(started),
+        "expected: #{run_time}, got: started #{started}, finished #{finished}, duration #{duration}")
+    end
+  end
+
+  [
+    ['fuse', nil, 2, 20],                           # has 2 as of 11-07-2014
+    ['user1_with_load', '000025pipelines', 25, 25], # owned_by the project zzzzz-j7d0g-000025pipelines, two pages
+    ['admin', 'pipeline_20', 1, 1],
+    ['active', 'no such match', 0, 0],
+  ].each do |user, search_filter, expected_min, expected_max|
+    test "scroll pipeline instances page for #{user} with search filter #{search_filter}
+          and expect #{expected_min} <= found_items <= #{expected_max}" do
+      visit page_with_token(user, "/pipeline_instances")
+
+      if search_filter
+        find('.recent-pipeline-instances-filterable-control').set(search_filter)
+        # Wait for 250ms debounce timer (see filterable.js)
+        sleep 0.350
+        wait_for_ajax
+      end
+
+      page_scrolls = expected_max/20 + 2    # scroll num_pages+2 times to verify that infinite scroll stops loading when it should
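+      # e.g. expected_max=25: 25/20 + 2 = 3 (integer division), so the
+      # (0..3) loop below scrolls four times.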
+      within('.arv-recent-pipeline-instances') do
+        (0..page_scrolls).each do |i|
+          page.driver.scroll_to 0, 999000
+          begin
+            wait_for_ajax
+          rescue
+            # Scrolling past the last page loads nothing more; tolerate
+            # the resulting ajax timeout.
+          end
+        end
+      end
+
+      # Verify that expected number of pipeline instances are found
+      found_items = page.all('tr[data-kind="arvados#pipelineInstance"]')
+      found_count = found_items.count
+      if expected_min == expected_max
+        assert_equal(expected_min, found_count,
+          "Did not find the expected number of items. Expected #{expected_min} and found #{found_count}")
+        assert page.has_no_text? 'request failed'
+      else
+        assert_operator(found_count, :>=, expected_min,
+          "Found too few items. Expected at least #{expected_min} and found #{found_count}")
+        assert_operator(found_count, :<=, expected_max,
+          "Found too many items. Expected at most #{expected_max} and found #{found_count}")
+      end
+    end
+  end
+
+  test 'render job run time when job record is inaccessible' do
+    pi = api_fixture('pipeline_instances', 'has_component_with_completed_jobs')
+    visit page_with_token 'active', '/pipeline_instances/' + pi['uuid']
+    assert_text 'Queued for '
+  end
+
+  test "job logs linked for running pipeline" do
+    pi = api_fixture("pipeline_instances", "running_pipeline_with_complete_job")
+    visit(page_with_token("active", "/pipeline_instances/#{pi['uuid']}"))
+    find(:xpath, "//a[@href='#Log']").click
+    within "#Log" do
+      assert_text "Log for previous"
+      log_link = find("a", text: "Log for previous")
+      assert_includes(log_link[:href],
+                      "/jobs/#{pi["components"]["previous"]["job"]["uuid"]}#Log")
+      assert_selector "#event_log_div"
+    end
+  end
+
+  test "job logs linked for complete pipeline" do
+    pi = api_fixture("pipeline_instances", "complete_pipeline_with_two_jobs")
+    visit(page_with_token("active", "/pipeline_instances/#{pi['uuid']}"))
+    find(:xpath, "//a[@href='#Log']").click
+    within "#Log" do
+      assert_text "Log for previous"
+      pi["components"].each do |cname, cspec|
+        log_link = find("a", text: "Log for #{cname}")
+        assert_includes(log_link[:href], "/jobs/#{cspec["job"]["uuid"]}#Log")
+      end
+      assert_no_selector "#event_log_div"
+    end
+  end
+
+  test "job logs linked for failed pipeline" do
+    pi = api_fixture("pipeline_instances", "failed_pipeline_with_two_jobs")
+    visit(page_with_token("active", "/pipeline_instances/#{pi['uuid']}"))
+    find(:xpath, "//a[@href='#Log']").click
+    within "#Log" do
+      assert_text "Log for previous"
+      pi["components"].each do |cname, cspec|
+        log_link = find("a", text: "Log for #{cname}")
+        assert_includes(log_link[:href], "/jobs/#{cspec["job"]["uuid"]}#Log")
+      end
+      assert_no_selector "#event_log_div"
+    end
+  end
+end
diff --git a/apps/workbench/test/integration/pipeline_templates_test.rb b/apps/workbench/test/integration/pipeline_templates_test.rb
new file mode 100644 (file)
index 0000000..ad14df1
--- /dev/null
@@ -0,0 +1,49 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class PipelineTemplatesTest < ActionDispatch::IntegrationTest
+  test "JSON popup available for strange components" do
+    need_javascript
+    uuid = api_fixture("pipeline_templates")["components_is_jobspec"]["uuid"]
+    visit page_with_token("active", "/pipeline_templates/#{uuid}")
+    click_on "Components"
+    assert(page.has_no_text?("script_parameters"),
+           "components JSON visible without popup")
+    click_on "Show components JSON"
+    assert(page.has_text?("script_parameters"),
+           "components JSON not found")
+  end
+
+  test "pipeline template description" do
+    need_javascript
+    visit page_with_token("active", "/pipeline_templates")
+
+    # go to Two Part pipeline template
+    within first('tr', text: 'Two Part Pipeline Template') do
+      find(".fa-gears").click
+    end
+
+    # edit template description
+    within('.arv-description-as-subtitle') do
+      find('.fa-pencil').click
+      find('.editable-input textarea').set('*Textile description for pipeline template* - "Go to dashboard":/')
+      find('.editable-submit').click
+    end
+    wait_for_ajax
+
+    # Verify edited description
+    assert page.has_no_text? '*Textile description for pipeline template*'
+    assert page.has_text? 'Textile description for pipeline template'
+    assert page.has_link? 'Go to dashboard'
+    click_link 'Go to dashboard'
+    assert page.has_text? 'Recent pipelines and processes'
+
+    # visit the templates page again and verify the edited description
+    visit page_with_token("active", "/pipeline_templates")
+    assert page.has_no_text? '*Textile description for pipeline template*'
+    assert page.has_text? 'Textile description for pipeline template'
+  end
+end
diff --git a/apps/workbench/test/integration/projects_test.rb b/apps/workbench/test/integration/projects_test.rb
new file mode 100644 (file)
index 0000000..ac78344
--- /dev/null
@@ -0,0 +1,759 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+require 'helpers/share_object_helper'
+require_relative 'integration_test_utils'
+
+class ProjectsTest < ActionDispatch::IntegrationTest
+  include ShareObjectHelper
+
+  setup do
+    need_javascript
+  end
+
+  test 'Check collection count for A Project in the tab pane titles' do
+    project_uuid = api_fixture('groups')['aproject']['uuid']
+    visit page_with_token 'active', '/projects/' + project_uuid
+    click_link 'Data collections'
+    wait_for_ajax
+    collection_count = page.all("[data-pk*='collection']").count
+    assert_selector '#Data_collections-tab span', text: "(#{collection_count})"
+  end
+
+  test 'Find a project and edit its description' do
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: "A Project").click
+    within('.container-fluid', text: api_fixture('groups')['aproject']['name']) do
+      find('span', text: api_fixture('groups')['aproject']['name']).click
+      within('.arv-description-as-subtitle') do
+        find('.fa-pencil').click
+        find('.editable-input textarea').set('I just edited this.')
+        find('.editable-submit').click
+      end
+      wait_for_ajax
+    end
+    visit current_path
+    assert(find?('.container-fluid', text: 'I just edited this.'),
+           "Description update did not survive page refresh")
+  end
+
+  test 'Create a project and move it into a different project' do
+    visit page_with_token 'active', '/projects'
+    find("#projects-menu").click
+    within('.dropdown-menu') do
+      first('li', text: 'Home').click
+    end
+    wait_for_ajax
+    find('.btn', text: "Add a subproject").click
+
+    within('h2') do
+      find('.fa-pencil').click
+      find('.editable-input input').set('Project 1234')
+      find('.glyphicon-ok').click
+    end
+    wait_for_ajax
+
+    visit '/projects'
+    find("#projects-menu").click
+    within('.dropdown-menu') do
+      first('li', text: 'Home').click
+    end
+    wait_for_ajax
+    find('.btn', text: "Add a subproject").click
+    within('h2') do
+      find('.fa-pencil').click
+      find('.editable-input input').set('Project 5678')
+      find('.glyphicon-ok').click
+    end
+    wait_for_ajax
+
+    click_link 'Move project...'
+    find('.selectable', text: 'Project 1234').click
+    find('.modal-footer a,button', text: 'Move').click
+    wait_for_ajax
+
+    # Wait for the page to refresh and show the new parent in Sharing panel
+    click_link 'Sharing'
+    assert(page.has_link?("Project 1234"),
+           "Project 5678 should now be inside project 1234")
+  end
+
+  def open_groups_sharing(project_name="aproject", token_name="active")
+    project = api_fixture("groups", project_name)
+    visit(page_with_token(token_name, "/projects/#{project['uuid']}"))
+    click_on "Sharing"
+    click_on "Share with groups"
+  end
+
+  def group_name(group_key)
+    api_fixture("groups", group_key, "name")
+  end
+
+  test "projects not publicly sharable when anonymous browsing disabled" do
+    Rails.configuration.anonymous_user_token = false
+    open_groups_sharing
+    # Check for a group we do expect first, to make sure the modal's loaded.
+    assert_selector(".modal-container .selectable",
+                    text: group_name("all_users"))
+    assert_no_selector(".modal-container .selectable",
+                       text: group_name("anonymous_group"))
+  end
+
+  test "projects publicly sharable when anonymous browsing enabled" do
+    Rails.configuration.anonymous_user_token = "testonlytoken"
+    open_groups_sharing
+    assert_selector(".modal-container .selectable",
+                    text: group_name("anonymous_group"))
+  end
+
+  test "project owner can manage sharing for another user" do
+    add_user = api_fixture('users')['future_project_user']
+    new_name = ["first_name", "last_name"].map { |k| add_user[k] }.join(" ")
+
+    show_object_using('active', 'groups', 'aproject', 'A Project')
+    click_on "Sharing"
+    add_share_and_check("users", new_name, add_user)
+    modify_share_and_check(new_name)
+  end
+
+  test "project owner can manage sharing for another group" do
+    new_name = api_fixture('groups')['future_project_viewing_group']['name']
+
+    show_object_using('active', 'groups', 'aproject', 'A Project')
+    click_on "Sharing"
+    add_share_and_check("groups", new_name)
+    modify_share_and_check(new_name)
+  end
+
+  test "'share with group' listing does not offer projects" do
+    show_object_using('active', 'groups', 'aproject', 'A Project')
+    click_on "Sharing"
+    click_on "Share with groups"
+    good_uuid = api_fixture("groups")["private"]["uuid"]
+    assert(page.has_selector?(".selectable[data-object-uuid=\"#{good_uuid}\"]"),
+           "'share with groups' listing missing owned user group")
+    bad_uuid = api_fixture("groups")["asubproject"]["uuid"]
+    assert(page.has_no_selector?(".selectable[data-object-uuid=\"#{bad_uuid}\"]"),
+           "'share with groups' listing includes project")
+  end
+
+  [
+    ['Move',api_fixture('collections')['collection_to_move_around_in_aproject'],
+      api_fixture('groups')['aproject'],api_fixture('groups')['asubproject']],
+    ['Remove',api_fixture('collections')['collection_to_move_around_in_aproject'],
+      api_fixture('groups')['aproject']],
+    ['Copy',api_fixture('collections')['collection_to_move_around_in_aproject'],
+      api_fixture('groups')['aproject'],api_fixture('groups')['asubproject']],
+    ['Remove',api_fixture('collections')['collection_in_aproject_with_same_name_as_in_home_project'],
+      api_fixture('groups')['aproject'],nil,true],
+  ].each do |action, my_collection, src, dest=nil, expect_name_change=nil|
+    test "selection #{action} -> #{expect_name_change.inspect} for project" do
+      perform_selection_action src, dest, my_collection, action
+
+      case action
+      when 'Copy'
+        assert page.has_text?(my_collection['name']), 'Collection not found in src project after copy'
+        visit page_with_token 'active', '/'
+        find("#projects-menu").click
+        find(".dropdown-menu a", text: dest['name']).click
+        click_link 'Data collections'
+        assert page.has_text?(my_collection['name']), 'Collection not found in dest project after copy'
+
+      when 'Move'
+        assert page.has_no_text?(my_collection['name']), 'Collection still found in src project after move'
+        visit page_with_token 'active', '/'
+        find("#projects-menu").click
+        find(".dropdown-menu a", text: dest['name']).click
+        click_link 'Data collections'
+        assert page.has_text?(my_collection['name']), 'Collection not found in dest project after move'
+
+      when 'Remove'
+        assert page.has_no_text?(my_collection['name']), 'Collection still found in src project after remove'
+      end
+    end
+  end
+
+  def perform_selection_action src, dest, item, action
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: src['name']).click
+    click_link 'Data collections'
+    assert page.has_text?(item['name']), 'Collection not found in src project'
+
+    within('tr', text: item['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    click_button 'Selection'
+
+    within('.selection-action-container') do
+      assert page.has_text?("Compare selected"), "Compare selected link text not found"
+      assert page.has_link?("Copy selected"), "Copy selected link not found"
+      assert page.has_link?("Move selected"), "Move selected link not found"
+      assert page.has_link?("Remove selected"), "Remove selected link not found"
+
+      click_link "#{action} selected"
+    end
+
+    # select the destination project if a Copy or Move action is being performed
+    if action == 'Copy' || action == 'Move'
+      within(".modal-container") do
+        find('.selectable', text: dest['name']).click
+        find('.modal-footer a,button', text: action).click
+        wait_for_ajax
+      end
+    end
+  end
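+
+  # For example (a sketch using the same fixtures as the test table
+  # above):
+  #
+  #   perform_selection_action(api_fixture('groups')['aproject'],
+  #                            api_fixture('groups')['asubproject'],
+  #                            api_fixture('collections')['collection_to_move_around_in_aproject'],
+  #                            'Copy')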
+
+  # Test copy action state. It should not be available when a subproject is selected.
+  test "copy action is disabled when a subproject is selected" do
+    my_project = api_fixture('groups')['aproject']
+    my_collection = api_fixture('collections')['collection_to_move_around_in_aproject']
+    my_subproject = api_fixture('groups')['asubproject']
+
+    # verify that selection options are disabled on the project until an item is selected
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: my_project['name']).click
+
+    click_link 'Data collections'
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_selector 'li.disabled', text: 'Copy selected'
+      assert_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li.disabled', text: 'Remove selected'
+    end
+
+    # select collection and verify links are enabled
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: my_project['name']).click
+    click_link 'Data collections'
+    assert page.has_text?(my_collection['name']), 'Collection not found in project'
+
+    within('tr', text: my_collection['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_no_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_no_selector 'li.disabled', text: 'Copy selected'
+      assert_selector 'li', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
+    end
+
+    # select subproject and verify that copy action is disabled
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: my_project['name']).click
+
+    click_link 'Subprojects'
+    assert page.has_text?(my_subproject['name']), 'Subproject not found in project'
+
+    within('tr', text: my_subproject['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_selector 'li.disabled', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
+    end
+
+    # select subproject and a collection and verify that copy action is still disabled
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: my_project['name']).click
+
+    click_link 'Subprojects'
+    assert page.has_text?(my_subproject['name']), 'Subproject not found in project'
+
+    within('tr', text: my_subproject['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    click_link 'Data collections'
+    assert page.has_text?(my_collection['name']), 'Collection not found in project'
+
+    within('tr', text: my_collection['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    click_link 'Subprojects'
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_selector 'li.disabled', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
+    end
+  end
+
+  # When project tabs are switched, only options applicable to the current tab's selections are enabled.
+  test "verify selection options when tabs are switched" do
+    my_project = api_fixture('groups')['aproject']
+    my_collection = api_fixture('collections')['collection_to_move_around_in_aproject']
+    my_subproject = api_fixture('groups')['asubproject']
+
+    # Select a subproject and a collection, then verify the selection options as tabs are switched
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: my_project['name']).click
+
+    # Select a sub-project
+    click_link 'Subprojects'
+    assert page.has_text?(my_subproject['name']), 'Subproject not found in project'
+
+    within('tr', text: my_subproject['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    # Select a collection
+    click_link 'Data collections'
+    assert page.has_text?(my_collection['name']), 'Collection not found in project'
+
+    within('tr', text: my_collection['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    # Go back to Subprojects tab
+    click_link 'Subprojects'
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_selector 'li.disabled', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
+    end
+
+    # Close the dropdown by clicking outside it.
+    find('.dropdown-toggle', text: 'Selection').find(:xpath, '..').click
+
+    # Go back to Data collections tab
+    find('.nav-tabs a', text: 'Data collections').click
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_no_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_no_selector 'li.disabled', text: 'Copy selected'
+      assert_selector 'li', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
+    end
+  end
+
+  # "Move selected" and "Remove selected" options should not be
+  # available when current user cannot write to the project
+  test "move selected and remove selected actions not available when current user cannot write to project" do
+    my_project = api_fixture('groups')['anonymously_accessible_project']
+    visit page_with_token 'active', "/projects/#{my_project['uuid']}"
+
+    click_link 'Data collections'
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li', text: 'Create new collection with selected collections'
+      assert_selector 'li', text: 'Compare selected'
+      assert_selector 'li', text: 'Copy selected'
+      assert_no_selector 'li', text: 'Move selected'
+      assert_no_selector 'li', text: 'Remove selected'
+    end
+  end
+
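+  # Each entry below is [token, expect_collection_in_aproject]: 'active' can
+  # write to aproject, so the combined collection is created there;
+  # 'project_viewer' cannot, so it lands in that user's Home project instead.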
+  [
+    ['active', true],
+    ['project_viewer', false],
+  ].each do |user, expect_collection_in_aproject|
+    test "combine selected collections into new collection #{user} #{expect_collection_in_aproject}" do
+      my_project = api_fixture('groups')['aproject']
+      my_collection = api_fixture('collections')['collection_to_move_around_in_aproject']
+
+      visit page_with_token user, "/projects/#{my_project['uuid']}"
+      click_link 'Data collections'
+      assert page.has_text?(my_collection['name']), 'Collection not found in project'
+
+      within('tr', text: my_collection['name']) do
+        find('input[type=checkbox]').click
+      end
+
+      click_button 'Selection'
+      within('.selection-action-container') do
+        click_link 'Create new collection with selected collections'
+      end
+
+      # now in the new collection page
+      if expect_collection_in_aproject
+        assert page.has_text?("Created new collection in the project #{my_project['name']}"),
+                              'Not found flash message that new collection is created in aproject'
+      else
+        assert page.has_text?("Created new collection in your Home project"),
+                              'Not found flash message that new collection is created in Home project'
+      end
+    end
+  end
+
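+  # Open the given project fixture as user1_with_load, verify the advertised
+  # item count, switch to the tab named by item_list_parameter (humanized,
+  # e.g. "Data_collections" -> "Data collections"), and optionally click the
+  # sort header given by sort_parameters.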
+  def scroll_setup(project_name,
+                   total_nbr_items,
+                   item_list_parameter,
+                   sorted = false,
+                   sort_parameters = nil)
+    project_uuid = api_fixture('groups')[project_name]['uuid']
+    visit page_with_token 'user1_with_load', '/projects/' + project_uuid
+
+    assert(page.has_text?("#{item_list_parameter.humanize} (#{total_nbr_items})"), "Did not find the expected number of #{item_list_parameter.humanize}")
+
+    click_link item_list_parameter.humanize
+    wait_for_ajax
+
+    if sorted
+      find("th[data-sort-order='#{sort_parameters.gsub(/\s/,'')}']").click
+      wait_for_ajax
+    end
+  end
+
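+  # Scroll the tab's infinite-scroll container to the bottom, then verify that
+  # exactly the fixtures "#{fixture_prefix}1".."#{fixture_prefix}#{nbr_items}"
+  # are rendered (in case-insensitive ascending order when sorted is true).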
+  def scroll_items_check(nbr_items,
+                         fixture_prefix,
+                         item_list_parameter,
+                         item_selector,
+                         sorted = false)
+    items = (1..nbr_items).map { |i| "#{fixture_prefix}#{i}" }
+
+    verify_items = items.dup
+    unexpected_items = []
+    item_count = 0
+    within(".arv-project-#{item_list_parameter}") do
+      page.execute_script "window.scrollBy(0,999000)"
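+      # wait_for_ajax (a shared test helper) can raise on timeout; if all rows
+      # are already loaded the scroll triggers no further requests, so a
+      # timeout here is harmless and ignored.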
+      begin
+        wait_for_ajax
+      rescue
+      end
+
+      # Visit all rows. If not all expected items are found, retry
+      found_items = page.all(item_selector)
+      item_count = found_items.count
+
+      previous = nil
+      (0..item_count-1).each do |i|
+        # Row text follows the fixture name, e.g. "Show Collection_#{n}", so
+        # the second word is the item name
+        item_name = found_items[i].text.split[1]
+        if !items.include? item_name
+          unexpected_items << item_name
+        else
+          verify_items.delete item_name
+        end
+        if sorted
+          # check sort order
+          assert_operator( previous.downcase, :<=, item_name.downcase) if previous
+          previous = item_name
+        end
+      end
+
+      assert_empty unexpected_items, "Found unexpected #{item_list_parameter.humanize} #{unexpected_items.inspect}"
+      assert_equal nbr_items, item_count, "Found different number of #{item_list_parameter.humanize}"
+      assert_empty verify_items, "Did not find all the #{item_list_parameter.humanize}"
+    end
+  end
+
+  [
+    ['project_with_10_collections', 10],
+    ['project_with_201_collections', 201], # two pages of data
+  ].each do |project_name, nbr_items|
+    test "scroll collections tab for #{project_name} with #{nbr_items} objects" do
+      item_list_parameter = "Data_collections"
+      scroll_setup project_name,
+                   nbr_items,
+                   item_list_parameter
+      scroll_items_check nbr_items,
+                         "Collection_",
+                         item_list_parameter,
+                         'tr[data-kind="arvados#collection"]'
+    end
+  end
+
+  [
+    ['project_with_10_collections', 10],
+    ['project_with_201_collections', 201], # two pages of data
+  ].each do |project_name, nbr_items|
+    test "scroll collections tab for #{project_name} with #{nbr_items} objects with ascending sort (case insensitive)" do
+      item_list_parameter = "Data_collections"
+      scroll_setup project_name,
+                   nbr_items,
+                   item_list_parameter,
+                   true,
+                   "collections.name"
+      scroll_items_check nbr_items,
+                         "Collection_",
+                         item_list_parameter,
+                         'tr[data-kind="arvados#collection"]',
+                         true
+    end
+  end
+
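+  # [project fixture name, number of pipeline instances, number of container
+  # requests] -- both object types appear in the same tab, so scroll_setup is
+  # given their sum as the expected total.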
+  [
+    ['project_with_10_pipelines', 10, 0],
+    ['project_with_2_pipelines_and_60_crs', 2, 60],
+    ['project_with_25_pipelines', 25, 0],
+  ].each do |project_name, num_pipelines, num_crs|
+    test "scroll pipeline instances tab for #{project_name} with #{num_pipelines} pipelines and #{num_crs} container requests" do
+      item_list_parameter = "Pipelines_and_processes"
+      scroll_setup project_name,
+                   num_pipelines + num_crs,
+                   item_list_parameter
+      # check the general scrolling and the pipelines
+      scroll_items_check num_pipelines,
+                         "pipeline_",
+                         item_list_parameter,
+                         'tr[data-kind="arvados#pipelineInstance"]'
+      # Check container request count separately
+      crs_found = page.all('tr[data-kind="arvados#containerRequest"]')
+      found_cr_count = crs_found.count
+      assert_equal num_crs, found_cr_count, 'Did not find expected number of container requests'
+    end
+  end
+
+  test "error while loading tab" do
+    original_arvados_v1_base = Rails.configuration.arvados_v1_base
+
+    visit page_with_token 'active', '/projects/' + api_fixture('groups')['aproject']['uuid']
+
+    # Point to a bad api server url to generate error
+    Rails.configuration.arvados_v1_base = "https://[::1]:1/"
+    click_link 'Other objects'
+    within '#Other_objects' do
+      # Error
+      assert_selector('a', text: 'Reload tab')
+
+      # Now point back to the orig api server and reload tab
+      Rails.configuration.arvados_v1_base = original_arvados_v1_base
+      click_link 'Reload tab'
+      assert_no_selector('a', text: 'Reload tab')
+      assert_selector('button', text: 'Selection')
+      within '.selection-action-container' do
+        assert_selector 'tr[data-kind="arvados#trait"]'
+      end
+    end
+  end
+
+  test "add new project using projects dropdown" do
+    visit page_with_token 'active', '/'
+
+    # Add a new project
+    find("#projects-menu").click
+    click_link 'Add a new project'
+    assert_text 'New project'
+    assert_text 'No description provided'
+  end
+
+  test "first tab loads data when visiting other tab directly" do
+    # As of 2014-12-19, the first tab of project#show uses infinite scrolling.
+    # Make sure that it loads data even if we visit another tab directly.
+    need_selenium 'to land on specified tab using {url}#Advanced'
+    user = api_fixture("users", "active")
+    visit(page_with_token("active_trustedclient",
+                          "/projects/#{user['uuid']}#Advanced"))
+    assert_text("API response")
+    find("#page-wrapper .nav-tabs :first-child a").click
+    assert_text("Collection modified at")
+  end
+
+  # "Select all" and "Unselect all" options
+  test "select all and unselect all actions" do
+    need_selenium 'to check and uncheck checkboxes'
+
+    visit page_with_token 'active', '/projects/' + api_fixture('groups')['aproject']['uuid']
+
+    # Go to "Data collections" tab and click on "Select all"
+    click_link 'Data collections'
+    wait_for_ajax
+
+    # Initially, all selection options for this tab should be disabled
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Copy selected'
+    end
+
+    # Select all
+    click_button 'Select all'
+
+    assert_checkboxes_state('input[type=checkbox]', true, '"select all" should check all checkboxes')
+
+    # Now the selection options should be enabled
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li', text: 'Create new collection with selected collections'
+      assert_no_selector 'li.disabled', text: 'Copy selected'
+    end
+
+    # Go to Pipelines and processes tab and assert none selected
+    click_link 'Pipelines and processes'
+    wait_for_ajax
+
+    # Since this is the first visit to this tab, all selection options should be disabled
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Copy selected'
+    end
+
+    assert_checkboxes_state('input[type=checkbox]', false, 'no checkboxes should be checked on first visit to this tab')
+
+    # Select all
+    click_button 'Select all'
+    assert_checkboxes_state('input[type=checkbox]', true, '"select all" should check all checkboxes')
+
+    # Applicable selection options should be enabled
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Copy selected'
+    end
+
+    # Unselect all
+    click_button 'Unselect all'
+    assert_checkboxes_state('input[type=checkbox]', false, '"unselect all" should clear all checkboxes')
+
+    # All selection options should be disabled again
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Copy selected'
+    end
+
+    # Go back to Data collections tab and verify all are still selected
+    click_link 'Data collections'
+    wait_for_ajax
+
+    # Selection options should be enabled based on the fact that all collections are still selected in this tab
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li', text: 'Create new collection with selected collections'
+      assert_no_selector 'li.disabled', text: 'Copy selected'
+    end
+
+    assert_checkboxes_state('input[type=checkbox]', true, 'checkboxes should still be checked after returning to this tab')
+
+    # Unselect all
+    find('button#unselect-all').click
+    assert_checkboxes_state('input[type=checkbox]', false, '"unselect all" should clear all checkboxes')
+
+    # Now all selection options should be disabled because none of the collections are checked
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Copy selected'
+      assert_selector 'li.disabled', text: 'Copy selected'
+    end
+
+    # Verify checking just one checkbox still works as expected
+    within('tr', text: api_fixture('collections')['collection_to_move_around_in_aproject']['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li', text: 'Create new collection with selected collections'
+      assert_no_selector 'li.disabled', text: 'Copy selected'
+    end
+  end
+
+  test "test search all projects menu item in projects menu" do
+     need_selenium
+     visit page_with_token('active')
+     find('#projects-menu').click
+     within('.dropdown-menu') do
+       assert_selector 'a', text: 'Search all projects'
+       find('a', text: 'Search all projects').click
+     end
+     within('.modal-content') do
+        assert page.has_text?('All projects'), 'No text - All projects'
+        assert page.has_text?('Search'), 'No text - Search'
+        assert page.has_text?('Cancel'), 'No text - Cancel'
+        fill_in "Search", with: 'Unrestricted public data'
+        wait_for_ajax
+        assert_selector 'div', text: 'Unrestricted public data'
+        find(:xpath, '//*[@id="choose-scroll"]/div[2]/div').click
+        click_button 'Show'
+     end
+     assert page.has_text?('Unrestricted public data'), 'No text - Unrestricted public data'
+     assert page.has_text?('An anonymously accessible project'), 'No text - An anonymously accessible project'
+  end
+
+  test "test star and unstar project" do
+    visit page_with_token 'active', "/projects/#{api_fixture('groups')['anonymously_accessible_project']['uuid']}"
+
+    # add to favorites
+    find('.fa-star-o').click
+    wait_for_ajax
+
+    find("#projects-menu").click
+    within('.dropdown-menu') do
+      assert_selector 'li', text: 'Unrestricted public data'
+    end
+
+    # remove from favorites
+    find('.fa-star').click
+    wait_for_ajax
+
+    find("#projects-menu").click
+    within('.dropdown-menu') do
+      assert_no_selector 'li', text: 'Unrestricted public data'
+    end
+  end
+
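+  # [template fixture name, text expected in the chooser preview, text
+  # expected on the resulting process page]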
+  [
+    ['Two Part Pipeline Template', 'part-one', 'Provide a value for the following'],
+    ['Workflow with input specifications', 'this workflow has inputs specified', 'Provide a value for the following'],
+  ].each do |template_name, preview_txt, process_txt|
+    test "run a process using template #{template_name} in a project" do
+      project = api_fixture('groups')['aproject']
+      visit page_with_token 'active', '/projects/' + project['uuid']
+
+      find('.btn', text: 'Run a process').click
+
+      # in the chooser, verify preview and click Next button
+      within('.modal-dialog') do
+        find('.selectable', text: template_name).click
+        assert_text preview_txt
+        find('.btn', text: 'Next: choose inputs').click
+      end
+
+      # in the process page now
+      assert_text process_txt
+      assert_text project['name']
+    end
+  end
+end
diff --git a/apps/workbench/test/integration/report_issue_test.rb b/apps/workbench/test/integration/report_issue_test.rb
new file mode 100644 (file)
index 0000000..dc89868
--- /dev/null
@@ -0,0 +1,107 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class ReportIssueTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+    @user_profile_form_fields = Rails.configuration.user_profile_form_fields
+  end
+
+  teardown do
+    Rails.configuration.user_profile_form_fields = @user_profile_form_fields
+  end
+
+  # test version info and report issue from help menu
+  def check_version_info_and_report_issue_from_help_menu
+    within '.navbar-fixed-top' do
+      find('.help-menu > a').click
+      within '.help-menu .dropdown-menu' do
+        assert page.has_link?('Tutorials and User guide'), 'No link - Tutorials and User guide'
+        assert page.has_link?('API Reference'), 'No link - API Reference'
+        assert page.has_link?('SDK Reference'), 'No link - SDK Reference'
+        assert page.has_link?('Show version / debugging info ...'), 'No link - Show version / debugging info'
+        assert page.has_link?('Report a problem ...'), 'No link - Report a problem'
+
+        # check show version info link
+        click_link 'Show version / debugging info ...'
+      end
+    end
+
+    within '.modal-content' do
+      assert page.has_text?('Version / debugging info'), 'No text - Version / debugging info'
+      assert page.has_no_text?('Report a problem'), 'Found text - Report a problem'
+      assert page.has_no_text?('Describe the problem?'), 'Found text - Describe the problem'
+      assert page.has_button?('Close'), 'No button - Close'
+      assert page.has_no_button?('Send problem report'), 'Found button - Send problem report'
+      history_links = all('a').select do |a|
+        a[:href] =~ %r!^https://arvados.org/projects/arvados/repository/changes\?rev=[0-9a-f]+$!
+      end
+      assert_operator(2, :<=, history_links.count,
+                      "Should have found at least two links to revision history " +
+                      "in #{history_links.inspect}")
+      click_button 'Close'
+    end
+
+    # check report issue link
+    within '.navbar-fixed-top' do
+      find('.help-menu > a').click
+      find('.help-menu .dropdown-menu a', text: 'Report a problem ...').click
+    end
+
+    within '.modal-content' do
+      assert page.has_text?('Report a problem'), 'No text - Report a problem'
+      assert page.has_no_text?('Version / debugging info'), 'Found text - Version / debugging info'
+      assert page.has_text?('Describe the problem'), 'No text - Describe the problem'
+      assert page.has_no_button?('Close'), 'Found button - Close'
+      assert page.has_text?('Send problem report'), 'Send problem report button text is not found'
+      assert page.has_no_button?('Send problem report'), 'Send problem report button should be disabled until a problem description is entered'
+      assert page.has_button?('Cancel'), 'No button - Cancel'
+
+      # enter a report text and click on report
+      page.find_field('report_issue_text').set 'my test report text'
+      assert page.has_button?('Send problem report'), 'Send problem report button not enabled after entering text'
+
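+      # Stub the issue-report mailer so no mail is actually delivered; the
+      # test only cares about the UI state after a successful send. (mock and
+      # expects are the Mocha stubbing helpers used by this suite.)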
+      report = mock
+      report.expects(:deliver).returns true
+      IssueReporter.expects(:send_report).returns report
+
+      click_button 'Send problem report'
+
+      # ajax success updated button texts and added footer message
+      assert page.has_no_text?('Send problem report'), 'Found button - Send problem report'
+      assert page.has_no_button?('Cancel'), 'Found button - Cancel'
+      assert page.has_text?('Report sent'), 'No text - Report sent'
+      assert page.has_button?('Close'), 'No button - Close'
+      assert page.has_text?('Thanks for reporting this issue'), 'No text - Thanks for reporting this issue'
+
+      click_button 'Close'
+    end
+  end
+
+  [
+    [nil, nil],
+    ['inactive', api_fixture('users')['inactive']],
+    ['inactive_uninvited', api_fixture('users')['inactive_uninvited']],
+    ['active', api_fixture('users')['active']],
+    ['admin', api_fixture('users')['admin']],
+    ['active_no_prefs', api_fixture('users')['active_no_prefs']],
+    ['active_no_prefs_profile_no_getting_started_shown',
+        api_fixture('users')['active_no_prefs_profile_no_getting_started_shown']],
+  ].each do |token, user|
+
+    test "check version info and report issue for user #{token}" do
+      if !token
+        visit '/'
+      else
+        visit page_with_token(token)
+      end
+
+      check_version_info_and_report_issue_from_help_menu
+    end
+
+  end
+
+end
diff --git a/apps/workbench/test/integration/repositories_browse_test.rb b/apps/workbench/test/integration/repositories_browse_test.rb
new file mode 100644 (file)
index 0000000..e668b8c
--- /dev/null
@@ -0,0 +1,57 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+require 'helpers/repository_stub_helper'
+require 'helpers/share_object_helper'
+
+class RepositoriesTest < ActionDispatch::IntegrationTest
+  include RepositoryStubHelper
+  include ShareObjectHelper
+
+  reset_api_fixtures :after_each_test, false
+
+  setup do
+    need_javascript
+  end
+
+  test "browse repository from jobs#show" do
+    sha1 = api_fixture('jobs')['running']['script_version']
+    _, fakecommit, fakefile =
+      stub_repo_content sha1: sha1, filename: 'crunch_scripts/hash'
+    show_object_using 'active', 'jobs', 'running', sha1
+    click_on api_fixture('jobs')['running']['script']
+    assert_text fakefile
+    click_on 'crunch_scripts'
+    assert_selector 'td a', text: 'hash'
+    click_on 'foo'
+    assert_selector 'td a', text: 'crunch_scripts'
+    click_on sha1
+    assert_text fakecommit
+
+    show_object_using 'active', 'jobs', 'running', sha1
+    click_on 'active/foo'
+    assert_selector 'td a', text: 'crunch_scripts'
+
+    show_object_using 'active', 'jobs', 'running', sha1
+    click_on sha1
+    assert_text fakecommit
+  end
+
+  test "browse using arv-git-http" do
+    repo = api_fixture('repositories')['foo']
+    portfile =
+      File.expand_path('../../../../../tmp/arv-git-httpd-ssl.port', __FILE__)
+    gitsslport = File.read(portfile)
+    Repository.any_instance.
+      stubs(:http_fetch_url).
+      returns "https://localhost:#{gitsslport}/#{repo['name']}.git"
+    commit_sha1 = '1de84a854e2b440dc53bf42f8548afa4c17da332'
+    visit page_with_token('active', "/repositories/#{repo['uuid']}/commit/#{commit_sha1}")
+    assert_text "Date:   Tue Mar 18 15:55:28 2014 -0400"
+    visit page_with_token('active', "/repositories/#{repo['uuid']}/tree/#{commit_sha1}")
+    assert_selector "tbody td a", "foo"
+    assert_text "12 bytes"
+  end
+end
diff --git a/apps/workbench/test/integration/repositories_test.rb b/apps/workbench/test/integration/repositories_test.rb
new file mode 100644 (file)
index 0000000..a7b0baa
--- /dev/null
@@ -0,0 +1,50 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+require 'helpers/share_object_helper'
+
+class RepositoriesTest < ActionDispatch::IntegrationTest
+  include ShareObjectHelper
+
+  setup do
+    need_javascript
+  end
+
+  [
+    'active', #owner
+    'admin'
+  ].each do |user|
+    test "#{user} can manage sharing for another user" do
+      add_user = api_fixture('users')['future_project_user']
+      new_name = ["first_name", "last_name"].map { |k| add_user[k] }.join(" ")
+      show_object_using(user, 'repositories', 'foo',
+                        api_fixture('repositories')['foo']['name'])
+      click_on "Sharing"
+      add_share_and_check("users", new_name, add_user)
+      modify_share_and_check(new_name)
+    end
+  end
+
+  [
+    'active', #owner
+    'admin'
+  ].each do |user|
+    test "#{user} can manage sharing for another group" do
+      new_name = api_fixture('groups')['future_project_viewing_group']['name']
+      show_object_using(user, 'repositories', 'foo',
+                        api_fixture('repositories')['foo']['name'])
+      click_on "Sharing"
+      add_share_and_check("groups", new_name)
+      modify_share_and_check(new_name)
+    end
+  end
+
+  test "spectator does not see repository sharing tab" do
+    show_object_using('spectator', 'repositories', 'arvados',
+                      api_fixture('repositories')['arvados']['name'])
+    assert(page.has_no_link?("Sharing"),
+           "read-only repository user sees sharing tab")
+  end
+end
diff --git a/apps/workbench/test/integration/search_box_test.rb b/apps/workbench/test/integration/search_box_test.rb
new file mode 100644 (file)
index 0000000..1eed158
--- /dev/null
@@ -0,0 +1,108 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class SearchBoxTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  # test the search box
+  def verify_search_box user
+    if user && user['is_active']
+      aproject_uuid = api_fixture('groups')['aproject']['uuid']
+      # let's search for aproject by uuid
+      within('.navbar-fixed-top') do
+        page.has_field?('search this site')
+        page.find_field('search this site').set aproject_uuid
+        page.find('.glyphicon-search').click
+      end
+
+      # we should now be in aproject as a result of search
+      assert_selector 'a', text: 'Data collections'
+      click_link 'Data collections'
+      assert page.has_selector?("#Data_collections[data-object-uuid='#{aproject_uuid}']"), 'Expected to land on the project page after search'
+
+      # let's search again for an invalid uuid
+      within('.navbar-fixed-top') do
+        search_for = String.new user['uuid']
+        search_for[0]='1'
+        page.find_field('search this site').set search_for
+        page.find('.glyphicon-search').click
+      end
+
+      # we should see 'not found' error page
+      assert page.has_text?('Not Found'), 'No text - Not Found'
+      assert page.has_link?('Report problem'), 'No text - Report problem'
+      click_link 'Report problem'
+      within '.modal-content' do
+        assert page.has_text?('Report a problem'), 'No text - Report a problem'
+        assert page.has_no_text?('Version / debugging info'), 'Found text - Version / debugging info'
+        assert page.has_text?('Describe the problem'), 'No text - Describe the problem'
+        assert page.has_text?('Send problem report'), 'Send problem report button text is not found'
+        assert page.has_no_button?('Send problem report'), 'Send problem report button should be disabled until a problem description is entered'
+        assert page.has_button?('Cancel'), 'No button - Cancel'
+
+        # enter a report text and click on report
+        page.find_field('report_issue_text').set 'my test report text'
+        assert page.has_button?('Send problem report'), 'Send problem report button not enabled after entering text'
+        click_button 'Send problem report'
+
+        # ajax success updated button texts and added footer message
+        assert page.has_no_text?('Send problem report'), 'Found button - Send problem report'
+        assert page.has_no_button?('Cancel'), 'Found button - Cancel'
+        assert page.has_text?('Report sent'), 'No text - Report sent'
+        assert page.has_button?('Close'), 'No button - Close'
+        assert page.has_text?('Thanks for reporting this issue'), 'No text - Thanks for reporting this issue'
+
+        click_button 'Close'
+      end
+
+      # let's search for the anonymously accessible project
+      publicly_accessible_project = api_fixture('groups')['anonymously_accessible_project']
+
+      within('.navbar-fixed-top') do
+        # search again for the anonymously accessible project
+        page.find_field('search this site').set publicly_accessible_project['name'][0,10]
+        page.find('.glyphicon-search').click
+      end
+
+      within '.modal-content' do
+        assert page.has_text?('All projects'), 'No text - All projects'
+        assert page.has_text?('Search'), 'No text - Search'
+        assert page.has_text?('Cancel'), 'No text - Cancel'
+        assert_selector('div', text: publicly_accessible_project['name'])
+        find(:xpath, "//div[./span[contains(.,'#{publicly_accessible_project['uuid']}')]]").click
+
+        click_button 'Show'
+      end
+
+      # seeing "Unrestricted public data" now
+      assert page.has_text?(publicly_accessible_project['name']), 'No text - publicly accessible project name'
+      assert page.has_text?(publicly_accessible_project['description']), 'No text - publicly accessible project description'
+    else
+      within('.navbar-fixed-top') do
+        page.has_no_field?('search this site')
+      end
+    end
+  end
+
+  [
+    [nil, nil],
+    ['inactive', api_fixture('users')['inactive']],
+    ['inactive_uninvited', api_fixture('users')['inactive_uninvited']],
+    ['active', api_fixture('users')['active']],
+    ['admin', api_fixture('users')['admin']],
+  ].each do |token, user|
+
+    test "test search box for user #{token}" do
+      visit page_with_token(token)
+
+      verify_search_box user
+    end
+
+  end
+
+end
diff --git a/apps/workbench/test/integration/smoke_test.rb b/apps/workbench/test/integration/smoke_test.rb
new file mode 100644 (file)
index 0000000..18973db
--- /dev/null
@@ -0,0 +1,56 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+require 'uri'
+
+class SmokeTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  def assert_visit_success(allowed=[200])
+    assert_includes(allowed, status_code,
+                    "#{current_url} returned #{status_code}, not one of " +
+                    allowed.inspect)
+  end
+
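+  # Collect the path of every same-host link under find_spec whose text
+  # matches text_regexp, skipping data-remote links (those don't necessarily
+  # work with format=html). Used below as e.g. all_links_in('.navbar', /^Manage /).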
+  def all_links_in(find_spec, text_regexp=//)
+    all(find_spec + ' a').collect { |tag|
+      if tag[:href].nil? or tag[:href].empty? or (tag.text !~ text_regexp)
+        nil
+      elsif tag[:'data-remote']
+        # these don't necessarily work with format=html
+        nil
+      else
+        url = URI(tag[:href])
+        url.host.nil? ? url.path : nil
+      end
+    }.compact
+  end
+
+  test "all first-level links succeed" do
+    visit page_with_token('active_trustedclient', '/')
+    assert_visit_success
+    click_link 'notifications-menu'
+    urls = [all_links_in('nav'),
+            all_links_in('.navbar', /^Manage /)].flatten
+    seen_urls = ['/']
+    while not (url = urls.shift).nil?
+      next if seen_urls.include? url
+      visit url
+      seen_urls << url
+      assert_visit_success
+      # Uncommenting the line below lets you crawl the entire site for a
+      # more thorough test.
+      # urls += all_links_in('body')
+    end
+  end
+
+  test "mithril test page" do
+    visit page_with_token('active_trustedclient', '/tests/mithril')
+    assert_visit_success
+    assert_selector 'p', text: 'mithril is working'
+  end
+end
diff --git a/apps/workbench/test/integration/trash_test.rb b/apps/workbench/test/integration/trash_test.rb
new file mode 100644 (file)
index 0000000..22732a3
--- /dev/null
@@ -0,0 +1,169 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class TrashTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  test "trash page" do
+    deleted = api_fixture('collections')['deleted_on_next_sweep']
+    expired1 = api_fixture('collections')['unique_expired_collection']
+    expired2 = api_fixture('collections')['unique_expired_collection2']
+
+    # visit trash page
+    visit page_with_token('active', "/trash")
+
+    assert_text deleted['name']
+    assert_text deleted['uuid']
+    assert_text deleted['portable_data_hash']
+    assert_text expired1['name']
+    assert_no_text expired2['name']   # not readable by this user
+    assert_no_text 'foo_file'         # not trash
+
+    # Un-trash one item using selection dropdown
+    within('tr', text: deleted['name']) do
+      first('input').click
+    end
+
+    click_button 'Selection...'
+    within('.selection-action-container') do
+      click_link 'Un-trash selected items'
+    end
+
+    wait_for_ajax
+
+    assert_text expired1['name']      # this should still be there
+    assert_no_text deleted['name']    # this should no longer be here
+
+    # Un-trash another item using the recycle button
+    within('tr', text: expired1['name']) do
+      first('.fa-recycle').click
+    end
+
+    wait_for_ajax
+
+    assert_text "The collection with UUID #{expired1['uuid']} is in the trash"
+
+    click_on "Click here to untrash '#{expired1['name']}'"
+
+    # verify that the two un-trashed items are now shown in /collections page
+    visit page_with_token('active', "/collections")
+    assert_text deleted['uuid']
+    assert_text expired1['uuid']
+    assert_no_text expired2['uuid']
+  end
+
+  ["button","selection"].each do |method|
+    test "trashed projects using #{method}" do
+      deleted = api_fixture('groups')['trashed_project']
+      aproject = api_fixture('groups')['aproject']
+
+      # verify that the un-trashed item are missing in /groups page
+      visit page_with_token('active', "/projects/zzzzz-tpzed-xurymjxw79nv3jz")
+      click_on "Subprojects"
+      assert_no_text deleted['name']
+
+      # visit trash page
+      visit page_with_token('active', "/trash")
+      click_on "Trashed projects"
+
+      assert_text deleted['name']
+      assert_text deleted['uuid']
+      assert_no_text aproject['name']
+      assert_no_text aproject['uuid']
+
+      # Un-trash item
+      if method == "button"
+        within('tr', text: deleted['uuid']) do
+          first('.fa-recycle').click
+        end
+        assert_text "The group with UUID #{deleted['uuid']} is in the trash"
+        click_on "Click here to untrash '#{deleted['name']}'"
+      else
+        within('tr', text: deleted['uuid']) do
+          first('input').click
+        end
+        click_button 'Selection...'
+        within('.selection-action-container') do
+          click_link 'Un-trash selected items'
+        end
+        wait_for_ajax
+        assert_no_text deleted['uuid']
+      end
+
+      # check that the un-trashed item are now shown on parent project page
+      visit page_with_token('active', "/projects/zzzzz-tpzed-xurymjxw79nv3jz")
+      click_on "Subprojects"
+      assert_text deleted['name']
+      assert_text aproject['name']
+
+      # Trash another item
+      if method == "button"
+        within('tr', text: aproject['name']) do
+          first('.fa-trash-o').click
+        end
+      else
+        within('tr', text: aproject['name']) do
+          first('input').click
+        end
+        click_button 'Selection'
+        within('.selection-action-container') do
+          click_link 'Remove selected'
+        end
+      end
+
+      wait_for_ajax
+      assert_no_text aproject['name']
+      visit current_path
+      assert_no_text aproject['name']
+
+      # visit trash page
+      visit page_with_token('active', "/trash")
+      click_on "Trashed projects"
+
+      assert_text aproject['name']
+      assert_text aproject['uuid']
+    end
+  end
+
+  test "trash page with search" do
+    deleted = api_fixture('collections')['deleted_on_next_sweep']
+    expired = api_fixture('collections')['unique_expired_collection']
+
+    visit page_with_token('active', "/trash")
+
+    assert_text deleted['name']
+    assert_text deleted['uuid']
+    assert_text deleted['portable_data_hash']
+    assert_text expired['name']
+
+    page.find_field('Search trash').set 'expired'
+
+    assert_no_text deleted['name']
+    assert_text expired['name']
+
+    page.find_field('Search trash').set deleted['portable_data_hash'][0..9]
+
+    assert_no_text expired['name']
+    assert_text deleted['name']
+    assert_text deleted['uuid']
+    assert_text deleted['portable_data_hash']
+
+    click_button 'Selection...'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Un-trash selected items'
+    end
+
+    first('input[type=checkbox]').click
+
+    click_button 'Selection...'
+    within('.selection-action-container') do
+      assert_selector 'li', text: 'Un-trash selected items'
+      assert_no_selector 'li.disabled', text: 'Un-trash selected items'
+    end
+  end
+end
diff --git a/apps/workbench/test/integration/user_agreements_test.rb b/apps/workbench/test/integration/user_agreements_test.rb
new file mode 100644 (file)
index 0000000..666e47f
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class UserAgreementsTest < ActionDispatch::IntegrationTest
+
+  setup do
+    need_javascript
+  end
+
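+  # Matches the Continue button only while it is disabled; asserting this
+  # selector's presence or absence therefore checks the button's disabled state.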
+  def continuebutton_selector
+    'input[type=submit][disabled][value=Continue]'
+  end
+
+  test "cannot click continue without ticking checkbox" do
+    visit page_with_token('inactive')
+    assert_selector continuebutton_selector
+  end
+
+  test "continue button is enabled after ticking checkbox" do
+    visit page_with_token('inactive')
+    assert_selector continuebutton_selector
+    find('input[type=checkbox]').click
+    assert_no_selector continuebutton_selector
+    assert_nil(find_button('Continue')[:disabled],
+               'Continue button did not become enabled')
+  end
+
+end
diff --git a/apps/workbench/test/integration/user_profile_test.rb b/apps/workbench/test/integration/user_profile_test.rb
new file mode 100644 (file)
index 0000000..e4d9894
--- /dev/null
@@ -0,0 +1,162 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class UserProfileTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+    @user_profile_form_fields = Rails.configuration.user_profile_form_fields
+  end
+
+  teardown do
+    Rails.configuration.user_profile_form_fields = @user_profile_form_fields
+  end
+
+  def verify_homepage_with_profile user, invited, has_profile
+    profile_config = Rails.configuration.user_profile_form_fields
+
+    if !user
+      assert_text('Please log in')
+    elsif user['is_active']
+      if profile_config && !has_profile
+        assert_text('Save profile')
+        add_profile user
+      else
+        assert_text('Recent pipelines and processes')
+        assert_no_text('Save profile')
+      end
+    elsif invited
+      assert_text('Please check the box below to indicate that you have read and accepted the user agreement')
+      assert_no_text('Save profile')
+    else
+      assert_text('Your account is inactive')
+      assert_no_text('Save profile')
+    end
+
+    # If the user has not already seen getting_started modal, it will be shown on first visit.
+    if user and user['is_active'] and !user['prefs']['getting_started_shown']
+      within '.modal-content' do
+        assert_text 'Getting Started'
+        assert_selector 'button', text: 'Next'
+        assert_selector 'button', text: 'Prev'
+        first('button', text: 'x').click
+      end
+    end
+
+    within('.navbar-fixed-top') do
+      if !user
+        assert page.has_link?('Log in'), 'Not found link - Log in'
+      else
+        # my account menu
+        assert_selector("#notifications-menu")
+        page.find("#notifications-menu").click
+        within('.dropdown-menu') do
+          if user['is_active']
+            assert_no_selector('a', text: 'Not active')
+            assert_no_selector('a', text: 'Sign agreements')
+
+            assert_selector('a', text: 'Virtual machines')
+            assert_selector('a', text: 'Repositories')
+            assert_selector('a', text: 'Current token')
+            assert_selector('a', text: 'SSH keys')
+
+            if profile_config
+              assert_selector('a', text: 'Manage profile')
+            else
+              assert_no_selector('a', text: 'Manage profile')
+            end
+          end
+          assert_selector('a', text: 'Log out')
+        end
+      end
+    end
+  end
+
+  # Check manage profile page and add missing profile to the user
+  def add_profile user
+    assert_no_text('My projects')
+    assert_no_text('Projects shared with me')
+
+    assert_text('Profile')
+    assert_text('First Name')
+    assert_text('Last Name')
+    assert_text('Identity URL')
+    assert_text('E-mail')
+    assert_text(user['email'])
+
+    # Using the default profile which has message and one required field
+
+    # Save profile without filling in the required field. Expect to be back in this profile page again
+    click_button "Save profile"
+    assert_text('Profile')
+    assert_text('First Name')
+    assert_text('Last Name')
+    assert_text('Save profile')
+
+    # This time fill in required field and then save. Expect to go to requested page after that.
+    profile_message = Rails.configuration.user_profile_form_message
+    required_field_title = ''
+    required_field_key = ''
+    profile_config = Rails.configuration.user_profile_form_fields
+    profile_config.each do |entry|
+      if entry['required']
+        required_field_key = entry['key']
+        required_field_title = entry['form_field_title']
+        break
+      end
+    end
+
+    assert page.has_text? profile_message.gsub(/<.*?>/,'')
+    assert_text(required_field_title)
+
+    page.find_field('user[prefs][profile]['+required_field_key+']').set 'value to fill required field'
+
+    click_button "Save profile"
+    # profile saved and in profile page now with success
+    assert_text('Thank you for filling in your profile')
+    assert_selector('input' +
+                    '[name="user[prefs][profile]['+required_field_key+']"]' +
+                    '[value="value to fill required field"]')
+    if user['prefs']['getting_started_shown']
+      click_link 'Back to work!'
+    else
+      click_link 'Get started'
+    end
+
+    # profile saved and in home page now
+    assert_text('Recent pipelines and processes')
+  end
+
+  [
+    [nil, false, false],
+    ['inactive', true, false],
+    ['inactive_uninvited', false, false],
+    ['active', true, true],
+    ['admin', true, true],
+    ['active_no_prefs', true, false],
+    ['active_no_prefs_profile_no_getting_started_shown', true, false],
+    ['active_no_prefs_profile_with_getting_started_shown', true, false],
+  ].each do |token, invited, has_profile|
+    [true, false].each do |profile_required|
+      test "visit #{token} home page when profile is #{'not ' if !profile_required}configured" do
+        if !profile_required
+          Rails.configuration.user_profile_form_fields = false
+        else
+          # The test config enables the profile by default, so no change is needed
+        end
+        Rails.configuration.enable_getting_started_popup = true
+
+        if !token
+          visit '/'
+        else
+          visit page_with_token(token)
+        end
+
+        user = token && api_fixture('users')[token]
+        verify_homepage_with_profile user, invited, has_profile
+      end
+    end
+  end
+end
diff --git a/apps/workbench/test/integration/user_settings_menu_test.rb b/apps/workbench/test/integration/user_settings_menu_test.rb
new file mode 100644 (file)
index 0000000..6a0e46e
--- /dev/null
@@ -0,0 +1,236 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class UserSettingsMenuTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  # test user settings menu
+  def verify_user_settings_menu user
+    if user['is_active']
+      within('.navbar-fixed-top') do
+        page.find("#notifications-menu").click
+        within('.dropdown-menu') do
+          assert_selector 'a', text: 'Virtual machines'
+          assert_selector 'a', text: 'Repositories'
+          assert_selector 'a', text: 'Current token'
+          assert_selector 'a', text: 'SSH keys'
+          find('a', text: 'SSH keys').click
+        end
+      end
+
+      # now in SSH Keys page
+      assert page.has_text?('Add new SSH key'), 'No text - Add SSH key'
+      add_and_verify_ssh_key
+    else  # inactive user
+      within('.navbar-fixed-top') do
+        page.find("#notifications-menu").click
+        within('.dropdown-menu') do
+          assert page.has_no_link?('Manage profile'), 'Found link - Manage profile'
+        end
+      end
+    end
+  end
+
+  def add_and_verify_ssh_key
+      click_link 'Add new SSH key'
+
+      within '.modal-content' do
+        assert page.has_text?('Public Key'), 'No text - Public Key'
+        assert page.has_button?('Cancel'), 'No button - Cancel'
+        assert page.has_button?('Submit'), 'No button - Submit'
+
+        page.find_field('public_key').set 'first test with an incorrect ssh key value'
+        click_button 'Submit'
+        assert_text 'Public key does not appear to be a valid ssh-rsa or dsa public key'
+
+        public_key_str = api_fixture('authorized_keys')['active']['public_key']
+        page.find_field('public_key').set public_key_str
+        page.find_field('name').set 'added_in_test'
+        click_button 'Submit'
+        assert_text 'Public key already exists in the database, use a different key.'
+
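+        # Generate a fresh, valid keypair -- SSHKey.generate comes from the
+        # sshkey gem used by these tests -- so this third submission succeeds.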
+        new_key = SSHKey.generate
+        page.find_field('public_key').set new_key.ssh_public_key
+        page.find_field('name').set 'added_in_test'
+        click_button 'Submit'
+      end
+
+      # key must be added. look for it in the refreshed page
+      assert_text 'added_in_test'
+  end
+
+  [
+    ['inactive', api_fixture('users')['inactive']],
+    ['inactive_uninvited', api_fixture('users')['inactive_uninvited']],
+    ['active', api_fixture('users')['active']],
+    ['admin', api_fixture('users')['admin']],
+  ].each do |token, user|
+    test "test user settings menu for user #{token}" do
+      visit page_with_token(token)
+      verify_user_settings_menu user
+    end
+  end
+
+  test "pipeline notification shown even though public pipelines exist" do
+    skip "created_by doesn't work that way"
+    Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+    visit page_with_token 'job_reader'
+    click_link 'notifications-menu'
+    assert_selector 'a', text: 'Click here to learn how to run an Arvados Crunch pipeline'
+  end
+
+  [
+    ['job_reader', :ssh, :pipeline],
+    ['active'],
+  ].each do |user, *expect|
+    test "user settings menu for #{user} with notifications #{expect.inspect}" do
+      Rails.configuration.anonymous_user_token = false
+      visit page_with_token(user)
+      click_link 'notifications-menu'
+      if expect.include? :ssh
+        assert_selector('a', text: 'Click here to set up an SSH public key for use with Arvados')
+        click_link('Click here to set up an SSH public key for use with Arvados')
+        assert_selector('a', text: 'Add new SSH key')
+
+        add_and_verify_ssh_key
+
+        # No more SSH notification
+        click_link 'notifications-menu'
+        assert_no_selector('a', text: 'Click here to set up an SSH public key for use with Arvados')
+      else
+        assert_no_selector('a', text: 'Click here to set up an SSH public key for use with Arvados')
+        assert_no_selector('a', text: 'Click here to learn how to run an Arvados Crunch pipeline')
+      end
+
+      if expect.include? :pipeline
+        assert_selector('a', text: 'Click here to learn how to run an Arvados Crunch pipeline')
+      end
+    end
+  end
+
+  test "verify repositories for active user" do
+    visit page_with_token('active',"/repositories")
+
+    repos = [[api_fixture('repositories')['foo'], true],
+             [api_fixture('repositories')['repository3'], false],
+             [api_fixture('repositories')['repository4'], false],
+             [api_fixture('repositories')['arvados'], false]]
+
+    repos.each do |(repo, owned)|
+      within('tr', text: repo['name']+'.git') do
+        assert_text repo['name']
+        assert_selector 'a', text:'Show'
+        if owned
+          assert_not_nil first('.glyphicon-trash')
+        else
+          assert_nil first('.glyphicon-trash')
+        end
+      end
+    end
+  end
+
+  test "request shell access" do
+    ActionMailer::Base.deliveries = []
+    visit page_with_token('spectator', "/users/#{api_fixture('users')['spectator']['uuid']}/virtual_machines")
+    assert_text 'You do not have access to any virtual machines'
+    click_link 'Send request for shell access'
+
+    # Button text changes to "sending...", then back to normal. In the
+    # test suite we can't depend on confirming the "sending..." state
+    # before it goes back to normal, though.
+    ## assert_selector 'a', text: 'Sending request...'
+    assert_selector 'a', text: 'Send request for shell access'
+    assert_text 'A request for shell access was sent'
+
+    # verify that the email was sent
+    user = api_fixture('users')['spectator']
+    full_name = "#{user['first_name']} #{user['last_name']}"
+    expected = "Shell account request from #{full_name} (#{user['email']}, #{user['uuid']})"
+    found_email = 0
+    ActionMailer::Base.deliveries.each do |email|
+      if email.subject.include?(expected)
+        found_email += 1
+      end
+    end
+    assert_equal 1, found_email, "Expected email after requesting shell access"
+
+    # Revisit the page and verify the request sent message along with
+    # the request button.
+    within('.navbar-fixed-top') do
+      page.find("#notifications-menu").click
+      within('.dropdown-menu') do
+        find('a', text: 'Virtual machines').click
+      end
+    end
+    assert_text 'You do not have access to any virtual machines.'
+    assert_text 'A request for shell access was sent on '
+    assert_selector 'a', text: 'Send request for shell access'
+  end
+
+  test "create new repository" do
+    visit page_with_token("active_trustedclient")
+    within('.navbar-fixed-top') do
+      page.find("#notifications-menu").click
+      within('.dropdown-menu') do
+        assert_selector 'a', text: 'Repositories'
+        find('a', text: 'Repositories').click
+      end
+    end
+    click_on "Add new repository"
+    within ".modal-dialog" do
+      fill_in "Name", with: "workbenchtest"
+      click_on "Create"
+    end
+    assert_text ":active/workbenchtest.git"
+    assert_match /git@git.*:active\/workbenchtest.git/, page.text
+    assert_match /https:\/\/git.*\/active\/workbenchtest.git/, page.text
+  end
+
+  [
+    ['virtual_machines', nil, 'Host name', 'testvm2.shell'],
+    ['/repositories', 'Add new repository', 'It may take a minute or two before you can clone your new repository.', 'active/foo'],
+    ['/current_token', nil, 'HISTIGNORE=$HISTIGNORE', 'ARVADOS_API_TOKEN=3kg6k6lzmp9kj5'],
+    ['ssh_keys', 'Add new SSH key', 'Click here to learn about SSH keys in Arvados.', 'active'],
+  ].each do |page_name, button_name, look_for, content|
+    test "test user settings menu for page #{page_name}" do
+      if page_name == '/current_token' || page_name == '/repositories'
+        visit page_with_token('active', page_name)
+      else
+        visit page_with_token('active', "/users/#{api_fixture('users')['active']['uuid']}/#{page_name}")
+      end
+
+      assert page.has_text? content
+      if button_name
+        assert_selector 'a', text: button_name
+        find('a', text: button_name).click
+      end
+
+      assert page.has_text? look_for
+    end
+  end
+
+  [
+    ['virtual_machines', 'You do not have access to any virtual machines.'],
+    ['/repositories', api_fixture('repositories')['arvados']['name']],
+    ['/current_token', 'HISTIGNORE=$HISTIGNORE'],
+    ['ssh_keys', 'You have not yet set up an SSH public key for use with Arvados.'],
+  ].each do |page_name, look_for|
+    test "test user settings menu for page #{page_name} when page is empty" do
+      if page_name == '/current_token' || page_name == '/repositories'
+        visit page_with_token('user1_with_load', page_name)
+      else
+        visit page_with_token('admin', "/users/#{api_fixture('users')['user1_with_load']['uuid']}/#{page_name}")
+      end
+
+      assert page.has_text? look_for
+      if page_name == '/repositories'
+        assert_equal 1, page.all('a[data-original-title="show repository"]').count
+      end
+    end
+  end
+end
diff --git a/apps/workbench/test/integration/users_test.rb b/apps/workbench/test/integration/users_test.rb
new file mode 100644 (file)
index 0000000..bad01a1
--- /dev/null
@@ -0,0 +1,227 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class UsersTest < ActionDispatch::IntegrationTest
+
+  test "login as active user but not admin" do
+    need_javascript
+    visit page_with_token('active_trustedclient')
+
+    assert page.has_no_link?('Users'), 'Found Users link for non-admin user'
+  end
+
+  test "login as admin user and verify active user data" do
+    need_javascript
+    visit page_with_token('admin_trustedclient')
+
+    # go to Users list page
+    find('#system-menu').click
+    click_link 'Users'
+
+    # check active user attributes in the list page
+    page.within(:xpath, '//tr[@data-object-uuid="zzzzz-tpzed-xurymjxw79nv3jz"]') do
+      assert (text.include? 'true false'), 'Expected is_active true and is_admin false in user row'
+    end
+
+    find('tr', text: 'zzzzz-tpzed-xurymjxw79nv3jz').
+      find('a', text: 'Show').
+      click
+    assert page.has_text? 'Attributes'
+    assert page.has_text? 'Advanced'
+    assert page.has_text? 'Admin'
+
+    # go to the Attributes tab
+    click_link 'Attributes'
+    assert page.has_text? 'modified_by_user_uuid'
+    page.within(:xpath, '//span[@data-name="is_active"]') do
+      assert_equal "true", text, "Expected user's is_active to be true"
+    end
+    page.within(:xpath, '//span[@data-name="is_admin"]') do
+      assert_equal "false", text, "Expected user's is_admin to be false"
+    end
+
+  end
+
+  test "create a new user" do
+    need_javascript
+
+    visit page_with_token('admin_trustedclient')
+
+    find('#system-menu').click
+    click_link 'Users'
+
+    assert page.has_text? 'zzzzz-tpzed-d9tiejq69daie8f'
+
+    click_link 'Add a new user'
+
+    within '.modal-content' do
+      find 'label', text: 'Virtual Machine'
+      fill_in "email", :with => "foo@example.com"
+      click_button "Submit"
+      wait_for_ajax
+    end
+
+    visit '/users'
+
+    # verify that the new user showed up in the users page and find
+    # the new user's UUID
+    new_user_uuid =
+      find('tr[data-object-uuid]', text: 'foo@example.com')['data-object-uuid']
+    assert new_user_uuid, "Expected new user uuid not found"
+
+    # go to the new user's page
+    find('tr', text: new_user_uuid).
+      find('a', text: 'Show').
+      click
+
+    assert page.has_text? 'modified_by_user_uuid'
+    page.within(:xpath, '//span[@data-name="is_active"]') do
+      assert_equal "false", text, "Expected new user's is_active to be false"
+    end
+
+    click_link 'Advanced'
+    click_link 'Metadata'
+    assert page.has_text? 'can_login' # make sure page is rendered / ready
+    assert page.has_no_text? 'VirtualMachine:'
+  end
+
+  test "setup the active user" do
+    need_javascript
+    visit page_with_token('admin_trustedclient')
+
+    find('#system-menu').click
+    click_link 'Users'
+
+    # click on active user
+    find('tr', text: 'zzzzz-tpzed-xurymjxw79nv3jz').
+      find('a', text: 'Show').
+      click
+    user_url = page.current_url
+
+    # Setup user
+    click_link 'Admin'
+    assert page.has_text? 'As an admin, you can setup'
+
+    click_link 'Setup shell account for Active User'
+
+    within '.modal-content' do
+      find 'label', text: 'Virtual Machine'
+      click_button "Submit"
+    end
+
+    visit user_url
+    assert page.has_text? 'modified_by_client_uuid'
+
+    click_link 'Advanced'
+    click_link 'Metadata'
+    vm_links = all("a", text: "VirtualMachine:")
+    assert_equal(1, vm_links.size)
+    assert_equal("VirtualMachine: testvm2.shell", vm_links.first.text)
+
+    # Click on Setup button again and this time also choose a VM
+    click_link 'Admin'
+    click_link 'Setup shell account for Active User'
+
+    within '.modal-content' do
+      select("testvm.shell", :from => 'vm_uuid')
+      fill_in "groups", :with => "test group one, test-group-two"
+      click_button "Submit"
+    end
+
+    visit user_url
+    find '#Attributes', text: 'modified_by_client_uuid'
+
+    click_link 'Advanced'
+    click_link 'Metadata'
+    assert page.has_text? 'VirtualMachine: testvm.shell'
+    assert page.has_text? '["test group one", "test-group-two"]'
+  end
+
+  test "unsetup active user" do
+    need_javascript
+
+    visit page_with_token('admin_trustedclient')
+
+    find('#system-menu').click
+    click_link 'Users'
+
+    # click on active user
+    find('tr', text: 'zzzzz-tpzed-xurymjxw79nv3jz').
+      find('a', text: 'Show').
+      click
+    user_url = page.current_url
+
+    # Verify that is_active is set
+    find('a,button', text: 'Attributes').click
+    assert page.has_text? 'modified_by_user_uuid'
+    page.within(:xpath, '//span[@data-name="is_active"]') do
+      assert_equal "true", text, "Expected user's is_active to be true"
+    end
+
+    # go to Admin tab
+    click_link 'Admin'
+    assert page.has_text? 'As an admin, you can deactivate and reset this user'
+
+    # unsetup user and verify all the above links are deleted
+    click_link 'Admin'
+    click_button 'Deactivate Active User'
+
+    if Capybara.current_driver == :selenium
+      sleep(0.1)
+      page.driver.browser.switch_to.alert.accept
+    else
+      # poltergeist returns true for confirm(), so we don't need to accept.
+    end
+
+    # Should now be back in the Attributes tab for the user
+    assert page.has_text? 'modified_by_user_uuid'
+    page.within(:xpath, '//span[@data-name="is_active"]') do
+      assert_equal "false", text, "Expected user's is_active to be false after unsetup"
+    end
+
+    click_link 'Advanced'
+    click_link 'Metadata'
+    assert page.has_no_text? 'VirtualMachine: testvm.shell'
+
+    # setup user again and verify links present
+    click_link 'Admin'
+    click_link 'Setup shell account for Active User'
+
+    within '.modal-content' do
+      select("testvm.shell", :from => 'vm_uuid')
+      click_button "Submit"
+    end
+
+    visit user_url
+    assert page.has_text? 'modified_by_client_uuid'
+
+    click_link 'Advanced'
+    click_link 'Metadata'
+    assert page.has_text? 'VirtualMachine: testvm.shell'
+  end
+
+  test "test add group button" do
+    need_javascript
+
+    user_url = "/users/#{api_fixture('users')['active']['uuid']}"
+    visit page_with_token('admin_trustedclient', user_url)
+
+    # Setup user
+    click_link 'Admin'
+    assert page.has_text? 'As an admin, you can setup'
+
+    click_link 'Add new group'
+
+    within '.modal-content' do
+      fill_in "group_name_input", :with => "test-group-added-in-modal"
+      click_button "Create"
+    end
+    wait_for_ajax
+
+    # Back in the user "Admin" tab
+    assert page.has_text? 'test-group-added-in-modal'
+  end
+end
diff --git a/apps/workbench/test/integration/virtual_machines_test.rb b/apps/workbench/test/integration/virtual_machines_test.rb
new file mode 100644 (file)
index 0000000..a13abf9
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class VirtualMachinesTest < ActionDispatch::IntegrationTest
+end
diff --git a/apps/workbench/test/integration/websockets_test.rb b/apps/workbench/test/integration/websockets_test.rb
new file mode 100644 (file)
index 0000000..e377da3
--- /dev/null
@@ -0,0 +1,260 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+class WebsocketTest < ActionDispatch::IntegrationTest
+  setup do
+    need_selenium "to make websockets work"
+    @dispatch_client = ArvadosApiClient.new
+  end
+
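+  # Create a log record via the API as the dispatch1 user, the way a real
+  # dispatch process would.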
+  def dispatch_log(body)
+    use_token :dispatch1 do
+      @dispatch_client.api('logs', '', log: body)
+    end
+  end
+
+  test "test page" do
+    visit(page_with_token("active", "/websockets"))
+    fill_in("websocket-message-content", :with => "Stuff")
+    click_button("Send")
+    assert_text '"status":400'
+  end
+
+  [
+   ['pipeline_instances', 'pipeline_in_running_state', api_fixture('jobs')['running']],
+   ['jobs', 'running'],
+   ['containers', 'running'],
+   ['container_requests', 'running', api_fixture('containers')['running']],
+  ].each do |controller, view_fixture_name, log_target_fixture|
+    view_fixture = api_fixture(controller)[view_fixture_name]
+    log_target_fixture ||= view_fixture
+
+    test "test live logging and scrolling for #{controller}" do
+
+      visit(page_with_token("active", "/#{controller}/#{view_fixture['uuid']}\#Log"))
+      assert_no_text '123 hello'
+
+      text = ""
+      (1..1000).each do |i|
+        text << "#{i} hello\n"
+      end
+
+      dispatch_log(owner_uuid: log_target_fixture['owner_uuid'],
+                   object_uuid: log_target_fixture['uuid'],
+                   event_type: "stderr",
+                   properties: {"text" => text})
+      assert_text '1000 hello'
+
+      # First test that when we're already at the bottom of the page, it scrolls down
+      # when a new line is added.
+      old_top = page.evaluate_script("$('#event_log_div').scrollTop()")
+
+      dispatch_log(owner_uuid: log_target_fixture['owner_uuid'],
+                   object_uuid: log_target_fixture['uuid'],
+                   event_type: "dispatch",
+                   properties: {"text" => "1001 hello\n"})
+      assert_text '1001 hello'
+
+      # Check that new value of scrollTop is greater than the old one
+      new_top = page.evaluate_script("$('#event_log_div').scrollTop()")
+      assert_operator new_top, :>, old_top
+
+      # Now scroll to 30 pixels from the top
+      page.execute_script "$('#event_log_div').scrollTop(30)"
+      assert_equal 30, page.evaluate_script("$('#event_log_div').scrollTop()")
+
+      dispatch_log(owner_uuid: log_target_fixture['owner_uuid'],
+                   object_uuid: log_target_fixture['uuid'],
+                   event_type: "stdout",
+                   properties: {"text" => "1002 hello\n"})
+      assert_text '1002 hello'
+
+      # Check that we haven't changed scroll position
+      assert_equal 30, page.evaluate_script("$('#event_log_div').scrollTop()")
+    end
+  end
+
+  test "pipeline instance arv-refresh-on-log-event" do
+    # Do something and check that the pane reloads.
+    p = use_token :active do
+      PipelineInstance.create(state: "RunningOnServer",
+                              components: {
+                                c1: {
+                                  script: "test_hash.py",
+                                  script_version: "1de84a854e2b440dc53bf42f8548afa4c17da332"
+                                }
+                              })
+    end
+    visit(page_with_token("active", "/pipeline_instances/#{p.uuid}"))
+
+    assert_text 'Active'
+    assert page.has_link? 'Pause'
+    assert_no_text 'Complete'
+    assert page.has_no_link? 'Re-run with latest'
+
+    use_token :dispatch1 do
+      p.update_attributes!(state: 'Complete')
+    end
+
+    assert_no_text 'Active'
+    assert page.has_no_link? 'Pause'
+    assert_text 'Complete'
+    assert page.has_link? 'Re-run with latest'
+  end
+
+  test "job arv-refresh-on-log-event" do
+    # Do something and check that the pane reloads.
+    uuid = api_fixture('jobs')['running_will_be_completed']['uuid']
+    visit(page_with_token("active", "/jobs/#{uuid}"))
+
+    assert_no_text 'complete'
+    assert_no_text 'Re-run job'
+
+    use_token :dispatch1 do
+      Job.find(uuid).update_attributes!(state: 'Complete')
+    end
+
+    assert_text 'complete'
+    assert_text 'Re-run job'
+  end
+
+  test "dashboard arv-refresh-on-log-event" do
+    visit(page_with_token("active", "/"))
+
+    assert_no_text 'test dashboard arv-refresh-on-log-event'
+
+    # Do something and check that the pane reloads.
+    use_token :active do
+      p = PipelineInstance.create({state: "RunningOnServer",
+                                    name: "test dashboard arv-refresh-on-log-event",
+                                    components: {
+                                    }
+                                  })
+    end
+
+    assert_text 'test dashboard arv-refresh-on-log-event'
+  end
+
+  test 'job graph appears when first data point is already in logs table' do
+    job_graph_first_datapoint_test
+  end
+
+  test 'job graph appears when first data point arrives by websocket' do
+    use_token :admin do
+      Log.find(api_fixture('logs')['crunchstat_for_running_job']['uuid']).destroy
+    end
+    job_graph_first_datapoint_test expect_existing_datapoints: false
+  end
+
+  def job_graph_first_datapoint_test expect_existing_datapoints: true
+    uuid = api_fixture('jobs')['running']['uuid']
+
+    visit page_with_token "active", "/jobs/#{uuid}"
+    click_link "Log"
+
+    assert_selector '#event_log_div', visible: true
+
+    if expect_existing_datapoints
+      assert_selector '#log_graph_div', visible: true
+      # Magic numbers 12.99 etc come from the job log fixture:
+      assert_last_datapoint 'T1-cpu', (((12.99+0.99)/10.0002)/8)
+    else
+      # Until graphable data arrives, we should see the text log but not the graph.
+      assert_no_selector '#log_graph_div', visible: true
+    end
+
+    text = "2014-11-07_23:33:51 #{uuid} 31708 1 stderr crunchstat: cpu 1970.8200 user 60.2700 sys 8 cpus -- interval 10.0002 seconds 35.3900 user 0.8600 sys"
+
+    assert_triggers_dom_event 'arv-log-event' do
+      dispatch_log(owner_uuid: api_fixture('jobs')['running']['owner_uuid'],
+                   object_uuid: uuid,
+                   event_type: "stderr",
+                   properties: {"text" => text})
+    end
+
+    # Graph should have appeared (even if it hadn't above). It's
+    # important not to wait like matchers usually do: we are
+    # confirming the graph is visible _immediately_ after the first
+    # data point arrives.
+    using_wait_time 0 do
+      assert_selector '#log_graph_div', visible: true
+    end
+    assert_last_datapoint 'T1-cpu', (((35.39+0.86)/10.0002)/8)
+  end
+
+  test "live log charting from replayed log" do
+    uuid = api_fixture("jobs")['running']['uuid']
+
+    visit page_with_token "active", "/jobs/#{uuid}"
+    click_link "Log"
+
+    assert_triggers_dom_event 'arv-log-event' do
+      ApiServerForTests.new.run_rake_task("replay_job_log", "test/job_logs/crunchstatshort.log,1.0,#{uuid}")
+    end
+
+    assert_last_datapoint 'T1-cpu', (((35.39+0.86)/10.0002)/8)
+  end
+
+  def assert_last_datapoint series, value
+    datum = page.evaluate_script("jobGraphData[jobGraphData.length-1]['#{series}']")
+    assert_in_epsilon value, datum.to_f
+  end
+
+  test "test running job with just a few previous log records" do
+    job = api_fixture("jobs")['running']
+
+    # Create just one old log record
+    dispatch_log(owner_uuid: job['owner_uuid'],
+                 object_uuid: job['uuid'],
+                 event_type: "stderr",
+                 properties: {"text" => "Historic log message"})
+
+    visit page_with_token("active", "/jobs/#{job['uuid']}\#Log")
+
+    # Expect "all" historic log records because we have less than
+    # default Rails.configuration.running_job_log_records_to_fetch count
+    assert_text 'Historic log message'
+
+    # Create new log record and expect it to show up in log tab
+    dispatch_log(owner_uuid: job['owner_uuid'],
+                 object_uuid: job['uuid'],
+                 event_type: "stderr",
+                 properties: {"text" => "Log message after subscription"})
+    assert_text 'Log message after subscription'
+  end
+
+  test "test running job with too many previous log records" do
+    max = 5
+    Rails.configuration.running_job_log_records_to_fetch = max
+    job = api_fixture("jobs")['running']
+
+    # Create max+1 log records
+    (0..max).each do |count|
+      dispatch_log(owner_uuid: job['owner_uuid'],
+                   object_uuid: job['uuid'],
+                   event_type: "stderr",
+                   properties: {"text" => "Old log message #{count}"})
+    end
+
+    visit page_with_token("active", "/jobs/#{job['uuid']}\#Log")
+
+    # Expect all but the first historic log record, because we created one
+    # record more than the fetch count allows.
+    (1..max).each do |count|
+      assert_text "Old log message #{count}"
+    end
+    assert_no_text 'Old log message 0'
+
+    # Create one more log record after subscription
+    dispatch_log(owner_uuid: job['owner_uuid'],
+                 object_uuid: job['uuid'],
+                 event_type: "stderr",
+                 properties: {"text" => "Life goes on!"})
+
+    # Expect it to show up in log tab
+    assert_text 'Life goes on!'
+  end
+end
diff --git a/apps/workbench/test/integration/work_units_test.rb b/apps/workbench/test/integration/work_units_test.rb
new file mode 100644 (file)
index 0000000..e5cc6e4
--- /dev/null
@@ -0,0 +1,316 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'helpers/fake_websocket_helper'
+require 'integration_helper'
+
+class WorkUnitsTest < ActionDispatch::IntegrationTest
+  include FakeWebsocketHelper
+
+  setup do
+    need_javascript
+  end
+
+  [[true, 25, 100,
+    ['/pipeline_instances/zzzzz-d1hrv-1yfj61234abcdk3',
+     '/pipeline_instances/zzzzz-d1hrv-jobspeccomponts',
+     '/jobs/zzzzz-8i9sb-grx15v5mjnsyxk7',
+     '/jobs/zzzzz-8i9sb-n7omg50bvt0m1nf',
+     '/container_requests/zzzzz-xvhdp-cr4completedcr2',
+     '/container_requests/zzzzz-xvhdp-cr4requestercn2'],
+    ['/pipeline_instances/zzzzz-d1hrv-scarxiyajtshq3l',
+     '/container_requests/zzzzz-xvhdp-oneof60crs00001']],
+   [false, 25, 100,
+    ['/pipeline_instances/zzzzz-d1hrv-1yfj61234abcdk3',
+     '/pipeline_instances/zzzzz-d1hrv-jobspeccomponts',
+     '/container_requests/zzzzz-xvhdp-cr4completedcr2'],
+    ['/pipeline_instances/zzzzz-d1hrv-scarxiyajtshq3l',
+     '/container_requests/zzzzz-xvhdp-oneof60crs00001',
+     '/jobs/zzzzz-8i9sb-grx15v5mjnsyxk7',
+     '/jobs/zzzzz-8i9sb-n7omg50bvt0m1nf',
+     '/container_requests/zzzzz-xvhdp-cr4requestercn2'
+    ]]
+  ].each do |show_children, expected_min, expected_max, expected, not_expected|
+    test "scroll all_processes page with show_children=#{show_children}" do
+      visit page_with_token('active', "/all_processes")
+
+      if show_children
+        find('#IncludeChildProcs').click
+        wait_for_ajax
+      end
+
+      page_scrolls = expected_max/20 + 2
+      within('.arv-recent-all-processes') do
+        (0..page_scrolls).each do |i|
+          page.driver.scroll_to 0, 999000
+          begin
+            wait_for_ajax
+          rescue
+          end
+        end
+      end
+
+      # Verify that expected number of processes are found
+      found_items = page.all('tr[data-object-uuid]')
+      found_count = found_items.count
+      if expected_min == expected_max
+        assert_equal(expected_min, found_count,
+                     "Did not find the expected number of items. Expected #{expected_min} and found #{found_count}")
+        assert page.has_no_text? 'request failed'
+      else
+        assert_operator(found_count, :>=, expected_min,
+                        "Found too few items. Expected at least #{expected_min} and found #{found_count}")
+        assert_operator(found_count, :<=, expected_max,
+                        "Found too many items. Expected at most #{expected_max} and found #{found_count}")
+      end
+
+      # verify that all expected uuid links are found
+      expected.each do |link|
+        assert_selector "a[href=\"#{link}\"]"
+      end
+
+      # verify that none of the not_expected uuid links are found
+      not_expected.each do |link|
+        assert_no_selector "a[href=\"#{link}\"]"
+      end
+    end
+  end
+
+  [
+    ['jobs', 'running_job_with_components', true, true],
+    ['pipeline_instances', 'components_is_jobspec', true, true],
+    ['containers', 'running', false],
+    ['container_requests', 'running', true],
+  ].each do |type, fixture, cancelable, confirm_cancellation|
+    test "cancel button for #{type}/#{fixture}" do
+      if cancelable
+        need_selenium 'to cancel'
+      end
+
+      obj = api_fixture(type)[fixture]
+      visit page_with_token "active", "/#{type}/#{obj['uuid']}"
+
+      assert_text 'created_at'
+      if cancelable
+        assert_text 'priority: 501' if type.include?('container')
+        if type.include?('pipeline')
+          assert_selector 'a', text: 'Pause'
+          first('a,link', text: 'Pause').click
+        else
+          assert_selector 'button', text: 'Cancel'
+          first('a,button', text: 'Cancel').click
+        end
+        if confirm_cancellation
+          alert = page.driver.browser.switch_to.alert
+          alert.accept
+        end
+        wait_for_ajax
+      end
+
+      if type.include?('pipeline')
+        assert_selector 'a', text: 'Resume'
+        assert_no_selector 'a', text: 'Pause'
+      elsif type.include?('job')
+        assert_text 'Cancelled'
+        assert_text 'Paused'  # this job has a pipeline child which was also cancelled
+        assert_no_selector 'button', text: 'Cancel'
+      elsif cancelable
+        assert_text 'priority: 0'
+      end
+    end
+  end
+
+  [
+    ['jobs', 'running_job_with_components'],
+    ['pipeline_instances', 'has_component_with_completed_jobs'],
+    ['container_requests', 'running'],
+    ['container_requests', 'completed'],
+  ].each do |type, fixture|
+    test "edit description for #{type}/#{fixture}" do
+      obj = api_fixture(type)[fixture]
+      visit page_with_token "active", "/#{type}/#{obj['uuid']}"
+
+      within('.arv-description-as-subtitle') do
+        find('.fa-pencil').click
+        find('.editable-input textarea').set('*Textile description for object*')
+        find('.editable-submit').click
+      end
+      wait_for_ajax
+
+      # verify description
+      assert page.has_no_text? '*Textile description for object*'
+      assert page.has_text? 'Textile description for object'
+    end
+  end
+
+  [
+    ['Pipeline with default input specifications', 'part-one', 'Provide values for the following'],
+    ['Workflow with default input specifications', 'this workflow has inputs specified', 'Provide a value for the following'],
+  ].each do |template_name, preview_txt, process_txt|
+    test "run a process using template #{template_name} from dashboard" do
+      visit page_with_token('admin')
+      assert_text 'Recent pipelines and processes' # seeing dashboard now
+
+      within('.recent-processes-actions') do
+        assert page.has_link?('All processes')
+        find('a', text: 'Run a process').click
+      end
+
+      # in the chooser, verify preview and click Next button
+      within('.modal-dialog') do
+        find('.selectable', text: template_name).click
+        assert_text preview_txt
+        find('.btn', text: 'Next: choose inputs').click
+      end
+
+      # in the process page now
+      assert_text process_txt
+      assert_selector 'a', text: template_name
+
+      assert_equal "Set value for ex_string_def", find('div.form-group > div > p.form-control-static > a', text: "hello-testing-123")[:"data-title"]
+
+      page.assert_selector 'a.disabled,button.disabled', text: 'Run'
+    end
+  end
+
+  test 'display container state changes in Container Request live log' do
+    use_fake_websocket_driver
+    c = api_fixture('containers')['queued']
+    cr = api_fixture('container_requests')['queued']
+    visit page_with_token('active', '/container_requests/'+cr['uuid'])
+    click_link('Log')
+
+    # The attrs of the "terminal window" text div in the log tab
+    # indicates which objects' events are worth displaying. Events
+    # that arrive too early (before that div exists) are not
+    # shown. For the user's sake, these early logs should also be
+    # retrieved and shown one way or another -- but in this particular
+    # test, we are only interested in logs that arrive by
+    # websocket. Therefore, to avoid races, we wait for the log tab to
+    # display before sending any events.
+    assert_text 'Recent logs'
+
+    [[{
+        event_type: 'dispatch',
+        properties: {
+          text: "dispatch logged a fake message\n",
+        },
+      }, "dispatch logged"],
+     [{
+        event_type: 'update',
+        properties: {
+          old_attributes: {state: 'Locked'},
+          new_attributes: {state: 'Queued'},
+        },
+      }, "Container #{c['uuid']} was returned to the queue"],
+     [{
+        event_type: 'update',
+        properties: {
+          old_attributes: {state: 'Queued'},
+          new_attributes: {state: 'Locked'},
+        },
+      }, "Container #{c['uuid']} was taken from the queue by a dispatch process"],
+     [{
+        event_type: 'crunch-run',
+        properties: {
+          text: "according to fake crunch-run,\nsome setup stuff happened on the compute node\n",
+        },
+      }, "setup stuff happened"],
+     [{
+        event_type: 'update',
+        properties: {
+          old_attributes: {state: 'Locked'},
+          new_attributes: {state: 'Running'},
+        },
+      }, "Container #{c['uuid']} started"],
+     [{
+        event_type: 'update',
+        properties: {
+          old_attributes: {state: 'Running'},
+          new_attributes: {state: 'Complete', exit_code: 1},
+        },
+      }, "Container #{c['uuid']} finished"],
+     # It's unrealistic for state to change again once it's Complete,
+     # but the logging code doesn't care, so we do it to keep the test
+     # simple.
+     [{
+        event_type: 'update',
+        properties: {
+          old_attributes: {state: 'Running'},
+          new_attributes: {state: 'Cancelled'},
+        },
+      }, "Container #{c['uuid']} was cancelled"],
+    ].each do |send_event, expect_log_text|
+      assert_no_text(expect_log_text)
+      fake_websocket_event(send_event.merge(object_uuid: c['uuid']))
+      assert_text(expect_log_text)
+    end
+  end
+
+  [
+    ['jobs', 'active', 'running_job_with_components', 'component1', '/jobs/zzzzz-8i9sb-jyq01m7in1jlofj#Log'],
+    ['pipeline_instances', 'active', 'pipeline_in_running_state', 'foo', '/jobs/zzzzz-8i9sb-pshmckwoma9plh7#Log'],
+    ['pipeline_instances', nil, 'pipeline_in_publicly_accessible_project_but_other_objects_elsewhere', 'foo', 'Log unavailable'],
+  ].each do |type, token, fixture, child, log_link|
+    test "link_to_log for #{fixture} for #{token}" do
+      obj = api_fixture(type)[fixture]
+      if token
+        visit page_with_token token, "/#{type}/#{obj['uuid']}"
+      else
+        Rails.configuration.anonymous_user_token =
+          api_fixture("api_client_authorizations", "anonymous", "api_token")
+        visit "/#{type}/#{obj['uuid']}"
+      end
+
+      click_link(child)
+
+      if token
+        assert_selector "a[href=\"#{log_link}\"]"
+      else
+        assert_text log_link
+      end
+    end
+  end
+
+  test 'Run from workflows index page' do
+    visit page_with_token('active', '/workflows')
+
+    wf_count = page.all('a[data-original-title="show workflow"]').count
+    assert_operator wf_count, :>, 0
+
+    # Run one of the workflows
+    wf_name = 'Workflow with input specifications'
+    within('tr', text: wf_name) do
+      find('a,button', text: 'Run').click
+    end
+
+    # Choose project for the container_request being created
+    within('.modal-dialog') do
+      find('.selectable', text: 'A Project').click
+      find('button', text: 'Choose').click
+    end
+
+    # In newly created container_request page now
+    assert_text 'A Project' # CR created in "A Project"
+    assert_text "This container request was created from the workflow #{wf_name}"
+    assert_match(/Provide a value for .* then click the "Run" button to start the workflow/, page.text)
+  end
+
+  test 'Run workflow from show page' do
+    visit page_with_token('active', '/workflows/zzzzz-7fd4e-validwithinputs')
+
+    find('a,button', text: 'Run this workflow').click
+
+    # Choose project for the container_request being created
+    within('.modal-dialog') do
+      find('.selectable', text: 'A Project').click
+      find('button', text: 'Choose').click
+    end
+
+    # In newly created container_request page now
+    assert_text 'A Project' # CR created in "A Project"
+    assert_text "This container request was created from the workflow"
+    assert_match(/Provide a value for .* then click the "Run" button to start the workflow/, page.text)
+  end
+end
diff --git a/apps/workbench/test/integration_helper.rb b/apps/workbench/test/integration_helper.rb
new file mode 100644 (file)
index 0000000..85c929f
--- /dev/null
@@ -0,0 +1,274 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'capybara/rails'
+require 'capybara/poltergeist'
+require 'uri'
+require 'yaml'
+
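+# Bind port 0 so the OS picks a free ephemeral port; retry on collisions.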
+def available_port for_what
+  begin
+    Addrinfo.tcp("0.0.0.0", 0).listen do |srv|
+      port = srv.connect_address.ip_port
+      # Selenium needs an additional locking port, check if it's available
+      # and retry if necessary.
+      if for_what == 'selenium'
+        locking_port = port - 1
+        Addrinfo.tcp("0.0.0.0", locking_port).listen.close
+      end
+      STDERR.puts "Using port #{port} for #{for_what}"
+      return port
+    end
+  rescue Errno::EADDRINUSE, Errno::EACCES
+    retry
+  end
+end
+
+def selenium_opts
+  {
+    port: available_port('selenium'),
+    desired_capabilities: Selenium::WebDriver::Remote::Capabilities.firefox(
+      acceptInsecureCerts: true,
+    ),
+  }
+end
+
+def poltergeist_opts
+  {
+    phantomjs_options: ['--ignore-ssl-errors=true'],
+    port: available_port('poltergeist'),
+    window_size: [1200, 800],
+  }
+end
+
+Capybara.register_driver :poltergeist do |app|
+  Capybara::Poltergeist::Driver.new app, poltergeist_opts
+end
+
+Capybara.register_driver :poltergeist_debug do |app|
+  Capybara::Poltergeist::Driver.new app, poltergeist_opts.merge(inspector: true)
+end
+
+Capybara.register_driver :poltergeist_with_fake_websocket do |app|
+  js = File.expand_path '../support/fake_websocket.js', __FILE__
+  Capybara::Poltergeist::Driver.new app, poltergeist_opts.merge(extensions: [js])
+end
+
+Capybara.register_driver :poltergeist_without_file_api do |app|
+  js = File.expand_path '../support/remove_file_api.js', __FILE__
+  Capybara::Poltergeist::Driver.new app, poltergeist_opts.merge(extensions: [js])
+end
+
+Capybara.register_driver :selenium do |app|
+  Capybara::Selenium::Driver.new app, selenium_opts
+end
+
+Capybara.register_driver :selenium_with_download do |app|
+  profile = Selenium::WebDriver::Firefox::Profile.new
+  profile['browser.download.dir'] = DownloadHelper.path.to_s
+  profile['browser.download.downloadDir'] = DownloadHelper.path.to_s
+  profile['browser.download.defaultFolder'] = DownloadHelper.path.to_s
+  profile['browser.download.folderList'] = 2 # "save to user-defined location"
+  profile['browser.download.manager.showWhenStarting'] = false
+  profile['browser.helperApps.alwaysAsk.force'] = false
+  profile['browser.helperApps.neverAsk.saveToDisk'] = 'text/plain,application/octet-stream'
+  Capybara::Selenium::Driver.new app, selenium_opts.merge(profile: profile)
+end
+
+module WaitForAjax
+  # FIXME: Huge side effect here
+  # The following line changes the global default Capybara wait time, affecting
+  # every test which follows this one. This should be removed and the failing tests
+  # should have their individual wait times increased, if appropriate, using
+  # the using_wait_time(N) construct to temporarily change the wait time.
+  # Note: the below is especially bad because there are places that increase wait
+  # times using a multiplier e.g. using_wait_time(3 * Capybara.default_max_wait_time)
+  Capybara.default_max_wait_time = 10
+  def wait_for_ajax
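+    # Poll jQuery's pending-request counter every 100ms; raise after ~10 seconds.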
+    timeout = 10
+    count = 0
+    while page.evaluate_script("jQuery.active").to_i > 0
+      count += 1
+      raise "AJAX request took more than #{timeout} seconds" if count > timeout * 10
+      sleep(0.1)
+    end
+  end
+
+end
+
+module AssertDomEvent
+  # Yield the supplied block, then wait for an event to arrive at a
+  # DOM element.
+  def assert_triggers_dom_event events, target='body'
+    magic = 'received-dom-event-' + rand(2**30).to_s(36)
+    page.execute_script <<eos
+      $('#{target}').one('#{events}', function() {
+        $('body').addClass('#{magic}');
+      });
+eos
+    yield
+    assert_selector "body.#{magic}"
+    page.execute_script "$('body').removeClass('#{magic}');";
+  end
+end
+
+module HeadlessHelper
+  class HeadlessSingleton
+    @display = ENV['ARVADOS_TEST_HEADLESS_DISPLAY'] || rand(400)+100
+    STDERR.puts "Using display :#{@display} for headless tests"
+    def self.get
+      @headless ||= Headless.new reuse: false, display: @display
+    end
+  end
+
+  Capybara.default_driver = :rack_test
+
+  def self.included base
+    base.class_eval do
+      setup do
+        Capybara.use_default_driver
+        @headless = false
+      end
+
+      teardown do
+        if @headless
+          @headless.stop
+          @headless = false
+        end
+      end
+    end
+  end
+
+  def need_selenium reason=nil, driver=:selenium
+    Capybara.current_driver = driver
+    unless ENV['ARVADOS_TEST_HEADFUL'] or @headless
+      @headless = HeadlessSingleton.get
+      @headless.start
+    end
+  end
+
+  def need_javascript reason=nil
+    unless Capybara.current_driver == :selenium
+      Capybara.current_driver = :poltergeist
+    end
+  end
+end
+
+module KeepWebConfig
+  def getport service
+    File.read(File.expand_path("../../../../tmp/#{service}.port", __FILE__))
+  end
+
+  def use_keep_web_config
+    @kwport = getport 'keep-web-ssl'
+    @kwdport = getport 'keep-web-dl-ssl'
+    Rails.configuration.keep_web_url = "https://localhost:#{@kwport}/c=%{uuid_or_pdh}"
+    Rails.configuration.keep_web_download_url = "https://localhost:#{@kwdport}/c=%{uuid_or_pdh}"
+  end
+end
+
+class ActionDispatch::IntegrationTest
+  # Make the Capybara DSL available in all integration tests
+  include Capybara::DSL
+  include ApiFixtureLoader
+  include WaitForAjax
+  include AssertDomEvent
+  include HeadlessHelper
+
+  @@API_AUTHS = self.api_fixture('api_client_authorizations')
+
+  def page_with_token(token, path='/')
+    # Generate a page path with an embedded API token.
+    # Typical usage: visit page_with_token('token_name', page)
+    # The token can be specified by the name of an api_client_authorizations
+    # fixture, or passed as a raw string.
+    api_token = ((@@API_AUTHS.include? token) ?
+                 @@API_AUTHS[token]['api_token'] : token)
+    path_parts = path.partition("#")
+    sep = (path_parts.first.include? '?') ? '&' : '?'
+    q_string = URI.encode_www_form('api_token' => api_token)
+    path_parts.insert(1, "#{sep}#{q_string}")
+    path_parts.join("")
+  end
+
+  # Find a page element, but return false instead of raising an
+  # exception if not found. Use this with assertions to explain that
+  # the error signifies a failed test rather than an unexpected error
+  # during a testing procedure.
+  def find? *args
+    begin
+      find *args
+    rescue Capybara::ElementNotFound
+      false
+    end
+  end
+
+  @@screenshot_count = 1
+  def screenshot
+    image_file = "./tmp/workbench-fail-#{@@screenshot_count}.png"
+    begin
+      page.save_screenshot image_file
+    rescue Capybara::NotSupportedByDriverError
+      # C'est la vie.
+    else
+      puts "Saved #{image_file}"
+      @@screenshot_count += 1
+    end
+  end
+
+  teardown do
+    if not passed?
+      screenshot
+    end
+    if Capybara.current_driver == :selenium
+      # Clearing localStorage crashes on a page where JS isn't
+      # executed. We also need to make sure we're clearing
+      # localStorage for the test server's origin, even if we finished
+      # the test on a different origin.
+      host = Capybara.current_session.server.host
+      port = Capybara.current_session.server.port
+      base = "http://#{host}:#{port}"
+      if page.evaluate_script("window.document.contentType") != "text/html" ||
+         !page.evaluate_script("window.location.toString()").start_with?(base)
+        visit "#{base}/404"
+      end
+      page.execute_script("window.localStorage.clear()")
+    else
+      page.driver.restart if defined?(page.driver.restart)
+    end
+    Capybara.reset_sessions!
+  end
+
+  def accept_alert
+    if Capybara.current_driver == :selenium
+      (0..9).each do
+        begin
+          page.driver.browser.switch_to.alert.accept
+          break
+        rescue Selenium::WebDriver::Error::NoSuchAlertError
+          sleep 0.1
+        end
+      end
+    else
+      # poltergeist returns true for confirm, so no need to accept
+    end
+  end
+end
+
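+# Pipe +data+ into arv-put to store it as a single Keep block, then create a
+# Collection whose manifest lists that block under +filename+.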
+def upload_data_and_get_collection(data, user, filename, owner_uuid=nil)
+  token = api_token(user)
+  datablock = `echo -n #{data.shellescape} | ARVADOS_API_TOKEN=#{token.shellescape} arv-put --no-progress --raw -`.strip
+  assert $?.success?, $?
+  col = nil
+  use_token user do
+    mtxt = ". #{datablock} 0:#{data.length}:#{filename}\n"
+    if owner_uuid
+      col = Collection.create(manifest_text: mtxt, owner_uuid: owner_uuid)
+    else
+      col = Collection.create(manifest_text: mtxt)
+    end
+  end
+  return col
+end
diff --git a/apps/workbench/test/integration_performance/collection_unit_test.rb b/apps/workbench/test/integration_performance/collection_unit_test.rb
new file mode 100644 (file)
index 0000000..44b9ad9
--- /dev/null
@@ -0,0 +1,75 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/manifest_examples'
+require 'helpers/time_block'
+
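+# Minimal stand-in class so Blob.sign_locator can be stubbed in setup below.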
+class Blob
+end
+
+class BigCollectionTest < ActiveSupport::TestCase
+  include ManifestExamples
+
+  setup do
+    Blob.stubs(:sign_locator).returns 'd41d8cd98f00b204e9800998ecf8427e+0'
+  end
+
+  teardown do
+    Thread.current[:arvados_api_client] = nil
+  end
+
+  # You can try with compress=false here too, but at last check it
+  # didn't make a significant difference.
+  [true].each do |compress|
+    test "crud cycle for collection with big manifest (compress=#{compress})" do
+      Rails.configuration.api_response_compression = compress
+      Thread.current[:arvados_api_client] = nil
+      crudtest
+    end
+  end
+
+  def crudtest
+    use_token :active
+    bigmanifest = time_block 'build example' do
+      make_manifest(streams: 100,
+                    files_per_stream: 100,
+                    blocks_per_file: 20,
+                    bytes_per_block: 0)
+    end
+    c = time_block "new (manifest size = #{bigmanifest.length>>20}MiB)" do
+      Collection.new manifest_text: bigmanifest
+    end
+    time_block 'create' do
+      c.save!
+    end
+    time_block 'read' do
+      Collection.find c.uuid
+    end
+    time_block 'read(cached)' do
+      Collection.find c.uuid
+    end
+    time_block 'list' do
+      list = Collection.select(['uuid', 'manifest_text']).filter [['uuid','=',c.uuid]]
+      assert_equal 1, list.count
+      assert_equal c.uuid, list.first.uuid
+      assert_not_nil list.first.manifest_text
+    end
+    time_block 'update(name-only)' do
+      manifest_text_length = c.manifest_text.length
+      c.update_attributes name: 'renamed during test case'
+      assert_equal manifest_text_length, c.manifest_text.length
+    end
+    time_block 'update' do
+      c.manifest_text += ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:empty.txt\n"
+      c.save!
+    end
+    time_block 'delete' do
+      c.destroy
+    end
+    time_block 'read(404)' do
+      assert_empty Collection.filter([['uuid','=',c.uuid]])
+    end
+  end
+end
diff --git a/apps/workbench/test/integration_performance/collections_controller_test.rb b/apps/workbench/test/integration_performance/collections_controller_test.rb
new file mode 100644 (file)
index 0000000..17dd9b6
--- /dev/null
@@ -0,0 +1,75 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/manifest_examples'
+require 'helpers/time_block'
+
+class Blob
+end
+
+class BigCollectionsControllerTest < ActionController::TestCase
+  include ManifestExamples
+
+  setup do
+    Blob.stubs(:sign_locator).returns 'd41d8cd98f00b204e9800998ecf8427e+0'
+  end
+
+  test "combine two big and two small collections" do
+    @controller = ActionsController.new
+    bigmanifest1 = time_block 'build example' do
+      make_manifest(streams: 100,
+                    files_per_stream: 100,
+                    blocks_per_file: 20,
+                    bytes_per_block: 0)
+    end
+    bigmanifest2 = bigmanifest1.gsub '.txt', '.txt2'
+    smallmanifest1 = ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:small1.txt\n"
+    smallmanifest2 = ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:small2.txt\n"
+    totalsize = bigmanifest1.length + bigmanifest2.length +
+      smallmanifest1.length + smallmanifest2.length
+    parts = time_block "create (total #{totalsize>>20}MiB)" do
+      use_token :active do
+        {
+          big1: Collection.create(manifest_text: bigmanifest1),
+          big2: Collection.create(manifest_text: bigmanifest2),
+          small1: Collection.create(manifest_text: smallmanifest1),
+          small2: Collection.create(manifest_text: smallmanifest2),
+        }
+      end
+    end
+    time_block 'combine' do
+      post :combine_selected_files_into_collection, {
+        selection: [parts[:big1].uuid,
+                    parts[:big2].uuid,
+                    parts[:small1].uuid + '/small1.txt',
+                    parts[:small2].uuid + '/small2.txt',
+                   ],
+        format: :html
+      }, session_for(:active)
+    end
+    assert_response :redirect
+  end
+
+  [:json, :html].each do |format|
+    test "show collection with big manifest (#{format})" do
+      bigmanifest = time_block 'build example' do
+        make_manifest(streams: 100,
+                      files_per_stream: 100,
+                      blocks_per_file: 20,
+                      bytes_per_block: 0)
+      end
+      @controller = CollectionsController.new
+      c = time_block "create (manifest size #{bigmanifest.length>>20}MiB)" do
+        use_token :active do
+          Collection.create(manifest_text: bigmanifest)
+        end
+      end
+      time_block 'show' do
+        get :show, {id: c.uuid, format: format}, session_for(:active)
+      end
+      assert_response :success
+    end
+  end
+end
diff --git a/apps/workbench/test/integration_performance/collections_perf_test.rb b/apps/workbench/test/integration_performance/collections_perf_test.rb
new file mode 100644 (file)
index 0000000..c6dc3be
--- /dev/null
@@ -0,0 +1,120 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+# The tests in the "integration_performance" dir are not included in regular
+#   build pipeline since it is not one of the "standard" test directories.
+#
+# To run tests in this directory use the following command:
+# ./run-tests.sh WORKSPACE=~/arvados --only apps/workbench apps/workbench_test="TEST=test/integration_performance/*.rb"
+#
+
+class CollectionsPerfTest < ActionDispatch::IntegrationTest
+  setup do
+    Capybara.current_driver = :rack_test
+  end
+
+  def create_large_collection size, file_name_prefix
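+    # Grow a single-stream manifest with zero-length file entries until its
+    # text exceeds the requested size in bytes.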
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e+0"
+
+    i = 0
+    until manifest_text.length > size do
+      manifest_text << " 0:0:#{file_name_prefix}#{i.to_s}"
+      i += 1
+    end
+    manifest_text << "\n"
+
+    Rails.logger.info "Creating collection at #{Time.now.to_f}"
+    collection = Collection.create!(manifest_text: manifest_text)
+    Rails.logger.info "Done creating collection at #{Time.now.to_f}"
+
+    collection
+  end
+
+  [
+    1000000,
+    10000000,
+    20000000,
+  ].each do |size|
+    test "Create and show large collection with manifest text of #{size}" do
+      use_token :active
+      new_collection = create_large_collection size, 'collection_file_name_with_prefix_'
+
+      Rails.logger.info "Visiting collection at #{Time.now.to_f}"
+      visit page_with_token('active', "/collections/#{new_collection.uuid}")
+      Rails.logger.info "Done visiting collection at #{Time.now.to_f}"
+
+      assert_selector "input[value=\"#{new_collection.uuid}\"]"
+      assert(page.has_link?('collection_file_name_with_prefix_0'), "Collection page did not include file link")
+    end
+  end
+
+  # This does not work with larger sizes because of need_javascript.
+  # Just use one test with 100,000 for now.
+  [
+    100000,
+  ].each do |size|
+    test "Create, show, and update description for large collection with manifest text of #{size}" do
+      need_javascript
+
+      use_token :active
+      new_collection = create_large_collection size, 'collection_file_name_with_prefix_'
+
+      Rails.logger.info "Visiting collection at #{Time.now.to_f}"
+      visit page_with_token('active', "/collections/#{new_collection.uuid}")
+      Rails.logger.info "Done visiting collection at #{Time.now.to_f}"
+
+      assert_selector "input[value=\"#{new_collection.uuid}\"]"
+      assert(page.has_link?('collection_file_name_with_prefix_0'), "Collection page did not include file link")
+
+      # edit description
+      Rails.logger.info "Editing description at #{Time.now.to_f}"
+      within('.arv-description-as-subtitle') do
+        find('.fa-pencil').click
+        find('.editable-input textarea').set('description for this large collection')
+        find('.editable-submit').click
+      end
+      Rails.logger.info "Done editing description at #{Time.now.to_f}"
+
+      assert_text 'description for this large collection'
+    end
+  end
+
+  [
+    [1000000, 10000],
+    [10000000, 10000],
+    [20000000, 10000],
+  ].each do |size1, size2|
+    test "Create one large collection of #{size1} and one small collection of #{size2} and combine them" do
+      use_token :active
+      first_collection = create_large_collection size1, 'collection_file_name_with_prefix_1_'
+      second_collection = create_large_collection size2, 'collection_file_name_with_prefix_2_'
+
+      Rails.logger.info "Visiting collections page at #{Time.now.to_f}"
+      visit page_with_token('active', "/collections")
+      Rails.logger.info "Done visiting collections page at at #{Time.now.to_f}"
+
+      assert_text first_collection.uuid
+      assert_text second_collection.uuid
+
+      within('tr', text: first_collection['uuid']) do
+        find('input[type=checkbox]').click
+      end
+
+      within('tr', text: second_collection['uuid']) do
+        find('input[type=checkbox]').click
+      end
+
+      Rails.logger.info "Clicking on combine collections option at #{Time.now.to_f}"
+      click_button 'Selection...'
+      within('.selection-action-container') do
+        click_link 'Create new collection with selected collections'
+      end
+      Rails.logger.info "Done combining collections at #{Time.now.to_f}"
+
+      assert(page.has_link?('collection_file_name_with_prefix_1_0'), "Collection page did not include file link")
+    end
+  end
+end
diff --git a/apps/workbench/test/performance/browsing_test.rb b/apps/workbench/test/performance/browsing_test.rb
new file mode 100644 (file)
index 0000000..71e4c5c
--- /dev/null
@@ -0,0 +1,51 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# http://guides.rubyonrails.org/v3.2.13/performance_testing.html
+
+require 'test_helper'
+require 'rails/performance_test_help'
+require 'performance_test_helper'
+require 'selenium-webdriver'
+require 'headless'
+
+class BrowsingTest < WorkbenchPerformanceTest
+  self.profile_options = { :runs => 5,
+                           :metrics => [:wall_time],
+                           :output => 'tmp/performance',
+                           :formats => [:flat] }
+
+  setup do
+    need_javascript
+  end
+
+  test "home page" do
+    visit_page_with_token
+    assert_text 'Dashboard'
+    assert_selector 'a', text: 'Run a process'
+  end
+
+  test "search for hash" do
+    visit_page_with_token
+    assert_text 'Dashboard'
+
+    assert_selector '.navbar-fixed-top'
+    assert_triggers_dom_event 'shown.bs.modal' do
+      within '.navbar-fixed-top' do
+        find_field('search this site').set 'hash'
+        find('.glyphicon-search').click
+      end
+    end
+
+    sleep(50)
+
+    # In the search dialog now. Expect at least one item in the result display.
+    within '.modal-content' do
+      assert_text 'All projects'
+      assert_text 'Search'
+      assert_selector '.selectable[data-object-uuid]'
+      click_button 'Cancel'
+    end
+  end
+end
diff --git a/apps/workbench/test/performance_test_helper.rb b/apps/workbench/test/performance_test_helper.rb
new file mode 100644 (file)
index 0000000..c3b50c2
--- /dev/null
@@ -0,0 +1,36 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'integration_helper'
+
+# Performance test can run in two different ways:
+#
+# 1. Similar to other integration tests using the command:
+#     RAILS_ENV=test bundle exec rake test:benchmark
+#
+# 2. Against a configured workbench url using "RAILS_ENV=performance".
+#     RAILS_ENV=performance bundle exec rake test:benchmark
+
+class WorkbenchPerformanceTest < ActionDispatch::PerformanceTest
+
+  # When running in "RAILS_ENV=performance" mode, uses performance
+  # config params.  In this mode, prepends workbench URL to the given
+  # path provided, and visits that page using the configured
+  # "user_token".
+  def visit_page_with_token path='/'
+    if Rails.env == 'performance'
+      token = Rails.configuration.user_token
+      workbench_url = Rails.configuration.arvados_workbench_url
+      if workbench_url.end_with? '/'
+        workbench_url = workbench_url[0, workbench_url.size-1]
+      end
+    else
+      token = 'active'
+      workbench_url = ''
+    end
+
+    visit page_with_token(token, (workbench_url + path))
+  end
+
+end
diff --git a/apps/workbench/test/support/fake_websocket.js b/apps/workbench/test/support/fake_websocket.js
new file mode 100644 (file)
index 0000000..7f04a40
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+sockets = [];
+window.WebSocket = function(url) {
+    sockets.push(this);
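+    // Fire onopen asynchronously, as a real WebSocket would, then clear the queue.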
+    window.setTimeout(function() {
+        sockets.map(function(s) {
+            s.onopen();
+        });
+        sockets.splice(0);
+    }, 1);
+}
+
+window.WebSocket.prototype.send = function(msg) {
+    // Uncomment for debugging:
+    // console.log("fake WebSocket: send: "+msg);
+}
diff --git a/apps/workbench/test/support/remove_file_api.js b/apps/workbench/test/support/remove_file_api.js
new file mode 100644 (file)
index 0000000..77dd643
--- /dev/null
@@ -0,0 +1,5 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
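+// Simulate a browser without the File API by removing the FileReader constructor.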
+window.FileReader = null;
diff --git a/apps/workbench/test/test_helper.rb b/apps/workbench/test/test_helper.rb
new file mode 100644 (file)
index 0000000..8435eb4
--- /dev/null
@@ -0,0 +1,353 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+ENV["RAILS_ENV"] = "test" if (ENV["RAILS_ENV"] != "diagnostics" and ENV["RAILS_ENV"] != "performance")
+
+unless ENV["NO_COVERAGE_TEST"]
+  begin
+    require 'simplecov'
+    require 'simplecov-rcov'
+    class SimpleCov::Formatter::MergedFormatter
+      def format(result)
+        SimpleCov::Formatter::HTMLFormatter.new.format(result)
+        SimpleCov::Formatter::RcovFormatter.new.format(result)
+      end
+    end
+    SimpleCov.formatter = SimpleCov::Formatter::MergedFormatter
+    SimpleCov.start do
+      add_filter '/test/'
+      add_filter 'initializers/secret_token'
+    end
+  rescue Exception => e
+    $stderr.puts "SimpleCov unavailable (#{e}). Proceeding without."
+  end
+end
+
+require File.expand_path('../../config/environment', __FILE__)
+require 'rails/test_help'
+require 'mocha/mini_test'
+
+class ActiveSupport::TestCase
+  # Setup all fixtures in test/fixtures/*.(yml|csv) for all tests in
+  # alphabetical order.
+  #
+  # Note: You'll currently still have to declare fixtures explicitly
+  # in integration tests -- they do not yet inherit this setting
+  fixtures :all
+  def use_token(token_name)
+    user_was = Thread.current[:user]
+    token_was = Thread.current[:arvados_api_token]
+    auth = api_fixture('api_client_authorizations')[token_name.to_s]
+    Thread.current[:arvados_api_token] = "v2/#{auth['uuid']}/#{auth['api_token']}"
+    if block_given?
+      begin
+        yield
+      ensure
+        Thread.current[:user] = user_was
+        Thread.current[:arvados_api_token] = token_was
+      end
+    end
+  end
+
+  teardown do
+    Thread.current[:arvados_api_token] = nil
+    Thread.current[:user] = nil
+    Thread.current[:reader_tokens] = nil
+    # Diagnostics suite doesn't run a server, so there's no cache to clear.
+    Rails.cache.clear unless (Rails.env == "diagnostics")
+    # Restore configuration settings changed during tests
+    self.class.reset_application_config
+  end
+
+  def self.reset_application_config
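+    # Copy back every top-level (dot-free) key from the pristine config.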
+    $application_config.each do |k,v|
+      if k.match(/^[^.]*$/)
+        Rails.configuration.send("#{k}=", v)
+      end
+    end
+  end
+end
+
+module ApiFixtureLoader
+  def self.included(base)
+    base.extend(ClassMethods)
+  end
+
+  module ClassMethods
+    @@api_fixtures = {}
+    def api_fixture(name, *keys)
+      # Returns the data structure from the named API server test fixture.
+      @@api_fixtures[name] ||= \
+      begin
+        path = File.join(ApiServerForTests::ARV_API_SERVER_DIR,
+                         'test', 'fixtures', "#{name}.yml")
+        file = IO.read(path)
+        trim_index = file.index('# Test Helper trims the rest of the file')
+        file = file[0, trim_index] if trim_index
+        YAML.load(file).each do |name, ob|
+          ob.reject! { |k, v| k.start_with?('secret_') }
+        end
+      end
+      keys.inject(@@api_fixtures[name]) { |hash, key| hash[key] }.deep_dup
+    end
+  end
+
+  def api_fixture(name, *keys)
+    self.class.api_fixture(name, *keys)
+  end
+
+  def api_token(name)
+    auth = api_fixture('api_client_authorizations')[name]
+    "v2/#{auth['uuid']}/#{auth['api_token']}"
+  end
+
+  def find_fixture(object_class, name)
+    object_class.find(api_fixture(object_class.to_s.pluralize.underscore,
+                                  name, "uuid"))
+  end
+end
+
+module ApiMockHelpers
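+  # Build a mocha mock that responds like an HTTP::Message, for stubbing API calls.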
+  def fake_api_response body, status_code, headers
+    resp = mock
+    resp.responds_like_instance_of HTTP::Message
+    resp.stubs(:headers).returns headers
+    resp.stubs(:content).returns body
+    resp.stubs(:status_code).returns status_code
+    resp
+  end
+
+  def stub_api_calls_with_body body, status_code=200, headers={}
+    stub_api_calls
+    resp = fake_api_response body, status_code, headers
+    stub_api_client.stubs(:post).returns resp
+  end
+
+  def stub_api_calls
+    @stubbed_client = ArvadosApiClient.new
+    @stubbed_client.instance_eval do
+      @api_client = HTTPClient.new
+    end
+    ArvadosApiClient.stubs(:new_or_current).returns(@stubbed_client)
+  end
+
+  def stub_api_calls_with_invalid_json
+    stub_api_calls_with_body ']"omg,bogus"['
+  end
+
+  # Return the HTTPClient mock used by the ArvadosApiClient mock. You
+  # must have called stub_api_calls first.
+  def stub_api_client
+    @stubbed_client.instance_eval do
+      @api_client
+    end
+  end
+end
+
+class ActiveSupport::TestCase
+  include ApiMockHelpers
+end
+
+class ActiveSupport::TestCase
+  include ApiFixtureLoader
+  def session_for api_client_auth_name
+    auth = api_fixture('api_client_authorizations')[api_client_auth_name.to_s]
+    {
+      arvados_api_token: "v2/#{auth['uuid']}/#{auth['api_token']}"
+    }
+  end
+  def json_response
+    Oj.load(@response.body)
+  end
+end
+
+class ApiServerForTests
+  PYTHON_TESTS_DIR = File.expand_path('../../../../sdk/python/tests', __FILE__)
+  ARV_API_SERVER_DIR = File.expand_path('../../../../services/api', __FILE__)
+  SERVER_PID_PATH = File.expand_path('tmp/pids/test-server.pid', ARV_API_SERVER_DIR)
+  WEBSOCKET_PID_PATH = File.expand_path('tmp/pids/test-server.pid', ARV_API_SERVER_DIR)
+  @main_process_pid = $$
+  @@server_is_running = false
+
+  def check_output *args
+    output = nil
+    Bundler.with_clean_env do
+      output = IO.popen(*args) do |io|
+        io.read
+      end
+      if not $?.success?
+        raise RuntimeError, "Command failed (#{$?}): #{args.inspect}"
+      end
+    end
+    output
+  end
+
+  def run_test_server
+    Dir.chdir PYTHON_TESTS_DIR do
+      check_output %w(python ./run_test_server.py start_keep)
+    end
+  end
+
+  def stop_test_server
+    Dir.chdir PYTHON_TESTS_DIR do
+      check_output %w(python ./run_test_server.py stop_keep)
+    end
+    @@server_is_running = false
+  end
+
+  def run args=[]
+    return if @@server_is_running
+
+    # Stop server left over from interrupted previous run
+    stop_test_server
+
+    ::MiniTest.after_run do
+      stop_test_server
+    end
+
+    run_test_server
+    $application_config['arvados_login_base'] = "https://#{ENV['ARVADOS_API_HOST']}/login"
+    $application_config['arvados_v1_base'] = "https://#{ENV['ARVADOS_API_HOST']}/arvados/v1"
+    $application_config['arvados_insecure_host'] = true
+    ActiveSupport::TestCase.reset_application_config
+
+    @@server_is_running = true
+  end
+
+  def run_rake_task task_name, arg_string
+    Dir.chdir ARV_API_SERVER_DIR do
+      check_output ['bundle', 'exec', 'rake', "#{task_name}[#{arg_string}]"]
+    end
+  end
+end
+
+class ActionController::TestCase
+  setup do
+    @test_counter = 0
+  end
+
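+  # Controller tests may issue at most one request; fail loudly on the second.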
+  def check_counter action
+    @test_counter += 1
+    if @test_counter == 2
+      assert_equal 1, 2, "Multiple actions in controller test"
+    end
+  end
+
+  [:get, :post, :put, :patch, :delete].each do |method|
+    define_method method do |action, *args|
+      check_counter action
+      super action, *args
+    end
+  end
+end
+
+# Test classes can call reset_api_fixtures(when_to_reset,flag) to
+# override the default. Example:
+#
+# class MySuite < ActionDispatch::IntegrationTest
+#   reset_api_fixtures :after_each_test, false
+#   reset_api_fixtures :after_suite, true
+#   ...
+# end
+#
+# The default behavior is reset_api_fixtures(:after_each_test,true).
+#
+class ActiveSupport::TestCase
+
+  def self.inherited subclass
+    subclass.class_eval do
+      class << self
+        attr_accessor :want_reset_api_fixtures
+      end
+      @want_reset_api_fixtures = {
+        after_each_test: true,
+        after_suite: false,
+        before_suite: false,
+      }
+    end
+    super
+  end
+  # Existing subclasses of ActiveSupport::TestCase (ones that already
+  # existed before we set up the self.inherited hook above) will not
+  # get their own instance variable. They're not real test cases
+  # anyway, so we give them a "don't reset anywhere" stub.
+  def self.want_reset_api_fixtures
+    {}
+  end
+
+  def self.reset_api_fixtures where, t=true
+    if not want_reset_api_fixtures.has_key? where
+      raise ArgumentError, "There is no #{where.inspect} hook"
+    end
+    self.want_reset_api_fixtures[where] = t
+  end
+
+  def self.run *args
+    reset_api_fixtures_now if want_reset_api_fixtures[:before_suite]
+    result = super
+    reset_api_fixtures_now if want_reset_api_fixtures[:after_suite]
+    result
+  end
+
+  def after_teardown
+    if self.class.want_reset_api_fixtures[:after_each_test] and
+        @want_reset_api_fixtures != false
+      self.class.reset_api_fixtures_now
+    end
+    super
+  end
+
+  def reset_api_fixtures_after_test t=true
+    @want_reset_api_fixtures = t
+  end
+
+  protected
+  def self.reset_api_fixtures_now
+    # Never try to reset fixtures when we're just using test
+    # infrastructure to run performance/diagnostics suites.
+    return unless Rails.env == 'test'
+
+    auth = api_fixture('api_client_authorizations')['admin_trustedclient']
+    Thread.current[:arvados_api_token] = "v2/#{auth['uuid']}/#{auth['api_token']}"
+    ArvadosApiClient.new.api(nil, '../../database/reset', {})
+    Thread.current[:arvados_api_token] = nil
+  end
+end
+
+# If it quacks like a duck, it must be an HTTP request object.
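+#
+# (Presumably a stand-in for an ActionDispatch request in tests that only
+# need host, port, and protocol to build absolute URLs.)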
+class RequestDuck
+  def self.host
+    "localhost"
+  end
+
+  def self.port
+    8080
+  end
+
+  def self.protocol
+    "http"
+  end
+end
+
+# Example:
+#
+# apps/workbench$ RAILS_ENV=test bundle exec irb -Ilib:test
+# > load 'test/test_helper.rb'
+# > singletest 'integration/collection_upload_test.rb', 'Upload two empty files'
+#
+def singletest test_class_file, test_name
+  load File.join('test', test_class_file)
+  Minitest.run ['-v', '-n', "test_#{test_name.gsub ' ', '_'}"]
+  Object.send(:remove_const,
+              test_class_file.gsub(/.*\/|\.rb$/, '').camelize.to_sym)
+  ::Minitest::Runnable.runnables.reject! { true }
+end
+
+if ENV["RAILS_ENV"].eql? 'test'
+  ApiServerForTests.new.run
+  ApiServerForTests.new.run ["--websockets"]
+end
+
+# Reset fixtures now (i.e., before any tests run).
+ActiveSupport::TestCase.reset_api_fixtures_now
diff --git a/apps/workbench/test/unit/.gitkeep b/apps/workbench/test/unit/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/test/unit/arvados_api_client_test.rb b/apps/workbench/test/unit/arvados_api_client_test.rb
new file mode 100644 (file)
index 0000000..6d071de
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ArvadosApiClientTest < ActiveSupport::TestCase
+  # We use a mock instead of making real API calls, so there's no need to reset.
+  reset_api_fixtures :after_each_test, false
+
+  test 'successful stubbed api request' do
+    stub_api_calls_with_body '{"foo":"bar","baz":0}'
+    use_token :active
+    resp = ArvadosApiClient.new_or_current.api Link, ''
+    assert_equal Hash, resp.class
+    assert_equal 'bar', resp[:foo]
+    assert_equal 0, resp[:baz]
+  end
+
+  test 'exception if server returns non-JSON' do
+    stub_api_calls_with_invalid_json
+    assert_raises ArvadosApiClient::InvalidApiResponseException do
+      use_token :active
+      resp = ArvadosApiClient.new_or_current.api Link, ''
+    end
+  end
+end
diff --git a/apps/workbench/test/unit/arvados_base_test.rb b/apps/workbench/test/unit/arvados_base_test.rb
new file mode 100644 (file)
index 0000000..d0942dc
--- /dev/null
@@ -0,0 +1,91 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ArvadosBaseTest < ActiveSupport::TestCase
+  test '#save does not send unchanged string attributes' do
+    use_token :active do
+      fixture = api_fixture("collections")["foo_collection_in_aproject"]
+      c = Collection.find(fixture['uuid'])
+
+      new_name = 'name changed during test'
+
+      got_query = nil
+      stub_api_calls
+      stub_api_client.expects(:post).with do |url, query, opts={}|
+        got_query = query
+        true
+      end.returns fake_api_response('{}', 200, {})
+      c.name = new_name
+      c.save
+
+      updates = JSON.parse got_query['collection']
+      assert_equal updates['name'], new_name
+      refute_includes updates, 'description'
+      refute_includes updates, 'manifest_text'
+    end
+  end
+
+  test '#save does not send unchanged attributes missing because of select' do
+    use_token :active do
+      fixture = api_fixture("collections")["foo_collection_in_aproject"]
+      c = Collection.
+        filter([['uuid','=',fixture['uuid']]]).
+        select(['uuid']).
+        first
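+      # The condition below is intentionally always false (a String never
+      # equals true); it keeps the assertion around to restore if the
+      # MissingAttributeError check is ever re-enabled.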
+      if 'MissingAttribute check is re-enabled' == true
+        assert_raises ActiveModel::MissingAttributeError do
+          c.properties
+        end
+      else
+        assert_equal({}, c.properties)
+      end
+
+      got_query = nil
+      stub_api_calls
+      stub_api_client.expects(:post).with do |url, query, opts={}|
+        got_query = query
+        true
+      end.returns fake_api_response('{}', 200, {})
+      c.name = 'foo'
+      c.save
+
+      updates = JSON.parse got_query['collection']
+      assert_includes updates, 'name'
+      refute_includes updates, 'description'
+      refute_includes updates, 'properties'
+    end
+  end
+
+  [false,
+   {},
+   {'foo' => 'bar'},
+  ].each do |init_props|
+    test "#save sends serialized attributes if changed from #{init_props}" do
+      use_token :active do
+        fixture = api_fixture("collections")["foo_collection_in_aproject"]
+        c = Collection.find(fixture['uuid'])
+
+        if init_props
+          c.properties = init_props
+          c.save!
+        end
+
+        got_query = nil
+        stub_api_calls
+        stub_api_client.expects(:post).with do |url, query, opts={}|
+          got_query = query
+          true
+        end.returns fake_api_response('{"etag":"fake","uuid":"fake"}', 200, {})
+
+        c.properties['baz'] = 'qux'
+        c.save!
+
+        updates = JSON.parse got_query['collection']
+        assert_includes updates, 'properties'
+      end
+    end
+  end
+end
diff --git a/apps/workbench/test/unit/arvados_resource_list_test.rb b/apps/workbench/test/unit/arvados_resource_list_test.rb
new file mode 100644 (file)
index 0000000..e9eb2f8
--- /dev/null
@@ -0,0 +1,106 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ResourceListTest < ActiveSupport::TestCase
+
+  reset_api_fixtures :after_each_test, false
+
+  test 'links_for on a resource list that does not return links' do
+    use_token :active
+    results = Specimen.all
+    assert_equal [], results.links_for(api_fixture('users')['active']['uuid'])
+  end
+
+  test 'get all items by default' do
+    use_token :admin
+    a = 0
+    Collection.where(owner_uuid: 'zzzzz-j7d0g-0201collections').each do
+      a += 1
+    end
+    assert_equal 201, a
+  end
+
+  test 'prefetch all items' do
+    use_token :admin
+    a = 0
+    Collection.where(owner_uuid: 'zzzzz-j7d0g-0201collections').each do
+      a += 1
+    end
+    assert_equal 201, a
+  end
+
+  test 'get limited items' do
+    use_token :admin
+    a = 0
+    Collection.where(owner_uuid: 'zzzzz-j7d0g-0201collections').limit(51).each do
+      a += 1
+    end
+    assert_equal 51, a
+  end
+
+  test 'get limited items, limit % page_size != 0' do
+    skip "Requires server MAX_LIMIT < 200 which is not currently the default"
+
+    use_token :admin
+    max_page_size = Collection.
+      where(owner_uuid: 'zzzzz-j7d0g-0201collections').
+      limit(1000000000).
+      fetch_multiple_pages(false).
+      count
+    # Conditions necessary for this test to be valid:
+    assert_operator 200, :>, max_page_size
+    assert_operator 1, :<, max_page_size
+    # Verify that the server really sends max_page_size when asked for max_page_size+1
+    assert_equal max_page_size, Collection.
+      where(owner_uuid: 'zzzzz-j7d0g-0201collections').
+      limit(max_page_size+1).
+      fetch_multiple_pages(false).
+      results.
+      count
+    # Now that we know max_page_size+1 falls in the middle of page 2,
+    # make sure #each returns page 1 and only the requested part of
+    # page 2.
+    a = 0
+    saw_uuid = {}
+    Collection.where(owner_uuid: 'zzzzz-j7d0g-0201collections').limit(max_page_size+1).each do |item|
+      a += 1
+      saw_uuid[item.uuid] = true
+    end
+    assert_equal max_page_size+1, a
+    # Ensure no overlap between pages
+    assert_equal max_page_size+1, saw_uuid.size
+  end
+
+  test 'get single page of items' do
+    use_token :admin
+    a = 0
+    c = Collection.where(owner_uuid: 'zzzzz-j7d0g-0201collections').fetch_multiple_pages(false)
+    c.each do
+      a += 1
+    end
+
+    assert_operator a, :<, 201
+    assert_equal c.result_limit, a
+  end
+
+  test 'get empty set' do
+    use_token :admin
+    c = Collection.
+      where(owner_uuid: 'doesn-texis-tdoesntexistdoe').
+      fetch_multiple_pages(false)
+    # Important: check c.result_offset before calling c.results here.
+    assert_equal 0, c.result_offset
+    assert_equal 0, c.items_available
+    assert_empty c.results
+  end
+
+  test 'count=none' do
+    use_token :active
+    c = Collection.with_count('none')
+    assert_nil c.items_available
+    refute_empty c.results
+  end
+end
diff --git a/apps/workbench/test/unit/collection_test.rb b/apps/workbench/test/unit/collection_test.rb
new file mode 100644 (file)
index 0000000..870f8ba
--- /dev/null
@@ -0,0 +1,78 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class CollectionTest < ActiveSupport::TestCase
+  test 'recognize empty blob locator' do
+    ['d41d8cd98f00b204e9800998ecf8427e+0',
+     'd41d8cd98f00b204e9800998ecf8427e',
+     'd41d8cd98f00b204e9800998ecf8427e+0+Xyzzy'].each do |x|
+      assert_equal true, Collection.is_empty_blob_locator?(x)
+    end
+    ['d41d8cd98f00b204e9800998ecf8427e0',
+     'acbd18db4cc2f85cedef654fccc4a4d8+3',
+     'acbd18db4cc2f85cedef654fccc4a4d8+0'].each do |x|
+      assert_equal false, Collection.is_empty_blob_locator?(x)
+    end
+  end
+
+  def get_files_tree(coll_name)
+    use_token :admin
+    Collection.find(api_fixture('collections')[coll_name]['uuid']).files_tree
+  end
+
+  test "easy files_tree" do
+    files_in = lambda do |dirname|
+      (1..3).map { |n| [dirname, "file#{n}", 0] }
+    end
+    assert_equal([['.', 'dir1', nil], ['./dir1', 'subdir', nil]] +
+                 files_in['./dir1/subdir'] + files_in['./dir1'] +
+                 [['.', 'dir2', nil]] + files_in['./dir2'] + files_in['.'],
+                 get_files_tree('multilevel_collection_1'),
+                 "Collection file tree was malformed")
+  end
+
+  test "files_tree with files deep in subdirectories" do
+    # This test makes sure files_tree generates synthetic directory entries.
+    # The manifest doesn't list directories with no files.
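+    # E.g., a manifest stream like "./dir1/sub1 <locator> 0:0:a 0:0:b" yields
+    # synthetic tree entries for 'dir1' and 'dir1/sub1' even though only the
+    # files a and b appear explicitly (illustrative, not the fixture text).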
+    assert_equal([['.', 'dir1', nil], ['./dir1', 'sub1', nil],
+                  ['./dir1/sub1', 'a', 0], ['./dir1/sub1', 'b', 0],
+                  ['.', 'dir2', nil], ['./dir2', 'sub2', nil],
+                  ['./dir2/sub2', 'c', 0], ['./dir2/sub2', 'd', 0]],
+                 get_files_tree('multilevel_collection_2'),
+                 "Collection file tree was malformed")
+  end
+
+  test "portable_data_hash never editable" do
+    refute(Collection.new.attribute_editable?("portable_data_hash", :ever))
+  end
+
+  test "admin can edit name" do
+    use_token :admin
+    assert(find_fixture(Collection, "foo_file").attribute_editable?("name"),
+           "admin not allowed to edit collection name")
+  end
+
+  test "project owner can edit name" do
+    use_token :active
+    assert(find_fixture(Collection, "foo_collection_in_aproject")
+             .attribute_editable?("name"),
+           "project owner not allowed to edit collection name")
+  end
+
+  test "project admin can edit name" do
+    use_token :subproject_admin
+    assert(find_fixture(Collection, "baz_file_in_asubproject")
+             .attribute_editable?("name"),
+           "project admin not allowed to edit collection name")
+  end
+
+  test "project viewer cannot edit name" do
+    use_token :project_viewer
+    refute(find_fixture(Collection, "foo_collection_in_aproject")
+             .attribute_editable?("name"),
+           "project viewer allowed to edit collection name")
+  end
+end
diff --git a/apps/workbench/test/unit/disabled_api_test.rb b/apps/workbench/test/unit/disabled_api_test.rb
new file mode 100644 (file)
index 0000000..9e18a70
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class DisabledApiTest < ActiveSupport::TestCase
+  test 'Job.creatable? reflects whether jobs.create API is enabled' do
+    use_token(:active) do
+      assert(Job.creatable?)
+    end
+    dd = ArvadosApiClient.new_or_current.discovery.deep_dup
+    dd[:resources][:jobs][:methods].delete(:create)
+    ArvadosApiClient.any_instance.stubs(:discovery).returns(dd)
+    use_token(:active) do
+      refute(Job.creatable?)
+    end
+  end
+end
diff --git a/apps/workbench/test/unit/group_test.rb b/apps/workbench/test/unit/group_test.rb
new file mode 100644 (file)
index 0000000..7040f97
--- /dev/null
@@ -0,0 +1,44 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class GroupTest < ActiveSupport::TestCase
+  test "get contents with names" do
+    use_token :active
+    oi = Group.
+      find(api_fixture('groups')['asubproject']['uuid']).
+      contents()
+    assert_operator(0, :<, oi.count,
+                    "Expected to find some items belonging to :active user")
+    assert_operator(0, :<, oi.items_available,
+                    "Expected contents response to have items_available > 0")
+    oi_uuids = oi.collect { |i| i['uuid'] }
+
+    expect_uuid = api_fixture('specimens')['in_asubproject']['uuid']
+    assert_includes(oi_uuids, expect_uuid,
+                    "Expected '#{expect_uuid}' in asubproject's contents")
+  end
+
+  test "can select specific group columns" do
+    use_token :admin
+    Group.select(["uuid", "name"]).limit(5).each do |user|
+      assert_not_nil user.uuid
+      assert_not_nil user.name
+      assert_nil user.owner_uuid
+    end
+  end
+
+  test "project editable by its admin" do
+    use_token :subproject_admin
+    project = Group.find(api_fixture("groups")["asubproject"]["uuid"])
+    assert(project.editable?, "project not editable by admin")
+  end
+
+  test "project not editable by reader" do
+    use_token :project_viewer
+    project = Group.find(api_fixture("groups")["aproject"]["uuid"])
+    refute(project.editable?, "project editable by reader")
+  end
+end
diff --git a/apps/workbench/test/unit/helpers/api_client_authorizations_helper_test.rb b/apps/workbench/test/unit/helpers/api_client_authorizations_helper_test.rb
new file mode 100644 (file)
index 0000000..01ed430
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ApiClientAuthorizationsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/authorized_keys_helper_test.rb b/apps/workbench/test/unit/helpers/authorized_keys_helper_test.rb
new file mode 100644 (file)
index 0000000..010a0fe
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class AuthorizedKeysHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/collections_helper_test.rb b/apps/workbench/test/unit/helpers/collections_helper_test.rb
new file mode 100644 (file)
index 0000000..15e2a94
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class CollectionsHelperTest < ActionView::TestCase
+  test "file_path generates short names" do
+    assert_equal('foo', CollectionsHelper.file_path(['.', 'foo', 0]),
+                 "wrong result for filename in collection root")
+    assert_equal('foo/bar', CollectionsHelper.file_path(['foo', 'bar', 0]),
+                 "wrong result for filename in directory without leading .")
+    assert_equal('foo/bar', CollectionsHelper.file_path(['./foo', 'bar', 0]),
+                 "wrong result for filename in directory with leading .")
+  end
+end
diff --git a/apps/workbench/test/unit/helpers/groups_helper_test.rb b/apps/workbench/test/unit/helpers/groups_helper_test.rb
new file mode 100644 (file)
index 0000000..1bde02e
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class GroupsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/humans_helper_test.rb b/apps/workbench/test/unit/helpers/humans_helper_test.rb
new file mode 100644 (file)
index 0000000..22f9e81
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class HumansHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/job_tasks_helper_test.rb b/apps/workbench/test/unit/helpers/job_tasks_helper_test.rb
new file mode 100644 (file)
index 0000000..af0302c
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class JobTasksHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/jobs_helper_test.rb b/apps/workbench/test/unit/helpers/jobs_helper_test.rb
new file mode 100644 (file)
index 0000000..9d64b7d
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class JobsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/keep_disks_helper_test.rb b/apps/workbench/test/unit/helpers/keep_disks_helper_test.rb
new file mode 100644 (file)
index 0000000..9dcc619
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class KeepDisksHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/links_helper_test.rb b/apps/workbench/test/unit/helpers/links_helper_test.rb
new file mode 100644 (file)
index 0000000..2d84ea6
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class LinksHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/logs_helper_test.rb b/apps/workbench/test/unit/helpers/logs_helper_test.rb
new file mode 100644 (file)
index 0000000..616f6e6
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class LogsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/nodes_helper_test.rb b/apps/workbench/test/unit/helpers/nodes_helper_test.rb
new file mode 100644 (file)
index 0000000..8a92eb9
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class NodesHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/pipeline_instances_helper_test.rb b/apps/workbench/test/unit/helpers/pipeline_instances_helper_test.rb
new file mode 100644 (file)
index 0000000..9d3b5c4
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PipelineInstancesHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/pipeline_templates_helper_test.rb b/apps/workbench/test/unit/helpers/pipeline_templates_helper_test.rb
new file mode 100644 (file)
index 0000000..3d3406d
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PipelineTemplatesHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/projects_helper_test.rb b/apps/workbench/test/unit/helpers/projects_helper_test.rb
new file mode 100644 (file)
index 0000000..1bde02e
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ProjectsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/repositories_helper_test.rb b/apps/workbench/test/unit/helpers/repositories_helper_test.rb
new file mode 100644 (file)
index 0000000..33cb590
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class RepositoriesHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/sessions_helper_test.rb b/apps/workbench/test/unit/helpers/sessions_helper_test.rb
new file mode 100644 (file)
index 0000000..98467f9
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class SessionsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/specimens_helper_test.rb b/apps/workbench/test/unit/helpers/specimens_helper_test.rb
new file mode 100644 (file)
index 0000000..3709198
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class SpecimensHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/traits_helper_test.rb b/apps/workbench/test/unit/helpers/traits_helper_test.rb
new file mode 100644 (file)
index 0000000..03b6a97
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class TraitsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/user_agreements_helper_test.rb b/apps/workbench/test/unit/helpers/user_agreements_helper_test.rb
new file mode 100644 (file)
index 0000000..3e9a6b9
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class UserAgreementsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/users_helper_test.rb b/apps/workbench/test/unit/helpers/users_helper_test.rb
new file mode 100644 (file)
index 0000000..808736d
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class UsersHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/virtual_machines_helper_test.rb b/apps/workbench/test/unit/helpers/virtual_machines_helper_test.rb
new file mode 100644 (file)
index 0000000..99fc258
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class VirtualMachinesHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/job_test.rb b/apps/workbench/test/unit/job_test.rb
new file mode 100644 (file)
index 0000000..85d2ef3
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class JobTest < ActiveSupport::TestCase
+  test "admin can edit description" do
+    use_token :admin
+    assert(find_fixture(Job, "job_in_subproject")
+             .attribute_editable?("description"),
+           "admin not allowed to edit job description")
+  end
+
+  test "project owner can edit description" do
+    use_token :active
+    assert(find_fixture(Job, "job_in_subproject")
+             .attribute_editable?("description"),
+           "project owner not allowed to edit job description")
+  end
+
+  test "project admin can edit description" do
+    use_token :subproject_admin
+    assert(find_fixture(Job, "job_in_subproject")
+             .attribute_editable?("description"),
+           "project admin not allowed to edit job description")
+  end
+
+  test "project viewer cannot edit description" do
+    use_token :project_viewer
+    refute(find_fixture(Job, "job_in_subproject")
+             .attribute_editable?("description"),
+           "project viewer allowed to edit job description")
+  end
+end
diff --git a/apps/workbench/test/unit/link_test.rb b/apps/workbench/test/unit/link_test.rb
new file mode 100644 (file)
index 0000000..9fbf98d
--- /dev/null
@@ -0,0 +1,55 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class LinkTest < ActiveSupport::TestCase
+
+  reset_api_fixtures :after_each_test, false
+
+  def uuid_for(fixture_name, object_name)
+    api_fixture(fixture_name)[object_name]["uuid"]
+  end
+
+  test "active user can get permissions for owned project object" do
+    use_token :active
+    project = Group.find(uuid_for("groups", "aproject"))
+    refute_empty(Link.permissions_for(project),
+                 "no permissions found for managed project")
+  end
+
+  test "active user can get permissions for owned project by UUID" do
+    use_token :active
+    refute_empty(Link.permissions_for(uuid_for("groups", "aproject")),
+                 "no permissions found for managed project")
+  end
+
+  test "admin can get permissions for project object" do
+    use_token :admin
+    project = Group.find(uuid_for("groups", "aproject"))
+    refute_empty(Link.permissions_for(project),
+                 "no permissions found for managed project")
+  end
+
+  test "admin can get permissions for project by UUID" do
+    use_token :admin
+    refute_empty(Link.permissions_for(uuid_for("groups", "aproject")),
+                 "no permissions found for managed project")
+  end
+
+  test "project viewer can't get permissions for readable project object" do
+    use_token :project_viewer
+    project = Group.find(uuid_for("groups", "aproject"))
+    assert_raises(ArvadosApiClient::AccessForbiddenException) do
+      Link.permissions_for(project)
+    end
+  end
+
+  test "project viewer can't get permissions for readable project by UUID" do
+    use_token :project_viewer
+    assert_raises(ArvadosApiClient::AccessForbiddenException) do
+      Link.permissions_for(uuid_for("groups", "aproject"))
+    end
+  end
+end
diff --git a/apps/workbench/test/unit/pipeline_instance_test.rb b/apps/workbench/test/unit/pipeline_instance_test.rb
new file mode 100644 (file)
index 0000000..3ff3fcf
--- /dev/null
@@ -0,0 +1,118 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PipelineInstanceTest < ActiveSupport::TestCase
+
+  reset_api_fixtures :after_each_test, false
+
+  def find_pi_with(token_name, pi_name)
+    use_token token_name
+    find_fixture(PipelineInstance, pi_name)
+  end
+
+  def attribute_editable_for?(token_name, pi_name, attr_name, ever=nil)
+    find_pi_with(token_name, pi_name).attribute_editable?(attr_name, ever)
+  end
+
+  test "admin can edit name" do
+    assert(attribute_editable_for?(:admin, "new_pipeline_in_subproject",
+                                   "name"),
+           "admin not allowed to edit pipeline instance name")
+  end
+
+  test "project owner can edit name" do
+    assert(attribute_editable_for?(:active, "new_pipeline_in_subproject",
+                                   "name"),
+           "project owner not allowed to edit pipeline instance name")
+  end
+
+  test "project admin can edit name" do
+    assert(attribute_editable_for?(:subproject_admin,
+                                   "new_pipeline_in_subproject", "name"),
+           "project admin not allowed to edit pipeline instance name")
+  end
+
+  test "project viewer cannot edit name" do
+    refute(attribute_editable_for?(:project_viewer,
+                                   "new_pipeline_in_subproject", "name"),
+           "project viewer allowed to edit pipeline instance name")
+  end
+
+  test "name editable on completed pipeline" do
+    assert(attribute_editable_for?(:active, "has_component_with_completed_jobs",
+                                   "name"),
+           "name not editable on complete pipeline")
+  end
+
+  test "components editable on new pipeline" do
+    assert(attribute_editable_for?(:active, "new_pipeline", "components"),
+           "components not editable on new pipeline")
+  end
+
+  test "components not editable on completed pipeline" do
+    refute(attribute_editable_for?(:active, "has_component_with_completed_jobs",
+                                   "components"),
+           "components not editable on new pipeline")
+  end
+
+  test "job_logs for partially complete pipeline" do
+    log_uuid = api_fixture("collections", "real_log_collection", "uuid")
+    pi = find_pi_with(:active, "running_pipeline_with_complete_job")
+    assert_equal({previous: log_uuid, running: nil}, pi.job_log_ids)
+  end
+
+  test "job_logs for complete pipeline" do
+    log_uuid = api_fixture("collections", "real_log_collection", "uuid")
+    pi = find_pi_with(:active, "complete_pipeline_with_two_jobs")
+    assert_equal({ancient: log_uuid, previous: log_uuid}, pi.job_log_ids)
+  end
+
+  test "job_logs for malformed pipeline" do
+    pi = find_pi_with(:active, "components_is_jobspec")
+    assert_empty(pi.job_log_ids.select { |_, log| not log.nil? })
+  end
+
+  def check_stderr_logs(token_name, pi_name, log_name)
+    pi = find_pi_with(token_name, pi_name)
+    actual_logs = pi.stderr_log_lines
+    expected_text = api_fixture("logs", log_name, "properties", "text")
+    expected_text.each_line do |log_line|
+      assert_includes(actual_logs, log_line.chomp)
+    end
+  end
+
+  test "stderr_logs for running pipeline" do
+    check_stderr_logs(:active,
+                      "pipeline_in_publicly_accessible_project",
+                      "log_line_for_pipeline_in_publicly_accessible_project")
+  end
+
+  test "stderr_logs for job in complete pipeline" do
+    check_stderr_logs(:active,
+                      "failed_pipeline_with_two_jobs",
+                      "crunchstat_for_previous_job")
+  end
+
+  test "has_readable_logs? for unrun pipeline" do
+    pi = find_pi_with(:active, "new_pipeline")
+    refute(pi.has_readable_logs?)
+  end
+
+  test "has_readable_logs? for running pipeline" do
+    pi = find_pi_with(:active, "running_pipeline_with_complete_job")
+    assert(pi.has_readable_logs?)
+  end
+
+  test "has_readable_logs? for complete pipeline" do
+    pi = find_pi_with(:active, "pipeline_in_publicly_accessible_project_but_other_objects_elsewhere")
+    assert(pi.has_readable_logs?)
+  end
+
+  test "has_readable_logs? for complete pipeline when jobs unreadable" do
+    pi = find_pi_with(:anonymous, "pipeline_in_publicly_accessible_project_but_other_objects_elsewhere")
+    refute(pi.has_readable_logs?)
+  end
+end
diff --git a/apps/workbench/test/unit/repository_test.rb b/apps/workbench/test/unit/repository_test.rb
new file mode 100644 (file)
index 0000000..d62d02b
--- /dev/null
@@ -0,0 +1,22 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class RepositoryTest < ActiveSupport::TestCase
+  [
+    ['admin', true],
+    ['active', false],
+  ].each do |user, can_edit|
+    test "#{user} can edit attributes #{can_edit}" do
+      use_token user
+      attrs = Repository.new.editable_attributes
+      if can_edit
+        refute_empty attrs
+      else
+        assert_empty attrs
+      end
+    end
+  end
+end
diff --git a/apps/workbench/test/unit/user_test.rb b/apps/workbench/test/unit/user_test.rb
new file mode 100644 (file)
index 0000000..fa9a69d
--- /dev/null
@@ -0,0 +1,30 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class UserTest < ActiveSupport::TestCase
+  test "can select specific user columns" do
+    use_token :admin
+    User.select(["uuid", "is_active"]).limit(5).each do |user|
+      assert_not_nil user.uuid
+      assert_not_nil user.is_active
+      assert_nil user.first_name
+    end
+  end
+
+  test "User.current doesn't return anonymous user when using invalid token" do
+    # Set up anonymous user token
+    Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+    # First, try with a valid user
+    use_token :active
+    u = User.current
+    assert(find_fixture(User, "active").uuid == u.uuid)
+    # Next, simulate an invalid token
+    Thread.current[:arvados_api_token] = 'thistokenwontwork'
+    assert_raises(ArvadosApiClient::NotLoggedInException) do
+      User.current
+    end
+  end
+end
diff --git a/apps/workbench/test/unit/work_unit_test.rb b/apps/workbench/test/unit/work_unit_test.rb
new file mode 100644 (file)
index 0000000..1daf582
--- /dev/null
@@ -0,0 +1,109 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class WorkUnitTest < ActiveSupport::TestCase
+
+  reset_api_fixtures :after_each_test, false
+
+  setup do
+    Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+  end
+
+  [
+    [Job, 'running_job_with_components', "jwu", 2, "Running", nil, 0.5],
+    [PipelineInstance, 'pipeline_in_running_state', nil, 1, "Running", nil, 0.0],
+    [PipelineInstance, 'has_component_with_completed_jobs', nil, 3, "Complete", true, 1.0],
+    [PipelineInstance, 'pipeline_with_tagged_collection_input', "pwu", 1, "Ready", nil, 0.0],
+    [PipelineInstance, 'failed_pipeline_with_two_jobs', nil, 2, "Cancelled", false, 0.0],
+    [Container, 'requester', 'cwu', 1, "Complete", true, 1.0],
+    [ContainerRequest, 'cr_for_requester', 'cwu', 1, "Complete", true, 1.0],
+    [ContainerRequest, 'queued', 'cwu', 0, "Queued", nil, 0.0],   # priority 1
+    [ContainerRequest, 'canceled_with_queued_container', 'cwu', 0, "Cancelled", false, 0.0],
+    [ContainerRequest, 'canceled_with_locked_container', 'cwu', 0, "Cancelled", false, 0.0],
+    [ContainerRequest, 'canceled_with_running_container', 'cwu', 1, "Running", nil, 0.0],
+  ].each do |type, fixture, label, num_children, state, success, progress|
+    test "children of #{fixture}" do
+      use_token 'active'
+      obj = find_fixture(type, fixture)
+      wu = obj.work_unit(label)
+
+      if label != nil
+        assert_equal(label, wu.label)
+      else
+        assert_equal(obj.name, wu.label)
+      end
+      assert_equal(obj['uuid'], wu.uuid)
+      assert_equal(state, wu.state_label)
+      assert_equal(success, wu.success?)
+      assert_equal(progress, wu.progress)
+
+      assert_equal(num_children, wu.children.size)
+      wu.children.each do |child|
+        assert_equal(true, child.respond_to?(:script))
+      end
+    end
+  end
+
+  [
+    ['cr_for_failed', 'Failed', 33],
+    ['completed', 'Complete', 0],
+  ].each do |cr_fixture, state, exit_code|
+    test "Completed ContainerRequest state = #{state} with exit_code = #{exit_code}" do
+      use_token 'active'
+      obj = find_fixture(ContainerRequest, cr_fixture)
+      wu = obj.work_unit
+      assert_equal state, wu.state_label
+      assert_equal exit_code, wu.exit_code
+    end
+  end
+
+  [
+    [Job, 'running_job_with_components', 1, 1, nil, true],
+    [Job, 'queued', nil, 0, 1, false],
+    [PipelineInstance, 'pipeline_in_running_state', 1, 1, nil, false],
+    [PipelineInstance, 'has_component_with_completed_jobs', 60, 60, nil, true],
+  ].each do |type, fixture, walltime, cputime, queuedtime, cputime_more_than_walltime|
+    test "times for #{fixture}" do
+      use_token 'active'
+      obj = find_fixture(type, fixture)
+      wu = obj.work_unit
+
+      if walltime
+        assert_equal true, (wu.walltime >= walltime)
+      else
+        assert_equal walltime, wu.walltime
+      end
+
+      if cputime
+        assert_equal true, (wu.cputime >= cputime)
+      else
+        assert_equal cputime, wu.cputime
+      end
+
+      if queuedtime
+        assert_equal true, (wu.queuedtime >= queuedtime)
+      else
+        assert_equal queuedtime, wu.queuedtime
+      end
+
+      assert_equal cputime_more_than_walltime, (wu.cputime > wu.walltime) if wu.cputime and wu.walltime
+    end
+  end
+
+  test 'can_cancel?' do
+    use_token 'active' do
+      assert find_fixture(Job, 'running').work_unit.can_cancel?
+      refute find_fixture(Container, 'running').work_unit.can_cancel?
+      assert find_fixture(ContainerRequest, 'running').work_unit.can_cancel?
+    end
+    use_token 'spectator' do
+      refute find_fixture(ContainerRequest, 'running_anonymous_accessible').work_unit.can_cancel?
+    end
+    use_token 'admin' do
+      assert find_fixture(ContainerRequest, 'running_anonymous_accessible').work_unit.can_cancel?
+    end
+  end
+end
diff --git a/apps/workbench/vendor/assets/javascripts/.gitkeep b/apps/workbench/vendor/assets/javascripts/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/vendor/assets/javascripts/jquery.number.min.js b/apps/workbench/vendor/assets/javascripts/jquery.number.min.js
new file mode 100644 (file)
index 0000000..4fce02b
--- /dev/null
@@ -0,0 +1,2 @@
+/*! jQuery number 2.1.5 (c) github.com/teamdf/jquery-number | opensource.teamdf.com/license */
+(function(e){"use strict";function t(e,t){if(this.createTextRange){var n=this.createTextRange();n.collapse(true);n.moveStart("character",e);n.moveEnd("character",t-e);n.select()}else if(this.setSelectionRange){this.focus();this.setSelectionRange(e,t)}}function n(e){var t=this.value.length;e=e.toLowerCase()=="start"?"Start":"End";if(document.selection){var n=document.selection.createRange(),r,i,s;r=n.duplicate();r.expand("textedit");r.setEndPoint("EndToEnd",n);i=r.text.length-n.text.length;s=i+n.text.length;return e=="Start"?i:s}else if(typeof this["selection"+e]!="undefined"){t=this["selection"+e]}return t}var r={codes:{46:127,188:44,109:45,190:46,191:47,192:96,220:92,222:39,221:93,219:91,173:45,187:61,186:59,189:45,110:46},shifts:{96:"~",49:"!",50:"@",51:"#",52:"$",53:"%",54:"^",55:"&",56:"*",57:"(",48:")",45:"_",61:"+",91:"{",93:"}",92:"|",59:":",39:'"',44:"<",46:">",47:"?"}};e.fn.number=function(i,s,o,u){u=typeof u==="undefined"?",":u;o=typeof o==="undefined"?".":o;s=typeof s==="undefined"?0:s;var a="\\u"+("0000"+o.charCodeAt(0).toString(16)).slice(-4),f=new RegExp("[^"+a+"0-9]","g"),l=new RegExp(a,"g");if(i===true){if(this.is("input:text")){return this.on({"keydown.format":function(i){var a=e(this),f=a.data("numFormat"),l=i.keyCode?i.keyCode:i.which,c="",h=n.apply(this,["start"]),p=n.apply(this,["end"]),d="",v=false;if(r.codes.hasOwnProperty(l)){l=r.codes[l]}if(!i.shiftKey&&l>=65&&l<=90){l+=32}else if(!i.shiftKey&&l>=69&&l<=105){l-=48}else if(i.shiftKey&&r.shifts.hasOwnProperty(l)){c=r.shifts[l]}if(c=="")c=String.fromCharCode(l);if(l!=8&&l!=45&&l!=127&&c!=o&&!c.match(/[0-9]/)){var m=i.keyCode?i.keyCode:i.which;if(m==46||m==8||m==127||m==9||m==27||m==13||(m==65||m==82||m==80||m==83||m==70||m==72||m==66||m==74||m==84||m==90||m==61||m==173||m==48)&&(i.ctrlKey||i.metaKey)===true||(m==86||m==67||m==88)&&(i.ctrlKey||i.metaKey)===true||m>=35&&m<=39||m>=112&&m<=123){return}i.preventDefault();return false}if(h==0&&p==this.value.length||a.val()==0){if(l==8){h=p=1;this.value="";f.init=s>0?-1:0;f.c=s>0?-(s+1):0;t.apply(this,[0,0])}else if(c==o){h=p=1;this.value="0"+o+(new Array(s+1)).join("0");f.init=s>0?1:0;f.c=s>0?-(s+1):0}else if(l==45){h=p=2;this.value="-0"+o+(new Array(s+1)).join("0");f.init=s>0?1:0;f.c=s>0?-(s+1):0;t.apply(this,[2,2])}else{f.init=s>0?-1:0;f.c=s>0?-s:0}}else{f.c=p-this.value.length}f.isPartialSelection=h==p?false:true;if(s>0&&c==o&&h==this.value.length-s-1){f.c++;f.init=Math.max(0,f.init);i.preventDefault();v=this.value.length+f.c}else if(l==45&&(h!=0||this.value.indexOf("-")==0)){i.preventDefault()}else if(c==o){f.init=Math.max(0,f.init);i.preventDefault()}else if(s>0&&l==127&&h==this.value.length-s-1){i.preventDefault()}else if(s>0&&l==8&&h==this.value.length-s){i.preventDefault();f.c--;v=this.value.length+f.c}else if(s>0&&l==127&&h>this.value.length-s-1){if(this.value==="")return;if(this.value.slice(h,h+1)!="0"){d=this.value.slice(0,h)+"0"+this.value.slice(h+1);a.val(d)}i.preventDefault();v=this.value.length+f.c}else if(s>0&&l==8&&h>this.value.length-s){if(this.value==="")return;if(this.value.slice(h-1,h)!="0"){d=this.value.slice(0,h-1)+"0"+this.value.slice(h);a.val(d)}i.preventDefault();f.c--;v=this.value.length+f.c}else if(l==127&&this.value.slice(h,h+1)==u){i.preventDefault()}else if(l==8&&this.value.slice(h-1,h)==u){i.preventDefault();f.c--;v=this.value.length+f.c}else 
if(s>0&&h==p&&this.value.length>s+1&&h>this.value.length-s-1&&isFinite(+c)&&!i.metaKey&&!i.ctrlKey&&!i.altKey&&c.length===1){if(p===this.value.length){d=this.value.slice(0,h-1)}else{d=this.value.slice(0,h)+this.value.slice(h+1)}this.value=d;v=h}if(v!==false){t.apply(this,[v,v])}a.data("numFormat",f)},"keyup.format":function(r){var i=e(this),o=i.data("numFormat"),u=r.keyCode?r.keyCode:r.which,a=n.apply(this,["start"]),f=n.apply(this,["end"]),l;if(a===0&&f===0&&(u===189||u===109)){i.val("-"+i.val());a=1;o.c=1-this.value.length;o.init=1;i.data("numFormat",o);l=this.value.length+o.c;t.apply(this,[l,l])}if(this.value===""||(u<48||u>57)&&(u<96||u>105)&&u!==8&&u!==46&&u!==110)return;i.val(i.val());if(s>0){if(o.init<1){a=this.value.length-s-(o.init<0?1:0);o.c=a-this.value.length;o.init=1;i.data("numFormat",o)}else if(a>this.value.length-s&&u!=8){o.c++;i.data("numFormat",o)}}if(u==46&&!o.isPartialSelection){o.c++;i.data("numFormat",o)}l=this.value.length+o.c;t.apply(this,[l,l])},"paste.format":function(t){var n=e(this),r=t.originalEvent,i=null;if(window.clipboardData&&window.clipboardData.getData){i=window.clipboardData.getData("Text")}else if(r.clipboardData&&r.clipboardData.getData){i=r.clipboardData.getData("text/plain")}n.val(i);t.preventDefault();return false}}).each(function(){var t=e(this).data("numFormat",{c:-(s+1),decimals:s,thousands_sep:u,dec_point:o,regex_dec_num:f,regex_dec:l,init:this.value.indexOf(".")?true:false});if(this.value==="")return;t.val(t.val())})}else{return this.each(function(){var t=e(this),n=+t.text().replace(f,"").replace(l,".");t.number(!isFinite(n)?0:+n,s,o,u)})}}return this.text(e.number.apply(window,arguments))};var i=null,s=null;if(e.isPlainObject(e.valHooks.text)){if(e.isFunction(e.valHooks.text.get))i=e.valHooks.text.get;if(e.isFunction(e.valHooks.text.set))s=e.valHooks.text.set}else{e.valHooks.text={}}e.valHooks.text.get=function(t){var n=e(t),r,s,o=n.data("numFormat");if(!o){if(e.isFunction(i)){return i(t)}else{return undefined}}else{if(t.value==="")return"";r=+t.value.replace(o.regex_dec_num,"").replace(o.regex_dec,".");return(t.value.indexOf("-")===0?"-":"")+(isFinite(r)?r:0)}};e.valHooks.text.set=function(t,n){var r=e(t),i=r.data("numFormat");if(!i){if(e.isFunction(s)){return s(t,n)}else{return undefined}}else{var o=e.number(n,i.decimals,i.dec_point,i.thousands_sep);return t.value=o}};e.number=function(e,t,n,r){r=typeof r==="undefined"?",":r;n=typeof n==="undefined"?".":n;t=!isFinite(+t)?0:Math.abs(t);var i="\\u"+("0000"+n.charCodeAt(0).toString(16)).slice(-4);var s="\\u"+("0000"+r.charCodeAt(0).toString(16)).slice(-4);e=(e+"").replace(".",n).replace(new RegExp(s,"g"),"").replace(new RegExp(i,"g"),".").replace(new RegExp("[^0-9+-Ee.]","g"),"");var o=!isFinite(+e)?0:+e,u="",a=function(e,t){var n=Math.pow(10,t);return""+Math.round(e*n)/n};u=(t?a(o,t):""+Math.round(o)).split(".");if(u[0].length>3){u[0]=u[0].replace(/\B(?=(?:\d{3})+(?!\d))/g,r)}if((u[1]||"").length<t){u[1]=u[1]||"";u[1]+=(new Array(t-u[1].length+1)).join("0")}return u.join(n)}})(jQuery)
diff --git a/apps/workbench/vendor/assets/stylesheets/.gitkeep b/apps/workbench/vendor/assets/stylesheets/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/vendor/plugins/.gitkeep b/apps/workbench/vendor/plugins/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/build/README b/build/README
new file mode 100644 (file)
index 0000000..4c67839
--- /dev/null
@@ -0,0 +1,34 @@
+Scripts in this directory:
+
+run-tests.sh                             Run unit and integration test suite.
+
+run-build-test-packages-one-target.sh    Entry point, wraps
+                                         run-build-packages-one-target.sh to
+                                         perform package building and testing
+                                         inside Docker.
+
+run-build-packages-one-target.sh         Build packages for one target inside Docker.
+
+run-build-packages-all-targets.sh        Run run-build-packages-one-target.sh
+                                         for every target.
+
+run-build-packages.sh                    Actually build packages.  Intended to run
+                                         inside a Docker container with a
+                                         proper build environment.
+
+run-build-packages-sso.sh                Build single-sign-on server packages.
+
+run-build-packages-python-and-ruby.sh    Build Python and Ruby packages suitable
+                                         for upload to PyPI and RubyGems.
+
+run-build-docker-images.sh               Build arvbox Docker images.
+
+run-build-docker-jobs-image.sh           Build arvados/jobs Docker image
+                                         (uses published Debian packages)
+
+build-dev-docker-jobs-image.sh           Build developer arvados/jobs Docker image
+                                         (uses local git tree)
+
+run-library.sh                           A library of functions shared by the
+                                         various scripts in this
+                                         directory.
\ No newline at end of file
diff --git a/build/build-dev-docker-jobs-image.sh b/build/build-dev-docker-jobs-image.sh
new file mode 100755 (executable)
index 0000000..2e4c457
--- /dev/null
@@ -0,0 +1,80 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+read -rd "\000" helpmessage <<EOF
+Build an arvados/jobs Docker image from local git tree.
+
+Intended for use by developers working on arvados-python-client or
+arvados-cwl-runner who need to run a crunch job with a custom package
+version.  Also supports building custom cwltool if CWLTOOL is set.
+
+Syntax:
+        WORKSPACE=/path/to/arvados $(basename $0)
+
+WORKSPACE=path         Path to the Arvados source tree to build packages from
+CWLTOOL=path           (optional) Path to cwltool git repository.
+SALAD=path             (optional) Path to schema_salad git repository.
+PYCMD=pythonexec       (optional) Specify the python executable to use in the docker image. Defaults to "python".
+
+EOF
+
+set -e
+
+if [[ -z "$WORKSPACE" ]] ; then
+    export WORKSPACE=$(readlink -f $(dirname $0)/..)
+    echo "Using WORKSPACE $WORKSPACE"
+fi
+
+if [[ -z "$ARVADOS_API_HOST" || -z "$ARVADOS_API_TOKEN" ]] ; then
+    echo "$helpmessage"
+    echo
+    echo "Must set ARVADOS_API_HOST and ARVADOS_API_TOKEN"
+    exit 1
+fi
+
+cd "$WORKSPACE"
+
+py=python
+if [[ -n "$PYCMD" ]] ; then
+    py="$PYCMD" ;
+fi
+
+(cd sdk/python && python setup.py sdist)
+sdk=$(cd sdk/python/dist && ls -t arvados-python-client-*.tar.gz | head -n1)
+
+(cd sdk/cwl && python setup.py sdist)
+runner=$(cd sdk/cwl/dist && ls -t arvados-cwl-runner-*.tar.gz | head -n1)
+
+rm -rf sdk/cwl/salad_dist
+mkdir -p sdk/cwl/salad_dist
+if [[ -n "$SALAD" ]] ; then
+    (cd "$SALAD" && python setup.py sdist)
+    salad=$(cd "$SALAD/dist" && ls -t schema-salad-*.tar.gz | head -n1)
+    cp "$SALAD/dist/$salad" $WORKSPACE/sdk/cwl/salad_dist
+fi
+
+rm -rf sdk/cwl/cwltool_dist
+mkdir -p sdk/cwl/cwltool_dist
+if [[ -n "$CWLTOOL" ]] ; then
+    (cd "$CWLTOOL" && python setup.py sdist)
+    cwltool=$(cd "$CWLTOOL/dist" && ls -t cwltool-*.tar.gz | head -n1)
+    cp "$CWLTOOL/dist/$cwltool" $WORKSPACE/sdk/cwl/cwltool_dist
+fi
+
+. build/run-library.sh
+
+python_sdk_ts=$(cd sdk/python && timestamp_from_git)
+cwl_runner_ts=$(cd sdk/cwl && timestamp_from_git)
+
+python_sdk_version=$(cd sdk/python && nohash_version_from_git 0.1)
+cwl_runner_version=$(cd sdk/cwl && nohash_version_from_git 1.0)
+
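+# If the Python SDK tree has a newer git timestamp than the CWL runner tree,
+# derive the runner version from the SDK so the image tag reflects the most
+# recent change.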
+if [[ $python_sdk_ts -gt $cwl_runner_ts ]]; then
+    cwl_runner_version=$(cd sdk/python && nohash_version_from_git 1.0)
+fi
+
+docker build --build-arg sdk=$sdk --build-arg runner=$runner --build-arg salad=$salad --build-arg cwltool=$cwltool --build-arg pythoncmd=$py -f "$WORKSPACE/sdk/dev-jobs.dockerfile" -t arvados/jobs:$cwl_runner_version "$WORKSPACE/sdk"
+echo arv-keepdocker arvados/jobs $cwl_runner_version
+arv-keepdocker arvados/jobs $cwl_runner_version
diff --git a/build/check-copyright-notices b/build/check-copyright-notices
new file mode 100755 (executable)
index 0000000..ba08f34
--- /dev/null
@@ -0,0 +1,220 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+fix=false
+while [[ "${@}" != "" ]]
+do
+    arg=${1}
+    shift
+    case ${arg} in
+        --help)
+            cat <<EOF
+Usage: $0 [--fix] [-- git-ls-args...]
+
+Options:
+
+--fix   Insert missing copyright notices where possible.
+
+Git arguments:
+
+Arguments after "--" are passed to \`git ls-files\`; this can be used to
+restrict the set of files to check.
+
+EOF
+            exit 2
+            ;;
+        --fix)
+            fix=true
+            ;;
+        --)
+            break
+            ;;
+        *)
+            echo >&2 "Unrecognized argument '${arg}'. Try $0 --help"
+            exit 2
+            ;;
+    esac
+done
+
+fixer() {
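+    # Prepend the expected copyright notice (passed via ${want}) to ${1},
+    # keeping any shebang line, YAML front matter, or XML declaration at the
+    # top of the file.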
+    want="${want}" perl -pi~ - "${1}" <<'EOF'
+BEGIN { undef $/ }
+s{^((\#\!.*?\n|\n*---\n.*?\n\.\.\.\n|<\?xml.*?>\n)\n?)?}{${2}$ENV{want}\n\n}ms
+EOF
+}
+
+IFS=$'\n' read -a ignores -r -d $'\000' <.licenseignore || true
+result=0
+
+coproc git ls-files -z ${@} </dev/null
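+# The loop below reads the NUL-delimited file list from the coprocess's
+# stdout (fd ${COPROC[0]}, attached at the "done" that closes the loop).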
+while read -rd $'\000' fnm
+do
+    grepAfter=2
+    grepBefore=0
+    cs=
+    cc=
+    ce=
+    fixer=
+    if [[ ! -f ${fnm} ]] || [[ -L ${fnm} ]] || [[ ! -s ${fnm} ]]
+    then
+        continue
+    fi
+
+    ignore=
+    for pattern in "${ignores[@]}"
+    do
+        if [[ ${fnm} == ${pattern} ]]
+        then
+            ignore=1
+        fi
+    done
+    if [[ ${ignore} = 1 ]]; then continue; fi
+
+    case ${fnm} in
+        Makefile | */Makefile \
+            | *.dockerfile | */Dockerfile.* | */Dockerfile | *.dockerignore \
+            | */MANIFEST.in | */fuse.conf | */gitolite.rc \
+            | *.pl | *.pm | *.PL \
+            | *.rb | *.rb.example | *.rake | *.ru \
+            | *.gemspec | */Gemfile | */Rakefile \
+            | services/login-sync/bin/* \
+            | sdk/cli/bin/* \
+            | *.py \
+            | sdk/python/bin/arv-* \
+            | sdk/cwl/bin/* \
+            | services/nodemanager/bin/* \
+            | services/fuse/bin/* \
+            | tools/crunchstat-summary/bin/* \
+            | crunch_scripts/* \
+            | *.yaml | *.yml | *.yml.example | *.cwl \
+            | *.sh | *.service \
+            | */run | */run-service | */restart-dns-server \
+            | */nodemanager/doc/*.cfg \
+            | */nodemanager/tests/fake*.cfg.template \
+            | */nginx.conf \
+            | build/build.list | *.R)
+            fixer=fixer
+            cc="#"
+            ;;
+        *.md)
+            fixer=fixer
+            cc="[//]: #"
+            ;;
+        *.rst)
+            fixer=fixer
+            cc=".."
+            ;;
+        *.erb)
+            fixer=fixer
+            cs="<%# "
+            cc=""
+            ce=" %>"
+            ;;
+        *.liquid)
+            fixer=fixer
+            cs=$'{% comment %}\n'
+            cc=""
+            ce=$'\n{% endcomment %}'
+            grepAfter=3
+            grepBefore=1
+            ;;
+        *.textile)
+            fixer=fixer
+            cs="###. "
+            cc="...."
+            ce=
+            ;;
+        *.css)
+            fixer=fixer
+            cs="/* "
+            cc=""
+            ce=" */"
+            ;;
+        *.coffee)
+            fixer=fixer
+            cs="### "
+            cc=""
+            ce=" ###"
+            ;;
+        *.go | *.scss | *.java | *.js)
+            fixer=fixer
+            cc="//"
+            ;;
+        *.sql)
+            fixer=fixer
+            cc="--"
+            ;;
+        *.html | *.svg)
+            fixer=fixer
+            cs="<!-- "
+            cc=""
+            ce=" -->"
+            ;;
+        *)
+            cc="#"
+            hashbang=$(head -n1 ${fnm})
+            if [[ ${hashbang} = "#!/bin/sh" ]] ||  [[ ${hashbang} = "#!/bin/bash" ]]
+            then
+                fixer=fixer
+            fi
+            ;;
+    esac
+    wantGPL="${cs:-${cc}${cc:+ }}Copyright (C) The Arvados Authors. All rights reserved.
+${cc}
+${cc}${cc:+ }SPDX-License-Identifier: AGPL-3.0${ce}"
+    wantApache="${cs:-${cc}${cc:+ }}Copyright (C) The Arvados Authors. All rights reserved.
+${cc}
+${cc}${cc:+ }SPDX-License-Identifier: Apache-2.0${ce}"
+    wantBYSA="${cs:-${cc}${cc:+ }}Copyright (C) The Arvados Authors. All rights reserved.
+${cc}
+${cc}${cc:+ }SPDX-License-Identifier: CC-BY-SA-3.0${ce}"
+    wantBYSAmd="[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
+[comment]: # ()
+[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)"
+    found=$(head -n20 "$fnm" | egrep -A${grepAfter} -B${grepBefore} 'Copyright.*All rights reserved.' || true)
+    case ${fnm} in
+        Makefile | build/* | lib/* | tools/* | apps/* | services/* | sdk/cli/bin/crunch-job)
+            want=${wantGPL}
+            ;;
+        crunch_scripts/* | docker/* | sdk/*)
+            want=${wantApache}
+            ;;
+        doc/*)
+            want=${wantBYSA}
+            ;;
+        README.md)
+            want=${wantBYSAmd}
+            ;;
+        *)
+            want=
+            ;;
+    esac
+    case "$found" in
+        "$wantGPL")
+            ;;
+        "$wantApache")
+            ;;
+        "$wantBYSA")
+            ;;
+        "$wantBYSAmd")
+            ;;
+        "")
+            if [[ -z ${found} ]] && [[ -n ${want} ]] && [[ $fix = true ]] && [[ $fixer != "" ]]
+            then
+                ${fixer} ${fnm}
+            else
+                echo "missing copyright notice: $fnm"
+                result=1
+            fi
+            ;;
+        *)
+            echo "nonstandard copyright notice: $fnm '${found}'"
+            result=1
+            ;;
+    esac
+done <&${COPROC[0]}
+exit $result
diff --git a/build/create-plot-data-from-log.sh b/build/create-plot-data-from-log.sh
new file mode 100755 (executable)
index 0000000..0741fd9
--- /dev/null
@@ -0,0 +1,62 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+build=$1
+file=$2
+outputdir=$3
+
+usage() {
+    echo "./$0 build_number file_to_parse output_dir"
+    echo "this script will use the build output to generate *csv and *txt"
+    echo "for jenkins plugin plot https://github.com/jenkinsci/plot-plugin/"
+}
+
+if [ $# -ne 3 ]
+then
+    usage
+    exit 1
+fi
+
+if [ ! -e $file ]
+then
+    usage
+    echo "$file doesn't exist! exiting"
+    exit 2
+fi
+if [ ! -w $outputdir ]
+then
+    usage
+    echo "$outputdir isn't writeable! exiting"
+    exit 3
+fi
+
+#------------------------------
+## MAXLINES is the number of lines to read after the pattern is
+## matched (the logfile can be hundreds of thousands of lines long).
+## 1000 should be safe enough to capture all the output of an individual test.
+MAXLINES=1000
+
+## TODO: check $build and $file make sense
+
+for test in \
+ test_Create_and_show_large_collection_with_manifest_text_of_20000000 \
+ test_Create,_show,_and_update_description_for_large_collection_with_manifest_text_of_100000 \
+ test_Create_one_large_collection_of_20000000_and_one_small_collection_of_10000_and_combine_them
+do
+ cleaned_test=$(echo $test | tr -d ",.:;/")
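+ # Extract this test's output block: take up to MAXLINES lines after the
+ # test header, drop the first two lines, and cut at the first blank or
+ # dashed line.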
+ (zgrep -i -E -A$MAXLINES "^[A-Za-z0-9]+Test: $test" $file && echo "----") | tail -n +1 | tail --lines=+3|grep -B$MAXLINES -E "^-*$" -m1 > $outputdir/$cleaned_test-$build.txt
+ result=$?
+ if [ $result -eq 0 ]
+ then
+   echo "processing $outputdir/$cleaned_test-$build.txt, creating $outputdir/$cleaned_test.csv"
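+   # First CSV row: numbered step names from the "Completed ..." lines;
+   # second row: the matching per-step durations in milliseconds.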
+   echo $(grep ^Completed $outputdir/$cleaned_test-$build.txt | perl -n -e '/^Completed (.*) in [0-9]+ms.*$/;print "".++$line."-$1,";' | perl -p -e 's/,$//g'|tr " " "_" ) >  $outputdir/$cleaned_test.csv
+   echo $(grep ^Completed $outputdir/$cleaned_test-$build.txt | perl -n -e '/^Completed.*in ([0-9]+)ms.*$/;print "$1,";' | perl -p -e 's/,$//g' ) >>  $outputdir/$cleaned_test.csv
+   #echo URL=https://ci.curoverse.com/view/job/arvados-api-server/ws/apps/workbench/log/$cleaned_test-$build.txt/*view*/ >>  $outputdir/$test.properties
+ else
+   echo "$test wasn't found in $file"
+   > $outputdir/$cleaned_test.csv
+ fi
+done
diff --git a/build/go-python-package-scripts/postinst b/build/go-python-package-scripts/postinst
new file mode 100755 (executable)
index 0000000..ab2568a
--- /dev/null
@@ -0,0 +1,69 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+# Detect rpm-based systems: the exit code of the following command is zero
+# on rpm-based systems
+if /usr/bin/rpm -q -f /usr/bin/rpm >/dev/null 2>&1; then
+    # Red Hat ("%{...}" is interpolated at package build time)
+    pkg="%{name}"
+    pkgtype=rpm
+    prefix="${RPM_INSTALL_PREFIX}"
+else
+    # Debian
+    script="$(basename "${0}")"
+    pkg="${script%.postinst}"
+    pkgtype=deb
+    prefix=/usr
+fi
+
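+# Debian runs postinst with $1 = "configure"; rpm runs %post with $1 set to
+# the number of installed instances (1 means a fresh install).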
+case "${pkgtype}-${1}" in
+    deb-configure | rpm-1)
+        dest_dir="/lib/systemd/system"
+        if ! [ -d "${dest_dir}" ]; then
+            exit 0
+        fi
+
+        # Find the unit file we need to install.
+        unit_file="${pkg}.service"
+        for dir in \
+            "${prefix}/share/doc/${pkg}" \
+            "${dest_dir}"; do
+            if [ -e "${dir}/${unit_file}" ]; then
+                src_dir="${dir}"
+                break
+            fi
+        done
+        if [ -z "${src_dir}" ]; then
+            echo >&2 "WARNING: postinst script did not find ${unit_file} anywhere."
+            exit 0
+        fi
+
+        # Install/update the unit file if necessary.
+        if [ "${src_dir}" != "${dest_dir}" ]; then
+            cp "${src_dir}/${unit_file}" "${dest_dir}/" || exit 0
+        fi
+
+        # Enable service, and make sure systemd re-reads the unit
+        # file, in case we changed it.
+        if [ -e /run/systemd/system ]; then
+            systemctl daemon-reload || true
+            eval "$(systemctl -p UnitFileState show "${pkg}")"
+            case "${UnitFileState}" in
+                disabled)
+                    # Failing to enable or start the service is not a
+                    # package error, so don't let errors here
+                    # propagate up.
+                    systemctl enable "${pkg}" || true
+                    systemctl start "${pkg}" || true
+                    ;;
+                enabled)
+                    systemctl reload-or-try-restart "${pkg}" || true
+                    ;;
+            esac
+        fi
+        ;;
+esac
diff --git a/build/go-python-package-scripts/prerm b/build/go-python-package-scripts/prerm
new file mode 100755 (executable)
index 0000000..c0f45d6
--- /dev/null
@@ -0,0 +1,35 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+# Detect rpm-based systems: the exit code of the following command is zero
+# on rpm-based systems
+if /usr/bin/rpm -q -f /usr/bin/rpm >/dev/null 2>&1; then
+    # Red Hat ("%{...}" is interpolated at package build time)
+    pkg="%{name}"
+    pkgtype=rpm
+    prefix="${RPM_INSTALL_PREFIX}"
+else
+    # Debian
+    script="$(basename "${0}")"
+    pkg="${script%.prerm}"
+    pkgtype=deb
+    prefix=/usr
+fi
+
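+# Debian runs prerm with $1 = "remove"; rpm runs %preun with $1 set to the
+# number of instances that will remain (0 means full removal).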
+case "${pkgtype}-${1}" in
+    deb-remove | rpm-0)
+        if [ -e /run/systemd/system ]; then
+            systemctl stop "${pkg}" || true
+            systemctl disable "${pkg}" || true
+        fi
+        if [ -e "${prefix}/share/doc/${pkg}/${pkg}.service" ]; then
+            # Unit files from Python packages get installed by
+            # postinst so we have to remove them explicitly here.
+            rm "/lib/systemd/system/${pkg}.service" || true
+        fi
+        ;;
+esac
diff --git a/build/libcloud-pin.sh b/build/libcloud-pin.sh
new file mode 100644 (file)
index 0000000..65e9be5
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+LIBCLOUD_PIN=2.3.1.dev2
+
+using_fork=true
+if [[ $using_fork = true ]]; then
+    LIBCLOUD_PIN_SRC="https://github.com/curoverse/libcloud/archive/apache-libcloud-$LIBCLOUD_PIN.zip"
+else
+    LIBCLOUD_PIN_SRC=""
+fi
diff --git a/build/package-build-dockerfiles/.gitignore b/build/package-build-dockerfiles/.gitignore
new file mode 100644 (file)
index 0000000..ceee9fa
--- /dev/null
@@ -0,0 +1,2 @@
+*/generated
+common-generated/
diff --git a/build/package-build-dockerfiles/Makefile b/build/package-build-dockerfiles/Makefile
new file mode 100644 (file)
index 0000000..5232050
--- /dev/null
@@ -0,0 +1,51 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
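+# Each DISTRO/generated target hard-links (cp -l) the shared downloads from
+# common-generated/ into that distro's Docker build context.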
+all: centos7/generated debian8/generated debian9/generated ubuntu1404/generated ubuntu1604/generated ubuntu1804/generated
+
+centos7/generated: common-generated-all
+       test -d centos7/generated || mkdir centos7/generated
+       cp -rlt centos7/generated common-generated/*
+
+debian8/generated: common-generated-all
+       test -d debian8/generated || mkdir debian8/generated
+       cp -rlt debian8/generated common-generated/*
+
+debian9/generated: common-generated-all
+       test -d debian9/generated || mkdir debian9/generated
+       cp -rlt debian9/generated common-generated/*
+
+ubuntu1404/generated: common-generated-all
+       test -d ubuntu1404/generated || mkdir ubuntu1404/generated
+       cp -rlt ubuntu1404/generated common-generated/*
+
+ubuntu1604/generated: common-generated-all
+       test -d ubuntu1604/generated || mkdir ubuntu1604/generated
+       cp -rlt ubuntu1604/generated common-generated/*
+
+ubuntu1804/generated: common-generated-all
+       test -d ubuntu1804/generated || mkdir ubuntu1804/generated
+       cp -rlt ubuntu1804/generated common-generated/*
+
+GOTARBALL=go1.10.1.linux-amd64.tar.gz
+NODETARBALL=node-v6.11.2-linux-x64.tar.xz
+RVMKEY1=mpapis.asc
+RVMKEY2=pkuczynski.asc
+
+common-generated-all: common-generated/$(GOTARBALL) common-generated/$(NODETARBALL) common-generated/$(RVMKEY1) common-generated/$(RVMKEY2)
+
+common-generated/$(GOTARBALL): common-generated
+       wget -cqO common-generated/$(GOTARBALL) http://storage.googleapis.com/golang/$(GOTARBALL)
+
+common-generated/$(NODETARBALL): common-generated
+       wget -cqO common-generated/$(NODETARBALL) https://nodejs.org/dist/v6.11.2/$(NODETARBALL)
+
+common-generated/$(RVMKEY1): common-generated
+       wget -cqO common-generated/$(RVMKEY1) https://rvm.io/mpapis.asc
+
+common-generated/$(RVMKEY2): common-generated
+       wget -cqO common-generated/$(RVMKEY2) https://rvm.io/pkuczynski.asc
+
+common-generated:
+       mkdir common-generated
diff --git a/build/package-build-dockerfiles/README b/build/package-build-dockerfiles/README
new file mode 100644 (file)
index 0000000..0dfab94
--- /dev/null
@@ -0,0 +1,13 @@
+==================
+DOCKER IMAGE BUILD
+==================
+
+1. `make`
+2. `cd DISTRO`
+3. `docker build -t arvados/build:DISTRO .`
+
+==============
+BUILD PACKAGES
+==============
+
+`docker run -v /path/to/your/arvados-dev/jenkins:/jenkins -v /path/to/your/arvados:/arvados arvados/build:DISTRO`
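+
+For example, to build Debian 9 packages (assuming arvados-dev and arvados are
+checked out under $HOME):
+
+`docker run -v $HOME/arvados-dev/jenkins:/jenkins -v $HOME/arvados:/arvados arvados/build:debian9`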
diff --git a/build/package-build-dockerfiles/build-all-build-containers.sh b/build/package-build-dockerfiles/build-all-build-containers.sh
new file mode 100755 (executable)
index 0000000..0d37859
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+make
+
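+# Build an arvados/build:<distro> image from every distro directory that the
+# Makefile populated with a generated/ subdirectory.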
+for target in `find -maxdepth 1 -type d |grep -v generated`; do
+  if [[ "$target" == "." ]]; then
+    continue
+  fi
+  target=${target#./}
+  echo $target
+  cd $target
+  docker build -t arvados/build:$target .
+  cd ..
+done
+
+
diff --git a/build/package-build-dockerfiles/centos7/Dockerfile b/build/package-build-dockerfiles/centos7/Dockerfile
new file mode 100644 (file)
index 0000000..ad6f4e1
--- /dev/null
@@ -0,0 +1,45 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM centos:7
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+# Install dependencies.
+RUN yum -q -y install make automake gcc gcc-c++ libyaml-devel patch readline-devel zlib-devel libffi-devel openssl-devel bzip2 libtool bison sqlite-devel rpm-build git perl-ExtUtils-MakeMaker libattr-devel nss-devel libcurl-devel which tar unzip scl-utils centos-release-scl postgresql-devel python-devel python-setuptools fuse-devel xz-libs git python-virtualenv wget
+
+# Install RVM
+ADD generated/mpapis.asc /tmp/
+ADD generated/pkuczynski.asc /tmp/
+RUN gpg --import --no-tty /tmp/mpapis.asc && \
+    gpg --import --no-tty /tmp/pkuczynski.asc && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
+
+# Install golang binary
+ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+RUN ln -s /usr/local/go/bin/go /usr/local/bin/
+
+# Install nodejs and npm
+ADD generated/node-v6.11.2-linux-x64.tar.xz /usr/local/
+RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
+
+# Need to "touch" the RPM database to work around a bug in the interaction
+# between overlayfs and yum (https://bugzilla.redhat.com/show_bug.cgi?id=1213602)
+RUN touch /var/lib/rpm/* && yum -q -y install rh-python35
+RUN scl enable rh-python35 "easy_install-3.5 pip" && easy_install-2.7 pip
+
+# Add EPEL; we need it for the python-pam dependency
+RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+RUN rpm -ivh epel-release-latest-7.noarch.rpm
+
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+
+# The version of setuptools that comes with CentOS is way too old
+RUN pip install --upgrade setuptools
+
+ENV WORKSPACE /arvados
+CMD ["scl", "enable", "rh-python35", "/usr/local/rvm/bin/rvm-exec default bash /jenkins/run-build-packages.sh --target centos7"]
diff --git a/build/package-build-dockerfiles/debian8/Dockerfile b/build/package-build-dockerfiles/debian8/Dockerfile
new file mode 100644 (file)
index 0000000..3f591cd
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM debian:jessie
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install dependencies.
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev python-pip unzip python3-venv python3-dev
+
+# Install virtualenv
+RUN /usr/bin/pip install virtualenv
+
+# Install RVM
+ADD generated/mpapis.asc /tmp/
+ADD generated/pkuczynski.asc /tmp/
+RUN gpg --import --no-tty /tmp/mpapis.asc && \
+    gpg --import --no-tty /tmp/pkuczynski.asc && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
+
+# Install golang binary
+ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+RUN ln -s /usr/local/go/bin/go /usr/local/bin/
+
+# Install nodejs and npm
+ADD generated/node-v6.11.2-linux-x64.tar.xz /usr/local/
+RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
+
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+
+ENV WORKSPACE /arvados
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "debian8"]
diff --git a/build/package-build-dockerfiles/debian9/Dockerfile b/build/package-build-dockerfiles/debian9/Dockerfile
new file mode 100644 (file)
index 0000000..6f7f3fa
--- /dev/null
@@ -0,0 +1,39 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+## Don't use debian:9 here: the codename 'stretch' is what RVM uses to find its precompiled binaries
+FROM debian:stretch
+MAINTAINER Nico Cesar <nico@curoverse.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install dependencies.
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev python-pip unzip python3-venv python3-dev
+
+# Install virtualenv
+RUN /usr/bin/pip install virtualenv
+
+# Install RVM
+ADD generated/mpapis.asc /tmp/
+ADD generated/pkuczynski.asc /tmp/
+RUN gpg --import --no-tty /tmp/mpapis.asc && \
+    gpg --import --no-tty /tmp/pkuczynski.asc && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
+
+# Install golang binary
+ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+RUN ln -s /usr/local/go/bin/go /usr/local/bin/
+
+# Install nodejs and npm
+ADD generated/node-v6.11.2-linux-x64.tar.xz /usr/local/
+RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
+
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+
+ENV WORKSPACE /arvados
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "debian9"]
diff --git a/build/package-build-dockerfiles/ubuntu1404/Dockerfile b/build/package-build-dockerfiles/ubuntu1404/Dockerfile
new file mode 100644 (file)
index 0000000..4c01c9e
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM ubuntu:trusty
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install dependencies.
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip python3.4-venv python3.4-dev
+
+# Install virtualenv
+RUN /usr/bin/pip install virtualenv
+
+# Install RVM
+ADD generated/mpapis.asc /tmp/
+ADD generated/pkuczynski.asc /tmp/
+RUN gpg --import --no-tty /tmp/mpapis.asc && \
+    gpg --import --no-tty /tmp/pkuczynski.asc && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
+
+# Install golang binary
+ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+RUN ln -s /usr/local/go/bin/go /usr/local/bin/
+
+# Install nodejs and npm
+ADD generated/node-v6.11.2-linux-x64.tar.xz /usr/local/
+RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
+
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+
+ENV WORKSPACE /arvados
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "ubuntu1404"]
diff --git a/build/package-build-dockerfiles/ubuntu1604/Dockerfile b/build/package-build-dockerfiles/ubuntu1604/Dockerfile
new file mode 100644 (file)
index 0000000..a83fc77
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM ubuntu:xenial
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install dependencies.
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev libgnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata python3-venv python3-dev
+
+# Install virtualenv
+RUN /usr/bin/pip install virtualenv
+
+# Install RVM
+ADD generated/mpapis.asc /tmp/
+ADD generated/pkuczynski.asc /tmp/
+RUN gpg --import --no-tty /tmp/mpapis.asc && \
+    gpg --import --no-tty /tmp/pkuczynski.asc && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
+
+# Install golang binary
+ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+RUN ln -s /usr/local/go/bin/go /usr/local/bin/
+
+# Install nodejs and npm
+ADD generated/node-v6.11.2-linux-x64.tar.xz /usr/local/
+RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
+
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+
+ENV WORKSPACE /arvados
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "ubuntu1604"]
diff --git a/build/package-build-dockerfiles/ubuntu1804/Dockerfile b/build/package-build-dockerfiles/ubuntu1804/Dockerfile
new file mode 100644 (file)
index 0000000..d0a0999
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM ubuntu:bionic
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install dependencies.
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata python3-venv python3-dev
+
+# Install virtualenv
+RUN /usr/bin/pip install virtualenv
+
+# Install RVM
+ADD generated/mpapis.asc /tmp/
+ADD generated/pkuczynski.asc /tmp/
+RUN gpg --import --no-tty /tmp/mpapis.asc && \
+    gpg --import --no-tty /tmp/pkuczynski.asc && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
+
+# Install golang binary
+ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+RUN ln -s /usr/local/go/bin/go /usr/local/bin/
+
+# Install nodejs and npm
+ADD generated/node-v6.11.2-linux-x64.tar.xz /usr/local/
+RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
+
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+
+ENV WORKSPACE /arvados
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "ubuntu1804"]
diff --git a/build/package-test-dockerfiles/.gitignore b/build/package-test-dockerfiles/.gitignore
new file mode 100644 (file)
index 0000000..ceee9fa
--- /dev/null
@@ -0,0 +1,2 @@
+*/generated
+common-generated/
diff --git a/build/package-test-dockerfiles/Makefile b/build/package-test-dockerfiles/Makefile
new file mode 100644 (file)
index 0000000..c6d5a15
--- /dev/null
@@ -0,0 +1,43 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+all: centos7/generated debian8/generated debian9/generated ubuntu1404/generated ubuntu1604/generated ubuntu1804/generated
+
+centos7/generated: common-generated-all
+       test -d centos7/generated || mkdir centos7/generated
+       cp -rlt centos7/generated common-generated/*
+
+debian8/generated: common-generated-all
+       test -d debian8/generated || mkdir debian8/generated
+       cp -rlt debian8/generated common-generated/*
+
+debian9/generated: common-generated-all
+       test -d debian9/generated || mkdir debian9/generated
+       cp -rlt debian9/generated common-generated/*
+
+ubuntu1404/generated: common-generated-all
+       test -d ubuntu1404/generated || mkdir ubuntu1404/generated
+       cp -rlt ubuntu1404/generated common-generated/*
+
+ubuntu1604/generated: common-generated-all
+       test -d ubuntu1604/generated || mkdir ubuntu1604/generated
+       cp -rlt ubuntu1604/generated common-generated/*
+
+ubuntu1804/generated: common-generated-all
+       test -d ubuntu1804/generated || mkdir ubuntu1804/generated
+       cp -rlt ubuntu1804/generated common-generated/*
+
+RVMKEY1=mpapis.asc
+RVMKEY2=pkuczynski.asc
+
+common-generated-all: common-generated/$(RVMKEY1) common-generated/$(RVMKEY2)
+
+common-generated/$(RVMKEY1): common-generated
+       wget -cqO common-generated/$(RVMKEY1) https://rvm.io/mpapis.asc
+
+common-generated/$(RVMKEY2): common-generated
+       wget -cqO common-generated/$(RVMKEY2) https://rvm.io/pkuczynski.asc
+
+common-generated:
+       mkdir common-generated
diff --git a/build/package-test-dockerfiles/README b/build/package-test-dockerfiles/README
new file mode 100644 (file)
index 0000000..f938d42
--- /dev/null
@@ -0,0 +1,7 @@
+==================
+DOCKER IMAGE BUILD
+==================
+
+1. `make`
+2. `cd DISTRO`
+3. `docker build -t arvados/build:DISTRO .`
diff --git a/build/package-test-dockerfiles/centos7/Dockerfile b/build/package-test-dockerfiles/centos7/Dockerfile
new file mode 100644 (file)
index 0000000..0bfe80b
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM centos:7
+MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
+
+# Install dependencies.
+RUN yum -q -y install scl-utils centos-release-scl which tar wget
+
+# Install RVM
+ADD generated/mpapis.asc /tmp/
+ADD generated/pkuczynski.asc /tmp/
+RUN touch /var/lib/rpm/* && \
+    gpg --import --no-tty /tmp/mpapis.asc && \
+    gpg --import --no-tty /tmp/pkuczynski.asc && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3
+
+# Add EPEL; we need it for the python-pam dependency
+RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+RUN rpm -ivh epel-release-latest-7.noarch.rpm
+
+COPY localrepo.repo /etc/yum.repos.d/localrepo.repo
diff --git a/build/package-test-dockerfiles/centos7/localrepo.repo b/build/package-test-dockerfiles/centos7/localrepo.repo
new file mode 100644 (file)
index 0000000..ebb8765
--- /dev/null
@@ -0,0 +1,5 @@
+[localrepo]
+name=Arvados Test
+baseurl=file:///arvados/packages/centos7
+gpgcheck=0
+enabled=1
diff --git a/build/package-test-dockerfiles/debian8/Dockerfile b/build/package-test-dockerfiles/debian8/Dockerfile
new file mode 100644 (file)
index 0000000..2168f72
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM debian:8
+MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install dependencies
+RUN apt-get update && \
+    apt-get -y install --no-install-recommends curl ca-certificates
+
+# Install RVM
+ADD generated/mpapis.asc /tmp/
+ADD generated/pkuczynski.asc /tmp/
+RUN gpg --import --no-tty /tmp/mpapis.asc && \
+    gpg --import --no-tty /tmp/pkuczynski.asc && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3
+
+# udev daemon can't start in a container, so don't try.
+RUN mkdir -p /etc/udev/disabled
+
+RUN echo "deb file:///arvados/packages/debian8/ /" >>/etc/apt/sources.list
diff --git a/build/package-test-dockerfiles/debian9/Dockerfile b/build/package-test-dockerfiles/debian9/Dockerfile
new file mode 100644 (file)
index 0000000..9c46ef6
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM debian:stretch
+MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install dependencies
+RUN apt-get update && \
+    apt-get -y install --no-install-recommends curl ca-certificates gpg procps
+
+# Install RVM
+ADD generated/mpapis.asc /tmp/
+ADD generated/pkuczynski.asc /tmp/
+RUN gpg --import --no-tty /tmp/mpapis.asc && \
+    gpg --import --no-tty /tmp/pkuczynski.asc && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3
+
+# udev daemon can't start in a container, so don't try.
+RUN mkdir -p /etc/udev/disabled
+
+RUN echo "deb file:///arvados/packages/debian9/ /" >>/etc/apt/sources.list
diff --git a/build/package-test-dockerfiles/ubuntu1404/Dockerfile b/build/package-test-dockerfiles/ubuntu1404/Dockerfile
new file mode 100644 (file)
index 0000000..c05dbee
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM ubuntu:trusty
+MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install dependencies
+RUN apt-get update && \
+    apt-get -y install --no-install-recommends curl ca-certificates python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip binutils build-essential ca-certificates
+
+# Install RVM
+ADD generated/mpapis.asc /tmp/
+ADD generated/pkuczynski.asc /tmp/
+RUN gpg --import --no-tty /tmp/mpapis.asc && \
+    gpg --import --no-tty /tmp/pkuczynski.asc && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3
+
+# udev daemon can't start in a container, so don't try.
+RUN mkdir -p /etc/udev/disabled
+
+RUN echo "deb file:///arvados/packages/ubuntu1404/ /" >>/etc/apt/sources.list
diff --git a/build/package-test-dockerfiles/ubuntu1604/Dockerfile b/build/package-test-dockerfiles/ubuntu1604/Dockerfile
new file mode 100644 (file)
index 0000000..615ab1c
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM ubuntu:xenial
+MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install dependencies
+RUN apt-get update && \
+    apt-get -y install --no-install-recommends curl ca-certificates
+
+# Install RVM
+ADD generated/mpapis.asc /tmp/
+ADD generated/pkuczynski.asc /tmp/
+RUN gpg --import --no-tty /tmp/mpapis.asc && \
+    gpg --import --no-tty /tmp/pkuczynski.asc && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3
+
+# udev daemon can't start in a container, so don't try.
+RUN mkdir -p /etc/udev/disabled
+
+RUN echo "deb file:///arvados/packages/ubuntu1604/ /" >>/etc/apt/sources.list
+
+# Add preferences file for the Arvados packages. This pins Arvados
+# packages at priority 501, so that older python dependency versions
+# are preferred in those cases where we need them
+ADD etc-apt-preferences.d-arvados /etc/apt/preferences.d/arvados
diff --git a/build/package-test-dockerfiles/ubuntu1604/etc-apt-preferences.d-arvados b/build/package-test-dockerfiles/ubuntu1604/etc-apt-preferences.d-arvados
new file mode 100644 (file)
index 0000000..9e24695
--- /dev/null
@@ -0,0 +1,3 @@
+Package: *
+Pin: release o=Arvados
+Pin-Priority: 501
diff --git a/build/package-test-dockerfiles/ubuntu1804/Dockerfile b/build/package-test-dockerfiles/ubuntu1804/Dockerfile
new file mode 100644 (file)
index 0000000..d530d22
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM ubuntu:bionic
+MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install dependencies
+RUN apt-get update && \
+    apt-get -y install --no-install-recommends curl ca-certificates gnupg2
+
+# Install RVM
+ADD generated/mpapis.asc /tmp/
+ADD generated/pkuczynski.asc /tmp/
+RUN gpg --import --no-tty /tmp/mpapis.asc && \
+    gpg --import --no-tty /tmp/pkuczynski.asc && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.3 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.3
+
+# udev daemon can't start in a container, so don't try.
+RUN mkdir -p /etc/udev/disabled
+
+RUN echo "deb [trusted=yes] file:///arvados/packages/ubuntu1804/ /" >>/etc/apt/sources.list
+
+# Add preferences file for the Arvados packages. This pins Arvados
+# packages at priority 501, so that older python dependency versions
+# are preferred in those cases where we need them
+ADD etc-apt-preferences.d-arvados /etc/apt/preferences.d/arvados
diff --git a/build/package-test-dockerfiles/ubuntu1804/etc-apt-preferences.d-arvados b/build/package-test-dockerfiles/ubuntu1804/etc-apt-preferences.d-arvados
new file mode 100644 (file)
index 0000000..9e24695
--- /dev/null
@@ -0,0 +1,3 @@
+Package: *
+Pin: release o=Arvados
+Pin-Priority: 501
diff --git a/build/package-testing/common-test-packages.sh b/build/package-testing/common-test-packages.sh
new file mode 100755 (executable)
index 0000000..ad356f2
--- /dev/null
@@ -0,0 +1,31 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -eu
+
+FAIL=0
+
+echo
+
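+# Scan every shared library extracted from the package for dependencies the
+# dynamic linker cannot resolve.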
+while read so && [ -n "$so" ]; do
+    if ldd "$so" | grep "not found" ; then
+        echo "^^^ Missing while scanning $so ^^^"
+        FAIL=1
+    fi
+done <<EOF
+$(find -name '*.so')
+EOF
+
+if test -x "/jenkins/package-testing/test-package-$1.sh" ; then
+    if ! "/jenkins/package-testing/test-package-$1.sh" ; then
+       FAIL=1
+    fi
+fi
+
+if test $FAIL = 0 ; then
+   echo "Package $1 passed"
+fi
+
+exit $FAIL
diff --git a/build/package-testing/common-test-rails-server-package.sh b/build/package-testing/common-test-rails-server-package.sh
new file mode 100755 (executable)
index 0000000..6a7097c
--- /dev/null
@@ -0,0 +1,32 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+if [ 0 = "$#" ]; then
+    PACKAGE_NAME="$(basename "$0" | grep -Eo '\barvados.*$')"
+    PACKAGE_NAME=${PACKAGE_NAME%.sh}
+else
+    PACKAGE_NAME=$1; shift
+fi
+
+cd "/var/www/${PACKAGE_NAME%-server}/current"
+
+case "$TARGET" in
+    debian*|ubuntu*)
+        apt-get install -y nginx
+        dpkg-reconfigure "$PACKAGE_NAME"
+        ;;
+    centos*)
+        yum install --assumeyes httpd
+        yum reinstall --assumeyes "$PACKAGE_NAME"
+        ;;
+    *)
+        printf "%s: Unknown target '%s'.\n\n" "$0" "$TARGET" >&2
+        exit 1
+        ;;
+esac
+
+/usr/local/rvm/bin/rvm-exec default bundle list >"$ARV_PACKAGES_DIR/$PACKAGE_NAME.gems"
diff --git a/build/package-testing/deb-common-test-packages.sh b/build/package-testing/deb-common-test-packages.sh
new file mode 100755 (executable)
index 0000000..77017ba
--- /dev/null
@@ -0,0 +1,59 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+set -eu
+
+# Set up
+DEBUG=${ARVADOS_DEBUG:-0}
+STDOUT_IF_DEBUG=/dev/null
+STDERR_IF_DEBUG=/dev/null
+DASHQQ_UNLESS_DEBUG=-qq
+if [[ "$DEBUG" != "0" ]]; then
+  STDOUT_IF_DEBUG=/dev/stdout
+  STDERR_IF_DEBUG=/dev/stderr
+  DASHQQ_UNLESS_DEBUG=
+fi
+
+# Multiple .deb-based distros symlink to this script, so extract the target
+# from the invocation path.
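+# (e.g. "test-packages-debian9.sh" yields "debian9")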
+target=$(echo $0 | sed 's/.*test-packages-\([^.]*\)\.sh.*/\1/')
+
+export ARV_PACKAGES_DIR="/arvados/packages/$target"
+
+dpkg-query --show > "$ARV_PACKAGES_DIR/$1.before"
+
+apt-get $DASHQQ_UNLESS_DEBUG update
+
+apt-get $DASHQQ_UNLESS_DEBUG -y --allow-unauthenticated install "$1" >"$STDOUT_IF_DEBUG" 2>"$STDERR_IF_DEBUG"
+
+dpkg-query --show > "$ARV_PACKAGES_DIR/$1.after"
+
+set +e
+diff "$ARV_PACKAGES_DIR/$1.before" "$ARV_PACKAGES_DIR/$1.after" > "$ARV_PACKAGES_DIR/$1.diff"
+set -e
+
+mkdir -p /tmp/opts
+cd /tmp/opts
+
+export ARV_PACKAGES_DIR="/arvados/packages/$target"
+
+if [[ -f $(ls -t "$ARV_PACKAGES_DIR/$1"_*.deb 2>/dev/null | head -n1) ]] ; then
+    debpkg=$(ls -t "$ARV_PACKAGES_DIR/$1"_*.deb | head -n1)
+else
+    debpkg=$(ls -t "$ARV_PACKAGES_DIR/processed/$1"_*.deb | head -n1)
+fi
+
+dpkg-deb -x $debpkg .
+
+if [[ "$DEBUG" != "0" ]]; then
+  while read so && [ -n "$so" ]; do
+      echo
+      echo "== Packages dependencies for $so =="
+      ldd "$so" | awk '($3 ~ /^\//){print $3}' | sort -u | xargs dpkg -S | cut -d: -f1 | sort -u
+  done <<EOF
+$(find -name '*.so')
+EOF
+fi
+
+exec /jenkins/package-testing/common-test-packages.sh "$1"
diff --git a/build/package-testing/rpm-common-test-packages.sh b/build/package-testing/rpm-common-test-packages.sh
new file mode 100755 (executable)
index 0000000..12450dd
--- /dev/null
@@ -0,0 +1,61 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -eu
+
+# Set up
+DEBUG=${ARVADOS_DEBUG:-0}
+STDOUT_IF_DEBUG=/dev/null
+STDERR_IF_DEBUG=/dev/null
+if [[ "$DEBUG" != "0" ]]; then
+  STDOUT_IF_DEBUG=/dev/stdout
+  STDERR_IF_DEBUG=/dev/stderr
+fi
+
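+# The test-packages-<target>.sh symlink name determines the target, e.g. "centos7".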
+target=$(basename "$0" | grep -Eo '\bcentos[[:digit:]]+\b')
+
+yum -q clean all
+touch /var/lib/rpm/*
+
+export ARV_PACKAGES_DIR="/arvados/packages/$target"
+
+rpm -qa | sort > "$ARV_PACKAGES_DIR/$1.before"
+
+yum install --assumeyes -e 0 $1
+
+rpm -qa | sort > "$ARV_PACKAGES_DIR/$1.after"
+
+diff "$ARV_PACKAGES_DIR/$1".{before,after} >"$ARV_PACKAGES_DIR/$1.diff" || true
+
+# Enable any Software Collections that the package depended on.
+if [[ -d /opt/rh ]]; then
+    # We have to stage the list to a file, because `ls | while read` would
+    # make a subshell, causing the `source` lines to have no effect.
+    scl_list=$(mktemp)
+    ls /opt/rh >"$scl_list"
+
+    # SCL scripts aren't designed to run with -eu.
+    set +eu
+    while read scl; do
+        source scl_source enable "$scl"
+    done <"$scl_list"
+    set -eu
+    rm "$scl_list"
+fi
+
+mkdir -p /tmp/opts
+cd /tmp/opts
+
+rpm2cpio $(ls -t "$ARV_PACKAGES_DIR/$1"-*.rpm | head -n1) | cpio -idm 2>/dev/null
+
+if [[ "$DEBUG" != "0" ]]; then
+  find -name '*.so' | while read so; do
+      echo -e "\n== Packages dependencies for $so =="
+      ldd "$so" \
+          | awk '($3 ~ /^\//){print $3}' | sort -u | xargs rpm -qf | sort -u
+  done
+fi
+
+exec /jenkins/package-testing/common-test-packages.sh "$1"
diff --git a/build/package-testing/test-package-arvados-api-server.sh b/build/package-testing/test-package-arvados-api-server.sh
new file mode 120000 (symlink)
index 0000000..8bc03bc
--- /dev/null
@@ -0,0 +1 @@
+common-test-rails-server-package.sh
\ No newline at end of file
diff --git a/build/package-testing/test-package-arvados-node-manager.sh b/build/package-testing/test-package-arvados-node-manager.sh
new file mode 100755 (executable)
index 0000000..9300f4c
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+arvados-node-manager --version
+
+exec /usr/share/python2.7/dist/arvados-node-manager/bin/python2.7 <<EOF
+import libcloud.compute.types
+import libcloud.compute.providers
+libcloud.compute.providers.get_driver(libcloud.compute.types.Provider.AZURE_ARM)
+print "Successfully imported compatible libcloud library"
+EOF
diff --git a/build/package-testing/test-package-arvados-sso-server.sh b/build/package-testing/test-package-arvados-sso-server.sh
new file mode 120000 (symlink)
index 0000000..8bc03bc
--- /dev/null
@@ -0,0 +1 @@
+common-test-rails-server-package.sh
\ No newline at end of file
diff --git a/build/package-testing/test-package-arvados-workbench.sh b/build/package-testing/test-package-arvados-workbench.sh
new file mode 120000 (symlink)
index 0000000..8bc03bc
--- /dev/null
@@ -0,0 +1 @@
+common-test-rails-server-package.sh
\ No newline at end of file
diff --git a/build/package-testing/test-package-python-arvados-cwl-runner.sh b/build/package-testing/test-package-python-arvados-cwl-runner.sh
new file mode 120000 (symlink)
index 0000000..61e61b1
--- /dev/null
@@ -0,0 +1 @@
+test-package-python27-python-arvados-cwl-runner.sh
\ No newline at end of file
diff --git a/build/package-testing/test-package-python-arvados-fuse.sh b/build/package-testing/test-package-python-arvados-fuse.sh
new file mode 120000 (symlink)
index 0000000..3b9232c
--- /dev/null
@@ -0,0 +1 @@
+test-package-python27-python-arvados-fuse.sh
\ No newline at end of file
diff --git a/build/package-testing/test-package-python-arvados-python-client.sh b/build/package-testing/test-package-python-arvados-python-client.sh
new file mode 120000 (symlink)
index 0000000..8a4d0ea
--- /dev/null
@@ -0,0 +1 @@
+test-package-python27-python-arvados-python-client.sh
\ No newline at end of file
diff --git a/build/package-testing/test-package-python-cwltest.sh b/build/package-testing/test-package-python-cwltest.sh
new file mode 120000 (symlink)
index 0000000..9b6545b
--- /dev/null
@@ -0,0 +1 @@
+test-package-python27-python-cwltest.sh
\ No newline at end of file
diff --git a/build/package-testing/test-package-python27-python-arvados-cwl-runner.sh b/build/package-testing/test-package-python27-python-arvados-cwl-runner.sh
new file mode 100755 (executable)
index 0000000..99327c0
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+arvados-cwl-runner --version
diff --git a/build/package-testing/test-package-python27-python-arvados-fuse.sh b/build/package-testing/test-package-python27-python-arvados-fuse.sh
new file mode 100755 (executable)
index 0000000..8192985
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+arv-mount --version
diff --git a/build/package-testing/test-package-python27-python-arvados-python-client.sh b/build/package-testing/test-package-python27-python-arvados-python-client.sh
new file mode 100755 (executable)
index 0000000..2c92a3e
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+arv-put --version
+
+/usr/share/python2.7/dist/python-arvados-python-client/bin/python2.7 << EOF
+import arvados
+print "Successfully imported arvados"
+EOF
diff --git a/build/package-testing/test-package-python27-python-cwltest.sh b/build/package-testing/test-package-python27-python-cwltest.sh
new file mode 100755 (executable)
index 0000000..395cefc
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec python <<EOF
+import cwltest
+EOF
diff --git a/build/package-testing/test-packages-centos7.sh b/build/package-testing/test-packages-centos7.sh
new file mode 120000 (symlink)
index 0000000..64ef604
--- /dev/null
@@ -0,0 +1 @@
+rpm-common-test-packages.sh
\ No newline at end of file
diff --git a/build/package-testing/test-packages-debian8.sh b/build/package-testing/test-packages-debian8.sh
new file mode 120000 (symlink)
index 0000000..54ce94c
--- /dev/null
@@ -0,0 +1 @@
+deb-common-test-packages.sh
\ No newline at end of file
diff --git a/build/package-testing/test-packages-debian9.sh b/build/package-testing/test-packages-debian9.sh
new file mode 120000 (symlink)
index 0000000..54ce94c
--- /dev/null
@@ -0,0 +1 @@
+deb-common-test-packages.sh
\ No newline at end of file
diff --git a/build/package-testing/test-packages-ubuntu1404.sh b/build/package-testing/test-packages-ubuntu1404.sh
new file mode 120000 (symlink)
index 0000000..54ce94c
--- /dev/null
@@ -0,0 +1 @@
+deb-common-test-packages.sh
\ No newline at end of file
diff --git a/build/package-testing/test-packages-ubuntu1604.sh b/build/package-testing/test-packages-ubuntu1604.sh
new file mode 120000 (symlink)
index 0000000..54ce94c
--- /dev/null
@@ -0,0 +1 @@
+deb-common-test-packages.sh
\ No newline at end of file
diff --git a/build/package-testing/test-packages-ubuntu1804.sh b/build/package-testing/test-packages-ubuntu1804.sh
new file mode 120000 (symlink)
index 0000000..54ce94c
--- /dev/null
@@ -0,0 +1 @@
+deb-common-test-packages.sh
\ No newline at end of file
diff --git a/build/rails-package-scripts/README.md b/build/rails-package-scripts/README.md
new file mode 100644 (file)
index 0000000..0d720bd
--- /dev/null
@@ -0,0 +1,18 @@
+[//]: # Copyright (C) The Arvados Authors. All rights reserved.
+[//]: #
+[//]: # SPDX-License-Identifier: AGPL-3.0
+
+When run-build-packages.sh builds a Rails package, it generates the package's pre/post-inst/rm scripts by concatenating:
+
+1. package_name.sh, which defines variables describing where the package's files live and some human-readable names for them.
+2. step2.sh, which uses those to define some utility variables and set defaults for things that aren't set.
+3. stepname.sh, like postinst.sh, prerm.sh, etc., which uses all this information to do the actual work.
+
+Since our build process is a tower of shell scripts, concatenating files seemed like the least bad option for sharing code between these files and packages.  More advanced code generation would have been too much trouble to integrate into our build process at this time.  Injecting portions of files into other files seemed error-prone and likely to introduce bugs into the end result.
+
+postinst.sh lets the early parts define a few hooks to control behavior:
+
+* After it installs the core configuration files (database.yml, application.yml, and production.rb) to /etc/arvados/server, it calls setup_extra_conffiles.  By default this is a noop function (in step2.sh).  API server defines this to set up the old omniauth.rb conffile.
+* Before it restarts nginx, it calls setup_before_nginx_restart.  By default this is a noop function (in step2.sh).  API server defines this to set up the internal git repository, if necessary.
+* $RAILSPKG_DATABASE_LOAD_TASK defines the Rake task to load the database.  API server uses db:structure:load.  SSO server uses db:schema:load.  Workbench doesn't set this, which causes the postinst to skip all database work.
+* If $RAILSPKG_SUPPORTS_CONFIG_CHECK != 1, it won't run the config:check rake task.  SSO clears this flag (it doesn't have that task code).
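+
+As a sketch, the API server's postinst is assembled along these lines (the
+actual invocation lives in run-build-packages.sh):
+
+    cat arvados-api-server.sh step2.sh postinst.sh > postinst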
diff --git a/build/rails-package-scripts/arvados-api-server.sh b/build/rails-package-scripts/arvados-api-server.sh
new file mode 100644 (file)
index 0000000..6d11ea8
--- /dev/null
@@ -0,0 +1,36 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This file declares variables common to all scripts for one Rails package.
+
+PACKAGE_NAME=arvados-api-server
+INSTALL_PATH=/var/www/arvados-api
+CONFIG_PATH=/etc/arvados/api
+DOC_URL="http://doc.arvados.org/install/install-api-server.html#configure"
+
+RAILSPKG_DATABASE_LOAD_TASK=db:structure:load
+setup_extra_conffiles() {
+    setup_conffile initializers/omniauth.rb
+}
+
+setup_before_nginx_restart() {
+  # initialize git_internal_dir
+  # usually /var/lib/arvados/internal.git (set in application.default.yml )
+  if [ "$APPLICATION_READY" = "1" ]; then
+      GIT_INTERNAL_DIR=$($COMMAND_PREFIX bundle exec rake config:check 2>&1 | grep git_internal_dir | awk '{ print $2 }')
+      if [ ! -e "$GIT_INTERNAL_DIR" ]; then
+        run_and_report "Creating git_internal_dir '$GIT_INTERNAL_DIR'" \
+          mkdir -p "$GIT_INTERNAL_DIR"
+        run_and_report "Initializing git_internal_dir '$GIT_INTERNAL_DIR'" \
+          git init --quiet --bare $GIT_INTERNAL_DIR
+      else
+        echo "Initializing git_internal_dir $GIT_INTERNAL_DIR: directory exists, skipped."
+      fi
+      run_and_report "Making sure '$GIT_INTERNAL_DIR' has the right permission" \
+         chown -R "$WWW_OWNER:" "$GIT_INTERNAL_DIR"
+  else
+      echo "Initializing git_internal_dir... skipped."
+  fi
+}
diff --git a/build/rails-package-scripts/arvados-sso-server.sh b/build/rails-package-scripts/arvados-sso-server.sh
new file mode 100644 (file)
index 0000000..fff582b
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This file declares variables common to all scripts for one Rails package.
+
+PACKAGE_NAME=arvados-sso-server
+INSTALL_PATH=/var/www/arvados-sso
+CONFIG_PATH=/etc/arvados/sso
+DOC_URL="http://doc.arvados.org/install/install-sso.html#configure"
+RAILSPKG_DATABASE_LOAD_TASK=db:schema:load
+RAILSPKG_SUPPORTS_CONFIG_CHECK=0
diff --git a/build/rails-package-scripts/arvados-workbench.sh b/build/rails-package-scripts/arvados-workbench.sh
new file mode 100644 (file)
index 0000000..878c137
--- /dev/null
@@ -0,0 +1,11 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This file declares variables common to all scripts for one Rails package.
+
+PACKAGE_NAME=arvados-workbench
+INSTALL_PATH=/var/www/arvados-workbench
+CONFIG_PATH=/etc/arvados/workbench
+DOC_URL="http://doc.arvados.org/install/install-workbench-app.html#configure"
diff --git a/build/rails-package-scripts/postinst.sh b/build/rails-package-scripts/postinst.sh
new file mode 100644 (file)
index 0000000..789a7ee
--- /dev/null
@@ -0,0 +1,262 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This code runs after package variable definitions and step2.sh.
+
+set -e
+
+DATABASE_READY=1
+APPLICATION_READY=1
+
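+# Prefer RVM's default Ruby when RVM is installed; otherwise fall back to the
+# system Ruby.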
+if [ -s "$HOME/.rvm/scripts/rvm" ] || [ -s "/usr/local/rvm/scripts/rvm" ]; then
+    COMMAND_PREFIX="/usr/local/rvm/bin/rvm-exec default"
+else
+    COMMAND_PREFIX=
+fi
+
+report_not_ready() {
+    local ready_flag="$1"; shift
+    local config_file="$1"; shift
+    if [ "1" != "$ready_flag" ]; then cat >&2 <<EOF
+
+PLEASE NOTE:
+
+The $PACKAGE_NAME package was not configured completely because
+$config_file needs some tweaking.
+Please refer to the documentation at
+<$DOC_URL> for more details.
+
+When $(basename "$config_file") has been modified,
+reconfigure or reinstall this package.
+
+EOF
+    fi
+}
+
+report_web_service_warning() {
+    local warning="$1"; shift
+    cat >&2 <<EOF
+
+WARNING: $warning.
+
+To override, set the WEB_SERVICE environment variable to the name of the service
+hosting the Rails server.
+
+For Debian-based systems, reconfigure this package with dpkg-reconfigure.
+
+For RPM-based systems, reinstall this package.
+
+EOF
+}
+
+run_and_report() {
+    # Usage: run_and_report ACTION_MSG CMD
+    # This is the usual wrapper that prints ACTION_MSG, runs CMD, then writes
+    # a message about whether CMD succeeded or failed.  Returns the exit code
+    # of CMD.
+    local action_message="$1"; shift
+    local retcode=0
+    echo -n "$action_message..."
+    if "$@"; then
+        echo " done."
+    else
+        retcode=$?
+        echo " failed."
+    fi
+    return $retcode
+}
+
+setup_confdirs() {
+    for confdir in "$@"; do
+        if [ ! -d "$confdir" ]; then
+            install -d -g "$WWW_OWNER" -m 0750 "$confdir"
+        fi
+    done
+}
+
+setup_conffile() {
+    # Usage: setup_conffile CONFFILE_PATH [SOURCE_PATH]
+    # Both paths are relative to RELEASE_CONFIG_PATH.
+    # This function will try to safely ensure that a symbolic link for
+    # the configuration file points from RELEASE_CONFIG_PATH to CONFIG_PATH.
+    # If SOURCE_PATH is given, this function will try to install that file as
+    # the configuration file in CONFIG_PATH, and return 1 if the file in
+    # CONFIG_PATH is unmodified from the source.
+    local conffile_relpath="$1"; shift
+    local conffile_source="$1"
+    local release_conffile="$RELEASE_CONFIG_PATH/$conffile_relpath"
+    local etc_conffile="$CONFIG_PATH/$(basename "$conffile_relpath")"
+
+    # Note that -h can return true and -e will return false simultaneously
+    # when the target is a dangling symlink.  We're okay with that outcome,
+    # so check -h first.
+    if [ ! -h "$release_conffile" ]; then
+        if [ ! -e "$release_conffile" ]; then
+            ln -s "$etc_conffile" "$release_conffile"
+        # If there's a config file in /var/www identical to the one in /etc,
+        # overwrite it with a symlink after porting its permissions.
+        elif cmp --quiet "$release_conffile" "$etc_conffile"; then
+            local ownership="$(stat -c "%u:%g" "$release_conffile")"
+            local owning_group="${ownership#*:}"
+            if [ 0 != "$owning_group" ]; then
+                chgrp "$owning_group" "$CONFIG_PATH" /etc/arvados
+            fi
+            chown "$ownership" "$etc_conffile"
+            chmod --reference="$release_conffile" "$etc_conffile"
+            ln --force -s "$etc_conffile" "$release_conffile"
+        fi
+    fi
+
+    if [ -n "$conffile_source" ]; then
+        if [ ! -e "$etc_conffile" ]; then
+            install -g "$WWW_OWNER" -m 0640 \
+                    "$RELEASE_CONFIG_PATH/$conffile_source" "$etc_conffile"
+            return 1
+        # Even if $etc_conffile already existed, it might be unmodified from
+        # the source.  This is especially likely when a user installs, updates
+        # database.yml, then reconfigures before they update application.yml.
+        # Use cmp to be sure whether $etc_conffile is modified.
+        elif cmp --quiet "$RELEASE_CONFIG_PATH/$conffile_source" "$etc_conffile"; then
+            return 1
+        fi
+    fi
+}
+
+prepare_database() {
+  DB_MIGRATE_STATUS=`$COMMAND_PREFIX bundle exec rake db:migrate:status 2>&1 || true`
+  if echo "$DB_MIGRATE_STATUS" | grep -qF 'Schema migrations table does not exist yet.'; then
+      # The database exists, but the migrations table doesn't.
+      run_and_report "Setting up database" $COMMAND_PREFIX bundle exec \
+                     rake "$RAILSPKG_DATABASE_LOAD_TASK" db:seed
+  elif echo "$DB_MIGRATE_STATUS" | grep -q '^database: '; then
+      run_and_report "Running db:migrate" \
+                     $COMMAND_PREFIX bundle exec rake db:migrate
+  elif echo "$DB_MIGRATE_STATUS" | grep -q 'database .* does not exist'; then
+      if ! run_and_report "Running db:setup" \
+           $COMMAND_PREFIX bundle exec rake db:setup 2>/dev/null; then
+          echo "Warning: unable to set up database." >&2
+          DATABASE_READY=0
+      fi
+  else
+    echo "Warning: Database is not ready to set up. Skipping database setup." >&2
+    DATABASE_READY=0
+  fi
+}
+
+configure_version() {
+  if [ -n "$WEB_SERVICE" ]; then
+      SERVICE_MANAGER=$(guess_service_manager)
+  elif WEB_SERVICE=$(list_services_systemd | grep -E '^(nginx|httpd)'); then
+      SERVICE_MANAGER=systemd
+  elif WEB_SERVICE=$(list_services_service \
+                         | grep -Eo '\b(nginx|httpd)[^[:space:]]*'); then
+      SERVICE_MANAGER=service
+  fi
+
+  if [ -z "$WEB_SERVICE" ]; then
+    report_web_service_warning "Web service (Nginx or Apache) not found"
+  elif [ "$WEB_SERVICE" != "$(echo "$WEB_SERVICE" | head -n 1)" ]; then
+    WEB_SERVICE=$(echo "$WEB_SERVICE" | head -n 1)
+    report_web_service_warning \
+        "Multiple web services found.  Choosing the first one ($WEB_SERVICE)"
+  fi
+
+  if [ -e /etc/redhat-release ]; then
+      # Recognize any service that starts with "nginx"; e.g., nginx16.
+      if [ "$WEB_SERVICE" != "${WEB_SERVICE#nginx}" ]; then
+        WWW_OWNER=nginx
+      else
+        WWW_OWNER=apache
+      fi
+  else
+      # Assume we're on a Debian-based system for now.
+      # Both Apache and Nginx run as www-data by default.
+      WWW_OWNER=www-data
+  fi
+
+  echo
+  echo "Assumption: $WEB_SERVICE is configured to serve Rails from"
+  echo "            $RELEASE_PATH"
+  echo "Assumption: $WEB_SERVICE and passenger run as $WWW_OWNER"
+  echo
+
+  echo -n "Creating symlinks to configuration in $CONFIG_PATH ..."
+  setup_confdirs /etc/arvados "$CONFIG_PATH"
+  setup_conffile environments/production.rb environments/production.rb.example \
+      || true
+  setup_conffile application.yml application.yml.example || APPLICATION_READY=0
+  if [ -n "$RAILSPKG_DATABASE_LOAD_TASK" ]; then
+      setup_conffile database.yml database.yml.example || DATABASE_READY=0
+  fi
+  setup_extra_conffiles
+  echo "... done."
+
+  # Before we do anything else, make sure some directories and files are in place
+  if [ ! -e $SHARED_PATH/log ]; then mkdir -p $SHARED_PATH/log; fi
+  if [ ! -e $RELEASE_PATH/tmp ]; then mkdir -p $RELEASE_PATH/tmp; fi
+  if [ ! -e $RELEASE_PATH/log ]; then ln -s $SHARED_PATH/log $RELEASE_PATH/log; fi
+  if [ ! -e $SHARED_PATH/log/production.log ]; then touch $SHARED_PATH/log/production.log; fi
+
+  cd "$RELEASE_PATH"
+  export RAILS_ENV=production
+
+  if ! $COMMAND_PREFIX bundle --version >/dev/null; then
+      run_and_report "Installing bundler" $COMMAND_PREFIX gem install bundler
+  fi
+  fi
+
+  run_and_report "Running bundle install" \
+      $COMMAND_PREFIX bundle install --path $SHARED_PATH/vendor_bundle --local --quiet
+
+  echo -n "Ensuring directory and file permissions ..."
+  # Ensure correct ownership of a few files
+  chown "$WWW_OWNER:" $RELEASE_PATH/config/environment.rb
+  chown "$WWW_OWNER:" $RELEASE_PATH/config.ru
+  chown "$WWW_OWNER:" $RELEASE_PATH/Gemfile.lock
+  chown -R "$WWW_OWNER:" $RELEASE_PATH/tmp || true
+  chown -R "$WWW_OWNER:" $SHARED_PATH/log
+  case "$RAILSPKG_DATABASE_LOAD_TASK" in
+      db:schema:load) chown "$WWW_OWNER:" $RELEASE_PATH/db/schema.rb ;;
+      db:structure:load) chown "$WWW_OWNER:" $RELEASE_PATH/db/structure.sql ;;
+  esac
+  chmod 644 $SHARED_PATH/log/*
+  chmod -R 2775 $RELEASE_PATH/tmp || true
+  echo "... done."
+
+  if [ -n "$RAILSPKG_DATABASE_LOAD_TASK" ]; then
+      prepare_database
+  fi
+
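+  # Comparing the concatenated flags against "11" checks that both
+  # RAILSPKG_SUPPORTS_CONFIG_CHECK and APPLICATION_READY are 1 in a single test.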
+  if [ 11 = "$RAILSPKG_SUPPORTS_CONFIG_CHECK$APPLICATION_READY" ]; then
+      run_and_report "Checking application.yml for completeness" \
+          $COMMAND_PREFIX bundle exec rake config:check || APPLICATION_READY=0
+  fi
+
+  # precompile assets; thankfully this does not take long
+  if [ "$APPLICATION_READY" = "1" ]; then
+      run_and_report "Precompiling assets" \
+          $COMMAND_PREFIX bundle exec rake assets:precompile -q -s 2>/dev/null \
+          || APPLICATION_READY=0
+  else
+      echo "Precompiling assets... skipped."
+  fi
+  chown -R "$WWW_OWNER:" $RELEASE_PATH/tmp
+
+  setup_before_nginx_restart
+
+  if [ -n "$SERVICE_MANAGER" ]; then
+      service_command "$SERVICE_MANAGER" restart "$WEB_SERVICE"
+  fi
+}
+
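+# Debian maintainer scripts receive an action verb ("configure"); rpm
+# scriptlets receive the count of installed package instances (0, 1, or 2).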
+if [ "$1" = configure ]; then
+  # This is a debian-based system
+  configure_version
+elif [ "$1" = "0" ] || [ "$1" = "1" ] || [ "$1" = "2" ]; then
+  # This is an rpm-based system
+  configure_version
+fi
+
+report_not_ready "$DATABASE_READY" "$CONFIG_PATH/database.yml"
+report_not_ready "$APPLICATION_READY" "$CONFIG_PATH/application.yml"
diff --git a/build/rails-package-scripts/postrm.sh b/build/rails-package-scripts/postrm.sh
new file mode 100644 (file)
index 0000000..0e282ba
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This code runs after package variable definitions and step2.sh.
+
+set -e
+
+purge () {
+  rm -rf $SHARED_PATH/vendor_bundle
+  rm -rf $SHARED_PATH/log
+  rm -rf $CONFIG_PATH
+  rmdir $SHARED_PATH || true
+  rmdir $INSTALL_PATH || true
+}
+
+if [ "$1" = 'purge' ]; then
+  # This is a debian-based system and purge was requested
+  purge
+elif [ "$1" = "0" ]; then
+  # This is an rpm-based system; no guarantees are made, so always purge.
+  # Apparently yum doesn't actually remember what it installed, so clean
+  # those files up here, then purge.
+  rm -rf $RELEASE_PATH
+  purge
+fi
diff --git a/build/rails-package-scripts/prerm.sh b/build/rails-package-scripts/prerm.sh
new file mode 100644 (file)
index 0000000..9816b14
--- /dev/null
@@ -0,0 +1,26 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This code runs after package variable definitions and step2.sh.
+
+remove () {
+  rm -f $RELEASE_PATH/config/database.yml
+  rm -f $RELEASE_PATH/config/environments/production.rb
+  rm -f $RELEASE_PATH/config/application.yml
+  # Old API server configuration file.
+  rm -f $RELEASE_PATH/config/initializers/omniauth.rb
+  rm -rf $RELEASE_PATH/public/assets/
+  rm -rf $RELEASE_PATH/tmp
+  rm -rf $RELEASE_PATH/.bundle
+  rm -rf $RELEASE_PATH/log
+}
+
+if [ "$1" = 'remove' ]; then
+  # This is a debian-based system and removal was requested
+  remove
+elif [ "$1" = "0" ] || [ "$1" = "1" ] || [ "$1" = "2" ]; then
+  # This is an rpm-based system
+  remove
+fi
diff --git a/build/rails-package-scripts/step2.sh b/build/rails-package-scripts/step2.sh
new file mode 100644 (file)
index 0000000..482d27a
--- /dev/null
@@ -0,0 +1,82 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This code runs after package variable definitions, before the actual
+# pre/post package work, to set some variable and function defaults.
+
+if [ -z "$INSTALL_PATH" ]; then
+    cat >&2 <<EOF
+
+PACKAGE BUILD ERROR: $0 is missing package metadata.
+
+This package is buggy.  Please mail <support@curoverse.com> to let
+us know the name and version number of the package you tried to
+install, and we'll get it fixed.
+
+EOF
+    exit 3
+fi
+
+RELEASE_PATH=$INSTALL_PATH/current
+RELEASE_CONFIG_PATH=$RELEASE_PATH/config
+SHARED_PATH=$INSTALL_PATH/shared
+
+RAILSPKG_SUPPORTS_CONFIG_CHECK=${RAILSPKG_SUPPORTS_CONFIG_CHECK:-1}
+if ! type setup_extra_conffiles >/dev/null 2>&1; then
+    setup_extra_conffiles() { return; }
+fi
+if ! type setup_before_nginx_restart >/dev/null 2>&1; then
+    setup_before_nginx_restart() { return; }
+fi
+
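+
+# /run/systemd/system exists only when systemd is the running init system
+# (the same check sd_booted(3) performs).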
+if [ -e /run/systemd/system ]; then
+    USING_SYSTEMD=1
+else
+    USING_SYSTEMD=0
+fi
+
+if which service >/dev/null 2>&1; then
+    USING_SERVICE=1
+else
+    USING_SERVICE=0
+fi
+
+guess_service_manager() {
+    if [ 1 = "$USING_SYSTEMD" ]; then
+        echo systemd
+    elif [ 1 = "$USING_SERVICE" ]; then
+        echo service
+    else
+        return 1
+    fi
+}
+
+list_services_systemd() {
+    test 1 = "$USING_SYSTEMD" || return
+    # Print only service names, without the `.service` suffix.
+    systemctl list-unit-files '*.service' \
+        | awk '($1 ~ /\.service/){print substr($1, 1, length($1) - 8)}'
+}
+
+list_services_service() {
+    test 1 = "$USING_SERVICE" || return
+    # Output is completely different across Debian and Red Hat.
+    # We can't really parse it.
+    service --status-all 2>/dev/null
+}
+
+service_command() {
+    local service_manager="$1"; shift
+    local command="$1"; shift
+    local service="$1"; shift
+    case "$service_manager" in
+        systemd) systemctl "$command" "$service" ;;
+        service) service "$service" "$command" ;;
+    esac
+}
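+
+# Minimal usage sketch: restart a web service under whichever init system
+# was detected (the postinst script calls this the same way), e.g.
+#
+#   if sm="$(guess_service_manager)"; then
+#       service_command "$sm" restart nginx
+#   fi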
+
+if ! guess_service_manager >/dev/null; then
+    echo "WARNING: Unsupported init system. Can't manage web service." >&2
+fi
diff --git a/build/run-build-docker-images.sh b/build/run-build-docker-images.sh
new file mode 100755 (executable)
index 0000000..fd7b38e
--- /dev/null
@@ -0,0 +1,174 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+function usage {
+    echo >&2
+    echo >&2 "usage: $0 [options]"
+    echo >&2
+    echo >&2 "$0 options:"
+    echo >&2 "  -t, --tags [csv_tags]         comma separated tags"
+    echo >&2 "  -u, --upload                  Upload the images (docker push)"
+    echo >&2 "  -h, --help                    Display this help and exit"
+    echo >&2
+    echo >&2 "  If no options are given, just builds the images."
+}
+
+upload=false
+
+# NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
+TEMP=`getopt -o hut: \
+    --long help,upload,tags: \
+    -n "$0" -- "$@"`
+
+if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
+# Note the quotes around `$TEMP': they are essential!
+eval set -- "$TEMP"
+
+while [ $# -ge 1 ]
+do
+    case $1 in
+        -u | --upload)
+            upload=true
+            shift
+            ;;
+        -t | --tags)
+            case "$2" in
+                "")
+                  echo "ERROR: --tags needs a parameter";
+                  usage;
+                  exit 1
+                  ;;
+                *)
+                  tags=$2;
+                  shift 2
+                  ;;
+            esac
+            ;;
+        --)
+            shift
+            break
+            ;;
+        *)
+            usage
+            exit 1
+            ;;
+    esac
+done
+
+
+EXITCODE=0
+
+COLUMNS=80
+
+title () {
+    local txt="********** $1 **********"
+    printf "\n%*s\n\n" $(((${#txt}+$COLUMNS)/2)) "$txt"
+}
+
+docker_push () {
+    if [[ ! -z "$tags" ]]
+    then
+        for tag in $( echo $tags|tr "," " " )
+        do
+             $DOCKER tag $1 $1:$tag
+        done
+    fi
+
+    # Sometimes docker push fails; retry it a few times if necessary.
+    for i in `seq 1 5`; do
+        $DOCKER push $*
+        ECODE=$?
+        if [[ "$ECODE" == "0" ]]; then
+            break
+        fi
+    done
+
+    if [[ "$ECODE" != "0" ]]; then
+        title "!!!!!! docker push $* failed !!!!!!"
+        EXITCODE=$(($EXITCODE + $ECODE))
+    fi
+}
+
+timer_reset() {
+    t0=$SECONDS
+}
+
+timer() {
+    echo -n "$(($SECONDS - $t0))s"
+}
+
+# Sanity check
+if ! [[ -n "$WORKSPACE" ]]; then
+    echo >&2
+    echo >&2 "Error: WORKSPACE environment variable not set"
+    echo >&2
+    exit 1
+fi
+
+echo $WORKSPACE
+
+# find the docker binary
+DOCKER=`which docker.io`
+
+if [[ "$DOCKER" == "" ]]; then
+    DOCKER=`which docker`
+fi
+
+if [[ "$DOCKER" == "" ]]; then
+    title "Error: you need to have docker installed. Could not find the docker executable."
+    exit 1
+fi
+
+# DOCKER
+title "Starting docker build"
+
+timer_reset
+
+# clean up the docker build environment
+cd "$WORKSPACE"
+
+title "Starting arvbox build localdemo"
+
+tools/arvbox/bin/arvbox build localdemo
+ECODE=$?
+
+if [[ "$ECODE" != "0" ]]; then
+    title "!!!!!! docker BUILD FAILED !!!!!!"
+    EXITCODE=$(($EXITCODE + $ECODE))
+fi
+
+title "Starting arvbox build dev"
+
+tools/arvbox/bin/arvbox build dev
+
+ECODE=$?
+
+if [[ "$ECODE" != "0" ]]; then
+    title "!!!!!! docker BUILD FAILED !!!!!!"
+    EXITCODE=$(($EXITCODE + $ECODE))
+fi
+
+title "docker build complete (`timer`)"
+
+title "uploading images"
+
+timer_reset
+
+if [[ "$EXITCODE" != "0" ]]; then
+    title "upload arvados images SKIPPED because build failed"
+else
+    if [[ $upload == true ]]; then
+        ## 20150526 nico -- *sometimes* dockerhub needs re-login
+        ## even though credentials are already in .dockercfg
+        docker login -u arvados
+
+        docker_push arvados/arvbox-dev
+        docker_push arvados/arvbox-demo
+        title "upload arvados images complete (`timer`)"
+    else
+        title "upload arvados images SKIPPED because no --upload option set"
+    fi
+fi
+
+exit $EXITCODE
diff --git a/build/run-build-docker-jobs-image.sh b/build/run-build-docker-jobs-image.sh
new file mode 100755 (executable)
index 0000000..7d7e1fc
--- /dev/null
@@ -0,0 +1,236 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+function usage {
+    echo >&2
+    echo >&2 "usage: WORKSPACE=/path/to/arvados $0 [options]"
+    echo >&2
+    echo >&2 "$0 options:"
+    echo >&2 "  -t, --tags                    version tag for docker"
+    echo >&2 "  -r, --repo                    Arvados package repot to use: dev, testing, stable (default: dev)"
+    echo >&2 "  -u, --upload                  Upload the images (docker push)"
+    echo >&2 "  --no-cache                    Don't use build cache"
+    echo >&2 "  -h, --help                    Display this help and exit"
+    echo >&2
+    echo >&2 "  WORKSPACE=path                Path to the Arvados source tree to build from"
+    echo >&2
+}
+upload=false
+REPO=dev
+
+# NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
+TEMP=`getopt -o hut:r: \
+    --long help,upload,no-cache,tags:,repo: \
+    -n "$0" -- "$@"`
+
+if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
+# Note the quotes around `$TEMP': they are essential!
+eval set -- "$TEMP"
+
+while [ $# -ge 1 ]
+do
+    case $1 in
+        -u | --upload)
+            upload=true
+            shift
+            ;;
+        --no-cache)
+            NOCACHE=--no-cache
+            shift
+            ;;
+        -t | --tags)
+            case "$2" in
+                "")
+                  echo "ERROR: --tags needs a parameter";
+                  usage;
+                  exit 1
+                  ;;
+                *)
+                  version_tag="$2";
+                  shift 2
+                  ;;
+            esac
+            ;;
+        -r | --repo)
+            case "$2" in
+                "")
+                  echo "ERROR: --repo needs a parameter";
+                  usage;
+                  exit 1
+                  ;;
+                *)
+                  REPO="$2";
+                  shift 2
+                  ;;
+            esac
+            ;;
+        --)
+            shift
+            break
+            ;;
+        *)
+            usage
+            exit 1
+            ;;
+    esac
+done
+
+EXITCODE=0
+
+exit_cleanly() {
+    trap - INT
+    report_outcomes
+    exit $EXITCODE
+}
+
+# Sanity check
+if ! [[ -n "$WORKSPACE" ]]; then
+    usage;
+    echo >&2 "Error: WORKSPACE environment variable not set"
+    echo >&2
+    exit 1
+fi
+
+echo $WORKSPACE
+
+COLUMNS=80
+. $WORKSPACE/build/run-library.sh
+
+docker_push () {
+    # Sometimes docker push fails; retry it a few times if necessary.
+    for i in `seq 1 5`; do
+        $DOCKER push $*
+        ECODE=$?
+        if [[ "$ECODE" == "0" ]]; then
+            break
+        fi
+    done
+
+    if [[ "$ECODE" != "0" ]]; then
+        EXITCODE=$(($EXITCODE + $ECODE))
+    fi
+    checkexit $ECODE "docker push $*"
+}
+
+# find the docker binary
+DOCKER=`which docker.io`
+
+if [[ "$DOCKER" == "" ]]; then
+    DOCKER=`which docker`
+fi
+
+if [[ "$DOCKER" == "" ]]; then
+    title "Error: you need to have docker installed. Could not find the docker executable."
+    exit 1
+fi
+
+# DOCKER
+title "Starting docker build"
+
+timer_reset
+
+# clean up the docker build environment
+cd "$WORKSPACE"
+
+if [[ -z "$ARVADOS_BUILDING_VERSION" ]] && ! [[ -z "$version_tag" ]]; then
+       ARVADOS_BUILDING_VERSION="$version_tag"
+       ARVADOS_BUILDING_ITERATION="1"
+fi
+
+python_sdk_ts=$(cd sdk/python && timestamp_from_git)
+cwl_runner_ts=$(cd sdk/cwl && timestamp_from_git)
+
+python_sdk_version=$(cd sdk/python && nohash_version_from_git 0.1)
+cwl_runner_version=$(cd sdk/cwl && nohash_version_from_git 1.0)
+
+if [[ $python_sdk_ts -gt $cwl_runner_ts ]]; then
+    cwl_runner_version=$(cd sdk/python && nohash_version_from_git 1.0)
+fi
+
+echo cwl_runner_version $cwl_runner_version python_sdk_version $python_sdk_version
+
+if [[ "${python_sdk_version}" != "${ARVADOS_BUILDING_VERSION}" ]]; then
+       python_sdk_version="${python_sdk_version}-1"
+else
+       python_sdk_version="${ARVADOS_BUILDING_VERSION}-${ARVADOS_BUILDING_ITERATION}"
+fi
+
+cwl_runner_version_orig=$cwl_runner_version
+
+if [[ "${cwl_runner_version}" != "${ARVADOS_BUILDING_VERSION}" ]]; then
+       cwl_runner_version="${cwl_runner_version}-1"
+else
+       cwl_runner_version="${ARVADOS_BUILDING_VERSION}-${ARVADOS_BUILDING_ITERATION}"
+fi
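+
+# Illustrative (hypothetical values): when ARVADOS_BUILDING_VERSION is unset,
+# the package pins computed above look like 1.3.0.20190314141126-1, while
+# cwl_runner_version_orig keeps the un-suffixed 1.3.0.20190314141126 that is
+# used to tag the image below.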
+
+cd docker/jobs
+docker build $NOCACHE \
+       --build-arg python_sdk_version=${python_sdk_version} \
+       --build-arg cwl_runner_version=${cwl_runner_version} \
+       --build-arg repo_version=${REPO} \
+       -t arvados/jobs:$cwl_runner_version_orig .
+
+ECODE=$?
+
+if [[ "$ECODE" != "0" ]]; then
+    EXITCODE=$(($EXITCODE + $ECODE))
+fi
+
+checkexit $ECODE "docker build"
+title "docker build complete (`timer`)"
+
+if [[ "$ECODE" != "0" ]]; then
+  exit_cleanly
+fi
+
+timer_reset
+
+if docker --version |grep " 1\.[0-9]\." ; then
+    # Docker versions prior to 1.10 require the -f flag;
+    # the flag was removed in Docker 1.12.
+    FORCE=-f
+fi
+
+#docker export arvados/jobs:$cwl_runner_version_orig | docker import - arvados/jobs:$cwl_runner_version_orig
+
+if ! [[ -z "$version_tag" ]]; then
+    docker tag $FORCE arvados/jobs:$cwl_runner_version_orig arvados/jobs:"$version_tag"
+else
+    docker tag $FORCE arvados/jobs:$cwl_runner_version_orig arvados/jobs:latest
+fi
+
+ECODE=$?
+
+if [[ "$ECODE" != "0" ]]; then
+    EXITCODE=$(($EXITCODE + $ECODE))
+fi
+
+checkexit $ECODE "docker tag"
+title "docker tag complete (`timer`)"
+
+title "uploading images"
+
+timer_reset
+
+if [[ "$ECODE" != "0" ]]; then
+    title "upload arvados images SKIPPED because build or tag failed"
+else
+    if [[ $upload == true ]]; then
+        ## 20150526 nico -- *sometimes* dockerhub needs re-login
+        ## even though credentials are already in .dockercfg
+        docker login -u arvados
+        if ! [[ -z "$version_tag" ]]; then
+            docker_push arvados/jobs:"$version_tag"
+        else
+            docker_push arvados/jobs:$cwl_runner_version_orig
+            docker_push arvados/jobs:latest
+        fi
+        title "upload arvados images finished (`timer`)"
+    else
+        title "upload arvados images SKIPPED because no --upload option set (`timer`)"
+    fi
+fi
+
+exit_cleanly
diff --git a/build/run-build-packages-all-targets.sh b/build/run-build-packages-all-targets.sh
new file mode 100755 (executable)
index 0000000..85c498e
--- /dev/null
@@ -0,0 +1,113 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
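+# read -d '' reads up to a NUL byte that never arrives in the heredoc, so it
+# always returns nonzero; that is harmless here because set -e is only
+# enabled further down.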
+read -rd "\000" helpmessage <<EOF
+$(basename $0): Orchestrate run-build-packages.sh for every target
+
+Syntax:
+        WORKSPACE=/path/to/arvados $(basename $0) [options]
+
+Options:
+
+--command
+    Build command to execute (default: use built-in Docker image command)
+--test-packages
+    Run package install tests
+--debug
+    Output debug information (default: false)
+--build-version <string>
+    Version to build (default:
+    \$ARVADOS_BUILDING_VERSION-\$ARVADOS_BUILDING_ITERATION or
+    0.1.timestamp.commithash)
+
+WORKSPACE=path         Path to the Arvados source tree to build packages from
+
+EOF
+
+if ! [[ -n "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: WORKSPACE environment variable not set"
+  echo >&2
+  exit 1
+fi
+
+if ! [[ -d "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: $WORKSPACE is not a directory"
+  echo >&2
+  exit 1
+fi
+
+set -e
+
+PARSEDOPTS=$(getopt --name "$0" --longoptions \
+    help,test-packages,debug,command:,only-test:,build-version: \
+    -- "" "$@")
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
+COMMAND=
+DEBUG=
+TEST_PACKAGES=
+ONLY_TEST=
+
+eval set -- "$PARSEDOPTS"
+while [ $# -gt 0 ]; do
+    case "$1" in
+        --help)
+            echo >&2 "$helpmessage"
+            echo >&2
+            exit 1
+            ;;
+        --debug)
+            DEBUG="--debug"
+            ;;
+        --command)
+            COMMAND="$2"; shift
+            ;;
+        --test-packages)
+            TEST_PACKAGES="--test-packages"
+            ;;
+        --only-test)
+            ONLY_TEST="$1 $2"; shift
+            ;;
+        --build-version)
+            ARVADOS_BUILDING_VERSION="$2"; shift
+            ;;
+        --)
+            if [ $# -gt 1 ]; then
+                echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help"
+                exit 1
+            fi
+            ;;
+    esac
+    shift
+done
+
+cd $(dirname $0)
+
+FINAL_EXITCODE=0
+
+for dockerfile_path in $(find -name Dockerfile | grep package-build-dockerfiles); do
+    if ./run-build-packages-one-target.sh --target "$(basename $(dirname "$dockerfile_path"))" --command "$COMMAND" --build-version "$ARVADOS_BUILDING_VERSION" $DEBUG $TEST_PACKAGES $ONLY_TEST ; then
+        true
+    else
+        FINAL_EXITCODE=$?
+        echo
+        echo "Build packages failed for $(basename $(dirname "$dockerfile_path"))"
+        echo
+    fi
+done
+
+if test $FINAL_EXITCODE != 0 ; then
+    echo
+    echo "Build packages failed with code $FINAL_EXITCODE" >&2
+    echo
+fi
+
+exit $FINAL_EXITCODE
diff --git a/build/run-build-packages-one-target.sh b/build/run-build-packages-one-target.sh
new file mode 100755 (executable)
index 0000000..9b21b58
--- /dev/null
@@ -0,0 +1,287 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+read -rd "\000" helpmessage <<EOF
+$(basename $0): Orchestrate run-build-packages.sh for one target
+
+Syntax:
+        WORKSPACE=/path/to/arvados $(basename $0) [options]
+
+--target <target>
+    Distribution to build packages for (default: debian8)
+--command
+    Build command to execute (default: use built-in Docker image command)
+--test-packages
+    Run package install test script "test-packages-[target].sh"
+--debug
+    Output debug information (default: false)
+--only-build <package>
+    Build only a specific package
+--only-test <package>
+    Test only a specific package
+--force-test
+    Test even if there is no new untested package
+--build-version <string>
+    Version to build (default:
+    \$ARVADOS_BUILDING_VERSION-\$ARVADOS_BUILDING_ITERATION or
+    0.1.timestamp.commithash)
+
+WORKSPACE=path         Path to the Arvados source tree to build packages from
+
+EOF
+
+set -e
+
+if ! [[ -n "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: WORKSPACE environment variable not set"
+  echo >&2
+  exit 1
+fi
+
+if ! [[ -d "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: $WORKSPACE is not a directory"
+  echo >&2
+  exit 1
+fi
+
+PARSEDOPTS=$(getopt --name "$0" --longoptions \
+    help,debug,test-packages,target:,command:,only-test:,force-test,only-build:,build-version: \
+    -- "" "$@")
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
+TARGET=debian8
+COMMAND=
+DEBUG=
+
+eval set -- "$PARSEDOPTS"
+while [ $# -gt 0 ]; do
+    case "$1" in
+        --help)
+            echo >&2 "$helpmessage"
+            echo >&2
+            exit 1
+            ;;
+        --target)
+            TARGET="$2"; shift
+            ;;
+        --only-test)
+            test_packages=1
+            testing_one_package=1
+            packages="$2"; shift
+            ;;
+        --force-test)
+            FORCE_TEST=true
+            ;;
+        --only-build)
+            ONLY_BUILD="$2"; shift
+            ;;
+        --debug)
+            DEBUG=" --debug"
+            ARVADOS_DEBUG="1"
+            ;;
+        --command)
+            COMMAND="$2"; shift
+            ;;
+        --test-packages)
+            test_packages=1
+            ;;
+        --build-version)
+            if [[ -z "$2" ]]; then
+                :
+            elif ! [[ "$2" =~ (.*)-(.*) ]]; then
+                echo >&2 "FATAL: --build-version '$2' does not include an iteration. Try '${2}-1'?"
+                exit 1
+            else
+                ARVADOS_BUILDING_VERSION="${BASH_REMATCH[1]}"
+                ARVADOS_BUILDING_ITERATION="${BASH_REMATCH[2]}"
+            fi
+            shift
+            ;;
+        --)
+            if [ $# -gt 1 ]; then
+                echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help"
+                exit 1
+            fi
+            ;;
+    esac
+    shift
+done
+
+set -e
+
+if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then
+    echo "build version='$ARVADOS_BUILDING_VERSION', package iteration='$ARVADOS_BUILDING_ITERATION'"
+fi
+
+if [[ -n "$test_packages" ]]; then
+  if [[ -n "$(find $WORKSPACE/packages/$TARGET -name '*.rpm')" ]] ; then
+    set +e
+    /usr/bin/which createrepo >/dev/null
+    if [[ "$?" != "0" ]]; then
+      echo >&2
+      echo >&2 "Error: please install createrepo. E.g. sudo apt-get install createrepo"
+      echo >&2
+      exit 1
+    fi
+    set -e
+    createrepo $WORKSPACE/packages/$TARGET
+  fi
+
+  if [[ -n "$(find $WORKSPACE/packages/$TARGET -name '*.deb')" ]] ; then
+    (cd $WORKSPACE/packages/$TARGET
+      dpkg-scanpackages .  2> >(grep -v 'warning' 1>&2) | tee Packages | gzip -c > Packages.gz
+      apt-ftparchive -o APT::FTPArchive::Release::Origin=Arvados release . > Release
+    )
+  fi
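+
+  # After this point $WORKSPACE/packages/$TARGET doubles as a local package
+  # repository (yum metadata and/or apt Packages/Release files) that the
+  # package-test image can install from.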
+
+  COMMAND="/jenkins/package-testing/test-packages-$TARGET.sh"
+  IMAGE="arvados/package-test:$TARGET"
+else
+  IMAGE="arvados/build:$TARGET"
+  if [[ "$COMMAND" != "" ]]; then
+    COMMAND="/usr/local/rvm/bin/rvm-exec default bash /jenkins/$COMMAND --target $TARGET$DEBUG"
+  fi
+fi
+
+JENKINS_DIR=$(dirname "$(readlink -e "$0")")
+
+if [[ -n "$test_packages" ]]; then
+    pushd "$JENKINS_DIR/package-test-dockerfiles"
+    make "$TARGET/generated"
+else
+    pushd "$JENKINS_DIR/package-build-dockerfiles"
+    make "$TARGET/generated"
+fi
+
+echo $TARGET
+cd $TARGET
+time docker build --tag=$IMAGE .
+popd
+
+if test -z "$packages" ; then
+    packages="arvados-api-server
+        arvados-client
+        arvados-docker-cleaner
+        arvados-git-httpd
+        arvados-node-manager
+        arvados-src
+        arvados-workbench
+        crunch-dispatch-local
+        crunch-dispatch-slurm
+        crunch-run
+        crunchstat
+        keep-balance
+        keep-block-check
+        keepproxy
+        keep-rsync
+        keepstore
+        keep-web
+        libarvados-perl
+        python-arvados-fuse
+        python-arvados-python-client
+        python-arvados-cwl-runner"
+fi
+
+FINAL_EXITCODE=0
+
+package_fails=""
+
+mkdir -p "$WORKSPACE/apps/workbench/vendor/cache-$TARGET"
+mkdir -p "$WORKSPACE/services/api/vendor/cache-$TARGET"
+
+docker_volume_args=(
+    -v "$JENKINS_DIR:/jenkins"
+    -v "$WORKSPACE:/arvados"
+    -v /arvados/services/api/vendor/bundle
+    -v /arvados/apps/workbench/vendor/bundle
+    -v "$WORKSPACE/services/api/vendor/cache-$TARGET:/arvados/services/api/vendor/cache"
+    -v "$WORKSPACE/apps/workbench/vendor/cache-$TARGET:/arvados/apps/workbench/vendor/cache"
+)
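+
+# The two bare "-v /arvados/.../vendor/bundle" entries create anonymous
+# volumes that shadow the host checkout's bundle directories inside the
+# container, so builds can't pollute the source tree; the cache-$TARGET
+# mounts persist per-target gem caches between runs.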
+
+if [[ -n "$test_packages" ]]; then
+    for p in $packages ; do
+        if [[ -n "$ONLY_BUILD" ]] && [[ "$p" != "$ONLY_BUILD" ]]; then
+            continue
+        fi
+        if [[ -e "${WORKSPACE}/packages/.last_test_${TARGET}" ]] && [[ -z "$FORCE_TEST" ]]; then
+          MATCH=`find ${WORKSPACE}/packages/ -newer ${WORKSPACE}/packages/.last_test_${TARGET} -regex .*${TARGET}/$p.*`
+          if [[ "$MATCH" == "" ]]; then
+            # No new package has been built that needs testing
+            echo "Skipping $p test because no new package was built since the last test."
+            continue
+          fi
+        fi
+        # If we're testing all packages, we should not error out on packages that don't exist.
+        # If we are testing one specific package only (i.e. --only-test was given), we should
+        # error out if that package does not exist.
+        if [[ -z "$testing_one_package" ]]; then
+          MATCH=`find ${WORKSPACE}/packages/ -regextype posix-extended -regex .*${TARGET}/$p.*\\(deb\\|rpm\\)`
+          if [[ "$MATCH" == "" ]]; then
+            # No new package has been built that needs testing
+            echo "Skipping $p test because no package file is available to test."
+            continue
+          fi
+        fi
+        echo
+        echo "START: $p test on $IMAGE" >&2
+        # ulimit option can be removed when debian8 and ubuntu1404 are retired
+        if docker run --ulimit nofile=4096:4096 \
+            --rm \
+            "${docker_volume_args[@]}" \
+            --env ARVADOS_DEBUG=$ARVADOS_DEBUG \
+            --env "TARGET=$TARGET" \
+            --env "WORKSPACE=/arvados" \
+            "$IMAGE" $COMMAND $p
+        then
+            echo "OK: $p test on $IMAGE succeeded" >&2
+        else
+            FINAL_EXITCODE=$?
+            package_fails="$package_fails $p"
+            echo "ERROR: $p test on $IMAGE failed with exit status $FINAL_EXITCODE" >&2
+        fi
+    done
+
+    if [[ "$FINAL_EXITCODE" == "0" ]]; then
+      touch ${WORKSPACE}/packages/.last_test_${TARGET}
+    fi
+else
+    echo
+    echo "START: build packages on $IMAGE" >&2
+    # Move existing packages and other files into the processed/ subdirectory
+    if [[ ! -e "${WORKSPACE}/packages/${TARGET}/processed" ]]; then
+      mkdir -p "${WORKSPACE}/packages/${TARGET}/processed"
+    fi
+    set +e
+    mv -f ${WORKSPACE}/packages/${TARGET}/* ${WORKSPACE}/packages/${TARGET}/processed/ 2>/dev/null
+    set -e
+    # Build packages. ulimit option can be removed when debian8 and ubuntu1404 are retired
+    if docker run --ulimit nofile=4096:4096 \
+        --rm \
+        "${docker_volume_args[@]}" \
+        --env ARVADOS_BUILDING_VERSION="$ARVADOS_BUILDING_VERSION" \
+        --env ARVADOS_BUILDING_ITERATION="$ARVADOS_BUILDING_ITERATION" \
+        --env ARVADOS_DEBUG=$ARVADOS_DEBUG \
+        --env "ONLY_BUILD=$ONLY_BUILD" \
+        "$IMAGE" $COMMAND
+    then
+        echo
+        echo "OK: build packages on $IMAGE succeeded" >&2
+    else
+        FINAL_EXITCODE=$?
+        echo "ERROR: build packages on $IMAGE failed with exit status $FINAL_EXITCODE" >&2
+    fi
+fi
+
+if test -n "$package_fails" ; then
+    echo "Failed package tests:$package_fails" >&2
+fi
+
+exit $FINAL_EXITCODE
diff --git a/build/run-build-packages-python-and-ruby.sh b/build/run-build-packages-python-and-ruby.sh
new file mode 100755 (executable)
index 0000000..4c5f39a
--- /dev/null
@@ -0,0 +1,209 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+COLUMNS=80
+
+. `dirname "$(readlink -f "$0")"`/run-library.sh
+#. `dirname "$(readlink -f "$0")"`/libcloud-pin.sh
+
+read -rd "\000" helpmessage <<EOF
+$(basename $0): Build Arvados Python packages and Ruby gems
+
+Syntax:
+        WORKSPACE=/path/to/arvados $(basename $0) [options]
+
+Options:
+
+--debug
+    Output debug information (default: false)
+--upload
+    If the build and test steps are successful, upload the python
+    packages to pypi and the gems to rubygems (default: false)
+
+WORKSPACE=path         Path to the Arvados source tree to build packages from
+
+EOF
+
+exit_cleanly() {
+    trap - INT
+    report_outcomes
+    exit ${#failures[@]}
+}
+
+gem_wrapper() {
+  local gem_name="$1"; shift
+  local gem_directory="$1"; shift
+
+  title "Start $gem_name gem build"
+  timer_reset
+
+  cd "$gem_directory"
+  handle_ruby_gem $gem_name
+
+  checkexit $? "$gem_name gem build"
+  title "End of $gem_name gem build (`timer`)"
+}
+
+python_wrapper() {
+  local package_name="$1"; shift
+  local package_directory="$1"; shift
+
+  title "Start $package_name python package build"
+  timer_reset
+
+  cd "$package_directory"
+  if [[ $DEBUG -gt 0 ]]; then
+    echo `pwd`
+  fi
+  handle_python_package
+
+  checkexit $? "$package_name python package build"
+  title "End of $package_name python package build (`timer`)"
+}
+
+TARGET=
+UPLOAD=0
+DEBUG=${ARVADOS_DEBUG:-0}
+
+PARSEDOPTS=$(getopt --name "$0" --longoptions \
+    help,debug,upload,target: \
+    -- "" "$@")
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
+eval set -- "$PARSEDOPTS"
+while [ $# -gt 0 ]; do
+    case "$1" in
+        --help)
+            echo >&2 "$helpmessage"
+            echo >&2
+            exit 1
+            ;;
+        --target)
+            TARGET="$2"; shift
+            ;;
+        --upload)
+            UPLOAD=1
+            ;;
+        --debug)
+            DEBUG=1
+            ;;
+        --)
+            if [ $# -gt 1 ]; then
+                echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help"
+                exit 1
+            fi
+            ;;
+    esac
+    shift
+done
+
+if ! [[ -n "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: WORKSPACE environment variable not set"
+  echo >&2
+  exit 1
+fi
+
+STDOUT_IF_DEBUG=/dev/null
+STDERR_IF_DEBUG=/dev/null
+DASHQ_UNLESS_DEBUG=-q
+if [[ "$DEBUG" != 0 ]]; then
+    STDOUT_IF_DEBUG=/dev/stdout
+    STDERR_IF_DEBUG=/dev/stderr
+    DASHQ_UNLESS_DEBUG=
+fi
+
+RUN_BUILD_PACKAGES_PATH="`dirname \"$0\"`"
+RUN_BUILD_PACKAGES_PATH="`( cd \"$RUN_BUILD_PACKAGES_PATH\" && pwd )`"  # absolutized and normalized
+if [ -z "$RUN_BUILD_PACKAGES_PATH" ] ; then
+  # error; for some reason, the path is not accessible
+  # to the script (e.g. permissions re-evaled after suid)
+  exit 1  # fail
+fi
+
+debug_echo "$0 is running from $RUN_BUILD_PACKAGES_PATH"
+debug_echo "Workspace is $WORKSPACE"
+
+if [[ -f /etc/profile.d/rvm.sh ]]; then
+    source /etc/profile.d/rvm.sh
+    GEM="rvm-exec default gem"
+else
+    GEM=gem
+fi
+
+# Make all files world-readable -- jenkins runs with umask 027, and has checked
+# out our git tree here
+chmod o+r "$WORKSPACE" -R
+
+# More cleanup - make sure all executables that we'll package are 755
+cd "$WORKSPACE"
+find -type d -name 'bin' |xargs -I {} find {} -type f |xargs -I {} chmod 755 {}
+
+# Now fix our umask to something better suited to building and publishing
+# gems and packages
+umask 0022
+
+debug_echo "umask is" `umask`
+
+gem_wrapper arvados "$WORKSPACE/sdk/ruby"
+gem_wrapper arvados-cli "$WORKSPACE/sdk/cli"
+gem_wrapper arvados-login-sync "$WORKSPACE/services/login-sync"
+
+GEM_BUILD_FAILURES=0
+if [ ${#failures[@]} -ne 0 ]; then
+  GEM_BUILD_FAILURES=${#failures[@]}
+fi
+
+python_wrapper arvados-pam "$WORKSPACE/sdk/pam"
+python_wrapper arvados-python-client "$WORKSPACE/sdk/python"
+python_wrapper arvados-cwl-runner "$WORKSPACE/sdk/cwl"
+python_wrapper arvados_fuse "$WORKSPACE/services/fuse"
+python_wrapper arvados-node-manager "$WORKSPACE/services/nodemanager"
+
+PYTHON_BUILD_FAILURES=0
+if [ $((${#failures[@]} - $GEM_BUILD_FAILURES)) -ne 0 ]; then
+  PYTHON_BUILD_FAILURES=$((${#failures[@]} - $GEM_BUILD_FAILURES))
+fi
+
+if [[ "$UPLOAD" != 0 ]]; then
+
+  if [[ $DEBUG -gt 0 ]]; then
+    EXTRA_UPLOAD_FLAGS=" --verbose"
+  else
+    EXTRA_UPLOAD_FLAGS=""
+  fi
+
+  if [[ ! -e "$WORKSPACE/packages" ]]; then
+    mkdir -p "$WORKSPACE/packages"
+  fi
+
+  title "Start upload python packages"
+  timer_reset
+
+  if [ "$PYTHON_BUILD_FAILURES" -eq 0 ]; then
+    /usr/local/arvados-dev/jenkins/run_upload_packages.py $EXTRA_UPLOAD_FLAGS --workspace $WORKSPACE python
+  else
+    echo "Skipping python packages upload, there were errors building the packages"
+  fi
+  checkexit $? "upload python packages"
+  title "End of upload python packages (`timer`)"
+
+  title "Start upload ruby gems"
+  timer_reset
+
+  if [ "$GEM_BUILD_FAILURES" -eq 0 ]; then
+    /usr/local/arvados-dev/jenkins/run_upload_packages.py $EXTRA_UPLOAD_FLAGS --workspace $WORKSPACE gems
+  else
+    echo "Skipping ruby gem upload, there were errors building the packages"
+  fi
+  checkexit $? "upload ruby gems"
+  title "End of upload ruby gems (`timer`)"
+
+fi
+
+exit_cleanly
diff --git a/build/run-build-packages-sso.sh b/build/run-build-packages-sso.sh
new file mode 100755 (executable)
index 0000000..d6a2117
--- /dev/null
@@ -0,0 +1,158 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+JENKINS_DIR=$(dirname $(readlink -e "$0"))
+. "$JENKINS_DIR/run-library.sh"
+
+read -rd "\000" helpmessage <<EOF
+$(basename $0): Build Arvados SSO server package
+
+Syntax:
+        WORKSPACE=/path/to/arvados-sso $(basename $0) [options]
+
+Options:
+
+--debug
+    Output debug information (default: false)
+--target
+    Distribution to build packages for (default: debian8)
+
+WORKSPACE=path         Path to the Arvados SSO source tree to build packages from
+
+EOF
+
+EXITCODE=0
+DEBUG=${ARVADOS_DEBUG:-0}
+TARGET=debian8
+
+PARSEDOPTS=$(getopt --name "$0" --longoptions \
+    help,build-bundle-packages,debug,test-packages,target: \
+    -- "" "$@")
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
+eval set -- "$PARSEDOPTS"
+while [ $# -gt 0 ]; do
+    case "$1" in
+        --help)
+            echo >&2 "$helpmessage"
+            echo >&2
+            exit 1
+            ;;
+        --target)
+            TARGET="$2"; shift
+            ;;
+        --debug)
+            DEBUG=1
+            ;;
+        --test-packages)
+            test_packages=1
+            ;;
+        --)
+            if [ $# -gt 1 ]; then
+                echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help"
+                exit 1
+            fi
+            ;;
+    esac
+    shift
+done
+
+STDOUT_IF_DEBUG=/dev/null
+STDERR_IF_DEBUG=/dev/null
+DASHQ_UNLESS_DEBUG=-q
+if [[ "$DEBUG" != 0 ]]; then
+    STDOUT_IF_DEBUG=/dev/stdout
+    STDERR_IF_DEBUG=/dev/stderr
+    DASHQ_UNLESS_DEBUG=
+fi
+
+case "$TARGET" in
+    debian*)
+        FORMAT=deb
+        ;;
+    ubuntu*)
+        FORMAT=deb
+        ;;
+    centos*)
+        FORMAT=rpm
+        ;;
+    *)
+        echo -e "$0: Unknown target '$TARGET'.\n" >&2
+        exit 1
+        ;;
+esac
+
+if ! [[ -n "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: WORKSPACE environment variable not set"
+  echo >&2
+  exit 1
+fi
+
+if ! [[ -d "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: $WORKSPACE is not a directory"
+  echo >&2
+  exit 1
+fi
+
+# Test for fpm
+fpm --version >/dev/null 2>&1
+
+if [[ "$?" != 0 ]]; then
+    echo >&2 "$helpmessage"
+    echo >&2
+    echo >&2 "Error: fpm not found"
+    echo >&2
+    exit 1
+fi
+
+RUN_BUILD_PACKAGES_PATH="`dirname \"$0\"`"
+RUN_BUILD_PACKAGES_PATH="`( cd \"$RUN_BUILD_PACKAGES_PATH\" && pwd )`"  # absolutized and normalized
+if [ -z "$RUN_BUILD_PACKAGES_PATH" ] ; then
+    # error; for some reason, the path is not accessible
+    # to the script (e.g. permissions re-evaled after suid)
+    exit 1  # fail
+fi
+
+debug_echo "$0 is running from $RUN_BUILD_PACKAGES_PATH"
+debug_echo "Workspace is $WORKSPACE"
+
+if [[ -f /etc/profile.d/rvm.sh ]]; then
+    source /etc/profile.d/rvm.sh
+    GEM="rvm-exec default gem"
+else
+    GEM=gem
+fi
+
+# Make all files world-readable -- jenkins runs with umask 027, and has checked
+# out our git tree here
+chmod o+r "$WORKSPACE" -R
+
+# More cleanup - make sure all executables that we'll package are 755
+# No executables in the sso server package
+#find -type d -name 'bin' |xargs -I {} find {} -type f |xargs -I {} chmod 755 {}
+
+# Now fix our umask to something better suited to building and publishing
+# gems and packages
+umask 0022
+
+debug_echo "umask is" `umask`
+
+if [[ ! -d "$WORKSPACE/packages/$TARGET" ]]; then
+    mkdir -p "$WORKSPACE/packages/$TARGET"
+fi
+
+# Build the SSO server package
+handle_rails_package arvados-sso-server "$WORKSPACE" \
+                     "$WORKSPACE/LICENCE" --url="https://arvados.org" \
+                     --description="Arvados SSO server - Arvados is a free and open source platform for big data science." \
+                     --license="Expat license"
+
+exit $EXITCODE
diff --git a/build/run-build-packages.sh b/build/run-build-packages.sh
new file mode 100755 (executable)
index 0000000..b800d43
--- /dev/null
@@ -0,0 +1,412 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+. `dirname "$(readlink -f "$0")"`/run-library.sh
+. `dirname "$(readlink -f "$0")"`/libcloud-pin.sh
+
+read -rd "\000" helpmessage <<EOF
+$(basename $0): Build Arvados packages
+
+Syntax:
+        WORKSPACE=/path/to/arvados $(basename $0) [options]
+
+Options:
+
+--build-bundle-packages  (default: false)
+    Build api server and workbench packages with vendor/bundle included
+--debug
+    Output debug information (default: false)
+--target <target>
+    Distribution to build packages for (default: debian8)
+--only-build <package>
+    Build only a specific package (or \$ONLY_BUILD from environment)
+--command
+    Build command to execute (defaults to the run command defined in the
+    Docker image)
+
+WORKSPACE=path         Path to the Arvados source tree to build packages from
+
+EOF
+
+# Begin of user configuration
+
+# set to --no-cache-dir to disable pip caching
+CACHE_FLAG=
+
+MAINTAINER="Ward Vandewege <wvandewege@veritasgenetics.com>"
+VENDOR="Veritas Genetics, Inc."
+
+# End of user configuration
+
+DEBUG=${ARVADOS_DEBUG:-0}
+EXITCODE=0
+TARGET=debian8
+COMMAND=
+
+PARSEDOPTS=$(getopt --name "$0" --longoptions \
+    help,build-bundle-packages,debug,target:,only-build: \
+    -- "" "$@")
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
+eval set -- "$PARSEDOPTS"
+while [ $# -gt 0 ]; do
+    case "$1" in
+        --help)
+            echo >&2 "$helpmessage"
+            echo >&2
+            exit 1
+            ;;
+        --target)
+            TARGET="$2"; shift
+            ;;
+        --only-build)
+            ONLY_BUILD="$2"; shift
+            ;;
+        --debug)
+            DEBUG=1
+            ;;
+        --command)
+            COMMAND="$2"; shift
+            ;;
+        --)
+            if [ $# -gt 1 ]; then
+                echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help"
+                exit 1
+            fi
+            ;;
+    esac
+    shift
+done
+
+if [[ "$COMMAND" != "" ]]; then
+  COMMAND="/usr/local/rvm/bin/rvm-exec default bash /jenkins/$COMMAND --target $TARGET"
+fi
+
+STDOUT_IF_DEBUG=/dev/null
+STDERR_IF_DEBUG=/dev/null
+DASHQ_UNLESS_DEBUG=-q
+if [[ "$DEBUG" != 0 ]]; then
+    STDOUT_IF_DEBUG=/dev/stdout
+    STDERR_IF_DEBUG=/dev/stderr
+    DASHQ_UNLESS_DEBUG=
+fi
+
+declare -a PYTHON_BACKPORTS PYTHON3_BACKPORTS
+
+PYTHON2_VERSION=2.7
+PYTHON3_VERSION=$(python3 -c 'import sys; print("{v.major}.{v.minor}".format(v=sys.version_info))')
+
+## These defaults are suitable for any Debian-based distribution.
+# You can customize them as needed in distro sections below.
+PYTHON2_PACKAGE=python$PYTHON2_VERSION
+PYTHON2_PKG_PREFIX=python
+PYTHON2_PREFIX=/usr
+PYTHON2_INSTALL_LIB=lib/python$PYTHON2_VERSION/dist-packages
+
+PYTHON3_PACKAGE=python$PYTHON3_VERSION
+PYTHON3_PKG_PREFIX=python3
+PYTHON3_PREFIX=/usr
+PYTHON3_INSTALL_LIB=lib/python$PYTHON3_VERSION/dist-packages
+## End Debian Python defaults.
+
+case "$TARGET" in
+    debian*)
+        FORMAT=deb
+        ;;
+    ubuntu*)
+        FORMAT=deb
+        ;;
+    centos*)
+        FORMAT=rpm
+        PYTHON2_PACKAGE=$(rpm -qf "$(which python$PYTHON2_VERSION)" --queryformat '%{NAME}\n')
+        PYTHON2_PKG_PREFIX=$PYTHON2_PACKAGE
+        PYTHON2_INSTALL_LIB=lib/python$PYTHON2_VERSION/site-packages
+        PYTHON3_PACKAGE=$(rpm -qf "$(which python$PYTHON3_VERSION)" --queryformat '%{NAME}\n')
+        PYTHON3_PKG_PREFIX=$PYTHON3_PACKAGE
+        PYTHON3_PREFIX=/opt/rh/rh-python35/root/usr
+        PYTHON3_INSTALL_LIB=lib/python$PYTHON3_VERSION/site-packages
+        export PYCURL_SSL_LIBRARY=nss
+        ;;
+    *)
+        echo -e "$0: Unknown target '$TARGET'.\n" >&2
+        exit 1
+        ;;
+esac
+
+
+if ! [[ -n "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: WORKSPACE environment variable not set"
+  echo >&2
+  exit 1
+fi
+
+# Test for fpm
+fpm --version >/dev/null 2>&1
+
+if [[ "$?" != 0 ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: fpm not found"
+  echo >&2
+  exit 1
+fi
+
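+
+# fpm needs to know which flavor of Python installer is available:
+# --python-easyinstall for easy_install-style tools, --python-pip for pip.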
+PYTHON2_FPM_INSTALLER=(--python-easyinstall "$(find_python_program easy_install-$PYTHON2_VERSION easy_install)")
+install3=$(find_python_program easy_install-$PYTHON3_VERSION easy_install3 pip-$PYTHON3_VERSION pip3)
+if [[ $install3 =~ easy_ ]]; then
+    PYTHON3_FPM_INSTALLER=(--python-easyinstall "$install3")
+else
+    PYTHON3_FPM_INSTALLER=(--python-pip "$install3")
+fi
+
+RUN_BUILD_PACKAGES_PATH="`dirname \"$0\"`"
+RUN_BUILD_PACKAGES_PATH="`( cd \"$RUN_BUILD_PACKAGES_PATH\" && pwd )`"  # absolutized and normalized
+if [ -z "$RUN_BUILD_PACKAGES_PATH" ] ; then
+  # error; for some reason, the path is not accessible
+  # to the script (e.g. permissions re-evaled after suid)
+  exit 1  # fail
+fi
+
+debug_echo "$0 is running from $RUN_BUILD_PACKAGES_PATH"
+debug_echo "Workspace is $WORKSPACE"
+
+if [[ -f /etc/profile.d/rvm.sh ]]; then
+    source /etc/profile.d/rvm.sh
+    GEM="rvm-exec default gem"
+else
+    GEM=gem
+fi
+
+# Make all files world-readable -- jenkins runs with umask 027, and has checked
+# out our git tree here
+chmod o+r "$WORKSPACE" -R
+
+# More cleanup - make sure all executables that we'll package are 755
+cd "$WORKSPACE"
+find -type d -name 'bin' |xargs -I {} find {} -type f |xargs -I {} chmod 755 {}
+
+# Now fix our umask to something better suited to building and publishing
+# gems and packages
+umask 0022
+
+debug_echo "umask is" `umask`
+
+if [[ ! -d "$WORKSPACE/packages/$TARGET" ]]; then
+  mkdir -p $WORKSPACE/packages/$TARGET
+  chown --reference="$WORKSPACE" "$WORKSPACE/packages/$TARGET"
+fi
+
+# Perl packages
+debug_echo -e "\nPerl packages\n"
+
+if [[ -z "$ONLY_BUILD" ]] || [[ "libarvados-perl" = "$ONLY_BUILD" ]] ; then
+  cd "$WORKSPACE/sdk/perl"
+  libarvados_perl_version="$(version_from_git)"
+
+  cd $WORKSPACE/packages/$TARGET
+  test_package_presence libarvados-perl "$libarvados_perl_version"
+
+  if [[ "$?" == "0" ]]; then
+    cd "$WORKSPACE/sdk/perl"
+
+    if [[ -e Makefile ]]; then
+      make realclean >"$STDOUT_IF_DEBUG"
+    fi
+    find -maxdepth 1 \( -name 'MANIFEST*' -or -name "libarvados-perl*.$FORMAT" \) \
+        -delete
+    rm -rf install
+
+    perl Makefile.PL INSTALL_BASE=install >"$STDOUT_IF_DEBUG" && \
+        make install INSTALLDIRS=perl >"$STDOUT_IF_DEBUG" && \
+        fpm_build install/lib/=/usr/share libarvados-perl \
+        dir "$(version_from_git)" install/man/=/usr/share/man \
+        "$WORKSPACE/apache-2.0.txt=/usr/share/doc/libarvados-perl/apache-2.0.txt" && \
+        mv --no-clobber libarvados-perl*.$FORMAT "$WORKSPACE/packages/$TARGET/"
+  fi
+fi
+
+# Ruby gems
+debug_echo -e "\nRuby gems\n"
+
+FPM_GEM_PREFIX=$($GEM environment gemdir)
+
+cd "$WORKSPACE/sdk/ruby"
+handle_ruby_gem arvados
+
+cd "$WORKSPACE/sdk/cli"
+handle_ruby_gem arvados-cli
+
+cd "$WORKSPACE/services/login-sync"
+handle_ruby_gem arvados-login-sync
+
+# Python packages
+debug_echo -e "\nPython packages\n"
+
+# arvados-src
+(
+    cd "$WORKSPACE"
+    COMMIT_HASH=$(format_last_commit_here "%H")
+    arvados_src_version="$(version_from_git)"
+
+    cd $WORKSPACE/packages/$TARGET
+    test_package_presence arvados-src $arvados_src_version src ""
+
+    if [[ "$?" == "0" ]]; then
+      cd "$WORKSPACE"
+      SRC_BUILD_DIR=$(mktemp -d)
+      # mktemp creates the directory with 0700 permissions by default
+      chmod 755 $SRC_BUILD_DIR
+      git clone $DASHQ_UNLESS_DEBUG "$WORKSPACE/.git" "$SRC_BUILD_DIR"
+      cd "$SRC_BUILD_DIR"
+
+      # go into detached-head state
+      git checkout $DASHQ_UNLESS_DEBUG "$COMMIT_HASH"
+      echo "$COMMIT_HASH" >git-commit.version
+
+      cd "$SRC_BUILD_DIR"
+      PKG_VERSION=$(version_from_git)
+      cd $WORKSPACE/packages/$TARGET
+      fpm_build $SRC_BUILD_DIR/=/usr/local/arvados/src arvados-src 'dir' "$PKG_VERSION" "--exclude=usr/local/arvados/src/.git" "--url=https://arvados.org" "--license=GNU Affero General Public License, version 3.0" "--description=The Arvados source code" "--architecture=all"
+
+      rm -rf "$SRC_BUILD_DIR"
+    fi
+)
+
+# Go binaries
+cd $WORKSPACE/packages/$TARGET
+export GOPATH=$(mktemp -d)
+go get github.com/kardianos/govendor
+package_go_binary cmd/arvados-client arvados-client \
+    "Arvados command line tool (beta)"
+package_go_binary cmd/arvados-server arvados-server \
+    "Arvados server daemons"
+package_go_binary cmd/arvados-server arvados-controller \
+    "Arvados cluster controller daemon"
+package_go_binary cmd/arvados-server arvados-dispatch-cloud \
+    "Arvados cluster cloud dispatch"
+package_go_binary sdk/go/crunchrunner crunchrunner \
+    "Crunchrunner executes a command inside a container and uploads the output"
+package_go_binary services/arv-git-httpd arvados-git-httpd \
+    "Provide authenticated http access to Arvados-hosted git repositories"
+package_go_binary services/crunch-dispatch-local crunch-dispatch-local \
+    "Dispatch Crunch containers on the local system"
+package_go_binary services/crunch-dispatch-slurm crunch-dispatch-slurm \
+    "Dispatch Crunch containers to a SLURM cluster"
+package_go_binary services/crunch-run crunch-run \
+    "Supervise a single Crunch container"
+package_go_binary services/crunchstat crunchstat \
+    "Gather cpu/memory/network statistics of running Crunch jobs"
+package_go_binary services/health arvados-health \
+    "Check health of all Arvados cluster services"
+package_go_binary services/keep-balance keep-balance \
+    "Rebalance and garbage-collect data blocks stored in Arvados Keep"
+package_go_binary services/keepproxy keepproxy \
+    "Make a Keep cluster accessible to clients that are not on the LAN"
+package_go_binary services/keepstore keepstore \
+    "Keep storage daemon, accessible to clients on the LAN"
+package_go_binary services/keep-web keep-web \
+    "Static web hosting service for user data stored in Arvados Keep"
+package_go_binary services/ws arvados-ws \
+    "Arvados Websocket server"
+package_go_binary tools/sync-groups arvados-sync-groups \
+    "Synchronize remote groups into Arvados from an external source"
+package_go_binary tools/keep-block-check keep-block-check \
+    "Verify that all data from one set of Keep servers to another was copied"
+package_go_binary tools/keep-rsync keep-rsync \
+    "Copy all data from one set of Keep servers to another"
+package_go_binary tools/keep-exercise keep-exercise \
+    "Performance testing tool for Arvados Keep"
+
+# The Python SDK
+fpm_build_virtualenv "arvados-python-client" "sdk/python"
+fpm_build_virtualenv "arvados-python-client" "sdk/python" "python3"
+
+# Arvados cwl runner
+fpm_build_virtualenv "arvados-cwl-runner" "sdk/cwl"
+
+# The PAM module
+fpm_build_virtualenv "libpam-arvados" "sdk/pam"
+
+# The FUSE driver
+fpm_build_virtualenv "arvados-fuse" "services/fuse"
+
+# The node manager
+fpm_build_virtualenv "arvados-node-manager" "services/nodemanager"
+
+# The Docker image cleaner
+fpm_build_virtualenv "arvados-docker-cleaner" "services/dockercleaner" "python3"
+
+# The Arvados crunchstat-summary tool
+fpm_build_virtualenv "crunchstat-summary" "tools/crunchstat-summary"
+
+# The cwltest package, which lives out of tree
+cd "$WORKSPACE"
+if [[ -e "$WORKSPACE/cwltest" ]]; then
+       rm -rf "$WORKSPACE/cwltest"
+fi
+git clone https://github.com/common-workflow-language/cwltest.git
+# signal to our build script that we want a cwltest executable installed in /usr/bin/
+mkdir cwltest/bin && touch cwltest/bin/cwltest
+fpm_build_virtualenv "cwltest" "cwltest"
+rm -rf "$WORKSPACE/cwltest"
+
+# Build the API server package
+test_rails_package_presence arvados-api-server "$WORKSPACE/services/api"
+if [[ "$?" == "0" ]]; then
+  handle_rails_package arvados-api-server "$WORKSPACE/services/api" \
+      "$WORKSPACE/agpl-3.0.txt" --url="https://arvados.org" \
+      --description="Arvados API server - Arvados is a free and open source platform for big data science." \
+      --license="GNU Affero General Public License, version 3.0"
+fi
+
+# Build the workbench server package
+test_rails_package_presence arvados-workbench "$WORKSPACE/apps/workbench"
+if [[ "$?" == "0" ]] ; then
+  (
+      set -e
+      cd "$WORKSPACE/apps/workbench"
+
+      # We need the gem bundle to be ready even when we build a package without
+      # the vendor directory, because asset compilation requires it.
+      bundle install --system >"$STDOUT_IF_DEBUG"
+
+      # clear the tmp directory; the asset generation step will recreate tmp/cache/assets,
+      # and we want that in the package, so it's easier to not exclude the tmp directory
+      # from the package - empty it instead.
+      rm -rf tmp
+      mkdir tmp
+
+      # Set up application.yml and production.rb so that asset precompilation works
+      \cp config/application.yml.example config/application.yml -f
+      \cp config/environments/production.rb.example config/environments/production.rb -f
+      sed -i 's/secret_token: ~/secret_token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/' config/application.yml
+      sed -i 's/keep_web_url: false/keep_web_url: exampledotcom/' config/application.yml
+
+      RAILS_ENV=production RAILS_GROUPS=assets bundle exec rake npm:install >/dev/null
+      RAILS_ENV=production RAILS_GROUPS=assets bundle exec rake assets:precompile >/dev/null
+
+      # Remove generated configuration files so they don't go in the package.
+      rm config/application.yml config/environments/production.rb
+  )
+
+  if [[ "$?" != "0" ]]; then
+    echo "ERROR: Asset precompilation failed"
+    EXITCODE=1
+  else
+    handle_rails_package arvados-workbench "$WORKSPACE/apps/workbench" \
+        "$WORKSPACE/agpl-3.0.txt" --url="https://arvados.org" \
+        --description="Arvados Workbench - Arvados is a free and open source platform for big data science." \
+        --license="GNU Affero General Public License, version 3.0"
+  fi
+fi
+
+# clean up temporary GOPATH
+rm -rf "$GOPATH"
+
+exit $EXITCODE
diff --git a/build/run-build-test-packages-one-target.sh b/build/run-build-test-packages-one-target.sh
new file mode 100755 (executable)
index 0000000..b98a4c0
--- /dev/null
@@ -0,0 +1,138 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+read -rd "\000" helpmessage <<EOF
+$(basename $0): Build, test and (optionally) upload packages for one target
+
+Syntax:
+        WORKSPACE=/path/to/arvados $(basename $0) [options]
+
+--target <target>
+    Distribution to build packages for (default: debian8)
+--upload
+    If the build and test steps are successful, upload the packages
+    to a remote apt repository (default: false)
+--rc
+    Optional parameter to build a release candidate
+--build-version <version>
+    Version to build (default:
+    \$ARVADOS_BUILDING_VERSION-\$ARVADOS_BUILDING_ITERATION or
+    0.1.timestamp.commithash)
+
+WORKSPACE=path         Path to the Arvados source tree to build packages from
+
+EOF
+
+if ! [[ -n "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: WORKSPACE environment variable not set"
+  echo >&2
+  exit 1
+fi
+
+if ! [[ -d "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: $WORKSPACE is not a directory"
+  echo >&2
+  exit 1
+fi
+
+PARSEDOPTS=$(getopt --name "$0" --longoptions \
+    help,upload,rc,target:,build-version: \
+    -- "" "$@")
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
+TARGET=debian8
+UPLOAD=0
+RC=0
+
+declare -a build_args=()
+
+eval set -- "$PARSEDOPTS"
+while [ $# -gt 0 ]; do
+    case "$1" in
+        --help)
+            echo >&2 "$helpmessage"
+            echo >&2
+            exit 1
+            ;;
+        --target)
+            TARGET="$2"; shift
+            ;;
+        --upload)
+            UPLOAD=1
+            ;;
+        --rc)
+            RC=1
+            ;;
+        --build-version)
+            build_args+=("$1" "$2")
+            shift
+            ;;
+        --)
+            if [ $# -gt 1 ]; then
+                echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help"
+                exit 1
+            fi
+            ;;
+    esac
+    shift
+done
+
+build_args+=(--target "$TARGET")
+
+exit_cleanly() {
+    trap - INT
+    report_outcomes
+    exit ${#failures[@]}
+}
+
+COLUMNS=80
+. $WORKSPACE/build/run-library.sh
+
+title "Start build packages"
+timer_reset
+
+$WORKSPACE/build/run-build-packages-one-target.sh "${build_args[@]}"
+
+checkexit $? "build packages"
+title "End of build packages (`timer`)"
+
+title "Start test packages"
+timer_reset
+
+if [ ${#failures[@]} -eq 0 ]; then
+  $WORKSPACE/build/run-build-packages-one-target.sh "${build_args[@]}" --test-packages
+else
+  echo "Skipping package upload, there were errors building the packages"
+fi
+
+checkexit $? "test packages"
+title "End of test packages (`timer`)"
+
+if [[ "$UPLOAD" != 0 ]]; then
+  title "Start upload packages"
+  timer_reset
+
+  if [ ${#failures[@]} -eq 0 ]; then
+    if [[ "$RC" != 0 ]]; then
+      echo "/usr/local/arvados-dev/jenkins/run_upload_packages_testing.py -H jenkinsapt@apt.arvados.org -o Port=2222 --workspace $WORKSPACE $TARGET"
+      /usr/local/arvados-dev/jenkins/run_upload_packages_testing.py -H jenkinsapt@apt.arvados.org -o Port=2222 --workspace $WORKSPACE $TARGET
+    else
+      echo "/usr/local/arvados-dev/jenkins/run_upload_packages.py -H jenkinsapt@apt.arvados.org -o Port=2222 --workspace $WORKSPACE $TARGET"
+      /usr/local/arvados-dev/jenkins/run_upload_packages.py -H jenkinsapt@apt.arvados.org -o Port=2222 --workspace $WORKSPACE $TARGET
+    fi
+  else
+    echo "Skipping package upload, there were errors building and/or testing the packages"
+  fi
+  checkexit $? "upload packages"
+  title "End of upload packages (`timer`)"
+fi
+
+exit_cleanly
\ No newline at end of file
diff --git a/build/run-library.sh b/build/run-library.sh
new file mode 100755 (executable)
index 0000000..de9d67d
--- /dev/null
@@ -0,0 +1,867 @@
+#!/bin/bash -xe
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# A library of functions shared by the various scripts in this directory.
+
+# This is the timestamp from about when we merged the changes that include
+# licenses with Arvados packages.  We use it as a heuristic to add revisions
+# for older packages.
+LICENSE_PACKAGE_TS=20151208015500
+
+if [[ -z "$ARVADOS_BUILDING_VERSION" ]]; then
+    RAILS_PACKAGE_ITERATION=8
+else
+    RAILS_PACKAGE_ITERATION="$ARVADOS_BUILDING_ITERATION"
+fi
+
+debug_echo () {
+    echo "$@" >"$STDOUT_IF_DEBUG"
+}
+
+find_python_program() {
+    # Try each candidate program in turn and print the first one that works.
+    local prog
+    for prog in "$@"; do
+        if "$prog" --version >/dev/null 2>&1; then
+            echo "$prog"
+            return 0
+        fi
+    done
+    cat >&2 <<EOF
+$helpmessage
+
+Error: $1 (from Python setuptools module) not found
+
+EOF
+    exit 1
+}
+
+format_last_commit_here() {
+    local format="$1"; shift
+    TZ=UTC git log -n1 --first-parent "--format=format:$format" .
+}
+
+version_from_git() {
+    # Output the version being built, or if we're building a
+    # dev/prerelease, output a version number based on the git log for
+    # the current working directory.
+    if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then
+        echo "$ARVADOS_BUILDING_VERSION"
+        return
+    fi
+
+    local git_ts git_hash prefix
+    if [[ -n "$1" ]] ; then
+        prefix="$1"
+    else
+        prefix="0.1"
+    fi
+
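+    # format_last_commit_here prints "git_ts=<epoch> git_hash=<abbrev-hash>",
+    # which declare splits into two shell variables.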
+    declare $(format_last_commit_here "git_ts=%ct git_hash=%h")
+    ARVADOS_BUILDING_VERSION="$(git tag -l |sort -V -r |head -n1).$(date -ud "@$git_ts" +%Y%m%d%H%M%S)"
+    echo "$ARVADOS_BUILDING_VERSION"
+}
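+
+# Hypothetical example: with ARVADOS_BUILDING_VERSION unset, a tree whose
+# newest tag is 1.3.0 and whose last commit was made 2019-03-14 14:11:26 UTC
+# yields 1.3.0.20190314141126.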
+
+nohash_version_from_git() {
+    if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then
+        echo "$ARVADOS_BUILDING_VERSION"
+        return
+    fi
+    version_from_git $1 | cut -d. -f1-4
+}
+
+timestamp_from_git() {
+    format_last_commit_here "%ct"
+}
+
+handle_python_package () {
+  # This function assumes the current working directory is the python package directory
+  if [ -n "$(find dist -name "*-$(nohash_version_from_git).tar.gz" -print -quit)" ]; then
+    # This package doesn't need rebuilding.
+    return
+  fi
+  # Make sure only to use sdist - that's the only format pip can deal with (sigh)
+  python setup.py $DASHQ_UNLESS_DEBUG sdist
+}
+
+handle_ruby_gem() {
+    local gem_name="$1"; shift
+    local gem_version="$(nohash_version_from_git)"
+    local gem_src_dir="$(pwd)"
+
+    if [[ -n "$ONLY_BUILD" ]] && [[ "$gem_name" != "$ONLY_BUILD" ]] ; then
+        return 0
+    fi
+
+    if ! [[ -e "${gem_name}-${gem_version}.gem" ]]; then
+        find -maxdepth 1 -name "${gem_name}-*.gem" -delete
+
+        # -q appears to be broken in gem version 2.2.2
+        $GEM build "$gem_name.gemspec" $DASHQ_UNLESS_DEBUG >"$STDOUT_IF_DEBUG" 2>"$STDERR_IF_DEBUG"
+    fi
+}
+
+# Usage: package_go_binary services/foo arvados-foo "Compute foo to arbitrary precision"
+package_go_binary() {
+    local src_path="$1"; shift
+    local prog="$1"; shift
+    local description="$1"; shift
+    local license_file="${1:-agpl-3.0.txt}"; shift
+
+    if [[ -n "$ONLY_BUILD" ]] && [[ "$prog" != "$ONLY_BUILD" ]] ; then
+        return 0
+    fi
+
+    debug_echo "package_go_binary $src_path as $prog"
+
+    local basename="${src_path##*/}"
+
+    mkdir -p "$GOPATH/src/git.curoverse.com"
+    ln -sfn "$WORKSPACE" "$GOPATH/src/git.curoverse.com/arvados.git"
+    (cd "$GOPATH/src/git.curoverse.com/arvados.git" && "$GOPATH/bin/govendor" sync -v)
+
+    cd "$GOPATH/src/git.curoverse.com/arvados.git/$src_path"
+    local version="$(version_from_git)"
+    local timestamp="$(timestamp_from_git)"
+
+    # Update the version number and build a new package if the vendor
+    # bundle has changed, or the command imports anything from the
+    # Arvados SDK and the SDK has changed.
+    declare -a checkdirs=(vendor)
+    if grep -qr git.curoverse.com/arvados .; then
+        checkdirs+=(sdk/go lib)
+    fi
+    for dir in ${checkdirs[@]}; do
+        cd "$GOPATH/src/git.curoverse.com/arvados.git/$dir"
+        ts="$(timestamp_from_git)"
+        if [[ "$ts" -gt "$timestamp" ]]; then
+            version=$(version_from_git)
+            timestamp="$ts"
+        fi
+    done
+
+    cd $WORKSPACE/packages/$TARGET
+    test_package_presence $prog $version go
+
+    if [[ "$?" != "0" ]]; then
+      return 1
+    fi
+
+    go get -ldflags "-X main.version=${version}" "git.curoverse.com/arvados.git/$src_path"
+
+    local -a switches=()
+    systemd_unit="$WORKSPACE/${src_path}/${prog}.service"
+    if [[ -e "${systemd_unit}" ]]; then
+        switches+=(
+            --after-install "${WORKSPACE}/build/go-python-package-scripts/postinst"
+            --before-remove "${WORKSPACE}/build/go-python-package-scripts/prerm"
+            "${systemd_unit}=/lib/systemd/system/${prog}.service")
+    fi
+    switches+=("$WORKSPACE/${license_file}=/usr/share/doc/$prog/${license_file}")
+
+    fpm_build "$GOPATH/bin/${basename}=/usr/bin/${prog}" "${prog}" dir "${version}" "--url=https://arvados.org" "--license=GNU Affero General Public License, version 3.0" "--description=${description}" "${switches[@]}"
+}
+
+default_iteration() {
+    if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then
+        echo "$ARVADOS_BUILDING_ITERATION"
+        return
+    fi
+    local package_name="$1"; shift
+    local package_version="$1"; shift
+    local package_type="$1"; shift
+    local iteration=1
+    if [[ $package_version =~ ^0\.1\.([0-9]{14})(\.|$) ]] && \
+           [[ ${BASH_REMATCH[1]} -le $LICENSE_PACKAGE_TS ]]; then
+        iteration=2
+    fi
+    if [[ $package_type =~ ^python ]]; then
+      # Fix --iteration for #9242.
+      iteration=2
+    fi
+    echo $iteration
+}
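+# Hypothetical example: default_iteration arvados-src 0.1.20151207000000.abc123 src
+# prints 2, because the version timestamp predates LICENSE_PACKAGE_TS;
+# any python package type likewise gets iteration 2 per #9242.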
+
+_build_rails_package_scripts() {
+    local pkgname="$1"; shift
+    local destdir="$1"; shift
+    local srcdir="$RUN_BUILD_PACKAGES_PATH/rails-package-scripts"
+    for scriptname in postinst prerm postrm; do
+        cat "$srcdir/$pkgname.sh" "$srcdir/step2.sh" "$srcdir/$scriptname.sh" \
+            >"$destdir/$scriptname" || return $?
+    done
+}
+
+test_rails_package_presence() {
+  local pkgname="$1"; shift
+  local srcdir="$1"; shift
+
+  if [[ -n "$ONLY_BUILD" ]] && [[ "$pkgname" != "$ONLY_BUILD" ]] ; then
+    return 1
+  fi
+
+  tmppwd=`pwd`
+
+  cd $srcdir
+
+  local version="$(version_from_git)"
+
+  cd $tmppwd
+
+  test_package_presence $pkgname $version rails "$RAILS_PACKAGE_ITERATION"
+}
+
+test_package_presence() {
+    local pkgname="$1"; shift
+    local version="$1"; shift
+    local pkgtype="$1"; shift
+    local iteration="$1"; shift
+    local arch="$1"; shift
+
+    if [[ -n "$ONLY_BUILD" ]] && [[ "$pkgname" != "$ONLY_BUILD" ]] ; then
+        return 1
+    fi
+
+    if [[ "$iteration" == "" ]]; then
+        iteration="$(default_iteration "$pkgname" "$version" "$pkgtype")"
+    fi
+
+    if [[ "$arch" == "" ]]; then
+      rpm_architecture="x86_64"
+      deb_architecture="amd64"
+
+      if [[ "$pkgtype" =~ ^(src)$ ]]; then
+        rpm_architecture="noarch"
+        deb_architecture="all"
+      fi
+
+      # These python packages have binary components
+      if [[ "$pkgname" =~ (ruamel|ciso|pycrypto|pyyaml) ]]; then
+        rpm_architecture="x86_64"
+        deb_architecture="amd64"
+      fi
+    else
+      rpm_architecture=$arch
+      deb_architecture=$arch
+    fi
+
+    if [[ "$FORMAT" == "deb" ]]; then
+        local complete_pkgname="${pkgname}_$version${iteration:+-$iteration}_$deb_architecture.deb"
+    else
+        # rpm packages get iteration 1 if we don't supply one
+        iteration=${iteration:-1}
+        local complete_pkgname="$pkgname-$version-${iteration}.$rpm_architecture.rpm"
+    fi
+
+    # See if we can skip building the package, only if it already exists in the
+    # processed/ directory. If so, move it back to the packages directory to make
+    # sure it gets picked up by the test and/or upload steps.
+    # Get the list of packages from the repos
+
+    if [[ "$FORMAT" == "deb" ]]; then
+      declare -A dd
+      dd[debian8]=jessie
+      dd[debian9]=stretch
+      dd[debian10]=buster
+      dd[ubuntu1404]=trusty
+      dd[ubuntu1604]=xenial
+      dd[ubuntu1804]=bionic
+      D=${dd[$TARGET]}
+      if [ ${pkgname:0:3} = "lib" ]; then
+        repo_subdir=${pkgname:0:4}
+      else
+        repo_subdir=${pkgname:0:1}
+      fi
+
+      repo_pkg_list=$(curl -s -o - http://apt.arvados.org/pool/${D}/main/${repo_subdir}/)
+      echo ${repo_pkg_list} |grep -q ${complete_pkgname}
+      if [ $? -eq 0 ] ; then
+        echo "Package $complete_pkgname exists, not rebuilding!"
+        curl -s -o ./${complete_pkgname} http://apt.arvados.org/pool/${D}/main/${repo_subdir}/${complete_pkgname}
+        return 1
+      elif test -f "$WORKSPACE/packages/$TARGET/processed/${complete_pkgname}" ; then
+        echo "Package $complete_pkgname exists, not rebuilding!"
+        return 1
+      else
+        echo "Package $complete_pkgname not found, building"
+        return 0
+      fi
+    else
+      centos_repo="http://rpm.arvados.org/CentOS/7/dev/x86_64/"
+
+      repo_pkg_list=$(curl -s -o - ${centos_repo})
+      echo ${repo_pkg_list} |grep -q ${complete_pkgname}
+      if [ $? -eq 0 ]; then
+        echo "Package $complete_pkgname exists, not rebuilding!"
+        curl -s -o ./${complete_pkgname} ${centos_repo}${complete_pkgname}
+        return 1
+      elif test -f "$WORKSPACE/packages/$TARGET/processed/${complete_pkgname}" ; then
+        echo "Package $complete_pkgname exists, not rebuilding!"
+        return 1
+      else
+        echo "Package $complete_pkgname not found, building"
+        return 0
+      fi
+    fi
+}
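+# Hypothetical example of the filenames checked above: with
+# pkgname=arvados-api-server, version=1.3.0 and iteration=8,
+# complete_pkgname is "arvados-api-server_1.3.0-8_amd64.deb" on a deb
+# target and "arvados-api-server-1.3.0-8.x86_64.rpm" on centos7.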
+
+handle_rails_package() {
+    local pkgname="$1"; shift
+
+    if [[ -n "$ONLY_BUILD" ]] && [[ "$pkgname" != "$ONLY_BUILD" ]] ; then
+        return 0
+    fi
+    local srcdir="$1"; shift
+    cd "$srcdir"
+    local license_path="$1"; shift
+    local version="$(version_from_git)"
+    echo "$version" >package-build.version
+    local scripts_dir="$(mktemp --tmpdir -d "$pkgname-XXXXXXXX.scripts")" && \
+    (
+        set -e
+        _build_rails_package_scripts "$pkgname" "$scripts_dir"
+        cd "$srcdir"
+        mkdir -p tmp
+        git rev-parse HEAD >git-commit.version
+        bundle package --all
+    )
+    if [[ 0 != "$?" ]] || ! cd "$WORKSPACE/packages/$TARGET"; then
+        echo "ERROR: $pkgname package prep failed" >&2
+        rm -rf "$scripts_dir"
+        EXITCODE=1
+        return 1
+    fi
+    local railsdir="/var/www/${pkgname%-server}/current"
+    local -a pos_args=("$srcdir/=$railsdir" "$pkgname" dir "$version")
+    local license_arg="$license_path=$railsdir/$(basename "$license_path")"
+    local -a switches=(--after-install "$scripts_dir/postinst"
+                       --before-remove "$scripts_dir/prerm"
+                       --after-remove "$scripts_dir/postrm")
+    if [[ -z "$ARVADOS_BUILDING_VERSION" ]]; then
+        switches+=(--iteration $RAILS_PACKAGE_ITERATION)
+    fi
+    # For some reason fpm excludes need to not start with /.
+    local exclude_root="${railsdir#/}"
+    # .git and packages are for the SSO server, which is built from its
+    # repository root.
+    local -a exclude_list=(.git packages tmp log coverage Capfile\* \
+                           config/deploy\* config/application.yml)
+    # for arvados-workbench, we need to have the (dummy) config/database.yml in the package
+    if  [[ "$pkgname" != "arvados-workbench" ]]; then
+      exclude_list+=('config/database.yml')
+    fi
+    for exclude in ${exclude_list[@]}; do
+        switches+=(-x "$exclude_root/$exclude")
+    done
+    fpm_build "${pos_args[@]}" "${switches[@]}" \
+              -x "$exclude_root/vendor/cache-*" \
+              -x "$exclude_root/vendor/bundle" "$@" "$license_arg"
+    rm -rf "$scripts_dir"
+}
+
+# Build python packages with a virtualenv built-in
+fpm_build_virtualenv () {
+  PKG=$1
+  shift
+  PKG_DIR=$1
+  shift
+  PACKAGE_TYPE=${1:-python}
+  shift
+
+  # Set up
+  STDOUT_IF_DEBUG=/dev/null
+  STDERR_IF_DEBUG=/dev/null
+  DASHQ_UNLESS_DEBUG=-q
+  if [[ "$DEBUG" != "0" ]]; then
+      STDOUT_IF_DEBUG=/dev/stdout
+      STDERR_IF_DEBUG=/dev/stderr
+      DASHQ_UNLESS_DEBUG=
+  fi
+  if [[ "$ARVADOS_BUILDING_ITERATION" == "" ]]; then
+    ARVADOS_BUILDING_ITERATION=1
+  fi
+
+  local python=""
+  case "$PACKAGE_TYPE" in
+    python3)
+        python=python3
+        if [[ "$FORMAT" != "rpm" ]]; then
+          pip=pip3
+        else
+          # In CentOS, we use a different mechanism to get the right version of pip
+          pip=pip
+        fi
+        PACKAGE_PREFIX=$PYTHON3_PKG_PREFIX
+        ;;
+    python)
+        # All Arvados Python2 packages depend on Python 2.7.
+        # Make sure we build with that for consistency.
+        python=python2.7
+        pip=pip
+        PACKAGE_PREFIX=$PYTHON2_PKG_PREFIX
+        ;;
+  esac
+
+  if [[ "$PKG" != "libpam-arvados" ]] &&
+     [[ "$PKG" != "arvados-node-manager" ]] &&
+     [[ "$PKG" != "arvados-docker-cleaner" ]]; then
+    PYTHON_PKG=$PACKAGE_PREFIX-$PKG
+  else
+    # Exception to our package naming convention
+    PYTHON_PKG=$PKG
+  fi
+
+  if [[ -n "$ONLY_BUILD" ]] && [[ "$PYTHON_PKG" != "$ONLY_BUILD" ]] && [[ "$PKG" != "$ONLY_BUILD" ]]; then
+    return 0
+  fi
+
+  cd $WORKSPACE/$PKG_DIR
+
+  rm -rf dist/*
+
+  # Get the latest setuptools
+  if ! $pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG -U setuptools; then
+    echo "Error, unable to upgrade setuptools with"
+    echo "  $pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG -U setuptools"
+    exit 1
+  fi
+  # filter a useless warning (when building the cwltest package) from the stderr output
+  if ! $python setup.py $DASHQ_UNLESS_DEBUG sdist 2> >(grep -v 'warning: no previously-included files matching'); then
+    echo "Error, unable to run $python setup.py sdist for $PKG"
+    exit 1
+  fi
+
+  PACKAGE_PATH=`(cd dist; ls *tar.gz)`
+
+  # Determine the package version from the generated sdist archive
+  PYTHON_VERSION=${ARVADOS_BUILDING_VERSION:-$(awk '($1 == "Version:"){print $2}' *.egg-info/PKG-INFO)}
+
+  # See if we actually need to build this package; does it exist already?
+  # We can't do this earlier than here, because we need PYTHON_VERSION...
+  # This isn't so bad; the sdist call above is pretty quick compared to
+  # the invocation of virtualenv and fpm, below.
+  if ! test_package_presence "$PYTHON_PKG" $PYTHON_VERSION $PACKAGE_TYPE $ARVADOS_BUILDING_ITERATION; then
+    return 0
+  fi
+
+  echo "Building $FORMAT package for $PKG from $PKG_DIR"
+
+  # Package the sdist in a virtualenv
+  echo "Creating virtualenv..."
+
+  cd dist
+
+  rm -rf build
+  rm -f $PYTHON_PKG*deb
+  echo "virtualenv version: `virtualenv --version`"
+  virtualenv_command="virtualenv --python `which $python` $DASHQ_UNLESS_DEBUG build/usr/share/$python/dist/$PYTHON_PKG"
+
+  if ! $virtualenv_command; then
+    echo "Error, unable to run"
+    echo "  $virtualenv_command"
+    exit 1
+  fi
+
+  if ! build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG -U pip; then
+    echo "Error, unable to upgrade pip with"
+    echo "  build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG -U pip"
+    exit 1
+  fi
+  echo "pip version:        `build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip --version`"
+
+  if ! build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG -U setuptools; then
+    echo "Error, unable to upgrade setuptools with"
+    echo "  build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG -U setuptools"
+    exit 1
+  fi
+  echo "setuptools version: `build/usr/share/$python/dist/$PYTHON_PKG/bin/$python -c 'import setuptools; print(setuptools.__version__)'`"
+
+  if ! build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG -U wheel; then
+    echo "Error, unable to upgrade wheel with"
+    echo "  build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG -U wheel"
+    exit 1
+  fi
+  echo "wheel version:      `build/usr/share/$python/dist/$PYTHON_PKG/bin/wheel version`"
+
+  if [[ "$TARGET" != "centos7" ]] || [[ "$PYTHON_PKG" != "python-arvados-fuse" ]]; then
+    build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG $PACKAGE_PATH
+  else
+    # centos7 needs these special tweaks to install python-arvados-fuse
+    build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG docutils
+    PYCURL_SSL_LIBRARY=nss build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG $PACKAGE_PATH
+  fi
+
+  if [[ "$?" != "0" ]]; then
+    echo "Error, unable to run"
+    echo "  build/usr/share/$python/dist/$PYTHON_PKG/bin/$pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG $PACKAGE_PATH"
+    exit 1
+  fi
+
+  cd build/usr/share/$python/dist/$PYTHON_PKG/
+
+  # Replace the shebang lines in all python scripts, and handle the activate
+  # scripts too. This is a functional replacement of the 237-line
+  # virtualenv_tools.py script, which doesn't work in python3 without serious
+  # patching, minus the parts we don't need (modifying pyc files, etc).
+  for binfile in `ls bin/`; do
+    if ! file --mime bin/$binfile |grep -q binary; then
+      # Not a binary file
+      if [[ "$binfile" =~ ^activate(.csh|.fish|)$ ]]; then
+        # these 'activate' scripts need special treatment
+        sed -i "s/VIRTUAL_ENV=\".*\"/VIRTUAL_ENV=\"\/usr\/share\/$python\/dist\/$PYTHON_PKG\"/" bin/$binfile
+        sed -i "s/VIRTUAL_ENV \".*\"/VIRTUAL_ENV \"\/usr\/share\/$python\/dist\/$PYTHON_PKG\"/" bin/$binfile
+      else
+        if grep -q -E '^#!.*/bin/python[0-9]?' bin/$binfile; then
+          # Replace shebang line
+          sed -i "1 s/^.*$/#!\/usr\/share\/$python\/dist\/$PYTHON_PKG\/bin\/python/" bin/$binfile
+        fi
+      fi
+    fi
+  done
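+  # Hypothetical example of the rewrite above: a build-time shebang like
+  #   #!/tmp/xyz/build/usr/share/python2.7/dist/python-arvados-python-client/bin/python2.7
+  # becomes the install-time path
+  #   #!/usr/share/python2.7/dist/python-arvados-python-client/bin/python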
+
+  cd - >$STDOUT_IF_DEBUG
+
+  find build -iname '*.pyc' -exec rm {} \;
+  find build -iname '*.pyo' -exec rm {} \;
+
+  # Finally, generate the package
+  echo "Creating package..."
+
+  declare -a COMMAND_ARR=("fpm" "-s" "dir" "-t" "$FORMAT")
+
+  if [[ "$MAINTAINER" != "" ]]; then
+    COMMAND_ARR+=('--maintainer' "$MAINTAINER")
+  fi
+
+  if [[ "$VENDOR" != "" ]]; then
+    COMMAND_ARR+=('--vendor' "$VENDOR")
+  fi
+
+  COMMAND_ARR+=('--url' 'https://arvados.org')
+
+  # Get description
+  DESCRIPTION=`grep '\sdescription' $WORKSPACE/$PKG_DIR/setup.py|cut -f2 -d=|sed -e "s/[',\\"]//g"`
+  COMMAND_ARR+=('--description' "$DESCRIPTION")
+
+  # Get license string
+  LICENSE_STRING=`grep license $WORKSPACE/$PKG_DIR/setup.py|cut -f2 -d=|sed -e "s/[',\\"]//g"`
+  COMMAND_ARR+=('--license' "$LICENSE_STRING")
+
+  # 12271 - As FPM-generated packages don't include scripts by default, the
+  # packages' cleanup on upgrade depends on files being listed in the %files
+  # section of the generated SPEC files. To remove DIRECTORIES, they need to
+  # be listed in that section too, so we need to add this parameter to
+  # properly remove lingering dirs. But this only works for python2: if used
+  # on python33, it includes dirs like /opt/rh/python33 that belong to
+  # other packages.
+  if [[ "$FORMAT" == "rpm" ]] && [[ "$python" == "python2.7" ]]; then
+    COMMAND_ARR+=('--rpm-auto-add-directories')
+  fi
+
+  if [[ "$PKG" == "arvados-python-client" ]]; then
+    if [[ "$python" == "python2.7" ]]; then
+      COMMAND_ARR+=('--conflicts' "$PYTHON3_PKG_PREFIX-$PKG")
+    else
+      COMMAND_ARR+=('--conflicts' "$PYTHON2_PKG_PREFIX-$PKG")
+    fi
+  fi
+
+  if [[ "$DEBUG" != "0" ]]; then
+    COMMAND_ARR+=('--verbose' '--log' 'info')
+  fi
+
+  COMMAND_ARR+=('-v' "$PYTHON_VERSION")
+  COMMAND_ARR+=('--iteration' "$ARVADOS_BUILDING_ITERATION")
+  COMMAND_ARR+=('-n' "$PYTHON_PKG")
+  COMMAND_ARR+=('-C' "build")
+
+  if [[ -e "$WORKSPACE/$PKG_DIR/$PKG.service" ]]; then
+    COMMAND_ARR+=('--after-install' "${WORKSPACE}/build/go-python-package-scripts/postinst")
+    COMMAND_ARR+=('--before-remove' "${WORKSPACE}/build/go-python-package-scripts/prerm")
+  fi
+
+  if [[ "$python" == "python2.7" ]]; then
+    COMMAND_ARR+=('--depends' "$PYTHON2_PACKAGE")
+  else
+    COMMAND_ARR+=('--depends' "$PYTHON3_PACKAGE")
+  fi
+
+  # avoid warning
+  COMMAND_ARR+=('--deb-no-default-config-files')
+
+  # Append --depends X and other arguments specified by fpm-info.sh in
+  # the package source dir. These are added last so they can override
+  # the arguments added by this script.
+  declare -a fpm_args=()
+  declare -a fpm_depends=()
+
+  fpminfo="$WORKSPACE/$PKG_DIR/fpm-info.sh"
+  if [[ -e "$fpminfo" ]]; then
+    echo "Loading fpm overrides from $fpminfo"
+    if ! source "$fpminfo"; then
+      echo "Error, unable to source $WORKSPACE/$PKG_DIR/fpm-info.sh for $PKG"
+      exit 1
+    fi
+  fi
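+  # A hypothetical fpm-info.sh (real ones live in each package source dir)
+  # just appends to the arrays declared above, for example:
+  #   case "$TARGET" in
+  #       centos*) fpm_depends+=(fuse-libs) ;;
+  #       debian* | ubuntu*) fpm_depends+=(libfuse2) ;;
+  #   esac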
+
+  for i in "${fpm_depends[@]}"; do
+    COMMAND_ARR+=('--depends' "$i")
+  done
+
+  COMMAND_ARR+=("${fpm_args[@]}")
+
+  # Make sure to install all our package binaries in /usr/bin.
+  # We have to walk $WORKSPACE/$PKG_DIR/bin rather than
+  # $WORKSPACE/build/usr/share/$python/dist/$PYTHON_PKG/bin/ to get the list
+  # because the latter also includes all the python binaries for the virtualenv.
+  # We have to take the copies of our binaries from the latter directory, though,
+  # because those are the ones we rewrote the shebang line of, above.
+  if [[ -e "$WORKSPACE/$PKG_DIR/bin" ]]; then
+    for binary in `ls $WORKSPACE/$PKG_DIR/bin`; do
+      COMMAND_ARR+=("usr/share/$python/dist/$PYTHON_PKG/bin/$binary=/usr/bin/")
+    done
+  fi
+
+  # the libpam module should place this file in the historically correct place
+  # so as not to break backwards compatibility
+  if [[ -e "$WORKSPACE/$PKG_DIR/dist/build/usr/share/python2.7/dist/libpam-arvados/lib/security/libpam_arvados.py" ]]; then
+    COMMAND_ARR+=("usr/share/$python/dist/$PYTHON_PKG/data/lib/security/libpam_arvados.py=/usr/data/lib/security/")
+  fi
+
+  # the python-arvados-cwl-runner package comes with cwltool, expose that version
+  if [[ -e "$WORKSPACE/$PKG_DIR/dist/build/usr/share/python2.7/dist/python-arvados-cwl-runner/bin/cwltool" ]]; then
+    COMMAND_ARR+=("usr/share/python2.7/dist/python-arvados-cwl-runner/bin/cwltool=/usr/bin/")
+  fi
+
+  COMMAND_ARR+=(".")
+
+  FPM_RESULTS=$("${COMMAND_ARR[@]}")
+  FPM_EXIT_CODE=$?
+
+  # if something went wrong and debug is off, print out the fpm command that errored
+  if ! fpm_verify $FPM_EXIT_CODE $FPM_RESULTS && [[ "$STDOUT_IF_DEBUG" == "/dev/null" ]]; then
+    echo "fpm returned an error executing the command:"
+    echo
+    echo -e "\n${COMMAND_ARR[@]}\n"
+  else
+    echo `ls *$FORMAT`
+    mv $WORKSPACE/$PKG_DIR/dist/*$FORMAT $WORKSPACE/packages/$TARGET/
+  fi
+  echo
+}
+
+# Build packages for everything
+fpm_build () {
+  # The package source.  Depending on the source type, this can be a
+  # path, or the name of the package in an upstream repository (e.g.,
+  # pip).
+  PACKAGE=$1
+  shift
+  # The name of the package to build.
+  PACKAGE_NAME=$1
+  shift
+  # The type of source package.  Passed to fpm -s.  Default "dir".
+  PACKAGE_TYPE=${1:-dir}
+  shift
+  # Optional: the package version number.  Passed to fpm -v.
+  VERSION=$1
+  shift
+
+  if [[ -n "$ONLY_BUILD" ]] && [[ "$PACKAGE_NAME" != "$ONLY_BUILD" ]] && [[ "$PACKAGE" != "$ONLY_BUILD" ]] ; then
+      return 0
+  fi
+
+  local default_iteration_value="$(default_iteration "$PACKAGE" "$VERSION" "$PACKAGE_TYPE")"
+
+  declare -a COMMAND_ARR=("fpm" "-s" "$PACKAGE_TYPE" "-t" "$FORMAT")
+  if [ python = "$PACKAGE_TYPE" ] && [ deb = "$FORMAT" ]; then
+      # Dependencies are built from setup.py.  Since setup.py will never
+      # refer to Debian package iterations, it doesn't make sense to
+      # enforce those in the .deb dependencies.
+      COMMAND_ARR+=(--deb-ignore-iteration-in-dependencies)
+  fi
+
+  # 12271 - As FPM-generated packages don't include scripts by default, the
+  # packages' cleanup on upgrade depends on files being listed in the %files
+  # section of the generated SPEC files. To remove DIRECTORIES, they need to
+  # be listed in that section too, so we need to add this parameter to
+  # properly remove lingering dirs. But this only works for python2: if used
+  # on python33, it includes dirs like /opt/rh/python33 that belong to
+  # other packages.
+  if [[ "$FORMAT" = rpm ]] && [[ "$python" = python2.7 ]]; then
+    COMMAND_ARR+=('--rpm-auto-add-directories')
+  fi
+
+  if [[ "$DEBUG" != "0" ]]; then
+    COMMAND_ARR+=('--verbose' '--log' 'info')
+  fi
+
+  if [[ -n "$PACKAGE_NAME" ]]; then
+    COMMAND_ARR+=('-n' "$PACKAGE_NAME")
+  fi
+
+  if [[ "$MAINTAINER" != "" ]]; then
+    COMMAND_ARR+=('--maintainer' "$MAINTAINER")
+  fi
+
+  if [[ "$VENDOR" != "" ]]; then
+    COMMAND_ARR+=('--vendor' "$VENDOR")
+  fi
+
+  if [[ "$VERSION" != "" ]]; then
+    COMMAND_ARR+=('-v' "$VERSION")
+  fi
+  if [[ -n "$default_iteration_value" ]]; then
+      # We can always add an --iteration here.  If another one is specified in $@,
+      # that will take precedence, as desired.
+      COMMAND_ARR+=(--iteration "$default_iteration_value")
+  fi
+
+  # Append --depends X and other arguments specified by fpm-info.sh in
+  # the package source dir. These are added last so they can override
+  # the arguments added by this script.
+  declare -a fpm_args=()
+  declare -a build_depends=()
+  declare -a fpm_depends=()
+  declare -a fpm_exclude=()
+  declare -a fpm_dirs=(
+      # source dir part of 'dir' package ("/source=/dest" => "/source"):
+      "${PACKAGE%%=/*}")
+  for pkgdir in "${fpm_dirs[@]}"; do
+      fpminfo="$pkgdir/fpm-info.sh"
+      if [[ -e "$fpminfo" ]]; then
+          debug_echo "Loading fpm overrides from $fpminfo"
+          source "$fpminfo"
+          break
+      fi
+  done
+  for pkg in "${build_depends[@]}"; do
+      if [[ $TARGET =~ debian|ubuntu ]]; then
+          pkg_deb=$(ls "$WORKSPACE/packages/$TARGET/${pkg}_"*.deb | sort -rg | awk 'NR==1')
+          if [[ -e $pkg_deb ]]; then
+              echo "Installing build_dep $pkg from $pkg_deb"
+              dpkg -i "$pkg_deb"
+          else
+              echo "Attemping to install build_dep $pkg using apt-get"
+              apt-get install -y "$pkg"
+          fi
+          apt-get -y -f install
+      else
+          pkg_rpm=$(ls "$WORKSPACE/packages/$TARGET/$pkg"-[0-9]*.rpm | sort -rg | awk 'NR==1')
+          if [[ -e $pkg_rpm ]]; then
+              echo "Installing build_dep $pkg from $pkg_rpm"
+              rpm -i "$pkg_rpm"
+          else
+              echo "Attemping to install build_dep $pkg"
+              rpm -i "$pkg"
+          fi
+      fi
+  done
+  for i in "${fpm_depends[@]}"; do
+    COMMAND_ARR+=('--depends' "$i")
+  done
+  for i in "${fpm_exclude[@]}"; do
+    COMMAND_ARR+=('--exclude' "$i")
+  done
+
+  # Append remaining function arguments directly to fpm's command line.
+  for i; do
+    COMMAND_ARR+=("$i")
+  done
+
+  COMMAND_ARR+=("${fpm_args[@]}")
+
+  COMMAND_ARR+=("$PACKAGE")
+
+  debug_echo -e "\n${COMMAND_ARR[@]}\n"
+
+  FPM_RESULTS=$("${COMMAND_ARR[@]}")
+  FPM_EXIT_CODE=$?
+
+  fpm_verify $FPM_EXIT_CODE $FPM_RESULTS
+
+  # if something went wrong and debug is off, print out the fpm command that errored
+  if [[ 0 -ne $? ]] && [[ "$STDOUT_IF_DEBUG" == "/dev/null" ]]; then
+    echo -e "\n${COMMAND_ARR[@]}\n"
+  fi
+}
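+# Hypothetical example invocation (cf. the call in package_go_binary above;
+# names and version are illustrative only):
+#   fpm_build "$GOPATH/bin/keepstore=/usr/bin/keepstore" keepstore dir 1.3.0 \
+#       "--url=https://arvados.org" "--description=Keep storage daemon"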
+
+# verify build results
+fpm_verify () {
+  FPM_EXIT_CODE=$1
+  shift
+  FPM_RESULTS=$@
+
+  FPM_PACKAGE_NAME=''
+  if [[ $FPM_RESULTS =~ ([A-Za-z0-9_\.-]*\.)(deb|rpm) ]]; then
+    FPM_PACKAGE_NAME=${BASH_REMATCH[1]}${BASH_REMATCH[2]}
+  fi
+
+  if [[ "$FPM_PACKAGE_NAME" == "" ]]; then
+    EXITCODE=1
+    echo
+    echo "Error: $PACKAGE: Unable to figure out package name from fpm results:"
+    echo
+    echo $FPM_RESULTS
+    echo
+    return 1
+  elif [[ "$FPM_RESULTS" =~ "File already exists" ]]; then
+    echo "Package $FPM_PACKAGE_NAME exists, not rebuilding"
+    return 0
+  elif [[ 0 -ne "$FPM_EXIT_CODE" ]]; then
+    EXITCODE=1
+    echo "Error building package for $1:\n $FPM_RESULTS"
+    return 1
+  fi
+}
+
+install_package() {
+  PACKAGES=$@
+  if [[ "$FORMAT" == "deb" ]]; then
+    $SUDO apt-get install $PACKAGES --yes
+  elif [[ "$FORMAT" == "rpm" ]]; then
+    $SUDO yum -q -y install $PACKAGES
+  fi
+}
+
+title () {
+    txt="********** $1 **********"
+    printf "\n%*s%s\n\n" $((($COLUMNS-${#txt})/2)) "" "$txt"
+}
+
+checkexit() {
+    if [[ "$1" != "0" ]]; then
+        title "!!!!!! $2 FAILED !!!!!!"
+        failures+=("$2 (`timer`)")
+    else
+        successes+=("$2 (`timer`)")
+    fi
+}
+
+timer_reset() {
+    t0=$SECONDS
+}
+
+timer() {
+    echo -n "$(($SECONDS - $t0))s"
+}
+
+report_outcomes() {
+    for x in "${successes[@]}"
+    do
+        echo "Pass: $x"
+    done
+
+    if [[ ${#failures[@]} == 0 ]]
+    then
+        echo "All test suites passed."
+    else
+        echo "Failures (${#failures[@]}):"
+        for x in "${failures[@]}"
+        do
+            echo "Fail: $x"
+        done
+    fi
+}
diff --git a/build/run-tests.sh b/build/run-tests.sh
new file mode 100755 (executable)
index 0000000..095d32e
--- /dev/null
@@ -0,0 +1,1099 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+. `dirname "$(readlink -f "$0")"`/libcloud-pin.sh
+
+COLUMNS=80
+. `dirname "$(readlink -f "$0")"`/run-library.sh
+
+read -rd "\000" helpmessage <<EOF
+$(basename $0): Install and test Arvados components.
+
+Exit non-zero if any tests fail.
+
+Syntax:
+        $(basename $0) WORKSPACE=/path/to/arvados [options]
+
+Options:
+
+--skip FOO     Do not test the FOO component.
+--only FOO     Do not test anything except the FOO component.
+--temp DIR     Install components and dependencies under DIR instead of
+               making a new temporary directory. Implies --leave-temp.
+--leave-temp   Do not remove GOPATH, virtualenv, and other temp dirs at exit.
+               Instead, show the path to give as --temp to reuse them in
+               subsequent invocations.
+--repeat N     Repeat each install/test step until it succeeds N times.
+--retry        Prompt to retry if an install or test suite fails.
+--skip-install Do not run any install steps. Just run tests.
+               You should provide GOPATH, GEMHOME, and VENVDIR options
+               from a previous invocation if you use this option.
+--only-install Run specific install step
+--short        Skip (or scale down) some slow tests.
+WORKSPACE=path Arvados source tree to test.
+CONFIGSRC=path Dir with api server config files to copy into source tree.
+               (If none given, leave config files alone in source tree.)
+services/api_test="TEST=test/functional/arvados/v1/collections_controller_test.rb"
+               Restrict apiserver tests to the given file
+sdk/python_test="--test-suite tests.test_keep_locator"
+               Restrict Python SDK tests to the given class
+apps/workbench_test="TEST=test/integration/pipeline_instances_test.rb"
+               Restrict Workbench tests to the given file
+services/arv-git-httpd_test="-check.vv"
+               Show all log messages, even when tests pass (also works
+               with services/keepstore_test etc.)
+ARVADOS_DEBUG=1
+               Print more debug messages
+envvar=value   Set \$envvar to value. Primarily useful for WORKSPACE,
+               *_test, and other examples shown above.
+
+Assuming --skip-install is not given, all components are installed
+into \$GOPATH, \$VENVDIR, and \$GEMHOME before running any tests. Many
+test suites depend on other components being installed, and installing
+everything tends to be quicker than debugging dependencies.
+
+As a special concession to the current CI server config, CONFIGSRC
+defaults to $HOME/arvados-api-server if that directory exists.
+
+More information and background:
+
+https://arvados.org/projects/arvados/wiki/Running_tests
+
+Available tests:
+
+apps/workbench (*)
+apps/workbench_units (*)
+apps/workbench_functionals (*)
+apps/workbench_integration (*)
+apps/workbench_benchmark
+apps/workbench_profile
+cmd/arvados-client
+cmd/arvados-server
+doc
+lib/cli
+lib/cmd
+lib/controller
+lib/crunchstat
+lib/cloud
+lib/cloud/azure
+lib/dispatchcloud
+lib/dispatchcloud/container
+lib/dispatchcloud/scheduler
+lib/dispatchcloud/ssh_executor
+lib/dispatchcloud/worker
+services/api
+services/arv-git-httpd
+services/crunchstat
+services/dockercleaner
+services/fuse
+services/health
+services/keep-web
+services/keepproxy
+services/keepstore
+services/keep-balance
+services/login-sync
+services/nodemanager
+services/nodemanager_integration
+services/crunch-run
+services/crunch-dispatch-local
+services/crunch-dispatch-slurm
+services/ws
+sdk/cli
+sdk/pam
+sdk/python
+sdk/python:py3
+sdk/ruby
+sdk/go/arvados
+sdk/go/arvadosclient
+sdk/go/auth
+sdk/go/dispatch
+sdk/go/keepclient
+sdk/go/health
+sdk/go/httpserver
+sdk/go/manifest
+sdk/go/blockdigest
+sdk/go/asyncbuf
+sdk/go/stats
+sdk/go/crunchrunner
+sdk/cwl
+sdk/R
+tools/sync-groups
+tools/crunchstat-summary
+tools/keep-exercise
+tools/keep-rsync
+tools/keep-block-check
+
+(*) apps/workbench is shorthand for apps/workbench_units +
+    apps/workbench_functionals + apps/workbench_integration
+
+EOF
+
+# First make sure to remove any ARVADOS_ variables from the calling
+# environment that could interfere with the tests.
+unset $(env | cut -d= -f1 | grep \^ARVADOS_)
+
+# Reset other variables that could affect our tests' behavior by
+# accident.
+GITDIR=
+GOPATH=
+VENVDIR=
+VENV3DIR=
+PYTHONPATH=
+GEMHOME=
+PERLINSTALLBASE=
+R_LIBS=
+
+short=
+only_install=
+temp=
+temp_preserve=
+
+clear_temp() {
+    if [[ -z "$temp" ]]; then
+        # we didn't even get as far as making a temp dir
+        :
+    elif [[ -z "$temp_preserve" ]]; then
+        rm -rf "$temp"
+    else
+        echo "Leaving behind temp dirs in $temp"
+    fi
+}
+
+fatal() {
+    clear_temp
+    echo >&2 "Fatal: $* (encountered in ${FUNCNAME[1]} at ${BASH_SOURCE[1]} line ${BASH_LINENO[0]})"
+    exit 1
+}
+
+exit_cleanly() {
+    trap - INT
+    create-plot-data-from-log.sh $BUILD_NUMBER "$WORKSPACE/apps/workbench/log/test.log" "$WORKSPACE/apps/workbench/log/"
+    rotate_logfile "$WORKSPACE/apps/workbench/log/" "test.log"
+    stop_services
+    rotate_logfile "$WORKSPACE/services/api/log/" "test.log"
+    report_outcomes
+    clear_temp
+    exit ${#failures[@]}
+}
+
+sanity_checks() {
+    ( [[ -n "$WORKSPACE" ]] && [[ -d "$WORKSPACE/services" ]] ) \
+        || fatal "WORKSPACE environment variable not set to a source directory (see: $0 --help)"
+    echo Checking dependencies:
+    echo -n 'virtualenv: '
+    virtualenv --version \
+        || fatal "No virtualenv. Try: apt-get install virtualenv (on ubuntu: python-virtualenv)"
+    echo -n 'ruby: '
+    ruby -v \
+        || fatal "No ruby. Install >=2.1.9 (using rbenv, rvm, or source)"
+    echo -n 'go: '
+    go version \
+        || fatal "No go binary. See http://golang.org/doc/install"
+    [[ $(go version) =~ go1.([0-9]+) ]] && [[ ${BASH_REMATCH[1]} -ge 10 ]] \
+        || fatal "Go >= 1.10 required. See http://golang.org/doc/install"
+    echo -n 'gcc: '
+    gcc --version | egrep ^gcc \
+        || fatal "No gcc. Try: apt-get install build-essential"
+    echo -n 'fuse.h: '
+    find /usr/include -path '*fuse/fuse.h' | egrep --max-count=1 . \
+        || fatal "No fuse/fuse.h. Try: apt-get install libfuse-dev"
+    echo -n 'gnutls.h: '
+    find /usr/include -path '*gnutls/gnutls.h' | egrep --max-count=1 . \
+        || fatal "No gnutls/gnutls.h. Try: apt-get install libgnutls28-dev"
+    echo -n 'Python2 pyconfig.h: '
+    find /usr/include -path '*/python2*/pyconfig.h' | egrep --max-count=1 . \
+        || fatal "No Python2 pyconfig.h. Try: apt-get install python2.7-dev"
+    echo -n 'Python3 pyconfig.h: '
+    find /usr/include -path '*/python3*/pyconfig.h' | egrep --max-count=1 . \
+        || fatal "No Python3 pyconfig.h. Try: apt-get install python3-dev"
+    echo -n 'nginx: '
+    PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin" nginx -v \
+        || fatal "No nginx. Try: apt-get install nginx"
+    echo -n 'perl: '
+    perl -v | grep version \
+        || fatal "No perl. Try: apt-get install perl"
+    for mod in ExtUtils::MakeMaker JSON LWP Net::SSL; do
+        echo -n "perl $mod: "
+        perl -e "use $mod; print \"\$$mod::VERSION\\n\"" \
+            || fatal "No $mod. Try: apt-get install perl-modules libcrypt-ssleay-perl libjson-perl libwww-perl"
+    done
+    echo -n 'gitolite: '
+    which gitolite \
+        || fatal "No gitolite. Try: apt-get install gitolite3"
+    echo -n 'npm: '
+    npm --version \
+        || fatal "No npm. Try: wget -O- https://nodejs.org/dist/v6.11.2/node-v6.11.2-linux-x64.tar.xz | sudo tar -C /usr/local -xJf - && sudo ln -s ../node-v6.11.2-linux-x64/bin/{node,npm} /usr/local/bin/"
+    echo -n 'cadaver: '
+    cadaver --version | grep -w cadaver \
+          || fatal "No cadaver. Try: apt-get install cadaver"
+    echo -n 'libattr1 xattr.h: '
+    find /usr/include -path '*/attr/xattr.h' | egrep --max-count=1 . \
+        || fatal "No libattr1 xattr.h. Try: apt-get install libattr1-dev"
+    echo -n 'libcurl curl.h: '
+    find /usr/include -path '*/curl/curl.h' | egrep --max-count=1 . \
+        || fatal "No libcurl curl.h. Try: apt-get install libcurl4-gnutls-dev"
+    echo -n 'libpq libpq-fe.h: '
+    find /usr/include -path '*/postgresql/libpq-fe.h' | egrep --max-count=1 . \
+        || fatal "No libpq libpq-fe.h. Try: apt-get install libpq-dev"
+    echo -n 'services/api/config/database.yml: '
+    if [[ ! -f "$WORKSPACE/services/api/config/database.yml" ]]; then
+           fatal "Please provide a database.yml file for the test suite"
+    else
+           echo "OK"
+    fi
+    echo -n 'postgresql: '
+    psql --version || fatal "No postgresql. Try: apt-get install postgresql postgresql-client-common"
+    echo -n 'phantomjs: '
+    phantomjs --version || fatal "No phantomjs. Try: apt-get install phantomjs"
+    echo -n 'xvfb: '
+    which Xvfb || fatal "No xvfb. Try: apt-get install xvfb"
+    echo -n 'graphviz: '
+    dot -V || fatal "No graphviz. Try: apt-get install graphviz"
+    echo -n 'geckodriver: '
+    geckodriver --version | grep ^geckodriver || echo "No geckodriver. Try: wget -O- https://github.com/mozilla/geckodriver/releases/download/v0.23.0/geckodriver-v0.23.0-linux64.tar.gz | sudo tar -C /usr/local/bin -xzf - geckodriver"
+
+    if [[ "$NEED_SDK_R" = true ]]; then
+      # R SDK stuff
+      echo -n 'R: '
+      which Rscript || fatal "No Rscript. Try: apt-get install r-base"
+      echo -n 'testthat: '
+      Rscript -e "library('testthat')" || fatal "No testthat. Try: apt-get install r-cran-testthat"
+      # needed for roxygen2, needed for devtools, needed for R sdk
+      pkg-config --exists libxml-2.0 || fatal "No libxml2. Try: apt-get install libxml2-dev"
+      # needed for pkgdown, builds R SDK doc pages
+      which pandoc || fatal "No pandoc. Try: apt-get install pandoc"
+    fi
+}
+
+rotate_logfile() {
+  # i.e.  rotate_logfile "$WORKSPACE/apps/workbench/log/" "test.log"
+  # $BUILD_NUMBER is set by Jenkins if this script is being called as part of a Jenkins run
+  if [[ -f "$1/$2" ]]; then
+    THEDATE=`date +%Y%m%d%H%M%S`
+    mv "$1/$2" "$1/$THEDATE-$BUILD_NUMBER-$2"
+    gzip "$1/$THEDATE-$BUILD_NUMBER-$2"
+  fi
+}
+
+declare -a failures
+declare -A skip
+declare -A only
+declare -A testargs
+skip[apps/workbench_profile]=1
+# nodemanager_integration tests are not reliable, see #12061.
+skip[services/nodemanager_integration]=1
+
+while [[ -n "$1" ]]
+do
+    arg="$1"; shift
+    case "$arg" in
+        --help)
+            echo >&2 "$helpmessage"
+            echo >&2
+            exit 1
+            ;;
+        --skip)
+            skip[$1]=1; shift
+            ;;
+        --only)
+            only[$1]=1; skip[$1]=""; shift
+            ;;
+        --short)
+            short=1
+            ;;
+        --skip-install)
+            only_install=nothing
+            ;;
+        --only-install)
+            only_install="$1"; shift
+            ;;
+        --temp)
+            temp="$1"; shift
+            temp_preserve=1
+            ;;
+        --leave-temp)
+            temp_preserve=1
+            ;;
+        --repeat)
+            repeat=$((${1}+0)); shift
+            ;;
+        --retry)
+            retry=1
+            ;;
+        *_test=*)
+            suite="${arg%%_test=*}"
+            args="${arg#*=}"
+            testargs["$suite"]="$args"
+            ;;
+        *=*)
+            eval export $(echo $arg | cut -d= -f1)=\"$(echo $arg | cut -d= -f2-)\"
+            ;;
+        *)
+            echo >&2 "$0: Unrecognized option: '$arg'. Try: $0 --help"
+            exit 1
+            ;;
+    esac
+done
+
+# R SDK installation is very slow (~360s in a clean environment) and only
+# required when testing it. Skip that step if it is not needed.
+NEED_SDK_R=true
+
+if [[ ${#only[@]} -ne 0 ]] &&
+   [[ -z "${only['sdk/R']}" && -z "${only['doc']}" ]]; then
+  NEED_SDK_R=false
+fi
+
+if [[ ${skip["sdk/R"]} == 1 && ${skip["doc"]} == 1 ]]; then
+  NEED_SDK_R=false
+fi
+
+if [[ $NEED_SDK_R == false ]]; then
+       echo "R SDK not needed, it will not be installed."
+fi
+
+start_services() {
+    echo 'Starting API, keepproxy, keep-web, ws, arv-git-httpd, and nginx ssl proxy...'
+    if [[ ! -d "$WORKSPACE/services/api/log" ]]; then
+       mkdir -p "$WORKSPACE/services/api/log"
+    fi
+    # Remove empty api.pid file if it exists
+    if [[ -f "$WORKSPACE/tmp/api.pid" && ! -s "$WORKSPACE/tmp/api.pid" ]]; then
+       rm -f "$WORKSPACE/tmp/api.pid"
+    fi
+    cd "$WORKSPACE" \
+        && eval $(python sdk/python/tests/run_test_server.py start --auth admin || echo fail=1) \
+        && export ARVADOS_TEST_API_HOST="$ARVADOS_API_HOST" \
+        && export ARVADOS_TEST_API_INSTALLED="$$" \
+        && python sdk/python/tests/run_test_server.py start_controller \
+        && python sdk/python/tests/run_test_server.py start_keep_proxy \
+        && python sdk/python/tests/run_test_server.py start_keep-web \
+        && python sdk/python/tests/run_test_server.py start_arv-git-httpd \
+        && python sdk/python/tests/run_test_server.py start_ws \
+        && eval $(python sdk/python/tests/run_test_server.py start_nginx || echo fail=1) \
+        && (env | egrep ^ARVADOS)
+    if [[ -n "$fail" ]]; then
+       return 1
+    fi
+}
+
+stop_services() {
+    if [[ -z "$ARVADOS_TEST_API_HOST" ]]; then
+        return
+    fi
+    unset ARVADOS_TEST_API_HOST
+    cd "$WORKSPACE" \
+        && python sdk/python/tests/run_test_server.py stop_nginx \
+        && python sdk/python/tests/run_test_server.py stop_arv-git-httpd \
+        && python sdk/python/tests/run_test_server.py stop_ws \
+        && python sdk/python/tests/run_test_server.py stop_keep-web \
+        && python sdk/python/tests/run_test_server.py stop_keep_proxy \
+        && python sdk/python/tests/run_test_server.py stop_controller \
+        && python sdk/python/tests/run_test_server.py stop
+}
+
+interrupt() {
+    failures+=("($(basename $0) interrupted)")
+    exit_cleanly
+}
+trap interrupt INT
+
+sanity_checks
+
+echo "WORKSPACE=$WORKSPACE"
+
+if [[ -z "$CONFIGSRC" ]] && [[ -d "$HOME/arvados-api-server" ]]; then
+    # Jenkins expects us to use this by default.
+    CONFIGSRC="$HOME/arvados-api-server"
+fi
+
+# Clean up .pyc files that may exist in the workspace
+cd "$WORKSPACE"
+find -name '*.pyc' -delete
+
+if [[ -z "$temp" ]]; then
+    temp="$(mktemp -d)"
+fi
+
+# Set up temporary install dirs (unless existing dirs were supplied)
+for tmpdir in VENVDIR VENV3DIR GOPATH GEMHOME PERLINSTALLBASE R_LIBS
+do
+    if [[ -z "${!tmpdir}" ]]; then
+        eval "$tmpdir"="$temp/$tmpdir"
+    fi
+    if ! [[ -d "${!tmpdir}" ]]; then
+        mkdir "${!tmpdir}" || fatal "can't create ${!tmpdir} (does $temp exist?)"
+    fi
+done
+
+rm -vf "${WORKSPACE}/tmp/*.log"
+
+setup_ruby_environment() {
+    if [[ -s "$HOME/.rvm/scripts/rvm" ]] ; then
+        source "$HOME/.rvm/scripts/rvm"
+        using_rvm=true
+    elif [[ -s "/usr/local/rvm/scripts/rvm" ]] ; then
+        source "/usr/local/rvm/scripts/rvm"
+        using_rvm=true
+    else
+        using_rvm=false
+    fi
+
+    if [[ "$using_rvm" == true ]]; then
+        # If rvm is in use, we can't just put separate "dependencies"
+        # and "gems-under-test" paths to GEM_PATH: passenger resets
+        # the environment to the "current gemset", which would lose
+        # our GEM_PATH and prevent our test suites from running ruby
+        # programs (for example, the Workbench test suite could not
+        # boot an API server or run arv). Instead, we have to make an
+        # rvm gemset and use it for everything.
+
+        [[ `type rvm | head -n1` == "rvm is a function" ]] \
+            || fatal 'rvm check'
+
+        # Put rvm's favorite path back in first place (overriding
+        # virtualenv, which just put itself there). Ignore rvm's
+        # complaint about not being in first place already.
+        rvm use @default 2>/dev/null
+
+        # Create (if needed) and switch to an @arvados-tests-* gemset,
+        # salting the gemset name so it doesn't interfere with
+        # concurrent builds in other workspaces. Leave the choice of
+        # ruby to the caller.
+        gemset="arvados-tests-$(echo -n "${WORKSPACE}" | md5sum | head -c16)"
+        rvm use "@${gemset}" --create \
+            || fatal 'rvm gemset setup'
+
+        rvm env
+    else
+        # When our "bundle install"s need to install new gems to
+        # satisfy dependencies, we want them to go where "gem install
+        # --user-install" would put them. (However, if the caller has
+        # already set GEM_HOME, we assume that's where dependencies
+        # should be installed, and we should leave it alone.)
+
+        if [ -z "$GEM_HOME" ]; then
+            user_gempath="$(gem env gempath)"
+            export GEM_HOME="${user_gempath%%:*}"
+        fi
+        PATH="$(gem env gemdir)/bin:$PATH"
+
+        # When we build and install our own gems, we install them in our
+        # $GEMHOME tmpdir, and we want them to be at the front of GEM_PATH and
+        # PATH so integration tests prefer them over other versions that
+        # happen to be installed in $user_gempath, system dirs, etc.
+
+        tmpdir_gem_home="$(env - PATH="$PATH" HOME="$GEMHOME" gem env gempath | cut -f1 -d:)"
+        PATH="$tmpdir_gem_home/bin:$PATH"
+        export GEM_PATH="$tmpdir_gem_home"
+
+        echo "Will install dependencies to $(gem env gemdir)"
+        echo "Will install arvados gems to $tmpdir_gem_home"
+        echo "Gem search path is GEM_PATH=$GEM_PATH"
+    fi
+    bundle config || gem install bundler \
+        || fatal 'install bundler'
+}
+
+with_test_gemset() {
+    if [[ "$using_rvm" == true ]]; then
+        "$@"
+    else
+        GEM_HOME="$tmpdir_gem_home" GEM_PATH="$tmpdir_gem_home" "$@"
+    fi
+}
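+# Hypothetical example: "with_test_gemset gem install arvados-cli" installs
+# the gem into $tmpdir_gem_home (unless rvm is in use, in which case the
+# rvm gemset set up above already provides the isolation).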
+
+gem_uninstall_if_exists() {
+    if gem list "$1\$" | egrep '^\w'; then
+        gem uninstall --force --all --executables "$1"
+    fi
+}
+
+setup_virtualenv() {
+    local venvdest="$1"; shift
+    if ! [[ -e "$venvdest/bin/activate" ]] || ! [[ -e "$venvdest/bin/pip" ]]; then
+        virtualenv --setuptools "$@" "$venvdest" || fatal "virtualenv $venvdest failed"
+    elif [[ -n "$short" ]]; then
+        return
+    fi
+    if [[ $("$venvdest/bin/python" --version 2>&1) =~ \ 3\.[012]\. ]]; then
+        # pip 8.0.0 dropped support for python 3.2, e.g., debian wheezy
+        "$venvdest/bin/pip" install --no-cache-dir 'setuptools>=18.5' 'pip>=7,<8'
+    else
+        "$venvdest/bin/pip" install --no-cache-dir 'setuptools>=18.5' 'pip>=7'
+    fi
+    # ubuntu1404 can't seem to install mock via tests_require, but it can do this.
+    "$venvdest/bin/pip" install --no-cache-dir 'mock>=1.0' 'pbr<1.7.0'
+}
+
+export PERLINSTALLBASE
+export PERL5LIB="$PERLINSTALLBASE/lib/perl5${PERL5LIB:+:$PERL5LIB}"
+
+export R_LIBS
+
+export GOPATH
+(
+    set -e
+    mkdir -p "$GOPATH/src/git.curoverse.com"
+    if [[ ! -h "$GOPATH/src/git.curoverse.com/arvados.git" ]]; then
+        for d in \
+            "$GOPATH/src/git.curoverse.com/arvados.git/tmp/GOPATH" \
+                "$GOPATH/src/git.curoverse.com/arvados.git/tmp" \
+                "$GOPATH/src/git.curoverse.com/arvados.git"; do
+            [[ -d "$d" ]] && rmdir "$d"
+        done
+    fi
+    for d in \
+        "$GOPATH/src/git.curoverse.com/arvados.git/arvados" \
+        "$GOPATH/src/git.curoverse.com/arvados.git"; do
+        [[ -h "$d" ]] && rm "$d"
+    done
+    ln -vsfT "$WORKSPACE" "$GOPATH/src/git.curoverse.com/arvados.git"
+    go get -v github.com/kardianos/govendor
+    cd "$GOPATH/src/git.curoverse.com/arvados.git"
+    if [[ -n "$short" ]]; then
+        go get -v -d ...
+        "$GOPATH/bin/govendor" sync
+    else
+        # Remove cached source dirs in workdir. Otherwise, they will
+        # not qualify as +missing or +external below, and we won't be
+        # able to detect that they're missing from vendor/vendor.json.
+        rm -rf vendor/*/
+        go get -v -d ...
+        "$GOPATH/bin/govendor" sync
+        [[ -z $("$GOPATH/bin/govendor" list +unused +missing +external | tee /dev/stderr) ]] \
+            || fatal "vendor/vendor.json has unused or missing dependencies -- try:
+
+(export GOPATH=\"${GOPATH}\"; cd \$GOPATH/src/git.curoverse.com/arvados.git && \$GOPATH/bin/govendor add +missing +external && \$GOPATH/bin/govendor remove +unused)
+
+";
+    fi
+) || fatal "Go setup failed"
+
+setup_virtualenv "$VENVDIR" --python python2.7
+. "$VENVDIR/bin/activate"
+
+# Needed for run_test_server.py which is used by certain (non-Python) tests.
+pip install --no-cache-dir PyYAML \
+    || fatal "pip install PyYAML failed"
+
+# Preinstall libcloud if using a fork; otherwise nodemanager "pip
+# install" won't pick it up by default.
+if [[ -n "$LIBCLOUD_PIN_SRC" ]]; then
+    pip freeze 2>/dev/null | egrep ^apache-libcloud==$LIBCLOUD_PIN \
+        || pip install --pre --ignore-installed --no-cache-dir "$LIBCLOUD_PIN_SRC" >/dev/null \
+        || fatal "pip install apache-libcloud failed"
+fi
+
+# Deactivate Python 2 virtualenv
+deactivate
+
+declare -a pythonstuff
+pythonstuff=(
+    sdk/pam
+    sdk/python
+    sdk/python:py3
+    sdk/cwl
+    sdk/cwl:py3
+    services/dockercleaner:py3
+    services/fuse
+    services/nodemanager
+    tools/crunchstat-summary
+    )
+
+# If Python 3 is available, set up its virtualenv in $VENV3DIR.
+# Otherwise, skip dependent tests.
+PYTHON3=$(which python3)
+if [[ ${?} = 0 ]]; then
+    setup_virtualenv "$VENV3DIR" --python python3
+else
+    PYTHON3=
+    cat >&2 <<EOF
+
+Warning: python3 could not be found. Python 3 tests will be skipped.
+
+EOF
+fi
+
+# Reactivate Python 2 virtualenv
+. "$VENVDIR/bin/activate"
+
+# Note: this must be the last time we change PATH, otherwise rvm will
+# whine a lot.
+setup_ruby_environment
+
+echo "PATH is $PATH"
+
+if ! which bundler >/dev/null
+then
+    gem install --user-install bundler || fatal 'Could not install bundler'
+fi
+
+# Jenkins config requires that glob tmp/*.log match something. Ensure
+# that happens even if we don't end up running services that set up
+# logging.
+mkdir -p "${WORKSPACE}/tmp/" || fatal "could not mkdir ${WORKSPACE}/tmp"
+touch "${WORKSPACE}/tmp/controller.log" || fatal "could not touch ${WORKSPACE}/tmp/controller.log"
+
+retry() {
+    remain="${repeat}"
+    while :
+    do
+        if ${@}; then
+            if [[ "$remain" -gt 1 ]]; then
+                remain=$((${remain}-1))
+                title "Repeating ${remain} more times"
+            else
+                break
+            fi
+        elif [[ "$retry" == 1 ]]; then
+            read -p 'Try again? [Y/n] ' x
+            if [[ "$x" != "y" ]] && [[ "$x" != "" ]]
+            then
+                break
+            fi
+        else
+            break
+        fi
+    done
+}
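+# Hypothetical example: with --repeat 3, "retry do_test_once sdk/python pip"
+# reruns the suite until it has passed 3 times; with --retry, a failure
+# prompts the operator before giving up.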
+
+do_test() {
+    case "${1}" in
+        apps/workbench_units | apps/workbench_functionals | apps/workbench_integration)
+            suite=apps/workbench
+            ;;
+        services/nodemanager | services/nodemanager_integration)
+            suite=services/nodemanager_suite
+            ;;
+        *)
+            suite="${1}"
+            ;;
+    esac
+    if [[ -z "${skip[$suite]}" && -z "${skip[$1]}" && \
+              (${#only[@]} -eq 0 || ${only[$suite]} -eq 1 || \
+                   ${only[$1]} -eq 1) ||
+                  ${only[$2]} -eq 1 ]]; then
+        retry do_test_once ${@}
+    else
+        title "Skipping ${1} tests"
+    fi
+}
+
+do_test_once() {
+    unset result
+
+    title "Running $1 tests"
+    timer_reset
+    if [[ "$2" == "go" ]]
+    then
+        covername="coverage-$(echo "$1" | sed -e 's/\//_/g')"
+        coverflags=("-covermode=count" "-coverprofile=$WORKSPACE/tmp/.$covername.tmp")
+        # We do "go get -t" here to catch compilation errors
+        # before trying "go test". Otherwise, coverage-reporting
+        # mode makes Go show the wrong line numbers when reporting
+        # compilation errors.
+        go get -ldflags "-X main.version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev}" -t "git.curoverse.com/arvados.git/$1" && \
+            cd "$GOPATH/src/git.curoverse.com/arvados.git/$1" && \
+            [[ -z "$(gofmt -e -d . | tee /dev/stderr)" ]] && \
+            if [[ -n "${testargs[$1]}" ]]
+        then
+            # "go test -check.vv giturl" doesn't work, but this
+            # does:
+            go test ${short:+-short} ${testargs[$1]}
+        else
+            # The above form gets verbose even when testargs is
+            # empty, so use this form in such cases:
+            go test ${short:+-short} ${coverflags[@]} "git.curoverse.com/arvados.git/$1"
+        fi
+        result=${result:-$?}
+        if [[ -f "$WORKSPACE/tmp/.$covername.tmp" ]]
+        then
+            go tool cover -html="$WORKSPACE/tmp/.$covername.tmp" -o "$WORKSPACE/tmp/$covername.html"
+            rm "$WORKSPACE/tmp/.$covername.tmp"
+        fi
+    elif [[ "$2" == "pip" ]]
+    then
+        tries=0
+        cd "$WORKSPACE/$1" && while :
+        do
+            tries=$((${tries}+1))
+            # $3 can name a path directory for us to use, including trailing
+            # slash; e.g., the bin/ subdirectory of a virtualenv.
+            "${3}python" setup.py ${short:+--short-tests-only} test ${testargs[$1]}
+            result=$?
+            if [[ ${tries} < 3 && ${result} == 137 ]]
+            then
+                printf '\n*****\n%s tests killed -- retrying\n*****\n\n' "$1"
+                continue
+            else
+                break
+            fi
+        done
+    elif [[ "$2" != "" ]]
+    then
+        "test_$2"
+    else
+        "test_$1"
+    fi
+    result=${result:-$?}
+    checkexit $result "$1 tests"
+    title "End of $1 tests (`timer`)"
+    return $result
+}
+
+do_install() {
+  skipit=false
+
+  if [[ -z "${only_install}" || "${only_install}" == "${1}" || "${only_install}" == "${2}" ]]; then
+      retry do_install_once ${@}
+  else
+      skipit=true
+  fi
+
+  if [[ "$skipit" = true ]]; then
+    title "Skipping $1 install"
+  fi
+}
+
+do_install_once() {
+    title "Running $1 install"
+    timer_reset
+    if [[ "$2" == "go" ]]
+    then
+        go get -ldflags "-X main.version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev}" -t "git.curoverse.com/arvados.git/$1"
+    elif [[ "$2" == "pip" ]]
+    then
+        # $3 can name a path directory for us to use, including trailing
+        # slash; e.g., the bin/ subdirectory of a virtualenv.
+
+        # Need to change to a different directory after creating
+        # the source dist package to avoid a pip bug.
+        # see https://arvados.org/issues/5766 for details.
+
+        # Also need to install twice, because if it believes the package is
+        # already installed, pip won't install it.  So the first "pip
+        # install" ensures that the dependencies are met, the second "pip
+        # install" ensures that we've actually installed the local package
+        # we just built.
+        cd "$WORKSPACE/$1" \
+            && "${3}python" setup.py sdist rotate --keep=1 --match .tar.gz \
+            && cd "$WORKSPACE" \
+            && "${3}pip" install --no-cache-dir --quiet "$WORKSPACE/$1/dist"/*.tar.gz \
+            && "${3}pip" install --no-cache-dir --quiet --no-deps --ignore-installed "$WORKSPACE/$1/dist"/*.tar.gz
+    elif [[ "$2" != "" ]]
+    then
+        "install_$2"
+    else
+        "install_$1"
+    fi
+    result=$?
+    checkexit $result "$1 install"
+    title "End of $1 install (`timer`)"
+    return $result
+}
+
+bundle_install_trylocal() {
+    (
+        set -e
+        echo "(Running bundle install --local. 'could not find package' messages are OK.)"
+        if ! bundle install --local --no-deployment; then
+            echo "(Running bundle install again, without --local.)"
+            bundle install --no-deployment
+        fi
+        bundle package --all
+    )
+}
+
+install_doc() {
+    cd "$WORKSPACE/doc" \
+        && bundle_install_trylocal \
+        && rm -rf .site
+}
+do_install doc
+
+install_gem() {
+    gemname=$1
+    srcpath=$2
+    with_test_gemset gem_uninstall_if_exists "$gemname" \
+        && cd "$WORKSPACE/$srcpath" \
+        && bundle_install_trylocal \
+        && gem build "$gemname.gemspec" \
+        && with_test_gemset gem install --no-ri --no-rdoc $(ls -t "$gemname"-*.gem|head -n1)
+}
+
+install_ruby_sdk() {
+    install_gem arvados sdk/ruby
+}
+do_install sdk/ruby ruby_sdk
+
+install_R_sdk() {
+  if [[ "$NEED_SDK_R" = true ]]; then
+    cd "$WORKSPACE/sdk/R" \
+       && Rscript --vanilla install_deps.R
+  fi
+}
+do_install sdk/R R_sdk
+
+install_perl_sdk() {
+    cd "$WORKSPACE/sdk/perl" \
+        && perl Makefile.PL INSTALL_BASE="$PERLINSTALLBASE" \
+        && make install INSTALLDIRS=perl
+}
+do_install sdk/perl perl_sdk
+
+install_cli() {
+    install_gem arvados-cli sdk/cli
+}
+do_install sdk/cli cli
+
+install_login-sync() {
+    install_gem arvados-login-sync services/login-sync
+}
+do_install services/login-sync login-sync
+
+# Install the Python SDK early. Various other test suites (like
+# keepproxy) bring up run_test_server.py, which imports the arvados
+# module. We can't actually *test* the Python SDK yet though, because
+# its own test suite brings up some of those other programs (like
+# keepproxy).
+for p in "${pythonstuff[@]}"
+do
+    dir=${p%:py3}
+    if [[ ${dir} = ${p} ]]; then
+        if [[ -z ${skip[python2]} ]]; then
+            do_install ${dir} pip
+        fi
+    elif [[ -n ${PYTHON3} ]]; then
+        if [[ -z ${skip[python3]} ]]; then
+            do_install ${dir} pip "$VENV3DIR/bin/"
+        fi
+    fi
+done
+
+install_apiserver() {
+    cd "$WORKSPACE/services/api" \
+        && RAILS_ENV=test bundle_install_trylocal
+
+    rm -f config/environments/test.rb
+    cp config/environments/test.rb.example config/environments/test.rb
+
+    if [ -n "$CONFIGSRC" ]
+    then
+        for f in database.yml
+        do
+            cp "$CONFIGSRC/$f" config/ || fatal "$f"
+        done
+    fi
+
+    # Clear out any lingering postgresql connections to the test
+    # database, so that we can drop it. This assumes the current user
+    # is a postgresql superuser.
+    cd "$WORKSPACE/services/api" \
+        && test_database=$(python -c "import yaml; print yaml.load(file('config/database.yml'))['test']['database']") \
+        && psql "$test_database" -c "SELECT pg_terminate_backend (pg_stat_activity.pid::int) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$test_database';" 2>/dev/null
+
+    mkdir -p "$WORKSPACE/services/api/tmp/pids"
+
+    cert="$WORKSPACE/services/api/tmp/self-signed"
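+    # 1512659226 is a Unix timestamp (2017-12-07): certificates generated
+    # before then are regenerated, presumably because the subjectAltName
+    # list below changed at that point.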
+    if [[ ! -e "$cert.pem" || "$(date -r "$cert.pem" +%s)" -lt 1512659226 ]]; then
+        (
+            dir="$WORKSPACE/services/api/tmp"
+            set -ex
+            openssl req -newkey rsa:2048 -nodes -subj '/C=US/ST=State/L=City/CN=localhost' -out "$cert.csr" -keyout "$cert.key" </dev/null
+            openssl x509 -req -in "$cert.csr" -signkey "$cert.key" -out "$cert.pem" -days 3650 -extfile <(printf 'subjectAltName=DNS:localhost,DNS:::1,DNS:0.0.0.0,DNS:127.0.0.1,IP:::1,IP:0.0.0.0,IP:127.0.0.1')
+        ) || return 1
+    fi
+
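+    # Unpack a canned git repository used by the test fixtures, and
+    # create an empty internal.git repository alongside it.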
+    cd "$WORKSPACE/services/api" \
+        && rm -rf tmp/git \
+        && mkdir -p tmp/git \
+        && cd tmp/git \
+        && tar xf ../../test/test.git.tar \
+        && mkdir -p internal.git \
+        && git --git-dir internal.git init \
+            || return 1
+
+    cd "$WORKSPACE/services/api" \
+        && RAILS_ENV=test bundle exec rake db:drop \
+        && RAILS_ENV=test bundle exec rake db:setup \
+        && RAILS_ENV=test bundle exec rake db:fixtures:load
+}
+do_install services/api apiserver
+
+declare -a gostuff
+gostuff=(
+    cmd/arvados-client
+    cmd/arvados-server
+    lib/cli
+    lib/cmd
+    lib/controller
+    lib/crunchstat
+    lib/cloud
+    lib/cloud/azure
+    lib/cloud/ec2
+    lib/dispatchcloud
+    lib/dispatchcloud/container
+    lib/dispatchcloud/scheduler
+    lib/dispatchcloud/ssh_executor
+    lib/dispatchcloud/worker
+    sdk/go/arvados
+    sdk/go/arvadosclient
+    sdk/go/auth
+    sdk/go/blockdigest
+    sdk/go/dispatch
+    sdk/go/health
+    sdk/go/httpserver
+    sdk/go/manifest
+    sdk/go/asyncbuf
+    sdk/go/crunchrunner
+    sdk/go/stats
+    services/arv-git-httpd
+    services/crunchstat
+    services/health
+    services/keep-web
+    services/keepstore
+    sdk/go/keepclient
+    services/keep-balance
+    services/keepproxy
+    services/crunch-dispatch-local
+    services/crunch-dispatch-slurm
+    services/crunch-run
+    services/ws
+    tools/keep-block-check
+    tools/keep-exercise
+    tools/keep-rsync
+    tools/sync-groups
+)
+for g in "${gostuff[@]}"
+do
+    do_install "$g" go
+done
+
+install_workbench() {
+    cd "$WORKSPACE/apps/workbench" \
+        && mkdir -p tmp/cache \
+        && RAILS_ENV=test bundle_install_trylocal \
+        && RAILS_ENV=test RAILS_GROUPS=assets bundle exec rake npm:install
+}
+do_install apps/workbench workbench
+
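+# Proxy settings could misroute requests meant for the local test
+# services started below, so clear them before running any tests.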
+unset http_proxy https_proxy no_proxy
+
+test_doclinkchecker() {
+    (
+        set -e
+        cd "$WORKSPACE/doc"
+        ARVADOS_API_HOST=qr1hi.arvadosapi.com
+        # Make sure python-epydoc is installed or the next line won't
+        # do much good!
+        PYTHONPATH=$WORKSPACE/sdk/python/ bundle exec rake linkchecker baseurl=file://$WORKSPACE/doc/.site/ arvados_workbench_host=https://workbench.$ARVADOS_API_HOST arvados_api_host=$ARVADOS_API_HOST
+    )
+}
+do_test doc doclinkchecker
+
+stop_services
+
+test_apiserver() {
+    rm -f "$WORKSPACE/services/api/git-commit.version"
+    cd "$WORKSPACE/services/api" \
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test TESTOPTS=-v ${testargs[services/api]}
+}
+do_test services/api apiserver
+
+# Shortcut for when we're only running apiserver tests. This saves a bit of time,
+# because we don't need to start up the api server for subsequent tests.
+if [ ! -z "$only" ] && [ "$only" == "services/api" ]; then
+  rotate_logfile "$WORKSPACE/services/api/log/" "test.log"
+  exit_cleanly
+fi
+
+start_services || { stop_services; fatal "start_services"; }
+
+test_ruby_sdk() {
+    cd "$WORKSPACE/sdk/ruby" \
+        && bundle exec rake test TESTOPTS=-v ${testargs[sdk/ruby]}
+}
+do_test sdk/ruby ruby_sdk
+
+test_R_sdk() {
+  if [[ "$NEED_SDK_R" = true ]]; then
+    cd "$WORKSPACE/sdk/R" \
+        && Rscript --vanilla run_test.R
+  fi
+}
+do_test sdk/R R_sdk
+
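+# KEEP_LOCAL_STORE makes Keep clients read and write blocks in a local
+# directory instead of talking to real Keep services.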
+test_cli() {
+    cd "$WORKSPACE/sdk/cli" \
+        && mkdir -p /tmp/keep \
+        && KEEP_LOCAL_STORE=/tmp/keep bundle exec rake test TESTOPTS=-v ${testargs[sdk/cli]}
+}
+do_test sdk/cli cli
+
+test_login-sync() {
+    cd "$WORKSPACE/services/login-sync" \
+        && bundle exec rake test TESTOPTS=-v ${testargs[services/login-sync]}
+}
+do_test services/login-sync login-sync
+
+test_nodemanager_integration() {
+    cd "$WORKSPACE/services/nodemanager" \
+        && tests/integration_test.py ${testargs[services/nodemanager_integration]}
+}
+do_test services/nodemanager_integration nodemanager_integration
+
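+# The Python SDK and tools were installed early (see the note above the
+# install loop); now that the services their test suites depend on are
+# running, run those test suites.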
+for p in "${pythonstuff[@]}"
+do
+    dir=${p%:py3}
+    if [[ ${dir} = ${p} ]]; then
+        if [[ -z ${skip[python2]} ]]; then
+            do_test ${dir} pip
+        fi
+    elif [[ -n ${PYTHON3} ]]; then
+        if [[ -z ${skip[python3]} ]]; then
+            do_test ${dir} pip "$VENV3DIR/bin/"
+        fi
+    fi
+done
+
+for g in "${gostuff[@]}"
+do
+    do_test "$g" go
+done
+
+test_workbench_units() {
+    cd "$WORKSPACE/apps/workbench" \
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:units TESTOPTS=-v ${testargs[apps/workbench]}
+}
+do_test apps/workbench_units workbench_units
+
+test_workbench_functionals() {
+    cd "$WORKSPACE/apps/workbench" \
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:functionals TESTOPTS=-v ${testargs[apps/workbench]}
+}
+do_test apps/workbench_functionals workbench_functionals
+
+test_workbench_integration() {
+    cd "$WORKSPACE/apps/workbench" \
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:integration TESTOPTS=-v ${testargs[apps/workbench]}
+}
+do_test apps/workbench_integration workbench_integration
+
+test_workbench_benchmark() {
+    cd "$WORKSPACE/apps/workbench" \
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:benchmark ${testargs[apps/workbench_benchmark]}
+}
+do_test apps/workbench_benchmark workbench_benchmark
+
+test_workbench_profile() {
+    cd "$WORKSPACE/apps/workbench" \
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:profile ${testargs[apps/workbench_profile]}
+}
+do_test apps/workbench_profile workbench_profile
+
+exit_cleanly
diff --git a/cc-by-sa-3.0.txt b/cc-by-sa-3.0.txt
new file mode 100644 (file)
index 0000000..281c9b6
--- /dev/null
@@ -0,0 +1,297 @@
+Creative Commons Legal Code
+
+Attribution-ShareAlike 3.0 United States
+
+License
+
+THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
+COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY
+COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS
+AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+
+BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE
+BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE
+CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE
+IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
+
+1. Definitions
+
+ a. "Collective Work" means a work, such as a periodical issue, anthology or
+    encyclopedia, in which the Work in its entirety in unmodified form, along
+    with one or more other contributions, constituting separate and independent
+    works in themselves, are assembled into a collective whole. A work that
+    constitutes a Collective Work will not be considered a Derivative Work (as
+    defined below) for the purposes of this License.
+
+ b. "Creative Commons Compatible License" means a license that is listed at
+    http://creativecommons.org/compatiblelicenses that has been approved by
+    Creative Commons as being essentially equivalent to this License,
+    including, at a minimum, because that license: (i) contains terms that have
+    the same purpose, meaning and effect as the License Elements of this
+    License; and, (ii) explicitly permits the relicensing of derivatives of
+    works made available under that license under this License or either a
+    Creative Commons unported license or a Creative Commons jurisdiction
+    license with the same License Elements as this License.
+
+ c. "Derivative Work" means a work based upon the Work or upon the Work and
+    other pre-existing works, such as a translation, musical arrangement,
+    dramatization, fictionalization, motion picture version, sound recording,
+    art reproduction, abridgment, condensation, or any other form in which the
+    Work may be recast, transformed, or adapted, except that a work that
+    constitutes a Collective Work will not be considered a Derivative Work for
+    the purpose of this License. For the avoidance of doubt, where the Work is
+    a musical composition or sound recording, the synchronization of the Work
+    in timed-relation with a moving image ("synching") will be considered a
+    Derivative Work for the purpose of this License.
+
+ d. "License Elements" means the following high-level license attributes as
+    selected by Licensor and indicated in the title of this License:
+    Attribution, ShareAlike.
+
+ e. "Licensor" means the individual, individuals, entity or entities that
+    offers the Work under the terms of this License.
+
+ f. "Original Author" means the individual, individuals, entity or entities who
+    created the Work.
+
+ g. "Work" means the copyrightable work of authorship offered under the terms
+    of this License.
+
+    h. "You" means an individual or entity exercising rights under this License
+    who has not previously violated the terms of this License with respect to
+    the Work, or who has received express permission from the Licensor to
+    exercise rights under this License despite a previous violation.
+
+2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or
+restrict any rights arising from fair use, first sale or other limitations on
+the exclusive rights of the copyright owner under copyright law or other
+applicable laws.
+
+3. License Grant. Subject to the terms and conditions of this License, Licensor
+hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the
+duration of the applicable copyright) license to exercise the rights in the
+Work as stated below:
+
+ a. to reproduce the Work, to incorporate the Work into one or more Collective
+    Works, and to reproduce the Work as incorporated in the Collective Works;
+
+ b. to create and reproduce Derivative Works provided that any such
+    Derivative Work, including any translation in any medium, takes reasonable
+    steps to clearly label, demarcate or otherwise identify that changes were
+    made to the original Work. For example, a translation could be marked "The
+    original work was translated from English to Spanish," or a modification
+    could indicate "The original work has been modified.";
+
+ c. to distribute copies or phonorecords of, display publicly, perform
+    publicly, and perform publicly by means of a digital audio transmission the
+    Work including as incorporated in Collective Works;
+
+ d. to distribute copies or phonorecords of, display publicly, perform
+    publicly, and perform publicly by means of a digital audio transmission
+    Derivative Works.
+
+ e. For the avoidance of doubt, where the Work is a musical composition:
+
+     i. Performance Royalties Under Blanket Licenses. Licensor waives the
+        exclusive right to collect, whether individually or, in the event that
+        Licensor is a member of a performance rights society (e.g. ASCAP, BMI,
+        SESAC), via that society, royalties for the public performance or
+        public digital performance (e.g. webcast) of the Work.
+
+    ii. Mechanical Rights and Statutory Royalties. Licensor waives the
+        exclusive right to collect, whether individually or via a music rights
+        agency or designated agent (e.g. Harry Fox Agency), royalties for any
+        phonorecord You create from the Work ("cover version") and distribute,
+        subject to the compulsory license created by 17 USC Section 115 of the
+        US Copyright Act (or the equivalent in other jurisdictions).
+
+ f. Webcasting Rights and Statutory Royalties. For the avoidance of doubt,
+    where the Work is a sound recording, Licensor waives the exclusive right to
+    collect, whether individually or via a performance-rights society
+    (e.g. SoundExchange), royalties for the public digital performance
+    (e.g. webcast) of the Work, subject to the compulsory license created by 17
+    USC Section 114 of the US Copyright Act (or the equivalent in other
+    jurisdictions).
+
+The above rights may be exercised in all media and formats whether now known or
+hereafter devised. The above rights include the right to make such
+modifications as are technically necessary to exercise the rights in other
+media and formats. All rights not expressly granted by Licensor are hereby
+reserved.
+
+4. Restrictions. The license granted in Section 3 above is expressly made
+subject to and limited by the following restrictions:
+
+ a. You may distribute, publicly display, publicly perform, or publicly
+    digitally perform the Work only under the terms of this License, and You
+    must include a copy of, or the Uniform Resource Identifier for, this
+    License with every copy or phonorecord of the Work You distribute, publicly
+    display, publicly perform, or publicly digitally perform. You may not offer
+    or impose any terms on the Work that restrict the terms of this License or
+    the ability of a recipient of the Work to exercise of the rights granted to
+    that recipient under the terms of the License. You may not sublicense the
+    Work. You must keep intact all notices that refer to this License and to
+    the disclaimer of warranties. When You distribute, publicly display,
+    publicly perform, or publicly digitally perform the Work, You may not
+    impose any technological measures on the Work that restrict the ability of
+    a recipient of the Work from You to exercise of the rights granted to that
+    recipient under the terms of the License. This Section 4(a) applies to the
+    Work as incorporated in a Collective Work, but this does not require the
+    Collective Work apart from the Work itself to be made subject to the terms
+    of this License. If You create a Collective Work, upon notice from any
+    Licensor You must, to the extent practicable, remove from the Collective
+    Work any credit as required by Section 4(c), as requested. If You create a
+    Derivative Work, upon notice from any Licensor You must, to the extent
+    practicable, remove from the Derivative Work any credit as required by
+    Section 4(c), as requested.
+
+ b. You may distribute, publicly display, publicly perform, or publicly
+    digitally perform a Derivative Work only under: (i) the terms of this
+    License; (ii) a later version of this License with the same License
+    Elements as this License; (iii) either the Creative Commons (Unported)
+    license or a Creative Commons jurisdiction license (either this or a later
+    license version) that contains the same License Elements as this License
+    (e.g. Attribution-ShareAlike 3.0 (Unported)); (iv) a Creative Commons
+    Compatible License. If you license the Derivative Work under one of the
+    licenses mentioned in (iv), you must comply with the terms of that
+    license. If you license the Derivative Work under the terms of any of the
+    licenses mentioned in (i), (ii) or (iii) (the "Applicable License"), you
+    must comply with the terms of the Applicable License generally and with the
+    following provisions: (I) You must include a copy of, or the Uniform
+    Resource Identifier for, the Applicable License with every copy or
+    phonorecord of each Derivative Work You distribute, publicly display,
+    publicly perform, or publicly digitally perform; (II) You may not offer or
+    impose any terms on the Derivative Works that restrict the terms of the
+    Applicable License or the ability of a recipient of the Work to exercise
+    the rights granted to that recipient under the terms of the Applicable
+    License; (III) You must keep intact all notices that refer to the
+    Applicable License and to the disclaimer of warranties; and, (IV) when You
+    distribute, publicly display, publicly perform, or publicly digitally
+    perform the Work, You may not impose any technological measures on the
+    Derivative Work that restrict the ability of a recipient of the Derivative
+    Work from You to exercise the rights granted to that recipient under the
+    terms of the Applicable License. This Section 4(b) applies to the
+    Derivative Work as incorporated in a Collective Work, but this does not
+    require the Collective Work apart from the Derivative Work itself to be
+    made subject to the terms of the Applicable License.
+
+ c. If You distribute, publicly display, publicly perform, or publicly
+    digitally perform the Work (as defined in Section 1 above) or any
+    Derivative Works (as defined in Section 1 above) or Collective Works (as
+    defined in Section 1 above), You must, unless a request has been made
+    pursuant to Section 4(a), keep intact all copyright notices for the Work
+    and provide, reasonable to the medium or means You are utilizing: (i) the
+    name of the Original Author (or pseudonym, if applicable) if supplied,
+    and/or (ii) if the Original Author and/or Licensor designate another party
+    or parties (e.g. a sponsor institute, publishing entity, journal) for
+    attribution ("Attribution Parties") in Licensor's copyright notice, terms
+    of service or by other reasonable means, the name of such party or parties;
+    the title of the Work if supplied; to the extent reasonably practicable,
+    the Uniform Resource Identifier, if any, that Licensor specifies to be
+    associated with the Work, unless such URI does not refer to the copyright
+    notice or licensing information for the Work; and, consistent with Section
+    3(b) in the case of a Derivative Work, a credit identifying the use of the
+    Work in the Derivative Work (e.g., "French translation of the Work by
+    Original Author," or "Screenplay based on original Work by Original
+    Author"). The credit required by this Section 4(c) may be implemented in
+    any reasonable manner; provided, however, that in the case of a Derivative
+    Work or Collective Work, at a minimum such credit will appear, if a credit
+    for all contributing authors of the Derivative Work or Collective Work
+    appears, then as part of these credits and in a manner at least as
+    prominent as the credits for the other contributing authors. For the
+    avoidance of doubt, You may only use the credit required by this Section
+    for the purpose of attribution in the manner set out above and, by
+    exercising Your rights under this License, You may not implicitly or
+    explicitly assert or imply any connection with, sponsorship or endorsement
+    by the Original Author, Licensor and/or Attribution Parties, as
+    appropriate, of You or Your use of the Work, without the separate, express
+    prior written permission of the Original Author, Licensor and/or
+    Attribution Parties.
+
+5. Representations, Warranties and Disclaimer
+
+UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS
+THE WORK AS-IS AND ONLY TO THE EXTENT OF ANY RIGHTS HELD IN THE LICENSED WORK
+BY THE LICENSOR. THE LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY
+KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING,
+WITHOUT LIMITATION, WARRANTIES OF TITLE, MARKETABILITY, MERCHANTIBILITY,
+FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR
+OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT
+DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED
+WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
+
+6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN
+NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL,
+INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS
+LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+7. Termination
+
+ a. This License and the rights granted hereunder will terminate automatically
+    upon any breach by You of the terms of this License. Individuals or
+    entities who have received Derivative Works or Collective Works from You
+    under this License, however, will not have their licenses terminated
+    provided such individuals or entities remain in full compliance with those
+    licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of
+    this License.
+
+ b. Subject to the above terms and conditions, the license granted here is
+    perpetual (for the duration of the applicable copyright in the
+    Work). Notwithstanding the above, Licensor reserves the right to release
+    the Work under different license terms or to stop distributing the Work at
+    any time; provided, however that any such election will not serve to
+    withdraw this License (or any other license that has been, or is required
+    to be, granted under the terms of this License), and this License will
+    continue in full force and effect unless terminated as stated above.
+
+8. Miscellaneous
+
+ a. Each time You distribute or publicly digitally perform the Work (as defined
+    in Section 1 above) or a Collective Work (as defined in Section 1 above),
+    the Licensor offers to the recipient a license to the Work on the same
+    terms and conditions as the license granted to You under this License.
+
+ b. Each time You distribute or publicly digitally perform a Derivative Work,
+    Licensor offers to the recipient a license to the original Work on the same
+    terms and conditions as the license granted to You under this License.
+
+ c. If any provision of this License is invalid or unenforceable under
+    applicable law, it shall not affect the validity or enforceability of the
+    remainder of the terms of this License, and without further action by the
+    parties to this agreement, such provision shall be reformed to the minimum
+    extent necessary to make such provision valid and enforceable.
+
+ d. No term or provision of this License shall be deemed waived and no breach
+    consented to unless such waiver or consent shall be in writing and signed
+    by the party to be charged with such waiver or consent.
+
+ e. This License constitutes the entire agreement between the parties with
+    respect to the Work licensed here. There are no understandings, agreements
+    or representations with respect to the Work not specified here. Licensor
+    shall not be bound by any additional provisions that may appear in any
+    communication from You. This License may not be modified without the mutual
+    written agreement of the Licensor and You.
+
+Creative Commons Notice
+
+    Creative Commons is not a party to this License, and makes no warranty
+    whatsoever in connection with the Work. Creative Commons will not be liable
+    to You or any party on any legal theory for any damages whatsoever,
+    including without limitation any general, special, incidental or
+    consequential damages arising in connection to this
+    license. Notwithstanding the foregoing two (2) sentences, if Creative
+    Commons has expressly identified itself as the Licensor hereunder, it shall
+    have all rights and obligations of Licensor.
+
+    Except for the limited purpose of indicating to the public that the Work is
+    licensed under the CCPL, Creative Commons does not authorize the use by
+    either party of the trademark "Creative Commons" or any related trademark
+    or logo of Creative Commons without the prior written consent of Creative
+    Commons. Any permitted use will be in compliance with Creative Commons'
+    then-current trademark usage guidelines, as may be published on its website
+    or otherwise made available upon request from time to time. For the
+    avoidance of doubt, this trademark restriction does not form part of this
+    License.
+
+    Creative Commons may be contacted at http://creativecommons.org/.
diff --git a/cmd/arvados-client/.gitignore b/cmd/arvados-client/.gitignore
new file mode 100644 (file)
index 0000000..21dd863
--- /dev/null
@@ -0,0 +1 @@
+arvados-*
diff --git a/cmd/arvados-client/cmd.go b/cmd/arvados-client/cmd.go
new file mode 100644 (file)
index 0000000..4550ae5
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package main
+
+import (
+       "os"
+
+       "git.curoverse.com/arvados.git/lib/cli"
+       "git.curoverse.com/arvados.git/lib/cmd"
+)
+
+var (
+       version = "dev"
+       handler = cmd.Multi(map[string]cmd.Handler{
+               "-e":        cmd.Version(version),
+               "version":   cmd.Version(version),
+               "-version":  cmd.Version(version),
+               "--version": cmd.Version(version),
+
+               "copy":     cli.Copy,
+               "create":   cli.Create,
+               "edit":     cli.Edit,
+               "get":      cli.Get,
+               "keep":     cli.Keep,
+               "pipeline": cli.Pipeline,
+               "run":      cli.Run,
+               "tag":      cli.Tag,
+               "ws":       cli.Ws,
+
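+               // Every resource-type subcommand below shares the same
+               // generic handler, which turns the invocation into the
+               // corresponding API call.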
+               "api_client_authorization": cli.APICall,
+               "api_client":               cli.APICall,
+               "authorized_key":           cli.APICall,
+               "collection":               cli.APICall,
+               "container":                cli.APICall,
+               "container_request":        cli.APICall,
+               "group":                    cli.APICall,
+               "human":                    cli.APICall,
+               "job":                      cli.APICall,
+               "job_task":                 cli.APICall,
+               "keep_disk":                cli.APICall,
+               "keep_service":             cli.APICall,
+               "link":                     cli.APICall,
+               "log":                      cli.APICall,
+               "node":                     cli.APICall,
+               "pipeline_instance":        cli.APICall,
+               "pipeline_template":        cli.APICall,
+               "repository":               cli.APICall,
+               "specimen":                 cli.APICall,
+               "trait":                    cli.APICall,
+               "user_agreement":           cli.APICall,
+               "user":                     cli.APICall,
+               "virtual_machine":          cli.APICall,
+               "workflow":                 cli.APICall,
+       })
+)
+
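+// fixLegacyArgs moves the subcommand name to the front of the argument
+// list (ahead of any global flags accepted by the legacy CLI) so the
+// multi-command handler can dispatch on it.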
+func fixLegacyArgs(args []string) []string {
+       flags, _ := cli.LegacyFlagSet()
+       return cmd.SubcommandToFront(args, flags)
+}
+
+func main() {
+       os.Exit(handler.RunCommand(os.Args[0], fixLegacyArgs(os.Args[1:]), os.Stdin, os.Stdout, os.Stderr))
+}
diff --git a/cmd/arvados-client/cmd_test.go b/cmd/arvados-client/cmd_test.go
new file mode 100644 (file)
index 0000000..cbbc7b1
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package main
+
+import (
+       "bytes"
+       "io/ioutil"
+       "testing"
+
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&ClientSuite{})
+
+type ClientSuite struct{}
+
+func (s *ClientSuite) TestBadCommand(c *check.C) {
+       exited := handler.RunCommand("arvados-client", []string{"no such command"}, bytes.NewReader(nil), ioutil.Discard, ioutil.Discard)
+       c.Check(exited, check.Equals, 2)
+}
+
+func (s *ClientSuite) TestBadSubcommandArgs(c *check.C) {
+       exited := handler.RunCommand("arvados-client", []string{"get"}, bytes.NewReader(nil), ioutil.Discard, ioutil.Discard)
+       c.Check(exited, check.Equals, 2)
+}
+
+func (s *ClientSuite) TestVersion(c *check.C) {
+       stdout := bytes.NewBuffer(nil)
+       stderr := bytes.NewBuffer(nil)
+       exited := handler.RunCommand("arvados-client", []string{"version"}, bytes.NewReader(nil), stdout, stderr)
+       c.Check(exited, check.Equals, 0)
+       c.Check(stdout.String(), check.Matches, `arvados-client dev \(go[0-9\.]+\)\n`)
+       c.Check(stderr.String(), check.Equals, "")
+}
diff --git a/cmd/arvados-server/arvados-controller.service b/cmd/arvados-server/arvados-controller.service
new file mode 100644 (file)
index 0000000..e857074
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados controller
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/config.yml
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=notify
+EnvironmentFile=-/etc/arvados/environment
+ExecStart=/usr/bin/arvados-controller
+Restart=always
+RestartSec=1
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/cmd/arvados-server/arvados-dispatch-cloud.service b/cmd/arvados-server/arvados-dispatch-cloud.service
new file mode 100644 (file)
index 0000000..aa5cc3b
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=arvados-dispatch-cloud
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/config.yml
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=notify
+EnvironmentFile=-/etc/arvados/environment
+ExecStart=/usr/bin/arvados-dispatch-cloud
+Restart=always
+RestartSec=1
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/cmd/arvados-server/cmd.go b/cmd/arvados-server/cmd.go
new file mode 100644 (file)
index 0000000..cd15d25
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "os"
+
+       "git.curoverse.com/arvados.git/lib/cmd"
+       "git.curoverse.com/arvados.git/lib/controller"
+       "git.curoverse.com/arvados.git/lib/dispatchcloud"
+)
+
+var (
+       version = "dev"
+       handler = cmd.Multi(map[string]cmd.Handler{
+               "version":   cmd.Version(version),
+               "-version":  cmd.Version(version),
+               "--version": cmd.Version(version),
+
+               "controller":     controller.Command,
+               "dispatch-cloud": dispatchcloud.Command,
+       })
+)
+
+func main() {
+       os.Exit(handler.RunCommand(os.Args[0], os.Args[1:], os.Stdin, os.Stdout, os.Stderr))
+}
diff --git a/crunch_scripts/GATK2-VariantFiltration b/crunch_scripts/GATK2-VariantFiltration
new file mode 100755 (executable)
index 0000000..0ef4a74
--- /dev/null
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import os
+import re
+
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+gatk_path = arvados.util.tarball_extract(
+    tarball = this_job['script_parameters']['gatk_binary_tarball'],
+    path = 'gatk')
+bundle_path = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['gatk_bundle'],
+    path = 'gatk-bundle',
+    files = ['human_g1k_v37.dict', 'human_g1k_v37.fasta', 'human_g1k_v37.fasta.fai'])
+this_task_input = this_task['parameters']['input']
+
+input_file = list(arvados.CollectionReader(this_task_input).all_files())[0]
+
+# choose vcf temporary file names
+vcf_in = os.path.join(arvados.current_task().tmpdir,
+                      os.path.basename(input_file.name()))
+vcf_out = re.sub('(.*)\\.vcf', '\\1-filtered.vcf', vcf_in)
+
+# fetch the unfiltered data
+vcf_in_file = open(vcf_in, 'w')
+for buf in input_file.readall():
+    vcf_in_file.write(buf)
+vcf_in_file.close()
+
+stdoutdata, stderrdata = arvados.util.run_command(
+    ['java', '-Xmx1g',
+     '-jar', os.path.join(gatk_path,'GenomeAnalysisTK.jar'),
+     '-T', 'VariantFiltration', '--variant', vcf_in,
+     '--out', vcf_out,
+     '--filterExpression', 'QD < 2.0',
+     '--filterName', 'GATK_QD',
+     '--filterExpression', 'MQ < 40.0',
+     '--filterName', 'GATK_MQ',
+     '--filterExpression', 'FS > 60.0',
+     '--filterName', 'GATK_FS',
+     '--filterExpression', 'MQRankSum < -12.5',
+     '--filterName', 'GATK_MQRankSum',
+     '--filterExpression', 'ReadPosRankSum < -8.0',
+     '--filterName', 'GATK_ReadPosRankSum',
+     '-R', os.path.join(bundle_path, 'human_g1k_v37.fasta')],
+    cwd=arvados.current_task().tmpdir)
+
+# store the filtered data
+with open(vcf_out, 'rb') as f:
+    out = arvados.CollectionWriter()
+    while True:
+        buf = f.read()
+        if len(buf) == 0:
+            break
+        out.write(buf)
+out.set_current_file_name(os.path.basename(vcf_out))
+
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/GATK2-bqsr b/crunch_scripts/GATK2-bqsr
new file mode 100755 (executable)
index 0000000..ab78226
--- /dev/null
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import re
+import sys
+import arvados
+import arvados_gatk2
+import arvados_samtools
+from arvados_ipc import *
+
+class InvalidArgumentError(Exception):
+    pass
+
+arvados_samtools.one_task_per_bam_file(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+tmpdir = arvados.current_task().tmpdir
+arvados.util.clear_tmpdir()
+
+known_sites_files = arvados.getjobparam(
+    'known_sites',
+    ['dbsnp_137.b37.vcf',
+     'Mills_and_1000G_gold_standard.indels.b37.vcf',
+     ])
+bundle_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['gatk_bundle'],
+    files = [
+        'human_g1k_v37.dict',
+        'human_g1k_v37.fasta',
+        'human_g1k_v37.fasta.fai'
+        ] + known_sites_files + [v + '.idx' for v in known_sites_files],
+    path = 'gatk_bundle')
+ref_fasta_files = [os.path.join(bundle_dir, f)
+                   for f in os.listdir(bundle_dir)
+                   if re.search(r'\.fasta(\.gz)?$', f)]
+
+input_collection = this_task['parameters']['input']
+input_dir = arvados.util.collection_extract(
+    collection = input_collection,
+    path = os.path.join(this_task.tmpdir, 'input'))
+input_bam_files = []
+for f in arvados.util.listdir_recursive(input_dir):
+    if re.search(r'\.bam$', f):
+        input_stream_name, input_file_name = os.path.split(f)
+        input_bam_files += [os.path.join(input_dir, f)]
+if len(input_bam_files) != 1:
+    raise InvalidArgumentError("Expected exactly one bam file per task.")
+
+known_sites_args = []
+for f in known_sites_files:
+    known_sites_args += ['-knownSites', os.path.join(bundle_dir, f)]
+
+recal_file = os.path.join(tmpdir, 'recal.csv')
+
+children = {}
+pipes = {}
+
+arvados_gatk2.run(
+    args=[
+        '-nct', arvados_gatk2.cpus_on_this_node(),
+        '-T', 'BaseRecalibrator',
+        '-R', ref_fasta_files[0],
+        '-I', input_bam_files[0],
+        '-o', recal_file,
+        ] + known_sites_args)
+
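+# Stream the recalibrated BAM through a pipe: a forked child runs GATK
+# PrintReads writing to the pipe's write end, while the parent reads it
+# below and saves the output to Keep.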
+pipe_setup(pipes, 'BQSR')
+if 0 == named_fork(children, 'BQSR'):
+    pipe_closeallbut(pipes, ('BQSR', 'w'))
+    arvados_gatk2.run(
+        args=[
+        '-T', 'PrintReads',
+        '-R', ref_fasta_files[0],
+        '-I', input_bam_files[0],
+        '-o', '/dev/fd/' + str(pipes['BQSR','w']),
+        '-BQSR', recal_file,
+        '--disable_bam_indexing',
+        ],
+        close_fds=False)
+    os._exit(0)
+os.close(pipes.pop(('BQSR','w'), None))
+
+out = arvados.CollectionWriter()
+out.start_new_stream(input_stream_name)
+
+out.start_new_file(input_file_name + '.recal.csv')
+out.write(open(recal_file, 'rb'))
+
+out.start_new_file(input_file_name)
+while True:
+    buf = os.read(pipes['BQSR','r'], 2**20)
+    if len(buf) == 0:
+        break
+    out.write(buf)
+pipe_closeallbut(pipes)
+
+if waitpid_and_check_children(children):
+    this_task.set_output(out.finish())
+else:
+    sys.exit(1)
diff --git a/crunch_scripts/GATK2-merge-call b/crunch_scripts/GATK2-merge-call
new file mode 100755 (executable)
index 0000000..6d17517
--- /dev/null
@@ -0,0 +1,242 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import re
+import string
+import sys
+import threading
+import arvados
+import arvados_gatk2
+import arvados_picard
+from arvados_ipc import *
+
+class InvalidArgumentError(Exception):
+    pass
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+tmpdir = arvados.current_task().tmpdir
+arvados.util.clear_tmpdir()
+
+bundle_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['gatk_bundle'],
+    files = [
+        'human_g1k_v37.dict',
+        'human_g1k_v37.fasta',
+        'human_g1k_v37.fasta.fai',
+        'dbsnp_137.b37.vcf',
+        'dbsnp_137.b37.vcf.idx',
+        ],
+    path = 'gatk_bundle')
+ref_fasta_files = [os.path.join(bundle_dir, f)
+                   for f in os.listdir(bundle_dir)
+                   if re.search(r'\.fasta(\.gz)?$', f)]
+regions_args = []
+if 'regions' in this_job['script_parameters']:
+    regions_dir = arvados.util.collection_extract(
+        collection = this_job['script_parameters']['regions'],
+        path = 'regions')
+    region_padding = int(this_job['script_parameters']['region_padding'])
+    for f in os.listdir(regions_dir):
+        if re.search(r'\.bed$', f):
+            regions_args += [
+                '--intervals', os.path.join(regions_dir, f),
+                '--interval_padding', str(region_padding)
+                ]
+
+
+# Start a child process for each input file, feeding data to picard.
+
+input_child_names = []
+children = {}
+pipes = {}
+
+input_collection = this_job['script_parameters']['input']
+input_index = 0
+for s in arvados.CollectionReader(input_collection).all_streams():
+    for f in s.all_files():
+        if not re.search(r'\.bam$', f.name()):
+            continue
+        input_index += 1
+        childname = 'input-' + str(input_index)
+        input_child_names += [childname]
+        pipe_setup(pipes, childname)
+        childpid = named_fork(children, childname)
+        if childpid == 0:
+            pipe_closeallbut(pipes, (childname, 'w'))
+            for s in f.readall():
+                os.write(pipes[childname, 'w'], s)
+            os.close(pipes[childname, 'w'])
+            os._exit(0)
+        sys.stderr.write("pid %d writing %s to fd %d->%d\n" %
+                         (childpid,
+                          s.name()+'/'+f.name(),
+                          pipes[childname, 'w'],
+                          pipes[childname, 'r']))
+        pipe_closeallbut(pipes, *[(childname, 'r')
+                                  for childname in input_child_names])
+
+
+# Merge-sort the input files to merge.bam
+
+arvados_picard.run(
+    'MergeSamFiles',
+    args=[
+        'I=/dev/fd/' + str(pipes[childname, 'r'])
+        for childname in input_child_names
+        ],
+    params={
+        'o': 'merge.bam',
+        'quiet': 'true',
+        'so': 'coordinate',
+        'use_threading': 'true',
+        'create_index': 'true',
+        'validation_stringency': 'LENIENT',
+        },
+    close_fds=False,
+    )
+pipe_closeallbut(pipes)
+
+
+# Run CoverageBySample on merge.bam
+
+pipe_setup(pipes, 'stats_log')
+pipe_setup(pipes, 'stats_out')
+if 0 == named_fork(children, 'GATK'):
+    pipe_closeallbut(pipes,
+                     ('stats_log', 'w'),
+                     ('stats_out', 'w'))
+    arvados_gatk2.run(
+        args=[
+            '-T', 'CoverageBySample',
+            '-R', ref_fasta_files[0],
+            '-I', 'merge.bam',
+            '-o', '/dev/fd/' + str(pipes['stats_out', 'w']),
+            '--log_to_file', '/dev/fd/' + str(pipes['stats_log', 'w']),
+            ]
+        + regions_args,
+        close_fds=False)
+    pipe_closeallbut(pipes)
+    os._exit(0)
+pipe_closeallbut(pipes, ('stats_log', 'r'), ('stats_out', 'r'))
+
+
+# Start two threads to read from CoverageBySample pipes
+
+class ExceptionPropagatingThread(threading.Thread):
+    """
+    If a subclassed thread calls _raise(e) in run(), running join() on
+    the thread will raise e in the thread that calls join().
+    """
+    def __init__(self, *args, **kwargs):
+        super(ExceptionPropagatingThread, self).__init__(*args, **kwargs)
+        self.__exception = None
+    def join(self, *args, **kwargs):
+        ret = super(ExceptionPropagatingThread, self).join(*args, **kwargs)
+        if self.__exception:
+            raise self.__exception
+        return ret
+    def _raise(self, exception):
+        self.__exception = exception
+
+class StatsLogReader(ExceptionPropagatingThread):
+    def __init__(self, **kwargs):
+        super(StatsLogReader, self).__init__()
+        self.args = kwargs
+    def run(self):
+        try:
+            for logline in self.args['infile']:
+                x = re.search('Processing (\d+) bp from intervals', logline)
+                if x:
+                    self._total_bp = int(x.group(1))
+        except Exception as e:
+            self._raise(e)
+    def total_bp(self):
+        self.join()
+        return self._total_bp
+stats_log_thr = StatsLogReader(infile=os.fdopen(pipes.pop(('stats_log', 'r'))))
+stats_log_thr.start()
+
+class StatsOutReader(ExceptionPropagatingThread):
+    """
+    Read output of CoverageBySample and collect a histogram of
+    coverage (last column) -> number of loci (number of rows).
+    """
+    def __init__(self, **kwargs):
+        super(StatsOutReader, self).__init__()
+        self.args = kwargs
+    def run(self):
+        try:
+            hist = [0]
+            histtot = 0
+            for line in self.args['infile']:
+                try:
+                    i = int(string.split(line)[-1])
+                except ValueError:
+                    continue
+                if i >= 1:
+                    if len(hist) <= i:
+                        hist.extend([0 for x in range(1+i-len(hist))])
+                    hist[i] += 1
+                    histtot += 1
+            hist[0] = stats_log_thr.total_bp() - histtot
+            self._histogram = hist
+        except Exception as e:
+            self._raise(e)
+    def histogram(self):
+        self.join()
+        return self._histogram
+stats_out_thr = StatsOutReader(infile=os.fdopen(pipes.pop(('stats_out', 'r'))))
+stats_out_thr.start()
+
+
+# Run UnifiedGenotyper on merge.bam
+
+arvados_gatk2.run(
+    args=[
+        '-nt', arvados_gatk2.cpus_on_this_node(),
+        '-T', 'UnifiedGenotyper',
+        '-R', ref_fasta_files[0],
+        '-I', 'merge.bam',
+        '-o', os.path.join(tmpdir, 'out.vcf'),
+        '--dbsnp', os.path.join(bundle_dir, 'dbsnp_137.b37.vcf'),
+        '-metrics', 'UniGenMetrics',
+        '-A', 'DepthOfCoverage',
+        '-A', 'AlleleBalance',
+        '-A', 'QualByDepth',
+        '-A', 'HaplotypeScore',
+        '-A', 'MappingQualityRankSumTest',
+        '-A', 'ReadPosRankSumTest',
+        '-A', 'FisherStrand',
+        '-glm', 'both',
+        ]
+    + regions_args
+    + arvados.getjobparam('GATK2_UnifiedGenotyper_args',[]))
+
+# Copy the output VCF file to Keep
+
+out = arvados.CollectionWriter()
+out.start_new_stream()
+out.start_new_file('out.vcf')
+out.write(open(os.path.join(tmpdir, 'out.vcf'), 'rb'))
+
+
+# Write statistics to Keep
+
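+# Each row i of the CSV gives: minimum coverage i, the number of loci
+# covered at least i times, and that count as a percentage of all bases
+# processed.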
+out.start_new_file('mincoverage_nlocus.csv')
+sofar = 0
+hist = stats_out_thr.histogram()
+total_bp = stats_log_thr.total_bp()
+for i in range(len(hist)):
+    out.write("%d,%d,%f\n" %
+              (i,
+               total_bp - sofar,
+               100.0 * (total_bp - sofar) / total_bp))
+    sofar += hist[i]
+
+if waitpid_and_check_children(children):
+    this_task.set_output(out.finish())
+else:
+    sys.exit(1)
diff --git a/crunch_scripts/GATK2-realign b/crunch_scripts/GATK2-realign
new file mode 100755 (executable)
index 0000000..2787dff
--- /dev/null
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import re
+import sys
+import arvados
+import arvados_gatk2
+import arvados_picard
+import arvados_samtools
+from arvados_ipc import *
+
+class InvalidArgumentError(Exception):
+    pass
+
+arvados_samtools.one_task_per_bam_file(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+tmpdir = arvados.current_task().tmpdir
+arvados.util.clear_tmpdir()
+
+known_sites_files = arvados.getjobparam(
+    'known_sites',
+    ['dbsnp_137.b37.vcf',
+     'Mills_and_1000G_gold_standard.indels.b37.vcf',
+     ])
+bundle_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['gatk_bundle'],
+    files = [
+        'human_g1k_v37.dict',
+        'human_g1k_v37.fasta',
+        'human_g1k_v37.fasta.fai'
+        ] + known_sites_files + [v + '.idx' for v in known_sites_files],
+    path = 'gatk_bundle')
+ref_fasta_files = [os.path.join(bundle_dir, f)
+                   for f in os.listdir(bundle_dir)
+                   if re.search(r'\.fasta(\.gz)?$', f)]
+regions_args = []
+if 'regions' in this_job['script_parameters']:
+    regions_dir = arvados.util.collection_extract(
+        collection = this_job['script_parameters']['regions'],
+        path = 'regions')
+    region_padding = int(this_job['script_parameters']['region_padding'])
+    for f in os.listdir(regions_dir):
+        if re.search(r'\.bed$', f):
+            regions_args += [
+                '--intervals', os.path.join(regions_dir, f),
+                '--interval_padding', str(region_padding)
+                ]
+
+input_collection = this_task['parameters']['input']
+input_dir = arvados.util.collection_extract(
+    collection = input_collection,
+    path = os.path.join(this_task.tmpdir, 'input'))
+input_bam_files = []
+for f in arvados.util.listdir_recursive(input_dir):
+    if re.search(r'\.bam$', f):
+        input_stream_name, input_file_name = os.path.split(f)
+        input_bam_files += [os.path.join(input_dir, f)]
+if len(input_bam_files) != 1:
+    raise InvalidArgumentError("Expected exactly one bam file per task.")
+
+known_sites_args = []
+for f in known_sites_files:
+    known_sites_args += ['-known', os.path.join(bundle_dir, f)]
+
+children = {}
+pipes = {}
+
+arvados_gatk2.run(
+    args=[
+        '-nt', arvados_gatk2.cpus_per_task(),
+        '-T', 'RealignerTargetCreator',
+        '-R', ref_fasta_files[0],
+        '-I', input_bam_files[0],
+        '-o', os.path.join(tmpdir, 'intervals.list')
+        ] + known_sites_args + regions_args)
+
+pipe_setup(pipes, 'IndelRealigner')
+if 0 == named_fork(children, 'IndelRealigner'):
+    pipe_closeallbut(pipes, ('IndelRealigner', 'w'))
+    arvados_gatk2.run(
+        args=[
+        '-T', 'IndelRealigner',
+        '-R', ref_fasta_files[0],
+        '-targetIntervals', os.path.join(tmpdir, 'intervals.list'),
+        '-I', input_bam_files[0],
+        '-o', '/dev/fd/' + str(pipes['IndelRealigner','w']),
+        '--disable_bam_indexing',
+        ] + known_sites_args + regions_args,
+        close_fds=False)
+    os._exit(0)
+os.close(pipes.pop(('IndelRealigner','w'), None))
+
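+# Tee the realigned BAM: one child writes it to Keep while copying it to
+# a second pipe, where picard BuildBamIndex produces the .bai index that
+# another child writes to Keep as well.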
+pipe_setup(pipes, 'bammanifest')
+pipe_setup(pipes, 'bam')
+if 0==named_fork(children, 'bammanifest'):
+    pipe_closeallbut(pipes,
+                     ('IndelRealigner', 'r'),
+                     ('bammanifest', 'w'),
+                     ('bam', 'w'))
+    out = arvados.CollectionWriter()
+    out.start_new_stream(input_stream_name)
+    out.start_new_file(input_file_name)
+    while True:
+        buf = os.read(pipes['IndelRealigner','r'], 2**20)
+        if len(buf) == 0:
+            break
+        os.write(pipes['bam','w'], buf)
+        out.write(buf)
+    os.write(pipes['bammanifest','w'], out.manifest_text())
+    os.close(pipes['bammanifest','w'])
+    os._exit(0)
+
+pipe_setup(pipes, 'index')
+if 0==named_fork(children, 'index'):
+    pipe_closeallbut(pipes, ('bam', 'r'), ('index', 'w'))
+    arvados_picard.run(
+        'BuildBamIndex',
+        params={
+            'i': '/dev/fd/' + str(pipes['bam','r']),
+            'o': '/dev/fd/' + str(pipes['index','w']),
+            'quiet': 'true',
+            'validation_stringency': 'LENIENT'
+            },
+        close_fds=False)
+    os._exit(0)
+
+pipe_setup(pipes, 'indexmanifest')
+if 0==named_fork(children, 'indexmanifest'):
+    pipe_closeallbut(pipes, ('index', 'r'), ('indexmanifest', 'w'))
+    out = arvados.CollectionWriter()
+    out.start_new_stream(input_stream_name)
+    out.start_new_file(re.sub('\.bam$', '.bai', input_file_name))
+    while True:
+        buf = os.read(pipes['index','r'], 2**20)
+        if len(buf) == 0:
+            break
+        out.write(buf)
+    os.write(pipes['indexmanifest','w'], out.manifest_text())
+    os.close(pipes['indexmanifest','w'])
+    os._exit(0)
+
+pipe_closeallbut(pipes, ('bammanifest', 'r'), ('indexmanifest', 'r'))
+outmanifest = ''
+for which in ['bammanifest', 'indexmanifest']:
+    with os.fdopen(pipes[which,'r'], 'rb', 2**20) as f:
+        while True:
+            buf = f.read()
+            if buf == '':
+                break
+            outmanifest += buf
+
+all_ok = True
+for (childname, pid) in children.items():
+    all_ok = all_ok and waitpid_and_check_exit(pid, childname)
+
+if all_ok:
+    this_task.set_output(outmanifest)
+else:
+    sys.exit(1)
diff --git a/crunch_scripts/arvados-bcbio-nextgen.py b/crunch_scripts/arvados-bcbio-nextgen.py
new file mode 100755 (executable)
index 0000000..b7e19ec
--- /dev/null
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import subprocess
+import crunchutil.subst as subst
+import shutil
+import os
+import sys
+import time
+
+if len(arvados.current_task()['parameters']) > 0:
+    p = arvados.current_task()['parameters']
+else:
+    p = arvados.current_job()['script_parameters']
+
+t = arvados.current_task().tmpdir
+
+os.unlink("/usr/local/share/bcbio-nextgen/galaxy")
+os.mkdir("/usr/local/share/bcbio-nextgen/galaxy")
+shutil.copy("/usr/local/share/bcbio-nextgen/config/bcbio_system.yaml", "/usr/local/share/bcbio-nextgen/galaxy")
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool_data_table_conf.xml", "w") as f:
+    f.write('''<tables>
+    <!-- Locations of indexes in the BWA mapper format -->
+    <table name="bwa_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/bwa_index.loc" />
+    </table>
+    <!-- Locations of indexes in the Bowtie2 mapper format -->
+    <table name="bowtie2_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/bowtie2_indices.loc" />
+    </table>
+    <!-- Locations of indexes in the Bowtie2 mapper format for TopHat2 to use -->
+    <table name="tophat2_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/bowtie2_indices.loc" />
+    </table>
+    <!-- Location of SAMTools indexes and other files -->
+    <table name="sam_fa_indexes" comment_char="#">
+        <columns>index, value, path</columns>
+        <file path="tool-data/sam_fa_indices.loc" />
+    </table>
+    <!-- Location of Picard dict file and other files -->
+    <table name="picard_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/picard_index.loc" />
+    </table>
+    <!-- Location of Picard dict files valid for GATK -->
+    <table name="gatk_picard_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/gatk_sorted_picard_index.loc" />
+    </table>
+</tables>
+''')
+
+os.mkdir("/usr/local/share/bcbio-nextgen/galaxy/tool-data")
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/bowtie2_indices.loc", "w") as f:
+    f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(dir $(bowtie2_indices))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/bwa_index.loc", "w") as f:
+    f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(file $(bwa_index))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/gatk_sorted_picard_index.loc", "w") as f:
+    f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(file $(gatk_sorted_picard_index))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/picard_index.loc", "w") as f:
+    f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(file $(picard_index))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/sam_fa_indices.loc", "w") as f:
+    f.write(subst.do_substitution(p, "index\tGRCh37\t$(file $(sam_fa_indices))\n"))
+
+with open("/tmp/crunch-job/freebayes-variant.yaml", "w") as f:
+    f.write('''
+# Template for whole genome Illumina variant calling with FreeBayes
+# This is a GATK-free pipeline without post-alignment BAM pre-processing
+# (recalibration and realignment)
+---
+details:
+  - analysis: variant2
+    genome_build: GRCh37
+    # to do multi-sample variant calling, assign samples the same metadata / batch
+    # metadata:
+    #   batch: your-arbitrary-batch-name
+    algorithm:
+      aligner: bwa
+      mark_duplicates: true
+      recalibrate: false
+      realign: false
+      variantcaller: freebayes
+      platform: illumina
+      quality_format: Standard
+      # for targetted projects, set the region
+      # variant_regions: /path/to/your.bed
+''')
+
+os.unlink("/usr/local/share/bcbio-nextgen/gemini_data")
+os.symlink(arvados.get_job_param_mount("gemini_data"), "/usr/local/share/bcbio-nextgen/gemini_data")
+
+os.chdir(arvados.current_task().tmpdir)
+
+rcode = subprocess.call(["bcbio_nextgen.py", "--workflow", "template", "/tmp/crunch-job/freebayes-variant.yaml", "project1",
+                         subst.do_substitution(p, "$(file $(R1))"),
+                         subst.do_substitution(p, "$(file $(R2))")])
+
+os.chdir("project1/work")
+
+os.symlink("/usr/local/share/bcbio-nextgen/galaxy/tool-data", "tool-data")
+
+rcode = subprocess.call(["bcbio_nextgen.py", "../config/project1.yaml", "-n", os.environ['CRUNCH_NODE_SLOTS']])
+
+print("run-command: completed with exit code %i (%s)" % (rcode, "success" if rcode == 0 else "failed"))
+
+if rcode == 0:
+    os.chdir("../final")
+
+    print("arvados-bcbio-nextgen: the follow output files will be saved to keep:")
+
+    subprocess.call(["find", ".", "-type", "f", "-printf", "arvados-bcbio-nextgen: %12.12s %h/%f\\n"])
+
+    print("arvados-bcbio-nextgen: start writing output to keep")
+
+    done = False
+    api = arvados.api('v1')
+    while not done:
+        try:
+            out = arvados.CollectionWriter()
+            out.write_directory_tree(".", max_manifest_depth=0)
+            outuuid = out.finish()
+            api.job_tasks().update(uuid=arvados.current_task()['uuid'],
+                                                 body={
+                                                     'output':outuuid,
+                                                     'success': (rcode == 0),
+                                                     'progress':1.0
+                                                 }).execute()
+            done = True
+        except Exception as e:
+            print("arvados-bcbio-nextgen: caught exception: {}".format(e))
+            time.sleep(5)
+
+sys.exit(rcode)
diff --git a/crunch_scripts/arvados_bwa.py b/crunch_scripts/arvados_bwa.py
new file mode 100644 (file)
index 0000000..aefc1f0
--- /dev/null
@@ -0,0 +1,115 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import re
+import os
+import sys
+import fcntl
+import subprocess
+
+bwa_install_path = None
+
+def install_path():
+    """
+    Extract the bwa source tree, build the bwa binary, and return the
+    path to the source tree.
+    """
+    global bwa_install_path
+    if bwa_install_path:
+        return bwa_install_path
+
+    bwa_install_path = arvados.util.tarball_extract(
+        tarball = arvados.current_job()['script_parameters']['bwa_tbz'],
+        path = 'bwa')
+
+    # build "bwa" binary
+    lockfile = open(os.path.split(bwa_install_path)[0] + '.bwa-make.lock',
+                    'w')
+    fcntl.flock(lockfile, fcntl.LOCK_EX)
+    arvados.util.run_command(['make', '-j16'], cwd=bwa_install_path)
+    lockfile.close()
+
+    return bwa_install_path
+
+def bwa_binary():
+    """
+    Return the path to the bwa executable.
+    """
+    return os.path.join(install_path(), 'bwa')
+
+def run(command, command_args, **kwargs):
+    """
+    Build and run the bwa binary.
+
+    command is the bwa module, e.g., "index" or "aln".
+
+    command_args is a list of additional command line arguments, e.g.,
+    ['-a', 'bwtsw', 'ref.fasta']
+
+    It is assumed that we are running in a Crunch job environment, and
+    the job's "bwa_tbz" parameter is a collection containing the bwa
+    source tree in a .tbz file.
+    """
+    execargs = [bwa_binary(),
+                command]
+    execargs += command_args
+    sys.stderr.write("%s.run: exec %s\n" % (__name__, str(execargs)))
+    arvados.util.run_command(
+        execargs,
+        cwd=arvados.current_task().tmpdir,
+        stderr=sys.stderr,
+        stdin=kwargs.get('stdin', subprocess.PIPE),
+        stdout=kwargs.get('stdout', sys.stderr))
+
+def one_task_per_pair_input_file(if_sequence=0, and_end_task=True):
+    """
+    Queue one task for each pair of fastq files in this job's input
+    collection.
+
+    Each new task will have two parameters, named "input_1" and
+    "input_2", each being a manifest containing a single fastq file.
+
+    A matching pair of files in the input collection is assumed to
+    have names "x_1.y" and "x_2.y".
+
+    Files in the input collection that are not part of a matched pair
+    are silently ignored.
+
+    if_sequence and and_end_task arguments have the same significance
+    as in arvados.job_setup.one_task_per_input_file().
+    """
+    if if_sequence != arvados.current_task()['sequence']:
+        return
+    job_input = arvados.current_job()['script_parameters']['input']
+    cr = arvados.CollectionReader(job_input)
+    all_files = []
+    for s in cr.all_streams():
+        all_files += list(s.all_files())
+    for s in cr.all_streams():
+        for left_file in s.all_files():
+            left_name = left_file.name()
+            right_file = None
+            right_name = re.sub(r'(.*_)1\.', r'\g<1>2.', left_name)
+            if right_name == left_name:
+                continue
+            for f2 in s.all_files():
+                if right_name == f2.name():
+                    right_file = f2
+            if right_file is not None:
+                new_task_attrs = {
+                    'job_uuid': arvados.current_job()['uuid'],
+                    'created_by_job_task_uuid': arvados.current_task()['uuid'],
+                    'sequence': if_sequence + 1,
+                    'parameters': {
+                        'input_1':left_file.as_manifest(),
+                        'input_2':right_file.as_manifest()
+                        }
+                    }
+                arvados.api().job_tasks().create(body=new_task_attrs).execute()
+    if and_end_task:
+        arvados.api().job_tasks().update(uuid=arvados.current_task()['uuid'],
+                                   body={'success':True}
+                                   ).execute()
+        exit(0)
diff --git a/crunch_scripts/arvados_gatk2.py b/crunch_scripts/arvados_gatk2.py
new file mode 100644 (file)
index 0000000..fa00b44
--- /dev/null
@@ -0,0 +1,52 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import re
+import os
+import sys
+import fcntl
+import subprocess
+
+gatk2_install_path = None
+
+def install_path():
+    global gatk2_install_path
+    if gatk2_install_path:
+        return gatk2_install_path
+    gatk2_install_path = arvados.util.tarball_extract(
+        tarball = arvados.current_job()['script_parameters']['gatk_tbz'],
+        path = 'gatk2')
+    return gatk2_install_path
+
+def memory_limit():
+    taskspernode = int(os.environ.get('CRUNCH_NODE_SLOTS', '1'))
+    with open('/proc/meminfo', 'r') as f:
+        ram = int(re.search(r'MemTotal:\s*(\d+)', f.read()).group(1)) / 1024
+    if taskspernode > 1:
+        ram = ram / taskspernode
+    return max(ram-700, 500)
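+
+# Worked example for memory_limit(), assuming a hypothetical node with
+# MemTotal of 65536 MiB and CRUNCH_NODE_SLOTS=8: each task gets 8192 MiB,
+# minus the 700 MiB headroom kept for non-heap overhead, giving -Xmx7492m.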
+
+def cpus_on_this_node():
+    with open('/proc/cpuinfo', 'r') as cpuinfo:
+        return max(int(os.environ.get('SLURM_CPUS_ON_NODE', 1)),
+                   len(re.findall(r'^processor\s*:\s*\d',
+                                  cpuinfo.read(),
+                                  re.MULTILINE)))
+
+def cpus_per_task():
+    return max(1, (cpus_on_this_node()
+                   / int(os.environ.get('CRUNCH_NODE_SLOTS', 1))))
+
+def run(**kwargs):
+    kwargs.setdefault('cwd', arvados.current_task().tmpdir)
+    kwargs.setdefault('stdout', sys.stderr)
+    execargs = ['java',
+                '-Xmx%dm' % memory_limit(),
+                '-Djava.io.tmpdir=' + arvados.current_task().tmpdir,
+                '-jar', os.path.join(install_path(), 'GenomeAnalysisTK.jar')]
+    execargs += [str(arg) for arg in kwargs.pop('args', [])]
+    sys.stderr.write("%s.run: exec %s\n" % (__name__, str(execargs)))
+    return arvados.util.run_command(execargs, **kwargs)
+
diff --git a/crunch_scripts/arvados_ipc.py b/crunch_scripts/arvados_ipc.py
new file mode 100644 (file)
index 0000000..9787162
--- /dev/null
@@ -0,0 +1,51 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import re
+import sys
+import subprocess
+
+def pipe_setup(pipes, name):
+    pipes[name,'r'], pipes[name,'w'] = os.pipe()
+
+def pipe_closeallbut(pipes, *keepus):
+    # Take a snapshot of the keys: we pop entries while iterating.
+    for n,m in list(pipes.keys()):
+        if (n,m) not in keepus:
+            os.close(pipes.pop((n,m)))
+
+def named_fork(children, name):
+    children[name] = os.fork()
+    return children[name]
+
+def waitpid_and_check_children(children):
+    """
+    Given a dict of childname->pid, wait for each child process to
+    finish, and report non-zero exit status on stderr. Return True if
+    all children exited 0.
+    """
+    all_ok = True
+    for (childname, pid) in children.items():
+        # all_ok must be on RHS here -- we need to call waitpid() on
+        # every child, even if all_ok is already False.
+        all_ok = waitpid_and_check_exit(pid, childname) and all_ok
+    return all_ok
+
+def waitpid_and_check_exit(pid, childname=''):
+    """
+    Wait for a child process to finish. If it exits non-zero, report
+    exit status on stderr (mentioning the given childname) and return
+    False. If it exits zero, return True.
+    """
+    _, childstatus = os.waitpid(pid, 0)
+    exitvalue = childstatus >> 8
+    signal = childstatus & 127
+    dumpedcore = childstatus & 128
+    if childstatus != 0:
+        sys.stderr.write("%s child %d failed: exit %d signal %d core %s\n"
+                         % (childname, pid, exitvalue, signal,
+                            ('y' if dumpedcore else 'n')))
+        return False
+    return True
+
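+# Minimal usage sketch (hypothetical child name) tying these helpers together:
+#
+#   children, pipes = {}, {}
+#   pipe_setup(pipes, 'sort')
+#   if named_fork(children, 'sort') == 0:       # child
+#       pipe_closeallbut(pipes, ('sort', 'w'))
+#       # ...produce output on pipes['sort','w']...
+#       os._exit(0)
+#   pipe_closeallbut(pipes, ('sort', 'r'))      # parent keeps the read end
+#   # ...consume pipes['sort','r']...
+#   sys.exit(0 if waitpid_and_check_children(children) else 1)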
diff --git a/crunch_scripts/arvados_picard.py b/crunch_scripts/arvados_picard.py
new file mode 100644 (file)
index 0000000..3d830db
--- /dev/null
@@ -0,0 +1,42 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import re
+import os
+import sys
+import fcntl
+import subprocess
+
+picard_install_path = None
+
+def install_path():
+    global picard_install_path
+    if picard_install_path:
+        return picard_install_path
+    zipball = arvados.current_job()['script_parameters']['picard_zip']
+    extracted = arvados.util.zipball_extract(
+        zipball = zipball,
+        path = 'picard')
+    for f in os.listdir(extracted):
+        if (re.search(r'^picard-tools-[\d\.]+$', f) and
+            os.path.exists(os.path.join(extracted, f, '.'))):
+            picard_install_path = os.path.join(extracted, f)
+            break
+    if not picard_install_path:
+        raise Exception("picard-tools-{version} directory not found in %s" %
+                        zipball)
+    return picard_install_path
+
+def run(module, **kwargs):
+    kwargs.setdefault('cwd', arvados.current_task().tmpdir)
+    execargs = ['java',
+                '-Xmx1500m',
+                '-Djava.io.tmpdir=' + arvados.current_task().tmpdir,
+                '-jar', os.path.join(install_path(), module + '.jar')]
+    execargs += [str(arg) for arg in kwargs.pop('args', [])]
+    for key, value in kwargs.pop('params', {}).items():
+        execargs += [key.upper() + '=' + str(value)]
+    sys.stderr.write("%s.run: exec %s\n" % (__name__, str(execargs)))
+    return arvados.util.run_command(execargs, **kwargs)
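+
+# Hypothetical example: run('SortSam', params={'i': 'in.bam', 'o': 'out.bam',
+# 'so': 'coordinate'}) execs "java ... -jar SortSam.jar I=in.bam O=out.bam
+# SO=coordinate", since each params key is upcased into Picard's KEY=value form.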
diff --git a/crunch_scripts/arvados_samtools.py b/crunch_scripts/arvados_samtools.py
new file mode 100644 (file)
index 0000000..09992f6
--- /dev/null
@@ -0,0 +1,110 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import re
+import os
+import sys
+import fcntl
+import subprocess
+
+samtools_path = None
+
+def samtools_install_path():
+    """
+    Extract the samtools source tree, build the samtools binary, and
+    return the path to the source tree.
+    """
+    global samtools_path
+    if samtools_path:
+        return samtools_path
+    samtools_path = arvados.util.tarball_extract(
+        tarball = arvados.current_job()['script_parameters']['samtools_tgz'],
+        path = 'samtools')
+
+    # build "samtools" binary
+    lockfile = open(os.path.split(samtools_path)[0] + '.samtools-make.lock',
+                    'w')
+    fcntl.flock(lockfile, fcntl.LOCK_EX)
+    arvados.util.run_command(['make', '-j16'], cwd=samtools_path)
+    lockfile.close()
+
+    return samtools_path
+
+def samtools_binary():
+    """
+    Return the path to the samtools executable.
+    """
+    return os.path.join(samtools_install_path(), 'samtools')
+
+def run(command, command_args, **kwargs):
+    """
+    Build and run the samtools binary.
+
+    command is the samtools subcommand, e.g., "view" or "sort".
+
+    command_args is a list of additional command line arguments, e.g.,
+    ['-bt', 'ref_list.txt', '-o', 'aln.bam', 'aln.sam.gz']
+
+    It is assumed that we are running in a Crunch job environment, and
+    the job's "samtools_tgz" parameter is a collection containing the
+    samtools source tree in a .tgz file.
+    """
+    execargs = [samtools_binary(),
+                command]
+    execargs += command_args
+    sys.stderr.write("%s.run: exec %s\n" % (__name__, str(execargs)))
+    arvados.util.run_command(
+        execargs,
+        cwd=arvados.current_task().tmpdir,
+        stdin=kwargs.get('stdin', subprocess.PIPE),
+        stderr=kwargs.get('stderr', sys.stderr),
+        stdout=kwargs.get('stdout', sys.stderr))
+
+def one_task_per_bam_file(if_sequence=0, and_end_task=True):
+    """
+    Queue one task for each bam file in this job's input collection.
+
+    Each new task will have an "input" parameter: a manifest
+    containing one .bam file and (if available) the corresponding .bai
+    index file.
+
+    Files in the input collection that are not named *.bam or *.bai
+    (as well as *.bai files that do not match any .bam file present)
+    are silently ignored.
+
+    if_sequence and and_end_task arguments have the same significance
+    as in arvados.job_setup.one_task_per_input_file().
+    """
+    if if_sequence != arvados.current_task()['sequence']:
+        return
+    job_input = arvados.current_job()['script_parameters']['input']
+    cr = arvados.CollectionReader(job_input)
+    bam = {}
+    bai = {}
+    for s in cr.all_streams():
+        for f in s.all_files():
+            if re.search(r'\.bam$', f.name()):
+                bam[s.name(), f.name()] = f
+            elif re.search(r'\.bai$', f.name()):
+                bai[s.name(), f.name()] = f
+    for ((s_name, f_name), bam_f) in bam.items():
+        bai_f = bai.get((s_name, re.sub(r'bam$', 'bai', f_name)), None)
+        task_input = bam_f.as_manifest()
+        if bai_f:
+            task_input += bai_f.as_manifest()
+        new_task_attrs = {
+            'job_uuid': arvados.current_job()['uuid'],
+            'created_by_job_task_uuid': arvados.current_task()['uuid'],
+            'sequence': if_sequence + 1,
+            'parameters': {
+                'input': task_input
+                }
+            }
+        arvados.api().job_tasks().create(body=new_task_attrs).execute()
+    if and_end_task:
+        arvados.api().job_tasks().update(uuid=arvados.current_task()['uuid'],
+                                         body={'success':True}
+                                         ).execute()
+        exit(0)
diff --git a/crunch_scripts/bwa-aln b/crunch_scripts/bwa-aln
new file mode 100755 (executable)
index 0000000..e3d85a7
--- /dev/null
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import arvados_bwa
+import arvados_samtools
+import os
+import re
+import sys
+import subprocess
+
+arvados_bwa.one_task_per_pair_input_file(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+ref_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['reference_index'],
+    path = 'reference',
+    decompress = False)
+
+ref_basename = None
+for f in os.listdir(ref_dir):
+    basename = re.sub(r'\.bwt$', '', f)
+    if basename != f:
+        ref_basename = os.path.join(ref_dir, basename)
+if ref_basename is None:
+    raise Exception("Could not find *.bwt in reference collection.")
+
+tmp_dir = arvados.current_task().tmpdir
+
+class Aligner:
+    def input_filename(self):
+        for s in arvados.CollectionReader(self.collection).all_streams():
+            for f in s.all_files():
+                return f.decompressed_name()
+    def generate_input(self):
+        for s in arvados.CollectionReader(self.collection).all_streams():
+            for f in s.all_files():
+                # use a distinct name so the stream variable "s" is not clobbered
+                for chunk in f.readall_decompressed():
+                    yield chunk
+    def aln(self, input_param):
+        self.collection = this_task['parameters'][input_param]
+        reads_filename = os.path.join(tmp_dir, self.input_filename())
+        aln_filename = os.path.join(tmp_dir, self.input_filename() + '.sai')
+        reads_pipe_r, reads_pipe_w = os.pipe()
+        if os.fork() == 0:
+            os.close(reads_pipe_r)
+            reads_file = open(reads_filename, 'wb')
+            for s in self.generate_input():
+                if len(s) != os.write(reads_pipe_w, s):
+                    raise Exception("short write")
+                reads_file.write(s)
+            reads_file.close()
+            os.close(reads_pipe_w)
+            sys.exit(0)
+        os.close(reads_pipe_w)
+
+        aln_file = open(aln_filename, 'wb')
+        bwa_proc = subprocess.Popen(
+            [arvados_bwa.bwa_binary(),
+             'aln', '-t', '16',
+             ref_basename,
+             '-'],
+            stdin=os.fdopen(reads_pipe_r, 'rb', 2**20),
+            stdout=aln_file)
+        aln_file.close()
+        return reads_filename, aln_filename
+
+reads_1, alignments_1 = Aligner().aln('input_1')
+reads_2, alignments_2 = Aligner().aln('input_2')
+pid1, exit1 = os.wait()
+pid2, exit2 = os.wait()
+if exit1 != 0 or exit2 != 0:
+    raise Exception("bwa aln exited non-zero (0x%x, 0x%x)" % (exit1, exit2))
+
+# output alignments in sam format to pipe
+sam_pipe_r, sam_pipe_w = os.pipe()
+sam_pid = os.fork()
+if sam_pid != 0:
+    # parent
+    os.close(sam_pipe_w)
+else:
+    # child
+    os.close(sam_pipe_r)
+    arvados_bwa.run('sampe',
+                    [ref_basename,
+                     alignments_1, alignments_2,
+                     reads_1, reads_2],
+                    stdout=os.fdopen(sam_pipe_w, 'wb', 2**20))
+    sys.exit(0)
+
+# convert sam (sam_pipe_r) to bam (bam_pipe_w)
+bam_pipe_r, bam_pipe_w = os.pipe()
+bam_pid = os.fork()
+if bam_pid != 0:
+    # parent
+    os.close(bam_pipe_w)
+    os.close(sam_pipe_r)
+else:
+    # child
+    os.close(bam_pipe_r)
+    arvados_samtools.run('view',
+                         ['-S', '-b',
+                          '-'],
+                         stdin=os.fdopen(sam_pipe_r, 'rb', 2**20),
+                         stdout=os.fdopen(bam_pipe_w, 'wb', 2**20))
+    sys.exit(0)
+
+# copy bam (bam_pipe_r) to Keep
+out_bam_filename = os.path.split(reads_1)[-1] + '.bam'
+out = arvados.CollectionWriter()
+out.start_new_stream()
+out.start_new_file(out_bam_filename)
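+# Assumption: this SDK's CollectionWriter.write() accepts an iterable, so the
+# open pipe is consumed incrementally rather than read fully into memory.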
+out.write(os.fdopen(bam_pipe_r, 'rb', 2**20))
+
+# make sure everyone exited nicely
+pid3, exit3 = os.waitpid(sam_pid, 0)
+if exit3 != 0:
+    raise Exception("bwa sampe exited non-zero (0x%x)" % exit3)
+pid4, exit4 = os.waitpid(bam_pid, 0)
+if exit4 != 0:
+    raise Exception("samtools view exited non-zero (0x%x)" % exit4)
+
+# proclaim success
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/bwa-index b/crunch_scripts/bwa-index
new file mode 100755 (executable)
index 0000000..f5b7030
--- /dev/null
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import arvados_bwa
+import os
+import re
+import sys
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+ref_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['input'],
+    path = 'reference',
+    decompress = False)
+
+ref_fasta_files = (os.path.join(ref_dir, f)
+                   for f in os.listdir(ref_dir)
+                   if re.search(r'\.fasta(\.gz)?$', f))
+
+# build reference index
+arvados_bwa.run('index',
+                ['-a', 'bwtsw'] + list(ref_fasta_files))
+
+# move output files to new empty directory
+out_dir = os.path.join(arvados.current_task().tmpdir, 'out')
+arvados.util.run_command(['rm', '-rf', out_dir], stderr=sys.stderr)
+os.mkdir(out_dir)
+for f in os.listdir(ref_dir):
+    if re.search(r'\.(amb|ann|bwt|pac|rbwt|rpac|rsa|sa)$', f):
+        sys.stderr.write("bwa output: %s (%d)\n" %
+                         (f, os.stat(os.path.join(ref_dir, f)).st_size))
+        os.rename(os.path.join(ref_dir, f),
+                  os.path.join(out_dir, f))
+
+# store output
+out = arvados.CollectionWriter()
+out.write_directory_tree(out_dir, max_manifest_depth=0)
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/collection-merge b/crunch_scripts/collection-merge
new file mode 100755 (executable)
index 0000000..f3aa5ce
--- /dev/null
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# collection-merge
+#
+# Merge two or more collections together.  Can also be used to extract specific
+# files from a collection to produce a new collection.
+#
+# input:
+# An array of collections or collection/file paths in script_parameter["input"]
+#
+# output:
+# A manifest with the collections merged.  Duplicate file names will
+# have their contents concatenated in the order that they appear in the input
+# array.
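+#
+# Hypothetical example input:
+#   ["0a4b...+1234", "9f76...+5678/subdir/sample.txt"]
+# merges the whole first collection with just sample.txt from the second.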
+
+import arvados
+import md5
+import crunchutil.subst as subst
+import subprocess
+import os
+import hashlib
+
+p = arvados.current_job()['script_parameters']
+
+merged = ""
+src = []
+for c in p["input"]:
+    c = subst.do_substitution(p, c)
+    i = c.find('/')
+    if i == -1:
+        src.append(c)
+        merged += arvados.CollectionReader(c).manifest_text()
+    else:
+        src.append(c[0:i])
+        cr = arvados.CollectionReader(c[0:i])
+        j = c.rfind('/')
+        stream = c[i+1:j]
+        if stream == "":
+            stream = "."
+        fn = c[(j+1):]
+        for s in cr.all_streams():
+            if s.name() == stream:
+                if fn in s.files():
+                    merged += s.files()[fn].as_manifest()
+
+arvados.current_task().set_output(merged)
diff --git a/crunch_scripts/crunchrunner b/crunch_scripts/crunchrunner
new file mode 100755 (executable)
index 0000000..25d3ba5
--- /dev/null
@@ -0,0 +1,10 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+if test -n "$JOB_PARAMETER_CRUNCHRUNNER" ; then
+    exec $TASK_KEEPMOUNT/$JOB_PARAMETER_CRUNCHRUNNER
+else
+    exec /usr/local/bin/crunchrunner
+fi
diff --git a/crunch_scripts/crunchutil/__init__.py b/crunch_scripts/crunchutil/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/crunch_scripts/crunchutil/robust_put.py b/crunch_scripts/crunchutil/robust_put.py
new file mode 100644 (file)
index 0000000..27b0bf3
--- /dev/null
@@ -0,0 +1,56 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import arvados.commands.put as put
+import os
+import logging
+import time
+
+def machine_progress(bytes_written, bytes_expected):
+    return "upload wrote {} total {}\n".format(
+        bytes_written, -1 if (bytes_expected is None) else bytes_expected)
+
+class Args(object):
+    def __init__(self, fn):
+        self.filename = None
+        self.paths = [fn]
+        self.max_manifest_depth = 0
+
+# Upload to Keep with error recovery.
+# Return a uuid or raise an exception if there are too many failures.
+def upload(source_dir, logger=None):
+    if logger is None:
+        logger = logging.getLogger("arvados")
+
+    source_dir = os.path.abspath(source_dir)
+    done = False
+    if 'TASK_WORK' in os.environ:
+        resume_cache = put.ResumeCache(os.path.join(arvados.current_task().tmpdir, "upload-output-checkpoint"))
+    else:
+        resume_cache = put.ResumeCache(put.ResumeCache.make_path(Args(source_dir)))
+    reporter = put.progress_writer(machine_progress)
+    bytes_expected = put.expected_bytes_for([source_dir])
+    backoff = 1
+    outuuid = None
+    while not done:
+        try:
+            out = put.ArvPutCollectionWriter.from_cache(resume_cache, reporter, bytes_expected)
+            out.do_queued_work()
+            out.write_directory_tree(source_dir, max_manifest_depth=0)
+            outuuid = out.finish()
+            done = True
+        except KeyboardInterrupt as e:
+            logger.critical("caught interrupt signal 2")
+            raise e
+        except Exception as e:
+            logger.exception("caught exception:")
+            backoff *= 2
+            if backoff > 256:
+                logger.critical("Too many upload failures, giving up")
+                raise e
+            else:
+                logger.warning("Sleeping for %s seconds before trying again" % backoff)
+                time.sleep(backoff)
+    return outuuid
diff --git a/crunch_scripts/crunchutil/subst.py b/crunch_scripts/crunchutil/subst.py
new file mode 100644 (file)
index 0000000..53def97
--- /dev/null
@@ -0,0 +1,102 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import glob
+import os
+import re
+import stat
+
+BACKSLASH_ESCAPE_RE = re.compile(r'\\(.)')
+
+class SubstitutionError(Exception):
+    pass
+
+def search(c):
+    DEFAULT = 0
+    DOLLAR = 1
+
+    i = 0
+    state = DEFAULT
+    start = None
+    depth = 0
+    while i < len(c):
+        if c[i] == '\\':
+            i += 1
+        elif state == DEFAULT:
+            if c[i] == '$':
+                state = DOLLAR
+                if depth == 0:
+                    start = i
+            elif c[i] == ')':
+                if depth == 1:
+                    return [start, i]
+                if depth > 0:
+                    depth -= 1
+        elif state == DOLLAR:
+            if c[i] == '(':
+                depth += 1
+            state = DEFAULT
+        i += 1
+    if depth != 0:
+        raise SubstitutionError("Substitution error, mismatched parentheses {}".format(c))
+    return None
+
+def sub_file(v):
+    path = os.path.join(os.environ['TASK_KEEPMOUNT'], v)
+    try:
+        st = os.stat(path)
+    except OSError:
+        st = None
+    if st and stat.S_ISREG(st.st_mode):
+        return path
+    else:
+        raise SubstitutionError("$(file {}) is not accessible or is not a regular file".format(path))
+
+def sub_dir(v):
+    d = os.path.dirname(v)
+    if d == '':
+        d = v
+    path = os.path.join(os.environ['TASK_KEEPMOUNT'], d)
+    try:
+        st = os.stat(path)
+    except OSError:
+        st = None
+    if st and stat.S_ISDIR(st.st_mode):
+        return path
+    else:
+        raise SubstitutionError("$(dir {}) is not accessible or is not a directory".format(path))
+
+def sub_basename(v):
+    return os.path.splitext(os.path.basename(v))[0]
+
+def sub_glob(v):
+    l = glob.glob(v)
+    if len(l) == 0:
+        raise SubstitutionError("$(glob {}) no match found".format(v))
+    else:
+        return l[0]
+
+default_subs = {"file ": sub_file,
+                "dir ": sub_dir,
+                "basename ": sub_basename,
+                "glob ": sub_glob}
+
+def do_substitution(p, c, subs=default_subs):
+    while True:
+        m = search(c)
+        if m is None:
+            return BACKSLASH_ESCAPE_RE.sub(r'\1', c)
+
+        v = do_substitution(p, c[m[0]+2 : m[1]])
+        var = True
+        for sub in subs:
+            if v.startswith(sub):
+                r = subs[sub](v[len(sub):])
+                var = False
+                break
+        if var:
+            if v in p:
+                r = p[v]
+            else:
+                raise SubstitutionError("Unknown variable or function '%s' while performing substitution on '%s'" % (v, c))
+            if r is None:
+                raise SubstitutionError("Substitution for '%s' is null while performing substitution on '%s'" % (v, c))
+            if not isinstance(r, basestring):
+                raise SubstitutionError("Substitution for '%s' must be a string while performing substitution on '%s'" % (v, c))
+
+        c = c[:m[0]] + r + c[m[1]+1:]
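+
+# Worked example (hypothetical parameter values): with
+#   p = {'bwa_index': 'c97e...+1234/bwa.tar'}
+# do_substitution(p, "$(file $(bwa_index))") first resolves the inner
+# $(bwa_index) from p, then applies the "file " handler, yielding
+# $TASK_KEEPMOUNT/c97e...+1234/bwa.tar (provided it is a regular file).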
diff --git a/crunch_scripts/crunchutil/vwd.py b/crunch_scripts/crunchutil/vwd.py
new file mode 100644 (file)
index 0000000..3245da1
--- /dev/null
@@ -0,0 +1,107 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import os
+import stat
+import arvados.commands.run
+import logging
+
+# Implements "Virtual Working Directory"
+# Provides a way of emulating a shared writable directory in Keep based
+# on a "check out, edit, check in, merge" model.
+# At the moment, this only permits adding new files; applications
+# cannot modify or delete existing files.
+
+# Create a symlink tree rooted at target_dir mirroring arv-mounted
+# source_collection.  target_dir must be empty, and will be created if it
+# doesn't exist.
+def checkout(source_collection, target_dir, keepmount=None):
+    # create symlinks
+    if keepmount is None:
+        keepmount = os.environ['TASK_KEEPMOUNT']
+
+    if not os.path.exists(target_dir):
+        os.makedirs(target_dir)
+
+    l = os.listdir(target_dir)
+    if len(l) > 0:
+        raise Exception("target_dir must be empty before checkout, contains %s" % l)
+
+    stem = os.path.join(keepmount, source_collection)
+    for root, dirs, files in os.walk(os.path.join(keepmount, source_collection), topdown=True):
+        rel = root[len(stem)+1:]
+        for d in dirs:
+            os.mkdir(os.path.join(target_dir, rel, d))
+        for f in files:
+            os.symlink(os.path.join(root, f), os.path.join(target_dir, rel, f))
+
+def checkin(target_dir):
+    """Write files in `target_dir` to Keep.
+
+    Regular files or symlinks to files outside the keep mount are written to
+    Keep as normal files (Keep does not support symlinks).
+
+    Symlinks to files in the keep mount will result in files in the new
+    collection which reference existing Keep blocks, so no data is copied.
+
+    Returns a new Collection object, with data flushed but the collection record
+    not saved to the API.
+
+    """
+
+    outputcollection = arvados.collection.Collection(num_retries=5)
+
+    if target_dir[-1:] != '/':
+        target_dir += '/'
+
+    collections = {}
+
+    logger = logging.getLogger("arvados")
+
+    last_error = None
+    for root, dirs, files in os.walk(target_dir):
+        for f in files:
+            try:
+                s = os.lstat(os.path.join(root, f))
+
+                writeIt = False
+
+                if stat.S_ISREG(s.st_mode):
+                    writeIt = True
+                elif stat.S_ISLNK(s.st_mode):
+                    # 1. check if it is a link into a collection
+                    real = os.path.split(os.path.realpath(os.path.join(root, f)))
+                    (pdh, branch) = arvados.commands.run.is_in_collection(real[0], real[1])
+                    if pdh is not None:
+                        # 2. load collection
+                        if pdh not in collections:
+                            # 2.1 make sure it is flushed (see #5787 note 11)
+                            fd = os.open(real[0], os.O_RDONLY)
+                            os.fsync(fd)
+                            os.close(fd)
+
+                            # 2.2 get collection from API server
+                            collections[pdh] = arvados.collection.CollectionReader(pdh,
+                                                                                   api_client=outputcollection._my_api(),
+                                                                                   keep_client=outputcollection._my_keep(),
+                                                                                   num_retries=5)
+                        # 3. copy arvfile to new collection
+                        outputcollection.copy(branch, os.path.join(root[len(target_dir):], f), source_collection=collections[pdh])
+                    else:
+                        writeIt = True
+
+                if writeIt:
+                    reldir = root[len(target_dir):]
+                    with outputcollection.open(os.path.join(reldir, f), "wb") as writer:
+                        with open(os.path.join(root, f), "rb") as reader:
+                            dat = reader.read(64*1024)
+                            while dat:
+                                writer.write(dat)
+                                dat = reader.read(64*1024)
+            except (IOError, OSError) as e:
+                logger.error(e)
+                last_error = e
+
+    return (outputcollection, last_error)
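+
+# Typical lifecycle sketch (hypothetical names):
+#   checkout(source_pdh, 'work')          # symlink tree into ./work
+#   # ...task writes new files under ./work...
+#   (coll, err) = checkin('work')         # new files + symlinked blocks -> Collection
+#   if err is None:
+#       arvados.current_task().set_output(coll.manifest_text())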
diff --git a/crunch_scripts/cwl-runner b/crunch_scripts/cwl-runner
new file mode 100755 (executable)
index 0000000..0c79844
--- /dev/null
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Crunch script integration for running arvados-cwl-runner inside a crunch job.
+
+import arvados_cwl
+import sys
+
+try:
+    # Use the crunch script defined in the arvados_cwl package.  This helps
+    # prevent the crunch script from going out of sync with the rest of the
+    # arvados_cwl package.
+    import arvados_cwl.crunch_script
+    arvados_cwl.crunch_script.run()
+    sys.exit()
+except ImportError:
+    pass
+
+# When running against an older arvados-cwl-runner package without
+# arvados_cwl.crunch_script, fall back to the old code.
+
+
+# This gets the job record, transforms the script parameters into a valid CWL
+# input object, then executes the CWL runner to run the underlying workflow or
+# tool.  When the workflow completes, record the output object in an output
+# collection for this runner job.
+
+import arvados
+import arvados.collection
+import arvados.util
+import cwltool.main
+import logging
+import os
+import json
+import argparse
+import re
+import functools
+
+from arvados.api import OrderedJsonModel
+from cwltool.process import shortname, adjustFileObjs, adjustDirObjs, getListing, normalizeFilesDirs
+from cwltool.load_tool import load_tool
+
+# Print package versions
+logging.info(cwltool.main.versionstring())
+
+api = arvados.api("v1")
+
+try:
+    job_order_object = arvados.current_job()['script_parameters']
+
+    pdh_path = re.compile(r'^[0-9a-f]{32}\+\d+(/.+)?$')
+
+    def keeppath(v):
+        if pdh_path.match(v):
+            return "keep:%s" % v
+        else:
+            return v
+
+    def keeppathObj(v):
+        v["location"] = keeppath(v["location"])
+
+    job_order_object["cwl:tool"] = "file://%s/%s" % (os.environ['TASK_KEEPMOUNT'], job_order_object["cwl:tool"])
+
+    for k,v in job_order_object.items():
+        if isinstance(v, basestring) and arvados.util.keep_locator_pattern.match(v):
+            job_order_object[k] = {
+                "class": "File",
+                "location": "keep:%s" % v
+            }
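+            # e.g. a bare locator "0a4b...+1234" becomes
+            # {"class": "File", "location": "keep:0a4b...+1234"}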
+
+    adjustFileObjs(job_order_object, keeppathObj)
+    adjustDirObjs(job_order_object, keeppathObj)
+    normalizeFilesDirs(job_order_object)
+    adjustDirObjs(job_order_object, functools.partial(getListing, arvados_cwl.fsaccess.CollectionFsAccess("", api_client=api)))
+
+    output_name = None
+    if "arv:output_name" in job_order_object:
+        output_name = job_order_object["arv:output_name"]
+        del job_order_object["arv:output_name"]
+
+    runner = arvados_cwl.ArvCwlRunner(api_client=arvados.api('v1', model=OrderedJsonModel()),
+                                      output_name=output_name)
+
+    t = load_tool(job_order_object, runner.arv_make_tool)
+
+    args = argparse.Namespace()
+    args.project_uuid = arvados.current_job()["owner_uuid"]
+    args.enable_reuse = True
+    args.submit = False
+    args.debug = True
+    args.quiet = False
+    args.ignore_docker_for_reuse = False
+    args.basedir = os.getcwd()
+    args.cwl_runner_job = {"uuid": arvados.current_job()["uuid"], "state": arvados.current_job()["state"]}
+    outputObj = runner.arv_executor(t, job_order_object, **vars(args))
+
+    if runner.final_output_collection:
+        outputCollection = runner.final_output_collection.portable_data_hash()
+    else:
+        outputCollection = None
+
+    api.job_tasks().update(uuid=arvados.current_task()['uuid'],
+                                         body={
+                                             'output': outputCollection,
+                                             'success': True,
+                                             'progress':1.0
+                                         }).execute()
+except Exception as e:
+    logging.exception("Unhandled exception")
+    api.job_tasks().update(uuid=arvados.current_task()['uuid'],
+                                         body={
+                                             'output': None,
+                                             'success': False,
+                                             'progress':1.0
+                                         }).execute()
diff --git a/crunch_scripts/decompress-all.py b/crunch_scripts/decompress-all.py
new file mode 100755 (executable)
index 0000000..100ea12
--- /dev/null
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+#
+# decompress-all.py
+#
+# Decompress all compressed files in the collection using the "dtrx" tool and
+# produce a new collection with the contents.  Uncompressed files
+# are passed through.
+#
+# input:
+# A collection at script_parameters["input"]
+#
+# output:
+# A manifest of the uncompressed contents of the input collection.
+
+import arvados
+import re
+import subprocess
+import os
+import sys
+import crunchutil.robust_put as robust_put
+
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True,
+                                          input_as_path=True)
+
+task = arvados.current_task()
+
+input_file = task['parameters']['input']
+
+infile_parts = re.match(r"(^[a-f0-9]{32}\+\d+)(\+\S+)*(/.*)?(/[^/]+)$", input_file)
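+# For a hypothetical input like "0a4b...+1234/subdir/reads.fastq.gz" the groups
+# are: (1) the "0a4b...+1234" locator, (3) the optional "/subdir" stream path,
+# (4) the "/reads.fastq.gz" file name.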
+
+outdir = os.path.join(task.tmpdir, "output")
+os.makedirs(outdir)
+os.chdir(outdir)
+
+if infile_parts is None:
+    print >>sys.stderr, "Failed to parse input filename '%s' as a Keep file" % input_file
+    sys.exit(1)
+
+cr = arvados.CollectionReader(infile_parts.group(1))
+# group(3) is optional, so guard against None before stripping the leading "/"
+streamname = infile_parts.group(3)
+if streamname is not None:
+    streamname = streamname[1:]
+filename = infile_parts.group(4)[1:]
+
+if streamname is not None:
+    subprocess.call(["mkdir", "-p", streamname])
+    os.chdir(streamname)
+else:
+    streamname = '.'
+
+m = re.match(r'.*\.(gz|Z|bz2|tgz|tbz|zip|rar|7z|cab|deb|rpm|cpio|gem)$', arvados.get_task_param_mount('input'), re.IGNORECASE)
+
+if m is not None:
+    rc = subprocess.call(["dtrx", "-r", "-n", "-q", arvados.get_task_param_mount('input')])
+    if rc == 0:
+        task.set_output(robust_put.upload(outdir))
+    else:
+        sys.exit(rc)
+else:
+    streamreader = filter(lambda s: s.name() == streamname, cr.all_streams())[0]
+    filereader = streamreader.files()[filename]
+    task.set_output(streamname + filereader.as_manifest()[1:])
diff --git a/crunch_scripts/file-select b/crunch_scripts/file-select
new file mode 100755 (executable)
index 0000000..c4af05c
--- /dev/null
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import os
+import re
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+this_job_input = this_job['script_parameters']['input']
+manifest_text = ""
+for f in arvados.CollectionReader(this_job_input).all_files():
+    if f.name() in this_job['script_parameters']['names']:
+        manifest_text += f.as_manifest()
+
+this_task.set_output(arvados.Keep.put(manifest_text))
diff --git a/crunch_scripts/grep b/crunch_scripts/grep
new file mode 100755 (executable)
index 0000000..a84c0f6
--- /dev/null
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import re
+
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+this_task_input = this_task['parameters']['input']
+pattern = re.compile(this_job['script_parameters']['pattern'])
+
+input_file = list(arvados.CollectionReader(this_task_input).all_files())[0]
+out = arvados.CollectionWriter()
+out.set_current_file_name(input_file.decompressed_name())
+out.set_current_stream_name(input_file.stream_name())
+for line in input_file.readlines():
+    if pattern.search(line):
+        out.write(line)
+
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/hash b/crunch_scripts/hash
new file mode 100755 (executable)
index 0000000..56eec7a
--- /dev/null
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import hashlib
+import os
+
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True, input_as_path=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+
+if 'algorithm' in this_job['script_parameters']:
+    alg = this_job['script_parameters']['algorithm']
+else:
+    alg = 'md5'
+digestor = hashlib.new(alg)
+
+input_file = arvados.get_task_param_mount('input')
+
+with open(input_file) as f:
+    while True:
+        buf = f.read(2**20)
+        if len(buf) == 0:
+            break
+        digestor.update(buf)
+
+hexdigest = digestor.hexdigest()
+
+file_name = '/'.join(this_task['parameters']['input'].split('/')[1:])
+
+out = arvados.CollectionWriter()
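+# Note: the output file name is hard-coded to md5sum.txt even when the
+# "algorithm" parameter selects a different digest.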
+out.set_current_file_name("md5sum.txt")
+out.write("%s %s\n" % (hexdigest, file_name))
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/pgp-survey-import b/crunch_scripts/pgp-survey-import
new file mode 100755 (executable)
index 0000000..f12e84b
--- /dev/null
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import string
+import json
+import UserDict
+import sys
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+this_job_input = this_job['script_parameters']['input']
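+
+# Assumed input layout (inferred from the parsing below): a TSV where row 0
+# holds the column headings, words[0] is the participant huID, and survey
+# trait responses start at column 3.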
+
+out = arvados.CollectionWriter()
+out.set_current_file_name("arvados_objects.json")
+out.write("[\n")
+separator = ""
+
+traits = {}
+done_bytes = 0
+done_ratio = 0
+for input_file in arvados.CollectionReader(this_job_input).all_files():
+    for line_number, line in enumerate(input_file.readlines()):
+
+        done_bytes += len(line)
+        new_done_ratio = 1.0 * done_bytes / input_file.size()
+        if line_number == 2 or new_done_ratio - done_ratio > 0.05:
+            sys.stderr.write("progress: %d%% after %d lines\n" % (int(done_ratio * 100), line_number+1))
+            done_ratio = new_done_ratio
+
+        words = string.split(string.strip(line), "\t")
+        if line_number == 0:
+            headings = words
+            for t in arvados.api('v1').traits().list(
+                where={'name':words},
+                limit=1000
+                ).execute()['items']:
+                traits[t['name']] = t
+            for i, trait_name in enumerate(words[3:], start=3):
+                # find or create trait
+                if trait_name not in traits:
+                    traits_match = arvados.api('v1').traits().list(
+                        where={'name':trait_name}
+                        ).execute()['items']
+                    if len(traits_match) > 0:
+                        traits[trait_name] = traits_match[0]
+                    else:
+                        traits[trait_name] = arvados.api('v1').traits().create(
+                            trait={'name':trait_name}).execute()
+                out.write(separator)
+                out.write(json.dumps(traits[trait_name]))
+                separator = ",\n"
+        else:
+            huID_links_match = arvados.api('v1').links().list(
+                where={'link_class':'identifier','name':words[0]}
+                ).execute()['items']
+            if len(huID_links_match) > 0:
+                human_uuid = huID_links_match[0]['head_uuid']
+            else:
+                human = arvados.api('v1').humans().create(
+                    body={}
+                    ).execute()
+                huID_link = arvados.api('v1').links().create(
+                    body={
+                        'link_class':'identifier',
+                        'name':words[0],
+                        'head_kind':'arvados#human',
+                        'head_uuid':human['uuid']
+                        }
+                    ).execute()
+                human_uuid = human['uuid']
+            human_trait = {}
+            for t in arvados.api('v1').links().list(
+                limit=10000,
+                where={
+                    'tail_uuid':human_uuid,
+                    'tail_kind':'arvados#human',
+                    'head_kind':'arvados#trait',
+                    'link_class':'human_trait',
+                    'name':'pgp-survey-response'
+                    }
+                ).execute()['items']:
+                human_trait[t['head_uuid']] = t
+            for i, trait_value in enumerate(words[3:], start=3):
+                trait_uuid = traits[headings[i]]['uuid']
+                if trait_uuid in human_trait:
+                    trait_link = human_trait[trait_uuid]
+                    if trait_link['properties']['value'] != trait_value:
+                        # update database value to match survey response
+                        trait_link['properties']['value'] = trait_value
+                        arvados.api('v1').links().update(
+                            uuid=trait_link['uuid'],
+                            body={'properties':trait_link['properties']}
+                            ).execute()
+                    out.write(",\n")
+                    out.write(json.dumps(trait_link))
+                elif trait_value == '':
+                    # nothing in database, nothing in input
+                    pass
+                else:
+                    trait_link = {
+                        'tail_uuid':human_uuid,
+                        'tail_kind':'arvados#human',
+                        'head_uuid':traits[headings[i]]['uuid'],
+                        'head_kind':'arvados#trait',
+                        'link_class':'human_trait',
+                        'name':'pgp-survey-response',
+                        'properties': { 'value': trait_value }
+                        }
+                    arvados.api('v1').links().create(
+                        body=trait_link
+                        ).execute()
+                    out.write(",\n")
+                    out.write(json.dumps(trait_link))
+
+out.write("\n]\n")
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/pgp-survey-parse b/crunch_scripts/pgp-survey-parse
new file mode 100755 (executable)
index 0000000..ee852f1
--- /dev/null
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+parser_path = arvados.util.git_checkout(
+    url = this_job['script_parameters']['parser_url'],
+    version = this_job['script_parameters']['parser_version'],
+    path = 'parser')
+
+stdoutdata, stderrdata = arvados.util.run_command(
+    ["python", "demo.py"],
+    cwd=parser_path)
+
+out = arvados.CollectionWriter()
+out.write(stdoutdata)
+out.set_current_file_name('participant_traits.tsv')
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/picard-gatk2-prep b/crunch_scripts/picard-gatk2-prep
new file mode 100755 (executable)
index 0000000..976060f
--- /dev/null
@@ -0,0 +1,211 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import os
+import re
+import sys
+import subprocess
+import arvados_picard
+from arvados_ipc import *
+
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+ref_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['reference'],
+    path = 'reference',
+    decompress = True)
+ref_fasta_files = [os.path.join(ref_dir, f)
+                   for f in os.listdir(ref_dir)
+                   if re.search(r'\.fasta(\.gz)?$', f)]
+input_collection = this_task['parameters']['input']
+
+for s in arvados.CollectionReader(input_collection).all_streams():
+    for f in s.all_files():
+        input_stream_name = s.name()
+        input_file_name = f.name()
+        break
+
+# Unfortunately, picard FixMateInformation cannot read from a pipe. We
+# must copy the input to a temporary file before running picard.
+input_bam_path = os.path.join(this_task.tmpdir, input_file_name)
+with open(input_bam_path, 'wb') as bam:
+    for s in arvados.CollectionReader(input_collection).all_streams():
+        for f in s.all_files():
+            for s in f.readall():
+                bam.write(s)
+
+children = {}
+pipes = {}
+
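+# Pipeline topology built below; every stage runs in its own forked child:
+#
+#   fixmate -> sortsam -> reordersam -> addrg -> bammanifest
+#
+# bammanifest tees the finished BAM three ways: into Keep (with its manifest),
+# into casm (CollectAlignmentSummaryMetrics), and into index -> indexmanifest,
+# which builds and stores the matching .bai.
+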
+pipe_setup(pipes, 'fixmate')
+if 0==named_fork(children, 'fixmate'):
+    pipe_closeallbut(pipes, ('fixmate', 'w'))
+    arvados_picard.run(
+        'FixMateInformation',
+        params={
+            'i': input_bam_path,
+            'o': '/dev/stdout',
+            'quiet': 'true',
+            'so': 'coordinate',
+            'validation_stringency': 'LENIENT',
+            'compression_level': 0
+            },
+        stdout=os.fdopen(pipes['fixmate','w'], 'wb', 2**20))
+    os._exit(0)
+os.close(pipes.pop(('fixmate','w')))
+
+pipe_setup(pipes, 'sortsam')
+if 0==named_fork(children, 'sortsam'):
+    pipe_closeallbut(pipes, ('fixmate', 'r'), ('sortsam', 'w'))
+    arvados_picard.run(
+        'SortSam',
+        params={
+            'i': '/dev/stdin',
+            'o': '/dev/stdout',
+            'quiet': 'true',
+            'so': 'coordinate',
+            'validation_stringency': 'LENIENT',
+            'compression_level': 0
+            },
+        stdin=os.fdopen(pipes['fixmate','r'], 'rb', 2**20),
+        stdout=os.fdopen(pipes['sortsam','w'], 'wb', 2**20))
+    os._exit(0)
+
+pipe_setup(pipes, 'reordersam')
+if 0==named_fork(children, 'reordersam'):
+    pipe_closeallbut(pipes, ('sortsam', 'r'), ('reordersam', 'w'))
+    arvados_picard.run(
+        'ReorderSam',
+        params={
+            'i': '/dev/stdin',
+            'o': '/dev/stdout',
+            'reference': ref_fasta_files[0],
+            'quiet': 'true',
+            'validation_stringency': 'LENIENT',
+            'compression_level': 0
+            },
+        stdin=os.fdopen(pipes['sortsam','r'], 'rb', 2**20),
+        stdout=os.fdopen(pipes['reordersam','w'], 'wb', 2**20))
+    os._exit(0)
+
+pipe_setup(pipes, 'addrg')
+if 0==named_fork(children, 'addrg'):
+    pipe_closeallbut(pipes, ('reordersam', 'r'), ('addrg', 'w'))
+    arvados_picard.run(
+        'AddOrReplaceReadGroups',
+        params={
+            'i': '/dev/stdin',
+            'o': '/dev/stdout',
+            'quiet': 'true',
+            'rglb': this_job['script_parameters'].get('rglb', 0),
+            'rgpl': this_job['script_parameters'].get('rgpl', 'illumina'),
+            'rgpu': this_job['script_parameters'].get('rgpu', 0),
+            'rgsm': this_job['script_parameters'].get('rgsm', 0),
+            'validation_stringency': 'LENIENT'
+            },
+        stdin=os.fdopen(pipes['reordersam','r'], 'rb', 2**20),
+        stdout=os.fdopen(pipes['addrg','w'], 'wb', 2**20))
+    os._exit(0)
+
+pipe_setup(pipes, 'bammanifest')
+pipe_setup(pipes, 'bam')
+pipe_setup(pipes, 'casm_in')
+if 0==named_fork(children, 'bammanifest'):
+    pipe_closeallbut(pipes,
+                     ('addrg', 'r'),
+                     ('bammanifest', 'w'),
+                     ('bam', 'w'),
+                     ('casm_in', 'w'))
+    out = arvados.CollectionWriter()
+    out.start_new_stream(input_stream_name)
+    out.start_new_file(input_file_name)
+    while True:
+        buf = os.read(pipes['addrg','r'], 2**20)
+        if len(buf) == 0:
+            break
+        os.write(pipes['bam','w'], buf)
+        os.write(pipes['casm_in','w'], buf)
+        out.write(buf)
+    os.write(pipes['bammanifest','w'], out.manifest_text())
+    os.close(pipes['bammanifest','w'])
+    os._exit(0)
+
+pipe_setup(pipes, 'casm')
+if 0 == named_fork(children, 'casm'):
+    pipe_closeallbut(pipes, ('casm_in', 'r'), ('casm', 'w'))
+    arvados_picard.run(
+        'CollectAlignmentSummaryMetrics',
+        params={
+            'input': '/dev/fd/' + str(pipes['casm_in','r']),
+            'output': '/dev/fd/' + str(pipes['casm','w']),
+            'reference_sequence': ref_fasta_files[0],
+            'validation_stringency': 'LENIENT',
+            },
+        close_fds=False)
+    os._exit(0)
+
+pipe_setup(pipes, 'index')
+if 0==named_fork(children, 'index'):
+    pipe_closeallbut(pipes, ('bam', 'r'), ('index', 'w'))
+    arvados_picard.run(
+        'BuildBamIndex',
+        params={
+            'i': '/dev/stdin',
+            'o': '/dev/stdout',
+            'quiet': 'true',
+            'validation_stringency': 'LENIENT'
+            },
+        stdin=os.fdopen(pipes['bam','r'], 'rb', 2**20),
+        stdout=os.fdopen(pipes['index','w'], 'wb', 2**20))
+    os._exit(0)
+
+pipe_setup(pipes, 'indexmanifest')
+if 0==named_fork(children, 'indexmanifest'):
+    pipe_closeallbut(pipes, ('index', 'r'), ('indexmanifest', 'w'))
+    out = arvados.CollectionWriter()
+    out.start_new_stream(input_stream_name)
+    out.start_new_file(re.sub(r'\.bam$', '.bai', input_file_name))
+    while True:
+        buf = os.read(pipes['index','r'], 2**20)
+        if len(buf) == 0:
+            break
+        out.write(buf)
+    os.write(pipes['indexmanifest','w'], out.manifest_text())
+    os.close(pipes['indexmanifest','w'])
+    os._exit(0)
+
+pipe_closeallbut(pipes,
+                 ('bammanifest', 'r'),
+                 ('indexmanifest', 'r'),
+                 ('casm', 'r'))
+
+outmanifest = ''
+
+for which in ['bammanifest', 'indexmanifest']:
+    with os.fdopen(pipes[which,'r'], 'rb', 2**20) as f:
+        while True:
+            buf = f.read()
+            if buf == '':
+                break
+            outmanifest += buf
+
+casm_out = arvados.CollectionWriter()
+casm_out.start_new_stream(input_stream_name)
+casm_out.start_new_file(input_file_name + '.casm.tsv')
+casm_out.write(os.fdopen(pipes.pop(('casm','r'))))
+
+outmanifest += casm_out.manifest_text()
+
+all_ok = True
+for (childname, pid) in children.items():
+    # keep all_ok on the RHS so waitpid() is still called for every child
+    all_ok = waitpid_and_check_exit(pid, childname) and all_ok
+
+if all_ok:
+    this_task.set_output(outmanifest)
+else:
+    sys.exit(1)
diff --git a/crunch_scripts/pyrtg.py b/crunch_scripts/pyrtg.py
new file mode 100644 (file)
index 0000000..d733270
--- /dev/null
@@ -0,0 +1,75 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import re
+import os
+import sys
+
+rtg_install_path = None
+
+def setup():
+    global rtg_install_path
+    if rtg_install_path:
+        return rtg_install_path
+    rtg_path = arvados.util.zipball_extract(
+        zipball = arvados.current_job()['script_parameters']['rtg_binary_zip'],
+        path = 'rtg')
+    rtg_license_path = arvados.util.collection_extract(
+        collection = arvados.current_job()['script_parameters']['rtg_license'],
+        path = 'license',
+        decompress = False)
+
+    # symlink to rtg-license.txt
+    license_txt_path = os.path.join(rtg_license_path, 'rtg-license.txt')
+    try:
+        os.symlink(license_txt_path, os.path.join(rtg_path,'rtg-license.txt'))
+    except OSError:
+        if not os.path.exists(os.path.join(rtg_path,'rtg-license.txt')):
+            os.symlink(license_txt_path, os.path.join(rtg_path,'rtg-license.txt'))
+
+    rtg_install_path = rtg_path
+    return rtg_path
+
+def run_rtg(command, output_dir, command_args, **kwargs):
+    global rtg_install_path
+    execargs = [os.path.join(rtg_install_path, 'rtg'),
+                command,
+                '-o', output_dir]
+    execargs += command_args
+    sys.stderr.write("run_rtg: exec %s\n" % str(execargs))
+    arvados.util.run_command(
+        execargs,
+        cwd=arvados.current_task().tmpdir,
+        stderr=sys.stderr,
+        stdout=sys.stderr)
+
+    # Exit status cannot be trusted in rtg 1.1.1.
+    assert_done(output_dir)
+
+    # Copy log files to stderr and delete them to avoid storing them
+    # in Keep with the output data.
+    for dirent in arvados.util.listdir_recursive(output_dir):
+        if is_log_file(dirent):
+            log_file = os.path.join(output_dir, dirent)
+            sys.stderr.write(' '.join(['==>', dirent, '<==\n']))
+            with open(log_file, 'rb') as f:
+                while True:
+                    buf = f.read(2**20)
+                    if len(buf) == 0:
+                        break
+                    sys.stderr.write(buf)
+            sys.stderr.write('\n') # in case log does not end in newline
+            os.unlink(log_file)
+
+def assert_done(output_dir):
+    # Sanity-check exit code.
+    done_file = os.path.join(output_dir, 'done')
+    if not os.path.exists(done_file):
+        raise Exception("rtg exited 0 but %s does not exist. abort.\n" % done_file)
+
+def is_log_file(filename):
+    return re.search(r'^(.*/)?(progress|done|\S+\.log)$', filename)
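+
+# Typical use (see the rtg-* scripts below): pyrtg.run_rtg('format', out_dir,
+# ['-f', 'fastq', ...]) runs the extracted rtg binary with '-o <out_dir>' and
+# sanity-checks the 'done' marker written to out_dir.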
+
+setup()
diff --git a/crunch_scripts/rtg-fasta2sdf b/crunch_scripts/rtg-fasta2sdf
new file mode 100755 (executable)
index 0000000..f1ef617
--- /dev/null
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import os
+import re
+import sys
+import pyrtg
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+fasta_path = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['input'],
+    path = 'fasta',
+    decompress = False)
+fasta_files = filter(lambda f: f != '.locator', os.listdir(fasta_path))
+out_dir = os.path.join(arvados.current_task().tmpdir, 'ref-sdf')
+arvados.util.run_command(['rm', '-rf', out_dir], stderr=sys.stderr)
+
+pyrtg.run_rtg('format', out_dir,
+              map(lambda f: os.path.join(fasta_path, f), fasta_files))
+
+out = arvados.CollectionWriter()
+out.write_directory_tree(out_dir, max_manifest_depth=0)
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/rtg-fastq2sdf b/crunch_scripts/rtg-fastq2sdf
new file mode 100755 (executable)
index 0000000..e42697f
--- /dev/null
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import os
+import re
+import sys
+import pyrtg
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+fastq_path = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['input'],
+    path = 'fastq')
+fastq_files = filter(lambda f: f != '.locator', os.listdir(fastq_path))
+tmp_dir_base = os.path.join(arvados.current_task().tmpdir, 'tmp')
+out_dir = os.path.join(arvados.current_task().tmpdir, 'reads')
+
+arvados.util.run_command(['rm', '-rf', tmp_dir_base], stderr=sys.stderr)
+arvados.util.run_command(['rm', '-rf', out_dir], stderr=sys.stderr)
+os.mkdir(tmp_dir_base)
+
+# convert fastq to sdf
+tmp_dirs = []
+for leftarm in fastq_files:
+    if re.search(r'_1\.f(ast)?q(\.gz)?$', leftarm):
+        rightarm = re.sub(r'_1(\.f(ast)?q(\.gz)?)$', '_2\\1', leftarm)
+        if rightarm in fastq_files:
+            tmp_dirs += ['%s/%08d' % (tmp_dir_base, len(tmp_dirs))]
+            pyrtg.run_rtg('format', tmp_dirs[-1],
+                          ['-f', 'fastq',
+                           '-q', 'sanger',
+                           '-l', os.path.join(fastq_path, leftarm),
+                           '-r', os.path.join(fastq_path, rightarm)])
+
+# split sdf
+pyrtg.run_rtg('sdfsplit', out_dir,
+              ['-n', '1500000'] + tmp_dirs)
+
+# store output
+out = arvados.CollectionWriter()
+out.write_directory_tree(out_dir, max_manifest_depth=1)
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/rtg-map b/crunch_scripts/rtg-map
new file mode 100755 (executable)
index 0000000..f740888
--- /dev/null
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import os
+import re
+import sys
+import pyrtg
+
+arvados.job_setup.one_task_per_input_stream(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+in_dir = os.path.join(this_task.tmpdir, 'input')
+arvados.util.run_command(['rm', '-rf', in_dir], stderr=sys.stderr)
+in_dir = arvados.util.stream_extract(
+    stream = arvados.StreamReader(this_task['parameters']['input']),
+    path = in_dir,
+    decompress = False)
+ref_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['reference'],
+    path = 'reference',
+    decompress = False)
+
+out_dir = os.path.join(arvados.current_task().tmpdir, 'out')
+arvados.util.run_command(['rm', '-rf', out_dir], stderr=sys.stderr)
+
+# map reads
+pyrtg.run_rtg('map', out_dir,
+              ['-i', in_dir,
+               '-t', ref_dir,
+               '-a', '2',
+               '-b', '1',
+               '--sam-rg', '@RG\\tID:NA\\tSM:NA\\tPL:ILLUMINA'])
+
+# store output
+out = arvados.CollectionWriter()
+out.write_directory_tree(out_dir, this_task['parameters']['input'][0], 0)
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/rtg-snp b/crunch_scripts/rtg-snp
new file mode 100755 (executable)
index 0000000..1d8a605
--- /dev/null
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import os
+import re
+import sys
+import pyrtg
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+ref_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['reference'],
+    path = 'reference',
+    decompress = False)
+input_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['input'],
+    path = 'input')
+bam_files = map(lambda f: os.path.join(input_dir, f),
+                filter(lambda f: re.search(r'^(.*/)?alignments.bam$', f),
+                       arvados.util.listdir_recursive(input_dir)))
+out_dir = os.path.join(arvados.current_task().tmpdir, 'out')
+arvados.util.run_command(['rm', '-rf', out_dir], stderr=sys.stderr)
+
+# call sequence variants
+pyrtg.run_rtg('snp', out_dir,
+              ['-t', ref_dir] + bam_files)
+
+# store output
+out = arvados.CollectionWriter()
+out.write_directory_tree(out_dir, max_manifest_depth=0)
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/run-command b/crunch_scripts/run-command
new file mode 100755 (executable)
index 0000000..3fd08bf
--- /dev/null
@@ -0,0 +1,458 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import logging
+
+logger = logging.getLogger('run-command')
+log_handler = logging.StreamHandler()
+log_handler.setFormatter(logging.Formatter("run-command: %(message)s"))
+logger.addHandler(log_handler)
+logger.setLevel(logging.INFO)
+
+import arvados
+import re
+import os
+import subprocess
+import sys
+import shutil
+import crunchutil.subst as subst
+import time
+import arvados.commands.put as put
+import signal
+import stat
+import copy
+import traceback
+import pprint
+import multiprocessing
+import crunchutil.robust_put as robust_put
+import crunchutil.vwd as vwd
+import argparse
+import json
+import tempfile
+import errno
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--dry-run', action='store_true')
+parser.add_argument('--script-parameters', type=str, default="{}")
+args = parser.parse_args()
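+
+# Local debugging sketch (hypothetical values): the JSON below supplies the
+# minimal "command" parameter that the dry-run path expands and logs.
+#   run-command --dry-run --script-parameters '{"command": ["echo", "hello"]}'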
+
+os.umask(0077)
+
+if not args.dry_run:
+    api = arvados.api('v1')
+    t = arvados.current_task().tmpdir
+    os.chdir(arvados.current_task().tmpdir)
+    os.mkdir("tmpdir")
+    os.mkdir("output")
+
+    os.chdir("output")
+
+    outdir = os.getcwd()
+
+    taskp = None
+    jobp = arvados.current_job()['script_parameters']
+    if len(arvados.current_task()['parameters']) > 0:
+        taskp = arvados.current_task()['parameters']
+else:
+    outdir = "/tmp"
+    jobp = json.loads(args.script_parameters)
+    os.environ['JOB_UUID'] = 'zzzzz-8i9sb-1234567890abcde'
+    os.environ['TASK_UUID'] = 'zzzzz-ot0gb-1234567890abcde'
+    os.environ['CRUNCH_SRC'] = '/tmp/crunch-src'
+    if 'TASK_KEEPMOUNT' not in os.environ:
+        os.environ['TASK_KEEPMOUNT'] = '/keep'
+
+def sub_tmpdir(v):
+    return os.path.join(arvados.current_task().tmpdir, 'tmpdir')
+
+def sub_outdir(v):
+    return outdir
+
+def sub_cores(v):
+    return str(multiprocessing.cpu_count())
+
+def sub_jobid(v):
+    return os.environ['JOB_UUID']
+
+def sub_taskid(v):
+    return os.environ['TASK_UUID']
+
+def sub_jobsrc(v):
+    return os.environ['CRUNCH_SRC']
+
+subst.default_subs["task.tmpdir"] = sub_tmpdir
+subst.default_subs["task.outdir"] = sub_outdir
+subst.default_subs["job.srcdir"] = sub_jobsrc
+subst.default_subs["node.cores"] = sub_cores
+subst.default_subs["job.uuid"] = sub_jobid
+subst.default_subs["task.uuid"] = sub_taskid
+
+class SigHandler(object):
+    def __init__(self):
+        self.sig = None
+
+    def send_signal(self, subprocesses, signum):
+        for sp in subprocesses:
+            sp.send_signal(signum)
+        self.sig = signum
+
+# http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
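+# e.g. flatten([1, [2, (3, 4)]]) returns [1, 2, 3, 4]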
+def flatten(l, ltypes=(list, tuple)):
+    ltype = type(l)
+    l = list(l)
+    i = 0
+    while i < len(l):
+        while isinstance(l[i], ltypes):
+            if not l[i]:
+                l.pop(i)
+                i -= 1
+                break
+            else:
+                l[i:i + 1] = l[i]
+        i += 1
+    return ltype(l)
+
+def add_to_group(gr, match):
+    m = match.groups()
+    if m not in gr:
+        gr[m] = []
+    gr[m].append(match.group(0))
+
+class EvaluationError(Exception):
+    pass
+
+# Return a tuple (var, items): the name of the variable ('var') that will
+# take on each value in 'items' when performing an inner substitution.
+def var_items(p, c, key):
+    if key not in c:
+        raise EvaluationError("'%s' was expected in 'p' but is missing" % key)
+
+    if "var" in c:
+        if not isinstance(c["var"], basestring):
+            raise EvaluationError("Value of 'var' must be a string")
+        # Var specifies the variable name for inner parameter substitution
+        return (c["var"], get_items(p, c[key]))
+    else:
+        # The component function ('key') value is a list, so return the list
+        # directly with no parameter selected.
+        if isinstance(c[key], list):
+            return (None, get_items(p, c[key]))
+        elif isinstance(c[key], basestring):
+            # check if c[key] is a string that looks like a parameter
+            m = re.match("^\$\((.*)\)$", c[key])
+            if m and m.group(1) in p:
+                return (m.group(1), get_items(p, c[key]))
+            else:
+                # backwards compatible, foreach specifies bare parameter name to use
+                return (c[key], get_items(p, p[c[key]]))
+        else:
+            raise EvaluationError("Value of '%s' must be a string or list" % key)
+
+# "p" is the parameter scope, "c" is the item to be expanded.
+# If "c" is a dict, apply function expansion.
+# If "c" is a list, recursively expand each item and return a new list.
+# If "c" is a string, apply parameter substitution
+def expand_item(p, c):
+    if isinstance(c, dict):
+        if "foreach" in c and "command" in c:
+            # Expand a command template for each item in the specified user
+            # parameter
+            var, items = var_items(p, c, "foreach")
+            if var is None:
+                raise EvaluationError("Must specify 'var' in foreach")
+            r = []
+            for i in items:
+                params = copy.copy(p)
+                params[var] = i
+                r.append(expand_item(params, c["command"]))
+            return r
+        elif "list" in c and "index" in c and "command" in c:
+            # extract a single item from a list
+            var, items = var_items(p, c, "list")
+            if var is None:
+                raise EvaluationError("Must specify 'var' in list")
+            params = copy.copy(p)
+            params[var] = items[int(c["index"])]
+            return expand_item(params, c["command"])
+        elif "regex" in c:
+            pattern = re.compile(c["regex"])
+            if "filter" in c:
+                # filter list so that it only includes items that match a
+                # regular expression
+                _, items = var_items(p, c, "filter")
+                return [i for i in items if pattern.match(i)]
+            elif "group" in c:
+                # generate a list of lists, where items are grouped on common
+                # subexpression match
+                _, items = var_items(p, c, "group")
+                groups = {}
+                for i in items:
+                    match = pattern.match(i)
+                    if match:
+                        add_to_group(groups, match)
+                return [groups[k] for k in groups]
+            elif "extract" in c:
+                # generate a list of lists, where items are split by
+                # subexpression match
+                _, items = var_items(p, c, "extract")
+                r = []
+                for i in items:
+                    match = pattern.match(i)
+                    if match:
+                        r.append(list(match.groups()))
+                return r
+        elif "batch" in c and "size" in c:
+            # generate a list of lists, where items are split into a batch size
+            _, items = var_items(p, c, "batch")
+            sz = int(c["size"])
+            r = []
+            for j in xrange(0, len(items), sz):
+                r.append(items[j:j+sz])
+            return r
+        raise EvaluationError("Missing valid list context function")
+    elif isinstance(c, list):
+        return [expand_item(p, arg) for arg in c]
+    elif isinstance(c, basestring):
+        m = re.match("^\$\((.*)\)$", c)
+        if m and m.group(1) in p:
+            return expand_item(p, p[m.group(1)])
+        else:
+            return subst.do_substitution(p, c)
+    else:
+        raise EvaluationError("expand_item() unexpected parameter type %s" % type(c))
+
+# Evaluate in a list context
+# "p" is the parameter scope, "value" will be evaluated
+# if "value" is a list after expansion, return that
+# if "value" is a path to a directory, return a list consisting of each entry in the directory
+# if "value" is a path to a file, return a list consisting of each line of the file
+def get_items(p, value):
+    value = expand_item(p, value)
+    if isinstance(value, list):
+        return value
+    elif isinstance(value, basestring):
+        mode = os.stat(value).st_mode
+        if mode is not None:
+            if stat.S_ISDIR(mode):
+                items = [os.path.join(value, l) for l in os.listdir(value)]
+            elif stat.S_ISREG(mode):
+                with open(value) as f:
+                    items = [line.rstrip("\r\n") for line in f]
+            return items
+    raise EvaluationError("get_items did not yield a list")
+
+stdoutname = None
+stdoutfile = None
+stdinname = None
+stdinfile = None
+
+# Construct the cross product of all values of each variable listed in fvars
+def recursive_foreach(params, fvars):
+    var = fvars[0]
+    fvars = fvars[1:]
+    items = get_items(params, params[var])
+    logger.info("parallelizing on %s with items %s" % (var, items))
+    if items is not None:
+        for i in items:
+            params = copy.copy(params)
+            params[var] = i
+            if len(fvars) > 0:
+                recursive_foreach(params, fvars)
+            else:
+                if not args.dry_run:
+                    arvados.api().job_tasks().create(body={
+                        'job_uuid': arvados.current_job()['uuid'],
+                        'created_by_job_task_uuid': arvados.current_task()['uuid'],
+                        'sequence': 1,
+                        'parameters': params
+                    }).execute()
+                else:
+                    if isinstance(params["command"][0], list):
+                        for c in params["command"]:
+                            logger.info(flatten(expand_item(params, c)))
+                    else:
+                        logger.info(flatten(expand_item(params, params["command"])))
+    else:
+        logger.error("parameter %s with value %s in task.foreach yielded no items" % (var, params[var]))
+        sys.exit(1)
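+
+# e.g. (hypothetical) "task.foreach": ["a", "b"] with a = ["1", "2"] and
+# b = ["3", "4"] queues four tasks, one per (a, b) combination.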
+
+try:
+    if "task.foreach" in jobp:
+        if args.dry_run or arvados.current_task()['sequence'] == 0:
+            # This is the first task: create the other tasks, then exit.
+            fvars = jobp["task.foreach"]
+            if isinstance(fvars, basestring):
+                fvars = [fvars]
+            if not isinstance(fvars, list) or len(fvars) == 0:
+                logger.error("value of task.foreach must be a string or non-empty list")
+                sys.exit(1)
+            recursive_foreach(jobp, jobp["task.foreach"])
+            if not args.dry_run:
+                if "task.vwd" in jobp:
+                    # Set output of the first task to the base vwd collection so it
+                    # will be merged with output fragments from the other tasks by
+                    # crunch.
+                    arvados.current_task().set_output(subst.do_substitution(jobp, jobp["task.vwd"]))
+                else:
+                    arvados.current_task().set_output(None)
+            sys.exit(0)
+    else:
+        # This is the only task so taskp/jobp are the same
+        taskp = jobp
+except Exception as e:
+    logger.exception("caught exception")
+    logger.error("job parameters were:")
+    logger.error(pprint.pformat(jobp))
+    sys.exit(1)
+
+try:
+    if not args.dry_run:
+        if "task.vwd" in taskp:
+            # Populate output directory with symlinks to files in collection
+            vwd.checkout(subst.do_substitution(taskp, taskp["task.vwd"]), outdir)
+
+        if "task.cwd" in taskp:
+            os.chdir(subst.do_substitution(taskp, taskp["task.cwd"]))
+
+    cmd = []
+    if isinstance(taskp["command"][0], list):
+        for c in taskp["command"]:
+            cmd.append(flatten(expand_item(taskp, c)))
+    else:
+        cmd.append(flatten(expand_item(taskp, taskp["command"])))
+
+    if "task.stdin" in taskp:
+        stdinname = subst.do_substitution(taskp, taskp["task.stdin"])
+        if not args.dry_run:
+            stdinfile = open(stdinname, "rb")
+
+    if "task.stdout" in taskp:
+        stdoutname = subst.do_substitution(taskp, taskp["task.stdout"])
+        if not args.dry_run:
+            stdoutfile = open(stdoutname, "wb")
+
+    if "task.env" in taskp:
+        env = copy.copy(os.environ)
+        for k,v in taskp["task.env"].items():
+            env[k] = subst.do_substitution(taskp, v)
+    else:
+        env = None
+
+    logger.info("{}{}{}".format(' | '.join([' '.join(c) for c in cmd]), (" < " + stdinname) if stdinname is not None else "", (" > " + stdoutname) if stdoutname is not None else ""))
+
+    if args.dry_run:
+        sys.exit(0)
+except subst.SubstitutionError as e:
+    logger.error(str(e))
+    logger.error("task parameters were:")
+    logger.error(pprint.pformat(taskp))
+    sys.exit(1)
+except Exception as e:
+    logger.exception("caught exception")
+    logger.error("task parameters were:")
+    logger.error(pprint.pformat(taskp))
+    sys.exit(1)
+
+# rcode holds the return codes produced by each subprocess
+rcode = {}
+try:
+    subprocesses = []
+    close_streams = []
+    if stdinfile:
+        close_streams.append(stdinfile)
+    next_stdin = stdinfile
+
+    for i in xrange(len(cmd)):
+        if i == len(cmd)-1:
+            # this is the last command in the pipeline, so its stdout should go to stdoutfile
+            next_stdout = stdoutfile
+        else:
+            # this is an intermediate command in the pipeline, so its stdout should go to a pipe
+            next_stdout = subprocess.PIPE
+
+        sp = subprocess.Popen(cmd[i], shell=False, stdin=next_stdin, stdout=next_stdout, env=env)
+
+        # Need to close the FDs on our side so that subcommands will get SIGPIPE if the
+        # consuming process ends prematurely.
+        if sp.stdout:
+            close_streams.append(sp.stdout)
+
+        # Send this process's stdout to the next process's stdin
+        next_stdin = sp.stdout
+
+        subprocesses.append(sp)
+
+    # File descriptors have been handed off to the subprocesses, so close them here.
+    for s in close_streams:
+        s.close()
+
+    # Set up signal handling
+    sig = SigHandler()
+
+    # Forward terminate signals to the subprocesses.
+    signal.signal(signal.SIGINT, lambda signum, frame: sig.send_signal(subprocesses, signum))
+    signal.signal(signal.SIGTERM, lambda signum, frame: sig.send_signal(subprocesses, signum))
+    signal.signal(signal.SIGQUIT, lambda signum, frame: sig.send_signal(subprocesses, signum))
+
+    pids = set([s.pid for s in subprocesses])
+    while len(pids) > 0:
+        try:
+            (pid, status) = os.wait()
+        except OSError as e:
+            if e.errno == errno.EINTR:
+                pass
+            else:
+                raise
+        else:
+            pids.discard(pid)
+            if not taskp.get("task.ignore_rcode"):
+                rcode[pid] = (status >> 8)
+            else:
+                rcode[pid] = 0
+
+    if sig.sig is not None:
+        logger.critical("terminating on signal %s" % sig.sig)
+        sys.exit(2)
+    else:
+        for i in xrange(len(cmd)):
+            r = rcode[subprocesses[i].pid]
+            logger.info("%s completed with exit code %i (%s)" % (cmd[i][0], r, "success" if r == 0 else "failed"))
+
+except Exception as e:
+    logger.exception("caught exception")
+
+# restore default signal handlers.
+signal.signal(signal.SIGINT, signal.SIG_DFL)
+signal.signal(signal.SIGTERM, signal.SIG_DFL)
+signal.signal(signal.SIGQUIT, signal.SIG_DFL)
+
+logger.info("the following output files will be saved to keep:")
+
+subprocess.call(["find", "-L", ".", "-type", "f", "-printf", "run-command: %12.12s %h/%f\\n"], stdout=sys.stderr, cwd=outdir)
+
+logger.info("start writing output to keep")
+
+if "task.vwd" in taskp and "task.foreach" in jobp:
+    for root, dirs, files in os.walk(outdir):
+        for f in files:
+            s = os.lstat(os.path.join(root, f))
+            if stat.S_ISLNK(s.st_mode):
+                os.unlink(os.path.join(root, f))
+
+(outcollection, checkin_error) = vwd.checkin(outdir)
+
+# Success if we ran any subprocess, and they all exited 0.
+success = rcode and all(status == 0 for status in rcode.itervalues()) and not checkin_error
+
+api.job_tasks().update(uuid=arvados.current_task()['uuid'],
+                       body={
+                           'output': outcollection.manifest_text(),
+                           'success': success,
+                           'progress': 1.0
+                       }).execute()
+
+sys.exit(0 if success else 1)
diff --git a/crunch_scripts/split-fastq.py b/crunch_scripts/split-fastq.py
new file mode 100755 (executable)
index 0000000..61c384f
--- /dev/null
@@ -0,0 +1,70 @@
+#!/usr/bin/python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import re
+import hashlib
+import string
+import sys
+
+api = arvados.api('v1')
+
+piece = 0
+manifest_text = ""
+
+# Look for paired reads
+
+inp = arvados.CollectionReader(arvados.getjobparam('reads'))
+
+manifest_list = []
+
+def nextline(reader, start):
+    n = -1
+    while True:
+        r = reader.readfrom(start, 128)
+        if r == '':
+            break
+        n = string.find(r, "\n")
+        if n > -1:
+            break
+        else:
+            start += 128
+    return n
+
+prog = re.compile(r'(.*?)(_[12])?\.fastq(\.gz)?$')
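+# e.g. "sample_1.fastq.gz" matches with groups ("sample", "_1", ".gz")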
+
+# Look for fastq files
+for s in inp.all_streams():
+    for f in s.all_files():
+        name_pieces = prog.match(f.name())
+        if name_pieces is not None:
+            if s.name() != ".":
+                # The downstream tool (run-command) only iterates over the top
+                # level of directories so if there are fastq files in
+                # directories in the input, the choice is either to forget
+                # there are directories (which might lead to name conflicts) or
+                # just fail.
+                print >>sys.stderr, "fastq must be at the root of the collection"
+                sys.exit(1)
+
+            p = None
+            if name_pieces.group(2) is not None:
+                if name_pieces.group(2) == "_1":
+                    p = [{}, {}]
+                    p[0]["reader"] = s.files()[name_pieces.group(0)]
+                    p[1]["reader"] = s.files()[name_pieces.group(1) + "_2.fastq" + (name_pieces.group(3) if name_pieces.group(3) else '')]
+            else:
+                p = [{}]
+                p[0]["reader"] = s.files()[name_pieces.group(0)]
+
+            if p is not None:
+                for i in xrange(0, len(p)):
+                    m = p[i]["reader"].as_manifest().split()
+                    m[0] = "./_" + str(piece)
+                    manifest_list.append(m)
+                piece += 1
+
+manifest_text = "\n".join(" ".join(m) for m in manifest_list) + "\n"
+
+arvados.current_task().set_output(manifest_text)
diff --git a/crunch_scripts/test/task_output_dir b/crunch_scripts/test/task_output_dir
new file mode 100755 (executable)
index 0000000..8b2c7ce
--- /dev/null
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import arvados.crunch
+import hashlib
+import os
+
+out = arvados.crunch.TaskOutputDir()
+
+string = open(__file__).read()
+with open(os.path.join(out.path, 'example.out'), 'w') as f:
+    f.write(string)
+with open(os.path.join(out.path, 'example.out.SHA1'), 'w') as f:
+    f.write(hashlib.sha1(string).hexdigest() + "\n")
+
+arvados.current_task().set_output(out.manifest_text())
diff --git a/doc/Gemfile b/doc/Gemfile
new file mode 100644 (file)
index 0000000..502be88
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+source 'https://rubygems.org'
+
+gem 'zenweb'
+gem 'liquid'
+gem 'RedCloth'
+gem 'colorize'
diff --git a/doc/Gemfile.lock b/doc/Gemfile.lock
new file mode 100644 (file)
index 0000000..344a0a8
--- /dev/null
@@ -0,0 +1,34 @@
+GEM
+  remote: https://rubygems.org/
+  specs:
+    RedCloth (4.2.9)
+    coderay (1.1.0)
+    colorize (0.6.0)
+    kramdown (1.3.1)
+    less (1.2.21)
+      mutter (>= 0.4.2)
+      treetop (>= 1.4.2)
+    liquid (2.6.1)
+    makerakeworkwell (1.0.3)
+      rake (>= 0.9.2, < 11)
+    mutter (0.5.3)
+    polyglot (0.3.3)
+    rake (10.1.1)
+    treetop (1.4.15)
+      polyglot
+      polyglot (>= 0.3.1)
+    zenweb (3.3.1)
+      coderay (~> 1.0)
+      kramdown (~> 1.0)
+      less (~> 1.2)
+      makerakeworkwell (~> 1.0)
+      rake (>= 0.9, < 11)
+
+PLATFORMS
+  ruby
+
+DEPENDENCIES
+  RedCloth
+  colorize
+  liquid
+  zenweb
diff --git a/doc/README.textile b/doc/README.textile
new file mode 100644 (file)
index 0000000..75a30e9
--- /dev/null
@@ -0,0 +1,74 @@
+###. Copyright (C) The Arvados Authors. All rights reserved.
+....
+.... SPDX-License-Identifier: CC-BY-SA-3.0
+
+h1. Arvados documentation
+
+This is the source code for "doc.arvados.org":http://doc.arvados.org.
+
+Here's how to build the HTML pages locally so you can preview your updates before you commit and push.
+
+Additional information is available on the "'Documentation' page on the Arvados wiki":https://dev.arvados.org/projects/arvados/wiki/Documentation.
+
+h2. Install dependencies
+
+<pre>
+arvados/doc$ bundle install
+arvados/doc$ pip install epydoc
+</pre>
+
+h2. Generate HTML pages
+
+<pre>
+arvados/doc$ rake
+</pre>
+
+Alternatively, to make the documentation browsable on the local filesystem:
+
+<pre>
+arvados/doc$ rake generate baseurl=$PWD/.site
+</pre>
+
+h2. Run linkchecker
+
+If you have "Linkchecker":http://wummel.github.io/linkchecker/ installed on
+your system, you can run it against the documentation:
+
+<pre>
+arvados/doc$ rake linkchecker baseurl=file://$PWD/.site
+</pre>
+
+Please note that this will regenerate your $PWD/.site directory.
+
+h2. Preview HTML pages
+
+<pre>
+arvados/doc$ rake run
+[2014-03-10 09:03:41] INFO  WEBrick 1.3.1
+[2014-03-10 09:03:41] INFO  ruby 2.1.1 (2014-02-24) [x86_64-linux]
+[2014-03-10 09:03:41] INFO  WEBrick::HTTPServer#start: pid=8926 port=8000
+</pre>
+
+Preview the rendered pages at "http://localhost:8000":http://localhost:8000.
+
+h2. Publish HTML pages inside Workbench (or another web site)
+
+You can set @baseurl@ (the URL prefix for all internal links), @arvados_cluster_uuid@, @arvados_api_host@ and @arvados_workbench_host@ without changing @_config.yml@:
+
+<pre>
+arvados/doc$ rake generate baseurl=/doc arvados_api_host=xyzzy.arvadosapi.com
+</pre>
+
+Make the docs appear at {workbench_host}/doc by creating a symbolic link in Workbench's @public@ directory, pointing to the generated HTML tree.
+
+<pre>
+arvados/doc$ ln -sn ../../../doc/.site ../apps/workbench/public/doc
+</pre>
+
+h2. Delete generated files
+
+<pre>
+arvados/doc$ rake realclean
+</pre>
diff --git a/doc/Rakefile b/doc/Rakefile
new file mode 100644 (file)
index 0000000..9deca3a
--- /dev/null
@@ -0,0 +1,112 @@
+#!/usr/bin/env rake
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+require "rubygems"
+require "colorize"
+
+task :generate => [ :realclean, 'sdk/python/arvados/index.html', 'sdk/R/arvados/index.html' ] do
+  vars = ['baseurl', 'arvados_cluster_uuid', 'arvados_api_host', 'arvados_workbench_host']
+  vars.each do |v|
+    if ENV[v]
+      website.config.h[v] = ENV[v]
+    end
+  end
+end
+
+file "sdk/python/arvados/index.html" do |t|
+  `which epydoc`
+  if $? == 0
+    STDERR.puts `epydoc --html --parse-only -o sdk/python/arvados ../sdk/python/arvados/ 2>&1`
+    raise if $? != 0
+  else
+    puts "Warning: epydoc not found, Python documentation will not be generated".colorize(:light_red)
+  end
+end
+
+file "sdk/R/arvados/index.html" do |t|
+  `which R`
+  if $? == 0
+    tgt = Dir.pwd
+    Dir.mkdir("sdk/R")
+    Dir.mkdir("sdk/R/arvados")
+    docfiles = []
+    Dir.chdir("../sdk/R/") do
+      STDERR.puts `Rscript createDoc.R README.Rmd #{tgt}/sdk/R/README.md 2>&1`
+      Dir.entries("man").each do |rd|
+        if rd[-3..-1] == ".Rd"
+          htmlfile = "#{rd[0..-4]}.html"
+          `R CMD Rdconv -t html man/#{rd} > #{tgt}/sdk/R/arvados/#{htmlfile}`
+          docfiles << htmlfile
+        end
+      end
+    end
+    raise if $? != 0
+
+    File.open("sdk/R/README.md", "r") do |rd|
+    File.open("sdk/R/index.html.md", "w") do |fn|
+      fn.write(<<-EOF
+---
+layout: default
+navsection: sdk
+navmenu: R
+title: "R SDK Overview"
+...
+
+#{rd.read.gsub(/^```$/, "~~~").gsub(/^```(\w)$/, "~~~\\1")}
+EOF
+              )
+      end
+    end
+
+    File.open("sdk/R/arvados/index.html.textile.liquid", "w") do |fn|
+      fn.write(<<-EOF
+---
+layout: default
+navsection: sdk
+navmenu: R
+title: "R Reference"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+EOF
+              )
+
+      docfiles.sort.each do |d|
+        fn.write("* \"#{d[0..-6]}\":#{d}\n")
+      end
+
+    end
+  else
+    puts "Warning: R not found, R documentation will not be generated".colorize(:light_red)
+  end
+end
+
+task :linkchecker => [ :generate ] do
+  Dir.chdir(".site") do
+    `which linkchecker`
+    if $? == 0
+      system "linkchecker index.html --ignore-url='!file://'" or exit $?.exitstatus
+    else
+      puts "Warning: linkchecker not found, skipping run".colorize(:light_red)
+    end
+  end
+end
+
+task :clean do
+  rm_rf "sdk/python/arvados"
+  rm_rf "sdk/R"
+end
+
+require "zenweb/tasks"
+load "zenweb-textile.rb"
+load "zenweb-liquid.rb"
+
+task :extra_wirings do
+  $website.pages["sdk/python/python.html.textile.liquid"].depends_on("sdk/python/arvados/index.html")
+end
diff --git a/doc/_config.yml b/doc/_config.yml
new file mode 100644 (file)
index 0000000..1e17d04
--- /dev/null
@@ -0,0 +1,217 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+# baseurl is the location of the generated site from the browser's
+# perspective (e.g., http://doc.arvados.org or
+# file:///tmp/arvados/doc/.site). To make docs show up inside
+# workbench, use /doc here and add a symlink at
+# apps/workbench/public/doc pointing to ../../../doc/.site
+# You can also set these on the command line:
+# $ rake generate baseurl=/example arvados_api_host=example.com
+
+baseurl:
+arvados_api_host: localhost
+arvados_cluster_uuid: local
+arvados_workbench_host: http://localhost
+
+exclude: ["Rakefile", "tmp", "vendor"]
+
+navbar:
+  userguide:
+    - Welcome:
+      - user/index.html.textile.liquid
+      - user/getting_started/community.html.textile.liquid
+    - Run a workflow using Workbench:
+      - user/getting_started/workbench.html.textile.liquid
+      - user/tutorials/tutorial-workflow-workbench.html.textile.liquid
+      - user/composer/composer.html.textile.liquid
+    - Access an Arvados virtual machine:
+      - user/getting_started/vm-login-with-webshell.html.textile.liquid
+      - user/getting_started/ssh-access-unix.html.textile.liquid
+      - user/getting_started/ssh-access-windows.html.textile.liquid
+      - user/getting_started/check-environment.html.textile.liquid
+      - user/reference/api-tokens.html.textile.liquid
+    - Working with data sets:
+      - user/tutorials/tutorial-keep.html.textile.liquid
+      - user/tutorials/tutorial-keep-get.html.textile.liquid
+      - user/tutorials/tutorial-keep-mount-gnu-linux.html.textile.liquid
+      - user/tutorials/tutorial-keep-mount-os-x.html.textile.liquid
+      - user/tutorials/tutorial-keep-mount-windows.html.textile.liquid
+      - user/topics/keep.html.textile.liquid
+      - user/tutorials/tutorial-keep-collection-lifecycle.html.textile.liquid
+      - user/topics/arv-copy.html.textile.liquid
+      - user/topics/storage-classes.html.textile.liquid
+      - user/topics/collection-versioning.html.textile.liquid
+    - Running workflows at the command line:
+      - user/cwl/cwl-runner.html.textile.liquid
+      - user/cwl/cwl-run-options.html.textile.liquid
+    - Working with git repositories:
+      - user/tutorials/add-new-repository.html.textile.liquid
+      - user/tutorials/git-arvados-guide.html.textile.liquid
+    - Develop an Arvados workflow:
+      - user/tutorials/intro-crunch.html.textile.liquid
+      - user/tutorials/writing-cwl-workflow.html.textile.liquid
+      - user/cwl/federated-workflows.html.textile.liquid
+      - user/cwl/cwl-style.html.textile.liquid
+      - user/cwl/cwl-extensions.html.textile.liquid
+      - user/topics/arv-docker.html.textile.liquid
+    - Reference:
+      - user/topics/link-accounts.html.textile.liquid
+      - user/reference/cookbook.html.textile.liquid
+    - Arvados License:
+      - user/copying/copying.html.textile.liquid
+      - user/copying/agpl-3.0.html
+      - user/copying/LICENSE-2.0.html
+      - user/copying/by-sa-3.0.html
+    - Obsolete documentation:
+      - user/topics/running-pipeline-command-line.html.textile.liquid
+      - user/topics/arv-run.html.textile.liquid
+      - user/tutorials/running-external-program.html.textile.liquid
+      - user/topics/crunch-tools-overview.html.textile.liquid
+      - user/tutorials/tutorial-firstscript.html.textile.liquid
+      - user/tutorials/tutorial-submit-job.html.textile.liquid
+      - user/topics/tutorial-parallel.html.textile.liquid
+      - user/topics/run-command.html.textile.liquid
+      - user/reference/job-pipeline-ref.html.textile.liquid
+      - user/examples/crunch-examples.html.textile.liquid
+      - user/topics/tutorial-trait-search.html.textile.liquid
+  sdk:
+    - Overview:
+      - sdk/index.html.textile.liquid
+    - Python:
+      - sdk/python/sdk-python.html.textile.liquid
+      - sdk/python/example.html.textile.liquid
+      - sdk/python/python.html.textile.liquid
+      - sdk/python/arvados-fuse.html.textile.liquid
+      - sdk/python/events.html.textile.liquid
+      - sdk/python/cookbook.html.textile.liquid
+      - sdk/python/crunch-utility-libraries.html.textile.liquid
+    - CLI:
+      - sdk/cli/install.html.textile.liquid
+      - sdk/cli/index.html.textile.liquid
+      - sdk/cli/reference.html.textile.liquid
+      - sdk/cli/subcommands.html.textile.liquid
+    - Go:
+      - sdk/go/index.html.textile.liquid
+      - sdk/go/example.html.textile.liquid
+    - R:
+      - sdk/R/index.html.md
+      - sdk/R/arvados/index.html.textile.liquid
+    - Perl:
+      - sdk/perl/index.html.textile.liquid
+      - sdk/perl/example.html.textile.liquid
+    - Ruby:
+      - sdk/ruby/index.html.textile.liquid
+      - sdk/ruby/example.html.textile.liquid
+    - Java:
+      - sdk/java/index.html.textile.liquid
+      - sdk/java/example.html.textile.liquid
+  api:
+    - Concepts:
+      - api/index.html.textile.liquid
+      - api/tokens.html.textile.liquid
+      - api/requests.html.textile.liquid
+      - api/methods.html.textile.liquid
+      - api/resources.html.textile.liquid
+    - Permission and authentication:
+      - api/methods/api_client_authorizations.html.textile.liquid
+      - api/methods/api_clients.html.textile.liquid
+      - api/methods/authorized_keys.html.textile.liquid
+      - api/methods/groups.html.textile.liquid
+      - api/methods/users.html.textile.liquid
+    - System resources:
+      - api/methods/keep_services.html.textile.liquid
+      - api/methods/links.html.textile.liquid
+      - api/methods/logs.html.textile.liquid
+      - api/methods/nodes.html.textile.liquid
+      - api/methods/virtual_machines.html.textile.liquid
+      - api/methods/keep_disks.html.textile.liquid
+    - Data management:
+      - api/methods/collections.html.textile.liquid
+      - api/methods/repositories.html.textile.liquid
+    - Container engine:
+      - api/methods/container_requests.html.textile.liquid
+      - api/methods/containers.html.textile.liquid
+      - api/methods/workflows.html.textile.liquid
+    - Jobs engine (deprecated):
+      - api/crunch-scripts.html.textile.liquid
+      - api/methods/jobs.html.textile.liquid
+      - api/methods/job_tasks.html.textile.liquid
+      - api/methods/pipeline_instances.html.textile.liquid
+      - api/methods/pipeline_templates.html.textile.liquid
+    - Metadata for bioinformatics:
+      - api/methods/humans.html.textile.liquid
+      - api/methods/specimens.html.textile.liquid
+      - api/methods/traits.html.textile.liquid
+  architecture:
+    - Topics:
+      - architecture/index.html.textile.liquid
+      - api/storage.html.textile.liquid
+      - api/execution.html.textile.liquid
+      - api/permission-model.html.textile.liquid
+      - architecture/federation.html.textile.liquid
+  admin:
+    - Topics:
+      - admin/index.html.textile.liquid
+    - Upgrading and migrations:
+      - admin/upgrading.html.textile.liquid
+      - install/migrate-docker19.html.textile.liquid
+      - admin/upgrade-crunch2.html.textile.liquid
+    - Users and Groups:
+      - install/cheat_sheet.html.textile.liquid
+      - admin/activation.html.textile.liquid
+      - admin/merge-remote-account.html.textile.liquid
+      - admin/migrating-providers.html.textile.liquid
+      - user/topics/arvados-sync-groups.html.textile.liquid
+    - Monitoring:
+      - admin/health-checks.html.textile.liquid
+      - admin/metrics.html.textile.liquid
+      - admin/management-token.html.textile.liquid
+    - Cloud:
+      - admin/storage-classes.html.textile.liquid
+      - admin/spot-instances.html.textile.liquid
+    - Other:
+      - admin/collection-versioning.html.textile.liquid
+      - admin/federation.html.textile.liquid
+  installguide:
+    - Overview:
+      - install/index.html.textile.liquid
+    - Docker quick start:
+      - install/arvbox.html.textile.liquid
+    - Arvados on Kubernetes:
+      - install/arvados-on-kubernetes.html.textile.liquid
+    - Manual installation:
+      - install/install-manual-prerequisites.html.textile.liquid
+      - install/install-components.html.textile.liquid
+    - Core:
+      - install/install-postgresql.html.textile.liquid
+      - install/install-api-server.html.textile.liquid
+      - install/install-controller.html.textile.liquid
+    - Keep:
+      - install/install-keepstore.html.textile.liquid
+      - install/configure-fs-storage.html.textile.liquid
+      - install/configure-s3-object-storage.html.textile.liquid
+      - install/configure-azure-blob-storage.html.textile.liquid
+      - install/install-keepproxy.html.textile.liquid
+      - install/install-keep-web.html.textile.liquid
+      - install/install-keep-balance.html.textile.liquid
+    - User interface:
+      - install/install-sso.html.textile.liquid
+      - install/install-workbench-app.html.textile.liquid
+      - install/install-composer.html.textile.liquid
+    - Additional services:
+      - install/install-ws.html.textile.liquid
+      - install/install-shell-server.html.textile.liquid
+      - install/install-arv-git-httpd.html.textile.liquid
+    - Containers API support on SLURM:
+      - install/crunch2-slurm/install-prerequisites.html.textile.liquid
+      - install/crunch2-slurm/install-slurm.html.textile.liquid
+      - install/crunch2-slurm/install-compute-node.html.textile.liquid
+      - install/crunch2-slurm/install-dispatch.html.textile.liquid
+      - install/crunch2-slurm/install-test.html.textile.liquid
+      - install/install-nodemanager.html.textile.liquid
+      - install/install-compute-ping.html.textile.liquid
+    - Jobs API support (deprecated):
+      - install/install-crunch-dispatch.html.textile.liquid
+      - install/install-compute-node.html.textile.liquid
diff --git a/doc/_includes/_0_filter_py.liquid b/doc/_includes/_0_filter_py.liquid
new file mode 100644 (file)
index 0000000..ff055db
--- /dev/null
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+# Import the Arvados sdk module
+import arvados
+
+# Get information about the task from the environment
+this_task = arvados.current_task()
+
+this_task_input = arvados.current_job()['script_parameters']['input']
+
+# Create the object access to the collection referred to in the input
+collection = arvados.CollectionReader(this_task_input)
+
+# Create an object to write a new collection as output
+out = arvados.CollectionWriter()
+
+# Create a new file in the output collection
+with out.open('0-filter.txt') as out_file:
+    # Iterate over every input file in the input collection
+    for input_file in collection.all_files():
+        # Output every line in the file that starts with '0'
+        out_file.writelines(line for line in input_file if line.startswith('0'))
+
+# Commit the output to Keep.
+output_locator = out.finish()
+
+# Use the resulting locator as the output for this task.
+this_task.set_output(output_locator)
+
+# Done!
diff --git a/doc/_includes/_alert-incomplete.liquid b/doc/_includes/_alert-incomplete.liquid
new file mode 100644 (file)
index 0000000..8a62ec7
--- /dev/null
@@ -0,0 +1,11 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+<div class="alert alert-block alert-info">
+  <button type="button" class="close" data-dismiss="alert">&times;</button>
+  <h4>Hi!</h4>
+  <P>This section is incomplete. Please be patient with us as we fill in the blanks &mdash; or <A href="https://dev.arvados.org/projects/arvados/wiki/Documentation#Contributing">contribute to the documentation project.</A></P>
+</div>
diff --git a/doc/_includes/_alert_stub.liquid b/doc/_includes/_alert_stub.liquid
new file mode 100644 (file)
index 0000000..dd56f17
--- /dev/null
@@ -0,0 +1,11 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+<div class="alert alert-block alert-info">
+  <button type="button" class="close" data-dismiss="alert">&times;</button>
+  <h4>Hi!</h4>
+  <p>This section is incomplete. Please be patient with us as we fill in the blanks &mdash; or <A href="https://dev.arvados.org/projects/arvados/wiki/Documentation#Contributing">contribute to the documentation project.</A></p>
+</div>
diff --git a/doc/_includes/_arv_copy_expectations.liquid b/doc/_includes/_arv_copy_expectations.liquid
new file mode 100644 (file)
index 0000000..2231b06
--- /dev/null
@@ -0,0 +1,12 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'notebox_begin' %}
+As stated above, arv-copy is recursive by default and requires a working git repository in the destination cluster. If you have not yet created a repository, follow the "Adding a new repository":{{site.baseurl}}/user/tutorials/add-new-repository.html page. We will use the *tutorial* repository created on that page as the example.
+
+<br/>In addition, arv-copy requires git when copying to a git repository. Please make sure that git is installed and available.
+
+{% include 'notebox_end' %}
diff --git a/doc/_includes/_arv_run_redirection.liquid b/doc/_includes/_arv_run_redirection.liquid
new file mode 100644 (file)
index 0000000..663de0b
--- /dev/null
@@ -0,0 +1,27 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+<notextile>
+<pre>
+$ <span class="userinput">cd ~/keep/by_id/3229739b505d2b878b62aed09895a55a+142</span>
+$ <span class="userinput">ls *.fastq</span>
+$ <span class="userinput">arv-run grep -H -n ATTGGAGGAAAGATGAGTGAC \< *.fastq \> output.txt</span>
+[...]
+ 1 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC < /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq > output.txt
+ 2 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC < /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq > output.txt
+ 2 stderr run-command: completed with exit code 0 (success)
+ 2 stderr run-command: the following output files will be saved to keep:
+ 2 stderr run-command: 121 ./output.txt
+ 2 stderr run-command: start writing output to keep
+ 1 stderr run-command: completed with exit code 0 (success)
+ 1 stderr run-command: the following output files will be saved to keep:
+ 1 stderr run-command: 363 ./output.txt
+ 1 stderr run-command: start writing output to keep
+ 2 stderr upload wrote 121 total 121
+ 1 stderr upload wrote 363 total 363
+[..]
+</pre>
+</notextile>
diff --git a/doc/_includes/_compute_ping_rb.liquid b/doc/_includes/_compute_ping_rb.liquid
new file mode 100644 (file)
index 0000000..c0b21cd
--- /dev/null
@@ -0,0 +1,290 @@
+#!/usr/bin/env ruby
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+require 'rubygems'
+
+require 'cgi'
+require 'fileutils'
+require 'json'
+require 'net/https'
+require 'socket'
+require 'syslog'
+
+class ComputeNodePing
+  @@NODEDATA_DIR = "/var/tmp/arv-node-data"
+  @@PUPPET_CONFFILE = "/etc/puppet/puppet.conf"
+  @@HOST_STATEFILE = "/var/run/arvados-compute-ping-hoststate.json"
+
+  def initialize(args, stdout, stderr)
+    @stdout = stdout
+    @stderr = stderr
+    @stderr_loglevel = ((args.first == "quiet") ?
+                        Syslog::LOG_ERR : Syslog::LOG_DEBUG)
+    @puppet_disabled = false
+    @syslog = Syslog.open("arvados-compute-ping",
+                          Syslog::LOG_CONS | Syslog::LOG_PID,
+                          Syslog::LOG_DAEMON)
+    @puppetless = File.exist?('/compute-node.puppetless')
+
+    begin
+      prepare_ping
+      load_puppet_conf unless @puppetless
+      begin
+        @host_state = JSON.parse(IO.read(@@HOST_STATEFILE))
+      rescue Errno::ENOENT
+        @host_state = nil
+      end
+    rescue
+      @syslog.close
+      raise
+    end
+  end
+
+  def send
+    pong = send_raw_ping
+
+    if pong["hostname"] and pong["domain"] and pong["first_ping_at"]
+      if @host_state.nil?
+        @host_state = {
+          "fqdn" => (Socket.gethostbyname(Socket.gethostname).first rescue nil),
+          "resumed_slurm" =>
+            ["busy", "idle"].include?(pong["crunch_worker_state"]),
+        }
+        update_host_state({})
+      end
+
+      if hostname_changed?(pong)
+        disable_puppet unless @puppetless
+        rename_host(pong)
+        update_host_state("fqdn" => fqdn_from_pong(pong),
+                          "resumed_slurm" => false)
+      end
+
+      unless @host_state["resumed_slurm"]
+        run_puppet_agent unless @puppetless
+        resume_slurm_node(pong["hostname"])
+        update_host_state("resumed_slurm" => true)
+      end
+    end
+
+    log("Last ping at #{pong['last_ping_at']}")
+  end
+
+  def cleanup
+    enable_puppet if @puppet_disabled and not @puppetless
+    @syslog.close
+  end
+
+  private
+
+  def log(message, level=Syslog::LOG_INFO)
+    @syslog.log(level, message)
+    if level <= @stderr_loglevel
+      @stderr.write("#{Time.now.strftime("%Y-%m-%d %H:%M:%S")} #{message}\n")
+    end
+  end
+
+  def abort(message, code=1)
+    log(message, Syslog::LOG_ERR)
+    exit(code)
+  end
+
+  def run_and_check(cmd_a, accept_codes, io_opts, &block)
+    result = IO.popen(cmd_a, "r", io_opts, &block)
+    unless accept_codes.include?($?.exitstatus)
+      abort("#{cmd_a} exited #{$?.exitstatus}")
+    end
+    result
+  end
+
+  DEFAULT_ACCEPT_CODES=[0]
+  def check_output(cmd_a, accept_codes=DEFAULT_ACCEPT_CODES, io_opts={})
+    # Run a command, check the exit status, and return its stdout as a string.
+    run_and_check(cmd_a, accept_codes, io_opts) do |pipe|
+      pipe.read
+    end
+  end
+
+  def check_command(cmd_a, accept_codes=DEFAULT_ACCEPT_CODES, io_opts={})
+    # Run a command, send stdout to syslog, and check the exit status.
+    run_and_check(cmd_a, accept_codes, io_opts) do |pipe|
+      pipe.each_line do |line|
+        line.chomp!
+        log("#{cmd_a.first}: #{line}") unless line.empty?
+      end
+    end
+  end
+
+  def replace_file(path, body)
+    open(path, "w") { |f| f.write(body) }
+  end
+
+  def update_host_state(updates_h)
+    @host_state.merge!(updates_h)
+    replace_file(@@HOST_STATEFILE, @host_state.to_json)
+  end
+
+  def disable_puppet
+    check_command(["puppet", "agent", "--disable"])
+    @puppet_disabled = true
+    loop do
+      # Wait for any running puppet agents to finish.
+      check_output(["pgrep", "puppet"], 0..1)
+      break if $?.exitstatus == 1
+      sleep(1)
+    end
+  end
+
+  def enable_puppet
+    check_command(["puppet", "agent", "--enable"])
+    @puppet_disabled = false
+  end
+
+  def prepare_ping
+    begin
+      ping_uri_s = File.read(File.join(@@NODEDATA_DIR, "arv-ping-url"))
+    rescue Errno::ENOENT
+      abort("ping URL file is not present yet, skipping run")
+    end
+
+    ping_uri = URI.parse(ping_uri_s)
+    payload_h = CGI.parse(ping_uri.query)
+
+    # Collect all extra data to be sent
+    dirname = File.join(@@NODEDATA_DIR, "meta-data")
+    Dir.open(dirname).each do |basename|
+      filename = File.join(dirname, basename)
+      if File.file?(filename)
+        payload_h[basename.gsub('-', '_')] = File.read(filename).chomp
+      end
+    end
+
+    ping_uri.query = nil
+    @ping_req = Net::HTTP::Post.new(ping_uri.to_s)
+    @ping_req.set_form_data(payload_h)
+    @ping_client = Net::HTTP.new(ping_uri.host, ping_uri.port)
+    @ping_client.use_ssl = ping_uri.scheme == 'https'
+  end
+
+  def send_raw_ping
+    begin
+      response = @ping_client.start do |http|
+        http.request(@ping_req)
+      end
+      if response.is_a? Net::HTTPSuccess
+        pong = JSON.parse(response.body)
+      else
+        raise "response was a #{response}"
+      end
+    rescue JSON::ParserError => error
+      abort("Error sending ping: could not parse JSON response: #{error}")
+    rescue => error
+      abort("Error sending ping: #{error}")
+    end
+
+    replace_file(File.join(@@NODEDATA_DIR, "pong.json"), response.body)
+    if pong["errors"] then
+      log(pong["errors"].join("; "), Syslog::LOG_ERR)
+      if pong["errors"].grep(/Incorrect ping_secret/).any?
+        system("halt")
+      end
+      exit(1)
+    end
+    pong
+  end
+
+  def load_puppet_conf
+    # Parse Puppet configuration suitable for rewriting.
+    # Save certnames in @puppet_certnames.
+    # Save other functional configuration lines in @puppet_conf.
+    @puppet_conf = []
+    @puppet_certnames = []
+    open(@@PUPPET_CONFFILE, "r") do |conffile|
+      conffile.each_line do |line|
+        key, value = line.strip.split(/\s*=\s*/, 2)
+        if key == "certname"
+          @puppet_certnames << value
+        elsif not (key.nil? or key.empty? or key.start_with?("#"))
+          @puppet_conf << line
+        end
+      end
+    end
+  end
+
+  def fqdn_from_pong(pong)
+    "#{pong['hostname']}.#{pong['domain']}"
+  end
+
+  def certname_from_pong(pong)
+    fqdn = fqdn_from_pong(pong).sub(".", ".compute.")
+    "#{pong['first_ping_at'].gsub(':', '-').downcase}.#{fqdn}"
+  end
+
+  def hostname_changed?(pong)
+    if @puppetless
+      (@host_state["fqdn"] != fqdn_from_pong(pong))
+    else
+      (@host_state["fqdn"] != fqdn_from_pong(pong)) or
+        (@puppet_certnames != [certname_from_pong(pong)])
+    end
+  end
+
+  def rename_host(pong)
+    new_fqdn = fqdn_from_pong(pong)
+    log("Renaming host from #{@host_state["fqdn"]} to #{new_fqdn}")
+
+    replace_file("/etc/hostname", "#{new_fqdn.split('.', 2).first}\n")
+    check_output(["hostname", new_fqdn])
+
+    ip_address = check_output(["facter", "ipaddress"]).chomp
+    esc_address = Regexp.escape(ip_address)
+    check_command(["sed", "-i", "/etc/hosts",
+                   "-e", "s/^#{esc_address}.*$/#{ip_address}\t#{new_fqdn}/"])
+
+    unless @puppetless
+      new_conflines = @puppet_conf + ["\n[agent]\n",
+                                      "certname=#{certname_from_pong(pong)}\n"]
+      replace_file(@@PUPPET_CONFFILE, new_conflines.join(""))
+      FileUtils.remove_entry_secure("/var/lib/puppet/ssl")
+    end
+  end
+
+  def run_puppet_agent
+    log("Running puppet agent")
+    enable_puppet
+    check_command(["puppet", "agent", "--onetime", "--no-daemonize",
+                   "--no-splay", "--detailed-exitcodes",
+                   "--ignorecache", "--no-usecacheonfailure"],
+                  [0, 2], {err: [:child, :out]})
+  end
+
+  def resume_slurm_node(node_name)
+    current_state = check_output(["sinfo", "--noheader", "-o", "%t",
+                                  "-n", node_name]).chomp
+    if %w(down drain drng).include?(current_state)
+      log("Resuming node in SLURM")
+      check_command(["scontrol", "update", "NodeName=#{node_name}",
+                     "State=RESUME"], [0], {err: [:child, :out]})
+    end
+  end
+end
+
+LOCK_DIRNAME = "/var/lock/arvados-compute-node.lock"
+begin
+  Dir.mkdir(LOCK_DIRNAME)
+rescue Errno::EEXIST
+  exit(0)
+end
+
+ping_sender = nil
+begin
+  ping_sender = ComputeNodePing.new(ARGV, $stdout, $stderr)
+  ping_sender.send
+ensure
+  Dir.rmdir(LOCK_DIRNAME)
+  ping_sender.cleanup unless ping_sender.nil?
+end
diff --git a/doc/_includes/_concurrent_hash_script_py.liquid b/doc/_includes/_concurrent_hash_script_py.liquid
new file mode 100644 (file)
index 0000000..2c55298
--- /dev/null
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+import hashlib
+import os
+import arvados
+
+# Jobs consist of one or more tasks.  A task is a single invocation of
+# a crunch script.
+
+# Get the current task
+this_task = arvados.current_task()
+
+# Tasks have a sequence number for ordering.  All tasks
+# with the current sequence number must finish successfully
+# before tasks in the next sequence are started.
+# The first task has sequence number 0
+if this_task['sequence'] == 0:
+    # Get the "input" field from "script_parameters" on the task object
+    job_input = arvados.current_job()['script_parameters']['input']
+
+    # Create a collection reader to read the input
+    cr = arvados.CollectionReader(job_input)
+
+    # Loop over each stream in the collection (a stream is a subset of
+    # files that logically represents a directory)
+    for s in cr.all_streams():
+
+        # Loop over each file in the stream
+        for f in s.all_files():
+
+            # Synthesize a manifest for just this file
+            task_input = f.as_manifest()
+
+            # Set attributes for a new task:
+            # 'job_uuid' the job that this task is part of
+            # 'created_by_job_task_uuid' the task that is creating the new task
+            # 'sequence' the sequence number of the new task
+            # 'parameters' the parameters to be passed to the new task
+            new_task_attrs = {
+                'job_uuid': arvados.current_job()['uuid'],
+                'created_by_job_task_uuid': arvados.current_task()['uuid'],
+                'sequence': 1,
+                'parameters': {
+                    'input': task_input
+                    }
+                }
+
+            # Ask the Arvados API server to create a new task, running the same
+            # script as the parent task specified in 'created_by_job_task_uuid'
+            arvados.api().job_tasks().create(body=new_task_attrs).execute()
+
+    # Now tell the Arvados API server that this task executed successfully,
+    # even though it doesn't have any output.
+    this_task.set_output(None)
+else:
+    # The task sequence was not 0, so it must be a parallel worker task
+    # created by the first task
+
+    # Instead of getting "input" from the "script_parameters" field of
+    # the job object, we get it from the "parameters" field of the
+    # task object
+    this_task_input = this_task['parameters']['input']
+
+    collection = arvados.CollectionReader(this_task_input)
+
+    # There should be only one file in the collection, so get the
+    # first one from the all_files() iterator.
+    input_file = next(collection.all_files())
+    output_path = os.path.normpath(os.path.join(input_file.stream_name(),
+                                                input_file.name))
+
+    # Everything after this is the same as the first tutorial.
+    digestor = hashlib.new('md5')
+    for buf in input_file.readall():
+        digestor.update(buf)
+
+    out = arvados.CollectionWriter()
+    with out.open('md5sum.txt') as out_file:
+        out_file.write("{} {}\n".format(digestor.hexdigest(), output_path))
+
+    this_task.set_output(out.finish())
+
+# Done!
diff --git a/doc/_includes/_container_runtime_constraints.liquid b/doc/_includes/_container_runtime_constraints.liquid
new file mode 100644 (file)
index 0000000..7e0c8f1
--- /dev/null
@@ -0,0 +1,16 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Runtime constraints
+
+Runtime constraints restrict the container's access to compute resources and the outside world (in addition to its explicitly stated inputs and output).
+
+table(table table-bordered table-condensed).
+|_. Key|_. Type|_. Description|_. Notes|
+|ram|integer|Number of RAM bytes to be used to run this process.|Optional. However, a ContainerRequest that is in "Committed" state must provide this.|
+|vcpus|integer|Number of cores to be used to run this process.|Optional. However, a ContainerRequest that is in "Committed" state must provide this.|
+|keep_cache_ram|integer|Number of Keep cache bytes to be used to run this process.|Optional.|
+|API|boolean|When set, ARVADOS_API_HOST and ARVADOS_API_TOKEN will be set, and the container will have networking enabled to access the Arvados API server.|Optional.|
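+
+As a concrete illustration, here is a minimal sketch using the Arvados Python SDK to create a container request with these constraints.  The request name, image, command, and sizes are hypothetical placeholders, not prescribed values:
+
+<notextile>
+<pre><code>import arvados
+
+# Sketch only: a committed container request must provide "ram" and
+# "vcpus"; here we reserve 1 GiB of RAM, one core, and 256 MiB of
+# Keep cache, with API access disabled.
+arvados.api().container_requests().create(body={
+    "name": "runtime-constraints-example",      # hypothetical name
+    "state": "Committed",
+    "container_image": "arvados/jobs",          # hypothetical image
+    "command": ["echo", "hello"],
+    "output_path": "/out",
+    "mounts": {"/out": {"kind": "tmp", "capacity": 1000000000}},
+    "runtime_constraints": {
+        "ram": 1 << 30,
+        "vcpus": 1,
+        "keep_cache_ram": 256 << 20,
+        "API": False,
+    },
+}).execute()
+</code></pre>
+</notextile>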
diff --git a/doc/_includes/_container_scheduling_parameters.liquid b/doc/_includes/_container_scheduling_parameters.liquid
new file mode 100644 (file)
index 0000000..abbe6f4
--- /dev/null
@@ -0,0 +1,15 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Scheduling parameters
+
+Parameters to be passed to the container scheduler (e.g., SLURM) when running a container.
+
+table(table table-bordered table-condensed).
+|_. Key|_. Type|_. Description|_. Notes|
+|partitions|array of strings|The names of one or more compute partitions that may run this container. If not provided, the system will choose where to run the container.|Optional.|
+|preemptible|boolean|If true, the dispatcher will ask for a preemptible cloud node instance (e.g., an AWS Spot Instance) to run this container.|Optional. Default is false.|
+|max_run_time|integer|Maximum running time (in seconds) that this container will be allowed to run before being cancelled.|Optional. Default is 0 (no limit).|
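+
+A similar hedged sketch with the Arvados Python SDK shows how these parameters might be attached to a container request; the partition name and other values are hypothetical placeholders:
+
+<notextile>
+<pre><code>import arvados
+
+# Sketch only: request a preemptible node in a named partition and
+# cancel the container if it runs longer than one hour.
+arvados.api().container_requests().create(body={
+    "name": "scheduling-parameters-example",    # hypothetical name
+    "state": "Committed",
+    "container_image": "arvados/jobs",
+    "command": ["echo", "hello"],
+    "output_path": "/out",
+    "mounts": {"/out": {"kind": "tmp", "capacity": 1000000000}},
+    "runtime_constraints": {"ram": 1 << 30, "vcpus": 1},
+    "scheduling_parameters": {
+        "partitions": ["compute-small"],        # hypothetical partition
+        "preemptible": True,
+        "max_run_time": 3600,
+    },
+}).execute()
+</code></pre>
+</notextile>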
diff --git a/doc/_includes/_create_superuser_token.liquid b/doc/_includes/_create_superuser_token.liquid
new file mode 100644 (file)
index 0000000..07d8a4a
--- /dev/null
@@ -0,0 +1,14 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+On the <strong>API server</strong>, use the following commands:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd /var/www/arvados-api/current</span>
+$ <span class="userinput">sudo -u <b>webserver-user</b> RAILS_ENV=production bundle exec script/create_superuser_token.rb</span>
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+</code></pre>
+</notextile>
diff --git a/doc/_includes/_crunch1only_begin.liquid b/doc/_includes/_crunch1only_begin.liquid
new file mode 100644 (file)
index 0000000..6dc304a
--- /dev/null
@@ -0,0 +1,8 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'notebox_begin_warning' %}
+This section assumes the legacy Jobs API is available. Some newer installations have already disabled the Jobs API in favor of the Containers API.
diff --git a/doc/_includes/_crunch1only_end.liquid b/doc/_includes/_crunch1only_end.liquid
new file mode 100644 (file)
index 0000000..a3f2278
--- /dev/null
@@ -0,0 +1,7 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'notebox_end' %}
diff --git a/doc/_includes/_example_docker.liquid b/doc/_includes/_example_docker.liquid
new file mode 100644 (file)
index 0000000..2d6335a
--- /dev/null
@@ -0,0 +1,34 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{
+    "name": "Example using R in a custom Docker image",
+    "components": {
+        "Rscript": {
+            "script": "run-command",
+            "script_version": "master",
+            "repository": "arvados",
+            "script_parameters": {
+                "command": [
+                    "Rscript",
+                    "$(glob $(file $(myscript))/*.r)",
+                    "$(glob $(dir $(mydata))/*.csv)"
+                ],
+                "myscript": {
+                    "required": true,
+                    "dataclass": "Collection"
+                },
+                "mydata": {
+                    "required": true,
+                    "dataclass": "Collection"
+                }
+            },
+            "runtime_constraints": {
+                "docker_image": "arvados/jobs-with-r"
+            }
+        }
+    }
+}
diff --git a/doc/_includes/_example_sdk_go.liquid b/doc/_includes/_example_sdk_go.liquid
new file mode 100644 (file)
index 0000000..cc68b5a
--- /dev/null
@@ -0,0 +1,118 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+package main
+
+
+// *******************
+// Import the modules.
+//
+// Our examples don't use keepclient, but they do use fmt and log to
+// display output.
+
+import (
+       "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "log"
+)
+
+func main() {
+
+
+       // ********************************
+       // Set up an API client user agent.
+       //
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       if err != nil {
+               log.Fatalf("Error setting up arvados client %s", err.Error())
+       }
+
+
+       // *****************************************
+       // Print the full name of the current user.
+       //
+
+       type user struct {
+               // Remember to start each field name with a capital letter,
+               // otherwise it won't get populated by the arvados client because
+               // the field will be invisible to it.
+               Uuid     string `json:"uuid"`
+               FullName string `json:"full_name"`
+       }
+
+       var u user
+       err = arv.Call("GET", "users", "", "current", nil, &u)
+
+       if err != nil {
+               log.Fatalf("error querying current user: %s", err.Error())
+       }
+
+       log.Printf("Logged in as %s (uuid %s)", u.FullName, u.Uuid)
+
+
+       // ********************************************************
+       // Print all fields from the first five collections returned.
+       //
+       // Note that some fields are not returned by default and have to be
+       // requested explicitly. See below for an example.
+
+       var results map[string]interface{}
+
+       params := arvadosclient.Dict{"limit": 5}
+
+       err = arv.List("collections", params, &results)
+       if err != nil {
+               log.Fatalf("error querying collections: %s", err.Error())
+       }
+
+       printArvadosResults(results)
+
+
+       // *********************************************************
+       // Print some fields from the first two collections returned.
+       //
+       // We also print manifest_text, which has to be explicitly requested.
+       //
+
+       collection_fields_wanted := []string{"manifest_text", "owner_uuid", "uuid"}
+       params = arvadosclient.Dict{"limit": 2, "select": collection_fields_wanted}
+
+       err = arv.List("collections", params, &results)
+       if err != nil {
+               log.Fatalf("error querying collections: %s", err.Error())
+       }
+
+       printArvadosResults(results)
+}
+
+
+// A helper method which will print out a result map returned by
+// arvadosclient.
+func printArvadosResults(results map[string]interface{}) {
+       for key, value := range results {
+               // "items", if it exists, holds a map.
+               // So we print it prettily below.
+               if key != "items" {
+                       fmt.Println(key, ":", value)
+               }
+       }
+
+       if value, ok := results["items"]; ok {
+               items := value.([]interface{})
+               for index, item := range items {
+                       fmt.Println("===========  ", index, "  ===========")
+                       item_map := item.(map[string]interface{})
+                       if len(item_map) == 0 {
+                               fmt.Println("item", index, ": empty map")
+                       } else {
+                               for k, v := range item_map {
+                                       fmt.Println(index, k, ":", v)
+                               }
+                       }
+               }
+       }
+}
diff --git a/doc/_includes/_federated_cwl.liquid b/doc/_includes/_federated_cwl.liquid
new file mode 120000 (symlink)
index 0000000..59a629c
--- /dev/null
@@ -0,0 +1 @@
+../user/cwl/federated/federated.cwl
\ No newline at end of file
diff --git a/doc/_includes/_install_compute_docker.liquid b/doc/_includes/_install_compute_docker.liquid
new file mode 100644 (file)
index 0000000..ea3640e
--- /dev/null
@@ -0,0 +1,79 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Install Docker
+
+Compute nodes must have Docker installed to run containers.  This requires a relatively recent version of Linux (at least upstream version 3.10, or a distribution version with the appropriate patches backported).  Follow the "Docker Engine installation documentation":https://docs.docker.com/ for your distribution.
+
+For Debian-based systems, the Arvados package repository includes a backported @docker.io@ package with a known-good version you can install.
+
+h2(#configure_docker_daemon). Configure the Docker daemon
+
+Crunch runs Docker containers with relatively little configuration.  You may need to start the Docker daemon with specific options to make sure these jobs run smoothly in your environment.  This section highlights options that are useful to most installations.  Refer to the "Docker daemon reference":https://docs.docker.com/reference/commandline/daemon/ for complete information about all available options.
+
+The best way to configure these options varies by distribution.
+
+* If you're using our backported @docker.io@ package, you can list these options in the @DOCKER_OPTS@ setting in @/etc/default/docker.io@.
+* If you're using another Debian-based package, you can list these options in the @DOCKER_OPTS@ setting in @/etc/default/docker@.
+* On Red Hat-based distributions, you can list these options in the @other_args@ setting in @/etc/sysconfig/docker@.
+
+h3. Default ulimits
+
+Docker containers inherit ulimits from the Docker daemon.  However, the ulimits for a single Unix daemon may not accommodate a long-running Crunch job.  You may want to increase default limits for compute containers by passing @--default-ulimit@ options to the Docker daemon.  For example, to allow containers to open 10,000 files, set @--default-ulimit nofile=10000:10000@.
+
+h3. DNS
+
+Your containers must be able to resolve the hostname of your API server and any hostnames returned in Keep service records.  If these names are not in public DNS records, you may need to specify a DNS resolver for the containers by setting @--dns@ to the IP address of an appropriate nameserver.  You may specify this option more than once to use multiple nameservers.
+
+h2. Configure Linux cgroups accounting
+
+Linux can report what compute resources are used by processes in a specific cgroup or Docker container.  Crunch can use these reports to share that information with users running compute work.  This can help pipeline authors debug and optimize their workflows.
+
+To enable cgroups accounting, you must boot Linux with the command line parameters @cgroup_enable=memory swapaccount=1@.
+
+On Debian-based systems, open the file @/etc/default/grub@ in an editor.  Find where the string @GRUB_CMDLINE_LINUX@ is set.  Add @cgroup_enable=memory swapaccount=1@ to that string.  Save the file and exit the editor.  Then run:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo update-grub</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems, run:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo grubby --update-kernel=ALL --args='cgroup_enable=memory swapaccount=1'</span>
+</code></pre>
+</notextile>
+
+Finally, reboot the system to make these changes effective.
+
+h2. Create a project for Docker images
+
+Here we create a default project for the standard Arvados Docker images, and give all users read access to it. The project is owned by the system user.
+
+<notextile>
+<pre><code>~$ <span class="userinput">uuid_prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">project_uuid=`arv --format=uuid group create --group "{\"owner_uuid\":\"$uuid_prefix-tpzed-000000000000000\", \"group_class\":\"project\", \"name\":\"Arvados Standard Docker Images\"}"`</span>
+~$ <span class="userinput">echo "Arvados project uuid is '$project_uuid'"</span>
+~$ <span class="userinput">read -rd $'\000' newlink &lt;&lt;EOF; arv link create --link "$newlink"</span>
+<span class="userinput">{
+ "tail_uuid":"$all_users_group_uuid",
+ "head_uuid":"$project_uuid",
+ "link_class":"permission",
+ "name":"can_read"
+}
+EOF</span>
+</code></pre></notextile>
+
+h2. Download and tag the latest arvados/jobs docker image
+
+In order to start workflows from Workbench, there needs to be a Docker image tagged @arvados/jobs:latest@. The following command downloads the latest arvados/jobs image from Docker Hub, loads it into Keep, and tags it as @latest@.  In this example @$project_uuid@ should be the UUID of the "Arvados Standard Docker Images" project.
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-keepdocker --pull arvados/jobs latest --project-uuid $project_uuid</span>
+</code></pre></notextile>
+
+If the image needs to be downloaded from Docker Hub, the command can take a few minutes to complete, depending on available network bandwidth.
diff --git a/doc/_includes/_install_compute_fuse.liquid b/doc/_includes/_install_compute_fuse.liquid
new file mode 100644 (file)
index 0000000..449c32c
--- /dev/null
@@ -0,0 +1,23 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Configure FUSE
+
+FUSE must be configured with the @user_allow_other@ option enabled for Crunch to set up Keep mounts that are readable by containers.  Install this file as @/etc/fuse.conf@:
+
+<notextile>
+<pre>
+# Set the maximum number of FUSE mounts allowed to non-root users.
+# The default is 1000.
+#
+#mount_max = 1000
+
+# Allow non-root users to specify the 'allow_other' or 'allow_root'
+# mount options.
+#
+user_allow_other
+</pre>
+</notextile>
diff --git a/doc/_includes/_install_debian_key.liquid b/doc/_includes/_install_debian_key.liquid
new file mode 100644 (file)
index 0000000..75942c7
--- /dev/null
@@ -0,0 +1,10 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo /usr/bin/apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7</span>
+</code></pre>
+</notextile>
diff --git a/doc/_includes/_install_docker_cleaner.liquid b/doc/_includes/_install_docker_cleaner.liquid
new file mode 100644 (file)
index 0000000..5b0e155
--- /dev/null
@@ -0,0 +1,34 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Configure the Docker cleaner
+
+The arvados-docker-cleaner program removes least recently used Docker images as needed to keep disk usage below a configured limit.
+
+{% include 'notebox_begin' %}
+This also removes all containers as soon as they exit, as if they were run with @docker run --rm@. If you need to debug or inspect containers after they stop, temporarily stop arvados-docker-cleaner or configure it with @"RemoveStoppedContainers":"never"@.
+{% include 'notebox_end' %}
+
+Create a file @/etc/arvados/docker-cleaner/docker-cleaner.json@ in an editor, with the following contents.
+
+<notextile>
+<pre><code>{
+    "Quota": "<span class="userinput">10G</span>",
+    "RemoveStoppedContainers": "always"
+}
+</code></pre>
+</notextile>
+
+*Choosing a quota:* Most deployments will want a quota that's at least 10G.  From there, a larger quota can help reduce compute overhead by preventing reloading the same Docker image repeatedly, but will leave less space for other files on the same storage (usually Docker volumes).  Make sure the quota is less than the total space available for Docker images.
+
+Restart the service after updating the configuration file.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo systemctl restart arvados-docker-cleaner</span>
+</code></pre>
+</notextile>
+
+*If you are using a different daemon supervisor,* or if you want to test the daemon in a terminal window, run @arvados-docker-cleaner@. Run @arvados-docker-cleaner --help@ for more configuration options.
diff --git a/doc/_includes/_install_git.liquid b/doc/_includes/_install_git.liquid
new file mode 100644 (file)
index 0000000..d60379f
--- /dev/null
@@ -0,0 +1,9 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'notebox_begin' %}
+The Arvados API and Git servers require Git 1.7.10 or later.
+{% include 'notebox_end' %}
diff --git a/doc/_includes/_install_git_curl.liquid b/doc/_includes/_install_git_curl.liquid
new file mode 100644 (file)
index 0000000..40b95d3
--- /dev/null
@@ -0,0 +1,19 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+On a Debian-based system, install the following packages:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install git curl</span>
+</code></pre>
+</notextile>
+
+On a Red Hat-based system, install the following packages:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install git curl</span>
+</code></pre>
+</notextile>
diff --git a/doc/_includes/_install_postgres_database.liquid b/doc/_includes/_install_postgres_database.liquid
new file mode 100644 (file)
index 0000000..aad4688
--- /dev/null
@@ -0,0 +1,21 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+# Start a shell for the postgres user:
+  <notextile><pre>~$ <span class="userinput">sudo -u postgres bash</span></pre></notextile>
+# Generate a new database password:
+  <notextile><pre>$ <span class="userinput">ruby -e 'puts rand(2**128).to_s(36)'</span>
+yourgeneratedpassword
+</pre></notextile> Record this.  You'll need it when you set up the Rails server later.
+# Create a database user with the password you generated:
+  <notextile><pre><code>$ <span class="userinput">createuser --encrypted -R -S --pwprompt {{service_role}}</span>
+Enter password for new role: <span class="userinput">yourgeneratedpassword</span>
+Enter it again: <span class="userinput">yourgeneratedpassword</span>
+</code></pre></notextile>
+# Create a database owned by the new user:
+  <notextile><pre><code>$ <span class="userinput">createdb {{service_database}} -T template0 -E UTF8 -O {{service_role}}</span></code></pre></notextile>
+# Exit the postgres user shell:
+  <notextile><pre>$ <span class="userinput">exit</span></pre></notextile>
diff --git a/doc/_includes/_install_rails_command.liquid b/doc/_includes/_install_rails_command.liquid
new file mode 100644 (file)
index 0000000..027f64b
--- /dev/null
@@ -0,0 +1,47 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% comment %}
+This template recognizes four variables:
+* railshost: The hostname included in the prompt, to let the user know where to run the command.  If this is the empty string, no hostname will be displayed.  Default "apiserver".
+* railsdir: The directory included in the prompt, to let the user know where to run the command.  Default "/var/www/arvados-api/current".
+* railscmd: The full command to run.  Default "bundle exec rails console".
+* railsout: The expected output of the command, if any.
+{% endcomment %}
+
+Change *@webserver-user@* to the user that runs your web server process.  If you install Phusion Passenger as we recommend, this is *@www-data@* on Debian-based systems, and *@nginx@* on Red Hat-based systems.
+
+{% unless railshost %}
+  {% assign railshost = "apiserver" %}
+{% endunless %}
+
+{% unless (railshost == "") or (railshost contains ":") %}
+  {% capture railshost %}{{railshost}}:{% endcapture %}
+{% endunless %}
+
+{% unless railsdir %}
+  {% assign railsdir = "/var/www/arvados-api/current" %}
+{% endunless %}
+
+{% unless railscmd %}
+  {% assign railscmd = "bundle exec rails console" %}
+{% endunless %}
+
+Using RVM:
+
+<notextile>
+<pre><code>{{railshost}}~$ <span class="userinput">cd {{railsdir}}</span>
+{{railshost}}{{railsdir}}$ <span class="userinput">sudo -u <b>webserver-user</b> RAILS_ENV=production `which rvm-exec` default {{railscmd}}</span>
+{% if railsout %}{{railsout}}
+{% endif %}</code></pre>
+</notextile>
+
+Not using RVM:
+
+<notextile>
+<pre><code>{{railshost}}~$ <span class="userinput">cd {{railsdir}}</span>
+{{railshost}}{{railsdir}}$ <span class="userinput">sudo -u <b>webserver-user</b> RAILS_ENV=production {{railscmd}}</span>
+{% if railsout %}{{railsout}}
+{% endif %}</code></pre>
+</notextile>
diff --git a/doc/_includes/_install_rails_reconfigure.liquid b/doc/_includes/_install_rails_reconfigure.liquid
new file mode 100644 (file)
index 0000000..4687431
--- /dev/null
@@ -0,0 +1,17 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Now that all your configuration is in place, rerun the {{railspkg}} package configuration to install necessary Ruby Gems and other server dependencies.  On Debian-based systems:
+
+<notextile><pre><code>~$ <span class="userinput">sudo dpkg-reconfigure {{railspkg}}</span>
+</code></pre></notextile>
+
+On Red Hat-based systems:
+
+<notextile><pre><code>~$ <span class="userinput">sudo yum reinstall {{railspkg}}</span>
+</code></pre></notextile>
+
+You only need to do this manual step once, after initial configuration.  When you make configuration changes in the future, you just need to restart Nginx for them to take effect.
\ No newline at end of file
diff --git a/doc/_includes/_install_redhat_key.liquid b/doc/_includes/_install_redhat_key.liquid
new file mode 100644 (file)
index 0000000..69cfd5a
--- /dev/null
@@ -0,0 +1,15 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Curoverse signing key fingerprint is
+
+<notextile>
+<pre><code>
+pub  2048R/1078ECD7 2010-11-15 Curoverse, Inc Automatic Signing Key <sysadmin@curoverse.com>
+      Key fingerprint = B2DA 2991 656E B4A5 0314  CA2B 5716 5911 1078 ECD7
+sub  2048R/5A8C5A93 2010-11-15
+</code></pre>
+</notextile>
diff --git a/doc/_includes/_install_ruby_and_bundler.liquid b/doc/_includes/_install_ruby_and_bundler.liquid
new file mode 100644 (file)
index 0000000..d5a5a15
--- /dev/null
@@ -0,0 +1,69 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Ruby 2.3 is recommended; Ruby 2.1 is also known to work.
+
+h4(#rvm). *Option 1: Install with RVM*
+
+<notextile>
+<pre><code><span class="userinput">sudo gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
+\curl -sSL https://get.rvm.io | sudo bash -s stable --ruby=2.3
+</span></code></pre></notextile>
+
+Either log out and log back in to activate RVM, or explicitly load it in all open shells like this:
+
+<notextile>
+<pre><code><span class="userinput">source /usr/local/rvm/scripts/rvm
+</span></code></pre></notextile>
+
+Once RVM is activated in your shell, install Bundler:
+
+<notextile>
+<pre><code>~$ <span class="userinput">gem install bundler</span>
+</code></pre></notextile>
+
+h4(#fromsource). *Option 2: Install from source*
+
+Install prerequisites for Debian 8:
+
+<notextile>
+<pre><code><span class="userinput">sudo apt-get install \
+    bison build-essential gettext libcurl3 libcurl3-gnutls \
+    libcurl4-openssl-dev libpcre3-dev libreadline-dev \
+    libssl-dev libxslt1.1 zlib1g-dev
+</span></code></pre></notextile>
+
+Install prerequisites for CentOS 7:
+
+<notextile>
+<pre><code><span class="userinput">sudo yum install \
+    libyaml-devel glibc-headers autoconf gcc-c++ glibc-devel \
+    patch readline-devel zlib-devel libffi-devel openssl-devel \
+    make automake libtool bison sqlite-devel tar
+</span></code></pre></notextile>
+
+Install prerequisites for Ubuntu 12.04 or 14.04:
+
+<notextile>
+<pre><code><span class="userinput">sudo apt-get install \
+    gawk g++ gcc make libc6-dev libreadline6-dev zlib1g-dev libssl-dev \
+    libyaml-dev libsqlite3-dev sqlite3 autoconf libgdbm-dev \
+    libncurses5-dev automake libtool bison pkg-config libffi-dev curl
+</span></code></pre></notextile>
+
+Build and install Ruby:
+
+<notextile>
+<pre><code><span class="userinput">mkdir -p ~/src
+cd ~/src
+curl -f http://cache.ruby-lang.org/pub/ruby/2.3/ruby-2.3.3.tar.gz | tar xz
+cd ruby-2.3.3
+./configure --disable-install-rdoc
+make
+sudo make install
+
+sudo -i gem install bundler</span>
+</code></pre></notextile>
diff --git a/doc/_includes/_install_runit.liquid b/doc/_includes/_install_runit.liquid
new file mode 100644 (file)
index 0000000..d5f8341
--- /dev/null
@@ -0,0 +1,19 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install runit</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install runit</span>
+</code></pre>
+</notextile>
diff --git a/doc/_includes/_mount_types.liquid b/doc/_includes/_mount_types.liquid
new file mode 100644 (file)
index 0000000..edf8edf
--- /dev/null
@@ -0,0 +1,118 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Mount types
+
+The "mounts" hash is the primary mechanism for adding data to the container at runtime (beyond what is already in the container image).
+
+Each value of the "mounts" hash is itself a hash, whose "kind" key determines the handler used to attach data to the container.
+
+table(table table-bordered table-condensed).
+|_. Mount type|_. Kind|_. Description|_. Examples|
+|Arvados data collection|@collection@|@"portable_data_hash"@ _or_ @"uuid"@ _may_ be provided. If not provided, a new collection will be created. This is useful when @"writable":true@ and the container's @output_path@ is (or is a subdirectory of) this mount target.
+@"writable"@ may be provided with a @true@ or @false@ to indicate the path must (or must not) be writable. If not specified, the system can choose.
+@"path"@ may be provided, and defaults to @"/"@.
+At container startup, the target path will have the same directory structure as the given path within the collection. Even if the files/directories are writable in the container, modifications will _not_ be saved back to the original collections when the container ends.|<pre><code>{
+ "kind":"collection",
+ "uuid":"...",
+ "path":"/foo.txt"
+}
+{
+ "kind":"collection",
+ "uuid":"..."
+}</code></pre>|
+|Git tree|@git_tree@|@"uuid"@ must be the UUID of an Arvados-hosted git repository.
+@"commit"@ must be a full 40-character commit hash.
+@"path"@, if provided, must be "/".
+At container startup, the target path will have the source tree indicated by the given commit. The @.git@ metadata directory _will not_ be available.|<pre><code>{
+ "kind":"git_tree",
+ "uuid":"zzzzz-s0uqq-xxxxxxxxxxxxxxx",
+ "commit":"f315c59f90934cccae6381e72bba59d27ba42099"
+}
+</code></pre>|
+|Temporary directory|@tmp@|@"capacity"@: capacity (in bytes) of the storage device.
+@"device_type"@ (optional, default "network"): one of @{"ram", "ssd", "disk", "network"}@ indicating the acceptable level of performance.
+At container startup, the target path will be empty. When the container finishes, the content will be discarded. This will be backed by a storage mechanism no slower than the specified type.|<pre><code>{
+ "kind":"tmp",
+ "capacity":100000000000
+}
+{
+ "kind":"tmp",
+ "capacity":1000000000,
+ "device_type":"ram"
+}</code></pre>|
+|Keep|@keep@|Expose all readable collections via arv-mount.
+Requires suitable runtime constraints.|<pre><code>{
+ "kind":"keep"
+}</code></pre>|
+|Mounted file or directory|@file@|@"path"@: absolute path (inside the container) of a file or directory that is (or is inside) another mount target.
+Can be used for "stdin" and "stdout" targets.|<pre><code>{
+ "kind":"file",
+ "path":"/mounted_tmp/a.out"
+}</code></pre>|
+|JSON document|@json@|A JSON-encoded string, array, or object.|<pre><code>{
+ "kind":"json",
+ "content":{"foo":"bar"}
+}</code></pre>|
+
+h2(#pre-populate-output). Pre-populate output using Mount points
+
+When a container's output_path is a tmp mount backed by local disk, this output directory can be pre-populated with content from existing collections. This content can be specified by mounting collections at mount points that are subdirectories of output_path. Certain restrictions apply:
+
+1. Only mount points of kind @collection@ are supported.
+
+2. Mount points underneath output_path which have @"writable":true@ are copied into output_path during container initialization and may be updated, renamed, or deleted by the running container.  The original collection is not modified.  On container completion, files remaining in the output are saved to the output collection.   The mount at output_path must be big enough to accommodate copies of the inner writable mounts.
+
+3. If any such mount points are configured with @"exclude_from_output":true@, they will be excluded from the output.
+
+If any process in the container tries to modify, remove, or rename these mount points or anything underneath them, the operation will fail; the container output and the collections used to pre-populate it are unaffected.
+
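+As a hedged sketch (the collection UUID is a placeholder), a container request that pre-populates its output from an existing collection might look like this with the Arvados Python SDK:
+
+<notextile>
+<pre><code>import arvados
+
+# Sketch only: mount a collection writably at a subdirectory of
+# output_path, so the container starts with that content in place
+# and any files remaining at completion are saved to the output.
+arvados.api().container_requests().create(body={
+    "name": "pre-populate-output-example",      # hypothetical name
+    "state": "Committed",
+    "container_image": "arvados/jobs",
+    "command": ["touch", "/out/seed/extra.txt"],
+    "output_path": "/out",
+    "mounts": {
+        "/out": {"kind": "tmp", "capacity": 100000000},
+        "/out/seed": {"kind": "collection",
+                      "uuid": "zzzzz-4zz18-xxxxxxxxxxxxxxx",  # placeholder
+                      "writable": True},
+    },
+    "runtime_constraints": {"ram": 1 << 30, "vcpus": 1},
+}).execute()
+</code></pre>
+</notextile>
+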
+h3. Example mount point configurations
+
+All the below examples are based on this collection:
+<pre><code>
+portable_data_hash cdfbe2e823222d26483d52e5089d553c+175
+
+manifest_text: ./alice 03032680d3fa0561ef4f85071140861e+13+A04e9d06459cda00aa997565bd78001061cf5bffb@58ab593d 0:13:hello.txt\n./bob d820b9df970e1b498e7723c50b107e1b+11+A42d162a60210479d1cfaf9fbb98d494ac6322ae6@58ab593d 0:11:hello.txt\n./carol cf72b172ff969250ae14a893a6745440+13+A476a2fd39e14e9c03af3076bd17e3612c075ff66@58ab593d 0:13:hello.txt\n
+</code></pre>
+
+table(table table-bordered table-condensed).
+|{width:40%}. *Mount point*|{width:30%}. *Description*|{width:30%}. *Resulting collection manifest text*|
+|<pre><code>"mounts": {
+  "/tmp/foo": {
+    "kind": "collection",
+    "portable_data_hash": "cdfbe2...+175"
+  }
+},
+"output_path": "/tmp"
+</code></pre>|No path is specified, so the entire collection is mounted.|./*foo/*alice 030326... 0:13:hello.txt\n
+./*foo/*bob d820b9... 0:11:hello.txt\n
+./*foo/*carol cf72b1... 0:13:hello.txt\n
+*Note:* Here the stream name "." is replaced with *foo*.|
+|<pre><code>"mounts": {
+  "/tmp/foo/bar": {
+    "kind": "collection",
+    "portable_data_hash": "cdfbe2...+175"
+    "path": "alice"
+  },
+},
+"output_path": "/tmp"
+</code></pre>|Specified path refers to the subdirectory *alice* in the collection.|./*foo/bar* 030326... 0:13:hello.txt\n
+*Note:* Only the manifest text segment for the subdirectory *alice* is included, with the subdirectory *alice* renamed to *foo/bar*.|
+|<pre><code>"mounts": {
+  "/tmp/foo/bar": {
+    "kind": "collection",
+    "portable_data_hash": "cdfbe2...+175"
+    "path": "alice/hello.txt"
+  },
+},
+"output_path": "/tmp"
+</code></pre>|Specified path refers to the file *hello.txt* in the *alice* subdirectory.|./*foo* 030326... 0:13:*bar*\n
+*Note:* Here the subdirectory *alice* is replaced with *foo* and the filename *hello.txt* from this subdirectory is replaced with *bar*.|
+
+h2(#symlinks-in-output). Symlinks in output
+
+When a container's output_path is a tmp mount backed by local disk, this output directory can contain symlinks to other files in the output directory, or to collection mount points.  If a symlink leads to a collection mount, the collection is efficiently copied into the output collection.  Symlinks leading to files or directories are expanded and created as regular files in the output collection.  Further, whether symlinks are relative or absolute, every symlink target (even a target that is itself a symlink) must point to a path in either the output directory or a collection mount.
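+
+For instance, a program inside the container might create such symlinks as in this brief sketch; the paths are hypothetical, with @/keep/reference@ standing for a collection mount and @/out@ for the tmp-backed output_path:
+
+<notextile>
+<pre><code>import os
+
+output_dir = "/out"                  # hypothetical output_path
+collection_mount = "/keep/reference" # hypothetical collection mount
+
+# A symlink to a collection mount: the collection is copied
+# efficiently into the output collection.
+os.symlink(collection_mount, os.path.join(output_dir, "reference"))
+
+# A symlink to a file in the output directory: expanded and saved
+# as a regular file in the output collection.
+os.symlink(os.path.join(output_dir, "result.txt"),
+           os.path.join(output_dir, "result-link.txt"))
+</code></pre>
+</notextile>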
diff --git a/doc/_includes/_navbar_left.liquid b/doc/_includes/_navbar_left.liquid
new file mode 100644 (file)
index 0000000..cba6c46
--- /dev/null
@@ -0,0 +1,24 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+<div class="col-sm-3">
+  <div class="well">
+    <ol class="nav nav-list">
+      {% for section in site.navbar[page.navsection] %}
+      {% for entry in section %}
+      <li><span class="nav-header">{{ entry[0] }}</span>
+       <ol class="nav nav-list">
+          {% for item in entry[1] %}        
+          {% assign p = site.pages[item] %}
+          <li {% if p.url == page.url %} class="active activesubnav" {% elsif p.title == page.subnavsection %} class="activesubnav" {% endif %}>
+            <a href="{{ site.baseurl }}{{ p.url }}">{{ p.title }}</a></li>
+          {% endfor %}
+        </ol>
+        {% endfor %}
+        {% endfor %}
+    </ol>
+  </div>
+</div>
diff --git a/doc/_includes/_navbar_top.liquid b/doc/_includes/_navbar_top.liquid
new file mode 100644 (file)
index 0000000..7d96ea0
--- /dev/null
@@ -0,0 +1,43 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+<div class="navbar navbar-default navbar-fixed-top">
+  <div class="container-fluid">
+    <div class="navbar-header">
+      <button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#bs-navbar-collapse">
+        <span class="sr-only">Toggle navigation</span>
+        <span class="icon-bar"></span>
+        <span class="icon-bar"></span>
+        <span class="icon-bar"></span>
+      </button>
+      <a class="navbar-brand" href="{{ site.baseurl }}/">Arvados&trade; Docs</a>
+    </div>
+    <div class="collapse navbar-collapse" id="bs-navbar-collapse">
+      <ul class="nav navbar-nav">
+        <!--<li {% if page.navsection == 'start' %} class="active" {% endif %}><a href="{{ site.baseurl }}/start/index.html">Getting&nbsp;Started</a></li>-->
+        <li {% if page.navsection == 'userguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/user/index.html">User&nbsp;Guide</a></li>
+        <li {% if page.navsection == 'sdk' %} class="active" {% endif %}><a href="{{ site.baseurl }}/sdk/index.html">SDKs</a></li>
+        <li {% if page.navsection == 'architecture' %} class="active" {% endif %}><a href="{{ site.baseurl }}/architecture/index.html">Architecture</a></li>
+        <li {% if page.navsection == 'api' %} class="active" {% endif %}><a href="{{ site.baseurl }}/api/index.html">API</a></li>
+        <li {% if page.navsection == 'admin' %} class="active" {% endif %}><a href="{{ site.baseurl }}/admin/index.html">Admin</a></li>
+        <li {% if page.navsection == 'installguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/install/index.html">Install</a></li>
+        <li><a href="https://arvados.org" style="padding-left: 2em">arvados.org&nbsp;&raquo;</a></li>
+      </ul>
+
+      <div class="pull-right" style="padding-top: 6px">
+        <form method="get" action="https://www.google.com/search">
+          <div class="input-group" style="width: 220px">
+            <input type="text" class="form-control" name="q" placeholder="search">
+            <div class="input-group-addon">
+              <button class="glyphicon glyphicon-search" style="border: 0px" type="submit"></button>
+            </div>
+            <input type="hidden" name="sitesearch" value="doc.arvados.org"/>
+          </div>
+        </form>
+      </div>
+    </div>
+  </div>
+</div>
diff --git a/doc/_includes/_note_python_sc.liquid b/doc/_includes/_note_python_sc.liquid
new file mode 100644 (file)
index 0000000..4b08177
--- /dev/null
@@ -0,0 +1,29 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'notebox_begin' %}
+
+{% if rh_version %} On CentOS {{rh_version}} and RHEL {{rh_version}},
+{% else %} On CentOS and RHEL,
+{% endif %} these packages require a more recent version from Software Collections.  The Software Collection will be installed automatically as long as Software Collections are enabled on your system.
+
+To "enable Software Collections on CentOS":https://wiki.centos.org/AdditionalResources/Repositories/SCL, run:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install centos-release-scl scl-utils</span>
+</code></pre>
+</notextile>
+
+To enable Software Collections on RHEL:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum-config-manager --enable rhel-server-rhscl-7-rpms</span>
+</code></pre>
+</notextile>
+
+"See also section 2.1 of Red Hat's Installation chapter":https://access.redhat.com/documentation/en-US/Red_Hat_Software_Collections/2/html/2.0_Release_Notes/chap-Installation.html .
+
+{% include 'notebox_end' %}
diff --git a/doc/_includes/_notebox_begin.liquid b/doc/_includes/_notebox_begin.liquid
new file mode 100644 (file)
index 0000000..39a859e
--- /dev/null
@@ -0,0 +1,9 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+<div class="alert alert-block alert-info">
+  <button type="button" class="close" data-dismiss="alert">&times;</button>
+  <h4>Note:</h4>
diff --git a/doc/_includes/_notebox_begin_warning.liquid b/doc/_includes/_notebox_begin_warning.liquid
new file mode 100644 (file)
index 0000000..ecee2a0
--- /dev/null
@@ -0,0 +1,8 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+<div class="alert alert-block alert-warning">
+  <h4>Note:</h4>
diff --git a/doc/_includes/_notebox_end.liquid b/doc/_includes/_notebox_end.liquid
new file mode 100644 (file)
index 0000000..e53941e
--- /dev/null
@@ -0,0 +1,7 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+</div>
diff --git a/doc/_includes/_pipeline_deprecation_notice.liquid b/doc/_includes/_pipeline_deprecation_notice.liquid
new file mode 100644 (file)
index 0000000..35c89be
--- /dev/null
@@ -0,0 +1,9 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'notebox_begin_warning' %}
+Arvados pipeline templates are deprecated.  The recommended way to develop new workflows for Arvados is using the "Common Workflow Language":{{site.baseurl}}/user/cwl/cwl-runner.html.
+{% include 'notebox_end' %}
diff --git a/doc/_includes/_run_command_foreach_example.liquid b/doc/_includes/_run_command_foreach_example.liquid
new file mode 100644 (file)
index 0000000..8e3dd71
--- /dev/null
@@ -0,0 +1,46 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{
+    "name":"run-command example pipeline",
+    "components":{
+        "bwa-mem": {
+            "script": "run-command",
+            "script_version": "master",
+            "repository": "arvados",
+            "script_parameters": {
+                "command": [
+                    "bwa",
+                    "mem",
+                    "-t",
+                    "$(node.cores)",
+                    "$(glob $(dir $(reference_collection))/*.fasta)",
+                    {
+                        "foreach": "read_pair",
+                        "command": "$(read_pair)"
+                    }
+                ],
+                "task.stdout": "$(basename $(glob $(dir $(sample))/*_1.fastq)).sam",
+                "task.foreach": ["sample_subdir", "read_pair"],
+                "reference_collection": {
+                    "required": true,
+                    "dataclass": "Collection"
+                },
+                "sample": {
+                    "required": true,
+                    "dataclass": "Collection"
+                },
+                "sample_subdir": "$(dir $(sample))",
+                "read_pair": {
+                    "value": {
+                        "group": "sample_subdir",
+                        "regex": "(.*)_[12]\\.fastq(\\.gz)?$"
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/doc/_includes/_run_command_simple_example.liquid b/doc/_includes/_run_command_simple_example.liquid
new file mode 100644 (file)
index 0000000..b37ae9a
--- /dev/null
@@ -0,0 +1,43 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{
+    "name":"run-command example pipeline",
+    "components":{
+         "bwa-mem": {
+            "script": "run-command",
+            "script_version": "master",
+            "repository": "arvados",
+            "script_parameters": {
+                "command": [
+                    "$(dir $(bwa_collection))/bwa",
+                    "mem",
+                    "-t",
+                    "$(node.cores)",
+                    "-R",
+                    "@RG\\\tID:group_id\\\tPL:illumina\\\tSM:sample_id",
+                    "$(glob $(dir $(reference_collection))/*.fasta)",
+                    "$(glob $(dir $(sample))/*_1.fastq)",
+                    "$(glob $(dir $(sample))/*_2.fastq)"
+                ],
+                "reference_collection": {
+                    "required": true,
+                    "dataclass": "Collection"
+                },
+                "bwa_collection": {
+                    "required": true,
+                    "dataclass": "Collection",
+                    "default": "39c6f22d40001074f4200a72559ae7eb+5745"
+                },
+                "sample": {
+                    "required": true,
+                    "dataclass": "Collection"
+                },
+                "task.stdout": "$(basename $(glob $(dir $(sample))/*_1.fastq)).sam"
+            }
+        }
+    }
+}
diff --git a/doc/_includes/_run_md5sum_py.liquid b/doc/_includes/_run_md5sum_py.liquid
new file mode 100644 (file)
index 0000000..6d10672
--- /dev/null
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+import arvados
+
+# Automatically parallelize this job by running one task per file.
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True,
+                                          input_as_path=True)
+
+# Get the input file for the task
+input_file = arvados.get_task_param_mount('input')
+
+# Run the external 'md5sum' program on the input file
+stdoutdata, stderrdata = arvados.util.run_command(['md5sum', input_file])
+
+# Save the standard output (stdoutdata) to "md5sum.txt" in the output collection
+out = arvados.CollectionWriter()
+with out.open('md5sum.txt') as out_file:
+    out_file.write(stdoutdata)
+arvados.current_task().set_output(out.finish())
diff --git a/doc/_includes/_shards_yml.liquid b/doc/_includes/_shards_yml.liquid
new file mode 120000 (symlink)
index 0000000..99ae31c
--- /dev/null
@@ -0,0 +1 @@
+../user/cwl/federated/shards.yml
\ No newline at end of file
diff --git a/doc/_includes/_ssh_addkey.liquid b/doc/_includes/_ssh_addkey.liquid
new file mode 100644 (file)
index 0000000..7a8a992
--- /dev/null
@@ -0,0 +1,23 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+
+You may now proceed to "adding your key to the Arvados Workbench.":#workbench
+
+h1(#workbench). Adding your key to Arvados Workbench
+
+h3. From the Workbench dashboard
+
+In the Workbench top navigation menu, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> to access the user settings menu, then click on the menu item *SSH keys* to go to the *SSH keys* page. Click on the <span class="btn btn-primary">*+* Add new SSH key</span> button on this page. This will open a popup as shown in this screenshot:
+
+!{{ site.baseurl }}/images/ssh-adding-public-key.png!
+Paste your public key into the text area labeled *Public Key*, and click on the <span class="btn btn-primary">Submit</span> button. You are now ready to "log into an Arvados VM":#login.
+
+h1(#login). Using SSH to log into an Arvados VM
+
+To see a list of virtual machines that you have access to, and to find their names and login information, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access the user settings menu, then click on the menu item *Virtual machines* to go to the Virtual machines page. This page lists the virtual machines you can access. The *Host name* column lists the name of each available VM.  The *Login name* column will have a list of comma-separated values of the form @you@. In this guide the hostname will be *_shell_* and the login will be *_you_*.  Replace these with your hostname and login name as appropriate.
+
+
diff --git a/doc/_includes/_ssh_intro.liquid b/doc/_includes/_ssh_intro.liquid
new file mode 100644 (file)
index 0000000..8cb09f1
--- /dev/null
@@ -0,0 +1,13 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+
+Arvados requires a public SSH key in order to securely log in to an Arvados VM instance, or to access an Arvados Git repository. The three sections below help you get started:
+
+# "Getting your SSH key":#gettingkey
+# "Adding your key to Arvados Workbench":#workbench
+# "Using SSH to log into an Arvados VM instance":#login
+
diff --git a/doc/_includes/_tutorial_bwa_sortsam_pipeline.liquid b/doc/_includes/_tutorial_bwa_sortsam_pipeline.liquid
new file mode 100644 (file)
index 0000000..3b39403
--- /dev/null
@@ -0,0 +1,78 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{
+    "name": "Tutorial align using bwa mem and SortSam",
+    "components": {
+        "bwa-mem": {
+            "script": "run-command",
+            "script_version": "master",
+            "repository": "arvados",
+            "script_parameters": {
+                "command": [
+                    "$(dir $(bwa_collection))/bwa",
+                    "mem",
+                    "-t",
+                    "$(node.cores)",
+                    "-R",
+                    "@RG\\\tID:group_id\\\tPL:illumina\\\tSM:sample_id",
+                    "$(glob $(dir $(reference_collection))/*.fasta)",
+                    "$(glob $(dir $(sample))/*_1.fastq)",
+                    "$(glob $(dir $(sample))/*_2.fastq)"
+                ],
+                "reference_collection": {
+                    "required": true,
+                    "dataclass": "Collection"
+                },
+                "bwa_collection": {
+                    "required": true,
+                    "dataclass": "Collection",
+                    "default": "39c6f22d40001074f4200a72559ae7eb+5745"
+                },
+                "sample": {
+                    "required": true,
+                    "dataclass": "Collection"
+                },
+                "task.stdout": "$(basename $(glob $(dir $(sample))/*_1.fastq)).sam"
+            },
+            "runtime_constraints": {
+                "docker_image": "bcosc/arv-base-java",
+                "arvados_sdk_version": "master"
+            }
+        },
+        "SortSam": {
+            "script": "run-command",
+            "script_version": "847459b3c257aba65df3e0cbf6777f7148542af2",
+            "repository": "arvados",
+            "script_parameters": {
+                "command": [
+                    "java",
+                    "-Xmx4g",
+                    "-Djava.io.tmpdir=$(tmpdir)",
+                    "-jar",
+                    "$(dir $(picard))/SortSam.jar",
+                    "CREATE_INDEX=True",
+                    "SORT_ORDER=coordinate",
+                    "VALIDATION_STRINGENCY=LENIENT",
+                    "INPUT=$(glob $(dir $(input))/*.sam)",
+                    "OUTPUT=$(basename $(glob $(dir $(input))/*.sam)).sort.bam"
+                ],
+                "input": {
+                    "output_of": "bwa-mem"
+                },
+                "picard": {
+                    "required": true,
+                    "dataclass": "Collection",
+                    "default": "88447c464574ad7f79e551070043f9a9+1970"
+                }
+            },
+            "runtime_constraints": {
+                "docker_image": "bcosc/arv-base-java",
+                "arvados_sdk_version": "master"
+            }
+        }
+    }
+}
diff --git a/doc/_includes/_tutorial_cluster_name.liquid b/doc/_includes/_tutorial_cluster_name.liquid
new file mode 100644 (file)
index 0000000..22fbc46
--- /dev/null
@@ -0,0 +1,9 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'notebox_begin' %}
+This tutorial assumes you are using the default Arvados instance, @qr1hi@. If you are using a different instance, replace @qr1hi@ with your instance. See "Accessing Arvados Workbench":{{site.baseurl}}/user/getting_started/workbench.html for more details.
+{% include 'notebox_end' %}
diff --git a/doc/_includes/_tutorial_expectations.liquid b/doc/_includes/_tutorial_expectations.liquid
new file mode 100644 (file)
index 0000000..6c4fbeb
--- /dev/null
@@ -0,0 +1,9 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'notebox_begin' %}
+This tutorial assumes that you are logged into an Arvados VM instance (instructions for "Webshell":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or you have installed the Arvados "FUSE Driver":{{site.baseurl}}/sdk/python/arvados-fuse.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html
+{% include 'notebox_end' %}
diff --git a/doc/_includes/_tutorial_expectations_workstation.liquid b/doc/_includes/_tutorial_expectations_workstation.liquid
new file mode 100644 (file)
index 0000000..7d24c1e
--- /dev/null
@@ -0,0 +1,9 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'notebox_begin' %}
+This tutorial assumes that you have installed the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/install.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html
+{% include 'notebox_end' %}
diff --git a/doc/_includes/_tutorial_git_repo_expectations.liquid b/doc/_includes/_tutorial_git_repo_expectations.liquid
new file mode 100644 (file)
index 0000000..8a172de
--- /dev/null
@@ -0,0 +1,9 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'notebox_begin' %}
+This tutorial assumes that you have a working Arvados repository. If you do not have a repository created, you can follow the instructions in the "Adding a new repository":{{site.baseurl}}/user/tutorials/add-new-repository.html page. We will use the *$USER/tutorial* repository created in that page as the example.
+{% include 'notebox_end' %}
diff --git a/doc/_includes/_tutorial_hash_script_py.liquid b/doc/_includes/_tutorial_hash_script_py.liquid
new file mode 100644 (file)
index 0000000..9eacb76
--- /dev/null
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+import hashlib      # Import the hashlib module to compute MD5.
+import os           # Import the os module for basic path manipulation
+import arvados      # Import the Arvados sdk module
+
+# Automatically parallelize this job by running one task per file.
+# This means that if the input consists of many files, each file will
+# be processed in parallel on different nodes, enabling the job to
+# complete more quickly.
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True,
+                                          input_as_path=True)
+
+# Get object representing the current task
+this_task = arvados.current_task()
+
+# Create the message digest object that will compute the MD5 hash
+digestor = hashlib.new('md5')
+
+# Get the input file for the task
+input_id, input_path = this_task['parameters']['input'].split('/', 1)
+
+# Open the input collection
+input_collection = arvados.CollectionReader(input_id)
+
+# Open the input file for reading
+with input_collection.open(input_path) as input_file:
+    for buf in input_file.readall():  # Iterate the file's data blocks
+        digestor.update(buf)          # Update the MD5 hash object
+
+# Write a new collection as output
+out = arvados.CollectionWriter()
+
+# Write an output file with one line: the MD5 value and input path
+with out.open('md5sum.txt') as out_file:
+    out_file.write("{} {}/{}\n".format(digestor.hexdigest(), input_id,
+                                       os.path.normpath(input_path)))
+
+# Commit the output to Keep.
+output_locator = out.finish()
+
+# Use the resulting locator as the output for this task.
+this_task.set_output(output_locator)
+
+# Done!
diff --git a/doc/_includes/_tutorial_submit_job.liquid b/doc/_includes/_tutorial_submit_job.liquid
new file mode 100644 (file)
index 0000000..548a619
--- /dev/null
@@ -0,0 +1,25 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{
+  "name":"My md5 pipeline",
+  "components":{
+    "do_hash":{
+      "repository":"$USER/$USER",
+      "script":"hash.py",
+      "script_version":"master",
+      "runtime_constraints":{
+        "docker_image":"arvados/jobs"
+      },
+      "script_parameters":{
+        "input":{
+          "required": true,
+          "dataclass": "Collection"
+        }
+      }
+    }
+  }
+}
diff --git a/doc/_includes/_webring.liquid b/doc/_includes/_webring.liquid
new file mode 100644 (file)
index 0000000..602897b
--- /dev/null
@@ -0,0 +1,35 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% assign n = 0 %}
+{% assign prev = "" %}
+{% assign nx = 0 %}
+{% for section in site.navbar[page.navsection] %}
+  {% for entry in section %}
+    {% for item in entry[1] %}        
+      {% assign p = site.pages[item] %}
+      {% if nx == 1 %}
+        <hr>
+        {% if prev != "" %}
+          <a href="{{ site.baseurl }}{{ prev.url }}" class="pull-left">Previous: {{ prev.title }}</a>
+        {% endif %}
+        <a href="{{ site.baseurl }}{{ p.url }}" class="pull-right">Next: {{ p.title }}</a>
+        {% assign nx = 0 %}
+        {% assign n = 1 %}
+      {% endif %}
+      {% if p.url == page.url %}
+        {% assign nx = 1 %}
+      {% else %}
+        {% assign prev = p %}
+      {% endif %}
+    {% endfor %}
+  {% endfor %}
+{% endfor %}
+{% if n == 0 and prev != "" %}
+  <hr>
+  <a href="{{ site.baseurl }}{{ prev.url }}" class="pull-left">Previous: {{ prev.title }}</a>
+  {% assign n = 1 %}
+{% endif %}
\ No newline at end of file
diff --git a/doc/_includes/_what_is_cwl.liquid b/doc/_includes/_what_is_cwl.liquid
new file mode 100644 (file)
index 0000000..2fda2a7
--- /dev/null
@@ -0,0 +1,7 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The "Common Workflow Language (CWL)":http://commonwl.org is a multi-vendor open standard for describing analysis tools and workflows that are portable across a variety of platforms.  CWL is the recommended way to develop and run workflows for Arvados.  Arvados supports the "CWL v1.0":http://commonwl.org/v1.0 specification.
diff --git a/doc/_layouts/default.html.liquid b/doc/_layouts/default.html.liquid
new file mode 100644 (file)
index 0000000..7c6d36e
--- /dev/null
@@ -0,0 +1,124 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>{% unless page.title == "Arvados | Documentation" %} Arvados {% if page.navmenu %}| {{ page.navmenu }} {% endif %} | {% endunless %}{{ page.title }}</title>
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <meta name="description" content="">
+    <meta name="author" content="">
+    <link rel="icon" href="{{ site.baseurl }}/images/favicon.ico" type="image/x-icon">
+    <link rel="shortcut icon" href="{{ site.baseurl }}/images/favicon.ico" type="image/x-icon">
+    <link href="{{ site.baseurl }}/css/bootstrap.css" rel="stylesheet">
+    <link href="{{ site.baseurl }}/css/nav-list.css" rel="stylesheet">
+    <link href="{{ site.baseurl }}/css/badges.css" rel="stylesheet">
+    <link href="{{ site.baseurl }}/css/code.css" rel="stylesheet">
+    <link href="{{ site.baseurl }}/css/font-awesome.css" rel="stylesheet">
+    <link href="{{ site.baseurl }}/css/carousel-override.css" rel="stylesheet">
+    <link href="{{ site.baseurl }}/css/button-override.css" rel="stylesheet">
+    <link href="{{ site.baseurl }}/css/images.css" rel="stylesheet">
+    <style>
+      html {
+        height: 100%;
+      }
+      body {
+        padding-top: 61px;
+        height: 90%; /* If calc() is not supported */
+        height: calc(100% - 46px); /* Sets the body full height minus the padding for the menu bar */
+      }
+      @media (max-width: 979px) {
+        div.frontpagehero {
+          margin-left: -20px;
+          margin-right: -20px;
+          padding-left: 20px;
+        }
+      }
+      .sidebar-nav {
+        padding: 9px 0;
+      }
+      .section-block {
+        background: #eeeeee;
+        padding: 1em;
+        -webkit-border-radius: 12px;
+        -moz-border-radius: 12px;
+        border-radius: 12px;
+        margin: 0 2em;
+      }
+      .row-fluid :first-child .section-block {
+        margin-left: 0;
+      }
+      .row-fluid :last-child .section-block {
+        margin-right: 0;
+      }
+      .rarr {
+        font-size: 1.5em;
+      }
+      .darr {
+        font-size: 4em;
+        text-align: center;
+        margin-bottom: 1em;
+      }
+      :target {
+        padding-top: 61px;
+        margin-top: -61px;
+      }
+    </style>
+
+    <!-- HTML5 shim, for IE6-8 support of HTML5 elements -->
+    <!--[if lt IE 9]>
+        <script src="../assets/js/html5shiv.js"></script>
+        <![endif]-->
+  </head>
+  <body class="nopad">
+    {% include 'navbar_top' %}
+
+    {% if page.navsection == 'top' or page.no_nav_left %}
+    {{ content }}
+    {% else %}
+
+    <div class="container-fluid">
+      <div class="row">
+        {% include 'navbar_left' %}
+        <div class="col-sm-9">
+          <h1>{{ page.title }}</h1>
+          {{ content }}
+          {% include 'webring' %}
+        </div>
+      </div>
+
+      <div style="height: 2em"></div>
+
+    </div>
+    {% endif %}
+    <script src="{{ site.baseurl }}/js/jquery.min.js"></script>
+    <script src="{{ site.baseurl }}/js/bootstrap.min.js"></script>
+    <script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+
+  ga('create', 'UA-40055979-1', 'arvados.org');
+  ga('send', 'pageview');
+
+    </script>
+
+{% if page.no_nav_left %}
+{% else %}
+<p style="text-align: center"><small>
+The content of this documentation is licensed under the
+<a href="{{ site.baseurl }}/user/copying/by-sa-3.0.html">Creative
+  Commons Attribution-Share Alike 3.0 United States</a> license.<br>
+Code samples in this documentation are licensed under the
+<a href="{{ site.baseurl }}/user/copying/LICENSE-2.0.html">Apache License, Version 2.0.</a></small>
+</p>
+{% endif %}
+
+
+  </body>
+</html>
diff --git a/doc/admin/activation.html.textile.liquid b/doc/admin/activation.html.textile.liquid
new file mode 100644 (file)
index 0000000..4a08e50
--- /dev/null
@@ -0,0 +1,229 @@
+---
+layout: default
+navsection: admin
+title: User activation
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how new users are created and activated.
+
+"Browser login and management of API tokens is described here.":{{site.baseurl}}/api/tokens.html
+
+h3. Authentication
+
+After completing the authentication process, a callback is made from the SSO server to the API server, providing a user record and @identity_url@ (despite the name, this is actually an Arvados user uuid).
+
+The API server searches for a user record with the @identity_url@ supplied by the SSO.  If found, that user account will be used, unless the account has @redirect_to_user_uuid@ set, in which case it will use the user in @redirect_to_user_uuid@ instead (this is used for the "link account":{{site.baseurl}}/user/topics/link-accounts.html feature).
+
+Next, it searches by email address for a "pre-activated account.":#pre-activated
+
+If no existing user record is found, a new user object will be created.
+
+A federated user follows a slightly different flow: a special token is presented, and the API server verifies the user's identity with the home cluster. This also results in a user object (representing the remote user) being created.
+
+h3. User setup
+
+If @auto_setup_new_users@ is true, as part of creating the new user object, the user is immediately set up with:
+
+* A @can_login@ permission link (email address &rarr; user uuid) which records @identity_url_prefix@
+* Membership in the "All users" group (can read all users, all users can see new user)
+* A new git repo and @can_manage@ permission if @auto_setup_new_users_with_repository@ is true
+* @can_login@ permission to a shell node if @auto_setup_new_users_with_vm_uuid@ is set to the uuid of a vm
+
+Otherwise, an admin must explicitly invoke "setup" on the user via workbench or the API.
+
+h3. User activation
+
+A newly created user is inactive (@is_active@ is false) by default, unless @new_users_are_active@ is true.
+
+An inactive user cannot create or update any object, but can read Arvados objects that the user account has permission to read.  This implies that if @auto_setup_new_users@ is true, an "inactive" user who has been set up may still be able to do things, such as read things shared with "All users", clone and push to the git repository, or log in to a VM.
+
+{% comment %}
+Maybe these services should check is_active.
+
+I believe that when this was originally designed, being able to access git and VM required an ssh key, and an inactive user could not register an ssh key because that required creating a record.  However, it is now possible to authenticate to shell VMs and http+git with just an API token.
+{% endcomment %}
+
+At this point, there are two ways a user can be activated.
+
+# An admin can set the @is_active@ field directly.  This runs @setup_on_activate@, which sets up oid_login_perm and group membership, but does not set up a repo or VM (even if @auto_setup_new_users_with_repository@ and/or @auto_setup_new_users_with_vm_uuid@ are set).
+# Self-activation using the @activate@ method of the users controller.
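+
+For example, self-activation could be done with the Python SDK; a minimal sketch, assuming the token of the user being activated and a placeholder uuid:
+
+<pre>
+import arvados
+
+# Calls the users controller's "activate" method, which checks
+# is_invited and the signed user agreements before activating.
+api = arvados.api('v1')
+api.users().activate(uuid="zzzzz-tpzed-xxxxxxxxxxxxxxx").execute()
+</pre>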
+
+h3. User agreements
+
+The @activate@ method of the users controller checks if the user @is_invited@ and whether the user has "signed" all the user agreements.
+
+@is_invited@ is true if any of these are true:
+* @is_active@ is true
+* @new_users_are_active@ is true
+* the user account has a permission link to read the system "all users" group.
+
+User agreements are accessed by getting a listing on the @user_agreements@ endpoint.  This returns a list of collection uuids.  This is executed as a system user, so it bypasses normal read permission checks.
+
+The available user agreements are represented in the Links table as
+
+<pre>
+{
+  "link_class": "signature",
+  "name": "require",
+  "tail_uuid": "*system user uuid*",
+  "head_uuid: "*collection uuid*"
+}
+</pre>
+
+The collection contains the user agreement text file.
+
+Workbench checks @is_invited@.  If true, it displays the clickthrough agreements, which the user can "sign".  If @is_invited@ is false, the user ends up at the "inactive user" page.
+
+The @user_agreements/sign@ endpoint creates a Link object:
+
+<pre>
+{
+  "link_class": "signature"
+  "name": "click",
+  "tail_uuid": "*user uuid*",
+  "head_uuid: "*collection uuid*"
+}
+</pre>
+
+This is executed as a system user, so it bypasses the restriction that inactive users cannot create objects.
+
+The @user_agreements/signatures@ endpoint returns the list of Link objects that represent signatures by the current user (created by @sign@).
+
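+For illustration, a sketch of inspecting agreements with the Python SDK (assuming the @user_agreements@ resource appears in your discovery document with these method names):
+
+<pre>
+import arvados
+
+api = arvados.api('v1')
+
+# Required agreements, returned as collection records
+for ua in api.user_agreements().list().execute()['items']:
+    print(ua['uuid'])
+
+# "click" signature Links already created by the current user
+print(api.user_agreements().signatures().execute())
+</pre>
+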
+h3. User profile
+
+The user profile is checked by workbench after checking if user agreements need to be signed.  The requirement to fill out the user profile is not enforced by the API server.
+
+h3(#pre-activated). Pre-activate user by email address
+
+You may create a user account for a user that has not yet logged in, and identify the user by email address.
+
+1. As an admin, create a user object:
+
+<pre>
+{
+  "email": "foo@example.com",
+  "username": "barney",
+  "is_active": true
+}
+</pre>
+
+2. Create a link object, where @tail_uuid@ is the user's email address, @head_uuid@ is the user object created in the previous step, and @xxxxx@ is the value of @uuid_prefix@ of the SSO server.
+
+<pre>
+{
+  "link_class": "permission",
+  "name": "can_login",
+  "tail_uuid": "email address",
+  "head_uuid: "user uuid",
+  "properties": {
+    "identity_url_prefix": "xxxxx-tpzed-"
+  }
+}
+</pre>
+
+3. When the user logs in for the first time, the email address will be recognized, and the user will be associated with the linked user object.
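+
+For reference, both steps might look like this with the Python SDK; a sketch, assuming an admin token (the email, username, and @xxxxx@ prefix are placeholders):
+
+<pre>
+import arvados
+
+api = arvados.api('v1')  # admin credentials assumed
+
+# Step 1: create the user object
+user = api.users().create(body={"user": {
+    "email": "foo@example.com",
+    "username": "barney",
+    "is_active": True,
+}}).execute()
+
+# Step 2: create the can_login permission link
+api.links().create(body={"link": {
+    "link_class": "permission",
+    "name": "can_login",
+    "tail_uuid": "foo@example.com",
+    "head_uuid": user["uuid"],
+    "properties": {"identity_url_prefix": "xxxxx-tpzed-"},
+}}).execute()
+</pre>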
+
+h3. Pre-activate federated user
+
+1. As admin, create a user object with the @uuid@ of the federated user (this is the user's uuid on their home cluster):
+
+<pre>
+{
+  "uuid": "home1-tpzed-000000000000000",
+  "email": "foo@example.com",
+  "username": "barney",
+  "is_active": true
+}
+</pre>
+
+2. When the user logs in, they will be associated with the existing user object.
+
+h3. Auto-activate federated users from trusted clusters
+
+In the API server config, set @auto_activate_users_from@ to a list of one or more five-character cluster ids.  A federated user from one of the listed clusters who is active (@is_active@ is true) on the home cluster will be automatically set up and activated on this cluster.
+
+h3(#deactivating_users). Deactivating users
+
+Setting @is_active@ to false is not sufficient to lock out a user.  The user can call @activate@ to become active again.  Instead, use @unsetup@:
+
+* Delete oid_login_perms
+* Delete git repository permission links
+* Delete VM login permission links
+* Remove from "All users" group
+* Delete any "signatures"
+* Clear preferences / profile
+* Mark as inactive
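+
+All of the above is handled by the @unsetup@ method.  For example, with the Python SDK (a sketch, assuming admin credentials and a placeholder uuid):
+
+<pre>
+import arvados
+
+api = arvados.api('v1')  # admin credentials assumed
+api.users().unsetup(uuid="zzzzz-tpzed-xxxxxxxxxxxxxxx").execute()
+</pre>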
+
+{% comment %}
+Does not revoke @is_admin@, so you can't unsetup an admin unless you turn admin off first.
+
+"inactive" does not prevent user from reading things they previously had access to.
+
+Does not revoke API tokens.
+{% endcomment %}
+
+h3. Activation flows
+
+h4. Private instance
+
+Policy: users must be manually approved.
+
+<pre>
+auto_setup_new_users: false
+new_users_are_active: false
+</pre>
+
+# User is created.  Not set up.  @is_active@ is false.
+# Workbench checks @is_invited@ and finds it is false.  User gets "inactive user" page.
+# Admin goes to the user page and either clicks "setup user" or manually sets @is_active@ to true.
+# Clicking "setup user" sets up the user.  This includes adding the user to "All users" which qualifies the user as @is_invited@.
+# On refreshing workbench, the user is still inactive, but is able to self-activate after signing clickthrough agreements (if any).
+# Alternately, directly setting @is_active@ to true also sets up the user, but workbench won't display clickthrough agreements (because the user is already active).
+
+h4. Federated instance
+
+Policy: users from other clusters in the federation are activated automatically; users from outside the federation must be manually approved.
+
+<pre>
+auto_setup_new_users: false
+new_users_are_active: false
+auto_activate_users_from: [home1]
+</pre>
+
+# Federated user arrives claiming to be from cluster 'home1'
+# API server authenticates user as being from cluster 'home1'
+# Because 'home1' is in @auto_activate_users_from@, the user is set up and activated.
+# User can immediately start using workbench.
+
+h4. Open instance
+
+Policy: anybody who shows up and signs the agreements is activated.
+
+<pre>
+auto_setup_new_users: true
+new_users_are_active: false
+</pre>
+
+# User is created and auto-setup.  At this point, @is_active@ is false, but user has been added to "All users" group.
+# Workbench checks @is_invited@ and finds it is true, because the user is a member of "All users" group.
+# Workbench presents user with list of user agreements, user reads and clicks "sign" for each one.
+# Workbench tries to activate user.
+# User is activated.
+
+h4. Developer instance
+
+Policy: avoid wasting developers' time during development/testing.
+
+<pre>
+auto_setup_new_users: true
+new_users_are_active: true
+</pre>
+
+# User is created, immediately auto-setup, and auto-activated.
+# User can immediately start using workbench.
diff --git a/doc/admin/collection-versioning.html.textile.liquid b/doc/admin/collection-versioning.html.textile.liquid
new file mode 100644 (file)
index 0000000..6da1756
--- /dev/null
@@ -0,0 +1,32 @@
+---
+layout: default
+navsection: admin
+title: Configuring collection versioning
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how to enable and configure the collection versioning feature on the API server.
+
+h3. API Server configuration
+
+There are two configuration settings that control this feature; both go in the @application.yml@ file.
+
+h4. Setting: @collection_versioning@ (Boolean. Default: false)
+
+If @true@, collection versioning is enabled, meaning that new version records can be created. Note that if you set @collection_versioning@ to @false@ after it has been enabled, old versions will still be accessible, but further changes will not be versioned.
+
+h4. Setting: @preserve_version_if_idle@ (Numeric. Default: -1)
+
+This setting controls the auto-save aspect of collection versioning, and can be set to:
+* @-1@: Never auto-save versions. Only save versions when the client asks for it by setting @preserve_version@ to @true@ on any given collection.
+* @0@: Preserve a version every time a collection gets a versionable update.
+* @N@ (where N > 0): Preserve a version when a collection gets a versionable update at least N seconds after the last time it was modified.
+
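+For example, when running with @-1@, a client could explicitly ask for the current version to be preserved via the Python SDK; a sketch, with a placeholder uuid:
+
+<pre>
+import arvados
+
+api = arvados.api('v1')
+# Setting preserve_version asks the API server to keep the current
+# version as a permanent record when the collection is next modified.
+api.collections().update(
+    uuid="zzzzz-4zz18-xxxxxxxxxxxxxxx",
+    body={"collection": {"preserve_version": True}}).execute()
+</pre>
+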
+h3. Using collection versioning
+
+"Discussed in the user guide":{{site.baseurl}}/user/topics/collection-versioning.html
\ No newline at end of file
diff --git a/doc/admin/federation.html.textile.liquid b/doc/admin/federation.html.textile.liquid
new file mode 100644 (file)
index 0000000..3728507
--- /dev/null
@@ -0,0 +1,74 @@
+---
+layout: default
+navsection: admin
+title: Configuring federation
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how to enable and configure federation capabilities between clusters.
+
+An overview on how this feature works is discussed in the "architecture section":{{site.baseurl}}/architecture/federation.html
+
+h3. API Server configuration
+
+To accept users from remote clusters, some settings need to be added to the @application.yml@ file. There are two ways in which a remote cluster can be identified: either explicitly, by listing its prefix-to-hostname mapping, or implicitly, by assuming the given remote cluster is public and belongs to the @.arvadosapi.com@ subdomain.
+
+For example, if you want to set up a private cluster federation, the following configuration will only allow access to users from @clsr2@ & @clsr3@:
+
+<pre>
+production:
+  remote_hosts:
+    clsr2: api.cluster2.com
+    clsr3: api.cluster3.com
+  remote_hosts_via_dns: false
+  auto_activate_users_from: []
+</pre>
+
+The additional @auto_activate_users_from@ setting can be used to allow users from the clusters in the federation to not only read but also create & update objects on the local cluster. This feature is covered in more detail in the "user activation section":{{site.baseurl}}/admin/activation.html. In the current example, only manually activated remote users would have full access to the local cluster.
+
+h3. Arvados controller & keepstores configuration
+
+Both @arvados-controller@ and @keepstore@ services also need to be configured, as they proxy requests to remote clusters when needed.
+
+Continuing the previous example, the necessary settings should be added to the @/etc/arvados/config.yml@ file as follows:
+
+<pre>
+Clusters:
+  clsr1:
+    RemoteClusters:
+      clsr2:
+        Host: api.cluster2.com
+        Proxy: true
+      clsr3:
+        Host: api.cluster3.com
+        Proxy: true
+</pre>
+
+Similar settings should be added to @clsr2@ & @clsr3@ hosts, so that all clusters in the federation can talk to each other.
+
+h3. Testing
+
+Following the above example, let's suppose @clsr1@ is our "home cluster"; that is to say, we use our @clsr1@ user account as our federated identity, and both the @clsr2@ and @clsr3@ remote clusters are set up to allow users from @clsr1@ and to auto-activate them. The first thing to do is log into a remote workbench using the local user token. This can be done following these steps:
+
+1. Log into the local workbench and get the user token
+2. Visit the remote workbench specifying the local user token by URL: @https://workbench.cluster2.com?api_token=token_from_clsr1@
+3. You should now be logged into @clsr2@ with your account from @clsr1@
+
+To further test the federation setup, you can create a collection on @clsr2@, upload some files, and copy its UUID. Next, while logged into a shell node on your home cluster, you should be able to fetch that collection by running:
+
+<pre>
+user@clsr1:~$ arv collection get --uuid clsr2-xvhdp-xxxxxxxxxxxxxxx
+</pre>
+
+The returned collection metadata should show the local user's uuid in the @owner_uuid@ field. This confirms that the @arvados-controller@ service is proxying requests correctly.
+
+One last test can be performed to confirm that the @keepstore@ services also recognize remote cluster prefixes and proxy the requests. You can ask for the previously created collection using any of the usual tools, for example:
+
+<pre>
+user@clsr1:~$ arv-get clsr2-xvhdp-xxxxxxxxxxxxxxx/uploaded_file .
+</pre>
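+
+The same check can be scripted with the Python SDK; a sketch, to be run with your @clsr1@ token and the real collection uuid substituted:
+
+<pre>
+import arvados
+
+api = arvados.api('v1')
+col = api.collections().get(uuid="clsr2-xvhdp-xxxxxxxxxxxxxxx").execute()
+print(col["owner_uuid"])  # expected: your clsr1 user uuid
+</pre>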
diff --git a/doc/admin/health-checks.html.textile.liquid b/doc/admin/health-checks.html.textile.liquid
new file mode 100644 (file)
index 0000000..eb71fda
--- /dev/null
@@ -0,0 +1,73 @@
+---
+layout: default
+navsection: admin
+title: Health checks
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Health check endpoints are found at @/_health/ping@ on many Arvados services.  The purpose of the health check is to offer a simple method of determining if a service can be reached and allow the service to self-report any problems, suitable for integrating into operational alert systems.
+
+To access health check endpoints, services must be configured with a "management token":management-token.html .
+
+Health check endpoints return a JSON object with the field @health@.  This has a value of either @OK@ or @ERROR@.  On error, it may also include a field @error@ with additional information.  Examples:
+
+<pre>
+{
+  "health": "OK"
+}
+</pre>
+
+<pre>
+{
+  "health": "ERROR"
+  "error": "Inverted polarity in the warp core"
+}
+</pre>
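+
+For example, a minimal polling sketch suitable for an alert system, assuming Python with the @requests@ library (the URL and token are placeholders):
+
+<pre>
+import sys
+import requests
+
+url = "https://keep0.zzzzz.arvadosapi.com:25107/_health/ping"
+token = "your_management_token_goes_here"
+
+resp = requests.get(url, headers={"Authorization": "Bearer " + token})
+body = resp.json()
+if resp.status_code != 200 or body.get("health") != "OK":
+    print("ERROR: {}".format(body.get("error", resp.status_code)))
+    sys.exit(1)
+print("OK")
+</pre>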
+
+h2. Healthcheck aggregator
+
+The service @arvados-health@ performs health checks on all configured services and returns a single value of @OK@ or @ERROR@ for the entire cluster.  It exposes the endpoint @/_health/all@.
+
+The healthcheck aggregator uses the @NodeProfile@ section of the cluster-wide @config.yml@ configuration file.  Here is an example.
+
+<pre>
+Clusters:
+  # The cluster uuid prefix
+  zzzzz:
+    ManagementToken: xyzzy
+    NodeProfile:
+      # For each node, the profile name corresponds to a
+      # locally-resolvable hostname, and describes which Arvados
+      # services are available on that machine.
+      api:
+        arvados-controller:
+          Listen: :8000
+        arvados-api-server:
+          Listen: :8001
+      manage:
+        arvados-node-manager:
+          Listen: :8002
+      workbench:
+        arvados-workbench:
+          Listen: :8003
+        arvados-ws:
+          Listen: :8004
+      keep:
+        keep-web:
+          Listen: :8005
+        keepproxy:
+          Listen: :8006
+        keep-balance:
+          Listen: :9005
+      keep0:
+        keepstore:
+          Listen: :25107
+      keep1:
+        keepstore:
+          Listen: :25107
+</pre>
diff --git a/doc/admin/index.html.textile.liquid b/doc/admin/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..97549ae
--- /dev/null
@@ -0,0 +1,13 @@
+---
+layout: default
+navsection: admin
+title: "Arvados admin overview"
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This section describes how to administer an Arvados cluster.  Cluster admins should already be familiar with the "Arvados architecture.":{{site.baseurl}}/architecture/index.html  For instructions on installing and configuring an Arvados cluster, see the "install guide.":{{site.baseurl}}/install/index.html
diff --git a/doc/admin/management-token.html.textile.liquid b/doc/admin/management-token.html.textile.liquid
new file mode 100644 (file)
index 0000000..5380f38
--- /dev/null
@@ -0,0 +1,56 @@
+---
+layout: default
+navsection: admin
+title: Management token
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+To enable and collect health checks and metrics, services must be configured with a "management token".
+
+Services must have @ManagementToken@ configured.  This is used to authorize access to monitoring endpoints.  If @ManagementToken@ is not configured, monitoring endpoints will return the error @404 disabled@.
+
+To access a monitoring endpoint, the requester must provide the HTTP header @Authorization: Bearer (ManagementToken)@.
+
+h2. API server
+
+Set @ManagementToken@ in the appropriate section of @application.yml@:
+
+<pre>
+production:
+  # Token to be included in all healthcheck requests. Disabled by default.
+  # Server expects request header of the format "Authorization: Bearer xxx"
+  ManagementToken: xxx
+</pre>
+
+h2. Node Manager
+
+Set @port@ (the listen port) and @ManagementToken@ in the @Manage@ section of @node-manager.ini@.
+
+<pre>
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+#address = 0.0.0.0
+
+# Management server port number (default -1, server is disabled)
+#port = 8989
+
+ManagementToken = xxx
+</pre>
+
+h2. Other services
+
+The following services also support monitoring.  Set @ManagementToken@ in the respective yaml config file for each service.
+
+* keepstore
+* keep-web
+* keepproxy
+* arv-git-httpd
+* websockets
diff --git a/doc/admin/merge-remote-account.html.textile.liquid b/doc/admin/merge-remote-account.html.textile.liquid
new file mode 100644 (file)
index 0000000..b69730c
--- /dev/null
@@ -0,0 +1,47 @@
+---
+layout: default
+navsection: admin
+title: "Migrating a user to a federated account"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+When you use federation capabilities to connect two or more clusters that were already operating, some users might already have accounts on multiple clusters. Typically, they will want to choose a single account on one of the clusters and abandon the rest, transferring all data or permissions from their old “remote” accounts to a single “home” account.
+
+This effect can be achieved by changing the UUIDs of the user records on the remote clusters. This should be done before the user has ever used federation features to access cluster B with cluster A credentials. Otherwise, see "managing conflicting accounts" below.
+
+For example, a user might have:
+* an account A on cluster A with uuid @aaaaa-tpzed-abcdefghijklmno@, and
+* an account B on cluster B with uuid @bbbbb-tpzed-lmnopqrstuvwxyz@
+
+An administrator at cluster B can merge the two accounts by renaming account B to account A.
+
+<notextile>
+<pre><code>#!/usr/bin/env python
+import arvados
+arvados.api('v1').users().update_uuid(
+    uuid="<span class="userinput">bbbbb-tpzed-lmnopqrstuvwxyz</span>",
+    new_uuid="<span class="userinput">aaaaa-tpzed-abcdefghijklmno</span>").execute()
+</code></pre></notextile>
+
+This should be done when the user is idle, i.e., not logged in and not running any jobs or containers.
+
+h2. Managing conflicting accounts
+
+If the user has already used federation capabilities to access cluster B using account A before the above migration has been done, this will have already created a database entry for account A on cluster B, and the above program will error out. To fix this, the same "update_uuid API call":../api/methods/users.html#update_uuid can be used to move the conflicting account out of the way first.
+
+<notextile>
+<pre><code>#!/usr/bin/env python
+import arvados
+import random
+import string
+random_chars = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(15))
+arvados.api('v1').users().update_uuid(
+    uuid="<span class="userinput">aaaaa-tpzed-abcdefghijklmno</span>",
+    new_uuid="bbbbb-tpzed-"+random_chars).execute()
+</code></pre></notextile>
+
+After this is done and the migration is complete, the affected user should wait 5 minutes for the authorization cache to expire before using the remote cluster.
diff --git a/doc/admin/metrics.html.textile.liquid b/doc/admin/metrics.html.textile.liquid
new file mode 100644 (file)
index 0000000..893eac1
--- /dev/null
@@ -0,0 +1,216 @@
+---
+layout: default
+navsection: admin
+title: Metrics
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Some Arvados services publish Prometheus/OpenMetrics-compatible metrics at @/metrics@, and some provide additional runtime status at @/status.json@.  Metrics can help you understand how components perform under load, find performance bottlenecks, and detect and diagnose problems.
+
+To access metrics endpoints, services must be configured with a "management token":management-token.html. When accessing a metrics endpoint, prefix the management token with @"Bearer "@ and supply it in the @Authorization@ request header.
+
+<pre>curl -sfH "Authorization: Bearer your_management_token_goes_here" "https://0.0.0.0:25107/status.json"
+</pre>
+
+h2. Keep-web
+
+Keep-web exports metrics at @/metrics@ -- e.g., @https://collections.zzzzz.arvadosapi.com/metrics@.
+
+table(table table-bordered table-condensed).
+|_. Name|_. Type|_. Description|
+|request_duration_seconds|summary|elapsed time between receiving a request and sending the last byte of the response body (segmented by HTTP request method and response status code)|
+|time_to_status_seconds|summary|elapsed time between receiving a request and sending the HTTP response status code (segmented by HTTP request method and response status code)|
+
+Metrics in the @arvados_keepweb_collectioncache@ namespace report keep-web's internal cache of Arvados collection metadata.
+
+table(table table-bordered table-condensed).
+|_. Name|_. Type|_. Description|
+|arvados_keepweb_collectioncache_requests|counter|cache lookups|
+|arvados_keepweb_collectioncache_api_calls|counter|outgoing API calls|
+|arvados_keepweb_collectioncache_permission_hits|counter|collection-to-permission cache hits|
+|arvados_keepweb_collectioncache_pdh_hits|counter|UUID-to-PDH cache hits|
+|arvados_keepweb_collectioncache_hits|counter|PDH-to-manifest cache hits|
+|arvados_keepweb_collectioncache_cached_manifests|gauge|number of collections in the cache|
+|arvados_keepweb_collectioncache_cached_manifest_bytes|gauge|memory consumed by cached collection manifests|
+
+h2. Keepstore
+
+Keepstore exports metrics at @/status.json@ -- e.g., @http://keep0.zzzzz.arvadosapi.com:25107/status.json@.
+
+h3. Root
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|Volumes|         array of "volumeStatusEnt":#volumeStatusEnt ||
+|BufferPool|      "PoolStatus":#PoolStatus ||
+|PullQueue|       "WorkQueueStatus":#WorkQueueStatus ||
+|TrashQueue|      "WorkQueueStatus":#WorkQueueStatus ||
+|RequestsCurrent| int ||
+|RequestsMax|     int ||
+|Version|         string ||
+
+h3(#volumeStatusEnt). volumeStatusEnt
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|Label|         string||
+|Status|        "VolumeStatus":#VolumeStatus ||
+|VolumeStats|   "ioStats":#ioStats ||
+
+h3(#VolumeStatus). VolumeStatus
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|MountPoint| string||
+|DeviceNum|  uint64||
+|BytesFree|  uint64||
+|BytesUsed|  uint64||
+
+h3(#ioStats). ioStats
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|Errors|     uint64||
+|Ops|        uint64||
+|CompareOps| uint64||
+|GetOps|     uint64||
+|PutOps|     uint64||
+|TouchOps|   uint64||
+|InBytes|    uint64||
+|OutBytes|   uint64||
+
+h3(#PoolStatus). PoolStatus
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|BytesAllocatedCumulative|      uint64||
+|BuffersMax|   int||
+|BuffersInUse| int||
+
+h3(#WorkQueueStatus). WorkQueueStatus
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|InProgress| int||
+|Queued|     int||
+
+h3. Example response
+
+<pre>
+{
+  "Volumes": [
+    {
+      "Label": "[UnixVolume /var/lib/arvados/keep0]",
+      "Status": {
+        "MountPoint": "/var/lib/arvados/keep0",
+        "DeviceNum": 65029,
+        "BytesFree": 222532972544,
+        "BytesUsed": 435456679936
+      },
+      "InternalStats": {
+        "Errors": 0,
+        "InBytes": 1111,
+        "OutBytes": 0,
+        "OpenOps": 1,
+        "StatOps": 4,
+        "FlockOps": 0,
+        "UtimesOps": 0,
+        "CreateOps": 0,
+        "RenameOps": 0,
+        "UnlinkOps": 0,
+        "ReaddirOps": 0
+      }
+    }
+  ],
+  "BufferPool": {
+    "BytesAllocatedCumulative": 67108864,
+    "BuffersMax": 20,
+    "BuffersInUse": 0
+  },
+  "PullQueue": {
+    "InProgress": 0,
+    "Queued": 0
+  },
+  "TrashQueue": {
+    "InProgress": 0,
+    "Queued": 0
+  },
+  "RequestsCurrent": 1,
+  "RequestsMax": 40,
+  "Version": "dev"
+}
+</pre>
+
+h2. Keep-balance
+
+Keep-balance exports metrics at @/metrics@ -- e.g., @http://keep.zzzzz.arvadosapi.com:9005/metrics@.
+
+table(table table-bordered table-condensed).
+|_. Name|_. Type|_. Description|
+|arvados_keep_total_{replicas,blocks,bytes}|gauge|stored data (stored in backend volumes, whether referenced or not)|
+|arvados_keep_garbage_{replicas,blocks,bytes}|gauge|garbage data (unreferenced, and old enough to trash)|
+|arvados_keep_transient_{replicas,blocks,bytes}|gauge|transient data (unreferenced, but too new to trash)|
+|arvados_keep_overreplicated_{replicas,blocks,bytes}|gauge|overreplicated data (more replicas exist than are needed)|
+|arvados_keep_underreplicated_{replicas,blocks,bytes}|gauge|underreplicated data (fewer replicas exist than are needed)|
+|arvados_keep_lost_{replicas,blocks,bytes}|gauge|lost data (referenced by collections, but not found on any backend volume)|
+|arvados_keep_dedup_block_ratio|gauge|deduplication ratio (block references in collections &divide; distinct blocks referenced)|
+|arvados_keep_dedup_byte_ratio|gauge|deduplication ratio (block references in collections &divide; distinct blocks referenced, weighted by block size)|
+|arvados_keepbalance_get_state_seconds|summary|time to get all collections and keepstore volume indexes for one iteration|
+|arvados_keepbalance_changeset_compute_seconds|summary|time to compute changesets for one iteration|
+|arvados_keepbalance_send_pull_list_seconds|summary|time to send pull lists to all keepstore servers for one iteration|
+|arvados_keepbalance_send_trash_list_seconds|summary|time to send trash lists to all keepstore servers for one iteration|
+|arvados_keepbalance_sweep_seconds|summary|time to complete one iteration|
+
+Each @arvados_keep_@ storage state statistic above is presented as a set of three metrics:
+
+table(table table-bordered table-condensed).
+|*_blocks|distinct block hashes|
+|*_bytes|bytes stored on backend volumes|
+|*_replicas|objects/files stored on backend volumes|
+
+h2. Node manager
+
+The node manager status endpoint provides a snapshot of internal status at the time of the most recent wishlist update.
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|nodes_booting|int|Number of nodes in booting state|
+|nodes_unpaired|int|Number of nodes in unpaired state|
+|nodes_busy|int|Number of nodes in busy state|
+|nodes_idle|int|Number of nodes in idle state|
+|nodes_fail|int|Number of nodes in fail state|
+|nodes_down|int|Number of nodes in down state|
+|nodes_shutdown|int|Number of nodes in shutdown state|
+|nodes_wish|int|Number of nodes in the current wishlist|
+|node_quota|int|Current node count ceiling due to cloud quota limits|
+|config_max_nodes|int|Configured max node count|
+
+h3. Example
+
+<pre>
+{
+  "actor_exceptions": 0,
+  "idle_times": {
+    "compute1": 0,
+    "compute3": 0,
+    "compute2": 0,
+    "compute4": 0
+  },
+  "create_node_errors": 0,
+  "destroy_node_errors": 0,
+  "nodes_idle": 0,
+  "config_max_nodes": 8,
+  "list_nodes_errors": 0,
+  "node_quota": 8,
+  "Version": "1.1.4.20180719160944",
+  "nodes_wish": 0,
+  "nodes_unpaired": 0,
+  "nodes_busy": 4,
+  "boot_failures": 0
+}
+</pre>
diff --git a/doc/admin/migrating-providers.html.textile.liquid b/doc/admin/migrating-providers.html.textile.liquid
new file mode 100644 (file)
index 0000000..9231dc2
--- /dev/null
@@ -0,0 +1,41 @@
+---
+layout: default
+navsection: admin
+title: "Migrating account providers"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how to enable users to use more than one provider to log into the same Arvados account.  This can be used to migrate account providers, for example, from LDAP to Google.  In order to do this, users must be able to log into both the "old" and "new" providers.
+
+h2. Configure multiple providers in SSO
+
+In @application.yml@ for the SSO server, enable both @google_oauth2@ and @ldap@ providers:
+
+<pre>
+production:
+  google_oauth2_client_id: abcd
+  google_oauth2_client_secret: abcd
+
+  use_ldap:
+    title: Example LDAP
+    host: ldap.example.com
+    port: 636
+    method: ssl
+    base: "ou=Users, dc=example, dc=com"
+    uid: uid
+    username: uid
+</pre>
+
+Restart the SSO server after changing the configuration.
+
+h2. Link accounts
+
+Instruct users to go through the process of "linking accounts":{{site.baseurl}}/user/topics/link-accounts.html
+
+After linking accounts, users can use the new provider to access their existing Arvados account.
+
+Once all users have migrated, the old account provider can be removed from the SSO configuration.
diff --git a/doc/admin/spot-instances.html.textile.liquid b/doc/admin/spot-instances.html.textile.liquid
new file mode 100644 (file)
index 0000000..1c61b60
--- /dev/null
@@ -0,0 +1,78 @@
+---
+layout: default
+navsection: admin
+title: Using AWS Spot instances
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how to set up the system to take advantage of "Amazon's EC2 spot instances":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html.
+
+h3. Nodemanager
+
+Nodemanager's configured cloud sizes should include the @preemptible@ boolean parameter. For example, for every on-demand cloud node size, you could create a @.spot@ variant, like this:
+
+<pre>
+[Size m4.large]
+cores = 2
+scratch = 32000
+
+[Size m4.large.spot]
+cores = 2
+instance_type = m4.large
+preemptible = true
+scratch = 32000
+</pre>
+
+h3. Slurm dispatcher
+
+The @crunch-dispatch-slurm@ service needs a matching instance type configuration in @/etc/arvados/config.yml@, following the previous example:
+
+<pre>
+Clusters:
+  uuid_prefix:
+    InstanceTypes:
+    - Name: m4.large
+      VCPUs: 2
+      RAM: 7782000000
+      Scratch: 32000000000
+      Price: 0.1
+    - Name: m4.large.spot
+      Preemptible: true
+      VCPUs: 2
+      RAM: 7782000000
+      Scratch: 32000000000
+      Price: 0.1
+</pre>
+
+@InstanceType@ names should match those defined in nodemanager's config file, because it is @crunch-dispatch-slurm@'s job to select the instance type and communicate the decision to @nodemanager@ via Slurm.
+
+h3. API Server
+
+Container requests need to include the @preemptible@ scheduling parameter to make the dispatcher request a spot instance. The API Server configuration file includes an option that, when active, will automatically assign the @preemptible@ parameter to any new child container request that doesn't already have it. To activate this feature, add the following to the @application.yml@ file:
+
+<pre>
+preemptible_instances: true
+</pre>
+
+With this configuration active, child container requests should include the @preemptible = false@ parameter at creation time to avoid being scheduled for spot instance usage.
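+
+For example, a container request that opts out of spot instances could look like this with the Python SDK; a sketch, with placeholder image, command, and constraints:
+
+<pre>
+import arvados
+
+api = arvados.api('v1')
+api.container_requests().create(body={"container_request": {
+    "name": "on-demand step",
+    "state": "Committed",
+    "priority": 1,
+    "container_image": "arvados/jobs",
+    "command": ["echo", "hello"],
+    "output_path": "/out",
+    "cwd": "/out",
+    "runtime_constraints": {"vcpus": 1, "ram": 1 << 30},
+    # Explicit false opts out of the auto-assignment described above
+    "scheduling_parameters": {"preemptible": False},
+}}).execute()
+</pre>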
+
+h3. AWS Permissions
+
+When requesting spot instances, Amazon's API may return an authorization error depending on how users and permissions are set on the account. If this is the case, check nodemanager's log for:
+
+<pre>
+BaseHTTPError: AuthFailure.ServiceLinkedRoleCreationNotPermitted: The provided credentials do not have permission to create the service-linked role for EC2 Spot Instances.
+</pre>
+
+The account needs to have a service-linked role created. This can be done by logging into the AWS account, going to _IAM Management_ &rarr; _Roles_, and creating the @AWSServiceRoleForEC2Spot@ role: click the @Create@ button, then select the @EC2@ service and the @EC2 - Spot Instances@ use case.
+
+h3. Cost Tracking
+
+Amazon's spot instance prices are declared at instance request time and are defined by the maximum price that the user is willing to pay per hour. By default, this price is the same as the on-demand price of each instance type, and it is the setting that nodemanager uses for now, as it doesn't attach any pricing data to the spot instance request.
+
+The real price that a spot instance has at any point in time is discovered at the end of each usage hour, depending on instance demand. For this reason, AWS provides a data feed subscription to get hourly logs, as described on "Amazon's User Guide":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html.
\ No newline at end of file
diff --git a/doc/admin/storage-classes.html.textile.liquid b/doc/admin/storage-classes.html.textile.liquid
new file mode 100644 (file)
index 0000000..1a6420d
--- /dev/null
@@ -0,0 +1,47 @@
+---
+layout: default
+navsection: admin
+title: Configuring storage classes
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Storage classes (alternatively known as "storage tiers") allow you to control which volumes should be used to store particular collection data blocks.  This can be used to implement data storage policies such as moving data to archival storage.
+
+The storage classes for each volume are set in the per-volume "keepstore configuration":{{site.baseurl}}/install/install-keepstore.html
+
+<pre>
+Volumes:
+ - ... Volume configuration ...
+   #
+   # If no storage classes are specified, will use [default]
+   #
+   StorageClasses: null
+
+ - ... Volume configuration ...
+   #
+   # Specify this volume is in the "archival" storage class.
+   #
+   StorageClasses: [archival]
+
+</pre>
+
+Names of storage classes are internal to the cluster and decided by the administrator.  Aside from "default", Arvados currently does not define any standard storage class names.
+
+h3. Using storage classes
+
+"Discussed in the user guide":{{site.baseurl}}/user/topics/storage-classes.html
+
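+For example, a sketch of requesting the archival class for an existing collection with the Python SDK, assuming your API server version exposes the @storage_classes_desired@ collection attribute (the uuid is a placeholder):
+
+<pre>
+import arvados
+
+api = arvados.api('v1')
+# keep-balance will then move the collection's blocks to a volume
+# that carries the "archival" storage class.
+api.collections().update(
+    uuid="zzzzz-4zz18-xxxxxxxxxxxxxxx",
+    body={"collection": {"storage_classes_desired": ["archival"]}}).execute()
+</pre>
+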
+h3. Storage management notes
+
+The "keep-balance":{{site.baseurl}}/install/install-keep-balance.html service is responsible for deciding which blocks should be placed on which keepstore volumes.  As part of the rebalancing behavior, it will determine where a block should go in order to satisfy the desired storage classes, and issue pull requests to copy the block from its original volume to the desired volume.  The block will subsequently be moved to trash on the original volume.
+
+If a block appears in multiple collections with different storage classes, the block will be stored in separate volumes for each storage class, even if that results in overreplication, unless there is a volume which has all the desired storage classes.
+
+If a collection has a desired storage class which is not available in any keepstore volume, the collection's blocks will remain in place, and an error will appear in the @keep-balance@ logs.
+
+This feature does not provide a hard guarantee on where data will be stored.  Data may be written to default storage and moved to the desired storage class later.  If controlling data locality is a hard requirement (such as legal restrictions on the location of data) we recommend setting up multiple Arvados clusters.
diff --git a/doc/admin/upgrade-crunch2.html.textile.liquid b/doc/admin/upgrade-crunch2.html.textile.liquid
new file mode 100644 (file)
index 0000000..1946358
--- /dev/null
@@ -0,0 +1,53 @@
+---
+layout: default
+navsection: admin
+title: Upgrading to Containers API
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The "containers" API is the recommended way to submit compute work to Arvados.  It supersedes the "jobs" API, which is deprecated.
+
+h2. Benefits over the "jobs" API
+
+* Simpler and more robust execution with fewer points of failure
+* Automatic retry for containers that fail to run to completion due to infrastructure errors
+* Scales to thousands of simultaneous containers
+* Able to support alternate schedulers/dispatchers in addition to slurm
+* Improved logging: different streams (logs, metrics) are stored in different files in the log collection
+* Records more upfront detail about the compute node, and additional metrics (such as available disk space over the course of the container run)
+* Better behavior when deciding whether to reuse past work -- pick the oldest container that matches the criteria
+* Can reuse running containers between workflows; cancelling a workflow will not cancel containers that are shared with other workflows
+* Supports setting time-to-live on intermediate output collections for automatic cleanup
+* Supports "secret" inputs, suitable for passwords or access tokens, which are hidden from the API responses and logs, and forgotten after use
+* Does not require "git" for dispatching work
+
+h2. Differences from the "jobs" API
+
+Containers cannot reuse jobs (but can reuse other containers)
+
+Uses the service "crunch-dispatch-slurm":{{site.baseurl}}/install/crunch2-slurm/install-dispatch.html instead of @crunch-dispatch.rb@
+
+Non-CWL Arvados "pipeline templates" are not supported with containers.  Pipeline templates should be rewritten in CWL and registered as "Workflows".
+
+The containers API is incompatible with the jobs API; code that integrates with the "jobs" API must be updated to work with containers
+
+Containers have network access disabled by default
+
+The keep mount only exposes collections which are explicitly listed as inputs
+
+h2. Migrating to "containers" API
+
+Run your workflows using @arvados-cwl-runner --api=containers@ (only necessary if both the jobs and containers APIs are enabled; if the jobs API is disabled, the containers API will be used automatically)
+
+Register your workflows so they can be run from workbench using @arvados-cwl-runner --api=containers --create-workflow@
+
+Read "Migrating running CWL on jobs API to containers API":{{site.baseurl}}/user/cwl/cwl-style.html#migrate
+
+Use @arv:APIRequirement: {}@ in the @requirements@ section of your CWL file to enable network access for the container (see "Arvados CWL Extensions":{{site.baseurl}}/user/cwl/cwl-extensions.html)
+
+For examples on how to manage container requests with the Python SDK, see "Python cookbook":{{site.baseurl}}/sdk/python/cookbook.html
diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
new file mode 100644 (file)
index 0000000..6e2e6cb
--- /dev/null
@@ -0,0 +1,456 @@
+---
+layout: default
+navsection: admin
+title: "Upgrading Arvados and Release notes"
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+What you need to know and do in order to upgrade your Arvados installation.
+
+h2. General process
+
+# Wait for the cluster to be idle and stop Arvados services.
+# Install new packages using @apt-get upgrade@ or @yum upgrade@.
+# Package installation scripts will perform any necessary data migrations.
+# Consult upgrade notes below to see if any manual configuration updates are necessary.
+# Restart Arvados services.
+
+h2. Upgrade notes
+
+Some versions introduce changes that require special attention when upgrading: e.g., there is a new service to install, or there is a change to the default configuration that you might need to override in order to preserve the old behavior.
+
+{% comment %}
+Note to developers: Add new items at the top. Include the date, issue number, commit, and considerations/instructions for those about to upgrade.
+
+TODO: extract this information based on git commit messages and generate changelogs / release notes automatically.
+{% endcomment %}
+
+h3. current master branch
+
+h4. Stricter collection manifest validation on the API server
+
+As a consequence of "#14482":https://dev.arvados.org/issues/14482, the Ruby SDK does a more rigorous collection manifest validation. Collections created after 2015-05 are unlikely to be invalid, however you may check for invalid manifests using the script below.
+
+You could set up a new rvm gemset and install the specific arvados gem for testing, like so:
+
+<notextile>
+<pre><code>~$ <span class="userinput">rvm gemset create rubysdk-test</span>
+~$ <span class="userinput">rvm gemset use rubysdk-test</span>
+~$ <span class="userinput">gem install arvados -v 1.3.1.20190301212059</span>
+</code></pre>
+</notextile>
+
+Next, you can run the following script using admin credentials; it will scan the whole collection database and report any collection that didn't pass the check:
+
+{% codeblock as ruby %}
+require 'arvados'
+require 'arvados/keep'
+
+api = Arvados.new
+offset = 0
+batch_size = 100
+invalid = []
+
+while true
+    begin
+        req = api.collection.index(
+            :select => [:uuid, :created_at, :manifest_text],
+            :include_trash => true, :include_old_versions => true,
+            :limit => batch_size, :offset => offset)
+    rescue
+        invalid.each {|c| puts "#{c[:uuid]} (Created at #{c[:created_at]}): #{c[:error]}" }
+        raise
+    end
+
+    req[:items].each do |col|
+        begin
+            Keep::Manifest.validate! col[:manifest_text]
+        rescue Exception => e
+            puts "Collection #{col[:uuid]} manifest not valid"
+            invalid << {uuid: col[:uuid], error: e, created_at: col[:created_at]}
+        end
+    end
+    puts "Checked #{offset} / #{req[:items_available]} - Invalid: #{invalid.size}"
+    offset += req[:limit]
+    break if offset > req[:items_available]
+end
+
+if invalid.empty?
+    puts "No invalid collection manifests found"
+else
+    invalid.each {|c| puts "#{c[:uuid]} (Created at #{c[:created_at]}): #{c[:error]}" }
+end
+{% endcodeblock %}
+
+The script will print a final report enumerating any invalid collections by UUID, with creation date and error message, so you can take the proper corrective measures if needed.
+
+h4. Python packaging change
+
+As part of story "#9945":https://dev.arvados.org/issues/9945, the distribution packaging (deb/rpm) of our Python packages has changed. These packages now include a built-in virtualenv to reduce dependencies on system packages. We have also stopped packaging and publishing backports for all the Python dependencies of our packages, as they are no longer needed.
+
+One practical consequence of this change is that the use of the Arvados Python SDK (aka "import arvados") will require a tweak if the SDK was installed from a distribution package. It now requires loading the virtualenv environment from our packages. The "Install documentation for the Arvados Python SDK":/sdk/python/sdk-python.html reflects this change. This does not affect the use of the command line tools (e.g. @arv-get@).
+
+Python scripts that rely on the distribution Arvados Python SDK packages to import the Arvados SDK will need to be tweaked to load the correct Python environment.
+
+This can be done by activating the virtualenv outside of the script:
+
+<notextile>
+<pre>~$ <code class="userinput">source /usr/share/python2.7/dist/python-arvados-python-client/bin/activate</code>
+(python-arvados-python-client) ~$ <code class="userinput">path-to-the-python-script</code>
+</pre>
+</notextile>
+
+Or alternatively, by updating the shebang line at the start of the script to:
+
+<notextile>
+<pre>
+#!/usr/share/python2.7/dist/python-arvados-python-client/bin/python
+</pre>
+</notextile>
+
+h4. python-arvados-cwl-runner deb/rpm package now conflicts with python-cwltool deb/rpm package
+
+As part of story "#9945":https://dev.arvados.org/issues/9945, the distribution packaging (deb/rpm) of our Python packages has changed. The python-arvados-cwl-runner package now includes a version of cwltool. If present, the python-cwltool and cwltool distribution packages will need to be uninstalled before the python-arvados-cwl-runner deb or rpm package can be installed.
+
+h4. Centos7 Python 3 dependency upgraded to rh-python35
+
+As part of story "#9945":https://dev.arvados.org/issues/9945, the Python 3 dependency for Centos7 Arvados packages was upgraded from SCL python33 to rh-python35.
+
+h4. Centos7 package for libpam-arvados depends on the python-pam package, which is available from EPEL
+
+As part of story "#9945":https://dev.arvados.org/issues/9945, it was discovered that the Centos7 package for libpam-arvados was missing a dependency on the python-pam package, which is available from the EPEL repository. The dependency has been added to the libpam-arvados package. This means that going forward, the EPEL repository will need to be enabled to install libpam-arvados on Centos7.
+
+h3. v1.3.0 (2018-12-05)
+
+This release includes several database migrations, which will be executed automatically as part of the API server upgrade. On large Arvados installations, these migrations will take a while. We've seen the upgrade take 30 minutes or more on installations with a lot of collections.
+
+The @arvados-controller@ component now requires the /etc/arvados/config.yml file to be present. See <a href="{{ site.baseurl }}/install/install-controller.html#configuration">the @arvados-controller@ installation instructions</a>.
+
+Support for the deprecated "jobs" API is broken in this release.  Users who rely on it should not upgrade.  This will be fixed in an upcoming 1.3.1 patch release, however users are "encouraged to migrate":upgrade-crunch2.html as support for the "jobs" API will be dropped in an upcoming release.  Users who are already using the "containers" API are not affected.
+
+h3. v1.2.1 (2018-11-26)
+
+There are no special upgrade notes for this release.
+
+h3. v1.2.0 (2018-09-05)
+
+h4. Regenerate Postgres table statistics
+
+It is recommended to regenerate the table statistics for Postgres after upgrading to v1.2.0. If autovacuum is enabled on your installation, this script would do the trick:
+
+<pre>
+#!/bin/bash
+
+set -e
+set -u
+
+tables=$(echo "\dt" | psql arvados_production | grep public | awk '{print $3}')
+
+for t in $tables; do
+    echo "echo 'analyze $t' | psql arvados_production"
+    time echo "analyze $t" | psql arvados_production
+done
+</pre>
+
+If you also need to do the vacuum, you could adapt the script to run 'vacuum analyze' instead of 'analyze'.
+
+h4. New component: arvados-controller
+
+Commit "db5107dca":https://dev.arvados.org/projects/arvados/repository/revisions/db5107dca adds a new system service, arvados-controller. More detail is available in story "#13496":https://dev.arvados.org/issues/13497.
+
+To add the Arvados Controller to your system please refer to the "installation instructions":../install/install-controller.html after upgrading your system to 1.2.0.
+
+Verify your setup by confirming that API calls appear in the controller's logs (_e.g._, @journalctl -fu arvados-controller@) while loading a workbench page.
+
+h3. v1.1.4 (2018-04-10)
+
+h4. arvados-cwl-runner regressions (2018-04-05)
+
+<strong>Secondary files missing from toplevel workflow inputs</strong>
+
+This only affects workflows that rely on implicit discovery of secondaryFiles.
+
+If a workflow input did not declare @secondaryFiles@ corresponding to the @secondaryFiles@ of the workflow steps that use it, the workflow would inconsistently succeed or fail depending on whether the input values were specified as local files or referenced an existing collection (and on whether that collection contained the secondary files).  To ensure consistent behavior, the workflow is now required to declare, in its top-level inputs, any @secondaryFiles@ that are expected by workflow steps.
+
+As an example, the following workflow will fail because the @toplevel_input@ does not declare the @secondaryFiles@ that are expected by @step_input@:
+
+<pre>
+class: Workflow
+cwlVersion: v1.0
+inputs:
+  toplevel_input: File
+outputs: []
+steps:
+  step1:
+    in:
+      step_input: toplevel_input
+    out: []
+    run:
+      id: sub
+      class: CommandLineTool
+      inputs:
+        step_input:
+          type: File
+          secondaryFiles:
+            - .idx
+      outputs: []
+      baseCommand: echo
+</pre>
+
+When run, this produces an error like this:
+
+<pre>
+cwltool ERROR: [step step1] Cannot make job: Missing required secondary file 'hello.txt.idx' from file object: {
+    "basename": "hello.txt",
+    "class": "File",
+    "location": "keep:ade9d0e032044bd7f58daaecc0d06bc6+51/hello.txt",
+    "size": 0,
+    "nameroot": "hello",
+    "nameext": ".txt",
+    "secondaryFiles": []
+}
+</pre>
+
+To fix this error, add the appropriate @secondaryFiles@ section to @toplevel_input@:
+
+<notextile>
+<pre><code>class: Workflow
+cwlVersion: v1.0
+inputs:
+  <span class="userinput">toplevel_input:
+    type: File
+    secondaryFiles:
+      - .idx</span>
+outputs: []
+steps:
+  step1:
+    in:
+      step_input: toplevel_input
+    out: []
+    run:
+      id: sub
+      class: CommandLineTool
+      inputs:
+        step_input:
+          type: File
+          secondaryFiles:
+            - .idx
+      outputs: []
+      baseCommand: echo
+</code></pre>
+</notextile>
+
+This bug has been fixed in Arvados release v1.2.0.
+
+<strong>Secondary files on default file inputs</strong>
+
+@File@ inputs that have default values and also expect @secondaryFiles@ will fail to upload the default @secondaryFiles@.  As an example, the following case will fail:
+
+<pre>
+class: CommandLineTool
+inputs:
+  step_input:
+    type: File
+    secondaryFiles:
+      - .idx
+    default:
+      class: File
+      location: hello.txt
+outputs: []
+baseCommand: echo
+</pre>
+
+When run, this produces an error like this:
+
+<pre>
+2018-05-03 10:58:47 cwltool ERROR: Unhandled error, try again with --debug for more information:
+  [Errno 2] File not found: u'hello.txt.idx'
+</pre>
+
+To fix this, manually upload the primary and secondary files to Keep and explicitly declare @secondaryFiles@ on the default primary file:
+
+<notextile>
+<pre><code>class: CommandLineTool
+inputs:
+  step_input:
+    type: File
+    secondaryFiles:
+      - .idx
+    <span class="userinput">default:
+      class: File
+      location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt
+      secondaryFiles:
+       - class: File
+         location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt.idx</span>
+outputs: []
+baseCommand: echo
+</code></pre>
+</notextile>
+
+This bug has been fixed in Arvados release v1.2.0.
+
+h3. v1.1.3 (2018-02-08)
+
+There are no special upgrade notes for this release.
+
+h3. v1.1.2 (2017-12-22)
+
+h4. The minimum version for Postgres is now 9.4 (2017-12-08)
+
+As part of story "#11908":https://dev.arvados.org/issues/11908, commit "8f987a9271":https://dev.arvados.org/projects/arvados/repository/revisions/8f987a9271 introduces a dependency on Postgres 9.4. Previously, Arvados required Postgres 9.3.
+
+* Debian 8 (pg 9.4) and Debian 9 (pg 9.6) do not require an upgrade
+* Ubuntu 16.04 (pg 9.5) does not require an upgrade
+* Ubuntu 14.04 (pg 9.3) requires upgrade to Postgres 9.4: https://www.postgresql.org/download/linux/ubuntu/
+* CentOS 7 and RHEL7 (pg 9.2) require upgrade to Postgres 9.4. It is necessary to migrate the contents of your database: https://www.postgresql.org/docs/9.0/static/migration.html
+*# Create a database backup using @pg_dump@
+*# Install the @rh-postgresql94@ backport package from either Software Collections: http://doc.arvados.org/install/install-postgresql.html or the Postgres developers: https://www.postgresql.org/download/linux/redhat/
+*# Restore from the backup using @psql@
+
+h3. v1.1.1 (2017-11-30)
+
+There are no special upgrade notes for this release.
+
+h3. v1.1.0 (2017-10-24)
+
+h4. The minimum version for Postgres is now 9.3 (2017-09-25)
+
+As part of story "#12032":https://dev.arvados.org/issues/12032, commit "68bdf4cbb1":https://dev.arvados.org/projects/arvados/repository/revisions/68bdf4cbb1 introduces a dependency on Postgres 9.3. Previously, Arvados required Postgres 9.1.
+
+* Debian 8 (pg 9.4) and Debian 9 (pg 9.6) do not require an upgrade
+* Ubuntu 16.04 (pg 9.5) does not require an upgrade
+* Ubuntu 14.04 (pg 9.3) is compatible, however upgrading to Postgres 9.4 is recommended: https://www.postgresql.org/download/linux/ubuntu/
+* CentOS 7 and RHEL7 (pg 9.2) should upgrade to Postgres 9.4. It is necessary to migrate the contents of your database: https://www.postgresql.org/docs/9.0/static/migration.html
+*# Create a database backup using @pg_dump@
+*# Install the @rh-postgresql94@ backport package from either Software Collections: http://doc.arvados.org/install/install-postgresql.html or the Postgres developers: https://www.postgresql.org/download/linux/redhat/
+*# Restore from the backup using @psql@
+
+h3. Older versions
+
+h4. Upgrade slower than usual (2017-06-30)
+
+As part of story "#11807":https://dev.arvados.org/issues/11807, commit "55aafbb":https://dev.arvados.org/projects/arvados/repository/revisions/55aafbb converts old "jobs" database records from YAML to JSON, making the upgrade process slower than usual.
+
+* The migration can take some time if your database contains a substantial number of YAML-serialized rows (i.e., you installed Arvados before March 3, 2017 "660a614":https://dev.arvados.org/projects/arvados/repository/revisions/660a614 and used the jobs/pipelines APIs). Otherwise, the upgrade will be no slower than usual.
+* The conversion runs as a database migration, i.e., during the deb/rpm package upgrade process, while your API server is unavailable.
+* Expect it to take about 1 minute per 20K jobs that have ever been created/run.
+
+h4. Service discovery overhead change in keep-web (2017-06-05)
+
+As part of story "#9005":https://dev.arvados.org/issues/9005, commit "cb230b0":https://dev.arvados.org/projects/arvados/repository/revisions/cb230b0 reduces service discovery overhead in keep-web requests.
+
+* When upgrading keep-web _or keepproxy_ to/past this version, make sure to update API server as well. Otherwise, a bad token in a request can cause keep-web to fail future requests until either keep-web restarts or API server gets upgraded.
+
+h4. Node manager now has an http endpoint for management (2017-04-12)
+
+As part of story "#11349":https://dev.arvados.org/issues/11349, commit "2c094e2":https://dev.arvados.org/projects/arvados/repository/revisions/2c094e2 adds a "management" http server to nodemanager.
+
+* To enable it, add to your configuration file: <pre>[Manage]
+  address = 127.0.0.1
+  port = 8989</pre> (see example configuration files in source:services/nodemanager/doc or https://doc.arvados.org/install/install-nodemanager.html for more info)
+* The server responds to @http://{address}:{port}/status.json@ with a summary of how many nodes are in each state (booting, busy, shutdown, etc.)
+
+h4. New websockets component (2017-03-23)
+
+As part of story "#10766":https://dev.arvados.org/issues/10766, commit "e8cc0d7":https://dev.arvados.org/projects/arvados/repository/revisions/e8cc0d7 replaces puma with arvados-ws as the recommended websocket server.
+* See http://doc.arvados.org/install/install-ws.html for install/upgrade instructions.
+* Remove the old puma server after the upgrade is complete. Example, with runit: <pre>
+$ sudo sv down /etc/sv/puma
+$ sudo rm -r /etc/sv/puma
+</pre> Example, with systemd: <pre>
+$ systemctl disable puma
+$ systemctl stop puma
+</pre>
+
+h4. Change of database encoding for hashes and arrays (2017-03-06)
+
+As part of story "#11168":https://dev.arvados.org/issues/11168, commit "660a614":https://dev.arvados.org/projects/arvados/repository/revisions/660a614 uses JSON instead of YAML to encode hashes and arrays in the database.
+
+* Aside from a slight performance improvement, this should have no externally visible effect.
+* Downgrading past this version is not supported, and is likely to cause errors. If this happens, the solution is to upgrade past this version.
+* After upgrading, make sure to restart puma and crunch-dispatch-* processes.
+
+h4. Docker image format compatibility check (2017-02-03)
+
+As part of story "#10969":https://dev.arvados.org/issues/10969, commit "74a9dec":https://dev.arvados.org/projects/arvados/repository/revisions/74a9dec introduces a Docker image format compatibility check: the @arv keep docker@ command prevents users from inadvertently saving docker images that compute nodes won't be able to run.
+* If your compute nodes run a version of *docker older than 1.10* you must override the default by adding to your API server configuration (@/etc/arvados/api/application.yml@): <pre><code class="yaml">docker_image_formats: ["v1"]</code></pre>
+* Refer to the comments above @docker_image_formats@ in @/var/www/arvados-api/current/config/application.default.yml@ or source:services/api/config/application.default.yml or issue "#10969":https://dev.arvados.org/issues/10969 for more detail.
+* *NOTE:* This does *not* include any support for migrating existing Docker images from v1 to v2 format. This will come later: for now, sites running Docker 1.9 or earlier should still *avoid upgrading Docker further than 1.9.*
+
+h4. Debian and RPM packages now have systemd unit files (2016-09-27)
+
+Several Debian and RPM packages -- keep-balance ("d9eec0b":https://dev.arvados.org/projects/arvados/repository/revisions/d9eec0b), keep-web ("3399e63":https://dev.arvados.org/projects/arvados/repository/revisions/3399e63), keepproxy ("6de67b6":https://dev.arvados.org/projects/arvados/repository/revisions/6de67b6), and arvados-git-httpd ("9e27ddf":https://dev.arvados.org/projects/arvados/repository/revisions/9e27ddf) -- now enable their respective components using systemd. These components prefer YAML configuration files over command line flags ("3bbe1cd":https://dev.arvados.org/projects/arvados/repository/revisions/3bbe1cd).
+
+* On Debian-based systems using systemd, services are enabled automatically when packages are installed.
+* On RedHat-based systems using systemd, unit files are installed but services must be enabled explicitly: e.g., <code>"sudo systemctl enable keep-web; sudo systemctl start keep-web"</code>.
+* The new systemd-supervised services will not start up successfully until configuration files are installed in /etc/arvados/: e.g., <code>"Sep 26 18:23:55 62751f5bb946 keep-web[74]: 2016/09/26 18:23:55 open /etc/arvados/keep-web/keep-web.yml: no such file or directory"</code>
+* To migrate from runit to systemd after installing the new packages, we recommend the following procedure:
+*# Bring down the runit service: "sv down /etc/sv/keep-web"
+*# Create a YAML configuration file (e.g., /etc/arvados/keep-web/keep-web.yml -- see "keep-web -help")
+*# Ensure the service is running correctly under systemd: "systemctl status keep-web" / "journalctl -u keep-web"
+*# Remove the runit service so it doesn't start at next boot
+* Affected services:
+** keep-balance - /etc/arvados/keep-balance/keep-balance.yml
+** keep-web - /etc/arvados/keep-web/keep-web.yml
+** keepproxy - /etc/arvados/keepproxy/keepproxy.yml
+** arvados-git-httpd - /etc/arvados/arv-git-httpd/arv-git-httpd.yml
+
+h4. Installation paths for Python modules and script changed (2016-05-31)
+
+Commits "ae72b172c8":https://dev.arvados.org/projects/arvados/repository/revisions/ae72b172c8 and "3aae316c25":https://dev.arvados.org/projects/arvados/repository/revisions/3aae316c25 change the filesystem location where Python modules and scripts are installed.
+
+* Previous packages installed these files to the distribution's preferred path under @/usr/local@ (or the equivalent location in a Software Collection).  Now they get installed to a path under @/usr@.  This improves compatibility with other Python packages provided by the distribution.  See "#9242":https://dev.arvados.org/issues/9242 for more background.
+* If you simply import Python modules from scripts, or call Python tools relying on $PATH, you don't need to make any changes.  If you have hardcoded full paths to some of these files (e.g., in symbolic links or configuration files), you will need to update those paths after this upgrade.
+
+h4. Crunchrunner package is required on compute and shell nodes (2016-04-25)
+
+Commit "eebcb5e":https://dev.arvados.org/projects/arvados/repository/revisions/eebcb5e requires the crunchrunner package to be installed on compute nodes and shell nodes in order to run CWL workflows.
+
+* On each Debian-based compute node and shell node, run: @sudo apt-get install crunchrunner@
+* On each Red Hat-based compute node and shell node, run: @sudo yum install crunchrunner@
+
+h4. Keep permission signature algorithm change (2016-04-21)
+
+Commit "3c88abd":https://dev.arvados.org/projects/arvados/repository/revisions/3c88abd changes the Keep permission signature algorithm.
+
+* All software components that generate signatures must be upgraded together. These are: keepstore, API server, keep-block-check, and keep-rsync. For example, if keepstore < 0.1.20160421183420 but API server >= 0.1.20160421183420, clients will not be able to read or write data in Keep.
+* Jobs and client operations that are in progress during the upgrade (including arv-put's "resume cache") will fail.
+
+h4. Workbench's "Getting Started" popup disabled by default (2015-01-05)
+
+Commit "e1276d6e":https://dev.arvados.org/projects/arvados/repository/revisions/e1276d6e disables Workbench's "Getting Started" popup by default.
+
+* If you want new users to continue seeing this popup, set @enable_getting_started_popup: true@ in Workbench's @application.yml@ configuration.
+
+h4. Crunch jobs now have access to Keep-backed writable scratch storage (2015-12-03)
+
+Commit "5590c9ac":https://dev.arvados.org/projects/arvados/repository/revisions/5590c9ac makes a Keep-backed writable scratch directory available in crunch jobs (see "#7751":https://dev.arvados.org/issues/7751)
+
+* All compute nodes must be upgraded to arvados-fuse >= 0.1.2015112518060 because crunch-job uses some new arv-mount flags (--mount-tmp, --mount-by-pdh) introduced in merge "346a558":https://dev.arvados.org/projects/arvados/repository/revisions/346a558
+* Jobs will fail if the API server (in particular crunch-job from the arvados-cli gem) is upgraded without upgrading arvados-fuse on compute nodes.
+
+h4. Recommended configuration change for keep-web (2015-11-11)
+
+Commit "1e2ace5":https://dev.arvados.org/projects/arvados/repository/revisions/1e2ace5 changes recommended config for keep-web (see "#5824":https://dev.arvados.org/issues/5824)
+
+* proxy/dns/ssl config should be updated to route "https://download.uuid_prefix.arvadosapi.com/" requests to keep-web (alongside the existing "collections" routing)
+* keep-web command line adds @-attachment-only-host download.uuid_prefix.arvadosapi.com@
+* Workbench config adds @keep_web_download_url@
+* More info on the (still beta/non-TOC-linked) "keep-web doc page":http://doc.arvados.org/install/install-keep-web.html
+
+h4. Stopped containers are now automatically removed on compute nodes (2015-11-04)
+
+Commit "1d1c6de":https://dev.arvados.org/projects/arvados/repository/revisions/1d1c6de removes stopped containers (see "#7444":https://dev.arvados.org/issues/7444)
+
+* arvados-docker-cleaner removes _all_ docker containers as soon as they exit, effectively making @docker run@ default to @--rm@. If you run arvados-docker-cleaner on a host that does anything other than run crunch-jobs, and you still want to be able to use @docker start@, read the "new doc page":http://doc.arvados.org/install/install-compute-node.html to learn how to turn this off before upgrading.
+
+h4. New keep-web service (2015-11-04)
+
+Commit "21006cf":https://dev.arvados.org/projects/arvados/repository/revisions/21006cf adds a new keep-web service (see "#5824":https://dev.arvados.org/issues/5824).
+
+* Nothing relies on keep-web yet, but early adopters can install it now by following http://doc.arvados.org/install/install-keep-web.html (it is not yet linked in the TOC).
diff --git a/doc/api/crunch-scripts.html.textile.liquid b/doc/api/crunch-scripts.html.textile.liquid
new file mode 100644 (file)
index 0000000..3df1db4
--- /dev/null
@@ -0,0 +1,50 @@
+---
+layout: default
+navsection: api
+navmenu: Concepts
+title: Crunch scripts
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Crunch scripts
+
+A crunch script is responsible for completing a single JobTask. In doing so, it will:
+
+* (optionally) read some input from Keep
+* (optionally) store some output in Keep
+* (optionally) create some new JobTasks and add them to the current Job
+* (optionally) update the current JobTask record with the "output" attribute set to a Keep locator or a fragment of a manifest
+* update the current JobTask record with the "success" attribute set to True
+
+A task's context is provided in environment variables.
+
+table(table table-bordered table-condensed).
+|Environment variable|Description|
+|@JOB_UUID@|UUID of the current "Job":methods/jobs.html|
+|@TASK_UUID@|UUID of the current "JobTask":methods/job_tasks.html|
+|@ARVADOS_API_HOST@|Hostname and port number of API server|
+|@ARVADOS_API_TOKEN@|Authentication token to use with API calls made by the current task|
+
+The crunch script typically uses the Python SDK (or another suitable client library / SDK) to connect to the Arvados service and retrieve the rest of the details about the current job and task.
+
+The Python SDK has some shortcuts for common operations.
+
+In general, a crunch script can access information about the current job and task like this:
+
+<pre>
+import arvados
+import os
+import sys
+
+job = arvados.api().jobs().get(uuid=os.environ['JOB_UUID']).execute()
+sys.stderr.write("script_parameters['foo'] == %s\n"
+                 % job['script_parameters']['foo'])
+
+task = arvados.api().job_tasks().get(uuid=os.environ['TASK_UUID']).execute()
+sys.stderr.write("current task sequence number is %d\n"
+                 % task['sequence'])
+</pre>
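+
+To complete a task, a crunch script updates the current JobTask record.  A minimal sketch using the same SDK calls (the output locator below is a placeholder, the empty block; a real script would store its output in Keep first):
+
+<pre>
+import arvados
+import os
+
+# Placeholder: a real script would write its output to Keep and use the
+# resulting locator or manifest fragment here.
+output_locator = "d41d8cd98f00b204e9800998ecf8427e+0"
+
+arvados.api().job_tasks().update(
+    uuid=os.environ['TASK_UUID'],
+    body={'job_task': {'output': output_locator, 'success': True}},
+).execute()
+</pre>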
diff --git a/doc/api/execution.html.textile.liquid b/doc/api/execution.html.textile.liquid
new file mode 100644 (file)
index 0000000..cada9ab
--- /dev/null
@@ -0,0 +1,58 @@
+---
+layout: default
+navsection: architecture
+title: Computing with Crunch
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Crunch is the name for the Arvados system for managing computation.  It provides an abstract API to various clouds and HPC resource allocation and scheduling systems, and integrates closely with Keep storage and the Arvados permission system.
+
+h2. Container API
+
+# To submit work, create a "container request":{{site.baseurl}}/api/methods/container_requests.html in the @Committed@ state.
+# The system will fulfill the container request by creating or reusing a "Container object":{{site.baseurl}}/api/methods/containers.html and assigning it to the @container_uuid@ field.  If the same request has been submitted in the past, it may reuse an existing container.  The reuse behavior can be suppressed with @use_existing: false@ in the container request.
+# The dispatcher process will notice a new container in @Queued@ state and submit a container executor to the underlying work queuing system (such as SLURM).
+# The container executes.  Upon termination the container goes into the  @Complete@ state.  If the container execution was interrupted or lost due to system failure, it will go into the @Cancelled@ state.
+# When the container associated with the container request is completed, the container request will go into the @Final@ state.
+# The @output_uuid@ field of the container request contains the uuid of output collection produced by container request.
+
+!(full-width){{site.baseurl}}/images/Crunch_dispatch.svg!
+
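+For concreteness, here is a minimal Python SDK sketch of step 1 above; the name, image hash, command, and resource sizes are illustrative placeholders:
+
+<pre>
+import arvados
+
+api = arvados.api('v1')
+cr = api.container_requests().create(body={'container_request': {
+    'name': 'hello world',
+    'state': 'Committed',
+    'priority': 500,
+    # Placeholder portable data hash of the docker image collection.
+    'container_image': 'dd1234567890abcdef1234567890abcd+1234',
+    'command': ['echo', 'hello'],
+    'cwd': '/tmp',
+    'output_path': '/tmp',
+    'mounts': {'/tmp': {'kind': 'tmp', 'capacity': 1073741824}},
+    'runtime_constraints': {'ram': 268435456, 'vcpus': 1},
+}}).execute()
+print(cr['uuid'])
+</pre>
+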
+h2(#RAM). Understanding RAM requests for containers
+
+The @runtime_constraints@ section of a container specifies working RAM (@ram@) and Keep cache (@keep_cache_ram@).  If not specified, containers get a default Keep cache (@container_default_keep_cache_ram@, default 256 MiB).  The total RAM requested for a container is the sum of working RAM, Keep cache, and an additional RAM reservation configured by the admin (@ReserveExtraRAM@ in the dispatcher configuration, default zero).
+
+The total RAM request is used to schedule containers onto compute nodes.  RAM allocation limits are enforced using kernel controls such as cgroups.  A container which requests 1 GiB RAM will only be permitted to allocate up to 1 GiB of RAM, even if scheduled on a 4 GiB node.  On HPC systems, a multi-core node may run multiple containers at a time.
+
+When running on the cloud, the memory request (along with CPU and disk) is used to select (and possibly boot) an instance type with adequate resources to run the container.  Instance type RAM is derated 5% from the published specification to accommodate virtual machine, kernel and system services overhead.
+
+h3. Calculate minimum instance type RAM for a container
+
+    (RAM request + Keep cache + ReserveExtraRAM) * (100/95)
+
+For example, for a 3 GiB request, default Keep cache, and no extra RAM reserved:
+
+    (3072 + 256) * 1.0526 = 3503 MiB
+
+To run this container, the instance type must have a published RAM size of at least 3503 MiB.
+
+h3. Calculate the maximum requestable RAM for an instance type
+
+    (Instance type RAM * (95/100)) - Keep cache - ReserveExtraRAM
+
+For example, for a 3.75 GiB node, default Keep cache, and no extra RAM reserved:
+
+    (3840 * 0.95) - 256 = 3392 MiB
+
+To run on this instance type, the container can request at most 3392 MiB of working RAM.
+
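+The two formulas above are easy to script; this sketch simply restates them in Python, assuming the default 256 MiB Keep cache and zero @ReserveExtraRAM@:
+
+<pre>
+# All sizes in MiB; these mirror the two formulas above.
+def min_instance_ram(ram_request, keep_cache=256, reserve_extra=0):
+    return (ram_request + keep_cache + reserve_extra) * 100.0 / 95
+
+def max_container_ram(instance_ram, keep_cache=256, reserve_extra=0):
+    return instance_ram * 95.0 / 100 - keep_cache - reserve_extra
+
+print(min_instance_ram(3072))    # ~3503 MiB
+print(max_container_ram(3840))   # 3392 MiB
+</pre>
+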
+h2. Job API (deprecated)
+
+# To submit work, create a "job":{{site.baseurl}}/api/methods/jobs.html .  If the same job has been submitted in the past, it will return an existing job in @Completed@ state.
+# The dispatcher process will notice a new job in @Queued@ state and attempt to allocate nodes to run the job.
+# The job executes.
+# Retrieve the @output@ field with the portable data hash of the collection with the output files of the job.
diff --git a/doc/api/index.html.textile.liquid b/doc/api/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..2cc5871
--- /dev/null
@@ -0,0 +1,21 @@
+---
+layout: default
+navsection: api
+title: API Reference
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This reference describes the semantics of Arvados resources and how to programmatically access Arvados via its REST API.  Each resource listed in this section is exposed on the Arvados API server under the @/arvados/v1/@ path prefix, for example, @https://{{ site.arvados_api_host }}/arvados/v1/collections@.
+
+h2. Discovery document
+
+The API server publishes a machine-readable description of its endpoints and some additional site configuration values via a JSON-formatted discovery document.  This is available at @/discovery/v1/apis/arvados/v1/rest@, for example @https://{{ site.arvados_api_host }}/discovery/v1/apis/arvados/v1/rest@.  Some Arvados SDKs use the discovery document to generate language bindings.
+
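+As a sketch, the discovery document can be fetched and inspected with nothing but the standard library (the hostname is a placeholder; assumes Python 3):
+
+<pre>
+import json
+from urllib.request import urlopen
+
+url = 'https://zzzzz.arvadosapi.com/discovery/v1/apis/arvados/v1/rest'
+doc = json.load(urlopen(url))
+print(sorted(doc['resources']))   # resource types served by this cluster
+</pre>
+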
+h2. Workbench examples
+
+Many Arvados Workbench pages, under the *Advanced* tab, provide examples of API and SDK use for accessing the current resource.
diff --git a/doc/api/methods.html.textile.liquid b/doc/api/methods.html.textile.liquid
new file mode 100644 (file)
index 0000000..4f97ba4
--- /dev/null
@@ -0,0 +1,159 @@
+---
+layout: default
+navsection: api
+navmenu: Concepts
+title: Common resource methods
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The following methods are available for most resources.  Some resources may limit who can perform certain operations.  Consult documentation for individual resource types for details.
+
+The methods are relative to the base URI, e.g., @/arvados/v1/resource_type@.  For arguments specifying a *Location* of @path@, the value of the argument is incorporated into the path portion of the URI.  For example, a @uuid@ of @aaaaa-bbbbb-ccccccccccccccc@ in a path position yields a URI of @/arvados/v1/resource_type/aaaaa-bbbbb-ccccccccccccccc@.
+
+Arguments specifying a *Location* of "query" are incorporated into the query portion of the URI or request body.  For example, @/arvados/v1/resource_type?count=none@.
+
+Certain method calls on certain object types support "federation":{{site.baseurl}}/architecture/federation.html , that is, the ability to operate on objects owned by different clusters.   API pages for specific object types list which federated operations are supported for that type (if any) in the "Methods" section.  Methods which implicitly include a cluster id (such as @GET@ on a specific uuid, using the uuid prefix) will be directed to the appropriate cluster.  Methods that don't implicitly include the cluster id (such as @create@) use the @cluster_id@ query parameter to specify the cluster to which the request should be directed.
+
+h2. create
+
+The @create@ method creates a new object of the specified type.  Note that:
+
+* Only the listed attributes (and "standard metadata":resources.html) are set
+* Unset attributes will get default values
+* The attributes of a given resource type are fixed (you cannot introduce new toplevel attributes)
+
+This method corresponds to the HTTP request @POST /arvados/v1/resource_type@.  A successful create call returns a copy of the new object.
+
+To create an object on a remote cluster (federated create), provide the @cluster_id@ of the target cluster.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |
+|{resource_type}|object|Name is the singular form of the resource type, e.g., for the "collections" resource, this argument is "collection"|body|
+|{cluster_id}|string|Optional, the cluster on which to create the object if not the current cluster.|query|
+
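+A minimal sketch of a @create@ call with the Python SDK, creating a project group (the name is arbitrary):
+
+<pre>
+import arvados
+
+api = arvados.api('v1')
+# The new object's attributes go in the body under the singular type name.
+group = api.groups().create(body={'group': {
+    'name': 'My example project',
+    'group_class': 'project',
+}}).execute()
+print(group['uuid'])
+</pre>
+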
+h2. delete
+
+The @delete@ method deletes an object of the specified type.  It corresponds to the HTTP request @DELETE /arvados/v1/resource_type/uuid@.  A successful delete call returns a copy of the deleted object.
+
+The cluster id portion of the @uuid@ is used to determine which cluster owns the object; a federated delete request will be routed to that cluster.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |
+{background:#ccffcc}.|uuid|string|The UUID of the object in question.|path|
+
+h2. get
+
+The @get@ method gets a single object with the specified @uuid@.  It corresponds to the HTTP request @GET /arvados/v1/resource_type/uuid@.
+
+The cluster id portion of the @uuid@ is used to determine which cluster owns the object; a federated get request will be routed to that cluster.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |
+{background:#ccffcc}.|uuid|string|The UUID of the object in question.|path|
+
+h2(#index). list
+
+The @list@ method requests a list of resources of the given type.  It corresponds to the HTTP request @GET /arvados/v1/resource_type@.  All resources support the @list@ method unless otherwise noted.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |
+|limit   |integer|Maximum number of resources to return.  If not provided, server will provide a default limit.  Server may also impose a maximum number of records that can be returned in a single request.|query|
+|offset  |integer|Skip the first 'offset' number of resources that would be returned under the given filter conditions.|query|
+|filters |array  |"Conditions for selecting resources to return.":#filters|query|
+|order   |array  |Attributes to use as sort keys to determine the order resources are returned, each optionally followed by @asc@ or @desc@ to indicate ascending or descending order.
+Example: @["head_uuid asc","modified_at desc"]@
+Default: @["created_at desc"]@|query|
+|select  |array  |Set of attributes to include in the response.
+Example: @["head_uuid","tail_uuid"]@
+Default: all available attributes.  As a special case, collections do not return "manifest_text" unless explicitly selected.|query|
+|distinct|boolean|@true@: (default) do not return duplicate objects
+@false@: permitted to return duplicates|query|
+|count|string|@"exact"@ (default): Include an @items_available@ response field giving the number of distinct matching items that can be retrieved (irrespective of @limit@ and @offset@ arguments).
+@"none"@: Omit the @items_available@ response field. This option will produce a faster response.|query|
+
+h3(#filters). Available list method filters
+
+The value of the @filters@ parameter is an array of conditions. The @list@ method returns only the resources that satisfy all of the given conditions. In other words, the conjunction @AND@ is implicit.
+
+Each condition is expressed as an array with three elements: @[attribute, operator, operand]@.
+
+table(table table-bordered table-condensed).
+|_. Index|_. Element|_. Type|_. Description|_. Examples|
+|0|attribute|string|Name of the attribute to compare (or "any" to return resources with any matching attribute)|@script_version@, @head_uuid@, @any@|
+|1|operator|string|Comparison operator|@>@, @>=@, @like@, @not in@|
+|2|operand|string, array, or null|Value to compare with the resource attribute|@"d00220fb%"@, @"1234"@, @["foo","bar"]@, @nil@|
+
+The following operators are available.
+
+table(table table-bordered table-condensed).
+|_. Operator|_. Operand type|_. Description|_. Example|
+|@=@, @!=@|string, number, timestamp, or null|Equality comparison|@["tail_uuid","=","xyzzy-j7d0g-fffffffffffffff"]@ @["tail_uuid","!=",null]@|
+|@<@, @<=@, @>=@, @>@|string, number, or timestamp|Ordering comparison|@["script_version",">","123"]@|
+|@like@, @ilike@|string|SQL pattern match.  Single character match is @_@ and wildcard is @%@. The @ilike@ operator is case-insensitive|@["script_version","like","d00220fb%"]@|
+|@in@, @not in@|array of strings|Set membership|@["script_version","in",["master","d00220fb38d4b85ca8fc28a8151702a2b9d1dec5"]]@|
+|@is_a@|string|Arvados object type|@["head_uuid","is_a","arvados#collection"]@|
+|@exists@|string|Test if a subproperty is present.|@["properties","exists","my_subproperty"]@|
+
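+Putting these pieces together, a @list@ call with two conjoined conditions might look like this Python SDK sketch (the name pattern and date are made up):
+
+<pre>
+import arvados
+
+api = arvados.api('v1')
+result = api.collections().list(
+    filters=[
+        ['name', 'like', 'sample-%'],
+        ['created_at', '>', '2018-01-01T00:00:00Z'],
+    ],
+    order=['created_at desc'],
+    limit=10,
+).execute()
+for item in result['items']:
+    print(item['uuid'])
+</pre>
+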
+h4(#subpropertyfilters). Filtering on subproperties
+
+Some record types have an additional @properties@ attribute that allows recording and filtering on additional key-value pairs.  To filter on a subproperty, the value in the @attribute@ position has the form @properties.user_property@.  You may also use JSON-LD / RDF style URIs for property keys by enclosing them in @<...>@, for example @properties.<http://example.com/user_property>@.  Alternatively, you may provide a JSON-LD "@context" field; however, at this time JSON-LD contexts are not interpreted by Arvados.
+
+table(table table-bordered table-condensed).
+|_. Operator|_. Operand type|_. Description|_. Example|
+|@=@, @!=@|string, number or boolean|Equality comparison|@["properties.my_subproperty", "=", "fizzy whizy sparkle pop"]@|
+|@<@, @<=@, @>=@, @>@|string or number|Ordering comparison|@["properties.my_subproperty", "<", 3]@|
+|@like@, @ilike@|string|SQL pattern match, single character match is @_@ and wildcard is @%@, ilike is case-insensitive|@["properties.my_subproperty", "like", "d00220fb%"]@|
+|@in@, @not in@|array of strings|Set membership|@["properties.my_subproperty", "in", ["fizz", "buzz"]]@|
+|@exists@|boolean|Test if a subproperty is present or not (determined by operand).|@["properties.my_subproperty", "exists", true]@|
+
+Note that exclusion filters @!=@ and @not in@ will return records for which the property is not defined at all.  To restrict filtering to records on which the subproperty is defined, combine with an @exists@ filter.
+
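+For instance, to restrict results to records where a hypothetical @sample_id@ subproperty is both present and equal to a given value, combine the two conditions:
+
+<pre>
+filters = [
+    ['properties.sample_id', 'exists', True],
+    ['properties.sample_id', '=', 'S42'],
+]
+</pre>
+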
+h4. Federated listing
+
+Federated listing forwards a request to multiple clusters and combines the results.  Currently only a very restricted form of the "list" method is supported.
+
+To query multiple clusters, the list request must:
+
+* Have filters only matching @[["uuid", "in", [...]]]@ or @["uuid", "=", "..."]@
+* Specify @count=none@
+* If @select@ is specified, it must include @uuid@
+* Not specify @limit@, @offset@ or @order@
+* Not request more items than the maximum response size
+
+This form may be used to request a specific list of objects by uuid which are owned by multiple clusters.
+
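+A conforming federated list request, sketched with the Python SDK (the UUIDs are placeholders):
+
+<pre>
+import arvados
+
+api = arvados.api('v1')
+result = api.collections().list(
+    filters=[['uuid', 'in', [
+        'zzzzz-4zz18-0123456789abcde',
+        'qr1hi-4zz18-0123456789abcde',
+    ]]],
+    count='none',
+).execute()
+</pre>
+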
+h3. Results of list method
+
+A successful call to list will return the following object.
+
+table(table table-bordered table-condensed).
+|_. Attribute |_. Type |_. Description |
+|kind|string|type of objects returned|
+|offset|integer|query offset in effect|
+|limit|integer|query limit in effect|
+|items|array|actual query payload, an array of resource objects|
+|items_available|integer|total items available matching query|
+
+h2. update
+
+The @update@ method updates fields on the object with the specified @uuid@.  It corresponds to the HTTP request @PUT /arvados/v1/resource_type/uuid@.  Note that only the listed attributes (and "standard metadata":resources.html) are updated, unset attributes will retain their previous values, and the attributes of a given resource type are fixed (you cannot introduce new toplevel attributes).  Also note that updates replace the value of the attribute, so if an attribute has an object value, the entire object is replaced.  A successful update call returns the updated copy of the object.
+
+The cluster id portion of the @uuid@ is used to determine which cluster owns the object; a federated update request will be routed to that cluster.
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |
+{background:#ccffcc}.|uuid|string|The UUID of the resource in question.|path|
+|{resource_type}|object||query|
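+
+A minimal sketch of an @update@ call with the Python SDK (the UUID and new name are placeholders):
+
+<pre>
+import arvados
+
+api = arvados.api('v1')
+api.collections().update(
+    uuid='zzzzz-4zz18-0123456789abcde',
+    body={'collection': {'name': 'new name'}},
+).execute()
+</pre>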
diff --git a/doc/api/methods/api_client_authorizations.html.textile.liquid b/doc/api/methods/api_client_authorizations.html.textile.liquid
new file mode 100644 (file)
index 0000000..bcf7756
--- /dev/null
@@ -0,0 +1,103 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "api_client_authorizations"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/api_client_authorizations@
+
+Object type: @gj3su@
+
+Example UUID: @zzzzz-gj3su-0123456789abcde@
+
+h2. Resource
+
+The @api_client_authorizations@ resource stores the API tokens that have been issued to permit access to the API server.
+
+An ApiClientAuthorization is *not* a generic Arvados resource.  The full list of properties that belong to an ApiClientAuthorization is:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|uuid|string|An identifier used to refer to the token without exposing the actual token.||
+|api_token|string|The actual token string that is expected in the Authorization header.||
+|api_client_id|integer|-||
+|user_id|integer|-||
+|created_by_ip_address|string|-||
+|last_used_by_ip_address|string|The network address of the most recent client using this token.||
+|last_used_at|datetime|Timestamp of the most recent request using this token.||
+|expires_at|datetime|Time at which the token is no longer valid.  May be set to a time in the past in order to immediately expire a token.||
+|owner_uuid|string|The user associated with the token.  All operations using this token are checked against the permissions of this user.||
+|scopes|array|A list of resources this token is allowed to access.  A scope of ["all"] allows all resources.  See "API Authorization":{{site.baseurl}}/api/tokens.html#scopes for details.||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3(#create). create
+
+Create a new ApiClientAuthorization.
+
+Regular users may only create self-owned API tokens, but may provide a restricted "scope":{{site.baseurl}}/api/tokens.html#scopes .  Administrators may create API tokens corresponding to any user.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|api_client_authorization|object||query||
+
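+For instance, a user might mint a token with a restricted scope for themselves; a sketch with the Python SDK (the scope shown is illustrative):
+
+<pre>
+import arvados
+
+api = arvados.api('v1')
+token = api.api_client_authorizations().create(body={
+    'api_client_authorization': {
+        'scopes': ['GET /arvados/v1/collections'],
+    },
+}).execute()
+print(token['api_token'])
+</pre>
+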
+h3. create_system_auth
+
+Create an ApiClientAuthorization for the system user.  This method is only available to administrators.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|api_client_id|integer||query||
+|scopes|array||query||
+
+h3. delete
+
+Delete an existing ApiClientAuthorization.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||
+
+h3. get
+
+Gets an ApiClientAuthorization's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||
+
+h3. list
+
+List api_client_authorizations.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing ApiClientAuthorization.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||
+|api_client_authorization|object||query||
diff --git a/doc/api/methods/api_clients.html.textile.liquid b/doc/api/methods/api_clients.html.textile.liquid
new file mode 100644 (file)
index 0000000..3f7abd4
--- /dev/null
@@ -0,0 +1,83 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "api_clients"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/api_clients@
+
+Object type: @ozdt8@
+
+Example UUID: @zzzzz-ozdt8-0123456789abcde@
+
+h2. Resource
+
+The "api_clients" resource determines if web applications that have gone through the browser login flow may create or list API tokens.
+
+Each ApiClient has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|||
+|url_prefix|string|||
+|is_trusted|boolean|Trusted by users to handle their API tokens (ApiClientAuthorizations).||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new ApiClient.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|api_client|object||query||
+
+h3. delete
+
+Delete an existing ApiClient.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ApiClient in question.|path||
+
+h3. get
+
+Gets an ApiClient's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ApiClient in question.|path||
+
+h3. list
+
+List api_clients.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing ApiClient.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ApiClient in question.|path||
+|api_client|object||query||
diff --git a/doc/api/methods/authorized_keys.html.textile.liquid b/doc/api/methods/authorized_keys.html.textile.liquid
new file mode 100644 (file)
index 0000000..48b7b6f
--- /dev/null
@@ -0,0 +1,85 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "authorized_keys"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/authorized_keys@
+
+Object type: @fngyi@
+
+Example UUID: @zzzzz-fngyi-0123456789abcde@
+
+h2. Resource
+
+The authorized_keys resource stores SSH public keys which grant access to virtual machines or git repositories on the Arvados cluster as the user in @authorized_user_uuid@.
+
+Each AuthorizedKey has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|A name to help the user manage their keys.||
+|key_type|string|Public key type, currently only supports "SSH"||
+|authorized_user_uuid|string|The user to which this key belongs.  Authentication using this key authenticates as this user.||
+|public_key|text|The actual public key material, e.g., from @~/.ssh/id_rsa.pub@||
+|expires_at|datetime|Expiration date after which the key is no longer valid.||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new AuthorizedKey.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|authorized_key|object||query||
+
+h3. delete
+
+Delete an existing AuthorizedKey.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||
+
+h3. get
+
+Gets an AuthorizedKey's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||
+
+h3. list
+
+List authorized_keys.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing AuthorizedKey.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||
+|authorized_key|object||query||
diff --git a/doc/api/methods/collections.html.textile.liquid b/doc/api/methods/collections.html.textile.liquid
new file mode 100644 (file)
index 0000000..c68773d
--- /dev/null
@@ -0,0 +1,124 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "collections"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/collections@
+
+Object type: @4zz18@
+
+Example UUID: @zzzzz-4zz18-0123456789abcde@
+
+h2. Resource
+
+Collections describe sets of files in terms of data blocks stored in Keep.  See "storage in Keep":{{site.baseurl}}/api/storage.html for details.
+
+Each collection has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|||
+|description|text|||
+|properties|hash|User-defined metadata, may be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters ||
+|portable_data_hash|string|The MD5 sum of the manifest text stripped of block hints other than the size hint.||
+|manifest_text|text|||
+|replication_desired|number|Minimum storage replication level desired for each data block referenced by this collection. A value of @null@ signifies that the site default replication level (typically 2) is desired.|@2@|
+|replication_confirmed|number|Replication level most recently confirmed by the storage system. This field is null when a collection is first created, and is reset to null when the manifest_text changes in a way that introduces a new data block. An integer value indicates the replication level of the _least replicated_ data block in the collection.|@2@, null|
+|replication_confirmed_at|datetime|When replication_confirmed was confirmed. If replication_confirmed is null, this field is also null.||
+|trash_at|datetime|If @trash_at@ is non-null and in the past, this collection will be hidden from API calls.  May be untrashed.||
+|delete_at|datetime|If @delete_at@ is non-null and in the past, the collection may be permanently deleted.||
+|is_trashed|boolean|True if @trash_at@ is in the past, false if not.||
+|current_version_uuid|string|UUID of the collection's current version. On new collections, it'll be equal to the @uuid@ attribute.||
+|version|number|Version number, starting at 1 on new collections. This attribute is read-only.||
+|preserve_version|boolean|When set to true on a current version, it will be saved on the next versionable update.||
+
+h3. Conditions of creating a Collection
+
+The @portable_data_hash@ and @manifest_text@ attributes must be provided when creating a Collection. The cryptographic digest of the supplied @manifest_text@ must match the supplied @portable_data_hash@.
+
+h3. Side effects of creating a Collection
+
+Referenced blocks are protected from garbage collection in Keep.
+
+Data can be shared with other users via the Arvados permission model.
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+Supports federated @get@ only, which may be called with either a uuid or a portable data hash.  When requesting a portable data hash which is not available on the home cluster, the query is forwarded to all the clusters listed in @RemoteClusters@ and returns the first successful result.
+
+h3. create
+
+Create a new Collection.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|collection|object||query||
+
+h3. delete
+
+Put a Collection in the trash.  This sets the @trash_at@ field to @now@ and the @delete_at@ field to @now@ plus the token TTL.  A trashed collection is invisible to most API calls unless the @include_trash@ parameter is true.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path||
+
+h3. get
+
+Gets a Collection's metadata by UUID or portable data hash.  When making a request by portable data hash, the returned record will only have the @portable_data_hash@ and @manifest_text@.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path||
+
+h3. list
+
+List collections.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|include_trash|boolean (default false)|Include trashed collections.|query||
+|include_old_versions|boolean (default false)|Include past versions of the collection(s) being listed, if any.|query||
+
+Note: Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in results by default.  If you need it, pass a @select@ parameter that includes @manifest_text@.
+
+h3. update
+
+Update attributes of an existing Collection.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path||
+|collection|object||query||
+
+h3. untrash
+
+Remove a Collection from the trash.  This sets the @trash_at@ and @delete_at@ fields to @null@.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Collection to untrash.|path||
+|ensure_unique_name|boolean (default false)|Rename collection uniquely if untrashing it would fail with a unique name conflict.|query||
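+
+A sketch of trashing and then untrashing a collection with the Python SDK (the UUID is a placeholder):
+
+<pre>
+import arvados
+
+api = arvados.api('v1')
+uuid = 'zzzzz-4zz18-0123456789abcde'
+api.collections().delete(uuid=uuid).execute()    # moves it to the trash
+api.collections().untrash(uuid=uuid).execute()   # restores it
+</pre>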
diff --git a/doc/api/methods/container_requests.html.textile.liquid b/doc/api/methods/container_requests.html.textile.liquid
new file mode 100644 (file)
index 0000000..b9a21fc
--- /dev/null
@@ -0,0 +1,164 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "container_requests"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/container_requests@
+
+Object type: @xvhdp@
+
+Example UUID: @zzzzz-xvhdp-0123456789abcde@
+
+h2. Resource
+
+A container request is a request for the Arvados cluster to perform some computational work.  See "computing with Crunch":{{site.baseurl}}/api/execution.html for details.
+
+Each ContainerRequest offers the following attributes, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+All attributes are optional, unless otherwise marked as required.
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Notes|
+|name|string|The name of the container_request.||
+|description|string|The description of the container_request.||
+|properties|hash|User-defined metadata that does not affect how the container is run.  May be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters||
+|state|string|The allowed states are "Uncommitted", "Committed", and "Final".|Once a request is Committed, the only attributes that can be modified are priority, container_uuid, and container_count_max. A request in the "Final" state cannot have any of its functional parts modified (i.e., only name, description, and properties fields can be modified).|
+|requesting_container_uuid|string|The uuid of the parent container that created this container_request, if any. Represents a process tree.|The priority of this container_request is inherited from the parent container. If the parent container is cancelled, this container_request will be cancelled as well.|
+|container_uuid|string|The uuid of the container that satisfies this container_request. The system may return a preexisting Container that matches the container request criteria. See "Container reuse":#container_reuse for more details.|Container reuse is the default behavior, but may be disabled with @use_existing: false@ to always create a new container.|
+|container_count_max|integer|Maximum number of containers to start, i.e., the maximum number of "attempts" to be made.||
+|mounts|hash|Objects to attach to the container's filesystem and stdin/stdout.|See "Mount types":#mount_types for more details.|
+|secret_mounts|hash|Objects to attach to the container's filesystem.  Only "json" or "text" mount types allowed.|Not returned in API responses. Reset to empty when state is "Complete" or "Cancelled".|
+|runtime_constraints|hash|Restrict the container's access to compute resources and the outside world.|Required when in "Committed" state. e.g.,<pre><code>{
+  "ram":12000000000,
+  "vcpus":2,
+  "API":true
+}</code></pre>See "Runtime constraints":#runtime_constraints for more details.|
+|scheduling_parameters|hash|Parameters to be passed to the container scheduler when running this container.|e.g.,<pre><code>{
+"partitions":["fastcpu","vfastcpu"]
+}</code></pre>See "Scheduling parameters":#scheduling_parameters for more details.|
+|container_image|string|Portable data hash of a collection containing the docker image to run the container.|Required.|
+|environment|hash|Environment variables and values that should be set in the container environment (@docker run --env@). This augments and (when conflicts exist) overrides environment variables given in the image's Dockerfile.||
+|cwd|string|Initial working directory, given as an absolute path (in the container) or a path relative to the WORKDIR given in the image's Dockerfile.|Required.|
+|command|array of strings|Command to execute in the container.|Required. e.g., @["echo","hello"]@|
+|output_path|string|Path to a directory or file inside the container that should be preserved as container's output when it finishes. This path must be one of the mount targets. For best performance, point output_path to a writable collection mount.  See "Pre-populate output using Mount points":#pre-populate-output for details regarding optional output pre-population using mount points and "Symlinks in output":#symlinks-in-output for additional details.|Required.|
+|output_name|string|Desired name for the output collection. If null, a name will be assigned automatically.||
+|output_ttl|integer|Desired lifetime for the output collection, in seconds. If zero, the output collection will not be deleted automatically.||
+|priority|integer|Range 0-1000.  Indicate scheduling order preference.|Clients are expected to submit container requests with zero priority in order to preview the container that will be used to satisfy it. Priority can be null if and only if state!="Committed".  See "below for more details":#priority .|
+|expires_at|datetime|After this time, priority is considered to be zero.|Not yet implemented.|
+|use_existing|boolean|If possible, use an existing (non-failed) container to satisfy the request instead of creating a new one.|Default is true|
+|log_uuid|string|Log collection containing log messages provided by the scheduler and crunch processes.|Null if the container has not yet completed.|
+|output_uuid|string|Output collection created when the container finished successfully.|Null if the container has failed or not yet completed.|
+|filters|string|Additional constraints for satisfying the container_request, given in the same form as the filters parameter accepted by the container_requests.list API.||
+|runtime_token|string|A v2 token to be passed into the container itself, used to access Keep-backed mounts, etc.  |Not returned in API responses.  Reset to null when state is "Complete" or "Cancelled".|
+|runtime_user_uuid|string|The user permission that will be granted to this container.||
+|runtime_auth_scopes|array of string|The scopes associated with the auth token used to run this container.||
+
+h2(#priority). Priority
+
+The @priority@ field has a range of 0-1000.
+
+Priority 0 means no container should run on behalf of this request; containers already running will be terminated (setting container priority to 0 is the cancel operation).
+
+Priority 1 is the lowest priority.
+
+Priority 1000 is the highest priority.
+
+The actual order in which containers execute is determined by the underlying scheduling software (e.g., SLURM) and may be based on a combination of container priority, submission time, available resources, and other factors.
+
+In the current implementation, the magnitude of difference in priority between two containers affects the weight of priority vs age in determining scheduling order.  If two containers have only a small difference in priority (for example, 500 and 501) and the lower priority container has a longer queue time, the lower priority container may be scheduled before the higher priority container.  Use a greater magnitude difference (for example, 500 and 600) to give higher weight to priority over queue time.
+
+h2(#mount_types). {% include 'mount_types' %}
+
+h2(#runtime_constraints). {% include 'container_runtime_constraints' %}
+
+h2(#scheduling_parameters). {% include 'container_scheduling_parameters' %}
+
+h2(#container_reuse). Container reuse
+
+When a container request is "Committed", the system will try to find and reuse an existing Container with the same command, cwd, environment, output_path, container_image, mounts, secret_mounts, runtime_constraints, runtime_user_uuid, and runtime_auth_scopes being requested. (Hashes in the serialized fields environment, mounts and runtime_constraints use normalized key order.)
+
+In order of preference, the system will use:
+* The first matching container to have finished successfully (i.e., reached state "Complete" with an exit_code of 0) whose log and output collections are still available.
+* The oldest matching "Running" container with the highest progress, i.e., the container that is most likely to finish first.
+* The oldest matching "Locked" container with the highest priority, i.e., the container that is most likely to start first.
+* The oldest matching "Queued" container with the highest priority, i.e,, the container that is most likely to start first.
+* A new container.
+
+h2(#cancel_container). Canceling a container request
+
+A container request may be canceled by setting its priority to 0, using an update call.
+
+When a container request is canceled, it will still reflect the state of the Container it is associated with via the container_uuid attribute. If that Container is being reused by any other container_requests that are still active, i.e., not yet canceled, that Container may continue to run or be scheduled to run by the system in the future. However, if no other container_requests are using that Container, then the Container will be canceled as well.
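+
+As an illustrative sketch, a cancel is performed by passing only the priority change as the @container_request@ parameter to @update@ (the surrounding HTTP details are omitted here):
+
+<notextile><pre>
+{
+  "container_request": {
+    "priority": 0
+  }
+}
+</pre></notextile>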
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+Supports federated @create@, @delete@, @get@, @list@, and @update@.
+
+h3(#create). create
+
+Create a new container request.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|container_request|object|Container request resource.|request body||
+|cluster_id|string|The federated cluster to submit the container request to.|query||
+
+The request body must include the required attributes command, container_image, cwd, and output_path. It can also include other attributes such as environment, mounts, and runtime_constraints.
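+
+As an illustrative sketch (the container_image hash, mount layout, and resource sizes below are placeholders, not recommendations), a committed request body might look like:
+
+<notextile><pre>
+{
+  "container_request": {
+    "name": "hello world",
+    "state": "Committed",
+    "priority": 500,
+    "container_image": "0123456789abcdef0123456789abcdef+1234",
+    "command": ["echo", "hello"],
+    "cwd": "/tmp",
+    "output_path": "/tmp",
+    "mounts": {
+      "/tmp": {
+        "kind": "tmp",
+        "capacity": 100000000
+      }
+    },
+    "runtime_constraints": {
+      "ram": 1073741824,
+      "vcpus": 1
+    }
+  }
+}
+</pre></notextile>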
+
+h3. delete
+
+Delete an existing container request.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the container request in question.|path||
+
+h3. get
+
+Get a container request's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the container request in question.|path||
+
+h3. list
+
+List container_requests.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+See the create method documentation for more information about container request-specific filters.
+
+h3. update
+
+Update attributes of an existing container request.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the container request in question.|path||
+|container_request|object||query||
+
+{% include 'notebox_begin' %}
+Setting the priority of a committed container_request to 0 may cancel a running container assigned for it.
+See "Canceling a container request":{{site.baseurl}}/api/methods/container_requests.html#cancel_container for further details.
+{% include 'notebox_end' %}
diff --git a/doc/api/methods/containers.html.textile.liquid b/doc/api/methods/containers.html.textile.liquid
new file mode 100644 (file)
index 0000000..f0ce8e3
--- /dev/null
@@ -0,0 +1,154 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "containers"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/containers@
+
+Object type: @dz642@
+
+Example UUID: @zzzzz-dz642-0123456789abcde@
+
+h2. Resource
+
+A container is a work order to be dispatched to an Arvados cluster to perform some computational work.  A container is created in response to a container request.  See "computing with Crunch":{{site.baseurl}}/api/execution.html for details.
+
+Each Container offers the following attributes, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Notes|
+|state|string|The allowed states are "Queued", "Locked", "Running", "Cancelled" and "Complete".|See "Container states":#container_states for more details.|
+|started_at|datetime|When this container started running.|Null if container has not yet started.|
+|finished_at|datetime|When this container finished.|Null if container has not yet finished.|
+|log|string|UUID or portable data hash of a collection containing the log messages produced when executing the container.|PDH after the container is finished, otherwise UUID or null.|
+|environment|hash|Environment variables and values that should be set in the container environment (@docker run --env@). This augments and (when conflicts exist) overrides environment variables given in the image's Dockerfile.|Must be equal to a ContainerRequest's environment in order to satisfy the ContainerRequest.|
+|cwd|string|Initial working directory.|Must be equal to a ContainerRequest's cwd in order to satisfy the ContainerRequest|
+|command|array of strings|Command to execute.| Must be equal to a ContainerRequest's command in order to satisfy the ContainerRequest.|
+|output_path|string|Path to a directory or file inside the container that should be preserved as this container's output when it finishes.|Must be equal to a ContainerRequest's output_path in order to satisfy the ContainerRequest.|
+|mounts|hash|Must contain the same keys as the ContainerRequest being satisfied. Each value must be within the range of values described in the ContainerRequest at the time the Container is assigned to the ContainerRequest.|See "Mount types":#mount_types for more details.|
+|secret_mounts|hash|Must contain the same keys as the ContainerRequest being satisfied. Each value must be within the range of values described in the ContainerRequest at the time the Container is assigned to the ContainerRequest.|Not returned in API responses. Reset to empty when state is "Complete" or "Cancelled".|
+|runtime_constraints|hash|Compute resources, and access to the outside world, that are / were available to the container.
+Generally this will contain additional keys that are not present in any corresponding ContainerRequests: for example, even if no ContainerRequests specified constraints on the number of CPU cores, the number of cores actually used will be recorded here.|e.g.,
+<pre><code>{
+  "ram":12000000000,
+  "vcpus":2,
+  "API":true
+}</code></pre>See "Runtime constraints":#runtime_constraints for more details.|
+|runtime_status|hash|Information related to the container's run, including its steps. Some keys have specific meaning and are described later in this page.|e.g.,
+<pre><code>{
+  "error": "This container won't be successful because at least one step has already failed."
+}</code></pre>See "Runtime status":#runtime_status for more details.|
+|scheduling_parameters|hash|Parameters to be passed to the container scheduler when running this container.|e.g.,<pre><code>{
+"partitions":["fastcpu","vfastcpu"]
+}</code></pre>See "Scheduling parameters":#scheduling_parameters for more details.|
+|output|string|Portable data hash of the output collection.|Null if the container is not yet finished.|
+|container_image|string|Portable data hash of a collection containing the docker image used to run the container.||
+|progress|number|A number between 0.0 and 1.0 describing the fraction of work done.||
+|priority|integer|Range 0-1000.  Indicates scheduling order preference.|Currently assigned by the system as the max() of the priorities of all associated ContainerRequests.  See "container request priority":container_requests.html#priority .|
+|exit_code|integer|Process exit code.|Null if state!="Complete"|
+|auth_uuid|string|UUID of a token to be passed into the container itself, used to access Keep-backed mounts, etc.  Automatically assigned.|Null if state∉{"Locked","Running"} or if @runtime_token@ was provided.|
+|locked_by_uuid|string|UUID of a token, indicating which dispatch process changed state to Locked. If null, any token can be used to lock. If not null, only the indicated token can modify this container.|Null if state∉{"Locked","Running"}|
+|runtime_token|string|A v2 token to be passed into the container itself, used to access Keep-backed mounts, etc.|Not returned in API responses.  Reset to null when state is "Complete" or "Cancelled".|
+
+h2(#container_states). Container states
+
+table(table table-bordered table-condensed).
+|_. State|_. Significance|_. Allowed next|
+|Queued|Waiting for a dispatcher to lock it and try to run the container.|Locked, Cancelled|
+|Locked|A dispatcher has "taken" the container and is allocating resources for it. The container has not started yet.|Queued, Running, Cancelled|
+|Running|Resources have been allocated and the contained process has been started (or is about to start). Crunch-run _must_ set state to Running _before_ there is any possibility that user code will run in the container.|Complete, Cancelled|
+|Complete|Container was running, and the contained process/command has exited.|-|
+|Cancelled|The container did not run long enough to produce an exit code. This includes cases where the container didn't even start, cases where the container was interrupted/killed before it exited by itself (e.g., priority changed to 0), and cases where some problem prevented the system from capturing the contained process's exit status (exit code and output).|-|
+
+h2(#mount_types). {% include 'mount_types' %}
+
+h2(#runtime_constraints). {% include 'container_runtime_constraints' %}
+
+h2(#runtime_status). Runtime status
+
+Runtime status provides information about a container's progress, even while the container is still in the Running state. It is used to avoid reusing containers that have not yet failed but are certain to, and to make workflow debugging easier.
+
+The following keys have well known meanings:
+
+table(table table-bordered table-condensed).
+|_. Key|_. Type|_. Description|_. Notes|
+|error|string|The existence of this key indicates the container will definitely fail, or has already failed.|Optional.|
+|warning|string|Indicates something unusual happened or is currently happening, but isn't considered fatal.|Optional.|
+|activity|string|A message for the end user about what state the container is currently in.|Optional.|
+|errorDetails|string|Additional structured error details.|Optional.|
+|warningDetails|string|Additional structured warning details.|Optional.|
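+
+As an illustrative sketch (message text is invented for the example), a @runtime_status@ hash combining these keys might look like:
+
+<notextile><pre>
+{
+  "activity": "reading input collection",
+  "warning": "a Keep request was retried",
+  "warningDetails": "retry 1 of 3: keep0.zzzzz.example.com: 503 Service Unavailable"
+}
+</pre></notextile>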
+
+h2(#scheduling_parameters). {% include 'container_scheduling_parameters' %}
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+Supports federated @get@ and @list@.
+
+h3(#create). create
+
+Create a new Container.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|container|object|Container resource|request body||
+
+h3. delete
+
+Delete an existing Container.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Container in question.|path||
+
+h3. get
+
+Get a Container's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Container in question.|path||
+
+h3. list
+
+List containers.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+See the create method documentation for more information about Container-specific filters.
+
+h3. update
+
+Update attributes of an existing Container.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Container in question.|path||
+|container|object||query||
+
+h3. auth
+
+Get the api_client_authorization record indicated by this container's auth_uuid, which belongs to the container's locked_by_uuid.
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string||path||
diff --git a/doc/api/methods/groups.html.textile.liquid b/doc/api/methods/groups.html.textile.liquid
new file mode 100644 (file)
index 0000000..9c75fa8
--- /dev/null
@@ -0,0 +1,151 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "groups"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/groups@
+
+Object type: @j7d0g@
+
+Example UUID: @zzzzz-j7d0g-0123456789abcde@
+
+h2. Resource
+
+Groups provide a way to apply the same permissions to a set of Arvados objects.  See "permission model":{{site.baseurl}}/api/permission-model.html for details.
+
+Each Group has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|||
+|group_class|string|Type of group. This does not affect behavior, but determines how the group is presented in the user interface. For example, @project@ indicates that the group should be displayed by Workbench and arv-mount as a project for organizing and naming objects.|@"project"@ or null|
+|description|text|||
+|properties|hash|User-defined metadata, may be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters ||
+|writable_by|array|List of UUID strings identifying Users and other Groups that have write permission for this Group.  Only users who are allowed to administer the Group will receive a full list.  Other users will receive a partial list that includes the Group's owner_uuid and (if applicable) their own user UUID.||
+|trash_at|datetime|If @trash_at@ is non-null and in the past, this group and all objects directly or indirectly owned by the group will be hidden from API calls.  May be untrashed.||
+|delete_at|datetime|If @delete_at@ is non-null and in the past, the group and all objects directly or indirectly owned by the group may be permanently deleted.||
+|is_trashed|boolean|True if @trash_at@ is in the past, false if not.||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. contents
+
+Retrieve a list of items owned by the group.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the group in question.|path||
+|limit|integer (default 100)|Maximum number of items to return.|query||
+|order|array|Attributes to use as sort keys to determine the order resources are returned, each optionally followed by @asc@ or @desc@ to indicate ascending or descending order. Sort within a resource type by prefixing the attribute with the resource name and a period.|query|@["collections.modified_at desc"]@|
+|filters|array|Conditions for filtering items.|query|@[["uuid", "is_a", "arvados#job"]]@|
+|recursive|boolean (default false)|Include items owned by subprojects.|query|@true@|
+|exclude_home_project|boolean (default false)|Only return items which are visible to the user but not accessible within the user's home project.  Use this to get a list of items that are shared with the user.|query|@true@|
+
+Note: Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in listed collections.  If you need it, request a "list of collections":{{site.baseurl}}/api/methods/collections.html with the filter @["owner_uuid", "=", GROUP_UUID]@, and @"manifest_text"@ listed in the select parameter.
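+
+For example, a sketch of the collections list request described in this note (the group UUID is a placeholder) would pass parameters equivalent to:
+
+<notextile><pre>
+{
+  "filters": [["owner_uuid", "=", "zzzzz-j7d0g-0123456789abcde"]],
+  "select": ["uuid", "name", "manifest_text"]
+}
+</pre></notextile>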
+
+Note: Use filters with the attribute format @<item type>.<field name>@ to filter items of a specific type. For example: @["pipeline_instances.state", "=", "Complete"]@ to filter @pipeline_instances@ where @state@ is @Complete@. All other types of items owned by this group will be unimpacted by this filter and will still be included.
+
+h3. create
+
+Create a new Group.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|group|object||query||
+|async|boolean (default false)|Defer the permissions graph update by a configured number of seconds. (By default, @async_permissions_update_interval@ is 20 seconds). On success, the response is 202 (Accepted).|query|@true@|
+
+h3. delete
+
+Put a Group in the trash.  This sets the @trash_at@ field to @now@ and @delete_at@ field to @now@ + token TTL.  A trashed group is invisible to most API calls unless the @include_trash@ parameter is true.  All objects directly or indirectly owned by the Group are considered trashed as well.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path||
+
+h3. get
+
+Gets a Group's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path||
+
+h3. list
+
+List groups.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. show
+
+Show a single group's record.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string||path||
+
+h3. update
+
+Update attributes of an existing Group.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path||
+|group|object||query||
+|async|boolean (default false)|Defer the permissions graph update by a configured number of seconds. (By default, @async_permissions_update_interval@ is 20 seconds). On success, the response is 202 (Accepted).|query|@true@|
+
+h3. untrash
+
+Remove a Group from the trash.  This sets the @trash_at@ and @delete_at@ fields to @null@.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Group to untrash.|path||
+|ensure_unique_name|boolean (default false)|Rename project uniquely if untrashing it would fail with a unique name conflict.|query||
+
+h3. shared
+
+This endpoint returns the top-level set of groups to which access is granted through a chain of one or more permission links, rather than through direct ownership by the current user account.  This is useful for clients that wish to browse projects the user has permission to read but which are not part of the "home" project tree.
+
+When called with "include=owner_uuid" this also returns (in the "included" field) the objects that own those projects (users or non-project groups).
+
+Specifically, the logic is:
+
+<pre>
+select groups that are readable by current user AND
+    (the owner_uuid is a user (but not the current user) OR
+     the owner_uuid is not readable by the current user OR
+     the owner_uuid is a group but group_class is not a project)
+</pre>
+
+In addition to the "include" parameter this endpoint also supports the same parameters as the "list method.":{{site.baseurl}}/api/methods.html#index
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|include|string|If provided with the value "owner_uuid", this will return owner objects in the "included" field of the response.|query|?include=owner_uuid|
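+
+As an illustrative sketch of the response shape (the records below are abbreviated to a few fields and all UUIDs are placeholders; real responses contain the full records), a call with @include=owner_uuid@ might return:
+
+<notextile><pre>
+{
+  "items": [
+    {"uuid": "zzzzz-j7d0g-0123456789abcde", "group_class": "project", "owner_uuid": "zzzzz-tpzed-0123456789abcde"}
+  ],
+  "included": [
+    {"uuid": "zzzzz-tpzed-0123456789abcde", "kind": "arvados#user"}
+  ]
+}
+</pre></notextile>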
diff --git a/doc/api/methods/humans.html.textile.liquid b/doc/api/methods/humans.html.textile.liquid
new file mode 100644 (file)
index 0000000..84cb22c
--- /dev/null
@@ -0,0 +1,81 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "humans"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/humans@
+
+Object type: @7a9it@
+
+Example UUID: @zzzzz-7a9it-0123456789abcde@
+
+h2. Resource
+
+A metadata record that may be used to represent a human subject.
+
+Each Human has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|properties|hash|||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new Human.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|human|object||query||
+
+h3. delete
+
+Delete an existing Human.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Human in question.|path||
+
+h3. get
+
+Gets a Human's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Human in question.|path||
+
+h3. list
+
+List humans.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing Human.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Human in question.|path||
+|human|object||query||
diff --git a/doc/api/methods/job_tasks.html.textile.liquid b/doc/api/methods/job_tasks.html.textile.liquid
new file mode 100644 (file)
index 0000000..deee3a5
--- /dev/null
@@ -0,0 +1,97 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "job_tasks"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/job_tasks@
+
+Object type: @ot0gb@
+
+Example UUID: @zzzzz-ot0gb-0123456789abcde@
+
+h2. Resource
+
+Deprecated.
+
+A job task is an individually scheduled unit of work executed as part of an overall job.
+
+Each JobTask has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|sequence|integer|Execution sequence.
+A step cannot be run until all steps with lower sequence numbers have completed.
+Job steps with the same sequence number can be run in any order.||
+|parameters|hash|||
+|output|text|||
+|progress|float|||
+|success|boolean|Is null if the task has neither completed successfully nor failed permanently.||
+
+The following attributes should not be updated by anyone other than the job manager:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Notes|
+|qsequence|integer|Order of arrival|0-based|
+|job_uuid|string|||
+|created_by_job_task_uuid|string|||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new JobTask.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|job_task|object||query||
+
+h3. delete
+
+Delete an existing JobTask.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the JobTask in question.|path||
+
+h3. get
+
+Gets a JobTask's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the JobTask in question.|path||
+
+h3. list
+
+List job_tasks.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing JobTask.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the JobTask in question.|path||
+|job_task|object||query||
diff --git a/doc/api/methods/jobs.html.textile.liquid b/doc/api/methods/jobs.html.textile.liquid
new file mode 100644 (file)
index 0000000..2f06186
--- /dev/null
@@ -0,0 +1,288 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "jobs"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/jobs@
+
+Object type: @8i9sb@
+
+Example UUID: @zzzzz-8i9sb-0123456789abcde@
+
+h2. Resource
+
+Deprecated.
+
+A job describes a work order to be executed by the Arvados cluster.
+
+Each job has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Notes|
+|script|string|The filename of the job script.|This program will be invoked by Crunch for each job task. It is given as a path to an executable file, relative to the @/crunch_scripts@ directory in the Git tree specified by the _repository_ and _script_version_ attributes.|
+|script_parameters|hash|The input parameters for the job.|Conventionally, one of the parameters is called @"input"@. Typically, some parameter values are collection UUIDs. Ultimately, though, the significance of parameters is left entirely up to the script itself.|
+|repository|string|Git repository name or URL.|Source of the repository where the given script_version is to be found. This can be given as the name of a locally hosted repository, or as a publicly accessible URL starting with @git://@, @http://@, or @https://@.
+Examples:
+@yourusername/yourrepo@
+@https://github.com/curoverse/arvados.git@|
+|script_version|string|Git commit|During a **create** transaction, this is the Git branch, tag, or hash supplied by the client. Before the job starts, Arvados updates it to the full 40-character SHA-1 hash of the commit used by the job.
+See "Specifying Git versions":#script_version below for more detail about acceptable ways to specify a commit.|
+|cancelled_by_client_uuid|string|API client ID|Is null if job has not been cancelled|
+|cancelled_by_user_uuid|string|Authenticated user ID|Is null if job has not been cancelled|
+|cancelled_at|datetime|When job was cancelled|Is null if job has not been cancelled|
+|started_at|datetime|When job started running|Is null if job has not [yet] started|
+|finished_at|datetime|When job finished running|Is null if job has not [yet] finished|
+|running|boolean|Whether the job is running||
+|success|boolean|Whether the job indicated successful completion|Is null if job has not finished|
+|is_locked_by_uuid|string|UUID of the user who has locked this job|Is null if job is not locked. The system user locks the job when starting the job, in order to prevent job attributes from being altered.|
+|node_uuids|array|List of UUID strings for node objects that have been assigned to this job||
+|log|string|Collection UUID|Is null if the job has not finished. After the job runs, the given collection contains a text file with log messages provided by the @arv-crunch-job@ task scheduler as well as the standard error streams provided by the task processes.|
+|tasks_summary|hash|Summary of task completion states.|Example: @{"done":0,"running":4,"todo":2,"failed":0}@|
+|output|string|Collection UUID|Is null if the job has not finished.|
+|nondeterministic|boolean|The job is expected to produce different results if run more than once.|If true, this job will not be considered as a candidate for automatic re-use when submitting subsequent identical jobs.|
+|submit_id|string|Unique ID provided by client when job was submitted|Optional. This can be used by a client to make the "jobs.create":{{site.baseurl}}/api/methods/jobs.html#create method idempotent.|
+|priority|string|||
+|arvados_sdk_version|string|Git commit hash that specifies the SDK version to use from the Arvados repository|This is set by searching the Arvados repository for a match for the arvados_sdk_version runtime constraint.|
+|docker_image_locator|string|Portable data hash of the collection that contains the Docker image to use|This is set by searching readable collections for a match for the docker_image runtime constraint.|
+|runtime_constraints|hash|Constraints that must be satisfied by the job/task scheduler in order to run the job.|See below.|
+|components|hash|Name and uuid pairs representing the child work units of this job. The uuids can be of different object types.|Example components hash: @{"name1": "zzzzz-8i9sb-xyz...", "name2": "zzzzz-d1hrv-xyz...",}@|
+
+h3(#script_version). Specifying Git versions
+
+The script_version attribute and arvados_sdk_version runtime constraint are typically given as a branch, tag, or commit hash, but there are many more ways to specify a Git commit. The "specifying revisions" section of the "gitrevisions manual page":http://git-scm.com/docs/gitrevisions.html has a definitive list. Arvados accepts Git versions in any format listed there that names a single commit (not a tree, a blob, or a range of commits). However, some kinds of names can be expected to resolve differently in Arvados than they do in your local repository. For example, <code>HEAD@{1}</code> refers to the local reflog, and @origin/master@ typically refers to a remote branch: neither is likely to work as desired if given as a Git version.
+
+h3. Runtime constraints
+
+table(table table-bordered table-condensed).
+|_. Key|_. Type|_. Description|_. Implemented|
+|arvados_sdk_version|string|The Git version of the SDKs to use from the Arvados git repository.  See "Specifying Git versions":#script_version for more detail about acceptable ways to specify a commit.  If you use this, you must also specify a @docker_image@ constraint (see below).  In order to install the Python SDK successfully, Crunch must be able to find and run virtualenv inside the container.|&#10003;|
+|docker_image|string|The Docker image that this Job needs to run.  If specified, Crunch will create a Docker container from this image, and run the Job's script inside that.  The Keep mount and work directories will be available as volumes inside this container.  The image must be uploaded to Arvados using @arv keep docker@.  You may specify the image in any format that Docker accepts, such as @arvados/jobs@, @debian:latest@, or the Docker image id.  Alternatively, you may specify the portable data hash of the image Collection.|&#10003;|
+|min_nodes|integer||&#10003;|
+|max_nodes|integer|||
+|min_cores_per_node|integer|Require that each node assigned to this Job have the specified number of CPU cores|&#10003;|
+|min_ram_mb_per_node|integer|Require that each node assigned to this Job have the specified amount of real memory (in MiB)|&#10003;|
+|min_scratch_mb_per_node|integer|Require that each node assigned to this Job have the specified amount of scratch storage available (in MiB)|&#10003;|
+|max_tasks_per_node|integer|Maximum simultaneous tasks on a single node|&#10003;|
+|keep_cache_mb_per_task|integer|Size of file data buffer for per-task Keep directory ($TASK_KEEPMOUNT), in MiB.  Default is 256 MiB.  Increase this to reduce cache thrashing in situations such as accessing multiple large (64+ MiB) files at the same time, or accessing different parts of a large file at the same time.|&#10003;|
+|min_ram_per_task|integer|Minimum real memory (KiB) per task||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. cancel
+
+Cancel a job that is queued or running.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string||path||
+
+h3(#create). create
+
+Create a new Job.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|job|object|Job resource|request body||
+|minimum_script_version |string     |Git branch, tag, or commit hash specifying the minimum acceptable script version (earliest ancestor) to consider when deciding whether to re-use a past job.[1]|query|@"c3e86c9"@|
+|exclude_script_versions|array of strings|Git commit branches, tags, or hashes to exclude when deciding whether to re-use a past job.|query|@["8f03c71","8f03c71"]@
+@["badtag1","badtag2"]@|
+|filters|array of arrays|Conditions to find Jobs to reuse.|query||
+|find_or_create         |boolean    |Before creating, look for an existing job that has identical script, script_version, and script_parameters to those in the present job, has nondeterministic=false, and did not fail (it could be queued, running, or completed). If such a job exists, respond with the existing job instead of submitting a new one.|query|@false@|
+
+When a job is submitted to the queue using the **create** method, the @script_version@ attribute is updated to a full 40-character Git commit hash based on the current content of the specified repository. If @script_version@ cannot be resolved, the job submission is rejected.
+
+fn1. See the "note about specifying Git commits":#script_version for more detail.
+
+h4. Specialized filters
+
+Special filter operations are available for specific Job columns.
+
+* @script_version@ @in git@ @REFSPEC@, @arvados_sdk_version@ @in git@ @REFSPEC@<br>Resolve @REFSPEC@ to a list of Git commits, and match jobs with a @script_version@ or @arvados_sdk_version@ in that list.  When creating a job and filtering @script_version@, the search will find commits between @REFSPEC@ and the submitted job's @script_version@; all other searches will find commits between @REFSPEC@ and HEAD.  This list may include parallel branches if there is more than one path between @REFSPEC@ and the end commit in the graph.  Use @not in@ or @not in git@ filters (below) to blacklist specific commits.
+
+* @script_version@ @not in git@ @REFSPEC@, @arvados_sdk_version@ @not in git@ @REFSPEC@<br>Resolve @REFSPEC@ to a list of Git commits, and match jobs with a @script_version@ or @arvados_sdk_version@ not in that list.
+
+* @docker_image_locator@ @in docker@ @SEARCH@<br>@SEARCH@ can be a Docker image hash, a repository name, or a repository name and tag separated by a colon (@:@).  The server will find collections that contain a Docker image that match that search criteria, then match jobs with a @docker_image_locator@ in that list.
+
+* @docker_image_locator@ @not in docker@ @SEARCH@<br>Negate the @in docker@ filter.
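+
+For example, a sketch of a @filters@ parameter that restricts reuse to jobs run with a particular Docker image tag (the image name is illustrative):
+
+<notextile><pre>
+{
+  "filters": [["docker_image_locator", "in docker", "arvados/jobs:latest"]]
+}
+</pre></notextile>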
+
+h4. Reusing jobs
+
+Because Arvados records the exact version of the script, input parameters, and runtime environment that was used to run the job, if the script is deterministic (meaning that the same code version is guaranteed to produce the same outputs from the same inputs) then it is possible to re-use the results of past jobs, and avoid re-running the computation to save time.  Arvados uses the following algorithm to determine if a past job can be re-used:
+
+notextile. <div class="spaced-out">
+
+# If @find_or_create@ is false or omitted, create a new job and skip the rest of these steps.
+# If @filters@ are specified, find jobs that match those filters. If any filters are given, there must be at least one filter on the @repository@ attribute and one on the @script@ attribute: otherwise an error is returned.
+# If @filters@ are not specified, find jobs with the same @repository@ and @script@, with a @script_version@ between @minimum_script_version@ and @script_version@ inclusively (excluding @exclude_script_versions@), and a @docker_image_locator@ with the latest Collection that matches the submitted job's @docker_image@ constraint.  If the submitted job includes an @arvados_sdk_version@ constraint, jobs must have an @arvados_sdk_version@ between that refspec and HEAD to be found. *This form is deprecated: use filters instead.*
+# If the found jobs include a completed job, and all found completed jobs have consistent output, return one of them.  Which specific job is returned is undefined.
+# If the found jobs only include incomplete jobs, return one of them.  Which specific job is returned is undefined.
+# If no job has been returned so far, create and return a new job.
+
+</div>
+
+h4. Examples
+
+Run the script "crunch_scripts/hash.py" in the repository "you" using the "master" commit.  Arvados should re-use a previous job if the script_version of the previous job is the same as the current "master" commit. This works irrespective of whether the previous job was submitted using the name "master", a different branch name or tag indicating the same commit, a SHA-1 commit hash, etc.
+
+<notextile><pre>
+{
+  "job": {
+    "script": "hash.py",
+    "repository": "<b>you</b>/<b>you</b>",
+    "script_version": "master",
+    "script_parameters": {
+      "input": "c1bad4b39ca5a924e481008009d94e32+210"
+    }
+  },
+  "find_or_create": true
+}
+</pre></notextile>
+
+Run using exactly the version "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5". Arvados should re-use a previous job if the "script_version" of that job is also "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5".
+
+<notextile><pre>
+{
+  "job": {
+    "script": "hash.py",
+    "repository": "<b>you</b>/<b>you</b>",
+    "script_version": "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5",
+    "script_parameters": {
+      "input": "c1bad4b39ca5a924e481008009d94e32+210"
+    }
+  },
+  "find_or_create": true
+}
+</pre></notextile>
+
+Arvados should re-use a previous job if the "script_version" of the previous job is between "earlier_version_tag" and the "master" commit (inclusive), but not the commit indicated by "blacklisted_version_tag". If there are no previous jobs matching these criteria, run the job using the "master" commit.
+
+<notextile><pre>
+{
+  "job": {
+    "script": "hash.py",
+    "repository": "<b>you</b>/<b>you</b>",
+    "script_version": "master",
+    "script_parameters": {
+      "input": "c1bad4b39ca5a924e481008009d94e32+210"
+    }
+  },
+  "minimum_script_version": "earlier_version_tag",
+  "exclude_script_versions": ["blacklisted_version_tag"],
+  "find_or_create": true
+}
+</pre></notextile>
+
+The same behavior, using filters:
+
+<notextile><pre>
+{
+  "job": {
+    "script": "hash.py",
+    "repository": "<b>you</b>/<b>you</b>",
+    "script_version": "master",
+    "script_parameters": {
+      "input": "c1bad4b39ca5a924e481008009d94e32+210"
+    }
+  },
+  "filters": [["script", "=", "hash.py"],
+              ["repository", "=", "<b>you</b>/<b>you</b>"],
+              ["script_version", "in git", "earlier_version_tag"],
+              ["script_version", "not in git", "blacklisted_version_tag"]],
+  "find_or_create": true
+}
+</pre></notextile>
+
+Run the script "crunch_scripts/monte-carlo.py" in the repository "you/you" using the current "master" commit. Because it is marked as "nondeterministic", this job will not be considered as a suitable candidate for future job submissions that use the "find_or_create" feature.
+
+<notextile><pre>
+{
+  "job": {
+    "script": "monte-carlo.py",
+    "repository": "<b>you</b>/<b>you</b>",
+    "script_version": "master",
+    "nondeterministic": true,
+    "script_parameters": {
+      "input": "c1bad4b39ca5a924e481008009d94e32+210"
+    }
+  }
+}
+</pre></notextile>
+
+h3. delete
+
+Delete an existing Job.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Job in question.|path||
+
+h3. get
+
+Gets a Job's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Job in question.|path||
+
+h3. list
+
+List jobs.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+See the create method documentation for more information about Job-specific filters.
+
+h3. log_tail_follow
+
+Fetch the tail of a job's log, following new output as it is written.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string||path||
+|buffer_size|integer (default 8192)||query||
+
+h3. queue
+
+Get the current job queue.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|order|string||query||
+|filters|array||query||
+
+This method is equivalent to the "list method":#list, except that the results are restricted to queued jobs (i.e., jobs that have not yet been started or cancelled) and order defaults to queue priority.
+
+h3. update
+
+Update attributes of an existing Job.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Job in question.|path||
+|job|object||query||
diff --git a/doc/api/methods/keep_disks.html.textile.liquid b/doc/api/methods/keep_disks.html.textile.liquid
new file mode 100644 (file)
index 0000000..7624b66
--- /dev/null
@@ -0,0 +1,107 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "keep_disks (deprecated)"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/keep_disks@
+
+Object type: @penuu@
+
+Example UUID: @zzzzz-penuu-0123456789abcde@
+
+h2. Resource
+
+Obsoleted by "keep_services":{{site.baseurl}}/api/methods/keep_services.html
+
+Each KeepDisk has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|ping_secret|string|||
+|node_uuid|string|||
+|filesystem_uuid|string|||
+|bytes_total|integer|||
+|bytes_free|integer|||
+|is_readable|boolean|||
+|is_writable|boolean|||
+|last_read_at|datetime|||
+|last_write_at|datetime|||
+|last_ping_at|datetime|||
+|keep_service_uuid|string|||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new KeepDisk.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|keep_disk|object||query||
+
+h3. delete
+
+Delete an existing KeepDisk.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepDisk in question.|path||
+
+h3. get
+
+Gets a KeepDisk's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepDisk in question.|path||
+
+h3. list
+
+List keep_disks.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. ping
+
+Process a ping from a Keep disk.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|ping_secret|string||query||
+{background:#ccffcc}.|service_port|string||query||
+{background:#ccffcc}.|service_ssl_flag|string||query||
+|filesystem_uuid|string||query||
+|node_uuid|string||query||
+|service_host|string||query||
+|uuid|string||query||
+
+h3. update
+
+Update attributes of an existing KeepDisk.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepDisk in question.|path||
+|keep_disk|object||query||
diff --git a/doc/api/methods/keep_services.html.textile.liquid b/doc/api/methods/keep_services.html.textile.liquid
new file mode 100644 (file)
index 0000000..a62f106
--- /dev/null
@@ -0,0 +1,88 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "keep_services"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/keep_services@
+
+Object type: @bi6l4@
+
+Example UUID: @zzzzz-bi6l4-0123456789abcde@
+
+h2. Resource
+
+The keep_services resource enables Keep clients to discover the storage servers and proxies available on the cluster for persistent storage and retrieval of Keep blocks.
+
+Each KeepService has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|service_host|string|hostname of the server||
+|service_port|integer|TCP port of the service||
+|service_ssl_flag|boolean|if the server uses SSL||
+|service_type|string|The service type, one of "disk", "blob" (cloud object store) or "proxy" (keepproxy)||
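+
+As an illustrative sketch (the UUID, hostname, and port are placeholders; 25107 is merely a commonly used keepstore port), a "disk" service record might look like:
+
+<notextile><pre>
+{
+  "uuid": "zzzzz-bi6l4-0123456789abcde",
+  "service_host": "keep0.zzzzz.example.com",
+  "service_port": 25107,
+  "service_ssl_flag": false,
+  "service_type": "disk"
+}
+</pre></notextile>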
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. accessible
+
+Get a list of keep services that are accessible to the requesting client.  Unlike @list@, this is context-sensitive based on the requester: for example, it provides the list of actual Keep servers when the client is inside the cluster, but provides a proxy service if the client contacts Arvados from outside the cluster.
+
+h3. create
+
+Create a new KeepService.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|keep_service|object||query||
+
+h3. delete
+
+Delete an existing KeepService.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||
+
+h3. get
+
+Gets a KeepService's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||
+
+h3. list
+
+List keep_services.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing KeepService.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||
+|keep_service|object||query||
diff --git a/doc/api/methods/links.html.textile.liquid b/doc/api/methods/links.html.textile.liquid
new file mode 100644 (file)
index 0000000..0464344
--- /dev/null
@@ -0,0 +1,102 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "links"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/links@
+
+Object type: @o0j2j@
+
+Example UUID: @zzzzz-o0j2j-0123456789abcde@
+
+h2. Resource
+
+Links are an extensible way to describe relationships between Arvados objects, and to attach metadata to individual objects.
+
+Each link has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|head_uuid|string|The object being described or acted on.|
+|tail_uuid|string|The origin or actor in the description or action (may be null).|
+|link_class|string|Type of link|
+|name|string|Primary value of the link.|
+|properties|hash|Additional information, expressed as a key&rarr;value hash. Key: string. Value: string, number, array, or hash.  May be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters|
+
+h2. Link classes
+
+Some classes are pre-defined by convention and have standard meanings attached to names.
+
+h3. permission
+
+See "permission links":{{site.baseurl}}/api/permission-model.html#links section of the permission model.
+
+h3. tag
+
+A **tag** link describes an object using an unparsed plain text string. Tags can be used to annotate objects that are not editable, like collections and objects shared as read-only.
+
+table(table table-bordered table-condensed).
+|_. tail_type&rarr;head_type|_. name&rarr;head_uuid {properties}|
+|&rarr;Collection           | _tag name_ &rarr; _collection uuid_|
+|&rarr;Job                  | _tag name_ &rarr; _job uuid_|
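+
+As an illustrative sketch, a create call for a tag link on a collection might pass a @link@ parameter like the following (the UUID and tag name are placeholders):
+
+<notextile><pre>
+{
+  "link": {
+    "link_class": "tag",
+    "name": "quality-checked",
+    "head_uuid": "zzzzz-4zz18-0123456789abcde"
+  }
+}
+</pre></notextile>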
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new Link.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|link|object||query||
+
+h3. delete
+
+Delete an existing Link.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path||
+
+h3. get
+
+Gets a Link's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path||
+
+h3. list
+
+List links.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing Link.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path||
+|link|object||query||
diff --git a/doc/api/methods/logs.html.textile.liquid b/doc/api/methods/logs.html.textile.liquid
new file mode 100644 (file)
index 0000000..5a7ac32
--- /dev/null
@@ -0,0 +1,91 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "logs"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/logs@
+
+Object type: @57u5n@
+
+Example UUID: @zzzzz-57u5n-0123456789abcde@
+
+h2. Resource
+
+Each Log has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|object_uuid|string|The UUID of the Arvados object that is the subject of the log.||
+|event_at|datetime|||
+|event_type|string|A user-defined category or type for this event.|@LOGIN@|
+|summary|text|||
+|properties|hash|||
+
+h3. Creation
+
+Any user may create Log entries for any event they find useful. User-generated Logs have no intrinsic meaning to other users or to the Arvados system itself; it is up to each user to choose appropriate log event types and summaries for their project.
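+
+As an illustrative sketch, a user-generated log entry recording a review event might be created with a @log@ parameter like the following (all field values are invented for the example):
+
+<notextile><pre>
+{
+  "log": {
+    "object_uuid": "zzzzz-4zz18-0123456789abcde",
+    "event_type": "review",
+    "summary": "spot-checked output collection",
+    "properties": {"reviewer": "example-user"}
+  }
+}
+</pre></notextile>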
+
+h3. System Logs
+
+Arvados uses Logs to record creation, deletion, and updates of other Arvados resources.
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new log entry.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|log|object||query||
+
+h3. delete
+
+Delete an existing log entry. This method can only be used by privileged (system administrator) users.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||
+
+h3. get
+
+Retrieve a log entry.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||
+
+h3. list
+
+List log entries.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing log entry. This method can only be used by privileged (system administrator) users.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||
+|log|object||query||
diff --git a/doc/api/methods/nodes.html.textile.liquid b/doc/api/methods/nodes.html.textile.liquid
new file mode 100644 (file)
index 0000000..7ddc625
--- /dev/null
@@ -0,0 +1,102 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "nodes"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/nodes@
+
+Object type: @7ekkf@
+
+Example UUID: @zzzzz-7ekkf-0123456789abcde@
+
+h2. Resource
+
+Node resources list compute nodes on which Crunch may schedule work.
+
+Each Node has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|slot_number|integer|||
+|hostname|string|||
+|domain|string|||
+|ip_address|string|||
+|job_uuid|string|The UUID of the job that this node is assigned to work on.  If you do not have permission to read the job, this will be null.||
+|first_ping_at|datetime|||
+|last_ping_at|datetime|||
+|info|hash|Sensitive information about the node (only visible to admin) such as 'ping_secret' and 'ec2_instance_id'. May be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters||
+|properties|hash|Public information about the node, such as 'total_cpu_cores', 'total_ram_mb', and 'total_scratch_mb'.  May be used in queries using "subproperty filters":{{site.baseurl}}/api/methods.html#subpropertyfilters||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new Node.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|node|object||query||
+
+h3. delete
+
+Delete an existing Node.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Node in question.|path||
+
+h3. get
+
+Gets a Node's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Node in question.|path||
+
+h3. list
+
+List nodes.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. ping
+
+Process a ping from a compute node.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|ping_secret|string||query||
+{background:#ccffcc}.|uuid|string||path||
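+
+A sketch of a ping request (the node UUID and @ping_secret@ shown are placeholders):
+
+<pre>
+$ curl -X POST --data-urlencode 'ping_secret=xxxxpingsecretxxxx' \
+  https://{{ site.arvados_api_host }}/arvados/v1/nodes/zzzzz-7ekkf-0123456789abcde/ping
+</pre>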
+
+h3. update
+
+Update attributes of an existing Node.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Node in question.|path||
+|node|object||query||
+
+To remove a node's job assignment, update the node object's @job_uuid@ to null.
diff --git a/doc/api/methods/pipeline_instances.html.textile.liquid b/doc/api/methods/pipeline_instances.html.textile.liquid
new file mode 100644 (file)
index 0000000..09fd4fe
--- /dev/null
@@ -0,0 +1,86 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "pipeline_instances"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/pipeline_instances@
+
+Object type: @d1hrv@
+
+Example UUID: @zzzzz-d1hrv-0123456789abcde@
+
+h2. Resource
+
+Deprecated.  A pipeline instance is a collection of jobs managed by @arvados-run-pipeline-instance@.
+
+Each PipelineInstance has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|pipeline_template_uuid|string|The "pipeline template":pipeline_templates.html that this instance was created from.||
+|name|string|||
+|components|hash|||
+|success|boolean|||
+|active|boolean|||
+|properties|Hash|||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new PipelineInstance.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|pipeline_instance|object||query||
+
+h3. delete
+
+Delete an existing PipelineInstance.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the PipelineInstance in question.|path||
+
+h3. get
+
+Gets a PipelineInstance's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the PipelineInstance in question.|path||
+
+h3. list
+
+List pipeline_instances.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing PipelineInstance.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the PipelineInstance in question.|path||
+|pipeline_instance|object||query||
diff --git a/doc/api/methods/pipeline_templates.html.textile.liquid b/doc/api/methods/pipeline_templates.html.textile.liquid
new file mode 100644 (file)
index 0000000..85df279
--- /dev/null
@@ -0,0 +1,224 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "pipeline_templates"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/pipeline_templates@
+
+Object type: @p5p6p@
+
+Example UUID: @zzzzz-p5p6p-0123456789abcde@
+
+h2. Resource
+
+Deprecated.  A pipeline template is a collection of jobs that can be instantiated as a pipeline_instance.
+
+Each PipelineTemplate has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|||
+|components|hash|||
+
+The pipeline template consists of "name" and "components".
+
+table(table table-bordered table-condensed).
+|_. Attribute    |_. Type |_. Accepted values                           |_. Required|_. Description|
+|name            |string  |any                                          |yes        |The human-readable name of the pipeline template.|
+|components      |object  |JSON object containing job submission objects|yes        |The component jobs that make up the pipeline, with the component name as the key. |
+
+h3. Components
+
+The components field of the pipeline template is a JSON object which describes the individual steps that make up the pipeline.  Each component is an Arvados job submission.  "Parameters for job submissions are described on the job method page.":{{site.baseurl}}/api/methods/jobs.html#create  In addition, a component can have the following parameters:
+
+table(table table-bordered table-condensed).
+|_. Attribute    |_. Type          |_. Accepted values |_. Required|_. Description|
+|output_name     |string or boolean|string or false    |no         |If a string is provided, use this name for the output collection of this component.  If the value is false, do not create a permanent output collection (a temporary intermediate collection will still be created).  If not provided, a default name will be assigned to the output.|
+
+h3. Script parameters
+
+When used in a pipeline, each parameter in the 'script_parameters' attribute of a component job can specify either that its value must be supplied by the user, or that it should be linked to the output of another component.  To do this, the value of the parameter should be a JSON object containing one of the following attributes:
+
+table(table table-bordered table-condensed).
+|_. Attribute    |_. Type |_. Accepted values                               |_. Description|
+|default         |any     |any                                              |The default value for this parameter.|
+|required        |boolean |true or false                                    |Specifies whether the parameter is required to have a value or not.|
+|dataclass       |string  |One of 'Collection', 'File' [1], 'number', or 'text' |Data type of this parameter.|
+|search_for      |string  |any string                                       |Substring to use as a default search string when choosing inputs.|
+|output_of       |string  |the name of another component in the pipeline    |Specifies that the value of this parameter should be set to the 'output' attribute of the job that corresponds to the specified component.|
+|title           |string  |any string                                       |User friendly title to display when choosing parameter values|
+|description     |string  |any string                                       |Extended text description for describing expected/valid values for the script parameter|
+|link_name       |string  |any string                                       |User friendly name to display for the parameter value instead of the actual parameter value|
+
+The 'output_of' parameter is especially important, as this is how components are actually linked together to form a pipeline.  Component jobs that depend on the output of other components do not run until the parent job completes and has produced output.  If the parent job fails, the entire pipeline fails.
+
+fn1. The 'File' type refers to a specific file within a Keep collection in the form 'collection_hash/filename', for example '887cd41e9c613463eab2f0d885c6dd96+83/bob.txt'.
+
+The 'search_for' parameter is meaningful only when the input dataclass is Collection or File.  If a value is provided, it will be preloaded into the input data chooser dialog in Workbench.  For example, if your input dataclass is File and you are interested in a certain filename extension, you can preconfigure it in this attribute.
+
+h3. Examples
+
+This is a pipeline named "Filter MD5 hash values" with two components, "do_hash" and "filter".  The "input" script parameter of the "do_hash" component is required to be filled in by the user, and the expected data type is "Collection".  This also specifies that the "input" script parameter of the "filter" component is the output of "do_hash", so "filter" will not run until "do_hash" completes successfully.  When the pipeline runs, past jobs that meet the criteria described above may be substituted for either or both components to avoid redundant computation.
+
+<notextile><pre>
+{
+  "name": "Filter MD5 hash values",
+  "components": {
+    "do_hash": {
+      "script": "hash.py",
+      "repository": "<b>you</b>/<b>you</b>",
+      "script_version": "master",
+      "script_parameters": {
+        "input": {
+          "required": true,
+          "dataclass": "Collection",
+          "search_for": ".fastq.gz",
+          "title":"Please select a fastq file"
+        }
+      }
+    },
+    "filter": {
+      "script": "0-filter.py",
+      "repository": "<b>you</b>/<b>you</b>",
+      "script_version": "master",
+      "script_parameters": {
+        "input": {
+          "output_of": "do_hash"
+        }
+      }
+    }
+  }
+}
+</pre></notextile>
+
+This pipeline consists of three components.  The components "thing1" and "thing2" both depend on "cat_in_the_hat".  Once the "cat_in_the_hat" job is complete, both "thing1" and "thing2" can run in parallel, because they do not depend on each other.
+
+<notextile><pre>
+{
+  "name": "Wreck the house",
+  "components": {
+    "cat_in_the_hat": {
+      "script": "cat.py",
+      "repository": "<b>you</b>/<b>you</b>",
+      "script_version": "master",
+      "script_parameters": { }
+    },
+    "thing1": {
+      "script": "thing1.py",
+      "repository": "<b>you</b>/<b>you</b>",
+      "script_version": "master",
+      "script_parameters": {
+        "input": {
+          "output_of": "cat_in_the_hat"
+        }
+      }
+    },
+    "thing2": {
+      "script": "thing2.py",
+      "repository": "<b>you</b>/<b>you</b>",
+      "script_version": "master",
+      "script_parameters": {
+        "input": {
+          "output_of": "cat_in_the_hat"
+        }
+      }
+    }
+  }
+}
+</pre></notextile>
+
+This pipeline consists of three components.  The component "cleanup" depends on "thing1" and "thing2".  Both "thing1" and "thing2" are started immediately and can run in parallel, because they do not depend on each other, but "cleanup" cannot begin until both "thing1" and "thing2" have completed.
+
+<notextile><pre>
+{
+  "name": "Clean the house",
+  "components": {
+    "thing1": {
+      "script": "thing1.py",
+      "repository": "<b>you</b>/<b>you</b>",
+      "script_version": "master",
+      "script_parameters": { }
+    },
+    "thing2": {
+      "script": "thing2.py",
+      "repository": "<b>you</b>/<b>you</b>",
+      "script_version": "master",
+      "script_parameters": { }
+    },
+    "cleanup": {
+      "script": "cleanup.py",
+      "repository": "<b>you</b>/<b>you</b>",
+      "script_version": "master",
+      "script_parameters": {
+        "mess1": {
+          "output_of": "thing1"
+        },
+        "mess2": {
+          "output_of": "thing2"
+        }
+      }
+    }
+  }
+}
+</pre></notextile>
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new PipelineTemplate.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|pipeline_template|object||query||
+
+h3. delete
+
+Delete an existing PipelineTemplate.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the PipelineTemplate in question.|path||
+
+h3. get
+
+Gets a PipelineTemplate's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the PipelineTemplate in question.|path||
+
+h3. list
+
+List pipeline_templates.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing PipelineTemplate.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the PipelineTemplate in question.|path||
+|pipeline_template|object||query||
diff --git a/doc/api/methods/repositories.html.textile.liquid b/doc/api/methods/repositories.html.textile.liquid
new file mode 100644 (file)
index 0000000..7a47da6
--- /dev/null
@@ -0,0 +1,94 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "repositories"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/repositories@
+
+Object type: @s0uqq@
+
+Example UUID: @zzzzz-s0uqq-0123456789abcde@
+
+h2. Resource
+
+The repositories resource lists git repositories managed by Arvados.
+
+Each Repository has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|The name of the repository on disk.  Repository names must begin with a letter and contain only alphanumerics.  Unless the repository is owned by the system user, the name must begin with the owner's username, then be separated from the base repository name with @/@.  You may not create a repository that is owned by a user without a username.|@username/project1@|
+|clone_urls|array|URLs from which the repository can be cloned. Read-only.|@["git@git.zzzzz.arvadosapi.com:foo/bar.git",
+ "https://git.zzzzz.arvadosapi.com/foo/bar.git"]@|
+|fetch_url|string|URL suggested as a fetch-url in git config. Deprecated. Read-only.||
+|push_url|string|URL suggested as a push-url in git config. Deprecated. Read-only.||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new Repository.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|repository|object||query||
+
+h3. delete
+
+Delete an existing Repository.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Repository in question.|path||
+
+h3. get
+
+Gets a Repository's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Repository in question.|path||
+
+h3. get_all_permissions
+
+Returns a list of all repositories along with the permissions granted on each.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+
+h3. list
+
+List repositories.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing Repository.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Repository in question.|path||
+|repository|object||query||
diff --git a/doc/api/methods/specimens.html.textile.liquid b/doc/api/methods/specimens.html.textile.liquid
new file mode 100644 (file)
index 0000000..6ee79ca
--- /dev/null
@@ -0,0 +1,81 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "specimens"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/specimens@
+
+Object type: @j58dm@
+
+Example UUID: @zzzzz-j58dm-0123456789abcde@
+
+h2. Resource
+
+A metadata record that may be used to represent a biological specimen.
+
+Each Specimen has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|material|string|||
+|properties|hash|||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new Specimen.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|specimen|object||query||
+
+h3. delete
+
+Delete an existing Specimen.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Specimen in question.|path||
+
+h3. get
+
+Gets a Specimen's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Specimen in question.|path||
+
+h3. list
+
+List specimens.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing Specimen.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Specimen in question.|path||
+|specimen|object||query||
diff --git a/doc/api/methods/traits.html.textile.liquid b/doc/api/methods/traits.html.textile.liquid
new file mode 100644 (file)
index 0000000..34b60cf
--- /dev/null
@@ -0,0 +1,82 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "traits"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/traits@
+
+Object type: @q1cn2@
+
+Example UUID: @zzzzz-q1cn2-0123456789abcde@
+
+h2. Resource
+
+A metadata record that may be used to represent a genotype or phenotype trait.
+
+Each Trait has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|||
+|properties|hash|||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new Trait.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|trait|object||query||
+
+h3. delete
+
+Delete an existing Trait.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Trait in question.|path||
+
+h3. get
+
+Gets a Trait's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Trait in question.|path||
+
+h3. list
+
+List traits.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing Trait.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Trait in question.|path||
+|trait|object||query||
diff --git a/doc/api/methods/users.html.textile.liquid b/doc/api/methods/users.html.textile.liquid
new file mode 100644 (file)
index 0000000..098c2ca
--- /dev/null
@@ -0,0 +1,126 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "users"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/users@
+
+Object type: @tpzed@
+
+Example UUID: @zzzzz-tpzed-0123456789abcde@
+
+h2. Resource
+
+Users represent individuals with access to the Arvados cluster.
+
+Each User has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|email|string|||
+|username|string|The username used for the user's git repositories and virtual machine logins.  Usernames must start with a letter, and contain only alphanumerics.  When a new user is created, a default username is set from their e-mail address.  Only administrators may change the username.||
+|first_name|string|||
+|last_name|string|||
+|identity_url|string|||
+|is_admin|boolean|||
+|prefs|hash|||
+|default_owner_uuid|string|||
+|is_active|boolean|||
+|writable_by|array|List of UUID strings identifying Groups and other Users that can modify this User object.  This will include the user's owner_uuid and, for administrators and users requesting their own User object, the requesting user's UUID.||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new User.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|user|object||query||
+
+h3. current
+
+Get the user associated with the provided API token.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+
+h3. delete
+
+Delete an existing User.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||
+
+h3. get
+
+Gets a User's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||
+
+h3. list
+
+List users.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. system
+
+Get the user record for the "system user.":{{site.baseurl}}/api/permission-model.html#system
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+
+h3. update
+
+Update attributes of an existing User.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||
+|user|object||query||
+
+h3(#update_uuid). update_uuid
+
+Change the UUID of an existing user, updating all database references accordingly.
+
+This method can only be used by an admin user. It should only be used when the affected user is idle. New references to the affected user that are established _while the update_uuid operation is in progress_ might not be migrated as expected.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The current UUID of the user in question.|path|@zzzzz-tpzed-12345abcde12345@|
+{background:#ccffcc}.|new_uuid|string|The desired new UUID. It is an error to use a UUID belonging to an existing user.|query|@zzzzz-tpzed-abcde12345abcde@|
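+
+A sketch of an @update_uuid@ call using the example UUIDs above (assuming the method is invoked with @POST@ on the user's @update_uuid@ action; the token is a placeholder):
+
+<pre>
+$ curl -X POST --data-urlencode 'new_uuid=zzzzz-tpzed-abcde12345abcde' \
+  -H "Authorization: OAuth2 xxxxapitokenxxxx" \
+  https://{{ site.arvados_api_host }}/arvados/v1/users/zzzzz-tpzed-12345abcde12345/update_uuid
+</pre>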
diff --git a/doc/api/methods/virtual_machines.html.textile.liquid b/doc/api/methods/virtual_machines.html.textile.liquid
new file mode 100644 (file)
index 0000000..89272a4
--- /dev/null
@@ -0,0 +1,109 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "virtual_machines"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/virtual_machines@
+
+Object type: @2x53u@
+
+Example UUID: @zzzzz-2x53u-0123456789abcde@
+
+h2. Resource
+
+The virtual_machines resource lists compute resources in the Arvados cluster to which a user may log in to get an interactive shell (via ssh or webshell).
+
+Each VirtualMachine has, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|hostname|string|||
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h3. create
+
+Create a new VirtualMachine.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|virtual_machine|object||query||
+
+h3. delete
+
+Delete an existing VirtualMachine.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path||
+
+h3. get
+
+Gets a VirtualMachine's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path||
+
+h3(#logins). logins
+
+Get a list of SSH keys and account names that should be able to log in to a given virtual machine.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string||path||
+
+The response is an object with the field @items@ containing an array of objects in the following format:
+
+table(table table-bordered table-condensed).
+|_. Key|_. Value type|_. Description|_. Example|
+|username|string|Name of the Unix login account to which the user should be able to log in|@"jsmith"@|
+|hostname|string|Hostname of the virtual machine|@"shell.xyzzy.arvadosapi.com"@|
+|public_key|string|SSH public key|@"ssh-rsa AAAAB3NzaC1yc2E..."@|
+|user_uuid|string|UUID of the user who should be able to log in|@"xyzzy-tpzed-mv4d7dy7n91te11"@|
+|virtual_machine_uuid|string|UUID of the "virtual machine resource":{{site.baseurl}}/api/methods/virtual_machines.html|@"zzzzz-2x53u-kvszmclnbjuv8xc"@|
+|authorized_key_uuid|string|UUID of the "authorized key resource":{{site.baseurl}}/api/methods/authorized_keys.html|@"zzzzz-fngyi-v9p0cyfmjxbio64"@|
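+
+A sketch of a @logins@ response assembled from the example values above:
+
+<notextile><pre>
+{
+  "items": [
+    {
+      "username": "jsmith",
+      "hostname": "shell.xyzzy.arvadosapi.com",
+      "public_key": "ssh-rsa AAAAB3NzaC1yc2E...",
+      "user_uuid": "xyzzy-tpzed-mv4d7dy7n91te11",
+      "virtual_machine_uuid": "zzzzz-2x53u-kvszmclnbjuv8xc",
+      "authorized_key_uuid": "zzzzz-fngyi-v9p0cyfmjxbio64"
+    }
+  ]
+}
+</pre></notextile>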
+
+h3. get_all_logins
+
+Get a list of SSH keys and account names that should be able to log in for every virtual machine in the system.
+
+Arguments: none.
+
+The response has the same format as the response to the "logins method":#logins above.
+
+h3. list
+
+List virtual_machines.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing VirtualMachine.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path||
+|virtual_machine|object||query||
diff --git a/doc/api/methods/workflows.html.textile.liquid b/doc/api/methods/workflows.html.textile.liquid
new file mode 100644 (file)
index 0000000..77ed6f3
--- /dev/null
@@ -0,0 +1,84 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "workflows"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/workflows@
+
+Object type: @7fd4e@
+
+Example UUID: @zzzzz-7fd4e-0123456789abcde@
+
+h2. Resource
+
+Stores a "Common Workflow Language":http://commonwl.org (CWL) computational workflow that can be searched for, browsed, and executed (submitted to Crunch) from Workbench.
+
+Each Workflow offers the following optional attributes, in addition to the "Common resource fields":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|If not specified, will be taken from the "name" given in the "definition" attribute, if any.||
+|description|string|If not specified, will be taken from the "description" given in the "definition" attribute, if any.||
+|definition|string|A "Common Workflow Language" document.|Visit "Common Workflow Language":http://www.commonwl.org/ for details.|
+
+h2. Methods
+
+See "Common resource methods":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+Supports federated @create@, @delete@, @get@, @list@, and @update@.
+
+h3. create
+
+Create a new Workflow.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|workflow|object|Workflow resource|request body||
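+
+A minimal sketch of creating a Workflow (the CWL document is truncated for brevity; the token is a placeholder):
+
+<pre>
+$ curl -X POST \
+  --data-urlencode 'workflow={"name":"My workflow","definition":"{\"cwlVersion\":\"v1.0\", ...}"}' \
+  -H "Authorization: OAuth2 xxxxapitokenxxxx" \
+  https://{{ site.arvados_api_host }}/arvados/v1/workflows
+</pre>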
+
+h3. delete
+
+Delete an existing Workflow.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Workflow in question.|path||
+
+h3. get
+
+Get a Workflow's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Workflow in question.|path||
+
+h3. list
+
+List workflows.
+
+See "common resource list method.":{{site.baseurl}}/api/methods.html#index
+
+h3. update
+
+Update attributes of an existing Workflow.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Workflow in question.|path||
+|workflow|object||query||
diff --git a/doc/api/permission-model.html.textile.liquid b/doc/api/permission-model.html.textile.liquid
new file mode 100644 (file)
index 0000000..7ee1790
--- /dev/null
@@ -0,0 +1,77 @@
+---
+layout: default
+navsection: architecture
+navmenu: Concepts
+title: "Permission model"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+* There are four levels of permission: *none*, *can_read*, *can_write*, and *can_manage*.
+** *none* is the default state when there are no other permission grants.
+*** The object is not included in any list query response.
+*** Direct queries of the object by uuid return 404 Not Found.
+*** Link objects require valid identifiers in @head_uuid@ and @tail_uuid@, so an attempt to create a Link that references an unreadable object will return an error indicating the object is not found.
+** *can_read* grants read-only access to the record.  Attempting to update or delete the record returns an error.  *can_read* does not allow a reader to see any permission grants on the object except the object's owner_uuid and the reader's own permissions.
+** *can_write* permits changes to the record (but not permission links).  *can_write* permits the user to delete the object.  *can_write* also implies *can_read*.
+** *can_manage* permits the user to read, create, update and delete permission links whose @head_uuid@ is this object's @uuid@.  *can_manage* also implies *can_write* and *can_read*.
+
+h2. Ownership
+
+* All Arvados objects have an @owner_uuid@ field. Valid uuid types for @owner_uuid@ are "User" and "Group".
+* The User or Group specified by @owner_uuid@ has *can_manage* permission on the object.
+** This permission is one way: A User or Group's @owner_uuid@ being equal to @X@ does not imply any permission for that User/Group to read, write, or manage an object whose @uuid@ is equal to @X@.
+* Applications should represent each object as belonging to, or being "inside", the Group/User referenced by its @owner_uuid@.
+** A "project" is a subtype of Group that is treated as a "Project" in Workbench, and as a directory by @arv-mount@.
+** A "role" is a subtype of Group that is treated in Workbench as a group of users who have permissions in common (typically an organizational group).
+* To change the @owner_uuid@ field, it is necessary to have @can_write@ permission on both the current owner and the new owner.
+
+h2(#links). Permission links
+
+A link object with
+
+* @owner_uuid@ of the system user.
+* @link_class@ "permission"
+* @name@ one of *can_read*, *can_write* or *can_manage*
+* @head_uuid@ of some Arvados object
+* @tail_uuid@ of a User or Group
+
+grants the @name@ permission to @tail_uuid@ for accessing @head_uuid@.
+
+* If a User has *can_manage* permission on some object, this grants permission to read, create, update and delete permission links where the @head_uuid@ is the object under management.
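+
+For example, a permission link granting *can_read* on a collection to a user might look like the following sketch (all UUIDs are placeholders):
+
+<notextile><pre>
+{
+  "owner_uuid": "zzzzz-tpzed-000000000000000",
+  "link_class": "permission",
+  "name": "can_read",
+  "tail_uuid": "zzzzz-tpzed-0123456789abcde",
+  "head_uuid": "zzzzz-4zz18-0123456789abcde"
+}
+</pre></notextile>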
+
+h3. Transitive permissions
+
+Permissions can be obtained indirectly through Groups.
+* If a User X *can_read* Group A, and Group A *can_read* Object B, then User X *can_read* Object B.
+* Permissions are narrowed to the least powerful permission on the path.
+** If User X *can_write* Group A, and Group A *can_read* Object B, then User X *can_read* Object B.
+** If User X *can_read* Group A, and Group A *can_write* Object B, then User X *can_read* Object B.
+
+h2. Group Membership
+
+Group membership is determined by whether the group has *can_read* permission on an object.  If a group G *can_read* an object A, then we say A is a member of G.
+
+For some kinds of groups, like roles, it is natural for users who are members of a group to also have *can_manage* permission on the group, i.e., G *can_read* A  and A *can_manage* G ("A can do anything G can do"). However, this is not necessary: A can be a member of a group while being unable to even read it.
+
+h2. Special cases
+
+* Log table objects are additionally readable based on whether the User has *can_read* permission on @object_uuid@ (User can access log history about objects it can read).  To retain the integrity of the log, the log table should deny all update or delete operations.
+* Permission links where @tail_uuid@ is a User permit @can_read@ on the link by that user.  (User can discover her own permission grants.)
+* *can_read* on a Collection grants permission to read the blocks that make up the collection (API server returns signed blocks)
+* If User or Group X *can_FOO* Group A, and Group A *can_manage* User B, then X *can_FOO* _everything that User B can_FOO_.
+
+h2(#system). System user and group
+
+A privileged user account exists for use by internal Arvados components.  This user manages system objects which should not be "owned" by any particular user.  The system user uuid is @{siteprefix}-tpzed-000000000000000@.
+
+h2. Anonymous user and group
+
+An Arvados site may be configured to allow users to browse resources without logging in.  In this case, permissions for non-logged-in users are associated with the "anonymous" user.  To make objects visible to the public, they can be shared with the "anonymous" group.  The anonymous user uuid is @{siteprefix}-tpzed-anonymouspublic@.  The anonymous group uuid is @{siteprefix}-j7d0g-anonymouspublic@.
+
+h2. Example
+
+!(full-width){{site.baseurl}}/images/Arvados_Permissions.svg!
diff --git a/doc/api/requests.html.textile.liquid b/doc/api/requests.html.textile.liquid
new file mode 100644 (file)
index 0000000..84cae49
--- /dev/null
@@ -0,0 +1,354 @@
+---
+layout: default
+navsection: api
+navmenu: Concepts
+title: REST API syntax
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados exposes a REST API using standard HTTP requests.
+
+h3. HTTP Method
+
+Use @GET@ to request individual resources or lists of resources.
+
+Use @POST@ to create new resources.
+
+Use @PUT@ to update an existing resource.
+
+Use @DELETE@ to remove an existing resource.
+
+As a special case, a @POST@ with the query parameter @_method=GET@ will be treated as a GET request.  This makes it possible to issue @GET@ requests where the query string exceeds the maximum request URI length, by putting the query string in the body of the request.
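+
+For example, a filter expression too long for the URI can be sent in the request body instead (a sketch; the filter and token are illustrative):
+
+<pre>
+$ curl -X POST \
+  --data-urlencode '_method=GET' \
+  --data-urlencode 'filters=[["name","like","%sample%"]]' \
+  -H "Authorization: OAuth2 xxxxapitokenxxxx" \
+  https://{{ site.arvados_api_host }}/arvados/v1/collections
+</pre>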
+
+h3. Request URI
+
+The URI portion of the request identifies the specific resource to operate on.  For example, operations on "collections":{{site.baseurl}}/api/methods/collections.html use the @https://{{ site.arvados_api_host }}/arvados/v1/collections@ request URI prefix.
+
+h3. Authorization header
+
+Every request must include an API token.  This identifies the user making the request for the purposes of access control.  In addition, tokens may be further "restricted in scope":{{site.baseurl}}/api/methods/api_client_authorizations.html#scope to only access certain API endpoints.
+
+API requests must provide the API token using the @Authorization@ header in the following format:
+
+<pre>
+$ curl -v -H "Authorization: OAuth2 xxxxapitokenxxxx" https://192.168.5.2:8000/arvados/v1/collections
+> GET /arvados/v1/collections HTTP/1.1
+> ...
+> Authorization: OAuth2 xxxxapitokenxxxx
+> ...
+</pre>
+
+h3. Parameters
+
+Request parameters may be provided in one of two ways: in the query section of the request URI, or in the body of the request with application/x-www-form-urlencoded encoding.  If parameters are provided in both places, their values will be merged.  Parameter names must be unique.  If a parameter appears multiple times, the behavior is undefined.
+
+Structured and nested parameter values must be provided as urlencoded JSON.
+
+h3. Result
+
+Results are returned JSON-encoded in the response body.
+
+h3. Errors
+
+If a request cannot be fulfilled, the API will return a 4xx or 5xx HTTP status code.  Be aware that the API server may return a 404 (Not Found) status for resources that exist but for which the client does not have read access.  The API will also return an error record:
+
+table(table table-bordered table-condensed).
+|*Parameter name*|*Value*|*Description*|
+|errors|array|An array of one or more error messages|
+|error_token|string|A unique identifier that can be used to correlate the error with the API server logs|
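+
+A sketch of an error response body (the message and token are placeholders):
+
+<notextile><pre>
+{
+  "errors": ["Path not found"],
+  "error_token": "1478553947+69a1fd1a"
+}
+</pre></notextile>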
+
+h2. Examples
+
+h3. Create a new record
+
+<pre>
+$ curl -v -X POST --data-urlencode 'collection={"name":"empty collection"}' -H "Authorization: OAuth2 oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr" https://192.168.5.2:8000/arvados/v1/collections | jq .
+> POST /arvados/v1/collections HTTP/1.1
+> User-Agent: curl/7.38.0
+> Host: 192.168.5.2:8000
+> Accept: */*
+> Authorization: OAuth2 oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr
+> Content-Length: 54
+> Content-Type: application/x-www-form-urlencoded
+>
+} [data not shown]
+< HTTP/1.1 200 OK
+< Content-Type: application/json; charset=utf-8
+< Transfer-Encoding: chunked
+< Connection: keep-alive
+< Status: 200 OK
+< Access-Control-Allow-Origin: *
+< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE
+< Access-Control-Allow-Headers: Authorization
+< Access-Control-Max-Age: 86486400
+< X-UA-Compatible: IE=Edge,chrome=1
+< ETag: "2ec9ef5151c1f7a1486ad169c33ae462"
+< Cache-Control: max-age=0, private, must-revalidate
+< Set-Cookie: _server_session=BAh7BkkiD3Nlc3Npb25faWQGOgZFVEkiJTIwMjQ1NTE5YmEwMzU1MGZkMTBmYmY1YzllY2ZiMjFlBjsAVA%3D%3D--653bc9c20899d48ee8523e18d9a4c1cde0702577; path=/; HttpOnly
+< X-Request-Id: 56aa10bc49097f3b44d3ed946bf0e61e
+< X-Runtime: 0.049951
+< X-Powered-By: Phusion Passenger 4.0.41
+< Date: Fri, 28 Oct 2016 19:20:09 GMT
+< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
+<
+{
+  "href": "/collections/962eh-4zz18-m1ma0mxxfg3mbcc",
+  "kind": "arvados#collection",
+  "etag": "c5ifrv1ox2tu6alb559ymtkb7",
+  "uuid": "962eh-4zz18-m1ma0mxxfg3mbcc",
+  "owner_uuid": "962eh-tpzed-000000000000000",
+  "created_at": "2016-10-28T19:20:09.320771531Z",
+  "modified_by_client_uuid": "962eh-ozdt8-lm5x8emraox8epg",
+  "modified_by_user_uuid": "962eh-tpzed-000000000000000",
+  "modified_at": "2016-10-28T19:20:09.319661000Z",
+  "name": "empty collection",
+  "description": null,
+  "properties": {},
+  "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
+  "manifest_text": "",
+  "replication_desired": null,
+  "replication_confirmed": null,
+  "replication_confirmed_at": null,
+  "expires_at": null
+}
+</pre>
+
+h3. Delete a record
+
+<pre>
+$ curl -X DELETE -v -H "Authorization: OAuth2 oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr" https://192.168.5.2:8000/arvados/v1/collections/962eh-4zz18-m1ma0mxxfg3mbcc | jq .
+> DELETE /arvados/v1/collections/962eh-4zz18-m1ma0mxxfg3mbcc HTTP/1.1
+> User-Agent: curl/7.38.0
+> Host: 192.168.5.2:8000
+> Accept: */*
+> Authorization: OAuth2 oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr
+>
+< HTTP/1.1 200 OK
+< Content-Type: application/json; charset=utf-8
+< Transfer-Encoding: chunked
+< Connection: keep-alive
+< Status: 200 OK
+< Access-Control-Allow-Origin: *
+< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE
+< Access-Control-Allow-Headers: Authorization
+< Access-Control-Max-Age: 86486400
+< X-UA-Compatible: IE=Edge,chrome=1
+< ETag: "1e8f72802cf1a6d0a5c4a1ebbfcc46a9"
+< Cache-Control: max-age=0, private, must-revalidate
+< Set-Cookie: _server_session=BAh7BkkiD3Nlc3Npb25faWQGOgZFVEkiJTc2NDYyY2M0NTNlNmU3M2Y2M2E3YmFiMWQ1MTEyZGZkBjsAVA%3D%3D--d28c7dd640bd24e2b12f01e77088072138dcf145; path=/; HttpOnly
+< X-Request-Id: e66fd3ab825bdb87301f5456161fb641
+< X-Runtime: 0.028788
+< X-Powered-By: Phusion Passenger 4.0.41
+< Date: Fri, 28 Oct 2016 19:33:31 GMT
+< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
+<
+{
+  "href": "/collections/962eh-4zz18-m1ma0mxxfg3mbcc",
+  "kind": "arvados#collection",
+  "etag": "c5ifrv1ox2tu6alb559ymtkb7",
+  "uuid": "962eh-4zz18-m1ma0mxxfg3mbcc",
+  "owner_uuid": "962eh-tpzed-000000000000000",
+  "created_at": "2016-10-28T19:20:09.320771000Z",
+  "modified_by_client_uuid": "962eh-ozdt8-lm5x8emraox8epg",
+  "modified_by_user_uuid": "962eh-tpzed-000000000000000",
+  "modified_at": "2016-10-28T19:20:09.319661000Z",
+  "name": "empty collection",
+  "description": null,
+  "properties": {},
+  "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
+  "manifest_text": "",
+  "replication_desired": null,
+  "replication_confirmed": null,
+  "replication_confirmed_at": null,
+  "expires_at": null
+}
+</pre>
+
+h3. Get a specific record
+
+<pre>
+$ curl -v -H "Authorization: OAuth2 oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr" https://192.168.5.2:8000/arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km | jq .
+> GET /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km HTTP/1.1
+> User-Agent: curl/7.38.0
+> Host: 192.168.5.2:8000
+> Accept: */*
+> Authorization: OAuth2 oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr
+>
+< HTTP/1.1 200 OK
+< Content-Type: application/json; charset=utf-8
+< Transfer-Encoding: chunked
+< Connection: keep-alive
+< Status: 200 OK
+< Access-Control-Allow-Origin: *
+< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE
+< Access-Control-Allow-Headers: Authorization
+< Access-Control-Max-Age: 86486400
+< X-UA-Compatible: IE=Edge,chrome=1
+< ETag: "fec2ddf433a352e5a2b5d356abd6d3d4"
+< Cache-Control: max-age=0, private, must-revalidate
+< X-Request-Id: 40b447507ff202ae9a0b0b3e0ebe98da
+< X-Runtime: 0.011404
+< X-Powered-By: Phusion Passenger 4.0.41
+< Date: Fri, 28 Oct 2016 18:59:09 GMT
+< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
+<
+{
+  "href": "/collections/962eh-4zz18-xi32mpz2621o8km",
+  "kind": "arvados#collection",
+  "etag": "3mmn0s9e1z5s5opfofmtb9k8p",
+  "uuid": "962eh-4zz18-xi32mpz2621o8km",
+  "owner_uuid": "962eh-tpzed-000000000000000",
+  "created_at": "2016-10-27T14:47:43.792587000Z",
+  "modified_by_client_uuid": "962eh-ozdt8-lm5x8emraox8epg",
+  "modified_by_user_uuid": "962eh-tpzed-000000000000000",
+  "modified_at": "2016-10-27T14:47:43.792166000Z",
+  "name": "Saved at 2016-10-27 14:47:43 UTC by peter@debian",
+  "description": null,
+  "properties": {},
+  "portable_data_hash": "93a45073511646a5c3e2f4953fcf6f61+116",
+  "manifest_text": ". eff999f3b5158331eb44a9a93e3b36e1+67108864+Aad3839bea88bce22cbfe71cf4943de7dab3ea52a@5826180f db141bfd11f7da60dce9e5ee85a988b8+34038725+Ae8f48913fed782cbe463e0499ab37697ee06a2f8@5826180f 0:101147589:rna.SRR948778.bam\n",
+  "replication_desired": null,
+  "replication_confirmed": null,
+  "replication_confirmed_at": null,
+  "expires_at": null
+}
+</pre>
+
+h3. List records and filter by date
+
+(Note: the returned result is truncated.)
+
+<pre>
+$ curl -v -G --data-urlencode 'filters=[["created_at",">","2016-11-08T21:38:24.124834000Z"]]' -H "Authorization: OAuth2 oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr" https://192.168.5.2:8000/arvados/v1/collections | jq .
+> GET /arvados/v1/collections?filters=%5B%5B%22created_at%22%2C%22%3E%22%2C%222016-11-08T21%3A38%3A24.124834000Z%22%5D%5D HTTP/1.1
+> User-Agent: curl/7.38.0
+> Host: 192.168.5.2:8000
+> Accept: */*
+> Authorization: OAuth2 oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr
+>
+< HTTP/1.1 200 OK
+< Content-Type: application/json; charset=utf-8
+< Transfer-Encoding: chunked
+< Connection: keep-alive
+< Status: 200 OK
+< Access-Control-Allow-Origin: *
+< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE
+< Access-Control-Allow-Headers: Authorization
+< Access-Control-Max-Age: 86486400
+< X-UA-Compatible: IE=Edge,chrome=1
+< ETag: "76345ef24952f073acc3a0c550241d4e"
+< Cache-Control: max-age=0, private, must-revalidate
+< X-Request-Id: d34b8ede4ffc707d8ed172dc2f47ff5e
+< X-Runtime: 0.012727
+< X-Powered-By: Phusion Passenger 4.0.41
+< Date: Fri, 28 Oct 2016 19:08:52 GMT
+< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
+<
+{
+  "kind": "arvados#collectionList",
+  "etag": "",
+  "self_link": "",
+  "offset": 0,
+  "limit": 100,
+  "items": [
+    {
+      "href": "/collections/962eh-4zz18-ybggo9im899vv60",
+      "kind": "arvados#collection",
+      "etag": "bvgrrsg63zsenb9wnpnp0nsgl",
+      "uuid": "962eh-4zz18-ybggo9im899vv60",
+      "owner_uuid": "962eh-tpzed-000000000000000",
+      "created_at": "2016-11-08T21:47:36.937106000Z",
+      "modified_by_client_uuid": null,
+      "modified_by_user_uuid": "962eh-tpzed-000000000000000",
+      "modified_at": "2016-11-08T21:47:36.936625000Z",
+      "name": "Log from cwl-runner job 962eh-8i9sb-45jww0k15fi5ldd",
+      "description": null,
+      "properties": {},
+      "portable_data_hash": "a7820b94717eff86229927565fedbd72+85",
+      "replication_desired": null,
+      "replication_confirmed": null,
+      "replication_confirmed_at": null,
+      "expires_at": null
+    },
+   ...
+    {
+      "href": "/collections/962eh-4zz18-37i1tfl5de5ild9",
+      "kind": "arvados#collection",
+      "etag": "2fa07dx52lux8wa1loehwyrc5",
+      "uuid": "962eh-4zz18-37i1tfl5de5ild9",
+      "owner_uuid": "962eh-tpzed-000000000000000",
+      "created_at": "2016-11-08T21:38:46.717798000Z",
+      "modified_by_client_uuid": null,
+      "modified_by_user_uuid": "962eh-tpzed-000000000000000",
+      "modified_at": "2016-11-08T21:38:46.717409000Z",
+      "name": null,
+      "description": null,
+      "properties": {},
+      "portable_data_hash": "9d43d4c8328640446f6e252cda584e7e+54",
+      "replication_desired": null,
+      "replication_confirmed": null,
+      "replication_confirmed_at": null,
+      "expires_at": null
+    }
+  ],
+  "items_available": 99
+}
+</pre>
+
+h3. Update a field
+
+<pre>
+$ curl -v -X PUT --data-urlencode 'collection={"name":"rna.SRR948778.bam"}' -H "Authorization: OAuth2 oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr" https://192.168.5.2:8000/arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km | jq .
+> PUT /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km HTTP/1.1
+> User-Agent: curl/7.38.0
+> Host: 192.168.5.2:8000
+> Accept: */*
+> Authorization: OAuth2 oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr
+> Content-Length: 53
+> Content-Type: application/x-www-form-urlencoded
+>
+} [data not shown]
+< HTTP/1.1 200 OK
+< Content-Type: application/json; charset=utf-8
+< Transfer-Encoding: chunked
+< Connection: keep-alive
+< Status: 200 OK
+< Access-Control-Allow-Origin: *
+< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE
+< Access-Control-Allow-Headers: Authorization
+< Access-Control-Max-Age: 86486400
+< X-UA-Compatible: IE=Edge,chrome=1
+< ETag: "fbb50d2847426eab793e3fcf346ca9eb"
+< Cache-Control: max-age=0, private, must-revalidate
+< Set-Cookie: _server_session=BAh7BkkiD3Nlc3Npb25faWQGOgZFVEkiJWI3NjFjMzVjMGI5OGExYmNjZDg0ZTg5MjZhMzcwMDE1BjsAVA%3D%3D--0e005d71fad15cb366e47361c38474b7447ba155; path=/; HttpOnly
+< X-Request-Id: 76d3cb3c0995af6133b0a73a64f57354
+< X-Runtime: 0.030756
+< X-Powered-By: Phusion Passenger 4.0.41
+< Date: Fri, 28 Oct 2016 19:15:16 GMT
+< Server: nginx/1.4.7 + Phusion Passenger 4.0.41
+<
+{
+  "href": "/collections/962eh-4zz18-xi32mpz2621o8km",
+  "kind": "arvados#collection",
+  "etag": "51509hhxo9qqjxqewnoz1b7og",
+  "uuid": "962eh-4zz18-xi32mpz2621o8km",
+  "owner_uuid": "962eh-tpzed-000000000000000",
+  "created_at": "2016-10-27T14:47:43.792587000Z",
+  "modified_by_client_uuid": "962eh-ozdt8-lm5x8emraox8epg",
+  "modified_by_user_uuid": "962eh-tpzed-000000000000000",
+  "modified_at": "2016-10-28T19:15:16.137814000Z",
+  "name": "rna.SRR948778.bam",
+  "description": null,
+  "properties": {},
+  "portable_data_hash": "93a45073511646a5c3e2f4953fcf6f61+116",
+  "manifest_text": ". eff999f3b5158331eb44a9a93e3b36e1+67108864+Acca57af82cc18c5dfa47bdfd16e335fccd09dfa5@582618c4 db141bfd11f7da60dce9e5ee85a988b8+34038725+A7764f122f41f92c2d5bde1852fcdd1bea5f8bd78@582618c4 0:101147589:rna.SRR948778.bam\n",
+  "replication_desired": null,
+  "replication_confirmed": null,
+  "replication_confirmed_at": null,
+  "expires_at": null
+}
+</pre>
diff --git a/doc/api/resources.html.textile.liquid b/doc/api/resources.html.textile.liquid
new file mode 100644 (file)
index 0000000..2c4491f
--- /dev/null
@@ -0,0 +1,44 @@
+---
+layout: default
+navsection: api
+navmenu: Concepts
+title: Common resource fields
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes the common attributes of Arvados resources.
+
+h2(#resource). Resource
+
+table(table table-bordered table-condensed).
+|_. Attribute |_. Type |_. Description |_. Example|
+|uuid|string|universally unique object identifier, set on @create@|@mk2qn-4zz18-w3anr2hk2wgfpuo@|
+|owner_uuid|string|UUID of owner (must be a User or Group), set on @create@, controls who may access the resource, ownership may be changed explicitly with @update@, see "permission model":{{site.baseurl}}/api/permission-model.html for details.|@mk2qn-tpzed-a4lcehql0dv2u25@|
+|created_at|datetime|When resource was created, set on @create@|@2013-01-21T22:17:39Z@|
+|modified_by_client_uuid|string|API client software which most recently modified the resource, set on @create@ and @update@|@mk2qn-ozdt8-vq8l5qkzj7pr7h7@|
+|modified_by_user_uuid|string|Authenticated user, on whose behalf the client was acting when modifying the resource, set on @create@ and @update@|@mk2qn-tpzed-a4lcehql0dv2u25@|
+|modified_at|datetime|When resource was last modified, set on @create@ and @update@|@2013-01-25T22:29:32Z@|
+|href|string|a URL that can be used to address this resource||
+|kind|string|@arvados#{resource_type}@|@arvados#collection@|
+|etag|string|The ETag[1] of the resource|@1xlmizzjq7wro3dlb2dirf505@|
+
+h2. Object UUID
+
+Each object is assigned a UUID.  This has the format @aaaaa-bbbbb-ccccccccccccccc@.
+
+# The first field (@aaaaa@ in the example) is the site prefix.  This is unique to a specific Arvados installation.
+# The second field (@bbbbb@ in the example) is the object type.
+# The third field (@ccccccccccccccc@ in the example) uniquely identifies the object.
+
+h2. Timestamps
+
+All Arvados timestamps follow ISO 8601 datetime format with fractional seconds (microsecond precision).  All timestamps are UTC.  The format is @YYYY-mm-ddTHH:MM:SS.SSSSZ@; for example: @2016-11-08T21:38:24.124834000Z@.
+
+h2. ETags
+
+fn1. Each response includes an ETag, a string which changes when the resource changes.  Clients can use this to check whether a resource has changed since they last retrieved it.  If a previous ETag is provided along with a request, and the resource has not changed since, the server may return a "not modified" response.
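+
+For example, a conditional request using a previously returned ETag (a sketch; support may vary by endpoint, and the token is a placeholder):
+
+<pre>
+$ curl -H 'If-None-Match: "1xlmizzjq7wro3dlb2dirf505"' \
+  -H "Authorization: OAuth2 xxxxapitokenxxxx" \
+  https://{{ site.arvados_api_host }}/arvados/v1/collections/mk2qn-4zz18-w3anr2hk2wgfpuo
+</pre>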
diff --git a/doc/api/storage.html.textile.liquid b/doc/api/storage.html.textile.liquid
new file mode 100644 (file)
index 0000000..aa0ed21
--- /dev/null
@@ -0,0 +1,175 @@
+---
+layout: default
+navsection: architecture
+title: Storage in Keep
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Keep clients are applications such as @arv-get@, @arv-put@ and @arv-mount@ which store and retrieve data from Keep.  In doing so, these programs interact with both the API server (which stores file metadata in form of Collection objects) and individual Keep servers (which store the actual data blocks).
+
+!(full-width){{site.baseurl}}/images/Keep_reading_writing_block.svg!
+
+h2. Storing a file
+
+# The client discovers keep servers (or proxies) using the @accessible@ method on "keep_services":{{site.baseurl}}/api/methods/keep_services.html
+# Data is split into 64 MiB blocks and the MD5 hash is computed for each block.
+# The client uploads each block to one or more Keep servers, based on the number of desired replicas.  The priority order is determined using rendezvous hashing, described below.
+# The Keep server returns a block locator (the MD5 sum of the block) and a "signed token" which the client can use as proof of knowledge for the block.
+# The client constructs a @manifest@ which lists the blocks by MD5 hash and how to reassemble them into the original files.
+# The client creates a "collection":{{site.baseurl}}/api/methods/collections.html and provides the @manifest_text@.
+# The API server accepts the collection after validating the signed tokens (proof of knowledge) for each block.  (A minimal SDK sketch of this flow is shown below.)
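+
+The SDKs drive all of these steps.  A minimal sketch of the write path above using the Python SDK, assuming @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ are set in the environment (file and collection names here are hypothetical):
+
+<pre>
+import arvados.collection
+
+# Writing through a Collection object performs the block upload, manifest
+# construction, and collection creation steps described above.
+c = arvados.collection.Collection()
+with c.open('hello.txt', 'w') as f:
+    f.write('hello world\n')
+c.save_new(name='storage example')
+print(c.manifest_text())
+</pre>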
+
+!(full-width){{site.baseurl}}/images/Keep_manifests.svg!
+
+h2. Fetching a file
+
+# The client requests a @collection@ object, including its @manifest_text@, from the API server.
+# The server adds "token signatures" to the @manifest_text@ and returns it to the client.
+# The client discovers keep servers (or proxies) using the @accessible@ method on "keep_services":{{site.baseurl}}/api/methods/keep_services.html
+# For each data block, the client chooses the highest priority server using rendezvous hashing, described below.
+# The client sends the data block request to the keep server, along with the token signature from the API server, which proves to Keep servers that the client is permitted to read a given block.
+# The server provides the block data after validating the token signature for the block (if the server does not have the block, it returns a 404 and the client tries the next highest priority server).  A minimal read sketch is shown below.
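+
+A minimal read sketch with the Python SDK (the collection uuid here is hypothetical):
+
+<pre>
+import arvados.collection
+
+# CollectionReader fetches the signed manifest from the API server; reads
+# then fetch blocks from the highest priority keep servers.
+c = arvados.collection.CollectionReader('962eh-4zz18-xi32mpz2621o8km')
+with c.open('hello.txt', 'r') as f:
+    print(f.read())
+</pre>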
+
+!(full-width){{site.baseurl}}/images/Keep_rendezvous_hashing.svg!
+
+Each @keep_service@ resource has an assigned uuid.  To determine the priority assignment of a block to servers, compute, for each keep service, the MD5 sum of the string concatenation of the block locator (hex-coded hash part only) and the service uuid; then sort the services by these sums in descending order.  Blocks are preferentially placed on the servers with the highest weights.
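+
+A Python sketch of this ordering, following the description above (the service uuids are hypothetical):
+
+<pre>
+import hashlib
+
+def rendezvous_order(block_hash, service_uuids):
+    # Weight each service by MD5(block hash + service uuid) and sort in
+    # descending order; earlier entries are higher priority.
+    def weight(uuid):
+        return hashlib.md5((block_hash + uuid).encode()).hexdigest()
+    return sorted(service_uuids, key=weight, reverse=True)
+
+services = ['zzzzz-bi6l4-aaaaaaaaaaaaaaa', 'zzzzz-bi6l4-bbbbbbbbbbbbbbb']
+print(rendezvous_order('d41d8cd98f00b204e9800998ecf8427e', services))
+</pre>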
+
+h2. Keep server API
+
+The Keep server is accessed via a simple HTTP REST API.
+
+*GET /blocklocator+size+A@token*
+
+Fetch a data block.  The response body contains the block contents.  If permission checking is enabled, the locator must include a valid token hint.
+
+*PUT /blocklocator*
+
+Body: the block contents.  Responds with the block locator, consisting of the MD5 sum of the data, the block size, and a signed token hint.
+
+*POST /*
+
+Body: the block contents.  Responds with the block locator, consisting of the MD5 sum of the data, the block size, and a signed token hint.
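+
+For illustration, a Python sketch of the store/fetch round trip using the @requests@ library (the host and token are placeholders, and passing the API token as an @OAuth2@ @Authorization@ header is an assumption about the deployment):
+
+<pre>
+import hashlib
+import requests
+
+keep = 'https://keep.example.com'   # placeholder keepstore/keepproxy host
+token = 'xxxxapitokenxxxx'          # placeholder API token
+block = b'hello world\n'
+
+# PUT the block under its MD5 locator; the response body is the signed
+# locator (digest + size hint + token hint).
+r = requests.put('%s/%s' % (keep, hashlib.md5(block).hexdigest()),
+                 data=block, headers={'Authorization': 'OAuth2 ' + token})
+signed_locator = r.text.strip()
+
+# GET the block back using the signed locator.
+r = requests.get('%s/%s' % (keep, signed_locator),
+                 headers={'Authorization': 'OAuth2 ' + token})
+assert r.content == block
+</pre>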
+
+h2(#locator). Keep locator format
+
+BNF notation for a valid Keep locator string (with hints); for example: @d41d8cd98f00b204e9800998ecf8427e+0+Z+Ada39a3ee5e6b4b0d3255bfef95601890afd80709@53bed294@
+
+<pre>
+locator        ::= sized-digest hint*
+sized-digest   ::= digest size-hint
+digest         ::= <32 lowercase hexadecimal digits>
+size-hint      ::= "+" [0-9]+
+hint           ::= "+" hint-type hint-content
+hint-type      ::= [A-Z]+
+hint-content   ::= [A-Za-z0-9@_-]*
+sign-hint      ::= "+A" <40 lowercase hexadecimal digits> "@" sign-timestamp
+sign-timestamp ::= <8 lowercase hexadecimal digits>
+</pre>
+
+h3. Token signatures
+
+A token signature (sign-hint) provides proof-of-access for a data block.  It is computed as a SHA1 HMAC, keyed with the blob signing token (a shared secret between the API server and keep servers), over the block digest, the current API token, the expiration timestamp, and the blob signature TTL.
+
+When communicating with the Keep store to fetch a block, or the API server to create or update a collection, the service computes the expected token signature for each block and compares it to the token signature that was presented by the client.  Keep clients receive valid block signatures when uploading a block to a keep store (getting back a signed token as proof of knowledge) or, from the API server, getting the manifest text of a collection on which the user has read permission.
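+
+In outline, the signature computation looks like the following Python sketch.  The exact message layout is defined by the Arvados source; the separator and field order here are assumptions for illustration only:
+
+<pre>
+import hashlib
+import hmac
+
+def sign_sketch(blob_signing_token, block_digest, api_token, expiry_hex, ttl):
+    # HMAC-SHA1 keyed with the blob signing token over the inputs listed
+    # above (field order and "@" separator are assumptions).
+    msg = '@'.join([block_digest, api_token, expiry_hex, str(ttl)])
+    sig = hmac.new(blob_signing_token.encode(), msg.encode(),
+                   hashlib.sha1).hexdigest()
+    return '+A%s@%s' % (sig, expiry_hex)
+</pre>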
+
+Security of a token signature is derived from the following characteristics:
+
+# Valid signatures can only be generated by entities that know the shared secret (the "blob signing token")
+# A signature can only be used by an entity that also knows the API token that was used to generate it.
+# It expires after a set date (the expiration time, based on the "blob signature time-to-live (TTL)")
+
+h3. Regular expression to validate locator
+
+<pre>
+/^([0-9a-f]{32})\+([0-9]+)(\+[A-Z][-A-Za-z0-9@_]*)*$/
+</pre>
+
+h3. Valid locators
+
+table(table table-bordered table-condensed).
+|@d41d8cd98f00b204e9800998ecf8427e+0@|
+|@d41d8cd98f00b204e9800998ecf8427e+0+Z@|
+|<code>d41d8cd98f00b204e9800998ecf8427e+0+Z+Ada39a3ee5e6b4b0d3255bfef95601890afd80709@53bed294</code>|
+
+h3. Invalid locators
+
+table(table table-bordered table-condensed).
+|_. Locator|_. Why invalid|
+|@d41d8cd98f00b204e9800998ecf8427e@|No size hint|
+|@d41d8cd98f00b204e9800998ecf8427e+Z+0@|Other hint before size hint|
+|@d41d8cd98f00b204e9800998ecf8427e+0+0@|Multiple size hints|
+|@d41d8cd98f00b204e9800998ecf8427e+0+z@|Hint does not start with uppercase letter|
+|@d41d8cd98f00b204e9800998ecf8427e+0+Zfoo*bar@|Hint contains invalid character @*@|
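+
+These examples can be checked directly against the regular expression above, e.g. in Python:
+
+<pre>
+import re
+
+LOCATOR = re.compile(r'^([0-9a-f]{32})\+([0-9]+)(\+[A-Z][-A-Za-z0-9@_]*)*$')
+
+assert LOCATOR.match('d41d8cd98f00b204e9800998ecf8427e+0')
+assert LOCATOR.match('d41d8cd98f00b204e9800998ecf8427e+0+Z')
+assert not LOCATOR.match('d41d8cd98f00b204e9800998ecf8427e')            # no size hint
+assert not LOCATOR.match('d41d8cd98f00b204e9800998ecf8427e+Z+0')        # hint before size hint
+assert not LOCATOR.match('d41d8cd98f00b204e9800998ecf8427e+0+0')        # multiple size hints
+assert not LOCATOR.match('d41d8cd98f00b204e9800998ecf8427e+0+z')        # lowercase hint type
+assert not LOCATOR.match('d41d8cd98f00b204e9800998ecf8427e+0+Zfoo*bar') # invalid character "*"
+</pre>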
+
+h2. Manifest v1
+
+A manifest is UTF-8 encoded text, consisting of zero or more newline-terminated streams.
+
+<pre>
+manifest       ::= stream*
+stream         ::= stream-name (" " locator)+ (" " file-segment)+ "\n"
+stream-name    ::= "." ("/" path-component)*
+path-component ::= <printable ASCII - (whitespace, "/")>+
+file-segment   ::= position ":" size ":" filename
+position       ::= [0-9]+
+size           ::= [0-9]+
+filename       ::= path-component ("/" path-component)*
+</pre>
+
+Notes:
+
+* The first token is the stream name, consisting of one or more path components, delimited by @"/"@.
+** The first path component is always @"."@.
+** No path component is empty.
+** No path component after the first may be @"."@ or @".."@.
+** The stream name never begins or ends with @"/"@.
+* The next N tokens are "keep locators":#locator
+** These describe the "data stream".  By logically concatenating the blocks in the order that they appear, we can refer to "positions" in the data stream.
+* File tokens come after the sequence of keep locators.
+** A file token has three parts, delimited by @":"@: position, size, filename.
+** Position and size are given in decimal.
+** The position is the byte offset in the data stream at which the file content begins.
+** The size is the count of bytes of file content following that position.  A file may span multiple blocks in the data stream.
+** Filename may contain @"/"@ characters, but must not start or end with @"/"@, and must not contain @"//"@.
+** Filename components (delimited by @"/"@) must not be @"."@ or @".."@.
+** There may be multiple file tokens.
+
+It is legal to have multiple file tokens in the manifest (possibly across different streams) with the same combined path name @stream name + "/" + filename@.  This must be interpreted as a concatenation of file content, in the order that the file tokens appear in the manifest.
+
+Spaces are represented by the escape sequence @\040@.  Spaces in stream names and filenames must be translated when reading and writing manifests.  A manifest may not contain TAB characters, nor any other ASCII whitespace or control characters besides the spaces and newlines used as delimiters as specified above.  A manifest always ends with a newline -- except the empty (zero-length) string, which is a valid manifest.
+
+h3. Normalized manifest v1
+
+A normalized manifest is a manifest that meets the following additional restrictions:
+
+* Streams are in alphanumeric order.
+* Each stream name is unique within the manifest.
+* Files within a stream are listed in alphanumeric order.
+* Blocks within a stream are ordered according to the order of the stream's file tokens.  A given block is listed at most once in a stream.
+* Filenames must not contain @"/"@ (the stream name represents the path prefix).
+
+h3. Example manifests
+
+A manifest with four files in two directories:
+
+<pre>
+. 930625b054ce894ac40596c3f5a0d947+33 0:0:a 0:0:b 0:33:output.txt
+./c d41d8cd98f00b204e9800998ecf8427e+0 0:0:d
+</pre>
+
+The same manifest with permission signatures on each block:
+
+<pre>
+. 930625b054ce894ac40596c3f5a0d947+33+A1f27a35dd9af37191d63ad8eb8985624451e7b79@5835c8bc 0:0:a 0:0:b 0:33:output.txt
+./c d41d8cd98f00b204e9800998ecf8427e+0+A27117dcd30c013a6e85d6d74c9a50179a1446efa@5835c8bc 0:0:d
+</pre>
+
+A manifest containing a file that spans multiple blocks and has a space in its name:
+
+<pre>
+. c449ed86671e4a34a8b8b9430850beba+67108864 09fcfea01c3a141b89dd0dcfa1b7768e+22534144 0:89643008:Docker\040image.tar
+</pre>
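+
+A rough Python sketch of parsing one stream of the first example above (tokens containing @:@ are file tokens; everything else after the stream name is a block locator; @\040@ unescaping is shown for completeness):
+
+<pre>
+def parse_stream(line):
+    tokens = line.split(' ')
+    stream_name = tokens[0].replace('\\040', ' ')
+    locators = [t for t in tokens[1:] if ':' not in t]
+    files = []
+    for t in tokens[1:]:
+        if ':' in t:
+            pos, size, name = t.split(':', 2)
+            files.append((name.replace('\\040', ' '), int(pos), int(size)))
+    return stream_name, locators, files
+
+print(parse_stream('. 930625b054ce894ac40596c3f5a0d947+33 0:0:a 0:0:b 0:33:output.txt'))
+# ('.', ['930625b054ce894ac40596c3f5a0d947+33'],
+#  [('a', 0, 0), ('b', 0, 0), ('output.txt', 0, 33)])
+</pre>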
diff --git a/doc/api/tokens.html.textile.liquid b/doc/api/tokens.html.textile.liquid
new file mode 100644 (file)
index 0000000..3437003
--- /dev/null
@@ -0,0 +1,72 @@
+---
+layout: default
+navsection: api
+title: API Authorization
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+All requests to the API server must include an API token.  API tokens can be issued by going through the login flow, or created via the API.  At this time, only browser-based applications can perform login with email/password.  Command line applications and services must use an API token provided via the @ARVADOS_API_TOKEN@ environment variable or a configuration file.
+
+h2. Browser login
+
+Browser-based applications can log in via the following high-level flow:
+
+# The web application presents a "login" link to @/login@ on the API server with a @return_to@ parameter provided in the query portion of the URL.  For example @https://{{ site.arvados_api_host }}/login?return_to=XXX@, where @return_to=XXX@ is the URL of the login page for the web application.
+# The "login" link takes the browser to the login page (this may involve several redirects)
+# The user logs in.  The API server authenticates the user and issues a new API token.
+# The browser is redirected to the login page URL provided in @return_to=XXX@ with the addition of @?api_token=xxxxapitokenxxxx@.
+# The web application gets the login request with the included authorization token.
+
+!{{site.baseurl}}/images/Session_Establishment.svg!
+
+The "browser authentication process is documented in detail on the Arvados wiki.":https://dev.arvados.org/projects/arvados/wiki/Workbench_authentication_process
+
+h2. User activation
+
+"Creation and activation of new users is described here.":{{site.baseurl}}/admin/activation.html
+
+h2. Creating tokens via the API
+
+The browser login method above issues a new token.  Using that token, it is possible to make API calls to create additional tokens.  To do so, use the @create@ method of the "API client authorizations":{{site.baseurl}}/api/methods/api_client_authorizations.html resource.
+
+h2. Trusted API clients
+
+The "api_clients":{{site.baseurl}}/api/methods/api_clients.html resource determines if web applications that have gone through the browser login flow may create or list API tokens.
+
+After the user has authenticated, but before an authorization token is issued and browser redirect sent (sending the browser back to the @return_to@ login page bearing @api_token@), the server strips the path and query portion from @return_to@ to get @url_prefix@.  The @url_prefix@ is used to find or create an ApiClient object.  The newly issued API client authorization (API token) is associated with this ApiClient object.
+
+API clients may be marked as "trusted" by making an API call to create or update an "api_clients":{{site.baseurl}}/api/methods/api_clients.html resource, setting the @is_trusted@ flag to @true@.  An authorization token associated with a "trusted" client is permitted to list authorization tokens on "API client authorizations":{{site.baseurl}}/api/methods/api_client_authorizations.html .
+
+An authorization token which is not associated with a trusted client may only use the @current@ method to query its own api_client_authorization object.  The "untrusted" token is forbidden from performing any other operations on API client authorizations, such as listing other authorizations or creating new ones.
+
+Authorization tokens which are not issued via the browser login flow (created directly via the API) will not have an associated api client.  This means authorization tokens created via the API are always "untrusted".
+
+h2(#scopes). Scopes
+
+Scopes can restrict a token so it may only access certain resources.  This is in addition to normal permission checks for the user associated with the token.
+
+Each entry in scopes consists of a @request_method@ and @request_path@, where @request_method@ is an HTTP method (one of @GET@, @POST@, @PUT@ or @DELETE@) and @request_path@ is the request URI.  A given request is permitted if it matches a scope exactly, or if the scope ends with @/@ and is a prefix of the request string.
+
+As a special case, a scope of @["all"]@ allows access to all resources.
+
+h3. Scope examples
+
+A scope of @GET /arvados/v1/collections@ permits listing collections.
+
+* Requests with different methods, such as creating a new collection using @POST /arvados/v1/collections@, will be rejected.
+* Requests to access other resources, such as @GET /arvados/v1/groups@, will be rejected.
+* Be aware that requests for specific records, such as @GET /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km@, will also be rejected.  This is because the scope @GET /arvados/v1/collections@ does not end in @/@.
+
+A scope of @GET /arvados/v1/collections/@ (with @/@ suffix) will permit access to individual collections.
+
+* The request @GET /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km@ will succeed.
+* Be aware that a listing request @GET /arvados/v1/collections@ (no @/@ suffix) will be rejected, because it does not match the rule @GET /arvados/v1/collections/@.
+* A listing request @GET /arvados/v1/collections/@ will have its trailing @/@ trimmed before the scope check; as a result, it will not match the rule @GET /arvados/v1/collections/@ either.
+
+To allow both listing objects and requesting individual objects, include both in the scope: @["GET /arvados/v1/collections", "GET /arvados/v1/collections/"]@
+
+A narrow scope such as @GET /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km@ disallows listing objects, and also disallows requesting any object other than the one named in the scope.
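+
+The matching rule described above fits in a few lines of Python (a sketch, including the trailing-@/@ trimming applied to listing requests):
+
+<pre>
+def scope_allows(scopes, method, path):
+    # A request is permitted if it equals a scope exactly, or if a scope
+    # ends with "/" and is a prefix of the request string.
+    if scopes == ['all']:
+        return True
+    request = '%s %s' % (method, path.rstrip('/'))
+    return any(request == s or (s.endswith('/') and request.startswith(s))
+               for s in scopes)
+
+scopes = ['GET /arvados/v1/collections', 'GET /arvados/v1/collections/']
+assert scope_allows(scopes, 'GET', '/arvados/v1/collections')
+assert scope_allows(scopes, 'GET', '/arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km')
+assert not scope_allows(scopes, 'POST', '/arvados/v1/collections')
+</pre>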
diff --git a/doc/architecture/Arvados_arch.odg b/doc/architecture/Arvados_arch.odg
new file mode 100644 (file)
index 0000000..8b363c1
Binary files /dev/null and b/doc/architecture/Arvados_arch.odg differ
diff --git a/doc/architecture/Arvados_federation.odg b/doc/architecture/Arvados_federation.odg
new file mode 100644 (file)
index 0000000..4e52000
Binary files /dev/null and b/doc/architecture/Arvados_federation.odg differ
diff --git a/doc/architecture/federation.html.textile.liquid b/doc/architecture/federation.html.textile.liquid
new file mode 100644 (file)
index 0000000..08dad1e
--- /dev/null
@@ -0,0 +1,113 @@
+---
+layout: default
+navsection: architecture
+title: "Federation"
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados federation enables clients to transparently read, create and manipulate objects and collections across clusters in different regions or organizations.  Federation supports workflows that integrate and analyze data across multiple clusters by sending computation to where the data is, reducing the technical and legal barriers to analyzing large, sensitive data sets.
+
+_This feature is under development.  Support for federation is limited to certain types of requests.  The behaviors described here should not be interpreted as a stable API._
+
+Detailed configuration information is available on the "federation admin section":{{site.baseurl}}/admin/federation.html.
+
+h2(#cluster_id). Cluster identifiers
+
+Clusters are identified by a five-character alphanumeric id (digits and lowercase letters).  There are 36^5^ = 60466176 possible cluster identifiers.
+
+* For automated test purposes, use "z****"
+* For experimental/local-only/private clusters that won't ever be visible on the public Internet, use "x****"
+* For long-lived clusters, we recommend reserving a cluster id.  Contact "support@curoverse.com":mailto:support@curoverse.com
+
+Cluster identifiers are mapped to API server hosts in one of two ways:
+
+* Through DNS resolution, under the @arvadosapi.com@ domain.  For example, the API server for the cluster @qr1hi@ can be found at @qr1hi.arvadosapi.com@.  To register a cluster id for free under @arvadosapi.com@, contact "support@curoverse.com":mailto:support@curoverse.com
+* Through explicit configuration:
+
+The @RemoteClusters@ section of @/etc/arvados/config.yml@ (for arvados-controller):
+
+<pre>
+Clusters:
+  clsr1:
+    RemoteClusters:
+      clsr2:
+        Host: api.cluster2.com
+        Proxy: true
+      clsr3:
+        Host: api.cluster3.com
+        Proxy: true
+</pre>
+
+The @remote_hosts@ section of @application.yml@ (for the API server):
+
+<pre>
+production:
+  remote_hosts:
+    clsr2: api.cluster2.com
+    clsr3: api.cluster3.com
+</pre>
+
+In this example, the cluster @clsr1@ is configured to contact @api.cluster2.com@ for requests involving @clsr2@ and @api.cluster3.com@ for requests involving @clsr3@.
+
+h2(#identity). Identity
+
+A federated user has a single identity across the cluster federation.  This identity is a user account on a specific "home cluster".  When arvados-controller contacts a remote cluster, the remote cluster verifies the user's identity (see below) and then creates a mirror of the user account with the same uuid as the account on the user's home cluster.  On the remote cluster, permissions can then be granted to the federated user, and the federated user can create and own objects.
+
+h3. Authenticating remote users with salted tokens
+
+When making a request to the home cluster, authorization is established by looking up the API token in the @api_client_authorizations@ table to determine the user identity.  When making a request to a remote cluster, we need to provide an API token which can be used to establish the user's identity.  The remote cluster will connect back to the home cluster to determine whether the token is valid and which user it corresponds to.  However, we do not want to send along the same API token used for the original request: if the remote cluster is malicious or compromised, sending along the user's regular token would compromise the user account on the home cluster.  Instead, the controller sends a "salted token".  The salted token is restricted to fetching only the user account and group membership.  It consists of the uuid of the token in @api_client_authorizations@ and the SHA1 HMAC of the original token and the cluster id of the remote cluster.  To verify the token, the remote cluster contacts the home cluster and provides the token uuid, the hash, and its cluster id.  The home cluster uses the uuid to look up the token and re-computes the SHA1 HMAC of the original token and cluster id.  If that hash matches, the token is valid.  To avoid having to re-validate the token on every request, it is cached for a short period.
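+
+A Python sketch of the salting step (the @v2/...@ layout and the HMAC argument order are assumptions for illustration; the authoritative encoding is defined by arvados-controller):
+
+<pre>
+import hashlib
+import hmac
+
+def salt_token(token_uuid, token_secret, remote_cluster_id):
+    # SHA1 HMAC of the original token and the remote cluster id; the
+    # result can be verified by the home cluster, but cannot be reused
+    # against other clusters.
+    digest = hmac.new(token_secret.encode(), remote_cluster_id.encode(),
+                      hashlib.sha1).hexdigest()
+    return 'v2/%s/%s' % (token_uuid, digest)
+</pre>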
+
+The security properties of this scheme are:
+
+* The salted token does not grant access on the home cluster beyond what is needed to verify user identity
+* Revoking a token on the home cluster also revokes it for remote clusters (after the cache period)
+* A salted token given to a malicious/compromised cluster cannot be used to gain access to the user account on another remote cluster
+
+h2(#retrieval). Federated records
+
+!(full-width){{site.baseurl}}/images/arvados_federation.svg!
+
+h3. Retrieving and updating records
+
+In the REST API, GET and PUT/PATCH requests are used to fetch and update records.
+
+# the client begins by making a request to the home arvados-controller to retrieve or update a specific record owned by a remote cluster
+# arvados-controller determines the 5-digit cluster id from the first part of the uuid string
+# arvados-controller determines the API server host corresponding to the cluster id
+# arvados-controller creates a "salted" token by combining the API token used for the request and the target cluster id
+# arvados-controller contacts the remote cluster to request the desired record, providing the salted token
+# the remote cluster verifies the salted token
+# the remote cluster processes the request and returns a response
+# arvados-controller forwards the response to the client
+
+h3. Creating records
+
+In the REST API, POST requests create new records, so there is no object uuid from which to determine the target cluster id.  In this case, to create an object on a remote cluster, the request includes the @cluster_id@ parameter.  The flow is otherwise the same as described above.
+
+h3. Collections and Keep block retrieval
+
+Each collection record has @manifest_text@, which describes how to reassemble keep blocks into files (see "Storage in Keep":{{site.baseurl}}/api/storage.html for details).  Each block identifier in the manifest has an added signature which is used to confirm permission to read the block.  To read a block from a keepstore server, the client must provide the block identifier, the signature, and the same API token used to retrieve the collection record.
+
+When a collection record is returned through a federation request, the keep blocks listed in the manifest may not be available on the local cluster, and the keep block signatures returned by the remote cluster are not valid for the local cluster.  To solve this, arvados-controller rewrites the signatures in the manifest to "remote cluster" signatures.
+
+A local signature comes after the block identifier and block size, and starts with @+A@:
+
+<code>930625b054ce894ac40596c3f5a0d947+33+A1f27a35dd9af37191d63ad8eb8985624451e7b79@5835c8bc</code>
+
+A remote cluster signature starts with @+R@, then the cluster id of the cluster it originated from (@zzzzz@ in this example), a dash, and then the original signature:
+
+<code>930625b054ce894ac40596c3f5a0d947+33+Rzzzzz-1f27a35dd9af37191d63ad8eb8985624451e7b79@5835c8bc</code>
+
+When the client provides a remote-signed block locator to keepstore, keepstore proxies the request to the remote cluster (the signature rewrite is sketched after these steps):
+
+# determines the cluster id to contact from the first part of the @+R@ signature
+# creates a salted token using the API token and cluster id
+# contacts the "accessible" endpoint on the remote cluster to determine the remote cluster's keepstore or keepproxy hosts
+# converts the remote signature @+R@ back to a local signature @+A@
+# contacts the remote keepstore or keepproxy host and requests the block using the local signature
+# returns the block contents back to the client
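+
+Rewriting between the two signature forms is a simple text transformation.  A Python sketch using the example locator above:
+
+<pre>
+import re
+
+def to_remote(locator, cluster_id):
+    # +A<sig>@<timestamp>  ->  +R<cluster>-<sig>@<timestamp>
+    return re.sub(r'\+A([0-9a-f]+@[0-9a-f]{8})', '+R%s-\\1' % cluster_id, locator)
+
+def to_local(locator):
+    # +R<cluster>-<sig>@<timestamp>  ->  +A<sig>@<timestamp>
+    return re.sub(r'\+R[a-z0-9]{5}-([0-9a-f]+@[0-9a-f]{8})', r'+A\1', locator)
+
+loc = '930625b054ce894ac40596c3f5a0d947+33+A1f27a35dd9af37191d63ad8eb8985624451e7b79@5835c8bc'
+assert to_local(to_remote(loc, 'zzzzz')) == loc
+</pre>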
diff --git a/doc/architecture/index.html.textile.liquid b/doc/architecture/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..c7ea326
--- /dev/null
@@ -0,0 +1,59 @@
+---
+layout: default
+navsection: architecture
+title: "Arvados components"
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+!(full-width){{site.baseurl}}/images/Arvados_arch.svg!
+
+h3. Services
+
+Located in @arvados/services@, except for Workbench, which is located in @arvados/apps/workbench@.
+
+table(table table-bordered table-condensed).
+|_. Component|_. Description|
+|api|The API server is the core of Arvados.  It is backed by a Postgres database and manages information such as metadata for storage, a record of submitted compute jobs, users, groups, and associated permissions.|
+|arv-git-httpd|Provides a git+http interface to Arvados-managed git repositories, with permissions and authentication based on an Arvados API token.|
+|crunch-dispatch-local|Gets compute requests submitted to the API server and executes them locally.|
+|crunch-dispatch-slurm|Gets compute requests submitted to the API server and submits them to slurm.|
+|crunch-run|Dispatched by crunch-dispatch, executes a single compute run: setting up a Docker container, running it, and collecting the output.|
+|dockercleaner|Daemon for cleaning up Docker containers and images.|
+|fuse|Filesystem in USErspace (FUSE) filesystem driver for Keep.|
+|health|Health check proxy, contacts configured Arvados services at their health check endpoints and reports results.|
+|keep-balance|Performs storage utilization reporting, optimization and garbage collection.  Moves data blocks to their optimum location, ensures correct replication and storage class, and trashes unreferenced blocks.|
+|keepproxy|Provides low-level access to keepstore services (block-level data access) for clients outside the internal (private) network.|
+|keepstore|Provides access to underlying storage (filesystem or object storage such as Amazon S3 or Azure Blob) with Arvados permissions.|
+|keep-web|Provides high-level WebDAV access to collections (file-level data access).|
+|login-sync|Synchronizes virtual machine users with Arvados users and permissions.|
+|nodemanager|Provides elastic computing by creating and destroying cloud-based virtual machines based on compute demand.|
+|ws|Publishes API server change events over websockets.|
+|workbench|Web application providing user interface to Arvados services.|
+
+h3. Tools
+
+The @arv@ command is located in @arvados/sdk/ruby@, the @arv-*@ tools are located in @arvados/sdk/python@, and the rest are located in @arvados/tools@.
+
+table(table table-bordered table-condensed).
+|_. Component|_. Description |
+|arv|Provides command line access to the API, along with some general-purpose utilities.|
+|arv-copy|Copy a collection from one cluster to another.|
+|arv-get|Get files from a collection.|
+|arv-keepdocker|Upload Docker images from local Docker daemon to Keep.|
+|arv-ls|List files in a collection.|
+|arv-migrate-docker19|Migrate Docker images in Keep from v1 format (Docker 1.9 or earlier) to v2 format (Docker 1.10 or later).|
+|arv-normalize|Read manifest text on stdin and produce normalized manifest text on stdout.|
+|arv-put|Upload files to a collection.|
+|arv-ws|Print events from Arvados websocket event source.|
+|arvbash|Helpful @bash@ macros for using Arvados at the command line.|
+|arvbox|Dockerized Arvados environment for development and testing.|
+|crunchstat-summary|Read execution metrics (CPU %, RAM, network, etc.) collected from a compute container and produce a report.|
+|keep-block-check|Given a list of keep block locators, check that each block exists on one of the configured keepstore servers and verify the block hash.|
+|keep-exercise|Benchmarking tool to test throughput and reliability of keepstores under various usage patterns.|
+|keep-rsync|Get lists of blocks from two clusters, copy blocks which exist on source cluster but are missing from destination cluster.|
+|sync-groups|Take a CSV file listing (group, username) pairs and synchronize membership in Arvados groups.|
diff --git a/doc/css/badges.css b/doc/css/badges.css
new file mode 100644 (file)
index 0000000..d74528f
--- /dev/null
@@ -0,0 +1,32 @@
+/* Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0 */
+
+/* Colors
+ * Contextual variations of badges
+ * Bootstrap 3.0 removed contexts for badges; we re-introduce them, based on what is done for labels
+ */
+
+.badge.badge-error {
+  background-color: #b94a48;
+}
+
+.badge.badge-warning {
+  background-color: #f89406;
+}
+
+.badge.badge-success {
+  background-color: #468847;
+}
+
+.badge.badge-info {
+  background-color: #3a87ad;
+}
+
+.badge.badge-inverse {
+  background-color: #333333;
+}
+
+.badge.badge-alert {
+  background-color: red;
+}
diff --git a/doc/css/bootstrap-theme.css b/doc/css/bootstrap-theme.css
new file mode 100644 (file)
index 0000000..11fcc9b
--- /dev/null
@@ -0,0 +1,347 @@
+/*!
+ * Bootstrap v3.1.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+
+.btn-default,
+.btn-primary,
+.btn-success,
+.btn-info,
+.btn-warning,
+.btn-danger {
+  text-shadow: 0 -1px 0 rgba(0, 0, 0, .2);
+  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);
+}
+.btn-default:active,
+.btn-primary:active,
+.btn-success:active,
+.btn-info:active,
+.btn-warning:active,
+.btn-danger:active,
+.btn-default.active,
+.btn-primary.active,
+.btn-success.active,
+.btn-info.active,
+.btn-warning.active,
+.btn-danger.active {
+  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+          box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+}
+.btn:active,
+.btn.active {
+  background-image: none;
+}
+.btn-default {
+  text-shadow: 0 1px 0 #fff;
+  background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);
+  background-image:         linear-gradient(to bottom, #fff 0%, #e0e0e0 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-color: #dbdbdb;
+  border-color: #ccc;
+}
+.btn-default:hover,
+.btn-default:focus {
+  background-color: #e0e0e0;
+  background-position: 0 -15px;
+}
+.btn-default:active,
+.btn-default.active {
+  background-color: #e0e0e0;
+  border-color: #dbdbdb;
+}
+.btn-primary {
+  background-image: -webkit-linear-gradient(top, #428bca 0%, #2d6ca2 100%);
+  background-image:         linear-gradient(to bottom, #428bca 0%, #2d6ca2 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff2d6ca2', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-color: #2b669a;
+}
+.btn-primary:hover,
+.btn-primary:focus {
+  background-color: #2d6ca2;
+  background-position: 0 -15px;
+}
+.btn-primary:active,
+.btn-primary.active {
+  background-color: #2d6ca2;
+  border-color: #2b669a;
+}
+.btn-success {
+  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);
+  background-image:         linear-gradient(to bottom, #5cb85c 0%, #419641 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-color: #3e8f3e;
+}
+.btn-success:hover,
+.btn-success:focus {
+  background-color: #419641;
+  background-position: 0 -15px;
+}
+.btn-success:active,
+.btn-success.active {
+  background-color: #419641;
+  border-color: #3e8f3e;
+}
+.btn-info {
+  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);
+  background-image:         linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-color: #28a4c9;
+}
+.btn-info:hover,
+.btn-info:focus {
+  background-color: #2aabd2;
+  background-position: 0 -15px;
+}
+.btn-info:active,
+.btn-info.active {
+  background-color: #2aabd2;
+  border-color: #28a4c9;
+}
+.btn-warning {
+  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);
+  background-image:         linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-color: #e38d13;
+}
+.btn-warning:hover,
+.btn-warning:focus {
+  background-color: #eb9316;
+  background-position: 0 -15px;
+}
+.btn-warning:active,
+.btn-warning.active {
+  background-color: #eb9316;
+  border-color: #e38d13;
+}
+.btn-danger {
+  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);
+  background-image:         linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-color: #b92c28;
+}
+.btn-danger:hover,
+.btn-danger:focus {
+  background-color: #c12e2a;
+  background-position: 0 -15px;
+}
+.btn-danger:active,
+.btn-danger.active {
+  background-color: #c12e2a;
+  border-color: #b92c28;
+}
+.thumbnail,
+.img-thumbnail {
+  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .075);
+          box-shadow: 0 1px 2px rgba(0, 0, 0, .075);
+}
+.dropdown-menu > li > a:hover,
+.dropdown-menu > li > a:focus {
+  background-color: #e8e8e8;
+  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
+  background-image:         linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
+  background-repeat: repeat-x;
+}
+.dropdown-menu > .active > a,
+.dropdown-menu > .active > a:hover,
+.dropdown-menu > .active > a:focus {
+  background-color: #357ebd;
+  background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);
+  background-image:         linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
+  background-repeat: repeat-x;
+}
+.navbar-default {
+  background-image: -webkit-linear-gradient(top, #fff 0%, #f8f8f8 100%);
+  background-image:         linear-gradient(to bottom, #fff 0%, #f8f8f8 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-radius: 4px;
+  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);
+}
+.navbar-default .navbar-nav > .active > a {
+  background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f3f3f3 100%);
+  background-image:         linear-gradient(to bottom, #ebebeb 0%, #f3f3f3 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff3f3f3', GradientType=0);
+  background-repeat: repeat-x;
+  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075);
+}
+.navbar-brand,
+.navbar-nav > li > a {
+  text-shadow: 0 1px 0 rgba(255, 255, 255, .25);
+}
+.navbar-inverse {
+  background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);
+  background-image:         linear-gradient(to bottom, #3c3c3c 0%, #222 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+}
+.navbar-inverse .navbar-nav > .active > a {
+  background-image: -webkit-linear-gradient(top, #222 0%, #282828 100%);
+  background-image:         linear-gradient(to bottom, #222 0%, #282828 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222', endColorstr='#ff282828', GradientType=0);
+  background-repeat: repeat-x;
+  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25);
+          box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25);
+}
+.navbar-inverse .navbar-brand,
+.navbar-inverse .navbar-nav > li > a {
+  text-shadow: 0 -1px 0 rgba(0, 0, 0, .25);
+}
+.navbar-static-top,
+.navbar-fixed-top,
+.navbar-fixed-bottom {
+  border-radius: 0;
+}
+.alert {
+  text-shadow: 0 1px 0 rgba(255, 255, 255, .2);
+  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);
+          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);
+}
+.alert-success {
+  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);
+  background-image:         linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);
+  background-repeat: repeat-x;
+  border-color: #b2dba1;
+}
+.alert-info {
+  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);
+  background-image:         linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);
+  background-repeat: repeat-x;
+  border-color: #9acfea;
+}
+.alert-warning {
+  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);
+  background-image:         linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);
+  background-repeat: repeat-x;
+  border-color: #f5e79e;
+}
+.alert-danger {
+  background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);
+  background-image:         linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);
+  background-repeat: repeat-x;
+  border-color: #dca7a7;
+}
+.progress {
+  background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);
+  background-image:         linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);
+  background-repeat: repeat-x;
+}
+.progress-bar {
+  background-image: -webkit-linear-gradient(top, #428bca 0%, #3071a9 100%);
+  background-image:         linear-gradient(to bottom, #428bca 0%, #3071a9 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0);
+  background-repeat: repeat-x;
+}
+.progress-bar-success {
+  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);
+  background-image:         linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);
+  background-repeat: repeat-x;
+}
+.progress-bar-info {
+  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);
+  background-image:         linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);
+  background-repeat: repeat-x;
+}
+.progress-bar-warning {
+  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);
+  background-image:         linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);
+  background-repeat: repeat-x;
+}
+.progress-bar-danger {
+  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);
+  background-image:         linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);
+  background-repeat: repeat-x;
+}
+.list-group {
+  border-radius: 4px;
+  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .075);
+          box-shadow: 0 1px 2px rgba(0, 0, 0, .075);
+}
+.list-group-item.active,
+.list-group-item.active:hover,
+.list-group-item.active:focus {
+  text-shadow: 0 -1px 0 #3071a9;
+  background-image: -webkit-linear-gradient(top, #428bca 0%, #3278b3 100%);
+  background-image:         linear-gradient(to bottom, #428bca 0%, #3278b3 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0);
+  background-repeat: repeat-x;
+  border-color: #3278b3;
+}
+.panel {
+  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .05);
+          box-shadow: 0 1px 2px rgba(0, 0, 0, .05);
+}
+.panel-default > .panel-heading {
+  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
+  background-image:         linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
+  background-repeat: repeat-x;
+}
+.panel-primary > .panel-heading {
+  background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);
+  background-image:         linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
+  background-repeat: repeat-x;
+}
+.panel-success > .panel-heading {
+  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);
+  background-image:         linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);
+  background-repeat: repeat-x;
+}
+.panel-info > .panel-heading {
+  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);
+  background-image:         linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);
+  background-repeat: repeat-x;
+}
+.panel-warning > .panel-heading {
+  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);
+  background-image:         linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);
+  background-repeat: repeat-x;
+}
+.panel-danger > .panel-heading {
+  background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);
+  background-image:         linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);
+  background-repeat: repeat-x;
+}
+.well {
+  background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);
+  background-image:         linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);
+  background-repeat: repeat-x;
+  border-color: #dcdcdc;
+  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);
+          box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);
+}
+/*# sourceMappingURL=bootstrap-theme.css.map */
diff --git a/doc/css/bootstrap-theme.css.map b/doc/css/bootstrap-theme.css.map
new file mode 100644 (file)
index 0000000..29c1319
--- /dev/null
@@ -0,0 +1 @@
+{"version":3,"sources":["less/theme.less","less/mixins.less"],"names":[],"mappings":"AAeA;AACA;AACA;AACA;AACA;AACA;EACE,wCAAA;ECqGA,2FAAA;EACQ,mFAAA;;ADjGR,YAAC;AAAD,YAAC;AAAD,YAAC;AAAD,SAAC;AAAD,YAAC;AAAD,WAAC;AACD,YAAC;AAAD,YAAC;AAAD,YAAC;AAAD,SAAC;AAAD,YAAC;AAAD,WAAC;EC+FD,wDAAA;EACQ,gDAAA;;ADpER,IAAC;AACD,IAAC;EACC,sBAAA;;AAKJ;EC8PI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EAEA,sHAAA;EAoCF,mEAAA;ED/TA,2BAAA;EACA,qBAAA;EAyB2C,yBAAA;EAA2B,kBAAA;;AAvBtE,YAAC;AACD,YAAC;EACC,yBAAA;EACA,4BAAA;;AAGF,YAAC;AACD,YAAC;EACC,yBAAA;EACA,qBAAA;;AAeJ;EC6PI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EAEA,sHAAA;EAoCF,mEAAA;ED/TA,2BAAA;EACA,qBAAA;;AAEA,YAAC;AACD,YAAC;EACC,yBAAA;EACA,4BAAA;;AAGF,YAAC;AACD,YAAC;EACC,yBAAA;EACA,qBAAA;;AAgBJ;EC4PI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EAEA,sHAAA;EAoCF,mEAAA;ED/TA,2BAAA;EACA,qBAAA;;AAEA,YAAC;AACD,YAAC;EACC,yBAAA;EACA,4BAAA;;AAGF,YAAC;AACD,YAAC;EACC,yBAAA;EACA,qBAAA;;AAiBJ;EC2PI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EAEA,sHAAA;EAoCF,mEAAA;ED/TA,2BAAA;EACA,qBAAA;;AAEA,SAAC;AACD,SAAC;EACC,yBAAA;EACA,4BAAA;;AAGF,SAAC;AACD,SAAC;EACC,yBAAA;EACA,qBAAA;;AAkBJ;EC0PI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EAEA,sHAAA;EAoCF,mEAAA;ED/TA,2BAAA;EACA,qBAAA;;AAEA,YAAC;AACD,YAAC;EACC,yBAAA;EACA,4BAAA;;AAGF,YAAC;AACD,YAAC;EACC,yBAAA;EACA,qBAAA;;AAmBJ;ECyPI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EAEA,sHAAA;EAoCF,mEAAA;ED/TA,2BAAA;EACA,qBAAA;;AAEA,WAAC;AACD,WAAC;EACC,yBAAA;EACA,4BAAA;;AAGF,WAAC;AACD,WAAC;EACC,yBAAA;EACA,qBAAA;;AA2BJ;AACA;EC8CE,kDAAA;EACQ,0CAAA;;ADrCV,cAAe,KAAK,IAAG;AACvB,cAAe,KAAK,IAAG;ECqOnB,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;EDtOF,yBAAA;;AAEF,cAAe,UAAU;AACzB,cAAe,UAAU,IAAG;AAC5B,cAAe,UAAU,IAAG;EC+NxB,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;EDhOF,yBAAA;;AAUF;ECmNI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;EAoCF,mEAAA;EDvPA,kBAAA;ECcA,2FAAA;EACQ,mFAAA;;ADlBV,eAOE,YAAY,UAAU;EC4MpB,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;EArMF,wDAAA;EACQ,gDAAA;;ADNV;AACA,WAAY,KAAK;EACf,8CAAA;;AAIF;ECiMI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;EAoCF,mEAAA;;ADxOF,eAIE,YAAY,UAAU;EC6LpB,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;EArMF,uDAAA;EACQ,+CAAA;;ADAV,eASE;AATF,eAUE,YAAY,KAAK;EACf,yCAAA;;AAKJ;AACA;AACA;EACE,gBAAA;;AAUF;EACE,6CAAA;EC/BA,0FAAA;EACQ,kFAAA;;AD0CV;ECuJI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;ED9JF,qBAAA;;AAKF;ECsJI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;ED9JF,qBAAA;;AAMF;ECqJI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;ED9JF,qBAAA;;AAOF;ECoJI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;ED9JF,qBAAA;;AAgBF;EC2II,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADpIJ;ECiII,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADnIJ;ECgII,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADlIJ;EC+HI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADjIJ;EC8HI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADhIJ;EC6HI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADxHJ;EACE,kBAAA;EC9EA,kDAAA;EACQ,0CAAA;;ADgFV,gBAAgB;AAChB,gBAAgB,OAAO;AACvB,gBAAgB,OAAO;EACrB,6BAAA;EC8GE,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;ED/GF,qBAAA;;AAUF;EChGE,iDAAA;EACQ,yCAAA;;ADyGV,cAAe;ECwFX,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;AD1FJ,cAAe;ECuFX,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADzFJ,cAAe;ECsFX,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADxFJ,WAAY;ECqFR,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADvFJ,cAAe;ECoFX,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADtFJ,a
AAc;ECmFV,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;AD9EJ;EC2EI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;ED5EF,qBAAA;ECzHA,yFAAA;EACQ,iFAAA","sourcesContent":["\n//\n// Load core variables and mixins\n// --------------------------------------------------\n\n@import \"variables.less\";\n@import \"mixins.less\";\n\n\n\n//\n// Buttons\n// --------------------------------------------------\n\n// Common styles\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n  text-shadow: 0 -1px 0 rgba(0,0,0,.2);\n  @shadow: inset 0 1px 0 rgba(255,255,255,.15), 0 1px 1px rgba(0,0,0,.075);\n  .box-shadow(@shadow);\n\n  // Reset the shadow\n  &:active,\n  &.active {\n    .box-shadow(inset 0 3px 5px rgba(0,0,0,.125));\n  }\n}\n\n// Mixin for generating new styles\n.btn-styles(@btn-color: #555) {\n  #gradient > .vertical(@start-color: @btn-color; @end-color: darken(@btn-color, 12%));\n  .reset-filter(); // Disable gradients for IE9 because filter bleeds through rounded corners\n  background-repeat: repeat-x;\n  border-color: darken(@btn-color, 14%);\n\n  &:hover,\n  &:focus  {\n    background-color: darken(@btn-color, 12%);\n    background-position: 0 -15px;\n  }\n\n  &:active,\n  &.active {\n    background-color: darken(@btn-color, 12%);\n    border-color: darken(@btn-color, 14%);\n  }\n}\n\n// Common styles\n.btn {\n  // Remove the gradient for the pressed/active state\n  &:active,\n  &.active {\n    background-image: none;\n  }\n}\n\n// Apply the mixin to the buttons\n.btn-default { .btn-styles(@btn-default-bg); text-shadow: 0 1px 0 #fff; border-color: #ccc; }\n.btn-primary { .btn-styles(@btn-primary-bg); }\n.btn-success { .btn-styles(@btn-success-bg); }\n.btn-info    { .btn-styles(@btn-info-bg); }\n.btn-warning { .btn-styles(@btn-warning-bg); }\n.btn-danger  { .btn-styles(@btn-danger-bg); }\n\n\n\n//\n// Images\n// --------------------------------------------------\n\n.thumbnail,\n.img-thumbnail {\n  .box-shadow(0 1px 2px rgba(0,0,0,.075));\n}\n\n\n\n//\n// Dropdowns\n// --------------------------------------------------\n\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  #gradient > .vertical(@start-color: @dropdown-link-hover-bg; @end-color: darken(@dropdown-link-hover-bg, 5%));\n  background-color: darken(@dropdown-link-hover-bg, 5%);\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n  background-color: darken(@dropdown-link-active-bg, 5%);\n}\n\n\n\n//\n// Navbar\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n  #gradient > .vertical(@start-color: lighten(@navbar-default-bg, 10%); @end-color: @navbar-default-bg);\n  .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered\n  border-radius: @navbar-border-radius;\n  @shadow: inset 0 1px 0 rgba(255,255,255,.15), 0 1px 5px rgba(0,0,0,.075);\n  .box-shadow(@shadow);\n\n  .navbar-nav > .active > a {\n    #gradient > .vertical(@start-color: darken(@navbar-default-bg, 5%); @end-color: darken(@navbar-default-bg, 2%));\n    .box-shadow(inset 0 3px 9px rgba(0,0,0,.075));\n  }\n}\n.navbar-brand,\n.navbar-nav > li > a {\n  text-shadow: 0 1px 0 rgba(255,255,255,.25);\n}\n\n// Inverted navbar\n.navbar-inverse {\n  #gradient > .vertical(@start-color: lighten(@navbar-inverse-bg, 10%); @end-color: @navbar-inverse-bg);\n  
.reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered\n\n  .navbar-nav > .active > a {\n    #gradient > .vertical(@start-color: @navbar-inverse-bg; @end-color: lighten(@navbar-inverse-bg, 2.5%));\n    .box-shadow(inset 0 3px 9px rgba(0,0,0,.25));\n  }\n\n  .navbar-brand,\n  .navbar-nav > li > a {\n    text-shadow: 0 -1px 0 rgba(0,0,0,.25);\n  }\n}\n\n// Undo rounded corners in static and fixed navbars\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  border-radius: 0;\n}\n\n\n\n//\n// Alerts\n// --------------------------------------------------\n\n// Common styles\n.alert {\n  text-shadow: 0 1px 0 rgba(255,255,255,.2);\n  @shadow: inset 0 1px 0 rgba(255,255,255,.25), 0 1px 2px rgba(0,0,0,.05);\n  .box-shadow(@shadow);\n}\n\n// Mixin for generating new styles\n.alert-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 7.5%));\n  border-color: darken(@color, 15%);\n}\n\n// Apply the mixin to the alerts\n.alert-success    { .alert-styles(@alert-success-bg); }\n.alert-info       { .alert-styles(@alert-info-bg); }\n.alert-warning    { .alert-styles(@alert-warning-bg); }\n.alert-danger     { .alert-styles(@alert-danger-bg); }\n\n\n\n//\n// Progress bars\n// --------------------------------------------------\n\n// Give the progress background some depth\n.progress {\n  #gradient > .vertical(@start-color: darken(@progress-bg, 4%); @end-color: @progress-bg)\n}\n\n// Mixin for generating new styles\n.progress-bar-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 10%));\n}\n\n// Apply the mixin to the progress bars\n.progress-bar            { .progress-bar-styles(@progress-bar-bg); }\n.progress-bar-success    { .progress-bar-styles(@progress-bar-success-bg); }\n.progress-bar-info       { .progress-bar-styles(@progress-bar-info-bg); }\n.progress-bar-warning    { .progress-bar-styles(@progress-bar-warning-bg); }\n.progress-bar-danger     { .progress-bar-styles(@progress-bar-danger-bg); }\n\n\n\n//\n// List groups\n// --------------------------------------------------\n\n.list-group {\n  border-radius: @border-radius-base;\n  .box-shadow(0 1px 2px rgba(0,0,0,.075));\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  text-shadow: 0 -1px 0 darken(@list-group-active-bg, 10%);\n  #gradient > .vertical(@start-color: @list-group-active-bg; @end-color: darken(@list-group-active-bg, 7.5%));\n  border-color: darken(@list-group-active-border, 7.5%);\n}\n\n\n\n//\n// Panels\n// --------------------------------------------------\n\n// Common styles\n.panel {\n  .box-shadow(0 1px 2px rgba(0,0,0,.05));\n}\n\n// Mixin for generating new styles\n.panel-heading-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 5%));\n}\n\n// Apply the mixin to the panel headings only\n.panel-default > .panel-heading   { .panel-heading-styles(@panel-default-heading-bg); }\n.panel-primary > .panel-heading   { .panel-heading-styles(@panel-primary-heading-bg); }\n.panel-success > .panel-heading   { .panel-heading-styles(@panel-success-heading-bg); }\n.panel-info > .panel-heading      { .panel-heading-styles(@panel-info-heading-bg); }\n.panel-warning > .panel-heading   { .panel-heading-styles(@panel-warning-heading-bg); }\n.panel-danger > .panel-heading    { .panel-heading-styles(@panel-danger-heading-bg); }\n\n\n\n//\n// Wells\n// --------------------------------------------------\n\n.well {\n  #gradient > 
.vertical(@start-color: darken(@well-bg, 5%); @end-color: @well-bg);\n  border-color: darken(@well-bg, 10%);\n  @shadow: inset 0 1px 3px rgba(0,0,0,.05), 0 1px 0 rgba(255,255,255,.1);\n  .box-shadow(@shadow);\n}\n","//\n// Mixins\n// --------------------------------------------------\n\n\n// Utilities\n// -------------------------\n\n// Clearfix\n// Source: http://nicolasgallagher.com/micro-clearfix-hack/\n//\n// For modern browsers\n// 1. The space content is one way to avoid an Opera bug when the\n//    contenteditable attribute is included anywhere else in the document.\n//    Otherwise it causes space to appear at the top and bottom of elements\n//    that are clearfixed.\n// 2. The use of `table` rather than `block` is only necessary if using\n//    `:before` to contain the top-margins of child elements.\n.clearfix() {\n  &:before,\n  &:after {\n    content: \" \"; // 1\n    display: table; // 2\n  }\n  &:after {\n    clear: both;\n  }\n}\n\n// WebKit-style focus\n.tab-focus() {\n  // Default\n  outline: thin dotted;\n  // WebKit\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n\n// Center-align a block level element\n.center-block() {\n  display: block;\n  margin-left: auto;\n  margin-right: auto;\n}\n\n// Sizing shortcuts\n.size(@width; @height) {\n  width: @width;\n  height: @height;\n}\n.square(@size) {\n  .size(@size; @size);\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n  &:-moz-placeholder            { color: @color; } // Firefox 4-18\n  &::-moz-placeholder           { color: @color;   // Firefox 19+\n                                  opacity: 1; } // See https://github.com/twbs/bootstrap/pull/11526\n  &:-ms-input-placeholder       { color: @color; } // Internet Explorer 10+\n  &::-webkit-input-placeholder  { color: @color; } // Safari and Chrome\n}\n\n// Text overflow\n// Requires inline-block or block for proper styling\n.text-overflow() {\n  overflow: hidden;\n  text-overflow: ellipsis;\n  white-space: nowrap;\n}\n\n// CSS image replacement\n//\n// Heads up! v3 launched with with only `.hide-text()`, but per our pattern for\n// mixins being reused as classes with the same name, this doesn't hold up. As\n// of v3.0.1 we have added `.text-hide()` and deprecated `.hide-text()`. 
Note\n// that we cannot chain the mixins together in Less, so they are repeated.\n//\n// Source: https://github.com/h5bp/html5-boilerplate/commit/aa0396eae757\n\n// Deprecated as of v3.0.1 (will be removed in v4)\n.hide-text() {\n  font: ~\"0/0\" a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n// New mixin to use as of v3.0.1\n.text-hide() {\n  .hide-text();\n}\n\n\n\n// CSS3 PROPERTIES\n// --------------------------------------------------\n\n// Single side border-radius\n.border-top-radius(@radius) {\n  border-top-right-radius: @radius;\n   border-top-left-radius: @radius;\n}\n.border-right-radius(@radius) {\n  border-bottom-right-radius: @radius;\n     border-top-right-radius: @radius;\n}\n.border-bottom-radius(@radius) {\n  border-bottom-right-radius: @radius;\n   border-bottom-left-radius: @radius;\n}\n.border-left-radius(@radius) {\n  border-bottom-left-radius: @radius;\n     border-top-left-radius: @radius;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n//   supported browsers that have box shadow capabilities now support the\n//   standard `box-shadow` property.\n.box-shadow(@shadow) {\n  -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n          box-shadow: @shadow;\n}\n\n// Transitions\n.transition(@transition) {\n  -webkit-transition: @transition;\n          transition: @transition;\n}\n.transition-property(@transition-property) {\n  -webkit-transition-property: @transition-property;\n          transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n  -webkit-transition-delay: @transition-delay;\n          transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n  -webkit-transition-duration: @transition-duration;\n          transition-duration: @transition-duration;\n}\n.transition-transform(@transition) {\n  -webkit-transition: -webkit-transform @transition;\n     -moz-transition: -moz-transform @transition;\n       -o-transition: -o-transform @transition;\n          transition: transform @transition;\n}\n\n// Transformations\n.rotate(@degrees) {\n  -webkit-transform: rotate(@degrees);\n      -ms-transform: rotate(@degrees); // IE9 only\n          transform: rotate(@degrees);\n}\n.scale(@ratio; @ratio-y...) 
{\n  -webkit-transform: scale(@ratio, @ratio-y);\n      -ms-transform: scale(@ratio, @ratio-y); // IE9 only\n          transform: scale(@ratio, @ratio-y);\n}\n.translate(@x; @y) {\n  -webkit-transform: translate(@x, @y);\n      -ms-transform: translate(@x, @y); // IE9 only\n          transform: translate(@x, @y);\n}\n.skew(@x; @y) {\n  -webkit-transform: skew(@x, @y);\n      -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n          transform: skew(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n  -webkit-transform: translate3d(@x, @y, @z);\n          transform: translate3d(@x, @y, @z);\n}\n\n.rotateX(@degrees) {\n  -webkit-transform: rotateX(@degrees);\n      -ms-transform: rotateX(@degrees); // IE9 only\n          transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n  -webkit-transform: rotateY(@degrees);\n      -ms-transform: rotateY(@degrees); // IE9 only\n          transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n  -webkit-perspective: @perspective;\n     -moz-perspective: @perspective;\n          perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n  -webkit-perspective-origin: @perspective;\n     -moz-perspective-origin: @perspective;\n          perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n  -webkit-transform-origin: @origin;\n     -moz-transform-origin: @origin;\n      -ms-transform-origin: @origin; // IE9 only\n          transform-origin: @origin;\n}\n\n// Animations\n.animation(@animation) {\n  -webkit-animation: @animation;\n          animation: @animation;\n}\n.animation-name(@name) {\n  -webkit-animation-name: @name;\n          animation-name: @name;\n}\n.animation-duration(@duration) {\n  -webkit-animation-duration: @duration;\n          animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n  -webkit-animation-timing-function: @timing-function;\n          animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n  -webkit-animation-delay: @delay;\n          animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n  -webkit-animation-iteration-count: @iteration-count;\n          animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n  -webkit-animation-direction: @direction;\n          animation-direction: @direction;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n.backface-visibility(@visibility){\n  -webkit-backface-visibility: @visibility;\n     -moz-backface-visibility: @visibility;\n          backface-visibility: @visibility;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n  -webkit-box-sizing: @boxmodel;\n     -moz-box-sizing: @boxmodel;\n          box-sizing: @boxmodel;\n}\n\n// User select\n// For selecting text on the page\n.user-select(@select) {\n  -webkit-user-select: @select;\n     -moz-user-select: @select;\n      -ms-user-select: @select; // IE10+\n       -o-user-select: @select;\n          user-select: @select;\n}\n\n// Resize anything\n.resizable(@direction) {\n  resize: @direction; // Options: horizontal, vertical, both\n  overflow: auto; // Safari fix\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n  -webkit-column-count: @column-count;\n     -moz-column-count: @column-count;\n          column-count: @column-count;\n  -webkit-column-gap: @column-gap;\n     -moz-column-gap: @column-gap;\n          column-gap: 
@column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n  word-wrap: break-word;\n  -webkit-hyphens: @mode;\n     -moz-hyphens: @mode;\n      -ms-hyphens: @mode; // IE10+\n       -o-hyphens: @mode;\n          hyphens: @mode;\n}\n\n// Opacity\n.opacity(@opacity) {\n  opacity: @opacity;\n  // IE8 filter\n  @opacity-ie: (@opacity * 100);\n  filter: ~\"alpha(opacity=@{opacity-ie})\";\n}\n\n\n\n// GRADIENTS\n// --------------------------------------------------\n\n#gradient {\n\n  // Horizontal gradient, from left to right\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(left, color-stop(@start-color @start-percent), color-stop(@end-color @end-percent)); // Safari 5.1-6, Chrome 10+\n    background-image:  linear-gradient(to right, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\",argb(@start-color),argb(@end-color))); // IE9 and down\n  }\n\n  // Vertical gradient, from top to bottom\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Safari 5.1-6, Chrome 10+\n    background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\",argb(@start-color),argb(@end-color))); // IE9 and down\n  }\n\n  .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n    background-repeat: repeat-x;\n    background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n    background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n  }\n  .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n    background-repeat: no-repeat;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\",argb(@start-color),argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n  }\n  .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-repeat: no-repeat;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', 
GradientType=0)\",argb(@start-color),argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n  }\n  .radial(@inner-color: #555; @outer-color: #333) {\n    background-image: -webkit-radial-gradient(circle, @inner-color, @outer-color);\n    background-image: radial-gradient(circle, @inner-color, @outer-color);\n    background-repeat: no-repeat;\n  }\n  .striped(@color: rgba(255,255,255,.15); @angle: 45deg) {\n    background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n  }\n}\n\n// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n.reset-filter() {\n  filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n\n\n\n// Retina images\n//\n// Short retina mixin for setting background-image and -size\n\n.img-retina(@file-1x; @file-2x; @width-1x; @height-1x) {\n  background-image: url(\"@{file-1x}\");\n\n  @media\n  only screen and (-webkit-min-device-pixel-ratio: 2),\n  only screen and (   min--moz-device-pixel-ratio: 2),\n  only screen and (     -o-min-device-pixel-ratio: 2/1),\n  only screen and (        min-device-pixel-ratio: 2),\n  only screen and (                min-resolution: 192dpi),\n  only screen and (                min-resolution: 2dppx) {\n    background-image: url(\"@{file-2x}\");\n    background-size: @width-1x @height-1x;\n  }\n}\n\n\n// Responsive image\n//\n// Keep images from scaling beyond the width of their parents.\n\n.img-responsive(@display: block) {\n  display: @display;\n  max-width: 100%; // Part 1: Set a maximum relative to the parent\n  height: auto; // Part 2: Scale the height according to the width, otherwise you get stretching\n}\n\n\n// COMPONENT MIXINS\n// --------------------------------------------------\n\n// Horizontal dividers\n// -------------------------\n// Dividers (basically an hr) within dropdowns and nav lists\n.nav-divider(@color: #e5e5e5) {\n  height: 1px;\n  margin: ((@line-height-computed / 2) - 1) 0;\n  overflow: hidden;\n  background-color: @color;\n}\n\n// Panels\n// -------------------------\n.panel-variant(@border; @heading-text-color; @heading-bg-color; @heading-border) {\n  border-color: @border;\n\n  & > .panel-heading {\n    color: @heading-text-color;\n    background-color: @heading-bg-color;\n    border-color: @heading-border;\n\n    + .panel-collapse .panel-body {\n      border-top-color: @border;\n    }\n  }\n  & > .panel-footer {\n    + .panel-collapse .panel-body {\n      border-bottom-color: @border;\n    }\n  }\n}\n\n// Alerts\n// -------------------------\n.alert-variant(@background; @border; @text-color) {\n  background-color: @background;\n  border-color: @border;\n  color: @text-color;\n\n  hr {\n    border-top-color: darken(@border, 5%);\n  }\n  .alert-link {\n    color: darken(@text-color, 10%);\n  }\n}\n\n// Tables\n// -------------------------\n.table-row-variant(@state; @background) {\n  // Exact selectors below required to override `.table-striped` and prevent\n  // inheritance to nested tables.\n  .table > thead > tr,\n  .table > tbody > tr,\n  .table > tfoot > tr {\n    > td.@{state},\n    > th.@{state},\n    &.@{state} > td,\n    &.@{state} > th {\n      background-color: @background;\n    }\n  }\n\n  // Hover states for 
`.table-hover`\n  // Note: this is not available for cells or rows within `thead` or `tfoot`.\n  .table-hover > tbody > tr {\n    > td.@{state}:hover,\n    > th.@{state}:hover,\n    &.@{state}:hover > td,\n    &.@{state}:hover > th {\n      background-color: darken(@background, 5%);\n    }\n  }\n}\n\n// List Groups\n// -------------------------\n.list-group-item-variant(@state; @background; @color) {\n  .list-group-item-@{state} {\n    color: @color;\n    background-color: @background;\n\n    a& {\n      color: @color;\n\n      .list-group-item-heading { color: inherit; }\n\n      &:hover,\n      &:focus {\n        color: @color;\n        background-color: darken(@background, 5%);\n      }\n      &.active,\n      &.active:hover,\n      &.active:focus {\n        color: #fff;\n        background-color: @color;\n        border-color: @color;\n      }\n    }\n  }\n}\n\n// Button variants\n// -------------------------\n// Easily pump out default styles, as well as :hover, :focus, :active,\n// and disabled options for all buttons\n.button-variant(@color; @background; @border) {\n  color: @color;\n  background-color: @background;\n  border-color: @border;\n\n  &:hover,\n  &:focus,\n  &:active,\n  &.active,\n  .open .dropdown-toggle& {\n    color: @color;\n    background-color: darken(@background, 8%);\n        border-color: darken(@border, 12%);\n  }\n  &:active,\n  &.active,\n  .open .dropdown-toggle& {\n    background-image: none;\n  }\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    &,\n    &:hover,\n    &:focus,\n    &:active,\n    &.active {\n      background-color: @background;\n          border-color: @border;\n    }\n  }\n\n  .badge {\n    color: @background;\n    background-color: @color;\n  }\n}\n\n// Button sizes\n// -------------------------\n.button-size(@padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n}\n\n// Pagination\n// -------------------------\n.pagination-size(@padding-vertical; @padding-horizontal; @font-size; @border-radius) {\n  > li {\n    > a,\n    > span {\n      padding: @padding-vertical @padding-horizontal;\n      font-size: @font-size;\n    }\n    &:first-child {\n      > a,\n      > span {\n        .border-left-radius(@border-radius);\n      }\n    }\n    &:last-child {\n      > a,\n      > span {\n        .border-right-radius(@border-radius);\n      }\n    }\n  }\n}\n\n// Labels\n// -------------------------\n.label-variant(@color) {\n  background-color: @color;\n  &[href] {\n    &:hover,\n    &:focus {\n      background-color: darken(@color, 10%);\n    }\n  }\n}\n\n// Contextual backgrounds\n// -------------------------\n.bg-variant(@color) {\n  background-color: @color;\n  a&:hover {\n    background-color: darken(@color, 10%);\n  }\n}\n\n// Typography\n// -------------------------\n.text-emphasis-variant(@color) {\n  color: @color;\n  a&:hover {\n    color: darken(@color, 10%);\n  }\n}\n\n// Navbar vertical align\n// -------------------------\n// Vertically center elements in the navbar.\n// Example: an element has a height of 30px, so write out `.navbar-vertical-align(30px);` to calculate the appropriate top margin.\n.navbar-vertical-align(@element-height) {\n  margin-top: ((@navbar-height - @element-height) / 2);\n  margin-bottom: ((@navbar-height - @element-height) / 2);\n}\n\n// Progress bars\n// -------------------------\n.progress-bar-variant(@color) {\n  
background-color: @color;\n  .progress-striped & {\n    #gradient > .striped();\n  }\n}\n\n// Responsive utilities\n// -------------------------\n// More easily include all the states for responsive-utilities.less.\n.responsive-visibility() {\n  display: block !important;\n  table&  { display: table; }\n  tr&     { display: table-row !important; }\n  th&,\n  td&     { display: table-cell !important; }\n}\n\n.responsive-invisibility() {\n    &,\n  tr&,\n  th&,\n  td& { display: none !important; }\n}\n\n\n// Grid System\n// -----------\n\n// Centered container element\n.container-fixed() {\n  margin-right: auto;\n  margin-left: auto;\n  padding-left:  (@grid-gutter-width / 2);\n  padding-right: (@grid-gutter-width / 2);\n  &:extend(.clearfix all);\n}\n\n// Creates a wrapper for a series of columns\n.make-row(@gutter: @grid-gutter-width) {\n  margin-left:  (@gutter / -2);\n  margin-right: (@gutter / -2);\n  &:extend(.clearfix all);\n}\n\n// Generate the extra small columns\n.make-xs-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  float: left;\n  width: percentage((@columns / @grid-columns));\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n}\n.make-xs-column-offset(@columns) {\n  @media (min-width: @screen-xs-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-xs-column-push(@columns) {\n  @media (min-width: @screen-xs-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-xs-column-pull(@columns) {\n  @media (min-width: @screen-xs-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Generate the small columns\n.make-sm-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n\n  @media (min-width: @screen-sm-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-offset(@columns) {\n  @media (min-width: @screen-sm-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-push(@columns) {\n  @media (min-width: @screen-sm-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-pull(@columns) {\n  @media (min-width: @screen-sm-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Generate the medium columns\n.make-md-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n\n  @media (min-width: @screen-md-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-offset(@columns) {\n  @media (min-width: @screen-md-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-push(@columns) {\n  @media (min-width: @screen-md-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-pull(@columns) {\n  @media (min-width: @screen-md-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Generate the large columns\n.make-lg-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n\n  @media (min-width: @screen-lg-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-offset(@columns) {\n  @media (min-width: @screen-lg-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  
}\n}\n.make-lg-column-push(@columns) {\n  @media (min-width: @screen-lg-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-pull(@columns) {\n  @media (min-width: @screen-lg-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Framework grid generation\n//\n// Used only by Bootstrap to generate the correct number of grid classes given\n// any value of `@grid-columns`.\n\n.make-grid-columns() {\n  // Common styles for all sizes of grid columns, widths 1-12\n  .col(@index) when (@index = 1) { // initial\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general; \"=<\" isn't a typo\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      position: relative;\n      // Prevent columns from collapsing when empty\n      min-height: 1px;\n      // Inner gutter via padding\n      padding-left:  (@grid-gutter-width / 2);\n      padding-right: (@grid-gutter-width / 2);\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.make-grid-columns-float(@class) {\n  .col(@index) when (@index = 1) { // initial\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      float: left;\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.calc-grid(@index, @class, @type) when (@type = width) and (@index > 0) {\n  .col-@{class}-@{index} {\n    width: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid(@index, @class, @type) when (@type = push) {\n  .col-@{class}-push-@{index} {\n    left: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid(@index, @class, @type) when (@type = pull) {\n  .col-@{class}-pull-@{index} {\n    right: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid(@index, @class, @type) when (@type = offset) {\n  .col-@{class}-offset-@{index} {\n    margin-left: percentage((@index / @grid-columns));\n  }\n}\n\n// Basic looping in LESS\n.make-grid(@index, @class, @type) when (@index >= 0) {\n  .calc-grid(@index, @class, @type);\n  // next iteration\n  .make-grid((@index - 1), @class, @type);\n}\n\n\n// Form validation states\n//\n// Used in forms.less to generate the form validation CSS for warnings, errors,\n// and successes.\n\n.form-control-validation(@text-color: #555; @border-color: #ccc; @background-color: #f5f5f5) {\n  // Color the label and help text\n  .help-block,\n  .control-label,\n  .radio,\n  .checkbox,\n  .radio-inline,\n  .checkbox-inline  {\n    color: @text-color;\n  }\n  // Set the border and box shadow on specific inputs to match\n  .form-control {\n    border-color: @border-color;\n    .box-shadow(inset 0 1px 1px rgba(0,0,0,.075)); // Redeclare so transitions work\n    &:focus {\n      border-color: darken(@border-color, 10%);\n      @shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 6px lighten(@border-color, 20%);\n      .box-shadow(@shadow);\n    }\n  }\n  // Set validation states also for addons\n  .input-group-addon {\n    color: @text-color;\n    border-color: @border-color;\n    background-color: @background-color;\n  }\n  // Optional feedback icon\n  
.form-control-feedback {\n    color: @text-color;\n  }\n}\n\n// Form control focus state\n//\n// Generate a customized focus state and for any input with the specified color,\n// which defaults to the `@input-focus-border` variable.\n//\n// We highly encourage you to not customize the default value, but instead use\n// this to tweak colors on an as-needed basis. This aesthetic change is based on\n// WebKit's default styles, but applicable to a wider range of browsers. Its\n// usability and accessibility should be taken into account with any change.\n//\n// Example usage: change the default blue border and shadow to white for better\n// contrast against a dark gray background.\n\n.form-control-focus(@color: @input-border-focus) {\n  @color-rgba: rgba(red(@color), green(@color), blue(@color), .6);\n  &:focus {\n    border-color: @color;\n    outline: 0;\n    .box-shadow(~\"inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px @{color-rgba}\");\n  }\n}\n\n// Form control sizing\n//\n// Relative text size, padding, and border-radii changes for form controls. For\n// horizontal sizing, wrap controls in the predefined grid classes. `<select>`\n// element gets special love because it's special, and that's a fact!\n\n.input-size(@input-height; @padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  height: @input-height;\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n\n  select& {\n    height: @input-height;\n    line-height: @input-height;\n  }\n\n  textarea&,\n  select[multiple]& {\n    height: auto;\n  }\n}\n"]}
\ No newline at end of file
diff --git a/doc/css/bootstrap-theme.min.css b/doc/css/bootstrap-theme.min.css
new file mode 100644 (file)
index 0000000..cff38df
--- /dev/null
+++ b/doc/css/bootstrap-theme.min.css
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v3.1.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+
+.btn-default,.btn-primary,.btn-success,.btn-info,.btn-warning,.btn-danger{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-default:active,.btn-primary:active,.btn-success:active,.btn-info:active,.btn-warning:active,.btn-danger:active,.btn-default.active,.btn-primary.active,.btn-success.active,.btn-info.active,.btn-warning.active,.btn-danger.active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn:active,.btn.active{background-image:none}.btn-default{background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;text-shadow:0 1px 0 #fff;border-color:#ccc}.btn-default:hover,.btn-default:focus{background-color:#e0e0e0;background-position:0 -15px}.btn-default:active,.btn-default.active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-primary{background-image:-webkit-linear-gradient(top,#428bca 0,#2d6ca2 100%);background-image:linear-gradient(to bottom,#428bca 0,#2d6ca2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff2d6ca2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#2b669a}.btn-primary:hover,.btn-primary:focus{background-color:#2d6ca2;background-position:0 -15px}.btn-primary:active,.btn-primary.active{background-color:#2d6ca2;border-color:#2b669a}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#419641 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:hover,.btn-success:focus{background-color:#419641;background-position:0 -15px}.btn-success:active,.btn-success.active{background-color:#419641;border-color:#3e8f3e}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:hover,.btn-info:focus{background-color:#2aabd2;background-position:0 -15px}.btn-info:active,.btn-info.active{background-color:#2aabd2;border-color:#28a4c9}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:hover,.btn-warning:focus{background-color:#eb9316;background-position:0 -15px}.btn-warning:active,.btn-warning.active{background-color:#eb9316;border-color:#e38d13}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 
100%);background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:hover,.btn-danger:focus{background-color:#c12e2a;background-position:0 -15px}.btn-danger:active,.btn-danger.active{background-color:#c12e2a;border-color:#b92c28}.thumbnail,.img-thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-color:#e8e8e8}.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{background-image:-webkit-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);background-color:#357ebd}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f3f3f3 100%);background-image:linear-gradient(to bottom,#ebebeb 0,#f3f3f3 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff3f3f3', GradientType=0);-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.navbar-inverse .navbar-nav>.active>a{background-image:-webkit-linear-gradient(top,#222 0,#282828 100%);background-image:linear-gradient(to bottom,#222 0,#282828 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222', endColorstr='#ff282828', GradientType=0);-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-static-top,.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px 
rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0)}.progress-bar{background-image:-webkit-linear-gradient(top,#428bca 0,#3071a9 100%);background-image:linear-gradient(to bottom,#428bca 0,#3071a9 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0)}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0)}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0)}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0)}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:hover,.list-group-item.active:focus{text-shadow:0 -1px 0 #3071a9;background-image:-webkit-linear-gradient(top,#428bca 0,#3278b3 100%);background-image:linear-gradient(to bottom,#428bca 0,#3278b3 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', 
endColorstr='#ff3278b3', GradientType=0);border-color:#3278b3}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0)}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0)}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0)}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0)}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0)}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0)}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)}
\ No newline at end of file
diff --git a/doc/css/bootstrap.css b/doc/css/bootstrap.css
new file mode 100644 (file)
index 0000000..16b635c
--- /dev/null
+++ b/doc/css/bootstrap.css
@@ -0,0 +1,5835 @@
+/*!
+ * Bootstrap v3.1.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+
+/*! normalize.css v3.0.0 | MIT License | git.io/normalize */
+html {
+  font-family: sans-serif;
+  -webkit-text-size-adjust: 100%;
+      -ms-text-size-adjust: 100%;
+}
+body {
+  margin: 0;
+}
+article,
+aside,
+details,
+figcaption,
+figure,
+footer,
+header,
+hgroup,
+main,
+nav,
+section,
+summary {
+  display: block;
+}
+audio,
+canvas,
+progress,
+video {
+  display: inline-block;
+  vertical-align: baseline;
+}
+audio:not([controls]) {
+  display: none;
+  height: 0;
+}
+[hidden],
+template {
+  display: none;
+}
+a {
+  background: transparent;
+}
+a:active,
+a:hover {
+  outline: 0;
+}
+abbr[title] {
+  border-bottom: 1px dotted;
+}
+b,
+strong {
+  font-weight: bold;
+}
+dfn {
+  font-style: italic;
+}
+h1 {
+  margin: .67em 0;
+  font-size: 2em;
+}
+mark {
+  color: #000;
+  background: #ff0;
+}
+small {
+  font-size: 80%;
+}
+sub,
+sup {
+  position: relative;
+  font-size: 75%;
+  line-height: 0;
+  vertical-align: baseline;
+}
+sup {
+  top: -.5em;
+}
+sub {
+  bottom: -.25em;
+}
+img {
+  border: 0;
+}
+svg:not(:root) {
+  overflow: hidden;
+}
+figure {
+  margin: 1em 40px;
+}
+hr {
+  height: 0;
+  -moz-box-sizing: content-box;
+       box-sizing: content-box;
+}
+pre {
+  overflow: auto;
+}
+code,
+kbd,
+pre,
+samp {
+  font-family: monospace, monospace;
+  font-size: 1em;
+}
+button,
+input,
+optgroup,
+select,
+textarea {
+  margin: 0;
+  font: inherit;
+  color: inherit;
+}
+button {
+  overflow: visible;
+}
+button,
+select {
+  text-transform: none;
+}
+button,
+html input[type="button"],
+input[type="reset"],
+input[type="submit"] {
+  -webkit-appearance: button;
+  cursor: pointer;
+}
+button[disabled],
+html input[disabled] {
+  cursor: default;
+}
+button::-moz-focus-inner,
+input::-moz-focus-inner {
+  padding: 0;
+  border: 0;
+}
+input {
+  line-height: normal;
+}
+input[type="checkbox"],
+input[type="radio"] {
+  box-sizing: border-box;
+  padding: 0;
+}
+input[type="number"]::-webkit-inner-spin-button,
+input[type="number"]::-webkit-outer-spin-button {
+  height: auto;
+}
+input[type="search"] {
+  -webkit-box-sizing: content-box;
+     -moz-box-sizing: content-box;
+          box-sizing: content-box;
+  -webkit-appearance: textfield;
+}
+input[type="search"]::-webkit-search-cancel-button,
+input[type="search"]::-webkit-search-decoration {
+  -webkit-appearance: none;
+}
+fieldset {
+  padding: .35em .625em .75em;
+  margin: 0 2px;
+  border: 1px solid #c0c0c0;
+}
+legend {
+  padding: 0;
+  border: 0;
+}
+textarea {
+  overflow: auto;
+}
+optgroup {
+  font-weight: bold;
+}
+table {
+  border-spacing: 0;
+  border-collapse: collapse;
+}
+td,
+th {
+  padding: 0;
+}
+@media print {
+  * {
+    color: #000 !important;
+    text-shadow: none !important;
+    background: transparent !important;
+    box-shadow: none !important;
+  }
+  a,
+  a:visited {
+    text-decoration: underline;
+  }
+  a[href]:after {
+    content: " (" attr(href) ")";
+  }
+  abbr[title]:after {
+    content: " (" attr(title) ")";
+  }
+  a[href^="javascript:"]:after,
+  a[href^="#"]:after {
+    content: "";
+  }
+  pre,
+  blockquote {
+    border: 1px solid #999;
+
+    page-break-inside: avoid;
+  }
+  thead {
+    display: table-header-group;
+  }
+  tr,
+  img {
+    page-break-inside: avoid;
+  }
+  img {
+    max-width: 100% !important;
+  }
+  p,
+  h2,
+  h3 {
+    orphans: 3;
+    widows: 3;
+  }
+  h2,
+  h3 {
+    page-break-after: avoid;
+  }
+  select {
+    background: #fff !important;
+  }
+  .navbar {
+    display: none;
+  }
+  .table td,
+  .table th {
+    background-color: #fff !important;
+  }
+  .btn > .caret,
+  .dropup > .btn > .caret {
+    border-top-color: #000 !important;
+  }
+  .label {
+    border: 1px solid #000;
+  }
+  .table {
+    border-collapse: collapse !important;
+  }
+  .table-bordered th,
+  .table-bordered td {
+    border: 1px solid #ddd !important;
+  }
+}
+* {
+  -webkit-box-sizing: border-box;
+     -moz-box-sizing: border-box;
+          box-sizing: border-box;
+}
+*:before,
+*:after {
+  -webkit-box-sizing: border-box;
+     -moz-box-sizing: border-box;
+          box-sizing: border-box;
+}
+html {
+  font-size: 62.5%;
+
+  -webkit-tap-highlight-color: rgba(0, 0, 0, 0);
+}
+body {
+  font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+  font-size: 14px;
+  line-height: 1.428571429;
+  color: #333;
+  background-color: #fff;
+}
+input,
+button,
+select,
+textarea {
+  font-family: inherit;
+  font-size: inherit;
+  line-height: inherit;
+}
+a {
+  color: #428bca;
+  text-decoration: none;
+}
+a:hover,
+a:focus {
+  color: #2a6496;
+  text-decoration: underline;
+}
+a:focus {
+  outline: thin dotted;
+  outline: 5px auto -webkit-focus-ring-color;
+  outline-offset: -2px;
+}
+figure {
+  margin: 0;
+}
+img {
+  vertical-align: middle;
+}
+.img-responsive {
+  display: block;
+  max-width: 100%;
+  height: auto;
+}
+.img-rounded {
+  border-radius: 6px;
+}
+.img-thumbnail {
+  display: inline-block;
+  max-width: 100%;
+  height: auto;
+  padding: 4px;
+  line-height: 1.428571429;
+  background-color: #fff;
+  border: 1px solid #ddd;
+  border-radius: 4px;
+  -webkit-transition: all .2s ease-in-out;
+          transition: all .2s ease-in-out;
+}
+.img-circle {
+  border-radius: 50%;
+}
+hr {
+  margin-top: 20px;
+  margin-bottom: 20px;
+  border: 0;
+  border-top: 1px solid #eee;
+}
+.sr-only {
+  position: absolute;
+  width: 1px;
+  height: 1px;
+  padding: 0;
+  margin: -1px;
+  overflow: hidden;
+  clip: rect(0, 0, 0, 0);
+  border: 0;
+}
+h1,
+h2,
+h3,
+h4,
+h5,
+h6,
+.h1,
+.h2,
+.h3,
+.h4,
+.h5,
+.h6 {
+  font-family: inherit;
+  font-weight: 500;
+  line-height: 1.1;
+  color: inherit;
+}
+h1 small,
+h2 small,
+h3 small,
+h4 small,
+h5 small,
+h6 small,
+.h1 small,
+.h2 small,
+.h3 small,
+.h4 small,
+.h5 small,
+.h6 small,
+h1 .small,
+h2 .small,
+h3 .small,
+h4 .small,
+h5 .small,
+h6 .small,
+.h1 .small,
+.h2 .small,
+.h3 .small,
+.h4 .small,
+.h5 .small,
+.h6 .small {
+  font-weight: normal;
+  line-height: 1;
+  color: #999;
+}
+h1,
+.h1,
+h2,
+.h2,
+h3,
+.h3 {
+  margin-top: 20px;
+  margin-bottom: 10px;
+}
+h1 small,
+.h1 small,
+h2 small,
+.h2 small,
+h3 small,
+.h3 small,
+h1 .small,
+.h1 .small,
+h2 .small,
+.h2 .small,
+h3 .small,
+.h3 .small {
+  font-size: 65%;
+}
+h4,
+.h4,
+h5,
+.h5,
+h6,
+.h6 {
+  margin-top: 10px;
+  margin-bottom: 10px;
+}
+h4 small,
+.h4 small,
+h5 small,
+.h5 small,
+h6 small,
+.h6 small,
+h4 .small,
+.h4 .small,
+h5 .small,
+.h5 .small,
+h6 .small,
+.h6 .small {
+  font-size: 75%;
+}
+h1,
+.h1 {
+  font-size: 36px;
+}
+h2,
+.h2 {
+  font-size: 30px;
+}
+h3,
+.h3 {
+  font-size: 24px;
+}
+h4,
+.h4 {
+  font-size: 18px;
+}
+h5,
+.h5 {
+  font-size: 14px;
+}
+h6,
+.h6 {
+  font-size: 12px;
+}
+p {
+  margin: 0 0 10px;
+}
+.lead {
+  margin-bottom: 20px;
+  font-size: 16px;
+  font-weight: 200;
+  line-height: 1.4;
+}
+@media (min-width: 768px) {
+  .lead {
+    font-size: 21px;
+  }
+}
+small,
+.small {
+  font-size: 85%;
+}
+cite {
+  font-style: normal;
+}
+.text-left {
+  text-align: left;
+}
+.text-right {
+  text-align: right;
+}
+.text-center {
+  text-align: center;
+}
+.text-justify {
+  text-align: justify;
+}
+.text-muted {
+  color: #999;
+}
+.text-primary {
+  color: #428bca;
+}
+a.text-primary:hover {
+  color: #3071a9;
+}
+.text-success {
+  color: #3c763d;
+}
+a.text-success:hover {
+  color: #2b542c;
+}
+.text-info {
+  color: #31708f;
+}
+a.text-info:hover {
+  color: #245269;
+}
+.text-warning {
+  color: #8a6d3b;
+}
+a.text-warning:hover {
+  color: #66512c;
+}
+.text-danger {
+  color: #a94442;
+}
+a.text-danger:hover {
+  color: #843534;
+}
+.bg-primary {
+  color: #fff;
+  background-color: #428bca;
+}
+a.bg-primary:hover {
+  background-color: #3071a9;
+}
+.bg-success {
+  background-color: #dff0d8;
+}
+a.bg-success:hover {
+  background-color: #c1e2b3;
+}
+.bg-info {
+  background-color: #d9edf7;
+}
+a.bg-info:hover {
+  background-color: #afd9ee;
+}
+.bg-warning {
+  background-color: #fcf8e3;
+}
+a.bg-warning:hover {
+  background-color: #f7ecb5;
+}
+.bg-danger {
+  background-color: #f2dede;
+}
+a.bg-danger:hover {
+  background-color: #e4b9b9;
+}
+.page-header {
+  padding-bottom: 9px;
+  margin: 40px 0 20px;
+  border-bottom: 1px solid #eee;
+}
+ul,
+ol {
+  margin-top: 0;
+  margin-bottom: 10px;
+}
+ul ul,
+ol ul,
+ul ol,
+ol ol {
+  margin-bottom: 0;
+}
+.list-unstyled {
+  padding-left: 0;
+  list-style: none;
+}
+.list-inline {
+  padding-left: 0;
+  list-style: none;
+}
+.list-inline > li {
+  display: inline-block;
+  padding-right: 5px;
+  padding-left: 5px;
+}
+.list-inline > li:first-child {
+  padding-left: 0;
+}
+dl {
+  margin-top: 0;
+  margin-bottom: 20px;
+}
+dt,
+dd {
+  line-height: 1.428571429;
+}
+dt {
+  font-weight: bold;
+}
+dd {
+  margin-left: 0;
+}
+@media (min-width: 768px) {
+  .dl-horizontal dt {
+    float: left;
+    width: 160px;
+    overflow: hidden;
+    clear: left;
+    text-align: right;
+    text-overflow: ellipsis;
+    white-space: nowrap;
+  }
+  .dl-horizontal dd {
+    margin-left: 180px;
+  }
+}
+abbr[title],
+abbr[data-original-title] {
+  cursor: help;
+  border-bottom: 1px dotted #999;
+}
+.initialism {
+  font-size: 90%;
+  text-transform: uppercase;
+}
+blockquote {
+  padding: 10px 20px;
+  margin: 0 0 20px;
+  font-size: 17.5px;
+  border-left: 5px solid #eee;
+}
+blockquote p:last-child,
+blockquote ul:last-child,
+blockquote ol:last-child {
+  margin-bottom: 0;
+}
+blockquote footer,
+blockquote small,
+blockquote .small {
+  display: block;
+  font-size: 80%;
+  line-height: 1.428571429;
+  color: #999;
+}
+blockquote footer:before,
+blockquote small:before,
+blockquote .small:before {
+  content: '\2014 \00A0';
+}
+.blockquote-reverse,
+blockquote.pull-right {
+  padding-right: 15px;
+  padding-left: 0;
+  text-align: right;
+  border-right: 5px solid #eee;
+  border-left: 0;
+}
+.blockquote-reverse footer:before,
+blockquote.pull-right footer:before,
+.blockquote-reverse small:before,
+blockquote.pull-right small:before,
+.blockquote-reverse .small:before,
+blockquote.pull-right .small:before {
+  content: '';
+}
+.blockquote-reverse footer:after,
+blockquote.pull-right footer:after,
+.blockquote-reverse small:after,
+blockquote.pull-right small:after,
+.blockquote-reverse .small:after,
+blockquote.pull-right .small:after {
+  content: '\00A0 \2014';
+}
+blockquote:before,
+blockquote:after {
+  content: "";
+}
+address {
+  margin-bottom: 20px;
+  font-style: normal;
+  line-height: 1.428571429;
+}
+code,
+kbd,
+pre,
+samp {
+  font-family: Menlo, Monaco, Consolas, "Courier New", monospace;
+}
+code {
+  padding: 2px 4px;
+  font-size: 90%;
+  color: #c7254e;
+  white-space: nowrap;
+  background-color: #f9f2f4;
+  border-radius: 4px;
+}
+kbd {
+  padding: 2px 4px;
+  font-size: 90%;
+  color: #fff;
+  background-color: #333;
+  border-radius: 3px;
+  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);
+}
+pre {
+  display: block;
+  padding: 9.5px;
+  margin: 0 0 10px;
+  font-size: 13px;
+  line-height: 1.428571429;
+  color: #333;
+  word-break: break-all;
+  word-wrap: break-word;
+  background-color: #f5f5f5;
+  border: 1px solid #ccc;
+  border-radius: 4px;
+}
+pre code {
+  padding: 0;
+  font-size: inherit;
+  color: inherit;
+  white-space: pre-wrap;
+  background-color: transparent;
+  border-radius: 0;
+}
+.pre-scrollable {
+  max-height: 340px;
+  overflow-y: scroll;
+}
+.container {
+  padding-right: 15px;
+  padding-left: 15px;
+  margin-right: auto;
+  margin-left: auto;
+}
+@media (min-width: 768px) {
+  .container {
+    width: 750px;
+  }
+}
+@media (min-width: 992px) {
+  .container {
+    width: 970px;
+  }
+}
+@media (min-width: 1200px) {
+  .container {
+    width: 1170px;
+  }
+}
+.container-fluid {
+  padding-right: 15px;
+  padding-left: 15px;
+  margin-right: auto;
+  margin-left: auto;
+}
+.row {
+  margin-right: -15px;
+  margin-left: -15px;
+}
+.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {
+  position: relative;
+  min-height: 1px;
+  padding-right: 15px;
+  padding-left: 15px;
+}
+.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 {
+  float: left;
+}
+.col-xs-12 {
+  width: 100%;
+}
+.col-xs-11 {
+  width: 91.66666666666666%;
+}
+.col-xs-10 {
+  width: 83.33333333333334%;
+}
+.col-xs-9 {
+  width: 75%;
+}
+.col-xs-8 {
+  width: 66.66666666666666%;
+}
+.col-xs-7 {
+  width: 58.333333333333336%;
+}
+.col-xs-6 {
+  width: 50%;
+}
+.col-xs-5 {
+  width: 41.66666666666667%;
+}
+.col-xs-4 {
+  width: 33.33333333333333%;
+}
+.col-xs-3 {
+  width: 25%;
+}
+.col-xs-2 {
+  width: 16.666666666666664%;
+}
+.col-xs-1 {
+  width: 8.333333333333332%;
+}
+.col-xs-pull-12 {
+  right: 100%;
+}
+.col-xs-pull-11 {
+  right: 91.66666666666666%;
+}
+.col-xs-pull-10 {
+  right: 83.33333333333334%;
+}
+.col-xs-pull-9 {
+  right: 75%;
+}
+.col-xs-pull-8 {
+  right: 66.66666666666666%;
+}
+.col-xs-pull-7 {
+  right: 58.333333333333336%;
+}
+.col-xs-pull-6 {
+  right: 50%;
+}
+.col-xs-pull-5 {
+  right: 41.66666666666667%;
+}
+.col-xs-pull-4 {
+  right: 33.33333333333333%;
+}
+.col-xs-pull-3 {
+  right: 25%;
+}
+.col-xs-pull-2 {
+  right: 16.666666666666664%;
+}
+.col-xs-pull-1 {
+  right: 8.333333333333332%;
+}
+.col-xs-pull-0 {
+  right: 0;
+}
+.col-xs-push-12 {
+  left: 100%;
+}
+.col-xs-push-11 {
+  left: 91.66666666666666%;
+}
+.col-xs-push-10 {
+  left: 83.33333333333334%;
+}
+.col-xs-push-9 {
+  left: 75%;
+}
+.col-xs-push-8 {
+  left: 66.66666666666666%;
+}
+.col-xs-push-7 {
+  left: 58.333333333333336%;
+}
+.col-xs-push-6 {
+  left: 50%;
+}
+.col-xs-push-5 {
+  left: 41.66666666666667%;
+}
+.col-xs-push-4 {
+  left: 33.33333333333333%;
+}
+.col-xs-push-3 {
+  left: 25%;
+}
+.col-xs-push-2 {
+  left: 16.666666666666664%;
+}
+.col-xs-push-1 {
+  left: 8.333333333333332%;
+}
+.col-xs-push-0 {
+  left: 0;
+}
+.col-xs-offset-12 {
+  margin-left: 100%;
+}
+.col-xs-offset-11 {
+  margin-left: 91.66666666666666%;
+}
+.col-xs-offset-10 {
+  margin-left: 83.33333333333334%;
+}
+.col-xs-offset-9 {
+  margin-left: 75%;
+}
+.col-xs-offset-8 {
+  margin-left: 66.66666666666666%;
+}
+.col-xs-offset-7 {
+  margin-left: 58.333333333333336%;
+}
+.col-xs-offset-6 {
+  margin-left: 50%;
+}
+.col-xs-offset-5 {
+  margin-left: 41.66666666666667%;
+}
+.col-xs-offset-4 {
+  margin-left: 33.33333333333333%;
+}
+.col-xs-offset-3 {
+  margin-left: 25%;
+}
+.col-xs-offset-2 {
+  margin-left: 16.666666666666664%;
+}
+.col-xs-offset-1 {
+  margin-left: 8.333333333333332%;
+}
+.col-xs-offset-0 {
+  margin-left: 0;
+}
+@media (min-width: 768px) {
+  .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 {
+    float: left;
+  }
+  .col-sm-12 {
+    width: 100%;
+  }
+  .col-sm-11 {
+    width: 91.66666666666666%;
+  }
+  .col-sm-10 {
+    width: 83.33333333333334%;
+  }
+  .col-sm-9 {
+    width: 75%;
+  }
+  .col-sm-8 {
+    width: 66.66666666666666%;
+  }
+  .col-sm-7 {
+    width: 58.333333333333336%;
+  }
+  .col-sm-6 {
+    width: 50%;
+  }
+  .col-sm-5 {
+    width: 41.66666666666667%;
+  }
+  .col-sm-4 {
+    width: 33.33333333333333%;
+  }
+  .col-sm-3 {
+    width: 25%;
+  }
+  .col-sm-2 {
+    width: 16.666666666666664%;
+  }
+  .col-sm-1 {
+    width: 8.333333333333332%;
+  }
+  .col-sm-pull-12 {
+    right: 100%;
+  }
+  .col-sm-pull-11 {
+    right: 91.66666666666666%;
+  }
+  .col-sm-pull-10 {
+    right: 83.33333333333334%;
+  }
+  .col-sm-pull-9 {
+    right: 75%;
+  }
+  .col-sm-pull-8 {
+    right: 66.66666666666666%;
+  }
+  .col-sm-pull-7 {
+    right: 58.333333333333336%;
+  }
+  .col-sm-pull-6 {
+    right: 50%;
+  }
+  .col-sm-pull-5 {
+    right: 41.66666666666667%;
+  }
+  .col-sm-pull-4 {
+    right: 33.33333333333333%;
+  }
+  .col-sm-pull-3 {
+    right: 25%;
+  }
+  .col-sm-pull-2 {
+    right: 16.666666666666664%;
+  }
+  .col-sm-pull-1 {
+    right: 8.333333333333332%;
+  }
+  .col-sm-pull-0 {
+    right: 0;
+  }
+  .col-sm-push-12 {
+    left: 100%;
+  }
+  .col-sm-push-11 {
+    left: 91.66666666666666%;
+  }
+  .col-sm-push-10 {
+    left: 83.33333333333334%;
+  }
+  .col-sm-push-9 {
+    left: 75%;
+  }
+  .col-sm-push-8 {
+    left: 66.66666666666666%;
+  }
+  .col-sm-push-7 {
+    left: 58.333333333333336%;
+  }
+  .col-sm-push-6 {
+    left: 50%;
+  }
+  .col-sm-push-5 {
+    left: 41.66666666666667%;
+  }
+  .col-sm-push-4 {
+    left: 33.33333333333333%;
+  }
+  .col-sm-push-3 {
+    left: 25%;
+  }
+  .col-sm-push-2 {
+    left: 16.666666666666664%;
+  }
+  .col-sm-push-1 {
+    left: 8.333333333333332%;
+  }
+  .col-sm-push-0 {
+    left: 0;
+  }
+  .col-sm-offset-12 {
+    margin-left: 100%;
+  }
+  .col-sm-offset-11 {
+    margin-left: 91.66666666666666%;
+  }
+  .col-sm-offset-10 {
+    margin-left: 83.33333333333334%;
+  }
+  .col-sm-offset-9 {
+    margin-left: 75%;
+  }
+  .col-sm-offset-8 {
+    margin-left: 66.66666666666666%;
+  }
+  .col-sm-offset-7 {
+    margin-left: 58.333333333333336%;
+  }
+  .col-sm-offset-6 {
+    margin-left: 50%;
+  }
+  .col-sm-offset-5 {
+    margin-left: 41.66666666666667%;
+  }
+  .col-sm-offset-4 {
+    margin-left: 33.33333333333333%;
+  }
+  .col-sm-offset-3 {
+    margin-left: 25%;
+  }
+  .col-sm-offset-2 {
+    margin-left: 16.666666666666664%;
+  }
+  .col-sm-offset-1 {
+    margin-left: 8.333333333333332%;
+  }
+  .col-sm-offset-0 {
+    margin-left: 0;
+  }
+}
+@media (min-width: 992px) {
+  .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 {
+    float: left;
+  }
+  .col-md-12 {
+    width: 100%;
+  }
+  .col-md-11 {
+    width: 91.66666666666666%;
+  }
+  .col-md-10 {
+    width: 83.33333333333334%;
+  }
+  .col-md-9 {
+    width: 75%;
+  }
+  .col-md-8 {
+    width: 66.66666666666666%;
+  }
+  .col-md-7 {
+    width: 58.333333333333336%;
+  }
+  .col-md-6 {
+    width: 50%;
+  }
+  .col-md-5 {
+    width: 41.66666666666667%;
+  }
+  .col-md-4 {
+    width: 33.33333333333333%;
+  }
+  .col-md-3 {
+    width: 25%;
+  }
+  .col-md-2 {
+    width: 16.666666666666664%;
+  }
+  .col-md-1 {
+    width: 8.333333333333332%;
+  }
+  .col-md-pull-12 {
+    right: 100%;
+  }
+  .col-md-pull-11 {
+    right: 91.66666666666666%;
+  }
+  .col-md-pull-10 {
+    right: 83.33333333333334%;
+  }
+  .col-md-pull-9 {
+    right: 75%;
+  }
+  .col-md-pull-8 {
+    right: 66.66666666666666%;
+  }
+  .col-md-pull-7 {
+    right: 58.333333333333336%;
+  }
+  .col-md-pull-6 {
+    right: 50%;
+  }
+  .col-md-pull-5 {
+    right: 41.66666666666667%;
+  }
+  .col-md-pull-4 {
+    right: 33.33333333333333%;
+  }
+  .col-md-pull-3 {
+    right: 25%;
+  }
+  .col-md-pull-2 {
+    right: 16.666666666666664%;
+  }
+  .col-md-pull-1 {
+    right: 8.333333333333332%;
+  }
+  .col-md-pull-0 {
+    right: 0;
+  }
+  .col-md-push-12 {
+    left: 100%;
+  }
+  .col-md-push-11 {
+    left: 91.66666666666666%;
+  }
+  .col-md-push-10 {
+    left: 83.33333333333334%;
+  }
+  .col-md-push-9 {
+    left: 75%;
+  }
+  .col-md-push-8 {
+    left: 66.66666666666666%;
+  }
+  .col-md-push-7 {
+    left: 58.333333333333336%;
+  }
+  .col-md-push-6 {
+    left: 50%;
+  }
+  .col-md-push-5 {
+    left: 41.66666666666667%;
+  }
+  .col-md-push-4 {
+    left: 33.33333333333333%;
+  }
+  .col-md-push-3 {
+    left: 25%;
+  }
+  .col-md-push-2 {
+    left: 16.666666666666664%;
+  }
+  .col-md-push-1 {
+    left: 8.333333333333332%;
+  }
+  .col-md-push-0 {
+    left: 0;
+  }
+  .col-md-offset-12 {
+    margin-left: 100%;
+  }
+  .col-md-offset-11 {
+    margin-left: 91.66666666666666%;
+  }
+  .col-md-offset-10 {
+    margin-left: 83.33333333333334%;
+  }
+  .col-md-offset-9 {
+    margin-left: 75%;
+  }
+  .col-md-offset-8 {
+    margin-left: 66.66666666666666%;
+  }
+  .col-md-offset-7 {
+    margin-left: 58.333333333333336%;
+  }
+  .col-md-offset-6 {
+    margin-left: 50%;
+  }
+  .col-md-offset-5 {
+    margin-left: 41.66666666666667%;
+  }
+  .col-md-offset-4 {
+    margin-left: 33.33333333333333%;
+  }
+  .col-md-offset-3 {
+    margin-left: 25%;
+  }
+  .col-md-offset-2 {
+    margin-left: 16.666666666666664%;
+  }
+  .col-md-offset-1 {
+    margin-left: 8.333333333333332%;
+  }
+  .col-md-offset-0 {
+    margin-left: 0;
+  }
+}
+@media (min-width: 1200px) {
+  .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 {
+    float: left;
+  }
+  .col-lg-12 {
+    width: 100%;
+  }
+  .col-lg-11 {
+    width: 91.66666666666666%;
+  }
+  .col-lg-10 {
+    width: 83.33333333333334%;
+  }
+  .col-lg-9 {
+    width: 75%;
+  }
+  .col-lg-8 {
+    width: 66.66666666666666%;
+  }
+  .col-lg-7 {
+    width: 58.333333333333336%;
+  }
+  .col-lg-6 {
+    width: 50%;
+  }
+  .col-lg-5 {
+    width: 41.66666666666667%;
+  }
+  .col-lg-4 {
+    width: 33.33333333333333%;
+  }
+  .col-lg-3 {
+    width: 25%;
+  }
+  .col-lg-2 {
+    width: 16.666666666666664%;
+  }
+  .col-lg-1 {
+    width: 8.333333333333332%;
+  }
+  .col-lg-pull-12 {
+    right: 100%;
+  }
+  .col-lg-pull-11 {
+    right: 91.66666666666666%;
+  }
+  .col-lg-pull-10 {
+    right: 83.33333333333334%;
+  }
+  .col-lg-pull-9 {
+    right: 75%;
+  }
+  .col-lg-pull-8 {
+    right: 66.66666666666666%;
+  }
+  .col-lg-pull-7 {
+    right: 58.333333333333336%;
+  }
+  .col-lg-pull-6 {
+    right: 50%;
+  }
+  .col-lg-pull-5 {
+    right: 41.66666666666667%;
+  }
+  .col-lg-pull-4 {
+    right: 33.33333333333333%;
+  }
+  .col-lg-pull-3 {
+    right: 25%;
+  }
+  .col-lg-pull-2 {
+    right: 16.666666666666664%;
+  }
+  .col-lg-pull-1 {
+    right: 8.333333333333332%;
+  }
+  .col-lg-pull-0 {
+    right: 0;
+  }
+  .col-lg-push-12 {
+    left: 100%;
+  }
+  .col-lg-push-11 {
+    left: 91.66666666666666%;
+  }
+  .col-lg-push-10 {
+    left: 83.33333333333334%;
+  }
+  .col-lg-push-9 {
+    left: 75%;
+  }
+  .col-lg-push-8 {
+    left: 66.66666666666666%;
+  }
+  .col-lg-push-7 {
+    left: 58.333333333333336%;
+  }
+  .col-lg-push-6 {
+    left: 50%;
+  }
+  .col-lg-push-5 {
+    left: 41.66666666666667%;
+  }
+  .col-lg-push-4 {
+    left: 33.33333333333333%;
+  }
+  .col-lg-push-3 {
+    left: 25%;
+  }
+  .col-lg-push-2 {
+    left: 16.666666666666664%;
+  }
+  .col-lg-push-1 {
+    left: 8.333333333333332%;
+  }
+  .col-lg-push-0 {
+    left: 0;
+  }
+  .col-lg-offset-12 {
+    margin-left: 100%;
+  }
+  .col-lg-offset-11 {
+    margin-left: 91.66666666666666%;
+  }
+  .col-lg-offset-10 {
+    margin-left: 83.33333333333334%;
+  }
+  .col-lg-offset-9 {
+    margin-left: 75%;
+  }
+  .col-lg-offset-8 {
+    margin-left: 66.66666666666666%;
+  }
+  .col-lg-offset-7 {
+    margin-left: 58.333333333333336%;
+  }
+  .col-lg-offset-6 {
+    margin-left: 50%;
+  }
+  .col-lg-offset-5 {
+    margin-left: 41.66666666666667%;
+  }
+  .col-lg-offset-4 {
+    margin-left: 33.33333333333333%;
+  }
+  .col-lg-offset-3 {
+    margin-left: 25%;
+  }
+  .col-lg-offset-2 {
+    margin-left: 16.666666666666664%;
+  }
+  .col-lg-offset-1 {
+    margin-left: 8.333333333333332%;
+  }
+  .col-lg-offset-0 {
+    margin-left: 0;
+  }
+}
+table {
+  max-width: 100%;
+  background-color: transparent;
+}
+th {
+  text-align: left;
+}
+.table {
+  width: 100%;
+  margin-bottom: 20px;
+}
+.table > thead > tr > th,
+.table > tbody > tr > th,
+.table > tfoot > tr > th,
+.table > thead > tr > td,
+.table > tbody > tr > td,
+.table > tfoot > tr > td {
+  padding: 8px;
+  line-height: 1.428571429;
+  vertical-align: top;
+  border-top: 1px solid #ddd;
+}
+.table > thead > tr > th {
+  vertical-align: bottom;
+  border-bottom: 2px solid #ddd;
+}
+.table > caption + thead > tr:first-child > th,
+.table > colgroup + thead > tr:first-child > th,
+.table > thead:first-child > tr:first-child > th,
+.table > caption + thead > tr:first-child > td,
+.table > colgroup + thead > tr:first-child > td,
+.table > thead:first-child > tr:first-child > td {
+  border-top: 0;
+}
+.table > tbody + tbody {
+  border-top: 2px solid #ddd;
+}
+.table .table {
+  background-color: #fff;
+}
+.table-condensed > thead > tr > th,
+.table-condensed > tbody > tr > th,
+.table-condensed > tfoot > tr > th,
+.table-condensed > thead > tr > td,
+.table-condensed > tbody > tr > td,
+.table-condensed > tfoot > tr > td {
+  padding: 5px;
+}
+.table-bordered {
+  border: 1px solid #ddd;
+}
+.table-bordered > thead > tr > th,
+.table-bordered > tbody > tr > th,
+.table-bordered > tfoot > tr > th,
+.table-bordered > thead > tr > td,
+.table-bordered > tbody > tr > td,
+.table-bordered > tfoot > tr > td {
+  border: 1px solid #ddd;
+}
+.table-bordered > thead > tr > th,
+.table-bordered > thead > tr > td {
+  border-bottom-width: 2px;
+}
+.table-striped > tbody > tr:nth-child(odd) > td,
+.table-striped > tbody > tr:nth-child(odd) > th {
+  background-color: #f9f9f9;
+}
+.table-hover > tbody > tr:hover > td,
+.table-hover > tbody > tr:hover > th {
+  background-color: #f5f5f5;
+}
+table col[class*="col-"] {
+  position: static;
+  display: table-column;
+  float: none;
+}
+table td[class*="col-"],
+table th[class*="col-"] {
+  position: static;
+  display: table-cell;
+  float: none;
+}
+.table > thead > tr > td.active,
+.table > tbody > tr > td.active,
+.table > tfoot > tr > td.active,
+.table > thead > tr > th.active,
+.table > tbody > tr > th.active,
+.table > tfoot > tr > th.active,
+.table > thead > tr.active > td,
+.table > tbody > tr.active > td,
+.table > tfoot > tr.active > td,
+.table > thead > tr.active > th,
+.table > tbody > tr.active > th,
+.table > tfoot > tr.active > th {
+  background-color: #f5f5f5;
+}
+.table-hover > tbody > tr > td.active:hover,
+.table-hover > tbody > tr > th.active:hover,
+.table-hover > tbody > tr.active:hover > td,
+.table-hover > tbody > tr.active:hover > th {
+  background-color: #e8e8e8;
+}
+.table > thead > tr > td.success,
+.table > tbody > tr > td.success,
+.table > tfoot > tr > td.success,
+.table > thead > tr > th.success,
+.table > tbody > tr > th.success,
+.table > tfoot > tr > th.success,
+.table > thead > tr.success > td,
+.table > tbody > tr.success > td,
+.table > tfoot > tr.success > td,
+.table > thead > tr.success > th,
+.table > tbody > tr.success > th,
+.table > tfoot > tr.success > th {
+  background-color: #dff0d8;
+}
+.table-hover > tbody > tr > td.success:hover,
+.table-hover > tbody > tr > th.success:hover,
+.table-hover > tbody > tr.success:hover > td,
+.table-hover > tbody > tr.success:hover > th {
+  background-color: #d0e9c6;
+}
+.table > thead > tr > td.info,
+.table > tbody > tr > td.info,
+.table > tfoot > tr > td.info,
+.table > thead > tr > th.info,
+.table > tbody > tr > th.info,
+.table > tfoot > tr > th.info,
+.table > thead > tr.info > td,
+.table > tbody > tr.info > td,
+.table > tfoot > tr.info > td,
+.table > thead > tr.info > th,
+.table > tbody > tr.info > th,
+.table > tfoot > tr.info > th {
+  background-color: #d9edf7;
+}
+.table-hover > tbody > tr > td.info:hover,
+.table-hover > tbody > tr > th.info:hover,
+.table-hover > tbody > tr.info:hover > td,
+.table-hover > tbody > tr.info:hover > th {
+  background-color: #c4e3f3;
+}
+.table > thead > tr > td.warning,
+.table > tbody > tr > td.warning,
+.table > tfoot > tr > td.warning,
+.table > thead > tr > th.warning,
+.table > tbody > tr > th.warning,
+.table > tfoot > tr > th.warning,
+.table > thead > tr.warning > td,
+.table > tbody > tr.warning > td,
+.table > tfoot > tr.warning > td,
+.table > thead > tr.warning > th,
+.table > tbody > tr.warning > th,
+.table > tfoot > tr.warning > th {
+  background-color: #fcf8e3;
+}
+.table-hover > tbody > tr > td.warning:hover,
+.table-hover > tbody > tr > th.warning:hover,
+.table-hover > tbody > tr.warning:hover > td,
+.table-hover > tbody > tr.warning:hover > th {
+  background-color: #faf2cc;
+}
+.table > thead > tr > td.danger,
+.table > tbody > tr > td.danger,
+.table > tfoot > tr > td.danger,
+.table > thead > tr > th.danger,
+.table > tbody > tr > th.danger,
+.table > tfoot > tr > th.danger,
+.table > thead > tr.danger > td,
+.table > tbody > tr.danger > td,
+.table > tfoot > tr.danger > td,
+.table > thead > tr.danger > th,
+.table > tbody > tr.danger > th,
+.table > tfoot > tr.danger > th {
+  background-color: #f2dede;
+}
+.table-hover > tbody > tr > td.danger:hover,
+.table-hover > tbody > tr > th.danger:hover,
+.table-hover > tbody > tr.danger:hover > td,
+.table-hover > tbody > tr.danger:hover > th {
+  background-color: #ebcccc;
+}
+@media (max-width: 767px) {
+  .table-responsive {
+    width: 100%;
+    margin-bottom: 15px;
+    overflow-x: scroll;
+    overflow-y: hidden;
+    -webkit-overflow-scrolling: touch;
+    -ms-overflow-style: -ms-autohiding-scrollbar;
+    border: 1px solid #ddd;
+  }
+  .table-responsive > .table {
+    margin-bottom: 0;
+  }
+  .table-responsive > .table > thead > tr > th,
+  .table-responsive > .table > tbody > tr > th,
+  .table-responsive > .table > tfoot > tr > th,
+  .table-responsive > .table > thead > tr > td,
+  .table-responsive > .table > tbody > tr > td,
+  .table-responsive > .table > tfoot > tr > td {
+    white-space: nowrap;
+  }
+  .table-responsive > .table-bordered {
+    border: 0;
+  }
+  .table-responsive > .table-bordered > thead > tr > th:first-child,
+  .table-responsive > .table-bordered > tbody > tr > th:first-child,
+  .table-responsive > .table-bordered > tfoot > tr > th:first-child,
+  .table-responsive > .table-bordered > thead > tr > td:first-child,
+  .table-responsive > .table-bordered > tbody > tr > td:first-child,
+  .table-responsive > .table-bordered > tfoot > tr > td:first-child {
+    border-left: 0;
+  }
+  .table-responsive > .table-bordered > thead > tr > th:last-child,
+  .table-responsive > .table-bordered > tbody > tr > th:last-child,
+  .table-responsive > .table-bordered > tfoot > tr > th:last-child,
+  .table-responsive > .table-bordered > thead > tr > td:last-child,
+  .table-responsive > .table-bordered > tbody > tr > td:last-child,
+  .table-responsive > .table-bordered > tfoot > tr > td:last-child {
+    border-right: 0;
+  }
+  .table-responsive > .table-bordered > tbody > tr:last-child > th,
+  .table-responsive > .table-bordered > tfoot > tr:last-child > th,
+  .table-responsive > .table-bordered > tbody > tr:last-child > td,
+  .table-responsive > .table-bordered > tfoot > tr:last-child > td {
+    border-bottom: 0;
+  }
+}
+fieldset {
+  min-width: 0;
+  padding: 0;
+  margin: 0;
+  border: 0;
+}
+legend {
+  display: block;
+  width: 100%;
+  padding: 0;
+  margin-bottom: 20px;
+  font-size: 21px;
+  line-height: inherit;
+  color: #333;
+  border: 0;
+  border-bottom: 1px solid #e5e5e5;
+}
+label {
+  display: inline-block;
+  margin-bottom: 5px;
+  font-weight: bold;
+}
+input[type="search"] {
+  -webkit-box-sizing: border-box;
+     -moz-box-sizing: border-box;
+          box-sizing: border-box;
+}
+input[type="radio"],
+input[type="checkbox"] {
+  margin: 4px 0 0;
+  margin-top: 1px \9;
+  /* IE8-9 */
+  line-height: normal;
+}
+input[type="file"] {
+  display: block;
+}
+input[type="range"] {
+  display: block;
+  width: 100%;
+}
+select[multiple],
+select[size] {
+  height: auto;
+}
+input[type="file"]:focus,
+input[type="radio"]:focus,
+input[type="checkbox"]:focus {
+  outline: thin dotted;
+  outline: 5px auto -webkit-focus-ring-color;
+  outline-offset: -2px;
+}
+output {
+  display: block;
+  padding-top: 7px;
+  font-size: 14px;
+  line-height: 1.428571429;
+  color: #555;
+}
+.form-control {
+  display: block;
+  width: 100%;
+  height: 34px;
+  padding: 6px 12px;
+  font-size: 14px;
+  line-height: 1.428571429;
+  color: #555;
+  background-color: #fff;
+  background-image: none;
+  border: 1px solid #ccc;
+  border-radius: 4px;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+  -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;
+          transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;
+}
+.form-control:focus {
+  border-color: #66afe9;
+  outline: 0;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);
+          box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);
+}
+.form-control:-moz-placeholder {
+  color: #999;
+}
+.form-control::-moz-placeholder {
+  color: #999;
+  opacity: 1;
+}
+.form-control:-ms-input-placeholder {
+  color: #999;
+}
+.form-control::-webkit-input-placeholder {
+  color: #999;
+}
+.form-control[disabled],
+.form-control[readonly],
+fieldset[disabled] .form-control {
+  cursor: not-allowed;
+  background-color: #eee;
+  opacity: 1;
+}
+textarea.form-control {
+  height: auto;
+}
+input[type="date"] {
+  line-height: 34px;
+}
+.form-group {
+  margin-bottom: 15px;
+}
+.radio,
+.checkbox {
+  display: block;
+  min-height: 20px;
+  padding-left: 20px;
+  margin-top: 10px;
+  margin-bottom: 10px;
+}
+.radio label,
+.checkbox label {
+  display: inline;
+  font-weight: normal;
+  cursor: pointer;
+}
+.radio input[type="radio"],
+.radio-inline input[type="radio"],
+.checkbox input[type="checkbox"],
+.checkbox-inline input[type="checkbox"] {
+  float: left;
+  margin-left: -20px;
+}
+.radio + .radio,
+.checkbox + .checkbox {
+  margin-top: -5px;
+}
+.radio-inline,
+.checkbox-inline {
+  display: inline-block;
+  padding-left: 20px;
+  margin-bottom: 0;
+  font-weight: normal;
+  vertical-align: middle;
+  cursor: pointer;
+}
+.radio-inline + .radio-inline,
+.checkbox-inline + .checkbox-inline {
+  margin-top: 0;
+  margin-left: 10px;
+}
+input[type="radio"][disabled],
+input[type="checkbox"][disabled],
+.radio[disabled],
+.radio-inline[disabled],
+.checkbox[disabled],
+.checkbox-inline[disabled],
+fieldset[disabled] input[type="radio"],
+fieldset[disabled] input[type="checkbox"],
+fieldset[disabled] .radio,
+fieldset[disabled] .radio-inline,
+fieldset[disabled] .checkbox,
+fieldset[disabled] .checkbox-inline {
+  cursor: not-allowed;
+}
+.input-sm {
+  height: 30px;
+  padding: 5px 10px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+select.input-sm {
+  height: 30px;
+  line-height: 30px;
+}
+textarea.input-sm,
+select[multiple].input-sm {
+  height: auto;
+}
+.input-lg {
+  height: 46px;
+  padding: 10px 16px;
+  font-size: 18px;
+  line-height: 1.33;
+  border-radius: 6px;
+}
+select.input-lg {
+  height: 46px;
+  line-height: 46px;
+}
+textarea.input-lg,
+select[multiple].input-lg {
+  height: auto;
+}
+.has-feedback {
+  position: relative;
+}
+.has-feedback .form-control {
+  padding-right: 42.5px;
+}
+.has-feedback .form-control-feedback {
+  position: absolute;
+  top: 25px;
+  right: 0;
+  display: block;
+  width: 34px;
+  height: 34px;
+  line-height: 34px;
+  text-align: center;
+}
+.has-success .help-block,
+.has-success .control-label,
+.has-success .radio,
+.has-success .checkbox,
+.has-success .radio-inline,
+.has-success .checkbox-inline {
+  color: #3c763d;
+}
+.has-success .form-control {
+  border-color: #3c763d;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+}
+.has-success .form-control:focus {
+  border-color: #2b542c;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;
+}
+.has-success .input-group-addon {
+  color: #3c763d;
+  background-color: #dff0d8;
+  border-color: #3c763d;
+}
+.has-success .form-control-feedback {
+  color: #3c763d;
+}
+.has-warning .help-block,
+.has-warning .control-label,
+.has-warning .radio,
+.has-warning .checkbox,
+.has-warning .radio-inline,
+.has-warning .checkbox-inline {
+  color: #8a6d3b;
+}
+.has-warning .form-control {
+  border-color: #8a6d3b;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+}
+.has-warning .form-control:focus {
+  border-color: #66512c;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;
+}
+.has-warning .input-group-addon {
+  color: #8a6d3b;
+  background-color: #fcf8e3;
+  border-color: #8a6d3b;
+}
+.has-warning .form-control-feedback {
+  color: #8a6d3b;
+}
+.has-error .help-block,
+.has-error .control-label,
+.has-error .radio,
+.has-error .checkbox,
+.has-error .radio-inline,
+.has-error .checkbox-inline {
+  color: #a94442;
+}
+.has-error .form-control {
+  border-color: #a94442;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+}
+.has-error .form-control:focus {
+  border-color: #843534;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;
+}
+.has-error .input-group-addon {
+  color: #a94442;
+  background-color: #f2dede;
+  border-color: #a94442;
+}
+.has-error .form-control-feedback {
+  color: #a94442;
+}
+.form-control-static {
+  margin-bottom: 0;
+}
+.help-block {
+  display: block;
+  margin-top: 5px;
+  margin-bottom: 10px;
+  color: #737373;
+}
+@media (min-width: 768px) {
+  .form-inline .form-group {
+    display: inline-block;
+    margin-bottom: 0;
+    vertical-align: middle;
+  }
+  .form-inline .form-control {
+    display: inline-block;
+    width: auto;
+    vertical-align: middle;
+  }
+  .form-inline .control-label {
+    margin-bottom: 0;
+    vertical-align: middle;
+  }
+  .form-inline .radio,
+  .form-inline .checkbox {
+    display: inline-block;
+    padding-left: 0;
+    margin-top: 0;
+    margin-bottom: 0;
+    vertical-align: middle;
+  }
+  .form-inline .radio input[type="radio"],
+  .form-inline .checkbox input[type="checkbox"] {
+    float: none;
+    margin-left: 0;
+  }
+  .form-inline .has-feedback .form-control-feedback {
+    top: 0;
+  }
+}
+.form-horizontal .control-label,
+.form-horizontal .radio,
+.form-horizontal .checkbox,
+.form-horizontal .radio-inline,
+.form-horizontal .checkbox-inline {
+  padding-top: 7px;
+  margin-top: 0;
+  margin-bottom: 0;
+}
+.form-horizontal .radio,
+.form-horizontal .checkbox {
+  min-height: 27px;
+}
+.form-horizontal .form-group {
+  margin-right: -15px;
+  margin-left: -15px;
+}
+.form-horizontal .form-control-static {
+  padding-top: 7px;
+}
+@media (min-width: 768px) {
+  .form-horizontal .control-label {
+    text-align: right;
+  }
+}
+.form-horizontal .has-feedback .form-control-feedback {
+  top: 0;
+  right: 15px;
+}
+.btn {
+  display: inline-block;
+  padding: 6px 12px;
+  margin-bottom: 0;
+  font-size: 14px;
+  font-weight: normal;
+  line-height: 1.428571429;
+  text-align: center;
+  white-space: nowrap;
+  vertical-align: middle;
+  cursor: pointer;
+  -webkit-user-select: none;
+     -moz-user-select: none;
+      -ms-user-select: none;
+       -o-user-select: none;
+          user-select: none;
+  background-image: none;
+  border: 1px solid transparent;
+  border-radius: 4px;
+}
+.btn:focus {
+  outline: thin dotted;
+  outline: 5px auto -webkit-focus-ring-color;
+  outline-offset: -2px;
+}
+.btn:hover,
+.btn:focus {
+  color: #333;
+  text-decoration: none;
+}
+.btn:active,
+.btn.active {
+  background-image: none;
+  outline: 0;
+  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+          box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+}
+.btn.disabled,
+.btn[disabled],
+fieldset[disabled] .btn {
+  pointer-events: none;
+  cursor: not-allowed;
+  filter: alpha(opacity=65);
+  -webkit-box-shadow: none;
+          box-shadow: none;
+  opacity: .65;
+}
+.btn-default {
+  color: #333;
+  background-color: #fff;
+  border-color: #ccc;
+}
+.btn-default:hover,
+.btn-default:focus,
+.btn-default:active,
+.btn-default.active,
+.open .dropdown-toggle.btn-default {
+  color: #333;
+  background-color: #ebebeb;
+  border-color: #adadad;
+}
+.btn-default:active,
+.btn-default.active,
+.open .dropdown-toggle.btn-default {
+  background-image: none;
+}
+.btn-default.disabled,
+.btn-default[disabled],
+fieldset[disabled] .btn-default,
+.btn-default.disabled:hover,
+.btn-default[disabled]:hover,
+fieldset[disabled] .btn-default:hover,
+.btn-default.disabled:focus,
+.btn-default[disabled]:focus,
+fieldset[disabled] .btn-default:focus,
+.btn-default.disabled:active,
+.btn-default[disabled]:active,
+fieldset[disabled] .btn-default:active,
+.btn-default.disabled.active,
+.btn-default[disabled].active,
+fieldset[disabled] .btn-default.active {
+  background-color: #fff;
+  border-color: #ccc;
+}
+.btn-default .badge {
+  color: #fff;
+  background-color: #333;
+}
+.btn-primary {
+  color: #fff;
+  background-color: #428bca;
+  border-color: #357ebd;
+}
+.btn-primary:hover,
+.btn-primary:focus,
+.btn-primary:active,
+.btn-primary.active,
+.open .dropdown-toggle.btn-primary {
+  color: #fff;
+  background-color: #3276b1;
+  border-color: #285e8e;
+}
+.btn-primary:active,
+.btn-primary.active,
+.open .dropdown-toggle.btn-primary {
+  background-image: none;
+}
+.btn-primary.disabled,
+.btn-primary[disabled],
+fieldset[disabled] .btn-primary,
+.btn-primary.disabled:hover,
+.btn-primary[disabled]:hover,
+fieldset[disabled] .btn-primary:hover,
+.btn-primary.disabled:focus,
+.btn-primary[disabled]:focus,
+fieldset[disabled] .btn-primary:focus,
+.btn-primary.disabled:active,
+.btn-primary[disabled]:active,
+fieldset[disabled] .btn-primary:active,
+.btn-primary.disabled.active,
+.btn-primary[disabled].active,
+fieldset[disabled] .btn-primary.active {
+  background-color: #428bca;
+  border-color: #357ebd;
+}
+.btn-primary .badge {
+  color: #428bca;
+  background-color: #fff;
+}
+.btn-success {
+  color: #fff;
+  background-color: #5cb85c;
+  border-color: #4cae4c;
+}
+.btn-success:hover,
+.btn-success:focus,
+.btn-success:active,
+.btn-success.active,
+.open .dropdown-toggle.btn-success {
+  color: #fff;
+  background-color: #47a447;
+  border-color: #398439;
+}
+.btn-success:active,
+.btn-success.active,
+.open .dropdown-toggle.btn-success {
+  background-image: none;
+}
+.btn-success.disabled,
+.btn-success[disabled],
+fieldset[disabled] .btn-success,
+.btn-success.disabled:hover,
+.btn-success[disabled]:hover,
+fieldset[disabled] .btn-success:hover,
+.btn-success.disabled:focus,
+.btn-success[disabled]:focus,
+fieldset[disabled] .btn-success:focus,
+.btn-success.disabled:active,
+.btn-success[disabled]:active,
+fieldset[disabled] .btn-success:active,
+.btn-success.disabled.active,
+.btn-success[disabled].active,
+fieldset[disabled] .btn-success.active {
+  background-color: #5cb85c;
+  border-color: #4cae4c;
+}
+.btn-success .badge {
+  color: #5cb85c;
+  background-color: #fff;
+}
+.btn-info {
+  color: #fff;
+  background-color: #5bc0de;
+  border-color: #46b8da;
+}
+.btn-info:hover,
+.btn-info:focus,
+.btn-info:active,
+.btn-info.active,
+.open .dropdown-toggle.btn-info {
+  color: #fff;
+  background-color: #39b3d7;
+  border-color: #269abc;
+}
+.btn-info:active,
+.btn-info.active,
+.open .dropdown-toggle.btn-info {
+  background-image: none;
+}
+.btn-info.disabled,
+.btn-info[disabled],
+fieldset[disabled] .btn-info,
+.btn-info.disabled:hover,
+.btn-info[disabled]:hover,
+fieldset[disabled] .btn-info:hover,
+.btn-info.disabled:focus,
+.btn-info[disabled]:focus,
+fieldset[disabled] .btn-info:focus,
+.btn-info.disabled:active,
+.btn-info[disabled]:active,
+fieldset[disabled] .btn-info:active,
+.btn-info.disabled.active,
+.btn-info[disabled].active,
+fieldset[disabled] .btn-info.active {
+  background-color: #5bc0de;
+  border-color: #46b8da;
+}
+.btn-info .badge {
+  color: #5bc0de;
+  background-color: #fff;
+}
+.btn-warning {
+  color: #fff;
+  background-color: #f0ad4e;
+  border-color: #eea236;
+}
+.btn-warning:hover,
+.btn-warning:focus,
+.btn-warning:active,
+.btn-warning.active,
+.open .dropdown-toggle.btn-warning {
+  color: #fff;
+  background-color: #ed9c28;
+  border-color: #d58512;
+}
+.btn-warning:active,
+.btn-warning.active,
+.open .dropdown-toggle.btn-warning {
+  background-image: none;
+}
+.btn-warning.disabled,
+.btn-warning[disabled],
+fieldset[disabled] .btn-warning,
+.btn-warning.disabled:hover,
+.btn-warning[disabled]:hover,
+fieldset[disabled] .btn-warning:hover,
+.btn-warning.disabled:focus,
+.btn-warning[disabled]:focus,
+fieldset[disabled] .btn-warning:focus,
+.btn-warning.disabled:active,
+.btn-warning[disabled]:active,
+fieldset[disabled] .btn-warning:active,
+.btn-warning.disabled.active,
+.btn-warning[disabled].active,
+fieldset[disabled] .btn-warning.active {
+  background-color: #f0ad4e;
+  border-color: #eea236;
+}
+.btn-warning .badge {
+  color: #f0ad4e;
+  background-color: #fff;
+}
+.btn-danger {
+  color: #fff;
+  background-color: #d9534f;
+  border-color: #d43f3a;
+}
+.btn-danger:hover,
+.btn-danger:focus,
+.btn-danger:active,
+.btn-danger.active,
+.open .dropdown-toggle.btn-danger {
+  color: #fff;
+  background-color: #d2322d;
+  border-color: #ac2925;
+}
+.btn-danger:active,
+.btn-danger.active,
+.open .dropdown-toggle.btn-danger {
+  background-image: none;
+}
+.btn-danger.disabled,
+.btn-danger[disabled],
+fieldset[disabled] .btn-danger,
+.btn-danger.disabled:hover,
+.btn-danger[disabled]:hover,
+fieldset[disabled] .btn-danger:hover,
+.btn-danger.disabled:focus,
+.btn-danger[disabled]:focus,
+fieldset[disabled] .btn-danger:focus,
+.btn-danger.disabled:active,
+.btn-danger[disabled]:active,
+fieldset[disabled] .btn-danger:active,
+.btn-danger.disabled.active,
+.btn-danger[disabled].active,
+fieldset[disabled] .btn-danger.active {
+  background-color: #d9534f;
+  border-color: #d43f3a;
+}
+.btn-danger .badge {
+  color: #d9534f;
+  background-color: #fff;
+}
+.btn-link {
+  font-weight: normal;
+  color: #428bca;
+  cursor: pointer;
+  border-radius: 0;
+}
+.btn-link,
+.btn-link:active,
+.btn-link[disabled],
+fieldset[disabled] .btn-link {
+  background-color: transparent;
+  -webkit-box-shadow: none;
+          box-shadow: none;
+}
+.btn-link,
+.btn-link:hover,
+.btn-link:focus,
+.btn-link:active {
+  border-color: transparent;
+}
+.btn-link:hover,
+.btn-link:focus {
+  color: #2a6496;
+  text-decoration: underline;
+  background-color: transparent;
+}
+.btn-link[disabled]:hover,
+fieldset[disabled] .btn-link:hover,
+.btn-link[disabled]:focus,
+fieldset[disabled] .btn-link:focus {
+  color: #999;
+  text-decoration: none;
+}
+.btn-lg {
+  padding: 10px 16px;
+  font-size: 18px;
+  line-height: 1.33;
+  border-radius: 6px;
+}
+.btn-sm {
+  padding: 5px 10px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+.btn-xs {
+  padding: 1px 5px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+.btn-block {
+  display: block;
+  width: 100%;
+  padding-right: 0;
+  padding-left: 0;
+}
+.btn-block + .btn-block {
+  margin-top: 5px;
+}
+input[type="submit"].btn-block,
+input[type="reset"].btn-block,
+input[type="button"].btn-block {
+  width: 100%;
+}
+.fade {
+  opacity: 0;
+  -webkit-transition: opacity .15s linear;
+          transition: opacity .15s linear;
+}
+.fade.in {
+  opacity: 1;
+}
+.collapse {
+  display: none;
+}
+.collapse.in {
+  display: block;
+}
+.collapsing {
+  position: relative;
+  height: 0;
+  overflow: hidden;
+  -webkit-transition: height .35s ease;
+          transition: height .35s ease;
+}
+@font-face {
+  font-family: 'Glyphicons Halflings';
+
+  src: url('../fonts/glyphicons-halflings-regular.eot');
+  src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');
+}
+.glyphicon {
+  position: relative;
+  top: 1px;
+  display: inline-block;
+  font-family: 'Glyphicons Halflings';
+  font-style: normal;
+  font-weight: normal;
+  line-height: 1;
+
+  -webkit-font-smoothing: antialiased;
+  -moz-osx-font-smoothing: grayscale;
+}
+.glyphicon-asterisk:before {
+  content: "\2a";
+}
+.glyphicon-plus:before {
+  content: "\2b";
+}
+.glyphicon-euro:before {
+  content: "\20ac";
+}
+.glyphicon-minus:before {
+  content: "\2212";
+}
+.glyphicon-cloud:before {
+  content: "\2601";
+}
+.glyphicon-envelope:before {
+  content: "\2709";
+}
+.glyphicon-pencil:before {
+  content: "\270f";
+}
+.glyphicon-glass:before {
+  content: "\e001";
+}
+.glyphicon-music:before {
+  content: "\e002";
+}
+.glyphicon-search:before {
+  content: "\e003";
+}
+.glyphicon-heart:before {
+  content: "\e005";
+}
+.glyphicon-star:before {
+  content: "\e006";
+}
+.glyphicon-star-empty:before {
+  content: "\e007";
+}
+.glyphicon-user:before {
+  content: "\e008";
+}
+.glyphicon-film:before {
+  content: "\e009";
+}
+.glyphicon-th-large:before {
+  content: "\e010";
+}
+.glyphicon-th:before {
+  content: "\e011";
+}
+.glyphicon-th-list:before {
+  content: "\e012";
+}
+.glyphicon-ok:before {
+  content: "\e013";
+}
+.glyphicon-remove:before {
+  content: "\e014";
+}
+.glyphicon-zoom-in:before {
+  content: "\e015";
+}
+.glyphicon-zoom-out:before {
+  content: "\e016";
+}
+.glyphicon-off:before {
+  content: "\e017";
+}
+.glyphicon-signal:before {
+  content: "\e018";
+}
+.glyphicon-cog:before {
+  content: "\e019";
+}
+.glyphicon-trash:before {
+  content: "\e020";
+}
+.glyphicon-home:before {
+  content: "\e021";
+}
+.glyphicon-file:before {
+  content: "\e022";
+}
+.glyphicon-time:before {
+  content: "\e023";
+}
+.glyphicon-road:before {
+  content: "\e024";
+}
+.glyphicon-download-alt:before {
+  content: "\e025";
+}
+.glyphicon-download:before {
+  content: "\e026";
+}
+.glyphicon-upload:before {
+  content: "\e027";
+}
+.glyphicon-inbox:before {
+  content: "\e028";
+}
+.glyphicon-play-circle:before {
+  content: "\e029";
+}
+.glyphicon-repeat:before {
+  content: "\e030";
+}
+.glyphicon-refresh:before {
+  content: "\e031";
+}
+.glyphicon-list-alt:before {
+  content: "\e032";
+}
+.glyphicon-lock:before {
+  content: "\e033";
+}
+.glyphicon-flag:before {
+  content: "\e034";
+}
+.glyphicon-headphones:before {
+  content: "\e035";
+}
+.glyphicon-volume-off:before {
+  content: "\e036";
+}
+.glyphicon-volume-down:before {
+  content: "\e037";
+}
+.glyphicon-volume-up:before {
+  content: "\e038";
+}
+.glyphicon-qrcode:before {
+  content: "\e039";
+}
+.glyphicon-barcode:before {
+  content: "\e040";
+}
+.glyphicon-tag:before {
+  content: "\e041";
+}
+.glyphicon-tags:before {
+  content: "\e042";
+}
+.glyphicon-book:before {
+  content: "\e043";
+}
+.glyphicon-bookmark:before {
+  content: "\e044";
+}
+.glyphicon-print:before {
+  content: "\e045";
+}
+.glyphicon-camera:before {
+  content: "\e046";
+}
+.glyphicon-font:before {
+  content: "\e047";
+}
+.glyphicon-bold:before {
+  content: "\e048";
+}
+.glyphicon-italic:before {
+  content: "\e049";
+}
+.glyphicon-text-height:before {
+  content: "\e050";
+}
+.glyphicon-text-width:before {
+  content: "\e051";
+}
+.glyphicon-align-left:before {
+  content: "\e052";
+}
+.glyphicon-align-center:before {
+  content: "\e053";
+}
+.glyphicon-align-right:before {
+  content: "\e054";
+}
+.glyphicon-align-justify:before {
+  content: "\e055";
+}
+.glyphicon-list:before {
+  content: "\e056";
+}
+.glyphicon-indent-left:before {
+  content: "\e057";
+}
+.glyphicon-indent-right:before {
+  content: "\e058";
+}
+.glyphicon-facetime-video:before {
+  content: "\e059";
+}
+.glyphicon-picture:before {
+  content: "\e060";
+}
+.glyphicon-map-marker:before {
+  content: "\e062";
+}
+.glyphicon-adjust:before {
+  content: "\e063";
+}
+.glyphicon-tint:before {
+  content: "\e064";
+}
+.glyphicon-edit:before {
+  content: "\e065";
+}
+.glyphicon-share:before {
+  content: "\e066";
+}
+.glyphicon-check:before {
+  content: "\e067";
+}
+.glyphicon-move:before {
+  content: "\e068";
+}
+.glyphicon-step-backward:before {
+  content: "\e069";
+}
+.glyphicon-fast-backward:before {
+  content: "\e070";
+}
+.glyphicon-backward:before {
+  content: "\e071";
+}
+.glyphicon-play:before {
+  content: "\e072";
+}
+.glyphicon-pause:before {
+  content: "\e073";
+}
+.glyphicon-stop:before {
+  content: "\e074";
+}
+.glyphicon-forward:before {
+  content: "\e075";
+}
+.glyphicon-fast-forward:before {
+  content: "\e076";
+}
+.glyphicon-step-forward:before {
+  content: "\e077";
+}
+.glyphicon-eject:before {
+  content: "\e078";
+}
+.glyphicon-chevron-left:before {
+  content: "\e079";
+}
+.glyphicon-chevron-right:before {
+  content: "\e080";
+}
+.glyphicon-plus-sign:before {
+  content: "\e081";
+}
+.glyphicon-minus-sign:before {
+  content: "\e082";
+}
+.glyphicon-remove-sign:before {
+  content: "\e083";
+}
+.glyphicon-ok-sign:before {
+  content: "\e084";
+}
+.glyphicon-question-sign:before {
+  content: "\e085";
+}
+.glyphicon-info-sign:before {
+  content: "\e086";
+}
+.glyphicon-screenshot:before {
+  content: "\e087";
+}
+.glyphicon-remove-circle:before {
+  content: "\e088";
+}
+.glyphicon-ok-circle:before {
+  content: "\e089";
+}
+.glyphicon-ban-circle:before {
+  content: "\e090";
+}
+.glyphicon-arrow-left:before {
+  content: "\e091";
+}
+.glyphicon-arrow-right:before {
+  content: "\e092";
+}
+.glyphicon-arrow-up:before {
+  content: "\e093";
+}
+.glyphicon-arrow-down:before {
+  content: "\e094";
+}
+.glyphicon-share-alt:before {
+  content: "\e095";
+}
+.glyphicon-resize-full:before {
+  content: "\e096";
+}
+.glyphicon-resize-small:before {
+  content: "\e097";
+}
+.glyphicon-exclamation-sign:before {
+  content: "\e101";
+}
+.glyphicon-gift:before {
+  content: "\e102";
+}
+.glyphicon-leaf:before {
+  content: "\e103";
+}
+.glyphicon-fire:before {
+  content: "\e104";
+}
+.glyphicon-eye-open:before {
+  content: "\e105";
+}
+.glyphicon-eye-close:before {
+  content: "\e106";
+}
+.glyphicon-warning-sign:before {
+  content: "\e107";
+}
+.glyphicon-plane:before {
+  content: "\e108";
+}
+.glyphicon-calendar:before {
+  content: "\e109";
+}
+.glyphicon-random:before {
+  content: "\e110";
+}
+.glyphicon-comment:before {
+  content: "\e111";
+}
+.glyphicon-magnet:before {
+  content: "\e112";
+}
+.glyphicon-chevron-up:before {
+  content: "\e113";
+}
+.glyphicon-chevron-down:before {
+  content: "\e114";
+}
+.glyphicon-retweet:before {
+  content: "\e115";
+}
+.glyphicon-shopping-cart:before {
+  content: "\e116";
+}
+.glyphicon-folder-close:before {
+  content: "\e117";
+}
+.glyphicon-folder-open:before {
+  content: "\e118";
+}
+.glyphicon-resize-vertical:before {
+  content: "\e119";
+}
+.glyphicon-resize-horizontal:before {
+  content: "\e120";
+}
+.glyphicon-hdd:before {
+  content: "\e121";
+}
+.glyphicon-bullhorn:before {
+  content: "\e122";
+}
+.glyphicon-bell:before {
+  content: "\e123";
+}
+.glyphicon-certificate:before {
+  content: "\e124";
+}
+.glyphicon-thumbs-up:before {
+  content: "\e125";
+}
+.glyphicon-thumbs-down:before {
+  content: "\e126";
+}
+.glyphicon-hand-right:before {
+  content: "\e127";
+}
+.glyphicon-hand-left:before {
+  content: "\e128";
+}
+.glyphicon-hand-up:before {
+  content: "\e129";
+}
+.glyphicon-hand-down:before {
+  content: "\e130";
+}
+.glyphicon-circle-arrow-right:before {
+  content: "\e131";
+}
+.glyphicon-circle-arrow-left:before {
+  content: "\e132";
+}
+.glyphicon-circle-arrow-up:before {
+  content: "\e133";
+}
+.glyphicon-circle-arrow-down:before {
+  content: "\e134";
+}
+.glyphicon-globe:before {
+  content: "\e135";
+}
+.glyphicon-wrench:before {
+  content: "\e136";
+}
+.glyphicon-tasks:before {
+  content: "\e137";
+}
+.glyphicon-filter:before {
+  content: "\e138";
+}
+.glyphicon-briefcase:before {
+  content: "\e139";
+}
+.glyphicon-fullscreen:before {
+  content: "\e140";
+}
+.glyphicon-dashboard:before {
+  content: "\e141";
+}
+.glyphicon-paperclip:before {
+  content: "\e142";
+}
+.glyphicon-heart-empty:before {
+  content: "\e143";
+}
+.glyphicon-link:before {
+  content: "\e144";
+}
+.glyphicon-phone:before {
+  content: "\e145";
+}
+.glyphicon-pushpin:before {
+  content: "\e146";
+}
+.glyphicon-usd:before {
+  content: "\e148";
+}
+.glyphicon-gbp:before {
+  content: "\e149";
+}
+.glyphicon-sort:before {
+  content: "\e150";
+}
+.glyphicon-sort-by-alphabet:before {
+  content: "\e151";
+}
+.glyphicon-sort-by-alphabet-alt:before {
+  content: "\e152";
+}
+.glyphicon-sort-by-order:before {
+  content: "\e153";
+}
+.glyphicon-sort-by-order-alt:before {
+  content: "\e154";
+}
+.glyphicon-sort-by-attributes:before {
+  content: "\e155";
+}
+.glyphicon-sort-by-attributes-alt:before {
+  content: "\e156";
+}
+.glyphicon-unchecked:before {
+  content: "\e157";
+}
+.glyphicon-expand:before {
+  content: "\e158";
+}
+.glyphicon-collapse-down:before {
+  content: "\e159";
+}
+.glyphicon-collapse-up:before {
+  content: "\e160";
+}
+.glyphicon-log-in:before {
+  content: "\e161";
+}
+.glyphicon-flash:before {
+  content: "\e162";
+}
+.glyphicon-log-out:before {
+  content: "\e163";
+}
+.glyphicon-new-window:before {
+  content: "\e164";
+}
+.glyphicon-record:before {
+  content: "\e165";
+}
+.glyphicon-save:before {
+  content: "\e166";
+}
+.glyphicon-open:before {
+  content: "\e167";
+}
+.glyphicon-saved:before {
+  content: "\e168";
+}
+.glyphicon-import:before {
+  content: "\e169";
+}
+.glyphicon-export:before {
+  content: "\e170";
+}
+.glyphicon-send:before {
+  content: "\e171";
+}
+.glyphicon-floppy-disk:before {
+  content: "\e172";
+}
+.glyphicon-floppy-saved:before {
+  content: "\e173";
+}
+.glyphicon-floppy-remove:before {
+  content: "\e174";
+}
+.glyphicon-floppy-save:before {
+  content: "\e175";
+}
+.glyphicon-floppy-open:before {
+  content: "\e176";
+}
+.glyphicon-credit-card:before {
+  content: "\e177";
+}
+.glyphicon-transfer:before {
+  content: "\e178";
+}
+.glyphicon-cutlery:before {
+  content: "\e179";
+}
+.glyphicon-header:before {
+  content: "\e180";
+}
+.glyphicon-compressed:before {
+  content: "\e181";
+}
+.glyphicon-earphone:before {
+  content: "\e182";
+}
+.glyphicon-phone-alt:before {
+  content: "\e183";
+}
+.glyphicon-tower:before {
+  content: "\e184";
+}
+.glyphicon-stats:before {
+  content: "\e185";
+}
+.glyphicon-sd-video:before {
+  content: "\e186";
+}
+.glyphicon-hd-video:before {
+  content: "\e187";
+}
+.glyphicon-subtitles:before {
+  content: "\e188";
+}
+.glyphicon-sound-stereo:before {
+  content: "\e189";
+}
+.glyphicon-sound-dolby:before {
+  content: "\e190";
+}
+.glyphicon-sound-5-1:before {
+  content: "\e191";
+}
+.glyphicon-sound-6-1:before {
+  content: "\e192";
+}
+.glyphicon-sound-7-1:before {
+  content: "\e193";
+}
+.glyphicon-copyright-mark:before {
+  content: "\e194";
+}
+.glyphicon-registration-mark:before {
+  content: "\e195";
+}
+.glyphicon-cloud-download:before {
+  content: "\e197";
+}
+.glyphicon-cloud-upload:before {
+  content: "\e198";
+}
+.glyphicon-tree-conifer:before {
+  content: "\e199";
+}
+.glyphicon-tree-deciduous:before {
+  content: "\e200";
+}
+.caret {
+  display: inline-block;
+  width: 0;
+  height: 0;
+  margin-left: 2px;
+  vertical-align: middle;
+  border-top: 4px solid;
+  border-right: 4px solid transparent;
+  border-left: 4px solid transparent;
+}
+.dropdown {
+  position: relative;
+}
+.dropdown-toggle:focus {
+  outline: 0;
+}
+.dropdown-menu {
+  position: absolute;
+  top: 100%;
+  left: 0;
+  z-index: 1000;
+  display: none;
+  float: left;
+  min-width: 160px;
+  padding: 5px 0;
+  margin: 2px 0 0;
+  font-size: 14px;
+  list-style: none;
+  background-color: #fff;
+  background-clip: padding-box;
+  border: 1px solid #ccc;
+  border: 1px solid rgba(0, 0, 0, .15);
+  border-radius: 4px;
+  -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, .175);
+          box-shadow: 0 6px 12px rgba(0, 0, 0, .175);
+}
+.dropdown-menu.pull-right {
+  right: 0;
+  left: auto;
+}
+.dropdown-menu .divider {
+  height: 1px;
+  margin: 9px 0;
+  overflow: hidden;
+  background-color: #e5e5e5;
+}
+.dropdown-menu > li > a {
+  display: block;
+  padding: 3px 20px;
+  clear: both;
+  font-weight: normal;
+  line-height: 1.428571429;
+  color: #333;
+  white-space: nowrap;
+}
+.dropdown-menu > li > a:hover,
+.dropdown-menu > li > a:focus {
+  color: #262626;
+  text-decoration: none;
+  background-color: #f5f5f5;
+}
+.dropdown-menu > .active > a,
+.dropdown-menu > .active > a:hover,
+.dropdown-menu > .active > a:focus {
+  color: #fff;
+  text-decoration: none;
+  background-color: #428bca;
+  outline: 0;
+}
+.dropdown-menu > .disabled > a,
+.dropdown-menu > .disabled > a:hover,
+.dropdown-menu > .disabled > a:focus {
+  color: #999;
+}
+.dropdown-menu > .disabled > a:hover,
+.dropdown-menu > .disabled > a:focus {
+  text-decoration: none;
+  cursor: not-allowed;
+  background-color: transparent;
+  background-image: none;
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+}
+.open > .dropdown-menu {
+  display: block;
+}
+.open > a {
+  outline: 0;
+}
+.dropdown-menu-right {
+  right: 0;
+  left: auto;
+}
+.dropdown-menu-left {
+  right: auto;
+  left: 0;
+}
+.dropdown-header {
+  display: block;
+  padding: 3px 20px;
+  font-size: 12px;
+  line-height: 1.428571429;
+  color: #999;
+}
+.dropdown-backdrop {
+  position: fixed;
+  top: 0;
+  right: 0;
+  bottom: 0;
+  left: 0;
+  z-index: 990;
+}
+.pull-right > .dropdown-menu {
+  right: 0;
+  left: auto;
+}
+.dropup .caret,
+.navbar-fixed-bottom .dropdown .caret {
+  content: "";
+  border-top: 0;
+  border-bottom: 4px solid;
+}
+.dropup .dropdown-menu,
+.navbar-fixed-bottom .dropdown .dropdown-menu {
+  top: auto;
+  bottom: 100%;
+  margin-bottom: 1px;
+}
+@media (min-width: 768px) {
+  .navbar-right .dropdown-menu {
+    right: 0;
+    left: auto;
+  }
+  .navbar-right .dropdown-menu-left {
+    right: auto;
+    left: 0;
+  }
+}
+.btn-group,
+.btn-group-vertical {
+  position: relative;
+  display: inline-block;
+  vertical-align: middle;
+}
+.btn-group > .btn,
+.btn-group-vertical > .btn {
+  position: relative;
+  float: left;
+}
+.btn-group > .btn:hover,
+.btn-group-vertical > .btn:hover,
+.btn-group > .btn:focus,
+.btn-group-vertical > .btn:focus,
+.btn-group > .btn:active,
+.btn-group-vertical > .btn:active,
+.btn-group > .btn.active,
+.btn-group-vertical > .btn.active {
+  z-index: 2;
+}
+.btn-group > .btn:focus,
+.btn-group-vertical > .btn:focus {
+  outline: none;
+}
+.btn-group .btn + .btn,
+.btn-group .btn + .btn-group,
+.btn-group .btn-group + .btn,
+.btn-group .btn-group + .btn-group {
+  margin-left: -1px;
+}
+.btn-toolbar {
+  margin-left: -5px;
+}
+.btn-toolbar .btn-group,
+.btn-toolbar .input-group {
+  float: left;
+}
+.btn-toolbar > .btn,
+.btn-toolbar > .btn-group,
+.btn-toolbar > .input-group {
+  margin-left: 5px;
+}
+.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {
+  border-radius: 0;
+}
+.btn-group > .btn:first-child {
+  margin-left: 0;
+}
+.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {
+  border-top-right-radius: 0;
+  border-bottom-right-radius: 0;
+}
+.btn-group > .btn:last-child:not(:first-child),
+.btn-group > .dropdown-toggle:not(:first-child) {
+  border-top-left-radius: 0;
+  border-bottom-left-radius: 0;
+}
+.btn-group > .btn-group {
+  float: left;
+}
+.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {
+  border-radius: 0;
+}
+.btn-group > .btn-group:first-child > .btn:last-child,
+.btn-group > .btn-group:first-child > .dropdown-toggle {
+  border-top-right-radius: 0;
+  border-bottom-right-radius: 0;
+}
+.btn-group > .btn-group:last-child > .btn:first-child {
+  border-top-left-radius: 0;
+  border-bottom-left-radius: 0;
+}
+.btn-group .dropdown-toggle:active,
+.btn-group.open .dropdown-toggle {
+  outline: 0;
+}
+.btn-group-xs > .btn {
+  padding: 1px 5px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+.btn-group-sm > .btn {
+  padding: 5px 10px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+.btn-group-lg > .btn {
+  padding: 10px 16px;
+  font-size: 18px;
+  line-height: 1.33;
+  border-radius: 6px;
+}
+.btn-group > .btn + .dropdown-toggle {
+  padding-right: 8px;
+  padding-left: 8px;
+}
+.btn-group > .btn-lg + .dropdown-toggle {
+  padding-right: 12px;
+  padding-left: 12px;
+}
+.btn-group.open .dropdown-toggle {
+  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+          box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+}
+.btn-group.open .dropdown-toggle.btn-link {
+  -webkit-box-shadow: none;
+          box-shadow: none;
+}
+.btn .caret {
+  margin-left: 0;
+}
+.btn-lg .caret {
+  border-width: 5px 5px 0;
+  border-bottom-width: 0;
+}
+.dropup .btn-lg .caret {
+  border-width: 0 5px 5px;
+}
+.btn-group-vertical > .btn,
+.btn-group-vertical > .btn-group,
+.btn-group-vertical > .btn-group > .btn {
+  display: block;
+  float: none;
+  width: 100%;
+  max-width: 100%;
+}
+.btn-group-vertical > .btn-group > .btn {
+  float: none;
+}
+.btn-group-vertical > .btn + .btn,
+.btn-group-vertical > .btn + .btn-group,
+.btn-group-vertical > .btn-group + .btn,
+.btn-group-vertical > .btn-group + .btn-group {
+  margin-top: -1px;
+  margin-left: 0;
+}
+.btn-group-vertical > .btn:not(:first-child):not(:last-child) {
+  border-radius: 0;
+}
+.btn-group-vertical > .btn:first-child:not(:last-child) {
+  border-top-right-radius: 4px;
+  border-bottom-right-radius: 0;
+  border-bottom-left-radius: 0;
+}
+.btn-group-vertical > .btn:last-child:not(:first-child) {
+  border-top-left-radius: 0;
+  border-top-right-radius: 0;
+  border-bottom-left-radius: 4px;
+}
+.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {
+  border-radius: 0;
+}
+.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,
+.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {
+  border-bottom-right-radius: 0;
+  border-bottom-left-radius: 0;
+}
+.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {
+  border-top-left-radius: 0;
+  border-top-right-radius: 0;
+}
+.btn-group-justified {
+  display: table;
+  width: 100%;
+  table-layout: fixed;
+  border-collapse: separate;
+}
+.btn-group-justified > .btn,
+.btn-group-justified > .btn-group {
+  display: table-cell;
+  float: none;
+  width: 1%;
+}
+.btn-group-justified > .btn-group .btn {
+  width: 100%;
+}
+[data-toggle="buttons"] > .btn > input[type="radio"],
+[data-toggle="buttons"] > .btn > input[type="checkbox"] {
+  display: none;
+}
+.input-group {
+  position: relative;
+  display: table;
+  border-collapse: separate;
+}
+.input-group[class*="col-"] {
+  float: none;
+  padding-right: 0;
+  padding-left: 0;
+}
+.input-group .form-control {
+  float: left;
+  width: 100%;
+  margin-bottom: 0;
+}
+.input-group-lg > .form-control,
+.input-group-lg > .input-group-addon,
+.input-group-lg > .input-group-btn > .btn {
+  height: 46px;
+  padding: 10px 16px;
+  font-size: 18px;
+  line-height: 1.33;
+  border-radius: 6px;
+}
+select.input-group-lg > .form-control,
+select.input-group-lg > .input-group-addon,
+select.input-group-lg > .input-group-btn > .btn {
+  height: 46px;
+  line-height: 46px;
+}
+textarea.input-group-lg > .form-control,
+textarea.input-group-lg > .input-group-addon,
+textarea.input-group-lg > .input-group-btn > .btn,
+select[multiple].input-group-lg > .form-control,
+select[multiple].input-group-lg > .input-group-addon,
+select[multiple].input-group-lg > .input-group-btn > .btn {
+  height: auto;
+}
+.input-group-sm > .form-control,
+.input-group-sm > .input-group-addon,
+.input-group-sm > .input-group-btn > .btn {
+  height: 30px;
+  padding: 5px 10px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+select.input-group-sm > .form-control,
+select.input-group-sm > .input-group-addon,
+select.input-group-sm > .input-group-btn > .btn {
+  height: 30px;
+  line-height: 30px;
+}
+textarea.input-group-sm > .form-control,
+textarea.input-group-sm > .input-group-addon,
+textarea.input-group-sm > .input-group-btn > .btn,
+select[multiple].input-group-sm > .form-control,
+select[multiple].input-group-sm > .input-group-addon,
+select[multiple].input-group-sm > .input-group-btn > .btn {
+  height: auto;
+}
+.input-group-addon,
+.input-group-btn,
+.input-group .form-control {
+  display: table-cell;
+}
+.input-group-addon:not(:first-child):not(:last-child),
+.input-group-btn:not(:first-child):not(:last-child),
+.input-group .form-control:not(:first-child):not(:last-child) {
+  border-radius: 0;
+}
+.input-group-addon,
+.input-group-btn {
+  width: 1%;
+  white-space: nowrap;
+  vertical-align: middle;
+}
+.input-group-addon {
+  padding: 6px 12px;
+  font-size: 14px;
+  font-weight: normal;
+  line-height: 1;
+  color: #555;
+  text-align: center;
+  background-color: #eee;
+  border: 1px solid #ccc;
+  border-radius: 4px;
+}
+.input-group-addon.input-sm {
+  padding: 5px 10px;
+  font-size: 12px;
+  border-radius: 3px;
+}
+.input-group-addon.input-lg {
+  padding: 10px 16px;
+  font-size: 18px;
+  border-radius: 6px;
+}
+.input-group-addon input[type="radio"],
+.input-group-addon input[type="checkbox"] {
+  margin-top: 0;
+}
+.input-group .form-control:first-child,
+.input-group-addon:first-child,
+.input-group-btn:first-child > .btn,
+.input-group-btn:first-child > .btn-group > .btn,
+.input-group-btn:first-child > .dropdown-toggle,
+.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),
+.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {
+  border-top-right-radius: 0;
+  border-bottom-right-radius: 0;
+}
+.input-group-addon:first-child {
+  border-right: 0;
+}
+.input-group .form-control:last-child,
+.input-group-addon:last-child,
+.input-group-btn:last-child > .btn,
+.input-group-btn:last-child > .btn-group > .btn,
+.input-group-btn:last-child > .dropdown-toggle,
+.input-group-btn:first-child > .btn:not(:first-child),
+.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {
+  border-top-left-radius: 0;
+  border-bottom-left-radius: 0;
+}
+.input-group-addon:last-child {
+  border-left: 0;
+}
+.input-group-btn {
+  position: relative;
+  font-size: 0;
+  white-space: nowrap;
+}
+.input-group-btn > .btn {
+  position: relative;
+}
+.input-group-btn > .btn + .btn {
+  margin-left: -1px;
+}
+.input-group-btn > .btn:hover,
+.input-group-btn > .btn:focus,
+.input-group-btn > .btn:active {
+  z-index: 2;
+}
+.input-group-btn:first-child > .btn,
+.input-group-btn:first-child > .btn-group {
+  margin-right: -1px;
+}
+.input-group-btn:last-child > .btn,
+.input-group-btn:last-child > .btn-group {
+  margin-left: -1px;
+}
+.nav {
+  padding-left: 0;
+  margin-bottom: 0;
+  list-style: none;
+}
+.nav > li {
+  position: relative;
+  display: block;
+}
+.nav > li > a {
+  position: relative;
+  display: block;
+  padding: 10px 15px;
+}
+.nav > li > a:hover,
+.nav > li > a:focus {
+  text-decoration: none;
+  background-color: #eee;
+}
+.nav > li.disabled > a {
+  color: #999;
+}
+.nav > li.disabled > a:hover,
+.nav > li.disabled > a:focus {
+  color: #999;
+  text-decoration: none;
+  cursor: not-allowed;
+  background-color: transparent;
+}
+.nav .open > a,
+.nav .open > a:hover,
+.nav .open > a:focus {
+  background-color: #eee;
+  border-color: #428bca;
+}
+.nav .nav-divider {
+  height: 1px;
+  margin: 9px 0;
+  overflow: hidden;
+  background-color: #e5e5e5;
+}
+.nav > li > a > img {
+  max-width: none;
+}
+.nav-tabs {
+  border-bottom: 1px solid #ddd;
+}
+.nav-tabs > li {
+  float: left;
+  margin-bottom: -1px;
+}
+.nav-tabs > li > a {
+  margin-right: 2px;
+  line-height: 1.428571429;
+  border: 1px solid transparent;
+  border-radius: 4px 4px 0 0;
+}
+.nav-tabs > li > a:hover {
+  border-color: #eee #eee #ddd;
+}
+.nav-tabs > li.active > a,
+.nav-tabs > li.active > a:hover,
+.nav-tabs > li.active > a:focus {
+  color: #555;
+  cursor: default;
+  background-color: #fff;
+  border: 1px solid #ddd;
+  border-bottom-color: transparent;
+}
+.nav-tabs.nav-justified {
+  width: 100%;
+  border-bottom: 0;
+}
+.nav-tabs.nav-justified > li {
+  float: none;
+}
+.nav-tabs.nav-justified > li > a {
+  margin-bottom: 5px;
+  text-align: center;
+}
+.nav-tabs.nav-justified > .dropdown .dropdown-menu {
+  top: auto;
+  left: auto;
+}
+@media (min-width: 768px) {
+  .nav-tabs.nav-justified > li {
+    display: table-cell;
+    width: 1%;
+  }
+  .nav-tabs.nav-justified > li > a {
+    margin-bottom: 0;
+  }
+}
+.nav-tabs.nav-justified > li > a {
+  margin-right: 0;
+  border-radius: 4px;
+}
+.nav-tabs.nav-justified > .active > a,
+.nav-tabs.nav-justified > .active > a:hover,
+.nav-tabs.nav-justified > .active > a:focus {
+  border: 1px solid #ddd;
+}
+@media (min-width: 768px) {
+  .nav-tabs.nav-justified > li > a {
+    border-bottom: 1px solid #ddd;
+    border-radius: 4px 4px 0 0;
+  }
+  .nav-tabs.nav-justified > .active > a,
+  .nav-tabs.nav-justified > .active > a:hover,
+  .nav-tabs.nav-justified > .active > a:focus {
+    border-bottom-color: #fff;
+  }
+}
+.nav-pills > li {
+  float: left;
+}
+.nav-pills > li > a {
+  border-radius: 4px;
+}
+.nav-pills > li + li {
+  margin-left: 2px;
+}
+.nav-pills > li.active > a,
+.nav-pills > li.active > a:hover,
+.nav-pills > li.active > a:focus {
+  color: #fff;
+  background-color: #428bca;
+}
+.nav-stacked > li {
+  float: none;
+}
+.nav-stacked > li + li {
+  margin-top: 2px;
+  margin-left: 0;
+}
+.nav-justified {
+  width: 100%;
+}
+.nav-justified > li {
+  float: none;
+}
+.nav-justified > li > a {
+  margin-bottom: 5px;
+  text-align: center;
+}
+.nav-justified > .dropdown .dropdown-menu {
+  top: auto;
+  left: auto;
+}
+@media (min-width: 768px) {
+  .nav-justified > li {
+    display: table-cell;
+    width: 1%;
+  }
+  .nav-justified > li > a {
+    margin-bottom: 0;
+  }
+}
+.nav-tabs-justified {
+  border-bottom: 0;
+}
+.nav-tabs-justified > li > a {
+  margin-right: 0;
+  border-radius: 4px;
+}
+.nav-tabs-justified > .active > a,
+.nav-tabs-justified > .active > a:hover,
+.nav-tabs-justified > .active > a:focus {
+  border: 1px solid #ddd;
+}
+@media (min-width: 768px) {
+  .nav-tabs-justified > li > a {
+    border-bottom: 1px solid #ddd;
+    border-radius: 4px 4px 0 0;
+  }
+  .nav-tabs-justified > .active > a,
+  .nav-tabs-justified > .active > a:hover,
+  .nav-tabs-justified > .active > a:focus {
+    border-bottom-color: #fff;
+  }
+}
+.tab-content > .tab-pane {
+  display: none;
+}
+.tab-content > .active {
+  display: block;
+}
+.nav-tabs .dropdown-menu {
+  margin-top: -1px;
+  border-top-left-radius: 0;
+  border-top-right-radius: 0;
+}
+.navbar {
+  position: relative;
+  min-height: 50px;
+  margin-bottom: 20px;
+  border: 1px solid transparent;
+}
+@media (min-width: 768px) {
+  .navbar {
+    border-radius: 4px;
+  }
+}
+@media (min-width: 768px) {
+  .navbar-header {
+    float: left;
+  }
+}
+.navbar-collapse {
+  max-height: 340px;
+  padding-right: 15px;
+  padding-left: 15px;
+  overflow-x: visible;
+  -webkit-overflow-scrolling: touch;
+  border-top: 1px solid transparent;
+  box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);
+}
+.navbar-collapse.in {
+  overflow-y: auto;
+}
+@media (min-width: 768px) {
+  .navbar-collapse {
+    width: auto;
+    border-top: 0;
+    box-shadow: none;
+  }
+  .navbar-collapse.collapse {
+    display: block !important;
+    height: auto !important;
+    padding-bottom: 0;
+    overflow: visible !important;
+  }
+  .navbar-collapse.in {
+    overflow-y: visible;
+  }
+  .navbar-fixed-top .navbar-collapse,
+  .navbar-static-top .navbar-collapse,
+  .navbar-fixed-bottom .navbar-collapse {
+    padding-right: 0;
+    padding-left: 0;
+  }
+}
+.container > .navbar-header,
+.container-fluid > .navbar-header,
+.container > .navbar-collapse,
+.container-fluid > .navbar-collapse {
+  margin-right: -15px;
+  margin-left: -15px;
+}
+@media (min-width: 768px) {
+  .container > .navbar-header,
+  .container-fluid > .navbar-header,
+  .container > .navbar-collapse,
+  .container-fluid > .navbar-collapse {
+    margin-right: 0;
+    margin-left: 0;
+  }
+}
+.navbar-static-top {
+  z-index: 1000;
+  border-width: 0 0 1px;
+}
+@media (min-width: 768px) {
+  .navbar-static-top {
+    border-radius: 0;
+  }
+}
+.navbar-fixed-top,
+.navbar-fixed-bottom {
+  position: fixed;
+  right: 0;
+  left: 0;
+  z-index: 1030;
+}
+@media (min-width: 768px) {
+  .navbar-fixed-top,
+  .navbar-fixed-bottom {
+    border-radius: 0;
+  }
+}
+.navbar-fixed-top {
+  top: 0;
+  border-width: 0 0 1px;
+}
+.navbar-fixed-bottom {
+  bottom: 0;
+  margin-bottom: 0;
+  border-width: 1px 0 0;
+}
+.navbar-brand {
+  float: left;
+  height: 20px;
+  padding: 15px 15px;
+  font-size: 18px;
+  line-height: 20px;
+}
+.navbar-brand:hover,
+.navbar-brand:focus {
+  text-decoration: none;
+}
+@media (min-width: 768px) {
+  .navbar > .container .navbar-brand,
+  .navbar > .container-fluid .navbar-brand {
+    margin-left: -15px;
+  }
+}
+.navbar-toggle {
+  position: relative;
+  float: right;
+  padding: 9px 10px;
+  margin-top: 8px;
+  margin-right: 15px;
+  margin-bottom: 8px;
+  background-color: transparent;
+  background-image: none;
+  border: 1px solid transparent;
+  border-radius: 4px;
+}
+.navbar-toggle:focus {
+  outline: none;
+}
+.navbar-toggle .icon-bar {
+  display: block;
+  width: 22px;
+  height: 2px;
+  border-radius: 1px;
+}
+.navbar-toggle .icon-bar + .icon-bar {
+  margin-top: 4px;
+}
+@media (min-width: 768px) {
+  .navbar-toggle {
+    display: none;
+  }
+}
+.navbar-nav {
+  margin: 7.5px -15px;
+}
+.navbar-nav > li > a {
+  padding-top: 10px;
+  padding-bottom: 10px;
+  line-height: 20px;
+}
+@media (max-width: 767px) {
+  .navbar-nav .open .dropdown-menu {
+    position: static;
+    float: none;
+    width: auto;
+    margin-top: 0;
+    background-color: transparent;
+    border: 0;
+    box-shadow: none;
+  }
+  .navbar-nav .open .dropdown-menu > li > a,
+  .navbar-nav .open .dropdown-menu .dropdown-header {
+    padding: 5px 15px 5px 25px;
+  }
+  .navbar-nav .open .dropdown-menu > li > a {
+    line-height: 20px;
+  }
+  .navbar-nav .open .dropdown-menu > li > a:hover,
+  .navbar-nav .open .dropdown-menu > li > a:focus {
+    background-image: none;
+  }
+}
+@media (min-width: 768px) {
+  .navbar-nav {
+    float: left;
+    margin: 0;
+  }
+  .navbar-nav > li {
+    float: left;
+  }
+  .navbar-nav > li > a {
+    padding-top: 15px;
+    padding-bottom: 15px;
+  }
+  .navbar-nav.navbar-right:last-child {
+    margin-right: -15px;
+  }
+}
+@media (min-width: 768px) {
+  .navbar-left {
+    float: left !important;
+  }
+  .navbar-right {
+    float: right !important;
+  }
+}
+.navbar-form {
+  padding: 10px 15px;
+  margin-top: 8px;
+  margin-right: -15px;
+  margin-bottom: 8px;
+  margin-left: -15px;
+  border-top: 1px solid transparent;
+  border-bottom: 1px solid transparent;
+  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);
+          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);
+}
+@media (min-width: 768px) {
+  .navbar-form .form-group {
+    display: inline-block;
+    margin-bottom: 0;
+    vertical-align: middle;
+  }
+  .navbar-form .form-control {
+    display: inline-block;
+    width: auto;
+    vertical-align: middle;
+  }
+  .navbar-form .control-label {
+    margin-bottom: 0;
+    vertical-align: middle;
+  }
+  .navbar-form .radio,
+  .navbar-form .checkbox {
+    display: inline-block;
+    padding-left: 0;
+    margin-top: 0;
+    margin-bottom: 0;
+    vertical-align: middle;
+  }
+  .navbar-form .radio input[type="radio"],
+  .navbar-form .checkbox input[type="checkbox"] {
+    float: none;
+    margin-left: 0;
+  }
+  .navbar-form .has-feedback .form-control-feedback {
+    top: 0;
+  }
+}
+@media (max-width: 767px) {
+  .navbar-form .form-group {
+    margin-bottom: 5px;
+  }
+}
+@media (min-width: 768px) {
+  .navbar-form {
+    width: auto;
+    padding-top: 0;
+    padding-bottom: 0;
+    margin-right: 0;
+    margin-left: 0;
+    border: 0;
+    -webkit-box-shadow: none;
+            box-shadow: none;
+  }
+  .navbar-form.navbar-right:last-child {
+    margin-right: -15px;
+  }
+}
+.navbar-nav > li > .dropdown-menu {
+  margin-top: 0;
+  border-top-left-radius: 0;
+  border-top-right-radius: 0;
+}
+.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {
+  border-bottom-right-radius: 0;
+  border-bottom-left-radius: 0;
+}
+.navbar-btn {
+  margin-top: 8px;
+  margin-bottom: 8px;
+}
+.navbar-btn.btn-sm {
+  margin-top: 10px;
+  margin-bottom: 10px;
+}
+.navbar-btn.btn-xs {
+  margin-top: 14px;
+  margin-bottom: 14px;
+}
+.navbar-text {
+  margin-top: 15px;
+  margin-bottom: 15px;
+}
+@media (min-width: 768px) {
+  .navbar-text {
+    float: left;
+    margin-right: 15px;
+    margin-left: 15px;
+  }
+  .navbar-text.navbar-right:last-child {
+    margin-right: 0;
+  }
+}
+.navbar-default {
+  background-color: #f8f8f8;
+  border-color: #e7e7e7;
+}
+.navbar-default .navbar-brand {
+  color: #777;
+}
+.navbar-default .navbar-brand:hover,
+.navbar-default .navbar-brand:focus {
+  color: #5e5e5e;
+  background-color: transparent;
+}
+.navbar-default .navbar-text {
+  color: #777;
+}
+.navbar-default .navbar-nav > li > a {
+  color: #777;
+}
+.navbar-default .navbar-nav > li > a:hover,
+.navbar-default .navbar-nav > li > a:focus {
+  color: #333;
+  background-color: transparent;
+}
+.navbar-default .navbar-nav > .active > a,
+.navbar-default .navbar-nav > .active > a:hover,
+.navbar-default .navbar-nav > .active > a:focus {
+  color: #555;
+  background-color: #e7e7e7;
+}
+.navbar-default .navbar-nav > .disabled > a,
+.navbar-default .navbar-nav > .disabled > a:hover,
+.navbar-default .navbar-nav > .disabled > a:focus {
+  color: #ccc;
+  background-color: transparent;
+}
+.navbar-default .navbar-toggle {
+  border-color: #ddd;
+}
+.navbar-default .navbar-toggle:hover,
+.navbar-default .navbar-toggle:focus {
+  background-color: #ddd;
+}
+.navbar-default .navbar-toggle .icon-bar {
+  background-color: #888;
+}
+.navbar-default .navbar-collapse,
+.navbar-default .navbar-form {
+  border-color: #e7e7e7;
+}
+.navbar-default .navbar-nav > .open > a,
+.navbar-default .navbar-nav > .open > a:hover,
+.navbar-default .navbar-nav > .open > a:focus {
+  color: #555;
+  background-color: #e7e7e7;
+}
+@media (max-width: 767px) {
+  .navbar-default .navbar-nav .open .dropdown-menu > li > a {
+    color: #777;
+  }
+  .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,
+  .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {
+    color: #333;
+    background-color: transparent;
+  }
+  .navbar-default .navbar-nav .open .dropdown-menu > .active > a,
+  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,
+  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {
+    color: #555;
+    background-color: #e7e7e7;
+  }
+  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,
+  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,
+  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {
+    color: #ccc;
+    background-color: transparent;
+  }
+}
+.navbar-default .navbar-link {
+  color: #777;
+}
+.navbar-default .navbar-link:hover {
+  color: #333;
+}
+.navbar-inverse {
+  background-color: #222;
+  border-color: #080808;
+}
+.navbar-inverse .navbar-brand {
+  color: #999;
+}
+.navbar-inverse .navbar-brand:hover,
+.navbar-inverse .navbar-brand:focus {
+  color: #fff;
+  background-color: transparent;
+}
+.navbar-inverse .navbar-text {
+  color: #999;
+}
+.navbar-inverse .navbar-nav > li > a {
+  color: #999;
+}
+.navbar-inverse .navbar-nav > li > a:hover,
+.navbar-inverse .navbar-nav > li > a:focus {
+  color: #fff;
+  background-color: transparent;
+}
+.navbar-inverse .navbar-nav > .active > a,
+.navbar-inverse .navbar-nav > .active > a:hover,
+.navbar-inverse .navbar-nav > .active > a:focus {
+  color: #fff;
+  background-color: #080808;
+}
+.navbar-inverse .navbar-nav > .disabled > a,
+.navbar-inverse .navbar-nav > .disabled > a:hover,
+.navbar-inverse .navbar-nav > .disabled > a:focus {
+  color: #444;
+  background-color: transparent;
+}
+.navbar-inverse .navbar-toggle {
+  border-color: #333;
+}
+.navbar-inverse .navbar-toggle:hover,
+.navbar-inverse .navbar-toggle:focus {
+  background-color: #333;
+}
+.navbar-inverse .navbar-toggle .icon-bar {
+  background-color: #fff;
+}
+.navbar-inverse .navbar-collapse,
+.navbar-inverse .navbar-form {
+  border-color: #101010;
+}
+.navbar-inverse .navbar-nav > .open > a,
+.navbar-inverse .navbar-nav > .open > a:hover,
+.navbar-inverse .navbar-nav > .open > a:focus {
+  color: #fff;
+  background-color: #080808;
+}
+@media (max-width: 767px) {
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {
+    border-color: #080808;
+  }
+  .navbar-inverse .navbar-nav .open .dropdown-menu .divider {
+    background-color: #080808;
+  }
+  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {
+    color: #999;
+  }
+  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,
+  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {
+    color: #fff;
+    background-color: transparent;
+  }
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {
+    color: #fff;
+    background-color: #080808;
+  }
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {
+    color: #444;
+    background-color: transparent;
+  }
+}
+.navbar-inverse .navbar-link {
+  color: #999;
+}
+.navbar-inverse .navbar-link:hover {
+  color: #fff;
+}
+.breadcrumb {
+  padding: 8px 15px;
+  margin-bottom: 20px;
+  list-style: none;
+  background-color: #f5f5f5;
+  border-radius: 4px;
+}
+.breadcrumb > li {
+  display: inline-block;
+}
+.breadcrumb > li + li:before {
+  padding: 0 5px;
+  color: #ccc;
+  content: "/\00a0";
+}
+.breadcrumb > .active {
+  color: #999;
+}
+.pagination {
+  display: inline-block;
+  padding-left: 0;
+  margin: 20px 0;
+  border-radius: 4px;
+}
+.pagination > li {
+  display: inline;
+}
+.pagination > li > a,
+.pagination > li > span {
+  position: relative;
+  float: left;
+  padding: 6px 12px;
+  margin-left: -1px;
+  line-height: 1.428571429;
+  color: #428bca;
+  text-decoration: none;
+  background-color: #fff;
+  border: 1px solid #ddd;
+}
+.pagination > li:first-child > a,
+.pagination > li:first-child > span {
+  margin-left: 0;
+  border-top-left-radius: 4px;
+  border-bottom-left-radius: 4px;
+}
+.pagination > li:last-child > a,
+.pagination > li:last-child > span {
+  border-top-right-radius: 4px;
+  border-bottom-right-radius: 4px;
+}
+.pagination > li > a:hover,
+.pagination > li > span:hover,
+.pagination > li > a:focus,
+.pagination > li > span:focus {
+  color: #2a6496;
+  background-color: #eee;
+  border-color: #ddd;
+}
+.pagination > .active > a,
+.pagination > .active > span,
+.pagination > .active > a:hover,
+.pagination > .active > span:hover,
+.pagination > .active > a:focus,
+.pagination > .active > span:focus {
+  z-index: 2;
+  color: #fff;
+  cursor: default;
+  background-color: #428bca;
+  border-color: #428bca;
+}
+.pagination > .disabled > span,
+.pagination > .disabled > span:hover,
+.pagination > .disabled > span:focus,
+.pagination > .disabled > a,
+.pagination > .disabled > a:hover,
+.pagination > .disabled > a:focus {
+  color: #999;
+  cursor: not-allowed;
+  background-color: #fff;
+  border-color: #ddd;
+}
+.pagination-lg > li > a,
+.pagination-lg > li > span {
+  padding: 10px 16px;
+  font-size: 18px;
+}
+.pagination-lg > li:first-child > a,
+.pagination-lg > li:first-child > span {
+  border-top-left-radius: 6px;
+  border-bottom-left-radius: 6px;
+}
+.pagination-lg > li:last-child > a,
+.pagination-lg > li:last-child > span {
+  border-top-right-radius: 6px;
+  border-bottom-right-radius: 6px;
+}
+.pagination-sm > li > a,
+.pagination-sm > li > span {
+  padding: 5px 10px;
+  font-size: 12px;
+}
+.pagination-sm > li:first-child > a,
+.pagination-sm > li:first-child > span {
+  border-top-left-radius: 3px;
+  border-bottom-left-radius: 3px;
+}
+.pagination-sm > li:last-child > a,
+.pagination-sm > li:last-child > span {
+  border-top-right-radius: 3px;
+  border-bottom-right-radius: 3px;
+}
+.pager {
+  padding-left: 0;
+  margin: 20px 0;
+  text-align: center;
+  list-style: none;
+}
+.pager li {
+  display: inline;
+}
+.pager li > a,
+.pager li > span {
+  display: inline-block;
+  padding: 5px 14px;
+  background-color: #fff;
+  border: 1px solid #ddd;
+  border-radius: 15px;
+}
+.pager li > a:hover,
+.pager li > a:focus {
+  text-decoration: none;
+  background-color: #eee;
+}
+.pager .next > a,
+.pager .next > span {
+  float: right;
+}
+.pager .previous > a,
+.pager .previous > span {
+  float: left;
+}
+.pager .disabled > a,
+.pager .disabled > a:hover,
+.pager .disabled > a:focus,
+.pager .disabled > span {
+  color: #999;
+  cursor: not-allowed;
+  background-color: #fff;
+}
+.label {
+  display: inline;
+  padding: .2em .6em .3em;
+  font-size: 75%;
+  font-weight: bold;
+  line-height: 1;
+  color: #fff;
+  text-align: center;
+  white-space: nowrap;
+  vertical-align: baseline;
+  border-radius: .25em;
+}
+.label[href]:hover,
+.label[href]:focus {
+  color: #fff;
+  text-decoration: none;
+  cursor: pointer;
+}
+.label:empty {
+  display: none;
+}
+.btn .label {
+  position: relative;
+  top: -1px;
+}
+.label-default {
+  background-color: #999;
+}
+.label-default[href]:hover,
+.label-default[href]:focus {
+  background-color: #808080;
+}
+.label-primary {
+  background-color: #428bca;
+}
+.label-primary[href]:hover,
+.label-primary[href]:focus {
+  background-color: #3071a9;
+}
+.label-success {
+  background-color: #5cb85c;
+}
+.label-success[href]:hover,
+.label-success[href]:focus {
+  background-color: #449d44;
+}
+.label-info {
+  background-color: #5bc0de;
+}
+.label-info[href]:hover,
+.label-info[href]:focus {
+  background-color: #31b0d5;
+}
+.label-warning {
+  background-color: #f0ad4e;
+}
+.label-warning[href]:hover,
+.label-warning[href]:focus {
+  background-color: #ec971f;
+}
+.label-danger {
+  background-color: #d9534f;
+}
+.label-danger[href]:hover,
+.label-danger[href]:focus {
+  background-color: #c9302c;
+}
+.badge {
+  display: inline-block;
+  min-width: 10px;
+  padding: 3px 7px;
+  font-size: 12px;
+  font-weight: bold;
+  line-height: 1;
+  color: #fff;
+  text-align: center;
+  white-space: nowrap;
+  vertical-align: baseline;
+  background-color: #999;
+  border-radius: 10px;
+}
+.badge:empty {
+  display: none;
+}
+.btn .badge {
+  position: relative;
+  top: -1px;
+}
+.btn-xs .badge {
+  top: 0;
+  padding: 1px 5px;
+}
+a.badge:hover,
+a.badge:focus {
+  color: #fff;
+  text-decoration: none;
+  cursor: pointer;
+}
+a.list-group-item.active > .badge,
+.nav-pills > .active > a > .badge {
+  color: #428bca;
+  background-color: #fff;
+}
+.nav-pills > li > a > .badge {
+  margin-left: 3px;
+}
+.jumbotron {
+  padding: 30px;
+  margin-bottom: 30px;
+  color: inherit;
+  background-color: #eee;
+}
+.jumbotron h1,
+.jumbotron .h1 {
+  color: inherit;
+}
+.jumbotron p {
+  margin-bottom: 15px;
+  font-size: 21px;
+  font-weight: 200;
+}
+.container .jumbotron {
+  border-radius: 6px;
+}
+.jumbotron .container {
+  max-width: 100%;
+}
+@media screen and (min-width: 768px) {
+  .jumbotron {
+    padding-top: 48px;
+    padding-bottom: 48px;
+  }
+  .container .jumbotron {
+    padding-right: 60px;
+    padding-left: 60px;
+  }
+  .jumbotron h1,
+  .jumbotron .h1 {
+    font-size: 63px;
+  }
+}
+.thumbnail {
+  display: block;
+  padding: 4px;
+  margin-bottom: 20px;
+  line-height: 1.428571429;
+  background-color: #fff;
+  border: 1px solid #ddd;
+  border-radius: 4px;
+  -webkit-transition: all .2s ease-in-out;
+          transition: all .2s ease-in-out;
+}
+.thumbnail > img,
+.thumbnail a > img {
+  display: block;
+  max-width: 100%;
+  height: auto;
+  margin-right: auto;
+  margin-left: auto;
+}
+a.thumbnail:hover,
+a.thumbnail:focus,
+a.thumbnail.active {
+  border-color: #428bca;
+}
+.thumbnail .caption {
+  padding: 9px;
+  color: #333;
+}
+.alert {
+  padding: 15px;
+  margin-bottom: 20px;
+  border: 1px solid transparent;
+  border-radius: 4px;
+}
+.alert h4 {
+  margin-top: 0;
+  color: inherit;
+}
+.alert .alert-link {
+  font-weight: bold;
+}
+.alert > p,
+.alert > ul {
+  margin-bottom: 0;
+}
+.alert > p + p {
+  margin-top: 5px;
+}
+.alert-dismissable {
+  padding-right: 35px;
+}
+.alert-dismissable .close {
+  position: relative;
+  top: -2px;
+  right: -21px;
+  color: inherit;
+}
+.alert-success {
+  color: #3c763d;
+  background-color: #dff0d8;
+  border-color: #d6e9c6;
+}
+.alert-success hr {
+  border-top-color: #c9e2b3;
+}
+.alert-success .alert-link {
+  color: #2b542c;
+}
+.alert-info {
+  color: #31708f;
+  background-color: #d9edf7;
+  border-color: #bce8f1;
+}
+.alert-info hr {
+  border-top-color: #a6e1ec;
+}
+.alert-info .alert-link {
+  color: #245269;
+}
+.alert-warning {
+  color: #8a6d3b;
+  background-color: #fcf8e3;
+  border-color: #faebcc;
+}
+.alert-warning hr {
+  border-top-color: #f7e1b5;
+}
+.alert-warning .alert-link {
+  color: #66512c;
+}
+.alert-danger {
+  color: #a94442;
+  background-color: #f2dede;
+  border-color: #ebccd1;
+}
+.alert-danger hr {
+  border-top-color: #e4b9c0;
+}
+.alert-danger .alert-link {
+  color: #843534;
+}
+@-webkit-keyframes progress-bar-stripes {
+  from {
+    background-position: 40px 0;
+  }
+  to {
+    background-position: 0 0;
+  }
+}
+@keyframes progress-bar-stripes {
+  from {
+    background-position: 40px 0;
+  }
+  to {
+    background-position: 0 0;
+  }
+}
+.progress {
+  height: 20px;
+  margin-bottom: 20px;
+  overflow: hidden;
+  background-color: #f5f5f5;
+  border-radius: 4px;
+  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);
+          box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);
+}
+.progress-bar {
+  float: left;
+  width: 0;
+  height: 100%;
+  font-size: 12px;
+  line-height: 20px;
+  color: #fff;
+  text-align: center;
+  background-color: #428bca;
+  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);
+          box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);
+  -webkit-transition: width .6s ease;
+          transition: width .6s ease;
+}
+.progress-striped .progress-bar {
+  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+  background-size: 40px 40px;
+}
+.progress.active .progress-bar {
+  -webkit-animation: progress-bar-stripes 2s linear infinite;
+          animation: progress-bar-stripes 2s linear infinite;
+}
+.progress-bar-success {
+  background-color: #5cb85c;
+}
+.progress-striped .progress-bar-success {
+  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+}
+.progress-bar-info {
+  background-color: #5bc0de;
+}
+.progress-striped .progress-bar-info {
+  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+}
+.progress-bar-warning {
+  background-color: #f0ad4e;
+}
+.progress-striped .progress-bar-warning {
+  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+}
+.progress-bar-danger {
+  background-color: #d9534f;
+}
+.progress-striped .progress-bar-danger {
+  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+}
+.media,
+.media-body {
+  overflow: hidden;
+  zoom: 1;
+}
+.media,
+.media .media {
+  margin-top: 15px;
+}
+.media:first-child {
+  margin-top: 0;
+}
+.media-object {
+  display: block;
+}
+.media-heading {
+  margin: 0 0 5px;
+}
+.media > .pull-left {
+  margin-right: 10px;
+}
+.media > .pull-right {
+  margin-left: 10px;
+}
+.media-list {
+  padding-left: 0;
+  list-style: none;
+}
+.list-group {
+  padding-left: 0;
+  margin-bottom: 20px;
+}
+.list-group-item {
+  position: relative;
+  display: block;
+  padding: 10px 15px;
+  margin-bottom: -1px;
+  background-color: #fff;
+  border: 1px solid #ddd;
+}
+.list-group-item:first-child {
+  border-top-left-radius: 4px;
+  border-top-right-radius: 4px;
+}
+.list-group-item:last-child {
+  margin-bottom: 0;
+  border-bottom-right-radius: 4px;
+  border-bottom-left-radius: 4px;
+}
+.list-group-item > .badge {
+  float: right;
+}
+.list-group-item > .badge + .badge {
+  margin-right: 5px;
+}
+a.list-group-item {
+  color: #555;
+}
+a.list-group-item .list-group-item-heading {
+  color: #333;
+}
+a.list-group-item:hover,
+a.list-group-item:focus {
+  text-decoration: none;
+  background-color: #f5f5f5;
+}
+a.list-group-item.active,
+a.list-group-item.active:hover,
+a.list-group-item.active:focus {
+  z-index: 2;
+  color: #fff;
+  background-color: #428bca;
+  border-color: #428bca;
+}
+a.list-group-item.active .list-group-item-heading,
+a.list-group-item.active:hover .list-group-item-heading,
+a.list-group-item.active:focus .list-group-item-heading {
+  color: inherit;
+}
+a.list-group-item.active .list-group-item-text,
+a.list-group-item.active:hover .list-group-item-text,
+a.list-group-item.active:focus .list-group-item-text {
+  color: #e1edf7;
+}
+.list-group-item-success {
+  color: #3c763d;
+  background-color: #dff0d8;
+}
+a.list-group-item-success {
+  color: #3c763d;
+}
+a.list-group-item-success .list-group-item-heading {
+  color: inherit;
+}
+a.list-group-item-success:hover,
+a.list-group-item-success:focus {
+  color: #3c763d;
+  background-color: #d0e9c6;
+}
+a.list-group-item-success.active,
+a.list-group-item-success.active:hover,
+a.list-group-item-success.active:focus {
+  color: #fff;
+  background-color: #3c763d;
+  border-color: #3c763d;
+}
+.list-group-item-info {
+  color: #31708f;
+  background-color: #d9edf7;
+}
+a.list-group-item-info {
+  color: #31708f;
+}
+a.list-group-item-info .list-group-item-heading {
+  color: inherit;
+}
+a.list-group-item-info:hover,
+a.list-group-item-info:focus {
+  color: #31708f;
+  background-color: #c4e3f3;
+}
+a.list-group-item-info.active,
+a.list-group-item-info.active:hover,
+a.list-group-item-info.active:focus {
+  color: #fff;
+  background-color: #31708f;
+  border-color: #31708f;
+}
+.list-group-item-warning {
+  color: #8a6d3b;
+  background-color: #fcf8e3;
+}
+a.list-group-item-warning {
+  color: #8a6d3b;
+}
+a.list-group-item-warning .list-group-item-heading {
+  color: inherit;
+}
+a.list-group-item-warning:hover,
+a.list-group-item-warning:focus {
+  color: #8a6d3b;
+  background-color: #faf2cc;
+}
+a.list-group-item-warning.active,
+a.list-group-item-warning.active:hover,
+a.list-group-item-warning.active:focus {
+  color: #fff;
+  background-color: #8a6d3b;
+  border-color: #8a6d3b;
+}
+.list-group-item-danger {
+  color: #a94442;
+  background-color: #f2dede;
+}
+a.list-group-item-danger {
+  color: #a94442;
+}
+a.list-group-item-danger .list-group-item-heading {
+  color: inherit;
+}
+a.list-group-item-danger:hover,
+a.list-group-item-danger:focus {
+  color: #a94442;
+  background-color: #ebcccc;
+}
+a.list-group-item-danger.active,
+a.list-group-item-danger.active:hover,
+a.list-group-item-danger.active:focus {
+  color: #fff;
+  background-color: #a94442;
+  border-color: #a94442;
+}
+.list-group-item-heading {
+  margin-top: 0;
+  margin-bottom: 5px;
+}
+.list-group-item-text {
+  margin-bottom: 0;
+  line-height: 1.3;
+}
+.panel {
+  margin-bottom: 20px;
+  background-color: #fff;
+  border: 1px solid transparent;
+  border-radius: 4px;
+  -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, .05);
+          box-shadow: 0 1px 1px rgba(0, 0, 0, .05);
+}
+.panel-body {
+  padding: 15px;
+}
+.panel > .list-group {
+  margin-bottom: 0;
+}
+.panel > .list-group .list-group-item {
+  border-width: 1px 0;
+  border-radius: 0;
+}
+.panel > .list-group .list-group-item:first-child {
+  border-top: 0;
+}
+.panel > .list-group .list-group-item:last-child {
+  border-bottom: 0;
+}
+.panel > .list-group:first-child .list-group-item:first-child {
+  border-top-left-radius: 3px;
+  border-top-right-radius: 3px;
+}
+.panel > .list-group:last-child .list-group-item:last-child {
+  border-bottom-right-radius: 3px;
+  border-bottom-left-radius: 3px;
+}
+.panel-heading + .list-group .list-group-item:first-child {
+  border-top-width: 0;
+}
+.panel > .table,
+.panel > .table-responsive > .table {
+  margin-bottom: 0;
+}
+.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,
+.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {
+  border-top-left-radius: 3px;
+}
+.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,
+.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {
+  border-top-right-radius: 3px;
+}
+.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,
+.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {
+  border-bottom-left-radius: 3px;
+}
+.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,
+.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {
+  border-bottom-right-radius: 3px;
+}
+.panel > .panel-body + .table,
+.panel > .panel-body + .table-responsive {
+  border-top: 1px solid #ddd;
+}
+.panel > .table > tbody:first-child > tr:first-child th,
+.panel > .table > tbody:first-child > tr:first-child td {
+  border-top: 0;
+}
+.panel > .table-bordered,
+.panel > .table-responsive > .table-bordered {
+  border: 0;
+}
+.panel > .table-bordered > thead > tr > th:first-child,
+.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,
+.panel > .table-bordered > tbody > tr > th:first-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,
+.panel > .table-bordered > tfoot > tr > th:first-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,
+.panel > .table-bordered > thead > tr > td:first-child,
+.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,
+.panel > .table-bordered > tbody > tr > td:first-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,
+.panel > .table-bordered > tfoot > tr > td:first-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {
+  border-left: 0;
+}
+.panel > .table-bordered > thead > tr > th:last-child,
+.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,
+.panel > .table-bordered > tbody > tr > th:last-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,
+.panel > .table-bordered > tfoot > tr > th:last-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,
+.panel > .table-bordered > thead > tr > td:last-child,
+.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,
+.panel > .table-bordered > tbody > tr > td:last-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,
+.panel > .table-bordered > tfoot > tr > td:last-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {
+  border-right: 0;
+}
+.panel > .table-bordered > thead > tr:first-child > th,
+.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,
+.panel > .table-bordered > tbody > tr:first-child > th,
+.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th,
+.panel > .table-bordered > tfoot > tr:first-child > th,
+.panel > .table-responsive > .table-bordered > tfoot > tr:first-child > th,
+.panel > .table-bordered > thead > tr:first-child > td,
+.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,
+.panel > .table-bordered > tbody > tr:first-child > td,
+.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,
+.panel > .table-bordered > tfoot > tr:first-child > td,
+.panel > .table-responsive > .table-bordered > tfoot > tr:first-child > td {
+  border-top: 0;
+}
+.panel > .table-bordered > thead > tr:last-child > th,
+.panel > .table-responsive > .table-bordered > thead > tr:last-child > th,
+.panel > .table-bordered > tbody > tr:last-child > th,
+.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,
+.panel > .table-bordered > tfoot > tr:last-child > th,
+.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th,
+.panel > .table-bordered > thead > tr:last-child > td,
+.panel > .table-responsive > .table-bordered > thead > tr:last-child > td,
+.panel > .table-bordered > tbody > tr:last-child > td,
+.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,
+.panel > .table-bordered > tfoot > tr:last-child > td,
+.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td {
+  border-bottom: 0;
+}
+.panel > .table-responsive {
+  margin-bottom: 0;
+  border: 0;
+}
+.panel-heading {
+  padding: 10px 15px;
+  border-bottom: 1px solid transparent;
+  border-top-left-radius: 3px;
+  border-top-right-radius: 3px;
+}
+.panel-heading > .dropdown .dropdown-toggle {
+  color: inherit;
+}
+.panel-title {
+  margin-top: 0;
+  margin-bottom: 0;
+  font-size: 16px;
+  color: inherit;
+}
+.panel-title > a {
+  color: inherit;
+}
+.panel-footer {
+  padding: 10px 15px;
+  background-color: #f5f5f5;
+  border-top: 1px solid #ddd;
+  border-bottom-right-radius: 3px;
+  border-bottom-left-radius: 3px;
+}
+.panel-group {
+  margin-bottom: 20px;
+}
+.panel-group .panel {
+  margin-bottom: 0;
+  overflow: hidden;
+  border-radius: 4px;
+}
+.panel-group .panel + .panel {
+  margin-top: 5px;
+}
+.panel-group .panel-heading {
+  border-bottom: 0;
+}
+.panel-group .panel-heading + .panel-collapse .panel-body {
+  border-top: 1px solid #ddd;
+}
+.panel-group .panel-footer {
+  border-top: 0;
+}
+.panel-group .panel-footer + .panel-collapse .panel-body {
+  border-bottom: 1px solid #ddd;
+}
+.panel-default {
+  border-color: #ddd;
+}
+.panel-default > .panel-heading {
+  color: #333;
+  background-color: #f5f5f5;
+  border-color: #ddd;
+}
+.panel-default > .panel-heading + .panel-collapse .panel-body {
+  border-top-color: #ddd;
+}
+.panel-default > .panel-footer + .panel-collapse .panel-body {
+  border-bottom-color: #ddd;
+}
+.panel-primary {
+  border-color: #428bca;
+}
+.panel-primary > .panel-heading {
+  color: #fff;
+  background-color: #428bca;
+  border-color: #428bca;
+}
+.panel-primary > .panel-heading + .panel-collapse .panel-body {
+  border-top-color: #428bca;
+}
+.panel-primary > .panel-footer + .panel-collapse .panel-body {
+  border-bottom-color: #428bca;
+}
+.panel-success {
+  border-color: #d6e9c6;
+}
+.panel-success > .panel-heading {
+  color: #3c763d;
+  background-color: #dff0d8;
+  border-color: #d6e9c6;
+}
+.panel-success > .panel-heading + .panel-collapse .panel-body {
+  border-top-color: #d6e9c6;
+}
+.panel-success > .panel-footer + .panel-collapse .panel-body {
+  border-bottom-color: #d6e9c6;
+}
+.panel-info {
+  border-color: #bce8f1;
+}
+.panel-info > .panel-heading {
+  color: #31708f;
+  background-color: #d9edf7;
+  border-color: #bce8f1;
+}
+.panel-info > .panel-heading + .panel-collapse .panel-body {
+  border-top-color: #bce8f1;
+}
+.panel-info > .panel-footer + .panel-collapse .panel-body {
+  border-bottom-color: #bce8f1;
+}
+.panel-warning {
+  border-color: #faebcc;
+}
+.panel-warning > .panel-heading {
+  color: #8a6d3b;
+  background-color: #fcf8e3;
+  border-color: #faebcc;
+}
+.panel-warning > .panel-heading + .panel-collapse .panel-body {
+  border-top-color: #faebcc;
+}
+.panel-warning > .panel-footer + .panel-collapse .panel-body {
+  border-bottom-color: #faebcc;
+}
+.panel-danger {
+  border-color: #ebccd1;
+}
+.panel-danger > .panel-heading {
+  color: #a94442;
+  background-color: #f2dede;
+  border-color: #ebccd1;
+}
+.panel-danger > .panel-heading + .panel-collapse .panel-body {
+  border-top-color: #ebccd1;
+}
+.panel-danger > .panel-footer + .panel-collapse .panel-body {
+  border-bottom-color: #ebccd1;
+}
+.well {
+  min-height: 20px;
+  padding: 19px;
+  margin-bottom: 20px;
+  background-color: #f5f5f5;
+  border: 1px solid #e3e3e3;
+  border-radius: 4px;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);
+}
+.well blockquote {
+  border-color: #ddd;
+  border-color: rgba(0, 0, 0, .15);
+}
+.well-lg {
+  padding: 24px;
+  border-radius: 6px;
+}
+.well-sm {
+  padding: 9px;
+  border-radius: 3px;
+}
+.close {
+  float: right;
+  font-size: 21px;
+  font-weight: bold;
+  line-height: 1;
+  color: #000;
+  text-shadow: 0 1px 0 #fff;
+  filter: alpha(opacity=20);
+  opacity: .2;
+}
+.close:hover,
+.close:focus {
+  color: #000;
+  text-decoration: none;
+  cursor: pointer;
+  filter: alpha(opacity=50);
+  opacity: .5;
+}
+button.close {
+  -webkit-appearance: none;
+  padding: 0;
+  cursor: pointer;
+  background: transparent;
+  border: 0;
+}
+.modal-open {
+  overflow: hidden;
+}
+.modal {
+  position: fixed;
+  top: 0;
+  right: 0;
+  bottom: 0;
+  left: 0;
+  z-index: 1050;
+  display: none;
+  overflow: auto;
+  overflow-y: scroll;
+  -webkit-overflow-scrolling: touch;
+  outline: 0;
+}
+.modal.fade .modal-dialog {
+  -webkit-transition: -webkit-transform .3s ease-out;
+     -moz-transition:    -moz-transform .3s ease-out;
+       -o-transition:      -o-transform .3s ease-out;
+          transition:         transform .3s ease-out;
+  -webkit-transform: translate(0, -25%);
+      -ms-transform: translate(0, -25%);
+          transform: translate(0, -25%);
+}
+.modal.in .modal-dialog {
+  -webkit-transform: translate(0, 0);
+      -ms-transform: translate(0, 0);
+          transform: translate(0, 0);
+}
+.modal-dialog {
+  position: relative;
+  width: auto;
+  margin: 10px;
+}
+.modal-content {
+  position: relative;
+  background-color: #fff;
+  background-clip: padding-box;
+  border: 1px solid #999;
+  border: 1px solid rgba(0, 0, 0, .2);
+  border-radius: 6px;
+  outline: none;
+  -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, .5);
+          box-shadow: 0 3px 9px rgba(0, 0, 0, .5);
+}
+.modal-backdrop {
+  position: fixed;
+  top: 0;
+  right: 0;
+  bottom: 0;
+  left: 0;
+  z-index: 1040;
+  background-color: #000;
+}
+.modal-backdrop.fade {
+  filter: alpha(opacity=0);
+  opacity: 0;
+}
+.modal-backdrop.in {
+  filter: alpha(opacity=50);
+  opacity: .5;
+}
+.modal-header {
+  min-height: 16.428571429px;
+  padding: 15px;
+  border-bottom: 1px solid #e5e5e5;
+}
+.modal-header .close {
+  margin-top: -2px;
+}
+.modal-title {
+  margin: 0;
+  line-height: 1.428571429;
+}
+.modal-body {
+  position: relative;
+  padding: 20px;
+}
+.modal-footer {
+  padding: 19px 20px 20px;
+  margin-top: 15px;
+  text-align: right;
+  border-top: 1px solid #e5e5e5;
+}
+.modal-footer .btn + .btn {
+  margin-bottom: 0;
+  margin-left: 5px;
+}
+.modal-footer .btn-group .btn + .btn {
+  margin-left: -1px;
+}
+.modal-footer .btn-block + .btn-block {
+  margin-left: 0;
+}
+@media (min-width: 768px) {
+  .modal-dialog {
+    width: 600px;
+    margin: 30px auto;
+  }
+  .modal-content {
+    -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, .5);
+            box-shadow: 0 5px 15px rgba(0, 0, 0, .5);
+  }
+  .modal-sm {
+    width: 300px;
+  }
+  .modal-lg {
+    width: 900px;
+  }
+}
+.tooltip {
+  position: absolute;
+  z-index: 1030;
+  display: block;
+  font-size: 12px;
+  line-height: 1.4;
+  visibility: visible;
+  filter: alpha(opacity=0);
+  opacity: 0;
+}
+.tooltip.in {
+  filter: alpha(opacity=90);
+  opacity: .9;
+}
+.tooltip.top {
+  padding: 5px 0;
+  margin-top: -3px;
+}
+.tooltip.right {
+  padding: 0 5px;
+  margin-left: 3px;
+}
+.tooltip.bottom {
+  padding: 5px 0;
+  margin-top: 3px;
+}
+.tooltip.left {
+  padding: 0 5px;
+  margin-left: -3px;
+}
+.tooltip-inner {
+  max-width: 200px;
+  padding: 3px 8px;
+  color: #fff;
+  text-align: center;
+  text-decoration: none;
+  background-color: #000;
+  border-radius: 4px;
+}
+.tooltip-arrow {
+  position: absolute;
+  width: 0;
+  height: 0;
+  border-color: transparent;
+  border-style: solid;
+}
+.tooltip.top .tooltip-arrow {
+  bottom: 0;
+  left: 50%;
+  margin-left: -5px;
+  border-width: 5px 5px 0;
+  border-top-color: #000;
+}
+.tooltip.top-left .tooltip-arrow {
+  bottom: 0;
+  left: 5px;
+  border-width: 5px 5px 0;
+  border-top-color: #000;
+}
+.tooltip.top-right .tooltip-arrow {
+  right: 5px;
+  bottom: 0;
+  border-width: 5px 5px 0;
+  border-top-color: #000;
+}
+.tooltip.right .tooltip-arrow {
+  top: 50%;
+  left: 0;
+  margin-top: -5px;
+  border-width: 5px 5px 5px 0;
+  border-right-color: #000;
+}
+.tooltip.left .tooltip-arrow {
+  top: 50%;
+  right: 0;
+  margin-top: -5px;
+  border-width: 5px 0 5px 5px;
+  border-left-color: #000;
+}
+.tooltip.bottom .tooltip-arrow {
+  top: 0;
+  left: 50%;
+  margin-left: -5px;
+  border-width: 0 5px 5px;
+  border-bottom-color: #000;
+}
+.tooltip.bottom-left .tooltip-arrow {
+  top: 0;
+  left: 5px;
+  border-width: 0 5px 5px;
+  border-bottom-color: #000;
+}
+.tooltip.bottom-right .tooltip-arrow {
+  top: 0;
+  right: 5px;
+  border-width: 0 5px 5px;
+  border-bottom-color: #000;
+}
+.popover {
+  position: absolute;
+  top: 0;
+  left: 0;
+  z-index: 1010;
+  display: none;
+  max-width: 276px;
+  padding: 1px;
+  text-align: left;
+  white-space: normal;
+  background-color: #fff;
+  background-clip: padding-box;
+  border: 1px solid #ccc;
+  border: 1px solid rgba(0, 0, 0, .2);
+  border-radius: 6px;
+  -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, .2);
+          box-shadow: 0 5px 10px rgba(0, 0, 0, .2);
+}
+.popover.top {
+  margin-top: -10px;
+}
+.popover.right {
+  margin-left: 10px;
+}
+.popover.bottom {
+  margin-top: 10px;
+}
+.popover.left {
+  margin-left: -10px;
+}
+.popover-title {
+  padding: 8px 14px;
+  margin: 0;
+  font-size: 14px;
+  font-weight: normal;
+  line-height: 18px;
+  background-color: #f7f7f7;
+  border-bottom: 1px solid #ebebeb;
+  border-radius: 5px 5px 0 0;
+}
+.popover-content {
+  padding: 9px 14px;
+}
+.popover .arrow,
+.popover .arrow:after {
+  position: absolute;
+  display: block;
+  width: 0;
+  height: 0;
+  border-color: transparent;
+  border-style: solid;
+}
+.popover .arrow {
+  border-width: 11px;
+}
+.popover .arrow:after {
+  content: "";
+  border-width: 10px;
+}
+.popover.top .arrow {
+  bottom: -11px;
+  left: 50%;
+  margin-left: -11px;
+  border-top-color: #999;
+  border-top-color: rgba(0, 0, 0, .25);
+  border-bottom-width: 0;
+}
+.popover.top .arrow:after {
+  bottom: 1px;
+  margin-left: -10px;
+  content: " ";
+  border-top-color: #fff;
+  border-bottom-width: 0;
+}
+.popover.right .arrow {
+  top: 50%;
+  left: -11px;
+  margin-top: -11px;
+  border-right-color: #999;
+  border-right-color: rgba(0, 0, 0, .25);
+  border-left-width: 0;
+}
+.popover.right .arrow:after {
+  bottom: -10px;
+  left: 1px;
+  content: " ";
+  border-right-color: #fff;
+  border-left-width: 0;
+}
+.popover.bottom .arrow {
+  top: -11px;
+  left: 50%;
+  margin-left: -11px;
+  border-top-width: 0;
+  border-bottom-color: #999;
+  border-bottom-color: rgba(0, 0, 0, .25);
+}
+.popover.bottom .arrow:after {
+  top: 1px;
+  margin-left: -10px;
+  content: " ";
+  border-top-width: 0;
+  border-bottom-color: #fff;
+}
+.popover.left .arrow {
+  top: 50%;
+  right: -11px;
+  margin-top: -11px;
+  border-right-width: 0;
+  border-left-color: #999;
+  border-left-color: rgba(0, 0, 0, .25);
+}
+.popover.left .arrow:after {
+  right: 1px;
+  bottom: -10px;
+  content: " ";
+  border-right-width: 0;
+  border-left-color: #fff;
+}
+.carousel {
+  position: relative;
+}
+.carousel-inner {
+  position: relative;
+  width: 100%;
+  overflow: hidden;
+}
+.carousel-inner > .item {
+  position: relative;
+  display: none;
+  -webkit-transition: .6s ease-in-out left;
+          transition: .6s ease-in-out left;
+}
+.carousel-inner > .item > img,
+.carousel-inner > .item > a > img {
+  display: block;
+  max-width: 100%;
+  height: auto;
+  line-height: 1;
+}
+.carousel-inner > .active,
+.carousel-inner > .next,
+.carousel-inner > .prev {
+  display: block;
+}
+.carousel-inner > .active {
+  left: 0;
+}
+.carousel-inner > .next,
+.carousel-inner > .prev {
+  position: absolute;
+  top: 0;
+  width: 100%;
+}
+.carousel-inner > .next {
+  left: 100%;
+}
+.carousel-inner > .prev {
+  left: -100%;
+}
+.carousel-inner > .next.left,
+.carousel-inner > .prev.right {
+  left: 0;
+}
+.carousel-inner > .active.left {
+  left: -100%;
+}
+.carousel-inner > .active.right {
+  left: 100%;
+}
+.carousel-control {
+  position: absolute;
+  top: 0;
+  bottom: 0;
+  left: 0;
+  width: 15%;
+  font-size: 20px;
+  color: #fff;
+  text-align: center;
+  text-shadow: 0 1px 2px rgba(0, 0, 0, .6);
+  filter: alpha(opacity=50);
+  opacity: .5;
+}
+.carousel-control.left {
+  background-image: -webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, .5) 0%), color-stop(rgba(0, 0, 0, .0001) 100%));
+  background-image:         linear-gradient(to right, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);
+  background-repeat: repeat-x;
+}
+.carousel-control.right {
+  right: 0;
+  left: auto;
+  background-image: -webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, .0001) 0%), color-stop(rgba(0, 0, 0, .5) 100%));
+  background-image:         linear-gradient(to right, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);
+  background-repeat: repeat-x;
+}
+.carousel-control:hover,
+.carousel-control:focus {
+  color: #fff;
+  text-decoration: none;
+  filter: alpha(opacity=90);
+  outline: none;
+  opacity: .9;
+}
+.carousel-control .icon-prev,
+.carousel-control .icon-next,
+.carousel-control .glyphicon-chevron-left,
+.carousel-control .glyphicon-chevron-right {
+  position: absolute;
+  top: 50%;
+  z-index: 5;
+  display: inline-block;
+}
+.carousel-control .icon-prev,
+.carousel-control .glyphicon-chevron-left {
+  left: 50%;
+}
+.carousel-control .icon-next,
+.carousel-control .glyphicon-chevron-right {
+  right: 50%;
+}
+.carousel-control .icon-prev,
+.carousel-control .icon-next {
+  width: 20px;
+  height: 20px;
+  margin-top: -10px;
+  margin-left: -10px;
+  font-family: serif;
+}
+.carousel-control .icon-prev:before {
+  content: '\2039';
+}
+.carousel-control .icon-next:before {
+  content: '\203a';
+}
+.carousel-indicators {
+  position: absolute;
+  bottom: 10px;
+  left: 50%;
+  z-index: 15;
+  width: 60%;
+  padding-left: 0;
+  margin-left: -30%;
+  text-align: center;
+  list-style: none;
+}
+.carousel-indicators li {
+  display: inline-block;
+  width: 10px;
+  height: 10px;
+  margin: 1px;
+  text-indent: -999px;
+  cursor: pointer;
+  background-color: #000 \9;
+  background-color: rgba(0, 0, 0, 0);
+  border: 1px solid #fff;
+  border-radius: 10px;
+}
+.carousel-indicators .active {
+  width: 12px;
+  height: 12px;
+  margin: 0;
+  background-color: #fff;
+}
+.carousel-caption {
+  position: absolute;
+  right: 15%;
+  bottom: 20px;
+  left: 15%;
+  z-index: 10;
+  padding-top: 20px;
+  padding-bottom: 20px;
+  color: #fff;
+  text-align: center;
+  text-shadow: 0 1px 2px rgba(0, 0, 0, .6);
+}
+.carousel-caption .btn {
+  text-shadow: none;
+}
+@media screen and (min-width: 768px) {
+  .carousel-control .glyphicon-chevron-left,
+  .carousel-control .glyphicon-chevron-right,
+  .carousel-control .icon-prev,
+  .carousel-control .icon-next {
+    width: 30px;
+    height: 30px;
+    margin-top: -15px;
+    margin-left: -15px;
+    font-size: 30px;
+  }
+  .carousel-caption {
+    right: 20%;
+    left: 20%;
+    padding-bottom: 30px;
+  }
+  .carousel-indicators {
+    bottom: 20px;
+  }
+}
+.clearfix:before,
+.clearfix:after,
+.container:before,
+.container:after,
+.container-fluid:before,
+.container-fluid:after,
+.row:before,
+.row:after,
+.form-horizontal .form-group:before,
+.form-horizontal .form-group:after,
+.btn-toolbar:before,
+.btn-toolbar:after,
+.btn-group-vertical > .btn-group:before,
+.btn-group-vertical > .btn-group:after,
+.nav:before,
+.nav:after,
+.navbar:before,
+.navbar:after,
+.navbar-header:before,
+.navbar-header:after,
+.navbar-collapse:before,
+.navbar-collapse:after,
+.pager:before,
+.pager:after,
+.panel-body:before,
+.panel-body:after,
+.modal-footer:before,
+.modal-footer:after {
+  display: table;
+  content: " ";
+}
+.clearfix:after,
+.container:after,
+.container-fluid:after,
+.row:after,
+.form-horizontal .form-group:after,
+.btn-toolbar:after,
+.btn-group-vertical > .btn-group:after,
+.nav:after,
+.navbar:after,
+.navbar-header:after,
+.navbar-collapse:after,
+.pager:after,
+.panel-body:after,
+.modal-footer:after {
+  clear: both;
+}
+.center-block {
+  display: block;
+  margin-right: auto;
+  margin-left: auto;
+}
+.pull-right {
+  float: right !important;
+}
+.pull-left {
+  float: left !important;
+}
+.hide {
+  display: none !important;
+}
+.show {
+  display: block !important;
+}
+.invisible {
+  visibility: hidden;
+}
+.text-hide {
+  font: 0/0 a;
+  color: transparent;
+  text-shadow: none;
+  background-color: transparent;
+  border: 0;
+}
+.hidden {
+  display: none !important;
+  visibility: hidden !important;
+}
+.affix {
+  position: fixed;
+}
+@-ms-viewport {
+  width: device-width;
+}
+.visible-xs,
+tr.visible-xs,
+th.visible-xs,
+td.visible-xs {
+  display: none !important;
+}
+@media (max-width: 767px) {
+  .visible-xs {
+    display: block !important;
+  }
+  table.visible-xs {
+    display: table;
+  }
+  tr.visible-xs {
+    display: table-row !important;
+  }
+  th.visible-xs,
+  td.visible-xs {
+    display: table-cell !important;
+  }
+}
+.visible-sm,
+tr.visible-sm,
+th.visible-sm,
+td.visible-sm {
+  display: none !important;
+}
+@media (min-width: 768px) and (max-width: 991px) {
+  .visible-sm {
+    display: block !important;
+  }
+  table.visible-sm {
+    display: table;
+  }
+  tr.visible-sm {
+    display: table-row !important;
+  }
+  th.visible-sm,
+  td.visible-sm {
+    display: table-cell !important;
+  }
+}
+.visible-md,
+tr.visible-md,
+th.visible-md,
+td.visible-md {
+  display: none !important;
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+  .visible-md {
+    display: block !important;
+  }
+  table.visible-md {
+    display: table;
+  }
+  tr.visible-md {
+    display: table-row !important;
+  }
+  th.visible-md,
+  td.visible-md {
+    display: table-cell !important;
+  }
+}
+.visible-lg,
+tr.visible-lg,
+th.visible-lg,
+td.visible-lg {
+  display: none !important;
+}
+@media (min-width: 1200px) {
+  .visible-lg {
+    display: block !important;
+  }
+  table.visible-lg {
+    display: table;
+  }
+  tr.visible-lg {
+    display: table-row !important;
+  }
+  th.visible-lg,
+  td.visible-lg {
+    display: table-cell !important;
+  }
+}
+@media (max-width: 767px) {
+  .hidden-xs,
+  tr.hidden-xs,
+  th.hidden-xs,
+  td.hidden-xs {
+    display: none !important;
+  }
+}
+@media (min-width: 768px) and (max-width: 991px) {
+  .hidden-sm,
+  tr.hidden-sm,
+  th.hidden-sm,
+  td.hidden-sm {
+    display: none !important;
+  }
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+  .hidden-md,
+  tr.hidden-md,
+  th.hidden-md,
+  td.hidden-md {
+    display: none !important;
+  }
+}
+@media (min-width: 1200px) {
+  .hidden-lg,
+  tr.hidden-lg,
+  th.hidden-lg,
+  td.hidden-lg {
+    display: none !important;
+  }
+}
+.visible-print,
+tr.visible-print,
+th.visible-print,
+td.visible-print {
+  display: none !important;
+}
+@media print {
+  .visible-print {
+    display: block !important;
+  }
+  table.visible-print {
+    display: table;
+  }
+  tr.visible-print {
+    display: table-row !important;
+  }
+  th.visible-print,
+  td.visible-print {
+    display: table-cell !important;
+  }
+}
+@media print {
+  .hidden-print,
+  tr.hidden-print,
+  th.hidden-print,
+  td.hidden-print {
+    display: none !important;
+  }
+}
+/*# sourceMappingURL=bootstrap.css.map */
diff --git a/doc/css/bootstrap.css.map b/doc/css/bootstrap.css.map
new file mode 100644 (file)
index 0000000..e1836ba
--- /dev/null
@@ -0,0 +1 @@
+{"version":3,"sources":["less/normalize.less","less/print.less","less/scaffolding.less","less/mixins.less","less/variables.less","less/type.less","less/code.less","less/grid.less","less/tables.less","less/forms.less","less/buttons.less","less/component-animations.less","less/glyphicons.less","less/dropdowns.less","less/button-groups.less","less/input-groups.less","less/navs.less","less/navbar.less","less/utilities.less","less/breadcrumbs.less","less/pagination.less","less/pager.less","less/labels.less","less/badges.less","less/jumbotron.less","less/thumbnails.less","less/alerts.less","less/progress-bars.less","less/media.less","less/list-group.less","less/panels.less","less/wells.less","less/close.less","less/modals.less","less/tooltip.less","less/popovers.less","less/carousel.less","less/responsive-utilities.less"],"names":[],"mappings":";AAQA;EACE,uBAAA;EACA,0BAAA;EACA,8BAAA;;AAOF;EACE,SAAA;;AAUF;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;EACE,cAAA;;AAQF;AACA;AACA;AACA;EACE,qBAAA;EACA,wBAAA;;AAQF,KAAK,IAAI;EACP,aAAA;EACA,SAAA;;AAQF;AACA;EACE,aAAA;;AAUF;EACE,uBAAA;;AAOF,CAAC;AACD,CAAC;EACC,UAAA;;AAUF,IAAI;EACF,yBAAA;;AAOF;AACA;EACE,iBAAA;;AAOF;EACE,kBAAA;;AAQF;EACE,cAAA;EACA,gBAAA;;AAOF;EACE,gBAAA;EACA,WAAA;;AAOF;EACE,cAAA;;AAOF;AACA;EACE,cAAA;EACA,cAAA;EACA,kBAAA;EACA,wBAAA;;AAGF;EACE,WAAA;;AAGF;EACE,eAAA;;AAUF;EACE,SAAA;;AAOF,GAAG,IAAI;EACL,gBAAA;;AAUF;EACE,gBAAA;;AAOF;EACE,4BAAA;EACA,uBAAA;EACA,SAAA;;AAOF;EACE,cAAA;;AAOF;AACA;AACA;AACA;EACE,iCAAA;EACA,cAAA;;AAkBF;AACA;AACA;AACA;AACA;EACE,cAAA;EACA,aAAA;EACA,SAAA;;AAOF;EACE,iBAAA;;AAUF;AACA;EACE,oBAAA;;AAWF;AACA,IAAK,MAAK;AACV,KAAK;AACL,KAAK;EACH,0BAAA;EACA,eAAA;;AAOF,MAAM;AACN,IAAK,MAAK;EACR,eAAA;;AAOF,MAAM;AACN,KAAK;EACH,SAAA;EACA,UAAA;;AAQF;EACE,mBAAA;;AAWF,KAAK;AACL,KAAK;EACH,sBAAA;EACA,UAAA;;AASF,KAAK,eAAe;AACpB,KAAK,eAAe;EAClB,YAAA;;AASF,KAAK;EACH,6BAAA;EACA,4BAAA;EACA,+BAAA;EACA,uBAAA;;AASF,KAAK,eAAe;AACpB,KAAK,eAAe;EAClB,wBAAA;;AAOF;EACE,yBAAA;EACA,aAAA;EACA,8BAAA;;AAQF;EACE,SAAA;EACA,UAAA;;AAOF;EACE,cAAA;;AAQF;EACE,iBAAA;;AAUF;EACE,yBAAA;EACA,iBAAA;;AAGF;AACA;EACE,UAAA;;AChUF;EA9FE;IACE,4BAAA;IACA,sBAAA;IACA,kCAAA;IACA,2BAAA;;EAGF;EACA,CAAC;IACC,0BAAA;;EAGF,CAAC,MAAM;IACL,SAAS,KAAK,WAAW,GAAzB;;EAGF,IAAI,OAAO;IACT,SAAS,KAAK,YAAY,GAA1B;;EAIF,CAAC,qBAAqB;EACtB,CAAC,WAAW;IACV,SAAS,EAAT;;EAGF;EACA;IACE,sBAAA;IACA,wBAAA;;EAGF;IACE,2BAAA;;EAGF;EACA;IACE,wBAAA;;EAGF;IACE,0BAAA;;EAGF;EACA;EACA;IACE,UAAA;IACA,SAAA;;EAGF;EACA;IACE,uBAAA;;EAKF;IACE,2BAAA;;EAIF;IACE,aAAA;;EAEF,MACE;EADF,MAEE;IACE,iCAAA;;EAGJ,IAEE;EADF,OAAQ,OACN;IACE,iCAAA;;EAGJ;IACE,sBAAA;;EAGF;IACE,oCAAA;;EAEF,eACE;EADF,eAEE;IACE,iCAAA;;;ACtFN;EC0OE,8BAAA;EACG,2BAAA;EACK,sBAAA;;ADzOV,CAAC;AACD,CAAC;ECsOC,8BAAA;EACG,2BAAA;EACK,sBAAA;;ADjOV;EACE,gBAAA;EACA,6CAAA;;AAGF;EACE,aEcwB,8CFdxB;EACA,eAAA;EACA,wBAAA;EACA,cAAA;EACA,yBAAA;;AAIF;AACA;AACA;AACA;EACE,oBAAA;EACA,kBAAA;EACA,oBAAA;;AAMF;EACE,cAAA;EACA,qBAAA;;AAEA,CAAC;AACD,CAAC;EACC,cAAA;EACA,0BAAA;;AAGF,CAAC;ECzBD,oBAAA;EAEA,0CAAA;EACA,oBAAA;;ADiCF;EACE,SAAA;;AAMF;EACE,sBAAA;;AAIF;ECiTE,cAAA;EACA,eAAA;EACA,YAAA;;AD9SF;EACE,kBAAA;;AAMF;EACE,YAAA;EACA,wBAAA;EACA,yBAAA;EACA,yBAAA;EACA,kBAAA;EC+BA,wCAAA;EACQ,gCAAA;EAgQR,qBAAA;EACA,eAAA;EACA,YAAA;;AD1RF;EACE,kBAAA;;AAMF;EACE,gBAAA;EACA,mBAAA;EACA,SAAA;EACA,6BAAA;;AAQF;EACE,kBAAA;EACA,UAAA;EACA,WAAA;EACA,YAAA;EACA,UAAA;EACA,gBAAA;EACA,MAAM,gBAAN;EACA,SAAA;;AG5HF;AAAI;AAAI;AAAI;AAAI;AAAI;AACpB;AAAK;AAAK;AAAK;AAAK;AAAK;EACvB,oBAAA;EACA,gBAAA;EACA,gBAAA;EACA,cAAA;;AALF,EAOE;AAPE,EAOF;AAPM,EAON;AAPU,EAOV;AAPc,EAOd;AAPkB,EAOlB;AANF,GAME;AANG,GAMH;AANQ,GAMR;AANa,GAMb;AANkB,G
[Generated artifact: compiled CSS source map (source map v3 format), produced by the Less compiler for Workbench's Bootstrap stylesheet. The "mappings" field occupying the bulk of this file is a base64-VLQ blob and is not human-editable. The trailing "sourcesContent" array embeds the original Less/CSS sources verbatim, beginning with normalize.css v3.0.0 (MIT License | git.io/normalize), followed by Bootstrap's basic print styles (sourced from h5bp/html5-boilerplate) and the opening of the scaffolding rules; the embedded text is cut off mid-file at this point in the dump.]
This\n// ensures we don't break anyone's use of the element.\n\nfigure {\n  margin: 0;\n}\n\n\n// Images\n\nimg {\n  vertical-align: middle;\n}\n\n// Responsive images (ensure images don't scale beyond their parents)\n.img-responsive {\n  .img-responsive();\n}\n\n// Rounded corners\n.img-rounded {\n  border-radius: @border-radius-large;\n}\n\n// Image thumbnails\n//\n// Heads up! This is mixin-ed into thumbnails.less for `.thumbnail`.\n.img-thumbnail {\n  padding: @thumbnail-padding;\n  line-height: @line-height-base;\n  background-color: @thumbnail-bg;\n  border: 1px solid @thumbnail-border;\n  border-radius: @thumbnail-border-radius;\n  .transition(all .2s ease-in-out);\n\n  // Keep them at most 100% wide\n  .img-responsive(inline-block);\n}\n\n// Perfect circle\n.img-circle {\n  border-radius: 50%; // set radius in percents\n}\n\n\n// Horizontal rules\n\nhr {\n  margin-top:    @line-height-computed;\n  margin-bottom: @line-height-computed;\n  border: 0;\n  border-top: 1px solid @hr-border;\n}\n\n\n// Only display content to screen readers\n//\n// See: http://a11yproject.com/posts/how-to-hide-content/\n\n.sr-only {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0,0,0,0);\n  border: 0;\n}\n","//\n// Mixins\n// --------------------------------------------------\n\n\n// Utilities\n// -------------------------\n\n// Clearfix\n// Source: http://nicolasgallagher.com/micro-clearfix-hack/\n//\n// For modern browsers\n// 1. The space content is one way to avoid an Opera bug when the\n//    contenteditable attribute is included anywhere else in the document.\n//    Otherwise it causes space to appear at the top and bottom of elements\n//    that are clearfixed.\n// 2. The use of `table` rather than `block` is only necessary if using\n//    `:before` to contain the top-margins of child elements.\n.clearfix() {\n  &:before,\n  &:after {\n    content: \" \"; // 1\n    display: table; // 2\n  }\n  &:after {\n    clear: both;\n  }\n}\n\n// WebKit-style focus\n.tab-focus() {\n  // Default\n  outline: thin dotted;\n  // WebKit\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n\n// Center-align a block level element\n.center-block() {\n  display: block;\n  margin-left: auto;\n  margin-right: auto;\n}\n\n// Sizing shortcuts\n.size(@width; @height) {\n  width: @width;\n  height: @height;\n}\n.square(@size) {\n  .size(@size; @size);\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n  &:-moz-placeholder            { color: @color; } // Firefox 4-18\n  &::-moz-placeholder           { color: @color;   // Firefox 19+\n                                  opacity: 1; } // See https://github.com/twbs/bootstrap/pull/11526\n  &:-ms-input-placeholder       { color: @color; } // Internet Explorer 10+\n  &::-webkit-input-placeholder  { color: @color; } // Safari and Chrome\n}\n\n// Text overflow\n// Requires inline-block or block for proper styling\n.text-overflow() {\n  overflow: hidden;\n  text-overflow: ellipsis;\n  white-space: nowrap;\n}\n\n// CSS image replacement\n//\n// Heads up! v3 launched with with only `.hide-text()`, but per our pattern for\n// mixins being reused as classes with the same name, this doesn't hold up. As\n// of v3.0.1 we have added `.text-hide()` and deprecated `.hide-text()`. 
Note\n// that we cannot chain the mixins together in Less, so they are repeated.\n//\n// Source: https://github.com/h5bp/html5-boilerplate/commit/aa0396eae757\n\n// Deprecated as of v3.0.1 (will be removed in v4)\n.hide-text() {\n  font: ~\"0/0\" a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n// New mixin to use as of v3.0.1\n.text-hide() {\n  .hide-text();\n}\n\n\n\n// CSS3 PROPERTIES\n// --------------------------------------------------\n\n// Single side border-radius\n.border-top-radius(@radius) {\n  border-top-right-radius: @radius;\n   border-top-left-radius: @radius;\n}\n.border-right-radius(@radius) {\n  border-bottom-right-radius: @radius;\n     border-top-right-radius: @radius;\n}\n.border-bottom-radius(@radius) {\n  border-bottom-right-radius: @radius;\n   border-bottom-left-radius: @radius;\n}\n.border-left-radius(@radius) {\n  border-bottom-left-radius: @radius;\n     border-top-left-radius: @radius;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n//   supported browsers that have box shadow capabilities now support the\n//   standard `box-shadow` property.\n.box-shadow(@shadow) {\n  -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n          box-shadow: @shadow;\n}\n\n// Transitions\n.transition(@transition) {\n  -webkit-transition: @transition;\n          transition: @transition;\n}\n.transition-property(@transition-property) {\n  -webkit-transition-property: @transition-property;\n          transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n  -webkit-transition-delay: @transition-delay;\n          transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n  -webkit-transition-duration: @transition-duration;\n          transition-duration: @transition-duration;\n}\n.transition-transform(@transition) {\n  -webkit-transition: -webkit-transform @transition;\n     -moz-transition: -moz-transform @transition;\n       -o-transition: -o-transform @transition;\n          transition: transform @transition;\n}\n\n// Transformations\n.rotate(@degrees) {\n  -webkit-transform: rotate(@degrees);\n      -ms-transform: rotate(@degrees); // IE9 only\n          transform: rotate(@degrees);\n}\n.scale(@ratio; @ratio-y...) 
{\n  -webkit-transform: scale(@ratio, @ratio-y);\n      -ms-transform: scale(@ratio, @ratio-y); // IE9 only\n          transform: scale(@ratio, @ratio-y);\n}\n.translate(@x; @y) {\n  -webkit-transform: translate(@x, @y);\n      -ms-transform: translate(@x, @y); // IE9 only\n          transform: translate(@x, @y);\n}\n.skew(@x; @y) {\n  -webkit-transform: skew(@x, @y);\n      -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n          transform: skew(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n  -webkit-transform: translate3d(@x, @y, @z);\n          transform: translate3d(@x, @y, @z);\n}\n\n.rotateX(@degrees) {\n  -webkit-transform: rotateX(@degrees);\n      -ms-transform: rotateX(@degrees); // IE9 only\n          transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n  -webkit-transform: rotateY(@degrees);\n      -ms-transform: rotateY(@degrees); // IE9 only\n          transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n  -webkit-perspective: @perspective;\n     -moz-perspective: @perspective;\n          perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n  -webkit-perspective-origin: @perspective;\n     -moz-perspective-origin: @perspective;\n          perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n  -webkit-transform-origin: @origin;\n     -moz-transform-origin: @origin;\n      -ms-transform-origin: @origin; // IE9 only\n          transform-origin: @origin;\n}\n\n// Animations\n.animation(@animation) {\n  -webkit-animation: @animation;\n          animation: @animation;\n}\n.animation-name(@name) {\n  -webkit-animation-name: @name;\n          animation-name: @name;\n}\n.animation-duration(@duration) {\n  -webkit-animation-duration: @duration;\n          animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n  -webkit-animation-timing-function: @timing-function;\n          animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n  -webkit-animation-delay: @delay;\n          animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n  -webkit-animation-iteration-count: @iteration-count;\n          animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n  -webkit-animation-direction: @direction;\n          animation-direction: @direction;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n.backface-visibility(@visibility){\n  -webkit-backface-visibility: @visibility;\n     -moz-backface-visibility: @visibility;\n          backface-visibility: @visibility;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n  -webkit-box-sizing: @boxmodel;\n     -moz-box-sizing: @boxmodel;\n          box-sizing: @boxmodel;\n}\n\n// User select\n// For selecting text on the page\n.user-select(@select) {\n  -webkit-user-select: @select;\n     -moz-user-select: @select;\n      -ms-user-select: @select; // IE10+\n       -o-user-select: @select;\n          user-select: @select;\n}\n\n// Resize anything\n.resizable(@direction) {\n  resize: @direction; // Options: horizontal, vertical, both\n  overflow: auto; // Safari fix\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n  -webkit-column-count: @column-count;\n     -moz-column-count: @column-count;\n          column-count: @column-count;\n  -webkit-column-gap: @column-gap;\n     -moz-column-gap: @column-gap;\n          column-gap: 
@column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n  word-wrap: break-word;\n  -webkit-hyphens: @mode;\n     -moz-hyphens: @mode;\n      -ms-hyphens: @mode; // IE10+\n       -o-hyphens: @mode;\n          hyphens: @mode;\n}\n\n// Opacity\n.opacity(@opacity) {\n  opacity: @opacity;\n  // IE8 filter\n  @opacity-ie: (@opacity * 100);\n  filter: ~\"alpha(opacity=@{opacity-ie})\";\n}\n\n\n\n// GRADIENTS\n// --------------------------------------------------\n\n#gradient {\n\n  // Horizontal gradient, from left to right\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(left, color-stop(@start-color @start-percent), color-stop(@end-color @end-percent)); // Safari 5.1-6, Chrome 10+\n    background-image:  linear-gradient(to right, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\",argb(@start-color),argb(@end-color))); // IE9 and down\n  }\n\n  // Vertical gradient, from top to bottom\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Safari 5.1-6, Chrome 10+\n    background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\",argb(@start-color),argb(@end-color))); // IE9 and down\n  }\n\n  .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n    background-repeat: repeat-x;\n    background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n    background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n  }\n  .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n    background-repeat: no-repeat;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\",argb(@start-color),argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n  }\n  .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-repeat: no-repeat;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', 
GradientType=0)\",argb(@start-color),argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n  }\n  .radial(@inner-color: #555; @outer-color: #333) {\n    background-image: -webkit-radial-gradient(circle, @inner-color, @outer-color);\n    background-image: radial-gradient(circle, @inner-color, @outer-color);\n    background-repeat: no-repeat;\n  }\n  .striped(@color: rgba(255,255,255,.15); @angle: 45deg) {\n    background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n  }\n}\n\n// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n.reset-filter() {\n  filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n\n\n\n// Retina images\n//\n// Short retina mixin for setting background-image and -size\n\n.img-retina(@file-1x; @file-2x; @width-1x; @height-1x) {\n  background-image: url(\"@{file-1x}\");\n\n  @media\n  only screen and (-webkit-min-device-pixel-ratio: 2),\n  only screen and (   min--moz-device-pixel-ratio: 2),\n  only screen and (     -o-min-device-pixel-ratio: 2/1),\n  only screen and (        min-device-pixel-ratio: 2),\n  only screen and (                min-resolution: 192dpi),\n  only screen and (                min-resolution: 2dppx) {\n    background-image: url(\"@{file-2x}\");\n    background-size: @width-1x @height-1x;\n  }\n}\n\n\n// Responsive image\n//\n// Keep images from scaling beyond the width of their parents.\n\n.img-responsive(@display: block) {\n  display: @display;\n  max-width: 100%; // Part 1: Set a maximum relative to the parent\n  height: auto; // Part 2: Scale the height according to the width, otherwise you get stretching\n}\n\n\n// COMPONENT MIXINS\n// --------------------------------------------------\n\n// Horizontal dividers\n// -------------------------\n// Dividers (basically an hr) within dropdowns and nav lists\n.nav-divider(@color: #e5e5e5) {\n  height: 1px;\n  margin: ((@line-height-computed / 2) - 1) 0;\n  overflow: hidden;\n  background-color: @color;\n}\n\n// Panels\n// -------------------------\n.panel-variant(@border; @heading-text-color; @heading-bg-color; @heading-border) {\n  border-color: @border;\n\n  & > .panel-heading {\n    color: @heading-text-color;\n    background-color: @heading-bg-color;\n    border-color: @heading-border;\n\n    + .panel-collapse .panel-body {\n      border-top-color: @border;\n    }\n  }\n  & > .panel-footer {\n    + .panel-collapse .panel-body {\n      border-bottom-color: @border;\n    }\n  }\n}\n\n// Alerts\n// -------------------------\n.alert-variant(@background; @border; @text-color) {\n  background-color: @background;\n  border-color: @border;\n  color: @text-color;\n\n  hr {\n    border-top-color: darken(@border, 5%);\n  }\n  .alert-link {\n    color: darken(@text-color, 10%);\n  }\n}\n\n// Tables\n// -------------------------\n.table-row-variant(@state; @background) {\n  // Exact selectors below required to override `.table-striped` and prevent\n  // inheritance to nested tables.\n  .table > thead > tr,\n  .table > tbody > tr,\n  .table > tfoot > tr {\n    > td.@{state},\n    > th.@{state},\n    &.@{state} > td,\n    &.@{state} > th {\n      background-color: @background;\n    }\n  }\n\n  // Hover states for 
`.table-hover`\n  // Note: this is not available for cells or rows within `thead` or `tfoot`.\n  .table-hover > tbody > tr {\n    > td.@{state}:hover,\n    > th.@{state}:hover,\n    &.@{state}:hover > td,\n    &.@{state}:hover > th {\n      background-color: darken(@background, 5%);\n    }\n  }\n}\n\n// List Groups\n// -------------------------\n.list-group-item-variant(@state; @background; @color) {\n  .list-group-item-@{state} {\n    color: @color;\n    background-color: @background;\n\n    a& {\n      color: @color;\n\n      .list-group-item-heading { color: inherit; }\n\n      &:hover,\n      &:focus {\n        color: @color;\n        background-color: darken(@background, 5%);\n      }\n      &.active,\n      &.active:hover,\n      &.active:focus {\n        color: #fff;\n        background-color: @color;\n        border-color: @color;\n      }\n    }\n  }\n}\n\n// Button variants\n// -------------------------\n// Easily pump out default styles, as well as :hover, :focus, :active,\n// and disabled options for all buttons\n.button-variant(@color; @background; @border) {\n  color: @color;\n  background-color: @background;\n  border-color: @border;\n\n  &:hover,\n  &:focus,\n  &:active,\n  &.active,\n  .open .dropdown-toggle& {\n    color: @color;\n    background-color: darken(@background, 8%);\n        border-color: darken(@border, 12%);\n  }\n  &:active,\n  &.active,\n  .open .dropdown-toggle& {\n    background-image: none;\n  }\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    &,\n    &:hover,\n    &:focus,\n    &:active,\n    &.active {\n      background-color: @background;\n          border-color: @border;\n    }\n  }\n\n  .badge {\n    color: @background;\n    background-color: @color;\n  }\n}\n\n// Button sizes\n// -------------------------\n.button-size(@padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n}\n\n// Pagination\n// -------------------------\n.pagination-size(@padding-vertical; @padding-horizontal; @font-size; @border-radius) {\n  > li {\n    > a,\n    > span {\n      padding: @padding-vertical @padding-horizontal;\n      font-size: @font-size;\n    }\n    &:first-child {\n      > a,\n      > span {\n        .border-left-radius(@border-radius);\n      }\n    }\n    &:last-child {\n      > a,\n      > span {\n        .border-right-radius(@border-radius);\n      }\n    }\n  }\n}\n\n// Labels\n// -------------------------\n.label-variant(@color) {\n  background-color: @color;\n  &[href] {\n    &:hover,\n    &:focus {\n      background-color: darken(@color, 10%);\n    }\n  }\n}\n\n// Contextual backgrounds\n// -------------------------\n.bg-variant(@color) {\n  background-color: @color;\n  a&:hover {\n    background-color: darken(@color, 10%);\n  }\n}\n\n// Typography\n// -------------------------\n.text-emphasis-variant(@color) {\n  color: @color;\n  a&:hover {\n    color: darken(@color, 10%);\n  }\n}\n\n// Navbar vertical align\n// -------------------------\n// Vertically center elements in the navbar.\n// Example: an element has a height of 30px, so write out `.navbar-vertical-align(30px);` to calculate the appropriate top margin.\n.navbar-vertical-align(@element-height) {\n  margin-top: ((@navbar-height - @element-height) / 2);\n  margin-bottom: ((@navbar-height - @element-height) / 2);\n}\n\n// Progress bars\n// -------------------------\n.progress-bar-variant(@color) {\n  
background-color: @color;\n  .progress-striped & {\n    #gradient > .striped();\n  }\n}\n\n// Responsive utilities\n// -------------------------\n// More easily include all the states for responsive-utilities.less.\n.responsive-visibility() {\n  display: block !important;\n  table&  { display: table; }\n  tr&     { display: table-row !important; }\n  th&,\n  td&     { display: table-cell !important; }\n}\n\n.responsive-invisibility() {\n    &,\n  tr&,\n  th&,\n  td& { display: none !important; }\n}\n\n\n// Grid System\n// -----------\n\n// Centered container element\n.container-fixed() {\n  margin-right: auto;\n  margin-left: auto;\n  padding-left:  (@grid-gutter-width / 2);\n  padding-right: (@grid-gutter-width / 2);\n  &:extend(.clearfix all);\n}\n\n// Creates a wrapper for a series of columns\n.make-row(@gutter: @grid-gutter-width) {\n  margin-left:  (@gutter / -2);\n  margin-right: (@gutter / -2);\n  &:extend(.clearfix all);\n}\n\n// Generate the extra small columns\n.make-xs-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  float: left;\n  width: percentage((@columns / @grid-columns));\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n}\n.make-xs-column-offset(@columns) {\n  @media (min-width: @screen-xs-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-xs-column-push(@columns) {\n  @media (min-width: @screen-xs-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-xs-column-pull(@columns) {\n  @media (min-width: @screen-xs-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Generate the small columns\n.make-sm-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n\n  @media (min-width: @screen-sm-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-offset(@columns) {\n  @media (min-width: @screen-sm-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-push(@columns) {\n  @media (min-width: @screen-sm-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-pull(@columns) {\n  @media (min-width: @screen-sm-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Generate the medium columns\n.make-md-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n\n  @media (min-width: @screen-md-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-offset(@columns) {\n  @media (min-width: @screen-md-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-push(@columns) {\n  @media (min-width: @screen-md-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-pull(@columns) {\n  @media (min-width: @screen-md-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Generate the large columns\n.make-lg-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n\n  @media (min-width: @screen-lg-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-offset(@columns) {\n  @media (min-width: @screen-lg-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  
}\n}\n.make-lg-column-push(@columns) {\n  @media (min-width: @screen-lg-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-pull(@columns) {\n  @media (min-width: @screen-lg-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Framework grid generation\n//\n// Used only by Bootstrap to generate the correct number of grid classes given\n// any value of `@grid-columns`.\n\n.make-grid-columns() {\n  // Common styles for all sizes of grid columns, widths 1-12\n  .col(@index) when (@index = 1) { // initial\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general; \"=<\" isn't a typo\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      position: relative;\n      // Prevent columns from collapsing when empty\n      min-height: 1px;\n      // Inner gutter via padding\n      padding-left:  (@grid-gutter-width / 2);\n      padding-right: (@grid-gutter-width / 2);\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.make-grid-columns-float(@class) {\n  .col(@index) when (@index = 1) { // initial\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      float: left;\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.calc-grid(@index, @class, @type) when (@type = width) and (@index > 0) {\n  .col-@{class}-@{index} {\n    width: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid(@index, @class, @type) when (@type = push) {\n  .col-@{class}-push-@{index} {\n    left: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid(@index, @class, @type) when (@type = pull) {\n  .col-@{class}-pull-@{index} {\n    right: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid(@index, @class, @type) when (@type = offset) {\n  .col-@{class}-offset-@{index} {\n    margin-left: percentage((@index / @grid-columns));\n  }\n}\n\n// Basic looping in LESS\n.make-grid(@index, @class, @type) when (@index >= 0) {\n  .calc-grid(@index, @class, @type);\n  // next iteration\n  .make-grid((@index - 1), @class, @type);\n}\n\n\n// Form validation states\n//\n// Used in forms.less to generate the form validation CSS for warnings, errors,\n// and successes.\n\n.form-control-validation(@text-color: #555; @border-color: #ccc; @background-color: #f5f5f5) {\n  // Color the label and help text\n  .help-block,\n  .control-label,\n  .radio,\n  .checkbox,\n  .radio-inline,\n  .checkbox-inline  {\n    color: @text-color;\n  }\n  // Set the border and box shadow on specific inputs to match\n  .form-control {\n    border-color: @border-color;\n    .box-shadow(inset 0 1px 1px rgba(0,0,0,.075)); // Redeclare so transitions work\n    &:focus {\n      border-color: darken(@border-color, 10%);\n      @shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 6px lighten(@border-color, 20%);\n      .box-shadow(@shadow);\n    }\n  }\n  // Set validation states also for addons\n  .input-group-addon {\n    color: @text-color;\n    border-color: @border-color;\n    background-color: @background-color;\n  }\n  // Optional feedback icon\n  
.form-control-feedback {\n    color: @text-color;\n  }\n}\n\n// Form control focus state\n//\n// Generate a customized focus state and for any input with the specified color,\n// which defaults to the `@input-focus-border` variable.\n//\n// We highly encourage you to not customize the default value, but instead use\n// this to tweak colors on an as-needed basis. This aesthetic change is based on\n// WebKit's default styles, but applicable to a wider range of browsers. Its\n// usability and accessibility should be taken into account with any change.\n//\n// Example usage: change the default blue border and shadow to white for better\n// contrast against a dark gray background.\n\n.form-control-focus(@color: @input-border-focus) {\n  @color-rgba: rgba(red(@color), green(@color), blue(@color), .6);\n  &:focus {\n    border-color: @color;\n    outline: 0;\n    .box-shadow(~\"inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px @{color-rgba}\");\n  }\n}\n\n// Form control sizing\n//\n// Relative text size, padding, and border-radii changes for form controls. For\n// horizontal sizing, wrap controls in the predefined grid classes. `<select>`\n// element gets special love because it's special, and that's a fact!\n\n.input-size(@input-height; @padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  height: @input-height;\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n\n  select& {\n    height: @input-height;\n    line-height: @input-height;\n  }\n\n  textarea&,\n  select[multiple]& {\n    height: auto;\n  }\n}\n","//\n// Variables\n// --------------------------------------------------\n\n\n//== Colors\n//\n//## Gray and brand colors for use across Bootstrap.\n\n@gray-darker:            lighten(#000, 13.5%); // #222\n@gray-dark:              lighten(#000, 20%);   // #333\n@gray:                   lighten(#000, 33.5%); // #555\n@gray-light:             lighten(#000, 60%);   // #999\n@gray-lighter:           lighten(#000, 93.5%); // #eee\n\n@brand-primary:         #428bca;\n@brand-success:         #5cb85c;\n@brand-info:            #5bc0de;\n@brand-warning:         #f0ad4e;\n@brand-danger:          #d9534f;\n\n\n//== Scaffolding\n//\n// ## Settings for some of the most global styles.\n\n//** Background color for `<body>`.\n@body-bg:               #fff;\n//** Global text color on `<body>`.\n@text-color:            @gray-dark;\n\n//** Global textual link color.\n@link-color:            @brand-primary;\n//** Link hover color set via `darken()` function.\n@link-hover-color:      darken(@link-color, 15%);\n\n\n//== Typography\n//\n//## Font, line-height, and color for body text, headings, and more.\n\n@font-family-sans-serif:  \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n@font-family-serif:       Georgia, \"Times New Roman\", Times, serif;\n//** Default monospace fonts for `<code>`, `<kbd>`, and `<pre>`.\n@font-family-monospace:   Menlo, Monaco, Consolas, \"Courier New\", monospace;\n@font-family-base:        @font-family-sans-serif;\n\n@font-size-base:          14px;\n@font-size-large:         ceil((@font-size-base * 1.25)); // ~18px\n@font-size-small:         ceil((@font-size-base * 0.85)); // ~12px\n\n@font-size-h1:            floor((@font-size-base * 2.6)); // ~36px\n@font-size-h2:            floor((@font-size-base * 2.15)); // ~30px\n@font-size-h3:            ceil((@font-size-base * 1.7)); // ~24px\n@font-size-h4:            ceil((@font-size-base * 1.25)); // ~18px\n@font-size-h5:    
        @font-size-base;\n@font-size-h6:            ceil((@font-size-base * 0.85)); // ~12px\n\n//** Unit-less `line-height` for use in components like buttons.\n@line-height-base:        1.428571429; // 20/14\n//** Computed \"line-height\" (`font-size` * `line-height`) for use with `margin`, `padding`, etc.\n@line-height-computed:    floor((@font-size-base * @line-height-base)); // ~20px\n\n//** By default, this inherits from the `<body>`.\n@headings-font-family:    inherit;\n@headings-font-weight:    500;\n@headings-line-height:    1.1;\n@headings-color:          inherit;\n\n\n//-- Iconography\n//\n//## Specify custom locations of the include Glyphicons icon font. Useful for those including Bootstrap via Bower.\n\n@icon-font-path:          \"../fonts/\";\n@icon-font-name:          \"glyphicons-halflings-regular\";\n@icon-font-svg-id:\t\t\t\t\"glyphicons_halflingsregular\";\n\n//== Components\n//\n//## Define common padding and border radius sizes and more. Values based on 14px text and 1.428 line-height (~20px to start).\n\n@padding-base-vertical:     6px;\n@padding-base-horizontal:   12px;\n\n@padding-large-vertical:    10px;\n@padding-large-horizontal:  16px;\n\n@padding-small-vertical:    5px;\n@padding-small-horizontal:  10px;\n\n@padding-xs-vertical:       1px;\n@padding-xs-horizontal:     5px;\n\n@line-height-large:         1.33;\n@line-height-small:         1.5;\n\n@border-radius-base:        4px;\n@border-radius-large:       6px;\n@border-radius-small:       3px;\n\n//** Global color for active items (e.g., navs or dropdowns).\n@component-active-color:    #fff;\n//** Global background color for active items (e.g., navs or dropdowns).\n@component-active-bg:       @brand-primary;\n\n//** Width of the `border` for generating carets that indicator dropdowns.\n@caret-width-base:          4px;\n//** Carets increase slightly in size for larger components.\n@caret-width-large:         5px;\n\n\n//== Tables\n//\n//## Customizes the `.table` component with basic values, each used across all table variations.\n\n//** Padding for `<th>`s and `<td>`s.\n@table-cell-padding:            8px;\n//** Padding for cells in `.table-condensed`.\n@table-condensed-cell-padding:  5px;\n\n//** Default background color used for all tables.\n@table-bg:                      transparent;\n//** Background color used for `.table-striped`.\n@table-bg-accent:               #f9f9f9;\n//** Background color used for `.table-hover`.\n@table-bg-hover:                #f5f5f5;\n@table-bg-active:               @table-bg-hover;\n\n//** Border color for table and cell borders.\n@table-border-color:            #ddd;\n\n\n//== Buttons\n//\n//## For each of Bootstrap's buttons, define text, background and border color.\n\n@btn-font-weight:                normal;\n\n@btn-default-color:              #333;\n@btn-default-bg:                 #fff;\n@btn-default-border:             #ccc;\n\n@btn-primary-color:              #fff;\n@btn-primary-bg:                 @brand-primary;\n@btn-primary-border:             darken(@btn-primary-bg, 5%);\n\n@btn-success-color:              #fff;\n@btn-success-bg:                 @brand-success;\n@btn-success-border:             darken(@btn-success-bg, 5%);\n\n@btn-info-color:                 #fff;\n@btn-info-bg:                    @brand-info;\n@btn-info-border:                darken(@btn-info-bg, 5%);\n\n@btn-warning-color:              #fff;\n@btn-warning-bg:                 @brand-warning;\n@btn-warning-border:             darken(@btn-warning-bg, 5%);\n\n@btn-danger-color:               
#fff;\n@btn-danger-bg:                  @brand-danger;\n@btn-danger-border:              darken(@btn-danger-bg, 5%);\n\n@btn-link-disabled-color:        @gray-light;\n\n\n//== Forms\n//\n//##\n\n//** `<input>` background color\n@input-bg:                       #fff;\n//** `<input disabled>` background color\n@input-bg-disabled:              @gray-lighter;\n\n//** Text color for `<input>`s\n@input-color:                    @gray;\n//** `<input>` border color\n@input-border:                   #ccc;\n//** `<input>` border radius\n@input-border-radius:            @border-radius-base;\n//** Border color for inputs on focus\n@input-border-focus:             #66afe9;\n\n//** Placeholder text color\n@input-color-placeholder:        @gray-light;\n\n//** Default `.form-control` height\n@input-height-base:              (@line-height-computed + (@padding-base-vertical * 2) + 2);\n//** Large `.form-control` height\n@input-height-large:             (ceil(@font-size-large * @line-height-large) + (@padding-large-vertical * 2) + 2);\n//** Small `.form-control` height\n@input-height-small:             (floor(@font-size-small * @line-height-small) + (@padding-small-vertical * 2) + 2);\n\n@legend-color:                   @gray-dark;\n@legend-border-color:            #e5e5e5;\n\n//** Background color for textual input addons\n@input-group-addon-bg:           @gray-lighter;\n//** Border color for textual input addons\n@input-group-addon-border-color: @input-border;\n\n\n//== Dropdowns\n//\n//## Dropdown menu container and contents.\n\n//** Background for the dropdown menu.\n@dropdown-bg:                    #fff;\n//** Dropdown menu `border-color`.\n@dropdown-border:                rgba(0,0,0,.15);\n//** Dropdown menu `border-color` **for IE8**.\n@dropdown-fallback-border:       #ccc;\n//** Divider color for between dropdown items.\n@dropdown-divider-bg:            #e5e5e5;\n\n//** Dropdown link text color.\n@dropdown-link-color:            @gray-dark;\n//** Hover color for dropdown links.\n@dropdown-link-hover-color:      darken(@gray-dark, 5%);\n//** Hover background for dropdown links.\n@dropdown-link-hover-bg:         #f5f5f5;\n\n//** Active dropdown menu item text color.\n@dropdown-link-active-color:     @component-active-color;\n//** Active dropdown menu item background color.\n@dropdown-link-active-bg:        @component-active-bg;\n\n//** Disabled dropdown menu item background color.\n@dropdown-link-disabled-color:   @gray-light;\n\n//** Text color for headers within dropdown menus.\n@dropdown-header-color:          @gray-light;\n\n// Note: Deprecated @dropdown-caret-color as of v3.1.0\n@dropdown-caret-color:           #000;\n\n\n//-- Z-index master list\n//\n// Warning: Avoid customizing these values. 
They're used for a bird's eye view\n// of components dependent on the z-axis and are designed to all work together.\n//\n// Note: These variables are not generated into the Customizer.\n\n@zindex-navbar:            1000;\n@zindex-dropdown:          1000;\n@zindex-popover:           1010;\n@zindex-tooltip:           1030;\n@zindex-navbar-fixed:      1030;\n@zindex-modal-background:  1040;\n@zindex-modal:             1050;\n\n\n//== Media queries breakpoints\n//\n//## Define the breakpoints at which your layout will change, adapting to different screen sizes.\n\n// Extra small screen / phone\n// Note: Deprecated @screen-xs and @screen-phone as of v3.0.1\n@screen-xs:                  480px;\n@screen-xs-min:              @screen-xs;\n@screen-phone:               @screen-xs-min;\n\n// Small screen / tablet\n// Note: Deprecated @screen-sm and @screen-tablet as of v3.0.1\n@screen-sm:                  768px;\n@screen-sm-min:              @screen-sm;\n@screen-tablet:              @screen-sm-min;\n\n// Medium screen / desktop\n// Note: Deprecated @screen-md and @screen-desktop as of v3.0.1\n@screen-md:                  992px;\n@screen-md-min:              @screen-md;\n@screen-desktop:             @screen-md-min;\n\n// Large screen / wide desktop\n// Note: Deprecated @screen-lg and @screen-lg-desktop as of v3.0.1\n@screen-lg:                  1200px;\n@screen-lg-min:              @screen-lg;\n@screen-lg-desktop:          @screen-lg-min;\n\n// So media queries don't overlap when required, provide a maximum\n@screen-xs-max:              (@screen-sm-min - 1);\n@screen-sm-max:              (@screen-md-min - 1);\n@screen-md-max:              (@screen-lg-min - 1);\n\n\n//== Grid system\n//\n//## Define your custom responsive grid.\n\n//** Number of columns in the grid.\n@grid-columns:              12;\n//** Padding between columns. 
Gets divided in half for the left and right.\n@grid-gutter-width:         30px;\n// Navbar collapse\n//** Point at which the navbar becomes uncollapsed.\n@grid-float-breakpoint:     @screen-sm-min;\n//** Point at which the navbar begins collapsing.\n@grid-float-breakpoint-max: (@grid-float-breakpoint - 1);\n\n\n//== Navbar\n//\n//##\n\n// Basics of a navbar\n@navbar-height:                    50px;\n@navbar-margin-bottom:             @line-height-computed;\n@navbar-border-radius:             @border-radius-base;\n@navbar-padding-horizontal:        floor((@grid-gutter-width / 2));\n@navbar-padding-vertical:          ((@navbar-height - @line-height-computed) / 2);\n@navbar-collapse-max-height:       340px;\n\n@navbar-default-color:             #777;\n@navbar-default-bg:                #f8f8f8;\n@navbar-default-border:            darken(@navbar-default-bg, 6.5%);\n\n// Navbar links\n@navbar-default-link-color:                #777;\n@navbar-default-link-hover-color:          #333;\n@navbar-default-link-hover-bg:             transparent;\n@navbar-default-link-active-color:         #555;\n@navbar-default-link-active-bg:            darken(@navbar-default-bg, 6.5%);\n@navbar-default-link-disabled-color:       #ccc;\n@navbar-default-link-disabled-bg:          transparent;\n\n// Navbar brand label\n@navbar-default-brand-color:               @navbar-default-link-color;\n@navbar-default-brand-hover-color:         darken(@navbar-default-brand-color, 10%);\n@navbar-default-brand-hover-bg:            transparent;\n\n// Navbar toggle\n@navbar-default-toggle-hover-bg:           #ddd;\n@navbar-default-toggle-icon-bar-bg:        #888;\n@navbar-default-toggle-border-color:       #ddd;\n\n\n// Inverted navbar\n// Reset inverted navbar basics\n@navbar-inverse-color:                      @gray-light;\n@navbar-inverse-bg:                         #222;\n@navbar-inverse-border:                     darken(@navbar-inverse-bg, 10%);\n\n// Inverted navbar links\n@navbar-inverse-link-color:                 @gray-light;\n@navbar-inverse-link-hover-color:           #fff;\n@navbar-inverse-link-hover-bg:              transparent;\n@navbar-inverse-link-active-color:          @navbar-inverse-link-hover-color;\n@navbar-inverse-link-active-bg:             darken(@navbar-inverse-bg, 10%);\n@navbar-inverse-link-disabled-color:        #444;\n@navbar-inverse-link-disabled-bg:           transparent;\n\n// Inverted navbar brand label\n@navbar-inverse-brand-color:                @navbar-inverse-link-color;\n@navbar-inverse-brand-hover-color:          #fff;\n@navbar-inverse-brand-hover-bg:             transparent;\n\n// Inverted navbar toggle\n@navbar-inverse-toggle-hover-bg:            #333;\n@navbar-inverse-toggle-icon-bar-bg:         #fff;\n@navbar-inverse-toggle-border-color:        #333;\n\n\n//== Navs\n//\n//##\n\n//=== Shared nav styles\n@nav-link-padding:                          10px 15px;\n@nav-link-hover-bg:                         @gray-lighter;\n\n@nav-disabled-link-color:                   @gray-light;\n@nav-disabled-link-hover-color:             @gray-light;\n\n@nav-open-link-hover-color:                 #fff;\n\n//== Tabs\n@nav-tabs-border-color:                     #ddd;\n\n@nav-tabs-link-hover-border-color:          @gray-lighter;\n\n@nav-tabs-active-link-hover-bg:             @body-bg;\n@nav-tabs-active-link-hover-color:          @gray;\n@nav-tabs-active-link-hover-border-color:   #ddd;\n\n@nav-tabs-justified-link-border-color:            #ddd;\n@nav-tabs-justified-active-link-border-color:     @body-bg;\n\n//== 
Pills\n@nav-pills-border-radius:                   @border-radius-base;\n@nav-pills-active-link-hover-bg:            @component-active-bg;\n@nav-pills-active-link-hover-color:         @component-active-color;\n\n\n//== Pagination\n//\n//##\n\n@pagination-color:                     @link-color;\n@pagination-bg:                        #fff;\n@pagination-border:                    #ddd;\n\n@pagination-hover-color:               @link-hover-color;\n@pagination-hover-bg:                  @gray-lighter;\n@pagination-hover-border:              #ddd;\n\n@pagination-active-color:              #fff;\n@pagination-active-bg:                 @brand-primary;\n@pagination-active-border:             @brand-primary;\n\n@pagination-disabled-color:            @gray-light;\n@pagination-disabled-bg:               #fff;\n@pagination-disabled-border:           #ddd;\n\n\n//== Pager\n//\n//##\n\n@pager-bg:                             @pagination-bg;\n@pager-border:                         @pagination-border;\n@pager-border-radius:                  15px;\n\n@pager-hover-bg:                       @pagination-hover-bg;\n\n@pager-active-bg:                      @pagination-active-bg;\n@pager-active-color:                   @pagination-active-color;\n\n@pager-disabled-color:                 @pagination-disabled-color;\n\n\n//== Jumbotron\n//\n//##\n\n@jumbotron-padding:              30px;\n@jumbotron-color:                inherit;\n@jumbotron-bg:                   @gray-lighter;\n@jumbotron-heading-color:        inherit;\n@jumbotron-font-size:            ceil((@font-size-base * 1.5));\n\n\n//== Form states and alerts\n//\n//## Define colors for form feedback states and, by default, alerts.\n\n@state-success-text:             #3c763d;\n@state-success-bg:               #dff0d8;\n@state-success-border:           darken(spin(@state-success-bg, -10), 5%);\n\n@state-info-text:                #31708f;\n@state-info-bg:                  #d9edf7;\n@state-info-border:              darken(spin(@state-info-bg, -10), 7%);\n\n@state-warning-text:             #8a6d3b;\n@state-warning-bg:               #fcf8e3;\n@state-warning-border:           darken(spin(@state-warning-bg, -10), 5%);\n\n@state-danger-text:              #a94442;\n@state-danger-bg:                #f2dede;\n@state-danger-border:            darken(spin(@state-danger-bg, -10), 5%);\n\n\n//== Tooltips\n//\n//##\n\n//** Tooltip max width\n@tooltip-max-width:           200px;\n//** Tooltip text color\n@tooltip-color:               #fff;\n//** Tooltip background color\n@tooltip-bg:                  #000;\n@tooltip-opacity:             .9;\n\n//** Tooltip arrow width\n@tooltip-arrow-width:         5px;\n//** Tooltip arrow color\n@tooltip-arrow-color:         @tooltip-bg;\n\n\n//== Popovers\n//\n//##\n\n//** Popover body background color\n@popover-bg:                          #fff;\n//** Popover maximum width\n@popover-max-width:                   276px;\n//** Popover border color\n@popover-border-color:                rgba(0,0,0,.2);\n//** Popover fallback border color\n@popover-fallback-border-color:       #ccc;\n\n//** Popover title background color\n@popover-title-bg:                    darken(@popover-bg, 3%);\n\n//** Popover arrow width\n@popover-arrow-width:                 10px;\n//** Popover arrow color\n@popover-arrow-color:                 #fff;\n\n//** Popover outer arrow width\n@popover-arrow-outer-width:           (@popover-arrow-width + 1);\n//** Popover outer arrow color\n@popover-arrow-outer-color:           rgba(0,0,0,.25);\n//** Popover outer arrow fallback 
color\n@popover-arrow-outer-fallback-color:  #999;\n\n\n//== Labels\n//\n//##\n\n//** Default label background color\n@label-default-bg:            @gray-light;\n//** Primary label background color\n@label-primary-bg:            @brand-primary;\n//** Success label background color\n@label-success-bg:            @brand-success;\n//** Info label background color\n@label-info-bg:               @brand-info;\n//** Warning label background color\n@label-warning-bg:            @brand-warning;\n//** Danger label background color\n@label-danger-bg:             @brand-danger;\n\n//** Default label text color\n@label-color:                 #fff;\n//** Default text color of a linked label\n@label-link-hover-color:      #fff;\n\n\n//== Modals\n//\n//##\n\n//** Padding applied to the modal body\n@modal-inner-padding:         20px;\n\n//** Padding applied to the modal title\n@modal-title-padding:         15px;\n//** Modal title line-height\n@modal-title-line-height:     @line-height-base;\n\n//** Background color of modal content area\n@modal-content-bg:                             #fff;\n//** Modal content border color\n@modal-content-border-color:                   rgba(0,0,0,.2);\n//** Modal content border color **for IE8**\n@modal-content-fallback-border-color:          #999;\n\n//** Modal backdrop background color\n@modal-backdrop-bg:           #000;\n//** Modal backdrop opacity\n@modal-backdrop-opacity:      .5;\n//** Modal header border color\n@modal-header-border-color:   #e5e5e5;\n//** Modal footer border color\n@modal-footer-border-color:   @modal-header-border-color;\n\n@modal-lg:                    900px;\n@modal-md:                    600px;\n@modal-sm:                    300px;\n\n\n//== Alerts\n//\n//## Define alert colors, border radius, and padding.\n\n@alert-padding:               15px;\n@alert-border-radius:         @border-radius-base;\n@alert-link-font-weight:      bold;\n\n@alert-success-bg:            @state-success-bg;\n@alert-success-text:          @state-success-text;\n@alert-success-border:        @state-success-border;\n\n@alert-info-bg:               @state-info-bg;\n@alert-info-text:             @state-info-text;\n@alert-info-border:           @state-info-border;\n\n@alert-warning-bg:            @state-warning-bg;\n@alert-warning-text:          @state-warning-text;\n@alert-warning-border:        @state-warning-border;\n\n@alert-danger-bg:             @state-danger-bg;\n@alert-danger-text:           @state-danger-text;\n@alert-danger-border:         @state-danger-border;\n\n\n//== Progress bars\n//\n//##\n\n//** Background color of the whole progress component\n@progress-bg:                 #f5f5f5;\n//** Progress bar text color\n@progress-bar-color:          #fff;\n\n//** Default progress bar color\n@progress-bar-bg:             @brand-primary;\n//** Success progress bar color\n@progress-bar-success-bg:     @brand-success;\n//** Warning progress bar color\n@progress-bar-warning-bg:     @brand-warning;\n//** Danger progress bar color\n@progress-bar-danger-bg:      @brand-danger;\n//** Info progress bar color\n@progress-bar-info-bg:        @brand-info;\n\n\n//== List group\n//\n//##\n\n//** Background color on `.list-group-item`\n@list-group-bg:                 #fff;\n//** `.list-group-item` border color\n@list-group-border:             #ddd;\n//** List group border radius\n@list-group-border-radius:      @border-radius-base;\n\n//** Background color of single list elements on hover\n@list-group-hover-bg:           #f5f5f5;\n//** Text color of active list 
elements\n@list-group-active-color:       @component-active-color;\n//** Background color of active list elements\n@list-group-active-bg:          @component-active-bg;\n//** Border color of active list elements\n@list-group-active-border:      @list-group-active-bg;\n@list-group-active-text-color:  lighten(@list-group-active-bg, 40%);\n\n@list-group-link-color:         #555;\n@list-group-link-heading-color: #333;\n\n\n//== Panels\n//\n//##\n\n@panel-bg:                    #fff;\n@panel-body-padding:          15px;\n@panel-border-radius:         @border-radius-base;\n\n//** Border color for elements within panels\n@panel-inner-border:          #ddd;\n@panel-footer-bg:             #f5f5f5;\n\n@panel-default-text:          @gray-dark;\n@panel-default-border:        #ddd;\n@panel-default-heading-bg:    #f5f5f5;\n\n@panel-primary-text:          #fff;\n@panel-primary-border:        @brand-primary;\n@panel-primary-heading-bg:    @brand-primary;\n\n@panel-success-text:          @state-success-text;\n@panel-success-border:        @state-success-border;\n@panel-success-heading-bg:    @state-success-bg;\n\n@panel-info-text:             @state-info-text;\n@panel-info-border:           @state-info-border;\n@panel-info-heading-bg:       @state-info-bg;\n\n@panel-warning-text:          @state-warning-text;\n@panel-warning-border:        @state-warning-border;\n@panel-warning-heading-bg:    @state-warning-bg;\n\n@panel-danger-text:           @state-danger-text;\n@panel-danger-border:         @state-danger-border;\n@panel-danger-heading-bg:     @state-danger-bg;\n\n\n//== Thumbnails\n//\n//##\n\n//** Padding around the thumbnail image\n@thumbnail-padding:           4px;\n//** Thumbnail background color\n@thumbnail-bg:                @body-bg;\n//** Thumbnail border color\n@thumbnail-border:            #ddd;\n//** Thumbnail border radius\n@thumbnail-border-radius:     @border-radius-base;\n\n//** Custom text color for thumbnail captions\n@thumbnail-caption-color:     @text-color;\n//** Padding around the thumbnail caption\n@thumbnail-caption-padding:   9px;\n\n\n//== Wells\n//\n//##\n\n@well-bg:                     #f5f5f5;\n@well-border:                 darken(@well-bg, 7%);\n\n\n//== Badges\n//\n//##\n\n@badge-color:                 #fff;\n//** Linked badge text color on hover\n@badge-link-hover-color:      #fff;\n@badge-bg:                    @gray-light;\n\n//** Badge text color in active nav link\n@badge-active-color:          @link-color;\n//** Badge background color in active nav link\n@badge-active-bg:             #fff;\n\n@badge-font-weight:           bold;\n@badge-line-height:           1;\n@badge-border-radius:         10px;\n\n\n//== Breadcrumbs\n//\n//##\n\n@breadcrumb-padding-vertical:   8px;\n@breadcrumb-padding-horizontal: 15px;\n//** Breadcrumb background color\n@breadcrumb-bg:                 #f5f5f5;\n//** Breadcrumb text color\n@breadcrumb-color:              #ccc;\n//** Text color of current page in the breadcrumb\n@breadcrumb-active-color:       @gray-light;\n//** Textual separator for between breadcrumb elements\n@breadcrumb-separator:          \"/\";\n\n\n//== Carousel\n//\n//##\n\n@carousel-text-shadow:                        0 1px 2px rgba(0,0,0,.6);\n\n@carousel-control-color:                      #fff;\n@carousel-control-width:                      15%;\n@carousel-control-opacity:                    .5;\n@carousel-control-font-size:                  20px;\n\n@carousel-indicator-active-bg:                #fff;\n@carousel-indicator-border-color:             
#fff;\n\n@carousel-caption-color:                      #fff;\n\n\n//== Close\n//\n//##\n\n@close-font-weight:           bold;\n@close-color:                 #000;\n@close-text-shadow:           0 1px 0 #fff;\n\n\n//== Code\n//\n//##\n\n@code-color:                  #c7254e;\n@code-bg:                     #f9f2f4;\n\n@kbd-color:                   #fff;\n@kbd-bg:                      #333;\n\n@pre-bg:                      #f5f5f5;\n@pre-color:                   @gray-dark;\n@pre-border-color:            #ccc;\n@pre-scrollable-max-height:   340px;\n\n\n//== Type\n//\n//##\n\n//** Text muted color\n@text-muted:                  @gray-light;\n//** Abbreviations and acronyms border color\n@abbr-border-color:           @gray-light;\n//** Headings small color\n@headings-small-color:        @gray-light;\n//** Blockquote small color\n@blockquote-small-color:      @gray-light;\n//** Blockquote border color\n@blockquote-border-color:     @gray-lighter;\n//** Page header border color\n@page-header-border-color:    @gray-lighter;\n\n\n//== Miscellaneous\n//\n//##\n\n//** Horizontal line color.\n@hr-border:                   @gray-lighter;\n\n//** Horizontal offset for forms and lists.\n@component-offset-horizontal: 180px;\n\n\n//== Container sizes\n//\n//## Define the maximum width of `.container` for different screen sizes.\n\n// Small screen / tablet\n@container-tablet:             ((720px + @grid-gutter-width));\n//** For `@screen-sm-min` and up.\n@container-sm:                 @container-tablet;\n\n// Medium screen / desktop\n@container-desktop:            ((940px + @grid-gutter-width));\n//** For `@screen-md-min` and up.\n@container-md:                 @container-desktop;\n\n// Large screen / wide desktop\n@container-large-desktop:      ((1140px + @grid-gutter-width));\n//** For `@screen-lg-min` and up.\n@container-lg:                 @container-large-desktop;\n","//\n// Typography\n// --------------------------------------------------\n\n\n// Headings\n// -------------------------\n\nh1, h2, h3, h4, h5, h6,\n.h1, .h2, .h3, .h4, .h5, .h6 {\n  font-family: @headings-font-family;\n  font-weight: @headings-font-weight;\n  line-height: @headings-line-height;\n  color: @headings-color;\n\n  small,\n  .small {\n    font-weight: normal;\n    line-height: 1;\n    color: @headings-small-color;\n  }\n}\n\nh1, .h1,\nh2, .h2,\nh3, .h3 {\n  margin-top: @line-height-computed;\n  margin-bottom: (@line-height-computed / 2);\n\n  small,\n  .small {\n    font-size: 65%;\n  }\n}\nh4, .h4,\nh5, .h5,\nh6, .h6 {\n  margin-top: (@line-height-computed / 2);\n  margin-bottom: (@line-height-computed / 2);\n\n  small,\n  .small {\n    font-size: 75%;\n  }\n}\n\nh1, .h1 { font-size: @font-size-h1; }\nh2, .h2 { font-size: @font-size-h2; }\nh3, .h3 { font-size: @font-size-h3; }\nh4, .h4 { font-size: @font-size-h4; }\nh5, .h5 { font-size: @font-size-h5; }\nh6, .h6 { font-size: @font-size-h6; }\n\n\n// Body text\n// -------------------------\n\np {\n  margin: 0 0 (@line-height-computed / 2);\n}\n\n.lead {\n  margin-bottom: @line-height-computed;\n  font-size: floor((@font-size-base * 1.15));\n  font-weight: 200;\n  line-height: 1.4;\n\n  @media (min-width: @screen-sm-min) {\n    font-size: (@font-size-base * 1.5);\n  }\n}\n\n\n// Emphasis & misc\n// -------------------------\n\n// Ex: 14px base font * 85% = about 12px\nsmall,\n.small  { font-size: 85%; }\n\n// Undo browser default styling\ncite    { font-style: normal; }\n\n// Alignment\n.text-left           { text-align: left; }\n.text-right          { text-align: right; 
}\n.text-center         { text-align: center; }\n.text-justify        { text-align: justify; }\n\n// Contextual colors\n.text-muted {\n  color: @text-muted;\n}\n.text-primary {\n  .text-emphasis-variant(@brand-primary);\n}\n.text-success {\n  .text-emphasis-variant(@state-success-text);\n}\n.text-info {\n  .text-emphasis-variant(@state-info-text);\n}\n.text-warning {\n  .text-emphasis-variant(@state-warning-text);\n}\n.text-danger {\n  .text-emphasis-variant(@state-danger-text);\n}\n\n// Contextual backgrounds\n// For now we'll leave these alongside the text classes until v4 when we can\n// safely shift things around (per SemVer rules).\n.bg-primary {\n  // Given the contrast here, this is the only class to have its color inverted\n  // automatically.\n  color: #fff;\n  .bg-variant(@brand-primary);\n}\n.bg-success {\n  .bg-variant(@state-success-bg);\n}\n.bg-info {\n  .bg-variant(@state-info-bg);\n}\n.bg-warning {\n  .bg-variant(@state-warning-bg);\n}\n.bg-danger {\n  .bg-variant(@state-danger-bg);\n}\n\n\n// Page header\n// -------------------------\n\n.page-header {\n  padding-bottom: ((@line-height-computed / 2) - 1);\n  margin: (@line-height-computed * 2) 0 @line-height-computed;\n  border-bottom: 1px solid @page-header-border-color;\n}\n\n\n// Lists\n// --------------------------------------------------\n\n// Unordered and Ordered lists\nul,\nol {\n  margin-top: 0;\n  margin-bottom: (@line-height-computed / 2);\n  ul,\n  ol {\n    margin-bottom: 0;\n  }\n}\n\n// List options\n\n// Unstyled keeps list items block level, just removes default browser padding and list-style\n.list-unstyled {\n  padding-left: 0;\n  list-style: none;\n}\n\n// Inline turns list items into inline-block\n.list-inline {\n  .list-unstyled();\n\n  > li {\n    display: inline-block;\n    padding-left: 5px;\n    padding-right: 5px;\n\n    &:first-child {\n      padding-left: 0;\n    }\n  }\n}\n\n// Description Lists\ndl {\n  margin-top: 0; // Remove browser default\n  margin-bottom: @line-height-computed;\n}\ndt,\ndd {\n  line-height: @line-height-base;\n}\ndt {\n  font-weight: bold;\n}\ndd {\n  margin-left: 0; // Undo browser default\n}\n\n// Horizontal description lists\n//\n// Defaults to being stacked without any of the below styles applied, until the\n// grid breakpoint is reached (default of ~768px).\n\n@media (min-width: @grid-float-breakpoint) {\n  .dl-horizontal {\n    dt {\n      float: left;\n      width: (@component-offset-horizontal - 20);\n      clear: left;\n      text-align: right;\n      .text-overflow();\n    }\n    dd {\n      margin-left: @component-offset-horizontal;\n      &:extend(.clearfix all); // Clear the floated `dt` if an empty `dd` is present\n    }\n  }\n}\n\n// MISC\n// ----\n\n// Abbreviations and acronyms\nabbr[title],\n// Add data-* attribute to help out our tooltip plugin, per https://github.com/twbs/bootstrap/issues/5257\nabbr[data-original-title] {\n  cursor: help;\n  border-bottom: 1px dotted @abbr-border-color;\n}\n.initialism {\n  font-size: 90%;\n  text-transform: uppercase;\n}\n\n// Blockquotes\nblockquote {\n  padding: (@line-height-computed / 2) @line-height-computed;\n  margin: 0 0 @line-height-computed;\n  font-size: (@font-size-base * 1.25);\n  border-left: 5px solid @blockquote-border-color;\n\n  p,\n  ul,\n  ol {\n    &:last-child {\n      margin-bottom: 0;\n    }\n  }\n\n  // Note: Deprecated small and .small as of v3.1.0\n  // Context: https://github.com/twbs/bootstrap/issues/11660\n  footer,\n  small,\n  .small {\n    display: block;\n    font-size: 80%; // back 
to default font-size\n    line-height: @line-height-base;\n    color: @blockquote-small-color;\n\n    &:before {\n      content: '\\2014 \\00A0'; // em dash, nbsp\n    }\n  }\n}\n\n// Opposite alignment of blockquote\n//\n// Heads up: `blockquote.pull-right` has been deprecated as of v3.1.0.\n.blockquote-reverse,\nblockquote.pull-right {\n  padding-right: 15px;\n  padding-left: 0;\n  border-right: 5px solid @blockquote-border-color;\n  border-left: 0;\n  text-align: right;\n\n  // Account for citation\n  footer,\n  small,\n  .small {\n    &:before { content: ''; }\n    &:after {\n      content: '\\00A0 \\2014'; // nbsp, em dash\n    }\n  }\n}\n\n// Quotes\nblockquote:before,\nblockquote:after {\n  content: \"\";\n}\n\n// Addresses\naddress {\n  margin-bottom: @line-height-computed;\n  font-style: normal;\n  line-height: @line-height-base;\n}\n","//\n// Code (inline and block)\n// --------------------------------------------------\n\n\n// Inline and block code styles\ncode,\nkbd,\npre,\nsamp {\n  font-family: @font-family-monospace;\n}\n\n// Inline code\ncode {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: @code-color;\n  background-color: @code-bg;\n  white-space: nowrap;\n  border-radius: @border-radius-base;\n}\n\n// User input typically entered via keyboard\nkbd {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: @kbd-color;\n  background-color: @kbd-bg;\n  border-radius: @border-radius-small;\n  box-shadow: inset 0 -1px 0 rgba(0,0,0,.25);\n}\n\n// Blocks of code\npre {\n  display: block;\n  padding: ((@line-height-computed - 1) / 2);\n  margin: 0 0 (@line-height-computed / 2);\n  font-size: (@font-size-base - 1); // 14px to 13px\n  line-height: @line-height-base;\n  word-break: break-all;\n  word-wrap: break-word;\n  color: @pre-color;\n  background-color: @pre-bg;\n  border: 1px solid @pre-border-color;\n  border-radius: @border-radius-base;\n\n  // Account for some code outputs that place code tags in pre tags\n  code {\n    padding: 0;\n    font-size: inherit;\n    color: inherit;\n    white-space: pre-wrap;\n    background-color: transparent;\n    border-radius: 0;\n  }\n}\n\n// Enable scrollable blocks of code\n.pre-scrollable {\n  max-height: @pre-scrollable-max-height;\n  overflow-y: scroll;\n}\n","//\n// Grid system\n// --------------------------------------------------\n\n\n// Container widths\n//\n// Set the container width, and override it for fixed navbars in media queries.\n\n.container {\n  .container-fixed();\n\n  @media (min-width: @screen-sm-min) {\n    width: @container-sm;\n  }\n  @media (min-width: @screen-md-min) {\n    width: @container-md;\n  }\n  @media (min-width: @screen-lg-min) {\n    width: @container-lg;\n  }\n}\n\n\n// Fluid container\n//\n// Utilizes the mixin meant for fixed width containers, but without any defined\n// width for fluid, full width layouts.\n\n.container-fluid {\n  .container-fixed();\n}\n\n\n// Row\n//\n// Rows contain and clear the floats of your columns.\n\n.row {\n  .make-row();\n}\n\n\n// Columns\n//\n// Common styles for small and large grid columns\n\n.make-grid-columns();\n\n\n// Extra small grid\n//\n// Columns, offsets, pushes, and pulls for extra small devices like\n// smartphones.\n\n.make-grid-columns-float(xs);\n.make-grid(@grid-columns, xs, width);\n.make-grid(@grid-columns, xs, pull);\n.make-grid(@grid-columns, xs, push);\n.make-grid(@grid-columns, xs, offset);\n\n\n// Small grid\n//\n// Columns, offsets, pushes, and pulls for the small device range, from phones\n// to tablets.\n\n@media (min-width: @screen-sm-min) {\n 
 .make-grid-columns-float(sm);\n  .make-grid(@grid-columns, sm, width);\n  .make-grid(@grid-columns, sm, pull);\n  .make-grid(@grid-columns, sm, push);\n  .make-grid(@grid-columns, sm, offset);\n}\n\n\n// Medium grid\n//\n// Columns, offsets, pushes, and pulls for the desktop device range.\n\n@media (min-width: @screen-md-min) {\n  .make-grid-columns-float(md);\n  .make-grid(@grid-columns, md, width);\n  .make-grid(@grid-columns, md, pull);\n  .make-grid(@grid-columns, md, push);\n  .make-grid(@grid-columns, md, offset);\n}\n\n\n// Large grid\n//\n// Columns, offsets, pushes, and pulls for the large desktop device range.\n\n@media (min-width: @screen-lg-min) {\n  .make-grid-columns-float(lg);\n  .make-grid(@grid-columns, lg, width);\n  .make-grid(@grid-columns, lg, pull);\n  .make-grid(@grid-columns, lg, push);\n  .make-grid(@grid-columns, lg, offset);\n}\n","//\n// Tables\n// --------------------------------------------------\n\n\ntable {\n  max-width: 100%;\n  background-color: @table-bg;\n}\nth {\n  text-align: left;\n}\n\n\n// Baseline styles\n\n.table {\n  width: 100%;\n  margin-bottom: @line-height-computed;\n  // Cells\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        padding: @table-cell-padding;\n        line-height: @line-height-base;\n        vertical-align: top;\n        border-top: 1px solid @table-border-color;\n      }\n    }\n  }\n  // Bottom align for column headings\n  > thead > tr > th {\n    vertical-align: bottom;\n    border-bottom: 2px solid @table-border-color;\n  }\n  // Remove top border from thead by default\n  > caption + thead,\n  > colgroup + thead,\n  > thead:first-child {\n    > tr:first-child {\n      > th,\n      > td {\n        border-top: 0;\n      }\n    }\n  }\n  // Account for multiple tbody instances\n  > tbody + tbody {\n    border-top: 2px solid @table-border-color;\n  }\n\n  // Nesting\n  .table {\n    background-color: @body-bg;\n  }\n}\n\n\n// Condensed table w/ half padding\n\n.table-condensed {\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        padding: @table-condensed-cell-padding;\n      }\n    }\n  }\n}\n\n\n// Bordered version\n//\n// Add borders all around the table and between all the columns.\n\n.table-bordered {\n  border: 1px solid @table-border-color;\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        border: 1px solid @table-border-color;\n      }\n    }\n  }\n  > thead > tr {\n    > th,\n    > td {\n      border-bottom-width: 2px;\n    }\n  }\n}\n\n\n// Zebra-striping\n//\n// Default zebra-stripe styles (alternating gray and transparent backgrounds)\n\n.table-striped {\n  > tbody > tr:nth-child(odd) {\n    > td,\n    > th {\n      background-color: @table-bg-accent;\n    }\n  }\n}\n\n\n// Hover effect\n//\n// Placed here since it has to come after the potential zebra striping\n\n.table-hover {\n  > tbody > tr:hover {\n    > td,\n    > th {\n      background-color: @table-bg-hover;\n    }\n  }\n}\n\n\n// Table cell sizing\n//\n// Reset default table behavior\n\ntable col[class*=\"col-\"] {\n  position: static; // Prevent border hiding in Firefox and IE9/10 (see https://github.com/twbs/bootstrap/issues/11623)\n  float: none;\n  display: table-column;\n}\ntable {\n  td,\n  th {\n    &[class*=\"col-\"] {\n      position: static; // Prevent border hiding in Firefox and IE9/10 (see https://github.com/twbs/bootstrap/issues/11623)\n      float: none;\n      display: table-cell;\n    }\n  }\n}\n\n\n// Table backgrounds\n//\n// Exact 
selectors below required to override `.table-striped` and prevent\n// inheritance to nested tables.\n\n// Generate the contextual variants\n.table-row-variant(active; @table-bg-active);\n.table-row-variant(success; @state-success-bg);\n.table-row-variant(info; @state-info-bg);\n.table-row-variant(warning; @state-warning-bg);\n.table-row-variant(danger; @state-danger-bg);\n\n\n// Responsive tables\n//\n// Wrap your tables in `.table-responsive` and we'll make them mobile friendly\n// by enabling horizontal scrolling. Only applies <768px. Everything above that\n// will display normally.\n\n@media (max-width: @screen-xs-max) {\n  .table-responsive {\n    width: 100%;\n    margin-bottom: (@line-height-computed * 0.75);\n    overflow-y: hidden;\n    overflow-x: scroll;\n    -ms-overflow-style: -ms-autohiding-scrollbar;\n    border: 1px solid @table-border-color;\n    -webkit-overflow-scrolling: touch;\n\n    // Tighten up spacing\n    > .table {\n      margin-bottom: 0;\n\n      // Ensure the content doesn't wrap\n      > thead,\n      > tbody,\n      > tfoot {\n        > tr {\n          > th,\n          > td {\n            white-space: nowrap;\n          }\n        }\n      }\n    }\n\n    // Special overrides for the bordered tables\n    > .table-bordered {\n      border: 0;\n\n      // Nuke the appropriate borders so that the parent can handle them\n      > thead,\n      > tbody,\n      > tfoot {\n        > tr {\n          > th:first-child,\n          > td:first-child {\n            border-left: 0;\n          }\n          > th:last-child,\n          > td:last-child {\n            border-right: 0;\n          }\n        }\n      }\n\n      // Only nuke the last row's bottom-border in `tbody` and `tfoot` since\n      // chances are there will be only one `tr` in a `thead` and that would\n      // remove the border altogether.\n      > tbody,\n      > tfoot {\n        > tr:last-child {\n          > th,\n          > td {\n            border-bottom: 0;\n          }\n        }\n      }\n\n    }\n  }\n}\n","//\n// Forms\n// --------------------------------------------------\n\n\n// Normalize non-controls\n//\n// Restyle and baseline non-control form elements.\n\nfieldset {\n  padding: 0;\n  margin: 0;\n  border: 0;\n  // Chrome and Firefox set a `min-width: -webkit-min-content;` on fieldsets,\n  // so we reset that to ensure it behaves more like a standard block element.\n  // See https://github.com/twbs/bootstrap/issues/12359.\n  min-width: 0;\n}\n\nlegend {\n  display: block;\n  width: 100%;\n  padding: 0;\n  margin-bottom: @line-height-computed;\n  font-size: (@font-size-base * 1.5);\n  line-height: inherit;\n  color: @legend-color;\n  border: 0;\n  border-bottom: 1px solid @legend-border-color;\n}\n\nlabel {\n  display: inline-block;\n  margin-bottom: 5px;\n  font-weight: bold;\n}\n\n\n// Normalize form controls\n//\n// While most of our form styles require extra classes, some basic normalization\n// is required to ensure optimum display with or without those classes to better\n// address browser inconsistencies.\n\n// Override content-box in Normalize (* isn't specific enough)\ninput[type=\"search\"] {\n  .box-sizing(border-box);\n}\n\n// Position radios and checkboxes better\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n  margin: 4px 0 0;\n  margin-top: 1px \\9; /* IE8-9 */\n  line-height: normal;\n}\n\n// Set the height of file controls to match text inputs\ninput[type=\"file\"] {\n  display: block;\n}\n\n// Make range inputs behave like textual form controls\ninput[type=\"range\"] {\n 
 display: block;\n  width: 100%;\n}\n\n// Make multiple select elements height not fixed\nselect[multiple],\nselect[size] {\n  height: auto;\n}\n\n// Focus for file, radio, and checkbox\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n  .tab-focus();\n}\n\n// Adjust output element\noutput {\n  display: block;\n  padding-top: (@padding-base-vertical + 1);\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @input-color;\n}\n\n\n// Common form controls\n//\n// Shared size and type resets for form controls. Apply `.form-control` to any\n// of the following form controls:\n//\n// select\n// textarea\n// input[type=\"text\"]\n// input[type=\"password\"]\n// input[type=\"datetime\"]\n// input[type=\"datetime-local\"]\n// input[type=\"date\"]\n// input[type=\"month\"]\n// input[type=\"time\"]\n// input[type=\"week\"]\n// input[type=\"number\"]\n// input[type=\"email\"]\n// input[type=\"url\"]\n// input[type=\"search\"]\n// input[type=\"tel\"]\n// input[type=\"color\"]\n\n.form-control {\n  display: block;\n  width: 100%;\n  height: @input-height-base; // Make inputs at least the height of their button counterpart (base line-height + padding + border)\n  padding: @padding-base-vertical @padding-base-horizontal;\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @input-color;\n  background-color: @input-bg;\n  background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid @input-border;\n  border-radius: @input-border-radius;\n  .box-shadow(inset 0 1px 1px rgba(0,0,0,.075));\n  .transition(~\"border-color ease-in-out .15s, box-shadow ease-in-out .15s\");\n\n  // Customize the `:focus` state to imitate native WebKit styles.\n  .form-control-focus();\n\n  // Placeholder\n  .placeholder();\n\n  // Disabled and read-only inputs\n  // Note: HTML5 says that controls under a fieldset > legend:first-child won't\n  // be disabled if the fieldset is disabled. Due to implementation difficulty,\n  // we don't honor that edge case; we style them as disabled anyway.\n  &[disabled],\n  &[readonly],\n  fieldset[disabled] & {\n    cursor: not-allowed;\n    background-color: @input-bg-disabled;\n    opacity: 1; // iOS fix for unreadable disabled content\n  }\n\n  // Reset height for `textarea`s\n  textarea& {\n    height: auto;\n  }\n}\n\n// Special styles for iOS date input\n//\n// In Mobile Safari, date inputs require a pixel line-height that matches the\n// given height of the input.\ninput[type=\"date\"] {\n  line-height: @input-height-base;\n}\n\n\n// Form groups\n//\n// Designed to help with the organization and spacing of vertical forms. 
For\n// horizontal forms, use the predefined grid classes.\n\n.form-group {\n  margin-bottom: 15px;\n}\n\n\n// Checkboxes and radios\n//\n// Indent the labels to position radios/checkboxes as hanging controls.\n\n.radio,\n.checkbox {\n  display: block;\n  min-height: @line-height-computed; // clear the floating input if there is no label text\n  margin-top: 10px;\n  margin-bottom: 10px;\n  padding-left: 20px;\n  label {\n    display: inline;\n    font-weight: normal;\n    cursor: pointer;\n  }\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n  float: left;\n  margin-left: -20px;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n  margin-top: -5px; // Move up sibling radios or checkboxes for tighter spacing\n}\n\n// Radios and checkboxes on same line\n.radio-inline,\n.checkbox-inline {\n  display: inline-block;\n  padding-left: 20px;\n  margin-bottom: 0;\n  vertical-align: middle;\n  font-weight: normal;\n  cursor: pointer;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n  margin-top: 0;\n  margin-left: 10px; // space out consecutive inline controls\n}\n\n// Apply same disabled cursor tweak as for inputs\n//\n// Note: Neither radios nor checkboxes can be readonly.\ninput[type=\"radio\"],\ninput[type=\"checkbox\"],\n.radio,\n.radio-inline,\n.checkbox,\n.checkbox-inline {\n  &[disabled],\n  fieldset[disabled] & {\n    cursor: not-allowed;\n  }\n}\n\n\n// Form control sizing\n//\n// Build on `.form-control` with modifier classes to decrease or increase the\n// height and font-size of form controls.\n\n.input-sm {\n  .input-size(@input-height-small; @padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @border-radius-small);\n}\n\n.input-lg {\n  .input-size(@input-height-large; @padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @border-radius-large);\n}\n\n\n// Form control feedback states\n//\n// Apply contextual and semantic states to individual form controls.\n\n.has-feedback {\n  // Enable absolute positioning\n  position: relative;\n\n  // Ensure icons don't overlap text\n  .form-control {\n    padding-right: (@input-height-base * 1.25);\n  }\n\n  // Feedback icon (requires .glyphicon classes)\n  .form-control-feedback {\n    position: absolute;\n    top: (@line-height-computed + 5); // Height of the `label` and its margin\n    right: 0;\n    display: block;\n    width: @input-height-base;\n    height: @input-height-base;\n    line-height: @input-height-base;\n    text-align: center;\n  }\n}\n\n// Feedback states\n.has-success {\n  .form-control-validation(@state-success-text; @state-success-text; @state-success-bg);\n}\n.has-warning {\n  .form-control-validation(@state-warning-text; @state-warning-text; @state-warning-bg);\n}\n.has-error {\n  .form-control-validation(@state-danger-text; @state-danger-text; @state-danger-bg);\n}\n\n\n// Static form control text\n//\n// Apply class to a `p` element to make any string of text align with labels in\n// a horizontal form layout.\n\n.form-control-static {\n  margin-bottom: 0; // Remove default margin from `p`\n}\n\n\n// Help text\n//\n// Apply to any element you wish to create light text for placement immediately\n// below a form control. 
Use for general help, formatting, or instructional text.\n\n.help-block {\n  display: block; // account for any element using help-block\n  margin-top: 5px;\n  margin-bottom: 10px;\n  color: lighten(@text-color, 25%); // lighten the text some for contrast\n}\n\n\n\n// Inline forms\n//\n// Make forms appear inline(-block) by adding the `.form-inline` class. Inline\n// forms begin stacked on extra small (mobile) devices and then go inline when\n// viewports reach <768px.\n//\n// Requires wrapping inputs and labels with `.form-group` for proper display of\n// default HTML form controls and our custom form controls (e.g., input groups).\n//\n// Heads up! This is mixin-ed into `.navbar-form` in navbars.less.\n\n.form-inline {\n\n  // Kick in the inline\n  @media (min-width: @screen-sm-min) {\n    // Inline-block all the things for \"inline\"\n    .form-group {\n      display: inline-block;\n      margin-bottom: 0;\n      vertical-align: middle;\n    }\n\n    // In navbar-form, allow folks to *not* use `.form-group`\n    .form-control {\n      display: inline-block;\n      width: auto; // Prevent labels from stacking above inputs in `.form-group`\n      vertical-align: middle;\n    }\n\n    .control-label {\n      margin-bottom: 0;\n      vertical-align: middle;\n    }\n\n    // Remove default margin on radios/checkboxes that were used for stacking, and\n    // then undo the floating of radios and checkboxes to match (which also avoids\n    // a bug in WebKit: https://github.com/twbs/bootstrap/issues/1969).\n    .radio,\n    .checkbox {\n      display: inline-block;\n      margin-top: 0;\n      margin-bottom: 0;\n      padding-left: 0;\n      vertical-align: middle;\n    }\n    .radio input[type=\"radio\"],\n    .checkbox input[type=\"checkbox\"] {\n      float: none;\n      margin-left: 0;\n    }\n\n    // Validation states\n    //\n    // Reposition the icon because it's now within a grid column and columns have\n    // `position: relative;` on them. Also accounts for the grid gutter padding.\n    .has-feedback .form-control-feedback {\n      top: 0;\n    }\n  }\n}\n\n\n// Horizontal forms\n//\n// Horizontal forms are built on grid classes and allow you to create forms with\n// labels on the left and inputs on the right.\n\n.form-horizontal {\n\n  // Consistent vertical alignment of labels, radios, and checkboxes\n  .control-label,\n  .radio,\n  .checkbox,\n  .radio-inline,\n  .checkbox-inline {\n    margin-top: 0;\n    margin-bottom: 0;\n    padding-top: (@padding-base-vertical + 1); // Default padding plus a border\n  }\n  // Account for padding we're adding to ensure the alignment and of help text\n  // and other content below items\n  .radio,\n  .checkbox {\n    min-height: (@line-height-computed + (@padding-base-vertical + 1));\n  }\n\n  // Make form groups behave like rows\n  .form-group {\n    .make-row();\n  }\n\n  .form-control-static {\n    padding-top: (@padding-base-vertical + 1);\n  }\n\n  // Only right align form labels here when the columns stop stacking\n  @media (min-width: @screen-sm-min) {\n    .control-label {\n      text-align: right;\n    }\n  }\n\n  // Validation states\n  //\n  // Reposition the icon because it's now within a grid column and columns have\n  // `position: relative;` on them. 
Also accounts for the grid gutter padding.\n  .has-feedback .form-control-feedback {\n    top: 0;\n    right: (@grid-gutter-width / 2);\n  }\n}\n","//\n// Buttons\n// --------------------------------------------------\n\n\n// Base styles\n// --------------------------------------------------\n\n.btn {\n  display: inline-block;\n  margin-bottom: 0; // For input.btn\n  font-weight: @btn-font-weight;\n  text-align: center;\n  vertical-align: middle;\n  cursor: pointer;\n  background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid transparent;\n  white-space: nowrap;\n  .button-size(@padding-base-vertical; @padding-base-horizontal; @font-size-base; @line-height-base; @border-radius-base);\n  .user-select(none);\n\n  &:focus {\n    .tab-focus();\n  }\n\n  &:hover,\n  &:focus {\n    color: @btn-default-color;\n    text-decoration: none;\n  }\n\n  &:active,\n  &.active {\n    outline: 0;\n    background-image: none;\n    .box-shadow(inset 0 3px 5px rgba(0,0,0,.125));\n  }\n\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    cursor: not-allowed;\n    pointer-events: none; // Future-proof disabling of clicks\n    .opacity(.65);\n    .box-shadow(none);\n  }\n}\n\n\n// Alternate buttons\n// --------------------------------------------------\n\n.btn-default {\n  .button-variant(@btn-default-color; @btn-default-bg; @btn-default-border);\n}\n.btn-primary {\n  .button-variant(@btn-primary-color; @btn-primary-bg; @btn-primary-border);\n}\n// Success appears as green\n.btn-success {\n  .button-variant(@btn-success-color; @btn-success-bg; @btn-success-border);\n}\n// Info appears as blue-green\n.btn-info {\n  .button-variant(@btn-info-color; @btn-info-bg; @btn-info-border);\n}\n// Warning appears as orange\n.btn-warning {\n  .button-variant(@btn-warning-color; @btn-warning-bg; @btn-warning-border);\n}\n// Danger and error appear as red\n.btn-danger {\n  .button-variant(@btn-danger-color; @btn-danger-bg; @btn-danger-border);\n}\n\n\n// Link buttons\n// -------------------------\n\n// Make a button look and behave like a link\n.btn-link {\n  color: @link-color;\n  font-weight: normal;\n  cursor: pointer;\n  border-radius: 0;\n\n  &,\n  &:active,\n  &[disabled],\n  fieldset[disabled] & {\n    background-color: transparent;\n    .box-shadow(none);\n  }\n  &,\n  &:hover,\n  &:focus,\n  &:active {\n    border-color: transparent;\n  }\n  &:hover,\n  &:focus {\n    color: @link-hover-color;\n    text-decoration: underline;\n    background-color: transparent;\n  }\n  &[disabled],\n  fieldset[disabled] & {\n    &:hover,\n    &:focus {\n      color: @btn-link-disabled-color;\n      text-decoration: none;\n    }\n  }\n}\n\n\n// Button Sizes\n// --------------------------------------------------\n\n.btn-lg {\n  // line-height: ensure even-numbered height of button next to large input\n  .button-size(@padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @border-radius-large);\n}\n.btn-sm {\n  // line-height: ensure proper height of button next to small input\n  .button-size(@padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @border-radius-small);\n}\n.btn-xs {\n  .button-size(@padding-xs-vertical; @padding-xs-horizontal; @font-size-small; @line-height-small; @border-radius-small);\n}\n\n\n// Block button\n// --------------------------------------------------\n\n.btn-block {\n  display: block;\n  width: 100%;\n  padding-left: 0;\n  padding-right: 
0;\n}\n\n// Vertically space out multiple block buttons\n.btn-block + .btn-block {\n  margin-top: 5px;\n}\n\n// Specificity overrides\ninput[type=\"submit\"],\ninput[type=\"reset\"],\ninput[type=\"button\"] {\n  &.btn-block {\n    width: 100%;\n  }\n}\n","//\n// Component animations\n// --------------------------------------------------\n\n// Heads up!\n//\n// We don't use the `.opacity()` mixin here since it causes a bug with text\n// fields in IE7-8. Source: https://github.com/twitter/bootstrap/pull/3552.\n\n.fade {\n  opacity: 0;\n  .transition(opacity .15s linear);\n  &.in {\n    opacity: 1;\n  }\n}\n\n.collapse {\n  display: none;\n  &.in {\n    display: block;\n  }\n}\n.collapsing {\n  position: relative;\n  height: 0;\n  overflow: hidden;\n  .transition(height .35s ease);\n}\n","//\n// Glyphicons for Bootstrap\n//\n// Since icons are fonts, they can be placed anywhere text is placed and are\n// thus automatically sized to match the surrounding child. To use, create an\n// inline element with the appropriate classes, like so:\n//\n// <a href=\"#\"><span class=\"glyphicon glyphicon-star\"></span> Star</a>\n\n// Import the fonts\n@font-face {\n  font-family: 'Glyphicons Halflings';\n  src: ~\"url('@{icon-font-path}@{icon-font-name}.eot')\";\n  src: ~\"url('@{icon-font-path}@{icon-font-name}.eot?#iefix') format('embedded-opentype')\",\n       ~\"url('@{icon-font-path}@{icon-font-name}.woff') format('woff')\",\n       ~\"url('@{icon-font-path}@{icon-font-name}.ttf') format('truetype')\",\n       ~\"url('@{icon-font-path}@{icon-font-name}.svg#@{icon-font-svg-id}') format('svg')\";\n}\n\n// Catchall baseclass\n.glyphicon {\n  position: relative;\n  top: 1px;\n  display: inline-block;\n  font-family: 'Glyphicons Halflings';\n  font-style: normal;\n  font-weight: normal;\n  line-height: 1;\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n\n// Individual icons\n.glyphicon-asterisk               { &:before { content: \"\\2a\"; } }\n.glyphicon-plus                   { &:before { content: \"\\2b\"; } }\n.glyphicon-euro                   { &:before { content: \"\\20ac\"; } }\n.glyphicon-minus                  { &:before { content: \"\\2212\"; } }\n.glyphicon-cloud                  { &:before { content: \"\\2601\"; } }\n.glyphicon-envelope               { &:before { content: \"\\2709\"; } }\n.glyphicon-pencil                 { &:before { content: \"\\270f\"; } }\n.glyphicon-glass                  { &:before { content: \"\\e001\"; } }\n.glyphicon-music                  { &:before { content: \"\\e002\"; } }\n.glyphicon-search                 { &:before { content: \"\\e003\"; } }\n.glyphicon-heart                  { &:before { content: \"\\e005\"; } }\n.glyphicon-star                   { &:before { content: \"\\e006\"; } }\n.glyphicon-star-empty             { &:before { content: \"\\e007\"; } }\n.glyphicon-user                   { &:before { content: \"\\e008\"; } }\n.glyphicon-film                   { &:before { content: \"\\e009\"; } }\n.glyphicon-th-large               { &:before { content: \"\\e010\"; } }\n.glyphicon-th                     { &:before { content: \"\\e011\"; } }\n.glyphicon-th-list                { &:before { content: \"\\e012\"; } }\n.glyphicon-ok                     { &:before { content: \"\\e013\"; } }\n.glyphicon-remove                 { &:before { content: \"\\e014\"; } }\n.glyphicon-zoom-in                { &:before { content: \"\\e015\"; } }\n.glyphicon-zoom-out               { &:before { content: \"\\e016\"; } }\n.glyphicon-off              
      { &:before { content: \"\\e017\"; } }\n.glyphicon-signal                 { &:before { content: \"\\e018\"; } }\n.glyphicon-cog                    { &:before { content: \"\\e019\"; } }\n.glyphicon-trash                  { &:before { content: \"\\e020\"; } }\n.glyphicon-home                   { &:before { content: \"\\e021\"; } }\n.glyphicon-file                   { &:before { content: \"\\e022\"; } }\n.glyphicon-time                   { &:before { content: \"\\e023\"; } }\n.glyphicon-road                   { &:before { content: \"\\e024\"; } }\n.glyphicon-download-alt           { &:before { content: \"\\e025\"; } }\n.glyphicon-download               { &:before { content: \"\\e026\"; } }\n.glyphicon-upload                 { &:before { content: \"\\e027\"; } }\n.glyphicon-inbox                  { &:before { content: \"\\e028\"; } }\n.glyphicon-play-circle            { &:before { content: \"\\e029\"; } }\n.glyphicon-repeat                 { &:before { content: \"\\e030\"; } }\n.glyphicon-refresh                { &:before { content: \"\\e031\"; } }\n.glyphicon-list-alt               { &:before { content: \"\\e032\"; } }\n.glyphicon-lock                   { &:before { content: \"\\e033\"; } }\n.glyphicon-flag                   { &:before { content: \"\\e034\"; } }\n.glyphicon-headphones             { &:before { content: \"\\e035\"; } }\n.glyphicon-volume-off             { &:before { content: \"\\e036\"; } }\n.glyphicon-volume-down            { &:before { content: \"\\e037\"; } }\n.glyphicon-volume-up              { &:before { content: \"\\e038\"; } }\n.glyphicon-qrcode                 { &:before { content: \"\\e039\"; } }\n.glyphicon-barcode                { &:before { content: \"\\e040\"; } }\n.glyphicon-tag                    { &:before { content: \"\\e041\"; } }\n.glyphicon-tags                   { &:before { content: \"\\e042\"; } }\n.glyphicon-book                   { &:before { content: \"\\e043\"; } }\n.glyphicon-bookmark               { &:before { content: \"\\e044\"; } }\n.glyphicon-print                  { &:before { content: \"\\e045\"; } }\n.glyphicon-camera                 { &:before { content: \"\\e046\"; } }\n.glyphicon-font                   { &:before { content: \"\\e047\"; } }\n.glyphicon-bold                   { &:before { content: \"\\e048\"; } }\n.glyphicon-italic                 { &:before { content: \"\\e049\"; } }\n.glyphicon-text-height            { &:before { content: \"\\e050\"; } }\n.glyphicon-text-width             { &:before { content: \"\\e051\"; } }\n.glyphicon-align-left             { &:before { content: \"\\e052\"; } }\n.glyphicon-align-center           { &:before { content: \"\\e053\"; } }\n.glyphicon-align-right            { &:before { content: \"\\e054\"; } }\n.glyphicon-align-justify          { &:before { content: \"\\e055\"; } }\n.glyphicon-list                   { &:before { content: \"\\e056\"; } }\n.glyphicon-indent-left            { &:before { content: \"\\e057\"; } }\n.glyphicon-indent-right           { &:before { content: \"\\e058\"; } }\n.glyphicon-facetime-video         { &:before { content: \"\\e059\"; } }\n.glyphicon-picture                { &:before { content: \"\\e060\"; } }\n.glyphicon-map-marker             { &:before { content: \"\\e062\"; } }\n.glyphicon-adjust                 { &:before { content: \"\\e063\"; } }\n.glyphicon-tint                   { &:before { content: \"\\e064\"; } }\n.glyphicon-edit                   { &:before { content: \"\\e065\"; } }\n.glyphicon-share                  { &:before { content: \"\\e066\"; } 
}\n.glyphicon-check                  { &:before { content: \"\\e067\"; } }\n.glyphicon-move                   { &:before { content: \"\\e068\"; } }\n.glyphicon-step-backward          { &:before { content: \"\\e069\"; } }\n.glyphicon-fast-backward          { &:before { content: \"\\e070\"; } }\n.glyphicon-backward               { &:before { content: \"\\e071\"; } }\n.glyphicon-play                   { &:before { content: \"\\e072\"; } }\n.glyphicon-pause                  { &:before { content: \"\\e073\"; } }\n.glyphicon-stop                   { &:before { content: \"\\e074\"; } }\n.glyphicon-forward                { &:before { content: \"\\e075\"; } }\n.glyphicon-fast-forward           { &:before { content: \"\\e076\"; } }\n.glyphicon-step-forward           { &:before { content: \"\\e077\"; } }\n.glyphicon-eject                  { &:before { content: \"\\e078\"; } }\n.glyphicon-chevron-left           { &:before { content: \"\\e079\"; } }\n.glyphicon-chevron-right          { &:before { content: \"\\e080\"; } }\n.glyphicon-plus-sign              { &:before { content: \"\\e081\"; } }\n.glyphicon-minus-sign             { &:before { content: \"\\e082\"; } }\n.glyphicon-remove-sign            { &:before { content: \"\\e083\"; } }\n.glyphicon-ok-sign                { &:before { content: \"\\e084\"; } }\n.glyphicon-question-sign          { &:before { content: \"\\e085\"; } }\n.glyphicon-info-sign              { &:before { content: \"\\e086\"; } }\n.glyphicon-screenshot             { &:before { content: \"\\e087\"; } }\n.glyphicon-remove-circle          { &:before { content: \"\\e088\"; } }\n.glyphicon-ok-circle              { &:before { content: \"\\e089\"; } }\n.glyphicon-ban-circle             { &:before { content: \"\\e090\"; } }\n.glyphicon-arrow-left             { &:before { content: \"\\e091\"; } }\n.glyphicon-arrow-right            { &:before { content: \"\\e092\"; } }\n.glyphicon-arrow-up               { &:before { content: \"\\e093\"; } }\n.glyphicon-arrow-down             { &:before { content: \"\\e094\"; } }\n.glyphicon-share-alt              { &:before { content: \"\\e095\"; } }\n.glyphicon-resize-full            { &:before { content: \"\\e096\"; } }\n.glyphicon-resize-small           { &:before { content: \"\\e097\"; } }\n.glyphicon-exclamation-sign       { &:before { content: \"\\e101\"; } }\n.glyphicon-gift                   { &:before { content: \"\\e102\"; } }\n.glyphicon-leaf                   { &:before { content: \"\\e103\"; } }\n.glyphicon-fire                   { &:before { content: \"\\e104\"; } }\n.glyphicon-eye-open               { &:before { content: \"\\e105\"; } }\n.glyphicon-eye-close              { &:before { content: \"\\e106\"; } }\n.glyphicon-warning-sign           { &:before { content: \"\\e107\"; } }\n.glyphicon-plane                  { &:before { content: \"\\e108\"; } }\n.glyphicon-calendar               { &:before { content: \"\\e109\"; } }\n.glyphicon-random                 { &:before { content: \"\\e110\"; } }\n.glyphicon-comment                { &:before { content: \"\\e111\"; } }\n.glyphicon-magnet                 { &:before { content: \"\\e112\"; } }\n.glyphicon-chevron-up             { &:before { content: \"\\e113\"; } }\n.glyphicon-chevron-down           { &:before { content: \"\\e114\"; } }\n.glyphicon-retweet                { &:before { content: \"\\e115\"; } }\n.glyphicon-shopping-cart          { &:before { content: \"\\e116\"; } }\n.glyphicon-folder-close           { &:before { content: \"\\e117\"; } }\n.glyphicon-folder-open            { &:before { 
content: \"\\e118\"; } }\n.glyphicon-resize-vertical        { &:before { content: \"\\e119\"; } }\n.glyphicon-resize-horizontal      { &:before { content: \"\\e120\"; } }\n.glyphicon-hdd                    { &:before { content: \"\\e121\"; } }\n.glyphicon-bullhorn               { &:before { content: \"\\e122\"; } }\n.glyphicon-bell                   { &:before { content: \"\\e123\"; } }\n.glyphicon-certificate            { &:before { content: \"\\e124\"; } }\n.glyphicon-thumbs-up              { &:before { content: \"\\e125\"; } }\n.glyphicon-thumbs-down            { &:before { content: \"\\e126\"; } }\n.glyphicon-hand-right             { &:before { content: \"\\e127\"; } }\n.glyphicon-hand-left              { &:before { content: \"\\e128\"; } }\n.glyphicon-hand-up                { &:before { content: \"\\e129\"; } }\n.glyphicon-hand-down              { &:before { content: \"\\e130\"; } }\n.glyphicon-circle-arrow-right     { &:before { content: \"\\e131\"; } }\n.glyphicon-circle-arrow-left      { &:before { content: \"\\e132\"; } }\n.glyphicon-circle-arrow-up        { &:before { content: \"\\e133\"; } }\n.glyphicon-circle-arrow-down      { &:before { content: \"\\e134\"; } }\n.glyphicon-globe                  { &:before { content: \"\\e135\"; } }\n.glyphicon-wrench                 { &:before { content: \"\\e136\"; } }\n.glyphicon-tasks                  { &:before { content: \"\\e137\"; } }\n.glyphicon-filter                 { &:before { content: \"\\e138\"; } }\n.glyphicon-briefcase              { &:before { content: \"\\e139\"; } }\n.glyphicon-fullscreen             { &:before { content: \"\\e140\"; } }\n.glyphicon-dashboard              { &:before { content: \"\\e141\"; } }\n.glyphicon-paperclip              { &:before { content: \"\\e142\"; } }\n.glyphicon-heart-empty            { &:before { content: \"\\e143\"; } }\n.glyphicon-link                   { &:before { content: \"\\e144\"; } }\n.glyphicon-phone                  { &:before { content: \"\\e145\"; } }\n.glyphicon-pushpin                { &:before { content: \"\\e146\"; } }\n.glyphicon-usd                    { &:before { content: \"\\e148\"; } }\n.glyphicon-gbp                    { &:before { content: \"\\e149\"; } }\n.glyphicon-sort                   { &:before { content: \"\\e150\"; } }\n.glyphicon-sort-by-alphabet       { &:before { content: \"\\e151\"; } }\n.glyphicon-sort-by-alphabet-alt   { &:before { content: \"\\e152\"; } }\n.glyphicon-sort-by-order          { &:before { content: \"\\e153\"; } }\n.glyphicon-sort-by-order-alt      { &:before { content: \"\\e154\"; } }\n.glyphicon-sort-by-attributes     { &:before { content: \"\\e155\"; } }\n.glyphicon-sort-by-attributes-alt { &:before { content: \"\\e156\"; } }\n.glyphicon-unchecked              { &:before { content: \"\\e157\"; } }\n.glyphicon-expand                 { &:before { content: \"\\e158\"; } }\n.glyphicon-collapse-down          { &:before { content: \"\\e159\"; } }\n.glyphicon-collapse-up            { &:before { content: \"\\e160\"; } }\n.glyphicon-log-in                 { &:before { content: \"\\e161\"; } }\n.glyphicon-flash                  { &:before { content: \"\\e162\"; } }\n.glyphicon-log-out                { &:before { content: \"\\e163\"; } }\n.glyphicon-new-window             { &:before { content: \"\\e164\"; } }\n.glyphicon-record                 { &:before { content: \"\\e165\"; } }\n.glyphicon-save                   { &:before { content: \"\\e166\"; } }\n.glyphicon-open                   { &:before { content: \"\\e167\"; } }\n.glyphicon-saved         
         { &:before { content: \"\\e168\"; } }\n.glyphicon-import                 { &:before { content: \"\\e169\"; } }\n.glyphicon-export                 { &:before { content: \"\\e170\"; } }\n.glyphicon-send                   { &:before { content: \"\\e171\"; } }\n.glyphicon-floppy-disk            { &:before { content: \"\\e172\"; } }\n.glyphicon-floppy-saved           { &:before { content: \"\\e173\"; } }\n.glyphicon-floppy-remove          { &:before { content: \"\\e174\"; } }\n.glyphicon-floppy-save            { &:before { content: \"\\e175\"; } }\n.glyphicon-floppy-open            { &:before { content: \"\\e176\"; } }\n.glyphicon-credit-card            { &:before { content: \"\\e177\"; } }\n.glyphicon-transfer               { &:before { content: \"\\e178\"; } }\n.glyphicon-cutlery                { &:before { content: \"\\e179\"; } }\n.glyphicon-header                 { &:before { content: \"\\e180\"; } }\n.glyphicon-compressed             { &:before { content: \"\\e181\"; } }\n.glyphicon-earphone               { &:before { content: \"\\e182\"; } }\n.glyphicon-phone-alt              { &:before { content: \"\\e183\"; } }\n.glyphicon-tower                  { &:before { content: \"\\e184\"; } }\n.glyphicon-stats                  { &:before { content: \"\\e185\"; } }\n.glyphicon-sd-video               { &:before { content: \"\\e186\"; } }\n.glyphicon-hd-video               { &:before { content: \"\\e187\"; } }\n.glyphicon-subtitles              { &:before { content: \"\\e188\"; } }\n.glyphicon-sound-stereo           { &:before { content: \"\\e189\"; } }\n.glyphicon-sound-dolby            { &:before { content: \"\\e190\"; } }\n.glyphicon-sound-5-1              { &:before { content: \"\\e191\"; } }\n.glyphicon-sound-6-1              { &:before { content: \"\\e192\"; } }\n.glyphicon-sound-7-1              { &:before { content: \"\\e193\"; } }\n.glyphicon-copyright-mark         { &:before { content: \"\\e194\"; } }\n.glyphicon-registration-mark      { &:before { content: \"\\e195\"; } }\n.glyphicon-cloud-download         { &:before { content: \"\\e197\"; } }\n.glyphicon-cloud-upload           { &:before { content: \"\\e198\"; } }\n.glyphicon-tree-conifer           { &:before { content: \"\\e199\"; } }\n.glyphicon-tree-deciduous         { &:before { content: \"\\e200\"; } }\n","//\n// Dropdown menus\n// --------------------------------------------------\n\n\n// Dropdown arrow/caret\n.caret {\n  display: inline-block;\n  width: 0;\n  height: 0;\n  margin-left: 2px;\n  vertical-align: middle;\n  border-top:   @caret-width-base solid;\n  border-right: @caret-width-base solid transparent;\n  border-left:  @caret-width-base solid transparent;\n}\n\n// The dropdown wrapper (div)\n.dropdown {\n  position: relative;\n}\n\n// Prevent the focus on the dropdown toggle when closing dropdowns\n.dropdown-toggle:focus {\n  outline: 0;\n}\n\n// The dropdown menu (ul)\n.dropdown-menu {\n  position: absolute;\n  top: 100%;\n  left: 0;\n  z-index: @zindex-dropdown;\n  display: none; // none by default, but block on \"open\" of the menu\n  float: left;\n  min-width: 160px;\n  padding: 5px 0;\n  margin: 2px 0 0; // override default ul\n  list-style: none;\n  font-size: @font-size-base;\n  background-color: @dropdown-bg;\n  border: 1px solid @dropdown-fallback-border; // IE8 fallback\n  border: 1px solid @dropdown-border;\n  border-radius: @border-radius-base;\n  .box-shadow(0 6px 12px rgba(0,0,0,.175));\n  background-clip: padding-box;\n\n  // Aligns the dropdown menu to right\n  //\n  // Deprecated as of 3.1.0 in 
favor of `.dropdown-menu-[dir]`\n  &.pull-right {\n    right: 0;\n    left: auto;\n  }\n\n  // Dividers (basically an hr) within the dropdown\n  .divider {\n    .nav-divider(@dropdown-divider-bg);\n  }\n\n  // Links within the dropdown menu\n  > li > a {\n    display: block;\n    padding: 3px 20px;\n    clear: both;\n    font-weight: normal;\n    line-height: @line-height-base;\n    color: @dropdown-link-color;\n    white-space: nowrap; // prevent links from randomly breaking onto new lines\n  }\n}\n\n// Hover/Focus state\n.dropdown-menu > li > a {\n  &:hover,\n  &:focus {\n    text-decoration: none;\n    color: @dropdown-link-hover-color;\n    background-color: @dropdown-link-hover-bg;\n  }\n}\n\n// Active state\n.dropdown-menu > .active > a {\n  &,\n  &:hover,\n  &:focus {\n    color: @dropdown-link-active-color;\n    text-decoration: none;\n    outline: 0;\n    background-color: @dropdown-link-active-bg;\n  }\n}\n\n// Disabled state\n//\n// Gray out text and ensure the hover/focus state remains gray\n\n.dropdown-menu > .disabled > a {\n  &,\n  &:hover,\n  &:focus {\n    color: @dropdown-link-disabled-color;\n  }\n}\n// Nuke hover/focus effects\n.dropdown-menu > .disabled > a {\n  &:hover,\n  &:focus {\n    text-decoration: none;\n    background-color: transparent;\n    background-image: none; // Remove CSS gradient\n    .reset-filter();\n    cursor: not-allowed;\n  }\n}\n\n// Open state for the dropdown\n.open {\n  // Show the menu\n  > .dropdown-menu {\n    display: block;\n  }\n\n  // Remove the outline when :focus is triggered\n  > a {\n    outline: 0;\n  }\n}\n\n// Menu positioning\n//\n// Add extra class to `.dropdown-menu` to flip the alignment of the dropdown\n// menu with the parent.\n.dropdown-menu-right {\n  left: auto; // Reset the default from `.dropdown-menu`\n  right: 0;\n}\n// With v3, we enabled auto-flipping if you have a dropdown within a right\n// aligned nav component. 
To enable the undoing of that, we provide an override\n// to restore the default dropdown menu alignment.\n//\n// This is only for left-aligning a dropdown menu within a `.navbar-right` or\n// `.pull-right` nav component.\n.dropdown-menu-left {\n  left: 0;\n  right: auto;\n}\n\n// Dropdown section headers\n.dropdown-header {\n  display: block;\n  padding: 3px 20px;\n  font-size: @font-size-small;\n  line-height: @line-height-base;\n  color: @dropdown-header-color;\n}\n\n// Backdrop to catch body clicks on mobile, etc.\n.dropdown-backdrop {\n  position: fixed;\n  left: 0;\n  right: 0;\n  bottom: 0;\n  top: 0;\n  z-index: (@zindex-dropdown - 10);\n}\n\n// Right aligned dropdowns\n.pull-right > .dropdown-menu {\n  right: 0;\n  left: auto;\n}\n\n// Allow for dropdowns to go bottom up (aka, dropup-menu)\n//\n// Just add .dropup after the standard .dropdown class and you're set, bro.\n// TODO: abstract this so that the navbar fixed styles are not placed here?\n\n.dropup,\n.navbar-fixed-bottom .dropdown {\n  // Reverse the caret\n  .caret {\n    border-top: 0;\n    border-bottom: @caret-width-base solid;\n    content: \"\";\n  }\n  // Different positioning for bottom up menu\n  .dropdown-menu {\n    top: auto;\n    bottom: 100%;\n    margin-bottom: 1px;\n  }\n}\n\n\n// Component alignment\n//\n// Reiterate per navbar.less and the modified component alignment there.\n\n@media (min-width: @grid-float-breakpoint) {\n  .navbar-right {\n    .dropdown-menu {\n      .dropdown-menu-right();\n    }\n    // Necessary for overrides of the default right aligned menu.\n    // Will remove come v4 in all likelihood.\n    .dropdown-menu-left {\n      .dropdown-menu-left();\n    }\n  }\n}\n\n","//\n// Button groups\n// --------------------------------------------------\n\n// Make the div behave like a button\n.btn-group,\n.btn-group-vertical {\n  position: relative;\n  display: inline-block;\n  vertical-align: middle; // match .btn alignment given font-size hack above\n  > .btn {\n    position: relative;\n    float: left;\n    // Bring the \"active\" button to the front\n    &:hover,\n    &:focus,\n    &:active,\n    &.active {\n      z-index: 2;\n    }\n    &:focus {\n      // Remove focus outline when dropdown JS adds it after closing the menu\n      outline: none;\n    }\n  }\n}\n\n// Prevent double borders when buttons are next to each other\n.btn-group {\n  .btn + .btn,\n  .btn + .btn-group,\n  .btn-group + .btn,\n  .btn-group + .btn-group {\n    margin-left: -1px;\n  }\n}\n\n// Optional: Group multiple button groups together for a toolbar\n.btn-toolbar {\n  margin-left: -5px; // Offset the first child's margin\n  &:extend(.clearfix all);\n\n  .btn-group,\n  .input-group {\n    float: left;\n  }\n  > .btn,\n  > .btn-group,\n  > .input-group {\n    margin-left: 5px;\n  }\n}\n\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n  border-radius: 0;\n}\n\n// Set corners individual because sometimes a single button can be in a .btn-group and we need :first-child and :last-child to both match\n.btn-group > .btn:first-child {\n  margin-left: 0;\n  &:not(:last-child):not(.dropdown-toggle) {\n    .border-right-radius(0);\n  }\n}\n// Need .dropdown-toggle since :last-child doesn't apply given a .dropdown-menu immediately after it\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n  .border-left-radius(0);\n}\n\n// Custom edits for including btn-groups within btn-groups (useful for including dropdown buttons within a btn-group)\n.btn-group > 
.btn-group {\n  float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group > .btn-group:first-child {\n  > .btn:last-child,\n  > .dropdown-toggle {\n    .border-right-radius(0);\n  }\n}\n.btn-group > .btn-group:last-child > .btn:first-child {\n  .border-left-radius(0);\n}\n\n// On active and open, don't show outline\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n  outline: 0;\n}\n\n\n// Sizing\n//\n// Remix the default button sizing classes into new ones for easier manipulation.\n\n.btn-group-xs > .btn { .btn-xs(); }\n.btn-group-sm > .btn { .btn-sm(); }\n.btn-group-lg > .btn { .btn-lg(); }\n\n\n// Split button dropdowns\n// ----------------------\n\n// Give the line between buttons some depth\n.btn-group > .btn + .dropdown-toggle {\n  padding-left: 8px;\n  padding-right: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n  padding-left: 12px;\n  padding-right: 12px;\n}\n\n// The clickable button for toggling the menu\n// Remove the gradient and set the same inset shadow as the :active state\n.btn-group.open .dropdown-toggle {\n  .box-shadow(inset 0 3px 5px rgba(0,0,0,.125));\n\n  // Show no shadow for `.btn-link` since it has no other button styles.\n  &.btn-link {\n    .box-shadow(none);\n  }\n}\n\n\n// Reposition the caret\n.btn .caret {\n  margin-left: 0;\n}\n// Carets in other button sizes\n.btn-lg .caret {\n  border-width: @caret-width-large @caret-width-large 0;\n  border-bottom-width: 0;\n}\n// Upside down carets for .dropup\n.dropup .btn-lg .caret {\n  border-width: 0 @caret-width-large @caret-width-large;\n}\n\n\n// Vertical button groups\n// ----------------------\n\n.btn-group-vertical {\n  > .btn,\n  > .btn-group,\n  > .btn-group > .btn {\n    display: block;\n    float: none;\n    width: 100%;\n    max-width: 100%;\n  }\n\n  // Clear floats so dropdown menus can be properly placed\n  > .btn-group {\n    &:extend(.clearfix all);\n    > .btn {\n      float: none;\n    }\n  }\n\n  > .btn + .btn,\n  > .btn + .btn-group,\n  > .btn-group + .btn,\n  > .btn-group + .btn-group {\n    margin-top: -1px;\n    margin-left: 0;\n  }\n}\n\n.btn-group-vertical > .btn {\n  &:not(:first-child):not(:last-child) {\n    border-radius: 0;\n  }\n  &:first-child:not(:last-child) {\n    border-top-right-radius: @border-radius-base;\n    .border-bottom-radius(0);\n  }\n  &:last-child:not(:first-child) {\n    border-bottom-left-radius: @border-radius-base;\n    .border-top-radius(0);\n  }\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) {\n  > .btn:last-child,\n  > .dropdown-toggle {\n    .border-bottom-radius(0);\n  }\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  .border-top-radius(0);\n}\n\n\n\n// Justified button groups\n// ----------------------\n\n.btn-group-justified {\n  display: table;\n  width: 100%;\n  table-layout: fixed;\n  border-collapse: separate;\n  > .btn,\n  > .btn-group {\n    float: none;\n    display: table-cell;\n    width: 1%;\n  }\n  > .btn-group .btn {\n    width: 100%;\n  }\n}\n\n\n// Checkbox and radio options\n[data-toggle=\"buttons\"] > .btn > input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn > input[type=\"checkbox\"] {\n  display: none;\n}\n","//\n// Input groups\n// --------------------------------------------------\n\n// Base styles\n// -------------------------\n.input-group {\n  position: relative; // For 
dropdowns\n  display: table;\n  border-collapse: separate; // prevent input groups from inheriting border styles from table cells when placed within a table\n\n  // Undo padding and float of grid classes\n  &[class*=\"col-\"] {\n    float: none;\n    padding-left: 0;\n    padding-right: 0;\n  }\n\n  .form-control {\n    // IE9 fubars the placeholder attribute in text inputs and the arrows on\n    // select elements in input groups. To fix it, we float the input. Details:\n    // https://github.com/twbs/bootstrap/issues/11561#issuecomment-28936855\n    float: left;\n\n    width: 100%;\n    margin-bottom: 0;\n  }\n}\n\n// Sizing options\n//\n// Remix the default form control sizing classes into new ones for easier\n// manipulation.\n\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn { .input-lg(); }\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn { .input-sm(); }\n\n\n// Display as table-cell\n// -------------------------\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n  display: table-cell;\n\n  &:not(:first-child):not(:last-child) {\n    border-radius: 0;\n  }\n}\n// Addon and addon wrapper for buttons\n.input-group-addon,\n.input-group-btn {\n  width: 1%;\n  white-space: nowrap;\n  vertical-align: middle; // Match the inputs\n}\n\n// Text input groups\n// -------------------------\n.input-group-addon {\n  padding: @padding-base-vertical @padding-base-horizontal;\n  font-size: @font-size-base;\n  font-weight: normal;\n  line-height: 1;\n  color: @input-color;\n  text-align: center;\n  background-color: @input-group-addon-bg;\n  border: 1px solid @input-group-addon-border-color;\n  border-radius: @border-radius-base;\n\n  // Sizing\n  &.input-sm {\n    padding: @padding-small-vertical @padding-small-horizontal;\n    font-size: @font-size-small;\n    border-radius: @border-radius-small;\n  }\n  &.input-lg {\n    padding: @padding-large-vertical @padding-large-horizontal;\n    font-size: @font-size-large;\n    border-radius: @border-radius-large;\n  }\n\n  // Nuke default margins from checkboxes and radios to vertically center within.\n  input[type=\"radio\"],\n  input[type=\"checkbox\"] {\n    margin-top: 0;\n  }\n}\n\n// Reset rounded corners\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n  .border-right-radius(0);\n}\n.input-group-addon:first-child {\n  border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n  .border-left-radius(0);\n}\n.input-group-addon:last-child {\n  border-left: 0;\n}\n\n// Button input groups\n// -------------------------\n.input-group-btn {\n  position: relative;\n  // Jankily prevent input button groups from wrapping with `white-space` and\n  // `font-size` in combination with `inline-block` on buttons.\n  font-size: 0;\n  white-space: nowrap;\n\n  // Negative margin for spacing, position for bringing 
hovered/focused/actived\n  // element above the siblings.\n  > .btn {\n    position: relative;\n    + .btn {\n      margin-left: -1px;\n    }\n    // Bring the \"active\" button to the front\n    &:hover,\n    &:focus,\n    &:active {\n      z-index: 2;\n    }\n  }\n\n  // Negative margin to only have a 1px border between the two\n  &:first-child {\n    > .btn,\n    > .btn-group {\n      margin-right: -1px;\n    }\n  }\n  &:last-child {\n    > .btn,\n    > .btn-group {\n      margin-left: -1px;\n    }\n  }\n}\n","//\n// Navs\n// --------------------------------------------------\n\n\n// Base class\n// --------------------------------------------------\n\n.nav {\n  margin-bottom: 0;\n  padding-left: 0; // Override default ul/ol\n  list-style: none;\n  &:extend(.clearfix all);\n\n  > li {\n    position: relative;\n    display: block;\n\n    > a {\n      position: relative;\n      display: block;\n      padding: @nav-link-padding;\n      &:hover,\n      &:focus {\n        text-decoration: none;\n        background-color: @nav-link-hover-bg;\n      }\n    }\n\n    // Disabled state sets text to gray and nukes hover/tab effects\n    &.disabled > a {\n      color: @nav-disabled-link-color;\n\n      &:hover,\n      &:focus {\n        color: @nav-disabled-link-hover-color;\n        text-decoration: none;\n        background-color: transparent;\n        cursor: not-allowed;\n      }\n    }\n  }\n\n  // Open dropdowns\n  .open > a {\n    &,\n    &:hover,\n    &:focus {\n      background-color: @nav-link-hover-bg;\n      border-color: @link-color;\n    }\n  }\n\n  // Nav dividers (deprecated with v3.0.1)\n  //\n  // This should have been removed in v3 with the dropping of `.nav-list`, but\n  // we missed it. We don't currently support this anywhere, but in the interest\n  // of maintaining backward compatibility in case you use it, it's deprecated.\n  .nav-divider {\n    .nav-divider();\n  }\n\n  // Prevent IE8 from misplacing imgs\n  //\n  // See https://github.com/h5bp/html5-boilerplate/issues/984#issuecomment-3985989\n  > li > a > img {\n    max-width: none;\n  }\n}\n\n\n// Tabs\n// -------------------------\n\n// Give the tabs something to sit on\n.nav-tabs {\n  border-bottom: 1px solid @nav-tabs-border-color;\n  > li {\n    float: left;\n    // Make the list-items overlay the bottom border\n    margin-bottom: -1px;\n\n    // Actual tabs (as links)\n    > a {\n      margin-right: 2px;\n      line-height: @line-height-base;\n      border: 1px solid transparent;\n      border-radius: @border-radius-base @border-radius-base 0 0;\n      &:hover {\n        border-color: @nav-tabs-link-hover-border-color @nav-tabs-link-hover-border-color @nav-tabs-border-color;\n      }\n    }\n\n    // Active state, and its :hover to override normal :hover\n    &.active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @nav-tabs-active-link-hover-color;\n        background-color: @nav-tabs-active-link-hover-bg;\n        border: 1px solid @nav-tabs-active-link-hover-border-color;\n        border-bottom-color: transparent;\n        cursor: default;\n      }\n    }\n  }\n  // pulling this in mainly for less shorthand\n  &.nav-justified {\n    .nav-justified();\n    .nav-tabs-justified();\n  }\n}\n\n\n// Pills\n// -------------------------\n.nav-pills {\n  > li {\n    float: left;\n\n    // Links rendered as pills\n    > a {\n      border-radius: @nav-pills-border-radius;\n    }\n    + li {\n      margin-left: 2px;\n    }\n\n    // Active state\n    &.active > a {\n      &,\n      &:hover,\n      &:focus 
{\n        color: @nav-pills-active-link-hover-color;\n        background-color: @nav-pills-active-link-hover-bg;\n      }\n    }\n  }\n}\n\n\n// Stacked pills\n.nav-stacked {\n  > li {\n    float: none;\n    + li {\n      margin-top: 2px;\n      margin-left: 0; // no need for this gap between nav items\n    }\n  }\n}\n\n\n// Nav variations\n// --------------------------------------------------\n\n// Justified nav links\n// -------------------------\n\n.nav-justified {\n  width: 100%;\n\n  > li {\n    float: none;\n     > a {\n      text-align: center;\n      margin-bottom: 5px;\n    }\n  }\n\n  > .dropdown .dropdown-menu {\n    top: auto;\n    left: auto;\n  }\n\n  @media (min-width: @screen-sm-min) {\n    > li {\n      display: table-cell;\n      width: 1%;\n      > a {\n        margin-bottom: 0;\n      }\n    }\n  }\n}\n\n// Move borders to anchors instead of bottom of list\n//\n// Mixin for adding on top the shared `.nav-justified` styles for our tabs\n.nav-tabs-justified {\n  border-bottom: 0;\n\n  > li > a {\n    // Override margin from .nav-tabs\n    margin-right: 0;\n    border-radius: @border-radius-base;\n  }\n\n  > .active > a,\n  > .active > a:hover,\n  > .active > a:focus {\n    border: 1px solid @nav-tabs-justified-link-border-color;\n  }\n\n  @media (min-width: @screen-sm-min) {\n    > li > a {\n      border-bottom: 1px solid @nav-tabs-justified-link-border-color;\n      border-radius: @border-radius-base @border-radius-base 0 0;\n    }\n    > .active > a,\n    > .active > a:hover,\n    > .active > a:focus {\n      border-bottom-color: @nav-tabs-justified-active-link-border-color;\n    }\n  }\n}\n\n\n// Tabbable tabs\n// -------------------------\n\n// Hide tabbable panes to start, show them when `.active`\n.tab-content {\n  > .tab-pane {\n    display: none;\n  }\n  > .active {\n    display: block;\n  }\n}\n\n\n// Dropdowns\n// -------------------------\n\n// Specific dropdowns\n.nav-tabs .dropdown-menu {\n  // make dropdown border overlap tab border\n  margin-top: -1px;\n  // Remove the top rounded corners here since there is a hard edge above the menu\n  .border-top-radius(0);\n}\n","//\n// Navbars\n// --------------------------------------------------\n\n\n// Wrapper and base class\n//\n// Provide a static navbar from which we expand to create full-width, fixed, and\n// other navbar variations.\n\n.navbar {\n  position: relative;\n  min-height: @navbar-height; // Ensure a navbar always shows (e.g., without a .navbar-brand in collapsed mode)\n  margin-bottom: @navbar-margin-bottom;\n  border: 1px solid transparent;\n\n  // Prevent floats from breaking the navbar\n  &:extend(.clearfix all);\n\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: @navbar-border-radius;\n  }\n}\n\n\n// Navbar heading\n//\n// Groups `.navbar-brand` and `.navbar-toggle` into a single component for easy\n// styling of responsive aspects.\n\n.navbar-header {\n  &:extend(.clearfix all);\n\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n  }\n}\n\n\n// Navbar collapse (body)\n//\n// Group your navbar content into this for easy collapsing and expanding across\n// various device sizes. 
By default, this content is collapsed when <768px, but\n// will expand past that for a horizontal display.\n//\n// To start (on mobile devices) the navbar links, forms, and buttons are stacked\n// vertically and include a `max-height` to overflow in case you have too much\n// content for the user's viewport.\n\n.navbar-collapse {\n  max-height: @navbar-collapse-max-height;\n  overflow-x: visible;\n  padding-right: @navbar-padding-horizontal;\n  padding-left:  @navbar-padding-horizontal;\n  border-top: 1px solid transparent;\n  box-shadow: inset 0 1px 0 rgba(255,255,255,.1);\n  &:extend(.clearfix all);\n  -webkit-overflow-scrolling: touch;\n\n  &.in {\n    overflow-y: auto;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    width: auto;\n    border-top: 0;\n    box-shadow: none;\n\n    &.collapse {\n      display: block !important;\n      height: auto !important;\n      padding-bottom: 0; // Override default setting\n      overflow: visible !important;\n    }\n\n    &.in {\n      overflow-y: visible;\n    }\n\n    // Undo the collapse side padding for navbars with containers to ensure\n    // alignment of right-aligned contents.\n    .navbar-fixed-top &,\n    .navbar-static-top &,\n    .navbar-fixed-bottom & {\n      padding-left: 0;\n      padding-right: 0;\n    }\n  }\n}\n\n\n// Both navbar header and collapse\n//\n// When a container is present, change the behavior of the header and collapse.\n\n.container,\n.container-fluid {\n  > .navbar-header,\n  > .navbar-collapse {\n    margin-right: -@navbar-padding-horizontal;\n    margin-left:  -@navbar-padding-horizontal;\n\n    @media (min-width: @grid-float-breakpoint) {\n      margin-right: 0;\n      margin-left:  0;\n    }\n  }\n}\n\n\n//\n// Navbar alignment options\n//\n// Display the navbar across the entirety of the page or fixed it to the top or\n// bottom of the page.\n\n// Static top (unfixed, but 100% wide) navbar\n.navbar-static-top {\n  z-index: @zindex-navbar;\n  border-width: 0 0 1px;\n\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: 0;\n  }\n}\n\n// Fix the top/bottom navbars when screen real estate supports it\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  position: fixed;\n  right: 0;\n  left: 0;\n  z-index: @zindex-navbar-fixed;\n\n  // Undo the rounded corners\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: 0;\n  }\n}\n.navbar-fixed-top {\n  top: 0;\n  border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n  bottom: 0;\n  margin-bottom: 0; // override .navbar defaults\n  border-width: 1px 0 0;\n}\n\n\n// Brand/project name\n\n.navbar-brand {\n  float: left;\n  padding: @navbar-padding-vertical @navbar-padding-horizontal;\n  font-size: @font-size-large;\n  line-height: @line-height-computed;\n  height: @line-height-computed;\n\n  &:hover,\n  &:focus {\n    text-decoration: none;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    .navbar > .container &,\n    .navbar > .container-fluid & {\n      margin-left: -@navbar-padding-horizontal;\n    }\n  }\n}\n\n\n// Navbar toggle\n//\n// Custom button for toggling the `.navbar-collapse`, powered by the collapse\n// JavaScript plugin.\n\n.navbar-toggle {\n  position: relative;\n  float: right;\n  margin-right: @navbar-padding-horizontal;\n  padding: 9px 10px;\n  .navbar-vertical-align(34px);\n  background-color: transparent;\n  background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid transparent;\n  border-radius: 
@border-radius-base;\n\n  // We remove the `outline` here, but later compensate by attaching `:hover`\n  // styles to `:focus`.\n  &:focus {\n    outline: none;\n  }\n\n  // Bars\n  .icon-bar {\n    display: block;\n    width: 22px;\n    height: 2px;\n    border-radius: 1px;\n  }\n  .icon-bar + .icon-bar {\n    margin-top: 4px;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    display: none;\n  }\n}\n\n\n// Navbar nav links\n//\n// Builds on top of the `.nav` components with its own modifier class to make\n// the nav the full height of the horizontal nav (above 768px).\n\n.navbar-nav {\n  margin: (@navbar-padding-vertical / 2) -@navbar-padding-horizontal;\n\n  > li > a {\n    padding-top:    10px;\n    padding-bottom: 10px;\n    line-height: @line-height-computed;\n  }\n\n  @media (max-width: @grid-float-breakpoint-max) {\n    // Dropdowns get custom display when collapsed\n    .open .dropdown-menu {\n      position: static;\n      float: none;\n      width: auto;\n      margin-top: 0;\n      background-color: transparent;\n      border: 0;\n      box-shadow: none;\n      > li > a,\n      .dropdown-header {\n        padding: 5px 15px 5px 25px;\n      }\n      > li > a {\n        line-height: @line-height-computed;\n        &:hover,\n        &:focus {\n          background-image: none;\n        }\n      }\n    }\n  }\n\n  // Uncollapse the nav\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n    margin: 0;\n\n    > li {\n      float: left;\n      > a {\n        padding-top:    @navbar-padding-vertical;\n        padding-bottom: @navbar-padding-vertical;\n      }\n    }\n\n    &.navbar-right:last-child {\n      margin-right: -@navbar-padding-horizontal;\n    }\n  }\n}\n\n\n// Component alignment\n//\n// Repurpose the pull utilities as their own navbar utilities to avoid specificity\n// issues with parents and chaining. 
Only do this when the navbar is uncollapsed\n// though so that navbar contents properly stack and align in mobile.\n\n@media (min-width: @grid-float-breakpoint) {\n  .navbar-left  { .pull-left(); }\n  .navbar-right { .pull-right(); }\n}\n\n\n// Navbar form\n//\n// Extension of the `.form-inline` with some extra flavor for optimum display in\n// our navbars.\n\n.navbar-form {\n  margin-left: -@navbar-padding-horizontal;\n  margin-right: -@navbar-padding-horizontal;\n  padding: 10px @navbar-padding-horizontal;\n  border-top: 1px solid transparent;\n  border-bottom: 1px solid transparent;\n  @shadow: inset 0 1px 0 rgba(255,255,255,.1), 0 1px 0 rgba(255,255,255,.1);\n  .box-shadow(@shadow);\n\n  // Mixin behavior for optimum display\n  .form-inline();\n\n  .form-group {\n    @media (max-width: @grid-float-breakpoint-max) {\n      margin-bottom: 5px;\n    }\n  }\n\n  // Vertically center in expanded, horizontal navbar\n  .navbar-vertical-align(@input-height-base);\n\n  // Undo 100% width for pull classes\n  @media (min-width: @grid-float-breakpoint) {\n    width: auto;\n    border: 0;\n    margin-left: 0;\n    margin-right: 0;\n    padding-top: 0;\n    padding-bottom: 0;\n    .box-shadow(none);\n\n    // Outdent the form if last child to line up with content down the page\n    &.navbar-right:last-child {\n      margin-right: -@navbar-padding-horizontal;\n    }\n  }\n}\n\n\n// Dropdown menus\n\n// Menu position and menu carets\n.navbar-nav > li > .dropdown-menu {\n  margin-top: 0;\n  .border-top-radius(0);\n}\n// Menu position and menu caret support for dropups via extra dropup class\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n  .border-bottom-radius(0);\n}\n\n\n// Buttons in navbars\n//\n// Vertically center a button within a navbar (when *not* in a form).\n\n.navbar-btn {\n  .navbar-vertical-align(@input-height-base);\n\n  &.btn-sm {\n    .navbar-vertical-align(@input-height-small);\n  }\n  &.btn-xs {\n    .navbar-vertical-align(22);\n  }\n}\n\n\n// Text in navbars\n//\n// Add a class to make any element properly align itself vertically within the navbars.\n\n.navbar-text {\n  .navbar-vertical-align(@line-height-computed);\n\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n    margin-left: @navbar-padding-horizontal;\n    margin-right: @navbar-padding-horizontal;\n\n    // Outdent the form if last child to line up with content down the page\n    &.navbar-right:last-child {\n      margin-right: 0;\n    }\n  }\n}\n\n// Alternate navbars\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n  background-color: @navbar-default-bg;\n  border-color: @navbar-default-border;\n\n  .navbar-brand {\n    color: @navbar-default-brand-color;\n    &:hover,\n    &:focus {\n      color: @navbar-default-brand-hover-color;\n      background-color: @navbar-default-brand-hover-bg;\n    }\n  }\n\n  .navbar-text {\n    color: @navbar-default-color;\n  }\n\n  .navbar-nav {\n    > li > a {\n      color: @navbar-default-link-color;\n\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-hover-color;\n        background-color: @navbar-default-link-hover-bg;\n      }\n    }\n    > .active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-active-color;\n        background-color: @navbar-default-link-active-bg;\n      }\n    }\n    > .disabled > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-disabled-color;\n        background-color: 
@navbar-default-link-disabled-bg;\n      }\n    }\n  }\n\n  .navbar-toggle {\n    border-color: @navbar-default-toggle-border-color;\n    &:hover,\n    &:focus {\n      background-color: @navbar-default-toggle-hover-bg;\n    }\n    .icon-bar {\n      background-color: @navbar-default-toggle-icon-bar-bg;\n    }\n  }\n\n  .navbar-collapse,\n  .navbar-form {\n    border-color: @navbar-default-border;\n  }\n\n  // Dropdown menu items\n  .navbar-nav {\n    // Remove background color from open dropdown\n    > .open > a {\n      &,\n      &:hover,\n      &:focus {\n        background-color: @navbar-default-link-active-bg;\n        color: @navbar-default-link-active-color;\n      }\n    }\n\n    @media (max-width: @grid-float-breakpoint-max) {\n      // Dropdowns get custom display when collapsed\n      .open .dropdown-menu {\n        > li > a {\n          color: @navbar-default-link-color;\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-hover-color;\n            background-color: @navbar-default-link-hover-bg;\n          }\n        }\n        > .active > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-active-color;\n            background-color: @navbar-default-link-active-bg;\n          }\n        }\n        > .disabled > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-disabled-color;\n            background-color: @navbar-default-link-disabled-bg;\n          }\n        }\n      }\n    }\n  }\n\n\n  // Links in navbars\n  //\n  // Add a class to ensure links outside the navbar nav are colored correctly.\n\n  .navbar-link {\n    color: @navbar-default-link-color;\n    &:hover {\n      color: @navbar-default-link-hover-color;\n    }\n  }\n\n}\n\n// Inverse navbar\n\n.navbar-inverse {\n  background-color: @navbar-inverse-bg;\n  border-color: @navbar-inverse-border;\n\n  .navbar-brand {\n    color: @navbar-inverse-brand-color;\n    &:hover,\n    &:focus {\n      color: @navbar-inverse-brand-hover-color;\n      background-color: @navbar-inverse-brand-hover-bg;\n    }\n  }\n\n  .navbar-text {\n    color: @navbar-inverse-color;\n  }\n\n  .navbar-nav {\n    > li > a {\n      color: @navbar-inverse-link-color;\n\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-hover-color;\n        background-color: @navbar-inverse-link-hover-bg;\n      }\n    }\n    > .active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-active-color;\n        background-color: @navbar-inverse-link-active-bg;\n      }\n    }\n    > .disabled > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-disabled-color;\n        background-color: @navbar-inverse-link-disabled-bg;\n      }\n    }\n  }\n\n  // Darken the responsive nav toggle\n  .navbar-toggle {\n    border-color: @navbar-inverse-toggle-border-color;\n    &:hover,\n    &:focus {\n      background-color: @navbar-inverse-toggle-hover-bg;\n    }\n    .icon-bar {\n      background-color: @navbar-inverse-toggle-icon-bar-bg;\n    }\n  }\n\n  .navbar-collapse,\n  .navbar-form {\n    border-color: darken(@navbar-inverse-bg, 7%);\n  }\n\n  // Dropdowns\n  .navbar-nav {\n    > .open > a {\n      &,\n      &:hover,\n      &:focus {\n        background-color: @navbar-inverse-link-active-bg;\n        color: @navbar-inverse-link-active-color;\n      }\n    }\n\n    @media (max-width: @grid-float-breakpoint-max) {\n      // Dropdowns get custom display\n      .open 
.dropdown-menu {\n        > .dropdown-header {\n          border-color: @navbar-inverse-border;\n        }\n        .divider {\n          background-color: @navbar-inverse-border;\n        }\n        > li > a {\n          color: @navbar-inverse-link-color;\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-hover-color;\n            background-color: @navbar-inverse-link-hover-bg;\n          }\n        }\n        > .active > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-active-color;\n            background-color: @navbar-inverse-link-active-bg;\n          }\n        }\n        > .disabled > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-disabled-color;\n            background-color: @navbar-inverse-link-disabled-bg;\n          }\n        }\n      }\n    }\n  }\n\n  .navbar-link {\n    color: @navbar-inverse-link-color;\n    &:hover {\n      color: @navbar-inverse-link-hover-color;\n    }\n  }\n\n}\n","//\n// Utility classes\n// --------------------------------------------------\n\n\n// Floats\n// -------------------------\n\n.clearfix {\n  .clearfix();\n}\n.center-block {\n  .center-block();\n}\n.pull-right {\n  float: right !important;\n}\n.pull-left {\n  float: left !important;\n}\n\n\n// Toggling content\n// -------------------------\n\n// Note: Deprecated .hide in favor of .hidden or .sr-only (as appropriate) in v3.0.1\n.hide {\n  display: none !important;\n}\n.show {\n  display: block !important;\n}\n.invisible {\n  visibility: hidden;\n}\n.text-hide {\n  .text-hide();\n}\n\n\n// Hide from screenreaders and browsers\n//\n// Credit: HTML5 Boilerplate\n\n.hidden {\n  display: none !important;\n  visibility: hidden !important;\n}\n\n\n// For Affix plugin\n// -------------------------\n\n.affix {\n  position: fixed;\n}\n","//\n// Breadcrumbs\n// --------------------------------------------------\n\n\n.breadcrumb {\n  padding: @breadcrumb-padding-vertical @breadcrumb-padding-horizontal;\n  margin-bottom: @line-height-computed;\n  list-style: none;\n  background-color: @breadcrumb-bg;\n  border-radius: @border-radius-base;\n\n  > li {\n    display: inline-block;\n\n    + li:before {\n      content: \"@{breadcrumb-separator}\\00a0\"; // Unicode space added since inline-block means non-collapsing white-space\n      padding: 0 5px;\n      color: @breadcrumb-color;\n    }\n  }\n\n  > .active {\n    color: @breadcrumb-active-color;\n  }\n}\n","//\n// Pagination (multiple pages)\n// --------------------------------------------------\n.pagination {\n  display: inline-block;\n  padding-left: 0;\n  margin: @line-height-computed 0;\n  border-radius: @border-radius-base;\n\n  > li {\n    display: inline; // Remove list-style and block-level defaults\n    > a,\n    > span {\n      position: relative;\n      float: left; // Collapse white-space\n      padding: @padding-base-vertical @padding-base-horizontal;\n      line-height: @line-height-base;\n      text-decoration: none;\n      color: @pagination-color;\n      background-color: @pagination-bg;\n      border: 1px solid @pagination-border;\n      margin-left: -1px;\n    }\n    &:first-child {\n      > a,\n      > span {\n        margin-left: 0;\n        .border-left-radius(@border-radius-base);\n      }\n    }\n    &:last-child {\n      > a,\n      > span {\n        .border-right-radius(@border-radius-base);\n      }\n    }\n  }\n\n  > li > a,\n  > li > span {\n    &:hover,\n    &:focus {\n      color: 
@pagination-hover-color;\n      background-color: @pagination-hover-bg;\n      border-color: @pagination-hover-border;\n    }\n  }\n\n  > .active > a,\n  > .active > span {\n    &,\n    &:hover,\n    &:focus {\n      z-index: 2;\n      color: @pagination-active-color;\n      background-color: @pagination-active-bg;\n      border-color: @pagination-active-border;\n      cursor: default;\n    }\n  }\n\n  > .disabled {\n    > span,\n    > span:hover,\n    > span:focus,\n    > a,\n    > a:hover,\n    > a:focus {\n      color: @pagination-disabled-color;\n      background-color: @pagination-disabled-bg;\n      border-color: @pagination-disabled-border;\n      cursor: not-allowed;\n    }\n  }\n}\n\n// Sizing\n// --------------------------------------------------\n\n// Large\n.pagination-lg {\n  .pagination-size(@padding-large-vertical; @padding-large-horizontal; @font-size-large; @border-radius-large);\n}\n\n// Small\n.pagination-sm {\n  .pagination-size(@padding-small-vertical; @padding-small-horizontal; @font-size-small; @border-radius-small);\n}\n","//\n// Pager pagination\n// --------------------------------------------------\n\n\n.pager {\n  padding-left: 0;\n  margin: @line-height-computed 0;\n  list-style: none;\n  text-align: center;\n  &:extend(.clearfix all);\n  li {\n    display: inline;\n    > a,\n    > span {\n      display: inline-block;\n      padding: 5px 14px;\n      background-color: @pager-bg;\n      border: 1px solid @pager-border;\n      border-radius: @pager-border-radius;\n    }\n\n    > a:hover,\n    > a:focus {\n      text-decoration: none;\n      background-color: @pager-hover-bg;\n    }\n  }\n\n  .next {\n    > a,\n    > span {\n      float: right;\n    }\n  }\n\n  .previous {\n    > a,\n    > span {\n      float: left;\n    }\n  }\n\n  .disabled {\n    > a,\n    > a:hover,\n    > a:focus,\n    > span {\n      color: @pager-disabled-color;\n      background-color: @pager-bg;\n      cursor: not-allowed;\n    }\n  }\n\n}\n","//\n// Labels\n// --------------------------------------------------\n\n.label {\n  display: inline;\n  padding: .2em .6em .3em;\n  font-size: 75%;\n  font-weight: bold;\n  line-height: 1;\n  color: @label-color;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  border-radius: .25em;\n\n  // Add hover effects, but only for links\n  &[href] {\n    &:hover,\n    &:focus {\n      color: @label-link-hover-color;\n      text-decoration: none;\n      cursor: pointer;\n    }\n  }\n\n  // Empty labels collapse automatically (not available in IE8)\n  &:empty {\n    display: none;\n  }\n\n  // Quick fix for labels in buttons\n  .btn & {\n    position: relative;\n    top: -1px;\n  }\n}\n\n// Colors\n// Contextual variations (linked labels get darker on :hover)\n\n.label-default {\n  .label-variant(@label-default-bg);\n}\n\n.label-primary {\n  .label-variant(@label-primary-bg);\n}\n\n.label-success {\n  .label-variant(@label-success-bg);\n}\n\n.label-info {\n  .label-variant(@label-info-bg);\n}\n\n.label-warning {\n  .label-variant(@label-warning-bg);\n}\n\n.label-danger {\n  .label-variant(@label-danger-bg);\n}\n","//\n// Badges\n// --------------------------------------------------\n\n\n// Base classes\n.badge {\n  display: inline-block;\n  min-width: 10px;\n  padding: 3px 7px;\n  font-size: @font-size-small;\n  font-weight: @badge-font-weight;\n  color: @badge-color;\n  line-height: @badge-line-height;\n  vertical-align: baseline;\n  white-space: nowrap;\n  text-align: center;\n  background-color: @badge-bg;\n  border-radius: 
@badge-border-radius;\n\n  // Empty badges collapse automatically (not available in IE8)\n  &:empty {\n    display: none;\n  }\n\n  // Quick fix for badges in buttons\n  .btn & {\n    position: relative;\n    top: -1px;\n  }\n  .btn-xs & {\n    top: 0;\n    padding: 1px 5px;\n  }\n}\n\n// Hover state, but only for links\na.badge {\n  &:hover,\n  &:focus {\n    color: @badge-link-hover-color;\n    text-decoration: none;\n    cursor: pointer;\n  }\n}\n\n// Account for counters in navs\na.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n  color: @badge-active-color;\n  background-color: @badge-active-bg;\n}\n.nav-pills > li > a > .badge {\n  margin-left: 3px;\n}\n","//\n// Jumbotron\n// --------------------------------------------------\n\n\n.jumbotron {\n  padding: @jumbotron-padding;\n  margin-bottom: @jumbotron-padding;\n  color: @jumbotron-color;\n  background-color: @jumbotron-bg;\n\n  h1,\n  .h1 {\n    color: @jumbotron-heading-color;\n  }\n  p {\n    margin-bottom: (@jumbotron-padding / 2);\n    font-size: @jumbotron-font-size;\n    font-weight: 200;\n  }\n\n  .container & {\n    border-radius: @border-radius-large; // Only round corners at higher resolutions if contained in a container\n  }\n\n  .container {\n    max-width: 100%;\n  }\n\n  @media screen and (min-width: @screen-sm-min) {\n    padding-top:    (@jumbotron-padding * 1.6);\n    padding-bottom: (@jumbotron-padding * 1.6);\n\n    .container & {\n      padding-left:  (@jumbotron-padding * 2);\n      padding-right: (@jumbotron-padding * 2);\n    }\n\n    h1,\n    .h1 {\n      font-size: (@font-size-base * 4.5);\n    }\n  }\n}\n","//\n// Thumbnails\n// --------------------------------------------------\n\n\n// Mixin and adjust the regular image class\n.thumbnail {\n  display: block;\n  padding: @thumbnail-padding;\n  margin-bottom: @line-height-computed;\n  line-height: @line-height-base;\n  background-color: @thumbnail-bg;\n  border: 1px solid @thumbnail-border;\n  border-radius: @thumbnail-border-radius;\n  .transition(all .2s ease-in-out);\n\n  > img,\n  a > img {\n    .img-responsive();\n    margin-left: auto;\n    margin-right: auto;\n  }\n\n  // Add a hover state for linked versions only\n  a&:hover,\n  a&:focus,\n  a&.active {\n    border-color: @link-color;\n  }\n\n  // Image captions\n  .caption {\n    padding: @thumbnail-caption-padding;\n    color: @thumbnail-caption-color;\n  }\n}\n","//\n// Alerts\n// --------------------------------------------------\n\n\n// Base styles\n// -------------------------\n\n.alert {\n  padding: @alert-padding;\n  margin-bottom: @line-height-computed;\n  border: 1px solid transparent;\n  border-radius: @alert-border-radius;\n\n  // Headings for larger alerts\n  h4 {\n    margin-top: 0;\n    // Specified for the h4 to prevent conflicts of changing @headings-color\n    color: inherit;\n  }\n  // Provide class for links that match alerts\n  .alert-link {\n    font-weight: @alert-link-font-weight;\n  }\n\n  // Improve alignment and spacing of inner content\n  > p,\n  > ul {\n    margin-bottom: 0;\n  }\n  > p + p {\n    margin-top: 5px;\n  }\n}\n\n// Dismissable alerts\n//\n// Expand the right padding and account for the close button's positioning.\n\n.alert-dismissable {\n padding-right: (@alert-padding + 20);\n\n  // Adjust close link position\n  .close {\n    position: relative;\n    top: -2px;\n    right: -21px;\n    color: inherit;\n  }\n}\n\n// Alternate styles\n//\n// Generate contextual modifier classes for colorizing the alert.\n\n.alert-success {\n  
.alert-variant(@alert-success-bg; @alert-success-border; @alert-success-text);\n}\n.alert-info {\n  .alert-variant(@alert-info-bg; @alert-info-border; @alert-info-text);\n}\n.alert-warning {\n  .alert-variant(@alert-warning-bg; @alert-warning-border; @alert-warning-text);\n}\n.alert-danger {\n  .alert-variant(@alert-danger-bg; @alert-danger-border; @alert-danger-text);\n}\n","//\n// Progress bars\n// --------------------------------------------------\n\n\n// Bar animations\n// -------------------------\n\n// WebKit\n@-webkit-keyframes progress-bar-stripes {\n  from  { background-position: 40px 0; }\n  to    { background-position: 0 0; }\n}\n\n// Spec and IE10+\n@keyframes progress-bar-stripes {\n  from  { background-position: 40px 0; }\n  to    { background-position: 0 0; }\n}\n\n\n\n// Bar itself\n// -------------------------\n\n// Outer container\n.progress {\n  overflow: hidden;\n  height: @line-height-computed;\n  margin-bottom: @line-height-computed;\n  background-color: @progress-bg;\n  border-radius: @border-radius-base;\n  .box-shadow(inset 0 1px 2px rgba(0,0,0,.1));\n}\n\n// Bar of progress\n.progress-bar {\n  float: left;\n  width: 0%;\n  height: 100%;\n  font-size: @font-size-small;\n  line-height: @line-height-computed;\n  color: @progress-bar-color;\n  text-align: center;\n  background-color: @progress-bar-bg;\n  .box-shadow(inset 0 -1px 0 rgba(0,0,0,.15));\n  .transition(width .6s ease);\n}\n\n// Striped bars\n.progress-striped .progress-bar {\n  #gradient > .striped();\n  background-size: 40px 40px;\n}\n\n// Call animation for the active one\n.progress.active .progress-bar {\n  .animation(progress-bar-stripes 2s linear infinite);\n}\n\n\n\n// Variations\n// -------------------------\n\n.progress-bar-success {\n  .progress-bar-variant(@progress-bar-success-bg);\n}\n\n.progress-bar-info {\n  .progress-bar-variant(@progress-bar-info-bg);\n}\n\n.progress-bar-warning {\n  .progress-bar-variant(@progress-bar-warning-bg);\n}\n\n.progress-bar-danger {\n  .progress-bar-variant(@progress-bar-danger-bg);\n}\n","// Media objects\n// Source: http://stubbornella.org/content/?p=497\n// --------------------------------------------------\n\n\n// Common styles\n// -------------------------\n\n// Clear the floats\n.media,\n.media-body {\n  overflow: hidden;\n  zoom: 1;\n}\n\n// Proper spacing between instances of .media\n.media,\n.media .media {\n  margin-top: 15px;\n}\n.media:first-child {\n  margin-top: 0;\n}\n\n// For images and videos, set to block\n.media-object {\n  display: block;\n}\n\n// Reset margins on headings for tighter default spacing\n.media-heading {\n  margin: 0 0 5px;\n}\n\n\n// Media image alignment\n// -------------------------\n\n.media {\n  > .pull-left {\n    margin-right: 10px;\n  }\n  > .pull-right {\n    margin-left: 10px;\n  }\n}\n\n\n// Media list variation\n// -------------------------\n\n// Undo default ul/ol styles\n.media-list {\n  padding-left: 0;\n  list-style: none;\n}\n","//\n// List groups\n// --------------------------------------------------\n\n\n// Base class\n//\n// Easily usable on <ul>, <ol>, or <div>.\n\n.list-group {\n  // No need to set list-style: none; since .list-group-item is block level\n  margin-bottom: 20px;\n  padding-left: 0; // reset padding because ul and ol\n}\n\n\n// Individual list items\n//\n// Use on `li`s or `div`s within the `.list-group` parent.\n\n.list-group-item {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n  // Place the border on the list items and negative margin up for better styling\n  
margin-bottom: -1px;\n  background-color: @list-group-bg;\n  border: 1px solid @list-group-border;\n\n  // Round the first and last items\n  &:first-child {\n    .border-top-radius(@list-group-border-radius);\n  }\n  &:last-child {\n    margin-bottom: 0;\n    .border-bottom-radius(@list-group-border-radius);\n  }\n\n  // Align badges within list items\n  > .badge {\n    float: right;\n  }\n  > .badge + .badge {\n    margin-right: 5px;\n  }\n}\n\n\n// Linked list items\n//\n// Use anchor elements instead of `li`s or `div`s to create linked list items.\n// Includes an extra `.active` modifier class for showing selected items.\n\na.list-group-item {\n  color: @list-group-link-color;\n\n  .list-group-item-heading {\n    color: @list-group-link-heading-color;\n  }\n\n  // Hover state\n  &:hover,\n  &:focus {\n    text-decoration: none;\n    background-color: @list-group-hover-bg;\n  }\n\n  // Active class on item itself, not parent\n  &.active,\n  &.active:hover,\n  &.active:focus {\n    z-index: 2; // Place active items above their siblings for proper border styling\n    color: @list-group-active-color;\n    background-color: @list-group-active-bg;\n    border-color: @list-group-active-border;\n\n    // Force color to inherit for custom content\n    .list-group-item-heading {\n      color: inherit;\n    }\n    .list-group-item-text {\n      color: @list-group-active-text-color;\n    }\n  }\n}\n\n\n// Contextual variants\n//\n// Add modifier classes to change text and background color on individual items.\n// Organizationally, this must come after the `:hover` states.\n\n.list-group-item-variant(success; @state-success-bg; @state-success-text);\n.list-group-item-variant(info; @state-info-bg; @state-info-text);\n.list-group-item-variant(warning; @state-warning-bg; @state-warning-text);\n.list-group-item-variant(danger; @state-danger-bg; @state-danger-text);\n\n\n// Custom content options\n//\n// Extra classes for creating well-formatted content within `.list-group-item`s.\n\n.list-group-item-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.list-group-item-text {\n  margin-bottom: 0;\n  line-height: 1.3;\n}\n","//\n// Panels\n// --------------------------------------------------\n\n\n// Base class\n.panel {\n  margin-bottom: @line-height-computed;\n  background-color: @panel-bg;\n  border: 1px solid transparent;\n  border-radius: @panel-border-radius;\n  .box-shadow(0 1px 1px rgba(0,0,0,.05));\n}\n\n// Panel contents\n.panel-body {\n  padding: @panel-body-padding;\n  &:extend(.clearfix all);\n}\n\n\n// List groups in panels\n//\n// By default, space out list group content from panel headings to account for\n// any kind of custom content between the two.\n\n.panel {\n  > .list-group {\n    margin-bottom: 0;\n    .list-group-item {\n      border-width: 1px 0;\n      border-radius: 0;\n      &:first-child {\n        border-top: 0;\n      }\n      &:last-child {\n        border-bottom: 0;\n      }\n    }\n    // Add border top radius for first one\n    &:first-child {\n      .list-group-item:first-child {\n        .border-top-radius((@panel-border-radius - 1));\n      }\n    }\n    // Add border bottom radius for last one\n    &:last-child {\n      .list-group-item:last-child {\n        .border-bottom-radius((@panel-border-radius - 1));\n      }\n    }\n  }\n}\n// Collapse space between when there's no additional content.\n.panel-heading + .list-group {\n  .list-group-item:first-child {\n    border-top-width: 0;\n  }\n}\n\n\n// Tables in panels\n//\n// Place a non-bordered `.table` within a 
panel (not within a `.panel-body`) and\n// watch it go full width.\n\n.panel {\n  > .table,\n  > .table-responsive > .table {\n    margin-bottom: 0;\n  }\n  // Add border top radius for first one\n  > .table:first-child,\n  > .table-responsive:first-child > .table:first-child {\n    > thead:first-child,\n    > tbody:first-child {\n      > tr:first-child {\n        td:first-child,\n        th:first-child {\n          border-top-left-radius: (@panel-border-radius - 1);\n        }\n        td:last-child,\n        th:last-child {\n          border-top-right-radius: (@panel-border-radius - 1);\n        }\n      }\n    }\n  }\n  // Add border bottom radius for last one\n  > .table:last-child,\n  > .table-responsive:last-child > .table:last-child {\n    > tbody:last-child,\n    > tfoot:last-child {\n      > tr:last-child {\n        td:first-child,\n        th:first-child {\n          border-bottom-left-radius: (@panel-border-radius - 1);\n        }\n        td:last-child,\n        th:last-child {\n          border-bottom-right-radius: (@panel-border-radius - 1);\n        }\n      }\n    }\n  }\n  > .panel-body + .table,\n  > .panel-body + .table-responsive {\n    border-top: 1px solid @table-border-color;\n  }\n  > .table > tbody:first-child > tr:first-child th,\n  > .table > tbody:first-child > tr:first-child td {\n    border-top: 0;\n  }\n  > .table-bordered,\n  > .table-responsive > .table-bordered {\n    border: 0;\n    > thead,\n    > tbody,\n    > tfoot {\n      > tr {\n        > th:first-child,\n        > td:first-child {\n          border-left: 0;\n        }\n        > th:last-child,\n        > td:last-child {\n          border-right: 0;\n        }\n        &:first-child > th,\n        &:first-child > td {\n          border-top: 0;\n        }\n        &:last-child > th,\n        &:last-child > td {\n          border-bottom: 0;\n        }\n      }\n    }\n  }\n  > .table-responsive {\n    border: 0;\n    margin-bottom: 0;\n  }\n}\n\n\n// Optional heading\n.panel-heading {\n  padding: 10px 15px;\n  border-bottom: 1px solid transparent;\n  .border-top-radius((@panel-border-radius - 1));\n\n  > .dropdown .dropdown-toggle {\n    color: inherit;\n  }\n}\n\n// Within heading, strip any `h*` tag of its default margins for spacing.\n.panel-title {\n  margin-top: 0;\n  margin-bottom: 0;\n  font-size: ceil((@font-size-base * 1.125));\n  color: inherit;\n\n  > a {\n    color: inherit;\n  }\n}\n\n// Optional footer (stays gray in every modifier class)\n.panel-footer {\n  padding: 10px 15px;\n  background-color: @panel-footer-bg;\n  border-top: 1px solid @panel-inner-border;\n  .border-bottom-radius((@panel-border-radius - 1));\n}\n\n\n// Collapsable panels (aka, accordion)\n//\n// Wrap a series of panels in `.panel-group` to turn them into an accordion with\n// the help of our collapse JavaScript plugin.\n\n.panel-group {\n  margin-bottom: @line-height-computed;\n\n  // Tighten up margin so it's only between panels\n  .panel {\n    margin-bottom: 0;\n    border-radius: @panel-border-radius;\n    overflow: hidden; // crop contents when collapsed\n    + .panel {\n      margin-top: 5px;\n    }\n  }\n\n  .panel-heading {\n    border-bottom: 0;\n    + .panel-collapse .panel-body {\n      border-top: 1px solid @panel-inner-border;\n    }\n  }\n  .panel-footer {\n    border-top: 0;\n    + .panel-collapse .panel-body {\n      border-bottom: 1px solid @panel-inner-border;\n    }\n  }\n}\n\n\n// Contextual variations\n.panel-default {\n  .panel-variant(@panel-default-border; @panel-default-text; 
@panel-default-heading-bg; @panel-default-border);\n}\n.panel-primary {\n  .panel-variant(@panel-primary-border; @panel-primary-text; @panel-primary-heading-bg; @panel-primary-border);\n}\n.panel-success {\n  .panel-variant(@panel-success-border; @panel-success-text; @panel-success-heading-bg; @panel-success-border);\n}\n.panel-info {\n  .panel-variant(@panel-info-border; @panel-info-text; @panel-info-heading-bg; @panel-info-border);\n}\n.panel-warning {\n  .panel-variant(@panel-warning-border; @panel-warning-text; @panel-warning-heading-bg; @panel-warning-border);\n}\n.panel-danger {\n  .panel-variant(@panel-danger-border; @panel-danger-text; @panel-danger-heading-bg; @panel-danger-border);\n}\n","//\n// Wells\n// --------------------------------------------------\n\n\n// Base class\n.well {\n  min-height: 20px;\n  padding: 19px;\n  margin-bottom: 20px;\n  background-color: @well-bg;\n  border: 1px solid @well-border;\n  border-radius: @border-radius-base;\n  .box-shadow(inset 0 1px 1px rgba(0,0,0,.05));\n  blockquote {\n    border-color: #ddd;\n    border-color: rgba(0,0,0,.15);\n  }\n}\n\n// Sizes\n.well-lg {\n  padding: 24px;\n  border-radius: @border-radius-large;\n}\n.well-sm {\n  padding: 9px;\n  border-radius: @border-radius-small;\n}\n","//\n// Close icons\n// --------------------------------------------------\n\n\n.close {\n  float: right;\n  font-size: (@font-size-base * 1.5);\n  font-weight: @close-font-weight;\n  line-height: 1;\n  color: @close-color;\n  text-shadow: @close-text-shadow;\n  .opacity(.2);\n\n  &:hover,\n  &:focus {\n    color: @close-color;\n    text-decoration: none;\n    cursor: pointer;\n    .opacity(.5);\n  }\n\n  // Additional properties for button version\n  // iOS requires the button element instead of an anchor tag.\n  // If you want the anchor version, it requires `href=\"#\"`.\n  button& {\n    padding: 0;\n    cursor: pointer;\n    background: transparent;\n    border: 0;\n    -webkit-appearance: none;\n  }\n}\n","//\n// Modals\n// --------------------------------------------------\n\n// .modal-open      - body class for killing the scroll\n// .modal           - container to scroll within\n// .modal-dialog    - positioning shell for the actual modal\n// .modal-content   - actual modal w/ bg and corners and shit\n\n// Kill the scroll on the body\n.modal-open {\n  overflow: hidden;\n}\n\n// Container that the modal scrolls within\n.modal {\n  display: none;\n  overflow: auto;\n  overflow-y: scroll;\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: @zindex-modal;\n  -webkit-overflow-scrolling: touch;\n\n  // Prevent Chrome on Windows from adding a focus outline. 
For details, see\n  // https://github.com/twbs/bootstrap/pull/10951.\n  outline: 0;\n\n  // When fading in the modal, animate it to slide down\n  &.fade .modal-dialog {\n    .translate(0, -25%);\n    .transition-transform(~\"0.3s ease-out\");\n  }\n  &.in .modal-dialog { .translate(0, 0)}\n}\n\n// Shell div to position the modal with bottom padding\n.modal-dialog {\n  position: relative;\n  width: auto;\n  margin: 10px;\n}\n\n// Actual modal\n.modal-content {\n  position: relative;\n  background-color: @modal-content-bg;\n  border: 1px solid @modal-content-fallback-border-color; //old browsers fallback (ie8 etc)\n  border: 1px solid @modal-content-border-color;\n  border-radius: @border-radius-large;\n  .box-shadow(0 3px 9px rgba(0,0,0,.5));\n  background-clip: padding-box;\n  // Remove focus outline from opened modal\n  outline: none;\n}\n\n// Modal background\n.modal-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: @zindex-modal-background;\n  background-color: @modal-backdrop-bg;\n  // Fade for backdrop\n  &.fade { .opacity(0); }\n  &.in { .opacity(@modal-backdrop-opacity); }\n}\n\n// Modal header\n// Top section of the modal w/ title and dismiss\n.modal-header {\n  padding: @modal-title-padding;\n  border-bottom: 1px solid @modal-header-border-color;\n  min-height: (@modal-title-padding + @modal-title-line-height);\n}\n// Close icon\n.modal-header .close {\n  margin-top: -2px;\n}\n\n// Title text within header\n.modal-title {\n  margin: 0;\n  line-height: @modal-title-line-height;\n}\n\n// Modal body\n// Where all modal content resides (sibling of .modal-header and .modal-footer)\n.modal-body {\n  position: relative;\n  padding: @modal-inner-padding;\n}\n\n// Footer (for actions)\n.modal-footer {\n  margin-top: 15px;\n  padding: (@modal-inner-padding - 1) @modal-inner-padding @modal-inner-padding;\n  text-align: right; // right align buttons\n  border-top: 1px solid @modal-footer-border-color;\n  &:extend(.clearfix all); // clear it in case folks use .pull-* classes on buttons\n\n  // Properly space out buttons\n  .btn + .btn {\n    margin-left: 5px;\n    margin-bottom: 0; // account for input[type=\"submit\"] which gets the bottom margin like all other inputs\n  }\n  // but override that for button groups\n  .btn-group .btn + .btn {\n    margin-left: -1px;\n  }\n  // and override it for block buttons as well\n  .btn-block + .btn-block {\n    margin-left: 0;\n  }\n}\n\n// Scale up the modal\n@media (min-width: @screen-sm-min) {\n\n  // Automatically set modal's width for larger viewports\n  .modal-dialog {\n    width: @modal-md;\n    margin: 30px auto;\n  }\n  .modal-content {\n    .box-shadow(0 5px 15px rgba(0,0,0,.5));\n  }\n\n  // Modal sizes\n  .modal-sm { width: @modal-sm; }\n  .modal-lg { width: @modal-lg; }\n\n}\n","//\n// Tooltips\n// --------------------------------------------------\n\n\n// Base class\n.tooltip {\n  position: absolute;\n  z-index: @zindex-tooltip;\n  display: block;\n  visibility: visible;\n  font-size: @font-size-small;\n  line-height: 1.4;\n  .opacity(0);\n\n  &.in     { .opacity(@tooltip-opacity); }\n  &.top    { margin-top:  -3px; padding: @tooltip-arrow-width 0; }\n  &.right  { margin-left:  3px; padding: 0 @tooltip-arrow-width; }\n  &.bottom { margin-top:   3px; padding: @tooltip-arrow-width 0; }\n  &.left   { margin-left: -3px; padding: 0 @tooltip-arrow-width; }\n}\n\n// Wrapper for the tooltip content\n.tooltip-inner {\n  max-width: @tooltip-max-width;\n  padding: 3px 8px;\n  color: @tooltip-color;\n  
text-align: center;\n  text-decoration: none;\n  background-color: @tooltip-bg;\n  border-radius: @border-radius-base;\n}\n\n// Arrows\n.tooltip-arrow {\n  position: absolute;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.tooltip {\n  &.top .tooltip-arrow {\n    bottom: 0;\n    left: 50%;\n    margin-left: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.top-left .tooltip-arrow {\n    bottom: 0;\n    left: @tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.top-right .tooltip-arrow {\n    bottom: 0;\n    right: @tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.right .tooltip-arrow {\n    top: 50%;\n    left: 0;\n    margin-top: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-right-color: @tooltip-arrow-color;\n  }\n  &.left .tooltip-arrow {\n    top: 50%;\n    right: 0;\n    margin-top: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-left-color: @tooltip-arrow-color;\n  }\n  &.bottom .tooltip-arrow {\n    top: 0;\n    left: 50%;\n    margin-left: -@tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n  &.bottom-left .tooltip-arrow {\n    top: 0;\n    left: @tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n  &.bottom-right .tooltip-arrow {\n    top: 0;\n    right: @tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n}\n","//\n// Popovers\n// --------------------------------------------------\n\n\n.popover {\n  position: absolute;\n  top: 0;\n  left: 0;\n  z-index: @zindex-popover;\n  display: none;\n  max-width: @popover-max-width;\n  padding: 1px;\n  text-align: left; // Reset given new insertion method\n  background-color: @popover-bg;\n  background-clip: padding-box;\n  border: 1px solid @popover-fallback-border-color;\n  border: 1px solid @popover-border-color;\n  border-radius: @border-radius-large;\n  .box-shadow(0 5px 10px rgba(0,0,0,.2));\n\n  // Overrides for proper insertion\n  white-space: normal;\n\n  // Offset the popover to account for the popover arrow\n  &.top     { margin-top: -10px; }\n  &.right   { margin-left: 10px; }\n  &.bottom  { margin-top: 10px; }\n  &.left    { margin-left: -10px; }\n}\n\n.popover-title {\n  margin: 0; // reset heading margin\n  padding: 8px 14px;\n  font-size: @font-size-base;\n  font-weight: normal;\n  line-height: 18px;\n  background-color: @popover-title-bg;\n  border-bottom: 1px solid darken(@popover-title-bg, 5%);\n  border-radius: 5px 5px 0 0;\n}\n\n.popover-content {\n  padding: 9px 14px;\n}\n\n// Arrows\n//\n// .arrow is outer, .arrow:after is inner\n\n.popover .arrow {\n  &,\n  &:after {\n    position: absolute;\n    display: block;\n    width: 0;\n    height: 0;\n    border-color: transparent;\n    border-style: solid;\n  }\n}\n.popover .arrow {\n  border-width: @popover-arrow-outer-width;\n}\n.popover .arrow:after {\n  border-width: @popover-arrow-width;\n  content: \"\";\n}\n\n.popover {\n  &.top .arrow {\n    left: 50%;\n    
margin-left: -@popover-arrow-outer-width;\n    border-bottom-width: 0;\n    border-top-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-top-color: @popover-arrow-outer-color;\n    bottom: -@popover-arrow-outer-width;\n    &:after {\n      content: \" \";\n      bottom: 1px;\n      margin-left: -@popover-arrow-width;\n      border-bottom-width: 0;\n      border-top-color: @popover-arrow-color;\n    }\n  }\n  &.right .arrow {\n    top: 50%;\n    left: -@popover-arrow-outer-width;\n    margin-top: -@popover-arrow-outer-width;\n    border-left-width: 0;\n    border-right-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-right-color: @popover-arrow-outer-color;\n    &:after {\n      content: \" \";\n      left: 1px;\n      bottom: -@popover-arrow-width;\n      border-left-width: 0;\n      border-right-color: @popover-arrow-color;\n    }\n  }\n  &.bottom .arrow {\n    left: 50%;\n    margin-left: -@popover-arrow-outer-width;\n    border-top-width: 0;\n    border-bottom-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-bottom-color: @popover-arrow-outer-color;\n    top: -@popover-arrow-outer-width;\n    &:after {\n      content: \" \";\n      top: 1px;\n      margin-left: -@popover-arrow-width;\n      border-top-width: 0;\n      border-bottom-color: @popover-arrow-color;\n    }\n  }\n\n  &.left .arrow {\n    top: 50%;\n    right: -@popover-arrow-outer-width;\n    margin-top: -@popover-arrow-outer-width;\n    border-right-width: 0;\n    border-left-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-left-color: @popover-arrow-outer-color;\n    &:after {\n      content: \" \";\n      right: 1px;\n      border-right-width: 0;\n      border-left-color: @popover-arrow-color;\n      bottom: -@popover-arrow-width;\n    }\n  }\n\n}\n","//\n// Carousel\n// --------------------------------------------------\n\n\n// Wrapper for the slide container and indicators\n.carousel {\n  position: relative;\n}\n\n.carousel-inner {\n  position: relative;\n  overflow: hidden;\n  width: 100%;\n\n  > .item {\n    display: none;\n    position: relative;\n    .transition(.6s ease-in-out left);\n\n    // Account for jankitude on images\n    > img,\n    > a > img {\n      .img-responsive();\n      line-height: 1;\n    }\n  }\n\n  > .active,\n  > .next,\n  > .prev { display: block; }\n\n  > .active {\n    left: 0;\n  }\n\n  > .next,\n  > .prev {\n    position: absolute;\n    top: 0;\n    width: 100%;\n  }\n\n  > .next {\n    left: 100%;\n  }\n  > .prev {\n    left: -100%;\n  }\n  > .next.left,\n  > .prev.right {\n    left: 0;\n  }\n\n  > .active.left {\n    left: -100%;\n  }\n  > .active.right {\n    left: 100%;\n  }\n\n}\n\n// Left/right controls for nav\n// ---------------------------\n\n.carousel-control {\n  position: absolute;\n  top: 0;\n  left: 0;\n  bottom: 0;\n  width: @carousel-control-width;\n  .opacity(@carousel-control-opacity);\n  font-size: @carousel-control-font-size;\n  color: @carousel-control-color;\n  text-align: center;\n  text-shadow: @carousel-text-shadow;\n  // We can't have this transition here because WebKit cancels the carousel\n  // animation if you trip this while in the middle of another animation.\n\n  // Set gradients for backgrounds\n  &.left {\n    #gradient > .horizontal(@start-color: rgba(0,0,0,.5); @end-color: rgba(0,0,0,.0001));\n  }\n  &.right {\n    left: auto;\n    right: 0;\n    #gradient > .horizontal(@start-color: rgba(0,0,0,.0001); @end-color: rgba(0,0,0,.5));\n  }\n\n  // Hover/focus state\n  
&:hover,\n  &:focus {\n    outline: none;\n    color: @carousel-control-color;\n    text-decoration: none;\n    .opacity(.9);\n  }\n\n  // Toggles\n  .icon-prev,\n  .icon-next,\n  .glyphicon-chevron-left,\n  .glyphicon-chevron-right {\n    position: absolute;\n    top: 50%;\n    z-index: 5;\n    display: inline-block;\n  }\n  .icon-prev,\n  .glyphicon-chevron-left {\n    left: 50%;\n  }\n  .icon-next,\n  .glyphicon-chevron-right {\n    right: 50%;\n  }\n  .icon-prev,\n  .icon-next {\n    width:  20px;\n    height: 20px;\n    margin-top: -10px;\n    margin-left: -10px;\n    font-family: serif;\n  }\n\n  .icon-prev {\n    &:before {\n      content: '\\2039';// SINGLE LEFT-POINTING ANGLE QUOTATION MARK (U+2039)\n    }\n  }\n  .icon-next {\n    &:before {\n      content: '\\203a';// SINGLE RIGHT-POINTING ANGLE QUOTATION MARK (U+203A)\n    }\n  }\n}\n\n// Optional indicator pips\n//\n// Add an unordered list with the following class and add a list item for each\n// slide your carousel holds.\n\n.carousel-indicators {\n  position: absolute;\n  bottom: 10px;\n  left: 50%;\n  z-index: 15;\n  width: 60%;\n  margin-left: -30%;\n  padding-left: 0;\n  list-style: none;\n  text-align: center;\n\n  li {\n    display: inline-block;\n    width:  10px;\n    height: 10px;\n    margin: 1px;\n    text-indent: -999px;\n    border: 1px solid @carousel-indicator-border-color;\n    border-radius: 10px;\n    cursor: pointer;\n\n    // IE8-9 hack for event handling\n    //\n    // Internet Explorer 8-9 does not support clicks on elements without a set\n    // `background-color`. We cannot use `filter` since that's not viewed as a\n    // background color by the browser. Thus, a hack is needed.\n    //\n    // For IE8, we set solid black as it doesn't support `rgba()`. For IE9, we\n    // set alpha transparency for the best results possible.\n    background-color: #000 \\9; // IE8\n    background-color: rgba(0,0,0,0); // IE9\n  }\n  .active {\n    margin: 0;\n    width:  12px;\n    height: 12px;\n    background-color: @carousel-indicator-active-bg;\n  }\n}\n\n// Optional captions\n// -----------------------------\n// Hidden by default for smaller viewports\n.carousel-caption {\n  position: absolute;\n  left: 15%;\n  right: 15%;\n  bottom: 20px;\n  z-index: 10;\n  padding-top: 20px;\n  padding-bottom: 20px;\n  color: @carousel-caption-color;\n  text-align: center;\n  text-shadow: @carousel-text-shadow;\n  & .btn {\n    text-shadow: none; // No shadow for button elements in carousel-caption\n  }\n}\n\n\n// Scale up controls for tablets and up\n@media screen and (min-width: @screen-sm-min) {\n\n  // Scale up the controls a smidge\n  .carousel-control {\n    .glyphicons-chevron-left,\n    .glyphicons-chevron-right,\n    .icon-prev,\n    .icon-next {\n      width: 30px;\n      height: 30px;\n      margin-top: -15px;\n      margin-left: -15px;\n      font-size: 30px;\n    }\n  }\n\n  // Show and left align the captions\n  .carousel-caption {\n    left: 20%;\n    right: 20%;\n    padding-bottom: 30px;\n  }\n\n  // Move up the indicators\n  .carousel-indicators {\n    bottom: 20px;\n  }\n}\n","//\n// Responsive: Utility classes\n// --------------------------------------------------\n\n\n// IE10 in Windows (Phone) 8\n//\n// Support for responsive views via media queries is kind of borked in IE10, for\n// Surface/desktop in split view and for Windows Phone 8. 
This particular fix\n// must be accompanied by a snippet of JavaScript to sniff the user agent and\n// apply some conditional CSS to *only* the Surface/desktop Windows 8. Look at\n// our Getting Started page for more information on this bug.\n//\n// For more information, see the following:\n//\n// Issue: https://github.com/twbs/bootstrap/issues/10497\n// Docs: http://getbootstrap.com/getting-started/#browsers\n// Source: http://timkadlec.com/2012/10/ie10-snap-mode-and-responsive-design/\n\n@-ms-viewport {\n  width: device-width;\n}\n\n\n// Visibility utilities\n.visible-xs {\n  .responsive-invisibility();\n\n  @media (max-width: @screen-xs-max) {\n    .responsive-visibility();\n  }\n}\n.visible-sm {\n  .responsive-invisibility();\n\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    .responsive-visibility();\n  }\n}\n.visible-md {\n  .responsive-invisibility();\n\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    .responsive-visibility();\n  }\n}\n.visible-lg {\n  .responsive-invisibility();\n\n  @media (min-width: @screen-lg-min) {\n    .responsive-visibility();\n  }\n}\n\n.hidden-xs {\n  @media (max-width: @screen-xs-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-sm {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-md {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-lg {\n  @media (min-width: @screen-lg-min) {\n    .responsive-invisibility();\n  }\n}\n\n\n// Print utilities\n//\n// Media queries are placed on the inside to be mixin-friendly.\n\n.visible-print {\n  .responsive-invisibility();\n\n  @media print {\n    .responsive-visibility();\n  }\n}\n\n.hidden-print {\n  @media print {\n    .responsive-invisibility();\n  }\n}\n"]}
\ No newline at end of file
diff --git a/doc/css/bootstrap.min.css b/doc/css/bootstrap.min.css
new file mode 100644 (file)
index 0000000..381834e
--- /dev/null
+++ b/doc/css/bootstrap.min.css
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v3.1.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+
+/*! normalize.css v3.0.0 | MIT License | git.io/normalize */html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background:0 0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{font-size:2em;margin:.67em 0}mark{background:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{-moz-box-sizing:content-box;box-sizing:content-box;height:0}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{color:inherit;font:inherit;margin:0}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}input{line-height:normal}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}legend{border:0;padding:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-collapse:collapse;border-spacing:0}td,th{padding:0}@media print{*{text-shadow:none!important;color:#000!important;background:transparent!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100%!important}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}select{background:#fff!important}.navbar{display:none}.table td,.table th{background-color:#fff!important}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table-bordered th,.table-bordered td{border:1px solid #ddd!important}}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:before,:after{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:62.5%;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.428571429;color:#333;background-color:#fff}input,button,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#428bca;text-decoration:none}a:hover,a:focus{color:#2a6496;text-decoration:underline}a:focus{outline:thin dotted;outline:5px auto 
-webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.img-responsive{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{padding:4px;line-height:1.428571429;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out;display:inline-block;max-width:100%;height:auto}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0,0,0,0);border:0}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small,.h1 small,.h2 small,.h3 small,.h4 small,.h5 small,.h6 small,h1 .small,h2 .small,h3 .small,h4 .small,h5 .small,h6 .small,.h1 .small,.h2 .small,.h3 .small,.h4 .small,.h5 .small,.h6 .small{font-weight:400;line-height:1;color:#999}h1,.h1,h2,.h2,h3,.h3{margin-top:20px;margin-bottom:10px}h1 small,.h1 small,h2 small,.h2 small,h3 small,.h3 small,h1 .small,.h1 .small,h2 .small,.h2 .small,h3 .small,.h3 .small{font-size:65%}h4,.h4,h5,.h5,h6,.h6{margin-top:10px;margin-bottom:10px}h4 small,.h4 small,h5 small,.h5 small,h6 small,.h6 small,h4 .small,.h4 .small,h5 .small,.h5 .small,h6 .small,.h6 .small{font-size:75%}h1,.h1{font-size:36px}h2,.h2{font-size:30px}h3,.h3{font-size:24px}h4,.h4{font-size:18px}h5,.h5{font-size:14px}h6,.h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:200;line-height:1.4}@media (min-width:768px){.lead{font-size:21px}}small,.small{font-size:85%}cite{font-style:normal}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-muted{color:#999}.text-primary{color:#428bca}a.text-primary:hover{color:#3071a9}.text-success{color:#3c763d}a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#428bca}a.bg-primary:hover{background-color:#3071a9}.bg-success{background-color:#dff0d8}a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ul,ol{margin-top:0;margin-bottom:10px}ul ul,ol ul,ul ol,ol ol{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline>li{display:inline-block;padding-left:5px;padding-right:5px}.list-inline>li:first-child{padding-left:0}dl{margin-top:0;margin-bottom:20px}dt,dd{line-height:1.428571429}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;clear:left;text-align:right;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #999}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote p:last-child,blockquote ul:last-child,blockquote ol:last-child{margin-bottom:0}blockquote 
footer,blockquote small,blockquote .small{display:block;font-size:80%;line-height:1.428571429;color:#999}blockquote footer:before,blockquote small:before,blockquote .small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;border-right:5px solid #eee;border-left:0;text-align:right}.blockquote-reverse footer:before,blockquote.pull-right footer:before,.blockquote-reverse small:before,blockquote.pull-right small:before,.blockquote-reverse .small:before,blockquote.pull-right .small:before{content:''}.blockquote-reverse footer:after,blockquote.pull-right footer:after,.blockquote-reverse small:after,blockquote.pull-right small:after,.blockquote-reverse .small:after,blockquote.pull-right .small:after{content:'\00A0 \2014'}blockquote:before,blockquote:after{content:""}address{margin-bottom:20px;font-style:normal;line-height:1.428571429}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;white-space:nowrap;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.428571429;word-break:break-all;word-wrap:break-word;color:#333;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media (min-width:1200px){.container{width:1170px}}.container-fluid{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}.row{margin-left:-15px;margin-right:-15px}.col-xs-1,.col-sm-1,.col-md-1,.col-lg-1,.col-xs-2,.col-sm-2,.col-md-2,.col-lg-2,.col-xs-3,.col-sm-3,.col-md-3,.col-lg-3,.col-xs-4,.col-sm-4,.col-md-4,.col-lg-4,.col-xs-5,.col-sm-5,.col-md-5,.col-lg-5,.col-xs-6,.col-sm-6,.col-md-6,.col-lg-6,.col-xs-7,.col-sm-7,.col-md-7,.col-lg-7,.col-xs-8,.col-sm-8,.col-md-8,.col-lg-8,.col-xs-9,.col-sm-9,.col-md-9,.col-lg-9,.col-xs-10,.col-sm-10,.col-md-10,.col-lg-10,.col-xs-11,.col-sm-11,.col-md-11,.col-lg-11,.col-xs-12,.col-sm-12,.col-md-12,.col-lg-12{position:relative;min-height:1px;padding-left:15px;padding-right:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666666666666%}.col-xs-10{width:83.33333333333334%}.col-xs-9{width:75%}.col-xs-8{width:66.66666666666666%}.col-xs-7{width:58.333333333333336%}.col-xs-6{width:50%}.col-xs-5{width:41.66666666666667%}.col-xs-4{width:33.33333333333333%}.col-xs-3{width:25%}.col-xs-2{width:16.666666666666664%}.col-xs-1{width:8.333333333333332%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666666666666%}.col-xs-pull-10{right:83.33333333333334%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666666666666%}.col-xs-pull-7{right:58.333333333333336%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666666666667%}.col-xs-pull-4{right:33.33333333333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.666666666666664%}.col-xs-pull-1{right:8.333333333333332%}.col-xs-pull-0{right:0}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666666666666%}.col-xs-push-10{left:83.3333333333
3334%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666666666666%}.col-xs-push-7{left:58.333333333333336%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666666666667%}.col-xs-push-4{left:33.33333333333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.666666666666664%}.col-xs-push-1{left:8.333333333333332%}.col-xs-push-0{left:0}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666666666666%}.col-xs-offset-10{margin-left:83.33333333333334%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666666666666%}.col-xs-offset-7{margin-left:58.333333333333336%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666666666667%}.col-xs-offset-4{margin-left:33.33333333333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.666666666666664%}.col-xs-offset-1{margin-left:8.333333333333332%}.col-xs-offset-0{margin-left:0}@media (min-width:768px){.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666666666666%}.col-sm-10{width:83.33333333333334%}.col-sm-9{width:75%}.col-sm-8{width:66.66666666666666%}.col-sm-7{width:58.333333333333336%}.col-sm-6{width:50%}.col-sm-5{width:41.66666666666667%}.col-sm-4{width:33.33333333333333%}.col-sm-3{width:25%}.col-sm-2{width:16.666666666666664%}.col-sm-1{width:8.333333333333332%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666666666666%}.col-sm-pull-10{right:83.33333333333334%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666666666666%}.col-sm-pull-7{right:58.333333333333336%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666666666667%}.col-sm-pull-4{right:33.33333333333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.666666666666664%}.col-sm-pull-1{right:8.333333333333332%}.col-sm-pull-0{right:0}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666666666666%}.col-sm-push-10{left:83.33333333333334%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666666666666%}.col-sm-push-7{left:58.333333333333336%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666666666667%}.col-sm-push-4{left:33.33333333333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.666666666666664%}.col-sm-push-1{left:8.333333333333332%}.col-sm-push-0{left:0}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666666666666%}.col-sm-offset-10{margin-left:83.33333333333334%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666666666666%}.col-sm-offset-7{margin-left:58.333333333333336%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666666666667%}.col-sm-offset-4{margin-left:33.33333333333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.666666666666664%}.col-sm-offset-1{margin-left:8.333333333333332%}.col-sm-offset-0{margin-left:0}}@media 
(min-width:992px){.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666666666666%}.col-md-10{width:83.33333333333334%}.col-md-9{width:75%}.col-md-8{width:66.66666666666666%}.col-md-7{width:58.333333333333336%}.col-md-6{width:50%}.col-md-5{width:41.66666666666667%}.col-md-4{width:33.33333333333333%}.col-md-3{width:25%}.col-md-2{width:16.666666666666664%}.col-md-1{width:8.333333333333332%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666666666666%}.col-md-pull-10{right:83.33333333333334%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666666666666%}.col-md-pull-7{right:58.333333333333336%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666666666667%}.col-md-pull-4{right:33.33333333333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.666666666666664%}.col-md-pull-1{right:8.333333333333332%}.col-md-pull-0{right:0}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666666666666%}.col-md-push-10{left:83.33333333333334%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666666666666%}.col-md-push-7{left:58.333333333333336%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666666666667%}.col-md-push-4{left:33.33333333333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.666666666666664%}.col-md-push-1{left:8.333333333333332%}.col-md-push-0{left:0}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666666666666%}.col-md-offset-10{margin-left:83.33333333333334%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666666666666%}.col-md-offset-7{margin-left:58.333333333333336%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666666666667%}.col-md-offset-4{margin-left:33.33333333333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.666666666666664%}.col-md-offset-1{margin-left:8.333333333333332%}.col-md-offset-0{margin-left:0}}@media 
(min-width:1200px){.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666666666666%}.col-lg-10{width:83.33333333333334%}.col-lg-9{width:75%}.col-lg-8{width:66.66666666666666%}.col-lg-7{width:58.333333333333336%}.col-lg-6{width:50%}.col-lg-5{width:41.66666666666667%}.col-lg-4{width:33.33333333333333%}.col-lg-3{width:25%}.col-lg-2{width:16.666666666666664%}.col-lg-1{width:8.333333333333332%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666666666666%}.col-lg-pull-10{right:83.33333333333334%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666666666666%}.col-lg-pull-7{right:58.333333333333336%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666666666667%}.col-lg-pull-4{right:33.33333333333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.666666666666664%}.col-lg-pull-1{right:8.333333333333332%}.col-lg-pull-0{right:0}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666666666666%}.col-lg-push-10{left:83.33333333333334%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666666666666%}.col-lg-push-7{left:58.333333333333336%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666666666667%}.col-lg-push-4{left:33.33333333333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.666666666666664%}.col-lg-push-1{left:8.333333333333332%}.col-lg-push-0{left:0}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666666666666%}.col-lg-offset-10{margin-left:83.33333333333334%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666666666666%}.col-lg-offset-7{margin-left:58.333333333333336%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666666666667%}.col-lg-offset-4{margin-left:33.33333333333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.666666666666664%}.col-lg-offset-1{margin-left:8.333333333333332%}.col-lg-offset-0{margin-left:0}}table{max-width:100%;background-color:transparent}th{text-align:left}.table{width:100%;margin-bottom:20px}.table>thead>tr>th,.table>tbody>tr>th,.table>tfoot>tr>th,.table>thead>tr>td,.table>tbody>tr>td,.table>tfoot>tr>td{padding:8px;line-height:1.428571429;vertical-align:top;border-top:1px solid #ddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>th,.table>caption+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>td,.table>thead:first-child>tr:first-child>td{border-top:0}.table>tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed>thead>tr>th,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>tbody>tr>td,.table-condensed>tfoot>tr>td{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>tbody>tr>td,.table-bordered>tfoot>tr>td{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>thead>tr>td{border-bottom-width:2px}.table-striped>tbody>tr:nth-child(odd)>td,.table-striped>tbody>tr:nth-child(odd)>th{background-color:#f9f9f9}.table-hover>tbody>tr:hover>td,.table-hover>tbody>tr:hover>th{background-color:#f5f5f5}table col[class*=col-]{position:static;float:none;display:table-column}table td[class*=col-],table 
th[class*=col-]{position:static;float:none;display:table-cell}.table>thead>tr>td.active,.table>tbody>tr>td.active,.table>tfoot>tr>td.active,.table>thead>tr>th.active,.table>tbody>tr>th.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>tbody>tr.active>td,.table>tfoot>tr.active>td,.table>thead>tr.active>th,.table>tbody>tr.active>th,.table>tfoot>tr.active>th{background-color:#f5f5f5}.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover,.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr.active:hover>th{background-color:#e8e8e8}.table>thead>tr>td.success,.table>tbody>tr>td.success,.table>tfoot>tr>td.success,.table>thead>tr>th.success,.table>tbody>tr>th.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>tbody>tr.success>td,.table>tfoot>tr.success>td,.table>thead>tr.success>th,.table>tbody>tr.success>th,.table>tfoot>tr.success>th{background-color:#dff0d8}.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover,.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr.success:hover>th{background-color:#d0e9c6}.table>thead>tr>td.info,.table>tbody>tr>td.info,.table>tfoot>tr>td.info,.table>thead>tr>th.info,.table>tbody>tr>th.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>tbody>tr.info>td,.table>tfoot>tr.info>td,.table>thead>tr.info>th,.table>tbody>tr.info>th,.table>tfoot>tr.info>th{background-color:#d9edf7}.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover,.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr.info:hover>th{background-color:#c4e3f3}.table>thead>tr>td.warning,.table>tbody>tr>td.warning,.table>tfoot>tr>td.warning,.table>thead>tr>th.warning,.table>tbody>tr>th.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>tbody>tr.warning>td,.table>tfoot>tr.warning>td,.table>thead>tr.warning>th,.table>tbody>tr.warning>th,.table>tfoot>tr.warning>th{background-color:#fcf8e3}.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover,.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr.warning:hover>th{background-color:#faf2cc}.table>thead>tr>td.danger,.table>tbody>tr>td.danger,.table>tfoot>tr>td.danger,.table>thead>tr>th.danger,.table>tbody>tr>th.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>tbody>tr.danger>td,.table>tfoot>tr.danger>td,.table>thead>tr.danger>th,.table>tbody>tr.danger>th,.table>tfoot>tr.danger>th{background-color:#f2dede}.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover,.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr.danger:hover>th{background-color:#ebcccc}@media (max-width:767px){.table-responsive{width:100%;margin-bottom:15px;overflow-y:hidden;overflow-x:scroll;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid 
#ddd;-webkit-overflow-scrolling:touch}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>thead>tr>th,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tfoot>tr>td{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>thead>tr>th:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.table-responsive>.table-bordered>thead>tr>th:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>th,.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>td{border-bottom:0}}fieldset{padding:0;margin:0;border:0;min-width:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;margin-bottom:5px;font-weight:700}input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=radio],input[type=checkbox]{margin:4px 0 0;margin-top:1px \9;line-height:normal}input[type=file]{display:block}input[type=range]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type=file]:focus,input[type=radio]:focus,input[type=checkbox]:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:7px;font-size:14px;line-height:1.428571429;color:#555}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.428571429;color:#555;background-color:#fff;background-image:none;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6);box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6)}.form-control:-moz-placeholder{color:#999}.form-control::-moz-placeholder{color:#999;opacity:1}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{cursor:not-allowed;background-color:#eee;opacity:1}textarea.form-control{height:auto}input[type=date]{line-height:34px}.form-group{margin-bottom:15px}.radio,.checkbox{display:block;min-height:20px;margin-top:10px;margin-bottom:10px;padding-left:20px}.radio label,.checkbox label{display:inline;font-weight:400;cursor:pointer}.radio input[type=radio],.radio-inline input[type=radio],.checkbox input[type=checkbox],.checkbox-inline 
input[type=checkbox]{float:left;margin-left:-20px}.radio+.radio,.checkbox+.checkbox{margin-top:-5px}.radio-inline,.checkbox-inline{display:inline-block;padding-left:20px;margin-bottom:0;vertical-align:middle;font-weight:400;cursor:pointer}.radio-inline+.radio-inline,.checkbox-inline+.checkbox-inline{margin-top:0;margin-left:10px}input[type=radio][disabled],input[type=checkbox][disabled],.radio[disabled],.radio-inline[disabled],.checkbox[disabled],.checkbox-inline[disabled],fieldset[disabled] input[type=radio],fieldset[disabled] input[type=checkbox],fieldset[disabled] .radio,fieldset[disabled] .radio-inline,fieldset[disabled] .checkbox,fieldset[disabled] .checkbox-inline{cursor:not-allowed}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}textarea.input-sm,select[multiple].input-sm{height:auto}.input-lg{height:46px;padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-lg{height:46px;line-height:46px}textarea.input-lg,select[multiple].input-lg{height:auto}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.has-feedback .form-control-feedback{position:absolute;top:25px;right:0;display:block;width:34px;height:34px;line-height:34px;text-align:center}.has-success .help-block,.has-success .control-label,.has-success .radio,.has-success .checkbox,.has-success .radio-inline,.has-success .checkbox-inline{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;border-color:#3c763d;background-color:#dff0d8}.has-success .form-control-feedback{color:#3c763d}.has-warning .help-block,.has-warning .control-label,.has-warning .radio,.has-warning .checkbox,.has-warning .radio-inline,.has-warning .checkbox-inline{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;border-color:#8a6d3b;background-color:#fcf8e3}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .help-block,.has-error .control-label,.has-error .radio,.has-error .checkbox,.has-error .radio-inline,.has-error .checkbox-inline{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;border-color:#a94442;background-color:#f2dede}.has-error .form-control-feedback{color:#a94442}.form-control-static{margin-bottom:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline 
.control-label{margin-bottom:0;vertical-align:middle}.form-inline .radio,.form-inline .checkbox{display:inline-block;margin-top:0;margin-bottom:0;padding-left:0;vertical-align:middle}.form-inline .radio input[type=radio],.form-inline .checkbox input[type=checkbox]{float:none;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .control-label,.form-horizontal .radio,.form-horizontal .checkbox,.form-horizontal .radio-inline,.form-horizontal .checkbox-inline{margin-top:0;margin-bottom:0;padding-top:7px}.form-horizontal .radio,.form-horizontal .checkbox{min-height:27px}.form-horizontal .form-group{margin-left:-15px;margin-right:-15px}.form-horizontal .form-control-static{padding-top:7px}@media (min-width:768px){.form-horizontal .control-label{text-align:right}}.form-horizontal .has-feedback .form-control-feedback{top:0;right:15px}.btn{display:inline-block;margin-bottom:0;font-weight:400;text-align:center;vertical-align:middle;cursor:pointer;background-image:none;border:1px solid transparent;white-space:nowrap;padding:6px 12px;font-size:14px;line-height:1.428571429;border-radius:4px;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none;user-select:none}.btn:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn:hover,.btn:focus{color:#333;text-decoration:none}.btn:active,.btn.active{outline:0;background-image:none;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{cursor:not-allowed;pointer-events:none;opacity:.65;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default:hover,.btn-default:focus,.btn-default:active,.btn-default.active,.open .dropdown-toggle.btn-default{color:#333;background-color:#ebebeb;border-color:#adadad}.btn-default:active,.btn-default.active,.open .dropdown-toggle.btn-default{background-image:none}.btn-default.disabled,.btn-default[disabled],fieldset[disabled] .btn-default,.btn-default.disabled:hover,.btn-default[disabled]:hover,fieldset[disabled] .btn-default:hover,.btn-default.disabled:focus,.btn-default[disabled]:focus,fieldset[disabled] .btn-default:focus,.btn-default.disabled:active,.btn-default[disabled]:active,fieldset[disabled] .btn-default:active,.btn-default.disabled.active,.btn-default[disabled].active,fieldset[disabled] .btn-default.active{background-color:#fff;border-color:#ccc}.btn-default .badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#428bca;border-color:#357ebd}.btn-primary:hover,.btn-primary:focus,.btn-primary:active,.btn-primary.active,.open .dropdown-toggle.btn-primary{color:#fff;background-color:#3276b1;border-color:#285e8e}.btn-primary:active,.btn-primary.active,.open .dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled,.btn-primary[disabled],fieldset[disabled] .btn-primary,.btn-primary.disabled:hover,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary:hover,.btn-primary.disabled:focus,.btn-primary[disabled]:focus,fieldset[disabled] .btn-primary:focus,.btn-primary.disabled:active,.btn-primary[disabled]:active,fieldset[disabled] .btn-primary:active,.btn-primary.disabled.active,.btn-primary[disabled].active,fieldset[disabled] .btn-primary.active{background-color:#428bca;border-color:#357ebd}.btn-primary 
.badge{color:#428bca;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success:hover,.btn-success:focus,.btn-success:active,.btn-success.active,.open .dropdown-toggle.btn-success{color:#fff;background-color:#47a447;border-color:#398439}.btn-success:active,.btn-success.active,.open .dropdown-toggle.btn-success{background-image:none}.btn-success.disabled,.btn-success[disabled],fieldset[disabled] .btn-success,.btn-success.disabled:hover,.btn-success[disabled]:hover,fieldset[disabled] .btn-success:hover,.btn-success.disabled:focus,.btn-success[disabled]:focus,fieldset[disabled] .btn-success:focus,.btn-success.disabled:active,.btn-success[disabled]:active,fieldset[disabled] .btn-success:active,.btn-success.disabled.active,.btn-success[disabled].active,fieldset[disabled] .btn-success.active{background-color:#5cb85c;border-color:#4cae4c}.btn-success .badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info:hover,.btn-info:focus,.btn-info:active,.btn-info.active,.open .dropdown-toggle.btn-info{color:#fff;background-color:#39b3d7;border-color:#269abc}.btn-info:active,.btn-info.active,.open .dropdown-toggle.btn-info{background-image:none}.btn-info.disabled,.btn-info[disabled],fieldset[disabled] .btn-info,.btn-info.disabled:hover,.btn-info[disabled]:hover,fieldset[disabled] .btn-info:hover,.btn-info.disabled:focus,.btn-info[disabled]:focus,fieldset[disabled] .btn-info:focus,.btn-info.disabled:active,.btn-info[disabled]:active,fieldset[disabled] .btn-info:active,.btn-info.disabled.active,.btn-info[disabled].active,fieldset[disabled] .btn-info.active{background-color:#5bc0de;border-color:#46b8da}.btn-info .badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning:hover,.btn-warning:focus,.btn-warning:active,.btn-warning.active,.open .dropdown-toggle.btn-warning{color:#fff;background-color:#ed9c28;border-color:#d58512}.btn-warning:active,.btn-warning.active,.open .dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-warning,.btn-warning.disabled:hover,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning:hover,.btn-warning.disabled:focus,.btn-warning[disabled]:focus,fieldset[disabled] .btn-warning:focus,.btn-warning.disabled:active,.btn-warning[disabled]:active,fieldset[disabled] .btn-warning:active,.btn-warning.disabled.active,.btn-warning[disabled].active,fieldset[disabled] .btn-warning.active{background-color:#f0ad4e;border-color:#eea236}.btn-warning .badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger:hover,.btn-danger:focus,.btn-danger:active,.btn-danger.active,.open .dropdown-toggle.btn-danger{color:#fff;background-color:#d2322d;border-color:#ac2925}.btn-danger:active,.btn-danger.active,.open .dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled,.btn-danger[disabled],fieldset[disabled] .btn-danger,.btn-danger.disabled:hover,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger:hover,.btn-danger.disabled:focus,.btn-danger[disabled]:focus,fieldset[disabled] .btn-danger:focus,.btn-danger.disabled:active,.btn-danger[disabled]:active,fieldset[disabled] .btn-danger:active,.btn-danger.disabled.active,.btn-danger[disabled].active,fieldset[disabled] .btn-danger.active{background-color:#d9534f;border-color:#d43f3a}.btn-danger 
.badge{color:#d9534f;background-color:#fff}.btn-link{color:#428bca;font-weight:400;cursor:pointer;border-radius:0}.btn-link,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:hover,.btn-link:focus,.btn-link:active{border-color:transparent}.btn-link:hover,.btn-link:focus{color:#2a6496;text-decoration:underline;background-color:transparent}.btn-link[disabled]:hover,fieldset[disabled] .btn-link:hover,.btn-link[disabled]:focus,fieldset[disabled] .btn-link:focus{color:#999;text-decoration:none}.btn-lg{padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}.btn-sm{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-xs{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%;padding-left:0;padding-right:0}.btn-block+.btn-block{margin-top:5px}input[type=submit].btn-block,input[type=reset].btn-block,input[type=button].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition:height .35s ease;transition:height .35s ease}@font-face{font-family:'Glyphicons Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff) format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons 
Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\2a"}.glyphicon-plus:before{content:"\2b"}.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyph
icon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:before{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:b
efore{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px solid;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;list-style:none;font-size:14px;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175);background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.428571429;color:#333;white-space:nowrap}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus{text-decoration:none;color:#262626;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{color:#fff;text-decoration:none;outline:0;background-color:#428bca}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{color:#999}.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{text-decoration:none;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);cursor:not-allowed}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{left:auto;right:0}.dropdown-menu-left{left:0;right:auto}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.428571429;color:#999}.dropdown-backdrop{position:fixed;left:0;right:0;bottom:0;top:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown 
.caret{border-top:0;border-bottom:4px solid;content:""}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:1px}@media (min-width:768px){.navbar-right .dropdown-menu{left:auto;right:0}.navbar-right .dropdown-menu-left{left:0;right:auto}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;float:left}.btn-group>.btn:hover,.btn-group-vertical>.btn:hover,.btn-group>.btn:focus,.btn-group-vertical>.btn:focus,.btn-group>.btn:active,.btn-group-vertical>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn.active{z-index:2}.btn-group>.btn:focus,.btn-group-vertical>.btn:focus{outline:0}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn-group,.btn-toolbar .input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-bottom-right-radius:0;border-top-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child>.btn:last-child,.btn-group>.btn-group:first-child>.dropdown-toggle{border-bottom-right-radius:0;border-top-right-radius:0}.btn-group>.btn-group:last-child>.btn:first-child{border-bottom-left-radius:0;border-top-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group-xs>.btn{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-sm>.btn{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-lg>.btn{padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}.btn-group>.btn+.dropdown-toggle{padding-left:8px;padding-right:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-left:12px;padding-right:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 
5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-bottom-left-radius:4px;border-top-right-radius:0;border-top-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-right-radius:0;border-top-left-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{float:none;display:table-cell;width:1%}.btn-group-justified>.btn-group .btn{width:100%}[data-toggle=buttons]>.btn>input[type=radio],[data-toggle=buttons]>.btn>input[type=checkbox]{display:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-left:0;padding-right:0}.input-group .form-control{float:left;width:100%;margin-bottom:0}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn,select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn,select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn{height:auto}.input-group-addon,.input-group-btn,.input-group .form-control{display:table-cell}.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child),.input-group .form-control:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 
10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=radio],.input-group-addon input[type=checkbox]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group-btn:last-child>.btn-group:not(:last-child)>.btn{border-bottom-right-radius:0;border-top-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:first-child>.btn-group:not(:first-child)>.btn{border-bottom-left-radius:0;border-top-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:hover,.input-group-btn>.btn:focus,.input-group-btn>.btn:active{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{margin-left:-1px}.nav{margin-bottom:0;padding-left:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 15px}.nav>li>a:hover,.nav>li>a:focus{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#999}.nav>li.disabled>a:hover,.nav>li.disabled>a:focus{color:#999;text-decoration:none;background-color:transparent;cursor:not-allowed}.nav .open>a,.nav .open>a:hover,.nav .open>a:focus{background-color:#eee;border-color:#428bca}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.428571429;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:hover,.nav-tabs>li.active>a:focus{color:#555;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent;cursor:default}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{text-align:center;margin-bottom:5px}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:hover,.nav-tabs.nav-justified>.active>a:focus{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 
0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:hover,.nav-tabs.nav-justified>.active>a:focus{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:hover,.nav-pills>li.active>a:focus{color:#fff;background-color:#428bca}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{text-align:center;margin-bottom:5px}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:hover,.nav-tabs-justified>.active>a:focus{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:hover,.nav-tabs-justified>.active>a:focus{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-right-radius:0;border-top-left-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{max-height:340px;overflow-x:visible;padding-right:15px;padding-left:15px;border-top:1px solid transparent;box-shadow:inset 0 1px 0 rgba(255,255,255,.1);-webkit-overflow-scrolling:touch}.navbar-collapse.in{overflow-y:auto}@media (min-width:768px){.navbar-collapse{width:auto;border-top:0;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse,.navbar-fixed-bottom .navbar-collapse{padding-left:0;padding-right:0}}.container>.navbar-header,.container-fluid>.navbar-header,.container>.navbar-collapse,.container-fluid>.navbar-collapse{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container>.navbar-header,.container-fluid>.navbar-header,.container>.navbar-collapse,.container-fluid>.navbar-collapse{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-top,.navbar-fixed-bottom{position:fixed;right:0;left:0;z-index:1030}@media (min-width:768px){.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.navbar-brand{float:left;padding:15px;font-size:18px;line-height:20px;height:20px}.navbar-brand:hover,.navbar-brand:focus{text-decoration:none}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;margin-right:15px;padding:9px 10px;margin-top:8px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media 
(min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;box-shadow:none}.navbar-nav .open .dropdown-menu>li>a,.navbar-nav .open .dropdown-menu .dropdown-header{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:hover,.navbar-nav .open .dropdown-menu>li>a:focus{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}.navbar-nav.navbar-right:last-child{margin-right:-15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important}}.navbar-form{margin-left:-15px;margin-right:-15px;padding:10px 15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);margin-top:8px;margin-bottom:8px}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .radio,.navbar-form .checkbox{display:inline-block;margin-top:0;margin-bottom:0;padding-left:0;vertical-align:middle}.navbar-form .radio input[type=radio],.navbar-form .checkbox input[type=checkbox]{float:none;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}}@media (min-width:768px){.navbar-form{width:auto;border:0;margin-left:0;margin-right:0;padding-top:0;padding-bottom:0;-webkit-box-shadow:none;box-shadow:none}.navbar-form.navbar-right:last-child{margin-right:-15px}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-right-radius:0;border-top-left-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-left:15px;margin-right:15px}.navbar-text.navbar-right:last-child{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:hover,.navbar-default .navbar-brand:focus{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:hover,.navbar-default .navbar-nav>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:hover,.navbar-default .navbar-nav>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:hover,.navbar-default .navbar-nav>.disabled>a:focus{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:hover,.navbar-default .navbar-toggle:focus{background-color:#ddd}.navbar-default .navbar-toggle 
.icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:hover,.navbar-default .navbar-nav>.open>a:focus{background-color:#e7e7e7;color:#555}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#999}.navbar-inverse .navbar-brand:hover,.navbar-inverse .navbar-brand:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#999}.navbar-inverse .navbar-nav>li>a{color:#999}.navbar-inverse .navbar-nav>li>a:hover,.navbar-inverse .navbar-nav>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:hover,.navbar-inverse .navbar-nav>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:hover,.navbar-inverse .navbar-nav>.disabled>a:focus{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:hover,.navbar-inverse .navbar-toggle:focus{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:hover,.navbar-inverse .navbar-nav>.open>a:focus{background-color:#080808;color:#fff}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#999}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#999}.navbar-inverse .navbar-link:hover{color:#fff}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{content:"/\00a0";padding:0 5px;color:#ccc}.breadcrumb>.active{color:#999}.pagination{display:inline-block;padding-left:0;margin:20px 
0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;line-height:1.428571429;text-decoration:none;color:#428bca;background-color:#fff;border:1px solid #ddd;margin-left:-1px}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-bottom-left-radius:4px;border-top-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-bottom-right-radius:4px;border-top-right-radius:4px}.pagination>li>a:hover,.pagination>li>span:hover,.pagination>li>a:focus,.pagination>li>span:focus{color:#2a6496;background-color:#eee;border-color:#ddd}.pagination>.active>a,.pagination>.active>span,.pagination>.active>a:hover,.pagination>.active>span:hover,.pagination>.active>a:focus,.pagination>.active>span:focus{z-index:2;color:#fff;background-color:#428bca;border-color:#428bca;cursor:default}.pagination>.disabled>span,.pagination>.disabled>span:hover,.pagination>.disabled>span:focus,.pagination>.disabled>a,.pagination>.disabled>a:hover,.pagination>.disabled>a:focus{color:#999;background-color:#fff;border-color:#ddd;cursor:not-allowed}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-bottom-left-radius:6px;border-top-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-bottom-right-radius:6px;border-top-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-bottom-left-radius:3px;border-top-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-bottom-right-radius:3px;border-top-right-radius:3px}.pager{padding-left:0;margin:20px 0;list-style:none;text-align:center}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:hover,.pager li>a:focus{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:hover,.pager .disabled>a:focus,.pager .disabled>span{color:#999;background-color:#fff;cursor:not-allowed}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}.label[href]:hover,.label[href]:focus{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#999}.label-default[href]:hover,.label-default[href]:focus{background-color:gray}.label-primary{background-color:#428bca}.label-primary[href]:hover,.label-primary[href]:focus{background-color:#3071a9}.label-success{background-color:#5cb85c}.label-success[href]:hover,.label-success[href]:focus{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:hover,.label-info[href]:focus{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:hover,.label-warning[href]:focus{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:hover,.label-danger[href]:focus{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 
7px;font-size:12px;font-weight:700;color:#fff;line-height:1;vertical-align:baseline;white-space:nowrap;text-align:center;background-color:#999;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-xs .badge{top:0;padding:1px 5px}a.badge:hover,a.badge:focus{color:#fff;text-decoration:none;cursor:pointer}a.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#428bca;background-color:#fff}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding:30px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron h1,.jumbotron .h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.container .jumbotron{border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron{padding-left:60px;padding-right:60px}.jumbotron h1,.jumbotron .h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.428571429;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.thumbnail>img,.thumbnail a>img{display:block;max-width:100%;height:auto;margin-left:auto;margin-right:auto}a.thumbnail:hover,a.thumbnail:focus,a.thumbnail.active{border-color:#428bca}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable{padding-right:35px}.alert-dismissable .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{background-color:#dff0d8;border-color:#d6e9c6;color:#3c763d}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{background-color:#d9edf7;border-color:#bce8f1;color:#31708f}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{background-color:#fcf8e3;border-color:#faebcc;color:#8a6d3b}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{background-color:#f2dede;border-color:#ebccd1;color:#a94442}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{overflow:hidden;height:20px;margin-bottom:20px;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#428bca;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;transition:width .6s ease}.progress-striped .progress-bar{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-size:40px 40px}.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear 
infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.media,.media-body{overflow:hidden;zoom:1}.media,.media .media{margin-top:15px}.media:first-child{margin-top:0}.media-object{display:block}.media-heading{margin:0 0 5px}.media>.pull-left{margin-right:10px}.media>.pull-right{margin-left:10px}.media-list{padding-left:0;list-style:none}.list-group{margin-bottom:20px;padding-left:0}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-right-radius:4px;border-top-left-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}a.list-group-item{color:#555}a.list-group-item .list-group-item-heading{color:#333}a.list-group-item:hover,a.list-group-item:focus{text-decoration:none;background-color:#f5f5f5}a.list-group-item.active,a.list-group-item.active:hover,a.list-group-item.active:focus{z-index:2;color:#fff;background-color:#428bca;border-color:#428bca}a.list-group-item.active .list-group-item-heading,a.list-group-item.active:hover .list-group-item-heading,a.list-group-item.active:focus .list-group-item-heading{color:inherit}a.list-group-item.active .list-group-item-text,a.list-group-item.active:hover .list-group-item-text,a.list-group-item.active:focus .list-group-item-text{color:#e1edf7}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success{color:#3c763d}a.list-group-item-success 
.list-group-item-heading{color:inherit}a.list-group-item-success:hover,a.list-group-item-success:focus{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:hover,a.list-group-item-success.active:focus{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info{color:#31708f}a.list-group-item-info .list-group-item-heading{color:inherit}a.list-group-item-info:hover,a.list-group-item-info:focus{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:hover,a.list-group-item-info.active:focus{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:hover,a.list-group-item-warning:focus{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:hover,a.list-group-item-warning.active:focus{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger{color:#a94442}a.list-group-item-danger .list-group-item-heading{color:inherit}a.list-group-item-danger:hover,a.list-group-item-danger:focus{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:hover,a.list-group-item-danger.active:focus{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel>.list-group{margin-bottom:0}.panel>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group .list-group-item:first-child{border-top:0}.panel>.list-group .list-group-item:last-child{border-bottom:0}.panel>.list-group:first-child .list-group-item:first-child{border-top-right-radius:3px;border-top-left-radius:3px}.panel>.list-group:last-child .list-group-item:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel-heading+.list-group .list-group-item:first-child{border-top-width:0}.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child 
td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child{border-bottom-left-radius:3px}.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child th,.panel>.table>tbody:first-child>tr:first-child 
td{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>tfoot>tr:first-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>tfoot>tr:first-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:first-child>td{border-top:0}.panel>.table-bordered>thead>tr:last-child>th,.panel>.table-responsive>.table-bordered>thead>tr:last-child>th,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th,.panel>.table-bordered>thead>tr:last-child>td,.panel>.table-responsive>.table-bordered>thead>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td{border-bottom:0}.panel>.table-responsive{border:0;margin-bottom:0}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-right-radius:3px;border-top-left-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid #ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px;overflow:hidden}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse .panel-body{border-top:1px solid 
#ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse .panel-body{border-top-color:#ddd}.panel-default>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#428bca}.panel-primary>.panel-heading{color:#fff;background-color:#428bca;border-color:#428bca}.panel-primary>.panel-heading+.panel-collapse .panel-body{border-top-color:#428bca}.panel-primary>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#428bca}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse .panel-body{border-top-color:#d6e9c6}.panel-success>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse .panel-body{border-top-color:#bce8f1}.panel-info>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse .panel-body{border-top-color:#faebcc}.panel-warning>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse .panel-body{border-top-color:#ebccd1}.panel-danger>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#ebccd1}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;opacity:.2;filter:alpha(opacity=20)}.close:hover,.close:focus{color:#000;text-decoration:none;cursor:pointer;opacity:.5;filter:alpha(opacity=50)}button.close{padding:0;cursor:pointer;background:0 0;border:0;-webkit-appearance:none}.modal-open{overflow:hidden}.modal{display:none;overflow:auto;overflow-y:scroll;position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);transform:translate(0,-25%);-webkit-transition:-webkit-transform .3s ease-out;-moz-transition:-moz-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);transform:translate(0,0)}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px 
rgba(0,0,0,.5);background-clip:padding-box;outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{opacity:0;filter:alpha(opacity=0)}.modal-backdrop.in{opacity:.5;filter:alpha(opacity=50)}.modal-header{padding:15px;border-bottom:1px solid #e5e5e5;min-height:16.428571429px}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.428571429}.modal-body{position:relative;padding:20px}.modal-footer{margin-top:15px;padding:19px 20px 20px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-left:5px;margin-bottom:0}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1030;display:block;visibility:visible;font-size:12px;line-height:1.4;opacity:0;filter:alpha(opacity=0)}.tooltip.in{opacity:.9;filter:alpha(opacity=90)}.tooltip.top{margin-top:-3px;padding:5px 0}.tooltip.right{margin-left:3px;padding:0 5px}.tooltip.bottom{margin-top:3px;padding:5px 0}.tooltip.left{margin-left:-3px;padding:0 5px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{bottom:0;left:5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;right:5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;left:5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;right:5px;border-width:0 5px 5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1010;display:none;max-width:276px;padding:1px;text-align:left;background-color:#fff;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2);white-space:normal}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{margin:0;padding:8px 14px;font-size:14px;font-weight:400;line-height:18px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover .arrow,.popover .arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover .arrow{border-width:11px}.popover .arrow:after{border-width:10px;content:""}.popover.top .arrow{left:50%;margin-left:-11px;border-bottom-width:0;border-top-color:#999;border-top-color:rgba(0,0,0,.25);bottom:-11px}.popover.top .arrow:after{content:" ";bottom:1px;margin-left:-10px;border-bottom-width:0;border-top-color:#fff}.popover.right 
.arrow{top:50%;left:-11px;margin-top:-11px;border-left-width:0;border-right-color:#999;border-right-color:rgba(0,0,0,.25)}.popover.right .arrow:after{content:" ";left:1px;bottom:-10px;border-left-width:0;border-right-color:#fff}.popover.bottom .arrow{left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25);top:-11px}.popover.bottom .arrow:after{content:" ";top:1px;margin-left:-10px;border-top-width:0;border-bottom-color:#fff}.popover.left .arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left .arrow:after{content:" ";right:1px;border-right-width:0;border-left-color:#fff;bottom:-10px}.carousel{position:relative}.carousel-inner{position:relative;overflow:hidden;width:100%}.carousel-inner>.item{display:none;position:relative;-webkit-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>img,.carousel-inner>.item>a>img{display:block;max-width:100%;height:auto;line-height:1}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;left:0;bottom:0;width:15%;opacity:.5;filter:alpha(opacity=50);font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-control.left{background-image:-webkit-linear-gradient(left,color-stop(rgba(0,0,0,.5) 0),color-stop(rgba(0,0,0,.0001) 100%));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1)}.carousel-control.right{left:auto;right:0;background-image:-webkit-linear-gradient(left,color-stop(rgba(0,0,0,.0001) 0),color-stop(rgba(0,0,0,.5) 100%));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1)}.carousel-control:hover,.carousel-control:focus{outline:0;color:#fff;text-decoration:none;opacity:.9;filter:alpha(opacity=90)}.carousel-control .icon-prev,.carousel-control .icon-next,.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right{position:absolute;top:50%;z-index:5;display:inline-block}.carousel-control .icon-prev,.carousel-control .glyphicon-chevron-left{left:50%}.carousel-control .icon-next,.carousel-control .glyphicon-chevron-right{right:50%}.carousel-control .icon-prev,.carousel-control .icon-next{width:20px;height:20px;margin-top:-10px;margin-left:-10px;font-family:serif}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;margin-left:-30%;padding-left:0;list-style:none;text-align:center}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;border:1px solid #fff;border-radius:10px;cursor:pointer;background-color:#000 \9;background-color:rgba(0,0,0,0)}.carousel-indicators 
.active{margin:0;width:12px;height:12px;background-color:#fff}.carousel-caption{position:absolute;left:15%;right:15%;bottom:20px;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-prev,.carousel-control .icon-next{width:30px;height:30px;margin-top:-15px;margin-left:-15px;font-size:30px}.carousel-caption{left:20%;right:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.clearfix:before,.clearfix:after,.container:before,.container:after,.container-fluid:before,.container-fluid:after,.row:before,.row:after,.form-horizontal .form-group:before,.form-horizontal .form-group:after,.btn-toolbar:before,.btn-toolbar:after,.btn-group-vertical>.btn-group:before,.btn-group-vertical>.btn-group:after,.nav:before,.nav:after,.navbar:before,.navbar:after,.navbar-header:before,.navbar-header:after,.navbar-collapse:before,.navbar-collapse:after,.pager:before,.pager:after,.panel-body:before,.panel-body:after,.modal-footer:before,.modal-footer:after{content:" ";display:table}.clearfix:after,.container:after,.container-fluid:after,.row:after,.form-horizontal .form-group:after,.btn-toolbar:after,.btn-group-vertical>.btn-group:after,.nav:after,.navbar:after,.navbar-header:after,.navbar-collapse:after,.pager:after,.panel-body:after,.modal-footer:after{clear:both}.center-block{display:block;margin-left:auto;margin-right:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important;visibility:hidden!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-xs,tr.visible-xs,th.visible-xs,td.visible-xs{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table}tr.visible-xs{display:table-row!important}th.visible-xs,td.visible-xs{display:table-cell!important}}.visible-sm,tr.visible-sm,th.visible-sm,td.visible-sm{display:none!important}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table}tr.visible-sm{display:table-row!important}th.visible-sm,td.visible-sm{display:table-cell!important}}.visible-md,tr.visible-md,th.visible-md,td.visible-md{display:none!important}@media (min-width:992px) and (max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table}tr.visible-md{display:table-row!important}th.visible-md,td.visible-md{display:table-cell!important}}.visible-lg,tr.visible-lg,th.visible-lg,td.visible-lg{display:none!important}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table}tr.visible-lg{display:table-row!important}th.visible-lg,td.visible-lg{display:table-cell!important}}@media (max-width:767px){.hidden-xs,tr.hidden-xs,th.hidden-xs,td.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm,tr.hidden-sm,th.hidden-sm,td.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md,tr.hidden-md,th.hidden-md,td.hidden-md{display:none!important}}@media
(min-width:1200px){.hidden-lg,tr.hidden-lg,th.hidden-lg,td.hidden-lg{display:none!important}}.visible-print,tr.visible-print,th.visible-print,td.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table}tr.visible-print{display:table-row!important}th.visible-print,td.visible-print{display:table-cell!important}}@media print{.hidden-print,tr.hidden-print,th.hidden-print,td.hidden-print{display:none!important}}
\ No newline at end of file
diff --git a/doc/css/button-override.css b/doc/css/button-override.css
new file mode 100644 (file)
index 0000000..4b9b21a
--- /dev/null
@@ -0,0 +1,11 @@
+/* Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0 */
+
+.btn:hover,
+.btn:focus,
+.btn:active,
+.btn.active,
+.open .dropdown-toggle.btn {
+  opacity: 0.4;
+}
diff --git a/doc/css/carousel-override.css b/doc/css/carousel-override.css
new file mode 100644 (file)
index 0000000..c5cf082
--- /dev/null
@@ -0,0 +1,29 @@
+/* Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0 */
+
+.carousel-control {
+  width: 5%;
+}
+
+.carousel-caption {
+  position: static;
+  background: rgba(0,0,0,0.6);
+  color: white;
+  padding-bottom: 35px;
+  padding-left: 1em;
+  padding-right: 1em;
+  padding-top: 15px;
+}
+
+.carousel {
+  overflow: hidden;
+  border-radius: 5px;
+  max-width: 900px;
+  margin: 1em;
+}
+
+.carousel-indicators {
+  bottom: 0px;
+}
+
diff --git a/doc/css/code.css b/doc/css/code.css
new file mode 100644 (file)
index 0000000..ff4a58e
--- /dev/null
@@ -0,0 +1,40 @@
+/* Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0 */
+
+table.code {
+    font-family: Menlo,Monaco,Consolas,"Courier New",monospace;
+    display: block;
+    padding: 9.5px;
+    margin: 0px 0px 10px;
+    font-size: 13px;
+    line-height: 1.42857;
+    color: rgb(51, 51, 51);
+    word-break: break-all;
+    word-wrap: break-word;
+    background-color: rgb(245, 245, 245);
+    border: 1px solid rgb(204, 204, 204);
+    border-radius: 4px 4px 4px 4px;
+}
+
+table.code tr td {
+    white-space: pre;
+}
+
+table.code tr td:nth-child(2) {
+    color: #d14;
+    padding-left: .5em;
+}
+
+.userinput {
+    color: #d14;
+}
+
+table.CodeRay {
+    margin-left: 3em;
+    width: calc(100% - 6em);
+}
+
+td.line-numbers {
+    width: 2em;
+}
diff --git a/doc/css/font-awesome.css b/doc/css/font-awesome.css
new file mode 100644 (file)
index 0000000..eb4127b
--- /dev/null
@@ -0,0 +1,1566 @@
+/*!
+ *  Font Awesome 4.1.0 by @davegandy - http://fontawesome.io - @fontawesome
+ *  License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
+ */
+/* FONT PATH
+ * -------------------------- */
+@font-face {
+  font-family: 'FontAwesome';
+  src: url('../fonts/fontawesome-webfont.eot?v=4.1.0');
+  src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.1.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff?v=4.1.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.1.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.1.0#fontawesomeregular') format('svg');
+  font-weight: normal;
+  font-style: normal;
+}
+.fa {
+  display: inline-block;
+  font-family: FontAwesome;
+  font-style: normal;
+  font-weight: normal;
+  line-height: 1;
+  -webkit-font-smoothing: antialiased;
+  -moz-osx-font-smoothing: grayscale;
+}
+/* makes the font 33% larger relative to the icon container */
+.fa-lg {
+  font-size: 1.33333333em;
+  line-height: 0.75em;
+  vertical-align: -15%;
+}
+.fa-2x {
+  font-size: 2em;
+}
+.fa-3x {
+  font-size: 3em;
+}
+.fa-4x {
+  font-size: 4em;
+}
+.fa-5x {
+  font-size: 5em;
+}
+.fa-fw {
+  width: 1.28571429em;
+  text-align: center;
+}
+.fa-ul {
+  padding-left: 0;
+  margin-left: 2.14285714em;
+  list-style-type: none;
+}
+.fa-ul > li {
+  position: relative;
+}
+.fa-li {
+  position: absolute;
+  left: -2.14285714em;
+  width: 2.14285714em;
+  top: 0.14285714em;
+  text-align: center;
+}
+.fa-li.fa-lg {
+  left: -1.85714286em;
+}
+.fa-border {
+  padding: .2em .25em .15em;
+  border: solid 0.08em #eeeeee;
+  border-radius: .1em;
+}
+.pull-right {
+  float: right;
+}
+.pull-left {
+  float: left;
+}
+.fa.pull-left {
+  margin-right: .3em;
+}
+.fa.pull-right {
+  margin-left: .3em;
+}
+.fa-spin {
+  -webkit-animation: spin 2s infinite linear;
+  -moz-animation: spin 2s infinite linear;
+  -o-animation: spin 2s infinite linear;
+  animation: spin 2s infinite linear;
+}
+@-moz-keyframes spin {
+  0% {
+    -moz-transform: rotate(0deg);
+  }
+  100% {
+    -moz-transform: rotate(359deg);
+  }
+}
+@-webkit-keyframes spin {
+  0% {
+    -webkit-transform: rotate(0deg);
+  }
+  100% {
+    -webkit-transform: rotate(359deg);
+  }
+}
+@-o-keyframes spin {
+  0% {
+    -o-transform: rotate(0deg);
+  }
+  100% {
+    -o-transform: rotate(359deg);
+  }
+}
+@keyframes spin {
+  0% {
+    -webkit-transform: rotate(0deg);
+    transform: rotate(0deg);
+  }
+  100% {
+    -webkit-transform: rotate(359deg);
+    transform: rotate(359deg);
+  }
+}
+.fa-rotate-90 {
+  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1);
+  -webkit-transform: rotate(90deg);
+  -moz-transform: rotate(90deg);
+  -ms-transform: rotate(90deg);
+  -o-transform: rotate(90deg);
+  transform: rotate(90deg);
+}
+.fa-rotate-180 {
+  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2);
+  -webkit-transform: rotate(180deg);
+  -moz-transform: rotate(180deg);
+  -ms-transform: rotate(180deg);
+  -o-transform: rotate(180deg);
+  transform: rotate(180deg);
+}
+.fa-rotate-270 {
+  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3);
+  -webkit-transform: rotate(270deg);
+  -moz-transform: rotate(270deg);
+  -ms-transform: rotate(270deg);
+  -o-transform: rotate(270deg);
+  transform: rotate(270deg);
+}
+.fa-flip-horizontal {
+  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);
+  -webkit-transform: scale(-1, 1);
+  -moz-transform: scale(-1, 1);
+  -ms-transform: scale(-1, 1);
+  -o-transform: scale(-1, 1);
+  transform: scale(-1, 1);
+}
+.fa-flip-vertical {
+  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);
+  -webkit-transform: scale(1, -1);
+  -moz-transform: scale(1, -1);
+  -ms-transform: scale(1, -1);
+  -o-transform: scale(1, -1);
+  transform: scale(1, -1);
+}
+.fa-stack {
+  position: relative;
+  display: inline-block;
+  width: 2em;
+  height: 2em;
+  line-height: 2em;
+  vertical-align: middle;
+}
+.fa-stack-1x,
+.fa-stack-2x {
+  position: absolute;
+  left: 0;
+  width: 100%;
+  text-align: center;
+}
+.fa-stack-1x {
+  line-height: inherit;
+}
+.fa-stack-2x {
+  font-size: 2em;
+}
+.fa-inverse {
+  color: #ffffff;
+}
+/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen
+   readers do not read off random characters that represent icons */
+.fa-glass:before {
+  content: "\f000";
+}
+.fa-music:before {
+  content: "\f001";
+}
+.fa-search:before {
+  content: "\f002";
+}
+.fa-envelope-o:before {
+  content: "\f003";
+}
+.fa-heart:before {
+  content: "\f004";
+}
+.fa-star:before {
+  content: "\f005";
+}
+.fa-star-o:before {
+  content: "\f006";
+}
+.fa-user:before {
+  content: "\f007";
+}
+.fa-film:before {
+  content: "\f008";
+}
+.fa-th-large:before {
+  content: "\f009";
+}
+.fa-th:before {
+  content: "\f00a";
+}
+.fa-th-list:before {
+  content: "\f00b";
+}
+.fa-check:before {
+  content: "\f00c";
+}
+.fa-times:before {
+  content: "\f00d";
+}
+.fa-search-plus:before {
+  content: "\f00e";
+}
+.fa-search-minus:before {
+  content: "\f010";
+}
+.fa-power-off:before {
+  content: "\f011";
+}
+.fa-signal:before {
+  content: "\f012";
+}
+.fa-gear:before,
+.fa-cog:before {
+  content: "\f013";
+}
+.fa-trash-o:before {
+  content: "\f014";
+}
+.fa-home:before {
+  content: "\f015";
+}
+.fa-file-o:before {
+  content: "\f016";
+}
+.fa-clock-o:before {
+  content: "\f017";
+}
+.fa-road:before {
+  content: "\f018";
+}
+.fa-download:before {
+  content: "\f019";
+}
+.fa-arrow-circle-o-down:before {
+  content: "\f01a";
+}
+.fa-arrow-circle-o-up:before {
+  content: "\f01b";
+}
+.fa-inbox:before {
+  content: "\f01c";
+}
+.fa-play-circle-o:before {
+  content: "\f01d";
+}
+.fa-rotate-right:before,
+.fa-repeat:before {
+  content: "\f01e";
+}
+.fa-refresh:before {
+  content: "\f021";
+}
+.fa-list-alt:before {
+  content: "\f022";
+}
+.fa-lock:before {
+  content: "\f023";
+}
+.fa-flag:before {
+  content: "\f024";
+}
+.fa-headphones:before {
+  content: "\f025";
+}
+.fa-volume-off:before {
+  content: "\f026";
+}
+.fa-volume-down:before {
+  content: "\f027";
+}
+.fa-volume-up:before {
+  content: "\f028";
+}
+.fa-qrcode:before {
+  content: "\f029";
+}
+.fa-barcode:before {
+  content: "\f02a";
+}
+.fa-tag:before {
+  content: "\f02b";
+}
+.fa-tags:before {
+  content: "\f02c";
+}
+.fa-book:before {
+  content: "\f02d";
+}
+.fa-bookmark:before {
+  content: "\f02e";
+}
+.fa-print:before {
+  content: "\f02f";
+}
+.fa-camera:before {
+  content: "\f030";
+}
+.fa-font:before {
+  content: "\f031";
+}
+.fa-bold:before {
+  content: "\f032";
+}
+.fa-italic:before {
+  content: "\f033";
+}
+.fa-text-height:before {
+  content: "\f034";
+}
+.fa-text-width:before {
+  content: "\f035";
+}
+.fa-align-left:before {
+  content: "\f036";
+}
+.fa-align-center:before {
+  content: "\f037";
+}
+.fa-align-right:before {
+  content: "\f038";
+}
+.fa-align-justify:before {
+  content: "\f039";
+}
+.fa-list:before {
+  content: "\f03a";
+}
+.fa-dedent:before,
+.fa-outdent:before {
+  content: "\f03b";
+}
+.fa-indent:before {
+  content: "\f03c";
+}
+.fa-video-camera:before {
+  content: "\f03d";
+}
+.fa-photo:before,
+.fa-image:before,
+.fa-picture-o:before {
+  content: "\f03e";
+}
+.fa-pencil:before {
+  content: "\f040";
+}
+.fa-map-marker:before {
+  content: "\f041";
+}
+.fa-adjust:before {
+  content: "\f042";
+}
+.fa-tint:before {
+  content: "\f043";
+}
+.fa-edit:before,
+.fa-pencil-square-o:before {
+  content: "\f044";
+}
+.fa-share-square-o:before {
+  content: "\f045";
+}
+.fa-check-square-o:before {
+  content: "\f046";
+}
+.fa-arrows:before {
+  content: "\f047";
+}
+.fa-step-backward:before {
+  content: "\f048";
+}
+.fa-fast-backward:before {
+  content: "\f049";
+}
+.fa-backward:before {
+  content: "\f04a";
+}
+.fa-play:before {
+  content: "\f04b";
+}
+.fa-pause:before {
+  content: "\f04c";
+}
+.fa-stop:before {
+  content: "\f04d";
+}
+.fa-forward:before {
+  content: "\f04e";
+}
+.fa-fast-forward:before {
+  content: "\f050";
+}
+.fa-step-forward:before {
+  content: "\f051";
+}
+.fa-eject:before {
+  content: "\f052";
+}
+.fa-chevron-left:before {
+  content: "\f053";
+}
+.fa-chevron-right:before {
+  content: "\f054";
+}
+.fa-plus-circle:before {
+  content: "\f055";
+}
+.fa-minus-circle:before {
+  content: "\f056";
+}
+.fa-times-circle:before {
+  content: "\f057";
+}
+.fa-check-circle:before {
+  content: "\f058";
+}
+.fa-question-circle:before {
+  content: "\f059";
+}
+.fa-info-circle:before {
+  content: "\f05a";
+}
+.fa-crosshairs:before {
+  content: "\f05b";
+}
+.fa-times-circle-o:before {
+  content: "\f05c";
+}
+.fa-check-circle-o:before {
+  content: "\f05d";
+}
+.fa-ban:before {
+  content: "\f05e";
+}
+.fa-arrow-left:before {
+  content: "\f060";
+}
+.fa-arrow-right:before {
+  content: "\f061";
+}
+.fa-arrow-up:before {
+  content: "\f062";
+}
+.fa-arrow-down:before {
+  content: "\f063";
+}
+.fa-mail-forward:before,
+.fa-share:before {
+  content: "\f064";
+}
+.fa-expand:before {
+  content: "\f065";
+}
+.fa-compress:before {
+  content: "\f066";
+}
+.fa-plus:before {
+  content: "\f067";
+}
+.fa-minus:before {
+  content: "\f068";
+}
+.fa-asterisk:before {
+  content: "\f069";
+}
+.fa-exclamation-circle:before {
+  content: "\f06a";
+}
+.fa-gift:before {
+  content: "\f06b";
+}
+.fa-leaf:before {
+  content: "\f06c";
+}
+.fa-fire:before {
+  content: "\f06d";
+}
+.fa-eye:before {
+  content: "\f06e";
+}
+.fa-eye-slash:before {
+  content: "\f070";
+}
+.fa-warning:before,
+.fa-exclamation-triangle:before {
+  content: "\f071";
+}
+.fa-plane:before {
+  content: "\f072";
+}
+.fa-calendar:before {
+  content: "\f073";
+}
+.fa-random:before {
+  content: "\f074";
+}
+.fa-comment:before {
+  content: "\f075";
+}
+.fa-magnet:before {
+  content: "\f076";
+}
+.fa-chevron-up:before {
+  content: "\f077";
+}
+.fa-chevron-down:before {
+  content: "\f078";
+}
+.fa-retweet:before {
+  content: "\f079";
+}
+.fa-shopping-cart:before {
+  content: "\f07a";
+}
+.fa-folder:before {
+  content: "\f07b";
+}
+.fa-folder-open:before {
+  content: "\f07c";
+}
+.fa-arrows-v:before {
+  content: "\f07d";
+}
+.fa-arrows-h:before {
+  content: "\f07e";
+}
+.fa-bar-chart-o:before {
+  content: "\f080";
+}
+.fa-twitter-square:before {
+  content: "\f081";
+}
+.fa-facebook-square:before {
+  content: "\f082";
+}
+.fa-camera-retro:before {
+  content: "\f083";
+}
+.fa-key:before {
+  content: "\f084";
+}
+.fa-gears:before,
+.fa-cogs:before {
+  content: "\f085";
+}
+.fa-comments:before {
+  content: "\f086";
+}
+.fa-thumbs-o-up:before {
+  content: "\f087";
+}
+.fa-thumbs-o-down:before {
+  content: "\f088";
+}
+.fa-star-half:before {
+  content: "\f089";
+}
+.fa-heart-o:before {
+  content: "\f08a";
+}
+.fa-sign-out:before {
+  content: "\f08b";
+}
+.fa-linkedin-square:before {
+  content: "\f08c";
+}
+.fa-thumb-tack:before {
+  content: "\f08d";
+}
+.fa-external-link:before {
+  content: "\f08e";
+}
+.fa-sign-in:before {
+  content: "\f090";
+}
+.fa-trophy:before {
+  content: "\f091";
+}
+.fa-github-square:before {
+  content: "\f092";
+}
+.fa-upload:before {
+  content: "\f093";
+}
+.fa-lemon-o:before {
+  content: "\f094";
+}
+.fa-phone:before {
+  content: "\f095";
+}
+.fa-square-o:before {
+  content: "\f096";
+}
+.fa-bookmark-o:before {
+  content: "\f097";
+}
+.fa-phone-square:before {
+  content: "\f098";
+}
+.fa-twitter:before {
+  content: "\f099";
+}
+.fa-facebook:before {
+  content: "\f09a";
+}
+.fa-github:before {
+  content: "\f09b";
+}
+.fa-unlock:before {
+  content: "\f09c";
+}
+.fa-credit-card:before {
+  content: "\f09d";
+}
+.fa-rss:before {
+  content: "\f09e";
+}
+.fa-hdd-o:before {
+  content: "\f0a0";
+}
+.fa-bullhorn:before {
+  content: "\f0a1";
+}
+.fa-bell:before {
+  content: "\f0f3";
+}
+.fa-certificate:before {
+  content: "\f0a3";
+}
+.fa-hand-o-right:before {
+  content: "\f0a4";
+}
+.fa-hand-o-left:before {
+  content: "\f0a5";
+}
+.fa-hand-o-up:before {
+  content: "\f0a6";
+}
+.fa-hand-o-down:before {
+  content: "\f0a7";
+}
+.fa-arrow-circle-left:before {
+  content: "\f0a8";
+}
+.fa-arrow-circle-right:before {
+  content: "\f0a9";
+}
+.fa-arrow-circle-up:before {
+  content: "\f0aa";
+}
+.fa-arrow-circle-down:before {
+  content: "\f0ab";
+}
+.fa-globe:before {
+  content: "\f0ac";
+}
+.fa-wrench:before {
+  content: "\f0ad";
+}
+.fa-tasks:before {
+  content: "\f0ae";
+}
+.fa-filter:before {
+  content: "\f0b0";
+}
+.fa-briefcase:before {
+  content: "\f0b1";
+}
+.fa-arrows-alt:before {
+  content: "\f0b2";
+}
+.fa-group:before,
+.fa-users:before {
+  content: "\f0c0";
+}
+.fa-chain:before,
+.fa-link:before {
+  content: "\f0c1";
+}
+.fa-cloud:before {
+  content: "\f0c2";
+}
+.fa-flask:before {
+  content: "\f0c3";
+}
+.fa-cut:before,
+.fa-scissors:before {
+  content: "\f0c4";
+}
+.fa-copy:before,
+.fa-files-o:before {
+  content: "\f0c5";
+}
+.fa-paperclip:before {
+  content: "\f0c6";
+}
+.fa-save:before,
+.fa-floppy-o:before {
+  content: "\f0c7";
+}
+.fa-square:before {
+  content: "\f0c8";
+}
+.fa-navicon:before,
+.fa-reorder:before,
+.fa-bars:before {
+  content: "\f0c9";
+}
+.fa-list-ul:before {
+  content: "\f0ca";
+}
+.fa-list-ol:before {
+  content: "\f0cb";
+}
+.fa-strikethrough:before {
+  content: "\f0cc";
+}
+.fa-underline:before {
+  content: "\f0cd";
+}
+.fa-table:before {
+  content: "\f0ce";
+}
+.fa-magic:before {
+  content: "\f0d0";
+}
+.fa-truck:before {
+  content: "\f0d1";
+}
+.fa-pinterest:before {
+  content: "\f0d2";
+}
+.fa-pinterest-square:before {
+  content: "\f0d3";
+}
+.fa-google-plus-square:before {
+  content: "\f0d4";
+}
+.fa-google-plus:before {
+  content: "\f0d5";
+}
+.fa-money:before {
+  content: "\f0d6";
+}
+.fa-caret-down:before {
+  content: "\f0d7";
+}
+.fa-caret-up:before {
+  content: "\f0d8";
+}
+.fa-caret-left:before {
+  content: "\f0d9";
+}
+.fa-caret-right:before {
+  content: "\f0da";
+}
+.fa-columns:before {
+  content: "\f0db";
+}
+.fa-unsorted:before,
+.fa-sort:before {
+  content: "\f0dc";
+}
+.fa-sort-down:before,
+.fa-sort-desc:before {
+  content: "\f0dd";
+}
+.fa-sort-up:before,
+.fa-sort-asc:before {
+  content: "\f0de";
+}
+.fa-envelope:before {
+  content: "\f0e0";
+}
+.fa-linkedin:before {
+  content: "\f0e1";
+}
+.fa-rotate-left:before,
+.fa-undo:before {
+  content: "\f0e2";
+}
+.fa-legal:before,
+.fa-gavel:before {
+  content: "\f0e3";
+}
+.fa-dashboard:before,
+.fa-tachometer:before {
+  content: "\f0e4";
+}
+.fa-comment-o:before {
+  content: "\f0e5";
+}
+.fa-comments-o:before {
+  content: "\f0e6";
+}
+.fa-flash:before,
+.fa-bolt:before {
+  content: "\f0e7";
+}
+.fa-sitemap:before {
+  content: "\f0e8";
+}
+.fa-umbrella:before {
+  content: "\f0e9";
+}
+.fa-paste:before,
+.fa-clipboard:before {
+  content: "\f0ea";
+}
+.fa-lightbulb-o:before {
+  content: "\f0eb";
+}
+.fa-exchange:before {
+  content: "\f0ec";
+}
+.fa-cloud-download:before {
+  content: "\f0ed";
+}
+.fa-cloud-upload:before {
+  content: "\f0ee";
+}
+.fa-user-md:before {
+  content: "\f0f0";
+}
+.fa-stethoscope:before {
+  content: "\f0f1";
+}
+.fa-suitcase:before {
+  content: "\f0f2";
+}
+.fa-bell-o:before {
+  content: "\f0a2";
+}
+.fa-coffee:before {
+  content: "\f0f4";
+}
+.fa-cutlery:before {
+  content: "\f0f5";
+}
+.fa-file-text-o:before {
+  content: "\f0f6";
+}
+.fa-building-o:before {
+  content: "\f0f7";
+}
+.fa-hospital-o:before {
+  content: "\f0f8";
+}
+.fa-ambulance:before {
+  content: "\f0f9";
+}
+.fa-medkit:before {
+  content: "\f0fa";
+}
+.fa-fighter-jet:before {
+  content: "\f0fb";
+}
+.fa-beer:before {
+  content: "\f0fc";
+}
+.fa-h-square:before {
+  content: "\f0fd";
+}
+.fa-plus-square:before {
+  content: "\f0fe";
+}
+.fa-angle-double-left:before {
+  content: "\f100";
+}
+.fa-angle-double-right:before {
+  content: "\f101";
+}
+.fa-angle-double-up:before {
+  content: "\f102";
+}
+.fa-angle-double-down:before {
+  content: "\f103";
+}
+.fa-angle-left:before {
+  content: "\f104";
+}
+.fa-angle-right:before {
+  content: "\f105";
+}
+.fa-angle-up:before {
+  content: "\f106";
+}
+.fa-angle-down:before {
+  content: "\f107";
+}
+.fa-desktop:before {
+  content: "\f108";
+}
+.fa-laptop:before {
+  content: "\f109";
+}
+.fa-tablet:before {
+  content: "\f10a";
+}
+.fa-mobile-phone:before,
+.fa-mobile:before {
+  content: "\f10b";
+}
+.fa-circle-o:before {
+  content: "\f10c";
+}
+.fa-quote-left:before {
+  content: "\f10d";
+}
+.fa-quote-right:before {
+  content: "\f10e";
+}
+.fa-spinner:before {
+  content: "\f110";
+}
+.fa-circle:before {
+  content: "\f111";
+}
+.fa-mail-reply:before,
+.fa-reply:before {
+  content: "\f112";
+}
+.fa-github-alt:before {
+  content: "\f113";
+}
+.fa-folder-o:before {
+  content: "\f114";
+}
+.fa-folder-open-o:before {
+  content: "\f115";
+}
+.fa-smile-o:before {
+  content: "\f118";
+}
+.fa-frown-o:before {
+  content: "\f119";
+}
+.fa-meh-o:before {
+  content: "\f11a";
+}
+.fa-gamepad:before {
+  content: "\f11b";
+}
+.fa-keyboard-o:before {
+  content: "\f11c";
+}
+.fa-flag-o:before {
+  content: "\f11d";
+}
+.fa-flag-checkered:before {
+  content: "\f11e";
+}
+.fa-terminal:before {
+  content: "\f120";
+}
+.fa-code:before {
+  content: "\f121";
+}
+.fa-mail-reply-all:before,
+.fa-reply-all:before {
+  content: "\f122";
+}
+.fa-star-half-empty:before,
+.fa-star-half-full:before,
+.fa-star-half-o:before {
+  content: "\f123";
+}
+.fa-location-arrow:before {
+  content: "\f124";
+}
+.fa-crop:before {
+  content: "\f125";
+}
+.fa-code-fork:before {
+  content: "\f126";
+}
+.fa-unlink:before,
+.fa-chain-broken:before {
+  content: "\f127";
+}
+.fa-question:before {
+  content: "\f128";
+}
+.fa-info:before {
+  content: "\f129";
+}
+.fa-exclamation:before {
+  content: "\f12a";
+}
+.fa-superscript:before {
+  content: "\f12b";
+}
+.fa-subscript:before {
+  content: "\f12c";
+}
+.fa-eraser:before {
+  content: "\f12d";
+}
+.fa-puzzle-piece:before {
+  content: "\f12e";
+}
+.fa-microphone:before {
+  content: "\f130";
+}
+.fa-microphone-slash:before {
+  content: "\f131";
+}
+.fa-shield:before {
+  content: "\f132";
+}
+.fa-calendar-o:before {
+  content: "\f133";
+}
+.fa-fire-extinguisher:before {
+  content: "\f134";
+}
+.fa-rocket:before {
+  content: "\f135";
+}
+.fa-maxcdn:before {
+  content: "\f136";
+}
+.fa-chevron-circle-left:before {
+  content: "\f137";
+}
+.fa-chevron-circle-right:before {
+  content: "\f138";
+}
+.fa-chevron-circle-up:before {
+  content: "\f139";
+}
+.fa-chevron-circle-down:before {
+  content: "\f13a";
+}
+.fa-html5:before {
+  content: "\f13b";
+}
+.fa-css3:before {
+  content: "\f13c";
+}
+.fa-anchor:before {
+  content: "\f13d";
+}
+.fa-unlock-alt:before {
+  content: "\f13e";
+}
+.fa-bullseye:before {
+  content: "\f140";
+}
+.fa-ellipsis-h:before {
+  content: "\f141";
+}
+.fa-ellipsis-v:before {
+  content: "\f142";
+}
+.fa-rss-square:before {
+  content: "\f143";
+}
+.fa-play-circle:before {
+  content: "\f144";
+}
+.fa-ticket:before {
+  content: "\f145";
+}
+.fa-minus-square:before {
+  content: "\f146";
+}
+.fa-minus-square-o:before {
+  content: "\f147";
+}
+.fa-level-up:before {
+  content: "\f148";
+}
+.fa-level-down:before {
+  content: "\f149";
+}
+.fa-check-square:before {
+  content: "\f14a";
+}
+.fa-pencil-square:before {
+  content: "\f14b";
+}
+.fa-external-link-square:before {
+  content: "\f14c";
+}
+.fa-share-square:before {
+  content: "\f14d";
+}
+.fa-compass:before {
+  content: "\f14e";
+}
+.fa-toggle-down:before,
+.fa-caret-square-o-down:before {
+  content: "\f150";
+}
+.fa-toggle-up:before,
+.fa-caret-square-o-up:before {
+  content: "\f151";
+}
+.fa-toggle-right:before,
+.fa-caret-square-o-right:before {
+  content: "\f152";
+}
+.fa-euro:before,
+.fa-eur:before {
+  content: "\f153";
+}
+.fa-gbp:before {
+  content: "\f154";
+}
+.fa-dollar:before,
+.fa-usd:before {
+  content: "\f155";
+}
+.fa-rupee:before,
+.fa-inr:before {
+  content: "\f156";
+}
+.fa-cny:before,
+.fa-rmb:before,
+.fa-yen:before,
+.fa-jpy:before {
+  content: "\f157";
+}
+.fa-ruble:before,
+.fa-rouble:before,
+.fa-rub:before {
+  content: "\f158";
+}
+.fa-won:before,
+.fa-krw:before {
+  content: "\f159";
+}
+.fa-bitcoin:before,
+.fa-btc:before {
+  content: "\f15a";
+}
+.fa-file:before {
+  content: "\f15b";
+}
+.fa-file-text:before {
+  content: "\f15c";
+}
+.fa-sort-alpha-asc:before {
+  content: "\f15d";
+}
+.fa-sort-alpha-desc:before {
+  content: "\f15e";
+}
+.fa-sort-amount-asc:before {
+  content: "\f160";
+}
+.fa-sort-amount-desc:before {
+  content: "\f161";
+}
+.fa-sort-numeric-asc:before {
+  content: "\f162";
+}
+.fa-sort-numeric-desc:before {
+  content: "\f163";
+}
+.fa-thumbs-up:before {
+  content: "\f164";
+}
+.fa-thumbs-down:before {
+  content: "\f165";
+}
+.fa-youtube-square:before {
+  content: "\f166";
+}
+.fa-youtube:before {
+  content: "\f167";
+}
+.fa-xing:before {
+  content: "\f168";
+}
+.fa-xing-square:before {
+  content: "\f169";
+}
+.fa-youtube-play:before {
+  content: "\f16a";
+}
+.fa-dropbox:before {
+  content: "\f16b";
+}
+.fa-stack-overflow:before {
+  content: "\f16c";
+}
+.fa-instagram:before {
+  content: "\f16d";
+}
+.fa-flickr:before {
+  content: "\f16e";
+}
+.fa-adn:before {
+  content: "\f170";
+}
+.fa-bitbucket:before {
+  content: "\f171";
+}
+.fa-bitbucket-square:before {
+  content: "\f172";
+}
+.fa-tumblr:before {
+  content: "\f173";
+}
+.fa-tumblr-square:before {
+  content: "\f174";
+}
+.fa-long-arrow-down:before {
+  content: "\f175";
+}
+.fa-long-arrow-up:before {
+  content: "\f176";
+}
+.fa-long-arrow-left:before {
+  content: "\f177";
+}
+.fa-long-arrow-right:before {
+  content: "\f178";
+}
+.fa-apple:before {
+  content: "\f179";
+}
+.fa-windows:before {
+  content: "\f17a";
+}
+.fa-android:before {
+  content: "\f17b";
+}
+.fa-linux:before {
+  content: "\f17c";
+}
+.fa-dribbble:before {
+  content: "\f17d";
+}
+.fa-skype:before {
+  content: "\f17e";
+}
+.fa-foursquare:before {
+  content: "\f180";
+}
+.fa-trello:before {
+  content: "\f181";
+}
+.fa-female:before {
+  content: "\f182";
+}
+.fa-male:before {
+  content: "\f183";
+}
+.fa-gittip:before {
+  content: "\f184";
+}
+.fa-sun-o:before {
+  content: "\f185";
+}
+.fa-moon-o:before {
+  content: "\f186";
+}
+.fa-archive:before {
+  content: "\f187";
+}
+.fa-bug:before {
+  content: "\f188";
+}
+.fa-vk:before {
+  content: "\f189";
+}
+.fa-weibo:before {
+  content: "\f18a";
+}
+.fa-renren:before {
+  content: "\f18b";
+}
+.fa-pagelines:before {
+  content: "\f18c";
+}
+.fa-stack-exchange:before {
+  content: "\f18d";
+}
+.fa-arrow-circle-o-right:before {
+  content: "\f18e";
+}
+.fa-arrow-circle-o-left:before {
+  content: "\f190";
+}
+.fa-toggle-left:before,
+.fa-caret-square-o-left:before {
+  content: "\f191";
+}
+.fa-dot-circle-o:before {
+  content: "\f192";
+}
+.fa-wheelchair:before {
+  content: "\f193";
+}
+.fa-vimeo-square:before {
+  content: "\f194";
+}
+.fa-turkish-lira:before,
+.fa-try:before {
+  content: "\f195";
+}
+.fa-plus-square-o:before {
+  content: "\f196";
+}
+.fa-space-shuttle:before {
+  content: "\f197";
+}
+.fa-slack:before {
+  content: "\f198";
+}
+.fa-envelope-square:before {
+  content: "\f199";
+}
+.fa-wordpress:before {
+  content: "\f19a";
+}
+.fa-openid:before {
+  content: "\f19b";
+}
+.fa-institution:before,
+.fa-bank:before,
+.fa-university:before {
+  content: "\f19c";
+}
+.fa-mortar-board:before,
+.fa-graduation-cap:before {
+  content: "\f19d";
+}
+.fa-yahoo:before {
+  content: "\f19e";
+}
+.fa-google:before {
+  content: "\f1a0";
+}
+.fa-reddit:before {
+  content: "\f1a1";
+}
+.fa-reddit-square:before {
+  content: "\f1a2";
+}
+.fa-stumbleupon-circle:before {
+  content: "\f1a3";
+}
+.fa-stumbleupon:before {
+  content: "\f1a4";
+}
+.fa-delicious:before {
+  content: "\f1a5";
+}
+.fa-digg:before {
+  content: "\f1a6";
+}
+.fa-pied-piper-square:before,
+.fa-pied-piper:before {
+  content: "\f1a7";
+}
+.fa-pied-piper-alt:before {
+  content: "\f1a8";
+}
+.fa-drupal:before {
+  content: "\f1a9";
+}
+.fa-joomla:before {
+  content: "\f1aa";
+}
+.fa-language:before {
+  content: "\f1ab";
+}
+.fa-fax:before {
+  content: "\f1ac";
+}
+.fa-building:before {
+  content: "\f1ad";
+}
+.fa-child:before {
+  content: "\f1ae";
+}
+.fa-paw:before {
+  content: "\f1b0";
+}
+.fa-spoon:before {
+  content: "\f1b1";
+}
+.fa-cube:before {
+  content: "\f1b2";
+}
+.fa-cubes:before {
+  content: "\f1b3";
+}
+.fa-behance:before {
+  content: "\f1b4";
+}
+.fa-behance-square:before {
+  content: "\f1b5";
+}
+.fa-steam:before {
+  content: "\f1b6";
+}
+.fa-steam-square:before {
+  content: "\f1b7";
+}
+.fa-recycle:before {
+  content: "\f1b8";
+}
+.fa-automobile:before,
+.fa-car:before {
+  content: "\f1b9";
+}
+.fa-cab:before,
+.fa-taxi:before {
+  content: "\f1ba";
+}
+.fa-tree:before {
+  content: "\f1bb";
+}
+.fa-spotify:before {
+  content: "\f1bc";
+}
+.fa-deviantart:before {
+  content: "\f1bd";
+}
+.fa-soundcloud:before {
+  content: "\f1be";
+}
+.fa-database:before {
+  content: "\f1c0";
+}
+.fa-file-pdf-o:before {
+  content: "\f1c1";
+}
+.fa-file-word-o:before {
+  content: "\f1c2";
+}
+.fa-file-excel-o:before {
+  content: "\f1c3";
+}
+.fa-file-powerpoint-o:before {
+  content: "\f1c4";
+}
+.fa-file-photo-o:before,
+.fa-file-picture-o:before,
+.fa-file-image-o:before {
+  content: "\f1c5";
+}
+.fa-file-zip-o:before,
+.fa-file-archive-o:before {
+  content: "\f1c6";
+}
+.fa-file-sound-o:before,
+.fa-file-audio-o:before {
+  content: "\f1c7";
+}
+.fa-file-movie-o:before,
+.fa-file-video-o:before {
+  content: "\f1c8";
+}
+.fa-file-code-o:before {
+  content: "\f1c9";
+}
+.fa-vine:before {
+  content: "\f1ca";
+}
+.fa-codepen:before {
+  content: "\f1cb";
+}
+.fa-jsfiddle:before {
+  content: "\f1cc";
+}
+.fa-life-bouy:before,
+.fa-life-saver:before,
+.fa-support:before,
+.fa-life-ring:before {
+  content: "\f1cd";
+}
+.fa-circle-o-notch:before {
+  content: "\f1ce";
+}
+.fa-ra:before,
+.fa-rebel:before {
+  content: "\f1d0";
+}
+.fa-ge:before,
+.fa-empire:before {
+  content: "\f1d1";
+}
+.fa-git-square:before {
+  content: "\f1d2";
+}
+.fa-git:before {
+  content: "\f1d3";
+}
+.fa-hacker-news:before {
+  content: "\f1d4";
+}
+.fa-tencent-weibo:before {
+  content: "\f1d5";
+}
+.fa-qq:before {
+  content: "\f1d6";
+}
+.fa-wechat:before,
+.fa-weixin:before {
+  content: "\f1d7";
+}
+.fa-send:before,
+.fa-paper-plane:before {
+  content: "\f1d8";
+}
+.fa-send-o:before,
+.fa-paper-plane-o:before {
+  content: "\f1d9";
+}
+.fa-history:before {
+  content: "\f1da";
+}
+.fa-circle-thin:before {
+  content: "\f1db";
+}
+.fa-header:before {
+  content: "\f1dc";
+}
+.fa-paragraph:before {
+  content: "\f1dd";
+}
+.fa-sliders:before {
+  content: "\f1de";
+}
+.fa-share-alt:before {
+  content: "\f1e0";
+}
+.fa-share-alt-square:before {
+  content: "\f1e1";
+}
+.fa-bomb:before {
+  content: "\f1e2";
+}
diff --git a/doc/css/images.css b/doc/css/images.css
new file mode 100644 (file)
index 0000000..73a1119
--- /dev/null
@@ -0,0 +1,15 @@
+/* Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0 */
+
+img.full-width {
+    width: 100%;
+}
+
+img.screenshot {
+    max-width: calc(100% - 2em);
+    border: 3px;
+    border-style: solid;
+    margin-left: 2em;
+    margin-bottom: 2em;
+}
diff --git a/doc/css/nav-list.css b/doc/css/nav-list.css
new file mode 100644 (file)
index 0000000..1cc57bc
--- /dev/null
@@ -0,0 +1,37 @@
+/* Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0 */
+
+/* NAV LIST
+   -------- */
+
+.nav-list {
+  padding-left: 15px;
+  padding-right: 15px;
+  margin-bottom: 0;
+}
+.nav-list > li > a,
+.nav-list .nav-header {
+  margin-left:  -15px;
+  margin-right: -15px;
+  text-shadow: 0 1px 0 rgba(255,255,255,.5);
+}
+.nav-list > li > a {
+  padding: 3px 15px;
+}
+.nav-list > .active > a,
+.nav-list > .active > a:hover,
+.nav-list > .active > a:focus {
+  color: white;
+  text-shadow: 0 -1px 0 rgba(0,0,0,.2);
+  background-color: rgb(66, 139, 202);
+}
+
+.spaced-out li {
+   padding-bottom: 1em;
+}
+
+.inside-list ul {
+    list-style-position: inside;
+    padding-left: 0;
+}
\ No newline at end of file
diff --git a/doc/examples/pipeline_templates/gatk-exome-fq-snp.json b/doc/examples/pipeline_templates/gatk-exome-fq-snp.json
new file mode 100644 (file)
index 0000000..481dda3
--- /dev/null
@@ -0,0 +1,175 @@
+{
+ "name":"GATK / exome PE fastq to snp",
+ "components":{
+  "extract-reference":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"file-select",
+   "script_parameters":{
+    "names":[
+     "human_g1k_v37.fasta.gz",
+     "human_g1k_v37.fasta.fai.gz",
+     "human_g1k_v37.dict.gz"
+    ],
+    "input":"d237a90bae3870b3b033aea1e99de4a9+10820+K@qr1hi"
+   },
+   "output_name":false
+  },
+  "bwa-index":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"bwa-index",
+   "script_parameters":{
+    "input":{
+     "output_of":"extract-reference"
+    },
+    "bwa_tbz":{
+     "value":"8b6e2c4916133e1d859c9e812861ce13+70",
+     "required":true
+    }
+   },
+   "output_name":false
+  },
+  "bwa-aln":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"bwa-aln",
+   "script_parameters":{
+    "input":{
+     "dataclass":"Collection",
+     "required":true
+    },
+    "reference_index":{
+     "output_of":"bwa-index"
+    },
+    "samtools_tgz":{
+     "value":"c777e23cf13e5d5906abfdc08d84bfdb+74",
+     "required":true
+    },
+    "bwa_tbz":{
+     "value":"8b6e2c4916133e1d859c9e812861ce13+70",
+     "required":true
+    }
+   },
+   "runtime_constraints":{
+    "max_tasks_per_node":1
+   },
+   "output_name":false
+  },
+  "picard-gatk2-prep":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"picard-gatk2-prep",
+   "script_parameters":{
+    "input":{
+     "output_of":"bwa-aln"
+    },
+    "reference":{
+     "output_of":"extract-reference"
+    },
+    "picard_zip":{
+     "value":"687f74675c6a0e925dec619cc2bec25f+77",
+     "required":true
+    }
+   },
+   "runtime_constraints":{
+    "max_tasks_per_node":1
+   },
+   "output_name":false
+  },
+  "GATK2-realign":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"GATK2-realign",
+   "script_parameters":{
+    "input":{
+     "output_of":"picard-gatk2-prep"
+    },
+    "gatk_bundle":{
+     "value":"d237a90bae3870b3b033aea1e99de4a9+10820+K@qr1hi",
+     "required":true
+    },
+    "picard_zip":{
+     "value":"687f74675c6a0e925dec619cc2bec25f+77",
+     "required":true
+    },
+    "gatk_tbz":{
+     "value":"7e0a277d6d2353678a11f56bab3b13f2+87",
+     "required":true
+    },
+    "regions":{
+     "value":"13b53dbe1ec032dfc495fd974aa5dd4a+87/S02972011_Covered_sort_merged.bed"
+    },
+    "region_padding":{
+     "value":10
+    }
+   },
+   "runtime_constraints":{
+    "max_tasks_per_node":2
+   },
+   "output_name":false
+  },
+  "GATK2-bqsr":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"GATK2-bqsr",
+   "script_parameters":{
+    "input":{
+     "output_of":"GATK2-realign"
+    },
+    "gatk_bundle":{
+     "value":"d237a90bae3870b3b033aea1e99de4a9+10820+K@qr1hi",
+     "required":true
+    },
+    "picard_zip":{
+     "value":"687f74675c6a0e925dec619cc2bec25f+77",
+     "required":true
+    },
+    "gatk_tbz":{
+     "value":"7e0a277d6d2353678a11f56bab3b13f2+87",
+     "required":true
+    }
+   },
+   "output_name":false
+  },
+  "GATK2-merge-call":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"GATK2-merge-call",
+   "script_parameters":{
+    "input":{
+     "output_of":"GATK2-bqsr"
+    },
+    "gatk_bundle":{
+     "value":"d237a90bae3870b3b033aea1e99de4a9+10820+K@qr1hi",
+     "required":true
+    },
+    "picard_zip":{
+     "value":"687f74675c6a0e925dec619cc2bec25f+77",
+     "required":true
+    },
+    "gatk_tbz":{
+     "value":"7e0a277d6d2353678a11f56bab3b13f2+87",
+     "required":true
+    },
+    "regions":{
+     "value":"13b53dbe1ec032dfc495fd974aa5dd4a+87/S02972011_Covered_sort_merged.bed"
+    },
+    "region_padding":{
+     "value":10
+    },
+    "GATK2_UnifiedGenotyper_args":{
+     "default":[
+      "-stand_call_conf",
+      "30.0",
+      "-stand_emit_conf",
+      "30.0",
+      "-dcov",
+      "200"
+     ]
+    }
+   },
+   "output_name":"Variant calls from UnifiedGenotyper"
+  }
+ }
+}
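The template above chains its stages declaratively: each component pins a script to a script_version, and downstream components consume upstream results through "output_of" references (bwa-index, for example, indexes the collection produced by extract-reference). The same mechanism drives the rtg-fq-snp.json template that follows. As a minimal sketch, not part of this commit, here is how such a template could be registered through the Arvados Ruby SDK; it assumes the pipeline_template resource follows the same proxy pattern as arv.node in doc/examples/ruby/list-active-nodes.rb below, and the local filename is hypothetical.

#!/usr/bin/env ruby
require 'json'
require 'arvados'

arv = Arvados.new(api_version: 'v1')

# Read the pipeline template JSON shown above (hypothetical local copy).
template = JSON.parse(File.read('gatk-exome-fq-snp.json'))

# Register it with the API server; the response includes the uuid
# assigned to the stored template.
created = arv.pipeline_template.create(pipeline_template: template)
puts created[:uuid]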
diff --git a/doc/examples/pipeline_templates/rtg-fq-snp.json b/doc/examples/pipeline_templates/rtg-fq-snp.json
new file mode 100644 (file)
index 0000000..c951c4c
--- /dev/null
@@ -0,0 +1,76 @@
+{
+ "name":"Real Time Genomics / PE fastq to snp",
+ "components":{
+  "extract_reference":{
+   "script":"file-select",
+   "script_parameters":{
+    "names":[
+     "human_g1k_v37.fasta.gz"
+    ],
+    "input":"d237a90bae3870b3b033aea1e99de4a9+10820+K@qr1hi"
+   },
+   "script_version":"4c1f8cd1431ece2ef11c130d48bb2edfd2f00ec2"
+  },
+  "reformat_reference":{
+   "script_version":"4c1f8cd1431ece2ef11c130d48bb2edfd2f00ec2",
+   "script":"rtg-fasta2sdf",
+   "script_parameters":{
+    "input":{
+     "output_of":"extract_reference"
+    },
+    "rtg_binary_zip":"5d33618193f763b7dc3a3fdfa11d452e+95+K@qr1hi",
+    "rtg_license":{
+     "optional":false
+    }
+   }
+  },
+  "reformat_reads":{
+   "script_version":"4c1f8cd1431ece2ef11c130d48bb2edfd2f00ec2",
+   "script":"rtg-fastq2sdf",
+   "script_parameters":{
+    "input":{
+     "optional":false
+    },
+    "rtg_binary_zip":"5d33618193f763b7dc3a3fdfa11d452e+95+K@qr1hi",
+    "rtg_license":{
+     "optional":false
+    }
+   }
+  },
+  "map_reads":{
+   "script_version":"4c1f8cd1431ece2ef11c130d48bb2edfd2f00ec2",
+   "script":"rtg-map",
+   "script_parameters":{
+    "input":{
+     "output_of":"reformat_reads"
+    },
+    "reference":{
+     "output_of":"reformat_reference"
+    },
+    "rtg_binary_zip":"5d33618193f763b7dc3a3fdfa11d452e+95+K@qr1hi",
+    "rtg_license":{
+     "optional":false
+    }
+   },
+   "runtime_constraints":{
+    "max_tasks_per_node":1
+   }
+  },
+  "report_snp":{
+   "script_version":"4c1f8cd1431ece2ef11c130d48bb2edfd2f00ec2",
+   "script":"rtg-snp",
+   "script_parameters":{
+    "input":{
+     "output_of":"map_reads"
+    },
+    "reference":{
+     "output_of":"reformat_reference"
+    },
+    "rtg_binary_zip":"5d33618193f763b7dc3a3fdfa11d452e+95+K@qr1hi",
+    "rtg_license":{
+     "optional":false
+    }
+   }
+  }
+ }
+}
diff --git a/doc/examples/ruby/list-active-nodes.rb b/doc/examples/ruby/list-active-nodes.rb
new file mode 100755 (executable)
index 0000000..a3eb205
--- /dev/null
@@ -0,0 +1,17 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+abort 'Error: Ruby >= 1.9.3 required.' if RUBY_VERSION < '1.9.3'
+
+require 'arvados'
+require 'time' # Time.parse comes from the stdlib 'time' library
+
+arv = Arvados.new(api_version: 'v1')
+arv.node.list[:items].each do |node|
+  if node[:crunch_worker_state] != 'down'
+    ping_age = (Time.now - Time.parse(node[:last_ping_at])).to_i rescue -1
+    puts "#{node[:uuid]} #{node[:crunch_worker_state]} #{ping_age}"
+  end
+end
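As a follow-up to the script above, the same node listing can be narrowed on the server instead of in Ruby. This variant is a sketch, not part of this commit, under the assumption that the SDK forwards the standard Arvados "filters" list parameter to the nodes.list API call.

#!/usr/bin/env ruby
require 'arvados'
require 'time'

arv = Arvados.new(api_version: 'v1')

# Ask the server for nodes whose crunch_worker_state is not 'down',
# rather than fetching every node and filtering client-side.
nodes = arv.node.list(filters: [['crunch_worker_state', '!=', 'down']])
nodes[:items].each do |node|
  ping_age = (Time.now - Time.parse(node[:last_ping_at])).to_i rescue -1
  puts "#{node[:uuid]} #{node[:crunch_worker_state]} #{ping_age}"
end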
diff --git a/doc/fonts/FontAwesome.otf b/doc/fonts/FontAwesome.otf
new file mode 100644 (file)
index 0000000..3461e3f
Binary files /dev/null and b/doc/fonts/FontAwesome.otf differ
diff --git a/doc/fonts/fontawesome-webfont.eot b/doc/fonts/fontawesome-webfont.eot
new file mode 100755 (executable)
index 0000000..6cfd566
Binary files /dev/null and b/doc/fonts/fontawesome-webfont.eot differ
diff --git a/doc/fonts/fontawesome-webfont.svg b/doc/fonts/fontawesome-webfont.svg
new file mode 100755 (executable)
index 0000000..a9f8469
--- /dev/null
@@ -0,0 +1,504 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
+<svg xmlns="http://www.w3.org/2000/svg">
+<metadata></metadata>
+<defs>
+<font id="fontawesomeregular" horiz-adv-x="1536" >
+<font-face units-per-em="1792" ascent="1536" descent="-256" />
+<missing-glyph horiz-adv-x="448" />
+<glyph unicode=" "  horiz-adv-x="448" />
+<glyph unicode="&#x09;" horiz-adv-x="448" />
+<glyph unicode="&#xa0;" horiz-adv-x="448" />
+<glyph unicode="&#xa8;" horiz-adv-x="1792" />
+<glyph unicode="&#xa9;" horiz-adv-x="1792" />
+<glyph unicode="&#xae;" horiz-adv-x="1792" />
+<glyph unicode="&#xb4;" horiz-adv-x="1792" />
+<glyph unicode="&#xc6;" horiz-adv-x="1792" />
+<glyph unicode="&#xd8;" horiz-adv-x="1792" />
+<glyph unicode="&#x2000;" horiz-adv-x="768" />
+<glyph unicode="&#x2001;" horiz-adv-x="1537" />
+<glyph unicode="&#x2002;" horiz-adv-x="768" />
+<glyph unicode="&#x2003;" horiz-adv-x="1537" />
+<glyph unicode="&#x2004;" horiz-adv-x="512" />
+<glyph unicode="&#x2005;" horiz-adv-x="384" />
+<glyph unicode="&#x2006;" horiz-adv-x="256" />
+<glyph unicode="&#x2007;" horiz-adv-x="256" />
+<glyph unicode="&#x2008;" horiz-adv-x="192" />
+<glyph unicode="&#x2009;" horiz-adv-x="307" />
+<glyph unicode="&#x200a;" horiz-adv-x="85" />
+<glyph unicode="&#x202f;" horiz-adv-x="307" />
+<glyph unicode="&#x205f;" horiz-adv-x="384" />
+<glyph unicode="&#x2122;" horiz-adv-x="1792" />
+<glyph unicode="&#x221e;" horiz-adv-x="1792" />
+<glyph unicode="&#x2260;" horiz-adv-x="1792" />
+<glyph unicode="&#x25fc;" horiz-adv-x="500" d="M0 0z" />
+<glyph unicode="&#xf000;" horiz-adv-x="1792" d="M93 1350q0 23 18 36.5t38 17.5t43 4h1408q23 0 43 -4t38 -17.5t18 -36.5q0 -35 -43 -78l-632 -632v-768h320q26 0 45 -19t19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45t19 45t45 19h320v768l-632 632q-43 43 -43 78z" />
+<glyph unicode="&#xf001;" d="M0 -64q0 50 34 89t86 60.5t103.5 32t96.5 10.5q105 0 192 -39v967q0 31 19 56.5t49 35.5l832 256q12 4 28 4q40 0 68 -28t28 -68v-1120q0 -50 -34 -89t-86 -60.5t-103.5 -32t-96.5 -10.5t-96.5 10.5t-103.5 32t-86 60.5t-34 89t34 89t86 60.5t103.5 32t96.5 10.5 q105 0 192 -39v537l-768 -237v-709q0 -50 -34 -89t-86 -60.5t-103.5 -32t-96.5 -10.5t-96.5 10.5t-103.5 32t-86 60.5t-34 89z" />
+<glyph unicode="&#xf002;" horiz-adv-x="1664" d="M0 704q0 143 55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90q0 -52 -38 -90t-90 -38q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5z M256 704q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5z" />
+<glyph unicode="&#xf003;" horiz-adv-x="1792" d="M0 32v1088q0 66 47 113t113 47h1472q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h1472q13 0 22.5 9.5t9.5 22.5v768q-32 -36 -69 -66q-268 -206 -426 -338q-51 -43 -83 -67t-86.5 -48.5 t-102.5 -24.5h-1h-1q-48 0 -102.5 24.5t-86.5 48.5t-83 67q-158 132 -426 338q-37 30 -69 66v-768zM128 1120q0 -168 147 -284q193 -152 401 -317q6 -5 35 -29.5t46 -37.5t44.5 -31.5t50.5 -27.5t43 -9h1h1q20 0 43 9t50.5 27.5t44.5 31.5t46 37.5t35 29.5q208 165 401 317 q54 43 100.5 115.5t46.5 131.5v11v13.5t-0.5 13t-3 12.5t-5.5 9t-9 7.5t-14 2.5h-1472q-13 0 -22.5 -9.5t-9.5 -22.5z" />
+<glyph unicode="&#xf004;" horiz-adv-x="1792" d="M0 940q0 220 127 344t351 124q62 0 126.5 -21.5t120 -58t95.5 -68.5t76 -68q36 36 76 68t95.5 68.5t120 58t126.5 21.5q224 0 351 -124t127 -344q0 -221 -229 -450l-623 -600q-18 -18 -44 -18t-44 18l-624 602q-10 8 -27.5 26t-55.5 65.5t-68 97.5t-53.5 121t-23.5 138z " />
+<glyph unicode="&#xf005;" horiz-adv-x="1664" d="M0 889q0 37 56 46l502 73l225 455q19 41 49 41t49 -41l225 -455l502 -73q56 -9 56 -46q0 -22 -26 -48l-363 -354l86 -500q1 -7 1 -20q0 -21 -10.5 -35.5t-30.5 -14.5q-19 0 -40 12l-449 236l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500 l-364 354q-25 27 -25 48z" />
+<glyph unicode="&#xf006;" horiz-adv-x="1664" d="M0 889q0 37 56 46l502 73l225 455q19 41 49 41t49 -41l225 -455l502 -73q56 -9 56 -46q0 -22 -26 -48l-363 -354l86 -500q1 -7 1 -20q0 -50 -41 -50q-19 0 -40 12l-449 236l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500l-364 354 q-25 27 -25 48zM221 829l306 -297l-73 -421l378 199l377 -199l-72 421l306 297l-422 62l-189 382l-189 -382z" />
+<glyph unicode="&#xf007;" horiz-adv-x="1408" d="M0 131q0 53 3.5 103.5t14 109t26.5 108.5t43 97.5t62 81t85.5 53.5t111.5 20q9 0 42 -21.5t74.5 -48t108 -48t133.5 -21.5t133.5 21.5t108 48t74.5 48t42 21.5q61 0 111.5 -20t85.5 -53.5t62 -81t43 -97.5t26.5 -108.5t14 -109t3.5 -103.5q0 -120 -73 -189.5t-194 -69.5 h-874q-121 0 -194 69.5t-73 189.5zM320 1024q0 159 112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5t-271.5 112.5t-112.5 271.5z" />
+<glyph unicode="&#xf008;" horiz-adv-x="1920" d="M0 -96v1344q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1344q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113zM128 64v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45zM128 320q0 -26 19 -45t45 -19h128 q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128zM128 704q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128zM128 1088q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19 h-128q-26 0 -45 -19t-19 -45v-128zM512 -64q0 -26 19 -45t45 -19h768q26 0 45 19t19 45v512q0 26 -19 45t-45 19h-768q-26 0 -45 -19t-19 -45v-512zM512 704q0 -26 19 -45t45 -19h768q26 0 45 19t19 45v512q0 26 -19 45t-45 19h-768q-26 0 -45 -19t-19 -45v-512zM1536 64 v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45zM1536 320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128zM1536 704q0 -26 19 -45t45 -19h128q26 0 45 19t19 45 v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128zM1536 1088q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128z" />
+<glyph unicode="&#xf009;" horiz-adv-x="1664" d="M0 128v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90zM0 896v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90zM896 128v384q0 52 38 90t90 38h512q52 0 90 -38 t38 -90v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90zM896 896v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90z" />
+<glyph unicode="&#xf00a;" horiz-adv-x="1792" d="M0 96v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM0 608v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM0 1120v192q0 40 28 68t68 28h320q40 0 68 -28 t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM640 96v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM640 608v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320 q-40 0 -68 28t-28 68zM640 1120v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM1280 96v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM1280 608v192 q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM1280 1120v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf00b;" horiz-adv-x="1792" d="M0 96v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM0 608v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM0 1120v192q0 40 28 68t68 28h320q40 0 68 -28 t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM640 96v192q0 40 28 68t68 28h960q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68zM640 608v192q0 40 28 68t68 28h960q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-960 q-40 0 -68 28t-28 68zM640 1120v192q0 40 28 68t68 28h960q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf00c;" horiz-adv-x="1792" d="M121 608q0 40 28 68l136 136q28 28 68 28t68 -28l294 -295l656 657q28 28 68 28t68 -28l136 -136q28 -28 28 -68t-28 -68l-724 -724l-136 -136q-28 -28 -68 -28t-68 28l-136 136l-362 362q-28 28 -28 68z" />
+<glyph unicode="&#xf00d;" horiz-adv-x="1408" d="M110 214q0 40 28 68l294 294l-294 294q-28 28 -28 68t28 68l136 136q28 28 68 28t68 -28l294 -294l294 294q28 28 68 28t68 -28l136 -136q28 -28 28 -68t-28 -68l-294 -294l294 -294q28 -28 28 -68t-28 -68l-136 -136q-28 -28 -68 -28t-68 28l-294 294l-294 -294 q-28 -28 -68 -28t-68 28l-136 136q-28 28 -28 68z" />
+<glyph unicode="&#xf00e;" horiz-adv-x="1664" d="M0 704q0 143 55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90t-37.5 -90.5t-90.5 -37.5q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5z M256 704q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5zM384 672v64q0 13 9.5 22.5t22.5 9.5h224v224q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-224h224q13 0 22.5 -9.5t9.5 -22.5v-64 q0 -13 -9.5 -22.5t-22.5 -9.5h-224v-224q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v224h-224q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf010;" horiz-adv-x="1664" d="M0 704q0 143 55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90t-37.5 -90.5t-90.5 -37.5q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5z M256 704q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5zM384 672v64q0 13 9.5 22.5t22.5 9.5h576q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-576q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf011;" d="M0 640q0 182 80.5 343t226.5 270q43 32 95.5 25t83.5 -50q32 -42 24.5 -94.5t-49.5 -84.5q-98 -74 -151.5 -181t-53.5 -228q0 -104 40.5 -198.5t109.5 -163.5t163.5 -109.5t198.5 -40.5t198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5q0 121 -53.5 228t-151.5 181 q-42 32 -49.5 84.5t24.5 94.5q31 43 84 50t95 -25q146 -109 226.5 -270t80.5 -343q0 -156 -61 -298t-164 -245t-245 -164t-298 -61t-298 61t-245 164t-164 245t-61 298zM640 768v640q0 52 38 90t90 38t90 -38t38 -90v-640q0 -52 -38 -90t-90 -38t-90 38t-38 90z" />
+<glyph unicode="&#xf012;" horiz-adv-x="1792" d="M0 -96v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM384 -96v320q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-320q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM768 -96v576q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-576 q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM1152 -96v960q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-960q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM1536 -96v1472q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1472q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf013;" d="M0 531v222q0 12 8 23t19 13l186 28q14 46 39 92q-40 57 -107 138q-10 12 -10 24q0 10 9 23q26 36 98.5 107.5t94.5 71.5q13 0 26 -10l138 -107q44 23 91 38q16 136 29 186q7 28 36 28h222q14 0 24.5 -8.5t11.5 -21.5l28 -184q49 -16 90 -37l142 107q9 9 24 9q13 0 25 -10 q129 -119 165 -170q7 -8 7 -22q0 -12 -8 -23q-15 -21 -51 -66.5t-54 -70.5q26 -50 41 -98l183 -28q13 -2 21 -12.5t8 -23.5v-222q0 -12 -8 -23t-20 -13l-185 -28q-19 -54 -39 -91q35 -50 107 -138q10 -12 10 -25t-9 -23q-27 -37 -99 -108t-94 -71q-12 0 -26 9l-138 108 q-44 -23 -91 -38q-16 -136 -29 -186q-7 -28 -36 -28h-222q-14 0 -24.5 8.5t-11.5 21.5l-28 184q-49 16 -90 37l-141 -107q-10 -9 -25 -9q-14 0 -25 11q-126 114 -165 168q-7 10 -7 23q0 12 8 23q15 21 51 66.5t54 70.5q-27 50 -41 99l-183 27q-13 2 -21 12.5t-8 23.5z M512 640q0 -106 75 -181t181 -75t181 75t75 181t-75 181t-181 75t-181 -75t-75 -181z" />
+<glyph unicode="&#xf014;" horiz-adv-x="1408" d="M0 1056v64q0 14 9 23t23 9h309l70 167q15 37 54 63t79 26h320q40 0 79 -26t54 -63l70 -167h309q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-96v-948q0 -83 -47 -143.5t-113 -60.5h-832q-66 0 -113 58.5t-47 141.5v952h-96q-14 0 -23 9t-9 23zM256 76q0 -22 7 -40.5 t14.5 -27t10.5 -8.5h832q3 0 10.5 8.5t14.5 27t7 40.5v948h-896v-948zM384 224v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23zM480 1152h448l-48 117q-7 9 -17 11h-317q-10 -2 -17 -11zM640 224v576q0 14 9 23t23 9h64 q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23zM896 224v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf015;" horiz-adv-x="1664" d="M26 636.5q1 13.5 11 21.5l719 599q32 26 76 26t76 -26l244 -204v195q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-408l219 -182q10 -8 11 -21.5t-7 -23.5l-62 -74q-8 -9 -21 -11h-3q-13 0 -21 7l-692 577l-692 -577q-12 -8 -24 -7q-13 2 -21 11l-62 74q-8 10 -7 23.5zM256 64 v480q0 1 0.5 3t0.5 3l575 474l575 -474q1 -2 1 -6v-480q0 -26 -19 -45t-45 -19h-384v384h-256v-384h-384q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf016;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22 v-376z" />
+<glyph unicode="&#xf017;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM512 544v64q0 14 9 23t23 9h224v352q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-448q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf018;" horiz-adv-x="1920" d="M50 73q0 54 26 116l417 1044q8 19 26 33t38 14h339q-13 0 -23 -9.5t-11 -22.5l-15 -192q-1 -14 8 -23t22 -9h166q13 0 22 9t8 23l-15 192q-1 13 -11 22.5t-23 9.5h339q20 0 38 -14t26 -33l417 -1044q26 -62 26 -116q0 -73 -46 -73h-704q13 0 22 9.5t8 22.5l-20 256 q-1 13 -11 22.5t-23 9.5h-272q-13 0 -23 -9.5t-11 -22.5l-20 -256q-1 -13 8 -22.5t22 -9.5h-704q-46 0 -46 73zM809 540q-1 -12 8 -20t21 -8h244q12 0 21 8t8 20v4l-24 320q-1 13 -11 22.5t-23 9.5h-186q-13 0 -23 -9.5t-11 -22.5l-24 -320v-4z" />
+<glyph unicode="&#xf019;" horiz-adv-x="1664" d="M0 96v320q0 40 28 68t68 28h465l135 -136q58 -56 136 -56t136 56l136 136h464q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-1472q-40 0 -68 28t-28 68zM325 985q17 39 59 39h256v448q0 26 19 45t45 19h256q26 0 45 -19t19 -45v-448h256q42 0 59 -39q17 -41 -14 -70 l-448 -448q-18 -19 -45 -19t-45 19l-448 448q-31 29 -14 70zM1152 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM1408 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf01a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM418 620q8 20 30 20h192v352q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-352h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-11 -9 -23 -9t-23 9l-320 320q-15 16 -7 35z" />
+<glyph unicode="&#xf01b;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM416 672q0 12 10 24l319 319q11 9 23 9t23 -9l320 -320q15 -16 7 -35q-8 -20 -30 -20h-192v-352q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v352h-192q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf01c;" d="M0 64v482q0 62 25 123l238 552q10 25 36.5 42t52.5 17h832q26 0 52.5 -17t36.5 -42l238 -552q25 -61 25 -123v-482q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM197 576h316l95 -192h320l95 192h316q-1 3 -2.5 8t-2.5 8l-212 496h-708l-212 -496q-1 -2 -2.5 -8 t-2.5 -8z" />
+<glyph unicode="&#xf01d;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM512 320v640q0 37 32 56q33 18 64 -1l544 -320q32 -18 32 -55t-32 -55l-544 -320q-15 -9 -32 -9q-16 0 -32 8q-32 19 -32 56z" />
+<glyph unicode="&#xf01e;" d="M0 640q0 156 61 298t164 245t245 164t298 61q147 0 284.5 -55.5t244.5 -156.5l130 129q29 31 70 14q39 -17 39 -59v-448q0 -26 -19 -45t-45 -19h-448q-42 0 -59 40q-17 39 14 69l138 138q-148 137 -349 137q-104 0 -198.5 -40.5t-163.5 -109.5t-109.5 -163.5 t-40.5 -198.5t40.5 -198.5t109.5 -163.5t163.5 -109.5t198.5 -40.5q119 0 225 52t179 147q7 10 23 12q14 0 25 -9l137 -138q9 -8 9.5 -20.5t-7.5 -22.5q-109 -132 -264 -204.5t-327 -72.5q-156 0 -298 61t-245 164t-164 245t-61 298z" />
+<glyph unicode="&#xf021;" d="M0 0v448q0 26 19 45t45 19h448q26 0 45 -19t19 -45t-19 -45l-137 -137q71 -66 161 -102t187 -36q134 0 250 65t186 179q11 17 53 117q8 23 30 23h192q13 0 22.5 -9.5t9.5 -22.5q0 -5 -1 -7q-64 -268 -268 -434.5t-478 -166.5q-146 0 -282.5 55t-243.5 157l-129 -129 q-19 -19 -45 -19t-45 19t-19 45zM18 800v7q65 268 270 434.5t480 166.5q146 0 284 -55.5t245 -156.5l130 129q19 19 45 19t45 -19t19 -45v-448q0 -26 -19 -45t-45 -19h-448q-26 0 -45 19t-19 45t19 45l138 138q-148 137 -349 137q-134 0 -250 -65t-186 -179 q-11 -17 -53 -117q-8 -23 -30 -23h-199q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf022;" horiz-adv-x="1792" d="M0 160v1088q0 66 47 113t113 47h1472q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113zM128 160q0 -13 9.5 -22.5t22.5 -9.5h1472q13 0 22.5 9.5t9.5 22.5v832q0 13 -9.5 22.5t-22.5 9.5h-1472q-13 0 -22.5 -9.5t-9.5 -22.5v-832z M256 288v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 544v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5z M256 800v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 288v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5z M512 544v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5zM512 800v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5z " />
+<glyph unicode="&#xf023;" horiz-adv-x="1152" d="M0 96v576q0 40 28 68t68 28h32v192q0 184 132 316t316 132t316 -132t132 -316v-192h32q40 0 68 -28t28 -68v-576q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68zM320 768h512v192q0 106 -75 181t-181 75t-181 -75t-75 -181v-192z" />
+<glyph unicode="&#xf024;" horiz-adv-x="1792" d="M64 1280q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5q0 -72 -64 -110v-1266q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v1266q-64 38 -64 110zM320 320v742q0 32 31 55q21 14 79 43q236 120 421 120q107 0 200 -29t219 -88q38 -19 88 -19 q54 0 117.5 21t110 47t88 47t54.5 21q26 0 45 -19t19 -45v-763q0 -25 -12.5 -38.5t-39.5 -27.5q-215 -116 -369 -116q-61 0 -123.5 22t-108.5 48t-115.5 48t-142.5 22q-192 0 -464 -146q-17 -9 -33 -9q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf025;" horiz-adv-x="1664" d="M0 650q0 151 67 291t179 242.5t266 163.5t320 61t320 -61t266 -163.5t179 -242.5t67 -291q0 -166 -60 -314l-20 -49l-185 -33q-22 -83 -90.5 -136.5t-156.5 -53.5v-32q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-32 q71 0 130 -35.5t93 -95.5l68 12q29 95 29 193q0 148 -88 279t-236.5 209t-315.5 78t-315.5 -78t-236.5 -209t-88 -279q0 -98 29 -193l68 -12q34 60 93 95.5t130 35.5v32q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v32 q-88 0 -156.5 53.5t-90.5 136.5l-185 33l-20 49q-60 148 -60 314z" />
+<glyph unicode="&#xf026;" horiz-adv-x="768" d="M0 448v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf027;" horiz-adv-x="1152" d="M0 448v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45zM908 464q0 21 12 35.5t29 25t34 23t29 35.5t12 57t-12 57t-29 35.5t-34 23t-29 25t-12 35.5q0 27 19 45.5t45 18.5 q15 0 25 -5q70 -27 112.5 -93t42.5 -142t-42.5 -141.5t-112.5 -93.5q-10 -5 -25 -5q-26 0 -45 18.5t-19 45.5z" />
+<glyph unicode="&#xf028;" horiz-adv-x="1664" d="M0 448v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45zM908 464q0 21 12 35.5t29 25t34 23t29 35.5t12 57t-12 57t-29 35.5t-34 23t-29 25t-12 35.5q0 27 19 45.5t45 18.5 q15 0 25 -5q70 -27 112.5 -93t42.5 -142t-42.5 -141.5t-112.5 -93.5q-10 -5 -25 -5q-26 0 -45 18.5t-19 45.5zM1008 228q0 39 39 59q56 29 76 44q74 54 115.5 135.5t41.5 173.5t-41.5 173.5t-115.5 135.5q-20 15 -76 44q-39 20 -39 59q0 26 19 45t45 19q13 0 26 -5 q140 -59 225 -188.5t85 -282.5t-85 -282.5t-225 -188.5q-13 -5 -25 -5q-27 0 -46 19t-19 45zM1109 -7q0 36 39 59q7 4 22.5 10.5t22.5 10.5q46 25 82 51q123 91 192 227t69 289t-69 289t-192 227q-36 26 -82 51q-7 4 -22.5 10.5t-22.5 10.5q-39 23 -39 59q0 26 19 45t45 19 q13 0 26 -5q211 -91 338 -283.5t127 -422.5t-127 -422.5t-338 -283.5q-13 -5 -26 -5q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf029;" horiz-adv-x="1408" d="M0 0v640h640v-640h-640zM0 768v640h640v-640h-640zM128 129h384v383h-384v-383zM128 896h384v384h-384v-384zM256 256v128h128v-128h-128zM256 1024v128h128v-128h-128zM768 0v640h384v-128h128v128h128v-384h-384v128h-128v-384h-128zM768 768v640h640v-640h-640z M896 896h384v384h-384v-384zM1024 0v128h128v-128h-128zM1024 1024v128h128v-128h-128zM1280 0v128h128v-128h-128z" />
+<glyph unicode="&#xf02a;" horiz-adv-x="1792" d="M0 0v1408h63v-1408h-63zM94 1v1407h32v-1407h-32zM189 1v1407h31v-1407h-31zM346 1v1407h31v-1407h-31zM472 1v1407h62v-1407h-62zM629 1v1407h31v-1407h-31zM692 1v1407h31v-1407h-31zM755 1v1407h31v-1407h-31zM880 1v1407h63v-1407h-63zM1037 1v1407h63v-1407h-63z M1163 1v1407h63v-1407h-63zM1289 1v1407h63v-1407h-63zM1383 1v1407h63v-1407h-63zM1541 1v1407h94v-1407h-94zM1666 1v1407h32v-1407h-32zM1729 0v1408h63v-1408h-63z" />
+<glyph unicode="&#xf02b;" d="M0 864v416q0 52 38 90t90 38h416q53 0 117 -26.5t102 -64.5l715 -714q37 -39 37 -91q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-53 0 -90 37l-715 716q-38 37 -64.5 101t-26.5 117zM192 1088q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5 t-90.5 -37.5t-37.5 -90.5z" />
+<glyph unicode="&#xf02c;" horiz-adv-x="1920" d="M0 864v416q0 52 38 90t90 38h416q53 0 117 -26.5t102 -64.5l715 -714q37 -39 37 -91q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-53 0 -90 37l-715 716q-38 37 -64.5 101t-26.5 117zM192 1088q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5 t-90.5 -37.5t-37.5 -90.5zM704 1408h224q53 0 117 -26.5t102 -64.5l715 -714q37 -39 37 -91q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-36 0 -59 14t-53 45l470 470q37 37 37 90q0 52 -37 91l-715 714q-38 38 -102 64.5t-117 26.5z" />
+<glyph unicode="&#xf02d;" horiz-adv-x="1664" d="M10 184q0 4 3 27t4 37q1 8 -3 21.5t-3 19.5q2 11 8 21t16.5 23.5t16.5 23.5q23 38 45 91.5t30 91.5q3 10 0.5 30t-0.5 28q3 11 17 28t17 23q21 36 42 92t25 90q1 9 -2.5 32t0.5 28q4 13 22 30.5t22 22.5q19 26 42.5 84.5t27.5 96.5q1 8 -3 25.5t-2 26.5q2 8 9 18t18 23 t17 21q8 12 16.5 30.5t15 35t16 36t19.5 32t26.5 23.5t36 11.5t47.5 -5.5l-1 -3q38 9 51 9h761q74 0 114 -56t18 -130l-274 -906q-36 -119 -71.5 -153.5t-128.5 -34.5h-869q-27 0 -38 -15q-11 -16 -1 -43q24 -70 144 -70h923q29 0 56 15.5t35 41.5l300 987q7 22 5 57 q38 -15 59 -43q40 -57 18 -129l-275 -906q-19 -64 -76.5 -107.5t-122.5 -43.5h-923q-77 0 -148.5 53.5t-99.5 131.5q-24 67 -2 127zM492 800q-4 -13 2 -22.5t20 -9.5h608q13 0 25.5 9.5t16.5 22.5l21 64q4 13 -2 22.5t-20 9.5h-608q-13 0 -25.5 -9.5t-16.5 -22.5zM575 1056 q-4 -13 2 -22.5t20 -9.5h608q13 0 25.5 9.5t16.5 22.5l21 64q4 13 -2 22.5t-20 9.5h-608q-13 0 -25.5 -9.5t-16.5 -22.5z" />
+<glyph unicode="&#xf02e;" horiz-adv-x="1280" d="M0 7v1289q0 34 19.5 62t52.5 41q21 9 44 9h1048q23 0 44 -9q33 -13 52.5 -41t19.5 -62v-1289q0 -34 -19.5 -62t-52.5 -41q-19 -8 -44 -8q-48 0 -83 32l-441 424l-441 -424q-36 -33 -83 -33q-23 0 -44 9q-33 13 -52.5 41t-19.5 62z" />
+<glyph unicode="&#xf02f;" horiz-adv-x="1664" d="M0 160v416q0 79 56.5 135.5t135.5 56.5h64v544q0 40 28 68t68 28h672q40 0 88 -20t76 -48l152 -152q28 -28 48 -76t20 -88v-256h64q79 0 135.5 -56.5t56.5 -135.5v-416q0 -13 -9.5 -22.5t-22.5 -9.5h-224v-160q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v160h-224 q-13 0 -22.5 9.5t-9.5 22.5zM384 0h896v256h-896v-256zM384 640h896v384h-160q-40 0 -68 28t-28 68v160h-640v-640zM1408 576q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf030;" horiz-adv-x="1920" d="M0 128v896q0 106 75 181t181 75h224l51 136q19 49 69.5 84.5t103.5 35.5h512q53 0 103.5 -35.5t69.5 -84.5l51 -136h224q106 0 181 -75t75 -181v-896q0 -106 -75 -181t-181 -75h-1408q-106 0 -181 75t-75 181zM512 576q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5 t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5zM672 576q0 119 84.5 203.5t203.5 84.5t203.5 -84.5t84.5 -203.5t-84.5 -203.5t-203.5 -84.5t-203.5 84.5t-84.5 203.5z" />
+<glyph unicode="&#xf031;" horiz-adv-x="1664" d="M0 -128l2 79q23 7 56 12.5t57 10.5t49.5 14.5t44.5 29t31 50.5l237 616l280 724h75h53q8 -14 11 -21l205 -480q33 -78 106 -257.5t114 -274.5q15 -34 58 -144.5t72 -168.5q20 -45 35 -57q19 -15 88 -29.5t84 -20.5q6 -38 6 -57q0 -4 -0.5 -13t-0.5 -13q-63 0 -190 8 t-191 8q-76 0 -215 -7t-178 -8q0 43 4 78l131 28q1 0 12.5 2.5t15.5 3.5t14.5 4.5t15 6.5t11 8t9 11t2.5 14q0 16 -31 96.5t-72 177.5t-42 100l-450 2q-26 -58 -76.5 -195.5t-50.5 -162.5q0 -22 14 -37.5t43.5 -24.5t48.5 -13.5t57 -8.5t41 -4q1 -19 1 -58q0 -9 -2 -27 q-58 0 -174.5 10t-174.5 10q-8 0 -26.5 -4t-21.5 -4q-80 -14 -188 -14zM555 527q33 0 136.5 -2t160.5 -2q19 0 57 2q-87 253 -184 452z" />
+<glyph unicode="&#xf032;" horiz-adv-x="1408" d="M0 -128l2 94q15 4 85 16t106 27q7 12 12.5 27t8.5 33.5t5.5 32.5t3 37.5t0.5 34v35.5v30q0 982 -22 1025q-4 8 -22 14.5t-44.5 11t-49.5 7t-48.5 4.5t-30.5 3l-4 83q98 2 340 11.5t373 9.5q23 0 68.5 -0.5t67.5 -0.5q70 0 136.5 -13t128.5 -42t108 -71t74 -104.5 t28 -137.5q0 -52 -16.5 -95.5t-39 -72t-64.5 -57.5t-73 -45t-84 -40q154 -35 256.5 -134t102.5 -248q0 -100 -35 -179.5t-93.5 -130.5t-138 -85.5t-163.5 -48.5t-176 -14q-44 0 -132 3t-132 3q-106 0 -307 -11t-231 -12zM533 1292q0 -50 4 -151t4 -152q0 -27 -0.5 -80 t-0.5 -79q0 -46 1 -69q42 -7 109 -7q82 0 143 13t110 44.5t74.5 89.5t25.5 142q0 70 -29 122.5t-79 82t-108 43.5t-124 14q-50 0 -130 -13zM538.5 165q0.5 -37 4.5 -83.5t12 -66.5q74 -32 140 -32q376 0 376 335q0 114 -41 180q-27 44 -61.5 74t-67.5 46.5t-80.5 25 t-84 10.5t-94.5 2q-73 0 -101 -10q0 -53 -0.5 -159t-0.5 -158q0 -8 -1 -67.5t-0.5 -96.5z" />
+<glyph unicode="&#xf033;" horiz-adv-x="1024" d="M0 -126l17 85q6 2 81.5 21.5t111.5 37.5q28 35 41 101q1 7 62 289t114 543.5t52 296.5v25q-24 13 -54.5 18.5t-69.5 8t-58 5.5l19 103q33 -2 120 -6.5t149.5 -7t120.5 -2.5q48 0 98.5 2.5t121 7t98.5 6.5q-5 -39 -19 -89q-30 -10 -101.5 -28.5t-108.5 -33.5 q-8 -19 -14 -42.5t-9 -40t-7.5 -45.5t-6.5 -42q-27 -148 -87.5 -419.5t-77.5 -355.5q-2 -9 -13 -58t-20 -90t-16 -83.5t-6 -57.5l1 -18q17 -4 185 -31q-3 -44 -16 -99q-11 0 -32.5 -1.5t-32.5 -1.5q-29 0 -87 10t-86 10q-138 2 -206 2q-51 0 -143 -9t-121 -11z" />
+<glyph unicode="&#xf034;" horiz-adv-x="1792" d="M0 1023v383l81 1l54 -27q12 -5 211 -5q44 0 132 2t132 2q36 0 107.5 -0.5t107.5 -0.5h293q6 0 21 -0.5t20.5 0t16 3t17.5 9t15 17.5l42 1q4 0 14 -0.5t14 -0.5q2 -112 2 -336q0 -80 -5 -109q-39 -14 -68 -18q-25 44 -54 128q-3 9 -11 48t-14.5 73.5t-7.5 35.5 q-6 8 -12 12.5t-15.5 6t-13 2.5t-18 0.5t-16.5 -0.5q-17 0 -66.5 0.5t-74.5 0.5t-64 -2t-71 -6q-9 -81 -8 -136q0 -94 2 -388t2 -455q0 -16 -2.5 -71.5t0 -91.5t12.5 -69q40 -21 124 -42.5t120 -37.5q5 -40 5 -50q0 -14 -3 -29l-34 -1q-76 -2 -218 8t-207 10q-50 0 -151 -9 t-152 -9q-3 51 -3 52v9q17 27 61.5 43t98.5 29t78 27q19 42 19 383q0 101 -3 303t-3 303v117q0 2 0.5 15.5t0.5 25t-1 25.5t-3 24t-5 14q-11 12 -162 12q-33 0 -93 -12t-80 -26q-19 -13 -34 -72.5t-31.5 -111t-42.5 -53.5q-42 26 -56 44zM1414 109.5q9 18.5 42 18.5h80v1024 h-80q-33 0 -42 18.5t11 44.5l126 162q20 26 49 26t49 -26l126 -162q20 -26 11 -44.5t-42 -18.5h-80v-1024h80q33 0 42 -18.5t-11 -44.5l-126 -162q-20 -26 -49 -26t-49 26l-126 162q-20 26 -11 44.5z" />
+<glyph unicode="&#xf035;" d="M0 1023v383l81 1l54 -27q12 -5 211 -5q44 0 132 2t132 2q70 0 246.5 1t304.5 0.5t247 -4.5q33 -1 56 31l42 1q4 0 14 -0.5t14 -0.5q2 -112 2 -336q0 -80 -5 -109q-39 -14 -68 -18q-25 44 -54 128q-3 9 -11 47.5t-15 73.5t-7 36q-10 13 -27 19q-5 2 -66 2q-30 0 -93 1 t-103 1t-94 -2t-96 -7q-9 -81 -8 -136l1 -152v52q0 -55 1 -154t1.5 -180t0.5 -153q0 -16 -2.5 -71.5t0 -91.5t12.5 -69q40 -21 124 -42.5t120 -37.5q5 -40 5 -50q0 -14 -3 -29l-34 -1q-76 -2 -218 8t-207 10q-50 0 -151 -9t-152 -9q-3 51 -3 52v9q17 27 61.5 43t98.5 29 t78 27q7 16 11.5 74t6 145.5t1.5 155t-0.5 153.5t-0.5 89q0 7 -2.5 21.5t-2.5 22.5q0 7 0.5 44t1 73t0 76.5t-3 67.5t-6.5 32q-11 12 -162 12q-41 0 -163 -13.5t-138 -24.5q-19 -12 -34 -71.5t-31.5 -111.5t-42.5 -54q-42 26 -56 44zM5 -64q0 28 26 49q4 3 36 30t59.5 49 t57.5 41.5t42 19.5q13 0 20.5 -10.5t10 -28.5t2.5 -33.5t-1.5 -33t-1.5 -19.5h1024q0 2 -1.5 19.5t-1.5 33t2.5 33.5t10 28.5t20.5 10.5q12 0 42 -19.5t57.5 -41.5t59.5 -49t36 -30q26 -21 26 -49t-26 -49q-4 -3 -36 -30t-59.5 -49t-57.5 -41.5t-42 -19.5q-13 0 -20.5 10.5 t-10 28.5t-2.5 33.5t1.5 33t1.5 19.5h-1024q0 -2 1.5 -19.5t1.5 -33t-2.5 -33.5t-10 -28.5t-20.5 -10.5q-12 0 -42 19.5t-57.5 41.5t-59.5 49t-36 30q-26 21 -26 49z" />
+<glyph unicode="&#xf036;" horiz-adv-x="1792" d="M0 64v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 448v128q0 26 19 45t45 19h1280q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM0 832v128q0 26 19 45t45 19h1536 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1536q-26 0 -45 19t-19 45zM0 1216v128q0 26 19 45t45 19h1152q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1152q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf037;" horiz-adv-x="1792" d="M0 64v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM128 832v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM384 448v128q0 26 19 45t45 19h896 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-896q-26 0 -45 19t-19 45zM512 1216v128q0 26 19 45t45 19h640q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-640q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf038;" horiz-adv-x="1792" d="M0 64v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM128 832v128q0 26 19 45t45 19h1536q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1536q-26 0 -45 19t-19 45zM384 448v128q0 26 19 45t45 19h1280 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM512 1216v128q0 26 19 45t45 19h1152q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1152q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf039;" horiz-adv-x="1792" d="M0 64v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 448v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 832v128q0 26 19 45t45 19h1664 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 1216v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf03a;" horiz-adv-x="1792" d="M0 32v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5zM0 416v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5 t-9.5 22.5zM0 800v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5zM0 1184v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192 q-13 0 -22.5 9.5t-9.5 22.5zM384 32v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5zM384 416v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5 t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5zM384 800v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5zM384 1184v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-192 q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf03b;" horiz-adv-x="1792" d="M0 32v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5zM0 1184v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5 t-9.5 22.5zM32 704q0 14 9 23l288 288q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5v-576q0 -13 -9.5 -22.5t-22.5 -9.5q-14 0 -23 9l-288 288q-9 9 -9 23zM640 416v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088 q-13 0 -22.5 9.5t-9.5 22.5zM640 800v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf03c;" horiz-adv-x="1792" d="M0 32v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5zM0 416v576q0 13 9.5 22.5t22.5 9.5q14 0 23 -9l288 -288q9 -9 9 -23t-9 -23l-288 -288q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5z M0 1184v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5zM640 416v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5 t-9.5 22.5zM640 800v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf03d;" horiz-adv-x="1792" d="M0 288v704q0 119 84.5 203.5t203.5 84.5h704q119 0 203.5 -84.5t84.5 -203.5v-165l403 402q18 19 45 19q12 0 25 -5q39 -17 39 -59v-1088q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-403 403v-166q0 -119 -84.5 -203.5t-203.5 -84.5h-704q-119 0 -203.5 84.5 t-84.5 203.5z" />
+<glyph unicode="&#xf03e;" horiz-adv-x="1920" d="M0 32v1216q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v1216q0 13 -9.5 22.5t-22.5 9.5h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-1216z M256 128v192l320 320l160 -160l512 512l416 -416v-448h-1408zM256 960q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136z" />
+<glyph unicode="&#xf040;" d="M0 -128v416l832 832l416 -416l-832 -832h-416zM128 128h128v-128h107l91 91l-235 235l-91 -91v-107zM298 384q0 -22 22 -22q10 0 17 7l542 542q7 7 7 17q0 22 -22 22q-10 0 -17 -7l-542 -542q-7 -7 -7 -17zM896 1184l166 165q36 38 90 38q53 0 91 -38l235 -234 q37 -39 37 -91q0 -53 -37 -90l-166 -166z" />
+<glyph unicode="&#xf041;" horiz-adv-x="1024" d="M0 896q0 212 150 362t362 150t362 -150t150 -362q0 -109 -33 -179l-364 -774q-16 -33 -47.5 -52t-67.5 -19t-67.5 19t-46.5 52l-365 774q-33 70 -33 179zM256 896q0 -106 75 -181t181 -75t181 75t75 181t-75 181t-181 75t-181 -75t-75 -181z" />
+<glyph unicode="&#xf042;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73v1088q-148 0 -273 -73t-198 -198t-73 -273z" />
+<glyph unicode="&#xf043;" horiz-adv-x="1024" d="M0 512q0 145 81 275q6 9 62.5 90.5t101 151t99.5 178t83 201.5q9 30 34 47t51 17t51.5 -17t33.5 -47q28 -93 83 -201.5t99.5 -178t101 -151t62.5 -90.5q81 -127 81 -275q0 -212 -150 -362t-362 -150t-362 150t-150 362zM256 384q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5 t37.5 90.5q0 36 -20 69q-1 1 -15.5 22.5t-25.5 38t-25 44t-21 50.5q-4 16 -21 16t-21 -16q-7 -23 -21 -50.5t-25 -44t-25.5 -38t-15.5 -22.5q-20 -33 -20 -69z" />
+<glyph unicode="&#xf044;" horiz-adv-x="1792" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q63 0 117 -25q15 -7 18 -23q3 -17 -9 -29l-49 -49q-14 -14 -32 -8q-23 6 -45 6h-832q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v126q0 13 9 22l64 64q15 15 35 7t20 -29v-190 q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM640 256v288l672 672l288 -288l-672 -672h-288zM736 448h96v-96h56l116 116l-152 152l-116 -116v-56zM944 688q16 -16 33 1l350 350q17 17 1 33t-33 -1l-350 -350q-17 -17 -1 -33zM1376 1280l92 92 q28 28 68 28t68 -28l152 -152q28 -28 28 -68t-28 -68l-92 -92z" />
+<glyph unicode="&#xf045;" horiz-adv-x="1664" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h255q13 0 22.5 -9.5t9.5 -22.5q0 -27 -26 -32q-77 -26 -133 -60q-10 -4 -16 -4h-112q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v214q0 19 18 29q28 13 54 37q16 16 35 8q21 -9 21 -29v-259 q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM256 704q0 49 3.5 91t14 90t28 88t47 81.5t68.5 74t94.5 61.5t124.5 48.5t159.5 30.5t196.5 11h160v192q0 42 39 59q13 5 25 5q26 0 45 -19l384 -384q19 -19 19 -45t-19 -45l-384 -384 q-18 -19 -45 -19q-12 0 -25 5q-39 17 -39 59v192h-160q-323 0 -438 -131q-119 -137 -74 -473q3 -23 -20 -34q-8 -2 -12 -2q-16 0 -26 13q-10 14 -21 31t-39.5 68.5t-49.5 99.5t-38.5 114t-17.5 122z" />
+<glyph unicode="&#xf046;" horiz-adv-x="1664" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q63 0 117 -25q15 -7 18 -23q3 -17 -9 -29l-49 -49q-10 -10 -23 -10q-3 0 -9 2q-23 6 -45 6h-832q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v254q0 13 9 22l64 64q10 10 23 10q6 0 12 -3 q20 -8 20 -29v-318q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM257 768q0 33 24 57l110 110q24 24 57 24t57 -24l263 -263l647 647q24 24 57 24t57 -24l110 -110q24 -24 24 -57t-24 -57l-814 -814q-24 -24 -57 -24t-57 24l-430 430 q-24 24 -24 57z" />
+<glyph unicode="&#xf047;" horiz-adv-x="1792" d="M0 640q0 26 19 45l256 256q19 19 45 19t45 -19t19 -45v-128h384v384h-128q-26 0 -45 19t-19 45t19 45l256 256q19 19 45 19t45 -19l256 -256q19 -19 19 -45t-19 -45t-45 -19h-128v-384h384v128q0 26 19 45t45 19t45 -19l256 -256q19 -19 19 -45t-19 -45l-256 -256 q-19 -19 -45 -19t-45 19t-19 45v128h-384v-384h128q26 0 45 -19t19 -45t-19 -45l-256 -256q-19 -19 -45 -19t-45 19l-256 256q-19 19 -19 45t19 45t45 19h128v384h-384v-128q0 -26 -19 -45t-45 -19t-45 19l-256 256q-19 19 -19 45z" />
+<glyph unicode="&#xf048;" horiz-adv-x="1024" d="M0 -64v1408q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-678q4 11 13 19l710 710q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-678q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf049;" horiz-adv-x="1792" d="M0 -64v1408q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-678q4 11 13 19l710 710q19 19 32 13t13 -32v-710q4 11 13 19l710 710q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-710q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-678q0 -26 -19 -45 t-45 -19h-128q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf04a;" horiz-adv-x="1664" d="M122 640q0 26 19 45l710 710q19 19 32 13t13 -32v-710q5 11 13 19l710 710q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-8 9 -13 19v-710q0 -26 -13 -32t-32 13l-710 710q-19 19 -19 45z" />
+<glyph unicode="&#xf04b;" horiz-adv-x="1408" d="M0 -96v1472q0 26 16.5 36t39.5 -3l1328 -738q23 -13 23 -31t-23 -31l-1328 -738q-23 -13 -39.5 -3t-16.5 36z" />
+<glyph unicode="&#xf04c;" d="M0 -64v1408q0 26 19 45t45 19h512q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45zM896 -64v1408q0 26 19 45t45 19h512q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf04d;" d="M0 -64v1408q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf04e;" horiz-adv-x="1664" d="M0 -96v1472q0 26 13 32t32 -13l710 -710q8 -8 13 -19v710q0 26 13 32t32 -13l710 -710q19 -19 19 -45t-19 -45l-710 -710q-19 -19 -32 -13t-13 32v710q-5 -10 -13 -19l-710 -710q-19 -19 -32 -13t-13 32z" />
+<glyph unicode="&#xf050;" horiz-adv-x="1792" d="M0 -96v1472q0 26 13 32t32 -13l710 -710q8 -8 13 -19v710q0 26 13 32t32 -13l710 -710q8 -8 13 -19v678q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v678q-5 -10 -13 -19l-710 -710q-19 -19 -32 -13t-13 32v710 q-5 -10 -13 -19l-710 -710q-19 -19 -32 -13t-13 32z" />
+<glyph unicode="&#xf051;" horiz-adv-x="1024" d="M0 -96v1472q0 26 13 32t32 -13l710 -710q8 -8 13 -19v678q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v678q-5 -10 -13 -19l-710 -710q-19 -19 -32 -13t-13 32z" />
+<glyph unicode="&#xf052;" horiz-adv-x="1538" d="M1 64v256q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM1 525q-6 13 13 32l710 710q19 19 45 19t45 -19l710 -710q19 -19 13 -32t-32 -13h-1472q-26 0 -32 13z" />
+<glyph unicode="&#xf053;" horiz-adv-x="1280" d="M154 704q0 26 19 45l742 742q19 19 45 19t45 -19l166 -166q19 -19 19 -45t-19 -45l-531 -531l531 -531q19 -19 19 -45t-19 -45l-166 -166q-19 -19 -45 -19t-45 19l-742 742q-19 19 -19 45z" />
+<glyph unicode="&#xf054;" horiz-adv-x="1280" d="M90 128q0 26 19 45l531 531l-531 531q-19 19 -19 45t19 45l166 166q19 19 45 19t45 -19l742 -742q19 -19 19 -45t-19 -45l-742 -742q-19 -19 -45 -19t-45 19l-166 166q-19 19 -19 45z" />
+<glyph unicode="&#xf055;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM320 576q0 -26 19 -45t45 -19h256v-256q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v256h256q26 0 45 19 t19 45v128q0 26 -19 45t-45 19h-256v256q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-256h-256q-26 0 -45 -19t-19 -45v-128z" />
+<glyph unicode="&#xf056;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM320 576q0 -26 19 -45t45 -19h768q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-768q-26 0 -45 -19 t-19 -45v-128z" />
+<glyph unicode="&#xf057;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM387 414q0 -27 19 -46l90 -90q19 -19 46 -19q26 0 45 19l181 181l181 -181q19 -19 45 -19q27 0 46 19 l90 90q19 19 19 46q0 26 -19 45l-181 181l181 181q19 19 19 45q0 27 -19 46l-90 90q-19 19 -46 19q-26 0 -45 -19l-181 -181l-181 181q-19 19 -45 19q-27 0 -46 -19l-90 -90q-19 -19 -19 -46q0 -26 19 -45l181 -181l-181 -181q-19 -19 -19 -45z" />
+<glyph unicode="&#xf058;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM252 621q0 -27 18 -45l362 -362q19 -19 45 -19q27 0 46 19l543 543q18 18 18 45q0 28 -18 46l-91 90 q-19 19 -45 19t-45 -19l-408 -407l-226 226q-19 19 -45 19t-45 -19l-91 -90q-18 -18 -18 -46z" />
+<glyph unicode="&#xf059;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM417 939q-15 -24 8 -42l132 -100q7 -6 19 -6q16 0 25 12q53 68 86 92q34 24 86 24q48 0 85.5 -26 t37.5 -59q0 -38 -20 -61t-68 -45q-63 -28 -115.5 -86.5t-52.5 -125.5v-36q0 -14 9 -23t23 -9h192q14 0 23 9t9 23q0 19 21.5 49.5t54.5 49.5q32 18 49 28.5t46 35t44.5 48t28 60.5t12.5 81q0 88 -55.5 163t-138.5 116t-170 41q-243 0 -371 -213zM640 160q0 -14 9 -23t23 -9 h192q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-192z" />
+<glyph unicode="&#xf05a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM512 160q0 -14 9 -23t23 -9h448q14 0 23 9t9 23v160q0 14 -9 23t-23 9h-96v512q0 14 -9 23t-23 9h-320 q-14 0 -23 -9t-9 -23v-160q0 -14 9 -23t23 -9h96v-320h-96q-14 0 -23 -9t-9 -23v-160zM640 1056q0 -14 9 -23t23 -9h192q14 0 23 9t9 23v160q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-160z" />
+<glyph unicode="&#xf05b;" d="M0 576v128q0 26 19 45t45 19h143q37 161 154.5 278.5t278.5 154.5v143q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-143q161 -37 278.5 -154.5t154.5 -278.5h143q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-143q-37 -161 -154.5 -278.5t-278.5 -154.5v-143 q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v143q-161 37 -278.5 154.5t-154.5 278.5h-143q-26 0 -45 19t-19 45zM339 512q32 -108 112.5 -188.5t188.5 -112.5v109q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-109q108 32 188.5 112.5t112.5 188.5h-109q-26 0 -45 19 t-19 45v128q0 26 19 45t45 19h109q-32 108 -112.5 188.5t-188.5 112.5v-109q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v109q-108 -32 -188.5 -112.5t-112.5 -188.5h109q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-109z" />
+<glyph unicode="&#xf05c;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM429 480q0 13 10 23l137 137l-137 137q-10 10 -10 23t10 23l146 146q10 10 23 10t23 -10l137 -137l137 137q10 10 23 10t23 -10l146 -146q10 -10 10 -23t-10 -23l-137 -137l137 -137q10 -10 10 -23t-10 -23l-146 -146q-10 -10 -23 -10t-23 10l-137 137 l-137 -137q-10 -10 -23 -10t-23 10l-146 146q-10 10 -10 23z" />
+<glyph unicode="&#xf05d;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM346 640q0 26 19 45l102 102q19 19 45 19t45 -19l147 -147l275 275q19 19 45 19t45 -19l102 -102q19 -19 19 -45t-19 -45l-422 -422q-19 -19 -45 -19t-45 19l-294 294q-19 19 -19 45z" />
+<glyph unicode="&#xf05e;" d="M0 643q0 157 61 299.5t163.5 245.5t245 164t298.5 61t298.5 -61t245 -164t163.5 -245.5t61 -299.5t-61 -300t-163.5 -246t-245 -164t-298.5 -61t-298.5 61t-245 164t-163.5 246t-61 300zM224 643q0 -162 89 -299l755 754q-135 91 -300 91q-148 0 -273 -73t-198 -199 t-73 -274zM471 185q137 -89 297 -89q111 0 211.5 43.5t173.5 116.5t116 174.5t43 212.5q0 161 -87 295z" />
+<glyph unicode="&#xf060;" d="M64 576q0 52 37 91l651 650q38 38 91 38q52 0 90 -38l75 -74q38 -38 38 -91t-38 -91l-293 -293h704q52 0 84.5 -37.5t32.5 -90.5v-128q0 -53 -32.5 -90.5t-84.5 -37.5h-704l293 -294q38 -36 38 -90t-38 -90l-75 -76q-37 -37 -90 -37q-52 0 -91 37l-651 652q-37 37 -37 90 z" />
+<glyph unicode="&#xf061;" d="M0 512v128q0 53 32.5 90.5t84.5 37.5h704l-293 294q-38 36 -38 90t38 90l75 75q38 38 90 38q53 0 91 -38l651 -651q37 -35 37 -90q0 -54 -37 -91l-651 -651q-39 -37 -91 -37q-51 0 -90 37l-75 75q-38 38 -38 91t38 91l293 293h-704q-52 0 -84.5 37.5t-32.5 90.5z" />
+<glyph unicode="&#xf062;" horiz-adv-x="1664" d="M53 565q0 53 38 91l651 651q35 37 90 37q54 0 91 -37l651 -651q37 -39 37 -91q0 -51 -37 -90l-75 -75q-38 -38 -91 -38q-54 0 -90 38l-294 293v-704q0 -52 -37.5 -84.5t-90.5 -32.5h-128q-53 0 -90.5 32.5t-37.5 84.5v704l-294 -293q-36 -38 -90 -38t-90 38l-75 75 q-38 38 -38 90z" />
+<glyph unicode="&#xf063;" horiz-adv-x="1664" d="M53 704q0 53 38 91l74 75q39 37 91 37q53 0 90 -37l294 -294v704q0 52 38 90t90 38h128q52 0 90 -38t38 -90v-704l294 294q37 37 90 37q52 0 91 -37l75 -75q37 -39 37 -91q0 -53 -37 -90l-651 -652q-39 -37 -91 -37q-53 0 -90 37l-651 652q-38 36 -38 90z" />
+<glyph unicode="&#xf064;" horiz-adv-x="1792" d="M0 416q0 199 53 333q162 403 875 403h224v256q0 26 19 45t45 19t45 -19l512 -512q19 -19 19 -45t-19 -45l-512 -512q-19 -19 -45 -19t-45 19t-19 45v256h-224q-98 0 -175.5 -6t-154 -21.5t-133 -42.5t-105.5 -69.5t-80 -101t-48.5 -138.5t-17.5 -181q0 -55 5 -123 q0 -6 2.5 -23.5t2.5 -26.5q0 -15 -8.5 -25t-23.5 -10q-16 0 -28 17q-7 9 -13 22t-13.5 30t-10.5 24q-127 285 -127 451z" />
+<glyph unicode="&#xf065;" d="M0 -64v448q0 26 19 45t45 19t45 -19l144 -144l332 332q10 10 23 10t23 -10l114 -114q10 -10 10 -23t-10 -23l-332 -332l144 -144q19 -19 19 -45t-19 -45t-45 -19h-448q-26 0 -45 19t-19 45zM781 800q0 13 10 23l332 332l-144 144q-19 19 -19 45t19 45t45 19h448 q26 0 45 -19t19 -45v-448q0 -26 -19 -45t-45 -19t-45 19l-144 144l-332 -332q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23z" />
+<glyph unicode="&#xf066;" d="M13 32q0 13 10 23l332 332l-144 144q-19 19 -19 45t19 45t45 19h448q26 0 45 -19t19 -45v-448q0 -26 -19 -45t-45 -19t-45 19l-144 144l-332 -332q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23zM768 704v448q0 26 19 45t45 19t45 -19l144 -144l332 332q10 10 23 10 t23 -10l114 -114q10 -10 10 -23t-10 -23l-332 -332l144 -144q19 -19 19 -45t-19 -45t-45 -19h-448q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf067;" horiz-adv-x="1408" d="M0 608v192q0 40 28 68t68 28h416v416q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-416h416q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-416v-416q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68v416h-416q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf068;" horiz-adv-x="1408" d="M0 608v192q0 40 28 68t68 28h1216q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-1216q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf069;" horiz-adv-x="1664" d="M122.5 408.5q13.5 51.5 59.5 77.5l266 154l-266 154q-46 26 -59.5 77.5t12.5 97.5l64 110q26 46 77.5 59.5t97.5 -12.5l266 -153v307q0 52 38 90t90 38h128q52 0 90 -38t38 -90v-307l266 153q46 26 97.5 12.5t77.5 -59.5l64 -110q26 -46 12.5 -97.5t-59.5 -77.5 l-266 -154l266 -154q46 -26 59.5 -77.5t-12.5 -97.5l-64 -110q-26 -46 -77.5 -59.5t-97.5 12.5l-266 153v-307q0 -52 -38 -90t-90 -38h-128q-52 0 -90 38t-38 90v307l-266 -153q-46 -26 -97.5 -12.5t-77.5 59.5l-64 110q-26 46 -12.5 97.5z" />
+<glyph unicode="&#xf06a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM624 1126l17 -621q0 -10 10 -17.5t24 -7.5h185q14 0 23.5 7.5t10.5 17.5l18 621q0 12 -10 18 q-10 8 -24 8h-220q-14 0 -24 -8q-10 -6 -10 -18zM640 161q0 -13 10 -23t23 -10h192q13 0 22 9.5t9 23.5v190q0 14 -9 23.5t-22 9.5h-192q-13 0 -23 -10t-10 -23v-190z" />
+<glyph unicode="&#xf06b;" d="M0 544v320q0 14 9 23t23 9h440q-93 0 -158.5 65.5t-65.5 158.5t65.5 158.5t158.5 65.5q107 0 168 -77l128 -165l128 165q61 77 168 77q93 0 158.5 -65.5t65.5 -158.5t-65.5 -158.5t-158.5 -65.5h440q14 0 23 -9t9 -23v-320q0 -14 -9 -23t-23 -9h-96v-416q0 -40 -28 -68 t-68 -28h-1088q-40 0 -68 28t-28 68v416h-96q-14 0 -23 9t-9 23zM376 1120q0 -40 28 -68t68 -28h195l-126 161q-26 31 -69 31q-40 0 -68 -28t-28 -68zM608 180q0 -25 18 -38.5t46 -13.5h192q28 0 46 13.5t18 38.5v56v468v192h-320v-192v-468v-56zM870 1024h194q40 0 68 28 t28 68t-28 68t-68 28q-43 0 -69 -31z" />
+<glyph unicode="&#xf06c;" horiz-adv-x="1792" d="M0 121q0 35 31 73.5t68 65.5t68 56t31 48q0 4 -14 38t-16 44q-9 51 -9 104q0 115 43.5 220t119 184.5t170.5 139t204 95.5q55 18 145 25.5t179.5 9t178.5 6t163.5 24t113.5 56.5l29.5 29.5t29.5 28t27 20t36.5 16t43.5 4.5q39 0 70.5 -46t47.5 -112t24 -124t8 -96 q0 -95 -20 -193q-46 -224 -184.5 -383t-357.5 -268q-214 -108 -438 -108q-148 0 -286 47q-15 5 -88 42t-96 37q-16 0 -39.5 -32t-45 -70t-52.5 -70t-60 -32q-30 0 -51 11t-31 24t-27 42q-2 4 -6 11t-5.5 10t-3 9.5t-1.5 13.5zM384 448q0 -26 19 -45t45 -19q24 0 45 19 q27 24 74 71t67 66q137 124 268.5 176t313.5 52q26 0 45 19t19 45t-19 45t-45 19q-172 0 -318 -49.5t-259.5 -134t-235.5 -219.5q-19 -21 -19 -45z" />
+<glyph unicode="&#xf06d;" horiz-adv-x="1408" d="M0 -160q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5v64zM256 640q0 78 24.5 144t64 112.5t87.5 88t96 77.5t87.5 72t64 81.5t24.5 96.5q0 94 -66 224l3 -1l-1 1q90 -41 160 -83t138.5 -100 t113.5 -122.5t72.5 -150.5t27.5 -184q0 -78 -24.5 -144t-64 -112.5t-87.5 -88t-96 -77.5t-87.5 -72t-64 -81.5t-24.5 -96.5q0 -96 67 -224l-4 1l1 -1q-90 41 -160 83t-138.5 100t-113.5 122.5t-72.5 150.5t-27.5 184z" />
+<glyph unicode="&#xf06e;" horiz-adv-x="1792" d="M0 576q0 34 20 69q140 229 376.5 368t499.5 139t499.5 -139t376.5 -368q20 -35 20 -69t-20 -69q-140 -230 -376.5 -368.5t-499.5 -138.5t-499.5 139t-376.5 368q-20 35 -20 69zM128 576q133 -205 333.5 -326.5t434.5 -121.5t434.5 121.5t333.5 326.5q-152 236 -381 353 q61 -104 61 -225q0 -185 -131.5 -316.5t-316.5 -131.5t-316.5 131.5t-131.5 316.5q0 121 61 225q-229 -117 -381 -353zM592 704q0 -20 14 -34t34 -14t34 14t14 34q0 86 61 147t147 61q20 0 34 14t14 34t-14 34t-34 14q-125 0 -214.5 -89.5t-89.5 -214.5z" />
+<glyph unicode="&#xf070;" horiz-adv-x="1792" d="M0 576q0 38 20 69q153 235 380 371t496 136q89 0 180 -17l54 97q10 16 28 16q5 0 18 -6t31 -15.5t33 -18.5t31.5 -18.5t19.5 -11.5q16 -10 16 -27q0 -7 -1 -9q-105 -188 -315 -566t-316 -567l-49 -89q-10 -16 -28 -16q-12 0 -134 70q-16 10 -16 28q0 12 44 87 q-143 65 -263.5 173t-208.5 245q-20 31 -20 69zM128 576q167 -258 427 -375l78 141q-87 63 -136 159t-49 203q0 121 61 225q-229 -117 -381 -353zM592 704q0 -20 14 -34t34 -14t34 14t14 34q0 86 61 147t147 61q20 0 34 14t14 34t-14 34t-34 14q-125 0 -214.5 -89.5 t-89.5 -214.5zM896 0l74 132q212 18 392.5 137t301.5 307q-115 179 -282 294l63 112q95 -64 182.5 -153t144.5 -184q20 -34 20 -69t-20 -69q-39 -64 -109 -145q-150 -172 -347.5 -267t-419.5 -95zM1056 286l280 502q8 -45 8 -84q0 -139 -79 -253.5t-209 -164.5z" />
+<glyph unicode="&#xf071;" horiz-adv-x="1792" d="M16 61l768 1408q17 31 47 49t65 18t65 -18t47 -49l768 -1408q35 -63 -2 -126q-17 -29 -46.5 -46t-63.5 -17h-1536q-34 0 -63.5 17t-46.5 46q-37 63 -2 126zM752 992l17 -457q0 -10 10 -16.5t24 -6.5h185q14 0 23.5 6.5t10.5 16.5l18 459q0 12 -10 19q-13 11 -24 11h-220 q-11 0 -24 -11q-10 -7 -10 -21zM768 161q0 -14 9.5 -23.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 23.5v190q0 14 -9.5 23.5t-22.5 9.5h-192q-13 0 -22.5 -9.5t-9.5 -23.5v-190z" />
+<glyph unicode="&#xf072;" horiz-adv-x="1408" d="M0 477q-1 13 9 25l96 97q9 9 23 9q6 0 8 -1l194 -53l259 259l-508 279q-14 8 -17 24q-2 16 9 27l128 128q14 13 30 8l665 -159l160 160q76 76 172 108t148 -12q44 -52 12 -148t-108 -172l-161 -161l160 -696q5 -19 -12 -33l-128 -96q-7 -6 -19 -6q-4 0 -7 1q-15 3 -21 16 l-279 508l-259 -259l53 -194q5 -17 -8 -31l-96 -96q-9 -9 -23 -9h-2q-15 2 -24 13l-189 252l-252 189q-11 7 -13 23z" />
+<glyph unicode="&#xf073;" horiz-adv-x="1664" d="M0 -128v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h384v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h128q52 0 90 -38t38 -90v-1280q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90zM128 -128h288v288h-288v-288zM128 224 h288v320h-288v-320zM128 608h288v288h-288v-288zM384 1088q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v288q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-288zM480 -128h320v288h-320v-288zM480 224h320v320h-320v-320zM480 608h320v288h-320 v-288zM864 -128h320v288h-320v-288zM864 224h320v320h-320v-320zM864 608h320v288h-320v-288zM1152 1088q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v288q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-288zM1248 -128h288v288h-288v-288z M1248 224h288v320h-288v-320zM1248 608h288v288h-288v-288z" />
+<glyph unicode="&#xf074;" horiz-adv-x="1792" d="M0 160v192q0 14 9 23t23 9h224q48 0 87 15t69 45t51 61.5t45 77.5q32 62 78 171q29 66 49.5 111t54 105t64 100t74 83t90 68.5t106.5 42t128 16.5h256v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23t-9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192 h-256q-48 0 -87 -15t-69 -45t-51 -61.5t-45 -77.5q-32 -62 -78 -171q-29 -66 -49.5 -111t-54 -105t-64 -100t-74 -83t-90 -68.5t-106.5 -42t-128 -16.5h-224q-14 0 -23 9t-9 23zM0 1056v192q0 14 9 23t23 9h224q250 0 410 -225q-60 -92 -137 -273q-22 45 -37 72.5 t-40.5 63.5t-51 56.5t-63 35t-81.5 14.5h-224q-14 0 -23 9t-9 23zM743 353q59 93 136 273q22 -45 37 -72.5t40.5 -63.5t51 -56.5t63 -35t81.5 -14.5h256v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23t-9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192 q-32 0 -85 -0.5t-81 -1t-73 1t-71 5t-64 10.5t-63 18.5t-58 28.5t-59 40t-55 53.5t-56 69.5z" />
+<glyph unicode="&#xf075;" horiz-adv-x="1792" d="M0 640q0 130 71 248.5t191 204.5t286 136.5t348 50.5q244 0 450 -85.5t326 -233t120 -321.5t-120 -321.5t-326 -233t-450 -85.5q-70 0 -145 8q-198 -175 -460 -242q-49 -14 -114 -22q-17 -2 -30.5 9t-17.5 29v1q-3 4 -0.5 12t2 10t4.5 9.5l6 9t7 8.5t8 9q7 8 31 34.5 t34.5 38t31 39.5t32.5 51t27 59t26 76q-157 89 -247.5 220t-90.5 281z" />
+<glyph unicode="&#xf076;" d="M0 576v128q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-128q0 -52 23.5 -90t53.5 -57t71 -30t64 -13t44 -2t44 2t64 13t71 30t53.5 57t23.5 90v128q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-128q0 -201 -98.5 -362t-274 -251.5t-395.5 -90.5t-395.5 90.5t-274 251.5 t-98.5 362zM0 960v384q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-384q0 -26 -19 -45t-45 -19h-384q-26 0 -45 19t-19 45zM1024 960v384q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-384q0 -26 -19 -45t-45 -19h-384q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf077;" horiz-adv-x="1792" d="M90 250.5q0 26.5 19 45.5l742 741q19 19 45 19t45 -19l742 -741q19 -19 19 -45.5t-19 -45.5l-166 -165q-19 -19 -45 -19t-45 19l-531 531l-531 -531q-19 -19 -45 -19t-45 19l-166 165q-19 19 -19 45.5z" />
+<glyph unicode="&#xf078;" horiz-adv-x="1792" d="M90 773.5q0 26.5 19 45.5l166 165q19 19 45 19t45 -19l531 -531l531 531q19 19 45 19t45 -19l166 -165q19 -19 19 -45.5t-19 -45.5l-742 -741q-19 -19 -45 -19t-45 19l-742 741q-19 19 -19 45.5z" />
+<glyph unicode="&#xf079;" horiz-adv-x="1920" d="M0 704q0 24 15 41l320 384q19 22 49 22t49 -22l320 -384q15 -17 15 -41q0 -26 -19 -45t-45 -19h-192v-384h576q16 0 25 -11l160 -192q7 -11 7 -21q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-8 0 -13.5 2t-9 7t-5.5 8t-3 11.5t-1 11.5v13v11v160v416h-192q-26 0 -45 19t-19 45z M640 1120q0 13 9.5 22.5t22.5 9.5h960q8 0 13.5 -2t9 -7t5.5 -8t3 -11.5t1 -11.5v-13v-11v-160v-416h192q26 0 45 -19t19 -45q0 -24 -15 -41l-320 -384q-20 -23 -49 -23t-49 23l-320 384q-15 17 -15 41q0 26 19 45t45 19h192v384h-576q-16 0 -25 12l-160 192q-7 9 -7 20z " />
+<glyph unicode="&#xf07a;" horiz-adv-x="1664" d="M0 1216q0 26 19 45t45 19h256q16 0 28.5 -6.5t20 -15.5t13 -24.5t7.5 -26.5t5.5 -29.5t4.5 -25.5h1201q26 0 45 -19t19 -45v-512q0 -24 -16 -42.5t-41 -21.5l-1044 -122q1 -7 4.5 -21.5t6 -26.5t2.5 -22q0 -16 -24 -64h920q26 0 45 -19t19 -45t-19 -45t-45 -19h-1024 q-26 0 -45 19t-19 45q0 14 11 39.5t29.5 59.5t20.5 38l-177 823h-204q-26 0 -45 19t-19 45zM384 0q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM1280 0q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5 t-90.5 -37.5t-90.5 37.5t-37.5 90.5z" />
+<glyph unicode="&#xf07b;" horiz-adv-x="1664" d="M0 224v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h672q92 0 158 -66t66 -158v-704q0 -92 -66 -158t-158 -66h-1216q-92 0 -158 66t-66 158z" />
+<glyph unicode="&#xf07c;" horiz-adv-x="1920" d="M0 224v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h544q92 0 158 -66t66 -158v-160h-832q-94 0 -197 -47.5t-164 -119.5l-337 -396l-5 -6q0 4 -0.5 12.5t-0.5 12.5zM73 56q0 31 31 66l336 396q43 51 120.5 86.5t143.5 35.5h1088q34 0 60.5 -13t26.5 -43 q0 -31 -31 -66l-336 -396q-43 -51 -120.5 -86.5t-143.5 -35.5h-1088q-34 0 -60.5 13t-26.5 43z" />
+<glyph unicode="&#xf07d;" horiz-adv-x="768" d="M64 64q0 26 19 45t45 19h128v1024h-128q-26 0 -45 19t-19 45t19 45l256 256q19 19 45 19t45 -19l256 -256q19 -19 19 -45t-19 -45t-45 -19h-128v-1024h128q26 0 45 -19t19 -45t-19 -45l-256 -256q-19 -19 -45 -19t-45 19l-256 256q-19 19 -19 45z" />
+<glyph unicode="&#xf07e;" horiz-adv-x="1792" d="M0 640q0 26 19 45l256 256q19 19 45 19t45 -19t19 -45v-128h1024v128q0 26 19 45t45 19t45 -19l256 -256q19 -19 19 -45t-19 -45l-256 -256q-19 -19 -45 -19t-45 19t-19 45v128h-1024v-128q0 -26 -19 -45t-45 -19t-45 19l-256 256q-19 19 -19 45z" />
+<glyph unicode="&#xf080;" horiz-adv-x="1920" d="M0 32v1216q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v1216q0 13 -9.5 22.5t-22.5 9.5h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-1216z M256 128v384h256v-384h-256zM640 128v896h256v-896h-256zM1024 128v640h256v-640h-256zM1408 128v1024h256v-1024h-256z" />
+<glyph unicode="&#xf081;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 286q148 -94 322 -94q112 0 210 35.5t168 95t120.5 137t75 162t24.5 168.5q0 18 -1 27q63 45 105 109 q-56 -25 -121 -34q68 40 93 117q-65 -38 -134 -51q-61 66 -153 66q-87 0 -148.5 -61.5t-61.5 -148.5q0 -29 5 -48q-129 7 -242 65t-192 155q-29 -50 -29 -106q0 -114 91 -175q-47 1 -100 26v-2q0 -75 50 -133.5t123 -72.5q-29 -8 -51 -8q-13 0 -39 4q21 -63 74.5 -104 t121.5 -42q-116 -90 -261 -90q-26 0 -50 3z" />
+<glyph unicode="&#xf082;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-192v608h203l30 224h-233v143q0 54 28 83t96 29l132 1v207q-96 9 -180 9q-136 0 -218 -80.5t-82 -225.5v-166h-224v-224h224v-608h-544 q-119 0 -203.5 84.5t-84.5 203.5z" />
+<glyph unicode="&#xf083;" horiz-adv-x="1792" d="M0 0v1280q0 53 37.5 90.5t90.5 37.5h1536q53 0 90.5 -37.5t37.5 -90.5v-1280q0 -53 -37.5 -90.5t-90.5 -37.5h-1536q-53 0 -90.5 37.5t-37.5 90.5zM128 0h1536v128h-1536v-128zM128 1024h1536v118v138h-828l-64 -128h-644v-128zM256 1216h384v128h-384v-128zM512 574 q0 -159 112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5t-112.5 271.5t-271.5 112.5t-271.5 -112.5t-112.5 -271.5zM640 574q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181zM736 576q0 -14 9 -23t23 -9t23 9t9 23q0 40 28 68t68 28q14 0 23 9 t9 23t-9 23t-23 9q-66 0 -113 -47t-47 -113z" />
+<glyph unicode="&#xf084;" horiz-adv-x="1792" d="M0 752q0 160 95 313t248 248t313 95q163 0 265.5 -102.5t102.5 -265.5q0 -189 -131 -365l355 -355l96 96q-3 3 -26 24.5t-40 38.5t-33 36.5t-16 28.5q0 17 49 66t66 49q13 0 23 -10q6 -6 46 -44.5t82 -79.5t86.5 -86t73 -78t28.5 -41q0 -17 -49 -66t-66 -49 q-9 0 -28.5 16t-36.5 33t-38.5 40t-24.5 26l-96 -96l220 -220q28 -28 28 -68q0 -42 -39 -81t-81 -39q-40 0 -68 28l-671 671q-176 -131 -365 -131q-163 0 -265.5 102.5t-102.5 265.5zM192 768q0 -80 56 -136t136 -56t136 56t56 136q0 42 -19 83q41 -19 83 -19q80 0 136 56 t56 136t-56 136t-136 56t-136 -56t-56 -136q0 -42 19 -83q-41 19 -83 19q-80 0 -136 -56t-56 -136z" />
+<glyph unicode="&#xf085;" horiz-adv-x="1920" d="M0 549v185q0 10 7 19.5t16 10.5l155 24q11 35 32 76q-34 48 -90 115q-7 11 -7 20q0 12 7 20q22 30 82 89t79 59q11 0 21 -7l115 -90q34 18 77 32q11 108 23 154q7 24 30 24h186q11 0 20 -7.5t10 -17.5l23 -153q34 -10 75 -31l118 89q8 7 20 7q11 0 21 -8 q144 -133 144 -160q0 -9 -7 -19q-12 -16 -42 -54t-45 -60q23 -48 34 -82l152 -23q10 -2 17 -10.5t7 -19.5v-185q0 -10 -7 -19.5t-16 -10.5l-155 -24q-11 -35 -32 -76q34 -48 90 -115q7 -10 7 -20q0 -12 -7 -19q-23 -30 -82.5 -89.5t-78.5 -59.5q-11 0 -21 7l-115 90 q-37 -19 -77 -31q-11 -108 -23 -155q-7 -24 -30 -24h-186q-11 0 -20 7.5t-10 17.5l-23 153q-34 10 -75 31l-118 -89q-7 -7 -20 -7q-11 0 -21 8q-144 133 -144 160q0 9 7 19q10 14 41 53t47 61q-23 44 -35 82l-152 24q-10 1 -17 9.5t-7 19.5zM384 640q0 -106 75 -181t181 -75 t181 75t75 181t-75 181t-181 75t-181 -75t-75 -181zM1152 58v140q0 16 149 31q13 29 30 52q-51 113 -51 138q0 4 4 7q4 2 35 20t59 34t30 16q8 0 46 -46.5t52 -67.5q20 2 30 2t30 -2q51 71 92 112l6 2q4 0 124 -70q4 -3 4 -7q0 -25 -51 -138q17 -23 30 -52q149 -15 149 -31 v-140q0 -16 -149 -31q-12 -27 -30 -52q51 -113 51 -138q0 -4 -4 -7q-122 -71 -124 -71q-8 0 -46 47t-52 68q-20 -2 -30 -2t-30 2q-14 -21 -52 -68t-46 -47q-2 0 -124 71q-4 3 -4 7q0 25 51 138q-18 25 -30 52q-149 15 -149 31zM1152 1082v140q0 16 149 31q13 29 30 52 q-51 113 -51 138q0 4 4 7q4 2 35 20t59 34t30 16q8 0 46 -46.5t52 -67.5q20 2 30 2t30 -2q51 71 92 112l6 2q4 0 124 -70q4 -3 4 -7q0 -25 -51 -138q17 -23 30 -52q149 -15 149 -31v-140q0 -16 -149 -31q-12 -27 -30 -52q51 -113 51 -138q0 -4 -4 -7q-122 -71 -124 -71 q-8 0 -46 47t-52 68q-20 -2 -30 -2t-30 2q-14 -21 -52 -68t-46 -47q-2 0 -124 71q-4 3 -4 7q0 25 51 138q-18 25 -30 52q-149 15 -149 31zM1408 128q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5q0 52 -38 90t-90 38t-90 -38t-38 -90zM1408 1152q0 -53 37.5 -90.5 t90.5 -37.5t90.5 37.5t37.5 90.5q0 52 -38 90t-90 38t-90 -38t-38 -90z" />
+<glyph unicode="&#xf086;" horiz-adv-x="1792" d="M0 768q0 139 94 257t256.5 186.5t353.5 68.5t353.5 -68.5t256.5 -186.5t94 -257t-94 -257t-256.5 -186.5t-353.5 -68.5q-86 0 -176 16q-124 -88 -278 -128q-36 -9 -86 -16h-3q-11 0 -20.5 8t-11.5 21q-1 3 -1 6.5t0.5 6.5t2 6l2.5 5t3.5 5.5t4 5t4.5 5t4 4.5q5 6 23 25 t26 29.5t22.5 29t25 38.5t20.5 44q-124 72 -195 177t-71 224zM616 132q58 -4 88 -4q161 0 309 45t264 129q125 92 192 212t67 254q0 77 -23 152q129 -71 204 -178t75 -230q0 -120 -71 -224.5t-195 -176.5q10 -24 20.5 -44t25 -38.5t22.5 -29t26 -29.5t23 -25q1 -1 4 -4.5 t4.5 -5t4 -5t3.5 -5.5l2.5 -5t2 -6t0.5 -6.5t-1 -6.5q-3 -14 -13 -22t-22 -7q-50 7 -86 16q-154 40 -278 128q-90 -16 -176 -16q-271 0 -472 132z" />
+<glyph unicode="&#xf087;" d="M0 128v640q0 53 37.5 90.5t90.5 37.5h274q36 24 137 155q58 75 107 128q24 25 35.5 85.5t30.5 126.5t62 108q39 37 90 37q84 0 151 -32.5t102 -101.5t35 -186q0 -93 -48 -192h176q104 0 180 -76t76 -179q0 -89 -49 -163q9 -33 9 -69q0 -77 -38 -144q3 -21 3 -43 q0 -101 -60 -178q1 -139 -85 -219.5t-227 -80.5h-36h-93q-96 0 -189.5 22.5t-216.5 65.5q-116 40 -138 40h-288q-53 0 -90.5 37.5t-37.5 90.5zM128 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM384 128h32q13 0 31.5 -3t33 -6.5t38 -11t35 -11.5 t35.5 -12.5t29 -10.5q211 -73 342 -73h121q192 0 192 167q0 26 -5 56q30 16 47.5 52.5t17.5 73.5t-18 69q53 50 53 119q0 25 -10 55.5t-25 47.5q32 1 53.5 47t21.5 81q0 51 -39 89.5t-89 38.5h-352q0 58 48 159.5t48 160.5q0 98 -32 145t-128 47q-26 -26 -38 -85 t-30.5 -125.5t-59.5 -109.5q-22 -23 -77 -91q-4 -5 -23 -30t-31.5 -41t-34.5 -42.5t-40 -44t-38.5 -35.5t-40 -27t-35.5 -9h-32v-640z" />
+<glyph unicode="&#xf088;" d="M0 512v640q0 53 37.5 90.5t90.5 37.5h288q22 0 138 40q128 44 223 66t200 22h112q140 0 226.5 -79t85.5 -216v-5q60 -77 60 -178q0 -22 -3 -43q38 -67 38 -144q0 -36 -9 -69q49 -74 49 -163q0 -103 -76 -179t-180 -76h-176q48 -99 48 -192q0 -118 -35 -186 q-35 -69 -102 -101.5t-151 -32.5q-51 0 -90 37q-34 33 -54 82t-25.5 90.5t-17.5 84.5t-31 64q-48 50 -107 127q-101 131 -137 155h-274q-53 0 -90.5 37.5t-37.5 90.5zM128 1088q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM384 512h32q16 0 35.5 -9 t40 -27t38.5 -35.5t40 -44t34.5 -42.5t31.5 -41t23 -30q55 -68 77 -91q41 -43 59.5 -109.5t30.5 -125.5t38 -85q96 0 128 47t32 145q0 59 -48 160.5t-48 159.5h352q50 0 89 38.5t39 89.5q0 35 -21.5 81t-53.5 47q15 17 25 47.5t10 55.5q0 69 -53 119q18 32 18 69t-17.5 73.5 t-47.5 52.5q5 30 5 56q0 85 -49 126t-136 41h-128q-131 0 -342 -73q-5 -2 -29 -10.5t-35.5 -12.5t-35 -11.5t-38 -11t-33 -6.5t-31.5 -3h-32v-640z" />
+<glyph unicode="&#xf089;" horiz-adv-x="896" d="M0 889q0 37 56 46l502 73l225 455q19 41 49 41v-1339l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500l-364 354q-25 27 -25 48z" />
+<glyph unicode="&#xf08a;" horiz-adv-x="1792" d="M0 940q0 220 127 344t351 124q62 0 126.5 -21.5t120 -58t95.5 -68.5t76 -68q36 36 76 68t95.5 68.5t120 58t126.5 21.5q224 0 351 -124t127 -344q0 -221 -229 -450l-623 -600q-18 -18 -44 -18t-44 18l-624 602q-10 8 -27.5 26t-55.5 65.5t-68 97.5t-53.5 121t-23.5 138z M128 940q0 -168 187 -355l581 -560l580 559q188 188 188 356q0 81 -21.5 143t-55 98.5t-81.5 59.5t-94 31t-98 8t-112 -25.5t-110.5 -64t-86.5 -72t-60 -61.5q-18 -22 -49 -22t-49 22q-24 28 -60 61.5t-86.5 72t-110.5 64t-112 25.5t-98 -8t-94 -31t-81.5 -59.5t-55 -98.5 t-21.5 -143z" />
+<glyph unicode="&#xf08b;" horiz-adv-x="1664" d="M0 288v704q0 119 84.5 203.5t203.5 84.5h320q13 0 22.5 -9.5t9.5 -22.5q0 -4 1 -20t0.5 -26.5t-3 -23.5t-10 -19.5t-20.5 -6.5h-320q-66 0 -113 -47t-47 -113v-704q0 -66 47 -113t113 -47h288h11h13t11.5 -1t11.5 -3t8 -5.5t7 -9t2 -13.5q0 -4 1 -20t0.5 -26.5t-3 -23.5 t-10 -19.5t-20.5 -6.5h-320q-119 0 -203.5 84.5t-84.5 203.5zM384 448v384q0 26 19 45t45 19h448v288q0 26 19 45t45 19t45 -19l544 -544q19 -19 19 -45t-19 -45l-544 -544q-19 -19 -45 -19t-45 19t-19 45v288h-448q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf08c;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM223 1030q0 -51 35.5 -85.5t92.5 -34.5h1q59 0 95 34.5t36 85.5q-1 52 -36 86t-93 34t-94.5 -34t-36.5 -86z M237 122h231v694h-231v-694zM595 122h231v388q0 38 7 56q15 35 45 59.5t74 24.5q116 0 116 -157v-371h231v398q0 154 -73 233t-193 79q-136 0 -209 -117h2v101h-231q3 -66 0 -694z" />
+<glyph unicode="&#xf08d;" horiz-adv-x="1152" d="M0 320q0 123 78.5 221.5t177.5 98.5v512q-52 0 -90 38t-38 90t38 90t90 38h640q52 0 90 -38t38 -90t-38 -90t-90 -38v-512q99 0 177.5 -98.5t78.5 -221.5q0 -26 -19 -45t-45 -19h-429l-51 -483q-2 -12 -10.5 -20.5t-20.5 -8.5h-1q-27 0 -32 27l-76 485h-404q-26 0 -45 19 t-19 45zM416 672q0 -14 9 -23t23 -9t23 9t9 23v448q0 14 -9 23t-23 9t-23 -9t-9 -23v-448z" />
+<glyph unicode="&#xf08e;" horiz-adv-x="1792" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h704q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v320q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-320q0 -119 -84.5 -203.5t-203.5 -84.5h-832 q-119 0 -203.5 84.5t-84.5 203.5zM685 576q0 13 10 23l652 652l-176 176q-19 19 -19 45t19 45t45 19h512q26 0 45 -19t19 -45v-512q0 -26 -19 -45t-45 -19t-45 19l-176 176l-652 -652q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23z" />
+<glyph unicode="&#xf090;" d="M0 448v384q0 26 19 45t45 19h448v288q0 26 19 45t45 19t45 -19l544 -544q19 -19 19 -45t-19 -45l-544 -544q-19 -19 -45 -19t-45 19t-19 45v288h-448q-26 0 -45 19t-19 45zM894.5 78.5q0.5 10.5 3 23.5t10 19.5t20.5 6.5h320q66 0 113 47t47 113v704q0 66 -47 113 t-113 47h-288h-11h-13t-11.5 1t-11.5 3t-8 5.5t-7 9t-2 13.5q0 4 -1 20t-0.5 26.5t3 23.5t10 19.5t20.5 6.5h320q119 0 203.5 -84.5t84.5 -203.5v-704q0 -119 -84.5 -203.5t-203.5 -84.5h-320q-13 0 -22.5 9.5t-9.5 22.5q0 4 -1 20t-0.5 26.5z" />
+<glyph unicode="&#xf091;" horiz-adv-x="1664" d="M0 928v128q0 40 28 68t68 28h288v96q0 66 47 113t113 47h576q66 0 113 -47t47 -113v-96h288q40 0 68 -28t28 -68v-128q0 -71 -41.5 -143t-112 -130t-173 -97.5t-215.5 -44.5q-42 -54 -95 -95q-38 -34 -52.5 -72.5t-14.5 -89.5q0 -54 30.5 -91t97.5 -37q75 0 133.5 -45.5 t58.5 -114.5v-64q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23v64q0 69 58.5 114.5t133.5 45.5q67 0 97.5 37t30.5 91q0 51 -14.5 89.5t-52.5 72.5q-53 41 -95 95q-113 5 -215.5 44.5t-173 97.5t-112 130t-41.5 143zM128 928q0 -78 94.5 -162t235.5 -113q-74 162 -74 371 h-256v-96zM1206 653q141 29 235.5 113t94.5 162v96h-256q0 -209 -74 -371z" />
+<glyph unicode="&#xf092;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-224q-16 0 -24.5 1t-19.5 5t-16 14.5t-5 27.5v239q0 97 -52 142q57 6 102.5 18t94 39t81 66.5t53 105t20.5 150.5q0 121 -79 206q37 91 -8 204 q-28 9 -81 -11t-92 -44l-38 -24q-93 26 -192 26t-192 -26q-16 11 -42.5 27t-83.5 38.5t-86 13.5q-44 -113 -7 -204q-79 -85 -79 -206q0 -85 20.5 -150t52.5 -105t80.5 -67t94 -39t102.5 -18q-40 -36 -49 -103q-21 -10 -45 -15t-57 -5t-65.5 21.5t-55.5 62.5q-19 32 -48.5 52 t-49.5 24l-20 3q-21 0 -29 -4.5t-5 -11.5t9 -14t13 -12l7 -5q22 -10 43.5 -38t31.5 -51l10 -23q13 -38 44 -61.5t67 -30t69.5 -7t55.5 3.5l23 4q0 -38 0.5 -103t0.5 -68q0 -22 -11 -33.5t-22 -13t-33 -1.5h-224q-119 0 -203.5 84.5t-84.5 203.5zM271 315q3 5 13 2 q10 -5 7 -12q-5 -7 -13 -2q-10 5 -7 12zM304 290q6 6 16 -3q9 -11 2 -16q-6 -7 -16 3q-9 11 -2 16zM335 233q-9 13 0 18q9 7 17 -6q9 -12 0 -19q-8 -6 -17 7zM370 206q8 9 20 -3q12 -11 4 -19q-8 -9 -20 3q-13 11 -4 19zM419 168q4 11 19 7q16 -5 13 -16q-4 -12 -19 -6 q-17 4 -13 15zM481 154q0 11 16 11q17 2 17 -11q0 -11 -16 -11q-17 -2 -17 11zM540 158q-2 12 14 15q16 2 18 -9q2 -10 -14 -14t-18 8z" />
+<glyph unicode="&#xf093;" horiz-adv-x="1664" d="M0 -32v320q0 40 28 68t68 28h427q21 -56 70.5 -92t110.5 -36h256q61 0 110.5 36t70.5 92h427q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-1472q-40 0 -68 28t-28 68zM325 936q-17 39 14 69l448 448q18 19 45 19t45 -19l448 -448q31 -30 14 -69q-17 -40 -59 -40 h-256v-448q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45v448h-256q-42 0 -59 40zM1152 64q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM1408 64q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf094;" d="M0 433q0 111 18 217.5t54.5 209.5t100.5 194t150 156q78 59 232 120q194 78 316 78q60 0 175.5 -24t173.5 -24q19 0 57 5t58 5q81 0 118 -50.5t37 -134.5q0 -23 -5 -68t-5 -68q0 -10 1 -18.5t3 -17t4 -13.5t6.5 -16t6.5 -17q16 -40 25 -118.5t9 -136.5q0 -165 -70 -327.5 t-196 -288t-281 -180.5q-124 -44 -326 -44q-57 0 -170 14.5t-169 14.5q-24 0 -72.5 -14.5t-73.5 -14.5q-73 0 -123.5 55.5t-50.5 128.5q0 24 11 68t11 67q0 40 -12.5 120.5t-12.5 121.5zM128 434q0 -40 12.5 -120t12.5 -121q0 -23 -11 -66.5t-11 -65.5t12 -36.5t34 -14.5 q24 0 72.5 11t73.5 11q57 0 169.5 -15.5t169.5 -15.5q181 0 284 36q129 45 235.5 152.5t166 245.5t59.5 275q0 44 -7 113.5t-18 96.5q-12 30 -17 44t-9 36.5t-4 48.5q0 23 5 68.5t5 67.5q0 37 -10 55q-4 1 -13 1q-19 0 -58 -4.5t-59 -4.5q-60 0 -176 24t-175 24 q-43 0 -94.5 -11.5t-85 -23.5t-89.5 -34q-137 -54 -202 -103q-96 -73 -159.5 -189.5t-88 -236t-24.5 -248.5z" />
+<glyph unicode="&#xf095;" horiz-adv-x="1408" d="M0 1069q0 92 51 186q56 101 106 122q25 11 68.5 21t70.5 10q14 0 21 -3q18 -6 53 -76q11 -19 30 -54t35 -63.5t31 -53.5q3 -4 17.5 -25t21.5 -35.5t7 -28.5q0 -20 -28.5 -50t-62 -55t-62 -53t-28.5 -46q0 -9 5 -22.5t8.5 -20.5t14 -24t11.5 -19q76 -137 174 -235 t235 -174q2 -1 19 -11.5t24 -14t20.5 -8.5t22.5 -5q18 0 46 28.5t53 62t55 62t50 28.5q14 0 28.5 -7t35.5 -21.5t25 -17.5q25 -15 53.5 -31t63.5 -35t54 -30q70 -35 76 -53q3 -7 3 -21q0 -27 -10 -70.5t-21 -68.5q-21 -50 -122 -106q-94 -51 -186 -51q-27 0 -52.5 3.5 t-57.5 12.5t-47.5 14.5t-55.5 20.5t-49 18q-98 35 -175 83q-128 79 -264.5 215.5t-215.5 264.5q-48 77 -83 175q-3 9 -18 49t-20.5 55.5t-14.5 47.5t-12.5 57.5t-3.5 52.5z" />
+<glyph unicode="&#xf096;" horiz-adv-x="1408" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q119 0 203.5 -84.5t84.5 -203.5v-832q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM128 288q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v832q0 66 -47 113t-113 47h-832q-66 0 -113 -47 t-47 -113v-832z" />
+<glyph unicode="&#xf097;" horiz-adv-x="1280" d="M0 7v1289q0 34 19.5 62t52.5 41q21 9 44 9h1048q23 0 44 -9q33 -13 52.5 -41t19.5 -62v-1289q0 -34 -19.5 -62t-52.5 -41q-19 -8 -44 -8q-48 0 -83 32l-441 424l-441 -424q-36 -33 -83 -33q-23 0 -44 9q-33 13 -52.5 41t-19.5 62zM128 38l423 406l89 85l89 -85l423 -406 v1242h-1024v-1242z" />
+<glyph unicode="&#xf098;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 905q0 -16 2.5 -34t5 -30.5t9 -33t10 -29.5t12.5 -33t11 -30q60 -164 216.5 -320.5t320.5 -216.5 q6 -2 30 -11t33 -12.5t29.5 -10t33 -9t30.5 -5t34 -2.5q57 0 130.5 34t94.5 80q22 53 22 101q0 11 -2 16q-3 8 -38.5 29.5t-88.5 49.5l-53 29q-5 3 -19 13t-25 15t-21 5q-18 0 -47 -32.5t-57 -65.5t-44 -33q-7 0 -16.5 3.5t-15.5 6.5t-17 9.5t-14 8.5q-99 55 -170.5 126.5 t-126.5 170.5q-2 3 -8.5 14t-9.5 17t-6.5 15.5t-3.5 16.5q0 13 20.5 33.5t45 38.5t45 39.5t20.5 36.5q0 10 -5 21t-15 25t-13 19q-3 6 -15 28.5t-25 45.5t-26.5 47.5t-25 40.5t-16.5 18t-16 2q-48 0 -101 -22q-46 -21 -80 -94.5t-34 -130.5z" />
+<glyph unicode="&#xf099;" horiz-adv-x="1664" d="M44 145q35 -4 78 -4q225 0 401 138q-105 2 -188 64.5t-114 159.5q33 -5 61 -5q43 0 85 11q-112 23 -185.5 111.5t-73.5 205.5v4q68 -38 146 -41q-66 44 -105 115t-39 154q0 88 44 163q121 -149 294.5 -238.5t371.5 -99.5q-8 38 -8 74q0 134 94.5 228.5t228.5 94.5 q140 0 236 -102q109 21 205 78q-37 -115 -142 -178q93 10 186 50q-67 -98 -162 -167q1 -14 1 -42q0 -130 -38 -259.5t-115.5 -248.5t-184.5 -210.5t-258 -146t-323 -54.5q-271 0 -496 145z" />
+<glyph unicode="&#xf09a;" horiz-adv-x="1024" d="M95 631v296h255v218q0 186 104 288.5t277 102.5q147 0 228 -12v-264h-157q-86 0 -116 -36t-30 -108v-189h293l-39 -296h-254v-759h-306v759h-255z" />
+<glyph unicode="&#xf09b;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5q0 -251 -146.5 -451.5t-378.5 -277.5q-27 -5 -39.5 7t-12.5 30v211q0 97 -52 142q57 6 102.5 18t94 39t81 66.5t53 105t20.5 150.5q0 121 -79 206q37 91 -8 204q-28 9 -81 -11t-92 -44 l-38 -24q-93 26 -192 26t-192 -26q-16 11 -42.5 27t-83.5 38.5t-86 13.5q-44 -113 -7 -204q-79 -85 -79 -206q0 -85 20.5 -150t52.5 -105t80.5 -67t94 -39t102.5 -18q-40 -36 -49 -103q-21 -10 -45 -15t-57 -5t-65.5 21.5t-55.5 62.5q-19 32 -48.5 52t-49.5 24l-20 3 q-21 0 -29 -4.5t-5 -11.5t9 -14t13 -12l7 -5q22 -10 43.5 -38t31.5 -51l10 -23q13 -38 44 -61.5t67 -30t69.5 -7t55.5 3.5l23 4q0 -38 0.5 -89t0.5 -54q0 -18 -13 -30t-40 -7q-232 77 -378.5 277.5t-146.5 451.5z" />
+<glyph unicode="&#xf09c;" horiz-adv-x="1664" d="M0 96v576q0 40 28 68t68 28h672v192q0 185 131.5 316.5t316.5 131.5t316.5 -131.5t131.5 -316.5v-256q0 -26 -19 -45t-45 -19h-64q-26 0 -45 19t-19 45v256q0 106 -75 181t-181 75t-181 -75t-75 -181v-192h96q40 0 68 -28t28 -68v-576q0 -40 -28 -68t-68 -28h-960 q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf09d;" horiz-adv-x="1920" d="M0 32v1216q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v608h-1664v-608zM128 1024h1664v224q0 13 -9.5 22.5t-22.5 9.5h-1600 q-13 0 -22.5 -9.5t-9.5 -22.5v-224zM256 128v128h256v-128h-256zM640 128v128h384v-128h-384z" />
+<glyph unicode="&#xf09e;" horiz-adv-x="1408" d="M0 192q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM0 697v135q0 29 21 47q17 17 43 17h5q160 -13 306 -80.5t259 -181.5q114 -113 181.5 -259t80.5 -306q2 -28 -17 -48q-18 -21 -47 -21h-135q-25 0 -43 16.5t-20 41.5q-22 229 -184.5 391.5 t-391.5 184.5q-25 2 -41.5 20t-16.5 43zM0 1201v143q0 28 20 46q18 18 44 18h3q262 -13 501.5 -120t425.5 -294q187 -186 294 -425.5t120 -501.5q2 -27 -18 -47q-18 -20 -46 -20h-143q-26 0 -44.5 17.5t-19.5 42.5q-12 215 -101 408.5t-231.5 336t-336 231.5t-408.5 102 q-25 1 -42.5 19.5t-17.5 43.5z" />
+<glyph unicode="&#xf0a0;" d="M0 160v320q0 25 16 75l197 606q17 53 63 86t101 33h782q55 0 101 -33t63 -86l197 -606q16 -50 16 -75v-320q0 -66 -47 -113t-113 -47h-1216q-66 0 -113 47t-47 113zM128 160q0 -13 9.5 -22.5t22.5 -9.5h1216q13 0 22.5 9.5t9.5 22.5v320q0 13 -9.5 22.5t-22.5 9.5h-1216 q-13 0 -22.5 -9.5t-9.5 -22.5v-320zM178 640h1180l-157 482q-4 13 -16 21.5t-26 8.5h-782q-14 0 -26 -8.5t-16 -21.5zM880 320q0 33 23.5 56.5t56.5 23.5t56.5 -23.5t23.5 -56.5t-23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5zM1136 320q0 33 23.5 56.5t56.5 23.5 t56.5 -23.5t23.5 -56.5t-23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5z" />
+<glyph unicode="&#xf0a1;" horiz-adv-x="1792" d="M0 672v192q0 66 47 113t113 47h480q435 0 896 384q52 0 90 -38t38 -90v-384q53 0 90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5v-384q0 -52 -38 -90t-90 -38q-417 347 -812 380q-58 -19 -91 -66t-31 -100.5t40 -92.5q-20 -33 -23 -65.5t6 -58t33.5 -55t48 -50 t61.5 -50.5q-29 -58 -111.5 -83t-168.5 -11.5t-132 55.5q-7 23 -29.5 87.5t-32 94.5t-23 89t-15 101t3.5 98.5t22 110.5h-122q-66 0 -113 47t-47 113zM768 633q377 -42 768 -341v954q-394 -302 -768 -343v-270z" />
+<glyph unicode="&#xf0a2;" horiz-adv-x="1664" d="M0 128q190 161 287 397.5t97 498.5q0 165 96 262t264 117q-8 18 -8 37q0 40 28 68t68 28t68 -28t28 -68q0 -19 -8 -37q168 -20 264 -117t96 -262q0 -262 97 -498.5t287 -397.5q0 -52 -38 -90t-90 -38h-448q0 -106 -75 -181t-181 -75t-181 75t-75 181h-448q-52 0 -90 38 t-38 90zM183 128h1298q-164 181 -246.5 411.5t-82.5 484.5q0 256 -320 256t-320 -256q0 -254 -82.5 -484.5t-246.5 -411.5zM656 0q0 -73 51.5 -124.5t124.5 -51.5q16 0 16 16t-16 16q-59 0 -101.5 42.5t-42.5 101.5q0 16 -16 16t-16 -16z" />
+<glyph unicode="&#xf0a3;" d="M2 435q-10 42 20 70l138 135l-138 135q-30 28 -20 70q12 41 52 51l188 48l-53 186q-12 41 19 70q29 31 70 19l186 -53l48 188q10 41 51 51q41 12 70 -19l135 -139l135 139q29 30 70 19q41 -10 51 -51l48 -188l186 53q41 12 70 -19q31 -29 19 -70l-53 -186l188 -48 q40 -10 52 -51q10 -42 -20 -70l-138 -135l138 -135q30 -28 20 -70q-12 -41 -52 -51l-188 -48l53 -186q12 -41 -19 -70q-29 -31 -70 -19l-186 53l-48 -188q-10 -40 -51 -52q-12 -2 -19 -2q-31 0 -51 22l-135 138l-135 -138q-28 -30 -70 -20q-41 11 -51 52l-48 188l-186 -53 q-41 -12 -70 19q-31 29 -19 70l53 186l-188 48q-40 10 -52 51z" />
+<glyph unicode="&#xf0a4;" horiz-adv-x="1792" d="M0 128v640q0 53 37.5 90.5t90.5 37.5h288q10 0 21.5 4.5t23.5 14t22.5 18t24 22.5t20.5 21.5t19 21.5t14 17q65 74 100 129q13 21 33 62t37 72t40.5 63t55 49.5t69.5 17.5q125 0 206.5 -67t81.5 -189q0 -68 -22 -128h374q104 0 180 -76t76 -179q0 -105 -75.5 -181 t-180.5 -76h-169q-4 -62 -37 -119q3 -21 3 -43q0 -101 -60 -178q1 -139 -85 -219.5t-227 -80.5q-133 0 -322 69q-164 59 -223 59h-288q-53 0 -90.5 37.5t-37.5 90.5zM128 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM384 128h32q72 0 167 -32 t193.5 -64t179.5 -32q189 0 189 167q0 26 -5 56q30 16 47.5 52.5t17.5 73.5t-18 69q53 50 53 119q0 25 -10 55.5t-25 47.5h331q52 0 90 38t38 90q0 51 -39 89.5t-89 38.5h-576q0 20 15 48.5t33 55t33 68t15 84.5q0 67 -44.5 97.5t-115.5 30.5q-24 0 -90 -139 q-24 -44 -37 -65q-40 -64 -112 -145q-71 -81 -101 -106q-69 -57 -140 -57h-32v-640z" />
+<glyph unicode="&#xf0a5;" horiz-adv-x="1792" d="M0 769q0 103 76 179t180 76h374q-22 60 -22 128q0 122 81.5 189t206.5 67q38 0 69.5 -17.5t55 -49.5t40.5 -63t37 -72t33 -62q35 -55 100 -129q2 -3 14 -17t19 -21.5t20.5 -21.5t24 -22.5t22.5 -18t23.5 -14t21.5 -4.5h288q53 0 90.5 -37.5t37.5 -90.5v-640 q0 -53 -37.5 -90.5t-90.5 -37.5h-288q-59 0 -223 -59q-190 -69 -317 -69q-142 0 -230 77.5t-87 217.5l1 5q-61 76 -61 178q0 22 3 43q-33 57 -37 119h-169q-105 0 -180.5 76t-75.5 181zM128 768q0 -52 38 -90t90 -38h331q-15 -17 -25 -47.5t-10 -55.5q0 -69 53 -119 q-18 -32 -18 -69t17.5 -73.5t47.5 -52.5q-4 -24 -4 -56q0 -85 48.5 -126t135.5 -41q84 0 183 32t194 64t167 32h32v640h-32q-35 0 -67.5 12t-62.5 37t-50 46t-49 54q-2 3 -3.5 4.5t-4 4.5t-4.5 5q-72 81 -112 145q-14 22 -38 68q-1 3 -10.5 22.5t-18.5 36t-20 35.5 t-21.5 30.5t-18.5 11.5q-71 0 -115.5 -30.5t-44.5 -97.5q0 -43 15 -84.5t33 -68t33 -55t15 -48.5h-576q-50 0 -89 -38.5t-39 -89.5zM1536 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf0a6;" d="M0 640q0 125 67 206.5t189 81.5q68 0 128 -22v374q0 104 76 180t179 76q105 0 181 -75.5t76 -180.5v-169q62 -4 119 -37q21 3 43 3q101 0 178 -60q139 1 219.5 -85t80.5 -227q0 -133 -69 -322q-59 -164 -59 -223v-288q0 -53 -37.5 -90.5t-90.5 -37.5h-640 q-53 0 -90.5 37.5t-37.5 90.5v288q0 10 -4.5 21.5t-14 23.5t-18 22.5t-22.5 24t-21.5 20.5t-21.5 19t-17 14q-74 65 -129 100q-21 13 -62 33t-72 37t-63 40.5t-49.5 55t-17.5 69.5zM128 640q0 -24 139 -90q44 -24 65 -37q64 -40 145 -112q81 -71 106 -101q57 -69 57 -140 v-32h640v32q0 72 32 167t64 193.5t32 179.5q0 189 -167 189q-26 0 -56 -5q-16 30 -52.5 47.5t-73.5 17.5t-69 -18q-50 53 -119 53q-25 0 -55.5 -10t-47.5 -25v331q0 52 -38 90t-90 38q-51 0 -89.5 -39t-38.5 -89v-576q-20 0 -48.5 15t-55 33t-68 33t-84.5 15 q-67 0 -97.5 -44.5t-30.5 -115.5zM1152 -64q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf0a7;" d="M0 640q0 38 17.5 69.5t49.5 55t63 40.5t72 37t62 33q55 35 129 100q3 2 17 14t21.5 19t21.5 20.5t22.5 24t18 22.5t14 23.5t4.5 21.5v288q0 53 37.5 90.5t90.5 37.5h640q53 0 90.5 -37.5t37.5 -90.5v-288q0 -59 59 -223q69 -190 69 -317q0 -142 -77.5 -230t-217.5 -87 l-5 1q-76 -61 -178 -61q-22 0 -43 3q-54 -30 -119 -37v-169q0 -105 -76 -180.5t-181 -75.5q-103 0 -179 76t-76 180v374q-54 -22 -128 -22q-121 0 -188.5 81.5t-67.5 206.5zM128 640q0 -71 30.5 -115.5t97.5 -44.5q43 0 84.5 15t68 33t55 33t48.5 15v-576q0 -50 38.5 -89 t89.5 -39q52 0 90 38t38 90v331q46 -35 103 -35q69 0 119 53q32 -18 69 -18t73.5 17.5t52.5 47.5q24 -4 56 -4q85 0 126 48.5t41 135.5q0 84 -32 183t-64 194t-32 167v32h-640v-32q0 -35 -12 -67.5t-37 -62.5t-46 -50t-54 -49q-9 -8 -14 -12q-81 -72 -145 -112 q-22 -14 -68 -38q-3 -1 -22.5 -10.5t-36 -18.5t-35.5 -20t-30.5 -21.5t-11.5 -18.5zM1152 1344q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf0a8;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM251 640q0 -27 18 -45l91 -91l362 -362q18 -18 45 -18t45 18l91 91q18 18 18 45t-18 45l-189 189h502 q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-502l189 189q19 19 19 45t-19 45l-91 91q-18 18 -45 18t-45 -18l-362 -362l-91 -91q-18 -18 -18 -45z" />
+<glyph unicode="&#xf0a9;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM256 576q0 -26 19 -45t45 -19h502l-189 -189q-19 -19 -19 -45t19 -45l91 -91q18 -18 45 -18t45 18 l362 362l91 91q18 18 18 45t-18 45l-91 91l-362 362q-18 18 -45 18t-45 -18l-91 -91q-18 -18 -18 -45t18 -45l189 -189h-502q-26 0 -45 -19t-19 -45v-128z" />
+<glyph unicode="&#xf0aa;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM252 641q0 -27 18 -45l91 -91q18 -18 45 -18t45 18l189 189v-502q0 -26 19 -45t45 -19h128q26 0 45 19 t19 45v502l189 -189q19 -19 45 -19t45 19l91 91q18 18 18 45t-18 45l-362 362l-91 91q-18 18 -45 18t-45 -18l-91 -91l-362 -362q-18 -18 -18 -45z" />
+<glyph unicode="&#xf0ab;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM252 639q0 -27 18 -45l362 -362l91 -91q18 -18 45 -18t45 18l91 91l362 362q18 18 18 45t-18 45l-91 91 q-18 18 -45 18t-45 -18l-189 -189v502q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-502l-189 189q-19 19 -45 19t-45 -19l-91 -91q-18 -18 -18 -45z" />
+<glyph unicode="&#xf0ac;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM226 979q7 -7 12 -8q4 -1 5 -9t2.5 -11t11.5 3q9 -8 3 -19q1 1 44 -27q19 -17 21 -21q3 -11 -10 -18 q-1 2 -9 9t-9 4q-3 -5 0.5 -18.5t10.5 -12.5q-7 0 -9.5 -16t-2.5 -35.5t-1 -23.5l2 -1q-3 -12 5.5 -34.5t21.5 -19.5q-13 -3 20 -43q6 -8 8 -9q3 -2 12 -7.5t15 -10t10 -10.5q4 -5 10 -22.5t14 -23.5q-2 -6 9.5 -20t10.5 -23q-1 0 -2.5 -1t-2.5 -1q3 -7 15.5 -14t15.5 -13 q1 -3 2 -10t3 -11t8 -2q2 20 -24 62q-15 25 -17 29q-3 5 -5.5 15.5t-4.5 14.5q2 0 6 -1.5t8.5 -3.5t7.5 -4t2 -3q-3 -7 2 -17.5t12 -18.5t17 -19t12 -13q6 -6 14 -19.5t0 -13.5q9 0 20 -10t17 -20q5 -8 8 -26t5 -24q2 -7 8.5 -13.5t12.5 -9.5l16 -8t13 -7q5 -2 18.5 -10.5 t21.5 -11.5q10 -4 16 -4t14.5 2.5t13.5 3.5q15 2 29 -15t21 -21q36 -19 55 -11q-2 -1 0.5 -7.5t8 -15.5t9 -14.5t5.5 -8.5q5 -6 18 -15t18 -15q6 4 7 9q-3 -8 7 -20t18 -10q14 3 14 32q-31 -15 -49 18q0 1 -2.5 5.5t-4 8.5t-2.5 8.5t0 7.5t5 3q9 0 10 3.5t-2 12.5t-4 13 q-1 8 -11 20t-12 15q-5 -9 -16 -8t-16 9q0 -1 -1.5 -5.5t-1.5 -6.5q-13 0 -15 1q1 3 2.5 17.5t3.5 22.5q1 4 5.5 12t7.5 14.5t4 12.5t-4.5 9.5t-17.5 2.5q-19 -1 -26 -20q-1 -3 -3 -10.5t-5 -11.5t-9 -7q-7 -3 -24 -2t-24 5q-13 8 -22.5 29t-9.5 37q0 10 2.5 26.5t3 25 t-5.5 24.5q3 2 9 9.5t10 10.5q2 1 4.5 1.5t4.5 0t4 1.5t3 6q-1 1 -4 3q-3 3 -4 3q7 -3 28.5 1.5t27.5 -1.5q15 -11 22 2q0 1 -2.5 9.5t-0.5 13.5q5 -27 29 -9q3 -3 15.5 -5t17.5 -5q3 -2 7 -5.5t5.5 -4.5t5 0.5t8.5 6.5q10 -14 12 -24q11 -40 19 -44q7 -3 11 -2t4.5 9.5 t0 14t-1.5 12.5l-1 8v18l-1 8q-15 3 -18.5 12t1.5 18.5t15 18.5q1 1 8 3.5t15.5 6.5t12.5 8q21 19 15 35q7 0 11 9q-1 0 -5 3t-7.5 5t-4.5 2q9 5 2 16q5 3 7.5 11t7.5 10q9 -12 21 -2q7 8 1 16q5 7 20.5 10.5t18.5 9.5q7 -2 8 2t1 12t3 12q4 5 15 9t13 5l17 11q3 4 0 4 q18 -2 31 11q10 11 -6 20q3 6 -3 9.5t-15 5.5q3 1 11.5 0.5t10.5 1.5q15 10 -7 16q-17 5 -43 -12q-2 -1 -9.5 -9.5t-13.5 -9.5q2 0 4.5 5t5 11t3.5 7q6 7 22 15q14 6 52 12q34 8 51 -11q-2 2 9.5 13t14.5 12q3 2 15 4.5t15 7.5l2 22q-12 -1 -17.5 7t-6.5 21q0 -2 -6 -8 q0 7 -4.5 8t-11.5 -1t-9 -1q-10 3 -15 7.5t-8 16.5t-4 15q-2 5 -9.5 10.5t-9.5 10.5q-1 2 -2.5 5.5t-3 6.5t-4 5.5t-5.5 2.5t-7 -5t-7.5 -10t-4.5 -5q-3 2 -6 1.5t-4.5 -1t-4.5 -3t-5 -3.5q-3 -2 -8.5 -3t-8.5 -2q15 5 -1 11q-10 4 -16 3q9 4 7.5 12t-8.5 14h5 q-1 4 -8.5 8.5t-17.5 8.5t-13 6q-8 5 -34 9.5t-33 0.5q-5 -6 -4.5 -10.5t4 -14t3.5 -12.5q1 -6 -5.5 -13t-6.5 -12q0 -7 14 -15.5t10 -21.5q-3 -8 -16 -16t-16 -12q-5 -8 -1.5 -18.5t10.5 -16.5q2 -2 1.5 -4t-3.5 -4.5t-5.5 -4t-6.5 -3.5l-3 -2q-11 -5 -20.5 6t-13.5 26 q-7 25 -16 30q-23 8 -29 -1q-5 13 -41 26q-25 9 -58 4q6 1 0 15q-7 15 -19 12q3 6 4 17.5t1 13.5q3 13 12 23q1 1 7 8.5t9.5 13.5t0.5 6q35 -4 50 11q5 5 11.5 17t10.5 17q9 6 14 5.5t14.5 -5.5t14.5 -5q14 -1 15.5 11t-7.5 20q12 -1 3 17q-5 7 -8 9q-12 4 -27 -5 q-8 -4 2 -8q-1 1 -9.5 -10.5t-16.5 -17.5t-16 5q-1 1 -5.5 13.5t-9.5 13.5q-8 0 -16 -15q3 8 -11 15t-24 8q19 12 -8 27q-7 4 -20.5 5t-19.5 -4q-5 -7 -5.5 -11.5t5 -8t10.5 -5.5t11.5 -4t8.5 -3q14 -10 8 -14q-2 -1 -8.5 -3.5t-11.5 -4.5t-6 -4q-3 -4 0 -14t-2 -14 q-5 5 -9 17.5t-7 16.5q7 -9 -25 -6l-10 1q-4 0 -16 -2t-20.5 -1t-13.5 8q-4 8 0 20q1 4 4 2q-4 3 -11 9.5t-10 8.5q-46 -15 -94 -41q6 -1 12 1q5 2 13 6.5t10 5.5q34 14 42 7l5 5q14 -16 20 -25q-7 4 -30 1q-20 -6 -22 -12q7 -12 5 -18q-4 3 -11.5 10t-14.5 11t-15 5 q-16 0 -22 -1q-146 -80 -235 -222zM877 26q0 -6 2 -16q206 36 351 189q-3 3 -12.5 4.5t-12.5 3.5q-18 7 -24 8q1 7 -2.5 13t-8 9t-12.5 8t-11 7q-2 2 -7 6t-7 5.5t-7.5 4.5t-8.5 2t-10 -1l-3 -1q-3 -1 -5.5 -2.5t-5.5 -3t-4 -3t0 -2.5q-21 17 -36 22q-5 1 -11 5.5t-10.5 7 t-10 1.5t-11.5 -7q-5 -5 -6 
-15t-2 -13q-7 5 0 17.5t2 18.5q-3 6 -10.5 4.5t-12 -4.5t-11.5 -8.5t-9 -6.5t-8.5 -5.5t-8.5 -7.5q-3 -4 -6 -12t-5 -11q-2 4 -11.5 6.5t-9.5 5.5q2 -10 4 -35t5 -38q7 -31 -12 -48q-27 -25 -29 -40q-4 -22 12 -26q0 -7 -8 -20.5t-7 -21.5z" />
+<glyph unicode="&#xf0ad;" horiz-adv-x="1664" d="M21 0q0 53 38 91l681 681q39 -98 114.5 -173.5t173.5 -114.5l-682 -682q-37 -37 -90 -37q-52 0 -91 37l-106 108q-38 36 -38 90zM256 64q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM768 960q0 185 131.5 316.5t316.5 131.5q58 0 121.5 -16.5 t107.5 -46.5q16 -11 16 -28t-16 -28l-293 -169v-224l193 -107q5 3 79 48.5t135.5 81t70.5 35.5q15 0 23.5 -10t8.5 -25q0 -39 -23 -106q-47 -134 -164.5 -217.5t-258.5 -83.5q-185 0 -316.5 131.5t-131.5 316.5z" />
+<glyph unicode="&#xf0ae;" horiz-adv-x="1792" d="M0 64v256q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 576v256q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 1088v256q0 26 19 45t45 19h1664 q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM640 640h1024v128h-1024v-128zM1024 128h640v128h-640v-128zM1280 1152h384v128h-384v-128z" />
+<glyph unicode="&#xf0b0;" horiz-adv-x="1408" d="M5 1241q17 39 59 39h1280q42 0 59 -39q17 -41 -14 -70l-493 -493v-742q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-256 256q-19 19 -19 45v486l-493 493q-31 29 -14 70z" />
+<glyph unicode="&#xf0b1;" horiz-adv-x="1792" d="M0 160v480h672v-160q0 -26 19 -45t45 -19h320q26 0 45 19t19 45v160h672v-480q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113zM0 736v384q0 66 47 113t113 47h352v160q0 40 28 68t68 28h576q40 0 68 -28t28 -68v-160h352q66 0 113 -47t47 -113v-384h-1792z M640 1280h512v128h-512v-128zM768 512v128h256v-128h-256z" />
+<glyph unicode="&#xf0b2;" d="M0 -64v448q0 42 40 59q39 17 69 -14l144 -144l355 355l-355 355l-144 -144q-19 -19 -45 -19q-12 0 -24 5q-40 17 -40 59v448q0 26 19 45t45 19h448q42 0 59 -40q17 -39 -14 -69l-144 -144l355 -355l355 355l-144 144q-31 30 -14 69q17 40 59 40h448q26 0 45 -19t19 -45 v-448q0 -42 -39 -59q-13 -5 -25 -5q-26 0 -45 19l-144 144l-355 -355l355 -355l144 144q29 31 70 14q39 -17 39 -59v-448q0 -26 -19 -45t-45 -19h-448q-42 0 -59 40q-17 39 14 69l144 144l-355 355l-355 -355l144 -144q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19 t-19 45z" />
+<glyph unicode="&#xf0c0;" horiz-adv-x="1920" d="M0 671q0 353 124 353q6 0 43.5 -21t97.5 -42.5t119 -21.5q67 0 133 23q-5 -37 -5 -66q0 -139 81 -256q-162 -5 -265 -128h-134q-82 0 -138 40.5t-56 118.5zM128 1280q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181zM256 3q0 53 3.5 103.5 t14 109t26.5 108.5t43 97.5t62 81t85.5 53.5t111.5 20q10 0 43 -21.5t73 -48t107 -48t135 -21.5t135 21.5t107 48t73 48t43 21.5q61 0 111.5 -20t85.5 -53.5t62 -81t43 -97.5t26.5 -108.5t14 -109t3.5 -103.5q0 -120 -73 -189.5t-194 -69.5h-874q-121 0 -194 69.5t-73 189.5 zM576 896q0 159 112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5t-271.5 112.5t-112.5 271.5zM1280 1280q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181zM1327 640q81 117 81 256q0 29 -5 66q66 -23 133 -23 q59 0 119 21.5t97.5 42.5t43.5 21q124 0 124 -353q0 -78 -56 -118.5t-138 -40.5h-134q-103 123 -265 128z" />
+<glyph unicode="&#xf0c1;" horiz-adv-x="1664" d="M16 1088q0 120 85 203l147 146q83 83 203 83q121 0 204 -85l206 -207q83 -83 83 -203q0 -123 -88 -209l88 -88q86 88 208 88q120 0 204 -84l208 -208q84 -84 84 -204t-85 -203l-147 -146q-83 -83 -203 -83q-121 0 -204 85l-206 207q-83 83 -83 203q0 123 88 209l-88 88 q-86 -88 -208 -88q-120 0 -204 84l-208 208q-84 84 -84 204zM208 1088q0 -40 28 -68l208 -208q27 -27 68 -27q42 0 72 31q-3 3 -19 18.5t-21.5 21.5t-15 19t-13 25.5t-3.5 27.5q0 40 28 68t68 28q15 0 27.5 -3.5t25.5 -13t19 -15t21.5 -21.5t18.5 -19q33 31 33 73 q0 40 -28 68l-206 207q-28 28 -68 28q-39 0 -68 -27l-147 -146q-28 -28 -28 -67zM911 383q0 -40 28 -68l206 -207q27 -27 68 -27q40 0 68 26l147 146q28 28 28 67q0 40 -28 68l-208 208q-28 28 -68 28q-42 0 -72 -32q3 -3 19 -18.5t21.5 -21.5t15 -19t13 -25.5t3.5 -27.5 q0 -40 -28 -68t-68 -28q-15 0 -27.5 3.5t-25.5 13t-19 15t-21.5 21.5t-18.5 19q-33 -31 -33 -73z" />
+<glyph unicode="&#xf0c2;" horiz-adv-x="1920" d="M0 448q0 132 71 241.5t187 163.5q-2 28 -2 43q0 212 150 362t362 150q158 0 286.5 -88t187.5 -230q70 62 166 62q106 0 181 -75t75 -181q0 -75 -41 -138q129 -30 213 -134.5t84 -239.5q0 -159 -112.5 -271.5t-271.5 -112.5h-1088q-185 0 -316.5 131.5t-131.5 316.5z" />
+<glyph unicode="&#xf0c3;" horiz-adv-x="1664" d="M115.5 -64.5q-34.5 63.5 21.5 152.5l503 793v399h-64q-26 0 -45 19t-19 45t19 45t45 19h512q26 0 45 -19t19 -45t-19 -45t-45 -19h-64v-399l503 -793q56 -89 21.5 -152.5t-140.5 -63.5h-1152q-106 0 -140.5 63.5zM476 384h712l-272 429l-20 31v37v399h-128v-399v-37 l-20 -31z" />
+<glyph unicode="&#xf0c4;" horiz-adv-x="1792" d="M1 157q7 76 56 147t131 124q132 84 278 84q83 0 151 -31q9 13 22 22l122 73l-122 73q-13 9 -22 22q-68 -31 -151 -31q-146 0 -278 84q-82 53 -131 124t-56 147q-5 59 15.5 113t63.5 93q85 79 222 79q145 0 277 -84q83 -52 132 -123t56 -148q4 -48 -10 -97q4 -1 12 -5 l110 -66l690 387q14 8 31 8q16 0 29 -7l128 -64q30 -16 35 -51q3 -36 -25 -56l-507 -398l507 -398q28 -20 25 -56q-5 -35 -35 -51l-128 -64q-13 -7 -29 -7q-17 0 -31 8l-690 387l-110 -66q-8 -4 -12 -5q14 -49 10 -97q-7 -77 -56 -147.5t-132 -123.5q-132 -84 -277 -84 q-136 0 -222 78q-90 84 -79 207zM168 176q-25 -66 21 -108q39 -36 113 -36q100 0 192 59q81 51 106 117t-21 108q-39 36 -113 36q-100 0 -192 -59q-81 -51 -106 -117zM168 976q25 -66 106 -117q92 -59 192 -59q74 0 113 36q46 42 21 108t-106 117q-92 59 -192 59 q-74 0 -113 -36q-46 -42 -21 -108zM672 448l9 -8q2 -2 7 -6q4 -4 11 -12t11 -12l26 -26l160 96l96 -32l736 576l-128 64l-768 -431v-113zM672 704l96 -58v11q0 36 33 56l14 8l-79 47l-26 -26q-3 -3 -10 -11t-12 -12q-2 -2 -4 -3.5t-3 -2.5zM896 576q0 26 19 45t45 19t45 -19 t19 -45t-19 -45t-45 -19t-45 19t-19 45zM1018 391l582 -327l128 64l-520 408l-177 -138q-2 -3 -13 -7z" />
+<glyph unicode="&#xf0c5;" horiz-adv-x="1792" d="M0 224v672q0 40 20 88t48 76l408 408q28 28 76 48t88 20h416q40 0 68 -28t28 -68v-328q68 40 128 40h416q40 0 68 -28t28 -68v-1216q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v288h-544q-40 0 -68 28t-28 68zM128 256h512v256q0 40 20 88t48 76l316 316v416h-384 v-416q0 -40 -28 -68t-68 -28h-416v-640zM213 1024h299v299zM768 -128h896v1152h-384v-416q0 -40 -28 -68t-68 -28h-416v-640zM853 640h299v299z" />
+<glyph unicode="&#xf0c6;" horiz-adv-x="1408" d="M4 1023q0 159 110 270t269 111q158 0 273 -113l605 -606q10 -10 10 -22q0 -16 -30.5 -46.5t-46.5 -30.5q-13 0 -23 10l-606 607q-79 77 -181 77q-106 0 -179 -75t-73 -181q0 -105 76 -181l776 -777q63 -63 145 -63q64 0 106 42t42 106q0 82 -63 145l-581 581 q-26 24 -60 24q-29 0 -48 -19t-19 -48q0 -32 25 -59l410 -410q10 -10 10 -22q0 -16 -31 -47t-47 -31q-12 0 -22 10l-410 410q-63 61 -63 149q0 82 57 139t139 57q88 0 149 -63l581 -581q100 -98 100 -235q0 -117 -79 -196t-196 -79q-135 0 -235 100l-777 776 q-113 115 -113 271z" />
+<glyph unicode="&#xf0c7;" d="M0 -32v1344q0 40 28 68t68 28h928q40 0 88 -20t76 -48l280 -280q28 -28 48 -76t20 -88v-928q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 0h128v416q0 40 28 68t68 28h832q40 0 68 -28t28 -68v-416h128v896q0 14 -10 38.5t-20 34.5l-281 281q-10 10 -34 20 t-39 10v-416q0 -40 -28 -68t-68 -28h-576q-40 0 -68 28t-28 68v416h-128v-1280zM384 0h768v384h-768v-384zM640 928q0 -13 9.5 -22.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 22.5v320q0 13 -9.5 22.5t-22.5 9.5h-192q-13 0 -22.5 -9.5t-9.5 -22.5v-320z" />
+<glyph unicode="&#xf0c8;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5z" />
+<glyph unicode="&#xf0c9;" d="M0 64v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM0 576v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM0 1088v128q0 26 19 45t45 19h1408 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0ca;" horiz-adv-x="1792" d="M0 128q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM0 640q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM0 1152q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM512 32v192 q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5zM512 544v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5z M512 1056v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0cb;" horiz-adv-x="1792" d="M15 438q0 51 23.5 93t56.5 68t66 47.5t56.5 43.5t23.5 45q0 25 -14.5 38.5t-39.5 13.5q-46 0 -81 -58l-85 59q24 51 71.5 79.5t105.5 28.5q73 0 123 -41.5t50 -112.5q0 -50 -34 -91.5t-75 -64.5t-75.5 -50.5t-35.5 -52.5h127v60h105v-159h-362q-6 36 -6 54zM19 -190 l57 88q49 -45 106 -45q29 0 50.5 14.5t21.5 42.5q0 64 -105 56l-26 56q8 10 32.5 43.5t42.5 54t37 38.5v1q-16 0 -48.5 -1t-48.5 -1v-53h-106v152h333v-88l-95 -115q51 -12 81 -49t30 -88q0 -80 -54.5 -126t-135.5 -46q-106 0 -172 66zM34 1400l136 127h106v-404h108v-99 h-335v99h107q0 41 0.5 122t0.5 121v12h-2q-8 -17 -50 -54zM512 32v192q0 14 9 23t23 9h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5zM512 544v192q0 14 9 23t23 9h1216q13 0 22.5 -9.5t9.5 -22.5v-192 q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5zM512 1056v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0cc;" horiz-adv-x="1792" d="M0 544v64q0 14 9 23t23 9h1728q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-1728q-14 0 -23 9t-9 23zM384 972q0 181 134 309q133 127 393 127q50 0 167 -19q66 -12 177 -48q10 -38 21 -118q14 -123 14 -183q0 -18 -5 -45l-12 -3l-84 6l-14 2q-50 149 -103 205 q-88 91 -210 91q-114 0 -182 -59q-67 -58 -67 -146q0 -73 66 -140t279 -129q69 -20 173 -66q58 -28 95 -52h-743q-28 35 -51 80q-48 97 -48 188zM414 154q-1 30 0 68l2 37v44l102 2q15 -34 30 -71t22.5 -56t12.5 -27q35 -57 80 -94q43 -36 105 -57q59 -22 132 -22 q64 0 139 27q77 26 122 86q47 61 47 129q0 84 -81 157q-34 29 -137 71h411q7 -39 7 -92q0 -111 -41 -212q-23 -55 -71 -104q-37 -35 -109 -81q-80 -48 -153 -66q-80 -21 -203 -21q-114 0 -195 23l-140 40q-57 16 -72 28q-8 8 -8 22v13q0 108 -2 156z" />
+<glyph unicode="&#xf0cd;" d="M0 -32v-64q0 -14 9 -23t23 -9h1472q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-1472q-14 0 -23 -9t-9 -23zM0 1405q13 1 40 1q60 0 112 -4q132 -7 166 -7q86 0 168 3q116 4 146 5q56 0 86 2l-1 -14l2 -64v-9q-60 -9 -124 -9q-60 0 -79 -25q-13 -14 -13 -132q0 -13 0.5 -32.5 t0.5 -25.5l1 -229l14 -280q6 -124 51 -202q35 -59 96 -92q88 -47 177 -47q104 0 191 28q56 18 99 51q48 36 65 64q36 56 53 114q21 73 21 229q0 79 -3.5 128t-11 122.5t-13.5 159.5l-4 59q-5 67 -24 88q-34 35 -77 34l-100 -2l-14 3l2 86h84l205 -10q76 -3 196 10l18 -2 q6 -38 6 -51q0 -7 -4 -31q-45 -12 -84 -13q-73 -11 -79 -17q-15 -15 -15 -41q0 -7 1.5 -27t1.5 -31q8 -19 22 -396q6 -195 -15 -304q-15 -76 -41 -122q-38 -65 -112 -123q-75 -57 -182 -89q-109 -33 -255 -33q-167 0 -284 46q-119 47 -179 122q-61 76 -83 195 q-16 80 -16 237v333q0 188 -17 213q-25 36 -147 39q-37 2 -45 4z" />
+<glyph unicode="&#xf0ce;" horiz-adv-x="1664" d="M0 160v1088q0 66 47 113t113 47h1344q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-1344q-66 0 -113 47t-47 113zM128 160q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM128 544q0 -14 9 -23t23 -9h320 q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM128 928q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM640 160q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9 t-9 -23v-192zM640 544q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM640 928q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM1152 160q0 -14 9 -23t23 -9h320q14 0 23 9t9 23 v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM1152 544q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM1152 928q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192z" />
+<glyph unicode="&#xf0d0;" horiz-adv-x="1664" d="M27 160q0 27 18 45l1286 1286q18 18 45 18t45 -18l198 -198q18 -18 18 -45t-18 -45l-1286 -1286q-18 -18 -45 -18t-45 18l-198 198q-18 18 -18 45zM128 1408l98 30l30 98l30 -98l98 -30l-98 -30l-30 -98l-30 98zM320 1216l196 60l60 196l60 -196l196 -60l-196 -60 l-60 -196l-60 196zM768 1408l98 30l30 98l30 -98l98 -30l-98 -30l-30 -98l-30 98zM1083 1062l107 -107l293 293l-107 107zM1408 768l98 30l30 98l30 -98l98 -30l-98 -30l-30 -98l-30 98z" />
+<glyph unicode="&#xf0d1;" horiz-adv-x="1792" d="M64 192q0 26 19 45t45 19v320q0 8 -0.5 35t0 38t2.5 34.5t6.5 37t14 30.5t22.5 30l198 198q19 19 50.5 32t58.5 13h160v192q0 26 19 45t45 19h1024q26 0 45 -19t19 -45v-1024q0 -15 -4 -26.5t-13.5 -18.5t-16.5 -11.5t-23.5 -6t-22.5 -2t-25.5 0t-22.5 0.5 q0 -106 -75 -181t-181 -75t-181 75t-75 181h-384q0 -106 -75 -181t-181 -75t-181 75t-75 181h-64q-3 0 -22.5 -0.5t-25.5 0t-22.5 2t-23.5 6t-16.5 11.5t-13.5 18.5t-4 26.5zM256 640h384v256h-158q-13 0 -22 -9l-195 -195q-9 -9 -9 -22v-30zM384 128q0 -52 38 -90t90 -38 t90 38t38 90t-38 90t-90 38t-90 -38t-38 -90zM1280 128q0 -52 38 -90t90 -38t90 38t38 90t-38 90t-90 38t-90 -38t-38 -90z" />
+<glyph unicode="&#xf0d2;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103q-111 0 -218 32q59 93 78 164q9 34 54 211q20 -39 73 -67.5t114 -28.5q121 0 216 68.5t147 188.5t52 270q0 114 -59.5 214t-172.5 163t-255 63 q-105 0 -196 -29t-154.5 -77t-109 -110.5t-67 -129.5t-21.5 -134q0 -104 40 -183t117 -111q30 -12 38 20q2 7 8 31t8 30q6 23 -11 43q-51 61 -51 151q0 151 104.5 259.5t273.5 108.5q151 0 235.5 -82t84.5 -213q0 -170 -68.5 -289t-175.5 -119q-61 0 -98 43.5t-23 104.5 q8 35 26.5 93.5t30 103t11.5 75.5q0 50 -27 83t-77 33q-62 0 -105 -57t-43 -142q0 -73 25 -122l-99 -418q-17 -70 -13 -177q-206 91 -333 281t-127 423z" />
+<glyph unicode="&#xf0d3;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-725q85 122 108 210q9 34 53 209q21 -39 73.5 -67t112.5 -28q181 0 295.5 147.5t114.5 373.5q0 84 -35 162.5t-96.5 139t-152.5 97t-197 36.5 q-104 0 -194.5 -28.5t-153 -76.5t-107.5 -109.5t-66.5 -128t-21.5 -132.5q0 -102 39.5 -180t116.5 -110q13 -5 23.5 0t14.5 19q10 44 15 61q6 23 -11 42q-50 62 -50 150q0 150 103.5 256.5t270.5 106.5q149 0 232.5 -81t83.5 -210q0 -168 -67.5 -286t-173.5 -118 q-60 0 -97 43.5t-23 103.5q8 34 26.5 92.5t29.5 102t11 74.5q0 49 -26.5 81.5t-75.5 32.5q-61 0 -103.5 -56.5t-42.5 -139.5q0 -72 24 -121l-98 -414q-24 -100 -7 -254h-183q-119 0 -203.5 84.5t-84.5 203.5z" />
+<glyph unicode="&#xf0d4;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM276 309q0 -43 18.5 -77.5t48.5 -56.5t69 -37t77.5 -21t76.5 -6q60 0 120.5 15.5t113.5 46t86 82.5t33 117 q0 49 -20 89.5t-49 66.5t-58 47.5t-49 44t-20 44.5t15.5 42.5t37.5 39.5t44 42t37.5 59.5t15.5 82.5q0 60 -22.5 99.5t-72.5 90.5h83l88 64h-265q-85 0 -161 -32t-127.5 -98t-51.5 -153q0 -93 64.5 -154.5t158.5 -61.5q22 0 43 3q-13 -29 -13 -54q0 -44 40 -94 q-175 -12 -257 -63q-47 -29 -75.5 -73t-28.5 -95zM395 338q0 46 25 80t65.5 51.5t82 25t84.5 7.5q20 0 31 -2q2 -1 23 -16.5t26 -19t23 -18t24.5 -22t19 -22.5t17 -26t9 -26.5t4.5 -31.5q0 -76 -58.5 -112.5t-139.5 -36.5q-41 0 -80.5 9.5t-75.5 28.5t-58 53t-22 78z M462 969q0 61 32 104t92 43q53 0 93.5 -45t58 -101t17.5 -107q0 -60 -33 -99.5t-92 -39.5q-53 0 -93 42.5t-57.5 96.5t-17.5 106zM960 672h128v-160h64v160h128v64h-128v128h-64v-128h-128v-64z" />
+<glyph unicode="&#xf0d5;" horiz-adv-x="1664" d="M32 182q0 81 44.5 150t118.5 115q131 82 404 100q-32 42 -47.5 74t-15.5 73q0 36 21 85q-46 -4 -68 -4q-148 0 -249.5 96.5t-101.5 244.5q0 82 36 159t99 131q77 66 182.5 98t217.5 32h418l-138 -88h-131q74 -63 112 -133t38 -160q0 -72 -24.5 -129.5t-59 -93t-69.5 -65 t-59.5 -61.5t-24.5 -66q0 -36 32 -70.5t77.5 -68t90.5 -73.5t77 -104t32 -142q0 -90 -48 -173q-72 -122 -211 -179.5t-298 -57.5q-132 0 -246.5 41.5t-171.5 137.5q-37 60 -37 131zM218 228q0 -70 35 -123.5t91.5 -83t119 -44t127.5 -14.5q58 0 111.5 13t99 39t73 73 t27.5 109q0 25 -7 49t-14.5 42t-27 41.5t-29.5 35t-38.5 34.5t-36.5 29t-41.5 30t-36.5 26q-16 2 -48 2q-53 0 -105 -7t-107.5 -25t-97 -46t-68.5 -74.5t-27 -105.5zM324 1222q0 -46 10 -97.5t31.5 -103t52 -92.5t75 -67t96.5 -26q38 0 78 16.5t66 43.5q53 57 53 159 q0 58 -17 125t-48.5 129.5t-84.5 103.5t-117 41q-42 0 -82.5 -19.5t-65.5 -52.5q-47 -59 -47 -160zM1084 731v108h212v217h105v-217h213v-108h-213v-219h-105v219h-212z" />
+<glyph unicode="&#xf0d6;" horiz-adv-x="1920" d="M0 64v1152q0 26 19 45t45 19h1792q26 0 45 -19t19 -45v-1152q0 -26 -19 -45t-45 -19h-1792q-26 0 -45 19t-19 45zM128 384q106 0 181 -75t75 -181h1152q0 106 75 181t181 75v512q-106 0 -181 75t-75 181h-1152q0 -106 -75 -181t-181 -75v-512zM640 640q0 70 21 142 t59.5 134t101.5 101t138 39t138 -39t101.5 -101t59.5 -134t21 -142t-21 -142t-59.5 -134t-101.5 -101t-138 -39t-138 39t-101.5 101t-59.5 134t-21 142zM762 791l77 -80q42 37 55 57h2v-288h-128v-96h384v96h-128v448h-114z" />
+<glyph unicode="&#xf0d7;" horiz-adv-x="1024" d="M0 832q0 26 19 45t45 19h896q26 0 45 -19t19 -45t-19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45z" />
+<glyph unicode="&#xf0d8;" horiz-adv-x="1024" d="M0 320q0 26 19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0d9;" horiz-adv-x="640" d="M64 640q0 26 19 45l448 448q19 19 45 19t45 -19t19 -45v-896q0 -26 -19 -45t-45 -19t-45 19l-448 448q-19 19 -19 45z" />
+<glyph unicode="&#xf0da;" horiz-adv-x="640" d="M0 192v896q0 26 19 45t45 19t45 -19l448 -448q19 -19 19 -45t-19 -45l-448 -448q-19 -19 -45 -19t-45 19t-19 45z" />
+<glyph unicode="&#xf0db;" horiz-adv-x="1664" d="M0 32v1216q0 66 47 113t113 47h1344q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1344q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h608v1152h-640v-1120zM896 0h608q13 0 22.5 9.5t9.5 22.5v1120h-640v-1152z" />
+<glyph unicode="&#xf0dc;" horiz-adv-x="1024" d="M0 448q0 26 19 45t45 19h896q26 0 45 -19t19 -45t-19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45zM0 832q0 26 19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0dd;" horiz-adv-x="1024" d="M0 448q0 26 19 45t45 19h896q26 0 45 -19t19 -45t-19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45z" />
+<glyph unicode="&#xf0de;" horiz-adv-x="1024" d="M0 832q0 26 19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0e0;" horiz-adv-x="1792" d="M0 32v794q44 -49 101 -87q362 -246 497 -345q57 -42 92.5 -65.5t94.5 -48t110 -24.5h1h1q51 0 110 24.5t94.5 48t92.5 65.5q170 123 498 345q57 39 100 87v-794q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113zM0 1098q0 78 41.5 130t118.5 52h1472 q65 0 112.5 -47t47.5 -113q0 -79 -49 -151t-122 -123q-376 -261 -468 -325q-10 -7 -42.5 -30.5t-54 -38t-52 -32.5t-57.5 -27t-50 -9h-1h-1q-23 0 -50 9t-57.5 27t-52 32.5t-54 38t-42.5 30.5q-91 64 -262 182.5t-205 142.5q-62 42 -117 115.5t-55 136.5z" />
+<glyph unicode="&#xf0e1;" d="M0 1217q0 74 51.5 122.5t134.5 48.5t133 -48.5t51 -122.5q1 -73 -50.5 -122t-135.5 -49h-2q-82 0 -132 49t-50 122zM19 -80v991h330v-991h-330zM531 -80q2 399 2 647t-1 296l-1 48h329v-144h-2q20 32 41 56t56.5 52t87 43.5t114.5 15.5q171 0 275 -113.5t104 -332.5v-568 h-329v530q0 105 -40.5 164.5t-126.5 59.5q-63 0 -105.5 -34.5t-63.5 -85.5q-11 -30 -11 -81v-553h-329z" />
+<glyph unicode="&#xf0e2;" d="M0 832v448q0 42 40 59q39 17 69 -14l130 -129q107 101 244.5 156.5t284.5 55.5q156 0 298 -61t245 -164t164 -245t61 -298t-61 -298t-164 -245t-245 -164t-298 -61q-172 0 -327 72.5t-264 204.5q-7 10 -6.5 22.5t8.5 20.5l137 138q10 9 25 9q16 -2 23 -12 q73 -95 179 -147t225 -52q104 0 198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5t-40.5 198.5t-109.5 163.5t-163.5 109.5t-198.5 40.5q-98 0 -188 -35.5t-160 -101.5l137 -138q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0e3;" horiz-adv-x="1792" d="M40 736q0 13 4.5 26t9 22t15.5 22t16.5 18.5t20.5 19t18 16.5q30 28 68 28q10 0 18 -1.5t16.5 -5.5t13.5 -6t13.5 -10t11.5 -10t13 -12.5t12 -12.5q-14 14 -14 34t14 34l348 348q14 14 34 14t34 -14q-2 2 -12.5 12t-12.5 13t-10 11.5t-10 13.5t-6 13.5t-5.5 16.5t-1.5 18 q0 38 28 68q3 3 16.5 18t19 20.5t18.5 16.5t22 15.5t22 9t26 4.5q40 0 68 -28l408 -408q28 -28 28 -68q0 -13 -4.5 -26t-9 -22t-15.5 -22t-16.5 -18.5t-20.5 -19t-18 -16.5q-30 -28 -68 -28q-10 0 -18 1.5t-16.5 5.5t-13.5 6t-13.5 10t-11.5 10t-13 12.5t-12 12.5 q14 -14 14 -34t-14 -34l-126 -126l256 -256q43 43 96 43q52 0 91 -37l363 -363q37 -39 37 -91q0 -53 -37 -90l-107 -108q-39 -37 -91 -37q-53 0 -90 37l-363 364q-38 36 -38 90q0 53 43 96l-256 256l-126 -126q-14 -14 -34 -14t-34 14q2 -2 12.5 -12t12.5 -13t10 -11.5 t10 -13.5t6 -13.5t5.5 -16.5t1.5 -18q0 -38 -28 -68q-3 -3 -16.5 -18t-19 -20.5t-18.5 -16.5t-22 -15.5t-22 -9t-26 -4.5q-40 0 -68 28l-408 408q-28 28 -28 68z" />
+<glyph unicode="&#xf0e4;" horiz-adv-x="1792" d="M0 384q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348q0 -261 -141 -483q-19 -29 -54 -29h-1402q-35 0 -54 29q-141 221 -141 483zM128 384q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z M320 832q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM710 241q-20 -77 20 -146t117 -89t146 20t89 117q16 60 -6 117t-72 91l101 382q6 26 -7.5 48.5t-38.5 29.5t-48 -6.5t-30 -39.5l-101 -382q-60 -5 -107 -43.5 t-63 -98.5zM768 1024q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM1216 832q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM1408 384q0 -53 37.5 -90.5 t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z" />
+<glyph unicode="&#xf0e5;" horiz-adv-x="1792" d="M0 640q0 174 120 321.5t326 233t450 85.5t450 -85.5t326 -233t120 -321.5t-120 -321.5t-326 -233t-450 -85.5q-70 0 -145 8q-198 -175 -460 -242q-49 -14 -114 -22h-5q-15 0 -27 10.5t-16 27.5v1q-3 4 -0.5 12t2 10t4.5 9.5l6 9t7 8.5t8 9q7 8 31 34.5t34.5 38t31 39.5 t32.5 51t27 59t26 76q-157 89 -247.5 220t-90.5 281zM128 640q0 -112 71.5 -213.5t201.5 -175.5l87 -50l-27 -96q-24 -91 -70 -172q152 63 275 171l43 38l57 -6q69 -8 130 -8q204 0 381.5 69.5t282 187.5t104.5 255t-104.5 255t-282 187.5t-381.5 69.5t-381.5 -69.5 t-282 -187.5t-104.5 -255z" />
+<glyph unicode="&#xf0e6;" horiz-adv-x="1792" d="M0 768q0 139 94 257t256.5 186.5t353.5 68.5t353.5 -68.5t256.5 -186.5t94 -257t-94 -257t-256.5 -186.5t-353.5 -68.5q-86 0 -176 16q-124 -88 -278 -128q-36 -9 -86 -16h-3q-11 0 -20.5 8t-11.5 21q-1 3 -1 6.5t0.5 6.5t2 6l2.5 5t3.5 5.5t4 5t4.5 5t4 4.5q5 6 23 25 t26 29.5t22.5 29t25 38.5t20.5 44q-124 72 -195 177t-71 224zM128 768q0 -82 53 -158t149 -132l97 -56l-35 -84q34 20 62 39l44 31l53 -10q78 -14 153 -14q153 0 286 52t211.5 141t78.5 191t-78.5 191t-211.5 141t-286 52t-286 -52t-211.5 -141t-78.5 -191zM616 132 q58 -4 88 -4q161 0 309 45t264 129q125 92 192 212t67 254q0 77 -23 152q129 -71 204 -178t75 -230q0 -120 -71 -224.5t-195 -176.5q10 -24 20.5 -44t25 -38.5t22.5 -29t26 -29.5t23 -25q1 -1 4 -4.5t4.5 -5t4 -5t3.5 -5.5l2.5 -5t2 -6t0.5 -6.5t-1 -6.5q-3 -14 -13 -22 t-22 -7q-50 7 -86 16q-154 40 -278 128q-90 -16 -176 -16q-271 0 -472 132z" />
+<glyph unicode="&#xf0e7;" horiz-adv-x="896" d="M1 551l201 825q4 14 16 23t28 9h328q19 0 32 -12.5t13 -29.5q0 -8 -5 -18l-171 -463l396 98q8 2 12 2q19 0 34 -15q18 -20 7 -44l-540 -1157q-13 -25 -42 -25q-4 0 -14 2q-17 5 -25.5 19t-4.5 30l197 808l-406 -101q-4 -1 -12 -1q-18 0 -31 11q-18 15 -13 39z" />
+<glyph unicode="&#xf0e8;" horiz-adv-x="1792" d="M0 -32v320q0 40 28 68t68 28h96v192q0 52 38 90t90 38h512v192h-96q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-96v-192h512q52 0 90 -38t38 -90v-192h96q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-320 q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h96v192h-512v-192h96q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h96v192h-512v-192h96q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf0e9;" horiz-adv-x="1664" d="M0 681q0 5 1 7q45 183 172.5 319.5t298 204.5t360.5 68q140 0 274.5 -40t246.5 -113.5t194.5 -187t115.5 -251.5q1 -2 1 -7q0 -13 -9.5 -22.5t-22.5 -9.5q-11 0 -23 10q-49 46 -93 69t-102 23q-68 0 -128 -37t-103 -97q-7 -10 -17.5 -28t-14.5 -24q-11 -17 -28 -17 q-18 0 -29 17q-4 6 -14.5 24t-17.5 28q-43 60 -102.5 97t-127.5 37t-127.5 -37t-102.5 -97q-7 -10 -17.5 -28t-14.5 -24q-11 -17 -29 -17q-17 0 -28 17q-4 6 -14.5 24t-17.5 28q-43 60 -103 97t-128 37q-58 0 -102 -23t-93 -69q-12 -10 -23 -10q-13 0 -22.5 9.5t-9.5 22.5z M384 128q0 26 19 45t45 19t45 -19t19 -45q0 -50 39 -89t89 -39t89 39t39 89v580q33 11 64 11t64 -11v-580q0 -104 -76 -180t-180 -76t-180 76t-76 180zM768 1310v98q0 26 19 45t45 19t45 -19t19 -45v-98q-42 2 -64 2t-64 -2z" />
+<glyph unicode="&#xf0ea;" horiz-adv-x="1792" d="M0 96v1344q0 40 28 68t68 28h1088q40 0 68 -28t28 -68v-328q21 -13 36 -28l408 -408q28 -28 48 -76t20 -88v-672q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v160h-544q-40 0 -68 28t-28 68zM256 1312q0 -13 9.5 -22.5t22.5 -9.5h704q13 0 22.5 9.5t9.5 22.5v64 q0 13 -9.5 22.5t-22.5 9.5h-704q-13 0 -22.5 -9.5t-9.5 -22.5v-64zM768 -128h896v640h-416q-40 0 -68 28t-28 68v416h-384v-1152zM1280 640h299l-299 299v-299z" />
+<glyph unicode="&#xf0eb;" horiz-adv-x="1024" d="M0 960q0 99 44.5 184.5t117 142t164 89t186.5 32.5t186.5 -32.5t164 -89t117 -142t44.5 -184.5q0 -155 -103 -268q-45 -49 -74.5 -87t-59.5 -95.5t-34 -107.5q47 -28 47 -82q0 -37 -25 -64q25 -27 25 -64q0 -52 -45 -81q13 -23 13 -47q0 -46 -31.5 -71t-77.5 -25 q-20 -44 -60 -70t-87 -26t-87 26t-60 70q-46 0 -77.5 25t-31.5 71q0 24 13 47q-45 29 -45 81q0 37 25 64q-25 27 -25 64q0 54 47 82q-4 50 -34 107.5t-59.5 95.5t-74.5 87q-103 113 -103 268zM128 960q0 -101 68 -180q10 -11 30.5 -33t30.5 -33q128 -153 141 -298h228 q13 145 141 298q10 11 30.5 33t30.5 33q68 79 68 180q0 72 -34.5 134t-90 101.5t-123 62t-136.5 22.5t-136.5 -22.5t-123 -62t-90 -101.5t-34.5 -134zM480 1088q0 13 9.5 22.5t22.5 9.5q50 0 99.5 -16t87 -54t37.5 -90q0 -13 -9.5 -22.5t-22.5 -9.5t-22.5 9.5t-9.5 22.5 q0 46 -54 71t-106 25q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0ec;" horiz-adv-x="1792" d="M0 256q0 14 9 23l320 320q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5v-192h1376q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1376v-192q0 -13 -9.5 -22.5t-22.5 -9.5q-12 0 -24 10l-319 320q-9 9 -9 22zM0 800v192q0 13 9.5 22.5t22.5 9.5h1376v192q0 14 9 23 t23 9q12 0 24 -10l319 -319q9 -9 9 -23t-9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192h-1376q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0ed;" horiz-adv-x="1920" d="M0 448q0 130 70 240t188 165q-2 30 -2 43q0 212 150 362t362 150q156 0 285.5 -87t188.5 -231q71 62 166 62q106 0 181 -75t75 -181q0 -76 -41 -138q130 -31 213.5 -135.5t83.5 -238.5q0 -159 -112.5 -271.5t-271.5 -112.5h-1088q-185 0 -316.5 131.5t-131.5 316.5z M512 608q0 -14 9 -23l352 -352q9 -9 23 -9t23 9l351 351q10 12 10 24q0 14 -9 23t-23 9h-224v352q0 13 -9.5 22.5t-22.5 9.5h-192q-13 0 -22.5 -9.5t-9.5 -22.5v-352h-224q-13 0 -22.5 -9.5t-9.5 -22.5z" />
+<glyph unicode="&#xf0ee;" horiz-adv-x="1920" d="M0 448q0 130 70 240t188 165q-2 30 -2 43q0 212 150 362t362 150q156 0 285.5 -87t188.5 -231q71 62 166 62q106 0 181 -75t75 -181q0 -76 -41 -138q130 -31 213.5 -135.5t83.5 -238.5q0 -159 -112.5 -271.5t-271.5 -112.5h-1088q-185 0 -316.5 131.5t-131.5 316.5z M512 672q0 -14 9 -23t23 -9h224v-352q0 -13 9.5 -22.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 22.5v352h224q13 0 22.5 9.5t9.5 22.5q0 14 -9 23l-352 352q-9 9 -23 9t-23 -9l-351 -351q-10 -12 -10 -24z" />
+<glyph unicode="&#xf0f0;" horiz-adv-x="1408" d="M0 131q0 68 5.5 131t24 138t47.5 132.5t81 103t120 60.5q-22 -52 -22 -120v-203q-58 -20 -93 -70t-35 -111q0 -80 56 -136t136 -56t136 56t56 136q0 61 -35.5 111t-92.5 70v203q0 62 25 93q132 -104 295 -104t295 104q25 -31 25 -93v-64q-106 0 -181 -75t-75 -181v-89 q-32 -29 -32 -71q0 -40 28 -68t68 -28t68 28t28 68q0 42 -32 71v89q0 52 38 90t90 38t90 -38t38 -90v-89q-32 -29 -32 -71q0 -40 28 -68t68 -28t68 28t28 68q0 42 -32 71v89q0 68 -34.5 127.5t-93.5 93.5q0 10 0.5 42.5t0 48t-2.5 41.5t-7 47t-13 40q68 -15 120 -60.5 t81 -103t47.5 -132.5t24 -138t5.5 -131q0 -121 -73 -190t-194 -69h-874q-121 0 -194 69t-73 190zM256 192q0 26 19 45t45 19t45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45zM320 1024q0 159 112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5 t-271.5 112.5t-112.5 271.5z" />
+<glyph unicode="&#xf0f1;" horiz-adv-x="1408" d="M0 768v512q0 26 19 45t45 19q6 0 16 -2q17 30 47 48t65 18q53 0 90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5q-33 0 -64 18v-402q0 -106 94 -181t226 -75t226 75t94 181v402q-31 -18 -64 -18q-53 0 -90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5q35 0 65 -18t47 -48 q10 2 16 2q26 0 45 -19t19 -45v-512q0 -144 -110 -252t-274 -128v-132q0 -106 94 -181t226 -75t226 75t94 181v395q-57 21 -92.5 70t-35.5 111q0 80 56 136t136 56t136 -56t56 -136q0 -62 -35.5 -111t-92.5 -70v-395q0 -159 -131.5 -271.5t-316.5 -112.5t-316.5 112.5 t-131.5 271.5v132q-164 20 -274 128t-110 252zM1152 832q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf0f2;" horiz-adv-x="1792" d="M0 96v832q0 92 66 158t158 66h64v-1280h-64q-92 0 -158 66t-66 158zM384 -128v1280h128v160q0 40 28 68t68 28h576q40 0 68 -28t28 -68v-160h128v-1280h-1024zM640 1152h512v128h-512v-128zM1504 -128v1280h64q92 0 158 -66t66 -158v-832q0 -92 -66 -158t-158 -66h-64z " />
+<glyph unicode="&#xf0f3;" horiz-adv-x="1664" d="M0 128q190 161 287 397.5t97 498.5q0 165 96 262t264 117q-8 18 -8 37q0 40 28 68t68 28t68 -28t28 -68q0 -19 -8 -37q168 -20 264 -117t96 -262q0 -262 97 -498.5t287 -397.5q0 -52 -38 -90t-90 -38h-448q0 -106 -75 -181t-181 -75t-181 75t-75 181h-448q-52 0 -90 38 t-38 90zM656 0q0 -73 51.5 -124.5t124.5 -51.5q16 0 16 16t-16 16q-59 0 -101.5 42.5t-42.5 101.5q0 16 -16 16t-16 -16z" />
+<glyph unicode="&#xf0f4;" horiz-adv-x="1920" d="M0 128h1792q0 -106 -75 -181t-181 -75h-1280q-106 0 -181 75t-75 181zM256 480v736q0 26 19 45t45 19h1152q159 0 271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5h-64v-32q0 -92 -66 -158t-158 -66h-704q-92 0 -158 66t-66 158zM1408 704h64q80 0 136 56t56 136 t-56 136t-136 56h-64v-384z" />
+<glyph unicode="&#xf0f5;" horiz-adv-x="1408" d="M0 832v640q0 26 19 45t45 19t45 -19t19 -45v-416q0 -26 19 -45t45 -19t45 19t19 45v416q0 26 19 45t45 19t45 -19t19 -45v-416q0 -26 19 -45t45 -19t45 19t19 45v416q0 26 19 45t45 19t45 -19t19 -45v-640q0 -61 -35.5 -111t-92.5 -70v-779q0 -52 -38 -90t-90 -38h-128 q-52 0 -90 38t-38 90v779q-57 20 -92.5 70t-35.5 111zM768 416v800q0 132 94 226t226 94h256q26 0 45 -19t19 -45v-1600q0 -52 -38 -90t-90 -38h-128q-52 0 -90 38t-38 90v512h-224q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0f6;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM384 160v64q0 14 9 23t23 9h704q14 0 23 -9t9 -23v-64 q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23zM384 416v64q0 14 9 23t23 9h704q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23zM384 672v64q0 14 9 23t23 9h704q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23zM1024 1024h376 q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf0f7;" horiz-adv-x="1408" d="M0 -192v1664q0 26 19 45t45 19h1280q26 0 45 -19t19 -45v-1664q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM128 -128h384v224q0 13 9.5 22.5t22.5 9.5h320q13 0 22.5 -9.5t9.5 -22.5v-224h384v1536h-1152v-1536zM256 160v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 672v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 928v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 1184v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 672v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 928v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 1184v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 672v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 928v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 1184v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 160v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 416v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 928v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 1184v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0f8;" horiz-adv-x="1408" d="M0 -192v1280q0 26 19 45t45 19h320v288q0 40 28 68t68 28h448q40 0 68 -28t28 -68v-288h320q26 0 45 -19t19 -45v-1280q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM128 -128h384v224q0 13 9.5 22.5t22.5 9.5h320q13 0 22.5 -9.5t9.5 -22.5v-224h384v1152h-256 v-32q0 -40 -28 -68t-68 -28h-448q-40 0 -68 28t-28 68v32h-256v-1152zM256 160v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5 v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64 q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 1056q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v96h128 v-96q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v320q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-96h-128v96q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-320zM768 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5 v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 160v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5 v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5 v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0f9;" horiz-adv-x="1920" d="M64 192q0 26 19 45t45 19v416q0 26 13 58t32 51l198 198q19 19 51 32t58 13h160v320q0 26 19 45t45 19h1152q26 0 45 -19t19 -45v-1152q0 -26 -19 -45t-45 -19h-192q0 -106 -75 -181t-181 -75t-181 75t-75 181h-384q0 -106 -75 -181t-181 -75t-181 75t-75 181h-128 q-26 0 -45 19t-19 45zM256 640h384v256h-158q-14 -2 -22 -9l-195 -195q-7 -12 -9 -22v-30zM384 128q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM896 800q0 -14 9 -23t23 -9h224v-224q0 -14 9 -23t23 -9h192 q14 0 23 9t9 23v224h224q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-224v224q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-224h-224q-14 0 -23 -9t-9 -23v-192zM1280 128q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z" />
+<glyph unicode="&#xf0fa;" horiz-adv-x="1792" d="M0 96v832q0 92 66 158t158 66h32v-1280h-32q-92 0 -158 66t-66 158zM352 -128v1280h160v160q0 40 28 68t68 28h576q40 0 68 -28t28 -68v-160h160v-1280h-1088zM512 416q0 -14 9 -23t23 -9h224v-224q0 -14 9 -23t23 -9h192q14 0 23 9t9 23v224h224q14 0 23 9t9 23v192 q0 14 -9 23t-23 9h-224v224q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-224h-224q-14 0 -23 -9t-9 -23v-192zM640 1152h512v128h-512v-128zM1536 -128v1280h32q92 0 158 -66t66 -158v-832q0 -92 -66 -158t-158 -66h-32z" />
+<glyph unicode="&#xf0fb;" horiz-adv-x="1920" d="M0 512v128l192 24v8h-128v32h-32v192l32 32h96l192 -224h160v416h-64v32h64h160h96q26 0 45 -4.5t19 -11.5t-19 -11.5t-45 -4.5h-69l293 -352h64l224 -64l352 -32q261 -58 287 -93l1 -3q-1 -32 -288 -96l-352 -32l-224 -64h-64l-293 -352h69q26 0 45 -4.5t19 -11.5 t-19 -11.5t-45 -4.5h-96h-160h-64v32h64v416h-160l-192 -224h-96l-32 32v192h32v32h128v8z" />
+<glyph unicode="&#xf0fc;" horiz-adv-x="1664" d="M64 1152l32 128h480l32 128h960l32 -192l-64 -32v-800l128 -192v-192h-1152v192l128 192h-128q-159 0 -271.5 112.5t-112.5 271.5v320zM384 768q0 -53 37.5 -90.5t90.5 -37.5h128v384h-256v-256z" />
+<glyph unicode="&#xf0fd;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 192q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v320h512v-320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45 v896q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-320h-512v320q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-896z" />
+<glyph unicode="&#xf0fe;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 576q0 -26 19 -45t45 -19h320v-320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v320h320q26 0 45 19t19 45 v128q0 26 -19 45t-45 19h-320v320q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-320h-320q-26 0 -45 -19t-19 -45v-128z" />
+<glyph unicode="&#xf100;" horiz-adv-x="1024" d="M45 576q0 13 10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23zM429 576q0 13 10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23 l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23z" />
+<glyph unicode="&#xf101;" horiz-adv-x="1024" d="M13 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23zM397 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10 l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf102;" horiz-adv-x="1152" d="M77 224q0 13 10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23zM77 608q0 13 10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23 l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf103;" horiz-adv-x="1152" d="M77 672q0 13 10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23zM77 1056q0 13 10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10 l50 -50q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23z" />
+<glyph unicode="&#xf104;" horiz-adv-x="640" d="M45 576q0 13 10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23z" />
+<glyph unicode="&#xf105;" horiz-adv-x="640" d="M13 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf106;" horiz-adv-x="1152" d="M77 352q0 13 10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf107;" horiz-adv-x="1152" d="M77 800q0 13 10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23z" />
+<glyph unicode="&#xf108;" horiz-adv-x="1920" d="M0 288v1088q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-544q0 -37 16 -77.5t32 -71t16 -43.5q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45q0 14 16 44t32 70t16 78h-544q-66 0 -113 47t-47 113zM128 544q0 -13 9.5 -22.5 t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v832q0 13 -9.5 22.5t-22.5 9.5h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-832z" />
+<glyph unicode="&#xf109;" horiz-adv-x="1920" d="M0 96v96h160h1600h160v-96q0 -40 -47 -68t-113 -28h-1600q-66 0 -113 28t-47 68zM256 416v704q0 66 47 113t113 47h1088q66 0 113 -47t47 -113v-704q0 -66 -47 -113t-113 -47h-1088q-66 0 -113 47t-47 113zM384 416q0 -13 9.5 -22.5t22.5 -9.5h1088q13 0 22.5 9.5 t9.5 22.5v704q0 13 -9.5 22.5t-22.5 9.5h-1088q-13 0 -22.5 -9.5t-9.5 -22.5v-704zM864 112q0 -16 16 -16h160q16 0 16 16t-16 16h-160q-16 0 -16 -16z" />
+<glyph unicode="&#xf10a;" horiz-adv-x="1152" d="M0 160v1088q0 66 47 113t113 47h832q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-832q-66 0 -113 47t-47 113zM128 288q0 -13 9.5 -22.5t22.5 -9.5h832q13 0 22.5 9.5t9.5 22.5v960q0 13 -9.5 22.5t-22.5 9.5h-832q-13 0 -22.5 -9.5t-9.5 -22.5v-960zM512 128 q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf10b;" horiz-adv-x="768" d="M0 128v1024q0 52 38 90t90 38h512q52 0 90 -38t38 -90v-1024q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90zM96 288q0 -13 9.5 -22.5t22.5 -9.5h512q13 0 22.5 9.5t9.5 22.5v704q0 13 -9.5 22.5t-22.5 9.5h-512q-13 0 -22.5 -9.5t-9.5 -22.5v-704zM288 1136 q0 -16 16 -16h160q16 0 16 16t-16 16h-160q-16 0 -16 -16zM304 128q0 -33 23.5 -56.5t56.5 -23.5t56.5 23.5t23.5 56.5t-23.5 56.5t-56.5 23.5t-56.5 -23.5t-23.5 -56.5z" />
+<glyph unicode="&#xf10c;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273z" />
+<glyph unicode="&#xf10d;" horiz-adv-x="1664" d="M0 192v704q0 104 40.5 198.5t109.5 163.5t163.5 109.5t198.5 40.5h64q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-64q-106 0 -181 -75t-75 -181v-32q0 -40 28 -68t68 -28h224q80 0 136 -56t56 -136v-384q0 -80 -56 -136t-136 -56h-384q-80 0 -136 56t-56 136z M896 192v704q0 104 40.5 198.5t109.5 163.5t163.5 109.5t198.5 40.5h64q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-64q-106 0 -181 -75t-75 -181v-32q0 -40 28 -68t68 -28h224q80 0 136 -56t56 -136v-384q0 -80 -56 -136t-136 -56h-384q-80 0 -136 56t-56 136z" />
+<glyph unicode="&#xf10e;" horiz-adv-x="1664" d="M0 832v384q0 80 56 136t136 56h384q80 0 136 -56t56 -136v-704q0 -104 -40.5 -198.5t-109.5 -163.5t-163.5 -109.5t-198.5 -40.5h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h64q106 0 181 75t75 181v32q0 40 -28 68t-68 28h-224q-80 0 -136 56t-56 136zM896 832v384 q0 80 56 136t136 56h384q80 0 136 -56t56 -136v-704q0 -104 -40.5 -198.5t-109.5 -163.5t-163.5 -109.5t-198.5 -40.5h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h64q106 0 181 75t75 181v32q0 40 -28 68t-68 28h-224q-80 0 -136 56t-56 136z" />
+<glyph unicode="&#xf110;" horiz-adv-x="1568" d="M0 640q0 66 47 113t113 47t113 -47t47 -113t-47 -113t-113 -47t-113 47t-47 113zM176 1088q0 73 51.5 124.5t124.5 51.5t124.5 -51.5t51.5 -124.5t-51.5 -124.5t-124.5 -51.5t-124.5 51.5t-51.5 124.5zM208 192q0 60 42 102t102 42q59 0 101.5 -42t42.5 -102t-42.5 -102 t-101.5 -42q-60 0 -102 42t-42 102zM608 1280q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM672 0q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM1136 192q0 46 33 79t79 33t79 -33t33 -79 t-33 -79t-79 -33t-79 33t-33 79zM1168 1088q0 33 23.5 56.5t56.5 23.5t56.5 -23.5t23.5 -56.5t-23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5zM1344 640q0 40 28 68t68 28t68 -28t28 -68t-28 -68t-68 -28t-68 28t-28 68z" />
+<glyph unicode="&#xf111;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5z" />
+<glyph unicode="&#xf112;" horiz-adv-x="1792" d="M0 896q0 26 19 45l512 512q19 19 45 19t45 -19t19 -45v-256h224q713 0 875 -403q53 -134 53 -333q0 -166 -127 -451q-3 -7 -10.5 -24t-13.5 -30t-13 -22q-12 -17 -28 -17q-15 0 -23.5 10t-8.5 25q0 9 2.5 26.5t2.5 23.5q5 68 5 123q0 101 -17.5 181t-48.5 138.5t-80 101 t-105.5 69.5t-133 42.5t-154 21.5t-175.5 6h-224v-256q0 -26 -19 -45t-45 -19t-45 19l-512 512q-19 19 -19 45z" />
+<glyph unicode="&#xf113;" horiz-adv-x="1664" d="M0 496q0 237 136 396q-27 82 -27 170q0 116 51 218q108 0 190 -39.5t189 -123.5q147 35 309 35q148 0 280 -32q105 82 187 121t189 39q51 -102 51 -218q0 -87 -27 -168q136 -160 136 -398q0 -207 -61 -331q-38 -77 -105.5 -133t-141 -86t-170 -47.5t-171.5 -22t-167 -4.5 q-78 0 -142 3t-147.5 12.5t-152.5 30t-137 51.5t-121 81t-86 115q-62 123 -62 331zM224 320q0 -88 32 -153.5t81 -103t122 -60t140 -29.5t149 -7h168q82 0 149 7t140 29.5t122 60t81 103t32 153.5q0 120 -69 204t-187 84q-41 0 -195 -21q-71 -11 -157 -11t-157 11 q-152 21 -195 21q-118 0 -187 -84t-69 -204zM384 320q0 40 12.5 82t43 76t72.5 34t72.5 -34t43 -76t12.5 -82t-12.5 -82t-43 -76t-72.5 -34t-72.5 34t-43 76t-12.5 82zM1024 320q0 40 12.5 82t43 76t72.5 34t72.5 -34t43 -76t12.5 -82t-12.5 -82t-43 -76t-72.5 -34t-72.5 34 t-43 76t-12.5 82z" />
+<glyph unicode="&#xf114;" horiz-adv-x="1664" d="M0 224v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h672q92 0 158 -66t66 -158v-704q0 -92 -66 -158t-158 -66h-1216q-92 0 -158 66t-66 158zM128 224q0 -40 28 -68t68 -28h1216q40 0 68 28t28 68v704q0 40 -28 68t-68 28h-704q-40 0 -68 28t-28 68v64 q0 40 -28 68t-68 28h-320q-40 0 -68 -28t-28 -68v-960z" />
+<glyph unicode="&#xf115;" horiz-adv-x="1920" d="M0 224v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h544q92 0 158 -66t66 -158v-160h192q54 0 99 -24.5t67 -70.5q15 -32 15 -68q0 -62 -46 -120l-295 -363q-43 -53 -116 -87.5t-140 -34.5h-1088q-92 0 -158 66t-66 158zM128 331l256 315q44 53 116 87.5 t140 34.5h768v160q0 40 -28 68t-68 28h-576q-40 0 -68 28t-28 68v64q0 40 -28 68t-68 28h-320q-40 0 -68 -28t-28 -68v-853zM171 163q0 -35 53 -35h1088q40 0 86 22t71 53l294 363q18 22 18 39q0 35 -53 35h-1088q-40 0 -85.5 -21.5t-71.5 -52.5l-294 -363q-18 -24 -18 -40z " />
+<glyph unicode="&#xf116;" horiz-adv-x="1792" />
+<glyph unicode="&#xf117;" horiz-adv-x="1792" />
+<glyph unicode="&#xf118;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5zM384 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM402 461q-8 25 4 48.5t38 31.5q25 8 48.5 -4t31.5 -38 q25 -80 92.5 -129.5t151.5 -49.5t151.5 49.5t92.5 129.5q8 26 32 38t49 4t37 -31.5t4 -48.5q-37 -121 -138 -195t-228 -74t-228 74t-138 195zM896 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5z" />
+<glyph unicode="&#xf119;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5zM384 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM402 307q37 121 138 195t228 74t228 -74t138 -195q8 -25 -4 -48.5 t-37 -31.5t-49 4t-32 38q-25 80 -92.5 129.5t-151.5 49.5t-151.5 -49.5t-92.5 -129.5q-8 -26 -31.5 -38t-48.5 -4q-26 8 -38 31.5t-4 48.5zM896 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5z" />
+<glyph unicode="&#xf11a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5zM384 448q0 26 19 45t45 19h640q26 0 45 -19t19 -45t-19 -45t-45 -19h-640q-26 0 -45 19t-19 45zM384 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5 t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM896 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5z" />
+<glyph unicode="&#xf11b;" horiz-adv-x="1920" d="M0 512q0 212 150 362t362 150h896q212 0 362 -150t150 -362t-150 -362t-362 -150q-192 0 -338 128h-220q-146 -128 -338 -128q-212 0 -362 150t-150 362zM192 448q0 -14 9 -23t23 -9h192v-192q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v192h192q14 0 23 9t9 23v128 q0 14 -9 23t-23 9h-192v192q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-192h-192q-14 0 -23 -9t-9 -23v-128zM1152 384q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM1408 640q0 -53 37.5 -90.5t90.5 -37.5 t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z" />
+<glyph unicode="&#xf11c;" horiz-adv-x="1920" d="M0 128v896q0 53 37.5 90.5t90.5 37.5h1664q53 0 90.5 -37.5t37.5 -90.5v-896q0 -53 -37.5 -90.5t-90.5 -37.5h-1664q-53 0 -90.5 37.5t-37.5 90.5zM128 128h1664v896h-1664v-896zM256 272v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM256 528v96 q0 16 16 16h224q16 0 16 -16v-96q0 -16 -16 -16h-224q-16 0 -16 16zM256 784v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM512 272v96q0 16 16 16h864q16 0 16 -16v-96q0 -16 -16 -16h-864q-16 0 -16 16zM512 784v96q0 16 16 16h96q16 0 16 -16v-96 q0 -16 -16 -16h-96q-16 0 -16 16zM640 528v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM768 784v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM896 528v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16z M1024 784v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM1152 528v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM1280 784v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM1408 528v96q0 16 16 16h112v240 q0 16 16 16h96q16 0 16 -16v-352q0 -16 -16 -16h-224q-16 0 -16 16zM1536 272v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16z" />
+<glyph unicode="&#xf11d;" horiz-adv-x="1792" d="M64 1280q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5q0 -35 -17.5 -64t-46.5 -46v-1266q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v1266q-29 17 -46.5 46t-17.5 64zM320 320v742q0 35 31 55q35 21 78.5 42.5t114 52t152.5 49.5t155 19q112 0 209 -31t209 -86 q38 -19 89 -19q122 0 310 112q22 12 31 17q31 16 62 -2q31 -20 31 -55v-763q0 -39 -35 -57q-10 -5 -17 -9q-218 -116 -369 -116q-88 0 -158 35l-28 14q-64 33 -99 48t-91 29t-114 14q-102 0 -235.5 -44t-228.5 -102q-15 -9 -33 -9q-16 0 -32 8q-32 19 -32 56zM448 426 q245 113 433 113q55 0 103.5 -7.5t98 -26t77 -31t82.5 -39.5l28 -14q44 -22 101 -22q120 0 293 92v616q-169 -91 -306 -91q-82 0 -145 32q-100 49 -184 76.5t-178 27.5q-173 0 -403 -127v-599z" />
+<glyph unicode="&#xf11e;" horiz-adv-x="1792" d="M64 1280q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5q0 -35 -17.5 -64t-46.5 -46v-1266q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v1266q-29 17 -46.5 46t-17.5 64zM320 320v742q0 35 31 55q35 21 78.5 42.5t114 52t152.5 49.5t155 19q112 0 209 -31t209 -86 q38 -19 89 -19q122 0 310 112q22 12 31 17q31 16 62 -2q31 -20 31 -55v-763q0 -39 -35 -57q-10 -5 -17 -9q-218 -116 -369 -116q-88 0 -158 35l-28 14q-64 33 -99 48t-91 29t-114 14q-102 0 -235.5 -44t-228.5 -102q-15 -9 -33 -9q-16 0 -32 8q-32 19 -32 56zM448 426 q205 96 384 110v192q-181 -16 -384 -117v-185zM448 836q215 111 384 118v197q-172 -8 -384 -126v-189zM832 730h19q102 0 192.5 -29t197.5 -82q19 -9 39 -15v-188q42 -17 91 -17q120 0 293 92v184q-235 -116 -384 -71v224q-20 6 -39 15q-5 3 -33 17t-34.5 17t-31.5 15 t-34.5 15.5t-32.5 13t-36 12.5t-35 8.5t-39.5 7.5t-39.5 4t-44 2q-23 0 -49 -3v-222zM1280 828q148 -42 384 90v189q-169 -91 -306 -91q-45 0 -78 8v-196z" />
+<glyph unicode="&#xf120;" horiz-adv-x="1664" d="M13 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23zM640 32v64q0 14 9 23t23 9h960q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-960 q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf121;" horiz-adv-x="1920" d="M45 576q0 13 10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23zM712 -52l373 1291q4 13 15.5 19.5t23.5 2.5l62 -17q13 -4 19.5 -15.5t2.5 -24.5 l-373 -1291q-4 -13 -15.5 -19.5t-23.5 -2.5l-62 17q-13 4 -19.5 15.5t-2.5 24.5zM1293 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf122;" horiz-adv-x="1792" d="M0 896q0 26 19 45l512 512q29 31 70 14q39 -17 39 -59v-69l-397 -398q-19 -19 -19 -45t19 -45l397 -397v-70q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-512 512q-19 19 -19 45zM384 896q0 26 19 45l512 512q29 31 70 14q39 -17 39 -59v-262q411 -28 599 -221 q169 -173 169 -509q0 -58 -17 -133.5t-38.5 -138t-48 -125t-40.5 -90.5l-20 -40q-8 -17 -28 -17q-6 0 -9 1q-25 8 -23 34q43 400 -106 565q-64 71 -170.5 110.5t-267.5 52.5v-251q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-512 512q-19 19 -19 45z" />
+<glyph unicode="&#xf123;" horiz-adv-x="1664" d="M2 900.5q9 27.5 54 34.5l502 73l225 455q20 41 49 41q28 0 49 -41l225 -455l502 -73q45 -7 54 -34.5t-24 -59.5l-363 -354l86 -500q5 -33 -6 -51.5t-34 -18.5q-17 0 -40 12l-449 236l-449 -236q-23 -12 -40 -12q-23 0 -34 18.5t-6 51.5l86 500l-364 354q-32 32 -23 59.5z M832 310l59 -31l318 -168l-60 355l-12 66l49 47l257 250l-356 52l-66 10l-30 60l-159 322v-963z" />
+<glyph unicode="&#xf124;" horiz-adv-x="1408" d="M2 561q-5 22 4 42t29 30l1280 640q13 7 29 7q27 0 45 -19q15 -14 18.5 -34.5t-6.5 -39.5l-640 -1280q-17 -35 -57 -35q-5 0 -15 2q-22 5 -35.5 22.5t-13.5 39.5v576h-576q-22 0 -39.5 13.5t-22.5 35.5z" />
+<glyph unicode="&#xf125;" horiz-adv-x="1664" d="M0 928v192q0 14 9 23t23 9h224v224q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-224h851l246 247q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-247 -246v-851h224q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-224v-224q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v224h-864 q-14 0 -23 9t-9 23v864h-224q-14 0 -23 9t-9 23zM512 301l595 595h-595v-595zM557 256h595v595z" />
+<glyph unicode="&#xf126;" horiz-adv-x="1024" d="M0 64q0 52 26 96.5t70 69.5v820q-44 25 -70 69.5t-26 96.5q0 80 56 136t136 56t136 -56t56 -136q0 -52 -26 -96.5t-70 -69.5v-497q54 26 154 57q55 17 87.5 29.5t70.5 31t59 39.5t40.5 51t28 69.5t8.5 91.5q-44 25 -70 69.5t-26 96.5q0 80 56 136t136 56t136 -56t56 -136 q0 -52 -26 -96.5t-70 -69.5q-2 -287 -226 -414q-68 -38 -203 -81q-128 -40 -169.5 -71t-41.5 -100v-26q44 -25 70 -69.5t26 -96.5q0 -80 -56 -136t-136 -56t-136 56t-56 136zM96 64q0 -40 28 -68t68 -28t68 28t28 68t-28 68t-68 28t-68 -28t-28 -68zM96 1216q0 -40 28 -68 t68 -28t68 28t28 68t-28 68t-68 28t-68 -28t-28 -68zM736 1088q0 -40 28 -68t68 -28t68 28t28 68t-28 68t-68 28t-68 -28t-28 -68z" />
+<glyph unicode="&#xf127;" horiz-adv-x="1664" d="M0 448q0 14 9 23t23 9h320q14 0 23 -9t9 -23t-9 -23t-23 -9h-320q-14 0 -23 9t-9 23zM16 1088q0 120 85 203l147 146q83 83 203 83q121 0 204 -85l334 -335q21 -21 42 -56l-239 -18l-273 274q-28 28 -68 28q-39 0 -68 -27l-147 -146q-28 -28 -28 -67q0 -40 28 -68 l274 -274l-18 -240q-35 21 -56 42l-336 336q-84 86 -84 204zM128 32q0 13 9 23l256 256q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-256 -256q-10 -9 -23 -9q-12 0 -23 9q-9 10 -9 23zM544 -96v320q0 14 9 23t23 9t23 -9t9 -23v-320q0 -14 -9 -23t-23 -9t-23 9t-9 23zM633 364 l239 18l273 -274q27 -27 68 -27.5t68 26.5l147 146q28 28 28 67q0 40 -28 68l-274 275l18 239q35 -21 56 -42l336 -336q84 -86 84 -204q0 -120 -85 -203l-147 -146q-83 -83 -203 -83q-121 0 -204 85l-334 335q-21 21 -42 56zM1056 1184v320q0 14 9 23t23 9t23 -9t9 -23v-320 q0 -14 -9 -23t-23 -9t-23 9t-9 23zM1216 1120q0 13 9 23l256 256q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-256 -256q-11 -9 -23 -9t-23 9q-9 10 -9 23zM1280 960q0 14 9 23t23 9h320q14 0 23 -9t9 -23t-9 -23t-23 -9h-320q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf128;" horiz-adv-x="1024" d="M96.5 986q-2.5 15 5.5 28q160 266 464 266q80 0 161 -31t146 -83t106 -127.5t41 -158.5q0 -54 -15.5 -101t-35 -76.5t-55 -59.5t-57.5 -43.5t-61 -35.5q-41 -23 -68.5 -65t-27.5 -67q0 -17 -12 -32.5t-28 -15.5h-240q-15 0 -25.5 18.5t-10.5 37.5v45q0 83 65 156.5 t143 108.5q59 27 84 56t25 76q0 42 -46.5 74t-107.5 32q-65 0 -108 -29q-35 -25 -107 -115q-13 -16 -31 -16q-12 0 -25 8l-164 125q-13 10 -15.5 25zM384 40v240q0 16 12 28t28 12h240q16 0 28 -12t12 -28v-240q0 -16 -12 -28t-28 -12h-240q-16 0 -28 12t-12 28z" />
+<glyph unicode="&#xf129;" horiz-adv-x="640" d="M0 64v128q0 26 19 45t45 19h64v384h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-576h64q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45zM128 1152v192q0 26 19 45t45 19h256q26 0 45 -19t19 -45v-192 q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf12a;" horiz-adv-x="640" d="M98 1344q-1 26 17.5 45t44.5 19h320q26 0 44.5 -19t17.5 -45l-28 -768q-1 -26 -20.5 -45t-45.5 -19h-256q-26 0 -45.5 19t-20.5 45zM128 64v224q0 26 19 45t45 19h256q26 0 45 -19t19 -45v-224q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf12b;" d="M5 0v167h128l197 291l-185 272h-137v168h276l139 -228q2 -4 23 -42q8 -9 11 -21h3q3 9 11 21l25 42l140 228h257v-168h-125l-184 -267l204 -296h109v-167h-248l-159 252l-24 42q-8 9 -11 21h-3l-9 -21q-10 -20 -25 -44l-155 -250h-258zM1013 713q0 64 26 117t65 86.5 t84 65t84 54.5t65 54t26 64q0 38 -29.5 62.5t-70.5 24.5q-51 0 -97 -39q-14 -11 -36 -38l-105 92q26 37 63 66q83 65 188 65q110 0 178 -59.5t68 -158.5q0 -56 -24.5 -103t-62 -76.5t-81.5 -58.5t-82 -50.5t-65.5 -51.5t-30.5 -63h232v80h126v-206h-514l-3 27q-4 28 -4 46z " />
+<glyph unicode="&#xf12c;" d="M5 0v167h128l197 291l-185 272h-137v168h276l139 -228q2 -4 23 -42q8 -9 11 -21h3q3 9 11 21l25 42l140 228h257v-168h-125l-184 -267l204 -296h109v-167h-248l-159 252l-24 42q-8 9 -11 21h-3l-9 -21q-10 -20 -25 -44l-155 -250h-258zM1015 -183q0 64 26 117t65 86.5 t84 65t84 54.5t65 54t26 64q0 38 -29.5 62.5t-70.5 24.5q-51 0 -97 -39q-14 -11 -36 -38l-105 92q26 37 63 66q80 65 188 65q110 0 178 -59.5t68 -158.5q0 -66 -34.5 -118.5t-84 -86t-99.5 -62.5t-87 -63t-41 -73h232v80h126v-206h-514l-4 27q-3 45 -3 46z" />
+<glyph unicode="&#xf12d;" horiz-adv-x="1920" d="M1.5 146.5q5.5 37.5 30.5 65.5l896 1024q38 44 96 44h768q38 0 69.5 -20.5t47.5 -54.5q15 -34 9.5 -71.5t-30.5 -65.5l-896 -1024q-38 -44 -96 -44h-768q-38 0 -69.5 20.5t-47.5 54.5q-15 34 -9.5 71.5zM128 128h768l336 384h-768z" />
+<glyph unicode="&#xf12e;" horiz-adv-x="1664" d="M0 0v1024q2 -1 17.5 -3.5t34 -5t21.5 -3.5q150 -24 245 -24q80 0 117 35q46 44 46 89q0 22 -15 50.5t-33.5 53t-33.5 64.5t-15 83q0 82 59 127.5t144 45.5q80 0 134 -44.5t54 -123.5q0 -41 -17.5 -77.5t-38 -59t-38 -56.5t-17.5 -71q0 -57 42 -83.5t103 -26.5 q64 0 180 15t163 17v-2q-1 -2 -3.5 -17.5t-5 -34t-3.5 -21.5q-24 -150 -24 -245q0 -80 35 -117q44 -46 89 -46q22 0 50.5 15t53 33.5t64.5 33.5t83 15q82 0 127.5 -59t45.5 -143q0 -81 -44.5 -135t-123.5 -54q-41 0 -77.5 17.5t-59 38t-56.5 38t-71 17.5q-110 0 -110 -124 q0 -39 16 -115t15 -115v-5q-22 0 -33 -1q-34 -3 -97.5 -11.5t-115.5 -13.5t-98 -5q-61 0 -103 26.5t-42 83.5q0 37 17.5 71t38 56.5t38 59t17.5 77.5q0 79 -54 123.5t-135 44.5q-84 0 -143 -45.5t-59 -127.5q0 -43 15 -83t33.5 -64.5t33.5 -53t15 -50.5q0 -45 -46 -89 q-37 -35 -117 -35q-95 0 -245 24q-9 2 -27.5 4t-27.5 4l-13 2q-1 0 -3 1q-2 0 -2 1z" />
+<glyph unicode="&#xf130;" horiz-adv-x="1152" d="M0 704v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -221 -147.5 -384.5t-364.5 -187.5v-132h256q26 0 45 -19t19 -45t-19 -45t-45 -19h-640q-26 0 -45 19t-19 45 t19 45t45 19h256v132q-217 24 -364.5 187.5t-147.5 384.5zM256 704v512q0 132 94 226t226 94t226 -94t94 -226v-512q0 -132 -94 -226t-226 -94t-226 94t-94 226z" />
+<glyph unicode="&#xf131;" horiz-adv-x="1408" d="M13 64q0 13 10 23l1234 1234q10 10 23 10t23 -10l82 -82q10 -10 10 -23t-10 -23l-361 -361v-128q0 -132 -94 -226t-226 -94q-55 0 -109 19l-96 -96q97 -51 205 -51q185 0 316.5 131.5t131.5 316.5v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -221 -147.5 -384.5 t-364.5 -187.5v-132h256q26 0 45 -19t19 -45t-19 -45t-45 -19h-640q-26 0 -45 19t-19 45t19 45t45 19h256v132q-125 13 -235 81l-254 -254q-10 -10 -23 -10t-23 10l-82 82q-10 10 -10 23zM128 704v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -53 15 -113l-101 -101 q-42 103 -42 214zM384 704v512q0 132 94 226t226 94q102 0 184.5 -59t116.5 -152z" />
+<glyph unicode="&#xf132;" horiz-adv-x="1280" d="M0 576v768q0 26 19 45t45 19h1152q26 0 45 -19t19 -45v-768q0 -86 -33.5 -170.5t-83 -150t-118 -127.5t-126.5 -103t-121 -77.5t-89.5 -49.5t-42.5 -20q-12 -6 -26 -6t-26 6q-16 7 -42.5 20t-89.5 49.5t-121 77.5t-126.5 103t-118 127.5t-83 150t-33.5 170.5zM640 79 q119 63 213 137q235 184 235 360v640h-448v-1137z" />
+<glyph unicode="&#xf133;" horiz-adv-x="1664" d="M0 -128v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h384v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h128q52 0 90 -38t38 -90v-1280q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90zM128 -128h1408v1024h-1408v-1024z M384 1088q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288zM1152 1088q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288z" />
+<glyph unicode="&#xf134;" horiz-adv-x="1408" d="M3.5 940q-8.5 25 3.5 49q5 10 14.5 26t37.5 53.5t60.5 70t85 67t108.5 52.5q-25 42 -25 86q0 66 47 113t113 47t113 -47t47 -113q0 -33 -14 -64h302q0 11 7 20t18 11l448 96q3 1 7 1q12 0 20 -7q12 -9 12 -25v-320q0 -16 -12 -25q-8 -7 -20 -7q-4 0 -7 1l-448 96 q-11 2 -18 11t-7 20h-256v-102q111 -23 183.5 -111t72.5 -203v-800q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45v800q0 106 62.5 190.5t161.5 114.5v111h-32q-59 0 -115 -23.5t-91.5 -53t-66 -66.5t-40.5 -53.5t-14 -24.5q-17 -35 -57 -35q-16 0 -29 7q-23 12 -31.5 37 zM384 1344q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf135;" horiz-adv-x="1664" d="M36 464l224 384q10 14 26 16l379 20q96 114 176 195q188 187 358 258t431 71q14 0 24 -9.5t10 -22.5q0 -249 -75.5 -430.5t-253.5 -360.5q-81 -80 -195 -176l-20 -379q-2 -16 -16 -26l-384 -224q-7 -4 -16 -4q-12 0 -23 9l-64 64q-13 14 -8 32l85 276l-281 281l-276 -85 q-3 -1 -9 -1q-14 0 -23 9l-64 64q-17 19 -5 39zM1248 1088q0 -40 28 -68t68 -28t68 28t28 68t-28 68t-68 28t-68 -28t-28 -68z" />
+<glyph unicode="&#xf136;" horiz-adv-x="1792" d="M0 0l204 953l-153 327h1276q101 0 189.5 -40.5t147.5 -113.5q60 -73 81 -168.5t0 -194.5l-164 -763h-334l178 832q13 56 -15 88q-27 33 -83 33h-169l-204 -953h-334l204 953h-286l-204 -953h-334z" />
+<glyph unicode="&#xf137;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM346 640q0 -26 19 -45l454 -454q19 -19 45 -19t45 19l102 102q19 19 19 45t-19 45l-307 307l307 307 q19 19 19 45t-19 45l-102 102q-19 19 -45 19t-45 -19l-454 -454q-19 -19 -19 -45z" />
+<glyph unicode="&#xf138;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM506 288q0 -26 19 -45l102 -102q19 -19 45 -19t45 19l454 454q19 19 19 45t-19 45l-454 454 q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45t19 -45l307 -307l-307 -307q-19 -19 -19 -45z" />
+<glyph unicode="&#xf139;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM250 544q0 -26 19 -45l102 -102q19 -19 45 -19t45 19l307 307l307 -307q19 -19 45 -19t45 19l102 102 q19 19 19 45t-19 45l-454 454q-19 19 -45 19t-45 -19l-454 -454q-19 -19 -19 -45z" />
+<glyph unicode="&#xf13a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM250 736q0 -26 19 -45l454 -454q19 -19 45 -19t45 19l454 454q19 19 19 45t-19 45l-102 102 q-19 19 -45 19t-45 -19l-307 -307l-307 307q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45z" />
+<glyph unicode="&#xf13b;" horiz-adv-x="1408" d="M0 1408h1408l-128 -1438l-578 -162l-574 162zM262 1114l47 -534h612l-22 -228l-197 -53l-196 53l-13 140h-175l22 -278l362 -100h4v1l359 99l50 544h-644l-15 181h674l16 175h-884z" />
+<glyph unicode="&#xf13c;" horiz-adv-x="1792" d="M12 75l71 356h297l-29 -147l422 -161l486 161l68 339h-1208l58 297h1209l38 191h-1208l59 297h1505l-266 -1333l-804 -267z" />
+<glyph unicode="&#xf13d;" horiz-adv-x="1792" d="M0 0v352q0 14 9 23t23 9h352q22 0 30 -20q8 -19 -7 -35l-100 -100q67 -91 189.5 -153.5t271.5 -82.5v647h-192q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h192v163q-58 34 -93 92.5t-35 128.5q0 106 75 181t181 75t181 -75t75 -181q0 -70 -35 -128.5t-93 -92.5v-163h192 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-192v-647q149 20 271.5 82.5t189.5 153.5l-100 100q-15 16 -7 35q8 20 30 20h352q14 0 23 -9t9 -23v-352q0 -22 -20 -30q-8 -2 -12 -2q-13 0 -23 9l-93 93q-119 -143 -318.5 -226.5t-429.5 -83.5t-429.5 83.5t-318.5 226.5 l-93 -93q-9 -9 -23 -9q-4 0 -12 2q-20 8 -20 30zM832 1280q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf13e;" horiz-adv-x="1152" d="M0 96v576q0 40 28 68t68 28h32v320q0 185 131.5 316.5t316.5 131.5t316.5 -131.5t131.5 -316.5q0 -26 -19 -45t-45 -19h-64q-26 0 -45 19t-19 45q0 106 -75 181t-181 75t-181 -75t-75 -181v-320h736q40 0 68 -28t28 -68v-576q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28 t-28 68z" />
+<glyph unicode="&#xf140;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5zM256 640q0 212 150 362t362 150t362 -150t150 -362t-150 -362t-362 -150t-362 150t-150 362zM384 640q0 -159 112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5t-112.5 271.5 t-271.5 112.5t-271.5 -112.5t-112.5 -271.5zM512 640q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181z" />
+<glyph unicode="&#xf141;" horiz-adv-x="1408" d="M0 608v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68zM512 608v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68zM1024 608v192q0 40 28 68t68 28h192 q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf142;" horiz-adv-x="384" d="M0 96v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68zM0 608v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68zM0 1120v192q0 40 28 68t68 28h192q40 0 68 -28 t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf143;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 256q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z M256 575q0 -13 8.5 -22t21.5 -10q154 -11 264 -121t121 -264q1 -13 10 -21.5t22 -8.5h128q13 0 23 10t9 24q-13 232 -177 396t-396 177q-14 1 -24 -9t-10 -23v-128zM256 959q0 -13 9 -22t22 -10q204 -7 378 -111.5t278.5 -278.5t111.5 -378q1 -13 10 -22t22 -9h128 q13 0 23 10q11 9 9 23q-5 154 -56 297.5t-139.5 260t-205 205t-260 139.5t-297.5 56q-14 1 -23 -9q-10 -10 -10 -23v-128z" />
+<glyph unicode="&#xf144;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM512 320q0 -37 32 -56q16 -8 32 -8q17 0 32 9l544 320q32 18 32 55t-32 55l-544 320q-31 19 -64 1 q-32 -19 -32 -56v-640z" />
+<glyph unicode="&#xf145;" horiz-adv-x="1792" d="M54 448.5q0 53.5 37 90.5l907 906q37 37 90.5 37t90.5 -37l125 -125q-56 -56 -56 -136t56 -136t136 -56t136 56l126 -125q37 -37 37 -90.5t-37 -90.5l-907 -908q-37 -37 -90.5 -37t-90.5 37l-126 126q56 56 56 136t-56 136t-136 56t-136 -56l-125 126q-37 37 -37 90.5z M342 512q0 -26 19 -45l362 -362q18 -18 45 -18t45 18l618 618q19 19 19 45t-19 45l-362 362q-18 18 -45 18t-45 -18l-618 -618q-19 -19 -19 -45zM452 512l572 572l316 -316l-572 -572z" />
+<glyph unicode="&#xf146;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 576q0 -26 19 -45t45 -19h896q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-896q-26 0 -45 -19t-19 -45v-128 z" />
+<glyph unicode="&#xf147;" horiz-adv-x="1408" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q119 0 203.5 -84.5t84.5 -203.5v-832q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM128 288q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v832q0 66 -47 113t-113 47h-832q-66 0 -113 -47 t-47 -113v-832zM256 672v64q0 14 9 23t23 9h832q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf148;" horiz-adv-x="1024" d="M3 18q-8 20 4 35l160 192q9 11 25 11h320v640h-192q-40 0 -58 37q-17 37 9 68l320 384q18 22 49 22t49 -22l320 -384q27 -32 9 -68q-18 -37 -58 -37h-192v-864q0 -14 -9 -23t-23 -9h-704q-21 0 -29 18z" />
+<glyph unicode="&#xf149;" horiz-adv-x="1024" d="M3 1261q9 19 29 19h704q13 0 22.5 -9.5t9.5 -23.5v-863h192q40 0 58 -37t-9 -69l-320 -384q-18 -22 -49 -22t-49 22l-320 384q-26 31 -9 69q18 37 58 37h192v640h-320q-14 0 -25 11l-160 192q-13 14 -4 34z" />
+<glyph unicode="&#xf14a;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM218 640q0 -26 19 -45l358 -358q19 -19 45 -19t45 19l614 614q19 19 19 45t-19 45l-102 102q-19 19 -45 19 t-45 -19l-467 -467l-211 211q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45z" />
+<glyph unicode="&#xf14b;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 128h288l544 544l-288 288l-544 -544v-288zM352 320v56l52 52l152 -152l-52 -52h-56v96h-96zM494 494 q-14 13 3 30l291 291q17 17 30 3q14 -13 -3 -30l-291 -291q-17 -17 -30 -3zM864 1024l288 -288l92 92q28 28 28 68t-28 68l-152 152q-28 28 -68 28t-68 -28z" />
+<glyph unicode="&#xf14c;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM282 320q0 -26 19 -45l102 -102q19 -19 45 -19t45 19l534 534l144 -144q18 -19 45 -19q12 0 25 5q39 17 39 59 v480q0 26 -19 45t-45 19h-480q-42 0 -59 -39q-17 -41 14 -70l144 -144l-534 -534q-19 -19 -19 -45z" />
+<glyph unicode="&#xf14d;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 448q0 -181 167 -404q10 -12 25 -12q7 0 13 3q22 9 19 33q-44 354 62 473q46 52 130 75.5t224 23.5v-160 q0 -42 40 -59q12 -5 24 -5q26 0 45 19l352 352q19 19 19 45t-19 45l-352 352q-30 31 -69 14q-40 -17 -40 -59v-160q-119 0 -216 -19.5t-162.5 -51t-114 -79t-76.5 -95.5t-44.5 -109t-21.5 -111.5t-5 -110.5z" />
+<glyph unicode="&#xf14e;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM512 241v542l512 256v-542zM640 448l256 128l-256 128v-256z" />
+<glyph unicode="&#xf150;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 160q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5v960q0 13 -9.5 22.5t-22.5 9.5h-960 q-13 0 -22.5 -9.5t-9.5 -22.5v-960zM391 861q17 35 57 35h640q40 0 57 -35q18 -35 -5 -66l-320 -448q-19 -27 -52 -27t-52 27l-320 448q-23 31 -5 66z" />
+<glyph unicode="&#xf151;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 160q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5v960q0 13 -9.5 22.5t-22.5 9.5h-960 q-13 0 -22.5 -9.5t-9.5 -22.5v-960zM391 419q-18 35 5 66l320 448q19 27 52 27t52 -27l320 -448q23 -31 5 -66q-17 -35 -57 -35h-640q-40 0 -57 35z" />
+<glyph unicode="&#xf152;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 160q0 -14 9 -23t23 -9h960q14 0 23 9t9 23v960q0 14 -9 23t-23 9h-960q-14 0 -23 -9t-9 -23v-960z M512 320v640q0 40 35 57q35 18 66 -5l448 -320q27 -19 27 -52t-27 -52l-448 -320q-31 -23 -66 -5q-35 17 -35 57z" />
+<glyph unicode="&#xf153;" horiz-adv-x="1024" d="M0 514v113q0 13 9.5 22.5t22.5 9.5h66q-2 57 1 105h-67q-14 0 -23 9t-9 23v114q0 14 9 23t23 9h98q67 210 243.5 338t400.5 128q102 0 194 -23q11 -3 20 -15q6 -11 3 -24l-43 -159q-3 -13 -14 -19.5t-24 -2.5l-4 1q-4 1 -11.5 2.5l-17.5 3.5t-22.5 3.5t-26 3t-29 2.5 t-29.5 1q-126 0 -226 -64t-150 -176h468q16 0 25 -12q10 -12 7 -26l-24 -114q-5 -26 -32 -26h-488q-3 -37 0 -105h459q15 0 25 -12q9 -12 6 -27l-24 -112q-2 -11 -11 -18.5t-20 -7.5h-387q48 -117 149.5 -185.5t228.5 -68.5q18 0 36 1.5t33.5 3.5t29.5 4.5t24.5 5t18.5 4.5 l12 3l5 2q13 5 26 -2q12 -7 15 -21l35 -159q3 -12 -3 -22.5t-17 -14.5l-5 -1q-4 -2 -10.5 -3.5t-16 -4.5t-21.5 -5.5t-25.5 -5t-30 -5t-33.5 -4.5t-36.5 -3t-38.5 -1q-234 0 -409 130.5t-238 351.5h-95q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf154;" horiz-adv-x="1024" d="M0 32v150q0 13 9.5 22.5t22.5 9.5h97v383h-95q-14 0 -23 9.5t-9 22.5v131q0 14 9 23t23 9h95v223q0 171 123.5 282t314.5 111q185 0 335 -125q9 -8 10 -20.5t-7 -22.5l-103 -127q-9 -11 -22 -12q-13 -2 -23 7q-5 5 -26 19t-69 32t-93 18q-85 0 -137 -47t-52 -123v-215 h305q13 0 22.5 -9t9.5 -23v-131q0 -13 -9.5 -22.5t-22.5 -9.5h-305v-379h414v181q0 13 9 22.5t23 9.5h162q14 0 23 -9.5t9 -22.5v-367q0 -14 -9 -23t-23 -9h-956q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf155;" horiz-adv-x="1024" d="M52 171l103 135q7 10 23 12q15 2 24 -9l2 -2q113 -99 243 -125q37 -8 74 -8q81 0 142.5 43t61.5 122q0 28 -15 53t-33.5 42t-58.5 37.5t-66 32t-80 32.5q-39 16 -61.5 25t-61.5 26.5t-62.5 31t-56.5 35.5t-53.5 42.5t-43.5 49t-35.5 58t-21 66.5t-8.5 78q0 138 98 242 t255 134v180q0 13 9.5 22.5t22.5 9.5h135q14 0 23 -9t9 -23v-176q57 -6 110.5 -23t87 -33.5t63.5 -37.5t39 -29t15 -14q17 -18 5 -38l-81 -146q-8 -15 -23 -16q-14 -3 -27 7q-3 3 -14.5 12t-39 26.5t-58.5 32t-74.5 26t-85.5 11.5q-95 0 -155 -43t-60 -111q0 -26 8.5 -48 t29.5 -41.5t39.5 -33t56 -31t60.5 -27t70 -27.5q53 -20 81 -31.5t76 -35t75.5 -42.5t62 -50t53 -63.5t31.5 -76.5t13 -94q0 -153 -99.5 -263.5t-258.5 -136.5v-175q0 -14 -9 -23t-23 -9h-135q-13 0 -22.5 9.5t-9.5 22.5v175q-66 9 -127.5 31t-101.5 44.5t-74 48t-46.5 37.5 t-17.5 18q-17 21 -2 41z" />
+<glyph unicode="&#xf156;" horiz-adv-x="898" d="M0 605v127q0 13 9.5 22.5t22.5 9.5h112q132 0 212.5 43t102.5 125h-427q-14 0 -23 9t-9 23v102q0 14 9 23t23 9h413q-57 113 -268 113h-145q-13 0 -22.5 9.5t-9.5 22.5v133q0 14 9 23t23 9h832q14 0 23 -9t9 -23v-102q0 -14 -9 -23t-23 -9h-233q47 -61 64 -144h171 q14 0 23 -9t9 -23v-102q0 -14 -9 -23t-23 -9h-168q-23 -144 -129 -234t-276 -110q167 -178 459 -536q14 -16 4 -34q-8 -18 -29 -18h-195q-16 0 -25 12q-306 367 -498 571q-9 9 -9 22z" />
+<glyph unicode="&#xf157;" horiz-adv-x="1027" d="M4 1360q-8 16 0 32q10 16 28 16h194q19 0 29 -18l215 -425q19 -38 56 -125q10 24 30.5 68t27.5 61l191 420q8 19 29 19h191q17 0 27 -16q9 -14 1 -31l-313 -579h215q13 0 22.5 -9.5t9.5 -22.5v-104q0 -14 -9.5 -23t-22.5 -9h-290v-85h290q13 0 22.5 -9.5t9.5 -22.5v-103 q0 -14 -9.5 -23t-22.5 -9h-290v-330q0 -13 -9.5 -22.5t-22.5 -9.5h-172q-13 0 -22.5 9t-9.5 23v330h-288q-13 0 -22.5 9t-9.5 23v103q0 13 9.5 22.5t22.5 9.5h288v85h-288q-13 0 -22.5 9t-9.5 23v104q0 13 9.5 22.5t22.5 9.5h214z" />
+<glyph unicode="&#xf158;" horiz-adv-x="1280" d="M0 256v128q0 14 9 23t23 9h224v118h-224q-14 0 -23 9t-9 23v149q0 13 9 22.5t23 9.5h224v629q0 14 9 23t23 9h539q200 0 326.5 -122t126.5 -315t-126.5 -315t-326.5 -122h-340v-118h505q14 0 23 -9t9 -23v-128q0 -14 -9 -23t-23 -9h-505v-192q0 -14 -9.5 -23t-22.5 -9 h-167q-14 0 -23 9t-9 23v192h-224q-14 0 -23 9t-9 23zM487 747h320q106 0 171 62t65 162t-65 162t-171 62h-320v-448z" />
+<glyph unicode="&#xf159;" horiz-adv-x="1792" d="M0 672v64q0 14 9 23t23 9h175l-33 128h-142q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h109l-89 344q-5 15 5 28q10 12 26 12h137q26 0 31 -24l90 -360h359l97 360q7 24 31 24h126q24 0 31 -24l98 -360h365l93 360q5 24 31 24h137q16 0 26 -12q10 -13 5 -28l-91 -344h111 q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-145l-34 -128h179q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-213l-164 -616q-7 -24 -31 -24h-159q-24 0 -31 24l-166 616h-209l-167 -616q-7 -24 -31 -24h-159q-11 0 -19.5 7t-10.5 17l-160 616h-208q-14 0 -23 9t-9 23z M373 896l32 -128h225l35 128h-292zM436 640l75 -300q1 -1 1 -3t1 -3q0 1 0.5 3.5t0.5 3.5l81 299h-159zM822 768h139l-35 128h-70zM1118 896l34 -128h230l33 128h-297zM1187 640l81 -299q0 -1 0.5 -3.5t1.5 -3.5q0 1 0.5 3t0.5 3l78 300h-162z" />
+<glyph unicode="&#xf15a;" horiz-adv-x="1280" d="M56 0l31 183h111q50 0 58 51v402h16q-6 1 -16 1v287q-13 68 -89 68h-111v164l212 -1q64 0 97 1v252h154v-247q82 2 122 2v245h154v-252q79 -7 140 -22.5t113 -45t82.5 -78t36.5 -114.5q18 -182 -131 -258q117 -28 175 -103t45 -214q-7 -71 -32.5 -125t-64.5 -89 t-97 -58.5t-121.5 -34.5t-145.5 -15v-255h-154v251q-80 0 -122 1v-252h-154v255q-18 0 -54 0.5t-55 0.5h-200zM522 182q8 0 37 -0.5t48 -0.5t53 1.5t58.5 4t57 8.5t55.5 14t47.5 21t39.5 30t24.5 40t9.5 51q0 36 -15 64t-37 46t-57.5 30.5t-65.5 18.5t-74 9t-69 3t-64.5 -1 t-47.5 -1v-338zM522 674q5 0 34.5 -0.5t46.5 0t50 2t55 5.5t51.5 11t48.5 18.5t37 27t27 38.5t9 51q0 33 -12.5 58.5t-30.5 42t-48 28t-55 16.5t-61.5 8t-58 2.5t-54 -1t-39.5 -0.5v-307z" />
+<glyph unicode="&#xf15b;" d="M0 -160v1600q0 40 28 68t68 28h800v-544q0 -40 28 -68t68 -28h544v-1056q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM1024 1024v472q22 -14 36 -28l408 -408q14 -14 28 -36h-472z" />
+<glyph unicode="&#xf15c;" d="M0 -160v1600q0 40 28 68t68 28h800v-544q0 -40 28 -68t68 -28h544v-1056q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM384 160q0 -14 9 -23t23 -9h704q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64zM384 416q0 -14 9 -23t23 -9h704 q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64zM384 672q0 -14 9 -23t23 -9h704q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64zM1024 1024v472q22 -14 36 -28l408 -408q14 -14 28 -36h-472z" />
+<glyph unicode="&#xf15d;" horiz-adv-x="1664" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM899 768v106h70l230 662h162l230 -662h70v-106h-288v106h75l-47 144h-243l-47 -144h75v-106 h-287zM988 -166l369 529q12 18 21 27l11 9v3q-2 0 -6.5 -0.5t-7.5 -0.5q-12 -3 -30 -3h-232v-115h-120v229h567v-89l-369 -530q-6 -8 -21 -26l-11 -11v-2l14 2q9 2 30 2h248v119h121v-233h-584v90zM1191 1128h177l-72 218l-12 47q-2 16 -2 20h-4l-3 -20q0 -1 -3.5 -18 t-7.5 -29z" />
+<glyph unicode="&#xf15e;" horiz-adv-x="1664" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM899 -150h70l230 662h162l230 -662h70v-106h-288v106h75l-47 144h-243l-47 -144h75v-106h-287 v106zM988 768v90l369 529q12 18 21 27l11 9v3q-2 0 -6.5 -0.5t-7.5 -0.5q-12 -3 -30 -3h-232v-115h-120v229h567v-89l-369 -530q-6 -8 -21 -26l-11 -10v-3l14 3q9 1 30 1h248v119h121v-233h-584zM1191 104h177l-72 218l-12 47q-2 16 -2 20h-4l-3 -20q0 -1 -3.5 -18t-7.5 -29 z" />
+<glyph unicode="&#xf160;" horiz-adv-x="1792" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM896 -32q0 14 9 23t23 9h832q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9 t-9 23v192zM896 288v192q0 14 9 23t23 9h640q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-640q-14 0 -23 9t-9 23zM896 800v192q0 14 9 23t23 9h448q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-448q-14 0 -23 9t-9 23zM896 1312v192q0 14 9 23t23 9h256q14 0 23 -9t9 -23 v-192q0 -14 -9 -23t-23 -9h-256q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf161;" horiz-adv-x="1792" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM896 -32q0 14 9 23t23 9h256q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-256q-14 0 -23 9 t-9 23v192zM896 288v192q0 14 9 23t23 9h448q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-448q-14 0 -23 9t-9 23zM896 800v192q0 14 9 23t23 9h640q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-640q-14 0 -23 9t-9 23zM896 1312v192q0 14 9 23t23 9h832q14 0 23 -9t9 -23 v-192q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf162;" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM946 261q0 105 72 178t181 73q123 0 205 -94.5t82 -252.5q0 -62 -13 -121.5t-41 -114 t-68 -95.5t-98.5 -65.5t-127.5 -24.5q-62 0 -108 16q-24 8 -42 15l39 113q15 -7 31 -11q37 -13 75 -13q84 0 134.5 58.5t66.5 145.5h-2q-21 -23 -61.5 -37t-84.5 -14q-106 0 -173 71.5t-67 172.5zM976 1351l192 185h123v-654h165v-114h-469v114h167v432q0 7 0.5 19t0.5 17 v16h-2l-7 -12q-8 -13 -26 -31l-62 -58zM1085 261q0 -57 36.5 -95t104.5 -38q50 0 85 27t35 68q0 63 -44 116t-103 53q-52 0 -83 -37t-31 -94z" />
+<glyph unicode="&#xf163;" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM946 1285q0 105 72 178t181 73q123 0 205 -94.5t82 -252.5q0 -62 -13 -121.5t-41 -114 t-68 -95.5t-98.5 -65.5t-127.5 -24.5q-62 0 -108 16q-24 8 -42 15l39 113q15 -7 31 -11q37 -13 75 -13q84 0 134.5 58.5t66.5 145.5h-2q-21 -23 -61.5 -37t-84.5 -14q-106 0 -173 71.5t-67 172.5zM976 327l192 185h123v-654h165v-114h-469v114h167v432q0 7 0.5 19t0.5 17v16 h-2l-7 -12q-8 -13 -26 -31l-62 -58zM1085 1285q0 -57 36.5 -95t104.5 -38q50 0 85 27t35 68q0 63 -44 116t-103 53q-52 0 -83 -37t-31 -94z" />
+<glyph unicode="&#xf164;" horiz-adv-x="1664" d="M0 64v640q0 26 19 45t45 19h288q26 0 45 -19t19 -45v-640q0 -26 -19 -45t-45 -19h-288q-26 0 -45 19t-19 45zM128 192q0 -27 18.5 -45.5t45.5 -18.5q26 0 45 18.5t19 45.5q0 26 -19 45t-45 19q-27 0 -45.5 -19t-18.5 -45zM480 64v641q0 25 18 43.5t43 20.5q24 2 76 59 t101 121q68 87 101 120q18 18 31 48t17.5 48.5t13.5 60.5q7 39 12.5 61t19.5 52t34 50q19 19 45 19q46 0 82.5 -10.5t60 -26t40 -40.5t24 -45t12 -50t5 -45t0.5 -39q0 -38 -9.5 -76t-19 -60t-27.5 -56q-3 -6 -10 -18t-11 -22t-8 -24h277q78 0 135 -57t57 -135 q0 -86 -55 -149q15 -44 15 -76q3 -76 -43 -137q17 -56 0 -117q-15 -57 -54 -94q9 -112 -49 -181q-64 -76 -197 -78h-36h-76h-17q-66 0 -144 15.5t-121.5 29t-120.5 39.5q-123 43 -158 44q-26 1 -45 19.5t-19 44.5z" />
+<glyph unicode="&#xf165;" horiz-adv-x="1664" d="M0 448q0 -26 19 -45t45 -19h288q26 0 45 19t19 45v640q0 26 -19 45t-45 19h-288q-26 0 -45 -19t-19 -45v-640zM128 960q0 27 18.5 45.5t45.5 18.5q26 0 45 -18.5t19 -45.5q0 -26 -19 -45t-45 -19q-27 0 -45.5 19t-18.5 45zM480 447v641q0 26 19 44.5t45 19.5q35 1 158 44 q77 26 120.5 39.5t121.5 29t144 15.5h17h76h36q133 -2 197 -78q58 -69 49 -181q39 -37 54 -94q17 -61 0 -117q46 -61 43 -137q0 -32 -15 -76q55 -61 55 -149q-1 -78 -57.5 -135t-134.5 -57h-277q4 -14 8 -24t11 -22t10 -18q18 -37 27 -57t19 -58.5t10 -76.5q0 -24 -0.5 -39 t-5 -45t-12 -50t-24 -45t-40 -40.5t-60 -26t-82.5 -10.5q-26 0 -45 19q-20 20 -34 50t-19.5 52t-12.5 61q-9 42 -13.5 60.5t-17.5 48.5t-31 48q-33 33 -101 120q-49 64 -101 121t-76 59q-25 2 -43 20.5t-18 43.5z" />
+<glyph unicode="&#xf166;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM218 366q0 -176 20 -260q10 -43 42.5 -73t75.5 -35q137 -15 412 -15t412 15q43 5 75.5 35t42.5 73 q20 84 20 260q0 177 -19 260q-10 44 -43 73.5t-76 34.5q-136 15 -412 15q-275 0 -411 -15q-44 -5 -76.5 -34.5t-42.5 -73.5q-20 -87 -20 -260zM300 551v70h232v-70h-80v-423h-74v423h-78zM396 1313l24 -69t23 -69q35 -103 46 -158v-201h74v201l90 296h-75l-51 -195l-53 195 h-78zM542 205v290h66v-270q0 -24 1 -26q1 -15 15 -15q20 0 42 31v280h67v-367h-67v40q-39 -45 -76 -45q-33 0 -42 28q-6 16 -6 54zM654 936q0 -58 21 -87q27 -38 78 -38q49 0 78 38q21 27 21 87v130q0 58 -21 87q-29 38 -78 38q-51 0 -78 -38q-21 -29 -21 -87v-130zM721 923 v156q0 52 32 52t32 -52v-156q0 -51 -32 -51t-32 51zM790 128v493h67v-161q32 40 68 40q41 0 53 -42q7 -21 7 -74v-146q0 -52 -7 -73q-12 -42 -53 -42q-35 0 -68 41v-36h-67zM857 200q16 -16 33 -16q29 0 29 49v157q0 50 -29 50q-17 0 -33 -16v-224zM907 893q0 -37 6 -55 q11 -27 43 -27q36 0 77 45v-40h67v370h-67v-283q-22 -31 -42 -31q-15 0 -16 16q-1 2 -1 26v272h-67v-293zM1037 247v129q0 59 20 86q29 38 80 38t78 -38q21 -28 21 -86v-76h-133v-65q0 -51 34 -51q24 0 30 26q0 1 0.5 7t0.5 16.5v21.5h68v-9q0 -29 -2 -43q-3 -22 -15 -40 q-27 -40 -80 -40q-52 0 -81 38q-21 27 -21 86zM1103 355h66v34q0 51 -33 51t-33 -51v-34z" />
+<glyph unicode="&#xf167;" d="M27 260q0 234 26 350q14 59 58 99t103 47q183 20 554 20t555 -20q58 -7 102.5 -47t57.5 -99q26 -112 26 -350q0 -234 -26 -350q-14 -59 -58 -99t-102 -46q-184 -21 -555 -21t-555 21q-58 6 -102.5 46t-57.5 99q-26 112 -26 350zM138 509h105v-569h100v569h107v94h-312 v-94zM266 1536h106l71 -263l68 263h102l-121 -399v-271h-100v271q-14 74 -61 212q-37 103 -65 187zM463 43q0 -49 8 -73q12 -37 58 -37q48 0 102 61v-54h89v494h-89v-378q-30 -42 -57 -42q-18 0 -21 21q-1 3 -1 35v364h-89v-391zM614 1028v175q0 80 28 117q38 51 105 51 q69 0 106 -51q28 -37 28 -117v-175q0 -81 -28 -118q-37 -51 -106 -51q-67 0 -105 51q-28 38 -28 118zM704 1011q0 -70 43 -70t43 70v210q0 69 -43 69t-43 -69v-210zM798 -60h89v48q45 -55 93 -55q54 0 71 55q9 27 9 100v197q0 73 -9 99q-17 56 -71 56q-50 0 -93 -54v217h-89 v-663zM887 36v301q22 22 45 22q39 0 39 -67v-211q0 -67 -39 -67q-23 0 -45 22zM955 971v394h91v-367q0 -33 1 -35q3 -22 21 -22q27 0 57 43v381h91v-499h-91v55q-53 -62 -103 -62q-46 0 -59 37q-8 24 -8 75zM1130 100q0 -79 29 -116q39 -51 108 -51q72 0 108 53q18 27 21 54 q2 9 2 58v13h-91q0 -51 -2 -61q-7 -36 -40 -36q-46 0 -46 69v87h179v103q0 79 -27 116q-39 51 -106 51q-68 0 -107 -51q-28 -37 -28 -116v-173zM1219 245v46q0 68 45 68t45 -68v-46h-90z" />
+<glyph unicode="&#xf168;" horiz-adv-x="1408" d="M5 384q-10 17 0 36l253 448q1 0 0 1l-161 279q-12 22 -1 37q9 15 32 15h239q40 0 66 -45l164 -286q-10 -18 -257 -456q-27 -46 -65 -46h-239q-21 0 -31 17zM536 539q18 32 531 942q25 45 64 45h241q22 0 31 -15q11 -16 0 -37l-528 -934v-1l336 -615q11 -20 1 -37 q-10 -15 -32 -15h-239q-42 0 -66 45z" />
+<glyph unicode="&#xf169;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM227 396q8 -13 24 -13h185q31 0 50 36l199 352q0 1 -126 222q-21 34 -52 34h-184q-18 0 -26 -11q-7 -12 1 -29 l125 -216v-1l-196 -346q-9 -14 0 -28zM638 516q1 -2 262 -481q20 -35 52 -35h184q18 0 25 12q8 13 -1 28l-260 476v1l409 723q8 16 0 28q-7 12 -24 12h-187q-30 0 -49 -35z" />
+<glyph unicode="&#xf16a;" horiz-adv-x="1792" d="M0 640q0 96 1 150t8.5 136.5t22.5 147.5q16 73 69 123t124 58q222 25 671 25t671 -25q71 -8 124.5 -58t69.5 -123q14 -65 21.5 -147.5t8.5 -136.5t1 -150t-1 -150t-8.5 -136.5t-22.5 -147.5q-16 -73 -69 -123t-124 -58q-222 -25 -671 -25t-671 25q-71 8 -124.5 58 t-69.5 123q-14 65 -21.5 147.5t-8.5 136.5t-1 150zM640 320q0 -38 33 -56q16 -8 31 -8q20 0 34 10l512 320q30 17 30 54t-30 54l-512 320q-31 20 -65 2q-33 -18 -33 -56v-640z" />
+<glyph unicode="&#xf16b;" horiz-adv-x="1792" d="M64 558l338 271l494 -305l-342 -285zM64 1099l490 319l342 -285l-494 -304zM407 166v108l147 -96l342 284v2l1 -1l1 1v-2l343 -284l147 96v-108l-490 -293v-1l-1 1l-1 -1v1zM896 524l494 305l338 -271l-489 -319zM896 1133l343 285l489 -319l-338 -270z" />
+<glyph unicode="&#xf16c;" horiz-adv-x="1408" d="M0 -255v736h121v-618h928v618h120v-701l-1 -35v-1h-1132l-35 1h-1zM221 -17v151l707 1v-151zM227 243l14 150l704 -65l-13 -150zM270 563l39 146l683 -183l-39 -146zM395 928l77 130l609 -360l-77 -130zM707 1303l125 86l398 -585l-124 -85zM1136 1510l149 26l121 -697 l-149 -26z" />
+<glyph unicode="&#xf16d;" d="M0 69v1142q0 81 58 139t139 58h1142q81 0 139 -58t58 -139v-1142q0 -81 -58 -139t-139 -58h-1142q-81 0 -139 58t-58 139zM171 110q0 -26 17.5 -43.5t43.5 -17.5h1069q25 0 43 17.5t18 43.5v648h-135q20 -63 20 -131q0 -126 -64 -232.5t-174 -168.5t-240 -62 q-197 0 -337 135.5t-140 327.5q0 68 20 131h-141v-648zM461 643q0 -124 90.5 -211.5t217.5 -87.5q128 0 218.5 87.5t90.5 211.5t-90.5 211.5t-218.5 87.5q-127 0 -217.5 -87.5t-90.5 -211.5zM1050 1003q0 -29 20 -49t49 -20h174q29 0 49 20t20 49v165q0 28 -20 48.5 t-49 20.5h-174q-29 0 -49 -20.5t-20 -48.5v-165z" />
+<glyph unicode="&#xf16e;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM274 640q0 -88 62 -150t150 -62t150 62t62 150t-62 150t-150 62t-150 -62t-62 -150zM838 640q0 -88 62 -150 t150 -62t150 62t62 150t-62 150t-150 62t-150 -62t-62 -150z" />
+<glyph unicode="&#xf170;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM309 384h94l104 160h522l104 -160h94l-459 691zM567 608l201 306l201 -306h-402z" />
+<glyph unicode="&#xf171;" horiz-adv-x="1408" d="M0 1222q3 26 17.5 48.5t31.5 37.5t45 30t46 22.5t48 18.5q125 46 313 64q379 37 676 -50q155 -46 215 -122q16 -20 16.5 -51t-5.5 -54q-26 -167 -111 -655q-5 -30 -27 -56t-43.5 -40t-54.5 -31q-252 -126 -610 -88q-248 27 -394 139q-15 12 -25.5 26.5t-17 35t-9 34 t-6 39.5t-5.5 35q-9 50 -26.5 150t-28 161.5t-23.5 147.5t-22 158zM173 285l6 16l18 9q223 -148 506.5 -148t507.5 148q21 -6 24 -23t-5 -45t-8 -37q-8 -26 -15.5 -76.5t-14 -84t-28.5 -70t-58 -56.5q-86 -48 -189.5 -71.5t-202 -22t-201.5 18.5q-46 8 -81.5 18t-76.5 27 t-73 43.5t-52 61.5q-25 96 -57 292zM243 1240q30 -28 76 -45.5t73.5 -22t87.5 -11.5q228 -29 448 -1q63 8 89.5 12t72.5 21.5t75 46.5q-20 27 -56 44.5t-58 22t-71 12.5q-291 47 -566 -2q-43 -7 -66 -12t-55 -22t-50 -43zM481 657q4 -91 77.5 -155t165.5 -56q91 8 152 84 t50 168q-14 107 -113 164t-197 13q-63 -28 -100.5 -88.5t-34.5 -129.5zM599 710q14 41 52 58q36 18 72.5 12t64 -35.5t27.5 -67.5q8 -63 -50.5 -101t-111.5 -6q-39 17 -53.5 58t-0.5 82z" />
+<glyph unicode="&#xf172;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM260 1060q8 -68 19 -138t29 -171t24 -137q1 -5 5 -31t7 -36t12 -27t22 -28q105 -80 284 -100q259 -28 440 63 q24 13 39.5 23t31 29t19.5 40q48 267 80 473q9 53 -8 75q-43 55 -155 88q-216 63 -487 36q-132 -12 -226 -46q-38 -15 -59.5 -25t-47 -34t-29.5 -54zM385 384q26 -154 41 -210q47 -81 204 -108q249 -46 428 53q34 19 49 51.5t22.5 85.5t12.5 71q0 7 5.5 26.5t3 32 t-17.5 16.5q-161 -106 -365 -106t-366 106l-12 -6zM436 1073q13 19 36 31t40 15.5t47 8.5q198 35 408 1q33 -5 51 -8.5t43 -16t39 -31.5q-20 -21 -53.5 -34t-53 -16t-63.5 -8q-155 -20 -324 0q-44 6 -63 9.5t-52.5 16t-54.5 32.5zM607 653q-2 49 25.5 93t72.5 64 q70 31 141.5 -10t81.5 -118q8 -66 -36 -121t-110 -61t-119 40t-56 113zM687.5 660.5q0.5 -52.5 43.5 -70.5q39 -23 81 4t36 72q0 43 -41 66t-77 1q-43 -20 -42.5 -72.5z" />
+<glyph unicode="&#xf173;" horiz-adv-x="1024" d="M78 779v217q91 30 155 84q64 55 103 132q39 78 54 196h219v-388h364v-241h-364v-394q0 -136 14 -172q13 -37 52 -60q50 -31 117 -31q117 0 232 76v-242q-102 -48 -178 -65q-77 -19 -173 -19q-105 0 -186 27q-78 25 -138 75q-58 51 -79 105q-22 54 -22 161v539h-170z" />
+<glyph unicode="&#xf174;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM413 744h127v-404q0 -78 17 -121q17 -42 59 -78q43 -37 104 -57q62 -20 140 -20q67 0 129 14q57 13 134 49v181 q-88 -56 -174 -56q-51 0 -88 23q-29 17 -39 45q-11 30 -11 129v295h274v181h-274v291h-164q-11 -90 -40 -147t-78 -99q-48 -40 -116 -63v-163z" />
+<glyph unicode="&#xf175;" horiz-adv-x="768" d="M3 237q9 19 29 19h224v1248q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1248h224q21 0 29 -19t-5 -35l-350 -384q-10 -10 -23 -10q-14 0 -24 10l-355 384q-13 16 -5 35z" />
+<glyph unicode="&#xf176;" horiz-adv-x="768" d="M3 1043q-8 19 5 35l350 384q10 10 23 10q14 0 24 -10l355 -384q13 -16 5 -35q-9 -19 -29 -19h-224v-1248q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v1248h-224q-21 0 -29 19z" />
+<glyph unicode="&#xf177;" horiz-adv-x="1792" d="M64 637q0 14 10 24l384 354q16 14 35 6q19 -9 19 -29v-224h1248q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-1248v-224q0 -21 -19 -29t-35 5l-384 350q-10 10 -10 23z" />
+<glyph unicode="&#xf178;" horiz-adv-x="1792" d="M0 544v192q0 14 9 23t23 9h1248v224q0 21 19 29t35 -5l384 -350q10 -10 10 -23q0 -14 -10 -24l-384 -354q-16 -14 -35 -6q-19 9 -19 29v224h-1248q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf179;" horiz-adv-x="1408" d="M0 634q0 228 113 374q112 144 284 144q72 0 177 -30q104 -30 138 -30q45 0 143 34q102 34 173 34q119 0 213 -65q52 -36 104 -100q-79 -67 -114 -118q-65 -94 -65 -207q0 -124 69 -223t158 -126q-39 -125 -123 -250q-129 -196 -257 -196q-49 0 -140 32q-86 32 -151 32 q-61 0 -142 -33q-81 -34 -132 -34q-152 0 -301 259q-147 261 -147 503zM683 1131q3 149 78 257q74 107 250 148q1 -3 2.5 -11t2.5 -11q0 -4 0.5 -10t0.5 -10q0 -61 -29 -136q-30 -75 -93 -138q-54 -54 -108 -72q-37 -11 -104 -17z" />
+<glyph unicode="&#xf17a;" horiz-adv-x="1664" d="M0 -27v557h682v-651zM0 614v565l682 94v-659h-682zM757 -131v661h907v-786zM757 614v669l907 125v-794h-907z" />
+<glyph unicode="&#xf17b;" horiz-adv-x="1408" d="M0 337v430q0 42 30 72t73 30q42 0 72 -30t30 -72v-430q0 -43 -29.5 -73t-72.5 -30t-73 30t-30 73zM241 886q0 117 64 215.5t172 153.5l-71 131q-7 13 5 20q13 6 20 -6l72 -132q95 42 201 42t201 -42l72 132q7 12 20 6q12 -7 5 -20l-71 -131q107 -55 171 -153.5t64 -215.5 h-925zM245 184v666h918v-666q0 -46 -32 -78t-77 -32h-75v-227q0 -43 -30 -73t-73 -30t-73 30t-30 73v227h-138v-227q0 -43 -30 -73t-73 -30q-42 0 -72 30t-30 73l-1 227h-74q-46 0 -78 32t-32 78zM455 1092q0 -16 11 -27.5t27 -11.5t27.5 11.5t11.5 27.5t-11.5 27.5 t-27.5 11.5t-27 -11.5t-11 -27.5zM876 1092q0 -16 11.5 -27.5t27.5 -11.5t27 11.5t11 27.5t-11 27.5t-27 11.5t-27.5 -11.5t-11.5 -27.5zM1203 337v430q0 43 30 72.5t72 29.5q43 0 73 -29.5t30 -72.5v-430q0 -43 -30 -73t-73 -30q-42 0 -72 30t-30 73z" />
+<glyph unicode="&#xf17c;" d="M11 -115q-10 23 7 66.5t18 54.5q1 16 -4 40t-10 42.5t-4.5 36.5t10.5 27q14 12 57 14t60 12q30 18 42 35t12 51q21 -73 -32 -106q-32 -20 -83 -15q-34 3 -43 -10q-13 -15 5 -57q2 -6 8 -18t8.5 -18t4.5 -17t1 -22q0 -15 -17 -49t-14 -48q3 -17 37 -26q20 -6 84.5 -18.5 t99.5 -20.5q24 -6 74 -22t82.5 -23t55.5 -4q43 6 64.5 28t23 48t-7.5 58.5t-19 52t-20 36.5q-121 190 -169 242q-68 74 -113 40q-11 -9 -15 15q-3 16 -2 38q1 29 10 52t24 47t22 42q8 21 26.5 72t29.5 78t30 61t39 54q110 143 124 195q-12 112 -16 310q-2 90 24 151.5 t106 104.5q39 21 104 21q53 1 106 -13.5t89 -41.5q57 -42 91.5 -121.5t29.5 -147.5q-5 -95 30 -214q34 -113 133 -218q55 -59 99.5 -163t59.5 -191q8 -49 5 -84.5t-12 -55.5t-20 -22q-10 -2 -23.5 -19t-27 -35.5t-40.5 -33.5t-61 -14q-18 1 -31.5 5t-22.5 13.5t-13.5 15.5 t-11.5 20.5t-9 19.5q-22 37 -41 30t-28 -49t7 -97q20 -70 1 -195q-10 -65 18 -100.5t73 -33t85 35.5q59 49 89.5 66.5t103.5 42.5q53 18 77 36.5t18.5 34.5t-25 28.5t-51.5 23.5q-33 11 -49.5 48t-15 72.5t15.5 47.5q1 -31 8 -56.5t14.5 -40.5t20.5 -28.5t21 -19t21.5 -13 t16.5 -9.5q20 -12 31 -24.5t12 -24t-2.5 -22.5t-15.5 -22t-23.5 -19.5t-30 -18.5t-31.5 -16.5t-32 -15.5t-27 -13q-38 -19 -85.5 -56t-75.5 -64q-17 -16 -68 -19.5t-89 14.5q-18 9 -29.5 23.5t-16.5 25.5t-22 19.5t-47 9.5q-44 1 -130 1q-19 0 -57 -1.5t-58 -2.5 q-44 -1 -79.5 -15t-53.5 -30t-43.5 -28.5t-53.5 -11.5q-29 1 -111 31t-146 43q-19 4 -51 9.5t-50 9t-39.5 9.5t-33.5 14.5t-17 19.5zM321 495q-36 -65 10 -166q5 -12 25 -28t24 -20q20 -23 104 -90.5t93 -76.5q16 -15 17.5 -38t-14 -43t-45.5 -23q8 -15 29 -44.5t28 -54 t7 -70.5q46 24 7 92q-4 8 -10.5 16t-9.5 12t-2 6q3 5 13 9.5t20 -2.5q46 -52 166 -36q133 15 177 87q23 38 34 30q12 -6 10 -52q-1 -25 -23 -92q-9 -23 -6 -37.5t24 -15.5q3 19 14.5 77t13.5 90q2 21 -6.5 73.5t-7.5 97t23 70.5q15 18 51 18q1 37 34.5 53t72.5 10.5 t60 -22.5q0 18 -55 42q4 15 7.5 27.5t5 26t3 21.5t0.5 22.5t-1 19.5t-3.5 22t-4 20.5t-5 25t-5.5 26.5q-10 48 -47 103t-72 75q24 -20 57 -83q87 -162 54 -278q-11 -40 -50 -42q-31 -4 -38.5 18.5t-8 83.5t-11.5 107q-9 39 -19.5 69t-19.5 45.5t-15.5 24.5t-13 15t-7.5 7 q-14 62 -31 103t-29.5 56t-23.5 33t-15 40q-4 21 6 53.5t4.5 49.5t-44.5 25q-15 3 -44.5 18t-35.5 16q-8 1 -11 26t8 51t36 27q37 3 51 -30t4 -58q-11 -19 -2 -26.5t30 -0.5q13 4 13 36v37q-5 30 -13.5 50t-21 30.5t-23.5 15t-27 7.5q-107 -8 -89 -134q0 -15 -1 -15 q-9 9 -29.5 10.5t-33 -0.5t-15.5 5q1 57 -16 90t-45 34q-27 1 -41.5 -27.5t-16.5 -59.5q-1 -15 3.5 -37t13 -37.5t15.5 -13.5q10 3 16 14q4 9 -7 8q-7 0 -15.5 14.5t-9.5 33.5q-1 22 9 37t34 14q17 0 27 -21t9.5 -39t-1.5 -22q-22 -15 -31 -29q-8 -12 -27.5 -23.5 t-20.5 -12.5q-13 -14 -15.5 -27t7.5 -18q14 -8 25 -19.5t16 -19t18.5 -13t35.5 -6.5q47 -2 102 15q2 1 23 7t34.5 10.5t29.5 13t21 17.5q9 14 20 8q5 -3 6.5 -8.5t-3 -12t-16.5 -9.5q-20 -6 -56.5 -21.5t-45.5 -19.5q-44 -19 -70 -23q-25 -5 -79 2q-10 2 -9 -2t17 -19 q25 -23 67 -22q17 1 36 7t36 14t33.5 17.5t30 17t24.5 12t17.5 2.5t8.5 -11q0 -2 -1 -4.5t-4 -5t-6 -4.5t-8.5 -5t-9 -4.5t-10 -5t-9.5 -4.5q-28 -14 -67.5 -44t-66.5 -43t-49 -1q-21 11 -63 73q-22 31 -25 22q-1 -3 -1 -10q0 -25 -15 -56.5t-29.5 -55.5t-21 -58t11.5 -63 q-23 -6 -62.5 -90t-47.5 -141q-2 -18 -1.5 -69t-5.5 -59q-8 -24 -29 -3q-32 31 -36 94q-2 28 4 56q4 19 -1 18zM372 630q4 -1 12.5 7t12.5 18q1 3 2 7t2 6t1.5 4.5t0.5 4v3t-1 2.5t-3 2q-4 1 -6 -3t-4.5 -12.5t-5.5 -13.5t-10 -13q-7 -10 -1 -12zM603 1190q2 -5 5 -6 q10 0 7 -15q-3 -20 8 -20q3 0 3 3q3 17 -2.5 30t-11.5 15q-9 2 -9 -7zM634 1110q0 12 19 15h10q-11 -1 -15.5 -10.5t-8.5 -9.5q-5 -1 -5 5zM721 1122q24 11 32 -2q3 -6 -3 -9q-4 -1 -11.5 6.5t-17.5 4.5zM835 1196l4 -2q14 -4 18 -31q0 -3 8 2l2 3q0 11 -5 19.5t-11 12.5 t-9 3q-14 -1 -7 -7zM851 
1381.5q-1 -2.5 3 -8.5q4 -3 8 0t11 9t15 9q1 1 9 1t15 2t9 7q0 2 -2.5 5t-9 7t-9.5 6q-15 15 -24 15q-9 -1 -11.5 -7.5t-1 -13t-0.5 -12.5q-1 -4 -6 -10.5t-6 -9zM981 1002q-14 -16 7 -43.5t39 -31.5q9 -1 14.5 8t3.5 20q-2 8 -6.5 11.5t-13 5 t-14.5 5.5q-5 3 -9.5 8t-7 8t-5.5 6.5t-4 4t-4 -1.5z" />
+<glyph unicode="&#xf17d;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM112 640q0 -124 44 -236.5t124 -201.5q50 89 123.5 166.5t142.5 124.5t130.5 81t99.5 48l37 13 q4 1 13 3.5t13 4.5q-21 49 -53 111q-311 -93 -673 -93q-1 -7 -1 -21zM126 775q302 0 606 80q-120 213 -244 378q-138 -65 -234 -186t-128 -272zM350 134q184 -150 418 -150q132 0 256 52q-42 241 -140 498h-2l-2 -1q-16 -6 -43 -16.5t-101 -49t-137 -82t-131 -114.5 t-103 -148zM609 1276q1 1 2 1q-1 0 -2 -1zM613 1277q131 -170 246 -382q69 26 130 60.5t96.5 61.5t65.5 57t37.5 40.5l12.5 17.5q-185 164 -433 164q-76 0 -155 -19zM909 797q25 -53 44 -95q2 -6 6.5 -17.5t7.5 -16.5q36 5 74.5 7t73.5 2t69 -1.5t64 -4t56.5 -5.5t48 -6.5 t36.5 -6t25 -4.5l10 -2q-3 232 -149 410l-1 -1q-9 -12 -19 -24.5t-43.5 -44.5t-71 -60.5t-100 -65t-131.5 -64.5zM1007 565q87 -239 128 -469q111 75 185 189.5t96 250.5q-210 60 -409 29z" />
+<glyph unicode="&#xf17e;" d="M0 1024q0 159 112.5 271.5t271.5 112.5q130 0 234 -80q77 16 150 16q143 0 273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -73 -16 -150q80 -104 80 -234q0 -159 -112.5 -271.5t-271.5 -112.5q-130 0 -234 80q-77 -16 -150 -16q-143 0 -273.5 55.5t-225 150t-150 225 t-55.5 273.5q0 73 16 150q-80 104 -80 234zM376 399q0 -92 122 -157.5t291 -65.5q73 0 140 18.5t122.5 53.5t88.5 93.5t33 131.5q0 50 -19.5 91.5t-48.5 68.5t-73 49t-82.5 34t-87.5 23l-104 24q-30 7 -44 10.5t-35 11.5t-30 16t-16.5 21t-7.5 30q0 77 144 77q43 0 77 -12 t54 -28.5t38 -33.5t40 -29t48 -12q47 0 75.5 32t28.5 77q0 55 -56 99.5t-142 67.5t-182 23q-68 0 -132 -15.5t-119.5 -47t-89 -87t-33.5 -128.5q0 -61 19 -106.5t56 -75.5t80 -48.5t103 -32.5l146 -36q90 -22 112 -36q32 -20 32 -60q0 -39 -40 -64.5t-105 -25.5 q-51 0 -91.5 16t-65 38.5t-45.5 45t-46 38.5t-54 16q-50 0 -75.5 -30t-25.5 -75z" />
+<glyph unicode="&#xf180;" horiz-adv-x="1664" d="M0 640q0 75 53 128l587 587q53 53 128 53t128 -53l265 -265l-398 -399l-188 188q-42 42 -99 42q-59 0 -100 -41l-120 -121q-42 -40 -42 -99q0 -58 42 -100l406 -408q30 -28 67 -37l6 -4h28q60 0 99 41l619 619l2 -3q53 -53 53 -128t-53 -128l-587 -587 q-52 -53 -127.5 -53t-128.5 53l-587 587q-53 53 -53 128zM302 660q0 21 14 35l121 120q13 15 35 15t36 -15l252 -252l574 575q15 15 36 15t36 -15l120 -120q14 -15 14 -36t-14 -36l-730 -730q-17 -15 -37 -15q-4 0 -6 1q-18 2 -30 14l-407 408q-14 15 -14 36z" />
+<glyph unicode="&#xf181;" d="M0 -64v1408q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM160 192q0 -14 9 -23t23 -9h480q14 0 23 9t9 23v1024q0 14 -9 23t-23 9h-480q-14 0 -23 -9t-9 -23v-1024zM832 576q0 -14 9 -23t23 -9h480q14 0 23 9t9 23 v640q0 14 -9 23t-23 9h-480q-14 0 -23 -9t-9 -23v-640z" />
+<glyph unicode="&#xf182;" horiz-adv-x="1280" d="M0 480q0 29 16 53l256 384q73 107 176 107h384q103 0 176 -107l256 -384q16 -24 16 -53q0 -40 -28 -68t-68 -28q-51 0 -80 43l-227 341h-45v-132l247 -411q9 -15 9 -33q0 -26 -19 -45t-45 -19h-192v-272q0 -46 -33 -79t-79 -33h-160q-46 0 -79 33t-33 79v272h-192 q-26 0 -45 19t-19 45q0 18 9 33l247 411v132h-45l-227 -341q-29 -43 -80 -43q-40 0 -68 28t-28 68zM416 1280q0 93 65.5 158.5t158.5 65.5t158.5 -65.5t65.5 -158.5t-65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5z" />
+<glyph unicode="&#xf183;" horiz-adv-x="1024" d="M0 416v416q0 80 56 136t136 56h640q80 0 136 -56t56 -136v-416q0 -40 -28 -68t-68 -28t-68 28t-28 68v352h-64v-912q0 -46 -33 -79t-79 -33t-79 33t-33 79v464h-64v-464q0 -46 -33 -79t-79 -33t-79 33t-33 79v912h-64v-352q0 -40 -28 -68t-68 -28t-68 28t-28 68z M288 1280q0 93 65.5 158.5t158.5 65.5t158.5 -65.5t65.5 -158.5t-65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5z" />
+<glyph unicode="&#xf184;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM399.5 766q8.5 -37 24.5 -59l349 -473l350 473q16 22 24.5 59t-6 85t-61.5 79q-40 26 -83 25.5 t-73.5 -17.5t-54.5 -45q-36 -40 -96 -40q-59 0 -95 40q-24 28 -54.5 45t-73.5 17.5t-84 -25.5q-46 -31 -60.5 -79t-6 -85z" />
+<glyph unicode="&#xf185;" horiz-adv-x="1792" d="M44 363q-5 17 4 29l180 248l-180 248q-9 13 -4 29q4 15 20 20l292 96v306q0 16 13 26q15 10 29 4l292 -94l180 248q9 12 26 12t26 -12l180 -248l292 94q14 6 29 -4q13 -10 13 -26v-306l292 -96q16 -5 20 -20q5 -16 -4 -29l-180 -248l180 -248q9 -12 4 -29q-4 -15 -20 -20 l-292 -96v-306q0 -16 -13 -26q-15 -10 -29 -4l-292 94l-180 -248q-10 -13 -26 -13t-26 13l-180 248l-292 -94q-14 -6 -29 4q-13 10 -13 26v306l-292 96q-16 5 -20 20zM320 640q0 -117 45.5 -223.5t123 -184t184 -123t223.5 -45.5t223.5 45.5t184 123t123 184t45.5 223.5 t-45.5 223.5t-123 184t-184 123t-223.5 45.5t-223.5 -45.5t-184 -123t-123 -184t-45.5 -223.5z" />
+<glyph unicode="&#xf186;" d="M0 640q0 153 57.5 292.5t156 241.5t235.5 164.5t290 68.5q44 2 61 -39q18 -41 -15 -72q-86 -78 -131.5 -181.5t-45.5 -218.5q0 -148 73 -273t198 -198t273 -73q118 0 228 51q41 18 72 -13q14 -14 17.5 -34t-4.5 -38q-94 -203 -283.5 -324.5t-413.5 -121.5q-156 0 -298 61 t-245 164t-164 245t-61 298zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51q144 0 273.5 61.5t220.5 171.5q-54 -9 -110 -9q-182 0 -337 90t-245 245t-90 337q0 192 104 357q-201 -60 -328.5 -229t-127.5 -384z" />
+<glyph unicode="&#xf187;" horiz-adv-x="1792" d="M64 1088v256q0 26 19 45t45 19h1536q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1536q-26 0 -45 19t-19 45zM128 -64v960q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-960q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM704 704q0 -26 19 -45t45 -19h256 q26 0 45 19t19 45t-19 45t-45 19h-256q-26 0 -45 -19t-19 -45z" />
+<glyph unicode="&#xf188;" horiz-adv-x="1664" d="M32 576q0 26 19 45t45 19h224v294l-173 173q-19 19 -19 45t19 45t45 19t45 -19l173 -173h844l173 173q19 19 45 19t45 -19t19 -45t-19 -45l-173 -173v-294h224q26 0 45 -19t19 -45t-19 -45t-45 -19h-224q0 -171 -67 -290l208 -209q19 -19 19 -45t-19 -45q-18 -19 -45 -19 t-45 19l-198 197q-5 -5 -15 -13t-42 -28.5t-65 -36.5t-82 -29t-97 -13v896h-128v-896q-51 0 -101.5 13.5t-87 33t-66 39t-43.5 32.5l-15 14l-183 -207q-20 -21 -48 -21q-24 0 -43 16q-19 18 -20.5 44.5t15.5 46.5l202 227q-58 114 -58 274h-224q-26 0 -45 19t-19 45z M512 1152q0 133 93.5 226.5t226.5 93.5t226.5 -93.5t93.5 -226.5h-640z" />
+<glyph unicode="&#xf189;" horiz-adv-x="1920" d="M-1 1004q0 11 3 16l4 6q15 19 57 19l274 2q12 -2 23 -6.5t16 -8.5l5 -3q16 -11 24 -32q20 -50 46 -103.5t41 -81.5l16 -29q29 -60 56 -104t48.5 -68.5t41.5 -38.5t34 -14t27 5q2 1 5 5t12 22t13.5 47t9.5 81t0 125q-2 40 -9 73t-14 46l-6 12q-25 34 -85 43q-13 2 5 24 q17 19 38 30q53 26 239 24q82 -1 135 -13q20 -5 33.5 -13.5t20.5 -24t10.5 -32t3.5 -45.5t-1 -55t-2.5 -70.5t-1.5 -82.5q0 -11 -1 -42t-0.5 -48t3.5 -40.5t11.5 -39t22.5 -24.5q8 -2 17 -4t26 11t38 34.5t52 67t68 107.5q60 104 107 225q4 10 10 17.5t11 10.5l4 3l5 2.5 t13 3t20 0.5l288 2q39 5 64 -2.5t31 -16.5l6 -10q23 -64 -150 -294q-24 -32 -65 -85q-78 -100 -90 -131q-17 -41 14 -81q17 -21 81 -82h1l1 -1l1 -1l2 -2q141 -131 191 -221q3 -5 6.5 -12.5t7 -26.5t-0.5 -34t-25 -27.5t-59 -12.5l-256 -4q-24 -5 -56 5t-52 22l-20 12 q-30 21 -70 64t-68.5 77.5t-61 58t-56.5 15.5q-3 -1 -8 -3.5t-17 -14.5t-21.5 -29.5t-17 -52t-6.5 -77.5q0 -15 -3.5 -27.5t-7.5 -18.5l-4 -5q-18 -19 -53 -22h-115q-71 -4 -146 16.5t-131.5 53t-103 66t-70.5 57.5l-25 24q-10 10 -27.5 30t-71.5 91t-106 151t-122.5 211 t-130.5 272q-6 16 -6 27z" />
+<glyph unicode="&#xf18a;" horiz-adv-x="1792" d="M0 391q0 115 69.5 245t197.5 258q169 169 341.5 236t246.5 -7q65 -64 20 -209q-4 -14 -1 -20t10 -7t14.5 0.5t13.5 3.5l6 2q139 59 246 59t153 -61q45 -63 0 -178q-2 -13 -4.5 -20t4.5 -12.5t12 -7.5t17 -6q57 -18 103 -47t80 -81.5t34 -116.5q0 -68 -37 -139.5 t-109 -137t-168.5 -117.5t-226 -83t-270.5 -31t-275 33.5t-240.5 93t-171.5 151t-65 199.5zM181 320q9 -96 89 -170t208.5 -109t274.5 -21q223 23 369.5 141.5t132.5 264.5q-9 96 -89 170t-208.5 109t-274.5 21q-223 -23 -369.5 -141.5t-132.5 -264.5zM413.5 230.5 q-40.5 92.5 6.5 187.5q47 93 151.5 139t210.5 19q111 -29 158.5 -119.5t2.5 -190.5q-45 -102 -158 -150t-224 -12q-107 34 -147.5 126.5zM495 257.5q9 -34.5 43 -50.5t74.5 -2.5t62.5 47.5q21 34 11 69t-45 50q-34 14 -73 1t-60 -46q-22 -34 -13 -68.5zM705 399 q-17 -31 13 -45q14 -5 29 0.5t22 18.5q8 13 3.5 26.5t-17.5 18.5q-14 5 -28.5 -0.5t-21.5 -18.5zM1165 1274q-6 28 9.5 51.5t43.5 29.5q123 26 244 -11.5t208 -134.5q87 -96 112.5 -222.5t-13.5 -241.5q-9 -27 -34 -40t-52 -4t-40 34t-5 52q28 82 10 172t-80 158 q-62 69 -148 95.5t-173 8.5q-28 -6 -52 9.5t-30 43.5zM1224 1047q-5 24 8 44.5t37 25.5q60 13 119 -5.5t101 -65.5t54.5 -108.5t-6.5 -117.5q-8 -23 -29.5 -34t-44.5 -4q-23 8 -34 29.5t-4 44.5q20 63 -24 111t-107 35q-24 -5 -45 8t-25 37z" />
+<glyph unicode="&#xf18b;" d="M0 638q0 187 83.5 349.5t229.5 269.5t325 137v-485q0 -252 -126.5 -459.5t-330.5 -306.5q-181 215 -181 495zM398 -34q138 87 235.5 211t131.5 268q35 -144 132.5 -268t235.5 -211q-171 -94 -368 -94q-196 0 -367 94zM898 909v485q179 -30 325 -137t229.5 -269.5 t83.5 -349.5q0 -280 -181 -495q-204 99 -330.5 306.5t-126.5 459.5z" />
+<glyph unicode="&#xf18c;" horiz-adv-x="1408" d="M0 -211q0 19 13 31.5t32 12.5q173 1 322.5 107.5t251.5 294.5q-36 -14 -72 -23t-83 -13t-91 2.5t-93 28.5t-92 59t-84.5 100t-74.5 146q114 47 214 57t167.5 -7.5t124.5 -56.5t88.5 -77t56.5 -82q53 131 79 291q-7 -1 -18 -2.5t-46.5 -2.5t-69.5 0.5t-81.5 10t-88.5 23 t-84 42.5t-75 65t-54.5 94.5t-28.5 127.5q70 28 133.5 36.5t112.5 -1t92 -30t73.5 -50t56 -61t42 -63t27.5 -56t16 -39.5l4 -16q12 122 12 195q-8 6 -21.5 16t-49 44.5t-63.5 71.5t-54 93t-33 112.5t12 127t70 138.5q73 -25 127.5 -61.5t84.5 -76.5t48 -85t20.5 -89 t-0.5 -85.5t-13 -76.5t-19 -62t-17 -42l-7 -15q1 -5 1 -50.5t-1 -71.5q3 7 10 18.5t30.5 43t50.5 58t71 55.5t91.5 44.5t112 14.5t132.5 -24q-2 -78 -21.5 -141.5t-50 -104.5t-69.5 -71.5t-81.5 -45.5t-84.5 -24t-80 -9.5t-67.5 1t-46.5 4.5l-17 3q-23 -147 -73 -283 q6 7 18 18.5t49.5 41t77.5 52.5t99.5 42t117.5 20t129 -23.5t137 -77.5q-32 -80 -76 -138t-91 -88.5t-99 -46.5t-101.5 -14.5t-96.5 8.5t-86.5 22t-69.5 27.5t-46 22.5l-17 10q-113 -228 -289.5 -359.5t-384.5 -132.5q-19 0 -32 13t-13 32z" />
+<glyph unicode="&#xf18d;" horiz-adv-x="1280" d="M21 217v66h1238v-66q0 -85 -57.5 -144.5t-138.5 -59.5h-57l-260 -269v269h-529q-81 0 -138.5 59.5t-57.5 144.5zM21 354v255h1238v-255h-1238zM21 682v255h1238v-255h-1238zM21 1010v67q0 84 57.5 143.5t138.5 59.5h846q81 0 138.5 -59.5t57.5 -143.5v-67h-1238z" />
+<glyph unicode="&#xf18e;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM384 544v192q0 13 9.5 22.5t22.5 9.5h352v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23t-9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192h-352q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf190;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM384 640q0 14 9 23l320 320q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5v-192h352q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-352v-192q0 -14 -9 -23t-23 -9q-12 0 -24 10l-319 319q-9 9 -9 23z" />
+<glyph unicode="&#xf191;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 160q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5v960q0 13 -9.5 22.5t-22.5 9.5h-960 q-13 0 -22.5 -9.5t-9.5 -22.5v-960zM448 640q0 33 27 52l448 320q17 12 37 12q26 0 45 -19t19 -45v-640q0 -26 -19 -45t-45 -19q-20 0 -37 12l-448 320q-27 19 -27 52z" />
+<glyph unicode="&#xf192;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM512 640q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181z" />
+<glyph unicode="&#xf193;" horiz-adv-x="1664" d="M0 320q0 181 104.5 330t274.5 211l17 -131q-122 -54 -195 -165.5t-73 -244.5q0 -185 131.5 -316.5t316.5 -131.5q126 0 232.5 65t165 175.5t49.5 236.5l102 -204q-58 -179 -210 -290t-339 -111q-156 0 -288.5 77.5t-210 210t-77.5 288.5zM416 1348q-2 16 6 42 q14 51 57 82.5t97 31.5q66 0 113 -47t47 -113q0 -69 -52 -117.5t-120 -41.5l37 -289h423v-128h-407l16 -128h455q40 0 57 -35l228 -455l198 99l58 -114l-256 -128q-13 -7 -29 -7q-40 0 -57 35l-239 477h-472q-24 0 -42.5 16.5t-21.5 40.5z" />
+<glyph unicode="&#xf194;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM128 806q16 -8 25.5 -26t21.5 -20q21 -3 54.5 8.5t58 10.5t41.5 -30q11 -18 18.5 -38.5t15 -48t12.5 -40.5 q17 -46 53 -187q36 -146 57 -197q42 -99 103 -125q43 -12 85 -1.5t76 31.5q131 77 250 237q104 139 172.5 292.5t82.5 226.5q16 85 -21 132q-52 65 -187 45q-17 -3 -41 -12.5t-57.5 -30.5t-64.5 -48.5t-59.5 -70t-44.5 -91.5q80 7 113.5 -16t26.5 -99q-5 -52 -52 -143 q-43 -78 -71 -99q-44 -32 -87 14q-23 24 -37.5 64.5t-19 73t-10 84t-8.5 71.5q-23 129 -34 164q-12 37 -35.5 69t-50.5 40q-57 16 -127 -25q-54 -32 -136.5 -106t-122.5 -102v-7z" />
+<glyph unicode="&#xf195;" horiz-adv-x="1152" d="M0 608v128q0 23 23 31l233 71v93l-215 -66q-3 -1 -9 -1q-10 0 -19 6q-13 10 -13 26v128q0 23 23 31l233 71v250q0 14 9 23t23 9h160q14 0 23 -9t9 -23v-181l375 116q15 5 28 -5t13 -26v-128q0 -23 -23 -31l-393 -121v-93l375 116q15 5 28 -5t13 -26v-128q0 -23 -23 -31 l-393 -121v-487q188 13 318 151t130 328q0 14 9 23t23 9h160q14 0 23 -9t9 -23q0 -191 -94.5 -353t-256.5 -256.5t-353 -94.5h-160q-14 0 -23 9t-9 23v611l-215 -66q-3 -1 -9 -1q-10 0 -19 6q-13 10 -13 26z" />
+<glyph unicode="&#xf196;" horiz-adv-x="1408" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q119 0 203.5 -84.5t84.5 -203.5v-832q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM128 288q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v832q0 66 -47 113t-113 47h-832q-66 0 -113 -47 t-47 -113v-832zM256 672v64q0 14 9 23t23 9h352v352q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-352h352q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-352v-352q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v352h-352q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf197;" horiz-adv-x="2176" d="M0 576q0 12 38.5 20.5t96.5 10.5q-7 25 -7 49q0 33 9.5 56.5t22.5 23.5h64v64h128q158 0 268 -64h1113q42 -7 106.5 -18t80.5 -14q89 -15 150 -40.5t83.5 -47.5t22.5 -40t-22.5 -40t-83.5 -47.5t-150 -40.5q-16 -3 -80.5 -14t-106.5 -18h-1113q-110 -64 -268 -64h-128v64 h-64q-13 0 -22.5 23.5t-9.5 56.5q0 24 7 49q-58 2 -96.5 10.5t-38.5 20.5zM323 336h29q157 0 273 64h1015q-217 -38 -456 -80q-57 0 -113 -24t-83 -48l-28 -24l-288 -288q-26 -26 -70.5 -45t-89.5 -19h-96zM323 816l93 464h96q46 0 90 -19t70 -45l288 -288q4 -4 11 -10.5 t30.5 -23t48.5 -29t61.5 -23t72.5 -10.5l456 -80h-1015q-116 64 -273 64h-29zM1739 484l81 -30q68 48 68 122t-68 122l-81 -30q53 -36 53 -92t-53 -92z" />
+<glyph unicode="&#xf198;" horiz-adv-x="1664" d="M0 796q0 47 27.5 85t71.5 53l157 53l-53 159q-8 24 -8 47q0 60 42 102.5t102 42.5q47 0 85 -27t53 -72l54 -160l310 105l-54 160q-8 24 -8 47q0 59 42.5 102t101.5 43q47 0 85.5 -27.5t53.5 -71.5l53 -161l162 55q21 6 43 6q60 0 102.5 -39.5t42.5 -98.5q0 -45 -30 -81.5 t-74 -51.5l-157 -54l105 -316l164 56q24 8 46 8q62 0 103.5 -40.5t41.5 -101.5q0 -97 -93 -130l-172 -59l56 -167q7 -21 7 -47q0 -59 -42 -102t-101 -43q-47 0 -85.5 27t-53.5 72l-55 165l-310 -106l55 -164q8 -24 8 -47q0 -59 -42 -102t-102 -43q-47 0 -85 27t-53 72 l-55 163l-153 -53q-29 -9 -50 -9q-61 0 -101.5 40t-40.5 101q0 47 27.5 85t71.5 53l156 53l-105 313l-156 -54q-26 -8 -48 -8q-60 0 -101 40.5t-41 100.5zM620 811l105 -313l310 105l-105 315z" />
+<glyph unicode="&#xf199;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 352q0 -40 28 -68t68 -28h832q40 0 68 28t28 68v436q-31 -35 -64 -55q-34 -22 -132.5 -85t-151.5 -99 q-98 -69 -164 -69t-164 69q-46 32 -141.5 92.5t-142.5 92.5q-12 8 -33 27t-31 27v-436zM256 928q0 -37 30.5 -76.5t67.5 -64.5q47 -32 137.5 -89t129.5 -83q3 -2 17 -11.5t21 -14t21 -13t23.5 -13t21.5 -9.5t22.5 -7.5t20.5 -2.5t20.5 2.5t22.5 7.5t21.5 9.5t23.5 13t21 13 t21 14t17 11.5l267 174q35 23 66.5 62.5t31.5 73.5q0 41 -27.5 70t-68.5 29h-832q-40 0 -68 -28t-28 -68z" />
+<glyph unicode="&#xf19a;" horiz-adv-x="1792" d="M0 640q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348t-71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348zM41 640q0 -173 68 -331.5t182.5 -273t273 -182.5t331.5 -68t331.5 68t273 182.5t182.5 273t68 331.5 t-68 331.5t-182.5 273t-273 182.5t-331.5 68t-331.5 -68t-273 -182.5t-182.5 -273t-68 -331.5zM127 640q0 163 67 313l367 -1005q-196 95 -315 281t-119 411zM254 1062q105 160 274.5 253.5t367.5 93.5q147 0 280.5 -53t238.5 -149h-10q-55 0 -92 -40.5t-37 -95.5 q0 -12 2 -24t4 -21.5t8 -23t9 -21t12 -22.5t12.5 -21t14.5 -24t14 -23q63 -107 63 -212q0 -19 -2.5 -38.5t-10 -49.5t-11.5 -44t-17.5 -59t-17.5 -58l-76 -256l-278 826q46 3 88 8q19 2 26 18.5t-2.5 31t-28.5 13.5l-205 -10q-75 1 -202 10q-12 1 -20.5 -5t-11.5 -15 t-1.5 -18.5t9 -16.5t19.5 -8l80 -8l120 -328l-168 -504l-280 832q46 3 88 8q19 2 26 18.5t-2.5 31t-28.5 13.5l-205 -10q-7 0 -23 0.5t-26 0.5zM679 -97l230 670l237 -647q1 -6 5 -11q-126 -44 -255 -44q-112 0 -217 32zM1282 -24l235 678q59 169 59 276q0 42 -6 79 q95 -174 95 -369q0 -209 -104 -385.5t-279 -278.5z" />
+<glyph unicode="&#xf19b;" horiz-adv-x="1792" d="M0 455q0 140 100.5 263.5t275 205.5t391.5 108v-172q-217 -38 -356.5 -150t-139.5 -255q0 -152 154.5 -267t388.5 -145v1360l272 133v-1536l-272 -128q-228 20 -414 102t-293 208.5t-107 272.5zM1134 860v172q277 -33 481 -157l140 79l37 -390l-525 114l147 83 q-119 70 -280 99z" />
+<glyph unicode="&#xf19c;" horiz-adv-x="2048" d="M0 -128q0 26 20.5 45t48.5 19h1782q28 0 48.5 -19t20.5 -45v-128h-1920v128zM0 1024v128l960 384l960 -384v-128h-128q0 -26 -20.5 -45t-48.5 -19h-1526q-28 0 -48.5 19t-20.5 45h-128zM128 0v64q0 26 20.5 45t48.5 19h59v768h256v-768h128v768h256v-768h128v768h256 v-768h128v768h256v-768h59q28 0 48.5 -19t20.5 -45v-64h-1664z" />
+<glyph unicode="&#xf19d;" horiz-adv-x="2304" d="M0 1024q0 23 22 31l1120 352q4 1 10 1t10 -1l1120 -352q22 -8 22 -31t-22 -31l-1120 -352q-4 -1 -10 -1t-10 1l-652 206q-43 -34 -71 -111.5t-34 -178.5q63 -36 63 -109q0 -69 -58 -107l58 -433q2 -14 -8 -25q-9 -11 -24 -11h-192q-15 0 -24 11q-10 11 -8 25l58 433 q-58 38 -58 107q0 73 65 111q11 207 98 330l-333 104q-22 8 -22 31zM512 384l18 316l574 -181q22 -7 48 -7t48 7l574 181l18 -316q4 -69 -82 -128t-235 -93.5t-323 -34.5t-323 34.5t-235 93.5t-82 128z" />
+<glyph unicode="&#xf19e;" d="M109 1536q58 -15 108 -15q43 0 111 15q63 -111 133.5 -229.5t167 -276.5t138.5 -227q37 61 109.5 177.5t117.5 190t105 176t107 189.5q54 -14 107 -14q56 0 114 14q-28 -39 -60 -88.5t-49.5 -78.5t-56.5 -96t-49 -84q-146 -248 -353 -610l13 -707q-62 11 -105 11 q-41 0 -105 -11l13 707q-40 69 -168.5 295.5t-216.5 374.5t-181 287z" />
+<glyph unicode="&#xf1a0;" horiz-adv-x="1280" d="M111 182q0 81 44.5 150t118.5 115q131 82 404 100q-32 41 -47.5 73.5t-15.5 73.5q0 40 21 85q-46 -4 -68 -4q-148 0 -249.5 96.5t-101.5 244.5q0 82 36 159t99 131q76 66 182 98t218 32h417l-137 -88h-132q75 -63 113 -133t38 -160q0 -72 -24.5 -129.5t-59.5 -93 t-69.5 -65t-59 -61.5t-24.5 -66q0 -36 32 -70.5t77 -68t90.5 -73.5t77.5 -104t32 -142q0 -91 -49 -173q-71 -122 -209.5 -179.5t-298.5 -57.5q-132 0 -246.5 41.5t-172.5 137.5q-36 59 -36 131zM297 228q0 -56 23.5 -102t61 -75.5t87 -50t100 -29t101.5 -8.5q58 0 111.5 13 t99 39t73 73t27.5 109q0 25 -7 49t-14.5 42t-27 41.5t-29.5 35t-38.5 34.5t-36.5 29t-41.5 30t-36.5 26q-16 2 -49 2q-53 0 -104.5 -7t-107 -25t-97 -46t-68.5 -74.5t-27 -105.5zM403 1222q0 -46 10 -97.5t31.5 -103t52 -92.5t75 -67t96.5 -26q37 0 77.5 16.5t65.5 43.5 q53 56 53 159q0 59 -17 125.5t-48 129t-84 103.5t-117 41q-42 0 -82.5 -19.5t-66.5 -52.5q-46 -59 -46 -160z" />
+<glyph unicode="&#xf1a1;" horiz-adv-x="1984" d="M0 722q0 94 66 160t160 66q83 0 148 -55q248 158 592 164l134 423q4 14 17.5 21.5t28.5 4.5l347 -82q22 50 68.5 81t102.5 31q77 0 131.5 -54.5t54.5 -131.5t-54.5 -132t-131.5 -55q-76 0 -130.5 54t-55.5 131l-315 74l-116 -366q327 -14 560 -166q64 58 151 58 q94 0 160 -66t66 -160q0 -62 -31 -114t-83 -82q5 -33 5 -61q0 -121 -68.5 -230.5t-197.5 -193.5q-125 -82 -285.5 -125.5t-335.5 -43.5q-176 0 -336.5 43.5t-284.5 125.5q-129 84 -197.5 193t-68.5 231q0 29 5 66q-48 31 -77 81.5t-29 109.5zM77 722q0 -67 51 -111 q49 131 180 235q-36 25 -82 25q-62 0 -105.5 -43.5t-43.5 -105.5zM178 465q0 -101 59.5 -194t171.5 -166q116 -75 265.5 -115.5t313.5 -40.5t313.5 40.5t265.5 115.5q112 73 171.5 166t59.5 194t-59.5 193.5t-171.5 165.5q-116 75 -265.5 115.5t-313.5 40.5t-313.5 -40.5 t-265.5 -115.5q-112 -73 -171.5 -165.5t-59.5 -193.5zM555 572q0 57 41.5 98t97.5 41t96.5 -41t40.5 -98q0 -56 -40.5 -96t-96.5 -40q-57 0 -98 40t-41 96zM661 209.5q0 16.5 11 27.5t27 11t27 -11q77 -77 265 -77h2q188 0 265 77q11 11 27 11t27 -11t11 -27.5t-11 -27.5 q-99 -99 -319 -99h-2q-220 0 -319 99q-11 11 -11 27.5zM1153 572q0 57 41.5 98t97.5 41t96.5 -41t40.5 -98q0 -56 -40.5 -96t-96.5 -40q-57 0 -98 40t-41 96zM1555 1350q0 -45 32 -77t77 -32t77 32t32 77t-32 77t-77 32t-77 -32t-32 -77zM1672 843q131 -105 178 -238 q57 46 57 117q0 62 -43.5 105.5t-105.5 43.5q-49 0 -86 -28z" />
+<glyph unicode="&#xf1a2;" d="M0 193v894q0 133 94 227t226 94h896q132 0 226 -94t94 -227v-894q0 -133 -94 -227t-226 -94h-896q-132 0 -226 94t-94 227zM155 709q0 -37 19.5 -67.5t52.5 -45.5q-7 -25 -7 -54q0 -98 74 -181.5t201.5 -132t278.5 -48.5q150 0 277.5 48.5t201.5 132t74 181.5q0 27 -6 54 q35 14 57 45.5t22 70.5q0 51 -36 87.5t-87 36.5q-60 0 -98 -48q-151 107 -375 115l83 265l206 -49q1 -50 36.5 -85t84.5 -35q50 0 86 35.5t36 85.5t-36 86t-86 36q-36 0 -66 -20.5t-45 -53.5l-227 54q-9 2 -17.5 -2.5t-11.5 -14.5l-95 -302q-224 -4 -381 -113q-36 43 -93 43 q-51 0 -87 -36.5t-36 -87.5zM493 613q0 37 26 63t63 26t63 -26t26 -63t-26 -64t-63 -27t-63 27t-26 64zM560 375q0 11 8 18q7 7 17.5 7t17.5 -7q49 -51 172 -51h1h1q122 0 173 51q7 7 17.5 7t17.5 -7t7 -18t-7 -18q-65 -64 -208 -64h-1h-1q-143 0 -207 64q-8 7 -8 18z M882 613q0 37 26 63t63 26t63 -26t26 -63t-26 -64t-63 -27t-63 27t-26 64zM1143 1120q0 30 21 51t50 21q30 0 51 -21t21 -51q0 -29 -21 -50t-51 -21q-29 0 -50 21t-21 50z" />
+<glyph unicode="&#xf1a3;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM320 502q0 -82 57.5 -139t139.5 -57q81 0 138.5 56.5t57.5 136.5v280q0 19 13.5 33t33.5 14 q19 0 32.5 -14t13.5 -33v-54l60 -28l90 27v62q0 79 -58 135t-138 56t-138 -55.5t-58 -134.5v-283q0 -20 -14 -33.5t-33 -13.5t-32.5 13.5t-13.5 33.5v120h-151v-122zM806 500q0 -80 58 -137t139 -57t138.5 57t57.5 139v122h-150v-126q0 -20 -13.5 -33.5t-33.5 -13.5 q-19 0 -32.5 14t-13.5 33v123l-90 -26l-60 28v-123z" />
+<glyph unicode="&#xf1a4;" horiz-adv-x="1920" d="M0 336v266h328v-262q0 -43 30 -72.5t72 -29.5t72 29.5t30 72.5v620q0 171 126.5 292t301.5 121q176 0 302 -122t126 -294v-136l-195 -58l-131 61v118q0 42 -30 72t-72 30t-72 -30t-30 -72v-612q0 -175 -126 -299t-303 -124q-178 0 -303.5 125.5t-125.5 303.5zM1062 332 v268l131 -61l195 58v-270q0 -42 30 -71.5t72 -29.5t72 29.5t30 71.5v275h328v-266q0 -178 -125.5 -303.5t-303.5 -125.5q-177 0 -303 124.5t-126 300.5z" />
+<glyph unicode="&#xf1a5;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM64 640h704v-704h480q93 0 158.5 65.5t65.5 158.5v480h-704v704h-480q-93 0 -158.5 -65.5t-65.5 -158.5v-480z " />
+<glyph unicode="&#xf1a6;" horiz-adv-x="2048" d="M0 271v697h328v286h204v-983h-532zM205 435h123v369h-123v-369zM614 271h205v697h-205v-697zM614 1050h205v204h-205v-204zM901 26v163h328v82h-328v697h533v-942h-533zM1106 435h123v369h-123v-369zM1516 26v163h327v82h-327v697h532v-942h-532zM1720 435h123v369h-123 v-369z" />
+<glyph unicode="&#xf1a7;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM293 388l211 41v206q55 -19 116 -19q125 0 213.5 95t88.5 229t-88.5 229t-213.5 95q-74 0 -141 -36h-186v-840z M504 804v277q28 17 70 17q53 0 91 -45t38 -109t-38 -109.5t-91 -45.5q-43 0 -70 15zM636 -39l211 41v206q51 -19 117 -19q125 0 213 95t88 229t-88 229t-213 95q-20 0 -39 -3q-23 -78 -78 -136q-87 -95 -211 -101v-636zM847 377v277q28 17 70 17q53 0 91 -45.5t38 -109.5 t-38 -109t-91 -45q-43 0 -70 15z" />
+<glyph unicode="&#xf1a8;" horiz-adv-x="2038" d="M41 455q0 15 8.5 26.5t22.5 14.5l486 106q-8 14 -8 25t5.5 17.5t16 11.5t20 7t23 4.5t18.5 4.5q4 1 15.5 7.5t17.5 6.5q15 0 28 -16t20 -33q163 37 172 37q17 0 29.5 -11t12.5 -28q0 -15 -8.5 -26t-23.5 -14l-182 -40l-1 -16q-1 -26 81.5 -117.5t104.5 -91.5q47 0 119 80 t72 129q0 36 -23.5 53t-51 18.5t-51 11.5t-23.5 34q0 16 10 34l-68 19q43 44 43 117q0 26 -5 58q82 16 144 16q44 0 71.5 -1.5t48.5 -8.5t31 -13.5t20.5 -24.5t15.5 -33.5t17 -47.5t24 -60l50 25q-3 -40 -23 -60t-42.5 -21t-40 -6.5t-16.5 -20.5l1 -21q75 3 143.5 -20.5 t118 -58.5t101 -94.5t84 -108t75.5 -120.5q33 -56 78.5 -109t75.5 -80.5t99 -88.5q-48 -30 -108.5 -57.5t-138.5 -59t-114 -47.5q-44 37 -74 115t-43.5 164.5t-33 180.5t-42.5 168.5t-72.5 123t-122.5 48.5l-10 -2l-6 -4q4 -5 13 -14q6 -5 28 -23.5t25.5 -22t19 -18 t18 -20.5t11.5 -21t10.5 -27.5t4.5 -31t4 -40.5l1 -33q1 -26 -2.5 -57.5t-7.5 -52t-12.5 -58.5t-11.5 -53q-35 1 -101 -9.5t-98 -10.5q-39 0 -72 10q-2 16 -2 47q0 74 3 96q2 13 31.5 41.5t57 59t26.5 51.5q-24 2 -43 -24q-36 -53 -111.5 -99.5t-136.5 -46.5q-25 0 -75.5 63 t-106.5 139.5t-84 96.5q-6 4 -27 30q-482 -112 -513 -112q-16 0 -28 11t-12 27zM764 676q10 1 32.5 7t34.5 6q19 0 35 -10l-96 -20zM822 568l48 12l109 -177l-73 -48zM859 884q16 30 36 46.5t54 29.5t65.5 36t46 36.5t50 55t43.5 50.5q12 -9 28 -31.5t32 -36.5t38 -13l12 1 v-76l22 -1q247 95 371 190q28 21 50 39t42.5 37.5t33 31t29.5 34t24 31t24.5 37t23 38t27 47.5t29.5 53l7 9q-2 -53 -43 -139q-79 -165 -205 -264t-306 -142q-14 -3 -42 -7.5t-50 -9.5t-39 -14q3 -19 24.5 -46t21.5 -34q0 -11 -26 -30q-5 5 -13.5 15.5t-12 14.5t-10.5 11.5 t-10 10.5l-8 8t-8.5 7.5t-8 5t-8.5 4.5q-7 3 -14.5 5t-20.5 2.5t-22 0.5h-32.5h-37.5q-126 0 -217 -43zM1061 45h31l10 -83l-41 -12v95zM1061 -79q39 26 131.5 47.5t146.5 21.5q9 0 22.5 -15.5t28 -42.5t26 -50t24 -51t14.5 -33q-121 -45 -244 -45q-61 0 -125 11zM1116 29 q21 2 60.5 8.5t72 10t60.5 3.5h14q3 -15 3 -16q0 -7 -17.5 -14.5t-46 -13t-54 -9.5t-53.5 -7.5t-32 -4.5zM1947 1528l1 3l2 4l-1 -5zM1950 1535v1v-1zM1950 1535l1 1z" />
+<glyph unicode="&#xf1a9;" d="M0 520q0 89 19.5 172.5t49 145.5t70.5 118.5t78.5 94t78.5 69.5t64.5 46.5t42.5 24.5q14 8 51 26.5t54.5 28.5t48 30t60.5 44q36 28 58 72.5t30 125.5q129 -155 186 -193q44 -29 130 -68t129 -66q21 -13 39 -25t60.5 -46.5t76 -70.5t75 -95t69 -122t47 -148.5 t19.5 -177.5q0 -164 -62 -304.5t-166 -236t-242.5 -149.5t-290.5 -54t-293 57.5t-247.5 157t-170.5 241.5t-64 302zM333 256q-2 -112 74 -164q29 -20 62.5 -28.5t103.5 -8.5q57 0 132 32.5t134 71t120 70.5t93 31q26 -1 65 -31.5t71.5 -67t68 -67.5t55.5 -32q35 -3 58.5 14 t55.5 63q28 41 42.5 101t14.5 106q0 22 -5 44.5t-16.5 45t-34 36.5t-52.5 14q-33 0 -97 -41.5t-129 -83.5t-101 -42q-27 -1 -63.5 19t-76 49t-83.5 58t-100 49t-111 19q-115 -1 -197 -78.5t-84 -178.5zM685.5 -76q-0.5 -10 7.5 -20q34 -32 87.5 -46t102.5 -12.5t99 4.5 q41 4 84.5 20.5t65 30t28.5 20.5q12 12 7 29q-5 19 -24 5q-30 -22 -87 -39t-131 -17q-129 0 -193 49q-5 4 -13 4q-11 0 -26 -12q-7 -6 -7.5 -16zM852 31q9 -8 17.5 -4.5t31.5 23.5q3 2 10.5 8.5t10.5 8.5t10 7t11.5 7t12.5 5t15 4.5t16.5 2.5t20.5 1q27 0 44.5 -7.5 t23 -14.5t13.5 -22q10 -17 12.5 -20t12.5 1q23 12 14 34q-19 47 -39 61q-23 15 -76 15q-47 0 -71 -10q-29 -12 -78 -56q-26 -24 -12 -44z" />
+<glyph unicode="&#xf1aa;" d="M0 78q0 72 44.5 128t113.5 72q-22 86 1 173t88 152l12 12l151 -152l-11 -11q-37 -37 -37 -89t37 -90q37 -37 89 -37t89 37l30 30l151 152l161 160l151 -152l-160 -160l-151 -152l-30 -30q-65 -64 -151.5 -87t-171.5 -2q-16 -70 -72 -115t-129 -45q-85 0 -145 60.5 t-60 145.5zM2 1202q0 85 60 145.5t145 60.5q76 0 133.5 -49t69.5 -123q84 20 169.5 -3.5t149.5 -87.5l12 -12l-152 -152l-12 12q-37 37 -89 37t-89 -37t-37 -89.5t37 -89.5l29 -29l152 -152l160 -160l-151 -152l-161 160l-151 152l-30 30q-68 67 -90 159.5t5 179.5 q-70 15 -115 71t-45 129zM446 803l161 160l152 152l29 30q67 67 159 89.5t178 -3.5q11 75 68.5 126t135.5 51q85 0 145 -60.5t60 -145.5q0 -77 -51 -135t-127 -69q26 -85 3 -176.5t-90 -158.5l-12 -12l-151 152l12 12q37 37 37 89t-37 89t-89 37t-89 -37l-30 -30l-152 -152 l-160 -160zM776 793l152 152l160 -160l152 -152l29 -30q64 -64 87.5 -150.5t2.5 -171.5q76 -11 126.5 -68.5t50.5 -134.5q0 -85 -60 -145.5t-145 -60.5q-74 0 -131 47t-71 118q-86 -28 -179.5 -6t-161.5 90l-11 12l151 152l12 -12q37 -37 89 -37t89 37t37 89t-37 89l-30 30 l-152 152z" />
+<glyph unicode="&#xf1ab;" d="M0 -16v1078q3 9 4 10q5 6 20 11q106 35 149 50v384l558 -198q2 0 160.5 55t316 108.5t161.5 53.5q20 0 20 -21v-418l147 -47v-1079l-774 246q-14 -6 -375 -127.5t-368 -121.5q-13 0 -18 13q0 1 -1 3zM39 15l694 232v1032l-694 -233v-1031zM147 293q6 4 82 92 q21 24 85.5 115t78.5 118q17 30 51 98.5t36 77.5q-8 1 -110 -33q-8 -2 -27.5 -7.5t-34.5 -9.5t-17 -5q-2 -2 -2 -10.5t-1 -9.5q-5 -10 -31 -15q-23 -7 -47 0q-18 4 -28 21q-4 6 -5 23q6 2 24.5 5t29.5 6q58 16 105 32q100 35 102 35q10 2 43 19.5t44 21.5q9 3 21.5 8 t14.5 5.5t6 -0.5q2 -12 -1 -33q0 -2 -12.5 -27t-26.5 -53.5t-17 -33.5q-25 -50 -77 -131l64 -28q12 -6 74.5 -32t67.5 -28q4 -1 10.5 -25.5t4.5 -30.5q-1 -3 -12.5 0.5t-31.5 11.5l-20 9q-44 20 -87 49q-7 5 -41 31.5t-38 28.5q-67 -103 -134 -181q-81 -95 -105 -110 q-4 -2 -19.5 -4t-18.5 0zM268 933l1 3q3 -3 19.5 -5t26.5 0t58 16q36 12 55 14q17 0 21 -17q3 -15 -4 -28q-12 -23 -50 -38q-30 -12 -60 -12q-26 3 -49 26q-14 15 -18 41zM310 -116q0 8 5 13.5t13 5.5q4 0 18 -7.5t30.5 -16.5t20.5 -11q73 -37 159.5 -61.5t157.5 -24.5 q95 0 167 14.5t157 50.5q15 7 30.5 15.5t34 19t28.5 16.5l-43 73l158 -13l-54 -160l-40 66q-130 -83 -276 -108q-58 -12 -91 -12h-84q-79 0 -199.5 39t-183.5 85q-8 7 -8 16zM777 1294l573 -184v380zM885 453l102 -31l45 110l211 -65l37 -135l102 -31l-181 657l-100 31z M1071 630l76 185l63 -227z" />
+<glyph unicode="&#xf1ac;" horiz-adv-x="1792" d="M0 -96v1088q0 66 47 113t113 47h128q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-128q-66 0 -113 47t-47 113zM512 -96v1536q0 40 28 68t68 28h672q40 0 88 -20t76 -48l152 -152q28 -28 48 -76t20 -88v-163q58 -34 93 -93t35 -128v-768q0 -106 -75 -181 t-181 -75h-864q-66 0 -113 47t-47 113zM640 896h896v256h-160q-40 0 -68 28t-28 68v160h-640v-512zM736 0q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM736 256q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9 h-128q-14 0 -23 -9t-9 -23v-128zM736 512q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM992 0q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM992 256q0 -14 9 -23t23 -9h128 q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM992 512q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM1248 0q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23 v-128zM1248 256q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM1248 512q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128z" />
+<glyph unicode="&#xf1ad;" d="M0 -192v1664q0 26 19 45t45 19h1280q26 0 45 -19t19 -45v-1664q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM256 160q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM256 416q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64 q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM256 672q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM256 928q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM256 1184q0 -14 9 -23 t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM512 96v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23zM512 416q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9 t-9 -23v-64zM512 672q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM512 928q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM512 1184q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64 q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM768 416q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM768 672q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM768 928q0 -14 9 -23 t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM768 1184q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM1024 160q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9 t-9 -23v-64zM1024 416q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM1024 672q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM1024 928q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64 q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM1024 1184q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64z" />
+<glyph unicode="&#xf1ae;" horiz-adv-x="1280" d="M64 1056q0 40 28 68t68 28t68 -28l228 -228h368l228 228q28 28 68 28t68 -28t28 -68t-28 -68l-292 -292v-824q0 -46 -33 -79t-79 -33t-79 33t-33 79v384h-64v-384q0 -46 -33 -79t-79 -33t-79 33t-33 79v824l-292 292q-28 28 -28 68zM416 1152q0 93 65.5 158.5t158.5 65.5 t158.5 -65.5t65.5 -158.5t-65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5z" />
+<glyph unicode="&#xf1b0;" horiz-adv-x="1664" d="M0 724q0 80 42 139.5t119 59.5q76 0 141.5 -55.5t100.5 -134t35 -152.5q0 -80 -42 -139t-119 -59q-76 0 -141.5 55.5t-100.5 133.5t-35 152zM256 19q0 86 56 191.5t139.5 192.5t187.5 146t193 59q118 0 255 -97.5t229 -237t92 -254.5q0 -46 -17 -76.5t-48.5 -45 t-64.5 -20t-76 -5.5q-68 0 -187.5 45t-182.5 45q-66 0 -192.5 -44.5t-200.5 -44.5q-183 0 -183 146zM333 1163q0 60 19 113.5t63 92.5t105 39q77 0 138.5 -57.5t91.5 -135t30 -151.5q0 -60 -19 -113.5t-63 -92.5t-105 -39q-76 0 -138 57.5t-92 135.5t-30 151zM884 1064 q0 74 30 151.5t91.5 135t138.5 57.5q61 0 105 -39t63 -92.5t19 -113.5q0 -73 -30 -151t-92 -135.5t-138 -57.5q-61 0 -105 39t-63 92.5t-19 113.5zM1226 581q0 74 35 152.5t100.5 134t141.5 55.5q77 0 119 -59.5t42 -139.5q0 -74 -35 -152t-100.5 -133.5t-141.5 -55.5 q-77 0 -119 59t-42 139z" />
+<glyph unicode="&#xf1b1;" horiz-adv-x="768" d="M64 1008q0 128 42.5 249.5t117.5 200t160 78.5t160 -78.5t117.5 -200t42.5 -249.5q0 -145 -57 -243.5t-152 -135.5l45 -821q2 -26 -16 -45t-44 -19h-192q-26 0 -44 19t-16 45l45 821q-95 37 -152 135.5t-57 243.5z" />
+<glyph unicode="&#xf1b2;" horiz-adv-x="1792" d="M0 256v768q0 40 23 73t61 47l704 256q22 8 44 8t44 -8l704 -256q38 -14 61 -47t23 -73v-768q0 -35 -18 -65t-49 -47l-704 -384q-28 -16 -61 -16t-61 16l-704 384q-31 17 -49 47t-18 65zM134 1026l698 -254l698 254l-698 254zM896 -93l640 349v636l-640 -233v-752z" />
+<glyph unicode="&#xf1b3;" horiz-adv-x="2304" d="M0 96v416q0 38 21.5 70t56.5 48l434 186v400q0 38 21.5 70t56.5 48l448 192q23 10 50 10t50 -10l448 -192q35 -16 56.5 -48t21.5 -70v-400l434 -186q36 -16 57 -48t21 -70v-416q0 -36 -19 -67t-52 -47l-448 -224q-25 -14 -57 -14t-57 14l-448 224q-5 2 -7 4q-2 -2 -7 -4 l-448 -224q-25 -14 -57 -14t-57 14l-448 224q-33 16 -52 47t-19 67zM172 531l404 -173l404 173l-404 173zM640 -96l384 192v314l-384 -164v-342zM647 1219l441 -189l441 189l-441 189zM1152 651l384 165v266l-384 -164v-267zM1196 531l404 -173l404 173l-404 173zM1664 -96 l384 192v314l-384 -164v-342z" />
+<glyph unicode="&#xf1b4;" horiz-adv-x="2048" d="M0 22v1260h594q87 0 155 -14t126.5 -47.5t90 -96.5t31.5 -154q0 -181 -172 -263q114 -32 172 -115t58 -204q0 -75 -24.5 -136.5t-66 -103.5t-98.5 -71t-121 -42t-134 -13h-611zM277 236h296q205 0 205 167q0 180 -199 180h-302v-347zM277 773h281q78 0 123.5 36.5 t45.5 113.5q0 144 -190 144h-260v-294zM1137 477q0 208 130.5 345.5t336.5 137.5q138 0 240.5 -68t153 -179t50.5 -248q0 -17 -2 -47h-658q0 -111 57.5 -171.5t166.5 -60.5q63 0 122 32t76 87h221q-100 -307 -427 -307q-214 0 -340.5 132t-126.5 347zM1337 1073h511v124 h-511v-124zM1388 576h408q-18 195 -200 195q-90 0 -146 -52.5t-62 -142.5z" />
+<glyph unicode="&#xf1b5;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM128 254h382q117 0 197 57.5t80 170.5q0 158 -143 200q107 52 107 164q0 57 -19.5 96.5t-56.5 60.5t-79 29.5 t-97 8.5h-371v-787zM301 388v217h189q124 0 124 -113q0 -104 -128 -104h-185zM301 723v184h163q119 0 119 -90q0 -94 -106 -94h-176zM838 538q0 -135 79 -217t213 -82q205 0 267 191h-138q-11 -34 -47.5 -54t-75.5 -20q-68 0 -104 38t-36 107h411q1 10 1 30 q0 132 -74.5 220.5t-203.5 88.5q-128 0 -210 -86t-82 -216zM964 911v77h319v-77h-319zM996 600q4 56 39 89t91 33q113 0 124 -122h-254z" />
+<glyph unicode="&#xf1b6;" horiz-adv-x="2048" d="M0 764q0 86 61 146.5t146 60.5q73 0 130 -46t73 -117l783 -315q49 29 106 29q14 0 21 -1l173 248q1 114 82 194.5t195 80.5q115 0 196.5 -81t81.5 -196t-81.5 -196.5t-196.5 -81.5l-265 -194q-8 -80 -67.5 -133.5t-138.5 -53.5q-73 0 -130 46t-73 117l-783 315 q-51 -30 -106 -30q-85 0 -146 61t-61 147zM55 764q0 -64 44.5 -108.5t107.5 -44.5q11 0 33 4l-64 26q-33 14 -52.5 44.5t-19.5 66.5q0 50 35.5 85.5t85.5 35.5q20 0 41 -8v1l76 -31q-20 37 -56.5 59t-78.5 22q-63 0 -107.5 -44.5t-44.5 -107.5zM1164 244q19 -37 55.5 -59 t79.5 -22q63 0 107.5 44.5t44.5 107.5t-44.5 108t-107.5 45q-13 0 -33 -4q2 -1 20 -8t21.5 -8.5t18.5 -8.5t19 -10t16 -11t15.5 -13.5t11 -14.5t10 -18t5 -21t2.5 -25q0 -50 -35.5 -85.5t-85.5 -35.5q-14 0 -31.5 4.5t-29 9t-31.5 13.5t-28 12zM1584 767q0 -77 54.5 -131.5 t131.5 -54.5t132 54.5t55 131.5t-55 131.5t-132 54.5q-76 0 -131 -54.5t-55 -131.5zM1623 767q0 62 43.5 105.5t104.5 43.5t105 -44t44 -105t-43.5 -104.5t-105.5 -43.5q-61 0 -104.5 43.5t-43.5 104.5z" />
+<glyph unicode="&#xf1b7;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM128 693q0 -53 38 -91t92 -38q36 0 66 18l489 -197q10 -44 45.5 -73t81.5 -29q50 0 86.5 34t41.5 83l167 122 q71 0 122 50.5t51 122.5t-51 123t-122 51q-72 0 -122.5 -50.5t-51.5 -121.5l-108 -155q-2 0 -6.5 0.5t-6.5 0.5q-35 0 -67 -19l-489 197q-10 44 -45.5 73t-80.5 29q-54 0 -92 -38t-38 -92zM162 693q0 40 28 68t68 28q27 0 49.5 -14t34.5 -37l-48 19q-29 11 -56.5 -2 t-38.5 -41q-12 -29 -0.5 -57t39.5 -40v-1l40 -16q-14 -2 -20 -2q-40 0 -68 27.5t-28 67.5zM855 369q5 -2 47 -19q29 -12 58 0.5t41 41.5q11 29 -1 57.5t-41 40.5l-40 16q14 2 21 2q39 0 67 -27.5t28 -67.5t-28 -67.5t-67 -27.5q-59 0 -85 51zM1118 695q0 48 34 82t83 34 q48 0 82 -34t34 -82t-34 -82t-82 -34q-49 0 -83 34t-34 82zM1142 696q0 -39 27.5 -66t65.5 -27t65.5 27t27.5 66q0 38 -27.5 65.5t-65.5 27.5t-65.5 -27.5t-27.5 -65.5z" />
+<glyph unicode="&#xf1b8;" horiz-adv-x="1792" d="M16 970l433 -17l180 -379l-147 92q-63 -72 -111.5 -144.5t-72.5 -125t-39.5 -94.5t-18.5 -63l-4 -21l-190 357q-17 26 -18 56t6 47l8 18q35 63 114 188zM270.5 158q-3.5 28 4 65t12 55t21.5 64t19 53q78 -12 509 -28l-15 -368l-2 -22l-420 29q-36 3 -67 31.5t-47 65.5 q-11 27 -14.5 55zM294 1124l225 356q20 31 60 45t80 10q24 -2 48.5 -12t42 -21t41.5 -33t36 -34.5t36 -39.5t32 -35q-47 -63 -265 -435l-317 187zM782 1524l405 -1q31 3 58 -10.5t39 -28.5l11 -15q39 -61 112 -190l142 83l-220 -373l-419 20l151 86q-34 89 -75 166 t-75.5 123.5t-64.5 80t-47 46.5zM953 197l211 362l7 -173q170 -16 283 -5t170 33l56 22l-188 -359q-12 -29 -36.5 -46.5t-43.5 -20.5l-18 -4q-71 -7 -219 -12l8 -164zM1218 847l313 195l19 11l212 -363q18 -37 12.5 -76t-27.5 -74q-13 -20 -33 -37t-38 -28t-48.5 -22 t-47 -16t-51.5 -14t-46 -12q-34 72 -265 436z" />
+<glyph unicode="&#xf1b9;" horiz-adv-x="1984" d="M0 160v384q0 93 65.5 158.5t158.5 65.5h28l105 419q23 94 104 157.5t179 63.5h704q98 0 179 -63.5t104 -157.5l105 -419h28q93 0 158.5 -65.5t65.5 -158.5v-384q0 -14 -9 -23t-23 -9h-128v-128q0 -80 -56 -136t-136 -56t-136 56t-56 136v128h-928v-128q0 -80 -56 -136 t-136 -56t-136 56t-56 136v128h-96q-14 0 -23 9t-9 23zM160 448q0 -66 47 -113t113 -47t113 47t47 113t-47 113t-113 47t-113 -47t-47 -113zM516 768h952l-89 357q-2 8 -14 17.5t-21 9.5h-704q-9 0 -21 -9.5t-14 -17.5zM1472 448q0 -66 47 -113t113 -47t113 47t47 113 t-47 113t-113 47t-113 -47t-47 -113z" />
+<glyph unicode="&#xf1ba;" horiz-adv-x="1984" d="M0 32v384q0 93 65.5 158.5t158.5 65.5h28l105 419q23 94 104 157.5t179 63.5h128v224q0 14 9 23t23 9h448q14 0 23 -9t9 -23v-224h64q98 0 179 -63.5t104 -157.5l105 -419h28q93 0 158.5 -65.5t65.5 -158.5v-384q0 -14 -9 -23t-23 -9h-128v-64q0 -80 -56 -136t-136 -56 t-136 56t-56 136v64h-928v-64q0 -80 -56 -136t-136 -56t-136 56t-56 136v64h-96q-14 0 -23 9t-9 23zM160 320q0 -66 47 -113t113 -47t113 47t47 113t-47 113t-113 47t-113 -47t-47 -113zM516 640h952l-89 357q-2 8 -14 17.5t-21 9.5h-704q-9 0 -21 -9.5t-14 -17.5zM1472 320 q0 -66 47 -113t113 -47t113 47t47 113t-47 113t-113 47t-113 -47t-47 -113z" />
+<glyph unicode="&#xf1bb;" d="M32 64q0 26 19 45l402 403h-229q-26 0 -45 19t-19 45t19 45l402 403h-197q-26 0 -45 19t-19 45t19 45l384 384q19 19 45 19t45 -19l384 -384q19 -19 19 -45t-19 -45t-45 -19h-197l402 -403q19 -19 19 -45t-19 -45t-45 -19h-229l402 -403q19 -19 19 -45t-19 -45t-45 -19 h-462q1 -17 6 -87.5t5 -108.5q0 -25 -18 -42.5t-43 -17.5h-320q-25 0 -43 17.5t-18 42.5q0 38 5 108.5t6 87.5h-462q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf1bc;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM237 886q0 -31 20.5 -52t51.5 -21q11 0 40 8q133 37 307 37q159 0 309.5 -34t253.5 -95q21 -12 40 -12 q29 0 50.5 20.5t21.5 51.5q0 47 -40 70q-126 73 -293 110.5t-343 37.5q-204 0 -364 -47q-23 -7 -38.5 -25.5t-15.5 -48.5zM289 637q0 -25 17.5 -42.5t42.5 -17.5q7 0 37 8q122 33 251 33q279 0 488 -124q24 -13 38 -13q25 0 42.5 17.5t17.5 42.5q0 40 -35 61 q-237 141 -548 141q-153 0 -303 -42q-48 -13 -48 -64zM321 406q0 -20 13.5 -34.5t35.5 -14.5q5 0 37 8q132 27 243 27q226 0 397 -103q19 -11 33 -11q19 0 33 13.5t14 34.5q0 32 -30 51q-193 115 -447 115q-133 0 -287 -34q-42 -9 -42 -52z" />
+<glyph unicode="&#xf1bd;" d="M0 11v1258q0 58 40.5 98.5t98.5 40.5h1258q58 0 98.5 -40.5t40.5 -98.5v-1258q0 -58 -40.5 -98.5t-98.5 -40.5h-1258q-58 0 -98.5 40.5t-40.5 98.5zM71 11q0 -28 20 -48t48 -20h1258q28 0 48 20t20 48v1258q0 28 -20 48t-48 20h-1258q-28 0 -48 -20t-20 -48v-1258z M121 11v141l711 195l-212 439q4 1 12 2.5t12 1.5q170 32 303.5 21.5t221 -46t143.5 -94.5q27 -28 -25 -42q-64 -16 -256 -62l-97 198q-111 7 -240 -16l188 -387l533 145v-496q0 -7 -5.5 -12.5t-12.5 -5.5h-1258q-7 0 -12.5 5.5t-5.5 12.5zM121 709v560q0 7 5.5 12.5 t12.5 5.5h1258q7 0 12.5 -5.5t5.5 -12.5v-428q-85 30 -188 52q-294 64 -645 12l-18 -3l-65 134h-233l85 -190q-132 -51 -230 -137zM246 413q-24 203 166 305l129 -270l-255 -61q-14 -3 -26 4.5t-14 21.5z" />
+<glyph unicode="&#xf1be;" horiz-adv-x="2304" d="M0 405l17 128q2 9 9 9t9 -9l20 -128l-20 -126q-2 -9 -9 -9t-9 9zM79 405l23 207q0 9 9 9q8 0 10 -9l26 -207l-26 -203q-2 -9 -10 -9q-9 0 -9 10zM169 405l21 245q2 12 12 12q11 0 11 -12l25 -245l-25 -237q0 -11 -11 -11q-10 0 -12 11zM259 405l21 252q0 13 13 13 q12 0 14 -13l23 -252l-23 -244q-2 -13 -14 -13q-13 0 -13 13zM350 405l20 234q0 6 4.5 10.5t10.5 4.5q14 0 16 -15l21 -234l-21 -246q-2 -16 -16 -16q-6 0 -10.5 4.5t-4.5 11.5zM401 159zM442 405l18 380q2 18 18 18q7 0 12 -5.5t5 -12.5l21 -380l-21 -246q0 -7 -5 -12.5 t-12 -5.5q-16 0 -18 18zM534 403l16 468q2 19 20 19q8 0 13.5 -5.5t5.5 -13.5l19 -468l-19 -244q0 -8 -5.5 -13.5t-13.5 -5.5q-18 0 -20 19zM628 405l16 506q0 9 6.5 15.5t14.5 6.5q9 0 15 -6.5t7 -15.5l18 -506l-18 -242q-2 -21 -22 -21q-19 0 -21 21zM723 405l14 -241 q1 -10 7.5 -16.5t15.5 -6.5q22 0 24 23l16 241l-16 523q-1 10 -7.5 17t-16.5 7q-9 0 -16 -7t-7 -17zM784 164zM817 405l14 510q0 11 7.5 18t17.5 7t17.5 -7t7.5 -18l15 -510l-15 -239q0 -10 -7.5 -17.5t-17.5 -7.5t-17 7t-8 18zM913 404l12 492q1 12 9 20t19 8t18.5 -8 t8.5 -20l14 -492l-14 -236q0 -11 -8 -19t-19 -8t-19 8t-9 19zM1010 405q0 -1 11 -236v-1q0 -10 6 -17q9 -11 23 -11q11 0 20 9q9 7 9 20l1 24l11 211l-12 586q0 16 -13 24q-8 5 -16 5t-16 -5q-13 -8 -13 -24l-1 -6zM1079 169zM1103 404l12 636v3q2 15 12 24q9 7 20 7 q8 0 15 -5q14 -8 16 -26l14 -639l-14 -231q0 -13 -9 -22t-22 -9t-22 9t-10 22l-6 114zM1204 174v899q0 23 28 33q85 34 181 34q195 0 338 -131.5t160 -323.5q53 22 110 22q117 0 200 -83t83 -201q0 -117 -83 -199.5t-200 -82.5h-786q-13 2 -22 11t-9 22z" />
+<glyph unicode="&#xf1c0;" d="M0 0v170q119 -84 325 -127t443 -43t443 43t325 127v-170q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5t-103 128zM0 384v170q119 -84 325 -127t443 -43t443 43t325 127v-170q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5t-103 128zM0 768 v170q119 -84 325 -127t443 -43t443 43t325 127v-170q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5t-103 128zM0 1152v128q0 69 103 128t280 93.5t385 34.5t385 -34.5t280 -93.5t103 -128v-128q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5 t-103 128z" />
+<glyph unicode="&#xf1c1;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM257 60q9 40 56 91.5t132 96.5q14 9 23 -6q2 -2 2 -4 q52 85 107 197q68 136 104 262q-24 82 -30.5 159.5t6.5 127.5q11 40 42 40h21h1q23 0 35 -15q18 -21 9 -68q-2 -6 -4 -8q1 -3 1 -8v-30q-2 -123 -14 -192q55 -164 146 -238q33 -26 84 -56q59 7 117 7q147 0 177 -49q16 -22 2 -52q0 -1 -1 -2l-2 -2v-1q-6 -38 -71 -38 q-48 0 -115 20t-130 53q-221 -24 -392 -83q-153 -262 -242 -262q-15 0 -28 7l-24 12q-1 1 -6 5q-10 10 -6 36zM318 54q52 24 137 158q-51 -40 -87.5 -84t-49.5 -74zM592 313q135 54 284 81q-2 1 -13 9.5t-16 13.5q-76 67 -127 176q-27 -86 -83 -197q-30 -56 -45 -83z M714 842q1 7 7 44q0 3 7 43q1 4 4 8q-1 1 -1 2t-0.5 1.5t-0.5 1.5q-1 22 -13 36q0 -1 -1 -2v-2q-15 -42 -2 -132zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376zM1098 353q76 -28 124 -28q14 0 18 1q0 1 -2 3q-24 24 -140 24z" />
+<glyph unicode="&#xf1c2;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM233 661h70l164 -661h159l128 485q7 20 10 46q2 16 2 24 h4l3 -24q1 -3 3.5 -20t5.5 -26l128 -485h159l164 661h70v107h-300v-107h90l-99 -438q-5 -20 -7 -46l-2 -21h-4l-3 21q-1 5 -4 21t-5 25l-144 545h-114l-144 -545q-2 -9 -4.5 -24.5t-3.5 -21.5l-4 -21h-4l-2 21q-2 26 -7 46l-99 438h90v107h-300v-107zM1024 1024h376 q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c3;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM429 0h281v106h-75l103 161q5 7 10 16.5t7.5 13.5t3.5 4 h2q1 -4 5 -10q2 -4 4.5 -7.5t6 -8t6.5 -8.5l107 -161h-76v-106h291v106h-68l-192 273l195 282h67v107h-279v-107h74l-103 -159q-4 -7 -10 -16.5t-9 -13.5l-2 -3h-2q-1 4 -5 10q-6 11 -17 23l-106 159h76v107h-290v-107h68l189 -272l-194 -283h-68v-106zM1024 1024h376 q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c4;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM416 0h327v106h-93v167h137q76 0 118 15q67 23 106.5 87 t39.5 146q0 81 -37 141t-100 87q-48 19 -130 19h-368v-107h92v-555h-92v-106zM650 386v268h120q52 0 83 -18q56 -33 56 -115q0 -89 -62 -120q-31 -15 -78 -15h-119zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c5;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM256 0v192l192 192l128 -128l384 384l320 -320v-320 h-1024zM256 704q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c6;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-128v-128h-128v128h-512v-1536zM384 192q0 25 8 52q21 63 120 396 v128h128v-128h79q22 0 39 -13t23 -34l107 -349q8 -27 8 -52q0 -83 -72.5 -137.5t-183.5 -54.5t-183.5 54.5t-72.5 137.5zM512 192q0 -26 37.5 -45t90.5 -19t90.5 19t37.5 45t-37.5 45t-90.5 19t-90.5 -19t-37.5 -45zM512 896h128v128h-128v-128zM512 1152h128v128h-128v-128 zM640 768h128v128h-128v-128zM640 1024h128v128h-128v-128zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c7;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM256 288v192q0 14 9 23t23 9h131l166 167q16 15 35 7 q20 -8 20 -30v-544q0 -22 -20 -30q-8 -2 -12 -2q-12 0 -23 9l-166 167h-131q-14 0 -23 9t-9 23zM762 206.5q1 -26.5 20 -44.5q20 -17 44 -17q27 0 47 20q87 93 87 219t-87 219q-18 19 -45 20t-46 -17t-20 -44.5t18 -46.5q52 -57 52 -131t-52 -131q-19 -20 -18 -46.5z M973.5 54.5q2.5 -26.5 23.5 -42.5q18 -15 40 -15q31 0 50 24q129 159 129 363t-129 363q-16 21 -43 24t-47 -14q-21 -17 -23.5 -43.5t14.5 -47.5q100 -123 100 -282t-100 -282q-17 -21 -14.5 -47.5zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c8;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM256 256v384q0 52 38 90t90 38h384q52 0 90 -38t38 -90 v-384q0 -52 -38 -90t-90 -38h-384q-52 0 -90 38t-38 90zM960 403v90l265 266q9 9 23 9q4 0 12 -2q20 -8 20 -30v-576q0 -22 -20 -30q-8 -2 -12 -2q-14 0 -23 9zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c9;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM254 429q-14 19 0 38l226 301q8 11 21 12.5t24 -6.5 l51 -38q11 -8 12.5 -21t-6.5 -24l-182 -243l182 -243q8 -11 6.5 -24t-12.5 -21l-51 -38q-11 -8 -24 -6.5t-21 12.5zM636 43l138 831q2 13 13 20.5t24 5.5l63 -10q13 -2 20.5 -13t5.5 -24l-138 -831q-2 -13 -13 -20.5t-24 -5.5l-63 10q-13 2 -20.5 13t-5.5 24zM947.5 181 q-1.5 13 6.5 24l182 243l-182 243q-8 11 -6.5 24t12.5 21l51 38q11 8 24 6.5t21 -12.5l226 -301q14 -19 0 -38l-226 -301q-8 -11 -21 -12.5t-24 6.5l-51 38q-11 8 -12.5 21zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1ca;" d="M39 1286h283q26 -218 70 -398.5t104.5 -317t121.5 -235.5t140 -195q169 169 287 406q-142 72 -223 220t-81 333q0 192 104 314.5t284 122.5q178 0 273 -105.5t95 -297.5q0 -159 -58 -286q-7 -1 -19.5 -3t-46 -2t-63 6t-62 25.5t-50.5 51.5q31 103 31 184q0 87 -29 132 t-79 45q-53 0 -85 -49.5t-32 -140.5q0 -186 105 -293.5t267 -107.5q62 0 121 14v-198q-101 -23 -198 -23q-65 -136 -165.5 -271t-181.5 -215.5t-128 -106.5q-80 -45 -162 3q-28 17 -60.5 43.5t-85 83.5t-102.5 128.5t-107.5 184t-105.5 244t-91.5 314.5t-70.5 390z" />
+<glyph unicode="&#xf1cb;" horiz-adv-x="1792" d="M0 367v546q0 41 34 64l819 546q21 13 43 13t43 -13l819 -546q34 -23 34 -64v-546q0 -41 -34 -64l-819 -546q-21 -13 -43 -13t-43 13l-819 546q-34 23 -34 64zM154 511l193 129l-193 129v-258zM216 367l603 -402v359l-334 223zM216 913l269 -180l334 223v359zM624 640 l272 -182l272 182l-272 182zM973 -35l603 402l-269 180l-334 -223v-359zM973 956l334 -223l269 180l-603 402v-359zM1445 640l193 -129v258z" />
+<glyph unicode="&#xf1cc;" horiz-adv-x="2048" d="M0 407q0 110 55 203t147 147q-12 39 -12 82q0 115 82 196t199 81q95 0 172 -58q75 154 222.5 248t326.5 94q166 0 306 -80.5t221.5 -218.5t81.5 -301q0 -6 -0.5 -18t-0.5 -18q111 -46 179.5 -145.5t68.5 -221.5q0 -164 -118 -280.5t-285 -116.5q-4 0 -11.5 0.5t-10.5 0.5 h-1209h-1h-2h-5q-170 10 -288 125.5t-118 280.5zM468 498q0 -122 84 -193t208 -71q137 0 240 99q-16 20 -47.5 56.5t-43.5 50.5q-67 -65 -144 -65q-55 0 -93.5 33.5t-38.5 87.5q0 53 38.5 87t91.5 34q44 0 84.5 -21t73 -55t65 -75t69 -82t77 -75t97 -55t121.5 -21 q121 0 204.5 71.5t83.5 190.5q0 121 -84 192t-207 71q-143 0 -241 -97q14 -16 29.5 -34t34.5 -40t29 -34q66 64 142 64q52 0 92 -33t40 -84q0 -57 -37 -91.5t-94 -34.5q-43 0 -82.5 21t-72 55t-65.5 75t-69.5 82t-77.5 75t-96.5 55t-118.5 21q-122 0 -207 -70.5t-85 -189.5z " />
+<glyph unicode="&#xf1cd;" horiz-adv-x="1792" d="M0 640q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348t-71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348zM128 640q0 -190 90 -361l194 194q-28 82 -28 167t28 167l-194 194q-90 -171 -90 -361zM512 640 q0 -159 112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5t-112.5 271.5t-271.5 112.5t-271.5 -112.5t-112.5 -271.5zM535 -38q171 -90 361 -90t361 90l-194 194q-82 -28 -167 -28t-167 28zM535 1318l194 -194q82 28 167 28t167 -28l194 194q-171 90 -361 90t-361 -90z M1380 473l194 -194q90 171 90 361t-90 361l-194 -194q28 -82 28 -167t-28 -167z" />
+<glyph unicode="&#xf1ce;" horiz-adv-x="1792" d="M0 640q0 222 101 414.5t276.5 317t390.5 155.5v-260q-221 -45 -366.5 -221t-145.5 -406q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5q0 230 -145.5 406t-366.5 221v260q215 -31 390.5 -155.5t276.5 -317t101 -414.5 q0 -182 -71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348z" />
+<glyph unicode="&#xf1d0;" horiz-adv-x="1792" d="M19 662q8 217 116 406t305 318h5q0 -1 -1 -3q-8 -8 -28 -33.5t-52 -76.5t-60 -110.5t-44.5 -135.5t-14 -150.5t39 -157.5t108.5 -154q50 -50 102 -69.5t90.5 -11.5t69.5 23.5t47 32.5l16 16q39 51 53 116.5t6.5 122.5t-21 107t-26.5 80l-14 29q-10 25 -30.5 49.5t-43 41 t-43.5 29.5t-35 19l-13 6l104 115q39 -17 78 -52t59 -61l19 -27q1 48 -18.5 103.5t-40.5 87.5l-20 31l161 183l160 -181q-33 -46 -52.5 -102.5t-22.5 -90.5l-4 -33q22 37 61.5 72.5t67.5 52.5l28 17l103 -115q-44 -14 -85 -50t-60 -65l-19 -29q-31 -56 -48 -133.5t-7 -170 t57 -156.5q33 -45 77.5 -60.5t85 -5.5t76 26.5t57.5 33.5l21 16q60 53 96.5 115t48.5 121.5t10 121.5t-18 118t-37 107.5t-45.5 93t-45 72t-34.5 47.5l-13 17q-14 13 -7 13l10 -3q40 -29 62.5 -46t62 -50t64 -58t58.5 -65t55.5 -77t45.5 -88t38 -103t23.5 -117t10.5 -136 q3 -259 -108 -465t-312 -321t-456 -115q-185 0 -351 74t-283.5 198t-184 293t-60.5 353z" />
+<glyph unicode="&#xf1d1;" horiz-adv-x="1792" d="M0 640q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348t-71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348zM44 640q0 -173 67.5 -331t181.5 -272t272 -181.5t331 -67.5t331 67.5t272 181.5t181.5 272t67.5 331 t-67.5 331t-181.5 272t-272 181.5t-331 67.5t-331 -67.5t-272 -181.5t-181.5 -272t-67.5 -331zM87 640q0 205 98 385l57 -33q-30 -56 -49 -112l82 -28q-35 -100 -35 -212q0 -109 36 -212l-83 -28q22 -60 49 -112l-57 -33q-98 180 -98 385zM206 217l58 34q29 -49 73 -99 l65 57q148 -168 368 -212l-17 -86q65 -12 121 -13v-66q-208 6 -385 109.5t-283 275.5zM207 1063q106 172 282 275.5t385 109.5v-66q-65 -2 -121 -13l17 -86q-220 -42 -368 -211l-65 56q-38 -42 -73 -98zM415 805q33 93 99 169l185 -162q59 68 147 86l-48 240q44 10 98 10 t98 -10l-48 -240q88 -18 147 -86l185 162q66 -76 99 -169l-233 -80q14 -42 14 -85t-14 -85l232 -80q-31 -92 -98 -169l-185 162q-57 -67 -147 -85l48 -241q-52 -10 -98 -10t-98 10l48 241q-90 18 -147 85l-185 -162q-67 77 -98 169l232 80q-14 42 -14 85t14 85zM918 -102 q56 1 121 13l-17 86q220 44 368 212l65 -57q44 50 73 99l58 -34q-106 -172 -283 -275.5t-385 -109.5v66zM918 1382v66q209 -6 385 -109.5t282 -275.5l-57 -33q-35 56 -73 98l-65 -56q-148 169 -368 211l17 86q-56 11 -121 13zM1516 428q36 103 36 212q0 112 -35 212l82 28 q-19 56 -49 112l57 33q98 -180 98 -385t-98 -385l-57 33q27 52 49 112z" />
+<glyph unicode="&#xf1d2;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 218q0 -45 20 -78.5t54 -51t72 -25.5t81 -8q224 0 224 188q0 67 -48 99t-126 46q-27 5 -51.5 20.5 t-24.5 39.5q0 44 49 52q77 15 122 70t45 134q0 24 -10 52q37 9 49 13v125q-78 -29 -135 -29q-50 29 -110 29q-86 0 -145 -57t-59 -143q0 -50 29.5 -102t73.5 -67v-3q-38 -17 -38 -85q0 -53 41 -77v-3q-113 -37 -113 -139zM382 225q0 64 98 64q102 0 102 -61q0 -66 -93 -66 q-107 0 -107 63zM395 693q0 90 77 90q36 0 55 -25.5t19 -63.5q0 -85 -74 -85q-77 0 -77 84zM755 1072q0 -36 25 -62.5t60 -26.5t59.5 27t24.5 62q0 36 -24 63.5t-60 27.5t-60.5 -27t-24.5 -64zM771 350h137q-2 27 -2 82v387q0 46 2 69h-137q3 -23 3 -71v-392q0 -50 -3 -75z M966 771q36 3 37 3q3 0 11 -0.5t12 -0.5v-2h-2v-217q0 -37 2.5 -64t11.5 -56.5t24.5 -48.5t43.5 -31t66 -12q64 0 108 24v121q-30 -21 -68 -21q-53 0 -53 82v225h52q9 0 26.5 -1t26.5 -1v117h-105q0 82 3 102h-140q4 -24 4 -55v-47h-60v-117z" />
+<glyph unicode="&#xf1d3;" horiz-adv-x="1792" d="M68 7q0 165 182 225v4q-67 41 -67 126q0 109 63 137v4q-72 24 -119.5 108.5t-47.5 165.5q0 139 95 231.5t235 92.5q96 0 178 -47q98 0 218 47v-202q-36 -12 -79 -22q16 -43 16 -84q0 -127 -73 -216.5t-197 -112.5q-40 -8 -59.5 -27t-19.5 -58q0 -31 22.5 -51.5t58 -32 t78.5 -22t86 -25.5t78.5 -37.5t58 -64t22.5 -98.5q0 -304 -363 -304q-69 0 -130 12.5t-116 41t-87.5 82t-32.5 127.5zM272 18q0 -101 172 -101q151 0 151 105q0 100 -165 100q-158 0 -158 -104zM293 775q0 -135 124 -135q119 0 119 137q0 61 -30 102t-89 41 q-124 0 -124 -145zM875 1389q0 59 39.5 103t98.5 44q58 0 96.5 -44.5t38.5 -102.5t-39 -101.5t-96 -43.5q-58 0 -98 43.5t-40 101.5zM901 220q4 45 4 134v609q0 94 -4 128h222q-4 -33 -4 -124v-613q0 -89 4 -134h-222zM1217 901v190h96v76q0 54 -6 89h227q-6 -41 -6 -165 h171v-190q-15 0 -43.5 2t-42.5 2h-85v-365q0 -131 87 -131q61 0 109 33v-196q-71 -39 -174 -39q-62 0 -107 20t-70 50t-39.5 78t-18.5 92t-4 103v351h2v4q-7 0 -19 1t-18 1q-21 0 -59 -6z" />
+<glyph unicode="&#xf1d4;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM368 1135l323 -589v-435h134v436l343 588h-150q-21 -39 -63.5 -118.5t-68 -128.5t-59.5 -118.5t-60 -128.5h-3 q-21 48 -44.5 97t-52 105.5t-46.5 92t-54 104.5t-49 95h-150z" />
+<glyph unicode="&#xf1d5;" horiz-adv-x="1280" d="M57 953q0 119 46.5 227t124.5 186t186 124t226 46q158 0 292.5 -78t212.5 -212.5t78 -292.5t-78 -292t-212.5 -212t-292.5 -78q-64 0 -131 14q-21 5 -32.5 23.5t-6.5 39.5q5 20 23 31.5t39 7.5q51 -13 108 -13q97 0 186 38t153 102t102 153t38 186t-38 186t-102 153 t-153 102t-186 38t-186 -38t-153 -102t-102 -153t-38 -186q0 -114 52 -218q10 -20 3.5 -40t-25.5 -30t-39.5 -3t-30.5 26q-64 123 -64 265zM113.5 38.5q10.5 121.5 29.5 217t54 186t69 155.5t74 125q61 90 132 165q-16 35 -16 77q0 80 56.5 136.5t136.5 56.5t136.5 -56.5 t56.5 -136.5t-57 -136.5t-136 -56.5q-60 0 -111 35q-62 -67 -115 -146q-247 -371 -202 -859q1 -22 -12.5 -38.5t-34.5 -18.5h-5q-20 0 -35 13.5t-17 33.5q-14 126 -3.5 247.5z" />
+<glyph unicode="&#xf1d6;" horiz-adv-x="1792" d="M18 264q0 275 252 466q-8 19 -8 52q0 20 11 49t24 45q-1 22 7.5 53t22.5 43q0 139 92.5 288.5t217.5 209.5q139 66 324 66q133 0 266 -55q49 -21 90 -48t71 -56t55 -68t42 -74t32.5 -84.5t25.5 -89.5t22 -98l1 -5q55 -83 55 -150q0 -14 -9 -40t-9 -38q0 -1 1.5 -3.5 t3.5 -5t2 -3.5q77 -114 120.5 -214.5t43.5 -208.5q0 -43 -19.5 -100t-55.5 -57q-9 0 -19.5 7.5t-19 17.5t-19 26t-16 26.5t-13.5 26t-9 17.5q-1 1 -3 1l-5 -4q-59 -154 -132 -223q20 -20 61.5 -38.5t69 -41.5t35.5 -65q-2 -4 -4 -16t-7 -18q-64 -97 -302 -97q-53 0 -110.5 9 t-98 20t-104.5 30q-15 5 -23 7q-14 4 -46 4.5t-40 1.5q-41 -45 -127.5 -65t-168.5 -20q-35 0 -69 1.5t-93 9t-101 20.5t-74.5 40t-32.5 64q0 40 10 59.5t41 48.5q11 2 40.5 13t49.5 12q4 0 14 2q2 2 2 4l-2 3q-48 11 -108 105.5t-73 156.5l-5 3q-4 0 -12 -20 q-18 -41 -54.5 -74.5t-77.5 -37.5h-1q-4 0 -6 4.5t-5 5.5q-23 54 -23 100z" />
+<glyph unicode="&#xf1d7;" horiz-adv-x="2048" d="M0 858q0 169 97.5 311t264 223.5t363.5 81.5q176 0 332.5 -66t262 -182.5t136.5 -260.5q-31 4 -70 4q-169 0 -311 -77t-223.5 -208.5t-81.5 -287.5q0 -78 23 -152q-35 -3 -68 -3q-26 0 -50 1.5t-55 6.5t-44.5 7t-54.5 10.5t-50 10.5l-253 -127l72 218q-290 203 -290 490z M380 1075q0 -39 33 -64.5t76 -25.5q41 0 66 24.5t25 65.5t-25 66t-66 25q-43 0 -76 -25.5t-33 -65.5zM816 404q0 143 81.5 264t223.5 191.5t311 70.5q161 0 303 -70.5t227.5 -192t85.5 -263.5q0 -117 -68.5 -223.5t-185.5 -193.5l55 -181l-199 109q-150 -37 -218 -37 q-169 0 -311 70.5t-223.5 191.5t-81.5 264zM888 1075q0 -39 33 -64.5t76 -25.5q41 0 65.5 24.5t24.5 65.5t-24.5 66t-65.5 25q-43 0 -76 -25.5t-33 -65.5zM1160 568q0 -28 22.5 -50.5t49.5 -22.5q40 0 65.5 22t25.5 51q0 28 -25.5 50t-65.5 22q-27 0 -49.5 -22.5 t-22.5 -49.5zM1559 568q0 -28 22.5 -50.5t49.5 -22.5q39 0 65 22t26 51q0 28 -26 50t-65 22q-27 0 -49.5 -22.5t-22.5 -49.5z" />
+<glyph unicode="&#xf1d8;" horiz-adv-x="1792" d="M0 508q-2 40 32 59l1664 960q15 9 32 9q20 0 36 -11q33 -24 27 -64l-256 -1536q-5 -29 -32 -45q-14 -8 -31 -8q-11 0 -24 5l-453 185l-242 -295q-18 -23 -49 -23q-13 0 -22 4q-19 7 -30.5 23.5t-11.5 36.5v349l864 1059l-1069 -925l-395 162q-37 14 -40 55z" />
+<glyph unicode="&#xf1d9;" horiz-adv-x="1792" d="M0 508q-3 39 32 59l1664 960q35 21 68 -2q33 -24 27 -64l-256 -1536q-5 -29 -32 -45q-14 -8 -31 -8q-11 0 -24 5l-527 215l-298 -327q-18 -21 -47 -21q-14 0 -23 4q-19 7 -30 23.5t-11 36.5v452l-472 193q-37 14 -40 55zM209 522l336 -137l863 639l-478 -797l492 -201 l221 1323z" />
+<glyph unicode="&#xf1da;" d="M0 832v448q0 42 40 59q39 17 69 -14l130 -129q107 101 244.5 156.5t284.5 55.5q156 0 298 -61t245 -164t164 -245t61 -298t-61 -298t-164 -245t-245 -164t-298 -61q-172 0 -327 72.5t-264 204.5q-7 10 -6.5 22.5t8.5 20.5l137 138q10 9 25 9q16 -2 23 -12 q73 -95 179 -147t225 -52q104 0 198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5t-40.5 198.5t-109.5 163.5t-163.5 109.5t-198.5 40.5q-98 0 -188 -35.5t-160 -101.5l137 -138q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19t-19 45zM512 480v64q0 14 9 23t23 9h224v352 q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-448q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf1db;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5z" />
+<glyph unicode="&#xf1dc;" horiz-adv-x="1792" d="M62 1338q0 26 12 48t36 22q46 0 138.5 -3.5t138.5 -3.5q42 0 126.5 3.5t126.5 3.5q25 0 37.5 -22t12.5 -48q0 -30 -17 -43.5t-38.5 -14.5t-49.5 -4t-43 -13q-35 -21 -35 -160l1 -320q0 -21 1 -32q13 -3 39 -3h699q25 0 38 3q1 11 1 32l1 320q0 139 -35 160 q-18 11 -58.5 12.5t-66 13t-25.5 49.5q0 26 12.5 48t37.5 22q44 0 132 -3.5t132 -3.5q43 0 129 3.5t129 3.5q25 0 37.5 -22t12.5 -48q0 -30 -17.5 -44t-40 -14.5t-51.5 -3t-44 -12.5q-35 -23 -35 -161l1 -943q0 -119 34 -140q16 -10 46 -13.5t53.5 -4.5t41.5 -15.5t18 -44.5 q0 -26 -12 -48t-36 -22q-44 0 -132.5 3.5t-133.5 3.5q-44 0 -132 -3.5t-132 -3.5q-24 0 -37 20.5t-13 45.5q0 31 17 46t39 17t51 7t45 15q33 21 33 140l-1 391q0 21 -1 31q-13 4 -50 4h-675q-38 0 -51 -4q-1 -10 -1 -31l-1 -371q0 -142 37 -164q16 -10 48 -13t57 -3.5 t45 -15t20 -45.5q0 -26 -12.5 -48t-36.5 -22q-47 0 -139.5 3.5t-138.5 3.5q-43 0 -128 -3.5t-127 -3.5q-23 0 -35.5 21t-12.5 45q0 30 15.5 45t36 17.5t47.5 7.5t42 15q33 23 33 143l-1 57v813q0 3 0.5 26t0 36.5t-1.5 38.5t-3.5 42t-6.5 36.5t-11 31.5t-16 18 q-15 10 -45 12t-53 2t-41 14t-18 45z" />
+<glyph unicode="&#xf1dd;" horiz-adv-x="1280" d="M24 926q0 166 88 286q88 118 209 159q111 37 417 37h479q25 0 43 -18t18 -43v-73q0 -29 -18.5 -61t-42.5 -32q-50 0 -54 -1q-26 -6 -32 -31q-3 -11 -3 -64v-1152q0 -25 -18 -43t-43 -18h-108q-25 0 -43 18t-18 43v1218h-143v-1218q0 -25 -17.5 -43t-43.5 -18h-108 q-26 0 -43.5 18t-17.5 43v496q-147 12 -245 59q-126 58 -192 179q-64 117 -64 259z" />
+<glyph unicode="&#xf1de;" d="M0 736v64q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-64q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM128 -96v672h256v-672q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM128 960v416q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-416h-256zM512 224v64q0 40 28 68 t68 28h320q40 0 68 -28t28 -68v-64q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM640 64h256v-160q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v160zM640 448v928q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-928h-256zM1024 992v64q0 40 28 68t68 28h320q40 0 68 -28 t28 -68v-64q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM1152 -96v928h256v-928q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM1152 1216v160q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-160h-256z" />
+<glyph unicode="&#xf1e0;" d="M0 640q0 133 93.5 226.5t226.5 93.5q126 0 218 -86l360 180q-2 22 -2 34q0 133 93.5 226.5t226.5 93.5t226.5 -93.5t93.5 -226.5t-93.5 -226.5t-226.5 -93.5q-126 0 -218 86l-360 -180q2 -22 2 -34t-2 -34l360 -180q92 86 218 86q133 0 226.5 -93.5t93.5 -226.5 t-93.5 -226.5t-226.5 -93.5t-226.5 93.5t-93.5 226.5q0 12 2 34l-360 180q-92 -86 -218 -86q-133 0 -226.5 93.5t-93.5 226.5z" />
+<glyph unicode="&#xf1e1;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 640q0 -88 62.5 -150.5t150.5 -62.5q83 0 145 57l241 -120q-2 -16 -2 -23q0 -88 63 -150.5t151 -62.5 t150.5 62.5t62.5 150.5t-62.5 151t-150.5 63q-84 0 -145 -58l-241 120q2 16 2 23t-2 23l241 120q61 -58 145 -58q88 0 150.5 63t62.5 151t-62.5 150.5t-150.5 62.5t-151 -62.5t-63 -150.5q0 -7 2 -23l-241 -120q-62 57 -145 57q-88 0 -150.5 -62.5t-62.5 -150.5z" />
+<glyph unicode="&#xf1e2;" horiz-adv-x="1792" d="M0 448q0 143 55.5 273.5t150 225t225 150t273.5 55.5q182 0 343 -89l64 64q19 19 45.5 19t45.5 -19l68 -68l243 244l46 -46l-244 -243l68 -68q19 -19 19 -45.5t-19 -45.5l-64 -64q89 -161 89 -343q0 -143 -55.5 -273.5t-150 -225t-225 -150t-273.5 -55.5t-273.5 55.5 t-225 150t-150 225t-55.5 273.5zM170 615q10 -24 35 -34q13 -5 24 -5q42 0 60 40q34 84 98.5 148.5t148.5 98.5q25 11 35 35t0 49t-34 35t-49 0q-108 -44 -191 -127t-127 -191q-10 -25 0 -49zM1376 1472q0 13 9 23q10 9 23 9t23 -9l90 -91q10 -9 10 -22.5t-10 -22.5 q-10 -10 -22 -10q-13 0 -23 10l-91 90q-9 10 -9 23zM1536 1408v96q0 14 9 23t23 9t23 -9t9 -23v-96q0 -14 -9 -23t-23 -9t-23 9t-9 23zM1605 1242.5q0 13.5 10 22.5q9 10 22.5 10t22.5 -10l91 -90q9 -10 9 -23t-9 -23q-11 -9 -23 -9t-23 9l-90 91q-10 9 -10 22.5z M1605 1381.5q0 13.5 10 22.5l90 91q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-91 -90q-10 -10 -22 -10q-13 0 -23 10q-10 9 -10 22.5zM1632 1312q0 14 9 23t23 9h96q14 0 23 -9t9 -23t-9 -23t-23 -9h-96q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf1e3;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e4;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e5;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e6;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e7;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e8;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e9;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1ea;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1eb;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1ec;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1ed;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1ee;" horiz-adv-x="1792" />
+<glyph unicode="&#xf500;" horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+</font>
+</defs></svg> 
\ No newline at end of file
diff --git a/doc/fonts/fontawesome-webfont.ttf b/doc/fonts/fontawesome-webfont.ttf
new file mode 100755 (executable)
index 0000000..5cd6cff
Binary files /dev/null and b/doc/fonts/fontawesome-webfont.ttf differ
diff --git a/doc/fonts/fontawesome-webfont.woff b/doc/fonts/fontawesome-webfont.woff
new file mode 100755 (executable)
index 0000000..9eaecb3
Binary files /dev/null and b/doc/fonts/fontawesome-webfont.woff differ
diff --git a/doc/fonts/glyphicons-halflings-regular.eot b/doc/fonts/glyphicons-halflings-regular.eot
new file mode 100644 (file)
index 0000000..423bd5d
Binary files /dev/null and b/doc/fonts/glyphicons-halflings-regular.eot differ
diff --git a/doc/fonts/glyphicons-halflings-regular.svg b/doc/fonts/glyphicons-halflings-regular.svg
new file mode 100644 (file)
index 0000000..4469488
--- /dev/null
@@ -0,0 +1,229 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
+<svg xmlns="http://www.w3.org/2000/svg">
+<metadata></metadata>
+<defs>
+<font id="glyphicons_halflingsregular" horiz-adv-x="1200" >
+<font-face units-per-em="1200" ascent="960" descent="-240" />
+<missing-glyph horiz-adv-x="500" />
+<glyph />
+<glyph />
+<glyph unicode="&#xd;" />
+<glyph unicode=" " />
+<glyph unicode="*" d="M100 500v200h259l-183 183l141 141l183 -183v259h200v-259l183 183l141 -141l-183 -183h259v-200h-259l183 -183l-141 -141l-183 183v-259h-200v259l-183 -183l-141 141l183 183h-259z" />
+<glyph unicode="+" d="M0 400v300h400v400h300v-400h400v-300h-400v-400h-300v400h-400z" />
+<glyph unicode="&#xa0;" />
+<glyph unicode="&#x2000;" horiz-adv-x="652" />
+<glyph unicode="&#x2001;" horiz-adv-x="1304" />
+<glyph unicode="&#x2002;" horiz-adv-x="652" />
+<glyph unicode="&#x2003;" horiz-adv-x="1304" />
+<glyph unicode="&#x2004;" horiz-adv-x="434" />
+<glyph unicode="&#x2005;" horiz-adv-x="326" />
+<glyph unicode="&#x2006;" horiz-adv-x="217" />
+<glyph unicode="&#x2007;" horiz-adv-x="217" />
+<glyph unicode="&#x2008;" horiz-adv-x="163" />
+<glyph unicode="&#x2009;" horiz-adv-x="260" />
+<glyph unicode="&#x200a;" horiz-adv-x="72" />
+<glyph unicode="&#x202f;" horiz-adv-x="260" />
+<glyph unicode="&#x205f;" horiz-adv-x="326" />
+<glyph unicode="&#x20ac;" d="M100 500l100 100h113q0 47 5 100h-218l100 100h135q37 167 112 257q117 141 297 141q242 0 354 -189q60 -103 66 -209h-181q0 55 -25.5 99t-63.5 68t-75 36.5t-67 12.5q-24 0 -52.5 -10t-62.5 -32t-65.5 -67t-50.5 -107h379l-100 -100h-300q-6 -46 -6 -100h406l-100 -100 h-300q9 -74 33 -132t52.5 -91t62 -54.5t59 -29t46.5 -7.5q29 0 66 13t75 37t63.5 67.5t25.5 96.5h174q-31 -172 -128 -278q-107 -117 -274 -117q-205 0 -324 158q-36 46 -69 131.5t-45 205.5h-217z" />
+<glyph unicode="&#x2212;" d="M200 400h900v300h-900v-300z" />
+<glyph unicode="&#x2601;" d="M-14 494q0 -80 56.5 -137t135.5 -57h750q120 0 205 86t85 208q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5z" />
+<glyph unicode="&#x2709;" d="M0 100l400 400l200 -200l200 200l400 -400h-1200zM0 300v600l300 -300zM0 1100l600 -603l600 603h-1200zM900 600l300 300v-600z" />
+<glyph unicode="&#x270f;" d="M-13 -13l333 112l-223 223zM187 403l214 -214l614 614l-214 214zM887 1103l214 -214l99 92q13 13 13 32.5t-13 33.5l-153 153q-15 13 -33 13t-33 -13z" />
+<glyph unicode="&#xe000;" horiz-adv-x="500" d="M0 0z" />
+<glyph unicode="&#xe001;" d="M0 1200h1200l-500 -550v-550h300v-100h-800v100h300v550z" />
+<glyph unicode="&#xe002;" d="M14 84q18 -55 86 -75.5t147 5.5q65 21 109 69t44 90v606l600 155v-521q-64 16 -138 -7q-79 -26 -122.5 -83t-25.5 -111q17 -55 85.5 -75.5t147.5 4.5q70 23 111.5 63.5t41.5 95.5v881q0 10 -7 15.5t-17 2.5l-752 -193q-10 -3 -17 -12.5t-7 -19.5v-689q-64 17 -138 -7 q-79 -25 -122.5 -82t-25.5 -112z" />
+<glyph unicode="&#xe003;" d="M23 693q0 200 142 342t342 142t342 -142t142 -342q0 -142 -78 -261l300 -300q7 -8 7 -18t-7 -18l-109 -109q-8 -7 -18 -7t-18 7l-300 300q-119 -78 -261 -78q-200 0 -342 142t-142 342zM176 693q0 -136 97 -233t234 -97t233.5 96.5t96.5 233.5t-96.5 233.5t-233.5 96.5 t-234 -97t-97 -233z" />
+<glyph unicode="&#xe005;" d="M100 784q0 64 28 123t73 100.5t104.5 64t119 20.5t120 -38.5t104.5 -104.5q48 69 109.5 105t121.5 38t118.5 -20.5t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-149.5 152.5t-126.5 127.5 t-94 124.5t-33.5 117.5z" />
+<glyph unicode="&#xe006;" d="M-72 800h479l146 400h2l146 -400h472l-382 -278l145 -449l-384 275l-382 -275l146 447zM168 71l2 1z" />
+<glyph unicode="&#xe007;" d="M-72 800h479l146 400h2l146 -400h472l-382 -278l145 -449l-384 275l-382 -275l146 447zM168 71l2 1zM237 700l196 -142l-73 -226l192 140l195 -141l-74 229l193 140h-235l-77 211l-78 -211h-239z" />
+<glyph unicode="&#xe008;" d="M0 0v143l400 257v100q-37 0 -68.5 74.5t-31.5 125.5v200q0 124 88 212t212 88t212 -88t88 -212v-200q0 -51 -31.5 -125.5t-68.5 -74.5v-100l400 -257v-143h-1200z" />
+<glyph unicode="&#xe009;" d="M0 0v1100h1200v-1100h-1200zM100 100h100v100h-100v-100zM100 300h100v100h-100v-100zM100 500h100v100h-100v-100zM100 700h100v100h-100v-100zM100 900h100v100h-100v-100zM300 100h600v400h-600v-400zM300 600h600v400h-600v-400zM1000 100h100v100h-100v-100z M1000 300h100v100h-100v-100zM1000 500h100v100h-100v-100zM1000 700h100v100h-100v-100zM1000 900h100v100h-100v-100z" />
+<glyph unicode="&#xe010;" d="M0 50v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5zM0 650v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5zM600 50v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5zM600 650v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400 q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe011;" d="M0 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM0 450v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200 q-21 0 -35.5 14.5t-14.5 35.5zM0 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5 t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 450v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5 v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 450v200q0 21 14.5 35.5t35.5 14.5h200 q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe012;" d="M0 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM0 450q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v200q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5 t-14.5 -35.5v-200zM0 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 50v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5 t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5zM400 450v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5zM400 850v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5 v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe013;" d="M29 454l419 -420l818 820l-212 212l-607 -607l-206 207z" />
+<glyph unicode="&#xe014;" d="M106 318l282 282l-282 282l212 212l282 -282l282 282l212 -212l-282 -282l282 -282l-212 -212l-282 282l-282 -282z" />
+<glyph unicode="&#xe015;" d="M23 693q0 200 142 342t342 142t342 -142t142 -342q0 -142 -78 -261l300 -300q7 -8 7 -18t-7 -18l-109 -109q-8 -7 -18 -7t-18 7l-300 300q-119 -78 -261 -78q-200 0 -342 142t-142 342zM176 693q0 -136 97 -233t234 -97t233.5 96.5t96.5 233.5t-96.5 233.5t-233.5 96.5 t-234 -97t-97 -233zM300 600v200h100v100h200v-100h100v-200h-100v-100h-200v100h-100z" />
+<glyph unicode="&#xe016;" d="M23 694q0 200 142 342t342 142t342 -142t142 -342q0 -141 -78 -262l300 -299q7 -7 7 -18t-7 -18l-109 -109q-8 -8 -18 -8t-18 8l-300 299q-120 -77 -261 -77q-200 0 -342 142t-142 342zM176 694q0 -136 97 -233t234 -97t233.5 97t96.5 233t-96.5 233t-233.5 97t-234 -97 t-97 -233zM300 601h400v200h-400v-200z" />
+<glyph unicode="&#xe017;" d="M23 600q0 183 105 331t272 210v-166q-103 -55 -165 -155t-62 -220q0 -177 125 -302t302 -125t302 125t125 302q0 120 -62 220t-165 155v166q167 -62 272 -210t105 -331q0 -118 -45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5 zM500 750q0 -21 14.5 -35.5t35.5 -14.5h100q21 0 35.5 14.5t14.5 35.5v400q0 21 -14.5 35.5t-35.5 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-400z" />
+<glyph unicode="&#xe018;" d="M100 1h200v300h-200v-300zM400 1v500h200v-500h-200zM700 1v800h200v-800h-200zM1000 1v1200h200v-1200h-200z" />
+<glyph unicode="&#xe019;" d="M26 601q0 -33 6 -74l151 -38l2 -6q14 -49 38 -93l3 -5l-80 -134q45 -59 105 -105l133 81l5 -3q45 -26 94 -39l5 -2l38 -151q40 -5 74 -5q27 0 74 5l38 151l6 2q46 13 93 39l5 3l134 -81q56 44 104 105l-80 134l3 5q24 44 39 93l1 6l152 38q5 40 5 74q0 28 -5 73l-152 38 l-1 6q-16 51 -39 93l-3 5l80 134q-44 58 -104 105l-134 -81l-5 3q-45 25 -93 39l-6 1l-38 152q-40 5 -74 5q-27 0 -74 -5l-38 -152l-5 -1q-50 -14 -94 -39l-5 -3l-133 81q-59 -47 -105 -105l80 -134l-3 -5q-25 -47 -38 -93l-2 -6l-151 -38q-6 -48 -6 -73zM385 601 q0 88 63 151t152 63t152 -63t63 -151q0 -89 -63 -152t-152 -63t-152 63t-63 152z" />
+<glyph unicode="&#xe020;" d="M100 1025v50q0 10 7.5 17.5t17.5 7.5h275v100q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5v-100h275q10 0 17.5 -7.5t7.5 -17.5v-50q0 -11 -7 -18t-18 -7h-1050q-11 0 -18 7t-7 18zM200 100v800h900v-800q0 -41 -29.5 -71t-70.5 -30h-700q-41 0 -70.5 30 t-29.5 71zM300 100h100v700h-100v-700zM500 100h100v700h-100v-700zM500 1100h300v100h-300v-100zM700 100h100v700h-100v-700zM900 100h100v700h-100v-700z" />
+<glyph unicode="&#xe021;" d="M1 601l656 644l644 -644h-200v-600h-300v400h-300v-400h-300v600h-200z" />
+<glyph unicode="&#xe022;" d="M100 25v1150q0 11 7 18t18 7h475v-500h400v-675q0 -11 -7 -18t-18 -7h-850q-11 0 -18 7t-7 18zM700 800v300l300 -300h-300z" />
+<glyph unicode="&#xe023;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM500 500v400h100 v-300h200v-100h-300z" />
+<glyph unicode="&#xe024;" d="M-100 0l431 1200h209l-21 -300h162l-20 300h208l431 -1200h-538l-41 400h-242l-40 -400h-539zM488 500h224l-27 300h-170z" />
+<glyph unicode="&#xe025;" d="M0 0v400h490l-290 300h200v500h300v-500h200l-290 -300h490v-400h-1100zM813 200h175v100h-175v-100z" />
+<glyph unicode="&#xe026;" d="M1 600q0 122 47.5 233t127.5 191t191 127.5t233 47.5t233 -47.5t191 -127.5t127.5 -191t47.5 -233t-47.5 -233t-127.5 -191t-191 -127.5t-233 -47.5t-233 47.5t-191 127.5t-127.5 191t-47.5 233zM188 600q0 -170 121 -291t291 -121t291 121t121 291t-121 291t-291 121 t-291 -121t-121 -291zM350 600h150v300h200v-300h150l-250 -300z" />
+<glyph unicode="&#xe027;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM350 600l250 300 l250 -300h-150v-300h-200v300h-150z" />
+<glyph unicode="&#xe028;" d="M0 25v475l200 700h800q199 -700 200 -700v-475q0 -11 -7 -18t-18 -7h-1150q-11 0 -18 7t-7 18zM200 500h200l50 -200h300l50 200h200l-97 500h-606z" />
+<glyph unicode="&#xe029;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM500 397v401 l297 -200z" />
+<glyph unicode="&#xe030;" d="M23 600q0 -118 45.5 -224.5t123 -184t184 -123t224.5 -45.5t224.5 45.5t184 123t123 184t45.5 224.5h-150q0 -177 -125 -302t-302 -125t-302 125t-125 302t125 302t302 125q136 0 246 -81l-146 -146h400v400l-145 -145q-157 122 -355 122q-118 0 -224.5 -45.5t-184 -123 t-123 -184t-45.5 -224.5z" />
+<glyph unicode="&#xe031;" d="M23 600q0 118 45.5 224.5t123 184t184 123t224.5 45.5q198 0 355 -122l145 145v-400h-400l147 147q-112 80 -247 80q-177 0 -302 -125t-125 -302h-150zM100 0v400h400l-147 -147q112 -80 247 -80q177 0 302 125t125 302h150q0 -118 -45.5 -224.5t-123 -184t-184 -123 t-224.5 -45.5q-198 0 -355 122z" />
+<glyph unicode="&#xe032;" d="M100 0h1100v1200h-1100v-1200zM200 100v900h900v-900h-900zM300 200v100h100v-100h-100zM300 400v100h100v-100h-100zM300 600v100h100v-100h-100zM300 800v100h100v-100h-100zM500 200h500v100h-500v-100zM500 400v100h500v-100h-500zM500 600v100h500v-100h-500z M500 800v100h500v-100h-500z" />
+<glyph unicode="&#xe033;" d="M0 100v600q0 41 29.5 70.5t70.5 29.5h100v200q0 82 59 141t141 59h300q82 0 141 -59t59 -141v-200h100q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-900q-41 0 -70.5 29.5t-29.5 70.5zM400 800h300v150q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-150z" />
+<glyph unicode="&#xe034;" d="M100 0v1100h100v-1100h-100zM300 400q60 60 127.5 84t127.5 17.5t122 -23t119 -30t110 -11t103 42t91 120.5v500q-40 -81 -101.5 -115.5t-127.5 -29.5t-138 25t-139.5 40t-125.5 25t-103 -29.5t-65 -115.5v-500z" />
+<glyph unicode="&#xe035;" d="M0 275q0 -11 7 -18t18 -7h50q11 0 18 7t7 18v300q0 127 70.5 231.5t184.5 161.5t245 57t245 -57t184.5 -161.5t70.5 -231.5v-300q0 -11 7 -18t18 -7h50q11 0 18 7t7 18v300q0 116 -49.5 227t-131 192.5t-192.5 131t-227 49.5t-227 -49.5t-192.5 -131t-131 -192.5 t-49.5 -227v-300zM200 20v460q0 8 6 14t14 6h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14zM800 20v460q0 8 6 14t14 6h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14z" />
+<glyph unicode="&#xe036;" d="M0 400h300l300 -200v800l-300 -200h-300v-400zM688 459l141 141l-141 141l71 71l141 -141l141 141l71 -71l-141 -141l141 -141l-71 -71l-141 141l-141 -141z" />
+<glyph unicode="&#xe037;" d="M0 400h300l300 -200v800l-300 -200h-300v-400zM700 857l69 53q111 -135 111 -310q0 -169 -106 -302l-67 54q86 110 86 248q0 146 -93 257z" />
+<glyph unicode="&#xe038;" d="M0 401v400h300l300 200v-800l-300 200h-300zM702 858l69 53q111 -135 111 -310q0 -170 -106 -303l-67 55q86 110 86 248q0 145 -93 257zM889 951l7 -8q123 -151 123 -344q0 -189 -119 -339l-7 -8l81 -66l6 8q142 178 142 405q0 230 -144 408l-6 8z" />
+<glyph unicode="&#xe039;" d="M0 0h500v500h-200v100h-100v-100h-200v-500zM0 600h100v100h400v100h100v100h-100v300h-500v-600zM100 100v300h300v-300h-300zM100 800v300h300v-300h-300zM200 200v100h100v-100h-100zM200 900h100v100h-100v-100zM500 500v100h300v-300h200v-100h-100v-100h-200v100 h-100v100h100v200h-200zM600 0v100h100v-100h-100zM600 1000h100v-300h200v-300h300v200h-200v100h200v500h-600v-200zM800 800v300h300v-300h-300zM900 0v100h300v-100h-300zM900 900v100h100v-100h-100zM1100 200v100h100v-100h-100z" />
+<glyph unicode="&#xe040;" d="M0 200h100v1000h-100v-1000zM100 0v100h300v-100h-300zM200 200v1000h100v-1000h-100zM500 0v91h100v-91h-100zM500 200v1000h200v-1000h-200zM700 0v91h100v-91h-100zM800 200v1000h100v-1000h-100zM900 0v91h200v-91h-200zM1000 200v1000h200v-1000h-200z" />
+<glyph unicode="&#xe041;" d="M1 700v475q0 10 7.5 17.5t17.5 7.5h474l700 -700l-500 -500zM148 953q0 -42 29 -71q30 -30 71.5 -30t71.5 30q29 29 29 71t-29 71q-30 30 -71.5 30t-71.5 -30q-29 -29 -29 -71z" />
+<glyph unicode="&#xe042;" d="M2 700v475q0 11 7 18t18 7h474l700 -700l-500 -500zM148 953q0 -42 30 -71q29 -30 71 -30t71 30q30 29 30 71t-30 71q-29 30 -71 30t-71 -30q-30 -29 -30 -71zM701 1200h100l700 -700l-500 -500l-50 50l450 450z" />
+<glyph unicode="&#xe043;" d="M100 0v1025l175 175h925v-1000l-100 -100v1000h-750l-100 -100h750v-1000h-900z" />
+<glyph unicode="&#xe044;" d="M200 0l450 444l450 -443v1150q0 20 -14.5 35t-35.5 15h-800q-21 0 -35.5 -15t-14.5 -35v-1151z" />
+<glyph unicode="&#xe045;" d="M0 100v700h200l100 -200h600l100 200h200v-700h-200v200h-800v-200h-200zM253 829l40 -124h592l62 124l-94 346q-2 11 -10 18t-18 7h-450q-10 0 -18 -7t-10 -18zM281 24l38 152q2 10 11.5 17t19.5 7h500q10 0 19.5 -7t11.5 -17l38 -152q2 -10 -3.5 -17t-15.5 -7h-600 q-10 0 -15.5 7t-3.5 17z" />
+<glyph unicode="&#xe046;" d="M0 200q0 -41 29.5 -70.5t70.5 -29.5h1000q41 0 70.5 29.5t29.5 70.5v600q0 41 -29.5 70.5t-70.5 29.5h-150q-4 8 -11.5 21.5t-33 48t-53 61t-69 48t-83.5 21.5h-200q-41 0 -82 -20.5t-70 -50t-52 -59t-34 -50.5l-12 -20h-150q-41 0 -70.5 -29.5t-29.5 -70.5v-600z M356 500q0 100 72 172t172 72t172 -72t72 -172t-72 -172t-172 -72t-172 72t-72 172zM494 500q0 -44 31 -75t75 -31t75 31t31 75t-31 75t-75 31t-75 -31t-31 -75zM900 700v100h100v-100h-100z" />
+<glyph unicode="&#xe047;" d="M53 0h365v66q-41 0 -72 11t-49 38t1 71l92 234h391l82 -222q16 -45 -5.5 -88.5t-74.5 -43.5v-66h417v66q-34 1 -74 43q-18 19 -33 42t-21 37l-6 13l-385 998h-93l-399 -1006q-24 -48 -52 -75q-12 -12 -33 -25t-36 -20l-15 -7v-66zM416 521l178 457l46 -140l116 -317h-340 z" />
+<glyph unicode="&#xe048;" d="M100 0v89q41 7 70.5 32.5t29.5 65.5v827q0 28 -1 39.5t-5.5 26t-15.5 21t-29 14t-49 14.5v70h471q120 0 213 -88t93 -228q0 -55 -11.5 -101.5t-28 -74t-33.5 -47.5t-28 -28l-12 -7q8 -3 21.5 -9t48 -31.5t60.5 -58t47.5 -91.5t21.5 -129q0 -84 -59 -156.5t-142 -111 t-162 -38.5h-500zM400 200h161q89 0 153 48.5t64 132.5q0 90 -62.5 154.5t-156.5 64.5h-159v-400zM400 700h139q76 0 130 61.5t54 138.5q0 82 -84 130.5t-239 48.5v-379z" />
+<glyph unicode="&#xe049;" d="M200 0v57q77 7 134.5 40.5t65.5 80.5l173 849q10 56 -10 74t-91 37q-6 1 -10.5 2.5t-9.5 2.5v57h425l2 -57q-33 -8 -62 -25.5t-46 -37t-29.5 -38t-17.5 -30.5l-5 -12l-128 -825q-10 -52 14 -82t95 -36v-57h-500z" />
+<glyph unicode="&#xe050;" d="M-75 200h75v800h-75l125 167l125 -167h-75v-800h75l-125 -167zM300 900v300h150h700h150v-300h-50q0 29 -8 48.5t-18.5 30t-33.5 15t-39.5 5.5t-50.5 1h-200v-850l100 -50v-100h-400v100l100 50v850h-200q-34 0 -50.5 -1t-40 -5.5t-33.5 -15t-18.5 -30t-8.5 -48.5h-49z " />
+<glyph unicode="&#xe051;" d="M33 51l167 125v-75h800v75l167 -125l-167 -125v75h-800v-75zM100 901v300h150h700h150v-300h-50q0 29 -8 48.5t-18 30t-33.5 15t-40 5.5t-50.5 1h-200v-650l100 -50v-100h-400v100l100 50v650h-200q-34 0 -50.5 -1t-39.5 -5.5t-33.5 -15t-18.5 -30t-8 -48.5h-50z" />
+<glyph unicode="&#xe052;" d="M0 50q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 350q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5 v-100zM0 650q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1000q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 950q0 -20 14.5 -35t35.5 -15h600q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-600q-21 0 -35.5 -14.5 t-14.5 -35.5v-100z" />
+<glyph unicode="&#xe053;" d="M0 50q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 650q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5 v-100zM200 350q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM200 950q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5 t-14.5 -35.5v-100z" />
+<glyph unicode="&#xe054;" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM100 650v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1000q-21 0 -35.5 15 t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM500 950v100q0 21 14.5 35.5t35.5 14.5h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-600 q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe055;" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15 t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 950v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100 q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe056;" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15 t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM0 950v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15 t-14.5 35zM300 50v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800 q-21 0 -35.5 15t-14.5 35zM300 650v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM300 950v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15 h-800q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe057;" d="M-101 500v100h201v75l166 -125l-166 -125v75h-201zM300 0h100v1100h-100v-1100zM500 50q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 350q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35 v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 650q0 -20 14.5 -35t35.5 -15h500q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 950q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35v100 q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100z" />
+<glyph unicode="&#xe058;" d="M1 50q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 350q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 650 q0 -20 14.5 -35t35.5 -15h500q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 950q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM801 0v1100h100v-1100 h-100zM934 550l167 -125v75h200v100h-200v75z" />
+<glyph unicode="&#xe059;" d="M0 275v650q0 31 22 53t53 22h750q31 0 53 -22t22 -53v-650q0 -31 -22 -53t-53 -22h-750q-31 0 -53 22t-22 53zM900 600l300 300v-600z" />
+<glyph unicode="&#xe060;" d="M0 44v1012q0 18 13 31t31 13h1112q19 0 31.5 -13t12.5 -31v-1012q0 -18 -12.5 -31t-31.5 -13h-1112q-18 0 -31 13t-13 31zM100 263l247 182l298 -131l-74 156l293 318l236 -288v500h-1000v-737zM208 750q0 56 39 95t95 39t95 -39t39 -95t-39 -95t-95 -39t-95 39t-39 95z " />
+<glyph unicode="&#xe062;" d="M148 745q0 124 60.5 231.5t165 172t226.5 64.5q123 0 227 -63t164.5 -169.5t60.5 -229.5t-73 -272q-73 -114 -166.5 -237t-150.5 -189l-57 -66q-10 9 -27 26t-66.5 70.5t-96 109t-104 135.5t-100.5 155q-63 139 -63 262zM342 772q0 -107 75.5 -182.5t181.5 -75.5 q107 0 182.5 75.5t75.5 182.5t-75.5 182t-182.5 75t-182 -75.5t-75 -181.5z" />
+<glyph unicode="&#xe063;" d="M1 600q0 122 47.5 233t127.5 191t191 127.5t233 47.5t233 -47.5t191 -127.5t127.5 -191t47.5 -233t-47.5 -233t-127.5 -191t-191 -127.5t-233 -47.5t-233 47.5t-191 127.5t-127.5 191t-47.5 233zM173 600q0 -177 125.5 -302t301.5 -125v854q-176 0 -301.5 -125 t-125.5 -302z" />
+<glyph unicode="&#xe064;" d="M117 406q0 94 34 186t88.5 172.5t112 159t115 177t87.5 194.5q21 -71 57.5 -142.5t76 -130.5t83 -118.5t82 -117t70 -116t50 -125.5t18.5 -136q0 -89 -39 -165.5t-102 -126.5t-140 -79.5t-156 -33.5q-114 6 -211.5 53t-161.5 138.5t-64 210.5zM243 414q14 -82 59.5 -136 t136.5 -80l16 98q-7 6 -18 17t-34 48t-33 77q-15 73 -14 143.5t10 122.5l9 51q-92 -110 -119.5 -185t-12.5 -156z" />
+<glyph unicode="&#xe065;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5q366 -6 397 -14l-186 -186h-311q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v125l200 200v-225q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5 t-117.5 282.5zM436 341l161 50l412 412l-114 113l-405 -405zM995 1015l113 -113l113 113l-21 85l-92 28z" />
+<glyph unicode="&#xe066;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h261l2 -80q-133 -32 -218 -120h-145q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5l200 153v-53q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5 zM423 524q30 38 81.5 64t103 35.5t99 14t77.5 3.5l29 -1v-209l360 324l-359 318v-216q-7 0 -19 -1t-48 -8t-69.5 -18.5t-76.5 -37t-76.5 -59t-62 -88t-39.5 -121.5z" />
+<glyph unicode="&#xe067;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q60 0 127 -23l-178 -177h-349q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v69l200 200v-169q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5 t-117.5 282.5zM342 632l283 -284l566 567l-136 137l-430 -431l-147 147z" />
+<glyph unicode="&#xe068;" d="M0 603l300 296v-198h200v200h-200l300 300l295 -300h-195v-200h200v198l300 -296l-300 -300v198h-200v-200h195l-295 -300l-300 300h200v200h-200v-198z" />
+<glyph unicode="&#xe069;" d="M200 50v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-1100l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe070;" d="M0 50v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-487l500 487v-1100l-500 488v-488l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe071;" d="M136 550l564 550v-487l500 487v-1100l-500 488v-488z" />
+<glyph unicode="&#xe072;" d="M200 0l900 550l-900 550v-1100z" />
+<glyph unicode="&#xe073;" d="M200 150q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v800q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5t-14.5 -35.5v-800zM600 150q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v800q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-800z" />
+<glyph unicode="&#xe074;" d="M200 150q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35v800q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5v-800z" />
+<glyph unicode="&#xe075;" d="M0 0v1100l500 -487v487l564 -550l-564 -550v488z" />
+<glyph unicode="&#xe076;" d="M0 0v1100l500 -487v487l500 -487v437q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438l-500 -488v488z" />
+<glyph unicode="&#xe077;" d="M300 0v1100l500 -487v437q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438z" />
+<glyph unicode="&#xe078;" d="M100 250v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5zM100 500h1100l-550 564z" />
+<glyph unicode="&#xe079;" d="M185 599l592 -592l240 240l-353 353l353 353l-240 240z" />
+<glyph unicode="&#xe080;" d="M272 194l353 353l-353 353l241 240l572 -571l21 -22l-1 -1v-1l-592 -591z" />
+<glyph unicode="&#xe081;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM300 500h200v-200h200v200h200v200h-200v200h-200v-200h-200v-200z" />
+<glyph unicode="&#xe082;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM300 500h600v200h-600v-200z" />
+<glyph unicode="&#xe083;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM246 459l213 -213l141 142l141 -142l213 213l-142 141l142 141l-213 212l-141 -141l-141 142l-212 -213l141 -141z" />
+<glyph unicode="&#xe084;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5zM270 551l276 -277l411 411l-175 174l-236 -236l-102 102z" />
+<glyph unicode="&#xe085;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM363 700h144q4 0 11.5 -1t11 -1t6.5 3t3 9t1 11t3.5 8.5t3.5 6t5.5 4t6.5 2.5t9 1.5t9 0.5h11.5h12.5q19 0 30 -10t11 -26 q0 -22 -4 -28t-27 -22q-5 -1 -12.5 -3t-27 -13.5t-34 -27t-26.5 -46t-11 -68.5h200q5 3 14 8t31.5 25.5t39.5 45.5t31 69t14 94q0 51 -17.5 89t-42 58t-58.5 32t-58.5 15t-51.5 3q-105 0 -172 -56t-67 -183zM500 300h200v100h-200v-100z" />
+<glyph unicode="&#xe086;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM400 300h400v100h-100v300h-300v-100h100v-200h-100v-100zM500 800h200v100h-200v-100z" />
+<glyph unicode="&#xe087;" d="M0 500v200h194q15 60 36 104.5t55.5 86t88 69t126.5 40.5v200h200v-200q54 -20 113 -60t112.5 -105.5t71.5 -134.5h203v-200h-203q-25 -102 -116.5 -186t-180.5 -117v-197h-200v197q-140 27 -208 102.5t-98 200.5h-194zM290 500q24 -73 79.5 -127.5t130.5 -78.5v206h200 v-206q149 48 201 206h-201v200h200q-25 74 -76 127.5t-124 76.5v-204h-200v203q-75 -24 -130 -77.5t-79 -125.5h209v-200h-210z" />
+<glyph unicode="&#xe088;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM356 465l135 135 l-135 135l109 109l135 -135l135 135l109 -109l-135 -135l135 -135l-109 -109l-135 135l-135 -135z" />
+<glyph unicode="&#xe089;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM322 537l141 141 l87 -87l204 205l142 -142l-346 -345z" />
+<glyph unicode="&#xe090;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -115 62 -215l568 567q-100 62 -216 62q-171 0 -292.5 -121.5t-121.5 -292.5zM391 245q97 -59 209 -59q171 0 292.5 121.5t121.5 292.5 q0 112 -59 209z" />
+<glyph unicode="&#xe091;" d="M0 547l600 453v-300h600v-300h-600v-301z" />
+<glyph unicode="&#xe092;" d="M0 400v300h600v300l600 -453l-600 -448v301h-600z" />
+<glyph unicode="&#xe093;" d="M204 600l450 600l444 -600h-298v-600h-300v600h-296z" />
+<glyph unicode="&#xe094;" d="M104 600h296v600h300v-600h298l-449 -600z" />
+<glyph unicode="&#xe095;" d="M0 200q6 132 41 238.5t103.5 193t184 138t271.5 59.5v271l600 -453l-600 -448v301q-95 -2 -183 -20t-170 -52t-147 -92.5t-100 -135.5z" />
+<glyph unicode="&#xe096;" d="M0 0v400l129 -129l294 294l142 -142l-294 -294l129 -129h-400zM635 777l142 -142l294 294l129 -129v400h-400l129 -129z" />
+<glyph unicode="&#xe097;" d="M34 176l295 295l-129 129h400v-400l-129 130l-295 -295zM600 600v400l129 -129l295 295l142 -141l-295 -295l129 -130h-400z" />
+<glyph unicode="&#xe101;" d="M23 600q0 118 45.5 224.5t123 184t184 123t224.5 45.5t224.5 -45.5t184 -123t123 -184t45.5 -224.5t-45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5zM456 851l58 -302q4 -20 21.5 -34.5t37.5 -14.5h54q20 0 37.5 14.5 t21.5 34.5l58 302q4 20 -8 34.5t-33 14.5h-207q-20 0 -32 -14.5t-8 -34.5zM500 300h200v100h-200v-100z" />
+<glyph unicode="&#xe102;" d="M0 800h100v-200h400v300h200v-300h400v200h100v100h-111v6t-1 15t-3 18l-34 172q-11 39 -41.5 63t-69.5 24q-32 0 -61 -17l-239 -144q-22 -13 -40 -35q-19 24 -40 36l-238 144q-33 18 -62 18q-39 0 -69.5 -23t-40.5 -61l-35 -177q-2 -8 -3 -18t-1 -15v-6h-111v-100z M100 0h400v400h-400v-400zM200 900q-3 0 14 48t35 96l18 47l214 -191h-281zM700 0v400h400v-400h-400zM731 900l202 197q5 -12 12 -32.5t23 -64t25 -72t7 -28.5h-269z" />
+<glyph unicode="&#xe103;" d="M0 -22v143l216 193q-9 53 -13 83t-5.5 94t9 113t38.5 114t74 124q47 60 99.5 102.5t103 68t127.5 48t145.5 37.5t184.5 43.5t220 58.5q0 -189 -22 -343t-59 -258t-89 -181.5t-108.5 -120t-122 -68t-125.5 -30t-121.5 -1.5t-107.5 12.5t-87.5 17t-56.5 7.5l-99 -55z M238.5 300.5q19.5 -6.5 86.5 76.5q55 66 367 234q70 38 118.5 69.5t102 79t99 111.5t86.5 148q22 50 24 60t-6 19q-7 5 -17 5t-26.5 -14.5t-33.5 -39.5q-35 -51 -113.5 -108.5t-139.5 -89.5l-61 -32q-369 -197 -458 -401q-48 -111 -28.5 -117.5z" />
+<glyph unicode="&#xe104;" d="M111 408q0 -33 5 -63q9 -56 44 -119.5t105 -108.5q31 -21 64 -16t62 23.5t57 49.5t48 61.5t35 60.5q32 66 39 184.5t-13 157.5q79 -80 122 -164t26 -184q-5 -33 -20.5 -69.5t-37.5 -80.5q-10 -19 -14.5 -29t-12 -26t-9 -23.5t-3 -19t2.5 -15.5t11 -9.5t19.5 -5t30.5 2.5 t42 8q57 20 91 34t87.5 44.5t87 64t65.5 88.5t47 122q38 172 -44.5 341.5t-246.5 278.5q22 -44 43 -129q39 -159 -32 -154q-15 2 -33 9q-79 33 -120.5 100t-44 175.5t48.5 257.5q-13 -8 -34 -23.5t-72.5 -66.5t-88.5 -105.5t-60 -138t-8 -166.5q2 -12 8 -41.5t8 -43t6 -39.5 t3.5 -39.5t-1 -33.5t-6 -31.5t-13.5 -24t-21 -20.5t-31 -12q-38 -10 -67 13t-40.5 61.5t-15 81.5t10.5 75q-52 -46 -83.5 -101t-39 -107t-7.5 -85z" />
+<glyph unicode="&#xe105;" d="M-61 600l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5t145.5 -23.5t132.5 -59t116.5 -83.5t97 -90t74.5 -85.5t49 -63.5t20 -30l26 -40l-26 -40q-6 -10 -20 -30t-49 -63.5t-74.5 -85.5t-97 -90t-116.5 -83.5t-132.5 -59t-145.5 -23.5 t-145.5 23.5t-132.5 59t-116.5 83.5t-97 90t-74.5 85.5t-49 63.5t-20 30zM120 600q7 -10 40.5 -58t56 -78.5t68 -77.5t87.5 -75t103 -49.5t125 -21.5t123.5 20t100.5 45.5t85.5 71.5t66.5 75.5t58 81.5t47 66q-1 1 -28.5 37.5t-42 55t-43.5 53t-57.5 63.5t-58.5 54 q49 -74 49 -163q0 -124 -88 -212t-212 -88t-212 88t-88 212q0 85 46 158q-102 -87 -226 -258zM377 656q49 -124 154 -191l105 105q-37 24 -75 72t-57 84l-20 36z" />
+<glyph unicode="&#xe106;" d="M-61 600l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5q61 0 121 -17l37 142h148l-314 -1200h-148l37 143q-82 21 -165 71.5t-140 102t-109.5 112t-72 88.5t-29.5 43zM120 600q210 -282 393 -336l37 141q-107 18 -178.5 101.5t-71.5 193.5 q0 85 46 158q-102 -87 -226 -258zM377 656q49 -124 154 -191l47 47l23 87q-30 28 -59 69t-44 68l-14 26zM780 161l38 145q22 15 44.5 34t46 44t40.5 44t41 50.5t33.5 43.5t33 44t24.5 34q-97 127 -140 175l39 146q67 -54 131.5 -125.5t87.5 -103.5t36 -52l26 -40l-26 -40 q-7 -12 -25.5 -38t-63.5 -79.5t-95.5 -102.5t-124 -100t-146.5 -79z" />
+<glyph unicode="&#xe107;" d="M-97.5 34q13.5 -34 50.5 -34h1294q37 0 50.5 35.5t-7.5 67.5l-642 1056q-20 33 -48 36t-48 -29l-642 -1066q-21 -32 -7.5 -66zM155 200l445 723l445 -723h-345v100h-200v-100h-345zM500 600l100 -300l100 300v100h-200v-100z" />
+<glyph unicode="&#xe108;" d="M100 262v41q0 20 11 44.5t26 38.5l363 325v339q0 62 44 106t106 44t106 -44t44 -106v-339l363 -325q15 -14 26 -38.5t11 -44.5v-41q0 -20 -12 -26.5t-29 5.5l-359 249v-263q100 -91 100 -113v-64q0 -21 -13 -29t-32 1l-94 78h-222l-94 -78q-19 -9 -32 -1t-13 29v64 q0 22 100 113v263l-359 -249q-17 -12 -29 -5.5t-12 26.5z" />
+<glyph unicode="&#xe109;" d="M0 50q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v750h-1100v-750zM0 900h1100v150q0 21 -14.5 35.5t-35.5 14.5h-150v100h-100v-100h-500v100h-100v-100h-150q-21 0 -35.5 -14.5t-14.5 -35.5v-150zM100 100v100h100v-100h-100zM100 300v100h100v-100h-100z M100 500v100h100v-100h-100zM300 100v100h100v-100h-100zM300 300v100h100v-100h-100zM300 500v100h100v-100h-100zM500 100v100h100v-100h-100zM500 300v100h100v-100h-100zM500 500v100h100v-100h-100zM700 100v100h100v-100h-100zM700 300v100h100v-100h-100zM700 500 v100h100v-100h-100zM900 100v100h100v-100h-100zM900 300v100h100v-100h-100zM900 500v100h100v-100h-100z" />
+<glyph unicode="&#xe110;" d="M0 200v200h259l600 600h241v198l300 -295l-300 -300v197h-159l-600 -600h-341zM0 800h259l122 -122l141 142l-181 180h-341v-200zM678 381l141 142l122 -123h159v198l300 -295l-300 -300v197h-241z" />
+<glyph unicode="&#xe111;" d="M0 400v600q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-596l-304 -300v300h-100q-41 0 -70.5 29.5t-29.5 70.5z" />
+<glyph unicode="&#xe112;" d="M100 600v200h300v-250q0 -113 6 -145q17 -92 102 -117q39 -11 92 -11q37 0 66.5 5.5t50 15.5t36 24t24 31.5t14 37.5t7 42t2.5 45t0 47v25v250h300v-200q0 -42 -3 -83t-15 -104t-31.5 -116t-58 -109.5t-89 -96.5t-129 -65.5t-174.5 -25.5t-174.5 25.5t-129 65.5t-89 96.5 t-58 109.5t-31.5 116t-15 104t-3 83zM100 900v300h300v-300h-300zM800 900v300h300v-300h-300z" />
+<glyph unicode="&#xe113;" d="M-30 411l227 -227l352 353l353 -353l226 227l-578 579z" />
+<glyph unicode="&#xe114;" d="M70 797l580 -579l578 579l-226 227l-353 -353l-352 353z" />
+<glyph unicode="&#xe115;" d="M-198 700l299 283l300 -283h-203v-400h385l215 -200h-800v600h-196zM402 1000l215 -200h381v-400h-198l299 -283l299 283h-200v600h-796z" />
+<glyph unicode="&#xe116;" d="M18 939q-5 24 10 42q14 19 39 19h896l38 162q5 17 18.5 27.5t30.5 10.5h94q20 0 35 -14.5t15 -35.5t-15 -35.5t-35 -14.5h-54l-201 -961q-2 -4 -6 -10.5t-19 -17.5t-33 -11h-31v-50q0 -20 -14.5 -35t-35.5 -15t-35.5 15t-14.5 35v50h-300v-50q0 -20 -14.5 -35t-35.5 -15 t-35.5 15t-14.5 35v50h-50q-21 0 -35.5 15t-14.5 35q0 21 14.5 35.5t35.5 14.5h535l48 200h-633q-32 0 -54.5 21t-27.5 43z" />
+<glyph unicode="&#xe117;" d="M0 0v800h1200v-800h-1200zM0 900v100h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-100h-1200z" />
+<glyph unicode="&#xe118;" d="M1 0l300 700h1200l-300 -700h-1200zM1 400v600h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-200h-1000z" />
+<glyph unicode="&#xe119;" d="M302 300h198v600h-198l298 300l298 -300h-198v-600h198l-298 -300z" />
+<glyph unicode="&#xe120;" d="M0 600l300 298v-198h600v198l300 -298l-300 -297v197h-600v-197z" />
+<glyph unicode="&#xe121;" d="M0 100v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM31 400l172 739q5 22 23 41.5t38 19.5h672q19 0 37.5 -22.5t23.5 -45.5l172 -732h-1138zM800 100h100v100h-100v-100z M1000 100h100v100h-100v-100z" />
+<glyph unicode="&#xe122;" d="M-101 600v50q0 24 25 49t50 38l25 13v-250l-11 5.5t-24 14t-30 21.5t-24 27.5t-11 31.5zM99 500v250v5q0 13 0.5 18.5t2.5 13t8 10.5t15 3h200l675 250v-850l-675 200h-38l47 -276q2 -12 -3 -17.5t-11 -6t-21 -0.5h-8h-83q-20 0 -34.5 14t-18.5 35q-56 337 -56 351z M1100 200v850q0 21 14.5 35.5t35.5 14.5q20 0 35 -14.5t15 -35.5v-850q0 -20 -15 -35t-35 -15q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe123;" d="M74 350q0 21 13.5 35.5t33.5 14.5h17l118 173l63 327q15 77 76 140t144 83l-18 32q-6 19 3 32t29 13h94q20 0 29 -10.5t3 -29.5l-18 -37q83 -19 144 -82.5t76 -140.5l63 -327l118 -173h17q20 0 33.5 -14.5t13.5 -35.5q0 -20 -13 -40t-31 -27q-22 -9 -63 -23t-167.5 -37 t-251.5 -23t-245.5 20.5t-178.5 41.5l-58 20q-18 7 -31 27.5t-13 40.5zM497 110q12 -49 40 -79.5t63 -30.5t63 30.5t39 79.5q-48 -6 -102 -6t-103 6z" />
+<glyph unicode="&#xe124;" d="M21 445l233 -45l-78 -224l224 78l45 -233l155 179l155 -179l45 233l224 -78l-78 224l234 45l-180 155l180 156l-234 44l78 225l-224 -78l-45 233l-155 -180l-155 180l-45 -233l-224 78l78 -225l-233 -44l179 -156z" />
+<glyph unicode="&#xe125;" d="M0 200h200v600h-200v-600zM300 275q0 -75 100 -75h61q123 -100 139 -100h250q46 0 83 57l238 344q29 31 29 74v100q0 44 -30.5 84.5t-69.5 40.5h-328q28 118 28 125v150q0 44 -30.5 84.5t-69.5 40.5h-50q-27 0 -51 -20t-38 -48l-96 -198l-145 -196q-20 -26 -20 -63v-400z M400 300v375l150 212l100 213h50v-175l-50 -225h450v-125l-250 -375h-214l-136 100h-100z" />
+<glyph unicode="&#xe126;" d="M0 400v600h200v-600h-200zM300 525v400q0 75 100 75h61q123 100 139 100h250q46 0 83 -57l238 -344q29 -31 29 -74v-100q0 -44 -30.5 -84.5t-69.5 -40.5h-328q28 -118 28 -125v-150q0 -44 -30.5 -84.5t-69.5 -40.5h-50q-27 0 -51 20t-38 48l-96 198l-145 196 q-20 26 -20 63zM400 525l150 -212l100 -213h50v175l-50 225h450v125l-250 375h-214l-136 -100h-100v-375z" />
+<glyph unicode="&#xe127;" d="M8 200v600h200v-600h-200zM308 275v525q0 17 14 35.5t28 28.5l14 9l362 230q14 6 25 6q17 0 29 -12l109 -112q14 -14 14 -34q0 -18 -11 -32l-85 -121h302q85 0 138.5 -38t53.5 -110t-54.5 -111t-138.5 -39h-107l-130 -339q-7 -22 -20.5 -41.5t-28.5 -19.5h-341 q-7 0 -90 81t-83 94zM408 289l100 -89h293l131 339q6 21 19.5 41t28.5 20h203q16 0 25 15t9 36q0 20 -9 34.5t-25 14.5h-457h-6.5h-7.5t-6.5 0.5t-6 1t-5 1.5t-5.5 2.5t-4 4t-4 5.5q-5 12 -5 20q0 14 10 27l147 183l-86 83l-339 -236v-503z" />
+<glyph unicode="&#xe128;" d="M-101 651q0 72 54 110t139 37h302l-85 121q-11 16 -11 32q0 21 14 34l109 113q13 12 29 12q11 0 25 -6l365 -230q7 -4 16.5 -10.5t26 -26t16.5 -36.5v-526q0 -13 -85.5 -93.5t-93.5 -80.5h-342q-15 0 -28.5 20t-19.5 41l-131 339h-106q-84 0 -139 39t-55 111zM-1 601h222 q15 0 28.5 -20.5t19.5 -40.5l131 -339h293l106 89v502l-342 237l-87 -83l145 -184q10 -11 10 -26q0 -11 -5 -20q-1 -3 -3.5 -5.5l-4 -4t-5 -2.5t-5.5 -1.5t-6.5 -1t-6.5 -0.5h-7.5h-6.5h-476v-100zM999 201v600h200v-600h-200z" />
+<glyph unicode="&#xe129;" d="M97 719l230 -363q4 -6 10.5 -15.5t26 -25t36.5 -15.5h525q13 0 94 83t81 90v342q0 15 -20 28.5t-41 19.5l-339 131v106q0 84 -39 139t-111 55t-110 -53.5t-38 -138.5v-302l-121 84q-15 12 -33.5 11.5t-32.5 -13.5l-112 -110q-22 -22 -6 -53zM172 739l83 86l183 -146 q22 -18 47 -5q3 1 5.5 3.5l4 4t2.5 5t1.5 5.5t1 6.5t0.5 6v7.5v7v456q0 22 25 31t50 -0.5t25 -30.5v-202q0 -16 20 -29.5t41 -19.5l339 -130v-294l-89 -100h-503zM400 0v200h600v-200h-600z" />
+<glyph unicode="&#xe130;" d="M1 585q-15 -31 7 -53l112 -110q13 -13 32 -13.5t34 10.5l121 85l-1 -302q0 -84 38.5 -138t110.5 -54t111 55t39 139v106l339 131q20 6 40.5 19.5t20.5 28.5v342q0 7 -81 90t-94 83h-525q-17 0 -35.5 -14t-28.5 -28l-10 -15zM76 565l237 339h503l89 -100v-294l-340 -130 q-20 -6 -40 -20t-20 -29v-202q0 -22 -25 -31t-50 0t-25 31v456v14.5t-1.5 11.5t-5 12t-9.5 7q-24 13 -46 -5l-184 -146zM305 1104v200h600v-200h-600z" />
+<glyph unicode="&#xe131;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q162 0 299.5 -80t217.5 -218t80 -300t-80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 500h300l-2 -194l402 294l-402 298v-197h-298v-201z" />
+<glyph unicode="&#xe132;" d="M0 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t231.5 47.5q122 0 232.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-218 -217.5t-300 -80t-299.5 80t-217.5 217.5t-80 299.5zM200 600l400 -294v194h302v201h-300v197z" />
+<glyph unicode="&#xe133;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 600h200v-300h200v300h200l-300 400z" />
+<glyph unicode="&#xe134;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 600l300 -400l300 400h-200v300h-200v-300h-200z" />
+<glyph unicode="&#xe135;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM254 780q-8 -34 5.5 -93t7.5 -87q0 -9 17 -44t16 -60q12 0 23 -5.5 t23 -15t20 -13.5q20 -10 108 -42q22 -8 53 -31.5t59.5 -38.5t57.5 -11q8 -18 -15 -55.5t-20 -57.5q12 -21 22.5 -34.5t28 -27t36.5 -17.5q0 -6 -3 -15.5t-3.5 -14.5t4.5 -17q101 -2 221 111q31 30 47 48t34 49t21 62q-14 9 -37.5 9.5t-35.5 7.5q-14 7 -49 15t-52 19 q-9 0 -39.5 -0.5t-46.5 -1.5t-39 -6.5t-39 -16.5q-50 -35 -66 -12q-4 2 -3.5 25.5t0.5 25.5q-6 13 -26.5 17t-24.5 7q2 22 -2 41t-16.5 28t-38.5 -20q-23 -25 -42 4q-19 28 -8 58q8 16 22 22q6 -1 26 -1.5t33.5 -4.5t19.5 -13q12 -19 32 -37.5t34 -27.5l14 -8q0 3 9.5 39.5 t5.5 57.5q-4 23 14.5 44.5t22.5 31.5q5 14 10 35t8.5 31t15.5 22.5t34 21.5q-6 18 10 37q8 0 23.5 -1.5t24.5 -1.5t20.5 4.5t20.5 15.5q-10 23 -30.5 42.5t-38 30t-49 26.5t-43.5 23q11 41 1 44q31 -13 58.5 -14.5t39.5 3.5l11 4q6 36 -17 53.5t-64 28.5t-56 23 q-19 -3 -37 0q-15 -12 -36.5 -21t-34.5 -12t-44 -8t-39 -6q-15 -3 -46 0t-45 -3q-20 -6 -51.5 -25.5t-34.5 -34.5q-3 -11 6.5 -22.5t8.5 -18.5q-3 -34 -27.5 -91t-29.5 -79zM518 915q3 12 16 30.5t16 25.5q10 -10 18.5 -10t14 6t14.5 14.5t16 12.5q0 -18 8 -42.5t16.5 -44 t9.5 -23.5q-6 1 -39 5t-53.5 10t-36.5 16z" />
+<glyph unicode="&#xe136;" d="M0 164.5q0 21.5 15 37.5l600 599q-33 101 6 201.5t135 154.5q164 92 306 -9l-259 -138l145 -232l251 126q13 -175 -151 -267q-123 -70 -253 -23l-596 -596q-15 -16 -36.5 -16t-36.5 16l-111 110q-15 15 -15 36.5z" />
+<glyph unicode="&#xe137;" horiz-adv-x="1220" d="M0 196v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM0 596v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5zM0 996v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM600 596h500v100h-500v-100zM800 196h300v100h-300v-100zM900 996h200v100h-200v-100z" />
+<glyph unicode="&#xe138;" d="M100 1100v100h1000v-100h-1000zM150 1000h900l-350 -500v-300l-200 -200v500z" />
+<glyph unicode="&#xe139;" d="M0 200v200h1200v-200q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM0 500v400q0 41 29.5 70.5t70.5 29.5h300v100q0 41 29.5 70.5t70.5 29.5h200q41 0 70.5 -29.5t29.5 -70.5v-100h300q41 0 70.5 -29.5t29.5 -70.5v-400h-500v100h-200v-100h-500z M500 1000h200v100h-200v-100z" />
+<glyph unicode="&#xe140;" d="M0 0v400l129 -129l200 200l142 -142l-200 -200l129 -129h-400zM0 800l129 129l200 -200l142 142l-200 200l129 129h-400v-400zM729 329l142 142l200 -200l129 129v-400h-400l129 129zM729 871l200 200l-129 129h400v-400l-129 129l-200 -200z" />
+<glyph unicode="&#xe141;" d="M0 596q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM182 596q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM291 655 q0 23 15.5 38.5t38.5 15.5t39 -16t16 -38q0 -23 -16 -39t-39 -16q-22 0 -38 16t-16 39zM400 850q0 22 16 38.5t39 16.5q22 0 38 -16t16 -39t-16 -39t-38 -16q-23 0 -39 16.5t-16 38.5zM513 609q0 32 21 56.5t52 29.5l122 126l1 1q-9 14 -9 28q0 22 16 38.5t39 16.5 q22 0 38 -16t16 -39t-16 -39t-38 -16q-16 0 -29 10l-55 -145q17 -22 17 -51q0 -36 -25.5 -61.5t-61.5 -25.5q-37 0 -62.5 25.5t-25.5 61.5zM800 655q0 22 16 38t39 16t38.5 -15.5t15.5 -38.5t-16 -39t-38 -16q-23 0 -39 16t-16 39z" />
+<glyph unicode="&#xe142;" d="M-40 375q-13 -95 35 -173q35 -57 94 -89t129 -32q63 0 119 28q33 16 65 40.5t52.5 45.5t59.5 64q40 44 57 61l394 394q35 35 47 84t-3 96q-27 87 -117 104q-20 2 -29 2q-46 0 -79.5 -17t-67.5 -51l-388 -396l-7 -7l69 -67l377 373q20 22 39 38q23 23 50 23q38 0 53 -36 q16 -39 -20 -75l-547 -547q-52 -52 -125 -52q-55 0 -100 33t-54 96q-5 35 2.5 66t31.5 63t42 50t56 54q24 21 44 41l348 348q52 52 82.5 79.5t84 54t107.5 26.5q25 0 48 -4q95 -17 154 -94.5t51 -175.5q-7 -101 -98 -192l-252 -249l-253 -256l7 -7l69 -60l517 511 q67 67 95 157t11 183q-16 87 -67 154t-130 103q-69 33 -152 33q-107 0 -197 -55q-40 -24 -111 -95l-512 -512q-68 -68 -81 -163z" />
+<glyph unicode="&#xe143;" d="M79 784q0 131 99 229.5t230 98.5q144 0 242 -129q103 129 245 129q130 0 227 -98.5t97 -229.5q0 -46 -17.5 -91t-61 -99t-77 -89.5t-104.5 -105.5q-197 -191 -293 -322l-17 -23l-16 23q-43 58 -100 122.5t-92 99.5t-101 100l-84.5 84.5t-68 74t-60 78t-33.5 70.5t-15 78z M250 784q0 -27 30.5 -70t61.5 -75.5t95 -94.5l22 -22q93 -90 190 -201q82 92 195 203l12 12q64 62 97.5 97t64.5 79t31 72q0 71 -48 119.5t-106 48.5q-73 0 -131 -83l-118 -171l-114 174q-51 80 -124 80q-59 0 -108.5 -49.5t-49.5 -118.5z" />
+<glyph unicode="&#xe144;" d="M57 353q0 -94 66 -160l141 -141q66 -66 159 -66q95 0 159 66l283 283q66 66 66 159t-66 159l-141 141q-12 12 -19 17l-105 -105l212 -212l-389 -389l-247 248l95 95l-18 18q-46 45 -75 101l-55 -55q-66 -66 -66 -159zM269 706q0 -93 66 -159l141 -141l19 -17l105 105 l-212 212l389 389l247 -247l-95 -96l18 -18q46 -46 77 -99l29 29q35 35 62.5 88t27.5 96q0 93 -66 159l-141 141q-66 66 -159 66q-95 0 -159 -66l-283 -283q-66 -64 -66 -159z" />
+<glyph unicode="&#xe145;" d="M200 100v953q0 21 30 46t81 48t129 38t163 15t162 -15t127 -38t79 -48t29 -46v-953q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-41 0 -70.5 29.5t-29.5 70.5zM300 300h600v700h-600v-700zM496 150q0 -43 30.5 -73.5t73.5 -30.5t73.5 30.5t30.5 73.5t-30.5 73.5t-73.5 30.5 t-73.5 -30.5t-30.5 -73.5z" />
+<glyph unicode="&#xe146;" d="M0 0l303 380l207 208l-210 212h300l267 279l-35 36q-15 14 -15 35t15 35q14 15 35 15t35 -15l283 -282q15 -15 15 -36t-15 -35q-14 -15 -35 -15t-35 15l-36 35l-279 -267v-300l-212 210l-208 -207z" />
+<glyph unicode="&#xe148;" d="M295 433h139q5 -77 48.5 -126.5t117.5 -64.5v335l-27 7q-46 14 -79 26.5t-72 36t-62.5 52t-40 72.5t-16.5 99q0 92 44 159.5t109 101t144 40.5v78h100v-79q38 -4 72.5 -13.5t75.5 -31.5t71 -53.5t51.5 -84t24.5 -118.5h-159q-8 72 -35 109.5t-101 50.5v-307l64 -14 q34 -7 64 -16.5t70 -31.5t67.5 -52t47.5 -80.5t20 -112.5q0 -139 -89 -224t-244 -96v-77h-100v78q-152 17 -237 104q-40 40 -52.5 93.5t-15.5 139.5zM466 889q0 -29 8 -51t16.5 -34t29.5 -22.5t31 -13.5t38 -10q7 -2 11 -3v274q-61 -8 -97.5 -37.5t-36.5 -102.5zM700 237 q170 18 170 151q0 64 -44 99.5t-126 60.5v-311z" />
+<glyph unicode="&#xe149;" d="M100 600v100h166q-24 49 -44 104q-10 26 -14.5 55.5t-3 72.5t25 90t68.5 87q97 88 263 88q129 0 230 -89t101 -208h-153q0 52 -34 89.5t-74 51.5t-76 14q-37 0 -79 -14.5t-62 -35.5q-41 -44 -41 -101q0 -11 2.5 -24.5t5.5 -24t9.5 -26.5t10.5 -25t14 -27.5t14 -25.5 t15.5 -27t13.5 -24h242v-100h-197q8 -50 -2.5 -115t-31.5 -94q-41 -59 -99 -113q35 11 84 18t70 7q32 1 102 -16t104 -17q76 0 136 30l50 -147q-41 -25 -80.5 -36.5t-59 -13t-61.5 -1.5q-23 0 -128 33t-155 29q-39 -4 -82 -17t-66 -25l-24 -11l-55 145l16.5 11t15.5 10 t13.5 9.5t14.5 12t14.5 14t17.5 18.5q48 55 54 126.5t-30 142.5h-221z" />
+<glyph unicode="&#xe150;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM602 900l298 300l298 -300h-198v-900h-200v900h-198z" />
+<glyph unicode="&#xe151;" d="M2 300h198v900h200v-900h198l-298 -300zM700 0v200h100v-100h200v-100h-300zM700 400v100h300v-200h-99v-100h-100v100h99v100h-200zM700 700v500h300v-500h-100v100h-100v-100h-100zM801 900h100v200h-100v-200z" />
+<glyph unicode="&#xe152;" d="M2 300h198v900h200v-900h198l-298 -300zM700 0v500h300v-500h-100v100h-100v-100h-100zM700 700v200h100v-100h200v-100h-300zM700 1100v100h300v-200h-99v-100h-100v100h99v100h-200zM801 200h100v200h-100v-200z" />
+<glyph unicode="&#xe153;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM800 100v400h300v-500h-100v100h-200zM800 1100v100h200v-500h-100v400h-100zM901 200h100v200h-100v-200z" />
+<glyph unicode="&#xe154;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM800 400v100h200v-500h-100v400h-100zM800 800v400h300v-500h-100v100h-200zM901 900h100v200h-100v-200z" />
+<glyph unicode="&#xe155;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM700 100v200h500v-200h-500zM700 400v200h400v-200h-400zM700 700v200h300v-200h-300zM700 1000v200h200v-200h-200z" />
+<glyph unicode="&#xe156;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM700 100v200h200v-200h-200zM700 400v200h300v-200h-300zM700 700v200h400v-200h-400zM700 1000v200h500v-200h-500z" />
+<glyph unicode="&#xe157;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q162 0 281 -118.5t119 -281.5v-300q0 -165 -118.5 -282.5t-281.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500z" />
+<glyph unicode="&#xe158;" d="M0 400v300q0 163 119 281.5t281 118.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-163 0 -281.5 117.5t-118.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM400 300l333 250l-333 250v-500z" />
+<glyph unicode="&#xe159;" d="M0 400v300q0 163 117.5 281.5t282.5 118.5h300q163 0 281.5 -119t118.5 -281v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM300 700l250 -333l250 333h-500z" />
+<glyph unicode="&#xe160;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -162 -118.5 -281t-281.5 -119h-300q-165 0 -282.5 118.5t-117.5 281.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM300 400h500l-250 333z" />
+<glyph unicode="&#xe161;" d="M0 400v300h300v200l400 -350l-400 -350v200h-300zM500 0v200h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-500v200h400q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-400z" />
+<glyph unicode="&#xe162;" d="M216 519q10 -19 32 -19h302q-155 -438 -160 -458q-5 -21 4 -32l9 -8l9 -1q13 0 26 16l538 630q15 19 6 36q-8 18 -32 16h-300q1 4 78 219.5t79 227.5q2 17 -6 27l-8 8h-9q-16 0 -25 -15q-4 -5 -98.5 -111.5t-228 -257t-209.5 -238.5q-17 -19 -7 -40z" />
+<glyph unicode="&#xe163;" d="M0 400q0 -165 117.5 -282.5t282.5 -117.5h300q47 0 100 15v185h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h500v185q-14 4 -114 7.5t-193 5.5l-93 2q-165 0 -282.5 -117.5t-117.5 -282.5v-300zM600 400v300h300v200l400 -350l-400 -350v200h-300z " />
+<glyph unicode="&#xe164;" d="M0 400q0 -165 117.5 -282.5t282.5 -117.5h300q163 0 281.5 117.5t118.5 282.5v98l-78 73l-122 -123v-148q0 -41 -29.5 -70.5t-70.5 -29.5h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h156l118 122l-74 78h-100q-165 0 -282.5 -117.5t-117.5 -282.5 v-300zM496 709l353 342l-149 149h500v-500l-149 149l-342 -353z" />
+<glyph unicode="&#xe165;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM406 600 q0 80 57 137t137 57t137 -57t57 -137t-57 -137t-137 -57t-137 57t-57 137z" />
+<glyph unicode="&#xe166;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 800l445 -500l450 500h-295v400h-300v-400h-300zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe167;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 700h300v-300h300v300h295l-445 500zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe168;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 705l305 -305l596 596l-154 155l-442 -442l-150 151zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe169;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 988l97 -98l212 213l-97 97zM200 401h700v699l-250 -239l-149 149l-212 -212l149 -149zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe170;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM200 612l212 -212l98 97l-213 212zM300 1200l239 -250l-149 -149l212 -212l149 148l248 -237v700h-699zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe171;" d="M23 415l1177 784v-1079l-475 272l-310 -393v416h-392zM494 210l672 938l-672 -712v-226z" />
+<glyph unicode="&#xe172;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-850q0 -21 -15 -35.5t-35 -14.5h-150v400h-700v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 1000h100v200h-100v-200z" />
+<glyph unicode="&#xe173;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-218l-276 -275l-120 120l-126 -127h-378v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM581 306l123 123l120 -120l353 352l123 -123l-475 -476zM600 1000h100v200h-100v-200z" />
+<glyph unicode="&#xe174;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-269l-103 -103l-170 170l-298 -298h-329v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 1000h100v200h-100v-200zM700 133l170 170l-170 170l127 127l170 -170l170 170l127 -128l-170 -169l170 -170 l-127 -127l-170 170l-170 -170z" />
+<glyph unicode="&#xe175;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-300h-400v-200h-500v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 300l300 -300l300 300h-200v300h-200v-300h-200zM600 1000v200h100v-200h-100z" />
+<glyph unicode="&#xe176;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-402l-200 200l-298 -298h-402v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 300h200v-300h200v300h200l-300 300zM600 1000v200h100v-200h-100z" />
+<glyph unicode="&#xe177;" d="M0 250q0 -21 14.5 -35.5t35.5 -14.5h1100q21 0 35.5 14.5t14.5 35.5v550h-1200v-550zM0 900h1200v150q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-150zM100 300v200h400v-200h-400z" />
+<glyph unicode="&#xe178;" d="M0 400l300 298v-198h400v-200h-400v-198zM100 800v200h100v-200h-100zM300 800v200h100v-200h-100zM500 800v200h400v198l300 -298l-300 -298v198h-400zM800 300v200h100v-200h-100zM1000 300h100v200h-100v-200z" />
+<glyph unicode="&#xe179;" d="M100 700v400l50 100l50 -100v-300h100v300l50 100l50 -100v-300h100v300l50 100l50 -100v-400l-100 -203v-447q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v447zM800 597q0 -29 10.5 -55.5t25 -43t29 -28.5t25.5 -18l10 -5v-397q0 -21 14.5 -35.5 t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v1106q0 31 -18 40.5t-44 -7.5l-276 -117q-25 -16 -43.5 -50.5t-18.5 -65.5v-359z" />
+<glyph unicode="&#xe180;" d="M100 0h400v56q-75 0 -87.5 6t-12.5 44v394h500v-394q0 -38 -12.5 -44t-87.5 -6v-56h400v56q-4 0 -11 0.5t-24 3t-30 7t-24 15t-11 24.5v888q0 22 25 34.5t50 13.5l25 2v56h-400v-56q75 0 87.5 -6t12.5 -44v-394h-500v394q0 38 12.5 44t87.5 6v56h-400v-56q4 0 11 -0.5 t24 -3t30 -7t24 -15t11 -24.5v-888q0 -22 -25 -34.5t-50 -13.5l-25 -2v-56z" />
+<glyph unicode="&#xe181;" d="M0 300q0 -41 29.5 -70.5t70.5 -29.5h300q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-300q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM100 100h400l200 200h105l295 98v-298h-425l-100 -100h-375zM100 300v200h300v-200h-300zM100 600v200h300v-200h-300z M100 1000h400l200 -200v-98l295 98h105v200h-425l-100 100h-375zM700 402v163l400 133v-163z" />
+<glyph unicode="&#xe182;" d="M16.5 974.5q0.5 -21.5 16 -90t46.5 -140t104 -177.5t175 -208q103 -103 207.5 -176t180 -103.5t137 -47t92.5 -16.5l31 1l163 162q16 17 13 40.5t-22 37.5l-192 136q-19 14 -45 12t-42 -19l-119 -118q-143 103 -267 227q-126 126 -227 268l118 118q17 17 20 41.5 t-11 44.5l-139 194q-14 19 -36.5 22t-40.5 -14l-162 -162q-1 -11 -0.5 -32.5z" />
+<glyph unicode="&#xe183;" d="M0 50v212q0 20 10.5 45.5t24.5 39.5l365 303v50q0 4 1 10.5t12 22.5t30 28.5t60 23t97 10.5t97 -10t60 -23.5t30 -27.5t12 -24l1 -10v-50l365 -303q14 -14 24.5 -39.5t10.5 -45.5v-212q0 -21 -15 -35.5t-35 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5zM0 712 q0 -21 14.5 -33.5t34.5 -8.5l202 33q20 4 34.5 21t14.5 38v146q141 24 300 24t300 -24v-146q0 -21 14.5 -38t34.5 -21l202 -33q20 -4 34.5 8.5t14.5 33.5v200q-6 8 -19 20.5t-63 45t-112 57t-171 45t-235 20.5q-92 0 -175 -10.5t-141.5 -27t-108.5 -36.5t-81.5 -40 t-53.5 -36.5t-31 -27.5l-9 -10v-200z" />
+<glyph unicode="&#xe184;" d="M100 0v100h1100v-100h-1100zM175 200h950l-125 150v250l100 100v400h-100v-200h-100v200h-200v-200h-100v200h-200v-200h-100v200h-100v-400l100 -100v-250z" />
+<glyph unicode="&#xe185;" d="M100 0h300v400q0 41 -29.5 70.5t-70.5 29.5h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-400zM500 0v1000q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-1000h-300zM900 0v700q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-700h-300z" />
+<glyph unicode="&#xe186;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v300h-200v100h200v100h-300v-300h200v-100h-200v-100zM600 300h200v100h100v300h-100v100h-200v-500 zM700 400v300h100v-300h-100z" />
+<glyph unicode="&#xe187;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h100v200h100v-200h100v500h-100v-200h-100v200h-100v-500zM600 300h200v100h100v300h-100v100h-200v-500 zM700 400v300h100v-300h-100z" />
+<glyph unicode="&#xe188;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v100h-200v300h200v100h-300v-500zM600 300h300v100h-200v300h200v100h-300v-500z" />
+<glyph unicode="&#xe189;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 550l300 -150v300zM600 400l300 150l-300 150v-300z" />
+<glyph unicode="&#xe190;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300v500h700v-500h-700zM300 400h130q41 0 68 42t27 107t-28.5 108t-66.5 43h-130v-300zM575 549 q0 -65 27 -107t68 -42h130v300h-130q-38 0 -66.5 -43t-28.5 -108z" />
+<glyph unicode="&#xe191;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v300h-200v100h200v100h-300v-300h200v-100h-200v-100zM601 300h100v100h-100v-100zM700 700h100 v-400h100v500h-200v-100z" />
+<glyph unicode="&#xe192;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v400h-200v100h-100v-500zM301 400v200h100v-200h-100zM601 300h100v100h-100v-100zM700 700h100 v-400h100v500h-200v-100z" />
+<glyph unicode="&#xe193;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 700v100h300v-300h-99v-100h-100v100h99v200h-200zM201 300v100h100v-100h-100zM601 300v100h100v-100h-100z M700 700v100h200v-500h-100v400h-100z" />
+<glyph unicode="&#xe194;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM400 500v200 l100 100h300v-100h-300v-200h300v-100h-300z" />
+<glyph unicode="&#xe195;" d="M0 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM182 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM400 400v400h300 l100 -100v-100h-100v100h-200v-100h200v-100h-200v-100h-100zM700 400v100h100v-100h-100z" />
+<glyph unicode="&#xe197;" d="M-14 494q0 -80 56.5 -137t135.5 -57h222v300h400v-300h128q120 0 205 86t85 208q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5zM300 200h200v300h200v-300 h200l-300 -300z" />
+<glyph unicode="&#xe198;" d="M-14 494q0 -80 56.5 -137t135.5 -57h8l414 414l403 -403q94 26 154.5 104t60.5 178q0 121 -85 207.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5zM300 200l300 300 l300 -300h-200v-300h-200v300h-200z" />
+<glyph unicode="&#xe199;" d="M100 200h400v-155l-75 -45h350l-75 45v155h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170z" />
+<glyph unicode="&#xe200;" d="M121 700q0 -53 28.5 -97t75.5 -65q-4 -16 -4 -38q0 -74 52.5 -126.5t126.5 -52.5q56 0 100 30v-306l-75 -45h350l-75 45v306q46 -30 100 -30q74 0 126.5 52.5t52.5 126.5q0 24 -9 55q50 32 79.5 83t29.5 112q0 90 -61.5 155.5t-150.5 71.5q-26 89 -99.5 145.5 t-167.5 56.5q-116 0 -197.5 -81.5t-81.5 -197.5q0 -4 1 -12t1 -11q-14 2 -23 2q-74 0 -126.5 -52.5t-52.5 -126.5z" />
+</font>
+</defs></svg> 
\ No newline at end of file
diff --git a/doc/fonts/glyphicons-halflings-regular.ttf b/doc/fonts/glyphicons-halflings-regular.ttf
new file mode 100644 (file)
index 0000000..a498ef4
Binary files /dev/null and b/doc/fonts/glyphicons-halflings-regular.ttf differ
diff --git a/doc/fonts/glyphicons-halflings-regular.woff b/doc/fonts/glyphicons-halflings-regular.woff
new file mode 100644 (file)
index 0000000..d83c539
Binary files /dev/null and b/doc/fonts/glyphicons-halflings-regular.woff differ
diff --git a/doc/gen_api_method_docs.py b/doc/gen_api_method_docs.py
new file mode 100755 (executable)
index 0000000..d2a743b
--- /dev/null
@@ -0,0 +1,145 @@
+#! /usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+# gen_api_method_docs.py
+#
+# Generate Textile documentation for Arvados API methods.
+#
+# This script retrieves the discovery document (by default from
+# https://localhost:9900/discovery/v1/apis/arvados/v1/rest) and
+# generates one Textile documentation file per API resource in the
+# output directory (default: the current directory).
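+#
+# Example invocation (assuming a development API server with a
+# self-signed certificate at localhost:9900, which is why the request
+# below is made with verify=False):
+#
+#   python gen_api_method_docs.py --host localhost --port 9900 --output-dir .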
+
+import argparse
+import os
+import pprint  # used only by the commented-out debugging call below
+import re
+import requests
+
+p = argparse.ArgumentParser(description='Generate Arvados API method documentation.')
+
+p.add_argument('--host',
+               type=str,
+               default='localhost',
+               help="The hostname or IP address of the API server")
+
+p.add_argument('--port',
+               type=int,
+               default=9900,
+               help="The port of the API server")
+
+p.add_argument('--output-dir',
+               type=str,
+               default='.',
+               help="Directory in which to write output files.")
+
+args = p.parse_args()
+
+api_url = 'https://{host}:{port}/discovery/v1/apis/arvados/v1/rest'.format(**vars(args))
+
+r = requests.get(api_url, verify=False)
+if r.status_code != 200:
+    raise Exception('Bad status code %d: %s' % (r.status_code, r.text))
+
+if 'application/json' not in r.headers.get('content-type', ''):
+    raise Exception('Unexpected content type: %s: %s' %
+                    (r.headers.get('content-type', ''), r.text))
+
+api = r.json()
+
+resource_num = 0
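+# Shape assumed for the discovery document (inferred from the accesses
+# below): api['resources'][resource]['methods'][method] is a dict with a
+# 'description' and a 'parameters' dict; each parameter entry may carry
+# 'type', 'description', 'location', 'required', 'minimum', 'maximum',
+# and 'default'.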
+for resource in sorted(api[u'resources']):
+    resource_num = resource_num + 1
+    out_fname = os.path.join(args.output_dir, resource + '.textile')
+    if os.path.exists(out_fname):
+        backup_name = out_fname + '.old'
+        try:
+            os.rename(out_fname, backup_name)
+        except OSError as e:
+            print "WARNING: could not back up {1} as {2}: {3}".format(
+                out_fname, backup_name, e)
+    outf = open(out_fname, 'w')
+    outf.write(
+"""---
+navsection: api
+navmenu: API Methods
+title: "{resource}"
+navorder: {resource_num}
+---
+
+h1. {resource}
+
+Required arguments are displayed in %{{background:#ccffcc}}green%.
+
+""".format(resource_num=resource_num, resource=resource))
+
+    methods = api['resources'][resource]['methods']
+    for method in sorted(methods.keys()):
+        methodinfo = methods[method]
+        outf.write(
+"""
+h2. {method}
+
+{description}
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+""".format(
+    method=method, description=methodinfo['description']))
+
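+        # Partition the parameters: required ones, rendered with the green
+        # background declared at the top of the page, are listed before the
+        # optional ones.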
+        required = []
+        notrequired = []
+        for param, paraminfo in methodinfo['parameters'].iteritems():
+            paraminfo.setdefault(u'description', '')
+            paraminfo.setdefault(u'location', '')
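+            # Annotate the type with any range limits and default value,
+            # e.g. "integer (range 0-1000; default 100)".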
+            limit = ''
+            if paraminfo.get('minimum', '') or paraminfo.get('maximum', ''):
+                limit = "range {0}-{1}".format(
+                    paraminfo.get('minimum', ''),
+                    paraminfo.get('maximum', 'unlimited'))
+            if paraminfo.get('default', ''):
+                if limit:
+                    limit = limit + '; '
+                limit = limit + 'default %s' % paraminfo['default']
+            if limit:
+                paraminfo['type'] = '{0} ({1})'.format(
+                    paraminfo['type'], limit)
+
+            row = "|{param}|{type}|{description}|{location}||\n".format(
+                param=param, **paraminfo)
+            if paraminfo.get('required', False):
+                required.append(row)
+            else:
+                notrequired.append(row)
+
+        for row in sorted(required):
+            outf.write("{background:#ccffcc}." + row)
+        for row in sorted(notrequired):
+            outf.write(row)
+
+        # pprint.pprint(methodinfo)
+
+    outf.close()
+    print "wrote ", out_fname
+
+
diff --git a/doc/gen_api_schema_docs.py b/doc/gen_api_schema_docs.py
new file mode 100755 (executable)
index 0000000..3c3ab2e
--- /dev/null
@@ -0,0 +1,92 @@
+#! /usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+# gen_api_schema_docs.py
+#
+# Generate Textile documentation pages for Arvados schema resources.
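+#
+# Expects an Arvados API server at https://localhost:9900 (the URL is
+# hardcoded below); writes one <Resource>.textile skeleton per schema
+# resource into the current directory, switching to a "_new" suffix
+# rather than overwriting an existing file.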
+
+import requests
+import re
+import os
+
+r = requests.get('https://localhost:9900/arvados/v1/schema',
+                 verify=False)
+if r.status_code != 200:
+    raise Exception('Bad status code %d: %s' % (r.status_code, r.text))
+
+if 'application/json' not in r.headers.get('content-type', ''):
+    raise Exception('Unexpected content type: %s: %s' %
+                    (r.headers.get('content-type', ''), r.text))
+
+schema = r.json()
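+
+# Assumed response shape (inferred from the loop below): a JSON object
+# mapping each resource name to a dict of its properties.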
+navorder = 0
+for resource in sorted(schema.keys()):
+    navorder = navorder + 1
+    properties = schema[resource]
+    res_api_endpoint = re.sub(r'([a-z])([A-Z])', r'\1_\2', resource).lower()
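+    # e.g. "ApiClientAuthorization" -> "api_client_authorization"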
+    outfile = "{}.textile".format(resource)
+    if os.path.exists(outfile):
+        outfile = "{}_new.textile".format(resource)
+    print outfile, "..."
+    with open(outfile, "w") as f:
+        f.write("""---
+layout: default
+navsection: api
+navmenu: Schema
+title: {resource}
+navorder: {navorder}
+---
+
+h1. {resource}
+
+A **{resource}** represents...
+
+h2. Methods
+
+        See "REST methods for working with Arvados resources":{{{{site.baseurl}}}}/api/methods.html
+
+API endpoint base: @https://{{{{ site.arvados_api_host }}}}/arvados/v1/{res_api_endpoint}@
+
+h2. Creation
+
+h3. Prerequisites
+
+Prerequisites for creating a {resource}.
+
+h3. Side effects
+
+Side effects of creating a {resource}.
+
+h2. Resources
+
+Each {resource} has, in addition to the usual "attributes of Arvados resources":resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+""".format(
+    resource=resource,
+    navorder=navorder,
+    res_api_endpoint=res_api_endpoint))
+
+        for prop in properties:
+            if prop not in ['id', 'uuid', 'href', 'kind', 'etag', 'self_link',
+                            'owner_uuid', 'created_at',
+                            'modified_by_client_uuid',
+                            'modified_by_user_uuid',
+                            'modified_at']:
+                # Each entry is assumed to map a property name to its type;
+                # format(**prop) would fail on a plain string key.
+                f.write('|{name}|{type}|||\n'.format(
+                    name=prop, type=properties[prop]))
+
diff --git a/doc/images/Arvados_Permissions.svg b/doc/images/Arvados_Permissions.svg
new file mode 100644 (file)
index 0000000..d3e1135
--- /dev/null
@@ -0,0 +1,7 @@
+<?xml version="1.0" standalone="yes"?>
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0 -->
+
+<svg version="1.1" viewBox="0.0 0.0 1381.4540682414697 465.73490813648294" fill="none" stroke="none" stroke-linecap="square" stroke-miterlimit="10" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"><clipPath id="p.0"><path d="m0 0l1381.4541 0l0 465.7349l-1381.4541 0l0 -465.7349z" clip-rule="nonzero"></path></clipPath><g clip-path="url(#p.0)"><path fill="#000000" fill-opacity="0.0" d="m0 0l1381.4541 0l0 465.7349l-1381.4541 0z" fill-rule="nonzero"></path><path fill="#76a5af" d="m167.81102 247.17343l0 0c0 -5.809967 4.709915 -10.519882 10.519897 -10.519882l116.660995 0c2.790039 0 5.4658203 1.1083374 7.43869 3.0812073c1.9728699 1.9728546 3.0812073 4.648636 3.0812073 7.438675l0 42.07834c0 5.809967 -4.7099304 10.519897 -10.519897 10.519897l-116.660995 0c-5.8099823 0 -10.519897 -4.7099304 -10.519897 -10.519897z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m167.81102 247.17343l0 0c0 -5.809967 4.709915 -10.519882 10.519897 -10.519882l116.660995 0c2.790039 0 5.4658203 1.1083374 7.43869 3.0812073c1.9728699 1.9728546 3.0812073 4.648636 3.0812073 7.438675l0 42.07834c0 5.809967 -4.7099304 10.519897 -10.519897 10.519897l-116.660995 0c-5.8099823 0 -10.519897 -4.7099304 -10.519897 -10.519897z" fill-rule="nonzero"></path><path fill="#000000" d="m226.62677 275.1326l0 -13.59375l4.421875 0q2.5 0 3.265625 0.203125q1.15625 0.296875 1.9375 1.328125q0.796875 1.015625 0.796875 2.640625q0 1.25 -0.453125 2.109375q-0.453125 0.859375 -1.15625 1.34375q-0.703125 0.484375 -1.421875 0.640625q-0.984375 0.203125 -2.84375 0.203125l-1.796875 0l0 5.125l-2.75 0zm2.75 -11.296875l0 3.859375l1.5 0q1.625 0 2.171875 -0.21875q0.546875 -0.21875 0.859375 -0.671875q0.3125 -0.453125 0.3125 -1.046875q0 -0.75 -0.4375 -1.234375q-0.4375 -0.484375 -1.09375 -0.59375q-0.5 -0.09375 -1.984375 -0.09375l-1.328125 0zm15.802948 11.296875l-2.609375 0l0 -9.828125q-1.4375 1.34375 -3.375 1.984375l0 -2.375q1.03125 -0.328125 2.21875 -1.25q1.203125 -0.9375 1.640625 -2.1875l2.125 0l0 13.65625z" fill-rule="nonzero"></path><path fill="#f3a7eb" fill-opacity="0.5269" d="m322.10236 83.18898l0 0c0 -19.803978 25.586761 -35.858273 57.149628 -35.858273l0 0c31.562836 0 57.149597 16.054295 57.149597 35.858273l0 0c0 19.80397 -25.586761 35.85826 -57.149597 35.85826l0 0c-31.562866 0 -57.149628 -16.05429 -57.149628 -35.85826z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m322.10236 83.18898l0 0c0 -19.803978 25.586761 -35.858273 57.149628 -35.858273l0 0c31.562836 0 57.149597 16.054295 57.149597 35.858273l0 0c0 19.80397 -25.586761 35.85826 -57.149597 35.85826l0 0c-31.562866 0 -57.149628 -16.05429 -57.149628 -35.85826z" fill-rule="nonzero"></path><path fill="#000000" d="m368.687 76.51523l2.75 0l0 7.359375q0 1.75 0.109375 2.265625q0.171875 0.84375 0.828125 1.359375q0.671875 0.5 1.8125 0.5q1.171875 0 1.765625 -0.484375q0.59375 -0.484375 0.71875 -1.171875q0.125 -0.703125 0.125 -2.3125l0 -7.515625l2.734375 0l0 7.140625q0 2.4375 -0.21875 3.453125q-0.21875 1.015625 -0.828125 1.71875q-0.59375 0.6875 -1.59375 1.109375q-1.0 0.40625 -2.609375 0.40625q-1.953125 0 -2.96875 -0.453125q-1.0 -0.453125 -1.59375 -1.171875q-0.578125 -0.71875 -0.75 -1.5q-0.28125 -1.171875 -0.28125 -3.453125l0 -7.25zm19.59793 13.59375l-2.609375 0l0 -9.828125q-1.4375 1.34375 -3.375 1.984375l0 -2.375q1.03125 -0.328125 2.21875 -1.25q1.203125 -0.9375 1.640625 -2.1875l2.125 0l0 13.65625z" fill-rule="nonzero"></path><path fill="#f3a7eb" 
fill-opacity="0.5255" d="m730.10236 115.18898l0 0c0 -15.385696 20.7359 -27.858269 46.31494 -27.858269l0 0c25.579102 0 46.315002 12.472572 46.315002 27.858269l0 0c0 15.385696 -20.7359 27.858261 -46.315002 27.858261l0 0c-25.57904 0 -46.31494 -12.472565 -46.31494 -27.858261z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m730.10236 115.18898l0 0c0 -15.385696 20.7359 -27.858269 46.31494 -27.858269l0 0c25.579102 0 46.315002 12.472572 46.315002 27.858269l0 0c0 15.385696 -20.7359 27.858261 -46.315002 27.858261l0 0c-25.57904 0 -46.31494 -12.472565 -46.31494 -27.858261z" fill-rule="nonzero"></path><path fill="#000000" d="m765.85236 108.51522l2.75 0l0 7.359375q0 1.75 0.109375 2.265625q0.171875 0.84375 0.828125 1.359375q0.671875 0.5 1.8125 0.5q1.171875 0 1.765625 -0.484375q0.59375 -0.484375 0.71875 -1.171875q0.125 -0.703125 0.125 -2.3125l0 -7.515625l2.734375 0l0 7.140625q0 2.4375 -0.21875 3.453125q-0.21875 1.015625 -0.828125 1.71875q-0.59375 0.6875 -1.59375 1.109375q-1.0 0.40625 -2.609375 0.40625q-1.953125 0 -2.96875 -0.453125q-1.0 -0.453125 -1.59375 -1.171875q-0.578125 -0.71875 -0.75 -1.5q-0.28125 -1.171875 -0.28125 -3.453125l0 -7.25zm21.722961 11.171875l0 2.421875l-9.140625 0q0.15625 -1.375 0.890625 -2.59375q0.75 -1.234375 2.9375 -3.265625q1.765625 -1.640625 2.15625 -2.234375q0.546875 -0.796875 0.546875 -1.59375q0 -0.875 -0.46875 -1.34375q-0.46875 -0.46875 -1.296875 -0.46875q-0.8125 0 -1.296875 0.5q-0.484375 0.484375 -0.5625 1.625l-2.59375 -0.25q0.234375 -2.15625 1.453125 -3.09375q1.21875 -0.9375 3.0625 -0.9375q2.015625 0 3.15625 1.09375q1.15625 1.078125 1.15625 2.6875q0 0.921875 -0.328125 1.75q-0.328125 0.828125 -1.046875 1.734375q-0.46875 0.609375 -1.703125 1.75q-1.234375 1.125 -1.5625 1.5q-0.328125 0.359375 -0.53125 0.71875l5.171875 0z" fill-rule="nonzero"></path><path fill="#f3a7eb" fill-opacity="0.5255" d="m738.10236 319.28348l0 0c0 -13.22876 17.880432 -23.952759 39.93701 -23.952759l0 0c22.05658 0 39.93701 10.723999 39.93701 23.952759l0 0c0 13.228729 -17.880432 23.952728 -39.93701 23.952728l0 0c-22.05658 0 -39.93701 -10.723999 -39.93701 -23.952728z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m738.10236 319.28348l0 0c0 -13.22876 17.880432 -23.952759 39.93701 -23.952759l0 0c22.05658 0 39.93701 10.723999 39.93701 23.952759l0 0c0 13.228729 -17.880432 23.952728 -39.93701 23.952728l0 0c-22.05658 0 -39.93701 -10.723999 -39.93701 -23.952728z" fill-rule="nonzero"></path><path fill="#000000" d="m767.4744 312.6097l2.75 0l0 7.359375q0 1.75 0.109375 2.265625q0.171875 0.84375 0.828125 1.359375q0.671875 0.5 1.8125 0.5q1.171875 0 1.765625 -0.484375q0.59375 -0.484375 0.71875 -1.171875q0.125 -0.703125 0.125 -2.3125l0 -7.515625l2.734375 0l0 7.140625q0 2.4375 -0.21875 3.453125q-0.21875 1.015625 -0.828125 1.71875q-0.59375 0.6875 -1.59375 1.109375q-1.0 0.40625 -2.609375 0.40625q-1.953125 0 -2.96875 -0.453125q-1.0 -0.453125 -1.59375 -1.171875q-0.578125 -0.71875 -0.75 -1.5q-0.28125 -1.171875 -0.28125 -3.453125l0 -7.25zm12.832336 9.984375l2.515625 -0.3125q0.125 0.96875 0.65625 1.484375q0.53125 0.5 1.28125 0.5q0.796875 0 1.34375 -0.609375q0.5625 -0.609375 0.5625 -1.640625q0 -0.984375 -0.53125 -1.5625q-0.53125 -0.578125 -1.28125 -0.578125q-0.5 0 -1.203125 0.203125l0.28125 -2.125q1.0625 0.015625 1.609375 -0.46875q0.5625 -0.484375 0.5625 -1.296875q0 -0.6875 -0.40625 -1.09375q-0.40625 -0.40625 -1.078125 -0.40625q-0.671875 0 -1.140625 0.46875q-0.46875 0.46875 -0.578125 
1.359375l-2.40625 -0.421875q0.25 -1.234375 0.75 -1.96875q0.515625 -0.734375 1.421875 -1.15625q0.90625 -0.421875 2.03125 -0.421875q1.90625 0 3.078125 1.21875q0.953125 1.0 0.953125 2.265625q0 1.796875 -1.953125 2.859375q1.15625 0.25 1.859375 1.125q0.703125 0.875 0.703125 2.109375q0 1.78125 -1.3125 3.046875q-1.296875 1.265625 -3.25 1.265625q-1.84375 0 -3.0625 -1.0625q-1.21875 -1.0625 -1.40625 -2.78125z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m379.25198 119.04724l-142.58269 117.60631" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m379.25198 119.04724l-137.95406 113.78847" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m240.24692 231.56151l-2.4498596 4.1618195l4.5518646 -1.6134033z" fill-rule="evenodd"></path><path fill="#76a5af" d="m55.811024 383.17343l0 0c0 -5.809967 4.709919 -10.519897 10.519894 -10.519897l116.660995 0c2.7900543 0 5.4658356 1.1083374 7.43869 3.0812073c1.9728699 1.9728699 3.0812073 4.648651 3.0812073 7.43869l0 42.07834c0 5.809967 -4.709915 10.519897 -10.519897 10.519897l-116.660995 0c-5.8099747 0 -10.519894 -4.7099304 -10.519894 -10.519897z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m55.811024 383.17343l0 0c0 -5.809967 4.709919 -10.519897 10.519894 -10.519897l116.660995 0c2.7900543 0 5.4658356 1.1083374 7.43869 3.0812073c1.9728699 1.9728699 3.0812073 4.648651 3.0812073 7.43869l0 42.07834c0 5.809967 -4.709915 10.519897 -10.519897 10.519897l-116.660995 0c-5.8099747 0 -10.519894 -4.7099304 -10.519894 -10.519897z" fill-rule="nonzero"></path><path fill="#000000" d="m114.62677 411.1326l0 -13.59375l4.421875 0q2.5 0 3.265625 0.203125q1.15625 0.296875 1.9375 1.328125q0.796875 1.015625 0.796875 2.640625q0 1.25 -0.453125 2.109375q-0.453125 0.859375 -1.15625 1.34375q-0.703125 0.484375 -1.421875 0.640625q-0.984375 0.203125 -2.84375 0.203125l-1.796875 0l0 5.125l-2.75 0zm2.75 -11.296875l0 3.859375l1.5 0q1.625 0 2.171875 -0.21875q0.546875 -0.21875 0.859375 -0.671875q0.3125 -0.453125 0.3125 -1.046875q0 -0.75 -0.4375 -1.234375q-0.4375 -0.484375 -1.09375 -0.59375q-0.5 -0.09375 -1.984375 -0.09375l-1.328125 0zm17.927948 8.875l0 2.421875l-9.140625 0q0.15625 -1.375 0.890625 -2.59375q0.75 -1.234375 2.9375 -3.265625q1.765625 -1.640625 2.15625 -2.234375q0.546875 -0.796875 0.546875 -1.59375q0 -0.875 -0.46875 -1.34375q-0.46875 -0.46875 -1.296875 -0.46875q-0.8125 0 -1.296875 0.5q-0.484375 0.484375 -0.5625 1.625l-2.59375 -0.25q0.234375 -2.15625 1.453125 -3.09375q1.21875 -0.9375 3.0625 -0.9375q2.015625 0 3.15625 1.09375q1.15625 1.078125 1.15625 2.6875q0 0.921875 -0.328125 1.75q-0.328125 0.828125 -1.046875 1.734375q-0.46875 0.609375 -1.703125 1.75q-1.234375 1.125 -1.5625 1.5q-0.328125 0.359375 -0.53125 0.71875l5.171875 0z" fill-rule="nonzero"></path><path fill="#76a5af" d="m287.81104 383.17343l0 0c0 -5.809967 4.7099 -10.519897 10.519897 -10.519897l116.66098 0c2.790039 0 5.4658203 1.1083374 7.43869 3.0812073c1.9728699 1.9728699 3.0812073 4.648651 3.0812073 7.43869l0 42.07834c0 5.809967 -4.7099304 10.519897 -10.519897 10.519897l-116.66098 0c-5.8099976 0 -10.519897 -4.7099304 -10.519897 -10.519897z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m287.81104 383.17343l0 0c0 -5.809967 4.7099 -10.519897 10.519897 -10.519897l116.66098 0c2.790039 0 5.4658203 1.1083374 7.43869 3.0812073c1.9728699 1.9728699 
3.0812073 4.648651 3.0812073 7.43869l0 42.07834c0 5.809967 -4.7099304 10.519897 -10.519897 10.519897l-116.66098 0c-5.8099976 0 -10.519897 -4.7099304 -10.519897 -10.519897z" fill-rule="nonzero"></path><path fill="#000000" d="m346.62677 411.1326l0 -13.59375l4.421875 0q2.5 0 3.265625 0.203125q1.15625 0.296875 1.9375 1.328125q0.796875 1.015625 0.796875 2.640625q0 1.25 -0.453125 2.109375q-0.453125 0.859375 -1.15625 1.34375q-0.703125 0.484375 -1.421875 0.640625q-0.984375 0.203125 -2.84375 0.203125l-1.796875 0l0 5.125l-2.75 0zm2.75 -11.296875l0 3.859375l1.5 0q1.625 0 2.171875 -0.21875q0.546875 -0.21875 0.859375 -0.671875q0.3125 -0.453125 0.3125 -1.046875q0 -0.75 -0.4375 -1.234375q-0.4375 -0.484375 -1.09375 -0.59375q-0.5 -0.09375 -1.984375 -0.09375l-1.328125 0zm9.037323 7.6875l2.515625 -0.3125q0.125 0.96875 0.65625 1.484375q0.53125 0.5 1.28125 0.5q0.796875 0 1.34375 -0.609375q0.5625 -0.609375 0.5625 -1.640625q0 -0.984375 -0.53125 -1.5625q-0.53125 -0.578125 -1.28125 -0.578125q-0.5 0 -1.203125 0.203125l0.28125 -2.125q1.0625 0.015625 1.609375 -0.46875q0.5625 -0.484375 0.5625 -1.296875q0 -0.6875 -0.40625 -1.09375q-0.40625 -0.40625 -1.078125 -0.40625q-0.671875 0 -1.140625 0.46875q-0.46875 0.46875 -0.578125 1.359375l-2.40625 -0.421875q0.25 -1.234375 0.75 -1.96875q0.515625 -0.734375 1.421875 -1.15625q0.90625 -0.421875 2.03125 -0.421875q1.90625 0 3.078125 1.21875q0.953125 1.0 0.953125 2.265625q0 1.796875 -1.953125 2.859375q1.15625 0.25 1.859375 1.125q0.703125 0.875 0.703125 2.109375q0 1.78125 -1.3125 3.046875q-1.296875 1.265625 -3.25 1.265625q-1.84375 0 -3.0625 -1.0625q-1.21875 -1.0625 -1.40625 -2.78125z" fill-rule="nonzero"></path><path fill="#76a5af" d="m447.81104 247.17343l0 0c0 -5.809967 4.7099 -10.519882 10.519897 -10.519882l116.66101 0c2.790039 0 5.4658203 1.1083374 7.4386597 3.0812073c1.9728394 1.9728546 3.0812378 4.648636 3.0812378 7.438675l0 42.07834c0 5.809967 -4.709961 10.519897 -10.519897 10.519897l-116.66101 0c-5.8099976 0 -10.519897 -4.7099304 -10.519897 -10.519897z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m447.81104 247.17343l0 0c0 -5.809967 4.7099 -10.519882 10.519897 -10.519882l116.66101 0c2.790039 0 5.4658203 1.1083374 7.4386597 3.0812073c1.9728394 1.9728546 3.0812378 4.648636 3.0812378 7.438675l0 42.07834c0 5.809967 -4.709961 10.519897 -10.519897 10.519897l-116.66101 0c-5.8099976 0 -10.519897 -4.7099304 -10.519897 -10.519897z" fill-rule="nonzero"></path><path fill="#000000" d="m506.62677 275.1326l0 -13.59375l4.421875 0q2.5 0 3.265625 0.203125q1.15625 0.296875 1.9375 1.328125q0.796875 1.015625 0.796875 2.640625q0 1.25 -0.453125 2.109375q-0.453125 0.859375 -1.15625 1.34375q-0.703125 0.484375 -1.421875 0.640625q-0.984375 0.203125 -2.84375 0.203125l-1.796875 0l0 5.125l-2.75 0zm2.75 -11.296875l0 3.859375l1.5 0q1.625 0 2.171875 -0.21875q0.546875 -0.21875 0.859375 -0.671875q0.3125 -0.453125 0.3125 -1.046875q0 -0.75 -0.4375 -1.234375q-0.4375 -0.484375 -1.09375 -0.59375q-0.5 -0.09375 -1.984375 -0.09375l-1.328125 0zm14.2404175 11.296875l0 -2.734375l-5.5625 0l0 -2.28125l5.890625 -8.640625l2.1875 0l0 8.625l1.6875 0l0 2.296875l-1.6875 0l0 2.734375l-2.515625 0zm0 -5.03125l0 -4.640625l-3.125 4.640625l3.125 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m730.10236 115.18898l-422.96063 128.06299" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" stroke-dasharray="4.0,3.0,1.0,3.0" d="m730.10236 115.18898l-417.21808 126.324265" 
fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m312.40564 239.93239l-3.864746 2.895935l4.8220215 0.26579285z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m738.10236 319.28348l-152.59839 -51.08664" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" stroke-dasharray="4.0,3.0,1.0,3.0" d="m738.10236 319.28348l-146.90881 -49.181885" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m591.7179 268.5353l-4.8276978 0.12564087l3.7789917 3.006958z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m379.25198 119.04724l137.41733 117.60631" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m379.25198 119.04724l132.85886 113.70499" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m511.0368 234.00714l4.5217896 1.6958466l-2.3737793 -4.2056427z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m236.66142 299.77167l-112.00001 72.88187" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m236.66142 299.77167l-106.97102 69.609375" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m128.7895 367.9966l-2.902771 3.8595886l4.704544 -1.0907593z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m236.66142 299.77167l119.999985 72.88187" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m236.66142 299.77167l114.87175 69.76724" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m350.67575 370.95065l4.7361755 0.94400024l-3.0213318 -3.767517z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m500.65353 107.34646l142.58267 0l0 47.90551l-142.58267 0z" fill-rule="nonzero"></path><path fill="#000000" d="m512.21606 122.95396l0 1.9375l-1.7969055 0l0 -1.53125q0 -1.25 0.296875 -1.796875q0.390625 -0.75 1.234375 -1.125l0.40628052 0.640625q-0.5000305 0.21875 -0.7500305 0.640625q-0.234375 0.421875 -0.265625 1.234375l0.8750305 0zm2.875 0l0 1.9375l-1.796875 0l0 -1.53125q0 -1.25 0.296875 -1.796875q0.390625 -0.75 1.234375 -1.125l0.40625 0.640625q-0.5 0.21875 -0.75 0.640625q-0.234375 0.421875 -0.265625 1.234375l0.875 0zm8.4626465 7.7031174l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.5937424 0.515625 -2.7812424q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.8593674q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm9.328125 2.390625q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34374237 0 -0.43749237q0 -1.015625 -0.46875 
-1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.2343674q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.0788574 4.9375l0 -9.859367l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.6249924l0 6.0625l-1.671875 0l0 -6.0q0 -1.0156174 -0.203125 -1.5156174q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.5781174l0 5.375l-1.671875 0zm8.844482 3.78125l0 -1.21875l11.0625 0l0 1.21875l-11.0625 0zm13.735046 -3.78125l-3.015625 -9.859367l1.71875 0l1.5625 5.6874924l0.59375 2.125q0.03125 -0.15625 0.5 -2.03125l1.578125 -5.7812424l1.71875 0l1.46875 5.7187424l0.484375 1.890625l0.578125 -1.90625l1.6875 -5.7031174l1.625 0l-3.078125 9.859367l-1.734375 0l-1.578125 -5.90625l-0.375 -1.6718674l-2.0 7.5781174l-1.734375 0zm11.629211 0l0 -9.859367l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.8906174 -0.28125 1.9531174l0 5.15625l-1.671875 0zm6.2439575 -11.687492l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.687492l0 -9.859367l1.671875 0l0 9.859367l-1.671875 0zm7.7854004 -1.5l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.6562424l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.7499924q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm8.2771 -1.671875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.4843674 1.265625 -3.8593674q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.7968674q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.2343674 -0.625 -1.8593674q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.0468674zm8.672546 -5.9218674l0 -1.9375l1.78125 0l0 1.53125q0 1.234375 -0.28125 1.78125q-0.40625 0.75 -1.25 1.140625l-0.40625 -0.671875q0.5 -0.203125 0.75 -0.640625q0.25 -0.4375 0.265625 -1.203125l-0.859375 0zm2.875 0l0 -1.9375l1.78125 0l0 1.53125q0 1.234375 -0.28125 1.78125q-0.40625 0.75 -1.25 1.140625l-0.40625 -0.671875q0.5 -0.203125 0.75 -0.640625q0.25 -0.4375 0.28125 -1.203125l-0.875 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m643.4173 
256.7638l120.0 0l0 41.95273l-120.0 0z" fill-rule="nonzero"></path><path fill="#000000" d="m654.9798 272.37128l0 1.9375l-1.796875 0l0 -1.53125q0 -1.25 0.296875 -1.796875q0.390625 -0.75 1.234375 -1.125l0.40625 0.640625q-0.5 0.21875 -0.75 0.640625q-0.234375 0.421875 -0.265625 1.234375l0.875 0zm2.875 0l0 1.9375l-1.796875 0l0 -1.53125q0 -1.25 0.296875 -1.796875q0.390625 -0.75 1.234375 -1.125l0.40625 0.640625q-0.5 0.21875 -0.75 0.640625q-0.234375 0.421875 -0.265625 1.234375l0.875 0zm8.4627075 7.703125l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm9.328125 2.390625q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.0788574 4.9375l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm8.844421 3.78125l0 -1.21875l11.0625 0l0 1.21875l-11.0625 0zm11.891357 -3.78125l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.9783325 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 
-1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm15.547546 4.65625q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm10.469482 4.9375l0 -1.25q-0.9375 1.46875 -2.75 1.46875q-1.171875 0 -2.171875 -0.640625q-0.984375 -0.65625 -1.53125 -1.8125q-0.53125 -1.171875 -0.53125 -2.6875q0 -1.46875 0.484375 -2.671875q0.5 -1.203125 1.46875 -1.84375q0.984375 -0.640625 2.203125 -0.640625q0.890625 0 1.578125 0.375q0.703125 0.375 1.140625 0.984375l0 -4.875l1.65625 0l0 13.59375l-1.546875 0zm-5.28125 -4.921875q0 1.890625 0.796875 2.828125q0.8125 0.9375 1.890625 0.9375q1.09375 0 1.859375 -0.890625q0.765625 -0.890625 0.765625 -2.734375q0 -2.015625 -0.78125 -2.953125q-0.78125 -0.953125 -1.921875 -0.953125q-1.109375 0 -1.859375 0.90625q-0.75 0.90625 -0.75 2.859375zm8.828857 -6.875l0 -1.9375l1.78125 0l0 1.53125q0 1.234375 -0.28125 1.78125q-0.40625 0.75 -1.25 1.140625l-0.40625 -0.671875q0.5 -0.203125 0.75 -0.640625q0.25 -0.4375 0.265625 -1.203125l-0.859375 0zm2.875 0l0 -1.9375l1.78125 0l0 1.53125q0 1.234375 -0.28125 1.78125q-0.40625 0.75 -1.25 1.140625l-0.40625 -0.671875q0.5 -0.203125 0.75 -0.640625q0.25 -0.4375 0.28125 -1.203125l-0.875 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m559.7454 366.88452l114.299194 0l0 51.08661l-114.299194 0z" fill-rule="nonzero"></path><path fill="#000000" d="m571.3079 382.492l0 1.9375l-1.796875 0l0 -1.53125q0 -1.25 0.296875 -1.796875q0.390625 -0.75 1.234375 -1.125l0.40625 0.640625q-0.5 0.21875 -0.75 0.640625q-0.234375 0.421875 -0.265625 1.234375l0.875 0zm2.875 0l0 1.9375l-1.796875 0l0 -1.53125q0 -1.25 0.296875 -1.796875q0.390625 -0.75 1.234375 -1.125l0.40625 0.640625q-0.5 0.21875 -0.75 0.640625q-0.234375 0.421875 -0.265625 1.234375l0.875 0zm8.4626465 7.703125l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm9.328125 2.390625q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 
-2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.0788574 4.9375l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm8.844482 3.78125l0 -1.21875l11.0625 0l0 1.21875l-11.0625 0zm11.891296 -3.78125l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.9783325 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm15.547607 4.65625q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 
1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm10.469421 4.9375l0 -1.25q-0.9375 1.46875 -2.75 1.46875q-1.171875 0 -2.171875 -0.640625q-0.984375 -0.65625 -1.53125 -1.8125q-0.53125 -1.171875 -0.53125 -2.6875q0 -1.46875 0.484375 -2.671875q0.5 -1.203125 1.46875 -1.84375q0.984375 -0.640625 2.203125 -0.640625q0.890625 0 1.578125 0.375q0.703125 0.375 1.140625 0.984375l0 -4.875l1.65625 0l0 13.59375l-1.546875 0zm-5.28125 -4.921875q0 1.890625 0.796875 2.828125q0.8125 0.9375 1.890625 0.9375q1.09375 0 1.859375 -0.890625q0.765625 -0.890625 0.765625 -2.734375q0 -2.015625 -0.78125 -2.953125q-0.78125 -0.953125 -1.921875 -0.953125q-1.109375 0 -1.859375 0.90625q-0.75 0.90625 -0.75 2.859375zm8.828857 -6.875l0 -1.9375l1.78125 0l0 1.53125q0 1.234375 -0.28125 1.78125q-0.40625 0.75 -1.25 1.140625l-0.40625 -0.671875q0.5 -0.203125 0.75 -0.640625q0.25 -0.4375 0.265625 -1.203125l-0.859375 0zm2.875 0l0 -1.9375l1.78125 0l0 1.53125q0 1.234375 -0.28125 1.78125q-0.40625 0.75 -1.25 1.140625l-0.40625 -0.671875q0.5 -0.203125 0.75 -0.640625q0.25 -0.4375 0.28125 -1.203125l-0.875 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m302.40683 310.40683l152.59842 0l0 41.95276l-152.59842 0z" fill-rule="nonzero"></path><path fill="#a61c00" d="m321.48495 332.3268l2.671875 0.84375q-0.609375 2.21875 -2.046875 3.3125q-1.421875 1.078125 -3.609375 1.078125q-2.703125 0 -4.453125 -1.84375q-1.734375 -1.859375 -1.734375 -5.078125q0 -3.390625 1.75 -5.265625q1.75 -1.875 4.609375 -1.875q2.5 0 4.046875 1.46875q0.9375 0.875 1.390625 2.5l-2.71875 0.65625q-0.234375 -1.0625 -1.0 -1.671875q-0.765625 -0.609375 -1.859375 -0.609375q-1.515625 0 -2.453125 1.09375q-0.9375 1.078125 -0.9375 3.5q0 2.578125 0.921875 3.6875q0.921875 1.09375 2.40625 1.09375q1.109375 0 1.890625 -0.6875q0.78125 -0.703125 1.125 -2.203125zm7.254181 5.0l-2.609375 0l0 -9.859375l2.421875 0l0 1.40625q0.625 -0.984375 1.109375 -1.296875q0.5 -0.328125 1.140625 -0.328125q0.890625 0 1.71875 0.5l-0.8125 2.265625q-0.65625 -0.421875 -1.21875 -0.421875q-0.546875 0 -0.9375 0.296875q-0.375 0.296875 -0.59375 1.09375q-0.21875 0.78125 -0.21875 3.296875l0 3.046875zm10.463409 -3.140625l2.609375 0.4375q-0.5 1.4375 -1.59375 2.1875q-1.078125 0.734375 -2.703125 0.734375q-2.5625 0 -3.796875 -1.671875q-0.96875 -1.34375 -0.96875 -3.40625q0 -2.4375 1.265625 -3.828125q1.28125 -1.390625 3.25 -1.390625q2.1875 0 3.453125 1.453125q1.28125 1.453125 1.234375 4.453125l-6.53125 0q0.015625 1.15625 0.625 1.8125q0.609375 0.640625 1.5 0.640625q0.609375 0 1.03125 -0.328125q0.421875 -0.34375 0.625 -1.09375zm0.15625 -2.625q-0.03125 -1.140625 -0.59375 -1.71875q-0.546875 -0.59375 -1.34375 -0.59375q-0.859375 0 -1.40625 0.625q-0.5625 0.609375 -0.546875 1.6875l3.890625 0zm6.469452 -1.078125l-2.359375 -0.4375q0.390625 -1.421875 1.359375 -2.109375q0.984375 -0.6875 2.90625 -0.6875q1.734375 0 2.59375 0.421875q0.859375 0.40625 1.203125 1.046875q0.34375 0.625 0.34375 2.328125l-0.03125 3.046875q0 1.296875 0.125 1.921875q0.125 0.609375 0.46875 1.3125l-2.578125 0q-0.09375 -0.265625 -0.25 -0.765625q-0.0625 -0.234375 -0.09375 -0.3125q-0.65625 0.65625 -1.421875 0.984375q-0.765625 0.3125 -1.625 0.3125q-1.515625 0 -2.40625 -0.8125q-0.875 -0.828125 -0.875 -2.09375q0 -0.84375 0.390625 -1.484375q0.40625 -0.65625 1.125 -1.0q0.71875 -0.359375 2.078125 -0.625q1.828125 -0.328125 2.53125 -0.625l0 -0.265625q0 -0.75 -0.375 -1.0625q-0.359375 -0.328125 -1.390625 -0.328125q-0.703125 0 -1.09375 0.28125q-0.390625 0.265625 -0.625 0.953125zm3.484375 2.109375q-0.5 0.171875 -1.59375 
0.40625q-1.078125 0.234375 -1.40625 0.453125q-0.515625 0.359375 -0.515625 0.921875q0 0.546875 0.40625 0.953125q0.40625 0.390625 1.046875 0.390625q0.703125 0 1.34375 -0.46875q0.46875 -0.359375 0.625 -0.859375q0.09375 -0.34375 0.09375 -1.28125l0 -0.515625zm9.453857 -5.125l0 2.078125l-1.78125 0l0 3.984375q0 1.203125 0.046875 1.40625q0.0625 0.1875 0.234375 0.328125q0.1875 0.125 0.453125 0.125q0.359375 0 1.046875 -0.25l0.21875 2.015625q-0.90625 0.390625 -2.0625 0.390625q-0.703125 0 -1.265625 -0.234375q-0.5625 -0.234375 -0.828125 -0.609375q-0.265625 -0.375 -0.375 -1.015625q-0.078125 -0.453125 -0.078125 -1.84375l0 -4.296875l-1.203125 0l0 -2.078125l1.203125 0l0 -1.953125l2.609375 -1.515625l0 3.46875l1.78125 0zm7.400177 6.71875l2.609375 0.4375q-0.5 1.4375 -1.59375 2.1875q-1.078125 0.734375 -2.703125 0.734375q-2.5625 0 -3.796875 -1.671875q-0.96875 -1.34375 -0.96875 -3.40625q0 -2.4375 1.265625 -3.828125q1.28125 -1.390625 3.25 -1.390625q2.1875 0 3.453125 1.453125q1.28125 1.453125 1.234375 4.453125l-6.53125 0q0.015625 1.15625 0.625 1.8125q0.609375 0.640625 1.5 0.640625q0.609375 0 1.03125 -0.328125q0.421875 -0.34375 0.625 -1.09375zm0.15625 -2.625q-0.03125 -1.140625 -0.59375 -1.71875q-0.546875 -0.59375 -1.34375 -0.59375q-0.859375 0 -1.40625 0.625q-0.5625 0.609375 -0.546875 1.6875l3.890625 0zm13.563202 5.765625l-2.421875 0l0 -1.453125q-0.609375 0.84375 -1.4375 1.265625q-0.8125 0.40625 -1.640625 0.40625q-1.703125 0 -2.921875 -1.359375q-1.203125 -1.375 -1.203125 -3.828125q0 -2.5 1.171875 -3.796875q1.1875 -1.3125 2.984375 -1.3125q1.65625 0 2.859375 1.375l0 -4.890625l2.609375 0l0 13.59375zm-6.96875 -5.140625q0 1.578125 0.4375 2.28125q0.640625 1.015625 1.765625 1.015625q0.90625 0 1.53125 -0.765625q0.625 -0.765625 0.625 -2.28125q0 -1.703125 -0.609375 -2.4375q-0.609375 -0.75 -1.5625 -0.75q-0.9375 0 -1.5625 0.734375q-0.625 0.734375 -0.625 2.203125zm14.391785 5.140625l0 -13.59375l2.609375 0l0 4.890625q1.203125 -1.375 2.859375 -1.375q1.796875 0 2.96875 1.3125q1.1875 1.296875 1.1875 3.734375q0 2.53125 -1.203125 3.890625q-1.203125 1.359375 -2.921875 1.359375q-0.84375 0 -1.671875 -0.421875q-0.8125 -0.421875 -1.40625 -1.25l0 1.453125l-2.421875 0zm2.59375 -5.140625q0 1.53125 0.484375 2.265625q0.671875 1.03125 1.796875 1.03125q0.859375 0 1.46875 -0.734375q0.609375 -0.734375 0.609375 -2.328125q0 -1.6875 -0.609375 -2.421875q-0.609375 -0.75 -1.578125 -0.75q-0.9375 0 -1.5625 0.734375q-0.609375 0.71875 -0.609375 2.203125zm7.677246 -4.71875l2.78125 0l2.359375 7.0l2.296875 -7.0l2.703125 0l-3.484375 9.484375l-0.625 1.71875q-0.34375 0.859375 -0.65625 1.3125q-0.296875 0.46875 -0.703125 0.75q-0.40625 0.28125 -1.0 0.4375q-0.59375 0.15625 -1.328125 0.15625q-0.75 0 -1.46875 -0.15625l-0.234375 -2.046875q0.609375 0.125 1.09375 0.125q0.921875 0 1.34375 -0.53125q0.4375 -0.53125 0.671875 -1.359375l-3.75 -9.890625zm16.793396 -3.734375l2.75 0l0 7.359375q0 1.75 0.109375 2.265625q0.171875 0.84375 0.828125 1.359375q0.671875 0.5 1.8125 0.5q1.171875 0 1.765625 -0.484375q0.59375 -0.484375 0.71875 -1.171875q0.125 -0.703125 0.125 -2.3125l0 -7.515625l2.734375 0l0 7.140625q0 2.4375 -0.21875 3.453125q-0.21875 1.015625 -0.828125 1.71875q-0.59375 0.6875 -1.59375 1.109375q-1.0 0.40625 -2.609375 0.40625q-1.953125 0 -2.96875 -0.453125q-1.0 -0.453125 -1.59375 -1.171875q-0.578125 -0.71875 -0.75 -1.5q-0.28125 -1.171875 -0.28125 -3.453125l0 -7.25zm21.72293 11.171875l0 2.421875l-9.140625 0q0.15625 -1.375 0.890625 -2.59375q0.75 -1.234375 2.9375 -3.265625q1.765625 -1.640625 2.15625 -2.234375q0.546875 -0.796875 0.546875 -1.59375q0 -0.875 -0.46875 -1.34375q-0.46875 
-0.46875 -1.296875 -0.46875q-0.8125 0 -1.296875 0.5q-0.484375 0.484375 -0.5625 1.625l-2.59375 -0.25q0.234375 -2.15625 1.453125 -3.09375q1.21875 -0.9375 3.0625 -0.9375q2.015625 0 3.15625 1.09375q1.15625 1.078125 1.15625 2.6875q0 0.921875 -0.328125 1.75q-0.328125 0.828125 -1.046875 1.734375q-0.46875 0.609375 -1.703125 1.75q-1.234375 1.125 -1.5625 1.5q-0.328125 0.359375 -0.53125 0.71875l5.171875 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m546.8373 393.3071l142.58264 0l0 41.95276l-142.58264 0z" fill-rule="nonzero"></path><path fill="#a61c00" d="m556.5248 415.8052l2.671875 -0.265625q0.234375 1.34375 0.96875 1.984375q0.75 0.625 2.0 0.625q1.328125 0 2.0 -0.5625q0.671875 -0.5625 0.671875 -1.3125q0 -0.484375 -0.28125 -0.8125q-0.28125 -0.34375 -0.984375 -0.59375q-0.484375 -0.171875 -2.203125 -0.59375q-2.203125 -0.546875 -3.09375 -1.34375q-1.265625 -1.125 -1.265625 -2.734375q0 -1.046875 0.59375 -1.953125q0.59375 -0.90625 1.703125 -1.375q1.109375 -0.46875 2.671875 -0.46875q2.5625 0 3.859375 1.125q1.296875 1.109375 1.359375 2.984375l-2.75 0.125q-0.171875 -1.046875 -0.75 -1.5q-0.578125 -0.46875 -1.75 -0.46875q-1.1875 0 -1.875 0.5q-0.4375 0.3125 -0.4375 0.84375q0 0.484375 0.421875 0.828125q0.515625 0.421875 2.515625 0.90625q2.0 0.46875 2.953125 0.984375q0.96875 0.5 1.515625 1.375q0.546875 0.875 0.546875 2.15625q0 1.171875 -0.65625 2.203125q-0.640625 1.015625 -1.828125 1.515625q-1.1875 0.484375 -2.96875 0.484375q-2.578125 0 -3.96875 -1.1875q-1.375 -1.1875 -1.640625 -3.46875zm15.7247925 -9.171875l0 5.0q1.25 -1.484375 3.015625 -1.484375q0.890625 0 1.609375 0.34375q0.734375 0.328125 1.09375 0.84375q0.375 0.515625 0.5 1.15625q0.140625 0.625 0.140625 1.953125l0 5.78125l-2.609375 0l0 -5.203125q0 -1.546875 -0.15625 -1.96875q-0.140625 -0.421875 -0.515625 -0.65625q-0.375 -0.25 -0.9375 -0.25q-0.65625 0 -1.171875 0.3125q-0.5 0.3125 -0.734375 0.953125q-0.234375 0.640625 -0.234375 1.875l0 4.9375l-2.609375 0l0 -13.59375l2.609375 0zm10.739746 6.75l-2.359375 -0.4375q0.390625 -1.421875 1.359375 -2.109375q0.984375 -0.6875 2.90625 -0.6875q1.734375 0 2.59375 0.421875q0.859375 0.40625 1.203125 1.046875q0.34375 0.625 0.34375 2.328125l-0.03125 3.046875q0 1.296875 0.125 1.921875q0.125 0.609375 0.46875 1.3125l-2.578125 0q-0.09375 -0.265625 -0.25 -0.765625q-0.0625 -0.234375 -0.09375 -0.3125q-0.65625 0.65625 -1.421875 0.984375q-0.765625 0.3125 -1.625 0.3125q-1.515625 0 -2.40625 -0.8125q-0.875 -0.828125 -0.875 -2.09375q0 -0.84375 0.390625 -1.484375q0.40625 -0.65625 1.125 -1.0q0.71875 -0.359375 2.078125 -0.625q1.828125 -0.328125 2.53125 -0.625l0 -0.265625q0 -0.75 -0.375 -1.0625q-0.359375 -0.328125 -1.390625 -0.328125q-0.703125 0 -1.09375 0.28125q-0.390625 0.265625 -0.625 0.953125zm3.484375 2.109375q-0.5 0.171875 -1.59375 0.40625q-1.078125 0.234375 -1.40625 0.453125q-0.515625 0.359375 -0.515625 0.921875q0 0.546875 0.40625 0.953125q0.40625 0.390625 1.046875 0.390625q0.703125 0 1.34375 -0.46875q0.46875 -0.359375 0.625 -0.859375q0.09375 -0.34375 0.09375 -1.28125l0 -0.515625zm7.4382324 4.734375l-2.609375 0l0 -9.859375l2.421875 0l0 1.40625q0.625 -0.984375 1.109375 -1.296875q0.5 -0.328125 1.140625 -0.328125q0.890625 0 1.71875 0.5l-0.8125 2.265625q-0.65625 -0.421875 -1.21875 -0.421875q-0.546875 0 -0.9375 0.296875q-0.375 0.296875 -0.59375 1.09375q-0.21875 0.78125 -0.21875 3.296875l0 3.046875zm10.463379 -3.140625l2.609375 0.4375q-0.5 1.4375 -1.59375 2.1875q-1.078125 0.734375 -2.703125 0.734375q-2.5625 0 -3.796875 -1.671875q-0.96875 -1.34375 -0.96875 -3.40625q0 -2.4375 1.265625 -3.828125q1.28125 -1.390625 3.25 
-1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.0632324 4.9375l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.9782715 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm13.668335 0.953125q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm12.938232 3.421875l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm1.5270996 1.5l0 -13.59375l1.671875 0l0 4.875q1.171875 -1.359375 2.953125 -1.359375q1.09375 0 1.890625 0.4375q0.8125 0.421875 1.15625 1.1875q0.359375 0.765625 0.359375 2.203125l0 6.25l-1.671875 0l0 -6.25q0 -1.25 -0.546875 -1.8125q-0.546875 -0.578125 -1.53125 -0.578125q-0.75 0 -1.40625 0.390625q-0.640625 0.375 -0.921875 1.046875q-0.28125 0.65625 -0.28125 1.8125l0 5.390625l-1.671875 0zm17.125732 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.094482 5.875l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm5.5563965 -2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 
-0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm14.511353 0l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm16.453125 2.9375l0 -1.453125q-1.140625 1.671875 -3.125 1.671875q-0.859375 0 -1.625 -0.328125q-0.75 -0.34375 -1.125 -0.84375q-0.359375 -0.5 -0.515625 -1.234375q-0.09375 -0.5 -0.09375 -1.5625l0 -6.109375l1.671875 0l0 5.46875q0 1.3125 0.09375 1.765625q0.15625 0.65625 0.671875 1.03125q0.515625 0.375 1.265625 0.375q0.75 0 1.40625 -0.375q0.65625 -0.390625 0.921875 -1.046875q0.28125 -0.671875 0.28125 -1.9375l0 -5.28125l1.671875 0l0 9.859375l-1.5 0zm10.360107 -3.609375l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm2.890625 3.609375l0 -13.59375l1.671875 0l0 4.875q1.171875 -1.359375 2.953125 -1.359375q1.09375 0 1.890625 0.4375q0.8125 0.421875 1.15625 1.1875q0.359375 0.765625 0.359375 2.203125l0 6.25l-1.671875 0l0 -6.25q0 -1.25 -0.546875 -1.8125q-0.546875 -0.578125 -1.53125 -0.578125q-0.75 0 -1.40625 0.390625q-0.640625 0.375 -0.921875 1.046875q-0.28125 0.65625 -0.28125 1.8125l0 5.390625l-1.671875 0zm21.996582 -1.21875q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 
1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm3.4069824 2.0l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm24.323853 -10.65625l1.796875 0l0 7.84375q0 2.0625 -0.46875 3.265625q-0.453125 1.203125 -1.671875 1.96875q-1.203125 0.75 -3.171875 0.75q-1.90625 0 -3.125 -0.65625q-1.21875 -0.65625 -1.734375 -1.90625q-0.515625 -1.25 -0.515625 -3.421875l0 -7.84375l1.796875 0l0 7.84375q0 1.765625 0.328125 2.609375q0.328125 0.84375 1.125 1.296875q0.8125 0.453125 1.96875 0.453125q1.984375 0 2.828125 -0.890625q0.84375 -0.90625 0.84375 -3.46875l0 -7.84375zm3.8792725 10.0l1.671875 -0.21875q0.28125 1.421875 0.96875 2.046875q0.703125 0.625 1.6875 0.625q1.1875 0 2.0 -0.8125q0.8125 -0.828125 0.8125 -2.03125q0 -1.140625 -0.765625 -1.890625q-0.75 -0.75 -1.90625 -0.75q-0.46875 0 -1.171875 0.1875l0.1875 -1.46875q0.15625 0.015625 0.265625 0.015625q1.0625 0 1.90625 -0.546875q0.859375 -0.5625 0.859375 -1.71875q0 -0.921875 -0.625 -1.515625q-0.609375 -0.609375 -1.59375 -0.609375q-0.96875 0 -1.625 0.609375q-0.640625 0.609375 -0.828125 1.84375l-1.671875 -0.296875q0.296875 -1.6875 1.375 -2.609375q1.09375 -0.921875 2.71875 -0.921875q1.109375 0 2.046875 0.484375q0.9375 0.46875 1.421875 1.296875q0.5 0.828125 0.5 1.75q0 0.890625 -0.46875 1.609375q-0.46875 0.71875 -1.40625 1.15625q1.21875 0.265625 1.875 1.15625q0.671875 0.875 0.671875 2.1875q0 1.78125 -1.296875 3.015625q-1.296875 1.234375 -3.28125 1.234375q-1.796875 0 -2.984375 -1.0625q-1.171875 -1.0625 -1.34375 -2.765625zm11.922607 7.59375l-1.1875 0q2.765625 -4.453125 2.765625 -8.921875q0 -1.734375 -0.390625 -3.453125q-0.328125 -1.390625 -0.890625 -2.671875q-0.359375 -0.84375 -1.484375 -2.78125l1.1875 0q1.75 2.328125 2.578125 
4.671875q0.71875 2.015625 0.71875 4.234375q0 2.5 -0.96875 4.84375q-0.953125 2.328125 -2.328125 4.078125z" fill-rule="nonzero"></path><path fill="#000000" d="m902.08356 239.05591l1.796875 0l0 7.84375q0 2.0625 -0.46875 3.265625q-0.453125 1.203125 -1.671875 1.96875q-1.203125 0.75 -3.171875 0.75q-1.90625 0 -3.125 -0.65625q-1.21875 -0.65625 -1.734375 -1.90625q-0.515625 -1.25 -0.515625 -3.421875l0 -7.84375l1.796875 0l0 7.84375q0 1.765625 0.328125 2.609375q0.328125 0.84375 1.125 1.296875q0.8125 0.453125 1.96875 0.453125q1.984375 0 2.828125 -0.890625q0.84375 -0.90625 0.84375 -3.46875l0 -7.84375zm12.644775 11.984375l0 1.609375l-8.984375 0q-0.015625 -0.609375 0.1875 -1.15625q0.34375 -0.921875 1.09375 -1.8125q0.765625 -0.890625 2.1875 -2.0625q2.21875 -1.8125 3.0 -2.875q0.78125 -1.0625 0.78125 -2.015625q0 -0.984375 -0.71875 -1.671875q-0.703125 -0.6875 -1.84375 -0.6875q-1.203125 0 -1.9375 0.734375q-0.71875 0.71875 -0.71875 2.0l-1.71875 -0.171875q0.171875 -1.921875 1.328125 -2.921875q1.15625 -1.015625 3.09375 -1.015625q1.953125 0 3.09375 1.09375q1.140625 1.078125 1.140625 2.6875q0 0.8125 -0.34375 1.609375q-0.328125 0.78125 -1.109375 1.65625q-0.765625 0.859375 -2.5625 2.390625q-1.5 1.265625 -1.9375 1.71875q-0.421875 0.4375 -0.703125 0.890625l6.671875 0zm6.605896 -2.46875l0 -1.6875l5.125 0l0 1.6875l-5.125 0zm12.255371 4.078125l0 -13.59375l5.125 0q1.359375 0 2.078125 0.125q1.0 0.171875 1.671875 0.640625q0.671875 0.46875 1.078125 1.3125q0.421875 0.84375 0.421875 1.84375q0 1.734375 -1.109375 2.9375q-1.09375 1.203125 -3.984375 1.203125l-3.484375 0l0 5.53125l-1.796875 0zm1.796875 -7.140625l3.515625 0q1.75 0 2.46875 -0.640625q0.734375 -0.65625 0.734375 -1.828125q0 -0.859375 -0.4375 -1.46875q-0.421875 -0.609375 -1.125 -0.796875q-0.453125 -0.125 -1.671875 -0.125l-3.484375 0l0 4.859375zm9.9748535 3.546875l1.671875 -0.21875q0.28125 1.421875 0.96875 2.046875q0.703125 0.625 1.6875 0.625q1.1875 0 2.0 -0.8125q0.8125 -0.828125 0.8125 -2.03125q0 -1.140625 -0.765625 -1.890625q-0.75 -0.75 -1.90625 -0.75q-0.46875 0 -1.171875 0.1875l0.1875 -1.46875q0.15625 0.015625 0.265625 0.015625q1.0625 0 1.90625 -0.546875q0.859375 -0.5625 0.859375 -1.71875q0 -0.921875 -0.625 -1.515625q-0.609375 -0.609375 -1.59375 -0.609375q-0.96875 0 -1.625 0.609375q-0.640625 0.609375 -0.828125 1.84375l-1.671875 -0.296875q0.296875 -1.6875 1.375 -2.609375q1.09375 -0.921875 2.71875 -0.921875q1.109375 0 2.046875 0.484375q0.9375 0.46875 1.421875 1.296875q0.5 0.828125 0.5 1.75q0 0.890625 -0.46875 1.609375q-0.46875 0.71875 -1.40625 1.15625q1.21875 0.265625 1.875 1.15625q0.671875 0.875 0.671875 2.1875q0 1.78125 -1.296875 3.015625q-1.296875 1.234375 -3.28125 1.234375q-1.796875 0 -2.984375 -1.0625q-1.171875 -1.0625 -1.34375 -2.765625zm15.371521 -0.484375l0 -1.6875l5.125 0l0 1.6875l-5.125 0zm13.34906 -7.234375l0 1.9375l-1.796875 0l0 -1.53125q0 -1.25 0.296875 -1.796875q0.390625 -0.75 1.234375 -1.125l0.40625 0.640625q-0.5 0.21875 -0.75 0.640625q-0.234375 0.421875 -0.265625 1.234375l0.875 0zm2.875 0l0 1.9375l-1.796875 0l0 -1.53125q0 -1.25 0.296875 -1.796875q0.390625 -0.75 1.234375 -1.125l0.40625 0.640625q-0.5 0.21875 -0.75 0.640625q-0.234375 0.421875 -0.265625 1.234375l0.875 0zm8.4627075 7.703125l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 
-0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm9.328125 2.390625q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.0788574 4.9375l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm8.844421 3.78125l0 -1.21875l11.0625 0l0 1.21875l-11.0625 0zm13.735107 -3.78125l-3.015625 -9.859375l1.71875 0l1.5625 5.6875l0.59375 2.125q0.03125 -0.15625 0.5 -2.03125l1.578125 -5.78125l1.71875 0l1.46875 5.71875l0.484375 1.890625l0.578125 -1.90625l1.6875 -5.703125l1.625 0l-3.078125 9.859375l-1.734375 0l-1.578125 -5.90625l-0.375 -1.671875l-2.0 7.578125l-1.734375 0zm11.62915 0l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm6.2440186 -11.6875l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm7.7854004 -1.5l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm8.2771 -1.671875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 
-1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.672607 -5.921875l0 -1.9375l1.78125 0l0 1.53125q0 1.234375 -0.28125 1.78125q-0.40625 0.75 -1.25 1.140625l-0.40625 -0.671875q0.5 -0.203125 0.75 -0.640625q0.25 -0.4375 0.265625 -1.203125l-0.859375 0zm2.875 0l0 -1.9375l1.78125 0l0 1.53125q0 1.234375 -0.28125 1.78125q-0.40625 0.75 -1.25 1.140625l-0.40625 -0.671875q0.5 -0.203125 0.75 -0.640625q0.25 -0.4375 0.28125 -1.203125l-0.875 0zm12.145874 15.796875q-1.375 -1.75 -2.328125 -4.078125q-0.953125 -2.34375 -0.953125 -4.84375q0 -2.21875 0.703125 -4.234375q0.84375 -2.34375 2.578125 -4.671875l1.203125 0q-1.125 1.921875 -1.484375 2.75q-0.5625 1.28125 -0.890625 2.671875q-0.40625 1.734375 -0.40625 3.484375q0 4.46875 2.78125 8.921875l-1.203125 0zm3.0408936 -15.6875l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm4.1291504 0l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm10.375732 0l0 -13.59375l1.671875 0l0 4.875q1.171875 -1.359375 2.953125 -1.359375q1.09375 0 1.890625 0.4375q0.8125 0.421875 1.15625 1.1875q0.359375 0.765625 0.359375 2.203125l0 6.25l-1.671875 0l0 -6.25q0 -1.25 -0.546875 -1.8125q-0.546875 -0.578125 -1.53125 -0.578125q-0.75 0 -1.40625 0.390625q-0.640625 0.375 -0.921875 1.046875q-0.28125 0.65625 -0.28125 1.8125l0 5.390625l-1.671875 0zm17.125732 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.094482 5.875l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm6.2438965 -11.6875l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm7.7854004 -1.5l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm8.2771 -1.671875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 
-0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm15.500732 5.875l0 -1.25q-0.9375 1.46875 -2.75 1.46875q-1.171875 0 -2.171875 -0.640625q-0.984375 -0.65625 -1.53125 -1.8125q-0.53125 -1.171875 -0.53125 -2.6875q0 -1.46875 0.484375 -2.671875q0.5 -1.203125 1.46875 -1.84375q0.984375 -0.640625 2.203125 -0.640625q0.890625 0 1.578125 0.375q0.703125 0.375 1.140625 0.984375l0 -4.875l1.65625 0l0 13.59375l-1.546875 0zm-5.28125 -4.921875q0 1.890625 0.796875 2.828125q0.8125 0.9375 1.890625 0.9375q1.09375 0 1.859375 -0.890625q0.765625 -0.890625 0.765625 -2.734375q0 -2.015625 -0.78125 -2.953125q-0.78125 -0.953125 -1.921875 -0.953125q-1.109375 0 -1.859375 0.90625q-0.75 0.90625 -0.75 2.859375zm14.855835 4.921875l0 -8.546875l-1.484375 0l0 -1.3125l1.484375 0l0 -1.046875q0 -0.984375 0.171875 -1.46875q0.234375 -0.65625 0.84375 -1.046875q0.609375 -0.40625 1.703125 -0.40625q0.703125 0 1.5625 0.15625l-0.25 1.46875q-0.515625 -0.09375 -0.984375 -0.09375q-0.765625 0 -1.078125 0.328125q-0.3125 0.3125 -0.3125 1.203125l0 0.90625l1.921875 0l0 1.3125l-1.921875 0l0 8.546875l-1.65625 0zm4.7614746 0l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm5.6032715 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.281982 4.921875l0 -9.859375l1.5 0l0 1.390625q0.453125 -0.71875 1.21875 -1.15625q0.78125 -0.453125 1.765625 -0.453125q1.09375 0 1.796875 0.453125q0.703125 0.453125 0.984375 1.28125q1.171875 -1.734375 3.046875 -1.734375q1.46875 0 2.25 0.8125q0.796875 0.8125 0.796875 2.5l0 6.765625l-1.671875 0l0 -6.203125q0 -1.0 -0.15625 -1.4375q-0.15625 -0.453125 -0.59375 -0.71875q-0.421875 -0.265625 -1.0 -0.265625q-1.03125 0 -1.71875 0.6875q-0.6875 0.6875 -0.6875 2.21875l0 5.71875l-1.671875 0l0 -6.40625q0 -1.109375 -0.40625 -1.65625q-0.40625 -0.5625 -1.34375 -0.5625q-0.703125 0 -1.3125 0.375q-0.59375 0.359375 -0.859375 1.078125q-0.265625 0.71875 -0.265625 2.0625l0 5.109375l-1.671875 0zm20.942871 0l0 -13.59375l5.125 0q1.359375 0 2.078125 0.125q1.0 0.171875 1.671875 0.640625q0.671875 0.46875 1.078125 1.3125q0.421875 0.84375 0.421875 1.84375q0 1.734375 -1.109375 2.9375q-1.09375 1.203125 -3.984375 1.203125l-3.484375 0l0 5.53125l-1.796875 0zm1.796875 -7.140625l3.515625 0q1.75 0 2.46875 -0.640625q0.734375 -0.65625 0.734375 -1.828125q0 -0.859375 -0.4375 -1.46875q-0.421875 -0.609375 -1.125 -0.796875q-0.453125 -0.125 -1.671875 -0.125l-3.484375 0l0 4.859375zm16.256104 7.140625l-1.671875 0l0 -10.640625q-0.59375 0.578125 -1.578125 1.15625q-0.984375 0.5625 -1.765625 0.859375l0 -1.625q1.40625 -0.65625 2.453125 -1.59375q1.046875 -0.9375 1.484375 -1.8125l1.078125 0l0 13.65625zm5.6412354 4.0l-1.1875 0q2.765625 -4.453125 2.765625 -8.921875q0 -1.734375 
-0.390625 -3.453125q-0.328125 -1.390625 -0.890625 -2.671875q-0.359375 -0.84375 -1.484375 -2.78125l1.1875 0q1.75 2.328125 2.578125 4.671875q0.71875 2.015625 0.71875 4.234375q0 2.5 -0.96875 4.84375q-0.953125 2.328125 -2.328125 4.078125z" fill-rule="nonzero"></path><path fill="#000000" d="m902.08356 283.05588l1.796875 0l0 7.84375q0 2.0625 -0.46875 3.265625q-0.453125 1.203125 -1.671875 1.96875q-1.203125 0.75 -3.171875 0.75q-1.90625 0 -3.125 -0.65625q-1.21875 -0.65625 -1.734375 -1.90625q-0.515625 -1.25 -0.515625 -3.421875l0 -7.84375l1.796875 0l0 7.84375q0 1.765625 0.328125 2.609375q0.328125 0.84375 1.125 1.296875q0.8125 0.453125 1.96875 0.453125q1.984375 0 2.828125 -0.890625q0.84375 -0.90625 0.84375 -3.46875l0 -7.84375zm12.644775 11.984375l0 1.609375l-8.984375 0q-0.015625 -0.609375 0.1875 -1.15625q0.34375 -0.921875 1.09375 -1.8125q0.765625 -0.890625 2.1875 -2.0625q2.21875 -1.8125 3.0 -2.875q0.78125 -1.0625 0.78125 -2.015625q0 -0.984375 -0.71875 -1.671875q-0.703125 -0.6875 -1.84375 -0.6875q-1.203125 0 -1.9375 0.734375q-0.71875 0.71875 -0.71875 2.0l-1.71875 -0.171875q0.171875 -1.921875 1.328125 -2.921875q1.15625 -1.015625 3.09375 -1.015625q1.953125 0 3.09375 1.09375q1.140625 1.078125 1.140625 2.6875q0 0.8125 -0.34375 1.609375q-0.328125 0.78125 -1.109375 1.65625q-0.765625 0.859375 -2.5625 2.390625q-1.5 1.265625 -1.9375 1.71875q-0.421875 0.4375 -0.703125 0.890625l6.671875 0zm6.605896 -2.46875l0 -1.6875l5.125 0l0 1.6875l-5.125 0zm12.255371 4.078125l0 -13.59375l5.125 0q1.359375 0 2.078125 0.125q1.0 0.171875 1.671875 0.640625q0.671875 0.46875 1.078125 1.3125q0.421875 0.84375 0.421875 1.84375q0 1.734375 -1.109375 2.9375q-1.09375 1.203125 -3.984375 1.203125l-3.484375 0l0 5.53125l-1.796875 0zm1.796875 -7.140625l3.515625 0q1.75 0 2.46875 -0.640625q0.734375 -0.65625 0.734375 -1.828125q0 -0.859375 -0.4375 -1.46875q-0.421875 -0.609375 -1.125 -0.796875q-0.453125 -0.125 -1.671875 -0.125l-3.484375 0l0 4.859375zm15.3186035 7.140625l0 -3.25l-5.90625 0l0 -1.53125l6.21875 -8.8125l1.359375 0l0 8.8125l1.84375 0l0 1.53125l-1.84375 0l0 3.25l-1.671875 0zm0 -4.78125l0 -6.140625l-4.25 6.140625l4.25 0zm10.027771 0.703125l0 -1.6875l5.125 0l0 1.6875l-5.125 0zm12.03656 4.078125l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm9.750732 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.281982 4.921875l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm17.125732 
-3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875z" fill-rule="nonzero"></path><path fill="#000000" d="m902.08356 327.05588l1.796875 0l0 7.84375q0 2.0625 -0.46875 3.265625q-0.453125 1.203125 -1.671875 1.96875q-1.203125 0.75 -3.171875 0.75q-1.90625 0 -3.125 -0.65625q-1.21875 -0.65625 -1.734375 -1.90625q-0.515625 -1.25 -0.515625 -3.421875l0 -7.84375l1.796875 0l0 7.84375q0 1.765625 0.328125 2.609375q0.328125 0.84375 1.125 1.296875q0.8125 0.453125 1.96875 0.453125q1.984375 0 2.828125 -0.890625q0.84375 -0.90625 0.84375 -3.46875l0 -7.84375zm3.8791504 10.0l1.671875 -0.21875q0.28125 1.421875 0.96875 2.046875q0.703125 0.625 1.6875 0.625q1.1875 0 2.0 -0.8125q0.8125 -0.828125 0.8125 -2.03125q0 -1.140625 -0.765625 -1.890625q-0.75 -0.75 -1.90625 -0.75q-0.46875 0 -1.171875 0.1875l0.1875 -1.46875q0.15625 0.015625 0.265625 0.015625q1.0625 0 1.90625 -0.546875q0.859375 -0.5625 0.859375 -1.71875q0 -0.921875 -0.625 -1.515625q-0.609375 -0.609375 -1.59375 -0.609375q-0.96875 0 -1.625 0.609375q-0.640625 0.609375 -0.828125 1.84375l-1.671875 -0.296875q0.296875 -1.6875 1.375 -2.609375q1.09375 -0.921875 2.71875 -0.921875q1.109375 0 2.046875 0.484375q0.9375 0.46875 1.421875 1.296875q0.5 0.828125 0.5 1.75q0 0.890625 -0.46875 1.609375q-0.46875 0.71875 -1.40625 1.15625q1.21875 0.265625 1.875 1.15625q0.671875 0.875 0.671875 2.1875q0 1.78125 -1.296875 3.015625q-1.296875 1.234375 -3.28125 1.234375q-1.796875 0 -2.984375 -1.0625q-1.171875 -1.0625 -1.34375 -2.765625zm15.371521 -0.484375l0 -1.6875l5.125 0l0 1.6875l-5.125 0zm12.255371 4.078125l0 -13.59375l5.125 0q1.359375 0 2.078125 0.125q1.0 0.171875 1.671875 0.640625q0.671875 0.46875 1.078125 1.3125q0.421875 0.84375 0.421875 1.84375q0 1.734375 -1.109375 2.9375q-1.09375 1.203125 -3.984375 1.203125l-3.484375 0l0 5.53125l-1.796875 0zm1.796875 -7.140625l3.515625 0q1.75 0 2.46875 -0.640625q0.734375 -0.65625 0.734375 -1.828125q0 -0.859375 -0.4375 -1.46875q-0.421875 -0.609375 -1.125 -0.796875q-0.453125 -0.125 -1.671875 -0.125l-3.484375 0l0 4.859375zm16.256104 7.140625l-1.671875 0l0 -10.640625q-0.59375 0.578125 -1.578125 1.15625q-0.984375 0.5625 -1.765625 0.859375l0 -1.625q1.40625 -0.65625 2.453125 -1.59375q1.046875 -0.9375 1.484375 -1.8125l1.078125 0l0 13.65625zm4.9850464 0l0 -1.90625l1.90625 0l0 1.90625q0 1.046875 -0.375 1.6875q-0.375 0.65625 -1.171875 1.0l-0.46875 -0.71875q0.53125 -0.21875 0.78125 -0.671875q0.25 -0.453125 0.28125 -1.296875l-0.953125 0zm10.147888 0l0 -13.59375l5.125 0q1.359375 0 2.078125 0.125q1.0 0.171875 1.671875 0.640625q0.671875 0.46875 1.078125 1.3125q0.421875 0.84375 0.421875 1.84375q0 1.734375 -1.109375 2.9375q-1.09375 1.203125 -3.984375 1.203125l-3.484375 0l0 5.53125l-1.796875 0zm1.796875 -7.140625l3.515625 0q1.75 0 2.46875 -0.640625q0.734375 -0.65625 0.734375 -1.828125q0 -0.859375 -0.4375 -1.46875q-0.421875 -0.609375 -1.125 -0.796875q-0.453125 -0.125 -1.671875 -0.125l-3.484375 0l0 4.859375zm9.9747925 3.546875l1.671875 -0.21875q0.28125 1.421875 0.96875 2.046875q0.703125 0.625 1.6875 0.625q1.1875 0 2.0 
-0.8125q0.8125 -0.828125 0.8125 -2.03125q0 -1.140625 -0.765625 -1.890625q-0.75 -0.75 -1.90625 -0.75q-0.46875 0 -1.171875 0.1875l0.1875 -1.46875q0.15625 0.015625 0.265625 0.015625q1.0625 0 1.90625 -0.546875q0.859375 -0.5625 0.859375 -1.71875q0 -0.921875 -0.625 -1.515625q-0.609375 -0.609375 -1.59375 -0.609375q-0.96875 0 -1.625 0.609375q-0.640625 0.609375 -0.828125 1.84375l-1.671875 -0.296875q0.296875 -1.6875 1.375 -2.609375q1.09375 -0.921875 2.71875 -0.921875q1.109375 0 2.046875 0.484375q0.9375 0.46875 1.421875 1.296875q0.5 0.828125 0.5 1.75q0 0.890625 -0.46875 1.609375q-0.46875 0.71875 -1.40625 1.15625q1.21875 0.265625 1.875 1.15625q0.671875 0.875 0.671875 2.1875q0 1.78125 -1.296875 3.015625q-1.296875 1.234375 -3.28125 1.234375q-1.796875 0 -2.984375 -1.0625q-1.171875 -1.0625 -1.34375 -2.765625zm15.371521 -0.484375l0 -1.6875l5.125 0l0 1.6875l-5.125 0zm12.036621 4.078125l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm9.750732 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.281982 4.921875l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm17.125732 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875z" fill-rule="nonzero"></path><path fill="#000000" d="m902.08356 371.05588l1.796875 0l0 7.84375q0 2.0625 -0.46875 3.265625q-0.453125 1.203125 -1.671875 1.96875q-1.203125 0.75 -3.171875 0.75q-1.90625 0 -3.125 -0.65625q-1.21875 -0.65625 -1.734375 -1.90625q-0.515625 -1.25 -0.515625 -3.421875l0 -7.84375l1.796875 0l0 7.84375q0 1.765625 0.328125 2.609375q0.328125 0.84375 1.125 1.296875q0.8125 0.453125 1.96875 0.453125q1.984375 0 2.828125 -0.890625q0.84375 -0.90625 0.84375 -3.46875l0 -7.84375zm3.8791504 10.0l1.671875 -0.21875q0.28125 1.421875 0.96875 2.046875q0.703125 0.625 1.6875 0.625q1.1875 0 2.0 -0.8125q0.8125 -0.828125 0.8125 -2.03125q0 -1.140625 -0.765625 -1.890625q-0.75 
-0.75 -1.90625 -0.75q-0.46875 0 -1.171875 0.1875l0.1875 -1.46875q0.15625 0.015625 0.265625 0.015625q1.0625 0 1.90625 -0.546875q0.859375 -0.5625 0.859375 -1.71875q0 -0.921875 -0.625 -1.515625q-0.609375 -0.609375 -1.59375 -0.609375q-0.96875 0 -1.625 0.609375q-0.640625 0.609375 -0.828125 1.84375l-1.671875 -0.296875q0.296875 -1.6875 1.375 -2.609375q1.09375 -0.921875 2.71875 -0.921875q1.109375 0 2.046875 0.484375q0.9375 0.46875 1.421875 1.296875q0.5 0.828125 0.5 1.75q0 0.890625 -0.46875 1.609375q-0.46875 0.71875 -1.40625 1.15625q1.21875 0.265625 1.875 1.15625q0.671875 0.875 0.671875 2.1875q0 1.78125 -1.296875 3.015625q-1.296875 1.234375 -3.28125 1.234375q-1.796875 0 -2.984375 -1.0625q-1.171875 -1.0625 -1.34375 -2.765625zm15.371521 -0.484375l0 -1.6875l5.125 0l0 1.6875l-5.125 0zm12.255371 4.078125l0 -13.59375l5.125 0q1.359375 0 2.078125 0.125q1.0 0.171875 1.671875 0.640625q0.671875 0.46875 1.078125 1.3125q0.421875 0.84375 0.421875 1.84375q0 1.734375 -1.109375 2.9375q-1.09375 1.203125 -3.984375 1.203125l-3.484375 0l0 5.53125l-1.796875 0zm1.796875 -7.140625l3.515625 0q1.75 0 2.46875 -0.640625q0.734375 -0.65625 0.734375 -1.828125q0 -0.859375 -0.4375 -1.46875q-0.421875 -0.609375 -1.125 -0.796875q-0.453125 -0.125 -1.671875 -0.125l-3.484375 0l0 4.859375zm18.740479 5.53125l0 1.609375l-8.984375 0q-0.015625 -0.609375 0.1875 -1.15625q0.34375 -0.921875 1.09375 -1.8125q0.765625 -0.890625 2.1875 -2.0625q2.21875 -1.8125 3.0 -2.875q0.78125 -1.0625 0.78125 -2.015625q0 -0.984375 -0.71875 -1.671875q-0.703125 -0.6875 -1.84375 -0.6875q-1.203125 0 -1.9375 0.734375q-0.71875 0.71875 -0.71875 2.0l-1.71875 -0.171875q0.171875 -1.921875 1.328125 -2.921875q1.15625 -1.015625 3.09375 -1.015625q1.953125 0 3.09375 1.09375q1.140625 1.078125 1.140625 2.6875q0 0.8125 -0.34375 1.609375q-0.328125 0.78125 -1.109375 1.65625q-0.765625 0.859375 -2.5625 2.390625q-1.5 1.265625 -1.9375 1.71875q-0.421875 0.4375 -0.703125 0.890625l6.671875 0zm6.605896 -2.46875l0 -1.6875l5.125 0l0 1.6875l-5.125 0zm13.34906 -7.234375l0 1.9375l-1.796875 0l0 -1.53125q0 -1.25 0.296875 -1.796875q0.390625 -0.75 1.234375 -1.125l0.40625 0.640625q-0.5 0.21875 -0.75 0.640625q-0.234375 0.421875 -0.265625 1.234375l0.875 0zm2.875 0l0 1.9375l-1.796875 0l0 -1.53125q0 -1.25 0.296875 -1.796875q0.390625 -0.75 1.234375 -1.125l0.40625 0.640625q-0.5 0.21875 -0.75 0.640625q-0.234375 0.421875 -0.265625 1.234375l0.875 0zm8.4627075 7.703125l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm9.328125 2.390625q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 
-0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.0788574 4.9375l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm8.844421 3.78125l0 -1.21875l11.0625 0l0 1.21875l-11.0625 0zm11.891357 -3.78125l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.9782715 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm15.547607 4.65625q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm10.469482 4.9375l0 -1.25q-0.9375 1.46875 -2.75 1.46875q-1.171875 0 -2.171875 -0.640625q-0.984375 -0.65625 -1.53125 -1.8125q-0.53125 -1.171875 -0.53125 -2.6875q0 -1.46875 0.484375 -2.671875q0.5 -1.203125 1.46875 -1.84375q0.984375 -0.640625 2.203125 -0.640625q0.890625 0 1.578125 0.375q0.703125 0.375 1.140625 0.984375l0 -4.875l1.65625 0l0 13.59375l-1.546875 0zm-5.28125 -4.921875q0 
1.890625 0.796875 2.828125q0.8125 0.9375 1.890625 0.9375q1.09375 0 1.859375 -0.890625q0.765625 -0.890625 0.765625 -2.734375q0 -2.015625 -0.78125 -2.953125q-0.78125 -0.953125 -1.921875 -0.953125q-1.109375 0 -1.859375 0.90625q-0.75 0.90625 -0.75 2.859375zm8.828857 -6.875l0 -1.9375l1.78125 0l0 1.53125q0 1.234375 -0.28125 1.78125q-0.40625 0.75 -1.25 1.140625l-0.40625 -0.671875q0.5 -0.203125 0.75 -0.640625q0.25 -0.4375 0.265625 -1.203125l-0.859375 0zm2.875 0l0 -1.9375l1.78125 0l0 1.53125q0 1.234375 -0.28125 1.78125q-0.40625 0.75 -1.25 1.140625l-0.40625 -0.671875q0.5 -0.203125 0.75 -0.640625q0.25 -0.4375 0.28125 -1.203125l-0.875 0zm12.145996 15.796875q-1.375 -1.75 -2.328125 -4.078125q-0.953125 -2.34375 -0.953125 -4.84375q0 -2.21875 0.703125 -4.234375q0.84375 -2.34375 2.578125 -4.671875l1.203125 0q-1.125 1.921875 -1.484375 2.75q-0.5625 1.28125 -0.890625 2.671875q-0.40625 1.734375 -0.40625 3.484375q0 4.46875 2.78125 8.921875l-1.203125 0zm2.3532715 -6.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm10.0 2.9375l0 -13.59375l1.671875 0l0 4.875q1.171875 -1.359375 2.953125 -1.359375q1.09375 0 1.890625 0.4375q0.8125 0.421875 1.15625 1.1875q0.359375 0.765625 0.359375 2.203125l0 6.25l-1.671875 0l0 -6.25q0 -1.25 -0.546875 -1.8125q-0.546875 -0.578125 -1.53125 -0.578125q-0.75 0 -1.40625 0.390625q-0.640625 0.375 -0.921875 1.046875q-0.28125 0.65625 -0.28125 1.8125l0 5.390625l-1.671875 0zm16.813232 -1.21875q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 
[Embedded SVG diagram: raw vector path data omitted. The figure consists of rounded-rectangle and ellipse nodes connected by solid and dash-dot arrows, with text labels rendered as glyph outlines; no machine-readable label text survives in the source.]
-1.453125q-0.609375 0.84375 -1.4375 1.265625q-0.8125 0.40625 -1.640625 0.40625q-1.703125 0 -2.921875 -1.359375q-1.203125 -1.375 -1.203125 -3.828125q0 -2.5 1.171875 -3.796875q1.1875 -1.3125 2.984375 -1.3125q1.65625 0 2.859375 1.375l0 -4.890625l2.609375 0l0 13.59375zm-6.96875 -5.140625q0 1.578125 0.4375 2.28125q0.640625 1.015625 1.765625 1.015625q0.90625 0 1.53125 -0.765625q0.625 -0.765625 0.625 -2.28125q0 -1.703125 -0.609375 -2.4375q-0.609375 -0.75 -1.5625 -0.75q-0.9375 0 -1.5625 0.734375q-0.625 0.734375 -0.625 2.203125zm14.391785 5.140625l0 -13.59375l2.609375 0l0 4.890625q1.203125 -1.375 2.859375 -1.375q1.796875 0 2.96875 1.3125q1.1875 1.296875 1.1875 3.734375q0 2.53125 -1.203125 3.890625q-1.203125 1.359375 -2.921875 1.359375q-0.84375 0 -1.671875 -0.421875q-0.8125 -0.421875 -1.40625 -1.25l0 1.453125l-2.421875 0zm2.59375 -5.140625q0 1.53125 0.484375 2.265625q0.671875 1.03125 1.796875 1.03125q0.859375 0 1.46875 -0.734375q0.609375 -0.734375 0.609375 -2.328125q0 -1.6875 -0.609375 -2.421875q-0.609375 -0.75 -1.578125 -0.75q-0.9375 0 -1.5625 0.734375q-0.609375 0.71875 -0.609375 2.203125zm7.677246 -4.71875l2.78125 0l2.359375 7.0l2.296875 -7.0l2.703125 0l-3.484375 9.484375l-0.625 1.71875q-0.34375 0.859375 -0.65625 1.3125q-0.296875 0.46875 -0.703125 0.75q-0.40625 0.28125 -1.0 0.4375q-0.59375 0.15625 -1.328125 0.15625q-0.75 0 -1.46875 -0.15625l-0.234375 -2.046875q0.609375 0.125 1.09375 0.125q0.921875 0 1.34375 -0.53125q0.4375 -0.53125 0.671875 -1.359375l-3.75 -9.890625zm16.793396 -3.734375l2.75 0l0 7.359375q0 1.75 0.109375 2.265625q0.171875 0.84375 0.828125 1.359375q0.671875 0.5 1.8125 0.5q1.171875 0 1.765625 -0.484375q0.59375 -0.484375 0.71875 -1.171875q0.125 -0.703125 0.125 -2.3125l0 -7.515625l2.734375 0l0 7.140625q0 2.4375 -0.21875 3.453125q-0.21875 1.015625 -0.828125 1.71875q-0.59375 0.6875 -1.59375 1.109375q-1.0 0.40625 -2.609375 0.40625q-1.953125 0 -2.96875 -0.453125q-1.0 -0.453125 -1.59375 -1.171875q-0.578125 -0.71875 -0.75 -1.5q-0.28125 -1.171875 -0.28125 -3.453125l0 -7.25zm19.5979 13.59375l-2.609375 0l0 -9.828125q-1.4375 1.34375 -3.375 1.984375l0 -2.375q1.03125 -0.328125 2.21875 -1.25q1.203125 -0.9375 1.640625 -2.1875l2.125 0l0 13.65625z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m618.8373 177.30708l142.58264 0l0 41.95276l-142.58264 0z" fill-rule="nonzero"></path><path fill="#a61c00" d="m628.5248 199.8052l2.671875 -0.265625q0.234375 1.34375 0.96875 1.984375q0.75 0.625 2.0 0.625q1.328125 0 2.0 -0.5625q0.671875 -0.5625 0.671875 -1.3125q0 -0.484375 -0.28125 -0.8125q-0.28125 -0.34375 -0.984375 -0.59375q-0.484375 -0.171875 -2.203125 -0.59375q-2.203125 -0.546875 -3.09375 -1.34375q-1.265625 -1.125 -1.265625 -2.734375q0 -1.046875 0.59375 -1.953125q0.59375 -0.90625 1.703125 -1.375q1.109375 -0.46875 2.671875 -0.46875q2.5625 0 3.859375 1.125q1.296875 1.109375 1.359375 2.984375l-2.75 0.125q-0.171875 -1.046875 -0.75 -1.5q-0.578125 -0.46875 -1.75 -0.46875q-1.1875 0 -1.875 0.5q-0.4375 0.3125 -0.4375 0.84375q0 0.484375 0.421875 0.828125q0.515625 0.421875 2.515625 0.90625q2.0 0.46875 2.953125 0.984375q0.96875 0.5 1.515625 1.375q0.546875 0.875 0.546875 2.15625q0 1.171875 -0.65625 2.203125q-0.640625 1.015625 -1.828125 1.515625q-1.1875 0.484375 -2.96875 0.484375q-2.578125 0 -3.96875 -1.1875q-1.375 -1.1875 -1.640625 -3.46875zm15.7247925 -9.171875l0 5.0q1.25 -1.484375 3.015625 -1.484375q0.890625 0 1.609375 0.34375q0.734375 0.328125 1.09375 0.84375q0.375 0.515625 0.5 1.15625q0.140625 0.625 0.140625 1.953125l0 5.78125l-2.609375 0l0 -5.203125q0 -1.546875 -0.15625 -1.96875q-0.140625 
-0.421875 -0.515625 -0.65625q-0.375 -0.25 -0.9375 -0.25q-0.65625 0 -1.171875 0.3125q-0.5 0.3125 -0.734375 0.953125q-0.234375 0.640625 -0.234375 1.875l0 4.9375l-2.609375 0l0 -13.59375l2.609375 0zm10.739746 6.75l-2.359375 -0.4375q0.390625 -1.421875 1.359375 -2.109375q0.984375 -0.6875 2.90625 -0.6875q1.734375 0 2.59375 0.421875q0.859375 0.40625 1.203125 1.046875q0.34375 0.625 0.34375 2.328125l-0.03125 3.046875q0 1.296875 0.125 1.921875q0.125 0.609375 0.46875 1.3125l-2.578125 0q-0.09375 -0.265625 -0.25 -0.765625q-0.0625 -0.234375 -0.09375 -0.3125q-0.65625 0.65625 -1.421875 0.984375q-0.765625 0.3125 -1.625 0.3125q-1.515625 0 -2.40625 -0.8125q-0.875 -0.828125 -0.875 -2.09375q0 -0.84375 0.390625 -1.484375q0.40625 -0.65625 1.125 -1.0q0.71875 -0.359375 2.078125 -0.625q1.828125 -0.328125 2.53125 -0.625l0 -0.265625q0 -0.75 -0.375 -1.0625q-0.359375 -0.328125 -1.390625 -0.328125q-0.703125 0 -1.09375 0.28125q-0.390625 0.265625 -0.625 0.953125zm3.484375 2.109375q-0.5 0.171875 -1.59375 0.40625q-1.078125 0.234375 -1.40625 0.453125q-0.515625 0.359375 -0.515625 0.921875q0 0.546875 0.40625 0.953125q0.40625 0.390625 1.046875 0.390625q0.703125 0 1.34375 -0.46875q0.46875 -0.359375 0.625 -0.859375q0.09375 -0.34375 0.09375 -1.28125l0 -0.515625zm7.4382324 4.734375l-2.609375 0l0 -9.859375l2.421875 0l0 1.40625q0.625 -0.984375 1.109375 -1.296875q0.5 -0.328125 1.140625 -0.328125q0.890625 0 1.71875 0.5l-0.8125 2.265625q-0.65625 -0.421875 -1.21875 -0.421875q-0.546875 0 -0.9375 0.296875q-0.375 0.296875 -0.59375 1.09375q-0.21875 0.78125 -0.21875 3.296875l0 3.046875zm10.463379 -3.140625l2.609375 0.4375q-0.5 1.4375 -1.59375 2.1875q-1.078125 0.734375 -2.703125 0.734375q-2.5625 0 -3.796875 -1.671875q-0.96875 -1.34375 -0.96875 -3.40625q0 -2.4375 1.265625 -3.828125q1.28125 -1.390625 3.25 -1.390625q2.1875 0 3.453125 1.453125q1.28125 1.453125 1.234375 4.453125l-6.53125 0q0.015625 1.15625 0.625 1.8125q0.609375 0.640625 1.5 0.640625q0.609375 0 1.03125 -0.328125q0.421875 -0.34375 0.625 -1.09375zm0.15625 -2.625q-0.03125 -1.140625 -0.59375 -1.71875q-0.546875 -0.59375 -1.34375 -0.59375q-0.859375 0 -1.40625 0.625q-0.5625 0.609375 -0.546875 1.6875l3.890625 0zm13.563232 5.765625l-2.421875 0l0 -1.453125q-0.609375 0.84375 -1.4375 1.265625q-0.8125 0.40625 -1.640625 0.40625q-1.703125 0 -2.921875 -1.359375q-1.203125 -1.375 -1.203125 -3.828125q0 -2.5 1.171875 -3.796875q1.1875 -1.3125 2.984375 -1.3125q1.65625 0 2.859375 1.375l0 -4.890625l2.609375 0l0 13.59375zm-6.96875 -5.140625q0 1.578125 0.4375 2.28125q0.640625 1.015625 1.765625 1.015625q0.90625 0 1.53125 -0.765625q0.625 -0.765625 0.625 -2.28125q0 -1.703125 -0.609375 -2.4375q-0.609375 -0.75 -1.5625 -0.75q-0.9375 0 -1.5625 0.734375q-0.625 0.734375 -0.625 2.203125zm14.391785 5.140625l0 -13.59375l2.609375 0l0 4.890625q1.203125 -1.375 2.859375 -1.375q1.796875 0 2.96875 1.3125q1.1875 1.296875 1.1875 3.734375q0 2.53125 -1.203125 3.890625q-1.203125 1.359375 -2.921875 1.359375q-0.84375 0 -1.671875 -0.421875q-0.8125 -0.421875 -1.40625 -1.25l0 1.453125l-2.421875 0zm2.59375 -5.140625q0 1.53125 0.484375 2.265625q0.671875 1.03125 1.796875 1.03125q0.859375 0 1.46875 -0.734375q0.609375 -0.734375 0.609375 -2.328125q0 -1.6875 -0.609375 -2.421875q-0.609375 -0.75 -1.578125 -0.75q-0.9375 0 -1.5625 0.734375q-0.609375 0.71875 -0.609375 2.203125zm7.677246 -4.71875l2.78125 0l2.359375 7.0l2.296875 -7.0l2.703125 0l-3.484375 9.484375l-0.625 1.71875q-0.34375 0.859375 -0.65625 1.3125q-0.296875 0.46875 -0.703125 0.75q-0.40625 0.28125 -1.0 0.4375q-0.59375 0.15625 -1.328125 0.15625q-0.75 0 -1.46875 -0.15625l-0.234375 
-2.046875q0.609375 0.125 1.09375 0.125q0.921875 0 1.34375 -0.53125q0.4375 -0.53125 0.671875 -1.359375l-3.75 -9.890625zm16.793396 -3.734375l2.75 0l0 7.359375q0 1.75 0.109375 2.265625q0.171875 0.84375 0.828125 1.359375q0.671875 0.5 1.8125 0.5q1.171875 0 1.765625 -0.484375q0.59375 -0.484375 0.71875 -1.171875q0.125 -0.703125 0.125 -2.3125l0 -7.515625l2.734375 0l0 7.140625q0 2.4375 -0.21875 3.453125q-0.21875 1.015625 -0.828125 1.71875q-0.59375 0.6875 -1.59375 1.109375q-1.0 0.40625 -2.609375 0.40625q-1.953125 0 -2.96875 -0.453125q-1.0 -0.453125 -1.59375 -1.171875q-0.578125 -0.71875 -0.75 -1.5q-0.28125 -1.171875 -0.28125 -3.453125l0 -7.25zm19.5979 13.59375l-2.609375 0l0 -9.828125q-1.4375 1.34375 -3.375 1.984375l0 -2.375q1.03125 -0.328125 2.21875 -1.25q1.203125 -0.9375 1.640625 -2.1875l2.125 0l0 13.65625z" fill-rule="nonzero"></path><path fill="#76a5af" d="m287.81104 383.17343l0 0c0 -5.809967 4.7099 -10.519897 10.519897 -10.519897l116.66098 0c2.790039 0 5.4658203 1.1083374 7.43869 3.0812073c1.9728699 1.9728699 3.0812073 4.648651 3.0812073 7.43869l0 42.07834c0 5.809967 -4.7099304 10.519897 -10.519897 10.519897l-116.66098 0c-5.8099976 0 -10.519897 -4.7099304 -10.519897 -10.519897z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m287.81104 383.17343l0 0c0 -5.809967 4.7099 -10.519897 10.519897 -10.519897l116.66098 0c2.790039 0 5.4658203 1.1083374 7.43869 3.0812073c1.9728699 1.9728699 3.0812073 4.648651 3.0812073 7.43869l0 42.07834c0 5.809967 -4.7099304 10.519897 -10.519897 10.519897l-116.66098 0c-5.8099976 0 -10.519897 -4.7099304 -10.519897 -10.519897z" fill-rule="nonzero"></path><path fill="#000000" d="m346.62677 411.1326l0 -13.59375l4.421875 0q2.5 0 3.265625 0.203125q1.15625 0.296875 1.9375 1.328125q0.796875 1.015625 0.796875 2.640625q0 1.25 -0.453125 2.109375q-0.453125 0.859375 -1.15625 1.34375q-0.703125 0.484375 -1.421875 0.640625q-0.984375 0.203125 -2.84375 0.203125l-1.796875 0l0 5.125l-2.75 0zm2.75 -11.296875l0 3.859375l1.5 0q1.625 0 2.171875 -0.21875q0.546875 -0.21875 0.859375 -0.671875q0.3125 -0.453125 0.3125 -1.046875q0 -0.75 -0.4375 -1.234375q-0.4375 -0.484375 -1.09375 -0.59375q-0.5 -0.09375 -1.984375 -0.09375l-1.328125 0zm9.037323 7.6875l2.515625 -0.3125q0.125 0.96875 0.65625 1.484375q0.53125 0.5 1.28125 0.5q0.796875 0 1.34375 -0.609375q0.5625 -0.609375 0.5625 -1.640625q0 -0.984375 -0.53125 -1.5625q-0.53125 -0.578125 -1.28125 -0.578125q-0.5 0 -1.203125 0.203125l0.28125 -2.125q1.0625 0.015625 1.609375 -0.46875q0.5625 -0.484375 0.5625 -1.296875q0 -0.6875 -0.40625 -1.09375q-0.40625 -0.40625 -1.078125 -0.40625q-0.671875 0 -1.140625 0.46875q-0.46875 0.46875 -0.578125 1.359375l-2.40625 -0.421875q0.25 -1.234375 0.75 -1.96875q0.515625 -0.734375 1.421875 -1.15625q0.90625 -0.421875 2.03125 -0.421875q1.90625 0 3.078125 1.21875q0.953125 1.0 0.953125 2.265625q0 1.796875 -1.953125 2.859375q1.15625 0.25 1.859375 1.125q0.703125 0.875 0.703125 2.109375q0 1.78125 -1.3125 3.046875q-1.296875 1.265625 -3.25 1.265625q-1.84375 0 -3.0625 -1.0625q-1.21875 -1.0625 -1.40625 -2.78125z" fill-rule="nonzero"></path></g></svg>
+
diff --git a/doc/images/Arvados_arch.svg b/doc/images/Arvados_arch.svg
new file mode 100644
index 0000000..7680470
--- /dev/null
+++ b/doc/images/Arvados_arch.svg
@@ -0,0 +1,514 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg version="1.2" width="280mm" height="210mm" viewBox="0 0 28000 21000" preserveAspectRatio="xMidYMid" fill-rule="evenodd" stroke-width="28.222" stroke-linejoin="round" xmlns="http://www.w3.org/2000/svg" xmlns:ooo="http://xml.openoffice.org/svg/export" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:presentation="http://sun.com/xmlns/staroffice/presentation" xmlns:smil="http://www.w3.org/2001/SMIL20/" xmlns:anim="urn:oasis:names:tc:opendocument:xmlns:animation:1.0" xml:space="preserve">
+ <defs class="ClipPathGroup">
+  <clipPath id="presentation_clip_path" clipPathUnits="userSpaceOnUse">
+   <rect x="0" y="0" width="28000" height="21000"/>
+  </clipPath>
+  <clipPath id="presentation_clip_path_shrink" clipPathUnits="userSpaceOnUse">
+   <rect x="28" y="21" width="27944" height="20958"/>
+  </clipPath>
+ </defs>
+ <defs>
+  <font id="EmbeddedFont_1" horiz-adv-x="2048">
+   <font-face font-family="Liberation Sans embedded" units-per-em="2048" font-weight="normal" font-style="normal" ascent="1852" descent="423"/>
+   <missing-glyph horiz-adv-x="2048" d="M 0,0 L 2047,0 2047,2047 0,2047 0,0 Z"/>
+   <glyph unicode="y" horiz-adv-x="1059" d="M 604,1 C 579,-64 553,-123 527,-175 500,-227 471,-272 438,-309 405,-346 369,-374 329,-394 289,-413 243,-423 191,-423 168,-423 147,-423 128,-423 109,-423 88,-420 67,-414 L 67,-279 C 80,-282 94,-284 110,-284 126,-284 140,-284 151,-284 204,-284 253,-264 298,-225 343,-186 383,-124 417,-38 L 434,5 5,1082 197,1082 425,484 C 432,466 440,442 451,412 461,382 471,352 482,322 492,292 501,265 509,241 517,217 522,202 523,196 525,203 530,218 538,240 545,261 554,285 564,312 573,339 583,366 593,393 603,420 611,444 618,464 L 830,1082 1020,1082 604,1 Z"/>
+   <glyph unicode="x" horiz-adv-x="1033" d="M 801,0 L 510,444 217,0 23,0 408,556 41,1082 240,1082 510,661 778,1082 979,1082 612,558 1002,0 801,0 Z"/>
+   <glyph unicode="w" horiz-adv-x="1535" d="M 1174,0 L 965,0 792,698 C 787,716 781,738 776,765 770,792 764,818 759,843 752,872 746,903 740,934 734,904 728,874 721,845 716,820 710,793 704,766 697,739 691,715 686,694 L 508,0 300,0 -3,1082 175,1082 358,347 C 363,332 367,313 372,291 377,268 381,246 386,225 391,200 396,175 401,149 406,174 412,199 418,223 423,244 429,265 434,286 439,307 444,325 448,339 L 644,1082 837,1082 1026,339 C 1031,322 1036,302 1041,280 1046,258 1051,237 1056,218 1061,195 1067,172 1072,149 1077,174 1083,199 1088,223 1093,244 1098,265 1103,288 1108,310 1112,330 1117,347 L 1308,1082 1484,1082 1174,0 Z"/>
+   <glyph unicode="v" horiz-adv-x="1059" d="M 613,0 L 400,0 7,1082 199,1082 437,378 C 442,363 447,346 454,325 460,304 466,282 473,259 480,236 486,215 492,194 497,173 502,155 506,141 510,155 515,173 522,194 528,215 534,236 541,258 548,280 555,302 562,323 569,344 575,361 580,376 L 826,1082 1017,1082 613,0 Z"/>
+   <glyph unicode="u" horiz-adv-x="901" d="M 314,1082 L 314,396 C 314,343 318,299 326,264 333,229 346,200 363,179 380,157 403,142 432,133 460,124 495,119 537,119 580,119 618,127 653,142 687,157 716,178 741,207 765,235 784,270 797,312 810,353 817,401 817,455 L 817,1082 997,1082 997,228 C 997,205 997,181 998,156 998,131 998,107 999,85 1000,62 1000,43 1001,27 1002,11 1002,3 1003,3 L 833,3 C 832,6 832,15 831,30 830,44 830,61 829,79 828,98 827,117 826,136 825,156 825,172 825,185 L 822,185 C 805,154 786,125 765,100 744,75 720,53 693,36 666,18 634,4 599,-6 564,-15 523,-20 476,-20 416,-20 364,-13 321,2 278,17 242,39 214,70 186,101 166,140 153,188 140,236 133,294 133,361 L 133,1082 314,1082 Z"/>
+   <glyph unicode="t" horiz-adv-x="531" d="M 554,8 C 527,1 499,-5 471,-10 442,-14 409,-16 372,-16 228,-16 156,66 156,229 L 156,951 31,951 31,1082 163,1082 216,1324 336,1324 336,1082 536,1082 536,951 336,951 336,268 C 336,216 345,180 362,159 379,138 408,127 450,127 467,127 484,128 501,131 517,134 535,137 554,141 L 554,8 Z"/>
+   <glyph unicode="s" horiz-adv-x="927" d="M 950,299 C 950,248 940,203 921,164 901,124 872,91 835,64 798,37 752,16 698,2 643,-13 581,-20 511,-20 448,-20 392,-15 342,-6 291,4 247,20 209,41 171,62 139,91 114,126 88,161 69,203 57,254 L 216,285 C 231,227 263,185 311,158 359,131 426,117 511,117 550,117 585,120 618,125 650,130 678,140 701,153 724,166 743,183 756,205 769,226 775,253 775,285 775,318 767,345 752,366 737,387 715,404 688,418 661,432 628,444 589,455 550,465 507,476 460,489 417,500 374,513 331,527 288,541 250,560 216,583 181,606 153,634 132,668 111,702 100,745 100,796 100,895 135,970 206,1022 276,1073 378,1099 513,1099 632,1099 727,1078 798,1036 868,994 912,927 931,834 L 769,814 C 763,842 752,866 736,885 720,904 701,919 678,931 655,942 630,951 602,956 573,961 544,963 513,963 432,963 372,951 333,926 294,901 275,864 275,814 275,785 282,761 297,742 311,723 331,707 357,694 382,681 413,669 449,660 485,650 525,640 568,629 597,622 626,614 656,606 686,597 715,587 744,576 772,564 799,550 824,535 849,519 870,500 889,478 908,456 923,430 934,401 945,372 950,338 950,299 Z"/>
+   <glyph unicode="r" horiz-adv-x="556" d="M 142,0 L 142,830 C 142,853 142,876 142,900 141,923 141,946 140,968 139,990 139,1011 138,1030 137,1049 137,1067 136,1082 L 306,1082 C 307,1067 308,1049 309,1030 310,1010 311,990 312,969 313,948 313,929 314,910 314,891 314,874 314,861 L 318,861 C 331,902 344,938 359,969 373,999 390,1024 409,1044 428,1063 451,1078 478,1088 505,1097 537,1102 575,1102 590,1102 604,1101 617,1099 630,1096 641,1094 648,1092 L 648,927 C 636,930 622,933 606,935 590,936 572,937 552,937 511,937 476,928 447,909 418,890 394,865 376,832 357,799 344,759 335,714 326,668 322,618 322,564 L 322,0 142,0 Z"/>
+   <glyph unicode="p" horiz-adv-x="953" d="M 1053,546 C 1053,464 1046,388 1033,319 1020,250 998,190 967,140 936,90 895,51 844,23 793,-6 730,-20 655,-20 578,-20 510,-5 452,24 394,53 350,101 319,168 L 314,168 C 315,167 315,161 316,150 316,139 316,126 317,110 317,94 317,76 318,57 318,37 318,17 318,-2 L 318,-425 138,-425 138,864 C 138,891 138,916 138,940 137,964 137,986 136,1005 135,1025 135,1042 134,1056 133,1070 133,1077 132,1077 L 306,1077 C 307,1075 308,1068 309,1057 310,1045 311,1031 312,1014 313,998 314,980 315,961 316,943 316,925 316,908 L 320,908 C 337,943 356,972 377,997 398,1021 423,1041 450,1057 477,1072 508,1084 542,1091 575,1098 613,1101 655,1101 730,1101 793,1088 844,1061 895,1034 936,997 967,949 998,900 1020,842 1033,774 1046,705 1053,629 1053,546 Z M 864,542 C 864,609 860,668 852,720 844,772 830,816 811,852 791,888 765,915 732,934 699,953 658,962 609,962 569,962 531,956 496,945 461,934 430,912 404,880 377,848 356,804 341,748 326,691 318,618 318,528 318,451 324,387 337,334 350,281 368,238 393,205 417,172 447,149 483,135 519,120 560,113 607,113 657,113 699,123 732,142 765,161 791,189 811,226 830,263 844,308 852,361 860,414 864,474 864,542 Z"/>
+   <glyph unicode="o" horiz-adv-x="980" d="M 1053,542 C 1053,353 1011,212 928,119 845,26 724,-20 565,-20 490,-20 422,-9 363,14 304,37 254,71 213,118 172,165 140,223 119,294 97,364 86,447 86,542 86,915 248,1102 571,1102 655,1102 728,1090 789,1067 850,1044 900,1009 939,962 978,915 1006,857 1025,787 1044,717 1053,635 1053,542 Z M 864,542 C 864,626 858,695 845,750 832,805 813,848 788,881 763,914 732,937 696,950 660,963 619,969 574,969 528,969 487,962 450,949 413,935 381,912 355,879 329,846 309,802 296,747 282,692 275,624 275,542 275,458 282,389 297,334 312,279 332,235 358,202 383,169 414,146 449,133 484,120 522,113 563,113 609,113 651,120 688,133 725,146 757,168 783,201 809,234 829,278 843,333 857,388 864,458 864,542 Z"/>
+   <glyph unicode="n" horiz-adv-x="900" d="M 825,0 L 825,686 C 825,739 821,783 814,818 806,853 793,882 776,904 759,925 736,941 708,950 679,959 644,963 602,963 559,963 521,956 487,941 452,926 423,904 399,876 374,847 355,812 342,771 329,729 322,681 322,627 L 322,0 142,0 142,853 C 142,876 142,900 142,925 141,950 141,974 140,996 139,1019 139,1038 138,1054 137,1070 137,1078 136,1078 L 306,1078 C 307,1075 307,1066 308,1052 309,1037 310,1021 311,1002 312,984 312,965 313,945 314,926 314,910 314,897 L 317,897 C 334,928 353,957 374,982 395,1007 419,1029 446,1047 473,1064 505,1078 540,1088 575,1097 616,1102 663,1102 723,1102 775,1095 818,1080 861,1065 897,1043 925,1012 953,981 974,942 987,894 1000,845 1006,788 1006,721 L 1006,0 825,0 Z"/>
+   <glyph unicode="m" horiz-adv-x="1456" d="M 768,0 L 768,686 C 768,739 765,783 758,818 751,853 740,882 725,904 709,925 688,941 663,950 638,959 607,963 570,963 532,963 498,956 467,941 436,926 410,904 389,876 367,847 350,812 339,771 327,729 321,681 321,627 L 321,0 142,0 142,853 C 142,876 142,900 142,925 141,950 141,974 140,996 139,1019 139,1038 138,1054 137,1070 137,1078 136,1078 L 306,1078 C 307,1075 307,1066 308,1052 309,1037 310,1021 311,1002 312,984 312,965 313,945 314,926 314,910 314,897 L 317,897 C 333,928 350,957 369,982 388,1007 410,1029 435,1047 460,1064 488,1078 521,1088 553,1097 590,1102 633,1102 715,1102 780,1086 828,1053 875,1020 908,968 927,897 L 930,897 C 946,928 964,957 984,982 1004,1007 1027,1029 1054,1047 1081,1064 1111,1078 1144,1088 1177,1097 1215,1102 1258,1102 1313,1102 1360,1095 1400,1080 1439,1065 1472,1043 1497,1012 1522,981 1541,942 1553,894 1565,845 1571,788 1571,721 L 1571,0 1393,0 1393,686 C 1393,739 1390,783 1383,818 1376,853 1365,882 1350,904 1334,925 1313,941 1288,950 1263,959 1232,963 1195,963 1157,963 1123,956 1092,942 1061,927 1035,906 1014,878 992,850 975,815 964,773 952,731 946,682 946,627 L 946,0 768,0 Z"/>
+   <glyph unicode="l" horiz-adv-x="187" d="M 138,0 L 138,1484 318,1484 318,0 138,0 Z"/>
+   <glyph unicode="k" horiz-adv-x="927" d="M 816,0 L 450,494 318,385 318,0 138,0 138,1484 318,1484 318,557 793,1082 1004,1082 565,617 1027,0 816,0 Z"/>
+   <glyph unicode="j" horiz-adv-x="372" d="M 137,1312 L 137,1484 317,1484 317,1312 137,1312 Z M 317,-132 C 317,-174 314,-212 307,-247 300,-283 287,-313 269,-339 251,-365 227,-386 196,-401 165,-416 125,-423 77,-423 54,-423 32,-423 11,-423 -11,-423 -31,-421 -50,-416 L -50,-277 C -41,-278 -31,-280 -19,-281 -7,-282 3,-283 12,-283 37,-283 58,-280 75,-273 91,-266 104,-256 113,-242 122,-227 129,-209 132,-187 135,-164 137,-138 137,-107 L 137,1082 317,1082 317,-132 Z"/>
+   <glyph unicode="i" horiz-adv-x="187" d="M 137,1312 L 137,1484 317,1484 317,1312 137,1312 Z M 137,0 L 137,1082 317,1082 317,0 137,0 Z"/>
+   <glyph unicode="h" horiz-adv-x="874" d="M 317,897 C 337,934 359,965 382,991 405,1016 431,1037 459,1054 487,1071 518,1083 551,1091 584,1098 622,1102 663,1102 732,1102 789,1093 834,1074 878,1055 913,1029 939,996 964,962 982,922 992,875 1001,828 1006,777 1006,721 L 1006,0 825,0 825,686 C 825,732 822,772 817,807 811,842 800,871 784,894 768,917 745,934 716,946 687,957 649,963 602,963 559,963 521,955 487,940 452,925 423,903 399,875 374,847 355,813 342,773 329,733 322,688 322,638 L 322,0 142,0 142,1484 322,1484 322,1098 C 322,1076 322,1054 321,1032 320,1010 320,990 319,971 318,952 317,937 316,924 315,911 315,902 314,897 L 317,897 Z"/>
+   <glyph unicode="g" horiz-adv-x="954" d="M 548,-425 C 486,-425 431,-419 383,-406 335,-393 294,-375 260,-352 226,-328 198,-300 177,-267 156,-234 140,-198 131,-158 L 312,-132 C 324,-182 351,-220 392,-248 433,-274 486,-288 553,-288 594,-288 631,-282 664,-271 697,-260 726,-241 749,-217 772,-191 790,-159 803,-119 816,-79 822,-30 822,27 L 822,201 820,201 C 807,174 790,148 771,123 751,98 727,75 699,56 670,37 637,21 600,10 563,-2 520,-8 472,-8 403,-8 345,4 296,27 247,50 207,84 176,130 145,176 122,233 108,302 93,370 86,449 86,539 86,626 93,704 108,773 122,842 145,901 178,950 210,998 252,1035 304,1061 355,1086 418,1099 492,1099 569,1099 635,1082 692,1047 748,1012 791,962 822,897 L 824,897 C 824,914 825,933 826,953 827,974 828,994 829,1012 830,1031 831,1046 832,1060 833,1073 835,1080 836,1080 L 1007,1080 C 1006,1074 1006,1064 1005,1050 1004,1035 1004,1018 1003,998 1002,978 1002,956 1002,932 1001,907 1001,882 1001,856 L 1001,30 C 1001,-121 964,-234 890,-311 815,-387 701,-425 548,-425 Z M 822,541 C 822,616 814,681 798,735 781,788 760,832 733,866 706,900 676,925 642,941 607,957 572,965 536,965 490,965 451,957 418,941 385,925 357,900 336,866 314,831 298,787 288,734 277,680 272,616 272,541 272,463 277,398 288,345 298,292 314,249 335,216 356,183 383,160 416,146 449,132 488,125 533,125 569,125 604,133 639,148 673,163 704,188 731,221 758,254 780,297 797,350 814,403 822,466 822,541 Z"/>
+   <glyph unicode="f" horiz-adv-x="557" d="M 361,951 L 361,0 181,0 181,951 29,951 29,1082 181,1082 181,1204 C 181,1243 185,1280 192,1314 199,1347 213,1377 233,1402 252,1427 279,1446 313,1461 347,1475 391,1482 445,1482 466,1482 489,1481 512,1479 535,1477 555,1474 572,1470 L 572,1333 C 561,1335 548,1337 533,1339 518,1340 504,1341 492,1341 465,1341 444,1337 427,1330 410,1323 396,1312 387,1299 377,1285 370,1268 367,1248 363,1228 361,1205 361,1179 L 361,1082 572,1082 572,951 361,951 Z"/>
+   <glyph unicode="e" horiz-adv-x="980" d="M 276,503 C 276,446 282,394 294,347 305,299 323,258 348,224 372,189 403,163 441,144 479,125 525,115 578,115 656,115 719,131 766,162 813,193 844,233 861,281 L 1019,236 C 1008,206 992,176 972,146 951,115 924,88 890,64 856,39 814,19 763,4 712,-12 650,-20 578,-20 418,-20 296,28 213,123 129,218 87,360 87,548 87,649 100,735 125,806 150,876 185,933 229,977 273,1021 324,1053 383,1073 442,1092 504,1102 571,1102 662,1102 738,1087 799,1058 860,1029 909,988 946,937 983,885 1009,824 1025,754 1040,684 1048,608 1048,527 L 1048,503 276,503 Z M 862,641 C 852,755 823,838 775,891 727,943 658,969 568,969 538,969 507,964 474,955 441,945 410,928 382,903 354,878 330,845 311,803 292,760 281,706 278,641 L 862,641 Z"/>
+   <glyph unicode="d" horiz-adv-x="954" d="M 821,174 C 788,105 744,55 689,25 634,-5 565,-20 484,-20 347,-20 247,26 183,118 118,210 86,349 86,536 86,913 219,1102 484,1102 566,1102 634,1087 689,1057 744,1027 788,979 821,914 L 823,914 C 823,921 823,931 823,946 822,960 822,975 822,991 821,1006 821,1021 821,1035 821,1049 821,1059 821,1065 L 821,1484 1001,1484 1001,219 C 1001,193 1001,168 1002,143 1002,119 1002,97 1003,77 1004,57 1004,40 1005,26 1006,11 1006,4 1007,4 L 835,4 C 834,11 833,20 832,32 831,44 830,58 829,73 828,89 827,105 826,123 825,140 825,157 825,174 L 821,174 Z M 275,542 C 275,467 280,403 289,350 298,297 313,253 334,219 355,184 381,159 413,143 445,127 484,119 530,119 577,119 619,127 656,142 692,157 722,182 747,217 771,251 789,296 802,351 815,406 821,474 821,554 821,631 815,696 802,749 789,802 771,844 746,877 721,910 691,933 656,948 620,962 579,969 532,969 488,969 450,961 418,946 386,931 359,906 338,872 317,838 301,794 291,740 280,685 275,619 275,542 Z"/>
+   <glyph unicode="c" horiz-adv-x="875" d="M 275,546 C 275,484 280,427 289,375 298,323 313,278 334,241 355,203 384,174 419,153 454,132 497,122 548,122 612,122 666,139 709,173 752,206 778,258 788,328 L 970,328 C 964,283 951,239 931,197 911,155 884,118 850,86 815,54 773,28 724,9 675,-10 618,-20 553,-20 468,-20 396,-6 337,23 278,52 230,91 193,142 156,192 129,251 112,320 95,388 87,462 87,542 87,615 93,679 105,735 117,790 134,839 156,881 177,922 203,957 232,986 261,1014 293,1037 328,1054 362,1071 398,1083 436,1091 474,1098 512,1102 551,1102 612,1102 666,1094 713,1077 760,1060 801,1038 836,1009 870,980 898,945 919,906 940,867 955,824 964,779 L 779,765 C 770,825 746,873 708,908 670,943 616,961 546,961 495,961 452,953 418,936 383,919 355,893 334,859 313,824 298,781 289,729 280,677 275,616 275,546 Z"/>
+   <glyph unicode="b" horiz-adv-x="953" d="M 1053,546 C 1053,169 920,-20 655,-20 573,-20 505,-5 451,25 396,54 352,102 318,168 L 316,168 C 316,150 316,132 315,113 314,94 313,77 312,61 311,45 310,31 309,19 308,8 307,2 306,2 L 132,2 C 133,8 133,18 134,32 135,47 135,64 136,84 137,104 137,126 138,150 138,174 138,199 138,225 L 138,1484 318,1484 318,1061 C 318,1041 318,1022 318,1004 317,985 317,969 316,955 315,938 315,923 314,908 L 318,908 C 351,977 396,1027 451,1057 506,1087 574,1102 655,1102 792,1102 892,1056 957,964 1021,872 1053,733 1053,546 Z M 864,540 C 864,615 859,679 850,732 841,785 826,829 805,864 784,898 758,923 726,939 694,955 655,963 609,963 562,963 520,955 484,940 447,925 417,900 393,866 368,832 350,787 337,732 324,677 318,609 318,529 318,452 324,387 337,334 350,281 368,239 393,206 417,173 447,149 483,135 519,120 560,113 607,113 651,113 689,121 721,136 753,151 780,176 801,210 822,244 838,288 849,343 859,397 864,463 864,540 Z"/>
+   <glyph unicode="a" horiz-adv-x="1060" d="M 414,-20 C 305,-20 224,9 169,66 114,124 87,203 87,303 87,375 101,434 128,480 155,526 190,562 234,588 277,614 327,632 383,642 439,652 496,657 554,657 L 797,657 797,717 C 797,762 792,800 783,832 774,863 759,889 740,908 721,928 697,942 668,951 639,960 604,965 565,965 530,965 499,963 471,958 443,953 419,944 398,931 377,918 361,900 348,878 335,855 327,827 323,793 L 135,810 C 142,853 154,892 173,928 192,963 218,994 253,1020 287,1046 330,1066 382,1081 433,1095 496,1102 569,1102 705,1102 807,1071 876,1009 945,946 979,856 979,738 L 979,272 C 979,219 986,179 1000,152 1014,125 1041,111 1080,111 1090,111 1100,112 1110,113 1120,114 1130,116 1139,118 L 1139,6 C 1116,1 1094,-3 1072,-6 1049,-9 1025,-10 1000,-10 966,-10 937,-5 913,4 888,13 868,26 853,45 838,63 826,86 818,113 810,140 805,171 803,207 L 797,207 C 778,172 757,141 734,113 711,85 684,61 653,42 622,22 588,7 549,-4 510,-15 465,-20 414,-20 Z M 455,115 C 512,115 563,125 606,146 649,167 684,194 713,226 741,259 762,294 776,332 790,371 797,408 797,443 L 797,531 600,531 C 556,531 514,528 475,522 435,517 400,506 370,489 340,472 316,449 299,418 281,388 272,349 272,300 272,241 288,195 320,163 351,131 396,115 455,115 Z"/>
+   <glyph unicode="W" horiz-adv-x="1906" d="M 1511,0 L 1283,0 1039,895 C 1032,920 1024,950 1016,985 1007,1020 1000,1053 993,1084 985,1121 977,1158 969,1196 960,1157 952,1120 944,1083 937,1051 929,1018 921,984 913,950 905,920 898,895 L 652,0 424,0 9,1409 208,1409 461,514 C 472,472 483,430 494,389 504,348 513,311 520,278 529,239 537,203 544,168 554,214 564,259 575,304 580,323 584,342 589,363 594,384 599,404 604,424 609,444 614,463 619,482 624,500 628,517 632,532 L 877,1409 1060,1409 1305,532 C 1309,517 1314,500 1319,482 1324,463 1329,444 1334,425 1339,405 1343,385 1348,364 1353,343 1357,324 1362,305 1373,260 1383,215 1393,168 1394,168 1397,180 1402,203 1407,226 1414,254 1422,289 1430,324 1439,361 1449,402 1458,442 1468,479 1478,514 L 1727,1409 1926,1409 1511,0 Z"/>
+   <glyph unicode="S" horiz-adv-x="1139" d="M 1272,389 C 1272,330 1261,275 1238,225 1215,175 1179,132 1131,96 1083,59 1023,31 950,11 877,-10 790,-20 690,-20 515,-20 378,11 280,72 182,133 120,222 93,338 L 278,375 C 287,338 302,305 321,275 340,245 367,219 400,198 433,176 473,159 522,147 571,135 629,129 697,129 754,129 806,134 853,144 900,153 941,168 975,188 1009,208 1036,234 1055,266 1074,297 1083,335 1083,379 1083,425 1073,462 1052,491 1031,520 1001,543 963,562 925,581 880,596 827,609 774,622 716,635 652,650 613,659 573,668 534,679 494,689 456,701 420,716 383,730 349,747 317,766 285,785 257,809 234,836 211,863 192,894 179,930 166,965 159,1006 159,1053 159,1120 173,1177 200,1225 227,1272 264,1311 312,1342 360,1373 417,1395 482,1409 547,1423 618,1430 694,1430 781,1430 856,1423 918,1410 980,1396 1032,1375 1075,1348 1118,1321 1152,1287 1178,1247 1203,1206 1224,1159 1239,1106 L 1051,1073 C 1042,1107 1028,1137 1011,1164 993,1191 970,1213 941,1231 912,1249 878,1263 837,1272 796,1281 747,1286 692,1286 627,1286 572,1280 528,1269 483,1257 448,1241 421,1221 394,1201 374,1178 363,1151 351,1124 345,1094 345,1063 345,1021 356,987 377,960 398,933 426,910 462,892 498,874 540,859 587,847 634,835 685,823 738,811 781,801 825,791 868,781 911,770 952,758 991,744 1030,729 1067,712 1102,693 1136,674 1166,650 1191,622 1216,594 1236,561 1251,523 1265,485 1272,440 1272,389 Z"/>
+   <glyph unicode="P" horiz-adv-x="1086" d="M 1258,985 C 1258,924 1248,867 1228,814 1207,761 1177,715 1137,676 1096,637 1046,606 985,583 924,560 854,549 773,549 L 359,549 359,0 168,0 168,1409 761,1409 C 844,1409 917,1399 979,1379 1041,1358 1093,1330 1134,1293 1175,1256 1206,1211 1227,1159 1248,1106 1258,1048 1258,985 Z M 1066,983 C 1066,1072 1039,1140 984,1187 929,1233 847,1256 738,1256 L 359,1256 359,700 746,700 C 856,700 937,724 989,773 1040,822 1066,892 1066,983 Z"/>
+   <glyph unicode="L" horiz-adv-x="900" d="M 168,0 L 168,1409 359,1409 359,156 1071,156 1071,0 168,0 Z"/>
+   <glyph unicode="I" horiz-adv-x="186" d="M 189,0 L 189,1409 380,1409 380,0 189,0 Z"/>
+   <glyph unicode="F" horiz-adv-x="1006" d="M 359,1253 L 359,729 1145,729 1145,571 359,571 359,0 168,0 168,1409 1169,1409 1169,1253 359,1253 Z"/>
+   <glyph unicode="E" horiz-adv-x="1112" d="M 168,0 L 168,1409 1237,1409 1237,1253 359,1253 359,801 1177,801 1177,647 359,647 359,156 1278,156 1278,0 168,0 Z"/>
+   <glyph unicode="C" horiz-adv-x="1297" d="M 792,1274 C 712,1274 641,1261 580,1234 518,1207 466,1169 425,1120 383,1071 351,1011 330,942 309,873 298,796 298,711 298,626 310,549 333,479 356,408 389,348 432,297 475,246 527,207 590,179 652,151 722,137 800,137 855,137 905,144 950,159 995,173 1035,193 1072,219 1108,245 1140,276 1169,312 1198,347 1223,387 1245,430 L 1401,352 C 1376,299 1344,250 1307,205 1270,160 1226,120 1176,87 1125,54 1068,28 1005,9 941,-10 870,-20 791,-20 677,-20 577,-2 492,35 406,71 334,122 277,187 219,252 176,329 147,418 118,507 104,605 104,711 104,821 119,920 150,1009 180,1098 224,1173 283,1236 341,1298 413,1346 498,1380 583,1413 681,1430 790,1430 940,1430 1065,1401 1166,1342 1267,1283 1341,1196 1388,1081 L 1207,1021 C 1194,1054 1176,1086 1153,1117 1130,1147 1102,1174 1068,1197 1034,1220 994,1239 949,1253 903,1267 851,1274 792,1274 Z"/>
+   <glyph unicode="A" horiz-adv-x="1350" d="M 1167,0 L 1006,412 364,412 202,0 4,0 579,1409 796,1409 1362,0 1167,0 Z M 768,1026 C 757,1053 747,1080 738,1107 728,1134 719,1159 712,1182 705,1204 699,1223 694,1238 689,1253 686,1262 685,1265 684,1262 681,1252 676,1237 671,1222 665,1203 658,1180 650,1157 641,1132 632,1105 622,1078 612,1051 602,1024 L 422,561 949,561 768,1026 Z"/>
+   <glyph unicode="3" horiz-adv-x="980" d="M 1049,389 C 1049,324 1039,267 1018,216 997,165 966,123 926,88 885,53 835,26 776,8 716,-11 648,-20 571,-20 484,-20 410,-9 351,13 291,34 242,63 203,99 164,134 135,175 116,221 97,266 84,313 78,362 L 264,379 C 269,342 279,308 294,277 308,246 327,220 352,198 377,176 407,159 443,147 479,135 522,129 571,129 662,129 733,151 785,196 836,241 862,307 862,395 862,447 851,489 828,521 805,552 776,577 742,595 707,612 670,624 630,630 589,636 552,639 518,639 L 416,639 416,795 514,795 C 548,795 583,799 620,806 657,813 690,825 721,844 751,862 776,887 796,918 815,949 825,989 825,1038 825,1113 803,1173 759,1217 714,1260 648,1282 561,1282 482,1282 418,1262 369,1221 320,1180 291,1123 283,1049 L 102,1063 C 109,1125 126,1179 153,1225 180,1271 214,1309 255,1340 296,1370 342,1393 395,1408 448,1423 504,1430 563,1430 642,1430 709,1420 766,1401 823,1381 869,1354 905,1321 941,1287 968,1247 985,1202 1002,1157 1010,1108 1010,1057 1010,1016 1004,977 993,941 982,905 964,873 940,844 916,815 886,791 849,770 812,749 767,734 715,723 L 715,719 C 772,713 821,700 863,681 905,661 940,636 967,607 994,578 1015,544 1029,507 1042,470 1049,430 1049,389 Z"/>
+   <glyph unicode="0" horiz-adv-x="980" d="M 1059,705 C 1059,570 1046,456 1021,364 995,271 960,197 916,140 871,83 819,42 759,17 699,-8 635,-20 567,-20 498,-20 434,-8 375,17 316,42 264,82 221,139 177,196 143,270 118,363 93,455 80,569 80,705 80,847 93,965 118,1058 143,1151 177,1225 221,1280 265,1335 317,1374 377,1397 437,1419 502,1430 573,1430 640,1430 704,1419 763,1397 822,1374 873,1335 917,1280 961,1225 996,1151 1021,1058 1046,965 1059,847 1059,705 Z M 876,705 C 876,817 869,910 856,985 843,1059 823,1118 797,1163 771,1207 739,1238 702,1257 664,1275 621,1284 573,1284 522,1284 478,1275 439,1256 400,1237 368,1206 342,1162 315,1117 295,1058 282,984 269,909 262,816 262,705 262,597 269,506 283,432 296,358 316,299 343,254 369,209 401,176 439,157 477,137 520,127 569,127 616,127 659,137 697,157 735,176 767,209 794,254 820,299 840,358 855,432 869,506 876,597 876,705 Z"/>
+   <glyph unicode="." horiz-adv-x="186" d="M 187,0 L 187,219 382,219 382,0 187,0 Z"/>
+   <glyph unicode="-" horiz-adv-x="504" d="M 91,464 L 91,624 591,624 591,464 91,464 Z"/>
+   <glyph unicode="," horiz-adv-x="212" d="M 385,219 L 385,51 C 385,16 384,-16 381,-46 378,-74 373,-101 366,-127 359,-151 351,-175 342,-197 332,-219 320,-241 307,-262 L 184,-262 C 214,-219 237,-175 254,-131 270,-87 278,-43 278,0 L 190,0 190,219 385,219 Z"/>
+   <glyph unicode=" " horiz-adv-x="556"/>
+  </font>
+ </defs>
+ <defs>
+  <font id="EmbeddedFont_2" horiz-adv-x="2048">
+   <font-face font-family="Liberation Sans embedded" units-per-em="2048" font-weight="bold" font-style="normal" ascent="1852" descent="423"/>
+   <missing-glyph horiz-adv-x="2048" d="M 0,0 L 2047,0 2047,2047 0,2047 0,0 Z"/>
+   <glyph unicode="x" horiz-adv-x="1139" d="M 819,0 L 567,392 313,0 14,0 410,559 33,1082 336,1082 567,728 797,1082 1102,1082 725,562 1124,0 819,0 Z"/>
+   <glyph unicode="w" horiz-adv-x="1615" d="M 436,255 L 645,1082 946,1082 1153,255 1337,1082 1597,1082 1313,0 1016,0 797,882 571,0 274,0 -6,1082 258,1082 436,255 Z"/>
+   <glyph unicode="v" horiz-adv-x="1139" d="M 565,227 L 836,1082 1130,1082 731,0 395,0 8,1082 305,1082 565,227 Z"/>
+   <glyph unicode="t" horiz-adv-x="636" d="M 420,-18 C 337,-18 274,5 229,50 184,95 162,163 162,254 L 162,892 25,892 25,1082 176,1082 264,1336 440,1336 440,1082 645,1082 645,892 440,892 440,330 C 440,277 450,239 470,214 490,189 521,176 563,176 580,176 596,177 610,180 624,183 640,186 657,190 L 657,16 C 622,5 586,-4 547,-10 508,-15 466,-18 420,-18 Z"/>
+   <glyph unicode="s" horiz-adv-x="980" d="M 1055,316 C 1055,264 1044,217 1023,176 1001,135 969,100 928,71 887,42 836,19 776,4 716,-12 648,-20 571,-20 502,-20 440,-15 385,-5 330,5 281,22 240,45 198,68 163,97 135,134 107,171 86,216 72,270 L 319,307 C 327,277 338,253 352,234 366,215 383,201 404,191 425,181 449,174 477,171 504,168 536,166 571,166 603,166 633,168 661,172 688,175 712,182 733,191 753,200 769,212 780,229 791,245 797,265 797,290 797,318 789,340 773,357 756,373 734,386 706,397 677,407 644,416 606,424 567,431 526,440 483,450 438,460 393,472 349,486 305,500 266,519 231,543 196,567 168,598 147,635 126,672 115,718 115,775 115,826 125,872 145,913 165,953 194,987 233,1016 272,1044 320,1066 377,1081 434,1096 499,1103 573,1103 632,1103 686,1098 737,1087 788,1076 833,1058 873,1035 913,1011 947,981 974,944 1001,907 1019,863 1030,811 L 781,785 C 776,811 768,833 756,850 744,867 729,880 712,890 694,900 673,907 650,911 627,914 601,916 573,916 506,916 456,908 423,891 390,874 373,845 373,805 373,780 380,761 394,746 407,731 427,719 452,710 477,700 506,692 541,685 575,678 612,669 653,659 703,648 752,636 801,622 849,607 892,588 930,563 967,538 998,505 1021,466 1044,427 1055,377 1055,316 Z"/>
+   <glyph unicode="r" horiz-adv-x="662" d="M 143,0 L 143,833 C 143,856 143,881 143,907 142,933 142,958 141,982 140,1006 139,1027 138,1046 137,1065 136,1075 135,1075 L 403,1075 C 404,1067 406,1054 407,1035 408,1016 410,995 411,972 412,950 414,927 415,905 416,883 416,865 416,851 L 420,851 C 434,890 448,926 462,957 476,988 493,1014 512,1036 531,1057 553,1074 580,1086 607,1097 640,1103 679,1103 696,1103 712,1102 729,1099 745,1096 757,1092 766,1088 L 766,853 C 748,857 730,861 712,864 693,867 671,868 646,868 576,868 522,840 483,783 444,726 424,642 424,531 L 424,0 143,0 Z"/>
+   <glyph unicode="p" horiz-adv-x="1059" d="M 1167,546 C 1167,464 1159,388 1143,319 1126,250 1101,190 1067,140 1033,90 990,51 938,23 885,-6 823,-20 752,-20 720,-20 688,-17 657,-10 625,-3 595,8 566,23 537,38 511,57 487,82 462,106 441,136 424,172 L 418,172 C 419,169 419,160 420,147 421,134 421,118 422,101 423,83 423,64 424,45 424,25 424,7 424,-10 L 424,-425 143,-425 143,833 C 143,888 142,938 141,981 139,1024 137,1058 135,1082 L 408,1082 C 409,1077 411,1068 413,1055 414,1042 416,1026 417,1009 418,992 418,974 419,955 420,936 420,920 420,906 L 424,906 C 458,977 505,1028 564,1059 623,1090 692,1105 770,1105 839,1105 898,1091 948,1063 998,1035 1039,996 1072,947 1104,898 1128,839 1144,771 1159,702 1167,627 1167,546 Z M 874,546 C 874,669 855,761 818,821 781,880 725,910 651,910 623,910 595,904 568,893 540,881 515,861 494,833 472,804 454,766 441,719 427,671 420,611 420,538 420,467 427,409 440,362 453,315 471,277 493,249 514,221 539,201 566,190 593,178 621,172 649,172 685,172 717,179 745,194 773,208 797,230 816,261 835,291 849,330 859,377 869,424 874,481 874,546 Z"/>
+   <glyph unicode="o" horiz-adv-x="1086" d="M 1171,542 C 1171,459 1160,384 1137,315 1114,246 1079,187 1033,138 987,88 930,49 861,22 792,-6 712,-20 621,-20 533,-20 455,-6 388,21 321,48 264,87 219,136 173,185 138,245 115,314 92,383 80,459 80,542 80,623 91,697 114,766 136,834 170,893 215,943 260,993 317,1032 386,1060 455,1088 535,1102 627,1102 724,1102 807,1088 876,1060 945,1032 1001,993 1045,944 1088,894 1120,835 1141,767 1161,698 1171,623 1171,542 Z M 877,542 C 877,671 856,764 814,822 772,880 711,909 631,909 548,909 485,880 441,821 397,762 375,669 375,542 375,477 381,422 393,375 404,328 421,290 442,260 463,230 489,208 519,194 549,179 582,172 618,172 659,172 696,179 729,194 761,208 788,230 810,260 832,290 849,328 860,375 871,422 877,477 877,542 Z"/>
+   <glyph unicode="n" horiz-adv-x="1006" d="M 844,0 L 844,607 C 844,649 841,688 834,723 827,758 816,788 801,813 786,838 766,857 741,871 716,885 686,892 651,892 617,892 586,885 559,870 531,855 507,833 487,806 467,778 452,745 441,707 430,668 424,626 424,580 L 424,0 143,0 143,845 C 143,868 143,892 143,917 142,942 142,966 141,988 140,1010 139,1031 138,1048 137,1066 136,1075 135,1075 L 403,1075 C 404,1067 406,1055 407,1038 408,1021 410,1002 411,981 412,961 414,940 415,919 416,899 416,881 416,867 L 420,867 C 458,950 506,1010 563,1047 620,1084 689,1103 768,1103 833,1103 889,1092 934,1071 979,1050 1015,1020 1044,983 1072,946 1092,902 1105,851 1118,800 1124,746 1124,687 L 1124,0 844,0 Z"/>
+   <glyph unicode="l" horiz-adv-x="292" d="M 143,0 L 143,1484 424,1484 424,0 143,0 Z"/>
+   <glyph unicode="k" horiz-adv-x="1033" d="M 834,0 L 545,490 424,406 424,0 143,0 143,1484 424,1484 424,634 810,1082 1112,1082 732,660 1141,0 834,0 Z"/>
+   <glyph unicode="i" horiz-adv-x="292" d="M 143,1277 L 143,1484 424,1484 424,1277 143,1277 Z M 143,0 L 143,1082 424,1082 424,0 143,0 Z"/>
+   <glyph unicode="g" horiz-adv-x="1060" d="M 596,-434 C 525,-434 462,-427 408,-413 353,-398 307,-378 269,-353 230,-327 200,-296 177,-261 154,-225 138,-186 129,-143 L 410,-110 C 420,-153 442,-187 475,-212 508,-237 551,-249 604,-249 637,-249 668,-244 696,-235 723,-226 747,-210 767,-188 786,-165 802,-136 813,-99 824,-62 829,-17 829,37 829,56 829,75 829,94 829,113 829,131 830,147 831,166 831,184 831,201 L 829,201 C 796,131 751,80 692,49 633,18 562,2 481,2 412,2 353,16 304,43 254,70 213,107 180,156 147,204 123,262 108,329 92,396 84,469 84,550 84,633 92,709 109,777 126,844 151,902 186,951 220,1000 263,1037 316,1064 368,1090 430,1103 502,1103 574,1103 639,1088 696,1057 753,1026 797,977 829,908 L 834,908 C 834,922 835,939 836,957 837,976 838,994 839,1011 840,1029 842,1044 844,1058 845,1071 847,1078 848,1078 L 1114,1078 C 1113,1054 1111,1020 1110,977 1109,934 1108,885 1108,829 L 1108,32 C 1108,-47 1097,-115 1074,-173 1051,-231 1018,-280 975,-318 931,-357 877,-386 814,-405 750,-424 677,-434 596,-434 Z M 831,556 C 831,624 824,681 811,726 798,771 780,808 759,835 738,862 713,882 686,893 658,904 630,910 602,910 566,910 534,903 507,889 479,875 455,853 436,824 417,795 402,757 392,712 382,667 377,613 377,550 377,433 396,345 433,286 470,227 526,197 600,197 628,197 656,203 684,214 711,225 736,244 758,272 780,299 798,336 811,382 824,428 831,486 831,556 Z"/>
+   <glyph unicode="f" horiz-adv-x="663" d="M 473,892 L 473,0 193,0 193,892 35,892 35,1082 193,1082 193,1195 C 193,1236 198,1275 208,1310 218,1345 235,1375 259,1401 283,1427 315,1447 356,1462 397,1477 447,1484 508,1484 540,1484 572,1482 603,1479 634,1476 661,1472 686,1468 L 686,1287 C 674,1290 661,1292 646,1294 631,1295 617,1296 604,1296 578,1296 557,1293 540,1288 523,1283 509,1275 500,1264 490,1253 483,1240 479,1224 475,1207 473,1188 473,1167 L 473,1082 686,1082 686,892 473,892 Z"/>
+   <glyph unicode="e" horiz-adv-x="980" d="M 586,-20 C 508,-20 438,-8 376,15 313,38 260,73 216,120 172,167 138,226 115,297 92,368 80,451 80,546 80,649 94,736 122,807 149,878 187,935 234,979 281,1022 335,1054 396,1073 457,1092 522,1102 590,1102 675,1102 748,1087 809,1057 869,1027 918,986 957,932 996,878 1024,814 1042,739 1060,664 1069,582 1069,491 L 1069,491 375,491 C 375,445 379,402 387,363 395,323 408,289 426,261 444,232 467,209 496,193 525,176 559,168 600,168 649,168 690,179 721,200 752,221 775,253 788,297 L 1053,274 C 1041,243 1024,211 1003,176 981,141 952,110 916,81 880,52 835,28 782,9 728,-10 663,-20 586,-20 Z M 586,925 C 557,925 531,920 506,911 481,901 459,886 441,865 422,844 407,816 396,783 385,750 378,710 377,663 L 797,663 C 792,750 771,816 734,860 697,903 648,925 586,925 Z"/>
+   <glyph unicode="c" horiz-adv-x="1007" d="M 594,-20 C 508,-20 433,-7 369,20 304,47 251,84 208,133 165,182 133,240 112,309 91,377 80,452 80,535 80,625 92,705 115,776 138,846 172,905 216,954 260,1002 314,1039 379,1064 443,1089 516,1102 598,1102 668,1102 730,1092 785,1073 839,1054 886,1028 925,995 964,963 996,924 1021,879 1045,834 1062,786 1071,734 L 788,734 C 780,787 760,830 728,861 696,893 651,909 592,909 517,909 462,878 427,816 392,754 375,664 375,546 375,297 449,172 596,172 649,172 694,188 730,221 766,253 788,302 797,366 L 1079,366 C 1072,315 1057,267 1034,220 1010,174 978,133 938,97 897,62 848,33 791,12 734,-9 668,-20 594,-20 Z"/>
+   <glyph unicode="a" horiz-adv-x="1112" d="M 393,-20 C 341,-20 295,-13 254,2 213,16 178,37 149,65 120,93 98,127 83,168 68,208 60,255 60,307 60,371 71,425 94,469 116,513 146,548 185,575 224,602 269,622 321,634 373,647 428,653 487,653 L 720,653 720,709 C 720,748 717,782 710,808 703,835 692,857 679,873 666,890 649,902 630,909 610,916 587,920 562,920 539,920 518,918 500,913 481,909 465,901 452,890 439,879 428,864 420,845 411,826 405,803 402,774 L 109,774 C 117,822 132,866 153,906 174,946 204,981 242,1010 279,1039 326,1062 381,1078 436,1094 500,1102 574,1102 641,1102 701,1094 754,1077 807,1060 851,1036 888,1003 925,970 953,929 972,881 991,833 1001,777 1001,714 L 1001,320 C 1001,295 1002,272 1005,252 1007,232 1011,215 1018,202 1024,188 1033,178 1045,171 1056,164 1071,160 1090,160 1111,160 1132,162 1152,166 L 1152,14 C 1135,10 1120,6 1107,3 1094,0 1080,-3 1067,-5 1054,-7 1040,-9 1025,-10 1010,-11 992,-12 972,-12 901,-12 849,5 816,40 782,75 762,126 755,193 L 749,193 C 712,126 664,73 606,36 547,-1 476,-20 393,-20 Z M 720,499 L 576,499 C 546,499 518,497 491,493 464,490 440,482 420,470 399,459 383,442 371,420 359,397 353,367 353,329 353,277 365,239 389,214 412,189 444,176 483,176 519,176 552,184 581,199 610,214 635,234 656,259 676,284 692,312 703,345 714,377 720,411 720,444 L 720,499 Z"/>
+   <glyph unicode="S" horiz-adv-x="1218" d="M 1286,406 C 1286,342 1274,284 1251,232 1228,179 1192,134 1143,97 1094,60 1031,31 955,11 878,-10 787,-20 682,-20 589,-20 506,-12 435,5 364,22 303,46 252,79 201,112 159,152 128,201 96,249 73,304 59,367 L 344,414 C 352,383 364,354 379,328 394,302 416,280 443,261 470,242 503,227 544,217 584,206 633,201 690,201 790,201 867,216 920,247 973,277 999,324 999,389 999,428 988,459 967,484 946,509 917,529 882,545 847,561 806,574 760,585 714,596 666,606 616,616 576,625 536,635 496,645 456,655 418,667 382,681 345,695 311,712 280,731 249,750 222,774 199,803 176,831 158,864 145,902 132,940 125,985 125,1036 125,1106 139,1166 167,1216 195,1266 234,1307 284,1339 333,1370 392,1393 461,1408 530,1423 605,1430 686,1430 778,1430 857,1423 923,1409 988,1394 1043,1372 1088,1343 1132,1314 1167,1277 1193,1233 1218,1188 1237,1136 1249,1077 L 963,1038 C 948,1099 919,1144 874,1175 829,1206 764,1221 680,1221 628,1221 585,1217 551,1208 516,1199 489,1186 469,1171 448,1156 434,1138 425,1118 416,1097 412,1076 412,1053 412,1018 420,990 437,968 454,945 477,927 507,912 537,897 573,884 615,874 656,863 702,853 752,842 796,833 840,823 883,813 926,802 968,790 1007,776 1046,762 1083,745 1117,725 1151,705 1181,681 1206,652 1231,623 1250,588 1265,548 1279,508 1286,461 1286,406 Z"/>
+   <glyph unicode="I" horiz-adv-x="292" d="M 137,0 L 137,1409 432,1409 432,0 137,0 Z"/>
+   <glyph unicode="E" horiz-adv-x="1139" d="M 137,0 L 137,1409 1245,1409 1245,1181 432,1181 432,827 1184,827 1184,599 432,599 432,228 1286,228 1286,0 137,0 Z"/>
+   <glyph unicode=")" horiz-adv-x="583" d="M 2,-425 C 55,-347 101,-270 139,-196 177,-120 208,-44 233,33 257,110 275,190 286,272 297,353 303,439 303,530 303,620 297,706 286,788 275,869 257,949 233,1026 208,1103 177,1180 139,1255 101,1330 55,1407 2,1484 L 283,1484 C 334,1410 379,1337 416,1264 453,1191 484,1116 509,1039 533,962 551,882 563,799 574,716 580,626 580,531 580,436 574,347 563,264 551,180 533,99 509,22 484,-55 453,-131 416,-204 379,-277 334,-351 283,-425 L 2,-425 Z"/>
+   <glyph unicode="(" horiz-adv-x="583" d="M 399,-425 C 348,-351 303,-277 266,-204 229,-131 198,-55 174,22 149,99 131,180 120,264 108,347 102,436 102,531 102,626 108,716 120,799 131,882 149,962 174,1039 198,1116 229,1191 266,1264 303,1337 348,1410 399,1484 L 680,1484 C 627,1407 581,1330 543,1255 505,1180 474,1103 450,1026 425,949 407,869 396,788 385,706 379,620 379,530 379,439 385,353 396,272 407,190 425,110 450,33 474,-44 505,-120 543,-196 581,-270 627,-347 680,-425 L 399,-425 Z"/>
+   <glyph unicode=" " horiz-adv-x="556"/>
+  </font>
+ </defs>
+ <defs class="TextShapeIndex">
+  <g ooo:slide="id1" ooo:id-list="id3 id4 id5 id6 id7 id8 id9 id10 id11 id12 id13 id14 id15 id16 id17 id18 id19 id20 id21 id22 id23 id24 id25 id26 id27 id28 id29 id30 id31 id32 id33 id34 id35 id36 id37 id38 id39 id40 id41 id42"/>
+ </defs>
+ <defs class="EmbeddedBulletChars">
+  <g id="bullet-char-template(57356)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 580,1141 L 1163,571 580,0 -4,571 580,1141 Z"/>
+  </g>
+  <g id="bullet-char-template(57354)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 8,1128 L 1137,1128 1137,0 8,0 8,1128 Z"/>
+  </g>
+  <g id="bullet-char-template(10146)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 174,0 L 602,739 174,1481 1456,739 174,0 Z M 1358,739 L 309,1346 659,739 1358,739 Z"/>
+  </g>
+  <g id="bullet-char-template(10132)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 2015,739 L 1276,0 717,0 1260,543 174,543 174,936 1260,936 717,1481 1274,1481 2015,739 Z"/>
+  </g>
+  <g id="bullet-char-template(10007)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 0,-2 C -7,14 -16,27 -25,37 L 356,567 C 262,823 215,952 215,954 215,979 228,992 255,992 264,992 276,990 289,987 310,991 331,999 354,1012 L 381,999 492,748 772,1049 836,1024 860,1049 C 881,1039 901,1025 922,1006 886,937 835,863 770,784 769,783 710,716 594,584 L 774,223 C 774,196 753,168 711,139 L 727,119 C 717,90 699,76 672,76 641,76 570,178 457,381 L 164,-76 C 142,-110 111,-127 72,-127 30,-127 9,-110 8,-76 1,-67 -2,-52 -2,-32 -2,-23 -1,-13 0,-2 Z"/>
+  </g>
+  <g id="bullet-char-template(10004)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 285,-33 C 182,-33 111,30 74,156 52,228 41,333 41,471 41,549 55,616 82,672 116,743 169,778 240,778 293,778 328,747 346,684 L 369,508 C 377,444 397,411 428,410 L 1163,1116 C 1174,1127 1196,1133 1229,1133 1271,1133 1292,1118 1292,1087 L 1292,965 C 1292,929 1282,901 1262,881 L 442,47 C 390,-6 338,-33 285,-33 Z"/>
+  </g>
+  <g id="bullet-char-template(9679)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 813,0 C 632,0 489,54 383,161 276,268 223,411 223,592 223,773 276,916 383,1023 489,1130 632,1184 813,1184 992,1184 1136,1130 1245,1023 1353,916 1407,772 1407,592 1407,412 1353,268 1245,161 1136,54 992,0 813,0 Z"/>
+  </g>
+  <g id="bullet-char-template(8226)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 346,457 C 273,457 209,483 155,535 101,586 74,649 74,723 74,796 101,859 155,911 209,963 273,989 346,989 419,989 480,963 531,910 582,859 608,796 608,723 608,648 583,586 532,535 482,483 420,457 346,457 Z"/>
+  </g>
+  <g id="bullet-char-template(8211)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M -4,459 L 1135,459 1135,606 -4,606 -4,459 Z"/>
+  </g>
+  <g id="bullet-char-template(61548)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 173,740 C 173,903 231,1043 346,1159 462,1274 601,1332 765,1332 928,1332 1067,1274 1183,1159 1299,1043 1357,903 1357,740 1357,577 1299,437 1183,322 1067,206 928,148 765,148 601,148 462,206 346,322 231,437 173,577 173,740 Z"/>
+  </g>
+ </defs>
+ <defs class="TextEmbeddedBitmaps"/>
+ <g>
+  <g id="id2" class="Master_Slide">
+   <g id="bg-id2" class="Background"/>
+   <g id="bo-id2" class="BackgroundObjects"/>
+  </g>
+ </g>
+ <g class="SlideGroup">
+  <g>
+   <g id="container-id1">
+    <g id="id1" class="Slide" clip-path="url(#presentation_clip_path)">
+     <g class="Page">
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id3">
+        <rect class="BoundingBox" stroke="none" fill="none" x="16493" y="6587" width="2416" height="2289"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 16494,6588 L 18907,8874"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id4">
+        <rect class="BoundingBox" stroke="none" fill="none" x="13572" y="1506" width="2036" height="1909"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14589,1507 C 15165,1507 15605,1919 15605,2459 15605,2999 15165,3412 14589,3412 14013,3412 13573,2999 13573,2459 13573,1919 14013,1507 14589,1507 Z M 13573,1507 L 13573,1507 Z M 15606,3413 L 15606,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14589,1507 C 15165,1507 15605,1919 15605,2459 15605,2999 15165,3412 14589,3412 14013,3412 13573,2999 13573,2459 13573,1919 14013,1507 14589,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13573,1507 L 13573,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15606,3413 L 15606,3413 Z"/>
+        <path fill="rgb(91,127,166)" stroke="none" d="M 14258,2005 C 14311,2005 14352,2076 14352,2169 14352,2262 14311,2333 14258,2333 14205,2333 14165,2262 14165,2169 14165,2076 14205,2005 14258,2005 Z M 13573,1507 L 13573,1507 Z M 15606,3413 L 15606,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14258,2005 C 14311,2005 14352,2076 14352,2169 14352,2262 14311,2333 14258,2333 14205,2333 14165,2262 14165,2169 14165,2076 14205,2005 14258,2005 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13573,1507 L 13573,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15606,3413 L 15606,3413 Z"/>
+        <path fill="rgb(91,127,166)" stroke="none" d="M 14916,2005 C 14969,2005 15010,2076 15010,2169 15010,2262 14969,2333 14916,2333 14863,2333 14823,2262 14823,2169 14823,2076 14863,2005 14916,2005 Z M 13573,1507 L 13573,1507 Z M 15606,3413 L 15606,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14916,2005 C 14969,2005 15010,2076 15010,2169 15010,2262 14969,2333 14916,2333 14863,2333 14823,2262 14823,2169 14823,2076 14863,2005 14916,2005 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13573,1507 L 13573,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15606,3413 L 15606,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14031,2787 C 14389,3141 14789,3141 15147,2787"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13573,1507 L 13573,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15606,3413 L 15606,3413 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id5">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7349" y="1506" width="2036" height="1909"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 8366,1507 C 8942,1507 9382,1919 9382,2459 9382,2999 8942,3412 8366,3412 7790,3412 7350,2999 7350,2459 7350,1919 7790,1507 8366,1507 Z M 7350,1507 L 7350,1507 Z M 9383,3413 L 9383,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8366,1507 C 8942,1507 9382,1919 9382,2459 9382,2999 8942,3412 8366,3412 7790,3412 7350,2999 7350,2459 7350,1919 7790,1507 8366,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7350,1507 L 7350,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9383,3413 L 9383,3413 Z"/>
+        <path fill="rgb(91,127,166)" stroke="none" d="M 8035,2005 C 8088,2005 8129,2076 8129,2169 8129,2262 8088,2333 8035,2333 7982,2333 7942,2262 7942,2169 7942,2076 7982,2005 8035,2005 Z M 7350,1507 L 7350,1507 Z M 9383,3413 L 9383,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8035,2005 C 8088,2005 8129,2076 8129,2169 8129,2262 8088,2333 8035,2333 7982,2333 7942,2262 7942,2169 7942,2076 7982,2005 8035,2005 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7350,1507 L 7350,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9383,3413 L 9383,3413 Z"/>
+        <path fill="rgb(91,127,166)" stroke="none" d="M 8693,2005 C 8746,2005 8787,2076 8787,2169 8787,2262 8746,2333 8693,2333 8640,2333 8600,2262 8600,2169 8600,2076 8640,2005 8693,2005 Z M 7350,1507 L 7350,1507 Z M 9383,3413 L 9383,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8693,2005 C 8746,2005 8787,2076 8787,2169 8787,2262 8746,2333 8693,2333 8640,2333 8600,2262 8600,2169 8600,2076 8640,2005 8693,2005 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7350,1507 L 7350,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9383,3413 L 9383,3413 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7808,2787 C 8166,3141 8566,3141 8924,2787"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7350,1507 L 7350,1507 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9383,3413 L 9383,3413 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id6">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12682" y="5570" width="4194" height="1400"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14779,6968 L 12683,6968 12683,5571 16874,5571 16874,6968 14779,6968 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14779,6968 L 12683,6968 12683,5571 16874,5571 16874,6968 14779,6968 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="13528" y="6441"><tspan fill="rgb(0,0,0)" stroke="none">Workbench</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id7">
+        <rect class="BoundingBox" stroke="none" fill="none" x="5824" y="8618" width="4194" height="1654"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 7921,10270 L 5825,10270 5825,8619 10016,8619 10016,10270 7921,10270 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7921,10270 L 5825,10270 5825,8619 10016,8619 10016,10270 7921,10270 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="6784" y="9339"><tspan fill="rgb(0,0,0)" stroke="none">keepproxy</tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="6850" y="9894"><tspan fill="rgb(0,0,0)" stroke="none">keep-web</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id8">
+        <rect class="BoundingBox" stroke="none" fill="none" x="22080" y="8492" width="4194" height="1781"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 24177,10271 L 22081,10271 22081,8493 26272,8493 26272,10271 24177,10271 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 24177,10271 L 22081,10271 22081,8493 26272,8493 26272,10271 24177,10271 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="22856" y="9554"><tspan fill="rgb(0,0,0)" stroke="none">arv-git-httpd</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id9">
+        <rect class="BoundingBox" stroke="none" fill="none" x="17635" y="8492" width="4194" height="1781"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 19732,10271 L 17636,10271 17636,8493 21827,8493 21827,10271 19732,10271 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 19732,10271 L 17636,10271 17636,8493 21827,8493 21827,10271 19732,10271 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="19008" y="9554"><tspan fill="rgb(0,0,0)" stroke="none">arv-ws</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id10">
+        <rect class="BoundingBox" stroke="none" fill="none" x="5825" y="15730" width="3559" height="2416"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 7604,18144 L 5826,18144 5826,15731 9382,15731 9382,18144 7604,18144 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7604,18144 L 5826,18144 5826,15731 9382,15731 9382,18144 7604,18144 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id11">
+        <rect class="BoundingBox" stroke="none" fill="none" x="6079" y="16111" width="3559" height="2416"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 7858,18525 L 6080,18525 6080,16112 9636,16112 9636,18525 7858,18525 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7858,18525 L 6080,18525 6080,16112 9636,16112 9636,18525 7858,18525 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id12">
+        <rect class="BoundingBox" stroke="none" fill="none" x="6460" y="16492" width="3559" height="2416"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 8239,18906 L 6461,18906 6461,16493 10017,16493 10017,18906 8239,18906 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8239,18906 L 6461,18906 6461,16493 10017,16493 10017,18906 8239,18906 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="7149" y="17871"><tspan fill="rgb(0,0,0)" stroke="none">keepstore</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id13">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12556" y="15730" width="3559" height="2416"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14335,18144 L 12557,18144 12557,15731 16113,15731 16113,18144 14335,18144 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14335,18144 L 12557,18144 12557,15731 16113,15731 16113,18144 14335,18144 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id14">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12810" y="16111" width="3559" height="2416"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14589,18525 L 12811,18525 12811,16112 16367,16112 16367,18525 14589,18525 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14589,18525 L 12811,18525 12811,16112 16367,16112 16367,18525 14589,18525 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id15">
+        <rect class="BoundingBox" stroke="none" fill="none" x="13191" y="16492" width="3559" height="2416"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14970,18906 L 13192,18906 13192,16493 16748,16493 16748,18906 14970,18906 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14970,18906 L 13192,18906 13192,16493 16748,16493 16748,18906 14970,18906 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="13671" y="17871"><tspan fill="rgb(0,0,0)" stroke="none">compute0...</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id16">
+        <rect class="BoundingBox" stroke="none" fill="none" x="15477" y="10143" width="5972" height="5972"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 15478,10144 L 21447,16113"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id17">
+        <rect class="BoundingBox" stroke="none" fill="none" x="14588" y="6968" width="3" height="1527"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 14589,6969 L 14589,8493"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id18">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7984" y="10270" width="3" height="5464"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 7985,10271 L 7985,15732"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id19">
+        <rect class="BoundingBox" stroke="none" fill="none" x="10016" y="17382" width="2543" height="3"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 10017,17383 L 12557,17383"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id20">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12047" y="13064" width="5210" height="1781"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14652,14843 L 12048,14843 12048,13065 17255,13065 17255,14843 14652,14843 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14652,14843 L 12048,14843 12048,13065 17255,13065 17255,14843 14652,14843 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="12209" y="14126"><tspan fill="rgb(0,0,0)" stroke="none">crunch-dispatch-slurm</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id21">
+        <rect class="BoundingBox" stroke="none" fill="none" x="14588" y="10143" width="3" height="2924"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 14589,10144 L 14589,13065"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id22">
+        <rect class="BoundingBox" stroke="none" fill="none" x="14588" y="14842" width="3" height="892"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 14589,14843 L 14589,15732"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id23">
+        <rect class="BoundingBox" stroke="none" fill="none" x="1582" y="12123" width="24872" height="107"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 1635,12176 L 1844,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 1978,12176 L 2187,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 2322,12176 L 2531,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 2665,12176 L 2874,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 3009,12176 L 3218,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 3352,12176 L 3561,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 3696,12176 L 3904,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 4039,12176 L 4248,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 4383,12176 L 4591,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 4726,12176 L 4935,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 5069,12176 L 5278,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 5413,12176 L 5622,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 5756,12176 L 5965,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 6100,12176 L 6309,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 6443,12176 L 6652,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 6787,12176 L 6995,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 7130,12176 L 7339,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 7473,12176 L 7682,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 7817,12176 L 8026,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 8160,12176 L 8369,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 8504,12176 L 8713,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 8847,12176 L 9056,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 9191,12176 L 9399,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 9534,12176 L 9743,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 9878,12176 L 10086,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 10221,12176 L 10430,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 10564,12176 L 10773,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 10908,12176 L 11117,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 11251,12176 L 11460,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 11595,12176 L 11804,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 11938,12176 L 12147,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 12282,12176 L 12490,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 12625,12176 L 12834,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 12969,12176 L 13177,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 13312,12176 L 13521,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 13655,12176 L 13864,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 13999,12176 L 14208,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 14342,12176 L 14551,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 14686,12176 L 14895,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 15029,12176 L 15238,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 15373,12176 L 15581,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 15716,12176 L 15925,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 16059,12176 L 16268,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 16403,12176 L 16612,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 16746,12176 L 16955,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 17090,12176 L 17299,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 17433,12176 L 17642,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 17777,12176 L 17986,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 18120,12176 L 18329,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 18464,12176 L 18672,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 18807,12176 L 19016,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 19150,12176 L 19359,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 19494,12176 L 19703,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 19837,12176 L 20046,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 20181,12176 L 20390,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 20524,12176 L 20733,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 20868,12176 L 21076,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 21211,12176 L 21420,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 21555,12176 L 21763,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 21898,12176 L 22107,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 22241,12176 L 22450,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 22585,12176 L 22794,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 22928,12176 L 23137,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 23272,12176 L 23481,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 23615,12176 L 23824,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 23959,12176 L 24167,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 24302,12176 L 24511,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 24645,12176 L 24854,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 24989,12176 L 25198,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 25332,12176 L 25541,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 25676,12176 L 25885,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 26019,12176 L 26228,12176"/>
+        <path fill="none" stroke="rgb(0,0,0)" stroke-width="106" stroke-linejoin="round" d="M 26363,12176 L 26400,12176"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id24">
+        <rect class="BoundingBox" stroke="none" fill="none" x="16366" y="9381" width="1273" height="3"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 16367,9382 L 17637,9382"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id25">
+        <rect class="BoundingBox" stroke="none" fill="none" x="22462" y="12936" width="3306" height="2417"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 24114,12937 C 23213,12937 22463,13074 22463,13238 L 22463,15049 C 22463,15213 23213,15351 24114,15351 25015,15351 25766,15213 25766,15049 L 25766,13238 C 25766,13074 25015,12937 24114,12937 L 24114,12937 Z M 22463,12937 L 22463,12937 Z M 25766,15351 L 25766,15351 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 24114,12937 C 23213,12937 22463,13074 22463,13238 L 22463,15049 C 22463,15213 23213,15351 24114,15351 25015,15351 25766,15213 25766,15049 L 25766,13238 C 25766,13074 25015,12937 24114,12937 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 22463,12937 L 22463,12937 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 25766,15351 L 25766,15351 Z"/>
+        <path fill="rgb(165,195,226)" stroke="none" d="M 24114,12937 C 23213,12937 22463,13074 22463,13238 22463,13403 23213,13540 24114,13540 25015,13540 25766,13403 25766,13238 25766,13074 25015,12937 24114,12937 L 24114,12937 Z M 22463,12937 L 22463,12937 Z M 25766,15351 L 25766,15351 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 24114,12937 C 23213,12937 22463,13074 22463,13238 22463,13403 23213,13540 24114,13540 25015,13540 25766,13403 25766,13238 25766,13074 25015,12937 24114,12937 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 22463,12937 L 22463,12937 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 25766,15351 L 25766,15351 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="23162" y="14466"><tspan fill="rgb(0,0,0)" stroke="none">git repos</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id26">
+        <rect class="BoundingBox" stroke="none" fill="none" x="23986" y="10270" width="3" height="2670"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 23987,10271 L 23987,12938"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id27">
+        <rect class="BoundingBox" stroke="none" fill="none" x="14588" y="4301" width="3" height="1273"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 14589,4302 L 14589,5572"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id28">
+        <rect class="BoundingBox" stroke="none" fill="none" x="9381" y="4809" width="3432" height="3686"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 12811,8493 L 9382,4810"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id29">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7984" y="4809" width="3" height="3813"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 7985,8620 L 7985,4810"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id30">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7350" y="3666" width="2541" height="636"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="7956" y="4105"><tspan fill="rgb(0,0,0)" stroke="none">CLI user</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id31">
+        <rect class="BoundingBox" stroke="none" fill="none" x="13319" y="3539" width="2541" height="636"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="13831" y="3978"><tspan fill="rgb(0,0,0)" stroke="none">Web user</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id32">
+        <rect class="BoundingBox" stroke="none" fill="none" x="5445" y="10651" width="2541" height="636"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="5502" y="11090"><tspan fill="rgb(0,0,0)" stroke="none">Storage access</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id33">
+        <rect class="BoundingBox" stroke="none" fill="none" x="1254" y="10524" width="2541" height="1398"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="700"><tspan class="TextPosition" x="1783" y="10950"><tspan fill="rgb(0,0,0)" stroke="none">External </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="700"><tspan class="TextPosition" x="1957" y="11344"><tspan fill="rgb(0,0,0)" stroke="none">facing </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="700"><tspan class="TextPosition" x="1824" y="11738"><tspan fill="rgb(0,0,0)" stroke="none">services</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id34">
+        <rect class="BoundingBox" stroke="none" fill="none" x="1123" y="12556" width="2811" height="1398"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="700"><tspan class="TextPosition" x="1889" y="12982"><tspan fill="rgb(0,0,0)" stroke="none">Internal</tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="700"><tspan class="TextPosition" x="1756" y="13376"><tspan fill="rgb(0,0,0)" stroke="none">Services </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="700"><tspan class="TextPosition" x="1106" y="13770"><tspan fill="rgb(0,0,0)" stroke="none">(private network)</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id35">
+        <rect class="BoundingBox" stroke="none" fill="none" x="17636" y="10525" width="3938" height="1017"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="17792" y="10957"><tspan fill="rgb(0,0,0)" stroke="none">Publish change events </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="18294" y="11351"><tspan fill="rgb(0,0,0)" stroke="none">over websockets</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id36">
+        <rect class="BoundingBox" stroke="none" fill="none" x="11508" y="10271" width="2855" height="1525"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="11492" y="10760"><tspan fill="rgb(0,0,0)" stroke="none">Storage metadata,</tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="11801" y="11154"><tspan fill="rgb(0,0,0)" stroke="none">Compute jobs,</tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="11977" y="11548"><tspan fill="rgb(0,0,0)" stroke="none">Permissions</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id37">
+        <rect class="BoundingBox" stroke="none" fill="none" x="5444" y="19033" width="5462" height="636"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="5526" y="19472"><tspan fill="rgb(0,0,0)" stroke="none">Content-addressed object storage</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id38">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12811" y="19033" width="4065" height="636"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-weight="400"><tspan class="TextPosition" x="13074" y="19472"><tspan fill="rgb(0,0,0)" stroke="none">Elastic compute nodes</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id39">
+        <rect class="BoundingBox" stroke="none" fill="none" x="1000" y="1127" width="5843" height="2033"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="1190" y="2008"><tspan fill="rgb(0,0,0)" stroke="none">An Arvados cluster </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="1595" y="2719"><tspan fill="rgb(0,0,0)" stroke="none">From 30000 feet</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id40">
+        <rect class="BoundingBox" stroke="none" fill="none" x="19795" y="15985" width="3814" height="3306"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 21701,15986 C 20662,15986 19796,16173 19796,16398 L 19796,18876 C 19796,19101 20662,19289 21701,19289 22740,19289 23607,19101 23607,18876 L 23607,16398 C 23607,16173 22740,15986 21701,15986 L 21701,15986 Z M 19796,15986 L 19796,15986 Z M 23607,19289 L 23607,19289 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 21701,15986 C 20662,15986 19796,16173 19796,16398 L 19796,18876 C 19796,19101 20662,19289 21701,19289 22740,19289 23607,19101 23607,18876 L 23607,16398 C 23607,16173 22740,15986 21701,15986 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 19796,15986 L 19796,15986 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 23607,19289 L 23607,19289 Z"/>
+        <path fill="rgb(165,195,226)" stroke="none" d="M 21701,15986 C 20662,15986 19796,16173 19796,16398 19796,16624 20662,16811 21701,16811 22740,16811 23607,16624 23607,16398 23607,16173 22740,15986 21701,15986 L 21701,15986 Z M 19796,15986 L 19796,15986 Z M 23607,19289 L 23607,19289 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 21701,15986 C 20662,15986 19796,16173 19796,16398 19796,16624 20662,16811 21701,16811 22740,16811 23607,16624 23607,16398 23607,16173 22740,15986 21701,15986 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 19796,15986 L 19796,15986 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 23607,19289 L 23607,19289 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="20377" y="18015"><tspan fill="rgb(0,0,0)" stroke="none">Postgres db</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id41">
+        <rect class="BoundingBox" stroke="none" fill="none" x="10016" y="9381" width="2924" height="3"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 10017,9382 L 12938,9382"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id42">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12810" y="8491" width="3559" height="1654"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 14589,10143 L 12811,10143 12811,8492 16367,8492 16367,10143 14589,10143 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14589,10143 L 12811,10143 12811,8492 16367,8492 16367,10143 14589,10143 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="494px" font-weight="400"><tspan class="TextPosition" x="14189" y="9489"><tspan fill="rgb(0,0,0)" stroke="none">API</tspan></tspan></tspan></text>
+       </g>
+      </g>
+     </g>
+    </g>
+   </g>
+  </g>
+ </g>
+</svg>
\ No newline at end of file
diff --git a/doc/images/Crunch_dispatch.svg b/doc/images/Crunch_dispatch.svg
new file mode 100644 (file)
index 0000000..69c2193
--- /dev/null
@@ -0,0 +1,7 @@
+<?xml version="1.0" standalone="yes"?>
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0 -->
+
+<svg version="1.1" viewBox="0.0 0.0 960.0 540.0" fill="none" stroke="none" stroke-linecap="square" stroke-miterlimit="10" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"><clipPath id="g115a30441b_0_35.0"><path d="m0 0l960.0 0l0 540.0l-960.0 0l0 -540.0z" clip-rule="nonzero"></path></clipPath><g clip-path="url(#g115a30441b_0_35.0)"><path fill="#ffffff" d="m0 0l960.0 0l0 540.0l-960.0 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m32.72441 46.721786l894.55115 0l0 60.125984l-894.55115 0z" fill-rule="nonzero"></path><path fill="#000000" d="m63.47441 82.28053l3.5 0.875q-1.09375 4.328125 -3.96875 6.59375q-2.859375 2.265625 -6.984375 2.265625q-4.28125 0 -6.96875 -1.734375q-2.6875 -1.75 -4.09375 -5.046875q-1.390625 -3.3125 -1.390625 -7.109375q0 -4.140625 1.578125 -7.21875q1.578125 -3.078125 4.5 -4.671875q2.921875 -1.609375 6.421875 -1.609375q3.96875 0 6.671875 2.03125q2.71875 2.015625 3.796875 5.6875l-3.453125 0.8125q-0.921875 -2.890625 -2.6875 -4.203125q-1.75 -1.328125 -4.40625 -1.328125q-3.046875 0 -5.09375 1.46875q-2.046875 1.453125 -2.890625 3.921875q-0.828125 2.46875 -0.828125 5.09375q0 3.375 0.984375 5.890625q0.984375 2.515625 3.0625 3.765625q2.078125 1.25 4.5 1.25q2.953125 0 4.984375 -1.6875q2.046875 -1.703125 2.765625 -5.046875zm7.613434 9.28125l0 -19.1875l2.921875 0l0 2.90625q1.125 -2.03125 2.0625 -2.6875q0.953125 -0.65625 2.09375 -0.65625q1.640625 0 3.34375 1.046875l-1.125 3.015625q-1.1875 -0.703125 -2.375 -0.703125q-1.078125 0 -1.921875 0.640625q-0.84375 0.640625 -1.203125 1.78125q-0.546875 1.734375 -0.546875 3.796875l0 10.046875l-3.25 0zm25.039932 0l0 -2.8125q-2.25 3.25 -6.09375 3.25q-1.6875 0 -3.171875 -0.65625q-1.46875 -0.65625 -2.1875 -1.640625q-0.703125 -0.984375 -1.0 -2.40625q-0.203125 -0.953125 -0.203125 -3.03125l0 -11.890625l3.265625 0l0 10.640625q0 2.546875 0.1875 3.4375q0.3125 1.28125 1.296875 2.015625q1.0 0.734375 2.46875 0.734375q1.453125 0 2.734375 -0.75q1.296875 -0.75 1.828125 -2.046875q0.53125 -1.296875 0.53125 -3.75l0 -10.28125l3.25 0l0 19.1875l-2.90625 0zm8.182007 0l0 -19.1875l2.921875 0l0 2.734375q2.125 -3.171875 6.109375 -3.171875q1.734375 0 3.1875 0.625q1.453125 0.625 2.171875 1.640625q0.734375 1.015625 1.015625 2.40625q0.1875 0.890625 0.1875 3.15625l0 11.796875l-3.25 0l0 -11.671875q0 -1.984375 -0.390625 -2.96875q-0.375 -0.984375 -1.34375 -1.5625q-0.953125 -0.59375 -2.265625 -0.59375q-2.078125 0 -3.59375 1.3125q-1.5 1.3125 -1.5 5.0l0 10.484375l-3.25 0zm33.275757 -7.03125l3.203125 0.421875q-0.515625 3.296875 -2.6875 5.171875q-2.15625 1.875 -5.296875 1.875q-3.9375 0 -6.328125 -2.578125q-2.390625 -2.578125 -2.390625 -7.375q0 -3.109375 1.015625 -5.4375q1.03125 -2.34375 3.140625 -3.5q2.109375 -1.171875 4.578125 -1.171875q3.125 0 5.109375 1.59375q2.0 1.578125 2.546875 4.484375l-3.15625 0.484375q-0.453125 -1.9375 -1.609375 -2.90625q-1.140625 -0.984375 -2.765625 -0.984375q-2.453125 0 -4.0 1.765625q-1.53125 1.765625 -1.53125 5.578125q0 3.859375 1.484375 5.625q1.484375 1.75 3.875 1.75q1.90625 0 3.1875 -1.171875q1.28125 -1.1875 1.625 -3.625zm6.1484375 7.03125l0 -26.484375l3.25 0l0 9.5q2.28125 -2.640625 5.75 -2.640625q2.125 0 3.703125 0.84375q1.578125 0.84375 2.25 2.328125q0.671875 1.46875 0.671875 4.296875l0 12.15625l-3.25 0l0 -12.15625q0 -2.4375 -1.0625 -3.546875q-1.046875 -1.109375 -2.984375 -1.109375q-1.4375 0 -2.71875 0.75q-1.265625 0.734375 -1.8125 2.03125q-0.546875 1.28125 -0.546875 3.53125l0 10.5l-3.25 0zm31.552963 0l0 -26.484375l9.125 0q3.078125 0 4.703125 0.375q2.28125 0.53125 3.890625 1.90625q2.09375 1.765625 
3.125 4.53125q1.046875 2.75 1.046875 6.28125q0 3.015625 -0.703125 5.359375q-0.703125 2.328125 -1.8125 3.859375q-1.09375 1.515625 -2.40625 2.390625q-1.3125 0.875 -3.171875 1.328125q-1.84375 0.453125 -4.25 0.453125l-9.546875 0zm3.5 -3.125l5.65625 0q2.625 0 4.109375 -0.484375q1.484375 -0.484375 2.375 -1.375q1.25 -1.25 1.9375 -3.34375q0.703125 -2.109375 0.703125 -5.109375q0 -4.15625 -1.375 -6.390625q-1.359375 -2.234375 -3.3125 -2.984375q-1.40625 -0.546875 -4.53125 -0.546875l-5.5625 0l0 20.234375zm23.050934 -19.625l0 -3.734375l3.25 0l0 3.734375l-3.25 0zm0 22.75l0 -19.1875l3.25 0l0 19.1875l-3.25 0zm6.9806213 -5.734375l3.21875 -0.5q0.265625 1.9375 1.5 2.96875q1.234375 1.03125 3.46875 1.03125q2.234375 0 3.3125 -0.90625q1.09375 -0.921875 1.09375 -2.15625q0 -1.09375 -0.96875 -1.734375q-0.65625 -0.4375 -3.3125 -1.09375q-3.578125 -0.90625 -4.96875 -1.5625q-1.375 -0.671875 -2.09375 -1.828125q-0.703125 -1.171875 -0.703125 -2.578125q0 -1.28125 0.578125 -2.375q0.59375 -1.09375 1.59375 -1.8125q0.765625 -0.5625 2.078125 -0.953125q1.3125 -0.390625 2.8125 -0.390625q2.25 0 3.953125 0.65625q1.71875 0.65625 2.53125 1.765625q0.8125 1.109375 1.109375 2.96875l-3.171875 0.4375q-0.21875 -1.484375 -1.265625 -2.3125q-1.03125 -0.84375 -2.921875 -0.84375q-2.25 0 -3.203125 0.75q-0.953125 0.734375 -0.953125 1.734375q0 0.625 0.390625 1.140625q0.40625 0.515625 1.25 0.859375q0.484375 0.1875 2.875 0.828125q3.453125 0.921875 4.8125 1.515625q1.359375 0.578125 2.140625 1.703125q0.78125 1.125 0.78125 2.78125q0 1.625 -0.953125 3.0625q-0.953125 1.4375 -2.75 2.234375q-1.78125 0.78125 -4.03125 0.78125q-3.75 0 -5.703125 -1.546875q-1.953125 -1.5625 -2.5 -4.625zm19.960938 13.09375l0 -26.546875l2.96875 0l0 2.5q1.046875 -1.46875 2.359375 -2.203125q1.328125 -0.734375 3.203125 -0.734375q2.453125 0 4.328125 1.265625q1.890625 1.265625 2.84375 3.578125q0.953125 2.296875 0.953125 5.046875q0 2.9375 -1.0625 5.296875q-1.046875 2.359375 -3.0625 3.625q-2.015625 1.25 -4.234375 1.25q-1.625 0 -2.921875 -0.6875q-1.296875 -0.6875 -2.125 -1.734375l0 9.34375l-3.25 0zm2.953125 -16.84375q0 3.703125 1.5 5.484375q1.5 1.765625 3.625 1.765625q2.171875 0 3.703125 -1.828125q1.546875 -1.84375 1.546875 -5.6875q0 -3.671875 -1.515625 -5.5q-1.5 -1.828125 -3.59375 -1.828125q-2.078125 0 -3.671875 1.953125q-1.59375 1.9375 -1.59375 5.640625zm30.322617 7.125q-1.796875 1.53125 -3.46875 2.171875q-1.671875 0.625 -3.5937347 0.625q-3.15625 0 -4.859375 -1.546875q-1.6875 -1.546875 -1.6875 -3.953125q0 -1.40625 0.640625 -2.5625q0.640625 -1.171875 1.671875 -1.875q1.046875 -0.703125 2.34375 -1.0625q0.953125 -0.265625 2.890625 -0.5q3.9374847 -0.46875 5.7968597 -1.109375q0.015625 -0.671875 0.015625 -0.859375q0 -1.984375 -0.921875 -2.796875q-1.25 -1.09375 -3.703125 -1.09375q-2.2968597 0 -3.3906097 0.796875q-1.09375 0.796875 -1.609375 2.84375l-3.1875 -0.4375q0.4375 -2.03125 1.421875 -3.28125q1.0 -1.265625 2.875 -1.9375q1.890625 -0.6875 4.3593597 -0.6875q2.46875 0 4.0 0.578125q1.53125 0.578125 2.25 1.453125q0.734375 0.875 1.015625 2.21875q0.15625 0.828125 0.15625 3.0l0 4.328125q0 4.546875 0.203125 5.75q0.21875 1.1875 0.828125 2.296875l-3.390625 0q-0.5 -1.015625 -0.65625 -2.359375zm-0.265625 -7.265625q-1.765625 0.71875 -5.3125 1.21875q-1.9999847 0.296875 -2.8437347 0.65625q-0.828125 0.359375 -1.28125 1.0625q-0.4375 0.6875 -0.4375 1.53125q0 1.3125 0.984375 2.1875q0.984375 0.859375 2.875 0.859375q1.8749847 0 3.3437347 -0.828125q1.46875 -0.828125 2.15625 -2.25q0.515625 -1.09375 0.515625 -3.25l0 -1.1875zm15.619507 6.71875l0.46875 2.875q-1.375 0.28125 -2.46875 0.28125q-1.765625 0 -2.75 
-0.5625q-0.96875 -0.5625 -1.375 -1.46875q-0.390625 -0.90625 -0.390625 -3.84375l0 -11.03125l-2.375 0l0 -2.53125l2.375 0l0 -4.75l3.234375 -1.953125l0 6.703125l3.28125 0l0 2.53125l-3.28125 0l0 11.21875q0 1.390625 0.171875 1.796875q0.171875 0.390625 0.5625 0.625q0.390625 0.234375 1.109375 0.234375q0.546875 0 1.4375 -0.125zm15.777191 -4.125l3.203125 0.421875q-0.515625 3.296875 -2.6875 5.171875q-2.15625 1.875 -5.2968445 1.875q-3.9375 0 -6.328125 -2.578125q-2.390625 -2.578125 -2.390625 -7.375q0 -3.109375 1.015625 -5.4375q1.03125 -2.34375 3.140625 -3.5q2.109375 -1.171875 4.578125 -1.171875q3.1249695 0 5.1093445 1.59375q2.0 1.578125 2.546875 4.484375l-3.15625 0.484375q-0.453125 -1.9375 -1.609375 -2.90625q-1.140625 -0.984375 -2.7655945 -0.984375q-2.453125 0 -4.0 1.765625q-1.53125 1.765625 -1.53125 5.578125q0 3.859375 1.484375 5.625q1.484375 1.75 3.875 1.75q1.9062195 0 3.1874695 -1.171875q1.28125 -1.1875 1.625 -3.625zm6.1484375 7.03125l0 -26.484375l3.25 0l0 9.5q2.28125 -2.640625 5.75 -2.640625q2.125 0 3.703125 0.84375q1.578125 0.84375 2.25 2.328125q0.671875 1.46875 0.671875 4.296875l0 12.15625l-3.25 0l0 -12.15625q0 -2.4375 -1.0625 -3.546875q-1.046875 -1.109375 -2.984375 -1.109375q-1.4375 0 -2.71875 0.75q-1.265625 0.734375 -1.8125 2.03125q-0.546875 1.28125 -0.546875 3.53125l0 10.5l-3.25 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m145.72684 103.48555l673.67993 0l0 505.26257l-673.67993 0z" fill-rule="nonzero"></path><g transform="matrix(0.7017498687664041 0.0 0.0 0.7017535433070867 145.72683779527557 103.48555144356955)"><clipPath id="g115a30441b_0_35.1"><path d="m-2.842171E-14 0l960.0 0l0 720.0l-960.0 0z" clip-rule="nonzero"></path></clipPath><image clip-path="url(#g115a30441b_0_35.1)" fill="#000" width="960.0" height="720.0" x="0.0" y="0.0" preserveAspectRatio="none" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA8AAAALQCAYAAABfdxm0AACAAElEQVR42uy9DXBdTV6f2VXote47rybRBiWIxEmpggpUE4dSBlu6HpRwiTOlbJRCAUGZRAkyuvJ4ZkSNdnBNBJhFL+OaUjIecIFqyjNrQLMxjGFNyrOYxAkGtIxDzGCmBGVYUWtALM6uqHgXb8rsOlnD3r2/63/7/m/r3A99fz1PVZd0z0efPuf27dPP6T7dIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwJ+s915k4Vz7QPFmeODUzOEQiEnQmV39jJ4gkKHQAAAACAPaB9cGq6XCl/Xg4lAoGwS2GgeD0U3m6jBAIAAAAA2D35vRgr5IMTH3s58dEbpelP3CIQCDsUvvW//bHSO7/2wv+n39xb73nfL4bCBzsoiQAAAAAAdphc/lxPuRL+QhXxH/wf/l1p+Q+eEwiEXQg//e/+l9KXD3/4VUtwfuoapREAAAAAwA4TW3+/8hu/GykhEHY5fO5XfrfU9be/o/SO95x/QSswAAAAAMAOYwPylM7PfxYhIRD2IKg7tL0PfIESCQAAAABgFwRY7yUiIwTC7gf99vQb1G+REgkAAAAAAAEmEBBgAAAAAABAgAkEBBgAAAAAAPatAP/a7z0r/cTP/lLp+k/969Kte1/YE/H4wuOnpX/zhd/elXP9V//+t9YFHf8gi9vPf/F3Sp//zf+wbvnSr/9+5vLtCreXvljJN//ic79YubYIMAAAAAAA7FsB/uHP/HTpS//8l5X+4vG/Ujp5+m+W3vHWW6XBoa+vCNV2SO03/sOJlraVRCkNO32+kt3ypS592Zf/pcrxFHT+x9pzpQ9990f3rZjpWn7Dt4zXXa/v7v0f/p51y7VP1vLtEN93ffW7S+/8M3+2cmxdz7/c81crIowAAwAAAADAvhPgn7z7yxXx++gPfur1MrUW/vV3nyr9nb/3D7ZNNvdTC3BMk/765R/7oR+pLJfY7Ucxa3Ytd1OAf3nlDysPDr713IXXrb76q88S4YPcmo4AAwAAAAAcUgGW5I5807euW65u0OPF6ZputN92YaYiWd/8j4s1kqhW0x/7lz9X2V4tx5ItCVKMX9ImCVMckqRL8z9U+vrhv1/ZVnHGlmbFqc/xf8V79Ud+svS3zvzd0vA3fHPlGD6NikfrFJdaseNybad1So/203FbEWAFLf/Ep368pnVc56DjKM70Gum8tE77KO3xuvj/FXQecx//5Lp9dQ3Of+ifvr5esSVc6Y7rYvfl9FpuVoD9dfMPPpqdrw8fefufVwQ47fIs8ZUEx2urdOoc3lN4byVO7Rf30TlPf+T7Kut0vv47VND1yvp+EWAAAAAAAAR4U6Hzz33pOglKgwRMrXrqyiw5k5iqm3QUPImXZEjCJNFTN1hJj9b9s09+piJt2k9yJBH7qr/21aVP3rhd+vTNOxXJk+CkXaD1v7rWSoC0rYRSx4wyKLmN8eiY6sKsFlyt0/loW4mf0pFKWpYAS9SUNu0XhVzxKF6dg46jVvF4XtpX237X5R+oyKwk7kva2irp1nqdR/xfQXHH7svaXuf2nd/7sco2Wq5r6OPVuWidzj+2xKfXcjMCLKlUt+V43ZROf93S8/UPQXzQd9aoO3YMOpa2U5r1kED5LYq1RFnnp3VKg3oiqEdC1vfr04kAAwAAAAAgwJsKUagabSNhkYz4ZWqxU0twFK9v/+B31rQOeqELrtuuWvI02Fb8LNny0uv/l1B60ZMYqnVXEqx1frAuxdP7Ve96LXI9X/GVTbsS+yD5Upr9+6tKi+L1rbbaTi2XEkrJm39I0KoA62+8drHrsMQwDiQlOY7XSGIe/9+OLtD6nvz73XqIER8E6MGFl0x/vq0eKx1sTA8I/P7qbRD300MDPVSJ63WeccCu9PuVPDf6ThFgAAAAAAAEuGlQi5/vmpsKTGyNS1v7vOTqr29F1v/1BFhyo1ZUyaOERlJbT4DTAbGiVEoSFWccwCoOYhWP44/fyjvAik9xeImPDwfi4GAxxP1i63JW+poJsNImyfXx+vewJccSQB1bghhFcLMCrNbauFzvWKtlV/HogYHvYt3ofLO6ztdrAfYt7kq7Wnol3epFoPOOadF1l3TrXP/GqdMVWda+zb5fBBgAAAAAAAHeVFBLrkLWCL+xO7CEJd3Gt4BuRIAlOrFbs+LWthsVYHWTDTZYVTqV0WYEOA4GJhHz56HParFOjyFJk6RK7Hy8Erx6Aqwu3F6Adf3SeH1LqaRUXX8lmpJGrWsmwBLd+A512g1Z3a3Ta6CWfUlw7GLd6HzTOCWrWe8A67NkVWnX96s8JMmW1OocsgbkkiQrfdpP7wQ3+34RYAAAAAAABHhTQWKi1kff1VfyJVFVF1V91ru6krDYbVYiI3GKUwa1IsCxK7MkS/Lru+duVICjZPlBmpQWpXmzAhy7B/vzVKull9w4XZTORYIn4fXdd4PrTq4u42olj1Koz1GAJYCS0iiPapVVF2iJn+LVtn6d4lWaYrrrzbUrKVX6/HkpPdonvlurFmUvydpHaYnn67tm63uK55v1XrjWqXdA1ijQWq9jK2/5Qa/iyNHxYYjvcq24dPxm3y8CDAAAAACAAG86SBglfpLaevMAS5okJZI4dVtVK24Uo0YCrG20n4IkMQqv/kpo1GqpY0t6WhXgKGeSRqVDQf/HUaI3K8BKq84ttnarBVIyp7i0LA5O5eVd2+u9Vl07L8BKg+RPravaxg8aJRFUnOoCrmW6NlEKlQZdlyjM2jd2zdY6nae2j0Kbtr7qOHrIIKlV/Prfz22s7yAOLqZzUnxxhOWs863XPd53YY77xHmA4zvLOk+lQV2udS46J+Wr2OIs2dcxdCylW9/vz3z+N5p+vwgwAAAAAAACvOV5XSUYGmzIDz7kg+RE0pKulyj77rv638uzBjaSGMZ3TSVIiifO+Ru72fp5gLPmBNZn3xqp40iUFBodP0sU63XtVRq9GOt4SrtPb9p9VwKpY4ZkQDFdL63TflqfTl0Ur0M673B8D1brohCm1zJrYKr0uLouWdMlxeumbdL1zc4361rqPJRvlOasaZH8d6Tg49X/Ma1pS3O97xcBBgAAAABAgAl7HEILI2oT9n9AgHeAXP5cTy4/VTiWnxzPDRQnFNoHiiPtp4u9XB0AAAAABBgBPnhB3YCzuiYTEOAjyZuDk0Pt+eJCx9e+/3/TBa0X3jn0/j/KnT7/o9qeqwYAAACAABMIhN0LEx+9YW42dZESaRO8cep8f1lof8FL7pcPf7h0enK+NPqRa6Vvmv105a8+/4X3fqhGht/62gu/9ubgVJ6rCAAAAHA0yA1MFlUPVB0RGSEQdjf82up/qrhapQU4P3WWEmmjBdhg8XKU2a6//R2lD3z8p0qfXWo8f9WP/
/xvls7Pf7ayfdw3Nzg5Hwpvt3FFAQAAAA43x06/ry+XL/7JO4feX/rMz/0GUkIg7GKY/4mlin+9efr8//POr3lfFyVSqxQ+2NGen7yji/eO95yviO/nV/7jhi6+ttd+2r/SGjz0vruKl4sLAAAAcLgp1/2uxIaQ/n/0/ZU6obpEEwiEnQv/9cwPv3av9sGpaUqiluX37bb2geJdXTh1af7Ru8tbegpx7Wcevm4NfiXBtAQDAAAAHPr6ZFmC1RLcaOwYAoGwveHN/NR/4d3fDaLuyvE93zu/urotTfGf+5Xffd0XvdIdGgAAAAAOPcdOFk8cyxdnNTAWgUDY4aAZevLneih5NiK/+alC7Pa83e9sKL7YJM/AWAAAAAAAALDHAlz8FQnqhxc+tyMvZev9jzg6NFcbAAAAAAAA9oRjg1OjsevzF373j3ZEgBVv7ArNPMEAAAAAAACwJ+QGizclpt/1qZ/d0aG5YyvwO97zvh/jqgMAAAAAAMDuUni7rSzAfywx/bfLT3ZUgDWwlo7zzqH3/xEXHgAAAAAAAHYVDUolKX3Xt3zvrkzQHLtBt58u9nL1AQAAAAAAYNfIDRQnJKTfNPvpXRHgr7vwiVcCPFAc4eoDAAAAAADArqH52SSk5+c/uysCLNGuzAlcFm+uPgAAAAAAAOyeAA9MzklIpz9xa1cE+Fu+57oJ8GSRqw8AAAAAAAC7J8D5qUsSUo3QvBsCPPqRaxUBPpafHOfqAwAAAAAAwK6x2+8An56cf9UCnJ8qcPUBAAAAAABg9wS4LKIS0nf/k4/u6ijQufy5Hq4+AAAAAAAA7B6Fc7myBP9nSen/9Jt/uKPy+9ml34rzAP/vXHgAAAAAAADYddrzk3ckph9dvLejAqz3jCtTIOWLC1x1AAAAAAAA2HU0IJXE9CtGZ0u/tvqfdkR+P7/yH0tdf/s7KgL85uDkEFcdAAAAAAAAdp/C223tg5MrO9kKHFt/c6fP/wIXHAAAAAAAAPaMY4NToxLU/+rrPlD63K/87rbK74/eXS694z3nKwL8xqnz/VxtAAAAAAAA2GMJLt6IXaG3a0CsO7+6WvoL7/3Qq9bfweJlrjIAAAAAAADsPRoRerD4MErwVluCP/Nzv/F62qM3Txd/lgsMAAAAAAAA+4a3Tp3rjhKs7tB6J3ijA2N94Xf/qPThhc+97vbcPlC8Gwof7ODqAgAAAAAAwP6icC4Xu0PH1mCJcLNu0f92+Unpuz71s69bfV91e56c1yBbXFQAAAAAAADYt7waGOvV6NAxvPuffLT0TbOfrozqPP2JW6Xz85+tfH7Xt3xvyW+Xyxd/JZefKnAVAQAAAAAA4GBQeLutMk9wfvJOWWj/s5fcNOQGi39cDjclzlw4AAAAAAAAOMAyfC6nVt3cQHHijcHJX6hMazQ4eU+f3xycytPVGQAAAAAAAA4dxwYm5yTA+svVAAAAAAAAAAQYAAAAAAAAAAEGAAAAAAAAQIABAAAAAAAA9o8A5waLl7kaAAAAAAAAcGjRyM+VFuDB4iJXAwAAAAAAABBgAAAAAAAAAAQYAAAAAAAAAAEGAAAAAAAAQIABAAAAAAAAEGAAAAAAAACA7aJ9oDgiAW7PT97hagAAAAAAAMChJZefKlQEeLC4xNUAAAAAAAAABBgAAAAAAAAAAYbDwrH81NljA5NzhCMcynmAXwIAAAAAIMBw6OXX8gLhiAckGHhoxsMu8i2BfEseJFB2IsBwuCsE5R+18sLpyfnS9CduEY5g0HdfEeByXuAXATw042EX+ZZAviUPEig7Dx1vDk7lbRqkB1wNBFh5QSK0/AfPCUcw6LtHgIGHZjzsIt8SyLfkQQJl5yFuAT7XY08zVrkaVGYRYASYgh0oM/itk28J5FvyIIGyEwEGKgUECnYAygx+6+RbAvmWPEgepJ6EAAOVAgIFO1Bm1IZfXvnD0k/87C+Vbt37QunXfu8ZvzF+69zrCOTbPcqDX3j8tFIWq0z+/G/+B/ITZScCjAADlQICBTtsV5nxb77w26W/8/f+QelL2tpK5c0qofPPfWnpI2//8z3N33/x+F8pvf/D37OlOL7hW8YrQf8rLsW5W5XX8x/6py1te/2n/nXlmv+rf/9b/Na513GPIg+Wpj/yfaV3/pk/+7o8VtmsMloPKeM2Wq6yY6++242Wp3uVXupJCDBQKSBQuQDKjJoy4+e/+DulL/3zX1Z611e/u/Rj//LnKi2/am1Q5UYVlr2U4O0WYFW+dut8PvqDn6pcPwSYex33KAR4o2WHhPe7Lv9AaenXf78ivZ+8cbtSTkuC94sAb7Q8RYChMpXRZueyah+cvGpf5LPtnidL6eLboUAmULmAo1NmfOM/nKhUrHzLgl+nVghJsdZLltWyKVHWZ4mylvl9/DJto9Zl/S+5U+Un6ziK81987hcrXf181+sowHH97aUvtvS70HbaXnF5AVbaYnr8tkpXeh7aTsfVPlnrY/iZz/9GZb26Kvqu5N/5vR97LbWKJ65ThVbba78sAdZxso6HAHOv4x51NPLg1w///dLg0NevWz738U9WygmVIalQqrxIu0n7Zdonhlg2xvWxfPVlWCyHGnW9zipPfVnuyz2f3lbiJg8e8sJ8382RReagQCZQuYAjU2aoEvSOt96q21VXlaVYSVGrxN84dboSVJH5W2f+bkVOT57+m+u6xcVl2ufLvvwvlUa+6Vsrx5FMH2vPla7+yE++3l7/a7m6XCtoe1WgogC/p/De18t1XMWdVqy8eGp7HUPb/+Wev1pJb1YXaJ3XX3/3qUq6tEwtLtouCriWfeu5C5W0qXVc67/9g99ZI869X/Wuyvq4vz7rmsXW3xhU2VO83/yPi5XttL3SqLQqzVGA43XS+cYWIH7r3Ou4Rx2tPDhenK6UAV5Io1z6ss8LsMrFtLeMX6ayTVKth53a70Pf/dHK+uFv+OZKWRmXazu16vry+tM377TUBTotyxXnD3/mp2vSq+Mpbh1TcfsyjjxIC3ALrcBVYaUFmMoslQIqFxTssJkyQy2OqpR84lM/3nK3PEmcWi8VWhFgxS+RVMVNEihxlpjG46sSpPfdopCrgtTzFV/5WkJVkfrJu79c+SwxbpRetVgr7th6+s8++ZnXlbq0wqZKnraNLdISWp1frLDFY8dWZ7V6K62KU58l1hLYKMxqCdH2qlhmdYFWRU8Vv3gu2l4VRJ17FGBdm/jA4dsuzFSOFyu8CDD3Ou5RRyMPqvzSwzSVCforIZZcpr1nNirA2l7lksRax9B6lTExjlhmqVxTOaSyTdvoczMBVpwqP2P5p331wFDxx1c74vnE8vljP/QjlWXxgSd5EJoSBZgrQWWWSgGVCwp22GyZIRkLLb6XFStHvttaqwLs9/FiKAmVNPpuzxJDL6ESwVbfC5ZgXpr/oZplavWoJ8BqrZBMx4qlr2BquyjmMUjOJan6X138fPc/7auW4pi2VIDV2qwHAT4+SbXiiQLsK4JaHtx7wQgw9zruUUcnD+rBlwRRZU5snVV5pW7QmxVgyWe6Xt2t4+f4QFTvG7cy0JVf
[... base64-encoded PNG image data elided ...]"></image></g></g></svg>
+
diff --git a/doc/images/Keep_manifests.svg b/doc/images/Keep_manifests.svg
new file mode 100644 (file)
index 0000000..568335b
--- /dev/null
+++ b/doc/images/Keep_manifests.svg
@@ -0,0 +1,7 @@
+<?xml version="1.0" standalone="yes"?>
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0 -->
+
+<svg version="1.1" viewBox="0.0 0.0 960.0 540.0" fill="none" stroke="none" stroke-linecap="square" stroke-miterlimit="10" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">[... SVG vector path data for the Keep manifests diagram elided ...]
1.875 0.90625q1.109375 0 1.890625 -0.9375q0.796875 -0.9375 0.796875 -2.921875q0 -1.875 -0.78125 -2.8125q-0.765625 -0.9375 -1.84375 -0.9375q-1.0625 0 -1.890625 1.0q-0.8125 1.0 -0.8125 2.890625zm8.188217 1.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm10.46875 -5.015625l0 -1.90625l1.90625 0l0 1.90625l-1.90625 0zm0 7.953125l0 -1.90625l1.90625 0l0 1.90625l-1.90625 0zm3.4645538 0.234375l3.9375 -14.0625l1.34375 0l-3.9375 14.0625l-1.34375 0zm5.183304 0l3.9375 -14.0625l1.34375 0l-3.9375 14.0625l-1.34375 0zm12.823929 -0.234375l0 -1.25q-0.9375 1.46875 -2.75 1.46875q-1.171875 0 -2.171875 -0.640625q-0.984375 -0.65625 -1.53125 -1.8125q-0.53125 -1.171875 -0.53125 -2.6875q0 -1.46875 0.484375 -2.671875q0.5 -1.203125 1.46875 -1.84375q0.984375 -0.640625 2.203125 -0.640625q0.890625 0 1.578125 0.375q0.703125 0.375 1.140625 0.984375l0 -4.875l1.65625 0l0 13.59375l-1.546875 0zm-5.28125 -4.921875q0 1.890625 0.796875 2.828125q0.8125 0.9375 1.890625 0.9375q1.09375 0 1.859375 -0.890625q0.765625 -0.890625 0.765625 -2.734375q0 -2.015625 -0.78125 -2.953125q-0.78125 -0.953125 -1.921875 -0.953125q-1.109375 0 -1.859375 0.90625q-0.75 0.90625 -0.75 2.859375zm16.016342 1.75l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm11.844482 5.875l-3.75 -9.859375l1.765625 0l2.125 5.90625q0.34375 0.953125 0.625 1.984375q0.21875 -0.78125 0.625 -1.875l2.1875 -6.015625l1.71875 0l-3.734375 9.859375l-1.5625 0zm7.0625 0l0 -1.90625l1.90625 0l0 1.90625l-1.90625 0zm11.152039 -1.21875q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 
-0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.0632324 4.9375l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm8.9626465 0l-3.75 -9.859375l1.765625 0l2.125 5.90625q0.34375 0.953125 0.625 1.984375q0.21875 -0.78125 0.625 -1.875l2.1875 -6.015625l1.71875 0l-3.734375 9.859375l-1.5625 0zm13.03125 -1.21875q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm10.469482 4.9375l0 -1.25q-0.9375 1.46875 -2.75 1.46875q-1.171875 0 -2.171875 -0.640625q-0.984375 -0.65625 -1.53125 -1.8125q-0.53125 -1.171875 -0.53125 -2.6875q0 -1.46875 0.484375 -2.671875q0.5 -1.203125 1.46875 -1.84375q0.984375 -0.640625 2.203125 -0.640625q0.890625 0 1.578125 0.375q0.703125 0.375 1.140625 0.984375l0 -4.875l1.65625 0l0 13.59375l-1.546875 0zm-5.28125 -4.921875q0 1.890625 0.796875 2.828125q0.8125 0.9375 1.890625 0.9375q1.09375 0 1.859375 -0.890625q0.765625 -0.890625 0.765625 -2.734375q0 -2.015625 -0.78125 -2.953125q-0.78125 -0.953125 -1.921875 -0.953125q-1.109375 0 -1.859375 0.90625q-0.75 0.90625 -0.75 2.859375zm8.641357 0q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm8.610077 1.984375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 
0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm10.46875 2.9375l0 -1.90625l1.90625 0l0 1.90625l-1.90625 0zm4.089569 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.266327 4.921875l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm5.931427 0.8125l1.609375 0.25q0.109375 0.75 0.578125 1.09375q0.609375 0.453125 1.6875 0.453125q1.171875 0 1.796875 -0.46875q0.625 -0.453125 0.859375 -1.28125q0.125 -0.515625 0.109375 -2.15625q-1.09375 1.296875 -2.71875 1.296875q-2.03125 0 -3.15625 -1.46875q-1.109375 -1.46875 -1.109375 -3.515625q0 -1.40625 0.515625 -2.59375q0.515625 -1.203125 1.484375 -1.84375q0.96875 -0.65625 2.265625 -0.65625q1.75 0 2.875 1.40625l0 -1.1875l1.546875 0l0 8.515625q0 2.3125 -0.46875 3.265625q-0.46875 0.96875 -1.484375 1.515625q-1.015625 0.5625305 -2.5 0.5625305q-1.765625 0 -2.859375 -0.7969055q-1.078125 -0.796875 -1.03125 -2.390625zm1.375 -5.921875q0 1.953125 0.765625 2.84375q0.78125 0.890625 1.9375 0.890625q1.140625 0 1.921875 -0.890625q0.78125 -0.890625 0.78125 -2.78125q0 -1.8125 -0.8125 -2.71875q-0.796875 -0.921875 -1.921875 -0.921875q-1.109375 0 -1.890625 0.90625q-0.78125 0.890625 -0.78125 2.671875zm8.047607 5.34375l3.9375 -14.0625l1.34375 0l-3.9375 14.0625l-1.34375 0zm6.4332886 3.546875l0 -13.640625l1.53125 0l0 1.28125q0.53125 -0.75 1.203125 -1.125q0.6875 -0.375 1.640625 -0.375q1.265625 0 2.234375 0.65625q0.96875 0.640625 1.453125 1.828125q0.5 1.1875 0.5 2.59375q0 1.515625 -0.546875 2.734375q-0.546875 1.203125 -1.578125 1.84375q-1.03125 0.640625 -2.171875 0.640625q-0.84375 0 -1.515625 -0.34375q-0.65625 -0.359375 -1.078125 -0.890625l0 4.796875l-1.671875 0zm1.515625 -8.65625q0 1.90625 0.765625 2.8125q0.78125 0.90625 1.875 0.90625q1.109375 0 1.890625 -0.9375q0.796875 -0.9375 0.796875 -2.921875q0 -1.875 -0.78125 -2.8125q-0.765625 -0.9375 -1.84375 -0.9375q-1.0625 0 -1.890625 1.0q-0.8125 1.0 -0.8125 2.890625zm8.844482 4.875l0 
-9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm5.6032715 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.281982 -6.734375l0 -1.9375l1.65625 0l0 1.9375l-1.65625 0zm-2.125 15.4844055l0.3125 -1.4219055q0.5 0.125 0.796875 0.125q0.515625 0 0.765625 -0.34375q0.25 -0.328125 0.25 -1.6875l0 -10.359375l1.65625 0l0 10.390625q0 1.828125 -0.46875 2.546875q-0.59375 0.9219055 -2.0 0.9219055q-0.671875 0 -1.3125 -0.171875zm13.019836 -7.0000305l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm15.547577 2.265625l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm6.546875 2.109375l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm0.8551941 -1.4375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 
0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm8.7499695 3.171875l3.9375 -14.0625l1.34375 0l-3.9375 14.0625l-1.34375 0zm12.870789 -1.453125q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.0632324 4.9375l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm8.962677 0l-3.75 -9.859375l1.765625 0l2.125 5.90625q0.34375 0.953125 0.625 1.984375q0.21875 -0.78125 0.625 -1.875l2.1875 -6.015625l1.71875 0l-3.734375 9.859375l-1.5625 0zm13.03125 -1.21875q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm10.469452 4.9375l0 -1.25q-0.9375 1.46875 -2.75 1.46875q-1.171875 0 -2.171875 -0.640625q-0.984375 -0.65625 -1.53125 -1.8125q-0.53125 -1.171875 -0.53125 -2.6875q0 -1.46875 0.484375 -2.671875q0.5 -1.203125 1.46875 -1.84375q0.984375 -0.640625 2.203125 -0.640625q0.890625 0 1.578125 0.375q0.703125 0.375 1.140625 0.984375l0 -4.875l1.65625 0l0 
13.59375l-1.546875 0zm-5.28125 -4.921875q0 1.890625 0.796875 2.828125q0.8125 0.9375 1.890625 0.9375q1.09375 0 1.859375 -0.890625q0.765625 -0.890625 0.765625 -2.734375q0 -2.015625 -0.78125 -2.953125q-0.78125 -0.953125 -1.921875 -0.953125q-1.109375 0 -1.859375 0.90625q-0.75 0.90625 -0.75 2.859375zm8.641357 0q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm8.610107 1.984375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.7812805 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.8437805 -0.46875 -2.5625305 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375305 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.1562805 0 -1.6406555 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.4687805 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.9219055 0 -2.9375305 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm8.7500305 3.171875l3.9375 -14.0625l1.34375 0l-3.9375 14.0625l-1.34375 0zm8.261414 -0.234375l-3.015625 -9.859375l1.71875 0l1.5625 5.6875l0.59375 2.125q0.03125 -0.15625 0.5 -2.03125l1.578125 -5.78125l1.71875 0l1.46875 5.71875l0.484375 1.890625l0.578125 -1.90625l1.6875 -5.703125l1.625 0l-3.078125 9.859375l-1.734375 0l-1.578125 -5.90625l-0.375 -1.671875l-2.0 7.578125l-1.734375 0zm11.660461 -11.6875l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm4.1448364 0l0 -13.59375l1.671875 0l0 7.75l3.953125 -4.015625l2.15625 0l-3.765625 3.65625l4.140625 6.203125l-2.0625 0l-3.25 -5.03125l-1.171875 1.125l0 3.90625l-1.671875 0zm9.328125 -11.6875l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm2.8791504 0.234375l3.9375 -14.0625l1.34375 0l-3.9375 14.0625l-1.34375 0zm6.5739746 -0.234375l0 -13.59375l1.796875 0l0 6.734375l6.765625 -6.734375l2.4375 0l-5.703125 5.5l5.953125 8.09375l-2.375 0l-4.84375 -6.890625l-2.234375 2.171875l0 4.71875l-1.796875 0zm19.052917 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 
-1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm15.860046 2.703125l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.110107 9.65625l0 -13.640625l1.53125 0l0 1.28125q0.53125 -0.75 1.203125 -1.125q0.6875 -0.375 1.640625 -0.375q1.265625 0 2.234375 0.65625q0.96875 0.640625 1.453125 1.828125q0.5 1.1875 0.5 2.59375q0 1.515625 -0.546875 2.734375q-0.546875 1.203125 -1.578125 1.84375q-1.03125 0.640625 -2.171875 0.640625q-0.84375 0 -1.515625 -0.34375q-0.65625 -0.359375 -1.078125 -0.890625l0 4.796875l-1.671875 0zm1.515625 -8.65625q0 1.90625 0.765625 2.8125q0.78125 0.90625 1.875 0.90625q1.109375 0 1.890625 -0.9375q0.796875 -0.9375 0.796875 -2.921875q0 -1.875 -0.78125 -2.8125q-0.765625 -0.9375 -1.84375 -0.9375q-1.0625 0 -1.890625 1.0q-0.8125 1.0 -0.8125 2.890625zm7.3288574 8.65625l0 -1.21875l11.0625 0l0 1.21875l-11.0625 0zm11.906982 -3.78125l0 -9.859375l1.5 0l0 1.390625q0.453125 -0.71875 1.21875 -1.15625q0.78125 -0.453125 1.765625 -0.453125q1.09375 0 1.796875 0.453125q0.703125 0.453125 0.984375 1.28125q1.171875 -1.734375 3.046875 -1.734375q1.46875 0 2.25 0.8125q0.796875 0.8125 0.796875 2.5l0 6.765625l-1.671875 0l0 -6.203125q0 -1.0 -0.15625 -1.4375q-0.15625 -0.453125 -0.59375 -0.71875q-0.421875 -0.265625 -1.0 -0.265625q-1.03125 0 -1.71875 0.6875q-0.6875 0.6875 -0.6875 2.21875l0 5.71875l-1.671875 0l0 -6.40625q0 -1.109375 -0.40625 -1.65625q-0.40625 -0.5625 -1.34375 -0.5625q-0.703125 0 -1.3125 0.375q-0.59375 0.359375 -0.859375 1.078125q-0.265625 0.71875 -0.265625 2.0625l0 5.109375l-1.671875 0zm21.978333 -1.21875q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.0787964 4.9375l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 
-0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm10.391357 -11.6875l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm4.5355225 0l0 -8.546875l-1.484375 0l0 -1.3125l1.484375 0l0 -1.046875q0 -0.984375 0.171875 -1.46875q0.234375 -0.65625 0.84375 -1.046875q0.609375 -0.40625 1.703125 -0.40625q0.703125 0 1.5625 0.15625l-0.25 1.46875q-0.515625 -0.09375 -0.984375 -0.09375q-0.765625 0 -1.078125 0.328125q-0.3125 0.3125 -0.3125 1.203125l0 0.90625l1.921875 0l0 1.3125l-1.921875 0l0 8.546875l-1.65625 0zm11.526978 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.438232 2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm13.65625 1.4375l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm-0.0041503906 5.28125l0 -1.21875l11.0625 0l0 1.21875l-11.0625 0zm12.313232 -3.78125l0 -8.546875l-1.484375 0l0 -1.3125l1.484375 0l0 -1.046875q0 -0.984375 0.171875 -1.46875q0.234375 -0.65625 0.84375 -1.046875q0.609375 -0.40625 1.703125 -0.40625q0.703125 0 1.5625 0.15625l-0.25 1.46875q-0.515625 -0.09375 -0.984375 -0.09375q-0.765625 0 -1.078125 0.328125q-0.3125 0.3125 -0.3125 1.203125l0 0.90625l1.921875 0l0 1.3125l-1.921875 0l0 8.546875l-1.65625 0zm4.1519775 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 
0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.266357 4.921875l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm6.2283936 0l0 -9.859375l1.5 0l0 1.390625q0.453125 -0.71875 1.21875 -1.15625q0.78125 -0.453125 1.765625 -0.453125q1.09375 0 1.796875 0.453125q0.703125 0.453125 0.984375 1.28125q1.171875 -1.734375 3.046875 -1.734375q1.46875 0 2.25 0.8125q0.796875 0.8125 0.796875 2.5l0 6.765625l-1.671875 0l0 -6.203125q0 -1.0 -0.15625 -1.4375q-0.15625 -0.453125 -0.59375 -0.71875q-0.421875 -0.265625 -1.0 -0.265625q-1.03125 0 -1.71875 0.6875q-0.6875 0.6875 -0.6875 2.21875l0 5.71875l-1.671875 0l0 -6.40625q0 -1.109375 -0.40625 -1.65625q-0.40625 -0.5625 -1.34375 -0.5625q-0.703125 0 -1.3125 0.375q-0.59375 0.359375 -0.859375 1.078125q-0.265625 0.71875 -0.265625 2.0625l0 5.109375l-1.671875 0zm21.978271 -1.21875q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm7.7351074 3.4375l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125z" fill-rule="nonzero"></path><path stroke="#0097a7" stroke-width="1.3671875" stroke-linecap="butt" d="m185.21445 510.1761l560.99927 0" fill-rule="nonzero"></path><a xlink:href="https://www.google.com/url?q=https://dev.arvados.org/projects/arvados/wiki/Keep_manifest_format&amp;sa=D&amp;ust=1478895969188000&amp;usg=AFQjCNHMNIzr5ezz4laFKPqTOrFHC9sgsA" target="_blank" rel="noreferrer"><path fill="transparent" fill-opacity="0" d="m185.21445 512.15173l0 -20.84253l560.99927 0l0 20.84253z" fill-rule="evenodd"></path></a><path fill="#000000" fill-opacity="0.0" d="m178.12337 46.721786l600.2048 0l0 473.36218l-600.2048 0z" fill-rule="nonzero"></path><g transform="matrix(0.6538178477690288 0.0 0.0 0.6538152230971128 178.12336036745407 46.72178477690289)"><clipPath id="g1586814eb6_0_6.1"><path d="m0 1.4210855E-14l918.0 0l0 724.0l-918.0 0z" 
clip-rule="nonzero"></path></clipPath><image clip-path="url(#g1586814eb6_0_6.1)" fill="#000" width="918.0" height="724.0" x="0.0" y="0.0" preserveAspectRatio="none" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA5YAAALUCAYAAABw7K2tAACAAElEQVR42uy9D5SV5X3v+yqjTGDUqaCOZjSTSCJHCYdQTNCO6VjMnVQSUdGilzTEQ7Ow0iuNxBDFipVYEolyDZdiinFsMBktsXjEiA1p5kSqlqtekkWycBWXZJWskh7WWZy1uHd5bzk9z32/735+zDMP7957/uyZ2X8+n7W+C2bv9//++9m/50+SAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEB9syRNX5q2OjiX9jQtVXIs09Mc9McEAAAAAABQ16xJ49J01Ph5LEhzrErOQ5K+v06uKwAAAAAAQMOIZbWcx9ykUKl0iCUAAAAAACCW+aga1+UFqrnEcs1+ma6kfDNbNV/tLLPNcHvtQzyPpjSz/LrTyhzLVL9c3KS21R9jZ1K8ue3GNMfTHEqzHbEEAAAAAIBGFssV/rZtXspMuLYl/ZU45WiapTnbXOHvC5ftjYTM9rswWvZwmvnR9lYmhWau4fbCfqF90X0Hg3UX+m2G97+RFPpAGl3+9vC4D/lzb/LC+F6wvv6/Nrg2hpq/rvcSWi+VYAAAAAAAgCGLZZ5U6t/Xk0I1TpKnqt+cNDv8sktypHSHX2aaX0fr7srZr4RxlZexuV7Ojvv/i06/3JY0M/xxLvfL7PTLqBrZ45dbFKy7wN+2N023X1cifMTLZnsklsf8ca/1x5QE293g9z/dy6PdFtJSRtgBAAAAAADqXixNCnuTgdW4Rf72ldG6TV7aDvv/q7nqUX9bXM1b5bfRHe13Y7Rch5fGbdFy06PlVnvBKyVy+/3xTI3WneeX3RyJ5e5ouRn+9p6c69brj7N9kMIOAAAAAABQ92K5IemvNMZs8fd1elEKs9nfNysQtg05y4X3hfudlbM/VUeP+f8v9MvtSwqVymmDFLmOElIo1Fz2QCSWa6JlVib9Fdn4fOy+RYglAAAAAAAglv19B48l+VN2xH0Y89LlBazccj3RfvMG67Hmp1ZpVGXyeLCNA/629hIiV0wW43MKl10SLbN5EOezGrEEAAAAAADEsr+SONcLXF8RCesqkdZALNeVWG56tN+8EVa35UjnVL99SacNxnPY7zdP5DrLiKWavb5XRiw3+tsXlzifDsQSAAAAAAAQy4ECZAPTLM8RrBk568/xgtUcyNy6nOUkgPOD/dh+5+Qsq4qkjewqEe2O7m8KjmlRkfOYmhRv2qv1rS9oKbG0PqcLc7ahJrnzArFFLAEAAAAAALH0f7d4qQubxFr/yG3Rui1eAo/5/0vYVEXUqKvx3JUmrIuj/fZGy833t9vAPD1FpDaWvjU5y2kU2uM56y5PBjZjLSaWHX79N5KBlVWdZ18J2UYsAQAAAACgocUyFMmwSezWpH/k1GVezvb525ZFYigZO+TFbUm0blO0Xy27wy+nSqeap2o0V6sEzvC3aXurguWO+eWsuaw1w1UVcmOwrpY76venZbb4fe4PZLGYWIbHaYMHLfPnEY4qi1gCAAAAAEBDs8QLZFxhXOdv7/J/N0UyaZW8vGai6qu50wuh81K4Psmf53FZ0l/9O+rFL54eRM1ld3lJ1HJH/HLhMWvbqn7aaK8msGqyui1Y96A/t/BYZvlj6C5yjRb5c7UBhPYnA5sKD+W6AgAAAAAAQIUwseziUgAAAAAAAABiCQAAAAAAAIglAAAAAAAA1BYLksKIr9O5FAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACOi/aIP/svUc9veI4RUPudfcOErvMsAAAAAQN2jL797//mYI4RUPpNbWo7wLgMAAAAAiCUhBLEEAAAAAEAsCUEsAQAAAAAQS0IQSwAAAAAAxJIQxBIAAAAAALEkhCCWAAAAAACIJSGIJQAAAMDwaEqzKE1vmn1pDqTZkWZ5muZo2QVpeuQc43CcnX7f06vgmrWk2ZJmv79m8xFLQghiCQAAAI3KtDR707g0h71Qbk9zyN8maWoLll/jb+8Yh2Nd4vfdVQXXbYs/Fl27bWnmIJaEEMQSAAAAGpHWpFCdPJ4UqpNNwX36/2ovT68jlichoTwSXTPEkhCCWAIAAEDDsc6L2qoSy+zwy8wvIpaS0/YKH1e73+5IxXLqCAS4xa9bTBwP+tQ1iCUhiCUAAABAOfSl5lhycj/KkBlpFib9fSpNLLvT7Pb/V9R0dkEkhxKvFTnb1Hq9wd+9PouS/ia4zi83vYxYqt+lqq47vQwmXoL3B9vROfYE95dCTVp3Beu+l2Zr0t8ceKE/r+M+B6NzQSwJIYglAAAANAzTvTjtHOJ6awJZ6/Wyt9JLqkSrwy/X4Zdbk7MNyVhf8HefX/9oUqiiapsb/foHSohlpz+O/YH4zfLHsduL7rw06/162wYhldqe+pou9+uu9dvTPlRFneaP44jPEi/ZiCUhBLEEAACAhqPLy1bPMMVyY3S7Sd+KYYpl2NzW6PW3T88RyzypFKv8MjOibWmwnR1lzk19Sd9LTh51drHf5vroHGgKSwhBLAEAAACxHIFYdkW3T4tEcqhi+d4g9mViudJLZTxibSiBas6qSmLzYB0qKV3VVBXzAGJJCEEsAQAAAPqZkQyueWgx2euIbu8YoVgeHIJYWv/GvOqiBtvZngzsI6nmvkvLSObcEsdrx+gQS0IIYgkAAAAwEFX9DpVZRpVINUldUEViudvfJrl8PckfuVV9Ldf6+4/79faXkMtOxBKxJASxBAAAABg6PUn/CK/FWJsMnJKkEmJ5dIRiaX/boDwrg3UkjnEVc2rS319zUTGHSkpXcA95
MUUsCSGIJQAAAECABEzVvMM5MpZ4gVNTUn35aR2iWLb7v+OpOLr97ZUQS0nkgWRgk9itfpl4bs0lZcRSvOG3Fa+7wK+7GbEkhCCWAAAAACezLOnvr6iRU5f623qD27tzZK+cWAqbS3Kdl0FVPY96Ua2EWJr86jZrEtvpj3tvUphzUvcv9vIcCnIetu5Bv47WtYGCDkfCiVgSQhBLAAAAgEjOdif9A97YADka9GZWEdkbjFiqivhGsM0jXtj6KiiWYnMysEmsqpKHovPZnXMuxa7FG9F12J5zvoglIQSxBAAAAMihxQtUh/9/pWjz22wa4/Ox/bYOx6lG4TogloQQxBIAAAAAEEtCCGIJAAAAAIBYEoJYAgAAAAAgloQglgAAAAAAiCUhiCUAAAAAAGJJCEEsAQAAAAAQS0IQSwAAAAAAxJIQxBIAAABgPNHckovT9KbpS7MxzaxBrLc8TU/sIf42ZdUg118f3LY0WD/MFr986zhep05/LNMRS0IIYgkAAAAwUCp3pHFpXvdyeTTN8TTzS6zX5Zdx0e0d/jblvTQtJfZ7xC93MLi9x9922N9+MFjObh8vsVvij6ELsSSEIJYAAAAA/az2srQiuK3NC9xhL4AxrV74XAmxNBlcXGS/8wL5zBPLWN6ak0LFUvftQiwRS0Je3vO2++Frv3RvvnsUsQQAAAAYR6xq2Jdz37I0O9O059zX62VwbwmxtMrn9iL77vHrHxykWBp5+yx3jh1FziNGQj11mGLZPMj9TPXLlWrS2zyEY65asXzhlZ+70yc2uy3PvnTSfRKBux/4hrvkspluQlOTmzR5svtE59W5y+o23adllCu7PuW++/xPyu7/i3d+xV3QflEmH/G+51xxVW66r7vppGVv/9K9ruPij2Q/opx3/vvd55etcK/u/82JZRYvXV50e5Yfv/XOieW/3bvDfezyK7JrU+p8JEzzb7zFtZ49JVtW63zrqR/knuvXNz3lLp05O7uWU845z93yhWUDjnEo0eOSd90qHV3H4e5H18YeE+XOex5ELAEAAADGkTn+i9nSIayzKCk0ge3yQlpMLK1fZF5zWInTsaRQLR2qWL7hZbgc2sd6vx/7Anoo51z7fNYGyy0dgli25Oxnf3JyM+I5/thdkNf97eG2evw1C495Sa2JpUTKvvjnyaKESfd9ct6n3ep1j2ViIMmQGIXypHV1m+67676HslzY8aHstrzthvJm11ASEt73zM5Xs9slbNpuGMlbKJWSPi2r433w0cdPHLdE15aTjMbbUSShWlby2PezX2XLbtq6/aTzsfN+8rkfDZByCaJulySueXhTdq20PYlfeD66T7fr2HUttbzW09/DqeRJpPOuW6Vz3c2Lh70fnaPW1b96XLbt2oNYAgAAAIwjoSgt8aIjydudZmHO8pLGo0n/YDvlxNKau8bNYRf42zuGIJYtSX+z3dWDOLedftnNabq96NnxrorE8ogXw61+vVlDEMtd/raN/nzn++vnArls9oK431/XLi+vx/ztzcG5H/fH1+XXfz0Z5+a3QxVLSZ1JVZ5Yqjqn2yUWsYyeceZZmTjabarC6baw4qf/S9ZCCQwjidP+JVd54vLQY09kt+s4S52HhCXJqYaZ1JRb35aTTNptH519eXbs4fno+FSRDGX16u7PZOuqEhnLmM5L4qm/X/nFr7Pro+2GEinB1Po6h3oUS1WBdc3oYwkAAABQHaxJ+putqkq2zcvVYX/7smDZJi9hewMRKieW1tQ2bg671ctXUkIsi2XLIM5rfiCVSXQOe/25To3OYdEQRTzcz7poOV2fAz6i0y+3PFpOwq2Bk6YF12J3tEy7P8ZF4/UkGYpYqlmozlXVSjV1zBNLiZpuDyt0cSVT4iVRuuba691td9yVKxYSqrxj0DqqAurfPHGxY5SUlToXiau2E1f9JHWSr1JVsg1PPJPtQ9cgvF3bkwTGy0+75NLsPquUSpok1XnNi7Vd7T+UZP0bN+HV9dF1irehiq2EU9FxxtchFEvtT8tJcK3qmhf9WKDldBzFmrbaMtrnngNHioql7tMytmzYpFf3aXldQ/14oP/b/nQe+nu4TYARSwAAAICRi6UqZzNCl/BSJAFr87epiqZqWljNKyeWSXJyc9gWv78VZcRyZzJwupHtSf+AQJvLnNdGv1ze6LFLk4FVVDuH5mGI5Rb/97ScZdf7+2Z4OTzuhX1ZUrzv5Ot+OYnqnGp5kgxFLNUsdPnd92cCYBW/vCarkoC8Zpqq2qkiV0oOtK6qfnmCpmahWl8SU0xctA9VRXWMqjpKmqwCGAqMNUM1UdN2SsmVRccu6ZEoajuxrOrYQ5mLK7DaT5JT0bUkvglxWBXd3vdW7rXUdu1vLSNZjX+s0TISuFgsJeBW9VUku3EFNG+bWkf9W8ProWMJl9H1l/TGj4+aQauJcrisBNmOT8+l+PhNyO24h1OlRSwBAAAAKiOWG0sI2CIvkxKeldEygxHLuDnsYr+ttjJi2ZVzTJK/HUnp0WbFrqT4AD9d/r41wTkcGuT1isWyLyldXQ2XXZ70T89i/TDXRlI61x+LLaMvnFv9NawJscxrSlqqL2QYVTC1vJqB5t3/vRd/mm1T1VDJUDzgjQRFt1uFs5hYSlQkfvo3fKwkaya0qkbqNvWBVIU1XFbHFzZlLdaUNK4iWtVO21KFUs1VFUmWbtP5mYhpffXvzBu0Rvdp0KOwyWyepNv527FKAHV9JG9aXpGw6TZdj/j4dfs3H3/6RKXUBgdSxdPkW1Jny2l7Em+rFFtfWftb11LrhMuEj4+2q+3r8bVrof1KuO3HglIVSz03JKth02PEEgAAAGBsWJyc3OQ1T8BM9g4FIqjYIDP6f28RsVTzU1XqrDns9mTgdCFDHbynM9p+JcTy4AjFckmJtIWO5oVdTY6PJv3V4jmRPKuJ7WYvn/YFfPV4PUnGQixV+dJgNZKUuHpoCfttSkzCJpeSGsmGhMskK08sTcy0n6+ufSTblyRGldZQ5qwyJgGU9KkKq/O56XNLT9weVyPDPo+So2LVTGsiHEYVvlAOQ5nKa8ZrVTqr+pXrw6jj0rlZE9q4yqzl7HxMLONBgqwZrpor20i0ectpXzo+nZMeo3Cd8PHSMuHjo8dU5xz/EKDtqVoa/uCg87ZrQB9LAAAAgPFnlv9iuz7nvkVJf8VyaTKwWarlcCB5q4qIpdiQ9DerfS8ZOOrqUMWyaxBiaU1hZ+TctywZ2KdyJGJpTWE7cpbVbdO8WLf4ax02t20KjsWa9k7P2ZbWO+JFtC7FUtVBk8q8fpehFEoytF2rslmTUsmQ5CPs95gnllpeQmQVsbjpqJaXzJlYhhW6uKKnZrfxNiSruk+VyLwpViRE2qbul2TqeFQR1W3h4D06BpuGRc1dJW+qqOo6qamoxHYoYpl3LLpWuhaS8XA5O7+8qqyW1TGEzXCL/RAQymc8CFEoybZfG7hJ1z6ORD3sU4tYAgAAAFQf+70gxvM3qo9j2GQ1j8E0hRVWZdzpxbJ1mGLZnPSP9lqqKawNqhMP9COZ25ecPHjPcMV
yYVJ8kKBwP9asOJ7GZFqw/lR/vfPm/dxbr2KpZouSJ0lDKaksJnBqXmkSJlGRpFisuaW2O5i5Em0kVW3bmuXmDX4j4UpyqnAmXjqfvD6iahqa+OpkfJ+a7ybRSLM6Buu/qPNTxU7nJaGy47JzLNUU1uRb66riapXCxPebtD6NsVgWG43V7rPtl7qmNlBT3nMgHn02Kd+sHLEEAAAAqGK6vdDs97Kmv3uTgc1FRyqWwvoOxuJUTCz3Jv1zTFqs+ehuL2+lsHPY6s9JEmhTd8TTjQxXLMWuYD8S2gXBdbHmq63+/I8l/VOJqGL6hr/2ndG52xQp3cFt6+pNLCVwEiZVqfIGn7F+fHmSZs0yJTcmKKUyGAmx41V10ORRzUTLDaBjsWafxdax48y7HlYhVZPbcgMDaTkJov62ZrV5VUM1p7Uqn+RSlUaJpM39qKpt3uisdpx5shpuMxbXvNgcm3n9HvPEUtsPfxyIg1gCAAAAVDddXuTCQWNWDWK93hwpa/e3xc1r1/rb4/kxdycDp9hYnwzsxxlGEqfRZAczgmuTl7EjwXkdSE6udPYmJ0/xUYyF/jjmBre1+GM+GuxHEhkPdDTdH384gM9eL4/htjZ7AQ0fi9WDEOmaEkurDkokig2EI/HRMjYya56IqdIXTp8RRttOfFXTBqGRNOo2+7tU1VDiEs6pGfYHTXKmErHzLTYqqVVZ85qF6ngSXy2165NX2bTlbBvW1DQeKEjCqMqp9Rm1661rEW9Ty+SJZdxcWNsMpzCRBBd7fHXtJdilhNmmlrH9qrmrqqd5QqvrEl43xBIAAACgulFzzI46PK/25OSmvqO1n7YyyzT7a9xaZrmOpPi0JDUtlurbp0qlmo2WmlbEBniRzIRNWbWOBurJ6/9Yro+hljehDQVGsmiD7tjtxfpSmhDF+7Y+h8Xmt9Rx6JjVvDU8b/3fRly1Y1VFUn+H21JlUMenvqU20I7JXnwtrQmqCacJdTzQjh4bm1IkFks1sw2XNZE0cdY1s76h4bVUddLEW7dLziWM4Q8IOi9VT8P92vZjCbUmxOUG79F2dD6lRuwNH28tG1db9bduz6ugW3/P4e4TsQQAAACAxv61ocJiaaOQlorJhiTF+gFKVFRVlKjkSdJgB6+x5qMaAEdVQQmh5EwVvrBKJ2mzqqckS8IloU1yqpXh4D95Fbe4aqlziM/HqpXh1CnhedvUHnFfVMljeD52fcPBgCTmWldRX1Tt64Zbl2TX1vpxmsSaWGp/Ol/9rWa/dh3C87NlJbbat21TAmzSpj6w4bnoetvgS+HjE15v/att6zGUvKoZb7mmsEOZx9KeG/Fz0yqseXOIJlE/z0rNnYlYAgAAAABiWWZeSn1Bj6s/kgvdXiqa6zCsbqlKKAGTTEhuwkFuSvXvi7cVypjES9uTBKlCmDd6qmRH1T/Jlw2aU0wiJBmStnLHpWPXOWh7dj55Axep36Sdt6L/F6uGSsDVpFXbk2DqWOLpUFRh1b60LZ2ztqfbFF0nu6aa21J/S0Zt/zp/iXyeNKuiKPG0fevxjSuB4WMoCVXTXNtP+Pho+9qPxFLb07LaXvwY6jGJ5d62N5hBoOy5ET839bduzxvx156bw90nYgkAAAAAiGUd9GUjhD6WAAAAAACIJSGIJQAAAAAAYkkIQSwBAAAAALEkhCCWAAAAAACIJSGIJQAAAAAAYkkIYgkAAAAAgFiORzR1heZF1FyKZ5zV6n5rylR3znlt7kMfnj6oaUwIQSwBAAAAABpQLH/81jvZnIO33XFXNhfl+e0XnZjwvqmpyU1NRfPZl19DdAhiCQAAAACAWB5zew4cySasv+u+h9w1117vzjv//a717Cnuk/M+7Zbffb/btHW7u/Orf54JZcsZZ7pr5l/vXt3/GySHIJYAAAAAAI0qltt27XEPPvq4u+ULy9wll83MmrheOnO2W7x0uXvosSfcC6/8/KR1Xt7ztmtufl+2HnJDEEsAAAAAgAYSS/WP/NZTP3BfvPMr7squT7kzzjzLXdjxITf/xlvc3Q98w33vxZ9mFcvBbEtyidgQxBIAAAAAoI7FUoIoUZQwdl93UyaQEkkJpcRSginRRE4IYgkAAAAAgFhmUZNVNV1VE1Y1ZVWTVjVtVRNXNVnd3vcWIkIQSwAAAAAAxLKQV37x62wQHQ2mo0F1NLiOBtnRYDuqUGrwncE2aSUEsQQAAKgNZqXpTNPGpQBALIeaN9896p7Z+ar76tpHsr6QHRd/JKtGfqLz6mz6D00DoulAkAyCWAIAAAwPm0dte5nl9vrl+kbxWDr8PnqC21qDfSu9o7DfpjRLeCoA1I9Y/vC1X7pvPv501qT1Y5df4U6f2Jw1ab3h1iVuzcObMslEKAhBLAEAoPJi+V6aliLLTAuWG02xbE9zMM364Lblfr9b08xPCpXLSrPd7xcAalAsNf/jt3t3uDvvedBd3f0ZN+Wc87Lo/7pty7MvMUckIYglAACMgVju9v8uLrLM6jRHxkAs81jj9zttFPfRh1gC1I5YqtqoqqOqj9MuuTSrRqoq+fllK7IqJVN4EIJYAgDA+IjlWi+OxZrD7kuzpYRYdqVZ4SVwWY4Edvhl7P8rvKzOi5Zr9stND7bb4/e7KNiGMSMpVDS134V+/SRnm/P9/lb6fTZFx66mtoejfQNAdaB+1QveN2ny8TlXXJX1i5RMXnfz4qy/pCRT/SeRA0IQSwAAGH+xXOPFMa857Cy/zLwcsZzmpVO3H036q5rHvTwaVnVc5u9zQXYFoteRDOxj6XKS+OU3J/1NeA/5/+vfucF+1bT2gL/vsD9G50VyapF99PCUABg39P7T6X8E2uZf03pf2TFpcstxNXfVSK6IACGIJQAAVK9YmjjGzWHX+i93TTliucuLXXdwm6qIByNJNbHUB9dCvy2J3Y6kvxqZJ5bhuh05t21M+quUM7xESiBb/W1b/XHMCdZdlvRXaQ2awgKMD3rdLvGvZf3gcyzN62k2+PeFE60fxmoeS0IQSwAAgJGJZVOS3xz2gP+Sl0Ri2eTFbU3ONtdHMmgiuCpabk6w/8GKpURSlcc3cvbb7ZddEQjjkeTkKqzksguxBBhT9GPSfP+jzi7/Oj7g30dW+PeD5qIrI5aEIJYAAFATYinUvPS9pL+ZqIlfZ45YxqgflJqhLvfSlyeWXdE6HcMQy7lJ/7QjXVFMLLdH66riutHfn/fFFbEEqCzN/rW6wr9WD3iR3OnFUoLZOiQrRSwJQSwBAKBmxLLT/73U/73BfyFMiohlh//SaH0Xra/lgVEUy64kv+9lmLCqagMThdOq6JjbEUuAiqEmq4v9e8Yb/nW21/+go9tHPCgWYkkIYgnQCOjLuPqIzCixzEK/TF7mJSdXUVr8fd1l9t3ml5vGwwAVEEuh6t6u4P9ri4hls79fXyDXpVkQfHlcMwZiudbflpe2aD8SzLl+W68n/QP4IJYAQ6fVfzbp9bTD/5ik148G21nhPxObK71TxJIQxBKgEbARKV8vsczBpHSF5XDS39ww/IJdbs5A+5K9hIcBKiSWG5L+AX
l036wiYrkwZ11j2yiKZVsysLlr/EPLumA/K4u8Nnb5bbQjlgAl0Y8yahKvJu7qC7k/KQywsyv4QaltLA4EsSQEsQSod+zLt00wP6OMWHZE0RdgDWhy3P/q24pYwjiLpTWH1XN2b86ysViui5bpTPqnFJkzCmIpdvrbFkbbszkvbWRbfQk+En3xbfLnpddbcyCWR5KB81sCNCId/nW1wX+uWZNWTUe0NCndMgexJASxBIAR0Os/eG2uv41lxLIYG5L+ef4QSxhPsRRWhV9dQiyb/fP6uH8daBvb/OvBKoLzR0ks24PX1C6//F7/97bohx/70abXL7c/eq0l/nVr06Fs5CkBDYK6XKgrhn7c3O6f/4f8/1f612tLtRwsYkkIYglQz0z1X6KtSZ5+3T1W5IO4nFguKfIFe7BiudSvu9/vS02W8gZLmOHvO+CX25kM7Mc51X/51pfrsHrT5G/bXE1fNKAi6PFeEN221N/ekbPsqkjwNvjnaZ9/jug51uaXtfkpF/i/p+e8hsL9299Lg2Vs3anRuq3+WHb6fet1uDjn/Gb5560dY0+O4Oo5vd5vaxVPCahTZvkfVFR93Oc/r3b75/7CZOCAVtX3gYtYEoJYAtQxK7zULQi+jMeVkMGK5epkZBXLwz7r/Bf9o/5Lw/ToC/pxv9x6v6yN3rkiWG59cnIVa13OcgAAUJ20+/f89f5z5JiXyR7/OTMrqbGm34glIYglQD2jD+mwX5aqHu/52wcrllpnkf/QV9qGKZZxP7IZ/lh2Bvuxkfvaov2/4YXTRpZVE8e9fv3pfh+6fzsPOQBA1aH3cfVtXunfpw/5zwT9Xz9aqrlra62fJGJJCGIJUK/YxPEbott7/e1zi4hlsUjiwoFIhiqWq3Pus2NpTfoHWsmrOM5LTq5QzvLH9Lr/kqJM5WEHABh39MOhWshs9j8CHvPv1fo80g+VdTn9FGJJCGIJUK9sTvpHxAznpLTbtxYRy54om73sxX1bhiqWefNd2qAnnUnxwVPs127nRTTEmvoeTwZOhQIAAGPkU0mhSavmbdVAVWp5csB/xug9Wj9yNsRoxoglIYglQD3S7D/cy1Ugp+aI5WAZqlh2lRDLruD/c4tsJ29ewKXB+cznYQcAGPXPlrleGHv954Y+a3b693C9D7c26sVBLAlBLAHqkcVetoqNHmkSt3IMxXJRzn02hUlH0l99zBPE6f6+zdH+rXmVjv1IQlNYAIBKMs1/nmg0ZfV1f8//u9HfPp1LhFgSglgC1Dd9XsTay0jhgTEUy7jpbZPf50H/t82z2VtChBcH676eDBy8R/fv4KEHABgWrf6HvTX+vdQGU9vmf/hTd4NmLhNiSQhiCdA4DFb4bIL47jESS6ugNnvh3R7JYuK/wOi2tf5LjpbVsPPqQ7kv+FJjU5+EFVebQH4ZTwEAgJLoxzn1fVzuf/TT/MLH/OeC3n/VZ5IWIIglIYglQIOz1gvW0jLLLfLLbRsjsVydDOz3eTw5uamuBunpTU7uD6p92BQkc/y6rycDB4Ro8eegL0fTeBoAAAx4v9bI2xv8e6feJzVa62b/WTGDS4RYEoJYAkBMm/8SUW4Uvia/nDWXbfd/D+UX745k4JyTeTT75Zr9L+ALvdSWWm+aX2ZxzheeqX57LTnrtfr7WnkaAECDove/ef7HPLUM0ZexQ/7/K/2PfS1cJsSSEMQSAAAAAIR+4FP/dHUB2JIUugyoGrk7zfqk0KS1ncuEWBKCWAIAAACA0e5lcb2Xx2NeJrd4uZyVNMickYglIYglAAAAAJRHzVW7kkLzVTVjVXPWI/7/auaq5q40+0csCUEsAQAAAOAE6k+ugXQ0oI4G1rF5ejXgjvqcd3CJEEtCEEsAAAAAOOEgSaFJq0bx3uUlUvMKa+oPTQGi0a9p0opYEkIQSwAAAIAMjYg9N82KpDC9k6ZF0tRLO9KsSTM/oUkrYkkIQSwBAAAAAqYnhamSNqZ5I817/t+N/nbm2UUsCSGIJQAARKgP2EFSV9nG03rQqNKoiqMqj6pAHvXXsDcpVChVqWzmMiGWhBDEEgAASnDmWa1H+cCrr0xsft9hntm5qM/jHC+M6gt5wIuk+kiqr6T6TE7lMiGWhBDEEgAAEEvEErE01GRVo7FqVFZV5jXAjkZr1aitGr11BpcIEEtCEEsAAEAsCWJpqEmr5oXU/JCaJ1JfYA75/2seyc6kMK8kAGJJCGIJAACIJUEssyats9IsS9OTZl9SqEb2pVmfFJq0tvPqBsSSEMQSAAAQS4JYGpLEhV4ad3uJlExu8XI5K2HOSEAsCUEsAQAAsSSIpUfNVbuSQvNVNWNVc9Yj/v9q5jovoUkrIJaEIJYAAIBYEsQyQAPoaCAdDaijgXU0Z6SqkhpwR1XKDl6lgFgSglgCAABiSRDLE9/bk0L/x3VJYYoPNWndnxSm/lieFKYCoUkrIJaEIJYAAIBYEsQyozkpjMSqOSO3pTmYFOaM3JFmTZrupDCSKwBiSQhiCUNl4vsm7dVFJYRUPu0f+OBrvMsglmTcxHJ6msVpNib9TVrfSApNWnX7NF5tgFgSglhChdAF5YlFyOjkvPPff5x3GcSSjIlYqtI4P83aNDt9JfJAml5foZzrK5YAiCUhBLFELAlBLBFLglhmgjjHC+NWL5ASyV1eLNVnciqvJEAsCSGIJWJJCGKJWPIcQiwNNVldlBSasL6eFAbYUdNWjdq6JCmM4gqAWBJCEEvEkhDEEhBLxDJDTVo1L6Tmh9SgOvrQ17yRGmxH80hq8B3mjATEkhCCWCKWhCCWgFgilhmaxmNWUpjWoyfNPl+N7EuzPik0aW3jFQGIJe8hhCCWiCUhiCUglsTEsj3NQi+Nu71ESia3pFnqJRMAEEtCEEvEkhCCWCKW5Jh7df9v3JZnX3J33vOgu7r7M+6UU0759/ShlVxuT7MqKTR3pUkrAGJJCGKJWBJCEEvEslHz549sdpMmT3bpY+W+/+Ir7pmdr7o1D29yN9y6xF1y2Ux3+sRm97HLr3CLly5333z8aTdxYvN/5ZkNgFgSglgiloQQxBKxJCdy6qkTXPpQZZl8xpmu4+KPuPk33uK+uvaRTDLffPfoaE43AoBYEkIQS8SSEMQSEMtaz2mnnZZJ5amnnqrn+WhPNwLQsDQ1nfYbvX4IIZXPaaed/ibvMoglIYglYknGMQ+s/0t36oQJ7oyzWt2mrdsRSwAAAMQSsSQEsUQsSdXMYwkAAACIJSEEsUQsCWIJAACAWBJCEEvEkiCWAAAAgFgSglgilgSxBAAAAMSSEMQSsSSIJQAAACCWhCCWiCViSRBLAAAAxJIQglgilgSxBAAAAMSSEMQSsSSIJQAAACCWhCCWiCVBLAEAAACxJASxBMQSsQQAAADEkhCCWCKWBLEEAAAAxJIQxBKxJIglAAAAIJaEIJaIJUEsAQAAALEkBLEExBKxBAAAAMSSEIJYIpYEsQQAAADEkhDEErEkg8+mrdvdlHPOdR/88CXumZ2vIpYAAACAWBKCWCKWZGiZMKHJpQ+Ru+w/znYXdnwIsQQAA
ADEkhDEErEkQ4ukUmm7oN2d1Xq2677uJvfgo4+7H7/1DmIJAACAWPJliRDEErEk5XPvQxvcqaeemlUuv/5/9Lg1D29y11x7vZs0ebK7dOZs98U7v+KefO5H7s13jyKWAAAAiCUhBLFELMngI5Hc8uxL7rY77nLTLrnUnXHmWQOqmYglAAAAYkkIQSwRSzKkvLzn7QHVzFNOOfXf0od2bZrONE08ywEAABBLQghiiViSIVUzTzvt9P+WPrTr0uxLczRNb5oladp4xgMAACCWhBDEErEkQx0Vtj3N0jTb0hxL80ZCNRMAGgP9oNZXJz+q6RxaxnH/rWlWp9nlr+n2NIt5iiGWhCCWgFg2jliGSCS7EqqZANAYrEkKo2t31Ph5zPfv1+N1HvqB8mAafc/akaYnzV5/bXt5miGWhCCWgFg2nljmfVmgmgkAiCXnUYqtfv9d0e09/vb5PNUQS0IQS0AsG1ssQ6hmAkCji2Wr/2FtVpkf15r8Mp1+nVI0p5lbZptaZo7f3tRhnMcM//7dXuZYWvw+mnNun+uPoTlnPf3w+HqR/eq4NvJUQywJQSwBsUQsi0E1EwDqUSxX+Nu2Be9lLf6HtOP+PuWI/2EtZqm/z5bTOluSgf0fbb/WhNWWPZycXN1bFi2j7Ap+0OuL7jsYrKttHYju1/LTgmW6/O3Lg+M+7M+9yf+Y+F6wvt7vV0fHKCmennMt5vh1NvBUQywJQSwBsUQsBwPVTACoB7HMk8omL2PH/fKqwql6t8MvuziSShO/Ti9bK/26u3L2e8z/v8Mvv98vO8Mv15n091Oc5YXQtrcjkDprcrrIH5vo9stpmwv8sSzz+zwcvDebWOp9e6d/H1/r79vi79vi9xPua+0gru+WhKawiCUhiCUglojlCKCaCQC1JpaLcqRSLPS3r8r5QW2flzTR7OVsX8773Gq/jXnRfuMmotO8DG6LlourgWujdfMqr/v98cQ/7nVH+zaxfCNabkZSfPCdHf44p5a4tosDyQbEkhDEEhBLxHLEUM0EgGoXS6sC7s6Rwi1J8f6L6/x9swJBW52z3PRkYJPQNcF6Ma/798kkkN19/se69kEIsv24p797iix/yItnKJZromVW+tu7S0jjoiLbX5r0V0un8jRDLAlBLAGxRCxHA6qZAFBtYml9IY/lCGTchzEvXf7HsnLL9UT7zRsIx5qa2qA/m5OBfTv3+/fMqSXEspgsxucULhv3F908iPPJk+i1/r7d/HiIWBKCWAJiWQP5du8Od27bBe7sqee6Z19+rZbEMoRqJgBUg1iqkjg36R/cJk/ClpRIWyCWW0os1xnttyXnmLblSKf9INeb9A/kcyiQz1gsO8uIpaTvvUGK5coS5zMrej+3Y99e5NwAsSQEsUQsef5UWya3nHHiV+Nz286vVbGMoZoJAOMhliZkG5P+EVJjwcob9dQG8mkOZC5vUBsJ4LzgBzPb75ycZVWRtJFdO5L+fpmhwFnz3EVFzmNq0t9fNO8HPesLWkosbSCjhTnb6PDn2xJsc0cg6bxfI5aEIJaAWNZK9AE+9bw219z8Pjdp8mR39wPfcHsOHKl1sYy//OgLD9VMABgrsWzxUhc2iZ3vl9karatlNZXHe17k9J6l99YjOe9R1hdzabTfeJs2sM56//dW//e0MtJn25sRLNPnj21GtO6ySICLiaXOX9+bXs8RxV3JwD6i65PSFVJALAlBLAGxrNY89NgT7vTTJ7oJE5rcA9/c7K7u/oybcs55mWC++e7RehDLGKqZADDaYhmKZNgkttffttO/Dy3z70HOS1647nEvp9aE1Cqe4cBAYd/ObX65Nf69TbJqTVxn5GxvdbCcNZe1ZriSQBsgaJZf7ohfR8tsTPoH1WktI5ZJ0t9f8g1/zkv9NbAmv/befDy4ZnlZxVMNsSQEsQTEsobyzM5XM8G8sOND7sFHHy8qmDUqliFUMwGgEqzw0haPtrrR374weM9Z7W8zIdR7T96oqF1eIk22jnjZa8kR2pWBoKq62JPzHhZvT7K4NVquxQuq3gsPB8Kp5rs7/LbDY2kN1p0bnWvM4uAYnV92VSDJ8/1tpbKepxpiSQhiCYhlDWbLsy+5OVdclQmmKpt1KJYxVDMBoJYwseziUgBiSQhiiViSmhDMj11+hbvkspluwxPP1LNYhlDNBADEEgCxJASxRCxJpSOplFwqmqakzsUyhmomACCWgFgSQhBLxJJUUjA7Lv6IO/XUU/+/Bv1CQzUTAKqBBUmhP+V0LgUgloQgloglqcloQJ+m007770lhREGN1jergZ/eVDMBAACxJIQgloglGU58U1iJkyp1h9Jsb3DBTBKqmQAAgFgSQhBLxJIMWSwNDUuvIfcPe5GaxrM+g2omAAAgloQQxBKxJIMUy1AwNaea5jnrQTAHQDUTAAAQS0IIYolYkiHMY6mJs9d4wdyMOOVCNRMAABBLQhBLQCwRy0E89BJMVehUnduAYBaFaiYAACCWhCCWgFgilmVo82IpYVrjhROKQzUTAAAQS0IQS0AsEcsSgqm+l0cQzEFDNRMAABBLQhBLQCwRyxymecHUNjSabDOvkkFDNRMAABBLQhBLxJIglgHTk8L8lwjm8KCaCQAAiCUhiCViSRpeLI1ZXjAPeCmi8jY8qGYCAABiSQhiiViShhVLQwLUFwgmDB+qmQAAgFgSglgilqQhxdLo8oK5N80CXkEVgWomAAAgloQgloglaSixNBZ4uVS6eSVVDKqZAACAWNZyfvjaL90Lr/zcbXn2Jbdp63b34KOPu6+ufcTd/qV73eeXrXDX3bzYzb/xFjfniqtOyqUzZ7sL2i8adD46+/Lc7Wgfym133JXtd83Dm7Lj0DE9+dyPsmP88Vvv8HghlpUim78wFcv/h+cPYjlCwZQA9XkhgspCNXMsL/ZFH/yXqee2vUcIqXzOv+DCV3iXQSxrOn0/+5V7Zuer7ltP/SATRUmbBPFjl1+RSd6EpiaXPjSSEndRx8Xu41de5a66+lPuxkWfc5/7T7e7O+9e7f7sa+vd+o1/5b6Z5pnnXz4pL/79q+61vW+flD373sm9fduLf3/SNp7+2x9m+1Duvm+t+9/S/d68+AvuhvQ4dEyzP36Fe/+FH3C/dfaU7HgnTZ7sOi7+iPtE59WZjEpEJaHf7t2RCfKeA0cQSyiGppPY4ishEsv/znsFYlmBKpsqage8YM7hZUY1sxbRl1/eQwgZnciDeJdBLKs+r+7/jfveiz/NxGrx0uXuyq5PZdI1cWKzO+PMs9z0y2a6rk/9vrt1yR+5r6TStmFzj/tBKneSvAP/ctT98397r+byi3d/437yj3vd08/90K3b8JeZiJqEXtTxoUyYp55zXlZN7b7uJnfnPQ9mcv3ynrcRy8Zllv8Sqjf2NWladSNNYav//U2tKIbyY9E4iGUsmAeTwkiys3jZUc1ELAkhiCViWZVRNe6bjz+dVeiu7v6M+6AXSMnjjYv+0N3zwDr3ZO/ful3/8Kbb/89HalIaKxVVTLe//FP36OYn3dI7VmRyLdmUbKtie8sXlrnV
[... base64-encoded PNG image data elided ...]"></image></g></g></svg>
+
diff --git a/doc/images/Keep_reading_writing_block.svg b/doc/images/Keep_reading_writing_block.svg
new file mode 100644 (file)
index 0000000..7c0a4c4
--- /dev/null
+++ b/doc/images/Keep_reading_writing_block.svg
@@ -0,0 +1,7 @@
+<?xml version="1.0" standalone="yes"?>
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0 -->
+
+<svg version="1.1" viewBox="0.0 0.0 960.0 540.0" fill="none" stroke="none" stroke-linecap="square" stroke-miterlimit="10" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"><!-- slide-exported vector body elided: a grey cloud shape, colored boxes, and connecting arrows, with all labels rendered as glyph-outline paths rather than text; the raw coordinate data carries nothing reviewable --></svg>
+
diff --git a/doc/images/Keep_rendezvous_hashing.svg b/doc/images/Keep_rendezvous_hashing.svg
new file mode 100644
index 0000000..c80084d
--- /dev/null
+++ b/doc/images/Keep_rendezvous_hashing.svg
@@ -0,0 +1,7 @@
+<?xml version="1.0" standalone="yes"?>
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0 -->
+
+<svg version="1.1" viewBox="0.0 0.0 960.0 540.0" fill="none" stroke="none" stroke-linecap="square" stroke-miterlimit="10" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"><!-- slide-exported vector body elided: the diagram illustrating Keep's rendezvous hashing (per the filename), with all labels rendered as glyph-outline paths rather than text --></svg>
-2.5625l1.984375 -7.296875l2.171875 0l1.859375 7.21875l0.625 2.390625l0.71875 -2.40625l2.125 -7.203125l2.046875 0l-3.890625 12.453125l-2.1875 0l-1.984375 -7.453125l-0.46875 -2.125l-2.53125 9.578125l-2.203125 0zm23.566406 -4.015625l2.171875 0.28125q-0.515625 1.90625 -1.90625 2.96875q-1.390625 1.046875 -3.5625 1.046875q-2.734375 0 -4.34375 -1.671875q-1.59375 -1.6875 -1.59375 -4.734375q0 -3.140625 1.609375 -4.875q1.625 -1.734375 4.203125 -1.734375q2.5 0 4.078125 1.703125q1.59375 1.703125 1.59375 4.78125q0 0.1875 -0.015625 0.5625l-9.28125 0q0.109375 2.046875 1.15625 3.140625q1.046875 1.09375 2.609375 1.09375q1.15625 0 1.96875 -0.609375q0.828125 -0.609375 1.3125 -1.953125zm-6.9375 -3.40625l6.953125 0q-0.140625 -1.5625 -0.796875 -2.359375q-1.0 -1.21875 -2.609375 -1.21875q-1.453125 0 -2.453125 0.984375q-0.984375 0.96875 -1.09375 2.59375zm11.769531 -7.328125l0 -2.4375l2.109375 0l0 2.4375l-2.109375 0zm0 14.75l0 -12.453125l2.109375 0l0 12.453125l-2.109375 0zm4.9414062 1.03125l2.046875 0.3125q0.125 0.9375 0.71875 1.375q0.78125 0.59375 2.140625 0.59375q1.46875 0 2.265625 -0.59375q0.796875 -0.578125 1.078125 -1.640625q0.15625 -0.640625 0.140625 -2.703125q-1.375 1.625 -3.4375 1.625q-2.5625 0 -3.96875 -1.84375q-1.40625 -1.859375 -1.40625 -4.453125q0 -1.78125 0.640625 -3.28125q0.640625 -1.515625 1.859375 -2.328125q1.234375 -0.828125 2.890625 -0.828125q2.203125 0 3.625 1.78125l0 -1.5l1.953125 0l0 10.765625q0 2.90625 -0.59375 4.109375q-0.59375 1.21875 -1.875 1.921875q-1.28125 0.703125 -3.15625 0.703125q-2.234375 0 -3.609375 -1.0q-1.359375 -1.0 -1.3125 -3.015625zm1.734375 -7.484375q0 2.453125 0.96875 3.578125q0.984375 1.125 2.453125 1.125q1.453125 0 2.4375 -1.109375q0.984375 -1.125 0.984375 -3.515625q0 -2.28125 -1.015625 -3.4375q-1.015625 -1.171875 -2.453125 -1.171875q-1.40625 0 -2.390625 1.140625q-0.984375 1.140625 -0.984375 3.390625zm11.988281 6.453125l0 -17.1875l2.109375 0l0 6.171875q1.484375 -1.71875 3.734375 -1.71875q1.375 0 2.390625 0.546875q1.03125 0.546875 1.46875 1.515625q0.4375 0.953125 0.4375 2.78125l0 7.890625l-2.109375 0l0 -7.890625q0 -1.578125 -0.6875 -2.296875q-0.6875 -0.71875 -1.9375 -0.71875q-0.9375 0 -1.765625 0.484375q-0.828125 0.484375 -1.1875 1.3125q-0.34375 0.828125 -0.34375 2.296875l0 6.8125l-2.109375 0zm17.957031 -1.890625l0.3125 1.859375q-0.890625 0.203125 -1.59375 0.203125q-1.15625 0 -1.796875 -0.359375q-0.625 -0.375 -0.890625 -0.96875q-0.25 -0.59375 -0.25 -2.484375l0 -7.171875l-1.546875 0l0 -1.640625l1.546875 0l0 -3.078125l2.09375 -1.265625l0 4.34375l2.125 0l0 1.640625l-2.125 0l0 7.28125q0 0.90625 0.109375 1.171875q0.125 0.25 0.375 0.40625q0.25 0.140625 0.71875 0.140625q0.34375 0 0.921875 -0.078125zm19.835938 -8.21875l-11.34375 0l0 -1.96875l11.34375 0l0 1.96875zm0 5.21875l-11.34375 0l0 -1.96875l11.34375 0l0 1.96875zm9.5742035 4.890625l0 -17.1875l2.109375 0l0 6.171875q1.484375 -1.71875 3.734375 -1.71875q1.375 0 2.390625 0.546875q1.03125 0.546875 1.46875 1.515625q0.4375 0.953125 0.4375 2.78125l0 7.890625l-2.109375 0l0 -7.890625q0 -1.578125 -0.6875 -2.296875q-0.6875 -0.71875 -1.9375 -0.71875q-0.9375 0 -1.765625 0.484375q-0.828125 0.484375 -1.1875 1.3125q-0.34375 0.828125 -0.34375 2.296875l0 6.8125l-2.109375 0zm12.566406 -6.21875q0 -3.46875 1.921875 -5.125q1.609375 -1.390625 3.921875 -1.390625q2.5625 0 4.1875 1.6875q1.625 1.6875 1.625 4.640625q0 2.40625 -0.71875 3.78125q-0.71875 1.375 -2.09375 2.140625q-1.375 0.765625 -3.0 0.765625q-2.625 0 -4.234375 -1.671875q-1.609375 -1.6875 -1.609375 -4.828125zm2.171875 0q0 2.390625 1.03125 3.578125q1.046875 1.1875 2.640625 1.1875q1.5625 0 2.609375 
-1.1875q1.046875 -1.203125 1.046875 -3.65625q0 -2.3125 -1.0625 -3.5q-1.046875 -1.1875 -2.59375 -1.1875q-1.59375 0 -2.640625 1.1875q-1.03125 1.1875 -1.03125 3.578125zm14.253906 6.21875l-3.796875 -12.453125l2.171875 0l1.984375 7.1875l0.734375 2.671875q0.046875 -0.203125 0.640625 -2.5625l1.984375 -7.296875l2.171875 0l1.859375 7.21875l0.625 2.390625l0.71875 -2.40625l2.125 -7.203125l2.046875 0l-3.890625 12.453125l-2.1875 0l-1.984375 -7.453125l-0.46875 -2.125l-2.53125 9.578125l-2.203125 0zm29.828125 -1.53125q-1.171875 0.984375 -2.265625 1.40625q-1.078125 0.40625 -2.3125 0.40625q-2.046875 0 -3.15625 -1.0q-1.09375 -1.0 -1.09375 -2.5625q0 -0.921875 0.40625 -1.671875q0.421875 -0.75 1.09375 -1.203125q0.671875 -0.46875 1.515625 -0.703125q0.625 -0.15625 1.875 -0.3125q2.5625 -0.3125 3.765625 -0.734375q0.015625 -0.421875 0.015625 -0.546875q0 -1.28125 -0.609375 -1.8125q-0.796875 -0.71875 -2.390625 -0.71875q-1.5 0 -2.203125 0.53125q-0.703125 0.515625 -1.046875 1.84375l-2.0625 -0.28125q0.28125 -1.328125 0.921875 -2.140625q0.640625 -0.8125 1.859375 -1.25q1.21875 -0.453125 2.828125 -0.453125q1.59375 0 2.59375 0.375q1.0 0.375 1.46875 0.953125q0.46875 0.5625 0.65625 1.4375q0.09375 0.53125 0.09375 1.9375l0 2.8125q0 2.9375 0.140625 3.71875q0.140625 0.78125 0.53125 1.5l-2.203125 0q-0.328125 -0.65625 -0.421875 -1.53125zm-0.171875 -4.71875q-1.15625 0.46875 -3.453125 0.796875q-1.296875 0.1875 -1.84375 0.421875q-0.53125 0.234375 -0.828125 0.6875q-0.28125 0.453125 -0.28125 1.0q0 0.84375 0.625 1.40625q0.640625 0.5625 1.875 0.5625q1.21875 0 2.171875 -0.53125q0.953125 -0.53125 1.390625 -1.453125q0.34375 -0.71875 0.34375 -2.109375l0 -0.78125zm5.3945312 11.015625l0 -17.21875l1.921875 0l0 1.625q0.6875 -0.953125 1.53125 -1.421875q0.859375 -0.484375 2.078125 -0.484375q1.59375 0 2.8125 0.828125q1.21875 0.8125 1.84375 2.3125q0.625 1.5 0.625 3.28125q0 1.90625 -0.6875 3.4375q-0.6875 1.53125 -2.0 2.34375q-1.296875 0.8125 -2.734375 0.8125q-1.0625 0 -1.90625 -0.4375q-0.828125 -0.453125 -1.375 -1.140625l0 6.0625l-2.109375 0zm1.921875 -10.921875q0 2.40625 0.96875 3.5625q0.96875 1.140625 2.359375 1.140625q1.40625 0 2.40625 -1.1875q1.0 -1.1875 1.0 -3.6875q0 -2.375 -0.984375 -3.5625q-0.96875 -1.1875 -2.328125 -1.1875q-1.359375 0 -2.390625 1.265625q-1.03125 1.25 -1.03125 3.65625zm11.425781 10.921875l0 -17.21875l1.921875 0l0 1.625q0.6875 -0.953125 1.53125 -1.421875q0.859375 -0.484375 2.078125 -0.484375q1.59375 0 2.8125 0.828125q1.21875 0.8125 1.84375 2.3125q0.625 1.5 0.625 3.28125q0 1.90625 -0.6875 3.4375q-0.6875 1.53125 -2.0 2.34375q-1.296875 0.8125 -2.734375 0.8125q-1.0625 0 -1.90625 -0.4375q-0.828125 -0.453125 -1.375 -1.140625l0 6.0625l-2.109375 0zm1.921875 -10.921875q0 2.40625 0.96875 3.5625q0.96875 1.140625 2.359375 1.140625q1.40625 0 2.40625 -1.1875q1.0 -1.1875 1.0 -3.6875q0 -2.375 -0.984375 -3.5625q-0.96875 -1.1875 -2.328125 -1.1875q-1.359375 0 -2.390625 1.265625q-1.03125 1.25 -1.03125 3.65625zm11.410156 6.15625l0 -12.453125l1.890625 0l0 1.890625q0.734375 -1.328125 1.34375 -1.75q0.625 -0.421875 1.359375 -0.421875q1.0625 0 2.171875 0.6875l-0.734375 1.953125q-0.765625 -0.453125 -1.546875 -0.453125q-0.6875 0 -1.25 0.421875q-0.546875 0.40625 -0.78125 1.140625q-0.34375 1.125 -0.34375 2.46875l0 6.515625l-2.109375 0zm7.2265625 -6.21875q0 -3.46875 1.921875 -5.125q1.609375 -1.390625 3.921875 -1.390625q2.5625 0 4.1875 1.6875q1.625 1.6875 1.625 4.640625q0 2.40625 -0.71875 3.78125q-0.71875 1.375 -2.09375 2.140625q-1.375 0.765625 -3.0 0.765625q-2.625 0 -4.234375 -1.671875q-1.609375 -1.6875 -1.609375 -4.828125zm2.171875 0q0 2.390625 1.03125 
3.578125q1.046875 1.1875 2.640625 1.1875q1.5625 0 2.609375 -1.1875q1.046875 -1.203125 1.046875 -3.65625q0 -2.3125 -1.0625 -3.5q-1.046875 -1.1875 -2.59375 -1.1875q-1.59375 0 -2.640625 1.1875q-1.03125 1.1875 -1.03125 3.578125zm11.957031 10.984375l0 -17.21875l1.921875 0l0 1.625q0.6875 -0.953125 1.53125 -1.421875q0.859375 -0.484375 2.078125 -0.484375q1.59375 0 2.8125 0.828125q1.21875 0.8125 1.84375 2.3125q0.625 1.5 0.625 3.28125q0 1.90625 -0.6875 3.4375q-0.6875 1.53125 -2.0 2.34375q-1.296875 0.8125 -2.734375 0.8125q-1.0625 0 -1.90625 -0.4375q-0.828125 -0.453125 -1.375 -1.140625l0 6.0625l-2.109375 0zm1.921875 -10.921875q0 2.40625 0.96875 3.5625q0.96875 1.140625 2.359375 1.140625q1.40625 0 2.40625 -1.1875q1.0 -1.1875 1.0 -3.6875q0 -2.375 -0.984375 -3.5625q-0.96875 -1.1875 -2.328125 -1.1875q-1.359375 0 -2.390625 1.265625q-1.03125 1.25 -1.03125 3.65625zm11.410156 6.15625l0 -12.453125l1.890625 0l0 1.890625q0.734375 -1.328125 1.34375 -1.75q0.625 -0.421875 1.359375 -0.421875q1.0625 0 2.171875 0.6875l-0.734375 1.953125q-0.765625 -0.453125 -1.546875 -0.453125q-0.6875 0 -1.25 0.421875q-0.546875 0.40625 -0.78125 1.140625q-0.34375 1.125 -0.34375 2.46875l0 6.515625l-2.109375 0zm8.0234375 -14.75l0 -2.4375l2.109375 0l0 2.4375l-2.109375 0zm0 14.75l0 -12.453125l2.109375 0l0 12.453125l-2.109375 0zm13.441406 -1.53125q-1.171875 0.984375 -2.265625 1.40625q-1.078125 0.40625 -2.3125 0.40625q-2.046875 0 -3.15625 -1.0q-1.09375 -1.0 -1.09375 -2.5625q0 -0.921875 0.40625 -1.671875q0.421875 -0.75 1.09375 -1.203125q0.671875 -0.46875 1.515625 -0.703125q0.625 -0.15625 1.875 -0.3125q2.5625 -0.3125 3.765625 -0.734375q0.015625 -0.421875 0.015625 -0.546875q0 -1.28125 -0.609375 -1.8125q-0.796875 -0.71875 -2.390625 -0.71875q-1.5 0 -2.203125 0.53125q-0.703125 0.515625 -1.046875 1.84375l-2.0625 -0.28125q0.28125 -1.328125 0.921875 -2.140625q0.640625 -0.8125 1.859375 -1.25q1.21875 -0.453125 2.828125 -0.453125q1.59375 0 2.59375 0.375q1.0 0.375 1.46875 0.953125q0.46875 0.5625 0.65625 1.4375q0.09375 0.53125 0.09375 1.9375l0 2.8125q0 2.9375 0.140625 3.71875q0.140625 0.78125 0.53125 1.5l-2.203125 0q-0.328125 -0.65625 -0.421875 -1.53125zm-0.171875 -4.71875q-1.15625 0.46875 -3.453125 0.796875q-1.296875 0.1875 -1.84375 0.421875q-0.53125 0.234375 -0.828125 0.6875q-0.28125 0.453125 -0.28125 1.0q0 0.84375 0.625 1.40625q0.640625 0.5625 1.875 0.5625q1.21875 0 2.171875 -0.53125q0.953125 -0.53125 1.390625 -1.453125q0.34375 -0.71875 0.34375 -2.109375l0 -0.78125zm10.003906 4.359375l0.3125 1.859375q-0.890625 0.203125 -1.59375 0.203125q-1.15625 0 -1.796875 -0.359375q-0.625 -0.375 -0.890625 -0.96875q-0.25 -0.59375 -0.25 -2.484375l0 -7.171875l-1.546875 0l0 -1.640625l1.546875 0l0 -3.078125l2.09375 -1.265625l0 4.34375l2.125 0l0 1.640625l-2.125 0l0 7.28125q0 0.90625 0.109375 1.171875q0.125 0.25 0.375 0.40625q0.25 0.140625 0.71875 0.140625q0.34375 0 0.921875 -0.078125zm10.589844 -2.125l2.171875 0.28125q-0.515625 1.90625 -1.90625 2.96875q-1.390625 1.046875 -3.5625 1.046875q-2.734375 0 -4.34375 -1.671875q-1.59375 -1.6875 -1.59375 -4.734375q0 -3.140625 1.609375 -4.875q1.625 -1.734375 4.203125 -1.734375q2.5 0 4.078125 1.703125q1.59375 1.703125 1.59375 4.78125q0 0.1875 -0.015625 0.5625l-9.28125 0q0.109375 2.046875 1.15625 3.140625q1.046875 1.09375 2.609375 1.09375q1.15625 0 1.96875 -0.609375q0.828125 -0.609375 1.3125 -1.953125zm-6.9375 -3.40625l6.953125 0q-0.140625 -1.5625 -0.796875 -2.359375q-1.0 -1.21875 -2.609375 -1.21875q-1.453125 0 -2.453125 0.984375q-0.984375 0.96875 -1.09375 2.59375zm18.4375 -7.328125l0 -2.4375l2.109375 0l0 2.4375l-2.109375 0zm0 14.75l0 
-12.453125l2.109375 0l0 12.453125l-2.109375 0zm4.4726562 -3.71875l2.09375 -0.328125q0.171875 1.25 0.96875 1.921875q0.8125 0.671875 2.25 0.671875q1.453125 0 2.15625 -0.59375q0.703125 -0.59375 0.703125 -1.390625q0 -0.71875 -0.625 -1.125q-0.421875 -0.28125 -2.15625 -0.71875q-2.3125 -0.578125 -3.21875 -1.0q-0.890625 -0.4375 -1.359375 -1.1875q-0.453125 -0.765625 -0.453125 -1.671875q0 -0.828125 0.375 -1.53125q0.390625 -0.71875 1.046875 -1.1875q0.484375 -0.359375 1.328125 -0.609375q0.859375 -0.265625 1.828125 -0.265625q1.46875 0 2.578125 0.421875q1.109375 0.421875 1.625 1.15625q0.53125 0.71875 0.734375 1.921875l-2.0625 0.28125q-0.140625 -0.96875 -0.8125 -1.5q-0.671875 -0.546875 -1.90625 -0.546875q-1.453125 0 -2.078125 0.484375q-0.625 0.484375 -0.625 1.125q0 0.40625 0.265625 0.734375q0.25 0.34375 0.8125 0.5625q0.3125 0.125 1.859375 0.546875q2.234375 0.59375 3.109375 0.984375q0.890625 0.375 1.390625 1.109375q0.515625 0.71875 0.515625 1.796875q0 1.046875 -0.625 1.984375q-0.609375 0.9375 -1.765625 1.453125q-1.15625 0.5 -2.625 0.5q-2.421875 0 -3.703125 -1.0q-1.265625 -1.015625 -1.625 -3.0zm18.667969 0l2.09375 -0.328125q0.171875 1.25 0.96875 1.921875q0.8125 0.671875 2.25 0.671875q1.453125 0 2.15625 -0.59375q0.703125 -0.59375 0.703125 -1.390625q0 -0.71875 -0.625 -1.125q-0.421875 -0.28125 -2.15625 -0.71875q-2.3125 -0.578125 -3.21875 -1.0q-0.890625 -0.4375 -1.359375 -1.1875q-0.453125 -0.765625 -0.453125 -1.671875q0 -0.828125 0.375 -1.53125q0.390625 -0.71875 1.046875 -1.1875q0.484375 -0.359375 1.328125 -0.609375q0.859375 -0.265625 1.828125 -0.265625q1.46875 0 2.578125 0.421875q1.109375 0.421875 1.625 1.15625q0.53125 0.71875 0.734375 1.921875l-2.0625 0.28125q-0.140625 -0.96875 -0.8125 -1.5q-0.671875 -0.546875 -1.90625 -0.546875q-1.453125 0 -2.078125 0.484375q-0.625 0.484375 -0.625 1.125q0 0.40625 0.265625 0.734375q0.25 0.34375 0.8125 0.5625q0.3125 0.125 1.859375 0.546875q2.234375 0.59375 3.109375 0.984375q0.890625 0.375 1.390625 1.109375q0.515625 0.71875 0.515625 1.796875q0 1.046875 -0.625 1.984375q-0.609375 0.9375 -1.765625 1.453125q-1.15625 0.5 -2.625 0.5q-2.421875 0 -3.703125 -1.0q-1.265625 -1.015625 -1.625 -3.0zm21.375 -0.296875l2.171875 0.28125q-0.515625 1.90625 -1.90625 2.96875q-1.390625 1.046875 -3.5625 1.046875q-2.734375 0 -4.34375 -1.671875q-1.59375 -1.6875 -1.59375 -4.734375q0 -3.140625 1.609375 -4.875q1.625 -1.734375 4.203125 -1.734375q2.5 0 4.078125 1.703125q1.59375 1.703125 1.59375 4.78125q0 0.1875 -0.015625 0.5625l-9.28125 0q0.109375 2.046875 1.15625 3.140625q1.046875 1.09375 2.609375 1.09375q1.15625 0 1.96875 -0.609375q0.828125 -0.609375 1.3125 -1.953125zm-6.9375 -3.40625l6.953125 0q-0.140625 -1.5625 -0.796875 -2.359375q-1.0 -1.21875 -2.609375 -1.21875q-1.453125 0 -2.453125 0.984375q-0.984375 0.96875 -1.09375 2.59375zm11.738281 7.421875l0 -12.453125l1.890625 0l0 1.890625q0.734375 -1.328125 1.34375 -1.75q0.625 -0.421875 1.359375 -0.421875q1.0625 0 2.171875 0.6875l-0.734375 1.953125q-0.765625 -0.453125 -1.546875 -0.453125q-0.6875 0 -1.25 0.421875q-0.546875 0.40625 -0.78125 1.140625q-0.34375 1.125 -0.34375 2.46875l0 6.515625l-2.109375 0zm11.4765625 0l-4.734375 -12.453125l2.21875 0l2.671875 7.453125q0.4375 1.21875 0.796875 2.515625q0.28125 -0.984375 0.78125 -2.375l2.765625 -7.59375l2.171875 0l-4.703125 12.453125l-1.96875 0zm17.0625 -4.015625l2.171875 0.28125q-0.515625 1.90625 -1.90625 2.96875q-1.390625 1.046875 -3.5625 1.046875q-2.734375 0 -4.34375 -1.671875q-1.59375 -1.6875 -1.59375 -4.734375q0 -3.140625 1.609375 -4.875q1.625 -1.734375 4.203125 -1.734375q2.5 0 4.078125 1.703125q1.59375 1.703125 
1.59375 4.78125q0 0.1875 -0.015625 0.5625l-9.28125 0q0.109375 2.046875 1.15625 3.140625q1.046875 1.09375 2.609375 1.09375q1.15625 0 1.96875 -0.609375q0.828125 -0.609375 1.3125 -1.953125zm-6.9375 -3.40625l6.953125 0q-0.140625 -1.5625 -0.796875 -2.359375q-1.0 -1.21875 -2.609375 -1.21875q-1.453125 0 -2.453125 0.984375q-0.984375 0.96875 -1.09375 2.59375zm11.738281 7.421875l0 -12.453125l1.890625 0l0 1.890625q0.734375 -1.328125 1.34375 -1.75q0.625 -0.421875 1.359375 -0.421875q1.0625 0 2.171875 0.6875l-0.734375 1.953125q-0.765625 -0.453125 -1.546875 -0.453125q-0.6875 0 -1.25 0.421875q-0.546875 0.40625 -0.78125 1.140625q-0.34375 1.125 -0.34375 2.46875l0 6.515625l-2.109375 0zm16.332031 -14.296875l0 2.453125l-2.265625 0l0 -1.9375q0 -1.578125 0.375 -2.28125q0.5 -0.9375 1.5625 -1.40625l0.515625 0.8125q-0.640625 0.265625 -0.953125 0.8125q-0.296875 0.53125 -0.328125 1.546875l1.09375 0zm3.640625 0l0 2.453125l-2.265625 0l0 -1.9375q0 -1.578125 0.375 -2.28125q0.484375 -0.9375 1.5625 -1.40625l0.515625 0.8125q-0.65625 0.265625 -0.96875 0.8125q-0.296875 0.53125 -0.328125 1.546875l1.109375 0zm2.1953735 8.78125l2.140625 -0.1875q0.15625 1.28125 0.703125 2.109375q0.5625 0.828125 1.734375 1.34375q1.171875 0.5 2.640625 0.5q1.296875 0 2.296875 -0.375q1.0 -0.390625 1.484375 -1.0625q0.484375 -0.6875 0.484375 -1.484375q0 -0.796875 -0.46875 -1.40625q-0.46875 -0.609375 -1.546875 -1.015625q-0.6875 -0.265625 -3.0625 -0.828125q-2.359375 -0.578125 -3.3125 -1.078125q-1.234375 -0.640625 -1.84375 -1.59375q-0.59375 -0.96875 -0.59375 -2.140625q0 -1.3125 0.734375 -2.4375q0.75 -1.125 2.15625 -1.703125q1.421875 -0.59375 3.15625 -0.59375q1.90625 0 3.359375 0.609375q1.46875 0.609375 2.25 1.8125q0.796875 1.1875 0.84375 2.703125l-2.171875 0.171875q-0.171875 -1.640625 -1.1875 -2.46875q-1.015625 -0.828125 -3.0 -0.828125q-2.0625 0 -3.015625 0.765625q-0.9375 0.75 -0.9375 1.8125q0 0.921875 0.671875 1.515625q0.65625 0.609375 3.421875 1.234375q2.78125 0.625 3.8125 1.09375q1.5 0.6875 2.203125 1.75q0.71875 1.0625 0.71875 2.4375q0 1.375 -0.78125 2.59375q-0.78125 1.203125 -2.25 1.890625q-1.46875 0.671875 -3.3125 0.671875q-2.328125 0 -3.90625 -0.671875q-1.578125 -0.6875 -2.484375 -2.046875q-0.890625 -1.375 -0.9375 -3.09375zm15.9453125 -9.390625l0 -2.453125l2.265625 0l0 1.9375q0 1.5625 -0.359375 2.265625q-0.5 0.9375 -1.578125 1.421875l-0.515625 -0.828125q0.640625 -0.265625 0.953125 -0.8125q0.3125 -0.5625 0.34375 -1.53125l-1.109375 0zm3.640625 0l0 -2.453125l2.265625 0l0 1.9375q0 1.5625 -0.375 2.265625q-0.5 0.9375 -1.5625 1.421875l-0.515625 -0.828125q0.625 -0.265625 0.9375 -0.8125q0.3125 -0.5625 0.34375 -1.53125l-1.09375 0zm12.097656 14.90625l0 -10.8125l-1.875 0l0 -1.640625l1.875 0l0 -1.3125q0 -1.265625 0.21875 -1.875q0.296875 -0.8125 1.0625 -1.3125q0.78125 -0.515625 2.15625 -0.515625q0.890625 0 1.96875 0.203125l-0.3125 1.84375q-0.65625 -0.125 -1.25 -0.125q-0.953125 0 -1.359375 0.421875q-0.390625 0.40625 -0.390625 1.53125l0 1.140625l2.421875 0l0 1.640625l-2.421875 0l0 10.8125l-2.09375 0zm5.3710938 -6.21875q0 -3.46875 1.921875 -5.125q1.609375 -1.390625 3.921875 -1.390625q2.5625 0 4.1875 1.6875q1.625 1.6875 1.625 4.640625q0 2.40625 -0.71875 3.78125q-0.71875 1.375 -2.09375 2.140625q-1.375 0.765625 -3.0 0.765625q-2.625 0 -4.234375 -1.671875q-1.609375 -1.6875 -1.609375 -4.828125zm2.171875 0q0 2.390625 1.03125 3.578125q1.046875 1.1875 2.640625 1.1875q1.5625 0 2.609375 -1.1875q1.046875 -1.203125 1.046875 -3.65625q0 -2.3125 -1.0625 -3.5q-1.046875 -1.1875 -2.59375 -1.1875q-1.59375 0 -2.640625 1.1875q-1.03125 1.1875 -1.03125 3.578125zm11.941406 6.21875l0 
-12.453125l1.890625 0l0 1.890625q0.734375 -1.328125 1.34375 -1.75q0.625 -0.421875 1.359375 -0.421875q1.0625 0 2.171875 0.6875l-0.734375 1.953125q-0.765625 -0.453125 -1.546875 -0.453125q-0.6875 0 -1.25 0.421875q-0.546875 0.40625 -0.78125 1.140625q-0.34375 1.125 -0.34375 2.46875l0 6.515625l-2.109375 0zm13.832031 -3.71875l2.09375 -0.328125q0.171875 1.25 0.96875 1.921875q0.8125 0.671875 2.25 0.671875q1.453125 0 2.15625 -0.59375q0.703125 -0.59375 0.703125 -1.390625q0 -0.71875 -0.625 -1.125q-0.421875 -0.28125 -2.15625 -0.71875q-2.3125 -0.578125 -3.21875 -1.0q-0.890625 -0.4375 -1.359375 -1.1875q-0.453125 -0.765625 -0.453125 -1.671875q0 -0.828125 0.375 -1.53125q0.390625 -0.71875 1.046875 -1.1875q0.484375 -0.359375 1.328125 -0.609375q0.859375 -0.265625 1.828125 -0.265625q1.46875 0 2.578125 0.421875q1.109375 0.421875 1.625 1.15625q0.53125 0.71875 0.734375 1.921875l-2.0625 0.28125q-0.140625 -0.96875 -0.8125 -1.5q-0.671875 -0.546875 -1.90625 -0.546875q-1.453125 0 -2.078125 0.484375q-0.625 0.484375 -0.625 1.125q0 0.40625 0.265625 0.734375q0.25 0.34375 0.8125 0.5625q0.3125 0.125 1.859375 0.546875q2.234375 0.59375 3.109375 0.984375q0.890625 0.375 1.390625 1.109375q0.515625 0.71875 0.515625 1.796875q0 1.046875 -0.625 1.984375q-0.609375 0.9375 -1.765625 1.453125q-1.15625 0.5 -2.625 0.5q-2.421875 0 -3.703125 -1.0q-1.265625 -1.015625 -1.625 -3.0zm17.453125 1.828125l0.3125 1.859375q-0.890625 0.203125 -1.59375 0.203125q-1.15625 0 -1.796875 -0.359375q-0.625 -0.375 -0.890625 -0.96875q-0.25 -0.59375 -0.25 -2.484375l0 -7.171875l-1.546875 0l0 -1.640625l1.546875 0l0 -3.078125l2.09375 -1.265625l0 4.34375l2.125 0l0 1.640625l-2.125 0l0 7.28125q0 0.90625 0.109375 1.171875q0.125 0.25 0.375 0.40625q0.25 0.140625 0.71875 0.140625q0.34375 0 0.921875 -0.078125zm1.2773438 -4.328125q0 -3.46875 1.921875 -5.125q1.609375 -1.390625 3.921875 -1.390625q2.5625 0 4.1875 1.6875q1.625 1.6875 1.625 4.640625q0 2.40625 -0.71875 3.78125q-0.71875 1.375 -2.09375 2.140625q-1.375 0.765625 -3.0 0.765625q-2.625 0 -4.234375 -1.671875q-1.609375 -1.6875 -1.609375 -4.828125zm2.171875 0q0 2.390625 1.03125 3.578125q1.046875 1.1875 2.640625 1.1875q1.5625 0 2.609375 -1.1875q1.046875 -1.203125 1.046875 -3.65625q0 -2.3125 -1.0625 -3.5q-1.046875 -1.1875 -2.59375 -1.1875q-1.59375 0 -2.640625 1.1875q-1.03125 1.1875 -1.03125 3.578125zm11.941406 6.21875l0 -12.453125l1.890625 0l0 1.890625q0.734375 -1.328125 1.34375 -1.75q0.625 -0.421875 1.359375 -0.421875q1.0625 0 2.171875 0.6875l-0.734375 1.953125q-0.765625 -0.453125 -1.546875 -0.453125q-0.6875 0 -1.25 0.421875q-0.546875 0.40625 -0.78125 1.140625q-0.34375 1.125 -0.34375 2.46875l0 6.515625l-2.109375 0zm8.0234375 -14.75l0 -2.4375l2.109375 0l0 2.4375l-2.109375 0zm0 14.75l0 -12.453125l2.109375 0l0 12.453125l-2.109375 0zm5.3164062 0l0 -12.453125l1.90625 0l0 1.78125q1.375 -2.0625 3.953125 -2.0625q1.125 0 2.0625 0.40625q0.953125 0.40625 1.421875 1.0625q0.46875 0.65625 0.65625 1.5625q0.125 0.578125 0.125 2.046875l0 7.65625l-2.109375 0l0 -7.578125q0 -1.28125 -0.25 -1.921875q-0.25 -0.640625 -0.875 -1.015625q-0.625 -0.390625 -1.46875 -0.390625q-1.34375 0 -2.328125 0.859375q-0.984375 0.859375 -0.984375 3.25l0 6.796875l-2.109375 0zm12.972656 1.03125l2.046875 0.3125q0.125 0.9375 0.71875 1.375q0.78125 0.59375 2.140625 0.59375q1.46875 0 2.265625 -0.59375q0.796875 -0.578125 1.078125 -1.640625q0.15625 -0.640625 0.140625 -2.703125q-1.375 1.625 -3.4375 1.625q-2.5625 0 -3.96875 -1.84375q-1.40625 -1.859375 -1.40625 -4.453125q0 -1.78125 0.640625 -3.28125q0.640625 -1.515625 1.859375 -2.328125q1.234375 -0.828125 2.890625 
-0.828125q2.203125 0 3.625 1.78125l0 -1.5l1.953125 0l0 10.765625q0 2.90625 -0.59375 4.109375q-0.59375 1.21875 -1.875 1.921875q-1.28125 0.703125 -3.15625 0.703125q-2.234375 0 -3.609375 -1.0q-1.359375 -1.0 -1.3125 -3.015625zm1.734375 -7.484375q0 2.453125 0.96875 3.578125q0.984375 1.125 2.453125 1.125q1.453125 0 2.4375 -1.109375q0.984375 -1.125 0.984375 -3.515625q0 -2.28125 -1.015625 -3.4375q-1.015625 -1.171875 -2.453125 -1.171875q-1.40625 0 -2.390625 1.140625q-0.984375 1.140625 -0.984375 3.390625zm20.609375 6.453125l-1.953125 0l0 -17.1875l2.109375 0l0 6.140625q1.328125 -1.6875 3.40625 -1.6875q1.140625 0 2.171875 0.46875q1.03125 0.46875 1.6875 1.3125q0.65625 0.828125 1.03125 2.015625q0.375 1.171875 0.375 2.53125q0 3.1875 -1.578125 4.9375q-1.578125 1.75 -3.796875 1.75q-2.203125 0 -3.453125 -1.84375l0 1.5625zm-0.03125 -6.3125q0 2.234375 0.609375 3.234375q1.0 1.625 2.703125 1.625q1.375 0 2.375 -1.203125q1.015625 -1.203125 1.015625 -3.578125q0 -2.4375 -0.96875 -3.59375q-0.953125 -1.171875 -2.328125 -1.171875q-1.390625 0 -2.40625 1.203125q-1.0 1.203125 -1.0 3.484375zm11.378906 6.3125l0 -17.1875l2.109375 0l0 17.1875l-2.109375 0zm4.5976562 -6.21875q0 -3.46875 1.921875 -5.125q1.609375 -1.390625 3.921875 -1.390625q2.5625 0 4.1875 1.6875q1.625 1.6875 1.625 4.640625q0 2.40625 -0.71875 3.78125q-0.71875 1.375 -2.09375 2.140625q-1.375 0.765625 -3.0 0.765625q-2.625 0 -4.234375 -1.671875q-1.609375 -1.6875 -1.609375 -4.828125zm2.171875 0q0 2.390625 1.03125 3.578125q1.046875 1.1875 2.640625 1.1875q1.5625 0 2.609375 -1.1875q1.046875 -1.203125 1.046875 -3.65625q0 -2.3125 -1.0625 -3.5q-1.046875 -1.1875 -2.59375 -1.1875q-1.59375 0 -2.640625 1.1875q-1.03125 1.1875 -1.03125 3.578125zm20.082031 1.65625l2.078125 0.265625q-0.34375 2.15625 -1.75 3.375q-1.390625 1.203125 -3.4375 1.203125q-2.546875 0 -4.109375 -1.671875q-1.546875 -1.671875 -1.546875 -4.78125q0 -2.015625 0.671875 -3.53125q0.671875 -1.515625 2.03125 -2.265625q1.359375 -0.765625 2.96875 -0.765625q2.03125 0 3.3125 1.03125q1.296875 1.03125 1.65625 2.90625l-2.046875 0.328125q-0.296875 -1.265625 -1.046875 -1.890625q-0.734375 -0.640625 -1.796875 -0.640625q-1.59375 0 -2.59375 1.15625q-0.984375 1.140625 -0.984375 3.609375q0 2.5 0.953125 3.640625q0.96875 1.140625 2.515625 1.140625q1.234375 0 2.0625 -0.765625q0.84375 -0.765625 1.0625 -2.34375zm3.890625 4.5625l0 -17.1875l2.109375 0l0 9.796875l5.0 -5.0625l2.71875 0l-4.75 4.625l5.234375 7.828125l-2.59375 0l-4.125 -6.359375l-1.484375 1.421875l0 4.9375l-2.109375 0zm20.308594 -14.296875l0 2.453125l-2.265625 0l0 -1.9375q0 -1.578125 0.375 -2.28125q0.5 -0.9375 1.5625 -1.40625l0.515625 0.8125q-0.640625 0.265625 -0.953125 0.8125q-0.296875 0.53125 -0.328125 1.546875l1.09375 0zm3.640625 0l0 2.453125l-2.265625 0l0 -1.9375q0 -1.578125 0.375 -2.28125q0.484375 -0.9375 1.5625 -1.40625l0.515625 0.8125q-0.65625 0.265625 -0.96875 0.8125q-0.296875 0.53125 -0.328125 1.546875l1.109375 0zm2.8828125 14.296875l0 -17.1875l6.4375 0q1.96875 0 3.15625 0.53125q1.1875 0.515625 1.859375 1.609375q0.6875 1.078125 0.6875 2.265625q0 1.09375 -0.609375 2.078125q-0.59375 0.96875 -1.796875 1.5625q1.5625 0.453125 2.390625 1.5625q0.84375 1.09375 0.84375 2.59375q0 1.203125 -0.515625 2.25q-0.5 1.03125 -1.25 1.59375q-0.75 0.5625 -1.890625 0.859375q-1.125 0.28125 -2.765625 0.28125l-6.546875 0zm2.265625 -9.96875l3.71875 0q1.515625 0 2.171875 -0.1875q0.859375 -0.265625 1.296875 -0.859375q0.4375 -0.59375 0.4375 -1.5q0 -0.859375 -0.40625 -1.5q-0.40625 -0.65625 -1.171875 -0.890625q-0.765625 -0.25 -2.609375 -0.25l-3.4375 0l0 5.1875zm0 7.9375l4.28125 0q1.09375 0 1.546875 
-0.078125q0.78125 -0.140625 1.3125 -0.46875q0.53125 -0.328125 0.859375 -0.953125q0.34375 -0.625 0.34375 -1.453125q0 -0.953125 -0.5 -1.65625q-0.484375 -0.71875 -1.359375 -1.0q-0.875 -0.296875 -2.515625 -0.296875l-3.96875 0l0 5.90625zm12.9921875 -12.875l0 -2.453125l2.265625 0l0 1.9375q0 1.5625 -0.359375 2.265625q-0.5 0.9375 -1.578125 1.421875l-0.515625 -0.828125q0.640625 -0.265625 0.953125 -0.8125q0.3125 -0.5625 0.34375 -1.53125l-1.109375 0zm3.640625 0l0 -2.453125l2.265625 0l0 1.9375q0 1.5625 -0.375 2.265625q-0.5 0.9375 -1.5625 1.421875l-0.515625 -0.828125q0.625 -0.265625 0.9375 -0.8125q0.3125 -0.5625 0.34375 -1.53125l-1.09375 0zm8.8671875 10.671875q-0.015625 -0.421875 -0.015625 -0.625q0 -1.25 0.359375 -2.15625q0.25 -0.671875 0.828125 -1.359375q0.421875 -0.515625 1.515625 -1.46875q1.09375 -0.96875 1.421875 -1.546875q0.328125 -0.578125 0.328125 -1.25q0 -1.234375 -0.96875 -2.15625q-0.953125 -0.9375 -2.34375 -0.9375q-1.34375 0 -2.25 0.84375q-0.90625 0.84375 -1.1875 2.625l-2.15625 -0.25q0.28125 -2.40625 1.734375 -3.671875q1.453125 -1.28125 3.828125 -1.28125q2.515625 0 4.015625 1.375q1.5 1.359375 1.5 3.3125q0 1.125 -0.53125 2.078125q-0.515625 0.9375 -2.0625 2.296875q-1.03125 0.921875 -1.34375 1.359375q-0.3125 0.421875 -0.46875 0.984375q-0.15625 0.5625 -0.171875 1.828125l-2.03125 0zm-0.125 4.234375l0 -2.40625l2.40625 0l0 2.40625l-2.40625 0z" fill-rule="nonzero"></path><path stroke="#9e9e9e" stroke-width="1.0" stroke-linecap="butt" d="m44.666668 199.10498l0 245.38583" fill-rule="nonzero"></path><path stroke="#9e9e9e" stroke-width="1.0" stroke-linecap="butt" d="m340.52756 199.10498l0 245.38583" fill-rule="nonzero"></path><path stroke="#9e9e9e" stroke-width="1.0" stroke-linecap="butt" d="m636.3884 199.10498l0 245.38583" fill-rule="nonzero"></path><path stroke="#9e9e9e" stroke-width="1.0" stroke-linecap="butt" d="m932.2493 199.10498l0 245.38583" fill-rule="nonzero"></path><path stroke="#9e9e9e" stroke-width="1.0" stroke-linecap="butt" d="m44.16798 199.60367l888.5801 0" fill-rule="nonzero"></path><path stroke="#9e9e9e" stroke-width="1.0" stroke-linecap="butt" d="m44.16798 240.80052l888.5801 0" fill-rule="nonzero"></path><path stroke="#9e9e9e" stroke-width="1.0" stroke-linecap="butt" d="m44.16798 280.80054l888.5801 0" fill-rule="nonzero"></path><path stroke="#9e9e9e" stroke-width="1.0" stroke-linecap="butt" d="m44.16798 320.80054l888.5801 0" fill-rule="nonzero"></path><path stroke="#9e9e9e" stroke-width="1.0" stroke-linecap="butt" d="m44.16798 360.80054l888.5801 0" fill-rule="nonzero"></path><path stroke="#9e9e9e" stroke-width="1.0" stroke-linecap="butt" d="m44.16798 402.39633l888.5801 0" fill-rule="nonzero"></path><path stroke="#9e9e9e" stroke-width="1.0" stroke-linecap="butt" d="m44.16798 443.99213l888.5801 0" fill-rule="nonzero"></path><path fill="#000000" d="m55.432293 226.52367l0 -13.59375l1.8125 0l0 13.59375l-1.8125 0zm4.886429 0l0 -13.59375l4.687496 0q1.578125 0 2.421875 0.1875q1.15625 0.265625 1.984375 0.96875q1.078125 0.921875 1.609375 2.34375q0.53125 1.40625 0.53125 3.21875q0 1.546875 -0.359375 2.75q-0.359375 1.1875 -0.921875 1.984375q-0.5625 0.78125 -1.234375 1.234375q-0.671875 0.4375 -1.625 0.671875q-0.953125 0.234375 -2.1875 0.234375l-4.906246 0zm1.796875 -1.609375l2.9062462 0q1.34375 0 2.109375 -0.25q0.765625 -0.25 1.21875 -0.703125q0.640625 -0.640625 1.0 -1.71875q0.359375 -1.078125 0.359375 -2.625q0 -2.125 -0.703125 -3.265625q-0.703125 -1.15625 -1.703125 -1.546875q-0.71875 -0.28125 -2.328125 -0.28125l-2.8593712 0l0 10.390625zm14.644817 5.609375q-1.375 -1.75 -2.328125 -4.078125q-0.953125 
-2.34375 -0.953125 -4.84375q0 -2.21875 0.703125 -4.234375q0.84375 -2.34375 2.578125 -4.671875l1.203125 0q-1.125 1.921875 -1.484375 2.75q-0.5625 1.28125 -0.890625 2.671875q-0.40625 1.734375 -0.40625 3.484375q0 4.46875 2.78125 8.921875l-1.203125 0zm2.634552 -8.375l1.6875 -0.140625q0.125 1.015625 0.5625 1.671875q0.4375 0.65625 1.359375 1.0625q0.9375 0.40625 2.09375 0.40625q1.03125 0 1.8125 -0.3125q0.796875 -0.3125 1.1875 -0.84375q0.390625 -0.53125 0.390625 -1.15625q0 -0.640625 -0.375 -1.109375q-0.375 -0.484375 -1.234375 -0.8125q-0.546875 -0.21875 -2.421875 -0.65625q-1.875 -0.453125 -2.625 -0.859375q-0.96875 -0.515625 -1.453125 -1.265625q-0.46875 -0.75 -0.46875 -1.6875q0 -1.03125 0.578125 -1.921875q0.59375 -0.90625 1.703125 -1.359375q1.125 -0.46875 2.5 -0.46875q1.515625 0 2.671875 0.484375q1.15625 0.484375 1.765625 1.4375q0.625 0.9375 0.671875 2.140625l-1.71875 0.125q-0.140625 -1.28125 -0.953125 -1.9375q-0.796875 -0.671875 -2.359375 -0.671875q-1.625 0 -2.375 0.609375q-0.75 0.59375 -0.75 1.4375q0 0.734375 0.53125 1.203125q0.515625 0.46875 2.703125 0.96875q2.203125 0.5 3.015625 0.875q1.1875 0.546875 1.75 1.390625q0.578125 0.828125 0.578125 1.921875q0 1.09375 -0.625 2.0625q-0.625 0.953125 -1.796875 1.484375q-1.15625 0.53125 -2.609375 0.53125q-1.84375 0 -3.09375 -0.53125q-1.25 -0.546875 -1.96875 -1.625q-0.703125 -1.078125 -0.734375 -2.453125zm13.927948 8.375l-1.1875 0q2.765625 -4.453125 2.765625 -8.921875q0 -1.734375 -0.390625 -3.453125q-0.328125 -1.390625 -0.890625 -2.671875q-0.359375 -0.84375 -1.484375 -2.78125l1.1875 0q1.75 2.328125 2.578125 4.671875q0.71875 2.015625 0.71875 4.234375q0 2.5 -0.96875 4.84375q-0.953125 2.328125 -2.328125 4.078125z" fill-rule="nonzero"></path><path fill="#000000" d="m350.9338 226.52367l0 -13.59375l2.71875 0l3.21875 9.625q0.4375 1.34375 0.640625 2.015625q0.234375 -0.75 0.734375 -2.1875l3.25 -9.453125l2.421875 0l0 13.59375l-1.734375 0l0 -11.390625l-3.953125 11.390625l-1.625 0l-3.9375 -11.578125l0 11.578125l-1.734375 0zm15.603302 0l0 -13.59375l4.6875 0q1.578125 0 2.421875 0.1875q1.15625 0.265625 1.984375 0.96875q1.078125 0.921875 1.609375 2.34375q0.53125 1.40625 0.53125 3.21875q0 1.546875 -0.359375 2.75q-0.359375 1.1875 -0.921875 1.984375q-0.5625 0.78125 -1.234375 1.234375q-0.671875 0.4375 -1.625 0.671875q-0.953125 0.234375 -2.1875 0.234375l-4.90625 0zm1.796875 -1.609375l2.90625 0q1.34375 0 2.109375 -0.25q0.765625 -0.25 1.21875 -0.703125q0.640625 -0.640625 1.0 -1.71875q0.359375 -1.078125 0.359375 -2.625q0 -2.125 -0.703125 -3.265625q-0.703125 -1.15625 -1.703125 -1.546875q-0.71875 -0.28125 -2.328125 -0.28125l-2.859375 0l0 10.390625zm10.988586 -1.953125l1.765625 -0.15625q0.1875 1.28125 0.890625 1.9375q0.71875 0.640625 1.71875 0.640625q1.203125 0 2.03125 -0.90625q0.84375 -0.90625 0.84375 -2.421875q0 -1.421875 -0.8125 -2.25q-0.796875 -0.828125 -2.09375 -0.828125q-0.796875 0 -1.453125 0.375q-0.640625 0.359375 -1.015625 0.953125l-1.578125 -0.203125l1.328125 -7.0l6.765625 0l0 1.609375l-5.4375 0l-0.734375 3.640625q1.234375 -0.84375 2.578125 -0.84375q1.78125 0 3.0 1.234375q1.234375 1.234375 1.234375 3.171875q0 1.84375 -1.078125 3.1875q-1.3125 1.65625 -3.578125 1.65625q-1.859375 0 -3.03125 -1.03125q-1.171875 -1.046875 -1.34375 -2.765625zm14.031952 7.5625q-1.375 -1.75 -2.328125 -4.078125q-0.953125 -2.34375 -0.953125 -4.84375q0 -2.21875 0.703125 -4.234375q0.84375 -2.34375 2.578125 -4.671875l1.203125 0q-1.125 1.921875 -1.484375 2.75q-0.5625 1.28125 -0.890625 2.671875q-0.40625 1.734375 -0.40625 3.484375q0 4.46875 2.78125 8.921875l-1.203125 0zm3.165802 -4.0l0 -13.59375l5.109375 
0q1.546875 0 2.484375 0.40625q0.953125 0.40625 1.484375 1.265625q0.53125 0.859375 0.53125 1.796875q0 0.875 -0.46875 1.65625q-0.46875 0.765625 -1.4375 1.234375q1.234375 0.359375 1.890625 1.234375q0.671875 0.875 0.671875 2.0625q0 0.953125 -0.40625 1.78125q-0.390625 0.8125 -0.984375 1.265625q-0.59375 0.4375 -1.5 0.671875q-0.890625 0.21875 -2.1875 0.21875l-5.1875 0zm1.796875 -7.890625l2.9375 0q1.203125 0 1.71875 -0.15625q0.6875 -0.203125 1.03125 -0.671875q0.359375 -0.46875 0.359375 -1.1875q0 -0.671875 -0.328125 -1.1875q-0.328125 -0.515625 -0.9375 -0.703125q-0.59375 -0.203125 -2.0625 -0.203125l-2.71875 0l0 4.109375zm0 6.28125l3.390625 0q0.875 0 1.21875 -0.0625q0.625 -0.109375 1.046875 -0.359375q0.421875 -0.265625 0.6875 -0.765625q0.265625 -0.5 0.265625 -1.140625q0 -0.765625 -0.390625 -1.328125q-0.390625 -0.5625 -1.078125 -0.78125q-0.6875 -0.234375 -1.984375 -0.234375l-3.15625 0l0 4.671875zm11.599823 5.609375l-1.1875 0q2.765625 -4.453125 2.765625 -8.921875q0 -1.734375 -0.390625 -3.453125q-0.328125 -1.390625 -0.890625 -2.671875q-0.359375 -0.84375 -1.484375 -2.78125l1.1875 0q1.75 2.328125 2.578125 4.671875q0.71875 2.015625 0.71875 4.234375q0 2.5 -0.96875 4.84375q-0.953125 2.328125 -2.328125 4.078125z" fill-rule="nonzero"></path><path fill="#000000" d="m648.46655 226.52367l-3.015625 -9.859375l1.71875 0l1.5625 5.6875l0.59375 2.125q0.03125 -0.15625 0.5 -2.03125l1.578125 -5.78125l1.71875 0l1.46875 5.71875l0.484375 1.890625l0.578125 -1.90625l1.6875 -5.703125l1.625 0l-3.078125 9.859375l-1.734375 0l-1.578125 -5.90625l-0.375 -1.671875l-2.0 7.578125l-1.734375 0zm18.394836 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.125732 -5.8125l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm3.8323364 0.8125l1.609375 0.25q0.109375 0.75 0.578125 1.09375q0.609375 0.453125 1.6875 0.453125q1.171875 0 1.796875 -0.46875q0.625 -0.453125 0.859375 -1.28125q0.125 -0.515625 0.109375 -2.15625q-1.09375 1.296875 -2.71875 1.296875q-2.03125 0 -3.15625 -1.46875q-1.109375 -1.46875 -1.109375 -3.515625q0 -1.40625 0.515625 -2.59375q0.515625 -1.203125 1.484375 -1.84375q0.96875 -0.65625 2.265625 -0.65625q1.75 0 2.875 1.40625l0 -1.1875l1.546875 0l0 8.515625q0 2.3125 -0.46875 3.265625q-0.46875 0.96875 -1.484375 1.515625q-1.015625 0.5625 -2.5 0.5625q-1.765625 0 -2.859375 -0.796875q-1.078125 -0.796875 -1.03125 -2.390625zm1.375 -5.921875q0 1.953125 0.765625 2.84375q0.78125 0.890625 1.9375 0.890625q1.140625 0 1.921875 -0.890625q0.78125 -0.890625 0.78125 -2.78125q0 -1.8125 -0.8125 -2.71875q-0.796875 -0.921875 -1.921875 -0.921875q-1.109375 0 -1.890625 0.90625q-0.78125 0.890625 -0.78125 2.671875zm9.297546 5.109375l0 -13.59375l1.671875 0l0 4.875q1.171875 -1.359375 2.953125 -1.359375q1.09375 0 1.890625 0.4375q0.8125 0.421875 1.15625 1.1875q0.359375 0.765625 0.359375 2.203125l0 6.25l-1.671875 0l0 -6.25q0 -1.25 -0.546875 -1.8125q-0.546875 -0.578125 -1.53125 -0.578125q-0.75 0 -1.40625 0.390625q-0.640625 0.375 -0.921875 1.046875q-0.28125 
0.65625 -0.28125 1.8125l0 5.390625l-1.671875 0zm14.031982 -1.5l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125z" fill-rule="nonzero"></path><path fill="#000000" d="m61.447918 263.88052l-1.75 0l-3.421875 -3.9375l0 3.9375l-1.28125 0l0 -10.34375l1.28125 0l0 6.359375l3.296875 -3.375l1.6875 0l-3.453125 3.390625l3.640625 3.96875zm7.667446 -4.0625q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171871 0q0 1.125 0.6249962 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6874962 -0.25 -1.1249962 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0624962 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.14062119 0.40625 -0.18749619 0.890625l3.8749962 0zm9.3862 0.1875q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm9.4487 0.4375q0 0.984375 -0.28125 1.71875q-0.265625 0.734375 -0.75 1.21875q-0.484375 0.484375 -1.140625 0.734375q-0.65625 0.234375 -1.421875 0.234375q-0.359375 0 -0.703125 -0.046875q-0.34375 -0.03125 -0.703125 -0.125l0 3.078125l-1.28125 0l0 -10.359375l1.140625 0l0.078125 1.234375q0.546875 -0.75 1.171875 -1.046875q0.625 -0.3125 1.34375 -0.3125q0.625 0 1.09375 0.265625q0.484375 0.265625 0.8125 0.75q0.328125 0.46875 0.484375 1.15625q0.15625 0.671875 0.15625 1.5zm-1.296875 0.0625q0 -0.578125 -0.09375 -1.0625q-0.078125 -0.484375 -0.265625 -0.828125q-0.171875 -0.34375 -0.46875 -0.53125q-0.28125 -0.203125 -0.671875 -0.203125q-0.25 0 -0.5 0.078125q-0.25 0.078125 -0.515625 0.265625q-0.265625 0.171875 -0.5625 0.46875q-0.296875 0.296875 -0.625 0.734375l0 3.5625q0.34375 
0.15625 0.734375 0.25q0.390625 0.078125 0.765625 0.078125q1.03125 0 1.609375 -0.703125q0.59375 -0.703125 0.59375 -2.109375zm9.5112 -1.03125q0 1.09375 -0.234375 2.0q-0.21875 0.90625 -0.671875 1.5625q-0.4375 0.640625 -1.109375 1.0q-0.65625 0.34375 -1.546875 0.34375q-0.765625 0 -1.40625 -0.28125q-0.625 -0.296875 -1.078125 -0.890625q-0.4375 -0.59375 -0.6875 -1.53125q-0.234375 -0.9375 -0.234375 -2.203125q0 -1.09375 0.21875 -2.0q0.234375 -0.921875 0.671875 -1.5625q0.453125 -0.65625 1.109375 -1.0q0.671875 -0.359375 1.5625 -0.359375q0.765625 0 1.390625 0.296875q0.625 0.28125 1.078125 0.890625q0.453125 0.59375 0.6875 1.53125q0.25 0.921875 0.25 2.203125zm-1.296875 0.046875q0 -0.25 -0.015625 -0.5q-0.015625 -0.25 -0.046875 -0.484375l-4.046875 3.015625q0.109375 0.375 0.28125 0.703125q0.171875 0.328125 0.40625 0.5625q0.234375 0.21875 0.53125 0.359375q0.3125 0.125 0.703125 0.125q0.5 0 0.90625 -0.234375q0.40625 -0.25 0.6875 -0.71875q0.28125 -0.484375 0.4375 -1.1875q0.15625 -0.71875 0.15625 -1.640625zm-4.375 -0.09375q0 0.234375 0 0.46875q0 0.21875 0.03125 0.421875l4.046875 -2.984375q-0.109375 -0.375 -0.28125 -0.6875q-0.171875 -0.3125 -0.40625 -0.53125q-0.234375 -0.21875 -0.53125 -0.34375q-0.296875 -0.125 -0.671875 -0.125q-0.5 0 -0.90625 0.25q-0.40625 0.234375 -0.703125 0.71875q-0.28125 0.46875 -0.4375 1.1875q-0.140625 0.703125 -0.140625 1.625z" fill-rule="nonzero"></path><path fill="#000000" d="m355.41818 263.88052l-0.03125 -0.984375q-0.59375 0.59375 -1.21875 0.859375q-0.609375 0.25 -1.296875 0.25q-0.625 0 -1.078125 -0.15625q-0.4375 -0.15625 -0.734375 -0.4375q-0.28125 -0.28125 -0.421875 -0.65625q-0.140625 -0.390625 -0.140625 -0.84375q0 -1.09375 0.828125 -1.71875q0.828125 -0.640625 2.4375 -0.640625l1.515625 0l0 -0.640625q0 -0.65625 -0.421875 -1.046875q-0.40625 -0.390625 -1.265625 -0.390625q-0.625 0 -1.234375 0.140625q-0.59375 0.140625 -1.234375 0.40625l0 -1.15625q0.234375 -0.09375 0.53125 -0.171875q0.296875 -0.078125 0.625 -0.140625q0.328125 -0.078125 0.6875 -0.109375q0.359375 -0.046875 0.734375 -0.046875q0.65625 0 1.1875 0.15625q0.546875 0.140625 0.90625 0.4375q0.375 0.296875 0.5625 0.75q0.203125 0.453125 0.203125 1.078125l0 5.0625l-1.140625 0zm-0.140625 -3.34375l-1.609375 0q-0.484375 0 -0.828125 0.09375q-0.34375 0.09375 -0.5625 0.265625q-0.21875 0.171875 -0.328125 0.421875q-0.09375 0.25 -0.09375 0.5625q0 0.203125 0.0625 0.40625q0.0625 0.1875 0.203125 0.34375q0.15625 0.140625 0.390625 0.234375q0.234375 0.078125 0.5625 0.078125q0.4375 0 1.0 -0.265625q0.578125 -0.265625 1.203125 -0.84375l0 -1.296875zm9.1987 3.078125q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.620575 -3.546875q0 0.921875 -0.25 1.640625q-0.25 0.71875 -0.71875 1.21875q-0.46875 0.5 -1.140625 0.78125q-0.65625 0.265625 -1.484375 0.265625q-0.65625 0 -1.34375 -0.125q-0.671875 -0.125 -1.34375 -0.40625l0 -9.90625l1.28125 0l0 2.84375l-0.0625 1.359375q0.546875 -0.734375 1.171875 -1.03125q0.625 -0.3125 1.34375 -0.3125q0.625 0 1.09375 0.265625q0.484375 0.265625 0.8125 
0.75q0.328125 0.46875 0.484375 1.15625q0.15625 0.671875 0.15625 1.5zm-1.296875 0.0625q0 -0.578125 -0.09375 -1.0625q-0.078125 -0.484375 -0.265625 -0.828125q-0.171875 -0.34375 -0.46875 -0.53125q-0.28125 -0.203125 -0.671875 -0.203125q-0.25 0 -0.5 0.078125q-0.25 0.078125 -0.515625 0.265625q-0.265625 0.171875 -0.5625 0.46875q-0.296875 0.296875 -0.625 0.734375l0 3.5625q0.359375 0.15625 0.75 0.25q0.390625 0.078125 0.75 0.078125q0.4375 0 0.828125 -0.140625q0.40625 -0.140625 0.703125 -0.46875q0.3125 -0.328125 0.484375 -0.859375q0.1875 -0.546875 0.1875 -1.34375zm2.6987 0.234375q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 -0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.245575 3.609375l-6.0 0l0 -1.1875l2.453125 0l0 -6.984375l-2.296875 1.25l-0.46875 -1.09375l3.046875 -1.59375l1.125 0l0 8.421875l2.140625 0l0 1.1875zm8.151825 -2.40625q0 0.609375 -0.25 1.09375q-0.25 0.46875 -0.703125 0.796875q-0.453125 0.3125 -1.0625 0.484375q-0.59375 0.15625 -1.3125 0.15625q-0.78125 0 -1.375 -0.171875q-0.59375 -0.171875 -1.0 -0.484375q-0.40625 -0.3125 -0.609375 -0.75q-0.203125 -0.4375 -0.203125 -0.953125q0 -0.875 0.484375 -1.515625q0.5 -0.640625 1.515625 -1.15625q-0.9375 -0.484375 -1.375 -1.0625q-0.421875 -0.578125 -0.421875 -1.328125q0 -0.46875 0.1875 -0.890625q0.1875 -0.4375 0.578125 -0.765625q0.390625 -0.34375 0.96875 -0.546875q0.578125 -0.203125 1.359375 -0.203125q0.734375 0 1.296875 0.15625q0.5625 0.15625 0.9375 0.453125q0.390625 0.296875 0.578125 0.71875q0.1875 0.40625 0.1875 0.921875q0 0.828125 -0.46875 1.421875q-0.46875 0.578125 -1.3125 1.015625q0.421875 0.21875 0.78125 0.484375q0.375 0.25 0.640625 0.5625q0.265625 0.3125 0.421875 0.703125q0.15625 0.390625 0.15625 0.859375zm-1.53125 -4.953125q0 -0.640625 -0.453125 -0.953125q-0.453125 -0.328125 -1.28125 -0.328125q-0.828125 0 -1.28125 0.3125q-0.453125 0.3125 -0.453125 0.9375q0 0.296875 0.109375 0.546875q0.109375 0.234375 0.328125 0.453125q0.234375 0.203125 0.578125 0.40625q0.34375 0.203125 0.828125 0.421875q0.828125 -0.390625 1.21875 -0.8125q0.40625 -0.421875 0.40625 -0.984375zm0.140625 5.046875q0 -0.265625 -0.09375 -0.515625q-0.078125 -0.265625 -0.3125 -0.515625q-0.21875 -0.25 -0.609375 -0.5q-0.375 -0.25 -0.96875 -0.5q-0.5 0.234375 -0.84375 0.46875q-0.328125 0.234375 -0.546875 0.484375q-0.203125 0.25 -0.296875 0.515625q-0.078125 0.265625 -0.078125 0.546875q0 0.328125 0.140625 0.59375q0.140625 0.25 0.390625 0.421875q0.25 0.171875 0.59375 0.265625q0.34375 0.09375 0.75 0.09375q0.390625 0 0.734375 -0.078125q0.34375 -0.09375 0.59375 -0.25q0.25 -0.171875 0.390625 -0.421875q0.15625 -0.25 0.15625 -0.609375zm2.85495 -1.203125q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 -0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 
0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.401825 -0.203125q0 0.921875 -0.25 1.640625q-0.25 0.71875 -0.71875 1.21875q-0.46875 0.5 -1.140625 0.78125q-0.65625 0.265625 -1.484375 0.265625q-0.65625 0 -1.34375 -0.125q-0.671875 -0.125 -1.34375 -0.40625l0 -9.90625l1.28125 0l0 2.84375l-0.0625 1.359375q0.546875 -0.734375 1.171875 -1.03125q0.625 -0.3125 1.34375 -0.3125q0.625 0 1.09375 0.265625q0.484375 0.265625 0.8125 0.75q0.328125 0.46875 0.484375 1.15625q0.15625 0.671875 0.15625 1.5zm-1.296875 0.0625q0 -0.578125 -0.09375 -1.0625q-0.078125 -0.484375 -0.265625 -0.828125q-0.171875 -0.34375 -0.46875 -0.53125q-0.28125 -0.203125 -0.671875 -0.203125q-0.25 0 -0.5 0.078125q-0.25 0.078125 -0.515625 0.265625q-0.265625 0.171875 -0.5625 0.46875q-0.296875 0.296875 -0.625 0.734375l0 3.5625q0.359375 0.15625 0.75 0.25q0.390625 0.078125 0.75 0.078125q0.4375 0 0.828125 -0.140625q0.40625 -0.140625 0.703125 -0.46875q0.3125 -0.328125 0.484375 -0.859375q0.1875 -0.546875 0.1875 -1.34375zm9.7612 1.640625l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm10.026825 8.078125q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.058075 0q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.558075 0.265625l-6.3125 0l0 -1.140625l2.46875 -2.46875q0.609375 -0.59375 0.984375 -1.03125q0.390625 -0.4375 0.59375 -0.796875q0.21875 -0.375 0.296875 -0.6875q0.078125 -0.328125 0.078125 -0.703125q0 -0.34375 -0.109375 -0.65625q-0.09375 -0.328125 -0.296875 -0.5625q-0.1875 -0.25 -0.5 -0.390625q-0.3125 -0.140625 -0.75 -0.140625q-0.609375 0 -1.109375 0.28125q-0.5 0.265625 -0.921875 0.6875l-0.703125 -0.828125q0.546875 -0.578125 1.25 -0.921875q0.703125 -0.34375 1.640625 -0.34375q0.640625 0 1.15625 0.1875q0.53125 0.1875 0.90625 0.546875q0.390625 0.359375 0.59375 0.890625q0.21875 0.515625 0.21875 1.15625q0 0.5625 -0.15625 1.03125q-0.140625 0.46875 -0.4375 0.9375q-0.296875 0.453125 
[SVG diagram residue: vector path data for rendered text glyphs, no recoverable content]
-0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.339294 1.203125q0 0.609375 -0.25 1.09375q-0.25 0.46875 -0.703125 0.796875q-0.453125 0.3125 -1.0625 0.484375q-0.59375 0.15625 -1.3125 0.15625q-0.78125 0 -1.375 -0.171875q-0.59375 -0.171875 -1.0 -0.484375q-0.40625 -0.3125 -0.609375 -0.75q-0.203125 -0.4375 -0.203125 -0.953125q0 -0.875 0.484375 -1.515625q0.5 -0.640625 1.515625 -1.15625q-0.9375 -0.484375 -1.375 -1.0625q-0.421875 -0.578125 -0.421875 -1.328125q0 -0.46875 0.1875 -0.890625q0.1875 -0.4375 0.578125 -0.765625q0.390625 -0.34375 0.96875 -0.546875q0.578125 -0.203125 1.359375 -0.203125q0.734375 0 1.296875 0.15625q0.5625 0.15625 0.9375 0.453125q0.390625 0.296875 0.578125 0.71875q0.1875 0.40625 0.1875 0.921875q0 0.828125 -0.46875 1.421875q-0.46875 0.578125 -1.3125 1.015625q0.421875 0.21875 0.78125 0.484375q0.375 0.25 0.640625 0.5625q0.265625 0.3125 0.421875 0.703125q0.15625 0.390625 0.15625 0.859375zm-1.53125 -4.953125q0 -0.640625 -0.453125 -0.953125q-0.453125 -0.328125 -1.28125 -0.328125q-0.828125 0 -1.28125 0.3125q-0.453125 0.3125 -0.453125 0.9375q0 0.296875 0.109375 0.546875q0.109375 0.234375 0.328125 0.453125q0.234375 0.203125 0.578125 0.40625q0.34375 0.203125 0.828125 0.421875q0.828125 -0.390625 1.21875 -0.8125q0.40625 -0.421875 0.40625 -0.984375zm0.140625 5.046875q0 -0.265625 -0.09375 -0.515625q-0.078125 -0.265625 -0.3125 -0.515625q-0.21875 -0.25 -0.609375 -0.5q-0.375 -0.25 -0.96875 -0.5q-0.5 0.234375 -0.84375 0.46875q-0.328125 0.234375 -0.546875 0.484375q-0.203125 0.25 -0.296875 0.515625q-0.078125 0.265625 -0.078125 0.546875q0 0.328125 0.140625 0.59375q0.140625 0.25 0.390625 0.421875q0.25 0.171875 0.59375 0.265625q0.34375 0.09375 0.75 0.09375q0.390625 0 0.734375 -0.078125q0.34375 -0.09375 0.59375 -0.25q0.25 -0.171875 0.390625 -0.421875q0.15625 -0.25 0.15625 -0.609375zm9.44873 -1.75q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm9.323669 -4.140625l-4.015625 8.390625l-1.453125 0l4.171875 -8.390625l-5.171875 0l0 -1.1875l6.46875 0l0 1.1875zm6.6206055 
8.390625l-0.03125 -0.984375q-0.59375 0.59375 -1.21875 0.859375q-0.609375 0.25 -1.296875 0.25q-0.625 0 -1.078125 -0.15625q-0.4375 -0.15625 -0.734375 -0.4375q-0.28125 -0.28125 -0.421875 -0.65625q-0.140625 -0.390625 -0.140625 -0.84375q0 -1.09375 0.828125 -1.71875q0.828125 -0.640625 2.4375 -0.640625l1.515625 0l0 -0.640625q0 -0.65625 -0.421875 -1.046875q-0.40625 -0.390625 -1.265625 -0.390625q-0.625 0 -1.234375 0.140625q-0.59375 0.140625 -1.234375 0.40625l0 -1.15625q0.234375 -0.09375 0.53125 -0.171875q0.296875 -0.078125 0.625 -0.140625q0.328125 -0.078125 0.6875 -0.109375q0.359375 -0.046875 0.734375 -0.046875q0.65625 0 1.1875 0.15625q0.546875 0.140625 0.90625 0.4375q0.375 0.296875 0.5625 0.75q0.203125 0.453125 0.203125 1.078125l0 5.0625l-1.140625 0zm-0.140625 -3.34375l-1.609375 0q-0.484375 0 -0.828125 0.09375q-0.34375 0.09375 -0.5625 0.265625q-0.21875 0.171875 -0.328125 0.421875q-0.09375 0.25 -0.09375 0.5625q0 0.203125 0.0625 0.40625q0.0625 0.1875 0.203125 0.34375q0.15625 0.140625 0.390625 0.234375q0.234375 0.078125 0.5625 0.078125q0.4375 0 1.0 -0.265625q0.578125 -0.265625 1.203125 -0.84375l0 -1.296875zm9.698669 0.9375q0 0.609375 -0.25 1.09375q-0.25 0.46875 -0.703125 0.796875q-0.453125 0.3125 -1.0625 0.484375q-0.59375 0.15625 -1.3125 0.15625q-0.78125 0 -1.375 -0.171875q-0.59375 -0.171875 -1.0 -0.484375q-0.40625 -0.3125 -0.609375 -0.75q-0.203125 -0.4375 -0.203125 -0.953125q0 -0.875 0.484375 -1.515625q0.5 -0.640625 1.515625 -1.15625q-0.9375 -0.484375 -1.375 -1.0625q-0.421875 -0.578125 -0.421875 -1.328125q0 -0.46875 0.1875 -0.890625q0.1875 -0.4375 0.578125 -0.765625q0.390625 -0.34375 0.96875 -0.546875q0.578125 -0.203125 1.359375 -0.203125q0.734375 0 1.296875 0.15625q0.5625 0.15625 0.9375 0.453125q0.390625 0.296875 0.578125 0.71875q0.1875 0.40625 0.1875 0.921875q0 0.828125 -0.46875 1.421875q-0.46875 0.578125 -1.3125 1.015625q0.421875 0.21875 0.78125 0.484375q0.375 0.25 0.640625 0.5625q0.265625 0.3125 0.421875 0.703125q0.15625 0.390625 0.15625 0.859375zm-1.53125 -4.953125q0 -0.640625 -0.453125 -0.953125q-0.453125 -0.328125 -1.28125 -0.328125q-0.828125 0 -1.28125 0.3125q-0.453125 0.3125 -0.453125 0.9375q0 0.296875 0.109375 0.546875q0.109375 0.234375 0.328125 0.453125q0.234375 0.203125 0.578125 0.40625q0.34375 0.203125 0.828125 0.421875q0.828125 -0.390625 1.21875 -0.8125q0.40625 -0.421875 0.40625 -0.984375zm0.140625 5.046875q0 -0.265625 -0.09375 -0.515625q-0.078125 -0.265625 -0.3125 -0.515625q-0.21875 -0.25 -0.609375 -0.5q-0.375 -0.25 -0.96875 -0.5q-0.5 0.234375 -0.84375 0.46875q-0.328125 0.234375 -0.546875 0.484375q-0.203125 0.25 -0.296875 0.515625q-0.078125 0.265625 -0.078125 0.546875q0 0.328125 0.140625 0.59375q0.140625 0.25 0.390625 0.421875q0.25 0.171875 0.59375 0.265625q0.34375 0.09375 0.75 0.09375q0.390625 0 0.734375 -0.078125q0.34375 -0.09375 0.59375 -0.25q0.25 -0.171875 0.390625 -0.421875q0.15625 -0.25 0.15625 -0.609375zm9.91748 0.203125l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm10.526794 4.28125q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 
-1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm9.85498 2.140625l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm10.636169 5.3125q0 0.65625 -0.234375 1.234375q-0.234375 0.578125 -0.6875 1.015625q-0.4375 0.421875 -1.0625 0.671875q-0.609375 0.234375 -1.359375 0.234375q-0.796875 0 -1.40625 -0.25q-0.609375 -0.25 -1.015625 -0.765625q-0.40625 -0.53125 -0.625 -1.328125q-0.203125 -0.796875 -0.203125 -1.890625q0 -0.734375 0.09375 -1.421875q0.09375 -0.6875 0.3125 -1.296875q0.21875 -0.609375 0.578125 -1.109375q0.375 -0.5 0.921875 -0.859375q0.546875 -0.375 1.28125 -0.578125q0.734375 -0.203125 1.71875 -0.203125l0.9375 0l0 1.125l-1.015625 0q-0.859375 0 -1.5 0.203125q-0.625 0.203125 -1.046875 0.578125q-0.421875 0.375 -0.65625 0.90625q-0.21875 0.515625 -0.28125 1.171875l-0.03125 0.296875q0.46875 -0.265625 1.0625 -0.421875q0.609375 -0.171875 1.3125 -0.171875q0.71875 0 1.265625 0.21875q0.546875 0.203125 0.90625 0.578125q0.375 0.375 0.546875 0.90625q0.1875 0.53125 0.1875 1.15625zm-1.328125 0.078125q0 -0.4375 -0.109375 -0.796875q-0.109375 -0.359375 -0.34375 -0.59375q-0.21875 -0.25 -0.5625 -0.375q-0.34375 -0.140625 -0.828125 -0.140625q-0.28125 0 -0.578125 0.046875q-0.28125 0.046875 -0.5625 0.140625q-0.265625 0.09375 -0.515625 0.21875q-0.25 0.109375 -0.453125 0.234375q0 0.953125 0.125 1.59375q0.140625 0.625 0.390625 1.015625q0.265625 0.375 0.640625 0.53125q0.390625 0.15625 0.890625 0.15625q0.421875 0 0.765625 -0.125q0.34375 -0.140625 0.59375 -0.40625q0.25 -0.265625 0.390625 -0.640625q0.15625 -0.375 0.15625 -0.859375zm2.6831055 -0.5625q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 -0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.339294 1.203125q0 0.609375 -0.25 1.09375q-0.25 0.46875 -0.703125 0.796875q-0.453125 0.3125 -1.0625 0.484375q-0.59375 0.15625 -1.3125 0.15625q-0.78125 0 -1.375 -0.171875q-0.59375 -0.171875 -1.0 -0.484375q-0.40625 -0.3125 -0.609375 -0.75q-0.203125 -0.4375 -0.203125 -0.953125q0 -0.875 0.484375 -1.515625q0.5 -0.640625 1.515625 -1.15625q-0.9375 -0.484375 -1.375 -1.0625q-0.421875 -0.578125 -0.421875 -1.328125q0 -0.46875 0.1875 -0.890625q0.1875 -0.4375 0.578125 -0.765625q0.390625 -0.34375 0.96875 -0.546875q0.578125 -0.203125 1.359375 -0.203125q0.734375 0 1.296875 0.15625q0.5625 0.15625 0.9375 0.453125q0.390625 
0.296875 0.578125 0.71875q0.1875 0.40625 0.1875 0.921875q0 0.828125 -0.46875 1.421875q-0.46875 0.578125 -1.3125 1.015625q0.421875 0.21875 0.78125 0.484375q0.375 0.25 0.640625 0.5625q0.265625 0.3125 0.421875 0.703125q0.15625 0.390625 0.15625 0.859375zm-1.53125 -4.953125q0 -0.640625 -0.453125 -0.953125q-0.453125 -0.328125 -1.28125 -0.328125q-0.828125 0 -1.28125 0.3125q-0.453125 0.3125 -0.453125 0.9375q0 0.296875 0.109375 0.546875q0.109375 0.234375 0.328125 0.453125q0.234375 0.203125 0.578125 0.40625q0.34375 0.203125 0.828125 0.421875q0.828125 -0.390625 1.21875 -0.8125q0.40625 -0.421875 0.40625 -0.984375zm0.140625 5.046875q0 -0.265625 -0.09375 -0.515625q-0.078125 -0.265625 -0.3125 -0.515625q-0.21875 -0.25 -0.609375 -0.5q-0.375 -0.25 -0.96875 -0.5q-0.5 0.234375 -0.84375 0.46875q-0.328125 0.234375 -0.546875 0.484375q-0.203125 0.25 -0.296875 0.515625q-0.078125 0.265625 -0.078125 0.546875q0 0.328125 0.140625 0.59375q0.140625 0.25 0.390625 0.421875q0.25 0.171875 0.59375 0.265625q0.34375 0.09375 0.75 0.09375q0.390625 0 0.734375 -0.078125q0.34375 -0.09375 0.59375 -0.25q0.25 -0.171875 0.390625 -0.421875q0.15625 -0.25 0.15625 -0.609375zm9.44873 -1.75q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm9.136169 1.1875q0 0.71875 -0.3125 1.3125q-0.296875 0.578125 -0.84375 1.0q-0.53125 0.40625 -1.265625 0.640625q-0.734375 0.234375 -1.578125 0.234375q-0.21875 0 -0.46875 -0.015625q-0.234375 0 -0.484375 -0.015625q-0.234375 -0.015625 -0.46875 -0.046875q-0.234375 -0.015625 -0.421875 -0.046875l0 -1.15625q0.40625 0.09375 0.90625 0.140625q0.515625 0.046875 1.03125 0.046875q0.59375 0 1.0625 -0.140625q0.46875 -0.140625 0.796875 -0.390625q0.328125 -0.265625 0.5 -0.640625q0.171875 -0.375 0.171875 -0.828125q0 -0.90625 -0.640625 -1.3125q-0.640625 -0.40625 -1.84375 -0.40625l-1.8125 0l0 -4.890625l5.15625 0l0 1.125l-3.953125 0l0 2.6875l0.84375 0q0.6875 0 1.328125 0.125q0.65625 0.125 1.15625 0.4375q0.515625 0.296875 0.828125 0.828125q0.3125 0.515625 0.3125 1.3125zm8.245605 -5.328125l-4.015625 8.390625l-1.453125 0l4.171875 -8.390625l-5.171875 0l0 -1.1875l6.46875 0l0 1.1875zm8.433044 -0.765625q-1.015625 -0.21875 -1.734375 -0.21875q-1.71875 0 -1.71875 1.796875l0 1.296875l3.21875 0l0 1.0625l-3.21875 0l0 5.21875l-1.296875 0l0 -5.21875l-2.359375 0l0 -1.0625l2.359375 0l0 -1.21875q0 -2.9375 3.0625 -2.9375q0.765625 0 1.6875 0.171875l0 1.109375zm-7.703125 1.796875l0 0zm15.558105 4.328125q0 0.65625 -0.234375 1.234375q-0.234375 0.578125 -0.6875 1.015625q-0.4375 0.421875 -1.0625 0.671875q-0.609375 0.234375 -1.359375 0.234375q-0.796875 0 -1.40625 -0.25q-0.609375 -0.25 -1.015625 
-0.765625q-0.40625 -0.53125 -0.625 -1.328125q-0.203125 -0.796875 -0.203125 -1.890625q0 -0.734375 0.09375 -1.421875q0.09375 -0.6875 0.3125 -1.296875q0.21875 -0.609375 0.578125 -1.109375q0.375 -0.5 0.921875 -0.859375q0.546875 -0.375 1.28125 -0.578125q0.734375 -0.203125 1.71875 -0.203125l0.9375 0l0 1.125l-1.015625 0q-0.859375 0 -1.5 0.203125q-0.625 0.203125 -1.046875 0.578125q-0.421875 0.375 -0.65625 0.90625q-0.21875 0.515625 -0.28125 1.171875l-0.03125 0.296875q0.46875 -0.265625 1.0625 -0.421875q0.609375 -0.171875 1.3125 -0.171875q0.71875 0 1.265625 0.21875q0.546875 0.203125 0.90625 0.578125q0.375 0.375 0.546875 0.90625q0.1875 0.53125 0.1875 1.15625zm-1.328125 0.078125q0 -0.4375 -0.109375 -0.796875q-0.109375 -0.359375 -0.34375 -0.59375q-0.21875 -0.25 -0.5625 -0.375q-0.34375 -0.140625 -0.828125 -0.140625q-0.28125 0 -0.578125 0.046875q-0.28125 0.046875 -0.5625 0.140625q-0.265625 0.09375 -0.515625 0.21875q-0.25 0.109375 -0.453125 0.234375q0 0.953125 0.125 1.59375q0.140625 0.625 0.390625 1.015625q0.265625 0.375 0.640625 0.53125q0.390625 0.15625 0.890625 0.15625q0.421875 0 0.765625 -0.125q0.34375 -0.140625 0.59375 -0.40625q0.25 -0.265625 0.390625 -0.640625q0.15625 -0.375 0.15625 -0.859375zm2.6830444 -0.5625q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 -0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm11.839355 3.609375l-0.03125 -0.984375q-0.59375 0.59375 -1.21875 0.859375q-0.609375 0.25 -1.296875 0.25q-0.625 0 -1.078125 -0.15625q-0.4375 -0.15625 -0.734375 -0.4375q-0.28125 -0.28125 -0.421875 -0.65625q-0.140625 -0.390625 -0.140625 -0.84375q0 -1.09375 0.828125 -1.71875q0.828125 -0.640625 2.4375 -0.640625l1.515625 0l0 -0.640625q0 -0.65625 -0.421875 -1.046875q-0.40625 -0.390625 -1.265625 -0.390625q-0.625 0 -1.234375 0.140625q-0.59375 0.140625 -1.234375 0.40625l0 -1.15625q0.234375 -0.09375 0.53125 -0.171875q0.296875 -0.078125 0.625 -0.140625q0.328125 -0.078125 0.6875 -0.109375q0.359375 -0.046875 0.734375 -0.046875q0.65625 0 1.1875 0.15625q0.546875 0.140625 0.90625 0.4375q0.375 0.296875 0.5625 0.75q0.203125 0.453125 0.203125 1.078125l0 5.0625l-1.140625 0zm-0.140625 -3.34375l-1.609375 0q-0.484375 0 -0.828125 0.09375q-0.34375 0.09375 -0.5625 0.265625q-0.21875 0.171875 -0.328125 0.421875q-0.09375 0.25 -0.09375 0.5625q0 0.203125 0.0625 0.40625q0.0625 0.1875 0.203125 0.34375q0.15625 0.140625 0.390625 0.234375q0.234375 0.078125 0.5625 0.078125q0.4375 0 1.0 -0.265625q0.578125 -0.265625 1.203125 -0.84375l0 -1.296875zm8.198669 3.34375l-0.03125 -0.984375q-0.59375 0.59375 -1.21875 0.859375q-0.609375 0.25 -1.296875 0.25q-0.625 0 -1.078125 -0.15625q-0.4375 -0.15625 -0.734375 -0.4375q-0.28125 -0.28125 -0.421875 -0.65625q-0.140625 -0.390625 -0.140625 -0.84375q0 -1.09375 0.828125 -1.71875q0.828125 -0.640625 2.4375 -0.640625l1.515625 0l0 -0.640625q0 -0.65625 -0.421875 -1.046875q-0.40625 -0.390625 -1.265625 -0.390625q-0.625 0 -1.234375 0.140625q-0.59375 0.140625 -1.234375 0.40625l0 
-1.15625q0.234375 -0.09375 0.53125 -0.171875q0.296875 -0.078125 0.625 -0.140625q0.328125 -0.078125 0.6875 -0.109375q0.359375 -0.046875 0.734375 -0.046875q0.65625 0 1.1875 0.15625q0.546875 0.140625 0.90625 0.4375q0.375 0.296875 0.5625 0.75q0.203125 0.453125 0.203125 1.078125l0 5.0625l-1.140625 0zm-0.140625 -3.34375l-1.609375 0q-0.484375 0 -0.828125 0.09375q-0.34375 0.09375 -0.5625 0.265625q-0.21875 0.171875 -0.328125 0.421875q-0.09375 0.25 -0.09375 0.5625q0 0.203125 0.0625 0.40625q0.0625 0.1875 0.203125 0.34375q0.15625 0.140625 0.390625 0.234375q0.234375 0.078125 0.5625 0.078125q0.4375 0 1.0 -0.265625q0.578125 -0.265625 1.203125 -0.84375l0 -1.296875zm9.19873 3.078125q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm9.026794 -1.84375l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm10.026855 8.078125q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm7.0580444 0.265625l-0.03125 -0.984375q-0.59375 0.59375 -1.21875 0.859375q-0.609375 0.25 -1.296875 0.25q-0.625 0 -1.078125 -0.15625q-0.4375 -0.15625 -0.734375 -0.4375q-0.28125 -0.28125 -0.421875 -0.65625q-0.140625 -0.390625 -0.140625 -0.84375q0 -1.09375 0.828125 -1.71875q0.828125 -0.640625 2.4375 -0.640625l1.515625 0l0 -0.640625q0 -0.65625 -0.421875 -1.046875q-0.40625 -0.390625 -1.265625 -0.390625q-0.625 0 -1.234375 0.140625q-0.59375 0.140625 -1.234375 0.40625l0 -1.15625q0.234375 -0.09375 0.53125 -0.171875q0.296875 -0.078125 0.625 -0.140625q0.328125 -0.078125 0.6875 -0.109375q0.359375 -0.046875 0.734375 -0.046875q0.65625 0 1.1875 0.15625q0.546875 0.140625 0.90625 0.4375q0.375 0.296875 0.5625 0.75q0.203125 0.453125 0.203125 1.078125l0 5.0625l-1.140625 0zm-0.140625 -3.34375l-1.609375 0q-0.484375 0 -0.828125 0.09375q-0.34375 0.09375 -0.5625 0.265625q-0.21875 0.171875 -0.328125 0.421875q-0.09375 0.25 -0.09375 0.5625q0 0.203125 0.0625 0.40625q0.0625 0.1875 0.203125 0.34375q0.15625 0.140625 0.390625 0.234375q0.234375 0.078125 0.5625 0.078125q0.4375 0 1.0 -0.265625q0.578125 -0.265625 1.203125 -0.84375l0 -1.296875zm9.63623 -5.046875l-4.015625 8.390625l-1.453125 0l4.171875 -8.390625l-5.171875 0l0 -1.1875l6.46875 0l0 1.1875zm8.058044 3.0625q0 1.40625 -0.34375 2.421875q-0.328125 1.0 -0.984375 1.65625q-0.65625 0.640625 -1.640625 0.953125q-0.96875 0.296875 -2.25 
0.296875l-0.796875 0l0 -1.109375l0.890625 0q0.953125 0 1.640625 -0.1875q0.6875 -0.203125 1.140625 -0.5625q0.453125 -0.375 0.6875 -0.90625q0.25 -0.53125 0.3125 -1.21875l0.03125 -0.296875q-0.46875 0.28125 -1.078125 0.453125q-0.59375 0.15625 -1.296875 0.15625q-0.71875 0 -1.265625 -0.21875q-0.546875 -0.21875 -0.921875 -0.59375q-0.359375 -0.375 -0.546875 -0.890625q-0.171875 -0.53125 -0.171875 -1.15625q0 -0.65625 0.234375 -1.234375q0.25 -0.578125 0.6875 -1.0q0.4375 -0.4375 1.03125 -0.6875q0.609375 -0.25 1.34375 -0.25q0.71875 0 1.3125 0.234375q0.609375 0.234375 1.046875 0.765625q0.4375 0.515625 0.6875 1.359375q0.25 0.828125 0.25 2.015625zm-3.34375 -3.328125q-0.421875 0 -0.765625 0.140625q-0.34375 0.125 -0.609375 0.390625q-0.25 0.265625 -0.390625 0.640625q-0.140625 0.375 -0.140625 0.875q0 0.4375 0.09375 0.796875q0.109375 0.34375 0.328125 0.59375q0.234375 0.25 0.578125 0.390625q0.359375 0.125 0.84375 0.125q0.265625 0 0.546875 -0.046875q0.296875 -0.0625 0.5625 -0.140625q0.28125 -0.09375 0.53125 -0.203125q0.25 -0.125 0.453125 -0.265625q0 -0.9375 -0.140625 -1.5625q-0.140625 -0.640625 -0.40625 -1.015625q-0.265625 -0.390625 -0.640625 -0.546875q-0.375 -0.171875 -0.84375 -0.171875zm11.464355 4.59375q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm8.886169 3.984375q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.495605 -8.125l-4.015625 8.390625l-1.453125 0l4.171875 -8.390625l-5.171875 0l0 -1.1875l6.46875 0l0 1.1875zm8.339294 3.609375q0 1.09375 -0.234375 2.0q-0.21875 0.90625 -0.671875 1.5625q-0.4375 0.640625 -1.109375 1.0q-0.65625 0.34375 -1.546875 0.34375q-0.765625 0 -1.40625 -0.28125q-0.625 -0.296875 -1.078125 -0.890625q-0.4375 -0.59375 -0.6875 -1.53125q-0.234375 -0.9375 -0.234375 -2.203125q0 -1.09375 0.21875 -2.0q0.234375 -0.921875 0.671875 -1.5625q0.453125 -0.65625 1.109375 -1.0q0.671875 -0.359375 1.5625 -0.359375q0.765625 0 1.390625 0.296875q0.625 0.28125 1.078125 0.890625q0.453125 0.59375 0.6875 1.53125q0.25 0.921875 0.25 2.203125zm-1.296875 0.046875q0 
-0.25 -0.015625 -0.5q-0.015625 -0.25 -0.046875 -0.484375l-4.046875 3.015625q0.109375 0.375 0.28125 0.703125q0.171875 0.328125 0.40625 0.5625q0.234375 0.21875 0.53125 0.359375q0.3125 0.125 0.703125 0.125q0.5 0 0.90625 -0.234375q0.40625 -0.25 0.6875 -0.71875q0.28125 -0.484375 0.4375 -1.1875q0.15625 -0.71875 0.15625 -1.640625zm-4.375 -0.09375q0 0.234375 0 0.46875q0 0.21875 0.03125 0.421875l4.046875 -2.984375q-0.109375 -0.375 -0.28125 -0.6875q-0.171875 -0.3125 -0.40625 -0.53125q-0.234375 -0.21875 -0.53125 -0.34375q-0.296875 -0.125 -0.671875 -0.125q-0.5 0 -0.90625 0.25q-0.40625 0.234375 -0.703125 0.71875q-0.28125 0.46875 -0.4375 1.1875q-0.140625 0.703125 -0.140625 1.625zm13.82373 -4.328125q-1.015625 -0.21875 -1.734375 -0.21875q-1.71875 0 -1.71875 1.796875l0 1.296875l3.21875 0l0 1.0625l-3.21875 0l0 5.21875l-1.296875 0l0 -5.21875l-2.359375 0l0 -1.0625l2.359375 0l0 -1.21875q0 -2.9375 3.0625 -2.9375q0.765625 0 1.6875 0.171875l0 1.109375zm-7.703125 1.796875l0 0z" fill-rule="nonzero"></path><path fill="#000000" d="m61.447918 343.88052l-1.75 0l-3.421875 -3.9375l0 3.9375l-1.28125 0l0 -10.34375l1.28125 0l0 6.359375l3.296875 -3.375l1.6875 0l-3.453125 3.390625l3.640625 3.96875zm7.667446 -4.0625q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171871 0q0 1.125 0.6249962 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6874962 -0.25 -1.1249962 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0624962 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.14062119 0.40625 -0.18749619 0.890625l3.8749962 0zm9.3862 0.1875q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm9.4487 0.4375q0 0.984375 -0.28125 1.71875q-0.265625 0.734375 -0.75 1.21875q-0.484375 0.484375 -1.140625 0.734375q-0.65625 0.234375 -1.421875 0.234375q-0.359375 0 -0.703125 -0.046875q-0.34375 -0.03125 -0.703125 -0.125l0 3.078125l-1.28125 0l0 -10.359375l1.140625 
0l0.078125 1.234375q0.546875 -0.75 1.171875 -1.046875q0.625 -0.3125 1.34375 -0.3125q0.625 0 1.09375 0.265625q0.484375 0.265625 0.8125 0.75q0.328125 0.46875 0.484375 1.15625q0.15625 0.671875 0.15625 1.5zm-1.296875 0.0625q0 -0.578125 -0.09375 -1.0625q-0.078125 -0.484375 -0.265625 -0.828125q-0.171875 -0.34375 -0.46875 -0.53125q-0.28125 -0.203125 -0.671875 -0.203125q-0.25 0 -0.5 0.078125q-0.25 0.078125 -0.515625 0.265625q-0.265625 0.171875 -0.5625 0.46875q-0.296875 0.296875 -0.625 0.734375l0 3.5625q0.34375 0.15625 0.734375 0.25q0.390625 0.078125 0.765625 0.078125q1.03125 0 1.609375 -0.703125q0.59375 -0.703125 0.59375 -2.109375zm9.29245 3.75l-6.3125 0l0 -1.140625l2.46875 -2.46875q0.609375 -0.59375 0.984375 -1.03125q0.390625 -0.4375 0.59375 -0.796875q0.21875 -0.375 0.296875 -0.6875q0.078125 -0.328125 0.078125 -0.703125q0 -0.34375 -0.109375 -0.65625q-0.09375 -0.328125 -0.296875 -0.5625q-0.1875 -0.25 -0.5 -0.390625q-0.3125 -0.140625 -0.75 -0.140625q-0.609375 0 -1.109375 0.28125q-0.5 0.265625 -0.921875 0.6875l-0.703125 -0.828125q0.546875 -0.578125 1.25 -0.921875q0.703125 -0.34375 1.640625 -0.34375q0.640625 0 1.15625 0.1875q0.53125 0.1875 0.90625 0.546875q0.390625 0.359375 0.59375 0.890625q0.21875 0.515625 0.21875 1.15625q0 0.5625 -0.15625 1.03125q-0.140625 0.46875 -0.4375 0.9375q-0.296875 0.453125 -0.75 0.953125q-0.453125 0.5 -1.0625 1.09375l-1.734375 1.6875l4.65625 0l0 1.21875z" fill-rule="nonzero"></path><path fill="#000000" d="m355.41818 343.88052l-0.03125 -0.984375q-0.59375 0.59375 -1.21875 0.859375q-0.609375 0.25 -1.296875 0.25q-0.625 0 -1.078125 -0.15625q-0.4375 -0.15625 -0.734375 -0.4375q-0.28125 -0.28125 -0.421875 -0.65625q-0.140625 -0.390625 -0.140625 -0.84375q0 -1.09375 0.828125 -1.71875q0.828125 -0.640625 2.4375 -0.640625l1.515625 0l0 -0.640625q0 -0.65625 -0.421875 -1.046875q-0.40625 -0.390625 -1.265625 -0.390625q-0.625 0 -1.234375 0.140625q-0.59375 0.140625 -1.234375 0.40625l0 -1.15625q0.234375 -0.09375 0.53125 -0.171875q0.296875 -0.078125 0.625 -0.140625q0.328125 -0.078125 0.6875 -0.109375q0.359375 -0.046875 0.734375 -0.046875q0.65625 0 1.1875 0.15625q0.546875 0.140625 0.90625 0.4375q0.375 0.296875 0.5625 0.75q0.203125 0.453125 0.203125 1.078125l0 5.0625l-1.140625 0zm-0.140625 -3.34375l-1.609375 0q-0.484375 0 -0.828125 0.09375q-0.34375 0.09375 -0.5625 0.265625q-0.21875 0.171875 -0.328125 0.421875q-0.09375 0.25 -0.09375 0.5625q0 0.203125 0.0625 0.40625q0.0625 0.1875 0.203125 0.34375q0.15625 0.140625 0.390625 0.234375q0.234375 0.078125 0.5625 0.078125q0.4375 0 1.0 -0.265625q0.578125 -0.265625 1.203125 -0.84375l0 -1.296875zm9.1987 3.078125q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.620575 -3.546875q0 0.921875 -0.25 1.640625q-0.25 0.71875 -0.71875 1.21875q-0.46875 0.5 -1.140625 0.78125q-0.65625 0.265625 -1.484375 0.265625q-0.65625 0 -1.34375 -0.125q-0.671875 -0.125 -1.34375 -0.40625l0 -9.90625l1.28125 0l0 2.84375l-0.0625 1.359375q0.546875 -0.734375 1.171875 -1.03125q0.625 -0.3125 1.34375 -0.3125q0.625 
0 1.09375 0.265625q0.484375 0.265625 0.8125 0.75q0.328125 0.46875 0.484375 1.15625q0.15625 0.671875 0.15625 1.5zm-1.296875 0.0625q0 -0.578125 -0.09375 -1.0625q-0.078125 -0.484375 -0.265625 -0.828125q-0.171875 -0.34375 -0.46875 -0.53125q-0.28125 -0.203125 -0.671875 -0.203125q-0.25 0 -0.5 0.078125q-0.25 0.078125 -0.515625 0.265625q-0.265625 0.171875 -0.5625 0.46875q-0.296875 0.296875 -0.625 0.734375l0 3.5625q0.359375 0.15625 0.75 0.25q0.390625 0.078125 0.75 0.078125q0.4375 0 0.828125 -0.140625q0.40625 -0.140625 0.703125 -0.46875q0.3125 -0.328125 0.484375 -0.859375q0.1875 -0.546875 0.1875 -1.34375zm2.6987 0.234375q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 -0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.245575 3.609375l-6.0 0l0 -1.1875l2.453125 0l0 -6.984375l-2.296875 1.25l-0.46875 -1.09375l3.046875 -1.59375l1.125 0l0 8.421875l2.140625 0l0 1.1875zm8.151825 -2.40625q0 0.609375 -0.25 1.09375q-0.25 0.46875 -0.703125 0.796875q-0.453125 0.3125 -1.0625 0.484375q-0.59375 0.15625 -1.3125 0.15625q-0.78125 0 -1.375 -0.171875q-0.59375 -0.171875 -1.0 -0.484375q-0.40625 -0.3125 -0.609375 -0.75q-0.203125 -0.4375 -0.203125 -0.953125q0 -0.875 0.484375 -1.515625q0.5 -0.640625 1.515625 -1.15625q-0.9375 -0.484375 -1.375 -1.0625q-0.421875 -0.578125 -0.421875 -1.328125q0 -0.46875 0.1875 -0.890625q0.1875 -0.4375 0.578125 -0.765625q0.390625 -0.34375 0.96875 -0.546875q0.578125 -0.203125 1.359375 -0.203125q0.734375 0 1.296875 0.15625q0.5625 0.15625 0.9375 0.453125q0.390625 0.296875 0.578125 0.71875q0.1875 0.40625 0.1875 0.921875q0 0.828125 -0.46875 1.421875q-0.46875 0.578125 -1.3125 1.015625q0.421875 0.21875 0.78125 0.484375q0.375 0.25 0.640625 0.5625q0.265625 0.3125 0.421875 0.703125q0.15625 0.390625 0.15625 0.859375zm-1.53125 -4.953125q0 -0.640625 -0.453125 -0.953125q-0.453125 -0.328125 -1.28125 -0.328125q-0.828125 0 -1.28125 0.3125q-0.453125 0.3125 -0.453125 0.9375q0 0.296875 0.109375 0.546875q0.109375 0.234375 0.328125 0.453125q0.234375 0.203125 0.578125 0.40625q0.34375 0.203125 0.828125 0.421875q0.828125 -0.390625 1.21875 -0.8125q0.40625 -0.421875 0.40625 -0.984375zm0.140625 5.046875q0 -0.265625 -0.09375 -0.515625q-0.078125 -0.265625 -0.3125 -0.515625q-0.21875 -0.25 -0.609375 -0.5q-0.375 -0.25 -0.96875 -0.5q-0.5 0.234375 -0.84375 0.46875q-0.328125 0.234375 -0.546875 0.484375q-0.203125 0.25 -0.296875 0.515625q-0.078125 0.265625 -0.078125 0.546875q0 0.328125 0.140625 0.59375q0.140625 0.25 0.390625 0.421875q0.25 0.171875 0.59375 0.265625q0.34375 0.09375 0.75 0.09375q0.390625 0 0.734375 -0.078125q0.34375 -0.09375 0.59375 -0.25q0.25 -0.171875 0.390625 -0.421875q0.15625 -0.25 0.15625 -0.609375zm2.85495 -1.203125q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 -0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 
1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.401825 -0.203125q0 0.921875 -0.25 1.640625q-0.25 0.71875 -0.71875 1.21875q-0.46875 0.5 -1.140625 0.78125q-0.65625 0.265625 -1.484375 0.265625q-0.65625 0 -1.34375 -0.125q-0.671875 -0.125 -1.34375 -0.40625l0 -9.90625l1.28125 0l0 2.84375l-0.0625 1.359375q0.546875 -0.734375 1.171875 -1.03125q0.625 -0.3125 1.34375 -0.3125q0.625 0 1.09375 0.265625q0.484375 0.265625 0.8125 0.75q0.328125 0.46875 0.484375 1.15625q0.15625 0.671875 0.15625 1.5zm-1.296875 0.0625q0 -0.578125 -0.09375 -1.0625q-0.078125 -0.484375 -0.265625 -0.828125q-0.171875 -0.34375 -0.46875 -0.53125q-0.28125 -0.203125 -0.671875 -0.203125q-0.25 0 -0.5 0.078125q-0.25 0.078125 -0.515625 0.265625q-0.265625 0.171875 -0.5625 0.46875q-0.296875 0.296875 -0.625 0.734375l0 3.5625q0.359375 0.15625 0.75 0.25q0.390625 0.078125 0.75 0.078125q0.4375 0 0.828125 -0.140625q0.40625 -0.140625 0.703125 -0.46875q0.3125 -0.328125 0.484375 -0.859375q0.1875 -0.546875 0.1875 -1.34375zm9.7612 1.640625l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm10.026825 8.078125q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.058075 0q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.558075 0.265625l-6.3125 0l0 -1.140625l2.46875 -2.46875q0.609375 -0.59375 0.984375 -1.03125q0.390625 -0.4375 0.59375 -0.796875q0.21875 -0.375 0.296875 -0.6875q0.078125 -0.328125 0.078125 -0.703125q0 -0.34375 -0.109375 -0.65625q-0.09375 -0.328125 -0.296875 -0.5625q-0.1875 -0.25 -0.5 -0.390625q-0.3125 -0.140625 -0.75 -0.140625q-0.609375 0 -1.109375 0.28125q-0.5 0.265625 -0.921875 0.6875l-0.703125 -0.828125q0.546875 -0.578125 1.25 -0.921875q0.703125 -0.34375 1.640625 -0.34375q0.640625 0 1.15625 0.1875q0.53125 0.1875 0.90625 0.546875q0.390625 0.359375 0.59375 0.890625q0.21875 0.515625 0.21875 1.15625q0 0.5625 -0.15625 1.03125q-0.140625 
0.46875 -0.4375 0.9375q-0.296875 0.453125 -0.75 0.953125q-0.453125 0.5 -1.0625 1.09375l-1.734375 1.6875l4.65625 0l0 1.21875zm8.370575 -9.15625q-1.015625 -0.21875 -1.734375 -0.21875q-1.71875 0 -1.71875 1.796875l0 1.296875l3.21875 0l0 1.0625l-3.21875 0l0 5.21875l-1.296875 0l0 -5.21875l-2.359375 0l0 -1.0625l2.359375 0l0 -1.21875q0 -2.9375 3.0625 -2.9375q0.765625 0 1.6875 0.171875l0 1.109375zm-7.703125 1.796875l0 0zm15.4487 4.953125q0 0.609375 -0.25 1.09375q-0.25 0.46875 -0.703125 0.796875q-0.453125 0.3125 -1.0625 0.484375q-0.59375 0.15625 -1.3125 0.15625q-0.78125 0 -1.375 -0.171875q-0.59375 -0.171875 -1.0 -0.484375q-0.40625 -0.3125 -0.609375 -0.75q-0.203125 -0.4375 -0.203125 -0.953125q0 -0.875 0.484375 -1.515625q0.5 -0.640625 1.515625 -1.15625q-0.9375 -0.484375 -1.375 -1.0625q-0.421875 -0.578125 -0.421875 -1.328125q0 -0.46875 0.1875 -0.890625q0.1875 -0.4375 0.578125 -0.765625q0.390625 -0.34375 0.96875 -0.546875q0.578125 -0.203125 1.359375 -0.203125q0.734375 0 1.296875 0.15625q0.5625 0.15625 0.9375 0.453125q0.390625 0.296875 0.578125 0.71875q0.1875 0.40625 0.1875 0.921875q0 0.828125 -0.46875 1.421875q-0.46875 0.578125 -1.3125 1.015625q0.421875 0.21875 0.78125 0.484375q0.375 0.25 0.640625 0.5625q0.265625 0.3125 0.421875 0.703125q0.15625 0.390625 0.15625 0.859375zm-1.53125 -4.953125q0 -0.640625 -0.453125 -0.953125q-0.453125 -0.328125 -1.28125 -0.328125q-0.828125 0 -1.28125 0.3125q-0.453125 0.3125 -0.453125 0.9375q0 0.296875 0.109375 0.546875q0.109375 0.234375 0.328125 0.453125q0.234375 0.203125 0.578125 0.40625q0.34375 0.203125 0.828125 0.421875q0.828125 -0.390625 1.21875 -0.8125q0.40625 -0.421875 0.40625 -0.984375zm0.140625 5.046875q0 -0.265625 -0.09375 -0.515625q-0.078125 -0.265625 -0.3125 -0.515625q-0.21875 -0.25 -0.609375 -0.5q-0.375 -0.25 -0.96875 -0.5q-0.5 0.234375 -0.84375 0.46875q-0.328125 0.234375 -0.546875 0.484375q-0.203125 0.25 -0.296875 0.515625q-0.078125 0.265625 -0.078125 0.546875q0 0.328125 0.140625 0.59375q0.140625 0.25 0.390625 0.421875q0.25 0.171875 0.59375 0.265625q0.34375 0.09375 0.75 0.09375q0.390625 0 0.734375 -0.078125q0.34375 -0.09375 0.59375 -0.25q0.25 -0.171875 0.390625 -0.421875q0.15625 -0.25 0.15625 -0.609375zm9.1987 -0.75q0 0.71875 -0.3125 1.3125q-0.296875 0.578125 -0.84375 1.0q-0.53125 0.40625 -1.265625 0.640625q-0.734375 0.234375 -1.578125 0.234375q-0.21875 0 -0.46875 -0.015625q-0.234375 0 -0.484375 -0.015625q-0.234375 -0.015625 -0.46875 -0.046875q-0.234375 -0.015625 -0.421875 -0.046875l0 -1.15625q0.40625 0.09375 0.90625 0.140625q0.515625 0.046875 1.03125 0.046875q0.59375 0 1.0625 -0.140625q0.46875 -0.140625 0.796875 -0.390625q0.328125 -0.265625 0.5 -0.640625q0.171875 -0.375 0.171875 -0.828125q0 -0.90625 -0.640625 -1.3125q-0.640625 -0.40625 -1.84375 -0.40625l-1.8125 0l0 -4.890625l5.15625 0l0 1.125l-3.953125 0l0 2.6875l0.84375 0q0.6875 0 1.328125 0.125q0.65625 0.125 1.15625 0.4375q0.515625 0.296875 0.828125 0.828125q0.3125 0.515625 0.3125 1.3125zm7.808075 2.796875q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 
0.953125 -0.359375l0 1.1875zm8.558075 -3.796875q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm2.79245 0.734375q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 -0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.339325 -0.453125q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm9.6987 -4.90625q-1.015625 -0.21875 -1.734375 -0.21875q-1.71875 0 -1.71875 1.796875l0 1.296875l3.21875 0l0 1.0625l-3.21875 0l0 5.21875l-1.296875 0l0 -5.21875l-2.359375 0l0 -1.0625l2.359375 0l0 -1.21875q0 -2.9375 3.0625 -2.9375q0.765625 0 1.6875 0.171875l0 1.109375zm-7.703125 1.796875l0 0zm15.558105 4.328125q0 0.65625 -0.234375 1.234375q-0.234375 0.578125 -0.6875 1.015625q-0.4375 0.421875 -1.0625 0.671875q-0.609375 0.234375 -1.359375 0.234375q-0.796875 0 -1.40625 -0.25q-0.609375 -0.25 -1.015625 -0.765625q-0.40625 -0.53125 -0.6250305 -1.328125q-0.203125 -0.796875 -0.203125 -1.890625q0 -0.734375 0.09375 -1.421875q0.09375 -0.6875 0.31253052 -1.296875q0.21875 -0.609375 
0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.558044 -2.140625q0 0.609375 -0.25 1.09375q-0.25 0.46875 -0.703125 0.796875q-0.453125 0.3125 -1.0625 0.484375q-0.59375 0.15625 -1.3125 0.15625q-0.78125 0 -1.375 -0.171875q-0.59375 -0.171875 -1.0 -0.484375q-0.40625 -0.3125 -0.609375 -0.75q-0.203125 -0.4375 -0.203125 -0.953125q0 -0.875 0.484375 -1.515625q0.5 -0.640625 1.515625 -1.15625q-0.9375 -0.484375 -1.375 -1.0625q-0.421875 -0.578125 -0.421875 -1.328125q0 -0.46875 0.1875 -0.890625q0.1875 -0.4375 0.578125 -0.765625q0.390625 -0.34375 0.96875 -0.546875q0.578125 -0.203125 1.359375 -0.203125q0.734375 0 1.296875 0.15625q0.5625 0.15625 0.9375 0.453125q0.390625 0.296875 0.578125 0.71875q0.1875 0.40625 0.1875 0.921875q0 0.828125 -0.46875 1.421875q-0.46875 0.578125 -1.3125 1.015625q0.421875 0.21875 0.78125 0.484375q0.375 0.25 0.640625 0.5625q0.265625 0.3125 0.421875 0.703125q0.15625 0.390625 0.15625 0.859375zm-1.53125 -4.953125q0 -0.640625 -0.453125 -0.953125q-0.453125 -0.328125 -1.28125 -0.328125q-0.828125 0 -1.28125 0.3125q-0.453125 0.3125 -0.453125 0.9375q0 0.296875 0.109375 0.546875q0.109375 0.234375 0.328125 0.453125q0.234375 0.203125 0.578125 0.40625q0.34375 0.203125 0.828125 0.421875q0.828125 -0.390625 1.21875 -0.8125q0.40625 -0.421875 0.40625 -0.984375zm0.140625 5.046875q0 -0.265625 -0.09375 -0.515625q-0.078125 -0.265625 -0.3125 -0.515625q-0.21875 -0.25 -0.609375 -0.5q-0.375 -0.25 -0.96875 -0.5q-0.5 0.234375 -0.84375 0.46875q-0.328125 0.234375 -0.546875 0.484375q-0.203125 0.25 -0.296875 0.515625q-0.078125 0.265625 -0.078125 0.546875q0 0.328125 0.140625 0.59375q0.140625 0.25 0.390625 0.421875q0.25 0.171875 0.59375 0.265625q0.34375 0.09375 0.75 0.09375q0.390625 0 0.734375 -0.078125q0.34375 -0.09375 0.59375 -0.25q0.25 -0.171875 0.390625 -0.421875q0.15625 -0.25 0.15625 -0.609375zm9.44873 2.3125l-6.3125 0l0 -1.140625l2.46875 -2.46875q0.609375 -0.59375 0.984375 -1.03125q0.390625 -0.4375 0.59375 -0.796875q0.21875 -0.375 0.296875 -0.6875q0.078125 -0.328125 0.078125 -0.703125q0 -0.34375 -0.109375 -0.65625q-0.09375 -0.328125 -0.296875 -0.5625q-0.1875 -0.25 -0.5 -0.390625q-0.3125 -0.140625 -0.75 -0.140625q-0.609375 0 -1.109375 0.28125q-0.5 0.265625 -0.921875 0.6875l-0.703125 -0.828125q0.546875 -0.578125 1.25 -0.921875q0.703125 -0.34375 1.640625 -0.34375q0.640625 0 1.15625 0.1875q0.53125 0.1875 0.90625 0.546875q0.390625 0.359375 0.59375 0.890625q0.21875 0.515625 0.21875 1.15625q0 0.5625 -0.15625 1.03125q-0.140625 0.46875 -0.4375 0.9375q-0.296875 0.453125 -0.75 0.953125q-0.453125 0.5 -1.0625 1.09375l-1.734375 1.6875l4.65625 0l0 1.21875zm7.8080444 -3.0625q0 0.71875 -0.3125 1.3125q-0.296875 0.578125 -0.84375 1.0q-0.53125 0.40625 -1.265625 0.640625q-0.734375 0.234375 -1.578125 0.234375q-0.21875 0 -0.46875 -0.015625q-0.234375 0 -0.484375 -0.015625q-0.234375 -0.015625 -0.46875 -0.046875q-0.234375 -0.015625 -0.421875 -0.046875l0 -1.15625q0.40625 0.09375 0.90625 0.140625q0.515625 0.046875 1.03125 0.046875q0.59375 0 1.0625 -0.140625q0.46875 -0.140625 0.796875 -0.390625q0.328125 -0.265625 0.5 -0.640625q0.171875 -0.375 0.171875 -0.828125q0 -0.90625 -0.640625 -1.3125q-0.640625 -0.40625 -1.84375 -0.40625l-1.8125 0l0 -4.890625l5.15625 0l0 1.125l-3.953125 0l0 2.6875l0.84375 0q0.6875 0 1.328125 
0.125q0.65625 0.125 1.15625 0.4375q0.515625 0.296875 0.828125 0.828125q0.3125 0.515625 0.3125 1.3125zm8.214355 3.0625l-6.0 0l0 -1.1875l2.453125 0l0 -6.984375l-2.296875 1.25l-0.46875 -1.09375l3.046875 -1.59375l1.125 0l0 8.421875l2.140625 0l0 1.1875zm8.151794 -4.0625q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm7.8862305 4.25l-0.03125 -0.984375q-0.59375 0.59375 -1.21875 0.859375q-0.609375 0.25 -1.296875 0.25q-0.625 0 -1.078125 -0.15625q-0.4375 -0.15625 -0.734375 -0.4375q-0.28125 -0.28125 -0.421875 -0.65625q-0.140625 -0.390625 -0.140625 -0.84375q0 -1.09375 0.828125 -1.71875q0.828125 -0.640625 2.4375 -0.640625l1.515625 0l0 -0.640625q0 -0.65625 -0.421875 -1.046875q-0.40625 -0.390625 -1.265625 -0.390625q-0.625 0 -1.234375 0.140625q-0.59375 0.140625 -1.234375 0.40625l0 -1.15625q0.234375 -0.09375 0.53125 -0.171875q0.296875 -0.078125 0.625 -0.140625q0.328125 -0.078125 0.6875 -0.109375q0.359375 -0.046875 0.734375 -0.046875q0.65625 0 1.1875 0.15625q0.546875 0.140625 0.90625 0.4375q0.375 0.296875 0.5625 0.75q0.203125 0.453125 0.203125 1.078125l0 5.0625l-1.140625 0zm-0.140625 -3.34375l-1.609375 0q-0.484375 0 -0.828125 0.09375q-0.34375 0.09375 -0.5625 0.265625q-0.21875 0.171875 -0.328125 0.421875q-0.09375 0.25 -0.09375 0.5625q0 0.203125 0.0625 0.40625q0.0625 0.1875 0.203125 0.34375q0.15625 0.140625 0.390625 0.234375q0.234375 0.078125 0.5625 0.078125q0.4375 0 1.0 -0.265625q0.578125 -0.265625 1.203125 -0.84375l0 -1.296875zm10.167419 1.234375l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm10.339355 5.4375q0 0.625 -0.265625 1.1875q-0.25 0.546875 -0.765625 0.96875q-0.515625 0.40625 -1.296875 0.640625q-0.765625 0.234375 -1.796875 0.234375q-0.578125 0 -1.03125 -0.03125q-0.453125 -0.03125 -0.84375 -0.09375l0 -1.140625q0.453125 0.078125 0.953125 0.125q0.515625 0.046875 1.03125 0.046875q0.71875 0 1.21875 -0.125q0.515625 -0.140625 0.84375 -0.375q0.328125 -0.25 0.46875 -0.59375q0.140625 -0.34375 0.140625 -0.765625q0 -0.40625 -0.171875 -0.6875q-0.171875 -0.296875 -0.5 -0.5q-0.3125 -0.203125 -0.765625 -0.296875q-0.4375 -0.09375 -0.953125 -0.09375l-1.09375 0l0 -1.046875l1.109375 0q0.421875 0 0.78125 -0.109375q0.359375 -0.125 0.609375 -0.328125q0.25 -0.21875 0.375 -0.53125q0.140625 -0.3125 0.140625 -0.703125q0 -0.765625 -0.46875 -1.109375q-0.46875 -0.359375 -1.375 -0.359375q-0.484375 0 -1.0 0.09375q-0.5 0.09375 -1.09375 0.28125l0 -1.109375q0.25 -0.09375 0.53125 -0.15625q0.28125 -0.078125 0.5625 -0.125q0.28125 
-0.046875 0.5625 -0.0625q0.28125 -0.03125 0.53125 -0.03125q0.765625 0 1.34375 0.171875q0.578125 0.15625 0.96875 0.46875q0.390625 0.296875 0.578125 0.75q0.203125 0.4375 0.203125 0.984375q0 0.8125 -0.421875 1.375q-0.421875 0.5625 -1.15625 0.890625q0.375 0.046875 0.734375 0.234375q0.375 0.171875 0.65625 0.453125q0.296875 0.265625 0.46875 0.640625q0.1875 0.375 0.1875 0.828125zm1.6517944 -0.609375q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 -0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.808105 1.5l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm3.9330444 4.828125q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 -0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.401855 -0.203125q0 0.921875 -0.25 1.640625q-0.25 0.71875 -0.71875 1.21875q-0.46875 0.5 -1.140625 0.78125q-0.65625 0.265625 -1.484375 0.265625q-0.65625 0 -1.34375 -0.125q-0.671875 -0.125 -1.34375 -0.40625l0 -9.90625l1.28125 0l0 2.84375l-0.0625 1.359375q0.546875 -0.734375 1.171875 -1.03125q0.625 -0.3125 1.34375 -0.3125q0.625 0 1.09375 0.265625q0.484375 0.265625 0.8125 0.75q0.328125 0.46875 0.484375 1.15625q0.15625 0.671875 0.15625 1.5zm-1.296875 0.0625q0 -0.578125 -0.09375 -1.0625q-0.078125 -0.484375 -0.265625 -0.828125q-0.171875 -0.34375 -0.46875 -0.53125q-0.28125 -0.203125 -0.671875 -0.203125q-0.25 0 -0.5 0.078125q-0.25 0.078125 -0.515625 0.265625q-0.265625 0.171875 -0.5625 0.46875q-0.296875 0.296875 -0.625 0.734375l0 3.5625q0.359375 0.15625 0.75 0.25q0.390625 0.078125 0.75 0.078125q0.4375 0 0.828125 -0.140625q0.40625 -0.140625 0.703125 -0.46875q0.3125 -0.328125 0.484375 -0.859375q0.1875 -0.546875 0.1875 -1.34375zm9.604919 -5.40625q-1.015625 -0.21875 -1.734375 -0.21875q-1.71875 0 -1.71875 1.796875l0 1.296875l3.21875 0l0 1.0625l-3.21875 0l0 5.21875l-1.296875 0l0 -5.21875l-2.359375 0l0 -1.0625l2.359375 0l0 -1.21875q0 -2.9375 3.0625 -2.9375q0.765625 0 1.6875 0.171875l0 1.109375zm-7.703125 1.796875l0 0zm15.51123 3.546875q0 0.921875 -0.25 1.640625q-0.25 0.71875 -0.71875 1.21875q-0.46875 0.5 -1.140625 0.78125q-0.65625 0.265625 -1.484375 0.265625q-0.65625 0 -1.34375 -0.125q-0.671875 -0.125 -1.34375 -0.40625l0 -9.90625l1.28125 0l0 2.84375l-0.0625 
1.359375q0.546875 -0.734375 1.171875 -1.03125q0.625 -0.3125 1.34375 -0.3125q0.625 0 1.09375 0.265625q0.484375 0.265625 0.8125 0.75q0.328125 0.46875 0.484375 1.15625q0.15625 0.671875 0.15625 1.5zm-1.296875 0.0625q0 -0.578125 -0.09375 -1.0625q-0.078125 -0.484375 -0.265625 -0.828125q-0.171875 -0.34375 -0.46875 -0.53125q-0.28125 -0.203125 -0.671875 -0.203125q-0.25 0 -0.5 0.078125q-0.25 0.078125 -0.515625 0.265625q-0.265625 0.171875 -0.5625 0.46875q-0.296875 0.296875 -0.625 0.734375l0 3.5625q0.359375 0.15625 0.75 0.25q0.390625 0.078125 0.75 0.078125q0.4375 0 0.828125 -0.140625q0.40625 -0.140625 0.703125 -0.46875q0.3125 -0.328125 0.484375 -0.859375q0.1875 -0.546875 0.1875 -1.34375zm9.401794 0.71875q0 0.65625 -0.234375 1.234375q-0.234375 0.578125 -0.6875 1.015625q-0.4375 0.421875 -1.0625 0.671875q-0.609375 0.234375 -1.359375 0.234375q-0.796875 0 -1.40625 -0.25q-0.609375 -0.25 -1.015625 -0.765625q-0.40625 -0.53125 -0.625 -1.328125q-0.203125 -0.796875 -0.203125 -1.890625q0 -0.734375 0.09375 -1.421875q0.09375 -0.6875 0.3125 -1.296875q0.21875 -0.609375 0.578125 -1.109375q0.375 -0.5 0.921875 -0.859375q0.546875 -0.375 1.28125 -0.578125q0.734375 -0.203125 1.71875 -0.203125l0.9375 0l0 1.125l-1.015625 0q-0.859375 0 -1.5 0.203125q-0.625 0.203125 -1.046875 0.578125q-0.421875 0.375 -0.65625 0.90625q-0.21875 0.515625 -0.28125 1.171875l-0.03125 0.296875q0.46875 -0.265625 1.0625 -0.421875q0.609375 -0.171875 1.3125 -0.171875q0.71875 0 1.265625 0.21875q0.546875 0.203125 0.90625 0.578125q0.375 0.375 0.546875 0.90625q0.1875 0.53125 0.1875 1.15625zm-1.328125 0.078125q0 -0.4375 -0.109375 -0.796875q-0.109375 -0.359375 -0.34375 -0.59375q-0.21875 -0.25 -0.5625 -0.375q-0.34375 -0.140625 -0.828125 -0.140625q-0.28125 0 -0.578125 0.046875q-0.28125 0.046875 -0.5625 0.140625q-0.265625 0.09375 -0.515625 0.21875q-0.25 0.109375 -0.453125 0.234375q0 0.953125 0.125 1.59375q0.140625 0.625 0.390625 1.015625q0.265625 0.375 0.640625 0.53125q0.390625 0.15625 0.890625 0.15625q0.421875 0 0.765625 -0.125q0.34375 -0.140625 0.59375 -0.40625q0.25 -0.265625 0.390625 -0.640625q0.15625 -0.375 0.15625 -0.859375zm9.276855 -1.109375q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm9.386169 0.1875q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 
-0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm9.38623 0.1875q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0z" fill-rule="nonzero"></path><path fill="#000000" d="m61.447918 425.47632l-1.75 0l-3.421875 -3.9375l0 3.9375l-1.28125 0l0 -10.34375l1.28125 0l0 6.359375l3.296875 -3.375l1.6875 0l-3.453125 3.390625l3.640625 3.96875zm7.667446 -4.0625q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171871 0q0 1.125 0.6249962 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6874962 -0.25 -1.1249962 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0624962 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.14062119 0.40625 -0.18749619 0.890625l3.8749962 0zm9.3862 0.1875q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 
-0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm9.4487 0.4375q0 0.984375 -0.28125 1.71875q-0.265625 0.734375 -0.75 1.21875q-0.484375 0.484375 -1.140625 0.734375q-0.65625 0.234375 -1.421875 0.234375q-0.359375 0 -0.703125 -0.046875q-0.34375 -0.03125 -0.703125 -0.125l0 3.078125l-1.28125 0l0 -10.359375l1.140625 0l0.078125 1.234375q0.546875 -0.75 1.171875 -1.046875q0.625 -0.3125 1.34375 -0.3125q0.625 0 1.09375 0.265625q0.484375 0.265625 0.8125 0.75q0.328125 0.46875 0.484375 1.15625q0.15625 0.671875 0.15625 1.5zm-1.296875 0.0625q0 -0.578125 -0.09375 -1.0625q-0.078125 -0.484375 -0.265625 -0.828125q-0.171875 -0.34375 -0.46875 -0.53125q-0.28125 -0.203125 -0.671875 -0.203125q-0.25 0 -0.5 0.078125q-0.25 0.078125 -0.515625 0.265625q-0.265625 0.171875 -0.5625 0.46875q-0.296875 0.296875 -0.625 0.734375l0 3.5625q0.34375 0.15625 0.734375 0.25q0.390625 0.078125 0.765625 0.078125q1.03125 0 1.609375 -0.703125q0.59375 -0.703125 0.59375 -2.109375zm9.7612 1.640625l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375z" fill-rule="nonzero"></path><path fill="#000000" d="m355.41818 425.47632l-0.03125 -0.984375q-0.59375 0.59375 -1.21875 0.859375q-0.609375 0.25 -1.296875 0.25q-0.625 0 -1.078125 -0.15625q-0.4375 -0.15625 -0.734375 -0.4375q-0.28125 -0.28125 -0.421875 -0.65625q-0.140625 -0.390625 -0.140625 -0.84375q0 -1.09375 0.828125 -1.71875q0.828125 -0.640625 2.4375 -0.640625l1.515625 0l0 -0.640625q0 -0.65625 -0.421875 -1.046875q-0.40625 -0.390625 -1.265625 -0.390625q-0.625 0 -1.234375 0.140625q-0.59375 0.140625 -1.234375 0.40625l0 -1.15625q0.234375 -0.09375 0.53125 -0.171875q0.296875 -0.078125 0.625 -0.140625q0.328125 -0.078125 0.6875 -0.109375q0.359375 -0.046875 0.734375 -0.046875q0.65625 0 1.1875 0.15625q0.546875 0.140625 0.90625 0.4375q0.375 0.296875 0.5625 0.75q0.203125 0.453125 0.203125 1.078125l0 5.0625l-1.140625 0zm-0.140625 -3.34375l-1.609375 0q-0.484375 0 -0.828125 0.09375q-0.34375 0.09375 -0.5625 0.265625q-0.21875 0.171875 -0.328125 0.421875q-0.09375 0.25 -0.09375 0.5625q0 0.203125 0.0625 0.40625q0.0625 0.1875 0.203125 0.34375q0.15625 0.140625 0.390625 0.234375q0.234375 0.078125 0.5625 0.078125q0.4375 0 1.0 -0.265625q0.578125 -0.265625 1.203125 -0.84375l0 -1.296875zm9.1987 3.078125q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.620575 -3.546875q0 0.921875 -0.25 1.640625q-0.25 0.71875 -0.71875 1.21875q-0.46875 0.5 -1.140625 0.78125q-0.65625 0.265625 -1.484375 
0.265625q-0.65625 0 -1.34375 -0.125q-0.671875 -0.125 -1.34375 -0.40625l0 -9.90625l1.28125 0l0 2.84375l-0.0625 1.359375q0.546875 -0.734375 1.171875 -1.03125q0.625 -0.3125 1.34375 -0.3125q0.625 0 1.09375 0.265625q0.484375 0.265625 0.8125 0.75q0.328125 0.46875 0.484375 1.15625q0.15625 0.671875 0.15625 1.5zm-1.296875 0.0625q0 -0.578125 -0.09375 -1.0625q-0.078125 -0.484375 -0.265625 -0.828125q-0.171875 -0.34375 -0.46875 -0.53125q-0.28125 -0.203125 -0.671875 -0.203125q-0.25 0 -0.5 0.078125q-0.25 0.078125 -0.515625 0.265625q-0.265625 0.171875 -0.5625 0.46875q-0.296875 0.296875 -0.625 0.734375l0 3.5625q0.359375 0.15625 0.75 0.25q0.390625 0.078125 0.75 0.078125q0.4375 0 0.828125 -0.140625q0.40625 -0.140625 0.703125 -0.46875q0.3125 -0.328125 0.484375 -0.859375q0.1875 -0.546875 0.1875 -1.34375zm2.6987 0.234375q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 -0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.245575 3.609375l-6.0 0l0 -1.1875l2.453125 0l0 -6.984375l-2.296875 1.25l-0.46875 -1.09375l3.046875 -1.59375l1.125 0l0 8.421875l2.140625 0l0 1.1875zm8.151825 -2.40625q0 0.609375 -0.25 1.09375q-0.25 0.46875 -0.703125 0.796875q-0.453125 0.3125 -1.0625 0.484375q-0.59375 0.15625 -1.3125 0.15625q-0.78125 0 -1.375 -0.171875q-0.59375 -0.171875 -1.0 -0.484375q-0.40625 -0.3125 -0.609375 -0.75q-0.203125 -0.4375 -0.203125 -0.953125q0 -0.875 0.484375 -1.515625q0.5 -0.640625 1.515625 -1.15625q-0.9375 -0.484375 -1.375 -1.0625q-0.421875 -0.578125 -0.421875 -1.328125q0 -0.46875 0.1875 -0.890625q0.1875 -0.4375 0.578125 -0.765625q0.390625 -0.34375 0.96875 -0.546875q0.578125 -0.203125 1.359375 -0.203125q0.734375 0 1.296875 0.15625q0.5625 0.15625 0.9375 0.453125q0.390625 0.296875 0.578125 0.71875q0.1875 0.40625 0.1875 0.921875q0 0.828125 -0.46875 1.421875q-0.46875 0.578125 -1.3125 1.015625q0.421875 0.21875 0.78125 0.484375q0.375 0.25 0.640625 0.5625q0.265625 0.3125 0.421875 0.703125q0.15625 0.390625 0.15625 0.859375zm-1.53125 -4.953125q0 -0.640625 -0.453125 -0.953125q-0.453125 -0.328125 -1.28125 -0.328125q-0.828125 0 -1.28125 0.3125q-0.453125 0.3125 -0.453125 0.9375q0 0.296875 0.109375 0.546875q0.109375 0.234375 0.328125 0.453125q0.234375 0.203125 0.578125 0.40625q0.34375 0.203125 0.828125 0.421875q0.828125 -0.390625 1.21875 -0.8125q0.40625 -0.421875 0.40625 -0.984375zm0.140625 5.046875q0 -0.265625 -0.09375 -0.515625q-0.078125 -0.265625 -0.3125 -0.515625q-0.21875 -0.25 -0.609375 -0.5q-0.375 -0.25 -0.96875 -0.5q-0.5 0.234375 -0.84375 0.46875q-0.328125 0.234375 -0.546875 0.484375q-0.203125 0.25 -0.296875 0.515625q-0.078125 0.265625 -0.078125 0.546875q0 0.328125 0.140625 0.59375q0.140625 0.25 0.390625 0.421875q0.25 0.171875 0.59375 0.265625q0.34375 0.09375 0.75 0.09375q0.390625 0 0.734375 -0.078125q0.34375 -0.09375 0.59375 -0.25q0.25 -0.171875 0.390625 -0.421875q0.15625 -0.25 0.15625 -0.609375zm2.85495 -1.203125q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 
-0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.401825 -0.203125q0 0.921875 -0.25 1.640625q-0.25 0.71875 -0.71875 1.21875q-0.46875 0.5 -1.140625 0.78125q-0.65625 0.265625 -1.484375 0.265625q-0.65625 0 -1.34375 -0.125q-0.671875 -0.125 -1.34375 -0.40625l0 -9.90625l1.28125 0l0 2.84375l-0.0625 1.359375q0.546875 -0.734375 1.171875 -1.03125q0.625 -0.3125 1.34375 -0.3125q0.625 0 1.09375 0.265625q0.484375 0.265625 0.8125 0.75q0.328125 0.46875 0.484375 1.15625q0.15625 0.671875 0.15625 1.5zm-1.296875 0.0625q0 -0.578125 -0.09375 -1.0625q-0.078125 -0.484375 -0.265625 -0.828125q-0.171875 -0.34375 -0.46875 -0.53125q-0.28125 -0.203125 -0.671875 -0.203125q-0.25 0 -0.5 0.078125q-0.25 0.078125 -0.515625 0.265625q-0.265625 0.171875 -0.5625 0.46875q-0.296875 0.296875 -0.625 0.734375l0 3.5625q0.359375 0.15625 0.75 0.25q0.390625 0.078125 0.75 0.078125q0.4375 0 0.828125 -0.140625q0.40625 -0.140625 0.703125 -0.46875q0.3125 -0.328125 0.484375 -0.859375q0.1875 -0.546875 0.1875 -1.34375zm9.7612 1.640625l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm10.026825 8.078125q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.058075 0q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.558075 0.265625l-6.3125 0l0 -1.140625l2.46875 -2.46875q0.609375 -0.59375 0.984375 -1.03125q0.390625 -0.4375 0.59375 -0.796875q0.21875 -0.375 0.296875 -0.6875q0.078125 -0.328125 0.078125 -0.703125q0 -0.34375 -0.109375 -0.65625q-0.09375 -0.328125 -0.296875 -0.5625q-0.1875 -0.25 -0.5 -0.390625q-0.3125 -0.140625 -0.75 -0.140625q-0.609375 0 -1.109375 0.28125q-0.5 0.265625 -0.921875 0.6875l-0.703125 -0.828125q0.546875 -0.578125 1.25 -0.921875q0.703125 
-0.34375 1.640625 -0.34375q0.640625 0 1.15625 0.1875q0.53125 0.1875 0.90625 0.546875q0.390625 0.359375 0.59375 0.890625q0.21875 0.515625 0.21875 1.15625q0 0.5625 -0.15625 1.03125q-0.140625 0.46875 -0.4375 0.9375q-0.296875 0.453125 -0.75 0.953125q-0.453125 0.5 -1.0625 1.09375l-1.734375 1.6875l4.65625 0l0 1.21875zm8.370575 -9.15625q-1.015625 -0.21875 -1.734375 -0.21875q-1.71875 0 -1.71875 1.796875l0 1.296875l3.21875 0l0 1.0625l-3.21875 0l0 5.21875l-1.296875 0l0 -5.21875l-2.359375 0l0 -1.0625l2.359375 0l0 -1.21875q0 -2.9375 3.0625 -2.9375q0.765625 0 1.6875 0.171875l0 1.109375zm-7.703125 1.796875l0 0zm15.4487 4.953125q0 0.609375 -0.25 1.09375q-0.25 0.46875 -0.703125 0.796875q-0.453125 0.3125 -1.0625 0.484375q-0.59375 0.15625 -1.3125 0.15625q-0.78125 0 -1.375 -0.171875q-0.59375 -0.171875 -1.0 -0.484375q-0.40625 -0.3125 -0.609375 -0.75q-0.203125 -0.4375 -0.203125 -0.953125q0 -0.875 0.484375 -1.515625q0.5 -0.640625 1.515625 -1.15625q-0.9375 -0.484375 -1.375 -1.0625q-0.421875 -0.578125 -0.421875 -1.328125q0 -0.46875 0.1875 -0.890625q0.1875 -0.4375 0.578125 -0.765625q0.390625 -0.34375 0.96875 -0.546875q0.578125 -0.203125 1.359375 -0.203125q0.734375 0 1.296875 0.15625q0.5625 0.15625 0.9375 0.453125q0.390625 0.296875 0.578125 0.71875q0.1875 0.40625 0.1875 0.921875q0 0.828125 -0.46875 1.421875q-0.46875 0.578125 -1.3125 1.015625q0.421875 0.21875 0.78125 0.484375q0.375 0.25 0.640625 0.5625q0.265625 0.3125 0.421875 0.703125q0.15625 0.390625 0.15625 0.859375zm-1.53125 -4.953125q0 -0.640625 -0.453125 -0.953125q-0.453125 -0.328125 -1.28125 -0.328125q-0.828125 0 -1.28125 0.3125q-0.453125 0.3125 -0.453125 0.9375q0 0.296875 0.109375 0.546875q0.109375 0.234375 0.328125 0.453125q0.234375 0.203125 0.578125 0.40625q0.34375 0.203125 0.828125 0.421875q0.828125 -0.390625 1.21875 -0.8125q0.40625 -0.421875 0.40625 -0.984375zm0.140625 5.046875q0 -0.265625 -0.09375 -0.515625q-0.078125 -0.265625 -0.3125 -0.515625q-0.21875 -0.25 -0.609375 -0.5q-0.375 -0.25 -0.96875 -0.5q-0.5 0.234375 -0.84375 0.46875q-0.328125 0.234375 -0.546875 0.484375q-0.203125 0.25 -0.296875 0.515625q-0.078125 0.265625 -0.078125 0.546875q0 0.328125 0.140625 0.59375q0.140625 0.25 0.390625 0.421875q0.25 0.171875 0.59375 0.265625q0.34375 0.09375 0.75 0.09375q0.390625 0 0.734375 -0.078125q0.34375 -0.09375 0.59375 -0.25q0.25 -0.171875 0.390625 -0.421875q0.15625 -0.25 0.15625 -0.609375zm9.1987 -0.75q0 0.71875 -0.3125 1.3125q-0.296875 0.578125 -0.84375 1.0q-0.53125 0.40625 -1.265625 0.640625q-0.734375 0.234375 -1.578125 0.234375q-0.21875 0 -0.46875 -0.015625q-0.234375 0 -0.484375 -0.015625q-0.234375 -0.015625 -0.46875 -0.046875q-0.234375 -0.015625 -0.421875 -0.046875l0 -1.15625q0.40625 0.09375 0.90625 0.140625q0.515625 0.046875 1.03125 0.046875q0.59375 0 1.0625 -0.140625q0.46875 -0.140625 0.796875 -0.390625q0.328125 -0.265625 0.5 -0.640625q0.171875 -0.375 0.171875 -0.828125q0 -0.90625 -0.640625 -1.3125q-0.640625 -0.40625 -1.84375 -0.40625l-1.8125 0l0 -4.890625l5.15625 0l0 1.125l-3.953125 0l0 2.6875l0.84375 0q0.6875 0 1.328125 0.125q0.65625 0.125 1.15625 0.4375q0.515625 0.296875 0.828125 0.828125q0.3125 0.515625 0.3125 1.3125zm7.808075 2.796875q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 
0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.558075 -3.796875q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm2.79245 0.734375q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 -0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.339325 -0.453125q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm9.6987 -4.90625q-1.015625 -0.21875 -1.734375 -0.21875q-1.71875 0 -1.71875 1.796875l0 1.296875l3.21875 0l0 1.0625l-3.21875 0l0 5.21875l-1.296875 0l0 -5.21875l-2.359375 0l0 -1.0625l2.359375 0l0 -1.21875q0 -2.9375 3.0625 -2.9375q0.765625 0 1.6875 0.171875l0 1.109375zm-7.703125 1.796875l0 0zm15.558105 4.328125q0 0.65625 -0.234375 1.234375q-0.234375 0.578125 -0.6875 1.015625q-0.4375 0.421875 -1.0625 0.671875q-0.609375 0.234375 -1.359375 0.234375q-0.796875 0 -1.40625 
-0.25q-0.609375 -0.25 -1.015625 -0.765625q-0.40625 -0.53125 -0.6250305 -1.328125q-0.203125 -0.796875 -0.203125 -1.890625q0 -0.734375 0.09375 -1.421875q0.09375 -0.6875 0.31253052 -1.296875q0.21875 -0.609375 0.578125 -1.109375q0.375 -0.5 0.921875 -0.859375q0.546875 -0.375 1.28125 -0.578125q0.734375 -0.203125 1.71875 -0.203125l0.9375 0l0 1.125l-1.015625 0q-0.859375 0 -1.5 0.203125q-0.625 0.203125 -1.046875 0.578125q-0.421875 0.375 -0.65625 0.90625q-0.21875 0.515625 -0.28125 1.171875l-0.03125 0.296875q0.46875 -0.265625 1.0625 -0.421875q0.609375 -0.171875 1.3125 -0.171875q0.71875 0 1.265625 0.21875q0.546875 0.203125 0.90625 0.578125q0.375 0.375 0.546875 0.90625q0.1875 0.53125 0.1875 1.15625zm-1.328125 0.078125q0 -0.4375 -0.109375 -0.796875q-0.109375 -0.359375 -0.34375 -0.59375q-0.21875 -0.25 -0.5625 -0.375q-0.34375 -0.140625 -0.828125 -0.140625q-0.28125 0 -0.578125 0.046875q-0.28125 0.046875 -0.5625 0.140625q-0.265625 0.09375 -0.515625 0.21875q-0.25 0.109375 -0.453125 0.234375q0 0.953125 0.125 1.59375q0.140625 0.625 0.390625 1.015625q0.265625 0.375 0.640625 0.53125q0.390625 0.15625 0.890625 0.15625q0.421875 0 0.765625 -0.125q0.34375 -0.140625 0.59375 -0.40625q0.25 -0.265625 0.390625 -0.640625q0.15625 -0.375 0.15625 -0.859375zm9.026794 -0.109375q0 0.71875 -0.3125 1.3125q-0.296875 0.578125 -0.84375 1.0q-0.53125 0.40625 -1.265625 0.640625q-0.734375 0.234375 -1.578125 0.234375q-0.21875 0 -0.46875 -0.015625q-0.234375 0 -0.484375 -0.015625q-0.234375 -0.015625 -0.46875 -0.046875q-0.234375 -0.015625 -0.421875 -0.046875l0 -1.15625q0.40625 0.09375 0.90625 0.140625q0.515625 0.046875 1.03125 0.046875q0.59375 0 1.0625 -0.140625q0.46875 -0.140625 0.796875 -0.390625q0.328125 -0.265625 0.5 -0.640625q0.171875 -0.375 0.171875 -0.828125q0 -0.90625 -0.640625 -1.3125q-0.640625 -0.40625 -1.84375 -0.40625l-1.8125 0l0 -4.890625l5.15625 0l0 1.125l-3.953125 0l0 2.6875l0.84375 0q0.6875 0 1.328125 0.125q0.65625 0.125 1.15625 0.4375q0.515625 0.296875 0.828125 0.828125q0.3125 0.515625 0.3125 1.3125zm8.776855 0.953125l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm10.839294 -0.8125q-1.015625 -0.21875 -1.734375 -0.21875q-1.71875 0 -1.71875 1.796875l0 1.296875l3.21875 0l0 1.0625l-3.21875 0l0 5.21875l-1.296875 0l0 -5.21875l-2.359375 0l0 -1.0625l2.359375 0l0 -1.21875q0 -2.9375 3.0625 -2.9375q0.765625 0 1.6875 0.171875l0 1.109375zm-7.703125 1.796875l0 0zm14.94873 7.09375q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.058044 0q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 
-0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.058105 0q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm9.026794 -1.84375l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm9.026855 8.34375l-0.03125 -0.984375q-0.59375 0.59375 -1.21875 0.859375q-0.609375 0.25 -1.296875 0.25q-0.625 0 -1.078125 -0.15625q-0.4375 -0.15625 -0.734375 -0.4375q-0.28125 -0.28125 -0.421875 -0.65625q-0.140625 -0.390625 -0.140625 -0.84375q0 -1.09375 0.828125 -1.71875q0.828125 -0.640625 2.4375 -0.640625l1.515625 0l0 -0.640625q0 -0.65625 -0.421875 -1.046875q-0.40625 -0.390625 -1.265625 -0.390625q-0.625 0 -1.234375 0.140625q-0.59375 0.140625 -1.234375 0.40625l0 -1.15625q0.234375 -0.09375 0.53125 -0.171875q0.296875 -0.078125 0.625 -0.140625q0.328125 -0.078125 0.6875 -0.109375q0.359375 -0.046875 0.734375 -0.046875q0.65625 0 1.1875 0.15625q0.546875 0.140625 0.90625 0.4375q0.375 0.296875 0.5625 0.75q0.203125 0.453125 0.203125 1.078125l0 5.0625l-1.140625 0zm-0.140625 -3.34375l-1.609375 0q-0.484375 0 -0.828125 0.09375q-0.34375 0.09375 -0.5625 0.265625q-0.21875 0.171875 -0.328125 0.421875q-0.09375 0.25 -0.09375 0.5625q0 0.203125 0.0625 0.40625q0.0625 0.1875 0.203125 0.34375q0.15625 0.140625 0.390625 0.234375q0.234375 0.078125 0.5625 0.078125q0.4375 0 1.0 -0.265625q0.578125 -0.265625 1.203125 -0.84375l0 -1.296875zm10.167419 1.234375l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm3.9331055 4.828125q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 -0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.339294 1.203125q0 0.609375 -0.25 1.09375q-0.25 0.46875 -0.703125 0.796875q-0.453125 0.3125 -1.0625 0.484375q-0.59375 0.15625 -1.3125 0.15625q-0.78125 0 -1.375 -0.171875q-0.59375 -0.171875 -1.0 -0.484375q-0.40625 -0.3125 -0.609375 -0.75q-0.203125 -0.4375 -0.203125 -0.953125q0 -0.875 0.484375 -1.515625q0.5 -0.640625 1.515625 -1.15625q-0.9375 -0.484375 
-1.375 -1.0625q-0.421875 -0.578125 -0.421875 -1.328125q0 -0.46875 0.1875 -0.890625q0.1875 -0.4375 0.578125 -0.765625q0.390625 -0.34375 0.96875 -0.546875q0.578125 -0.203125 1.359375 -0.203125q0.734375 0 1.296875 0.15625q0.5625 0.15625 0.9375 0.453125q0.390625 0.296875 0.578125 0.71875q0.1875 0.40625 0.1875 0.921875q0 0.828125 -0.46875 1.421875q-0.46875 0.578125 -1.3125 1.015625q0.421875 0.21875 0.78125 0.484375q0.375 0.25 0.640625 0.5625q0.265625 0.3125 0.421875 0.703125q0.15625 0.390625 0.15625 0.859375zm-1.53125 -4.953125q0 -0.640625 -0.453125 -0.953125q-0.453125 -0.328125 -1.28125 -0.328125q-0.828125 0 -1.28125 0.3125q-0.453125 0.3125 -0.453125 0.9375q0 0.296875 0.109375 0.546875q0.109375 0.234375 0.328125 0.453125q0.234375 0.203125 0.578125 0.40625q0.34375 0.203125 0.828125 0.421875q0.828125 -0.390625 1.21875 -0.8125q0.40625 -0.421875 0.40625 -0.984375zm0.140625 5.046875q0 -0.265625 -0.09375 -0.515625q-0.078125 -0.265625 -0.3125 -0.515625q-0.21875 -0.25 -0.609375 -0.5q-0.375 -0.25 -0.96875 -0.5q-0.5 0.234375 -0.84375 0.46875q-0.328125 0.234375 -0.546875 0.484375q-0.203125 0.25 -0.296875 0.515625q-0.078125 0.265625 -0.078125 0.546875q0 0.328125 0.140625 0.59375q0.140625 0.25 0.390625 0.421875q0.25 0.171875 0.59375 0.265625q0.34375 0.09375 0.75 0.09375q0.390625 0 0.734375 -0.078125q0.34375 -0.09375 0.59375 -0.25q0.25 -0.171875 0.390625 -0.421875q0.15625 -0.25 0.15625 -0.609375z" fill-rule="nonzero"></path><path fill="#ffff00" d="m645.3884 414.5915l257.8584 0l0 17.16098l-257.8584 0l0 -17.16098z" fill-rule="nonzero"></path><path fill="#000000" d="m652.59155 422.57007q0 0.625 -0.265625 1.1875q-0.25 0.546875 -0.765625 0.96875q-0.515625 0.40625 -1.296875 0.640625q-0.765625 0.234375 -1.796875 0.234375q-0.578125 0 -1.03125 -0.03125q-0.453125 -0.03125 -0.84375 -0.09375l0 -1.140625q0.453125 0.078125 0.953125 0.125q0.515625 0.046875 1.03125 0.046875q0.71875 0 1.21875 -0.125q0.515625 -0.140625 0.84375 -0.375q0.328125 -0.25 0.46875 -0.59375q0.140625 -0.34375 0.140625 -0.765625q0 -0.40625 -0.171875 -0.6875q-0.171875 -0.296875 -0.5 -0.5q-0.3125 -0.203125 -0.765625 -0.296875q-0.4375 -0.09375 -0.953125 -0.09375l-1.09375 0l0 -1.046875l1.109375 0q0.421875 0 0.78125 -0.109375q0.359375 -0.125 0.609375 -0.328125q0.25 -0.21875 0.375 -0.53125q0.140625 -0.3125 0.140625 -0.703125q0 -0.765625 -0.46875 -1.109375q-0.46875 -0.359375 -1.375 -0.359375q-0.484375 0 -1.0 0.09375q-0.5 0.09375 -1.09375 0.28125l0 -1.109375q0.25 -0.09375 0.53125 -0.15625q0.28125 -0.078125 0.5625 -0.125q0.28125 -0.046875 0.5625 -0.0625q0.28125 -0.03125 0.53125 -0.03125q0.765625 0 1.34375 0.171875q0.578125 0.15625 0.96875 0.46875q0.390625 0.296875 0.578125 0.75q0.203125 0.4375 0.203125 0.984375q0 0.8125 -0.421875 1.375q-0.421875 0.5625 -1.15625 0.890625q0.375 0.046875 0.734375 0.234375q0.375 0.171875 0.65625 0.453125q0.296875 0.265625 0.46875 0.640625q0.1875 0.375 0.1875 0.828125zm8.245605 2.90625l-6.3125 0l0 -1.140625l2.46875 -2.46875q0.609375 -0.59375 0.984375 -1.03125q0.390625 -0.4375 0.59375 -0.796875q0.21875 -0.375 0.296875 -0.6875q0.078125 -0.328125 0.078125 -0.703125q0 -0.34375 -0.109375 -0.65625q-0.09375 -0.328125 -0.296875 -0.5625q-0.1875 -0.25 -0.5 -0.390625q-0.3125 -0.140625 -0.75 -0.140625q-0.609375 0 -1.109375 0.28125q-0.5 0.265625 -0.921875 0.6875l-0.703125 -0.828125q0.546875 -0.578125 1.25 -0.921875q0.703125 -0.34375 1.640625 -0.34375q0.640625 0 1.15625 0.1875q0.53125 0.1875 0.90625 0.546875q0.390625 0.359375 0.59375 0.890625q0.21875 0.515625 0.21875 1.15625q0 0.5625 -0.15625 1.03125q-0.140625 0.46875 -0.4375 0.9375q-0.296875 
0.453125 -0.75 0.953125q-0.453125 0.5 -1.0625 1.09375l-1.734375 1.6875l4.65625 0l0 1.21875zm7.8080444 -3.0625q0 0.71875 -0.3125 1.3125q-0.296875 0.578125 -0.84375 1.0q-0.53125 0.40625 -1.265625 0.640625q-0.734375 0.234375 -1.578125 0.234375q-0.21875 0 -0.46875 -0.015625q-0.234375 0 -0.484375 -0.015625q-0.234375 -0.015625 -0.46875 -0.046875q-0.234375 -0.015625 -0.421875 -0.046875l0 -1.15625q0.40625 0.09375 0.90625 0.140625q0.515625 0.046875 1.03125 0.046875q0.59375 0 1.0625 -0.140625q0.46875 -0.140625 0.796875 -0.390625q0.328125 -0.265625 0.5 -0.640625q0.171875 -0.375 0.171875 -0.828125q0 -0.90625 -0.640625 -1.3125q-0.640625 -0.40625 -1.84375 -0.40625l-1.8125 0l0 -4.890625l5.15625 0l0 1.125l-3.953125 0l0 2.6875l0.84375 0q0.6875 0 1.328125 0.125q0.65625 0.125 1.15625 0.4375q0.515625 0.296875 0.828125 0.828125q0.3125 0.515625 0.3125 1.3125zm8.776855 0.953125l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm3.9330444 4.828125q0 -0.9375 0.25 -1.671875q0.265625 -0.734375 0.734375 -1.234375q0.46875 -0.5 1.125 -0.75q0.671875 -0.265625 1.484375 -0.265625q0.359375 0 0.6875 0.046875q0.34375 0.03125 0.671875 0.125l0 -3.078125l1.28125 0l0 10.34375l-1.140625 0l-0.03125 -1.390625q-0.546875 0.78125 -1.171875 1.15625q-0.609375 0.359375 -1.34375 0.359375q-0.625 0 -1.109375 -0.25q-0.46875 -0.265625 -0.796875 -0.75q-0.3125 -0.484375 -0.484375 -1.15625q-0.15625 -0.671875 -0.15625 -1.484375zm1.3125 -0.09375q0 1.34375 0.390625 2.0q0.390625 0.65625 1.109375 0.65625q0.484375 0 1.015625 -0.4375q0.53125 -0.4375 1.125 -1.28125l0 -3.421875q-0.3125 -0.140625 -0.6875 -0.21875q-0.375 -0.078125 -0.75 -0.078125q-1.046875 0 -1.625 0.671875q-0.578125 0.671875 -0.578125 2.109375zm13.276855 -4.78125l-4.015625 8.390625l-1.453125 0l4.171875 -8.390625l-5.171875 0l0 -1.1875l6.46875 0l0 1.1875zm7.6205444 8.125q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.370605 -2.640625q0 0.625 -0.265625 1.1875q-0.25 0.546875 -0.765625 0.96875q-0.515625 0.40625 -1.296875 0.640625q-0.765625 0.234375 -1.796875 0.234375q-0.578125 0 -1.03125 -0.03125q-0.453125 -0.03125 -0.84375 -0.09375l0 -1.140625q0.453125 0.078125 0.953125 0.125q0.515625 0.046875 1.03125 0.046875q0.71875 0 1.21875 -0.125q0.515625 -0.140625 0.84375 -0.375q0.328125 -0.25 0.46875 -0.59375q0.140625 -0.34375 0.140625 -0.765625q0 -0.40625 -0.171875 -0.6875q-0.171875 -0.296875 -0.5 -0.5q-0.3125 -0.203125 -0.765625 -0.296875q-0.4375 -0.09375 -0.953125 -0.09375l-1.09375 0l0 -1.046875l1.109375 0q0.421875 0 0.78125 -0.109375q0.359375 -0.125 0.609375 -0.328125q0.25 -0.21875 0.375 -0.53125q0.140625 -0.3125 0.140625 -0.703125q0 -0.765625 -0.46875 -1.109375q-0.46875 -0.359375 -1.375 -0.359375q-0.484375 0 -1.0 0.09375q-0.5 0.09375 -1.09375 0.28125l0 -1.109375q0.25 -0.09375 0.53125 -0.15625q0.28125 -0.078125 0.5625 -0.125q0.28125 -0.046875 0.5625 -0.0625q0.28125 -0.03125 
0.53125 -0.03125q0.765625 0 1.34375 0.171875q0.578125 0.15625 0.96875 0.46875q0.390625 0.296875 0.578125 0.75q0.203125 0.4375 0.203125 0.984375q0 0.8125 -0.421875 1.375q-0.421875 0.5625 -1.15625 0.890625q0.375 0.046875 0.734375 0.234375q0.375 0.171875 0.65625 0.453125q0.296875 0.265625 0.46875 0.640625q0.1875 0.375 0.1875 0.828125zm7.7455444 2.640625q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.058105 0q-0.5 0.1875 -1.03125 0.28125q-0.515625 0.09375 -1.078125 0.09375q-1.734375 0 -2.6875 -0.9375q-0.9375 -0.953125 -0.9375 -2.78125q0 -0.859375 0.265625 -1.5625q0.28125 -0.71875 0.765625 -1.21875q0.5 -0.515625 1.171875 -0.78125q0.6875 -0.28125 1.5 -0.28125q0.578125 0 1.078125 0.078125q0.5 0.078125 0.953125 0.265625l0 1.21875q-0.484375 -0.25 -0.984375 -0.359375q-0.484375 -0.125 -1.015625 -0.125q-0.484375 0 -0.921875 0.1875q-0.4375 0.1875 -0.765625 0.546875q-0.328125 0.34375 -0.53125 0.859375q-0.1875 0.5 -0.1875 1.140625q0 1.328125 0.640625 2.0q0.65625 0.65625 1.8125 0.65625q0.515625 0 1.0 -0.109375q0.5 -0.125 0.953125 -0.359375l0 1.1875zm8.558044 0.265625l-6.3125 0l0 -1.140625l2.46875 -2.46875q0.609375 -0.59375 0.984375 -1.03125q0.390625 -0.4375 0.59375 -0.796875q0.21875 -0.375 0.296875 -0.6875q0.078125 -0.328125 0.078125 -0.703125q0 -0.34375 -0.109375 -0.65625q-0.09375 -0.328125 -0.296875 -0.5625q-0.1875 -0.25 -0.5 -0.390625q-0.3125 -0.140625 -0.75 -0.140625q-0.609375 0 -1.109375 0.28125q-0.5 0.265625 -0.921875 0.6875l-0.703125 -0.828125q0.546875 -0.578125 1.25 -0.921875q0.703125 -0.34375 1.640625 -0.34375q0.640625 0 1.15625 0.1875q0.53125 0.1875 0.90625 0.546875q0.390625 0.359375 0.59375 0.890625q0.21875 0.515625 0.21875 1.15625q0 0.5625 -0.15625 1.03125q-0.140625 0.46875 -0.4375 0.9375q-0.296875 0.453125 -0.75 0.953125q-0.453125 0.5 -1.0625 1.09375l-1.734375 1.6875l4.65625 0l0 1.21875zm8.058105 -4.0625q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm9.604919 -0.53125q0 1.09375 -0.234375 2.0q-0.21875 0.90625 -0.671875 1.5625q-0.4375 0.640625 
-1.109375 1.0q-0.65625 0.34375 -1.546875 0.34375q-0.765625 0 -1.40625 -0.28125q-0.625 -0.296875 -1.078125 -0.890625q-0.4375 -0.59375 -0.6875 -1.53125q-0.234375 -0.9375 -0.234375 -2.203125q0 -1.09375 0.21875 -2.0q0.234375 -0.921875 0.671875 -1.5625q0.453125 -0.65625 1.109375 -1.0q0.671875 -0.359375 1.5625 -0.359375q0.765625 0 1.390625 0.296875q0.625 0.28125 1.078125 0.890625q0.453125 0.59375 0.6875 1.53125q0.25 0.921875 0.25 2.203125zm-1.296875 0.046875q0 -0.25 -0.015625 -0.5q-0.015625 -0.25 -0.046875 -0.484375l-4.046875 3.015625q0.109375 0.375 0.28125 0.703125q0.171875 0.328125 0.40625 0.5625q0.234375 0.21875 0.53125 0.359375q0.3125 0.125 0.703125 0.125q0.5 0 0.90625 -0.234375q0.40625 -0.25 0.6875 -0.71875q0.28125 -0.484375 0.4375 -1.1875q0.15625 -0.71875 0.15625 -1.640625zm-4.375 -0.09375q0 0.234375 0 0.46875q0 0.21875 0.03125 0.421875l4.046875 -2.984375q-0.109375 -0.375 -0.28125 -0.6875q-0.171875 -0.3125 -0.40625 -0.53125q-0.234375 -0.21875 -0.53125 -0.34375q-0.296875 -0.125 -0.671875 -0.125q-0.5 0 -0.90625 0.25q-0.40625 0.234375 -0.703125 0.71875q-0.28125 0.46875 -0.4375 1.1875q-0.140625 0.703125 -0.140625 1.625zm13.620605 1.796875q0 0.65625 -0.234375 1.234375q-0.234375 0.578125 -0.6875 1.015625q-0.4375 0.421875 -1.0625 0.671875q-0.609375 0.234375 -1.359375 0.234375q-0.796875 0 -1.40625 -0.25q-0.609375 -0.25 -1.015625 -0.765625q-0.40625 -0.53125 -0.625 -1.328125q-0.203125 -0.796875 -0.203125 -1.890625q0 -0.734375 0.09375 -1.421875q0.09375 -0.6875 0.3125 -1.296875q0.21875 -0.609375 0.578125 -1.109375q0.375 -0.5 0.921875 -0.859375q0.546875 -0.375 1.28125 -0.578125q0.734375 -0.203125 1.71875 -0.203125l0.9375 0l0 1.125l-1.015625 0q-0.859375 0 -1.5 0.203125q-0.625 0.203125 -1.046875 0.578125q-0.421875 0.375 -0.65625 0.90625q-0.21875 0.515625 -0.28125 1.171875l-0.03125 0.296875q0.46875 -0.265625 1.0625 -0.421875q0.609375 -0.171875 1.3125 -0.171875q0.71875 0 1.265625 0.21875q0.546875 0.203125 0.90625 0.578125q0.375 0.375 0.546875 0.90625q0.1875 0.53125 0.1875 1.15625zm-1.328125 0.078125q0 -0.4375 -0.109375 -0.796875q-0.109375 -0.359375 -0.34375 -0.59375q-0.21875 -0.25 -0.5625 -0.375q-0.34375 -0.140625 -0.828125 -0.140625q-0.28125 0 -0.578125 0.046875q-0.28125 0.046875 -0.5625 0.140625q-0.265625 0.09375 -0.515625 0.21875q-0.25 0.109375 -0.453125 0.234375q0 0.953125 0.125 1.59375q0.140625 0.625 0.390625 1.015625q0.265625 0.375 0.640625 0.53125q0.390625 0.15625 0.890625 0.15625q0.421875 0 0.765625 -0.125q0.34375 -0.140625 0.59375 -0.40625q0.25 -0.265625 0.390625 -0.640625q0.15625 -0.375 0.15625 -0.859375zm9.026794 -0.109375q0 0.71875 -0.3125 1.3125q-0.296875 0.578125 -0.84375 1.0q-0.53125 0.40625 -1.265625 0.640625q-0.734375 0.234375 -1.578125 0.234375q-0.21875 0 -0.46875 -0.015625q-0.234375 0 -0.484375 -0.015625q-0.234375 -0.015625 -0.46875 -0.046875q-0.234375 -0.015625 -0.421875 -0.046875l0 -1.15625q0.40625 0.09375 0.90625 0.140625q0.515625 0.046875 1.03125 0.046875q0.59375 0 1.0625 -0.140625q0.46875 -0.140625 0.796875 -0.390625q0.328125 -0.265625 0.5 -0.640625q0.171875 -0.375 0.171875 -0.828125q0 -0.90625 -0.640625 -1.3125q-0.640625 -0.40625 -1.84375 -0.40625l-1.8125 0l0 -4.890625l5.15625 0l0 1.125l-3.953125 0l0 2.6875l0.84375 0q0.6875 0 1.328125 0.125q0.65625 0.125 1.15625 0.4375q0.515625 0.296875 0.828125 0.828125q0.3125 0.515625 0.3125 1.3125zm8.526855 -1.71875q0 1.09375 -0.234375 2.0q-0.21875 0.90625 -0.671875 1.5625q-0.4375 0.640625 -1.109375 1.0q-0.65625 0.34375 -1.546875 0.34375q-0.765625 0 -1.40625 -0.28125q-0.625 -0.296875 -1.078125 -0.890625q-0.4375 -0.59375 -0.6875 
-1.53125q-0.234375 -0.9375 -0.234375 -2.203125q0 -1.09375 0.21875 -2.0q0.234375 -0.921875 0.671875 -1.5625q0.453125 -0.65625 1.109375 -1.0q0.671875 -0.359375 1.5625 -0.359375q0.765625 0 1.390625 0.296875q0.625 0.28125 1.078125 0.890625q0.453125 0.59375 0.6875 1.53125q0.25 0.921875 0.25 2.203125zm-1.296875 0.046875q0 -0.25 -0.015625 -0.5q-0.015625 -0.25 -0.046875 -0.484375l-4.046875 3.015625q0.109375 0.375 0.28125 0.703125q0.171875 0.328125 0.40625 0.5625q0.234375 0.21875 0.53125 0.359375q0.3125 0.125 0.703125 0.125q0.5 0 0.90625 -0.234375q0.40625 -0.25 0.6875 -0.71875q0.28125 -0.484375 0.4375 -1.1875q0.15625 -0.71875 0.15625 -1.640625zm-4.375 -0.09375q0 0.234375 0 0.46875q0 0.21875 0.03125 0.421875l4.046875 -2.984375q-0.109375 -0.375 -0.28125 -0.6875q-0.171875 -0.3125 -0.40625 -0.53125q-0.234375 -0.21875 -0.53125 -0.34375q-0.296875 -0.125 -0.671875 -0.125q-0.5 0 -0.90625 0.25q-0.40625 0.234375 -0.703125 0.71875q-0.28125 0.46875 -0.4375 1.1875q-0.140625 0.703125 -0.140625 1.625zm13.511169 4.828125l-6.3125 0l0 -1.140625l2.46875 -2.46875q0.609375 -0.59375 0.984375 -1.03125q0.390625 -0.4375 0.59375 -0.796875q0.21875 -0.375 0.296875 -0.6875q0.078125 -0.328125 0.078125 -0.703125q0 -0.34375 -0.109375 -0.65625q-0.09375 -0.328125 -0.296875 -0.5625q-0.1875 -0.25 -0.5 -0.390625q-0.3125 -0.140625 -0.75 -0.140625q-0.609375 0 -1.109375 0.28125q-0.5 0.265625 -0.921875 0.6875l-0.703125 -0.828125q0.546875 -0.578125 1.25 -0.921875q0.703125 -0.34375 1.640625 -0.34375q0.640625 0 1.15625 0.1875q0.53125 0.1875 0.90625 0.546875q0.390625 0.359375 0.59375 0.890625q0.21875 0.515625 0.21875 1.15625q0 0.5625 -0.15625 1.03125q-0.140625 0.46875 -0.4375 0.9375q-0.296875 0.453125 -0.75 0.953125q-0.453125 0.5 -1.0625 1.09375l-1.734375 1.6875l4.65625 0l0 1.21875zm8.526855 -2.109375l-1.640625 0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm10.636169 5.3125q0 0.65625 -0.234375 1.234375q-0.234375 0.578125 -0.6875 1.015625q-0.4375 0.421875 -1.0625 0.671875q-0.609375 0.234375 -1.359375 0.234375q-0.796875 0 -1.40625 -0.25q-0.609375 -0.25 -1.015625 -0.765625q-0.40625 -0.53125 -0.625 -1.328125q-0.203125 -0.796875 -0.203125 -1.890625q0 -0.734375 0.09375 -1.421875q0.09375 -0.6875 0.3125 -1.296875q0.21875 -0.609375 0.578125 -1.109375q0.375 -0.5 0.921875 -0.859375q0.546875 -0.375 1.28125 -0.578125q0.734375 -0.203125 1.71875 -0.203125l0.9375 0l0 1.125l-1.015625 0q-0.859375 0 -1.5 0.203125q-0.625 0.203125 -1.046875 0.578125q-0.421875 0.375 -0.65625 0.90625q-0.21875 0.515625 -0.28125 1.171875l-0.03125 0.296875q0.46875 -0.265625 1.0625 -0.421875q0.609375 -0.171875 1.3125 -0.171875q0.71875 0 1.265625 0.21875q0.546875 0.203125 0.90625 0.578125q0.375 0.375 0.546875 0.90625q0.1875 0.53125 0.1875 1.15625zm-1.328125 0.078125q0 -0.4375 -0.109375 -0.796875q-0.109375 -0.359375 -0.34375 -0.59375q-0.21875 -0.25 -0.5625 -0.375q-0.34375 -0.140625 -0.828125 -0.140625q-0.28125 0 -0.578125 0.046875q-0.28125 0.046875 -0.5625 0.140625q-0.265625 0.09375 -0.515625 0.21875q-0.25 0.109375 -0.453125 0.234375q0 0.953125 0.125 1.59375q0.140625 0.625 0.390625 1.015625q0.265625 0.375 0.640625 0.53125q0.390625 0.15625 0.890625 0.15625q0.421875 0 0.765625 -0.125q0.34375 -0.140625 0.59375 -0.40625q0.25 -0.265625 0.390625 -0.640625q0.15625 -0.375 0.15625 -0.859375zm9.183105 2.953125l-6.0 0l0 -1.1875l2.453125 0l0 -6.984375l-2.296875 1.25l-0.46875 -1.09375l3.046875 -1.59375l1.125 0l0 8.421875l2.140625 0l0 1.1875zm8.620544 -2.109375l-1.640625 
0l0 2.109375l-1.296875 0l0 -2.109375l-4.609375 0l0 -1.125l4.078125 -6.34375l1.828125 0l0 6.34375l1.640625 0l0 1.125zm-2.9375 -6.234375l-3.328125 5.109375l3.328125 0l0 -5.109375zm10.433105 8.34375l-6.0 0l0 -1.1875l2.453125 0l0 -6.984375l-2.296875 1.25l-0.46875 -1.09375l3.046875 -1.59375l1.125 0l0 8.421875l2.140625 0l0 1.1875zm8.089294 -5.328125q0 1.40625 -0.34375 2.421875q-0.328125 1.0 -0.984375 1.65625q-0.65625 0.640625 -1.640625 0.953125q-0.96875 0.296875 -2.25 0.296875l-0.796875 0l0 -1.109375l0.890625 0q0.953125 0 1.640625 -0.1875q0.6875 -0.203125 1.140625 -0.5625q0.453125 -0.375 0.6875 -0.90625q0.25 -0.53125 0.3125 -1.21875l0.03125 -0.296875q-0.46875 0.28125 -1.078125 0.453125q-0.59375 0.15625 -1.296875 0.15625q-0.71875 0 -1.265625 -0.21875q-0.546875 -0.21875 -0.921875 -0.59375q-0.359375 -0.375 -0.546875 -0.890625q-0.171875 -0.53125 -0.171875 -1.15625q0 -0.65625 0.234375 -1.234375q0.25 -0.578125 0.6875 -1.0q0.4375 -0.4375 1.03125 -0.6875q0.609375 -0.25 1.34375 -0.25q0.71875 0 1.3125 0.234375q0.609375 0.234375 1.046875 0.765625q0.4375 0.515625 0.6875 1.359375q0.25 0.828125 0.25 2.015625zm-3.34375 -3.328125q-0.421875 0 -0.765625 0.140625q-0.34375 0.125 -0.609375 0.390625q-0.25 0.265625 -0.390625 0.640625q-0.140625 0.375 -0.140625 0.875q0 0.4375 0.09375 0.796875q0.109375 0.34375 0.328125 0.59375q0.234375 0.25 0.578125 0.390625q0.359375 0.125 0.84375 0.125q0.265625 0 0.546875 -0.046875q0.296875 -0.0625 0.5625 -0.140625q0.28125 -0.09375 0.53125 -0.203125q0.25 -0.125 0.453125 -0.265625q0 -0.9375 -0.140625 -1.5625q-0.140625 -0.640625 -0.40625 -1.015625q-0.265625 -0.390625 -0.640625 -0.546875q-0.375 -0.171875 -0.84375 -0.171875zm11.464355 8.65625l-6.3125 0l0 -1.140625l2.46875 -2.46875q0.609375 -0.59375 0.984375 -1.03125q0.390625 -0.4375 0.59375 -0.796875q0.21875 -0.375 0.296875 -0.6875q0.078125 -0.328125 0.078125 -0.703125q0 -0.34375 -0.109375 -0.65625q-0.09375 -0.328125 -0.296875 -0.5625q-0.1875 -0.25 -0.5 -0.390625q-0.3125 -0.140625 -0.75 -0.140625q-0.609375 0 -1.109375 0.28125q-0.5 0.265625 -0.921875 0.6875l-0.703125 -0.828125q0.546875 -0.578125 1.25 -0.921875q0.703125 -0.34375 1.640625 -0.34375q0.640625 0 1.15625 0.1875q0.53125 0.1875 0.90625 0.546875q0.390625 0.359375 0.59375 0.890625q0.21875 0.515625 0.21875 1.15625q0 0.5625 -0.15625 1.03125q-0.140625 0.46875 -0.4375 0.9375q-0.296875 0.453125 -0.75 0.953125q-0.453125 0.5 -1.0625 1.09375l-1.734375 1.6875l4.65625 0l0 1.21875zm7.9642944 0l-6.0 0l0 -1.1875l2.453125 0l0 -6.984375l-2.296875 1.25l-0.46875 -1.09375l3.046875 -1.59375l1.125 0l0 8.421875l2.140625 0l0 1.1875zm8.089355 -8.390625l-4.015625 8.390625l-1.453125 0l4.171875 -8.390625l-5.171875 0l0 -1.1875l6.46875 0l0 1.1875zm6.6205444 8.390625l-0.03125 -0.984375q-0.59375 0.59375 -1.21875 0.859375q-0.609375 0.25 -1.296875 0.25q-0.625 0 -1.078125 -0.15625q-0.4375 -0.15625 -0.734375 -0.4375q-0.28125 -0.28125 -0.421875 -0.65625q-0.140625 -0.390625 -0.140625 -0.84375q0 -1.09375 0.828125 -1.71875q0.828125 -0.640625 2.4375 -0.640625l1.515625 0l0 -0.640625q0 -0.65625 -0.421875 -1.046875q-0.40625 -0.390625 -1.265625 -0.390625q-0.625 0 -1.234375 0.140625q-0.59375 0.140625 -1.234375 0.40625l0 -1.15625q0.234375 -0.09375 0.53125 -0.171875q0.296875 -0.078125 0.625 -0.140625q0.328125 -0.078125 0.6875 -0.109375q0.359375 -0.046875 0.734375 -0.046875q0.65625 0 1.1875 0.15625q0.546875 0.140625 0.90625 0.4375q0.375 0.296875 0.5625 0.75q0.203125 0.453125 0.203125 1.078125l0 5.0625l-1.140625 0zm-0.140625 -3.34375l-1.609375 0q-0.484375 0 -0.828125 0.09375q-0.34375 0.09375 -0.5625 0.265625q-0.21875 0.171875 
-0.328125 0.421875q-0.09375 0.25 -0.09375 0.5625q0 0.203125 0.0625 0.40625q0.0625 0.1875 0.203125 0.34375q0.15625 0.140625 0.390625 0.234375q0.234375 0.078125 0.5625 0.078125q0.4375 0 1.0 -0.265625q0.578125 -0.265625 1.203125 -0.84375l0 -1.296875zm9.69873 0.9375q0 0.609375 -0.25 1.09375q-0.25 0.46875 -0.703125 0.796875q-0.453125 0.3125 -1.0625 0.484375q-0.59375 0.15625 -1.3125 0.15625q-0.78125 0 -1.375 -0.171875q-0.59375 -0.171875 -1.0 -0.484375q-0.40625 -0.3125 -0.609375 -0.75q-0.203125 -0.4375 -0.203125 -0.953125q0 -0.875 0.484375 -1.515625q0.5 -0.640625 1.515625 -1.15625q-0.9375 -0.484375 -1.375 -1.0625q-0.421875 -0.578125 -0.421875 -1.328125q0 -0.46875 0.1875 -0.890625q0.1875 -0.4375 0.578125 -0.765625q0.390625 -0.34375 0.96875 -0.546875q0.578125 -0.203125 1.359375 -0.203125q0.734375 0 1.296875 0.15625q0.5625 0.15625 0.9375 0.453125q0.390625 0.296875 0.578125 0.71875q0.1875 0.40625 0.1875 0.921875q0 0.828125 -0.46875 1.421875q-0.46875 0.578125 -1.3125 1.015625q0.421875 0.21875 0.78125 0.484375q0.375 0.25 0.640625 0.5625q0.265625 0.3125 0.421875 0.703125q0.15625 0.390625 0.15625 0.859375zm-1.53125 -4.953125q0 -0.640625 -0.453125 -0.953125q-0.453125 -0.328125 -1.28125 -0.328125q-0.828125 0 -1.28125 0.3125q-0.453125 0.3125 -0.453125 0.9375q0 0.296875 0.109375 0.546875q0.109375 0.234375 0.328125 0.453125q0.234375 0.203125 0.578125 0.40625q0.34375 0.203125 0.828125 0.421875q0.828125 -0.390625 1.21875 -0.8125q0.40625 -0.421875 0.40625 -0.984375zm0.140625 5.046875q0 -0.265625 -0.09375 -0.515625q-0.078125 -0.265625 -0.3125 -0.515625q-0.21875 -0.25 -0.609375 -0.5q-0.375 -0.25 -0.96875 -0.5q-0.5 0.234375 -0.84375 0.46875q-0.328125 0.234375 -0.546875 0.484375q-0.203125 0.25 -0.296875 0.515625q-0.078125 0.265625 -0.078125 0.546875q0 0.328125 0.140625 0.59375q0.140625 0.25 0.390625 0.421875q0.25 0.171875 0.59375 0.265625q0.34375 0.09375 0.75 0.09375q0.390625 0 0.734375 -0.078125q0.34375 -0.09375 0.59375 -0.25q0.25 -0.171875 0.390625 -0.421875q0.15625 -0.25 0.15625 -0.609375zm9.448669 -1.75q0 0.265625 -0.015625 0.453125q0 0.1875 -0.015625 0.34375l-5.171875 0q0 1.125 0.625 1.734375q0.640625 0.59375 1.828125 0.59375q0.3125 0 0.640625 -0.015625q0.328125 -0.03125 0.625 -0.0625q0.296875 -0.046875 0.5625 -0.09375q0.28125 -0.0625 0.515625 -0.140625l0 1.046875q-0.515625 0.15625 -1.171875 0.234375q-0.65625 0.09375 -1.359375 0.09375q-0.9375 0 -1.625 -0.25q-0.6875 -0.25 -1.125 -0.734375q-0.421875 -0.5 -0.640625 -1.203125q-0.203125 -0.703125 -0.203125 -1.59375q0 -0.78125 0.21875 -1.46875q0.21875 -0.703125 0.640625 -1.21875q0.4375 -0.53125 1.0625 -0.828125q0.625 -0.3125 1.421875 -0.3125q0.765625 0 1.359375 0.25q0.59375 0.234375 1.0 0.6875q0.40625 0.4375 0.609375 1.078125q0.21875 0.625 0.21875 1.40625zm-1.328125 -0.1875q0.015625 -0.484375 -0.109375 -0.890625q-0.109375 -0.40625 -0.359375 -0.703125q-0.234375 -0.296875 -0.609375 -0.453125q-0.359375 -0.171875 -0.84375 -0.171875q-0.421875 0 -0.765625 0.171875q-0.34375 0.15625 -0.59375 0.453125q-0.25 0.28125 -0.40625 0.703125q-0.140625 0.40625 -0.1875 0.890625l3.875 0zm9.495605 1.21875q0 0.65625 -0.234375 1.234375q-0.234375 0.578125 -0.6875 1.015625q-0.4375 0.421875 -1.0625 0.671875q-0.609375 0.234375 -1.359375 0.234375q-0.796875 0 -1.40625 -0.25q-0.609375 -0.25 -1.015625 -0.765625q-0.40625 -0.53125 -0.625 -1.328125q-0.203125 -0.796875 -0.203125 -1.890625q0 -0.734375 0.09375 -1.421875q0.09375 -0.6875 0.3125 -1.296875q0.21875 -0.609375 0.578125 -1.109375q0.375 -0.5 0.921875 -0.859375q0.546875 -0.375 1.28125 -0.578125q0.734375 -0.203125 1.71875 -0.203125l0.9375 0l0 
1.125l-1.015625 0q-0.859375 0 -1.5 0.203125q-0.625 0.203125 -1.046875 0.578125q-0.421875 0.375 -0.65625 0.90625q-0.21875 0.515625 -0.28125 1.171875l-0.03125 0.296875q0.46875 -0.265625 1.0625 -0.421875q0.609375 -0.171875 1.3125 -0.171875q0.71875 0 1.265625 0.21875q0.546875 0.203125 0.90625 0.578125q0.375 0.375 0.546875 0.90625q0.1875 0.53125 0.1875 1.15625zm-1.328125 0.078125q0 -0.4375 -0.109375 -0.796875q-0.109375 -0.359375 -0.34375 -0.59375q-0.21875 -0.25 -0.5625 -0.375q-0.34375 -0.140625 -0.828125 -0.140625q-0.28125 0 -0.578125 0.046875q-0.28125 0.046875 -0.5625 0.140625q-0.265625 0.09375 -0.515625 0.21875q-0.25 0.109375 -0.453125 0.234375q0 0.953125 0.125 1.59375q0.140625 0.625 0.390625 1.015625q0.265625 0.375 0.640625 0.53125q0.390625 0.15625 0.890625 0.15625q0.421875 0 0.765625 -0.125q0.34375 -0.140625 0.59375 -0.40625q0.25 -0.265625 0.390625 -0.640625q0.15625 -0.375 0.15625 -0.859375zm9.026794 -0.109375q0 0.71875 -0.3125 1.3125q-0.296875 0.578125 -0.84375 1.0q-0.53125 0.40625 -1.265625 0.640625q-0.734375 0.234375 -1.578125 0.234375q-0.21875 0 -0.46875 -0.015625q-0.234375 0 -0.484375 -0.015625q-0.234375 -0.015625 -0.46875 -0.046875q-0.234375 -0.015625 -0.421875 -0.046875l0 -1.15625q0.40625 0.09375 0.90625 0.140625q0.515625 0.046875 1.03125 0.046875q0.59375 0 1.0625 -0.140625q0.46875 -0.140625 0.796875 -0.390625q0.328125 -0.265625 0.5 -0.640625q0.171875 -0.375 0.171875 -0.828125q0 -0.90625 -0.640625 -1.3125q-0.640625 -0.40625 -1.84375 -0.40625l-1.8125 0l0 -4.890625l5.15625 0l0 1.125l-3.953125 0l0 2.6875l0.84375 0q0.6875 0 1.328125 0.125q0.65625 0.125 1.15625 0.4375q0.515625 0.296875 0.828125 0.828125q0.3125 0.515625 0.3125 1.3125zm8.120605 0.15625q0 0.625 -0.265625 1.1875q-0.25 0.546875 -0.765625 0.96875q-0.515625 0.40625 -1.296875 0.640625q-0.765625 0.234375 -1.796875 0.234375q-0.578125 0 -1.03125 -0.03125q-0.453125 -0.03125 -0.84375 -0.09375l0 -1.140625q0.453125 0.078125 0.953125 0.125q0.515625 0.046875 1.03125 0.046875q0.71875 0 1.21875 -0.125q0.515625 -0.140625 0.84375 -0.375q0.328125 -0.25 0.46875 -0.59375q0.140625 -0.34375 0.140625 -0.765625q0 -0.40625 -0.171875 -0.6875q-0.171875 -0.296875 -0.5 -0.5q-0.3125 -0.203125 -0.765625 -0.296875q-0.4375 -0.09375 -0.953125 -0.09375l-1.09375 0l0 -1.046875l1.109375 0q0.421875 0 0.78125 -0.109375q0.359375 -0.125 0.609375 -0.328125q0.25 -0.21875 0.375 -0.53125q0.140625 -0.3125 0.140625 -0.703125q0 -0.765625 -0.46875 -1.109375q-0.46875 -0.359375 -1.375 -0.359375q-0.484375 0 -1.0 0.09375q-0.5 0.09375 -1.09375 0.28125l0 -1.109375q0.25 -0.09375 0.53125 -0.15625q0.28125 -0.078125 0.5625 -0.125q0.28125 -0.046875 0.5625 -0.0625q0.28125 -0.03125 0.53125 -0.03125q0.765625 0 1.34375 0.171875q0.578125 0.15625 0.96875 0.46875q0.390625 0.296875 0.578125 0.75q0.203125 0.4375 0.203125 0.984375q0 0.8125 -0.421875 1.375q-0.421875 0.5625 -1.15625 0.890625q0.375 0.046875 0.734375 0.234375q0.375 0.171875 0.65625 0.453125q0.296875 0.265625 0.46875 0.640625q0.1875 0.375 0.1875 0.828125z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m284.04724 136.9462l391.90552 0l0 51.181107l-391.90552 0z" fill-rule="nonzero"></path><path fill="#000000" d="m319.1797 158.57675l-3.796875 -12.453125l2.171875 0l1.984375 7.1875l0.734375 2.671875q0.046875 -0.203125 0.640625 -2.5625l1.984375 -7.296875l2.171875 0l1.859375 7.21875l0.625 2.390625l0.71875 -2.40625l2.125 -7.203125l2.046875 0l-3.890625 12.453125l-2.1875 0l-1.984375 -7.453125l-0.46875 -2.125l-2.53125 9.578125l-2.203125 0zm23.566406 -4.015625l2.171875 0.28125q-0.515625 1.90625 -1.90625 2.96875q-1.390625 
1.046875 -3.5625 1.046875q-2.734375 0 -4.34375 -1.671875q-1.59375 -1.6875 -1.59375 -4.734375q0 -3.140625 1.609375 -4.875q1.625 -1.734375 4.203125 -1.734375q2.5 0 4.078125 1.703125q1.59375 1.703125 1.59375 4.78125q0 0.1875 -0.015625 0.5625l-9.28125 0q0.109375 2.046875 1.15625 3.140625q1.046875 1.09375 2.609375 1.09375q1.15625 0 1.96875 -0.609375q0.828125 -0.609375 1.3125 -1.953125zm-6.9375 -3.40625l6.953125 0q-0.140625 -1.5625 -0.796875 -2.359375q-1.0 -1.21875 -2.609375 -1.21875q-1.453125 0 -2.453125 0.984375q-0.984375 0.96875 -1.09375 2.59375zm11.769531 -7.328125l0 -2.4375l2.109375 0l0 2.4375l-2.109375 0zm0 14.75l0 -12.453125l2.109375 0l0 12.453125l-2.109375 0zm4.9414062 1.03125l2.046875 0.3125q0.125 0.9375 0.71875 1.375q0.78125 0.59375 2.140625 0.59375q1.46875 0 2.265625 -0.59375q0.796875 -0.578125 1.078125 -1.640625q0.15625 -0.640625 0.140625 -2.703125q-1.375 1.625 -3.4375 1.625q-2.5625 0 -3.96875 -1.84375q-1.40625 -1.859375 -1.40625 -4.453125q0 -1.78125 0.640625 -3.28125q0.640625 -1.515625 1.859375 -2.328125q1.234375 -0.828125 2.890625 -0.828125q2.203125 0 3.625 1.78125l0 -1.5l1.953125 0l0 10.765625q0 2.90625 -0.59375 4.109375q-0.59375 1.21875 -1.875 1.921875q-1.28125 0.703125 -3.15625 0.703125q-2.234375 0 -3.609375 -1.0q-1.359375 -1.0 -1.3125 -3.015625zm1.734375 -7.484375q0 2.453125 0.96875 3.578125q0.984375 1.125 2.453125 1.125q1.453125 0 2.4375 -1.109375q0.984375 -1.125 0.984375 -3.515625q0 -2.28125 -1.015625 -3.4375q-1.015625 -1.171875 -2.453125 -1.171875q-1.40625 0 -2.390625 1.140625q-0.984375 1.140625 -0.984375 3.390625zm11.988281 6.453125l0 -17.1875l2.109375 0l0 6.171875q1.484375 -1.71875 3.734375 -1.71875q1.375 0 2.390625 0.546875q1.03125 0.546875 1.46875 1.515625q0.4375 0.953125 0.4375 2.78125l0 7.890625l-2.109375 0l0 -7.890625q0 -1.578125 -0.6875 -2.296875q-0.6875 -0.71875 -1.9375 -0.71875q-0.9375 0 -1.765625 0.484375q-0.828125 0.484375 -1.1875 1.3125q-0.34375 0.828125 -0.34375 2.296875l0 6.8125l-2.109375 0zm17.957031 -1.890625l0.3125 1.859375q-0.890625 0.203125 -1.59375 0.203125q-1.15625 0 -1.796875 -0.359375q-0.625 -0.375 -0.890625 -0.96875q-0.25 -0.59375 -0.25 -2.484375l0 -7.171875l-1.546875 0l0 -1.640625l1.546875 0l0 -3.078125l2.09375 -1.265625l0 4.34375l2.125 0l0 1.640625l-2.125 0l0 7.28125q0 0.90625 0.109375 1.171875q0.125 0.25 0.375 0.40625q0.25 0.140625 0.71875 0.140625q0.34375 0 0.921875 -0.078125zm19.835938 -8.21875l-11.34375 0l0 -1.96875l11.34375 0l0 1.96875zm0 5.21875l-11.34375 0l0 -1.96875l11.34375 0l0 1.96875zm9.777344 4.890625l0 -17.1875l3.421875 0l4.0625 12.171875q0.5625 1.703125 0.828125 2.546875q0.296875 -0.9375 0.90625 -2.765625l4.125 -11.953125l3.046875 0l0 17.1875l-2.1875 0l0 -14.375l-4.984375 14.375l-2.0625 0l-4.96875 -14.625l0 14.625l-2.1875 0zm20.070312 0l0 -17.1875l5.90625 0q2.015625 0 3.0625 0.25q1.484375 0.34375 2.515625 1.234375q1.359375 1.140625 2.03125 2.9375q0.6875 1.78125 0.6875 4.078125q0 1.953125 -0.46875 3.46875q-0.453125 1.515625 -1.171875 2.515625q-0.703125 0.984375 -1.5625 1.546875q-0.84375 0.5625 -2.046875 0.859375q-1.203125 0.296875 -2.765625 0.296875l-6.1875 0zm2.265625 -2.03125l3.671875 0q1.703125 0 2.65625 -0.3125q0.96875 -0.3125 1.546875 -0.890625q0.8125 -0.8125 1.265625 -2.171875q0.453125 -1.375 0.453125 -3.3125q0 -2.703125 -0.890625 -4.140625q-0.890625 -1.453125 -2.15625 -1.9375q-0.90625 -0.359375 -2.9375 -0.359375l-3.609375 0l0 13.125zm14.207031 -2.46875l2.21875 -0.1875q0.234375 1.609375 1.125 2.4375q0.90625 0.8125 2.171875 0.8125q1.53125 0 2.578125 -1.140625q1.0625 -1.15625 1.0625 -3.0625q0 -1.796875 -1.015625 -2.84375q-1.015625 
-1.046875 -2.65625 -1.046875q-1.015625 0 -1.84375 0.46875q-0.8125 0.453125 -1.28125 1.203125l-1.984375 -0.265625l1.65625 -8.828125l8.546875 0l0 2.015625l-6.859375 0l-0.921875 4.625q1.546875 -1.078125 3.25 -1.078125q2.25 0 3.796875 1.5625q1.546875 1.546875 1.546875 4.0q0 2.328125 -1.359375 4.03125q-1.65625 2.09375 -4.515625 2.09375q-2.34375 0 -3.828125 -1.3125q-1.484375 -1.3125 -1.6875 -3.484375zm17.957031 9.546875q-1.734375 -2.203125 -2.953125 -5.15625q-1.203125 -2.953125 -1.203125 -6.109375q0 -2.796875 0.90625 -5.34375q1.046875 -2.96875 3.25 -5.90625l1.515625 0q-1.421875 2.4375 -1.875 3.46875q-0.71875 1.625 -1.125 3.375q-0.5 2.203125 -0.5 4.40625q0 5.640625 3.5 11.265625l-1.515625 0zm4.6171875 -5.046875l0 -17.1875l2.28125 0l0 17.1875l-2.28125 0zm6.2929688 0l0 -17.1875l5.90625 0q2.015625 0 3.0625 0.25q1.484375 0.34375 2.515625 1.234375q1.359375 1.140625 2.03125 2.9375q0.6875 1.78125 0.6875 4.078125q0 1.953125 -0.46875 3.46875q-0.453125 1.515625 -1.171875 2.515625q-0.703125 0.984375 -1.5625 1.546875q-0.84375 0.5625 -2.046875 0.859375q-1.203125 0.296875 -2.765625 0.296875l-6.1875 0zm2.265625 -2.03125l3.671875 0q1.703125 0 2.65625 -0.3125q0.96875 -0.3125 1.546875 -0.890625q0.8125 -0.8125 1.265625 -2.171875q0.453125 -1.375 0.453125 -3.3125q0 -2.703125 -0.890625 -4.140625q-0.890625 -1.453125 -2.15625 -1.9375q-0.90625 -0.359375 -2.9375 -0.359375l-3.609375 0l0 13.125zm18.816406 7.078125q-1.734375 -2.203125 -2.953125 -5.15625q-1.203125 -2.953125 -1.203125 -6.109375q0 -2.796875 0.90625 -5.34375q1.046875 -2.96875 3.25 -5.90625l1.515625 0q-1.421875 2.4375 -1.875 3.46875q-0.71875 1.625 -1.125 3.375q-0.5 2.203125 -0.5 4.40625q0 5.640625 3.5 11.265625l-1.515625 0zm3.4609375 -10.5625l2.140625 -0.1875q0.15625 1.28125 0.703125 2.109375q0.5625 0.828125 1.734375 1.34375q1.171875 0.5 2.640625 0.5q1.296875 0 2.296875 -0.375q1.0 -0.390625 1.484375 -1.0625q0.484375 -0.6875 0.484375 -1.484375q0 -0.796875 -0.46875 -1.40625q-0.46875 -0.609375 -1.546875 -1.015625q-0.6875 -0.265625 -3.0625 -0.828125q-2.359375 -0.578125 -3.3125 -1.078125q-1.234375 -0.640625 -1.84375 -1.59375q-0.59375 -0.96875 -0.59375 -2.140625q0 -1.3125 0.734375 -2.4375q0.75 -1.125 2.15625 -1.703125q1.421875 -0.59375 3.15625 -0.59375q1.90625 0 3.359375 0.609375q1.46875 0.609375 2.25 1.8125q0.796875 1.1875 0.84375 2.703125l-2.171875 0.171875q-0.171875 -1.640625 -1.1875 -2.46875q-1.015625 -0.828125 -3.0 -0.828125q-2.0625 0 -3.015625 0.765625q-0.9375 0.75 -0.9375 1.8125q0 0.921875 0.671875 1.515625q0.65625 0.609375 3.421875 1.234375q2.78125 0.625 3.8125 1.09375q1.5 0.6875 2.203125 1.75q0.71875 1.0625 0.71875 2.4375q0 1.375 -0.78125 2.59375q-0.78125 1.203125 -2.25 1.890625q-1.46875 0.671875 -3.3125 0.671875q-2.328125 0 -3.90625 -0.671875q-1.578125 -0.6875 -2.484375 -2.046875q-0.890625 -1.375 -0.9375 -3.09375zm17.898438 10.5625l-1.515625 0q3.5 -5.625 3.5 -11.265625q0 -2.203125 -0.5 -4.359375q-0.390625 -1.765625 -1.109375 -3.375q-0.453125 -1.0625 -1.890625 -3.515625l1.515625 0q2.203125 2.9375 3.25 5.90625q0.90625 2.546875 0.90625 5.34375q0 3.15625 -1.21875 6.109375q-1.203125 2.953125 -2.9375 5.15625zm17.707031 -7.828125l0 -4.703125l-4.671875 0l0 -1.96875l4.671875 0l0 -4.6875l1.984375 0l0 4.6875l4.6875 0l0 1.96875l-4.6875 0l0 4.703125l-1.984375 0zm16.449219 2.78125l0 -17.1875l3.421875 0l4.0625 12.171875q0.5625 1.703125 0.828125 2.546875q0.296875 -0.9375 0.90625 -2.765625l4.125 -11.953125l3.046875 0l0 17.1875l-2.1875 0l0 -14.375l-4.984375 14.375l-2.0625 0l-4.96875 -14.625l0 14.625l-2.1875 0zm20.070312 0l0 -17.1875l5.90625 0q2.015625 0 3.0625 0.25q1.484375 
0.34375 2.515625 1.234375q1.359375 1.140625 2.03125 2.9375q0.6875 1.78125 0.6875 4.078125q0 1.953125 -0.46875 3.46875q-0.453125 1.515625 -1.171875 2.515625q-0.703125 0.984375 -1.5625 1.546875q-0.84375 0.5625 -2.046875 0.859375q-1.203125 0.296875 -2.765625 0.296875l-6.1875 0zm2.265625 -2.03125l3.671875 0q1.703125 0 2.65625 -0.3125q0.96875 -0.3125 1.546875 -0.890625q0.8125 -0.8125 1.265625 -2.171875q0.453125 -1.375 0.453125 -3.3125q0 -2.703125 -0.890625 -4.140625q-0.890625 -1.453125 -2.15625 -1.9375q-0.90625 -0.359375 -2.9375 -0.359375l-3.609375 0l0 13.125zm14.207031 -2.46875l2.21875 -0.1875q0.234375 1.609375 1.125 2.4375q0.90625 0.8125 2.171875 0.8125q1.53125 0 2.578125 -1.140625q1.0625 -1.15625 1.0625 -3.0625q0 -1.796875 -1.015625 -2.84375q-1.015625 -1.046875 -2.65625 -1.046875q-1.015625 0 -1.84375 0.46875q-0.8125 0.453125 -1.28125 1.203125l-1.984375 -0.265625l1.65625 -8.828125l8.546875 0l0 2.015625l-6.859375 0l-0.921875 4.625q1.546875 -1.078125 3.25 -1.078125q2.25 0 3.796875 1.5625q1.546875 1.546875 1.546875 4.0q0 2.328125 -1.359375 4.03125q-1.65625 2.09375 -4.515625 2.09375q-2.34375 0 -3.828125 -1.3125q-1.484375 -1.3125 -1.6875 -3.484375zm17.957031 9.546875q-1.734375 -2.203125 -2.953125 -5.15625q-1.203125 -2.953125 -1.203125 -6.109375q0 -2.796875 0.90625 -5.34375q1.046875 -2.96875 3.25 -5.90625l1.515625 0q-1.421875 2.4375 -1.875 3.46875q-0.71875 1.625 -1.125 3.375q-0.5 2.203125 -0.5 4.40625q0 5.640625 3.5 11.265625l-1.515625 0zm4.1484375 -5.046875l0 -17.1875l6.4375 0q1.96875 0 3.15625 0.53125q1.1875 0.515625 1.859375 1.609375q0.6875 1.078125 0.6875 2.265625q0 1.09375 -0.609375 2.078125q-0.59375 0.96875 -1.796875 1.5625q1.5625 0.453125 2.390625 1.5625q0.84375 1.09375 0.84375 2.59375q0 1.203125 -0.515625 2.25q-0.5 1.03125 -1.25 1.59375q-0.75 0.5625 -1.890625 0.859375q-1.125 0.28125 -2.765625 0.28125l-6.546875 0zm2.265625 -9.96875l3.71875 0q1.515625 0 2.171875 -0.1875q0.859375 -0.265625 1.296875 -0.859375q0.4375 -0.59375 0.4375 -1.5q0 -0.859375 -0.40625 -1.5q-0.40625 -0.65625 -1.171875 -0.890625q-0.765625 -0.25 -2.609375 -0.25l-3.4375 0l0 5.1875zm0 7.9375l4.28125 0q1.09375 0 1.546875 -0.078125q0.78125 -0.140625 1.3125 -0.46875q0.53125 -0.328125 0.859375 -0.953125q0.34375 -0.625 0.34375 -1.453125q0 -0.953125 -0.5 -1.65625q-0.484375 -0.71875 -1.359375 -1.0q-0.875 -0.296875 -2.515625 -0.296875l-3.96875 0l0 5.90625zm14.9453125 7.078125l-1.515625 0q3.5 -5.625 3.5 -11.265625q0 -2.203125 -0.5 -4.359375q-0.390625 -1.765625 -1.109375 -3.375q-0.453125 -1.0625 -1.890625 -3.515625l1.515625 0q2.203125 2.9375 3.25 5.90625q0.90625 2.546875 0.90625 5.34375q0 3.15625 -1.21875 6.109375q-1.203125 2.953125 -2.9375 5.15625zm7.9921875 0l-1.515625 0q3.5 -5.625 3.5 -11.265625q0 -2.203125 -0.5 -4.359375q-0.390625 -1.765625 -1.109375 -3.375q-0.453125 -1.0625 -1.890625 -3.515625l1.515625 0q2.203125 2.9375 3.25 5.90625q0.90625 2.546875 0.90625 5.34375q0 3.15625 -1.21875 6.109375q-1.203125 2.953125 -2.9375 5.15625z" fill-rule="nonzero"></path></g></svg>
+
diff --git a/doc/images/Session_Establishment.svg b/doc/images/Session_Establishment.svg
new file mode 100644 (file)
index 0000000..f1efa12
--- /dev/null
@@ -0,0 +1,7 @@
+<?xml version="1.0" standalone="yes"?>
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0 -->
+
+<svg version="1.1" viewBox="0.0 0.0 1338.0 1283.0" fill="none" stroke="none" stroke-linecap="square" stroke-miterlimit="10" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"><clipPath id="p.0"><path d="m0 0l1338.0 0l0 1283.0l-1338.0 0l0 -1283.0z" clip-rule="nonzero"></path></clipPath><g clip-path="url(#p.0)"><path fill="#000000" fill-opacity="0.0" d="m0 0l1338.0 0l0 1283.0l-1338.0 0z" fill-rule="nonzero"></path><path fill="#d9ead3" d="m529.084 59.792652l179.27557 0l0 94.645676l-179.27557 0z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m529.084 59.792652l179.27557 0l0 94.645676l-179.27557 0z" fill-rule="nonzero"></path><path fill="#000000" d="m573.0276 114.035484l-3.609375 -13.59375l1.84375 0l2.0625 8.90625q0.34375 1.40625 0.578125 2.78125q0.515625 -2.171875 0.609375 -2.515625l2.59375 -9.171875l2.171875 0l1.953125 6.875q0.734375 2.5625 1.046875 4.8125q0.265625 -1.28125 0.6875 -2.953125l2.125 -8.734375l1.8125 0l-3.734375 13.59375l-1.734375 0l-2.859375 -10.359375q-0.359375 -1.296875 -0.421875 -1.59375q-0.21875 0.9375 -0.40625 1.59375l-2.890625 10.359375l-1.828125 0zm14.389893 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.266357 4.921875l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm6.2438965 0l0 -13.59375l1.671875 0l0 7.75l3.953125 -4.015625l2.15625 0l-3.765625 3.65625l4.140625 6.203125l-2.0625 0l-3.25 -5.03125l-1.171875 1.125l0 3.90625l-1.671875 0zm10.859375 0l-1.546875 0l0 -13.59375l1.65625 0l0 4.84375q1.0625 -1.328125 2.703125 -1.328125q0.90625 0 1.71875 0.375q0.8125 0.359375 1.328125 1.03125q0.53125 0.65625 0.828125 1.59375q0.296875 0.9375 0.296875 2.0q0 2.53125 -1.25 3.921875q-1.25 1.375 -3.0 1.375q-1.75 0 -2.734375 -1.453125l0 1.234375zm-0.015625 -5.0q0 1.765625 0.46875 2.5625q0.796875 1.28125 2.140625 1.28125q1.09375 0 1.890625 -0.9375q0.796875 -0.953125 0.796875 -2.84375q0 -1.921875 -0.765625 -2.84375q-0.765625 -0.921875 -1.84375 -0.921875q-1.09375 0 -1.890625 0.953125q-0.796875 0.953125 -0.796875 2.75zm15.594482 1.828125l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.110107 5.875l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 
1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm16.813171 -3.609375l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm2.890625 3.609375l0 -13.59375l1.671875 0l0 4.875q1.171875 -1.359375 2.953125 -1.359375q1.09375 0 1.890625 0.4375q0.8125 0.421875 1.15625 1.1875q0.359375 0.765625 0.359375 2.203125l0 6.25l-1.671875 0l0 -6.25q0 -1.25 -0.546875 -1.8125q-0.546875 -0.578125 -1.53125 -0.578125q-0.75 0 -1.40625 0.390625q-0.640625 0.375 -0.921875 1.046875q-0.28125 0.65625 -0.28125 1.8125l0 5.390625l-1.671875 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m186.2126 85.77165l342.2677 2.708664" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m186.2126 85.77165l336.26794 2.6611862" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m522.4674 90.08451l4.5510254 -1.6157684l-4.5248413 -1.6875916z" fill-rule="evenodd"></path><path fill="#d9ead3" d="m464.64304 281.8714l154.07877 -82.47244l154.07874 82.47244l-154.07874 82.47244z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m464.64304 281.8714l154.07877 -82.47244l154.07874 82.47244l-154.07874 82.47244z" fill-rule="nonzero"></path><path fill="#000000" d="m550.6512 266.79138l5.234375 -13.593735l1.9375 0l5.5625 13.593735l-2.046875 0l-1.59375 -4.125l-5.6875 0l-1.484375 4.125l-1.921875 0zm3.921875 -5.578125l4.609375 0l-1.40625 -3.78125q-0.65625 -1.7031097 -0.96875 -2.8124847q-0.265625 1.3125 -0.734375 2.5937347l-1.5 4.0zm9.8029175 5.578125l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm9.750732 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.281982 4.921875l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 
0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm10.297546 3.796875l-0.171875 -1.5625q0.546875 0.140625 0.953125 0.140625q0.546875 0 0.875 -0.1875q0.34375 -0.1875 0.5625 -0.515625q0.15625 -0.25 0.5 -1.25q0.046875 -0.140625 0.15625 -0.40625l-3.734375 -9.875l1.796875 0l2.046875 5.71875q0.40625 1.078125 0.71875 2.28125q0.28125 -1.15625 0.6875 -2.25l2.09375 -5.75l1.671875 0l-3.75 10.03125q-0.59375 1.625 -0.9375 2.234375q-0.4375 0.828125 -1.015625 1.203125q-0.578125 0.390625 -1.375 0.390625q-0.484375 0 -1.078125 -0.203125zm9.40625 -3.796875l0 -9.859375l1.5 0l0 1.390625q0.453125 -0.71875 1.21875 -1.15625q0.78125 -0.453125 1.765625 -0.453125q1.09375 0 1.796875 0.453125q0.703125 0.453125 0.984375 1.28125q1.171875 -1.734375 3.046875 -1.734375q1.46875 0 2.25 0.8125q0.796875 0.8125 0.796875 2.5l0 6.765625l-1.671875 0l0 -6.203125q0 -1.0 -0.15625 -1.4375q-0.15625 -0.453125 -0.59375 -0.71875q-0.421875 -0.265625 -1.0 -0.265625q-1.03125 0 -1.71875 0.6875q-0.6875 0.6875 -0.6875 2.21875l0 5.71875l-1.671875 0l0 -6.40625q0 -1.109375 -0.40625 -1.65625q-0.40625 -0.5625 -1.34375 -0.5625q-0.703125 0 -1.3125 0.375q-0.59375 0.359375 -0.859375 1.078125q-0.265625 0.71875 -0.265625 2.0625l0 5.109375l-1.671875 0zm14.9158325 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm15.735107 4.921875l0 -1.453125q-1.140625 1.671875 -3.125 1.671875q-0.859375 0 -1.625 -0.328125q-0.75 -0.34375 -1.125 -0.84375q-0.359375 -0.5 -0.515625 -1.234375q-0.09375 -0.5 -0.09375 -1.5625l0 -6.109375l1.671875 0l0 5.46875q0 1.3125 0.09375 1.765625q0.15625 0.65625 0.671875 1.03125q0.515625 0.375 1.265625 0.375q0.75 0 1.40625 -0.375q0.65625 -0.390625 0.921875 -1.046875q0.28125 -0.671875 0.28125 -1.9375l0 -5.28125l1.671875 0l0 9.859375l-1.5 0zm3.2506714 -2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375z" fill-rule="nonzero"></path><path fill="#000000" d="m558.36993 287.57263q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 
-1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm10.516296 1.328125l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm9.328125 0l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm9.640625 0.4375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.438232 2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 
-1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm9.328125 0l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm21.933289 -0.234375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.110107 5.875l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm16.813232 -1.21875q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 
0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm5.6257324 4.9375l-1.546875 0l0 -13.59375l1.65625 0l0 4.84375q1.0625 -1.328125 2.703125 -1.328125q0.90625 0 1.71875 0.375q0.8125 0.359375 1.328125 1.03125q0.53125 0.65625 0.828125 1.59375q0.296875 0.9375 0.296875 2.0q0 2.53125 -1.25 3.921875q-1.25 1.375 -3.0 1.375q-1.75 0 -2.734375 -1.453125l0 1.234375zm-0.015625 -5.0q0 1.765625 0.46875 2.5625q0.796875 1.28125 2.140625 1.28125q1.09375 0 1.890625 -0.9375q0.796875 -0.953125 0.796875 -2.84375q0 -1.921875 -0.765625 -2.84375q-0.765625 -0.921875 -1.84375 -0.921875q-1.09375 0 -1.890625 0.953125q-0.796875 0.953125 -0.796875 2.75zm8.813171 5.0l0 -13.59375l1.671875 0l0 13.59375l-1.671875 0zm10.926086 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm15.500732 5.875l0 -1.25q-0.9375 1.46875 -2.75 1.46875q-1.171875 0 -2.171875 -0.640625q-0.984375 -0.65625 -1.53125 -1.8125q-0.53125 -1.171875 -0.53125 -2.6875q0 -1.46875 0.484375 -2.671875q0.5 -1.203125 1.46875 -1.84375q0.984375 -0.640625 2.203125 -0.640625q0.890625 0 1.578125 0.375q0.703125 0.375 1.140625 0.984375l0 -4.875l1.65625 0l0 13.59375l-1.546875 0zm-5.28125 -4.921875q0 1.890625 0.796875 2.828125q0.8125 0.9375 1.890625 0.9375q1.09375 0 1.859375 -0.890625q0.765625 -0.890625 0.765625 -2.734375q0 -2.015625 -0.78125 -2.953125q-0.78125 -0.953125 -1.921875 -0.953125q-1.109375 0 -1.859375 0.90625q-0.75 0.90625 -0.75 2.859375z" fill-rule="nonzero"></path><path fill="#000000" d="m559.7137 309.182q-0.828125 0.921875 -1.8125 1.390625q-0.96875 0.453125 -2.09375 0.453125q-2.09375 0 -3.3125 -1.40625q-1.0 -1.15625 -1.0 -2.578125q0 -1.265625 0.8125 -2.28125q0.8125 -1.015625 2.421875 -1.78125q-0.90625 -1.0625 -1.21875 -1.71875q-0.296875 -0.65625 -0.296875 -1.265625q0 -1.234375 0.953125 -2.125q0.953125 -0.90625 2.421875 -0.90625q1.390625 0 2.265625 0.859375q0.890625 0.84375 0.890625 2.046875q0 1.9375 -2.5625 3.3125l2.4375 3.09375q0.421875 -0.8125 0.640625 -1.890625l1.734375 0.375q-0.4375 1.78125 -1.203125 2.9375q0.9375 1.234375 2.125 2.078125l-1.125 1.328125q-1.0 -0.640625 -2.078125 -1.921875zm-3.40625 -7.078125q1.09375 -0.640625 1.40625 -1.125q0.328125 -0.484375 0.328125 -1.0625q0 -0.703125 -0.453125 -1.140625q-0.4375 -0.4375 -1.09375 -0.4375q-0.671875 0 -1.125 0.4375q-0.453125 0.421875 -0.453125 1.0625q0 0.3125 0.15625 0.65625q0.171875 0.34375 0.5 0.734375l0.734375 0.875zm2.359375 5.765625l-3.0625 -3.796875q-1.359375 0.8125 -1.84375 1.5q-0.46875 0.6875 -0.46875 1.375q0 0.8125 0.65625 1.703125q0.671875 0.890625 1.875 0.890625q0.75 0 1.546875 -0.46875q0.8125 -0.46875 1.296875 -1.203125zm17.329956 1.703125q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 
-1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm10.516357 1.328125l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm9.328125 0l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm9.640625 0.4375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.438232 2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 
-1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm9.328125 0l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm10.015625 -8.75l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm5.6760864 0l-1.546875 0l0 -13.59375l1.65625 0l0 4.84375q1.0625 -1.328125 2.703125 -1.328125q0.90625 0 1.71875 0.375q0.8125 0.359375 1.328125 1.03125q0.53125 0.65625 0.828125 1.59375q0.296875 0.9375 0.296875 2.0q0 2.53125 -1.25 3.921875q-1.25 1.375 -3.0 1.375q-1.75 0 -2.734375 -1.453125l0 1.234375zm-0.015625 -5.0q0 1.765625 0.46875 2.5625q0.796875 1.28125 2.140625 1.28125q1.09375 0 1.890625 -0.9375q0.796875 -0.953125 0.796875 -2.84375q0 -1.921875 -0.765625 -2.84375q-0.765625 -0.921875 -1.84375 -0.921875q-1.09375 0 -1.890625 0.953125q-0.796875 0.953125 -0.796875 2.75zm8.813171 5.0l0 -13.59375l1.671875 0l0 13.59375l-1.671875 0zm10.926086 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm12.235107 2.53125q0 -0.34375 0 -0.5q0 -0.984375 0.265625 -1.703125q0.21875 -0.546875 0.671875 -1.09375q0.328125 -0.390625 1.1875 -1.15625q0.875 -0.765625 1.125 -1.21875q0.265625 -0.453125 0.265625 -1.0q0 -0.96875 -0.765625 -1.703125q-0.75 -0.734375 -1.859375 -0.734375q-1.0625 0 -1.78125 0.671875q-0.703125 0.65625 -0.9375 2.078125l-1.71875 -0.203125q0.234375 -1.90625 1.375 -2.90625q1.15625 -1.015625 3.03125 -1.015625q2.0 0 3.1875 1.09375q1.1875 1.078125 1.1875 2.609375q0 0.890625 -0.421875 1.640625q-0.40625 0.75 -1.625 1.828125q-0.8125 0.734375 -1.0625 1.078125q-0.25 0.34375 -0.375 0.796875q-0.125 0.4375 -0.140625 1.4375l-1.609375 0zm-0.09375 3.34375l0 -1.90625l1.890625 0l0 1.90625l-1.890625 0z" fill-rule="nonzero"></path><path fill="#d9ead3" d="m848.9265 239.90552l156.34644 0l0 88.59842l-156.34644 0z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m848.9265 239.90552l156.34644 0l0 88.59842l-156.34644 0z" 
fill-rule="nonzero"></path><path fill="#000000" d="m865.75464 274.7966l0 -1.609375l5.765625 0l0 5.046875q-1.328125 1.0625 -2.75 1.59375q-1.40625 0.53125 -2.890625 0.53125q-2.0 0 -3.640625 -0.859375q-1.625 -0.859375 -2.46875 -2.484375q-0.828125 -1.625 -0.828125 -3.625q0 -1.984375 0.828125 -3.703125q0.828125 -1.71875 2.390625 -2.546875q1.5625 -0.84375 3.59375 -0.84375q1.46875 0 2.65625 0.484375q1.203125 0.46875 1.875 1.328125q0.671875 0.84375 1.03125 2.21875l-1.625 0.4375q-0.3125 -1.03125 -0.765625 -1.625q-0.453125 -0.59375 -1.296875 -0.953125q-0.84375 -0.359375 -1.875 -0.359375q-1.234375 0 -2.140625 0.375q-0.890625 0.375 -1.453125 1.0q-0.546875 0.609375 -0.84375 1.34375q-0.53125 1.25 -0.53125 2.734375q0 1.8125 0.625 3.046875q0.640625 1.21875 1.828125 1.8125q1.203125 0.59375 2.546875 0.59375q1.171875 0 2.28125 -0.453125q1.109375 -0.453125 1.6875 -0.953125l0 -2.53125l-4.0 0zm14.683289 2.15625l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm12.766357 4.375l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm6.694702 1.5l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.9783325 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.438232 2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 
-0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm9.375 -1.984375q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm15.735046 4.921875l0 -1.453125q-1.140625 1.671875 -3.125 1.671875q-0.859375 0 -1.625 -0.328125q-0.75 -0.34375 -1.125 -0.84375q-0.359375 -0.5 -0.515625 -1.234375q-0.09375 -0.5 -0.09375 -1.5625l0 -6.109375l1.671875 0l0 5.46875q0 1.3125 0.09375 1.765625q0.15625 0.65625 0.671875 1.03125q0.515625 0.375 1.265625 0.375q0.75 0 1.40625 -0.375q0.65625 -0.390625 0.921875 -1.046875q0.28125 -0.671875 0.28125 -1.9375l0 -5.28125l1.671875 0l0 9.859375l-1.5 0zm3.9069824 0l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.6658325 -3.609375l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm9.640625 0.4375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875z" fill-rule="nonzero"></path><path fill="#000000" d="m859.58276 302.12473l0 -8.546875l-1.484375 0l0 -1.3125l1.484375 0l0 -1.046875q0 -0.984375 0.171875 -1.46875q0.234375 -0.65625 0.84375 -1.046875q0.609375 -0.40625 1.703125 -0.40625q0.703125 0 1.5625 0.15625l-0.25 1.46875q-0.515625 -0.09375 -0.984375 -0.09375q-0.765625 0 -1.078125 0.328125q-0.3125 0.3125 -0.3125 1.203125l0 0.90625l1.921875 0l0 1.3125l-1.921875 0l0 8.546875l-1.65625 0zm4.7614136 0l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 
-0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm5.6033325 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.281921 4.921875l0 -9.859375l1.5 0l0 1.390625q0.453125 -0.71875 1.21875 -1.15625q0.78125 -0.453125 1.765625 -0.453125q1.09375 0 1.796875 0.453125q0.703125 0.453125 0.984375 1.28125q1.171875 -1.734375 3.046875 -1.734375q1.46875 0 2.25 0.8125q0.796875 0.8125 0.796875 2.5l0 6.765625l-1.671875 0l0 -6.203125q0 -1.0 -0.15625 -1.4375q-0.15625 -0.453125 -0.59375 -0.71875q-0.421875 -0.265625 -1.0 -0.265625q-1.03125 0 -1.71875 0.6875q-0.6875 0.6875 -0.6875 2.21875l0 5.71875l-1.671875 0l0 -6.40625q0 -1.109375 -0.40625 -1.65625q-0.40625 -0.5625 -1.34375 -0.5625q-0.703125 0 -1.3125 0.375q-0.59375 0.359375 -0.859375 1.078125q-0.265625 0.71875 -0.265625 2.0625l0 5.109375l-1.671875 0zm19.442871 0l5.234375 -13.59375l1.9375 0l5.5625 13.59375l-2.046875 0l-1.59375 -4.125l-5.6875 0l-1.484375 4.125l-1.921875 0zm3.921875 -5.578125l4.609375 0l-1.40625 -3.78125q-0.65625 -1.703125 -0.96875 -2.8125q-0.265625 1.3125 -0.734375 2.59375l-1.5 4.0zm10.0217285 5.578125l0 -13.59375l5.125 0q1.359375 0 2.078125 0.125q1.0 0.171875 1.671875 0.640625q0.671875 0.46875 1.078125 1.3125q0.421875 0.84375 0.421875 1.84375q0 1.734375 -1.109375 2.9375q-1.09375 1.203125 -3.984375 1.203125l-3.484375 0l0 5.53125l-1.796875 0zm1.796875 -7.140625l3.515625 0q1.75 0 2.46875 -0.640625q0.734375 -0.65625 0.734375 -1.828125q0 -0.859375 -0.4375 -1.46875q-0.421875 -0.609375 -1.125 -0.796875q-0.453125 -0.125 -1.671875 -0.125l-3.484375 0l0 4.859375zm10.9435425 7.140625l0 -13.59375l1.8125 0l0 13.59375l-1.8125 0zm9.460388 -4.375l1.6875 -0.140625q0.125 1.015625 0.5625 1.671875q0.4375 0.65625 1.359375 1.0625q0.9375 0.40625 2.09375 0.40625q1.03125 0 1.8125 -0.3125q0.796875 -0.3125 1.1875 -0.84375q0.390625 -0.53125 0.390625 -1.15625q0 -0.640625 -0.375 -1.109375q-0.375 -0.484375 -1.234375 -0.8125q-0.546875 -0.21875 -2.421875 -0.65625q-1.875 -0.453125 -2.625 -0.859375q-0.96875 -0.515625 -1.453125 -1.265625q-0.46875 -0.75 -0.46875 -1.6875q0 -1.03125 0.578125 -1.921875q0.59375 -0.90625 1.703125 -1.359375q1.125 -0.46875 2.5 -0.46875q1.515625 0 2.671875 0.484375q1.15625 0.484375 1.765625 1.4375q0.625 0.9375 0.671875 2.140625l-1.71875 0.125q-0.140625 -1.28125 -0.953125 -1.9375q-0.796875 -0.671875 -2.359375 -0.671875q-1.625 0 -2.375 0.609375q-0.75 0.59375 -0.75 1.4375q0 0.734375 0.53125 1.203125q0.515625 0.46875 2.703125 0.96875q2.203125 0.5 3.015625 0.875q1.1875 0.546875 1.75 1.390625q0.578125 0.828125 0.578125 1.921875q0 1.09375 -0.625 2.0625q-0.625 0.953125 -1.796875 1.484375q-1.15625 0.53125 -2.609375 0.53125q-1.84375 0 -3.09375 -0.53125q-1.25 -0.546875 -1.96875 -1.625q-0.703125 -1.078125 -0.734375 -2.453125zm19.584167 1.203125l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 
0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.094482 5.875l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm8.9626465 0l-3.75 -9.859375l1.765625 0l2.125 5.90625q0.34375 0.953125 0.625 1.984375q0.21875 -0.78125 0.625 -1.875l2.1875 -6.015625l1.71875 0l-3.734375 9.859375l-1.5625 0zm13.34375 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.094482 5.875l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0z" fill-rule="nonzero"></path><path fill="#d9ead3" d="m467.042 484.1076l154.07874 -74.80313l154.07874 74.80313l-154.07874 74.80316z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m467.042 484.1076l154.07874 -74.80313l154.07874 74.80313l-154.07874 74.80316z" fill-rule="nonzero"></path><path fill="#000000" d="m553.94073 486.65262l1.6875 -0.140625q0.125 1.015625 0.5625 1.671875q0.4375 0.65625 1.359375 1.0625q0.9375 0.40625 2.09375 0.40625q1.03125 0 1.8125 -0.3125q0.796875 -0.3125 1.1875 -0.84375q0.390625 -0.53125 0.390625 -1.15625q0 -0.640625 -0.375 -1.109375q-0.375 -0.484375 -1.234375 -0.8125q-0.546875 -0.21875 -2.421875 -0.65625q-1.875 -0.453125 -2.625 -0.859375q-0.96875 -0.515625 -1.453125 -1.265625q-0.46875 -0.75 -0.46875 -1.6875q0 -1.03125 0.578125 -1.921875q0.59375 -0.90625 1.703125 -1.359375q1.125 -0.46875 2.5 -0.46875q1.515625 0 2.671875 0.484375q1.15625 0.484375 1.765625 1.4375q0.625 0.9375 0.671875 2.140625l-1.71875 0.125q-0.140625 -1.28125 -0.953125 -1.9375q-0.796875 -0.671875 -2.359375 -0.671875q-1.625 0 -2.375 0.609375q-0.75 0.59375 -0.75 1.4375q0 0.734375 0.53125 1.203125q0.515625 0.46875 2.703125 0.96875q2.203125 0.5 3.015625 0.875q1.1875 0.546875 1.75 1.390625q0.578125 0.828125 0.578125 1.921875q0 1.09375 -0.625 2.0625q-0.625 0.953125 -1.796875 1.484375q-1.15625 0.53125 -2.609375 0.53125q-1.84375 0 -3.09375 -0.53125q-1.25 -0.546875 -1.96875 -1.625q-0.703125 -1.078125 -0.734375 -2.453125zm19.584229 1.203125l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 
3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.438171 2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm9.328125 0l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm10.015625 -8.75l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm3.5042114 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.281982 4.921875l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 
0zm22.309021 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.000732 5.875l3.59375 -5.125l-3.328125 -4.734375l2.09375 0l1.515625 2.3125q0.421875 0.65625 0.671875 1.109375q0.421875 -0.609375 0.765625 -1.09375l1.65625 -2.328125l1.984375 0l-3.390625 4.640625l3.65625 5.21875l-2.046875 0l-2.03125 -3.0625l-0.53125 -0.828125l-2.59375 3.890625l-2.015625 0zm10.453125 -11.6875l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm3.4572754 -2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm13.65625 1.4375l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm0.8552246 -1.4375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 
-0.796875 -1.28125 -2.359375zm13.125 -0.40625q0 -0.34375 0 -0.5q0 -0.984375 0.265625 -1.703125q0.21875 -0.546875 0.671875 -1.09375q0.328125 -0.390625 1.1875 -1.15625q0.875 -0.765625 1.125 -1.21875q0.265625 -0.453125 0.265625 -1.0q0 -0.96875 -0.765625 -1.703125q-0.75 -0.734375 -1.859375 -0.734375q-1.0625 0 -1.78125 0.671875q-0.703125 0.65625 -0.9375 2.078125l-1.71875 -0.203125q0.234375 -1.90625 1.375 -2.90625q1.15625 -1.015625 3.03125 -1.015625q2.0 0 3.1875 1.09375q1.1875 1.078125 1.1875 2.609375q0 0.890625 -0.421875 1.640625q-0.40625 0.75 -1.625 1.828125q-0.8125 0.734375 -1.0625 1.078125q-0.25 0.34375 -0.375 0.796875q-0.125 0.4375 -0.140625 1.4375l-1.609375 0zm-0.09375 3.34375l0 -1.90625l1.890625 0l0 1.90625l-1.890625 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m618.7218 154.43832l1.1968384 48.0" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m618.7218 154.43832l1.0472412 42.00186" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m618.11786 196.48135l1.7643433 4.495514l1.5380859 -4.5778503z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m896.65094 455.34122l2.3936768 43.653534" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m896.65094 455.34122l2.0651855 37.662506" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m897.06683 493.09418l1.8977661 4.440857l1.4007568 -4.621704z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m772.80054 281.8714l76.12598 1.669281" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m772.80054 281.8714l70.12744 1.5377502" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m842.8917 285.0605l4.573242 -1.5518494l-4.5007935 -1.750824z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m620.52234 360.3176l1.1968384 48.0" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m620.52234 360.3176l1.0472412 42.00183" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m619.9184 402.36063l1.7643433 4.495514l1.5380859 -4.5778503z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m585.021 367.1076l58.80316 0l0 34.4252l-58.80316 0z" fill-rule="nonzero"></path><path fill="#000000" d="m595.4741 394.02762l0 -13.59375l1.84375 0l7.140625 10.671875l0 -10.671875l1.71875 0l0 13.59375l-1.84375 0l-7.140625 -10.6875l0 10.6875l-1.71875 0zm12.644836 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m788.84515 248.8924l58.80316 0l0 34.4252l-58.80316 0z" fill-rule="nonzero"></path><path fill="#000000" d="m803.142 
275.81238l0 -5.765625l-5.234375 -7.828125l2.1875 0l2.671875 4.09375q0.75 1.15625 1.390625 2.296875q0.609375 -1.0625 1.484375 -2.40625l2.625 -3.984375l2.109375 0l-5.4375 7.828125l0 5.765625l-1.796875 0zm15.1466675 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.438232 2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375z" fill-rule="nonzero"></path><path fill="#d9ead3" d="m845.084 442.14172l156.34644 0l0 88.59845l-156.34644 0z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m845.084 442.14172l156.34644 0l0 88.59845l-156.34644 0z" fill-rule="nonzero"></path><path fill="#000000" d="m861.9121 477.0328l0 -1.609375l5.765625 0l0 5.046875q-1.328125 1.0625 -2.75 1.59375q-1.40625 0.53125 -2.890625 0.53125q-2.0 0 -3.640625 -0.859375q-1.625 -0.859375 -2.46875 -2.484375q-0.828125 -1.625 -0.828125 -3.625q0 -1.984375 0.828125 -3.703125q0.828125 -1.71875 2.390625 -2.546875q1.5625 -0.84375 3.59375 -0.84375q1.46875 0 2.65625 0.484375q1.203125 0.46875 1.875 1.328125q0.671875 0.84375 1.03125 2.21875l-1.625 0.4375q-0.3125 -1.03125 -0.765625 -1.625q-0.453125 -0.59375 -1.296875 -0.953125q-0.84375 -0.359375 -1.875 -0.359375q-1.234375 0 -2.140625 0.375q-0.890625 0.375 -1.453125 1.0q-0.546875 0.609375 -0.84375 1.34375q-0.53125 1.25 -0.53125 2.734375q0 1.8125 0.625 3.046875q0.640625 1.21875 1.828125 1.8125q1.203125 0.59375 2.546875 0.59375q1.171875 0 2.28125 -0.453125q1.109375 -0.453125 1.6875 -0.953125l0 -2.53125l-4.0 0zm14.683289 2.15625l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 
0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm12.766357 4.375l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm6.694763 1.5l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.9782715 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.438232 2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm9.375 -1.984375q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm15.735107 4.921875l0 -1.453125q-1.140625 1.671875 -3.125 1.671875q-0.859375 0 -1.625 -0.328125q-0.75 -0.34375 -1.125 -0.84375q-0.359375 -0.5 -0.515625 -1.234375q-0.09375 -0.5 -0.09375 -1.5625l0 -6.109375l1.671875 0l0 5.46875q0 1.3125 0.09375 1.765625q0.15625 0.65625 0.671875 1.03125q0.515625 0.375 1.265625 0.375q0.75 0 1.40625 -0.375q0.65625 -0.390625 0.921875 -1.046875q0.28125 -0.671875 0.28125 -1.9375l0 -5.28125l1.671875 0l0 
9.859375l-1.5 0zm3.9069214 0l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.6658325 -3.609375l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm9.640625 0.4375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875z" fill-rule="nonzero"></path><path fill="#000000" d="m855.74023 504.36093l0 -8.546875l-1.484375 0l0 -1.3125l1.484375 0l0 -1.046875q0 -0.984375 0.171875 -1.46875q0.234375 -0.65625 0.84375 -1.046875q0.609375 -0.40625 1.703125 -0.40625q0.703125 0 1.5625 0.15625l-0.25 1.46875q-0.515625 -0.09375 -0.984375 -0.09375q-0.765625 0 -1.078125 0.328125q-0.3125 0.3125 -0.3125 1.203125l0 0.90625l1.921875 0l0 1.3125l-1.921875 0l0 8.546875l-1.65625 0zm4.7614136 0l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm5.6033325 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.281982 4.921875l0 -9.859375l1.5 0l0 1.390625q0.453125 -0.71875 1.21875 -1.15625q0.78125 -0.453125 1.765625 -0.453125q1.09375 0 1.796875 0.453125q0.703125 0.453125 0.984375 1.28125q1.171875 -1.734375 3.046875 -1.734375q1.46875 0 2.25 0.8125q0.796875 0.8125 0.796875 2.5l0 6.765625l-1.671875 0l0 -6.203125q0 -1.0 -0.15625 -1.4375q-0.15625 -0.453125 -0.59375 -0.71875q-0.421875 -0.265625 -1.0 -0.265625q-1.03125 0 -1.71875 0.6875q-0.6875 0.6875 -0.6875 2.21875l0 5.71875l-1.671875 0l0 -6.40625q0 -1.109375 -0.40625 -1.65625q-0.40625 -0.5625 -1.34375 -0.5625q-0.703125 0 -1.3125 0.375q-0.59375 0.359375 -0.859375 1.078125q-0.265625 0.71875 -0.265625 2.0625l0 5.109375l-1.671875 
0zm19.44281 0l5.234375 -13.59375l1.9375 0l5.5625 13.59375l-2.046875 0l-1.59375 -4.125l-5.6875 0l-1.484375 4.125l-1.921875 0zm3.921875 -5.578125l4.609375 0l-1.40625 -3.78125q-0.65625 -1.703125 -0.96875 -2.8125q-0.265625 1.3125 -0.734375 2.59375l-1.5 4.0zm10.0217285 5.578125l0 -13.59375l5.125 0q1.359375 0 2.078125 0.125q1.0 0.171875 1.671875 0.640625q0.671875 0.46875 1.078125 1.3125q0.421875 0.84375 0.421875 1.84375q0 1.734375 -1.109375 2.9375q-1.09375 1.203125 -3.984375 1.203125l-3.484375 0l0 5.53125l-1.796875 0zm1.796875 -7.140625l3.515625 0q1.75 0 2.46875 -0.640625q0.734375 -0.65625 0.734375 -1.828125q0 -0.859375 -0.4375 -1.46875q-0.421875 -0.609375 -1.125 -0.796875q-0.453125 -0.125 -1.671875 -0.125l-3.484375 0l0 4.859375zm10.9435425 7.140625l0 -13.59375l1.8125 0l0 13.59375l-1.8125 0zm9.460388 -4.375l1.6875 -0.140625q0.125 1.015625 0.5625 1.671875q0.4375 0.65625 1.359375 1.0625q0.9375 0.40625 2.09375 0.40625q1.03125 0 1.8125 -0.3125q0.796875 -0.3125 1.1875 -0.84375q0.390625 -0.53125 0.390625 -1.15625q0 -0.640625 -0.375 -1.109375q-0.375 -0.484375 -1.234375 -0.8125q-0.546875 -0.21875 -2.421875 -0.65625q-1.875 -0.453125 -2.625 -0.859375q-0.96875 -0.515625 -1.453125 -1.265625q-0.46875 -0.75 -0.46875 -1.6875q0 -1.03125 0.578125 -1.921875q0.59375 -0.90625 1.703125 -1.359375q1.125 -0.46875 2.5 -0.46875q1.515625 0 2.671875 0.484375q1.15625 0.484375 1.765625 1.4375q0.625 0.9375 0.671875 2.140625l-1.71875 0.125q-0.140625 -1.28125 -0.953125 -1.9375q-0.796875 -0.671875 -2.359375 -0.671875q-1.625 0 -2.375 0.609375q-0.75 0.59375 -0.75 1.4375q0 0.734375 0.53125 1.203125q0.515625 0.46875 2.703125 0.96875q2.203125 0.5 3.015625 0.875q1.1875 0.546875 1.75 1.390625q0.578125 0.828125 0.578125 1.921875q0 1.09375 -0.625 2.0625q-0.625 0.953125 -1.796875 1.484375q-1.15625 0.53125 -2.609375 0.53125q-1.84375 0 -3.09375 -0.53125q-1.25 -0.546875 -1.96875 -1.625q-0.703125 -1.078125 -0.734375 -2.453125zm19.584167 1.203125l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.094482 5.875l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm8.9627075 0l-3.75 -9.859375l1.765625 0l2.125 5.90625q0.34375 0.953125 0.625 1.984375q0.21875 -0.78125 0.625 -1.875l2.1875 -6.015625l1.71875 0l-3.734375 9.859375l-1.5625 0zm13.34375 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 
-1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.094421 5.875l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m768.958 484.1076l76.12598 1.6693115" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m768.958 484.1076l70.12744 1.5377808" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m839.0492 487.2967l4.573242 -1.5518494l-4.5007935 -1.750824z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m785.0026 451.1286l58.80316 0l0 34.4252l-58.80316 0z" fill-rule="nonzero"></path><path fill="#000000" d="m799.2995 478.0486l0 -5.765625l-5.234375 -7.828125l2.1875 0l2.671875 4.09375q0.75 1.15625 1.390625 2.296875q0.609375 -1.0625 1.484375 -2.40625l2.625 -3.984375l2.109375 0l-5.4375 7.828125l0 5.765625l-1.796875 0zm15.1467285 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.438171 2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m1093.5826 486.44095l3.4645996 -377.88977" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m1093.5826 486.44095l3.4645996 -377.88977" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m1005.27295 284.2047l89.60632 1.6378174" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m1005.27295 284.2047l83.6073 1.5281677" fill-rule="evenodd"></path><path fill="#000000" 
stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m1088.8501 287.38434l4.567505 -1.5685425l-4.507202 -1.734375z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m1099.9213 111.42519l-391.55908 -2.8661423" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m1099.9213 111.42519l-385.5592 -2.8222198" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m714.37415 106.95129l-4.550049 1.6184692l4.525879 1.684906z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m1001.4304 485.62204l89.60632 1.6378174" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m1001.4304 485.62204l83.6073 1.5281372" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m1085.0076 488.80167l4.567505 -1.5685425l-4.50708 -1.734375z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m621.1207 558.91077l0.12597656 76.81891" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m621.1207 558.91077l0.1161499 70.81891" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m619.58514 629.73236l1.6591797 4.5354004l1.6442871 -4.5408325z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m579.0289 573.6352l47.338562 0l0 34.42517l-47.338562 0z" fill-rule="nonzero"></path><path fill="#000000" d="m589.482 600.5552l0 -13.59375l1.84375 0l7.140625 10.671875l0 -10.671875l1.71875 0l0 13.59375l-1.84375 0l-7.140625 -10.6875l0 10.6875l-1.71875 0zm12.644836 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125z" fill-rule="nonzero"></path><path fill="#ead1dc" d="m545.084 634.39105l156.34644 0l0 70.26776l-156.34644 0z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m545.084 634.39105l156.34644 0l0 70.26776l-156.34644 0z" fill-rule="nonzero"></path><path fill="#000000" d="m557.92773 654.44495l-3.609375 -13.59375l1.84375 0l2.0625 8.90625q0.34375 1.40625 0.578125 2.78125q0.515625 -2.171875 0.609375 -2.515625l2.59375 -9.171875l2.171875 0l1.953125 6.875q0.734375 2.5625 1.046875 4.8125q0.265625 -1.28125 0.6875 -2.953125l2.125 -8.734375l1.8125 0l-3.734375 13.59375l-1.734375 0l-2.859375 -10.359375q-0.359375 -1.296875 -0.421875 -1.59375q-0.21875 0.9375 -0.40625 1.59375l-2.890625 10.359375l-1.828125 0zm21.764893 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 
-1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.078857 5.875l0 -13.59375l1.671875 0l0 13.59375l-1.671875 0zm10.613586 -3.609375l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm2.265625 -1.3125q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.281982 4.921875l0 -9.859375l1.5 0l0 1.390625q0.453125 -0.71875 1.21875 -1.15625q0.78125 -0.453125 1.765625 -0.453125q1.09375 0 1.796875 0.453125q0.703125 0.453125 0.984375 1.28125q1.171875 -1.734375 3.046875 -1.734375q1.46875 0 2.25 0.8125q0.796875 0.8125 0.796875 2.5l0 6.765625l-1.671875 0l0 -6.203125q0 -1.0 -0.15625 -1.4375q-0.15625 -0.453125 -0.59375 -0.71875q-0.421875 -0.265625 -1.0 -0.265625q-1.03125 0 -1.71875 0.6875q-0.6875 0.6875 -0.6875 2.21875l0 5.71875l-1.671875 0l0 -6.40625q0 -1.109375 -0.40625 -1.65625q-0.40625 -0.5625 -1.34375 -0.5625q-0.703125 0 -1.3125 0.375q-0.59375 0.359375 -0.859375 1.078125q-0.265625 0.71875 -0.265625 2.0625l0 5.109375l-1.671875 0zm22.290771 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm14.293396 9.65625l0 -13.640625l1.53125 0l0 1.28125q0.53125 -0.75 1.203125 -1.125q0.6875 -0.375 1.640625 -0.375q1.265625 0 2.234375 0.65625q0.96875 0.640625 1.453125 1.828125q0.5 1.1875 0.5 2.59375q0 1.515625 -0.546875 2.734375q-0.546875 1.203125 -1.578125 1.84375q-1.03125 0.640625 -2.171875 0.640625q-0.84375 0 -1.515625 -0.34375q-0.65625 -0.359375 -1.078125 -0.890625l0 4.796875l-1.671875 0zm1.515625 -8.65625q0 1.90625 0.765625 2.8125q0.78125 0.90625 1.875 0.90625q1.109375 0 1.890625 -0.9375q0.796875 -0.9375 0.796875 -2.921875q0 -1.875 -0.78125 -2.8125q-0.765625 -0.9375 -1.84375 -0.9375q-1.0625 0 -1.890625 1.0q-0.8125 1.0 -0.8125 2.890625zm15.297607 3.65625q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 
-0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm3.7819824 5.75l1.609375 0.25q0.109375 0.75 0.578125 1.09375q0.609375 0.453125 1.6875 0.453125q1.171875 0 1.796875 -0.46875q0.625 -0.453125 0.859375 -1.28125q0.125 -0.515625 0.109375 -2.15625q-1.09375 1.296875 -2.71875 1.296875q-2.03125 0 -3.15625 -1.46875q-1.109375 -1.46875 -1.109375 -3.515625q0 -1.40625 0.515625 -2.59375q0.515625 -1.203125 1.484375 -1.84375q0.96875 -0.65625 2.265625 -0.65625q1.75 0 2.875 1.40625l0 -1.1875l1.546875 0l0 8.515625q0 2.3125 -0.46875 3.265625q-0.46875 0.96875 -1.484375 1.515625q-1.015625 0.5625 -2.5 0.5625q-1.765625 0 -2.859375 -0.796875q-1.078125 -0.796875 -1.03125 -2.390625zm1.375 -5.921875q0 1.953125 0.765625 2.84375q0.78125 0.890625 1.9375 0.890625q1.140625 0 1.921875 -0.890625q0.78125 -0.890625 0.78125 -2.78125q0 -1.8125 -0.8125 -2.71875q-0.796875 -0.921875 -1.921875 -0.921875q-1.109375 0 -1.890625 0.90625q-0.78125 0.890625 -0.78125 2.671875zm16.047546 1.9375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875z" fill-rule="nonzero"></path><path fill="#000000" d="m557.1621 676.44495l-3.015625 -9.859375l1.71875 0l1.5625 5.6875l0.59375 2.125q0.03125 -0.15625 0.5 -2.03125l1.578125 -5.78125l1.71875 0l1.46875 5.71875l0.484375 1.890625l0.578125 -1.90625l1.6875 -5.703125l1.625 0l-3.078125 9.859375l-1.734375 0l-1.578125 -5.90625l-0.375 -1.671875l-2.0 7.578125l-1.734375 0zm11.660461 -11.6875l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm7.7854614 -1.5l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm1.5270386 1.5l0 -13.59375l1.671875 0l0 4.875q1.171875 -1.359375 2.953125 -1.359375q1.09375 0 1.890625 0.4375q0.8125 0.421875 1.15625 1.1875q0.359375 0.765625 0.359375 2.203125l0 6.25l-1.671875 0l0 
-6.25q0 -1.25 -0.546875 -1.8125q-0.546875 -0.578125 -1.53125 -0.578125q-0.75 0 -1.40625 0.390625q-0.640625 0.375 -0.921875 1.046875q-0.28125 0.65625 -0.28125 1.8125l0 5.390625l-1.671875 0zm19.215271 -1.5l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm7.9645386 0.28125q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.0632324 4.9375l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm5.9313965 0.8125l1.609375 0.25q0.109375 0.75 0.578125 1.09375q0.609375 0.453125 1.6875 0.453125q1.171875 0 1.796875 -0.46875q0.625 -0.453125 0.859375 -1.28125q0.125 -0.515625 0.109375 -2.15625q-1.09375 1.296875 -2.71875 1.296875q-2.03125 0 -3.15625 -1.46875q-1.109375 -1.46875 -1.109375 -3.515625q0 -1.40625 0.515625 -2.59375q0.515625 -1.203125 1.484375 -1.84375q0.96875 -0.65625 2.265625 -0.65625q1.75 0 2.875 1.40625l0 -1.1875l1.546875 0l0 8.515625q0 2.3125 -0.46875 3.265625q-0.46875 0.96875 -1.484375 1.515625q-1.015625 0.5625 -2.5 0.5625q-1.765625 0 -2.859375 -0.796875q-1.078125 -0.796875 -1.03125 -2.390625zm1.375 -5.921875q0 1.953125 0.765625 2.84375q0.78125 0.890625 1.9375 0.890625q1.140625 0 1.921875 -0.890625q0.78125 -0.890625 0.78125 -2.78125q0 -1.8125 -0.8125 -2.71875q-0.796875 -0.921875 -1.921875 -0.921875q-1.109375 0 -1.890625 0.90625q-0.78125 0.890625 -0.78125 2.671875zm16.047607 1.9375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 
0.78125q-0.78125 0.765625 -0.859375 2.046875zm12.766357 4.375l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125z" fill-rule="nonzero"></path><path fill="#000000" d="m554.05273 698.44495l5.234375 -13.59375l1.9375 0l5.5625 13.59375l-2.046875 0l-1.59375 -4.125l-5.6875 0l-1.484375 4.125l-1.921875 0zm3.921875 -5.578125l4.609375 0l-1.40625 -3.78125q-0.65625 -1.703125 -0.96875 -2.8125q-0.265625 1.3125 -0.734375 2.59375l-1.5 4.0zm10.0217285 5.578125l0 -13.59375l5.125 0q1.359375 0 2.078125 0.125q1.0 0.171875 1.671875 0.640625q0.671875 0.46875 1.078125 1.3125q0.421875 0.84375 0.421875 1.84375q0 1.734375 -1.109375 2.9375q-1.09375 1.203125 -3.984375 1.203125l-3.484375 0l0 5.53125l-1.796875 0zm1.796875 -7.140625l3.515625 0q1.75 0 2.46875 -0.640625q0.734375 -0.65625 0.734375 -1.828125q0 -0.859375 -0.4375 -1.46875q-0.421875 -0.609375 -1.125 -0.796875q-0.453125 -0.125 -1.671875 -0.125l-3.484375 0l0 4.859375zm10.9435425 7.140625l0 -13.59375l1.8125 0l0 13.59375l-1.8125 0zm8.601013 0.234375l3.9375 -14.0625l1.34375 0l-3.9375 14.0625l-1.34375 0zm11.585327 -0.234375l0 -13.59375l1.671875 0l0 13.59375l-1.671875 0zm3.5510864 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm8.985107 5.734375l1.609375 0.25q0.109375 0.75 0.578125 1.09375q0.609375 0.453125 1.6875 0.453125q1.171875 0 1.796875 -0.46875q0.625 -0.453125 0.859375 -1.28125q0.125 -0.515625 0.109375 -2.15625q-1.09375 1.296875 -2.71875 1.296875q-2.03125 0 -3.15625 -1.46875q-1.109375 -1.46875 -1.109375 -3.515625q0 -1.40625 0.515625 -2.59375q0.515625 -1.203125 1.484375 -1.84375q0.96875 -0.65625 2.265625 -0.65625q1.75 0 2.875 1.40625l0 -1.1875l1.546875 0l0 8.515625q0 2.3125 -0.46875 3.265625q-0.46875 0.96875 -1.484375 1.515625q-1.015625 0.5625 -2.5 0.5625q-1.765625 0 -2.859375 -0.796875q-1.078125 -0.796875 -1.03125 -2.390625zm1.375 -5.921875q0 1.953125 0.765625 2.84375q0.78125 0.890625 1.9375 0.890625q1.140625 0 1.921875 -0.890625q0.78125 -0.890625 0.78125 -2.78125q0 -1.8125 -0.8125 -2.71875q-0.796875 -0.921875 -1.921875 -0.921875q-1.109375 0 -1.890625 0.90625q-0.78125 0.890625 -0.78125 2.671875zm9.313171 -6.578125l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm4.1292114 0l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m603.2966 782.2992l2.3937378 43.653564" 
fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m603.2966 782.2992l2.0652466 37.662598" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m603.7125 820.0522l1.8977051 4.440857l1.4008179 -4.621704z" fill-rule="evenodd"></path><path fill="#bf9000" d="m512.5171 813.52496l114.74011 -60.960632l114.74017 60.960632l-114.74017 60.96057z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m512.5171 813.52496l114.74011 -60.960632l114.74017 60.960632l-114.74017 60.96057z" fill-rule="nonzero"></path><path fill="#000000" d="m605.663 816.06995l1.6875 -0.140625q0.125 1.015625 0.5625 1.671875q0.4375 0.65625 1.359375 1.0625q0.9375 0.40625 2.09375 0.40625q1.03125 0 1.8125 -0.3125q0.796875 -0.3125 1.1875 -0.84375q0.390625 -0.53125 0.390625 -1.15625q0 -0.640625 -0.375 -1.109375q-0.375 -0.484375 -1.234375 -0.8125q-0.546875 -0.21875 -2.421875 -0.65625q-1.875 -0.453125 -2.625 -0.859375q-0.96875 -0.515625 -1.453125 -1.265625q-0.46875 -0.75 -0.46875 -1.6875q0 -1.03125 0.578125 -1.921875q0.59375 -0.90625 1.703125 -1.359375q1.125 -0.46875 2.5 -0.46875q1.515625 0 2.671875 0.484375q1.15625 0.484375 1.765625 1.4375q0.625 0.9375 0.671875 2.140625l-1.71875 0.125q-0.140625 -1.28125 -0.953125 -1.9375q-0.796875 -0.671875 -2.359375 -0.671875q-1.625 0 -2.375 0.609375q-0.75 0.59375 -0.75 1.4375q0 0.734375 0.53125 1.203125q0.515625 0.46875 2.703125 0.96875q2.203125 0.5 3.015625 0.875q1.1875 0.546875 1.75 1.390625q0.578125 0.828125 0.578125 1.921875q0 1.09375 -0.625 2.0625q-0.625 0.953125 -1.796875 1.484375q-1.15625 0.53125 -2.609375 0.53125q-1.84375 0 -3.09375 -0.53125q-1.25 -0.546875 -1.96875 -1.625q-0.703125 -1.078125 -0.734375 -2.453125zm12.4436035 0l1.6875 -0.140625q0.125 1.015625 0.5625 1.671875q0.4375 0.65625 1.359375 1.0625q0.9375 0.40625 2.09375 0.40625q1.03125 0 1.8125 -0.3125q0.796875 -0.3125 1.1875 -0.84375q0.390625 -0.53125 0.390625 -1.15625q0 -0.640625 -0.375 -1.109375q-0.375 -0.484375 -1.234375 -0.8125q-0.546875 -0.21875 -2.421875 -0.65625q-1.875 -0.453125 -2.625 -0.859375q-0.96875 -0.515625 -1.453125 -1.265625q-0.46875 -0.75 -0.46875 -1.6875q0 -1.03125 0.578125 -1.921875q0.59375 -0.90625 1.703125 -1.359375q1.125 -0.46875 2.5 -0.46875q1.515625 0 2.671875 0.484375q1.15625 0.484375 1.765625 1.4375q0.625 0.9375 0.671875 2.140625l-1.71875 0.125q-0.140625 -1.28125 -0.953125 -1.9375q-0.796875 -0.671875 -2.359375 -0.671875q-1.625 0 -2.375 0.609375q-0.75 0.59375 -0.75 1.4375q0 0.734375 0.53125 1.203125q0.515625 0.46875 2.703125 0.96875q2.203125 0.5 3.015625 0.875q1.1875 0.546875 1.75 1.390625q0.578125 0.828125 0.578125 1.921875q0 1.09375 -0.625 2.0625q-0.625 0.953125 -1.796875 1.484375q-1.15625 0.53125 -2.609375 0.53125q-1.84375 0 -3.09375 -0.53125q-1.25 -0.546875 -1.96875 -1.625q-0.703125 -1.078125 -0.734375 -2.453125zm12.5060425 -2.25q0 -3.390625 1.8125 -5.296875q1.828125 -1.921875 4.703125 -1.921875q1.875 0 3.390625 0.90625q1.515625 0.890625 2.296875 2.5q0.796875 1.609375 0.796875 3.65625q0 2.0625 -0.84375 3.703125q-0.828125 1.625 -2.359375 2.46875q-1.53125 0.84375 -3.296875 0.84375q-1.921875 0 -3.4375 -0.921875q-1.5 -0.9375 -2.28125 -2.53125q-0.78125 -1.609375 -0.78125 -3.40625zm1.859375 0.03125q0 2.453125 1.3125 3.875q1.328125 1.40625 3.3125 1.40625q2.03125 0 3.34375 -1.421875q1.3125 -1.4375 1.3125 -4.0625q0 -1.65625 -0.5625 -2.890625q-0.546875 -1.234375 -1.640625 -1.921875q-1.078125 -0.6875 -2.421875 -0.6875q-1.90625 0 
-3.28125 1.3125q-1.375 1.3125 -1.375 4.390625z" fill-rule="nonzero"></path><path fill="#f1c232" d="m677.6772 941.51184l179.27557 0l0 94.64563l-179.27557 0z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m677.6772 941.51184l179.27557 0l0 94.64563l-179.27557 0z" fill-rule="nonzero"></path><path fill="#000000" d="m725.6051 990.4265l0 -1.609375l5.765625 0l0 5.046875q-1.328125 1.0625 -2.75 1.59375q-1.40625 0.53125 -2.890625 0.53125q-2.0 0 -3.640625 -0.859375q-1.625 -0.859375 -2.46875 -2.484375q-0.828125 -1.625 -0.828125 -3.625q0 -1.984375 0.828125 -3.703125q0.828125 -1.71875 2.390625 -2.546875q1.5625 -0.84375 3.59375 -0.84375q1.46875 0 2.65625 0.484375q1.203125 0.46875 1.875 1.328125q0.671875 0.84375 1.03125 2.21875l-1.625 0.4375q-0.3125 -1.03125 -0.765625 -1.625q-0.453125 -0.59375 -1.296875 -0.953125q-0.84375 -0.359375 -1.875 -0.359375q-1.234375 0 -2.140625 0.375q-0.890625 0.375 -1.453125 1.0q-0.546875 0.609375 -0.84375 1.34375q-0.53125 1.25 -0.53125 2.734375q0 1.8125 0.625 3.046875q0.640625 1.21875 1.828125 1.8125q1.203125 0.59375 2.546875 0.59375q1.171875 0 2.28125 -0.453125q1.109375 -0.453125 1.6875 -0.953125l0 -2.53125l-4.0 0zm7.9332886 5.328125l0 -9.859375l1.5 0l0 1.390625q0.453125 -0.71875 1.21875 -1.15625q0.78125 -0.453125 1.765625 -0.453125q1.09375 0 1.796875 0.453125q0.703125 0.453125 0.984375 1.28125q1.171875 -1.734375 3.046875 -1.734375q1.46875 0 2.25 0.8125q0.796875 0.8125 0.796875 2.5l0 6.765625l-1.671875 0l0 -6.203125q0 -1.0 -0.15625 -1.4375q-0.15625 -0.453125 -0.59375 -0.71875q-0.421875 -0.265625 -1.0 -0.265625q-1.03125 0 -1.71875 0.6875q-0.6875 0.6875 -0.6875 2.21875l0 5.71875l-1.671875 0l0 -6.40625q0 -1.109375 -0.40625 -1.65625q-0.40625 -0.5625 -1.34375 -0.5625q-0.703125 0 -1.3125 0.375q-0.59375 0.359375 -0.859375 1.078125q-0.265625 0.71875 -0.265625 2.0625l0 5.109375l-1.671875 0zm21.978333 -1.21875q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.0944824 -6.75l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm4.0979004 0l0 -13.59375l1.671875 0l0 13.59375l-1.671875 0zm15.796875 -1.21875q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 
-0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm10.531982 4.9375l0 -1.453125q-1.140625 1.671875 -3.125 1.671875q-0.859375 0 -1.625 -0.328125q-0.75 -0.34375 -1.125 -0.84375q-0.359375 -0.5 -0.515625 -1.234375q-0.09375 -0.5 -0.09375 -1.5625l0 -6.109375l1.671875 0l0 5.46875q0 1.3125 0.09375 1.765625q0.15625 0.65625 0.671875 1.03125q0.515625 0.375 1.265625 0.375q0.75 0 1.40625 -0.375q0.65625 -0.390625 0.921875 -1.046875q0.28125 -0.671875 0.28125 -1.9375l0 -5.28125l1.671875 0l0 9.859375l-1.5 0zm7.5788574 -1.5l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm1.5270386 1.5l0 -13.59375l1.671875 0l0 4.875q1.171875 -1.359375 2.953125 -1.359375q1.09375 0 1.890625 0.4375q0.8125 0.421875 1.15625 1.1875q0.359375 0.765625 0.359375 2.203125l0 6.25l-1.671875 0l0 -6.25q0 -1.25 -0.546875 -1.8125q-0.546875 -0.578125 -1.53125 -0.578125q-0.75 0 -1.40625 0.390625q-0.640625 0.375 -0.921875 1.046875q-0.28125 0.65625 -0.28125 1.8125l0 5.390625l-1.671875 0z" fill-rule="nonzero"></path><path fill="#ffd966" d="m400.60892 941.51184l179.2756 0l0 94.64563l-179.2756 0z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m400.60892 941.51184l179.2756 0l0 94.64563l-179.2756 0z" fill-rule="nonzero"></path><path fill="#000000" d="m422.49536 995.75464l0 -1.453125q-1.140625 1.671875 -3.125 1.671875q-0.859375 0 -1.625 -0.328125q-0.75 -0.34375 -1.125 -0.84375q-0.359375 -0.5 -0.515625 -1.234375q-0.09375 -0.5 -0.09375 -1.5625l0 -6.109375l1.671875 0l0 5.46875q0 1.3125 0.09375 1.765625q0.15625 0.65625 0.671875 1.03125q0.515625 0.375 1.265625 0.375q0.75 0 1.40625 -0.375q0.65625 -0.390625 0.921875 -1.046875q0.28125 -0.671875 0.28125 -1.9375l0 -5.28125l1.671875 0l0 9.859375l-1.5 0zm3.250702 -2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 
0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm16.75 -0.234375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.094482 5.875l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm6.228302 0l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm16.813202 -1.21875q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.0788574 4.9375l0 -9.859375l1.5 0l0 1.390625q0.453125 -0.71875 1.21875 -1.15625q0.78125 -0.453125 1.765625 -0.453125q1.09375 0 1.796875 0.453125q0.703125 0.453125 0.984375 1.28125q1.171875 -1.734375 3.046875 -1.734375q1.46875 0 2.25 0.8125q0.796875 0.8125 0.796875 2.5l0 6.765625l-1.671875 0l0 -6.203125q0 -1.0 -0.15625 -1.4375q-0.15625 -0.453125 -0.59375 -0.71875q-0.421875 -0.265625 -1.0 -0.265625q-1.03125 0 -1.71875 0.6875q-0.6875 0.6875 -0.6875 2.21875l0 5.71875l-1.671875 0l0 -6.40625q0 -1.109375 -0.40625 -1.65625q-0.40625 -0.5625 -1.34375 -0.5625q-0.703125 0 -1.3125 0.375q-0.59375 0.359375 -0.859375 1.078125q-0.265625 0.71875 -0.265625 2.0625l0 
5.109375l-1.671875 0zm22.290802 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm13.043396 6.109375l3.9375 -14.0625l1.34375 0l-3.9375 14.0625l-1.34375 0zm11.616577 3.546875l0 -13.640625l1.53125 0l0 1.28125q0.53125 -0.75 1.203125 -1.125q0.6875 -0.375 1.640625 -0.375q1.265625 0 2.234375 0.65625q0.96875 0.640625 1.453125 1.828125q0.5 1.1875 0.5 2.59375q0 1.515625 -0.546875 2.734375q-0.546875 1.203125 -1.578125 1.84375q-1.03125 0.640625 -2.171875 0.640625q-0.84375 0 -1.515625 -0.34375q-0.65625 -0.359375 -1.078125 -0.890625l0 4.796875l-1.671875 0zm1.515625 -8.65625q0 1.90625 0.765625 2.8125q0.78125 0.90625 1.875 0.90625q1.109375 0 1.890625 -0.9375q0.796875 -0.9375 0.796875 -2.921875q0 -1.875 -0.78125 -2.8125q-0.765625 -0.9375 -1.84375 -0.9375q-1.0625 0 -1.890625 1.0q-0.8125 1.0 -0.8125 2.890625zm8.188232 1.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm11.828125 2.9375l-3.015625 -9.859375l1.71875 0l1.5625 5.6875l0.59375 2.125q0.03125 -0.15625 0.5 -2.03125l1.578125 -5.78125l1.71875 0l1.46875 5.71875l0.484375 1.890625l0.578125 -1.90625l1.6875 -5.703125l1.625 0l-3.078125 9.859375l-1.734375 0l-1.578125 -5.90625l-0.375 -1.671875l-2.0 7.578125l-1.734375 0zm18.035461 0l0 -1.25q-0.9375 1.46875 -2.75 1.46875q-1.171875 0 -2.171875 -0.640625q-0.984375 -0.65625 -1.53125 -1.8125q-0.53125 -1.171875 -0.53125 -2.6875q0 -1.46875 0.484375 -2.671875q0.5 -1.203125 1.46875 -1.84375q0.984375 -0.640625 2.203125 -0.640625q0.890625 0 1.578125 0.375q0.703125 0.375 1.140625 0.984375l0 -4.875l1.65625 0l0 13.59375l-1.546875 0zm-5.28125 -4.921875q0 1.890625 0.796875 2.828125q0.8125 0.9375 1.890625 0.9375q1.09375 0 1.859375 -0.890625q0.765625 -0.890625 0.765625 -2.734375q0 -2.015625 -0.78125 -2.953125q-0.78125 -0.953125 -1.921875 -0.953125q-1.109375 0 -1.859375 0.90625q-0.75 0.90625 -0.75 2.859375z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m627.2572 874.48553l0 33.513184l-137.00787 0l0 33.510498" fill-rule="nonzero"></path><path stroke="#000000" 
stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m627.2572 874.48553l0 33.513123l-137.00787 0l0 30.083435" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m490.24933 938.0821l-1.1245728 -1.1245728l1.1245728 3.0897827l1.1246033 -3.0897827z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m627.2572 874.48553l0 33.513184l140.06299 0l0 33.510498" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m627.2572 874.48553l0 33.513123l140.06299 0l0 30.083435" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m767.3202 938.0821l-1.1245728 -1.1245728l1.1245728 3.0897827l1.1245728 -3.0897827z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m733.7454 1068.1392l137.00787 0l0 48.0l-137.00787 0z" fill-rule="nonzero"></path><path fill="#000000" d="m742.7142 1095.0591l5.234375 -13.59375l1.9375 0l5.5625 13.59375l-2.046875 0l-1.59375 -4.125l-5.6875 0l-1.484375 4.125l-1.921875 0zm3.921875 -5.578125l4.609375 0l-1.40625 -3.78125q-0.65625 -1.703125 -0.96875 -2.8125q-0.265625 1.3125 -0.734375 2.59375l-1.5 4.0zm16.256042 5.578125l0 -1.453125q-1.140625 1.671875 -3.125 1.671875q-0.859375 0 -1.625 -0.328125q-0.75 -0.34375 -1.125 -0.84375q-0.359375 -0.5 -0.515625 -1.234375q-0.09375 -0.5 -0.09375 -1.5625l0 -6.109375l1.671875 0l0 5.46875q0 1.3125 0.09375 1.765625q0.15625 0.65625 0.671875 1.03125q0.515625 0.375 1.265625 0.375q0.75 0 1.40625 -0.375q0.65625 -0.390625 0.921875 -1.046875q0.28125 -0.671875 0.28125 -1.9375l0 -5.28125l1.671875 0l0 9.859375l-1.5 0zm7.5788574 -1.5l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm1.5270386 1.5l0 -13.59375l1.671875 0l0 4.875q1.171875 -1.359375 2.953125 -1.359375q1.09375 0 1.890625 0.4375q0.8125 0.421875 1.15625 1.1875q0.359375 0.765625 0.359375 2.203125l0 6.25l-1.671875 0l0 -6.25q0 -1.25 -0.546875 -1.8125q-0.546875 -0.578125 -1.53125 -0.578125q-0.75 0 -1.40625 0.390625q-0.640625 0.375 -0.921875 1.046875q-0.28125 0.65625 -0.28125 1.8125l0 5.390625l-1.671875 0zm19.215271 -1.5l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm0.9020386 -3.421875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.297607 4.921875l0 -13.59375l1.671875 0l0 7.75l3.953125 -4.015625l2.15625 0l-3.765625 
3.65625l4.140625 6.203125l-2.0625 0l-3.25 -5.03125l-1.171875 1.125l0 3.90625l-1.671875 0zm16.0625 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.110107 5.875l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m1027.8976 907.0079l229.48035 0l0 94.64569l-229.48035 0z" fill-rule="nonzero"></path><path fill="#000000" d="m1038.3976 933.92786l0 -13.59375l6.03125 0q1.8125 0 2.75 0.359375q0.953125 0.359375 1.515625 1.296875q0.5625 0.921875 0.5625 2.046875q0 1.453125 -0.9375 2.453125q-0.921875 0.984375 -2.890625 1.25q0.71875 0.34375 1.09375 0.671875q0.78125 0.734375 1.484375 1.8125l2.375 3.703125l-2.265625 0l-1.796875 -2.828125q-0.796875 -1.21875 -1.3125 -1.875q-0.5 -0.65625 -0.90625 -0.90625q-0.40625 -0.265625 -0.8125 -0.359375q-0.3125 -0.078125 -1.015625 -0.078125l-2.078125 0l0 6.046875l-1.796875 0zm1.796875 -7.59375l3.859375 0q1.234375 0 1.921875 -0.25q0.703125 -0.265625 1.0625 -0.828125q0.375 -0.5625 0.375 -1.21875q0 -0.96875 -0.703125 -1.578125q-0.703125 -0.625 -2.21875 -0.625l-4.296875 0l0 4.5zm18.176147 4.421875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm15.500732 5.875l0 -1.25q-0.9375 1.46875 -2.75 1.46875q-1.171875 0 -2.171875 -0.640625q-0.984375 -0.65625 -1.53125 -1.8125q-0.53125 -1.171875 -0.53125 -2.6875q0 -1.46875 0.484375 -2.671875q0.5 -1.203125 1.46875 -1.84375q0.984375 -0.640625 2.203125 -0.640625q0.890625 0 1.578125 0.375q0.703125 0.375 1.140625 0.984375l0 -4.875l1.65625 0l0 13.59375l-1.546875 0zm-5.28125 -4.921875q0 1.890625 0.796875 2.828125q0.8125 0.9375 1.890625 0.9375q1.09375 0 1.859375 -0.890625q0.765625 -0.890625 0.765625 -2.734375q0 -2.015625 -0.78125 -2.953125q-0.78125 -0.953125 -1.921875 -0.953125q-1.109375 0 -1.859375 0.90625q-0.75 0.90625 -0.75 2.859375zm9.281982 -6.765625l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm4.1135254 0l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 
-1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.9782715 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm15.547607 2.265625l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm6.546875 2.109375l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm10.366577 0l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm0.9020996 -3.421875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm13.18396 4.921875l5.234375 -13.59375l1.9375 0l5.5625 13.59375l-2.046875 0l-1.59375 -4.125l-5.6875 0l-1.484375 4.125l-1.921875 0zm3.921875 -5.578125l4.609375 0l-1.40625 -3.78125q-0.65625 -1.703125 -0.96875 -2.8125q-0.265625 1.3125 -0.734375 2.59375l-1.5 4.0zm10.0217285 5.578125l0 -13.59375l5.125 0q1.359375 0 2.078125 0.125q1.0 0.171875 1.671875 0.640625q0.671875 0.46875 1.078125 1.3125q0.421875 0.84375 0.421875 1.84375q0 1.734375 -1.109375 2.9375q-1.09375 1.203125 -3.984375 1.203125l-3.484375 0l0 5.53125l-1.796875 0zm1.796875 -7.140625l3.515625 0q1.75 0 2.46875 -0.640625q0.734375 -0.65625 0.734375 -1.828125q0 -0.859375 -0.4375 -1.46875q-0.421875 -0.609375 -1.125 -0.796875q-0.453125 -0.125 -1.671875 -0.125l-3.484375 0l0 4.859375zm10.9436035 7.140625l0 -13.59375l1.8125 0l0 13.59375l-1.8125 0z" 
fill-rule="nonzero"></path><path fill="#000000" d="m1037.757 951.55286l1.6875 -0.140625q0.125 1.015625 0.5625 1.671875q0.4375 0.65625 1.359375 1.0625q0.9375 0.40625 2.09375 0.40625q1.03125 0 1.8125 -0.3125q0.796875 -0.3125 1.1875 -0.84375q0.390625 -0.53125 0.390625 -1.15625q0 -0.640625 -0.375 -1.109375q-0.375 -0.484375 -1.234375 -0.8125q-0.546875 -0.21875 -2.421875 -0.65625q-1.875 -0.453125 -2.625 -0.859375q-0.96875 -0.515625 -1.453125 -1.265625q-0.46875 -0.75 -0.46875 -1.6875q0 -1.03125 0.578125 -1.921875q0.59375 -0.90625 1.703125 -1.359375q1.125 -0.46875 2.5 -0.46875q1.515625 0 2.671875 0.484375q1.15625 0.484375 1.765625 1.4375q0.625 0.9375 0.671875 2.140625l-1.71875 0.125q-0.140625 -1.28125 -0.953125 -1.9375q-0.796875 -0.671875 -2.359375 -0.671875q-1.625 0 -2.375 0.609375q-0.75 0.59375 -0.75 1.4375q0 0.734375 0.53125 1.203125q0.515625 0.46875 2.703125 0.96875q2.203125 0.5 3.015625 0.875q1.1875 0.546875 1.75 1.390625q0.578125 0.828125 0.578125 1.921875q0 1.09375 -0.625 2.0625q-0.625 0.953125 -1.796875 1.484375q-1.15625 0.53125 -2.609375 0.53125q-1.84375 0 -3.09375 -0.53125q-1.25 -0.546875 -1.96875 -1.625q-0.703125 -1.078125 -0.734375 -2.453125zm19.584229 1.203125l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.094482 5.875l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm8.9626465 0l-3.75 -9.859375l1.765625 0l2.125 5.90625q0.34375 0.953125 0.625 1.984375q0.21875 -0.78125 0.625 -1.875l2.1875 -6.015625l1.71875 0l-3.734375 9.859375l-1.5625 0zm13.34375 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.094482 5.875l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm18.423096 0l-3.015625 -9.859375l1.71875 0l1.5625 5.6875l0.59375 2.125q0.03125 -0.15625 0.5 -2.03125l1.578125 -5.78125l1.71875 0l1.46875 5.71875l0.484375 1.890625l0.578125 -1.90625l1.6875 -5.703125l1.625 0l-3.078125 9.859375l-1.734375 0l-1.578125 -5.90625l-0.375 -1.671875l-2.0 7.578125l-1.734375 0zm11.6604 -11.6875l0 
-1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm7.7854004 -1.5l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm1.5270996 1.5l0 -13.59375l1.671875 0l0 4.875q1.171875 -1.359375 2.953125 -1.359375q1.09375 0 1.890625 0.4375q0.8125 0.421875 1.15625 1.1875q0.359375 0.765625 0.359375 2.203125l0 6.25l-1.671875 0l0 -6.25q0 -1.25 -0.546875 -1.8125q-0.546875 -0.578125 -1.53125 -0.578125q-0.75 0 -1.40625 0.390625q-0.640625 0.375 -0.921875 1.046875q-0.28125 0.65625 -0.28125 1.8125l0 5.390625l-1.671875 0z" fill-rule="nonzero"></path><path fill="#000000" d="m1037.757 973.55286l1.6875 -0.140625q0.125 1.015625 0.5625 1.671875q0.4375 0.65625 1.359375 1.0625q0.9375 0.40625 2.09375 0.40625q1.03125 0 1.8125 -0.3125q0.796875 -0.3125 1.1875 -0.84375q0.390625 -0.53125 0.390625 -1.15625q0 -0.640625 -0.375 -1.109375q-0.375 -0.484375 -1.234375 -0.8125q-0.546875 -0.21875 -2.421875 -0.65625q-1.875 -0.453125 -2.625 -0.859375q-0.96875 -0.515625 -1.453125 -1.265625q-0.46875 -0.75 -0.46875 -1.6875q0 -1.03125 0.578125 -1.921875q0.59375 -0.90625 1.703125 -1.359375q1.125 -0.46875 2.5 -0.46875q1.515625 0 2.671875 0.484375q1.15625 0.484375 1.765625 1.4375q0.625 0.9375 0.671875 2.140625l-1.71875 0.125q-0.140625 -1.28125 -0.953125 -1.9375q-0.796875 -0.671875 -2.359375 -0.671875q-1.625 0 -2.375 0.609375q-0.75 0.59375 -0.75 1.4375q0 0.734375 0.53125 1.203125q0.515625 0.46875 2.703125 0.96875q2.203125 0.5 3.015625 0.875q1.1875 0.546875 1.75 1.390625q0.578125 0.828125 0.578125 1.921875q0 1.09375 -0.625 2.0625q-0.625 0.953125 -1.796875 1.484375q-1.15625 0.53125 -2.609375 0.53125q-1.84375 0 -3.09375 -0.53125q-1.25 -0.546875 -1.96875 -1.625q-0.703125 -1.078125 -0.734375 -2.453125zm12.4436035 0l1.6875 -0.140625q0.125 1.015625 0.5625 1.671875q0.4375 0.65625 1.359375 1.0625q0.9375 0.40625 2.09375 0.40625q1.03125 0 1.8125 -0.3125q0.796875 -0.3125 1.1875 -0.84375q0.390625 -0.53125 0.390625 -1.15625q0 -0.640625 -0.375 -1.109375q-0.375 -0.484375 -1.234375 -0.8125q-0.546875 -0.21875 -2.421875 -0.65625q-1.875 -0.453125 -2.625 -0.859375q-0.96875 -0.515625 -1.453125 -1.265625q-0.46875 -0.75 -0.46875 -1.6875q0 -1.03125 0.578125 -1.921875q0.59375 -0.90625 1.703125 -1.359375q1.125 -0.46875 2.5 -0.46875q1.515625 0 2.671875 0.484375q1.15625 0.484375 1.765625 1.4375q0.625 0.9375 0.671875 2.140625l-1.71875 0.125q-0.140625 -1.28125 -0.953125 -1.9375q-0.796875 -0.671875 -2.359375 -0.671875q-1.625 0 -2.375 0.609375q-0.75 0.59375 -0.75 1.4375q0 0.734375 0.53125 1.203125q0.515625 0.46875 2.703125 0.96875q2.203125 0.5 3.015625 0.875q1.1875 0.546875 1.75 1.390625q0.578125 0.828125 0.578125 1.921875q0 1.09375 -0.625 2.0625q-0.625 0.953125 -1.796875 1.484375q-1.15625 0.53125 -2.609375 0.53125q-1.84375 0 -3.09375 -0.53125q-1.25 -0.546875 -1.96875 -1.625q-0.703125 -1.078125 -0.734375 -2.453125zm12.5061035 -2.25q0 -3.390625 1.8125 -5.296875q1.828125 -1.921875 4.703125 -1.921875q1.875 0 3.390625 0.90625q1.515625 0.890625 2.296875 2.5q0.796875 1.609375 0.796875 3.65625q0 2.0625 -0.84375 3.703125q-0.828125 1.625 -2.359375 2.46875q-1.53125 0.84375 -3.296875 0.84375q-1.921875 0 -3.4375 -0.921875q-1.5 -0.9375 -2.28125 -2.53125q-0.78125 -1.609375 -0.78125 -3.40625zm1.859375 0.03125q0 
2.453125 1.3125 3.875q1.328125 1.40625 3.3125 1.40625q2.03125 0 3.34375 -1.421875q1.3125 -1.4375 1.3125 -4.0625q0 -1.65625 -0.5625 -2.890625q-0.546875 -1.234375 -1.640625 -1.921875q-1.078125 -0.6875 -2.421875 -0.6875q-1.90625 0 -3.28125 1.3125q-1.375 1.3125 -1.375 4.390625zm21.819702 5.09375l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm0.9020996 -3.421875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.297607 4.921875l0 -13.59375l1.671875 0l0 7.75l3.953125 -4.015625l2.15625 0l-3.765625 3.65625l4.140625 6.203125l-2.0625 0l-3.25 -5.03125l-1.171875 1.125l0 3.90625l-1.671875 0zm16.0625 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.110107 5.875l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0z" fill-rule="nonzero"></path><path fill="#bf9000" d="m550.4829 1121.1864l156.3465 0l0 76.81885l-156.3465 0z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m550.4829 1121.1864l156.3465 0l0 76.81885l-156.3465 0z" fill-rule="nonzero"></path><path fill="#000000" d="m571.6152 1166.5157l0 -13.59375l1.8125 0l0 13.59375l-1.8125 0zm11.058289 0l0 -1.25q-0.9375 1.46875 -2.75 1.46875q-1.171875 0 -2.171875 -0.640625q-0.984375 -0.65625 -1.53125 -1.8125q-0.53125 -1.171875 -0.53125 -2.6875q0 -1.46875 0.484375 -2.671875q0.5 -1.203125 1.46875 -1.84375q0.984375 -0.640625 2.203125 -0.640625q0.890625 0 1.578125 0.375q0.703125 0.375 1.140625 0.984375l0 -4.875l1.65625 0l0 13.59375l-1.546875 0zm-5.28125 -4.921875q0 1.890625 0.796875 2.828125q0.8125 0.9375 1.890625 0.9375q1.09375 0 1.859375 -0.890625q0.765625 -0.890625 0.765625 -2.734375q0 -2.015625 -0.78125 -2.953125q-0.78125 -0.953125 -1.921875 -0.953125q-1.109375 0 -1.859375 0.90625q-0.75 0.90625 -0.75 2.859375zm16.016357 1.75l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 
0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.110107 5.875l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm14.031921 -1.5l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm1.5427246 -10.1875l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm4.5354004 0l0 -8.546875l-1.484375 0l0 -1.3125l1.484375 0l0 -1.046875q0 -0.984375 0.171875 -1.46875q0.234375 -0.65625 0.84375 -1.046875q0.609375 -0.40625 1.703125 -0.40625q0.703125 0 1.5625 0.15625l-0.25 1.46875q-0.515625 -0.09375 -0.984375 -0.09375q-0.765625 0 -1.078125 0.328125q-0.3125 0.3125 -0.3125 1.203125l0 0.90625l1.921875 0l0 1.3125l-1.921875 0l0 8.546875l-1.65625 0zm4.6989746 3.796875l-0.171875 -1.5625q0.546875 0.140625 0.953125 0.140625q0.546875 0 0.875 -0.1875q0.34375 -0.1875 0.5625 -0.515625q0.15625 -0.25 0.5 -1.25q0.046875 -0.140625 0.15625 -0.40625l-3.734375 -9.875l1.796875 0l2.046875 5.71875q0.40625 1.078125 0.71875 2.28125q0.28125 -1.15625 0.6875 -2.25l2.09375 -5.75l1.671875 0l-3.75 10.03125q-0.59375 1.625 -0.9375 2.234375q-0.4375 0.828125 -1.015625 1.203125q-0.578125 0.390625 -1.375 0.390625q-0.484375 0 -1.078125 -0.203125zm21.042664 -3.796875l0 -1.453125q-1.140625 1.671875 -3.125 1.671875q-0.859375 0 -1.625 -0.328125q-0.75 -0.34375 -1.125 -0.84375q-0.359375 -0.5 -0.515625 -1.234375q-0.09375 -0.5 -0.09375 -1.5625l0 -6.109375l1.671875 0l0 5.46875q0 1.3125 0.09375 1.765625q0.15625 0.65625 0.671875 1.03125q0.515625 0.375 1.265625 0.375q0.75 0 1.40625 -0.375q0.65625 -0.390625 0.921875 -1.046875q0.28125 -0.671875 0.28125 -1.9375l0 -5.28125l1.671875 0l0 9.859375l-1.5 0zm3.2507324 -2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 
0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm16.75 -0.234375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.094421 5.875l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m490.2467 1036.1575l0 42.51465l138.42523 0l0 42.52478" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m490.2467 1036.1575l0 42.51465l138.42523 0l0 39.097656" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m628.67194 1117.7698l-1.1246338 -1.1246338l1.1246338 3.0898438l1.1245728 -3.0898438z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m767.31494 1036.1575l0 42.51465l-138.64563 0l0 42.52478" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m767.31494 1036.1575l0 42.51465l-138.64563 0l0 39.097656" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m628.6693 1117.7698l-1.1246338 -1.1246338l1.1246338 3.0898438l1.1245728 -3.0898438z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m623.2572 704.6588l4.0 47.905518" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m623.2572 704.6588l3.5007324 41.92633" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m625.11194 746.72253l2.0236206 4.3849487l1.2684326 -4.65979z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m628.6562 1198.0052l0 25.002075l385.45148 0l0 -553.4745l-312.66412 0" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m628.6562 1198.0052l0 25.002075l385.45148 0l0 -553.4745l-309.237 0" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m704.87067 669.53284l1.1245728 -1.1246338l-3.0897827 1.1246338l3.0897827 1.1245728z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m701.4305 651.92975l522.5573 3.0775146l0 -581.44293l-519.1407 0" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m701.4304 651.92975l522.5575 3.0775146l0 -581.44293l-515.71375 0" fill-rule="evenodd"></path><path 
fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m708.2742 73.56431l1.1246338 -1.124588l-3.0897827 1.124588l3.0897827 1.1245804z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" d="m808.0315 611.3517l466.8661 0l0 43.653564l-466.8661 0z" fill-rule="nonzero"></path><path fill="#000000" d="m818.5315 638.2717l0 -13.59375l6.03125 0q1.8125 0 2.75 0.359375q0.953125 0.359375 1.515625 1.296875q0.5625 0.921875 0.5625 2.046875q0 1.453125 -0.9375 2.453125q-0.921875 0.984375 -2.890625 1.25q0.71875 0.34375 1.09375 0.671875q0.78125 0.734375 1.484375 1.8125l2.375 3.703125l-2.265625 0l-1.796875 -2.828125q-0.796875 -1.21875 -1.3125 -1.875q-0.5 -0.65625 -0.90625 -0.90625q-0.40625 -0.265625 -0.8125 -0.359375q-0.3125 -0.078125 -1.015625 -0.078125l-2.078125 0l0 6.046875l-1.796875 0zm1.796875 -7.59375l3.859375 0q1.234375 0 1.921875 -0.25q0.703125 -0.265625 1.0625 -0.828125q0.375 -0.5625 0.375 -1.21875q0 -0.96875 -0.703125 -1.578125q-0.703125 -0.625 -2.21875 -0.625l-4.296875 0l0 4.5zm18.176086 4.421875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm15.500732 5.875l0 -1.25q-0.9375 1.46875 -2.75 1.46875q-1.171875 0 -2.171875 -0.640625q-0.984375 -0.65625 -1.53125 -1.8125q-0.53125 -1.171875 -0.53125 -2.6875q0 -1.46875 0.484375 -2.671875q0.5 -1.203125 1.46875 -1.84375q0.984375 -0.640625 2.203125 -0.640625q0.890625 0 1.578125 0.375q0.703125 0.375 1.140625 0.984375l0 -4.875l1.65625 0l0 13.59375l-1.546875 0zm-5.28125 -4.921875q0 1.890625 0.796875 2.828125q0.8125 0.9375 1.890625 0.9375q1.09375 0 1.859375 -0.890625q0.765625 -0.890625 0.765625 -2.734375q0 -2.015625 -0.78125 -2.953125q-0.78125 -0.953125 -1.921875 -0.953125q-1.109375 0 -1.859375 0.90625q-0.75 0.90625 -0.75 2.859375zm9.281921 -6.765625l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm4.1135864 0l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.9783325 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm15.547546 2.265625l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 
-2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm6.546875 2.109375l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm10.366638 0l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm0.9020386 -3.421875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm13.215271 5.15625l3.9375 -14.0625l1.34375 0l-3.9375 14.0625l-1.34375 0zm8.261414 -0.234375l-3.015625 -9.859375l1.71875 0l1.5625 5.6875l0.59375 2.125q0.03125 -0.15625 0.5 -2.03125l1.578125 -5.78125l1.71875 0l1.46875 5.71875l0.484375 1.890625l0.578125 -1.90625l1.6875 -5.703125l1.625 0l-3.078125 9.859375l-1.734375 0l-1.578125 -5.90625l-0.375 -1.671875l-2.0 7.578125l-1.734375 0zm18.394836 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.078857 5.875l0 -13.59375l1.671875 0l0 13.59375l-1.671875 0zm10.613586 -3.609375l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm2.265625 -1.3125q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 
1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.281921 4.921875l0 -9.859375l1.5 0l0 1.390625q0.453125 -0.71875 1.21875 -1.15625q0.78125 -0.453125 1.765625 -0.453125q1.09375 0 1.796875 0.453125q0.703125 0.453125 0.984375 1.28125q1.171875 -1.734375 3.046875 -1.734375q1.46875 0 2.25 0.8125q0.796875 0.8125 0.796875 2.5l0 6.765625l-1.671875 0l0 -6.203125q0 -1.0 -0.15625 -1.4375q-0.15625 -0.453125 -0.59375 -0.71875q-0.421875 -0.265625 -1.0 -0.265625q-1.03125 0 -1.71875 0.6875q-0.6875 0.6875 -0.6875 2.21875l0 5.71875l-1.671875 0l0 -6.40625q0 -1.109375 -0.40625 -1.65625q-0.40625 -0.5625 -1.34375 -0.5625q-0.703125 0 -1.3125 0.375q-0.59375 0.359375 -0.859375 1.078125q-0.265625 0.71875 -0.265625 2.0625l0 5.109375l-1.671875 0zm22.290833 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm14.293396 9.65625l0 -13.640625l1.53125 0l0 1.28125q0.53125 -0.75 1.203125 -1.125q0.6875 -0.375 1.640625 -0.375q1.265625 0 2.234375 0.65625q0.96875 0.640625 1.453125 1.828125q0.5 1.1875 0.5 2.59375q0 1.515625 -0.546875 2.734375q-0.546875 1.203125 -1.578125 1.84375q-1.03125 0.640625 -2.171875 0.640625q-0.84375 0 -1.515625 -0.34375q-0.65625 -0.359375 -1.078125 -0.890625l0 4.796875l-1.671875 0zm1.515625 -8.65625q0 1.90625 0.765625 2.8125q0.78125 0.90625 1.875 0.90625q1.109375 0 1.890625 -0.9375q0.796875 -0.9375 0.796875 -2.921875q0 -1.875 -0.78125 -2.8125q-0.765625 -0.9375 -1.84375 -0.9375q-1.0625 0 -1.890625 1.0q-0.8125 1.0 -0.8125 2.890625zm15.297607 3.65625q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 
-0.609375zm3.7819214 5.75l1.609375 0.25q0.109375 0.75 0.578125 1.09375q0.609375 0.453125 1.6875 0.453125q1.171875 0 1.796875 -0.46875q0.62506104 -0.453125 0.85943604 -1.28125q0.125 -0.515625 0.109375 -2.15625q-1.093811 1.296875 -2.718811 1.296875q-2.03125 0 -3.15625 -1.46875q-1.109375 -1.46875 -1.109375 -3.515625q0 -1.40625 0.515625 -2.59375q0.515625 -1.203125 1.484375 -1.84375q0.96875 -0.65625 2.265625 -0.65625q1.75 0 2.875061 1.40625l0 -1.1875l1.546875 0l0 8.515625q0 2.3125 -0.46875 3.265625q-0.46875 0.96875 -1.484375 1.515625q-1.015686 0.5625 -2.500061 0.5625q-1.765625 0 -2.859375 -0.796875q-1.078125 -0.796875 -1.03125 -2.390625zm1.375 -5.921875q0 1.953125 0.765625 2.84375q0.78125 0.890625 1.9375 0.890625q1.140625 0 1.921936 -0.890625q0.78125 -0.890625 0.78125 -2.78125q0 -1.8125 -0.8125 -2.71875q-0.79693604 -0.921875 -1.921936 -0.921875q-1.109375 0 -1.890625 0.90625q-0.78125 0.890625 -0.78125 2.671875zm16.047668 1.9375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm16.12146 5.875l-3.015625 -9.859375l1.71875 0l1.5625 5.6875l0.59375 2.125q0.03125 -0.15625 0.5 -2.03125l1.578125 -5.78125l1.71875 0l1.46875 5.71875l0.484375 1.890625l0.578125 -1.90625l1.6875 -5.703125l1.625 0l-3.078125 9.859375l-1.734375 0l-1.578125 -5.90625l-0.375 -1.671875l-2.0 7.578125l-1.734375 0zm11.6604 -11.6875l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm7.7855225 -1.5l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm1.5270996 1.5l0 -13.59375l1.671875 0l0 4.875q1.171875 -1.359375 2.953125 -1.359375q1.09375 0 1.890625 0.4375q0.8125 0.421875 1.15625 1.1875q0.359375 0.765625 0.359375 2.203125l0 6.25l-1.671875 0l0 -6.25q0 -1.25 -0.546875 -1.8125q-0.546875 -0.578125 -1.53125 -0.578125q-0.75 0 -1.40625 0.390625q-0.640625 0.375 -0.921875 1.046875q-0.28125 0.65625 -0.28125 1.8125l0 5.390625l-1.671875 0zm14.887085 -2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 
0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm16.75 -0.234375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.438232 2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm9.328125 0l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm10.015625 -8.75l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm3.5042725 -4.921875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 
0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.281982 4.921875l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0zm19.21521 -1.5l0.234375 1.484375q-0.703125 0.140625 -1.265625 0.140625q-0.90625 0 -1.40625 -0.28125q-0.5 -0.296875 -0.703125 -0.75q-0.203125 -0.46875 -0.203125 -1.984375l0 -5.65625l-1.234375 0l0 -1.3125l1.234375 0l0 -2.4375l1.65625 -1.0l0 3.4375l1.6875 0l0 1.3125l-1.6875 0l0 5.75q0 0.71875 0.078125 0.921875q0.09375 0.203125 0.296875 0.328125q0.203125 0.125 0.578125 0.125q0.265625 0 0.734375 -0.078125zm0.9020996 -3.421875q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm9.297607 4.921875l0 -13.59375l1.671875 0l0 7.75l3.953125 -4.015625l2.15625 0l-3.765625 3.65625l4.140625 6.203125l-2.0625 0l-3.25 -5.03125l-1.171875 1.125l0 3.90625l-1.671875 0zm16.0625 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm9.110107 5.875l0 -9.859375l1.5 0l0 1.40625q1.09375 -1.625 3.140625 -1.625q0.890625 0 1.640625 0.328125q0.75 0.3125 1.109375 0.84375q0.375 0.515625 0.53125 1.21875q0.09375 0.46875 0.09375 1.625l0 6.0625l-1.671875 0l0 -6.0q0 -1.015625 -0.203125 -1.515625q-0.1875 -0.515625 -0.6875 -0.8125q-0.5 -0.296875 -1.171875 -0.296875q-1.0625 0 -1.84375 0.671875q-0.765625 0.671875 -0.765625 2.578125l0 5.375l-1.671875 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m767.6221 103.74803l179.27557 0l0 43.65355l-179.27557 0z" fill-rule="nonzero"></path><path fill="#000000" d="m778.1221 130.66803l0 -13.59375l6.03125 0q1.8125 0 2.75 0.359375q0.953125 0.359375 1.515625 1.296875q0.5625 0.921875 0.5625 2.046875q0 1.453125 -0.9375 2.453125q-0.921875 0.984375 -2.890625 1.25q0.71875 0.34375 1.09375 0.671875q0.78125 0.734375 1.484375 1.8125l2.375 3.703125l-2.265625 0l-1.796875 -2.828125q-0.796875 -1.21875 -1.3125 -1.875q-0.5 -0.65625 -0.90625 -0.90625q-0.40625 -0.265625 -0.8125 -0.359375q-0.3125 -0.078125 -1.015625 -0.078125l-2.078125 0l0 6.046875l-1.796875 0zm1.796875 -7.59375l3.859375 0q1.234375 0 1.921875 -0.25q0.703125 -0.265625 1.0625 -0.828125q0.375 -0.5625 0.375 -1.21875q0 -0.96875 -0.703125 
-1.578125q-0.703125 -0.625 -2.21875 -0.625l-4.296875 0l0 4.5zm18.176025 4.421875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.438232 2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm9.375 -1.984375q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm15.735107 4.921875l0 -1.453125q-1.140625 1.671875 -3.125 1.671875q-0.859375 0 -1.625 -0.328125q-0.75 -0.34375 -1.125 -0.84375q-0.359375 -0.5 -0.515625 -1.234375q-0.09375 -0.5 -0.09375 -1.5625l0 -6.109375l1.671875 0l0 5.46875q0 1.3125 0.09375 1.765625q0.15625 0.65625 0.671875 1.03125q0.515625 0.375 1.265625 0.375q0.75 0 1.40625 -0.375q0.65625 -0.390625 0.921875 -1.046875q0.28125 -0.671875 0.28125 -1.9375l0 -5.28125l1.671875 0l0 9.859375l-1.5 0zm3.9069824 0l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.6657715 -3.609375l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 
0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm9.640625 0.4375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm13.590271 2.015625l1.625 -0.21875q0.0625 1.546875 0.578125 2.125q0.53125 0.578125 1.4375 0.578125q0.6875 0 1.171875 -0.3125q0.5 -0.3125 0.671875 -0.84375q0.1875 -0.53125 0.1875 -1.703125l0 -9.359375l1.8125 0l0 9.265625q0 1.703125 -0.421875 2.640625q-0.40625 0.9375 -1.3125 1.4375q-0.890625 0.484375 -2.09375 0.484375q-1.796875 0 -2.75 -1.03125q-0.9375 -1.03125 -0.90625 -3.0625zm9.640625 -0.515625l1.6875 -0.140625q0.125 1.015625 0.5625 1.671875q0.4375 0.65625 1.359375 1.0625q0.9375 0.40625 2.09375 0.40625q1.03125 0 1.8125 -0.3125q0.796875 -0.3125 1.1875 -0.84375q0.390625 -0.53125 0.390625 -1.15625q0 -0.640625 -0.375 -1.109375q-0.375 -0.484375 -1.234375 -0.8125q-0.546875 -0.21875 -2.421875 -0.65625q-1.875 -0.453125 -2.625 -0.859375q-0.96875 -0.515625 -1.453125 -1.265625q-0.46875 -0.75 -0.46875 -1.6875q0 -1.03125 0.578125 -1.921875q0.59375 -0.90625 1.703125 -1.359375q1.125 -0.46875 2.5 -0.46875q1.515625 0 2.671875 0.484375q1.15625 0.484375 1.765625 1.4375q0.625 0.9375 0.671875 2.140625l-1.71875 0.125q-0.140625 -1.28125 -0.953125 -1.9375q-0.796875 -0.671875 -2.359375 -0.671875q-1.625 0 -2.375 0.609375q-0.75 0.59375 -0.75 1.4375q0 0.734375 0.53125 1.203125q0.515625 0.46875 2.703125 0.96875q2.203125 0.5 3.015625 0.875q1.1875 0.546875 1.75 1.390625q0.578125 0.828125 0.578125 1.921875q0 1.09375 -0.625 2.0625q-0.625 0.953125 -1.796875 1.484375q-1.15625 0.53125 -2.609375 0.53125q-1.84375 0 -3.09375 -0.53125q-1.25 -0.546875 -1.96875 -1.625q-0.703125 -1.078125 -0.734375 -2.453125zm12.5061035 -2.25q0 -3.390625 1.8125 -5.296875q1.828125 -1.921875 4.703125 -1.921875q1.875 0 3.390625 0.90625q1.515625 0.890625 2.296875 2.5q0.796875 1.609375 0.796875 3.65625q0 2.0625 -0.84375 3.703125q-0.828125 1.625 -2.359375 2.46875q-1.53125 0.84375 -3.296875 0.84375q-1.921875 0 -3.4375 -0.921875q-1.5 -0.9375 -2.28125 -2.53125q-0.78125 -1.609375 -0.78125 -3.40625zm1.859375 0.03125q0 2.453125 1.3125 3.875q1.328125 1.40625 3.3125 1.40625q2.03125 0 3.34375 -1.421875q1.3125 -1.4375 1.3125 -4.0625q0 -1.65625 -0.5625 -2.890625q-0.546875 -1.234375 -1.640625 -1.921875q-1.078125 -0.6875 -2.421875 -0.6875q-1.90625 0 -3.28125 1.3125q-1.375 1.3125 -1.375 4.390625zm13.183289 6.59375l0 -13.59375l1.84375 0l7.140625 10.671875l0 -10.671875l1.71875 0l0 13.59375l-1.84375 0l-7.140625 -10.6875l0 10.6875l-1.71875 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m529.084 131.11548l-343.0866 -1.102356" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m529.084 131.11548l-337.08667 -1.0830841" fill-rule="evenodd"></path><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m192.00266 128.38068l-4.5433807 1.637146l4.5327606 1.6663055z" fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0" 
d="m258.7034 136.56955l156.34647 0l0 70.267715l-156.34647 0z" fill-rule="nonzero"></path><path fill="#000000" d="m269.17215 163.48955l0 -13.59375l5.125 0q1.359375 0 2.078125 0.125q1.0 0.171875 1.671875 0.640625q0.671875 0.46875 1.078125 1.3125q0.421875 0.84375 0.421875 1.84375q0 1.734375 -1.109375 2.9375q-1.09375 1.203125 -3.984375 1.203125l-3.484375 0l0 5.53125l-1.796875 0zm1.796875 -7.140625l3.515625 0q1.75 0 2.46875 -0.640625q0.734375 -0.65625 0.734375 -1.828125q0 -0.859375 -0.4375 -1.46875q-0.421875 -0.609375 -1.125 -0.796875q-0.453125 -0.125 -1.671875 -0.125l-3.484375 0l0 4.859375zm16.865448 5.921875q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.0632324 4.9375l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm5.556427 -2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm16.75 -0.234375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 
-0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm13.012146 5.875l5.234375 -13.59375l1.9375 0l5.5625 13.59375l-2.046875 0l-1.59375 -4.125l-5.6875 0l-1.484375 4.125l-1.921875 0zm3.921875 -5.578125l4.609375 0l-1.40625 -3.78125q-0.65625 -1.703125 -0.96875 -2.8125q-0.265625 1.3125 -0.734375 2.59375l-1.5 4.0zm10.021698 5.578125l0 -13.59375l5.125 0q1.359375 0 2.078125 0.125q1.0 0.171875 1.671875 0.640625q0.671875 0.46875 1.078125 1.3125q0.421875 0.84375 0.421875 1.84375q0 1.734375 -1.109375 2.9375q-1.09375 1.203125 -3.984375 1.203125l-3.484375 0l0 5.53125l-1.796875 0zm1.796875 -7.140625l3.515625 0q1.75 0 2.46875 -0.640625q0.734375 -0.65625 0.734375 -1.828125q0 -0.859375 -0.4375 -1.46875q-0.421875 -0.609375 -1.125 -0.796875q-0.453125 -0.125 -1.671875 -0.125l-3.484375 0l0 4.859375zm10.943573 7.140625l0 -13.59375l1.8125 0l0 13.59375l-1.8125 0zm9.835358 0l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.978302 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.438202 2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm10.0 6.71875l0 -13.640625l1.53125 0l0 1.28125q0.53125 -0.75 1.203125 -1.125q0.6875 -0.375 1.640625 -0.375q1.265625 0 2.234375 0.65625q0.96875 0.640625 1.453125 1.828125q0.5 1.1875 0.5 2.59375q0 1.515625 -0.546875 2.734375q-0.546875 1.203125 -1.578125 1.84375q-1.03125 0.640625 -2.171875 0.640625q-0.84375 0 -1.515625 -0.34375q-0.65625 -0.359375 -1.078125 -0.890625l0 4.796875l-1.671875 0zm1.515625 -8.65625q0 1.90625 0.765625 2.8125q0.78125 0.90625 1.875 0.90625q1.109375 0 1.890625 -0.9375q0.796875 -0.9375 0.796875 -2.921875q0 -1.875 
-0.78125 -2.8125q-0.765625 -0.9375 -1.84375 -0.9375q-1.0625 0 -1.890625 1.0q-0.8125 1.0 -0.8125 2.890625z" fill-rule="nonzero"></path><path fill="#000000" d="m276.73465 183.88017q-0.828125 0.921875 -1.8125 1.390625q-0.96875 0.453125 -2.09375 0.453125q-2.09375 0 -3.3125 -1.40625q-1.0 -1.15625 -1.0 -2.578125q0 -1.265625 0.8125 -2.28125q0.8125 -1.015625 2.421875 -1.78125q-0.90625 -1.0625 -1.21875 -1.71875q-0.296875 -0.65625 -0.296875 -1.265625q0 -1.234375 0.953125 -2.125q0.953125 -0.90625 2.421875 -0.90625q1.390625 0 2.265625 0.859375q0.890625 0.84375 0.890625 2.046875q0 1.9375 -2.5625 3.3125l2.4375 3.09375q0.421875 -0.8125 0.640625 -1.890625l1.734375 0.375q-0.4375 1.78125 -1.203125 2.9375q0.9375 1.234375 2.125 2.078125l-1.125 1.328125q-1.0 -0.640625 -2.078125 -1.921875zm-3.40625 -7.078125q1.09375 -0.640625 1.40625 -1.125q0.328125 -0.484375 0.328125 -1.0625q0 -0.703125 -0.453125 -1.140625q-0.4375 -0.4375 -1.09375 -0.4375q-0.671875 0 -1.125 0.4375q-0.453125 0.421875 -0.453125 1.0625q0 0.3125 0.15625 0.65625q0.171875 0.34375 0.5 0.734375l0.734375 0.875zm2.359375 5.765625l-3.0625 -3.796875q-1.359375 0.8125 -1.84375 1.5q-0.46875 0.6875 -0.46875 1.375q0 0.8125 0.65625 1.703125q0.671875 0.890625 1.875 0.890625q0.75 0 1.546875 -0.46875q0.8125 -0.46875 1.296875 -1.203125zm17.283142 2.921875l0 -1.25q-0.9375 1.46875 -2.75 1.46875q-1.171875 0 -2.171875 -0.640625q-0.984375 -0.65625 -1.53125 -1.8125q-0.53125 -1.171875 -0.53125 -2.6875q0 -1.46875 0.484375 -2.671875q0.5 -1.203125 1.46875 -1.84375q0.984375 -0.640625 2.203125 -0.640625q0.890625 0 1.578125 0.375q0.703125 0.375 1.140625 0.984375l0 -4.875l1.65625 0l0 13.59375l-1.546875 0zm-5.28125 -4.921875q0 1.890625 0.796875 2.828125q0.8125 0.9375 1.890625 0.9375q1.09375 0 1.859375 -0.890625q0.765625 -0.890625 0.765625 -2.734375q0 -2.015625 -0.78125 -2.953125q-0.78125 -0.953125 -1.921875 -0.953125q-1.109375 0 -1.859375 0.90625q-0.75 0.90625 -0.75 2.859375zm9.281952 -6.765625l0 -1.90625l1.671875 0l0 1.90625l-1.671875 0zm0 11.6875l0 -9.859375l1.671875 0l0 9.859375l-1.671875 0zm3.4573364 -2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm10.0 6.71875l0 -13.640625l1.53125 0l0 1.28125q0.53125 -0.75 1.203125 -1.125q0.6875 -0.375 1.640625 -0.375q1.265625 0 2.234375 0.65625q0.96875 0.640625 1.453125 1.828125q0.5 1.1875 0.5 2.59375q0 1.515625 -0.546875 2.734375q-0.546875 1.203125 -1.578125 1.84375q-1.03125 0.640625 -2.171875 0.640625q-0.84375 0 -1.515625 -0.34375q-0.65625 -0.359375 -1.078125 -0.890625l0 4.796875l-1.671875 0zm1.515625 -8.65625q0 1.90625 0.765625 2.8125q0.78125 0.90625 
1.875 0.90625q1.109375 0 1.890625 -0.9375q0.796875 -0.9375 0.796875 -2.921875q0 -1.875 -0.78125 -2.8125q-0.765625 -0.9375 -1.84375 -0.9375q-1.0625 0 -1.890625 1.0q-0.8125 1.0 -0.8125 2.890625zm8.828827 4.875l0 -13.59375l1.671875 0l0 13.59375l-1.671875 0zm10.613586 -1.21875q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm4.000702 8.734375l-0.171875 -1.5625q0.546875 0.140625 0.953125 0.140625q0.546875 0 0.875 -0.1875q0.34375 -0.1875 0.5625 -0.515625q0.15625 -0.25 0.5 -1.25q0.046875 -0.140625 0.15625 -0.40625l-3.734375 -9.875l1.796875 0l2.046875 5.71875q0.40625 1.078125 0.71875 2.28125q0.28125 -1.15625 0.6875 -2.25l2.09375 -5.75l1.671875 0l-3.75 10.03125q-0.59375 1.625 -0.9375 2.234375q-0.4375 0.828125 -1.015625 1.203125q-0.578125 0.390625 -1.375 0.390625q-0.484375 0 -1.078125 -0.203125zm14.589569 -0.015625l0 -13.640625l1.53125 0l0 1.28125q0.53125 -0.75 1.203125 -1.125q0.6875 -0.375 1.640625 -0.375q1.265625 0 2.234375 0.65625q0.96875 0.640625 1.453125 1.828125q0.5 1.1875 0.5 2.59375q0 1.515625 -0.546875 2.734375q-0.546875 1.203125 -1.578125 1.84375q-1.03125 0.640625 -2.171875 0.640625q-0.84375 0 -1.515625 -0.34375q-0.65625 -0.359375 -1.078125 -0.890625l0 4.796875l-1.671875 0zm1.515625 -8.65625q0 1.90625 0.765625 2.8125q0.78125 0.90625 1.875 0.90625q1.109375 0 1.890625 -0.9375q0.796875 -0.9375 0.796875 -2.921875q0 -1.875 -0.78125 -2.8125q-0.765625 -0.9375 -1.84375 -0.9375q-1.0625 0 -1.890625 1.0q-0.8125 1.0 -0.8125 2.890625zm15.297577 3.65625q-0.9375 0.796875 -1.796875 1.125q-0.859375 0.3125 -1.84375 0.3125q-1.609375 0 -2.484375 -0.78125q-0.875 -0.796875 -0.875 -2.03125q0 -0.734375 0.328125 -1.328125q0.328125 -0.59375 0.859375 -0.953125q0.53125 -0.359375 1.203125 -0.546875q0.5 -0.140625 1.484375 -0.25q2.03125 -0.25 2.984375 -0.578125q0 -0.34375 0 -0.4375q0 -1.015625 -0.46875 -1.4375q-0.640625 -0.5625 -1.90625 -0.5625q-1.171875 0 -1.734375 0.40625q-0.5625 0.40625 -0.828125 1.46875l-1.640625 -0.234375q0.234375 -1.046875 0.734375 -1.6875q0.515625 -0.640625 1.46875 -0.984375q0.96875 -0.359375 2.25 -0.359375q1.265625 0 2.046875 0.296875q0.78125 0.296875 1.15625 0.75q0.375 0.453125 0.515625 1.140625q0.09375 0.421875 0.09375 1.53125l0 2.234375q0 2.328125 0.09375 2.953125q0.109375 0.609375 0.4375 1.171875l-1.75 0q-0.265625 -0.515625 -0.328125 -1.21875zm-0.140625 -3.71875q-0.90625 0.359375 -2.734375 0.625q-1.03125 0.140625 -1.453125 0.328125q-0.421875 0.1875 -0.65625 0.546875q-0.234375 0.359375 -0.234375 0.796875q0 0.671875 0.5 
1.125q0.515625 0.4375 1.484375 0.4375q0.96875 0 1.71875 -0.421875q0.75 -0.4375 1.109375 -1.15625q0.265625 -0.578125 0.265625 -1.671875l0 -0.609375zm3.7819824 5.75l1.609375 0.25q0.109375 0.75 0.578125 1.09375q0.609375 0.453125 1.6875 0.453125q1.171875 0 1.796875 -0.46875q0.625 -0.453125 0.859375 -1.28125q0.125 -0.515625 0.109375 -2.15625q-1.09375 1.296875 -2.71875 1.296875q-2.03125 0 -3.15625 -1.46875q-1.109375 -1.46875 -1.109375 -3.515625q0 -1.40625 0.515625 -2.59375q0.515625 -1.203125 1.484375 -1.84375q0.96875 -0.65625 2.265625 -0.65625q1.75 0 2.875 1.40625l0 -1.1875l1.546875 0l0 8.515625q0 2.3125 -0.46875 3.265625q-0.46875 0.96875 -1.484375 1.515625q-1.015625 0.5625 -2.5 0.5625q-1.765625 0 -2.859375 -0.796875q-1.078125 -0.796875 -1.03125 -2.390625zm1.375 -5.921875q0 1.953125 0.765625 2.84375q0.78125 0.890625 1.9375 0.890625q1.140625 0 1.921875 -0.890625q0.78125 -0.890625 0.78125 -2.78125q0 -1.8125 -0.8125 -2.71875q-0.796875 -0.921875 -1.921875 -0.921875q-1.109375 0 -1.890625 0.90625q-0.78125 0.890625 -0.78125 2.671875zm16.047577 1.9375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875z" fill-rule="nonzero"></path><path fill="#ffffff" d="m94.25984 75.59843l0 0c0 -12.054596 10.597107 -21.826775 23.669289 -21.826775l0 0c13.072197 0 23.669289 9.772179 23.669289 21.826775l0 0c0 12.054588 -10.597092 21.826767 -23.669289 21.826767l0 0c-13.072182 0 -23.669289 -9.772179 -23.669289 -21.826767z" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m94.25984 75.59843l0 0c0 -12.054596 10.597107 -21.826775 23.669289 -21.826775l0 0c13.072197 0 23.669289 9.772179 23.669289 21.826775l0 0c0 12.054588 -10.597092 21.826767 -23.669289 21.826767l0 0c-13.072182 0 -23.669289 -9.772179 -23.669289 -21.826767z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m117.92913 97.42519l1.1653595 119.55906" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m117.92913 97.42519l1.1653595 119.55906" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m117.92913 128.50131l29.574806 42.48819" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m117.92913 128.50131l29.574806 42.48819" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m91.50131 170.50131l26.425194 -41.07086" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m91.50131 170.50131l26.425194 -41.07086" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m235.77428 40.0l179.27559 0l0 48.0l-179.27559 0z" fill-rule="nonzero"></path><path fill="#000000" d="m273.33563 65.59187l0 -1.609375l5.765625 0l0 5.046875q-1.328125 1.0625 -2.75 1.59375q-1.40625 0.53125 -2.890625 0.53125q-2.0 0 -3.640625 -0.859375q-1.625 -0.859375 -2.46875 -2.484375q-0.828125 -1.625 -0.828125 -3.625q0 -1.984375 
0.828125 -3.703125q0.828125 -1.71875 2.390625 -2.546875q1.5625 -0.84375 3.59375 -0.84375q1.46875 0 2.65625 0.484375q1.203125 0.46875 1.875 1.328125q0.671875 0.84375 1.03125 2.21875l-1.625 0.4375q-0.3125 -1.03125 -0.765625 -1.625q-0.453125 -0.59375 -1.296875 -0.953125q-0.84375 -0.359375 -1.875 -0.359375q-1.234375 0 -2.140625 0.375q-0.890625 0.375 -1.453125 1.0q-0.546875 0.609375 -0.84375 1.34375q-0.53125 1.25 -0.53125 2.734375q0 1.8125 0.625 3.046875q0.640625 1.21875 1.828125 1.8125q1.203125 0.59375 2.546875 0.59375q1.171875 0 2.28125 -0.453125q1.109375 -0.453125 1.6875 -0.953125l0 -2.53125l-4.0 0zm8.183289 5.328125l0 -13.59375l9.84375 0l0 1.59375l-8.046875 0l0 4.171875l7.53125 0l0 1.59375l-7.53125 0l0 4.625l8.359375 0l0 1.609375l-10.15625 0zm15.865448 0l0 -12.0l-4.46875 0l0 -1.59375l10.765625 0l0 1.59375l-4.5 0l0 12.0l-1.796875 0zm11.65741 0.234375l3.9375 -14.0625l1.34375 0l-3.9375 14.0625l-1.34375 0zm6.417694 -0.234375l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.978302 -3.171875l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875zm8.438202 2.9375l1.65625 -0.265625q0.140625 1.0 0.765625 1.53125q0.640625 0.515625 1.78125 0.515625q1.15625 0 1.703125 -0.46875q0.5625 -0.46875 0.5625 -1.09375q0 -0.5625 -0.484375 -0.890625q-0.34375 -0.21875 -1.703125 -0.5625q-1.84375 -0.46875 -2.5625 -0.796875q-0.703125 -0.34375 -1.078125 -0.9375q-0.359375 -0.609375 -0.359375 -1.328125q0 -0.65625 0.296875 -1.21875q0.3125 -0.5625 0.828125 -0.9375q0.390625 -0.28125 1.0625 -0.484375q0.671875 -0.203125 1.4375 -0.203125q1.171875 0 2.046875 0.34375q0.875 0.328125 1.28125 0.90625q0.421875 0.5625 0.578125 1.515625l-1.625 0.21875q-0.109375 -0.75 -0.65625 -1.171875q-0.53125 -0.4375 -1.5 -0.4375q-1.15625 0 -1.640625 0.390625q-0.484375 0.375 -0.484375 0.875q0 0.328125 0.203125 0.59375q0.203125 0.265625 0.640625 0.4375q0.25 0.09375 1.46875 0.4375q1.765625 0.46875 2.46875 0.765625q0.703125 0.296875 1.09375 0.875q0.40625 0.578125 0.40625 1.4375q0 0.828125 -0.484375 1.578125q-0.484375 0.734375 -1.40625 1.140625q-0.921875 0.390625 -2.078125 0.390625q-1.921875 0 -2.9375 -0.796875q-1.0 -0.796875 -1.28125 -2.359375zm9.375 -1.984375q0 -2.734375 1.53125 -4.0625q1.265625 -1.09375 3.09375 -1.09375q2.03125 0 3.3125 1.34375q1.296875 1.328125 1.296875 3.671875q0 1.90625 -0.578125 3.0q-0.5625 1.078125 -1.65625 1.6875q-1.078125 0.59375 -2.375 0.59375q-2.0625 0 -3.34375 -1.328125q-1.28125 -1.328125 -1.28125 -3.8125zm1.71875 0q0 1.890625 0.828125 2.828125q0.828125 0.9375 2.078125 0.9375q1.25 0 2.0625 -0.9375q0.828125 -0.953125 0.828125 -2.890625q0 -1.828125 -0.828125 -2.765625q-0.828125 -0.9375 -2.0625 -0.9375q-1.25 0 -2.078125 0.9375q-0.828125 0.9375 -0.828125 2.828125zm15.735107 4.921875l0 -1.453125q-1.140625 1.671875 -3.125 
1.671875q-0.859375 0 -1.625 -0.328125q-0.75 -0.34375 -1.125 -0.84375q-0.359375 -0.5 -0.515625 -1.234375q-0.09375 -0.5 -0.09375 -1.5625l0 -6.109375l1.671875 0l0 5.46875q0 1.3125 0.09375 1.765625q0.15625 0.65625 0.671875 1.03125q0.515625 0.375 1.265625 0.375q0.75 0 1.40625 -0.375q0.65625 -0.390625 0.921875 -1.046875q0.28125 -0.671875 0.28125 -1.9375l0 -5.28125l1.671875 0l0 9.859375l-1.5 0zm3.906952 0l0 -9.859375l1.5 0l0 1.5q0.578125 -1.046875 1.0625 -1.375q0.484375 -0.34375 1.078125 -0.34375q0.84375 0 1.71875 0.546875l-0.578125 1.546875q-0.609375 -0.359375 -1.234375 -0.359375q-0.546875 0 -0.984375 0.328125q-0.421875 0.328125 -0.609375 0.90625q-0.28125 0.890625 -0.28125 1.953125l0 5.15625l-1.671875 0zm12.665802 -3.609375l1.640625 0.21875q-0.265625 1.6875 -1.375 2.65625q-1.109375 0.953125 -2.734375 0.953125q-2.015625 0 -3.25 -1.3125q-1.21875 -1.328125 -1.21875 -3.796875q0 -1.59375 0.515625 -2.78125q0.53125 -1.203125 1.609375 -1.796875q1.09375 -0.609375 2.359375 -0.609375q1.609375 0 2.625 0.8125q1.015625 0.8125 1.3125 2.3125l-1.625 0.25q-0.234375 -1.0 -0.828125 -1.5q-0.59375 -0.5 -1.421875 -0.5q-1.265625 0 -2.0625 0.90625q-0.78125 0.90625 -0.78125 2.859375q0 1.984375 0.765625 2.890625q0.765625 0.890625 1.984375 0.890625q0.984375 0 1.640625 -0.59375q0.65625 -0.609375 0.84375 -1.859375zm9.640625 0.4375l1.71875 0.21875q-0.40625 1.5 -1.515625 2.34375q-1.09375 0.828125 -2.8125 0.828125q-2.15625 0 -3.421875 -1.328125q-1.265625 -1.328125 -1.265625 -3.734375q0 -2.484375 1.265625 -3.859375q1.28125 -1.375 3.328125 -1.375q1.984375 0 3.234375 1.34375q1.25 1.34375 1.25 3.796875q0 0.140625 -0.015625 0.4375l-7.34375 0q0.09375 1.625 0.921875 2.484375q0.828125 0.859375 2.0625 0.859375q0.90625 0 1.546875 -0.46875q0.65625 -0.484375 1.046875 -1.546875zm-5.484375 -2.703125l5.5 0q-0.109375 -1.234375 -0.625 -1.859375q-0.796875 -0.96875 -2.078125 -0.96875q-1.140625 0 -1.9375 0.78125q-0.78125 0.765625 -0.859375 2.046875z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m119.125984 215.50131l-38.58268 53.07086" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m119.125984 215.50131l-38.58268 53.07086" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m119.62467 215.50131l42.99212 58.992126" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m119.62467 215.50131l42.99212 58.992126" fill-rule="nonzero"></path></g></svg>
+
diff --git a/doc/images/add-new-repository.png b/doc/images/add-new-repository.png
new file mode 100644 (file)
index 0000000..6193844
Binary files /dev/null and b/doc/images/add-new-repository.png differ
diff --git a/doc/images/added-new-repository.png b/doc/images/added-new-repository.png
new file mode 100644 (file)
index 0000000..7d187aa
Binary files /dev/null and b/doc/images/added-new-repository.png differ
diff --git a/doc/images/api-token-host.png b/doc/images/api-token-host.png
new file mode 100644 (file)
index 0000000..cda04d6
Binary files /dev/null and b/doc/images/api-token-host.png differ
diff --git a/doc/images/arvados_federation.svg b/doc/images/arvados_federation.svg
new file mode 100644 (file)
index 0000000..036ce75
--- /dev/null
@@ -0,0 +1,1133 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg version="1.2" width="279.4mm" height="215.9mm" viewBox="0 0 27940 21590" preserveAspectRatio="xMidYMid" fill-rule="evenodd" stroke-width="28.222" stroke-linejoin="round" xmlns="http://www.w3.org/2000/svg" xmlns:ooo="http://xml.openoffice.org/svg/export" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:presentation="http://sun.com/xmlns/staroffice/presentation" xmlns:smil="http://www.w3.org/2001/SMIL20/" xmlns:anim="urn:oasis:names:tc:opendocument:xmlns:animation:1.0" xml:space="preserve">
+ <defs class="ClipPathGroup">
+  <clipPath id="presentation_clip_path" clipPathUnits="userSpaceOnUse">
+   <rect x="0" y="0" width="27940" height="21590"/>
+  </clipPath>
+  <clipPath id="presentation_clip_path_shrink" clipPathUnits="userSpaceOnUse">
+   <rect x="27" y="21" width="27885" height="21547"/>
+  </clipPath>
+ </defs>
+ <defs>
+  <font id="EmbeddedFont_1" horiz-adv-x="2048">
+   <font-face font-family="Liberation Sans embedded" units-per-em="2048" font-weight="normal" font-style="normal" ascent="1852" descent="423"/>
+   <missing-glyph horiz-adv-x="2048" d="M 0,0 L 2047,0 2047,2047 0,2047 0,0 Z"/>
+   <glyph unicode="y" horiz-adv-x="1059" d="M 604,1 C 579,-64 553,-123 527,-175 500,-227 471,-272 438,-309 405,-346 369,-374 329,-394 289,-413 243,-423 191,-423 168,-423 147,-423 128,-423 109,-423 88,-420 67,-414 L 67,-279 C 80,-282 94,-284 110,-284 126,-284 140,-284 151,-284 204,-284 253,-264 298,-225 343,-186 383,-124 417,-38 L 434,5 5,1082 197,1082 425,484 C 432,466 440,442 451,412 461,382 471,352 482,322 492,292 501,265 509,241 517,217 522,202 523,196 525,203 530,218 538,240 545,261 554,285 564,312 573,339 583,366 593,393 603,420 611,444 618,464 L 830,1082 1020,1082 604,1 Z"/>
+   <glyph unicode="x" horiz-adv-x="1033" d="M 801,0 L 510,444 217,0 23,0 408,556 41,1082 240,1082 510,661 778,1082 979,1082 612,558 1002,0 801,0 Z"/>
+   <glyph unicode="w" horiz-adv-x="1535" d="M 1174,0 L 965,0 792,698 C 787,716 781,738 776,765 770,792 764,818 759,843 752,872 746,903 740,934 734,904 728,874 721,845 716,820 710,793 704,766 697,739 691,715 686,694 L 508,0 300,0 -3,1082 175,1082 358,347 C 363,332 367,313 372,291 377,268 381,246 386,225 391,200 396,175 401,149 406,174 412,199 418,223 423,244 429,265 434,286 439,307 444,325 448,339 L 644,1082 837,1082 1026,339 C 1031,322 1036,302 1041,280 1046,258 1051,237 1056,218 1061,195 1067,172 1072,149 1077,174 1083,199 1088,223 1093,244 1098,265 1103,288 1108,310 1112,330 1117,347 L 1308,1082 1484,1082 1174,0 Z"/>
+   <glyph unicode="v" horiz-adv-x="1059" d="M 613,0 L 400,0 7,1082 199,1082 437,378 C 442,363 447,346 454,325 460,304 466,282 473,259 480,236 486,215 492,194 497,173 502,155 506,141 510,155 515,173 522,194 528,215 534,236 541,258 548,280 555,302 562,323 569,344 575,361 580,376 L 826,1082 1017,1082 613,0 Z"/>
+   <glyph unicode="u" horiz-adv-x="901" d="M 314,1082 L 314,396 C 314,343 318,299 326,264 333,229 346,200 363,179 380,157 403,142 432,133 460,124 495,119 537,119 580,119 618,127 653,142 687,157 716,178 741,207 765,235 784,270 797,312 810,353 817,401 817,455 L 817,1082 997,1082 997,228 C 997,205 997,181 998,156 998,131 998,107 999,85 1000,62 1000,43 1001,27 1002,11 1002,3 1003,3 L 833,3 C 832,6 832,15 831,30 830,44 830,61 829,79 828,98 827,117 826,136 825,156 825,172 825,185 L 822,185 C 805,154 786,125 765,100 744,75 720,53 693,36 666,18 634,4 599,-6 564,-15 523,-20 476,-20 416,-20 364,-13 321,2 278,17 242,39 214,70 186,101 166,140 153,188 140,236 133,294 133,361 L 133,1082 314,1082 Z"/>
+   <glyph unicode="t" horiz-adv-x="531" d="M 554,8 C 527,1 499,-5 471,-10 442,-14 409,-16 372,-16 228,-16 156,66 156,229 L 156,951 31,951 31,1082 163,1082 216,1324 336,1324 336,1082 536,1082 536,951 336,951 336,268 C 336,216 345,180 362,159 379,138 408,127 450,127 467,127 484,128 501,131 517,134 535,137 554,141 L 554,8 Z"/>
+   <glyph unicode="s" horiz-adv-x="927" d="M 950,299 C 950,248 940,203 921,164 901,124 872,91 835,64 798,37 752,16 698,2 643,-13 581,-20 511,-20 448,-20 392,-15 342,-6 291,4 247,20 209,41 171,62 139,91 114,126 88,161 69,203 57,254 L 216,285 C 231,227 263,185 311,158 359,131 426,117 511,117 550,117 585,120 618,125 650,130 678,140 701,153 724,166 743,183 756,205 769,226 775,253 775,285 775,318 767,345 752,366 737,387 715,404 688,418 661,432 628,444 589,455 550,465 507,476 460,489 417,500 374,513 331,527 288,541 250,560 216,583 181,606 153,634 132,668 111,702 100,745 100,796 100,895 135,970 206,1022 276,1073 378,1099 513,1099 632,1099 727,1078 798,1036 868,994 912,927 931,834 L 769,814 C 763,842 752,866 736,885 720,904 701,919 678,931 655,942 630,951 602,956 573,961 544,963 513,963 432,963 372,951 333,926 294,901 275,864 275,814 275,785 282,761 297,742 311,723 331,707 357,694 382,681 413,669 449,660 485,650 525,640 568,629 597,622 626,614 656,606 686,597 715,587 744,576 772,564 799,550 824,535 849,519 870,500 889,478 908,456 923,430 934,401 945,372 950,338 950,299 Z"/>
+   <glyph unicode="r" horiz-adv-x="556" d="M 142,0 L 142,830 C 142,853 142,876 142,900 141,923 141,946 140,968 139,990 139,1011 138,1030 137,1049 137,1067 136,1082 L 306,1082 C 307,1067 308,1049 309,1030 310,1010 311,990 312,969 313,948 313,929 314,910 314,891 314,874 314,861 L 318,861 C 331,902 344,938 359,969 373,999 390,1024 409,1044 428,1063 451,1078 478,1088 505,1097 537,1102 575,1102 590,1102 604,1101 617,1099 630,1096 641,1094 648,1092 L 648,927 C 636,930 622,933 606,935 590,936 572,937 552,937 511,937 476,928 447,909 418,890 394,865 376,832 357,799 344,759 335,714 326,668 322,618 322,564 L 322,0 142,0 Z"/>
+   <glyph unicode="q" horiz-adv-x="954" d="M 484,-20 C 347,-20 246,26 182,119 118,212 86,351 86,536 86,724 119,865 185,960 250,1055 350,1102 484,1102 529,1102 568,1098 603,1090 637,1082 668,1070 695,1055 722,1039 745,1019 766,996 786,973 804,945 821,914 L 823,914 C 823,931 824,949 825,970 826,990 827,1010 828,1028 829,1046 830,1061 831,1074 832,1087 834,1094 835,1094 L 1008,1094 C 1007,1083 1005,1053 1004,1005 1002,957 1001,889 1001,799 L 1001,-425 821,-425 821,14 C 821,31 821,49 822,68 822,87 822,104 823,121 824,140 824,159 825,178 L 823,178 C 806,144 787,115 766,90 745,65 720,44 693,28 666,11 635,-1 601,-9 566,-16 527,-20 484,-20 Z M 821,554 C 821,633 814,699 801,752 787,805 768,848 743,880 718,912 687,935 652,949 616,962 576,969 532,969 486,969 447,961 414,944 381,927 355,901 335,866 314,831 299,786 290,733 280,679 275,615 275,542 275,470 280,407 289,354 298,301 312,257 333,222 353,187 379,162 412,145 444,128 483,119 530,119 570,119 608,125 643,138 678,150 709,172 736,205 762,237 783,281 798,338 813,394 821,466 821,554 Z"/>
+   <glyph unicode="p" horiz-adv-x="953" d="M 1053,546 C 1053,464 1046,388 1033,319 1020,250 998,190 967,140 936,90 895,51 844,23 793,-6 730,-20 655,-20 578,-20 510,-5 452,24 394,53 350,101 319,168 L 314,168 C 315,167 315,161 316,150 316,139 316,126 317,110 317,94 317,76 318,57 318,37 318,17 318,-2 L 318,-425 138,-425 138,864 C 138,891 138,916 138,940 137,964 137,986 136,1005 135,1025 135,1042 134,1056 133,1070 133,1077 132,1077 L 306,1077 C 307,1075 308,1068 309,1057 310,1045 311,1031 312,1014 313,998 314,980 315,961 316,943 316,925 316,908 L 320,908 C 337,943 356,972 377,997 398,1021 423,1041 450,1057 477,1072 508,1084 542,1091 575,1098 613,1101 655,1101 730,1101 793,1088 844,1061 895,1034 936,997 967,949 998,900 1020,842 1033,774 1046,705 1053,629 1053,546 Z M 864,542 C 864,609 860,668 852,720 844,772 830,816 811,852 791,888 765,915 732,934 699,953 658,962 609,962 569,962 531,956 496,945 461,934 430,912 404,880 377,848 356,804 341,748 326,691 318,618 318,528 318,451 324,387 337,334 350,281 368,238 393,205 417,172 447,149 483,135 519,120 560,113 607,113 657,113 699,123 732,142 765,161 791,189 811,226 830,263 844,308 852,361 860,414 864,474 864,542 Z"/>
+   <glyph unicode="o" horiz-adv-x="980" d="M 1053,542 C 1053,353 1011,212 928,119 845,26 724,-20 565,-20 490,-20 422,-9 363,14 304,37 254,71 213,118 172,165 140,223 119,294 97,364 86,447 86,542 86,915 248,1102 571,1102 655,1102 728,1090 789,1067 850,1044 900,1009 939,962 978,915 1006,857 1025,787 1044,717 1053,635 1053,542 Z M 864,542 C 864,626 858,695 845,750 832,805 813,848 788,881 763,914 732,937 696,950 660,963 619,969 574,969 528,969 487,962 450,949 413,935 381,912 355,879 329,846 309,802 296,747 282,692 275,624 275,542 275,458 282,389 297,334 312,279 332,235 358,202 383,169 414,146 449,133 484,120 522,113 563,113 609,113 651,120 688,133 725,146 757,168 783,201 809,234 829,278 843,333 857,388 864,458 864,542 Z"/>
+   <glyph unicode="n" horiz-adv-x="900" d="M 825,0 L 825,686 C 825,739 821,783 814,818 806,853 793,882 776,904 759,925 736,941 708,950 679,959 644,963 602,963 559,963 521,956 487,941 452,926 423,904 399,876 374,847 355,812 342,771 329,729 322,681 322,627 L 322,0 142,0 142,853 C 142,876 142,900 142,925 141,950 141,974 140,996 139,1019 139,1038 138,1054 137,1070 137,1078 136,1078 L 306,1078 C 307,1075 307,1066 308,1052 309,1037 310,1021 311,1002 312,984 312,965 313,945 314,926 314,910 314,897 L 317,897 C 334,928 353,957 374,982 395,1007 419,1029 446,1047 473,1064 505,1078 540,1088 575,1097 616,1102 663,1102 723,1102 775,1095 818,1080 861,1065 897,1043 925,1012 953,981 974,942 987,894 1000,845 1006,788 1006,721 L 1006,0 825,0 Z"/>
+   <glyph unicode="m" horiz-adv-x="1456" d="M 768,0 L 768,686 C 768,739 765,783 758,818 751,853 740,882 725,904 709,925 688,941 663,950 638,959 607,963 570,963 532,963 498,956 467,941 436,926 410,904 389,876 367,847 350,812 339,771 327,729 321,681 321,627 L 321,0 142,0 142,853 C 142,876 142,900 142,925 141,950 141,974 140,996 139,1019 139,1038 138,1054 137,1070 137,1078 136,1078 L 306,1078 C 307,1075 307,1066 308,1052 309,1037 310,1021 311,1002 312,984 312,965 313,945 314,926 314,910 314,897 L 317,897 C 333,928 350,957 369,982 388,1007 410,1029 435,1047 460,1064 488,1078 521,1088 553,1097 590,1102 633,1102 715,1102 780,1086 828,1053 875,1020 908,968 927,897 L 930,897 C 946,928 964,957 984,982 1004,1007 1027,1029 1054,1047 1081,1064 1111,1078 1144,1088 1177,1097 1215,1102 1258,1102 1313,1102 1360,1095 1400,1080 1439,1065 1472,1043 1497,1012 1522,981 1541,942 1553,894 1565,845 1571,788 1571,721 L 1571,0 1393,0 1393,686 C 1393,739 1390,783 1383,818 1376,853 1365,882 1350,904 1334,925 1313,941 1288,950 1263,959 1232,963 1195,963 1157,963 1123,956 1092,942 1061,927 1035,906 1014,878 992,850 975,815 964,773 952,731 946,682 946,627 L 946,0 768,0 Z"/>
+   <glyph unicode="l" horiz-adv-x="187" d="M 138,0 L 138,1484 318,1484 318,0 138,0 Z"/>
+   <glyph unicode="k" horiz-adv-x="927" d="M 816,0 L 450,494 318,385 318,0 138,0 138,1484 318,1484 318,557 793,1082 1004,1082 565,617 1027,0 816,0 Z"/>
+   <glyph unicode="i" horiz-adv-x="187" d="M 137,1312 L 137,1484 317,1484 317,1312 137,1312 Z M 137,0 L 137,1082 317,1082 317,0 137,0 Z"/>
+   <glyph unicode="h" horiz-adv-x="874" d="M 317,897 C 337,934 359,965 382,991 405,1016 431,1037 459,1054 487,1071 518,1083 551,1091 584,1098 622,1102 663,1102 732,1102 789,1093 834,1074 878,1055 913,1029 939,996 964,962 982,922 992,875 1001,828 1006,777 1006,721 L 1006,0 825,0 825,686 C 825,732 822,772 817,807 811,842 800,871 784,894 768,917 745,934 716,946 687,957 649,963 602,963 559,963 521,955 487,940 452,925 423,903 399,875 374,847 355,813 342,773 329,733 322,688 322,638 L 322,0 142,0 142,1484 322,1484 322,1098 C 322,1076 322,1054 321,1032 320,1010 320,990 319,971 318,952 317,937 316,924 315,911 315,902 314,897 L 317,897 Z"/>
+   <glyph unicode="g" horiz-adv-x="954" d="M 548,-425 C 486,-425 431,-419 383,-406 335,-393 294,-375 260,-352 226,-328 198,-300 177,-267 156,-234 140,-198 131,-158 L 312,-132 C 324,-182 351,-220 392,-248 433,-274 486,-288 553,-288 594,-288 631,-282 664,-271 697,-260 726,-241 749,-217 772,-191 790,-159 803,-119 816,-79 822,-30 822,27 L 822,201 820,201 C 807,174 790,148 771,123 751,98 727,75 699,56 670,37 637,21 600,10 563,-2 520,-8 472,-8 403,-8 345,4 296,27 247,50 207,84 176,130 145,176 122,233 108,302 93,370 86,449 86,539 86,626 93,704 108,773 122,842 145,901 178,950 210,998 252,1035 304,1061 355,1086 418,1099 492,1099 569,1099 635,1082 692,1047 748,1012 791,962 822,897 L 824,897 C 824,914 825,933 826,953 827,974 828,994 829,1012 830,1031 831,1046 832,1060 833,1073 835,1080 836,1080 L 1007,1080 C 1006,1074 1006,1064 1005,1050 1004,1035 1004,1018 1003,998 1002,978 1002,956 1002,932 1001,907 1001,882 1001,856 L 1001,30 C 1001,-121 964,-234 890,-311 815,-387 701,-425 548,-425 Z M 822,541 C 822,616 814,681 798,735 781,788 760,832 733,866 706,900 676,925 642,941 607,957 572,965 536,965 490,965 451,957 418,941 385,925 357,900 336,866 314,831 298,787 288,734 277,680 272,616 272,541 272,463 277,398 288,345 298,292 314,249 335,216 356,183 383,160 416,146 449,132 488,125 533,125 569,125 604,133 639,148 673,163 704,188 731,221 758,254 780,297 797,350 814,403 822,466 822,541 Z"/>
+   <glyph unicode="f" horiz-adv-x="557" d="M 361,951 L 361,0 181,0 181,951 29,951 29,1082 181,1082 181,1204 C 181,1243 185,1280 192,1314 199,1347 213,1377 233,1402 252,1427 279,1446 313,1461 347,1475 391,1482 445,1482 466,1482 489,1481 512,1479 535,1477 555,1474 572,1470 L 572,1333 C 561,1335 548,1337 533,1339 518,1340 504,1341 492,1341 465,1341 444,1337 427,1330 410,1323 396,1312 387,1299 377,1285 370,1268 367,1248 363,1228 361,1205 361,1179 L 361,1082 572,1082 572,951 361,951 Z"/>
+   <glyph unicode="e" horiz-adv-x="980" d="M 276,503 C 276,446 282,394 294,347 305,299 323,258 348,224 372,189 403,163 441,144 479,125 525,115 578,115 656,115 719,131 766,162 813,193 844,233 861,281 L 1019,236 C 1008,206 992,176 972,146 951,115 924,88 890,64 856,39 814,19 763,4 712,-12 650,-20 578,-20 418,-20 296,28 213,123 129,218 87,360 87,548 87,649 100,735 125,806 150,876 185,933 229,977 273,1021 324,1053 383,1073 442,1092 504,1102 571,1102 662,1102 738,1087 799,1058 860,1029 909,988 946,937 983,885 1009,824 1025,754 1040,684 1048,608 1048,527 L 1048,503 276,503 Z M 862,641 C 852,755 823,838 775,891 727,943 658,969 568,969 538,969 507,964 474,955 441,945 410,928 382,903 354,878 330,845 311,803 292,760 281,706 278,641 L 862,641 Z"/>
+   <glyph unicode="d" horiz-adv-x="954" d="M 821,174 C 788,105 744,55 689,25 634,-5 565,-20 484,-20 347,-20 247,26 183,118 118,210 86,349 86,536 86,913 219,1102 484,1102 566,1102 634,1087 689,1057 744,1027 788,979 821,914 L 823,914 C 823,921 823,931 823,946 822,960 822,975 822,991 821,1006 821,1021 821,1035 821,1049 821,1059 821,1065 L 821,1484 1001,1484 1001,219 C 1001,193 1001,168 1002,143 1002,119 1002,97 1003,77 1004,57 1004,40 1005,26 1006,11 1006,4 1007,4 L 835,4 C 834,11 833,20 832,32 831,44 830,58 829,73 828,89 827,105 826,123 825,140 825,157 825,174 L 821,174 Z M 275,542 C 275,467 280,403 289,350 298,297 313,253 334,219 355,184 381,159 413,143 445,127 484,119 530,119 577,119 619,127 656,142 692,157 722,182 747,217 771,251 789,296 802,351 815,406 821,474 821,554 821,631 815,696 802,749 789,802 771,844 746,877 721,910 691,933 656,948 620,962 579,969 532,969 488,969 450,961 418,946 386,931 359,906 338,872 317,838 301,794 291,740 280,685 275,619 275,542 Z"/>
+   <glyph unicode="c" horiz-adv-x="875" d="M 275,546 C 275,484 280,427 289,375 298,323 313,278 334,241 355,203 384,174 419,153 454,132 497,122 548,122 612,122 666,139 709,173 752,206 778,258 788,328 L 970,328 C 964,283 951,239 931,197 911,155 884,118 850,86 815,54 773,28 724,9 675,-10 618,-20 553,-20 468,-20 396,-6 337,23 278,52 230,91 193,142 156,192 129,251 112,320 95,388 87,462 87,542 87,615 93,679 105,735 117,790 134,839 156,881 177,922 203,957 232,986 261,1014 293,1037 328,1054 362,1071 398,1083 436,1091 474,1098 512,1102 551,1102 612,1102 666,1094 713,1077 760,1060 801,1038 836,1009 870,980 898,945 919,906 940,867 955,824 964,779 L 779,765 C 770,825 746,873 708,908 670,943 616,961 546,961 495,961 452,953 418,936 383,919 355,893 334,859 313,824 298,781 289,729 280,677 275,616 275,546 Z"/>
+   <glyph unicode="b" horiz-adv-x="953" d="M 1053,546 C 1053,169 920,-20 655,-20 573,-20 505,-5 451,25 396,54 352,102 318,168 L 316,168 C 316,150 316,132 315,113 314,94 313,77 312,61 311,45 310,31 309,19 308,8 307,2 306,2 L 132,2 C 133,8 133,18 134,32 135,47 135,64 136,84 137,104 137,126 138,150 138,174 138,199 138,225 L 138,1484 318,1484 318,1061 C 318,1041 318,1022 318,1004 317,985 317,969 316,955 315,938 315,923 314,908 L 318,908 C 351,977 396,1027 451,1057 506,1087 574,1102 655,1102 792,1102 892,1056 957,964 1021,872 1053,733 1053,546 Z M 864,540 C 864,615 859,679 850,732 841,785 826,829 805,864 784,898 758,923 726,939 694,955 655,963 609,963 562,963 520,955 484,940 447,925 417,900 393,866 368,832 350,787 337,732 324,677 318,609 318,529 318,452 324,387 337,334 350,281 368,239 393,206 417,173 447,149 483,135 519,120 560,113 607,113 651,113 689,121 721,136 753,151 780,176 801,210 822,244 838,288 849,343 859,397 864,463 864,540 Z"/>
+   <glyph unicode="a" horiz-adv-x="1060" d="M 414,-20 C 305,-20 224,9 169,66 114,124 87,203 87,303 87,375 101,434 128,480 155,526 190,562 234,588 277,614 327,632 383,642 439,652 496,657 554,657 L 797,657 797,717 C 797,762 792,800 783,832 774,863 759,889 740,908 721,928 697,942 668,951 639,960 604,965 565,965 530,965 499,963 471,958 443,953 419,944 398,931 377,918 361,900 348,878 335,855 327,827 323,793 L 135,810 C 142,853 154,892 173,928 192,963 218,994 253,1020 287,1046 330,1066 382,1081 433,1095 496,1102 569,1102 705,1102 807,1071 876,1009 945,946 979,856 979,738 L 979,272 C 979,219 986,179 1000,152 1014,125 1041,111 1080,111 1090,111 1100,112 1110,113 1120,114 1130,116 1139,118 L 1139,6 C 1116,1 1094,-3 1072,-6 1049,-9 1025,-10 1000,-10 966,-10 937,-5 913,4 888,13 868,26 853,45 838,63 826,86 818,113 810,140 805,171 803,207 L 797,207 C 778,172 757,141 734,113 711,85 684,61 653,42 622,22 588,7 549,-4 510,-15 465,-20 414,-20 Z M 455,115 C 512,115 563,125 606,146 649,167 684,194 713,226 741,259 762,294 776,332 790,371 797,408 797,443 L 797,531 600,531 C 556,531 514,528 475,522 435,517 400,506 370,489 340,472 316,449 299,418 281,388 272,349 272,300 272,241 288,195 320,163 351,131 396,115 455,115 Z"/>
+   <glyph unicode="S" horiz-adv-x="1139" d="M 1272,389 C 1272,330 1261,275 1238,225 1215,175 1179,132 1131,96 1083,59 1023,31 950,11 877,-10 790,-20 690,-20 515,-20 378,11 280,72 182,133 120,222 93,338 L 278,375 C 287,338 302,305 321,275 340,245 367,219 400,198 433,176 473,159 522,147 571,135 629,129 697,129 754,129 806,134 853,144 900,153 941,168 975,188 1009,208 1036,234 1055,266 1074,297 1083,335 1083,379 1083,425 1073,462 1052,491 1031,520 1001,543 963,562 925,581 880,596 827,609 774,622 716,635 652,650 613,659 573,668 534,679 494,689 456,701 420,716 383,730 349,747 317,766 285,785 257,809 234,836 211,863 192,894 179,930 166,965 159,1006 159,1053 159,1120 173,1177 200,1225 227,1272 264,1311 312,1342 360,1373 417,1395 482,1409 547,1423 618,1430 694,1430 781,1430 856,1423 918,1410 980,1396 1032,1375 1075,1348 1118,1321 1152,1287 1178,1247 1203,1206 1224,1159 1239,1106 L 1051,1073 C 1042,1107 1028,1137 1011,1164 993,1191 970,1213 941,1231 912,1249 878,1263 837,1272 796,1281 747,1286 692,1286 627,1286 572,1280 528,1269 483,1257 448,1241 421,1221 394,1201 374,1178 363,1151 351,1124 345,1094 345,1063 345,1021 356,987 377,960 398,933 426,910 462,892 498,874 540,859 587,847 634,835 685,823 738,811 781,801 825,791 868,781 911,770 952,758 991,744 1030,729 1067,712 1102,693 1136,674 1166,650 1191,622 1216,594 1236,561 1251,523 1265,485 1272,440 1272,389 Z"/>
+   <glyph unicode="R" horiz-adv-x="1218" d="M 1164,0 L 798,585 359,585 359,0 168,0 168,1409 831,1409 C 911,1409 982,1400 1044,1382 1105,1363 1157,1337 1199,1302 1241,1267 1273,1225 1295,1175 1317,1125 1328,1069 1328,1006 1328,961 1322,917 1309,874 1296,831 1275,791 1247,755 1219,719 1183,688 1140,662 1097,636 1045,618 984,607 L 1384,0 1164,0 Z M 1136,1004 C 1136,1047 1129,1084 1114,1115 1099,1146 1078,1173 1050,1194 1022,1215 988,1230 948,1241 908,1251 863,1256 812,1256 L 359,1256 359,736 820,736 C 875,736 922,743 962,757 1002,770 1035,789 1061,813 1086,837 1105,865 1118,898 1130,931 1136,966 1136,1004 Z"/>
+   <glyph unicode="P" horiz-adv-x="1086" d="M 1258,985 C 1258,924 1248,867 1228,814 1207,761 1177,715 1137,676 1096,637 1046,606 985,583 924,560 854,549 773,549 L 359,549 359,0 168,0 168,1409 761,1409 C 844,1409 917,1399 979,1379 1041,1358 1093,1330 1134,1293 1175,1256 1206,1211 1227,1159 1248,1106 1258,1048 1258,985 Z M 1066,983 C 1066,1072 1039,1140 984,1187 929,1233 847,1256 738,1256 L 359,1256 359,700 746,700 C 856,700 937,724 989,773 1040,822 1066,892 1066,983 Z"/>
+   <glyph unicode="I" horiz-adv-x="186" d="M 189,0 L 189,1409 380,1409 380,0 189,0 Z"/>
+   <glyph unicode="H" horiz-adv-x="1165" d="M 1121,0 L 1121,653 359,653 359,0 168,0 168,1409 359,1409 359,813 1121,813 1121,1409 1312,1409 1312,0 1121,0 Z"/>
+   <glyph unicode="C" horiz-adv-x="1297" d="M 792,1274 C 712,1274 641,1261 580,1234 518,1207 466,1169 425,1120 383,1071 351,1011 330,942 309,873 298,796 298,711 298,626 310,549 333,479 356,408 389,348 432,297 475,246 527,207 590,179 652,151 722,137 800,137 855,137 905,144 950,159 995,173 1035,193 1072,219 1108,245 1140,276 1169,312 1198,347 1223,387 1245,430 L 1401,352 C 1376,299 1344,250 1307,205 1270,160 1226,120 1176,87 1125,54 1068,28 1005,9 941,-10 870,-20 791,-20 677,-20 577,-2 492,35 406,71 334,122 277,187 219,252 176,329 147,418 118,507 104,605 104,711 104,821 119,920 150,1009 180,1098 224,1173 283,1236 341,1298 413,1346 498,1380 583,1413 681,1430 790,1430 940,1430 1065,1401 1166,1342 1267,1283 1341,1196 1388,1081 L 1207,1021 C 1194,1054 1176,1086 1153,1117 1130,1147 1102,1174 1068,1197 1034,1220 994,1239 949,1253 903,1267 851,1274 792,1274 Z"/>
+   <glyph unicode="B" horiz-adv-x="1086" d="M 1258,397 C 1258,326 1244,265 1216,215 1188,164 1150,123 1103,92 1056,60 1001,37 938,22 875,7 809,0 740,0 L 168,0 168,1409 680,1409 C 758,1409 828,1403 889,1390 950,1377 1002,1356 1045,1328 1088,1300 1120,1265 1143,1222 1165,1179 1176,1127 1176,1067 1176,1028 1171,991 1160,956 1149,921 1132,890 1110,862 1087,833 1059,809 1026,789 992,768 953,753 908,743 965,736 1015,723 1059,704 1102,685 1139,660 1168,630 1197,600 1220,565 1235,526 1250,486 1258,443 1258,397 Z M 984,1044 C 984,1120 958,1174 906,1207 854,1240 779,1256 680,1256 L 359,1256 359,810 680,810 C 736,810 783,816 822,827 861,838 892,853 916,874 940,894 957,918 968,947 979,976 984,1008 984,1044 Z M 1065,412 C 1065,457 1057,495 1041,526 1024,557 1001,583 970,603 939,623 903,638 860,647 817,656 768,661 715,661 L 359,661 359,153 730,153 C 779,153 824,157 865,165 906,173 941,187 971,207 1000,227 1023,254 1040,287 1057,320 1065,362 1065,412 Z"/>
+   <glyph unicode="A" horiz-adv-x="1350" d="M 1167,0 L 1006,412 364,412 202,0 4,0 579,1409 796,1409 1362,0 1167,0 Z M 768,1026 C 757,1053 747,1080 738,1107 728,1134 719,1159 712,1182 705,1204 699,1223 694,1238 689,1253 686,1262 685,1265 684,1262 681,1252 676,1237 671,1222 665,1203 658,1180 650,1157 641,1132 632,1105 622,1078 612,1051 602,1024 L 422,561 949,561 768,1026 Z"/>
+   <glyph unicode="4" horiz-adv-x="1033" d="M 881,319 L 881,0 711,0 711,319 47,319 47,459 692,1409 881,1409 881,461 1079,461 1079,319 881,319 Z M 711,1206 C 710,1203 706,1196 701,1186 696,1177 690,1166 683,1154 676,1142 670,1129 663,1117 656,1105 649,1094 644,1086 L 283,551 C 280,546 275,539 269,530 262,522 256,513 249,504 242,495 236,486 229,477 222,468 217,464 213,464 L 711,464 711,1206 Z"/>
+   <glyph unicode="3" horiz-adv-x="980" d="M 1049,389 C 1049,324 1039,267 1018,216 997,165 966,123 926,88 885,53 835,26 776,8 716,-11 648,-20 571,-20 484,-20 410,-9 351,13 291,34 242,63 203,99 164,134 135,175 116,221 97,266 84,313 78,362 L 264,379 C 269,342 279,308 294,277 308,246 327,220 352,198 377,176 407,159 443,147 479,135 522,129 571,129 662,129 733,151 785,196 836,241 862,307 862,395 862,447 851,489 828,521 805,552 776,577 742,595 707,612 670,624 630,630 589,636 552,639 518,639 L 416,639 416,795 514,795 C 548,795 583,799 620,806 657,813 690,825 721,844 751,862 776,887 796,918 815,949 825,989 825,1038 825,1113 803,1173 759,1217 714,1260 648,1282 561,1282 482,1282 418,1262 369,1221 320,1180 291,1123 283,1049 L 102,1063 C 109,1125 126,1179 153,1225 180,1271 214,1309 255,1340 296,1370 342,1393 395,1408 448,1423 504,1430 563,1430 642,1430 709,1420 766,1401 823,1381 869,1354 905,1321 941,1287 968,1247 985,1202 1002,1157 1010,1108 1010,1057 1010,1016 1004,977 993,941 982,905 964,873 940,844 916,815 886,791 849,770 812,749 767,734 715,723 L 715,719 C 772,713 821,700 863,681 905,661 940,636 967,607 994,578 1015,544 1029,507 1042,470 1049,430 1049,389 Z"/>
+   <glyph unicode="2" horiz-adv-x="927" d="M 103,0 L 103,127 C 137,205 179,274 228,334 277,393 328,447 382,496 436,544 490,589 543,630 596,671 643,713 686,754 729,795 763,839 790,884 816,929 829,981 829,1038 829,1078 823,1113 811,1144 799,1174 782,1199 759,1220 736,1241 709,1256 678,1267 646,1277 611,1282 572,1282 536,1282 502,1277 471,1267 439,1257 411,1242 386,1222 361,1202 341,1177 326,1148 310,1118 300,1083 295,1044 L 111,1061 C 117,1112 131,1159 153,1204 175,1249 205,1288 244,1322 283,1355 329,1382 384,1401 438,1420 501,1430 572,1430 642,1430 704,1422 759,1405 814,1388 860,1364 898,1331 935,1298 964,1258 984,1210 1004,1162 1014,1107 1014,1044 1014,997 1006,952 989,909 972,866 949,826 921,787 892,748 859,711 822,675 785,639 746,604 705,570 664,535 623,501 582,468 541,434 502,400 466,366 429,332 397,298 368,263 339,228 317,191 301,153 L 1036,153 1036,0 103,0 Z"/>
+   <glyph unicode="1" horiz-adv-x="874" d="M 156,0 L 156,153 515,153 515,1237 197,1010 197,1180 530,1409 696,1409 696,153 1039,153 1039,0 156,0 Z"/>
+   <glyph unicode="0" horiz-adv-x="980" d="M 1059,705 C 1059,570 1046,456 1021,364 995,271 960,197 916,140 871,83 819,42 759,17 699,-8 635,-20 567,-20 498,-20 434,-8 375,17 316,42 264,82 221,139 177,196 143,270 118,363 93,455 80,569 80,705 80,847 93,965 118,1058 143,1151 177,1225 221,1280 265,1335 317,1374 377,1397 437,1419 502,1430 573,1430 640,1430 704,1419 763,1397 822,1374 873,1335 917,1280 961,1225 996,1151 1021,1058 1046,965 1059,847 1059,705 Z M 876,705 C 876,817 869,910 856,985 843,1059 823,1118 797,1163 771,1207 739,1238 702,1257 664,1275 621,1284 573,1284 522,1284 478,1275 439,1256 400,1237 368,1206 342,1162 315,1117 295,1058 282,984 269,909 262,816 262,705 262,597 269,506 283,432 296,358 316,299 343,254 369,209 401,176 439,157 477,137 520,127 569,127 616,127 659,137 697,157 735,176 767,209 794,254 820,299 840,358 855,432 869,506 876,597 876,705 Z"/>
+   <glyph unicode="." horiz-adv-x="186" d="M 187,0 L 187,219 382,219 382,0 187,0 Z"/>
+   <glyph unicode="-" horiz-adv-x="504" d="M 91,464 L 91,624 591,624 591,464 91,464 Z"/>
+   <glyph unicode="," horiz-adv-x="212" d="M 385,219 L 385,51 C 385,16 384,-16 381,-46 378,-74 373,-101 366,-127 359,-151 351,-175 342,-197 332,-219 320,-241 307,-262 L 184,-262 C 214,-219 237,-175 254,-131 270,-87 278,-43 278,0 L 190,0 190,219 385,219 Z"/>
+   <glyph unicode=" " horiz-adv-x="556"/>
+  </font>
+ </defs>
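+ <!-- End of embedded font data: Liberation Sans glyph outlines, presumably resolved by family name by the text shapes below. -->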
+ <defs class="TextShapeIndex">
+  <g ooo:slide="id1" ooo:id-list="id3 id4 id5 id6 id7 id8 id9 id10 id11 id12 id13 id14 id15 id16 id17 id18 id19 id20 id21 id22 id23"/>
+ </defs>
+ <defs class="EmbeddedBulletChars">
+  <g id="bullet-char-template(57356)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 580,1141 L 1163,571 580,0 -4,571 580,1141 Z"/>
+  </g>
+  <g id="bullet-char-template(57354)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 8,1128 L 1137,1128 1137,0 8,0 8,1128 Z"/>
+  </g>
+  <g id="bullet-char-template(10146)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 174,0 L 602,739 174,1481 1456,739 174,0 Z M 1358,739 L 309,1346 659,739 1358,739 Z"/>
+  </g>
+  <g id="bullet-char-template(10132)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 2015,739 L 1276,0 717,0 1260,543 174,543 174,936 1260,936 717,1481 1274,1481 2015,739 Z"/>
+  </g>
+  <g id="bullet-char-template(10007)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 0,-2 C -7,14 -16,27 -25,37 L 356,567 C 262,823 215,952 215,954 215,979 228,992 255,992 264,992 276,990 289,987 310,991 331,999 354,1012 L 381,999 492,748 772,1049 836,1024 860,1049 C 881,1039 901,1025 922,1006 886,937 835,863 770,784 769,783 710,716 594,584 L 774,223 C 774,196 753,168 711,139 L 727,119 C 717,90 699,76 672,76 641,76 570,178 457,381 L 164,-76 C 142,-110 111,-127 72,-127 30,-127 9,-110 8,-76 1,-67 -2,-52 -2,-32 -2,-23 -1,-13 0,-2 Z"/>
+  </g>
+  <g id="bullet-char-template(10004)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 285,-33 C 182,-33 111,30 74,156 52,228 41,333 41,471 41,549 55,616 82,672 116,743 169,778 240,778 293,778 328,747 346,684 L 369,508 C 377,444 397,411 428,410 L 1163,1116 C 1174,1127 1196,1133 1229,1133 1271,1133 1292,1118 1292,1087 L 1292,965 C 1292,929 1282,901 1262,881 L 442,47 C 390,-6 338,-33 285,-33 Z"/>
+  </g>
+  <g id="bullet-char-template(9679)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 813,0 C 632,0 489,54 383,161 276,268 223,411 223,592 223,773 276,916 383,1023 489,1130 632,1184 813,1184 992,1184 1136,1130 1245,1023 1353,916 1407,772 1407,592 1407,412 1353,268 1245,161 1136,54 992,0 813,0 Z"/>
+  </g>
+  <g id="bullet-char-template(8226)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 346,457 C 273,457 209,483 155,535 101,586 74,649 74,723 74,796 101,859 155,911 209,963 273,989 346,989 419,989 480,963 531,910 582,859 608,796 608,723 608,648 583,586 532,535 482,483 420,457 346,457 Z"/>
+  </g>
+  <g id="bullet-char-template(8211)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M -4,459 L 1135,459 1135,606 -4,606 -4,459 Z"/>
+  </g>
+  <g id="bullet-char-template(61548)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 173,740 C 173,903 231,1043 346,1159 462,1274 601,1332 765,1332 928,1332 1067,1274 1183,1159 1299,1043 1357,903 1357,740 1357,577 1299,437 1183,322 1067,206 928,148 765,148 601,148 462,206 346,322 231,437 173,577 173,740 Z"/>
+  </g>
+ </defs>
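+ <!-- Bullet-character templates appear to be standard LibreOffice Draw SVG-export output; no bulleted text on this slide seems to reference them. -->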
+ <defs class="TextEmbeddedBitmaps"/>
+ <g>
+  <g id="id2" class="Master_Slide">
+   <g id="bg-id2" class="Background"/>
+   <g id="bo-id2" class="BackgroundObjects"/>
+  </g>
+ </g>
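+ <!-- Master slide: the Background and BackgroundObjects groups above are empty, so the page draws no master background. -->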
+ <g class="SlideGroup">
+  <g>
+   <g id="container-id1">
+    <g id="id1" class="Slide" clip-path="url(#presentation_clip_path)">
+     <g class="Page">
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id3">
+        <rect class="BoundingBox" stroke="none" fill="none" x="11667" y="3539" width="2289" height="1019"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 12811,4556 L 11668,4556 11668,3540 13954,3540 13954,4556 12811,4556 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12811,4556 L 11668,4556 11668,3540 13954,3540 13954,4556 12811,4556 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="12073" y="4269"><tspan fill="rgb(0,0,0)" stroke="none">client</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id4">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7348" y="6841" width="5718" height="1781"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 10207,8620 L 7349,8620 7349,6842 13064,6842 13064,8620 10207,8620 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10207,8620 L 7349,8620 7349,6842 13064,6842 13064,8620 10207,8620 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="7673" y="7952"><tspan fill="rgb(0,0,0)" stroke="none">arvados-controller</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id5">
+        <rect class="BoundingBox" stroke="none" fill="none" x="14587" y="6205" width="2925" height="2671"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 16049,6206 C 15252,6206 14588,6357 14588,6539 L 14588,8540 C 14588,8722 15252,8874 16049,8874 16845,8874 17510,8722 17510,8540 L 17510,6539 C 17510,6357 16845,6206 16049,6206 L 16049,6206 Z M 14588,6206 L 14588,6206 Z M 17510,8874 L 17510,8874 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16049,6206 C 15252,6206 14588,6357 14588,6539 L 14588,8540 C 14588,8722 15252,8874 16049,8874 16845,8874 17510,8722 17510,8540 L 17510,6539 C 17510,6357 16845,6206 16049,6206 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14588,6206 L 14588,6206 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17510,8874 L 17510,8874 Z"/>
+        <path fill="rgb(165,195,226)" stroke="none" d="M 16049,6206 C 15252,6206 14588,6357 14588,6539 14588,6721 15252,6873 16049,6873 16845,6873 17510,6721 17510,6539 17510,6357 16845,6206 16049,6206 L 16049,6206 Z M 14588,6206 L 14588,6206 Z M 17510,8874 L 17510,8874 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16049,6206 C 15252,6206 14588,6357 14588,6539 14588,6721 15252,6873 16049,6873 16845,6873 17510,6721 17510,6539 17510,6357 16845,6206 16049,6206 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14588,6206 L 14588,6206 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17510,8874 L 17510,8874 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="15169" y="7572"><tspan fill="rgb(0,0,0)" stroke="none">home </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="14747" y="8283"><tspan fill="rgb(0,0,0)" stroke="none">database</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id6">
+        <rect class="BoundingBox" stroke="none" fill="none" x="1379" y="12047" width="5464" height="1654"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 4111,13699 L 1380,13699 1380,12048 6841,12048 6841,13699 4111,13699 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4111,13699 L 1380,13699 1380,12048 6841,12048 6841,13699 4111,13699 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="1577" y="13094"><tspan fill="rgb(0,0,0)" stroke="none">arvados-controller</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id7">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7602" y="11032" width="3687" height="2798"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 9445,11033 C 8440,11033 7603,11191 7603,11382 L 7603,13478 C 7603,13669 8440,13828 9445,13828 10449,13828 11287,13669 11287,13478 L 11287,11382 C 11287,11191 10449,11033 9445,11033 L 9445,11033 Z M 7603,11033 L 7603,11033 Z M 11287,13828 L 11287,13828 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9445,11033 C 8440,11033 7603,11191 7603,11382 L 7603,13478 C 7603,13669 8440,13828 9445,13828 10449,13828 11287,13669 11287,13478 L 11287,11382 C 11287,11191 10449,11033 9445,11033 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7603,11033 L 7603,11033 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11287,13828 L 11287,13828 Z"/>
+        <path fill="rgb(165,195,226)" stroke="none" d="M 9445,11033 C 8440,11033 7603,11191 7603,11382 7603,11572 8440,11731 9445,11731 10449,11731 11287,11572 11287,11382 11287,11191 10449,11033 9445,11033 L 9445,11033 Z M 7603,11033 L 7603,11033 Z M 11287,13828 L 11287,13828 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9445,11033 C 8440,11033 7603,11191 7603,11382 7603,11572 8440,11731 9445,11731 10449,11731 11287,11572 11287,11382 11287,11191 10449,11033 9445,11033 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7603,11033 L 7603,11033 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11287,13828 L 11287,13828 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="8459" y="12470"><tspan fill="rgb(0,0,0)" stroke="none">remote</tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="8143" y="13181"><tspan fill="rgb(0,0,0)" stroke="none">database</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id8">
+        <rect class="BoundingBox" stroke="none" fill="none" x="13063" y="11920" width="5464" height="1654"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 15795,13572 L 13064,13572 13064,11921 18525,11921 18525,13572 15795,13572 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15795,13572 L 13064,13572 13064,11921 18525,11921 18525,13572 15795,13572 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="13261" y="12967"><tspan fill="rgb(0,0,0)" stroke="none">arvados-controller</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id9">
+        <rect class="BoundingBox" stroke="none" fill="none" x="19286" y="10777" width="3687" height="2925"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 21129,10778 C 20124,10778 19287,10944 19287,11143 L 19287,13334 C 19287,13533 20124,13700 21129,13700 22133,13700 22971,13533 22971,13334 L 22971,11143 C 22971,10944 22133,10778 21129,10778 L 21129,10778 Z M 19287,10778 L 19287,10778 Z M 22971,13700 L 22971,13700 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 21129,10778 C 20124,10778 19287,10944 19287,11143 L 19287,13334 C 19287,13533 20124,13700 21129,13700 22133,13700 22971,13533 22971,13334 L 22971,11143 C 22971,10944 22133,10778 21129,10778 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 19287,10778 L 19287,10778 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 22971,13700 L 22971,13700 Z"/>
+        <path fill="rgb(165,195,226)" stroke="none" d="M 21129,10778 C 20124,10778 19287,10944 19287,11143 19287,11342 20124,11508 21129,11508 22133,11508 22971,11342 22971,11143 22971,10944 22133,10778 21129,10778 L 21129,10778 Z M 19287,10778 L 19287,10778 Z M 22971,13700 L 22971,13700 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 21129,10778 C 20124,10778 19287,10944 19287,11143 19287,11342 20124,11508 21129,11508 22133,11508 22971,11342 22971,11143 22971,10944 22133,10778 21129,10778 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 19287,10778 L 19287,10778 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 22971,13700 L 22971,13700 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="20143" y="12286"><tspan fill="rgb(0,0,0)" stroke="none">remote</tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="19827" y="12997"><tspan fill="rgb(0,0,0)" stroke="none">database</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id10">
+        <rect class="BoundingBox" stroke="none" fill="none" x="1126" y="10650" width="10417" height="3686"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6376,14334 L 6334,14334 6283,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6232,14334 L 6181,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6130,14334 L 6079,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6028,14334 L 5977,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5926,14334 L 5875,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5824,14334 L 5773,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5722,14334 L 5671,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5620,14334 L 5569,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5518,14334 L 5467,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5416,14334 L 5365,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5314,14334 L 5263,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5212,14334 L 5161,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5110,14334 L 5059,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5008,14334 L 4957,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4906,14334 L 4855,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4804,14334 L 4753,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4702,14334 L 4651,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4600,14334 L 4549,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4498,14334 L 4447,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4396,14334 L 4345,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4294,14334 L 4243,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4192,14334 L 4141,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4090,14334 L 4039,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3988,14334 L 3937,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3886,14334 L 3835,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3784,14334 L 3733,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3682,14334 L 3631,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3580,14334 L 3529,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3478,14334 L 3427,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3376,14334 L 3325,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3274,14334 L 3223,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3172,14334 L 3121,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3070,14334 L 3019,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2968,14334 L 2917,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2866,14334 L 2815,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2764,14334 L 2713,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2662,14334 L 2611,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2560,14334 L 2509,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2458,14334 L 2407,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2356,14334 L 2305,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2254,14334 L 2203,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2152,14334 L 2101,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2050,14334 L 1999,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1948,14334 L 1897,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1846,14334 L 1795,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1744,14334 L 1693,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1642,14334 L 1591,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1540,14334 L 1489,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1438,14334 L 1387,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1336,14334 L 1285,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1234,14334 L 1183,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1132,14334 L 1127,14334 1127,14288"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,14237 L 1127,14186"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,14135 L 1127,14084"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,14033 L 1127,13982"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,13931 L 1127,13880"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,13829 L 1127,13778"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,13727 L 1127,13676"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,13625 L 1127,13574"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,13523 L 1127,13472"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,13421 L 1127,13370"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,13319 L 1127,13268"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,13217 L 1127,13166"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,13115 L 1127,13064"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,13013 L 1127,12962"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,12911 L 1127,12860"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,12809 L 1127,12758"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,12707 L 1127,12656"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,12605 L 1127,12554"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,12503 L 1127,12452"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,12401 L 1127,12350"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,12299 L 1127,12248"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,12197 L 1127,12146"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,12095 L 1127,12044"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,11993 L 1127,11942"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,11891 L 1127,11840"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,11789 L 1127,11738"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,11687 L 1127,11636"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,11585 L 1127,11534"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,11483 L 1127,11432"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,11381 L 1127,11330"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,11279 L 1127,11228"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,11177 L 1127,11126"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,11075 L 1127,11024"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,10973 L 1127,10922"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,10871 L 1127,10820"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,10769 L 1127,10718"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1127,10667 L 1127,10651 1162,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1213,10651 L 1264,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1315,10651 L 1366,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1417,10651 L 1468,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1519,10651 L 1570,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1621,10651 L 1672,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1723,10651 L 1774,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1825,10651 L 1876,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 1927,10651 L 1978,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2029,10651 L 2080,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2131,10651 L 2182,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2233,10651 L 2284,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2335,10651 L 2386,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2437,10651 L 2488,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2539,10651 L 2590,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2641,10651 L 2692,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2743,10651 L 2794,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2845,10651 L 2896,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 2947,10651 L 2998,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3049,10651 L 3100,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3151,10651 L 3202,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3253,10651 L 3304,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3355,10651 L 3406,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3457,10651 L 3508,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3559,10651 L 3610,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3661,10651 L 3712,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3763,10651 L 3814,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3865,10651 L 3916,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 3967,10651 L 4018,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4069,10651 L 4120,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4171,10651 L 4222,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4273,10651 L 4324,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4375,10651 L 4426,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4477,10651 L 4528,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4579,10651 L 4630,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4681,10651 L 4732,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4783,10651 L 4834,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4885,10651 L 4936,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4987,10651 L 5038,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5089,10651 L 5140,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5191,10651 L 5242,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5293,10651 L 5344,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5395,10651 L 5446,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5497,10651 L 5548,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5599,10651 L 5650,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5701,10651 L 5752,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5803,10651 L 5854,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5905,10651 L 5956,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6007,10651 L 6058,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6109,10651 L 6160,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6211,10651 L 6262,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6313,10651 L 6364,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6415,10651 L 6466,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6517,10651 L 6568,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6619,10651 L 6670,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6721,10651 L 6772,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6823,10651 L 6874,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6925,10651 L 6976,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7027,10651 L 7078,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7129,10651 L 7180,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7231,10651 L 7282,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7333,10651 L 7384,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7435,10651 L 7486,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7537,10651 L 7588,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7639,10651 L 7690,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7741,10651 L 7792,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7843,10651 L 7894,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7945,10651 L 7996,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8047,10651 L 8098,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8149,10651 L 8200,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8251,10651 L 8302,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8353,10651 L 8404,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8455,10651 L 8506,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8557,10651 L 8608,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8659,10651 L 8710,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8761,10651 L 8812,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8863,10651 L 8914,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8965,10651 L 9016,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9067,10651 L 9118,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9169,10651 L 9220,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9271,10651 L 9322,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9373,10651 L 9424,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9475,10651 L 9526,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9577,10651 L 9628,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9679,10651 L 9730,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9781,10651 L 9832,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9883,10651 L 9934,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9985,10651 L 10036,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10087,10651 L 10138,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10189,10651 L 10240,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10291,10651 L 10342,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10393,10651 L 10444,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10495,10651 L 10546,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10597,10651 L 10648,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10699,10651 L 10750,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10801,10651 L 10852,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10903,10651 L 10954,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11005,10651 L 11056,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11107,10651 L 11158,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11209,10651 L 11260,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11311,10651 L 11362,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11413,10651 L 11464,10651"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11515,10651 L 11541,10651 11541,10676"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,10727 L 11541,10778"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,10829 L 11541,10880"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,10931 L 11541,10982"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,11033 L 11541,11084"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,11135 L 11541,11186"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,11237 L 11541,11288"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,11339 L 11541,11390"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,11441 L 11541,11492"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,11543 L 11541,11594"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,11645 L 11541,11696"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,11747 L 11541,11798"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,11849 L 11541,11900"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,11951 L 11541,12002"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,12053 L 11541,12104"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,12155 L 11541,12206"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,12257 L 11541,12308"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,12359 L 11541,12410"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,12461 L 11541,12512"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,12563 L 11541,12614"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,12665 L 11541,12716"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,12767 L 11541,12818"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,12869 L 11541,12920"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,12971 L 11541,13022"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,13073 L 11541,13124"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,13175 L 11541,13226"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,13277 L 11541,13328"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,13379 L 11541,13430"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,13481 L 11541,13532"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,13583 L 11541,13634"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,13685 L 11541,13736"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,13787 L 11541,13838"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,13889 L 11541,13940"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,13991 L 11541,14042"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,14093 L 11541,14144"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,14195 L 11541,14246"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11541,14297 L 11541,14334 11527,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11476,14334 L 11425,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11374,14334 L 11323,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11272,14334 L 11221,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11170,14334 L 11119,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 11068,14334 L 11017,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10966,14334 L 10915,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10864,14334 L 10813,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10762,14334 L 10711,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10660,14334 L 10609,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10558,14334 L 10507,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10456,14334 L 10405,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10354,14334 L 10303,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10252,14334 L 10201,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10150,14334 L 10099,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10048,14334 L 9997,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9946,14334 L 9895,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9844,14334 L 9793,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9742,14334 L 9691,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9640,14334 L 9589,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9538,14334 L 9487,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9436,14334 L 9385,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9334,14334 L 9283,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9232,14334 L 9181,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9130,14334 L 9079,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 9028,14334 L 8977,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8926,14334 L 8875,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8824,14334 L 8773,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8722,14334 L 8671,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8620,14334 L 8569,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8518,14334 L 8467,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8416,14334 L 8365,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8314,14334 L 8263,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8212,14334 L 8161,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8110,14334 L 8059,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 8008,14334 L 7957,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7906,14334 L 7855,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7804,14334 L 7753,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7702,14334 L 7651,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7600,14334 L 7549,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7498,14334 L 7447,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7396,14334 L 7345,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7294,14334 L 7243,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7192,14334 L 7141,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 7090,14334 L 7039,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6988,14334 L 6937,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6886,14334 L 6835,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6784,14334 L 6733,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6682,14334 L 6631,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6580,14334 L 6529,14334"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 6478,14334 L 6427,14334"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="1377" y="11352"><tspan fill="rgb(0,0,0)" stroke="none">Cluster A</tspan></tspan></tspan></text>
+       </g>
+      </g>
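+      <!-- The run of short stroke segments above traces the dashed rectangle enclosing the "Cluster A" group of shapes. -->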
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id11">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12682" y="10651" width="10544" height="3559"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17996,14208 L 17954,14208 17903,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17852,14208 L 17801,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17750,14208 L 17699,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17648,14208 L 17597,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17546,14208 L 17495,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17444,14208 L 17393,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17342,14208 L 17291,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17240,14208 L 17189,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17138,14208 L 17087,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17036,14208 L 16985,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16934,14208 L 16883,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16832,14208 L 16781,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16730,14208 L 16679,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16628,14208 L 16577,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16526,14208 L 16475,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16424,14208 L 16373,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16322,14208 L 16271,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16220,14208 L 16169,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16118,14208 L 16067,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16016,14208 L 15965,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15914,14208 L 15863,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15812,14208 L 15761,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15710,14208 L 15659,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15608,14208 L 15557,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15506,14208 L 15455,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15404,14208 L 15353,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15302,14208 L 15251,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15200,14208 L 15149,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15098,14208 L 15047,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14996,14208 L 14945,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14894,14208 L 14843,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14792,14208 L 14741,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14690,14208 L 14639,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14588,14208 L 14537,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14486,14208 L 14435,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14384,14208 L 14333,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14282,14208 L 14231,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14180,14208 L 14129,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14078,14208 L 14027,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13976,14208 L 13925,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13874,14208 L 13823,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13772,14208 L 13721,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13670,14208 L 13619,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13568,14208 L 13517,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13466,14208 L 13415,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13364,14208 L 13313,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13262,14208 L 13211,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13160,14208 L 13109,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13058,14208 L 13007,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12956,14208 L 12905,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12854,14208 L 12803,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12752,14208 L 12701,14208"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,14175 L 12683,14124"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,14073 L 12683,14022"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,13971 L 12683,13920"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,13869 L 12683,13818"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,13767 L 12683,13716"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,13665 L 12683,13614"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,13563 L 12683,13512"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,13461 L 12683,13410"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,13359 L 12683,13308"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,13257 L 12683,13206"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,13155 L 12683,13104"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,13053 L 12683,13002"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,12951 L 12683,12900"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,12849 L 12683,12798"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,12747 L 12683,12696"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,12645 L 12683,12594"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,12543 L 12683,12492"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,12441 L 12683,12390"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,12339 L 12683,12288"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,12237 L 12683,12186"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,12135 L 12683,12084"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,12033 L 12683,11982"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,11931 L 12683,11880"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,11829 L 12683,11778"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,11727 L 12683,11676"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,11625 L 12683,11574"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,11523 L 12683,11472"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,11421 L 12683,11370"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,11319 L 12683,11268"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,11217 L 12683,11166"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,11115 L 12683,11064"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,11013 L 12683,10962"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,10911 L 12683,10860"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,10809 L 12683,10758"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12683,10707 L 12683,10656"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12731,10652 L 12782,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12833,10652 L 12884,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12935,10652 L 12986,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13037,10652 L 13088,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13139,10652 L 13190,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13241,10652 L 13292,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13343,10652 L 13394,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13445,10652 L 13496,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13547,10652 L 13598,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13649,10652 L 13700,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13751,10652 L 13802,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13853,10652 L 13904,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 13955,10652 L 14006,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14057,10652 L 14108,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14159,10652 L 14210,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14261,10652 L 14312,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14363,10652 L 14414,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14465,10652 L 14516,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14567,10652 L 14618,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14669,10652 L 14720,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14771,10652 L 14822,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14873,10652 L 14924,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14975,10652 L 15026,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15077,10652 L 15128,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15179,10652 L 15230,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15281,10652 L 15332,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15383,10652 L 15434,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15485,10652 L 15536,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15587,10652 L 15638,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15689,10652 L 15740,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15791,10652 L 15842,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15893,10652 L 15944,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15995,10652 L 16046,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16097,10652 L 16148,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16199,10652 L 16250,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16301,10652 L 16352,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16403,10652 L 16454,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16505,10652 L 16556,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16607,10652 L 16658,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16709,10652 L 16760,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16811,10652 L 16862,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16913,10652 L 16964,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17015,10652 L 17066,10652"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17117,10652 L 17168,10652"/>
+        <!-- repetitive dashed-border path segments of the "Cluster B" box elided -->
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="12933" y="11353"><tspan fill="rgb(0,0,0)" stroke="none">Cluster B</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id12">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7095" y="5443" width="10925" height="3686"/>
+        <!-- repetitive dashed-border path segments of the "Home Cluster" box elided -->
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="7346" y="6145"><tspan fill="rgb(0,0,0)" stroke="none">Home Cluster</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id13">
+        <rect class="BoundingBox" stroke="none" fill="none" x="13064" y="7581" width="1526" height="301"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 13065,7731 L 14159,7731"/>
+        <path fill="rgb(0,0,0)" stroke="none" d="M 14589,7731 L 14139,7581 14139,7881 14589,7731 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id14">
+        <rect class="BoundingBox" stroke="none" fill="none" x="5317" y="8619" width="4067" height="3431"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 9382,8620 L 5647,11772"/>
+        <path fill="rgb(0,0,0)" stroke="none" d="M 5318,12049 L 5759,11873 5565,11644 5318,12049 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id15">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12683" y="8619" width="4701" height="3304"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 12684,8620 L 17031,11675"/>
+        <path fill="rgb(0,0,0)" stroke="none" d="M 17383,11922 L 17101,11541 16929,11786 17383,11922 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id16">
+        <rect class="BoundingBox" stroke="none" fill="none" x="6841" y="12788" width="764" height="301"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 6842,12938 L 7174,12938"/>
+        <path fill="rgb(0,0,0)" stroke="none" d="M 7604,12938 L 7154,12788 7154,13088 7604,12938 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id17">
+        <rect class="BoundingBox" stroke="none" fill="none" x="18525" y="12661" width="764" height="301"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 18526,12811 L 18858,12811"/>
+        <path fill="rgb(0,0,0)" stroke="none" d="M 19288,12811 L 18838,12661 18838,12961 19288,12811 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.LineShape">
+       <g id="id18">
+        <rect class="BoundingBox" stroke="none" fill="none" x="11287" y="4555" width="1526" height="2288"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 12811,4556 L 11526,6484"/>
+        <path fill="rgb(0,0,0)" stroke="none" d="M 11287,6842 L 11661,6551 11412,6384 11287,6842 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id19">
+        <rect class="BoundingBox" stroke="none" fill="none" x="14152" y="1253" width="8821" height="2393"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15347,1254 L 22971,1254 22971,3033 15347,3033 15347,1254 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15347,1254 L 15347,1254 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 22971,3033 L 22971,3033 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 14153,3644 L 15098,1808"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 15347,1254 L 15347,1254 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 22971,3033 L 22971,3033 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="15899" y="2008"><tspan fill="rgb(0,0,0)" stroke="none">1. Client sends request </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="16585" y="2719"><tspan fill="rgb(0,0,0)" stroke="none">to its home cluster</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id20">
+        <rect class="BoundingBox" stroke="none" fill="none" x="12779" y="3538" width="14004" height="2904"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 18271,3539 L 26781,3539 26781,5699 18271,5699 18271,3539 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 18271,3539 L 18271,3539 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 26781,5699 L 26781,5699 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 12780,6440 L 18023,4094"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 18271,3539 L 18271,3539 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 26781,5699 L 26781,5699 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="18370" y="4484"><tspan fill="rgb(0,0,0)" stroke="none">2. arvados-controller decides </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="19113" y="5195"><tspan fill="rgb(0,0,0)" stroke="none">how to route the request</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id21">
+        <rect class="BoundingBox" stroke="none" fill="none" x="17172" y="8110" width="9771" height="3024"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 19320,8111 L 26941,8111 26941,10525 19320,10525 19320,8111 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 19320,8111 L 19320,8111 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 26941,10525 L 26941,10525 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 17173,11132 L 19071,8666"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 19320,8111 L 19320,8111 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 26941,10525 L 26941,10525 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="19938" y="8827"><tspan fill="rgb(0,0,0)" stroke="none">3. Requests for remote </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="19627" y="9538"><tspan fill="rgb(0,0,0)" stroke="none">resources are proxied to </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="21122" y="10249"><tspan fill="rgb(0,0,0)" stroke="none">remote cluster</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id22">
+        <rect class="BoundingBox" stroke="none" fill="none" x="16619" y="14969" width="9782" height="2289"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 21510,17256 L 16620,17256 16620,14970 26399,14970 26399,17256 21510,17256 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="16821" y="15978"><tspan fill="rgb(0,0,0)" stroke="none">4. Response from remote cluster </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="18303" y="16689"><tspan fill="rgb(0,0,0)" stroke="none">is returned to the client</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id23">
+        <rect class="BoundingBox" stroke="none" fill="none" x="1252" y="1253" width="7750" height="3305"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 5127,4556 L 1253,4556 1253,1254 9000,1254 9000,4556 5127,4556 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="1556" y="2059"><tspan fill="rgb(0,0,0)" stroke="none">0. Client can be anything </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="1939" y="2770"><tspan fill="rgb(0,0,0)" stroke="none">using the Arvados API,</tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="2280" y="3481"><tspan fill="rgb(0,0,0)" stroke="none">such as Workbench </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="2053" y="4192"><tspan fill="rgb(0,0,0)" stroke="none">or arvados-cwl-runner</tspan></tspan></tspan></text>
+       </g>
+      </g>
+     </g>
+    </g>
+   </g>
+  </g>
+ </g>
+</svg>
\ No newline at end of file
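
The diagram above describes the federation request flow: a client always talks to its home cluster, whose arvados-controller proxies requests for resources owned by other clusters. A minimal sketch of step 1 on the wire (hostname, token, and UUID are illustrative; the five-character prefix "zzzzz" stands for the remote cluster's ID):

<pre>
# Ask the home cluster for a collection owned by remote cluster "zzzzz".
# The controller routes on the UUID prefix and proxies the call, so the
# client never needs a direct connection to the remote cluster.
$ curl -s -H "Authorization: Bearer $ARVADOS_API_TOKEN" \
    https://home.example.org/arvados/v1/collections/zzzzz-4zz18-0123456789abcde
</pre>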
diff --git a/doc/images/dax-reading-book.png b/doc/images/dax-reading-book.png
new file mode 100644 (file)
index 0000000..d10d3be
Binary files /dev/null and b/doc/images/dax-reading-book.png differ
diff --git a/doc/images/dax.png b/doc/images/dax.png
new file mode 100644 (file)
index 0000000..c511f0e
Binary files /dev/null and b/doc/images/dax.png differ
diff --git a/doc/images/doc-bg.jpg b/doc/images/doc-bg.jpg
new file mode 100644 (file)
index 0000000..e3abc50
Binary files /dev/null and b/doc/images/doc-bg.jpg differ
diff --git a/doc/images/download-shared-collection.png b/doc/images/download-shared-collection.png
new file mode 100644 (file)
index 0000000..e608379
Binary files /dev/null and b/doc/images/download-shared-collection.png differ
diff --git a/doc/images/favicon.ico b/doc/images/favicon.ico
new file mode 100644 (file)
index 0000000..4c763b6
Binary files /dev/null and b/doc/images/favicon.ico differ
diff --git a/doc/images/files-uploaded.png b/doc/images/files-uploaded.png
new file mode 100644 (file)
index 0000000..ccd8e16
Binary files /dev/null and b/doc/images/files-uploaded.png differ
diff --git a/doc/images/glyphicons-halflings-white.png b/doc/images/glyphicons-halflings-white.png
new file mode 100644 (file)
index 0000000..3bf6484
Binary files /dev/null and b/doc/images/glyphicons-halflings-white.png differ
diff --git a/doc/images/glyphicons-halflings.png b/doc/images/glyphicons-halflings.png
new file mode 100644 (file)
index 0000000..a996999
Binary files /dev/null and b/doc/images/glyphicons-halflings.png differ
diff --git a/doc/images/keyfeatures/chooseinputs.png b/doc/images/keyfeatures/chooseinputs.png
new file mode 100644 (file)
index 0000000..957daec
Binary files /dev/null and b/doc/images/keyfeatures/chooseinputs.png differ
diff --git a/doc/images/keyfeatures/collectionpage.png b/doc/images/keyfeatures/collectionpage.png
new file mode 100644 (file)
index 0000000..64ecf2c
Binary files /dev/null and b/doc/images/keyfeatures/collectionpage.png differ
diff --git a/doc/images/keyfeatures/dashboard2.png b/doc/images/keyfeatures/dashboard2.png
new file mode 100644 (file)
index 0000000..9ee8b3b
Binary files /dev/null and b/doc/images/keyfeatures/dashboard2.png differ
diff --git a/doc/images/keyfeatures/graph.png b/doc/images/keyfeatures/graph.png
new file mode 100644 (file)
index 0000000..a30691b
Binary files /dev/null and b/doc/images/keyfeatures/graph.png differ
diff --git a/doc/images/keyfeatures/log.png b/doc/images/keyfeatures/log.png
new file mode 100644 (file)
index 0000000..3cc6f6f
Binary files /dev/null and b/doc/images/keyfeatures/log.png differ
diff --git a/doc/images/keyfeatures/provenance.png b/doc/images/keyfeatures/provenance.png
new file mode 100644 (file)
index 0000000..eb0a1f9
Binary files /dev/null and b/doc/images/keyfeatures/provenance.png differ
diff --git a/doc/images/keyfeatures/rerun.png b/doc/images/keyfeatures/rerun.png
new file mode 100644 (file)
index 0000000..c11c111
Binary files /dev/null and b/doc/images/keyfeatures/rerun.png differ
diff --git a/doc/images/keyfeatures/running2.png b/doc/images/keyfeatures/running2.png
new file mode 100644 (file)
index 0000000..2c91855
Binary files /dev/null and b/doc/images/keyfeatures/running2.png differ
diff --git a/doc/images/keyfeatures/shared.png b/doc/images/keyfeatures/shared.png
new file mode 100644 (file)
index 0000000..1412be9
Binary files /dev/null and b/doc/images/keyfeatures/shared.png differ
diff --git a/doc/images/keyfeatures/webupload.png b/doc/images/keyfeatures/webupload.png
new file mode 100644 (file)
index 0000000..00f5f6e
Binary files /dev/null and b/doc/images/keyfeatures/webupload.png differ
diff --git a/doc/images/publicproject/collection-files.png b/doc/images/publicproject/collection-files.png
new file mode 100644 (file)
index 0000000..6e98459
Binary files /dev/null and b/doc/images/publicproject/collection-files.png differ
diff --git a/doc/images/publicproject/collection-graph.png b/doc/images/publicproject/collection-graph.png
new file mode 100644 (file)
index 0000000..85fc3f3
Binary files /dev/null and b/doc/images/publicproject/collection-graph.png differ
diff --git a/doc/images/publicproject/collection-show.png b/doc/images/publicproject/collection-show.png
new file mode 100644 (file)
index 0000000..b867f49
Binary files /dev/null and b/doc/images/publicproject/collection-show.png differ
diff --git a/doc/images/publicproject/collections.png b/doc/images/publicproject/collections.png
new file mode 100644 (file)
index 0000000..9d85552
Binary files /dev/null and b/doc/images/publicproject/collections.png differ
diff --git a/doc/images/publicproject/description.png b/doc/images/publicproject/description.png
new file mode 100644 (file)
index 0000000..34a31e9
Binary files /dev/null and b/doc/images/publicproject/description.png differ
diff --git a/doc/images/publicproject/instance-advanced.png b/doc/images/publicproject/instance-advanced.png
new file mode 100644 (file)
index 0000000..0b8c3c1
Binary files /dev/null and b/doc/images/publicproject/instance-advanced.png differ
diff --git a/doc/images/publicproject/instance-components.png b/doc/images/publicproject/instance-components.png
new file mode 100644 (file)
index 0000000..f99a94d
Binary files /dev/null and b/doc/images/publicproject/instance-components.png differ
diff --git a/doc/images/publicproject/instance-graph.png b/doc/images/publicproject/instance-graph.png
new file mode 100644 (file)
index 0000000..730f244
Binary files /dev/null and b/doc/images/publicproject/instance-graph.png differ
diff --git a/doc/images/publicproject/instance-job.png b/doc/images/publicproject/instance-job.png
new file mode 100644 (file)
index 0000000..64d5281
Binary files /dev/null and b/doc/images/publicproject/instance-job.png differ
diff --git a/doc/images/publicproject/instance-log.png b/doc/images/publicproject/instance-log.png
new file mode 100644 (file)
index 0000000..54f799b
Binary files /dev/null and b/doc/images/publicproject/instance-log.png differ
diff --git a/doc/images/publicproject/instance-show.png b/doc/images/publicproject/instance-show.png
new file mode 100644 (file)
index 0000000..56f0781
Binary files /dev/null and b/doc/images/publicproject/instance-show.png differ
diff --git a/doc/images/publicproject/instances.png b/doc/images/publicproject/instances.png
new file mode 100644 (file)
index 0000000..75b24b2
Binary files /dev/null and b/doc/images/publicproject/instances.png differ
diff --git a/doc/images/quickstart/1.png b/doc/images/quickstart/1.png
new file mode 100644 (file)
index 0000000..79bf5d6
Binary files /dev/null and b/doc/images/quickstart/1.png differ
diff --git a/doc/images/quickstart/2.png b/doc/images/quickstart/2.png
new file mode 100644 (file)
index 0000000..ddeb6f8
Binary files /dev/null and b/doc/images/quickstart/2.png differ
diff --git a/doc/images/quickstart/3.png b/doc/images/quickstart/3.png
new file mode 100644 (file)
index 0000000..8440b6b
Binary files /dev/null and b/doc/images/quickstart/3.png differ
diff --git a/doc/images/quickstart/4.png b/doc/images/quickstart/4.png
new file mode 100644 (file)
index 0000000..405501c
Binary files /dev/null and b/doc/images/quickstart/4.png differ
diff --git a/doc/images/quickstart/5.png b/doc/images/quickstart/5.png
new file mode 100644 (file)
index 0000000..f455cd4
Binary files /dev/null and b/doc/images/quickstart/5.png differ
diff --git a/doc/images/quickstart/6.png b/doc/images/quickstart/6.png
new file mode 100644 (file)
index 0000000..328cdac
Binary files /dev/null and b/doc/images/quickstart/6.png differ
diff --git a/doc/images/quickstart/7.png b/doc/images/quickstart/7.png
new file mode 100644 (file)
index 0000000..f6d9b3d
Binary files /dev/null and b/doc/images/quickstart/7.png differ
diff --git a/doc/images/repositories-panel.png b/doc/images/repositories-panel.png
new file mode 100644 (file)
index 0000000..3e12860
Binary files /dev/null and b/doc/images/repositories-panel.png differ
diff --git a/doc/images/shared-collection.png b/doc/images/shared-collection.png
new file mode 100644 (file)
index 0000000..446bab5
Binary files /dev/null and b/doc/images/shared-collection.png differ
diff --git a/doc/images/ssh-adding-public-key.png b/doc/images/ssh-adding-public-key.png
new file mode 100644 (file)
index 0000000..8aea827
Binary files /dev/null and b/doc/images/ssh-adding-public-key.png differ
diff --git a/doc/images/trash-button-topnav.png b/doc/images/trash-button-topnav.png
new file mode 100644 (file)
index 0000000..d266437
Binary files /dev/null and b/doc/images/trash-button-topnav.png differ
diff --git a/doc/images/upload-tab-in-new-collection.png b/doc/images/upload-tab-in-new-collection.png
new file mode 100644 (file)
index 0000000..f027c79
Binary files /dev/null and b/doc/images/upload-tab-in-new-collection.png differ
diff --git a/doc/images/upload-using-workbench.png b/doc/images/upload-using-workbench.png
new file mode 100644 (file)
index 0000000..3d67577
Binary files /dev/null and b/doc/images/upload-using-workbench.png differ
diff --git a/doc/images/uses/choosefiles.png b/doc/images/uses/choosefiles.png
new file mode 100644 (file)
index 0000000..2d38363
Binary files /dev/null and b/doc/images/uses/choosefiles.png differ
diff --git a/doc/images/uses/gotohome.png b/doc/images/uses/gotohome.png
new file mode 100644 (file)
index 0000000..144a3b1
Binary files /dev/null and b/doc/images/uses/gotohome.png differ
diff --git a/doc/images/uses/rename.png b/doc/images/uses/rename.png
new file mode 100644 (file)
index 0000000..d633242
Binary files /dev/null and b/doc/images/uses/rename.png differ
diff --git a/doc/images/uses/shared.png b/doc/images/uses/shared.png
new file mode 100644 (file)
index 0000000..2631419
Binary files /dev/null and b/doc/images/uses/shared.png differ
diff --git a/doc/images/uses/sharedsubdirs.png b/doc/images/uses/sharedsubdirs.png
new file mode 100644 (file)
index 0000000..6778f54
Binary files /dev/null and b/doc/images/uses/sharedsubdirs.png differ
diff --git a/doc/images/uses/sharing.png b/doc/images/uses/sharing.png
new file mode 100644 (file)
index 0000000..f578c48
Binary files /dev/null and b/doc/images/uses/sharing.png differ
diff --git a/doc/images/uses/uploaddata.png b/doc/images/uses/uploaddata.png
new file mode 100644 (file)
index 0000000..f369270
Binary files /dev/null and b/doc/images/uses/uploaddata.png differ
diff --git a/doc/images/uses/uploading.png b/doc/images/uses/uploading.png
new file mode 100644 (file)
index 0000000..473fc5a
Binary files /dev/null and b/doc/images/uses/uploading.png differ
diff --git a/doc/images/vm-access-with-webshell.png b/doc/images/vm-access-with-webshell.png
new file mode 100644 (file)
index 0000000..b980fdc
Binary files /dev/null and b/doc/images/vm-access-with-webshell.png differ
diff --git a/doc/images/workbench-dashboard.png b/doc/images/workbench-dashboard.png
new file mode 100644 (file)
index 0000000..3cdf1e4
Binary files /dev/null and b/doc/images/workbench-dashboard.png differ
diff --git a/doc/images/workbench-move-selected.png b/doc/images/workbench-move-selected.png
new file mode 100644 (file)
index 0000000..bba1a1c
Binary files /dev/null and b/doc/images/workbench-move-selected.png differ
diff --git a/doc/index.html.liquid b/doc/index.html.liquid
new file mode 100644 (file)
index 0000000..ee35387
--- /dev/null
@@ -0,0 +1,92 @@
+---
+layout: default
+no_nav_left: true
+navsection: top
+title: Arvados | Documentation
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+<div class="jumbotron">
+  <div class="container">
+    <div class="row">
+      <div class="col-sm-6">
+        <h1>ARVADOS</h1>
+        <p>A free and open source platform for big data science</p>
+      </div>
+      <div class="col-sm-6">
+        <img src="images/dax-reading-book.png" style="max-height: 10em" alt="Dax reading a book" />
+      </div>
+    </div>
+  </div>
+</div>
+
+<div class="container-fluid">
+  <div class="row">
+    <div class="col-sm-6">
+      <p><strong>What is Arvados?</strong></p>
+      <p><a href="https://arvados.org/">Arvados</a> is a platform for managing compute and storage for cloud and HPC clusters. It allows you to track your methods and datasets, share them securely, and easily re-run analyses. It also makes it possible to run analyses across multiple clusters (HPC, cloud, or hybrid) with <a href="{{site.baseurl}}/user/cwl/federated-workflows.html">Federated Multi-Cluster Workflows</a>.
+      </p>
+
+      <a name="Support"></a>
+      <p><strong>Support and Community</strong></p>
+
+<p>The recommended place to ask a question about Arvados is on Biostars. After you have <a href="//www.biostars.org/t/arvados/">read previous questions and answers</a> you can <a href="https://www.biostars.org/p/new/post/?tag_val=arvados">post your question using the 'arvados' tag</a>.</p>
+
+      <p>There is a <a href="http://lists.arvados.org/mailman/listinfo/arvados">mailing list</a>. The <a href="https://gitter.im/curoverse/arvados">#arvados channel</a> at gitter.im is available for live discussion and community support.
+      </p>
+
+      <p>Curoverse, a Veritas Genetics company, provides managed Arvados installations as well as commercial support for Arvados. Please visit <a href="https://curoverse.com">curoverse.com</a> or contact <a href="mailto:researchsales@veritasgenetics.com">researchsales@veritasgenetics.com</a> for more information.</p>
+
+      <p><strong>Contributing</strong></p>
+      <p>Please visit the <a href="https://dev.arvados.org/projects/arvados/wiki/Wiki#Contributing-and-hacking">developer site</a>. Arvados is 100% free and open source software; check out the code on <a href="https://github.com/curoverse/arvados">GitHub</a>.</p>
+
+      <p>Arvados is under active development; see the <a href="https://dev.arvados.org/projects/arvados/activity">recent developer activity</a>.
+      </p>
+      <p><strong>License</strong></p>
+      <p>Most of Arvados is licensed under the <a href="{{ site.baseurl }}/user/copying/agpl-3.0.html">GNU AGPL v3</a>. The SDKs are licensed under the <a href="{{ site.baseurl }}/user/copying/LICENSE-2.0.html">Apache License 2.0</a> so that they can be incorporated into proprietary code. See the <a href="https://github.com/curoverse/arvados/blob/master/COPYING">COPYING file</a> for more information.
+      </p>
+
+    </div>
+    <div class="col-sm-6" style="border-left: solid; border-width: 1px">
+      <p><strong>More in-depth guides</strong></p>
+      <!--<p>-->
+        <!--<a href="{{ site.baseurl }}/start/index.html">Getting Started</a> &mdash; Start here if you're new to Arvados.-->
+      <!--</p>-->
+      <p>
+        <a href="{{ site.baseurl }}/user/index.html">User Guide</a> &mdash; How to manage data and do analysis with Arvados.
+      </p>
+      <p>
+        <a href="{{ site.baseurl }}/sdk/index.html">SDK Reference</a> &mdash; Details about the accessing Arvados from various programming languages.
+      </p>
+      <p>
+        <a href="{{ site.baseurl }}/architecture/index.html">Arvados Architecture</a> &mdash; Details about the the Arvados components and architecture.
+      </p>
+      <p>
+        <a href="{{ site.baseurl }}/api/index.html">API Reference</a> &mdash; Details about the the Arvados REST API.
+      </p>
+      <p>
+        <a href="{{ site.baseurl }}/admin/index.html">Admin Guide</a> &mdash; Details about administering an Arvados cluster.
+      </p>
+      <p>
+        <a href="{{ site.baseurl }}/install/index.html">Install Guide</a> &mdash; How to install Arvados.
+      </p>
+    </div>
+  </div>
+
+  <div class="row">
+    <div class="col-sm-12">
+      <br>
+      <p><em><small>
+      The content of the above documentation is licensed under the
+      <a href="{{ site.baseurl }}/user/copying/by-sa-3.0.html">Creative
+        Commons Attribution-Share Alike 3.0 United States</a> license. Code samples in the above documentation are licensed under the
+      <a href="{{ site.baseurl }}/user/copying/LICENSE-2.0.html">Apache License, Version 2.0.</a></small></em>
+      </p>
+    </div>
+  </div>
+</div>
diff --git a/doc/install/arvados-on-kubernetes-GKE.html.textile.liquid b/doc/install/arvados-on-kubernetes-GKE.html.textile.liquid
new file mode 100644 (file)
index 0000000..88b2d57
--- /dev/null
@@ -0,0 +1,62 @@
+---
+layout: default
+navsection: installguide
+title: Arvados on Kubernetes - Google Kubernetes Engine
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page documents the setup of the prerequisites to run the "Arvados on Kubernetes":/install/arvados-on-kubernetes.html @Helm@ chart on @Google Kubernetes Engine@ (GKE).
+
+h3. Install tooling
+
+Install @gcloud@:
+
+* Follow the instructions at "https://cloud.google.com/sdk/downloads":https://cloud.google.com/sdk/downloads
+
+Install @kubectl@:
+
+<pre>
+$ gcloud components install kubectl
+</pre>
+
+Install @helm@:
+
+* Follow the instructions at "https://docs.helm.sh/using_helm/#installing-helm":https://docs.helm.sh/using_helm/#installing-helm
+
+h3. Boot the GKE cluster
+
+This can be done via the "cloud console":https://console.cloud.google.com/kubernetes/ or via the command line:
+
+<pre>
+$ gcloud container clusters create <CLUSTERNAME> --zone us-central1-a --machine-type n1-standard-2 --cluster-version 1.10
+</pre>
+
+It takes a few minutes for the cluster to be initialized.
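+
+You can check on its progress from the command line with, for example:
+
+<pre>
+$ gcloud container clusters list
+</pre>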
+
+h3. Reserve a static IP
+
+Reserve a "static IP":https://console.cloud.google.com/networking/addresses in GCE. Make sure the IP is in the same region as your GKE cluster, and is of the "Regional" type.
+
+h3. Connect to the GKE cluster
+
+Via the web:
+* Click the "Connect" button next to your "GKE cluster"https://console.cloud.google.com/kubernetes/.
+* Execute the "Command-line access" command on your development machine.
+
+Alternatively, use this command:
+
+<pre>
+$ gcloud container clusters get-credentials <CLUSTERNAME> --zone us-central1-a --project <YOUR-PROJECT>
+</pre>
+
+Test the connection:
+
+<pre>
+$ kubectl get nodes
+</pre>
+
+Now proceed to the "Initialize helm on the Kubernetes cluster":/install/arvados-on-kubernetes.html#helm section.
diff --git a/doc/install/arvados-on-kubernetes-minikube.html.textile.liquid b/doc/install/arvados-on-kubernetes-minikube.html.textile.liquid
new file mode 100644 (file)
index 0000000..132b443
--- /dev/null
@@ -0,0 +1,34 @@
+---
+layout: default
+navsection: installguide
+title: Arvados on Kubernetes - Minikube
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page documents the setup of the prerequisites to run the "Arvados on Kubernetes":/install/arvados-on-kubernetes.html @Helm@ chart on @Minikube@.
+
+h3. Install tooling
+
+Install @kubectl@:
+
+* Follow the instructions at "https://kubernetes.io/docs/tasks/tools/install-kubectl/":https://kubernetes.io/docs/tasks/tools/install-kubectl/
+
+Install @helm@:
+
+* Follow the instructions at "https://docs.helm.sh/using_helm/#installing-helm":https://docs.helm.sh/using_helm/#installing-helm
+
+h3. Install Minikube
+
+Follow the instructions at "https://kubernetes.io/docs/setup/minikube/":https://kubernetes.io/docs/setup/minikube/
+
+Test the connection:
+
+<pre>
+$ kubectl get nodes
+</pre>
+
+Now proceed to the "Initialize helm on the Kubernetes cluster":/install/arvados-on-kubernetes.html#helm section.
diff --git a/doc/install/arvados-on-kubernetes.html.textile.liquid b/doc/install/arvados-on-kubernetes.html.textile.liquid
new file mode 100644 (file)
index 0000000..01999f0
--- /dev/null
@@ -0,0 +1,133 @@
+---
+layout: default
+navsection: installguide
+title: Arvados on Kubernetes
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados on Kubernetes is implemented as a Helm Chart.
+
+{% include 'notebox_begin_warning' %}
+This Helm Chart does not retain any state after it is deleted. An Arvados cluster created with this Helm Chart is entirely ephemeral, and all data stored on the cluster will be deleted when it is shut down. This will be fixed in a future version.
+{% include 'notebox_end' %}
+
+h2(#overview). Overview
+
+This Helm Chart provides a basic, small Arvados cluster.
+
+Current limitations, to be addressed in the future:
+
+* An Arvados cluster created with this Helm Chart is entirely ephemeral, and all data stored on the cluster will be deleted when it is shut down.
+* No dynamic scaling of compute nodes (but you can adjust @values.yaml@ and "reload the Helm Chart":#reload)
+* All compute nodes are the same size
+* Compute nodes have no cpu/memory/disk constraints yet
+* No git server
+
+h2. Requirements
+
+* Kubernetes 1.10+ cluster with at least 3 nodes, 2 or more cores per node
+* @kubectl@ and @helm@ installed locally, and able to connect to your Kubernetes cluster
+
+If you do not have a Kubernetes cluster already set up, you can use "Google Kubernetes Engine":/install/arvados-on-kubernetes-GKE.html for multi-node development and testing or "another Kubernetes solution":https://kubernetes.io/docs/setup/pick-right-solution/. Minikube is not supported yet.
+
+h2(#helm). Initialize helm on the Kubernetes cluster
+
+If you already have helm running on the Kubernetes cluster, proceed directly to "Start the Arvados cluster":#Start below.
+
+<pre>
+$ helm init
+$ kubectl create serviceaccount --namespace kube-system tiller
+$ kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+$ kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
+</pre>
+
+Test @helm@ by running
+
+<pre>
+$ helm ls
+</pre>
+
+There should be no errors. The command will return nothing.
+
+h2(#git). Clone the repository
+
+Clone the repository and navigate to the @arvados-kubernetes/charts/arvados@ directory:
+
+<pre>
+$ git clone https://github.com/curoverse/arvados-kubernetes.git
+$ cd arvados-kubernetes/charts/arvados
+</pre>
+
+h2(#Start). Start the Arvados cluster
+
+Next, determine the IP address that the Arvados cluster will use to expose its API, Workbench, etc. If you want this Arvados cluster to be reachable from places other than the local machine, the IP address will need to be routable as appropriate.
+
+<pre>
+$ ./cert-gen.sh <IP ADDRESS>
+</pre>
+
+The @values.yaml@ file contains a number of variables that can be modified. At a minimum, review and/or modify the values for
+
+<pre>
+  adminUserEmail
+  adminUserPassword
+  superUserSecret
+  anonymousUserSecret
+</pre>
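+
+For example, a hypothetical @values.yaml@ fragment might look like this (all values shown are placeholders, not defaults):
+
+<pre>
+adminUserEmail: admin@example.com
+adminUserPassword: change-me-immediately
+superUserSecret: sufficiently-long-random-string
+anonymousUserSecret: another-long-random-string
+</pre>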
+
+Now start the Arvados cluster:
+
+<pre>
+$ helm install --name arvados . --set externalIP=<IP ADDRESS>
+</pre>
+
+At this point, you can use kubectl to see the Arvados cluster boot:
+
+<pre>
+$ kubectl get pods
+$ kubectl get svc
+</pre>
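+
+To watch the pods continuously until they all report @Running@, add the watch flag:
+
+<pre>
+$ kubectl get pods -w
+</pre>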
+
+After a few minutes, you can access Arvados Workbench at the IP address specified
+
+* https://&lt;IP ADDRESS&gt;
+
+with the username and password specified in the @values.yaml@ file.
+
+Alternatively, use the Arvados CLI tools or SDKs:
+
+Set the environment variables:
+
+<pre>
+$ export ARVADOS_API_TOKEN=<superUserSecret from values.yaml>
+$ export ARVADOS_API_HOST=<IP ADDRESS>:444
+$ export ARVADOS_API_HOST_INSECURE=true
+</pre>
+
+Test access with:
+
+<pre>
+$ arv user current
+</pre>
+
+h2(#reload). Reload
+
+If you make changes to the Helm Chart (e.g. to @values.yaml@), you can reload Arvados with
+
+<pre>
+$ helm upgrade arvados .
+</pre>
+
+h2. Shut down
+
+{% include 'notebox_begin_warning' %}
+This Helm Chart does not retain any state after it is deleted. An Arvados cluster created with this Helm Chart is entirely ephemeral, and <strong>all data stored on the Arvados cluster will be deleted</strong> when it is shut down. This will be fixed in a future version.
+{% include 'notebox_end' %}
+
+<pre>
+$ helm del arvados --purge
+</pre>
diff --git a/doc/install/arvbox.html.textile.liquid b/doc/install/arvbox.html.textile.liquid
new file mode 100644 (file)
index 0000000..8827cf8
--- /dev/null
@@ -0,0 +1,145 @@
+---
+layout: default
+navsection: installguide
+title: Arvados-in-a-box
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvbox is a Docker-based self-contained development, demonstration and testing environment for Arvados.  It is not intended for production use.
+
+h2. Quick start
+
+<pre>
+$ git clone https://github.com/curoverse/arvados.git
+$ cd arvados/tools/arvbox/bin
+$ ./arvbox start localdemo
+</pre>
+
+h2. Requirements
+
+* Linux 3.x+ and Docker 1.9+
+* Minimum of 3 GiB of RAM  + additional memory to run jobs
+* Minimum of 3 GiB of disk + storage for actual data
+
+h2. Usage
+
+<pre>
+$ arvbox
+Arvados-in-a-box                      http://arvados.org
+
+build   <config>      build arvbox Docker image
+rebuild <config>      build arvbox Docker image, no layer cache
+start|run <config>  start arvbox container
+open       open arvbox workbench in a web browser
+shell      enter arvbox shell
+ip         print arvbox docker container ip address
+host       print arvbox published host
+status     print some information about current arvbox
+stop       stop arvbox container
+restart <config>  stop, then run again
+reboot  <config>  stop, build arvbox Docker image, run
+reset      delete arvbox arvados data (be careful!)
+destroy    delete all arvbox code and data (be careful!)
+log <service> tail log of specified service
+ls <options>  list directories inside arvbox
+cat <files>   get contents of files inside arvbox
+pipe       run a bash script piped in from stdin
+sv <start|stop|restart> <service> change state of service inside arvbox
+clone <from> <to>   clone an arvbox
+</pre>
+
+h2. Configs
+
+h3. dev
+
+Development configuration.  Boots a complete Arvados environment inside the container.  The "arvados", "arvados-dev" and "sso-devise-omniauth-provider" code directories, along with the data directories "postgres", "var", "passenger" and "gems", are bind mounted from the host file system for easy access and persistence across container rebuilds.  Services are bound to the Docker container's network IP address and can only be accessed on the local host.
+
+In "dev" mode, you can override the default autogenerated settings of Rails projects by adding "application.yml.override" to any Rails project (sso, api, workbench).  This can be used to test out API server settings or point Workbench at an alternate API server.
+
+h3. localdemo
+
+Demo configuration.  Boots a complete Arvados environment inside the container. Unlike the development configuration, code directories are included in the demo image, and data directories are stored in a separate data volume container. Services are bound to the Docker container's network IP address and can only be accessed on the local host.
+
+h3. test
+
+Run the test suite.
+
+h3. publicdev
+
+Publicly accessible development configuration.  Similar to 'dev' except that service ports are published to the host's IP address and can be accessed by anyone who can connect to the host system.  See below for more information.  WARNING! The public arvbox configuration is NOT SECURE and must not be placed on a public IP address or used for production work.
+
+h3. publicdemo
+
+Publicly accessible demo configuration.  Similar to 'localdemo' except that service ports are published to the host's IP address and can be accessed by anyone who can connect to the host system.  See below for more information.  WARNING! The public arvbox configuration is NOT SECURE and must not be placed on a public IP address or used for production work.
+
+h2. Environment variables
+
+h3. ARVBOX_DOCKER
+
+The location of Dockerfile.base and associated files used by "arvbox build".
+default: result of $(readlink -f $(dirname $0)/../lib/arvbox/docker)
+
+h3. ARVBOX_CONTAINER
+
+The name of the Docker container to manipulate.
+default: arvbox
+
+h3. ARVBOX_BASE
+
+The base directory to store persistent data for arvbox containers.
+default: $HOME/.arvbox
+
+h3. ARVBOX_DATA
+
+The base directory to store persistent data for the current container.
+default: $ARVBOX_BASE/$ARVBOX_CONTAINER
+
+h3. ARVADOS_ROOT
+
+The root directory of the Arvados source tree.
+default: $ARVBOX_DATA/arvados
+
+h3. ARVADOS_DEV_ROOT
+
+The root directory of the Arvados-dev source tree.
+default: $ARVBOX_DATA/arvados-dev
+
+h3. SSO_ROOT
+
+The root directory of the SSO source tree.
+default: $ARVBOX_DATA/sso-devise-omniauth-provider
+
+h3. ARVBOX_PUBLISH_IP
+
+The IP address on which to publish services when running in public configuration.  Overrides default detection of the host's IP address.
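+
+Together, these variables make it possible to run several independent arvbox instances side by side. For example (the container name here is arbitrary):
+
+<pre>
+$ export ARVBOX_CONTAINER=arvbox2
+$ arvbox start dev
+</pre>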
+
+h2. Using Arvbox for Arvados development
+
+The "Arvbox section of Hacking Arvados":https://dev.arvados.org/projects/arvados/wiki/Arvbox has information about using Arvbox for Arvados development.
+
+h2. Making Arvbox accessible from other hosts
+
+In "dev" and "localdemo" mode, Arvbox can only be accessed on the same host it is running.  To publish Arvbox service ports to the host's service ports and advertise the host's IP address for services, use @publicdev@ or @publicdemo@:
+
+<pre>
+$ arvbox start publicdemo
+</pre>
+
+This attempts to auto-detect the correct IP address to use by taking the IP address of the default route device.  If the auto-detection is wrong, if you want to publish a hostname instead of a raw address, or if you need to access it through a different device (such as a router or firewall), set @ARVBOX_PUBLISH_IP@ to the desired hostname or IP address.
+
+<pre>
+$ export ARVBOX_PUBLISH_IP=example.com
+$ arvbox start publicdemo
+</pre>
+
+Note: this expects to bind the host's port 80 (http) for workbench, so you cannot have a conflicting web server already running on the host.  It does not attempt to bind the host's port 22 (ssh); as a result, the arvbox ssh port is not published.
+
+h2. Notes
+
+Services are designed to install and auto-configure on start or restart.  For example, the service script for keepstore always compiles keepstore from source and registers the daemon with the API server.
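+
+You can use the @sv@ subcommand from the usage listing above to restart an individual service and re-trigger its auto-configuration; the service name below is illustrative:
+
+<pre>
+$ arvbox sv restart keepstore0
+</pre>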
+
+Services are run with process supervision, so a service which exits will be restarted.  Dependencies between services are handled by retrying a failing service script until its dependencies are fulfilled (by other service scripts), at which point the service script can complete.
diff --git a/doc/install/cheat_sheet.html.textile.liquid b/doc/install/cheat_sheet.html.textile.liquid
new file mode 100644 (file)
index 0000000..562b76d
--- /dev/null
@@ -0,0 +1,75 @@
+---
+layout: default
+navsection: admin
+title: User management at the CLI
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h3. Workbench: user management
+
+As an Admin user, use the gear icon on the top right to visit the Users page. From there, use the 'Add new user' button to create a new user. Alternatively, visit an existing user with the 'Show' button next to the user's name. Then use the 'Admin' tab and click the 'Setup' button to activate the user and create a virtual machine login as well as a git repository for them.
+
+h3. CLI setup
+
+<pre>
+export ARVADOS_API_HOST={{ site.arvados_api_host }}
+export ARVADOS_API_TOKEN=1234567890qwertyuiopasdfghjklzxcvbnm1234567890zzzz
+</pre>
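+
+With these variables exported, you can confirm that the CLI is talking to the intended cluster:
+
+<pre>
+arv user current
+</pre>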
+
+h3. CLI: Create VM
+
+<pre>
+arv virtual_machine create --virtual-machine '{"hostname":"xxxxxxxchangeme.example.com"}'
+</pre>
+
+h3. CLI: Activate user
+
+<pre>
+user_uuid=xxxxxxxchangeme
+
+arv user update --uuid "$user_uuid" --user '{"is_active":true}'
+</pre>
+
+h3. CLI: User &rarr; VM
+
+Give @$user_uuid@ permission to log in to @$vm_uuid@ as @$target_username@
+
+<pre>
+user_uuid=xxxxxxxchangeme
+vm_uuid=xxxxxxxchangeme
+target_username=xxxxxxxchangeme
+
+read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
+{
+"tail_uuid":"$user_uuid",
+"head_uuid":"$vm_uuid",
+"link_class":"permission",
+"name":"can_login",
+"properties":{"username":"$target_username"}
+}
+EOF
+</pre>
+
+h3. CLI: User &rarr; repo
+
+Give @$user_uuid@ permission to commit to @$repo_uuid@ as @$repo_username@
+
+<pre>
+user_uuid=xxxxxxxchangeme
+repo_uuid=xxxxxxxchangeme
+repo_username=xxxxxxxchangeme
+
+read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
+{
+"tail_uuid":"$user_uuid",
+"head_uuid":"$repo_uuid",
+"link_class":"permission",
+"name":"can_write",
+"properties":{"username":"$repo_username"}
+}
+EOF
+</pre>
diff --git a/doc/install/client.html.textile.liquid b/doc/install/client.html.textile.liquid
new file mode 100644 (file)
index 0000000..30f8c15
--- /dev/null
@@ -0,0 +1,14 @@
+---
+layout: default
+navsection: installguide
+title: Install client libraries
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The "SDK Reference":{{site.baseurl}}/sdk/index.html page has installation instructions for each of the SDKs.
+
diff --git a/doc/install/configure-azure-blob-storage.html.textile.liquid b/doc/install/configure-azure-blob-storage.html.textile.liquid
new file mode 100644 (file)
index 0000000..8a0e7bf
--- /dev/null
@@ -0,0 +1,113 @@
+---
+layout: default
+navsection: installguide
+title: Configure Azure Blob storage
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Keepstore can store data in one or more Azure Storage containers.
+
+h2. Set up VMs and Storage Accounts
+
+Before configuring individual keepstore servers, it is good to have an idea of the keepstore servers' final layout. One key decision is the number of servers and the type of VM to run. Azure may change the bandwidth capacity of each VM type over time. After conducting some empirical saturation tests, the conclusion was that bandwidth is roughly proportional to the number of cores, with some exceptions. As a rule of thumb, it is better to invest resources in more cores instead of memory or IOPS.
+
+Another decision is how many VMs should run keepstore. For example, there could be 8 VMs with one core each, one machine with 8 cores, or anything in between. Assuming the cloud resources cost the same, distributing keepstore across more VMs reduces the impact of a faulty VM. The recommendation is to start with 2 VMs, each with at least 2 cores, and expand in pairs. The total number of VMs will be a function of the budget and of the pipeline traffic that must be absorbed without saturation during periods of high usage. The Standard D v3 family is a balanced choice, making Standard_D2_v3 the 2-core option.
+
+There are many options for storage accounts; see the "Azure storage documentation":https://docs.microsoft.com/en-us/azure/storage/common/storage-introduction for details. The type of storage and the access tier will be a function of the budget and the desired responsiveness. A balanced option is a General-purpose Standard storage account using Blob storage with the hot access tier.
+
+Keepstore can be configured to reflect the level of underlying redundancy the storage provides; this is called the data replication option. For example, LRS (Locally Redundant Storage) saves 3 copies of the data. The desired redundancy can be implemented either at the keepstore layer or at the Storage Account layer. The decision of where to implement redundancy, and which type of Storage Account data replication to use (LRS, ZRS, GRS, or RA-GRS), involves trade-offs; please read the "Azure redundancy documentation":https://docs.microsoft.com/en-us/azure/storage/common/storage-redundancy and decide what is best for your needs.
+
+h2. Create a storage container
+
+Using the Azure web portal or command line tool, create or choose a storage account with a suitable redundancy profile and availability region. Use the storage account keys to create a new container.
+
+<notextile>
+<pre><code>~$ <span class="userinput">azure config mode arm</span>
+~$ <span class="userinput">azure login</span>
+~$ <span class="userinput">azure group create exampleGroupName eastus</span>
+~$ <span class="userinput">azure storage account create --type LRS --location eastus --resource-group exampleGroupName exampleStorageAccountName</span>
+~$ <span class="userinput">azure storage account keys list --resource-group exampleGroupName exampleStorageAccountName</span>
+info:    Executing command storage account keys list
++ Getting storage account keys
+data:    Primary: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz==
+data:    Secondary: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy==
+info:    storage account keys list command OK
+~$ <span class="userinput">AZURE_STORAGE_ACCOUNT="exampleStorageAccountName" \
+AZURE_STORAGE_ACCESS_KEY="zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz==" \
+azure storage container create exampleContainerName</span>
+</code></pre>
+</notextile>
+
+Note that Keepstore services may be configured to use multiple Azure Storage accounts and multiple containers within a storage account.
+
+h2. Configure keepstore
+
+Copy the primary storage account key to a file where it will be accessible to keepstore at startup time.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo sh -c 'cat &gt;/etc/arvados/keepstore/azure_storage_account_key.txt &lt;&lt;EOF'
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz==
+EOF</span>
+~$ <span class="userinput">sudo chmod 0400 /etc/arvados/keepstore/azure_storage_account_key.txt</span>
+</code></pre>
+</notextile>
+
+Next, edit the @Volumes@ section of the @keepstore.yml@ config file:
+
+<pre>
+Volumes:
+- # The volume type; this indicates Azure Blob storage
+  Type: Azure
+
+  # How much replication is performed by the underlying container.
+  # This is used to inform replication decisions at the Keep layer.
+  AzureReplication: 3
+
+  # The storage container to use for the backing store.
+  ContainerName: exampleContainerName
+
+  # If true, do not accept write or trash operations, only reads.
+  ReadOnly: false
+
+  # Amount of time to wait for a response before failing the request
+  RequestTimeout: 2m0s
+
+  # The storage account name, used for authentication
+  StorageAccountName: exampleStorageAccountName
+
+  # The storage account secret key, used for authentication
+  StorageAccountKeyFile: /etc/arvados/keepstore/azure_storage_account_key.txt
+
+  # The cloud environment to use.  If blank, use the default cloud
+  # environment.  See below for an example of an alternate cloud environment.
+  StorageBaseURL: ""
+
+  # Storage classes to associate with this volume.  See "Storage
+  # classes" in the "Admin" section of doc.arvados.org.
+  StorageClasses: null
+
+- # Example configuration to use Azure China.
+  #
+  # The alternate cloud environment to use.
+  # Note that cloud environments are different from regions.  A
+  # cloud environment is an entirely separate instance of Azure with
+  # separate accounts, requiring separate credentials.
+  #
+  StorageBaseURL: core.chinacloudapi.cn
+  StorageAccountKeyFile: /etc/arvados/keepstore/azure_cn_storage_account_key.txt
+  StorageAccountName: cn-account-name
+  ContainerName: exampleChinaContainerName
+
+  # The rest are the same as above
+  Type: Azure
+  AzureReplication: 3
+  ReadOnly: false
+  RequestTimeout: 10m0s
+  StorageClasses: null
+</pre>
+
+Start (or restart) keepstore, and check its log file to confirm it is using the new configuration.
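+
+For example, on a systemd-based host (assuming the package installed a service unit named @keepstore@):
+
+<pre>
+$ sudo systemctl restart keepstore
+$ sudo journalctl -fu keepstore
+</pre>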
diff --git a/doc/install/configure-fs-storage.html.textile.liquid b/doc/install/configure-fs-storage.html.textile.liquid
new file mode 100644 (file)
index 0000000..ddd54c3
--- /dev/null
@@ -0,0 +1,56 @@
+---
+layout: default
+navsection: installguide
+title: Filesystem storage
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Keepstore can store data in local and network-attached POSIX filesystems.
+
+h2. Setting up filesystem mounts
+
+Volumes are configured in the @Volumes@ section of the configuration file.  You may provide multiple volumes for a single keepstore process to manage multiple disks.  Keepstore distributes blocks among volumes in round-robin fashion.
+
+<pre>
+Volumes:
+- # The volume type; indicates this is a filesystem directory.
+  Type: Directory
+
+  # The directory that will be used as the backing store.
+  Root: /mnt/local-disk
+
+  # How much replication is performed by the underlying filesystem.
+  # (for example, a network filesystem may provide its own replication).
+  # This is used to inform replication decisions at the Keep layer.
+  DirectoryReplication: 1
+
+  # If true, do not accept write or trash operations, only reads.
+  ReadOnly: false
+
+  # When true, read and write operations (for whole 64MiB blocks) on
+  # an individual volume will be queued and issued serially.  When
+  # false, read and write operations will be issued concurrently.
+  #
+  # May improve throughput if you experience contention when there are
+  # multiple requests to the same volume.
+  #
+  # When using SSDs, RAID, or a parallel network filesystem, you probably
+  # don't want this.
+  Serialize: false
+
+  # Storage classes to associate with this volume.  See "Storage
+  # classes" in the "Admin" section of doc.arvados.org.
+  StorageClasses: null
+
+  # Example of a second volume section
+- DirectoryReplication: 2
+  ReadOnly: false
+  Root: /mnt/network-disk
+  Serialize: false
+  StorageClasses: null
+  Type: Directory
+</pre>
diff --git a/doc/install/configure-s3-object-storage.html.textile.liquid b/doc/install/configure-s3-object-storage.html.textile.liquid
new file mode 100644 (file)
index 0000000..88172fa
--- /dev/null
@@ -0,0 +1,112 @@
+---
+layout: default
+navsection: installguide
+title: Configure S3 object storage
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Keepstore can store data in object storage compatible with the S3 API, such as Amazon S3, Google Cloud Storage, or Ceph RADOS.
+
+h2. Configure keepstore
+
+Copy the "access key" and "secret key" to files where they will be accessible to keepstore at startup time.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo sh -c 'cat &gt;/etc/arvados/keepstore/aws_s3_access_key.txt &lt;&lt;EOF'
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz==
+EOF</span>
+~$ <span class="userinput">sudo chmod 0400 /etc/arvados/keepstore/aws_s3_access_key.txt</span>
+</code></pre>
+</notextile>
+
+Next, edit the @Volumes@ section of the @keepstore.yml@ config file.
+
+h3. Example config for Amazon S3
+
+<pre>
+Volumes:
+- # The volume type; this indicates object storage compatible with the S3 API
+  Type: S3
+
+  # Storage provider.  If blank, uses Amazon S3 by default.
+  # See below for example alternate configuration for Google cloud
+  # storage.
+  Endpoint: ""
+
+  # The bucket to use for the backing store.
+  Bucket: example-bucket-name
+
+  # The region where the bucket is located.
+  Region: us-east-1
+
+  # The credentials to use to access the bucket.
+  AccessKeyFile: /etc/arvados/keepstore/aws_s3_access_key.txt
+  SecretKeyFile: /etc/arvados/keepstore/aws_s3_secret_key.txt
+
+  # Maximum time to wait making the initial connection to the backend before
+  # failing the request.
+  ConnectTimeout: 1m0s
+
+  # Page size for s3 "list bucket contents" requests
+  IndexPageSize: 1000
+
+  # True if the region requires a LocationConstraint declaration
+  LocationConstraint: false
+
+  # Maximum eventual consistency latency
+  RaceWindow: 24h0m0s
+
+  # If true, do not accept write or trash operations, only reads.
+  ReadOnly: false
+
+  # Maximum time to wait for a complete response from the backend before
+  # failing the request.
+  ReadTimeout: 2m0s
+
+  # How much replication is performed by the underlying bucket.
+  # This is used to inform replication decisions at the Keep layer.
+  S3Replication: 2
+
+  # Storage classes to associate with this volume.  See
+  # "Storage classes" in the "Admin" section of doc.arvados.org.
+  StorageClasses: null
+
+  # Enable deletion (garbage collection) even when TrashLifetime is
+  # zero.  WARNING: eventual consistency may result in race conditions
+  # that can cause data loss.  Do not enable this unless you know what
+  # you are doing.
+  UnsafeDelete: false
+</pre>
+
+Start (or restart) keepstore, and check its log file to confirm it is using the new configuration.
+
+h3. Example config for Google cloud storage
+
+See previous section for documentation of configuration fields.
+
+<pre>
+Volumes:
+- # Example configuration using alternate storage provider
+  # Configuration for Google cloud storage
+  Endpoint: https://storage.googleapis.com
+  Region: ""
+
+  AccessKeyFile: /etc/arvados/keepstore/gce_s3_access_key.txt
+  SecretKeyFile: /etc/arvados/keepstore/gce_s3_secret_key.txt
+  Bucket: example-bucket-name
+  ConnectTimeout: 1m0s
+  IndexPageSize: 1000
+  LocationConstraint: false
+  RaceWindow: 24h0m0s
+  ReadOnly: false
+  ReadTimeout: 2m0s
+  S3Replication: 2
+  StorageClasses: null
+  UnsafeDelete: false
+</pre>
+
+Start (or restart) keepstore, and check its log file to confirm it is using the new configuration.
diff --git a/doc/install/copy_pipeline_from_curoverse.html.textile.liquid b/doc/install/copy_pipeline_from_curoverse.html.textile.liquid
new file mode 100644 (file)
index 0000000..fa497c9
--- /dev/null
@@ -0,0 +1,68 @@
+---
+layout: default
+navsection: installguide
+title: Copy pipeline from the Arvados Playground
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This tutorial describes how to find and copy a publicly shared pipeline from the Arvados Playground. Please note that you can use similar steps to copy any template you can access from the Arvados Playground to your cluster.
+
+h3. Access a public pipeline in the Arvados Playground using Workbench
+
+The Arvados Playground provides access to some public data, which can be used to experience Arvados in action. Let's access a public pipeline and copy it to your cluster, so that you can run it in your environment.
+
+Start by visiting the "*Arvados Playground public projects page*":https://playground.arvados.org/projects/public. This page lists all the publicly accessible projects in this Arvados installation. Click on one of these projects to open it. We will use "*lobSTR v.3 (Public)*":https://playground.arvados.org/projects/qr1hi-j7d0g-up6qgpqz5ie2vfq as the example in this tutorial.
+
+Once in the "*lobSTR v.3 (Public)*":https://playground.arvados.org/projects/qr1hi-j7d0g-up6qgpqz5ie2vfq project, click on the *Pipeline templates* tab. In the pipeline templates tab, you will see a template named *lobSTR v.3*. Click on the <span class="fa fa-lg fa-gears"></span> *Show* button to the left of this name. This will take to you to the "*lobSTR v.3*":https://playground.arvados.org/pipeline_templates/qr1hi-p5p6p-9pkaxt6qjnkxhhu template page.
+
+Once in this page, you can take the *uuid* of this template from the address bar, which is *qr1hi-p5p6p-9pkaxt6qjnkxhhu*. Next, we will copy this template to your Arvados instance.
+
+h3. Copying a pipeline template from the Arvados Playground to your cluster
+
+As described above, navigate to the publicly shared pipeline template "*lobSTR v.3*":https://playground.arvados.org/pipeline_templates/qr1hi-p5p6p-9pkaxt6qjnkxhhu on the Arvados Playground.  We will now copy this template with uuid *qr1hi-p5p6p-9pkaxt6qjnkxhhu* to your cluster.
+
+{% include 'tutorial_expectations' %}
+
+We will use the Arvados *arv-copy* command to copy this template to your cluster. In order to use arv-copy, first you need to set up the source and destination cluster configuration files. Here, *qr1hi* would be the source cluster and your Arvados instance would be the *dst_cluster*.
+
+During this setup, if you have an account in the Arvados Playground, you can use "your access token":#using-your-token to create the source configuration file. If you do not have an account in the Arvados Playground, you can use the "anonymous access token":#using-anonymous-token for the source cluster configuration.
+
+h4(#using-anonymous-token). *Configuring source and destination setup files using anonymous access token*
+
+Configure the source and destination clusters as described in the "*Using arv-copy*":http://doc.arvados.org/user/topics/arv-copy.html tutorial in the user guide, while using *5vqmz9mik2ou2k9objb8pnyce8t97p6vocyaouqo3qalvpmjs5* as the API token for the source configuration.
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd ~/.config/arvados</span>
+~$ <span class="userinput">echo "ARVADOS_API_HOST=qr1hi.arvadosapi.com" >> qr1hi.conf</span>
+~$ <span class="userinput">echo "ARVADOS_API_TOKEN=5vqmz9mik2ou2k9objb8pnyce8t97p6vocyaouqo3qalvpmjs5" >> qr1hi.conf</span>
+</code></pre>
+</notextile>
+
+You can now copy the pipeline template from *qr1hi* to *your cluster*. Replace *dst_cluster* with the *uuid_prefix* of your cluster.
+
+<notextile>
+<pre><code>~$ <span class="userinput"> arv-copy --no-recursive --src qr1hi --dst dst_cluster qr1hi-p5p6p-9pkaxt6qjnkxhhu</span>
+</code></pre>
+</notextile>
+
+*Note:* When you are using the anonymous access token to copy the template, you will not be able to do a recursive copy, since you will not be able to provide the @--dst-git-repo@ parameter. In order to perform a recursive copy of the template, you would need to use the Arvados API token from your account, as explained in the "using your token":#using-your-token section below.
+
+h4(#using-your-token). *Configuring source and destination setup files using personal access token*
+
+If you already have an account in the Arvados Playground, you can follow the instructions in the "*Using arv-copy*":http://doc.arvados.org/user/topics/arv-copy.html user guide to get your *Current token* for the source and destination clusters, and use them to create the source *qr1hi.conf* and destination *dst_cluster.conf* configuration files.
+
+You can now copy the pipeline template from *qr1hi* to *your cluster* with or without recursion. Replace *dst_cluster* with the *uuid_prefix* of your cluster.
+
+*Non-recursive copy:*
+<notextile>
+<pre><code>~$ <span class="userinput"> arv-copy --no-recursive --src qr1hi --dst dst_cluster qr1hi-p5p6p-9pkaxt6qjnkxhhu</span></code></pre>
+</notextile>
+
+*Recursive copy:*
+<notextile>
+<pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst dst_cluster --dst-git-repo $USER/tutorial qr1hi-p5p6p-9pkaxt6qjnkxhhu</span></code></pre>
+</notextile>
diff --git a/doc/install/crunch2-slurm/install-compute-node.html.textile.liquid b/doc/install/crunch2-slurm/install-compute-node.html.textile.liquid
new file mode 100644 (file)
index 0000000..d8a62a9
--- /dev/null
@@ -0,0 +1,43 @@
+---
+layout: default
+navsection: installguide
+title: Set up a compute node
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Install dependencies
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+{% include 'note_python_sc' %}
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">echo 'exclude=python2-llfuse' | sudo tee -a /etc/yum.conf</span>
+~$ <span class="userinput">sudo yum install python-arvados-fuse crunch-run arvados-docker-cleaner</span>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-fuse crunch-run arvados-docker-cleaner</span>
+</code></pre>
+</notextile>
+
+{% include 'install_compute_docker' %}
+
+{% include 'install_compute_fuse' %}
+
+{% include 'install_docker_cleaner' %}
+
+h2. Set up SLURM
+
+Install SLURM on the compute node using the same process you used on the API server in the "previous step":install-slurm.html.
+
+The @slurm.conf@ and @/etc/munge/munge.key@ files must be identical on all SLURM nodes. Copy the files you created on the API server in the "previous step":install-slurm.html to each compute node.
diff --git a/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid b/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
new file mode 100644 (file)
index 0000000..cd33829
--- /dev/null
@@ -0,0 +1,173 @@
+---
+layout: default
+navsection: installguide
+title: Install the SLURM dispatcher
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The SLURM dispatcher can run on any node that can submit requests to both the Arvados API server and the SLURM controller.  It is not resource-intensive, so you can run it on the API server node.
+
+h2. Install the dispatcher
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install crunch-dispatch-slurm</span>
+~$ <span class="userinput">sudo systemctl enable crunch-dispatch-slurm</span>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install crunch-dispatch-slurm</span>
+</code></pre>
+</notextile>
+
+h2. Create a dispatcher token
+
+Create an Arvados superuser token for use by the dispatcher. If you have multiple dispatch processes, you should give each one a different token.
+
+{% include 'create_superuser_token' %}
+
+h2. Configure the dispatcher
+
+Set up crunch-dispatch-slurm's configuration directory:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo mkdir -p /etc/arvados</span>
+~$ <span class="userinput">sudo install -d -o root -g <b>crunch</b> -m 0750 /etc/arvados/crunch-dispatch-slurm</span>
+</code></pre>
+</notextile>
+
+Edit @/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml@ to authenticate to your Arvados API server, using the token you generated in the previous step.  Follow this YAML format:
+
+<notextile>
+<pre><code class="userinput">Client:
+  APIHost: <b>zzzzz.arvadosapi.com</b>
+  AuthToken: <b>zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz</b>
+</code></pre>
+</notextile>
+
+This is the only configuration required by crunch-dispatch-slurm.  The subsections below describe optional configuration flags you can set inside the main configuration object.
+
+h3(#KeepServiceURIs). Client::KeepServiceURIs
+
+Override Keep service discovery with a predefined list of Keep URIs. This can be useful if the compute nodes run a local keepstore that should handle all Keep traffic. Example:
+
+<notextile>
+<pre><code class="userinput">Client:
+  APIHost: zzzzz.arvadosapi.com
+  AuthToken: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+  KeepServiceURIs:
+  - <b>http://127.0.0.1:25107</b>
+</code></pre>
+</notextile>
+
+h3(#PollPeriod). PollPeriod
+
+crunch-dispatch-slurm polls the API server periodically for new containers to run.  The @PollPeriod@ option controls how often this poll happens.  Set this to a string of numbers suffixed with one of the time units @ns@, @us@, @ms@, @s@, @m@, or @h@.  For example:
+
+<notextile>
+<pre><code class="userinput">PollPeriod: <b>3m30s</b>
+</code></pre>
+</notextile>
+
+h3(#PrioritySpread). PrioritySpread
+
+crunch-dispatch-slurm adjusts the "nice" values of its SLURM jobs to ensure containers are prioritized correctly relative to one another. This option tunes the adjustment mechanism.
+* If non-Arvados jobs run on your SLURM cluster, and your Arvados containers are waiting too long in the SLURM queue because their "nice" values are too high for them to compete with other SLURM jobs, you should use a smaller PrioritySpread value.
+* If you have an older SLURM system that limits nice values to 10000, a smaller @PrioritySpread@ can help avoid reaching that limit.
+* In other cases, a larger value is beneficial because it reduces the total number of adjustments made by executing @scontrol@.
+
+The smallest usable value is @1@. The default value of @10@ is used if this option is zero or negative. Example:
+
+<notextile>
+<pre><code class="userinput">PrioritySpread: <b>1000</b>
+</code></pre>
+</notextile>
+
+h3(#SbatchArguments). SbatchArguments
+
+When crunch-dispatch-slurm invokes @sbatch@, you can add arguments to the command by specifying @SbatchArguments@.  You can use this to send the jobs to specific cluster partitions or add resource requests.  Set @SbatchArguments@ to an array of strings.  For example:
+
+<notextile>
+<pre><code class="userinput">SbatchArguments:
+- <b>"--partition=PartitionName"</b>
+</code></pre>
+</notextile>
+
+Note: If an argument is supplied multiple times, @slurm@ uses the value of the last occurrence of the argument on the command line.  Arguments specified through Arvados are added after the arguments listed in @SbatchArguments@.  This means, for example, that an Arvados container that specifies @partitions@ in @scheduling_parameters@ will override an occurrence of @--partition@ in @SbatchArguments@.  As a result, for container parameters that can be specified through Arvados, @SbatchArguments@ can be used to specify defaults but not to enforce specific policy.
+
+h3(#CrunchRunCommand-cgroups). CrunchRunCommand: Dispatch to SLURM cgroups
+
+If your SLURM cluster uses the @task/cgroup@ TaskPlugin, you can configure Crunch's Docker containers to be dispatched inside SLURM's cgroups.  This provides consistent enforcement of resource constraints.  To do this, use a crunch-dispatch-slurm configuration like the following:
+
+<notextile>
+<pre><code class="userinput">CrunchRunCommand:
+- <b>crunch-run</b>
+- <b>"-cgroup-parent-subsystem=memory"</b>
+</code></pre>
+</notextile>
+
+The choice of subsystem ("memory" in this example) must correspond to one of the resource types enabled in SLURM's @cgroup.conf@. Limits for other resource types will also be respected.  The specified subsystem is singled out only to let Crunch determine the name of the cgroup provided by SLURM.  When doing this, you should also set "ReserveExtraRAM":#ReserveExtraRAM .
+
+{% include 'notebox_begin' %}
+
+Some versions of Docker (at least 1.9), when run under systemd, require the cgroup parent to be specified as a systemd slice.  This causes an error when specifying a cgroup parent created outside systemd, such as those created by SLURM.
+
+You can work around this issue by disabling the Docker daemon's systemd integration.  This makes it more difficult to manage Docker services with systemd, but Crunch does not require that functionality, and it will be able to use SLURM's cgroups as container parents.  To do this, "configure the Docker daemon on all compute nodes":install-compute-node.html#configure_docker_daemon to run with the option @--exec-opt native.cgroupdriver=cgroupfs@.
+
+{% include 'notebox_end' %}
+
+h3(#CrunchRunCommand-network). CrunchRunCommand: Using host networking for containers
+
+Older Linux kernels (prior to 3.18) have bugs in network namespace handling which can lead to compute node lockups.  This is indicated by blocked kernel tasks in "Workqueue: netns cleanup_net".  If you are experiencing this problem, as a workaround you can disable the use of network namespaces by Docker across the cluster.  Be aware this reduces container isolation, which may be a security risk.
+
+<notextile>
+<pre><code class="userinput">CrunchRunCommand:
+- <b>crunch-run</b>
+- <b>"-container-enable-networking=always"</b>
+- <b>"-container-network-mode=host"</b>
+</code></pre>
+</notextile>
+
+h3(#MinRetryPeriod). MinRetryPeriod: Rate-limit repeated attempts to start containers
+
+If SLURM is unable to run a container, the dispatcher will submit it again after the next PollPeriod. If PollPeriod is very short, this can be excessive. If MinRetryPeriod is set, the dispatcher will avoid submitting the same container to SLURM more than once in the given time span.
+
+<notextile>
+<pre><code class="userinput">MinRetryPeriod: <b>30s</b>
+</code></pre>
+</notextile>
+
+h3(#ReserveExtraRAM). ReserveExtraRAM: Extra RAM for jobs
+
+Extra RAM to reserve (in bytes) on each SLURM job submitted by Arvados, which is added to the amount specified in the container's @runtime_constraints@.  If not provided, the default value is zero.  Helpful when using @-cgroup-parent-subsystem@, where @crunch-run@ and @arv-mount@ share the control group memory limit with the user process.  In this situation, at least 256MiB is recommended to accommodate each container's @crunch-run@ and @arv-mount@ processes.
+
+<notextile>
+<pre><code class="userinput">ReserveExtraRAM: <b>268435456</b>
+</code></pre>
+</notextile>
+
+h2. Restart the dispatcher
+
+{% include 'notebox_begin' %}
+
+The crunch-dispatch-slurm package includes configuration files for systemd.  If you're using a different init system, you'll need to configure a service to start and stop a @crunch-dispatch-slurm@ process as desired.  The process should run from a directory where the @crunch@ user has write permission on all compute nodes, such as its home directory or @/tmp@.  You do not need to specify any additional switches or environment variables.
+
+{% include 'notebox_end' %}
+
+Restart the dispatcher to run with your new configuration:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo systemctl restart crunch-dispatch-slurm</span>
+</code></pre>
+</notextile>
diff --git a/doc/install/crunch2-slurm/install-prerequisites.html.textile.liquid b/doc/install/crunch2-slurm/install-prerequisites.html.textile.liquid
new file mode 100644 (file)
index 0000000..eceeefa
--- /dev/null
@@ -0,0 +1,14 @@
+---
+layout: default
+navsection: installguide
+title: Containers API SLURM prerequisites
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Containers can be dispatched to a SLURM cluster.  The dispatcher sends work to the cluster using SLURM's @sbatch@ command, so it works in a variety of SLURM configurations.
+
+In order to run containers, you must run the dispatcher as a user that has permission to set up FUSE mounts and run Docker containers on each compute node.  This install guide refers to this user as the @crunch@ user.  We recommend you create this user on each compute node with the same UID and GID, and add it to the @fuse@ and @docker@ system groups to grant it the necessary permissions.  However, you can run the dispatcher under any account with sufficient permissions across the cluster.
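+
+For example, a sketch of creating such a user on a compute node (the UID/GID values are illustrative; keep them identical across nodes):
+
+<pre>
+$ sudo groupadd --gid 5000 crunch
+$ sudo useradd --uid 5000 --gid 5000 --create-home crunch
+$ sudo usermod -aG fuse,docker crunch
+</pre>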
diff --git a/doc/install/crunch2-slurm/install-slurm.html.textile.liquid b/doc/install/crunch2-slurm/install-slurm.html.textile.liquid
new file mode 100644 (file)
index 0000000..e1593a4
--- /dev/null
@@ -0,0 +1,115 @@
+---
+layout: default
+navsection: installguide
+title: Set up SLURM
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+On the API server, install SLURM and munge, and generate a munge key.
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo /usr/bin/apt-get install slurm-llnl munge</span>
+~$ <span class="userinput">sudo /usr/sbin/create-munge-key</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install slurm munge slurm-munge</span>
+</code></pre>
+</notextile>
+
+Now we need to give SLURM a configuration file.  On Debian-based systems, this is installed at @/etc/slurm-llnl/slurm.conf@.  On Red Hat-based systems, this is installed at @/etc/slurm/slurm.conf@.  Here's an example @slurm.conf@:
+
+<notextile>
+<pre>
+ControlMachine=uuid_prefix.your.domain
+SlurmctldPort=6817
+SlurmdPort=6818
+AuthType=auth/munge
+StateSaveLocation=/tmp
+SlurmdSpoolDir=/tmp/slurmd
+SwitchType=switch/none
+MpiDefault=none
+SlurmctldPidFile=/var/run/slurmctld.pid
+SlurmdPidFile=/var/run/slurmd.pid
+ProctrackType=proctrack/pgid
+CacheGroups=0
+ReturnToService=2
+TaskPlugin=task/affinity
+#
+# TIMERS
+SlurmctldTimeout=300
+SlurmdTimeout=300
+InactiveLimit=0
+MinJobAge=300
+KillWait=30
+Waittime=0
+#
+# SCHEDULING
+SchedulerType=sched/backfill
+SchedulerPort=7321
+SelectType=select/linear
+FastSchedule=0
+#
+# LOGGING
+SlurmctldDebug=3
+#SlurmctldLogFile=
+SlurmdDebug=3
+#SlurmdLogFile=
+JobCompType=jobcomp/none
+#JobCompLoc=
+JobAcctGatherType=jobacct_gather/none
+#
+# COMPUTE NODES
+NodeName=DEFAULT
+PartitionName=DEFAULT MaxTime=INFINITE State=UP
+
+NodeName=compute[0-255]
+PartitionName=compute Nodes=compute[0-255] Default=YES Shared=YES
+</pre>
+</notextile>
+
+h3. SLURM configuration essentials
+
+Whenever you change this file, you will need to update the copy _on every compute node_ as well as the controller node, and then run @sudo scontrol reconfigure@.
+
+*@ControlMachine@* should be a DNS name that resolves to the SLURM controller (dispatch/API server). This must resolve correctly on all SLURM worker nodes as well as the controller itself. In general SLURM is very sensitive about all of the nodes being able to communicate with the controller _and one another_, all using the same DNS names.
+
+*@SelectType=select/linear@* is needed on cloud-based installations that update node sizes dynamically, but it can only schedule one container at a time on each node. On a static or homogeneous cluster, use @SelectType=select/cons_res@ with @SelectTypeParameters=CR_CPU_Memory@ instead to enable node sharing.
+
+*@NodeName=compute[0-255]@* establishes that the hostnames of the worker nodes will be compute0, compute1, etc. through compute255.
+* There are several ways to compress sequences of names, like @compute[0-9,80,100-110]@. See the "hostlist" discussion in the @slurm.conf(5)@ and @scontrol(1)@ man pages for more information.
+* It is not necessary for all of the nodes listed here to be alive in order for SLURM to work, although you should make sure the DNS entries exist. It is easiest to define lots of hostnames up front, assigning them to real nodes and updating your DNS records as the nodes appear. This minimizes the frequency of @slurm.conf@ updates and use of @scontrol reconfigure@.
+
+Each hostname in @slurm.conf@ must also resolve correctly on all SLURM worker nodes as well as the controller itself. Furthermore, the hostnames used in the configuration file must match the hostnames reported by @hostname@ or @hostname -s@ on the nodes themselves. This applies to the ControlMachine as well as the worker nodes.
+
+For example:
+* In @slurm.conf@ on control and worker nodes: @ControlMachine=uuid_prefix.your.domain@
+* In @slurm.conf@ on control and worker nodes: @NodeName=compute[0-255]@
+* In @/etc/resolv.conf@ on control and worker nodes: @search uuid_prefix.your.domain@
+* On the control node: @hostname@ reports @uuid_prefix.your.domain@
+* On worker node 123: @hostname@ reports @compute123.uuid_prefix.your.domain@
+
+h3. Automatic hostname assignment
+
+The API server will choose an unused hostname from the set given in @application.yml@, which defaults to @compute[0-255]@.
+
+If it is not feasible to give your compute nodes hostnames like compute0, compute1, etc., you can accommodate other naming schemes with a bit of extra configuration.
+
+If you want Arvados to assign names to your nodes with a different consecutive numeric series like @{worker1-0000, worker1-0001, worker1-0002}@, add an entry to @application.yml@; see @/var/www/arvados-api/current/config/application.default.yml@ for details. Example:
+* In @application.yml@: <code>assign_node_hostname: worker1-%<slot_number>04d</code>
+* In @slurm.conf@: <code>NodeName=worker1-[0000-0255]</code>
+
+If your worker hostnames are already assigned by other means, and the full set of names is known in advance, have your worker node bootstrapping script (see "Installing a compute node":install-compute-node.html) send its current hostname, rather than expect Arvados to assign one.
+* In @application.yml@: <code>assign_node_hostname: false</code>
+* In @slurm.conf@: <code>NodeName=alice,bob,clay,darlene</code>
+
+If your worker hostnames are already assigned by other means, but the full set of names is _not_ known in advance, you can use the @slurm.conf@ and @application.yml@ settings in the previous example, but you must also update @slurm.conf@ (both on the controller and on all worker nodes) and run @sudo scontrol reconfigure@ whenever a new node comes online.
diff --git a/doc/install/crunch2-slurm/install-test.html.textile.liquid b/doc/install/crunch2-slurm/install-test.html.textile.liquid
new file mode 100644 (file)
index 0000000..ca509bb
--- /dev/null
@@ -0,0 +1,114 @@
+---
+layout: default
+navsection: installguide
+title: Test SLURM dispatch
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Test compute node setup
+
+You should now be able to submit SLURM jobs that run in Docker containers.  On the node where you're running the dispatcher, you can test this by running:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo -u <b>crunch</b> srun -N1 docker run busybox echo OK
+</code></pre>
+</notextile>
+
+If it works, this command should print @OK@ (it may also show some status messages from SLURM and/or Docker).  If it does not print @OK@, double-check your compute node setup, and that the @crunch@ user can submit SLURM jobs.
+
+h2. Test the dispatcher
+
+On the dispatch node, start monitoring the crunch-dispatch-slurm logs:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo journalctl -o cat -fu crunch-dispatch-slurm.service</span>
+</code></pre>
+</notextile>
+
+*On your shell server*, submit a simple container request:
+
+<notextile>
+<pre><code>shell:~$ <span class="userinput">arv container_request create --container-request '{
+  "name":            "test",
+  "state":           "Committed",
+  "priority":        1,
+  "container_image": "arvados/jobs:latest",
+  "command":         ["echo", "Hello, Crunch!"],
+  "output_path":     "/out",
+  "mounts": {
+    "/out": {
+      "kind":        "tmp",
+      "capacity":    1000
+    }
+  },
+  "runtime_constraints": {
+    "vcpus": 1,
+    "ram": 8388608
+  }
+}'</span>
+</code></pre>
+</notextile>
+
+This command should return a record with a @container_uuid@ field.  Once crunch-dispatch-slurm polls the API server for new containers to run, you should see it dispatch that same container.  It will log messages like:
+
+<notextile>
+<pre><code>2016/08/05 13:52:54 Monitoring container zzzzz-dz642-hdp2vpu9nq14tx0 started
+2016/08/05 13:53:04 About to submit queued container zzzzz-dz642-hdp2vpu9nq14tx0
+2016/08/05 13:53:04 sbatch succeeded: Submitted batch job 8102
+</code></pre>
+</notextile>
+
+If you do not see crunch-dispatch-slurm try to dispatch the container, double-check that it is running and that the API hostname and token in @/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml@ are correct.
+
+Before the container finishes, SLURM's @squeue@ command will show the new job in the list of queued and running jobs.  For example, you might see:
+
+<notextile>
+<pre><code>~$ <span class="userinput">squeue --long</span>
+Fri Aug  5 13:57:50 2016
+  JOBID PARTITION     NAME     USER    STATE       TIME TIMELIMIT  NODES NODELIST(REASON)
+   8103   compute zzzzz-dz   crunch  RUNNING       1:56 UNLIMITED      1 compute0
+</code></pre>
+</notextile>
+
+The job's name corresponds to the container's UUID.  You can get more information about it by running, e.g., <notextile><code>scontrol show job Name=<b>UUID</b></code></notextile>.
+
+When the container finishes, the dispatcher will log that, with the final result:
+
+<notextile>
+<pre><code>2016/08/05 13:53:14 Container zzzzz-dz642-hdp2vpu9nq14tx0 now in state "Complete" with locked_by_uuid ""
+2016/08/05 13:53:14 Monitoring container zzzzz-dz642-hdp2vpu9nq14tx0 finished
+</code></pre>
+</notextile>
+
+After the container finishes, you can get the container record by UUID *from a shell server* to see its results:
+
+<notextile>
+<pre><code>shell:~$ <span class="userinput">arv get <b>zzzzz-dz642-hdp2vpu9nq14tx0</b></span>
+{
+ ...
+ "exit_code":0,
+ "log":"a01df2f7e5bc1c2ad59c60a837e90dc6+166",
+ "output":"d41d8cd98f00b204e9800998ecf8427e+0",
+ "state":"Complete",
+ ...
+}
+</code></pre>
+</notextile>
+
+You can use standard Keep tools to view the container's output and logs from their corresponding fields.  For example, to see the logs from the collection referenced in the @log@ field:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep ls <b>a01df2f7e5bc1c2ad59c60a837e90dc6+166</b></span>
+./crunch-run.txt
+./stderr.txt
+./stdout.txt
+~$ <span class="userinput">arv keep get <b>a01df2f7e5bc1c2ad59c60a837e90dc6+166</b>/stdout.txt</span>
+2016-08-05T13:53:06.201011Z Hello, Crunch!
+</code></pre>
+</notextile>
+
+If the container does not dispatch successfully, refer to the crunch-dispatch-slurm logs for information about why it failed.
diff --git a/doc/install/index.html.textile.liquid b/doc/install/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..c31b2ed
--- /dev/null
@@ -0,0 +1,29 @@
+---
+layout: default
+navsection: installguide
+title: Installation options
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados components run on GNU/Linux systems and support multiple cloud operating stacks.  Arvados supports Debian and derivatives such as Ubuntu, as well as Red Hat and derivatives such as CentOS.
+
+Arvados components can be installed and configured in a number of different ways.
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|||\5=. Appropriate for|
+||_. Ease of setup|_. Multiuser/networked access|_. Workflow Development and Testing|_. Large Scale Production|_. Development of Arvados|_. Arvados System Testing|
+|"Arvados-in-a-box":arvbox.html (arvbox)|Easy|no|yes|no|yes|yes|
+|"Arvados on Kubernetes":arvados-on-kubernetes.html|Easy ^1^|yes|yes ^2^|no ^2^|no|yes|
+|"Manual installation":install-manual-prerequisites.html|Complicated|yes|yes|yes|no|no|
+|"Arvados Playground":https://playground.arvados.org hosted by Veritas Genetics|N/A ^3^|yes|yes|no|no|no|
+|"Cluster Operation Subscription":https://curoverse.com/products supported by Veritas Genetics|N/A ^3^|yes|yes|yes|yes|yes|
+</div>
+
+* ^1^ Assumes a Kubernetes cluster is available
+* ^2^ Arvados on Kubernetes is under development and not yet ready for production use
+* ^3^ No installation necessary; run and managed by Veritas Genetics
diff --git a/doc/install/install-api-server.html.textile.liquid b/doc/install/install-api-server.html.textile.liquid
new file mode 100644 (file)
index 0000000..a6b843b
--- /dev/null
@@ -0,0 +1,243 @@
+---
+layout: default
+navsection: installguide
+title: Install the API server
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Install prerequisites
+
+The Arvados package repository includes an API server package that can help automate much of the deployment.
+
+h3(#install_ruby_and_bundler). Install Ruby and Bundler
+
+{% include 'install_ruby_and_bundler' %}
+
+h2(#install_apiserver). Install API server and dependencies
+
+On a Debian-based system, install the following packages:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install bison build-essential libcurl4-openssl-dev git arvados-api-server</span>
+</code></pre>
+</notextile>
+
+On a Red Hat-based system, install the following packages:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install bison make automake gcc gcc-c++ libcurl-devel git arvados-api-server</span>
+</code></pre>
+</notextile>
+
+{% include 'install_git' %}
+
+h2(#configure). Set up the database
+
+Configure the API server to connect to your database by updating @/etc/arvados/api/database.yml@. Replace the @xxxxxxxx@ database password placeholder with the "password you generated during database setup":install-postgresql.html#api. Be sure to update the @production@ section.
+
+<notextile>
+<pre><code>~$ <span class="userinput">editor /etc/arvados/api/database.yml</span>
+</code></pre></notextile>
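+
+For reference, the @production@ section of @database.yml@ is a standard Rails database configuration and will look something like the sketch below.  The database and role names shown are the conventional ones from the database setup step; the template shipped with your package may differ slightly.
+
+<notextile>
+<pre><code>production:
+  adapter: postgresql
+  encoding: utf8
+  database: arvados_production
+  username: arvados
+  password: <span class="userinput">xxxxxxxx</span>
+  host: localhost
+</code></pre>
+</notextile>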
+
+h2(#configure_application). Configure the API server
+
+Edit @/etc/arvados/api/application.yml@ to configure the settings described in the following sections.  The API server reads both @application.yml@ and its own @config/application.default.yml@ file.  The settings in @application.yml@ take precedence over the defaults that are defined in @config/application.default.yml@.  The @config/application.yml.example@ file is not read by the API server and is provided as a starting template only.
+
+@config/application.default.yml@ documents additional configuration settings not listed here.  You can "view the current source version":https://dev.arvados.org/projects/arvados/repository/revisions/master/entry/services/api/config/application.default.yml for reference.
+
+Only put local configuration in @application.yml@.  Do not edit @application.default.yml@.
+
+h3(#uuid_prefix). uuid_prefix
+
+Define your @uuid_prefix@ in @application.yml@ by setting the @uuid_prefix@ field in the section for your environment.  This prefix is used for all database identifiers to identify the record as originating from this site.  It must be exactly 5 lowercase ASCII letters and digits.
+
+Example @application.yml@:
+
+<notextile>
+<pre><code>  uuid_prefix: <span class="userinput">zzzzz</span></code></pre>
+</notextile>
+
+h3. secret_token
+
+The @secret_token@ is used for signing cookies.  IMPORTANT: This is a site secret.  It should be at least 50 characters.  Generate a random value and set it in @application.yml@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">ruby -e 'puts rand(2**400).to_s(36)'</span>
+yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
+</code></pre></notextile>
+
+Example @application.yml@:
+
+<notextile>
+<pre><code>  secret_token: <span class="userinput">yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy</span></code></pre>
+</notextile>
+
+h3(#blob_signing_key). blob_signing_key
+
+The @blob_signing_key@ is used to enforce access control to Keep blocks.  This same key must be provided to the Keepstore daemons when "installing Keepstore servers.":install-keepstore.html  IMPORTANT: This is a site secret. It should be at least 50 characters.  Generate a random value and set it in @application.yml@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">ruby -e 'puts rand(2**400).to_s(36)'</span>
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+</code></pre></notextile>
+
+Example @application.yml@:
+
+<notextile>
+<pre><code>  blob_signing_key: <span class="userinput">xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx</span></code></pre>
+</notextile>
+
+h3(#omniauth). sso_app_secret, sso_app_id, sso_provider_url
+
+The following settings enable the API server to communicate with the "Single Sign On (SSO) server":install-sso.html to authenticate user log in.
+
+Set @sso_provider_url@ to the base URL where your SSO server is installed.  This should be a URL consisting of the scheme and host (and optionally, port), without a trailing slash.
+
+Set @sso_app_secret@ and @sso_app_id@ to the corresponding values for @app_secret@ and @app_id@ used in the "Create arvados-server client for Single Sign On (SSO)":install-sso.html#client step.
+
+Example @application.yml@:
+
+<notextile>
+<pre><code>  sso_app_id: <span class="userinput">arvados-server</span>
+  sso_app_secret: <span class="userinput">wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww</span>
+  sso_provider_url: <span class="userinput">https://sso.example.com</span>
+</code></pre>
+</notextile>
+
+h3. workbench_address
+
+Set @workbench_address@ to the URL of your workbench application after following "Install Workbench.":install-workbench-app.html
+
+Example @application.yml@:
+
+<notextile>
+<pre><code>  workbench_address: <span class="userinput">https://workbench.zzzzz.example.com</span></code></pre>
+</notextile>
+
+h3. websocket_address
+
+Set @websocket_address@ to the @wss://@ URL of the API server websocket endpoint after following "Set up Web servers":#set_up.  The path of the default endpoint is @/websocket@.
+
+Example @application.yml@:
+
+<notextile>
+<pre><code>  websocket_address: <span class="userinput">wss://ws.zzzzz.example.com</span>/websocket</code></pre>
+</notextile>
+
+h3(#git_repositories_dir). git_repositories_dir
+
+The @git_repositories_dir@ setting specifies the directory where user git repositories will be stored.
+
+The git server setup process is covered on "its own page":install-arv-git-httpd.html. For now, create an empty directory in the default location:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo mkdir -p /var/lib/arvados/git/repositories</span>
+</code></pre></notextile>
+
+If you intend to store your git repositories in a different location, specify that location in @application.yml@.
+
+Default setting in @application.default.yml@:
+
+<notextile>
+<pre><code>  git_repositories_dir: <span class="userinput">/var/lib/arvados/git/repositories</span>
+</code></pre>
+</notextile>
+
+h3(#git_internal_dir). git_internal_dir
+
+The @git_internal_dir@ setting specifies the location of Arvados' internal git repository.  By default this is @/var/lib/arvados/internal.git@.  This repository stores git commits that have been used to run Crunch jobs.  It should _not_ be a subdirectory of @git_repositories_dir@.
+
+Example @application.yml@:
+
+<notextile>
+<pre><code>  git_internal_dir: <span class="userinput">/var/lib/arvados/internal.git</span>
+</code></pre>
+</notextile>
+
+h3(#enable_legacy_jobs_api). enable_legacy_jobs_api
+
+Enable the legacy "Jobs API":install-crunch-dispatch.html .  Note: new installations should use the "Containers API":crunch2-slurm/install-prerequisites.html
+
+Disabling the Jobs API means methods involving @jobs@, @job_tasks@, @pipeline_templates@ and @pipeline_instances@ are disabled.  This functionality is superseded by the Containers API, which consists of @container_requests@, @containers@ and @workflows@.  Arvados clients (such as @arvados-cwl-runner@) detect which APIs are available and adjust behavior accordingly.
+
+* auto -- (default) enable the Jobs API only if it has been used before (i.e., there are job records in the database); otherwise disable the Jobs API.
+* true -- enable the Jobs API even if there are no existing job records.
+* false -- disable the Jobs API even in the presence of existing job records.
+
+<notextile>
+<pre><code>  enable_legacy_jobs_api: <span class="userinput">auto</span>
+</code></pre>
+</notextile>
+
+h2(#set_up). Set up Nginx and Passenger
+
+The Nginx server will serve API requests using Passenger. It will also be used to proxy SSL requests to other services which are covered later in this guide.
+
+First, "Install Nginx and Phusion Passenger":https://www.phusionpassenger.com/library/walkthroughs/deploy/ruby/ownserver/nginx/oss/install_passenger_main.html.
+
+Edit the http section of your Nginx configuration to run the Passenger server. Add a block like the following, adding SSL and logging parameters to taste:
+
+<notextile>
+<pre><code>
+server {
+  listen 127.0.0.1:8000;
+  server_name localhost-api;
+
+  root /var/www/arvados-api/current/public;
+  index  index.html index.htm index.php;
+
+  passenger_enabled on;
+  # If you're using RVM, uncomment the line below.
+  #passenger_ruby /usr/local/rvm/wrappers/default/ruby;
+
+  # This value effectively limits the size of API objects users can
+  # create, especially collections.  If you change this, you should
+  # also ensure the following settings match it:
+  # * `client_max_body_size` in the server section below
+  # * `client_max_body_size` in the Workbench Nginx configuration (twice)
+  # * `max_request_size` in the API server's application.yml file
+  client_max_body_size 128m;
+}
+
+upstream api {
+  server     127.0.0.1:8000  fail_timeout=10s;
+}
+
+proxy_http_version 1.1;
+
+# When Keep clients request a list of Keep services from the API server, the
+# server will automatically return the list of available proxies if
+# the request headers include X-External-Client: 1.  Following the example
+# below, add a line for each netmask that has direct access to Keep storage
+# daemons, so that clients on those networks have this header set to 0.
+geo $external_client {
+  default        1;
+  <span class="userinput">10.20.30.0/24</span>  0;
+}
+</code></pre>
+</notextile>
+
+Restart Nginx to apply the new configuration.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo nginx -s reload</span>
+</code></pre>
+</notextile>
+
+h2. Prepare the API server deployment
+
+{% assign railspkg = "arvados-api-server" %}
+{% include 'install_rails_reconfigure' %}
+
+{% include 'notebox_begin' %}
+You can safely ignore the following messages if they appear while this command runs:
+
+<notextile><pre>Don't run Bundler as root. Bundler can ask for sudo if it is needed, and installing your bundle as root will
+break this application for all non-root users on this machine.</pre></notextile>
+
+<notextile><pre>fatal: Not a git repository (or any of the parent directories): .git</pre></notextile>
+{% include 'notebox_end' %}
diff --git a/doc/install/install-arv-git-httpd.html.textile.liquid b/doc/install/install-arv-git-httpd.html.textile.liquid
new file mode 100644 (file)
index 0000000..7fc3321
--- /dev/null
@@ -0,0 +1,385 @@
+---
+layout: default
+navsection: installguide
+title: Install the Git server
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados allows users to create their own private and public git repositories, and clone/push them using SSH and HTTPS.
+
+The git hosting setup involves three components.
+* The "arvados-git-sync.rb" script polls the API server for the current list of repositories, creates bare repositories, and updates the local permission cache used by gitolite.
+* Gitolite provides SSH access.
+* arvados-git-httpd provides HTTPS access.
+
+It is not strictly necessary to deploy _both_ SSH and HTTPS access, but we recommend deploying both:
+* SSH is a more appropriate way to authenticate from a user's workstation because it does not require managing tokens on the client side;
+* HTTPS is a more appropriate way to authenticate from a shell VM because it does not depend on SSH agent forwarding (SSH clients' agent forwarding features tend to behave as if the remote machine is fully trusted);
+* HTTPS is also used by Arvados Composer to access git repositories from the browser.
+
+The HTTPS instructions given below will not work if you skip the SSH setup steps.
+
+h2. Set up DNS
+
+By convention, we use the following hostname for the git service:
+
+<notextile>
+<pre><code>git.<span class="userinput">uuid_prefix</span>.your.domain
+</code></pre>
+</notextile>
+
+{% include 'notebox_begin' %}
+Here, we show how to install the git hosting services *on the same host as your API server.* Using a different host is not yet fully supported. On this page we will refer to this host as your git server.
+{% include 'notebox_end' %}
+
+DNS and network configuration should be set up so port 443 reaches your HTTPS proxy, and port 22 reaches the OpenSSH service on your git server.
+
+h2. Generate an API token
+
+{% assign railshost = "gitserver" %}
+{% assign railscmd = "bundle exec ./script/create_superuser_token.rb" %}
+{% assign railsout = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz" %}
+Use the following command to generate an API token.  {% include 'install_rails_command' %}
+
+Copy that token; you'll need it in a minute.
+
+h2. Install git and other dependencies
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>gitserver:~$ <span class="userinput">sudo apt-get install git openssh-server</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>gitserver:~$ <span class="userinput">sudo yum install git perl-Data-Dumper openssh-server</span>
+</code></pre>
+</notextile>
+
+{% include 'install_git' %}
+
+h2. Create a "git" user and a storage directory
+
+Gitolite and some additional scripts will be installed in @/var/lib/arvados/git@, which means hosted repository data will be stored in @/var/lib/arvados/git/repositories@. If you choose to install gitolite in a different location, make sure to update the @git_repositories_dir@ entry in your API server's @application.yml@ file accordingly: for example, if you install gitolite at @/data/gitolite@ then your @git_repositories_dir@ will be @/data/gitolite/repositories@.
+
+A new UNIX account called "git" will own the files. This makes git URLs look familiar to users (<code>git@[...]:username/reponame.git</code>).
+
+On Debian- or Red Hat-based systems:
+
+<notextile>
+<pre><code>gitserver:~$ <span class="userinput">sudo mkdir -p /var/lib/arvados/git</span>
+gitserver:~$ <span class="userinput">sudo useradd --comment git --home-dir /var/lib/arvados/git git</span>
+gitserver:~$ <span class="userinput">sudo chown -R git:git ~git</span>
+</code></pre>
+</notextile>
+
+The git user needs its own SSH key. (It must be able to run <code>ssh git@localhost</code> from scripts.)
+
+<notextile>
+<pre><code>gitserver:~$ <span class="userinput">sudo -u git -i bash</span>
+git@gitserver:~$ <span class="userinput">ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa</span>
+git@gitserver:~$ <span class="userinput">cp .ssh/id_rsa.pub .ssh/authorized_keys</span>
+git@gitserver:~$ <span class="userinput">ssh -o stricthostkeychecking=no localhost cat .ssh/id_rsa.pub</span>
+Warning: Permanently added 'localhost' (ECDSA) to the list of known hosts.
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7aBIDAAgMQN16Pg6eHmvc+D+6TljwCGr4YGUBphSdVb25UyBCeAEgzqRiqy0IjQR2BLtSirXr+1SJAcQfBgI/jwR7FG+YIzJ4ND9JFEfcpq20FvWnMMQ6XD3y3xrZ1/h/RdBNwy4QCqjiXuxDpDB7VNP9/oeAzoATPZGhqjPfNS+RRVEQpC6BzZdsR+S838E53URguBOf9yrPwdHvosZn7VC0akeWQerHqaBIpSfDMtaM4+9s1Gdsz0iP85rtj/6U/K/XOuv2CZsuVZZ52nu3soHnEX2nx2IaXMS3L8Z+lfOXB2T6EaJgXF7Z9ME5K1tx9TSNTRcYCiKztXLNLSbp git@gitserver
+git@gitserver:~$ <span class="userinput">rm .ssh/authorized_keys</span>
+</code></pre>
+</notextile>
+
+h2. Install gitolite
+
+Check "https://github.com/sitaramc/gitolite/tags":https://github.com/sitaramc/gitolite/tags for the latest stable version. This guide was tested with @v3.6.4@. _Versions below 3.0 are missing some features needed by Arvados, and should not be used._
+
+Download and install the version you selected.
+
+<notextile>
+<pre><code>git@gitserver:~$ <span class="userinput">echo 'PATH=$HOME/bin:$PATH' &gt;.profile</span>
+git@gitserver:~$ <span class="userinput">source .profile</span>
+git@gitserver:~$ <span class="userinput">git clone --branch <b>v3.6.4</b> https://github.com/sitaramc/gitolite</span>
+...
+Note: checking out '5d24ae666bfd2fa9093d67c840eb8d686992083f'.
+...
+git@gitserver:~$ <span class="userinput">mkdir bin</span>
+git@gitserver:~$ <span class="userinput">gitolite/install -ln ~git/bin</span>
+git@gitserver:~$ <span class="userinput">bin/gitolite setup -pk .ssh/id_rsa.pub</span>
+Initialized empty Git repository in /var/lib/arvados/git/repositories/gitolite-admin.git/
+Initialized empty Git repository in /var/lib/arvados/git/repositories/testing.git/
+WARNING: /var/lib/arvados/git/.ssh/authorized_keys missing; creating a new one
+    (this is normal on a brand new install)
+</code></pre>
+</notextile>
+
+_If this didn't go well, more detail about installing gitolite, and information about how it works, can be found on the "gitolite home page":http://gitolite.com/._
+
+Clone the gitolite-admin repository. The arvados-git-sync.rb script works by editing the files in this working directory and pushing them to gitolite. Here we make sure "git push" won't produce any errors or warnings.
+
+<notextile>
+<pre><code>git@gitserver:~$ <span class="userinput">git clone git@localhost:gitolite-admin</span>
+Cloning into 'gitolite-admin'...
+remote: Counting objects: 6, done.
+remote: Compressing objects: 100% (4/4), done.
+remote: Total 6 (delta 0), reused 0 (delta 0)
+Receiving objects: 100% (6/6), done.
+Checking connectivity... done.
+git@gitserver:~$ <span class="userinput">cd gitolite-admin</span>
+git@gitserver:~/gitolite-admin$ <span class="userinput">git config user.email arvados</span>
+git@gitserver:~/gitolite-admin$ <span class="userinput">git config user.name arvados</span>
+git@gitserver:~/gitolite-admin$ <span class="userinput">git config push.default simple</span>
+git@gitserver:~/gitolite-admin$ <span class="userinput">git push</span>
+Everything up-to-date
+</code></pre>
+</notextile>
+
+h3. Configure gitolite
+
+Configure gitolite to look up a repository name like @username/reponame.git@ and find the appropriate bare repository storage directory.
+
+Add the following lines to the top of @~git/.gitolite.rc@:
+
+<notextile>
+<pre><code><span class="userinput">my $repo_aliases;
+my $aliases_src = "$ENV{HOME}/.gitolite/arvadosaliases.pl";
+if ($ENV{HOME} && (-e $aliases_src)) {
+    $repo_aliases = do $aliases_src;
+}
+$repo_aliases ||= {};
+</span></code></pre>
+</notextile>
+
+Add the following lines inside the section that begins @%RC = (@:
+
+<notextile>
+<pre><code><span class="userinput">    REPO_ALIASES => $repo_aliases,
+</span></code></pre>
+</notextile>
+
+Inside that section, adjust the 'UMASK' setting to @022@, to ensure the API server has permission to read repositories:
+
+<notextile>
+<pre><code>    UMASK => <span class="userinput">022</span>,
+</code></pre>
+</notextile>
+
+Uncomment the 'Alias' line in the section that begins @ENABLE => [@:
+
+<notextile>
+<pre><code><span class="userinput">            # access a repo by another (possibly legacy) name
+            'Alias',
+</span></code></pre>
+</notextile>
+
+h2. Configure git synchronization
+
+Create a configuration file @/var/www/arvados-api/current/config/arvados-clients.yml@ using the following template, filling in the appropriate values for your system.
+* For @arvados_api_token@, use the token you generated above.
+* For @gitolite_arvados_git_user_key@, provide the public key you generated above, i.e., the contents of @~git/.ssh/id_rsa.pub@.
+
+<notextile>
+<pre><code>production:
+  gitolite_url: /var/lib/arvados/git/repositories/gitolite-admin.git
+  gitolite_tmp: /var/lib/arvados/git
+  arvados_api_host: <span class="userinput">uuid_prefix.example.com</span>
+  arvados_api_token: "<span class="userinput">zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz</span>"
+  arvados_api_host_insecure: <span class="userinput">false</span>
+  gitolite_arvados_git_user_key: "<span class="userinput">ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7aBIDAAgMQN16Pg6eHmvc+D+6TljwCGr4YGUBphSdVb25UyBCeAEgzqRiqy0IjQR2BLtSirXr+1SJAcQfBgI/jwR7FG+YIzJ4ND9JFEfcpq20FvWnMMQ6XD3y3xrZ1/h/RdBNwy4QCqjiXuxDpDB7VNP9/oeAzoATPZGhqjPfNS+RRVEQpC6BzZdsR+S838E53URguBOf9yrPwdHvosZn7VC0akeWQerHqaBIpSfDMtaM4+9s1Gdsz0iP85rtj/6U/K/XOuv2CZsuVZZ52nu3soHnEX2nx2IaXMS3L8Z+lfOXB2T6EaJgXF7Z9ME5K1tx9TSNTRcYCiKztXLNLSbp git@gitserver</span>"
+</code></pre>
+</notextile>
+
+h3. Enable the synchronization script
+
+The API server package includes a script that retrieves the current set of repository names and permissions from the API, writes them to @arvadosaliases.pl@ in a format usable by gitolite, and triggers gitolite hooks which create new empty repositories if needed. This script should run every 2 to 5 minutes.
+
+If you are using RVM, create @/etc/cron.d/arvados-git-sync@ with the following content:
+
+<notextile>
+<pre><code><span class="userinput">*/5 * * * * git cd /var/www/arvados-api/current && /usr/local/rvm/bin/rvm-exec default bundle exec script/arvados-git-sync.rb production</span>
+</code></pre>
+</notextile>
+
+Otherwise, create @/etc/cron.d/arvados-git-sync@ with the following content:
+
+<notextile>
+<pre><code><span class="userinput">*/5 * * * * git cd /var/www/arvados-api/current && bundle exec script/arvados-git-sync.rb production</span>
+</code></pre>
+</notextile>
+
+h3. Configure the API server to advertise the correct SSH URLs
+
+In your API server's @application.yml@ file, add the following entry:
+
+<notextile>
+<pre><code>git_repo_ssh_base: "git@git.<span class="userinput">uuid_prefix.your.domain</span>:"
+</code></pre>
+</notextile>
+
+Make sure to include the trailing colon.
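+
+With this setting, the API server will advertise SSH clone URLs of the form <code>git@git.<span class="userinput">uuid_prefix.your.domain</span>:username/reponame.git</code>.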
+
+h2. Install the arvados-git-httpd package
+
+This is needed only for HTTPS access.
+
+The arvados-git-httpd package provides HTTP access, using Arvados authentication tokens instead of passwords. It is intended to be installed on the system where your git repositories are stored, and accessed through a web proxy that provides SSL support.
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install git arvados-git-httpd</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install git arvados-git-httpd</span>
+~$ <span class="userinput">sudo systemctl enable arvados-git-httpd</span>
+</code></pre>
+</notextile>
+
+Verify that @arvados-git-httpd@ and @git-http-backend@ can be run:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arvados-git-httpd -h</span>
+[...]
+Usage: arvados-git-httpd [-config path/to/arvados/git-httpd.yml]
+[...]
+~$ <span class="userinput">git http-backend</span>
+Status: 500 Internal Server Error
+Expires: Fri, 01 Jan 1980 00:00:00 GMT
+Pragma: no-cache
+Cache-Control: no-cache, max-age=0, must-revalidate
+
+fatal: No REQUEST_METHOD from server
+</code></pre>
+</notextile>
+
+h3. Enable arvados-git-httpd
+
+{% include 'notebox_begin' %}
+
+The arvados-git-httpd package includes configuration files for systemd.  If you're using a different init system, you'll need to configure a service to start and stop an @arvados-git-httpd@ process as desired.
+
+{% include 'notebox_end' %}
+
+Create the configuration file @/etc/arvados/git-httpd/git-httpd.yml@. Run @arvados-git-httpd -h@ to learn more about configuration entries.
+
+<notextile>
+<pre><code>Client:
+  APIHost: <b>uuid_prefix.your.domain</b>
+  Insecure: false
+GitCommand: /var/lib/arvados/git/gitolite/src/gitolite-shell
+GitoliteHome: /var/lib/arvados/git
+Listen: :9001
+RepoRoot: /var/lib/arvados/git/repositories
+</code></pre>
+</notextile>
+
+Restart the systemd service to ensure the new configuration is used.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo systemctl restart arvados-git-httpd</span>
+</code></pre>
+</notextile>
+
+h3. Set up a reverse proxy to provide SSL service
+
+The arvados-git-httpd service will be accessible from anywhere on the internet, so we recommend using SSL.
+
+This is best achieved by putting a reverse proxy with SSL support in front of arvados-git-httpd, running on port 443 and passing requests to @arvados-git-httpd@ on port 9001 (or whichever port you used in your run script).
+
+Add the following configuration to the @http@ section of your Nginx configuration:
+
+<notextile>
+<pre><code>
+upstream arvados-git-httpd {
+  server                  127.0.0.1:<span class="userinput">9001</span>;
+}
+server {
+  listen                  <span class="userinput">[your public IP address]</span>:443 ssl;
+  server_name             git.<span class="userinput">uuid_prefix.your.domain</span>;
+  proxy_connect_timeout   90s;
+  proxy_read_timeout      300s;
+
+  ssl on;
+  ssl_certificate         <span class="userinput">/YOUR/PATH/TO/cert.pem</span>;
+  ssl_certificate_key     <span class="userinput">/YOUR/PATH/TO/cert.key</span>;
+
+  # The server needs to accept potentially large refpacks from push clients.
+  client_max_body_size 50m;
+
+  location  / {
+    proxy_pass            http://arvados-git-httpd;
+  }
+}
+</code></pre>
+</notextile>
+
+h3. Configure the API server to advertise the correct HTTPS URLs
+
+In your API server's @application.yml@ file, add the following entry:
+
+<notextile>
+<pre><code>git_repo_https_base: https://git.<span class="userinput">uuid_prefix.your.domain</span>/
+</code></pre>
+</notextile>
+
+Make sure to include the trailing slash.
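+
+With this setting, the API server will advertise HTTPS clone URLs like <code>https://git.<span class="userinput">uuid_prefix.your.domain</span>/arvados.git</code>, as used in the push example below.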
+
+h2. Restart Nginx
+
+Restart Nginx to make the Nginx and API server configuration changes take effect.
+
+<notextile>
+<pre><code>gitserver:~$ <span class="userinput">sudo nginx -s reload</span>
+</code></pre>
+</notextile>
+
+h2. Clone Arvados repository
+
+Here we create a repository object which will be used to set up a hosted clone of the arvados repository on this cluster.
+
+<notextile>
+<pre><code>~$ <span class="userinput">uuid_prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">echo "Site prefix is '$uuid_prefix'"</span>
+~$ <span class="userinput">all_users_group_uuid="$uuid_prefix-j7d0g-fffffffffffffff"</span>
+~$ <span class="userinput">repo_uuid=`arv --format=uuid repository create --repository "{\"owner_uuid\":\"$uuid_prefix-tpzed-000000000000000\", \"name\":\"arvados\"}"`</span>
+~$ <span class="userinput">echo "Arvados repository uuid is '$repo_uuid'"</span>
+</code></pre></notextile>
+
+Create a link object to make the repository object readable by the "All users" group, and therefore by every active user. This makes it possible for users to run the bundled Crunch scripts by specifying @"script_version":"master","repository":"arvados"@ rather than pulling the Arvados source tree into their own repositories.
+
+<notextile>
+<pre><code>~$ <span class="userinput">read -rd $'\000' newlink &lt;&lt;EOF; arv link create --link "$newlink"</span>
+<span class="userinput">{
+ "tail_uuid":"$all_users_group_uuid",
+ "head_uuid":"$repo_uuid",
+ "link_class":"permission",
+ "name":"can_read"
+}
+EOF</span>
+</code></pre></notextile>
+
+In a couple of minutes, your arvados-git-sync cron job will create an empty repository on your git server. Seed it with the real arvados repository. If your git credential helpers were configured correctly when you "set up your shell server":install-shell-server.html, the "git push" command will use your API token instead of prompting you for a username and password.
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd /tmp</span>
+/tmp$ <span class="userinput">git clone --bare https://github.com/curoverse/arvados.git</span>
+/tmp$ <span class="userinput">git --git-dir arvados.git push https://git.<b>uuid_prefix.your.domain</b>/arvados.git '*:*'</span>
+</code></pre>
+</notextile>
+
+If you did not set up an HTTPS service, you can push to <code>git@git.uuid_prefix.your.domain:arvados.git</code> using your SSH key, or by logging in to your git server and using sudo.
+
+<notextile>
+<pre><code>gitserver:~$ <span class="userinput">sudo -u git -i bash</span>
+git@gitserver:~$ <span class="userinput">git clone --bare https://github.com/curoverse/arvados.git /tmp/arvados.git</span>
+git@gitserver:~$ <span class="userinput">cd /tmp/arvados.git</span>
+git@gitserver:/tmp/arvados.git$ <span class="userinput">gitolite push /var/lib/arvados/git/repositories/<b>your_arvados_repo_uuid</b>.git '*:*'</span>
+</code></pre>
+</notextile>
diff --git a/doc/install/install-components.html.textile.liquid b/doc/install/install-components.html.textile.liquid
new file mode 100644 (file)
index 0000000..b21c4bd
--- /dev/null
@@ -0,0 +1,28 @@
+---
+layout: default
+navsection: installguide
+title: Choosing which components to install
+...
+
+Arvados consists of many components, some of which may be omitted (at the cost of reduced functionality).  It may also be helpful to review the "Arvados Architecture":{{site.baseurl}}/architecture to understand how these components interact.
+
+table(table table-bordered table-condensed).
+|\3=. *Core*|
+|"Postgres database":install-postgresql.html |Stores data for the API server.|Required.|
+|"API server":install-api-server.html |Core Arvados logic for managing users, groups, collections, containers, and enforcing permissions.|Required.|
+|\3=. *Keep (storage)*|
+|"Keepstore":install-keepstore.html |Stores content-addressed blocks in a variety of backends (local filesystem, cloud object storage).|Required.|
+|"Keepproxy":install-keepproxy.html |Gateway service to access keep servers from external networks.|Required to be able to use arv-put, arv-get, or arv-mount outside the private Arvados network.|
+|"Keep-web":install-keep-web.html |Gateway service providing read/write HTTP and WebDAV support on top of Keep.|Required to be able to download files from Keep over plain HTTP in Workbench.|
+|"Keep-balance":install-keep-balance.html |Storage cluster maintenance daemon responsible for moving blocks to their optimal server location, adjusting block replication levels, and trashing unreferenced blocks.|Required to free deleted data from underlying storage, and to ensure proper replication and block distribution (including support for storage classes).|
+|\3=. *User interface*|
+|"Single Sign On server":install-sso.html |Login server.|Required for web based login to Workbench.|
+|"Workbench":install-workbench-app.html |Primary graphical user interface for working with file collections and running containers.|Optional.  Depends on API server, SSO server, keep-web, websockets server.|
+|"Workflow Composer":install-composer.html |Graphical user interface for editing Common Workflow Language workflows.|Optional.  Depends on git server (arv-git-httpd).|
+|\3=. *Additional services*|
+|"Websockets server":install-ws.html |Event distribution server.|Required to view streaming container logs in Workbench.|
+|"Shell server":install-shell-server.html |Synchronize (create/delete/configure) Unix shell accounts with Arvados users.|Optional.|
+|"Git server":install-arv-git-httpd.html |Arvados-hosted git repositories, with Arvados-token based authentication.|Optional, but required by Workflow Composer.|
+|\3=. *Crunch (running containers)*|
+|"crunch-dispatch-slurm":crunch2-slurm/install-prerequisites.html |Run analysis workflows using Docker containers distributed across a SLURM cluster.|Optional if you wish to use Arvados for data management only.|
+|"Node Manager":install-nodemanager.html |Allocate and free cloud VM instances on demand based on workload.|Optional, not needed for a static SLURM cluster (such as on-premise HPC).|
diff --git a/doc/install/install-composer.html.textile.liquid b/doc/install/install-composer.html.textile.liquid
new file mode 100644 (file)
index 0000000..9bd25ed
--- /dev/null
@@ -0,0 +1,59 @@
+---
+layout: default
+navsection: installguide
+title: Install Composer
+...
+
+Arvados Composer is a single-page JavaScript application for building Common Workflow Language (CWL) workflows.
+
+h2. Prerequisites
+
+In addition to Arvados core services, Composer requires "Arvados hosted git repositories":install-arv-git-httpd.html which are used for storing workflow files.
+
+h2. Install
+
+Composer may be installed on the same host as Workbench, or on a different host.  Composer communicates directly with the Arvados API server.  It does not require its own backend and can be served as static files.
+
+On a Debian-based system, install the following package:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install arvados-composer</span>
+</code></pre>
+</notextile>
+
+On a Red Hat-based system, install the following package:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install arvados-composer</span>
+</code></pre>
+</notextile>
+
+h2. Configure
+
+h3. composer.yml
+
+Edit @/etc/arvados/composer/composer.yml@ and set @apiEndPoint@ to your API server:
+
+<pre>
+apiEndPoint: https://zzzzz.arvadosapi.com
+</pre>
+
+h3. Nginx
+
+Add Composer to your Nginx configuration.  This example will host Composer at @/composer@.
+
+<pre>
+location /composer {
+  root   /var/www/arvados-composer;
+  index  index.html;
+}
+</pre>
+
+h3. Workbench link to composer
+
+Edit the Workbench @application.yml@ and set @composer_url@ to the URL where Composer is served.
+
+<pre>
+production:
+  composer_url: 'https://workbench.zzzzz.arvadosapi.com/composer'
+</pre>
diff --git a/doc/install/install-compute-node.html.textile.liquid b/doc/install/install-compute-node.html.textile.liquid
new file mode 100644 (file)
index 0000000..02eb216
--- /dev/null
@@ -0,0 +1,112 @@
+---
+layout: default
+navsection: installguide
+title: Install a compute node
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Install dependencies
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+{% include 'note_python_sc' %}
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">echo 'exclude=python2-llfuse' | sudo tee -a /etc/yum.conf</span>
+~$ <span class="userinput">sudo yum install perl python-virtualenv fuse python-arvados-python-client python-arvados-fuse crunchrunner crunchstat arvados-docker-cleaner iptables ca-certificates</span>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install perl python-virtualenv fuse python-arvados-python-client python-arvados-fuse crunchrunner crunchstat arvados-docker-cleaner iptables ca-certificates</span>
+</code></pre>
+</notextile>
+
+{% include 'install_compute_docker' %}
+
+h2. Set up SLURM
+
+Install SLURM following "the same process you used to install the Crunch dispatcher":install-crunch-dispatch.html#slurm.
+
+h2. Copy configuration files from the dispatcher (API server)
+
+The @slurm.conf@ and @/etc/munge/munge.key@ files need to be identical across the dispatcher and all compute nodes. Copy the files you created in the "Install the Crunch dispatcher":install-crunch-dispatch.html step to this compute node.
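+
+For example, assuming root SSH access from the dispatcher to each compute node (and a Debian layout; on Red Hat-based systems the file is @/etc/slurm/slurm.conf@):
+
+<notextile>
+<pre><code>dispatcher:~$ <span class="userinput">sudo scp /etc/slurm-llnl/slurm.conf <b>compute0</b>:/etc/slurm-llnl/</span>
+dispatcher:~$ <span class="userinput">sudo scp /etc/munge/munge.key <b>compute0</b>:/etc/munge/</span>
+</code></pre>
+</notextile>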
+
+{% include 'install_compute_fuse' %}
+
+{% include 'install_docker_cleaner' %}
+
+h2. Add a Crunch user account
+
+Create a Crunch user account, and add it to the @fuse@ and @docker@ groups so it can use those tools:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo useradd --groups fuse,docker crunch</span>
+</code></pre>
+</notextile>
+
+The crunch user should have the same UID, GID, and home directory across all compute nodes and the dispatcher (API server).
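+
+To keep these identifiers consistent, one option is to pin the UID and GID explicitly when creating the account on each node, instead of the bare @useradd@ shown above (the numeric ID @4005@ is an arbitrary example):
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo groupadd --gid <b>4005</b> crunch</span>
+~$ <span class="userinput">sudo useradd --uid <b>4005</b> --gid <b>4005</b> --groups fuse,docker crunch</span>
+</code></pre>
+</notextile>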
+
+h2. Tell the API server about this compute node
+
+Load your API superuser token on the compute node:
+
+<notextile>
+<pre><code>
+~$ <span class="userinput">HISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'</span>
+~$ <span class="userinput">export ARVADOS_API_TOKEN=@your-superuser-token@</span>
+~$ <span class="userinput">export ARVADOS_API_HOST=@uuid_prefix.your.domain@</span>
+~$ <span class="userinput">unset ARVADOS_API_HOST_INSECURE</span>
+</code>
+</pre>
+</notextile>
+
+Then execute this script to create a compute node object, and set up a cron job to have the compute node ping the API server every five minutes:
+
+<notextile>
+<pre><code>
+#!/bin/bash
+set -e
+if ! test -f /root/node.json ; then
+    python - &lt;&lt;EOF
+import arvados, json, socket
+fqdn = socket.getfqdn()
+hostname, _, domain = fqdn.partition('.')
+node = arvados.api('v1').nodes().create(body={'hostname': hostname, 'domain': domain}).execute()
+with open('/root/node.json', 'w') as node_file:
+    json.dump(node, node_file, indent=2)
+EOF
+
+    # Make sure /dev/fuse permissions are correct (the device appears after fuse is loaded)
+    chmod 1660 /dev/fuse && chgrp fuse /dev/fuse
+fi
+
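+# Extract this node's UUID and ping secret from the API response saved in /root/node.json.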
+UUID=`grep \"uuid\" /root/node.json  |cut -f4 -d\"`
+PING_SECRET=`grep \"ping_secret\" /root/node.json  |cut -f4 -d\"`
+
+if ! test -f /etc/cron.d/node_ping ; then
+    echo "*/5 * * * * root /usr/bin/curl -k -d ping_secret=$PING_SECRET https://$ARVADOS_API_HOST/arvados/v1/nodes/$UUID/ping" > /etc/cron.d/node_ping
+fi
+
+/usr/bin/curl -k -d ping_secret=$PING_SECRET https://$ARVADOS_API_HOST/arvados/v1/nodes/$UUID/ping?ping_secret=$PING_SECRET
+</code>
+</pre>
+</notextile>
+
+And remove your token from the environment:
+
+<notextile>
+<pre><code>
+~$ <span class="userinput">unset ARVADOS_API_TOKEN</span>
+~$ <span class="userinput">unset ARVADOS_API_HOST</span>
+</code>
+</pre>
+</notextile>
diff --git a/doc/install/install-compute-ping.html.textile.liquid b/doc/install/install-compute-ping.html.textile.liquid
new file mode 100644 (file)
index 0000000..be3f58b
--- /dev/null
@@ -0,0 +1,14 @@
+---
+layout: default
+navsection: installguide
+title: Sample compute node ping script
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+When a new elastic compute node is booted, it needs to contact Arvados to register itself.  Here is an example ping script to run on boot.
+
+<notextile> {% code 'compute_ping_rb' as ruby %} </notextile>
diff --git a/doc/install/install-controller.html.textile.liquid b/doc/install/install-controller.html.textile.liquid
new file mode 100644 (file)
index 0000000..3e94b29
--- /dev/null
@@ -0,0 +1,180 @@
+---
+layout: default
+navsection: installguide
+title: Install the controller
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The arvados-controller service must be installed on your API server node.
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install arvados-controller</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install arvados-controller</span>
+</code></pre>
+</notextile>
+
+Verify the @arvados-controller@ program is functional:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arvados-controller -h</span>
+Usage:
+  -config file
+[...]
+</code></pre>
+</notextile>
+
+h3. Configure Nginx to route requests to the controller
+
+Add @upstream@ and @server@ definitions inside the @http@ section of your Nginx configuration using the following template.
+
+{% include 'notebox_begin' %}
+
+If you are adding arvados-controller to an existing system as part of the upgrade procedure, do not add a new "server" part here. Instead, add only the "upstream" part as shown here, and update your existing "server" section by changing its @proxy_pass@ directive from @http://api@ to @http://controller@.
+
+{% include 'notebox_end' %}
+
+<notextile>
+<pre><code>upstream controller {
+  server     127.0.0.1:9004  fail_timeout=10s;
+}
+
+server {
+  listen       <span class="userinput">[your public IP address]</span>:443 ssl;
+  server_name  <span class="userinput">uuid_prefix.your.domain</span>;
+
+  ssl on;
+  ssl_certificate     <span class="userinput">/YOUR/PATH/TO/cert.pem</span>;
+  ssl_certificate_key <span class="userinput">/YOUR/PATH/TO/cert.key</span>;
+
+  # Refer to the comment about this setting in the passenger (arvados
+  # api server) section of your Nginx configuration.
+  client_max_body_size 128m;
+
+  location / {
+    proxy_pass            http://controller;
+    proxy_redirect        off;
+    proxy_connect_timeout 90s;
+    proxy_read_timeout    300s;
+
+    proxy_set_header      X-Forwarded-Proto https;
+    proxy_set_header      Host $http_host;
+    proxy_set_header      X-External-Client $external_client;
+    proxy_set_header      X-Real-IP $remote_addr;
+    proxy_set_header      X-Forwarded-For $proxy_add_x_forwarded_for;
+  }
+}
+</code></pre>
+</notextile>
+
+Restart Nginx to apply the new configuration.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo nginx -s reload</span>
+</code></pre>
+</notextile>
+
+h3(#configuration). Configure arvados-controller
+
+Create the cluster configuration file @/etc/arvados/config.yml@ using the following template.
+
+<notextile>
+<pre><code>Clusters:
+  <span class="userinput">uuid_prefix</span>:
+    NodeProfiles:
+      apiserver:
+        arvados-controller:
+          Listen: ":<span class="userinput">9004</span>" # must match the "upstream controller" section of your Nginx config
+        arvados-api-server:
+          Listen: ":<span class="userinput">8000</span>" # must match the "upstream api" section of your Nginx config
+    PostgreSQL:
+      ConnectionPool: 128
+      Connection:
+        host: localhost
+        dbname: arvados_production
+        user: arvados
+        password: <span class="userinput">xxxxxxxx</span>
+        sslmode: require
+</code></pre>
+</notextile>
+
+Create the host configuration file @/etc/arvados/environment@.
+
+<notextile>
+<pre><code>ARVADOS_NODE_PROFILE=apiserver
+</code></pre>
+</notextile>
+
+h3. Start the service (option 1: systemd)
+
+If your system does not use systemd, skip this section and follow the "runit instructions":#runit instead.
+
+If your system uses systemd, the arvados-controller service should already be set up. Restart it to load the new configuration file, and check its status:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo systemctl restart arvados-controller</span>
+~$ <span class="userinput">sudo systemctl status arvados-controller</span>
+&#x25cf; arvados-controller.service - Arvados controller
+   Loaded: loaded (/lib/systemd/system/arvados-controller.service; enabled; vendor preset: enabled)
+   Active: active (running) since Tue 2018-07-31 13:17:44 UTC; 3s ago
+     Docs: https://doc.arvados.org/
+ Main PID: 25066 (arvados-control)
+   CGroup: /system.slice/arvados-controller.service
+           └─25066 /usr/bin/arvados-controller
+
+Jul 31 13:17:44 zzzzz systemd[1]: Starting Arvados controller...
+Jul 31 13:17:44 zzzzz arvados-controller[25191]: {"Listen":"[::]:9004","Service":"arvados-controller","level":"info","msg":"listening","time":"2018-07-31T13:17:44.521694195Z"}
+Jul 31 13:17:44 zzzzz systemd[1]: Started Arvados controller.
+</code></pre>
+</notextile>
+
+Skip ahead to "confirm the service is working":#confirm.
+
+h3(#runit). Start the service (option 2: runit)
+
+Install runit to supervise the arvados-controller daemon.  {% include 'install_runit' %}
+
+Create a supervised service.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo mkdir /etc/service/arvados-controller</span>
+~$ <span class="userinput">cd /etc/service/arvados-controller</span>
+~$ <span class="userinput">sudo mkdir log log/main</span>
+~$ <span class="userinput">printf '#!/bin/sh\nset -a\n. /etc/arvados/environment\nexec arvados-controller 2>&1\n' | sudo tee run</span>
+~$ <span class="userinput">printf '#!/bin/sh\nexec svlogd main\n' | sudo tee log/run</span>
+~$ <span class="userinput">sudo chmod +x run log/run</span>
+~$ <span class="userinput">sudo sv exit .</span>
+~$ <span class="userinput">cd -</span>
+</code></pre>
+</notextile>
+
+Use @sv stat@ and check the log file to verify the service is running.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo sv stat /etc/service/arvados-controller</span>
+run: /etc/service/arvados-controller: (pid 12520) 2s; run: log: (pid 12519) 2s
+~$ <span class="userinput">tail /etc/service/arvados-controller/log/main/current</span>
+{"Listen":"[::]:9004","Service":"arvados-controller","level":"info","msg":"listening","time":"2018-07-31T13:17:44.521694195Z"}
+</code></pre>
+</notextile>
+
+h3(#confirm). Confirm the service is working
+
+Confirm the service is listening on its assigned port and responding to requests.
+
+<notextile>
+<pre><code>~$ <span class="userinput">curl -X OPTIONS http://0.0.0.0:<b>9004</b>/login</span>
+{"errors":["Forbidden"],"error_token":"1533044555+684b532c"}
+</code></pre>
+</notextile>
diff --git a/doc/install/install-crunch-dispatch.html.textile.liquid b/doc/install/install-crunch-dispatch.html.textile.liquid
new file mode 100644 (file)
index 0000000..e0ed147
--- /dev/null
@@ -0,0 +1,207 @@
+---
+layout: default
+navsection: installguide
+title: Install the Crunch dispatcher
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The dispatcher normally runs on the same host/VM as the API server.
+
+h2. Test the Arvados job queue
+
+Crunch dispatches work from the job queue on the Arvados API server.  Before you start installing the Crunch dispatcher, now's a good time to check that the API server and Git server can coordinate to create job records.  Run these commands *on your shell server* to create a collection, and a job to calculate the MD5 checksum of every file in it:
+
+<notextile>
+<pre><code>~$ <span class="userinput">echo 'Hello, Crunch!' | arv-put --portable-data-hash -</span>
+&hellip;
+d40c7f35d80da669afb9db1896e760ad+49
+~$ <span class="userinput">read -rd $'\000' newjob &lt;&lt;EOF; arv job create --job "$newjob"
+{"script_parameters":{"input":"d40c7f35d80da669afb9db1896e760ad+49"},
+ "script_version":"0988acb472849dc0",
+ "script":"hash",
+ "repository":"arvados"}
+EOF</span>
+</code></pre>
+</notextile>
+
+If you get the error
+
+<pre>
+ArgumentError: Specified script_version does not resolve to a commit
+</pre>
+
+it often means that the API server can't read the specified repository&mdash;either because it doesn't exist, or because the user running the API server doesn't have permission to read the repository files.  Check the API server's log (@/var/www/arvados-api/current/log/production.log@) for details, and double-check the instructions in the "Git server installation guide":install-arv-git-httpd.html.
+
+If everything goes well, the API server should create a job record, and your @arv@ command will output the JSON for that record.  It should have state @Queued@ and script_version @0988acb472849dc08d576ee40493e70bde2132ca@.  If the job JSON includes those fields, you can proceed to install the Crunch dispatcher and a compute node.  This job will remain queued until you install those services.
+
+h2. Perl SDK dependencies
+
+Install the Perl SDK on the controller.
+
+* See "Perl SDK":{{site.baseurl}}/sdk/perl/index.html page for details.
+
+h2. Python SDK dependencies
+
+Install the Python SDK and CLI tools on controller and all compute nodes.
+
+* See "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html page for details.
+
+h2(#slurm). Set up SLURM
+
+On the API server, install SLURM and munge, and generate a munge key.
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo /usr/bin/apt-get install slurm-llnl munge</span>
+~$ <span class="userinput">sudo /usr/sbin/create-munge-key</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install slurm munge slurm-munge</span>
+</code></pre>
+</notextile>
+
+Now we need to give SLURM a configuration file.  On Debian-based systems, this is installed at @/etc/slurm-llnl/slurm.conf@.  On Red Hat-based systems, this is installed at @/etc/slurm/slurm.conf@.  Here's an example @slurm.conf@:
+
+<notextile>
+<pre>
+ControlMachine=uuid_prefix.your.domain
+SlurmctldPort=6817
+SlurmdPort=6818
+AuthType=auth/munge
+StateSaveLocation=/tmp
+SlurmdSpoolDir=/tmp/slurmd
+SwitchType=switch/none
+MpiDefault=none
+SlurmctldPidFile=/var/run/slurmctld.pid
+SlurmdPidFile=/var/run/slurmd.pid
+ProctrackType=proctrack/pgid
+CacheGroups=0
+ReturnToService=2
+TaskPlugin=task/affinity
+#
+# TIMERS
+SlurmctldTimeout=300
+SlurmdTimeout=300
+InactiveLimit=0
+MinJobAge=300
+KillWait=30
+Waittime=0
+#
+# SCHEDULING
+SchedulerType=sched/backfill
+SchedulerPort=7321
+SelectType=select/linear
+FastSchedule=0
+#
+# LOGGING
+SlurmctldDebug=3
+#SlurmctldLogFile=
+SlurmdDebug=3
+#SlurmdLogFile=
+JobCompType=jobcomp/none
+#JobCompLoc=
+JobAcctGatherType=jobacct_gather/none
+#
+# COMPUTE NODES
+NodeName=DEFAULT
+PartitionName=DEFAULT MaxTime=INFINITE State=UP
+
+NodeName=compute[0-255]
+PartitionName=compute Nodes=compute[0-255] Default=YES Shared=YES
+</pre>
+</notextile>
+
+h3. SLURM configuration essentials
+
+Whenever you change this file, you will need to update the copy _on every compute node_ as well as the controller node, and then run @sudo scontrol reconfigure@.
+
+*@ControlMachine@* should be a DNS name that resolves to the SLURM controller (dispatch/API server). This must resolve correctly on all SLURM worker nodes as well as the controller itself. In general SLURM is very sensitive about all of the nodes being able to communicate with the controller _and one another_, all using the same DNS names.
+
+*@NodeName=compute[0-255]@* establishes that the hostnames of the worker nodes will be compute0, compute1, etc. through compute255.
+* There are several ways to compress sequences of names, like @compute[0-9,80,100-110]@. See the "hostlist" discussion in the @slurm.conf(5)@ and @scontrol(1)@ man pages for more information.
+* It is not necessary for all of the nodes listed here to be alive in order for SLURM to work, although you should make sure the DNS entries exist. It is easiest to define lots of hostnames up front, assigning them to real nodes and updating your DNS records as the nodes appear. This minimizes the frequency of @slurm.conf@ updates and use of @scontrol reconfigure@.
+
+Each hostname in @slurm.conf@ must also resolve correctly on all SLURM worker nodes as well as the controller itself. Furthermore, the hostnames used in the configuration file must match the hostnames reported by @hostname@ or @hostname -s@ on the nodes themselves. This applies to the ControlMachine as well as the worker nodes.
+
+For example:
+* In @slurm.conf@ on control and worker nodes: @ControlMachine=uuid_prefix.your.domain@
+* In @slurm.conf@ on control and worker nodes: @NodeName=compute[0-255]@
+* In @/etc/resolv.conf@ on control and worker nodes: @search uuid_prefix.your.domain@
+* On the control node: @hostname@ reports @uuid_prefix.your.domain@
+* On worker node 123: @hostname@ reports @compute123.uuid_prefix.your.domain@
+
+h3. Automatic hostname assignment
+
+If your worker node bootstrapping script (see "Installing a compute node":install-compute-node.html) does not send the worker's current hostname, the API server will choose an unused hostname from the set given in @application.yml@, which defaults to @compute[0-255]@.
+
+If it is not feasible to give your compute nodes hostnames like compute0, compute1, etc., you can accommodate other naming schemes with a bit of extra configuration.
+
+If you want Arvados to assign names to your nodes with a different consecutive numeric series like @{worker1-0000, worker1-0001, worker1-0002}@, add an entry to @application.yml@; see @/var/www/arvados-api/current/config/application.default.yml@ for details. Example:
+* In @application.yml@: <code>assign_node_hostname: worker1-%<slot_number>04d</code>
+* In @slurm.conf@: <code>NodeName=worker1-[0000-0255]</code>
+
+If your worker hostnames are already assigned by other means, and the full set of names is known in advance, have your worker node bootstrapping script (see "Installing a compute node":install-compute-node.html) send its current hostname, rather than expect Arvados to assign one.
+* In @application.yml@: <code>assign_node_hostname: false</code>
+* In @slurm.conf@: <code>NodeName=alice,bob,clay,darlene</code>
+
+If your worker hostnames are already assigned by other means, but the full set of names is _not_ known in advance, you can use the @slurm.conf@ and @application.yml@ settings in the previous example, but you must also update @slurm.conf@ (both on the controller and on all worker nodes) and run @sudo scontrol reconfigure@ whenever a new node comes online.
+
+h2. Enable SLURM job dispatch
+
+In your API server's @application.yml@ configuration file, add the line @crunch_job_wrapper: :slurm_immediate@ under the appropriate section.  (The second colon is not a typo.  It denotes a Ruby symbol.)
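+
+For example, assuming your settings live under the @production@ section:
+
+<notextile>
+<pre><code>production:
+  crunch_job_wrapper: <span class="userinput">:slurm_immediate</span>
+</code></pre>
+</notextile>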
+
+h2. Crunch user account
+
+Run @sudo adduser crunch@.  The crunch user should have the same UID, GID, and home directory on all compute nodes and on the dispatcher (API server).
+
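+One way to verify this is to compare @id@ output across hosts; the numeric values below are only illustrative, but they should be identical on every node:
+
+<notextile>
+<pre><code>~$ <span class="userinput">id crunch</span>
+uid=1001(crunch) gid=1001(crunch) groups=1001(crunch)
+</code></pre>
+</notextile>
+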
+h2. Run the Crunch dispatcher service
+
+To dispatch Arvados jobs:
+
+* The API server script @crunch-dispatch.rb@ must be running.
+* @crunch-job@ needs the installation path of the Perl SDK in its @PERLLIB@.
+* @crunch-job@ needs the @ARVADOS_API_HOST@ (and, if necessary, @ARVADOS_API_HOST_INSECURE@) environment variable set.
+
+Install runit to monitor the Crunch dispatch daemon.  {% include 'install_runit' %}
+
+Install the script below as the run script for the Crunch dispatch service, modifying it as directed by the comments.
+
+<notextile>
+<pre><code>#!/bin/sh
+set -e
+
+rvmexec=""
+## Uncomment this line if you use RVM:
+#rvmexec="/usr/local/rvm/bin/rvm-exec default"
+
+export ARVADOS_API_HOST=<span class="userinput">uuid_prefix.your.domain</span>
+export CRUNCH_DISPATCH_LOCKFILE=/var/lock/crunch-dispatch
+export HOME=$(pwd)
+export RAILS_ENV=production
+
+## Uncomment and edit this line if your compute nodes have cgroup info
+## somewhere other than /sys/fs/cgroup (e.g., "/cgroup" for CentOS 7)
+#export CRUNCH_CGROUP_ROOT="/sys/fs/cgroup"
+
+## Uncomment this line if your cluster uses self-signed SSL certificates:
+#export ARVADOS_API_HOST_INSECURE=yes
+
+# This is the path to docker on your compute nodes. You might need to
+# change it to "docker", "/opt/bin/docker", etc.
+export CRUNCH_JOB_DOCKER_BIN=<span class="userinput">docker.io</span>
+
+fuser -TERM -k $CRUNCH_DISPATCH_LOCKFILE || true
+cd /var/www/arvados-api/current
+exec $rvmexec bundle exec ./script/crunch-dispatch.rb 2>&1
+</code></pre>
+</notextile>
diff --git a/doc/install/install-keep-balance.html.textile.liquid b/doc/install/install-keep-balance.html.textile.liquid
new file mode 100644 (file)
index 0000000..68bf07a
--- /dev/null
@@ -0,0 +1,180 @@
+---
+layout: default
+navsection: installguide
+title: Install Keep-balance
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Keep-balance deletes unreferenced and overreplicated blocks from Keep servers, makes additional copies of underreplicated blocks, and moves blocks into optimal locations as needed (e.g., after adding new servers).
+
+{% include 'notebox_begin' %}
+
+If you are installing keep-balance on an existing system with valuable data, you can run keep-balance in "dry run" mode first and review its logs as a precaution. To do this, edit your keep-balance startup script to use the flags @-commit-pulls=false -commit-trash=false@.
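+
+For example, the @exec@ line of the runit run script shown later on this page would become:
+
+<notextile>
+<pre><code>exec keep-balance -commit-pulls=false -commit-trash=false 2>&1
+</code></pre>
+</notextile>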
+
+{% include 'notebox_end' %}
+
+h2. Install keep-balance
+
+Keep-balance can be installed anywhere with network access to Keep services. Typically it runs on the same host as keepproxy.
+
+*A cluster should have only one keep-balance process running at a time.*
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install keep-balance</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install keep-balance</span>
+</code></pre>
+</notextile>
+
+Verify that @keep-balance@ is functional:
+
+<notextile>
+<pre><code>~$ <span class="userinput">keep-balance -h</span>
+...
+Usage: keep-balance [options]
+
+Options:
+  -commit-pulls
+        send pull requests (make more replicas of blocks that are underreplicated or are not in optimal rendezvous probe order)
+  -commit-trash
+        send trash requests (delete unreferenced old blocks, and excess replicas of overreplicated blocks)
+...
+</code></pre>
+</notextile>
+
+h3. Create a keep-balance token
+
+Create an Arvados superuser token for use by keep-balance.
+
+{% include 'create_superuser_token' %}
+
+h3. Update keepstore configuration files
+
+On each node that runs keepstore, save the token you generated in the previous step in a text file like @/etc/arvados/keepstore/system-auth-token.txt@ and then create or update @/etc/arvados/keepstore/keepstore.yml@ with the following key:
+
+<notextile>
+<pre><code>SystemAuthTokenFile: /etc/arvados/keepstore/system-auth-token.txt
+</code></pre>
+</notextile>
+
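+For example, on each keepstore node (a sketch; substitute the real token you generated, and adjust file ownership if your keepstore process runs as a non-root user):
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo mkdir -p /etc/arvados/keepstore</span>
+~$ <span class="userinput">echo zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz | sudo tee /etc/arvados/keepstore/system-auth-token.txt >/dev/null</span>
+~$ <span class="userinput">sudo chmod 0400 /etc/arvados/keepstore/system-auth-token.txt</span>
+</code></pre>
+</notextile>
+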
+Restart all keepstore services to apply the updated configuration.
+
+h3. Create a keep-balance configuration file
+
+On the host running keep-balance, create @/etc/arvados/keep-balance/keep-balance.yml@ using the token you generated above.  Follow this YAML format:
+
+<notextile>
+<pre><code>Listen: :9005
+Client:
+  APIHost: <span class="userinput">uuid_prefix.your.domain</span>:443
+  AuthToken: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+KeepServiceTypes:
+  - disk
+ManagementToken: <span class="userinput">xyzzy</span>
+RunPeriod: 10m
+CollectionBatchSize: 100000
+CollectionBuffers: 1000
+</code></pre>
+</notextile>
+
+If your API server's SSL certificate is not signed by a recognized CA, add the @Insecure@ option to the @Client@ section:
+
+<notextile>
+<pre><code>Client:
+  <span class="userinput">Insecure: true</span>
+  APIHost: ...
+</code></pre>
+</notextile>
+
+h3. Start the service (option 1: systemd)
+
+If your system does not use systemd, skip this section and follow the "runit instructions":#runit instead.
+
+If your system uses systemd, the keep-balance service should already be set up. Start it and check its status:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo systemctl restart keep-balance</span>
+~$ <span class="userinput">sudo systemctl status keep-balance</span>
+&#x25cf; keep-balance.service - Arvados Keep Balance
+   Loaded: loaded (/lib/systemd/system/keep-balance.service; enabled)
+   Active: active (running) since Sat 2017-02-14 18:46:01 UTC; 3 days ago
+     Docs: https://doc.arvados.org/
+ Main PID: 541 (keep-balance)
+   CGroup: /system.slice/keep-balance.service
+           └─541 /usr/bin/keep-balance -commit-pulls -commit-trash
+
+Feb 14 18:46:01 zzzzz.arvadosapi.com keep-balance[541]: 2017/02/14 18:46:01 starting up: will scan every 10m0s and on SIGUSR1
+Feb 14 18:56:01 zzzzz.arvadosapi.com keep-balance[541]: 2017/02/14 18:56:01 Run: start
+Feb 14 18:56:01 zzzzz.arvadosapi.com keep-balance[541]: 2017/02/14 18:56:01 skipping zzzzz-bi6l4-rbtrws2jxul6i4t with service type "proxy"
+Feb 14 18:56:01 zzzzz.arvadosapi.com keep-balance[541]: 2017/02/14 18:56:01 clearing existing trash lists, in case the new rendezvous order differs from previous run
+</code></pre>
+</notextile>
+
+h3(#runit). Start the service (option 2: runit)
+
+Install runit to supervise the keep-balance daemon.  {% include 'install_runit' %}
+
+Create a supervised service.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo mkdir /etc/service/keep-balance</span>
+~$ <span class="userinput">cd /etc/service/keep-balance</span>
+~$ <span class="userinput">sudo mkdir log log/main</span>
+~$ <span class="userinput">printf '#!/bin/sh\nexec keep-balance -commit-pulls -commit-trash 2>&1\n' | sudo tee run</span>
+~$ <span class="userinput">printf '#!/bin/sh\nexec svlogd main\n' | sudo tee log/run</span>
+~$ <span class="userinput">sudo chmod +x run log/run</span>
+~$ <span class="userinput">sudo sv exit .</span>
+~$ <span class="userinput">cd -</span>
+</code></pre>
+</notextile>
+
+Use @sv stat@ and check the log file to verify the service is running.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo sv stat /etc/service/keep-balance</span>
+run: /etc/service/keep-balance: (pid 12520) 2s; run: log: (pid 12519) 2s
+~$ <span class="userinput">tail /etc/service/keep-balance/log/main/current</span>
+2017/02/14 18:46:01 starting up: will scan every 10m0s and on SIGUSR1
+2017/02/14 18:56:01 Run: start
+2017/02/14 18:56:01 skipping zzzzz-bi6l4-rbtrws2jxul6i4t with service type "proxy"
+2017/02/14 18:56:01 clearing existing trash lists, in case the new rendezvous order differs from previous run
+</code></pre>
+</notextile>
+
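+As the log output shows, keep-balance scans on its @RunPeriod@ schedule and on SIGUSR1.  To trigger a scan immediately instead of waiting for the next scheduled run, you can send that signal to the daemon (assuming the process is named @keep-balance@):
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo pkill -USR1 keep-balance</span>
+</code></pre>
+</notextile>
+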
+h2. Enable delete operations on keepstore volumes
+
+Ensure your keepstore services have the "delete" operation enabled. If it is disabled (which is the default), unneeded blocks will be identified by keep-balance, but will never be deleted from the underlying storage devices.
+
+Add the @-never-delete=false@ command line flag to your keepstore run script:
+
+<notextile>
+<pre><code>keepstore <span class="userinput">-never-delete=false</span> -volume=...
+</code></pre>
+</notextile>
+
+{% comment %}
+// To replace the above section when the keepstore page recommends YAML...
+
+Use the @EnableDelete@ flag in your YAML configuration file @/etc/arvados/keepstore/keepstore.yml@:
+
+<notextile>
+<pre><code>...
+BlobSigningKeyFile: /etc/keepstore/blob-signing.key
+<span class="userinput">EnableDelete: true</span>
+Listen: :25107
+...
+</code></pre>
+</notextile>
+{% endcomment %}
diff --git a/doc/install/install-keep-web.html.textile.liquid b/doc/install/install-keep-web.html.textile.liquid
new file mode 100644 (file)
index 0000000..2991d7b
--- /dev/null
@@ -0,0 +1,157 @@
+---
+layout: default
+navsection: installguide
+title: Install Keep-web server
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Keep-web server provides read/write HTTP (WebDAV) access to files stored in Keep. It serves public data to unauthenticated clients, and serves private data to clients that supply Arvados API tokens. It can be installed anywhere with access to Keep services, typically behind a web proxy that provides TLS support. See the "godoc page":http://godoc.org/github.com/curoverse/arvados/services/keep-web for more detail.
+
+By convention, we use the following hostnames for the Keep-web service:
+
+<notextile>
+<pre><code>download.<span class="userinput">uuid_prefix</span>.your.domain
+collections.<span class="userinput">uuid_prefix</span>.your.domain
+*.collections.<span class="userinput">uuid_prefix</span>.your.domain
+</code></pre>
+</notextile>
+
+The above hostnames should resolve from anywhere on the internet.
+
+h2. Install Keep-web
+
+Typically Keep-web runs on the same host as Keepproxy.
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install keep-web</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install keep-web</span>
+</code></pre>
+</notextile>
+
+Verify that @Keep-web@ is functional:
+
+<notextile>
+<pre><code>~$ <span class="userinput">keep-web -h</span>
+Usage of keep-web:
+  -allow-anonymous
+        Serve public data to anonymous clients. Try the token supplied in the ARVADOS_API_TOKEN environment variable when none of the tokens provided in an HTTP request succeed in reading the desired collection. (default false)
+  -attachment-only-host string
+        Accept credentials, and add "Content-Disposition: attachment" response headers, for requests at this hostname:port. Prohibiting inline display makes it possible to serve untrusted and non-public content from a single origin, i.e., without wildcard DNS or TLS.
+  -listen string
+        Address to listen on: "host:port", or ":port" to listen on all interfaces. (default ":80")
+  -trust-all-content
+        Serve non-public content from a single origin. Dangerous: read docs before using!
+</code></pre>
+</notextile>
+
+{% assign railscmd = "bundle exec ./script/get_anonymous_user_token.rb --get" %}
+{% assign railsout = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz" %}
+If you intend to use Keep-web to serve public data to anonymous clients, configure it with an anonymous token. You can use the same one you used when you set up your Keepproxy server, or use the following command on the <strong>API server</strong> to create another. {% include 'install_rails_command' %}
+
+Install runit to supervise the Keep-web daemon.  {% include 'install_runit' %}
+
+The basic command to start Keep-web in the service run script is:
+
+<notextile>
+<pre><code>export ARVADOS_API_HOST=<span class="userinput">uuid_prefix</span>.your.domain
+export ARVADOS_API_TOKEN="<span class="userinput">{{railsout}}</span>"
+exec sudo -u nobody keep-web \
+ -listen=<span class="userinput">:9002</span> \
+ -attachment-only-host=<span class="userinput">download.uuid_prefix.your.domain</span> \
+ -allow-anonymous \
+ 2&gt;&amp;1
+</code></pre>
+</notextile>
+
+Omit the @-allow-anonymous@ argument if you do not want to serve public data.
+
+Set @ARVADOS_API_HOST_INSECURE=1@ if your API server's TLS certificate is not signed by a recognized CA.
+
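+Putting these pieces together, a complete runit run script might look like the following (a sketch; adjust the port, hostnames, and token for your site):
+
+<notextile>
+<pre><code>#!/bin/sh
+set -e
+
+export ARVADOS_API_HOST=<span class="userinput">uuid_prefix</span>.your.domain
+export ARVADOS_API_TOKEN="<span class="userinput">zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz</span>"
+## Uncomment this line if your API server's TLS certificate is not
+## signed by a recognized CA:
+#export ARVADOS_API_HOST_INSECURE=1
+
+exec sudo -u nobody keep-web \
+ -listen=<span class="userinput">:9002</span> \
+ -attachment-only-host=<span class="userinput">download.uuid_prefix.your.domain</span> \
+ -allow-anonymous \
+ 2&gt;&amp;1
+</code></pre>
+</notextile>
+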
+h3. Set up a reverse proxy with TLS support
+
+The Keep-web service will be accessible from anywhere on the internet, so we recommend using TLS for transport encryption.
+
+This is best achieved by putting a reverse proxy with TLS support in front of Keep-web, running on port 443 and passing requests to Keep-web on port 9002 (or whatever port you chose in your run script).
+
+Note: A wildcard TLS certificate is required in order to support a full-featured secure Keep-web service. Without it, Keep-web can offer file downloads for all Keep data; however, in order to avoid cross-site scripting vulnerabilities, Keep-web refuses to serve private data as web content except when it is accessed using a "secret link" share. With a wildcard TLS certificate and DNS configured appropriately, all data can be served as web content.
+
+For example, using Nginx:
+
+<notextile><pre>
+upstream keep-web {
+  server                127.0.0.1:<span class="userinput">9002</span>;
+}
+
+server {
+  listen                <span class="userinput">[your public IP address]</span>:443 ssl;
+  server_name           download.<span class="userinput">uuid_prefix</span>.your.domain
+                        collections.<span class="userinput">uuid_prefix</span>.your.domain
+                        *.collections.<span class="userinput">uuid_prefix</span>.your.domain
+                        ~.*--collections.<span class="userinput">uuid_prefix</span>.your.domain;
+
+  proxy_connect_timeout 90s;
+  proxy_read_timeout    300s;
+
+  ssl                   on;
+  ssl_certificate       <span class="userinput">/YOUR/PATH/TO/cert.pem</span>;
+  ssl_certificate_key   <span class="userinput">/YOUR/PATH/TO/cert.key</span>;
+
+  location / {
+    proxy_pass          http://keep-web;
+    proxy_set_header    Host            $host;
+    proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;
+
+    client_max_body_size    0;
+    proxy_http_version      1.1;
+    proxy_request_buffering off;
+  }
+}
+</pre></notextile>
+
+{% include 'notebox_begin' %}
+If you restrict access to your Arvados services based on network topology -- for example, your proxy server is not reachable from the public internet -- additional proxy configuration might be needed to thwart cross-site scripting attacks that would circumvent your restrictions. Read the "'Intranet mode' section of the Keep-web documentation":https://godoc.org/github.com/curoverse/arvados/services/keep-web#hdr-Intranet_mode now.
+{% include 'notebox_end' %}
+
+h3. Configure DNS
+
+Configure your DNS servers so the following names resolve to your Nginx proxy's public IP address.
+* @download.uuid_prefix.your.domain@
+* @collections.uuid_prefix.your.domain@
+* @*--collections.uuid_prefix.your.domain@, if you have a wildcard TLS certificate valid for @*.uuid_prefix.your.domain@ and your DNS server allows this without interfering with other DNS names.
+* @*.collections.uuid_prefix.your.domain@, if you have a wildcard TLS certificate valid for these names.
+
+If neither of the above wildcard options is feasible, you have two choices:
+# Serve web content at @collections.uuid_prefix.your.domain@, but only for unauthenticated requests (public data and collection sharing links). Authenticated requests will always result in file downloads, using the @download@ name. For example, the Workbench "preview" button and the "view entire log file" link will invoke file downloads instead of displaying content in the browser window.
+# In the special case where you know you are immune to XSS exploits, you can enable the "trust all content" mode in Keep-web (with the @-trust-all-content@ command line flag) and Workbench (with the @trust_all_content@ item in @application.yml@). With both of these enabled, inline web content can be served from a single @collections@ host name; no wildcard DNS or certificate is needed. Do not do this without understanding the security implications described in the "Keep-web documentation":http://godoc.org/github.com/curoverse/arvados/services/keep-web.
+
+h3. Tell Workbench about the Keep-web service
+
+Workbench has features like "download file from collection" and "show image", which work better if the content is served by Keep-web rather than by Workbench itself. We recommend using the two different hostnames ("download" and "collections" above) for file downloads and inline content, respectively.
+
+Add the following entry to your Workbench configuration file (@/etc/arvados/workbench/application.yml@). This URL will be used for file downloads.
+
+<notextile>
+<pre><code>keep_web_download_url: https://download.<span class="userinput">uuid_prefix</span>.your.domain/c=%{uuid_or_pdh}
+</code></pre>
+</notextile>
+
+Additionally, add *one* of the following entries to your Workbench configuration file, depending on your DNS setup. This URL will be used to serve user content that can be displayed in the browser, like image previews and static HTML pages.
+
+<notextile>
+<pre><code>keep_web_url: https://%{uuid_or_pdh}--collections.<span class="userinput">uuid_prefix</span>.your.domain
+keep_web_url: https://%{uuid_or_pdh}.collections.<span class="userinput">uuid_prefix</span>.your.domain
+keep_web_url: https://collections.<span class="userinput">uuid_prefix</span>.your.domain/c=%{uuid_or_pdh}
+</code></pre>
+</notextile>
diff --git a/doc/install/install-keepproxy.html.textile.liquid b/doc/install/install-keepproxy.html.textile.liquid
new file mode 100644 (file)
index 0000000..db24953
--- /dev/null
@@ -0,0 +1,142 @@
+---
+layout: default
+navsection: installguide
+title: Install Keepproxy server
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Keepproxy server is a gateway into your Keep storage. Unlike the Keepstore servers, which are only accessible on the local LAN, Keepproxy is suitable for clients located elsewhere on the internet. Specifically, in contrast to Keepstore:
+* A client writing through Keepproxy generates less network traffic: the client sends a single copy of a data block, and Keepproxy sends copies to the appropriate Keepstore servers.
+* A client can write through Keepproxy without precomputing content hashes. Notably, the browser-based upload feature in Workbench requires Keepproxy.
+* Keepproxy checks API token validity before processing requests. (Clients that can connect directly to Keepstore can use it as scratch space even without a valid API token.)
+
+By convention, we use the following hostname for the Keepproxy server:
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Hostname_|
+|keep.@uuid_prefix@.your.domain|
+</div>
+
+This hostname should resolve from anywhere on the internet.
+
+h2. Install Keepproxy
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install keepproxy</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install keepproxy</span>
+</code></pre>
+</notextile>
+
+Verify that Keepproxy is functional:
+
+<notextile>
+<pre><code>~$ <span class="userinput">keepproxy -h</span>
+...
+Usage: keepproxy [-config path/to/keepproxy.yml]
+...
+</code></pre>
+</notextile>
+
+h3. Create an API token for the Keepproxy server
+
+{% assign railscmd = "bundle exec ./script/get_anonymous_user_token.rb --get" %}
+{% assign railsout = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz" %}
+The Keepproxy server needs a token to talk to the API server.  On the <strong>API server</strong>, use the following command to create the token.  {% include 'install_rails_command' %}
+
+h3. Set up the Keepproxy service
+
+Install runit to supervise the keepproxy daemon.  {% include 'install_runit' %}
+
+The run script for the keepproxy service should set the environment variables @ARVADOS_API_TOKEN@ (with the token you just generated), @ARVADOS_API_HOST@, and, if needed, @ARVADOS_API_HOST_INSECURE@.  The core keepproxy command to run is:
+
+<notextile>
+<pre><code>ARVADOS_API_TOKEN=<span class="userinput">{{railsout}}</span> ARVADOS_API_HOST=<span class="userinput">uuid_prefix.your.domain</span> exec keepproxy
+</code></pre>
+</notextile>
+
+h3. Set up a reverse proxy with SSL support
+
+Because Keepproxy is intended to be accessible from anywhere on the internet, we recommend using SSL for transport encryption.
+
+This is best achieved by putting a reverse proxy with SSL support in front of Keepproxy. Keepproxy itself runs on port 25107 by default; your reverse proxy can run on port 443 and pass requests to Keepproxy on port 25107.
+
+<notextile><pre>
+upstream keepproxy {
+  server                127.0.0.1:<span class="userinput">25107</span>;
+}
+
+server {
+  listen                  <span class="userinput">[your public IP address]</span>:443 ssl;
+  server_name             keep.<span class="userinput">uuid_prefix</span>.your.domain;
+
+  proxy_connect_timeout   90s;
+  proxy_read_timeout      300s;
+  proxy_set_header        X-Real-IP $remote_addr;
+  proxy_http_version      1.1;
+  proxy_request_buffering off;
+
+  ssl                     on;
+  ssl_certificate         /etc/nginx/keep.<span class="userinput">uuid_prefix</span>.your.domain-ssl.crt;
+  ssl_certificate_key     /etc/nginx/keep.<span class="userinput">uuid_prefix</span>.your.domain-ssl.key;
+
+  # Clients need to be able to upload blocks of data up to 64MiB in size.
+  client_max_body_size    64m;
+
+  location / {
+    proxy_pass            http://keepproxy;
+  }
+}
+</pre></notextile>
+
+Note: if the Web uploader fails to upload data and there are no logs from keepproxy, be sure to check the nginx proxy logs.  In addition to "GET" and "PUT", the nginx proxy must pass "OPTIONS" requests to keepproxy, which should respond with appropriate Cross-Origin Resource Sharing (CORS) headers.  If the CORS headers are not present, browser security policy will cause the upload request to fail silently.  The CORS headers are generated by keepproxy and should not be set in nginx.
+
+h3. Tell the API server about the Keepproxy server
+
+The API server needs to be informed about the presence of your Keepproxy server.
+
+First, if you don't already have an admin token, create a superuser token.
+
+{% include 'create_superuser_token' %}
+
+Configure your environment to run @arv@ using the output of create_superuser_token.rb:
+
+<pre>
+export ARVADOS_API_HOST=zzzzz.example.com
+export ARVADOS_API_TOKEN=zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+</pre>
+
+<notextile>
+<pre><code>~$ <span class="userinput">uuid_prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">echo "Site prefix is '$uuid_prefix'"</span>
+~$ <span class="userinput">read -rd $'\000' keepservice &lt;&lt;EOF; arv keep_service create --keep-service "$keepservice"</span>
+<span class="userinput">{
+ "service_host":"<strong>keep.$uuid_prefix.your.domain</strong>",
+ "service_port":443,
+ "service_ssl_flag":true,
+ "service_type":"proxy"
+}
+EOF</span>
+</code></pre></notextile>
+
+h3. Testing keepproxy
+
+Log into a host outside your private Arvados network.  The host should be able to contact your keepproxy server (e.g., keep.$uuid_prefix.arvadosapi.com), but not your keepstore servers (e.g., keep[0-9].$uuid_prefix.arvadosapi.com).
+
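+A quick way to check this from that host (a sketch; substitute your real hostnames; the keepproxy request should return an HTTP status line, while the keepstore request should time out):
+
+<notextile>
+<pre><code>~$ <span class="userinput">curl -sI https://keep.uuid_prefix.arvadosapi.com | head -n1</span>
+~$ <span class="userinput">curl -s -m 5 http://keep0.uuid_prefix.arvadosapi.com:25107/ || echo "keepstore unreachable (as expected)"</span>
+</code></pre>
+</notextile>
+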
+Install the "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html
+
+@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ must be set in the environment.
+
+You should now be able to use @arv-put@ to upload collections and @arv-get@ to fetch collections.  For an example, see "Testing keep":install-keepstore.html#testing on the keepstore install page.
diff --git a/doc/install/install-keepstore.html.textile.liquid b/doc/install/install-keepstore.html.textile.liquid
new file mode 100644 (file)
index 0000000..5044cc0
--- /dev/null
@@ -0,0 +1,249 @@
+---
+layout: default
+navsection: installguide
+title: Install Keepstore servers
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Keepstore provides access to underlying storage for reading and writing content-addressed blocks, with enforcement of Arvados permissions.  Keepstore supports a variety of cloud object storage and POSIX filesystems for its backing store.
+
+We recommend starting off with two Keepstore servers.  Exact server specifications will be site and workload specific, but in general keepstore will be I/O bound and should be set up to maximize aggregate bandwidth with compute nodes.  To increase capacity (either space or throughput) it is straightforward to add additional servers, or (in cloud environments) to increase the machine size of the existing servers.
+
+By convention, we use the following hostname pattern:
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Hostname_|
+|keep0.@uuid_prefix@.your.domain|
+|keep1.@uuid_prefix@.your.domain|
+</div>
+
+Keepstore servers should not be directly accessible from the Internet (they are accessed via "keepproxy":install-keepproxy.html), so the hostnames only need to resolve on the private network.
+
+h2. Install Keepstore
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install keepstore</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install keepstore</span>
+</code></pre>
+</notextile>
+
+Verify that Keepstore is functional:
+
+<notextile>
+<pre><code>~$ <span class="userinput">keepstore --version</span>
+</code></pre>
+</notextile>
+
+h3. Create config file
+
+By default, keepstore will look for its configuration file at @/etc/arvados/keepstore/keepstore.yml@.
+
+You can override the configuration file location using the @-config@ command line option to keepstore.
+
+The following is a sample configuration file:
+
+<pre>
+# Duration for which new permission signatures (returned in PUT
+# responses) will be valid.  This should be equal to the API
+# server's blob_signature_ttl configuration entry.
+BlobSignatureTTL: 336h0m0s
+
+# Local file containing the secret blob signing key (used to generate
+# and verify blob signatures).  The contents of the key file must be
+# identical to the API server's blob_signing_key configuration entry.
+BlobSigningKeyFile: ""
+
+# Print extra debug logging
+Debug: false
+
+# Maximum number of concurrent block deletion operations (per
+# volume) when emptying trash. Default is 1.
+EmptyTrashWorkers: 1
+
+# Enable trash and delete features. If false, trash lists will be
+# accepted but blocks will not be trashed or deleted.
+# Keepstore does not delete data on its own.  The keep-balance
+# service determines which blocks are candidates for deletion
+# and instructs the keepstore to move those blocks to the trash.
+EnableDelete: true
+
+# Local port to listen on. Can be 'address:port' or ':port', where
+# 'address' is a host IP address or name and 'port' is a port number
+# or name.
+Listen: :25107
+
+# Format of request/response and error logs: "json" or "text".
+LogFormat: json
+
+# The secret key that must be provided by monitoring services when
+# using the health check and metrics endpoints (/_health, /metrics).
+ManagementToken: xyzzy
+
+# Maximum RAM to use for data buffers, given in multiples of block
+# size (64 MiB). When this limit is reached, HTTP requests requiring
+# buffers (like GET and PUT) will wait for buffer space to be
+# released.
+#
+# It should be set such that MaxBuffers * 64MiB + 10% fits
+# comfortably in memory. On a host dedicated to running keepstore,
+# divide total memory by 88MiB to suggest a suitable value. For example,
+# if grep MemTotal /proc/meminfo reports MemTotal: 7125440 kB,
+# compute 7125440 / (88 * 1024) = 79 and configure MaxBuffers: 79.
+MaxBuffers: 128
+
+# Maximum concurrent requests. When this limit is reached, new
+# requests will receive 503 responses. Note: this limit does not
+# include idle connections from clients using HTTP keepalive, so it
+# does not strictly limit the number of concurrent connections. If
+# omitted or zero, the default is 2 * MaxBuffers.
+MaxRequests: 0
+
+# Path to write PID file during startup. This file is kept open and
+# locked with LOCK_EX until keepstore exits, so "fuser -k pidfile" is
+# one way to shut down. Exit immediately if there is an error
+# opening, locking, or writing the PID file.
+PIDFile: ""
+
+# Maximum number of concurrent pull operations. Default is 1, i.e.,
+# pull lists are processed serially.  A pull operation copies a block
+# from another keepstore server.
+PullWorkers: 1
+
+# Honor read requests only if a valid signature is provided.  This
+# should be true, except for development use and when migrating from
+# a very old version.
+RequireSignatures: true
+
+# Local file containing the Arvados API token used by keep-balance
+# or data manager.  Delete, trash, and index requests are honored
+# only for this token.
+SystemAuthTokenFile: ""
+
+# Path to server certificate file in X509 format. Enables TLS mode.
+#
+# Example: /var/lib/acme/live/keep0.example.com/fullchain
+TLSCertificateFile: ""
+
+# Path to server key file in X509 format. Enables TLS mode.
+#
+# The key pair is read from disk during startup, and whenever SIGHUP
+# is received.
+#
+# Example: /var/lib/acme/live/keep0.example.com/privkey
+TLSKeyFile: ""
+
+# How often to check for (and delete) trashed blocks whose
+# TrashLifetime has expired.
+TrashCheckInterval: 24h0m0s
+
+# Time duration after a block is trashed during which it can be
+# recovered using an /untrash request.
+TrashLifetime: 336h0m0s
+
+# Maximum number of concurrent trash operations (moving a block to the
+# trash, or permanently deleting it).  Default is 1, i.e., trash lists
+# are processed serially.  If individual trash operations have high
+# latency (e.g., on some cloud platforms), you should increase this.
+TrashWorkers: 1
+</pre>
+
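+As a worked example of the @MaxBuffers@ guidance in the comments above, this one-liner computes a suggested value on a host dedicated to keepstore (the output shown corresponds to the example @MemTotal@ of 7125440 kB):
+
+<notextile>
+<pre><code>~$ <span class="userinput">awk '/MemTotal/ { printf "MaxBuffers: %d\n", $2 / (88 * 1024) }' /proc/meminfo</span>
+MaxBuffers: 79
+</code></pre>
+</notextile>
+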
+h3. Notes on storage management
+
+On its own, a keepstore server never deletes data.  The "keep-balance":install-keep-balance.html service determines which blocks are candidates for deletion and instructs the keepstore to move those blocks to the trash.
+
+When a block is newly written, it is protected from deletion for the duration given by @BlobSignatureTTL@.  During this time, it cannot be trashed.
+
+If keep-balance instructs keepstore to trash a block which is older than @BlobSignatureTTL@, and @EnableDelete@ is true, the block will be moved to "trash".  A block which is in the trash is no longer accessible by read requests, but has not yet been permanently deleted.  Blocks which are in the trash may be recovered using the "untrash" API endpoint.  Blocks are permanently deleted after they have been in the trash for the duration given by @TrashLifetime@.
+
+Keep-balance is also responsible for balancing the distribution of blocks across keepstore servers by asking servers to pull blocks from other servers (as determined by their "storage class":{{site.baseurl}}/admin/storage-classes.html and "rendezvous hashing order":{{site.baseurl}}/api/storage.html).  Pulling a block makes a copy.  If a block is overreplicated (i.e. there are excess copies) after pulling, it will be subsequently trashed on the original server.
+
+h3. Configure storage volumes
+
+Available storage volume types include POSIX filesystems and cloud object storage.
+
+* To use a POSIX filesystem, including both local filesystems (ext4, xfs) and network filesystems such as GPFS or Lustre, follow the setup instructions on "Filesystem storage":configure-fs-storage.html
+* If you are using S3-compatible object storage (including Amazon S3, Google Cloud Storage, and Ceph RADOS), follow the setup instructions on "S3 Object Storage":configure-s3-object-storage.html
+* If you are using Azure Blob Storage, follow the setup instructions on "Azure Blob Storage":configure-azure-blob-storage.html
+
+h3. Run keepstore as a supervised service
+
+Install runit to supervise the keepstore daemon.  {% include 'install_runit' %}
+
+Install this script as the run script @/etc/sv/keepstore/run@ for the keepstore service:
+
+<notextile>
+<pre><code>#!/bin/sh
+
+exec 2>&1
+GOGC=10 exec keepstore -config /etc/arvados/keepstore/keepstore.yml
+</code></pre>
+</notextile>
+
+h3. Set up additional servers
+
+Repeat the above sections to prepare volumes and bring up supervised services on each Keepstore server you are setting up.
+
+h3. Tell the API server about the Keepstore servers
+
+The API server needs to be informed about the presence of your Keepstore servers.
+
+First, if you don't already have an admin token, create a superuser token.
+
+{% include 'create_superuser_token' %}
+
+Configure your environment to run @arv@ using the output of create_superuser_token.rb:
+
+<pre>
+export ARVADOS_API_HOST=zzzzz.example.com
+export ARVADOS_API_TOKEN=zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+</pre>
+
+Use this command to register each keepstore server you have installed.  Make sure to update the @service_host@ value.
+
+<notextile>
+<pre><code>~$ <span class="userinput">uuid_prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">echo "Site prefix is '$uuid_prefix'"</span>
+~$ <span class="userinput">read -rd $'\000' keepservice &lt;&lt;EOF; arv keep_service create --keep-service "$keepservice"</span>
+<span class="userinput">{
+ "service_host":"<strong>keep0.$uuid_prefix.your.domain</strong>",
+ "service_port":25107,
+ "service_ssl_flag":false,
+ "service_type":"disk"
+}
+EOF</span>
+</code></pre></notextile>
+
+h3(#testing). Testing keep
+
+Install the "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html
+
+@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ must be set in the environment.
+
+You should now be able to use @arv-put@ to upload collections and @arv-get@ to fetch collections:
+
+<pre>
+$ echo "hello world!" > hello.txt
+
+$ arv-put --portable-data-hash hello.txt
+2018-07-12 13:35:25 arvados.arv_put[28702] INFO: Creating new cache file at /home/example/.cache/arvados/arv-put/1571ec0adb397c6a18d5c74cc95b3a2a
+0M / 0M 100.0% 2018-07-12 13:35:27 arvados.arv_put[28702] INFO:
+
+2018-07-12 13:35:27 arvados.arv_put[28702] INFO: Collection saved as 'Saved at 2018-07-12 17:35:25 UTC by example@example'
+59389a8f9ee9d399be35462a0f92541c+53
+
+$ arv-get 59389a8f9ee9d399be35462a0f92541c+53/hello.txt
+hello world!
+</pre>
diff --git a/doc/install/install-manual-overview.html.textile.liquid b/doc/install/install-manual-overview.html.textile.liquid
new file mode 100644 (file)
index 0000000..e888894
--- /dev/null
@@ -0,0 +1,20 @@
+---
+layout: default
+navsection: installguide
+title: Overview
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% comment %}
+Obsolete page, no longer in nav.
+{% endcomment %}
+
+<notextile><script>
+window.location = "install-manual-prerequisites.html";
+</script></notextile>
+
+Please proceed to "Prerequisites":install-manual-prerequisites.html.
diff --git a/doc/install/install-manual-prerequisites.html.textile.liquid b/doc/install/install-manual-prerequisites.html.textile.liquid
new file mode 100644 (file)
index 0000000..6201716
--- /dev/null
@@ -0,0 +1,145 @@
+---
+layout: default
+navsection: installguide
+title: Prerequisites
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Supported Cloud and HPC platforms
+
+Arvados can run in a variety of configurations.  For compute scheduling, Arvados supports HPC clusters using @slurm@, and supports elastic cloud computing on AWS, Google, and Azure.  For storage, Arvados can store blocks on regular filesystems such as ext4 or xfs, on network filesystems such as GPFS, or on object storage such as Azure Blob Storage, Amazon S3, and other object stores that support the S3 API, including Google Cloud Storage and Ceph.
+
+h2. Hardware (or virtual machines)
+
+This guide assumes you have seven systems available in the same network subnet:
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_. Function|_. Number of nodes|
+|Arvados API, Crunch dispatcher, Git, Websockets and Workbench|1|
+|Arvados Compute node|1|
+|Arvados Keepproxy and Keep-web server|1|
+|Arvados Keepstore servers|2|
+|Arvados Shell server|1|
+|Arvados SSO server|1|
+</div>
+
+The number of Keepstore, shell and compute nodes listed above is a minimum. In a real production installation, you will likely run many more of each of those types of nodes. In such a scenario, you would probably also want to dedicate separate nodes to the Workbench server and the Crunch dispatcher. For performance reasons, you may want to run the database server on a separate node as well.
+
+h2. Supported GNU/Linux distributions
+
+table(table table-bordered table-condensed).
+|_. Distribution|_. State|_. Last supported version|
+|CentOS 7|Supported|Latest|
+|Debian 8 ("jessie")|Supported|Latest|
+|Debian 9 ("stretch")|Supported|Latest|
+|Ubuntu 14.04 ("trusty")|Supported|Latest|
+|Ubuntu 16.04 ("xenial")|Supported|Latest|
+|Ubuntu 18.04 ("bionic")|Supported|Latest|
+|Ubuntu 12.04 ("precise")|EOL|8ed7b6dd5d4df93a3f37096afe6d6f81c2a7ef6e (2017-05-03)|
+|Debian 7 ("wheezy")|EOL|997479d1408139e96ecdb42a60b4f727f814f6c9 (2016-12-28)|
+|CentOS 6 |EOL|997479d1408139e96ecdb42a60b4f727f814f6c9 (2016-12-28)|
+
+h2(#repos). Arvados package repositories
+
+On any host where you install Arvados software, you'll need to set up an Arvados package repository.  They're available for several popular distributions.
+
+h3. CentOS
+
+Packages are available for CentOS 7. To install them with yum, save this configuration block in @/etc/yum.repos.d/arvados.repo@:
+
+<notextile>
+<pre><code>[arvados]
+name=Arvados
+baseurl=http://rpm.arvados.org/CentOS/$releasever/os/$basearch/
+gpgcheck=1
+gpgkey=http://rpm.arvados.org/CentOS/RPM-GPG-KEY-curoverse
+</code></pre>
+</notextile>
+
+{% include 'install_redhat_key' %}
+
+h3. Debian and Ubuntu
+
+Packages are available for Debian 8 ("jessie"), Debian 9 ("stretch"), Ubuntu 14.04 ("trusty"), Ubuntu 16.04 ("xenial") and Ubuntu 18.04 ("bionic").
+
+First, register the Curoverse signing key in apt's database:
+
+{% include 'install_debian_key' %}
+
+Configure apt to retrieve packages from the Arvados package repository. This command depends on your OS vendor and version:
+
+table(table table-bordered table-condensed).
+|_. OS version|_. Command|
+|Debian 8 ("jessie")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ jessie main" &#x7c; sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
+|Debian 9 ("stretch")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ stretch main" &#x7c; sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
+|Ubuntu 14.04 ("trusty")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ trusty main" &#x7c; sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
+|Ubuntu 16.04 ("xenial")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ xenial main" &#x7c; sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
+|Ubuntu 18.04 ("bionic")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ bionic main" &#x7c; sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
+
+{% include 'notebox_begin' %}
+
+fn1. Arvados packages for Ubuntu may depend on third-party packages in Ubuntu's "universe" repository.  If you're installing on Ubuntu, make sure you have the universe sources uncommented in @/etc/apt/sources.list@.
+
+{% include 'notebox_end' %}
+
+Retrieve the package list:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get update</span>
+</code></pre>
+</notextile>
+
+h2. A unique identifier
+
+Each Arvados installation should have a globally unique identifier, which is a 5-character lowercase alphanumeric string. For testing purposes, here is one way to make a random 5-character string:
+
+<notextile>
+<pre><code>~$ <span class="userinput">tr -dc 0-9a-z &lt;/dev/urandom | head -c5; echo</span>
+</code></pre>
+</notextile>
+
+You may also use a different method to pick the unique identifier. The unique identifier will be part of the hostname of the services in your Arvados cluster. The rest of this documentation will refer to it as your @uuid_prefix@.
+
+
+h2. SSL certificates
+
+There are six public-facing services that require an SSL certificate. If you do not have official SSL certificates, you can use self-signed certificates.
+
+{% include 'notebox_begin' %}
+
+Most Arvados clients and services will accept self-signed certificates when the @ARVADOS_API_HOST_INSECURE@ environment variable is set to @true@.  However, web browsers generally do not make it easy for users to accept self-signed certificates from Web sites.
+
+Users who log in through Workbench will visit at least three sites: the SSO server, the API server, and Workbench itself.  When a browser visits each of these sites, it will warn the user if the site uses a self-signed certificate, and the user must accept it before continuing.  This procedure usually only needs to be done once in a browser.
+
+After that's done, Workbench includes JavaScript clients for other Arvados services.  Users are usually not warned if these client connections are refused because the server uses a self-signed certificate, and it is especially difficult to accept those certificates:
+
+* JavaScript connects to the Websockets server to provide incremental page updates and view logs from running jobs.
+* JavaScript connects to the API and Keepproxy servers to upload local files to collections.
+* JavaScript connects to the Keep-web server to download log files.
+
+In sum, Workbench will be much less pleasant to use in a cluster that uses self-signed certificates.  You should avoid self-signed certificates unless one of the following applies: you plan to deploy a cluster without Workbench; you are deploying only to evaluate Arvados as an individual system administrator; or you can push configuration to your users' browsers to trust your self-signed certificates.
+
+{% include 'notebox_end' %}
+
+By convention, we use the following hostname pattern:
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_. Function|_. Hostname|
+|Arvados API|@uuid_prefix@.your.domain|
+|Arvados Git server|git.@uuid_prefix@.your.domain|
+|Arvados Keepproxy server|keep.@uuid_prefix@.your.domain|
+|Arvados Keep-web server|download.@uuid_prefix@.your.domain
+_and_
+*.collections.@uuid_prefix@.your.domain or
+*<notextile>--</notextile>collections.@uuid_prefix@.your.domain or
+collections.@uuid_prefix@.your.domain (see the "keep-web install docs":install-keep-web.html)|
+|Arvados SSO Server|auth.your.domain|
+|Arvados Websockets endpoint|ws.@uuid_prefix@.your.domain|
+|Arvados Workbench|workbench.@uuid_prefix@.your.domain|
+</div>
diff --git a/doc/install/install-nodemanager.html.textile.liquid b/doc/install/install-nodemanager.html.textile.liquid
new file mode 100644 (file)
index 0000000..defec25
--- /dev/null
@@ -0,0 +1,629 @@
+---
+layout: default
+navsection: installguide
+title: Install Node Manager
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados Node Manager provides elastic computing for Arvados and SLURM by creating and destroying virtual machines on demand.  Node Manager currently supports Amazon Web Services (AWS), Google Cloud Platform (GCP) and Microsoft Azure.
+
+Note: Node Manager is only required for elastic cloud computing environments.  Fixed-size clusters (such as on-premises HPC) do not require it.
+
+h2. Install
+
+Node Manager may run anywhere; however, it must be able to communicate with the cloud provider's APIs, and it uses the command line tools @sinfo@, @squeue@ and @scontrol@ to communicate with the cluster's SLURM controller.
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install arvados-node-manager</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install arvados-node-manager</span>
+</code></pre>
+</notextile>
+
+h2. Create compute image
+
+Configure a virtual machine following the "instructions to set up a compute node":{{site.baseurl}}/install/crunch2-slurm/install-compute-node.html and set it up to run a "ping script":{{site.baseurl}}/install/install-compute-ping.html at boot.
+
+Create a virtual machine image using the commands provided by your cloud provider.  We recommend using a tool such as "Packer":https://www.packer.io/ to automate this process.
+
+Configure node manager to use the image with the @image@ or @image_id@ parameter.
+
+h2. Configure node manager
+
+The configuration file is at @/etc/arvados-node-manager/config.ini@.  Some configuration details are specific to the cloud provider you are using:
+
+* "Amazon Web Services":#aws
+* "Google Cloud Platform":#gcp
+* "Microsoft Azure":#azure
+
+h3(#aws). Amazon Web Services
+
+<pre>
+# EC2 configuration for Arvados Node Manager.
+# All times are in seconds unless specified otherwise.
+
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+#address = 0.0.0.0
+
+# Management server port number (default -1, server is disabled)
+#port = 8989
+
+[Daemon]
+# The dispatcher can customize the start and stop procedure for
+# cloud nodes.  For example, the SLURM dispatcher drains nodes
+# through SLURM before shutting them down.
+dispatcher = slurm
+
+# Node Manager will ensure that there are at least this many nodes running at
+# all times.  If node manager needs to start new idle nodes for the purpose of
+# satisfying min_nodes, it will use the cheapest node type.  However, depending
+# on usage patterns, it may also satisfy min_nodes by keeping alive some
+# more-expensive nodes
+min_nodes = 0
+
+# Node Manager will not start any compute nodes when at least this
+# many are running.
+max_nodes = 8
+
+# Upper limit on rate of spending (in $/hr).  Node Manager will not boot
+# additional nodes if the total price of already running nodes meets or
+# exceeds this threshold.  The default of 0 means no limit.
+max_total_price = 0
+
+# Poll EC2 nodes and Arvados for new information every N seconds.
+poll_time = 60
+
+# Polls have exponential backoff when services fail to respond.
+# This is the longest time to wait between polls.
+max_poll_time = 300
+
+# If Node Manager can't successfully poll a service for this long,
+# it will never start or stop compute nodes, on the assumption that its
+# information is too outdated.
+poll_stale_after = 600
+
+# If Node Manager boots a cloud node, and it does not pair with an Arvados
+# node before this long, assume that there was a cloud bootstrap failure and
+# shut it down.  Note that normal shutdown windows apply (see the Cloud
+# section), so this should be shorter than the first shutdown window value.
+boot_fail_after = 1800
+
+# "Node stale time" affects two related behaviors.
+# 1. If a compute node has been running for at least this long, but it
+# isn't paired with an Arvados node, do not shut it down, but leave it alone.
+# This prevents the node manager from shutting down a node that might
+# actually be doing work, but is having temporary trouble contacting the
+# API server.
+# 2. When the Node Manager starts a new compute node, it will try to reuse
+# an Arvados node that hasn't been updated for this long.
+node_stale_after = 14400
+
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown.  Node status is checked
+# each poll period, and a node can go idle at any point during a poll
+# period (meaning a node reported as idle may have been idle for as
+# little as 1 second).  With a 60 second poll period, three consecutive
+# status updates of "idle" suggest the node has been idle at least
+# 121 seconds.
+consecutive_idle_count = 3
+
+# Scaling factor to be applied to nodes' available RAM size. Usually there's a
+# variable discrepancy between the advertised RAM value on cloud nodes and the
+# actual amount available.
+# If not set, this value will be set to 0.95
+node_mem_scaling = 0.95
+
+# File path for Certificate Authorities
+certs_file = /etc/ssl/certs/ca-certificates.crt
+
+[Logging]
+# Log file path
+file = /var/log/arvados/node-manager.log
+
+# Log level for most Node Manager messages.
+# Choose one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.
+# WARNING lets you know when polling a service fails.
+# INFO additionally lets you know when a compute node is started or stopped.
+level = INFO
+
+# You can also set different log levels for specific libraries.
+# Pykka is the Node Manager's actor library.
+# Setting this to DEBUG will display tracebacks for uncaught
+# exceptions in the actors, but it's also very chatty.
+pykka = WARNING
+
+# Setting apiclient to INFO will log the URL of every Arvados API request.
+apiclient = WARNING
+
+[Arvados]
+host = zyxwv.arvadosapi.com
+token = ARVADOS_TOKEN
+timeout = 15
+
+# Accept an untrusted SSL certificate from the API server?
+insecure = no
+
+[Cloud]
+provider = ec2
+
+# It's usually most cost-effective to shut down compute nodes during narrow
+# windows of time.  For example, EC2 bills each node by the hour, so the best
+# time to shut down a node is right before a new hour of uptime starts.
+# Shutdown windows define these periods of time.  These are windows in
+# full minutes, separated by commas.  Counting from the time the node is
+# booted, the node WILL NOT shut down for N1 minutes; then it MAY shut down
+# for N2 minutes; then it WILL NOT shut down for N3 minutes; and so on.
+# For example, "54, 5, 1" means the node may shut down from the 54th to the
+# 59th minute of each hour of uptime.
+# Specify at least two windows.  You can add as many as you need beyond that.
+shutdown_windows = 54, 5, 1
+
+[Cloud Credentials]
+key = KEY
+secret = SECRET_KEY
+region = us-east-1
+timeout = 60
+
+[Cloud List]
+# This section defines filters that find compute nodes.
+# Tags that you specify here will automatically be added to nodes you create.
+# Replace colons in Amazon filters with underscores
+# (e.g., write "tag:mytag" as "tag_mytag").
+instance-state-name = running
+tag_arvados-class = dynamic-compute
+tag_cluster = zyxwv
+
+[Cloud Create]
+# New compute nodes will send pings to Arvados at this host.
+# You may specify a port, and use brackets to disambiguate IPv6 addresses.
+ping_host = hostname:port
+
+# Give the name of an SSH key on AWS...
+ex_keyname = string
+
+# ... or a file path for an SSH key that can log in to the compute node.
+# (One or the other, not both.)
+# ssh_key = path
+
+# The EC2 IDs of the image and subnet compute nodes should use.
+image_id = idstring
+subnet_id = idstring
+
+# Comma-separated EC2 IDs for the security group(s) assigned to each
+# compute node.
+security_groups = idstring1, idstring2
+
+# Apply an Instance Profile ARN to the newly created compute nodes
+# For more info, see:
+# https://aws.amazon.com/premiumsupport/knowledge-center/iam-policy-restrict-vpc/
+# ex_iamprofile = arn:aws:iam::ACCOUNTNUMBER:instance-profile/ROLENAME
+
+
+# You can define any number of Size sections to list EC2 sizes you're
+# willing to use.  The Node Manager should boot the cheapest size(s) that
+# can run jobs in the queue.
+#
+# Each size section MUST define the number of cores available in this
+# size class (since libcloud does not provide any consistent API for exposing
+# this setting).
+# You may also want to define the amount of scratch space (expressed
+# in GB) for Crunch jobs.  You can also override Amazon's provided
+# data fields (such as price per hour) by setting them here.
+
+[Size m4.large]
+cores = 2
+price = 0.126
+scratch = 100
+
+[Size m4.xlarge]
+cores = 4
+price = 0.252
+scratch = 100
+</pre>
+
+h3(#gcp). Google Cloud Platform
+
+<pre>
+# Google Compute Engine configuration for Arvados Node Manager.
+# All times are in seconds unless specified otherwise.
+
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+#address = 0.0.0.0
+
+# Management server port number (default -1, server is disabled)
+#port = 8989
+
+[Daemon]
+# Node Manager will ensure that there are at least this many nodes running at
+# all times.  If node manager needs to start new idle nodes for the purpose of
+# satisfying min_nodes, it will use the cheapest node type.  However, depending
+# on usage patterns, it may also satisfy min_nodes by keeping alive some
+# more-expensive nodes
+min_nodes = 0
+
+# Node Manager will not start any compute nodes when at least this
+# many are running.
+max_nodes = 8
+
+# Poll compute nodes and Arvados for new information every N seconds.
+poll_time = 60
+
+# Upper limit on rate of spending (in $/hr).  Node Manager will not boot
+# additional nodes if the total price of already running nodes meets or
+# exceeds this threshold.  The default of 0 means no limit.
+max_total_price = 0
+
+# Polls have exponential backoff when services fail to respond.
+# This is the longest time to wait between polls.
+max_poll_time = 300
+
+# If Node Manager can't successfully poll a service for this long,
+# it will never start or stop compute nodes, on the assumption that its
+# information is too outdated.
+poll_stale_after = 600
+
+# "Node stale time" affects two related behaviors.
+# 1. If a compute node has been running for at least this long, but it
+# isn't paired with an Arvados node, do not shut it down, but leave it alone.
+# This prevents the node manager from shutting down a node that might
+# actually be doing work, but is having temporary trouble contacting the
+# API server.
+# 2. When the Node Manager starts a new compute node, it will try to reuse
+# an Arvados node that hasn't been updated for this long.
+node_stale_after = 14400
+
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown.  Node status is checked
+# each poll period, and a node can go idle at any point during a poll
+# period (meaning a node reported as idle may have been idle for as
+# little as 1 second).  With a 60 second poll period, three consecutive
+# status updates of "idle" suggest the node has been idle at least
+# 121 seconds.
+consecutive_idle_count = 3
+
+# Scaling factor to be applied to nodes' available RAM size. Usually there's a
+# variable discrepancy between the advertised RAM value on cloud nodes and the
+# actual amount available.
+# If not set, this value will be set to 0.95
+node_mem_scaling = 0.95
+
+# File path for Certificate Authorities
+certs_file = /etc/ssl/certs/ca-certificates.crt
+
+[Logging]
+# Log file path
+file = /var/log/arvados/node-manager.log
+
+# Log level for most Node Manager messages.
+# Choose one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.
+# WARNING lets you know when polling a service fails.
+# INFO additionally lets you know when a compute node is started or stopped.
+level = INFO
+
+# You can also set different log levels for specific libraries.
+# Pykka is the Node Manager's actor library.
+# Setting this to DEBUG will display tracebacks for uncaught
+# exceptions in the actors, but it's also very chatty.
+pykka = WARNING
+
+# Setting apiclient to INFO will log the URL of every Arvados API request.
+apiclient = WARNING
+
+[Arvados]
+host = zyxwv.arvadosapi.com
+token = ARVADOS_TOKEN
+timeout = 15
+
+# Accept an untrusted SSL certificate from the API server?
+insecure = no
+
+[Cloud]
+provider = gce
+
+# Shutdown windows define periods of time when a node may and may not
+# be shut down.  These are windows in full minutes, separated by
+# commas.  Counting from the time the node is booted, the node WILL
+# NOT shut down for N1 minutes; then it MAY shut down for N2 minutes;
+# then it WILL NOT shut down for N3 minutes; and so on.  For example,
+# "54, 5, 1" means the node may shut down from the 54th to the 59th
+# minute of each hour of uptime.
+# GCE bills by the minute, and does not provide information about when
+# a node booted.  Node Manager will store this information in metadata
+# when it boots a node; if that information is not available, it will
+# assume the node booted at the epoch.  These shutdown settings are
+# very aggressive.  You may want to adjust this if you want more
+# continuity of service from a single node.
+shutdown_windows = 20, 999999
+
+[Cloud Credentials]
+user_id = client_email_address@developer.gserviceaccount.com
+key = path_to_certificate.pem
+project = project-id-from-google-cloud-dashboard
+timeout = 60
+
+# Valid location (zone) names: https://cloud.google.com/compute/docs/zones
+datacenter = us-central1-a
+
+# Optional settings. For full documentation see
+# http://libcloud.readthedocs.org/en/latest/compute/drivers/gce.html#libcloud.compute.drivers.gce.GCENodeDriver
+#
+# auth_type = SA               # SA, IA or GCE
+# scopes = https://www.googleapis.com/auth/compute
+# credential_file =
+
+[Cloud List]
+# A comma-separated list of tags that must be applied to a node for it to
+# be considered a compute node.
+# The driver will automatically apply these tags to nodes it creates.
+tags = zyxwv, compute
+
+[Cloud Create]
+# New compute nodes will send pings to Arvados at this host.
+# You may specify a port, and use brackets to disambiguate IPv6 addresses.
+ping_host = hostname:port
+
+# A file path for an SSH key that can log in to the compute node.
+# ssh_key = path
+
+# The GCE image name and network zone name to use when creating new nodes.
+image = debian-7
+# network = your_network_name
+
+# JSON string of service account authorizations for this cluster.
+# See http://libcloud.readthedocs.org/en/latest/compute/drivers/gce.html#specifying-service-account-scopes
+# service_accounts = [{'email':'account@example.com', 'scopes':['storage-ro']}]
+
+
+# You can define any number of Size sections to list node sizes you're
+# willing to use.  The Node Manager should boot the cheapest size(s) that
+# can run jobs in the queue.
+#
+# The Size fields are interpreted the same way as with a libcloud NodeSize:
+# http://libcloud.readthedocs.org/en/latest/compute/api.html#libcloud.compute.base.NodeSize
+#
+# See https://cloud.google.com/compute/docs/machine-types for a list
+# of known machine types that may be used as a Size parameter.
+#
+# Each size section MUST define the number of cores available in this
+# size class (since libcloud does not provide any consistent API for exposing
+# this setting).
+# You may also want to define the amount of scratch space (expressed
+# in GB) for Crunch jobs.
+# You can also override Google's provided data fields (such as price per hour)
+# by setting them here.
+
+[Size n1-standard-2]
+cores = 2
+price = 0.076
+scratch = 100
+
+[Size n1-standard-4]
+cores = 4
+price = 0.152
+scratch = 200
+</pre>
+
+h3(#azure). Microsoft Azure
+
+<pre>
+# Azure configuration for Arvados Node Manager.
+# All times are in seconds unless specified otherwise.
+
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+#address = 0.0.0.0
+
+# Management server port number (default -1, server is disabled)
+#port = 8989
+
+[Daemon]
+# The dispatcher can customize the start and stop procedure for
+# cloud nodes.  For example, the SLURM dispatcher drains nodes
+# through SLURM before shutting them down.
+dispatcher = slurm
+
+# Node Manager will ensure that there are at least this many nodes running at
+# all times.  If Node Manager needs to start new idle nodes for the purpose of
+# satisfying min_nodes, it will use the cheapest node type.  However, depending
+# on usage patterns, it may also satisfy min_nodes by keeping alive some
+# more-expensive nodes.
+min_nodes = 0
+
+# Node Manager will not start any compute nodes when at least this
+# many are running.
+max_nodes = 8
+
+# Upper limit on rate of spending (in $/hr).  Node Manager will not boot
+# additional nodes if the total price of already running nodes meets or
+# exceeds this threshold.  The default of 0 means no limit.
+max_total_price = 0
+
+# Poll Azure nodes and Arvados for new information every N seconds.
+poll_time = 60
+
+# Polls have exponential backoff when services fail to respond.
+# This is the longest time to wait between polls.
+max_poll_time = 300
+
+# If Node Manager can't successfully poll a service for this long,
+# it will never start or stop compute nodes, on the assumption that its
+# information is too outdated.
+poll_stale_after = 600
+
+# If Node Manager boots a cloud node, and it does not pair with an Arvados
+# node before this long, assume that there was a cloud bootstrap failure and
+# shut it down.  Note that normal shutdown windows apply (see the Cloud
+# section), so this should be shorter than the first shutdown window value.
+boot_fail_after = 1800
+
+# "Node stale time" affects two related behaviors.
+# 1. If a compute node has been running for at least this long, but it
+# isn't paired with an Arvados node, do not shut it down, but leave it alone.
+# This prevents the node manager from shutting down a node that might
+# actually be doing work, but is having temporary trouble contacting the
+# API server.
+# 2. When the Node Manager starts a new compute node, it will try to reuse
+# an Arvados node that hasn't been updated for this long.
+node_stale_after = 14400
+
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown.  Node status is checked
+# each poll period, and a node can go idle at any point during a poll
+# period (meaning a node could be reported as idle that has only been
+# idle for 1 second).  With a 60 second poll period, three consecutive
+# status updates of "idle" suggests the node has been idle at least
+# 121 seconds.
+consecutive_idle_count = 3
+
+# Scaling factor to be applied to nodes' available RAM size. Usually there's a
+# variable discrepancy between the advertised RAM value on cloud nodes and the
+# actual amount available.
+# If not set, this value defaults to 0.95.
+node_mem_scaling = 0.95
+
+# File path for Certificate Authorities
+certs_file = /etc/ssl/certs/ca-certificates.crt
+
+[Logging]
+# Log file path
+file = /var/log/arvados/node-manager.log
+
+# Log level for most Node Manager messages.
+# Choose one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.
+# WARNING lets you know when polling a service fails.
+# INFO additionally lets you know when a compute node is started or stopped.
+level = INFO
+
+# You can also set different log levels for specific libraries.
+# Pykka is the Node Manager's actor library.
+# Setting this to DEBUG will display tracebacks for uncaught
+# exceptions in the actors, but it's also very chatty.
+pykka = WARNING
+
+# Setting apiclient to INFO will log the URL of every Arvados API request.
+apiclient = WARNING
+
+[Arvados]
+host = zyxwv.arvadosapi.com
+token = ARVADOS_TOKEN
+timeout = 15
+
+# Accept an untrusted SSL certificate from the API server?
+insecure = no
+
+[Cloud]
+provider = azure
+
+# Shutdown windows define periods of time when a node may and may not be shut
+# down.  These are windows in full minutes, separated by commas.  Counting from
+# the time the node is booted, the node WILL NOT shut down for N1 minutes; then
+# it MAY shut down for N2 minutes; then it WILL NOT shut down for N3 minutes;
+# and so on.  For example, "20, 999999" means the node may shut down between
+# the 20th and 999999th minutes of uptime.
+# Azure bills by the minute, so it makes sense to aggressively shut down idle
+# nodes.  Specify at least two windows.  You can add as many as you need beyond
+# that.
+shutdown_windows = 20, 999999
+
+[Cloud Credentials]
+# Use "azure account list" with the azure CLI to get these values.
+tenant_id = 00000000-0000-0000-0000-000000000000
+subscription_id = 00000000-0000-0000-0000-000000000000
+
+# The following directions are based on
+# https://azure.microsoft.com/en-us/documentation/articles/resource-group-authenticate-service-principal/
+# and updated for v2 of the Azure cli tool.
+#
+# az ad app create --display-name "Node Manager" --homepage "https://arvados.org" --identifier-uris "https://<Your_Application_Uri>" --password <Your_Password>
+# az ad sp create "<Application_Id>"
+# az role assignment create --assignee "<Application_Id>" --role Owner --resource-group "<Your_Azure_Arvados_Resource_Group>"
+#
+# Use <Application_Id> for "key" and <Your_Password> for "secret".
+#
+key = 00000000-0000-0000-0000-000000000000
+secret = PASSWORD
+timeout = 60
+region = East US
+
+[Cloud List]
+# The resource group in which the compute node virtual machines will be created
+# and listed.
+ex_resource_group = ArvadosResourceGroup
+
+[Cloud Create]
+# The compute node image, as a link to a VHD in Azure blob store.
+image = https://example.blob.core.windows.net/system/Microsoft.Compute/Images/images/zyxwv-compute-osDisk.vhd
+
+# Path to a local ssh key file that will be used to provision new nodes.
+ssh_key = /home/arvadosuser/.ssh/id_rsa.pub
+
+# The account name for the admin user that will be provisioned on new nodes.
+ex_user_name = arvadosuser
+
+# The Azure storage account that will be used to store the node OS disk images.
+ex_storage_account = arvadosstorage
+
+# The virtual network the VMs will be associated with.
+ex_network = ArvadosNetwork
+
+# Optional subnet of the virtual network.
+#ex_subnet = default
+
+# Node tags
+tag_arvados-class = dynamic-compute
+tag_cluster = zyxwv
+
+# the API server to ping
+ping_host = hostname:port
+
+# You can define any number of Size sections to list Azure sizes you're willing
+# to use.  The Node Manager should boot the cheapest size(s) that can run jobs
+# in the queue.  You must also provide the price per hour, as the Azure
+# compute driver currently does not report prices.
+#
+# See https://azure.microsoft.com/en-us/pricing/details/virtual-machines/
+# for a list of known machine types that may be used as a Size parameter.
+#
+# Each size section MUST define the number of cores available in this
+# size class (since libcloud does not provide any consistent API for exposing
+# this setting).
+# You may also want to define the amount of scratch space (expressed
+# in GB) for Crunch jobs.  You can also override Microsoft's provided
+# data fields by setting them here.
+
+[Size Standard_D3]
+cores = 4
+price = 0.56
+
+[Size Standard_D4]
+cores = 8
+price = 1.12
+</pre>
+
+h2. Running
+
+<pre>
+$ arvados-node-manager --config /etc/arvados-node-manager/config.ini
+</pre>
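+
+If you enabled the management server in the @[Manage]@ section (by uncommenting @address@ and @port@), you can spot-check a running daemon by querying its status endpoint; a minimal sketch, assuming the example port 8989:
+
+<notextile>
+<pre><code>~$ <span class="userinput">curl http://127.0.0.1:8989/status.json</span>
+</code></pre>
+</notextile>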
diff --git a/doc/install/install-postgresql.html.textile.liquid b/doc/install/install-postgresql.html.textile.liquid
new file mode 100644 (file)
index 0000000..aabe662
--- /dev/null
@@ -0,0 +1,64 @@
+---
+layout: default
+navsection: installguide
+title: Set up PostgreSQL databases
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Two Arvados Rails servers store data in a PostgreSQL database: the SSO server and the API server.  The API server requires at least version *9.4* of PostgreSQL.  Beyond that, you have the flexibility to deploy PostgreSQL any way that allows the Rails servers to connect to it.  Our recommended deployment strategy is:
+
+* Install PostgreSQL on the same host as the SSO server, and dedicate that install to hosting the SSO database.  This provides the best security for the SSO server, because the database does not have to accept any client connections over the network.  Typical load on the SSO server is light enough that deploying both it and its database on the same host does not compromise performance.
+* If you want to provide the most scalability for your Arvados cluster, install PostgreSQL for the API server on a dedicated host.  This gives you the most flexibility to avoid resource contention, and tune performance separately for the API server and its database.  If performance is less of a concern for your installation, you can install PostgreSQL on the API server host directly, as with the SSO server.
+
+Find the section for your distribution below, and follow it to install PostgreSQL on each host where you will deploy it.  Then follow the steps in the later section(s) to set up PostgreSQL for the Arvados service(s) that need it.
+
+It is important to make sure that autovacuum is enabled for the PostgreSQL database that backs the API server. Autovacuum is enabled by default since PostgreSQL 8.3.
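+
+You can confirm this directly from @psql@; a quick check, run as the @postgres@ user:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo -u postgres psql -c 'SHOW autovacuum;'</span>
+ autovacuum
+------------
+ on
+(1 row)
+</code></pre>
+</notextile>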
+
+h2. Install PostgreSQL 9.4+
+
+The API server requires at least version *9.4* of PostgreSQL.
+
+h3(#centos7). CentOS 7
+{% assign rh_version = "7" %}
+{% include 'note_python_sc' %}
+
+# Install PostgreSQL:
+  <notextile><pre>~$ <span class="userinput">sudo yum install rh-postgresql95</span>
+~$ <span class="userinput">scl enable rh-postgresql95 bash</span></pre></notextile>
+# Initialize the database:
+  <notextile><pre>~$ <span class="userinput">sudo postgresql-setup initdb</span></pre></notextile>
+# Configure the database to accept password connections:
+  <notextile><pre><code>~$ <span class="userinput">sudo sed -ri -e 's/^(host +all +all +(127\.0\.0\.1\/32|::1\/128) +)ident$/\1md5/' /var/lib/pgsql/data/pg_hba.conf</span></code></pre></notextile>
+# Configure the database to launch at boot:
+  <notextile><pre>~$ <span class="userinput">sudo systemctl enable rh-postgresql95-postgresql</span></pre></notextile>
+# Start the database:
+  <notextile><pre>~$ <span class="userinput">sudo systemctl start rh-postgresql95-postgresql</span></pre></notextile>
+# "Set up Arvados credentials and databases":#rails_setup for the services that will use this PostgreSQL install.
+
+h3(#debian). Debian or Ubuntu
+
+Debian 8 (Jessie) and Ubuntu 16.04 (Xenial) and later versions include a sufficiently recent version of PostgreSQL.
+
+Ubuntu 14.04 (Trusty) requires an updated PostgreSQL version; see "the PostgreSQL ubuntu repository":https://www.postgresql.org/download/linux/ubuntu/
+
+# Install PostgreSQL:
+  <notextile><pre>~$ <span class="userinput">sudo apt-get install postgresql</span></pre></notextile>
+# "Set up Arvados credentials and databases":#rails_setup for the services that will use this PostgreSQL install.
+
+<a name="rails_setup"></a>
+
+h2(#sso). Set up SSO server credentials and database
+
+{% assign service_role = "arvados_sso" %}
+{% assign service_database = "arvados_sso_production" %}
+{% include 'install_postgres_database' %}
+
+h2(#api). Set up API server credentials and database
+
+{% assign service_role = "arvados" %}
+{% assign service_database = "arvados_production" %}
+{% include 'install_postgres_database' %}
diff --git a/doc/install/install-shell-server.html.textile.liquid b/doc/install/install-shell-server.html.textile.liquid
new file mode 100644 (file)
index 0000000..1cbe749
--- /dev/null
@@ -0,0 +1,178 @@
+---
+layout: default
+navsection: installguide
+title: Install a shell server
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+There is nothing inherently special about an Arvados shell server. It is just a GNU/Linux machine with Arvados utilities and SDKs installed. For optimal performance, the Arvados shell server should be on the same LAN as the Arvados cluster, but that is not required.
+
+h2. Install API tokens
+
+Please follow the "API token guide":../user/reference/api-tokens.html to get API tokens for your Arvados account and install them on your shell server. We will use those tokens to test the SDKs as we install them.
+
+h2. Install the Ruby SDK and utilities
+
+First, install the curl development libraries necessary to build the Arvados Ruby SDK.  On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install libcurl4-openssl-dev</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install libcurl-devel</span>
+</code></pre>
+</notextile>
+
+Next, install the arvados-cli Ruby gem.  If you're using RVM:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo /usr/local/rvm/bin/rvm-exec default gem install arvados-cli</span>
+</code></pre>
+</notextile>
+
+If you're not using RVM:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo -i gem install arvados-cli</span>
+</code></pre>
+</notextile>
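+
+As a quick test, the @arv@ command installed by the gem can fetch your user record; assuming the API tokens from the earlier step are in place:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv user current</span>
+</code></pre>
+</notextile>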
+
+h2. Install the Python SDK and utilities
+
+{% assign rh_version = "7" %}
+{% include 'note_python_sc' %}
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">echo 'exclude=python2-llfuse' | sudo tee -a /etc/yum.conf</span>
+~$ <span class="userinput">sudo yum install python-arvados-python-client python-arvados-fuse crunchrunner</span>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-python-client python-arvados-fuse crunchrunner</span>
+</code></pre>
+</notextile>
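+
+To verify the Python SDK can reach your cluster, you can fetch the current user's UUID from Python; a minimal sketch, assuming your API tokens are configured:
+
+<notextile>
+<pre><code>~$ <span class="userinput">python -c 'import arvados; print arvados.api("v1").users().current().execute()["uuid"]'</span>
+</code></pre>
+</notextile>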
+
+h2. Install Git and curl
+
+{% include 'install_git_curl' %}
+
+h2. Update Git Config
+
+Configure git to use the ARVADOS_API_TOKEN environment variable to authenticate to arv-git-httpd. We use the @--system@ flag so it takes effect for all current and future user accounts. It does not affect git's behavior when connecting to other git servers.
+
+<notextile>
+<pre>
+<code>~$ <span class="userinput">sudo git config --system 'credential.https://git.<b>uuid_prefix.your.domain</b>/.username' none</span></code>
+<code>~$ <span class="userinput">sudo git config --system 'credential.https://git.<b>uuid_prefix.your.domain</b>/.helper' '!cred(){ cat >/dev/null; if [ "$1" = get ]; then echo password=$ARVADOS_API_TOKEN; fi; };cred'</span></code>
+</pre>
+</notextile>
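+
+With this in place, git sends @$ARVADOS_API_TOKEN@ as the password for any repository under that prefix.  As a quick check (assuming a hosted repository named @example@ exists and your token is exported), a clone should succeed without prompting:
+
+<notextile>
+<pre><code>~$ <span class="userinput">git clone https://git.<b>uuid_prefix.your.domain</b>/example.git</span>
+</code></pre>
+</notextile>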
+
+h2. Install arvados-login-sync
+
+This program makes it possible for Arvados users to log in to the shell server -- subject to permissions assigned by the Arvados administrator -- using the SSH keys they upload to Workbench. It sets up login accounts, updates group membership, and adds users' public keys to the appropriate @authorized_keys@ files.
+
+Create an Arvados virtual_machine object representing this shell server. This will assign a UUID.
+
+<notextile>
+<pre>
+<code>apiserver:~$ <span class="userinput">arv --format=uuid virtual_machine create --virtual-machine '{"hostname":"<b>your.shell.server.hostname.without.domain</b>"}'</span>
+zzzzz-2x53u-zzzzzzzzzzzzzzz</code>
+</pre>
+</notextile>
+
+Create a token that is allowed to read login information for this VM.
+
+<notextile>
+<pre>
+<code>apiserver:~$ <span class="userinput">arv api_client_authorization create --api-client-authorization '{"scopes":["GET /arvados/v1/virtual_machines/<b>zzzzz-2x53u-zzzzzzzzzzzzzzz</b>/logins"]}'
+{
+ ...
+ "api_token":"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz",
+ ...
+}</code>
+</pre>
+</notextile>
+
+Note the UUID and the API token output by the above commands: you will need them in a minute.
+
+Install the arvados-login-sync program.
+
+If you're using RVM:
+
+<notextile>
+<pre>
+<code>shellserver:~$ <span class="userinput">sudo -i `which rvm-exec` default gem install arvados-login-sync</span></code>
+</pre>
+</notextile>
+
+If you're not using RVM:
+
+<notextile>
+<pre>
+<code>shellserver:~$ <span class="userinput">sudo -i gem install arvados-login-sync</span></code>
+</pre>
+</notextile>
+
+Install cron.
+
+On Red Hat-based distributions:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install cronie</span>
+~$ <span class="userinput">sudo systemctl enable crond</span>
+~$ <span class="userinput">sudo systemctl start crond</span>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install cron</span>
+</code></pre>
+</notextile>
+
+Configure cron to run the @arvados-login-sync@ program every 2 minutes.
+
+If you're using RVM:
+
+<notextile>
+<pre>
+<code>shellserver:~$ <span class="userinput">sudo bash -c 'umask 077; tee /etc/cron.d/arvados-login-sync' &lt;&lt;'EOF'
+ARVADOS_API_HOST="<strong>uuid_prefix.your.domain</strong>"
+ARVADOS_API_TOKEN="<strong>the_token_you_created_above</strong>"
+ARVADOS_VIRTUAL_MACHINE_UUID="<strong>zzzzz-2x53u-zzzzzzzzzzzzzzz</strong>"
+*/2 * * * * root /usr/local/rvm/bin/rvm-exec default arvados-login-sync
+EOF</span></code>
+</pre>
+</notextile>
+
+If you're not using RVM:
+
+<notextile>
+<pre>
+<code>shellserver:~$ <span class="userinput">sudo bash -c 'umask 077; tee /etc/cron.d/arvados-login-sync' &lt;&lt;'EOF'
+ARVADOS_API_HOST="<strong>uuid_prefix.your.domain</strong>"
+ARVADOS_API_TOKEN="<strong>the_token_you_created_above</strong>"
+ARVADOS_VIRTUAL_MACHINE_UUID="<strong>zzzzz-2x53u-zzzzzzzzzzzzzzz</strong>"
+*/2 * * * * root arvados-login-sync
+EOF</span></code>
+</pre>
+</notextile>
+
+A user should be able to log in to the shell server when the following conditions are satisfied:
+* The user has uploaded an SSH public key: Workbench &rarr; Account menu &rarr; "SSH keys" item &rarr; "Add new SSH key" button.
+* As an admin user, you have given the user permission to log in: Workbench &rarr; Admin menu &rarr; "Users" item &rarr; "Show" button &rarr; "Admin" tab &rarr; "Setup shell account" button.
+* Two minutes have elapsed since the above conditions were satisfied, and the cron job has had a chance to run.
diff --git a/doc/install/install-sso.html.textile.liquid b/doc/install/install-sso.html.textile.liquid
new file mode 100644 (file)
index 0000000..b2a4f67
--- /dev/null
@@ -0,0 +1,247 @@
+---
+layout: default
+navsection: installguide
+title: Install the Single Sign On (SSO) server
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2(#dependencies). Install prerequisites
+
+The Arvados package repository includes an SSO server package that can help automate much of the deployment.
+
+h3(#install_ruby_and_bundler). Install Ruby and Bundler
+
+{% include 'install_ruby_and_bundler' %}
+
+h3(#install_web_server). Set up a Web server
+
+For best performance, we recommend you use Nginx as your Web server frontend with a Passenger backend to serve the SSO server. The Passenger team provides "Nginx + Passenger installation instructions":https://www.phusionpassenger.com/library/walkthroughs/deploy/ruby/ownserver/nginx/oss/install_passenger_main.html.
+
+Follow the instructions until you see the section that says you are ready to deploy your Ruby application on the production server.
+
+h2(#install). Install the SSO server
+
+On a Debian-based system, install the following package:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install arvados-sso-server</span>
+</code></pre>
+</notextile>
+
+On a Red Hat-based system, install the following package:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install arvados-sso-server</span>
+</code></pre>
+</notextile>
+
+h2(#configure). Configure the SSO server
+
+The package has installed three configuration files in @/etc/arvados/sso@:
+
+<notextile>
+<pre><code>/etc/arvados/sso/application.yml
+/etc/arvados/sso/database.yml
+/etc/arvados/sso/production.rb
+</code></pre>
+</notextile>
+
+The SSO server runs from the @/var/www/arvados-sso/current/@ directory. The files @/var/www/arvados-sso/current/config/application.yml@, @/var/www/arvados-sso/current/config/database.yml@ and @/var/www/arvados-sso/current/config/environments/production.rb@ are symlinked to the configuration files in @/etc/arvados/sso/@.
+
+The SSO server reads the @config/application.yml@ file, as well as the @config/application.default.yml@ file. Values in @config/application.yml@ take precedence over the defaults that are defined in @config/application.default.yml@. The @config/application.yml.example@ file is not read by the SSO server and is provided for installation convenience only.
+
+Consult @config/application.default.yml@ for a full list of configuration options.  Local configuration goes in @/etc/arvados/sso/application.yml@; do not edit @config/application.default.yml@.
+
+h3(#uuid_prefix). uuid_prefix
+
+Generate a uuid prefix for the single sign on service.  This prefix is used to identify user records as originating from this site.  It must be exactly 5 lowercase ASCII letters and/or digits.  You may use the following snippet to generate a uuid prefix:
+
+<notextile>
+<pre><code>~$ <span class="userinput">ruby -e 'puts "#{rand(2**64).to_s(36)[0,5]}"'</span>
+abcde
+</code></pre></notextile>
+
+Edit @/etc/arvados/sso/application.yml@ and set @uuid_prefix@ in the "common" section.
+
+h3(#secret_token). secret_token
+
+Generate a new secret token for signing cookies:
+
+<notextile>
+<pre><code>~$ <span class="userinput">ruby -e 'puts rand(2**400).to_s(36)'</span>
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+</code></pre></notextile>
+
+Edit @/etc/arvados/sso/application.yml@ and set @secret_token@ in the "common" section.
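+
+After both edits, the "common" section of @/etc/arvados/sso/application.yml@ should look something like this (values are illustrative):
+
+<notextile>
+<pre><code>common:
+  uuid_prefix: <span class="userinput">abcde</span>
+  secret_token: <span class="userinput">zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz</span>
+</code></pre>
+</notextile>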
+
+There are other configuration options in @/etc/arvados/sso/application.yml@. See the "Authentication methods":install-sso.html#authentication_methods section below for more details.
+
+h2(#database). Set up the database
+
+Configure the SSO server to connect to your database by updating @/etc/arvados/sso/database.yml@. Replace the @xxxxxxxx@ database password placeholder with the "password you generated during database setup":install-postgresql.html#sso. Be sure to update the @production@ section.
+
+<notextile>
+<pre><code>~$ <span class="userinput">editor /etc/arvados/sso/database.yml</span>
+</code></pre></notextile>
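+
+The @production@ section will end up looking something like this sketch, using the role and database names from the "database setup step":install-postgresql.html#sso (adjust @host@ if your database is not local):
+
+<notextile>
+<pre><code>production:
+  adapter: postgresql
+  encoding: utf8
+  database: arvados_sso_production
+  username: arvados_sso
+  password: <span class="userinput">xxxxxxxx</span>
+  host: localhost
+</code></pre>
+</notextile>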
+
+h2(#reconfigure_package). Reconfigure the package
+
+{% assign railspkg = "arvados-sso-server" %}
+{% include 'install_rails_reconfigure' %}
+
+h2(#client). Create arvados-server client
+
+{% assign railshost = "" %}
+{% assign railsdir = "/var/www/arvados-sso/current" %}
+Use @rails console@ to create a @Client@ record that will be used by the Arvados API server.  {% include 'install_rails_command' %}
+
+Enter the following commands at the console.  The values that appear after you assign @app_id@ and @app_secret@ correspond to the values for @sso_app_id@ and @sso_app_secret@, respectively, in the "API server's SSO settings":install-api-server.html#omniauth.
+
+<notextile>
+<pre><code>:001 &gt; <span class="userinput">c = Client.new</span>
+:002 &gt; <span class="userinput">c.name = "joshid"</span>
+:003 &gt; <span class="userinput">c.app_id = "arvados-server"</span>
+:004 &gt; <span class="userinput">c.app_secret = rand(2**400).to_s(36)</span>
+=&gt; "<strong>save this string for your API server's sso_app_secret</strong>"
+:005 &gt; <span class="userinput">c.save!</span>
+:006 &gt; <span class="userinput">quit</span>
+</code></pre>
+</notextile>
+
+h2(#configure_web_server). Configure your web server
+
+Edit the http section of your Nginx configuration to run the Passenger server and act as a frontend for it. You might add a block like the following, adding SSL and logging parameters to taste:
+
+<notextile>
+<pre><code>server {
+  listen 127.0.0.1:8900;
+  server_name localhost-sso;
+
+  root   /var/www/arvados-sso/current/public;
+  index  index.html;
+
+  passenger_enabled on;
+  # If you're not using RVM, comment out the line below.
+  passenger_ruby /usr/local/rvm/wrappers/default/ruby;
+}
+
+upstream sso {
+  server     127.0.0.1:8900  fail_timeout=10s;
+}
+
+proxy_http_version 1.1;
+
+server {
+  listen       <span class="userinput">[your public IP address]</span>:443 ssl;
+  server_name  auth.<span class="userinput">your.domain</span>;
+
+  ssl on;
+  ssl_certificate     <span class="userinput">/YOUR/PATH/TO/cert.pem</span>;
+  ssl_certificate_key <span class="userinput">/YOUR/PATH/TO/cert.key</span>;
+
+  index  index.html;
+
+  location / {
+    proxy_pass            http://sso;
+    proxy_redirect        off;
+    proxy_connect_timeout 90s;
+    proxy_read_timeout    300s;
+
+    proxy_set_header      X-Forwarded-Proto https;
+    proxy_set_header      Host $http_host;
+    proxy_set_header      X-Real-IP $remote_addr;
+    proxy_set_header      X-Forwarded-For $proxy_add_x_forwarded_for;
+  }
+}
+</code></pre>
+</notextile>
+
+Finally, restart Nginx and your Arvados SSO server should be up and running. You can verify that by visiting the URL you configured your Nginx web server to listen on in the server section above (port 443). Read on if you want to configure your Arvados SSO server to use a different authentication backend.
+
+h2(#authentication_methods). Authentication methods
+
+Authentication methods are configured in @application.yml@.  Currently three authentication methods are supported: local accounts, LDAP, and Google+.  If neither Google+ nor LDAP is enabled, the SSO server defaults to local user accounts.  Only one authentication mechanism should be in use at a time.
+
+h3(#local_accounts). Local account authentication
+
+There are two configuration options for local accounts:
+
+<pre>
+  # If true, allow creation of new accounts in the SSO server's internal
+  # user database.
+  allow_account_registration: false
+
+  # If true, send an email confirmation before activating new accounts in the
+  # SSO server's internal user database (otherwise users are activated immediately).
+  require_email_confirmation: false
+</pre>
+
+For more information about configuring backend support for sending email (required to send email confirmations) see "Configuring Action Mailer":http://guides.rubyonrails.org/configuring.html#configuring-action-mailer
+
+If @allow_account_registration@ is false, you may manually create local accounts on the SSO server from the Rails console.  {% include 'install_rails_command' %}
+
+Enter the following commands at the console.
+
+<notextile>
+<pre><code>:001 &gt; <span class="userinput">user = User.new(:email =&gt; "test@example.com")</span>
+:002 &gt; <span class="userinput">user.password = "passw0rd"</span>
+:003 &gt; <span class="userinput">user.save!</span>
+:004 &gt; <span class="userinput">quit</span>
+</code></pre>
+</notextile>
+
+h3(#ldap). LDAP authentication
+
+The following options are available to configure LDAP authentication.  Note that you must preserve the indentation of the fields listed under @use_ldap@.
+
+<pre>
+  use_ldap:
+    title: Example LDAP
+    host: ldap.example.com
+    port: 636
+    method: ssl
+    base: "ou=Users, dc=example, dc=com"
+    uid: uid
+    email_domain: example.com
+    #bind_dn: "some_user"
+    #password: "some_password"
+</pre>
+
+table(table).
+|_. Option|_. Description|
+|title |Title displayed to the user on the login page|
+|host  |LDAP server hostname|
+|port  |LDAP server port|
+|method|One of "plain", "ssl", "tls"|
+|base  |Directory lookup base|
+|uid   |User id field used for directory lookup|
+|email_domain|Strip off specified email domain from login and perform lookup on bare username|
+|bind_dn|If required by the server, username to log in with before performing directory lookup|
+|password|If required by the server, password to log in with before performing directory lookup|
+
+h3(#google). Google+ authentication
+
+In order to use Google+ authentication, you must use the <a href="https://console.developers.google.com" target="_blank">Google Developers Console</a> to create a set of client credentials.
+
+# Go to the <a href="https://console.developers.google.com" target="_blank">Google Developers Console</a> and select or create a project; this will take you to the project page.
+# On the sidebar, click on *APIs & auth* then select *APIs*.
+## Search for *Contacts API* and click on *Enable API*.
+## Search for *Google+ API* and click on *Enable API*.
+# On the sidebar, click on *Credentials*; under *OAuth* click on *Create new Client ID* to bring up the *Create Client ID* dialog box.
+# Under *Application type* select *Web application*.
+# If the authorization origins are not displayed, clicking on *Create Client ID* will take you to *Consent screen* settings.
+## On consent screen settings, enter the appropriate details and click on *Save*.
+## This will return you to the *Create Client ID* dialog box.
+# You must set the authorization origins.  Replace @auth.your.domain@ with the appropriate hostname that you will use to access the SSO service:
+## JavaScript origin should be @https://auth.your.domain/@
+## Redirect URI should be @https://auth.your.domain/users/auth/google_oauth2/callback@
+# Copy the values of *Client ID* and *Client secret* from the Google Developers Console into the Google section of @config/application.yml@, like this:
+
+<notextile>
+<pre><code>  # Google API tokens required for OAuth2 login.
+  google_oauth2_client_id: <span class="userinput">"---YOUR---CLIENT---ID---HERE---"</span>
+  google_oauth2_client_secret: <span class="userinput">"---YOUR---CLIENT---SECRET---HERE---"</span></code></pre></notextile>
diff --git a/doc/install/install-workbench-app.html.textile.liquid b/doc/install/install-workbench-app.html.textile.liquid
new file mode 100644 (file)
index 0000000..593e801
--- /dev/null
@@ -0,0 +1,198 @@
+---
+layout: default
+navsection: installguide
+title: Install Workbench
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Install prerequisites
+
+The Arvados package repository includes a Workbench server package that can help automate much of the deployment.
+
+h3(#install_ruby_and_bundler). Install Ruby and Bundler
+
+{% include 'install_ruby_and_bundler' %}
+
+h2(#install_workbench). Install Workbench and dependencies
+
+Workbench doesn't need its own database, so it does not need to have PostgreSQL installed.
+
+{% assign rh_version = "7" %}
+{% include 'note_python_sc' %}
+
+On a Debian-based system, install the following packages:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install bison build-essential graphviz git python-arvados-python-client arvados-workbench</span>
+</code></pre>
+</notextile>
+
+On a Red Hat-based system, install the following packages:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install bison make automake gcc gcc-c++ graphviz git python-arvados-python-client arvados-workbench</span>
+</code></pre>
+</notextile>
+
+h2(#configure). Configure Workbench
+
+Edit @/etc/arvados/workbench/application.yml@ following the instructions below.  Workbench reads both @application.yml@ and its own @config/application.default.yml@ file.  Values in @application.yml@ take precedence over the defaults that are defined in @config/application.default.yml@.  The @config/application.yml.example@ file is not read by Workbench and is provided for installation convenience only.
+
+Consult @config/application.default.yml@ for a full list of configuration options.  Always put your local configuration in @/etc/arvados/workbench/application.yml@&mdash;never edit @config/application.default.yml@.
+
+h3. secret_token
+
+This application needs a secret token. Generate a new secret:
+
+<notextile>
+<pre><code>~$ <span class="userinput">ruby -e 'puts rand(2**400).to_s(36)'</span>
+aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+</code></pre>
+</notextile>
+
+Then put that value in the @secret_token@ field.
+
+h3. arvados_login_base and arvados_v1_base
+
+Point @arvados_login_base@ and @arvados_v1_base@ at your "API server":install-api-server.html. For example:
+
+<notextile>
+<pre><code>arvados_login_base: https://prefix_uuid.your.domain/login
+arvados_v1_base: https://prefix_uuid.your.domain/arvados/v1
+</code></pre>
+</notextile>
+
+h3. site_name
+
+@site_name@ can be set to any arbitrary string. It is used to identify this Workbench to people visiting it.
+
+h3. arvados_insecure_https
+
+If the SSL certificate you use for your API server isn't an official certificate signed by a CA, make sure @arvados_insecure_https@ is @true@.
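+
+Putting the settings above together, the relevant part of @/etc/arvados/workbench/application.yml@ might look like this sketch (all values illustrative; as with the other Arvados Rails apps, settings go in a section such as "common"):
+
+<notextile>
+<pre><code>common:
+  secret_token: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+  arvados_login_base: https://prefix_uuid.your.domain/login
+  arvados_v1_base: https://prefix_uuid.your.domain/arvados/v1
+  site_name: Example Workbench
+  arvados_insecure_https: false
+</code></pre>
+</notextile>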
+
+h3. Other options
+
+Consult @application.default.yml@ for a full list of configuration options. Always put your local configuration in @application.yml@ instead of editing @application.default.yml@.
+
+h2. Configure Piwik
+
+In @/var/www/arvados-workbench/current/config@, copy @piwik.yml.example@ to @piwik.yml@ and edit to suit.
+
+h2. Set up Web server
+
+For best performance, we recommend you use Nginx as your Web server front-end, with a Passenger backend to serve Workbench.  To do that:
+
+<notextile>
+<ol>
+<li><a href="https://www.phusionpassenger.com/library/walkthroughs/deploy/ruby/ownserver/nginx/oss/install_passenger_main.html">Install Nginx and Phusion Passenger</a>.</li>
+
+<li><p>Edit the http section of your Nginx configuration to run the Passenger server, and act as a front-end for it.  You might add a block like the following, adding SSL and logging parameters to taste:</p>
+
+<pre><code>server {
+  listen 127.0.0.1:9000;
+  server_name localhost-workbench;
+
+  root /var/www/arvados-workbench/current/public;
+  index  index.html index.htm index.php;
+
+  passenger_enabled on;
+  # If you're using RVM, uncomment the line below.
+  #passenger_ruby /usr/local/rvm/wrappers/default/ruby;
+
+  # `client_max_body_size` should match the corresponding setting in
+  # the API server's Nginx configuration.
+  client_max_body_size 128m;
+}
+
+upstream workbench {
+  server     127.0.0.1:9000  fail_timeout=10s;
+}
+
+proxy_http_version 1.1;
+
+server {
+  listen       <span class="userinput">[your public IP address]</span>:443 ssl;
+  server_name  workbench.<span class="userinput">uuid-prefix.your.domain</span>;
+
+  ssl on;
+  ssl_certificate     <span class="userinput">/YOUR/PATH/TO/cert.pem</span>;
+  ssl_certificate_key <span class="userinput">/YOUR/PATH/TO/cert.key</span>;
+
+  index  index.html index.htm index.php;
+  # `client_max_body_size` should match the corresponding setting in
+  # the API server's Nginx configuration.
+  client_max_body_size 128m;
+
+  location / {
+    proxy_pass            http://workbench;
+    proxy_redirect        off;
+    proxy_connect_timeout 90s;
+    proxy_read_timeout    300s;
+
+    proxy_set_header      X-Forwarded-Proto https;
+    proxy_set_header      Host $http_host;
+    proxy_set_header      X-Real-IP $remote_addr;
+    proxy_set_header      X-Forwarded-For $proxy_add_x_forwarded_for;
+  }
+}
+</code></pre>
+</li>
+
+<li>Restart Nginx.</li>
+
+</ol>
+</notextile>
+
+h2. Prepare the Workbench deployment
+
+{% assign railspkg = "arvados-workbench" %}
+{% include 'install_rails_reconfigure' %}
+
+{% include 'notebox_begin' %}
+You can safely ignore the following error message you may see when Ruby Gems are installed:
+<notextile>
+<pre><code>themes_for_rails at /usr/local/rvm/gems/ruby-2.1.1/bundler/gems/themes_for_rails-1fd2d7897d75 did not have a valid gemspec.
+This prevents bundler from installing bins or native extensions, but that may not affect its functionality.
+The validation message from Rubygems was:
+  duplicate dependency on rails (= 3.0.11, development), (>= 3.0.0) use:
+    add_runtime_dependency 'rails', '= 3.0.11', '>= 3.0.0'
+Using themes_for_rails (0.5.1) from https://github.com/holtkampw/themes_for_rails (at 1fd2d78)
+</code></pre>
+</notextile>
+{% include 'notebox_end' %}
+
+h2. Trusted client setting
+
+Log in to Workbench once to ensure that the Arvados API server has a record of the Workbench client. (It's OK if Workbench says your account hasn't been activated yet. We'll deal with that next.)
+
+In the <strong>API server</strong> project root, start the Rails console.  {% include 'install_rails_command' %}
+
+At the console, enter the following commands to locate the ApiClient record for your Workbench installation (typically, while you're setting this up, the @last@ one in the database is the one you want), then set the @is_trusted@ flag for the appropriate client record:
+
+<notextile><pre><code>irb(main):001:0&gt; <span class="userinput">wb = ApiClient.all.last; [wb.url_prefix, wb.created_at]</span>
+=&gt; ["https://workbench.example.com/", Sat, 19 Apr 2014 03:35:12 UTC +00:00]
+irb(main):002:0&gt; <span class="userinput">include CurrentApiClient</span>
+=&gt; true
+irb(main):003:0&gt; <span class="userinput">act_as_system_user do wb.update_attributes!(is_trusted: true) end</span>
+=&gt; true
+</code></pre>
+</notextile>
+
+h2(#admin-user). Add an admin user
+
+Next, we're going to use the Rails console on the <strong>API server</strong> to activate your account and give yourself admin privileges.  {% include 'install_rails_command' %}
+
+Enter the following commands at the console:
+
+<notextile>
+<pre><code>irb(main):001:0&gt; <span class="userinput">Thread.current[:user] = User.all.select(&:identity_url).last</span>
+irb(main):002:0&gt; <span class="userinput">Thread.current[:user].update_attributes is_admin: true, is_active: true</span>
+irb(main):003:0&gt; <span class="userinput">User.where(is_admin: true).collect &:email</span>
+=&gt; ["root", "<b>your_address@example.com</b>"]
+</code></pre></notextile>
+
+At this point, you should have a working Workbench login with administrator privileges. Revisit your Workbench URL in a browser and reload the page to access it.
diff --git a/doc/install/install-ws.html.textile.liquid b/doc/install/install-ws.html.textile.liquid
new file mode 100644 (file)
index 0000000..f5f816c
--- /dev/null
@@ -0,0 +1,205 @@
+---
+layout: default
+navsection: installguide
+title: Install the websocket server
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The arvados-ws server provides event notifications to websocket clients. It can be installed anywhere with access to the PostgreSQL database and the Arvados API server, typically behind a web proxy that provides SSL support. See the "godoc page":http://godoc.org/github.com/curoverse/arvados/services/ws for additional information.
+
+By convention, we use the following hostname for the websocket service.
+
+<notextile>
+<pre><code>ws.<span class="userinput">uuid_prefix.your.domain</span></code></pre>
+</notextile>
+
+The above hostname should resolve from anywhere on the internet.
+
+h2. Install arvados-ws
+
+Typically arvados-ws runs on the same host as the API server.
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install arvados-ws</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install arvados-ws</span>
+</code></pre>
+</notextile>
+
+Verify that @arvados-ws@ is functional:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arvados-ws -h</span>
+Usage of arvados-ws:
+  -config path
+        path to config file (default "/etc/arvados/ws/ws.yml")
+  -dump-config
+        show current configuration and exit
+</code></pre>
+</notextile>
+
+h3. Create a configuration file
+
+Create @/etc/arvados/ws/ws.yml@ using the following template. Replace @xxxxxxxx@ with the "password you generated during database setup":install-postgresql.html#api.
+
+<notextile>
+<pre><code>Client:
+  APIHost: <span class="userinput">uuid_prefix.your.domain</span>:443
+Listen: ":<span class="userinput">9003</span>"
+Postgres:
+  dbname: arvados_production
+  host: localhost
+  password: <span class="userinput">xxxxxxxx</span>
+  user: arvados
+</code></pre>
+</notextile>
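+
+Before starting the service, you can ask the daemon to parse the file and echo its effective configuration back, using the @-dump-config@ flag shown above:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arvados-ws -dump-config</span>
+</code></pre>
+</notextile>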
+
+h3. Start the service (option 1: systemd)
+
+If your system does not use systemd, skip this section and follow the "runit instructions":#runit instead.
+
+If your system uses systemd, the arvados-ws service should already be set up. Start it and check its status:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo systemctl restart arvados-ws</span>
+~$ <span class="userinput">sudo systemctl status arvados-ws</span>
+&#x25cf; arvados-ws.service - Arvados websocket server
+   Loaded: loaded (/lib/systemd/system/arvados-ws.service; enabled)
+   Active: active (running) since Tue 2016-12-06 11:20:48 EST; 10s ago
+     Docs: https://doc.arvados.org/
+ Main PID: 9421 (arvados-ws)
+   CGroup: /system.slice/arvados-ws.service
+           └─9421 /usr/bin/arvados-ws
+
+Dec 06 11:20:48 zzzzz arvados-ws[9421]: {"level":"info","msg":"started","time":"2016-12-06T11:20:48.207617188-05:00"}
+Dec 06 11:20:48 zzzzz arvados-ws[9421]: {"Listen":":9003","level":"info","msg":"listening","time":"2016-12-06T11:20:48.244956506-05:00"}
+Dec 06 11:20:48 zzzzz systemd[1]: Started Arvados websocket server.
+</code></pre>
+</notextile>
+
+If it is not running, use @journalctl@ to check logs for errors:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo journalctl -n10 -u arvados-ws</span>
+...
+Dec 06 11:12:48 zzzzz systemd[1]: Starting Arvados websocket server...
+Dec 06 11:12:48 zzzzz arvados-ws[8918]: {"level":"info","msg":"started","time":"2016-12-06T11:12:48.030496636-05:00"}
+Dec 06 11:12:48 zzzzz arvados-ws[8918]: {"error":"pq: password authentication failed for user \"arvados\"","level":"fatal","msg":"db.Ping failed","time":"2016-12-06T11:12:48.058206400-05:00"}
+</code></pre>
+</notextile>
+
+Skip ahead to "confirm the service is working":#confirm.
+
+h3(#runit). Start the service (option 2: runit)
+
+Install runit to supervise the arvados-ws daemon.  {% include 'install_runit' %}
+
+Create a supervised service.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo mkdir /etc/service/arvados-ws</span>
+~$ <span class="userinput">cd /etc/service/arvados-ws</span>
+~$ <span class="userinput">sudo mkdir log log/main</span>
+~$ <span class="userinput">printf '#!/bin/sh\nexec arvados-ws 2>&1\n' | sudo tee run</span>
+~$ <span class="userinput">printf '#!/bin/sh\nexec svlogd main\n' | sudo tee log/run</span>
+~$ <span class="userinput">sudo chmod +x run log/run</span>
+~$ <span class="userinput">sudo sv exit .</span>
+~$ <span class="userinput">cd -</span>
+</code></pre>
+</notextile>
+
+Use @sv stat@ and check the log file to verify the service is running.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo sv stat /etc/service/arvados-ws</span>
+run: /etc/service/arvados-ws: (pid 12520) 2s; run: log: (pid 12519) 2s
+~$ <span class="userinput">tail /etc/service/arvados-ws/log/main/current</span>
+{"level":"info","msg":"started","time":"2016-12-06T11:56:20.669171449-05:00"}
+{"Listen":":9003","level":"info","msg":"listening","time":"2016-12-06T11:56:20.708847627-05:00"}
+</code></pre>
+</notextile>
+
+h3(#confirm). Confirm the service is working
+
+Confirm the service is listening on its assigned port and responding to requests.
+
+<notextile>
+<pre><code>~$ <span class="userinput">curl http://0.0.0.0:<b>9003</b>/status.json</span>
+{"Clients":1}
+</code></pre>
+</notextile>
+
+h3. Set up a reverse proxy with SSL support
+
+The arvados-ws service will be accessible from anywhere on the internet, so we recommend using SSL for transport encryption.
+
+This is best achieved by putting a reverse proxy with SSL support in front of arvados-ws, running on port 443 and passing requests to arvados-ws on port 9003 (or whatever port you chose in your configuration file).
+
+For example, using Nginx:
+
+<notextile><pre>
+upstream arvados-ws {
+  server                127.0.0.1:<span class="userinput">9003</span>;
+}
+
+server {
+  listen                <span class="userinput">[your public IP address]</span>:443 ssl;
+  server_name           ws.<span class="userinput">uuid_prefix.your.domain</span>;
+
+  proxy_connect_timeout 90s;
+  proxy_read_timeout    300s;
+
+  ssl                   on;
+  ssl_certificate       <span class="userinput">/YOUR/PATH/TO/cert.pem</span>;
+  ssl_certificate_key   <span class="userinput">/YOUR/PATH/TO/cert.key</span>;
+
+  location / {
+    proxy_pass          http://arvados-ws;
+    proxy_set_header    Upgrade         $http_upgrade;
+    proxy_set_header    Connection      "upgrade";
+    proxy_set_header    Host            $host;
+    proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;
+  }
+}
+</pre></notextile>
+
+{% include 'notebox_begin' %}
+If you are upgrading a cluster where Nginx is configured to proxy @ws@ requests to puma, change the @server_name@ value in the old configuration block so it doesn't conflict. When the new configuration is working, delete the old Nginx configuration sections (i.e., the "upstream websockets" block, and the "server" block that references @http://websockets@), and disable/remove the runit or systemd files for the puma server.
+{% include 'notebox_end' %}
+
+h3. Update API server configuration
+
+Ensure the websocket server address is correct in the API server configuration file @/etc/arvados/api/application.yml@.
+
+<notextile>
+<pre><code>websocket_address: wss://ws.<span class="userinput">uuid_prefix.your.domain</span>/websocket
+</code></pre>
+</notextile>
+
+Restart Nginx to reload the API server configuration.
+
+<notextile>
+<pre><code>$ <span class="userinput">sudo nginx -s reload</span>
+</code></pre>
+</notextile>
+
+h3. Verify DNS and proxy setup
+
+Use a host elsewhere on the Internet to confirm that your DNS, proxy, and SSL are configured correctly.
+
+<notextile>
+<pre><code>$ <span class="userinput">curl https://ws.<b>uuid_prefix.your.domain</b>/status.json</span>
+{"Clients":1}
+</code></pre>
+</notextile>
diff --git a/doc/install/migrate-docker19.html.textile.liquid b/doc/install/migrate-docker19.html.textile.liquid
new file mode 100644 (file)
index 0000000..7b7e2a8
--- /dev/null
@@ -0,0 +1,39 @@
+---
+layout: default
+navsection: admin
+title: Migrating from Docker 1.9
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+If you have an existing Arvados installation using Docker 1.9 and wish to update to Docker 1.10+, you must migrate the Docker images stored in Keep.
+
+The @arv-migrate-docker19@ tool converts Docker images stored in Arvados from image format v1 (Docker <= 1.9) to image format v2 (Docker >= 1.10).
+
+The tool requires Docker running on the local host (either 1.9 or 1.10+) and a Linux kernel >= 3.18-rc6 to support overlayfs.
+
+Usage:
+
+# Install the arvados/migrate-docker19 image: @docker pull arvados/migrate-docker19:1.0@. If you're unable to do this, you can run @arvados/docker/migrate-docker19/build.sh@ to create the @arvados/migrate-docker19@ Docker image.
+# Make sure you have the right modules installed: @sudo modprobe overlayfs bridge br_netfilter nf_nat@
+# Set ARVADOS_API_HOST and ARVADOS_API_TOKEN to the cluster you want to migrate.
+# Your temporary directory should be large enough to hold all layers of the biggest image in the cluster.  This is hard to estimate, but you can start with five times that size.  You can select a different directory with the @--tempdir@ switch.  Make sure that the user running the docker daemon has permission to write in that directory.
+# Run @arv-migrate-docker19 --dry-run@ from the Arvados Python SDK on the host (not in a container). This will print out some information useful for the migration.
+# Finally, to perform the migration, run @arv-migrate-docker19@ from the Arvados Python SDK on the host (not in a container), as in the example below.
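+
+For example, a dry run might look like this (the cluster hostname and token are placeholders):
+
+<pre>
+$ export ARVADOS_API_HOST=zzzzz.arvadosapi.com
+$ export ARVADOS_API_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+$ arv-migrate-docker19 --dry-run
+</pre>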
+
+This will query Arvados for v1 format Docker images.  For each image that does not already have a corresponding v2 format image (as indicated by a docker_image_migration tag), it will perform the following process:
+
+i) download the image from Arvados
+ii) load it into Docker
+iii) update the Docker version, which updates the image
+iv) save the v2 format image and upload to Arvados
+v) create a migration link
+
+Once the Docker images in Keep have been migrated, upgrade the version of Docker used across the cluster.  Finally, update the API server configuration from "v1" to "v2" to reflect the supported Docker image version:
+
+<pre>
+docker_image_formats: ["v2"]
+</pre>
diff --git a/doc/install/pre-built-docker.html.textile.liquid b/doc/install/pre-built-docker.html.textile.liquid
new file mode 100644 (file)
index 0000000..6f92c35
--- /dev/null
@@ -0,0 +1,75 @@
+---
+layout: default
+navsection: installguide
+title: Install pre-built Docker images
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This method is intended for evaluation and development on a local workstation. It is not suitable for production use in a cluster deployment.
+
+{% include 'notebox_begin' %}
+* The automatic network configuration allows you to log in to Workbench from a browser _running on the same host as Docker_. Connecting from other hosts requires additional configuration (not covered here).
+* Your data will be stored inside the Docker containers.  You may stop and restart the containers without loss, but if you delete the container, your data will be gone.
+* Updating the Arvados software inside the Docker containers is not supported.  You may download updated Docker images, but migrating data to updated containers is not yet supported.
+{% include 'notebox_end' %}
+
+h2. Prerequisites
+
+# A GNU/Linux x64 (virtual) machine
+# A working Docker installation (see "Installing Docker":https://docs.docker.com/installation/)
+# curl
+
+h2. Verify prerequisites
+
+Make sure that @curl@ and @docker@ are installed on your system, and that you are in the docker group (see "Installing Docker":https://docs.docker.com/installation/).
+
+<notextile><pre><code>~$ <span class="userinput">which curl</span>
+/usr/bin/curl
+~$ <span class="userinput">docker.io --version</span>
+Docker version 1.2.0-dev, build dc243c8
+~$ <span class="userinput">groups</span>
+yourusername sudo fuse docker
+</code></pre></notextile>
+
+h2. Download and install Arvados.
+
+<notextile>
+<pre><code>~$ <span class="userinput">\curl -sSL get.arvados.org | bash</span>
+</code></pre></notextile>
+
+This command will download the latest build of the Arvados docker images. It also gets the @arvdock@ command and saves it in the current working directory. It then uses @arvdock@ to spin up Arvados. Note that the Arvados Docker images are large and may take a while to download.
+
+If you prefer, you can also download and inspect the installation script before running it. @get.arvados.org@ redirects to "https://raw.githubusercontent.com/curoverse/arvados-dev/master/install/easy-docker-install.sh":https://raw.githubusercontent.com/curoverse/arvados-dev/master/install/easy-docker-install.sh, which is the installation script.
+
+The @arvdock@ command usage is listed here:
+
+<pre>
+usage: ./arvdock (start|stop|restart|reset|test) [options]
+
+start    run new or restart stopped arvados containers
+stop     stop arvados containers
+restart  stop and then start arvados containers
+reset    stop and delete containers WARNING: this will delete the data inside Arvados!
+test     run tests
+
+./arvdock start/stop/restart options:
+  -d[port], --doc[=port]        Documentation server (default port 9898)
+  -w[port], --workbench[=port]  Workbench server (default port 9899)
+  -s[port], --sso[=port]        SSO server (default port 9901)
+  -a[port], --api[=port]        API server (default port 9900)
+  -c, --compute                 Compute nodes (starts 2)
+  -v, --vm                      Shell server
+  -n, --nameserver              Nameserver
+  -k, --keep                    Keep servers
+  -p, --keepproxy               Keepproxy server
+  -h, --help                    Display this help and exit
+
+  If no options are given, the action is applied to all servers.
+
+./arvdock test [testname] [testname] ...
+  By default, all tests are run.
+</pre>
diff --git a/doc/js/bootstrap.js b/doc/js/bootstrap.js
new file mode 100644 (file)
index 0000000..39ec471
--- /dev/null
@@ -0,0 +1,1951 @@
+/*!
+ * Bootstrap v3.1.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+
+if (typeof jQuery === 'undefined') { throw new Error('Bootstrap requires jQuery') }
+
+/* ========================================================================
+ * Bootstrap: transition.js v3.1.0
+ * http://getbootstrap.com/javascript/#transitions
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/)
+  // ============================================================
+
+  function transitionEnd() {
+    var el = document.createElement('bootstrap')
+
+    var transEndEventNames = {
+      'WebkitTransition' : 'webkitTransitionEnd',
+      'MozTransition'    : 'transitionend',
+      'OTransition'      : 'oTransitionEnd otransitionend',
+      'transition'       : 'transitionend'
+    }
+
+    for (var name in transEndEventNames) {
+      if (el.style[name] !== undefined) {
+        return { end: transEndEventNames[name] }
+      }
+    }
+
+    return false // explicit for ie8 (  ._.)
+  }
+
+  // http://blog.alexmaccaw.com/css-transitions
+  $.fn.emulateTransitionEnd = function (duration) {
+    var called = false, $el = this
+    $(this).one($.support.transition.end, function () { called = true })
+    var callback = function () { if (!called) $($el).trigger($.support.transition.end) }
+    setTimeout(callback, duration)
+    return this
+  }
+
+  $(function () {
+    $.support.transition = transitionEnd()
+  })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: alert.js v3.1.0
+ * http://getbootstrap.com/javascript/#alerts
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // ALERT CLASS DEFINITION
+  // ======================
+
+  var dismiss = '[data-dismiss="alert"]'
+  var Alert   = function (el) {
+    $(el).on('click', dismiss, this.close)
+  }
+
+  Alert.prototype.close = function (e) {
+    var $this    = $(this)
+    var selector = $this.attr('data-target')
+
+    if (!selector) {
+      selector = $this.attr('href')
+      selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
+    }
+
+    var $parent = $(selector)
+
+    if (e) e.preventDefault()
+
+    if (!$parent.length) {
+      $parent = $this.hasClass('alert') ? $this : $this.parent()
+    }
+
+    $parent.trigger(e = $.Event('close.bs.alert'))
+
+    if (e.isDefaultPrevented()) return
+
+    $parent.removeClass('in')
+
+    function removeElement() {
+      $parent.trigger('closed.bs.alert').remove()
+    }
+
+    $.support.transition && $parent.hasClass('fade') ?
+      $parent
+        .one($.support.transition.end, removeElement)
+        .emulateTransitionEnd(150) :
+      removeElement()
+  }
+
+
+  // ALERT PLUGIN DEFINITION
+  // =======================
+
+  var old = $.fn.alert
+
+  $.fn.alert = function (option) {
+    return this.each(function () {
+      var $this = $(this)
+      var data  = $this.data('bs.alert')
+
+      if (!data) $this.data('bs.alert', (data = new Alert(this)))
+      if (typeof option == 'string') data[option].call($this)
+    })
+  }
+
+  $.fn.alert.Constructor = Alert
+
+
+  // ALERT NO CONFLICT
+  // =================
+
+  $.fn.alert.noConflict = function () {
+    $.fn.alert = old
+    return this
+  }
+
+
+  // ALERT DATA-API
+  // ==============
+
+  $(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close)
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: button.js v3.1.0
+ * http://getbootstrap.com/javascript/#buttons
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // BUTTON PUBLIC CLASS DEFINITION
+  // ==============================
+
+  var Button = function (element, options) {
+    this.$element  = $(element)
+    this.options   = $.extend({}, Button.DEFAULTS, options)
+    this.isLoading = false
+  }
+
+  Button.DEFAULTS = {
+    loadingText: 'loading...'
+  }
+
+  Button.prototype.setState = function (state) {
+    var d    = 'disabled'
+    var $el  = this.$element
+    var val  = $el.is('input') ? 'val' : 'html'
+    var data = $el.data()
+
+    state = state + 'Text'
+
+    if (!data.resetText) $el.data('resetText', $el[val]())
+
+    $el[val](data[state] || this.options[state])
+
+    // push to event loop to allow forms to submit
+    setTimeout($.proxy(function () {
+      if (state == 'loadingText') {
+        this.isLoading = true
+        $el.addClass(d).attr(d, d)
+      } else if (this.isLoading) {
+        this.isLoading = false
+        $el.removeClass(d).removeAttr(d)
+      }
+    }, this), 0)
+  }
+
+  Button.prototype.toggle = function () {
+    var changed = true
+    var $parent = this.$element.closest('[data-toggle="buttons"]')
+
+    if ($parent.length) {
+      var $input = this.$element.find('input')
+      if ($input.prop('type') == 'radio') {
+        if ($input.prop('checked') && this.$element.hasClass('active')) changed = false
+        else $parent.find('.active').removeClass('active')
+      }
+      if (changed) $input.prop('checked', !this.$element.hasClass('active')).trigger('change')
+    }
+
+    if (changed) this.$element.toggleClass('active')
+  }
+
+
+  // BUTTON PLUGIN DEFINITION
+  // ========================
+
+  var old = $.fn.button
+
+  $.fn.button = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.button')
+      var options = typeof option == 'object' && option
+
+      if (!data) $this.data('bs.button', (data = new Button(this, options)))
+
+      if (option == 'toggle') data.toggle()
+      else if (option) data.setState(option)
+    })
+  }
+
+  $.fn.button.Constructor = Button
+
+
+  // BUTTON NO CONFLICT
+  // ==================
+
+  $.fn.button.noConflict = function () {
+    $.fn.button = old
+    return this
+  }
+
+
+  // BUTTON DATA-API
+  // ===============
+
+  $(document).on('click.bs.button.data-api', '[data-toggle^=button]', function (e) {
+    var $btn = $(e.target)
+    if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn')
+    $btn.button('toggle')
+    e.preventDefault()
+  })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: carousel.js v3.1.0
+ * http://getbootstrap.com/javascript/#carousel
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // CAROUSEL CLASS DEFINITION
+  // =========================
+
+  var Carousel = function (element, options) {
+    this.$element    = $(element)
+    this.$indicators = this.$element.find('.carousel-indicators')
+    this.options     = options
+    this.paused      =
+    this.sliding     =
+    this.interval    =
+    this.$active     =
+    this.$items      = null
+
+    this.options.pause == 'hover' && this.$element
+      .on('mouseenter', $.proxy(this.pause, this))
+      .on('mouseleave', $.proxy(this.cycle, this))
+  }
+
+  Carousel.DEFAULTS = {
+    interval: 5000,
+    pause: 'hover',
+    wrap: true
+  }
+
+  Carousel.prototype.cycle =  function (e) {
+    e || (this.paused = false)
+
+    this.interval && clearInterval(this.interval)
+
+    this.options.interval
+      && !this.paused
+      && (this.interval = setInterval($.proxy(this.next, this), this.options.interval))
+
+    return this
+  }
+
+  Carousel.prototype.getActiveIndex = function () {
+    this.$active = this.$element.find('.item.active')
+    this.$items  = this.$active.parent().children()
+
+    return this.$items.index(this.$active)
+  }
+
+  Carousel.prototype.to = function (pos) {
+    var that        = this
+    var activeIndex = this.getActiveIndex()
+
+    if (pos > (this.$items.length - 1) || pos < 0) return
+
+    if (this.sliding)       return this.$element.one('slid.bs.carousel', function () { that.to(pos) })
+    if (activeIndex == pos) return this.pause().cycle()
+
+    return this.slide(pos > activeIndex ? 'next' : 'prev', $(this.$items[pos]))
+  }
+
+  Carousel.prototype.pause = function (e) {
+    e || (this.paused = true)
+
+    if (this.$element.find('.next, .prev').length && $.support.transition) {
+      this.$element.trigger($.support.transition.end)
+      this.cycle(true)
+    }
+
+    this.interval = clearInterval(this.interval)
+
+    return this
+  }
+
+  Carousel.prototype.next = function () {
+    if (this.sliding) return
+    return this.slide('next')
+  }
+
+  Carousel.prototype.prev = function () {
+    if (this.sliding) return
+    return this.slide('prev')
+  }
+
+  Carousel.prototype.slide = function (type, next) {
+    var $active   = this.$element.find('.item.active')
+    var $next     = next || $active[type]()
+    var isCycling = this.interval
+    var direction = type == 'next' ? 'left' : 'right'
+    var fallback  = type == 'next' ? 'first' : 'last'
+    var that      = this
+
+    if (!$next.length) {
+      if (!this.options.wrap) return
+      $next = this.$element.find('.item')[fallback]()
+    }
+
+    if ($next.hasClass('active')) return this.sliding = false
+
+    var e = $.Event('slide.bs.carousel', { relatedTarget: $next[0], direction: direction })
+    this.$element.trigger(e)
+    if (e.isDefaultPrevented()) return
+
+    this.sliding = true
+
+    isCycling && this.pause()
+
+    if (this.$indicators.length) {
+      this.$indicators.find('.active').removeClass('active')
+      this.$element.one('slid.bs.carousel', function () {
+        var $nextIndicator = $(that.$indicators.children()[that.getActiveIndex()])
+        $nextIndicator && $nextIndicator.addClass('active')
+      })
+    }
+
+    if ($.support.transition && this.$element.hasClass('slide')) {
+      $next.addClass(type)
+      $next[0].offsetWidth // force reflow
+      $active.addClass(direction)
+      $next.addClass(direction)
+      $active
+        .one($.support.transition.end, function () {
+          $next.removeClass([type, direction].join(' ')).addClass('active')
+          $active.removeClass(['active', direction].join(' '))
+          that.sliding = false
+          setTimeout(function () { that.$element.trigger('slid.bs.carousel') }, 0)
+        })
+        .emulateTransitionEnd($active.css('transition-duration').slice(0, -1) * 1000)
+    } else {
+      $active.removeClass('active')
+      $next.addClass('active')
+      this.sliding = false
+      this.$element.trigger('slid.bs.carousel')
+    }
+
+    isCycling && this.cycle()
+
+    return this
+  }
+
+
+  // CAROUSEL PLUGIN DEFINITION
+  // ==========================
+
+  var old = $.fn.carousel
+
+  $.fn.carousel = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.carousel')
+      var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option)
+      var action  = typeof option == 'string' ? option : options.slide
+
+      if (!data) $this.data('bs.carousel', (data = new Carousel(this, options)))
+      if (typeof option == 'number') data.to(option)
+      else if (action) data[action]()
+      else if (options.interval) data.pause().cycle()
+    })
+  }
+
+  $.fn.carousel.Constructor = Carousel
+
+
+  // CAROUSEL NO CONFLICT
+  // ====================
+
+  $.fn.carousel.noConflict = function () {
+    $.fn.carousel = old
+    return this
+  }
+
+
+  // CAROUSEL DATA-API
+  // =================
+
+  $(document).on('click.bs.carousel.data-api', '[data-slide], [data-slide-to]', function (e) {
+    var $this   = $(this), href
+    var $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7
+    var options = $.extend({}, $target.data(), $this.data())
+    var slideIndex = $this.attr('data-slide-to')
+    if (slideIndex) options.interval = false
+
+    $target.carousel(options)
+
+    if (slideIndex = $this.attr('data-slide-to')) {
+      $target.data('bs.carousel').to(slideIndex)
+    }
+
+    e.preventDefault()
+  })
+
+  $(window).on('load', function () {
+    $('[data-ride="carousel"]').each(function () {
+      var $carousel = $(this)
+      $carousel.carousel($carousel.data())
+    })
+  })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: collapse.js v3.1.0
+ * http://getbootstrap.com/javascript/#collapse
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // COLLAPSE PUBLIC CLASS DEFINITION
+  // ================================
+
+  var Collapse = function (element, options) {
+    this.$element      = $(element)
+    this.options       = $.extend({}, Collapse.DEFAULTS, options)
+    this.transitioning = null
+
+    if (this.options.parent) this.$parent = $(this.options.parent)
+    if (this.options.toggle) this.toggle()
+  }
+
+  Collapse.DEFAULTS = {
+    toggle: true
+  }
+
+  Collapse.prototype.dimension = function () {
+    var hasWidth = this.$element.hasClass('width')
+    return hasWidth ? 'width' : 'height'
+  }
+
+  Collapse.prototype.show = function () {
+    if (this.transitioning || this.$element.hasClass('in')) return
+
+    var startEvent = $.Event('show.bs.collapse')
+    this.$element.trigger(startEvent)
+    if (startEvent.isDefaultPrevented()) return
+
+    var actives = this.$parent && this.$parent.find('> .panel > .in')
+
+    if (actives && actives.length) {
+      var hasData = actives.data('bs.collapse')
+      if (hasData && hasData.transitioning) return
+      actives.collapse('hide')
+      hasData || actives.data('bs.collapse', null)
+    }
+
+    var dimension = this.dimension()
+
+    this.$element
+      .removeClass('collapse')
+      .addClass('collapsing')
+      [dimension](0)
+
+    this.transitioning = 1
+
+    var complete = function () {
+      this.$element
+        .removeClass('collapsing')
+        .addClass('collapse in')
+        [dimension]('auto')
+      this.transitioning = 0
+      this.$element.trigger('shown.bs.collapse')
+    }
+
+    if (!$.support.transition) return complete.call(this)
+
+    var scrollSize = $.camelCase(['scroll', dimension].join('-'))
+
+    this.$element
+      .one($.support.transition.end, $.proxy(complete, this))
+      .emulateTransitionEnd(350)
+      [dimension](this.$element[0][scrollSize])
+  }
+
+  Collapse.prototype.hide = function () {
+    if (this.transitioning || !this.$element.hasClass('in')) return
+
+    var startEvent = $.Event('hide.bs.collapse')
+    this.$element.trigger(startEvent)
+    if (startEvent.isDefaultPrevented()) return
+
+    var dimension = this.dimension()
+
+    this.$element
+      [dimension](this.$element[dimension]())
+      [0].offsetHeight
+
+    this.$element
+      .addClass('collapsing')
+      .removeClass('collapse')
+      .removeClass('in')
+
+    this.transitioning = 1
+
+    var complete = function () {
+      this.transitioning = 0
+      this.$element
+        .trigger('hidden.bs.collapse')
+        .removeClass('collapsing')
+        .addClass('collapse')
+    }
+
+    if (!$.support.transition) return complete.call(this)
+
+    this.$element
+      [dimension](0)
+      .one($.support.transition.end, $.proxy(complete, this))
+      .emulateTransitionEnd(350)
+  }
+
+  Collapse.prototype.toggle = function () {
+    this[this.$element.hasClass('in') ? 'hide' : 'show']()
+  }
+
+
+  // COLLAPSE PLUGIN DEFINITION
+  // ==========================
+
+  var old = $.fn.collapse
+
+  $.fn.collapse = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.collapse')
+      var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option)
+
+      if (!data && options.toggle && option == 'show') option = !option
+      if (!data) $this.data('bs.collapse', (data = new Collapse(this, options)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  $.fn.collapse.Constructor = Collapse
+
+
+  // COLLAPSE NO CONFLICT
+  // ====================
+
+  $.fn.collapse.noConflict = function () {
+    $.fn.collapse = old
+    return this
+  }
+
+
+  // COLLAPSE DATA-API
+  // =================
+
+  $(document).on('click.bs.collapse.data-api', '[data-toggle=collapse]', function (e) {
+    var $this   = $(this), href
+    var target  = $this.attr('data-target')
+        || e.preventDefault()
+        || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') //strip for ie7
+    var $target = $(target)
+    var data    = $target.data('bs.collapse')
+    var option  = data ? 'toggle' : $this.data()
+    var parent  = $this.attr('data-parent')
+    var $parent = parent && $(parent)
+
+    if (!data || !data.transitioning) {
+      if ($parent) $parent.find('[data-toggle=collapse][data-parent="' + parent + '"]').not($this).addClass('collapsed')
+      $this[$target.hasClass('in') ? 'addClass' : 'removeClass']('collapsed')
+    }
+
+    $target.collapse(option)
+  })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: dropdown.js v3.1.0
+ * http://getbootstrap.com/javascript/#dropdowns
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // DROPDOWN CLASS DEFINITION
+  // =========================
+
+  var backdrop = '.dropdown-backdrop'
+  var toggle   = '[data-toggle=dropdown]'
+  var Dropdown = function (element) {
+    $(element).on('click.bs.dropdown', this.toggle)
+  }
+
+  Dropdown.prototype.toggle = function (e) {
+    var $this = $(this)
+
+    if ($this.is('.disabled, :disabled')) return
+
+    var $parent  = getParent($this)
+    var isActive = $parent.hasClass('open')
+
+    clearMenus()
+
+    if (!isActive) {
+      if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) {
+        // if mobile we use a backdrop because click events don't delegate
+        $('<div class="dropdown-backdrop"/>').insertAfter($(this)).on('click', clearMenus)
+      }
+
+      var relatedTarget = { relatedTarget: this }
+      $parent.trigger(e = $.Event('show.bs.dropdown', relatedTarget))
+
+      if (e.isDefaultPrevented()) return
+
+      $parent
+        .toggleClass('open')
+        .trigger('shown.bs.dropdown', relatedTarget)
+
+      $this.focus()
+    }
+
+    return false
+  }
+
+  Dropdown.prototype.keydown = function (e) {
+    if (!/(38|40|27)/.test(e.keyCode)) return
+
+    var $this = $(this)
+
+    e.preventDefault()
+    e.stopPropagation()
+
+    if ($this.is('.disabled, :disabled')) return
+
+    var $parent  = getParent($this)
+    var isActive = $parent.hasClass('open')
+
+    if (!isActive || (isActive && e.keyCode == 27)) {
+      if (e.which == 27) $parent.find(toggle).focus()
+      return $this.click()
+    }
+
+    var desc = ' li:not(.divider):visible a'
+    var $items = $parent.find('[role=menu]' + desc + ', [role=listbox]' + desc)
+
+    if (!$items.length) return
+
+    var index = $items.index($items.filter(':focus'))
+
+    if (e.keyCode == 38 && index > 0)                 index--                        // up
+    if (e.keyCode == 40 && index < $items.length - 1) index++                        // down
+    if (!~index)                                      index = 0
+
+    $items.eq(index).focus()
+  }
+
+  function clearMenus(e) {
+    $(backdrop).remove()
+    $(toggle).each(function () {
+      var $parent = getParent($(this))
+      var relatedTarget = { relatedTarget: this }
+      if (!$parent.hasClass('open')) return
+      $parent.trigger(e = $.Event('hide.bs.dropdown', relatedTarget))
+      if (e.isDefaultPrevented()) return
+      $parent.removeClass('open').trigger('hidden.bs.dropdown', relatedTarget)
+    })
+  }
+
+  function getParent($this) {
+    var selector = $this.attr('data-target')
+
+    if (!selector) {
+      selector = $this.attr('href')
+      selector = selector && /#[A-Za-z]/.test(selector) && selector.replace(/.*(?=#[^\s]*$)/, '') //strip for ie7
+    }
+
+    var $parent = selector && $(selector)
+
+    return $parent && $parent.length ? $parent : $this.parent()
+  }
+
+
+  // DROPDOWN PLUGIN DEFINITION
+  // ==========================
+
+  var old = $.fn.dropdown
+
+  $.fn.dropdown = function (option) {
+    return this.each(function () {
+      var $this = $(this)
+      var data  = $this.data('bs.dropdown')
+
+      if (!data) $this.data('bs.dropdown', (data = new Dropdown(this)))
+      if (typeof option == 'string') data[option].call($this)
+    })
+  }
+
+  $.fn.dropdown.Constructor = Dropdown
+
+
+  // DROPDOWN NO CONFLICT
+  // ====================
+
+  $.fn.dropdown.noConflict = function () {
+    $.fn.dropdown = old
+    return this
+  }
+
+
+  // APPLY TO STANDARD DROPDOWN ELEMENTS
+  // ===================================
+
+  $(document)
+    .on('click.bs.dropdown.data-api', clearMenus)
+    .on('click.bs.dropdown.data-api', '.dropdown form', function (e) { e.stopPropagation() })
+    .on('click.bs.dropdown.data-api', toggle, Dropdown.prototype.toggle)
+    .on('keydown.bs.dropdown.data-api', toggle + ', [role=menu], [role=listbox]', Dropdown.prototype.keydown)
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: modal.js v3.1.0
+ * http://getbootstrap.com/javascript/#modals
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // MODAL CLASS DEFINITION
+  // ======================
+
+  var Modal = function (element, options) {
+    this.options   = options
+    this.$element  = $(element)
+    this.$backdrop =
+    this.isShown   = null
+
+    if (this.options.remote) {
+      this.$element
+        .find('.modal-content')
+        .load(this.options.remote, $.proxy(function () {
+          this.$element.trigger('loaded.bs.modal')
+        }, this))
+    }
+  }
+
+  Modal.DEFAULTS = {
+    backdrop: true,
+    keyboard: true,
+    show: true
+  }
+
+  Modal.prototype.toggle = function (_relatedTarget) {
+    return this[!this.isShown ? 'show' : 'hide'](_relatedTarget)
+  }
+
+  Modal.prototype.show = function (_relatedTarget) {
+    var that = this
+    var e    = $.Event('show.bs.modal', { relatedTarget: _relatedTarget })
+
+    this.$element.trigger(e)
+
+    if (this.isShown || e.isDefaultPrevented()) return
+
+    this.isShown = true
+
+    this.escape()
+
+    this.$element.on('click.dismiss.bs.modal', '[data-dismiss="modal"]', $.proxy(this.hide, this))
+
+    this.backdrop(function () {
+      var transition = $.support.transition && that.$element.hasClass('fade')
+
+      if (!that.$element.parent().length) {
+        that.$element.appendTo(document.body) // don't move modals dom position
+      }
+
+      that.$element
+        .show()
+        .scrollTop(0)
+
+      if (transition) {
+        that.$element[0].offsetWidth // force reflow
+      }
+
+      that.$element
+        .addClass('in')
+        .attr('aria-hidden', false)
+
+      that.enforceFocus()
+
+      var e = $.Event('shown.bs.modal', { relatedTarget: _relatedTarget })
+
+      transition ?
+        that.$element.find('.modal-dialog') // wait for modal to slide in
+          .one($.support.transition.end, function () {
+            that.$element.focus().trigger(e)
+          })
+          .emulateTransitionEnd(300) :
+        that.$element.focus().trigger(e)
+    })
+  }
+
+  Modal.prototype.hide = function (e) {
+    if (e) e.preventDefault()
+
+    e = $.Event('hide.bs.modal')
+
+    this.$element.trigger(e)
+
+    if (!this.isShown || e.isDefaultPrevented()) return
+
+    this.isShown = false
+
+    this.escape()
+
+    $(document).off('focusin.bs.modal')
+
+    this.$element
+      .removeClass('in')
+      .attr('aria-hidden', true)
+      .off('click.dismiss.bs.modal')
+
+    $.support.transition && this.$element.hasClass('fade') ?
+      this.$element
+        .one($.support.transition.end, $.proxy(this.hideModal, this))
+        .emulateTransitionEnd(300) :
+      this.hideModal()
+  }
+
+  Modal.prototype.enforceFocus = function () {
+    $(document)
+      .off('focusin.bs.modal') // guard against infinite focus loop
+      .on('focusin.bs.modal', $.proxy(function (e) {
+        if (this.$element[0] !== e.target && !this.$element.has(e.target).length) {
+          this.$element.focus()
+        }
+      }, this))
+  }
+
+  Modal.prototype.escape = function () {
+    if (this.isShown && this.options.keyboard) {
+      this.$element.on('keyup.dismiss.bs.modal', $.proxy(function (e) {
+        e.which == 27 && this.hide()
+      }, this))
+    } else if (!this.isShown) {
+      this.$element.off('keyup.dismiss.bs.modal')
+    }
+  }
+
+  Modal.prototype.hideModal = function () {
+    var that = this
+    this.$element.hide()
+    this.backdrop(function () {
+      that.removeBackdrop()
+      that.$element.trigger('hidden.bs.modal')
+    })
+  }
+
+  Modal.prototype.removeBackdrop = function () {
+    this.$backdrop && this.$backdrop.remove()
+    this.$backdrop = null
+  }
+
+  Modal.prototype.backdrop = function (callback) {
+    var animate = this.$element.hasClass('fade') ? 'fade' : ''
+
+    if (this.isShown && this.options.backdrop) {
+      var doAnimate = $.support.transition && animate
+
+      this.$backdrop = $('<div class="modal-backdrop ' + animate + '" />')
+        .appendTo(document.body)
+
+      this.$element.on('click.dismiss.bs.modal', $.proxy(function (e) {
+        if (e.target !== e.currentTarget) return
+        this.options.backdrop == 'static'
+          ? this.$element[0].focus.call(this.$element[0])
+          : this.hide.call(this)
+      }, this))
+
+      if (doAnimate) this.$backdrop[0].offsetWidth // force reflow
+
+      this.$backdrop.addClass('in')
+
+      if (!callback) return
+
+      doAnimate ?
+        this.$backdrop
+          .one($.support.transition.end, callback)
+          .emulateTransitionEnd(150) :
+        callback()
+
+    } else if (!this.isShown && this.$backdrop) {
+      this.$backdrop.removeClass('in')
+
+      $.support.transition && this.$element.hasClass('fade') ?
+        this.$backdrop
+          .one($.support.transition.end, callback)
+          .emulateTransitionEnd(150) :
+        callback()
+
+    } else if (callback) {
+      callback()
+    }
+  }
+
+
+  // MODAL PLUGIN DEFINITION
+  // =======================
+
+  var old = $.fn.modal
+
+  $.fn.modal = function (option, _relatedTarget) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.modal')
+      var options = $.extend({}, Modal.DEFAULTS, $this.data(), typeof option == 'object' && option)
+
+      if (!data) $this.data('bs.modal', (data = new Modal(this, options)))
+      if (typeof option == 'string') data[option](_relatedTarget)
+      else if (options.show) data.show(_relatedTarget)
+    })
+  }
+
+  $.fn.modal.Constructor = Modal
+
+
+  // MODAL NO CONFLICT
+  // =================
+
+  $.fn.modal.noConflict = function () {
+    $.fn.modal = old
+    return this
+  }
+
+
+  // MODAL DATA-API
+  // ==============
+
+  $(document).on('click.bs.modal.data-api', '[data-toggle="modal"]', function (e) {
+    var $this   = $(this)
+    var href    = $this.attr('href')
+    var $target = $($this.attr('data-target') || (href && href.replace(/.*(?=#[^\s]+$)/, ''))) //strip for ie7
+    var option  = $target.data('bs.modal') ? 'toggle' : $.extend({ remote: !/#/.test(href) && href }, $target.data(), $this.data())
+
+    if ($this.is('a')) e.preventDefault()
+
+    $target
+      .modal(option, this)
+      .one('hide', function () {
+        $this.is(':visible') && $this.focus()
+      })
+  })
+
+  $(document)
+    .on('show.bs.modal', '.modal', function () { $(document.body).addClass('modal-open') })
+    .on('hidden.bs.modal', '.modal', function () { $(document.body).removeClass('modal-open') })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: tooltip.js v3.1.0
+ * http://getbootstrap.com/javascript/#tooltip
+ * Inspired by the original jQuery.tipsy by Jason Frame
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // TOOLTIP PUBLIC CLASS DEFINITION
+  // ===============================
+
+  var Tooltip = function (element, options) {
+    this.type       =
+    this.options    =
+    this.enabled    =
+    this.timeout    =
+    this.hoverState =
+    this.$element   = null
+
+    this.init('tooltip', element, options)
+  }
+
+  Tooltip.DEFAULTS = {
+    animation: true,
+    placement: 'top',
+    selector: false,
+    template: '<div class="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',
+    trigger: 'hover focus',
+    title: '',
+    delay: 0,
+    html: false,
+    container: false
+  }
+
+  Tooltip.prototype.init = function (type, element, options) {
+    this.enabled  = true
+    this.type     = type
+    this.$element = $(element)
+    this.options  = this.getOptions(options)
+
+    var triggers = this.options.trigger.split(' ')
+
+    for (var i = triggers.length; i--;) {
+      var trigger = triggers[i]
+
+      if (trigger == 'click') {
+        this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this))
+      } else if (trigger != 'manual') {
+        var eventIn  = trigger == 'hover' ? 'mouseenter' : 'focusin'
+        var eventOut = trigger == 'hover' ? 'mouseleave' : 'focusout'
+
+        this.$element.on(eventIn  + '.' + this.type, this.options.selector, $.proxy(this.enter, this))
+        this.$element.on(eventOut + '.' + this.type, this.options.selector, $.proxy(this.leave, this))
+      }
+    }
+
+    this.options.selector ?
+      (this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) :
+      this.fixTitle()
+  }
+
+  Tooltip.prototype.getDefaults = function () {
+    return Tooltip.DEFAULTS
+  }
+
+  Tooltip.prototype.getOptions = function (options) {
+    options = $.extend({}, this.getDefaults(), this.$element.data(), options)
+
+    if (options.delay && typeof options.delay == 'number') {
+      options.delay = {
+        show: options.delay,
+        hide: options.delay
+      }
+    }
+
+    return options
+  }
+
+  Tooltip.prototype.getDelegateOptions = function () {
+    var options  = {}
+    var defaults = this.getDefaults()
+
+    this._options && $.each(this._options, function (key, value) {
+      if (defaults[key] != value) options[key] = value
+    })
+
+    return options
+  }
+
+  Tooltip.prototype.enter = function (obj) {
+    var self = obj instanceof this.constructor ?
+      obj : $(obj.currentTarget)[this.type](this.getDelegateOptions()).data('bs.' + this.type)
+
+    clearTimeout(self.timeout)
+
+    self.hoverState = 'in'
+
+    if (!self.options.delay || !self.options.delay.show) return self.show()
+
+    self.timeout = setTimeout(function () {
+      if (self.hoverState == 'in') self.show()
+    }, self.options.delay.show)
+  }
+
+  Tooltip.prototype.leave = function (obj) {
+    var self = obj instanceof this.constructor ?
+      obj : $(obj.currentTarget)[this.type](this.getDelegateOptions()).data('bs.' + this.type)
+
+    clearTimeout(self.timeout)
+
+    self.hoverState = 'out'
+
+    if (!self.options.delay || !self.options.delay.hide) return self.hide()
+
+    self.timeout = setTimeout(function () {
+      if (self.hoverState == 'out') self.hide()
+    }, self.options.delay.hide)
+  }
+
+  Tooltip.prototype.show = function () {
+    var e = $.Event('show.bs.' + this.type)
+
+    if (this.hasContent() && this.enabled) {
+      this.$element.trigger(e)
+
+      if (e.isDefaultPrevented()) return
+      var that = this;
+
+      var $tip = this.tip()
+
+      this.setContent()
+
+      if (this.options.animation) $tip.addClass('fade')
+
+      var placement = typeof this.options.placement == 'function' ?
+        this.options.placement.call(this, $tip[0], this.$element[0]) :
+        this.options.placement
+
+      var autoToken = /\s?auto?\s?/i
+      var autoPlace = autoToken.test(placement)
+      if (autoPlace) placement = placement.replace(autoToken, '') || 'top'
+
+      $tip
+        .detach()
+        .css({ top: 0, left: 0, display: 'block' })
+        .addClass(placement)
+
+      this.options.container ? $tip.appendTo(this.options.container) : $tip.insertAfter(this.$element)
+
+      var pos          = this.getPosition()
+      var actualWidth  = $tip[0].offsetWidth
+      var actualHeight = $tip[0].offsetHeight
+
+      if (autoPlace) {
+        var $parent = this.$element.parent()
+
+        var orgPlacement = placement
+        var docScroll    = document.documentElement.scrollTop || document.body.scrollTop
+        var parentWidth  = this.options.container == 'body' ? window.innerWidth  : $parent.outerWidth()
+        var parentHeight = this.options.container == 'body' ? window.innerHeight : $parent.outerHeight()
+        var parentLeft   = this.options.container == 'body' ? 0 : $parent.offset().left
+
+        placement = placement == 'bottom' && pos.top   + pos.height  + actualHeight - docScroll > parentHeight  ? 'top'    :
+                    placement == 'top'    && pos.top   - docScroll   - actualHeight < 0                         ? 'bottom' :
+                    placement == 'right'  && pos.right + actualWidth > parentWidth                              ? 'left'   :
+                    placement == 'left'   && pos.left  - actualWidth < parentLeft                               ? 'right'  :
+                    placement
+
+        $tip
+          .removeClass(orgPlacement)
+          .addClass(placement)
+      }
+
+      var calculatedOffset = this.getCalculatedOffset(placement, pos, actualWidth, actualHeight)
+
+      this.applyPlacement(calculatedOffset, placement)
+      this.hoverState = null
+
+      var complete = function() {
+        that.$element.trigger('shown.bs.' + that.type)
+      }
+
+      $.support.transition && this.$tip.hasClass('fade') ?
+        $tip
+          .one($.support.transition.end, complete)
+          .emulateTransitionEnd(150) :
+        complete()
+    }
+  }
+
+  Tooltip.prototype.applyPlacement = function (offset, placement) {
+    var replace
+    var $tip   = this.tip()
+    var width  = $tip[0].offsetWidth
+    var height = $tip[0].offsetHeight
+
+    // manually read margins because getBoundingClientRect includes difference
+    var marginTop = parseInt($tip.css('margin-top'), 10)
+    var marginLeft = parseInt($tip.css('margin-left'), 10)
+
+    // we must check for NaN for ie 8/9
+    if (isNaN(marginTop))  marginTop  = 0
+    if (isNaN(marginLeft)) marginLeft = 0
+
+    offset.top  = offset.top  + marginTop
+    offset.left = offset.left + marginLeft
+
+    // $.fn.offset doesn't round pixel values
+    // so we use setOffset directly with our own function B-0
+    $.offset.setOffset($tip[0], $.extend({
+      using: function (props) {
+        $tip.css({
+          top: Math.round(props.top),
+          left: Math.round(props.left)
+        })
+      }
+    }, offset), 0)
+
+    $tip.addClass('in')
+
+    // check to see if placing tip in new offset caused the tip to resize itself
+    var actualWidth  = $tip[0].offsetWidth
+    var actualHeight = $tip[0].offsetHeight
+
+    if (placement == 'top' && actualHeight != height) {
+      replace = true
+      offset.top = offset.top + height - actualHeight
+    }
+
+    if (/bottom|top/.test(placement)) {
+      var delta = 0
+
+      if (offset.left < 0) {
+        delta       = offset.left * -2
+        offset.left = 0
+
+        $tip.offset(offset)
+
+        actualWidth  = $tip[0].offsetWidth
+        actualHeight = $tip[0].offsetHeight
+      }
+
+      this.replaceArrow(delta - width + actualWidth, actualWidth, 'left')
+    } else {
+      this.replaceArrow(actualHeight - height, actualHeight, 'top')
+    }
+
+    if (replace) $tip.offset(offset)
+  }
+
+  Tooltip.prototype.replaceArrow = function (delta, dimension, position) {
+    this.arrow().css(position, delta ? (50 * (1 - delta / dimension) + '%') : '')
+  }
+
+  Tooltip.prototype.setContent = function () {
+    var $tip  = this.tip()
+    var title = this.getTitle()
+
+    $tip.find('.tooltip-inner')[this.options.html ? 'html' : 'text'](title)
+    $tip.removeClass('fade in top bottom left right')
+  }
+
+  Tooltip.prototype.hide = function () {
+    var that = this
+    var $tip = this.tip()
+    var e    = $.Event('hide.bs.' + this.type)
+
+    function complete() {
+      if (that.hoverState != 'in') $tip.detach()
+      that.$element.trigger('hidden.bs.' + that.type)
+    }
+
+    this.$element.trigger(e)
+
+    if (e.isDefaultPrevented()) return
+
+    $tip.removeClass('in')
+
+    $.support.transition && this.$tip.hasClass('fade') ?
+      $tip
+        .one($.support.transition.end, complete)
+        .emulateTransitionEnd(150) :
+      complete()
+
+    this.hoverState = null
+
+    return this
+  }
+
+  Tooltip.prototype.fixTitle = function () {
+    var $e = this.$element
+    if ($e.attr('title') || typeof($e.attr('data-original-title')) != 'string') {
+      $e.attr('data-original-title', $e.attr('title') || '').attr('title', '')
+    }
+  }
+
+  Tooltip.prototype.hasContent = function () {
+    return this.getTitle()
+  }
+
+  Tooltip.prototype.getPosition = function () {
+    var el = this.$element[0]
+    return $.extend({}, (typeof el.getBoundingClientRect == 'function') ? el.getBoundingClientRect() : {
+      width: el.offsetWidth,
+      height: el.offsetHeight
+    }, this.$element.offset())
+  }
+
+  Tooltip.prototype.getCalculatedOffset = function (placement, pos, actualWidth, actualHeight) {
+    return placement == 'bottom' ? { top: pos.top + pos.height,   left: pos.left + pos.width / 2 - actualWidth / 2  } :
+           placement == 'top'    ? { top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2  } :
+           placement == 'left'   ? { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth } :
+        /* placement == 'right' */ { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width   }
+  }
+
+  Tooltip.prototype.getTitle = function () {
+    var title
+    var $e = this.$element
+    var o  = this.options
+
+    title = $e.attr('data-original-title')
+      || (typeof o.title == 'function' ? o.title.call($e[0]) :  o.title)
+
+    return title
+  }
+
+  Tooltip.prototype.tip = function () {
+    return this.$tip = this.$tip || $(this.options.template)
+  }
+
+  Tooltip.prototype.arrow = function () {
+    return this.$arrow = this.$arrow || this.tip().find('.tooltip-arrow')
+  }
+
+  Tooltip.prototype.validate = function () {
+    if (!this.$element[0].parentNode) {
+      this.hide()
+      this.$element = null
+      this.options  = null
+    }
+  }
+
+  Tooltip.prototype.enable = function () {
+    this.enabled = true
+  }
+
+  Tooltip.prototype.disable = function () {
+    this.enabled = false
+  }
+
+  Tooltip.prototype.toggleEnabled = function () {
+    this.enabled = !this.enabled
+  }
+
+  Tooltip.prototype.toggle = function (e) {
+    var self = e ? $(e.currentTarget)[this.type](this.getDelegateOptions()).data('bs.' + this.type) : this
+    self.tip().hasClass('in') ? self.leave(self) : self.enter(self)
+  }
+
+  Tooltip.prototype.destroy = function () {
+    clearTimeout(this.timeout)
+    this.hide().$element.off('.' + this.type).removeData('bs.' + this.type)
+  }
+
+
+  // TOOLTIP PLUGIN DEFINITION
+  // =========================
+
+  var old = $.fn.tooltip
+
+  $.fn.tooltip = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.tooltip')
+      var options = typeof option == 'object' && option
+
+      if (!data && option == 'destroy') return
+      if (!data) $this.data('bs.tooltip', (data = new Tooltip(this, options)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  $.fn.tooltip.Constructor = Tooltip
+
+
+  // TOOLTIP NO CONFLICT
+  // ===================
+
+  $.fn.tooltip.noConflict = function () {
+    $.fn.tooltip = old
+    return this
+  }
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: popover.js v3.1.0
+ * http://getbootstrap.com/javascript/#popovers
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // POPOVER PUBLIC CLASS DEFINITION
+  // ===============================
+
+  var Popover = function (element, options) {
+    this.init('popover', element, options)
+  }
+
+  if (!$.fn.tooltip) throw new Error('Popover requires tooltip.js')
+
+  Popover.DEFAULTS = $.extend({}, $.fn.tooltip.Constructor.DEFAULTS, {
+    placement: 'right',
+    trigger: 'click',
+    content: '',
+    template: '<div class="popover"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'
+  })
+
+
+  // NOTE: POPOVER EXTENDS tooltip.js
+  // ================================
+
+  Popover.prototype = $.extend({}, $.fn.tooltip.Constructor.prototype)
+
+  Popover.prototype.constructor = Popover
+
+  Popover.prototype.getDefaults = function () {
+    return Popover.DEFAULTS
+  }
+
+  Popover.prototype.setContent = function () {
+    var $tip    = this.tip()
+    var title   = this.getTitle()
+    var content = this.getContent()
+
+    $tip.find('.popover-title')[this.options.html ? 'html' : 'text'](title)
+    $tip.find('.popover-content')[ // we use append for html objects to maintain js events
+      this.options.html ? (typeof content == 'string' ? 'html' : 'append') : 'text'
+    ](content)
+
+    $tip.removeClass('fade top bottom left right in')
+
+    // IE8 doesn't accept hiding via the `:empty` pseudo selector, we have to do
+    // this manually by checking the contents.
+    if (!$tip.find('.popover-title').html()) $tip.find('.popover-title').hide()
+  }
+
+  Popover.prototype.hasContent = function () {
+    return this.getTitle() || this.getContent()
+  }
+
+  Popover.prototype.getContent = function () {
+    var $e = this.$element
+    var o  = this.options
+
+    return $e.attr('data-content')
+      || (typeof o.content == 'function' ?
+            o.content.call($e[0]) :
+            o.content)
+  }
+
+  Popover.prototype.arrow = function () {
+    return this.$arrow = this.$arrow || this.tip().find('.arrow')
+  }
+
+  Popover.prototype.tip = function () {
+    if (!this.$tip) this.$tip = $(this.options.template)
+    return this.$tip
+  }
+
+
+  // POPOVER PLUGIN DEFINITION
+  // =========================
+
+  var old = $.fn.popover
+
+  $.fn.popover = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.popover')
+      var options = typeof option == 'object' && option
+
+      if (!data && option == 'destroy') return
+      if (!data) $this.data('bs.popover', (data = new Popover(this, options)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  $.fn.popover.Constructor = Popover
+
+
+  // POPOVER NO CONFLICT
+  // ===================
+
+  $.fn.popover.noConflict = function () {
+    $.fn.popover = old
+    return this
+  }
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: scrollspy.js v3.1.0
+ * http://getbootstrap.com/javascript/#scrollspy
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // SCROLLSPY CLASS DEFINITION
+  // ==========================
+
+  function ScrollSpy(element, options) {
+    var href
+    var process  = $.proxy(this.process, this)
+
+    this.$element       = $(element).is('body') ? $(window) : $(element)
+    this.$body          = $('body')
+    this.$scrollElement = this.$element.on('scroll.bs.scroll-spy.data-api', process)
+    this.options        = $.extend({}, ScrollSpy.DEFAULTS, options)
+    this.selector       = (this.options.target
+      || ((href = $(element).attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7
+      || '') + ' .nav li > a'
+    this.offsets        = $([])
+    this.targets        = $([])
+    this.activeTarget   = null
+
+    this.refresh()
+    this.process()
+  }
+
+  ScrollSpy.DEFAULTS = {
+    offset: 10
+  }
+
+  ScrollSpy.prototype.refresh = function () {
+    var offsetMethod = this.$element[0] == window ? 'offset' : 'position'
+
+    this.offsets = $([])
+    this.targets = $([])
+
+    var self     = this
+    var $targets = this.$body
+      .find(this.selector)
+      .map(function () {
+        var $el   = $(this)
+        var href  = $el.data('target') || $el.attr('href')
+        var $href = /^#./.test(href) && $(href)
+
+        return ($href
+          && $href.length
+          && $href.is(':visible')
+          && [[ $href[offsetMethod]().top + (!$.isWindow(self.$scrollElement.get(0)) && self.$scrollElement.scrollTop()), href ]]) || null
+      })
+      .sort(function (a, b) { return a[0] - b[0] })
+      .each(function () {
+        self.offsets.push(this[0])
+        self.targets.push(this[1])
+      })
+  }
+
+  ScrollSpy.prototype.process = function () {
+    var scrollTop    = this.$scrollElement.scrollTop() + this.options.offset
+    var scrollHeight = this.$scrollElement[0].scrollHeight || this.$body[0].scrollHeight
+    var maxScroll    = scrollHeight - this.$scrollElement.height()
+    var offsets      = this.offsets
+    var targets      = this.targets
+    var activeTarget = this.activeTarget
+    var i
+
+    if (scrollTop >= maxScroll) {
+      return activeTarget != (i = targets.last()[0]) && this.activate(i)
+    }
+
+    if (activeTarget && scrollTop <= offsets[0]) {
+      return activeTarget != (i = targets[0]) && this.activate(i)
+    }
+
+    for (i = offsets.length; i--;) {
+      activeTarget != targets[i]
+        && scrollTop >= offsets[i]
+        && (!offsets[i + 1] || scrollTop <= offsets[i + 1])
+        && this.activate( targets[i] )
+    }
+  }
+
+  ScrollSpy.prototype.activate = function (target) {
+    this.activeTarget = target
+
+    $(this.selector)
+      .parentsUntil(this.options.target, '.active')
+      .removeClass('active')
+
+    var selector = this.selector +
+        '[data-target="' + target + '"],' +
+        this.selector + '[href="' + target + '"]'
+
+    var active = $(selector)
+      .parents('li')
+      .addClass('active')
+
+    if (active.parent('.dropdown-menu').length) {
+      active = active
+        .closest('li.dropdown')
+        .addClass('active')
+    }
+
+    active.trigger('activate.bs.scrollspy')
+  }
+
+
+  // SCROLLSPY PLUGIN DEFINITION
+  // ===========================
+
+  var old = $.fn.scrollspy
+
+  $.fn.scrollspy = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.scrollspy')
+      var options = typeof option == 'object' && option
+
+      if (!data) $this.data('bs.scrollspy', (data = new ScrollSpy(this, options)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  $.fn.scrollspy.Constructor = ScrollSpy
+
+
+  // SCROLLSPY NO CONFLICT
+  // =====================
+
+  $.fn.scrollspy.noConflict = function () {
+    $.fn.scrollspy = old
+    return this
+  }
+
+
+  // SCROLLSPY DATA-API
+  // ==================
+
+  $(window).on('load', function () {
+    $('[data-spy="scroll"]').each(function () {
+      var $spy = $(this)
+      $spy.scrollspy($spy.data())
+    })
+  })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: tab.js v3.1.0
+ * http://getbootstrap.com/javascript/#tabs
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // TAB CLASS DEFINITION
+  // ====================
+
+  var Tab = function (element) {
+    this.element = $(element)
+  }
+
+  Tab.prototype.show = function () {
+    var $this    = this.element
+    var $ul      = $this.closest('ul:not(.dropdown-menu)')
+    var selector = $this.data('target')
+
+    if (!selector) {
+      selector = $this.attr('href')
+      selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') //strip for ie7
+    }
+
+    if ($this.parent('li').hasClass('active')) return
+
+    var previous = $ul.find('.active:last a')[0]
+    var e        = $.Event('show.bs.tab', {
+      relatedTarget: previous
+    })
+
+    $this.trigger(e)
+
+    if (e.isDefaultPrevented()) return
+
+    var $target = $(selector)
+
+    this.activate($this.parent('li'), $ul)
+    this.activate($target, $target.parent(), function () {
+      $this.trigger({
+        type: 'shown.bs.tab',
+        relatedTarget: previous
+      })
+    })
+  }
+
+  Tab.prototype.activate = function (element, container, callback) {
+    var $active    = container.find('> .active')
+    var transition = callback
+      && $.support.transition
+      && $active.hasClass('fade')
+
+    function next() {
+      $active
+        .removeClass('active')
+        .find('> .dropdown-menu > .active')
+        .removeClass('active')
+
+      element.addClass('active')
+
+      if (transition) {
+        element[0].offsetWidth // reflow for transition
+        element.addClass('in')
+      } else {
+        element.removeClass('fade')
+      }
+
+      if (element.parent('.dropdown-menu')) {
+        element.closest('li.dropdown').addClass('active')
+      }
+
+      callback && callback()
+    }
+
+    transition ?
+      $active
+        .one($.support.transition.end, next)
+        .emulateTransitionEnd(150) :
+      next()
+
+    $active.removeClass('in')
+  }
+
+
+  // TAB PLUGIN DEFINITION
+  // =====================
+
+  var old = $.fn.tab
+
+  $.fn.tab = function ( option ) {
+    return this.each(function () {
+      var $this = $(this)
+      var data  = $this.data('bs.tab')
+
+      if (!data) $this.data('bs.tab', (data = new Tab(this)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  $.fn.tab.Constructor = Tab
+
+
+  // TAB NO CONFLICT
+  // ===============
+
+  $.fn.tab.noConflict = function () {
+    $.fn.tab = old
+    return this
+  }
+
+
+  // TAB DATA-API
+  // ============
+
+  $(document).on('click.bs.tab.data-api', '[data-toggle="tab"], [data-toggle="pill"]', function (e) {
+    e.preventDefault()
+    $(this).tab('show')
+  })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: affix.js v3.1.0
+ * http://getbootstrap.com/javascript/#affix
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // AFFIX CLASS DEFINITION
+  // ======================
+
+  var Affix = function (element, options) {
+    this.options = $.extend({}, Affix.DEFAULTS, options)
+    this.$window = $(window)
+      .on('scroll.bs.affix.data-api', $.proxy(this.checkPosition, this))
+      .on('click.bs.affix.data-api',  $.proxy(this.checkPositionWithEventLoop, this))
+
+    this.$element     = $(element)
+    this.affixed      =
+    this.unpin        =
+    this.pinnedOffset = null
+
+    this.checkPosition()
+  }
+
+  Affix.RESET = 'affix affix-top affix-bottom'
+
+  Affix.DEFAULTS = {
+    offset: 0
+  }
+
+  Affix.prototype.getPinnedOffset = function () {
+    if (this.pinnedOffset) return this.pinnedOffset
+    this.$element.removeClass(Affix.RESET).addClass('affix')
+    var scrollTop = this.$window.scrollTop()
+    var position  = this.$element.offset()
+    return (this.pinnedOffset = position.top - scrollTop)
+  }
+
+  Affix.prototype.checkPositionWithEventLoop = function () {
+    setTimeout($.proxy(this.checkPosition, this), 1)
+  }
+
+  Affix.prototype.checkPosition = function () {
+    if (!this.$element.is(':visible')) return
+
+    var scrollHeight = $(document).height()
+    var scrollTop    = this.$window.scrollTop()
+    var position     = this.$element.offset()
+    var offset       = this.options.offset
+    var offsetTop    = offset.top
+    var offsetBottom = offset.bottom
+
+    if (this.affixed == 'top') position.top += scrollTop
+
+    if (typeof offset != 'object')         offsetBottom = offsetTop = offset
+    if (typeof offsetTop == 'function')    offsetTop    = offset.top(this.$element)
+    if (typeof offsetBottom == 'function') offsetBottom = offset.bottom(this.$element)
+
+    var affix = this.unpin   != null && (scrollTop + this.unpin <= position.top) ? false :
+                offsetBottom != null && (position.top + this.$element.height() >= scrollHeight - offsetBottom) ? 'bottom' :
+                offsetTop    != null && (scrollTop <= offsetTop) ? 'top' : false
+
+    if (this.affixed === affix) return
+    if (this.unpin) this.$element.css('top', '')
+
+    var affixType = 'affix' + (affix ? '-' + affix : '')
+    var e         = $.Event(affixType + '.bs.affix')
+
+    this.$element.trigger(e)
+
+    if (e.isDefaultPrevented()) return
+
+    this.affixed = affix
+    this.unpin = affix == 'bottom' ? this.getPinnedOffset() : null
+
+    this.$element
+      .removeClass(Affix.RESET)
+      .addClass(affixType)
+      .trigger($.Event(affixType.replace('affix', 'affixed')))
+
+    if (affix == 'bottom') {
+      this.$element.offset({ top: scrollHeight - offsetBottom - this.$element.height() })
+    }
+  }
+
+
+  // AFFIX PLUGIN DEFINITION
+  // =======================
+
+  var old = $.fn.affix
+
+  $.fn.affix = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.affix')
+      var options = typeof option == 'object' && option
+
+      if (!data) $this.data('bs.affix', (data = new Affix(this, options)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  $.fn.affix.Constructor = Affix
+
+
+  // AFFIX NO CONFLICT
+  // =================
+
+  $.fn.affix.noConflict = function () {
+    $.fn.affix = old
+    return this
+  }
+
+
+  // AFFIX DATA-API
+  // ==============
+
+  $(window).on('load', function () {
+    $('[data-spy="affix"]').each(function () {
+      var $spy = $(this)
+      var data = $spy.data()
+
+      data.offset = data.offset || {}
+
+      if (data.offsetBottom) data.offset.bottom = data.offsetBottom
+      if (data.offsetTop)    data.offset.top    = data.offsetTop
+
+      $spy.affix(data)
+    })
+  })
+
+}(jQuery);
diff --git a/doc/js/bootstrap.min.js b/doc/js/bootstrap.min.js
new file mode 100644 (file)
index 0000000..1d4a4ed
--- /dev/null
@@ -0,0 +1,6 @@
+/*!
+ * Bootstrap v3.1.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+if("undefined"==typeof jQuery)throw new Error("Bootstrap requires jQuery");+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one(a.support.transition.end,function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b()})}(jQuery),+function(a){"use strict";var b='[data-dismiss="alert"]',c=function(c){a(c).on("click",b,this.close)};c.prototype.close=function(b){function c(){f.trigger("closed.bs.alert").remove()}var d=a(this),e=d.attr("data-target");e||(e=d.attr("href"),e=e&&e.replace(/.*(?=#[^\s]*$)/,""));var f=a(e);b&&b.preventDefault(),f.length||(f=d.hasClass("alert")?d:d.parent()),f.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(f.removeClass("in"),a.support.transition&&f.hasClass("fade")?f.one(a.support.transition.end,c).emulateTransitionEnd(150):c())};var d=a.fn.alert;a.fn.alert=function(b){return this.each(function(){var d=a(this),e=d.data("bs.alert");e||d.data("bs.alert",e=new c(this)),"string"==typeof b&&e[b].call(d)})},a.fn.alert.Constructor=c,a.fn.alert.noConflict=function(){return a.fn.alert=d,this},a(document).on("click.bs.alert.data-api",b,c.prototype.close)}(jQuery),+function(a){"use strict";var b=function(c,d){this.$element=a(c),this.options=a.extend({},b.DEFAULTS,d),this.isLoading=!1};b.DEFAULTS={loadingText:"loading..."},b.prototype.setState=function(b){var c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",f.resetText||d.data("resetText",d[e]()),d[e](f[b]||this.options[b]),setTimeout(a.proxy(function(){"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c))},this),0)},b.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var c=this.$element.find("input");"radio"==c.prop("type")&&(c.prop("checked")&&this.$element.hasClass("active")?a=!1:b.find(".active").removeClass("active")),a&&c.prop("checked",!this.$element.hasClass("active")).trigger("change")}a&&this.$element.toggleClass("active")};var c=a.fn.button;a.fn.button=function(c){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof c&&c;e||d.data("bs.button",e=new b(this,f)),"toggle"==c?e.toggle():c&&e.setState(c)})},a.fn.button.Constructor=b,a.fn.button.noConflict=function(){return a.fn.button=c,this},a(document).on("click.bs.button.data-api","[data-toggle^=button]",function(b){var c=a(b.target);c.hasClass("btn")||(c=c.closest(".btn")),c.button("toggle"),b.preventDefault()})}(jQuery),+function(a){"use strict";var b=function(b,c){this.$element=a(b),this.$indicators=this.$element.find(".carousel-indicators"),this.options=c,this.paused=this.sliding=this.interval=this.$active=this.$items=null,"hover"==this.options.pause&&this.$element.on("mouseenter",a.proxy(this.pause,this)).on("mouseleave",a.proxy(this.cycle,this))};b.DEFAULTS={interval:5e3,pause:"hover",wrap:!0},b.prototype.cycle=function(b){return b||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},b.prototype.getActiveIndex=function(){return 
this.$active=this.$element.find(".item.active"),this.$items=this.$active.parent().children(),this.$items.index(this.$active)},b.prototype.to=function(b){var c=this,d=this.getActiveIndex();return b>this.$items.length-1||0>b?void 0:this.sliding?this.$element.one("slid.bs.carousel",function(){c.to(b)}):d==b?this.pause().cycle():this.slide(b>d?"next":"prev",a(this.$items[b]))},b.prototype.pause=function(b){return b||(this.paused=!0),this.$element.find(".next, .prev").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},b.prototype.next=function(){return this.sliding?void 0:this.slide("next")},b.prototype.prev=function(){return this.sliding?void 0:this.slide("prev")},b.prototype.slide=function(b,c){var d=this.$element.find(".item.active"),e=c||d[b](),f=this.interval,g="next"==b?"left":"right",h="next"==b?"first":"last",i=this;if(!e.length){if(!this.options.wrap)return;e=this.$element.find(".item")[h]()}if(e.hasClass("active"))return this.sliding=!1;var j=a.Event("slide.bs.carousel",{relatedTarget:e[0],direction:g});return this.$element.trigger(j),j.isDefaultPrevented()?void 0:(this.sliding=!0,f&&this.pause(),this.$indicators.length&&(this.$indicators.find(".active").removeClass("active"),this.$element.one("slid.bs.carousel",function(){var b=a(i.$indicators.children()[i.getActiveIndex()]);b&&b.addClass("active")})),a.support.transition&&this.$element.hasClass("slide")?(e.addClass(b),e[0].offsetWidth,d.addClass(g),e.addClass(g),d.one(a.support.transition.end,function(){e.removeClass([b,g].join(" ")).addClass("active"),d.removeClass(["active",g].join(" ")),i.sliding=!1,setTimeout(function(){i.$element.trigger("slid.bs.carousel")},0)}).emulateTransitionEnd(1e3*d.css("transition-duration").slice(0,-1))):(d.removeClass("active"),e.addClass("active"),this.sliding=!1,this.$element.trigger("slid.bs.carousel")),f&&this.cycle(),this)};var c=a.fn.carousel;a.fn.carousel=function(c){return this.each(function(){var d=a(this),e=d.data("bs.carousel"),f=a.extend({},b.DEFAULTS,d.data(),"object"==typeof c&&c),g="string"==typeof c?c:f.slide;e||d.data("bs.carousel",e=new b(this,f)),"number"==typeof c?e.to(c):g?e[g]():f.interval&&e.pause().cycle()})},a.fn.carousel.Constructor=b,a.fn.carousel.noConflict=function(){return a.fn.carousel=c,this},a(document).on("click.bs.carousel.data-api","[data-slide], [data-slide-to]",function(b){var c,d=a(this),e=a(d.attr("data-target")||(c=d.attr("href"))&&c.replace(/.*(?=#[^\s]+$)/,"")),f=a.extend({},e.data(),d.data()),g=d.attr("data-slide-to");g&&(f.interval=!1),e.carousel(f),(g=d.attr("data-slide-to"))&&e.data("bs.carousel").to(g),b.preventDefault()}),a(window).on("load",function(){a('[data-ride="carousel"]').each(function(){var b=a(this);b.carousel(b.data())})})}(jQuery),+function(a){"use strict";var b=function(c,d){this.$element=a(c),this.options=a.extend({},b.DEFAULTS,d),this.transitioning=null,this.options.parent&&(this.$parent=a(this.options.parent)),this.options.toggle&&this.toggle()};b.DEFAULTS={toggle:!0},b.prototype.dimension=function(){var a=this.$element.hasClass("width");return a?"width":"height"},b.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var b=a.Event("show.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.$parent&&this.$parent.find("> .panel > .in");if(c&&c.length){var d=c.data("bs.collapse");if(d&&d.transitioning)return;c.collapse("hide"),d||c.data("bs.collapse",null)}var 
e=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[e](0),this.transitioning=1;var f=function(){this.$element.removeClass("collapsing").addClass("collapse in")[e]("auto"),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return f.call(this);var g=a.camelCase(["scroll",e].join("-"));this.$element.one(a.support.transition.end,a.proxy(f,this)).emulateTransitionEnd(350)[e](this.$element[0][g])}}},b.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var b=a.Event("hide.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element[c]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse").removeClass("in"),this.transitioning=1;var d=function(){this.transitioning=0,this.$element.trigger("hidden.bs.collapse").removeClass("collapsing").addClass("collapse")};return a.support.transition?void this.$element[c](0).one(a.support.transition.end,a.proxy(d,this)).emulateTransitionEnd(350):d.call(this)}}},b.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()};var c=a.fn.collapse;a.fn.collapse=function(c){return this.each(function(){var d=a(this),e=d.data("bs.collapse"),f=a.extend({},b.DEFAULTS,d.data(),"object"==typeof c&&c);!e&&f.toggle&&"show"==c&&(c=!c),e||d.data("bs.collapse",e=new b(this,f)),"string"==typeof c&&e[c]()})},a.fn.collapse.Constructor=b,a.fn.collapse.noConflict=function(){return a.fn.collapse=c,this},a(document).on("click.bs.collapse.data-api","[data-toggle=collapse]",function(b){var c,d=a(this),e=d.attr("data-target")||b.preventDefault()||(c=d.attr("href"))&&c.replace(/.*(?=#[^\s]+$)/,""),f=a(e),g=f.data("bs.collapse"),h=g?"toggle":d.data(),i=d.attr("data-parent"),j=i&&a(i);g&&g.transitioning||(j&&j.find('[data-toggle=collapse][data-parent="'+i+'"]').not(d).addClass("collapsed"),d[f.hasClass("in")?"addClass":"removeClass"]("collapsed")),f.collapse(h)})}(jQuery),+function(a){"use strict";function b(b){a(d).remove(),a(e).each(function(){var d=c(a(this)),e={relatedTarget:this};d.hasClass("open")&&(d.trigger(b=a.Event("hide.bs.dropdown",e)),b.isDefaultPrevented()||d.removeClass("open").trigger("hidden.bs.dropdown",e))})}function c(b){var c=b.attr("data-target");c||(c=b.attr("href"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\s]*$)/,""));var d=c&&a(c);return d&&d.length?d:b.parent()}var d=".dropdown-backdrop",e="[data-toggle=dropdown]",f=function(b){a(b).on("click.bs.dropdown",this.toggle)};f.prototype.toggle=function(d){var e=a(this);if(!e.is(".disabled, :disabled")){var f=c(e),g=f.hasClass("open");if(b(),!g){"ontouchstart"in document.documentElement&&!f.closest(".navbar-nav").length&&a('<div class="dropdown-backdrop"/>').insertAfter(a(this)).on("click",b);var h={relatedTarget:this};if(f.trigger(d=a.Event("show.bs.dropdown",h)),d.isDefaultPrevented())return;f.toggleClass("open").trigger("shown.bs.dropdown",h),e.focus()}return!1}},f.prototype.keydown=function(b){if(/(38|40|27)/.test(b.keyCode)){var d=a(this);if(b.preventDefault(),b.stopPropagation(),!d.is(".disabled, :disabled")){var f=c(d),g=f.hasClass("open");if(!g||g&&27==b.keyCode)return 27==b.which&&f.find(e).focus(),d.click();var h=" li:not(.divider):visible a",i=f.find("[role=menu]"+h+", [role=listbox]"+h);if(i.length){var j=i.index(i.filter(":focus"));38==b.keyCode&&j>0&&j--,40==b.keyCode&&j<i.length-1&&j++,~j||(j=0),i.eq(j).focus()}}}};var g=a.fn.dropdown;a.fn.dropdown=function(b){return this.each(function(){var 
c=a(this),d=c.data("bs.dropdown");d||c.data("bs.dropdown",d=new f(this)),"string"==typeof b&&d[b].call(c)})},a.fn.dropdown.Constructor=f,a.fn.dropdown.noConflict=function(){return a.fn.dropdown=g,this},a(document).on("click.bs.dropdown.data-api",b).on("click.bs.dropdown.data-api",".dropdown form",function(a){a.stopPropagation()}).on("click.bs.dropdown.data-api",e,f.prototype.toggle).on("keydown.bs.dropdown.data-api",e+", [role=menu], [role=listbox]",f.prototype.keydown)}(jQuery),+function(a){"use strict";var b=function(b,c){this.options=c,this.$element=a(b),this.$backdrop=this.isShown=null,this.options.remote&&this.$element.find(".modal-content").load(this.options.remote,a.proxy(function(){this.$element.trigger("loaded.bs.modal")},this))};b.DEFAULTS={backdrop:!0,keyboard:!0,show:!0},b.prototype.toggle=function(a){return this[this.isShown?"hide":"show"](a)},b.prototype.show=function(b){var c=this,d=a.Event("show.bs.modal",{relatedTarget:b});this.$element.trigger(d),this.isShown||d.isDefaultPrevented()||(this.isShown=!0,this.escape(),this.$element.on("click.dismiss.bs.modal",'[data-dismiss="modal"]',a.proxy(this.hide,this)),this.backdrop(function(){var d=a.support.transition&&c.$element.hasClass("fade");c.$element.parent().length||c.$element.appendTo(document.body),c.$element.show().scrollTop(0),d&&c.$element[0].offsetWidth,c.$element.addClass("in").attr("aria-hidden",!1),c.enforceFocus();var e=a.Event("shown.bs.modal",{relatedTarget:b});d?c.$element.find(".modal-dialog").one(a.support.transition.end,function(){c.$element.focus().trigger(e)}).emulateTransitionEnd(300):c.$element.focus().trigger(e)}))},b.prototype.hide=function(b){b&&b.preventDefault(),b=a.Event("hide.bs.modal"),this.$element.trigger(b),this.isShown&&!b.isDefaultPrevented()&&(this.isShown=!1,this.escape(),a(document).off("focusin.bs.modal"),this.$element.removeClass("in").attr("aria-hidden",!0).off("click.dismiss.bs.modal"),a.support.transition&&this.$element.hasClass("fade")?this.$element.one(a.support.transition.end,a.proxy(this.hideModal,this)).emulateTransitionEnd(300):this.hideModal())},b.prototype.enforceFocus=function(){a(document).off("focusin.bs.modal").on("focusin.bs.modal",a.proxy(function(a){this.$element[0]===a.target||this.$element.has(a.target).length||this.$element.focus()},this))},b.prototype.escape=function(){this.isShown&&this.options.keyboard?this.$element.on("keyup.dismiss.bs.modal",a.proxy(function(a){27==a.which&&this.hide()},this)):this.isShown||this.$element.off("keyup.dismiss.bs.modal")},b.prototype.hideModal=function(){var a=this;this.$element.hide(),this.backdrop(function(){a.removeBackdrop(),a.$element.trigger("hidden.bs.modal")})},b.prototype.removeBackdrop=function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},b.prototype.backdrop=function(b){var c=this.$element.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var d=a.support.transition&&c;if(this.$backdrop=a('<div class="modal-backdrop '+c+'" 
/>').appendTo(document.body),this.$element.on("click.dismiss.bs.modal",a.proxy(function(a){a.target===a.currentTarget&&("static"==this.options.backdrop?this.$element[0].focus.call(this.$element[0]):this.hide.call(this))},this)),d&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in"),!b)return;d?this.$backdrop.one(a.support.transition.end,b).emulateTransitionEnd(150):b()}else!this.isShown&&this.$backdrop?(this.$backdrop.removeClass("in"),a.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one(a.support.transition.end,b).emulateTransitionEnd(150):b()):b&&b()};var c=a.fn.modal;a.fn.modal=function(c,d){return this.each(function(){var e=a(this),f=e.data("bs.modal"),g=a.extend({},b.DEFAULTS,e.data(),"object"==typeof c&&c);f||e.data("bs.modal",f=new b(this,g)),"string"==typeof c?f[c](d):g.show&&f.show(d)})},a.fn.modal.Constructor=b,a.fn.modal.noConflict=function(){return a.fn.modal=c,this},a(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',function(b){var c=a(this),d=c.attr("href"),e=a(c.attr("data-target")||d&&d.replace(/.*(?=#[^\s]+$)/,"")),f=e.data("bs.modal")?"toggle":a.extend({remote:!/#/.test(d)&&d},e.data(),c.data());c.is("a")&&b.preventDefault(),e.modal(f,this).one("hide",function(){c.is(":visible")&&c.focus()})}),a(document).on("show.bs.modal",".modal",function(){a(document.body).addClass("modal-open")}).on("hidden.bs.modal",".modal",function(){a(document.body).removeClass("modal-open")})}(jQuery),+function(a){"use strict";var b=function(a,b){this.type=this.options=this.enabled=this.timeout=this.hoverState=this.$element=null,this.init("tooltip",a,b)};b.DEFAULTS={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,container:!1},b.prototype.init=function(b,c,d){this.enabled=!0,this.type=b,this.$element=a(c),this.options=this.getOptions(d);for(var e=this.options.trigger.split(" "),f=e.length;f--;){var g=e[f];if("click"==g)this.$element.on("click."+this.type,this.options.selector,a.proxy(this.toggle,this));else if("manual"!=g){var h="hover"==g?"mouseenter":"focusin",i="hover"==g?"mouseleave":"focusout";this.$element.on(h+"."+this.type,this.options.selector,a.proxy(this.enter,this)),this.$element.on(i+"."+this.type,this.options.selector,a.proxy(this.leave,this))}}this.options.selector?this._options=a.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},b.prototype.getDefaults=function(){return b.DEFAULTS},b.prototype.getOptions=function(b){return b=a.extend({},this.getDefaults(),this.$element.data(),b),b.delay&&"number"==typeof b.delay&&(b.delay={show:b.delay,hide:b.delay}),b},b.prototype.getDelegateOptions=function(){var b={},c=this.getDefaults();return this._options&&a.each(this._options,function(a,d){c[a]!=d&&(b[a]=d)}),b},b.prototype.enter=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget)[this.type](this.getDelegateOptions()).data("bs."+this.type);return clearTimeout(c.timeout),c.hoverState="in",c.options.delay&&c.options.delay.show?void(c.timeout=setTimeout(function(){"in"==c.hoverState&&c.show()},c.options.delay.show)):c.show()},b.prototype.leave=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget)[this.type](this.getDelegateOptions()).data("bs."+this.type);return 
clearTimeout(c.timeout),c.hoverState="out",c.options.delay&&c.options.delay.hide?void(c.timeout=setTimeout(function(){"out"==c.hoverState&&c.hide()},c.options.delay.hide)):c.hide()},b.prototype.show=function(){var b=a.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){if(this.$element.trigger(b),b.isDefaultPrevented())return;var c=this,d=this.tip();this.setContent(),this.options.animation&&d.addClass("fade");var e="function"==typeof this.options.placement?this.options.placement.call(this,d[0],this.$element[0]):this.options.placement,f=/\s?auto?\s?/i,g=f.test(e);g&&(e=e.replace(f,"")||"top"),d.detach().css({top:0,left:0,display:"block"}).addClass(e),this.options.container?d.appendTo(this.options.container):d.insertAfter(this.$element);var h=this.getPosition(),i=d[0].offsetWidth,j=d[0].offsetHeight;if(g){var k=this.$element.parent(),l=e,m=document.documentElement.scrollTop||document.body.scrollTop,n="body"==this.options.container?window.innerWidth:k.outerWidth(),o="body"==this.options.container?window.innerHeight:k.outerHeight(),p="body"==this.options.container?0:k.offset().left;e="bottom"==e&&h.top+h.height+j-m>o?"top":"top"==e&&h.top-m-j<0?"bottom":"right"==e&&h.right+i>n?"left":"left"==e&&h.left-i<p?"right":e,d.removeClass(l).addClass(e)}var q=this.getCalculatedOffset(e,h,i,j);this.applyPlacement(q,e),this.hoverState=null;var r=function(){c.$element.trigger("shown.bs."+c.type)};a.support.transition&&this.$tip.hasClass("fade")?d.one(a.support.transition.end,r).emulateTransitionEnd(150):r()}},b.prototype.applyPlacement=function(b,c){var d,e=this.tip(),f=e[0].offsetWidth,g=e[0].offsetHeight,h=parseInt(e.css("margin-top"),10),i=parseInt(e.css("margin-left"),10);isNaN(h)&&(h=0),isNaN(i)&&(i=0),b.top=b.top+h,b.left=b.left+i,a.offset.setOffset(e[0],a.extend({using:function(a){e.css({top:Math.round(a.top),left:Math.round(a.left)})}},b),0),e.addClass("in");var j=e[0].offsetWidth,k=e[0].offsetHeight;if("top"==c&&k!=g&&(d=!0,b.top=b.top+g-k),/bottom|top/.test(c)){var l=0;b.left<0&&(l=-2*b.left,b.left=0,e.offset(b),j=e[0].offsetWidth,k=e[0].offsetHeight),this.replaceArrow(l-f+j,j,"left")}else this.replaceArrow(k-g,k,"top");d&&e.offset(b)},b.prototype.replaceArrow=function(a,b,c){this.arrow().css(c,a?50*(1-a/b)+"%":"")},b.prototype.setContent=function(){var a=this.tip(),b=this.getTitle();a.find(".tooltip-inner")[this.options.html?"html":"text"](b),a.removeClass("fade in top bottom left right")},b.prototype.hide=function(){function b(){"in"!=c.hoverState&&d.detach(),c.$element.trigger("hidden.bs."+c.type)}var c=this,d=this.tip(),e=a.Event("hide.bs."+this.type);return this.$element.trigger(e),e.isDefaultPrevented()?void 0:(d.removeClass("in"),a.support.transition&&this.$tip.hasClass("fade")?d.one(a.support.transition.end,b).emulateTransitionEnd(150):b(),this.hoverState=null,this)},b.prototype.fixTitle=function(){var a=this.$element;(a.attr("title")||"string"!=typeof a.attr("data-original-title"))&&a.attr("data-original-title",a.attr("title")||"").attr("title","")},b.prototype.hasContent=function(){return this.getTitle()},b.prototype.getPosition=function(){var b=this.$element[0];return a.extend({},"function"==typeof 
b.getBoundingClientRect?b.getBoundingClientRect():{width:b.offsetWidth,height:b.offsetHeight},this.$element.offset())},b.prototype.getCalculatedOffset=function(a,b,c,d){return"bottom"==a?{top:b.top+b.height,left:b.left+b.width/2-c/2}:"top"==a?{top:b.top-d,left:b.left+b.width/2-c/2}:"left"==a?{top:b.top+b.height/2-d/2,left:b.left-c}:{top:b.top+b.height/2-d/2,left:b.left+b.width}},b.prototype.getTitle=function(){var a,b=this.$element,c=this.options;return a=b.attr("data-original-title")||("function"==typeof c.title?c.title.call(b[0]):c.title)},b.prototype.tip=function(){return this.$tip=this.$tip||a(this.options.template)},b.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},b.prototype.validate=function(){this.$element[0].parentNode||(this.hide(),this.$element=null,this.options=null)},b.prototype.enable=function(){this.enabled=!0},b.prototype.disable=function(){this.enabled=!1},b.prototype.toggleEnabled=function(){this.enabled=!this.enabled},b.prototype.toggle=function(b){var c=b?a(b.currentTarget)[this.type](this.getDelegateOptions()).data("bs."+this.type):this;c.tip().hasClass("in")?c.leave(c):c.enter(c)},b.prototype.destroy=function(){clearTimeout(this.timeout),this.hide().$element.off("."+this.type).removeData("bs."+this.type)};var c=a.fn.tooltip;a.fn.tooltip=function(c){return this.each(function(){var d=a(this),e=d.data("bs.tooltip"),f="object"==typeof c&&c;(e||"destroy"!=c)&&(e||d.data("bs.tooltip",e=new b(this,f)),"string"==typeof c&&e[c]())})},a.fn.tooltip.Constructor=b,a.fn.tooltip.noConflict=function(){return a.fn.tooltip=c,this}}(jQuery),+function(a){"use strict";var b=function(a,b){this.init("popover",a,b)};if(!a.fn.tooltip)throw new Error("Popover requires tooltip.js");b.DEFAULTS=a.extend({},a.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",template:'<div class="popover"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),b.prototype=a.extend({},a.fn.tooltip.Constructor.prototype),b.prototype.constructor=b,b.prototype.getDefaults=function(){return b.DEFAULTS},b.prototype.setContent=function(){var a=this.tip(),b=this.getTitle(),c=this.getContent();a.find(".popover-title")[this.options.html?"html":"text"](b),a.find(".popover-content")[this.options.html?"string"==typeof c?"html":"append":"text"](c),a.removeClass("fade top bottom left right in"),a.find(".popover-title").html()||a.find(".popover-title").hide()},b.prototype.hasContent=function(){return this.getTitle()||this.getContent()},b.prototype.getContent=function(){var a=this.$element,b=this.options;return a.attr("data-content")||("function"==typeof b.content?b.content.call(a[0]):b.content)},b.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")},b.prototype.tip=function(){return this.$tip||(this.$tip=a(this.options.template)),this.$tip};var c=a.fn.popover;a.fn.popover=function(c){return this.each(function(){var d=a(this),e=d.data("bs.popover"),f="object"==typeof c&&c;(e||"destroy"!=c)&&(e||d.data("bs.popover",e=new b(this,f)),"string"==typeof c&&e[c]())})},a.fn.popover.Constructor=b,a.fn.popover.noConflict=function(){return a.fn.popover=c,this}}(jQuery),+function(a){"use strict";function b(c,d){var 
e,f=a.proxy(this.process,this);this.$element=a(a(c).is("body")?window:c),this.$body=a("body"),this.$scrollElement=this.$element.on("scroll.bs.scroll-spy.data-api",f),this.options=a.extend({},b.DEFAULTS,d),this.selector=(this.options.target||(e=a(c).attr("href"))&&e.replace(/.*(?=#[^\s]+$)/,"")||"")+" .nav li > a",this.offsets=a([]),this.targets=a([]),this.activeTarget=null,this.refresh(),this.process()}b.DEFAULTS={offset:10},b.prototype.refresh=function(){var b=this.$element[0]==window?"offset":"position";this.offsets=a([]),this.targets=a([]);{var c=this;this.$body.find(this.selector).map(function(){var d=a(this),e=d.data("target")||d.attr("href"),f=/^#./.test(e)&&a(e);return f&&f.length&&f.is(":visible")&&[[f[b]().top+(!a.isWindow(c.$scrollElement.get(0))&&c.$scrollElement.scrollTop()),e]]||null}).sort(function(a,b){return a[0]-b[0]}).each(function(){c.offsets.push(this[0]),c.targets.push(this[1])})}},b.prototype.process=function(){var a,b=this.$scrollElement.scrollTop()+this.options.offset,c=this.$scrollElement[0].scrollHeight||this.$body[0].scrollHeight,d=c-this.$scrollElement.height(),e=this.offsets,f=this.targets,g=this.activeTarget;if(b>=d)return g!=(a=f.last()[0])&&this.activate(a);if(g&&b<=e[0])return g!=(a=f[0])&&this.activate(a);for(a=e.length;a--;)g!=f[a]&&b>=e[a]&&(!e[a+1]||b<=e[a+1])&&this.activate(f[a])},b.prototype.activate=function(b){this.activeTarget=b,a(this.selector).parentsUntil(this.options.target,".active").removeClass("active");var c=this.selector+'[data-target="'+b+'"],'+this.selector+'[href="'+b+'"]',d=a(c).parents("li").addClass("active");d.parent(".dropdown-menu").length&&(d=d.closest("li.dropdown").addClass("active")),d.trigger("activate.bs.scrollspy")};var c=a.fn.scrollspy;a.fn.scrollspy=function(c){return this.each(function(){var d=a(this),e=d.data("bs.scrollspy"),f="object"==typeof c&&c;e||d.data("bs.scrollspy",e=new b(this,f)),"string"==typeof c&&e[c]()})},a.fn.scrollspy.Constructor=b,a.fn.scrollspy.noConflict=function(){return a.fn.scrollspy=c,this},a(window).on("load",function(){a('[data-spy="scroll"]').each(function(){var b=a(this);b.scrollspy(b.data())})})}(jQuery),+function(a){"use strict";var b=function(b){this.element=a(b)};b.prototype.show=function(){var b=this.element,c=b.closest("ul:not(.dropdown-menu)"),d=b.data("target");if(d||(d=b.attr("href"),d=d&&d.replace(/.*(?=#[^\s]*$)/,"")),!b.parent("li").hasClass("active")){var e=c.find(".active:last a")[0],f=a.Event("show.bs.tab",{relatedTarget:e});if(b.trigger(f),!f.isDefaultPrevented()){var g=a(d);this.activate(b.parent("li"),c),this.activate(g,g.parent(),function(){b.trigger({type:"shown.bs.tab",relatedTarget:e})})}}},b.prototype.activate=function(b,c,d){function e(){f.removeClass("active").find("> .dropdown-menu > .active").removeClass("active"),b.addClass("active"),g?(b[0].offsetWidth,b.addClass("in")):b.removeClass("fade"),b.parent(".dropdown-menu")&&b.closest("li.dropdown").addClass("active"),d&&d()}var f=c.find("> .active"),g=d&&a.support.transition&&f.hasClass("fade");g?f.one(a.support.transition.end,e).emulateTransitionEnd(150):e(),f.removeClass("in")};var c=a.fn.tab;a.fn.tab=function(c){return this.each(function(){var d=a(this),e=d.data("bs.tab");e||d.data("bs.tab",e=new b(this)),"string"==typeof c&&e[c]()})},a.fn.tab.Constructor=b,a.fn.tab.noConflict=function(){return a.fn.tab=c,this},a(document).on("click.bs.tab.data-api",'[data-toggle="tab"], [data-toggle="pill"]',function(b){b.preventDefault(),a(this).tab("show")})}(jQuery),+function(a){"use strict";var 
b=function(c,d){this.options=a.extend({},b.DEFAULTS,d),this.$window=a(window).on("scroll.bs.affix.data-api",a.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",a.proxy(this.checkPositionWithEventLoop,this)),this.$element=a(c),this.affixed=this.unpin=this.pinnedOffset=null,this.checkPosition()};b.RESET="affix affix-top affix-bottom",b.DEFAULTS={offset:0},b.prototype.getPinnedOffset=function(){if(this.pinnedOffset)return this.pinnedOffset;this.$element.removeClass(b.RESET).addClass("affix");var a=this.$window.scrollTop(),c=this.$element.offset();return this.pinnedOffset=c.top-a},b.prototype.checkPositionWithEventLoop=function(){setTimeout(a.proxy(this.checkPosition,this),1)},b.prototype.checkPosition=function(){if(this.$element.is(":visible")){var c=a(document).height(),d=this.$window.scrollTop(),e=this.$element.offset(),f=this.options.offset,g=f.top,h=f.bottom;"top"==this.affixed&&(e.top+=d),"object"!=typeof f&&(h=g=f),"function"==typeof g&&(g=f.top(this.$element)),"function"==typeof h&&(h=f.bottom(this.$element));var i=null!=this.unpin&&d+this.unpin<=e.top?!1:null!=h&&e.top+this.$element.height()>=c-h?"bottom":null!=g&&g>=d?"top":!1;if(this.affixed!==i){this.unpin&&this.$element.css("top","");var j="affix"+(i?"-"+i:""),k=a.Event(j+".bs.affix");this.$element.trigger(k),k.isDefaultPrevented()||(this.affixed=i,this.unpin="bottom"==i?this.getPinnedOffset():null,this.$element.removeClass(b.RESET).addClass(j).trigger(a.Event(j.replace("affix","affixed"))),"bottom"==i&&this.$element.offset({top:c-h-this.$element.height()}))}}};var c=a.fn.affix;a.fn.affix=function(c){return this.each(function(){var d=a(this),e=d.data("bs.affix"),f="object"==typeof c&&c;e||d.data("bs.affix",e=new b(this,f)),"string"==typeof c&&e[c]()})},a.fn.affix.Constructor=b,a.fn.affix.noConflict=function(){return a.fn.affix=c,this},a(window).on("load",function(){a('[data-spy="affix"]').each(function(){var b=a(this),c=b.data();c.offset=c.offset||{},c.offsetBottom&&(c.offset.bottom=c.offsetBottom),c.offsetTop&&(c.offset.top=c.offsetTop),b.affix(c)})})}(jQuery);
\ No newline at end of file
diff --git a/doc/js/jquery.min.js b/doc/js/jquery.min.js
new file mode 100644 (file)
index 0000000..006e953
--- /dev/null
+++ b/doc/js/jquery.min.js
@@ -0,0 +1,5 @@
+/*! jQuery v1.9.1 | (c) 2005, 2012 jQuery Foundation, Inc. | jquery.org/license
+//@ sourceMappingURL=jquery.min.map
+*/(function(e,t){var n,r,i=typeof t,o=e.document,a=e.location,s=e.jQuery,u=e.$,l={},c=[],p="1.9.1",f=c.concat,d=c.push,h=c.slice,g=c.indexOf,m=l.toString,y=l.hasOwnProperty,v=p.trim,b=function(e,t){return new b.fn.init(e,t,r)},x=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,w=/\S+/g,T=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,N=/^(?:(<[\w\W]+>)[^>]*|#([\w-]*))$/,C=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,k=/^[\],:{}\s]*$/,E=/(?:^|:|,)(?:\s*\[)+/g,S=/\\(?:["\\\/bfnrt]|u[\da-fA-F]{4})/g,A=/"[^"\\\r\n]*"|true|false|null|-?(?:\d+\.|)\d+(?:[eE][+-]?\d+|)/g,j=/^-ms-/,D=/-([\da-z])/gi,L=function(e,t){return t.toUpperCase()},H=function(e){(o.addEventListener||"load"===e.type||"complete"===o.readyState)&&(q(),b.ready())},q=function(){o.addEventListener?(o.removeEventListener("DOMContentLoaded",H,!1),e.removeEventListener("load",H,!1)):(o.detachEvent("onreadystatechange",H),e.detachEvent("onload",H))};b.fn=b.prototype={jquery:p,constructor:b,init:function(e,n,r){var i,a;if(!e)return this;if("string"==typeof e){if(i="<"===e.charAt(0)&&">"===e.charAt(e.length-1)&&e.length>=3?[null,e,null]:N.exec(e),!i||!i[1]&&n)return!n||n.jquery?(n||r).find(e):this.constructor(n).find(e);if(i[1]){if(n=n instanceof b?n[0]:n,b.merge(this,b.parseHTML(i[1],n&&n.nodeType?n.ownerDocument||n:o,!0)),C.test(i[1])&&b.isPlainObject(n))for(i in n)b.isFunction(this[i])?this[i](n[i]):this.attr(i,n[i]);return this}if(a=o.getElementById(i[2]),a&&a.parentNode){if(a.id!==i[2])return r.find(e);this.length=1,this[0]=a}return this.context=o,this.selector=e,this}return e.nodeType?(this.context=this[0]=e,this.length=1,this):b.isFunction(e)?r.ready(e):(e.selector!==t&&(this.selector=e.selector,this.context=e.context),b.makeArray(e,this))},selector:"",length:0,size:function(){return this.length},toArray:function(){return h.call(this)},get:function(e){return null==e?this.toArray():0>e?this[this.length+e]:this[e]},pushStack:function(e){var t=b.merge(this.constructor(),e);return t.prevObject=this,t.context=this.context,t},each:function(e,t){return b.each(this,e,t)},ready:function(e){return b.ready.promise().done(e),this},slice:function(){return this.pushStack(h.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(0>e?t:0);return this.pushStack(n>=0&&t>n?[this[n]]:[])},map:function(e){return this.pushStack(b.map(this,function(t,n){return e.call(t,n,t)}))},end:function(){return this.prevObject||this.constructor(null)},push:d,sort:[].sort,splice:[].splice},b.fn.init.prototype=b.fn,b.extend=b.fn.extend=function(){var e,n,r,i,o,a,s=arguments[0]||{},u=1,l=arguments.length,c=!1;for("boolean"==typeof s&&(c=s,s=arguments[1]||{},u=2),"object"==typeof s||b.isFunction(s)||(s={}),l===u&&(s=this,--u);l>u;u++)if(null!=(o=arguments[u]))for(i in o)e=s[i],r=o[i],s!==r&&(c&&r&&(b.isPlainObject(r)||(n=b.isArray(r)))?(n?(n=!1,a=e&&b.isArray(e)?e:[]):a=e&&b.isPlainObject(e)?e:{},s[i]=b.extend(c,a,r)):r!==t&&(s[i]=r));return s},b.extend({noConflict:function(t){return e.$===b&&(e.$=u),t&&e.jQuery===b&&(e.jQuery=s),b},isReady:!1,readyWait:1,holdReady:function(e){e?b.readyWait++:b.ready(!0)},ready:function(e){if(e===!0?!--b.readyWait:!b.isReady){if(!o.body)return setTimeout(b.ready);b.isReady=!0,e!==!0&&--b.readyWait>0||(n.resolveWith(o,[b]),b.fn.trigger&&b(o).trigger("ready").off("ready"))}},isFunction:function(e){return"function"===b.type(e)},isArray:Array.isArray||function(e){return"array"===b.type(e)},isWindow:function(e){return 
null!=e&&e==e.window},isNumeric:function(e){return!isNaN(parseFloat(e))&&isFinite(e)},type:function(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?l[m.call(e)]||"object":typeof e},isPlainObject:function(e){if(!e||"object"!==b.type(e)||e.nodeType||b.isWindow(e))return!1;try{if(e.constructor&&!y.call(e,"constructor")&&!y.call(e.constructor.prototype,"isPrototypeOf"))return!1}catch(n){return!1}var r;for(r in e);return r===t||y.call(e,r)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},error:function(e){throw Error(e)},parseHTML:function(e,t,n){if(!e||"string"!=typeof e)return null;"boolean"==typeof t&&(n=t,t=!1),t=t||o;var r=C.exec(e),i=!n&&[];return r?[t.createElement(r[1])]:(r=b.buildFragment([e],t,i),i&&b(i).remove(),b.merge([],r.childNodes))},parseJSON:function(n){return e.JSON&&e.JSON.parse?e.JSON.parse(n):null===n?n:"string"==typeof n&&(n=b.trim(n),n&&k.test(n.replace(S,"@").replace(A,"]").replace(E,"")))?Function("return "+n)():(b.error("Invalid JSON: "+n),t)},parseXML:function(n){var r,i;if(!n||"string"!=typeof n)return null;try{e.DOMParser?(i=new DOMParser,r=i.parseFromString(n,"text/xml")):(r=new ActiveXObject("Microsoft.XMLDOM"),r.async="false",r.loadXML(n))}catch(o){r=t}return r&&r.documentElement&&!r.getElementsByTagName("parsererror").length||b.error("Invalid XML: "+n),r},noop:function(){},globalEval:function(t){t&&b.trim(t)&&(e.execScript||function(t){e.eval.call(e,t)})(t)},camelCase:function(e){return e.replace(j,"ms-").replace(D,L)},nodeName:function(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()},each:function(e,t,n){var r,i=0,o=e.length,a=M(e);if(n){if(a){for(;o>i;i++)if(r=t.apply(e[i],n),r===!1)break}else for(i in e)if(r=t.apply(e[i],n),r===!1)break}else if(a){for(;o>i;i++)if(r=t.call(e[i],i,e[i]),r===!1)break}else for(i in e)if(r=t.call(e[i],i,e[i]),r===!1)break;return e},trim:v&&!v.call("\ufeff\u00a0")?function(e){return null==e?"":v.call(e)}:function(e){return null==e?"":(e+"").replace(T,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(M(Object(e))?b.merge(n,"string"==typeof e?[e]:e):d.call(n,e)),n},inArray:function(e,t,n){var r;if(t){if(g)return g.call(t,e,n);for(r=t.length,n=n?0>n?Math.max(0,r+n):n:0;r>n;n++)if(n in t&&t[n]===e)return n}return-1},merge:function(e,n){var r=n.length,i=e.length,o=0;if("number"==typeof r)for(;r>o;o++)e[i++]=n[o];else while(n[o]!==t)e[i++]=n[o++];return e.length=i,e},grep:function(e,t,n){var r,i=[],o=0,a=e.length;for(n=!!n;a>o;o++)r=!!t(e[o],o),n!==r&&i.push(e[o]);return i},map:function(e,t,n){var r,i=0,o=e.length,a=M(e),s=[];if(a)for(;o>i;i++)r=t(e[i],i,n),null!=r&&(s[s.length]=r);else for(i in e)r=t(e[i],i,n),null!=r&&(s[s.length]=r);return f.apply([],s)},guid:1,proxy:function(e,n){var r,i,o;return"string"==typeof n&&(o=e[n],n=e,e=o),b.isFunction(e)?(r=h.call(arguments,2),i=function(){return e.apply(n||this,r.concat(h.call(arguments)))},i.guid=e.guid=e.guid||b.guid++,i):t},access:function(e,n,r,i,o,a,s){var u=0,l=e.length,c=null==r;if("object"===b.type(r)){o=!0;for(u in r)b.access(e,n,u,r[u],!0,a,s)}else if(i!==t&&(o=!0,b.isFunction(i)||(s=!0),c&&(s?(n.call(e,i),n=null):(c=n,n=function(e,t,n){return c.call(b(e),n)})),n))for(;l>u;u++)n(e[u],r,s?i:i.call(e[u],u,n(e[u],r)));return o?e:c?n.call(e):l?n(e[0],r):a},now:function(){return(new Date).getTime()}}),b.ready.promise=function(t){if(!n)if(n=b.Deferred(),"complete"===o.readyState)setTimeout(b.ready);else 
if(o.addEventListener)o.addEventListener("DOMContentLoaded",H,!1),e.addEventListener("load",H,!1);else{o.attachEvent("onreadystatechange",H),e.attachEvent("onload",H);var r=!1;try{r=null==e.frameElement&&o.documentElement}catch(i){}r&&r.doScroll&&function a(){if(!b.isReady){try{r.doScroll("left")}catch(e){return setTimeout(a,50)}q(),b.ready()}}()}return n.promise(t)},b.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(e,t){l["[object "+t+"]"]=t.toLowerCase()});function M(e){var t=e.length,n=b.type(e);return b.isWindow(e)?!1:1===e.nodeType&&t?!0:"array"===n||"function"!==n&&(0===t||"number"==typeof t&&t>0&&t-1 in e)}r=b(o);var _={};function F(e){var t=_[e]={};return b.each(e.match(w)||[],function(e,n){t[n]=!0}),t}b.Callbacks=function(e){e="string"==typeof e?_[e]||F(e):b.extend({},e);var n,r,i,o,a,s,u=[],l=!e.once&&[],c=function(t){for(r=e.memory&&t,i=!0,a=s||0,s=0,o=u.length,n=!0;u&&o>a;a++)if(u[a].apply(t[0],t[1])===!1&&e.stopOnFalse){r=!1;break}n=!1,u&&(l?l.length&&c(l.shift()):r?u=[]:p.disable())},p={add:function(){if(u){var t=u.length;(function i(t){b.each(t,function(t,n){var r=b.type(n);"function"===r?e.unique&&p.has(n)||u.push(n):n&&n.length&&"string"!==r&&i(n)})})(arguments),n?o=u.length:r&&(s=t,c(r))}return this},remove:function(){return u&&b.each(arguments,function(e,t){var r;while((r=b.inArray(t,u,r))>-1)u.splice(r,1),n&&(o>=r&&o--,a>=r&&a--)}),this},has:function(e){return e?b.inArray(e,u)>-1:!(!u||!u.length)},empty:function(){return u=[],this},disable:function(){return u=l=r=t,this},disabled:function(){return!u},lock:function(){return l=t,r||p.disable(),this},locked:function(){return!l},fireWith:function(e,t){return t=t||[],t=[e,t.slice?t.slice():t],!u||i&&!l||(n?l.push(t):c(t)),this},fire:function(){return p.fireWith(this,arguments),this},fired:function(){return!!i}};return p},b.extend({Deferred:function(e){var t=[["resolve","done",b.Callbacks("once memory"),"resolved"],["reject","fail",b.Callbacks("once memory"),"rejected"],["notify","progress",b.Callbacks("memory")]],n="pending",r={state:function(){return n},always:function(){return i.done(arguments).fail(arguments),this},then:function(){var e=arguments;return b.Deferred(function(n){b.each(t,function(t,o){var a=o[0],s=b.isFunction(e[t])&&e[t];i[o[1]](function(){var e=s&&s.apply(this,arguments);e&&b.isFunction(e.promise)?e.promise().done(n.resolve).fail(n.reject).progress(n.notify):n[a+"With"](this===r?n.promise():this,s?[e]:arguments)})}),e=null}).promise()},promise:function(e){return null!=e?b.extend(e,r):r}},i={};return r.pipe=r.then,b.each(t,function(e,o){var a=o[2],s=o[3];r[o[1]]=a.add,s&&a.add(function(){n=s},t[1^e][2].disable,t[2][2].lock),i[o[0]]=function(){return i[o[0]+"With"](this===i?r:this,arguments),this},i[o[0]+"With"]=a.fireWith}),r.promise(i),e&&e.call(i,i),i},when:function(e){var t=0,n=h.call(arguments),r=n.length,i=1!==r||e&&b.isFunction(e.promise)?r:0,o=1===i?e:b.Deferred(),a=function(e,t,n){return function(r){t[e]=this,n[e]=arguments.length>1?h.call(arguments):r,n===s?o.notifyWith(t,n):--i||o.resolveWith(t,n)}},s,u,l;if(r>1)for(s=Array(r),u=Array(r),l=Array(r);r>t;t++)n[t]&&b.isFunction(n[t].promise)?n[t].promise().done(a(t,l,n)).fail(o.reject).progress(a(t,u,s)):--i;return i||o.resolveWith(l,n),o.promise()}}),b.support=function(){var t,n,r,a,s,u,l,c,p,f,d=o.createElement("div");if(d.setAttribute("className","t"),d.innerHTML="  <link/><table></table><a href='/a'>a</a><input 
type='checkbox'/>",n=d.getElementsByTagName("*"),r=d.getElementsByTagName("a")[0],!n||!r||!n.length)return{};s=o.createElement("select"),l=s.appendChild(o.createElement("option")),a=d.getElementsByTagName("input")[0],r.style.cssText="top:1px;float:left;opacity:.5",t={getSetAttribute:"t"!==d.className,leadingWhitespace:3===d.firstChild.nodeType,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/top/.test(r.getAttribute("style")),hrefNormalized:"/a"===r.getAttribute("href"),opacity:/^0.5/.test(r.style.opacity),cssFloat:!!r.style.cssFloat,checkOn:!!a.value,optSelected:l.selected,enctype:!!o.createElement("form").enctype,html5Clone:"<:nav></:nav>"!==o.createElement("nav").cloneNode(!0).outerHTML,boxModel:"CSS1Compat"===o.compatMode,deleteExpando:!0,noCloneEvent:!0,inlineBlockNeedsLayout:!1,shrinkWrapBlocks:!1,reliableMarginRight:!0,boxSizingReliable:!0,pixelPosition:!1},a.checked=!0,t.noCloneChecked=a.cloneNode(!0).checked,s.disabled=!0,t.optDisabled=!l.disabled;try{delete d.test}catch(h){t.deleteExpando=!1}a=o.createElement("input"),a.setAttribute("value",""),t.input=""===a.getAttribute("value"),a.value="t",a.setAttribute("type","radio"),t.radioValue="t"===a.value,a.setAttribute("checked","t"),a.setAttribute("name","t"),u=o.createDocumentFragment(),u.appendChild(a),t.appendChecked=a.checked,t.checkClone=u.cloneNode(!0).cloneNode(!0).lastChild.checked,d.attachEvent&&(d.attachEvent("onclick",function(){t.noCloneEvent=!1}),d.cloneNode(!0).click());for(f in{submit:!0,change:!0,focusin:!0})d.setAttribute(c="on"+f,"t"),t[f+"Bubbles"]=c in e||d.attributes[c].expando===!1;return d.style.backgroundClip="content-box",d.cloneNode(!0).style.backgroundClip="",t.clearCloneStyle="content-box"===d.style.backgroundClip,b(function(){var n,r,a,s="padding:0;margin:0;border:0;display:block;box-sizing:content-box;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;",u=o.getElementsByTagName("body")[0];u&&(n=o.createElement("div"),n.style.cssText="border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px",u.appendChild(n).appendChild(d),d.innerHTML="<table><tr><td></td><td>t</td></tr></table>",a=d.getElementsByTagName("td"),a[0].style.cssText="padding:0;margin:0;border:0;display:none",p=0===a[0].offsetHeight,a[0].style.display="",a[1].style.display="none",t.reliableHiddenOffsets=p&&0===a[0].offsetHeight,d.innerHTML="",d.style.cssText="box-sizing:border-box;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;",t.boxSizing=4===d.offsetWidth,t.doesNotIncludeMarginInBodyOffset=1!==u.offsetTop,e.getComputedStyle&&(t.pixelPosition="1%"!==(e.getComputedStyle(d,null)||{}).top,t.boxSizingReliable="4px"===(e.getComputedStyle(d,null)||{width:"4px"}).width,r=d.appendChild(o.createElement("div")),r.style.cssText=d.style.cssText=s,r.style.marginRight=r.style.width="0",d.style.width="1px",t.reliableMarginRight=!parseFloat((e.getComputedStyle(r,null)||{}).marginRight)),typeof d.style.zoom!==i&&(d.innerHTML="",d.style.cssText=s+"width:1px;padding:1px;display:inline;zoom:1",t.inlineBlockNeedsLayout=3===d.offsetWidth,d.style.display="block",d.innerHTML="<div></div>",d.firstChild.style.width="5px",t.shrinkWrapBlocks=3!==d.offsetWidth,t.inlineBlockNeedsLayout&&(u.style.zoom=1)),u.removeChild(n),n=d=a=r=null)}),n=s=u=l=r=a=null,t}();var O=/(?:\{[\s\S]*\}|\[[\s\S]*\])$/,B=/([A-Z])/g;function P(e,n,r,i){if(b.acceptData(e)){var o,a,s=b.expando,u="string"==typeof 
n,l=e.nodeType,p=l?b.cache:e,f=l?e[s]:e[s]&&s;if(f&&p[f]&&(i||p[f].data)||!u||r!==t)return f||(l?e[s]=f=c.pop()||b.guid++:f=s),p[f]||(p[f]={},l||(p[f].toJSON=b.noop)),("object"==typeof n||"function"==typeof n)&&(i?p[f]=b.extend(p[f],n):p[f].data=b.extend(p[f].data,n)),o=p[f],i||(o.data||(o.data={}),o=o.data),r!==t&&(o[b.camelCase(n)]=r),u?(a=o[n],null==a&&(a=o[b.camelCase(n)])):a=o,a}}function R(e,t,n){if(b.acceptData(e)){var r,i,o,a=e.nodeType,s=a?b.cache:e,u=a?e[b.expando]:b.expando;if(s[u]){if(t&&(o=n?s[u]:s[u].data)){b.isArray(t)?t=t.concat(b.map(t,b.camelCase)):t in o?t=[t]:(t=b.camelCase(t),t=t in o?[t]:t.split(" "));for(r=0,i=t.length;i>r;r++)delete o[t[r]];if(!(n?$:b.isEmptyObject)(o))return}(n||(delete s[u].data,$(s[u])))&&(a?b.cleanData([e],!0):b.support.deleteExpando||s!=s.window?delete s[u]:s[u]=null)}}}b.extend({cache:{},expando:"jQuery"+(p+Math.random()).replace(/\D/g,""),noData:{embed:!0,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:!0},hasData:function(e){return e=e.nodeType?b.cache[e[b.expando]]:e[b.expando],!!e&&!$(e)},data:function(e,t,n){return P(e,t,n)},removeData:function(e,t){return R(e,t)},_data:function(e,t,n){return P(e,t,n,!0)},_removeData:function(e,t){return R(e,t,!0)},acceptData:function(e){if(e.nodeType&&1!==e.nodeType&&9!==e.nodeType)return!1;var t=e.nodeName&&b.noData[e.nodeName.toLowerCase()];return!t||t!==!0&&e.getAttribute("classid")===t}}),b.fn.extend({data:function(e,n){var r,i,o=this[0],a=0,s=null;if(e===t){if(this.length&&(s=b.data(o),1===o.nodeType&&!b._data(o,"parsedAttrs"))){for(r=o.attributes;r.length>a;a++)i=r[a].name,i.indexOf("data-")||(i=b.camelCase(i.slice(5)),W(o,i,s[i]));b._data(o,"parsedAttrs",!0)}return s}return"object"==typeof e?this.each(function(){b.data(this,e)}):b.access(this,function(n){return n===t?o?W(o,e,b.data(o,e)):null:(this.each(function(){b.data(this,e,n)}),t)},null,n,arguments.length>1,null,!0)},removeData:function(e){return this.each(function(){b.removeData(this,e)})}});function W(e,n,r){if(r===t&&1===e.nodeType){var i="data-"+n.replace(B,"-$1").toLowerCase();if(r=e.getAttribute(i),"string"==typeof r){try{r="true"===r?!0:"false"===r?!1:"null"===r?null:+r+""===r?+r:O.test(r)?b.parseJSON(r):r}catch(o){}b.data(e,n,r)}else r=t}return r}function $(e){var t;for(t in e)if(("data"!==t||!b.isEmptyObject(e[t]))&&"toJSON"!==t)return!1;return!0}b.extend({queue:function(e,n,r){var i;return e?(n=(n||"fx")+"queue",i=b._data(e,n),r&&(!i||b.isArray(r)?i=b._data(e,n,b.makeArray(r)):i.push(r)),i||[]):t},dequeue:function(e,t){t=t||"fx";var n=b.queue(e,t),r=n.length,i=n.shift(),o=b._queueHooks(e,t),a=function(){b.dequeue(e,t)};"inprogress"===i&&(i=n.shift(),r--),o.cur=i,i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,a,o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return b._data(e,n)||b._data(e,n,{empty:b.Callbacks("once memory").add(function(){b._removeData(e,t+"queue"),b._removeData(e,n)})})}}),b.fn.extend({queue:function(e,n){var r=2;return"string"!=typeof e&&(n=e,e="fx",r--),r>arguments.length?b.queue(this[0],e):n===t?this:this.each(function(){var t=b.queue(this,e,n);b._queueHooks(this,e),"fx"===e&&"inprogress"!==t[0]&&b.dequeue(this,e)})},dequeue:function(e){return this.each(function(){b.dequeue(this,e)})},delay:function(e,t){return e=b.fx?b.fx.speeds[e]||e:e,t=t||"fx",this.queue(t,function(t,n){var r=setTimeout(t,e);n.stop=function(){clearTimeout(r)}})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,n){var 
r,i=1,o=b.Deferred(),a=this,s=this.length,u=function(){--i||o.resolveWith(a,[a])};"string"!=typeof e&&(n=e,e=t),e=e||"fx";while(s--)r=b._data(a[s],e+"queueHooks"),r&&r.empty&&(i++,r.empty.add(u));return u(),o.promise(n)}});var I,z,X=/[\t\r\n]/g,U=/\r/g,V=/^(?:input|select|textarea|button|object)$/i,Y=/^(?:a|area)$/i,J=/^(?:checked|selected|autofocus|autoplay|async|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped)$/i,G=/^(?:checked|selected)$/i,Q=b.support.getSetAttribute,K=b.support.input;b.fn.extend({attr:function(e,t){return b.access(this,b.attr,e,t,arguments.length>1)},removeAttr:function(e){return this.each(function(){b.removeAttr(this,e)})},prop:function(e,t){return b.access(this,b.prop,e,t,arguments.length>1)},removeProp:function(e){return e=b.propFix[e]||e,this.each(function(){try{this[e]=t,delete this[e]}catch(n){}})},addClass:function(e){var t,n,r,i,o,a=0,s=this.length,u="string"==typeof e&&e;if(b.isFunction(e))return this.each(function(t){b(this).addClass(e.call(this,t,this.className))});if(u)for(t=(e||"").match(w)||[];s>a;a++)if(n=this[a],r=1===n.nodeType&&(n.className?(" "+n.className+" ").replace(X," "):" ")){o=0;while(i=t[o++])0>r.indexOf(" "+i+" ")&&(r+=i+" ");n.className=b.trim(r)}return this},removeClass:function(e){var t,n,r,i,o,a=0,s=this.length,u=0===arguments.length||"string"==typeof e&&e;if(b.isFunction(e))return this.each(function(t){b(this).removeClass(e.call(this,t,this.className))});if(u)for(t=(e||"").match(w)||[];s>a;a++)if(n=this[a],r=1===n.nodeType&&(n.className?(" "+n.className+" ").replace(X," "):"")){o=0;while(i=t[o++])while(r.indexOf(" "+i+" ")>=0)r=r.replace(" "+i+" "," ");n.className=e?b.trim(r):""}return this},toggleClass:function(e,t){var n=typeof e,r="boolean"==typeof t;return b.isFunction(e)?this.each(function(n){b(this).toggleClass(e.call(this,n,this.className,t),t)}):this.each(function(){if("string"===n){var o,a=0,s=b(this),u=t,l=e.match(w)||[];while(o=l[a++])u=r?u:!s.hasClass(o),s[u?"addClass":"removeClass"](o)}else(n===i||"boolean"===n)&&(this.className&&b._data(this,"__className__",this.className),this.className=this.className||e===!1?"":b._data(this,"__className__")||"")})},hasClass:function(e){var t=" "+e+" ",n=0,r=this.length;for(;r>n;n++)if(1===this[n].nodeType&&(" "+this[n].className+" ").replace(X," ").indexOf(t)>=0)return!0;return!1},val:function(e){var n,r,i,o=this[0];{if(arguments.length)return i=b.isFunction(e),this.each(function(n){var o,a=b(this);1===this.nodeType&&(o=i?e.call(this,n,a.val()):e,null==o?o="":"number"==typeof o?o+="":b.isArray(o)&&(o=b.map(o,function(e){return null==e?"":e+""})),r=b.valHooks[this.type]||b.valHooks[this.nodeName.toLowerCase()],r&&"set"in r&&r.set(this,o,"value")!==t||(this.value=o))});if(o)return r=b.valHooks[o.type]||b.valHooks[o.nodeName.toLowerCase()],r&&"get"in r&&(n=r.get(o,"value"))!==t?n:(n=o.value,"string"==typeof n?n.replace(U,""):null==n?"":n)}}}),b.extend({valHooks:{option:{get:function(e){var t=e.attributes.value;return!t||t.specified?e.value:e.text}},select:{get:function(e){var t,n,r=e.options,i=e.selectedIndex,o="select-one"===e.type||0>i,a=o?null:[],s=o?i+1:r.length,u=0>i?s:o?i:0;for(;s>u;u++)if(n=r[u],!(!n.selected&&u!==i||(b.support.optDisabled?n.disabled:null!==n.getAttribute("disabled"))||n.parentNode.disabled&&b.nodeName(n.parentNode,"optgroup"))){if(t=b(n).val(),o)return t;a.push(t)}return a},set:function(e,t){var n=b.makeArray(t);return 
b(e).find("option").each(function(){this.selected=b.inArray(b(this).val(),n)>=0}),n.length||(e.selectedIndex=-1),n}}},attr:function(e,n,r){var o,a,s,u=e.nodeType;if(e&&3!==u&&8!==u&&2!==u)return typeof e.getAttribute===i?b.prop(e,n,r):(a=1!==u||!b.isXMLDoc(e),a&&(n=n.toLowerCase(),o=b.attrHooks[n]||(J.test(n)?z:I)),r===t?o&&a&&"get"in o&&null!==(s=o.get(e,n))?s:(typeof e.getAttribute!==i&&(s=e.getAttribute(n)),null==s?t:s):null!==r?o&&a&&"set"in o&&(s=o.set(e,r,n))!==t?s:(e.setAttribute(n,r+""),r):(b.removeAttr(e,n),t))},removeAttr:function(e,t){var n,r,i=0,o=t&&t.match(w);if(o&&1===e.nodeType)while(n=o[i++])r=b.propFix[n]||n,J.test(n)?!Q&&G.test(n)?e[b.camelCase("default-"+n)]=e[r]=!1:e[r]=!1:b.attr(e,n,""),e.removeAttribute(Q?n:r)},attrHooks:{type:{set:function(e,t){if(!b.support.radioValue&&"radio"===t&&b.nodeName(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},propFix:{tabindex:"tabIndex",readonly:"readOnly","for":"htmlFor","class":"className",maxlength:"maxLength",cellspacing:"cellSpacing",cellpadding:"cellPadding",rowspan:"rowSpan",colspan:"colSpan",usemap:"useMap",frameborder:"frameBorder",contenteditable:"contentEditable"},prop:function(e,n,r){var i,o,a,s=e.nodeType;if(e&&3!==s&&8!==s&&2!==s)return a=1!==s||!b.isXMLDoc(e),a&&(n=b.propFix[n]||n,o=b.propHooks[n]),r!==t?o&&"set"in o&&(i=o.set(e,r,n))!==t?i:e[n]=r:o&&"get"in o&&null!==(i=o.get(e,n))?i:e[n]},propHooks:{tabIndex:{get:function(e){var n=e.getAttributeNode("tabindex");return n&&n.specified?parseInt(n.value,10):V.test(e.nodeName)||Y.test(e.nodeName)&&e.href?0:t}}}}),z={get:function(e,n){var r=b.prop(e,n),i="boolean"==typeof r&&e.getAttribute(n),o="boolean"==typeof r?K&&Q?null!=i:G.test(n)?e[b.camelCase("default-"+n)]:!!i:e.getAttributeNode(n);return o&&o.value!==!1?n.toLowerCase():t},set:function(e,t,n){return t===!1?b.removeAttr(e,n):K&&Q||!G.test(n)?e.setAttribute(!Q&&b.propFix[n]||n,n):e[b.camelCase("default-"+n)]=e[n]=!0,n}},K&&Q||(b.attrHooks.value={get:function(e,n){var r=e.getAttributeNode(n);return b.nodeName(e,"input")?e.defaultValue:r&&r.specified?r.value:t},set:function(e,n,r){return b.nodeName(e,"input")?(e.defaultValue=n,t):I&&I.set(e,n,r)}}),Q||(I=b.valHooks.button={get:function(e,n){var r=e.getAttributeNode(n);return r&&("id"===n||"name"===n||"coords"===n?""!==r.value:r.specified)?r.value:t},set:function(e,n,r){var i=e.getAttributeNode(r);return i||e.setAttributeNode(i=e.ownerDocument.createAttribute(r)),i.value=n+="","value"===r||n===e.getAttribute(r)?n:t}},b.attrHooks.contenteditable={get:I.get,set:function(e,t,n){I.set(e,""===t?!1:t,n)}},b.each(["width","height"],function(e,n){b.attrHooks[n]=b.extend(b.attrHooks[n],{set:function(e,r){return""===r?(e.setAttribute(n,"auto"),r):t}})})),b.support.hrefNormalized||(b.each(["href","src","width","height"],function(e,n){b.attrHooks[n]=b.extend(b.attrHooks[n],{get:function(e){var r=e.getAttribute(n,2);return null==r?t:r}})}),b.each(["href","src"],function(e,t){b.propHooks[t]={get:function(e){return e.getAttribute(t,4)}}})),b.support.style||(b.attrHooks.style={get:function(e){return e.style.cssText||t},set:function(e,t){return e.style.cssText=t+""}}),b.support.optSelected||(b.propHooks.selected=b.extend(b.propHooks.selected,{get:function(e){var t=e.parentNode;return t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex),null}})),b.support.enctype||(b.propFix.enctype="encoding"),b.support.checkOn||b.each(["radio","checkbox"],function(){b.valHooks[this]={get:function(e){return 
null===e.getAttribute("value")?"on":e.value}}}),b.each(["radio","checkbox"],function(){b.valHooks[this]=b.extend(b.valHooks[this],{set:function(e,n){return b.isArray(n)?e.checked=b.inArray(b(e).val(),n)>=0:t}})});var Z=/^(?:input|select|textarea)$/i,et=/^key/,tt=/^(?:mouse|contextmenu)|click/,nt=/^(?:focusinfocus|focusoutblur)$/,rt=/^([^.]*)(?:\.(.+)|)$/;function it(){return!0}function ot(){return!1}b.event={global:{},add:function(e,n,r,o,a){var s,u,l,c,p,f,d,h,g,m,y,v=b._data(e);if(v){r.handler&&(c=r,r=c.handler,a=c.selector),r.guid||(r.guid=b.guid++),(u=v.events)||(u=v.events={}),(f=v.handle)||(f=v.handle=function(e){return typeof b===i||e&&b.event.triggered===e.type?t:b.event.dispatch.apply(f.elem,arguments)},f.elem=e),n=(n||"").match(w)||[""],l=n.length;while(l--)s=rt.exec(n[l])||[],g=y=s[1],m=(s[2]||"").split(".").sort(),p=b.event.special[g]||{},g=(a?p.delegateType:p.bindType)||g,p=b.event.special[g]||{},d=b.extend({type:g,origType:y,data:o,handler:r,guid:r.guid,selector:a,needsContext:a&&b.expr.match.needsContext.test(a),namespace:m.join(".")},c),(h=u[g])||(h=u[g]=[],h.delegateCount=0,p.setup&&p.setup.call(e,o,m,f)!==!1||(e.addEventListener?e.addEventListener(g,f,!1):e.attachEvent&&e.attachEvent("on"+g,f))),p.add&&(p.add.call(e,d),d.handler.guid||(d.handler.guid=r.guid)),a?h.splice(h.delegateCount++,0,d):h.push(d),b.event.global[g]=!0;e=null}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,p,f,d,h,g,m=b.hasData(e)&&b._data(e);if(m&&(c=m.events)){t=(t||"").match(w)||[""],l=t.length;while(l--)if(s=rt.exec(t[l])||[],d=g=s[1],h=(s[2]||"").split(".").sort(),d){p=b.event.special[d]||{},d=(r?p.delegateType:p.bindType)||d,f=c[d]||[],s=s[2]&&RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),u=o=f.length;while(o--)a=f[o],!i&&g!==a.origType||n&&n.guid!==a.guid||s&&!s.test(a.namespace)||r&&r!==a.selector&&("**"!==r||!a.selector)||(f.splice(o,1),a.selector&&f.delegateCount--,p.remove&&p.remove.call(e,a));u&&!f.length&&(p.teardown&&p.teardown.call(e,h,m.handle)!==!1||b.removeEvent(e,d,m.handle),delete c[d])}else for(d in c)b.event.remove(e,d+t[l],n,r,!0);b.isEmptyObject(c)&&(delete m.handle,b._removeData(e,"events"))}},trigger:function(n,r,i,a){var s,u,l,c,p,f,d,h=[i||o],g=y.call(n,"type")?n.type:n,m=y.call(n,"namespace")?n.namespace.split("."):[];if(l=f=i=i||o,3!==i.nodeType&&8!==i.nodeType&&!nt.test(g+b.event.triggered)&&(g.indexOf(".")>=0&&(m=g.split("."),g=m.shift(),m.sort()),u=0>g.indexOf(":")&&"on"+g,n=n[b.expando]?n:new b.Event(g,"object"==typeof n&&n),n.isTrigger=!0,n.namespace=m.join("."),n.namespace_re=n.namespace?RegExp("(^|\\.)"+m.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,n.result=t,n.target||(n.target=i),r=null==r?[n]:b.makeArray(r,[n]),p=b.event.special[g]||{},a||!p.trigger||p.trigger.apply(i,r)!==!1)){if(!a&&!p.noBubble&&!b.isWindow(i)){for(c=p.delegateType||g,nt.test(c+g)||(l=l.parentNode);l;l=l.parentNode)h.push(l),f=l;f===(i.ownerDocument||o)&&h.push(f.defaultView||f.parentWindow||e)}d=0;while((l=h[d++])&&!n.isPropagationStopped())n.type=d>1?c:p.bindType||g,s=(b._data(l,"events")||{})[n.type]&&b._data(l,"handle"),s&&s.apply(l,r),s=u&&l[u],s&&b.acceptData(l)&&s.apply&&s.apply(l,r)===!1&&n.preventDefault();if(n.type=g,!(a||n.isDefaultPrevented()||p._default&&p._default.apply(i.ownerDocument,r)!==!1||"click"===g&&b.nodeName(i,"a")||!b.acceptData(i)||!u||!i[g]||b.isWindow(i))){f=i[u],f&&(i[u]=null),b.event.triggered=g;try{i[g]()}catch(v){}b.event.triggered=t,f&&(i[u]=f)}return n.result}},dispatch:function(e){e=b.event.fix(e);var 
n,r,i,o,a,s=[],u=h.call(arguments),l=(b._data(this,"events")||{})[e.type]||[],c=b.event.special[e.type]||{};if(u[0]=e,e.delegateTarget=this,!c.preDispatch||c.preDispatch.call(this,e)!==!1){s=b.event.handlers.call(this,e,l),n=0;while((o=s[n++])&&!e.isPropagationStopped()){e.currentTarget=o.elem,a=0;while((i=o.handlers[a++])&&!e.isImmediatePropagationStopped())(!e.namespace_re||e.namespace_re.test(i.namespace))&&(e.handleObj=i,e.data=i.data,r=((b.event.special[i.origType]||{}).handle||i.handler).apply(o.elem,u),r!==t&&(e.result=r)===!1&&(e.preventDefault(),e.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,e),e.result}},handlers:function(e,n){var r,i,o,a,s=[],u=n.delegateCount,l=e.target;if(u&&l.nodeType&&(!e.button||"click"!==e.type))for(;l!=this;l=l.parentNode||this)if(1===l.nodeType&&(l.disabled!==!0||"click"!==e.type)){for(o=[],a=0;u>a;a++)i=n[a],r=i.selector+" ",o[r]===t&&(o[r]=i.needsContext?b(r,this).index(l)>=0:b.find(r,this,null,[l]).length),o[r]&&o.push(i);o.length&&s.push({elem:l,handlers:o})}return n.length>u&&s.push({elem:this,handlers:n.slice(u)}),s},fix:function(e){if(e[b.expando])return e;var t,n,r,i=e.type,a=e,s=this.fixHooks[i];s||(this.fixHooks[i]=s=tt.test(i)?this.mouseHooks:et.test(i)?this.keyHooks:{}),r=s.props?this.props.concat(s.props):this.props,e=new b.Event(a),t=r.length;while(t--)n=r[t],e[n]=a[n];return e.target||(e.target=a.srcElement||o),3===e.target.nodeType&&(e.target=e.target.parentNode),e.metaKey=!!e.metaKey,s.filter?s.filter(e,a):e},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(e,t){return null==e.which&&(e.which=null!=t.charCode?t.charCode:t.keyCode),e}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(e,n){var r,i,a,s=n.button,u=n.fromElement;return null==e.pageX&&null!=n.clientX&&(i=e.target.ownerDocument||o,a=i.documentElement,r=i.body,e.pageX=n.clientX+(a&&a.scrollLeft||r&&r.scrollLeft||0)-(a&&a.clientLeft||r&&r.clientLeft||0),e.pageY=n.clientY+(a&&a.scrollTop||r&&r.scrollTop||0)-(a&&a.clientTop||r&&r.clientTop||0)),!e.relatedTarget&&u&&(e.relatedTarget=u===e.target?n.toElement:u),e.which||s===t||(e.which=1&s?1:2&s?3:4&s?2:0),e}},special:{load:{noBubble:!0},click:{trigger:function(){return b.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):t}},focus:{trigger:function(){if(this!==o.activeElement&&this.focus)try{return this.focus(),!1}catch(e){}},delegateType:"focusin"},blur:{trigger:function(){return this===o.activeElement&&this.blur?(this.blur(),!1):t},delegateType:"focusout"},beforeunload:{postDispatch:function(e){e.result!==t&&(e.originalEvent.returnValue=e.result)}}},simulate:function(e,t,n,r){var i=b.extend(new b.Event,n,{type:e,isSimulated:!0,originalEvent:{}});r?b.event.trigger(i,null,t):b.event.dispatch.call(t,i),i.isDefaultPrevented()&&n.preventDefault()}},b.removeEvent=o.removeEventListener?function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n,!1)}:function(e,t,n){var r="on"+t;e.detachEvent&&(typeof e[r]===i&&(e[r]=null),e.detachEvent(r,n))},b.Event=function(e,n){return this instanceof 
b.Event?(e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||e.returnValue===!1||e.getPreventDefault&&e.getPreventDefault()?it:ot):this.type=e,n&&b.extend(this,n),this.timeStamp=e&&e.timeStamp||b.now(),this[b.expando]=!0,t):new b.Event(e,n)},b.Event.prototype={isDefaultPrevented:ot,isPropagationStopped:ot,isImmediatePropagationStopped:ot,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=it,e&&(e.preventDefault?e.preventDefault():e.returnValue=!1)},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=it,e&&(e.stopPropagation&&e.stopPropagation(),e.cancelBubble=!0)},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=it,this.stopPropagation()}},b.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(e,t){b.event.special[e]={delegateType:t,bindType:t,handle:function(e){var n,r=this,i=e.relatedTarget,o=e.handleObj;
+return(!i||i!==r&&!b.contains(r,i))&&(e.type=o.origType,n=o.handler.apply(this,arguments),e.type=t),n}}}),b.support.submitBubbles||(b.event.special.submit={setup:function(){return b.nodeName(this,"form")?!1:(b.event.add(this,"click._submit keypress._submit",function(e){var n=e.target,r=b.nodeName(n,"input")||b.nodeName(n,"button")?n.form:t;r&&!b._data(r,"submitBubbles")&&(b.event.add(r,"submit._submit",function(e){e._submit_bubble=!0}),b._data(r,"submitBubbles",!0))}),t)},postDispatch:function(e){e._submit_bubble&&(delete e._submit_bubble,this.parentNode&&!e.isTrigger&&b.event.simulate("submit",this.parentNode,e,!0))},teardown:function(){return b.nodeName(this,"form")?!1:(b.event.remove(this,"._submit"),t)}}),b.support.changeBubbles||(b.event.special.change={setup:function(){return Z.test(this.nodeName)?(("checkbox"===this.type||"radio"===this.type)&&(b.event.add(this,"propertychange._change",function(e){"checked"===e.originalEvent.propertyName&&(this._just_changed=!0)}),b.event.add(this,"click._change",function(e){this._just_changed&&!e.isTrigger&&(this._just_changed=!1),b.event.simulate("change",this,e,!0)})),!1):(b.event.add(this,"beforeactivate._change",function(e){var t=e.target;Z.test(t.nodeName)&&!b._data(t,"changeBubbles")&&(b.event.add(t,"change._change",function(e){!this.parentNode||e.isSimulated||e.isTrigger||b.event.simulate("change",this.parentNode,e,!0)}),b._data(t,"changeBubbles",!0))}),t)},handle:function(e){var n=e.target;return this!==n||e.isSimulated||e.isTrigger||"radio"!==n.type&&"checkbox"!==n.type?e.handleObj.handler.apply(this,arguments):t},teardown:function(){return b.event.remove(this,"._change"),!Z.test(this.nodeName)}}),b.support.focusinBubbles||b.each({focus:"focusin",blur:"focusout"},function(e,t){var n=0,r=function(e){b.event.simulate(t,e.target,b.event.fix(e),!0)};b.event.special[t]={setup:function(){0===n++&&o.addEventListener(e,r,!0)},teardown:function(){0===--n&&o.removeEventListener(e,r,!0)}}}),b.fn.extend({on:function(e,n,r,i,o){var a,s;if("object"==typeof e){"string"!=typeof n&&(r=r||n,n=t);for(a in e)this.on(a,n,r,e[a],o);return this}if(null==r&&null==i?(i=n,r=n=t):null==i&&("string"==typeof n?(i=r,r=t):(i=r,r=n,n=t)),i===!1)i=ot;else if(!i)return this;return 1===o&&(s=i,i=function(e){return b().off(e),s.apply(this,arguments)},i.guid=s.guid||(s.guid=b.guid++)),this.each(function(){b.event.add(this,e,i,r,n)})},one:function(e,t,n,r){return this.on(e,t,n,r,1)},off:function(e,n,r){var i,o;if(e&&e.preventDefault&&e.handleObj)return i=e.handleObj,b(e.delegateTarget).off(i.namespace?i.origType+"."+i.namespace:i.origType,i.selector,i.handler),this;if("object"==typeof e){for(o in e)this.off(o,n,e[o]);return this}return(n===!1||"function"==typeof n)&&(r=n,n=t),r===!1&&(r=ot),this.each(function(){b.event.remove(this,e,r,n)})},bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},trigger:function(e,t){return this.each(function(){b.event.trigger(e,t,this)})},triggerHandler:function(e,n){var r=this[0];return r?b.event.trigger(e,n,r,!0):t}}),function(e,t){var n,r,i,o,a,s,u,l,c,p,f,d,h,g,m,y,v,x="sizzle"+-new Date,w=e.document,T={},N=0,C=0,k=it(),E=it(),S=it(),A=typeof t,j=1<<31,D=[],L=D.pop,H=D.push,q=D.slice,M=D.indexOf||function(e){var t=0,n=this.length;for(;n>t;t++)if(this[t]===e)return 
t;return-1},_="[\\x20\\t\\r\\n\\f]",F="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",O=F.replace("w","w#"),B="([*^$|!~]?=)",P="\\["+_+"*("+F+")"+_+"*(?:"+B+_+"*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|("+O+")|)|)"+_+"*\\]",R=":("+F+")(?:\\(((['\"])((?:\\\\.|[^\\\\])*?)\\3|((?:\\\\.|[^\\\\()[\\]]|"+P.replace(3,8)+")*)|.*)\\)|)",W=RegExp("^"+_+"+|((?:^|[^\\\\])(?:\\\\.)*)"+_+"+$","g"),$=RegExp("^"+_+"*,"+_+"*"),I=RegExp("^"+_+"*([\\x20\\t\\r\\n\\f>+~])"+_+"*"),z=RegExp(R),X=RegExp("^"+O+"$"),U={ID:RegExp("^#("+F+")"),CLASS:RegExp("^\\.("+F+")"),NAME:RegExp("^\\[name=['\"]?("+F+")['\"]?\\]"),TAG:RegExp("^("+F.replace("w","w*")+")"),ATTR:RegExp("^"+P),PSEUDO:RegExp("^"+R),CHILD:RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+_+"*(even|odd|(([+-]|)(\\d*)n|)"+_+"*(?:([+-]|)"+_+"*(\\d+)|))"+_+"*\\)|)","i"),needsContext:RegExp("^"+_+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+_+"*((?:-\\d)?\\d*)"+_+"*\\)|)(?=[^-]|$)","i")},V=/[\x20\t\r\n\f]*[+~]/,Y=/^[^{]+\{\s*\[native code/,J=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,G=/^(?:input|select|textarea|button)$/i,Q=/^h\d$/i,K=/'|\\/g,Z=/\=[\x20\t\r\n\f]*([^'"\]]*)[\x20\t\r\n\f]*\]/g,et=/\\([\da-fA-F]{1,6}[\x20\t\r\n\f]?|.)/g,tt=function(e,t){var n="0x"+t-65536;return n!==n?t:0>n?String.fromCharCode(n+65536):String.fromCharCode(55296|n>>10,56320|1023&n)};try{q.call(w.documentElement.childNodes,0)[0].nodeType}catch(nt){q=function(e){var t,n=[];while(t=this[e++])n.push(t);return n}}function rt(e){return Y.test(e+"")}function it(){var e,t=[];return e=function(n,r){return t.push(n+=" ")>i.cacheLength&&delete e[t.shift()],e[n]=r}}function ot(e){return e[x]=!0,e}function at(e){var t=p.createElement("div");try{return e(t)}catch(n){return!1}finally{t=null}}function st(e,t,n,r){var i,o,a,s,u,l,f,g,m,v;if((t?t.ownerDocument||t:w)!==p&&c(t),t=t||p,n=n||[],!e||"string"!=typeof e)return n;if(1!==(s=t.nodeType)&&9!==s)return[];if(!d&&!r){if(i=J.exec(e))if(a=i[1]){if(9===s){if(o=t.getElementById(a),!o||!o.parentNode)return n;if(o.id===a)return n.push(o),n}else if(t.ownerDocument&&(o=t.ownerDocument.getElementById(a))&&y(t,o)&&o.id===a)return n.push(o),n}else{if(i[2])return H.apply(n,q.call(t.getElementsByTagName(e),0)),n;if((a=i[3])&&T.getByClassName&&t.getElementsByClassName)return H.apply(n,q.call(t.getElementsByClassName(a),0)),n}if(T.qsa&&!h.test(e)){if(f=!0,g=x,m=t,v=9===s&&e,1===s&&"object"!==t.nodeName.toLowerCase()){l=ft(e),(f=t.getAttribute("id"))?g=f.replace(K,"\\$&"):t.setAttribute("id",g),g="[id='"+g+"'] ",u=l.length;while(u--)l[u]=g+dt(l[u]);m=V.test(e)&&t.parentNode||t,v=l.join(",")}if(v)try{return H.apply(n,q.call(m.querySelectorAll(v),0)),n}catch(b){}finally{f||t.removeAttribute("id")}}}return wt(e.replace(W,"$1"),t,n,r)}a=st.isXML=function(e){var t=e&&(e.ownerDocument||e).documentElement;return t?"HTML"!==t.nodeName:!1},c=st.setDocument=function(e){var n=e?e.ownerDocument||e:w;return n!==p&&9===n.nodeType&&n.documentElement?(p=n,f=n.documentElement,d=a(n),T.tagNameNoComments=at(function(e){return e.appendChild(n.createComment("")),!e.getElementsByTagName("*").length}),T.attributes=at(function(e){e.innerHTML="<select></select>";var t=typeof e.lastChild.getAttribute("multiple");return"boolean"!==t&&"string"!==t}),T.getByClassName=at(function(e){return e.innerHTML="<div class='hidden e'></div><div class='hidden'></div>",e.getElementsByClassName&&e.getElementsByClassName("e").length?(e.lastChild.className="e",2===e.getElementsByClassName("e").length):!1}),T.getByName=at(function(e){e.id=x+0,e.innerHTML="<a name='"+x+"'></a><div 
name='"+x+"'></div>",f.insertBefore(e,f.firstChild);var t=n.getElementsByName&&n.getElementsByName(x).length===2+n.getElementsByName(x+0).length;return T.getIdNotName=!n.getElementById(x),f.removeChild(e),t}),i.attrHandle=at(function(e){return e.innerHTML="<a href='#'></a>",e.firstChild&&typeof e.firstChild.getAttribute!==A&&"#"===e.firstChild.getAttribute("href")})?{}:{href:function(e){return e.getAttribute("href",2)},type:function(e){return e.getAttribute("type")}},T.getIdNotName?(i.find.ID=function(e,t){if(typeof t.getElementById!==A&&!d){var n=t.getElementById(e);return n&&n.parentNode?[n]:[]}},i.filter.ID=function(e){var t=e.replace(et,tt);return function(e){return e.getAttribute("id")===t}}):(i.find.ID=function(e,n){if(typeof n.getElementById!==A&&!d){var r=n.getElementById(e);return r?r.id===e||typeof r.getAttributeNode!==A&&r.getAttributeNode("id").value===e?[r]:t:[]}},i.filter.ID=function(e){var t=e.replace(et,tt);return function(e){var n=typeof e.getAttributeNode!==A&&e.getAttributeNode("id");return n&&n.value===t}}),i.find.TAG=T.tagNameNoComments?function(e,n){return typeof n.getElementsByTagName!==A?n.getElementsByTagName(e):t}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},i.find.NAME=T.getByName&&function(e,n){return typeof n.getElementsByName!==A?n.getElementsByName(name):t},i.find.CLASS=T.getByClassName&&function(e,n){return typeof n.getElementsByClassName===A||d?t:n.getElementsByClassName(e)},g=[],h=[":focus"],(T.qsa=rt(n.querySelectorAll))&&(at(function(e){e.innerHTML="<select><option selected=''></option></select>",e.querySelectorAll("[selected]").length||h.push("\\["+_+"*(?:checked|disabled|ismap|multiple|readonly|selected|value)"),e.querySelectorAll(":checked").length||h.push(":checked")}),at(function(e){e.innerHTML="<input type='hidden' i=''/>",e.querySelectorAll("[i^='']").length&&h.push("[*^$]="+_+"*(?:\"\"|'')"),e.querySelectorAll(":enabled").length||h.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),h.push(",.*:")})),(T.matchesSelector=rt(m=f.matchesSelector||f.mozMatchesSelector||f.webkitMatchesSelector||f.oMatchesSelector||f.msMatchesSelector))&&at(function(e){T.disconnectedMatch=m.call(e,"div"),m.call(e,"[s!='']:x"),g.push("!=",R)}),h=RegExp(h.join("|")),g=RegExp(g.join("|")),y=rt(f.contains)||f.compareDocumentPosition?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},v=f.compareDocumentPosition?function(e,t){var r;return e===t?(u=!0,0):(r=t.compareDocumentPosition&&e.compareDocumentPosition&&e.compareDocumentPosition(t))?1&r||e.parentNode&&11===e.parentNode.nodeType?e===n||y(w,e)?-1:t===n||y(w,t)?1:0:4&r?-1:1:e.compareDocumentPosition?-1:1}:function(e,t){var r,i=0,o=e.parentNode,a=t.parentNode,s=[e],l=[t];if(e===t)return u=!0,0;if(!o||!a)return e===n?-1:t===n?1:o?-1:a?1:0;if(o===a)return ut(e,t);r=e;while(r=r.parentNode)s.unshift(r);r=t;while(r=r.parentNode)l.unshift(r);while(s[i]===l[i])i++;return i?ut(s[i],l[i]):s[i]===w?-1:l[i]===w?1:0},u=!1,[0,0].sort(v),T.detectDuplicates=u,p):p},st.matches=function(e,t){return st(e,null,null,t)},st.matchesSelector=function(e,t){if((e.ownerDocument||e)!==p&&c(e),t=t.replace(Z,"='$1']"),!(!T.matchesSelector||d||g&&g.test(t)||h.test(t)))try{var 
n=m.call(e,t);if(n||T.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(r){}return st(t,p,null,[e]).length>0},st.contains=function(e,t){return(e.ownerDocument||e)!==p&&c(e),y(e,t)},st.attr=function(e,t){var n;return(e.ownerDocument||e)!==p&&c(e),d||(t=t.toLowerCase()),(n=i.attrHandle[t])?n(e):d||T.attributes?e.getAttribute(t):((n=e.getAttributeNode(t))||e.getAttribute(t))&&e[t]===!0?t:n&&n.specified?n.value:null},st.error=function(e){throw Error("Syntax error, unrecognized expression: "+e)},st.uniqueSort=function(e){var t,n=[],r=1,i=0;if(u=!T.detectDuplicates,e.sort(v),u){for(;t=e[r];r++)t===e[r-1]&&(i=n.push(r));while(i--)e.splice(n[i],1)}return e};function ut(e,t){var n=t&&e,r=n&&(~t.sourceIndex||j)-(~e.sourceIndex||j);if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function lt(e){return function(t){var n=t.nodeName.toLowerCase();return"input"===n&&t.type===e}}function ct(e){return function(t){var n=t.nodeName.toLowerCase();return("input"===n||"button"===n)&&t.type===e}}function pt(e){return ot(function(t){return t=+t,ot(function(n,r){var i,o=e([],n.length,t),a=o.length;while(a--)n[i=o[a]]&&(n[i]=!(r[i]=n[i]))})})}o=st.getText=function(e){var t,n="",r=0,i=e.nodeType;if(i){if(1===i||9===i||11===i){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=o(e)}else if(3===i||4===i)return e.nodeValue}else for(;t=e[r];r++)n+=o(t);return n},i=st.selectors={cacheLength:50,createPseudo:ot,match:U,find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(et,tt),e[3]=(e[4]||e[5]||"").replace(et,tt),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||st.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&st.error(e[0]),e},PSEUDO:function(e){var t,n=!e[5]&&e[2];return U.CHILD.test(e[0])?null:(e[4]?e[2]=e[4]:n&&z.test(n)&&(t=ft(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){return"*"===e?function(){return!0}:(e=e.replace(et,tt).toLowerCase(),function(t){return t.nodeName&&t.nodeName.toLowerCase()===e})},CLASS:function(e){var t=k[e+" "];return t||(t=RegExp("(^|"+_+")"+e+"("+_+"|$)"))&&k(e,function(e){return t.test(e.className||typeof e.getAttribute!==A&&e.getAttribute("class")||"")})},ATTR:function(e,t,n){return function(r){var i=st.attr(r,e);return null==i?"!="===t:t?(i+="","="===t?i===n:"!="===t?i!==n:"^="===t?n&&0===i.indexOf(n):"*="===t?n&&i.indexOf(n)>-1:"$="===t?n&&i.slice(-n.length)===n:"~="===t?(" "+i+" ").indexOf(n)>-1:"|="===t?i===n||i.slice(0,n.length+1)===n+"-":!1):!0}},CHILD:function(e,t,n,r,i){var o="nth"!==e.slice(0,3),a="last"!==e.slice(-4),s="of-type"===t;return 1===r&&0===i?function(e){return!!e.parentNode}:function(t,n,u){var l,c,p,f,d,h,g=o!==a?"nextSibling":"previousSibling",m=t.parentNode,y=s&&t.nodeName.toLowerCase(),v=!u&&!s;if(m){if(o){while(g){p=t;while(p=p[g])if(s?p.nodeName.toLowerCase()===y:1===p.nodeType)return!1;h=g="only"===e&&!h&&"nextSibling"}return!0}if(h=[a?m.firstChild:m.lastChild],a&&v){c=m[x]||(m[x]={}),l=c[e]||[],d=l[0]===N&&l[1],f=l[0]===N&&l[2],p=d&&m.childNodes[d];while(p=++d&&p&&p[g]||(f=d=0)||h.pop())if(1===p.nodeType&&++f&&p===t){c[e]=[N,d,f];break}}else if(v&&(l=(t[x]||(t[x]={}))[e])&&l[0]===N)f=l[1];else 
while(p=++d&&p&&p[g]||(f=d=0)||h.pop())if((s?p.nodeName.toLowerCase()===y:1===p.nodeType)&&++f&&(v&&((p[x]||(p[x]={}))[e]=[N,f]),p===t))break;return f-=i,f===r||0===f%r&&f/r>=0}}},PSEUDO:function(e,t){var n,r=i.pseudos[e]||i.setFilters[e.toLowerCase()]||st.error("unsupported pseudo: "+e);return r[x]?r(t):r.length>1?(n=[e,e,"",t],i.setFilters.hasOwnProperty(e.toLowerCase())?ot(function(e,n){var i,o=r(e,t),a=o.length;while(a--)i=M.call(e,o[a]),e[i]=!(n[i]=o[a])}):function(e){return r(e,0,n)}):r}},pseudos:{not:ot(function(e){var t=[],n=[],r=s(e.replace(W,"$1"));return r[x]?ot(function(e,t,n,i){var o,a=r(e,null,i,[]),s=e.length;while(s--)(o=a[s])&&(e[s]=!(t[s]=o))}):function(e,i,o){return t[0]=e,r(t,null,o,n),!n.pop()}}),has:ot(function(e){return function(t){return st(e,t).length>0}}),contains:ot(function(e){return function(t){return(t.textContent||t.innerText||o(t)).indexOf(e)>-1}}),lang:ot(function(e){return X.test(e||"")||st.error("unsupported lang: "+e),e=e.replace(et,tt).toLowerCase(),function(t){var n;do if(n=d?t.getAttribute("xml:lang")||t.getAttribute("lang"):t.lang)return n=n.toLowerCase(),n===e||0===n.indexOf(e+"-");while((t=t.parentNode)&&1===t.nodeType);return!1}}),target:function(t){var n=e.location&&e.location.hash;return n&&n.slice(1)===t.id},root:function(e){return e===f},focus:function(e){return e===p.activeElement&&(!p.hasFocus||p.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:function(e){return e.disabled===!1},disabled:function(e){return e.disabled===!0},checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,e.selected===!0},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeName>"@"||3===e.nodeType||4===e.nodeType)return!1;return!0},parent:function(e){return!i.pseudos.empty(e)},header:function(e){return Q.test(e.nodeName)},input:function(e){return G.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||t.toLowerCase()===e.type)},first:pt(function(){return[0]}),last:pt(function(e,t){return[t-1]}),eq:pt(function(e,t,n){return[0>n?n+t:n]}),even:pt(function(e,t){var n=0;for(;t>n;n+=2)e.push(n);return e}),odd:pt(function(e,t){var n=1;for(;t>n;n+=2)e.push(n);return e}),lt:pt(function(e,t,n){var r=0>n?n+t:n;for(;--r>=0;)e.push(r);return e}),gt:pt(function(e,t,n){var r=0>n?n+t:n;for(;t>++r;)e.push(r);return e})}};for(n in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})i.pseudos[n]=lt(n);for(n in{submit:!0,reset:!0})i.pseudos[n]=ct(n);function ft(e,t){var n,r,o,a,s,u,l,c=E[e+" "];if(c)return t?0:c.slice(0);s=e,u=[],l=i.preFilter;while(s){(!n||(r=$.exec(s)))&&(r&&(s=s.slice(r[0].length)||s),u.push(o=[])),n=!1,(r=I.exec(s))&&(n=r.shift(),o.push({value:n,type:r[0].replace(W," ")}),s=s.slice(n.length));for(a in i.filter)!(r=U[a].exec(s))||l[a]&&!(r=l[a](r))||(n=r.shift(),o.push({value:n,type:a,matches:r}),s=s.slice(n.length));if(!n)break}return t?s.length:s?st.error(e):E(e,u).slice(0)}function dt(e){var t=0,n=e.length,r="";for(;n>t;t++)r+=e[t].value;return r}function ht(e,t,n){var i=t.dir,o=n&&"parentNode"===i,a=C++;return t.first?function(t,n,r){while(t=t[i])if(1===t.nodeType||o)return e(t,n,r)}:function(t,n,s){var u,l,c,p=N+" "+a;if(s){while(t=t[i])if((1===t.nodeType||o)&&e(t,n,s))return!0}else 
while(t=t[i])if(1===t.nodeType||o)if(c=t[x]||(t[x]={}),(l=c[i])&&l[0]===p){if((u=l[1])===!0||u===r)return u===!0}else if(l=c[i]=[p],l[1]=e(t,n,s)||r,l[1]===!0)return!0}}function gt(e){return e.length>1?function(t,n,r){var i=e.length;while(i--)if(!e[i](t,n,r))return!1;return!0}:e[0]}function mt(e,t,n,r,i){var o,a=[],s=0,u=e.length,l=null!=t;for(;u>s;s++)(o=e[s])&&(!n||n(o,r,i))&&(a.push(o),l&&t.push(s));return a}function yt(e,t,n,r,i,o){return r&&!r[x]&&(r=yt(r)),i&&!i[x]&&(i=yt(i,o)),ot(function(o,a,s,u){var l,c,p,f=[],d=[],h=a.length,g=o||xt(t||"*",s.nodeType?[s]:s,[]),m=!e||!o&&t?g:mt(g,f,e,s,u),y=n?i||(o?e:h||r)?[]:a:m;if(n&&n(m,y,s,u),r){l=mt(y,d),r(l,[],s,u),c=l.length;while(c--)(p=l[c])&&(y[d[c]]=!(m[d[c]]=p))}if(o){if(i||e){if(i){l=[],c=y.length;while(c--)(p=y[c])&&l.push(m[c]=p);i(null,y=[],l,u)}c=y.length;while(c--)(p=y[c])&&(l=i?M.call(o,p):f[c])>-1&&(o[l]=!(a[l]=p))}}else y=mt(y===a?y.splice(h,y.length):y),i?i(null,a,y,u):H.apply(a,y)})}function vt(e){var t,n,r,o=e.length,a=i.relative[e[0].type],s=a||i.relative[" "],u=a?1:0,c=ht(function(e){return e===t},s,!0),p=ht(function(e){return M.call(t,e)>-1},s,!0),f=[function(e,n,r){return!a&&(r||n!==l)||((t=n).nodeType?c(e,n,r):p(e,n,r))}];for(;o>u;u++)if(n=i.relative[e[u].type])f=[ht(gt(f),n)];else{if(n=i.filter[e[u].type].apply(null,e[u].matches),n[x]){for(r=++u;o>r;r++)if(i.relative[e[r].type])break;return yt(u>1&&gt(f),u>1&&dt(e.slice(0,u-1)).replace(W,"$1"),n,r>u&&vt(e.slice(u,r)),o>r&&vt(e=e.slice(r)),o>r&&dt(e))}f.push(n)}return gt(f)}function bt(e,t){var n=0,o=t.length>0,a=e.length>0,s=function(s,u,c,f,d){var h,g,m,y=[],v=0,b="0",x=s&&[],w=null!=d,T=l,C=s||a&&i.find.TAG("*",d&&u.parentNode||u),k=N+=null==T?1:Math.random()||.1;for(w&&(l=u!==p&&u,r=n);null!=(h=C[b]);b++){if(a&&h){g=0;while(m=e[g++])if(m(h,u,c)){f.push(h);break}w&&(N=k,r=++n)}o&&((h=!m&&h)&&v--,s&&x.push(h))}if(v+=b,o&&b!==v){g=0;while(m=t[g++])m(x,y,u,c);if(s){if(v>0)while(b--)x[b]||y[b]||(y[b]=L.call(f));y=mt(y)}H.apply(f,y),w&&!s&&y.length>0&&v+t.length>1&&st.uniqueSort(f)}return w&&(N=k,l=T),x};return o?ot(s):s}s=st.compile=function(e,t){var n,r=[],i=[],o=S[e+" "];if(!o){t||(t=ft(e)),n=t.length;while(n--)o=vt(t[n]),o[x]?r.push(o):i.push(o);o=S(e,bt(i,r))}return o};function xt(e,t,n){var r=0,i=t.length;for(;i>r;r++)st(e,t[r],n);return n}function wt(e,t,n,r){var o,a,u,l,c,p=ft(e);if(!r&&1===p.length){if(a=p[0]=p[0].slice(0),a.length>2&&"ID"===(u=a[0]).type&&9===t.nodeType&&!d&&i.relative[a[1].type]){if(t=i.find.ID(u.matches[0].replace(et,tt),t)[0],!t)return n;e=e.slice(a.shift().value.length)}o=U.needsContext.test(e)?0:a.length;while(o--){if(u=a[o],i.relative[l=u.type])break;if((c=i.find[l])&&(r=c(u.matches[0].replace(et,tt),V.test(a[0].type)&&t.parentNode||t))){if(a.splice(o,1),e=r.length&&dt(a),!e)return H.apply(n,q.call(r,0)),n;break}}}return s(e,p)(r,t,d,n,V.test(e)),n}i.pseudos.nth=i.pseudos.eq;function Tt(){}i.filters=Tt.prototype=i.pseudos,i.setFilters=new Tt,c(),st.attr=b.attr,b.find=st,b.expr=st.selectors,b.expr[":"]=b.expr.pseudos,b.unique=st.uniqueSort,b.text=st.getText,b.isXMLDoc=st.isXML,b.contains=st.contains}(e);var at=/Until$/,st=/^(?:parents|prev(?:Until|All))/,ut=/^.[^:#\[\.,]*$/,lt=b.expr.match.needsContext,ct={children:!0,contents:!0,next:!0,prev:!0};b.fn.extend({find:function(e){var t,n,r,i=this.length;if("string"!=typeof e)return r=this,this.pushStack(b(e).filter(function(){for(t=0;i>t;t++)if(b.contains(r[t],this))return!0}));for(n=[],t=0;i>t;t++)b.find(e,this[t],n);return 
n=this.pushStack(i>1?b.unique(n):n),n.selector=(this.selector?this.selector+" ":"")+e,n},has:function(e){var t,n=b(e,this),r=n.length;return this.filter(function(){for(t=0;r>t;t++)if(b.contains(this,n[t]))return!0})},not:function(e){return this.pushStack(ft(this,e,!1))},filter:function(e){return this.pushStack(ft(this,e,!0))},is:function(e){return!!e&&("string"==typeof e?lt.test(e)?b(e,this.context).index(this[0])>=0:b.filter(e,this).length>0:this.filter(e).length>0)},closest:function(e,t){var n,r=0,i=this.length,o=[],a=lt.test(e)||"string"!=typeof e?b(e,t||this.context):0;for(;i>r;r++){n=this[r];while(n&&n.ownerDocument&&n!==t&&11!==n.nodeType){if(a?a.index(n)>-1:b.find.matchesSelector(n,e)){o.push(n);break}n=n.parentNode}}return this.pushStack(o.length>1?b.unique(o):o)},index:function(e){return e?"string"==typeof e?b.inArray(this[0],b(e)):b.inArray(e.jquery?e[0]:e,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){var n="string"==typeof e?b(e,t):b.makeArray(e&&e.nodeType?[e]:e),r=b.merge(this.get(),n);return this.pushStack(b.unique(r))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}}),b.fn.andSelf=b.fn.addBack;function pt(e,t){do e=e[t];while(e&&1!==e.nodeType);return e}b.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return b.dir(e,"parentNode")},parentsUntil:function(e,t,n){return b.dir(e,"parentNode",n)},next:function(e){return pt(e,"nextSibling")},prev:function(e){return pt(e,"previousSibling")},nextAll:function(e){return b.dir(e,"nextSibling")},prevAll:function(e){return b.dir(e,"previousSibling")},nextUntil:function(e,t,n){return b.dir(e,"nextSibling",n)},prevUntil:function(e,t,n){return b.dir(e,"previousSibling",n)},siblings:function(e){return b.sibling((e.parentNode||{}).firstChild,e)},children:function(e){return b.sibling(e.firstChild)},contents:function(e){return b.nodeName(e,"iframe")?e.contentDocument||e.contentWindow.document:b.merge([],e.childNodes)}},function(e,t){b.fn[e]=function(n,r){var i=b.map(this,t,n);return at.test(e)||(r=n),r&&"string"==typeof r&&(i=b.filter(r,i)),i=this.length>1&&!ct[e]?b.unique(i):i,this.length>1&&st.test(e)&&(i=i.reverse()),this.pushStack(i)}}),b.extend({filter:function(e,t,n){return n&&(e=":not("+e+")"),1===t.length?b.find.matchesSelector(t[0],e)?[t[0]]:[]:b.find.matches(e,t)},dir:function(e,n,r){var i=[],o=e[n];while(o&&9!==o.nodeType&&(r===t||1!==o.nodeType||!b(o).is(r)))1===o.nodeType&&i.push(o),o=o[n];return i},sibling:function(e,t){var n=[];for(;e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n}});function ft(e,t,n){if(t=t||0,b.isFunction(t))return b.grep(e,function(e,r){var i=!!t.call(e,r,e);return i===n});if(t.nodeType)return b.grep(e,function(e){return e===t===n});if("string"==typeof t){var r=b.grep(e,function(e){return 1===e.nodeType});if(ut.test(t))return b.filter(t,r,!n);t=b.filter(t,r)}return b.grep(e,function(e){return b.inArray(e,t)>=0===n})}function dt(e){var t=ht.split("|"),n=e.createDocumentFragment();if(n.createElement)while(t.length)n.createElement(t.pop());return n}var ht="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",gt=/ 
jQuery\d+="(?:null|\d+)"/g,mt=RegExp("<(?:"+ht+")[\\s/>]","i"),yt=/^\s+/,vt=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,bt=/<([\w:]+)/,xt=/<tbody/i,wt=/<|&#?\w+;/,Tt=/<(?:script|style|link)/i,Nt=/^(?:checkbox|radio)$/i,Ct=/checked\s*(?:[^=]|=\s*.checked.)/i,kt=/^$|\/(?:java|ecma)script/i,Et=/^true\/(.*)/,St=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,At={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],area:[1,"<map>","</map>"],param:[1,"<object>","</object>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:b.support.htmlSerialize?[0,"",""]:[1,"X<div>","</div>"]},jt=dt(o),Dt=jt.appendChild(o.createElement("div"));At.optgroup=At.option,At.tbody=At.tfoot=At.colgroup=At.caption=At.thead,At.th=At.td,b.fn.extend({text:function(e){return b.access(this,function(e){return e===t?b.text(this):this.empty().append((this[0]&&this[0].ownerDocument||o).createTextNode(e))},null,e,arguments.length)},wrapAll:function(e){if(b.isFunction(e))return this.each(function(t){b(this).wrapAll(e.call(this,t))});if(this[0]){var t=b(e,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstChild&&1===e.firstChild.nodeType)e=e.firstChild;return e}).append(this)}return this},wrapInner:function(e){return b.isFunction(e)?this.each(function(t){b(this).wrapInner(e.call(this,t))}):this.each(function(){var t=b(this),n=t.contents();n.length?n.wrapAll(e):t.append(e)})},wrap:function(e){var t=b.isFunction(e);return this.each(function(n){b(this).wrapAll(t?e.call(this,n):e)})},unwrap:function(){return this.parent().each(function(){b.nodeName(this,"body")||b(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,!0,function(e){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&this.appendChild(e)})},prepend:function(){return this.domManip(arguments,!0,function(e){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&this.insertBefore(e,this.firstChild)})},before:function(){return this.domManip(arguments,!1,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return this.domManip(arguments,!1,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},remove:function(e,t){var n,r=0;for(;null!=(n=this[r]);r++)(!e||b.filter(e,[n]).length>0)&&(t||1!==n.nodeType||b.cleanData(Ot(n)),n.parentNode&&(t&&b.contains(n.ownerDocument,n)&&Mt(Ot(n,"script")),n.parentNode.removeChild(n)));return this},empty:function(){var e,t=0;for(;null!=(e=this[t]);t++){1===e.nodeType&&b.cleanData(Ot(e,!1));while(e.firstChild)e.removeChild(e.firstChild);e.options&&b.nodeName(e,"select")&&(e.options.length=0)}return this},clone:function(e,t){return e=null==e?!1:e,t=null==t?e:t,this.map(function(){return b.clone(this,e,t)})},html:function(e){return b.access(this,function(e){var n=this[0]||{},r=0,i=this.length;if(e===t)return 1===n.nodeType?n.innerHTML.replace(gt,""):t;if(!("string"!=typeof e||Tt.test(e)||!b.support.htmlSerialize&&mt.test(e)||!b.support.leadingWhitespace&&yt.test(e)||At[(bt.exec(e)||["",""])[1].toLowerCase()])){e=e.replace(vt,"<$1></$2>");try{for(;i>r;r++)n=this[r]||{},1===n.nodeType&&(b.cleanData(Ot(n,!1)),n.innerHTML=e);n=0}catch(o){}}n&&this.empty().append(e)},null,e,arguments.length)},replaceWith:function(e){var t=b.isFunction(e);return 
t||"string"==typeof e||(e=b(e).not(this).detach()),this.domManip([e],!0,function(e){var t=this.nextSibling,n=this.parentNode;n&&(b(this).remove(),n.insertBefore(e,t))})},detach:function(e){return this.remove(e,!0)},domManip:function(e,n,r){e=f.apply([],e);var i,o,a,s,u,l,c=0,p=this.length,d=this,h=p-1,g=e[0],m=b.isFunction(g);if(m||!(1>=p||"string"!=typeof g||b.support.checkClone)&&Ct.test(g))return this.each(function(i){var o=d.eq(i);m&&(e[0]=g.call(this,i,n?o.html():t)),o.domManip(e,n,r)});if(p&&(l=b.buildFragment(e,this[0].ownerDocument,!1,this),i=l.firstChild,1===l.childNodes.length&&(l=i),i)){for(n=n&&b.nodeName(i,"tr"),s=b.map(Ot(l,"script"),Ht),a=s.length;p>c;c++)o=l,c!==h&&(o=b.clone(o,!0,!0),a&&b.merge(s,Ot(o,"script"))),r.call(n&&b.nodeName(this[c],"table")?Lt(this[c],"tbody"):this[c],o,c);if(a)for(u=s[s.length-1].ownerDocument,b.map(s,qt),c=0;a>c;c++)o=s[c],kt.test(o.type||"")&&!b._data(o,"globalEval")&&b.contains(u,o)&&(o.src?b.ajax({url:o.src,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0}):b.globalEval((o.text||o.textContent||o.innerHTML||"").replace(St,"")));l=i=null}return this}});function Lt(e,t){return e.getElementsByTagName(t)[0]||e.appendChild(e.ownerDocument.createElement(t))}function Ht(e){var t=e.getAttributeNode("type");return e.type=(t&&t.specified)+"/"+e.type,e}function qt(e){var t=Et.exec(e.type);return t?e.type=t[1]:e.removeAttribute("type"),e}function Mt(e,t){var n,r=0;for(;null!=(n=e[r]);r++)b._data(n,"globalEval",!t||b._data(t[r],"globalEval"))}function _t(e,t){if(1===t.nodeType&&b.hasData(e)){var n,r,i,o=b._data(e),a=b._data(t,o),s=o.events;if(s){delete a.handle,a.events={};for(n in s)for(r=0,i=s[n].length;i>r;r++)b.event.add(t,n,s[n][r])}a.data&&(a.data=b.extend({},a.data))}}function Ft(e,t){var n,r,i;if(1===t.nodeType){if(n=t.nodeName.toLowerCase(),!b.support.noCloneEvent&&t[b.expando]){i=b._data(t);for(r in i.events)b.removeEvent(t,r,i.handle);t.removeAttribute(b.expando)}"script"===n&&t.text!==e.text?(Ht(t).text=e.text,qt(t)):"object"===n?(t.parentNode&&(t.outerHTML=e.outerHTML),b.support.html5Clone&&e.innerHTML&&!b.trim(t.innerHTML)&&(t.innerHTML=e.innerHTML)):"input"===n&&Nt.test(e.type)?(t.defaultChecked=t.checked=e.checked,t.value!==e.value&&(t.value=e.value)):"option"===n?t.defaultSelected=t.selected=e.defaultSelected:("input"===n||"textarea"===n)&&(t.defaultValue=e.defaultValue)}}b.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,t){b.fn[e]=function(e){var n,r=0,i=[],o=b(e),a=o.length-1;for(;a>=r;r++)n=r===a?this:this.clone(!0),b(o[r])[t](n),d.apply(i,n.get());return this.pushStack(i)}});function Ot(e,n){var r,o,a=0,s=typeof e.getElementsByTagName!==i?e.getElementsByTagName(n||"*"):typeof e.querySelectorAll!==i?e.querySelectorAll(n||"*"):t;if(!s)for(s=[],r=e.childNodes||e;null!=(o=r[a]);a++)!n||b.nodeName(o,n)?s.push(o):b.merge(s,Ot(o,n));return n===t||n&&b.nodeName(e,n)?b.merge([e],s):s}function Bt(e){Nt.test(e.type)&&(e.defaultChecked=e.checked)}b.extend({clone:function(e,t,n){var r,i,o,a,s,u=b.contains(e.ownerDocument,e);if(b.support.html5Clone||b.isXMLDoc(e)||!mt.test("<"+e.nodeName+">")?o=e.cloneNode(!0):(Dt.innerHTML=e.outerHTML,Dt.removeChild(o=Dt.firstChild)),!(b.support.noCloneEvent&&b.support.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||b.isXMLDoc(e)))for(r=Ot(o),s=Ot(e),a=0;null!=(i=s[a]);++a)r[a]&&Ft(i,r[a]);if(t)if(n)for(s=s||Ot(e),r=r||Ot(o),a=0;null!=(i=s[a]);a++)_t(i,r[a]);else _t(e,o);return 
r=Ot(o,"script"),r.length>0&&Mt(r,!u&&Ot(e,"script")),r=s=i=null,o},buildFragment:function(e,t,n,r){var i,o,a,s,u,l,c,p=e.length,f=dt(t),d=[],h=0;for(;p>h;h++)if(o=e[h],o||0===o)if("object"===b.type(o))b.merge(d,o.nodeType?[o]:o);else if(wt.test(o)){s=s||f.appendChild(t.createElement("div")),u=(bt.exec(o)||["",""])[1].toLowerCase(),c=At[u]||At._default,s.innerHTML=c[1]+o.replace(vt,"<$1></$2>")+c[2],i=c[0];while(i--)s=s.lastChild;if(!b.support.leadingWhitespace&&yt.test(o)&&d.push(t.createTextNode(yt.exec(o)[0])),!b.support.tbody){o="table"!==u||xt.test(o)?"<table>"!==c[1]||xt.test(o)?0:s:s.firstChild,i=o&&o.childNodes.length;while(i--)b.nodeName(l=o.childNodes[i],"tbody")&&!l.childNodes.length&&o.removeChild(l)
+}b.merge(d,s.childNodes),s.textContent="";while(s.firstChild)s.removeChild(s.firstChild);s=f.lastChild}else d.push(t.createTextNode(o));s&&f.removeChild(s),b.support.appendChecked||b.grep(Ot(d,"input"),Bt),h=0;while(o=d[h++])if((!r||-1===b.inArray(o,r))&&(a=b.contains(o.ownerDocument,o),s=Ot(f.appendChild(o),"script"),a&&Mt(s),n)){i=0;while(o=s[i++])kt.test(o.type||"")&&n.push(o)}return s=null,f},cleanData:function(e,t){var n,r,o,a,s=0,u=b.expando,l=b.cache,p=b.support.deleteExpando,f=b.event.special;for(;null!=(n=e[s]);s++)if((t||b.acceptData(n))&&(o=n[u],a=o&&l[o])){if(a.events)for(r in a.events)f[r]?b.event.remove(n,r):b.removeEvent(n,r,a.handle);l[o]&&(delete l[o],p?delete n[u]:typeof n.removeAttribute!==i?n.removeAttribute(u):n[u]=null,c.push(o))}}});var Pt,Rt,Wt,$t=/alpha\([^)]*\)/i,It=/opacity\s*=\s*([^)]*)/,zt=/^(top|right|bottom|left)$/,Xt=/^(none|table(?!-c[ea]).+)/,Ut=/^margin/,Vt=RegExp("^("+x+")(.*)$","i"),Yt=RegExp("^("+x+")(?!px)[a-z%]+$","i"),Jt=RegExp("^([+-])=("+x+")","i"),Gt={BODY:"block"},Qt={position:"absolute",visibility:"hidden",display:"block"},Kt={letterSpacing:0,fontWeight:400},Zt=["Top","Right","Bottom","Left"],en=["Webkit","O","Moz","ms"];function tn(e,t){if(t in e)return t;var n=t.charAt(0).toUpperCase()+t.slice(1),r=t,i=en.length;while(i--)if(t=en[i]+n,t in e)return t;return r}function nn(e,t){return e=t||e,"none"===b.css(e,"display")||!b.contains(e.ownerDocument,e)}function rn(e,t){var n,r,i,o=[],a=0,s=e.length;for(;s>a;a++)r=e[a],r.style&&(o[a]=b._data(r,"olddisplay"),n=r.style.display,t?(o[a]||"none"!==n||(r.style.display=""),""===r.style.display&&nn(r)&&(o[a]=b._data(r,"olddisplay",un(r.nodeName)))):o[a]||(i=nn(r),(n&&"none"!==n||!i)&&b._data(r,"olddisplay",i?n:b.css(r,"display"))));for(a=0;s>a;a++)r=e[a],r.style&&(t&&"none"!==r.style.display&&""!==r.style.display||(r.style.display=t?o[a]||"":"none"));return e}b.fn.extend({css:function(e,n){return b.access(this,function(e,n,r){var i,o,a={},s=0;if(b.isArray(n)){for(o=Rt(e),i=n.length;i>s;s++)a[n[s]]=b.css(e,n[s],!1,o);return a}return r!==t?b.style(e,n,r):b.css(e,n)},e,n,arguments.length>1)},show:function(){return rn(this,!0)},hide:function(){return rn(this)},toggle:function(e){var t="boolean"==typeof e;return this.each(function(){(t?e:nn(this))?b(this).show():b(this).hide()})}}),b.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=Wt(e,"opacity");return""===n?"1":n}}}},cssNumber:{columnCount:!0,fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":b.support.cssFloat?"cssFloat":"styleFloat"},style:function(e,n,r,i){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var o,a,s,u=b.camelCase(n),l=e.style;if(n=b.cssProps[u]||(b.cssProps[u]=tn(l,u)),s=b.cssHooks[n]||b.cssHooks[u],r===t)return s&&"get"in s&&(o=s.get(e,!1,i))!==t?o:l[n];if(a=typeof r,"string"===a&&(o=Jt.exec(r))&&(r=(o[1]+1)*o[2]+parseFloat(b.css(e,n)),a="number"),!(null==r||"number"===a&&isNaN(r)||("number"!==a||b.cssNumber[u]||(r+="px"),b.support.clearCloneStyle||""!==r||0!==n.indexOf("background")||(l[n]="inherit"),s&&"set"in s&&(r=s.set(e,r,i))===t)))try{l[n]=r}catch(c){}}},css:function(e,n,r,i){var o,a,s,u=b.camelCase(n);return n=b.cssProps[u]||(b.cssProps[u]=tn(e.style,u)),s=b.cssHooks[n]||b.cssHooks[u],s&&"get"in s&&(a=s.get(e,!0,r)),a===t&&(a=Wt(e,n,i)),"normal"===a&&n in Kt&&(a=Kt[n]),""===r||r?(o=parseFloat(a),r===!0||b.isNumeric(o)?o||0:a):a},swap:function(e,t,n,r){var i,o,a={};for(o in t)a[o]=e.style[o],e.style[o]=t[o];i=n.apply(e,r||[]);for(o in t)e.style[o]=a[o];return 
i}}),e.getComputedStyle?(Rt=function(t){return e.getComputedStyle(t,null)},Wt=function(e,n,r){var i,o,a,s=r||Rt(e),u=s?s.getPropertyValue(n)||s[n]:t,l=e.style;return s&&(""!==u||b.contains(e.ownerDocument,e)||(u=b.style(e,n)),Yt.test(u)&&Ut.test(n)&&(i=l.width,o=l.minWidth,a=l.maxWidth,l.minWidth=l.maxWidth=l.width=u,u=s.width,l.width=i,l.minWidth=o,l.maxWidth=a)),u}):o.documentElement.currentStyle&&(Rt=function(e){return e.currentStyle},Wt=function(e,n,r){var i,o,a,s=r||Rt(e),u=s?s[n]:t,l=e.style;return null==u&&l&&l[n]&&(u=l[n]),Yt.test(u)&&!zt.test(n)&&(i=l.left,o=e.runtimeStyle,a=o&&o.left,a&&(o.left=e.currentStyle.left),l.left="fontSize"===n?"1em":u,u=l.pixelLeft+"px",l.left=i,a&&(o.left=a)),""===u?"auto":u});function on(e,t,n){var r=Vt.exec(t);return r?Math.max(0,r[1]-(n||0))+(r[2]||"px"):t}function an(e,t,n,r,i){var o=n===(r?"border":"content")?4:"width"===t?1:0,a=0;for(;4>o;o+=2)"margin"===n&&(a+=b.css(e,n+Zt[o],!0,i)),r?("content"===n&&(a-=b.css(e,"padding"+Zt[o],!0,i)),"margin"!==n&&(a-=b.css(e,"border"+Zt[o]+"Width",!0,i))):(a+=b.css(e,"padding"+Zt[o],!0,i),"padding"!==n&&(a+=b.css(e,"border"+Zt[o]+"Width",!0,i)));return a}function sn(e,t,n){var r=!0,i="width"===t?e.offsetWidth:e.offsetHeight,o=Rt(e),a=b.support.boxSizing&&"border-box"===b.css(e,"boxSizing",!1,o);if(0>=i||null==i){if(i=Wt(e,t,o),(0>i||null==i)&&(i=e.style[t]),Yt.test(i))return i;r=a&&(b.support.boxSizingReliable||i===e.style[t]),i=parseFloat(i)||0}return i+an(e,t,n||(a?"border":"content"),r,o)+"px"}function un(e){var t=o,n=Gt[e];return n||(n=ln(e,t),"none"!==n&&n||(Pt=(Pt||b("<iframe frameborder='0' width='0' height='0'/>").css("cssText","display:block !important")).appendTo(t.documentElement),t=(Pt[0].contentWindow||Pt[0].contentDocument).document,t.write("<!doctype html><html><body>"),t.close(),n=ln(e,t),Pt.detach()),Gt[e]=n),n}function ln(e,t){var n=b(t.createElement(e)).appendTo(t.body),r=b.css(n[0],"display");return n.remove(),r}b.each(["height","width"],function(e,n){b.cssHooks[n]={get:function(e,r,i){return r?0===e.offsetWidth&&Xt.test(b.css(e,"display"))?b.swap(e,Qt,function(){return sn(e,n,i)}):sn(e,n,i):t},set:function(e,t,r){var i=r&&Rt(e);return on(e,t,r?an(e,n,r,b.support.boxSizing&&"border-box"===b.css(e,"boxSizing",!1,i),i):0)}}}),b.support.opacity||(b.cssHooks.opacity={get:function(e,t){return It.test((t&&e.currentStyle?e.currentStyle.filter:e.style.filter)||"")?.01*parseFloat(RegExp.$1)+"":t?"1":""},set:function(e,t){var n=e.style,r=e.currentStyle,i=b.isNumeric(t)?"alpha(opacity="+100*t+")":"",o=r&&r.filter||n.filter||"";n.zoom=1,(t>=1||""===t)&&""===b.trim(o.replace($t,""))&&n.removeAttribute&&(n.removeAttribute("filter"),""===t||r&&!r.filter)||(n.filter=$t.test(o)?o.replace($t,i):o+" "+i)}}),b(function(){b.support.reliableMarginRight||(b.cssHooks.marginRight={get:function(e,n){return n?b.swap(e,{display:"inline-block"},Wt,[e,"marginRight"]):t}}),!b.support.pixelPosition&&b.fn.position&&b.each(["top","left"],function(e,n){b.cssHooks[n]={get:function(e,r){return r?(r=Wt(e,n),Yt.test(r)?b(e).position()[n]+"px":r):t}}})}),b.expr&&b.expr.filters&&(b.expr.filters.hidden=function(e){return 0>=e.offsetWidth&&0>=e.offsetHeight||!b.support.reliableHiddenOffsets&&"none"===(e.style&&e.style.display||b.css(e,"display"))},b.expr.filters.visible=function(e){return!b.expr.filters.hidden(e)}),b.each({margin:"",padding:"",border:"Width"},function(e,t){b.cssHooks[e+t]={expand:function(n){var r=0,i={},o="string"==typeof n?n.split(" "):[n];for(;4>r;r++)i[e+Zt[r]+t]=o[r]||o[r-2]||o[0];return 
i}},Ut.test(e)||(b.cssHooks[e+t].set=on)});var cn=/%20/g,pn=/\[\]$/,fn=/\r?\n/g,dn=/^(?:submit|button|image|reset|file)$/i,hn=/^(?:input|select|textarea|keygen)/i;b.fn.extend({serialize:function(){return b.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=b.prop(this,"elements");return e?b.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!b(this).is(":disabled")&&hn.test(this.nodeName)&&!dn.test(e)&&(this.checked||!Nt.test(e))}).map(function(e,t){var n=b(this).val();return null==n?null:b.isArray(n)?b.map(n,function(e){return{name:t.name,value:e.replace(fn,"\r\n")}}):{name:t.name,value:n.replace(fn,"\r\n")}}).get()}}),b.param=function(e,n){var r,i=[],o=function(e,t){t=b.isFunction(t)?t():null==t?"":t,i[i.length]=encodeURIComponent(e)+"="+encodeURIComponent(t)};if(n===t&&(n=b.ajaxSettings&&b.ajaxSettings.traditional),b.isArray(e)||e.jquery&&!b.isPlainObject(e))b.each(e,function(){o(this.name,this.value)});else for(r in e)gn(r,e[r],n,o);return i.join("&").replace(cn,"+")};function gn(e,t,n,r){var i;if(b.isArray(t))b.each(t,function(t,i){n||pn.test(e)?r(e,i):gn(e+"["+("object"==typeof i?t:"")+"]",i,n,r)});else if(n||"object"!==b.type(t))r(e,t);else for(i in t)gn(e+"["+i+"]",t[i],n,r)}b.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(e,t){b.fn[t]=function(e,n){return arguments.length>0?this.on(t,null,e,n):this.trigger(t)}}),b.fn.hover=function(e,t){return this.mouseenter(e).mouseleave(t||e)};var mn,yn,vn=b.now(),bn=/\?/,xn=/#.*$/,wn=/([?&])_=[^&]*/,Tn=/^(.*?):[ \t]*([^\r\n]*)\r?$/gm,Nn=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Cn=/^(?:GET|HEAD)$/,kn=/^\/\//,En=/^([\w.+-]+:)(?:\/\/([^\/?#:]*)(?::(\d+)|)|)/,Sn=b.fn.load,An={},jn={},Dn="*/".concat("*");try{yn=a.href}catch(Ln){yn=o.createElement("a"),yn.href="",yn=yn.href}mn=En.exec(yn.toLowerCase())||[];function Hn(e){return function(t,n){"string"!=typeof t&&(n=t,t="*");var r,i=0,o=t.toLowerCase().match(w)||[];if(b.isFunction(n))while(r=o[i++])"+"===r[0]?(r=r.slice(1)||"*",(e[r]=e[r]||[]).unshift(n)):(e[r]=e[r]||[]).push(n)}}function qn(e,n,r,i){var o={},a=e===jn;function s(u){var l;return o[u]=!0,b.each(e[u]||[],function(e,u){var c=u(n,r,i);return"string"!=typeof c||a||o[c]?a?!(l=c):t:(n.dataTypes.unshift(c),s(c),!1)}),l}return s(n.dataTypes[0])||!o["*"]&&s("*")}function Mn(e,n){var r,i,o=b.ajaxSettings.flatOptions||{};for(i in n)n[i]!==t&&((o[i]?e:r||(r={}))[i]=n[i]);return r&&b.extend(!0,e,r),e}b.fn.load=function(e,n,r){if("string"!=typeof e&&Sn)return Sn.apply(this,arguments);var i,o,a,s=this,u=e.indexOf(" ");return u>=0&&(i=e.slice(u,e.length),e=e.slice(0,u)),b.isFunction(n)?(r=n,n=t):n&&"object"==typeof n&&(a="POST"),s.length>0&&b.ajax({url:e,type:a,dataType:"html",data:n}).done(function(e){o=arguments,s.html(i?b("<div>").append(b.parseHTML(e)).find(i):e)}).complete(r&&function(e,t){s.each(r,o||[e.responseText,t,e])}),this},b.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){b.fn[t]=function(e){return this.on(t,e)}}),b.each(["get","post"],function(e,n){b[n]=function(e,r,i,o){return 
b.isFunction(r)&&(o=o||i,i=r,r=t),b.ajax({url:e,type:n,dataType:o,data:r,success:i})}}),b.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:yn,type:"GET",isLocal:Nn.test(mn[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Dn,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText"},converters:{"* text":e.String,"text html":!0,"text json":b.parseJSON,"text xml":b.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?Mn(Mn(e,b.ajaxSettings),t):Mn(b.ajaxSettings,e)},ajaxPrefilter:Hn(An),ajaxTransport:Hn(jn),ajax:function(e,n){"object"==typeof e&&(n=e,e=t),n=n||{};var r,i,o,a,s,u,l,c,p=b.ajaxSetup({},n),f=p.context||p,d=p.context&&(f.nodeType||f.jquery)?b(f):b.event,h=b.Deferred(),g=b.Callbacks("once memory"),m=p.statusCode||{},y={},v={},x=0,T="canceled",N={readyState:0,getResponseHeader:function(e){var t;if(2===x){if(!c){c={};while(t=Tn.exec(a))c[t[1].toLowerCase()]=t[2]}t=c[e.toLowerCase()]}return null==t?null:t},getAllResponseHeaders:function(){return 2===x?a:null},setRequestHeader:function(e,t){var n=e.toLowerCase();return x||(e=v[n]=v[n]||e,y[e]=t),this},overrideMimeType:function(e){return x||(p.mimeType=e),this},statusCode:function(e){var t;if(e)if(2>x)for(t in e)m[t]=[m[t],e[t]];else N.always(e[N.status]);return this},abort:function(e){var t=e||T;return l&&l.abort(t),k(0,t),this}};if(h.promise(N).complete=g.add,N.success=N.done,N.error=N.fail,p.url=((e||p.url||yn)+"").replace(xn,"").replace(kn,mn[1]+"//"),p.type=n.method||n.type||p.method||p.type,p.dataTypes=b.trim(p.dataType||"*").toLowerCase().match(w)||[""],null==p.crossDomain&&(r=En.exec(p.url.toLowerCase()),p.crossDomain=!(!r||r[1]===mn[1]&&r[2]===mn[2]&&(r[3]||("http:"===r[1]?80:443))==(mn[3]||("http:"===mn[1]?80:443)))),p.data&&p.processData&&"string"!=typeof p.data&&(p.data=b.param(p.data,p.traditional)),qn(An,p,n,N),2===x)return N;u=p.global,u&&0===b.active++&&b.event.trigger("ajaxStart"),p.type=p.type.toUpperCase(),p.hasContent=!Cn.test(p.type),o=p.url,p.hasContent||(p.data&&(o=p.url+=(bn.test(o)?"&":"?")+p.data,delete p.data),p.cache===!1&&(p.url=wn.test(o)?o.replace(wn,"$1_="+vn++):o+(bn.test(o)?"&":"?")+"_="+vn++)),p.ifModified&&(b.lastModified[o]&&N.setRequestHeader("If-Modified-Since",b.lastModified[o]),b.etag[o]&&N.setRequestHeader("If-None-Match",b.etag[o])),(p.data&&p.hasContent&&p.contentType!==!1||n.contentType)&&N.setRequestHeader("Content-Type",p.contentType),N.setRequestHeader("Accept",p.dataTypes[0]&&p.accepts[p.dataTypes[0]]?p.accepts[p.dataTypes[0]]+("*"!==p.dataTypes[0]?", "+Dn+"; q=0.01":""):p.accepts["*"]);for(i in p.headers)N.setRequestHeader(i,p.headers[i]);if(p.beforeSend&&(p.beforeSend.call(f,N,p)===!1||2===x))return N.abort();T="abort";for(i in{success:1,error:1,complete:1})N[i](p[i]);if(l=qn(jn,p,n,N)){N.readyState=1,u&&d.trigger("ajaxSend",[N,p]),p.async&&p.timeout>0&&(s=setTimeout(function(){N.abort("timeout")},p.timeout));try{x=1,l.send(y,k)}catch(C){if(!(2>x))throw C;k(-1,C)}}else k(-1,"No Transport");function k(e,n,r,i){var 
c,y,v,w,T,C=n;2!==x&&(x=2,s&&clearTimeout(s),l=t,a=i||"",N.readyState=e>0?4:0,r&&(w=_n(p,N,r)),e>=200&&300>e||304===e?(p.ifModified&&(T=N.getResponseHeader("Last-Modified"),T&&(b.lastModified[o]=T),T=N.getResponseHeader("etag"),T&&(b.etag[o]=T)),204===e?(c=!0,C="nocontent"):304===e?(c=!0,C="notmodified"):(c=Fn(p,w),C=c.state,y=c.data,v=c.error,c=!v)):(v=C,(e||!C)&&(C="error",0>e&&(e=0))),N.status=e,N.statusText=(n||C)+"",c?h.resolveWith(f,[y,C,N]):h.rejectWith(f,[N,C,v]),N.statusCode(m),m=t,u&&d.trigger(c?"ajaxSuccess":"ajaxError",[N,p,c?y:v]),g.fireWith(f,[N,C]),u&&(d.trigger("ajaxComplete",[N,p]),--b.active||b.event.trigger("ajaxStop")))}return N},getScript:function(e,n){return b.get(e,t,n,"script")},getJSON:function(e,t,n){return b.get(e,t,n,"json")}});function _n(e,n,r){var i,o,a,s,u=e.contents,l=e.dataTypes,c=e.responseFields;for(s in c)s in r&&(n[c[s]]=r[s]);while("*"===l[0])l.shift(),o===t&&(o=e.mimeType||n.getResponseHeader("Content-Type"));if(o)for(s in u)if(u[s]&&u[s].test(o)){l.unshift(s);break}if(l[0]in r)a=l[0];else{for(s in r){if(!l[0]||e.converters[s+" "+l[0]]){a=s;break}i||(i=s)}a=a||i}return a?(a!==l[0]&&l.unshift(a),r[a]):t}function Fn(e,t){var n,r,i,o,a={},s=0,u=e.dataTypes.slice(),l=u[0];if(e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u[1])for(i in e.converters)a[i.toLowerCase()]=e.converters[i];for(;r=u[++s];)if("*"!==r){if("*"!==l&&l!==r){if(i=a[l+" "+r]||a["* "+r],!i)for(n in a)if(o=n.split(" "),o[1]===r&&(i=a[l+" "+o[0]]||a["* "+o[0]])){i===!0?i=a[n]:a[n]!==!0&&(r=o[0],u.splice(s--,0,r));break}if(i!==!0)if(i&&e["throws"])t=i(t);else try{t=i(t)}catch(c){return{state:"parsererror",error:i?c:"No conversion from "+l+" to "+r}}}l=r}return{state:"success",data:t}}b.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(e){return b.globalEval(e),e}}}),b.ajaxPrefilter("script",function(e){e.cache===t&&(e.cache=!1),e.crossDomain&&(e.type="GET",e.global=!1)}),b.ajaxTransport("script",function(e){if(e.crossDomain){var n,r=o.head||b("head")[0]||o.documentElement;return{send:function(t,i){n=o.createElement("script"),n.async=!0,e.scriptCharset&&(n.charset=e.scriptCharset),n.src=e.url,n.onload=n.onreadystatechange=function(e,t){(t||!n.readyState||/loaded|complete/.test(n.readyState))&&(n.onload=n.onreadystatechange=null,n.parentNode&&n.parentNode.removeChild(n),n=null,t||i(200,"success"))},r.insertBefore(n,r.firstChild)},abort:function(){n&&n.onload(t,!0)}}}});var On=[],Bn=/(=)\?(?=&|$)|\?\?/;b.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=On.pop()||b.expando+"_"+vn++;return this[e]=!0,e}}),b.ajaxPrefilter("json jsonp",function(n,r,i){var o,a,s,u=n.jsonp!==!1&&(Bn.test(n.url)?"url":"string"==typeof n.data&&!(n.contentType||"").indexOf("application/x-www-form-urlencoded")&&Bn.test(n.data)&&"data");return u||"jsonp"===n.dataTypes[0]?(o=n.jsonpCallback=b.isFunction(n.jsonpCallback)?n.jsonpCallback():n.jsonpCallback,u?n[u]=n[u].replace(Bn,"$1"+o):n.jsonp!==!1&&(n.url+=(bn.test(n.url)?"&":"?")+n.jsonp+"="+o),n.converters["script json"]=function(){return s||b.error(o+" was not called"),s[0]},n.dataTypes[0]="json",a=e[o],e[o]=function(){s=arguments},i.always(function(){e[o]=a,n[o]&&(n.jsonpCallback=r.jsonpCallback,On.push(o)),s&&b.isFunction(a)&&a(s[0]),s=a=t}),"script"):t});var Pn,Rn,Wn=0,$n=e.ActiveXObject&&function(){var e;for(e in Pn)Pn[e](t,!0)};function In(){try{return new e.XMLHttpRequest}catch(t){}}function zn(){try{return new 
e.ActiveXObject("Microsoft.XMLHTTP")}catch(t){}}b.ajaxSettings.xhr=e.ActiveXObject?function(){return!this.isLocal&&In()||zn()}:In,Rn=b.ajaxSettings.xhr(),b.support.cors=!!Rn&&"withCredentials"in Rn,Rn=b.support.ajax=!!Rn,Rn&&b.ajaxTransport(function(n){if(!n.crossDomain||b.support.cors){var r;return{send:function(i,o){var a,s,u=n.xhr();if(n.username?u.open(n.type,n.url,n.async,n.username,n.password):u.open(n.type,n.url,n.async),n.xhrFields)for(s in n.xhrFields)u[s]=n.xhrFields[s];n.mimeType&&u.overrideMimeType&&u.overrideMimeType(n.mimeType),n.crossDomain||i["X-Requested-With"]||(i["X-Requested-With"]="XMLHttpRequest");try{for(s in i)u.setRequestHeader(s,i[s])}catch(l){}u.send(n.hasContent&&n.data||null),r=function(e,i){var s,l,c,p;try{if(r&&(i||4===u.readyState))if(r=t,a&&(u.onreadystatechange=b.noop,$n&&delete Pn[a]),i)4!==u.readyState&&u.abort();else{p={},s=u.status,l=u.getAllResponseHeaders(),"string"==typeof u.responseText&&(p.text=u.responseText);try{c=u.statusText}catch(f){c=""}s||!n.isLocal||n.crossDomain?1223===s&&(s=204):s=p.text?200:404}}catch(d){i||o(-1,d)}p&&o(s,c,p,l)},n.async?4===u.readyState?setTimeout(r):(a=++Wn,$n&&(Pn||(Pn={},b(e).unload($n)),Pn[a]=r),u.onreadystatechange=r):r()},abort:function(){r&&r(t,!0)}}}});var Xn,Un,Vn=/^(?:toggle|show|hide)$/,Yn=RegExp("^(?:([+-])=|)("+x+")([a-z%]*)$","i"),Jn=/queueHooks$/,Gn=[nr],Qn={"*":[function(e,t){var n,r,i=this.createTween(e,t),o=Yn.exec(t),a=i.cur(),s=+a||0,u=1,l=20;if(o){if(n=+o[2],r=o[3]||(b.cssNumber[e]?"":"px"),"px"!==r&&s){s=b.css(i.elem,e,!0)||n||1;do u=u||".5",s/=u,b.style(i.elem,e,s+r);while(u!==(u=i.cur()/a)&&1!==u&&--l)}i.unit=r,i.start=s,i.end=o[1]?s+(o[1]+1)*n:n}return i}]};function Kn(){return setTimeout(function(){Xn=t}),Xn=b.now()}function Zn(e,t){b.each(t,function(t,n){var r=(Qn[t]||[]).concat(Qn["*"]),i=0,o=r.length;for(;o>i;i++)if(r[i].call(e,t,n))return})}function er(e,t,n){var r,i,o=0,a=Gn.length,s=b.Deferred().always(function(){delete u.elem}),u=function(){if(i)return!1;var t=Xn||Kn(),n=Math.max(0,l.startTime+l.duration-t),r=n/l.duration||0,o=1-r,a=0,u=l.tweens.length;for(;u>a;a++)l.tweens[a].run(o);return s.notifyWith(e,[l,o,n]),1>o&&u?n:(s.resolveWith(e,[l]),!1)},l=s.promise({elem:e,props:b.extend({},t),opts:b.extend(!0,{specialEasing:{}},n),originalProperties:t,originalOptions:n,startTime:Xn||Kn(),duration:n.duration,tweens:[],createTween:function(t,n){var r=b.Tween(e,l.opts,t,n,l.opts.specialEasing[t]||l.opts.easing);return l.tweens.push(r),r},stop:function(t){var n=0,r=t?l.tweens.length:0;if(i)return this;for(i=!0;r>n;n++)l.tweens[n].run(1);return t?s.resolveWith(e,[l,t]):s.rejectWith(e,[l,t]),this}}),c=l.props;for(tr(c,l.opts.specialEasing);a>o;o++)if(r=Gn[o].call(l,e,c,l.opts))return r;return Zn(l,c),b.isFunction(l.opts.start)&&l.opts.start.call(e,l),b.fx.timer(b.extend(u,{elem:e,anim:l,queue:l.opts.queue})),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always)}function tr(e,t){var n,r,i,o,a;for(i in e)if(r=b.camelCase(i),o=t[r],n=e[i],b.isArray(n)&&(o=n[1],n=e[i]=n[0]),i!==r&&(e[r]=n,delete e[i]),a=b.cssHooks[r],a&&"expand"in a){n=a.expand(n),delete e[r];for(i in n)i in e||(e[i]=n[i],t[i]=o)}else t[r]=o}b.Animation=b.extend(er,{tweener:function(e,t){b.isFunction(e)?(t=e,e=["*"]):e=e.split(" ");var n,r=0,i=e.length;for(;i>r;r++)n=e[r],Qn[n]=Qn[n]||[],Qn[n].unshift(t)},prefilter:function(e,t){t?Gn.unshift(e):Gn.push(e)}});function nr(e,t,n){var 
r,i,o,a,s,u,l,c,p,f=this,d=e.style,h={},g=[],m=e.nodeType&&nn(e);n.queue||(c=b._queueHooks(e,"fx"),null==c.unqueued&&(c.unqueued=0,p=c.empty.fire,c.empty.fire=function(){c.unqueued||p()}),c.unqueued++,f.always(function(){f.always(function(){c.unqueued--,b.queue(e,"fx").length||c.empty.fire()})})),1===e.nodeType&&("height"in t||"width"in t)&&(n.overflow=[d.overflow,d.overflowX,d.overflowY],"inline"===b.css(e,"display")&&"none"===b.css(e,"float")&&(b.support.inlineBlockNeedsLayout&&"inline"!==un(e.nodeName)?d.zoom=1:d.display="inline-block")),n.overflow&&(d.overflow="hidden",b.support.shrinkWrapBlocks||f.always(function(){d.overflow=n.overflow[0],d.overflowX=n.overflow[1],d.overflowY=n.overflow[2]}));for(i in t)if(a=t[i],Vn.exec(a)){if(delete t[i],u=u||"toggle"===a,a===(m?"hide":"show"))continue;g.push(i)}if(o=g.length){s=b._data(e,"fxshow")||b._data(e,"fxshow",{}),"hidden"in s&&(m=s.hidden),u&&(s.hidden=!m),m?b(e).show():f.done(function(){b(e).hide()}),f.done(function(){var t;b._removeData(e,"fxshow");for(t in h)b.style(e,t,h[t])});for(i=0;o>i;i++)r=g[i],l=f.createTween(r,m?s[r]:0),h[r]=s[r]||b.style(e,r),r in s||(s[r]=l.start,m&&(l.end=l.start,l.start="width"===r||"height"===r?1:0))}}function rr(e,t,n,r,i){return new rr.prototype.init(e,t,n,r,i)}b.Tween=rr,rr.prototype={constructor:rr,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||"swing",this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(b.cssNumber[n]?"":"px")},cur:function(){var e=rr.propHooks[this.prop];return e&&e.get?e.get(this):rr.propHooks._default.get(this)},run:function(e){var t,n=rr.propHooks[this.prop];return this.pos=t=this.options.duration?b.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):rr.propHooks._default.set(this),this}},rr.prototype.init.prototype=rr.prototype,rr.propHooks={_default:{get:function(e){var t;return null==e.elem[e.prop]||e.elem.style&&null!=e.elem.style[e.prop]?(t=b.css(e.elem,e.prop,""),t&&"auto"!==t?t:0):e.elem[e.prop]},set:function(e){b.fx.step[e.prop]?b.fx.step[e.prop](e):e.elem.style&&(null!=e.elem.style[b.cssProps[e.prop]]||b.cssHooks[e.prop])?b.style(e.elem,e.prop,e.now+e.unit):e.elem[e.prop]=e.now}}},rr.propHooks.scrollTop=rr.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},b.each(["toggle","show","hide"],function(e,t){var n=b.fn[t];b.fn[t]=function(e,r,i){return null==e||"boolean"==typeof e?n.apply(this,arguments):this.animate(ir(t,!0),e,r,i)}}),b.fn.extend({fadeTo:function(e,t,n,r){return this.filter(nn).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(e,t,n,r){var i=b.isEmptyObject(e),o=b.speed(t,n,r),a=function(){var t=er(this,b.extend({},e),o);a.finish=function(){t.stop(!0)},(i||b._data(this,"finish"))&&t.stop(!0)};return a.finish=a,i||o.queue===!1?this.each(a):this.queue(o.queue,a)},stop:function(e,n,r){var i=function(e){var t=e.stop;delete e.stop,t(r)};return"string"!=typeof e&&(r=n,n=e,e=t),n&&e!==!1&&this.queue(e||"fx",[]),this.each(function(){var t=!0,n=null!=e&&e+"queueHooks",o=b.timers,a=b._data(this);if(n)a[n]&&a[n].stop&&i(a[n]);else for(n in a)a[n]&&a[n].stop&&Jn.test(n)&&i(a[n]);for(n=o.length;n--;)o[n].elem!==this||null!=e&&o[n].queue!==e||(o[n].anim.stop(r),t=!1,o.splice(n,1));(t||!r)&&b.dequeue(this,e)})},finish:function(e){return e!==!1&&(e=e||"fx"),this.each(function(){var 
t,n=b._data(this),r=n[e+"queue"],i=n[e+"queueHooks"],o=b.timers,a=r?r.length:0;for(n.finish=!0,b.queue(this,e,[]),i&&i.cur&&i.cur.finish&&i.cur.finish.call(this),t=o.length;t--;)o[t].elem===this&&o[t].queue===e&&(o[t].anim.stop(!0),o.splice(t,1));for(t=0;a>t;t++)r[t]&&r[t].finish&&r[t].finish.call(this);delete n.finish})}});function ir(e,t){var n,r={height:e},i=0;for(t=t?1:0;4>i;i+=2-t)n=Zt[i],r["margin"+n]=r["padding"+n]=e;return t&&(r.opacity=r.width=e),r}b.each({slideDown:ir("show"),slideUp:ir("hide"),slideToggle:ir("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,t){b.fn[e]=function(e,n,r){return this.animate(t,e,n,r)}}),b.speed=function(e,t,n){var r=e&&"object"==typeof e?b.extend({},e):{complete:n||!n&&t||b.isFunction(e)&&e,duration:e,easing:n&&t||t&&!b.isFunction(t)&&t};return r.duration=b.fx.off?0:"number"==typeof r.duration?r.duration:r.duration in b.fx.speeds?b.fx.speeds[r.duration]:b.fx.speeds._default,(null==r.queue||r.queue===!0)&&(r.queue="fx"),r.old=r.complete,r.complete=function(){b.isFunction(r.old)&&r.old.call(this),r.queue&&b.dequeue(this,r.queue)},r},b.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2}},b.timers=[],b.fx=rr.prototype.init,b.fx.tick=function(){var e,n=b.timers,r=0;for(Xn=b.now();n.length>r;r++)e=n[r],e()||n[r]!==e||n.splice(r--,1);n.length||b.fx.stop(),Xn=t},b.fx.timer=function(e){e()&&b.timers.push(e)&&b.fx.start()},b.fx.interval=13,b.fx.start=function(){Un||(Un=setInterval(b.fx.tick,b.fx.interval))},b.fx.stop=function(){clearInterval(Un),Un=null},b.fx.speeds={slow:600,fast:200,_default:400},b.fx.step={},b.expr&&b.expr.filters&&(b.expr.filters.animated=function(e){return b.grep(b.timers,function(t){return e===t.elem}).length}),b.fn.offset=function(e){if(arguments.length)return e===t?this:this.each(function(t){b.offset.setOffset(this,e,t)});var n,r,o={top:0,left:0},a=this[0],s=a&&a.ownerDocument;if(s)return n=s.documentElement,b.contains(n,a)?(typeof a.getBoundingClientRect!==i&&(o=a.getBoundingClientRect()),r=or(s),{top:o.top+(r.pageYOffset||n.scrollTop)-(n.clientTop||0),left:o.left+(r.pageXOffset||n.scrollLeft)-(n.clientLeft||0)}):o},b.offset={setOffset:function(e,t,n){var r=b.css(e,"position");"static"===r&&(e.style.position="relative");var i=b(e),o=i.offset(),a=b.css(e,"top"),s=b.css(e,"left"),u=("absolute"===r||"fixed"===r)&&b.inArray("auto",[a,s])>-1,l={},c={},p,f;u?(c=i.position(),p=c.top,f=c.left):(p=parseFloat(a)||0,f=parseFloat(s)||0),b.isFunction(t)&&(t=t.call(e,n,o)),null!=t.top&&(l.top=t.top-o.top+p),null!=t.left&&(l.left=t.left-o.left+f),"using"in t?t.using.call(e,l):i.css(l)}},b.fn.extend({position:function(){if(this[0]){var e,t,n={top:0,left:0},r=this[0];return"fixed"===b.css(r,"position")?t=r.getBoundingClientRect():(e=this.offsetParent(),t=this.offset(),b.nodeName(e[0],"html")||(n=e.offset()),n.top+=b.css(e[0],"borderTopWidth",!0),n.left+=b.css(e[0],"borderLeftWidth",!0)),{top:t.top-n.top-b.css(r,"marginTop",!0),left:t.left-n.left-b.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent||o.documentElement;while(e&&!b.nodeName(e,"html")&&"static"===b.css(e,"position"))e=e.offsetParent;return e||o.documentElement})}}),b.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(e,n){var r=/Y/.test(n);b.fn[e]=function(i){return b.access(this,function(e,i,o){var a=or(e);return o===t?a?n in 
a?a[n]:a.document.documentElement[i]:e[i]:(a?a.scrollTo(r?b(a).scrollLeft():o,r?o:b(a).scrollTop()):e[i]=o,t)},e,i,arguments.length,null)}});function or(e){return b.isWindow(e)?e:9===e.nodeType?e.defaultView||e.parentWindow:!1}b.each({Height:"height",Width:"width"},function(e,n){b.each({padding:"inner"+e,content:n,"":"outer"+e},function(r,i){b.fn[i]=function(i,o){var a=arguments.length&&(r||"boolean"!=typeof i),s=r||(i===!0||o===!0?"margin":"border");return b.access(this,function(n,r,i){var o;return b.isWindow(n)?n.document.documentElement["client"+e]:9===n.nodeType?(o=n.documentElement,Math.max(n.body["scroll"+e],o["scroll"+e],n.body["offset"+e],o["offset"+e],o["client"+e])):i===t?b.css(n,r,s):b.style(n,r,i,s)},n,a?i:t,a,null)}})}),e.jQuery=e.$=b,"function"==typeof define&&define.amd&&define.amd.jQuery&&define("jquery",[],function(){return b})})(window);
\ No newline at end of file
diff --git a/doc/sdk/cli/index.html.textile.liquid b/doc/sdk/cli/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..511a41e
--- /dev/null
@@ -0,0 +1,62 @@
+---
+layout: default
+navsection: sdk
+navmenu: CLI
+title: "Overview"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The @arv@ CLI tool provides a convenient interface for manipulating API resources. It also provides access to a number of subcommands.
+
+h3. Syntax
+
+The @arv@ command takes the following arguments:
+
+<pre>
+Arvados command line client
+Usage: arv [--flags] subcommand|resource [method] [--parameters]
+
+Available flags:
+  -n, --dry-run       Don't actually do anything
+  -v, --verbose       Print some things on stderr
+  -f, --format=<s>    Set the output format. Must be one of json (default),
+                      yaml or uuid. (Default: json)
+  -s, --short         Return only UUIDs (equivalent to --format=uuid)
+
+Use 'arv subcommand|resource --help' to get more information about a particular
+command or resource.
+
+Available subcommands: copy, create, edit, keep, pipeline, run, tag, ws
+
+Available resources: api_client_authorization, api_client, authorized_key,
+collection, user_agreement, group, job_task, link, log, keep_disk,
+pipeline_instance, node, repository, specimen, pipeline_template, user,
+virtual_machine, trait, human, job, keep_service
+
+Additional options:
+  -e, --version       Print version and exit
+  -h, --help          Show this message
+</pre>
+
+h4. Flags: @--format@
+
+- @--format=json@ := Output response as JSON. This is the default format.
+
+- @--format=yaml@ := Output response as YAML.
+
+- @--format=uuid@ := Output only the UUIDs of object(s) in the API response, one per line.
+
+
+
+h3. Resources
+
+See the "arv reference":{{site.baseurl}}/sdk/cli/reference.html page.
+
+h3. Subcommands
+
+See the "arv subcommands":{{site.baseurl}}/sdk/cli/subcommands.html page.
diff --git a/doc/sdk/cli/install.html.textile.liquid b/doc/sdk/cli/install.html.textile.liquid
new file mode 100644 (file)
index 0000000..e72dc67
--- /dev/null
@@ -0,0 +1,54 @@
+---
+layout: default
+navsection: sdk
+navmenu: CLI
+title: "Installation"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados CLI tools are written in Ruby and Python.  To use the @arv@ command, you can either install the @arvados-cli@ gem via RubyGems or build and install the package from source.  The @arv@ command also relies on other Arvados tools.  To get those, install the @arvados-python-client@ and @arvados-cwl-runner@ packages, either from PyPI or source.
+
+h3. Prerequisites: Ruby, Bundler, and curl libraries
+
+{% include 'install_ruby_and_bundler' %}
+
+Install curl libraries with your system's package manager. For example, on Debian or Ubuntu:
+
+<notextile>
+<pre>
+~$ <code class="userinput">sudo apt-get install libcurl3 libcurl3-gnutls libcurl4-openssl-dev</code>
+</pre>
+</notextile>
+
+h3. Option 1: Install from RubyGems and PyPI
+
+<notextile>
+<pre>
+~$ <code class="userinput">sudo -i gem install arvados-cli</code>
+</pre>
+</notextile>
+
+<notextile>
+<pre>
+~$ <code class="userinput">pip install arvados-python-client arvados-cwl-runner</code>
+</pre>
+</notextile>
+
+h3. Option 2: Build and install from source
+
+<notextile>
+<pre>
+~$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
+~$ <code class="userinput">cd arvados/sdk/cli</code>
+~/arvados/sdk/cli$ <code class="userinput">gem build arvados-cli.gemspec</code>
+~/arvados/sdk/cli$ <code class="userinput">sudo -i gem install arvados-cli-*.gem</code>
+~/arvados/sdk/cli$ <code class="userinput">cd ../python</code>
+~/arvados/sdk/python$ <code class="userinput">python setup.py install</code>
+~/arvados/sdk/python$ <code class="userinput">cd ../cwl</code>
+~/arvados/sdk/cwl$ <code class="userinput">python setup.py install</code>
+</pre>
+</notextile>
diff --git a/doc/sdk/cli/reference.html.textile.liquid b/doc/sdk/cli/reference.html.textile.liquid
new file mode 100644 (file)
index 0000000..cd70dfd
--- /dev/null
@@ -0,0 +1,77 @@
+---
+layout: default
+navsection: sdk
+navmenu: CLI
+title: "arv reference"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+_In order to use the @arv@ command, make sure that you have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html_
+
+h3. Usage
+
+See the "CLI overview":{{site.baseurl}}/sdk/cli/index.html page.
+
+h3. Resource types and methods
+
+Get list of resource types
+@arv --help@
+
+Get list of resource methods for the "user" resource type
+@arv user --help@
+
+
+h3. Basic examples
+
+Get record for current user
+@arv user current@
+
+Get entire record for some specific user
+@arv user get --uuid 6dnxa-tpzed-iimd25zhzh84gbk@
+
+Update user record
+@arv user update --uuid 6dnxa-tpzed-iimd25zhzh84gbk --user '{"first_name":"Bob"}'@
+
+Get list of groups
+@arv group list@
+
+Delete a group
+@arv group delete --uuid 6dnxa-j7d0g-iw7i6n43d37jtog@
+
+
+h3. Common commands
+
+Most @arv@ resources accept the following commands:
+
+* @get@
+* @list@
+* @create@
+* @update@
+* @delete@
+
+
+h4. @list@
+
+Arguments accepted by the @list@ subcommand include:
+
+<pre>
+  -l, --limit=<i>        Maximum number of items to return. (Default: 100)
+  -o, --offset=<i>       Number of items to skip before first returned record. (Default: 0)
+  -f, --filters=<s>      Conditions for filtering items.
+  -r, --order=<s>        Order in which to return matching items.
+  -s, --select=<s>       Select which fields to return.
+  -d, --distinct         Return each distinct object.
+  -c, --count=<s>        Type of count to return in items_available ('none' or 'exact'). (Default: exact)
+</pre>
+
+The @--filters@ option takes a JSON-encoded list of filters to apply when selecting the resources to return. Each filter is a three-element list of _[field, operator, value]_, where the _operator_ may be one of @=@, @<@, @<=@, @>@, @>=@, @!=@, @like@, or @ilike@.
+
+Example:
+
+@arv collection list --filters '[["name", "=", "PGP VAR inputs"], ["created_at", ">=", "2014-10-01"]]'@
+
+will return a list of all collections visible to the current user which are named "PGP VAR inputs" and were created on or after October 1, 2014.
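+
+The other flags can be combined in the same way. For example, @arv collection list --limit 10 --order 'created_at desc'@ would return only the ten most recently created collections.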
diff --git a/doc/sdk/cli/subcommands.html.textile.liquid b/doc/sdk/cli/subcommands.html.textile.liquid
new file mode 100644 (file)
index 0000000..f9652ef
--- /dev/null
@@ -0,0 +1,424 @@
+---
+layout: default
+navsection: sdk
+navmenu: CLI
+title: "arv subcommands"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+_In order to use the @arv@ command, make sure that you have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html_
+
+h3(#arv-create). arv create
+
+@arv create@ can be used to create Arvados objects from the command line. It opens the editor of your choice (set the @EDITOR@ environment variable) and allows you to type or paste a JSON or YAML description. When the file is saved, the object is created on the API server, provided it passes validation.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv create --help</code>
+Options:
+  --project-uuid, -p &lt;s&gt;:   Project uuid in which to create the object
+              --help, -h:   Show this message
+</pre>
+</notextile>
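+
+For example, the following opens your editor on a new, empty pipeline template and saves the result in a specific project (the project UUID shown is a placeholder):
+
+<notextile>
+<pre>
+$ <code class="userinput">arv create pipeline_template --project-uuid qr1hi-j7d0g-xxxxxxxxxxxxxxx</code>
+</pre>
+</notextile>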
+
+h3(#arv-get). arv get
+
+@arv get@ can be used to get a textual representation of Arvados objects from the command line. The output can be limited to a subset of the object's fields. The only information this command needs is the object's UUID.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv get --help</code>
+Usage: arv [--format json|yaml] get [uuid] [fields...]
+
+Fetch the specified Arvados object, select the specified fields,
+and print a text representation.
+</pre>
+</notextile>
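+
+For example, to fetch just the @name@ and @portable_data_hash@ fields of a collection as YAML (the UUID shown is a placeholder):
+
+<notextile>
+<pre>
+$ <code class="userinput">arv --format yaml get qr1hi-4zz18-xxxxxxxxxxxxxxx name portable_data_hash</code>
+</pre>
+</notextile>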
+
+h3(#arv-edit). arv edit
+
+@arv edit@ can be used to edit Arvados objects from the command line. It opens the editor of your choice (set the @EDITOR@ environment variable) with the JSON or YAML description of the object. Saving the file updates the Arvados object on the API server, provided it passes validation.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv edit --help</code>
+Arvados command line client
+Usage: arv edit [uuid] [fields...]
+
+Fetch the specified Arvados object, select the specified fields,
+open an interactive text editor on a text representation (json or
+yaml, use --format) and then update the object.  Will use 'nano'
+by default, customize with the EDITOR or VISUAL environment variable.
+</pre>
+</notextile>
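+
+For example, the following opens an editor on just the @name@ and @description@ fields of a collection (the UUID shown is a placeholder):
+
+<notextile>
+<pre>
+$ <code class="userinput">arv edit qr1hi-4zz18-xxxxxxxxxxxxxxx name description</code>
+</pre>
+</notextile>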
+
+h3(#arv-copy). arv copy
+
+@arv copy@ can be used to copy a pipeline instance, template or collection from one Arvados instance to another. It takes care of copying the object and all its dependencies.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv copy --help</code>
+usage: arv_copy.py [-h] [-v] [--progress] [--no-progress] [-f] --src
+                   SOURCE_ARVADOS --dst DESTINATION_ARVADOS [--recursive]
+                   [--no-recursive] [--dst-git-repo DST_GIT_REPO]
+                   [--project-uuid PROJECT_UUID] [--retries RETRIES]
+                   object_uuid
+
+Copy a pipeline instance, template or collection from one Arvados instance to
+another.
+
+positional arguments:
+  object_uuid           The UUID of the object to be copied.
+
+optional arguments:
+  -h, --help            show this help message and exit
+  -v, --verbose         Verbose output.
+  --progress            Report progress on copying collections. (default)
+  --no-progress         Do not report progress on copying collections.
+  -f, --force           Perform copy even if the object appears to exist at
+                        the remote destination.
+  --src SOURCE_ARVADOS  The name of the source Arvados instance (required) -
+                        points at an Arvados config file. May be either a
+                        pathname to a config file, or (for example) "foo" as
+                        shorthand for $HOME/.config/arvados/foo.conf.
+  --dst DESTINATION_ARVADOS
+                        The name of the destination Arvados instance
+                        (required) - points at an Arvados config file. May be
+                        either a pathname to a config file, or (for example)
+                        "foo" as shorthand for $HOME/.config/arvados/foo.conf.
+  --recursive           Recursively copy any dependencies for this object.
+                        (default)
+  --no-recursive        Do not copy any dependencies. NOTE: if this option is
+                        given, the copied object will need to be updated
+                        manually in order to be functional.
+  --dst-git-repo DST_GIT_REPO
+                        The name of the destination git repository. Required
+                        when copying a pipeline recursively.
+  --project-uuid PROJECT_UUID
+                        The UUID of the project at the destination to which
+                        the pipeline should be copied.
+  --retries RETRIES     Maximum number of times to retry server requests that
+                        encounter temporary failures (e.g., server down).
+                        Default 3.
+</pre>
+</notextile>
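+
+For example, the following copies a collection, along with its dependencies, from an instance configured as @aaaaa@ to one configured as @bbbbb@ (the config names and UUIDs are placeholders):
+
+<notextile>
+<pre>
+$ <code class="userinput">arv copy --src aaaaa --dst bbbbb --project-uuid bbbbb-j7d0g-xxxxxxxxxxxxxxx aaaaa-4zz18-xxxxxxxxxxxxxxx</code>
+</pre>
+</notextile>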
+
+h3(#arv-tag). arv tag
+
+@arv tag@ is used to tag Arvados objects.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv tag --help</code>
+
+Usage:
+arv tag add tag1 [tag2 ...] --object object_uuid1 [object_uuid2...]
+arv tag remove tag1 [tag2 ...] --object object_uuid1 [object_uuid2...]
+arv tag remove --all
+
+  --dry-run, -n:   Don't actually do anything
+  --verbose, -v:   Print some things on stderr
+     --uuid, -u:   Return the UUIDs of the objects in the response, one per
+                   line (default)
+     --json, -j:   Return the entire response received from the API server, as
+                   a JSON object
+    --human, -h:   Return the response received from the API server, as a JSON
+                   object with whitespace added for human consumption
+   --pretty, -p:   Synonym of --human
+     --yaml, -y:   Return the response received from the API server, in YAML
+                   format
+     --help, -e:   Show this message
+</pre>
+</notextile>
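+
+For example, to add a tag to a collection and later remove it (the UUID shown is a placeholder):
+
+<notextile>
+<pre>
+$ <code class="userinput">arv tag add favorite --object qr1hi-4zz18-xxxxxxxxxxxxxxx</code>
+$ <code class="userinput">arv tag remove favorite --object qr1hi-4zz18-xxxxxxxxxxxxxxx</code>
+</pre>
+</notextile>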
+
+
+h3(#arv-ws). arv ws
+
+@arv ws@ provides access to the websockets event stream.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv ws --help</code>
+usage: arv-ws [-h] [-u UUID] [-f FILTERS]
+              [--poll-interval POLL_INTERVAL | --no-poll]
+              [-p PIPELINE | -j JOB]
+
+optional arguments:
+  -h, --help            show this help message and exit
+  -u UUID, --uuid UUID  Filter events on object_uuid
+  -f FILTERS, --filters FILTERS
+                        Arvados query filter to apply to log events (JSON
+                        encoded)
+  --poll-interval POLL_INTERVAL
+                        If websockets is not available, specify the polling
+                        interval, default is every 15 seconds
+  --no-poll             Do not poll if websockets are not available, just fail
+  -p PIPELINE, --pipeline PIPELINE
+                        Supply pipeline uuid, print log output from pipeline
+                        and its jobs
+  -j JOB, --job JOB     Supply job uuid, print log output from jobs
+</pre>
+</notextile>
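+
+For example, to follow only the events pertaining to a single object (the UUID shown is a placeholder):
+
+<notextile>
+<pre>
+$ <code class="userinput">arv ws -u qr1hi-4zz18-xxxxxxxxxxxxxxx</code>
+</pre>
+</notextile>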
+
+h3(#arv-keep). arv keep
+
+@arv keep@ provides access to the Keep storage service.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep --help</code>
+Usage: arv keep [method] [--parameters]
+Use 'arv keep [method] --help' to get more information about specific methods.
+
+Available methods: ls, get, put, docker
+</pre>
+</notextile>
+
+h3(#arv-keep-ls). arv keep ls
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep ls --help</code>
+usage: arv-ls [-h] [--retries RETRIES] [-s] locator
+
+List contents of a manifest
+
+positional arguments:
+  locator            Collection UUID or locator
+
+optional arguments:
+  -h, --help         show this help message and exit
+  --retries RETRIES  Maximum number of times to retry server requests that
+                     encounter temporary failures (e.g., server down). Default
+                     3.
+  -s                 List file sizes, in KiB.
+</pre>
+</notextile>
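+
+For example, to list the files in a collection along with their sizes (the UUID shown is a placeholder):
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep ls -s qr1hi-4zz18-xxxxxxxxxxxxxxx</code>
+</pre>
+</notextile>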
+
+h3(#arv-keep-get). arv keep get
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep get --help</code>
+usage: arv-get [-h] [--retries RETRIES]
+               [--progress | --no-progress | --batch-progress]
+               [--hash HASH | --md5sum] [-n] [-r] [-f | --skip-existing]
+               locator [destination]
+
+Copy data from Keep to a local file or pipe.
+
+positional arguments:
+  locator            Collection locator, optionally with a file path or
+                     prefix.
+  destination        Local file or directory where the data is to be written.
+                     Default: /dev/stdout.
+
+optional arguments:
+  -h, --help         show this help message and exit
+  --retries RETRIES  Maximum number of times to retry server requests that
+                     encounter temporary failures (e.g., server down). Default
+                     3.
+  --progress         Display human-readable progress on stderr (bytes and, if
+                     possible, percentage of total data size). This is the
+                     default behavior when it is not expected to interfere
+                     with the output: specifically, stderr is a tty _and_
+                     either stdout is not a tty, or output is being written to
+                     named files rather than stdout.
+  --no-progress      Do not display human-readable progress on stderr.
+  --batch-progress   Display machine-readable progress on stderr (bytes and,
+                     if known, total data size).
+  --hash HASH        Display the hash of each file as it is read from Keep,
+                     using the given hash algorithm. Supported algorithms
+                     include md5, sha1, sha224, sha256, sha384, and sha512.
+  --md5sum           Display the MD5 hash of each file as it is read from
+                     Keep.
+  -n                 Do not write any data -- just read from Keep, and report
+                     md5sums if requested.
+  -r                 Retrieve all files in the specified collection/prefix.
+                     This is the default behavior if the "locator" argument
+                     ends with a forward slash.
+  -f                 Overwrite existing files while writing. The default
+                     behavior is to refuse to write *anything* if any of the
+                     output files already exist. As a special case, -f is not
+                     needed to write to /dev/stdout.
+  --skip-existing    Skip files that already exist. The default behavior is to
+                     refuse to write *anything* if any files exist that would
+                     have to be overwritten. This option causes even devices,
+                     sockets, and fifos to be skipped.
+</pre>
+</notextile>
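+
+For example, to download a single file from a collection into the current directory (the UUID and file name are placeholders):
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep get qr1hi-4zz18-xxxxxxxxxxxxxxx/file1.txt .</code>
+</pre>
+</notextile>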
+
+h3(#arv-keep-put). arv keep put
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep put --help</code>
+usage: arv-put [-h] [--max-manifest-depth N | --normalize]
+               [--as-stream | --stream | --as-manifest | --in-manifest | --manifest | --as-raw | --raw]
+               [--use-filename FILENAME] [--filename FILENAME]
+               [--portable-data-hash] [--replication N]
+               [--project-uuid UUID] [--name NAME]
+               [--progress | --no-progress | --batch-progress]
+               [--resume | --no-resume] [--retries RETRIES]
+               [path [path ...]]
+
+Copy data from the local filesystem to Keep.
+
+positional arguments:
+  path                  Local file or directory. Default: read from standard
+                        input.
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --max-manifest-depth N
+                        Maximum depth of directory tree to represent in the
+                        manifest structure. A directory structure deeper than
+                        this will be represented as a single stream in the
+                        manifest. If N=0, the manifest will contain a single
+                        stream. Default: -1 (unlimited), i.e., exactly one
+                        manifest stream per filesystem directory that contains
+                        files.
+  --normalize           Normalize the manifest by re-ordering files and
+                        streams after writing data.
+  --as-stream           Synonym for --stream.
+  --stream              Store the file content and display the resulting
+                        manifest on stdout. Do not write the manifest to Keep
+                        or save a Collection object in Arvados.
+  --as-manifest         Synonym for --manifest.
+  --in-manifest         Synonym for --manifest.
+  --manifest            Store the file data and resulting manifest in Keep,
+                        save a Collection object in Arvados, and display the
+                        manifest locator (Collection uuid) on stdout. This is
+                        the default behavior.
+  --as-raw              Synonym for --raw.
+  --raw                 Store the file content and display the data block
+                        locators on stdout, separated by commas, with a
+                        trailing newline. Do not store a manifest.
+  --use-filename FILENAME
+                        Synonym for --filename.
+  --filename FILENAME   Use the given filename in the manifest, instead of the
+                        name of the local file. This is useful when "-" or
+                        "/dev/stdin" is given as an input file. It can be used
+                        only if there is exactly one path given and it is not
+                        a directory. Implies --manifest.
+  --portable-data-hash  Print the portable data hash instead of the Arvados
+                        UUID for the collection created by the upload.
+  --replication N       Set the replication level for the new collection: how
+                        many different physical storage devices (e.g., disks)
+                        should have a copy of each data block. Default is to
+                        use the server-provided default (if any) or 2.
+  --project-uuid UUID   Store the collection in the specified project, instead
+                        of your Home project.
+  --name NAME           Save the collection with the specified name.
+  --progress            Display human-readable progress on stderr (bytes and,
+                        if possible, percentage of total data size). This is
+                        the default behavior when stderr is a tty.
+  --no-progress         Do not display human-readable progress on stderr, even
+                        if stderr is a tty.
+  --batch-progress      Display machine-readable progress on stderr (bytes
+                        and, if known, total data size).
+  --resume              Continue interrupted uploads from cached state
+                        (default).
+  --no-resume           Do not continue interrupted uploads from cached state.
+  --retries RETRIES     Maximum number of times to retry server requests that
+                        encounter temporary failures (e.g., server down).
+                        Default 3.
+</pre>
+</notextile>
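+
+For example, to upload a file to a named collection in a specific project (the UUID and file name are placeholders):
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep put --name "My example files" --project-uuid qr1hi-j7d0g-xxxxxxxxxxxxxxx file1.txt</code>
+</pre>
+</notextile>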
+
+
+h3(#arv-pipeline-run). arv pipeline run
+
+@arv pipeline run@ can be used to start a pipeline run from the command line.
+
+The User Guide has a page with a bit more information on "using arv pipeline run":{{site.baseurl}}/user/topics/running-pipeline-command-line.html.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv pipeline run --help</code>
+Options:
+        --dry-run, -n:   Do not start any new jobs or wait for existing jobs to
+                         finish. Just find out whether jobs are finished,
+                         queued, or running for each component.
+    --status-text &lt;s&gt;:   Store plain text status in given file. (Default:
+                         /dev/stdout)
+    --status-json &lt;s&gt;:   Store json-formatted pipeline in given file. (Default:
+                         /dev/null)
+            --no-wait:   Do not wait for jobs to finish. Just look up status,
+                         submit new jobs if needed, and exit.
+           --no-reuse:   Do not reuse existing jobs to satisfy pipeline
+                         components. Submit a new job for every component.
+          --debug, -d:   Print extra debugging information on stderr.
+    --debug-level &lt;i&gt;:   Set debug verbosity level.
+       --template &lt;s&gt;:   UUID of pipeline template, or path to local pipeline
+                         template file.
+       --instance &lt;s&gt;:   UUID of pipeline instance.
+             --submit:   Submit the pipeline instance to the server, and exit.
+                         Let the Crunch dispatch service satisfy the components
+                         by finding/running jobs.
+  --run-pipeline-here:   Manage the pipeline instance in-process. Submit jobs
+                         to Crunch as needed. Do not exit until the pipeline
+                         finishes (or fails).
+      --run-jobs-here:   Run jobs in the local terminal session instead of
+                         submitting them to Crunch. Implies
+                         --run-pipeline-here. Note: this results in a
+                         significantly different job execution environment, and
+                         some Crunch features are not supported. It can be
+                         necessary to modify a pipeline in order to make it run
+                         this way.
+           --run-here:   Synonym for --run-jobs-here.
+    --description &lt;s&gt;:   Description for the pipeline instance.
+        --version, -v:   Print version and exit
+           --help, -h:   Show this message
+</pre>
+</notextile>
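+
+For example, to submit a pipeline instance based on an existing template and let the Crunch dispatch service satisfy the components (the template UUID shown is a placeholder):
+
+<notextile>
+<pre>
+$ <code class="userinput">arv pipeline run --template qr1hi-p5p6p-xxxxxxxxxxxxxxx --submit</code>
+</pre>
+</notextile>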
+
+h3(#arv-run). arv run
+
+The @arv-run@ command creates Arvados pipelines at the command line that fan out to multiple concurrent tasks across Arvados compute nodes.
+
+The User Guide has a page on "using arv-run":{{site.baseurl}}/user/topics/arv-run.html.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv run --help</code>
+usage: arv-run [-h] [--retries RETRIES] [--dry-run] [--local]
+               [--docker-image DOCKER_IMAGE] [--ignore-rcode] [--no-reuse]
+               [--no-wait] [--project-uuid PROJECT_UUID] [--git-dir GIT_DIR]
+               [--repository REPOSITORY] [--script-version SCRIPT_VERSION]
+               ...
+
+positional arguments:
+  args
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --retries RETRIES     Maximum number of times to retry server requests that
+                        encounter temporary failures (e.g., server down).
+                        Default 3.
+  --dry-run             Print out the pipeline that would be submitted and
+                        exit
+  --local               Run locally using arv-run-pipeline-instance
+  --docker-image DOCKER_IMAGE
+                        Docker image to use, otherwise use instance default.
+  --ignore-rcode        Commands that return non-zero return codes should not
+                        be considered failed.
+  --no-reuse            Do not reuse past jobs.
+  --no-wait             Do not wait and display logs after submitting command,
+                        just exit.
+  --project-uuid PROJECT_UUID
+                        Parent project of the pipeline
+  --git-dir GIT_DIR     Git repository passed to arv-crunch-job when using
+                        --local
+  --repository REPOSITORY
+                        repository field of component, default 'arvados'
+  --script-version SCRIPT_VERSION
+                        script_version field of component, default 'master'
+</pre>
+</notextile>
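+
+An illustrative invocation, which prints the pipeline that a simple @grep@ command would generate without actually submitting it (see the user guide page above for complete, tested examples):
+
+<notextile>
+<pre>
+$ <code class="userinput">arv run --dry-run grep -H -n GATTACA -- *.fa</code>
+</pre>
+</notextile>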
diff --git a/doc/sdk/go/example.html.textile.liquid b/doc/sdk/go/example.html.textile.liquid
new file mode 100644 (file)
index 0000000..a5a109b
--- /dev/null
@@ -0,0 +1,81 @@
+---
+layout: default
+navsection: sdk
+navmenu: Go
+title: Examples
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+See "Arvados GoDoc":https://godoc.org/git.curoverse.com/arvados.git/sdk/go for detailed documentation.
+
+In these examples, the site prefix is @aaaaa@.
+
+h2. Initialize SDK
+
+{% codeblock as go %}
+import (
+  "log"
+
+  "git.curoverse.com/arvados.git/sdk/go/arvados"
+  "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+)
+
+func main() {
+  // The api client is used in the examples below; Dict is arvadosclient.Dict.
+  api, err := arvadosclient.MakeArvadosClient()
+  if err != nil {
+    log.Fatalf("Error setting up arvados client: %s", err.Error())
+  }
+}
+{% endcodeblock %}
+
+h2. create
+
+{% codeblock as go %}
+  var collection arvados.Collection
+  err := api.Create("collections", Dict{"collection": Dict{"name": "create example"}}, &collection)
+{% endcodeblock %}
+
+h2. delete
+
+{% codeblock as go %}
+  var collection arvados.Collection
+  err := api.Delete("collections", "aaaaa-4zz18-ccccccccccccccc", Dict{}, &collection)
+{% endcodeblock %}
+
+h2. get
+
+{% codeblock as go %}
+  var collection arvados.Collection
+  err := api.Get("collections", "aaaaa-4zz18-ccccccccccccccc", Dict{}, &collection)
+{% endcodeblock %}
+
+h2. list
+
+{% codeblock as go %}
+  var collections arvados.CollectionList
+  err := api.List("collections", Dict{}, &collections)
+{% endcodeblock %}
+
+h2. update
+
+{% codeblock as go %}
+  var collection arvados.Collection
+  err := api.Update("collections", "aaaaa-4zz18-ccccccccccccccc", Dict{"collection": Dict{"name": "update example"}}, &collection)
+{% endcodeblock %}
+
+h2. Get current user
+
+{% codeblock as go %}
+  var user arvados.User
+  err := api.Get("users", "current", Dict{}, &user)
+{% endcodeblock %}
+
+h2. Example program
+
+You can save this source as a .go file and run it:
+
+<notextile>{% code 'example_sdk_go' as go %}</notextile>
+
+A few more usage examples can be found in the "services/keepproxy":https://dev.arvados.org/projects/arvados/repository/revisions/master/show/services/keepproxy and "sdk/go/keepclient":https://dev.arvados.org/projects/arvados/repository/revisions/master/show/sdk/go/keepclient directories in the arvados source tree.
diff --git a/doc/sdk/go/index.html.textile.liquid b/doc/sdk/go/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..a06d518
--- /dev/null
@@ -0,0 +1,28 @@
+---
+layout: default
+navsection: sdk
+navmenu: Go
+title: "Installation"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Go ("Golang":http://golang.org) SDK provides a generic set of wrappers so you can make API calls easily.
+
+See "Arvados GoDoc":https://godoc.org/git.curoverse.com/arvados.git/sdk/go for detailed documentation.
+
+h3. Installation
+
+Use @go get git.curoverse.com/arvados.git/sdk/go/arvadosclient@.  The go tools will fetch the relevant code and dependencies for you.
+
+{% codeblock as go %}
+import (
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+)
+{% endcodeblock %}
+
+If you need pre-release client code, you can use the latest version from the repo by following "these instructions.":https://dev.arvados.org/projects/arvados/wiki/Go#Using-Go-with-Arvados
diff --git a/doc/sdk/index.html.textile.liquid b/doc/sdk/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..dbfcaed
--- /dev/null
@@ -0,0 +1,22 @@
+---
+layout: default
+navsection: sdk
+title: "SDK Reference"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This section documents language bindings for the "Arvados API":{{site.baseurl}}/api and Keep that are available for various programming languages.  Not all features are available in every SDK.  The most complete SDK is the Python SDK.  Note that this section only gives a high level overview of each SDK.  Consult the "Arvados API":{{site.baseurl}}/api section for detailed documentation about Arvados API calls available on each resource.
+
+* "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html
+* "Command line SDK":{{site.baseurl}}/sdk/cli/install.html ("arv")
+* "Go SDK":{{site.baseurl}}/sdk/go/index.html
+* "R SDK":{{site.baseurl}}/sdk/R/index.html
+* "Perl SDK":{{site.baseurl}}/sdk/perl/index.html
+* "Ruby SDK":{{site.baseurl}}/sdk/ruby/index.html
+* "Java SDK":{{site.baseurl}}/sdk/java/index.html
+
+Many Arvados Workbench pages, under the *Advanced* tab, provide examples of API and SDK use for accessing the current resource.
diff --git a/doc/sdk/java/example.html.textile.liquid b/doc/sdk/java/example.html.textile.liquid
new file mode 100644 (file)
index 0000000..bc15b83
--- /dev/null
@@ -0,0 +1,83 @@
+---
+layout: default
+navsection: sdk
+navmenu: Java
+title: "Examples"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Initialize SDK
+
+{% codeblock as java %}
+import org.arvados.sdk.Arvados;
+{% endcodeblock %}
+
+{% codeblock as java %}
+    String apiName = "arvados";
+    String apiVersion = "v1";
+
+    Arvados arv = new Arvados(apiName, apiVersion);
+{% endcodeblock %}
+
+h2. create
+
+{% codeblock as java %}
+    Map<String, String> collection = new HashMap<String, String>();
+    collection.put("name", "create example");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+    params.put("collection", collection);
+    Map response = arv.call("collections", "create", params);
+{% endcodeblock %}
+
+h2. delete
+
+{% codeblock as java %}
+    Map<String, Object> params = new HashMap<String, Object>();
+    params.put("uuid", uuid);
+    Map response = arv.call("collections", "delete", params);
+{% endcodeblock %}
+
+h2. get
+
+{% codeblock as java %}
+    params = new HashMap<String, Object>();
+    params.put("uuid", userUuid);
+    Map response = arv.call("users", "get", params);
+{% endcodeblock %}
+
+h2. list
+
+{% codeblock as java %}
+    Map<String, Object> params = new HashMap<String, Object>();
+    Map response = arv.call("users", "list", params);
+
+    // get uuid of the first user from the response
+    List items = (List)response.get("items");
+
+    Map firstUser = (Map)items.get(0);
+    String userUuid = (String)firstUser.get("uuid");
+{% endcodeblock %}
+
+h2. update
+
+{% codeblock as java %}
+    Map<String, String> collection = new HashMap<String, String>();
+    collection.put("name", "update example");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+    params.put("uuid", uuid);
+    params.put("collection", collection);
+    Map response = arv.call("collections", "update", params);
+{% endcodeblock %}
+
+h2. Get current user
+
+{% codeblock as java %}
+    Map<String, Object> params = new HashMap<String, Object>();
+    Map response = arv.call("users", "current", params);
+{% endcodeblock %}
diff --git a/doc/sdk/java/index.html.textile.liquid b/doc/sdk/java/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..6099d7f
--- /dev/null
@@ -0,0 +1,144 @@
+---
+layout: default
+navsection: sdk
+navmenu: Java
+title: "Installation"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Java SDK provides a generic set of wrappers so you can make API calls in Java.
+
+h3. Introduction
+
+* The Java SDK requires Java 6 or later.
+
+* The Java SDK is implemented as a Maven project, so you need a working Maven
+environment to build the source code. If you do not have Maven set up,
+you may find the "Maven in 5 Minutes":http://maven.apache.org/guides/getting-started/maven-in-five-minutes.html guide useful.
+
+* In this document, $ARVADOS_HOME refers to the directory where the Arvados
+source code is cloned on your system, for example $ARVADOS_HOME = $HOME/arvados.
+
+
+h3. Setting up the environment
+
+* The SDK requires a running Arvados API server. The following information
+about the API server needs to be passed to the SDK using environment
+variables or during the construction of the Arvados instance.
+
+<notextile>
+<pre>
+ARVADOS_API_TOKEN: API client token to be used to authorize with API server.
+
+ARVADOS_API_HOST: Host name of the API server.
+
+ARVADOS_API_HOST_INSECURE: Set this to true if you are using self-signed
+    certificates and would like to bypass certificate validations.
+</pre>
+</notextile>
+
+* Please see "api-tokens":{{site.baseurl}}/user/reference/api-tokens.html for full details.
+
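+For example, you could export these variables in your shell before running the example programs below (the values shown are placeholders):
+
+<notextile>
+<pre>
+$ <code class="userinput">export ARVADOS_API_HOST=qr1hi.arvadosapi.com</code>
+$ <code class="userinput">export ARVADOS_API_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx</code>
+</pre>
+</notextile>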
+
+h3. Building the Arvados SDK
+
+<notextile>
+<pre>
+$ <code class="userinput">cd $ARVADOS_HOME/sdk/java</code>
+
+$ <code class="userinput">mvn -Dmaven.test.skip=true clean package</code>
+  This will generate the Arvados SDK jar file in the target directory
+</pre>
+</notextile>
+
+
+h3. Implementing your code to use the SDK
+
+* The following two programs serve as sample implementations using the SDK.
+<code class="userinput">$ARVADOS_HOME/sdk/java/ArvadosSDKJavaExample.java</code> is a simple program
+        that makes a few calls to the API server.
+<code class="userinput">$ARVADOS_HOME/sdk/java/ArvadosSDKJavaExampleWithPrompt.java</code> can be
+        used to make calls to the API server interactively.
+
+Please use these implementations to see how you would use the SDK from your Java program.
+
+Also, refer to <code class="userinput">$ARVADOS_HOME/sdk/java/src/test/java/org/arvados/sdk/java/ArvadosTest.java</code>
+for more sample API invocation examples.
+
+Below are the steps to compile and run these Java programs.
+
+* These programs create an instance of Arvados SDK class and use it to
+make various <code class="userinput">call</code> requests.
+
+* To compile the examples
+<notextile>
+<pre>
+$ <code class="userinput">javac -cp $ARVADOS_HOME/sdk/java/target/arvados-sdk-1.1-jar-with-dependencies.jar \
+ArvadosSDKJavaExample*.java</code>
+This results in the generation of the ArvadosSDKJavaExample*.class files
+in the same directory as the java files
+</pre>
+</notextile>
+
+* To run the samples
+<notextile>
+<pre>
+$ <code class="userinput">java -cp .:$ARVADOS_HOME/sdk/java/target/arvados-sdk-1.1-jar-with-dependencies.jar \
+ArvadosSDKJavaExample</code>
+$ <code class="userinput">java -cp .:$ARVADOS_HOME/sdk/java/target/arvados-sdk-1.1-jar-with-dependencies.jar \
+ArvadosSDKJavaExampleWithPrompt</code>
+</pre>
+</notextile>
+
+
+h3. Viewing and Managing SDK logging
+
+* The SDK uses log4j for logging.
+
+* The default location of the log file is
+  <code class="userinput">$ARVADOS_HOME/sdk/java/log/arvados_sdk_java.log</code>
+
+* Update the <code class="userinput">log4j.properties</code> file to change the name and location of the log file.
+
+<notextile>
+<pre>
+$ <code class="userinput">nano $ARVADOS_HOME/sdk/java/src/main/resources/log4j.properties</code>
+and modify the <code class="userinput">log4j.appender.fileAppender.File</code> property as needed.
+
+Rebuild the SDK:
+$ <code class="userinput">mvn -Dmaven.test.skip=true clean package</code>
+</pre>
+</notextile>
+
+
+h3. Using the SDK in Eclipse
+
+* To develop in Eclipse, you can use the provided Eclipse project.
+
+* Install the "m2eclipse":https://www.eclipse.org/m2e/ plugin in your Eclipse installation.
+
+* Set the <code class="userinput">M2_REPO</code> classpath variable in Eclipse to point to your local repository.
+The local repository is usually located in your home directory at <code class="userinput">$HOME/.m2/repository</code>.
+
+<notextile>
+<pre>
+In Eclipse IDE:
+Window -> Preferences -> Java -> Build Path -> Classpath Variables
+    Click on the "New..." button and add a new
+    M2_REPO variable and set it to your local Maven repository
+</pre>
+</notextile>
+
+
+* Open the SDK project in Eclipse
+<notextile>
+<pre>
+In Eclipse IDE:
+File -> Import -> Existing Projects into Workspace -> Next -> Browse
+    and select $ARVADOS_HOME/sdk/java
+</pre>
+</notextile>
diff --git a/doc/sdk/perl/example.html.textile.liquid b/doc/sdk/perl/example.html.textile.liquid
new file mode 100644 (file)
index 0000000..b51cfe4
--- /dev/null
@@ -0,0 +1,77 @@
+---
+layout: default
+navsection: sdk
+navmenu: Perl
+title: "Examples"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Initialize SDK
+
+Set up an API client user agent:
+
+{% codeblock as perl %}
+use Arvados;
+my $arv = Arvados->new('apiVersion' => 'v1');
+{% endcodeblock %}
+
+The SDK retrieves the list of API methods from the server at run time. Therefore, the set of available methods is determined by the server version rather than the SDK version.
+
+h2. create
+
+Create an object:
+
+{% codeblock as perl %}
+my $test_link = $arv->{'links'}->{'create'}->execute('link' => { 'link_class' => 'test', 'name' => 'test' });
+{% endcodeblock %}
+
+h2. delete
+
+Delete an object:
+
+{% codeblock as perl %}
+my $deleted_collection = $arv->{'collections'}->{'delete'}->execute('uuid' => $collection_uuid);
+{% endcodeblock %}
+
+h2. get
+
+Retrieve an object by ID:
+
+{% codeblock as perl %}
+my $some_user = $arv->{'users'}->{'get'}->execute('uuid' => $current_user_uuid);
+{% endcodeblock %}
+
+Get the UUID of an object that was retrieved using the SDK:
+
+{% codeblock as perl %}
+my $current_user_uuid = $current_user->{'uuid'};
+{% endcodeblock %}
+
+h2. list
+
+Get a list of objects:
+
+{% codeblock as perl %}
+my $repos = $arv->{'repositories'}->{'list'}->execute;
+print("UUID of first repo returned is ", $repos->{'items'}->[0]->{'uuid'}, "\n");
+{% endcodeblock %}
+
+h2. update
+
+Update an object:
+
+{% codeblock as perl %}
+my $test_link = $arv->{'links'}->{'update'}->execute(
+        'uuid' => $test_link->{'uuid'},
+        'link' => { 'properties' => { 'foo' => 'bar' } });
+{% endcodeblock %}
+
+h2. Get current user
+
+Get the User object for the current user:
+
+{% codeblock as perl %}
+my $current_user = $arv->{'users'}->{'current'}->execute;
+{% endcodeblock %}
diff --git a/doc/sdk/perl/index.html.textile.liquid b/doc/sdk/perl/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..4ee29c0
--- /dev/null
@@ -0,0 +1,69 @@
+---
+layout: default
+navsection: sdk
+navmenu: Perl
+title: "Installation"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Perl SDK provides a generic set of wrappers so you can make API calls easily.
+
+It should be treated as alpha/experimental. Currently, limitations include:
+* Verbose syntax.
+* No native Keep client.
+* No CPAN package.
+
+h3. Installation
+
+h4. Option 1: Install from distribution packages
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install libjson-perl libio-socket-ssl-perl libwww-perl libipc-system-simple-perl libarvados-perl</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install perl-ExtUtils-MakeMaker perl-JSON perl-IO-Socket-SSL perl-Crypt-SSLeay perl-WWW-Curl libarvados-perl</span>
+</code></pre>
+</notextile>
+
+h4. Option 2: Install from source
+
+First, install dependencies from your distribution.  Refer to the package lists above, but don't install @libarvados-perl@.
+
+Then run the following:
+
+<notextile>
+<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+~$ <span class="userinput">cd arvados/sdk/perl</span>
+~$ <span class="userinput">perl Makefile.PL</span>
+~$ <span class="userinput">sudo make install</span>
+</code></pre>
+</notextile>
+
+h3. Test installation
+
+If the SDK is installed, @perl -MArvados -e ''@ should produce no errors.
+
+If your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables are set up correctly (see "api-tokens":{{site.baseurl}}/user/reference/api-tokens.html for details), the following test script should work:
+
+<notextile>
+<pre>~$ <code class="userinput">perl &lt;&lt;'EOF'
+use Arvados;
+my $arv = Arvados-&gt;new('apiVersion' => 'v1');
+my $me = $arv-&gt;{'users'}-&gt;{'current'}-&gt;execute;
+print ("arvados.v1.users.current.full_name = '", $me-&gt;{'full_name'}, "'\n");
+EOF</code>
+arvados.v1.users.current.full_name = 'Your Name'
+</pre>
+</notextile>
diff --git a/doc/sdk/python/arvados-fuse.html.textile.liquid b/doc/sdk/python/arvados-fuse.html.textile.liquid
new file mode 100644 (file)
index 0000000..6169734
--- /dev/null
@@ -0,0 +1,64 @@
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: Arvados FUSE driver
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Arvados FUSE driver is a Python utility that presents the Keep service as a normal filesystem, so that data can be accessed with standard tools. The driver requires the Python SDK to be installed in order to access Arvados services.
+
+h3. Installation
+
+If you are logged in to an Arvados VM, the @arv-mount@ utility should already be installed.
+
+To use the FUSE driver elsewhere, you can install from a distribution package, PyPI, or source.
+
+{% include 'notebox_begin' %}
+The Python SDK requires Python 2.7.
+{% include 'notebox_end' %}
+
+h4. Option 1: Install from distribution packages
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+{% assign rh_version = "6" %}
+{% include 'note_python_sc' %}
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">echo 'exclude=python2-llfuse' | sudo tee -a /etc/yum.conf</span>
+~$ <span class="userinput">sudo yum install python-arvados-fuse</span>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-fuse</span>
+</code></pre>
+</notextile>
+
+h4. Option 2: Install with pip
+
+Run @pip-2.7 install arvados_fuse@ in an appropriate installation environment, such as a virtualenv.
+
+h4. Option 3: Install from source
+
+Install the @python-setuptools@ package from your distribution.  Then run the following:
+
+<notextile>
+<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+~$ <span class="userinput">cd arvados/services/fuse</span>
+~/arvados/services/fuse$ <span class="userinput">python2.7 setup.py install</span>
+</code></pre>
+</notextile>
+
+h3. Usage
+
+Please refer to the "Accessing Keep from GNU/Linux":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-gnu-linux.html tutorial for more information.
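+
+As a minimal check after installation (assuming your API credentials are set in the environment), you can mount Keep on an empty directory and browse it with ordinary shell commands:
+
+<notextile>
+<pre><code>~$ <span class="userinput">mkdir -p keep</span>
+~$ <span class="userinput">arv-mount keep</span>
+~$ <span class="userinput">ls keep</span>
+</code></pre>
+</notextile>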
diff --git a/doc/sdk/python/cookbook.html.textile.liquid b/doc/sdk/python/cookbook.html.textile.liquid
new file mode 100644 (file)
index 0000000..4a6c453
--- /dev/null
@@ -0,0 +1,208 @@
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: Code cookbook
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Cancel a container request
+
+{% codeblock as python %}
+import arvados
+arvados.api().container_requests().update(uuid=container_request_uuid, body={"priority": 0}).execute()
+{% endcodeblock %}
+
+h2. Cancel all container requests
+
+{% codeblock as python %}
+import arvados
+api = arvados.api()
+result = api.container_requests().list(filters=[["state", "=", "Committed"], ["priority", ">", "0"]]).execute()["items"]
+for container_request in result:
+    api.container_requests().update(uuid=container_request["uuid"], body={"priority": 0}).execute()
+{% endcodeblock %}
+
+h2. List completed container requests
+
+{% codeblock as python %}
+import arvados
+api = arvados.api()
+name = "%"  # the "like" filter uses SQL pattern syntax; "%" matches any name
+result = api.container_requests().list(filters=[["name", "like", name], ["state", "=", "Final"]]).execute()["items"]
+container_uuids = [cr["container_uuid"] for cr in result]
+containers = api.containers().list(filters=[["uuid", "in", container_uuids]]).execute()["items"]
+container_dict = {c["uuid"]: c for c in containers}
+
+for container_request in result:
+    container = container_dict[container_request["container_uuid"]]
+    print("%s, %s, %s" % (container_request["uuid"], container_request["name"], "Success" if container["exit_code"] == 0 else "Failed"))
+{% endcodeblock %}
+
+h2. Get input of a CWL workflow
+
+{% codeblock as python %}
+import arvados
+api = arvados.api()
+container_request_uuid = "qr1hi-xvhdp-zzzzzzzzzzzzzzz"
+container_request = api.container_requests().get(uuid=container_request_uuid).execute()
+print(container_request["mounts"]["/var/lib/cwl/cwl.input.json"])
+{% endcodeblock %}
+
+h2. Get output of a CWL workflow
+
+{% codeblock as python %}
+import arvados
+import arvados.collection
+api = arvados.api()
+container_request_uuid = "qr1hi-xvhdp-zzzzzzzzzzzzzzz"
+container_request = api.container_requests().get(uuid=container_request_uuid).execute()
+collection = arvados.collection.CollectionReader(container_request["output_uuid"])
+print(collection.open("cwl.output.json").read())
+{% endcodeblock %}
+
+h2. List input of child requests
+
+{% codeblock as python %}
+import arvados
+api = arvados.api()
+parent_request_uuid = "qr1hi-xvhdp-zzzzzzzzzzzzzzz"
+namefilter = "bwa%"  # the "like" filter uses SQL pattern match syntax
+container_request = api.container_requests().get(uuid=parent_request_uuid).execute()
+parent_container_uuid = container_request["container_uuid"]
+child_requests = api.container_requests().list(filters=[
+    ["requesting_container_uuid", "=", parent_container_uuid],
+    ["name", "like", namefilter]]).execute()
+for c in child_requests["items"]:
+    print("%s" % c["name"])
+    for m in c["mounts"].values():
+        if "portable_data_hash" in m:
+            print("  %s" % m["portable_data_hash"])
+{% endcodeblock %}
+
+h2. List output of child requests
+
+{% codeblock as python %}
+import arvados
+api = arvados.api()
+parent_request_uuid = "qr1hi-xvhdp-zzzzzzzzzzzzzzz"
+namefilter = "bwa%"  # the "like" filter uses SQL pattern match syntax
+container_request = api.container_requests().get(uuid=parent_request_uuid).execute()
+parent_container_uuid = container_request["container_uuid"]
+child_requests = api.container_requests().list(filters=[
+    ["requesting_container_uuid", "=", parent_container_uuid],
+    ["name", "like", namefilter]]).execute()
+output_uuids = [c["output_uuid"] for c in child_requests["items"]]
+collections = api.collections().list(filters=[["uuid", "in", output_uuids]]).execute()
+uuid_to_pdh = {c["uuid"]: c["portable_data_hash"] for c in collections["items"]}
+for c in child_requests["items"]:
+    print("%s -> %s" % (c["name"], uuid_to_pdh[c["output_uuid"]]))
+{% endcodeblock %}
+
+h2. List failed child requests
+
+{% codeblock as python %}
+import arvados
+api = arvados.api()
+parent_request_uuid = "qr1hi-xvhdp-zzzzzzzzzzzzzzz"
+container_request = api.container_requests().get(uuid=parent_request_uuid).execute()
+parent_container_uuid = container_request["container_uuid"]
+child_requests = api.container_requests().list(filters=[
+    ["requesting_container_uuid", "=", parent_container_uuid]], limit=1000).execute()
+child_containers = {c["container_uuid"]: c for c in child_requests["items"]}
+failed_child_containers = api.containers().list(filters=[
+    ["exit_code", "!=", "0"],
+    ["uuid", "in", list(child_containers.keys())]], limit=1000).execute()
+for c in failed_child_containers["items"]:
+    print("%s (%s)" % (child_containers[c["uuid"]]["name"], child_containers[c["uuid"]]["uuid"]))
+{% endcodeblock %}
+
+h2. Get log of a child request
+
+{% codeblock as python %}
+import arvados
+import arvados.collection
+api = arvados.api()
+container_request_uuid = "qr1hi-xvhdp-zzzzzzzzzzzzzzz"
+container_request = api.container_requests().get(uuid=container_request_uuid).execute()
+collection = arvados.collection.CollectionReader(container_request["log_uuid"])
+for c in collection:
+    print(collection.open(c).read())
+{% endcodeblock %}
+
+h2. Create a collection sharing link
+
+{% codeblock as python %}
+import arvados
+api = arvados.api()
+download = "https://your.download.server"
+collection_uuid = "qr1hi-4zz18-zzzzzzzzzzzzzzz"
+token = api.api_client_authorizations().create(body={"api_client_authorization":{"scopes": [
+    "GET /arvados/v1/collections/%s" % collection_uuid,
+    "GET /arvados/v1/collections/%s/" % collection_uuid,
+    "GET /arvados/v1/keep_services/accessible"]}}).execute()
+print("%s/c=%s/t=%s/_/" % (download, collection_uuid, token["api_token"]))
+{% endcodeblock %}
+
+h2. Combine two or more collections
+
+Note: if two collections have files with the same name, their contents will be concatenated in the resulting manifest.
+
+{% codeblock as python %}
+import arvados
+import arvados.collection
+api = arvados.api()
+project_uuid = "qr1hi-tpzed-zzzzzzzzzzzzzzz"
+collection_uuids = ["qr1hi-4zz18-aaaaaaaaaaaaaaa", "qr1hi-4zz18-bbbbbbbbbbbbbbb"]
+combined_manifest = ""
+for u in collection_uuids:
+    c = api.collections().get(uuid=u).execute()
+    combined_manifest += c["manifest_text"]
+newcol = arvados.collection.Collection(combined_manifest)
+newcol.save_new(name="My combined collection", owner_uuid=project_uuid)
+{% endcodeblock %}
+
+h2. Upload a file into a new collection
+
+{% codeblock as python %}
+import arvados
+import arvados.collection
+
+project_uuid = "qr1hi-j7d0g-zzzzzzzzzzzzzzz"
+collection_name = "My collection"
+filename = "file1.txt"
+
+api = arvados.api()
+c = arvados.collection.Collection()
+with open(filename, "rb") as reader:
+    with c.open(filename, "wb") as writer:
+        content = reader.read(128*1024)
+        while content:
+            writer.write(content)
+            content = reader.read(128*1024)
+c.save_new(name=collection_name, owner_uuid=project_uuid)
+print("Saved %s to %s" % (collection_name, c.manifest_locator()))
+{% endcodeblock %}
+
+h2. Download a file from a collection
+
+{% codeblock as python %}
+import arvados
+import arvados.collection
+
+collection_uuid = "qr1hi-4zz18-zzzzzzzzzzzzzzz"
+filename = "file1.txt"
+
+api = arvados.api()
+c = arvados.collection.CollectionReader(collection_uuid)
+with c.open(filename, "rb") as reader:
+    with open(filename, "wb") as writer:
+        content = reader.read(128*1024)
+        while content:
+            writer.write(content)
+            content = reader.read(128*1024)
+print("Finished downloading %s" % filename)
+{% endcodeblock %}
diff --git a/doc/sdk/python/crunch-utility-libraries.html.textile.liquid b/doc/sdk/python/crunch-utility-libraries.html.textile.liquid
new file mode 100644 (file)
index 0000000..3e26315
--- /dev/null
@@ -0,0 +1,228 @@
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: "Crunch utility libraries"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'pipeline_deprecation_notice' %}
+
+Several utility libraries are included with Arvados. They are intended to make it quicker and easier to write your own crunch scripts.
+
+* "Python SDK extras":#pythonsdk
+* "Toolkit wrappers":#toolkit_wrappers
+
+h2(#pythonsdk). Python SDK extras
+
+The Python SDK adds some convenience features that are particularly useful in crunch scripts, in addition to the standard set of API calls.
+
+In a crunch job, the environment variables @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ will be set up so the job has the privileges of the user who submitted the job.
+
+<pre>
+import arvados
+
+my_user = arvados.api().users().current().execute()
+my_uuid = my_user['uuid']
+</pre>
+
+h3. Get the current job and task parameters
+
+@arvados.current_job()@ and @arvados.current_task()@ are convenient ways to retrieve the current Job and Task, using the @JOB_UUID@ and @TASK_UUID@ environment variables provided to each crunch task process.
+
+<pre>
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+this_job_input = this_job['script_parameters']['input']
+this_task_input = this_task['parameters']['input']
+</pre>
+
+h3(#one_task_per_input). Queue a task for each input file
+
+A common pattern for a crunch job is to run one task to scan the input, and one task per input file to do the work.
+
+The @one_task_per_input_file()@ function implements this pattern. Pseudocode:
+
+<pre>
+if this is the job's first (default) task:
+    for each file in the 'input' collection:
+        queue a new task, with parameters['input'] = file
+    exit
+else:
+    return
+</pre>
+
+Usage:
+
+<pre>
+import arvados
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True)
+
+# Now do the work on a single file
+this_task = arvados.current_task()
+my_input = this_task['parameters']['input']
+</pre>
+
+h3. Set the current task's output and success flag
+
+Each task in a crunch job must make an API call to record its output and set its @success@ attribute to True. The object returned by @current_task()@ has a @set_output()@ method to make the process more succinct.
+
+<pre>
+arvados.current_task().set_output(my_output_locator)
+</pre>
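+
+Putting these pieces together, a complete one-task-per-file crunch script might look like the following sketch. The @CollectionWriter@ calls are illustrative placeholders for real analysis output; adapt them to your tool.
+
+<pre>
+import arvados
+
+# On the first (default) task, queue one new task per input file, then exit.
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True)
+
+# Each queued task resumes here with its own 'input' parameter.
+this_task = arvados.current_task()
+input_locator = this_task['parameters']['input']
+
+# ... run the real analysis on input_locator here ...
+
+out = arvados.CollectionWriter()
+out.set_current_file_name('result.txt')
+out.write('analysis output\n')
+
+# Record the output collection and mark this task successful.
+this_task.set_output(out.finish())
+</pre>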
+
+h3. arvados_ipc.py
+
+Manage child processes and FIFOs (pipes).
+
+This module makes it easier to check the exit status of every child process you start, and close the unused end of each FIFO at the appropriate time.
+
+<pre>
+from arvados_ipc import *
+
+children = {}
+pipes = {}
+
+pipe_setup(pipes, 'hellopipe')
+if 0 == named_fork(children, 'child_a'):
+    pipe_closeallbut(pipes, ('hellopipe', 'w'))
+    os.write(pipes['hellopipe', 'w'], "Hello, parent.")
+    os._exit(0)
+
+pipe_closeallbut(pipes, ('hellopipe', 'r'))
+with os.fdopen(pipes['hellopipe', 'r'], 'rb') as f:
+    message = f.read()
+    sys.stderr.write("Child says: " + message + "\n")
+
+if not waitpid_and_check_children(children):
+    raise Exception("Child process exited non-zero.")
+</pre>
+
+The "crunch scripts" included with Arvados include some more examples of using the arvados_ipc module.
+
+h2(#toolkit_wrappers). Toolkit wrappers
+
+The following *arvados-&lowast;.py* modules provide "extract, build, run" helpers to make it easy to incorporate common analysis tools in your crunch scripts.
+
+h3. arvados_bwa.py
+
+Build and run the "bwa":http://bio-bwa.sourceforge.net/bwa.shtml program.
+
+The module retrieves the bwa source code from Keep, using the job's @bwa_tbz@ parameter.
+
+<pre>
+import arvados_bwa
+arvados_bwa.run('aln', [ref_basename, '-'],
+                stdin=open(fastq_filename,'rb'),
+                stdout=open(aln_filename,'wb'))
+</pre>
+
+On qr1hi.arvadosapi.com, the source distribution @bwa-0.7.5a.tar.bz2@ is available in the collection @8b6e2c4916133e1d859c9e812861ce13+70@.
+
+<pre>
+{
+ "script_parameters":{
+  "bwa_tbz":"8b6e2c4916133e1d859c9e812861ce13+70",
+  ...
+ },
+ ...
+}
+</pre>
+
+h3. arvados_gatk2.py
+
+Extract and run the "Genome Analysis Toolkit":http://www.broadinstitute.org/gatk/ programs.
+
+The module retrieves the binary distribution tarball from Keep, using the job's @gatk_tbz@ parameter.
+
+<pre>
+import arvados_gatk2
+arvados_gatk2.run(
+    args=[
+        '-nct', 8,
+        '-T', 'BaseRecalibrator',
+        '-R', ref_fasta_files[0],
+        '-I', input_bam_files[0],
+        '-o', recal_file,
+        ])
+</pre>
+
+On qr1hi.arvadosapi.com, the binary distribution @GenomeAnalysisTK-2.6-4.tar.bz2@ is available in the collection @5790482512cf6d5d6dfd50b7fd61e1d1+86@.
+
+The GATK data bundle is available in the collection @d237a90bae3870b3b033aea1e99de4a9+10820@.
+
+<pre>
+{
+ "script_parameters":{
+  "gatk_tbz":"7e0a277d6d2353678a11f56bab3b13f2+87",
+  "gatk_bundle":"d237a90bae3870b3b033aea1e99de4a9+10820",
+  ...
+ },
+ ...
+}
+</pre>
+
+h3. arvados_samtools.py
+
+Build and run the "samtools":http://samtools.sourceforge.net/samtools.shtml program.
+
+The module retrieves the samtools source code from Keep, using the job's @samtools_tgz@ parameter.
+
+<pre>
+import arvados_samtools
+arvados_samtools.run('view', ['-S', '-b', '-'],
+                     stdin=open(sam_filename,'rb'),
+                     stdout=open(bam_filename,'wb'))
+</pre>
+
+On qr1hi.arvadosapi.com, the source distribution @samtools-0.1.19.tar.gz@ is available in the collection @c777e23cf13e5d5906abfdc08d84bfdb+74@.
+
+<pre>
+{
+ "script_parameters":{
+  "samtools_tgz":"c777e23cf13e5d5906abfdc08d84bfdb+74",
+  ...
+ },
+ ...
+}
+</pre>
+
+h3. arvados_picard.py
+
+Build and run the "picard":http://picard.sourceforge.net/command-line-overview.shtml program.
+
+The module retrieves the picard binary distribution from Keep, using the job's @picard_zip@ parameter.
+
+<pre>
+import arvados_picard
+arvados_picard.run(
+    'FixMateInformation',
+    params={
+        'i': input_bam_path,
+        'o': '/dev/stdout',
+        'quiet': 'true',
+        'so': 'coordinate',
+        'validation_stringency': 'LENIENT',
+        'compression_level': 0
+        },
+    stdout=open('out.bam','wb'))
+</pre>
+
+On qr1hi.arvadosapi.com, the binary distribution @picard-tools-1.82.zip@ is available in the collection @687f74675c6a0e925dec619cc2bec25f+77@.
+
+<pre>
+{
+ "script_parameters":{
+  "picard_zip":"687f74675c6a0e925dec619cc2bec25f+77",
+  ...
+ },
+ ...
+}
+</pre>
diff --git a/doc/sdk/python/events.html.textile.liquid b/doc/sdk/python/events.html.textile.liquid
new file mode 100644 (file)
index 0000000..afbec20
--- /dev/null
@@ -0,0 +1,29 @@
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: Subscribing to events
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados applications can subscribe to a live event stream from the database. Events are described in the "Log resource":{{site.baseurl}}/api/methods/logs.html reference.
+
+{% codeblock as python %}
+#!/usr/bin/env python
+
+import arvados
+import arvados.events
+
+# 'ev' is a dict containing the log table record describing the change.
+def on_message(ev):
+    if ev.get("event_type") == "create" and ev.get("object_kind") == "arvados#collection":
+        print "A new collection was created: %s" % ev["object_uuid"]
+
+api = arvados.api("v1")
+ws = arvados.events.subscribe(api, [], on_message)
+ws.run_forever()
+{% endcodeblock %}
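+
+Instead of filtering client-side, you can pass filters to @subscribe@ so the server only sends matching events. A minimal sketch using the standard Log resource filter syntax:
+
+{% codeblock as python %}
+#!/usr/bin/env python
+
+import arvados
+import arvados.events
+
+def on_message(ev):
+    print("A new object was created: %s" % ev["object_uuid"])
+
+api = arvados.api("v1")
+# Only 'create' events will be delivered to on_message.
+ws = arvados.events.subscribe(api, [["event_type", "=", "create"]], on_message)
+ws.run_forever()
+{% endcodeblock %}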
diff --git a/doc/sdk/python/example.html.textile.liquid b/doc/sdk/python/example.html.textile.liquid
new file mode 100644 (file)
index 0000000..504d078
--- /dev/null
@@ -0,0 +1,56 @@
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: Examples
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+In these examples, the site prefix is @aaaaa@.
+
+h2. Initialize SDK
+
+{% codeblock as python %}
+import arvados
+api = arvados.api("v1")
+{% endcodeblock %}
+
+h2. create
+
+{% codeblock as python %}
+result = api.collections().create(body={"collection": {"name": "create example"}}).execute()
+{% endcodeblock %}
+
+h2. delete
+
+{% codeblock as python %}
+result = api.collections().delete(uuid="aaaaa-4zz18-ccccccccccccccc").execute()
+{% endcodeblock %}
+
+h2. get
+
+{% codeblock as python %}
+result = api.collections().get(uuid="aaaaa-4zz18-ccccccccccccccc").execute()
+{% endcodeblock %}
+
+h2. list
+
+{% codeblock as python %}
+result = api.collections().list(filters=[["uuid", "=", "aaaaa-4zz18-ccccccccccccccc"]]).execute()
+{% endcodeblock %}
+
+h2. update
+
+{% codeblock as python %}
+result = api.collections().update(uuid="aaaaa-4zz18-ccccccccccccccc", body={"collection": {"name": "update example"}}).execute()
+{% endcodeblock %}
+
+h2. Get current user
+
+{% codeblock as python %}
+result = api.users().current().execute()
+{% endcodeblock %}
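+
+h2. Paging through list results
+
+@list@ returns at most one page of results at a time. The sketch below walks a larger result set with the @limit@ and @offset@ parameters and the @items_available@ field of the response; the page size of 100 is arbitrary.
+
+{% codeblock as python %}
+offset = 0
+while True:
+    page = api.collections().list(limit=100, offset=offset).execute()
+    for item in page["items"]:
+        print(item["uuid"])
+    offset += len(page["items"])
+    if not page["items"] or offset >= page["items_available"]:
+        break
+{% endcodeblock %}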
diff --git a/doc/sdk/python/python.html.textile.liquid b/doc/sdk/python/python.html.textile.liquid
new file mode 100644 (file)
index 0000000..8ba2dc7
--- /dev/null
@@ -0,0 +1,15 @@
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: "PyDoc Reference"
+
+no_nav_left: true
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+notextile. <iframe src="arvados/" style="width:100%; height:100%; border:none" />
diff --git a/doc/sdk/python/sdk-python.html.textile.liquid b/doc/sdk/python/sdk-python.html.textile.liquid
new file mode 100644 (file)
index 0000000..be82439
--- /dev/null
@@ -0,0 +1,174 @@
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: "Installation"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Python SDK provides access from Python to the Arvados API and Keep.  It also includes a number of command line tools for using and administering Arvados and Keep, and some conveniences for use in Crunch scripts; see "Crunch utility libraries":crunch-utility-libraries.html for details.
+
+h2. Installation
+
+If you are logged in to an Arvados VM, the Python SDK should be installed.
+
+To use the Python SDK elsewhere, you can install from PyPI or a distribution package.
+
+{% include 'notebox_begin' %}
+The Python SDK requires Python 2.7.
+{% include 'notebox_end' %}
+
+h3. Option 1: Install with pip
+
+This installation method is recommended to make the SDK available for use in your own Python programs. It can coexist with the system-wide installation method from a distribution package (option 2, below).
+
+Run @pip-2.7 install arvados-python-client@ in an appropriate installation environment, such as a virtualenv.
+
+If your version of @pip@ is 1.4 or newer, the @pip install@ command might give an error: "Could not find a version that satisfies the requirement arvados-python-client". If this happens, try @pip-2.7 install --pre arvados-python-client@.
+
+h3. Option 2: Install from a distribution package
+
+This installation method is recommended to make the CLI tools available system-wide. It can coexist with the installation method described in option 1, above.
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install python-arvados-python-client</code>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-python-client</code>
+</code></pre>
+</notextile>
+
+h3. Test installation
+
+If the SDK is installed and your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables are set up correctly (see "api-tokens":{{site.baseurl}}/user/reference/api-tokens.html for details), @import arvados@ should produce no errors.
+
+If you installed with pip (option 1, above):
+
+<notextile>
+<pre>~$ <code class="userinput">python</code>
+Python 2.7.4 (default, Sep 26 2013, 03:20:26)
+[GCC 4.7.3] on linux2
+Type "help", "copyright", "credits" or "license" for more information.
+>>> <code class="userinput">import arvados</code>
+>>> <code class="userinput">arvados.api('v1')</code>
+&lt;apiclient.discovery.Resource object at 0x233bb50&gt;
+</pre>
+</notextile>
+
+If you installed from a distribution package (option 2), note that the package includes a virtualenv, which means the correct Python environment needs to be loaded before the Arvados SDK can be imported. This can be done by activating the virtualenv first:
+
+<notextile>
+<pre>~$ <code class="userinput">source /usr/share/python2.7/dist/python-arvados-python-client/bin/activate</code>
+(python-arvados-python-client) ~$ <code class="userinput">python</code>
+Python 2.7.4 (default, Sep 26 2013, 03:20:26)
+[GCC 4.7.3] on linux2
+Type "help", "copyright", "credits" or "license" for more information.
+>>> <code class="userinput">import arvados</code>
+>>> <code class="userinput">arvados.api('v1')</code>
+&lt;apiclient.discovery.Resource object at 0x233bb50&gt;
+</pre>
+</notextile>
+
+Alternatively, use the Python executable from the virtualenv directly:
+
+<notextile>
+<pre>~$ <code class="userinput">/usr/share/python2.7/dist/python-arvados-python-client/bin/python</code>
+Python 2.7.4 (default, Sep 26 2013, 03:20:26)
+[GCC 4.7.3] on linux2
+Type "help", "copyright", "credits" or "license" for more information.
+>>> <code class="userinput">import arvados</code>
+>>> <code class="userinput">arvados.api('v1')</code>
+&lt;apiclient.discovery.Resource object at 0x233bb50&gt;
+</pre>
+</notextile>
+
+h3. Examples
+
+Get the User object for the current user:
+
+<notextile>
+<pre><code class="userinput">current_user = arvados.api('v1').users().current().execute()
+</code></pre>
+</notextile>
+
+Get the UUID of an object that was retrieved using the SDK:
+
+<notextile>
+<pre><code class="userinput">my_uuid = current_user['uuid']
+</code></pre>
+</notextile>
+
+Retrieve an object by ID:
+
+<notextile>
+<pre><code class="userinput">some_user = arvados.api('v1').users().get(uuid=my_uuid).execute()
+</code></pre>
+</notextile>
+
+Create an object:
+
+<notextile>
+<pre><code class="userinput">test_link = arvados.api('v1').links().create(
+    body={'link_class':'test','name':'test'}).execute()
+</code></pre>
+</notextile>
+
+Update an object:
+
+<notextile>
+<pre><code class="userinput">arvados.api('v1').links().update(
+    uuid=test_link['uuid'],
+    body={'properties':{'foo':'bar'}}).execute()
+</code></pre>
+</notextile>
+
+Get a list of objects:
+
+<notextile>
+<pre><code class="userinput">repos = arvados.api('v1').repositories().list().execute()
+len(repos['items'])</code>
+2
+<code class="userinput">repos['items'][0]['uuid']</code>
+u'qr1hi-s0uqq-kg8cawglrf74bmw'
+</pre>
+</notextile>
+
+h3. Notes
+
+The general form of an API call is:
+
+<notextile>
+<pre><code class="userinput">arvados.api(<i>api_version</i>).<i>plural_resource_type</i>().<i>api_method</i>(<i>parameter</i>=<i>value</i>, ...).execute()
+</code></pre>
+</notextile>
+
+Many API methods accept a parameter whose name is the same as the resource type. For example, @links.create@ accepts a parameter called @link@. This parameter should be given as @body@.
+
+<notextile>
+<pre><code class="userinput">arvados.api('v1').links().create(
+    uuid=test_link['uuid'],
+    body={'properties':{'foo':'bar'}}).execute()
+</code></pre>
+</notextile>
+
+One way to make API calls slightly less verbose is:
+
+<notextile>
+<pre><code class="userinput">arv = arvados.api('v1')
+j = arv.jobs().list().execute()
+</code></pre>
+</notextile>
+
+The SDK retrieves the list of API methods from the server at run time. Therefore, the set of available methods is determined by the server version rather than the SDK version.
diff --git a/doc/sdk/ruby/example.html.textile.liquid b/doc/sdk/ruby/example.html.textile.liquid
new file mode 100644 (file)
index 0000000..b8c0dcb
--- /dev/null
@@ -0,0 +1,81 @@
+---
+layout: default
+navsection: sdk
+navmenu: Ruby
+title: Examples
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. Initialize SDK
+
+Import the module and set up an API client user agent:
+
+{% codeblock as ruby %}
+require 'arvados'
+arv = Arvados.new(apiVersion: 'v1')
+{% endcodeblock %}
+
+The SDK retrieves the list of API methods from the server at run time. Therefore, the set of available methods is determined by the server version rather than the SDK version.
+
+h2. create
+
+Create an object:
+
+{% codeblock as ruby %}
+new_link = arv.link.create(link: {link_class: 'test', name: 'test'})
+{% endcodeblock %}
+
+h2. delete
+
+Delete an object:
+
+{% codeblock as ruby %}
+arv.link.delete(uuid: new_link[:uuid])
+{% endcodeblock %}
+
+h2. get
+
+Retrieve an object by ID:
+
+{% codeblock as ruby %}
+some_user = arv.user.get(uuid: current_user_uuid)
+{% endcodeblock %}
+
+h2. list
+
+Get a list of objects:
+
+{% codeblock as ruby %}
+repos = arv.repository.list
+first_repo = repos[:items][0]
+puts "UUID of first repo returned is #{first_repo[:uuid]}"</code>
+{% endcodeblock %}
+
+This prints something like: @UUID of first repo returned is qr1hi-s0uqq-b1bnybpx3u5temz@
+
+h2. update
+
+Update an object:
+
+{% codeblock as ruby %}
+updated_link = arv.link.update(uuid: new_link[:uuid],
+                               link: {properties: {foo: 'bar'}})
+{% endcodeblock %}
+
+h2. Get current user
+
+Get the User object for the current user:
+
+{% codeblock as ruby %}
+current_user = arv.user.current
+{% endcodeblock %}
+
+Get the UUID of an object that was retrieved using the SDK:
+
+{% codeblock as ruby %}
+current_user_uuid = current_user[:uuid]
+{% endcodeblock %}
diff --git a/doc/sdk/ruby/index.html.textile.liquid b/doc/sdk/ruby/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..ec7bfaa
--- /dev/null
@@ -0,0 +1,58 @@
+---
+layout: default
+navsection: sdk
+navmenu: Ruby
+title: "Installation"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Ruby SDK provides a generic set of wrappers so you can make API calls easily.
+
+h3. Installation
+
+If you are logged in to an Arvados VM, the Ruby SDK should be installed.
+
+To use it elsewhere, you can either install the @arvados@ gem via RubyGems or build and install the package using the arvados source tree.
+
+h4. Prerequisites: Ruby &gt;= 2.0.0
+
+You can use "RVM":http://rvm.io/rvm/install to install and manage Ruby versions.
+
+h4. Option 1: install with RubyGems
+
+<notextile>
+<pre>
+$ <code class="userinput">sudo -i gem install arvados</code>
+</pre>
+</notextile>
+
+h4. Option 2: build and install from source
+
+<notextile>
+<pre>
+$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
+$ <code class="userinput">cd arvados/sdk/ruby</code>
+$ <code class="userinput">gem build arvados.gemspec</code>
+$ <code class="userinput">sudo -i gem install arvados-*.gem</code>
+</pre>
+</notextile>
+
+h4. Test installation
+
+If the SDK is installed, @ruby -r arvados -e 'puts "OK!"'@ should produce no errors.
+
+If your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables are set up correctly (see "api-tokens":{{site.baseurl}}/user/reference/api-tokens.html for details), the following test script should work:
+
+<notextile>
+<pre>$ <code class="userinput">ruby -r arvados &lt;&lt;'EOF'
+arv = Arvados.new api_version: 'v1'
+my_full_name = arv.user.current[:full_name]
+puts "arvados.v1.users.current.full_name = '#{my_full_name}'"
+EOF</code>
+arvados.v1.users.current.full_name = 'Your Name'
+</pre>
+</notextile>
diff --git a/doc/start/getting_started/firstpipeline.html.textile.liquid b/doc/start/getting_started/firstpipeline.html.textile.liquid
new file mode 100644 (file)
index 0000000..43369a3
--- /dev/null
@@ -0,0 +1,94 @@
+---
+layout: default
+navsection: start 
+title: Run your first pipeline in minutes
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. LobSTR v3 
+
+In this quickstart guide, we'll run an existing pipeline with pre-existing data. Step-by-step instructions are shown below. You can follow along using your own local installation or the <a href="https://playground.arvados.org/">Arvados Playground</a> (any Google account can be used to log in).
+
+(For more information about this pipeline, see our <a href="https://dev.arvados.org/projects/arvados/wiki/LobSTR_tutorial">detailed lobSTR guide</a>).
+
+<div id="carousel-firstpipe" class="carousel slide" data-interval="false">
+  <!-- Indicators -->
+  <ol class="carousel-indicators">
+    <li data-target="#carousel-firstpipe" data-slide-to="0" class="active"></li>
+    <li data-target="#carousel-firstpipe" data-slide-to="1"></li>
+    <li data-target="#carousel-firstpipe" data-slide-to="2"></li>
+    <li data-target="#carousel-firstpipe" data-slide-to="3"></li>
+    <li data-target="#carousel-firstpipe" data-slide-to="4"></li>
+    <li data-target="#carousel-firstpipe" data-slide-to="5"></li>
+    <li data-target="#carousel-firstpipe" data-slide-to="6"></li>
+  </ol>
+
+  <!-- Wrapper for slides -->
+  <div class="carousel-inner" role="listbox">
+    <div class="item active">
+      <img src="{{ site.baseurl }}/images/quickstart/1.png" alt="Step 1. At the dashboard, click 'Run a pipeline...'.">
+      <div class="carousel-caption">
+        Step 1. At the dashboard, click 'Run a pipeline...'.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/quickstart/2.png" alt="Choose 'lobstr v.3' and hit 'Next'.">
+      <div class="carousel-caption">
+        Choose 'lobstr v.3' and hit 'Next'.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/quickstart/3.png" alt="Rename the pipeline instance, then click 'Run'. Click 'Choose' to change the default inputs.">
+      <div class="carousel-caption">
+        Rename the pipeline instance, then click 'Run'. Click 'Choose' to change the default inputs.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/quickstart/4.png" alt="Here we search for and choose new inputs.">
+      <div class="carousel-caption">
+        Here we search for and choose new inputs.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/quickstart/5.png" alt="After the job completes, you can re-run it with one click.">
+      <div class="carousel-caption">
+        After the job completes, you can re-run it with one click.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/quickstart/6.png" alt="You can inspect details about the pipeline which are automatically logged.">
+      <div class="carousel-caption">
+        You can inspect automatically-logged details about the pipeline.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/quickstart/7.png" alt="Click 'Create sharing link' to share the output files with people outside Arvados. [END]">
+      <div class="carousel-caption">
+        Click 'Create sharing link' to share the output files with people outside Arvados. [END]
+      </div>
+    </div>
+
+  </div>
+
+  <!-- Controls -->
+  <a class="left carousel-control" href="#carousel-firstpipe" role="button" data-slide="prev">
+    <span class="glyphicon glyphicon-chevron-left" aria-hidden="true"></span>
+    <span class="sr-only">Previous</span>
+  </a>
+  <a class="right carousel-control" href="#carousel-firstpipe" role="button" data-slide="next">
+    <span class="glyphicon glyphicon-chevron-right" aria-hidden="true"></span>
+    <span class="sr-only">Next</span>
+  </a>
+</div>
+
+Tip: You may need to make your browser window bigger to see full-size images in the gallery above.
diff --git a/doc/start/getting_started/nextsteps.html.textile.liquid b/doc/start/getting_started/nextsteps.html.textile.liquid
new file mode 100644 (file)
index 0000000..dd059ea
--- /dev/null
@@ -0,0 +1,12 @@
+---
+layout: default
+navsection: start 
+title: Check out the User Guide 
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Now that you've finished the Getting Started guide, check out the "User Guide":{{site.baseurl}}/user/index.html. The User Guide goes into more depth than the Getting Started guide: it covers developing your own pipelines as well as running pre-existing ones, and the Arvados command line tools as well as the Workbench graphical interface. Its sections can be read in any order.
diff --git a/doc/start/getting_started/publicproject.html.textile.liquid b/doc/start/getting_started/publicproject.html.textile.liquid
new file mode 100644 (file)
index 0000000..0fabad7
--- /dev/null
@@ -0,0 +1,133 @@
+---
+layout: default
+navsection: start
+title: Visit an Arvados Public Project
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. <a href="https://workbench.qr1hi.arvadosapi.com/projects/qr1hi-j7d0g-662ij1pcw6bj8uj">Mason Lab - Pathomap / Ancestry Mapper (Public)</a>
+
+You can see Arvados in action by accessing the <a href="https://workbench.qr1hi.arvadosapi.com/projects/qr1hi-j7d0g-662ij1pcw6bj8uj">Mason Lab - Pathomap / Ancestry Mapper (Public) project</a>. By visiting this project, you can see what an Arvados project is, access data collections in this project, and click through a pipeline instance's contents.
+
+You will be accessing this project in read-only mode and will not be able to make any modifications such as running a new pipeline instance.
+
+<div id="carousel-publicproject" class="carousel slide" data-interval="false">
+  <!-- Indicators -->
+  <ol class="carousel-indicators">
+    <li data-target="#carousel-publicproject" data-slide-to="0" class="active"></li>
+    <li data-target="#carousel-publicproject" data-slide-to="1"></li>
+    <li data-target="#carousel-publicproject" data-slide-to="2"></li>
+    <li data-target="#carousel-publicproject" data-slide-to="3"></li>
+    <li data-target="#carousel-publicproject" data-slide-to="4"></li>
+    <li data-target="#carousel-publicproject" data-slide-to="5"></li>
+    <li data-target="#carousel-publicproject" data-slide-to="6"></li>
+    <li data-target="#carousel-publicproject" data-slide-to="7"></li>
+    <li data-target="#carousel-publicproject" data-slide-to="8"></li>
+    <li data-target="#carousel-publicproject" data-slide-to="9"></li>
+    <li data-target="#carousel-publicproject" data-slide-to="10"></li>
+    <li data-target="#carousel-publicproject" data-slide-to="11"></li>
+  </ol>
+
+  <!-- Wrapper for slides -->
+  <div class="carousel-inner" role="listbox">
+    <div class="item active">
+      <img src="{{ site.baseurl }}/images/publicproject/description.png" alt="Step 1. The project's first tab, *Description*, describes what this project is all about.">
+      <div class="carousel-caption">
+        Step 1. The project's first tab, *Description*, describes what this project is all about.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/publicproject/collections.png" alt="The *Data collections* tab contains the various pipeline inputs, logs, and outputs.">
+      <div class="carousel-caption">
+        The *Data collections* tab contains the various pipeline inputs, logs, and outputs.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/publicproject/instances.png" alt="You can see the jobs and pipelines in this project by accessing the *Jobs and pipelines* tab.">
+      <div class="carousel-caption">
+        You can see the jobs and pipelines in this project by accessing the *Jobs and pipelines* tab.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/publicproject/collection-show.png" alt="In the *Data collections* tab, click on the *Show* icon to the left of a collection to see the collection contents.">
+      <div class="carousel-caption">
+        In the *Data collections* tab, click on the *Show* icon to the left of a collection to see the collection contents.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/publicproject/collection-files.png" alt="The collection page lists the details about it. The *Files* tab can be used to view and download individual files in it.">
+      <div class="carousel-caption">
+        The collection page lists the details about it. The *Files* tab can be used to view and download individual files in it.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/publicproject/collection-graph.png" alt="The collection *Provenance graph* tab gives a visual representation of this collection's provenance.">
+      <div class="carousel-caption">
+        The collection *Provenance graph* tab gives a visual representation of this collection's provenance.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/publicproject/instance-show.png" alt="In the project *Jobs and pipelines* tab, click on the *Show* icon to the left of a pipeline to access the pipeline contents.">
+      <div class="carousel-caption">
+        In the project *Jobs and pipelines* tab, click on the *Show* icon to the left of a pipeline to access the pipeline contents.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/publicproject/instance-components.png" alt="The pipeline *Components* tab details the various jobs in it and how long it took to run it.">
+      <div class="carousel-caption">
+        The pipeline *Components* tab details the various jobs in it and how long it took to run it.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/publicproject/instance-job.png" alt="Click on the down arrow in one of the job rows to see the job details. You can also click on the job's output.">
+      <div class="carousel-caption">
+        Click on the down arrow <i class="fa fa-lg fa-fw fa-caret-down"></i> in one of the job rows to see the job details. You can also click on the job's output.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/publicproject/instance-log.png" alt="The *Log* tab can be used to see the log for the pipeline instance.">
+      <div class="carousel-caption">
+        The *Log* tab can be used to see the log for the pipeline instance.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/publicproject/instance-graph.png" alt="The *Graph* tab provides a visual representation of the pipeline run.">
+      <div class="carousel-caption">
+        The *Graph* tab provides a visual representation of the pipeline run.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/publicproject/instance-advanced.png" alt="The *Advanced* tab can be used to access metadata about the pipeline. [END]">
+      <div class="carousel-caption">
+        The *Advanced* tab can be used to access metadata about the pipeline. [END]
+      </div>
+    </div>
+  </div>
+
+  <!-- Controls -->
+  <a class="left carousel-control" href="#carousel-publicproject" role="button" data-slide="prev">
+    <span class="glyphicon glyphicon-chevron-left" aria-hidden="true"></span>
+    <span class="sr-only">Previous</span>
+  </a>
+  <a class="right carousel-control" href="#carousel-publicproject" role="button" data-slide="next">
+    <span class="glyphicon glyphicon-chevron-right" aria-hidden="true"></span>
+    <span class="sr-only">Next</span>
+  </a>
+</div>
+
+Tip: You may need to make your browser window bigger to see full-size images in the gallery above.
diff --git a/doc/start/getting_started/sharedata.html.textile.liquid b/doc/start/getting_started/sharedata.html.textile.liquid
new file mode 100644 (file)
index 0000000..02e0b70
--- /dev/null
@@ -0,0 +1,102 @@
+---
+layout: default
+navsection: start 
+title: Sharing Data 
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+You can easily share data entirely through Workbench, the web interface to Arvados.
+
+h2. Upload and share your existing data
+
+Step-by-step instructions are shown below.
+
+<div id="carousel-sharedata" class="carousel slide" data-interval="false">
+  <!-- Indicators -->
+  <ol class="carousel-indicators">
+    <li data-target="#carousel-sharedata" data-slide-to="0" class="active"></li>
+    <li data-target="#carousel-sharedata" data-slide-to="1"></li>
+    <li data-target="#carousel-sharedata" data-slide-to="2"></li>
+    <li data-target="#carousel-sharedata" data-slide-to="3"></li>
+    <li data-target="#carousel-sharedata" data-slide-to="4"></li>
+    <li data-target="#carousel-sharedata" data-slide-to="5"></li>
+    <li data-target="#carousel-sharedata" data-slide-to="6"></li>
+    <li data-target="#carousel-sharedata" data-slide-to="7"></li>
+  </ol>
+
+  <!-- Wrapper for slides -->
+  <div class="carousel-inner" role="listbox">
+    <div class="item active">
+      <img src="{{ site.baseurl }}/images/uses/gotohome.png" alt="Step 1. From the dashboard, go to your Home project.">
+      <div class="carousel-caption">
+        Step 1. From the dashboard, go to your Home project.
+      </div>
+    </div>
+
+    <div class="item">
+    <img src="{{ site.baseurl }}/images/uses/uploaddata.png" alt="Click 'Add data' &rarr; 'Upload files'.">
+      <div class="carousel-caption">
+        Click 'Add data' &rarr; 'Upload files'.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/uses/choosefiles.png" alt="A new collection is created automatically. Choose files to upload and hit Start.">
+      <div class="carousel-caption">
+        A new collection is created automatically. Choose files to upload and hit Start.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/uses/uploading.png" alt="Files will upload and stay uploaded even if the browser is closed.">
+      <div class="carousel-caption">
+        Files will upload and stay uploaded even if the browser is closed.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/uses/rename.png" alt="Rename the collection appropriately.">
+      <div class="carousel-caption">
+        Rename the collection appropriately.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/uses/sharing.png" alt="Click 'Create sharing link'. You can click 'unshare' at any later point.">
+      <div class="carousel-caption">
+        Click 'Create sharing link'. You can click 'Unshare' at any later point.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/uses/shared.png" alt="Now just share this link with anyone you want.">
+      <div class="carousel-caption">
+        Now just share this link with anyone you want.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/uses/sharedsubdirs.png" alt="Here's a more complex collection. [END]">
+      <div class="carousel-caption">
+        Here's a more complex collection. [END]
+      </div>
+    </div>
+
+  </div>
+
+  <!-- Controls -->
+  <a class="left carousel-control" href="#carousel-sharedata" role="button" data-slide="prev">
+    <span class="glyphicon glyphicon-chevron-left" aria-hidden="true"></span>
+    <span class="sr-only">Previous</span>
+  </a>
+  <a class="right carousel-control" href="#carousel-sharedata" role="button" data-slide="next">
+    <span class="glyphicon glyphicon-chevron-right" aria-hidden="true"></span>
+    <span class="sr-only">Next</span>
+  </a>
+</div>
+
+Tip: You may need to make your browser window bigger to see full-size images in the gallery above.
diff --git a/doc/start/index.html.textile.liquid b/doc/start/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..cddfb8e
--- /dev/null
@@ -0,0 +1,133 @@
+---
+layout: default
+navsection: start 
+title: Welcome to Arvados!
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This guide provides an introduction to using Arvados to solve big data bioinformatics problems.
+
+h2. What is Arvados?
+
+Arvados is a free and open source bioinformatics platform for genomic and biomedical data.
+
+We address the needs of IT directors, lab principals, and bioinformaticians.
+
+h2. Why use Arvados?
+
+Arvados enables you to quickly begin using cloud computing resources in your bioinformatics work. It allows you to track your methods and datasets, share them securely, and easily re-run analyses.
+
+h3. Take a look (Screenshots gallery) 
+
+<div id="carousel-keyfeatures" class="carousel slide" data-interval="false">
+  <!-- Indicators -->
+  <ol class="carousel-indicators">
+    <li data-target="#carousel-keyfeatures" data-slide-to="0" class="active"></li>
+    <li data-target="#carousel-keyfeatures" data-slide-to="1"></li>
+    <li data-target="#carousel-keyfeatures" data-slide-to="2"></li>
+    <li data-target="#carousel-keyfeatures" data-slide-to="3"></li>
+    <li data-target="#carousel-keyfeatures" data-slide-to="4"></li>
+    <li data-target="#carousel-keyfeatures" data-slide-to="5"></li>
+    <li data-target="#carousel-keyfeatures" data-slide-to="6"></li>
+    <li data-target="#carousel-keyfeatures" data-slide-to="7"></li>
+    <li data-target="#carousel-keyfeatures" data-slide-to="8"></li>
+  </ol>
+
+  <!-- Wrapper for slides -->
+  <div class="carousel-inner" role="listbox">
+    <div class="item active">
+      <img src="{{ site.baseurl }}/images/keyfeatures/dashboard2.png" alt="[START] After logging in, you will see Workbench's dashboard.">
+      <div class="carousel-caption">
+        [START] After logging in, you will see Workbench's dashboard.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/keyfeatures/running2.png" alt="Pipelines describe a set of computational tasks (jobs).">
+      <div class="carousel-caption">
+        Pipelines describe a set of computational tasks (jobs).
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/keyfeatures/log.png" alt="The output of all jobs is logged and stored automatically.">
+      <div class="carousel-caption">
+        The output of all jobs is logged and stored automatically.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/keyfeatures/graph.png" alt="Pipelines can also be viewed in auto-generated graph form.">
+      <div class="carousel-caption">
+        Pipelines can also be viewed in auto-generated graph form.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/keyfeatures/rerun.png" alt="Pipelines can easily be re-run exactly as before, or...">
+      <div class="carousel-caption">
+        Pipelines can easily be re-run exactly as before, or...
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/keyfeatures/chooseinputs.png" alt="...you can change parameters or pick new datasets.">
+      <div class="carousel-caption">
+        ...you can change parameters or pick new datasets.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/keyfeatures/webupload.png" alt="With web upload, data can be uploaded right in Workbench.">
+      <div class="carousel-caption">
+        With web upload, data can be uploaded right in Workbench.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/keyfeatures/collectionpage.png" alt="Collections allow sharing datasets and job outputs easily. 'Create sharing link' with one click.">
+      <div class="carousel-caption">
+        Collections allow sharing datasets and job outputs easily. 'Create sharing link' with one click.
+      </div>
+    </div>
+
+    <div class="item">
+      <img src="{{ site.baseurl }}/images/keyfeatures/provenance.png" alt="Data provenance is tracked automatically. [END]">
+      <div class="carousel-caption">
+        Data provenance is tracked automatically. [END]
+      </div>
+    </div>
+
+
+  </div>
+
+  <!-- Controls -->
+  <a class="left carousel-control" href="#carousel-keyfeatures" role="button" data-slide="prev">
+    <span class="glyphicon glyphicon-chevron-left" aria-hidden="true"></span>
+    <span class="sr-only">Previous</span>
+  </a>
+  <a class="right carousel-control" href="#carousel-keyfeatures" role="button" data-slide="next">
+    <span class="glyphicon glyphicon-chevron-right" aria-hidden="true"></span>
+    <span class="sr-only">Next</span>
+  </a>
+</div>
+
+Note: Workbench is the web interface to Arvados.
+
+Tip: You may need to make your browser window bigger to see full-size images in the gallery above.
+
+h3. Key Features
+
+<ul>
+<li><strong>Track your methods</strong><br/>
+We log every compute job: software versions, machine images, input and output data hashes. Rely on a computer, not your memory and your note-taking skills.<br/><br/></li>
+<li><strong>Share your methods</strong><br/>
+Show other people what you did. Let them use your workflow on their own data. Publish a permalink to your methods and data, so others can reproduce and build on them easily.<br/><br/></li>
+<li><strong>Track data origin</strong><br/>
+Did you really only use fully consented public data in this analysis?<br/><br/></li>
+<li><strong>Get results sooner</strong><br/>
+Run your compute jobs faster by using multiple nodes and multiple cores, even if your programs are single-threaded.<br/><br/></li>
+</ul>
diff --git a/doc/user/composer/c1.png b/doc/user/composer/c1.png
new file mode 100644 (file)
index 0000000..6e89aa0
Binary files /dev/null and b/doc/user/composer/c1.png differ
diff --git a/doc/user/composer/c10.png b/doc/user/composer/c10.png
new file mode 100644 (file)
index 0000000..1bca579
Binary files /dev/null and b/doc/user/composer/c10.png differ
diff --git a/doc/user/composer/c11.png b/doc/user/composer/c11.png
new file mode 100644 (file)
index 0000000..4d64476
Binary files /dev/null and b/doc/user/composer/c11.png differ
diff --git a/doc/user/composer/c12.png b/doc/user/composer/c12.png
new file mode 100644 (file)
index 0000000..f192ab7
Binary files /dev/null and b/doc/user/composer/c12.png differ
diff --git a/doc/user/composer/c13.png b/doc/user/composer/c13.png
new file mode 100644 (file)
index 0000000..7ba72dc
Binary files /dev/null and b/doc/user/composer/c13.png differ
diff --git a/doc/user/composer/c14.png b/doc/user/composer/c14.png
new file mode 100644 (file)
index 0000000..f7d446b
Binary files /dev/null and b/doc/user/composer/c14.png differ
diff --git a/doc/user/composer/c15.png b/doc/user/composer/c15.png
new file mode 100644 (file)
index 0000000..54fa54d
Binary files /dev/null and b/doc/user/composer/c15.png differ
diff --git a/doc/user/composer/c16.png b/doc/user/composer/c16.png
new file mode 100644 (file)
index 0000000..bbdd65a
Binary files /dev/null and b/doc/user/composer/c16.png differ
diff --git a/doc/user/composer/c17.png b/doc/user/composer/c17.png
new file mode 100644 (file)
index 0000000..5706e61
Binary files /dev/null and b/doc/user/composer/c17.png differ
diff --git a/doc/user/composer/c18.png b/doc/user/composer/c18.png
new file mode 100644 (file)
index 0000000..fc2b736
Binary files /dev/null and b/doc/user/composer/c18.png differ
diff --git a/doc/user/composer/c19.png b/doc/user/composer/c19.png
new file mode 100644 (file)
index 0000000..97202cd
Binary files /dev/null and b/doc/user/composer/c19.png differ
diff --git a/doc/user/composer/c2.png b/doc/user/composer/c2.png
new file mode 100644 (file)
index 0000000..89fdf33
Binary files /dev/null and b/doc/user/composer/c2.png differ
diff --git a/doc/user/composer/c20.png b/doc/user/composer/c20.png
new file mode 100644 (file)
index 0000000..df31c9c
Binary files /dev/null and b/doc/user/composer/c20.png differ
diff --git a/doc/user/composer/c21.png b/doc/user/composer/c21.png
new file mode 100644 (file)
index 0000000..cc3f928
Binary files /dev/null and b/doc/user/composer/c21.png differ
diff --git a/doc/user/composer/c22.png b/doc/user/composer/c22.png
new file mode 100644 (file)
index 0000000..9c7781f
Binary files /dev/null and b/doc/user/composer/c22.png differ
diff --git a/doc/user/composer/c23.png b/doc/user/composer/c23.png
new file mode 100644 (file)
index 0000000..f5be591
Binary files /dev/null and b/doc/user/composer/c23.png differ
diff --git a/doc/user/composer/c24.png b/doc/user/composer/c24.png
new file mode 100644 (file)
index 0000000..b544356
Binary files /dev/null and b/doc/user/composer/c24.png differ
diff --git a/doc/user/composer/c2b.png b/doc/user/composer/c2b.png
new file mode 100644 (file)
index 0000000..39acd60
Binary files /dev/null and b/doc/user/composer/c2b.png differ
diff --git a/doc/user/composer/c2c.png b/doc/user/composer/c2c.png
new file mode 100644 (file)
index 0000000..931181c
Binary files /dev/null and b/doc/user/composer/c2c.png differ
diff --git a/doc/user/composer/c3.png b/doc/user/composer/c3.png
new file mode 100644 (file)
index 0000000..3e650c2
Binary files /dev/null and b/doc/user/composer/c3.png differ
diff --git a/doc/user/composer/c4.png b/doc/user/composer/c4.png
new file mode 100644 (file)
index 0000000..0f706a0
Binary files /dev/null and b/doc/user/composer/c4.png differ
diff --git a/doc/user/composer/c5.png b/doc/user/composer/c5.png
new file mode 100644 (file)
index 0000000..aaff6f5
Binary files /dev/null and b/doc/user/composer/c5.png differ
diff --git a/doc/user/composer/c6.png b/doc/user/composer/c6.png
new file mode 100644 (file)
index 0000000..9275d86
Binary files /dev/null and b/doc/user/composer/c6.png differ
diff --git a/doc/user/composer/c7.png b/doc/user/composer/c7.png
new file mode 100644 (file)
index 0000000..2d77fe2
Binary files /dev/null and b/doc/user/composer/c7.png differ
diff --git a/doc/user/composer/c8.png b/doc/user/composer/c8.png
new file mode 100644 (file)
index 0000000..1620887
Binary files /dev/null and b/doc/user/composer/c8.png differ
diff --git a/doc/user/composer/c9.png b/doc/user/composer/c9.png
new file mode 100644 (file)
index 0000000..43b1210
Binary files /dev/null and b/doc/user/composer/c9.png differ
diff --git a/doc/user/composer/composer.html.textile.liquid b/doc/user/composer/composer.html.textile.liquid
new file mode 100644 (file)
index 0000000..e8ef0b6
--- /dev/null
@@ -0,0 +1,119 @@
+---
+layout: default
+navsection: userguide
+title: Create a Workflow with Composer
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Arvados Workflow Composer is a graphical interface for building Common Workflow Language (CWL) workflows to run on Arvados.
+
+This tutorial will demonstrate:
+
+# Creating a new git repository through Arvados to store the workflow
+# Creating CommandLineTools for "sort" and "uniq"
+# Creating a Workflow which uses "sort" and "uniq" to remove duplicate lines from a text file
+# Submitting the Workflow to run on Arvados
+
+h3. 1. Access from workbench
+
+!(screenshot)c1.png!
+
+h3. 2. Composer starting page
+
+!(screenshot)c2.png!
+
+h3. 3. Manage git repositories (opens Workbench in new tab)
+
+!(screenshot)c2b.png!
+
+h3. 4. Add a new repository
+
+!(screenshot)c4.png!
+
+!(screenshot)c3.png!
+
+h3. 5. Return to Composer. Use the refresh button to discover the new repository (it may take a few moments to show up).
+
+!(screenshot)c2c.png!
+
+h3. 6. Create a new Command Line Tool
+
+!(screenshot)c5.png!
+
+!(screenshot)c20.png!
+
+h3. 7. Set Docker image, base command, and input port for "sort" tool
+
+The "Docker Repository" is the name:tag of a "Docker image uploaded Arvados.":{{site.baseurl}}/user/topics/arv-docker.html (Use @arv-keepdocker --pull debian:8@)  You can also find prepackaged bioinformatics tools on various sites, such as http://dockstore.org and http://biocontainers.pro/ .
+
+!(screenshot)c6.png!
+
+h3. 8. Redirect stdout to a file
+
+!(screenshot)c7.png!
+
+h3. 9. Capture output file
+
+!(screenshot)c8.png!
+
+h3. 10. Save Command Line Tool
+
+!(screenshot)c22.png!
+
+h3. 11. Repeat steps 6-10 for "uniq" tool
+
+Create a new tool with a "base command" of "uniq".
+
+h3. 12. Switch back to "Home" tab and create workflow
+
+!(screenshot)c24.png!
+
+!(screenshot)c9.png!
+
+!(screenshot)c10.png!
+
+h3. 13. Drag and drop tools into Workflow
+
+!(screenshot)c11.png!
+
+h3. 14. Drag from input port of "sort" to empty space to create workflow input
+
+!(screenshot)c21.png!
+
+h3. 15. Drag from output port of "sort" to input port of "uniq"
+
+!(screenshot)c13.png!
+
+h3. 16. Drag from output port of "uniq" to empty space to create workflow output
+
+!(screenshot)c14.png!
+
+h3. 17. Save Workflow
+
+!(screenshot)c23.png!
+
+h3. 18. Click on "Test" tab then click "Run"
+
+!(screenshot)c15.png!
+
+h3. 19. Choose input file
+
+You may need to "upload an input file":{{site.baseurl}}/user/tutorials/tutorial-keep.html
+
+!(screenshot)c16.png!
+
+h3. 20. Run the workflow
+
+!(screenshot)c17.png!
+
+h3. 21. Monitor progress (may take several minutes)
+
+!(screenshot)c18.png!
+
+h3. 22. Get workflow output
+
+!(screenshot)c19.png!
diff --git a/doc/user/copying/LICENSE-2.0.html b/doc/user/copying/LICENSE-2.0.html
new file mode 100644 (file)
index 0000000..129916f
--- /dev/null
@@ -0,0 +1,182 @@
+---
+layout: default
+navsection: userguide
+title: "Apache License"
+...
+
+<div id="content" class="grid_16"><div class="section-content"></br>Version 2.0, January 2004<br></br>
+<a href="http://www.apache.org/licenses/">http://www.apache.org/licenses/</a> </p>
+<p>TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION</p>
+<p><strong><a name="definitions">1. Definitions</a></strong>.</p>
+<p>"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.</p>
+<p>"Licensor" shall mean the copyright owner or entity authorized by the
+copyright owner that is granting the License.</p>
+<p>"Legal Entity" shall mean the union of the acting entity and all other
+entities that control, are controlled by, or are under common control with
+that entity. For the purposes of this definition, "control" means (i) the
+power, direct or indirect, to cause the direction or management of such
+entity, whether by contract or otherwise, or (ii) ownership of fifty
+percent (50%) or more of the outstanding shares, or (iii) beneficial
+ownership of such entity.</p>
+<p>"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.</p>
+<p>"Source" form shall mean the preferred form for making modifications,
+including but not limited to software source code, documentation source,
+and configuration files.</p>
+<p>"Object" form shall mean any form resulting from mechanical transformation
+or translation of a Source form, including but not limited to compiled
+object code, generated documentation, and conversions to other media types.</p>
+<p>"Work" shall mean the work of authorship, whether in Source or Object form,
+made available under the License, as indicated by a copyright notice that
+is included in or attached to the work (an example is provided in the
+Appendix below).</p>
+<p>"Derivative Works" shall mean any work, whether in Source or Object form,
+that is based on (or derived from) the Work and for which the editorial
+revisions, annotations, elaborations, or other modifications represent, as
+a whole, an original work of authorship. For the purposes of this License,
+Derivative Works shall not include works that remain separable from, or
+merely link (or bind by name) to the interfaces of, the Work and Derivative
+Works thereof.</p>
+<p>"Contribution" shall mean any work of authorship, including the original
+version of the Work and any modifications or additions to that Work or
+Derivative Works thereof, that is intentionally submitted to Licensor for
+inclusion in the Work by the copyright owner or by an individual or Legal
+Entity authorized to submit on behalf of the copyright owner. For the
+purposes of this definition, "submitted" means any form of electronic,
+verbal, or written communication sent to the Licensor or its
+representatives, including but not limited to communication on electronic
+mailing lists, source code control systems, and issue tracking systems that
+are managed by, or on behalf of, the Licensor for the purpose of discussing
+and improving the Work, but excluding communication that is conspicuously
+marked or otherwise designated in writing by the copyright owner as "Not a
+Contribution."</p>
+<p>"Contributor" shall mean Licensor and any individual or Legal Entity on
+behalf of whom a Contribution has been received by Licensor and
+subsequently incorporated within the Work.</p>
+<p><strong><a name="copyright">2. Grant of Copyright License</a></strong>. Subject to the
+terms and conditions of this License, each Contributor hereby grants to You
+a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+copyright license to reproduce, prepare Derivative Works of, publicly
+display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.</p>
+<p><strong><a name="patent">3. Grant of Patent License</a></strong>. Subject to the terms
+and conditions of this License, each Contributor hereby grants to You a
+perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+(except as stated in this section) patent license to make, have made, use,
+offer to sell, sell, import, and otherwise transfer the Work, where such
+license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by
+combination of their Contribution(s) with the Work to which such
+Contribution(s) was submitted. If You institute patent litigation against
+any entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that the Work or a Contribution incorporated within the Work constitutes
+direct or contributory patent infringement, then any patent licenses
+granted to You under this License for that Work shall terminate as of the
+date such litigation is filed.</p>
+<p><strong><a name="redistribution">4. Redistribution</a></strong>. You may reproduce and
+distribute copies of the Work or Derivative Works thereof in any medium,
+with or without modifications, and in Source or Object form, provided that
+You meet the following conditions:</p>
+<ol style="list-style: lower-latin;">
+<li>You must give any other recipients of the Work or Derivative Works a
+copy of this License; and</li>
+
+<li>You must cause any modified files to carry prominent notices stating
+that You changed the files; and</li>
+
+<li>You must retain, in the Source form of any Derivative Works that You
+distribute, all copyright, patent, trademark, and attribution notices from
+the Source form of the Work, excluding those notices that do not pertain to
+any part of the Derivative Works; and</li>
+
+<li>If the Work includes a "NOTICE" text file as part of its distribution,
+then any Derivative Works that You distribute must include a readable copy
+of the attribution notices contained within such NOTICE file, excluding
+those notices that do not pertain to any part of the Derivative Works, in
+at least one of the following places: within a NOTICE text file distributed
+as part of the Derivative Works; within the Source form or documentation,
+if provided along with the Derivative Works; or, within a display generated
+by the Derivative Works, if and wherever such third-party notices normally
+appear. The contents of the NOTICE file are for informational purposes only
+and do not modify the License. You may add Your own attribution notices
+within Derivative Works that You distribute, alongside or as an addendum to
+the NOTICE text from the Work, provided that such additional attribution
+notices cannot be construed as modifying the License.
+<br/>
+<br/>
+You may add Your own copyright statement to Your modifications and may
+provide additional or different license terms and conditions for use,
+reproduction, or distribution of Your modifications, or for any such
+Derivative Works as a whole, provided Your use, reproduction, and
+distribution of the Work otherwise complies with the conditions stated in
+this License.
+</li>
+
+</ol>
+
+<p><strong><a name="contributions">5. Submission of Contributions</a></strong>. Unless You
+explicitly state otherwise, any Contribution intentionally submitted for
+inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the
+terms of any separate license agreement you may have executed with Licensor
+regarding such Contributions.</p>
+<p><strong><a name="trademarks">6. Trademarks</a></strong>. This License does not grant
+permission to use the trade names, trademarks, service marks, or product
+names of the Licensor, except as required for reasonable and customary use
+in describing the origin of the Work and reproducing the content of the
+NOTICE file.</p>
+<p><strong><a name="no-warranty">7. Disclaimer of Warranty</a></strong>. Unless required by
+applicable law or agreed to in writing, Licensor provides the Work (and
+each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including,
+without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You
+are solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise
+of permissions under this License.</p>
+<p><strong><a name="no-liability">8. Limitation of Liability</a></strong>. In no event and
+under no legal theory, whether in tort (including negligence), contract, or
+otherwise, unless required by applicable law (such as deliberate and
+grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special,
+incidental, or consequential damages of any character arising as a result
+of this License or out of the use or inability to use the Work (including
+but not limited to damages for loss of goodwill, work stoppage, computer
+failure or malfunction, or any and all other commercial damages or losses),
+even if such Contributor has been advised of the possibility of such
+damages.</p>
+<p><strong><a name="additional">9. Accepting Warranty or Additional Liability</a></strong>.
+While redistributing the Work or Derivative Works thereof, You may choose
+to offer, and charge a fee for, acceptance of support, warranty, indemnity,
+or other liability obligations and/or rights consistent with this License.
+However, in accepting such obligations, You may act only on Your own behalf
+and on Your sole responsibility, not on behalf of any other Contributor,
+and only if You agree to indemnify, defend, and hold each Contributor
+harmless for any liability incurred by, or claims asserted against, such
+Contributor by reason of your accepting any such warranty or additional
+liability.</p>
+<p>END OF TERMS AND CONDITIONS</p>
+<h1 id="apply">APPENDIX: How to apply the Apache License to your work</h1>
+<p>To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included
+on the same "printed page" as the copyright notice for easier
+identification within third-party archives.</p>
+<div class="codehilite"><pre>Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+</pre></div>
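+
+<p>For example (an editorial illustration, not part of the license text),
+the same notice at the top of a hypothetical Java source file is enclosed
+in Java's block-comment syntax, with the file name and a description of
+purpose alongside the copyright notice:</p>
+
+<div class="codehilite"><pre>/*
+ * ExampleClient.java: hypothetical file name; describe the file's purpose here.
+ *
+ * Copyright [yyyy] [name of copyright owner]
+ *
+ * Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+public class ExampleClient {
+    // Class body elided; only the notice placement is being illustrated.
+}
+</pre></div>
+</div></div>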
diff --git a/doc/user/copying/agpl-3.0.html b/doc/user/copying/agpl-3.0.html
new file mode 100644 (file)
index 0000000..aad493a
--- /dev/null
@@ -0,0 +1,684 @@
+---
+layout: default
+navsection: userguide
+title: "GNU Affero General Public License"
+...
+
+<p style="text-align: center;">Version 3, 19 November 2007</p>
+
+<p>Copyright &copy; 2007 Free Software Foundation,
+Inc. &lt;<a href="http://fsf.org/">http://fsf.org/</a>&gt;
+ <br />
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.</p>
+
+<h3><a name="preamble"></a>Preamble</h3>
+
+<p>The GNU Affero General Public License is a free, copyleft license
+for software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.</p>
+
+<p>The licenses for most software and other practical works are
+designed to take away your freedom to share and change the works.  By
+contrast, our General Public Licenses are intended to guarantee your
+freedom to share and change all versions of a program--to make sure it
+remains free software for all its users.</p>
+
+<p>When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.</p>
+
+<p>Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.</p>
+
+<p>A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.</p>
+
+<p>The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.</p>
+
+<p>An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.</p>
+
+<p>The precise terms and conditions for copying, distribution and
+modification follow.</p>
+
+<h3><a name="terms"></a>TERMS AND CONDITIONS</h3>
+
+<h4><a name="section0"></a>0. Definitions.</h4>
+
+<p>&quot;This License&quot; refers to version 3 of the GNU Affero General Public
+License.</p>
+
+<p>&quot;Copyright&quot; also means copyright-like laws that apply to other kinds
+of works, such as semiconductor masks.</p>
+
+<p>&quot;The Program&quot; refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as &quot;you&quot;.  &quot;Licensees&quot; and
+&quot;recipients&quot; may be individuals or organizations.</p>
+
+<p>To &quot;modify&quot; a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a &quot;modified version&quot; of the
+earlier work or a work &quot;based on&quot; the earlier work.</p>
+
+<p>A &quot;covered work&quot; means either the unmodified Program or a work based
+on the Program.</p>
+
+<p>To &quot;propagate&quot; a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.</p>
+
+<p>To &quot;convey&quot; a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.</p>
+
+<p>An interactive user interface displays &quot;Appropriate Legal Notices&quot;
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.</p>
+
+<h4><a name="section1"></a>1. Source Code.</h4>
+
+<p>The &quot;source code&quot; for a work means the preferred form of the work
+for making modifications to it.  &quot;Object code&quot; means any non-source
+form of a work.</p>
+
+<p>A &quot;Standard Interface&quot; means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.</p>
+
+<p>The &quot;System Libraries&quot; of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+&quot;Major Component&quot;, in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.</p>
+
+<p>The &quot;Corresponding Source&quot; for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.</p>
+
+<p>The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.</p>
+
+<p>The Corresponding Source for a work in source code form is that
+same work.</p>
+
+<h4><a name="section2"></a>2. Basic Permissions.</h4>
+
+<p>All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.</p>
+
+<p>You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.</p>
+
+<p>Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.</p>
+
+<h4><a name="section3"></a>3. Protecting Users' Legal Rights From Anti-Circumvention Law.</h4>
+
+<p>No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.</p>
+
+<p>When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.</p>
+
+<h4><a name="section4"></a>4. Conveying Verbatim Copies.</h4>
+
+<p>You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.</p>
+
+<p>You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.</p>
+
+<h4><a name="section5"></a>5. Conveying Modified Source Versions.</h4>
+
+<p>You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:</p>
+
+<ul>
+
+<li>a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.</li>
+
+<li>b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    &quot;keep intact all notices&quot;.</li>
+
+<li>c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.</li>
+
+<li>d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.</li>
+
+</ul>
+
+<p>A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+&quot;aggregate&quot; if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.</p>
+
+<h4><a name="section6"></a>6. Conveying Non-Source Forms.</h4>
+
+<p>You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:</p>
+
+<ul>
+
+<li>a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.</li>
+
+<li>b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.</li>
+
+<li>c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.</li>
+
+<li>d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.</li>
+
+<li>e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.</li>
+
+</ul>
+
+<p>A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.</p>
+
+<p>A &quot;User Product&quot; is either (1) a &quot;consumer product&quot;, which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, &quot;normally used&quot; refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.</p>
+
+<p>&quot;Installation Information&quot; for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.</p>
+
+<p>If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).</p>
+
+<p>The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.</p>
+
+<p>Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.</p>
+
+<h4><a name="section7"></a>7. Additional Terms.</h4>
+
+<p>&quot;Additional permissions&quot; are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.</p>
+
+<p>When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.</p>
+
+<p>Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:</p>
+
+<ul>
+
+<li>a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or</li>
+
+<li>b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or</li>
+
+<li>c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or</li>
+
+<li>d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or</li>
+
+<li>e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or</li>
+
+<li>f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.</li>
+
+</ul>
+
+<p>All other non-permissive additional terms are considered &quot;further
+restrictions&quot; within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further restriction,
+you may remove that term.  If a license document contains a further
+restriction but permits relicensing or conveying under this License, you
+may add to a covered work material governed by the terms of that license
+document, provided that the further restriction does not survive such
+relicensing or conveying.</p>
+
+<p>If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.</p>
+
+<p>Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.</p>
+
+<h4><a name="section8"></a>8. Termination.</h4>
+
+<p>You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).</p>
+
+<p>However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.</p>
+
+<p>Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.</p>
+
+<p>Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.</p>
+
+<h4><a name="section9"></a>9. Acceptance Not Required for Having Copies.</h4>
+
+<p>You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.</p>
+
+<h4><a name="section10"></a>10. Automatic Licensing of Downstream Recipients.</h4>
+
+<p>Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.</p>
+
+<p>An &quot;entity transaction&quot; is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.</p>
+
+<p>You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.</p>
+
+<h4><a name="section11"></a>11. Patents.</h4>
+
+<p>A &quot;contributor&quot; is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's &quot;contributor version&quot;.</p>
+
+<p>A contributor's &quot;essential patent claims&quot; are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, &quot;control&quot; includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.</p>
+
+<p>Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.</p>
+
+<p>In the following three paragraphs, a &quot;patent license&quot; is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To &quot;grant&quot; such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.</p>
+
+<p>If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  &quot;Knowingly relying&quot; means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.</p>
+
+<p>If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.</p>
+
+<p>A patent license is &quot;discriminatory&quot; if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.</p>
+
+<p>Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.</p>
+
+<h4><a name="section12"></a>12. No Surrender of Others' Freedom.</h4>
+
+<p>If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.</p>
+
+<h4><a name="section13"></a>13. Remote Network Interaction; Use with the GNU General Public License.</h4>
+
+<p>Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.</p>
+
+<p>Notwithstanding any other provision of this License, you have permission
+to link or combine any covered work with a work licensed under version 3
+of the GNU General Public License into a single combined work, and to
+convey the resulting work.  The terms of this License will continue to
+apply to the part which is the covered work, but the work with which it is
+combined will remain governed by version 3 of the GNU General Public
+License.</p>
+
+<h4><a name="section14"></a>14. Revised Versions of this License.</h4>
+
+<p>The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new
+versions will be similar in spirit to the present version, but may differ
+in detail to address new problems or concerns.</p>
+
+<p>Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero
+General Public License &quot;or any later version&quot; applies to it, you have
+the option of following the terms and conditions either of that
+numbered version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number
+of the GNU Affero General Public License, you may choose any version
+ever published by the Free Software Foundation.</p>
+
+<p>If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that
+proxy's public statement of acceptance of a version permanently
+authorizes you to choose that version for the Program.</p>
+
+<p>Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.</p>
+
+<h4><a name="section15"></a>15. Disclaimer of Warranty.</h4>
+
+<p>THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM &quot;AS IS&quot; WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.</p>
+
+<h4><a name="section16"></a>16. Limitation of Liability.</h4>
+
+<p>IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.</p>
+
+<h4><a name="section17"></a>17. Interpretation of Sections 15 and 16.</h4>
+
+<p>If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.</p>
+
+<p>END OF TERMS AND CONDITIONS</p>
+
+<h3><a name="howto"></a>How to Apply These Terms to Your New Programs</h3>
+
+<p>If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.</p>
+
+<p>To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the &quot;copyright&quot; line and a pointer to where the full notice is found.</p>
+
+<pre>    &lt;one line to give the program's name and a brief idea of what it does.&gt;
+    Copyright (C) &lt;year&gt;  &lt;name of author&gt;
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as
+    published by the Free Software Foundation, either version 3 of the
+    License, or (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see &lt;http://www.gnu.org/licenses/&gt;.
+</pre>
+
+<p>Also add information on how to contact you by electronic and paper mail.</p>
+
+<p>If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a &quot;Source&quot; link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.</p>
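+
+<p>As an editorial illustration (not part of the license), a minimal sketch
+of such a &quot;Source&quot; link in a Java web application, using the JDK's
+built-in <code>com.sun.net.httpserver</code> server, might look like the
+following; the class name, port, and archive URL are assumptions:</p>
+
+<pre>import com.sun.net.httpserver.HttpServer;
+import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.nio.charset.StandardCharsets;
+
+public class SourceLinkExample {
+    public static void main(String[] args) throws Exception {
+        HttpServer server = HttpServer.create(new InetSocketAddress(8000), 0);
+        server.createContext(&quot;/&quot;, exchange -&gt; {
+            // Every page links to an archive of the Corresponding Source,
+            // as section 13 suggests for users interacting over a network.
+            String page = &quot;&lt;p&gt;Example AGPL web application&lt;/p&gt;&quot;
+                    + &quot;&lt;a href=https://example.com/source.tar.gz&gt;Source&lt;/a&gt;&quot;;
+            byte[] body = page.getBytes(StandardCharsets.UTF_8);
+            exchange.sendResponseHeaders(200, body.length);
+            try (OutputStream out = exchange.getResponseBody()) {
+                out.write(body);
+            }
+        });
+        server.start();
+    }
+}
+</pre>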
+
+<p>You should also get your employer (if you work as a programmer) or school,
+if any, to sign a &quot;copyright disclaimer&quot; for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+&lt;<a href="http://www.gnu.org/licenses/">http://www.gnu.org/licenses/</a>&gt;.</p>
+
+
diff --git a/doc/user/copying/by-sa-3.0.html b/doc/user/copying/by-sa-3.0.html
new file mode 100644 (file)
index 0000000..f88374a
--- /dev/null
@@ -0,0 +1,418 @@
+---
+layout: default
+navsection: userguide
+title: "Creative Commons"
+...
+
+<div id="deed" class="green">
+    <div id="deed-head">
+
+      <div id="deed-license">
+        <h2>Attribution-ShareAlike 3.0 United States</h2>
+      </div>
+    </div>
+
+        <h3><em>License</em></h3>
+
+        <p>THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS
+        OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR
+        "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER
+        APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS
+        AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS
+        PROHIBITED.</p>
+
+        <p>BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU
+        ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE.
+        TO THE EXTENT THIS LICENSE MAY BE CONSIDERED TO BE A
+        CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE
+        IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND
+        CONDITIONS.</p>
+
+        <p><strong>1. Definitions</strong></p>
+
+        <ol type="a">
+          <li><strong>"Collective Work"</strong> means a work, such
+          as a periodical issue, anthology or encyclopedia, in
+          which the Work in its entirety in unmodified form, along
+          with one or more other contributions, constituting
+          separate and independent works in themselves, are
+          assembled into a collective whole. A work that
+          constitutes a Collective Work will not be considered a
+          Derivative Work (as defined below) for the purposes of
+          this License.</li>
+
+          <li><strong>"Creative Commons Compatible
+          License"</strong> means a license that is listed at
+          http://creativecommons.org/compatiblelicenses that has
+          been approved by Creative Commons as being essentially
+          equivalent to this License, including, at a minimum,
+          because that license: (i) contains terms that have the
+          same purpose, meaning and effect as the License Elements
+          of this License; and, (ii) explicitly permits the
+          relicensing of derivatives of works made available under
+          that license under this License or either a Creative
+          Commons unported license or a Creative Commons
+          jurisdiction license with the same License Elements as
+          this License.</li>
+
+          <li><strong>"Derivative Work"</strong> means a work based
+          upon the Work or upon the Work and other pre-existing
+          works, such as a translation, musical arrangement,
+          dramatization, fictionalization, motion picture version,
+          sound recording, art reproduction, abridgment,
+          condensation, or any other form in which the Work may be
+          recast, transformed, or adapted, except that a work that
+          constitutes a Collective Work will not be considered a
+          Derivative Work for the purpose of this License. For the
+          avoidance of doubt, where the Work is a musical
+          composition or sound recording, the synchronization of
+          the Work in timed-relation with a moving image
+          ("synching") will be considered a Derivative Work for the
+          purpose of this License.</li>
+
+          <li><strong>"License Elements"</strong> means the
+          following high-level license attributes as selected by
+          Licensor and indicated in the title of this License:
+          Attribution, ShareAlike.</li>
+
+          <li><strong>"Licensor"</strong> means the individual,
+          individuals, entity or entities that offers the Work
+          under the terms of this License.</li>
+
+          <li><strong>"Original Author"</strong> means the
+          individual, individuals, entity or entities who created
+          the Work.</li>
+
+          <li><strong>"Work"</strong> means the copyrightable work
+          of authorship offered under the terms of this
+          License.</li>
+
+          <li><strong>"You"</strong> means an individual or entity
+          exercising rights under this License who has not
+          previously violated the terms of this License with
+          respect to the Work, or who has received express
+          permission from the Licensor to exercise rights under
+          this License despite a previous violation.</li>
+        </ol>
+
+        <p><strong>2. Fair Use Rights.</strong> Nothing in this
+        license is intended to reduce, limit, or restrict any
+        rights arising from fair use, first sale or other
+        limitations on the exclusive rights of the copyright owner
+        under copyright law or other applicable laws.</p>
+
+        <p><strong>3. License Grant.</strong> Subject to the terms
+        and conditions of this License, Licensor hereby grants You
+        a worldwide, royalty-free, non-exclusive, perpetual (for
+        the duration of the applicable copyright) license to
+        exercise the rights in the Work as stated below:</p>
+
+        <ol type="a">
+          <li>to reproduce the Work, to incorporate the Work into
+          one or more Collective Works, and to reproduce the Work
+          as incorporated in the Collective Works;</li>
+
+          <li>to create and reproduce Derivative Works provided
+          that any such Derivative Work, including any translation
+          in any medium, takes reasonable steps to clearly label,
+          demarcate or otherwise identify that changes were made to
+          the original Work. For example, a translation could be
+          marked "The original work was translated from English to
+          Spanish," or a modification could indicate "The original
+          work has been modified.";</li>
+
+          <li>to distribute copies or phonorecords of, display
+          publicly, perform publicly, and perform publicly by means
+          of a digital audio transmission the Work including as
+          incorporated in Collective Works;</li>
+
+          <li>to distribute copies or phonorecords of, display
+          publicly, perform publicly, and perform publicly by means
+          of a digital audio transmission Derivative Works.</li>
+
+          <li>
+            <p>For the avoidance of doubt, where the Work is a
+            musical composition:</p>
+
+            <ol type="i">
+              <li><strong>Performance Royalties Under Blanket
+              Licenses</strong>. Licensor waives the exclusive
+              right to collect, whether individually or, in the
+              event that Licensor is a member of a performance
+              rights society (e.g. ASCAP, BMI, SESAC), via that
+              society, royalties for the public performance or
+              public digital performance (e.g. webcast) of the
+              Work.</li>
+
+              <li><strong>Mechanical Rights and Statutory
+              Royalties</strong>. Licensor waives the exclusive
+              right to collect, whether individually or via a music
+              rights agency or designated agent (e.g. Harry Fox
+              Agency), royalties for any phonorecord You create
+              from the Work ("cover version") and distribute,
+              subject to the compulsory license created by 17 USC
+              Section 115 of the US Copyright Act (or the
+              equivalent in other jurisdictions).</li>
+            </ol>
+          </li>
+
+          <li><strong>Webcasting Rights and Statutory
+          Royalties</strong>. For the avoidance of doubt, where the
+          Work is a sound recording, Licensor waives the exclusive
+          right to collect, whether individually or via a
+          performance-rights society (e.g. SoundExchange),
+          royalties for the public digital performance (e.g.
+          webcast) of the Work, subject to the compulsory license
+          created by 17 USC Section 114 of the US Copyright Act (or
+          the equivalent in other jurisdictions).</li>
+        </ol>
+
+        <p>The above rights may be exercised in all media and
+        formats whether now known or hereafter devised. The above
+        rights include the right to make such modifications as are
+        technically necessary to exercise the rights in other media
+        and formats. All rights not expressly granted by Licensor
+        are hereby reserved.</p>
+
+        <p><strong>4. Restrictions.</strong> The license granted in
+        Section 3 above is expressly made subject to and limited by
+        the following restrictions:</p>
+
+        <ol type="a">
+          <li>You may distribute, publicly display, publicly
+          perform, or publicly digitally perform the Work only
+          under the terms of this License, and You must include a
+          copy of, or the Uniform Resource Identifier for, this
+          License with every copy or phonorecord of the Work You
+          distribute, publicly display, publicly perform, or
+          publicly digitally perform. You may not offer or impose
+          any terms on the Work that restrict the terms of this
+          License or the ability of a recipient of the Work to
+          exercise the rights granted to that recipient under
+          the terms of the License. You may not sublicense the
+          Work. You must keep intact all notices that refer to this
+          License and to the disclaimer of warranties. When You
+          distribute, publicly display, publicly perform, or
+          publicly digitally perform the Work, You may not impose
+          any technological measures on the Work that restrict the
+          ability of a recipient of the Work from You to exercise
+          the rights granted to that recipient under the terms
+          of the License. This Section 4(a) applies to the Work as
+          incorporated in a Collective Work, but this does not
+          require the Collective Work apart from the Work itself to
+          be made subject to the terms of this License. If You
+          create a Collective Work, upon notice from any Licensor
+          You must, to the extent practicable, remove from the
+          Collective Work any credit as required by Section 4(c),
+          as requested. If You create a Derivative Work, upon
+          notice from any Licensor You must, to the extent
+          practicable, remove from the Derivative Work any credit
+          as required by Section 4(c), as requested.</li>
+
+          <li>You may distribute, publicly display, publicly
+          perform, or publicly digitally perform a Derivative Work
+          only under: (i) the terms of this License; (ii) a later
+          version of this License with the same License Elements as
+          this License; (iii) either the Creative Commons
+          (Unported) license or a Creative Commons jurisdiction
+          license (either this or a later license version) that
+          contains the same License Elements as this License (e.g.
+          Attribution-ShareAlike 3.0 (Unported)); (iv) a Creative
+          Commons Compatible License. If you license the Derivative
+          Work under one of the licenses mentioned in (iv), you
+          must comply with the terms of that license. If you
+          license the Derivative Work under the terms of any of the
+          licenses mentioned in (i), (ii) or (iii) (the "Applicable
+          License"), you must comply with the terms of the
+          Applicable License generally and with the following
+          provisions: (I) You must include a copy of, or the
+          Uniform Resource Identifier for, the Applicable License
+          with every copy or phonorecord of each Derivative Work
+          You distribute, publicly display, publicly perform, or
+          publicly digitally perform; (II) You may not offer or
+          impose any terms on the Derivative Works that restrict
+          the terms of the Applicable License or the ability of a
+          recipient of the Work to exercise the rights granted to
+          that recipient under the terms of the Applicable License;
+          (III) You must keep intact all notices that refer to the
+          Applicable License and to the disclaimer of warranties;
+          and, (IV) when You distribute, publicly display, publicly
+          perform, or publicly digitally perform the Work, You may
+          not impose any technological measures on the Derivative
+          Work that restrict the ability of a recipient of the
+          Derivative Work from You to exercise the rights granted
+          to that recipient under the terms of the Applicable
+          License. This Section 4(b) applies to the Derivative Work
+          as incorporated in a Collective Work, but this does not
+          require the Collective Work apart from the Derivative
+          Work itself to be made subject to the terms of the
+          Applicable License.</li>
+
+          <li>If You distribute, publicly display, publicly
+          perform, or publicly digitally perform the Work (as
+          defined in Section 1 above) or any Derivative Works (as
+          defined in Section 1 above) or Collective Works (as
+          defined in Section 1 above), You must, unless a request
+          has been made pursuant to Section 4(a), keep intact all
+          copyright notices for the Work and provide, reasonable to
+          the medium or means You are utilizing: (i) the name of
+          the Original Author (or pseudonym, if applicable) if
+          supplied, and/or (ii) if the Original Author and/or
+          Licensor designate another party or parties (e.g. a
+          sponsor institute, publishing entity, journal) for
+          attribution ("Attribution Parties") in Licensor's
+          copyright notice, terms of service or by other reasonable
+          means, the name of such party or parties; the title of
+          the Work if supplied; to the extent reasonably
+          practicable, the Uniform Resource Identifier, if any,
+          that Licensor specifies to be associated with the Work,
+          unless such URI does not refer to the copyright notice or
+          licensing information for the Work; and, consistent with
+          Section 3(b) in the case of a Derivative Work, a credit
+          identifying the use of the Work in the Derivative Work
+          (e.g., "French translation of the Work by Original
+          Author," or "Screenplay based on original Work by
+          Original Author"). The credit required by this Section
+          4(c) may be implemented in any reasonable manner;
+          provided, however, that in the case of a Derivative Work
+          or Collective Work, at a minimum such credit will appear,
+          if a credit for all contributing authors of the
+          Derivative Work or Collective Work appears, then as part
+          of these credits and in a manner at least as prominent as
+          the credits for the other contributing authors. For the
+          avoidance of doubt, You may only use the credit required
+          by this Section for the purpose of attribution in the
+          manner set out above and, by exercising Your rights under
+          this License, You may not implicitly or explicitly assert
+          or imply any connection with, sponsorship or endorsement
+          by the Original Author, Licensor and/or Attribution
+          Parties, as appropriate, of You or Your use of the Work,
+          without the separate, express prior written permission of
+          the Original Author, Licensor and/or Attribution
+          Parties.</li>
+        </ol>
+
+        <p><strong>5. Representations, Warranties and
+        Disclaimer</strong></p>
+
+        <p>UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN
+        WRITING, LICENSOR OFFERS THE WORK AS-IS AND ONLY TO THE
+        EXTENT OF ANY RIGHTS HELD IN THE LICENSED WORK BY THE
+        LICENSOR. THE LICENSOR MAKES NO REPRESENTATIONS OR
+        WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS,
+        IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT
+        LIMITATION, WARRANTIES OF TITLE, MARKETABILITY,
+        MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE,
+        NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS,
+        ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR
+        NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE
+        EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT
+        APPLY TO YOU.</p>
+
+        <p><strong>6. Limitation on Liability.</strong> EXCEPT TO
+        THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL
+        LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY
+        SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY
+        DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK,
+        EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF
+        SUCH DAMAGES.</p>
+
+        <p><strong>7. Termination</strong></p>
+
+        <ol type="a">
+          <li>This License and the rights granted hereunder will
+          terminate automatically upon any breach by You of the
+          terms of this License. Individuals or entities who have
+          received Derivative Works or Collective Works from You
+          under this License, however, will not have their licenses
+          terminated provided such individuals or entities remain
+          in full compliance with those licenses. Sections 1, 2, 5,
+          6, 7, and 8 will survive any termination of this
+          License.</li>
+
+          <li>Subject to the above terms and conditions, the
+          license granted here is perpetual (for the duration of
+          the applicable copyright in the Work). Notwithstanding
+          the above, Licensor reserves the right to release the
+          Work under different license terms or to stop
+          distributing the Work at any time; provided, however that
+          any such election will not serve to withdraw this License
+          (or any other license that has been, or is required to
+          be, granted under the terms of this License), and this
+          License will continue in full force and effect unless
+          terminated as stated above.</li>
+        </ol>
+
+        <p><strong>8. Miscellaneous</strong></p>
+
+        <ol type="a">
+          <li>Each time You distribute or publicly digitally
+          perform the Work (as defined in Section 1 above) or a
+          Collective Work (as defined in Section 1 above), the
+          Licensor offers to the recipient a license to the Work on
+          the same terms and conditions as the license granted to
+          You under this License.</li>
+
+          <li>Each time You distribute or publicly digitally
+          perform a Derivative Work, Licensor offers to the
+          recipient a license to the original Work on the same
+          terms and conditions as the license granted to You under
+          this License.</li>
+
+          <li>If any provision of this License is invalid or
+          unenforceable under applicable law, it shall not affect
+          the validity or enforceability of the remainder of the
+          terms of this License, and without further action by the
+          parties to this agreement, such provision shall be
+          reformed to the minimum extent necessary to make such
+          provision valid and enforceable.</li>
+
+          <li>No term or provision of this License shall be deemed
+          waived and no breach consented to unless such waiver or
+          consent shall be in writing and signed by the party to be
+          charged with such waiver or consent.</li>
+
+          <li>This License constitutes the entire agreement between
+          the parties with respect to the Work licensed here. There
+          are no understandings, agreements or representations with
+          respect to the Work not specified here. Licensor shall
+          not be bound by any additional provisions that may appear
+          in any communication from You. This License may not be
+          modified without the mutual written agreement of the
+          Licensor and You.</li>
+        </ol>
+        <!-- BREAKOUT FOR CC NOTICE.  NOT A PART OF THE LICENSE -->
+
+        <blockquote>
+          <h3>Creative Commons Notice</h3>
+
+          <p>Creative Commons is not a party to this License, and
+          makes no warranty whatsoever in connection with the Work.
+          Creative Commons will not be liable to You or any party
+          on any legal theory for any damages whatsoever, including
+          without limitation any general, special, incidental or
+          consequential damages arising in connection to this
+          license. Notwithstanding the foregoing two (2) sentences,
+          if Creative Commons has expressly identified itself as
+          the Licensor hereunder, it shall have all rights and
+          obligations of Licensor.</p>
+
+          <p>Except for the limited purpose of indicating to the
+          public that the Work is licensed under the CCPL, Creative
+          Commons does not authorize the use by either party of the
+          trademark "Creative Commons" or any related trademark or
+          logo of Creative Commons without the prior written
+          consent of Creative Commons. Any permitted use will be in
+          compliance with Creative Commons' then-current trademark
+          usage guidelines, as may be published on its website or
+          otherwise made available upon request from time to time.
+          For the avoidance of doubt, this trademark restriction
+          does not form part of this License.</p>
+
+          <p>Creative Commons may be contacted at <a href=
+          "http://creativecommons.org/">http://creativecommons.org/</a>.</p>
+        </blockquote>
+      </div>
+    </div>
+
+  </div>
diff --git a/doc/user/copying/copying.html.textile.liquid b/doc/user/copying/copying.html.textile.liquid
new file mode 100644 (file)
index 0000000..3785ef5
--- /dev/null
@@ -0,0 +1,16 @@
+---
+layout: default
+navsection: userguide
+title: "Arvados Free Software Licenses"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Server-side components of Arvados contained in the apps/ and services/ directories, including the API Server, Workbench, Keep, and Crunch, are licensed under the "GNU Affero General Public License version 3":agpl-3.0.html.
+
+The Arvados client Software Development Kits contained in the sdk/ directory, example scripts in the crunch_scripts/ directory, and code samples in the Arvados documentation are licensed under the "Apache License, Version 2.0":LICENSE-2.0.html.
+
+The Arvados Documentation located in the doc/ directory is licensed under the "Creative Commons Attribution-Share Alike 3.0 United States":by-sa-3.0.html.
diff --git a/doc/user/cwl/bwa-mem/bwa-mem-input-local.yml b/doc/user/cwl/bwa-mem/bwa-mem-input-local.yml
new file mode 100755 (executable)
index 0000000..6fbac26
--- /dev/null
@@ -0,0 +1,18 @@
+#!/usr/bin/env cwltool
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+cwl:tool: bwa-mem.cwl
+reference:
+  class: File
+  location: 19.fasta.bwt
+read_p1:
+  class: File
+  location: HWI-ST1027_129_D0THKACXX.1_1.fastq
+read_p2:
+  class: File
+  location: HWI-ST1027_129_D0THKACXX.1_2.fastq
+group_id: arvados_tutorial
+sample_id: HWI-ST1027_129
+PL: illumina
diff --git a/doc/user/cwl/bwa-mem/bwa-mem-input.yml b/doc/user/cwl/bwa-mem/bwa-mem-input.yml
new file mode 100755 (executable)
index 0000000..243f152
--- /dev/null
@@ -0,0 +1,18 @@
+#!/usr/bin/env cwl-runner
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+cwl:tool: bwa-mem.cwl
+reference:
+  class: File
+  location: keep:2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt
+read_p1:
+  class: File
+  location: keep:ae480c5099b81e17267b7445e35b4bc7+180/HWI-ST1027_129_D0THKACXX.1_1.fastq
+read_p2:
+  class: File
+  location: keep:ae480c5099b81e17267b7445e35b4bc7+180/HWI-ST1027_129_D0THKACXX.1_2.fastq
+group_id: arvados_tutorial
+sample_id: HWI-ST1027_129
+PL: illumina
diff --git a/doc/user/cwl/bwa-mem/bwa-mem-template.yml b/doc/user/cwl/bwa-mem/bwa-mem-template.yml
new file mode 100644 (file)
index 0000000..410f139
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+reference:
+  class: File
+  location: keep:2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt
+PL: illumina
diff --git a/doc/user/cwl/bwa-mem/bwa-mem.cwl b/doc/user/cwl/bwa-mem/bwa-mem.cwl
new file mode 100755 (executable)
index 0000000..2001971
--- /dev/null
@@ -0,0 +1,49 @@
+#!/usr/bin/env cwl-runner
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+
+hints:
+  DockerRequirement:
+    dockerPull: lh3lh3/bwa
+
+baseCommand: [mem]
+
+arguments:
+  - {prefix: "-t", valueFrom: $(runtime.cores)}
+  - {prefix: "-R", valueFrom: "@RG\tID:$(inputs.group_id)\tPL:$(inputs.PL)\tSM:$(inputs.sample_id)"}
+
+inputs:
+  reference:
+    type: File
+    inputBinding:
+      position: 1
+      valueFrom: $(self.dirname)/$(self.nameroot)
+    secondaryFiles:
+      - ^.ann
+      - ^.amb
+      - ^.pac
+      - ^.sa
+    doc: The index files produced by `bwa index`
+  read_p1:
+    type: File
+    inputBinding:
+      position: 2
+    doc: The reads, in fastq format.
+  read_p2:
+    type: File?
+    inputBinding:
+      position: 3
+    doc: For mate-paired reads, the second file (optional).
+  group_id: string
+  sample_id: string
+  PL: string
+
+stdout: $(inputs.read_p1.nameroot).sam
+
+outputs:
+  aligned_sam:
+    type: stdout
diff --git a/doc/user/cwl/cwl-extensions.html.textile.liquid b/doc/user/cwl/cwl-extensions.html.textile.liquid
new file mode 100644 (file)
index 0000000..d620022
--- /dev/null
@@ -0,0 +1,161 @@
+---
+layout: default
+navsection: userguide
+title: Arvados CWL Extensions
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados provides several extensions to CWL for workflow optimization, site-specific configuration, and access to the Arvados API.
+
+To use Arvados CWL extensions, add the following @$namespaces@ section at the top of your CWL file:
+
+<pre>
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+</pre>
+
+For portability, Arvados extensions should go into the @hints@ section of your CWL file, for example:
+
+<pre>
+hints:
+  arv:RunInSingleContainer: {}
+  arv:RuntimeConstraints:
+    keep_cache: 123456
+    outputDirType: keep_output_dir
+  arv:PartitionRequirement:
+    partition: dev_partition
+  arv:APIRequirement: {}
+  cwltool:LoadListingRequirement:
+    loadListing: shallow_listing
+  arv:IntermediateOutput:
+    outputTTL: 3600
+  arv:ReuseRequirement:
+    enableReuse: false
+  cwltool:Secrets:
+    secrets: [input1, input2]
+  cwltool:TimeLimit:
+    timelimit: 14400
+  arv:WorkflowRunnerResources:
+    ramMin: 2048
+    coresMin: 2
+    keep_cache: 512
+  arv:ClusterTarget:
+    cluster_id: clsr1
+    project_uuid: clsr1-j7d0g-qxc4jcji7n4lafx
+</pre>
+
+The one exception to this is @arv:APIRequirement@; see the note below.
+
+h2. arv:RunInSingleContainer
+
+Indicates that a subworkflow should run in a single container and not be scheduled as separate steps.
+
+h2. arv:RuntimeConstraints
+
+Set Arvados-specific runtime hints.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|keep_cache|int|Size of file data buffer for Keep mount in MiB. Default is 256 MiB. Increase this to reduce cache thrashing in situations such as accessing multiple large (64+ MiB) files at the same time, or performing random access on a large file.|
+|outputDirType|enum|Preferred backing store for output staging.  If not specified, the system may choose which one to use.  One of *local_output_dir* or *keep_output_dir*|
+
+*local_output_dir*: Use the regular file system local to the compute node. There must be sufficient local scratch space to store the entire output; specify this with @outdirMin@ of @ResourceRequirement@.  Files are batch uploaded to Keep when the process completes.  Most compatible, but the upload step can be time-consuming for very large files.
+
+*keep_output_dir*: Use a writable Keep mount.  Files are streamed to Keep as they are written.  Does not consume local scratch space, but does consume RAM for output buffers (up to 192 MiB per file simultaneously open for writing).  Best suited to processes which produce sequential output of large files (non-sequential writes may produce fragmented file manifests).  Supports regular files and directories; does not support special files such as symlinks, hard links, named pipes, named sockets, or device nodes.
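+
+For example, a minimal sketch (the cache size shown is illustrative) requesting a larger Keep cache and streaming output directly to Keep:
+
+<pre>
+hints:
+  arv:RuntimeConstraints:
+    keep_cache: 1024
+    outputDirType: keep_output_dir
+</pre>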
+
+h2. arv:PartitionRequirement
+
+Select preferred compute partitions on which to run jobs.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|partition|string or array of strings||
+
+h2. arv:APIRequirement
+
+Indicates that the process wants to access the Arvados API.  The process will be granted network access and will have @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ set in its environment.  Tools which rely on the Arvados API being present should put @arv:APIRequirement@ in the @requirements@ section of the tool (rather than @hints@) to indicate that the tool is not portable to non-Arvados CWL runners.
+
+Use @arv:APIRequirement@ in @hints@ to enable general (non-Arvados-specific) network access for a tool.
+
+h2. cwltool:LoadListingRequirement
+
+In CWL v1.0 documents, the default behavior for Directory objects is to recursively expand the @listing@ for access by parameter references and expressions.  For directory trees containing many files, this can be expensive in both time and memory usage.  Use @cwltool:LoadListingRequirement@ to change the behavior for expansion of directory listings in the workflow runner.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|loadListing|string|One of @no_listing@, @shallow_listing@, or @deep_listing@|
+
+*no_listing*: Do not expand directory listing at all.  The @listing@ field on the Directory object will be undefined.
+
+*shallow_listing*: Only expand the first level of directory listing.  The @listing@ field on the top-level Directory object will contain the directory contents; however, @listing@ will not be defined on subdirectories.
+
+*deep_listing*: Recursively expand all levels of directory listing.  The @listing@ field will be provided on the toplevel object and all subdirectories.
+
+h2. arv:IntermediateOutput
+
+Specify desired handling of intermediate output collections.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|outputTTL|int|If the value is greater than zero, intermediate output collections are considered temporary and will be automatically trashed @outputTTL@ seconds after creation.  A value of zero means intermediate output should be retained indefinitely (this is the default behavior).
+Note: arvados-cwl-runner currently does not take workflow dependencies into account when setting the TTL on an intermediate output collection. If the TTL is too short, it is possible for a collection to be trashed before downstream steps that consume it are started.  The recommended minimum value for TTL is the expected duration of the entire workflow.|
+
+h2. arv:ReuseRequirement
+
+Enable/disable work reuse for current process.  Default true (work reuse enabled).
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|enableReuse|boolean|Enable/disable work reuse for current process.  Default true (work reuse enabled).|
+
+h2. cwltool:Secrets
+
+Indicate that one or more input parameters are "secret".  Must be applied at the top-level Workflow.  Secret parameters are not stored in Keep, are hidden from logs and API responses, and are wiped from the database after the workflow completes.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|secrets|array<string>|Input parameters which are considered "secret".  Must be strings.|
+
+
+h2. cwltool:TimeLimit
+
+Set an upper limit on the execution time of a CommandLineTool or ExpressionTool.  A tool execution which exceeds the time limit may be preemptively terminated and considered failed.  May also be used by batch systems to make scheduling decisions.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|timelimit|int|Execution time limit in seconds. If set to zero, no limit is enforced.|
+
+h2. arv:WorkflowRunnerResources
+
+Specify resource requirements for the workflow runner process (arvados-cwl-runner) that manages a workflow run.  Must be applied to the top level workflow.  Will also be set implicitly when using @--submit-runner-ram@ on the command line along with @--create-workflow@ or @--update-workflow@.  Use this to adjust the runner's allocation if the workflow runner is getting "out of memory" exceptions or being killed by the out-of-memory (OOM) killer.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|ramMin|int|RAM, in mebibytes, to reserve for the arvados-cwl-runner process. Default 1 GiB|
+|coresMin|int|Number of cores to reserve to the arvados-cwl-runner process. Default 1 core.|
+|keep_cache|int|Size of collection metadata cache for the workflow runner, in MiB.  Default 256 MiB.  Will be added on to the RAM request when determining node size to request.|
+
+h2(#clustertarget). arv:ClusterTarget
+
+Specify which Arvados cluster should execute a container or subworkflow, and the parent project for the container request.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|cluster_id|string|The five-character alphanumeric cluster id (uuid prefix) where a container or subworkflow will execute.  May be an expression.|
+|project_uuid|string|The uuid of the project which will own the container request and the output of the container.  May be an expression.|
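+
+For example, a sketch (the cluster id and project uuid below are placeholders) directing a single workflow step to another cluster:
+
+<pre>
+steps:
+  analysis_step:
+    run: tool.cwl
+    in: {inp: inp}
+    out: [out]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: clsr2
+        project_uuid: clsr2-j7d0g-xxxxxxxxxxxxxxx
+</pre>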
+
+h2. arv:dockerCollectionPDH
+
+This is an optional extension field appearing on the standard @DockerRequirement@.  It specifies the portable data hash of the Arvados collection containing the Docker image.  If present, it takes precedence over @dockerPull@ or @dockerImageId@.
+
+<pre>
+requirements:
+  DockerRequirement:
+    dockerPull: "debian:8"
+    arv:dockerCollectionPDH: "feaf1fc916103d7cdab6489e1f8c3a2b+174"
+</pre>
diff --git a/doc/user/cwl/cwl-run-options.html.textile.liquid b/doc/user/cwl/cwl-run-options.html.textile.liquid
new file mode 100644 (file)
index 0000000..27970f4
--- /dev/null
@@ -0,0 +1,144 @@
+---
+layout: default
+navsection: userguide
+title: "Using arvados-cwl-runner"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+# "*Command line options*":#options
+# "*Specify workflow and output names*":#names
+# "*Submit a workflow without waiting for the result*":#nowait
+# "*Control a workflow locally*":#local
+# "*Automatically delete intermediate outputs*":#delete
+# "*Run workflow on a remote federated cluster*":#federation
+
+h3(#options). Command line options
+
+The following command line options are available for @arvados-cwl-runner@:
+
+table(table table-bordered table-condensed).
+|_. Option |_. Description |
+|==--basedir== BASEDIR|     Base directory used to resolve relative references in the input; defaults to the directory of the input object file, or the current directory if inputs are piped/provided on the command line.|
+|==--eval-timeout EVAL_TIMEOUT==|Time to wait for a Javascript expression to evaluate before giving an error, default 20s.|
+|==--print-dot==|           Print workflow visualization in graphviz format and exit|
+|==--version==|             Print version and exit|
+|==--validate==|            Validate CWL document only.|
+|==--verbose==|             Default logging|
+|==--quiet==|               Only print warnings and errors.|
+|==--debug==|               Print even more logging|
+|==--metrics==|             Print timing metrics|
+|==--tool-help==|           Print command line help for tool|
+|==--enable-reuse==|        Enable job or container reuse (default)|
+|==--disable-reuse==|       Disable job or container reuse|
+|==--project-uuid UUID==|   Project that will own the workflow jobs.  If not provided, jobs will go to the user's home project.|
+|==--output-name OUTPUT_NAME==|Name to use for collection that stores the final output.|
+|==--output-tags OUTPUT_TAGS==|Tags for the final output collection separated by commas, e.g., =='--output-tags tag0,tag1,tag2'==.|
+|==--ignore-docker-for-reuse==|Ignore Docker image version when deciding whether to reuse past jobs.|
+|==--submit==|              Submit workflow runner to Arvados to manage the workflow (default).|
+|==--local==|               Run workflow on local host (still submits jobs to Arvados).|
+|==--create-template==|     (Deprecated) synonym for --create-workflow.|
+|==--create-workflow==|     Create an Arvados workflow (if using the 'containers' API) or pipeline template (if using the 'jobs' API). See --api.|
+|==--update-workflow== UUID|Update an existing Arvados workflow or pipeline template with the given UUID.|
+|==--wait==|                After submitting workflow runner job, wait for completion.|
+|==--no-wait==|             Submit workflow runner job and exit.|
+|==--log-timestamps==|      Prefix logging lines with timestamp|
+|==--no-log-timestamps==|   No timestamp on logging lines|
+|==--api== {jobs,containers}|Select work submission API. Default is 'jobs' if that API is available, otherwise 'containers'.|
+|==--compute-checksum==|    Compute checksum of contents while collecting outputs|
+|==--submit-runner-ram== SUBMIT_RUNNER_RAM|RAM (in MiB) required for the workflow runner job (default 1024)|
+|==--submit-runner-image== SUBMIT_RUNNER_IMAGE|Docker image for workflow runner job|
+|==--always-submit-runner==|When invoked with --submit --wait, always submit a runner to manage the workflow, even when only running a single CommandLineTool|
+|==--submit-request-uuid== UUID|Update and commit to supplied container request instead of creating a new one (containers API only).|
+|==--submit-runner-cluster== CLUSTER_ID|Submit workflow runner to a remote cluster (containers API only)|
+|==--name NAME==|Name to use for workflow execution instance.|
+|==--on-error== {stop,continue}|Desired workflow behavior when a step fails.  One of 'stop' (do not submit any more steps) or 'continue' (may submit other steps that are not downstream from the error). Default is 'continue'.|
+|==--enable-dev==|Enable loading and running development versions of CWL spec.|
+|==--storage-classes== STORAGE_CLASSES|Specify comma separated list of storage classes to be used when saving workflow output to Keep.|
+|==--intermediate-output-ttl== N|If N > 0, intermediate output collections will be trashed N seconds after creation. Default is 0 (don't trash).|
+|==--priority== PRIORITY|Workflow priority (range 1..1000, higher has precedence over lower, containers api only)|
+|==--thread-count== THREAD_COUNT|Number of threads to use for job submit and output collection.|
+|==--http-timeout== HTTP_TIMEOUT|API request timeout in seconds. Default is 300 seconds (5 minutes).|
+|==--trash-intermediate==|Immediately trash intermediate outputs on workflow success.|
+|==--no-trash-intermediate==|Do not trash intermediate outputs (default).|
+
+
+h3(#names). Specify workflow and output names
+
+Use the @--name@ and @--output-name@ options to specify the name of the workflow and name of the output collection.
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner --name "Example bwa run" --output-name "Example bwa output" bwa-mem.cwl bwa-mem-input.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Upload local files: "bwa-mem.cwl"
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Uploaded to qr1hi-4zz18-h7ljh5u76760ww2
+2016-06-30 14:56:40 arvados.cwl-runner[27002] INFO: Submitted job qr1hi-8i9sb-fm2n3b1w0l6bskg
+2016-06-30 14:56:41 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Running
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Complete
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Overall process status is success
+{
+    "aligned_sam": {
+        "path": "keep:54325254b226664960de07b3b9482349+154/HWI-ST1027_129_D0THKACXX.1_1.sam",
+        "checksum": "sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a",
+        "class": "File",
+        "size": 30738986
+    }
+}
+</code></pre>
+</notextile>
+
+h3(#nowait). Submit a workflow without waiting for the result
+
+To submit a workflow and exit immediately, use the @--no-wait@ option.  This will submit the workflow to Arvados, print out the UUID of the job that was submitted to standard output, and exit.
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner --no-wait bwa-mem.cwl bwa-mem-input.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-06-30 15:07:52 arvados.arv-run[12480] INFO: Upload local files: "bwa-mem.cwl"
+2016-06-30 15:07:52 arvados.arv-run[12480] INFO: Uploaded to qr1hi-4zz18-eqnfwrow8aysa9q
+2016-06-30 15:07:52 arvados.cwl-runner[12480] INFO: Submitted job qr1hi-8i9sb-fm2n3b1w0l6bskg
+qr1hi-8i9sb-fm2n3b1w0l6bskg
+</code></pre>
+</notextile>
+
+h3(#local). Control a workflow locally
+
+To run a workflow with local control, use @--local@.  This means that the host where you run @arvados-cwl-runner@ will be responsible for submitting jobs; however, the jobs themselves will still run on the Arvados cluster.  With @--local@, if you interrupt @arvados-cwl-runner@ or log out, the workflow will be terminated.
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner --local bwa-mem.cwl bwa-mem-input.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-07-01 10:05:19 arvados.cwl-runner[16290] INFO: Pipeline instance qr1hi-d1hrv-92wcu6ldtio74r4
+2016-07-01 10:05:28 arvados.cwl-runner[16290] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-2nzzfbuf9zjrj4g) is Queued
+2016-07-01 10:05:29 arvados.cwl-runner[16290] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-2nzzfbuf9zjrj4g) is Running
+2016-07-01 10:05:45 arvados.cwl-runner[16290] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-2nzzfbuf9zjrj4g) is Complete
+2016-07-01 10:05:46 arvados.cwl-runner[16290] INFO: Overall process status is success
+{
+    "aligned_sam": {
+        "size": 30738986,
+        "path": "keep:15f56bad0aaa7364819bf14ca2a27c63+88/HWI-ST1027_129_D0THKACXX.1_1.sam",
+        "checksum": "sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a",
+        "class": "File"
+    }
+}
+</code></pre>
+</notextile>
+
+h3(#delete). Automatically delete intermediate outputs
+
+Use the @--intermediate-output-ttl@ and @--trash-intermediate@ options to specify how long intermediate outputs should be kept (in seconds) and whether to trash them immediately upon successful workflow completion.
+
+Temporary collections will be trashed @intermediate-output-ttl@ seconds after creation.  A value of zero (default) means intermediate output should be retained indefinitely.
+
+Note: arvados-cwl-runner currently does not take workflow dependencies into account when setting the TTL on an intermediate output collection. If the TTL is too short, it is possible for a collection to be trashed before downstream steps that consume it are started.  The recommended minimum value for TTL is the expected duration of the entire workflow.
+
+Using @--trash-intermediate@ without @--intermediate-output-ttl@ means that intermediate files will be trashed on successful completion, but will remain on workflow failure.
+
+Using @--intermediate-output-ttl@ without @--trash-intermediate@ means that intermediate files will be trashed only after the TTL expires (regardless of workflow success or failure).
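+
+For example, the following invocation (flags combined for illustration) trashes each intermediate output collection one hour after it is created, or immediately once the workflow succeeds:
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner --intermediate-output-ttl 3600 --trash-intermediate bwa-mem.cwl bwa-mem-input.yml</span>
+</code></pre>
+</notextile>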
+
+h3(#federation). Run workflow on a remote federated cluster
+
+By default, the workflow runner will run on the local (home) cluster.  Using @--submit-runner-cluster@, you can specify that the runner should be submitted to a remote federated cluster.  When doing this, @--project-uuid@ should specify a project on that cluster.  Steps making up the workflow will be submitted to the remote federated cluster by default, but the behavior of @arv:ClusterTarget@ is unchanged.  Note: when using this option, any resources that need to be uploaded in order to run the workflow (such as files or Docker images) will be uploaded to the local (home) cluster, and streamed to the federated cluster on demand.
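+
+For example, a sketch (the cluster id @clsr2@ and project uuid are placeholders) submitting the runner to a federated cluster:
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner --submit-runner-cluster clsr2 --project-uuid clsr2-j7d0g-xxxxxxxxxxxxxxx bwa-mem.cwl bwa-mem-input.yml</span>
+</code></pre>
+</notextile>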
diff --git a/doc/user/cwl/cwl-runner.html.textile.liquid b/doc/user/cwl/cwl-runner.html.textile.liquid
new file mode 100644 (file)
index 0000000..ad5d3bd
--- /dev/null
@@ -0,0 +1,158 @@
+---
+layout: default
+navsection: userguide
+title: "Running an Arvados workflow"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'what_is_cwl' %}
+
+{% include 'tutorial_expectations' %}
+
+{% include 'notebox_begin' %}
+
+By default, the @arvados-cwl-runner@ is installed on Arvados shell nodes.  If you want to submit jobs from somewhere else, such as your workstation, you may install "arvados-cwl-runner.":#setup
+
+{% include 'notebox_end' %}
+
+This tutorial will demonstrate how to submit a workflow at the command line using @arvados-cwl-runner@.
+
+h2. Running arvados-cwl-runner
+
+h3. Get the example files
+
+The tutorial files are located in the "documentation section of the Arvados source repository:":https://github.com/curoverse/arvados/tree/master/doc/user/cwl/bwa-mem
+
+<notextile>
+<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados</span>
+~$ <span class="userinput">cd arvados/doc/user/cwl/bwa-mem</span>
+</code></pre>
+</notextile>
+
+The tutorial data is hosted on "https://playground.arvados.org":https://playground.arvados.org (also referred to by the identifier *qr1hi*).  If you are using a different Arvados instance, you may need to copy the data to your own instance.  The easiest way to do this is with "arv-copy":{{site.baseurl}}/user/topics/arv-copy.html (this requires signing up for a free playground.arvados.org account).
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst settings 2463fa9efeb75e099685528b3b9071e0+438</span>
+~$ <span class="userinput">arv-copy --src qr1hi --dst settings ae480c5099b81e17267b7445e35b4bc7+180</span>
+~$ <span class="userinput">arv-copy --src qr1hi --dst settings 655c6cd07550151b210961ed1d3852cf+57</span>
+</code></pre>
+</notextile>
+
+If you do not wish to create an account on "https://playground.arvados.org":https://playground.arvados.org, you may download the files anonymously and upload them to your local Arvados instance:
+
+"https://playground.arvados.org/collections/2463fa9efeb75e099685528b3b9071e0+438":https://playground.arvados.org/collections/2463fa9efeb75e099685528b3b9071e0+438
+
+"https://playground.arvados.org/collections/ae480c5099b81e17267b7445e35b4bc7+180":https://playground.arvados.org/collections/ae480c5099b81e17267b7445e35b4bc7+180
+
+"https://playground.arvados.org/collections/655c6cd07550151b210961ed1d3852cf+57":https://playground.arvados.org/collections/655c6cd07550151b210961ed1d3852cf+57
+
+h2. Submitting a workflow to an Arvados cluster
+
+h3. Submit a workflow and wait for results
+
+Use @arvados-cwl-runner@ to submit CWL workflows to Arvados.  After submitting the workflow, it will wait for the workflow to complete and print the final result to standard output.
+
+*Note:* Once submitted, the workflow runs entirely on Arvados, so even if you log out, the workflow will continue to run.  However, if you interrupt @arvados-cwl-runner@ with control-C it will cancel the workflow.
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner bwa-mem.cwl bwa-mem-input.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Upload local files: "bwa-mem.cwl"
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Uploaded to qr1hi-4zz18-h7ljh5u76760ww2
+2016-06-30 14:56:40 arvados.cwl-runner[27002] INFO: Submitted job qr1hi-8i9sb-fm2n3b1w0l6bskg
+2016-06-30 14:56:41 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Running
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Complete
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Overall process status is success
+{
+    "aligned_sam": {
+        "path": "keep:54325254b226664960de07b3b9482349+154/HWI-ST1027_129_D0THKACXX.1_1.sam",
+        "checksum": "sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a",
+        "class": "File",
+        "size": 30738986
+    }
+}
+</code></pre>
+</notextile>
+
+h3. Referencing files
+
+When running a workflow on an Arvados cluster, the input files must be stored in Keep.  There are several ways this can happen.
+
+A URI reference to Keep uses the @keep:@ scheme followed by the portable data hash, collection size, and path to the file inside the collection.  For example, @keep:2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt@.
+
+If you reference a file in "arv-mount":{{site.baseurl}}/user/tutorials/tutorial-keep-mount.html, such as @/home/example/keep/by_id/2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt@, then @arvados-cwl-runner@ will automatically determine the appropriate Keep URI reference.
+
+If you reference a local file which is not in @arv-mount@, then @arvados-cwl-runner@ will upload the file to Keep and use the Keep URI reference from the upload.
+
+You can also execute CWL files directly from Keep:
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner keep:655c6cd07550151b210961ed1d3852cf+57/bwa-mem.cwl bwa-mem-input.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Uploaded to qr1hi-4zz18-h7ljh5u76760ww2
+2016-06-30 14:56:40 arvados.cwl-runner[27002] INFO: Submitted job qr1hi-8i9sb-fm2n3b1w0l6bskg
+2016-06-30 14:56:41 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Running
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Complete
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Overall process status is success
+{
+    "aligned_sam": {
+        "path": "keep:54325254b226664960de07b3b9482349+154/HWI-ST1027_129_D0THKACXX.1_1.sam",
+        "checksum": "sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a",
+        "class": "File",
+        "size": 30738986
+    }
+}
+</code></pre>
+</notextile>
+
+h3. Work reuse
+
+Workflows submitted with @arvados-cwl-runner@ will take advantage of Arvados job reuse.  If you submit a workflow which is identical to one that has run before, it will short-circuit the execution and return the result of the previous run.  This also applies to individual workflow steps.  For example, in a two-step workflow where the first step has run before, the workflow will reuse results for the first step and only execute the new second step.  You can disable this behavior with @--disable-reuse@.
+
+h3. Command line options
+
+See "Using arvados-cwl-runner":{{site.baseurl}}/user/cwl/cwl-run-options.html
+
+h2(#setup). Setting up arvados-cwl-runner
+
+By default, the @arvados-cwl-runner@ is installed on Arvados shell nodes.  If you want to submit jobs from somewhere else, such as your workstation, you may install @arvados-cwl-runner@ using @pip@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">virtualenv ~/venv</span>
+~$ <span class="userinput">. ~/venv/bin/activate</span>
+~$ <span class="userinput">pip install -U setuptools</span>
+~$ <span class="userinput">pip install arvados-cwl-runner</span>
+</code></pre>
+</notextile>
+
+h3. Check Docker access
+
+In order to pull and upload Docker images, @arvados-cwl-runner@ requires access to Docker.  You do not need Docker if the Docker images you intend to use are already available in Arvados.
+
+You can determine if you have access to Docker by running @docker version@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">docker version</span>
+Client:
+ Version:      1.9.1
+ API version:  1.21
+ Go version:   go1.4.2
+ Git commit:   a34a1d5
+ Built:        Fri Nov 20 12:59:02 UTC 2015
+ OS/Arch:      linux/amd64
+
+Server:
+ Version:      1.9.1
+ API version:  1.21
+ Go version:   go1.4.2
+ Git commit:   a34a1d5
+ Built:        Fri Nov 20 12:59:02 UTC 2015
+ OS/Arch:      linux/amd64
+</code></pre>
+</notextile>
+
+If this returns an error, contact the sysadmin of your cluster for assistance.
diff --git a/doc/user/cwl/cwl-style.html.textile.liquid b/doc/user/cwl/cwl-style.html.textile.liquid
new file mode 100644 (file)
index 0000000..fe53f4a
--- /dev/null
@@ -0,0 +1,185 @@
+---
+layout: default
+navsection: userguide
+title: Best Practices for writing CWL
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+* To run on Arvados, a workflow should provide a @DockerRequirement@ in the @hints@ section.
+
+* Build a reusable library of components.  Share tool wrappers and subworkflows between projects.  Make use of and contribute to "community maintained workflows and tools":https://github.com/common-workflow-language/workflows and tool registries such as "Dockstore":http://dockstore.org .
+
+* When combining a parameter value with a string, such as adding a filename extension, write @$(inputs.file.basename).ext@ instead of @$(inputs.file.basename + 'ext')@.  The first form is evaluated as a simple text substitution, the second form (using the @+@ operator) is evaluated as an arbitrary Javascript expression and requires that you declare @InlineJavascriptRequirement@.
+
+* Avoid declaring @InlineJavascriptRequirement@ or @ShellCommandRequirement@ unless you specifically need them.  Don't include them "just in case" because they change the default behavior and may imply extra overhead.
+
+* Don't write CWL scripts that access the Arvados SDK.  This is non-portable; a script that accesses Arvados directly won't work with @cwltool@ or crunch v2.
+
+* CommandLineTools wrapping custom scripts should represent the script as an input parameter with the script file as a default value.  Use @secondaryFiles@ for scripts that consist of multiple files.  For example:
+
+<pre>
+cwlVersion: v1.0
+class: CommandLineTool
+baseCommand: python
+inputs:
+  script:
+    type: File
+    inputBinding: {position: 1}
+    default:
+      class: File
+      location: bclfastq.py
+      secondaryFiles:
+        - class: File
+          location: helper1.py
+        - class: File
+          location: helper2.py
+  inputfile:
+    type: File
+    inputBinding: {position: 2}
+outputs:
+  out:
+    type: File
+    outputBinding:
+      glob: "*.fastq"
+</pre>
+
+* You can get the designated temporary directory using @$(runtime.tmpdir)@ in your CWL file, or from the @$TMPDIR@ environment variable in your script.
+
+* Similarly, you can get the designated output directory using @$(runtime.outdir)@ in your CWL file, or from the @HOME@ environment variable in your script.
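+
+A sketch fragment (the @--temp-dir@ flag is illustrative, not part of any particular tool) passing the designated temporary directory to a command:
+
+<pre>
+arguments:
+  - {prefix: "--temp-dir", valueFrom: $(runtime.tmpdir)}
+</pre>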
+
+* Use @ExpressionTool@ to efficiently rearrange input files between steps of a Workflow.  For example, the following expression accepts a directory containing files paired by @_R1_@ and @_R2_@ and produces an array of Directories containing each pair.
+
+<pre>
+class: ExpressionTool
+cwlVersion: v1.0
+inputs:
+  inputdir: Directory
+outputs:
+  out: Directory[]
+requirements:
+  InlineJavascriptRequirement: {}
+expression: |
+  ${
+    var samples = {};
+    for (var i = 0; i < inputs.inputdir.listing.length; i++) {
+      var file = inputs.inputdir.listing[i];
+      var groups = file.basename.match(/^(.+)(_R[12]_)(.+)$/);
+      if (groups) {
+        if (!samples[groups[1]]) {
+          samples[groups[1]] = [];
+        }
+        samples[groups[1]].push(file);
+      }
+    }
+    var dirs = [];
+    for (var key in samples) {
+      dirs.push({"class": "Directory",
+                 "basename": key,
+                 "listing": samples[key]});
+    }
+    return {"out": dirs};
+  }
+</pre>
+
+* Avoid specifying resource requirements in CommandLineTool.  Prefer to specify them in the workflow.  You can provide a default resource requirement in the top level @hints@ section, and individual steps can override it with their own resource requirement.
+
+<pre>
+cwlVersion: v1.0
+class: Workflow
+inputs:
+  inp: File
+outputs:
+  out:
+    type: File
+    outputSource: step2/out
+hints:
+  ResourceRequirement:
+    ramMin: 1000
+    coresMin: 1
+    tmpdirMin: 45000
+steps:
+  step1:
+    in: {inp: inp}
+    out: [out]
+    run: tool1.cwl
+  step2:
+    in: {inp: step1/out}
+    out: [out]
+    run: tool2.cwl
+    hints:
+      ResourceRequirement:
+        ramMin: 2000
+        coresMin: 2
+        tmpdirMin: 90000
+</pre>
+
+* Available compute node types vary over time and across different cloud providers, so try to limit the RAM requirement to what the program actually needs.  However, if you need to target a specific compute node type, see this discussion on "calculating RAM request and choosing instance type for containers.":{{site.baseurl}}/api/execution.html#RAM
+
+* Instead of scattering separate steps, prefer to scatter over a subworkflow.
+
+With the following pattern, @step1@ has to wait for all samples to complete before @step2@ can start computing on any samples.  This means a single long-running sample can prevent the rest of the workflow from moving on:
+
+<pre>
+cwlVersion: v1.0
+class: Workflow
+requirements:
+  ScatterFeatureRequirement: {}
+inputs:
+  inp: File[]
+outputs:
+  out:
+    type: File[]
+    outputSource: step3/out
+steps:
+  step1:
+    in: {inp: inp}
+    scatter: inp
+    out: [out]
+    run: tool1.cwl
+  step2:
+    in: {inp: step1/out}
+    scatter: inp
+    out: [out]
+    run: tool2.cwl
+  step3:
+    in: {inp: step2/out}
+    scatter: inp
+    out: [out]
+    run: tool3.cwl
+</pre>
+
+Instead, scatter over a subworkflow.  With this pattern, a sample can proceed to @step2@ as soon as @step1@ is done, independently of any other samples.
+For example (note that the subworkflow could also be placed in a separate file):
+
+<pre>
+cwlVersion: v1.0
+class: Workflow
+requirements:
+  ScatterFeatureRequirement: {}
+  SubworkflowFeatureRequirement: {}
+inputs:
+  inp: File[]
+outputs:
+  out:
+    type: File[]
+    outputSource: step1/out
+steps:
+  step1:
+    in: {inp: inp}
+    scatter: inp
+    out: [out]
+    run:
+      class: Workflow
+      inputs:
+        inp: File
+      outputs:
+        out:
+          type: File
+          outputSource: step3/out
+      steps:
+        step1:
+          in: {inp: inp}
+          out: [out]
+          run: tool1.cwl
+        step2:
+          in: {inp: step1/out}
+          out: [out]
+          run: tool2.cwl
+        step3:
+          in: {inp: step2/out}
+          out: [out]
+          run: tool3.cwl
+</pre>
+
+h2(#migrate). Migrating running CWL on jobs API to containers API
+
+When migrating from the jobs API (@--api=jobs@, sometimes referred to as "crunch v1") to the containers API (@--api=containers@, "crunch v2"), there are a few differences in behavior:
+
+* A tool may fail to find an input file that could be found when run under the jobs API.  This is because tools are limited to accessing collections explicitly listed in the input, and further limited to those individual files or subdirectories that are listed.  For example, given an explicit file input @/dir/subdir/file1.txt@, a tool will not be allowed to implicitly access a file in the parent directory @/dir/file2.txt@.  Use @secondaryFiles@ or a @Directory@ for files that need to be grouped together.
+* A tool may fail when attempting to rename or delete a file in the output directory.  This may happen because files listed in @InitialWorkDirRequirement@ appear in the output directory as normal files (not symlinks) but cannot be moved, renamed or deleted unless marked as "writable" in CWL.  These files will be added to the output collection but without any additional copies of the underlying data.
+* A tool may fail when attempting to access the network.  This may happen because, unlike the jobs API, under the containers API network access is disabled by default.  Tools which require network access should add @arv:APIRequirement: {}@ to the @requirements@ section, as sketched below.
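+
+A minimal sketch of that @requirements@ entry; note that the @$namespaces@ declaration described in "Arvados CWL Extensions":{{site.baseurl}}/user/cwl/cwl-extensions.html must also be present:
+
+<pre>
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  arv:APIRequirement: {}
+</pre>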
diff --git a/doc/user/cwl/federated-workflow.odg b/doc/user/cwl/federated-workflow.odg
new file mode 100644 (file)
index 0000000..198791a
Binary files /dev/null and b/doc/user/cwl/federated-workflow.odg differ
diff --git a/doc/user/cwl/federated-workflow.svg b/doc/user/cwl/federated-workflow.svg
new file mode 100644 (file)
index 0000000..d113662
--- /dev/null
@@ -0,0 +1,239 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg version="1.2" width="210mm" height="148mm" viewBox="0 0 21000 14800" preserveAspectRatio="xMidYMid" fill-rule="evenodd" stroke-width="28.222" stroke-linejoin="round" xmlns="http://www.w3.org/2000/svg" xmlns:ooo="http://xml.openoffice.org/svg/export" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:presentation="http://sun.com/xmlns/staroffice/presentation" xmlns:smil="http://www.w3.org/2001/SMIL20/" xmlns:anim="urn:oasis:names:tc:opendocument:xmlns:animation:1.0" xml:space="preserve">
+ <defs class="ClipPathGroup">
+  <clipPath id="presentation_clip_path" clipPathUnits="userSpaceOnUse">
+   <rect x="0" y="0" width="21000" height="14800"/>
+  </clipPath>
+  <clipPath id="presentation_clip_path_shrink" clipPathUnits="userSpaceOnUse">
+   <rect x="21" y="14" width="20958" height="14771"/>
+  </clipPath>
+ </defs>
+ <defs>
+  <font id="EmbeddedFont_1" horiz-adv-x="2048">
+   <font-face font-family="Liberation Sans embedded" units-per-em="2048" font-weight="normal" font-style="normal" ascent="1852" descent="423"/>
+   <missing-glyph horiz-adv-x="2048" d="M 0,0 L 2047,0 2047,2047 0,2047 0,0 Z"/>
+   <glyph unicode="y" horiz-adv-x="1059" d="M 604,1 C 579,-64 553,-123 527,-175 500,-227 471,-272 438,-309 405,-346 369,-374 329,-394 289,-413 243,-423 191,-423 168,-423 147,-423 128,-423 109,-423 88,-420 67,-414 L 67,-279 C 80,-282 94,-284 110,-284 126,-284 140,-284 151,-284 204,-284 253,-264 298,-225 343,-186 383,-124 417,-38 L 434,5 5,1082 197,1082 425,484 C 432,466 440,442 451,412 461,382 471,352 482,322 492,292 501,265 509,241 517,217 522,202 523,196 525,203 530,218 538,240 545,261 554,285 564,312 573,339 583,366 593,393 603,420 611,444 618,464 L 830,1082 1020,1082 604,1 Z"/>
+   <glyph unicode="w" horiz-adv-x="1535" d="M 1174,0 L 965,0 792,698 C 787,716 781,738 776,765 770,792 764,818 759,843 752,872 746,903 740,934 734,904 728,874 721,845 716,820 710,793 704,766 697,739 691,715 686,694 L 508,0 300,0 -3,1082 175,1082 358,347 C 363,332 367,313 372,291 377,268 381,246 386,225 391,200 396,175 401,149 406,174 412,199 418,223 423,244 429,265 434,286 439,307 444,325 448,339 L 644,1082 837,1082 1026,339 C 1031,322 1036,302 1041,280 1046,258 1051,237 1056,218 1061,195 1067,172 1072,149 1077,174 1083,199 1088,223 1093,244 1098,265 1103,288 1108,310 1112,330 1117,347 L 1308,1082 1484,1082 1174,0 Z"/>
+   <glyph unicode="u" horiz-adv-x="901" d="M 314,1082 L 314,396 C 314,343 318,299 326,264 333,229 346,200 363,179 380,157 403,142 432,133 460,124 495,119 537,119 580,119 618,127 653,142 687,157 716,178 741,207 765,235 784,270 797,312 810,353 817,401 817,455 L 817,1082 997,1082 997,228 C 997,205 997,181 998,156 998,131 998,107 999,85 1000,62 1000,43 1001,27 1002,11 1002,3 1003,3 L 833,3 C 832,6 832,15 831,30 830,44 830,61 829,79 828,98 827,117 826,136 825,156 825,172 825,185 L 822,185 C 805,154 786,125 765,100 744,75 720,53 693,36 666,18 634,4 599,-6 564,-15 523,-20 476,-20 416,-20 364,-13 321,2 278,17 242,39 214,70 186,101 166,140 153,188 140,236 133,294 133,361 L 133,1082 314,1082 Z"/>
+   <glyph unicode="t" horiz-adv-x="531" d="M 554,8 C 527,1 499,-5 471,-10 442,-14 409,-16 372,-16 228,-16 156,66 156,229 L 156,951 31,951 31,1082 163,1082 216,1324 336,1324 336,1082 536,1082 536,951 336,951 336,268 C 336,216 345,180 362,159 379,138 408,127 450,127 467,127 484,128 501,131 517,134 535,137 554,141 L 554,8 Z"/>
+   <glyph unicode="s" horiz-adv-x="927" d="M 950,299 C 950,248 940,203 921,164 901,124 872,91 835,64 798,37 752,16 698,2 643,-13 581,-20 511,-20 448,-20 392,-15 342,-6 291,4 247,20 209,41 171,62 139,91 114,126 88,161 69,203 57,254 L 216,285 C 231,227 263,185 311,158 359,131 426,117 511,117 550,117 585,120 618,125 650,130 678,140 701,153 724,166 743,183 756,205 769,226 775,253 775,285 775,318 767,345 752,366 737,387 715,404 688,418 661,432 628,444 589,455 550,465 507,476 460,489 417,500 374,513 331,527 288,541 250,560 216,583 181,606 153,634 132,668 111,702 100,745 100,796 100,895 135,970 206,1022 276,1073 378,1099 513,1099 632,1099 727,1078 798,1036 868,994 912,927 931,834 L 769,814 C 763,842 752,866 736,885 720,904 701,919 678,931 655,942 630,951 602,956 573,961 544,963 513,963 432,963 372,951 333,926 294,901 275,864 275,814 275,785 282,761 297,742 311,723 331,707 357,694 382,681 413,669 449,660 485,650 525,640 568,629 597,622 626,614 656,606 686,597 715,587 744,576 772,564 799,550 824,535 849,519 870,500 889,478 908,456 923,430 934,401 945,372 950,338 950,299 Z"/>
+   <glyph unicode="r" horiz-adv-x="556" d="M 142,0 L 142,830 C 142,853 142,876 142,900 141,923 141,946 140,968 139,990 139,1011 138,1030 137,1049 137,1067 136,1082 L 306,1082 C 307,1067 308,1049 309,1030 310,1010 311,990 312,969 313,948 313,929 314,910 314,891 314,874 314,861 L 318,861 C 331,902 344,938 359,969 373,999 390,1024 409,1044 428,1063 451,1078 478,1088 505,1097 537,1102 575,1102 590,1102 604,1101 617,1099 630,1096 641,1094 648,1092 L 648,927 C 636,930 622,933 606,935 590,936 572,937 552,937 511,937 476,928 447,909 418,890 394,865 376,832 357,799 344,759 335,714 326,668 322,618 322,564 L 322,0 142,0 Z"/>
+   <glyph unicode="o" horiz-adv-x="980" d="M 1053,542 C 1053,353 1011,212 928,119 845,26 724,-20 565,-20 490,-20 422,-9 363,14 304,37 254,71 213,118 172,165 140,223 119,294 97,364 86,447 86,542 86,915 248,1102 571,1102 655,1102 728,1090 789,1067 850,1044 900,1009 939,962 978,915 1006,857 1025,787 1044,717 1053,635 1053,542 Z M 864,542 C 864,626 858,695 845,750 832,805 813,848 788,881 763,914 732,937 696,950 660,963 619,969 574,969 528,969 487,962 450,949 413,935 381,912 355,879 329,846 309,802 296,747 282,692 275,624 275,542 275,458 282,389 297,334 312,279 332,235 358,202 383,169 414,146 449,133 484,120 522,113 563,113 609,113 651,120 688,133 725,146 757,168 783,201 809,234 829,278 843,333 857,388 864,458 864,542 Z"/>
+   <glyph unicode="n" horiz-adv-x="900" d="M 825,0 L 825,686 C 825,739 821,783 814,818 806,853 793,882 776,904 759,925 736,941 708,950 679,959 644,963 602,963 559,963 521,956 487,941 452,926 423,904 399,876 374,847 355,812 342,771 329,729 322,681 322,627 L 322,0 142,0 142,853 C 142,876 142,900 142,925 141,950 141,974 140,996 139,1019 139,1038 138,1054 137,1070 137,1078 136,1078 L 306,1078 C 307,1075 307,1066 308,1052 309,1037 310,1021 311,1002 312,984 312,965 313,945 314,926 314,910 314,897 L 317,897 C 334,928 353,957 374,982 395,1007 419,1029 446,1047 473,1064 505,1078 540,1088 575,1097 616,1102 663,1102 723,1102 775,1095 818,1080 861,1065 897,1043 925,1012 953,981 974,942 987,894 1000,845 1006,788 1006,721 L 1006,0 825,0 Z"/>
+   <glyph unicode="l" horiz-adv-x="187" d="M 138,0 L 138,1484 318,1484 318,0 138,0 Z"/>
+   <glyph unicode="k" horiz-adv-x="927" d="M 816,0 L 450,494 318,385 318,0 138,0 138,1484 318,1484 318,557 793,1082 1004,1082 565,617 1027,0 816,0 Z"/>
+   <glyph unicode="i" horiz-adv-x="187" d="M 137,1312 L 137,1484 317,1484 317,1312 137,1312 Z M 137,0 L 137,1082 317,1082 317,0 137,0 Z"/>
+   <glyph unicode="f" horiz-adv-x="557" d="M 361,951 L 361,0 181,0 181,951 29,951 29,1082 181,1082 181,1204 C 181,1243 185,1280 192,1314 199,1347 213,1377 233,1402 252,1427 279,1446 313,1461 347,1475 391,1482 445,1482 466,1482 489,1481 512,1479 535,1477 555,1474 572,1470 L 572,1333 C 561,1335 548,1337 533,1339 518,1340 504,1341 492,1341 465,1341 444,1337 427,1330 410,1323 396,1312 387,1299 377,1285 370,1268 367,1248 363,1228 361,1205 361,1179 L 361,1082 572,1082 572,951 361,951 Z"/>
+   <glyph unicode="e" horiz-adv-x="980" d="M 276,503 C 276,446 282,394 294,347 305,299 323,258 348,224 372,189 403,163 441,144 479,125 525,115 578,115 656,115 719,131 766,162 813,193 844,233 861,281 L 1019,236 C 1008,206 992,176 972,146 951,115 924,88 890,64 856,39 814,19 763,4 712,-12 650,-20 578,-20 418,-20 296,28 213,123 129,218 87,360 87,548 87,649 100,735 125,806 150,876 185,933 229,977 273,1021 324,1053 383,1073 442,1092 504,1102 571,1102 662,1102 738,1087 799,1058 860,1029 909,988 946,937 983,885 1009,824 1025,754 1040,684 1048,608 1048,527 L 1048,503 276,503 Z M 862,641 C 852,755 823,838 775,891 727,943 658,969 568,969 538,969 507,964 474,955 441,945 410,928 382,903 354,878 330,845 311,803 292,760 281,706 278,641 L 862,641 Z"/>
+   <glyph unicode="a" horiz-adv-x="1060" d="M 414,-20 C 305,-20 224,9 169,66 114,124 87,203 87,303 87,375 101,434 128,480 155,526 190,562 234,588 277,614 327,632 383,642 439,652 496,657 554,657 L 797,657 797,717 C 797,762 792,800 783,832 774,863 759,889 740,908 721,928 697,942 668,951 639,960 604,965 565,965 530,965 499,963 471,958 443,953 419,944 398,931 377,918 361,900 348,878 335,855 327,827 323,793 L 135,810 C 142,853 154,892 173,928 192,963 218,994 253,1020 287,1046 330,1066 382,1081 433,1095 496,1102 569,1102 705,1102 807,1071 876,1009 945,946 979,856 979,738 L 979,272 C 979,219 986,179 1000,152 1014,125 1041,111 1080,111 1090,111 1100,112 1110,113 1120,114 1130,116 1139,118 L 1139,6 C 1116,1 1094,-3 1072,-6 1049,-9 1025,-10 1000,-10 966,-10 937,-5 913,4 888,13 868,26 853,45 838,63 826,86 818,113 810,140 805,171 803,207 L 797,207 C 778,172 757,141 734,113 711,85 684,61 653,42 622,22 588,7 549,-4 510,-15 465,-20 414,-20 Z M 455,115 C 512,115 563,125 606,146 649,167 684,194 713,226 741,259 762,294 776,332 790,371 797,408 797,443 L 797,531 600,531 C 556,531 514,528 475,522 435,517 400,506 370,489 340,472 316,449 299,418 281,388 272,349 272,300 272,241 288,195 320,163 351,131 396,115 455,115 Z"/>
+   <glyph unicode="W" horiz-adv-x="1906" d="M 1511,0 L 1283,0 1039,895 C 1032,920 1024,950 1016,985 1007,1020 1000,1053 993,1084 985,1121 977,1158 969,1196 960,1157 952,1120 944,1083 937,1051 929,1018 921,984 913,950 905,920 898,895 L 652,0 424,0 9,1409 208,1409 461,514 C 472,472 483,430 494,389 504,348 513,311 520,278 529,239 537,203 544,168 554,214 564,259 575,304 580,323 584,342 589,363 594,384 599,404 604,424 609,444 614,463 619,482 624,500 628,517 632,532 L 877,1409 1060,1409 1305,532 C 1309,517 1314,500 1319,482 1324,463 1329,444 1334,425 1339,405 1343,385 1348,364 1353,343 1357,324 1362,305 1373,260 1383,215 1393,168 1394,168 1397,180 1402,203 1407,226 1414,254 1422,289 1430,324 1439,361 1449,402 1458,442 1468,479 1478,514 L 1727,1409 1926,1409 1511,0 Z"/>
+   <glyph unicode="J" horiz-adv-x="848" d="M 457,-20 C 343,-20 250,10 177,69 104,128 55,222 32,350 L 219,381 C 226,338 237,301 252,270 267,239 286,213 307,193 328,173 352,158 378,149 404,140 431,135 458,135 527,135 582,159 622,207 662,254 682,324 682,416 L 682,1253 411,1253 411,1409 872,1409 872,420 C 872,353 863,292 844,238 825,184 798,138 763,100 727,61 683,32 632,11 581,-10 522,-20 457,-20 Z"/>
+   <glyph unicode="C" horiz-adv-x="1297" d="M 792,1274 C 712,1274 641,1261 580,1234 518,1207 466,1169 425,1120 383,1071 351,1011 330,942 309,873 298,796 298,711 298,626 310,549 333,479 356,408 389,348 432,297 475,246 527,207 590,179 652,151 722,137 800,137 855,137 905,144 950,159 995,173 1035,193 1072,219 1108,245 1140,276 1169,312 1198,347 1223,387 1245,430 L 1401,352 C 1376,299 1344,250 1307,205 1270,160 1226,120 1176,87 1125,54 1068,28 1005,9 941,-10 870,-20 791,-20 677,-20 577,-2 492,35 406,71 334,122 277,187 219,252 176,329 147,418 118,507 104,605 104,711 104,821 119,920 150,1009 180,1098 224,1173 283,1236 341,1298 413,1346 498,1380 583,1413 681,1430 790,1430 940,1430 1065,1401 1166,1342 1267,1283 1341,1196 1388,1081 L 1207,1021 C 1194,1054 1176,1086 1153,1117 1130,1147 1102,1174 1068,1197 1034,1220 994,1239 949,1253 903,1267 851,1274 792,1274 Z"/>
+   <glyph unicode="A" horiz-adv-x="1350" d="M 1167,0 L 1006,412 364,412 202,0 4,0 579,1409 796,1409 1362,0 1167,0 Z M 768,1026 C 757,1053 747,1080 738,1107 728,1134 719,1159 712,1182 705,1204 699,1223 694,1238 689,1253 686,1262 685,1265 684,1262 681,1252 676,1237 671,1222 665,1203 658,1180 650,1157 641,1132 632,1105 622,1078 612,1051 602,1024 L 422,561 949,561 768,1026 Z"/>
+   <glyph unicode="3" horiz-adv-x="980" d="M 1049,389 C 1049,324 1039,267 1018,216 997,165 966,123 926,88 885,53 835,26 776,8 716,-11 648,-20 571,-20 484,-20 410,-9 351,13 291,34 242,63 203,99 164,134 135,175 116,221 97,266 84,313 78,362 L 264,379 C 269,342 279,308 294,277 308,246 327,220 352,198 377,176 407,159 443,147 479,135 522,129 571,129 662,129 733,151 785,196 836,241 862,307 862,395 862,447 851,489 828,521 805,552 776,577 742,595 707,612 670,624 630,630 589,636 552,639 518,639 L 416,639 416,795 514,795 C 548,795 583,799 620,806 657,813 690,825 721,844 751,862 776,887 796,918 815,949 825,989 825,1038 825,1113 803,1173 759,1217 714,1260 648,1282 561,1282 482,1282 418,1262 369,1221 320,1180 291,1123 283,1049 L 102,1063 C 109,1125 126,1179 153,1225 180,1271 214,1309 255,1340 296,1370 342,1393 395,1408 448,1423 504,1430 563,1430 642,1430 709,1420 766,1401 823,1381 869,1354 905,1321 941,1287 968,1247 985,1202 1002,1157 1010,1108 1010,1057 1010,1016 1004,977 993,941 982,905 964,873 940,844 916,815 886,791 849,770 812,749 767,734 715,723 L 715,719 C 772,713 821,700 863,681 905,661 940,636 967,607 994,578 1015,544 1029,507 1042,470 1049,430 1049,389 Z"/>
+   <glyph unicode="2" horiz-adv-x="927" d="M 103,0 L 103,127 C 137,205 179,274 228,334 277,393 328,447 382,496 436,544 490,589 543,630 596,671 643,713 686,754 729,795 763,839 790,884 816,929 829,981 829,1038 829,1078 823,1113 811,1144 799,1174 782,1199 759,1220 736,1241 709,1256 678,1267 646,1277 611,1282 572,1282 536,1282 502,1277 471,1267 439,1257 411,1242 386,1222 361,1202 341,1177 326,1148 310,1118 300,1083 295,1044 L 111,1061 C 117,1112 131,1159 153,1204 175,1249 205,1288 244,1322 283,1355 329,1382 384,1401 438,1420 501,1430 572,1430 642,1430 704,1422 759,1405 814,1388 860,1364 898,1331 935,1298 964,1258 984,1210 1004,1162 1014,1107 1014,1044 1014,997 1006,952 989,909 972,866 949,826 921,787 892,748 859,711 822,675 785,639 746,604 705,570 664,535 623,501 582,468 541,434 502,400 466,366 429,332 397,298 368,263 339,228 317,191 301,153 L 1036,153 1036,0 103,0 Z"/>
+   <glyph unicode="1" horiz-adv-x="874" d="M 156,0 L 156,153 515,153 515,1237 197,1010 197,1180 530,1409 696,1409 696,153 1039,153 1039,0 156,0 Z"/>
+   <glyph unicode=" " horiz-adv-x="556"/>
+  </font>
+ </defs>
+ <defs>
+  <font id="EmbeddedFont_2" horiz-adv-x="2048">
+   <font-face font-family="Liberation Sans embedded" units-per-em="2048" font-weight="normal" font-style="italic" ascent="1852" descent="423"/>
+   <missing-glyph horiz-adv-x="2048" d="M 0,0 L 2047,0 2047,2047 0,2047 0,0 Z"/>
+   <glyph unicode="w" horiz-adv-x="1510" d="M 1068,0 L 859,0 822,698 C 821,711 821,731 820,757 819,783 819,809 818,836 817,867 817,900 816,934 804,904 792,874 780,845 769,820 758,793 747,766 735,739 724,715 715,694 L 402,0 194,0 102,1082 280,1082 320,347 C 321,339 321,326 322,308 323,289 323,270 324,250 325,229 325,210 326,191 327,172 327,158 327,149 337,173 347,197 357,220 366,240 375,261 384,283 393,305 401,324 408,339 L 749,1082 942,1082 986,339 C 988,303 990,268 991,235 992,202 992,173 992,149 1002,173 1012,197 1023,220 1032,240 1042,261 1052,284 1061,307 1070,328 1079,347 L 1413,1082 1589,1082 1068,0 Z"/>
+   <glyph unicode="u" horiz-adv-x="1060" d="M 415,1082 L 289,437 C 284,411 279,385 276,358 273,331 271,307 271,287 271,234 285,193 313,164 341,135 387,120 450,120 493,120 533,129 571,146 608,163 642,187 673,218 704,249 730,286 752,330 773,373 789,422 800,476 L 918,1082 1098,1082 932,228 C 927,205 923,181 919,156 914,131 910,107 907,85 903,62 900,43 898,27 895,11 894,3 893,3 L 723,3 C 723,6 724,15 726,30 728,44 731,61 734,79 737,98 740,117 743,136 746,156 748,172 751,185 L 748,185 C 725,154 702,125 678,100 654,75 628,53 599,36 570,18 538,4 503,-5 468,-14 428,-19 383,-19 284,-19 210,5 161,54 111,103 86,173 86,265 86,289 88,316 93,346 97,376 102,404 107,429 L 234,1082 415,1082 Z"/>
+   <glyph unicode="t" horiz-adv-x="530" d="M 448,4 C 423,-2 396,-7 367,-13 338,-17 307,-20 275,-20 218,-20 174,-3 142,31 109,65 93,110 93,166 93,187 95,210 98,235 101,259 105,279 108,296 L 234,951 109,951 135,1082 262,1082 367,1324 487,1324 440,1082 640,1082 614,951 414,951 289,306 C 286,293 284,276 281,257 278,238 277,222 277,211 277,183 284,161 298,146 312,131 335,123 367,123 384,123 401,124 416,127 431,129 448,132 467,137 L 448,4 Z"/>
+   <glyph unicode="s" horiz-adv-x="980" d="M 907,317 C 907,260 896,211 873,169 850,126 818,91 777,63 735,35 684,14 625,1 566,-13 499,-20 425,-20 363,-20 309,-15 262,-4 215,7 175,22 142,43 108,63 80,88 58,119 35,149 18,184 5,223 L 152,279 C 162,252 175,229 191,208 206,187 226,169 249,155 272,140 299,129 331,122 362,115 399,111 441,111 484,111 523,115 559,122 594,129 625,140 651,155 676,170 696,190 711,214 725,238 732,267 732,301 732,328 726,351 713,370 700,389 683,405 660,420 637,434 609,447 576,460 543,472 506,484 465,497 422,511 381,526 342,543 303,560 268,580 239,603 209,626 185,654 168,686 150,717 141,754 141,797 141,852 153,898 177,937 200,975 232,1006 273,1030 313,1054 360,1072 414,1083 467,1094 524,1099 584,1099 639,1099 689,1094 734,1085 779,1076 819,1061 853,1041 887,1020 915,994 937,962 959,929 974,890 982,844 L 819,819 C 804,872 777,910 736,933 695,956 641,968 572,968 537,968 504,965 473,960 442,955 414,946 391,934 368,922 349,906 336,887 322,868 315,844 315,817 315,790 321,767 334,749 347,730 365,714 388,700 411,686 438,674 471,663 503,652 539,640 579,627 617,615 656,601 695,585 734,569 769,549 800,526 831,502 857,473 877,440 897,406 907,365 907,317 Z"/>
+   <glyph unicode="r" horiz-adv-x="742" d="M 718,938 C 707,941 693,944 678,947 662,950 645,951 628,951 585,951 547,939 513,914 479,889 449,858 424,820 398,782 377,740 360,695 343,649 331,605 324,564 L 214,0 34,0 196,830 C 201,853 205,877 209,900 213,923 217,946 221,968 224,990 228,1011 231,1031 234,1050 237,1067 239,1082 L 409,1082 C 407,1067 405,1050 402,1030 399,1010 395,990 392,969 389,948 386,929 383,910 380,891 377,874 374,861 L 378,861 C 399,902 419,938 440,969 460,999 481,1024 503,1044 525,1063 549,1078 574,1088 599,1097 626,1102 656,1102 663,1102 671,1102 680,1101 689,1100 698,1098 707,1097 716,1096 724,1094 732,1093 740,1091 746,1089 751,1088 L 718,938 Z"/>
+   <glyph unicode="p" horiz-adv-x="1138" d="M 554,-20 C 472,-20 405,-3 354,32 302,67 265,115 244,178 L 239,178 C 239,177 238,170 237,159 236,147 234,132 231,115 228,98 225,79 222,58 218,37 214,17 210,-2 L 128,-425 -51,-425 198,864 C 203,891 208,916 212,940 216,964 220,986 223,1005 226,1025 228,1042 230,1056 231,1070 232,1077 233,1077 L 400,1077 C 400,1072 400,1063 399,1052 398,1040 397,1027 396,1013 394,998 392,983 390,967 388,950 386,935 383,921 L 387,921 C 411,952 436,979 461,1002 486,1025 512,1044 541,1059 569,1074 599,1085 632,1092 665,1099 701,1102 741,1102 794,1102 842,1094 883,1077 924,1060 959,1037 987,1006 1015,975 1036,938 1051,895 1066,851 1073,802 1073,748 1073,715 1072,678 1069,639 1066,599 1060,558 1052,516 1034,421 1010,340 981,273 952,205 916,149 875,106 834,63 786,31 733,11 680,-10 620,-20 554,-20 Z M 689,963 C 646,963 606,957 568,944 529,931 494,910 461,879 428,848 400,806 375,753 350,700 329,634 314,554 301,489 295,430 295,377 295,334 301,297 312,264 323,231 340,203 361,181 382,158 407,141 437,130 466,119 499,113 535,113 576,113 614,119 647,132 680,144 711,165 738,196 765,226 788,267 809,318 830,369 847,433 862,510 877,591 885,659 885,716 885,798 869,860 838,901 807,942 757,963 689,963 Z"/>
+   <glyph unicode="o" horiz-adv-x="1007" d="M 1074,683 C 1074,648 1072,614 1068,579 1064,544 1057,506 1048,467 1028,379 1000,304 965,242 929,180 887,130 839,91 791,52 738,24 679,7 620,-11 558,-20 491,-20 427,-20 369,-10 317,10 265,29 221,58 184,96 147,133 118,179 98,234 77,288 67,350 67,419 68,450 70,483 73,516 76,549 81,584 89,620 108,704 135,776 169,837 203,897 243,947 290,986 337,1025 390,1054 449,1073 508,1092 572,1101 642,1101 713,1101 775,1092 829,1073 882,1054 927,1027 964,991 1000,955 1027,911 1046,860 1065,808 1074,749 1074,683 Z M 888,683 C 888,734 882,778 871,814 860,850 843,880 822,903 800,926 774,942 743,953 712,964 678,969 640,969 605,969 569,965 534,957 498,948 464,931 432,906 399,881 370,845 343,798 316,751 294,689 276,612 267,575 261,541 258,508 254,475 252,444 252,416 252,361 258,315 271,276 284,237 301,206 324,182 346,158 372,141 403,130 433,119 466,113 502,113 538,113 574,117 609,125 644,133 677,150 708,176 739,201 768,238 795,285 821,332 843,395 861,473 870,513 877,550 881,583 884,616 887,650 888,683 Z"/>
+   <glyph unicode="n" horiz-adv-x="1033" d="M 717,0 L 843,645 C 848,671 853,698 856,725 859,752 861,775 861,795 861,848 847,889 819,918 791,947 745,962 682,962 639,962 599,954 562,937 524,920 490,896 459,865 428,834 402,796 381,753 359,709 343,660 332,606 L 214,0 34,0 200,853 C 205,876 209,900 214,925 218,950 222,974 226,996 229,1019 232,1038 235,1054 237,1070 238,1078 239,1078 L 409,1078 C 409,1075 408,1066 406,1052 404,1037 402,1021 399,1002 396,984 393,965 390,945 387,926 384,910 381,897 L 384,897 C 407,928 430,957 454,982 478,1007 505,1029 534,1047 563,1064 595,1078 630,1087 665,1096 704,1101 749,1101 848,1101 922,1077 972,1028 1021,979 1046,909 1046,817 1046,793 1044,766 1040,736 1035,706 1030,678 1025,653 L 898,0 717,0 Z"/>
+   <glyph unicode="m" horiz-adv-x="1589" d="M 660,0 L 784,634 C 787,647 790,662 793,678 796,694 798,710 801,726 803,742 805,757 807,772 808,786 809,798 809,808 809,858 796,896 771,923 746,949 704,962 647,962 609,962 573,954 539,937 504,920 473,896 446,865 419,834 395,796 375,752 355,707 340,658 331,604 L 213,0 34,0 200,853 C 205,876 209,900 214,925 218,950 222,974 226,996 229,1019 232,1038 235,1054 237,1070 238,1078 239,1078 L 409,1078 C 409,1075 408,1066 406,1052 404,1037 402,1021 399,1002 396,984 393,965 390,945 387,926 384,910 381,897 L 384,897 C 404,928 425,957 446,982 467,1007 491,1029 516,1047 541,1064 570,1078 601,1087 632,1096 667,1101 706,1101 787,1101 851,1081 898,1042 945,1002 974,944 983,869 1004,902 1026,933 1049,961 1072,989 1097,1014 1125,1035 1152,1056 1183,1072 1217,1084 1250,1095 1288,1101 1331,1101 1421,1101 1490,1077 1539,1028 1587,979 1611,909 1611,817 1611,793 1609,766 1605,736 1600,706 1595,678 1590,653 L 1463,0 1285,0 1409,634 C 1412,647 1415,662 1418,678 1421,694 1423,710 1426,726 1428,742 1430,757 1432,772 1433,786 1434,798 1434,808 1434,858 1421,896 1396,923 1371,949 1329,962 1272,962 1234,962 1198,954 1164,937 1129,920 1098,897 1071,866 1044,835 1020,798 1000,754 980,710 965,661 956,607 L 838,0 660,0 Z"/>
+   <glyph unicode="l" horiz-adv-x="504" d="M 33,0 L 321,1484 501,1484 212,0 33,0 Z"/>
+   <glyph unicode="k" horiz-adv-x="1113" d="M 721,0 L 453,502 285,378 213,0 34,0 322,1484 502,1484 323,567 527,757 888,1082 1110,1082 580,617 916,0 721,0 Z"/>
+   <glyph unicode="i" horiz-adv-x="478" d="M 287,1312 L 321,1484 501,1484 467,1312 287,1312 Z M 33,0 L 243,1082 423,1082 212,0 33,0 Z"/>
+   <glyph unicode="h" horiz-adv-x="1033" d="M 383,897 C 406,928 429,957 453,982 477,1007 504,1029 533,1047 562,1064 594,1078 629,1087 664,1096 703,1101 748,1101 847,1101 921,1077 971,1028 1020,979 1045,909 1045,817 1045,793 1043,766 1039,736 1034,706 1029,678 1024,653 L 897,0 716,0 842,645 C 847,671 852,698 855,725 858,752 860,775 860,795 860,848 846,889 818,918 790,947 744,962 681,962 638,962 598,954 561,937 523,920 489,896 458,865 427,834 401,796 380,753 358,709 342,660 331,606 L 213,0 34,0 322,1484 502,1484 427,1098 C 423,1076 419,1054 414,1032 409,1010 404,990 399,972 394,953 390,937 387,924 384,911 381,902 380,897 L 383,897 Z"/>
+   <glyph unicode="f" horiz-adv-x="663" d="M 434,951 L 249,0 69,0 254,951 102,951 128,1082 280,1082 303,1204 C 311,1243 321,1280 334,1314 347,1348 365,1378 389,1403 412,1428 443,1448 480,1463 517,1477 565,1484 622,1484 643,1484 665,1483 688,1481 710,1479 729,1476 746,1472 L 720,1335 C 714,1336 707,1337 700,1338 692,1339 684,1340 675,1341 666,1342 658,1342 650,1342 642,1342 635,1342 629,1342 604,1342 583,1338 566,1331 549,1324 535,1313 524,1299 513,1285 504,1268 497,1248 490,1228 484,1205 479,1179 L 460,1082 671,1082 645,951 434,951 Z"/>
+   <glyph unicode="e" horiz-adv-x="980" d="M 256,503 C 253,484 251,466 250,447 249,428 248,409 247,390 247,301 269,233 314,186 358,139 425,115 514,115 551,115 585,120 616,130 647,139 675,152 700,169 725,185 747,204 766,226 785,247 800,270 813,294 L 951,231 C 934,201 914,171 890,142 866,112 836,85 801,61 765,37 722,18 672,3 622,-12 562,-20 493,-20 426,-20 367,-10 314,9 261,28 217,55 181,92 144,128 117,172 98,225 79,278 69,338 69,405 69,510 83,606 112,692 140,778 179,851 230,912 280,973 339,1020 408,1053 476,1086 550,1102 630,1102 703,1102 767,1092 821,1073 875,1054 920,1027 956,992 992,957 1019,916 1037,868 1054,819 1063,766 1063,708 1063,694 1063,679 1062,662 1061,645 1059,628 1057,610 1055,592 1053,574 1050,556 1047,537 1043,520 1039,503 L 256,503 Z M 880,641 C 881,654 882,667 883,679 884,690 884,702 884,713 884,757 878,795 866,828 854,860 837,887 815,908 793,929 767,944 736,954 705,964 671,969 634,969 602,969 568,964 533,955 498,945 464,928 432,903 399,878 370,845 343,803 316,760 295,706 280,641 L 880,641 Z"/>
+   <glyph unicode="d" horiz-adv-x="1166" d="M 401,-21 C 348,-21 300,-13 259,4 218,21 183,44 155,75 127,106 106,143 91,187 76,230 69,279 69,333 69,363 71,399 74,440 77,481 82,523 90,565 108,660 132,741 161,809 190,876 226,932 267,975 308,1018 356,1050 409,1071 462,1091 522,1101 588,1101 670,1101 737,1084 789,1049 840,1014 877,966 898,903 L 903,903 C 904,910 906,921 909,936 912,951 915,968 918,985 921,1002 923,1018 926,1033 929,1048 930,1059 931,1065 L 1013,1484 1193,1484 948,219 C 943,193 938,168 934,143 929,119 925,97 922,77 919,57 916,40 914,26 912,11 911,4 910,4 L 738,4 C 738,17 740,38 744,66 747,95 752,126 759,160 L 754,160 C 730,129 706,102 681,79 656,56 629,38 601,23 573,8 543,-3 510,-11 477,-17 441,-21 401,-21 Z M 453,118 C 496,118 536,124 575,137 613,150 648,172 681,203 714,234 743,275 768,328 793,381 813,447 828,527 841,592 847,651 847,704 847,747 841,785 830,818 819,851 803,878 782,901 761,923 735,940 706,951 676,962 643,968 607,968 566,968 529,962 496,950 462,937 432,916 405,886 378,855 354,815 334,764 313,713 295,648 280,571 265,490 257,422 257,365 257,283 273,221 304,180 335,139 385,118 453,118 Z"/>
+   <glyph unicode="c" horiz-adv-x="927" d="M 469,122 C 506,122 540,128 570,139 600,150 627,165 650,185 673,205 694,229 712,258 730,286 745,317 758,352 L 914,303 C 895,253 873,208 846,169 819,129 787,95 750,67 713,39 670,18 623,3 576,-12 523,-20 465,-20 396,-20 337,-10 287,11 236,32 195,61 163,98 130,135 106,178 91,229 75,280 67,335 67,395 67,422 68,451 71,482 73,513 77,544 83,574 98,648 117,712 140,767 163,822 188,869 217,908 245,947 276,979 309,1004 342,1029 376,1049 411,1064 446,1078 481,1088 518,1094 554,1099 590,1102 625,1102 684,1102 737,1094 782,1079 827,1064 865,1042 896,1014 927,986 952,953 970,914 987,875 998,831 1001,784 L 824,759 C 822,789 816,816 807,841 798,866 785,887 768,905 751,922 730,936 705,946 680,956 652,961 619,961 573,961 532,954 495,941 458,928 426,906 397,876 368,846 343,807 322,759 301,710 284,651 270,581 264,549 259,515 256,480 253,445 251,414 251,389 251,304 268,239 303,192 337,145 392,122 469,122 Z"/>
+   <glyph unicode="b" horiz-adv-x="1060" d="M 744,1102 C 797,1102 845,1094 886,1077 927,1060 962,1037 990,1006 1018,975 1039,938 1054,895 1069,851 1076,802 1076,748 1076,715 1075,678 1072,639 1069,599 1063,558 1055,516 1037,421 1013,340 984,273 955,205 919,149 878,106 837,63 789,31 736,11 683,-10 623,-20 557,-20 475,-20 408,-3 357,32 306,67 269,115 248,178 L 245,178 C 242,160 238,142 233,122 228,102 224,83 220,66 215,48 212,33 209,21 206,8 203,2 202,2 L 29,2 C 31,8 34,18 37,32 40,47 44,64 49,84 53,104 58,126 63,150 68,174 73,199 78,225 L 323,1484 503,1484 420,1061 C 417,1042 413,1023 409,1006 404,989 400,974 397,961 393,946 389,933 386,921 L 390,921 C 414,952 439,979 464,1002 489,1025 515,1044 544,1059 572,1074 602,1085 635,1092 668,1099 704,1102 744,1102 Z M 692,963 C 649,963 609,957 571,944 532,931 497,910 464,879 431,848 403,806 378,753 353,700 332,634 317,554 304,489 298,430 298,377 298,334 304,297 315,264 326,231 343,203 364,181 385,158 410,141 440,130 469,119 502,113 538,113 579,113 617,119 650,132 683,144 714,165 741,196 768,226 791,267 812,318 833,369 850,433 865,510 880,591 888,659 888,716 888,798 872,860 841,901 810,942 760,963 692,963 Z"/>
+   <glyph unicode="a" horiz-adv-x="1033" d="M 1055,6 C 1036,1 1015,-2 993,-6 970,-8 948,-10 927,-10 865,-10 820,3 792,29 763,54 749,92 749,143 749,153 750,164 751,176 752,187 753,198 754,207 L 748,207 C 725,172 701,140 676,112 651,84 623,60 593,41 562,21 528,6 491,-5 454,-15 410,-20 361,-20 309,-20 264,-12 225,5 186,22 153,44 126,72 99,100 79,131 66,168 53,204 46,241 46,279 46,333 54,380 70,419 85,459 107,493 134,521 161,549 192,572 229,589 265,607 304,621 345,631 386,641 428,648 472,652 516,656 559,658 601,658 L 833,658 840,694 C 843,711 846,727 849,743 851,758 852,772 852,786 852,847 834,892 799,921 764,950 715,965 652,965 619,965 589,963 561,958 532,953 507,944 485,931 462,918 443,900 426,878 409,855 395,827 384,793 L 206,822 C 219,863 236,901 258,936 280,970 309,999 345,1024 381,1049 425,1068 477,1082 528,1095 590,1102 662,1102 721,1102 774,1095 820,1080 866,1065 905,1045 936,1019 967,993 991,962 1008,926 1024,890 1032,850 1032,807 1032,786 1030,762 1027,733 1023,704 1018,676 1013,650 L 939,272 C 936,257 933,242 931,227 929,212 928,197 928,184 928,159 935,141 948,129 961,117 981,111 1009,111 1019,111 1029,112 1040,113 1050,114 1060,116 1069,118 L 1055,6 Z M 809,530 L 610,530 C 583,530 556,530 527,530 498,530 470,527 443,520 415,514 389,505 364,495 339,484 317,469 298,451 279,432 265,410 254,383 243,357 237,325 237,288 237,266 241,245 248,225 255,204 265,186 280,170 295,154 313,141 335,132 356,122 382,117 411,117 469,117 520,127 563,147 606,166 643,191 674,220 705,248 729,280 747,314 764,347 776,379 782,407 L 809,530 Z"/>
+   <glyph unicode="U" horiz-adv-x="1377" d="M 654,-20 C 585,-20 520,-11 459,7 398,25 344,53 299,90 254,127 218,174 192,231 166,288 153,354 153,431 153,445 154,461 155,480 156,498 158,516 161,535 163,554 165,572 168,590 171,607 173,622 176,635 L 326,1409 517,1409 355,566 C 350,542 346,517 343,492 340,466 338,443 338,423 338,374 346,331 363,295 380,259 403,229 432,206 461,182 496,164 537,153 578,141 622,135 670,135 728,135 782,142 832,157 881,172 926,195 966,227 1005,259 1039,301 1068,353 1096,404 1117,467 1131,541 L 1299,1409 1489,1409 1319,530 C 1300,436 1272,355 1234,286 1195,217 1148,159 1091,114 1034,69 969,35 896,13 823,-9 742,-20 654,-20 Z"/>
+   <glyph unicode="I" horiz-adv-x="478" d="M 81,0 L 355,1409 546,1409 272,0 81,0 Z"/>
+   <glyph unicode="3" horiz-adv-x="1059" d="M 566,795 C 590,795 616,796 644,799 671,802 699,807 726,814 753,821 778,831 803,844 828,857 849,875 868,896 887,917 902,942 913,973 924,1003 930,1039 930,1081 930,1110 925,1137 916,1162 906,1187 892,1208 873,1226 854,1243 830,1257 803,1267 776,1277 744,1282 708,1282 629,1282 561,1262 504,1221 447,1180 407,1123 384,1049 L 206,1063 C 245,1187 309,1279 398,1340 487,1400 593,1430 718,1430 780,1430 836,1422 886,1407 935,1391 978,1368 1013,1339 1048,1309 1074,1273 1093,1231 1112,1188 1121,1140 1121,1086 1121,1034 1113,987 1096,945 1079,902 1054,865 1022,834 990,803 951,777 906,758 861,738 810,724 753,717 L 752,713 C 839,696 907,661 956,609 1005,556 1029,489 1029,407 1029,349 1019,294 998,242 977,190 945,145 904,106 862,67 810,37 747,14 684,-9 610,-20 526,-20 450,-20 384,-9 328,13 272,34 225,62 187,97 148,132 118,170 96,213 73,255 57,297 48,338 L 212,386 C 220,355 233,325 250,295 267,264 289,237 316,212 343,187 375,167 412,152 449,137 492,129 541,129 590,129 634,136 671,150 708,163 740,182 765,207 790,231 809,260 822,294 835,327 841,364 841,404 841,444 834,479 820,508 806,537 787,562 762,581 737,600 707,615 673,625 639,634 602,639 562,639 L 438,639 468,795 566,795 Z"/>
+   <glyph unicode="2" horiz-adv-x="1112" d="M -12,0 L 12,127 C 49,189 90,244 135,293 180,342 228,386 277,426 326,465 377,501 428,534 479,567 528,598 576,629 623,659 668,689 710,719 751,749 788,781 819,815 850,849 875,886 893,927 911,967 920,1012 920,1063 920,1097 914,1128 903,1155 892,1182 876,1205 856,1224 835,1243 811,1257 783,1267 754,1277 723,1282 689,1282 616,1282 553,1262 499,1223 445,1183 406,1123 381,1044 L 211,1081 C 227,1132 249,1178 276,1221 303,1264 337,1301 377,1332 417,1363 464,1387 518,1404 571,1421 632,1430 700,1430 759,1430 814,1422 864,1405 914,1388 957,1365 994,1334 1030,1303 1058,1266 1079,1223 1099,1180 1109,1131 1109,1078 1109,1021 1099,969 1080,921 1060,873 1033,829 1000,788 967,747 928,708 884,672 840,636 794,601 745,568 696,535 647,502 596,470 545,438 497,405 450,372 403,339 360,304 321,269 282,233 249,194 222,153 L 949,153 920,0 -12,0 Z"/>
+   <glyph unicode="1" horiz-adv-x="953" d="M 53,0 L 83,153 442,153 650,1223 289,1000 324,1180 701,1409 867,1409 623,153 966,153 936,0 53,0 Z"/>
+   <glyph unicode="," horiz-adv-x="292" d="M 299,51 C 292,16 285,-16 276,-46 267,-74 256,-101 245,-127 234,-151 221,-175 207,-197 193,-219 177,-241 160,-262 L 37,-262 C 75,-219 107,-175 132,-131 157,-87 173,-43 182,0 L 94,0 136,219 331,219 299,51 Z"/>
+   <glyph unicode=" " horiz-adv-x="556"/>
+  </font>
+ </defs>
+ <defs class="TextShapeIndex">
+  <g ooo:slide="id1" ooo:id-list="id3 id4 id5 id6 id7 id8 id9 id10 id11 id12 id13 id14 id15 id16 id17 id18"/>
+ </defs>
+ <defs class="EmbeddedBulletChars">
+  <g id="bullet-char-template(57356)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 580,1141 L 1163,571 580,0 -4,571 580,1141 Z"/>
+  </g>
+  <g id="bullet-char-template(57354)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 8,1128 L 1137,1128 1137,0 8,0 8,1128 Z"/>
+  </g>
+  <g id="bullet-char-template(10146)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 174,0 L 602,739 174,1481 1456,739 174,0 Z M 1358,739 L 309,1346 659,739 1358,739 Z"/>
+  </g>
+  <g id="bullet-char-template(10132)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 2015,739 L 1276,0 717,0 1260,543 174,543 174,936 1260,936 717,1481 1274,1481 2015,739 Z"/>
+  </g>
+  <g id="bullet-char-template(10007)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 0,-2 C -7,14 -16,27 -25,37 L 356,567 C 262,823 215,952 215,954 215,979 228,992 255,992 264,992 276,990 289,987 310,991 331,999 354,1012 L 381,999 492,748 772,1049 836,1024 860,1049 C 881,1039 901,1025 922,1006 886,937 835,863 770,784 769,783 710,716 594,584 L 774,223 C 774,196 753,168 711,139 L 727,119 C 717,90 699,76 672,76 641,76 570,178 457,381 L 164,-76 C 142,-110 111,-127 72,-127 30,-127 9,-110 8,-76 1,-67 -2,-52 -2,-32 -2,-23 -1,-13 0,-2 Z"/>
+  </g>
+  <g id="bullet-char-template(10004)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 285,-33 C 182,-33 111,30 74,156 52,228 41,333 41,471 41,549 55,616 82,672 116,743 169,778 240,778 293,778 328,747 346,684 L 369,508 C 377,444 397,411 428,410 L 1163,1116 C 1174,1127 1196,1133 1229,1133 1271,1133 1292,1118 1292,1087 L 1292,965 C 1292,929 1282,901 1262,881 L 442,47 C 390,-6 338,-33 285,-33 Z"/>
+  </g>
+  <g id="bullet-char-template(9679)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 813,0 C 632,0 489,54 383,161 276,268 223,411 223,592 223,773 276,916 383,1023 489,1130 632,1184 813,1184 992,1184 1136,1130 1245,1023 1353,916 1407,772 1407,592 1407,412 1353,268 1245,161 1136,54 992,0 813,0 Z"/>
+  </g>
+  <g id="bullet-char-template(8226)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 346,457 C 273,457 209,483 155,535 101,586 74,649 74,723 74,796 101,859 155,911 209,963 273,989 346,989 419,989 480,963 531,910 582,859 608,796 608,723 608,648 583,586 532,535 482,483 420,457 346,457 Z"/>
+  </g>
+  <g id="bullet-char-template(8211)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M -4,459 L 1135,459 1135,606 -4,606 -4,459 Z"/>
+  </g>
+  <g id="bullet-char-template(61548)" transform="scale(0.00048828125,-0.00048828125)">
+   <path d="M 173,740 C 173,903 231,1043 346,1159 462,1274 601,1332 765,1332 928,1332 1067,1274 1183,1159 1299,1043 1357,903 1357,740 1357,577 1299,437 1183,322 1067,206 928,148 765,148 601,148 462,206 346,322 231,437 173,577 173,740 Z"/>
+  </g>
+ </defs>
+ <defs class="TextEmbeddedBitmaps"/>
+ <g>
+  <g id="id2" class="Master_Slide">
+   <g id="bg-id2" class="Background"/>
+   <g id="bo-id2" class="BackgroundObjects"/>
+  </g>
+ </g>
+ <g class="SlideGroup">
+  <g>
+   <g id="container-id1">
+    <g id="id1" class="Slide" clip-path="url(#presentation_clip_path)">
+     <g class="Page">
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id3">
+        <rect class="BoundingBox" stroke="none" fill="none" x="1761" y="2523" width="5845" height="9147"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4683,11668 L 1762,11668 1762,2524 7604,2524 7604,11668 4683,11668 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="2012" y="3225"><tspan fill="rgb(0,0,0)" stroke="none">Cluster 1</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id4">
+        <rect class="BoundingBox" stroke="none" fill="none" x="2015" y="3920" width="5083" height="1019"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 4556,4937 L 2016,4937 2016,3921 7096,3921 7096,4937 4556,4937 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4556,4937 L 2016,4937 2016,3921 7096,3921 7096,4937 4556,4937 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="2261" y="4650"><tspan fill="rgb(0,0,0)" stroke="none">Workflow runner</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id5">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7857" y="4555" width="5845" height="3559"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10779,8112 L 7858,8112 7858,4556 13700,4556 13700,8112 10779,8112 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="8108" y="5257"><tspan fill="rgb(0,0,0)" stroke="none">Cluster 2</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id6">
+        <rect class="BoundingBox" stroke="none" fill="none" x="13953" y="4555" width="5845" height="3559"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16875,8112 L 13954,8112 13954,4556 19796,4556 19796,8112 16875,8112 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="14204" y="5257"><tspan fill="rgb(0,0,0)" stroke="none">Cluster 3</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id7">
+        <rect class="BoundingBox" stroke="none" fill="none" x="2015" y="6460" width="5083" height="1019"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 4556,7477 L 2016,7477 2016,6461 7096,6461 7096,7477 4556,7477 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4556,7477 L 2016,7477 2016,6461 7096,6461 7096,7477 4556,7477 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="2446" y="7190"><tspan fill="rgb(0,0,0)" stroke="none">Analysis task 1</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id8">
+        <rect class="BoundingBox" stroke="none" fill="none" x="8237" y="6460" width="4956" height="1019"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 10715,7477 L 8238,7477 8238,6461 13191,6461 13191,7477 10715,7477 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 10715,7477 L 8238,7477 8238,6461 13191,6461 13191,7477 10715,7477 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="8604" y="7190"><tspan fill="rgb(0,0,0)" stroke="none">Analysis task 2</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id9">
+        <rect class="BoundingBox" stroke="none" fill="none" x="14206" y="6460" width="4956" height="1019"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 16684,7477 L 14207,7477 14207,6461 19160,6461 19160,7477 16684,7477 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 16684,7477 L 14207,7477 14207,6461 19160,6461 19160,7477 16684,7477 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="14573" y="7190"><tspan fill="rgb(0,0,0)" stroke="none">Analysis task 3</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id10">
+        <rect class="BoundingBox" stroke="none" fill="none" x="2015" y="10270" width="5083" height="1019"/>
+        <path fill="rgb(114,159,207)" stroke="none" d="M 4556,11287 L 2016,11287 2016,10271 7096,10271 7096,11287 4556,11287 Z"/>
+        <path fill="none" stroke="rgb(52,101,164)" d="M 4556,11287 L 2016,11287 2016,10271 7096,10271 7096,11287 4556,11287 Z"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="2954" y="11000"><tspan fill="rgb(0,0,0)" stroke="none">Join results</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.ConnectorShape">
+       <g id="id11">
+        <rect class="BoundingBox" stroke="none" fill="none" x="4555" y="4936" width="6311" height="1526"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 4556,4937 L 4556,5699 10715,5699 10715,6031"/>
+        <path fill="rgb(0,0,0)" stroke="none" d="M 10715,6461 L 10865,6011 10565,6011 10715,6461 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.ConnectorShape">
+       <g id="id12">
+        <rect class="BoundingBox" stroke="none" fill="none" x="4555" y="4936" width="12280" height="1526"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 4556,4937 L 4556,5699 16684,5699 16684,6031"/>
+        <path fill="rgb(0,0,0)" stroke="none" d="M 16684,6461 L 16834,6011 16534,6011 16684,6461 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.ConnectorShape">
+       <g id="id13">
+        <rect class="BoundingBox" stroke="none" fill="none" x="4406" y="4936" width="301" height="1526"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 4556,4937 L 4556,6031"/>
+        <path fill="rgb(0,0,0)" stroke="none" d="M 4556,6461 L 4706,6011 4406,6011 4556,6461 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.ConnectorShape">
+       <g id="id14">
+        <rect class="BoundingBox" stroke="none" fill="none" x="4406" y="7476" width="6311" height="2796"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 10715,7477 L 10715,8874 4556,8874 4556,9841"/>
+        <path fill="rgb(0,0,0)" stroke="none" d="M 4556,10271 L 4706,9821 4406,9821 4556,10271 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.ConnectorShape">
+       <g id="id15">
+        <rect class="BoundingBox" stroke="none" fill="none" x="4406" y="7476" width="12280" height="2796"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 16684,7477 L 16684,8874 4556,8874 4556,9841"/>
+        <path fill="rgb(0,0,0)" stroke="none" d="M 4556,10271 L 4706,9821 4406,9821 4556,10271 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.ConnectorShape">
+       <g id="id16">
+        <rect class="BoundingBox" stroke="none" fill="none" x="4406" y="7476" width="301" height="2796"/>
+        <path fill="none" stroke="rgb(0,0,0)" d="M 4556,7477 L 4556,9841"/>
+        <path fill="rgb(0,0,0)" stroke="none" d="M 4556,10271 L 4706,9821 4406,9821 4556,10271 Z"/>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id17">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7857" y="3286" width="7748" height="1017"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-style="italic" font-weight="400"><tspan class="TextPosition" x="8107" y="3718"><tspan fill="rgb(0,0,0)" stroke="none">User from cluster 1 is able to authenticate </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-style="italic" font-weight="400"><tspan class="TextPosition" x="8107" y="4112"><tspan fill="rgb(0,0,0)" stroke="none">to clusters 2 and 3 and submit work</tspan></tspan></tspan></text>
+       </g>
+      </g>
+      <g class="com.sun.star.drawing.CustomShape">
+       <g id="id18">
+        <rect class="BoundingBox" stroke="none" fill="none" x="7730" y="9127" width="8048" height="1144"/>
+        <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-style="italic" font-weight="400"><tspan class="TextPosition" x="7980" y="9623"><tspan fill="rgb(0,0,0)" stroke="none">Input and output data for workflow steps is </tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="353px" font-style="italic" font-weight="400"><tspan class="TextPosition" x="7980" y="10017"><tspan fill="rgb(0,0,0)" stroke="none">streamed to, from or between clusters on demand</tspan></tspan></tspan></text>
+       </g>
+      </g>
+     </g>
+    </g>
+   </g>
+  </g>
+ </g>
+</svg>
\ No newline at end of file
diff --git a/doc/user/cwl/federated-workflows.html.textile.liquid b/doc/user/cwl/federated-workflows.html.textile.liquid
new file mode 100644 (file)
index 0000000..7e2150d
--- /dev/null
@@ -0,0 +1,57 @@
+---
+layout: default
+navsection: userguide
+title: Federated Multi-Cluster Workflows
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+To support running analysis on geographically dispersed data (avoiding expensive data transfers by sending the computation to the data), and "hybrid cloud" configurations where an on-premise cluster can expand its capabilities by delegating work to a cloud-hosted cluster, Arvados supports federated workflows.  In a federated workflow, different steps of a workflow may execute on different clusters.  Arvados manages data transfer and delegation of credentials, so all you need to do is add "arv:ClusterTarget":cwl-extensions.html#clustertarget hints to your existing workflow.
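+
+For example, a step can be pinned to a particular cluster with a hint like the following sketch (the cluster and project identifiers here are placeholders taken from the example input below; remember to declare the @arv@ namespace, as in the "full example":#fed-example at the end of this page):
+
+{% codeblock as yaml %}
+# Sketch only: run this step on cluster "clsr2", with container requests
+# and intermediate outputs owned by the given project on that cluster.
+hints:
+  arv:ClusterTarget:
+    cluster_id: clsr2
+    project_uuid: clsr2-j7d0g-ivdrm1hyym21vkq
+{% endcodeblock %}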
+
+!(full-width)federated-workflow.svg!
+
+For more information, visit the "architecture":{{site.baseurl}}/architecture/federation.html and "admin":{{site.baseurl}}/admin/federation.html sections about Arvados federation.
+
+h2. Get the example files
+
+The tutorial files are located in the "documentation section of the Arvados source repository":https://github.com/curoverse/arvados/tree/master/doc/user/cwl/federated or "see below":#fed-example .
+
+<notextile>
+<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados</span>
+~$ <span class="userinput">cd arvados/doc/user/cwl/federated</span>
+</code></pre>
+</notextile>
+
+h2. Run example
+
+{% include 'notebox_begin' %}
+
+At this time, Workbench does not display the remote steps of a federated workflow.  As a workaround, you can find the UUIDs of the remote steps in the live logs of the workflow runner (the "Logs" tab), then visit the remote cluster's Workbench and enter the UUID into its search box to view the details of the remote step.  This will be fixed in a future version of Workbench.
+
+{% include 'notebox_end' %}
+
+Run it like any other workflow:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arvados-cwl-runner federated.cwl shards.yml</span>
+</code></pre>
+</notextile>
+
+You can also "run a workflow on a remote federated cluster":cwl-run-options.html#federation .
+
+h2(#fed-example). Federated scatter/gather example
+
+In the following example, an analysis task is executed on three different clusters with different data, and the results are then combined to produce the final output.
+
+{% codeblock as yaml %}
+{% include 'federated_cwl' %}
+{% endcodeblock %}
+
+Example input document:
+
+{% codeblock as yaml %}
+{% include 'shards_yml' %}
+{% endcodeblock %}
diff --git a/doc/user/cwl/federated/cat.cwl b/doc/user/cwl/federated/cat.cwl
new file mode 100644 (file)
index 0000000..17132fe
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  inp:
+    type: File[]
+    inputBinding: {}
+outputs:
+  joined: stdout
+stdout: joined.txt
+baseCommand: cat
diff --git a/doc/user/cwl/federated/federated.cwl b/doc/user/cwl/federated/federated.cwl
new file mode 100644 (file)
index 0000000..5314a76
--- /dev/null
@@ -0,0 +1,87 @@
+#
+# Demonstrate Arvados federation features.  This performs a parallel
+# scatter over some arbitrary number of files and federated clusters,
+# then joins the results.
+#
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  # When using Arvados extensions to CWL, must declare the 'arv' namespace
+  arv: "http://arvados.org/cwl#"
+
+requirements:
+  InlineJavascriptRequirement: {}
+  ScatterFeatureRequirement: {}
+  StepInputExpressionRequirement: {}
+
+  DockerRequirement:
+    # Replace this with your own Docker container
+    dockerPull: arvados/jobs
+
+  # Define a record type so we can conveniently associate the input
+  # file, the cluster on which the file lives, and the project on that
+  # cluster that will own the container requests and intermediate
+  # outputs.
+  SchemaDefRequirement:
+    types:
+      - name: FileOnCluster
+        type: record
+        fields:
+          file: File
+          cluster: string
+          project: string
+
+inputs:
+  # Expect an array of FileOnCluster records (defined above)
+  # as our input.
+  shards:
+    type:
+      type: array
+      items: FileOnCluster
+
+outputs:
+  # Will produce an output file with the results of the distributed
+  # analysis jobs joined together.
+  joined:
+    type: File
+    outputSource: gather-results/joined
+
+steps:
+  distributed-analysis:
+    in:
+      # Take "shards" array as input, we scatter over it below.
+      shard: shards
+
+      # Use an expression to extract the "file" field to assign to the
+      # "inp" parameter of the tool.
+      inp: {valueFrom: $(inputs.shard.file)}
+
+    # Scatter over shards, this means creating a parallel job for each
+    # element in the "shards" array.  Expressions are evaluated for
+    # each element.
+    scatter: shard
+
+    # Specify the cluster target for this job.  This means each
+    # separate scatter job will execute on the cluster that was
+    # specified in the "cluster" field.
+    #
+    # Arvados handles streaming data between clusters, for example,
+    # the Docker image containing the code for a particular tool will
+    # be fetched on demand, as long as it is available somewhere in
+    # the federation.
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.shard.cluster)
+        project_uuid: $(inputs.shard.project)
+
+    out: [out]
+    run: md5sum.cwl
+
+  # Collect the results of the distributed step and join them into a
+  # single output file.  Arvados handles streaming inputs,
+  # intermediate results, and outputs between clusters on demand.
+  gather-results:
+    in:
+      inp: distributed-analysis/out
+    out: [joined]
+    run: cat.cwl
diff --git a/doc/user/cwl/federated/file-on-clsr1.dat b/doc/user/cwl/federated/file-on-clsr1.dat
new file mode 100644 (file)
index 0000000..e79f152
--- /dev/null
@@ -0,0 +1 @@
+file-on-clsr1.dat
diff --git a/doc/user/cwl/federated/file-on-clsr2.dat b/doc/user/cwl/federated/file-on-clsr2.dat
new file mode 100644 (file)
index 0000000..9179dc8
--- /dev/null
@@ -0,0 +1 @@
+file-on-clsr2.dat
diff --git a/doc/user/cwl/federated/file-on-clsr3.dat b/doc/user/cwl/federated/file-on-clsr3.dat
new file mode 100644 (file)
index 0000000..58b5902
--- /dev/null
@@ -0,0 +1 @@
+file-on-clsr3.dat
diff --git a/doc/user/cwl/federated/md5sum.cwl b/doc/user/cwl/federated/md5sum.cwl
new file mode 100644 (file)
index 0000000..9c78dc2
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+inputs:
+  inp:
+    type: File
+outputs:
+  out:
+    type: File
+    outputBinding:
+      glob: out.txt
+stdin: $(inputs.inp.path)
+stdout: out.txt
+arguments: ["md5sum", "-"]
diff --git a/doc/user/cwl/federated/shards.yml b/doc/user/cwl/federated/shards.yml
new file mode 100644 (file)
index 0000000..ed8a83a
--- /dev/null
@@ -0,0 +1,18 @@
+shards:
+  - cluster: clsr1
+    project: clsr1-j7d0g-qxc4jcji7n4lafx
+    file:
+      class: File
+      location: keep:485df2c5cec3207a32f49c42f1cdcca9+61/file-on-clsr1.dat
+
+  - cluster: clsr2
+    project: clsr2-j7d0g-ivdrm1hyym21vkq
+    file:
+      class: File
+      location: keep:ae6e9c3e9bfa52a0122ecb489d8198ff+61/file-on-clsr2.dat
+
+  - cluster: clsr3
+    project: clsr3-j7d0g-e3njz2s53lyb0ka
+    file:
+      class: File
+      location: keep:0b43a0ef9ea592d5d7b299978dfa8643+61/file-on-clsr3.dat
diff --git a/doc/user/examples/crunch-examples.html.textile.liquid b/doc/user/examples/crunch-examples.html.textile.liquid
new file mode 100644 (file)
index 0000000..c93766a
--- /dev/null
@@ -0,0 +1,102 @@
+---
+layout: default
+navsection: userguide
+title: "Scripts provided by Arvados"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'pipeline_deprecation_notice' %}
+
+Several crunch scripts are included with Arvados in the "/crunch_scripts directory":https://dev.arvados.org/projects/arvados/repository/revisions/master/show/crunch_scripts. They are intended to provide examples and starting points for writing your own scripts.
+
+h4. bwa-aln
+
+Run the bwa aligner on a set of paired-end fastq files, producing a BAM file for each pair. "View source.":https://dev.arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/bwa-aln
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|bwa_tbz|Collection with the bwa source distribution.|@8b6e2c4916133e1d859c9e812861ce13+70@|
+|samtools_tgz|Collection with the samtools source distribution.|@c777e23cf13e5d5906abfdc08d84bfdb+74@|
+|input|Collection with fastq reads (pairs of *_1.fastq.gz and *_2.fastq.gz).|@d0136bc494c21f79fc1b6a390561e6cb+2778@|
+</div>
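+
+As a sketch only (the pipeline template system is deprecated, and the exact field layout shown here is an assumption based on the legacy template format, so treat it as illustrative rather than definitive), these parameters would be supplied in a template component along these lines:
+
+<notextile>
+<pre><code>{
+  "components": {
+    "bwa-aln": {
+      "script": "bwa-aln",
+      "script_version": "master",
+      "repository": "arvados",
+      "script_parameters": {
+        "bwa_tbz": "8b6e2c4916133e1d859c9e812861ce13+70",
+        "samtools_tgz": "c777e23cf13e5d5906abfdc08d84bfdb+74",
+        "input": "d0136bc494c21f79fc1b6a390561e6cb+2778"
+      }
+    }
+  }
+}
+</code></pre>
+</notextile>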
+
+h4. bwa-index
+
+Generate an index of a fasta reference genome suitable for use by bwa-aln. "View source.":https://dev.arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/bwa-index
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|bwa_tbz|Collection with the bwa source distribution.|@8b6e2c4916133e1d859c9e812861ce13+70@|
+|input|Collection with reference data (*.fasta.gz, *.fasta.fai.gz, *.dict.gz).|@c361dbf46ee3397b0958802b346e9b5a+925@|
+</div>
+
+h4. picard-gatk2-prep
+
+Using the FixMateInformation, SortSam, ReorderSam, AddOrReplaceReadGroups, and BuildBamIndex modules from picard, prepare a BAM file for use with the GATK2 tools. Additionally, run picard's CollectAlignmentSummaryMetrics module to produce a @*.casm.tsv@ statistics file for each BAM file. "View source.":https://dev.arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/picard-gatk2-prep
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|input|Collection containing aligned bam files.||
+|picard_zip|Collection with the picard binary distribution.|@687f74675c6a0e925dec619cc2bec25f+77@|
+|reference|Collection with reference data (*.fasta.gz, *.fasta.fai.gz, *.dict.gz).|@c361dbf46ee3397b0958802b346e9b5a+925@|
+</div>
+
+h4. GATK2-realign
+
+Run GATK's RealignerTargetCreator and IndelRealigner modules on a set of BAM files. "View source.":https://dev.arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/GATK2-realign
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|input|Collection containing aligned bam files.||
+|picard_zip|Collection with the picard binary distribution.|@687f74675c6a0e925dec619cc2bec25f+77@|
+|gatk_tbz|Collection with the GATK2 binary distribution.|@7e0a277d6d2353678a11f56bab3b13f2+87@|
+|gatk_bundle|Collection with the GATK data bundle.|@d237a90bae3870b3b033aea1e99de4a9+10820@|
+|known_sites|List of files in the data bundle to use as GATK @-known@ arguments. Optional.|@["dbsnp_137.b37.vcf","Mills_and_1000G_gold_standard.indels.b37.vcf"]@ (this is the default value)|
+|regions|Collection with .bed files indicating sequencing target regions. Optional.||
+|region_padding|Corresponds to GATK @--interval_padding@ argument. Required if a regions parameter is given.|10|
+</div>
+
+h4. GATK2-bqsr
+
+Run GATK's BaseQualityScoreRecalibration module on a set of BAM files. "View source.":https://dev.arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/GATK2-bqsr
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|input|Collection containing bam files.||
+|gatk_tbz|Collection with the GATK2 binary distribution.|@7e0a277d6d2353678a11f56bab3b13f2+87@|
+|gatk_bundle|Collection with the GATK data bundle.|@d237a90bae3870b3b033aea1e99de4a9+10820@|
+</div>
+
+h4. GATK2-merge-call
+
+Merge a set of BAM files using picard, and run GATK's UnifiedGenotyper module on the merged set to produce a VCF file. "View source.":https://dev.arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/GATK2-merge-call
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|input|Collection containing bam files.||
+|picard_zip|Collection with the picard binary distribution.|@687f74675c6a0e925dec619cc2bec25f+77@|
+|gatk_tbz|Collection with the GATK2 binary distribution.|@7e0a277d6d2353678a11f56bab3b13f2+87@|
+|gatk_bundle|Collection with the GATK data bundle.|@d237a90bae3870b3b033aea1e99de4a9+10820@|
+|regions|Collection with .bed files indicating sequencing target regions. Optional.||
+|region_padding|Corresponds to GATK @--interval_padding@ argument. Required if a regions parameter is given.|10|
+</div>
+
+h4. file-select
+
+Pass through the named files from the input collection to the output collection, ignoring the rest. "View source.":https://dev.arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/file-select
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|names|List of filenames to include in the output.|@["human_g1k_v37.fasta.gz","human_g1k_v37.fasta.fai.gz"]@|
+</div>
diff --git a/doc/user/getting_started/check-environment.html.textile.liquid b/doc/user/getting_started/check-environment.html.textile.liquid
new file mode 100644 (file)
index 0000000..b707891
--- /dev/null
@@ -0,0 +1,45 @@
+---
+layout: default
+navsection: userguide
+title: "Checking your environment"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+First, log into an Arvados VM instance (instructions for "Webshell":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or install the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/install.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation.
+
+Check that you are able to access the Arvados API server using @arv user current@.  If the command succeeds, it will print out information about your account:
+
+<notextile>
+<pre><code>$ <span class="userinput">arv user current</span>
+{
+ "href":"https://qr1hi.arvadosapi.com/arvados/v1/users/qr1hi-xioed-9z2p3pn12yqdaem",
+ "kind":"arvados#user",
+ "etag":"8u0xwb9f3otb2xx9hto4wyo03",
+ "uuid":"qr1hi-tpzed-92d3kxnimy3d4e8",
+ "owner_uuid":"qr1hi-tpqed-23iddeohxta2r59",
+ "created_at":"2013-12-02T17:05:47Z",
+ "modified_by_client_uuid":"qr1hi-xxfg8-owxa2oa2s33jyej",
+ "modified_by_user_uuid":"qr1hi-tpqed-23iddeohxta2r59",
+ "modified_at":"2013-12-02T17:07:08Z",
+ "updated_at":"2013-12-05T19:51:08Z",
+ "email":"you@example.com",
+ "full_name":"Example User",
+ "first_name":"Example",
+ "last_name":"User",
+ "identity_url":"https://www.google.com/accounts/o8/id?id=AItOawnhlZr-pQ_Ic2f2W22XaO02oL3avJ322k1",
+ "is_active": true,
+ "is_admin": false,
+ "prefs":{}
+}
+</code></pre>
+</notextile>
+
+However, if you receive the following message:
+
+bc. ARVADOS_API_HOST and ARVADOS_API_TOKEN need to be defined as environment variables
+
+follow the instructions for "getting an API token,":{{site.baseurl}}/user/reference/api-tokens.html and try @arv user current@ again.
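+
+If you are connecting to a remote API server from your own workstation rather than an Arvados VM, you can set these variables manually.  A sketch (the host and token values below are placeholders; use your own cluster's API host and the token obtained in the steps above):
+
+<notextile>
+<pre><code>$ <span class="userinput">export ARVADOS_API_HOST=qr1hi.arvadosapi.com</span>
+$ <span class="userinput">export ARVADOS_API_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx</span>
+</code></pre>
+</notextile>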
diff --git a/doc/user/getting_started/community.html.textile.liquid b/doc/user/getting_started/community.html.textile.liquid
new file mode 100644 (file)
index 0000000..40c67ad
--- /dev/null
@@ -0,0 +1,26 @@
+---
+layout: default
+navsection: userguide
+title: Arvados Community and Getting Help
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2. On the web
+
+The Arvados Free Software project page is located at "https://arvados.org":https://arvados.org .  The "Arvados Wiki":https://dev.arvados.org/projects/arvados/wiki is a collaborative site for documenting Arvados and provides an overview of the Arvados Platform and Components.  The "Arvados blog":https://dev.arvados.org/projects/arvados/blogs posts articles of interest about Arvados.
+
+h2. Mailing lists
+
+The "Arvados user mailing list":http://lists.arvados.org/mailman/listinfo/arvados is a forum for general discussion, questions, and news about Arvados development.  The "Arvados developer mailing list":http://lists.arvados.org/mailman/listinfo/arvados-dev is a forum for more technical discussion, intended for developers and contributers to Arvados.
+
+h2. Chat
+
+The "curoverse/arvados channel":https://gitter.im/curoverse/arvados channel at "gitter.im":https://gitter.im is available for live discussion and support.
+
+h2. Bug tracking
+
+If you think you have found a bug, or would like to make a feature request, check the "Arvados issue tracker":https://dev.arvados.org/projects/arvados/issues to see if it has already been reported or "add a new issue.":https://dev.arvados.org/projects/arvados/issues/new
diff --git a/doc/user/getting_started/ssh-access-unix.html.textile.liquid b/doc/user/getting_started/ssh-access-unix.html.textile.liquid
new file mode 100644 (file)
index 0000000..284d0a1
--- /dev/null
@@ -0,0 +1,119 @@
+---
+layout: default
+navsection: userguide
+title: Accessing an Arvados VM with SSH - Unix Environments
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This document is for accessing an Arvados VM using SSH keys in Unix environments (Linux, OS X, Cygwin). If you would like to access the VM through your browser, please visit the "Accessing an Arvados VM with Webshell":vm-login-with-webshell.html page. If you are using a Windows environment, please visit the "Accessing an Arvados VM with SSH - Windows Environments":ssh-access-windows.html page.
+
+{% include 'ssh_intro' %}
+
+h1(#gettingkey). Getting your SSH key
+
+h3(#unix). Generate a key using ssh-keygen
+
+Start by opening a terminal window.  Check if you have an existing public key:
+
+notextile. <pre><code>$ <span class="userinput">ls ~/.ssh/id_rsa.pub</span></code></pre>
+
+If the file @id_rsa.pub@ exists, then you may use your existing key.  Copy the contents of @~/.ssh/id_rsa.pub@ onto the clipboard (this is your public key).  You can skip the rest of this section and proceed by "adding your key to the Arvados Workbench.":#workbench
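+
+For example, you can copy the key to the clipboard directly from the command line (a sketch; @pbcopy@ ships with OS X and @xclip@ is available on most Linux distributions):
+
+<notextile>
+<pre><code>$ <span class="userinput">pbcopy &lt; ~/.ssh/id_rsa.pub</span>                          # OS X
+$ <span class="userinput">xclip -selection clipboard &lt; ~/.ssh/id_rsa.pub</span>     # Linux with X11
+</code></pre>
+</notextile>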
+
+If there is no file @~/.ssh/id_rsa.pub@, you must generate a new key.  Use @ssh-keygen@ to do this:
+
+<notextile>
+<pre><code>$ <span class="userinput">ssh-keygen -t rsa -C "you@example.com"</span>
+Generating public/private rsa key pair.
+Enter file in which to save the key (/home/example/.ssh/id_rsa):
+Enter passphrase (empty for no passphrase):
+Enter same passphrase again:
+</code></pre>
+</notextile>
+
+* @-t@ specifies the key type (must be "rsa")
+* @-C@ specifies a comment (to remember which account the key is associated with)
+
+We strongly recommend that you protect your key with a passphrase.  This means that when the key is used, you will be required to enter the passphrase.  However, unlike logging into a remote system using a password, the passphrase is never sent over the network; it is only used to decrypt your private key.
+
+Display the contents of @~/.ssh/id_rsa.pub@ (this is your public key) using @cat@ and then copy it onto the clipboard:
+
+<notextile>
+<pre><code>$ <span class="userinput">cat ~/.ssh/id_rsa.pub</span>
+ssh-rsa AAAAB3NzaC1ycEDoNotUseExampleKeyDoNotUseExampleKeyDoNotUseExampleKeyDoNotUse9lmzkpBq983bQradKGT3LuKda9QOGe8MatI6wzSrJLSGhHm3hk6D8OWWUG4SneuCtKIk2bH0pgBj1G29+uzDIez90WzfCTZKbz4RcVQmPkowSSUAQDwb0ffwvRDhCgcJ1loT1wQAJzqJmljQ7xEYaCOIMqnfYE0lX7B3MSvCV6Ie2rWL33YecLp48LVtqiCOZU4XRyO8RSDFRFLVW+mjkLirwtDHZCRtORScaIEN0jw51p+T+9X5iA9QH/Mn+xlgk7fCgH+JtpBj808N/Qds2Gpff+Kb6ulUrVVfMK6L you@example.com
+</code></pre>
+</notextile>
+
+Now you can set up @ssh-agent@ (next) or proceed with "adding your key to the Arvados Workbench.":#workbench
+
+h3. Set up ssh-agent (recommended)
+
+If you find you are entering your passphrase frequently, you can use @ssh-agent@ to manage your credentials.  Use @ssh-add -l@ to test if you already have ssh-agent running:
+
+notextile. <pre><code>$ <span class="userinput">ssh-add -l</span></code></pre>
+
+If you get the error "Could not open a connection to your authentication agent", you will need to run @ssh-agent@ with the following command:
+
+notextile. <pre><code>$ <span class="userinput">eval $(ssh-agent -s)</span></code></pre>
+
+@ssh-agent -s@ prints out values for environment variables SSH_AUTH_SOCK and SSH_AGENT_PID and then runs in the background.  Using "eval" on the output as shown here causes those variables to be set in the current shell environment so that subsequent calls to SSH can discover how to access the agent process.
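+
+For example, you can verify that the variables are now set in your current shell (a sketch; the exact values will vary):
+
+<notextile>
+<pre><code>$ <span class="userinput">env | grep SSH_</span>
+SSH_AUTH_SOCK=/tmp/ssh-XXXXXXxxxxxx/agent.12345
+SSH_AGENT_PID=12345
+</code></pre>
+</notextile>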
+
+After running @ssh-agent@, or if @ssh-add -l@ prints "The agent has no identities", add your key using the following command.  The passphrase to decrypt the key is the same one used to protect the key when it was created with @ssh-keygen@:
+
+<notextile>
+<pre><code>$ <span class="userinput">ssh-add</span>
+Enter passphrase for /home/example/.ssh/id_rsa:
+Identity added: /home/example/.ssh/id_rsa (/home/example/.ssh/id_rsa)
+</code></pre>
+</notextile>
+
+When everything is set up, @ssh-add -l@ should yield output that looks something like this:
+
+<notextile>
+<pre><code>$ <span class="userinput">ssh-add -l</span>
+2048 eb:fa:15:f2:44:26:95:58:37:37:f4:aa:ff:ee:c2:85 you@example.com (RSA)
+</code></pre>
+</notextile>
+
+{% include 'ssh_addkey' %}
+
+h3. Connecting to the virtual machine
+
+Use the following command to connect to the _shell_ VM instance as _you_.  Replace *<code>you@shell</code>* at the end of the following command with your *login* and *hostname* from Workbench:
+
+notextile. <pre><code>$ <span class="userinput">ssh -o "ProxyCommand ssh -p2222 turnout@switchyard.{{ site.arvados_api_host }} -x -a <b>shell</b>" -x <b>you@shell</b></span></code></pre>
+
+This command does several things at once. You usually cannot log in directly to virtual machines over the public Internet.  Instead, you log into a "switchyard" server and then tell the switchyard which virtual machine you want to connect to.
+
+* @-o "ProxyCommand ..."@ configures SSH to run the specified command to create a proxy and route your connection through it.
+* @-p2222@ specifies that the switchyard is running on non-standard port 2222.
+* <code>turnout@switchyard.{{ site.arvados_api_host }}</code> specifies the user (@turnout@) and hostname (@switchyard.{{ site.arvados_api_host }}@) of the switchyard server that will proxy our connection to the VM.
+* @-x@ tells SSH not to forward your X session to the switchyard.
+* @-a@ tells SSH not to forward your ssh-agent credentials to the switchyard.
+* *@shell@* is the name of the VM that we want to connect to.  This is sent to the switchyard server as if it were an SSH command, and the switchyard server connects to the VM on our behalf.
+* After the ProxyCommand section, we repeat @-x@ to disable X session forwarding to the virtual machine.
+* Finally, *<code>you@shell</code>* specifies your login name and repeats the hostname of the VM.  The username can be found in the *logins* column in the VMs Workbench page, discussed in the previous section.
+
+You should now be able to log into the Arvados VM and "check your environment.":check-environment.html
+
+h3. Configuration (recommended)
+
+The command line above is cumbersome, but you can configure SSH to remember many of these settings.  Add this text to the file @.ssh/config@ in your home directory (create a new file if @.ssh/config@ doesn't exist):
+
+<notextile>
+<pre><code class="userinput">Host *.{{ site.arvados_cluster_uuid }}
+  TCPKeepAlive yes
+  ServerAliveInterval 60
+  ProxyCommand ssh -p2222 turnout@switchyard.{{ site.arvados_api_host }} -x -a $SSH_PROXY_FLAGS %h
+  User <b>you</b>
+</code></pre>
+</notextile>
+
+This will recognize any host ending in ".{{ site.arvados_cluster_uuid }}" and automatically apply the proxy, user and forwarding settings from the configuration file, allowing you to log in with a much simpler command:
+
+notextile. <pre><code>$ <span class="userinput">ssh <b>shell</b>.{{ site.arvados_cluster_uuid }}</span></code></pre>
+
+You should now be able to log into the Arvados VM and "check your environment.":check-environment.html
diff --git a/doc/user/getting_started/ssh-access-windows.html.textile.liquid b/doc/user/getting_started/ssh-access-windows.html.textile.liquid
new file mode 100644 (file)
index 0000000..0406e7c
--- /dev/null
@@ -0,0 +1,81 @@
+---
+layout: default
+navsection: userguide
+title: Accessing an Arvados VM with SSH - Windows Environments
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This document is for accessing an Arvados VM using SSH keys in Windows environments. If you would like to access the VM through your browser, please visit the "Accessing an Arvados VM with Webshell":vm-login-with-webshell.html page. If you are using a Unix environment (Linux, OS X, Cygwin), please visit the "Accessing an Arvados VM with SSH - Unix Environments":ssh-access-unix.html page.
+
+{% include 'ssh_intro' %}
+
+h1(#gettingkey). Getting your SSH key
+
+(Note: if you are using the SSH client that comes with "Cygwin":http://cygwin.com, please use instructions found in the "Accessing an Arvados VM with SSH - Unix Environments":ssh-access-unix.html page.)
+
+We will be using PuTTY to connect to Arvados. "PuTTY":http://www.chiark.greenend.org.uk/~sgtatham/putty/ is a free (MIT-licensed) Win32 Telnet and SSH client. PuTTY includes all the tools a Windows user needs to create private keys and make SSH connections to your virtual machines in the Arvados Cloud.
+
+You can "download PuTTY from its Web site":http://www.chiark.greenend.org.uk/~sgtatham/putty/.  Note that you should download the installer or .zip file with all of the PuTTY tools (PuTTYtel is not required).
+
+If you downloaded the zip file, extract it to the location you wish to install the PuTTY applications. This document assumes that you installed PuTTY in the default directory under @C:\Program Files\@ or @C:\Program Files (x86)\@ (if you are using a 64 bit operating system).
+
+h3. Step 1 - Adding PuTTY to the PATH
+
+# After downloading PuTTY and installing it, you should have a PuTTY folder in @C:\Program Files\@ or @C:\Program Files (x86)\@ (if you are using a 64 bit operating system).
+# Open the Control Panel.
+# Select _Advanced System Settings_, and choose _Environment Variables_.
+On newer systems such as Windows 10, you can reach _Advanced System Settings_ as follows: open Control Panel, click on _System and Security_, click on _System_, then click on _Advanced system settings_ and choose _Environment Variables..._
+# Under system variables, find and edit @PATH@.
+# If you installed PuTTY in @C:\Program Files\PuTTY\@, add the following to the end of PATH:
+<code>;C:\Program Files\PuTTY</code>
+If you installed PuTTY in @C:\Program Files (x86)\PuTTY\@, add the following to the end of PATH:
+<code>;C:\Program Files (x86)\PuTTY</code>
+# Click through the OKs to close all the dialogs you’ve opened.
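+
+To confirm that PuTTY is now on your @PATH@, you can open a new _Command Prompt_ window and ask @plink@ for its version (the release number shown here is only an example):
+
+<notextile>
+<pre><code>> <span class="userinput">plink -V</span>
+plink: Release 0.70
+</code></pre>
+</notextile>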
+
+h3. Step 2 - Creating a Public Key
+
+# Start PuTTYgen from the Start Menu or the folder where it was installed.
+# At the bottom of the window, make sure the ‘Number of bits in a generated key’ field is set to 4096.
+# Click Generate and follow the instructions to generate a key.
+# Click the _Save public key_ button.
+# Click the _Save private key_ button (we recommend using a strong passphrase).
+# Select the text of the Public Key and copy it to the clipboard.
+
+h3. Step 3 - Set up Pageant
+
+Pageant is a PuTTY utility that manages your private keys, so you do not need to enter your private key passphrase every time you make a new SSH connection.
+
+# Start Pageant from the Start Menu or the folder where it was installed.
+# Pageant will now be running in the system tray. Click the Pageant icon to configure.
+# Choose _Add Key_ and add the private key which you created in the previous step.
+
+{% include 'ssh_addkey' %}
+
+h3. Initial configuration
+
+# Open PuTTY from the Start Menu.
+# On the Session screen set the Host Name (or IP address) to “shell”, which is the hostname listed in the _Virtual Machines_ page.
+# On the Session screen set the Port to “22”.
+# On the Connection %(rarr)&rarr;% Data screen set the Auto-login username to the username listed in the *Login name* column on the Arvados Workbench _Virtual machines_ page.
+# On the Connection %(rarr)&rarr;% Proxy screen set the Proxy Type to “Local”.
+# On the Connection %(rarr)&rarr;% Proxy screen in the “Telnet command, or local proxy command” box enter:
+<code>plink -P 2222 turnout@switchyard.{{ site.arvados_api_host }} %host</code>
+Make sure there is no newline at the end of the text entry.
+# Return to the Session screen. In the Saved Sessions box, enter a name for this configuration and click Save.
+
+_Note: We recommend you do not delete the “Default” Saved Session._
+
+h3. Connecting to the VM
+
+# Open PuTTY from the Start Menu.
+# Click on the Saved Session name you created in the previous section.
+# Click Load to load those saved session settings.
+# Click Open to open the SSH window at the command prompt. You will now be logged into your virtual machine.
+
+_Note_: If the PuTTY terminal window appears to hang with no further action, open a new _Command Prompt_ window from the Windows Start menu and type <code>plink -P 2222 turnout@switchyard.{{ site.arvados_api_host }} shell</code> in it. Make sure to replace *shell* with the hostname listed in the _Virtual Machines_ page. Hit enter and type _y_ when prompted to cache the session state. Then go back and start the PuTTY session from the Start menu.
+
+You should now be able to log into the Arvados VM and "check your environment.":check-environment.html
diff --git a/doc/user/getting_started/vm-login-with-webshell.html.textile.liquid b/doc/user/getting_started/vm-login-with-webshell.html.textile.liquid
new file mode 100644 (file)
index 0000000..551002e
--- /dev/null
@@ -0,0 +1,24 @@
+---
+layout: default
+navsection: userguide
+title: Accessing an Arvados VM with Webshell
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This document describes how to access an Arvados VM with Webshell from Workbench.
+
+h2(#webshell). Access VM using webshell
+
+Webshell gives you access to an Arvados virtual machine from your browser with no additional setup.
+
+In the Arvados Workbench, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Virtual machines* to see the list of virtual machines you can access.  If you do not have access to any virtual machines, please click on <span class="btn btn-sm btn-primary">Send request for shell access</span> or send an email to "support@curoverse.com":mailto:support@curoverse.com.
+
+Each row in the Virtual Machines panel lists the hostname of the VM, along with a <code>Log in as *you*</code> button under the column "Web shell". Clicking on this button will open up a webshell terminal for you in a new browser tab and log you in.
+
+!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/vm-access-with-webshell.png!
+
+You are now ready to work in your Arvados VM.
diff --git a/doc/user/getting_started/workbench.html.textile.liquid b/doc/user/getting_started/workbench.html.textile.liquid
new file mode 100644 (file)
index 0000000..fc70422
--- /dev/null
@@ -0,0 +1,22 @@
+---
+layout: default
+navsection: userguide
+title: Accessing Arvados Workbench
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+If you are using the default Arvados instance for this guide, you can access Arvados Workbench using this link:
+
+<a href="{{site.arvados_workbench_host}}/" target="_blank">{{site.arvados_workbench_host}}/</a>
+
+(If you are using a different Arvados instance than the default for this guide, replace *{{ site.arvados_workbench_host }}* with your private instance in all of the examples in this guide.)
+
+You may be asked to log in using a Google account.  Arvados uses only your name and email address from Google services for identification, and will never access any personal information.  If you are accessing Arvados for the first time, the Workbench may indicate your account status is *New / inactive*.  If this is the case, contact the administrator of the Arvados instance to request activation of your account.
+
+Once your account is active, logging in to the Workbench will present you with the Dashboard. This gives a summary of your projects and recent activity in the Arvados instance.  "You are now ready to run your first pipeline.":{{ site.baseurl }}/user/tutorials/tutorial-workflow-workbench.html
+
+!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/workbench-dashboard.png!
diff --git a/doc/user/index.html.textile.liquid b/doc/user/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..202e297
--- /dev/null
@@ -0,0 +1,38 @@
+---
+layout: default
+navsection: userguide
+title: Welcome to Arvados&trade;!
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This guide provides a reference for using Arvados to solve scientific big data problems, including:
+
+* Robust storage of very large files, such as whole genome sequences, using the "Arvados Keep":{{site.baseurl}}/user/tutorials/tutorial-keep.html content-addressable cluster file system.
+* Running compute-intensive scientific analysis pipelines, such as genomic alignment and variant calls using the "Arvados Crunch":{{site.baseurl}}/user/tutorials/intro-crunch.html cluster compute engine.
+* Accessing, organizing, and sharing data, workflows and results using the "Arvados Workbench":{{site.baseurl}}/user/getting_started/workbench.html web application.
+* Running an analysis using multiple clusters (HPC, cloud, or hybrid) with "Federated Multi-Cluster Workflows":{{site.baseurl}}/user/cwl/federated-workflows.html .
+
+The examples in this guide use the public Arvados instance located at <a href="{{site.arvados_workbench_host}}/" target="_blank">{{site.arvados_workbench_host}}</a>.  If you are using a different Arvados instance replace @{{ site.arvados_workbench_host }}@ with your private instance in all of the examples in this guide.
+
+h2. Typographic conventions
+
+This manual uses the following typographic conventions:
+
+<notextile>
+<ul>
+<li>Code blocks which are set aside from the text indicate user input to the system.  Commands that should be entered into a Unix shell are indicated by the directory where you should  enter the command ('~' indicates your home directory) followed by '$', followed by the highlighted <span class="userinput">command to enter</span> (do not enter the '$'), and possibly followed by example command output in black.  For example, the following block indicates that you should type <code>ls foo.*</code> while in your home directory and the expected output will be "foo.input" and "foo.output".
+<pre><code>~$ <span class="userinput">ls foo.*</span>
+foo.input foo.output
+</code></pre>
+</li>
+
+<li>Code blocks inline with text emphasize specific <code>programs</code>, <code>files</code>, or <code>options</code> that are being discussed.</li>
+<li>Bold text emphasizes <b>specific items</b> to review on Arvados Workbench pages.</li>
+<li>A sequence of steps separated by right arrows (<span class="rarr">&rarr;</span>) indicate a path the user should follow through the Arvados Workbench.  The steps indicate a menu, hyperlink, column name, field name, or other label on the page that guide the user where to look or click.
+</li>
+</ul>
+</notextile>
diff --git a/doc/user/reference/api-tokens.html.textile.liquid b/doc/user/reference/api-tokens.html.textile.liquid
new file mode 100644 (file)
index 0000000..d5172f0
--- /dev/null
@@ -0,0 +1,49 @@
+---
+layout: default
+navsection: userguide
+title: "Getting an API token"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Arvados API token is a secret key that enables the @arv@ command line client to access Arvados with the proper permissions.
+
+Access the Arvados Workbench using this link: "{{site.arvados_workbench_host}}/":{{site.arvados_workbench_host}}/  (Replace the hostname portion with the hostname of your local Arvados instance if necessary.)
+
+Open a shell on the system where you want to use the Arvados client. This may be your local workstation, or an Arvados virtual machine accessed with "Webshell":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or SSH (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login).
+
+In the Arvados Workbench, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Current token*, which lists your current token and instructions to set up your environment.
+
+h2. Setting environment variables
+
+The *Current token* page, accessed using the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu, includes a command you may copy and paste directly into the shell.  It will look something like the following.
+
+bc. HISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'
+export ARVADOS_API_TOKEN=2jv9346o396exampledonotuseexampledonotuseexes7j1ld
+export ARVADOS_API_HOST={{ site.arvados_api_host }}
+unset ARVADOS_API_HOST_INSECURE
+
+* The @export@ command puts a local shell variable into the environment that will be inherited by child processes such as the @arv@ client.
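+
+To double-check that the variables made it into your environment, you can list them (an optional sanity check; your values will differ):
+
+<notextile>
+<pre><code>$ <span class="userinput">env | grep ARVADOS</span>
+ARVADOS_API_HOST={{ site.arvados_api_host }}
+ARVADOS_API_TOKEN=2jv9346o396exampledonotuseexampledonotuseexes7j1ld
+</code></pre>
+</notextile>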
+
+h2. settings.conf
+
+Arvados tools will also look for the authentication information in @~/.config/arvados/settings.conf@. If you have already put the variables into the environment following the instructions above, you can use these commands to create an Arvados configuration file:
+
+<notextile>
+<pre><code>$ <span class="userinput">echo "ARVADOS_API_HOST=$ARVADOS_API_HOST" > ~/.config/arvados/settings.conf</span>
+$ <span class="userinput">echo "ARVADOS_API_TOKEN=$ARVADOS_API_TOKEN" >> ~/.config/arvados/settings.conf</span>
+</code></pre>
+</notextile>
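+
+These commands assume the @~/.config/arvados@ directory already exists; if it doesn't, create it first:
+
+<notextile>
+<pre><code>$ <span class="userinput">mkdir -p ~/.config/arvados</span>
+</code></pre>
+</notextile>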
+
+h2. .bashrc
+
+Alternately, you may add the declarations of @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ to the @~/.bashrc@ file on the system on which you intend to use the Arvados client.  If you have already put the variables into the environment following the instructions above, you can use these commands to append the environment variables to your @~/.bashrc@:
+
+<notextile>
+<pre><code>$ <span class="userinput">echo "export ARVADOS_API_HOST=$ARVADOS_API_HOST" >> ~/.bashrc</span>
+$ <span class="userinput">echo "export ARVADOS_API_TOKEN=$ARVADOS_API_TOKEN" >> ~/.bashrc</span>
+</code></pre>
+</notextile>
diff --git a/doc/user/reference/cookbook.html.textile.liquid b/doc/user/reference/cookbook.html.textile.liquid
new file mode 100644 (file)
index 0000000..b2411f3
--- /dev/null
@@ -0,0 +1,12 @@
+---
+layout: default
+navsection: userguide
+title: "Arvados SDK Examples"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Code snippets for performing various tasks with the API are "documented in the SDK section":{{site.baseurl}}/sdk/python/cookbook.html .
diff --git a/doc/user/reference/job-pipeline-ref.html.textile.liquid b/doc/user/reference/job-pipeline-ref.html.textile.liquid
new file mode 100644 (file)
index 0000000..f80cec9
--- /dev/null
@@ -0,0 +1,14 @@
+---
+layout: default
+navsection: userguide
+title: "Pipeline template reference"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'pipeline_deprecation_notice' %}
+
+Pipeline template options are described on the "pipeline template schema page.":{{site.baseurl}}/api/methods/pipeline_templates.html
diff --git a/doc/user/topics/arv-copy.html.textile.liquid b/doc/user/topics/arv-copy.html.textile.liquid
new file mode 100644 (file)
index 0000000..f1adfe2
--- /dev/null
@@ -0,0 +1,111 @@
+---
+layout: default
+navsection: userguide
+title: "Using arv-copy"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'crunch1only_begin' %}
+On those sites, the "copy a pipeline template" feature described below is not available. However, the "copy a workflow" feature is not yet implemented.
+{% include 'crunch1only_end' %}
+
+This tutorial describes how to copy Arvados objects from one cluster to another by using @arv-copy@.
+
+{% include 'tutorial_expectations' %}
+
+h2. arv-copy
+
+@arv-copy@ allows users to copy collections and pipeline templates from one cluster to another. By default, @arv-copy@ will recursively go through a template and copy all dependencies associated with the object.
+
+For example, let's copy from the <a href="https://playground.arvados.org/">Arvados playground</a>, also known as *qr1hi*, to *dst_cluster*. The names *qr1hi* and *dst_cluster* are interchangeable with any cluster name. You can find the cluster name in the prefix of the uuid of the object you want to copy. For example, in *qr1hi*-4zz18-tci4vn4fa95w0zx, the cluster name is qr1hi.
+
+In order to communicate with both clusters, you must create a custom configuration file for each one. In the Arvados Workbench of each cluster, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access the user settings menu, click on the menu item *Current token*, and copy the @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ values. Then create two configuration files, one per cluster, named in the format *uuid_prefix.conf*. In our example, that means one file for *qr1hi* and one for *dst_cluster*.
+
+!{display: block;margin-left: 25px;margin-right: auto;}{{ site.baseurl }}/images/api-token-host.png!
+
+Copy your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ values into the config files as shown below, working in the shell account from which you will run the commands (for example, the default shell you may have access to is @shell.qr1hi@).  Place these files in @~/.config/arvados/@.
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd ~/.config/arvados</span>
+~$ <span class="userinput">echo "ARVADOS_API_HOST=qr1hi.arvadosapi.com" >> qr1hi.conf</span>
+~$ <span class="userinput">echo "ARVADOS_API_TOKEN=123456789abcdefghijkl" >> qr1hi.conf</span>
+~$ <span class="userinput">echo "ARVADOS_API_HOST=dst_cluster.arvadosapi.com" >> dst_cluster.conf</span>
+~$ <span class="userinput">echo "ARVADOS_API_TOKEN=987654321lkjihgfedcba" >> dst_cluster.conf</span>
+</code></pre>
+</notextile>
+
+Now you're ready to copy between *qr1hi* and *dst_cluster*!
+
+h3. How to copy a collection
+
+First, find the uuid of the collection you want to copy in the source cluster. The uuid can be found in the collection display page in the collection summary area (top left box), or in the URL bar (the part after @collections/...@).
+
+Now copy the collection from *qr1hi* to *dst_cluster*. We will use the uuid @qr1hi-4zz18-tci4vn4fa95w0zx@ as an example. You can find this collection in the <a href="https://playground.arvados.org/collections/qr1hi-4zz18-tci4vn4fa95w0zx">lobSTR v.3 project on playground.arvados.org</a>.
+<notextile>
+<pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst dst_cluster qr1hi-4zz18-tci4vn4fa95w0zx</span>
+qr1hi-4zz18-tci4vn4fa95w0zx: 6.1M / 6.1M 100.0%
+arvados.arv-copy[1234] INFO: Success: created copy with uuid dst_cluster-4zz18-8765943210cdbae
+</code></pre>
+</notextile>
+
+The output of @arv-copy@ displays the uuid of the collection generated in the destination cluster. By default, the new collection is placed in your home project in the destination cluster. If you want to place it in a pre-existing project, specify that project with the @--project-uuid@ flag followed by the project uuid.
+
+For example, this will copy the collection to project dst_cluster-j7d0g-a894213ukjhal12 in the destination cluster.
+
+<notextile> <pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst dst_cluster --project-uuid dst_cluster-j7d0g-a894213ukjhal12 qr1hi-4zz18-tci4vn4fa95w0zx</span>
+</code></pre>
+</notextile>
+
+h3. How to copy a pipeline template
+
+{% include 'arv_copy_expectations' %}
+
+We will use the uuid @qr1hi-p5p6p-9pkaxt6qjnkxhhu@ as an example pipeline template.
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst dst_cluster --dst-git-repo $USER/tutorial qr1hi-p5p6p-9pkaxt6qjnkxhhu</span>
+To git@git.dst_cluster.arvadosapi.com:$USER/tutorial.git
+ * [new branch] git_git_qr1hi_arvadosapi_com_arvados_git_ac21f0d45a76294aaca0c0c0fdf06eb72d03368d -> git_git_qr1hi_arvadosapi_com_arvados_git_ac21f0d45a76294aaca0c0c0fdf06eb72d03368d
+arvados.arv-copy[19694] INFO: Success: created copy with uuid dst_cluster-p5p6p-rym2h5ub9m8ofwj
+</code></pre>
+</notextile>
+
+New branches in the destination git repo will be created for each branch used in the pipeline template. For example, if your source branch was named ac21f0d45a76294aaca0c0c0fdf06eb72d03368d, your new branch will be named @git_git_qr1hi_arvadosapi_com_reponame_git_ac21f0d45a76294aaca0c0c0fdf06eb72d03368d@.
+
+By default, if you copy a pipeline template recursively, the template as well as all of its dependencies will be placed in your home project.
+
+If you would like to copy the object without dependencies, you can use the @--no-recursive@ flag.
+
+For example, we can copy the same object using this flag:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst dst_cluster --dst-git-repo $USER/tutorial --no-recursive qr1hi-p5p6p-9pkaxt6qjnkxhhu</span>
+</code></pre>
+</notextile>
+
+h3. How to copy a workflow
+
+We will use the uuid @zzzzz-7fd4e-sampleworkflow1@ as an example workflow.
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-copy --src zzzzz --dst dst_cluster --dst-git-repo $USER/tutorial zzzzz-7fd4e-sampleworkflow1</span>
+zzzzz-4zz18-jidprdejysravcr: 1143M / 1143M 100.0%
+2017-01-04 04:11:58 arvados.arv-copy[5906] INFO:
+2017-01-04 04:11:58 arvados.arv-copy[5906] INFO: Success: created copy with uuid dst_cluster-7fd4e-ojtgpne594ubkt7
+</code></pre>
+</notextile>
+
+The name, description, and workflow definition from the original workflow will be used for the destination copy. In addition, any *locations* and *docker images* found in the source workflow definition will also be copied to the destination recursively.
+
+If you would like to copy the object without dependencies, you can use the @--no-recursive@ flag.
+
+For example, we can copy the same object non-recursively using the following:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-copy --src zzzzz --dst dst_cluster --dst-git-repo $USER/tutorial --no-recursive zzzzz-7fd4e-sampleworkflow1</span>
+</code></pre>
+</notextile>
diff --git a/doc/user/topics/arv-docker.html.textile.liquid b/doc/user/topics/arv-docker.html.textile.liquid
new file mode 100644 (file)
index 0000000..f34c21a
--- /dev/null
@@ -0,0 +1,215 @@
+---
+layout: default
+navsection: userguide
+title: "Customizing Crunch environment using Docker"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how to customize the runtime environment (e.g., the programs, libraries, and other dependencies needed to run a job) that a crunch script will be run in using "Docker.":https://www.docker.com/  Docker is a tool for building and running containers that isolate applications from other applications running on the same node.  For detailed information about Docker, see the "Docker User Guide.":https://docs.docker.com/userguide/
+
+This page will demonstrate how to:
+
+# Fetch the arvados/jobs Docker image
+# Manually install additional software into the container
+# Create a new custom image
+# Upload that image to Arvados for use by Crunch jobs
+# Share your image with others
+
+{% include 'tutorial_expectations_workstation' %}
+
+You also need to ensure that "Docker is installed,":https://docs.docker.com/installation/ the Docker daemon is running, and you have permission to access Docker.  You can test this by running @docker version@.  If you receive a permission denied error, your user account may need to be added to the @docker@ group.  If you have root access, you can add yourself to the @docker@ group using @$ sudo addgroup $USER docker@ and then log out and log back in; otherwise consult your local sysadmin.
+
+h2. Fetch a starting image
+
+The easiest way to begin is to start from the "arvados/jobs" image which already has the Arvados SDK installed along with other configuration required for use with Crunch.
+
+Download the latest "arvados/jobs" image from the Docker registry:
+
+<notextile>
+<pre><code>$ <span class="userinput">docker pull arvados/jobs:latest</span>
+Pulling repository arvados/jobs
+3132168f2acb: Download complete
+a42b7f2c59b6: Download complete
+e5afdf26a7ae: Download complete
+5cae48636278: Download complete
+7a4f91b70558: Download complete
+a04a275c1fd6: Download complete
+c433ff206a22: Download complete
+b2e539b45f96: Download complete
+073b2581c6be: Download complete
+593915af19dc: Download complete
+32260b35005e: Download complete
+6e5b860c1cde: Download complete
+95f0bfb43d4d: Download complete
+c7fd77eedb96: Download complete
+0d7685aafd00: Download complete
+</code></pre>
+</notextile>
+
+h2. Install new packages
+
+Next, enter the container using @docker run@, providing the arvados/jobs image and the program you want to run (in this case the bash shell).
+
+<notextile>
+<pre><code>$ <span class="userinput">docker run --interactive --tty --user root arvados/jobs /bin/bash</span>
+root@fbf1d0f529d5:/#
+</code></pre>
+</notextile>
+
+Next, update the package list using @apt-get update@.
+
+<notextile>
+<pre><code>root@fbf1d0f529d5:/# <span class="userinput">apt-get update</span>
+Hit http://security.debian.org jessie/updates InRelease
+Ign http://httpredir.debian.org jessie InRelease
+Ign http://apt.arvados.org jessie InRelease
+Hit http://apt.arvados.org jessie Release.gpg
+Get:1 http://security.debian.org jessie/updates/main amd64 Packages [431 kB]
+Hit http://apt.arvados.org jessie Release
+Hit http://httpredir.debian.org jessie-updates InRelease
+Get:2 http://apt.arvados.org jessie/main amd64 Packages [257 kB]
+Get:3 http://httpredir.debian.org jessie-updates/main amd64 Packages [17.6 kB]
+Hit http://httpredir.debian.org jessie Release.gpg
+Hit http://httpredir.debian.org jessie Release
+Get:4 http://httpredir.debian.org jessie/main amd64 Packages [9049 kB]
+Fetched 9755 kB in 2s (3894 kB/s)
+Reading package lists... Done
+</code></pre>
+</notextile>
+
+In this example, we will install the "R" statistical language Debian package "r-base-core".  Use @apt-get install@:
+
+<notextile>
+<pre><code>root@fbf1d0f529d5:/# <span class="userinput">apt-get install r-base-core</span>
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following extra packages will be installed:
+  [...]
+  libxxf86vm1 make patch r-base-core r-base-dev r-cran-boot r-cran-class
+  r-cran-cluster r-cran-codetools r-cran-foreign r-cran-kernsmooth
+  r-cran-lattice r-cran-mass r-cran-matrix r-cran-mgcv r-cran-nlme r-cran-nnet
+  r-cran-rpart r-cran-spatial r-cran-survival r-doc-html r-recommended
+  [...]
+Suggested packages:
+  [...]
+The following NEW packages will be installed:
+  [...]
+  libxxf86vm1 make patch r-base-core r-base-dev r-cran-boot r-cran-class
+  r-cran-cluster r-cran-codetools r-cran-foreign r-cran-kernsmooth
+  r-cran-lattice r-cran-mass r-cran-matrix r-cran-mgcv r-cran-nlme r-cran-nnet
+  r-cran-rpart r-cran-spatial r-cran-survival r-doc-html r-recommended
+  [...]
+0 upgraded, 203 newly installed, 0 to remove and 39 not upgraded.
+Need to get 124 MB of archives.
+After this operation, 334 MB of additional disk space will be used.
+Do you want to continue [Y/n]? y
+[...]
+Get:130 http://httpredir.debian.org/debian/ jessie/main r-cran-cluster amd64 1.15.3-1 [475 kB]
+Get:131 http://httpredir.debian.org/debian/ jessie/main r-base-dev all 3.1.1-1 [4018 B]
+Get:132 http://httpredir.debian.org/debian/ jessie/main r-cran-boot all 1.3-13-1 [571 kB]
+Get:133 http://httpredir.debian.org/debian/ jessie/main r-cran-codetools all 0.2-9-1 [45.7 kB]
+Get:134 http://httpredir.debian.org/debian/ jessie/main r-cran-rpart amd64 4.1-8-1 [862 kB]
+Get:135 http://httpredir.debian.org/debian/ jessie/main r-cran-foreign amd64 0.8.61-1 [213 kB]
+[...]
+Fetched 124 MB in 52s (2380 kB/s)
+debconf: delaying package configuration, since apt-utils is not installed
+[...]
+Unpacking r-base-core (3.1.1-1+b2) ...
+Selecting previously unselected package r-base-dev.
+Preparing to unpack .../r-base-dev_3.1.1-1_all.deb ...
+Unpacking r-base-dev (3.1.1-1) ...
+Selecting previously unselected package r-cran-boot.
+Preparing to unpack .../r-cran-boot_1.3-13-1_all.deb ...
+Unpacking r-cran-boot (1.3-13-1) ...
+Selecting previously unselected package r-cran-mass.
+[...]
+Setting up r-base-core (3.1.1-1+b2) ...
+
+Creating config file /etc/R/Renviron with new version
+Setting up r-base-dev (3.1.1-1) ...
+Setting up r-cran-boot (1.3-13-1) ...
+Setting up r-cran-mass (7.3-34-1) ...
+Setting up r-cran-class (7.3-11-1) ...
+[...]
+</code></pre>
+</notextile>
+
+Now we can verify that "R" is installed:
+
+<notextile>
+<pre><code>root@fbf1d0f529d5:/# <span class="userinput">R</span>
+
+R version 3.1.1 (2014-07-10) -- "Sock it to Me"
+Copyright (C) 2014 The R Foundation for Statistical Computing
+Platform: x86_64-pc-linux-gnu (64-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+>
+</code></pre>
+</notextile>
+
+Note that you are not limited to installing Debian packages.  You may compile programs or libraries from source and install them, edit systemwide configuration files, use other package managers such as @pip@ or @gem@, and perform any other customization necessary to run your program.
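+
+For example, assuming @pip@ is available in the image, you could add a Python library in the same container session (the package shown is just an illustration):
+
+<notextile>
+<pre><code>root@fbf1d0f529d5:/# <span class="userinput">pip install numpy</span>
+</code></pre>
+</notextile>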
+
+h2. Create a new image
+
+We're now ready to create a new Docker image.  First, quit the container, then use @docker commit@ to create a new image from the stopped container.  The container id can be found in the default hostname of the container displayed in the prompt, in this case @fbf1d0f529d5@:
+
+<notextile>
+<pre><code>root@fbf1d0f529d5:/# <span class="userinput">exit</span>
+$ <span class="userinput">docker commit fbf1d0f529d5 arvados/jobs-with-r</span>
+sha256:2818853ff9f9af5d7f77979803baac9c4710790ad2b84c1a754b02728fdff205
+$ <span class="userinput">docker images</span>
+REPOSITORY            TAG                 IMAGE ID            CREATED             SIZE
+arvados/jobs-with-r   latest              2818853ff9f9        9 seconds ago       703.1 MB
+arvados/jobs          latest              12b9f859d48c        4 days ago          362 MB
+</code></pre>
+</notextile>
+
+h2. Upload your image
+
+Finally, we are ready to upload the new Docker image to Arvados.  Use @arv-keepdocker@ with the image repository name to upload the image.  Without arguments, @arv-keepdocker@ will print out the list of Docker images in Arvados that are available to you.
+
+<notextile>
+<pre><code>$ <span class="userinput">arv-keepdocker arvados/jobs-with-r</span>
+703M / 703M 100.0%
+Collection saved as 'Docker image arvados/jobs-with-r:latest 2818853ff9f9'
+qr1hi-4zz18-abcdefghijklmno
+$ <span class="userinput">arv-keepdocker</span>
+REPOSITORY                      TAG         IMAGE ID      COLLECTION                     CREATED
+arvados/jobs-with-r             latest      2818853ff9f9  qr1hi-4zz18-abcdefghijklmno    Tue Jan 17 20:35:53 2017
+</code></pre>
+</notextile>
+
+You are now able to specify the runtime environment for your program using @DockerRequirement@ in your workflow:
+
+<pre>
+hints:
+  DockerRequirement:
+    dockerPull: arvados/jobs-with-r
+</pre>
+
+h2. Share Docker images
+
+Docker images are subject to normal Arvados permissions.  If you wish to share your Docker image with others (or share a pipeline template that uses your Docker image), you will need to use @arv-keepdocker@ with the @--project-uuid@ option to upload the image to a shared project.
+
+<notextile>
+<pre><code>$ <span class="userinput">arv-keepdocker arvados/jobs-with-r --project-uuid qr1hi-j7d0g-xxxxxxxxxxxxxxx</span>
+</code></pre>
+</notextile>
diff --git a/doc/user/topics/arv-run.html.textile.liquid b/doc/user/topics/arv-run.html.textile.liquid
new file mode 100644 (file)
index 0000000..9752ca7
--- /dev/null
@@ -0,0 +1,163 @@
+---
+layout: default
+navsection: userguide
+title: "Using arv-run"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'crunch1only_begin' %}
+On those sites, the features described here are not yet implemented.
+{% include 'crunch1only_end' %}
+
+The @arv-run@ command enables you to create Arvados pipelines at the command line that fan out to multiple concurrent tasks across Arvados compute nodes.
+
+{% include 'tutorial_expectations' %}
+
+h1. Usage
+
+Using @arv-run@ you can write and test command lines interactively, then insert @arv-run@ at the beginning of the command line to run the command on Arvados.  For example:
+
+<notextile>
+<pre>
+$ <span class="userinput">cd ~/keep/by_id/3229739b505d2b878b62aed09895a55a+142</span>
+$ <span class="userinput">ls *.fastq</span>
+HWI-ST1027_129_D0THKACXX.1_1.fastq  HWI-ST1027_129_D0THKACXX.1_2.fastq
+$ <span class="userinput">grep -H -n ATTGGAGGAAAGATGAGTGAC HWI-ST1027_129_D0THKACXX.1_1.fastq</span>
+HWI-ST1027_129_D0THKACXX.1_1.fastq:14:TCTGGCCCCTGTTGTCTGCATGTAACTTAATACCACAACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCCCAACCTA
+HWI-ST1027_129_D0THKACXX.1_1.fastq:18:AACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAGGCCAGTAAGTAGTGCTTGTGCTCATCTCCTTGGCT
+HWI-ST1027_129_D0THKACXX.1_1.fastq:30:ATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAGGCCAGTAAGTAGTGCTTGTGCTCATCTCCTTGGCTGTGATACG
+$ <span class="userinput">arv-run grep -H -n ATTGGAGGAAAGATGAGTGAC HWI-ST1027_129_D0THKACXX.1_1.fastq</span>
+Running pipeline qr1hi-d1hrv-mg3bju0u7r6w241
+[...]
+ 0 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq
+ 0 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq:14:TCTGGCCCCTGTTGTCTGCATGTAACTTAATACCACAACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCCCAACCTA
+ 0 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq:18:AACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAGGCCAGTAAGTAGTGCTTGTGCTCATCTCCTTGGCT
+ 0 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq:30:ATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAGGCCAGTAAGTAGTGCTTGTGCTCATCTCCTTGGCTGTGATACG
+ 0 stderr run-command: completed with exit code 0 (success)
+[...]
+</pre>
+</notextile>
+
+A key feature of @arv-run@ is the ability to introspect the command line to determine which arguments are file inputs, and transform those paths so they are usable inside the Arvados container.  In the above example, @HWI-ST1027_129_D0THKACXX.1_1.fastq@ is transformed into @/keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq@.  @arv-run@ also works together with @arv-mount@ to identify that the file is already part of an Arvados collection.  In this case, it will use the existing collection without any upload step.  If you specify a file that is only available on the local filesystem, @arv-run@ will upload a new collection.
+
+If you find that @arv-run@ is incorrectly rewriting one of your command line arguments, place a backslash @\@ at the beginning of the affected argument to quote it (suppress rewriting).
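+
+For example, in this sketch the leading backslash keeps @arv-run@ from treating the path as a local file input, so the literal path is passed through to @grep@ inside the container (the pattern is illustrative):
+
+<notextile>
+<pre><code>$ <span class="userinput">arv-run grep -H pattern \/etc/os-release</span>
+</code></pre>
+</notextile>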
+
+h2. Parallel tasks
+
+@arv-run@ will parallelize over files listed on the command line after @--@.
+
+<notextile>
+<pre>
+$ <span class="userinput">cd ~/keep/by_id/3229739b505d2b878b62aed09895a55a+142</span>
+$ <span class="userinput">ls *.fastq</span>
+HWI-ST1027_129_D0THKACXX.1_1.fastq  HWI-ST1027_129_D0THKACXX.1_2.fastq
+$ <span class="userinput">arv-run grep -H -n ATTGGAGGAAAGATGAGTGAC -- *.fastq</span>
+Running pipeline qr1hi-d1hrv-mg3bju0u7r6w241
+[...]
+ 0 stderr run-command: parallelizing on input0 with items [u'/keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq', u'/keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq']
+[...]
+ 1 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq
+ 2 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq
+[...]
+ 1 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq:14:TCTGGCCCCTGTTGTCTGCATGTAACTTAATACCACAACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCCCAACCTA
+ 1 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq:18:AACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAGGCCAGTAAGTAGTGCTTGTGCTCATCTCCTTGGCT
+ 1 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq:30:ATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAGGCCAGTAAGTAGTGCTTGTGCTCATCTCCTTGGCTGTGATACG
+ 1 stderr run-command: completed with exit code 0 (success)
+ 2 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq:34:CTGGCCCCTGTTGTCTGCATGTAACTTAATACCACAACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAG
+ 2 stderr run-command: completed with exit code 0 (success)
+</pre>
+</notextile>
+
+You may specify @--batch-size N@ (or the short form @-bN@) after the @--@ but before listing any files to specify how many files to put on the command line for each task.  See "Putting it all together" below for an example.
+
+h2. Redirection
+
+You may use standard input (@<@) and standard output (@>@) redirection.  This will create a separate task for each file listed in standard input.  You are only permitted to supply a single file name for stdout @>@ redirection.  If there are multiple tasks with their output sent to the same file, the output will be collated at the end of the pipeline.
+
+(Note: because the syntax is designed to mimic standard shell syntax, it is necessary to quote the metacharacters @<@, @>@ and @|@ as either @\<@, @\>@ and @\|@ or @'<'@, @'>'@ and @'|'@.)
+
+{% include 'arv_run_redirection' %}
+
+You may use "run-command":run-command.html parameter substitution in the output file name to generate different filenames for each task:
+
+<notextile>
+<pre>
+$ <span class="userinput">cd ~/keep/by_id/3229739b505d2b878b62aed09895a55a+142</span>
+$ <span class="userinput">ls *.fastq</span>
+$ <span class="userinput">arv-run grep -H -n ATTGGAGGAAAGATGAGTGAC \< *.fastq \> '$(task.uuid).txt'</span>
+[...]
+ 1 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC < /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq > qr1hi-ot0gb-hmmxf2zubfpmhfk.txt
+ 2 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC < /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq > qr1hi-ot0gb-iu2xgy4hkx4mmri.txt
+ 1 stderr run-command: completed with exit code 0 (success)
+ 1 stderr run-command: the following output files will be saved to keep:
+ 1 stderr run-command:          363 ./qr1hi-ot0gb-hmmxf2zubfpmhfk.txt
+ 1 stderr run-command: start writing output to keep
+ 1 stderr upload wrote 363 total 363
+ 2 stderr run-command: completed with exit code 0 (success)
+ 2 stderr run-command: the following output files will be saved to keep:
+ 2 stderr run-command:          121 ./qr1hi-ot0gb-iu2xgy4hkx4mmri.txt
+ 2 stderr run-command: start writing output to keep
+ 2 stderr upload wrote 121 total 121
+[...]
+</pre>
+</notextile>
+
+h2. Pipes
+
+Multiple commands may be connected by pipes and execute in the same container:
+
+<notextile>
+<pre>
+$ <span class="userinput">cd ~/keep/by_id/3229739b505d2b878b62aed09895a55a+142</span>
+$ <span class="userinput">ls *.fastq</span>
+$ <span class="userinput">arv-run cat -- *.fastq \| grep -H -n ATTGGAGGAAAGATGAGTGAC \> output.txt</span>
+[...]
+ 1 stderr run-command: cat /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq | grep -H -n ATTGGAGGAAAGATGAGTGAC > output.txt
+ 2 stderr run-command: cat /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq | grep -H -n ATTGGAGGAAAGATGAGTGAC > output.txt
+[...]
+</pre>
+</notextile>
+
+If you need to capture intermediate results of a pipe, use the @tee@ command.
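+
+For example, a sketch based on the pipeline above (the intermediate file name is illustrative): @tee@ saves the unfiltered stream while still feeding the next command in the pipe:
+
+<notextile>
+<pre><code>$ <span class="userinput">arv-run cat -- *.fastq \| tee intermediate.txt \| grep -H -n ATTGGAGGAAAGATGAGTGAC \> output.txt</span>
+</code></pre>
+</notextile>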
+
+h2. Running a shell script
+
+<notextile>
+<pre>
+$ <span class="userinput">echo 'echo hello world' > hello.sh</span>
+$ <span class="userinput">arv-run /bin/sh hello.sh</span>
+Upload local files: "hello.sh"
+Uploaded to qr1hi-4zz18-23u3hxugbm71qmn
+Running pipeline qr1hi-d1hrv-slcnhq5czo764b1
+[...]
+ 0 stderr run-command: /bin/sh /keep/5d3a4131b7d8f233f2a917d8a5c3c2b2+52/hello.sh
+ 0 stderr hello world
+ 0 stderr run-command: completed with exit code 0 (success)
+[...]
+</pre>
+</notextile>
+
+h2. Additional options
+
+* @--docker-image IMG@ : By default, commands run in a container created from the image named by the @default_docker_image_for_jobs@ setting on the API server.  Use this option to specify a different image.  Note: the Docker image must be uploaded to Arvados using @arv keep docker@.
+* @--dry-run@ : Print out the final Arvados pipeline generated by @arv-run@ without submitting it.
+* @--local@ : By default, the pipeline will be submitted to your configured Arvados instance.  Use this option to run the command locally using @arv-run-pipeline-instance --run-jobs-here@.
+* @--ignore-rcode@ : Some commands use non-zero exit codes to indicate nonfatal conditions (e.g., @grep@ returns 1 when no match is found).  Set this to indicate that commands that return non-zero return codes should not be considered failed.
+* @--no-wait@ : Do not wait and display logs after submitting command, just exit.
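+
+For example, a quick sketch combining @--dry-run@ with the parallel @grep@ command from earlier, to inspect the generated pipeline without submitting anything:
+
+<notextile>
+<pre><code>$ <span class="userinput">arv-run --dry-run grep -H -n ATTGGAGGAAAGATGAGTGAC -- *.fastq</span>
+</code></pre>
+</notextile>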
+
+h2. Putting it all together: bwa mem
+
+<notextile>
+<pre>
+$ <span class="userinput">cd ~/keep/by_id/d0136bc494c21f79fc1b6a390561e6cb+2778</span>
+$ <span class="userinput">arv-run --docker-image arvados/jobs-java-bwa-samtools bwa mem ../3514b8e5da0e8d109946bc809b20a78a+5698/human_g1k_v37.fasta -- --batch-size 2 *.fastq.gz \> '$(task.uuid).sam'</span>
+ 0 stderr run-command: parallelizing on input0 with items [[u'/keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.1_1.fastq.gz', u'/keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.1_2.fastq.gz'], [u'/keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.2_1.fastq.gz', u'/keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.2_2.fastq.gz']]
+[...]
+ 1 stderr run-command: bwa mem /keep/3514b8e5da0e8d109946bc809b20a78a+5698/human_g1k_v37.fasta /keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.1_1.fastq.gz /keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.1_2.fastq.gz > qr1hi-ot0gb-a4bzzyqqz4ubair.sam
+ 2 stderr run-command: bwa mem /keep/3514b8e5da0e8d109946bc809b20a78a+5698/human_g1k_v37.fasta /keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.2_1.fastq.gz /keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.2_2.fastq.gz > qr1hi-ot0gb-14j9ncw0ymkxq0v.sam
+</pre>
+</notextile>
diff --git a/doc/user/topics/arv-web.html.textile.liquid b/doc/user/topics/arv-web.html.textile.liquid
new file mode 100644 (file)
index 0000000..9671e97
--- /dev/null
@@ -0,0 +1,106 @@
+---
+layout: default
+navsection: userguide
+title: "Using arv-web"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+@arv-web@ enables you to run a custom web service from the contents of an Arvados collection.
+
+{% include 'tutorial_expectations_workstation' %}
+
+h2. Usage
+
+@arv-web@ enables you to set up a web service based on the most recent collection in a project.  An arv-web application is a reproducible, immutable application bundle where the web app is packaged with both the code to run and the data to serve.  Because Arvados collections can be updated with minimal duplication, it is efficient to produce a new application bundle when the code or data needs to be updated; retaining old application bundles makes it easy to go back and run older versions of your web app.
+
+<pre>
+$ cd $HOME/arvados/services/arv-web
+usage: arv-web.py [-h] --project-uuid PROJECT_UUID [--port PORT]
+                  [--image IMAGE]
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --project-uuid PROJECT_UUID
+                        Project uuid to watch
+  --port PORT           Host port to listen on (default 8080)
+  --image IMAGE         Docker image to run
+</pre>
+
+At startup, @arv-web@ queries an Arvados project and mounts the most recently modified collection into a temporary directory.  It then runs a Docker image with the collection bound to @/mnt@ inside the container.  When a new collection is added to the project, or an existing collection is updated, it will stop the running Docker container, unmount the old collection, mount the new most recently modified collection, and restart the Docker container with the new mount.
+
+h2. Docker container
+
+The @Dockerfile@ in @arvados/docker/arv-web@ builds a Docker image that runs Apache with @/mnt@ as the DocumentRoot.  It is configured to run web applications which use Python WSGI, Ruby Rack, or CGI; to serve static HTML; or browse the contents of the @public@ subdirectory of the collection using default Apache index pages.
+
+To build the Docker image:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd arvados/docker</span>
+~/arvados/docker$ <span class="userinput">docker build -t arvados/arv-web arv-web</span>
+</code></pre>
+</notextile>
+
+h2. Running sample applications
+
+First, in Arvados Workbench, create a new project.  Copy the project UUID from the URL bar (this is the part of the URL after @projects/...@).
+
+Now upload a collection containing a "Python WSGI web app:":http://wsgi.readthedocs.org/en/latest/
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd arvados/services/arv-web</span>
+~/arvados/services/arv-web$ <span class="userinput">arv-put --project [zzzzz-j7d0g-yourprojectuuid] --name sample-wsgi-app sample-wsgi-app</span>
+0M / 0M 100.0%
+Collection saved as 'sample-wsgi-app'
+zzzzz-4zz18-ebohzfbzh82qmqy
+~/arvados/services/arv-web$ <span class="userinput">./arv-web.py --project [zzzzz-j7d0g-yourprojectuuid] --port 8888</span>
+2015-01-30 11:21:00 arvados.arv-web[4897] INFO: Mounting zzzzz-4zz18-ebohzfbzh82qmqy
+2015-01-30 11:21:01 arvados.arv-web[4897] INFO: Starting Docker container arvados/arv-web
+2015-01-30 11:21:02 arvados.arv-web[4897] INFO: Container id e79e70558d585a3e038e4bfbc97e5c511f21b6101443b29a8017bdf3d84689a3
+2015-01-30 11:21:03 arvados.arv-web[4897] INFO: Waiting for events
+</code></pre>
+</notextile>
+
+The sample application will be available at @http://localhost:8888@.
+
+h3. Updating the application
+
+If you upload a new collection to the same project, arv-web will restart the web service and serve the new collection.  For example, uploading a collection containing a "Ruby Rack web app:":https://github.com/rack/rack/wiki
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd arvados/services/arv-web</span>
+~/arvados/services/arv-web$ <span class="userinput">arv-put --project [zzzzz-j7d0g-yourprojectuuid] --name sample-rack-app sample-rack-app</span>
+0M / 0M 100.0%
+Collection saved as 'sample-rack-app'
+zzzzz-4zz18-dhhm0ay8k8cqkvg
+</code></pre>
+</notextile>
+
+@arv-web@ will automatically notice the change, load a new container, and send an update signal (SIGHUP) to the service:
+
+<pre>
+2015-01-30 11:21:03 arvados.arv-web[4897] INFO: Waiting for events
+2015-01-30 11:21:04 arvados.arv-web[4897] INFO: create zzzzz-4zz18-dhhm0ay8k8cqkvg
+2015-01-30 11:21:05 arvados.arv-web[4897] INFO: Mounting zzzzz-4zz18-dhhm0ay8k8cqkvg
+2015-01-30 11:21:06 arvados.arv-web[4897] INFO: Sending refresh signal to container
+2015-01-30 11:21:07 arvados.arv-web[4897] INFO: Waiting for events
+</pre>
+
+h2. Writing your own applications
+
+The @arvados/arv-web@ image serves Python and Ruby applications using Phusion Passenger and Apache @mod_passenger@.  See "Phusion Passenger users guide for Apache":https://www.phusionpassenger.com/documentation/Users%20guide%20Apache.html for details, and look at the sample apps @arvados/services/arv-web/sample-wsgi-app@ and @arvados/services/arv-web/sample-rack-app@.
+
+You can serve CGI applications using standard Apache CGI support.  See "Apache Tutorial: Dynamic Content with CGI":https://httpd.apache.org/docs/current/howto/cgi.html for details, and look at the sample app @arvados/services/arv-web/sample-cgi-app@.
+
+You can also serve static content from the @public@ directory of the collection.  Look at @arvados/services/arv-web/sample-static-page@ for an example.  If no @index.html@ is found in @public/@, it will render default Apache index pages, permitting simple browsing of the collection contents.
+
+h3. Custom images
+
+You can provide your own Docker image.  The Docker image that will be used to create the web application container is specified in the @docker_image@ file in the root of the collection.  You can also specify @--image@ on the @arv-web@ command line to choose the Docker image (this will override the contents of @docker_image@).
+
+h3. Reloading the web service
+
+Stopping the Docker container and starting it again can result in a small amount of downtime.  When the collection containing a new or updated web application uses the same Docker image as the currently running web application, it is possible to avoid this downtime by keeping the existing container and only reloading the web server.  This is accomplished by providing a file called @reload@ in the root of the collection, which should contain the commands necessary to reload the web server inside the container.
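+
+As a sketch, assuming the stock @arvados/arv-web@ image (which serves content with Apache), a @reload@ file might look like this:
+
+<notextile>
+<pre><code>#!/bin/sh
+# Hypothetical reload script: gracefully reload Apache inside the
+# container without stopping it.
+apache2ctl graceful
+</code></pre>
+</notextile>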
diff --git a/doc/user/topics/arvados-sync-groups.html.textile.liquid b/doc/user/topics/arvados-sync-groups.html.textile.liquid
new file mode 100644 (file)
index 0000000..9a60903
--- /dev/null
@@ -0,0 +1,53 @@
+---
+layout: default
+navsection: admin
+title: "Synchronizing external groups"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The @arvados-sync-groups@ tool allows you to synchronize groups in Arvados from an external source.
+
+h1. Using arvados-sync-groups
+
+This tool reads a CSV (comma-separated values) file containing information about external groups and their members. When run for the first time, it will create a special group named 'Externally synchronized groups' meant to be the parent of all the remote groups.
+
+Every line in the file should have 2 values: a group name and a local user identifier, meaning that the named user is a member of the group. The tool will create the group if it doesn't exist, and add the user to it. If a group member is not present in the input file, the account will be removed from the group.
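+
+For example, a minimal input file might look like this (the group names and addresses are made up):
+
+<notextile>
+<pre><code>Analysts,lisa@example.com
+Analysts,bart@example.com
+Operators,homer@example.com
+</code></pre>
+</notextile>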
+
+Users can be identified by their email address or username: the tool will check that every user exists on the system, and report back when one is not found. Groups, on the other hand, are identified by their name.
+
+This tool is designed to be run periodically, reading a file created by a dump script from a remote auth system (e.g., LDAP) and applying the file's contents as the source of truth.
+
+
+bq. NOTE: @arvados-sync-groups@ needs to perform several administrative tasks on Arvados, so it must be run using a superuser token.
+
+h2. Options
+
+The following command line options are supported:
+
+table(table table-bordered table-condensed).
+|_. Option |_. Description |
+|==--help==|             This list of options|
+|==--parent-group-uuid==|   UUID of group to own all the externally synchronized groups|
+|==--user-id== |            Identifier to use in looking up user. One of 'email' or 'username' (Default: 'email')|
+|==--verbose==|             Log informational messages (Default: False)|
+|==--version==|             Print version and exit|
+
+h2. Examples
+
+To sync groups using the username to identify every account, reading from an @external_groups.csv@ file, the command should be called as follows:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arvados-sync-groups --user-id username /path/to/external_groups.csv </span>
+</code></pre>
+</notextile>
+
+If you want to use a specific preexisting group as the parent of all the remote groups, you can do it this way:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arvados-sync-groups --parent-group-uuid &lt;preexisting group UUID&gt; --user-id username /path/to/external_groups.csv </span>
+</code></pre>
+</notextile>
diff --git a/doc/user/topics/collection-versioning.html.textile.liquid b/doc/user/topics/collection-versioning.html.textile.liquid
new file mode 100644 (file)
index 0000000..01670d8
--- /dev/null
@@ -0,0 +1,107 @@
+---
+layout: default
+navsection: userguide
+title: Using collection versioning
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+When collection versioning is enabled, updating certain collection attributes (@name@, @description@, @properties@, @manifest_text@) will save a copy of the collection state, previous to the update. This copy (a new collection record) will have its own @uuid@, and a @current_version_uuid@ attribute pointing to the current version's @uuid@.
+
+Every collection has a @version@ attribute that indicates its version number, starting at 1 on new collections and incrementing by 1 with every versionable update. All collections point to their most current version via the @current_version_uuid@ attribute; on a record that is the current version of itself, @uuid@ and @current_version_uuid@ are equal. Note that the "current version" record never changes its @uuid@: "past versions" are saved as new records whenever needed, pointing back to the current record.
+
+A version will be saved when either of the following conditions is met:
+
+The first is "configuring (system-wide) the collection's idle time":{{site.baseurl}}/admin/collection-versioning.html. This idle time is checked against the @modified_at@ attribute, so a version is saved when one or more of the attributes enumerated above is updated and @modified_at@ is at least the configured idle time in the past. This way, a frequently updated collection won't create lots of version records of little use.
+
+The second is setting @preserve_version@ to @true@ on the current version collection record: this ensures that the current state will be preserved as a version the next time the record gets updated.
+
+h3. Past versions: behavior and limitations
+
+Past version collection records are read-only. If you need to make changes to one of them, the suggested approach is to copy it into a new collection before updating.
+
+Some attributes are automatically synced when they change on the current version: @owner_uuid@, @delete_at@, @trash_at@, @is_trashed@, @replication_desired@ and @storage_classes_desired@. This way, old versions follow the current one across several configuration changes. In the special case that a current version's @uuid@ gets updated, its past versions are also updated to point to the newer UUID. When a collection is deleted, any past versions are deleted along with it.
+
+Permissions on past versions are the same as on their current version; the system does not allow attaching permission links to old versions. If you need to give someone special access to a particular old version, the correct procedure is to copy it as a new collection.
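+
+For instance, here is a minimal sketch of copying a past version into a new collection using the @arv@ CLI together with @jq@ (the past version UUID is the one used in the examples below; the collection name is hypothetical):
+
+<pre>
+$ arv collection get --uuid o967z-4zz18-i3ucessyo6xxadt > past.json
+$ arv collection create --collection "{\"name\": \"Copy of version 1\", \"manifest_text\": $(jq .manifest_text past.json)}"
+</pre>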
+
+h3. Example: Accessing past versions of a collection
+
+To retrieve a particular collection with all its versions, request a list filtered on the current version's UUID, passing the @include_old_versions@ query parameter. For example, using the @arv@ command line client:
+
+<pre>
+$ arv collection index --filters '[["current_version_uuid", "=", "o967z-4zz18-ynmlhyjbg1arnr2"]]' --include-old-versions
+{
+ "items":[
+  {
+   "uuid":"o967z-4zz18-i3ucessyo6xxadt",
+   "created_at":"2018-10-05T14:43:38.916885000Z",
+   "modified_at":"2018-10-05T14:44:31.098019000Z",
+   "version":1,
+   "current_version_uuid":"o967z-4zz18-ynmlhyjbg1arnr2"
+  },
+  {
+   "uuid":"o967z-4zz18-ynmlhyjbg1arnr2",
+   "created_at":"2018-10-05T14:43:38.916885000Z",
+   "modified_at":"2018-10-05T14:44:31.078643000Z",
+   "version":2,
+   "current_version_uuid":"o967z-4zz18-ynmlhyjbg1arnr2"
+  }
+ ],
+ "items_available":2
+}
+</pre>
+
+To access a specific collection version using filters:
+
+<pre>
+$ arv collection index --filters '[["current_version_uuid", "=", "o967z-4zz18-ynmlhyjbg1arnr2"], ["version", "=", 1]]' --include-old-versions
+{
+ "items":[
+  {
+   "uuid":"o967z-4zz18-i3ucessyo6xxadt",
+   "created_at":"2018-10-05T14:43:38.916885000Z",
+   "modified_at":"2018-10-05T14:44:31.098019000Z",
+   "version":1,
+   "current_version_uuid":"o967z-4zz18-ynmlhyjbg1arnr2"
+  }
+ ],
+ "items_available":1
+}
+</pre>
+
+You can also access it directly via a GET request using its UUID:
+
+<pre>
+$ arv collection get --uuid o967z-4zz18-i3ucessyo6xxadt
+{
+ "uuid":"o967z-4zz18-i3ucessyo6xxadt",
+ "created_at":"2018-10-05T14:43:38.916885000Z",
+ "modified_at":"2018-10-05T14:44:31.098019000Z",
+ "version":1,
+ "current_version_uuid":"o967z-4zz18-ynmlhyjbg1arnr2"
+}
+</pre>
+
+h3. Example: Ensuring a version is preserved
+
+As stated before, regardless of the cluster's collection auto-save idle time configuration, the user can request that a particular collection state be preserved.
+
+When working on a collection, if there's a need to preserve the current state as a new version, the @preserve_version@ attribute should be set to @true@. This will trigger a new version creation on the next update, keeping this "version 2" state as a snapshot.
+
+<pre>
+$ arv collection update --uuid o967z-4zz18-ynmlhyjbg1arnr2 -c '{"preserve_version":true}'
+{
+ "uuid":"o967z-4zz18-ynmlhyjbg1arnr2",
+ "created_at":"2018-10-05T14:43:38.916885000Z",
+ "modified_at":"2018-10-05T15:12:57.986454000Z",
+ "version":2,
+ "current_version_uuid":"o967z-4zz18-ynmlhyjbg1arnr2",
+ "preserve_version":true
+}
+</pre>
+
+Once the @preserve_version@ attribute is set to @true@, it cannot be changed back to @false@; it is only reset when a versionable update on the collection triggers a version save.
diff --git a/doc/user/topics/crunch-tools-overview.html.textile.liquid b/doc/user/topics/crunch-tools-overview.html.textile.liquid
new file mode 100644 (file)
index 0000000..c4d01cf
--- /dev/null
@@ -0,0 +1,70 @@
+---
+layout: default
+navsection: userguide
+title: "Tools for writing Crunch pipelines"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'pipeline_deprecation_notice' %}
+
+Arvados includes a number of tools to help you develop pipelines and jobs for Crunch.  This overview explains each tool's intended use to help you choose the right one.
+
+h2. Use the "arv-run command-line utility":arv-run.html
+
+arv-run is an interactive command-line tool.  You run it as the first command of a traditional Unix shell command line, and it converts that work into an Arvados pipeline.  It automatically uploads any required data to Arvados, and dispatches work in parallel when possible.  This lets you easily migrate analysis work that you're doing on the command line to Arvados compute nodes.
+
+arv-run is best suited to complement work you already do on the command line.  If you write a shell one-liner that generates useful data, you can then call it with arv-run to parallelize it across a larger data set and save the results in Arvados.  For example, this run searches multiple FASTQ files in parallel, and saves the results to Keep through shell redirection:
+
+{% include 'arv_run_redirection' %}
+
+arv-run does not generate pipeline templates, or implement higher-level shell constructs like flow control.  If you want to make it easy to rerun your pipeline with different data later, or adapt to different inputs, it's best to write your own template.
+
+Refer to the "arv-run documentation":arv-run.html for details.
+
+h2. Write a "pipeline template":{{site.baseurl}}/user/tutorials/running-external-program.html
+
+Pipeline templates describe a set of analysis programs that should be run, and the inputs they require.  You can provide a high-level description of how data flows through the pipeline—for example, the outputs of programs A and B are provided as input to program C—and let Crunch take care of the details of starting the individual programs at the right time with the inputs you specified.
+
+Pipeline templates are written in JSON.  Once you save a pipeline template in Arvados, you run it by creating a pipeline instance that lists the specific inputs you'd like to use.  Arvados Workbench and the @arv pipeline run@ command-line tool both provide high-level interfaces to do this easily.  The pipeline's final output(s) will be saved in a project you specify.
+
+See the User Guide topic to learn how to "write and run your own pipelines":{{site.baseurl}}/user/tutorials/running-external-program.html.  The rest of this page suggests specific tools to use in your templates.
+
+h3. The "run-command Crunch script":run-command.html
+
+run-command is a Crunch script that is included with Arvados.  It builds a command line from its input parameters.  It runs that command on files in Collections using the Keep mount provided by Crunch.  Output files created by the command are saved in a new collection, which is considered the program's final output.  It can run the command in parallel on a list of inputs, and introspect arguments so you can, for example, generate output filenames based on input filenames.
+
+run-command is a great way to use an existing analysis tool inside an Arvados pipeline.  You might use one or two tools in a larger pipeline, or convert a simple series of tool invocations into a pipeline to benefit from Arvados' provenance tracking and job reuse.  For example, here's a one-step pipeline that uses run-command with bwa to align a single paired-end read FASTQ sample:
+
+<notextile>{% code 'run_command_simple_example' as javascript %}</notextile>
+
+run-command is limited to manipulating the tool's command-line arguments, and can only parallelize on simple lists of inputs.  If you need to preprocess input, or dispatch work differently based on those inputs, consider writing your own Crunch script.
+
+Refer to the "run-command reference":run-command.html for details.
+
+h3. Writing "your own Crunch script":{{site.baseurl}}/user/tutorials/tutorial-firstscript.html with the Python SDK
+
+Arvados includes a Python SDK designed to help you write your own Crunch scripts.  It provides a native Arvados API client; Collection classes that provide file-like objects to interact with data in Keep; and utility functions to work within Crunch's execution environment.  Using the Python SDK, you can efficiently dispatch work with however much sophistication you require.
+
+Writing your own Crunch script is the best way to do analysis in Arvados when an existing tool does not meet your needs.  By interacting directly with Arvados objects, you'll have full power to introspect and adapt to your input, introduce minimal overhead, and get very direct error messages in case there's any trouble.  As a simple example, here's a Crunch script that checksums each file in a collection in parallel, saving the results in Keep:
+
+<notextile>{% code 'tutorial_hash_script_py' as python %}</notextile>
+
+There's no limit to what you can do with your own Crunch script.  The downside is the amount of time and effort you're required to invest to write and debug new code.  If you have to do that anyway, writing a Crunch script will give you the most benefit from using Arvados.
+
+Refer to the "User Guide topic on writing Crunch scripts":{{site.baseurl}}/user/tutorials/tutorial-firstscript.html and the "Python SDK reference":{{site.baseurl}}/sdk/python/python.html for details.
+
+h3. Combining run-command and custom Crunch scripts in a pipeline
+
+Just because you need to write some new code to do some work doesn't mean that you have to do all the work in your own Crunch script.  You can combine your custom steps with existing tools in a pipeline, passing data between them.  For example, maybe there's a third-party tool that does most of the analysis work you need, but you often need to massage the tool's data.  You could write your own preprocessing script that creates a new collection to use as the input of a run-command job, or a postprocessing script to create a final output after the tool is done, and tie them all together in a pipeline.  Just like Unix pipes, Arvados pipelines let you combine smaller tools to maximize utility.
+
+h3. Using run-command with your legacy scripts
+
+Perhaps you've already written your own analysis program that you want to run inside Arvados.  Currently, the easiest way to do that is to copy run-command from the Arvados source code to your own Arvados git repository, along with your internal tool.  Then your pipeline can call run-command from your own repository to execute the internal tool alongside it.
+
+This approach has the downside that you'll have to copy and push run-command again any time there's an update you'd like to use.  Future Arvados development will make it possible to get code from multiple git repositories, so your job can use the latest run-command in the Arvados source, as well as the latest tool in your own git repository.  Follow "Arvados issue #4561":https://arvados.org/issues/4561 for updates.
+
+Alternatively, you can "build a Docker image that includes your program, add it to Arvados":arv-docker.html, then run the Arvados run-command script inside that Docker image.
diff --git a/doc/user/topics/keep.html.textile.liquid b/doc/user/topics/keep.html.textile.liquid
new file mode 100644 (file)
index 0000000..dd50439
--- /dev/null
@@ -0,0 +1,59 @@
+---
+layout: default
+navsection: userguide
+title: "How Keep works"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Arvados distributed file system is called *Keep*.  Keep is a content-addressable file system.  This means that files are managed using special unique identifiers derived from the _contents_ of the file (specifically, the MD5 hash), rather than human-assigned file names.  This has a number of advantages:
+* Files can be stored and replicated across a cluster of servers without requiring a central name server.
+* Both the server and client systematically validate data integrity because the checksum is built into the identifier.
+* Data duplication is minimized—two files with the same contents will have the same identifier, and will not be stored twice.
+* It avoids data race conditions, since an identifier always points to the same data.
+
+In Keep, information is stored in *data blocks*.  Data blocks are normally between 1 byte and 64 megabytes in size.  If a file exceeds the maximum size of a single data block, the file will be split across multiple data blocks until the entire file can be stored.  These data blocks may be stored and replicated across multiple disks, servers, or clusters.  Each data block has its own identifier for the contents of that specific data block.
+
+In order to reassemble the file, Keep stores a *collection* data block which lists in sequence the data blocks that make up the original file.  A collection data block may store the information for multiple files, including a directory structure.
+
+In this example we will use @c1bad4b39ca5a924e481008009d94e32+210@, which we added to Keep in "how to upload data":{{ site.baseurl }}/user/tutorials/tutorial-keep.html.  First let us examine the contents of this collection using @arv keep get@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep get c1bad4b39ca5a924e481008009d94e32+210</span>
+. 204e43b8a1185621ca55a94839582e6f+67108864 b9677abbac956bd3e86b1deb28dfac03+67108864 fc15aff2a762b13f521baf042140acec+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:227212247:var-GS000016015-ASM.tsv.bz2
+</code></pre>
+</notextile>
+
+The command @arv keep get@ fetches the contents of the collection @c1bad4b39ca5a924e481008009d94e32+210@.  In this example, this collection includes a single file @var-GS000016015-ASM.tsv.bz2@ which is 227212247 bytes long, and is stored using four sequential data blocks, @204e43b8a1185621ca55a94839582e6f+67108864@, @b9677abbac956bd3e86b1deb28dfac03+67108864@, @fc15aff2a762b13f521baf042140acec+67108864@, and @323d2a3ce20370c4ca1d3462a344f8fd+25885655@.
+
+Let's use @arv keep get@ to download the first data block:
+
+notextile. <pre><code>~$ <span class="userinput">cd /scratch/<b>you</b></span>
+/scratch/<b>you</b>$ <span class="userinput">arv keep get 204e43b8a1185621ca55a94839582e6f+67108864 &gt; block1</span></code></pre>
+
+{% include 'notebox_begin' %}
+
+When you run this command, you may get this API warning:
+
+notextile. <pre><code>WARNING:root:API lookup failed for collection 204e43b8a1185621ca55a94839582e6f+67108864 (&lt;class 'apiclient.errors.HttpError'&gt;: &lt;HttpError 404 when requesting https://qr1hi.arvadosapi.com/arvados/v1/collections/204e43b8a1185621ca55a94839582e6f%2B67108864?alt=json returned "Not Found"&gt;)</code></pre>
+
+This happens because @arv keep get@ tries to find a collection with this identifier.  When that fails, it emits this warning, then looks for a datablock instead, which succeeds.
+
+{% include 'notebox_end' %}
+
+Let's look at the size and compute the MD5 hash of @block1@:
+
+<notextile>
+<pre><code>/scratch/<b>you</b>$ <span class="userinput">ls -l block1</span>
+-rw-r--r-- 1 you group 67108864 Dec  9 20:14 block1
+/scratch/<b>you</b>$ <span class="userinput">md5sum block1</span>
+204e43b8a1185621ca55a94839582e6f  block1
+</code></pre>
+</notextile>
+
+Notice that the block identifier <code>204e43b8a1185621ca55a94839582e6f+67108864</code> consists of:
+* the MD5 hash of @block1@, @204e43b8a1185621ca55a94839582e6f@, plus
+* the size of @block1@, @67108864@.
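+
+As a sketch of how these pieces fit together, you can reassemble the original file by concatenating its data blocks in the order listed in the manifest (this downloads all four blocks, roughly 227 MB):
+
+<notextile>
+<pre><code>/scratch/<b>you</b>$ <span class="userinput">for block in 204e43b8a1185621ca55a94839582e6f+67108864 b9677abbac956bd3e86b1deb28dfac03+67108864 fc15aff2a762b13f521baf042140acec+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655; do arv keep get "$block"; done &gt; var-GS000016015-ASM.tsv.bz2</span>
+/scratch/<b>you</b>$ <span class="userinput">ls -l var-GS000016015-ASM.tsv.bz2</span>
+</code></pre>
+</notextile>
+
+The resulting file should be 227212247 bytes long, matching the file size recorded in the manifest.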
diff --git a/doc/user/topics/link-accounts.html.textile.liquid b/doc/user/topics/link-accounts.html.textile.liquid
new file mode 100644 (file)
index 0000000..3854bf6
--- /dev/null
@@ -0,0 +1,38 @@
+---
+layout: default
+navsection: userguide
+title: "Linking alternate login accounts"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how to link additional login accounts to the same Arvados account.  This can be used to migrate login accounts, for example, from one Google account to another.  It can also be used to migrate login providers, for example from LDAP to Google.  In order to do this, you must be able to log into both the "old" and "new" accounts.
+
+h2. Link accounts
+
+Follow this process to link the "new" login to the "old" login.
+
+# Log in using the "old" account
+# Under the users menu, choose *Link account*
+# On the link accounts page, press the button *Add another login to this account*
+# Follow login instructions from the login provider (e.g. Google)
+# You will be returned to the *Link accounts* confirmation page.
+# Press the *Link account* button to confirm.
+# After the accounts are linked, you will be returned to the dashboard.
+# Both the "old" and "new" logins will now log in to the same Arvados account.
+
+h2. Link accounts (alternate flow)
+
+You can also link accounts starting with logging into the "new" account first.
+
+# Log in using the "new" account
+# Under the users menu, choose *Link account* (if the user is inactive, there will be a link on the inactive user page)
+# On the link accounts page, press the button *Use this login to access another account*
+# Follow login instructions from the login provider (e.g. Google)
+# You will be returned to the *Link accounts* confirmation page.
+# Press the *Link account* button to confirm.
+# After the accounts are linked, you will be returned to the dashboard.
+# Both the "old" and "new" logins will now log in to the same Arvados account.
diff --git a/doc/user/topics/run-command.html.textile.liquid b/doc/user/topics/run-command.html.textile.liquid
new file mode 100644 (file)
index 0000000..6996475
--- /dev/null
@@ -0,0 +1,319 @@
+---
+layout: default
+navsection: userguide
+title: "run-command reference"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'pipeline_deprecation_notice' %}
+
+The @run-command@ crunch script enables you to run command line programs.
+
+{% include 'tutorial_expectations_workstation' %}
+
+h1. Using run-command
+
+The basic @run-command@ process evaluates its inputs and builds a command line, executes the command, and saves the contents of the output directory back to Keep.  For large datasets, @run-command@ can schedule concurrent tasks to execute the wrapped program over a range of inputs (see @task.foreach@ below.)
+
+@run-command@ is controlled through the @script_parameters@ section of a pipeline component.  @script_parameters@ is a JSON object consisting of key-value pairs.  There are three categories of keys that are meaningful to run-command:
+* The @command@ section, which defines the template used to build the task's command line
+* Special processing directives such as @task.foreach@, @task.cwd@, @task.vwd@, @task.stdin@, and @task.stdout@
+* User-defined parameters (everything else)
+
+In the following examples, you can use "dry run mode" to determine the command line that @run-command@ will use without actually running the command.  For example:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $HOME/arvados/crunch_scripts</span>
+~$ <span class="userinput">./run-command --dry-run --script-parameters '{
+  "command": ["echo", "hello world"]
+}'</span>
+run-command: echo hello world
+</code></pre>
+</notextile>
+
+h2. Command template
+
+The value of the "command" key is a list.  The first item of the list is the actual program to invoke, followed by the command arguments.  The simplest @run-command@ invocation runs a program with static parameters.  In this example, we run "echo" with the first argument "hello world":
+
+<pre>
+{
+  "command": ["echo", "hello world"]
+}
+</pre>
+
+Running this job will print "hello world" to the job log.
+
+By default, the command will start with the current working directory set to the output directory.  Anything written to the output directory will be saved to Keep when the command is finished.  You can change the default working directory using @task.cwd@ and get the path to the output directory using @$(task.outdir)@ as explained below.
+
+Items in the "command" list may include lists and objects in addition to strings.  Lists are flattened to produce the final command line.  JSON objects are evaluated as list item functions (see below).  For example, the following evaluates to @["echo", "hello", "world"]@:
+
+<pre>
+{
+  "command": ["echo", ["hello", "world"]]
+}
+</pre>
+
+Finally, if "command" is a list of lists, it specifies a Unix pipeline where the standard output of the previous command is piped into the standard input of the next command.  The following example describes the Unix pipeline @cat foo | grep bar@:
+
+<pre>
+{
+  "command": [["cat", "foo"], ["grep", "bar"]]
+}
+</pre>
+
+h2. Parameter substitution
+
+The "command" list can include parameter substitutions.  Substitutions are enclosed in "$(...)" and may contain the name of a user-defined parameter or a call to one of the substitution functions listed below.  In the following example, the value of "a" is a path to a file within a collection; when "command" is evaluated, @$(file $(a))@ expands to a local filesystem path where that file can be accessed:
+
+<pre>
+{
+  "a": "c1bad4b39ca5a924e481008009d94e32+210/var-GS000016015-ASM.tsv.bz2",
+  "command": ["echo", "$(file $(a))"]
+}
+</pre>
+
+table(table table-bordered table-condensed).
+|_. Function|_. Action|
+|$(file ...)       | Takes a reference to a file within an Arvados collection and evaluates to a file path on the local file system where that file can be accessed by your command.  Will raise an error if the file is not accessible.|
+|$(dir ...)        | Takes a reference to an Arvados collection or directory within an Arvados collection and evaluates to a directory path on the local file system where that directory can be accessed by your command.  The path may include a file name, in which case it will evaluate to the parent directory of the file.  Uses Python's os.path.dirname(), so "/foo/bar" will evaluate to "/foo" but "/foo/bar/" will evaluate to "/foo/bar".  Will raise an error if the directory is not accessible. |
+|$(basename&nbsp;...)   | Strip leading directory and trailing file extension from the path provided.  For example, $(basename /foo/bar.baz.txt) will evaluate to "bar.baz".|
+|$(glob ...)       | Take a Unix shell path pattern (supports @*@ @?@ and @[]@) and search the local filesystem, returning the first match found.  Use together with $(dir ...) to get a local filesystem path for Arvados collections.  For example: $(glob $(dir $(mycollection))/*.bam) will find the first .bam file in the collection specified by the user parameter "mycollection".  If there is more than one match, which one is returned is undefined.  Will raise an error if no matches are found.|
+|$(task.tmpdir)|Designated temporary directory.  This directory will be discarded when the job completes.|
+|$(task.outdir)|Designated output directory.  The contents of this directory will be saved to Keep when the job completes.  A symlink to a file in the Keep mount will reference existing Keep blocks in your job output collection, with no data copying or duplication.|
+|$(job.srcdir)|Path to the git working directory ($CRUNCH_SRC).|
+|$(node.cores)|Number of CPU cores on the node.|
+|$(job.uuid)|Current job uuid ($JOB_UUID)|
+|$(task.uuid)|Current task uuid ($TASK_UUID)|
+
+h3. Escape sequences
+
+If your command includes a @$()@ sequence that shouldn't be interpreted by run-command&mdash;for example, because you're writing shell code that calls a subcommand&mdash;you can prevent run-command from interpreting it by placing a backslash in front of the @$@ character.  Note that JSON also uses backslash to escape characters, so you'll need to write two backslashes for run-command to see one after parsing the parameter.  This example uppercases all alphabetic characters in the "pattern" parameter before using it as a regular expression in grep:
+
+<pre>{"command": ["bash", "-c", "grep \\$(echo '$(pattern)' | tr a-z A-Z) '$(input)'"]}</pre>
+
+You can put a literal backslash in your command by escaping it with another backslash.  Ultimately this means that where the primary Unix command includes a single backslash, you'll need to write four backslashes: double the backslashes for run-command escaping, then double them again for JSON escaping.
+
+<pre>{"command": ["grep", "\\\\bword\\\\b", "$(input)"]}</pre>
+
+h2. List context
+
+Where specified by the documentation, parameters may be evaluated in a "list context".  That means the value will evaluate to a list instead of a string.  Parameter values can be a static list, a path to a file, a path to a directory, or a JSON object describing a list context function.
+
+If the value is a string, it is interpreted as a path.  If the path specifies a regular file, that file will be opened as a text file, producing a list with one item for each line in the file (end-of-line characters are stripped).  If the path specifies a directory, the result is a list containing all of the entries in the directory.  Note that parameter expansion is not performed on list items produced this way.
+
+If the value is a static list, it will evaluate each item and return the expanded list.  Each item may be a string (evaluated for parameter substitution), a list (recursively evaluated), or a JSON object (indicating a list function, described below).
+
+If the value is a JSON object, it is evaluated as a list function described below.
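+
+For example, here is a sketch in which the user parameter @sample_list@ names a text file with one sample name per line (the @samples.txt@ file and the @process_samples@ tool are hypothetical); evaluated in a list context by @foreach@ (described below), each line becomes one @--sample@ argument:
+
+<pre>
+{
+  "sample_list": "c1bad4b39ca5a924e481008009d94e32+210/samples.txt",
+  "command": ["process_samples", {"foreach": "$(sample_list)",
+                                  "var": "sample",
+                                  "command": ["--sample", "$(sample)"]}]
+}
+</pre>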
+
+h2. List functions
+
+When @run-command@ is evaluating a list (such as "command"), in addition to string parameter substitution, you can use list item functions.  In the following functions, you specify the name of a user parameter to act on (@"$(a)"@ in the first example); the value of that user parameter will be evaluated in a list context (as described above) to get the list value. Alternatively, you can provide the list value directly inline.  As an example, the following two fragments yield the same result:
+
+<pre>
+{
+  "a": ["alice", "bob"],
+  "command": ["echo", {"foreach": "$(a)",
+                       "var": "a_var",
+                       "command": ["--something", "$(a_var)"]}]
+}
+</pre>
+
+<pre>
+{
+  "command": ["echo", {"foreach": ["alice", "bob"],
+                       "var": "a_var",
+                       "command": ["--something", "$(a_var)"]}]
+}
+</pre>
+
+Note: when you provide the list inline with "foreach" or "index", you must include the "var" parameter to specify the substitution variable name to use when evaluating the command fragment.
+
+You can also nest functions.  This filters @["alice", "bob", "betty"]@ on the regular expression @"b.*"@ to get the list @["bob", "betty"]@, assigns @a_var@ to each value of the list, then expands @"command"@ to get @["--something", "bob", "--something", "betty"]@.
+
+<pre>
+{
+  "command": ["echo", {"foreach": {"filter": ["alice", "bob", "betty"],
+                                   "regex": "b.*"},
+                       "var": "a_var",
+                       "command": ["--something", "$(a_var)"]}]
+}
+</pre>
+
+h3. foreach
+
+The @foreach@ list item function (not to be confused with the @task.foreach@ directive) expands a command template for each item in the specified user parameter (the value of the user parameter is evaluated in a list context, as described above).  The following example will evaluate "command" to @["echo", "--something", "alice", "--something", "bob"]@:
+
+<pre>
+{
+  "a": ["alice", "bob"],
+  "command": ["echo", {"foreach": "$(a)",
+                       "var": "a_var",
+                       "command": ["--something", "$(a_var)"]}]
+}
+</pre>
+
+h3. index
+
+This function extracts a single item from a list.  The value of @index@ is zero-based (i.e. the first item is at index 0, the second at index 1, and so on).  The following example will evaluate "command" to @["echo", "--something", "bob"]@:
+
+<pre>
+{
+  "a": ["alice", "bob"],
+  "command": ["echo", {"list": "$(a)",
+                       "var": "a_var",
+                       "index": 1,
+                       "command": ["--something", "$(a_var)"]}]
+}
+</pre>
+
+h3. filter
+
+Filter the list so that it only includes items that match a regular expression.  The following example will evaluate to @["echo", "bob"]@:
+
+<pre>
+{
+  "a": ["alice", "bob"],
+  "command": ["echo", {"filter": "$(a)",
+                       "regex": "b.*"}]
+}
+</pre>
+
+h3. group
+
+Generate a list of lists, where items are grouped on common subexpression match.  Items which don't match the regular expression are excluded.  In the following example, the subexpression is @(a?)@, resulting in two groups: strings that contain the letter 'a' and strings that do not.  It evaluates to @["echo", "--group", "alice", "carol", "dave", "--group", "bob", "betty"]@:
+
+<pre>
+{
+  "a": ["alice", "bob", "betty", "carol", "dave"],
+  "b": {"group": "$(a)",
+        "regex": "[^a]*(a?).*"},
+  "command": ["echo", {"foreach": "$(b)",
+                       "var": "b_var",
+                       "command": ["--group", "$(b_var)"]}]
+}
+</pre>
+
+h3. extract
+
+Generate a list of lists, where items are split by subexpression match.  Items which don't match the regular expression are excluded.  The following example evaluates to @["echo", "--something", "c", "a", "rol", "--something", "d", "a", "ve"]@:
+
+<pre>
+{
+  "a": ["alice", "bob", "carol", "dave"],
+  "b": {"extract": "$(a)",
+        "regex": "(.+)(a)(.*)"},
+  "command": ["echo", {"foreach": "$(b)",
+                       "var": "b_var",
+                       "command": ["--something", "$(b_var)"]}]
+}
+</pre>
+
+h3. batch
+
+Generate a list of lists, where items are grouped into batches of the given size.  If the list does not divide evenly into batches, the last batch will be short.  The following example evaluates to @["echo", "--something", "alice", "bob", "--something", "carol", "dave"]@:
+
+<pre>
+{
+  "a": ["alice", "bob", "carol", "dave"],
+  "command": ["echo", {"foreach":{"batch": "$(a)",
+                                  "size": 2},
+                       "var": "a_var",
+                       "command": ["--something", "$(a_var)"]}]
+}
+</pre>
+
+h2. Directives
+
+Directives alter the behavior of run-command.  All directives are optional.
+
+h3. task.cwd
+
+This directive sets the initial current working directory in which your command will run.  If @task.cwd@ is not specified, the default current working directory is @task.outdir@.
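+
+A minimal sketch (assuming @task.cwd@ accepts the same parameter substitutions as @command@) that runs the command in the temporary directory instead of the output directory:
+
+<pre>
+{
+  "command": ["ls", "-l"],
+  "task.cwd": "$(task.tmpdir)"
+}
+</pre>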
+
+h3. task.ignore_rcode
+
+By Unix convention a task which exits with a non-zero return code is considered failed.  However, some programs (such as @grep@) return non-zero codes for conditions that should not be considered fatal errors.  Set @"task.ignore_rcode": true@ to indicate the task should always be considered a success regardless of the return code.
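+
+For example, this sketch counts matches with @grep -c@, which exits non-zero when there are no matches; setting @task.ignore_rcode@ keeps a zero-match result from being treated as a task failure (the input path and pattern are illustrative only):
+
+<pre>
+{
+  "input": "c1bad4b39ca5a924e481008009d94e32+210/var-GS000016015-ASM.tsv.bz2",
+  "command": ["grep", "-c", "somepattern", "$(file $(input))"],
+  "task.ignore_rcode": true
+}
+</pre>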
+
+h3. task.stdin and task.stdout
+
+Provide standard input and standard output redirection.
+
+@task.stdin@ must evaluate to a path to a file to be bound to the standard input stream of the command.  When the command describes a Unix pipeline, this goes into the first command.
+
+@task.stdout@ specifies the desired file name in the output directory to save the content of standard output.  When the command describes a Unix pipeline, this captures the output of the last command.
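+
+A sketch combining both directives, roughly equivalent to the shell command @grep -v '#' &lt; input &gt; filtered.txt@ (the input path is hypothetical):
+
+<pre>
+{
+  "input": "c1bad4b39ca5a924e481008009d94e32+210/records.txt",
+  "command": ["grep", "-v", "#"],
+  "task.stdin": "$(file $(input))",
+  "task.stdout": "filtered.txt"
+}
+</pre>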
+
+h3. task.env
+
+Set environment variables for the command.  Accepts an object mapping environment variables to the desired values.  Parameter substitution is performed on values, but not on the environment variable names themselves.  Example usage:
+
+<pre>
+{
+  "command": ["/bin/sh", "-c", "echo $MY_ENV_VAR"],
+  "task.env": {
+    "MY_ENV_VAR": "Hello world!"
+  }
+}
+</pre>
+
+h3. task.vwd
+
+Background: because Keep collections are read-only, they do not play well with certain tools that expect to be able to write their outputs alongside their inputs (such as tools that generate indexes closely associated with the original file).  run-command's solution to this is the "virtual working directory".
+
+@task.vwd@ specifies a Keep collection with the starting contents of the output directory.  @run-command@ will populate @task.outdir@ with directories and symlinks to mirror the contents of the @task.vwd@ collection.  Your command will then be able to both access its input files and write its output files from within @task.outdir@.  When the command completes, run-command will save the contents of the output directory, which will include the output of your command as well as symlinks to files in the starting collection.  Note that files from the starting collection remain read-only and cannot be altered, but may be deleted or renamed.
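+
+As a sketch (assuming @task.vwd@ accepts a parameter substitution that evaluates to a collection, and using a hypothetical @build_index@ tool), the following component exposes an existing collection's files in the output directory so the tool can write its index alongside them:
+
+<pre>
+{
+  "input": "c1bad4b39ca5a924e481008009d94e32+210",
+  "task.vwd": "$(input)",
+  "command": ["build_index", "var-GS000016015-ASM.tsv.bz2"]
+}
+</pre>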
+
+h3. task.foreach
+
+Using @task.foreach@, you can run your command concurrently over large datasets.
+
+@task.foreach@ takes the names of one or more user-defined parameters.  The values of these parameters are evaluated in a list context.  @run-command@ then generates tasks based on the Cartesian product (i.e. all combinations) of the input lists.  The outputs of all tasks are merged to create the final output collection.  Note that if two tasks output a file in the same directory with the same name, that file will be concatenated in the final output.  In the following example, three tasks will be created for the "echo" command, based on the contents of user parameter "a":
+
+<pre>
+{
+  "command": ["echo", "$(a)"],
+  "task.foreach": "a",
+  "a": ["alice", "bob", "carol"]
+}
+</pre>
+
+This evaluates to the commands:
+<notextile>
+<pre>
+["echo", "alice"]
+["echo", "bob"]
+["echo", "carol"]
+</pre>
+</notextile>
+
+You can also specify multiple parameters:
+
+<pre>
+{
+  "a": ["alice", "bob"],
+  "b": ["carol", "dave"],
+  "task.foreach": ["a", "b"],
+  "command": ["echo", "$(a)", "$(b)"]
+}
+</pre>
+
+This evaluates to the commands:
+
+<pre>
+["echo", "alice", "carol"]
+["echo", "alice", "dave"]
+["echo", "bob", "carol"]
+["echo", "bob", "dave"]
+</pre>
+
+h1. Examples
+
+The following is a single task pipeline using @run-command@ to run the bwa alignment tool to align a single paired-end read fastq sample.  The input to this pipeline is the reference genome and a collection consisting of two fastq files for the read pair.
+
+<notextile>{% code 'run_command_simple_example' as javascript %}</notextile>
+
+The following is a concurrent task pipeline using @run-command@ to run the bwa alignment tool to align a set of fastq reads over multiple samples.  The input to this pipeline is the reference genome and a collection consisting of subdirectories for each sample, with each subdirectory containing pairs of fastq files for each set of reads.
+
+<notextile>{% code 'run_command_foreach_example' as javascript %}</notextile>
diff --git a/doc/user/topics/running-pipeline-command-line.html.textile.liquid b/doc/user/topics/running-pipeline-command-line.html.textile.liquid
new file mode 100644 (file)
index 0000000..ffa5710
--- /dev/null
@@ -0,0 +1,58 @@
+---
+layout: default
+navsection: userguide
+title: "Running an Arvados pipeline"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'crunch1only_begin' %}
+If the Jobs API is not available, use the "Common Workflow Language":{{site.baseurl}}/user/cwl/cwl-runner.html instead.
+{% include 'crunch1only_end' %}
+
+This tutorial demonstrates how to use the command line to run the same pipeline as described in "running a pipeline using Workbench.":{{site.baseurl}}/user/tutorials/tutorial-workflow-workbench.html
+
+{% include 'tutorial_expectations' %}
+{% include 'tutorial_cluster_name' %}
+
+When you use the command line, you must use Arvados unique identifiers to refer to objects.  The identifiers in this example correspond to the following Arvados objects:
+
+* <i class="fa fa-fw fa-gear"></i> "Tutorial align using bwa mem (qr1hi-p5p6p-itzkwxblfermlwv)":{{site.arvados_workbench_host}}/pipeline_templates/qr1hi-p5p6p-itzkwxblfermlwv
+* <i class="fa fa-fw fa-archive"></i> "Tutorial chromosome 19 reference (2463fa9efeb75e099685528b3b9071e0+438)":{{site.arvados_workbench_host}}/collections/2463fa9efeb75e099685528b3b9071e0+438
+* <i class="fa fa-fw fa-archive"></i> "Tutorial sample exome (3229739b505d2b878b62aed09895a55a+142)":{{site.arvados_workbench_host}}/collections/3229739b505d2b878b62aed09895a55a+142
+
+Use @arv pipeline run@ to run the pipeline, supplying the inputs to the bwa-mem component on the command line:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv pipeline run --run-pipeline-here --template qr1hi-p5p6p-itzkwxblfermlwv bwa-mem::reference_collection=2463fa9efeb75e099685528b3b9071e0+438 bwa-mem::sample=3229739b505d2b878b62aed09895a55a+142</span>
+
+2014-07-25 18:05:26 +0000 -- pipeline_instance qr1hi-d1hrv-d14trje19pna7f2
+bwa-mem qr1hi-8i9sb-67n1qvsronmd2z6 queued 2014-07-25T18:05:25Z
+
+2014-07-25 18:05:36 +0000 -- pipeline_instance qr1hi-d1hrv-d14trje19pna7f2
+bwa-mem qr1hi-8i9sb-67n1qvsronmd2z6 {:done=>0, :running=>1, :failed=>0, :todo=>0}
+
+2014-07-25 18:05:46 +0000 -- pipeline_instance qr1hi-d1hrv-d14trje19pna7f2
+bwa-mem qr1hi-8i9sb-67n1qvsronmd2z6 49bae1066f4ebce72e2587a3efa61c7d+88
+</code></pre>
+</notextile>
+
+This instantiates your pipeline and displays periodic status reports in your terminal window. The new pipeline instance will also show up on the Workbench Dashboard.
+
+@arv pipeline run@ submits a job for each pipeline component as soon as the component's inputs are known (i.e., all dependencies are satisfied). It terminates when there is no work left to do: this means either all components are satisfied and all jobs have completed successfully, _or_ one or more jobs have failed and it is therefore unproductive to submit any further jobs.
+
+The Keep locator of the output of the @bwa-mem@ component is available from the last status report shown above:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep ls -s 49bae1066f4ebce72e2587a3efa61c7d+88</span>
+     29226 ./HWI-ST1027_129_D0THKACXX.1_1.sam
+</code></pre>
+</notextile>
+
+h2. Re-using existing jobs and outputs
+
+When satisfying a pipeline component that is not marked as nondeterministic in the pipeline template, @arv pipeline run@ checks for a previously submitted job that satisfies the component's requirements. If such a job is found, @arv pipeline run@ uses the existing job rather than submitting a new one. Usually this is a safe way to conserve time and compute resources. In some cases it's desirable to re-run jobs with identical specifications (e.g., to demonstrate that a job or entire pipeline thought to be repeatable is in fact repeatable). For such cases, job re-use features can be disabled entirely by passing the @--no-reuse@ flag to the @arv pipeline run@ command.
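+
+For example, to force a fresh run of the pipeline shown above:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv pipeline run --no-reuse --run-pipeline-here --template qr1hi-p5p6p-itzkwxblfermlwv bwa-mem::reference_collection=2463fa9efeb75e099685528b3b9071e0+438 bwa-mem::sample=3229739b505d2b878b62aed09895a55a+142</span>
+</code></pre>
+</notextile>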
diff --git a/doc/user/topics/storage-classes.html.textile.liquid b/doc/user/topics/storage-classes.html.textile.liquid
new file mode 100644 (file)
index 0000000..96c8083
--- /dev/null
@@ -0,0 +1,53 @@
+---
+layout: default
+navsection: userguide
+title: Using storage classes
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Storage classes (alternately known as "storage tiers") allow you to control which volumes should be used to store particular collection data blocks.  This can be used to implement data storage policies such as moving data to archival storage.
+
+Names of storage classes are internal to the cluster and decided by the administrator.  Aside from "default", Arvados currently does not define any standard storage class names.
+
+h3. arv-put
+
+You may specify the desired storage class for a collection uploaded using @arv-put@:
+
+<pre>
+$ arv-put --storage-classes=hot myfile.txt
+</pre>
+
+h3. arvados-cwl-runner
+
+You may also specify the desired storage class for the final output collection produced by @arvados-cwl-runner@:
+
+<pre>
+$ arvados-cwl-runner --storage-classes=hot myworkflow.cwl myinput.yml
+</pre>
+
+(Note: intermediate collections produced by a workflow run will have the "default" storage class.)
+
+h3. arv command line
+
+You may set the storage class on an existing collection by setting the "storage_classes_desired" field of a Collection.  For example, at the command line:
+
+<pre>
+$ arv collection update --uuid zzzzz-4zz18-dhhm0ay8k8cqkvg --collection '{"storage_classes_desired": ["archival"]}'
+</pre>
+
+By setting "storage_classes_desired" to "archival", the blocks that make up the collection will be preferentially moved to keepstore volumes which are configured with the "archival" storage class.
+
+h3. Storage class notes
+
+Collection blocks will be in the "default" storage class if not otherwise specified.
+
+Currently, a collection may only have one desired storage class.
+
+Any user with write access to a collection may set any storage class on that collection.
diff --git a/doc/user/topics/tutorial-gatk-variantfiltration.html.textile.liquid b/doc/user/topics/tutorial-gatk-variantfiltration.html.textile.liquid
new file mode 100644 (file)
index 0000000..752488e
--- /dev/null
@@ -0,0 +1,173 @@
+---
+layout: default
+navsection: userguide
+title: "Using GATK with Arvados"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This tutorial demonstrates how to use the Genome Analysis Toolkit (GATK) with Arvados. In this example we will install GATK and then create a VariantFiltration job to assign pass/fail scores to variants in a VCF file.
+
+{% include 'tutorial_expectations' %}
+
+h2. Installing GATK
+
+Download the GATK binary tarball[1] -- e.g., @GenomeAnalysisTK-2.6-4.tar.bz2@ -- and "copy it to your Arvados VM":{{site.baseurl}}/user/tutorials/tutorial-keep.html.
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep put GenomeAnalysisTK-2.6-4.tar.bz2</span>
+c905c8d8443a9c44274d98b7c6cfaa32+94
+</code></pre>
+</notextile>
+
+Next, you need the GATK Resource Bundle[2].  This may already be available in Arvados.  If not, you will need to download the files listed below and put them into Keep.
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep ls -s d237a90bae3870b3b033aea1e99de4a9+10820</span>
+  50342 1000G_omni2.5.b37.vcf.gz
+      1 1000G_omni2.5.b37.vcf.gz.md5
+    464 1000G_omni2.5.b37.vcf.idx.gz
+      1 1000G_omni2.5.b37.vcf.idx.gz.md5
+  43981 1000G_phase1.indels.b37.vcf.gz
+      1 1000G_phase1.indels.b37.vcf.gz.md5
+    326 1000G_phase1.indels.b37.vcf.idx.gz
+      1 1000G_phase1.indels.b37.vcf.idx.gz.md5
+ 537210 CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.gz
+      1 CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.gz.md5
+   3473 CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.idx.gz
+      1 CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.idx.gz.md5
+  19403 Mills_and_1000G_gold_standard.indels.b37.vcf.gz
+      1 Mills_and_1000G_gold_standard.indels.b37.vcf.gz.md5
+    536 Mills_and_1000G_gold_standard.indels.b37.vcf.idx.gz
+      1 Mills_and_1000G_gold_standard.indels.b37.vcf.idx.gz.md5
+  29291 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.gz.md5
+    565 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.idx.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.idx.gz.md5
+  37930 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.gz.md5
+    592 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.idx.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.idx.gz.md5
+5898484 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam
+    112 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.bai.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.bai.gz.md5
+      1 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.md5
+   3837 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.gz.md5
+     65 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.idx.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.idx.gz.md5
+ 275757 dbsnp_137.b37.excluding_sites_after_129.vcf.gz
+      1 dbsnp_137.b37.excluding_sites_after_129.vcf.gz.md5
+   3735 dbsnp_137.b37.excluding_sites_after_129.vcf.idx.gz
+      1 dbsnp_137.b37.excluding_sites_after_129.vcf.idx.gz.md5
+ 998153 dbsnp_137.b37.vcf.gz
+      1 dbsnp_137.b37.vcf.gz.md5
+   3890 dbsnp_137.b37.vcf.idx.gz
+      1 dbsnp_137.b37.vcf.idx.gz.md5
+  58418 hapmap_3.3.b37.vcf.gz
+      1 hapmap_3.3.b37.vcf.gz.md5
+    999 hapmap_3.3.b37.vcf.idx.gz
+      1 hapmap_3.3.b37.vcf.idx.gz.md5
+      3 human_g1k_v37.dict.gz
+      1 human_g1k_v37.dict.gz.md5
+      2 human_g1k_v37.fasta.fai.gz
+      1 human_g1k_v37.fasta.fai.gz.md5
+ 849537 human_g1k_v37.fasta.gz
+      1 human_g1k_v37.fasta.gz.md5
+      1 human_g1k_v37.stats.gz
+      1 human_g1k_v37.stats.gz.md5
+      3 human_g1k_v37_decoy.dict.gz
+      1 human_g1k_v37_decoy.dict.gz.md5
+      2 human_g1k_v37_decoy.fasta.fai.gz
+      1 human_g1k_v37_decoy.fasta.fai.gz.md5
+ 858592 human_g1k_v37_decoy.fasta.gz
+      1 human_g1k_v37_decoy.fasta.gz.md5
+      1 human_g1k_v37_decoy.stats.gz
+      1 human_g1k_v37_decoy.stats.gz.md5
+</code></pre>
+</notextile>
+
+h2. Submit a GATK job
+
+The Arvados distribution includes an example crunch script ("crunch_scripts/GATK2-VariantFiltration":https://dev.arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/GATK2-VariantFiltration) that runs the GATK VariantFiltration tool with some default settings.
+
+<notextile>
+<pre><code>~$ <span class="userinput">src_version=76588bfc57f33ea1b36b82ca7187f465b73b4ca4</span>
+~$ <span class="userinput">vcf_input=5ee633fe2569d2a42dd81b07490d5d13+82</span>
+~$ <span class="userinput">gatk_binary=c905c8d8443a9c44274d98b7c6cfaa32+94</span>
+~$ <span class="userinput">gatk_bundle=d237a90bae3870b3b033aea1e99de4a9+10820</span>
+~$ <span class="userinput">cat &gt;the_job &lt;&lt;EOF
+{
+ "script":"GATK2-VariantFiltration",
+ "repository":"arvados",
+ "script_version":"$src_version",
+ "script_parameters":
+ {
+  "input":"$vcf_input",
+  "gatk_binary_tarball":"$gatk_binary",
+  "gatk_bundle":"$gatk_bundle"
+ }
+}
+EOF</span>
+</code></pre>
+</notextile>
+
+* @"input"@ is a collection containing the source VCF data. Here we are using an exome report from PGP participant hu34D5B9.
+* @"gatk_binary_tarball"@ is a Keep collection containing the GATK 2 binary distribution[1] tar file.
+* @"gatk_bundle"@ is a Keep collection containing the GATK resource bundle[2].
+
+Now start a job:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv job create --job "$(cat the_job)"</span>
+{
+ "href":"https://qr1hi.arvadosapi.com/arvados/v1/jobs/qr1hi-8i9sb-n9k7qyp7bs5b9d4",
+ "kind":"arvados#job",
+ "etag":"9j99n1feoxw3az448f8ises12",
+ "uuid":"qr1hi-8i9sb-n9k7qyp7bs5b9d4",
+ "owner_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "created_at":"2013-12-17T19:02:15Z",
+ "modified_by_client_uuid":"qr1hi-ozdt8-obw7foaks3qjyej",
+ "modified_by_user_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "modified_at":"2013-12-17T19:02:15Z",
+ "updated_at":"2013-12-17T19:02:15Z",
+ "submit_id":null,
+ "priority":null,
+ "script":"GATK2-VariantFiltration",
+ "script_parameters":{
+  "input":"5ee633fe2569d2a42dd81b07490d5d13+82",
+  "gatk_binary_tarball":"c905c8d8443a9c44274d98b7c6cfaa32+94",
+  "gatk_bundle":"d237a90bae3870b3b033aea1e99de4a9+10820"
+ },
+ "script_version":"76588bfc57f33ea1b36b82ca7187f465b73b4ca4",
+ "cancelled_at":null,
+ "cancelled_by_client_uuid":null,
+ "cancelled_by_user_uuid":null,
+ "started_at":null,
+ "finished_at":null,
+ "output":null,
+ "success":null,
+ "running":null,
+ "is_locked_by_uuid":null,
+ "log":null,
+ "runtime_constraints":{},
+ "tasks_summary":{}
+}
+</code></pre>
+</notextile>
+
+Once the job completes, the output can be found in hu34D5B9-exome-filtered.vcf:
+
+<notextile><pre><code>~$ <span class="userinput">arv keep ls bedd6ff56b3ae9f90d873b1fcb72f9a3+91</span>
+hu34D5B9-exome-filtered.vcf
+</code></pre>
+</notextile>
+
+h2. Notes
+
+fn1. "Download the GATK tools":http://www.broadinstitute.org/gatk/download
+
+fn2. "Information about the GATK resource bundle":http://gatkforums.broadinstitute.org/discussion/1213/whats-in-the-resource-bundle-and-how-can-i-get-it and "direct download link":ftp://gsapubftp-anonymous@ftp.broadinstitute.org/bundle/2.5/b37/ (if prompted, submit an empty password)
diff --git a/doc/user/topics/tutorial-job1.html.textile.liquid b/doc/user/topics/tutorial-job1.html.textile.liquid
new file mode 100644 (file)
index 0000000..34c452a
--- /dev/null
@@ -0,0 +1,214 @@
+---
+layout: default
+navsection: userguide
+title: "Running a Crunch job on the command line"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This tutorial introduces how to run individual Crunch jobs using the @arv@ command line tool.
+
+{% include 'tutorial_expectations' %}
+
+You will create a job to run the "hash" Crunch script.  The "hash" script computes the MD5 hash of each file in a collection.
+
+h2. Jobs
+
+Crunch pipelines consist of one or more jobs.  A "job" is a single run of a specific version of a Crunch script with a specific input.  You can also run jobs individually.
+
+A request to run a Crunch job is described using a JSON object.  For example:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cat &gt;~/the_job &lt;&lt;EOF
+{
+ "script": "hash",
+ "repository": "arvados",
+ "script_version": "master",
+ "script_parameters": {
+  "input": "c1bad4b39ca5a924e481008009d94e32+210"
+ },
+ "no_reuse": "true"
+}
+EOF
+</code></pre>
+</notextile>
+
+* @cat@ is a standard Unix utility that writes a sequence of input to standard output.
+* @<<EOF@ tells the shell to direct the following lines into the standard input for @cat@ up until it sees the line @EOF@.
+* @>~/the_job@ redirects standard output to a file called @~/the_job@.
+* @"repository"@ is the name of a Git repository to search for the script version.  You can access a list of available git repositories on the Arvados Workbench under "*Code repositories*":{{site.arvados_workbench_host}}/repositories.
+* @"script_version"@ specifies the version of the script that you wish to run.  This can be in the form of an explicit Git revision hash, a tag, or a branch.  Arvados logs the script version that was used in the run, enabling you to go back and re-run any past job with the guarantee that the exact same code will be used as was used in the previous run.
+* @"script"@ specifies the name of the script to run.  The script must be given relative to the @crunch_scripts/@ subdirectory of the Git repository.
+* @"script_parameters"@ are provided to the script.  In this case, the input is the PGP data Collection that we "put in Keep earlier":{{site.baseurl}}/user/tutorials/tutorial-keep.html.
+* Setting the @"no_reuse"@ flag tells Crunch not to reuse work from past jobs.  This helps ensure that you can watch a new Job process for the rest of this tutorial, without reusing output from a past run that you made, or somebody else marked as public.  (If you want to experiment, after the first run below finishes, feel free to edit this job to remove the @"no_reuse"@ line and resubmit it.  See what happens!)
+
+Use @arv job create@ to actually submit the job.  It should print out a JSON object which describes the newly created job:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv job create --job "$(cat ~/the_job)"</span>
+{
+ "href":"https://qr1hi.arvadosapi.com/arvados/v1/jobs/qr1hi-8i9sb-1pm1t02dezhupss",
+ "kind":"arvados#job",
+ "etag":"ax3cn7w9whq2hdh983yxvq09p",
+ "uuid":"qr1hi-8i9sb-1pm1t02dezhupss",
+ "owner_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "created_at":"2013-12-16T20:44:32Z",
+ "modified_by_client_uuid":"qr1hi-ozdt8-obw7foaks3qjyej",
+ "modified_by_user_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "modified_at":"2013-12-16T20:44:32Z",
+ "updated_at":"2013-12-16T20:44:33Z",
+ "submit_id":null,
+ "priority":null,
+ "script":"hash",
+ "script_parameters":{
+  "input":"c1bad4b39ca5a924e481008009d94e32+210"
+ },
+ "script_version":"d9cd657b733d578ac0d2167dd75967aa4f22e0ac",
+ "cancelled_at":null,
+ "cancelled_by_client_uuid":null,
+ "cancelled_by_user_uuid":null,
+ "started_at":null,
+ "finished_at":null,
+ "output":null,
+ "success":null,
+ "running":null,
+ "is_locked_by_uuid":null,
+ "log":null,
+ "runtime_constraints":{},
+ "tasks_summary":{}
+}
+</code></pre>
+</notextile>
+
+The job is now queued and will start running as soon as it reaches the front of the queue.  Fields to pay attention to include:
+
+ * @"uuid"@ is the unique identifier for this specific job.
+ * @"script_version"@ is the actual revision of the script used.  This is useful if the version was described using the "repository:branch" format.
+
+h2. Monitor job progress
+
+Go to "*Recent jobs*":{{site.arvados_workbench_host}}/jobs in Workbench.  Your job should be near the top of the table.  This table refreshes automatically.  When the job has completed successfully, it will show <span class="label label-success">finished</span> in the *Status* column.
+
+h2. Inspect the job output
+
+On the "Workbench Dashboard":{{site.arvados_workbench_host}}, look for the *Output* column of the *Recent jobs* table.  Click on the link under *Output* for your job to go to the files page with the job output.  The files page lists all the files that were output by the job.  Click on the link under the *file* column to view a file, or click on the download button <span class="glyphicon glyphicon-download-alt"></span> to download the output file.
+
+On the command line, you can use @arv job get@ to access a JSON object describing the output:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv job get --uuid qr1hi-8i9sb-xxxxxxxxxxxxxxx</span>
+{
+ "href":"https://qr1hi.arvadosapi.com/arvados/v1/jobs/qr1hi-8i9sb-1pm1t02dezhupss",
+ "kind":"arvados#job",
+ "etag":"1bk98tdj0qipjy0rvrj03ta5r",
+ "uuid":"qr1hi-8i9sb-1pm1t02dezhupss",
+ "owner_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "created_at":"2013-12-16T20:44:32Z",
+ "modified_by_client_uuid":null,
+ "modified_by_user_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "modified_at":"2013-12-16T20:44:55Z",
+ "updated_at":"2013-12-16T20:44:55Z",
+ "submit_id":null,
+ "priority":null,
+ "script":"hash",
+ "script_parameters":{
+  "input":"c1bad4b39ca5a924e481008009d94e32+210"
+ },
+ "script_version":"d9cd657b733d578ac0d2167dd75967aa4f22e0ac",
+ "cancelled_at":null,
+ "cancelled_by_client_uuid":null,
+ "cancelled_by_user_uuid":null,
+ "started_at":"2013-12-16T20:44:36Z",
+ "finished_at":"2013-12-16T20:44:53Z",
+ "output":"dd755dbc8d49a67f4fe7dc843e4f10a6+54",
+ "success":true,
+ "running":false,
+ "is_locked_by_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "log":"2afdc6c8b67372ffd22d8ce89d35411f+91",
+ "runtime_constraints":{},
+ "tasks_summary":{
+  "done":2,
+  "running":0,
+  "failed":0,
+  "todo":0
+ }
+}
+</code></pre>
+</notextile>
+
+* @"output"@ is the unique identifier for this specific job's output.  This is a Keep collection.  Because the output of Arvados jobs should be deterministic, the known expected output is <code>dd755dbc8d49a67f4fe7dc843e4f10a6+54</code>.
+
+Now you can list the files in the collection:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep ls dd755dbc8d49a67f4fe7dc843e4f10a6+54</span>
+./md5sum.txt
+</code></pre>
+</notextile>
+
+This collection consists of the @md5sum.txt@ file.  Use @arv keep get@ to show the contents of the @md5sum.txt@ file:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep get dd755dbc8d49a67f4fe7dc843e4f10a6+54/md5sum.txt</span>
+44b8ae3fde7a8a88d2f7ebd237625b4f ./var-GS000016015-ASM.tsv.bz2
+</code></pre>
+</notextile>
+
+This MD5 hash matches the MD5 hash which we "computed earlier":{{site.baseurl}}/user/tutorials/tutorial-keep.html.
+
+h2. The job log
+
+When the job completes, you can access the job log.  On the Workbench, visit "*Recent jobs*":{{site.arvados_workbench_host}}/jobs %(rarr)&rarr;% your job's UUID under the *uuid* column %(rarr)&rarr;% the collection link on the *log* row.
+
+On the command line, the Keep identifier listed in the @"log"@ field from @arv job get@ specifies a collection.  You can list the files in the collection:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep ls xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx+91</span>
+./qr1hi-8i9sb-xxxxxxxxxxxxxxx.log.txt
+</code></pre>
+</notextile>
+
+The log collection consists of one log file named with the job's UUID.  You can access it using @arv keep get@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep get xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx+91/qr1hi-8i9sb-xxxxxxxxxxxxxxx.log.txt</span>
+2013-12-16_20:44:35 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  check slurm allocation
+2013-12-16_20:44:35 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  node compute13 - 8 slots
+2013-12-16_20:44:36 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  start
+2013-12-16_20:44:36 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  Install revision d9cd657b733d578ac0d2167dd75967aa4f22e0ac
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  Clean-work-dir exited 0
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  Install exited 0
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  script hash
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  script_version d9cd657b733d578ac0d2167dd75967aa4f22e0ac
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  script_parameters {"input":"c1bad4b39ca5a924e481008009d94e32+210"}
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  runtime_constraints {"max_tasks_per_node":0}
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  start level 0
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  status: 0 done, 0 running, 1 todo
+2013-12-16_20:44:38 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 job_task qr1hi-ot0gb-23c1k3kwrf8da62
+2013-12-16_20:44:38 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 child 7681 started on compute13.1
+2013-12-16_20:44:38 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  status: 0 done, 1 running, 0 todo
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 child 7681 on compute13.1 exit 0 signal 0 success=true
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 success in 1 seconds
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 output
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  wait for last 0 children to finish
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  status: 1 done, 0 running, 1 todo
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  start level 1
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  status: 1 done, 0 running, 1 todo
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 job_task qr1hi-ot0gb-iwr0o3unqothg28
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 child 7716 started on compute13.1
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  status: 1 done, 1 running, 0 todo
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 child 7716 on compute13.1 exit 0 signal 0 success=true
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 success in 13 seconds
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 output dd755dbc8d49a67f4fe7dc843e4f10a6+54
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  wait for last 0 children to finish
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  status: 2 done, 0 running, 0 todo
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  release job allocation
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  Freeze not implemented
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  collate
+2013-12-16_20:44:53 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  output dd755dbc8d49a67f4fe7dc843e4f10a6+54+K@qr1hi
+2013-12-16_20:44:53 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  finish
+</code></pre>
+</notextile>
diff --git a/doc/user/topics/tutorial-parallel.html.textile.liquid b/doc/user/topics/tutorial-parallel.html.textile.liquid
new file mode 100644 (file)
index 0000000..4ff402e
--- /dev/null
@@ -0,0 +1,85 @@
+---
+layout: default
+navsection: userguide
+title: "Concurrent Crunch tasks"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'pipeline_deprecation_notice' %}
+
+In the previous tutorials, we used @arvados.job_setup.one_task_per_input_file()@ to automatically create concurrent jobs by creating a separate task per file.  For some types of jobs, you may need to split the work up differently, for example creating tasks to process different segments of a single large file.  This tutorial will demonstrate how to create Crunch tasks directly.
+
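+The heart of the technique is creating new entries in the @job_tasks@ resource from inside a running task.  The sketch below outlines the pattern; the complete, runnable script is introduced later in this tutorial:
+
+<notextile>
+<pre><code>import arvados
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+
+if this_task['sequence'] == 0:
+    # The first task (sequence 0) queues one new task per unit of work;
+    # here each unit is one file from the input collection.
+    cr = arvados.CollectionReader(this_job['script_parameters']['input'])
+    for s in cr.all_streams():
+        for f in s.all_files():
+            arvados.api().job_tasks().create(body={
+                'job_uuid': this_job['uuid'],
+                'created_by_job_task_uuid': this_task['uuid'],
+                'sequence': 1,
+                'parameters': {'input': f.as_manifest()}
+            }).execute()
+    this_task.set_output(None)
+else:
+    # Tasks with sequence 1 each process the single file named in their
+    # 'parameters'; the full script below shows this worker logic.
+    pass
+</code></pre>
+</notextile>
+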
+Start by entering the @crunch_scripts@ directory of your Git repository:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $USER/crunch_scripts</span>
+</code></pre>
+</notextile>
+
+Next, using @nano@ or your favorite Unix text editor, create a new file called @concurrent-hash.py@ in the @crunch_scripts@ directory.
+
+notextile. <pre>~/$USER/crunch_scripts$ <code class="userinput">nano concurrent-hash.py</code></pre>
+
+Add the following code to compute the MD5 hash of each file in a collection:
+
+<notextile> {% code 'concurrent_hash_script_py' as python %} </notextile>
+
+Make the file executable:
+
+notextile. <pre><code>~/$USER/crunch_scripts$ <span class="userinput">chmod +x concurrent-hash.py</span></code></pre>
+
+Add the file to the Git staging area, commit, and push:
+
+<notextile>
+<pre><code>~/$USER/crunch_scripts$ <span class="userinput">git add concurrent-hash.py</span>
+~/$USER/crunch_scripts$ <span class="userinput">git commit -m"concurrent hash"</span>
+~/$USER/crunch_scripts$ <span class="userinput">git push origin master</span>
+</code></pre>
+</notextile>
+
+You should now be able to run your new script using Crunch, with "script" referring to our new "concurrent-hash.py" script.  We will use a different input from our previous examples.  We will use @887cd41e9c613463eab2f0d885c6dd96+83@ which consists of three files, "alice.txt", "bob.txt" and "carol.txt" (the example collection used previously in "fetching data from Arvados using Keep":{{site.baseurl}}/user/tutorials/tutorial-keep.html#dir).
+
+<notextile>
+<pre><code>~/$USER/crunch_scripts$ <span class="userinput">cat &gt;~/the_job &lt;&lt;EOF
+{
+ "script": "concurrent-hash.py",
+ "repository": "$USER/$USER",
+ "script_version": "master",
+ "script_parameters":
+ {
+  "input": "887cd41e9c613463eab2f0d885c6dd96+83"
+ }
+}
+EOF</span>
+~/$USER/crunch_scripts$ <span class="userinput">arv job create --job "$(cat ~/the_job)"</span>
+{
+ ...
+ "uuid":"qr1hi-xxxxx-xxxxxxxxxxxxxxx"
+ ...
+}
+~/$USER/crunch_scripts$ <span class="userinput">arv job get --uuid qr1hi-xxxxx-xxxxxxxxxxxxxxx</span>
+{
+ ...
+ "output":"e2ccd204bca37c77c0ba59fc470cd0f7+162",
+ ...
+}
+</code></pre>
+</notextile>
+
+(Your shell should automatically fill in @$USER@ with your login name.  The job JSON that gets saved should have @"repository"@ pointed at your personal Git repository.)
+
+Because the job ran its tasks concurrently, each instance of @concurrent-hash@ creates a separate @md5sum.txt@ as output.  Arvados automatically collates these files into a single collection, which is the output of the job:
+
+<notextile>
+<pre><code>~/$USER/crunch_scripts$ <span class="userinput">arv keep ls e2ccd204bca37c77c0ba59fc470cd0f7+162</span>
+./md5sum.txt
+~/$USER/crunch_scripts$ <span class="userinput">arv keep get e2ccd204bca37c77c0ba59fc470cd0f7+162/md5sum.txt</span>
+0f1d6bcf55c34bed7f92a805d2d89bbf alice.txt
+504938460ef369cd275e4ef58994cffe bob.txt
+8f3b36aff310e06f3c5b9e95678ff77a carol.txt
+</code></pre>
+</notextile>
diff --git a/doc/user/topics/tutorial-trait-search.html.textile.liquid b/doc/user/topics/tutorial-trait-search.html.textile.liquid
new file mode 100644 (file)
index 0000000..d396802
--- /dev/null
@@ -0,0 +1,278 @@
+---
+layout: default
+navsection: userguide
+title: "Querying the Metadata Database"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'notebox_begin_warning' %}
+The humans, specimens and traits tables are deprecated and will be removed in a future release.  The recommended way to store and search on user-defined metadata is using the "properties" field of Arvados resources.
+{% include 'notebox_end' %}
+
+This tutorial introduces the Arvados Metadata Database.  The Metadata Database stores information about files in Keep.  This example will use the Python SDK to find public WGS (Whole Genome Sequencing) data for people who have reported a certain medical condition.
+
+{% include 'tutorial_expectations' %}
+
+In the tutorial examples, three angle brackets (&gt;&gt;&gt;) will be used to denote code to enter at the interactive Python prompt.
+
+Start by running Python.
+
+<notextile>
+<pre><code>~$ <span class="userinput">python</span>
+Python 2.7.3 (default, Jan  2 2013, 13:56:14)
+[GCC 4.7.2] on linux2
+Type "help", "copyright", "credits" or "license" for more information.
+&gt;&gt;&gt;
+</code></pre>
+</notextile>
+
+If everything is set up correctly, you will be able to import the arvados SDK.
+
+notextile. <pre><code>&gt;&gt;&gt; <span class="userinput">import arvados</span></code></pre>
+
+This tutorial will also use the regular expression (@re@) Python module:
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">import re</span>
+</code></pre>
+</notextile>
+
+h2. Finding traits
+
+notextile. <pre><code>&gt;&gt;&gt; <span class="userinput">all_traits = arvados.api().traits().list(limit=1000).execute()</span></code></pre>
+
+* @arvados.api()@ gets an object that provides access to the Arvados API server
+* @.traits()@ gets an object that provides access to the "traits" resource on the Arvados API server
+* @.list(limit=1000)@ constructs a query to list all elements of the "traits" resource, with a limit of 1000 entries returned
+* @.execute()@ executes the query and returns the result, which we assign to "all_traits"
+
+notextile. <pre><code>&gt;&gt;&gt; <span class="userinput">cancer_traits = filter(lambda t: re.search('cancer', t['name']), all_traits['items'])</span></code></pre>
+
+* @lambda t: re.search('cancer', t['name'])@ is an inline function that takes a parameter @t@ and uses a simple regular expression to test if @t['name']@ contains the substring 'cancer'
+* @all_traits['items']@ is the input sequence of traits
+* @filter@ tests each element @t@ and constructs a new sequence consisting only of the elements that pass the filter
+* @cancer_traits@ gets the result of @filter@
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">for t in cancer_traits: print(t['uuid'], t['name'])</span>
+...
+qr1hi-q1cn2-8q57g2diohwnzm0 Cervical cancer
+qr1hi-q1cn2-vqp4243janpjbyj Breast cancer
+qr1hi-q1cn2-v6usijujcpwqrn1 Non-melanoma skin cancer
+...
+</code></pre>
+</notextile>
+
+In this tutorial we will use the "Non-melanoma skin cancer" trait, which has UUID @qr1hi-q1cn2-v6usijujcpwqrn1@.
+
+notextile. <pre><code>&gt;&gt;&gt; <span class="userinput">non_melanoma_cancer = 'qr1hi-q1cn2-v6usijujcpwqrn1'</span></code></pre>
+
+h2. Finding humans with the selected trait
+
+We query the "links" resource to find humans that report the selected trait.  Links are directional connections between Arvados data items, for example, from a human to their reported traits.
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">trait_filter = [
+    ['link_class', '=', 'human_trait'],
+    ['tail_uuid', 'is_a', 'arvados#human'],
+    ['head_uuid', '=', non_melanoma_cancer],
+  ]</span>
+</code></pre>
+</notextile>
+
+* @['link_class', '=', 'human_trait']@ filters on links that connect phenotype traits to individuals in the database.
+* @['tail_uuid', 'is_a', 'arvados#human']@ requires that the "tail" of the link be a "human" database object.
+* @['head_uuid', '=', non_melanoma_cancer]@ requires that the "head" of the link be the "trait" database object @non_melanoma_cancer@.
+
+The query will return links that match all three conditions.
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">trait_links = arvados.api().links().list(limit=1000, filters=trait_filter).execute()</span>
+</code></pre>
+</notextile>
+
+* @arvados.api()@ gets an object that provides access to the Arvados API server
+* @.links()@ gets an object that provides access to the "links" resource on the Arvados API server
+* @.list(limit=1000, filters=trait_filter)@ constructs a query to list elements of the "links" resource that match the criteria discussed above, with a limit of 1000 entries returned
+* @.execute()@ executes the query and returns the result, which we assign to "trait_links"
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">human_uuids = map(lambda l: l['tail_uuid'], trait_links['items'])</span>
+&gt;&gt;&gt; <span class="userinput">human_uuids</span>
+[u'1h9kt-7a9it-c0uqa4kcdh29wdf', u'1h9kt-7a9it-x4tru6mn40hc6ah',
+u'1h9kt-7a9it-yqb8m5s9cpy88i8', u'1h9kt-7a9it-46sm75w200ngwny',
+u'1h9kt-7a9it-gx85a4tdkpzsg3w', u'1h9kt-7a9it-8cvlaa8909lgeo9',
+u'1h9kt-7a9it-as37qum2pq8vizb', u'1h9kt-7a9it-14fph66z2baqxb9',
+u'1h9kt-7a9it-e9zc7i4crmw3v69', u'1h9kt-7a9it-np7f35hlijlxdmt',
+u'1h9kt-7a9it-j9hqyjwbvo9cojn', u'1h9kt-7a9it-lqxdtm1gynmsv13',
+u'1h9kt-7a9it-zkhhxjfg2o22ywq', u'1h9kt-7a9it-nsjoxqd33lzldw9',
+u'1h9kt-7a9it-ytect4smzcgd4kg', u'1h9kt-7a9it-y6tl353b3jc4tos',
+u'1h9kt-7a9it-98f8qave4f8vbs5', u'1h9kt-7a9it-gd72sh15q0p4wq3',
+u'1h9kt-7a9it-zlx25dscak94q9h', u'1h9kt-7a9it-8gronw4rbgmim01',
+u'1h9kt-7a9it-wclfkjcb23tr5es', u'1h9kt-7a9it-rvp2qe7szfz4dy6',
+u'1h9kt-7a9it-50iffhmpzsktwjm', u'1h9kt-7a9it-ul412id5y31a5o8',
+u'1h9kt-7a9it-732kwkfzylmt4ik', u'1h9kt-7a9it-v9zqxegpblsbtai',
+u'1h9kt-7a9it-kmaraqduit1v5wd', u'1h9kt-7a9it-t1nwtlo1hru5vvq',
+u'1h9kt-7a9it-q3w6j9od4ibpoyl', u'1h9kt-7a9it-qz8vzkuuz97ezwv',
+u'1h9kt-7a9it-t1v8sjz6dm9jmjf', u'1h9kt-7a9it-qe8wrbyvuqs5jew']
+</code></pre>
+</notextile>
+
+* @lambda l: l['tail_uuid']@ is an inline function that returns the 'tail_uuid' attribute of 'l'
+* @trait_links['items']@ is the input set from the query
+* @map@ converts each item in a sequence into a different item using the embedded function, in this case to produce a sequence of uuids which refer to humans that have the specified trait.
+
+h2. Find Personal Genome Project identifiers from Arvados UUIDs
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">human_filters = [
+    ["link_class", "=", "identifier"],
+    ["head_uuid", "in", human_uuids]
+  ]</span>
+&gt;&gt;&gt; <span class="userinput">pgpid_links = arvados.api('v1').links().list(limit=1000, filters=human_filters).execute()</span>
+&gt;&gt;&gt; <span class="userinput">map(lambda l: l['name'], pgpid_links['items'])</span>
+[u'hu01024B', u'hu11603C', u'hu15402B', u'hu174334', u'hu1BD549', u'hu237A50',
+ u'hu34A921', u'hu397733', u'hu414115', u'hu43860C', u'hu474789', u'hu553620',
+ u'hu56B3B6', u'hu5917F3', u'hu599905', u'hu5E55F5', u'hu602487', u'hu633787',
+ u'hu68F245', u'hu6C3F34', u'hu7260DD', u'hu7A2F1D', u'hu94040B', u'hu9E356F',
+ u'huAB8707', u'huB1FD55', u'huB4883B', u'huD09050', u'huD09534', u'huD3A569',
+ u'huDF04CC', u'huE2E371']
+</code></pre>
+</notextile>
+
+These PGP IDs let us find public profiles, for example:
+
+* "https://my.pgp-hms.org/profile/huE2E371":https://my.pgp-hms.org/profile/huE2E371
+* "https://my.pgp-hms.org/profile/huDF04CC":https://my.pgp-hms.org/profile/huDF04CC
+* ...
+
+h2. Find genomic data from specific humans
+
+Now we want to find collections in Keep that were provided by these humans.  We search the "links" resource for "provenance" links that point to entries in the list of humans with the non-melanoma skin cancer trait:
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">provenance_links = arvados.api().links().list(limit=1000, filters=[
+    ["link_class", "=", "provenance"],
+    ["name", "=", "provided"],
+    ["tail_uuid", "in", human_uuids]
+  ]).execute()
+collection_uuids = map(lambda l: l['head_uuid'], provenance_links['items'])
+
+# build map of human uuid -> PGP ID
+pgpid = {}
+for pgpid_link in pgpid_links['items']:
+  pgpid[pgpid_link['head_uuid']] = pgpid_link['name']
+
+# build map of collection uuid -> PGP ID
+for p_link in provenance_links['items']:
+  pgpid[p_link['head_uuid']] = pgpid[p_link['tail_uuid']]
+
+# get details (e.g., list of files) of each collection
+collections = arvados.api('v1').collections().list(filters=[
+    ["uuid", "in", collection_uuids]
+  ]).execute()
+
+# print PGP public profile links with file locators
+for c in collections['items']:
+  for f in c['files']:
+    print "https://my.pgp-hms.org/profile/%s %s %s%s" % (pgpid[c['uuid']], c['uuid'], ('' if f[0] == '.' else f[0]+'/'), f[1])
+</span>
+https://my.pgp-hms.org/profile/hu43860C a58dca7609fa84c8c38a7e926a97b2fc var-GS00253-DNA_A01_200_37-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huB1FD55 ea30eb9e46eedf7f05ed6e348c2baf5d var-GS000010320-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huDF04CC 4ab0df8f22f595d1747a22c476c05873 var-GS000010427-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu7A2F1D 756d0ada29b376140f64e7abfe6aa0e7 var-GS000014566-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu553620 7ed4e425bb1c7cc18387cbd9388181df var-GS000015272-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huD09534 542112e210daff30dd3cfea4801a9f2f var-GS000016374-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu599905 33a9f3842b01ea3fdf27cc582f5ea2af var-GS000016015-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu43860C a58dca7609fa84c8c38a7e926a97b2fc+302 var-GS00253-DNA_A01_200_37-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huB1FD55 ea30eb9e46eedf7f05ed6e348c2baf5d+291 var-GS000010320-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huDF04CC 4ab0df8f22f595d1747a22c476c05873+242 var-GS000010427-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu7A2F1D 756d0ada29b376140f64e7abfe6aa0e7+242 var-GS000014566-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu553620 7ed4e425bb1c7cc18387cbd9388181df+242 var-GS000015272-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huD09534 542112e210daff30dd3cfea4801a9f2f+242 var-GS000016374-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu599905 33a9f3842b01ea3fdf27cc582f5ea2af+242 var-GS000016015-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu599905 d6e2e57cd60ba5979006d0b03e45e726+81 Witch_results.zip
+https://my.pgp-hms.org/profile/hu553620 ea4f2d325592a1272f989d141a917fdd+85 Devenwood_results.zip
+https://my.pgp-hms.org/profile/hu7A2F1D 4580f6620bb15b25b18373766e14e4a7+85 Innkeeper_results.zip
+https://my.pgp-hms.org/profile/huD09534 fee37be9440b912eb90f5e779f272416+82 Hallet_results.zip
+</code></pre>
+</notextile>
+
+h3. Search for a variant
+
+Now we will use Crunch to issue a @grep@ job to look for variant rs1126809 in each of the "var-" files (these contain variant calls from WGS data).
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">job = {}
+for c in collections['items']:
+  if [] != filter(lambda f: re.search('^var-.*\.tsv\.bz2', f[1]), c['files']):
+    job[c['uuid']] = arvados.api('v1').jobs().create(body={
+      'script': 'grep',
+      'script_parameters': {'input': c['uuid'], 'pattern': "rs1126809\\b"},
+      'script_version': 'e7aeb42'
+    }).execute()
+    print "%s %s" % (pgpid[c['uuid']], job[c['uuid']]['uuid'])
+</span>
+hu43860C qr1hi-8i9sb-wbf3uthbhkcy8ji
+huB1FD55 qr1hi-8i9sb-scklkiy8dc27dab
+huDF04CC qr1hi-8i9sb-pg0w4rfrwfd9srg
+hu7A2F1D qr1hi-8i9sb-n7u0u0rj8b47168
+hu553620 qr1hi-8i9sb-k7gst7vyhg20pt1
+huD09534 qr1hi-8i9sb-4w65pm48123fte5
+hu599905 qr1hi-8i9sb-wmwa5b5r3eghnev
+hu43860C qr1hi-8i9sb-j1mngmakdh8iv9o
+huB1FD55 qr1hi-8i9sb-4j6ehiatcolaoxb
+huDF04CC qr1hi-8i9sb-n6lcmcr3lowqr5u
+hu7A2F1D qr1hi-8i9sb-0hwsdtojfcxjo40
+hu553620 qr1hi-8i9sb-cvvqzqea7jhwb0i
+huD09534 qr1hi-8i9sb-d0y0qtzuwzbrjj0
+hu599905 qr1hi-8i9sb-i9ec9g8d7rt70xg
+</code></pre>
+</notextile>
+
+
+Monitor job progress by refreshing the Jobs page in Workbench, or by using the API:
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">map(lambda j: arvados.api('v1').jobs().get(uuid=j['uuid']).execute()['success'], job.values())
+[None, True, None, None, None, None, None, None, None, None, None, None, None, None]
+</code></pre>
+</notextile>
+
+Unfinished jobs will appear as None, failed jobs as False, and completed jobs as True.
+
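+If you would rather wait programmatically than refresh by hand, a minimal polling sketch built on the same @jobs().get@ call and the @job@ dictionary from the session above:
+
+<notextile>
+<pre><code>import time
+
+# Poll until no job reports success=None, i.e. all jobs have finished
+while True:
+    statuses = [arvados.api('v1').jobs().get(uuid=j['uuid']).execute()['success']
+                for j in job.values()]
+    if None not in statuses:
+        break
+    time.sleep(30)
+</code></pre>
+</notextile>
+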
+After the jobs have completed, check output file sizes.
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">for collection_uuid in job:
+  job_uuid = job[collection_uuid]['uuid']
+  job_output = arvados.api('v1').jobs().get(uuid=job_uuid).execute()['output']
+  output_files = arvados.api('v1').collections().get(uuid=job_output).execute()['files']
+  # Test the output size.  If greater than zero, that means 'grep' found the variant
+  if output_files[0][2] > 0:
+    print("%s has variant rs1126809" % (pgpid[collection_uuid]))
+  else:
+    print("%s does not have variant rs1126809" % (pgpid[collection_uuid]))
+</span>
+hu553620 does not have variant rs1126809
+hu43860C does not have variant rs1126809
+hu599905 has variant rs1126809
+huD09534 has variant rs1126809
+hu553620 does not have variant rs1126809
+huB1FD55 does not have variant rs1126809
+huDF04CC has variant rs1126809
+hu7A2F1D has variant rs1126809
+hu7A2F1D has variant rs1126809
+hu599905 has variant rs1126809
+huDF04CC has variant rs1126809
+huB1FD55 does not have variant rs1126809
+huD09534 has variant rs1126809
+hu43860C does not have variant rs1126809
+</code></pre>
+</notextile>
+
+Thus, of the 14 WGS results available for PGP participants reporting non-melanoma skin cancer, 8 include the rs1126809 variant.
diff --git a/doc/user/tutorials/add-new-repository.html.textile.liquid b/doc/user/tutorials/add-new-repository.html.textile.liquid
new file mode 100644 (file)
index 0000000..9d8e768
--- /dev/null
@@ -0,0 +1,47 @@
+---
+layout: default
+navsection: userguide
+title: Adding a new Arvados git repository
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados repositories are managed through the Git revision control system. You can use these repositories to store your Crunch scripts and run them on the Arvados cluster.
+
+{% include 'tutorial_expectations' %}
+
+h2. Setting up Git
+
+Before you start using Git and Arvados repositories, you should do some basic configuration (you only need to do this the first time):
+
+<notextile>
+<pre><code>~$ <span class="userinput">git config --global user.name "Your Name"</span>
+~$ <span class="userinput">git config --global user.email $USER@example.com</span></code></pre>
+</notextile>
+
+h2. Add "tutorial" repository
+
+On the Arvados Workbench, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Repositories*.
+
+In the *Repositories* page, you will see the *Add new repository* button.
+
+!{display: block;margin-left: 25px;margin-right: auto;}{{ site.baseurl }}/images/repositories-panel.png!
+
+Click the *Add new repository* button to open a popup where you can add a new Arvados repository. You will see a text box where you can enter the name of the repository. Enter *tutorial* in this text box and click on *Create*.
+
+{% include 'notebox_begin' %}
+The name you enter here must begin with a letter and can only contain alphanumeric characters.
+{% include 'notebox_end' %}
+
+!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/add-new-repository.png!
+
+This will create a new repository with the name @$USER/tutorial@. It can be accessed using the URL <notextile><code>https://git.{{ site.arvados_api_host }}/$USER/tutorial.git</code></notextile> or <notextile><code>git@git.{{ site.arvados_api_host }}:$USER/tutorial.git</code></notextile>
+
+Back in the *Repositories* page, you should see the @$USER/tutorial@ repository listed in the name column with these URLs.
+
+!{display: block;margin-left: 25px;margin-right: auto;}{{ site.baseurl }}/images/added-new-repository.png!
+
+You are now ready to use this *tutorial* repository to run your crunch scripts.
diff --git a/doc/user/tutorials/git-arvados-guide.html.textile.liquid b/doc/user/tutorials/git-arvados-guide.html.textile.liquid
new file mode 100644 (file)
index 0000000..2e25521
--- /dev/null
@@ -0,0 +1,105 @@
+---
+layout: default
+navsection: userguide
+title: Working with an Arvados git repository
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This tutorial describes how to work with a new Arvados git repository. Working with an Arvados git repository is analogous to working with other public git repositories. It will show you how to upload custom scripts to a remote Arvados repository, so you can use them in Arvados pipelines.
+
+{% include 'tutorial_expectations' %}
+
+{% include 'tutorial_git_repo_expectations' %}
+
+{% include 'notebox_begin' %}
+For more information about using Git, try
+<notextile>
+<pre><code>$ <span class="userinput">man gittutorial</span></code></pre>
+</notextile> or *"search Google for Git tutorials":http://google.com/#q=git+tutorial*.
+{% include 'notebox_end' %}
+
+h2. Cloning an Arvados repository
+
+Before you start using Git, you should do some basic configuration (you only need to do this the first time):
+
+<notextile>
+<pre><code>~$ <span class="userinput">git config --global user.name "Your Name"</span>
+~$ <span class="userinput">git config --global user.email $USER@example.com</span></code></pre>
+</notextile>
+
+On the Arvados Workbench, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Repositories*. In the *Repositories* page, you should see the @$USER/tutorial@ repository listed in the *name* column.  Next to *name* is the column *URL*. Copy the *URL* value associated with your repository.  This should look like <notextile><code>https://git.{{ site.arvados_api_host }}/$USER/tutorial.git</code></notextile>. Alternatively, you can use <notextile><code>git@git.{{ site.arvados_api_host }}:$USER/tutorial.git</code></notextile>
+
+Next, on the Arvados virtual machine, clone your Git repository:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone https://git.{{ site.arvados_api_host }}/$USER/tutorial.git</span>
+Cloning into 'tutorial'...</code></pre>
+</notextile>
+
+This will create a Git repository in the directory called @tutorial@ in your home directory. Say yes when prompted to continue with the connection, and ignore any warning that you are cloning an empty repository.
+
+*Note:* If you are prompted for username and password when you try to git clone using this command, you may first need to update your git configuration. Execute the following commands to update your git configuration.
+
+<notextile>
+<pre>
+<code>~$ <span class="userinput">git config 'credential.https://git.{{ site.arvados_api_host }}/.username' none</span></code>
+<code>~$ <span class="userinput">git config 'credential.https://git.{{ site.arvados_api_host }}/.helper' '!cred(){ cat >/dev/null; if [ "$1" = get ]; then echo password=$ARVADOS_API_TOKEN; fi; };cred'</span></code>
+</pre>
+</notextile>
+
+h2. Creating a git branch in an Arvados repository
+
+Create a git branch named *tutorial_branch* in the *tutorial* Arvados git repository.
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd tutorial</span>
+~/tutorial$ <span class="userinput">git checkout -b tutorial_branch</span>
+</code></pre>
+</notextile>
+
+h2. Adding scripts to an Arvados repository
+
+Arvados crunch scripts need to be added to a *crunch_scripts* subdirectory of the repository. If this subdirectory does not exist, first create it in the local repository and change to that directory:
+
+<notextile>
+<pre><code>~/tutorial$ <span class="userinput">mkdir crunch_scripts</span>
+~/tutorial$ <span class="userinput">cd crunch_scripts</span></code></pre>
+</notextile>
+
+Next, using @nano@ or your favorite Unix text editor, create a new file called @hash.py@ in the @crunch_scripts@ directory.
+
+notextile. <pre>~/tutorial/crunch_scripts$ <code class="userinput">nano hash.py</code></pre>
+
+Add the following code to compute the MD5 hash of each file in a collection:
+
+<notextile> {% code 'tutorial_hash_script_py' as python %} </notextile>
+
+Make the file executable:
+
+notextile. <pre><code>~/tutorial/crunch_scripts$ <span class="userinput">chmod +x hash.py</span></code></pre>
+
+Next, add the file to the git repository.  This tells @git@ that the file should be included on the next commit.
+
+notextile. <pre><code>~/tutorial/crunch_scripts$ <span class="userinput">git add hash.py</span></code></pre>
+
+Next, commit your changes.  All staged changes are recorded into the local git repository:
+
+<notextile>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">git commit -m "my first script"</span>
+</code></pre>
+</notextile>
+
+Finally, upload your changes to the remote repository:
+
+<notextile>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">git push origin tutorial_branch</span>
+</code></pre>
+</notextile>
+
+Although this tutorial shows how to add a Python script to Arvados, the same steps can be used to add any of your custom Bash, R, or Python scripts to an Arvados repository.
diff --git a/doc/user/tutorials/intro-crunch.html.textile.liquid b/doc/user/tutorials/intro-crunch.html.textile.liquid
new file mode 100644 (file)
index 0000000..f5577f8
--- /dev/null
@@ -0,0 +1,28 @@
+---
+layout: default
+navsection: userguide
+title: Introduction to Crunch
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Arvados "Crunch" framework is designed to support processing very large data batches (gigabytes to terabytes) efficiently, and provides the following benefits:
+* Increase concurrency by running tasks asynchronously, using many CPUs and network interfaces at once (especially beneficial for CPU-bound and I/O-bound tasks respectively).
+* Track inputs, outputs, and settings so you can verify that the inputs, settings, and sequence of programs you used to arrive at an output are really what you think they were.
+* Ensure that your programs and workflows are repeatable with different versions of your code, OS updates, etc.
+* Interrupt and resume long-running jobs consisting of many short tasks.
+* Maintain timing statistics automatically, so they're there when you want them.
+
+h2. Prerequisites
+
+To get the most value out of this section, you should be comfortable with the following:
+
+# Using a secure shell client such as SSH or PuTTY to log on to a remote server
+# Using the Unix command line shell, Bash
+# Viewing and editing files using a Unix text editor such as vi, Emacs, or nano
+# Revision control using Git
+
+We also recommend you read the "Arvados Platform Overview":https://dev.arvados.org/projects/arvados/wiki#Platform-Overview for an introduction and background information about Arvados.
diff --git a/doc/user/tutorials/running-external-program.html.textile.liquid b/doc/user/tutorials/running-external-program.html.textile.liquid
new file mode 100644 (file)
index 0000000..a4e58b8
--- /dev/null
@@ -0,0 +1,85 @@
+---
+layout: default
+navsection: userguide
+title: "Writing a pipeline template"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'pipeline_deprecation_notice' %}
+
+This tutorial demonstrates how to construct a two stage pipeline template that uses the "bwa mem":http://bio-bwa.sourceforge.net/ tool to produce a "Sequence Alignment/Map (SAM)":https://samtools.github.io/ file, then uses the "Picard SortSam tool":http://picard.sourceforge.net/command-line-overview.shtml#SortSam to produce a BAM (Binary Alignment/Map) file.
+
+{% include 'tutorial_expectations' %}
+
+Use the following command to create an empty template using @arv create pipeline_template@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv create pipeline_template</span></code></pre>
+</notextile>
+
+This will open the template record in an interactive text editor (as specified by $EDITOR or $VISUAL, otherwise defaults to @nano@).  Now, update the contents of the editor with the following content:
+
+<notextile>{% code 'tutorial_bwa_sortsam_pipeline' as javascript %}</notextile>
+
+* @"name"@ is a human-readable name for the pipeline.
+* @"components"@ is a set of scripts or commands that make up the pipeline.  Each component is given an identifier (@"bwa-mem"@ and @"SortSam"@) in this example).
+** Each entry in components @"components"@ is an Arvados job submission.  For more information about individual jobs, see the "job resource reference.":{{site.baseurl}}/api/methods/jobs.html
+* @"repository"@, @"script_version"@, and @"script"@ indicate that we intend to use the external @"run-command"@ tool wrapper that is part of the Arvados.  These parameters are described in more detail in "Writing a script":tutorial-firstscript.html.
+* @"runtime_constraints"@ describes runtime resource requirements for the component.
+** @"docker_image"@ specifies the "Docker":https://www.docker.com/ runtime environment in which to run the job.  The Docker image @"bcosc/arv-base-java"@ supplied here has the Java runtime environment, bwa, and samtools installed.
+** @"arvados_sdk_version"@ specifies a version of the Arvados SDK to load alongside the job's script. The example uses 'master'. If you would like to use a specific version of the sdk, you can find it in the "Arvados Python sdk repository":https://dev.arvados.org/projects/arvados/repository/revisions/master/show/sdk/python under *Latest revisions*.
+* @"script_parameters"@ describes the component parameters.
+** @"command"@ is the actual command line to invoke the @bwa@ and then @SortSam@.  The notation @$()@ denotes macro substitution commands evaluated by the run-command tool wrapper.
+** @"task.stdout"@ indicates that the output of this command should be captured to a file.
+** @$(node.cores)@ evaluates to the number of cores available on the compute node at the time the command is run.
+** @$(tmpdir)@ evaluates to the local path of a temporary directory that the command should use for scratch data.
+** @$(reference_collection)@ evaluates to the script parameter @"reference_collection"@.
+** @$(dir $(...))@ constructs a local path to a directory representing the supplied Arvados collection.
+** @$(file $(...))@ constructs a local path to a given file within the supplied Arvados collection.
+** @$(glob $(...))@ searches the specified path based on a file glob pattern and evaluates to the first result.
+** @$(basename $(...))@ evaluates to the supplied path with the leading path portion and trailing filename extensions stripped.
+* @"output_of"@ indicates that the @output@ of the @bwa-mem@ component should be used as the @"input"@ script parameter of @SortSam@.  Arvados uses these dependencies between components to automatically determine the correct order to run them.
+
+When using @run-command@, the tool should write its output to the current working directory.  The output will be automatically uploaded to Keep when the job completes.
+
+See the "run-command reference":{{site.baseurl}}/user/topics/run-command.html for more information about using @run-command@.
+
+*Note:* To get job reproducibility without re-computation, you need to pin these parameters to specific hashes. Using a version such as @master@ in @"arvados_sdk_version"@ will grab the latest version hash, which will allow Arvados to re-compute your job if the SDK gets updated.
+* @"arvados_sdk_version"@ : The latest version can be found in the "Arvados Python SDK repository":https://dev.arvados.org/projects/arvados/repository/revisions/master/show/sdk/python under *Latest revisions*.
+* @"script_version"@ : The current version of your script in your git repository can be found by using the following command:
+
+<notextile>
+<pre><code>~$ <span class="userinput">git rev-parse HEAD</span></code></pre>
+</notextile>
+
+* @"docker_image"@ : The docker image hash used is found on the "Collection page":https://playground.arvados.org/collections/qr1hi-4zz18-dov6im679g3jr1n as the *Content address*.
+
+h2. Running your pipeline
+
+Your new pipeline template should appear at the top of the Workbench "pipeline&nbsp;templates":{{site.arvados_workbench_host}}/pipeline_templates page.  You can run your pipeline "using Workbench":tutorial-workflow-workbench.html or the "command line.":{{site.baseurl}}/user/topics/running-pipeline-command-line.html
+
+Test data is available in the "Arvados Tutorial":{{site.arvados_workbench_host}}/projects/qr1hi-j7d0g-u7zg1qdaowykd8d project:
+
+* Choose <i class="fa fa-fw fa-archive"></i> "Tutorial chromosome 19 reference (2463fa9efeb75e099685528b3b9071e0+438)":{{site.arvados_workbench_host}}/collections/2463fa9efeb75e099685528b3b9071e0+438 for the "reference_collection" parameter
+* Choose <i class="fa fa-fw fa-archive"></i> "Tutorial sample exome (3229739b505d2b878b62aed09895a55a+142)":{{site.arvados_workbench_host}}/collections/3229739b505d2b878b62aed09895a55a+142 for the "sample" parameter
+
+For more information and examples for writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/methods/pipeline_templates.html
+
+h2. Re-using your pipeline run
+
+Arvados allows users to re-use jobs that have the same inputs in order to save computing time and resources. Users are able to change a job downstream without re-computing earlier jobs. This section shows which version control parameters should be tuned to make sure Arvados will not re-compute your jobs.
+
+Note: Job reuse can only happen if none of the input collections change.
+
+* @"arvados_sdk_version"@ : The arvados_sdk_version parameter is used to download the specific version of the Arvados sdk into the docker image. The latest version can be found in the "Arvados Python sdk repository":https://dev.arvados.org/projects/arvados/repository/revisions/master/show/sdk/python under *Latest revisions*. Make sure you set this to the same version as the previous run that you are trying to reuse.
+* @"script_version"@ : The script_version is the commit hash of the git branch that the crunch script resides in. This information can be found in your git repository by using the following command:
+
+<notextile>
+<pre><code>~$ <span class="userinput">git rev-parse HEAD</span></code></pre>
+</notextile>
+
+* @"docker_image"@ : This specifies the "Docker":https://www.docker.com/ runtime environment where jobs run their scripts. Docker version control is similar to git, and you can commit and push changes to your images. You must re-use the docker image hash from the previous run to use the same image. It can be found on the "Collection page":https://playground.arvados.org/collections/qr1hi-4zz18-dov6im679g3jr1n as the *Content address* or the *docker_image_locator* in a job's metadata.
diff --git a/doc/user/tutorials/tutorial-firstscript.html.textile.liquid b/doc/user/tutorials/tutorial-firstscript.html.textile.liquid
new file mode 100644 (file)
index 0000000..3937698
--- /dev/null
@@ -0,0 +1,112 @@
+---
+layout: default
+navsection: userguide
+navmenu: Tutorials
+title: "Writing a Crunch script"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'pipeline_deprecation_notice' %}
+
+This tutorial demonstrates how to write a script using the Arvados Python SDK.  The Arvados SDK supports access to advanced features not available using the @run-command@ wrapper, such as scheduling concurrent tasks across nodes.
+
+{% include 'tutorial_expectations' %}
+
+This tutorial uses @$USER@ to denote your username.  Replace @$USER@ with your user name in all the following examples.
+
+Start by creating a directory called @tutorial@ in your home directory.  Next, create a subdirectory called @crunch_scripts@ and change to that directory:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $HOME</span>
+~$ <span class="userinput">mkdir -p tutorial/crunch_scripts</span>
+~$ <span class="userinput">cd tutorial/crunch_scripts</span></code></pre>
+</notextile>
+
+Next, using @nano@ or your favorite Unix text editor, create a new file called @hash.py@ in the @crunch_scripts@ directory.
+
+notextile. <pre>~/tutorial/crunch_scripts$ <code class="userinput">nano hash.py</code></pre>
+
+Add the following code to compute the MD5 hash of each file in a collection:
+
+<notextile> {% code 'tutorial_hash_script_py' as python %} </notextile>
+
+Make the file executable:
+
+notextile. <pre><code>~/tutorial/crunch_scripts$ <span class="userinput">chmod +x hash.py</span></code></pre>
+
+Next, create a submission job record.  This describes a specific invocation of your script:
+
+<notextile>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">cat &gt;~/the_job &lt;&lt;EOF
+{
+ "repository":"",
+ "script":"hash.py",
+ "script_version":"$HOME/tutorial",
+ "script_parameters":{
+   "input":"c1bad4b39ca5a924e481008009d94e32+210"
+ }
+}
+EOF</span>
+</code></pre>
+</notextile>
+
+You can now run your script on your local workstation or VM using @arv-crunch-job@:
+
+<notextile>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">arv-crunch-job --job "$(cat ~/the_job)"</span>
+2014-08-06_15:16:22 qr1hi-8i9sb-qyrat80ef927lam 14473  check slurm allocation
+2014-08-06_15:16:22 qr1hi-8i9sb-qyrat80ef927lam 14473  node localhost - 1 slots
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  start
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  script hash.py
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  script_version $HOME/tutorial
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  script_parameters {"input":"c1bad4b39ca5a924e481008009d94e32+210"}
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  runtime_constraints {"max_tasks_per_node":0}
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  start level 0
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  status: 0 done, 0 running, 1 todo
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 0 job_task qr1hi-ot0gb-lptn85mwkrn9pqo
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 0 child 14478 started on localhost.1
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  status: 0 done, 1 running, 0 todo
+2014-08-06_15:16:24 qr1hi-8i9sb-qyrat80ef927lam 14473 0 stderr crunchstat: Running [stdbuf --output=0 --error=0 /home/$USER/tutorial/crunch_scripts/hash.py]
+2014-08-06_15:16:24 qr1hi-8i9sb-qyrat80ef927lam 14473 0 child 14478 on localhost.1 exit 0 signal 0 success=true
+2014-08-06_15:16:24 qr1hi-8i9sb-qyrat80ef927lam 14473 0 success in 1 seconds
+2014-08-06_15:16:24 qr1hi-8i9sb-qyrat80ef927lam 14473 0 output
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473  wait for last 0 children to finish
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473  status: 1 done, 0 running, 1 todo
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473  start level 1
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473  status: 1 done, 0 running, 1 todo
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473 1 job_task qr1hi-ot0gb-e3obm0lv6k6p56a
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473 1 child 14504 started on localhost.1
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473  status: 1 done, 1 running, 0 todo
+2014-08-06_15:16:26 qr1hi-8i9sb-qyrat80ef927lam 14473 1 stderr crunchstat: Running [stdbuf --output=0 --error=0 /home/$USER/tutorial/crunch_scripts/hash.py]
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 1 child 14504 on localhost.1 exit 0 signal 0 success=true
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 1 success in 10 seconds
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 1 output 8c20281b9840f624a486e4f1a78a1da8+105+A234be74ceb5ea31db6e11b6be26f3eb76d288ad0@54987018
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  wait for last 0 children to finish
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  status: 2 done, 0 running, 0 todo
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  release job allocation
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  Freeze not implemented
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  collate
+2014-08-06_15:16:36 qr1hi-8i9sb-qyrat80ef927lam 14473  collated output manifest text to send to API server is 105 bytes with access tokens
+2014-08-06_15:16:36 qr1hi-8i9sb-qyrat80ef927lam 14473  output hash c1b44b6dc41ef334cf1136033ca950e6+54
+2014-08-06_15:16:37 qr1hi-8i9sb-qyrat80ef927lam 14473  finish
+2014-08-06_15:16:38 qr1hi-8i9sb-qyrat80ef927lam 14473  log manifest is 7fe8cf1d45d438a3ca3ac4a184b7aff4+83
+</code></pre>
+</notextile>
+
+Although the job runs locally, the output of the job has been saved to Keep, the Arvados file store.  The "output hash" line (third from the bottom) provides the portable data hash of the Arvados collection where the script's output has been saved.  Copy the output hash and use @arv-ls@ to list the contents of your output collection, and @arv-get@ to download it to the current directory:
+
+<notextile>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">arv-ls c1b44b6dc41ef334cf1136033ca950e6+54</span>
+./md5sum.txt
+~/tutorial/crunch_scripts$ <span class="userinput">arv-get c1b44b6dc41ef334cf1136033ca950e6+54/ .</span>
+0 MiB / 0 MiB 100.0%
+~/tutorial/crunch_scripts$ <span class="userinput">cat md5sum.txt</span>
+44b8ae3fde7a8a88d2f7ebd237625b4f c1bad4b39ca5a924e481008009d94e32+210/var-GS000016015-ASM.tsv.bz2
+</code></pre>
+</notextile>
+
+Running locally is convenient for development and debugging, as it permits a fast iterative development cycle.  Your job run is also recorded by Arvados, and will appear in the *Recent jobs and pipelines* panel on the "Workbench Dashboard":{{site.arvados_workbench_host}}.  This provides limited provenance, by recording the input parameters, the execution log, and the output.  However, running locally does not allow you to scale out to multiple nodes, and does not store the complete system snapshot required to achieve reproducibility; to do that you need to "submit a job to the Arvados cluster":{{site.baseurl}}/user/tutorials/tutorial-submit-job.html.
diff --git a/doc/user/tutorials/tutorial-keep-collection-lifecycle.html.textile.liquid b/doc/user/tutorials/tutorial-keep-collection-lifecycle.html.textile.liquid
new file mode 100644 (file)
index 0000000..2375e8b
--- /dev/null
@@ -0,0 +1,71 @@
+---
+layout: default
+navsection: userguide
+title: "Keep collection lifecycle"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+During its lifetime, a Keep collection can be in various states: *persisted*, *expiring*, *trashed*, and *permanently deleted*.
+
+A collection is *expiring* when it has a *trash_at* time in the future. An expiring collection can be accessed as normal, but is scheduled to be trashed automatically at the *trash_at* time.
+
+A collection is *trashed* when it has a *trash_at* time in the past. The *is_trashed* attribute will also be "true". The delete operation immediately puts the collection in the trash by setting the *trash_at* time to "now". Once trashed, the collection is no longer readable through normal data access APIs. The collection will have *delete_at* set to some time in the future. The trashed collection is recoverable until the *delete_at* time passes, at which point the collection is permanently deleted.
+
+# "*Collection lifecycle attributes*":#collection_attributes
+# "*Deleting / trashing collections*":#delete-collection
+# "*Recovering trashed collections*":#trash-recovery
+
+{% include 'tutorial_expectations' %}
+
+h2(#collection_attributes). Collection lifecycle attributes
+
+As listed above, the attributes used to manage a collection's lifecycle are its *is_trashed*, *trash_at*, and *delete_at*. The table below lists the values of these attributes and how they influence the state of a collection and its accessibility.
+
+table(table table-bordered table-condensed).
+|_. collection state|_. is_trashed|_. trash_at|_. delete_at|_. get|_. list|_. list?include_trash=true|_. can be modified|
+|persisted collection|false |null |null |yes |yes |yes |yes |
+|expiring collection|false |future |future |yes  |yes |yes |yes |
+|trashed collection|true |past |future |no |no |yes |only the is_trashed, trash_at and delete_at attributes|
+|deleted collection|true|past |past |no |no |no |no |
+
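+If you want to check these attributes on a collection of your own, a minimal sketch using the Python SDK (the UUID is a placeholder):
+
+<pre>
+import arvados
+
+# Fetch one collection record and print its lifecycle attributes
+c = arvados.api('v1').collections().get(uuid='qr1hi-4zz18-xxxxxxxxxxxxxxx').execute()
+print c['is_trashed'], c['trash_at'], c['delete_at']
+</pre>
+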
+h2(#delete-collection). Deleting / trashing collections
+
+A collection can be deleted using either the arv command line tool or the workbench.
+
+h3. Trashing a collection using arv command line tool
+
+<pre>
+arv collection delete --uuid=qr1hi-4zz18-xxxxxxxxxxxxxxx
+</pre>
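+
+If you are scripting cleanup, the same operation is available through the Python SDK. A minimal sketch (again with a placeholder UUID):
+
+<pre>
+import arvados
+
+# Equivalent of "arv collection delete": moves the collection to the trash
+arvados.api('v1').collections().delete(uuid='qr1hi-4zz18-xxxxxxxxxxxxxxx').execute()
+</pre>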
+
+h3. Trashing a collection using workbench
+
+To trash a collection using Workbench, go to the *Data collections* tab in the project and use the trash icon in that collection's row.
+
+h2(#trash-recovery). Recovering trashed collections
+
+A collection can be un-trashed / recovered using either the arv command line tool or the workbench.
+
+h3. Un-trashing a collection using arv command line tool
+
+You can list the trashed collections using the list command.
+
+<pre>
+arv collection list --include-trash=true --filters '[["is_trashed", "=", "true"]]'
+</pre>
+
+You can then untrash a particular collection with @arv@ using its UUID.
+
+<pre>
+arv collection untrash --uuid=qr1hi-4zz18-xxxxxxxxxxxxxxx
+</pre>
+
+h3. Un-trashing a collection using workbench
+
+To untrash a collection using Workbench, go to the Trash page by clicking on the "Trash" icon in the top navigation bar, then use the recycle icon or the selection dropdown option.
+
+!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/trash-button-topnav.png!
diff --git a/doc/user/tutorials/tutorial-keep-get.html.textile.liquid b/doc/user/tutorials/tutorial-keep-get.html.textile.liquid
new file mode 100644 (file)
index 0000000..f206d30
--- /dev/null
@@ -0,0 +1,97 @@
+---
+layout: default
+navsection: userguide
+title: "Downloading data"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados data collections can be downloaded using either the @arv@ command line tools or Workbench.
+
+# "*Downloading using arv commands*":#download-using-arv
+# "*Downloading using Workbench*":#download-using-workbench
+# "*Downloading a shared collection using Workbench*":#download-shared-collection
+
+h2(#download-using-arv). Downloading using arv commands
+
+{% include 'tutorial_expectations' %}
+
+You can download Arvados data collections using the command line tools @arv-ls@ and @arv-get@.
+
+Use @arv-ls@ to view the contents of a collection:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-ls c1bad4b39ca5a924e481008009d94e32+210</span>
+var-GS000016015-ASM.tsv.bz2
+</code></pre>
+
+<pre><code>~$ <span class="userinput">arv-ls 887cd41e9c613463eab2f0d885c6dd96+83</span>
+alice.txt
+bob.txt
+carol.txt
+</code></pre>
+</notextile>
+
+Use @-s@ to print file sizes rounded up to the nearest kilobyte:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-ls -s c1bad4b39ca5a924e481008009d94e32+210</span>
+221887 var-GS000016015-ASM.tsv.bz2
+</code></pre>
+</notextile>
+
+Use @arv-get@ to download the contents of a collection and place it in the directory specified in the second argument (in this example, @.@ for the current directory):
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-get c1bad4b39ca5a924e481008009d94e32+210/ .</span>
+~$ <span class="userinput">ls var-GS000016015-ASM.tsv.bz2</span>
+var-GS000016015-ASM.tsv.bz2
+</code></pre>
+</notextile>
+
+You can also download individual files:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-get 887cd41e9c613463eab2f0d885c6dd96+83/alice.txt .</span>
+</code></pre>
+</notextile>
+
+h3. Federated downloads
+
+If your cluster is "configured to be part of a federation":{{site.baseurl}}/admin/federation.html you can also download collections hosted on other clusters (with appropriate permissions).
+
+If you request a collection by portable data hash, it will first search the home cluster, then search federated clusters.
+
+You may also request a collection by UUID.  In this case, it will contact the cluster named in the UUID prefix (in this example, @qr1hi@).
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-get qr1hi-4zz18-fw6dnjxtkvzdewt/ .</span>
+</code></pre>
+</notextile>
+
+h2(#download-using-workbench). Downloading using Workbench
+
+You can also download Arvados data collections using the Workbench.
+
+Visit the Workbench *Dashboard*. Click on the *Projects*<span class="caret"></span> dropdown menu in the top navigation menu and select your *Home* project. You will see the *Data collections* tab, which lists the collections in this project.
+
+You can access the contents of a collection by clicking on the *<i class="fa fa-fw fa-archive"></i> Show* button next to the collection. This will take you to the collection's page. Using this page you can see the collection's contents, download individual files, and set sharing options.
+
+You can now download the collection files by clicking on the <span class="btn btn-sm btn-info"><i class="fa fa-download"></i></span> button(s).
+
+h2(#download-shared-collection). Downloading a shared collection using Workbench
+
+Collections can be shared to allow downloads by anonymous users.
+
+To share a collection with anonymous users, visit the collection page using Workbench as described in the above section. Once on this page, click on the <span class="btn btn-sm btn-primary" >Create sharing link</span> button.
+
+This will create a sharing link for the collection as shown below. You can copy the sharing link in this page and share it with other users.
+
+!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/shared-collection.png!
+
+A user with this URL can download the collection simply by accessing it in a browser. The browser will present a downloadable version of the collection, as shown below.
+
+!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/download-shared-collection.png!
diff --git a/doc/user/tutorials/tutorial-keep-mount-gnu-linux.html.textile.liquid b/doc/user/tutorials/tutorial-keep-mount-gnu-linux.html.textile.liquid
new file mode 100644 (file)
index 0000000..e176021
--- /dev/null
@@ -0,0 +1,61 @@
+---
+layout: default
+navsection: userguide
+title: "Accessing Keep from GNU/Linux"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This tutorial describes how to access Arvados collections on GNU/Linux using traditional filesystem tools by mounting Keep as a file system using @arv-mount@.
+
+{% include 'tutorial_expectations' %}
+
+h2. Arv-mount
+
+@arv-mount@ provides several features:
+
+* You can browse, open and read Keep entries as if they are regular files.
+* It is easy for existing tools to access files in Keep.
+* Data is streamed on demand.  It is not necessary to download an entire file or collection to start processing.
+
+The default mode permits browsing any collection in Arvados as a subdirectory under the mount directory.  To avoid having to fetch a potentially large list of all collections, collection directories only come into existence when explicitly accessed by UUID or portable data hash. For instance, a collection may be found by its content hash in the @keep/by_id@ directory.
+
+<notextile>
+<pre><code>~$ <span class="userinput">mkdir -p keep</span>
+~$ <span class="userinput">arv-mount keep</span>
+~$ <span class="userinput">cd keep/by_id/c1bad4b39ca5a924e481008009d94e32+210</span>
+~/keep/by_id/c1bad4b39ca5a924e481008009d94e32+210$ <span class="userinput">ls</span>
+var-GS000016015-ASM.tsv.bz2
+~/keep/by_id/c1bad4b39ca5a924e481008009d94e32+210$ <span class="userinput">md5sum var-GS000016015-ASM.tsv.bz2</span>
+44b8ae3fde7a8a88d2f7ebd237625b4f  var-GS000016015-ASM.tsv.bz2
+~/keep/by_id/c1bad4b39ca5a924e481008009d94e32+210$ <span class="userinput">cd ../..</span>
+~$ <span class="userinput">fusermount -u keep</span>
+</code></pre>
+</notextile>
+
+The last line unmounts Keep.  Subdirectories will no longer be accessible.
+
+In the top level directory of each collection, arv-mount provides a special file called @.arvados#collection@ that contains a JSON-formatted API record for the collection. This can be used to determine the collection's @portable_data_hash@, @uuid@, etc. This file does not show up in @ls@ or @ls -a@.
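+
+For example, you can read the collection's @portable_data_hash@ from this file. The sketch below assumes the @jq@ tool is installed; the quotes are needed because of the @#@ in the file name:
+
+<notextile>
+<pre><code>~/keep/by_id/c1bad4b39ca5a924e481008009d94e32+210$ <span class="userinput">jq -r .portable_data_hash '.arvados#collection'</span>
+c1bad4b39ca5a924e481008009d94e32+210
+</code></pre>
+</notextile>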
+
+h3. Modifying files and directories in Keep
+
+By default, all files in the Keep mount are read-only.  However, @arv-mount --read-write@ enables you to perform the following operations using normal Unix command line tools (@touch@, @mv@, @rm@, @mkdir@, @rmdir@) and your own programs using standard POSIX file system APIs (an example session follows the lists below):
+
+* Create, update, rename and delete individual files within collections
+* Create and delete subdirectories inside collections
+* Move files and directories within and between collections
+* Create and delete collections within a project (using @mkdir@ and @rmdir@ in a project directory)
+
+Not supported:
+
+* Symlinks, hard links
+* Changing permissions
+* Extended attributes
+* Moving a subdirectory of a collection into a project, or moving a collection from a project into another collection
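+
+A minimal read-write session might look like the following sketch (the collection and file names are illustrative; @home@ is the mount's directory for your home project):
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-mount --read-write keep</span>
+~$ <span class="userinput">mkdir "keep/home/my new collection"</span>
+~$ <span class="userinput">cp results.txt "keep/home/my new collection/"</span>
+~$ <span class="userinput">fusermount -u keep</span>
+</code></pre>
+</notextile>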
+
+If multiple clients (separate instances of arv-mount or other Arvados applications) modify the same file in the same collection within a short time interval, this may result in a conflict.  In this case, the most recent commit wins, and the "loser" will be renamed to a conflict file of the form @name~YYYYMMDD-HHMMSS~conflict~@.
+
+Please note that this feature is in beta testing.  In particular, the conflict mechanism is itself currently subject to race conditions, with potential for data loss when a collection is modified simultaneously by multiple clients.  This issue will be resolved in future development.
diff --git a/doc/user/tutorials/tutorial-keep-mount-os-x.html.textile.liquid b/doc/user/tutorials/tutorial-keep-mount-os-x.html.textile.liquid
new file mode 100644 (file)
index 0000000..a4e0f5e
--- /dev/null
@@ -0,0 +1,24 @@
+---
+layout: default
+navsection: userguide
+title: "Accessing Keep from OS X"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+OS X users can browse Keep read-only via WebDAV. Specific collections can also be accessed read-write via WebDAV.
+
+h3. Browsing Keep (read-only)
+
+In Finder, use "Connect to Server..." under the "Go" menu and enter @https://collections.uuid_prefix.your.domain/@ in the popup dialog. When prompted for credentials, put a valid Arvados token in the @Password@ field and anything in the @Name@ field (it will be ignored by Arvados).
+
+This mount is read-only. Write support for the @/users/@ directory is planned for a future release.
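+
+You can also mount the same WebDAV share from the command line. This is a sketch using the @mount_webdav@ helper that ships with macOS (the @-i@ flag prompts for the name/token credentials interactively):
+
+<notextile>
+<pre><code>~$ <span class="userinput">mkdir -p ~/keep</span>
+~$ <span class="userinput">mount_webdav -i https://collections.uuid_prefix.your.domain/ ~/keep</span>
+</code></pre>
+</notextile>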
+
+h3. Accessing a specific collection in Keep (read-write)
+
+In Finder, use "Connect to Server..." under the "Go" menu and enter @https://collections.uuid_prefix.your.domain/c=your-collection-uuid@ in the popup dialog. When prompted for credentials, put a valid Arvados token in the @Password@ field and anything in the @Name@ field (it will be ignored by Arvados).
+
+This collection is now accessible read/write.
diff --git a/doc/user/tutorials/tutorial-keep-mount-windows.html.textile.liquid b/doc/user/tutorials/tutorial-keep-mount-windows.html.textile.liquid
new file mode 100644 (file)
index 0000000..4384cd0
--- /dev/null
@@ -0,0 +1,24 @@
+---
+layout: default
+navsection: userguide
+title: "Accessing Keep from Windows"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Windows users can browse Keep read-only via WebDAV. Specific collections can also be accessed read-write via WebDAV.
+
+h3. Browsing Keep (read-only)
+
+Use the 'Map network drive' functionality, and enter @https://collections.uuid_prefix.your.domain/@ in the Folder field. When prompted for credentials, you can fill in an arbitrary string for @Username@; it is ignored by Arvados, but Windows will not accept an empty @Username@. Put a valid Arvados token in the @Password@ field.
+
+This mount is read-only. Write support for the @/users/@ directory is planned for a future release.
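+
+Alternatively, you can map the drive from a Command Prompt with @net use@. This is a sketch using the standard Windows WebDAV redirector syntax; supply your Arvados token as the password:
+
+<notextile>
+<pre><code>C:\> <span class="userinput">net use Z: \\collections.uuid_prefix.your.domain@SSL\DavWWWRoot your-arvados-token /user:arvados</span>
+</code></pre>
+</notextile>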
+
+h3. Accessing a specific collection in Keep (read-write)
+
+Use the 'Map network drive' functionality, and enter @https://collections.uuid_prefix.your.domain/c=your-collection-uuid@ in the Folder field. When prompted for credentials, you can fill in an arbitrary string for @Username@; it is ignored by Arvados, but Windows will not accept an empty @Username@. Put a valid Arvados token in the @Password@ field.
+
+This collection is now accessible read/write.
diff --git a/doc/user/tutorials/tutorial-keep.html.textile.liquid b/doc/user/tutorials/tutorial-keep.html.textile.liquid
new file mode 100644 (file)
index 0000000..53cdfe4
--- /dev/null
@@ -0,0 +1,85 @@
+---
+layout: default
+navsection: userguide
+title: "Uploading data"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados data collections can be uploaded using either the @arv keep put@ command line tool or Workbench.
+
+# "*Upload using command line tool*":#upload-using-command
+# "*Upload using Workbench*":#upload-using-workbench
+
+notextile. <div class="spaced-out">
+
+h2(#upload-using-command). Upload using command line tool
+
+{% include 'tutorial_expectations' %}
+
+To upload a file to Keep using @arv keep put@:
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep put var-GS000016015-ASM.tsv.bz2</span>
+216M / 216M 100.0%
+Collection saved as ...
+qr1hi-4zz18-xxxxxxxxxxxxxxx
+</code></pre>
+</notextile>
+
+
+The output value @qr1hi-4zz18-xxxxxxxxxxxxxxx@ is the UUID of the newly created Arvados collection.
+
+Note: The file used in this example is a freely available TSV file containing variant annotations from the "Personal Genome Project (PGP)":http://www.pgp-hms.org participant "hu599905":https://my.pgp-hms.org/profile/hu599905, downloadable "here":https://warehouse.pgp-hms.org/warehouse/f815ec01d5d2f11cb12874ab2ed50daa+234+K@ant/var-GS000016015-ASM.tsv.bz2. Alternatively, you can replace @var-GS000016015-ASM.tsv.bz2@ with the name of any file you have locally, or you can get the TSV file by "downloading it from Keep.":{{site.baseurl}}/user/tutorials/tutorial-keep-get.html
+
+<notextile><a name="dir"></a></notextile>It is also possible to upload an entire directory with @arv keep put@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">mkdir tmp</span>
+~$ <span class="userinput">echo "hello alice" > tmp/alice.txt</span>
+~$ <span class="userinput">echo "hello bob" > tmp/bob.txt</span>
+~$ <span class="userinput">echo "hello carol" > tmp/carol.txt</span>
+~$ <span class="userinput">arv keep put tmp</span>
+0M / 0M 100.0%
+Collection saved as ...
+qr1hi-4zz18-yyyyyyyyyyyyyyy
+</code></pre>
+</notextile>
+
+In both examples, the @arv keep put@ command created a collection. The first collection contains the single uploaded file. The second collection contains the entire uploaded directory.
+
+@arv keep put@ accepts quite a few optional command line arguments, which are described on the "arv subcommands":{{site.baseurl}}/sdk/cli/subcommands.html#arv-keep-put page.
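+
+For example, two commonly useful options are @--name@, which sets the collection name, and @--project-uuid@, which uploads into a specific project instead of your home project (the UUID below is a placeholder):
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep put --name "Variant annotations" --project-uuid qr1hi-j7d0g-xxxxxxxxxxxxxxx var-GS000016015-ASM.tsv.bz2</span>
+</code></pre>
+</notextile>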
+
+h3. Locate your collection in Workbench
+
+Visit the Workbench *Dashboard*.  Click on the *Projects*<span class="caret"></span> dropdown menu in the top navigation menu and select your *Home* project.  Your newly uploaded collection should appear near the top of the *Data collections* tab.  The collection name printed by @arv keep put@ will appear under the *name* column.
+
+To move the collection to a different project, check the box at the left of the collection row.  Pull down the *Selection...*<span class="caret"></span> menu near the top of the page tab and select *Move selected...*.  This will open a dialog box where you can select a destination project for the collection.  Click a project, then click the <span class="btn btn-sm btn-primary">Move</span> button.
+
+!{display: block;margin-left: 25px;margin-right: auto;}{{ site.baseurl }}/images/workbench-move-selected.png!
+
+Click on the *<i class="fa fa-fw fa-archive"></i> Show* button next to the collection's listing on a project page to go to the Workbench page for your collection.  On this page, you can see the collection's contents, download individual files, and set sharing options.
+
+notextile. </div>
+
+h2(#upload-using-workbench). Upload using Workbench
+
+To upload using Workbench, visit the Workbench *Dashboard*. Click on the *Projects*<span class="caret"></span> dropdown menu in the top navigation menu and select your *Home* project (or any other project of your choosing).  You will see the *Data collections* tab for this project, which lists the collections in this project.
+
+To upload files into a new collection, click on the *Add data*<span class="caret"></span> dropdown menu and select *Upload files from my computer*.
+
+!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/upload-using-workbench.png!
+
+<br/>This will create a new empty collection in your chosen project and will take you to the *Upload* tab for that collection.
+
+!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/upload-tab-in-new-collection.png!
+
+Click on the *Browse...* button and select the files you would like to upload. The selected files will be added to a list of files to be uploaded. When you are done selecting files, click on the *<i class="fa fa-fw fa-play"></i> Start* button to start the upload. Workbench will show a progress bar while the files upload, and an indication when the upload has completed.
+
+!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/files-uploaded.png!
+
+*Note:* If you leave the collection page during the upload, the upload process will be aborted and you will need to upload the files again.
+
+*Note:* You can also use the Upload tab to add additional files to an existing collection.
diff --git a/doc/user/tutorials/tutorial-submit-job.html.textile.liquid b/doc/user/tutorials/tutorial-submit-job.html.textile.liquid
new file mode 100644 (file)
index 0000000..ff78aab
--- /dev/null
@@ -0,0 +1,95 @@
+---
+layout: default
+navsection: userguide
+navmenu: Tutorials
+title: "Running on an Arvados cluster"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'pipeline_deprecation_notice' %}
+
+This tutorial demonstrates how to create a pipeline to run your crunch script on an Arvados cluster.  Cluster jobs can scale out to multiple nodes, and use @git@ and @docker@ to store the complete system snapshot required to achieve reproducibility.
+
+{% include 'tutorial_expectations' %}
+
+This tutorial uses @$USER@ to denote your username.  Replace @$USER@ with your user name in all the following examples.
+
+Also, this tutorial uses the @tutorial@ arvados repository created in "Adding a new arvados repository":add-new-repository.html as the example repository.
+
+h2. Clone Arvados repository
+
+Please clone the *tutorial* repository using the instructions from "Working with Arvados git repository":git-arvados-guide.html, if you have not already done so.
+
+h2. Creating a Crunch script
+
+Start by entering the @tutorial@ directory created by @git clone@. Next, create a subdirectory called @crunch_scripts@ and change to that directory:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd tutorial</span>
+~/tutorial$ <span class="userinput">mkdir crunch_scripts</span>
+~/tutorial$ <span class="userinput">cd crunch_scripts</span></code></pre>
+</notextile>
+
+Next, using @nano@ or your favorite Unix text editor, create a new file called @hash.py@ in the @crunch_scripts@ directory.
+
+notextile. <pre>~/tutorial/crunch_scripts$ <code class="userinput">nano hash.py</code></pre>
+
+Add the following code to compute the MD5 hash of each file in a collection.  (If you already completed "Writing a Crunch script":tutorial-firstscript.html, you can simply copy the @hash.py@ file you created previously.)
+
+<notextile> {% code 'tutorial_hash_script_py' as python %} </notextile>
+
+Make the file executable:
+
+notextile. <pre><code>~/tutorial/crunch_scripts$ <span class="userinput">chmod +x hash.py</span></code></pre>
+
+Next, add the file to the staging area.  This tells @git@ that the file should be included on the next commit.
+
+notextile. <pre><code>~/tutorial/crunch_scripts$ <span class="userinput">git add hash.py</span></code></pre>
+
+Next, commit your changes.  All staged changes are recorded into the local git repository:
+
+<notextile>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">git commit -m "my first script"</span>
+[master (root-commit) 27fd88b] my first script
+ 1 file changed, 45 insertions(+)
+ create mode 100755 crunch_scripts/hash.py</code></pre>
+</notextile>
+
+Finally, upload your changes to the Arvados server:
+
+<notextile>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">git push origin master</span>
+Counting objects: 4, done.
+Compressing objects: 100% (2/2), done.
+Writing objects: 100% (4/4), 682 bytes, done.
+Total 4 (delta 0), reused 0 (delta 0)
+To git@git.qr1hi.arvadosapi.com:$USER/tutorial.git
+ * [new branch]      master -> master</code></pre>
+</notextile>
+
+h2. Create a pipeline template
+
+Next, create a new template using @arv create pipeline_template@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv create pipeline_template</span></code></pre>
+</notextile>
+
+In the editor, enter the following template:
+
+<notextile> {% code 'tutorial_submit_job' as javascript %} </notextile>
+
+* @"repository"@ is the name of a git repository to search for the script version.  You can access a list of available git repositories on the Arvados Workbench in the *Repositories* page using the <span class="fa fa-lg fa-user"></span> <span class="caret"></span> top navigation menu icon.
+* @"script_version"@ specifies the version of the script that you wish to run.  This can be in the form of an explicit Git revision hash, a tag, or a branch (in which case it will use the HEAD of the specified branch).  Arvados logs the script version that was used in the run, enabling you to go back and re-run any past job with the guarantee that the exact same code will be used as was used in the previous run.
+* @"script"@ specifies the filename of the script to run.  Crunch expects to find this in the @crunch_scripts/@ subdirectory of the Git repository.
+* @"runtime_constraints"@ describes the runtime environment required to run the job.  These are described in the "job record schema":{{site.baseurl}}/api/methods/jobs.html
+
+h2. Running your pipeline
+
+Your new pipeline template should appear at the top of the Workbench "pipeline&nbsp;templates":{{site.arvados_workbench_host}}/pipeline_templates page.  You can run your pipeline "using Workbench":tutorial-workflow-workbench.html or the "command line.":{{site.baseurl}}/user/topics/running-pipeline-command-line.html
+
+For more information and examples of writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/methods/pipeline_templates.html
diff --git a/doc/user/tutorials/tutorial-workflow-workbench.html.textile.liquid b/doc/user/tutorials/tutorial-workflow-workbench.html.textile.liquid
new file mode 100644 (file)
index 0000000..8dcb8e6
--- /dev/null
@@ -0,0 +1,36 @@
+---
+layout: default
+navsection: userguide
+title: "Running a workflow using Workbench"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+A "workflow" (sometimes called a "pipeline" in other systems) is a sequence of steps that apply various programs or tools to transform input data to output data.  Workflows are the principal means of performing computation with Arvados.  This tutorial demonstrates how to run a single-stage workflow to take a small data set of paired-end reads from a sample "exome":https://en.wikipedia.org/wiki/Exome in "FASTQ":https://en.wikipedia.org/wiki/FASTQ_format format and align them to "Chromosome 19":https://en.wikipedia.org/wiki/Chromosome_19_%28human%29 using the "bwa mem":http://bio-bwa.sourceforge.net/ tool, producing a "Sequence Alignment/Map (SAM)":https://samtools.github.io/ file.  This tutorial will introduce the following Arvados features:
+
+<div>
+* How to create a new process from an existing workflow.
+* How to browse and select input data for the workflow and submit the process to run on the Arvados cluster.
+* How to access your process results.
+</div>
+
+h3. Steps
+
+notextile. <div class="spaced-out">
+
+# Start from the *Workbench Dashboard*.  You can access the Dashboard by clicking on *<i class="fa fa-lg fa-fw fa-dashboard"></i> Dashboard* in the upper left corner of any Workbench page.
+# Click on the <span class="btn btn-sm btn-primary"><i class="fa fa-fw fa-gear"></i> Run a process...</span> button.  This will open a dialog box titled *Choose a pipeline or workflow to run*.
+# In the search box, type in *Tutorial bwa mem cwl*.
+# Select *<i class="fa fa-fw fa-gear"></i> Tutorial bwa mem cwl* and click the <span class="btn btn-sm btn-primary" >Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i></span> button.  This will create a new process in your *Home* project and will open it. You can now supply the inputs for the process. Please note that all required inputs are populated with default values and you can change them if you prefer.
+# For example, let's see how to change *"reference" parameter* for this workflow. Click the <span class="btn btn-sm btn-primary">Choose</span> button beneath the *"reference" parameter* header.  This will open a dialog box titled *Choose a dataset for "reference" parameter for cwl-runner in bwa-mem.cwl component*.
+# Open the *Home <span class="caret"></span>* menu and select *All Projects*. Search for and select *<i class="fa fa-fw fa-archive"></i> Tutorial chromosome 19 reference*. You will then see a list of files. Select *<i class="fa fa-fw fa-file"></i> 19-fasta.bwt* and click the <span class="btn btn-sm btn-primary" >OK</span> button.
+# Repeat the previous two steps to set the *"read_p1"* and *"read_p2"* parameters for the cwl-runner script in the bwa-mem.cwl component.
+# Click on the <span class="btn btn-sm btn-primary" >Run <i class="fa fa-fw fa-play"></i></span> button.  The page updates to show you that the process has been submitted to run on the Arvados cluster.
+# After the process starts running, you can track the progress by watching log messages from the component(s).  This page refreshes automatically.  You will see a <span class="label label-success">complete</span> label when the process completes successfully.
+# Click on the *Output* link to see the results of the process.  This will load a new page listing the output files from this process.  You'll see the output SAM file from the alignment tool under the *Files* tab.
+# Click on the <span class="btn btn-sm btn-info"><i class="fa fa-download"></i></span> download button to the right of the SAM file to download your results.
+
+notextile. </div>
diff --git a/doc/user/tutorials/writing-cwl-workflow.html.textile.liquid b/doc/user/tutorials/writing-cwl-workflow.html.textile.liquid
new file mode 100644 (file)
index 0000000..2f1f80c
--- /dev/null
@@ -0,0 +1,205 @@
+---
+layout: default
+navsection: userguide
+title: "Writing a CWL workflow"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'what_is_cwl' %}
+
+{% include 'tutorial_expectations' %}
+
+h2. Developing workflows
+
+For an introduction and detailed documentation about writing CWL, see the "CWL User Guide":https://www.commonwl.org/user_guide and the "CWL Specification":http://commonwl.org/v1.0 .
+
+See "Best Practices for writing CWL":{{site.baseurl}}/user/cwl/cwl-style.html and "Arvados CWL Extensions":{{site.baseurl}}/user/cwl/cwl-extensions.html for additional information about using CWL on Arvados.
+
+h2. Using Composer
+
+You can create new workflows in the browser using "Arvados Composer":{{site.baseurl}}/user/composer/composer.html
+
+h2. Registering a workflow to use in Workbench
+
+Use @--create-workflow@ to register a CWL workflow with Arvados.  This enables you to share workflows with other Arvados users, and to run them either by clicking the <span class="btn btn-sm btn-primary"><i class="fa fa-fw fa-gear"></i> Run a process...</span> button on the Workbench Dashboard, or on the command line by UUID.
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner --create-workflow bwa-mem.cwl</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-07-01 12:21:01 arvados.arv-run[15796] INFO: Upload local files: "bwa-mem.cwl"
+2016-07-01 12:21:01 arvados.arv-run[15796] INFO: Uploaded to qr1hi-4zz18-7e0hedrmkuyoei3
+2016-07-01 12:21:01 arvados.cwl-runner[15796] INFO: Created template qr1hi-p5p6p-rjleou1dwr167v5
+qr1hi-p5p6p-rjleou1dwr167v5
+</code></pre>
+</notextile>
+
+You can provide a partial input file to set default values for the workflow input parameters.  You can also use the @--name@ option to set the name of the workflow:
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner --name "My workflow with defaults" --create-workflow bwa-mem.cwl bwa-mem-template.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-07-01 14:09:50 arvados.arv-run[3730] INFO: Upload local files: "bwa-mem.cwl"
+2016-07-01 14:09:50 arvados.arv-run[3730] INFO: Uploaded to qr1hi-4zz18-0f91qkovk4ml18o
+2016-07-01 14:09:50 arvados.cwl-runner[3730] INFO: Created template qr1hi-p5p6p-0deqe6nuuyqns2i
+qr1hi-p5p6p-zuniv58hn8d0qd8
+</code></pre>
+</notextile>
+
+h3. Running registered workflows at the command line
+
+You can run a registered workflow at the command line by its UUID:
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arvados-cwl-runner qr1hi-p5p6p-zuniv58hn8d0qd8 --help</span>
+/home/peter/work/scripts/venv/bin/arvados-cwl-runner 0d62edcb9d25bf4dcdb20d8872ea7b438e12fc59 1.0.20161209192028, arvados-python-client 0.1.20161212125425, cwltool 1.0.20161207161158
+Resolved 'qr1hi-p5p6p-zuniv58hn8d0qd8' to 'keep:655c6cd07550151b210961ed1d3852cf+57/bwa-mem.cwl'
+usage: qr1hi-p5p6p-zuniv58hn8d0qd8 [-h] [--PL PL] --group_id GROUP_ID
+                                   --read_p1 READ_P1 [--read_p2 READ_P2]
+                                   [--reference REFERENCE] --sample_id
+                                   SAMPLE_ID
+                                   [job_order]
+
+positional arguments:
+  job_order             Job input json file
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --PL PL
+  --group_id GROUP_ID
+  --read_p1 READ_P1     The reads, in fastq format.
+  --read_p2 READ_P2     For mate paired reads, the second file (optional).
+  --reference REFERENCE
+                        The index files produced by `bwa index`
+  --sample_id SAMPLE_ID
+</code></pre>
+</notextile>
+
+h2. Using cwltool
+
+When developing a workflow, it is often helpful to run it on the local host to avoid the overhead of submitting to the cluster.  To execute a workflow only on the local host (without submitting jobs to an Arvados cluster) you can use the @cwltool@ command.  Note that when using @cwltool@ the input data must be accessible on the local file system: mount it with @arv-mount@, or fetch it from Keep with @arv-get@.
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arv-get 2463fa9efeb75e099685528b3b9071e0+438/ .</span>
+156 MiB / 156 MiB 100.0%
+~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">arv-get ae480c5099b81e17267b7445e35b4bc7+180/ .</span>
+23 MiB / 23 MiB 100.0%
+~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">cwltool bwa-mem-input.yml bwa-mem-input-local.yml</span>
+cwltool 1.0.20160629140624
+[job bwa-mem.cwl] /home/example/arvados/doc/user/cwl/bwa-mem$ docker \
+    run \
+    -i \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.ann:/var/lib/cwl/job979368791_bwa-mem/19.fasta.ann:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.fastq:/var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.fastq:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.sa:/var/lib/cwl/job979368791_bwa-mem/19.fasta.sa:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.amb:/var/lib/cwl/job979368791_bwa-mem/19.fasta.amb:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.pac:/var/lib/cwl/job979368791_bwa-mem/19.fasta.pac:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/HWI-ST1027_129_D0THKACXX.1_2.fastq:/var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_2.fastq:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.bwt:/var/lib/cwl/job979368791_bwa-mem/19.fasta.bwt:ro \
+    --volume=/home/example/arvados/doc/user/cwl/bwa-mem:/var/spool/cwl:rw \
+    --volume=/tmp/tmpgzyou9:/tmp:rw \
+    --workdir=/var/spool/cwl \
+    --read-only=true \
+    --log-driver=none \
+    --user=1001 \
+    --rm \
+    --env=TMPDIR=/tmp \
+    --env=HOME=/var/spool/cwl \
+    biodckr/bwa \
+    bwa \
+    mem \
+    -t \
+    1 \
+    -R \
+    '@RG       ID:arvados_tutorial     PL:illumina     SM:HWI-ST1027_129' \
+    /var/lib/cwl/job979368791_bwa-mem/19.fasta \
+    /var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.fastq \
+    /var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_2.fastq > /home/example/arvados/doc/user/cwl/bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.sam
+[M::bwa_idx_load_from_disk] read 0 ALT contigs
+[M::process] read 100000 sequences (10000000 bp)...
+[M::mem_pestat] # candidate unique pairs for (FF, FR, RF, RR): (0, 4745, 1, 0)
+[M::mem_pestat] skip orientation FF as there are not enough pairs
+[M::mem_pestat] analyzing insert size distribution for orientation FR...
+[M::mem_pestat] (25, 50, 75) percentile: (154, 181, 214)
+[M::mem_pestat] low and high boundaries for computing mean and std.dev: (34, 334)
+[M::mem_pestat] mean and std.dev: (185.63, 44.88)
+[M::mem_pestat] low and high boundaries for proper pairs: (1, 394)
+[M::mem_pestat] skip orientation RF as there are not enough pairs
+[M::mem_pestat] skip orientation RR as there are not enough pairs
+[M::mem_process_seqs] Processed 100000 reads in 9.848 CPU sec, 9.864 real sec
+[main] Version: 0.7.12-r1039
+[main] CMD: bwa mem -t 1 -R @RG        ID:arvados_tutorial     PL:illumina     SM:HWI-ST1027_129 /var/lib/cwl/job979368791_bwa-mem/19.fasta /var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.fastq /var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_2.fastq
+[main] Real time: 10.061 sec; CPU: 10.032 sec
+Final process status is success
+{
+    "aligned_sam": {
+        "size": 30738959,
+        "path": "/home/example/arvados/doc/user/cwl/bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.sam",
+        "checksum": "sha1$0c668cca45fef02397bb5302880526d300ee4dac",
+        "class": "File"
+    }
+}
+</code></pre>
+</notextile>
+
+If you get the error @JavascriptException: Long-running script killed after 20 seconds.@, this may be due to the Dockerized Node.js engine taking too long to start.  You may address this by installing Node.js locally (run @apt-get install nodejs@ on Debian or Ubuntu) or by specifying a longer timeout with the @--eval-timeout@ option.  For example, run the workflow with @cwltool --eval-timeout=40@ for a 40-second timeout.
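+
+For example, to run a workflow locally with a 40-second Javascript evaluation timeout:
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">cwltool --eval-timeout=40 bwa-mem.cwl bwa-mem-input-local.yml</span>
+</code></pre>
+</notextile>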
+
+h2. Making workflows directly executable
+
+You can make a workflow file directly executable (@cwl-runner@ should be an alias for @arvados-cwl-runner@) by adding the following line to the top of the file:
+
+<notextile>
+<pre><code>#!/usr/bin/env cwl-runner
+</code></pre>
+</notextile>
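+
+You will also need to mark the file as executable:
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">chmod +x bwa-mem.cwl</span>
+</code></pre>
+</notextile>
+
+You can then run it directly: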
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">./bwa-mem.cwl bwa-mem-input.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Upload local files: "bwa-mem.cwl"
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Uploaded to qr1hi-4zz18-h7ljh5u76760ww2
+2016-06-30 14:56:40 arvados.cwl-runner[27002] INFO: Submitted job qr1hi-8i9sb-fm2n3b1w0l6bskg
+2016-06-30 14:56:41 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Running
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Complete
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Overall process status is success
+{
+    "aligned_sam": {
+        "path": "keep:54325254b226664960de07b3b9482349+154/HWI-ST1027_129_D0THKACXX.1_1.sam",
+        "checksum": "sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a",
+        "class": "File",
+        "size": 30738986
+    }
+}
+</code></pre>
+</notextile>
+
+You can even make an input file directly executable the same way with the following two lines at the top:
+
+<notextile>
+<pre><code>#!/usr/bin/env cwl-runner
+cwl:tool: <span class="userinput">bwa-mem.cwl</span>
+</code></pre>
+</notextile>
+
+<notextile>
+<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class="userinput">./bwa-mem-input.yml</span>
+arvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Upload local files: "bwa-mem.cwl"
+2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Uploaded to qr1hi-4zz18-h7ljh5u76760ww2
+2016-06-30 14:56:40 arvados.cwl-runner[27002] INFO: Submitted job qr1hi-8i9sb-fm2n3b1w0l6bskg
+2016-06-30 14:56:41 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Running
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (qr1hi-8i9sb-fm2n3b1w0l6bskg) is Complete
+2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Overall process status is success
+{
+    "aligned_sam": {
+        "path": "keep:54325254b226664960de07b3b9482349+154/HWI-ST1027_129_D0THKACXX.1_1.sam",
+        "checksum": "sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a",
+        "class": "File",
+        "size": 30738986
+    }
+}
+</code></pre>
+</notextile>
diff --git a/doc/zenweb-liquid.rb b/doc/zenweb-liquid.rb
new file mode 100644 (file)
index 0000000..baa8fe4
--- /dev/null
@@ -0,0 +1,110 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+require 'zenweb'
+require 'liquid'
+
+module ZenwebLiquid
+  VERSION = '0.0.1'
+end
+
+module Zenweb
+
+  class Page
+
+    def render_liquid page, content
+      liquid self.body, content, page, binding
+    end
+
+    ##
+    # Render a page's liquid and return the intermediate result
+    def liquid template, content, page, binding = TOPLEVEL_BINDING
+      Liquid::Template.file_system = Liquid::LocalFileSystem.new(File.join(File.dirname(Rake.application().rakefile), "_includes"))
+      unless defined? @liquid_template
+        @liquid_template = Liquid::Template.parse(template)
+      end
+
+      vars = {}
+      vars["content"] = content
+
+      vars["site"] = site.config.h.clone
+      pages = {}
+      site.pages.each do |f, p|
+        pages[f] = p.config.h.clone
+        pages[f]["url"] = p.url
+      end
+      vars["site"]["pages"] = pages
+
+      vars["page"] = page.config.h.clone
+      vars["page"]["url"] = page.url
+
+      @liquid_template.render(vars)
+    end
+  end
+
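+  # Implements the custom Liquid tag {% code 'file' as 'language' %}:
+  # it loads the named partial from the _includes directory and renders
+  # it as syntax-highlighted HTML using CodeRay.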
+  class LiquidCode < Liquid::Include
+    Syntax = /(#{Liquid::QuotedFragment}+)(\s+(?:as)\s+(#{Liquid::QuotedFragment}+))?/o
+
+    def initialize(tag_name, markup, tokens)
+      Liquid::Tag.instance_method(:initialize).bind(self).call(tag_name, markup, tokens)
+
+      if markup =~ Syntax
+        @template_name = $1
+        @language = $3
+        @attributes    = {}
+      else
+        raise SyntaxError.new("Error in tag 'code' - Valid syntax: include '[code_file]' as '[language]'")
+      end
+    end
+
+    def render(context)
+      require 'coderay'
+
+      partial = load_cached_partial(context)
+      html = ''
+
+      context.stack do
+        html = CodeRay.scan(partial.root.nodelist.join, @language).div
+      end
+
+      html
+    end
+
+    Liquid::Template.register_tag('code', LiquidCode)
+  end
+
+  class LiquidCodeBlock < Liquid::Block
+    Syntax = /((?:as)\s+(#{Liquid::QuotedFragment}+))?/o
+
+    def initialize(tag_name, markup, tokens)
+      Liquid::Tag.instance_method(:initialize).bind(self).call(tag_name, markup, tokens)
+
+      if markup =~ Syntax
+        @language = $2
+        @attributes = {}
+      else
+        raise SyntaxError.new("Error in tag 'code' - Valid syntax: codeblock as '[language]'")
+      end
+    end
+
+    def render(context)
+      require 'coderay'
+
+      partial = super
+      html = ''
+
+      # Strip a single leading newline ("\n" must be double-quoted to
+      # denote a newline in Ruby; single-quoted '\n' is a backslash and n)
+      if partial[0] == "\n"
+        partial = partial[1..-1]
+      end
+
+      context.stack do
+        html = CodeRay.scan(partial, @language).div
+      end
+
+      "<notextile>#{html}</notextile>"
+    end
+
+    Liquid::Template.register_tag('codeblock', LiquidCodeBlock)
+  end
+end
diff --git a/doc/zenweb-textile.rb b/doc/zenweb-textile.rb
new file mode 100644 (file)
index 0000000..0b28a61
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+require 'zenweb'
+
+module ZenwebTextile
+  VERSION = '0.0.1'
+end
+
+module Zenweb
+  class Page
+    
+    ##
+    # Render a page's textile and return the resulting html
+    def render_textile page, content
+      require 'RedCloth'
+      RedCloth.new(content ? content : self.body).to_html
+    end
+  end
+end
diff --git a/docker/jobs/1078ECD7.key b/docker/jobs/1078ECD7.key
new file mode 100644 (file)
index 0000000..edc62f4
--- /dev/null
@@ -0,0 +1,30 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQENBEzhgeoBCAChhoK1dqpWzNyDWqRGEvdFdkJaA9D2HRwKPfBfjAoePX6ZyrpA
+ItlUsvt/8s/DRiTiPEFQR4S7VqocmU6whJc3gDEGyOM6b1NF873lIfSVwUoE42QE
+a76dO8woOYgLUyxu2mKG+bJgGMumjBJt6ZOndYVjTYB/7sEeVxwmMVulfZe0s6zg
+ut0+SoTYg2R36qIqeIcWllYt97sEYnyy1qXMis4/3IZnuWkS/frsPR3aeUI4W+o2
+NDN1kj49+LMe7Fb5b7jZY08rZbAWXi1rU1hQx4jC9RvYqlT4HNld4Bn7os1IvOOA
+wNiR0oiVdiuDbBxcMvRPktxMrFVjowusRLq/ABEBAAG0PUN1cm92ZXJzZSwgSW5j
+IEF1dG9tYXRpYyBTaWduaW5nIEtleSA8c3lzYWRtaW5AY3Vyb3ZlcnNlLmNvbT6J
+ATgEEwECACIFAlNgYIECGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFcW
+WREQeOzXPkEH/jQJDIYI1dxWcYiA+hczmpaZvN2/pc/kwIW/6a03+6zqmSNkebOE
+TgoDILacSYc17hy20R1/rWyUstOMKcEgFDBlSehhHyl0f7q/w7d8Ais6MabzsPfx
+IceJpsjUg87+BR7qWhgQ0sxmtIF2TKuTFLs+nkGsgSsiBOEF4NvHxuj3HD4y8F27
+HNqrkqwjLS8xJwwH5Gp2uMEVr1AXIH3iSRjJ8X124s8iEP97Q/3IazoYRf9/MCSm
+QEx8KzxwDX6t4bW6O4D01K+e9gdkTY70dcMgJoqm5IsX7yxjEubiOunphtlJnZ9d
+Oi1yBN5UM3pWKAdcfRj4rcfV9Simvpx9av+5AQ0ETOGB6gEIAMAA0HVMG0BbdnU7
+wWgl5eFdT0AUSrXK/WdcKqVEGGv+c68NETSHWZOJX7O46Eao4gY4cTYprVMBzxpY
+/BtQSYLpE0HLvBc1fcFd61Yz4H/9rGSNY0GcIQEbOjbJY5mr8qFsQ1K/mAf3aUL3
+b6ni4sHVicRiRr0Gl4Ihorlskpfu1SHs/C5tvTSVNF9p4vtl5892y1yILQeVpcBs
+NCR7MUpdS49xCpvnAWsDZX+ij6LTR3lzCm/ZLCg4gNuZkjgU9oqVfGkqysW7WZ8S
+OLvzAwUw7i1EIFX8q6QdudGoezxz8m8OgZM1v8AFpYEKlhEPf1W0MSfaRDwrj866
+8nCLruEAEQEAAYkBHwQYAQIACQUCTOGB6gIbDAAKCRBXFlkREHjs199EB/4+p0G1
+3PHxt6rLWSCGXobDOu4ZOA/qnv0D/JhOLroFds5TzQv6vnS8eAkhCTjHVA+b58cm
+kXpI0oYcD4ZP+KK1CHKq2rGfwou7HfAF+icnNqYkeBOkjjbCgkvBlcCInuAuU8JX
+DZMkfFk52+eBKwTjS/J/fQp0vDru8bHLp98WgdRHWfJQ3mc3gz4A5sR6zhrGPW6/
+ssnROS4dC2Ohp35GpgN1KjD3EmEw5RoSBYlyrARCaMsivgIKMxGUEyFZWhuJt3N1
+2MTddRwz28hbmYCi+MzHYDbRv+cSyUDmvXaWhfkNKBepClBA1rTWBcldit5vvlqr
+yPet6wIKrtLGhAqZ
+=CLkG
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/docker/jobs/Dockerfile b/docker/jobs/Dockerfile
new file mode 100644 (file)
index 0000000..079276e
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
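+#
+# Build sketch (argument values are illustrative):
+#   docker build -t arvados/jobs \
+#     --build-arg repo_version=stable \
+#     --build-arg python_sdk_version=1.1.4-1 \
+#     --build-arg cwl_runner_version=1.1.4-1 .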
+
+# Based on Debian Stretch
+FROM debian:stretch-slim
+MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+RUN apt-get update -q
+RUN apt-get install -yq --no-install-recommends gnupg
+
+ARG repo_version
+RUN echo repo_version $repo_version
+ADD apt.arvados.org-$repo_version.list /etc/apt/sources.list.d/
+
+ADD 1078ECD7.key /tmp/
+RUN cat /tmp/1078ECD7.key | apt-key add -
+
+ARG python_sdk_version
+ARG cwl_runner_version
+RUN echo cwl_runner_version $cwl_runner_version python_sdk_version $python_sdk_version
+
+RUN apt-get update -q
+RUN apt-get install -yq --no-install-recommends nodejs \
+    python-arvados-python-client=$python_sdk_version \
+    python-arvados-cwl-runner=$cwl_runner_version
+
+# use the Python executable from the python-arvados-cwl-runner package
+RUN rm -f /usr/bin/python && ln -s /usr/share/python2.7/dist/python-arvados-cwl-runner/bin/python /usr/bin/python
+
+# Install dependencies and set up system.
+RUN /usr/sbin/adduser --disabled-password \
+      --gecos 'Crunch execution user' crunch && \
+    /usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job
+
+USER crunch
diff --git a/docker/jobs/apt.arvados.org-dev.list b/docker/jobs/apt.arvados.org-dev.list
new file mode 100644 (file)
index 0000000..468000e
--- /dev/null
@@ -0,0 +1,2 @@
+# apt.arvados.org
+deb http://apt.arvados.org/ stretch-dev main
diff --git a/docker/jobs/apt.arvados.org-stable.list b/docker/jobs/apt.arvados.org-stable.list
new file mode 100644 (file)
index 0000000..afbc51e
--- /dev/null
@@ -0,0 +1,2 @@
+# apt.arvados.org
+deb http://apt.arvados.org/ stretch main
diff --git a/docker/jobs/apt.arvados.org-testing.list b/docker/jobs/apt.arvados.org-testing.list
new file mode 100644 (file)
index 0000000..c8ea91d
--- /dev/null
@@ -0,0 +1,2 @@
+# apt.arvados.org
+deb http://apt.arvados.org/ stretch-testing main
diff --git a/docker/migrate-docker19/Dockerfile b/docker/migrate-docker19/Dockerfile
new file mode 100644 (file)
index 0000000..a515ec4
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+FROM debian:8
+
+ENV DEBIAN_FRONTEND noninteractive
+
+RUN apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7 && \
+    gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    apt-key adv --keyserver hkp://pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D || \
+    apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
+
+VOLUME /var/lib/docker
+
+RUN mkdir -p /etc/apt/sources.list.d && \
+    echo deb http://apt.arvados.org/ jessie main > /etc/apt/sources.list.d/apt.arvados.org.list && \
+    apt-get clean && \
+    apt-get update && \
+    apt-get install -yq --no-install-recommends -o Acquire::Retries=6 \
+        git curl python-arvados-python-client apt-transport-https ca-certificates && \
+    apt-get clean
+
+RUN echo deb https://apt.dockerproject.org/repo debian-jessie main > /etc/apt/sources.list.d/docker.list && \
+    apt-get update && \
+    apt-get install -yq --no-install-recommends -o Acquire::Retries=6 \
+        docker-engine=1.9.1-0~jessie && \
+    apt-get clean
+
+RUN mkdir /root/pkgs && \
+    cd /root/pkgs && \
+    curl -L -O https://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.13.1-0~debian-jessie_amd64.deb && \
+    curl -L -O http://httpredir.debian.org/debian/pool/main/libt/libtool/libltdl7_2.4.2-1.11+b1_amd64.deb
+
+ADD migrate.sh dnd.sh /root/
diff --git a/docker/migrate-docker19/build.sh b/docker/migrate-docker19/build.sh
new file mode 100755 (executable)
index 0000000..5d76ec7
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+exec docker build -t arvados/migrate-docker19:1.0 .
diff --git a/docker/migrate-docker19/dnd.sh b/docker/migrate-docker19/dnd.sh
new file mode 100755 (executable)
index 0000000..703a124
--- /dev/null
@@ -0,0 +1,102 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Taken from https://github.com/jpetazzo/dind
+
+exec 2>&1
+
+# Ensure that all nodes in /dev/mapper correspond to mapped devices currently loaded by the device-mapper kernel driver
+dmsetup mknodes
+
+# Default the logging mode if the caller didn't specify one
+: ${LOG:=stdio}
+
+# First, make sure that cgroups are mounted correctly.
+CGROUP=/sys/fs/cgroup
+[ -d $CGROUP ] || mkdir $CGROUP
+
+if mountpoint -q $CGROUP ; then
+    true
+else
+    mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP
+fi
+
+if ! mountpoint -q $CGROUP ; then
+    echo "Could not find or mount cgroups. Tried /sys/fs/cgroup and /cgroup.  Did you use --privileged?"
+    exit 1
+fi
+
+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
+then
+    mount -t securityfs none /sys/kernel/security || {
+        echo "Could not mount /sys/kernel/security."
+        echo "AppArmor detection and --privileged mode might break."
+    }
+fi
+
+# Mount the cgroup hierarchies exactly as they are in the parent system.
+for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
+do
+        [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
+        mountpoint -q $CGROUP/$SUBSYS ||
+                mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
+
+        # The two following sections address a bug which manifests itself
+        # by a cryptic "lxc-start: no ns_cgroup option specified" when
+        # trying to start containers within a container.
+        # The bug seems to appear when the cgroup hierarchies are not
+        # mounted on the exact same directories in the host, and in the
+        # container.
+
+        # Named, control-less cgroups are mounted with "-o name=foo"
+        # (and appear as such under /proc/<pid>/cgroup) but are usually
+        # mounted on a directory named "foo" (without the "name=" prefix).
+        # Systemd and OpenRC (and possibly others) both create such a
+        # cgroup. To avoid the aforementioned bug, we symlink "foo" to
+        # "name=foo". This shouldn't have any adverse effect.
+        #echo $SUBSYS | grep -q ^name= && {
+        #        NAME=$(echo $SUBSYS | sed s/^name=//)
+        #        ln -s $SUBSYS $CGROUP/$NAME
+        #}
+
+        # Likewise, on at least one system, it has been reported that
+        # systemd would mount the CPU and CPU accounting controllers
+        # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
+        # but on a directory called "cpu,cpuacct" (note the inversion
+        # in the order of the groups). This tries to work around it.
+        [ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
+done
+
+# Note: as I write those lines, the LXC userland tools cannot setup
+# a "sub-container" properly if the "devices" cgroup is not in its
+# own hierarchy. Let's detect this and issue a warning.
+grep -q :devices: /proc/1/cgroup ||
+       echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
+grep -qw devices /proc/1/cgroup ||
+       echo "WARNING: it looks like the 'devices' cgroup is not mounted."
+
+# Now, close extraneous file descriptors.
+pushd /proc/self/fd >/dev/null
+for FD in *
+do
+       case "$FD" in
+       # Keep stdin/stdout/stderr
+       [012])
+               ;;
+       # Nuke everything else
+       *)
+               eval exec "$FD>&-"
+               ;;
+       esac
+done
+popd >/dev/null
+
+
+# If a pidfile is still around (for example after a container restart),
+# delete it so that docker can start.
+rm -rf /var/run/docker.pid
+
+read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
+
+exec docker daemon --storage-driver=$1 $DOCKER_DAEMON_ARGS
diff --git a/docker/migrate-docker19/migrate.sh b/docker/migrate-docker19/migrate.sh
new file mode 100755 (executable)
index 0000000..76fe823
--- /dev/null
@@ -0,0 +1,113 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# This script is called by arv-migrate-docker19 to perform the actual migration
+# of a single image.  This works by running Docker-in-Docker (dnd.sh) to
+# download the image using Docker 1.9 and then upgrading to Docker 1.13 and
+# uploading the converted image.
+
+# When using bash as pid 1 and using "trap on EXIT",
+# it will sometimes go into a 100% CPU infinite loop.
+#
+# Using workaround from here:
+#
+# https://github.com/docker/docker/issues/4854
+if [ "$$" = 1 ]; then
+  $0 "$@"
+  exit $?
+fi
+
+# -x           show script
+# -e           exit on error
+# -o pipefail  use exit code from 1st failure in pipeline, not last
+set -x -e -o pipefail
+
+image_tar_keepref=$1
+image_id=$2
+image_repo=$3
+image_tag=$4
+project_uuid=$5
+graph_driver=$6
+
+if [[ "$image_repo" = "<none>" ]] ; then
+  image_repo=none
+  image_tag=latest
+fi
+
+# Print free space in /var/lib/docker
+function freespace() {
+    df -B1 /var/lib/docker | tail -n1 | sed 's/  */ /g' | cut -d' ' -f4
+}
+
+# Run docker-in-docker script and then wait for it to come up
+function start_docker {
+    /root/dnd.sh $graph_driver &
+    for i in $(seq 1 10) ; do
+        if docker version >/dev/null 2>/dev/null ; then
+            return
+        fi
+        sleep 1
+    done
+    false
+}
+
+# Kill docker from pid then wait for it to be down
+function kill_docker {
+    if test -f /var/run/docker.pid ; then
+        kill $(cat /var/run/docker.pid)
+    fi
+    for i in $(seq 1 10) ; do
+        if ! docker version >/dev/null 2>/dev/null ; then
+            return
+        fi
+        sleep 1
+    done
+    false
+}
+
+# Ensure that we clean up docker graph and/or lingering cache files on exit
+function cleanup {
+    kill_docker
+    rm -rf /var/lib/docker/*
+    rm -rf /root/.cache/arvados/docker/*
+    echo "Available space after cleanup is $(freespace)"
+}
+
+trap cleanup EXIT
+
+start_docker
+
+echo "Initial available space is $(freespace)"
+
+arv-get $image_tar_keepref | docker load
+
+
+docker tag $image_id $image_repo:$image_tag
+
+docker images -a
+
+kill_docker
+
+echo "Available space after image load is $(freespace)"
+
+cd /root/pkgs
+dpkg -i libltdl7_2.4.2-1.11+b1_amd64.deb docker-engine_1.13.1-0~debian-jessie_amd64.deb
+
+echo "Available space after image upgrade is $(freespace)"
+
+start_docker
+
+docker images -a
+
+if [[ "$image_repo" = "none" ]] ; then
+  image_repo=$(docker images -a --no-trunc | sed 's/  */ /g' | grep ^none | cut -d' ' -f3)
+  image_tag=""
+fi
+
+UUID=$(arv-keepdocker --force-image-format --project-uuid=$project_uuid $image_repo $image_tag)
+
+echo "Available space after arv-keepdocker is $(freespace)"
+
+echo "Migrated uuid is $UUID"
diff --git a/lib/cli/external.go b/lib/cli/external.go
new file mode 100644 (file)
index 0000000..35933f9
--- /dev/null
@@ -0,0 +1,124 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package cli
+
+import (
+       "fmt"
+       "io"
+       "io/ioutil"
+       "os/exec"
+       "strings"
+       "syscall"
+
+       "git.curoverse.com/arvados.git/lib/cmd"
+)
+
+var (
+       Create = rubyArvCmd{"create"}
+       Edit   = rubyArvCmd{"edit"}
+
+       Copy = externalCmd{"arv-copy"}
+       Tag  = externalCmd{"arv-tag"}
+       Ws   = externalCmd{"arv-ws"}
+       Run  = externalCmd{"arv-run"}
+
+       Keep = cmd.Multi(map[string]cmd.Handler{
+               "get":       externalCmd{"arv-get"},
+               "put":       externalCmd{"arv-put"},
+               "ls":        externalCmd{"arv-ls"},
+               "normalize": externalCmd{"arv-normalize"},
+               "docker":    externalCmd{"arv-keepdocker"},
+       })
+       Pipeline = cmd.Multi(map[string]cmd.Handler{
+               "run": externalCmd{"arv-run-pipeline-instance"},
+       })
+       // user, group, container, specimen, etc.
+       APICall = apiCallCmd{}
+)
+
+// When using the ruby "arv" command, flags must come before the
+// subcommand: "arv --format=yaml get foo" works, but "arv get
+// --format=yaml foo" does not work.
+func legacyFlagsToFront(subcommand string, argsin []string) (argsout []string) {
+       flags, _ := LegacyFlagSet()
+       flags.SetOutput(ioutil.Discard)
+       flags.Parse(argsin)
+       narg := flags.NArg()
+       argsout = append(argsout, argsin[:len(argsin)-narg]...)
+       argsout = append(argsout, subcommand)
+       argsout = append(argsout, argsin[len(argsin)-narg:]...)
+       return
+}
+
+type apiCallCmd struct{}
+
+func (cmd apiCallCmd) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       split := strings.Split(prog, " ")
+       if len(split) < 2 {
+               fmt.Fprintf(stderr, "internal error: no api model in %q\n", prog)
+               return 2
+       }
+       model := split[len(split)-1]
+       return externalCmd{"arv"}.RunCommand("arv", legacyFlagsToFront(model, args), stdin, stdout, stderr)
+}
+
+type rubyArvCmd struct {
+       subcommand string
+}
+
+func (rc rubyArvCmd) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       return externalCmd{"arv"}.RunCommand("arv", legacyFlagsToFront(rc.subcommand, args), stdin, stdout, stderr)
+}
+
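+// externalCmd runs a non-Go subcommand (such as one of the Python- or
+// Ruby-based arv-* tools) as a subprocess, connecting the given stdio
+// streams and translating its exit status and launch errors into a
+// return code, with install hints printed to stderr when the program
+// is missing.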
+type externalCmd struct {
+       prog string
+}
+
+func (ec externalCmd) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       cmd := exec.Command(ec.prog, args...)
+       cmd.Stdin = stdin
+       cmd.Stdout = stdout
+       cmd.Stderr = stderr
+       err := cmd.Run()
+       switch err := err.(type) {
+       case nil:
+               return 0
+       case *exec.ExitError:
+               status := err.Sys().(syscall.WaitStatus)
+               if status.Exited() {
+                       return status.ExitStatus()
+               }
+               fmt.Fprintf(stderr, "%s failed: %s\n", ec.prog, err)
+               return 1
+       case *exec.Error:
+               fmt.Fprintln(stderr, err)
+               if ec.prog == "arv" || ec.prog == "arv-run-pipeline-instance" {
+                       fmt.Fprint(stderr, rubyInstallHints)
+               } else if strings.HasPrefix(ec.prog, "arv-") {
+                       fmt.Fprint(stderr, pythonInstallHints)
+               }
+               return 1
+       default:
+               fmt.Fprintf(stderr, "error running %s: %s\n", ec.prog, err)
+               return 1
+       }
+}
+
+var (
+       rubyInstallHints = `
+Note: This subcommand uses the arvados-cli Ruby gem. If that is not
+installed, try "gem install arvados-cli", or see
+https://doc.arvados.org/install for more details.
+
+`
+       pythonInstallHints = `
+Note: This subcommand uses the "arvados" Python module. If that is
+not installed, try:
+* "pip install arvados" (either as root or in a virtualenv), or
+* "sudo apt-get install python-arvados-python-client", or
+* see https://doc.arvados.org/install for more details.
+
+`
+)
diff --git a/lib/cli/flags.go b/lib/cli/flags.go
new file mode 100644 (file)
index 0000000..7147e0c
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package cli
+
+import (
+       "flag"
+
+       "git.curoverse.com/arvados.git/lib/cmd"
+       "rsc.io/getopt"
+)
+
+type LegacyFlagValues struct {
+       Format  string
+       DryRun  bool
+       Short   bool
+       Verbose bool
+}
+
+func LegacyFlagSet() (cmd.FlagSet, *LegacyFlagValues) {
+       values := &LegacyFlagValues{Format: "json"}
+       flags := getopt.NewFlagSet("", flag.ContinueOnError)
+       flags.BoolVar(&values.DryRun, "dry-run", false, "Don't actually do anything")
+       flags.Alias("n", "dry-run")
+       flags.StringVar(&values.Format, "format", values.Format, "Output format: json, yaml, or uuid")
+       flags.Alias("f", "format")
+       flags.BoolVar(&values.Short, "short", false, "Return only UUIDs (equivalent to --format=uuid)")
+       flags.Alias("s", "short")
+       flags.BoolVar(&values.Verbose, "verbose", false, "Print more debug/progress messages on stderr")
+       flags.Alias("v", "verbose")
+       return flags, values
+}
diff --git a/lib/cli/get.go b/lib/cli/get.go
new file mode 100644 (file)
index 0000000..2c60f43
--- /dev/null
@@ -0,0 +1,75 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package cli
+
+import (
+       "encoding/json"
+       "fmt"
+       "io"
+
+       "git.curoverse.com/arvados.git/lib/cmd"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/ghodss/yaml"
+)
+
+var Get cmd.Handler = getCmd{}
+
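+// getCmd implements "arvados-client get": it fetches a single object
+// by UUID from the API server and prints it on stdout as JSON, YAML,
+// or just its UUID, depending on the --format/--short flags.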
+type getCmd struct{}
+
+func (getCmd) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       var err error
+       defer func() {
+               if err != nil {
+                       fmt.Fprintf(stderr, "%s\n", err)
+               }
+       }()
+
+       flags, opts := LegacyFlagSet()
+       flags.SetOutput(stderr)
+       err = flags.Parse(args)
+       if err != nil {
+               return 2
+       }
+       if len(flags.Args()) != 1 {
+               fmt.Fprintf(stderr, "usage of %s:\n", prog)
+               flags.PrintDefaults()
+               return 2
+       }
+       if opts.Short {
+               opts.Format = "uuid"
+       }
+
+       id := flags.Args()[0]
+       client := arvados.NewClientFromEnv()
+       path, err := client.PathForUUID("show", id)
+       if err != nil {
+               return 1
+       }
+
+       var obj map[string]interface{}
+       err = client.RequestAndDecode(&obj, "GET", path, nil, nil)
+       if err != nil {
+               err = fmt.Errorf("GET %s: %s", path, err)
+               return 1
+       }
+       if opts.Format == "yaml" {
+               var buf []byte
+               buf, err = yaml.Marshal(obj)
+               if err == nil {
+                       _, err = stdout.Write(buf)
+               }
+       } else if opts.Format == "uuid" {
+               fmt.Fprintln(stdout, obj["uuid"])
+       } else {
+               enc := json.NewEncoder(stdout)
+               enc.SetIndent("", "  ")
+               err = enc.Encode(obj)
+       }
+       if err != nil {
+               err = fmt.Errorf("encoding: %s", err)
+               return 1
+       }
+       return 0
+}
diff --git a/lib/cli/get_test.go b/lib/cli/get_test.go
new file mode 100644 (file)
index 0000000..b2128a4
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package cli
+
+import (
+       "bytes"
+       "regexp"
+       "testing"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&GetSuite{})
+
+type GetSuite struct{}
+
+func (s *GetSuite) TestGetCollectionJSON(c *check.C) {
+       stdout := bytes.NewBuffer(nil)
+       stderr := bytes.NewBuffer(nil)
+       exited := Get.RunCommand("arvados-client get", []string{arvadostest.FooCollection}, bytes.NewReader(nil), stdout, stderr)
+       c.Check(stdout.String(), check.Matches, `(?ms){.*"uuid": "`+arvadostest.FooCollection+`".*}\n`)
+       c.Check(stdout.String(), check.Matches, `(?ms){.*"portable_data_hash": "`+regexp.QuoteMeta(arvadostest.FooCollectionPDH)+`".*}\n`)
+       c.Check(stderr.String(), check.Equals, "")
+       c.Check(exited, check.Equals, 0)
+}
diff --git a/lib/cloud/azure/azure.go b/lib/cloud/azure/azure.go
new file mode 100644 (file)
index 0000000..d19e4be
--- /dev/null
@@ -0,0 +1,663 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package azure
+
+import (
+       "context"
+       "encoding/base64"
+       "encoding/json"
+       "fmt"
+       "net/http"
+       "regexp"
+       "strconv"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute"
+       "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-06-01/network"
+       storageacct "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-02-01/storage"
+       "github.com/Azure/azure-sdk-for-go/storage"
+       "github.com/Azure/go-autorest/autorest"
+       "github.com/Azure/go-autorest/autorest/azure"
+       "github.com/Azure/go-autorest/autorest/azure/auth"
+       "github.com/Azure/go-autorest/autorest/to"
+       "github.com/jmcvetta/randutil"
+       "github.com/sirupsen/logrus"
+       "golang.org/x/crypto/ssh"
+)
+
+// Driver is the azure implementation of the cloud.Driver interface.
+var Driver = cloud.DriverFunc(newAzureInstanceSet)
+
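+// azureInstanceSetConfig is the Azure driver configuration, decoded
+// from the JSON document passed to newAzureInstanceSet. See the
+// DriverParameters example in azure_test.go for sample values in
+// YAML form.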
+type azureInstanceSetConfig struct {
+       SubscriptionID               string
+       ClientID                     string
+       ClientSecret                 string
+       TenantID                     string
+       CloudEnvironment             string
+       ResourceGroup                string
+       Location                     string
+       Network                      string
+       Subnet                       string
+       StorageAccount               string
+       BlobContainer                string
+       DeleteDanglingResourcesAfter arvados.Duration
+       AdminUsername                string
+}
+
+const tagKeyInstanceSecret = "InstanceSecret"
+
+type containerWrapper interface {
+       GetBlobReference(name string) *storage.Blob
+       ListBlobs(params storage.ListBlobsParameters) (storage.BlobListResponse, error)
+}
+
+type virtualMachinesClientWrapper interface {
+       createOrUpdate(ctx context.Context,
+               resourceGroupName string,
+               VMName string,
+               parameters compute.VirtualMachine) (result compute.VirtualMachine, err error)
+       delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error)
+       listComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error)
+}
+
+type virtualMachinesClientImpl struct {
+       inner compute.VirtualMachinesClient
+}
+
+func (cl *virtualMachinesClientImpl) createOrUpdate(ctx context.Context,
+       resourceGroupName string,
+       VMName string,
+       parameters compute.VirtualMachine) (result compute.VirtualMachine, err error) {
+
+       future, err := cl.inner.CreateOrUpdate(ctx, resourceGroupName, VMName, parameters)
+       if err != nil {
+               return compute.VirtualMachine{}, wrapAzureError(err)
+       }
+       err = future.WaitForCompletionRef(ctx, cl.inner.Client)
+       if err != nil {
+               return compute.VirtualMachine{}, wrapAzureError(err)
+       }
+       r, err := future.Result(cl.inner)
+       return r, wrapAzureError(err)
+}
+
+func (cl *virtualMachinesClientImpl) delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
+       future, err := cl.inner.Delete(ctx, resourceGroupName, VMName)
+       if err != nil {
+               return nil, wrapAzureError(err)
+       }
+       err = future.WaitForCompletionRef(ctx, cl.inner.Client)
+       return future.Response(), wrapAzureError(err)
+}
+
+func (cl *virtualMachinesClientImpl) listComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error) {
+       r, err := cl.inner.ListComplete(ctx, resourceGroupName)
+       return r, wrapAzureError(err)
+}
+
+type interfacesClientWrapper interface {
+       createOrUpdate(ctx context.Context,
+               resourceGroupName string,
+               networkInterfaceName string,
+               parameters network.Interface) (result network.Interface, err error)
+       delete(ctx context.Context, resourceGroupName string, networkInterfaceName string) (result *http.Response, err error)
+       listComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error)
+}
+
+type interfacesClientImpl struct {
+       inner network.InterfacesClient
+}
+
+func (cl *interfacesClientImpl) delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
+       future, err := cl.inner.Delete(ctx, resourceGroupName, VMName)
+       if err != nil {
+               return nil, wrapAzureError(err)
+       }
+       err = future.WaitForCompletionRef(ctx, cl.inner.Client)
+       return future.Response(), wrapAzureError(err)
+}
+
+func (cl *interfacesClientImpl) createOrUpdate(ctx context.Context,
+       resourceGroupName string,
+       networkInterfaceName string,
+       parameters network.Interface) (result network.Interface, err error) {
+
+       future, err := cl.inner.CreateOrUpdate(ctx, resourceGroupName, networkInterfaceName, parameters)
+       if err != nil {
+               return network.Interface{}, wrapAzureError(err)
+       }
+       err = future.WaitForCompletionRef(ctx, cl.inner.Client)
+       if err != nil {
+               return network.Interface{}, wrapAzureError(err)
+       }
+       r, err := future.Result(cl.inner)
+       return r, wrapAzureError(err)
+}
+
+func (cl *interfacesClientImpl) listComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error) {
+       r, err := cl.inner.ListComplete(ctx, resourceGroupName)
+       return r, wrapAzureError(err)
+}
+
+var quotaRe = regexp.MustCompile(`(?i:exceed|quota|limit)`)
+
+type azureRateLimitError struct {
+       azure.RequestError
+       firstRetry time.Time
+}
+
+func (ar *azureRateLimitError) EarliestRetry() time.Time {
+       return ar.firstRetry
+}
+
+type azureQuotaError struct {
+       azure.RequestError
+}
+
+func (ar *azureQuotaError) IsQuotaError() bool {
+       return true
+}
+
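+// wrapAzureError inspects an Azure SDK error and, where possible,
+// wraps it in a type the dispatcher can act on: HTTP 429 responses
+// (or any response carrying a Retry-After header) become an
+// azureRateLimitError, and service errors whose code or message
+// mentions a quota or limit become an azureQuotaError. Anything else
+// is returned unchanged.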
+func wrapAzureError(err error) error {
+       de, ok := err.(autorest.DetailedError)
+       if !ok {
+               return err
+       }
+       rq, ok := de.Original.(*azure.RequestError)
+       if !ok {
+               return err
+       }
+       if rq.Response == nil {
+               return err
+       }
+       if rq.Response.StatusCode == 429 || len(rq.Response.Header["Retry-After"]) >= 1 {
+               // API throttling. Note the Retry-After header can be
+               // absent even on a 429 response; default to retrying
+               // in 20 seconds when it is missing or unparseable.
+               earliestRetry := time.Now().Add(20 * time.Second)
+               if ra := rq.Response.Header["Retry-After"]; len(ra) > 0 {
+                       if t, parseErr := http.ParseTime(ra[0]); parseErr == nil {
+                               // Retry-After was an HTTP-date
+                               earliestRetry = t
+                       } else if dur, parseErr := strconv.ParseInt(ra[0], 10, 64); parseErr == nil {
+                               // Retry-After was a number of seconds
+                               earliestRetry = time.Now().Add(time.Duration(dur) * time.Second)
+                       }
+               }
+               return &azureRateLimitError{*rq, earliestRetry}
+       }
+       if rq.ServiceError == nil {
+               return err
+       }
+       if quotaRe.FindString(rq.ServiceError.Code) != "" || quotaRe.FindString(rq.ServiceError.Message) != "" {
+               return &azureQuotaError{*rq}
+       }
+       return err
+}
+
+type azureInstanceSet struct {
+       azconfig     azureInstanceSetConfig
+       vmClient     virtualMachinesClientWrapper
+       netClient    interfacesClientWrapper
+       blobcont     containerWrapper
+       azureEnv     azure.Environment
+       interfaces   map[string]network.Interface
+       dispatcherID string
+       namePrefix   string
+       ctx          context.Context
+       stopFunc     context.CancelFunc
+       stopWg       sync.WaitGroup
+       deleteNIC    chan string
+       deleteBlob   chan storage.Blob
+       logger       logrus.FieldLogger
+}
+
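+// newAzureInstanceSet decodes the JSON driver configuration,
+// authenticates with Azure, and starts the background cleanup
+// goroutines. A minimal usage sketch (identifiers illustrative;
+// callers normally reach this through Driver):
+//
+//   is, err := newAzureInstanceSet(configJSON, "dispatch-id", logrus.StandardLogger())
+//   if err != nil {
+//           // handle error
+//   }
+//   defer is.Stop()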
+func newAzureInstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
+       azcfg := azureInstanceSetConfig{}
+       err = json.Unmarshal(config, &azcfg)
+       if err != nil {
+               return nil, err
+       }
+
+       az := azureInstanceSet{logger: logger}
+       az.ctx, az.stopFunc = context.WithCancel(context.Background())
+       err = az.setup(azcfg, string(dispatcherID))
+       if err != nil {
+               az.stopFunc()
+               return nil, err
+       }
+       return &az, nil
+}
+
+func (az *azureInstanceSet) setup(azcfg azureInstanceSetConfig, dispatcherID string) (err error) {
+       az.azconfig = azcfg
+       vmClient := compute.NewVirtualMachinesClient(az.azconfig.SubscriptionID)
+       netClient := network.NewInterfacesClient(az.azconfig.SubscriptionID)
+       storageAcctClient := storageacct.NewAccountsClient(az.azconfig.SubscriptionID)
+
+       az.azureEnv, err = azure.EnvironmentFromName(az.azconfig.CloudEnvironment)
+       if err != nil {
+               return err
+       }
+
+       authorizer, err := auth.ClientCredentialsConfig{
+               ClientID:     az.azconfig.ClientID,
+               ClientSecret: az.azconfig.ClientSecret,
+               TenantID:     az.azconfig.TenantID,
+               Resource:     az.azureEnv.ResourceManagerEndpoint,
+               AADEndpoint:  az.azureEnv.ActiveDirectoryEndpoint,
+       }.Authorizer()
+       if err != nil {
+               return err
+       }
+
+       vmClient.Authorizer = authorizer
+       netClient.Authorizer = authorizer
+       storageAcctClient.Authorizer = authorizer
+
+       az.vmClient = &virtualMachinesClientImpl{vmClient}
+       az.netClient = &interfacesClientImpl{netClient}
+
+       result, err := storageAcctClient.ListKeys(az.ctx, az.azconfig.ResourceGroup, az.azconfig.StorageAccount)
+       if err != nil {
+               az.logger.WithError(err).Warn("Couldn't get account keys")
+               return err
+       }
+
+       key1 := *(*result.Keys)[0].Value
+       client, err := storage.NewBasicClientOnSovereignCloud(az.azconfig.StorageAccount, key1, az.azureEnv)
+       if err != nil {
+               az.logger.WithError(err).Warn("Couldn't make client")
+               return err
+       }
+
+       blobsvc := client.GetBlobService()
+       az.blobcont = blobsvc.GetContainerReference(az.azconfig.BlobContainer)
+
+       az.dispatcherID = dispatcherID
+       az.namePrefix = fmt.Sprintf("compute-%s-", az.dispatcherID)
+
+       // Create the deletion channels and add to the WaitGroup
+       // before launching the ticker goroutine, so the goroutine
+       // can't race with Stop() or with channel creation.
+       az.deleteNIC = make(chan string)
+       az.deleteBlob = make(chan storage.Blob)
+       az.stopWg.Add(1)
+       go func() {
+               defer az.stopWg.Done()
+
+               tk := time.NewTicker(5 * time.Minute)
+               for {
+                       select {
+                       case <-az.ctx.Done():
+                               tk.Stop()
+                               return
+                       case <-tk.C:
+                               az.manageBlobs()
+                       }
+               }
+       }()
+
+
+       for i := 0; i < 4; i++ {
+               go func() {
+                       for {
+                               nicname, ok := <-az.deleteNIC
+                               if !ok {
+                                       return
+                               }
+                               _, delerr := az.netClient.delete(context.Background(), az.azconfig.ResourceGroup, nicname)
+                               if delerr != nil {
+                                       az.logger.WithError(delerr).Warnf("Error deleting %v", nicname)
+                               } else {
+                                       az.logger.Printf("Deleted NIC %v", nicname)
+                               }
+                       }
+               }()
+               go func() {
+                       for {
+                               blob, ok := <-az.deleteBlob
+                               if !ok {
+                                       return
+                               }
+                               err := blob.Delete(nil)
+                               if err != nil {
+                                       az.logger.WithError(err).Warnf("Error deleting %v", blob.Name)
+                               } else {
+                                       az.logger.Printf("Deleted blob %v", blob.Name)
+                               }
+                       }
+               }()
+       }
+
+       return nil
+}
+
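+// Create boots a new VM: it creates a NIC in the configured
+// Network/Subnet, backs the OS disk with a fresh VHD blob in the
+// configured storage container, and passes initCommand to the
+// instance as a CustomData shell script. On failure, it attempts to
+// clean up the blob and NIC it just created.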
+func (az *azureInstanceSet) Create(
+       instanceType arvados.InstanceType,
+       imageID cloud.ImageID,
+       newTags cloud.InstanceTags,
+       initCommand cloud.InitCommand,
+       publicKey ssh.PublicKey) (cloud.Instance, error) {
+
+       az.stopWg.Add(1)
+       defer az.stopWg.Done()
+
+       name, err := randutil.String(15, "abcdefghijklmnopqrstuvwxyz0123456789")
+       if err != nil {
+               return nil, err
+       }
+
+       name = az.namePrefix + name
+
+       timestamp := time.Now().Format(time.RFC3339Nano)
+
+       tags := make(map[string]*string)
+       tags["created-at"] = &timestamp
+       for k, v := range newTags {
+               newstr := v
+               tags["dispatch-"+k] = &newstr
+       }
+
+       nicParameters := network.Interface{
+               Location: &az.azconfig.Location,
+               Tags:     tags,
+               InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
+                       IPConfigurations: &[]network.InterfaceIPConfiguration{
+                               network.InterfaceIPConfiguration{
+                                       Name: to.StringPtr("ip1"),
+                                       InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
+                                               Subnet: &network.Subnet{
+                                                       ID: to.StringPtr(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers"+
+                                                               "/Microsoft.Network/virtualnetworks/%s/subnets/%s",
+                                                               az.azconfig.SubscriptionID,
+                                                               az.azconfig.ResourceGroup,
+                                                               az.azconfig.Network,
+                                                               az.azconfig.Subnet)),
+                                               },
+                                               PrivateIPAllocationMethod: network.Dynamic,
+                                       },
+                               },
+                       },
+               },
+       }
+       nic, err := az.netClient.createOrUpdate(az.ctx, az.azconfig.ResourceGroup, name+"-nic", nicParameters)
+       if err != nil {
+               return nil, wrapAzureError(err)
+       }
+
+       blobname := fmt.Sprintf("%s-os.vhd", name)
+       instanceVhd := fmt.Sprintf("https://%s.blob.%s/%s/%s",
+               az.azconfig.StorageAccount,
+               az.azureEnv.StorageEndpointSuffix,
+               az.azconfig.BlobContainer,
+               blobname)
+
+       customData := base64.StdEncoding.EncodeToString([]byte("#!/bin/sh\n" + initCommand + "\n"))
+
+       vmParameters := compute.VirtualMachine{
+               Location: &az.azconfig.Location,
+               Tags:     tags,
+               VirtualMachineProperties: &compute.VirtualMachineProperties{
+                       HardwareProfile: &compute.HardwareProfile{
+                               VMSize: compute.VirtualMachineSizeTypes(instanceType.ProviderType),
+                       },
+                       StorageProfile: &compute.StorageProfile{
+                               OsDisk: &compute.OSDisk{
+                                       OsType:       compute.Linux,
+                                       Name:         to.StringPtr(name + "-os"),
+                                       CreateOption: compute.FromImage,
+                                       Image: &compute.VirtualHardDisk{
+                                               URI: to.StringPtr(string(imageID)),
+                                       },
+                                       Vhd: &compute.VirtualHardDisk{
+                                               URI: &instanceVhd,
+                                       },
+                               },
+                       },
+                       NetworkProfile: &compute.NetworkProfile{
+                               NetworkInterfaces: &[]compute.NetworkInterfaceReference{
+                                       compute.NetworkInterfaceReference{
+                                               ID: nic.ID,
+                                               NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{
+                                                       Primary: to.BoolPtr(true),
+                                               },
+                                       },
+                               },
+                       },
+                       OsProfile: &compute.OSProfile{
+                               ComputerName:  &name,
+                               AdminUsername: to.StringPtr(az.azconfig.AdminUsername),
+                               LinuxConfiguration: &compute.LinuxConfiguration{
+                                       DisablePasswordAuthentication: to.BoolPtr(true),
+                                       SSH: &compute.SSHConfiguration{
+                                               PublicKeys: &[]compute.SSHPublicKey{
+                                                       {
+                                                               Path:    to.StringPtr("/home/" + az.azconfig.AdminUsername + "/.ssh/authorized_keys"),
+                                                               KeyData: to.StringPtr(string(ssh.MarshalAuthorizedKey(publicKey))),
+                                                       },
+                                               },
+                                       },
+                               },
+                               CustomData: &customData,
+                       },
+               },
+       }
+
+       vm, err := az.vmClient.createOrUpdate(az.ctx, az.azconfig.ResourceGroup, name, vmParameters)
+       if err != nil {
+               _, delerr := az.blobcont.GetBlobReference(blobname).DeleteIfExists(nil)
+               if delerr != nil {
+                       az.logger.WithError(delerr).Warnf("Error cleaning up vhd blob after failed create")
+               }
+
+               _, delerr = az.netClient.delete(context.Background(), az.azconfig.ResourceGroup, *nic.Name)
+               if delerr != nil {
+                       az.logger.WithError(delerr).Warnf("Error cleaning up NIC after failed create")
+               }
+
+               return nil, wrapAzureError(err)
+       }
+
+       return &azureInstance{
+               provider: az,
+               nic:      nic,
+               vm:       vm,
+       }, nil
+}
+
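+// Instances returns all VMs in the resource group whose names begin
+// with namePrefix, each paired with its primary network interface as
+// collected by manageNics.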
+func (az *azureInstanceSet) Instances(cloud.InstanceTags) ([]cloud.Instance, error) {
+       az.stopWg.Add(1)
+       defer az.stopWg.Done()
+
+       interfaces, err := az.manageNics()
+       if err != nil {
+               return nil, err
+       }
+
+       result, err := az.vmClient.listComplete(az.ctx, az.azconfig.ResourceGroup)
+       if err != nil {
+               return nil, wrapAzureError(err)
+       }
+
+       instances := make([]cloud.Instance, 0)
+
+       for ; result.NotDone(); err = result.Next() {
+               if err != nil {
+                       return nil, wrapAzureError(err)
+               }
+               if strings.HasPrefix(*result.Value().Name, az.namePrefix) {
+                       instances = append(instances, &azureInstance{
+                               provider: az,
+                               vm:       result.Value(),
+                               nic:      interfaces[*(*result.Value().NetworkProfile.NetworkInterfaces)[0].ID]})
+               }
+       }
+       return instances, nil
+}
+
+// manageNics returns a map of the Azure network interface resources
+// that have the dispatcher's namePrefix and are attached to a virtual
+// machine. It also garbage-collects NICs that have the namePrefix,
+// are not associated with a virtual machine, and have a "created-at"
+// tag more than DeleteDanglingResourcesAfter in the past; the grace
+// period prevents racing against (and deleting) newly created NICs.
+func (az *azureInstanceSet) manageNics() (map[string]network.Interface, error) {
+       az.stopWg.Add(1)
+       defer az.stopWg.Done()
+
+       result, err := az.netClient.listComplete(az.ctx, az.azconfig.ResourceGroup)
+       if err != nil {
+               return nil, wrapAzureError(err)
+       }
+
+       interfaces := make(map[string]network.Interface)
+
+       timestamp := time.Now()
+       for ; result.NotDone(); err = result.Next() {
+               if err != nil {
+                       az.logger.WithError(err).Warnf("Error listing nics")
+                       return interfaces, nil
+               }
+               if strings.HasPrefix(*result.Value().Name, az.namePrefix) {
+                       if result.Value().VirtualMachine != nil {
+                               interfaces[*result.Value().ID] = result.Value()
+                       } else {
+                               if result.Value().Tags["created-at"] != nil {
+                                       createdAt, err := time.Parse(time.RFC3339Nano, *result.Value().Tags["created-at"])
+                                       if err == nil {
+                                               if timestamp.Sub(createdAt) > az.azconfig.DeleteDanglingResourcesAfter.Duration() {
+                                                       az.logger.Printf("Will delete %v because it is older than %s", *result.Value().Name, az.azconfig.DeleteDanglingResourcesAfter)
+                                                       az.deleteNIC <- *result.Value().Name
+                                               }
+                                       }
+                               }
+                       }
+               }
+       }
+       return interfaces, nil
+}
+
+// manageBlobs garbage-collects blobs (VM disk images) in the
+// configured storage account container. It deletes blobs that have
+// the dispatcher's namePrefix, are "available" (i.e. not leased to a
+// VM), and haven't been modified for longer than
+// DeleteDanglingResourcesAfter.
+func (az *azureInstanceSet) manageBlobs() {
+
+       page := storage.ListBlobsParameters{Prefix: az.namePrefix}
+       timestamp := time.Now()
+
+       for {
+               response, err := az.blobcont.ListBlobs(page)
+               if err != nil {
+                       az.logger.WithError(err).Warn("Error listing blobs")
+                       return
+               }
+               for _, b := range response.Blobs {
+                       age := timestamp.Sub(time.Time(b.Properties.LastModified))
+                       if b.Properties.BlobType == storage.BlobTypePage &&
+                               b.Properties.LeaseState == "available" &&
+                               b.Properties.LeaseStatus == "unlocked" &&
+                               age.Seconds() > az.azconfig.DeleteDanglingResourcesAfter.Duration().Seconds() {
+
+                               az.logger.Printf("Blob %v is unlocked and not modified for %v seconds, will delete", b.Name, age.Seconds())
+                               az.deleteBlob <- b
+                       }
+               }
+               if response.NextMarker != "" {
+                       page.Marker = response.NextMarker
+               } else {
+                       break
+               }
+       }
+}
+
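+// Stop cancels the background context, waits for in-flight API calls
+// to finish, and shuts down the NIC and blob deletion workers.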
+func (az *azureInstanceSet) Stop() {
+       az.stopFunc()
+       az.stopWg.Wait()
+       close(az.deleteNIC)
+       close(az.deleteBlob)
+}
+
+type azureInstance struct {
+       provider *azureInstanceSet
+       nic      network.Interface
+       vm       compute.VirtualMachine
+}
+
+func (ai *azureInstance) ID() cloud.InstanceID {
+       return cloud.InstanceID(*ai.vm.ID)
+}
+
+func (ai *azureInstance) String() string {
+       return *ai.vm.Name
+}
+
+func (ai *azureInstance) ProviderType() string {
+       return string(ai.vm.VirtualMachineProperties.HardwareProfile.VMSize)
+}
+
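+// SetTags replaces the instance's dispatcher-managed tags, which are
+// stored in Azure with a "dispatch-" prefix; tags without that prefix
+// are preserved unchanged.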
+func (ai *azureInstance) SetTags(newTags cloud.InstanceTags) error {
+       ai.provider.stopWg.Add(1)
+       defer ai.provider.stopWg.Done()
+
+       tags := make(map[string]*string)
+
+       for k, v := range ai.vm.Tags {
+               if !strings.HasPrefix(k, "dispatch-") {
+                       tags[k] = v
+               }
+       }
+       for k, v := range newTags {
+               newstr := v
+               tags["dispatch-"+k] = &newstr
+       }
+
+       vmParameters := compute.VirtualMachine{
+               Location: &ai.provider.azconfig.Location,
+               Tags:     tags,
+       }
+       vm, err := ai.provider.vmClient.createOrUpdate(ai.provider.ctx, ai.provider.azconfig.ResourceGroup, *ai.vm.Name, vmParameters)
+       if err != nil {
+               return wrapAzureError(err)
+       }
+       ai.vm = vm
+
+       return nil
+}
+
+func (ai *azureInstance) Tags() cloud.InstanceTags {
+       tags := make(map[string]string)
+
+       for k, v := range ai.vm.Tags {
+               if strings.HasPrefix(k, "dispatch-") {
+                       tags[k[9:]] = *v
+               }
+       }
+
+       return tags
+}
+
+func (ai *azureInstance) Destroy() error {
+       ai.provider.stopWg.Add(1)
+       defer ai.provider.stopWg.Done()
+
+       _, err := ai.provider.vmClient.delete(ai.provider.ctx, ai.provider.azconfig.ResourceGroup, *ai.vm.Name)
+       return wrapAzureError(err)
+}
+
+func (ai *azureInstance) Address() string {
+       if ai.nic.IPConfigurations != nil &&
+               len(*ai.nic.IPConfigurations) > 0 &&
+               (*ai.nic.IPConfigurations)[0].PrivateIPAddress != nil {
+
+               return *(*ai.nic.IPConfigurations)[0].PrivateIPAddress
+       }
+       return ""
+}
+
+func (ai *azureInstance) RemoteUser() string {
+       return ai.provider.azconfig.AdminUsername
+}
+
+func (ai *azureInstance) VerifyHostKey(ssh.PublicKey, *ssh.Client) error {
+       return cloud.ErrNotImplemented
+}
diff --git a/lib/cloud/azure/azure_test.go b/lib/cloud/azure/azure_test.go
new file mode 100644 (file)
index 0000000..96bfb4f
--- /dev/null
@@ -0,0 +1,374 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+//
+//
+// How to manually run individual tests against the real cloud:
+//
+// $ go test -v git.curoverse.com/arvados.git/lib/cloud/azure -live-azure-cfg azconfig.yml -check.f=TestCreate
+//
+// Tests should be run individually, in the order they are listed in this file.
+//
+// Example azconfig.yml:
+//
+// ImageIDForTestSuite: "https://example.blob.core.windows.net/system/Microsoft.Compute/Images/images/zzzzz-compute-osDisk.XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX.vhd"
+// DriverParameters:
+//      SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+//      ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+//      Location: centralus
+//      CloudEnvironment: AzurePublicCloud
+//      ClientSecret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+//      TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+//      ResourceGroup: zzzzz
+//      Network: zzzzz
+//      Subnet: zzzzz-subnet-private
+//      StorageAccount: example
+//      BlobContainer: vhds
+//      DeleteDanglingResourcesAfter: 20s
+//      AdminUsername: crunch
+
+package azure
+
+import (
+       "context"
+       "encoding/json"
+       "errors"
+       "flag"
+       "io/ioutil"
+       "log"
+       "net"
+       "net/http"
+       "os"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/test"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/config"
+       "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute"
+       "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-06-01/network"
+       "github.com/Azure/azure-sdk-for-go/storage"
+       "github.com/Azure/go-autorest/autorest"
+       "github.com/Azure/go-autorest/autorest/azure"
+       "github.com/Azure/go-autorest/autorest/to"
+       "github.com/sirupsen/logrus"
+       "golang.org/x/crypto/ssh"
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+type AzureInstanceSetSuite struct{}
+
+var _ = check.Suite(&AzureInstanceSetSuite{})
+
+type VirtualMachinesClientStub struct{}
+
+func (*VirtualMachinesClientStub) createOrUpdate(ctx context.Context,
+       resourceGroupName string,
+       VMName string,
+       parameters compute.VirtualMachine) (result compute.VirtualMachine, err error) {
+       parameters.ID = &VMName
+       parameters.Name = &VMName
+       return parameters, nil
+}
+
+func (*VirtualMachinesClientStub) delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
+       return nil, nil
+}
+
+func (*VirtualMachinesClientStub) listComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error) {
+       return compute.VirtualMachineListResultIterator{}, nil
+}
+
+type InterfacesClientStub struct{}
+
+func (*InterfacesClientStub) createOrUpdate(ctx context.Context,
+       resourceGroupName string,
+       nicName string,
+       parameters network.Interface) (result network.Interface, err error) {
+       parameters.ID = to.StringPtr(nicName)
+       (*parameters.IPConfigurations)[0].PrivateIPAddress = to.StringPtr("192.168.5.5")
+       return parameters, nil
+}
+
+func (*InterfacesClientStub) delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
+       return nil, nil
+}
+
+func (*InterfacesClientStub) listComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error) {
+       return network.InterfaceListResultIterator{}, nil
+}
+
+type BlobContainerStub struct{}
+
+func (*BlobContainerStub) GetBlobReference(name string) *storage.Blob {
+       return nil
+}
+
+func (*BlobContainerStub) ListBlobs(params storage.ListBlobsParameters) (storage.BlobListResponse, error) {
+       return storage.BlobListResponse{}, nil
+}
+
+type testConfig struct {
+       ImageIDForTestSuite string
+       DriverParameters    json.RawMessage
+}
+
+var live = flag.String("live-azure-cfg", "", "Test with real azure API, provide config file")
+
+func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error) {
+       cluster := arvados.Cluster{
+               InstanceTypes: arvados.InstanceTypeMap(map[string]arvados.InstanceType{
+                       "tiny": arvados.InstanceType{
+                               Name:         "tiny",
+                               ProviderType: "Standard_D1_v2",
+                               VCPUs:        1,
+                               RAM:          4000000000,
+                               Scratch:      10000000000,
+                               Price:        .02,
+                               Preemptible:  false,
+                       },
+               })}
+       if *live != "" {
+               var exampleCfg testConfig
+               err := config.LoadFile(&exampleCfg, *live)
+               if err != nil {
+                       return nil, cloud.ImageID(""), cluster, err
+               }
+
+               ap, err := newAzureInstanceSet(exampleCfg.DriverParameters, "test123", logrus.StandardLogger())
+               return ap, cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, err
+       }
+       ap := azureInstanceSet{
+               azconfig: azureInstanceSetConfig{
+                       BlobContainer: "vhds",
+               },
+               dispatcherID: "test123",
+               namePrefix:   "compute-test123-",
+               logger:       logrus.StandardLogger(),
+               deleteNIC:    make(chan string),
+               deleteBlob:   make(chan storage.Blob),
+       }
+       ap.ctx, ap.stopFunc = context.WithCancel(context.Background())
+       ap.vmClient = &VirtualMachinesClientStub{}
+       ap.netClient = &InterfacesClientStub{}
+       ap.blobcont = &BlobContainerStub{}
+       return &ap, cloud.ImageID("blob"), cluster, nil
+}
+
+func (*AzureInstanceSetSuite) TestCreate(c *check.C) {
+       ap, img, cluster, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider", err)
+       }
+
+       pk, _ := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_dispatch")
+       c.Assert(err, check.IsNil)
+
+       inst, err := ap.Create(cluster.InstanceTypes["tiny"],
+               img, map[string]string{
+                       "TestTagName": "test tag value",
+               }, "umask 0600; echo -n test-file-data >/var/run/test-file", pk)
+
+       c.Assert(err, check.IsNil)
+
+       tags := inst.Tags()
+       c.Check(tags["TestTagName"], check.Equals, "test tag value")
+       c.Logf("inst.String()=%v Address()=%v Tags()=%v", inst.String(), inst.Address(), tags)
+
+}
+
+func (*AzureInstanceSetSuite) TestListInstances(c *check.C) {
+       ap, _, _, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider", err)
+       }
+
+       l, err := ap.Instances(nil)
+
+       c.Assert(err, check.IsNil)
+
+       for _, i := range l {
+               tg := i.Tags()
+               log.Printf("%v %v %v", i.String(), i.Address(), tg)
+       }
+}
+
+func (*AzureInstanceSetSuite) TestManageNics(c *check.C) {
+       ap, _, _, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider", err)
+       }
+
+       ap.(*azureInstanceSet).manageNics()
+       ap.Stop()
+}
+
+func (*AzureInstanceSetSuite) TestManageBlobs(c *check.C) {
+       ap, _, _, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider", err)
+       }
+
+       ap.(*azureInstanceSet).manageBlobs()
+       ap.Stop()
+}
+
+func (*AzureInstanceSetSuite) TestDestroyInstances(c *check.C) {
+       ap, _, _, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider", err)
+       }
+
+       l, err := ap.Instances(nil)
+       c.Assert(err, check.IsNil)
+
+       for _, i := range l {
+               c.Check(i.Destroy(), check.IsNil)
+       }
+}
+
+func (*AzureInstanceSetSuite) TestDeleteFake(c *check.C) {
+       ap, _, _, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider", err)
+       }
+
+       _, err = ap.(*azureInstanceSet).netClient.delete(context.Background(), "fakefakefake", "fakefakefake")
+
+       de, ok := err.(autorest.DetailedError)
+       if ok {
+               rq := de.Original.(*azure.RequestError)
+
+               log.Printf("%v %q %q", rq.Response.StatusCode, rq.ServiceError.Code, rq.ServiceError.Message)
+       }
+}
+
+func (*AzureInstanceSetSuite) TestWrapError(c *check.C) {
+       retryError := autorest.DetailedError{
+               Original: &azure.RequestError{
+                       DetailedError: autorest.DetailedError{
+                               Response: &http.Response{
+                                       StatusCode: 429,
+                                       Header:     map[string][]string{"Retry-After": []string{"123"}},
+                               },
+                       },
+                       ServiceError: &azure.ServiceError{},
+               },
+       }
+       wrapped := wrapAzureError(retryError)
+       _, ok := wrapped.(cloud.RateLimitError)
+       c.Check(ok, check.Equals, true)
+
+       quotaError := autorest.DetailedError{
+               Original: &azure.RequestError{
+                       DetailedError: autorest.DetailedError{
+                               Response: &http.Response{
+                                       StatusCode: 503,
+                               },
+                       },
+                       ServiceError: &azure.ServiceError{
+                               Message: "No more quota",
+                       },
+               },
+       }
+       wrapped = wrapAzureError(quotaError)
+       _, ok = wrapped.(cloud.QuotaError)
+       c.Check(ok, check.Equals, true)
+}
+
+func (*AzureInstanceSetSuite) TestSetTags(c *check.C) {
+       ap, _, _, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider", err)
+       }
+       l, err := ap.Instances(nil)
+       c.Assert(err, check.IsNil)
+
+       if len(l) > 0 {
+               err = l[0].SetTags(map[string]string{"foo": "bar"})
+               if err != nil {
+                       c.Fatal("Error setting tags", err)
+               }
+       }
+       l, err = ap.Instances(nil)
+       c.Assert(err, check.IsNil)
+
+       if len(l) > 0 {
+               tg := l[0].Tags()
+               log.Printf("tags are %v", tg)
+       }
+}
+
+func (*AzureInstanceSetSuite) TestSSH(c *check.C) {
+       ap, _, _, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider", err)
+       }
+       l, err := ap.Instances(nil)
+       c.Assert(err, check.IsNil)
+
+       if len(l) > 0 {
+               sshclient, err := SetupSSHClient(c, l[0])
+               c.Assert(err, check.IsNil)
+               defer sshclient.Conn.Close()
+
+               sess, err := sshclient.NewSession()
+               c.Assert(err, check.IsNil)
+               defer sess.Close()
+               _, err = sess.Output("find /var/run/test-file -maxdepth 0 -user root -perm 0600")
+               c.Assert(err, check.IsNil)
+
+               sess, err = sshclient.NewSession()
+               c.Assert(err, check.IsNil)
+               defer sess.Close()
+               out, err := sess.Output("sudo cat /var/run/test-file")
+               c.Assert(err, check.IsNil)
+               c.Check(string(out), check.Equals, "test-file-data")
+       }
+}
+
+func SetupSSHClient(c *check.C, inst cloud.Instance) (*ssh.Client, error) {
+       // Check the address before appending the port; otherwise the
+       // emptiness test below could never succeed.
+       if inst.Address() == "" {
+               return nil, errors.New("instance has no address")
+       }
+       addr := inst.Address() + ":2222"
+
+       f, err := os.Open("azconfig_sshkey")
+       c.Assert(err, check.IsNil)
+
+       keybytes, err := ioutil.ReadAll(f)
+       c.Assert(err, check.IsNil)
+
+       priv, err := ssh.ParsePrivateKey(keybytes)
+       c.Assert(err, check.IsNil)
+
+       var receivedKey ssh.PublicKey
+       client, err := ssh.Dial("tcp", addr, &ssh.ClientConfig{
+               User: "crunch",
+               Auth: []ssh.AuthMethod{
+                       ssh.PublicKeys(priv),
+               },
+               HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
+                       receivedKey = key
+                       return nil
+               },
+               Timeout: time.Minute,
+       })
+
+       if err != nil {
+               return nil, err
+       } else if receivedKey == nil {
+               return nil, errors.New("BUG: key was never provided to HostKeyCallback")
+       }
+
+       err = inst.VerifyHostKey(receivedKey, client)
+       c.Assert(err, check.IsNil)
+
+       return client, nil
+}
diff --git a/lib/cloud/ec2/ec2.go b/lib/cloud/ec2/ec2.go
new file mode 100644 (file)
index 0000000..c5565d4
--- /dev/null
@@ -0,0 +1,335 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package ec2
+
+import (
+       "crypto/md5"
+       "crypto/rsa"
+       "crypto/sha1"
+       "crypto/x509"
+       "encoding/base64"
+       "encoding/json"
+       "fmt"
+       "math/big"
+       "strings"
+       "sync"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/credentials"
+       "github.com/aws/aws-sdk-go/aws/session"
+       "github.com/aws/aws-sdk-go/service/ec2"
+       "github.com/sirupsen/logrus"
+       "golang.org/x/crypto/ssh"
+)
+
+const arvadosDispatchID = "arvados-dispatch-id"
+const tagPrefix = "arvados-dispatch-tag-"
+
+// Driver is the ec2 implementation of the cloud.Driver interface.
+var Driver = cloud.DriverFunc(newEC2InstanceSet)
+
+type ec2InstanceSetConfig struct {
+       AccessKeyID      string
+       SecretAccessKey  string
+       Region           string
+       SecurityGroupIDs []string
+       SubnetID         string
+       AdminUsername    string
+       EBSVolumeType    string
+}
+
+type ec2Interface interface {
+       DescribeKeyPairs(input *ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error)
+       ImportKeyPair(input *ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error)
+       RunInstances(input *ec2.RunInstancesInput) (*ec2.Reservation, error)
+       DescribeInstances(input *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error)
+       CreateTags(input *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error)
+       TerminateInstances(input *ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error)
+}
+
+type ec2InstanceSet struct {
+       ec2config    ec2InstanceSetConfig
+       dispatcherID cloud.InstanceSetID
+       logger       logrus.FieldLogger
+       client       ec2Interface
+       keysMtx      sync.Mutex
+       keys         map[string]string
+}
+
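+// newEC2InstanceSet decodes the JSON driver configuration and builds
+// an EC2 client with static credentials, defaulting EBSVolumeType to
+// "gp2" when unset.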
+func newEC2InstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
+       instanceSet := &ec2InstanceSet{
+               dispatcherID: dispatcherID,
+               logger:       logger,
+       }
+       err = json.Unmarshal(config, &instanceSet.ec2config)
+       if err != nil {
+               return nil, err
+       }
+       awsConfig := aws.NewConfig().
+               WithCredentials(credentials.NewStaticCredentials(
+                       instanceSet.ec2config.AccessKeyID,
+                       instanceSet.ec2config.SecretAccessKey,
+                       "")).
+               WithRegion(instanceSet.ec2config.Region)
+       instanceSet.client = ec2.New(session.Must(session.NewSession(awsConfig)))
+       instanceSet.keys = make(map[string]string)
+       if instanceSet.ec2config.EBSVolumeType == "" {
+               instanceSet.ec2config.EBSVolumeType = "gp2"
+       }
+       return instanceSet, nil
+}
+
+func awsKeyFingerprint(pk ssh.PublicKey) (md5fp string, sha1fp string, err error) {
+       // AWS key fingerprints don't use the usual key fingerprint
+       // you get from ssh-keygen or ssh.FingerprintLegacyMD5()
+       // (equivalent to md5.Sum(pk.Marshal())).
+       //
+       // Instead, AWS uses the md5 or sha1 of the PKIX DER encoding
+       // of the public key, so calculate those fingerprints here.
+       var rsaPub struct {
+               Name string
+               E    *big.Int
+               N    *big.Int
+       }
+       if err := ssh.Unmarshal(pk.Marshal(), &rsaPub); err != nil {
+               return "", "", fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err)
+       }
+       rsaPk := rsa.PublicKey{
+               E: int(rsaPub.E.Int64()),
+               N: rsaPub.N,
+       }
+       pkix, _ := x509.MarshalPKIXPublicKey(&rsaPk)
+       md5pkix := md5.Sum([]byte(pkix))
+       sha1pkix := sha1.Sum([]byte(pkix))
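+       // Build ":xx:yy:..." strings, then trim the leading colon, so
+       // both fingerprints come out as colon-separated hex bytes.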
+       md5fp = ""
+       sha1fp = ""
+       for i := 0; i < len(md5pkix); i++ {
+               md5fp += fmt.Sprintf(":%02x", md5pkix[i])
+       }
+       for i := 0; i < len(sha1pkix); i++ {
+               sha1fp += fmt.Sprintf(":%02x", sha1pkix[i])
+       }
+       return md5fp[1:], sha1fp[1:], nil
+}
+
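+// Create boots a new EC2 instance. It reuses (or imports) an EC2
+// keypair whose fingerprint matches the given public key, tags the
+// instance with the dispatcher ID plus the given tags, attaches an
+// EBS scratch volume at /dev/xvdt when AddedScratch is requested, and
+// asks for a spot instance when the type is preemptible.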
+func (instanceSet *ec2InstanceSet) Create(
+       instanceType arvados.InstanceType,
+       imageID cloud.ImageID,
+       newTags cloud.InstanceTags,
+       initCommand cloud.InitCommand,
+       publicKey ssh.PublicKey) (cloud.Instance, error) {
+
+       md5keyFingerprint, sha1keyFingerprint, err := awsKeyFingerprint(publicKey)
+       if err != nil {
+               return nil, fmt.Errorf("Could not make key fingerprint: %v", err)
+       }
+       instanceSet.keysMtx.Lock()
+       var keyname string
+       var ok bool
+       if keyname, ok = instanceSet.keys[md5keyFingerprint]; !ok {
+               keyout, err := instanceSet.client.DescribeKeyPairs(&ec2.DescribeKeyPairsInput{
+                       Filters: []*ec2.Filter{&ec2.Filter{
+                               Name:   aws.String("fingerprint"),
+                               Values: []*string{&md5keyFingerprint, &sha1keyFingerprint},
+                       }},
+               })
+               if err != nil {
+                       return nil, fmt.Errorf("Could not search for keypair: %v", err)
+               }
+
+               if len(keyout.KeyPairs) > 0 {
+                       keyname = *(keyout.KeyPairs[0].KeyName)
+               } else {
+                       keyname = "arvados-dispatch-keypair-" + md5keyFingerprint
+                       _, err := instanceSet.client.ImportKeyPair(&ec2.ImportKeyPairInput{
+                               KeyName:           &keyname,
+                               PublicKeyMaterial: ssh.MarshalAuthorizedKey(publicKey),
+                       })
+                       if err != nil {
+                               return nil, fmt.Errorf("Could not import keypair: %v", err)
+                       }
+               }
+               instanceSet.keys[md5keyFingerprint] = keyname
+       }
+       instanceSet.keysMtx.Unlock()
+
+       ec2tags := []*ec2.Tag{
+               &ec2.Tag{
+                       Key:   aws.String(arvadosDispatchID),
+                       Value: aws.String(string(instanceSet.dispatcherID)),
+               },
+               &ec2.Tag{
+                       Key:   aws.String("arvados-class"),
+                       Value: aws.String("dynamic-compute"),
+               },
+       }
+       for k, v := range newTags {
+               ec2tags = append(ec2tags, &ec2.Tag{
+                       Key:   aws.String(tagPrefix + k),
+                       Value: aws.String(v),
+               })
+       }
+
+       rii := ec2.RunInstancesInput{
+               ImageId:      aws.String(string(imageID)),
+               InstanceType: &instanceType.ProviderType,
+               MaxCount:     aws.Int64(1),
+               MinCount:     aws.Int64(1),
+               KeyName:      &keyname,
+
+               NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{
+                       &ec2.InstanceNetworkInterfaceSpecification{
+                               AssociatePublicIpAddress: aws.Bool(false),
+                               DeleteOnTermination:      aws.Bool(true),
+                               DeviceIndex:              aws.Int64(0),
+                               Groups:                   aws.StringSlice(instanceSet.ec2config.SecurityGroupIDs),
+                               SubnetId:                 &instanceSet.ec2config.SubnetID,
+                       }},
+               DisableApiTermination:             aws.Bool(false),
+               InstanceInitiatedShutdownBehavior: aws.String("terminate"),
+               UserData: aws.String(base64.StdEncoding.EncodeToString([]byte("#!/bin/sh\n" + initCommand + "\n"))),
+               TagSpecifications: []*ec2.TagSpecification{
+                       &ec2.TagSpecification{
+                               ResourceType: aws.String("instance"),
+                               Tags:         ec2tags,
+                       }},
+       }
+
+       if instanceType.AddedScratch > 0 {
+               rii.BlockDeviceMappings = []*ec2.BlockDeviceMapping{&ec2.BlockDeviceMapping{
+                       DeviceName: aws.String("/dev/xvdt"),
+                       Ebs: &ec2.EbsBlockDevice{
+                               DeleteOnTermination: aws.Bool(true),
+                               VolumeSize:          aws.Int64((int64(instanceType.AddedScratch) + (1<<30 - 1)) >> 30),
+                               VolumeType:          &instanceSet.ec2config.EBSVolumeType,
+                       }}}
+       }
+
+       if instanceType.Preemptible {
+               rii.InstanceMarketOptions = &ec2.InstanceMarketOptionsRequest{
+                       MarketType: aws.String("spot"),
+                       SpotOptions: &ec2.SpotMarketOptions{
+                               InstanceInterruptionBehavior: aws.String("terminate"),
+                               MaxPrice:                     aws.String(fmt.Sprintf("%v", instanceType.Price)),
+                       }}
+       }
+
+       rsv, err := instanceSet.client.RunInstances(&rii)
+
+       if err != nil {
+               return nil, err
+       }
+
+       return &ec2Instance{
+               provider: instanceSet,
+               instance: rsv.Instances[0],
+       }, nil
+}
+
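+// Instances returns all instances tagged with this dispatcher's ID,
+// skipping any that are already shutting down or terminated, and
+// follows NextToken pagination until the listing is exhausted.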
+func (instanceSet *ec2InstanceSet) Instances(cloud.InstanceTags) (instances []cloud.Instance, err error) {
+       dii := &ec2.DescribeInstancesInput{
+               Filters: []*ec2.Filter{&ec2.Filter{
+                       Name:   aws.String("tag:" + arvadosDispatchID),
+                       Values: []*string{aws.String(string(instanceSet.dispatcherID))},
+               }}}
+
+       for {
+               dio, err := instanceSet.client.DescribeInstances(dii)
+               if err != nil {
+                       return nil, err
+               }
+
+               for _, rsv := range dio.Reservations {
+                       for _, inst := range rsv.Instances {
+                               if *inst.State.Name != "shutting-down" && *inst.State.Name != "terminated" {
+                                       instances = append(instances, &ec2Instance{instanceSet, inst})
+                               }
+                       }
+               }
+               if dio.NextToken == nil {
+                       return instances, err
+               }
+               dii.NextToken = dio.NextToken
+       }
+}
+
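+// Stop is a no-op: the EC2 driver starts no background goroutines.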
+func (instanceSet *ec2InstanceSet) Stop() {
+}
+
+type ec2Instance struct {
+       provider *ec2InstanceSet
+       instance *ec2.Instance
+}
+
+func (inst *ec2Instance) ID() cloud.InstanceID {
+       return cloud.InstanceID(*inst.instance.InstanceId)
+}
+
+func (inst *ec2Instance) String() string {
+       return *inst.instance.InstanceId
+}
+
+func (inst *ec2Instance) ProviderType() string {
+       return *inst.instance.InstanceType
+}
+
+func (inst *ec2Instance) SetTags(newTags cloud.InstanceTags) error {
+       ec2tags := []*ec2.Tag{
+               &ec2.Tag{
+                       Key:   aws.String(arvadosDispatchID),
+                       Value: aws.String(string(inst.provider.dispatcherID)),
+               },
+       }
+       for k, v := range newTags {
+               ec2tags = append(ec2tags, &ec2.Tag{
+                       Key:   aws.String(tagPrefix + k),
+                       Value: aws.String(v),
+               })
+       }
+
+       _, err := inst.provider.client.CreateTags(&ec2.CreateTagsInput{
+               Resources: []*string{inst.instance.InstanceId},
+               Tags:      ec2tags,
+       })
+
+       return err
+}
+
+func (inst *ec2Instance) Tags() cloud.InstanceTags {
+       tags := make(map[string]string)
+
+       for _, t := range inst.instance.Tags {
+               if strings.HasPrefix(*t.Key, tagPrefix) {
+                       tags[(*t.Key)[len(tagPrefix):]] = *t.Value
+               }
+       }
+
+       return tags
+}
+
+func (inst *ec2Instance) Destroy() error {
+       _, err := inst.provider.client.TerminateInstances(&ec2.TerminateInstancesInput{
+               InstanceIds: []*string{inst.instance.InstanceId},
+       })
+       return err
+}
+
+func (inst *ec2Instance) Address() string {
+       if inst.instance.PrivateIpAddress != nil {
+               return *inst.instance.PrivateIpAddress
+       }
+       return ""
+}
+
+func (inst *ec2Instance) RemoteUser() string {
+       return inst.provider.ec2config.AdminUsername
+}
+
+func (inst *ec2Instance) VerifyHostKey(ssh.PublicKey, *ssh.Client) error {
+       return cloud.ErrNotImplemented
+}
diff --git a/lib/cloud/ec2/ec2_test.go b/lib/cloud/ec2/ec2_test.go
new file mode 100644 (file)
index 0000000..50ba011
--- /dev/null
@@ -0,0 +1,247 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+//
+//
+// How to manually run individual tests against the real cloud:
+//
+// $ go test -v git.curoverse.com/arvados.git/lib/cloud/ec2 -live-ec2-cfg ec2config.yml -check.f=TestCreate
+//
+// Tests should be run individually, in the order they are listed in this file.
+//
+// Example ec2config.yml:
+//
+// ImageIDForTestSuite: ami-xxxxxxxxxxxxxxxxx
+// DriverParameters:
+//       AccessKeyID: XXXXXXXXXXXXXX
+//       SecretAccessKey: xxxxxxxxxxxxxxxxxxxx
+//       Region: us-east-1
+//       SecurityGroupIDs: [sg-xxxxxxxx]
+//       SubnetID: subnet-xxxxxxxx
+//       AdminUsername: crunch
+
+package ec2
+
+import (
+       "encoding/json"
+       "flag"
+       "testing"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/test"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/config"
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/service/ec2"
+       "github.com/sirupsen/logrus"
+       check "gopkg.in/check.v1"
+)
+
+var live = flag.String("live-ec2-cfg", "", "Test with real EC2 API, provide config file")
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+type EC2InstanceSetSuite struct{}
+
+var _ = check.Suite(&EC2InstanceSetSuite{})
+
+type testConfig struct {
+       ImageIDForTestSuite string
+       DriverParameters    json.RawMessage
+}
+
+type ec2stub struct {
+}
+
+func (e *ec2stub) ImportKeyPair(input *ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error) {
+       return nil, nil
+}
+
+func (e *ec2stub) DescribeKeyPairs(input *ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error) {
+       return &ec2.DescribeKeyPairsOutput{}, nil
+}
+
+func (e *ec2stub) RunInstances(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
+       return &ec2.Reservation{Instances: []*ec2.Instance{&ec2.Instance{
+               InstanceId: aws.String("i-123"),
+               Tags:       input.TagSpecifications[0].Tags,
+       }}}, nil
+}
+
+func (e *ec2stub) DescribeInstances(input *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) {
+       return &ec2.DescribeInstancesOutput{}, nil
+}
+
+func (e *ec2stub) CreateTags(input *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) {
+       return nil, nil
+}
+
+func (e *ec2stub) TerminateInstances(input *ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error) {
+       return nil, nil
+}
+
+func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error) {
+       cluster := arvados.Cluster{
+               InstanceTypes: arvados.InstanceTypeMap(map[string]arvados.InstanceType{
+                       "tiny": arvados.InstanceType{
+                               Name:         "tiny",
+                               ProviderType: "t2.micro",
+                               VCPUs:        1,
+                               RAM:          4000000000,
+                               Scratch:      10000000000,
+                               Price:        .02,
+                               Preemptible:  false,
+                       },
+                       "tiny-with-extra-scratch": arvados.InstanceType{
+                               Name:         "tiny",
+                               ProviderType: "t2.micro",
+                               VCPUs:        1,
+                               RAM:          4000000000,
+                               Price:        .02,
+                               Preemptible:  false,
+                               AddedScratch: 20000000000,
+                       },
+                       "tiny-preemptible": arvados.InstanceType{
+                               Name:         "tiny",
+                               ProviderType: "t2.micro",
+                               VCPUs:        1,
+                               RAM:          4000000000,
+                               Scratch:      10000000000,
+                               Price:        .02,
+                               Preemptible:  true,
+                       },
+               })}
+       if *live != "" {
+               var exampleCfg testConfig
+               err := config.LoadFile(&exampleCfg, *live)
+               if err != nil {
+                       return nil, cloud.ImageID(""), cluster, err
+               }
+
+               ap, err := newEC2InstanceSet(exampleCfg.DriverParameters, "test123", logrus.StandardLogger())
+               return ap, cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, err
+       }
+       ap := ec2InstanceSet{
+               ec2config:    ec2InstanceSetConfig{},
+               dispatcherID: "test123",
+               logger:       logrus.StandardLogger(),
+               client:       &ec2stub{},
+               keys:         make(map[string]string),
+       }
+       return &ap, cloud.ImageID("blob"), cluster, nil
+}
+
+func (*EC2InstanceSetSuite) TestCreate(c *check.C) {
+       ap, img, cluster, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider", err)
+       }
+
+       pk, _ := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_dispatch")
+
+       inst, err := ap.Create(cluster.InstanceTypes["tiny"],
+               img, map[string]string{
+                       "TestTagName": "test tag value",
+               }, "umask 0600; echo -n test-file-data >/var/run/test-file", pk)
+
+       c.Assert(err, check.IsNil)
+
+       tags := inst.Tags()
+       c.Check(tags["TestTagName"], check.Equals, "test tag value")
+       c.Logf("inst.String()=%v Address()=%v Tags()=%v", inst.String(), inst.Address(), tags)
+
+}
+
+func (*EC2InstanceSetSuite) TestCreateWithExtraScratch(c *check.C) {
+       ap, img, cluster, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider", err)
+       }
+
+       pk, _ := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_dispatch")
+
+       inst, err := ap.Create(cluster.InstanceTypes["tiny-with-extra-scratch"],
+               img, map[string]string{
+                       "TestTagName": "test tag value",
+               }, "umask 0600; echo -n test-file-data >/var/run/test-file", pk)
+
+       c.Assert(err, check.IsNil)
+
+       tags := inst.Tags()
+       c.Check(tags["TestTagName"], check.Equals, "test tag value")
+       c.Logf("inst.String()=%v Address()=%v Tags()=%v", inst.String(), inst.Address(), tags)
+
+}
+
+func (*EC2InstanceSetSuite) TestCreatePreemptible(c *check.C) {
+       ap, img, cluster, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider", err)
+       }
+
+       pk, _ := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_dispatch")
+
+       inst, err := ap.Create(cluster.InstanceTypes["tiny-preemptible"],
+               img, map[string]string{
+                       "TestTagName": "test tag value",
+               }, "umask 0600; echo -n test-file-data >/var/run/test-file", pk)
+
+       c.Assert(err, check.IsNil)
+
+       tags := inst.Tags()
+       c.Check(tags["TestTagName"], check.Equals, "test tag value")
+       c.Logf("inst.String()=%v Address()=%v Tags()=%v", inst.String(), inst.Address(), tags)
+
+}
+
+func (*EC2InstanceSetSuite) TestTagInstances(c *check.C) {
+       ap, _, _, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider", err)
+       }
+
+       l, err := ap.Instances(nil)
+       c.Assert(err, check.IsNil)
+
+       for _, i := range l {
+               tg := i.Tags()
+               tg["TestTag2"] = "123 test tag 2"
+               c.Check(i.SetTags(tg), check.IsNil)
+       }
+}
+
+func (*EC2InstanceSetSuite) TestListInstances(c *check.C) {
+       ap, _, _, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider: ", err)
+       }
+
+       l, err := ap.Instances(nil)
+
+       c.Assert(err, check.IsNil)
+
+       for _, i := range l {
+               tg := i.Tags()
+               c.Logf("%v %v %v", i.String(), i.Address(), tg)
+       }
+}
+
+func (*EC2InstanceSetSuite) TestDestroyInstances(c *check.C) {
+       ap, _, _, err := GetInstanceSet()
+       if err != nil {
+               c.Fatal("Error making provider", err)
+       }
+
+       l, err := ap.Instances(nil)
+       c.Assert(err, check.IsNil)
+
+       for _, i := range l {
+               c.Check(i.Destroy(), check.IsNil)
+       }
+}
diff --git a/lib/cloud/interfaces.go b/lib/cloud/interfaces.go
new file mode 100644 (file)
index 0000000..792e737
--- /dev/null
@@ -0,0 +1,198 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package cloud
+
+import (
+       "encoding/json"
+       "errors"
+       "io"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/sirupsen/logrus"
+       "golang.org/x/crypto/ssh"
+)
+
+// A RateLimitError should be returned by an InstanceSet when the
+// cloud service indicates it is rejecting all API calls for some time
+// interval.
+type RateLimitError interface {
+       // Time before which the caller should expect requests to
+       // fail.
+       EarliestRetry() time.Time
+       error
+}
+
+// A QuotaError should be returned by an InstanceSet when the cloud
+// service indicates the account cannot create more VMs than already
+// exist.
+type QuotaError interface {
+       // If true, don't create more instances until some existing
+       // instances are destroyed. If false, don't handle the error
+       // as a quota error.
+       IsQuotaError() bool
+       error
+}
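+
+// A driver would typically wrap the cloud provider's error in a type
+// satisfying one of these interfaces, e.g. (a sketch; rateLimitError
+// is hypothetical, not part of this package):
+//
+//     type rateLimitError struct {
+//             error
+//             earliestRetry time.Time
+//     }
+//
+//     func (err rateLimitError) EarliestRetry() time.Time {
+//             return err.earliestRetry
+//     }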
+
+type InstanceSetID string
+type InstanceTags map[string]string
+type InstanceID string
+type ImageID string
+
+// An Executor executes commands on an ExecutorTarget.
+type Executor interface {
+       // Update the set of private keys used to authenticate to
+       // targets.
+       SetSigners(...ssh.Signer)
+
+       // Set the target used for subsequent command executions.
+       SetTarget(ExecutorTarget)
+
+       // Return the current target.
+       Target() ExecutorTarget
+
+       // Execute a shell command and return the resulting stdout and
+       // stderr. stdin can be nil.
+       Execute(cmd string, stdin io.Reader) (stdout, stderr []byte, err error)
+}
+
+var ErrNotImplemented = errors.New("not implemented")
+
+// An ExecutorTarget is a remote command execution service.
+type ExecutorTarget interface {
+       // SSH server hostname or IP address, or empty string if
+       // unknown while instance is booting.
+       Address() string
+
+       // Remote username to send during SSH authentication.
+       RemoteUser() string
+
+       // Return nil if the given public key matches the instance's
+       // SSH server key. If the provided Dialer is not nil,
+       // VerifyHostKey can use it to make outgoing network
+       // connections from the instance -- e.g., to use the cloud's
+       // "this instance's metadata" API.
+       //
+       // Return ErrNotImplemented if no verification mechanism is
+       // available.
+       VerifyHostKey(ssh.PublicKey, *ssh.Client) error
+}
+
+// Instance is implemented by the provider-specific instance types.
+type Instance interface {
+       ExecutorTarget
+
+       // ID returns the provider's instance ID. It must be stable
+       // for the life of the instance.
+       ID() InstanceID
+
+       // String typically returns the cloud-provided instance ID.
+       String() string
+
+       // Cloud provider's "instance type" ID. Matches a ProviderType
+       // in the cluster's InstanceTypes configuration.
+       ProviderType() string
+
+       // Get current tags
+       Tags() InstanceTags
+
+       // Replace tags with the given tags
+       SetTags(InstanceTags) error
+
+       // Shut down the node
+       Destroy() error
+}
+
+// An InstanceSet manages a set of VM instances created by an elastic
+// cloud provider like AWS, GCE, or Azure.
+//
+// All public methods of an InstanceSet, and all public methods of the
+// instances it returns, are goroutine safe.
+type InstanceSet interface {
+       // Create a new instance with the given type, image, and
+       // initial set of tags. If supported by the driver, add the
+       // provided public key to /root/.ssh/authorized_keys.
+       //
+       // The given InitCommand should be executed on the newly
+       // created instance. This is optional for a driver whose
+       // instances' VerifyHostKey() method never returns
+       // ErrNotImplemented. InitCommand will be under 1 KiB.
+       //
+       // The returned error should implement RateLimitError and
+       // QuotaError where applicable.
+       Create(arvados.InstanceType, ImageID, InstanceTags, InitCommand, ssh.PublicKey) (Instance, error)
+
+       // Return all instances, including ones that are booting or
+       // shutting down. Optionally, filter out nodes that don't have
+       // all of the given InstanceTags (the caller will ignore these
+       // anyway).
+       //
+       // An instance returned by successive calls to Instances() may
+       // -- but does not need to -- be represented by the same
+       // Instance object each time. Thus, the caller is responsible
+       // for de-duplicating the returned instances by comparing the
+       // InstanceIDs returned by the instances' ID() methods.
+       Instances(InstanceTags) ([]Instance, error)
+
+       // Stop any background tasks and release other resources.
+       Stop()
+}
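+
+// A caller might use an InstanceSet like this (a sketch only; error
+// handling elided, and is, itype, img and pubkey are assumed to be in
+// scope with the appropriate types):
+//
+//     inst, _ := is.Create(itype, img, InstanceTags{"Name": "worker"}, "", pubkey)
+//     instances, _ := is.Instances(nil)
+//     for _, i := range instances {
+//             if i.ID() == inst.ID() {
+//                     _ = i.Destroy()
+//             }
+//     }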
+
+type InitCommand string
+
+// A Driver returns an InstanceSet that uses the given InstanceSetID
+// and driver-dependent configuration parameters.
+//
+// The supplied id will be of the form "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+// where each z can be any alphanum. The returned InstanceSet must use
+// this id to tag long-lived cloud resources that it creates, and must
+// assume control of any existing resources that are tagged with the
+// same id. Tagging can be accomplished by including the ID in
+// resource names, using the cloud provider's tagging feature, or any
+// other mechanism. The tags must be visible to another instance of
+// the same driver running on a different host.
+//
+// The returned InstanceSet must ignore existing resources that are
+// visible but not tagged with the given id, except that it should log
+// a summary of such resources -- only once -- when it starts
+// up. Thus, two identically configured InstanceSets running on
+// different hosts with different ids should log about the existence
+// of each other's resources at startup, but will not interfere with
+// each other.
+//
+// Example:
+//
+//     type exampleInstanceSet struct {
+//             ownID     string
+//             AccessKey string
+//     }
+//
+//     type exampleDriver struct {}
+//
+//     func (*exampleDriver) InstanceSet(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error) {
+//             var is exampleInstanceSet
+//             if err := json.Unmarshal(config, &is); err != nil {
+//                     return nil, err
+//             }
+//             is.ownID = string(id)
+//             return &is, nil
+//     }
+//
+//     var _ = registerCloudDriver("example", &exampleDriver{})
+type Driver interface {
+       InstanceSet(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)
+}
+
+// DriverFunc makes a Driver using the provided function as its
+// InstanceSet method. This is similar to http.HandlerFunc.
+func DriverFunc(fn func(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)) Driver {
+       return driverFunc(fn)
+}
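+
+// For example, a driver package could expose its constructor as a
+// Driver like this (a sketch; newExampleInstanceSet is a hypothetical
+// constructor with a matching signature):
+//
+//     var ExampleDriver = DriverFunc(func(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error) {
+//             return newExampleInstanceSet(config, id, logger)
+//     })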
+
+type driverFunc func(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)
+
+func (df driverFunc) InstanceSet(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error) {
+       return df(config, id, logger)
+}
diff --git a/lib/cmd/cmd.go b/lib/cmd/cmd.go
new file mode 100644 (file)
index 0000000..9292ef7
--- /dev/null
@@ -0,0 +1,132 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Package cmd helps define reusable functions that can be exposed as
+// [subcommands of] command line programs.
+package cmd
+
+import (
+       "flag"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "path/filepath"
+       "regexp"
+       "runtime"
+       "sort"
+       "strings"
+)
+
+type Handler interface {
+       RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int
+}
+
+type HandlerFunc func(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int
+
+func (f HandlerFunc) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       return f(prog, args, stdin, stdout, stderr)
+}
+
+type Version string
+
+func (v Version) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       prog = regexp.MustCompile(` -*version$`).ReplaceAllLiteralString(prog, "")
+       fmt.Fprintf(stdout, "%s %s (%s)\n", prog, v, runtime.Version())
+       return 0
+}
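+
+// For example, a Multi command map (see Multi below) might expose
+// version subcommands like this (a sketch; the version string is
+// illustrative):
+//
+//     var handler = Multi(map[string]Handler{
+//             "version":   Version("1.3.0"),
+//             "--version": Version("1.3.0"),
+//     })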
+
+// Multi is a Handler that looks up its first argument in a map (after
+// stripping any "arvados-" or "crunch-" prefix), and invokes the
+// resulting Handler with the remaining args.
+//
+// Example:
+//
+//     os.Exit(Multi(map[string]Handler{
+//             "foobar": HandlerFunc(func(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+//                     fmt.Println(args[0])
+//                     return 2
+//             }),
+//     }).RunCommand("/usr/bin/multi", []string{"foobar", "baz"}, os.Stdin, os.Stdout, os.Stderr))
+//
+// ...prints "baz" and exits 2.
+type Multi map[string]Handler
+
+func (m Multi) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       _, basename := filepath.Split(prog)
+       basename = strings.TrimPrefix(basename, "arvados-")
+       basename = strings.TrimPrefix(basename, "crunch-")
+       if cmd, ok := m[basename]; ok {
+               return cmd.RunCommand(prog, args, stdin, stdout, stderr)
+       } else if len(args) < 1 {
+               fmt.Fprintf(stderr, "usage: %s command [args]\n", prog)
+               m.Usage(stderr)
+               return 2
+       } else if cmd, ok = m[args[0]]; ok {
+               return cmd.RunCommand(prog+" "+args[0], args[1:], stdin, stdout, stderr)
+       } else {
+               fmt.Fprintf(stderr, "%s: unrecognized command %q\n", prog, args[0])
+               m.Usage(stderr)
+               return 2
+       }
+}
+
+func (m Multi) Usage(stderr io.Writer) {
+       fmt.Fprintf(stderr, "\nAvailable commands:\n")
+       m.listSubcommands(stderr, "")
+}
+
+func (m Multi) listSubcommands(out io.Writer, prefix string) {
+       var subcommands []string
+       for sc := range m {
+               if strings.HasPrefix(sc, "-") {
+                       // Some subcommands have alternate versions
+                       // like "--version" for compatibility. Don't
+                       // clutter the subcommand summary with those.
+                       continue
+               }
+               subcommands = append(subcommands, sc)
+       }
+       sort.Strings(subcommands)
+       for _, sc := range subcommands {
+               switch cmd := m[sc].(type) {
+               case Multi:
+                       cmd.listSubcommands(out, prefix+sc+" ")
+               default:
+                       fmt.Fprintf(out, "    %s%s\n", prefix, sc)
+               }
+       }
+}
+
+type FlagSet interface {
+       Init(string, flag.ErrorHandling)
+       Args() []string
+       NArg() int
+       Parse([]string) error
+       SetOutput(io.Writer)
+       PrintDefaults()
+}
+
+// SubcommandToFront silently parses args using flagset, and returns a
+// copy of args with the first non-flag argument moved to the
+// front. If parsing fails or consumes all of args, args is returned
+// unchanged.
+//
+// SubcommandToFront invokes methods on flagset that have side
+// effects, including Parse. In typical usage, flagset will not be
+// used for anything else after being passed to SubcommandToFront.
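+//
+// For example (a sketch mirroring this package's tests):
+//
+//     flags := flag.NewFlagSet("", flag.ContinueOnError)
+//     flags.String("format", "json", "")
+//     args := SubcommandToFront([]string{"--format=yaml", "subcmd", "-x"}, flags)
+//     // args is now []string{"subcmd", "--format=yaml", "-x"}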
+func SubcommandToFront(args []string, flagset FlagSet) []string {
+       flagset.Init("", flag.ContinueOnError)
+       flagset.SetOutput(ioutil.Discard)
+       if err := flagset.Parse(args); err != nil || flagset.NArg() == 0 {
+               // No subcommand found.
+               return args
+       }
+       // Move subcommand to the front.
+       flagargs := len(args) - flagset.NArg()
+       newargs := make([]string, len(args))
+       newargs[0] = args[flagargs]
+       copy(newargs[1:flagargs+1], args[:flagargs])
+       copy(newargs[flagargs+1:], args[flagargs+1:])
+       return newargs
+}
diff --git a/lib/cmd/cmd_test.go b/lib/cmd/cmd_test.go
new file mode 100644 (file)
index 0000000..2fc5098
--- /dev/null
@@ -0,0 +1,72 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package cmd
+
+import (
+       "bytes"
+       "flag"
+       "fmt"
+       "io"
+       "strings"
+       "testing"
+
+       "git.curoverse.com/arvados.git/lib/cmdtest"
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&CmdSuite{})
+
+type CmdSuite struct{}
+
+var testCmd = Multi(map[string]Handler{
+       "echo": HandlerFunc(func(prog string, args []string, stdin io.Reader, stdout io.Writer, stderr io.Writer) int {
+               fmt.Fprintln(stdout, strings.Join(args, " "))
+               return 0
+       }),
+})
+
+func (s *CmdSuite) TestHello(c *check.C) {
+       defer cmdtest.LeakCheck(c)()
+       stdout := bytes.NewBuffer(nil)
+       stderr := bytes.NewBuffer(nil)
+       exited := testCmd.RunCommand("prog", []string{"echo", "hello", "world"}, bytes.NewReader(nil), stdout, stderr)
+       c.Check(exited, check.Equals, 0)
+       c.Check(stdout.String(), check.Equals, "hello world\n")
+       c.Check(stderr.String(), check.Equals, "")
+}
+
+func (s *CmdSuite) TestHelloViaProg(c *check.C) {
+       defer cmdtest.LeakCheck(c)()
+       stdout := bytes.NewBuffer(nil)
+       stderr := bytes.NewBuffer(nil)
+       exited := testCmd.RunCommand("/usr/local/bin/echo", []string{"hello", "world"}, bytes.NewReader(nil), stdout, stderr)
+       c.Check(exited, check.Equals, 0)
+       c.Check(stdout.String(), check.Equals, "hello world\n")
+       c.Check(stderr.String(), check.Equals, "")
+}
+
+func (s *CmdSuite) TestUsage(c *check.C) {
+       defer cmdtest.LeakCheck(c)()
+       stdout := bytes.NewBuffer(nil)
+       stderr := bytes.NewBuffer(nil)
+       exited := testCmd.RunCommand("prog", []string{"nosuchcommand", "hi"}, bytes.NewReader(nil), stdout, stderr)
+       c.Check(exited, check.Equals, 2)
+       c.Check(stdout.String(), check.Equals, "")
+       c.Check(stderr.String(), check.Matches, `(?ms)^prog: unrecognized command "nosuchcommand"\n.*echo.*\n`)
+}
+
+func (s *CmdSuite) TestSubcommandToFront(c *check.C) {
+       defer cmdtest.LeakCheck(c)()
+       flags := flag.NewFlagSet("", flag.ContinueOnError)
+       flags.String("format", "json", "")
+       flags.Bool("n", false, "")
+       args := SubcommandToFront([]string{"--format=yaml", "-n", "-format", "beep", "echo", "hi"}, flags)
+       c.Check(args, check.DeepEquals, []string{"echo", "--format=yaml", "-n", "-format", "beep", "hi"})
+}
diff --git a/lib/cmdtest/leakcheck.go b/lib/cmdtest/leakcheck.go
new file mode 100644 (file)
index 0000000..c132f1b
--- /dev/null
@@ -0,0 +1,54 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Package cmdtest provides tools for testing command line tools.
+package cmdtest
+
+import (
+       "io"
+       "io/ioutil"
+       "os"
+
+       check "gopkg.in/check.v1"
+)
+
+// LeakCheck tests for output being leaked to os.Stdout and os.Stderr
+// that should be sent elsewhere (e.g., the stdout and stderr streams
+// passed to a cmd.Handler's RunCommand).
+//
+// It redirects os.Stdout and os.Stderr to a tempfile, and returns a
+// func, which the caller is expected to defer, that restores os.* and
+// checks that the tempfile is empty.
+//
+// Example:
+//
+//     func (s *Suite) TestSomething(c *check.C) {
+//             defer cmdtest.LeakCheck(c)()
+//             // ... do things that shouldn't print to os.Stderr or os.Stdout
+//     }
+func LeakCheck(c *check.C) func() {
+       tmpfiles := map[string]*os.File{"stdout": nil, "stderr": nil}
+       for i := range tmpfiles {
+               var err error
+               tmpfiles[i], err = ioutil.TempFile("", "")
+               c.Assert(err, check.IsNil)
+               err = os.Remove(tmpfiles[i].Name())
+               c.Assert(err, check.IsNil)
+       }
+
+       stdout, stderr := os.Stdout, os.Stderr
+       os.Stdout, os.Stderr = tmpfiles["stdout"], tmpfiles["stderr"]
+       return func() {
+               os.Stdout, os.Stderr = stdout, stderr
+
+               for i, tmpfile := range tmpfiles {
+                       c.Logf("checking %s", i)
+                       _, err := tmpfile.Seek(0, io.SeekStart)
+                       c.Assert(err, check.IsNil)
+                       leaked, err := ioutil.ReadAll(tmpfile)
+                       c.Assert(err, check.IsNil)
+                       c.Check(string(leaked), check.Equals, "")
+               }
+       }
+}
diff --git a/lib/controller/cmd.go b/lib/controller/cmd.go
new file mode 100644 (file)
index 0000000..c1d4657
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "context"
+
+       "git.curoverse.com/arvados.git/lib/cmd"
+       "git.curoverse.com/arvados.git/lib/service"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+var Command cmd.Handler = service.Command(arvados.ServiceNameController, newHandler)
+
+func newHandler(_ context.Context, cluster *arvados.Cluster, np *arvados.NodeProfile) service.Handler {
+       return &Handler{Cluster: cluster, NodeProfile: np}
+}
diff --git a/lib/controller/fed_collections.go b/lib/controller/fed_collections.go
new file mode 100644 (file)
index 0000000..ab49e39
--- /dev/null
@@ -0,0 +1,314 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "bufio"
+       "bytes"
+       "context"
+       "crypto/md5"
+       "encoding/json"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "strings"
+       "sync"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+)
+
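+// rewriteSignatures rewrites locator signatures in a collection
+// response from cluster clusterID so they are recognizable as remote
+// signatures, e.g. (a sketch; hash, signature and timestamp are
+// illustrative):
+//
+//     acbd18db4cc2f85cedef654fccc4a4d8+3+Aabc123@5e8c1234
+//     becomes
+//     acbd18db4cc2f85cedef654fccc4a4d8+3+RclusterID-abc123@5e8c1234
+//
+// It also recomputes the manifest hash and verifies it against
+// expectHash (or against the record's own portable_data_hash when
+// expectHash is empty).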
+func rewriteSignatures(clusterID string, expectHash string,
+       resp *http.Response, requestError error) (newResponse *http.Response, err error) {
+
+       if requestError != nil {
+               return resp, requestError
+       }
+
+       if resp.StatusCode != http.StatusOK {
+               return resp, nil
+       }
+
+       originalBody := resp.Body
+       defer originalBody.Close()
+
+       var col arvados.Collection
+       err = json.NewDecoder(resp.Body).Decode(&col)
+       if err != nil {
+               return nil, err
+       }
+
+       // Rewriting signatures will make the manifest text 5-10%
+       // bigger, so calculate the buffer capacity accordingly.
+       updatedManifest := bytes.NewBuffer(make([]byte, 0, int(float64(len(col.ManifestText))*1.1)))
+
+       hasher := md5.New()
+       mw := io.MultiWriter(hasher, updatedManifest)
+       sz := 0
+
+       scanner := bufio.NewScanner(strings.NewReader(col.ManifestText))
+       scanner.Buffer(make([]byte, 1048576), len(col.ManifestText))
+       for scanner.Scan() {
+               line := scanner.Text()
+               tokens := strings.Split(line, " ")
+               if len(tokens) < 3 {
+                       return nil, fmt.Errorf("Invalid stream (<3 tokens): %q", line)
+               }
+
+               n, err := mw.Write([]byte(tokens[0]))
+               if err != nil {
+                       return nil, fmt.Errorf("Error updating manifest: %v", err)
+               }
+               sz += n
+               for _, token := range tokens[1:] {
+                       n, err = mw.Write([]byte(" "))
+                       if err != nil {
+                               return nil, fmt.Errorf("Error updating manifest: %v", err)
+                       }
+                       sz += n
+
+                       m := keepclient.SignedLocatorRe.FindStringSubmatch(token)
+                       if m != nil {
+                               // Rewrite the block signature to be a remote signature
+                               _, err = fmt.Fprintf(updatedManifest, "%s%s%s+R%s-%s%s", m[1], m[2], m[3], clusterID, m[5][2:], m[8])
+                               if err != nil {
+                                       return nil, fmt.Errorf("Error updating manifest: %v", err)
+                               }
+
+                               // for hash checking, ignore signatures
+                               n, err = fmt.Fprintf(hasher, "%s%s", m[1], m[2])
+                               if err != nil {
+                                       return nil, fmt.Errorf("Error updating manifest: %v", err)
+                               }
+                               sz += n
+                       } else {
+                               n, err = mw.Write([]byte(token))
+                               if err != nil {
+                                       return nil, fmt.Errorf("Error updating manifest: %v", err)
+                               }
+                               sz += n
+                       }
+               }
+               n, err = mw.Write([]byte("\n"))
+               if err != nil {
+                       return nil, fmt.Errorf("Error updating manifest: %v", err)
+               }
+               sz += n
+       }
+
+       // Check that expected hash is consistent with
+       // portable_data_hash field of the returned record
+       if expectHash == "" {
+               expectHash = col.PortableDataHash
+       } else if expectHash != col.PortableDataHash {
+               return nil, fmt.Errorf("portable_data_hash %q on returned record did not match expected hash %q ", expectHash, col.PortableDataHash)
+       }
+
+       // Certify that the computed hash of the manifest_text matches our expectation
+       sum := hasher.Sum(nil)
+       computedHash := fmt.Sprintf("%x+%v", sum, sz)
+       if computedHash != expectHash {
+               return nil, fmt.Errorf("Computed manifest_text hash %q did not match expected hash %q", computedHash, expectHash)
+       }
+
+       col.ManifestText = updatedManifest.String()
+
+       newbody, err := json.Marshal(col)
+       if err != nil {
+               return nil, err
+       }
+
+       buf := bytes.NewBuffer(newbody)
+       resp.Body = ioutil.NopCloser(buf)
+       resp.ContentLength = int64(buf.Len())
+       resp.Header.Set("Content-Length", fmt.Sprintf("%v", buf.Len()))
+
+       return resp, nil
+}
+
+func filterLocalClusterResponse(resp *http.Response, requestError error) (newResponse *http.Response, err error) {
+       if requestError != nil {
+               return resp, requestError
+       }
+
+       if resp.StatusCode == http.StatusNotFound {
+               // Suppress returning this result, because we want to
+               // search the federation.
+               return nil, nil
+       }
+       return resp, nil
+}
+
+type searchRemoteClusterForPDH struct {
+       pdh           string
+       remoteID      string
+       mtx           *sync.Mutex
+       sentResponse  *bool
+       sharedContext *context.Context
+       cancelFunc    func()
+       errors        *[]string
+       statusCode    *int
+}
+
+func fetchRemoteCollectionByUUID(
+       h *genericFederatedRequestHandler,
+       effectiveMethod string,
+       clusterId *string,
+       uuid string,
+       remainder string,
+       w http.ResponseWriter,
+       req *http.Request) bool {
+
+       if effectiveMethod != "GET" {
+               // Only handle GET requests right now
+               return false
+       }
+
+       if uuid != "" {
+               // Collection UUID GET request
+               *clusterId = uuid[0:5]
+               if *clusterId != "" && *clusterId != h.handler.Cluster.ClusterID {
+                       // request for remote collection by uuid
+                       resp, err := h.handler.remoteClusterRequest(*clusterId, req)
+                       newResponse, err := rewriteSignatures(*clusterId, "", resp, err)
+                       h.handler.proxy.ForwardResponse(w, newResponse, err)
+                       return true
+               }
+       }
+
+       return false
+}
+
+func fetchRemoteCollectionByPDH(
+       h *genericFederatedRequestHandler,
+       effectiveMethod string,
+       clusterId *string,
+       uuid string,
+       remainder string,
+       w http.ResponseWriter,
+       req *http.Request) bool {
+
+       if effectiveMethod != "GET" {
+               // Only handle GET requests right now
+               return false
+       }
+
+       m := collectionsByPDHRe.FindStringSubmatch(req.URL.Path)
+       if len(m) != 2 {
+               return false
+       }
+
+       // Request for collection by PDH.  Search the federation.
+
+       // First, query the local cluster.
+       resp, err := h.handler.localClusterRequest(req)
+       newResp, err := filterLocalClusterResponse(resp, err)
+       if newResp != nil || err != nil {
+               h.handler.proxy.ForwardResponse(w, newResp, err)
+               return true
+       }
+
+       // Create a goroutine for each cluster in the
+       // RemoteClusters map.  The first valid result gets
+       // returned to the client.  When that happens, all
+       // other outstanding requests are cancelled
+       sharedContext, cancelFunc := context.WithCancel(req.Context())
+       req = req.WithContext(sharedContext)
+       wg := sync.WaitGroup{}
+       pdh := m[1]
+       success := make(chan *http.Response)
+       errorChan := make(chan error, len(h.handler.Cluster.RemoteClusters))
+
+       // use channel as a semaphore to limit the number of concurrent
+       // requests at a time
+       sem := make(chan bool, h.handler.Cluster.RequestLimits.GetMultiClusterRequestConcurrency())
+
+       defer cancelFunc()
+
+       for remoteID := range h.handler.Cluster.RemoteClusters {
+               if remoteID == h.handler.Cluster.ClusterID {
+                       // No need to query local cluster again
+                       continue
+               }
+
+               wg.Add(1)
+               go func(remote string) {
+                       defer wg.Done()
+                       // blocks until it can put a value into the
+                       // channel (which has a max queue capacity)
+                       sem <- true
+                       // Release the slot on all return paths, so other
+                       // goroutines waiting on sem aren't blocked forever.
+                       defer func() { <-sem }()
+                       select {
+                       case <-sharedContext.Done():
+                               return
+                       default:
+                       }
+
+                       resp, err := h.handler.remoteClusterRequest(remote, req)
+                       wasSuccess := false
+                       defer func() {
+                               if resp != nil && !wasSuccess {
+                                       resp.Body.Close()
+                               }
+                       }()
+                       if err != nil {
+                               errorChan <- err
+                               return
+                       }
+                       if resp.StatusCode != http.StatusOK {
+                               errorChan <- HTTPError{resp.Status, resp.StatusCode}
+                               return
+                       }
+                       select {
+                       case <-sharedContext.Done():
+                               return
+                       default:
+                       }
+
+                       newResponse, err := rewriteSignatures(remote, pdh, resp, nil)
+                       if err != nil {
+                               errorChan <- err
+                               return
+                       }
+                       select {
+                       case <-sharedContext.Done():
+                       case success <- newResponse:
+                               wasSuccess = true
+                       }
+               }(remoteID)
+       }
+       go func() {
+               wg.Wait()
+               cancelFunc()
+       }()
+
+       errorCode := http.StatusNotFound
+
+       for {
+               select {
+               case newResp = <-success:
+                       h.handler.proxy.ForwardResponse(w, newResp, nil)
+                       return true
+               case <-sharedContext.Done():
+                       var errors []string
+                       for len(errorChan) > 0 {
+                               err := <-errorChan
+                               if httperr, ok := err.(HTTPError); ok {
+                                       if httperr.Code != http.StatusNotFound {
+                                               errorCode = http.StatusBadGateway
+                                       }
+                               }
+                               errors = append(errors, err.Error())
+                       }
+                       httpserver.Errors(w, errors, errorCode)
+                       return true
+               }
+       }
+}
diff --git a/lib/controller/fed_containers.go b/lib/controller/fed_containers.go
new file mode 100644 (file)
index 0000000..7fd5b25
--- /dev/null
@@ -0,0 +1,122 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "bytes"
+       "encoding/json"
+       "fmt"
+       "io/ioutil"
+       "net/http"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/auth"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+)
+
+func remoteContainerRequestCreate(
+       h *genericFederatedRequestHandler,
+       effectiveMethod string,
+       clusterId *string,
+       uuid string,
+       remainder string,
+       w http.ResponseWriter,
+       req *http.Request) bool {
+
+       if effectiveMethod != "POST" || uuid != "" || remainder != "" {
+               return false
+       }
+
+       // First make sure supplied token is valid.
+       creds := auth.NewCredentials()
+       creds.LoadTokensFromHTTPRequest(req)
+
+       currentUser, err := h.handler.validateAPItoken(req, creds.Tokens[0])
+       if err != nil {
+               httpserver.Error(w, err.Error(), http.StatusForbidden)
+               return true
+       }
+
+       if *clusterId == "" {
+               *clusterId = h.handler.Cluster.ClusterID
+       }
+
+       if strings.HasPrefix(currentUser.Authorization.UUID, h.handler.Cluster.ClusterID) &&
+               *clusterId == h.handler.Cluster.ClusterID {
+               // local user submitting container request to local cluster
+               return false
+       }
+
+       if req.Header.Get("Content-Type") != "application/json" {
+               httpserver.Error(w, "Expected Content-Type: application/json, got "+req.Header.Get("Content-Type"), http.StatusBadRequest)
+               return true
+       }
+
+       originalBody := req.Body
+       defer originalBody.Close()
+       var request map[string]interface{}
+       err = json.NewDecoder(req.Body).Decode(&request)
+       if err != nil {
+               httpserver.Error(w, err.Error(), http.StatusBadRequest)
+               return true
+       }
+
+       crString, ok := request["container_request"].(string)
+       if ok {
+               var crJson map[string]interface{}
+               err := json.Unmarshal([]byte(crString), &crJson)
+               if err != nil {
+                       httpserver.Error(w, err.Error(), http.StatusBadRequest)
+                       return true
+               }
+
+               request["container_request"] = crJson
+       }
+
+       containerRequest, ok := request["container_request"].(map[string]interface{})
+       if !ok {
+               // Use toplevel object as the container_request object
+               containerRequest = request
+       }
+
+       // If runtime_token is not set, create a new token
+       if _, ok := containerRequest["runtime_token"]; !ok {
+               if len(currentUser.Authorization.Scopes) != 1 || currentUser.Authorization.Scopes[0] != "all" {
+                       httpserver.Error(w, "Token scope is not [all]", http.StatusForbidden)
+                       return true
+               }
+
+               if strings.HasPrefix(currentUser.Authorization.UUID, h.handler.Cluster.ClusterID) {
+                       // Local user, submitting to a remote cluster.
+                       // Create a new time-limited token.
+                       newtok, err := h.handler.createAPItoken(req, currentUser.UUID, nil)
+                       if err != nil {
+                               httpserver.Error(w, err.Error(), http.StatusForbidden)
+                               return true
+                       }
+                       containerRequest["runtime_token"] = newtok.TokenV2()
+               } else {
+                       // Remote user. Container request will use the
+                       // current token, minus the trailing portion
+                       // (optional container uuid).
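+                       // For example (uuids illustrative):
+                       // "v2/zzzzz-gj3su-xxxxxxxxxxxxxxx/secret/zzzzz-dz642-xxxxxxxxxxxxxxx"
+                       // becomes "v2/zzzzz-gj3su-xxxxxxxxxxxxxxx/secret".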
+                       sp := strings.Split(creds.Tokens[0], "/")
+                       if len(sp) >= 3 {
+                               containerRequest["runtime_token"] = strings.Join(sp[0:3], "/")
+                       } else {
+                               containerRequest["runtime_token"] = creds.Tokens[0]
+                       }
+               }
+       }
+
+       newbody, err := json.Marshal(request)
+       if err != nil {
+               httpserver.Error(w, err.Error(), http.StatusInternalServerError)
+               return true
+       }
+       buf := bytes.NewBuffer(newbody)
+       req.Body = ioutil.NopCloser(buf)
+       req.ContentLength = int64(buf.Len())
+       req.Header.Set("Content-Length", fmt.Sprintf("%v", buf.Len()))
+
+       resp, err := h.handler.remoteClusterRequest(*clusterId, req)
+       h.handler.proxy.ForwardResponse(w, resp, err)
+       return true
+}
diff --git a/lib/controller/fed_generic.go b/lib/controller/fed_generic.go
new file mode 100644 (file)
index 0000000..9c8b161
--- /dev/null
@@ -0,0 +1,352 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "bytes"
+       "encoding/json"
+       "fmt"
+       "io/ioutil"
+       "net/http"
+       "net/url"
+       "regexp"
+       "sync"
+
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+)
+
+type federatedRequestDelegate func(
+       h *genericFederatedRequestHandler,
+       effectiveMethod string,
+       clusterId *string,
+       uuid string,
+       remainder string,
+       w http.ResponseWriter,
+       req *http.Request) bool
+
+type genericFederatedRequestHandler struct {
+       next      http.Handler
+       handler   *Handler
+       matcher   *regexp.Regexp
+       delegates []federatedRequestDelegate
+}
+
+func (h *genericFederatedRequestHandler) remoteQueryUUIDs(w http.ResponseWriter,
+       req *http.Request,
+       clusterID string, uuids []string) (rp []map[string]interface{}, kind string, err error) {
+
+       found := make(map[string]bool)
+       prevLenUUIDs := len(uuids) + 1
+       // Loop while
+       // (1) there are more uuids to query, and
+       // (2) we're making progress - on each iteration the set of
+       // uuids we are waiting for must shrink.
+       for len(uuids) > 0 && len(uuids) < prevLenUUIDs {
+               var remoteReq http.Request
+               remoteReq.Header = req.Header
+               remoteReq.Method = "POST"
+               remoteReq.URL = &url.URL{Path: req.URL.Path}
+               remoteParams := make(url.Values)
+               remoteParams.Set("_method", "GET")
+               remoteParams.Set("count", "none")
+               if req.Form.Get("select") != "" {
+                       remoteParams.Set("select", req.Form.Get("select"))
+               }
+               content, err := json.Marshal(uuids)
+               if err != nil {
+                       return nil, "", err
+               }
+               remoteParams["filters"] = []string{fmt.Sprintf(`[["uuid", "in", %s]]`, content)}
+               enc := remoteParams.Encode()
+               remoteReq.Body = ioutil.NopCloser(bytes.NewBufferString(enc))
+
+               rc := multiClusterQueryResponseCollector{clusterID: clusterID}
+
+               var resp *http.Response
+               if clusterID == h.handler.Cluster.ClusterID {
+                       resp, err = h.handler.localClusterRequest(&remoteReq)
+               } else {
+                       resp, err = h.handler.remoteClusterRequest(clusterID, &remoteReq)
+               }
+               rc.collectResponse(resp, err)
+
+               if rc.error != nil {
+                       return nil, "", rc.error
+               }
+
+               kind = rc.kind
+
+               if len(rc.responses) == 0 {
+                       // We got zero responses, no point in doing
+                       // another query.
+                       return rp, kind, nil
+               }
+
+               rp = append(rp, rc.responses...)
+
+               // Go through the responses and determine what was
+               // returned.  If there are remaining items, loop
+               // around and do another request with just the
+               // stragglers.
+               for _, i := range rc.responses {
+                       uuid, ok := i["uuid"].(string)
+                       if ok {
+                               found[uuid] = true
+                       }
+               }
+
+               l := []string{}
+               for _, u := range uuids {
+                       if !found[u] {
+                               l = append(l, u)
+                       }
+               }
+               prevLenUUIDs = len(uuids)
+               uuids = l
+       }
+
+       return rp, kind, nil
+}
+
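+// handleMultiClusterQuery handles a list query whose "uuid in [...]"
+// filter spans multiple clusters, e.g. (a sketch with illustrative
+// uuids, url-encoding omitted):
+//
+//     filters=[["uuid","in",["aaaaa-dz642-abcdefghijklmno","bbbbb-dz642-abcdefghijklmno"]]]&count=none
+//
+// It splits the uuid list by cluster-id prefix, queries each cluster
+// concurrently, and merges the results.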
+func (h *genericFederatedRequestHandler) handleMultiClusterQuery(w http.ResponseWriter,
+       req *http.Request, clusterId *string) bool {
+
+       var filters [][]interface{}
+       err := json.Unmarshal([]byte(req.Form.Get("filters")), &filters)
+       if err != nil {
+               httpserver.Error(w, err.Error(), http.StatusBadRequest)
+               return true
+       }
+
+       // Split the list of uuids by prefix
+       queryClusters := make(map[string][]string)
+       expectCount := 0
+       for _, filter := range filters {
+               if len(filter) != 3 {
+                       return false
+               }
+
+               if lhs, ok := filter[0].(string); !ok || lhs != "uuid" {
+                       return false
+               }
+
+               op, ok := filter[1].(string)
+               if !ok {
+                       return false
+               }
+
+               if op == "in" {
+                       if rhs, ok := filter[2].([]interface{}); ok {
+                               for _, i := range rhs {
+                                       if u, ok := i.(string); ok && len(u) == 27 {
+                                               *clusterId = u[0:5]
+                                               queryClusters[u[0:5]] = append(queryClusters[u[0:5]], u)
+                                               expectCount++
+                                       }
+                               }
+                       }
+               } else if op == "=" {
+                       if u, ok := filter[2].(string); ok && len(u) == 27 {
+                               *clusterId = u[0:5]
+                               queryClusters[u[0:5]] = append(queryClusters[u[0:5]], u)
+                               expectCount++
+                       }
+               } else {
+                       return false
+               }
+
+       }
+
+       if len(queryClusters) <= 1 {
+               // Query does not search for uuids across multiple
+               // clusters.
+               return false
+       }
+
+       // Validations
+       count := req.Form.Get("count")
+       if count != "" && count != `none` && count != `"none"` {
+               httpserver.Error(w, "Federated multi-object query must have 'count=none'", http.StatusBadRequest)
+               return true
+       }
+       if req.Form.Get("limit") != "" || req.Form.Get("offset") != "" || req.Form.Get("order") != "" {
+               httpserver.Error(w, "Federated multi-object may not provide 'limit', 'offset' or 'order'.", http.StatusBadRequest)
+               return true
+       }
+       if expectCount > h.handler.Cluster.RequestLimits.GetMaxItemsPerResponse() {
+               httpserver.Error(w, fmt.Sprintf("Federated multi-object request for %v objects which is more than max page size %v.",
+                       expectCount, h.handler.Cluster.RequestLimits.GetMaxItemsPerResponse()), http.StatusBadRequest)
+               return true
+       }
+       if req.Form.Get("select") != "" {
+               foundUUID := false
+               var selects []string
+               err := json.Unmarshal([]byte(req.Form.Get("select")), &selects)
+               if err != nil {
+                       httpserver.Error(w, err.Error(), http.StatusBadRequest)
+                       return true
+               }
+
+               for _, r := range selects {
+                       if r == "uuid" {
+                               foundUUID = true
+                               break
+                       }
+               }
+               if !foundUUID {
+                       httpserver.Error(w, "Federated multi-object request must include 'uuid' in 'select'", http.StatusBadRequest)
+                       return true
+               }
+       }
+
+       // Perform concurrent requests to each cluster
+
+       // use channel as a semaphore to limit the number of concurrent
+       // requests at a time
+       sem := make(chan bool, h.handler.Cluster.RequestLimits.GetMultiClusterRequestConcurrency())
+       defer close(sem)
+       wg := sync.WaitGroup{}
+
+       req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+       mtx := sync.Mutex{}
+       errors := []error{}
+       var completeResponses []map[string]interface{}
+       var kind string
+
+       for k, v := range queryClusters {
+               if len(v) == 0 {
+                       // Nothing to query
+                       continue
+               }
+
+               // blocks until it can put a value into the
+               // channel (which has a max queue capacity)
+               sem <- true
+               wg.Add(1)
+               go func(k string, v []string) {
+                       rp, kn, err := h.remoteQueryUUIDs(w, req, k, v)
+                       mtx.Lock()
+                       if err == nil {
+                               completeResponses = append(completeResponses, rp...)
+                               kind = kn
+                       } else {
+                               errors = append(errors, err)
+                       }
+                       mtx.Unlock()
+                       wg.Done()
+                       <-sem
+               }(k, v)
+       }
+       wg.Wait()
+
+       if len(errors) > 0 {
+               var strerr []string
+               for _, e := range errors {
+                       strerr = append(strerr, e.Error())
+               }
+               httpserver.Errors(w, strerr, http.StatusBadGateway)
+               return true
+       }
+
+       w.Header().Set("Content-Type", "application/json")
+       w.WriteHeader(http.StatusOK)
+       itemList := make(map[string]interface{})
+       itemList["items"] = completeResponses
+       itemList["kind"] = kind
+       json.NewEncoder(w).Encode(itemList)
+
+       return true
+}
+
+func (h *genericFederatedRequestHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+       m := h.matcher.FindStringSubmatch(req.URL.Path)
+       clusterId := ""
+
+       if len(m) > 0 && m[2] != "" {
+               clusterId = m[2]
+       }
+
+       // Get form parameters from URL and form body (if POST).
+       if err := loadParamsFromForm(req); err != nil {
+               httpserver.Error(w, err.Error(), http.StatusBadRequest)
+               return
+       }
+
+       // Check if the parameters have an explicit cluster_id
+       if req.Form.Get("cluster_id") != "" {
+               clusterId = req.Form.Get("cluster_id")
+       }
+
+       // Handle the POST-as-GET special case (workaround for large
+       // GET requests that potentially exceed maximum URL length,
+       // like multi-object queries where the filter has 100s of
+       // items)
+       effectiveMethod := req.Method
+       if req.Method == "POST" && req.Form.Get("_method") != "" {
+               effectiveMethod = req.Form.Get("_method")
+       }
+
+       if effectiveMethod == "GET" &&
+               clusterId == "" &&
+               req.Form.Get("filters") != "" &&
+               h.handleMultiClusterQuery(w, req, &clusterId) {
+               return
+       }
+
+       var uuid string
+       if len(m[1]) > 0 {
+               // trim leading slash
+               uuid = m[1][1:]
+       }
+       for _, d := range h.delegates {
+               if d(h, effectiveMethod, &clusterId, uuid, m[3], w, req) {
+                       return
+               }
+       }
+
+       if clusterId == "" || clusterId == h.handler.Cluster.ClusterID {
+               h.next.ServeHTTP(w, req)
+       } else {
+               resp, err := h.handler.remoteClusterRequest(clusterId, req)
+               h.handler.proxy.ForwardResponse(w, resp, err)
+       }
+}
+
+type multiClusterQueryResponseCollector struct {
+       responses []map[string]interface{}
+       error     error
+       kind      string
+       clusterID string
+}
+
+func (c *multiClusterQueryResponseCollector) collectResponse(resp *http.Response,
+       requestError error) (newResponse *http.Response, err error) {
+       if requestError != nil {
+               c.error = requestError
+               return nil, nil
+       }
+
+       defer resp.Body.Close()
+       var loadInto struct {
+               Kind   string                   `json:"kind"`
+               Items  []map[string]interface{} `json:"items"`
+               Errors []string                 `json:"errors"`
+       }
+       err = json.NewDecoder(resp.Body).Decode(&loadInto)
+
+       if err != nil {
+               c.error = fmt.Errorf("error fetching from %v (%v): %v", c.clusterID, resp.Status, err)
+               return nil, nil
+       }
+       if resp.StatusCode != http.StatusOK {
+               c.error = fmt.Errorf("error fetching from %v (%v): %v", c.clusterID, resp.Status, loadInto.Errors)
+               return nil, nil
+       }
+
+       c.responses = loadInto.Items
+       c.kind = loadInto.Kind
+
+       return nil, nil
+}
diff --git a/lib/controller/federation.go b/lib/controller/federation.go
new file mode 100644 (file)
index 0000000..557c7c3
--- /dev/null
@@ -0,0 +1,293 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "bytes"
+       "database/sql"
+       "encoding/json"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "mime"
+       "net/http"
+       "net/url"
+       "regexp"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/auth"
+       "github.com/jmcvetta/randutil"
+)
+
+var pathPattern = `^/arvados/v1/%s(/([0-9a-z]{5})-%s-[0-9a-z]{15})?(.*)$`
+var wfRe = regexp.MustCompile(fmt.Sprintf(pathPattern, "workflows", "7fd4e"))
+var containersRe = regexp.MustCompile(fmt.Sprintf(pathPattern, "containers", "dz642"))
+var containerRequestsRe = regexp.MustCompile(fmt.Sprintf(pathPattern, "container_requests", "xvhdp"))
+var collectionsRe = regexp.MustCompile(fmt.Sprintf(pathPattern, "collections", "4zz18"))
+var collectionsByPDHRe = regexp.MustCompile(`^/arvados/v1/collections/([0-9a-fA-F]{32}\+[0-9]+)+$`)
+var linksRe = regexp.MustCompile(fmt.Sprintf(pathPattern, "links", "o0j2j"))
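+
+// For example, containersRe matches paths like these (uuid
+// illustrative):
+//
+//     /arvados/v1/containers
+//     /arvados/v1/containers/zzzzz-dz642-abcdefghijklmno
+//     /arvados/v1/containers/zzzzz-dz642-abcdefghijklmno/lock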
+
+func (h *Handler) remoteClusterRequest(remoteID string, req *http.Request) (*http.Response, error) {
+       remote, ok := h.Cluster.RemoteClusters[remoteID]
+       if !ok {
+               return nil, HTTPError{fmt.Sprintf("no proxy available for cluster %v", remoteID), http.StatusNotFound}
+       }
+       scheme := remote.Scheme
+       if scheme == "" {
+               scheme = "https"
+       }
+       saltedReq, err := h.saltAuthToken(req, remoteID)
+       if err != nil {
+               return nil, err
+       }
+       urlOut := &url.URL{
+               Scheme:   scheme,
+               Host:     remote.Host,
+               Path:     saltedReq.URL.Path,
+               RawPath:  saltedReq.URL.RawPath,
+               RawQuery: saltedReq.URL.RawQuery,
+       }
+       client := h.secureClient
+       if remote.Insecure {
+               client = h.insecureClient
+       }
+       return h.proxy.Do(saltedReq, urlOut, client)
+}
+
+// Buffer request body, parse form parameters in request, and then
+// replace original body with the buffer so it can be re-read by
+// downstream proxy steps.
+func loadParamsFromForm(req *http.Request) error {
+       var postBody *bytes.Buffer
+       if ct := req.Header.Get("Content-Type"); ct == "" {
+               // Assume application/octet-stream, i.e., no form to parse.
+       } else if ct, _, err := mime.ParseMediaType(ct); err != nil {
+               return err
+       } else if ct == "application/x-www-form-urlencoded" && req.Body != nil {
+               var cl int64
+               if req.ContentLength > 0 {
+                       cl = req.ContentLength
+               }
+               postBody = bytes.NewBuffer(make([]byte, 0, cl))
+               originalBody := req.Body
+               defer originalBody.Close()
+               req.Body = ioutil.NopCloser(io.TeeReader(req.Body, postBody))
+       }
+
+       err := req.ParseForm()
+       if err != nil {
+               return err
+       }
+
+       if req.Body != nil && postBody != nil {
+               req.Body = ioutil.NopCloser(postBody)
+       }
+       return nil
+}
+
+func (h *Handler) setupProxyRemoteCluster(next http.Handler) http.Handler {
+       mux := http.NewServeMux()
+
+       wfHandler := &genericFederatedRequestHandler{next, h, wfRe, nil}
+       containersHandler := &genericFederatedRequestHandler{next, h, containersRe, nil}
+       containerRequestsHandler := &genericFederatedRequestHandler{next, h, containerRequestsRe,
+               []federatedRequestDelegate{remoteContainerRequestCreate}}
+       collectionsRequestsHandler := &genericFederatedRequestHandler{next, h, collectionsRe,
+               []federatedRequestDelegate{fetchRemoteCollectionByUUID, fetchRemoteCollectionByPDH}}
+       linksRequestsHandler := &genericFederatedRequestHandler{next, h, linksRe, nil}
+
+       mux.Handle("/arvados/v1/workflows", wfHandler)
+       mux.Handle("/arvados/v1/workflows/", wfHandler)
+       mux.Handle("/arvados/v1/containers", containersHandler)
+       mux.Handle("/arvados/v1/containers/", containersHandler)
+       mux.Handle("/arvados/v1/container_requests", containerRequestsHandler)
+       mux.Handle("/arvados/v1/container_requests/", containerRequestsHandler)
+       mux.Handle("/arvados/v1/collections", collectionsRequestsHandler)
+       mux.Handle("/arvados/v1/collections/", collectionsRequestsHandler)
+       mux.Handle("/arvados/v1/links", linksRequestsHandler)
+       mux.Handle("/arvados/v1/links/", linksRequestsHandler)
+       mux.Handle("/", next)
+
+       return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+               parts := strings.Split(req.Header.Get("Authorization"), "/")
+               alreadySalted := (len(parts) == 3 && parts[0] == "Bearer v2" && len(parts[2]) == 40)
+
+               if alreadySalted ||
+                       strings.Contains(req.Header.Get("Via"), "arvados-controller") {
+                       // The token is already salted, or this is a
+                       // request from another instance of
+                       // arvados-controller.  In either case, we
+                       // don't want to proxy this query, so just
+                       // continue down the instance handler stack.
+                       next.ServeHTTP(w, req)
+                       return
+               }
+
+               mux.ServeHTTP(w, req)
+       })
+}
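+
+// Illustrative note (editor's sketch): a token that has already been
+// salted for a remote cluster arrives as, e.g.,
+//
+//	Authorization: Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/7fd31b61f39c0e82a4155592163218272cedacdc
+//
+// Splitting on "/" yields exactly three parts, the first being
+// "Bearer v2" and the last a 40-character secret, which is what the
+// alreadySalted check in setupProxyRemoteCluster looks for.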
+
+type CurrentUser struct {
+       Authorization arvados.APIClientAuthorization
+       UUID          string
+}
+
+// validateAPItoken extracts the token from the provided http request,
+// checks it against the api_client_authorizations table in the database,
+// and fills in the token scope and user UUID.  Does not handle remote
+// tokens unless they are already in the database and not expired.
+func (h *Handler) validateAPItoken(req *http.Request, token string) (*CurrentUser, error) {
+       user := CurrentUser{Authorization: arvados.APIClientAuthorization{APIToken: token}}
+       db, err := h.db(req)
+       if err != nil {
+               return nil, err
+       }
+
+       var uuid string
+       if strings.HasPrefix(token, "v2/") {
+               sp := strings.Split(token, "/")
+               uuid = sp[1]
+               token = sp[2]
+       }
+       user.Authorization.APIToken = token
+       var scopes string
+       err = db.QueryRowContext(req.Context(), `SELECT api_client_authorizations.uuid, api_client_authorizations.scopes, users.uuid FROM api_client_authorizations JOIN users on api_client_authorizations.user_id=users.id WHERE api_token=$1 AND (expires_at IS NULL OR expires_at > current_timestamp) LIMIT 1`, token).Scan(&user.Authorization.UUID, &scopes, &user.UUID)
+       if err != nil {
+               return nil, err
+       }
+       if uuid != "" && user.Authorization.UUID != uuid {
+               return nil, fmt.Errorf("UUID embedded in v2 token did not match record")
+       }
+       err = json.Unmarshal([]byte(scopes), &user.Authorization.Scopes)
+       if err != nil {
+               return nil, err
+       }
+       return &user, nil
+}
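+
+// Illustrative example (editor's sketch): validateAPItoken accepts
+// either a bare token or a "v2" token of the form
+//
+//	v2/<api_client_authorization UUID>/<secret>
+//
+// For a v2 token, the UUID embedded in the token must match the
+// api_client_authorizations row found for the secret; otherwise the
+// lookup is rejected.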
+
+func (h *Handler) createAPItoken(req *http.Request, userUUID string, scopes []string) (*arvados.APIClientAuthorization, error) {
+       db, err := h.db(req)
+       if err != nil {
+               return nil, err
+       }
+       rd, err := randutil.String(15, "abcdefghijklmnopqrstuvwxyz0123456789")
+       if err != nil {
+               return nil, err
+       }
+       uuid := fmt.Sprintf("%v-gj3su-%v", h.Cluster.ClusterID, rd)
+       token, err := randutil.String(50, "abcdefghijklmnopqrstuvwxyz0123456789")
+       if err != nil {
+               return nil, err
+       }
+       if len(scopes) == 0 {
+               scopes = append(scopes, "all")
+       }
+       scopesjson, err := json.Marshal(scopes)
+       if err != nil {
+               return nil, err
+       }
+       _, err = db.ExecContext(req.Context(),
+               `INSERT INTO api_client_authorizations
+(uuid, api_token, expires_at, scopes,
+user_id,
+api_client_id, created_at, updated_at)
+VALUES ($1, $2, CURRENT_TIMESTAMP + INTERVAL '2 weeks', $3,
+(SELECT id FROM users WHERE users.uuid=$4 LIMIT 1),
+0, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)`,
+               uuid, token, string(scopesjson), userUUID)
+
+       if err != nil {
+               return nil, err
+       }
+
+       return &arvados.APIClientAuthorization{
+               UUID:      uuid,
+               APIToken:  token,
+               ExpiresAt: "",
+               Scopes:    scopes}, nil
+}
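+
+// Illustrative example (editor's sketch): createAPItoken generates
+// values like
+//
+//	uuid:  zzzzz-gj3su-<15 random chars from [a-z0-9]>
+//	token: <50 random chars from [a-z0-9]>
+//
+// and inserts them with a two-week expiry and scopes ["all"] unless
+// explicit scopes are supplied.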
+
+// Extract the auth token supplied in req, and replace it with a
+// salted token for the remote cluster.
+func (h *Handler) saltAuthToken(req *http.Request, remote string) (updatedReq *http.Request, err error) {
+       updatedReq = (&http.Request{
+               Method:        req.Method,
+               URL:           req.URL,
+               Header:        req.Header,
+               Body:          req.Body,
+               ContentLength: req.ContentLength,
+               Host:          req.Host,
+       }).WithContext(req.Context())
+
+       creds := auth.NewCredentials()
+       creds.LoadTokensFromHTTPRequest(updatedReq)
+       if len(creds.Tokens) == 0 && updatedReq.Header.Get("Content-Type") == "application/x-www-form-urlencoded" {
+               // Override ParseForm's 10MiB limit by ensuring
+               // req.Body is a *http.maxBytesReader.
+               updatedReq.Body = http.MaxBytesReader(nil, updatedReq.Body, 1<<28) // 256MiB. TODO: use MaxRequestSize from discovery doc or config.
+               if err := creds.LoadTokensFromHTTPRequestBody(updatedReq); err != nil {
+                       return nil, err
+               }
+               // Replace req.Body with a buffer that re-encodes the
+               // form without api_token, in case we end up
+               // forwarding the request.
+               if updatedReq.PostForm != nil {
+                       updatedReq.PostForm.Del("api_token")
+               }
+               updatedReq.Body = ioutil.NopCloser(bytes.NewBufferString(updatedReq.PostForm.Encode()))
+       }
+       if len(creds.Tokens) == 0 {
+               return updatedReq, nil
+       }
+
+       token, err := auth.SaltToken(creds.Tokens[0], remote)
+
+       if err == auth.ErrObsoleteToken {
+               // If the token exists in our own database, salt it
+               // for the remote. Otherwise, assume it was issued by
+               // the remote, and pass it through unmodified.
+               currentUser, err := h.validateAPItoken(req, creds.Tokens[0])
+               if err == sql.ErrNoRows {
+                       // Not ours; pass through unmodified.
+                       token = creds.Tokens[0]
+               } else if err != nil {
+                       return nil, err
+               } else {
+                       // Found; make V2 version and salt it.
+                       token, err = auth.SaltToken(currentUser.Authorization.TokenV2(), remote)
+                       if err != nil {
+                               return nil, err
+                       }
+               }
+       } else if err != nil {
+               return nil, err
+       }
+       updatedReq.Header = http.Header{}
+       for k, v := range req.Header {
+               if k != "Authorization" {
+                       updatedReq.Header[k] = v
+               }
+       }
+       updatedReq.Header.Set("Authorization", "Bearer "+token)
+
+       // Remove api_token=... from the query string, in case we end
+       // up forwarding the request.
+       if values, err := url.ParseQuery(updatedReq.URL.RawQuery); err != nil {
+               return nil, err
+       } else if _, ok := values["api_token"]; ok {
+               delete(values, "api_token")
+               updatedReq.URL = &url.URL{
+                       Scheme:   req.URL.Scheme,
+                       Host:     req.URL.Host,
+                       Path:     req.URL.Path,
+                       RawPath:  req.URL.RawPath,
+                       RawQuery: values.Encode(),
+               }
+       }
+       return updatedReq, nil
+}
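+
+// Illustrative example (editor's sketch, based on the federation
+// tests below): salting the "active user" fixture token for remote
+// cluster "zmock" produces
+//
+//	v2/zzzzz-gj3su-077z32aux8dg2s1/7fd31b61f39c0e82a4155592163218272cedacdc
+//
+// where the secret part is an HMAC-SHA1 derived from the original
+// token and the remote cluster ID (see TestLocalTokenSalted).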
diff --git a/lib/controller/federation_test.go b/lib/controller/federation_test.go
new file mode 100644 (file)
index 0000000..62916ac
--- /dev/null
@@ -0,0 +1,907 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "bytes"
+       "encoding/json"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "net/http/httptest"
+       "net/url"
+       "os"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       "github.com/sirupsen/logrus"
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+var _ = check.Suite(&FederationSuite{})
+
+type FederationSuite struct {
+       log logrus.FieldLogger
+       // testServer and testHandler are the controller being tested,
+       // "zhome".
+       testServer  *httpserver.Server
+       testHandler *Handler
+       // remoteServer ("zzzzz") forwards requests to the Rails API
+       // provided by the integration test environment.
+       remoteServer *httpserver.Server
+       // remoteMock ("zmock") appends each incoming request to
+       // remoteMockRequests, and returns an empty 200 response.
+       remoteMock         *httpserver.Server
+       remoteMockRequests []http.Request
+}
+
+func (s *FederationSuite) SetUpTest(c *check.C) {
+       s.log = ctxlog.TestLogger(c)
+
+       s.remoteServer = newServerFromIntegrationTestEnv(c)
+       c.Assert(s.remoteServer.Start(), check.IsNil)
+
+       s.remoteMock = newServerFromIntegrationTestEnv(c)
+       s.remoteMock.Server.Handler = http.HandlerFunc(s.remoteMockHandler)
+       c.Assert(s.remoteMock.Start(), check.IsNil)
+
+       nodeProfile := arvados.NodeProfile{
+               Controller: arvados.SystemServiceInstance{Listen: ":"},
+               RailsAPI:   arvados.SystemServiceInstance{Listen: ":1"}, // local reqs will error "connection refused"
+       }
+       s.testHandler = &Handler{Cluster: &arvados.Cluster{
+               ClusterID:  "zhome",
+               PostgreSQL: integrationTestCluster().PostgreSQL,
+               NodeProfiles: map[string]arvados.NodeProfile{
+                       "*": nodeProfile,
+               },
+               RequestLimits: arvados.RequestLimits{
+                       MaxItemsPerResponse:            1000,
+                       MultiClusterRequestConcurrency: 4,
+               },
+       }, NodeProfile: &nodeProfile}
+       s.testServer = newServerFromIntegrationTestEnv(c)
+       s.testServer.Server.Handler = httpserver.AddRequestIDs(httpserver.LogRequests(s.log, s.testHandler))
+
+       s.testHandler.Cluster.RemoteClusters = map[string]arvados.RemoteCluster{
+               "zzzzz": {
+                       Host:   s.remoteServer.Addr,
+                       Proxy:  true,
+                       Scheme: "http",
+               },
+               "zmock": {
+                       Host:   s.remoteMock.Addr,
+                       Proxy:  true,
+                       Scheme: "http",
+               },
+       }
+
+       c.Assert(s.testServer.Start(), check.IsNil)
+
+       s.remoteMockRequests = nil
+}
+
+func (s *FederationSuite) remoteMockHandler(w http.ResponseWriter, req *http.Request) {
+       b := &bytes.Buffer{}
+       io.Copy(b, req.Body)
+       req.Body.Close()
+       req.Body = ioutil.NopCloser(b)
+       s.remoteMockRequests = append(s.remoteMockRequests, *req)
+}
+
+func (s *FederationSuite) TearDownTest(c *check.C) {
+       if s.remoteServer != nil {
+               s.remoteServer.Close()
+       }
+       if s.testServer != nil {
+               s.testServer.Close()
+       }
+}
+
+func (s *FederationSuite) testRequest(req *http.Request) *http.Response {
+       resp := httptest.NewRecorder()
+       s.testServer.Server.Handler.ServeHTTP(resp, req)
+       return resp.Result()
+}
+
+func (s *FederationSuite) TestLocalRequest(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zhome-", 1), nil)
+       resp := s.testRequest(req)
+       s.checkHandledLocally(c, resp)
+}
+
+func (s *FederationSuite) checkHandledLocally(c *check.C, resp *http.Response) {
+       // Our "home" controller can't handle local requests because
+       // it doesn't have its own stub/test Rails API, so we rely on
+       // "connection refused" to indicate the controller tried to
+       // proxy the request to its local Rails API.
+       c.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)
+       s.checkJSONErrorMatches(c, resp, `.*connection refused`)
+}
+
+func (s *FederationSuite) TestNoAuth(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusUnauthorized)
+       s.checkJSONErrorMatches(c, resp, `Not logged in`)
+}
+
+func (s *FederationSuite) TestBadAuth(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
+       req.Header.Set("Authorization", "Bearer aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusUnauthorized)
+       s.checkJSONErrorMatches(c, resp, `Not logged in`)
+}
+
+func (s *FederationSuite) TestNoAccess(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.SpectatorToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+       s.checkJSONErrorMatches(c, resp, `.*not found`)
+}
+
+func (s *FederationSuite) TestGetUnknownRemote(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zz404-", 1), nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+       s.checkJSONErrorMatches(c, resp, `.*no proxy available for cluster zz404`)
+}
+
+func (s *FederationSuite) TestRemoteError(c *check.C) {
+       rc := s.testHandler.Cluster.RemoteClusters["zzzzz"]
+       rc.Scheme = "https"
+       s.testHandler.Cluster.RemoteClusters["zzzzz"] = rc
+
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)
+       s.checkJSONErrorMatches(c, resp, `.*HTTP response to HTTPS client`)
+}
+
+func (s *FederationSuite) TestGetRemoteWorkflow(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var wf arvados.Workflow
+       c.Check(json.NewDecoder(resp.Body).Decode(&wf), check.IsNil)
+       c.Check(wf.UUID, check.Equals, arvadostest.WorkflowWithDefinitionYAMLUUID)
+       c.Check(wf.OwnerUUID, check.Equals, arvadostest.ActiveUserUUID)
+}
+
+func (s *FederationSuite) TestOptionsMethod(c *check.C) {
+       req := httptest.NewRequest("OPTIONS", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
+       req.Header.Set("Origin", "https://example.com")
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       body, err := ioutil.ReadAll(resp.Body)
+       c.Check(err, check.IsNil)
+       c.Check(string(body), check.Equals, "")
+       c.Check(resp.Header.Get("Access-Control-Allow-Origin"), check.Equals, "*")
+       for _, hdr := range []string{"Authorization", "Content-Type"} {
+               c.Check(resp.Header.Get("Access-Control-Allow-Headers"), check.Matches, ".*"+hdr+".*")
+       }
+       for _, method := range []string{"GET", "HEAD", "PUT", "POST", "DELETE"} {
+               c.Check(resp.Header.Get("Access-Control-Allow-Methods"), check.Matches, ".*"+method+".*")
+       }
+}
+
+func (s *FederationSuite) TestRemoteWithTokenInQuery(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1)+"?api_token="+arvadostest.ActiveToken, nil)
+       s.testRequest(req)
+       c.Assert(s.remoteMockRequests, check.HasLen, 1)
+       pr := s.remoteMockRequests[0]
+       // Token is salted and moved from query to Authorization header.
+       c.Check(pr.URL.String(), check.Not(check.Matches), `.*api_token=.*`)
+       c.Check(pr.Header.Get("Authorization"), check.Equals, "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/7fd31b61f39c0e82a4155592163218272cedacdc")
+}
+
+func (s *FederationSuite) TestLocalTokenSalted(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1), nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       s.testRequest(req)
+       c.Assert(s.remoteMockRequests, check.HasLen, 1)
+       pr := s.remoteMockRequests[0]
+       // The salted token here has a "zzzzz-" UUID instead of a
+       // "ztest-" UUID because ztest's local database has the
+       // "zzzzz-" test fixtures. The "secret" part is HMAC(sha1,
+       // arvadostest.ActiveToken, "zmock") = "7fd3...".
+       c.Check(pr.Header.Get("Authorization"), check.Equals, "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/7fd31b61f39c0e82a4155592163218272cedacdc")
+}
+
+func (s *FederationSuite) TestRemoteTokenNotSalted(c *check.C) {
+       // remoteToken can be any v1 token that doesn't appear in
+       // ztest's local db.
+       remoteToken := "abcdef00000000000000000000000000000000000000000000"
+       req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1), nil)
+       req.Header.Set("Authorization", "Bearer "+remoteToken)
+       s.testRequest(req)
+       c.Assert(s.remoteMockRequests, check.HasLen, 1)
+       pr := s.remoteMockRequests[0]
+       c.Check(pr.Header.Get("Authorization"), check.Equals, "Bearer "+remoteToken)
+}
+
+func (s *FederationSuite) TestWorkflowCRUD(c *check.C) {
+       wf := arvados.Workflow{
+               Description: "TestCRUD",
+       }
+       {
+               body := &strings.Builder{}
+               json.NewEncoder(body).Encode(&wf)
+               req := httptest.NewRequest("POST", "/arvados/v1/workflows", strings.NewReader(url.Values{
+                       "workflow": {body.String()},
+               }.Encode()))
+               req.Header.Set("Content-type", "application/x-www-form-urlencoded")
+               req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+               rec := httptest.NewRecorder()
+               s.remoteServer.Server.Handler.ServeHTTP(rec, req) // direct to remote -- can't proxy a create req because no uuid
+               resp := rec.Result()
+               s.checkResponseOK(c, resp)
+               json.NewDecoder(resp.Body).Decode(&wf)
+
+               defer func() {
+                       req := httptest.NewRequest("DELETE", "/arvados/v1/workflows/"+wf.UUID, nil)
+                       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+                       s.remoteServer.Server.Handler.ServeHTTP(httptest.NewRecorder(), req)
+               }()
+               c.Check(wf.UUID, check.Not(check.Equals), "")
+
+               c.Assert(wf.ModifiedAt, check.NotNil)
+               c.Logf("wf.ModifiedAt: %v", wf.ModifiedAt)
+               c.Check(time.Since(*wf.ModifiedAt) < time.Minute, check.Equals, true)
+       }
+       for _, method := range []string{"PATCH", "PUT", "POST"} {
+               form := url.Values{
+                       "workflow": {`{"description": "Updated with ` + method + `"}`},
+               }
+               if method == "POST" {
+                       form["_method"] = []string{"PATCH"}
+               }
+               req := httptest.NewRequest(method, "/arvados/v1/workflows/"+wf.UUID, strings.NewReader(form.Encode()))
+               req.Header.Set("Content-type", "application/x-www-form-urlencoded")
+               req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+               resp := s.testRequest(req)
+               s.checkResponseOK(c, resp)
+               err := json.NewDecoder(resp.Body).Decode(&wf)
+               c.Check(err, check.IsNil)
+
+               c.Check(wf.Description, check.Equals, "Updated with "+method)
+       }
+       {
+               req := httptest.NewRequest("DELETE", "/arvados/v1/workflows/"+wf.UUID, nil)
+               req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+               resp := s.testRequest(req)
+               s.checkResponseOK(c, resp)
+               err := json.NewDecoder(resp.Body).Decode(&wf)
+               c.Check(err, check.IsNil)
+       }
+       {
+               req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+wf.UUID, nil)
+               req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+               resp := s.testRequest(req)
+               c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+       }
+}
+
+func (s *FederationSuite) checkResponseOK(c *check.C, resp *http.Response) {
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       if resp.StatusCode != http.StatusOK {
+               body, err := ioutil.ReadAll(resp.Body)
+               c.Logf("... response body = %q, %v\n", body, err)
+       }
+}
+
+func (s *FederationSuite) checkJSONErrorMatches(c *check.C, resp *http.Response, re string) {
+       var jresp httpserver.ErrorResponse
+       err := json.NewDecoder(resp.Body).Decode(&jresp)
+       c.Check(err, check.IsNil)
+       c.Assert(jresp.Errors, check.HasLen, 1)
+       c.Check(jresp.Errors[0], check.Matches, re)
+}
+
+func (s *FederationSuite) localServiceHandler(c *check.C, h http.Handler) *httpserver.Server {
+       srv := &httpserver.Server{
+               Server: http.Server{
+                       Handler: h,
+               },
+       }
+
+       c.Assert(srv.Start(), check.IsNil)
+
+       np := arvados.NodeProfile{
+               Controller: arvados.SystemServiceInstance{Listen: ":"},
+               RailsAPI: arvados.SystemServiceInstance{Listen: srv.Addr,
+                       TLS: false, Insecure: true}}
+       s.testHandler.Cluster.NodeProfiles["*"] = np
+       s.testHandler.NodeProfile = &np
+
+       return srv
+}
+
+func (s *FederationSuite) localServiceReturns404(c *check.C) *httpserver.Server {
+       return s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+               w.WriteHeader(404)
+       }))
+}
+
+func (s *FederationSuite) TestGetLocalCollection(c *check.C) {
+       np := arvados.NodeProfile{
+               Controller: arvados.SystemServiceInstance{Listen: ":"},
+               RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
+                       TLS: true, Insecure: true}}
+       s.testHandler.Cluster.ClusterID = "zzzzz"
+       s.testHandler.Cluster.NodeProfiles["*"] = np
+       s.testHandler.NodeProfile = &np
+
+       // HTTP GET
+
+       req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementCollection, nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var col arvados.Collection
+       c.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)
+       c.Check(col.UUID, check.Equals, arvadostest.UserAgreementCollection)
+       c.Check(col.ManifestText, check.Matches,
+               `\. 6a4ff0499484c6c79c95cd8c566bd25f\+249025\+A[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf
+`)
+
+       // HTTP POST with _method=GET as a form parameter
+
+       req = httptest.NewRequest("POST", "/arvados/v1/collections/"+arvadostest.UserAgreementCollection, bytes.NewBufferString((url.Values{
+               "_method": {"GET"},
+       }).Encode()))
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
+       resp = s.testRequest(req)
+
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       col = arvados.Collection{}
+       c.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)
+       c.Check(col.UUID, check.Equals, arvadostest.UserAgreementCollection)
+       c.Check(col.ManifestText, check.Matches,
+               `\. 6a4ff0499484c6c79c95cd8c566bd25f\+249025\+A[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf
+`)
+}
+
+func (s *FederationSuite) TestGetRemoteCollection(c *check.C) {
+       defer s.localServiceReturns404(c).Close()
+
+       req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementCollection, nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var col arvados.Collection
+       c.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)
+       c.Check(col.UUID, check.Equals, arvadostest.UserAgreementCollection)
+       c.Check(col.ManifestText, check.Matches,
+               `\. 6a4ff0499484c6c79c95cd8c566bd25f\+249025\+Rzzzzz-[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf
+`)
+}
+
+func (s *FederationSuite) TestGetRemoteCollectionError(c *check.C) {
+       defer s.localServiceReturns404(c).Close()
+
+       req := httptest.NewRequest("GET", "/arvados/v1/collections/zzzzz-4zz18-fakefakefakefak", nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+}
+
+func (s *FederationSuite) TestSignedLocatorPattern(c *check.C) {
+       // Confirm the regular expression identifies other groups of hints correctly
+       c.Check(keepclient.SignedLocatorRe.FindStringSubmatch(`6a4ff0499484c6c79c95cd8c566bd25f+249025+B1+C2+A05227438989d04712ea9ca1c91b556cef01d5cc7@5ba5405b+D3+E4`),
+               check.DeepEquals,
+               []string{"6a4ff0499484c6c79c95cd8c566bd25f+249025+B1+C2+A05227438989d04712ea9ca1c91b556cef01d5cc7@5ba5405b+D3+E4",
+                       "6a4ff0499484c6c79c95cd8c566bd25f",
+                       "+249025",
+                       "+B1+C2", "+C2",
+                       "+A05227438989d04712ea9ca1c91b556cef01d5cc7@5ba5405b",
+                       "05227438989d04712ea9ca1c91b556cef01d5cc7", "5ba5405b",
+                       "+D3+E4", "+E4"})
+}
+
+func (s *FederationSuite) TestGetLocalCollectionByPDH(c *check.C) {
+       np := arvados.NodeProfile{
+               Controller: arvados.SystemServiceInstance{Listen: ":"},
+               RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
+                       TLS: true, Insecure: true}}
+       s.testHandler.Cluster.NodeProfiles["*"] = np
+       s.testHandler.NodeProfile = &np
+
+       req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementPDH, nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var col arvados.Collection
+       c.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)
+       c.Check(col.PortableDataHash, check.Equals, arvadostest.UserAgreementPDH)
+       c.Check(col.ManifestText, check.Matches,
+               `\. 6a4ff0499484c6c79c95cd8c566bd25f\+249025\+A[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf
+`)
+}
+
+func (s *FederationSuite) TestGetRemoteCollectionByPDH(c *check.C) {
+       defer s.localServiceReturns404(c).Close()
+
+       req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementPDH, nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+
+       var col arvados.Collection
+       c.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)
+       c.Check(col.PortableDataHash, check.Equals, arvadostest.UserAgreementPDH)
+       c.Check(col.ManifestText, check.Matches,
+               `\. 6a4ff0499484c6c79c95cd8c566bd25f\+249025\+Rzzzzz-[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf
+`)
+}
+
+func (s *FederationSuite) TestGetCollectionByPDHError(c *check.C) {
+       defer s.localServiceReturns404(c).Close()
+
+       req := httptest.NewRequest("GET", "/arvados/v1/collections/99999999999999999999999999999999+99", nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+
+       resp := s.testRequest(req)
+       defer resp.Body.Close()
+
+       c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+}
+
+func (s *FederationSuite) TestGetCollectionByPDHErrorBadHash(c *check.C) {
+       defer s.localServiceReturns404(c).Close()
+
+       srv2 := &httpserver.Server{
+               Server: http.Server{
+                       Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+                               w.WriteHeader(200)
+                               // Return a collection where the hash
+                               // of the manifest text doesn't match
+                               // the PDH that was requested.
+                               var col arvados.Collection
+                               col.PortableDataHash = "99999999999999999999999999999999+99"
+                               col.ManifestText = `. 6a4ff0499484c6c79c95cd8c566bd25f\+249025 0:249025:GNU_General_Public_License,_version_3.pdf
+`
+                               enc := json.NewEncoder(w)
+                               enc.Encode(col)
+                       }),
+               },
+       }
+
+       c.Assert(srv2.Start(), check.IsNil)
+       defer srv2.Close()
+
+       // Direct zzzzz to service that returns a 200 result with a bogus manifest_text
+       s.testHandler.Cluster.RemoteClusters["zzzzz"] = arvados.RemoteCluster{
+               Host:   srv2.Addr,
+               Proxy:  true,
+               Scheme: "http",
+       }
+
+       req := httptest.NewRequest("GET", "/arvados/v1/collections/99999999999999999999999999999999+99", nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+
+       resp := s.testRequest(req)
+       defer resp.Body.Close()
+
+       c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+}
+
+func (s *FederationSuite) TestSaltedTokenGetCollectionByPDH(c *check.C) {
+       np := arvados.NodeProfile{
+               Controller: arvados.SystemServiceInstance{Listen: ":"},
+               RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
+                       TLS: true, Insecure: true}}
+       s.testHandler.Cluster.NodeProfiles["*"] = np
+       s.testHandler.NodeProfile = &np
+
+       req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementPDH, nil)
+       req.Header.Set("Authorization", "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/282d7d172b6cfdce364c5ed12ddf7417b2d00065")
+       resp := s.testRequest(req)
+
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var col arvados.Collection
+       c.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)
+       c.Check(col.PortableDataHash, check.Equals, arvadostest.UserAgreementPDH)
+       c.Check(col.ManifestText, check.Matches,
+               `\. 6a4ff0499484c6c79c95cd8c566bd25f\+249025\+A[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf
+`)
+}
+
+func (s *FederationSuite) TestSaltedTokenGetCollectionByPDHError(c *check.C) {
+       np := arvados.NodeProfile{
+               Controller: arvados.SystemServiceInstance{Listen: ":"},
+               RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
+                       TLS: true, Insecure: true}}
+       s.testHandler.Cluster.NodeProfiles["*"] = np
+       s.testHandler.NodeProfile = &np
+
+       req := httptest.NewRequest("GET", "/arvados/v1/collections/99999999999999999999999999999999+99", nil)
+       req.Header.Set("Authorization", "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/282d7d172b6cfdce364c5ed12ddf7417b2d00065")
+       resp := s.testRequest(req)
+
+       c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+}
+
+func (s *FederationSuite) TestGetRemoteContainerRequest(c *check.C) {
+       defer s.localServiceReturns404(c).Close()
+       req := httptest.NewRequest("GET", "/arvados/v1/container_requests/"+arvadostest.QueuedContainerRequestUUID, nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var cr arvados.ContainerRequest
+       c.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)
+       c.Check(cr.UUID, check.Equals, arvadostest.QueuedContainerRequestUUID)
+       c.Check(cr.Priority, check.Equals, 1)
+}
+
+func (s *FederationSuite) TestUpdateRemoteContainerRequest(c *check.C) {
+       defer s.localServiceReturns404(c).Close()
+       setPri := func(pri int) {
+               req := httptest.NewRequest("PATCH", "/arvados/v1/container_requests/"+arvadostest.QueuedContainerRequestUUID,
+                       strings.NewReader(fmt.Sprintf(`{"container_request": {"priority": %d}}`, pri)))
+               req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+               req.Header.Set("Content-type", "application/json")
+               resp := s.testRequest(req)
+               c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+               var cr arvados.ContainerRequest
+               c.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)
+               c.Check(cr.UUID, check.Equals, arvadostest.QueuedContainerRequestUUID)
+               c.Check(cr.Priority, check.Equals, pri)
+       }
+       setPri(696)
+       setPri(1) // Reset fixture so side effect doesn't break other tests.
+}
+
+func (s *FederationSuite) TestCreateRemoteContainerRequest(c *check.C) {
+       defer s.localServiceReturns404(c).Close()
+       // Pass cluster_id via query parameter; this allows
+       // arvados-controller to avoid parsing the body.
+       req := httptest.NewRequest("POST", "/arvados/v1/container_requests?cluster_id=zzzzz",
+               strings.NewReader(`{
+  "container_request": {
+    "name": "hello world",
+    "state": "Uncommitted",
+    "output_path": "/",
+    "container_image": "123",
+    "command": ["abc"]
+  }
+}
+`))
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       req.Header.Set("Content-type", "application/json")
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var cr arvados.ContainerRequest
+       c.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)
+       c.Check(cr.Name, check.Equals, "hello world")
+       c.Check(strings.HasPrefix(cr.UUID, "zzzzz-"), check.Equals, true)
+}
+
+func (s *FederationSuite) TestCreateRemoteContainerRequestCheckRuntimeToken(c *check.C) {
+       // Send request to zmock and check that outgoing request has
+       // runtime_token set with a new random v2 token.
+
+       defer s.localServiceReturns404(c).Close()
+       // Pass cluster_id via query parameter; this allows
+       // arvados-controller to avoid parsing the body.
+       req := httptest.NewRequest("POST", "/arvados/v1/container_requests?cluster_id=zmock",
+               strings.NewReader(`{
+  "container_request": {
+    "name": "hello world",
+    "state": "Uncommitted",
+    "output_path": "/",
+    "container_image": "123",
+    "command": ["abc"]
+  }
+}
+`))
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
+       req.Header.Set("Content-type", "application/json")
+
+       np := arvados.NodeProfile{
+               Controller: arvados.SystemServiceInstance{Listen: ":"},
+               RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"),
+                       TLS: true, Insecure: true}}
+       s.testHandler.Cluster.ClusterID = "zzzzz"
+       s.testHandler.Cluster.NodeProfiles["*"] = np
+       s.testHandler.NodeProfile = &np
+
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var cr struct {
+               arvados.ContainerRequest `json:"container_request"`
+       }
+       c.Check(json.NewDecoder(s.remoteMockRequests[0].Body).Decode(&cr), check.IsNil)
+       c.Check(strings.HasPrefix(cr.ContainerRequest.RuntimeToken, "v2/zzzzz-gj3su-"), check.Equals, true)
+       c.Check(cr.ContainerRequest.RuntimeToken, check.Not(check.Equals), arvadostest.ActiveTokenV2)
+}
+
+func (s *FederationSuite) TestCreateRemoteContainerRequestCheckSetRuntimeToken(c *check.C) {
+       // Send request to zmock and check that outgoing request has
+       // runtime_token set with the explicitly provided token.
+
+       defer s.localServiceReturns404(c).Close()
+       // Pass cluster_id via query parameter; this allows
+       // arvados-controller to avoid parsing the body.
+       req := httptest.NewRequest("POST", "/arvados/v1/container_requests?cluster_id=zmock",
+               strings.NewReader(`{
+  "container_request": {
+    "name": "hello world",
+    "state": "Uncommitted",
+    "output_path": "/",
+    "container_image": "123",
+    "command": ["abc"],
+    "runtime_token": "xyz"
+  }
+}
+`))
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       req.Header.Set("Content-type", "application/json")
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var cr struct {
+               arvados.ContainerRequest `json:"container_request"`
+       }
+       c.Check(json.NewDecoder(s.remoteMockRequests[0].Body).Decode(&cr), check.IsNil)
+       c.Check(cr.ContainerRequest.RuntimeToken, check.Equals, "xyz")
+}
+
+func (s *FederationSuite) TestCreateRemoteContainerRequestRuntimeTokenFromAuth(c *check.C) {
+       // Send request to zmock and check that outgoing request has
+       // runtime_token set using the Auth token because the user is remote.
+
+       defer s.localServiceReturns404(c).Close()
+       // Pass cluster_id via query parameter; this allows
+       // arvados-controller to avoid parsing the body.
+       req := httptest.NewRequest("POST", "/arvados/v1/container_requests?cluster_id=zmock",
+               strings.NewReader(`{
+  "container_request": {
+    "name": "hello world",
+    "state": "Uncommitted",
+    "output_path": "/",
+    "container_image": "123",
+    "command": ["abc"]
+  }
+}
+`))
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2+"/zzzzz-dz642-parentcontainer")
+       req.Header.Set("Content-type", "application/json")
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var cr struct {
+               arvados.ContainerRequest `json:"container_request"`
+       }
+       c.Check(json.NewDecoder(s.remoteMockRequests[0].Body).Decode(&cr), check.IsNil)
+       c.Check(cr.ContainerRequest.RuntimeToken, check.Equals, arvadostest.ActiveTokenV2)
+}
+
+func (s *FederationSuite) TestCreateRemoteContainerRequestError(c *check.C) {
+       defer s.localServiceReturns404(c).Close()
+       // Pass cluster_id via query parameter; this allows
+       // arvados-controller to avoid parsing the body.
+       req := httptest.NewRequest("POST", "/arvados/v1/container_requests?cluster_id=zz404",
+               strings.NewReader(`{
+  "container_request": {
+    "name": "hello world",
+    "state": "Uncommitted",
+    "output_path": "/",
+    "container_image": "123",
+    "command": ["abc"]
+  }
+}
+`))
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       req.Header.Set("Content-type", "application/json")
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+}
+
+func (s *FederationSuite) TestGetRemoteContainer(c *check.C) {
+       defer s.localServiceReturns404(c).Close()
+       req := httptest.NewRequest("GET", "/arvados/v1/containers/"+arvadostest.QueuedContainerUUID, nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var cn arvados.Container
+       c.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)
+       c.Check(cn.UUID, check.Equals, arvadostest.QueuedContainerUUID)
+}
+
+func (s *FederationSuite) TestListRemoteContainer(c *check.C) {
+       defer s.localServiceReturns404(c).Close()
+       req := httptest.NewRequest("GET", "/arvados/v1/containers?count=none&filters="+
+               url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v"]]]`, arvadostest.QueuedContainerUUID)), nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var cn arvados.ContainerList
+       c.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)
+       c.Check(cn.Items[0].UUID, check.Equals, arvadostest.QueuedContainerUUID)
+}
+
+func (s *FederationSuite) TestListMultiRemoteContainers(c *check.C) {
+       defer s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+               bd, _ := ioutil.ReadAll(req.Body)
+               c.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr5queuedcontnr%22%5D%5D%5D&select=%5B%22uuid%22%2C+%22command%22%5D`)
+               w.WriteHeader(200)
+               w.Write([]byte(`{"kind": "arvados#containerList", "items": [{"uuid": "zhome-xvhdp-cr5queuedcontnr", "command": ["abc"]}]}`))
+       })).Close()
+       req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s&select=%s",
+               url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
+                       arvadostest.QueuedContainerUUID)),
+               url.QueryEscape(`["uuid", "command"]`)),
+               nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var cn arvados.ContainerList
+       c.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)
+       c.Check(cn.Items, check.HasLen, 2)
+       mp := make(map[string]arvados.Container)
+       for _, cr := range cn.Items {
+               mp[cr.UUID] = cr
+       }
+       c.Check(mp[arvadostest.QueuedContainerUUID].Command, check.DeepEquals, []string{"echo", "hello"})
+       c.Check(mp[arvadostest.QueuedContainerUUID].ContainerImage, check.Equals, "")
+       c.Check(mp["zhome-xvhdp-cr5queuedcontnr"].Command, check.DeepEquals, []string{"abc"})
+       c.Check(mp["zhome-xvhdp-cr5queuedcontnr"].ContainerImage, check.Equals, "")
+}
+
+func (s *FederationSuite) TestListMultiRemoteContainerError(c *check.C) {
+       defer s.localServiceReturns404(c).Close()
+       req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s&select=%s",
+               url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
+                       arvadostest.QueuedContainerUUID)),
+               url.QueryEscape(`["uuid", "command"]`)),
+               nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)
+       s.checkJSONErrorMatches(c, resp, `error fetching from zhome \(404 Not Found\): EOF`)
+}
+
+func (s *FederationSuite) TestListMultiRemoteContainersPaged(c *check.C) {
+       callCount := 0
+       defer s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+               bd, _ := ioutil.ReadAll(req.Body)
+               if callCount == 0 {
+                       c.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr5queuedcontnr%22%2C%22zhome-xvhdp-cr6queuedcontnr%22%5D%5D%5D`)
+                       w.WriteHeader(200)
+                       w.Write([]byte(`{"kind": "arvados#containerList", "items": [{"uuid": "zhome-xvhdp-cr5queuedcontnr", "command": ["abc"]}]}`))
+               } else if callCount == 1 {
+                       c.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr6queuedcontnr%22%5D%5D%5D`)
+                       w.WriteHeader(200)
+                       w.Write([]byte(`{"kind": "arvados#containerList", "items": [{"uuid": "zhome-xvhdp-cr6queuedcontnr", "command": ["efg"]}]}`))
+               }
+               callCount += 1
+       })).Close()
+       req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s",
+               url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr", "zhome-xvhdp-cr6queuedcontnr"]]]`,
+                       arvadostest.QueuedContainerUUID))),
+               nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       c.Check(callCount, check.Equals, 2)
+       var cn arvados.ContainerList
+       c.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)
+       c.Check(cn.Items, check.HasLen, 3)
+       mp := make(map[string]arvados.Container)
+       for _, cr := range cn.Items {
+               mp[cr.UUID] = cr
+       }
+       c.Check(mp[arvadostest.QueuedContainerUUID].Command, check.DeepEquals, []string{"echo", "hello"})
+       c.Check(mp["zhome-xvhdp-cr5queuedcontnr"].Command, check.DeepEquals, []string{"abc"})
+       c.Check(mp["zhome-xvhdp-cr6queuedcontnr"].Command, check.DeepEquals, []string{"efg"})
+}
+
+func (s *FederationSuite) TestListMultiRemoteContainersMissing(c *check.C) {
+       callCount := 0
+       defer s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+               bd, _ := ioutil.ReadAll(req.Body)
+               if callCount == 0 {
+                       c.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr5queuedcontnr%22%2C%22zhome-xvhdp-cr6queuedcontnr%22%5D%5D%5D`)
+                       w.WriteHeader(200)
+                       w.Write([]byte(`{"kind": "arvados#containerList", "items": [{"uuid": "zhome-xvhdp-cr6queuedcontnr", "command": ["efg"]}]}`))
+               } else if callCount == 1 {
+                       c.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr5queuedcontnr%22%5D%5D%5D`)
+                       w.WriteHeader(200)
+                       w.Write([]byte(`{"kind": "arvados#containerList", "items": []}`))
+               }
+               callCount += 1
+       })).Close()
+       req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s",
+               url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr", "zhome-xvhdp-cr6queuedcontnr"]]]`,
+                       arvadostest.QueuedContainerUUID))),
+               nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       c.Check(callCount, check.Equals, 2)
+       var cn arvados.ContainerList
+       c.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)
+       c.Check(cn.Items, check.HasLen, 2)
+       mp := make(map[string]arvados.Container)
+       for _, cr := range cn.Items {
+               mp[cr.UUID] = cr
+       }
+       c.Check(mp[arvadostest.QueuedContainerUUID].Command, check.DeepEquals, []string{"echo", "hello"})
+       c.Check(mp["zhome-xvhdp-cr6queuedcontnr"].Command, check.DeepEquals, []string{"efg"})
+}
+
+func (s *FederationSuite) TestListMultiRemoteContainerPageSizeError(c *check.C) {
+       s.testHandler.Cluster.RequestLimits.MaxItemsPerResponse = 1
+       req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s",
+               url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
+                       arvadostest.QueuedContainerUUID))),
+               nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
+       s.checkJSONErrorMatches(c, resp, `Federated multi-object request for 2 objects which is more than max page size 1.`)
+}
+
+func (s *FederationSuite) TestListMultiRemoteContainerLimitError(c *check.C) {
+       req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s&limit=1",
+               url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
+                       arvadostest.QueuedContainerUUID))),
+               nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
+       s.checkJSONErrorMatches(c, resp, `Federated multi-object may not provide 'limit', 'offset' or 'order'.`)
+}
+
+func (s *FederationSuite) TestListMultiRemoteContainerOffsetError(c *check.C) {
+       req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s&offset=1",
+               url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
+                       arvadostest.QueuedContainerUUID))),
+               nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
+       s.checkJSONErrorMatches(c, resp, `Federated multi-object may not provide 'limit', 'offset' or 'order'.`)
+}
+
+func (s *FederationSuite) TestListMultiRemoteContainerOrderError(c *check.C) {
+       req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s&order=uuid",
+               url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
+                       arvadostest.QueuedContainerUUID))),
+               nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
+       s.checkJSONErrorMatches(c, resp, `Federated multi-object may not provide 'limit', 'offset' or 'order'.`)
+}
+
+func (s *FederationSuite) TestListMultiRemoteContainerSelectError(c *check.C) {
+       req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s&select=%s",
+               url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
+                       arvadostest.QueuedContainerUUID)),
+               url.QueryEscape(`["command"]`)),
+               nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := s.testRequest(req)
+       c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
+       s.checkJSONErrorMatches(c, resp, `Federated multi-object request must include 'uuid' in 'select'`)
+}
diff --git a/lib/controller/handler.go b/lib/controller/handler.go
new file mode 100644 (file)
index 0000000..53125ae
--- /dev/null
@@ -0,0 +1,173 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "context"
+       "database/sql"
+       "errors"
+       "net"
+       "net/http"
+       "net/url"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/health"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       _ "github.com/lib/pq"
+)
+
+type Handler struct {
+       Cluster     *arvados.Cluster
+       NodeProfile *arvados.NodeProfile
+
+       setupOnce      sync.Once
+       handlerStack   http.Handler
+       proxy          *proxy
+       secureClient   *http.Client
+       insecureClient *http.Client
+       pgdb           *sql.DB
+       pgdbMtx        sync.Mutex
+}
+
+func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+       h.setupOnce.Do(h.setup)
+       if req.Method != "GET" && req.Method != "HEAD" {
+               // http.ServeMux returns 301 with a cleaned path if
+               // the incoming request has a double slash. Some
+               // clients (including the Go standard library) change
+               // the request method to GET when following a 301
+               // redirect if the original method was not HEAD
+               // (RFC7231 6.4.2 specifically allows this in the case
+               // of POST). Thus "POST //foo" gets misdirected to
+               // "GET /foo". To avoid this, eliminate double slashes
+               // before passing the request to ServeMux.
+               for strings.Contains(req.URL.Path, "//") {
+                       req.URL.Path = strings.Replace(req.URL.Path, "//", "/", -1)
+               }
+       }
+       if h.Cluster.HTTPRequestTimeout > 0 {
+               ctx, cancel := context.WithDeadline(req.Context(), time.Now().Add(time.Duration(h.Cluster.HTTPRequestTimeout)))
+               req = req.WithContext(ctx)
+               defer cancel()
+       }
+
+       h.handlerStack.ServeHTTP(w, req)
+}
+
+func (h *Handler) CheckHealth() error {
+       h.setupOnce.Do(h.setup)
+       _, _, err := findRailsAPI(h.Cluster, h.NodeProfile)
+       return err
+}
+
+func neverRedirect(*http.Request, []*http.Request) error { return http.ErrUseLastResponse }
+
+func (h *Handler) setup() {
+       mux := http.NewServeMux()
+       mux.Handle("/_health/", &health.Handler{
+               Token:  h.Cluster.ManagementToken,
+               Prefix: "/_health/",
+       })
+       hs := http.NotFoundHandler()
+       hs = prepend(hs, h.proxyRailsAPI)
+       hs = h.setupProxyRemoteCluster(hs)
+       mux.Handle("/", hs)
+       h.handlerStack = mux
+
+       sc := *arvados.DefaultSecureClient
+       sc.CheckRedirect = neverRedirect
+       h.secureClient = &sc
+
+       ic := *arvados.InsecureHTTPClient
+       ic.CheckRedirect = neverRedirect
+       h.insecureClient = &ic
+
+       h.proxy = &proxy{
+               Name: "arvados-controller",
+       }
+}
+
+var errDBConnection = errors.New("database connection error")
+
+func (h *Handler) db(req *http.Request) (*sql.DB, error) {
+       h.pgdbMtx.Lock()
+       defer h.pgdbMtx.Unlock()
+       if h.pgdb != nil {
+               return h.pgdb, nil
+       }
+
+       db, err := sql.Open("postgres", h.Cluster.PostgreSQL.Connection.String())
+       if err != nil {
+               httpserver.Logger(req).WithError(err).Error("postgresql connect failed")
+               return nil, errDBConnection
+       }
+       if p := h.Cluster.PostgreSQL.ConnectionPool; p > 0 {
+               db.SetMaxOpenConns(p)
+       }
+       if err := db.Ping(); err != nil {
+               httpserver.Logger(req).WithError(err).Error("postgresql connect succeeded but ping failed")
+               return nil, errDBConnection
+       }
+       h.pgdb = db
+       return db, nil
+}
+
+type middlewareFunc func(http.ResponseWriter, *http.Request, http.Handler)
+
+func prepend(next http.Handler, middleware middlewareFunc) http.Handler {
+       return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+               middleware(w, req, next)
+       })
+}
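+
+// Illustrative note (editor's sketch): prepend wraps a handler so the
+// middleware runs first and decides whether to delegate, e.g.
+//
+//	hs := prepend(http.NotFoundHandler(), h.proxyRailsAPI)
+//
+// makes every request go through h.proxyRailsAPI(w, req, next) before
+// (or instead of) the 404 handler.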
+
+func (h *Handler) localClusterRequest(req *http.Request) (*http.Response, error) {
+       urlOut, insecure, err := findRailsAPI(h.Cluster, h.NodeProfile)
+       if err != nil {
+               return nil, err
+       }
+       urlOut = &url.URL{
+               Scheme:   urlOut.Scheme,
+               Host:     urlOut.Host,
+               Path:     req.URL.Path,
+               RawPath:  req.URL.RawPath,
+               RawQuery: req.URL.RawQuery,
+       }
+       client := h.secureClient
+       if insecure {
+               client = h.insecureClient
+       }
+       return h.proxy.Do(req, urlOut, client)
+}
+
+func (h *Handler) proxyRailsAPI(w http.ResponseWriter, req *http.Request, next http.Handler) {
+       resp, err := h.localClusterRequest(req)
+       n, err := h.proxy.ForwardResponse(w, resp, err)
+       if err != nil {
+               httpserver.Logger(req).WithError(err).WithField("bytesCopied", n).Error("error copying response body")
+       }
+}
+
+// For now, findRailsAPI always uses the rails API running on this
+// node.
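+//
+// Per the parsing below, a Listen value like ":8000" is interpreted
+// as "localhost:8000", while an explicit address such as
+// "[::1]:8000" is used as given (ports illustrative).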
+func findRailsAPI(cluster *arvados.Cluster, np *arvados.NodeProfile) (*url.URL, bool, error) {
+       hostport := np.RailsAPI.Listen
+       if len(hostport) > 1 && hostport[0] == ':' && strings.TrimRight(hostport[1:], "0123456789") == "" {
+               // ":12345" => connect to indicated port on localhost
+               hostport = "localhost" + hostport
+       } else if _, _, err := net.SplitHostPort(hostport); err == nil {
+               // "[::1]:12345" => connect to indicated address & port
+       } else {
+               return nil, false, err
+       }
+       proto := "http"
+       if np.RailsAPI.TLS {
+               proto = "https"
+       }
+       u, err := url.Parse(proto + "://" + hostport)
+       return u, np.RailsAPI.Insecure, err
+}
diff --git a/lib/controller/handler_test.go b/lib/controller/handler_test.go
new file mode 100644 (file)
index 0000000..dfe60d9
--- /dev/null
@@ -0,0 +1,178 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "context"
+       "encoding/json"
+       "net/http"
+       "net/http/httptest"
+       "net/url"
+       "os"
+       "strings"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&HandlerSuite{})
+
+type HandlerSuite struct {
+       cluster *arvados.Cluster
+       handler http.Handler
+       ctx     context.Context
+       cancel  context.CancelFunc
+}
+
+func (s *HandlerSuite) SetUpTest(c *check.C) {
+       s.ctx, s.cancel = context.WithCancel(context.Background())
+       s.ctx = ctxlog.Context(s.ctx, ctxlog.New(os.Stderr, "json", "debug"))
+       s.cluster = &arvados.Cluster{
+               ClusterID:  "zzzzz",
+               PostgreSQL: integrationTestCluster().PostgreSQL,
+               NodeProfiles: map[string]arvados.NodeProfile{
+                       "*": {
+                               Controller: arvados.SystemServiceInstance{Listen: ":"},
+                               RailsAPI:   arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"), TLS: true, Insecure: true},
+                       },
+               },
+       }
+       node := s.cluster.NodeProfiles["*"]
+       s.handler = newHandler(s.ctx, s.cluster, &node)
+}
+
+func (s *HandlerSuite) TearDownTest(c *check.C) {
+       s.cancel()
+}
+
+func (s *HandlerSuite) TestProxyDiscoveryDoc(c *check.C) {
+       req := httptest.NewRequest("GET", "/discovery/v1/apis/arvados/v1/rest", nil)
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       var dd arvados.DiscoveryDocument
+       err := json.Unmarshal(resp.Body.Bytes(), &dd)
+       c.Check(err, check.IsNil)
+       c.Check(dd.BlobSignatureTTL, check.Not(check.Equals), int64(0))
+       c.Check(dd.BlobSignatureTTL > 0, check.Equals, true)
+       c.Check(len(dd.Resources), check.Not(check.Equals), 0)
+       c.Check(len(dd.Schemas), check.Not(check.Equals), 0)
+}
+
+func (s *HandlerSuite) TestRequestTimeout(c *check.C) {
+       s.cluster.HTTPRequestTimeout = arvados.Duration(time.Nanosecond)
+       req := httptest.NewRequest("GET", "/discovery/v1/apis/arvados/v1/rest", nil)
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusBadGateway)
+       var jresp httpserver.ErrorResponse
+       err := json.Unmarshal(resp.Body.Bytes(), &jresp)
+       c.Check(err, check.IsNil)
+       c.Assert(len(jresp.Errors), check.Equals, 1)
+       c.Check(jresp.Errors[0], check.Matches, `.*context deadline exceeded.*`)
+}
+
+func (s *HandlerSuite) TestProxyWithoutToken(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
+       jresp := map[string]interface{}{}
+       err := json.Unmarshal(resp.Body.Bytes(), &jresp)
+       c.Check(err, check.IsNil)
+       c.Check(jresp["errors"], check.FitsTypeOf, []interface{}{})
+}
+
+func (s *HandlerSuite) TestProxyWithToken(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       var u arvados.User
+       err := json.Unmarshal(resp.Body.Bytes(), &u)
+       c.Check(err, check.IsNil)
+       c.Check(u.UUID, check.Equals, arvadostest.ActiveUserUUID)
+}
+
+func (s *HandlerSuite) TestProxyWithTokenInRequestBody(c *check.C) {
+       req := httptest.NewRequest("POST", "/arvados/v1/users/current", strings.NewReader(url.Values{
+               "_method":   {"GET"},
+               "api_token": {arvadostest.ActiveToken},
+       }.Encode()))
+       req.Header.Set("Content-type", "application/x-www-form-urlencoded")
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       var u arvados.User
+       err := json.Unmarshal(resp.Body.Bytes(), &u)
+       c.Check(err, check.IsNil)
+       c.Check(u.UUID, check.Equals, arvadostest.ActiveUserUUID)
+}
+
+func (s *HandlerSuite) TestProxyNotFound(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/xyzzy", nil)
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusNotFound)
+       jresp := map[string]interface{}{}
+       err := json.Unmarshal(resp.Body.Bytes(), &jresp)
+       c.Check(err, check.IsNil)
+       c.Check(jresp["errors"], check.FitsTypeOf, []interface{}{})
+}
+
+func (s *HandlerSuite) TestProxyRedirect(c *check.C) {
+       req := httptest.NewRequest("GET", "https://0.0.0.0:1/login?return_to=foo", nil)
+       resp := httptest.NewRecorder()
+       s.handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusFound)
+       c.Check(resp.Header().Get("Location"), check.Matches, `https://0.0.0.0:1/auth/joshid\?return_to=%2Cfoo&?`)
+}
+
+func (s *HandlerSuite) TestValidateV1APIToken(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
+       user, err := s.handler.(*Handler).validateAPItoken(req, arvadostest.ActiveToken)
+       c.Assert(err, check.IsNil)
+       c.Check(user.Authorization.UUID, check.Equals, arvadostest.ActiveTokenUUID)
+       c.Check(user.Authorization.APIToken, check.Equals, arvadostest.ActiveToken)
+       c.Check(user.Authorization.Scopes, check.DeepEquals, []string{"all"})
+       c.Check(user.UUID, check.Equals, arvadostest.ActiveUserUUID)
+}
+
+func (s *HandlerSuite) TestValidateV2APIToken(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
+       user, err := s.handler.(*Handler).validateAPItoken(req, arvadostest.ActiveTokenV2)
+       c.Assert(err, check.IsNil)
+       c.Check(user.Authorization.UUID, check.Equals, arvadostest.ActiveTokenUUID)
+       c.Check(user.Authorization.APIToken, check.Equals, arvadostest.ActiveToken)
+       c.Check(user.Authorization.Scopes, check.DeepEquals, []string{"all"})
+       c.Check(user.UUID, check.Equals, arvadostest.ActiveUserUUID)
+       c.Check(user.Authorization.TokenV2(), check.Equals, arvadostest.ActiveTokenV2)
+}
+
+func (s *HandlerSuite) TestCreateAPIToken(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
+       auth, err := s.handler.(*Handler).createAPItoken(req, arvadostest.ActiveUserUUID, nil)
+       c.Assert(err, check.IsNil)
+       c.Check(auth.Scopes, check.DeepEquals, []string{"all"})
+
+       user, err := s.handler.(*Handler).validateAPItoken(req, auth.TokenV2())
+       c.Assert(err, check.IsNil)
+       c.Check(user.Authorization.UUID, check.Equals, auth.UUID)
+       c.Check(user.Authorization.APIToken, check.Equals, auth.APIToken)
+       c.Check(user.Authorization.Scopes, check.DeepEquals, []string{"all"})
+       c.Check(user.UUID, check.Equals, arvadostest.ActiveUserUUID)
+       c.Check(user.Authorization.TokenV2(), check.Equals, auth.TokenV2())
+}
diff --git a/lib/controller/proxy.go b/lib/controller/proxy.go
new file mode 100644 (file)
index 0000000..c01c152
--- /dev/null
@@ -0,0 +1,100 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "io"
+       "net/http"
+       "net/url"
+
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+)
+
+type proxy struct {
+       Name string // to use in Via header
+}
+
+type HTTPError struct {
+       Message string
+       Code    int
+}
+
+func (h HTTPError) Error() string {
+       return h.Message
+}
+
+// dropHeaders lists headers that should not be forwarded when
+// proxying. See
+// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers
+var dropHeaders = map[string]bool{
+       "Connection":          true,
+       "Keep-Alive":          true,
+       "Proxy-Authenticate":  true,
+       "Proxy-Authorization": true,
+       "TE":                true,
+       "Trailer":           true,
+       "Transfer-Encoding": true, // *-Encoding headers interfere with Go's automatic compression/decompression
+       "Content-Encoding":  true,
+       "Accept-Encoding":   true,
+       "Upgrade":           true,
+}
+
+type ResponseFilter func(*http.Response, error) (*http.Response, error)
+
+// Do forwards a request to the upstream service and returns the response or error.
+func (p *proxy) Do(
+       reqIn *http.Request,
+       urlOut *url.URL,
+       client *http.Client) (*http.Response, error) {
+
+       // Copy headers from incoming request, then add/replace proxy
+       // headers like Via and X-Forwarded-For.
+       hdrOut := http.Header{}
+       for k, v := range reqIn.Header {
+               if !dropHeaders[k] {
+                       hdrOut[k] = v
+               }
+       }
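+       // Record this hop in X-Forwarded-For: e.g., an incoming
+       // "X-Forwarded-For: 10.0.0.1" from RemoteAddr "10.0.0.2:1234"
+       // (addresses illustrative) is forwarded as
+       // "X-Forwarded-For: 10.0.0.1,10.0.0.2:1234".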
+       xff := reqIn.RemoteAddr
+       if xffIn := reqIn.Header.Get("X-Forwarded-For"); xffIn != "" {
+               xff = xffIn + "," + xff
+       }
+       hdrOut.Set("X-Forwarded-For", xff)
+       if hdrOut.Get("X-Forwarded-Proto") == "" {
+               hdrOut.Set("X-Forwarded-Proto", reqIn.URL.Scheme)
+       }
+       hdrOut.Add("Via", reqIn.Proto+" "+p.Name)
+
+       reqOut := (&http.Request{
+               Method: reqIn.Method,
+               URL:    urlOut,
+               Host:   reqIn.Host,
+               Header: hdrOut,
+               Body:   reqIn.Body,
+       }).WithContext(reqIn.Context())
+
+       return client.Do(reqOut)
+}
+
+// ForwardResponse copies a response (or error) to the downstream client.
+func (p *proxy) ForwardResponse(w http.ResponseWriter, resp *http.Response, err error) (int64, error) {
+       if err != nil {
+               if he, ok := err.(HTTPError); ok {
+                       httpserver.Error(w, he.Message, he.Code)
+               } else {
+                       httpserver.Error(w, err.Error(), http.StatusBadGateway)
+               }
+               return 0, nil
+       }
+
+       defer resp.Body.Close()
+       for k, vals := range resp.Header {
+               for _, v := range vals {
+                       w.Header().Add(k, v)
+               }
+       }
+       w.WriteHeader(resp.StatusCode)
+       return io.Copy(w, resp.Body)
+}
diff --git a/lib/controller/server_test.go b/lib/controller/server_test.go
new file mode 100644 (file)
index 0000000..ae89c3d
--- /dev/null
@@ -0,0 +1,54 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "net/http"
+       "os"
+       "path/filepath"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       check "gopkg.in/check.v1"
+)
+
+func integrationTestCluster() *arvados.Cluster {
+       cfg, err := arvados.GetConfig(filepath.Join(os.Getenv("WORKSPACE"), "tmp", "arvados.yml"))
+       if err != nil {
+               panic(err)
+       }
+       cc, err := cfg.GetCluster("zzzzz")
+       if err != nil {
+               panic(err)
+       }
+       return cc
+}
+
+// newServerFromIntegrationTestEnv returns a new unstarted controller
+// server, using the Rails API provided by the integration-testing
+// environment.
+func newServerFromIntegrationTestEnv(c *check.C) *httpserver.Server {
+       log := ctxlog.TestLogger(c)
+
+       nodeProfile := arvados.NodeProfile{
+               Controller: arvados.SystemServiceInstance{Listen: ":"},
+               RailsAPI:   arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"), TLS: true, Insecure: true},
+       }
+       handler := &Handler{Cluster: &arvados.Cluster{
+               ClusterID:  "zzzzz",
+               PostgreSQL: integrationTestCluster().PostgreSQL,
+               NodeProfiles: map[string]arvados.NodeProfile{
+                       "*": nodeProfile,
+               },
+       }, NodeProfile: &nodeProfile}
+
+       srv := &httpserver.Server{
+               Server: http.Server{
+                       Handler: httpserver.AddRequestIDs(httpserver.LogRequests(log, handler)),
+               },
+               Addr: nodeProfile.Controller.Listen,
+       }
+       return srv
+}
diff --git a/lib/crunchstat/crunchstat.go b/lib/crunchstat/crunchstat.go
new file mode 100644 (file)
index 0000000..8afe828
--- /dev/null
@@ -0,0 +1,498 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Package crunchstat reports resource usage (CPU, memory, disk,
+// network) for a cgroup.
+package crunchstat
+
+import (
+       "bufio"
+       "bytes"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "os"
+       "strconv"
+       "strings"
+       "syscall"
+       "time"
+)
+
+// This magically allows us to look up userHz via _SC_CLK_TCK:
+
+/*
+#include <unistd.h>
+#include <sys/types.h>
+#include <pwd.h>
+#include <stdlib.h>
+*/
+import "C"
+
+// A Reporter gathers statistics for a cgroup and writes them to a
+// log.Logger.
+type Reporter struct {
+       // CID of the container to monitor. If empty, read the CID
+       // from CIDFile (first waiting until a non-empty file appears
+       // at CIDFile). If CIDFile is also empty, report host
+       // statistics.
+       CID string
+
+       // Path to a file we can read CID from.
+       CIDFile string
+
+       // Where cgroup accounting files live on this system, e.g.,
+       // "/sys/fs/cgroup".
+       CgroupRoot string
+
+       // Parent cgroup, e.g., "docker".
+       CgroupParent string
+
+       // Interval between samples. Must be positive.
+       PollPeriod time.Duration
+
+       // Temporary directory; monitored for available, used, and
+       // total space.
+       TempDir string
+
+       // Where to write statistics. Must not be nil.
+       Logger *log.Logger
+
+       reportedStatFile    map[string]string
+       lastNetSample       map[string]ioSample
+       lastDiskIOSample    map[string]ioSample
+       lastCPUSample       cpuSample
+       lastDiskSpaceSample diskSpaceSample
+
+       done    chan struct{} // closed when we should stop reporting
+       flushed chan struct{} // closed when we have made our last report
+}
+
+// Start starts monitoring in a new goroutine, and returns
+// immediately.
+//
+// The monitoring goroutine waits for a non-empty CIDFile to appear
+// (unless CID is non-empty). Then it waits for the accounting files
+// to appear for the monitored container. Then it collects and reports
+// statistics until Stop is called.
+//
+// Callers should not call Start more than once.
+//
+// Callers should not modify public data fields after calling Start.
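+//
+// A minimal usage sketch (field values illustrative):
+//
+//     r := &Reporter{
+//             CgroupRoot: "/sys/fs/cgroup",
+//             PollPeriod: 10 * time.Second,
+//             Logger:     log.New(os.Stderr, "", 0),
+//     }
+//     r.Start()
+//     defer r.Stop()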
+func (r *Reporter) Start() {
+       r.done = make(chan struct{})
+       r.flushed = make(chan struct{})
+       go r.run()
+}
+
+// Stop reporting. Do not call more than once, or before calling
+// Start.
+//
+// Nothing will be logged after Stop returns.
+func (r *Reporter) Stop() {
+       close(r.done)
+       <-r.flushed
+}
+
+func (r *Reporter) readAllOrWarn(in io.Reader) ([]byte, error) {
+       content, err := ioutil.ReadAll(in)
+       if err != nil {
+               r.Logger.Printf("warning: %v", err)
+       }
+       return content, err
+}
+
+// openStatFile opens the cgroup stats file in /sys/fs corresponding
+// to the target cgroup, and returns an io.ReadCloser. If no stats
+// file is available, it returns nil.
+//
+// It logs the file that was opened, if it differs from the file
+// opened by the previous openStatFile call for this stat.
+//
+// Log "not available" if no file is found and either this stat has
+// been available in the past, or verbose==true.
+//
+// TODO: Instead of trying all options, choose a process in the
+// container, and read /proc/PID/cgroup to determine the appropriate
+// cgroup root for the given statgroup. (This will avoid falling back
+// to host-level stats during container setup and teardown.)
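+//
+// For example, with CgroupRoot "/sys/fs/cgroup", CgroupParent
+// "docker", and a container CID, the candidate paths for
+// ("memory", "memory.stat") include
+// "/sys/fs/cgroup/memory/docker/CID/memory.stat" (values
+// illustrative).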
+func (r *Reporter) openStatFile(statgroup, stat string, verbose bool) (io.ReadCloser, error) {
+       var paths []string
+       if r.CID != "" {
+               // Collect container's stats
+               paths = []string{
+                       fmt.Sprintf("%s/%s/%s/%s/%s", r.CgroupRoot, statgroup, r.CgroupParent, r.CID, stat),
+                       fmt.Sprintf("%s/%s/%s/%s", r.CgroupRoot, r.CgroupParent, r.CID, stat),
+               }
+       } else {
+               // Collect this host's stats
+               paths = []string{
+                       fmt.Sprintf("%s/%s/%s", r.CgroupRoot, statgroup, stat),
+                       fmt.Sprintf("%s/%s", r.CgroupRoot, stat),
+               }
+       }
+       var path string
+       var file *os.File
+       var err error
+       for _, path = range paths {
+               file, err = os.Open(path)
+               if err == nil {
+                       break
+               } else {
+                       path = ""
+               }
+       }
+       if pathWas := r.reportedStatFile[stat]; pathWas != path {
+               // Log whenever we start using a new/different cgroup
+               // stat file for a given statistic. This typically
+               // happens 1 to 3 times per statistic, depending on
+               // whether we happen to collect stats [a] before any
+               // processes have been created in the container and
+               // [b] after all contained processes have exited.
+               if path == "" && verbose {
+                       r.Logger.Printf("notice: stats not available: stat %s, statgroup %s, cid %s, parent %s, root %s\n", stat, statgroup, r.CID, r.CgroupParent, r.CgroupRoot)
+               } else if pathWas != "" {
+                       r.Logger.Printf("notice: stats moved from %s to %s\n", r.reportedStatFile[stat], path)
+               } else {
+                       r.Logger.Printf("notice: reading stats from %s\n", path)
+               }
+               r.reportedStatFile[stat] = path
+       }
+       return file, err
+}
+
+func (r *Reporter) getContainerNetStats() (io.Reader, error) {
+       procsFile, err := r.openStatFile("cpuacct", "cgroup.procs", true)
+       if err != nil {
+               return nil, err
+       }
+       defer procsFile.Close()
+       reader := bufio.NewScanner(procsFile)
+       for reader.Scan() {
+               taskPid := reader.Text()
+               statsFilename := fmt.Sprintf("/proc/%s/net/dev", taskPid)
+               stats, err := ioutil.ReadFile(statsFilename)
+               if err != nil {
+                       r.Logger.Printf("notice: %v", err)
+                       continue
+               }
+               return strings.NewReader(string(stats)), nil
+       }
+       return nil, errors.New("could not read stats for any proc in container")
+}
+
+type ioSample struct {
+       sampleTime time.Time
+       txBytes    int64
+       rxBytes    int64
+}
+
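+// doBlkIOStats reports per-device I/O totals. Each line of
+// blkio.io_service_bytes has the form "MAJOR:MINOR Op Bytes", e.g.
+// "8:0 Read 12345" (values illustrative); only the Read and Write
+// operations are reported here.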
+func (r *Reporter) doBlkIOStats() {
+       c, err := r.openStatFile("blkio", "blkio.io_service_bytes", true)
+       if err != nil {
+               return
+       }
+       defer c.Close()
+       b := bufio.NewScanner(c)
+       var sampleTime = time.Now()
+       newSamples := make(map[string]ioSample)
+       for b.Scan() {
+               var device, op string
+               var val int64
+               if _, err := fmt.Sscanf(string(b.Text()), "%s %s %d", &device, &op, &val); err != nil {
+                       continue
+               }
+               var thisSample ioSample
+               var ok bool
+               if thisSample, ok = newSamples[device]; !ok {
+                       thisSample = ioSample{sampleTime, -1, -1}
+               }
+               switch op {
+               case "Read":
+                       thisSample.rxBytes = val
+               case "Write":
+                       thisSample.txBytes = val
+               }
+               newSamples[device] = thisSample
+       }
+       for dev, sample := range newSamples {
+               if sample.txBytes < 0 || sample.rxBytes < 0 {
+                       continue
+               }
+               delta := ""
+               if prev, ok := r.lastDiskIOSample[dev]; ok {
+                       delta = fmt.Sprintf(" -- interval %.4f seconds %d write %d read",
+                               sample.sampleTime.Sub(prev.sampleTime).Seconds(),
+                               sample.txBytes-prev.txBytes,
+                               sample.rxBytes-prev.rxBytes)
+               }
+               r.Logger.Printf("blkio:%s %d write %d read%s\n", dev, sample.txBytes, sample.rxBytes, delta)
+               r.lastDiskIOSample[dev] = sample
+       }
+}
+
+type memSample struct {
+       sampleTime time.Time
+       memStat    map[string]int64
+}
+
+func (r *Reporter) doMemoryStats() {
+       c, err := r.openStatFile("memory", "memory.stat", true)
+       if err != nil {
+               return
+       }
+       defer c.Close()
+       b := bufio.NewScanner(c)
+       thisSample := memSample{time.Now(), make(map[string]int64)}
+       wantStats := [...]string{"cache", "swap", "pgmajfault", "rss"}
+       for b.Scan() {
+               var stat string
+               var val int64
+               if _, err := fmt.Sscanf(string(b.Text()), "%s %d", &stat, &val); err != nil {
+                       continue
+               }
+               thisSample.memStat[stat] = val
+       }
+       var outstat bytes.Buffer
+       for _, key := range wantStats {
+               if val, ok := thisSample.memStat[key]; ok {
+                       outstat.WriteString(fmt.Sprintf(" %d %s", val, key))
+               }
+       }
+       r.Logger.Printf("mem%s\n", outstat.String())
+}
+
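+// doNetworkStats reports per-interface traffic. Each data line of
+// /proc/net/dev has 17 whitespace-separated fields: the interface
+// name (with a trailing ":"), received bytes in field 1, and
+// transmitted bytes in field 9.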
+func (r *Reporter) doNetworkStats() {
+       sampleTime := time.Now()
+       stats, err := r.getContainerNetStats()
+       if err != nil {
+               return
+       }
+
+       scanner := bufio.NewScanner(stats)
+       for scanner.Scan() {
+               var ifName string
+               var rx, tx int64
+               words := strings.Fields(scanner.Text())
+               if len(words) != 17 {
+                       // Skip lines with wrong format
+                       continue
+               }
+               ifName = strings.TrimRight(words[0], ":")
+               if ifName == "lo" || ifName == "" {
+                       // Skip the loopback interface and blank interface names
+                       continue
+               }
+               if tx, err = strconv.ParseInt(words[9], 10, 64); err != nil {
+                       continue
+               }
+               if rx, err = strconv.ParseInt(words[1], 10, 64); err != nil {
+                       continue
+               }
+               nextSample := ioSample{}
+               nextSample.sampleTime = sampleTime
+               nextSample.txBytes = tx
+               nextSample.rxBytes = rx
+               var delta string
+               if prev, ok := r.lastNetSample[ifName]; ok {
+                       interval := nextSample.sampleTime.Sub(prev.sampleTime).Seconds()
+                       delta = fmt.Sprintf(" -- interval %.4f seconds %d tx %d rx",
+                               interval,
+                               tx-prev.txBytes,
+                               rx-prev.rxBytes)
+               }
+               r.Logger.Printf("net:%s %d tx %d rx%s\n", ifName, tx, rx, delta)
+               r.lastNetSample[ifName] = nextSample
+       }
+}
+
+type diskSpaceSample struct {
+       hasData    bool
+       sampleTime time.Time
+       total      uint64
+       used       uint64
+       available  uint64
+}
+
+func (r *Reporter) doDiskSpaceStats() {
+       s := syscall.Statfs_t{}
+       err := syscall.Statfs(r.TempDir, &s)
+       if err != nil {
+               return
+       }
+       bs := uint64(s.Bsize)
+       nextSample := diskSpaceSample{
+               hasData:    true,
+               sampleTime: time.Now(),
+               total:      s.Blocks * bs,
+               used:       (s.Blocks - s.Bfree) * bs,
+               available:  s.Bavail * bs,
+       }
+
+       var delta string
+       if r.lastDiskSpaceSample.hasData {
+               prev := r.lastDiskSpaceSample
+               interval := nextSample.sampleTime.Sub(prev.sampleTime).Seconds()
+               delta = fmt.Sprintf(" -- interval %.4f seconds %d used",
+                       interval,
+                       int64(nextSample.used-prev.used))
+       }
+       r.Logger.Printf("statfs %d available %d used %d total%s\n",
+               nextSample.available, nextSample.used, nextSample.total, delta)
+       r.lastDiskSpaceSample = nextSample
+}
+
+type cpuSample struct {
+       hasData    bool // to distinguish the zero value from real data
+       sampleTime time.Time
+       user       float64
+       sys        float64
+       cpus       int64
+}
+
+// getCPUCount returns the number of CPUs available in the
+// container, or 0 if it can't determine the real number of CPUs.
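+// For example, a cpuset.cpus value of "0-3,8" (illustrative) yields 5.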
+func (r *Reporter) getCPUCount() int64 {
+       cpusetFile, err := r.openStatFile("cpuset", "cpuset.cpus", true)
+       if err != nil {
+               return 0
+       }
+       defer cpusetFile.Close()
+       b, err := r.readAllOrWarn(cpusetFile)
+       if err != nil {
+               return 0
+       }
+       sp := strings.Split(string(b), ",")
+       cpus := int64(0)
+       for _, v := range sp {
+               var min, max int64
+               n, _ := fmt.Sscanf(v, "%d-%d", &min, &max)
+               if n == 2 {
+                       cpus += (max - min) + 1
+               } else {
+                       cpus++
+               }
+       }
+       return cpus
+}
+
+func (r *Reporter) doCPUStats() {
+       statFile, err := r.openStatFile("cpuacct", "cpuacct.stat", true)
+       if err != nil {
+               return
+       }
+       defer statFile.Close()
+       b, err := r.readAllOrWarn(statFile)
+       if err != nil {
+               return
+       }
+
+       var userTicks, sysTicks int64
+       fmt.Sscanf(string(b), "user %d\nsystem %d", &userTicks, &sysTicks)
+       userHz := float64(C.sysconf(C._SC_CLK_TCK))
+       nextSample := cpuSample{
+               hasData:    true,
+               sampleTime: time.Now(),
+               user:       float64(userTicks) / userHz,
+               sys:        float64(sysTicks) / userHz,
+               cpus:       r.getCPUCount(),
+       }
+
+       delta := ""
+       if r.lastCPUSample.hasData {
+               delta = fmt.Sprintf(" -- interval %.4f seconds %.4f user %.4f sys",
+                       nextSample.sampleTime.Sub(r.lastCPUSample.sampleTime).Seconds(),
+                       nextSample.user-r.lastCPUSample.user,
+                       nextSample.sys-r.lastCPUSample.sys)
+       }
+       r.Logger.Printf("cpu %.4f user %.4f sys %d cpus%s\n",
+               nextSample.user, nextSample.sys, nextSample.cpus, delta)
+       r.lastCPUSample = nextSample
+}
+
+// Report stats periodically until we learn (via r.done) that someone
+// called Stop.
+func (r *Reporter) run() {
+       defer close(r.flushed)
+
+       r.reportedStatFile = make(map[string]string)
+
+       if !r.waitForCIDFile() || !r.waitForCgroup() {
+               return
+       }
+
+       r.lastNetSample = make(map[string]ioSample)
+       r.lastDiskIOSample = make(map[string]ioSample)
+
+       if len(r.TempDir) == 0 {
+               // Temporary dir not provided, try to get it from the environment.
+               r.TempDir = os.Getenv("TMPDIR")
+       }
+       if len(r.TempDir) > 0 {
+               r.Logger.Printf("notice: monitoring temp dir %s\n", r.TempDir)
+       }
+
+       ticker := time.NewTicker(r.PollPeriod)
+       for {
+               r.doMemoryStats()
+               r.doCPUStats()
+               r.doBlkIOStats()
+               r.doNetworkStats()
+               r.doDiskSpaceStats()
+               select {
+               case <-r.done:
+                       return
+               case <-ticker.C:
+               }
+       }
+}
+
+// If CID is empty, wait for it to appear in CIDFile. Return true if
+// we get it before we learn (via r.done) that someone called Stop.
+func (r *Reporter) waitForCIDFile() bool {
+       if r.CID != "" || r.CIDFile == "" {
+               return true
+       }
+
+       ticker := time.NewTicker(100 * time.Millisecond)
+       defer ticker.Stop()
+       for {
+               cid, err := ioutil.ReadFile(r.CIDFile)
+               if err == nil && len(cid) > 0 {
+                       r.CID = string(cid)
+                       return true
+               }
+               select {
+               case <-ticker.C:
+               case <-r.done:
+                       r.Logger.Printf("warning: CID never appeared in %+q: %v", r.CIDFile, err)
+                       return false
+               }
+       }
+}
+
+// Wait for the cgroup stats files to appear in cgroup_root. Return
+// true if they appear before r.done indicates someone called Stop. If
+// they don't appear within one poll interval, log a warning and keep
+// waiting.
+func (r *Reporter) waitForCgroup() bool {
+       ticker := time.NewTicker(100 * time.Millisecond)
+       defer ticker.Stop()
+       warningTimer := time.After(r.PollPeriod)
+       for {
+               c, err := r.openStatFile("cpuacct", "cgroup.procs", false)
+               if err == nil {
+                       c.Close()
+                       return true
+               }
+               select {
+               case <-ticker.C:
+               case <-warningTimer:
+                       r.Logger.Printf("warning: cgroup stats files have not appeared after %v (config error?) -- still waiting...", r.PollPeriod)
+               case <-r.done:
+                       r.Logger.Printf("warning: cgroup stats files never appeared for %v", r.CID)
+                       return false
+               }
+       }
+}
diff --git a/lib/crunchstat/crunchstat_test.go b/lib/crunchstat/crunchstat_test.go
new file mode 100644 (file)
index 0000000..c27e392
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package crunchstat
+
+import (
+       "bufio"
+       "io"
+       "log"
+       "os"
+       "regexp"
+       "testing"
+)
+
+func bufLogger() (*log.Logger, *bufio.Reader) {
+       r, w := io.Pipe()
+       logger := log.New(w, "", 0)
+       return logger, bufio.NewReader(r)
+}
+
+func TestReadAllOrWarnFail(t *testing.T) {
+       logger, rcv := bufLogger()
+       rep := Reporter{Logger: logger}
+
+       done := make(chan bool)
+       var msg []byte
+       var err error
+       go func() {
+               msg, err = rcv.ReadBytes('\n')
+               close(done)
+       }()
+       {
+               // The special file /proc/self/mem can be opened for
+               // reading, but reading from byte 0 returns an error.
+               f, err := os.Open("/proc/self/mem")
+               if err != nil {
+                       t.Fatalf("Opening /proc/self/mem: %s", err)
+               }
+               if x, err := rep.readAllOrWarn(f); err == nil {
+                       t.Fatalf("Expected error, got %v", x)
+               }
+       }
+       <-done
+       if err != nil {
+               t.Fatal(err)
+       } else if matched, err := regexp.MatchString("^warning: read /proc/self/mem: .*", string(msg)); err != nil || !matched {
+               t.Fatalf("Expected error message about unreadable file, got \"%s\"", msg)
+       }
+}
+
+func TestReadAllOrWarnSuccess(t *testing.T) {
+       rep := Reporter{Logger: log.New(os.Stderr, "", 0)}
+
+       f, err := os.Open("./crunchstat_test.go")
+       if err != nil {
+               t.Fatalf("Opening ./crunchstat_test.go: %s", err)
+       }
+       data, err := rep.readAllOrWarn(f)
+       if err != nil {
+               t.Fatalf("got error %s", err)
+       }
+       if matched, err := regexp.MatchString("\npackage crunchstat\n", string(data)); err != nil || !matched {
+               t.Fatalf("data failed regexp: err %v, matched %v", err, matched)
+       }
+}
diff --git a/lib/dispatchcloud/cmd.go b/lib/dispatchcloud/cmd.go
new file mode 100644 (file)
index 0000000..7231e83
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package dispatchcloud
+
+import (
+       "context"
+
+       "git.curoverse.com/arvados.git/lib/cmd"
+       "git.curoverse.com/arvados.git/lib/service"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+var Command cmd.Handler = service.Command(arvados.ServiceNameDispatchCloud, newHandler)
+
+func newHandler(ctx context.Context, cluster *arvados.Cluster, _ *arvados.NodeProfile) service.Handler {
+       d := &dispatcher{Cluster: cluster, Context: ctx}
+       go d.Start()
+       return d
+}
diff --git a/lib/dispatchcloud/container/queue.go b/lib/dispatchcloud/container/queue.go
new file mode 100644 (file)
index 0000000..bbe4762
--- /dev/null
@@ -0,0 +1,458 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package container
+
+import (
+       "io"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/prometheus/client_golang/prometheus"
+       "github.com/sirupsen/logrus"
+)
+
+type typeChooser func(*arvados.Container) (arvados.InstanceType, error)
+
+// An APIClient performs Arvados API requests. It is typically an
+// *arvados.Client.
+type APIClient interface {
+       RequestAndDecode(dst interface{}, method, path string, body io.Reader, params interface{}) error
+}
+
+// A QueueEnt is an entry in the queue, consisting of a container
+// record and the instance type that should be used to run it.
+type QueueEnt struct {
+       // The container to run. Only the UUID, State, Priority, and
+       // RuntimeConstraints fields are populated.
+       Container    arvados.Container    `json:"container"`
+       InstanceType arvados.InstanceType `json:"instance_type"`
+}
+
+// String implements fmt.Stringer by returning the queued container's
+// UUID.
+func (c *QueueEnt) String() string {
+       return c.Container.UUID
+}
+
+// A Queue is an interface to an Arvados cluster's container
+// database. It presents only the containers that are eligible to be
+// run by, are already being run by, or have recently been run by the
+// present dispatcher.
+//
+// The Entries, Get, and Forget methods do not block: they return
+// immediately, using cached data.
+//
+// The updating methods (Cancel, Lock, Unlock, Update) do block: they
+// return only after the operation has completed.
+//
+// A Queue's Update method should be called periodically to keep the
+// cache up to date.
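+//
+// A typical polling loop looks like this (a sketch; cq and
+// pollInterval are illustrative):
+//
+//     for range time.Tick(pollInterval) {
+//             if err := cq.Update(); err != nil {
+//                     continue
+//             }
+//             ents, _ := cq.Entries()
+//             // ...schedule the returned entries...
+//     }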
+type Queue struct {
+       logger     logrus.FieldLogger
+       reg        *prometheus.Registry
+       chooseType typeChooser
+       client     APIClient
+
+       auth    *arvados.APIClientAuthorization
+       current map[string]QueueEnt
+       updated time.Time
+       mtx     sync.Mutex
+
+       // Methods that modify the Queue (like Lock) add the affected
+       // container UUIDs to dontupdate. When applying a batch of
+       // updates received from the network, anything appearing in
+       // dontupdate is skipped, in case the received update has
+       // already been superseded by the locally initiated change.
+       // When no network update is in progress, this protection is
+       // not needed, and dontupdate is nil.
+       dontupdate map[string]struct{}
+
+       // active notification subscribers (see Subscribe)
+       subscribers map[<-chan struct{}]chan struct{}
+}
+
+// NewQueue returns a new Queue. When a new container appears in the
+// Arvados cluster's queue during Update, chooseType will be called to
+// assign an appropriate arvados.InstanceType for the queue entry.
+func NewQueue(logger logrus.FieldLogger, reg *prometheus.Registry, chooseType typeChooser, client APIClient) *Queue {
+       return &Queue{
+               logger:      logger,
+               reg:         reg,
+               chooseType:  chooseType,
+               client:      client,
+               current:     map[string]QueueEnt{},
+               subscribers: map[<-chan struct{}]chan struct{}{},
+       }
+}
+
+// Subscribe returns a channel that becomes ready to receive when an
+// entry in the Queue is updated.
+//
+//     ch := q.Subscribe()
+//     defer q.Unsubscribe(ch)
+//     for range ch {
+//             // ...
+//     }
+func (cq *Queue) Subscribe() <-chan struct{} {
+       cq.mtx.Lock()
+       defer cq.mtx.Unlock()
+       ch := make(chan struct{}, 1)
+       cq.subscribers[ch] = ch
+       return ch
+}
+
+// Unsubscribe stops sending updates to the given channel. See
+// Subscribe.
+func (cq *Queue) Unsubscribe(ch <-chan struct{}) {
+       cq.mtx.Lock()
+       defer cq.mtx.Unlock()
+       delete(cq.subscribers, ch)
+}
+
+// Caller must have lock.
+func (cq *Queue) notify() {
+       for _, ch := range cq.subscribers {
+               select {
+               case ch <- struct{}{}:
+               default:
+               }
+       }
+}
+
+// Forget drops the specified container from the cache. It should be
+// called on finalized containers to avoid leaking memory over
+// time. It is a no-op if the indicated container is not in a
+// finalized state.
+func (cq *Queue) Forget(uuid string) {
+       cq.mtx.Lock()
+       defer cq.mtx.Unlock()
+       ctr := cq.current[uuid].Container
+       if ctr.State == arvados.ContainerStateComplete || ctr.State == arvados.ContainerStateCancelled {
+               cq.delEnt(uuid, ctr.State)
+       }
+}
+
+// Get returns the (partial) Container record for the specified
+// container. Like a map lookup, its second return value is false if
+// the specified container is not in the Queue.
+func (cq *Queue) Get(uuid string) (arvados.Container, bool) {
+       cq.mtx.Lock()
+       defer cq.mtx.Unlock()
+       ctr, ok := cq.current[uuid]
+       if !ok {
+               return arvados.Container{}, false
+       }
+       return ctr.Container, true
+}
+
+// Entries returns all cache entries, keyed by container UUID.
+//
+// The returned threshold indicates the maximum age of any cached data
+// returned in the map. This makes it possible for a scheduler to
+// correctly determine the outcome of a remote process that updates
+// container state. It must first wait for the remote process to exit,
+// then wait for the Queue to start and finish its next Update --
+// i.e., it must wait until threshold > timeProcessExited.
+func (cq *Queue) Entries() (entries map[string]QueueEnt, threshold time.Time) {
+       cq.mtx.Lock()
+       defer cq.mtx.Unlock()
+       entries = make(map[string]QueueEnt, len(cq.current))
+       for uuid, ctr := range cq.current {
+               entries[uuid] = ctr
+       }
+       threshold = cq.updated
+       return
+}
+
+// Update refreshes the cache from the Arvados API. It adds newly
+// queued containers, and updates the state of previously queued
+// containers.
+func (cq *Queue) Update() error {
+       cq.mtx.Lock()
+       cq.dontupdate = map[string]struct{}{}
+       updateStarted := time.Now()
+       cq.mtx.Unlock()
+
+       next, err := cq.poll()
+       if err != nil {
+               return err
+       }
+
+       cq.mtx.Lock()
+       defer cq.mtx.Unlock()
+       for uuid, ctr := range next {
+               if _, dontupdate := cq.dontupdate[uuid]; dontupdate {
+                       // Don't clobber a local update that happened
+                       // after we started polling.
+                       continue
+               }
+               if cur, ok := cq.current[uuid]; !ok {
+                       cq.addEnt(uuid, *ctr)
+               } else {
+                       cur.Container = *ctr
+                       cq.current[uuid] = cur
+               }
+       }
+       for uuid, ent := range cq.current {
+               if _, dontupdate := cq.dontupdate[uuid]; dontupdate {
+                       // Don't expunge an entry that was
+                       // added/updated locally after we started
+                       // polling.
+                       continue
+               } else if _, stillpresent := next[uuid]; !stillpresent {
+                       // Expunge an entry that no longer appears in
+                       // the poll response (evidently it's
+                       // cancelled, completed, deleted, or taken by
+                       // a different dispatcher).
+                       cq.delEnt(uuid, ent.Container.State)
+               }
+       }
+       cq.dontupdate = nil
+       cq.updated = updateStarted
+       cq.notify()
+       return nil
+}
+
+// Caller must have lock.
+func (cq *Queue) delEnt(uuid string, state arvados.ContainerState) {
+       cq.logger.WithFields(logrus.Fields{
+               "ContainerUUID": uuid,
+               "State":         state,
+       }).Info("dropping container from queue")
+       delete(cq.current, uuid)
+}
+
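+// Caller must have lock.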
+func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
+       it, err := cq.chooseType(&ctr)
+       if err != nil && (ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked) {
+               // We assume here that any chooseType error is a hard
+               // error: it wouldn't help to try again, or to leave
+               // it for a different dispatcher process to attempt.
+               errorString := err.Error()
+               logger := cq.logger.WithField("ContainerUUID", ctr.UUID)
+               logger.WithError(err).Warn("cancel container with no suitable instance type")
+               go func() {
+                       if ctr.State == arvados.ContainerStateQueued {
+                               // Can't set runtime error without
+                               // locking first. If Lock() is
+                               // successful, it will call addEnt()
+                               // again itself, and we'll fall
+                               // through to the
+                               // setRuntimeError/Cancel code below.
+                               err := cq.Lock(ctr.UUID)
+                               if err != nil {
+                                       logger.WithError(err).Warn("lock failed")
+                                       // ...and try again on the
+                                       // next Update, if the problem
+                                       // still exists.
+                               }
+                               return
+                       }
+                       var err error
+                       defer func() {
+                               if err == nil {
+                                       return
+                               }
+                               // On failure, check current container
+                               // state, and don't log the error if
+                               // the failure came from losing a
+                               // race.
+                               var latest arvados.Container
+                               cq.client.RequestAndDecode(&latest, "GET", "arvados/v1/containers/"+ctr.UUID, nil, map[string][]string{"select": {"state"}})
+                               if latest.State == arvados.ContainerStateCancelled {
+                                       return
+                               }
+                               logger.WithError(err).Warn("error while trying to cancel unsatisfiable container")
+                       }()
+                       err = cq.setRuntimeError(ctr.UUID, errorString)
+                       if err != nil {
+                               return
+                       }
+                       err = cq.Cancel(ctr.UUID)
+                       if err != nil {
+                               return
+                       }
+               }()
+               return
+       }
+       cq.logger.WithFields(logrus.Fields{
+               "ContainerUUID": ctr.UUID,
+               "State":         ctr.State,
+               "Priority":      ctr.Priority,
+               "InstanceType":  it.Name,
+       }).Info("adding container to queue")
+       cq.current[uuid] = QueueEnt{Container: ctr, InstanceType: it}
+}
+
+// Lock acquires the dispatch lock for the given container.
+func (cq *Queue) Lock(uuid string) error {
+       return cq.apiUpdate(uuid, "lock")
+}
+
+// Unlock releases the dispatch lock for the given container.
+func (cq *Queue) Unlock(uuid string) error {
+       return cq.apiUpdate(uuid, "unlock")
+}
+
+// setRuntimeError sets runtime_status["error"] to the given value.
+// Container should already have state==Locked or Running.
+func (cq *Queue) setRuntimeError(uuid, errorString string) error {
+       return cq.client.RequestAndDecode(nil, "PUT", "arvados/v1/containers/"+uuid, nil, map[string]map[string]map[string]interface{}{
+               "container": {
+                       "runtime_status": {
+                               "error": errorString,
+                       },
+               },
+       })
+}
+
+// Cancel cancels the given container.
+func (cq *Queue) Cancel(uuid string) error {
+       err := cq.client.RequestAndDecode(nil, "PUT", "arvados/v1/containers/"+uuid, nil, map[string]map[string]interface{}{
+               "container": {"state": arvados.ContainerStateCancelled},
+       })
+       if err != nil {
+               return err
+       }
+       cq.mtx.Lock()
+       defer cq.mtx.Unlock()
+       cq.notify()
+       return nil
+}
+
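+// apiUpdate requests the given action ("lock" or "unlock") on a
+// container via the API, then merges the returned state into the
+// local cache. The UUID is added to dontupdate so that a poll
+// already in flight cannot clobber the newer state.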
+func (cq *Queue) apiUpdate(uuid, action string) error {
+       var resp arvados.Container
+       err := cq.client.RequestAndDecode(&resp, "POST", "arvados/v1/containers/"+uuid+"/"+action, nil, nil)
+       if err != nil {
+               return err
+       }
+
+       cq.mtx.Lock()
+       defer cq.mtx.Unlock()
+       if cq.dontupdate != nil {
+               cq.dontupdate[uuid] = struct{}{}
+       }
+       if ent, ok := cq.current[uuid]; !ok {
+               cq.addEnt(uuid, resp)
+       } else {
+               ent.Container.State, ent.Container.Priority, ent.Container.LockedByUUID = resp.State, resp.Priority, resp.LockedByUUID
+               cq.current[uuid] = ent
+       }
+       cq.notify()
+       return nil
+}
+
+func (cq *Queue) poll() (map[string]*arvados.Container, error) {
+       cq.mtx.Lock()
+       size := len(cq.current)
+       auth := cq.auth
+       cq.mtx.Unlock()
+
+       if auth == nil {
+               auth = &arvados.APIClientAuthorization{}
+               err := cq.client.RequestAndDecode(auth, "GET", "arvados/v1/api_client_authorizations/current", nil, nil)
+               if err != nil {
+                       return nil, err
+               }
+               cq.mtx.Lock()
+               cq.auth = auth
+               cq.mtx.Unlock()
+       }
+
+       next := make(map[string]*arvados.Container, size)
+       apply := func(updates []arvados.Container) {
+               for _, upd := range updates {
+                       if next[upd.UUID] == nil {
+                               next[upd.UUID] = &arvados.Container{}
+                       }
+                       *next[upd.UUID] = upd
+               }
+       }
+       selectParam := []string{"uuid", "state", "priority", "runtime_constraints"}
+       limitParam := 1000
+
+       mine, err := cq.fetchAll(arvados.ResourceListParams{
+               Select:  selectParam,
+               Order:   "uuid",
+               Limit:   &limitParam,
+               Count:   "none",
+               Filters: []arvados.Filter{{"locked_by_uuid", "=", auth.UUID}},
+       })
+       if err != nil {
+               return nil, err
+       }
+       apply(mine)
+
+       avail, err := cq.fetchAll(arvados.ResourceListParams{
+               Select:  selectParam,
+               Order:   "uuid",
+               Limit:   &limitParam,
+               Count:   "none",
+               Filters: []arvados.Filter{{"state", "=", arvados.ContainerStateQueued}, {"priority", ">", "0"}},
+       })
+       if err != nil {
+               return nil, err
+       }
+       apply(avail)
+
+       var missing []string
+       cq.mtx.Lock()
+       for uuid, ent := range cq.current {
+               if next[uuid] == nil &&
+                       ent.Container.State != arvados.ContainerStateCancelled &&
+                       ent.Container.State != arvados.ContainerStateComplete {
+                       missing = append(missing, uuid)
+               }
+       }
+       cq.mtx.Unlock()
+
+       for i, page := 0, 20; i < len(missing); i += page {
+               batch := missing[i:]
+               if len(batch) > page {
+                       batch = batch[:page]
+               }
+               ended, err := cq.fetchAll(arvados.ResourceListParams{
+                       Select:  selectParam,
+                       Order:   "uuid",
+                       Count:   "none",
+                       Filters: []arvados.Filter{{"uuid", "in", batch}},
+               })
+               if err != nil {
+                       return nil, err
+               }
+               apply(ended)
+       }
+       return next, nil
+}
+
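+// fetchAll retrieves all pages of a container list. When the caller
+// orders by "uuid", it pages by filtering on uuid > (last item seen)
+// -- keyset pagination -- instead of a growing Offset, so entries
+// added or removed mid-listing cannot shift the page boundaries.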
+func (cq *Queue) fetchAll(initialParams arvados.ResourceListParams) ([]arvados.Container, error) {
+       var results []arvados.Container
+       params := initialParams
+       params.Offset = 0
+       for {
+               // This list variable must be a new one declared
+               // inside the loop: otherwise, items in the API
+               // response would get deep-merged into the items
+               // loaded in previous iterations.
+               var list arvados.ContainerList
+
+               err := cq.client.RequestAndDecode(&list, "GET", "arvados/v1/containers", nil, params)
+               if err != nil {
+                       return nil, err
+               }
+               if len(list.Items) == 0 {
+                       break
+               }
+
+               results = append(results, list.Items...)
+               if params.Order == "uuid" {
+                       params.Filters = append(initialParams.Filters, arvados.Filter{"uuid", ">", list.Items[len(list.Items)-1].UUID})
+               } else {
+                       params.Offset += len(list.Items)
+               }
+       }
+       return results, nil
+}
diff --git a/lib/dispatchcloud/container/queue_test.go b/lib/dispatchcloud/container/queue_test.go
new file mode 100644 (file)
index 0000000..91d6535
--- /dev/null
@@ -0,0 +1,131 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package container
+
+import (
+       "errors"
+       "os"
+       "sync"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "github.com/sirupsen/logrus"
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&IntegrationSuite{})
+
+func logger() logrus.FieldLogger {
+       logger := logrus.StandardLogger()
+       if os.Getenv("ARVADOS_DEBUG") != "" {
+               logger.SetLevel(logrus.DebugLevel)
+       }
+       return logger
+}
+
+type IntegrationSuite struct{}
+
+func (suite *IntegrationSuite) TearDownTest(c *check.C) {
+       err := arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil)
+       c.Check(err, check.IsNil)
+}
+
+func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {
+       typeChooser := func(ctr *arvados.Container) (arvados.InstanceType, error) {
+               return arvados.InstanceType{Name: "testType"}, nil
+       }
+
+       client := arvados.NewClientFromEnv()
+       cq := NewQueue(logger(), nil, typeChooser, client)
+
+       err := cq.Update()
+       c.Check(err, check.IsNil)
+
+       ents, threshold := cq.Entries()
+       c.Check(len(ents), check.Not(check.Equals), 0)
+       c.Check(time.Since(threshold) < time.Minute, check.Equals, true)
+       c.Check(time.Since(threshold) > 0, check.Equals, true)
+
+       _, ok := ents[arvadostest.QueuedContainerUUID]
+       c.Check(ok, check.Equals, true)
+
+       var wg sync.WaitGroup
+       for uuid, ent := range ents {
+               c.Check(ent.Container.UUID, check.Equals, uuid)
+               c.Check(ent.InstanceType.Name, check.Equals, "testType")
+               c.Check(ent.Container.State, check.Equals, arvados.ContainerStateQueued)
+               c.Check(ent.Container.Priority > 0, check.Equals, true)
+
+               ctr, ok := cq.Get(uuid)
+               c.Check(ok, check.Equals, true)
+               c.Check(ctr.UUID, check.Equals, uuid)
+
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       err := cq.Unlock(uuid)
+                       c.Check(err, check.NotNil)
+                       err = cq.Lock(uuid)
+                       c.Check(err, check.IsNil)
+                       ctr, ok := cq.Get(uuid)
+                       c.Check(ok, check.Equals, true)
+                       c.Check(ctr.State, check.Equals, arvados.ContainerStateLocked)
+                       err = cq.Lock(uuid)
+                       c.Check(err, check.NotNil)
+                       err = cq.Unlock(uuid)
+                       c.Check(err, check.IsNil)
+                       ctr, ok = cq.Get(uuid)
+                       c.Check(ok, check.Equals, true)
+                       c.Check(ctr.State, check.Equals, arvados.ContainerStateQueued)
+                       err = cq.Unlock(uuid)
+                       c.Check(err, check.NotNil)
+               }()
+       }
+       wg.Wait()
+
+       err = cq.Cancel(arvadostest.CompletedContainerUUID)
+       c.Check(err, check.ErrorMatches, `.*State cannot change from Complete to Cancelled.*`)
+}
+
+func (suite *IntegrationSuite) TestCancelIfNoInstanceType(c *check.C) {
+       errorTypeChooser := func(ctr *arvados.Container) (arvados.InstanceType, error) {
+               return arvados.InstanceType{}, errors.New("no suitable instance type")
+       }
+
+       client := arvados.NewClientFromEnv()
+       cq := NewQueue(logger(), nil, errorTypeChooser, client)
+
+       var ctr arvados.Container
+       err := client.RequestAndDecode(&ctr, "GET", "arvados/v1/containers/"+arvadostest.QueuedContainerUUID, nil, nil)
+       c.Check(err, check.IsNil)
+       c.Check(ctr.State, check.Equals, arvados.ContainerStateQueued)
+
+       cq.Update()
+
+       // Wait for the cancel operation to take effect. The container
+       // will either have state=Cancelled or disappear from the queue.
+       suite.waitfor(c, time.Second, func() bool {
+               err := client.RequestAndDecode(&ctr, "GET", "arvados/v1/containers/"+arvadostest.QueuedContainerUUID, nil, nil)
+               return err == nil && ctr.State == arvados.ContainerStateCancelled
+       })
+       c.Check(ctr.RuntimeStatus["error"], check.Equals, `no suitable instance type`)
+}
+
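+// waitfor polls fn every timeout/1000 until it returns true or the
+// deadline passes, then asserts that fn's final result is true.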
+func (suite *IntegrationSuite) waitfor(c *check.C, timeout time.Duration, fn func() bool) {
+       defer func() {
+               c.Check(fn(), check.Equals, true)
+       }()
+       deadline := time.Now().Add(timeout)
+       for !fn() && time.Now().Before(deadline) {
+               time.Sleep(timeout / 1000)
+       }
+}
diff --git a/lib/dispatchcloud/dispatcher.go b/lib/dispatchcloud/dispatcher.go
new file mode 100644 (file)
index 0000000..adf1028
--- /dev/null
@@ -0,0 +1,226 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package dispatchcloud
+
+import (
+       "context"
+       "crypto/md5"
+       "encoding/json"
+       "fmt"
+       "net/http"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/container"
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/scheduler"
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/ssh_executor"
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/worker"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/auth"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "github.com/julienschmidt/httprouter"
+       "github.com/prometheus/client_golang/prometheus"
+       "github.com/prometheus/client_golang/prometheus/promhttp"
+       "github.com/sirupsen/logrus"
+       "golang.org/x/crypto/ssh"
+)
+
+const (
+       defaultPollInterval     = time.Second
+       defaultStaleLockTimeout = time.Minute
+)
+
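+// pool is the worker pool as used by the dispatcher: the scheduler's
+// view (scheduler.WorkerPool) plus instance listing, idle-behavior
+// control, and shutdown.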
+type pool interface {
+       scheduler.WorkerPool
+       Instances() []worker.InstanceView
+       SetIdleBehavior(cloud.InstanceID, worker.IdleBehavior) error
+       Stop()
+}
+
+type dispatcher struct {
+       Cluster       *arvados.Cluster
+       Context       context.Context
+       InstanceSetID cloud.InstanceSetID
+
+       logger      logrus.FieldLogger
+       reg         *prometheus.Registry
+       instanceSet cloud.InstanceSet
+       pool        pool
+       queue       scheduler.ContainerQueue
+       httpHandler http.Handler
+       sshKey      ssh.Signer
+
+       setupOnce sync.Once
+       stop      chan struct{}
+       stopped   chan struct{}
+}
+
+// Start starts the dispatcher. Start can be called multiple times
+// with no ill effect.
+func (disp *dispatcher) Start() {
+       disp.setupOnce.Do(disp.setup)
+}
+
+// ServeHTTP implements service.Handler.
+func (disp *dispatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+       disp.Start()
+       disp.httpHandler.ServeHTTP(w, r)
+}
+
+// CheckHealth implements service.Handler.
+func (disp *dispatcher) CheckHealth() error {
+       disp.Start()
+       return nil
+}
+
+// Close stops dispatching containers and releases resources.
+// Typically used in tests.
+func (disp *dispatcher) Close() {
+       disp.Start()
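+       // Non-blocking send: disp.stop has capacity 1, so if a stop
+       // signal is already pending we fall through and just wait for
+       // disp.stopped to close.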
+       select {
+       case disp.stop <- struct{}{}:
+       default:
+       }
+       <-disp.stopped
+}
+
+// Make a worker.Executor for the given instance.
+func (disp *dispatcher) newExecutor(inst cloud.Instance) worker.Executor {
+       exr := ssh_executor.New(inst)
+       exr.SetTargetPort(disp.Cluster.CloudVMs.SSHPort)
+       exr.SetSigners(disp.sshKey)
+       return exr
+}
+
+func (disp *dispatcher) typeChooser(ctr *arvados.Container) (arvados.InstanceType, error) {
+       return ChooseInstanceType(disp.Cluster, ctr)
+}
+
+func (disp *dispatcher) setup() {
+       disp.initialize()
+       go disp.run()
+}
+
+func (disp *dispatcher) initialize() {
+       arvClient := arvados.NewClientFromEnv()
+       if disp.InstanceSetID == "" {
+               if strings.HasPrefix(arvClient.AuthToken, "v2/") {
+                       disp.InstanceSetID = cloud.InstanceSetID(strings.Split(arvClient.AuthToken, "/")[1])
+               } else {
+                       // Use some other string unique to this token
+                       // that doesn't reveal the token itself.
+                       disp.InstanceSetID = cloud.InstanceSetID(fmt.Sprintf("%x", md5.Sum([]byte(arvClient.AuthToken))))
+               }
+       }
+       disp.stop = make(chan struct{}, 1)
+       disp.stopped = make(chan struct{})
+       disp.logger = ctxlog.FromContext(disp.Context)
+
+       if key, err := ssh.ParsePrivateKey([]byte(disp.Cluster.Dispatch.PrivateKey)); err != nil {
+               disp.logger.Fatalf("error parsing configured Dispatch.PrivateKey: %s", err)
+       } else {
+               disp.sshKey = key
+       }
+
+       instanceSet, err := newInstanceSet(disp.Cluster, disp.InstanceSetID, disp.logger)
+       if err != nil {
+               disp.logger.Fatalf("error initializing driver: %s", err)
+       }
+       disp.instanceSet = instanceSet
+       disp.reg = prometheus.NewRegistry()
+       disp.pool = worker.NewPool(disp.logger, arvClient, disp.reg, disp.instanceSet, disp.newExecutor, disp.sshKey.PublicKey(), disp.Cluster)
+       disp.queue = container.NewQueue(disp.logger, disp.reg, disp.typeChooser, arvClient)
+
+       if disp.Cluster.ManagementToken == "" {
+               disp.httpHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+                       http.Error(w, "Management API authentication is not configured", http.StatusForbidden)
+               })
+       } else {
+               mux := httprouter.New()
+               mux.HandlerFunc("GET", "/arvados/v1/dispatch/containers", disp.apiContainers)
+               mux.HandlerFunc("GET", "/arvados/v1/dispatch/instances", disp.apiInstances)
+               mux.HandlerFunc("POST", "/arvados/v1/dispatch/instances/hold", disp.apiInstanceHold)
+               mux.HandlerFunc("POST", "/arvados/v1/dispatch/instances/drain", disp.apiInstanceDrain)
+               mux.HandlerFunc("POST", "/arvados/v1/dispatch/instances/run", disp.apiInstanceRun)
+               metricsH := promhttp.HandlerFor(disp.reg, promhttp.HandlerOpts{
+                       ErrorLog: disp.logger,
+               })
+               mux.Handler("GET", "/metrics", metricsH)
+               mux.Handler("GET", "/metrics.json", metricsH)
+               disp.httpHandler = auth.RequireLiteralToken(disp.Cluster.ManagementToken, mux)
+       }
+}
+
+func (disp *dispatcher) run() {
+       defer close(disp.stopped)
+       defer disp.instanceSet.Stop()
+       defer disp.pool.Stop()
+
+       staleLockTimeout := time.Duration(disp.Cluster.Dispatch.StaleLockTimeout)
+       if staleLockTimeout == 0 {
+               staleLockTimeout = defaultStaleLockTimeout
+       }
+       pollInterval := time.Duration(disp.Cluster.Dispatch.PollInterval)
+       if pollInterval <= 0 {
+               pollInterval = defaultPollInterval
+       }
+       sched := scheduler.New(disp.Context, disp.queue, disp.pool, staleLockTimeout, pollInterval)
+       sched.Start()
+       defer sched.Stop()
+
+       <-disp.stop
+}
+
+// Management API: all active and queued containers.
+func (disp *dispatcher) apiContainers(w http.ResponseWriter, r *http.Request) {
+       var resp struct {
+               Items []container.QueueEnt `json:"items"`
+       }
+       qEntries, _ := disp.queue.Entries()
+       for _, ent := range qEntries {
+               resp.Items = append(resp.Items, ent)
+       }
+       json.NewEncoder(w).Encode(resp)
+}
+
+// Management API: all active instances (cloud VMs).
+func (disp *dispatcher) apiInstances(w http.ResponseWriter, r *http.Request) {
+       var resp struct {
+               Items []worker.InstanceView `json:"items"`
+       }
+       resp.Items = disp.pool.Instances()
+       json.NewEncoder(w).Encode(resp)
+}
+
+// Management API: set idle behavior to "hold" for specified instance.
+func (disp *dispatcher) apiInstanceHold(w http.ResponseWriter, r *http.Request) {
+       disp.apiInstanceIdleBehavior(w, r, worker.IdleBehaviorHold)
+}
+
+// Management API: set idle behavior to "drain" for specified instance.
+func (disp *dispatcher) apiInstanceDrain(w http.ResponseWriter, r *http.Request) {
+       disp.apiInstanceIdleBehavior(w, r, worker.IdleBehaviorDrain)
+}
+
+// Management API: set idle behavior to "run" for specified instance.
+func (disp *dispatcher) apiInstanceRun(w http.ResponseWriter, r *http.Request) {
+       disp.apiInstanceIdleBehavior(w, r, worker.IdleBehaviorRun)
+}
+
+func (disp *dispatcher) apiInstanceIdleBehavior(w http.ResponseWriter, r *http.Request, want worker.IdleBehavior) {
+       id := cloud.InstanceID(r.FormValue("instance_id"))
+       if id == "" {
+               httpserver.Error(w, "instance_id parameter not provided", http.StatusBadRequest)
+               return
+       }
+       err := disp.pool.SetIdleBehavior(id, want)
+       if err != nil {
+               httpserver.Error(w, err.Error(), http.StatusNotFound)
+               return
+       }
+}
diff --git a/lib/dispatchcloud/dispatcher_test.go b/lib/dispatchcloud/dispatcher_test.go
new file mode 100644 (file)
index 0000000..36b0602
--- /dev/null
@@ -0,0 +1,272 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package dispatchcloud
+
+import (
+       "context"
+       "encoding/json"
+       "io/ioutil"
+       "math/rand"
+       "net/http"
+       "net/http/httptest"
+       "os"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/test"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "golang.org/x/crypto/ssh"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&DispatcherSuite{})
+
+type DispatcherSuite struct {
+       ctx        context.Context
+       cancel     context.CancelFunc
+       cluster    *arvados.Cluster
+       stubDriver *test.StubDriver
+       disp       *dispatcher
+}
+
+func (s *DispatcherSuite) SetUpTest(c *check.C) {
+       s.ctx, s.cancel = context.WithCancel(context.Background())
+       s.ctx = ctxlog.Context(s.ctx, ctxlog.TestLogger(c))
+       dispatchpub, _ := test.LoadTestKey(c, "test/sshkey_dispatch")
+       dispatchprivraw, err := ioutil.ReadFile("test/sshkey_dispatch")
+       c.Assert(err, check.IsNil)
+
+       _, hostpriv := test.LoadTestKey(c, "test/sshkey_vm")
+       s.stubDriver = &test.StubDriver{
+               HostKey:                   hostpriv,
+               AuthorizedKeys:            []ssh.PublicKey{dispatchpub},
+               ErrorRateDestroy:          0.1,
+               MinTimeBetweenCreateCalls: time.Millisecond,
+       }
+
+       s.cluster = &arvados.Cluster{
+               CloudVMs: arvados.CloudVMs{
+                       Driver:          "test",
+                       SyncInterval:    arvados.Duration(10 * time.Millisecond),
+                       TimeoutIdle:     arvados.Duration(150 * time.Millisecond),
+                       TimeoutBooting:  arvados.Duration(150 * time.Millisecond),
+                       TimeoutProbe:    arvados.Duration(15 * time.Millisecond),
+                       TimeoutShutdown: arvados.Duration(5 * time.Millisecond),
+               },
+               Dispatch: arvados.Dispatch{
+                       PrivateKey:         string(dispatchprivraw),
+                       PollInterval:       arvados.Duration(5 * time.Millisecond),
+                       ProbeInterval:      arvados.Duration(5 * time.Millisecond),
+                       StaleLockTimeout:   arvados.Duration(5 * time.Millisecond),
+                       MaxProbesPerSecond: 1000,
+               },
+               InstanceTypes: arvados.InstanceTypeMap{
+                       test.InstanceType(1).Name:  test.InstanceType(1),
+                       test.InstanceType(2).Name:  test.InstanceType(2),
+                       test.InstanceType(3).Name:  test.InstanceType(3),
+                       test.InstanceType(4).Name:  test.InstanceType(4),
+                       test.InstanceType(6).Name:  test.InstanceType(6),
+                       test.InstanceType(8).Name:  test.InstanceType(8),
+                       test.InstanceType(16).Name: test.InstanceType(16),
+               },
+               NodeProfiles: map[string]arvados.NodeProfile{
+                       "*": {
+                               Controller:    arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_API_HOST")},
+                               DispatchCloud: arvados.SystemServiceInstance{Listen: ":"},
+                       },
+               },
+       }
+       s.disp = &dispatcher{
+               Cluster: s.cluster,
+               Context: s.ctx,
+       }
+       // Test cases can modify s.cluster before calling
+       // initialize(), and then modify private state before calling
+       // go run().
+}
+
+func (s *DispatcherSuite) TearDownTest(c *check.C) {
+       s.cancel()
+       s.disp.Close()
+}
+
+// TestDispatchToStubDriver checks that the dispatcher wires everything
+// together effectively. It uses a real scheduler and worker pool with
+// a fake queue and cloud driver. The fake cloud driver injects
+// artificial errors in order to exercise a variety of code paths.
+func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
+       drivers["test"] = s.stubDriver
+       s.disp.setupOnce.Do(s.disp.initialize)
+       queue := &test.Queue{
+               ChooseType: func(ctr *arvados.Container) (arvados.InstanceType, error) {
+                       return ChooseInstanceType(s.cluster, ctr)
+               },
+       }
+       for i := 0; i < 200; i++ {
+               queue.Containers = append(queue.Containers, arvados.Container{
+                       UUID:     test.ContainerUUID(i + 1),
+                       State:    arvados.ContainerStateQueued,
+                       Priority: int64(i%20 + 1),
+                       RuntimeConstraints: arvados.RuntimeConstraints{
+                               RAM:   int64(i%3+1) << 30,
+                               VCPUs: i%8 + 1,
+                       },
+               })
+       }
+       s.disp.queue = queue
+
+       var mtx sync.Mutex
+       done := make(chan struct{})
+       waiting := map[string]struct{}{}
+       for _, ctr := range queue.Containers {
+               waiting[ctr.UUID] = struct{}{}
+       }
+       executeContainer := func(ctr arvados.Container) int {
+               mtx.Lock()
+               defer mtx.Unlock()
+               if _, ok := waiting[ctr.UUID]; !ok {
+                       c.Logf("container completed twice: %s -- perhaps completed after stub instance was killed?", ctr.UUID)
+                       return 1
+               }
+               delete(waiting, ctr.UUID)
+               if len(waiting) == 0 {
+                       close(done)
+               }
+               return int(rand.Uint32() & 0x3)
+       }
+       n := 0
+       s.stubDriver.Queue = queue
+       s.stubDriver.SetupVM = func(stubvm *test.StubVM) {
+               n++
+               stubvm.Boot = time.Now().Add(time.Duration(rand.Int63n(int64(5 * time.Millisecond))))
+               stubvm.CrunchRunDetachDelay = time.Duration(rand.Int63n(int64(10 * time.Millisecond)))
+               stubvm.ExecuteContainer = executeContainer
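+               // Inject failures: every 7th VM is broken, another 1/7
+               // lack crunch-run entirely, and the rest crash with 10%
+               // probability per container.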
+               switch n % 7 {
+               case 0:
+                       stubvm.Broken = time.Now().Add(time.Duration(rand.Int63n(90)) * time.Millisecond)
+               case 1:
+                       stubvm.CrunchRunMissing = true
+               default:
+                       stubvm.CrunchRunCrashRate = 0.1
+               }
+       }
+
+       start := time.Now()
+       go s.disp.run()
+       err := s.disp.CheckHealth()
+       c.Check(err, check.IsNil)
+
+       select {
+       case <-done:
+               c.Logf("containers finished (%s), waiting for instances to shutdown and queue to clear", time.Since(start))
+       case <-time.After(10 * time.Second):
+               c.Fatalf("timed out; still waiting for %d containers: %q", len(waiting), waiting)
+       }
+
+       deadline := time.Now().Add(5 * time.Second)
+       for range time.NewTicker(10 * time.Millisecond).C {
+               insts, err := s.stubDriver.InstanceSets()[0].Instances(nil)
+               c.Check(err, check.IsNil)
+               queue.Update()
+               ents, _ := queue.Entries()
+               if len(ents) == 0 && len(insts) == 0 {
+                       break
+               }
+               if time.Now().After(deadline) {
+                       c.Fatalf("timed out with %d containers (%v), %d instances (%+v)", len(ents), ents, len(insts), insts)
+               }
+       }
+}
+
+func (s *DispatcherSuite) TestAPIPermissions(c *check.C) {
+       s.cluster.ManagementToken = "abcdefgh"
+       drivers["test"] = s.stubDriver
+       s.disp.setupOnce.Do(s.disp.initialize)
+       s.disp.queue = &test.Queue{}
+       go s.disp.run()
+
+       for _, token := range []string{"abc", ""} {
+               req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
+               if token != "" {
+                       req.Header.Set("Authorization", "Bearer "+token)
+               }
+               resp := httptest.NewRecorder()
+               s.disp.ServeHTTP(resp, req)
+               if token == "" {
+                       c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
+               } else {
+                       c.Check(resp.Code, check.Equals, http.StatusForbidden)
+               }
+       }
+}
+
+func (s *DispatcherSuite) TestAPIDisabled(c *check.C) {
+       s.cluster.ManagementToken = ""
+       drivers["test"] = s.stubDriver
+       s.disp.setupOnce.Do(s.disp.initialize)
+       s.disp.queue = &test.Queue{}
+       go s.disp.run()
+
+       for _, token := range []string{"abc", ""} {
+               req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
+               if token != "" {
+                       req.Header.Set("Authorization", "Bearer "+token)
+               }
+               resp := httptest.NewRecorder()
+               s.disp.ServeHTTP(resp, req)
+               c.Check(resp.Code, check.Equals, http.StatusForbidden)
+       }
+}
+
+func (s *DispatcherSuite) TestInstancesAPI(c *check.C) {
+       s.cluster.ManagementToken = "abcdefgh"
+       s.cluster.CloudVMs.TimeoutBooting = arvados.Duration(time.Second)
+       drivers["test"] = s.stubDriver
+       s.disp.setupOnce.Do(s.disp.initialize)
+       s.disp.queue = &test.Queue{}
+       go s.disp.run()
+
+       type instance struct {
+               Instance             string
+               WorkerState          string `json:"worker_state"`
+               Price                float64
+               LastContainerUUID    string `json:"last_container_uuid"`
+               ArvadosInstanceType  string `json:"arvados_instance_type"`
+               ProviderInstanceType string `json:"provider_instance_type"`
+       }
+       type instancesResponse struct {
+               Items []instance
+       }
+       getInstances := func() instancesResponse {
+               req := httptest.NewRequest("GET", "/arvados/v1/dispatch/instances", nil)
+               req.Header.Set("Authorization", "Bearer abcdefgh")
+               resp := httptest.NewRecorder()
+               s.disp.ServeHTTP(resp, req)
+               var sr instancesResponse
+               c.Check(resp.Code, check.Equals, http.StatusOK)
+               err := json.Unmarshal(resp.Body.Bytes(), &sr)
+               c.Check(err, check.IsNil)
+               return sr
+       }
+
+       sr := getInstances()
+       c.Check(len(sr.Items), check.Equals, 0)
+
+       ch := s.disp.pool.Subscribe()
+       defer s.disp.pool.Unsubscribe(ch)
+       ok := s.disp.pool.Create(test.InstanceType(1))
+       c.Check(ok, check.Equals, true)
+       <-ch
+
+       sr = getInstances()
+       c.Assert(len(sr.Items), check.Equals, 1)
+       c.Check(sr.Items[0].Instance, check.Matches, "stub.*")
+       c.Check(sr.Items[0].WorkerState, check.Equals, "booting")
+       c.Check(sr.Items[0].Price, check.Equals, 0.123)
+       c.Check(sr.Items[0].LastContainerUUID, check.Equals, "")
+       c.Check(sr.Items[0].ProviderInstanceType, check.Equals, test.InstanceType(1).ProviderType)
+       c.Check(sr.Items[0].ArvadosInstanceType, check.Equals, test.InstanceType(1).Name)
+}
diff --git a/lib/dispatchcloud/driver.go b/lib/dispatchcloud/driver.go
new file mode 100644 (file)
index 0000000..0343f85
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package dispatchcloud
+
+import (
+       "fmt"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "git.curoverse.com/arvados.git/lib/cloud/azure"
+       "git.curoverse.com/arvados.git/lib/cloud/ec2"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/sirupsen/logrus"
+)
+
+var drivers = map[string]cloud.Driver{
+       "azure": azure.Driver,
+       "ec2":   ec2.Driver,
+}
+
+func newInstanceSet(cluster *arvados.Cluster, setID cloud.InstanceSetID, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
+       driver, ok := drivers[cluster.CloudVMs.Driver]
+       if !ok {
+               return nil, fmt.Errorf("unsupported cloud driver %q", cluster.CloudVMs.Driver)
+       }
+       return driver.InstanceSet(cluster.CloudVMs.DriverParameters, setID, logger)
+}
diff --git a/lib/dispatchcloud/gocheck_test.go b/lib/dispatchcloud/gocheck_test.go
new file mode 100644 (file)
index 0000000..22f89f0
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package dispatchcloud
+
+import (
+       "testing"
+
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
diff --git a/lib/dispatchcloud/logger.go b/lib/dispatchcloud/logger.go
new file mode 100644 (file)
index 0000000..90bb6ca
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package dispatchcloud
+
+import (
+       "sync"
+       "time"
+)
+
+type logger interface {
+       Printf(string, ...interface{})
+       Warnf(string, ...interface{})
+       Debugf(string, ...interface{})
+}
+
+var nextSpam = map[string]time.Time{}
+var nextSpamMtx sync.Mutex
+
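+// unspam reports whether msg may be logged now, allowing each
+// distinct message through at most once per minute.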
+func unspam(msg string) bool {
+       nextSpamMtx.Lock()
+       defer nextSpamMtx.Unlock()
+       if nextSpam[msg].Before(time.Now()) {
+               nextSpam[msg] = time.Now().Add(time.Minute)
+               return true
+       }
+       return false
+}
diff --git a/lib/dispatchcloud/node_size.go b/lib/dispatchcloud/node_size.go
new file mode 100644 (file)
index 0000000..6fb46b5
--- /dev/null
@@ -0,0 +1,132 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package dispatchcloud
+
+import (
+       "errors"
+       "regexp"
+       "sort"
+       "strconv"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+var ErrInstanceTypesNotConfigured = errors.New("site configuration does not list any instance types")
+
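+// discountConfiguredRAMPercent is the percentage of a node type's
+// nominal RAM that is assumed to be unavailable to the container;
+// requested RAM is inflated accordingly when choosing a type.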
+var discountConfiguredRAMPercent = 5
+
+// ConstraintsNotSatisfiableError includes a list of available instance types
+// to be reported back to the user.
+type ConstraintsNotSatisfiableError struct {
+       error
+       AvailableTypes []arvados.InstanceType
+}
+
+var pdhRegexp = regexp.MustCompile(`^[0-9a-f]{32}\+(\d+)$`)
+
+// estimateDockerImageSize estimates how much disk space will be used
+// by a Docker image, given the PDH of a collection containing a
+// Docker image that was created by "arv-keepdocker". Returns the
+// estimated number of bytes of disk space that should be reserved.
+func estimateDockerImageSize(collectionPDH string) int64 {
+       m := pdhRegexp.FindStringSubmatch(collectionPDH)
+       if m == nil {
+               return 0
+       }
+       n, err := strconv.ParseInt(m[1], 10, 64)
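+       // 122 = 80 (filename and file segment) + 42 (one block
+       // identifier), the minimum manifest size for a one-block image.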
+       if err != nil || n < 122 {
+               return 0
+       }
+       // To avoid having to fetch the collection, take advantage of
+       // the fact that the manifest storing a container image
+       // uploaded by arv-keepdocker has a predictable format, which
+       // allows us to estimate the size of the image based on just
+       // the size of the manifest.
+       //
+       // Use the following heuristic:
+       // - Start with the length of the manifest (n)
+       // - Subtract 80 characters for the filename and file segment
+       // - Divide by 42 to get the number of block identifiers ('hash+size ' is 32+1+8+1)
+       // - Assume each block is full, multiply by 64 MiB
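+       //
+       // Example: a 342-byte manifest gives (342-80)/42 = 6 blocks,
+       // i.e. an estimate of 6 * 64 MiB = 402653184 bytes.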
+       return ((n - 80) / 42) * (64 * 1024 * 1024)
+}
+
+// EstimateScratchSpace estimates how much available disk space (in
+// bytes) is needed to run the container by summing the capacity
+// requested by 'tmp' mounts plus disk space required to load the
+// Docker image.
+func EstimateScratchSpace(ctr *arvados.Container) (needScratch int64) {
+       for _, m := range ctr.Mounts {
+               if m.Kind == "tmp" {
+                       needScratch += m.Capacity
+               }
+       }
+
+       // Account for disk space usage by Docker, assuming the following behavior:
+       // - Layer tarballs are buffered to disk during "docker load".
+       // - Individual layer tarballs are extracted from the buffered
+       // copy to the filesystem.
+       dockerImageSize := estimateDockerImageSize(ctr.ContainerImage)
+
+       // The buffer is only needed during image load, so make sure
+       // the baseline scratch space at least covers dockerImageSize,
+       // and assume it will be released to the job afterwards.
+       if needScratch < dockerImageSize {
+               needScratch = dockerImageSize
+       }
+
+       // Now reserve space for the extracted image on disk.
+       needScratch += dockerImageSize
+
+       return
+}
+
+// ChooseInstanceType returns the cheapest available
+// arvados.InstanceType big enough to run ctr.
+func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvados.InstanceType, err error) {
+       if len(cc.InstanceTypes) == 0 {
+               err = ErrInstanceTypesNotConfigured
+               return
+       }
+
+       needScratch := EstimateScratchSpace(ctr)
+
+       needVCPUs := ctr.RuntimeConstraints.VCPUs
+
+       needRAM := ctr.RuntimeConstraints.RAM + ctr.RuntimeConstraints.KeepCacheRAM
+       needRAM = (needRAM * 100) / int64(100-discountConfiguredRAMPercent)
+
+       ok := false
+       for _, it := range cc.InstanceTypes {
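+               // Cases with empty bodies reject this type; only the
+               // default case accepts it as the new best candidate.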
+               switch {
+               case ok && it.Price > best.Price:
+               case int64(it.Scratch) < needScratch:
+               case int64(it.RAM) < needRAM:
+               case it.VCPUs < needVCPUs:
+               case it.Preemptible != ctr.SchedulingParameters.Preemptible:
+               case it.Price == best.Price && (it.RAM < best.RAM || it.VCPUs < best.VCPUs):
+                       // Equal price, but worse specs
+               default:
+                       // Lower price || (same price && better specs)
+                       best = it
+                       ok = true
+               }
+       }
+       if !ok {
+               availableTypes := make([]arvados.InstanceType, 0, len(cc.InstanceTypes))
+               for _, t := range cc.InstanceTypes {
+                       availableTypes = append(availableTypes, t)
+               }
+               sort.Slice(availableTypes, func(a, b int) bool {
+                       return availableTypes[a].Price < availableTypes[b].Price
+               })
+               err = ConstraintsNotSatisfiableError{
+                       errors.New("constraints not satisfiable by any configured instance type"),
+                       availableTypes,
+               }
+               return
+       }
+       return
+}
diff --git a/lib/dispatchcloud/node_size_test.go b/lib/dispatchcloud/node_size_test.go
new file mode 100644 (file)
index 0000000..eef86f7
--- /dev/null
@@ -0,0 +1,143 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package dispatchcloud
+
+import (
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&NodeSizeSuite{})
+
+const GiB = arvados.ByteSize(1 << 30)
+
+type NodeSizeSuite struct{}
+
+func (*NodeSizeSuite) TestChooseNotConfigured(c *check.C) {
+       _, err := ChooseInstanceType(&arvados.Cluster{}, &arvados.Container{
+               RuntimeConstraints: arvados.RuntimeConstraints{
+                       RAM:   1234567890,
+                       VCPUs: 2,
+               },
+       })
+       c.Check(err, check.Equals, ErrInstanceTypesNotConfigured)
+}
+
+func (*NodeSizeSuite) TestChooseUnsatisfiable(c *check.C) {
+       checkUnsatisfiable := func(ctr *arvados.Container) {
+               _, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: map[string]arvados.InstanceType{
+                       "small1": {Price: 1.1, RAM: 1000000000, VCPUs: 2, Name: "small1"},
+                       "small2": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Name: "small2"},
+                       "small4": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Name: "small4", Scratch: GiB},
+               }}, ctr)
+               c.Check(err, check.FitsTypeOf, ConstraintsNotSatisfiableError{})
+       }
+
+       for _, rc := range []arvados.RuntimeConstraints{
+               {RAM: 9876543210, VCPUs: 2},
+               {RAM: 1234567890, VCPUs: 20},
+               {RAM: 1234567890, VCPUs: 2, KeepCacheRAM: 9876543210},
+       } {
+               checkUnsatisfiable(&arvados.Container{RuntimeConstraints: rc})
+       }
+       checkUnsatisfiable(&arvados.Container{
+               Mounts:             map[string]arvados.Mount{"/tmp": {Kind: "tmp", Capacity: int64(2 * GiB)}},
+               RuntimeConstraints: arvados.RuntimeConstraints{RAM: 12345, VCPUs: 1},
+       })
+}
+
+func (*NodeSizeSuite) TestChoose(c *check.C) {
+       for _, menu := range []map[string]arvados.InstanceType{
+               {
+                       "costly": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
+                       "best":   {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+                       "small":  {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
+               },
+               {
+                       "costly":     {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
+                       "goodenough": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "goodenough"},
+                       "best":       {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+                       "small":      {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
+               },
+               {
+                       "small":      {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
+                       "goodenough": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "goodenough"},
+                       "best":       {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+                       "costly":     {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
+               },
+               {
+                       "small":  {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: GiB, Name: "small"},
+                       "nearly": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: GiB, Name: "nearly"},
+                       "best":   {Price: 3.3, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+                       "costly": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
+               },
+       } {
+               best, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu}, &arvados.Container{
+                       Mounts: map[string]arvados.Mount{
+                               "/tmp": {Kind: "tmp", Capacity: 2 * int64(GiB)},
+                       },
+                       RuntimeConstraints: arvados.RuntimeConstraints{
+                               VCPUs:        2,
+                               RAM:          987654321,
+                               KeepCacheRAM: 123456789,
+                       },
+               })
+               c.Check(err, check.IsNil)
+               c.Check(best.Name, check.Equals, "best")
+               c.Check(best.RAM >= 1234567890, check.Equals, true)
+               c.Check(best.VCPUs >= 2, check.Equals, true)
+               c.Check(best.Scratch >= 2*GiB, check.Equals, true)
+       }
+}
+
+func (*NodeSizeSuite) TestChoosePreemptible(c *check.C) {
+       menu := map[string]arvados.InstanceType{
+               "costly":      {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Preemptible: true, Name: "costly"},
+               "almost best": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "almost best"},
+               "best":        {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Preemptible: true, Name: "best"},
+               "small":       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Preemptible: true, Name: "small"},
+       }
+       best, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu}, &arvados.Container{
+               Mounts: map[string]arvados.Mount{
+                       "/tmp": {Kind: "tmp", Capacity: 2 * int64(GiB)},
+               },
+               RuntimeConstraints: arvados.RuntimeConstraints{
+                       VCPUs:        2,
+                       RAM:          987654321,
+                       KeepCacheRAM: 123456789,
+               },
+               SchedulingParameters: arvados.SchedulingParameters{
+                       Preemptible: true,
+               },
+       })
+       c.Check(err, check.IsNil)
+       c.Check(best.Name, check.Equals, "best")
+       c.Check(best.RAM >= 1234567890, check.Equals, true)
+       c.Check(best.VCPUs >= 2, check.Equals, true)
+       c.Check(best.Scratch >= 2*GiB, check.Equals, true)
+       c.Check(best.Preemptible, check.Equals, true)
+}
+
+func (*NodeSizeSuite) TestScratchForDockerImage(c *check.C) {
+       n := EstimateScratchSpace(&arvados.Container{
+               ContainerImage: "d5025c0f29f6eef304a7358afa82a822+342",
+       })
+       // Actual image is 371.1 MiB (according to workbench)
+       // Estimated size is 384 MiB (402653184 bytes)
+       // Want to reserve 2x the estimated size, so 805306368 bytes
+       c.Check(n, check.Equals, int64(805306368))
+
+       n = EstimateScratchSpace(&arvados.Container{
+               ContainerImage: "d5025c0f29f6eef304a7358afa82a822+-342",
+       })
+       // Parse error will return 0
+       c.Check(n, check.Equals, int64(0))
+
+       n = EstimateScratchSpace(&arvados.Container{
+               ContainerImage: "d5025c0f29f6eef304a7358afa82a822+34",
+       })
+       // Short manifest will return 0
+       c.Check(n, check.Equals, int64(0))
+}
diff --git a/lib/dispatchcloud/readme.go b/lib/dispatchcloud/readme.go
new file mode 100644 (file)
index 0000000..c8491fb
--- /dev/null
@@ -0,0 +1,70 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package dispatchcloud
+
+// A dispatcher comprises a container queue, a scheduler, a worker
+// pool, a remote command executor, and a cloud driver.
+// 1. Choose a provider.
+// 2. Start a worker pool.
+// 3. Start a container queue.
+// 4. Run the scheduler's stale-lock fixer.
+// 5. Run the scheduler's mapper.
+// 6. Run the scheduler's syncer.
+// 7. Wait for updates to the container queue or worker pool.
+// 8. Repeat from 5.
+//
+//
+// A cloud driver creates new cloud VM instances and gets the latest
+// list of instances. The returned instances are caches/proxies for
+// the provider's metadata and control interfaces (get IP address,
+// update tags, shutdown).
+//
+//
+// A worker pool tracks workers' instance types and readiness states
+// (available to do work now, booting, suffering a temporary network
+// outage, shutting down). It loads internal state from the cloud
+// provider's list of instances at startup, and syncs periodically
+// after that.
+//
+//
+// An executor maintains a multiplexed SSH connection to a cloud
+// instance, retrying/reconnecting as needed, so the worker pool can
+// execute commands. It asks the cloud driver's instance to verify its
+// SSH public key once when first connecting, and again later if the
+// key changes.
+//
+//
+// A container queue tracks the known state (according to
+// arvados-controller) of each container of interest -- i.e., queued,
+// or locked/running using our own dispatch token. It also proxies the
+// dispatcher's lock/unlock/cancel requests to the controller. It
+// handles concurrent refresh and update operations without exposing
+// out-of-order updates to its callers. (It drops any new information
+// that might have originated before its own most recent
+// lock/unlock/cancel operation.)
+//
+//
+// The scheduler's stale-lock fixer waits for any already-locked
+// containers (i.e., locked by a prior dispatcher process) to appear
+// on workers as the worker pool recovers its state. It
+// unlocks/requeues any that still remain when all workers are
+// recovered or shutdown, or its timer expires.
+//
+//
+// The scheduler's mapper chooses which containers to assign to which
+// idle workers, and decides what to do when there are not enough idle
+// workers (including shutting down some idle nodes).
+//
+//
+// The scheduler's syncer updates state to Cancelled when a running
+// container process dies without finalizing its entry in the
+// controller database. It also calls the worker pool to kill
+// containers that have priority=0 while locked or running.
+//
+//
+// An instance set proxy wraps a driver's instance set with
+// rate-limiting logic. After the wrapped instance set receives a
+// cloud.RateLimitError, the proxy starts returning errors to callers
+// immediately without calling through to the wrapped instance set.
diff --git a/lib/dispatchcloud/readme_states.txt b/lib/dispatchcloud/readme_states.txt
new file mode 100644 (file)
index 0000000..b654bbf
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# cpan -I -T install Graph::Easy
+# (eval `perl -I ~/perl5/lib/perl5 -Mlocal::lib`; cpan -T install Graph::Easy)
+# graph-easy --as=svg < readme_states.txt
+
+[Nonexistent] - appears in cloud list -> [Unknown]
+[Nonexistent] - create() returns ID -> [Booting]
+[Unknown] - create() returns ID -> [Booting]
+[Unknown] - boot timeout -> [Shutdown]
+[Booting] - boot+run probes succeed -> [Idle]
+[Idle] - idle timeout -> [Shutdown]
+[Idle] - probe timeout -> [Shutdown]
+[Idle] - want=drain -> [Shutdown]
+[Idle] - container starts -> [Running]
+[Running] - container ends -> [Idle]
+[Running] - container ends, want=drain -> [Shutdown]
+[Shutdown] - instance disappears from cloud -> [Gone]
+
+# Layouter fails if we add these
+#[Hold] - want=run -> [Booting]
+#[Hold] - want=drain -> [Shutdown]
+#[Running] - container ends, want=hold -> [Hold]
+#[Unknown] - want=hold -> [Hold]
+#[Booting] - want=hold -> [Hold]
+#[Idle] - want=hold -> [Hold]
+
+# Not worth saying?
+#[Booting] - boot probe succeeds, run probe fails -> [Booting]
diff --git a/lib/dispatchcloud/scheduler/fix_stale_locks.go b/lib/dispatchcloud/scheduler/fix_stale_locks.go
new file mode 100644 (file)
index 0000000..148b653
--- /dev/null
@@ -0,0 +1,59 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package scheduler
+
+import (
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/worker"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+// fixStaleLocks waits for any already-locked containers (i.e., locked
+// by a prior dispatcher process) to appear on workers as the worker
+// pool recovers its state. It unlocks any that still remain when all
+// workers are recovered or shutdown, or its timer
+// (sch.staleLockTimeout) expires.
+func (sch *Scheduler) fixStaleLocks() {
+       wp := sch.pool.Subscribe()
+       defer sch.pool.Unsubscribe(wp)
+
+       var stale []string
+       timeout := time.NewTimer(sch.staleLockTimeout)
+waiting:
+       for sch.pool.CountWorkers()[worker.StateUnknown] > 0 {
+               running := sch.pool.Running()
+               qEntries, _ := sch.queue.Entries()
+
+               stale = nil
+               for uuid, ent := range qEntries {
+                       if ent.Container.State != arvados.ContainerStateLocked {
+                               continue
+                       }
+                       if _, running := running[uuid]; running {
+                               continue
+                       }
+                       stale = append(stale, uuid)
+               }
+               if len(stale) == 0 {
+                       return
+               }
+
+               select {
+               case <-wp:
+               case <-timeout.C:
+                       // Give up.
+                       break waiting
+               }
+       }
+
+       for _, uuid := range stale {
+               err := sch.queue.Unlock(uuid)
+               if err != nil {
+                       sch.logger.Warnf("Unlock %s: %s", uuid, err)
+               }
+       }
+}
diff --git a/lib/dispatchcloud/scheduler/gocheck_test.go b/lib/dispatchcloud/scheduler/gocheck_test.go
new file mode 100644 (file)
index 0000000..558c60f
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package scheduler
+
+import (
+       "testing"
+
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
diff --git a/lib/dispatchcloud/scheduler/interfaces.go b/lib/dispatchcloud/scheduler/interfaces.go
new file mode 100644 (file)
index 0000000..18cdc94
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package scheduler
+
+import (
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/container"
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/worker"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+// A ContainerQueue is a set of containers that need to be started or
+// stopped. Implemented by container.Queue and test stubs. See
+// container.Queue method documentation for details.
+type ContainerQueue interface {
+       Entries() (entries map[string]container.QueueEnt, updated time.Time)
+       Lock(uuid string) error
+       Unlock(uuid string) error
+       Cancel(uuid string) error
+       Forget(uuid string)
+       Get(uuid string) (arvados.Container, bool)
+       Subscribe() <-chan struct{}
+       Unsubscribe(<-chan struct{})
+       Update() error
+}
+
+// A WorkerPool asynchronously starts and stops worker VMs, and starts
+// and stops containers on them. Implemented by worker.Pool and test
+// stubs. See worker.Pool method documentation for details.
+type WorkerPool interface {
+       Running() map[string]time.Time
+       Unallocated() map[arvados.InstanceType]int
+       CountWorkers() map[worker.State]int
+       AtQuota() bool
+       Create(arvados.InstanceType) bool
+       Shutdown(arvados.InstanceType) bool
+       StartContainer(arvados.InstanceType, arvados.Container) bool
+       KillContainer(uuid string)
+       Subscribe() <-chan struct{}
+       Unsubscribe(<-chan struct{})
+}
diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
new file mode 100644 (file)
index 0000000..d102d2f
--- /dev/null
@@ -0,0 +1,187 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package scheduler
+
+import (
+       "sort"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/container"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/sirupsen/logrus"
+)
+
+func (sch *Scheduler) runQueue() {
+       unsorted, _ := sch.queue.Entries()
+       sorted := make([]container.QueueEnt, 0, len(unsorted))
+       for _, ent := range unsorted {
+               sorted = append(sorted, ent)
+       }
+       sort.Slice(sorted, func(i, j int) bool {
+               return sorted[i].Container.Priority > sorted[j].Container.Priority
+       })
+
+       running := sch.pool.Running()
+       unalloc := sch.pool.Unallocated()
+
+       sch.logger.WithFields(logrus.Fields{
+               "Containers": len(sorted),
+               "Processes":  len(running),
+       }).Debug("runQueue")
+
+       dontstart := map[arvados.InstanceType]bool{}
+       var overquota []container.QueueEnt // entries that are unmappable because of worker pool quota
+
+tryrun:
+       for i, ctr := range sorted {
+               ctr, it := ctr.Container, ctr.InstanceType
+               logger := sch.logger.WithFields(logrus.Fields{
+                       "ContainerUUID": ctr.UUID,
+                       "InstanceType":  it.Name,
+               })
+               if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
+                       continue
+               }
+               switch ctr.State {
+               case arvados.ContainerStateQueued:
+                       if unalloc[it] < 1 && sch.pool.AtQuota() {
+                               logger.Debug("not locking: AtQuota and no unalloc workers")
+                               overquota = sorted[i:]
+                               break tryrun
+                       }
+                       go sch.lockContainer(logger, ctr.UUID)
+                       unalloc[it]--
+               case arvados.ContainerStateLocked:
+                       if unalloc[it] > 0 {
+                               unalloc[it]--
+                       } else if sch.pool.AtQuota() {
+                               logger.Debug("not starting: AtQuota and no unalloc workers")
+                               overquota = sorted[i:]
+                               break tryrun
+                       } else {
+                               logger.Info("creating new instance")
+                               if !sch.pool.Create(it) {
+                                       // (Note pool.Create works
+                                       // asynchronously and logs its
+                                       // own failures, so we don't
+                                       // need to log this as a
+                                       // failure.)
+
+                                       sch.queue.Unlock(ctr.UUID)
+                                       // Don't let lower-priority
+                                       // containers starve this one
+                                       // by keeping idle
+                                       // workers alive on different
+                                       // instance types.  TODO:
+                                       // avoid getting starved here
+                                       // if instances of a specific
+                                       // type always fail.
+                                       overquota = sorted[i:]
+                                       break tryrun
+                               }
+                       }
+
+                       if dontstart[it] {
+                               // We already tried & failed to start
+                               // a higher-priority container on the
+                               // same instance type. Don't let this
+                               // one sneak in ahead of it.
+                       } else if sch.pool.StartContainer(it, ctr) {
+                               // Success.
+                       } else {
+                               dontstart[it] = true
+                       }
+               }
+       }
+
+       if len(overquota) > 0 {
+               // Unlock any containers that are unmappable while
+               // we're at quota.
+               for _, ctr := range overquota {
+                       ctr := ctr.Container
+                       if ctr.State == arvados.ContainerStateLocked {
+                               logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
+                               logger.Debug("unlock because pool capacity is used by higher priority containers")
+                               err := sch.queue.Unlock(ctr.UUID)
+                               if err != nil {
+                                       logger.WithError(err).Warn("error unlocking")
+                               }
+                       }
+               }
+               // Shut down idle workers that didn't get any
+               // containers mapped onto them before we hit quota.
+               for it, n := range unalloc {
+                       if n < 1 {
+                               continue
+                       }
+                       sch.pool.Shutdown(it)
+               }
+       }
+}
+
+// Lock the given container. Should be called in a new goroutine.
+func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
+       if !sch.uuidLock(uuid, "lock") {
+               return
+       }
+       defer sch.uuidUnlock(uuid)
+       if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
+               // This happens if the container has been cancelled or
+               // locked since runQueue called sch.queue.Entries(),
+               // possibly by a lockContainer() call from a previous
+               // runQueue iteration. In any case, we will respond
+               // appropriately on the next runQueue iteration, which
+               // will have already been triggered by the queue
+               // update.
+               logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
+               return
+       }
+       err := sch.queue.Lock(uuid)
+       if err != nil {
+               logger.WithError(err).Warn("error locking container")
+               return
+       }
+       logger.Debug("lock succeeded")
+       ctr, ok := sch.queue.Get(uuid)
+       if !ok {
+               logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
+       } else if ctr.State != arvados.ContainerStateLocked {
+               logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
+       }
+}
+
+// Acquire a non-blocking lock for specified UUID, returning true if
+// successful.  The op argument is used only for debug logs.
+//
+// If the lock is not available, uuidLock arranges to wake up the
+// scheduler after a short delay, so it can retry whatever operation
+// is trying to get the lock (if that operation is still worth doing).
+//
+// This mechanism helps avoid spamming the controller/database with
+// concurrent updates for any single container, even when the
+// scheduler loop is running frequently.
+func (sch *Scheduler) uuidLock(uuid, op string) bool {
+       sch.mtx.Lock()
+       defer sch.mtx.Unlock()
+       logger := sch.logger.WithFields(logrus.Fields{
+               "ContainerUUID": uuid,
+               "Op":            op,
+       })
+       if op, locked := sch.uuidOp[uuid]; locked {
+               logger.Debugf("uuidLock not available, Op=%s in progress", op)
+               // Make sure the scheduler loop wakes up to retry.
+               sch.wakeup.Reset(time.Second / 4)
+               return false
+       }
+       logger.Debug("uuidLock acquired")
+       sch.uuidOp[uuid] = op
+       return true
+}
+
+func (sch *Scheduler) uuidUnlock(uuid string) {
+       sch.mtx.Lock()
+       defer sch.mtx.Unlock()
+       delete(sch.uuidOp, uuid)
+}
diff --git a/lib/dispatchcloud/scheduler/run_queue_test.go b/lib/dispatchcloud/scheduler/run_queue_test.go
new file mode 100644 (file)
index 0000000..4296a13
--- /dev/null
@@ -0,0 +1,337 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package scheduler
+
+import (
+       "context"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/test"
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/worker"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       check "gopkg.in/check.v1"
+)
+
+var (
+       // arbitrary example container UUIDs
+       uuids = func() (r []string) {
+               for i := 0; i < 16; i++ {
+                       r = append(r, test.ContainerUUID(i))
+               }
+               return
+       }()
+)
+
+type stubQuotaError struct {
+       error
+}
+
+func (stubQuotaError) IsQuotaError() bool { return true }
+
+type stubPool struct {
+       notify    <-chan struct{}
+       unalloc   map[arvados.InstanceType]int // idle+booting+unknown
+       idle      map[arvados.InstanceType]int
+       running   map[string]time.Time
+       atQuota   bool
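+       // canCreate is decremented by each successful Create() call;
+       // once it reaches zero, further Create() calls return false.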
+       canCreate int
+       creates   []arvados.InstanceType
+       starts    []string
+       shutdowns int
+       sync.Mutex
+}
+
+func (p *stubPool) AtQuota() bool               { return p.atQuota }
+func (p *stubPool) Subscribe() <-chan struct{}  { return p.notify }
+func (p *stubPool) Unsubscribe(<-chan struct{}) {}
+func (p *stubPool) Running() map[string]time.Time {
+       p.Lock()
+       defer p.Unlock()
+       r := map[string]time.Time{}
+       for k, v := range p.running {
+               r[k] = v
+       }
+       return r
+}
+func (p *stubPool) Unallocated() map[arvados.InstanceType]int {
+       p.Lock()
+       defer p.Unlock()
+       r := map[arvados.InstanceType]int{}
+       for it, n := range p.unalloc {
+               r[it] = n
+       }
+       return r
+}
+func (p *stubPool) Create(it arvados.InstanceType) bool {
+       p.Lock()
+       defer p.Unlock()
+       p.creates = append(p.creates, it)
+       if p.canCreate < 1 {
+               return false
+       }
+       p.canCreate--
+       p.unalloc[it]++
+       return true
+}
+func (p *stubPool) KillContainer(uuid string) {
+       p.Lock()
+       defer p.Unlock()
+       delete(p.running, uuid)
+}
+func (p *stubPool) Shutdown(arvados.InstanceType) bool {
+       p.shutdowns++
+       return false
+}
+func (p *stubPool) CountWorkers() map[worker.State]int {
+       p.Lock()
+       defer p.Unlock()
+       return map[worker.State]int{
+               worker.StateBooting: len(p.unalloc) - len(p.idle),
+               worker.StateIdle:    len(p.idle),
+               worker.StateRunning: len(p.running),
+       }
+}
+func (p *stubPool) StartContainer(it arvados.InstanceType, ctr arvados.Container) bool {
+       p.Lock()
+       defer p.Unlock()
+       p.starts = append(p.starts, ctr.UUID)
+       if p.idle[it] == 0 {
+               return false
+       }
+       p.idle[it]--
+       p.unalloc[it]--
+       p.running[ctr.UUID] = time.Time{}
+       return true
+}
+
+func chooseType(ctr *arvados.Container) (arvados.InstanceType, error) {
+       return test.InstanceType(ctr.RuntimeConstraints.VCPUs), nil
+}
+
+var _ = check.Suite(&SchedulerSuite{})
+
+type SchedulerSuite struct{}
+
+// Assign the priority=4 container to an idle node. Create a new
+// instance for the priority=3 container. Don't try to start any
+// priority<3 containers, because the priority=3 container didn't
+// start immediately. Don't try to create any other nodes after the
+// failed create.
+func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
+       ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+       queue := test.Queue{
+               ChooseType: chooseType,
+               Containers: []arvados.Container{
+                       {
+                               UUID:     test.ContainerUUID(1),
+                               Priority: 1,
+                               State:    arvados.ContainerStateLocked,
+                               RuntimeConstraints: arvados.RuntimeConstraints{
+                                       VCPUs: 1,
+                                       RAM:   1 << 30,
+                               },
+                       },
+                       {
+                               UUID:     test.ContainerUUID(2),
+                               Priority: 2,
+                               State:    arvados.ContainerStateLocked,
+                               RuntimeConstraints: arvados.RuntimeConstraints{
+                                       VCPUs: 1,
+                                       RAM:   1 << 30,
+                               },
+                       },
+                       {
+                               UUID:     test.ContainerUUID(3),
+                               Priority: 3,
+                               State:    arvados.ContainerStateLocked,
+                               RuntimeConstraints: arvados.RuntimeConstraints{
+                                       VCPUs: 1,
+                                       RAM:   1 << 30,
+                               },
+                       },
+                       {
+                               UUID:     test.ContainerUUID(4),
+                               Priority: 4,
+                               State:    arvados.ContainerStateLocked,
+                               RuntimeConstraints: arvados.RuntimeConstraints{
+                                       VCPUs: 1,
+                                       RAM:   1 << 30,
+                               },
+                       },
+               },
+       }
+       queue.Update()
+       pool := stubPool{
+               unalloc: map[arvados.InstanceType]int{
+                       test.InstanceType(1): 1,
+                       test.InstanceType(2): 2,
+               },
+               idle: map[arvados.InstanceType]int{
+                       test.InstanceType(1): 1,
+                       test.InstanceType(2): 2,
+               },
+               running:   map[string]time.Time{},
+               canCreate: 0,
+       }
+       New(ctx, &queue, &pool, time.Millisecond, time.Millisecond).runQueue()
+       c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1)})
+       c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})
+       c.Check(pool.running, check.HasLen, 1)
+       for uuid := range pool.running {
+               c.Check(uuid, check.Equals, uuids[4])
+       }
+}
+
+// If Create() fails, shut down some nodes and don't call Create()
+// again.  Don't call Create() at all if AtQuota() is true.
+func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
+       ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+       for quota := 0; quota < 2; quota++ {
+               c.Logf("quota=%d", quota)
+               shouldCreate := []arvados.InstanceType{}
+               for i := 0; i < quota; i++ {
+                       shouldCreate = append(shouldCreate, test.InstanceType(3))
+               }
+               queue := test.Queue{
+                       ChooseType: chooseType,
+                       Containers: []arvados.Container{
+                               {
+                                       UUID:     test.ContainerUUID(2),
+                                       Priority: 2,
+                                       State:    arvados.ContainerStateLocked,
+                                       RuntimeConstraints: arvados.RuntimeConstraints{
+                                               VCPUs: 2,
+                                               RAM:   2 << 30,
+                                       },
+                               },
+                               {
+                                       UUID:     test.ContainerUUID(3),
+                                       Priority: 3,
+                                       State:    arvados.ContainerStateLocked,
+                                       RuntimeConstraints: arvados.RuntimeConstraints{
+                                               VCPUs: 3,
+                                               RAM:   3 << 30,
+                                       },
+                               },
+                       },
+               }
+               queue.Update()
+               pool := stubPool{
+                       atQuota: quota == 0,
+                       unalloc: map[arvados.InstanceType]int{
+                               test.InstanceType(2): 2,
+                       },
+                       idle: map[arvados.InstanceType]int{
+                               test.InstanceType(2): 2,
+                       },
+                       running:   map[string]time.Time{},
+                       creates:   []arvados.InstanceType{},
+                       starts:    []string{},
+                       canCreate: 0,
+               }
+               New(ctx, &queue, &pool, time.Millisecond, time.Millisecond).runQueue()
+               c.Check(pool.creates, check.DeepEquals, shouldCreate)
+               c.Check(pool.starts, check.DeepEquals, []string{})
+               c.Check(pool.shutdowns, check.Not(check.Equals), 0)
+       }
+}
+
+// Start lower-priority containers while waiting for new/existing
+// workers to come up for higher-priority containers.
+func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
+       ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+       pool := stubPool{
+               unalloc: map[arvados.InstanceType]int{
+                       test.InstanceType(1): 2,
+                       test.InstanceType(2): 2,
+               },
+               idle: map[arvados.InstanceType]int{
+                       test.InstanceType(1): 1,
+                       test.InstanceType(2): 1,
+               },
+               running:   map[string]time.Time{},
+               canCreate: 4,
+       }
+       queue := test.Queue{
+               ChooseType: chooseType,
+               Containers: []arvados.Container{
+                       {
+                               // create a new worker
+                               UUID:     test.ContainerUUID(1),
+                               Priority: 1,
+                               State:    arvados.ContainerStateLocked,
+                               RuntimeConstraints: arvados.RuntimeConstraints{
+                                       VCPUs: 1,
+                                       RAM:   1 << 30,
+                               },
+                       },
+                       {
+                               // tentatively map to unalloc worker
+                               UUID:     test.ContainerUUID(2),
+                               Priority: 2,
+                               State:    arvados.ContainerStateLocked,
+                               RuntimeConstraints: arvados.RuntimeConstraints{
+                                       VCPUs: 1,
+                                       RAM:   1 << 30,
+                               },
+                       },
+                       {
+                               // start now on idle worker
+                               UUID:     test.ContainerUUID(3),
+                               Priority: 3,
+                               State:    arvados.ContainerStateLocked,
+                               RuntimeConstraints: arvados.RuntimeConstraints{
+                                       VCPUs: 1,
+                                       RAM:   1 << 30,
+                               },
+                       },
+                       {
+                               // create a new worker
+                               UUID:     test.ContainerUUID(4),
+                               Priority: 4,
+                               State:    arvados.ContainerStateLocked,
+                               RuntimeConstraints: arvados.RuntimeConstraints{
+                                       VCPUs: 2,
+                                       RAM:   2 << 30,
+                               },
+                       },
+                       {
+                               // tentatively map to unalloc worker
+                               UUID:     test.ContainerUUID(5),
+                               Priority: 5,
+                               State:    arvados.ContainerStateLocked,
+                               RuntimeConstraints: arvados.RuntimeConstraints{
+                                       VCPUs: 2,
+                                       RAM:   2 << 30,
+                               },
+                       },
+                       {
+                               // start now on idle worker
+                               UUID:     test.ContainerUUID(6),
+                               Priority: 6,
+                               State:    arvados.ContainerStateLocked,
+                               RuntimeConstraints: arvados.RuntimeConstraints{
+                                       VCPUs: 2,
+                                       RAM:   2 << 30,
+                               },
+                       },
+               },
+       }
+       queue.Update()
+       New(ctx, &queue, &pool, time.Millisecond, time.Millisecond).runQueue()
+       c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(2), test.InstanceType(1)})
+       c.Check(pool.starts, check.DeepEquals, []string{uuids[6], uuids[5], uuids[3], uuids[2]})
+       running := map[string]bool{}
+       for uuid, t := range pool.running {
+               if t.IsZero() {
+                       running[uuid] = false
+               } else {
+                       running[uuid] = true
+               }
+       }
+       c.Check(running, check.DeepEquals, map[string]bool{uuids[3]: false, uuids[6]: false})
+}
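
This file registers SchedulerSuite with gocheck but contains no "go
test" entry point of its own; the hookup lives elsewhere in the
package. For reference, the standard boilerplate (the same pattern
appears verbatim in executor_test.go below) looks like this:

package scheduler

import (
	"testing"

	check "gopkg.in/check.v1"
)

// Gocheck boilerplate: route "go test" through the gocheck runner so
// the registered suites' Test* methods are discovered and run.
func Test(t *testing.T) {
	check.TestingT(t)
}
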
diff --git a/lib/dispatchcloud/scheduler/scheduler.go b/lib/dispatchcloud/scheduler/scheduler.go
new file mode 100644 (file)
index 0000000..eb82c48
--- /dev/null
@@ -0,0 +1,124 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Package scheduler uses a resizable worker pool to execute
+// containers in priority order.
+package scheduler
+
+import (
+       "context"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "github.com/sirupsen/logrus"
+)
+
+// A Scheduler maps queued containers onto unallocated workers in
+// priority order, creating new workers if needed. It locks containers
+// that can be mapped onto existing/pending workers, and starts them
+// if possible.
+//
+// A Scheduler unlocks any containers that are locked but can't be
+// mapped. (For example, this happens when the cloud provider reaches
+// quota/capacity and a previously mappable container's priority is
+// surpassed by a newer container.)
+//
+// If it encounters errors while creating new workers, a Scheduler
+// shuts down idle workers, in case they are consuming quota.
+type Scheduler struct {
+       logger              logrus.FieldLogger
+       queue               ContainerQueue
+       pool                WorkerPool
+       staleLockTimeout    time.Duration
+       queueUpdateInterval time.Duration
+
+       uuidOp map[string]string // operation in progress: "lock", "cancel", ...
+       mtx    sync.Mutex
+       wakeup *time.Timer
+
+       runOnce sync.Once
+       stop    chan struct{}
+       stopped chan struct{}
+}
+
+// New returns a new unstarted Scheduler.
+//
+// Any given queue and pool should not be used by more than one
+// scheduler at a time.
+func New(ctx context.Context, queue ContainerQueue, pool WorkerPool, staleLockTimeout, queueUpdateInterval time.Duration) *Scheduler {
+       return &Scheduler{
+               logger:              ctxlog.FromContext(ctx),
+               queue:               queue,
+               pool:                pool,
+               staleLockTimeout:    staleLockTimeout,
+               queueUpdateInterval: queueUpdateInterval,
+               wakeup:              time.NewTimer(time.Second),
+               stop:                make(chan struct{}),
+               stopped:             make(chan struct{}),
+               uuidOp:              map[string]string{},
+       }
+}
+
+// Start starts the scheduler.
+func (sch *Scheduler) Start() {
+       go sch.runOnce.Do(sch.run)
+}
+
+// Stop stops the scheduler. No other method should be called after
+// Stop.
+func (sch *Scheduler) Stop() {
+       close(sch.stop)
+       <-sch.stopped
+}
+
+func (sch *Scheduler) run() {
+       defer close(sch.stopped)
+
+       // Ensure the queue is fetched once before attempting anything.
+       for err := sch.queue.Update(); err != nil; err = sch.queue.Update() {
+               sch.logger.Errorf("error updating queue: %s", err)
+               d := sch.queueUpdateInterval / 10
+               if d < time.Second {
+                       d = time.Second
+               }
+               sch.logger.Infof("waiting %s before retry", d)
+               time.Sleep(d)
+       }
+
+       // Keep the queue up to date.
+       poll := time.NewTicker(sch.queueUpdateInterval)
+       defer poll.Stop()
+       go func() {
+               for range poll.C {
+                       err := sch.queue.Update()
+                       if err != nil {
+                               sch.logger.Errorf("error updating queue: %s", err)
+                       }
+               }
+       }()
+
+       t0 := time.Now()
+       sch.logger.Infof("FixStaleLocks starting.")
+       sch.fixStaleLocks()
+       sch.logger.Infof("FixStaleLocks finished (%s), starting scheduling.", time.Since(t0))
+
+       poolNotify := sch.pool.Subscribe()
+       defer sch.pool.Unsubscribe(poolNotify)
+
+       queueNotify := sch.queue.Subscribe()
+       defer sch.queue.Unsubscribe(queueNotify)
+
+       for {
+               sch.runQueue()
+               sch.sync()
+               select {
+               case <-sch.stop:
+                       return
+               case <-queueNotify:
+               case <-poolNotify:
+               case <-sch.wakeup.C:
+               }
+       }
+}
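
For illustration, a minimal sketch of how a caller might drive the
scheduler, assuming queue and pool satisfy the ContainerQueue and
WorkerPool interfaces named in New's signature; the package and
function below are hypothetical, not part of this commit:

// Package dispatch sketches a scheduler caller.
package dispatch

import (
	"context"
	"time"

	"git.curoverse.com/arvados.git/lib/dispatchcloud/scheduler"
)

func runScheduler(ctx context.Context, queue scheduler.ContainerQueue, pool scheduler.WorkerPool) {
	// Treat locks as stale after 1 minute; refresh the container
	// queue from the API server every 10 seconds.
	sch := scheduler.New(ctx, queue, pool, time.Minute, 10*time.Second)
	sch.Start()
	defer sch.Stop() // Stop blocks until the run loop exits
	<-ctx.Done()
}
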
diff --git a/lib/dispatchcloud/scheduler/sync.go b/lib/dispatchcloud/scheduler/sync.go
new file mode 100644 (file)
index 0000000..23fc621
--- /dev/null
@@ -0,0 +1,119 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package scheduler
+
+import (
+       "fmt"
+
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/container"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/sirupsen/logrus"
+)
+
+// sync resolves discrepancies between the queue and the pool:
+//
+// Lingering crunch-run processes for finalized and unlocked/requeued
+// containers are killed.
+//
+// Locked containers whose crunch-run processes have exited are
+// requeued.
+//
+// Running containers whose crunch-run processes have exited are
+// cancelled.
+func (sch *Scheduler) sync() {
+       running := sch.pool.Running()
+       qEntries, qUpdated := sch.queue.Entries()
+       for uuid, ent := range qEntries {
+               exited, running := running[uuid]
+               switch ent.Container.State {
+               case arvados.ContainerStateRunning:
+                       if !running {
+                               go sch.cancel(ent, "not running on any worker")
+                       } else if !exited.IsZero() && qUpdated.After(exited) {
+                               go sch.cancel(ent, "state=\"Running\" after crunch-run exited")
+                       } else if ent.Container.Priority == 0 {
+                               go sch.kill(ent, "priority=0")
+                       }
+               case arvados.ContainerStateComplete, arvados.ContainerStateCancelled:
+                       if running {
+                               // Kill crunch-run in case it's stuck;
+                               // nothing it does now will matter
+                               // anyway. If crunch-run has already
+                               // exited and we just haven't found
+                               // out about it yet, the only effect
+                               // of kill() will be to make the
+                               // worker available for the next
+                               // container.
+                               go sch.kill(ent, fmt.Sprintf("state=%q", ent.Container.State))
+                       } else {
+                               sch.logger.WithFields(logrus.Fields{
+                                       "ContainerUUID": uuid,
+                                       "State":         ent.Container.State,
+                               }).Info("container finished")
+                               sch.queue.Forget(uuid)
+                       }
+               case arvados.ContainerStateQueued:
+                       if running {
+                               // Can happen if a worker returns from
+                               // a network outage and is still
+                               // preparing to run a container that
+                               // has already been unlocked/requeued.
+                               go sch.kill(ent, fmt.Sprintf("state=%q", ent.Container.State))
+                       }
+               case arvados.ContainerStateLocked:
+                       if running && !exited.IsZero() && qUpdated.After(exited) {
+                               go sch.requeue(ent, "crunch-run exited")
+                       } else if running && exited.IsZero() && ent.Container.Priority == 0 {
+                               go sch.kill(ent, "priority=0")
+                       } else if !running && ent.Container.Priority == 0 {
+                               go sch.requeue(ent, "priority=0")
+                       }
+               default:
+                       sch.logger.WithFields(logrus.Fields{
+                               "ContainerUUID": uuid,
+                               "State":         ent.Container.State,
+                       }).Error("BUG: unexpected state")
+               }
+       }
+}
+
+func (sch *Scheduler) cancel(ent container.QueueEnt, reason string) {
+       uuid := ent.Container.UUID
+       if !sch.uuidLock(uuid, "cancel") {
+               return
+       }
+       defer sch.uuidUnlock(uuid)
+       logger := sch.logger.WithField("ContainerUUID", uuid)
+       logger.Infof("cancelling container because %s", reason)
+       err := sch.queue.Cancel(uuid)
+       if err != nil {
+               logger.WithError(err).Print("error cancelling container")
+       }
+}
+
+func (sch *Scheduler) kill(ent container.QueueEnt, reason string) {
+       uuid := ent.Container.UUID
+       logger := sch.logger.WithField("ContainerUUID", uuid)
+       logger.Debugf("killing crunch-run process because %s", reason)
+       sch.pool.KillContainer(uuid)
+}
+
+func (sch *Scheduler) requeue(ent container.QueueEnt, reason string) {
+       uuid := ent.Container.UUID
+       if !sch.uuidLock(uuid, "requeue") {
+               return
+       }
+       defer sch.uuidUnlock(uuid)
+       logger := sch.logger.WithFields(logrus.Fields{
+               "ContainerUUID": uuid,
+               "State":         ent.Container.State,
+               "Priority":      ent.Container.Priority,
+       })
+       logger.Infof("requeueing locked container because %s", reason)
+       err := sch.queue.Unlock(uuid)
+       if err != nil {
+               logger.WithError(err).Error("error requeueing container")
+       }
+}
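
The switch above is easiest to audit as a decision table. The
side-effect-free restatement below is illustrative only -- the real
method dispatches goroutines, logs, and flags unexpected states as
bugs -- but it encodes the same rules:

package scheduler

import "git.curoverse.com/arvados.git/sdk/go/arvados"

// syncAction restates sync()'s rules: given a container's API state
// and what the pool reports about its crunch-run process, return the
// scheduler's action.
func syncAction(state arvados.ContainerState, runningOnWorker, crunchRunExited, queueNewerThanExit bool, priority int) string {
	switch state {
	case arvados.ContainerStateRunning:
		switch {
		case !runningOnWorker:
			return "cancel" // not running on any worker
		case crunchRunExited && queueNewerThanExit:
			return "cancel" // still Running after crunch-run exited
		case priority == 0:
			return "kill"
		}
	case arvados.ContainerStateComplete, arvados.ContainerStateCancelled:
		if runningOnWorker {
			return "kill" // lingering process; free the worker
		}
		return "forget"
	case arvados.ContainerStateQueued:
		if runningOnWorker {
			return "kill" // worker came back after unlock/requeue
		}
	case arvados.ContainerStateLocked:
		switch {
		case runningOnWorker && crunchRunExited && queueNewerThanExit:
			return "requeue"
		case runningOnWorker && !crunchRunExited && priority == 0:
			return "kill"
		case !runningOnWorker && priority == 0:
			return "requeue"
		}
	}
	return "none"
}
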
diff --git a/lib/dispatchcloud/ssh_executor/executor.go b/lib/dispatchcloud/ssh_executor/executor.go
new file mode 100644 (file)
index 0000000..feed1c2
--- /dev/null
@@ -0,0 +1,220 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Package ssh_executor provides an implementation of pool.Executor
+// using a long-lived multiplexed SSH session.
+package ssh_executor
+
+import (
+       "bytes"
+       "errors"
+       "io"
+       "net"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "golang.org/x/crypto/ssh"
+)
+
+// New returns a new Executor, using the given target.
+func New(t cloud.ExecutorTarget) *Executor {
+       return &Executor{target: t}
+}
+
+// An Executor uses a multiplexed SSH connection to execute shell
+// commands on a remote target. It reconnects automatically after
+// errors.
+//
+// When setting up a connection, the Executor accepts whatever host
+// key is provided by the remote server, then passes the received key
+// and the SSH connection to the target's VerifyHostKey method before
+// executing commands on the connection.
+//
+// A zero Executor must not be used before calling SetTarget.
+//
+// An Executor must not be copied.
+type Executor struct {
+       target     cloud.ExecutorTarget
+       targetPort string
+       targetUser string
+       signers    []ssh.Signer
+       mtx        sync.RWMutex // controls access to instance after creation
+
+       client      *ssh.Client
+       clientErr   error
+       clientOnce  sync.Once     // initializes private state
+       clientSetup chan bool     // len>0 while client setup is in progress
+       hostKey     ssh.PublicKey // most recent host key that passed verification, if any
+}
+
+// SetSigners updates the set of private keys that will be offered to
+// the target next time the Executor sets up a new connection.
+func (exr *Executor) SetSigners(signers ...ssh.Signer) {
+       exr.mtx.Lock()
+       defer exr.mtx.Unlock()
+       exr.signers = signers
+}
+
+// SetTarget sets the current target. The new target will be used next
+// time a new connection is set up; until then, the Executor will
+// continue to use the existing target.
+//
+// The new target is assumed to represent the same host as the
+// previous target, although its address and host key might differ.
+func (exr *Executor) SetTarget(t cloud.ExecutorTarget) {
+       exr.mtx.Lock()
+       defer exr.mtx.Unlock()
+       exr.target = t
+}
+
+// SetTargetPort sets the default port (name or number) to connect
+// to. This is used only when the address returned by the target's
+// Address() method does not specify a port. If the given port is
+// empty (or SetTargetPort is not called at all), the default port is
+// "ssh".
+func (exr *Executor) SetTargetPort(port string) {
+       exr.mtx.Lock()
+       defer exr.mtx.Unlock()
+       exr.targetPort = port
+}
+
+// Target returns the current target.
+func (exr *Executor) Target() cloud.ExecutorTarget {
+       exr.mtx.RLock()
+       defer exr.mtx.RUnlock()
+       return exr.target
+}
+
+// Execute runs cmd on the target. If an existing connection is not
+// usable, it sets up a new connection to the current target.
+func (exr *Executor) Execute(env map[string]string, cmd string, stdin io.Reader) ([]byte, []byte, error) {
+       session, err := exr.newSession()
+       if err != nil {
+               return nil, nil, err
+       }
+       defer session.Close()
+       for k, v := range env {
+               err = session.Setenv(k, v)
+               if err != nil {
+                       return nil, nil, err
+               }
+       }
+       var stdout, stderr bytes.Buffer
+       session.Stdin = stdin
+       session.Stdout = &stdout
+       session.Stderr = &stderr
+       err = session.Run(cmd)
+       return stdout.Bytes(), stderr.Bytes(), err
+}
+
+// Close shuts down any active connections.
+func (exr *Executor) Close() {
+       // Ensure exr is initialized
+       exr.sshClient(false)
+
+       exr.clientSetup <- true
+       if exr.client != nil {
+               defer exr.client.Close()
+       }
+       exr.client, exr.clientErr = nil, errors.New("closed")
+       <-exr.clientSetup
+}
+
+// Create a new SSH session. If session setup fails or the SSH client
+// hasn't been set up yet, set up a new SSH client and try again.
+func (exr *Executor) newSession() (*ssh.Session, error) {
+       try := func(create bool) (*ssh.Session, error) {
+               client, err := exr.sshClient(create)
+               if err != nil {
+                       return nil, err
+               }
+               return client.NewSession()
+       }
+       session, err := try(false)
+       if err != nil {
+               session, err = try(true)
+       }
+       return session, err
+}
+
+// Get the latest SSH client. If another goroutine is in the process
+// of setting one up, wait for it to finish and return its result (or
+// the last client that was set up successfully, if it fails).
+func (exr *Executor) sshClient(create bool) (*ssh.Client, error) {
+       exr.clientOnce.Do(func() {
+               exr.clientSetup = make(chan bool, 1)
+               exr.clientErr = errors.New("client not yet created")
+       })
+       defer func() { <-exr.clientSetup }()
+       select {
+       case exr.clientSetup <- true:
+               if create {
+                       client, err := exr.setupSSHClient()
+                       if err == nil || exr.client == nil {
+                               if exr.client != nil {
+                                       // Hang up the previous
+                                       // (non-working) client
+                                       go exr.client.Close()
+                               }
+                               exr.client, exr.clientErr = client, err
+                       }
+                       if err != nil {
+                               return nil, err
+                       }
+               }
+       default:
+               // Another goroutine is doing the above case.  Wait
+               // for it to finish and return whatever it leaves in
+               // exr.client.
+               exr.clientSetup <- true
+       }
+       return exr.client, exr.clientErr
+}
+
+// Create a new SSH client.
+func (exr *Executor) setupSSHClient() (*ssh.Client, error) {
+       target := exr.Target()
+       addr := target.Address()
+       if addr == "" {
+               return nil, errors.New("instance has no address")
+       }
+       if h, p, err := net.SplitHostPort(addr); err != nil || p == "" {
+               // Target address does not specify a port.  Use
+               // targetPort, or "ssh".
+               if h == "" {
+                       h = addr
+               }
+               if p = exr.targetPort; p == "" {
+                       p = "ssh"
+               }
+               addr = net.JoinHostPort(h, p)
+       }
+       var receivedKey ssh.PublicKey
+       client, err := ssh.Dial("tcp", addr, &ssh.ClientConfig{
+               User: target.RemoteUser(),
+               Auth: []ssh.AuthMethod{
+                       ssh.PublicKeys(exr.signers...),
+               },
+               HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
+                       receivedKey = key
+                       return nil
+               },
+               Timeout: time.Minute,
+       })
+       if err != nil {
+               return nil, err
+       } else if receivedKey == nil {
+               return nil, errors.New("BUG: key was never provided to HostKeyCallback")
+       }
+
+       if exr.hostKey == nil || !bytes.Equal(exr.hostKey.Marshal(), receivedKey.Marshal()) {
+               err = target.VerifyHostKey(receivedKey, client)
+               if err != nil {
+                       return nil, err
+               }
+               exr.hostKey = receivedKey
+       }
+       return client, nil
+}
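
For illustration, typical use of the executor, assuming target is any
cloud.ExecutorTarget (in the real dispatcher, cloud instances satisfy
this interface); the function name and probe command below are
illustrative, not part of this commit:

// Package dispatch sketches an executor caller.
package dispatch

import (
	"fmt"
	"strings"

	"git.curoverse.com/arvados.git/lib/cloud"
	"git.curoverse.com/arvados.git/lib/dispatchcloud/ssh_executor"
	"golang.org/x/crypto/ssh"
)

func probe(target cloud.ExecutorTarget, key ssh.Signer) error {
	exr := ssh_executor.New(target)
	defer exr.Close()
	exr.SetSigners(key) // private key to offer when connecting
	// Execute reuses one multiplexed SSH connection across calls,
	// reconnecting (and re-verifying the host key) after errors.
	stdout, stderr, err := exr.Execute(nil, "crunch-run --list", nil)
	if err != nil {
		return fmt.Errorf("probe failed: %v (stderr %q)", err, stderr)
	}
	fmt.Printf("running containers: %s\n", strings.TrimSpace(string(stdout)))
	return nil
}
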
diff --git a/lib/dispatchcloud/ssh_executor/executor_test.go b/lib/dispatchcloud/ssh_executor/executor_test.go
new file mode 100644 (file)
index 0000000..e7c0235
--- /dev/null
@@ -0,0 +1,176 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package ssh_executor
+
+import (
+       "bytes"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net"
+       "sync"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/test"
+       "golang.org/x/crypto/ssh"
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&ExecutorSuite{})
+
+type testTarget struct {
+       test.SSHService
+}
+
+func (*testTarget) VerifyHostKey(ssh.PublicKey, *ssh.Client) error {
+       return nil
+}
+
+// Address returns the wrapped SSHService's host, with the port
+// stripped. This ensures the executor won't work until
+// SetTargetPort() is called -- see (*testTarget)Port().
+func (tt *testTarget) Address() string {
+       h, _, err := net.SplitHostPort(tt.SSHService.Address())
+       if err != nil {
+               panic(err)
+       }
+       return h
+}
+
+func (tt *testTarget) Port() string {
+       _, p, err := net.SplitHostPort(tt.SSHService.Address())
+       if err != nil {
+               panic(err)
+       }
+       return p
+}
+
+type mitmTarget struct {
+       test.SSHService
+}
+
+func (*mitmTarget) VerifyHostKey(key ssh.PublicKey, client *ssh.Client) error {
+       return fmt.Errorf("host key failed verification: %#v", key)
+}
+
+type ExecutorSuite struct{}
+
+func (s *ExecutorSuite) TestBadHostKey(c *check.C) {
+       _, hostpriv := test.LoadTestKey(c, "../test/sshkey_vm")
+       clientpub, clientpriv := test.LoadTestKey(c, "../test/sshkey_dispatch")
+       target := &mitmTarget{
+               SSHService: test.SSHService{
+                       Exec: func(map[string]string, string, io.Reader, io.Writer, io.Writer) uint32 {
+                               c.Error("Target Exec func called even though host key verification failed")
+                               return 0
+                       },
+                       HostKey:        hostpriv,
+                       AuthorizedUser: "username",
+                       AuthorizedKeys: []ssh.PublicKey{clientpub},
+               },
+       }
+
+       err := target.Start()
+       c.Check(err, check.IsNil)
+       c.Logf("target address %q", target.Address())
+       defer target.Close()
+
+       exr := New(target)
+       exr.SetSigners(clientpriv)
+
+       _, _, err = exr.Execute(nil, "true", nil)
+       c.Check(err, check.ErrorMatches, "host key failed verification: .*")
+}
+
+func (s *ExecutorSuite) TestExecute(c *check.C) {
+       command := `foo 'bar' "baz"`
+       stdinData := "foobar\nbaz\n"
+       _, hostpriv := test.LoadTestKey(c, "../test/sshkey_vm")
+       clientpub, clientpriv := test.LoadTestKey(c, "../test/sshkey_dispatch")
+       for _, exitcode := range []int{0, 1, 2} {
+               target := &testTarget{
+                       SSHService: test.SSHService{
+                               Exec: func(env map[string]string, cmd string, stdin io.Reader, stdout, stderr io.Writer) uint32 {
+                                       c.Check(env["TESTVAR"], check.Equals, "test value")
+                                       c.Check(cmd, check.Equals, command)
+                                       var wg sync.WaitGroup
+                                       wg.Add(2)
+                                       go func() {
+                                               io.WriteString(stdout, "stdout\n")
+                                               wg.Done()
+                                       }()
+                                       go func() {
+                                               io.WriteString(stderr, "stderr\n")
+                                               wg.Done()
+                                       }()
+                                       buf, err := ioutil.ReadAll(stdin)
+                                       wg.Wait()
+                                       c.Check(err, check.IsNil)
+                                       if err != nil {
+                                               return 99
+                                       }
+                                       _, err = stdout.Write(buf)
+                                       c.Check(err, check.IsNil)
+                                       return uint32(exitcode)
+                               },
+                               HostKey:        hostpriv,
+                               AuthorizedUser: "username",
+                               AuthorizedKeys: []ssh.PublicKey{clientpub},
+                       },
+               }
+               err := target.Start()
+               c.Check(err, check.IsNil)
+               c.Logf("target address %q", target.Address())
+               defer target.Close()
+
+               exr := New(target)
+               exr.SetSigners(clientpriv)
+
+               // Use the default target port (ssh). Execute will
+               // return a connection error or an authentication
+               // error, depending on whether the test host is
+               // running an SSH server.
+               _, _, err = exr.Execute(nil, command, nil)
+               c.Check(err, check.ErrorMatches, `.*(unable to authenticate|connection refused).*`)
+
+               // Use a bogus target port. Execute will return a
+               // connection error.
+               exr.SetTargetPort("0")
+               _, _, err = exr.Execute(nil, command, nil)
+               c.Check(err, check.ErrorMatches, `.*connection refused.*`)
+
+               // Use the test server's listening port.
+               exr.SetTargetPort(target.Port())
+
+               done := make(chan bool)
+               go func() {
+                       stdout, stderr, err := exr.Execute(map[string]string{"TESTVAR": "test value"}, command, bytes.NewBufferString(stdinData))
+                       if exitcode == 0 {
+                               c.Check(err, check.IsNil)
+                       } else {
+                               c.Check(err, check.NotNil)
+                               err, ok := err.(*ssh.ExitError)
+                               c.Assert(ok, check.Equals, true)
+                               c.Check(err.ExitStatus(), check.Equals, exitcode)
+                       }
+                       c.Check(stdout, check.DeepEquals, []byte("stdout\n"+stdinData))
+                       c.Check(stderr, check.DeepEquals, []byte("stderr\n"))
+                       close(done)
+               }()
+
+               timeout := time.NewTimer(time.Second)
+               select {
+               case <-done:
+               case <-timeout.C:
+                       c.Fatal("timed out")
+               }
+       }
+}
diff --git a/lib/dispatchcloud/test/doc.go b/lib/dispatchcloud/test/doc.go
new file mode 100644 (file)
index 0000000..12f3b16
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Package test provides fakes and other tools for testing cloud
+// drivers and other dispatcher modules.
+package test
diff --git a/lib/dispatchcloud/test/fixtures.go b/lib/dispatchcloud/test/fixtures.go
new file mode 100644 (file)
index 0000000..68bdb3d
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package test
+
+import (
+       "fmt"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+// ContainerUUID returns a fake container UUID.
+func ContainerUUID(i int) string {
+       return fmt.Sprintf("zzzzz-dz642-%015d", i)
+}
+
+// InstanceType returns a fake arvados.InstanceType called "type{i}"
+// with i CPUs and i GiB of memory.
+func InstanceType(i int) arvados.InstanceType {
+       return arvados.InstanceType{
+               Name:         fmt.Sprintf("type%d", i),
+               ProviderType: fmt.Sprintf("providertype%d", i),
+               VCPUs:        i,
+               RAM:          arvados.ByteSize(i) << 30,
+               Price:        float64(i) * 0.123,
+       }
+}
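
These two helpers give the scheduler tests above their compact shape,
and the values follow mechanically from the definitions. A small
example, written as a hypothetical Go example test (not part of this
commit):

package test_test

import (
	"fmt"

	"git.curoverse.com/arvados.git/lib/dispatchcloud/test"
)

func ExampleInstanceType() {
	fmt.Println(test.ContainerUUID(4))
	it := test.InstanceType(2)
	fmt.Println(it.Name, it.VCPUs, it.Price)
	// Output:
	// zzzzz-dz642-000000000000004
	// type2 2 0.246
}
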
diff --git a/lib/dispatchcloud/test/queue.go b/lib/dispatchcloud/test/queue.go
new file mode 100644 (file)
index 0000000..e18a2b5
--- /dev/null
@@ -0,0 +1,171 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package test
+
+import (
+       "fmt"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/container"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+// Queue is a test stub for container.Queue. The caller specifies the
+// initial queue state.
+type Queue struct {
+       // Containers represent the API server database contents.
+       Containers []arvados.Container
+
+       // ChooseType will be called for each entry in Containers. It
+       // must not be nil.
+       ChooseType func(*arvados.Container) (arvados.InstanceType, error)
+
+       entries     map[string]container.QueueEnt
+       updTime     time.Time
+       subscribers map[<-chan struct{}]chan struct{}
+
+       mtx sync.Mutex
+}
+
+// Entries returns the containers that were queued when Update was
+// last called.
+func (q *Queue) Entries() (map[string]container.QueueEnt, time.Time) {
+       q.mtx.Lock()
+       defer q.mtx.Unlock()
+       updTime := q.updTime
+       r := map[string]container.QueueEnt{}
+       for uuid, ent := range q.entries {
+               r[uuid] = ent
+       }
+       return r, updTime
+}
+
+// Get returns the container from the cached queue, i.e., as it was
+// when Update was last called -- just like a container.Queue does. If
+// the state has been changed (via Lock, Unlock, or Cancel) since the
+// last Update, the updated state is returned.
+func (q *Queue) Get(uuid string) (arvados.Container, bool) {
+       q.mtx.Lock()
+       defer q.mtx.Unlock()
+       ent, ok := q.entries[uuid]
+       return ent.Container, ok
+}
+
+// Forget removes the container from the cached queue.
+func (q *Queue) Forget(uuid string) {
+       q.mtx.Lock()
+       defer q.mtx.Unlock()
+       delete(q.entries, uuid)
+}
+
+// Lock changes the container's state from Queued to Locked,
+// returning an error if it isn't currently Queued.
+func (q *Queue) Lock(uuid string) error {
+       q.mtx.Lock()
+       defer q.mtx.Unlock()
+       return q.changeState(uuid, arvados.ContainerStateQueued, arvados.ContainerStateLocked)
+}
+
+// Unlock changes the container's state from Locked back to Queued,
+// returning an error if it isn't currently Locked.
+func (q *Queue) Unlock(uuid string) error {
+       q.mtx.Lock()
+       defer q.mtx.Unlock()
+       return q.changeState(uuid, arvados.ContainerStateLocked, arvados.ContainerStateQueued)
+}
+
+// Cancel changes the container's state to Cancelled, regardless of
+// its current state.
+func (q *Queue) Cancel(uuid string) error {
+       q.mtx.Lock()
+       defer q.mtx.Unlock()
+       return q.changeState(uuid, q.entries[uuid].Container.State, arvados.ContainerStateCancelled)
+}
+
+// Subscribe returns a channel that receives a (non-blocking)
+// notification whenever the queue changes.
+func (q *Queue) Subscribe() <-chan struct{} {
+       q.mtx.Lock()
+       defer q.mtx.Unlock()
+       if q.subscribers == nil {
+               q.subscribers = map[<-chan struct{}]chan struct{}{}
+       }
+       ch := make(chan struct{}, 1)
+       q.subscribers[ch] = ch
+       return ch
+}
+
+// Unsubscribe stops sending notifications to the given channel.
+func (q *Queue) Unsubscribe(ch <-chan struct{}) {
+       q.mtx.Lock()
+       defer q.mtx.Unlock()
+       delete(q.subscribers, ch)
+}
+
+// caller must have lock.
+func (q *Queue) notify() {
+       for _, ch := range q.subscribers {
+               select {
+               case ch <- struct{}{}:
+               default:
+               }
+       }
+}
+
+// caller must have lock.
+func (q *Queue) changeState(uuid string, from, to arvados.ContainerState) error {
+       ent := q.entries[uuid]
+       if ent.Container.State != from {
+               return fmt.Errorf("changeState failed: state=%q", ent.Container.State)
+       }
+       ent.Container.State = to
+       q.entries[uuid] = ent
+       for i, ctr := range q.Containers {
+               if ctr.UUID == uuid {
+                       q.Containers[i].State = to
+                       break
+               }
+       }
+       q.notify()
+       return nil
+}
+
+// Update rebuilds the current entries from the Containers slice.
+func (q *Queue) Update() error {
+       q.mtx.Lock()
+       defer q.mtx.Unlock()
+       updTime := time.Now()
+       upd := map[string]container.QueueEnt{}
+       for _, ctr := range q.Containers {
+               _, exists := q.entries[ctr.UUID]
+               if !exists && (ctr.State == arvados.ContainerStateComplete || ctr.State == arvados.ContainerStateCancelled) {
+                       continue
+               }
+               if ent, ok := upd[ctr.UUID]; ok {
+                       ent.Container = ctr
+                       upd[ctr.UUID] = ent
+               } else {
+                       it, _ := q.ChooseType(&ctr)
+                       upd[ctr.UUID] = container.QueueEnt{
+                               Container:    ctr,
+                               InstanceType: it,
+                       }
+               }
+       }
+       q.entries = upd
+       q.updTime = updTime
+       q.notify()
+       return nil
+}
+
+// Notify adds/updates an entry in the Containers slice.  This
+// simulates the effect of an API update from someone other than the
+// dispatcher -- e.g., crunch-run updating state to "Complete" when a
+// container exits.
+//
+// The resulting changes are not exposed through Get() or Entries()
+// until the next call to Update().
+func (q *Queue) Notify(upd arvados.Container) {
+       q.mtx.Lock()
+       defer q.mtx.Unlock()
+       for i, ctr := range q.Containers {
+               if ctr.UUID == upd.UUID {
+                       q.Containers[i] = upd
+                       return
+               }
+       }
+       q.Containers = append(q.Containers, upd)
+}
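
The important property of this stub is the Notify/Update split
documented above: out-of-band changes stay invisible to Get and
Entries until the dispatcher polls. A minimal sketch of how a test
might exploit that (the helper below is hypothetical, not part of
this commit):

package test_test

import (
	"git.curoverse.com/arvados.git/lib/dispatchcloud/test"
	"git.curoverse.com/arvados.git/sdk/go/arvados"
)

func simulateCrunchRunExit(q *test.Queue, uuid string) {
	ctr, _ := q.Get(uuid)
	ctr.State = arvados.ContainerStateComplete
	// Notify mimics crunch-run updating the API server...
	q.Notify(ctr)
	// ...but Get/Entries keep returning the cached state until the
	// next Update, just like the real container.Queue.
	q.Update()
}
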
diff --git a/lib/dispatchcloud/test/ssh_service.go b/lib/dispatchcloud/test/ssh_service.go
new file mode 100644 (file)
index 0000000..f1fde4f
--- /dev/null
@@ -0,0 +1,190 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package test
+
+import (
+       "bytes"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "net"
+       "strings"
+       "sync"
+
+       "golang.org/x/crypto/ssh"
+       check "gopkg.in/check.v1"
+)
+
+func LoadTestKey(c *check.C, fnm string) (ssh.PublicKey, ssh.Signer) {
+       rawpubkey, err := ioutil.ReadFile(fnm + ".pub")
+       c.Assert(err, check.IsNil)
+       pubkey, _, _, _, err := ssh.ParseAuthorizedKey(rawpubkey)
+       c.Assert(err, check.IsNil)
+       rawprivkey, err := ioutil.ReadFile(fnm)
+       c.Assert(err, check.IsNil)
+       privkey, err := ssh.ParsePrivateKey(rawprivkey)
+       c.Assert(err, check.IsNil)
+       return pubkey, privkey
+}
+
+// An SSHExecFunc handles an "exec" session on a multiplexed SSH
+// connection.
+type SSHExecFunc func(env map[string]string, command string, stdin io.Reader, stdout, stderr io.Writer) uint32
+
+// An SSHService accepts SSH connections on an available TCP port and
+// passes clients' "exec" sessions to the provided SSHExecFunc.
+type SSHService struct {
+       Exec           SSHExecFunc
+       HostKey        ssh.Signer
+       AuthorizedUser string
+       AuthorizedKeys []ssh.PublicKey
+
+       listener net.Listener
+       conn     *ssh.ServerConn
+       setup    sync.Once
+       mtx      sync.Mutex
+       started  chan bool
+       closed   bool
+       err      error
+}
+
+// Address returns the host:port where the SSH server is listening. It
+// returns "" if called before the server is ready to accept
+// connections.
+func (ss *SSHService) Address() string {
+       ss.setup.Do(ss.start)
+       ss.mtx.Lock()
+       ln := ss.listener
+       ss.mtx.Unlock()
+       if ln == nil {
+               return ""
+       }
+       return ln.Addr().String()
+}
+
+// RemoteUser returns the username that will be accepted.
+func (ss *SSHService) RemoteUser() string {
+       return ss.AuthorizedUser
+}
+
+// Close shuts down the server and releases resources. Established
+// connections are unaffected.
+func (ss *SSHService) Close() {
+       ss.Start()
+       ss.mtx.Lock()
+       ln := ss.listener
+       ss.closed = true
+       ss.mtx.Unlock()
+       if ln != nil {
+               ln.Close()
+       }
+}
+
+// Start returns when the server is ready to accept connections, or
+// returns an error if startup failed.
+func (ss *SSHService) Start() error {
+       ss.setup.Do(ss.start)
+       <-ss.started
+       return ss.err
+}
+
+func (ss *SSHService) start() {
+       ss.started = make(chan bool)
+       go ss.run()
+}
+
+func (ss *SSHService) run() {
+       defer close(ss.started)
+       config := &ssh.ServerConfig{
+               PublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) {
+                       for _, ak := range ss.AuthorizedKeys {
+                               if bytes.Equal(ak.Marshal(), pubKey.Marshal()) {
+                                       return &ssh.Permissions{}, nil
+                               }
+                       }
+                       return nil, fmt.Errorf("unknown public key for %q", c.User())
+               },
+       }
+       config.AddHostKey(ss.HostKey)
+
+       listener, err := net.Listen("tcp", "127.0.0.1:")
+       if err != nil {
+               ss.err = err
+               return
+       }
+
+       ss.mtx.Lock()
+       ss.listener = listener
+       ss.mtx.Unlock()
+
+       go func() {
+               for {
+                       nConn, err := listener.Accept()
+                       if err != nil && strings.Contains(err.Error(), "use of closed network connection") && ss.closed {
+                               return
+                       } else if err != nil {
+                               log.Printf("accept: %s", err)
+                               return
+                       }
+                       go ss.serveConn(nConn, config)
+               }
+       }()
+}
+
+func (ss *SSHService) serveConn(nConn net.Conn, config *ssh.ServerConfig) {
+       defer nConn.Close()
+       conn, newchans, reqs, err := ssh.NewServerConn(nConn, config)
+       if err != nil {
+               log.Printf("ssh.NewServerConn: %s", err)
+               return
+       }
+       defer conn.Close()
+       go ssh.DiscardRequests(reqs)
+       for newch := range newchans {
+               if newch.ChannelType() != "session" {
+                       newch.Reject(ssh.UnknownChannelType, "unknown channel type")
+                       continue
+               }
+               ch, reqs, err := newch.Accept()
+               if err != nil {
+                       log.Printf("accept channel: %s", err)
+                       return
+               }
+               didExec := false
+               sessionEnv := map[string]string{}
+               go func() {
+                       for req := range reqs {
+                               switch {
+                               case didExec:
+                                       // Reject anything after exec
+                                       req.Reply(false, nil)
+                               case req.Type == "exec":
+                                       var execReq struct {
+                                               Command string
+                                       }
+                                       req.Reply(true, nil)
+                                       ssh.Unmarshal(req.Payload, &execReq)
+                                       go func() {
+                                               var resp struct {
+                                                       Status uint32
+                                               }
+                                               resp.Status = ss.Exec(sessionEnv, execReq.Command, ch, ch, ch.Stderr())
+                                               ch.SendRequest("exit-status", false, ssh.Marshal(&resp))
+                                               ch.Close()
+                                       }()
+                                       didExec = true
+                               case req.Type == "env":
+                                       var envReq struct {
+                                               Name  string
+                                               Value string
+                                       }
+                                       req.Reply(true, nil)
+                                       ssh.Unmarshal(req.Payload, &envReq)
+                                       sessionEnv[envReq.Name] = envReq.Value
+                               }
+                       }
+               }()
+       }
+}
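
A minimal sketch of standing the service up outside the executor
tests, assuming host and client keys like the ones checked in below;
the helper function is hypothetical, not part of this commit:

package test_test

import (
	"io"

	"git.curoverse.com/arvados.git/lib/dispatchcloud/test"
	"golang.org/x/crypto/ssh"
)

func startEchoServer(hostKey ssh.Signer, clientKey ssh.PublicKey) (*test.SSHService, error) {
	srv := &test.SSHService{
		// Exec handles each "exec" session; this one echoes stdin
		// to stdout and reports exit status 0.
		Exec: func(env map[string]string, cmd string, stdin io.Reader, stdout, stderr io.Writer) uint32 {
			io.Copy(stdout, stdin)
			return 0
		},
		HostKey:        hostKey,
		AuthorizedUser: "username",
		AuthorizedKeys: []ssh.PublicKey{clientKey},
	}
	if err := srv.Start(); err != nil {
		return nil, err
	}
	// srv.Address() now reports the listening host:port.
	return srv, nil
}
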
diff --git a/lib/dispatchcloud/test/sshkey_dispatch b/lib/dispatchcloud/test/sshkey_dispatch
new file mode 100644 (file)
index 0000000..5584519
--- /dev/null
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAqYm4XsQHm8sBSZFwUX5VeW1OkGsfoNzcGPG2nzzYRhNhClYZ
+0ABHhUk82HkaC/8l6d/jpYTf42HrK42nNQ0r0Yzs7qw8yZMQioK4Yk+kFyVLF78E
+GRG4pGAWXFs6pUchs/lm8fo9zcda4R3XeqgI+NO+nEERXmdRJa1FhI+Za3/S/+CV
+mg+6O00wZz2+vKmDPptGN4MCKmQOCKsMJts7wSZGyVcTtdNv7jjfr6yPAIOIL8X7
+LtarBCFaK/pD7uWll/Uj7h7D8K48nIZUrvBJJjXL8Sm4LxCNoz3Z83k8J5ZzuDRD
+gRiQe/C085mhO6VL+2fypDLwcKt1tOL8fI81MwIDAQABAoIBACR3tEnmHsDbNOav
+Oxq8cwRQh9K2yDHg8BMJgz/TZa4FIx2HEbxVIw0/iLADtJ+Z/XzGJQCIiWQuvtg6
+exoFQESt7JUWRWkSkj9JCQJUoTY9Vl7APtBpqG7rIEQzd3TvzQcagZNRQZQO6rR7
+p8sBdBSZ72lK8cJ9tM3G7Kor/VNK7KgRZFNhEWnmvEa3qMd4hzDcQ4faOn7C9NZK
+dwJAuJVVfwOLlOORYcyEkvksLaDOK2DsB/p0AaCpfSmThRbBKN5fPXYaKgUdfp3w
+70Hpp27WWymb1cgjyqSH3DY+V/kvid+5QxgxCBRq865jPLn3FFT9bWEVS/0wvJRj
+iMIRrjECgYEA4Ffv9rBJXqVXonNQbbstd2PaprJDXMUy9/UmfHL6pkq1xdBeuM7v
+yf2ocXheA8AahHtIOhtgKqwv/aRhVK0ErYtiSvIk+tXG+dAtj/1ZAKbKiFyxjkZV
+X72BH7cTlR6As5SRRfWM/HaBGEgED391gKsI5PyMdqWWdczT5KfxAksCgYEAwXYE
+ewPmV1GaR5fbh2RupoPnUJPMj36gJCnwls7sGaXDQIpdlq56zfKgrLocGXGgj+8f
+QH7FHTJQO15YCYebtsXWwB3++iG43gVlJlecPAydsap2CCshqNWC5JU5pan0QzsP
+exzNzWqfUPSbTkR2SRaN+MenZo2Y/WqScOAth7kCgYBgVoLujW9EXH5QfXJpXLq+
+jTvE38I7oVcs0bJwOLPYGzcJtlwmwn6IYAwohgbhV2pLv+EZSs42JPEK278MLKxY
+lgVkp60npgunFTWroqDIvdc1TZDVxvA8h9VeODEJlSqxczgbMcIUXBM9yRctTI+5
+7DiKlMUA4kTFW2sWwuOlFwKBgGXvrYS0FVbFJKm8lmvMu5D5x5RpjEu/yNnFT4Pn
+G/iXoz4Kqi2PWh3STl804UF24cd1k94D7hDoReZCW9kJnz67F+C67XMW+bXi2d1O
+JIBvlVfcHb1IHMA9YG7ZQjrMRmx2Xj3ce4RVPgUGHh8ra7gvLjd72/Tpf0doNClN
+ti/hAoGBAMW5D3LhU05LXWmOqpeT4VDgqk4MrTBcstVe7KdVjwzHrVHCAmI927vI
+pjpphWzpC9m3x4OsTNf8m+g6H7f3IiQS0aiFNtduXYlcuT5FHS2fSATTzg5PBon9
+1E6BudOve+WyFyBs7hFWAqWFBdWujAl4Qk5Ek09U2ilFEPE7RTgJ
+-----END RSA PRIVATE KEY-----
diff --git a/lib/dispatchcloud/test/sshkey_dispatch.pub b/lib/dispatchcloud/test/sshkey_dispatch.pub
new file mode 100644 (file)
index 0000000..1d5c1ea
--- /dev/null
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpibhexAebywFJkXBRflV5bU6Qax+g3NwY8bafPNhGE2EKVhnQAEeFSTzYeRoL/yXp3+OlhN/jYesrjac1DSvRjOzurDzJkxCKgrhiT6QXJUsXvwQZEbikYBZcWzqlRyGz+Wbx+j3Nx1rhHdd6qAj4076cQRFeZ1ElrUWEj5lrf9L/4JWaD7o7TTBnPb68qYM+m0Y3gwIqZA4Iqwwm2zvBJkbJVxO102/uON+vrI8Ag4gvxfsu1qsEIVor+kPu5aWX9SPuHsPwrjychlSu8EkmNcvxKbgvEI2jPdnzeTwnlnO4NEOBGJB78LTzmaE7pUv7Z/KkMvBwq3W04vx8jzUz tom@curve
diff --git a/lib/dispatchcloud/test/sshkey_vm b/lib/dispatchcloud/test/sshkey_vm
new file mode 100644 (file)
index 0000000..10b7ed1
--- /dev/null
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEApIfWk2StZGDtmunumIeXLJ46AQrbHHvuxrSAkQf6+zUwjB2I
+rse7ezBRHWcge9U5EsigixmhUM4ozFLnUQNwC862jbmsjbyA97arG/REECNlUrEB
+HQPYHhai5yyJ89AfjWVxKyINfW0K2HX1R8nl4kdVraAgpohPLh0dGjfwzm/BcXDG
++TxW9zRz0KCs9ZRI6s2MNdv08ahKQ0azk8gRTqMADJmYNWIo3zPQ+fhlwyr6EZJ/
+HFbRtjpajEPMJPwoVPO+Wj6wztfHDYKkPIrIWbhMl6w+tEKdsmygd3Iq94ktLS3X
+AbRCfn4njS2QSlkKFEepkUJWCSSWZgFn6DLm2wIDAQABAoIBAQCb137LxcTnG1h0
+L7isCWKMBKN0cU/xvwIAfOB6f1CfuVXuodrhkpZmrPFoJFKEeQbCX/6RQwmlfGDw
+iGZKOjNbO8V2oLRs3GxcNk4FAG2ny58hoD8puIZwmYhb57gTlMMOL1PuQyb78tkf
+Bzv5b6ermV3yQ4Ypt1solrMGLo6NOZD0oDX9p0Zt9kueIhjzgP0v5//T1F4PGHZK
++sLSsMiu9u6F+PB+Oc6uv0Zee9Lnts/QiWH5f18oEculjwKWFx+JwJWiLffGg2Bl
+vbpmvHFRoRWkHTpgSiLwSUqs0ZUWU9R5h11ROg5L39MLsxQoBvHsPEnP5ssN8jGt
+aH86EZjBAoGBAM+A5B/UjhIn9m05EhDTDRzI92hGhM8f7uAwobbnjvIQyZbWlBwj
+2TmgbJdpTGVbD+iTBIwKQdcFBbWobTCZsNMpghqA/ir4YIAnZ5OX9VQ1Bc+bWE7V
+dPmMVpCgyg+ERAe+79FrYWcI3vhnBpHCsY/9p9pGQIKDzlGTWNF1HJGjAoGBAMr7
+2CTVnFImTgD3E+rH4AAAfkz+cyqfK6BUhli/NifFYZhWCs16r9QCGSORnp4gPhMY
+3mf7VBs9rk123zOMo89eJt3adTgbZ+QIxXeXilGXpbT3w1+CJMaZRrIy80E1tB5/
+KvDZcrZ78o8XWMNUa+9k55ukvgyC24ICAmOIWNlpAoGBALEFvphBF2r52MtZUsYz
+pw4VjKvS7V5eWcW891k4tsRf+frK2NQg6SK2b63EUT5ur2W0dr6ZyY2MZVCSfYRm
+uWmMEchWn389IeZyt3Q8wTize1+foXivtflm9jqwUXFnXzpUc/du6kuiT8YO7pXP
+SPgUZ+xY3pP5qjwBvlYC2PqNAoGAZ1CKMi1bdGC0wT8BLzXuqHGX136HhcEgRmnf
+O5qPaOzJAO2CcBWrGuC6hOUgc+F7VuMIiKpeo8LgTeNcNfO2iNymMbN4iEdCuMlS
+IM3MBD2IhTS6h4lJSKBJYHgYYi+AbylQ5Of4wDMUQYqjjkAQ8/dK/2h5pwqPyXtW
+VezXNEkCgYEAq4S0++y9tjlLn+w9BIkmx3bAVRDQZIzIEwxTh+jpqaUp1J0iyseJ
+71pwqQojGNF6x8GglVXa6bMrETae21WhEeHnWmzlpCWIODsYPUQ+erjDuAWi9eGk
+HLklqSEoLB8pzC6zDqjxDw+CnGERIDSaoaeoWiNKZ95IH1WiEwYjuxU=
+-----END RSA PRIVATE KEY-----
diff --git a/lib/dispatchcloud/test/sshkey_vm.pub b/lib/dispatchcloud/test/sshkey_vm.pub
new file mode 100644 (file)
index 0000000..b9d44c9
--- /dev/null
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkh9aTZK1kYO2a6e6Yh5csnjoBCtsce+7GtICRB/r7NTCMHYiux7t7MFEdZyB71TkSyKCLGaFQzijMUudRA3ALzraNuayNvID3tqsb9EQQI2VSsQEdA9geFqLnLInz0B+NZXErIg19bQrYdfVHyeXiR1WtoCCmiE8uHR0aN/DOb8FxcMb5PFb3NHPQoKz1lEjqzYw12/TxqEpDRrOTyBFOowAMmZg1YijfM9D5+GXDKvoRkn8cVtG2OlqMQ8wk/ChU875aPrDO18cNgqQ8ishZuEyXrD60Qp2ybKB3cir3iS0tLdcBtEJ+fieNLZBKWQoUR6mRQlYJJJZmAWfoMubb tom@curve
diff --git a/lib/dispatchcloud/test/stub_driver.go b/lib/dispatchcloud/test/stub_driver.go
new file mode 100644 (file)
index 0000000..4df39d0
--- /dev/null
@@ -0,0 +1,402 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package test
+
+import (
+       "crypto/rand"
+       "encoding/json"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       math_rand "math/rand"
+       "regexp"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/sirupsen/logrus"
+       "golang.org/x/crypto/ssh"
+)
+
+// A StubDriver implements cloud.Driver by setting up local SSH
+// servers that do fake command executions.
+type StubDriver struct {
+       HostKey        ssh.Signer
+       AuthorizedKeys []ssh.PublicKey
+
+       // SetupVM, if set, is called upon creation of each new
+       // StubVM. This is the caller's opportunity to customize the
+       // VM's error rate and other behaviors.
+       SetupVM func(*StubVM)
+
+       // StubVM's fake crunch-run uses this Queue to read and update
+       // container state.
+       Queue *Queue
+
+       // Frequency of artificially introduced errors on calls to
+       // Destroy. 0=always succeed, 1=always fail.
+       ErrorRateDestroy float64
+
+       // If Create() or Instances() is called too frequently, return
+       // rate-limiting errors.
+       MinTimeBetweenCreateCalls    time.Duration
+       MinTimeBetweenInstancesCalls time.Duration
+
+       // If true, Create and Destroy calls block until Release() is
+       // called.
+       HoldCloudOps bool
+
+       instanceSets []*StubInstanceSet
+       holdCloudOps chan bool
+}
+
+// InstanceSet returns a new *StubInstanceSet.
+func (sd *StubDriver) InstanceSet(params json.RawMessage, id cloud.InstanceSetID, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
+       if sd.holdCloudOps == nil {
+               sd.holdCloudOps = make(chan bool)
+       }
+       sis := StubInstanceSet{
+               driver:  sd,
+               logger:  logger,
+               servers: map[cloud.InstanceID]*StubVM{},
+       }
+       sd.instanceSets = append(sd.instanceSets, &sis)
+
+       var err error
+       if params != nil {
+               err = json.Unmarshal(params, &sis)
+       }
+       return &sis, err
+}
+
+// InstanceSets returns all instances that have been created by the
+// driver. This can be used to test a component that uses the driver
+// but doesn't expose the InstanceSets it has created.
+func (sd *StubDriver) InstanceSets() []*StubInstanceSet {
+       return sd.instanceSets
+}
+
+// ReleaseCloudOps releases n pending Create/Destroy calls. If there
+// are fewer than n blocked calls pending, it waits for the rest to
+// arrive.
+func (sd *StubDriver) ReleaseCloudOps(n int) {
+       for i := 0; i < n; i++ {
+               <-sd.holdCloudOps
+       }
+}
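+
+// Editor's note: a sketch of the HoldCloudOps pattern exercised by
+// pool_test.go below; with HoldCloudOps set, Create and Destroy block
+// until released, so a test can assert on intermediate state:
+//
+//     driver := StubDriver{HoldCloudOps: true}
+//     // ... start Create/Destroy calls in the background ...
+//     go driver.ReleaseCloudOps(4) // unblock 4 pending calls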
+
+type StubInstanceSet struct {
+       driver  *StubDriver
+       logger  logrus.FieldLogger
+       servers map[cloud.InstanceID]*StubVM
+       mtx     sync.RWMutex
+       stopped bool
+
+       allowCreateCall    time.Time
+       allowInstancesCall time.Time
+}
+
+func (sis *StubInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID, tags cloud.InstanceTags, cmd cloud.InitCommand, authKey ssh.PublicKey) (cloud.Instance, error) {
+       if sis.driver.HoldCloudOps {
+               sis.driver.holdCloudOps <- true
+       }
+       sis.mtx.Lock()
+       defer sis.mtx.Unlock()
+       if sis.stopped {
+               return nil, errors.New("StubInstanceSet: Create called after Stop")
+       }
+       if sis.allowCreateCall.After(time.Now()) {
+               return nil, RateLimitError{sis.allowCreateCall}
+       } else {
+               sis.allowCreateCall = time.Now().Add(sis.driver.MinTimeBetweenCreateCalls)
+       }
+
+       ak := sis.driver.AuthorizedKeys
+       if authKey != nil {
+               ak = append([]ssh.PublicKey{authKey}, ak...)
+       }
+       svm := &StubVM{
+               sis:          sis,
+               id:           cloud.InstanceID(fmt.Sprintf("stub-%s-%x", it.ProviderType, math_rand.Int63())),
+               tags:         copyTags(tags),
+               providerType: it.ProviderType,
+               initCommand:  cmd,
+       }
+       svm.SSHService = SSHService{
+               HostKey:        sis.driver.HostKey,
+               AuthorizedUser: "root",
+               AuthorizedKeys: ak,
+               Exec:           svm.Exec,
+       }
+       if setup := sis.driver.SetupVM; setup != nil {
+               setup(svm)
+       }
+       sis.servers[svm.id] = svm
+       return svm.Instance(), nil
+}
+
+func (sis *StubInstanceSet) Instances(cloud.InstanceTags) ([]cloud.Instance, error) {
+       sis.mtx.RLock()
+       defer sis.mtx.RUnlock()
+       if sis.allowInstancesCall.After(time.Now()) {
+               return nil, RateLimitError{sis.allowInstancesCall}
+       } else {
+               sis.allowInstancesCall = time.Now().Add(sis.driver.MinTimeBetweenInstancesCalls)
+       }
+       var r []cloud.Instance
+       for _, ss := range sis.servers {
+               r = append(r, ss.Instance())
+       }
+       return r, nil
+}
+
+func (sis *StubInstanceSet) Stop() {
+       sis.mtx.Lock()
+       defer sis.mtx.Unlock()
+       if sis.stopped {
+               panic("Stop called twice")
+       }
+       sis.stopped = true
+}
+
+type RateLimitError struct{ Retry time.Time }
+
+func (e RateLimitError) Error() string            { return fmt.Sprintf("rate limited until %s", e.Retry) }
+func (e RateLimitError) EarliestRetry() time.Time { return e.Retry }
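+
+// Editor's note: callers typically detect this with a type assertion
+// against cloud.RateLimitError and hold off until EarliestRetry, as
+// worker/throttle.go does. A sketch:
+//
+//     if rle, ok := err.(cloud.RateLimitError); ok {
+//             time.Sleep(rle.EarliestRetry().Sub(time.Now()))
+//     }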
+
+// StubVM is a fake server that runs an SSH service. It represents a
+// VM running in a fake cloud.
+//
+// Note this is distinct from a stubInstance, which is a snapshot of
+// the VM's metadata. Like a VM in a real cloud, a StubVM keeps
+// running (and might change IP addresses, shut down, etc.) without
+// updating any stubInstances that have been returned to callers.
+type StubVM struct {
+       Boot                 time.Time
+       Broken               time.Time
+       CrunchRunMissing     bool
+       CrunchRunCrashRate   float64
+       CrunchRunDetachDelay time.Duration
+       ExecuteContainer     func(arvados.Container) int
+
+       sis          *StubInstanceSet
+       id           cloud.InstanceID
+       tags         cloud.InstanceTags
+       initCommand  cloud.InitCommand
+       providerType string
+       SSHService   SSHService
+       running      map[string]bool
+       sync.Mutex
+}
+
+func (svm *StubVM) Instance() stubInstance {
+       svm.Lock()
+       defer svm.Unlock()
+       return stubInstance{
+               svm:  svm,
+               addr: svm.SSHService.Address(),
+               // We deliberately return a cached/stale copy of the
+               // real tags here, so that (Instance)Tags() sometimes
+               // returns old data after a call to
+               // (Instance)SetTags().  This is permitted by the
+               // driver interface, and this might help remind
+               // callers that they need to tolerate it.
+               tags: copyTags(svm.tags),
+       }
+}
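+
+// Editor's note: a sketch of the staleness this permits:
+//
+//     inst := svm.Instance()
+//     _ = inst.SetTags(cloud.InstanceTags{"k": "new"})
+//     _ = inst.Tags() // may still report the old value of "k"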
+
+func (svm *StubVM) Exec(env map[string]string, command string, stdin io.Reader, stdout, stderr io.Writer) uint32 {
+       stdinData, err := ioutil.ReadAll(stdin)
+       if err != nil {
+               fmt.Fprintf(stderr, "error reading stdin: %s\n", err)
+               return 1
+       }
+       queue := svm.sis.driver.Queue
+       uuid := regexp.MustCompile(`.{5}-dz642-.{15}`).FindString(command)
+       if eta := svm.Boot.Sub(time.Now()); eta > 0 {
+               fmt.Fprintf(stderr, "stub is booting, ETA %s\n", eta)
+               return 1
+       }
+       if !svm.Broken.IsZero() && svm.Broken.Before(time.Now()) {
+               fmt.Fprintf(stderr, "cannot fork\n")
+               return 2
+       }
+       if svm.CrunchRunMissing && strings.Contains(command, "crunch-run") {
+               fmt.Fprint(stderr, "crunch-run: command not found\n")
+               return 1
+       }
+       if strings.HasPrefix(command, "crunch-run --detach --stdin-env ") {
+               var stdinKV map[string]string
+               err := json.Unmarshal(stdinData, &stdinKV)
+               if err != nil {
+                       fmt.Fprintf(stderr, "unmarshal stdin: %s (stdin was: %q)\n", err, stdinData)
+                       return 1
+               }
+               for _, name := range []string{"ARVADOS_API_HOST", "ARVADOS_API_TOKEN"} {
+                       if stdinKV[name] == "" {
+                               fmt.Fprintf(stderr, "%s env var missing from stdin %q\n", name, stdinData)
+                               return 1
+                       }
+               }
+               svm.Lock()
+               if svm.running == nil {
+                       svm.running = map[string]bool{}
+               }
+               svm.running[uuid] = true
+               svm.Unlock()
+               time.Sleep(svm.CrunchRunDetachDelay)
+               fmt.Fprintf(stderr, "starting %s\n", uuid)
+               logger := svm.sis.logger.WithFields(logrus.Fields{
+                       "Instance":      svm.id,
+                       "ContainerUUID": uuid,
+               })
+               logger.Printf("[test] starting crunch-run stub")
+               go func() {
+                       crashluck := math_rand.Float64()
+                       ctr, ok := queue.Get(uuid)
+                       if !ok {
+                               logger.Print("[test] container not in queue")
+                               return
+                       }
+                       if crashluck > svm.CrunchRunCrashRate/2 {
+                               time.Sleep(time.Duration(math_rand.Float64()*20) * time.Millisecond)
+                               ctr.State = arvados.ContainerStateRunning
+                               queue.Notify(ctr)
+                       }
+
+                       time.Sleep(time.Duration(math_rand.Float64()*20) * time.Millisecond)
+                       svm.Lock()
+                       _, running := svm.running[uuid]
+                       svm.Unlock()
+                       if !running {
+                               logger.Print("[test] container was killed")
+                               return
+                       }
+                       if svm.ExecuteContainer != nil {
+                               ctr.ExitCode = svm.ExecuteContainer(ctr)
+                       }
+                       // TODO: Check whether the stub instance has
+                       // been destroyed, and if so, don't call
+                       // queue.Notify. Then "container finished
+                       // twice" can be classified as a bug.
+                       if crashluck < svm.CrunchRunCrashRate {
+                               logger.Print("[test] crashing crunch-run stub")
+                       } else {
+                               ctr.State = arvados.ContainerStateComplete
+                               queue.Notify(ctr)
+                       }
+                       logger.Print("[test] exiting crunch-run stub")
+                       svm.Lock()
+                       defer svm.Unlock()
+                       delete(svm.running, uuid)
+               }()
+               return 0
+       }
+       if command == "crunch-run --list" {
+               svm.Lock()
+               defer svm.Unlock()
+               for uuid := range svm.running {
+                       fmt.Fprintf(stdout, "%s\n", uuid)
+               }
+               return 0
+       }
+       if strings.HasPrefix(command, "crunch-run --kill ") {
+               svm.Lock()
+               defer svm.Unlock()
+               if svm.running[uuid] {
+                       delete(svm.running, uuid)
+               } else {
+                       fmt.Fprintf(stderr, "%s: container is not running\n", uuid)
+               }
+               return 0
+       }
+       if command == "true" {
+               return 0
+       }
+       fmt.Fprintf(stderr, "%q: command not found\n", command)
+       return 1
+}
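+
+// Editor's note: the fake "crunch-run --detach --stdin-env" branch
+// above expects a JSON object of environment variables on stdin, at
+// minimum (values here are placeholders):
+//
+//     {"ARVADOS_API_HOST": "zzzzz.arvadosapi.com", "ARVADOS_API_TOKEN": "examplesecret"}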
+
+type stubInstance struct {
+       svm  *StubVM
+       addr string
+       tags cloud.InstanceTags
+}
+
+func (si stubInstance) ID() cloud.InstanceID {
+       return si.svm.id
+}
+
+func (si stubInstance) Address() string {
+       return si.addr
+}
+
+func (si stubInstance) RemoteUser() string {
+       return si.svm.SSHService.AuthorizedUser
+}
+
+func (si stubInstance) Destroy() error {
+       sis := si.svm.sis
+       if sis.driver.HoldCloudOps {
+               sis.driver.holdCloudOps <- true
+       }
+       if math_rand.Float64() < si.svm.sis.driver.ErrorRateDestroy {
+               return errors.New("instance could not be destroyed")
+       }
+       si.svm.SSHService.Close()
+       sis.mtx.Lock()
+       defer sis.mtx.Unlock()
+       delete(sis.servers, si.svm.id)
+       return nil
+}
+
+func (si stubInstance) ProviderType() string {
+       return si.svm.providerType
+}
+
+func (si stubInstance) SetTags(tags cloud.InstanceTags) error {
+       tags = copyTags(tags)
+       svm := si.svm
+       go func() {
+               svm.Lock()
+               defer svm.Unlock()
+               svm.tags = tags
+       }()
+       return nil
+}
+
+func (si stubInstance) Tags() cloud.InstanceTags {
+       // Return a copy to ensure a caller can't change our saved
+       // tags just by writing to the returned map.
+       return copyTags(si.tags)
+}
+
+func (si stubInstance) String() string {
+       return string(si.svm.id)
+}
+
+func (si stubInstance) VerifyHostKey(key ssh.PublicKey, client *ssh.Client) error {
+       buf := make([]byte, 512)
+       _, err := io.ReadFull(rand.Reader, buf)
+       if err != nil {
+               return err
+       }
+       sig, err := si.svm.sis.driver.HostKey.Sign(rand.Reader, buf)
+       if err != nil {
+               return err
+       }
+       return key.Verify(buf, sig)
+}
+
+func copyTags(src cloud.InstanceTags) cloud.InstanceTags {
+       dst := cloud.InstanceTags{}
+       for k, v := range src {
+               dst[k] = v
+       }
+       return dst
+}
diff --git a/lib/dispatchcloud/worker/gocheck_test.go b/lib/dispatchcloud/worker/gocheck_test.go
new file mode 100644 (file)
index 0000000..b4ca66c
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package worker
+
+import (
+       "testing"
+
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
diff --git a/lib/dispatchcloud/worker/pool.go b/lib/dispatchcloud/worker/pool.go
new file mode 100644 (file)
index 0000000..e81c2c0
--- /dev/null
@@ -0,0 +1,807 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package worker
+
+import (
+       "crypto/rand"
+       "errors"
+       "fmt"
+       "io"
+       "sort"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/prometheus/client_golang/prometheus"
+       "github.com/sirupsen/logrus"
+       "golang.org/x/crypto/ssh"
+)
+
+const (
+       tagKeyInstanceType   = "InstanceType"
+       tagKeyIdleBehavior   = "IdleBehavior"
+       tagKeyInstanceSecret = "InstanceSecret"
+)
+
+// An InstanceView shows a worker's current state and recent activity.
+type InstanceView struct {
+       Instance             cloud.InstanceID `json:"instance"`
+       Address              string           `json:"address"`
+       Price                float64          `json:"price"`
+       ArvadosInstanceType  string           `json:"arvados_instance_type"`
+       ProviderInstanceType string           `json:"provider_instance_type"`
+       LastContainerUUID    string           `json:"last_container_uuid"`
+       LastBusy             time.Time        `json:"last_busy"`
+       WorkerState          string           `json:"worker_state"`
+       IdleBehavior         IdleBehavior     `json:"idle_behavior"`
+}
+
+// An Executor executes shell commands on a remote host.
+type Executor interface {
+       // Run cmd on the current target.
+       Execute(env map[string]string, cmd string, stdin io.Reader) (stdout, stderr []byte, err error)
+
+       // Use the given target for subsequent operations. The new
+       // target is the same host as the previous target, but it
+       // might return a different address and verify a different
+       // host key.
+       //
+       // SetTarget is called frequently, and in most cases the new
+       // target will behave exactly the same as the old one. An
+       // implementation should optimize accordingly.
+       //
+       // SetTarget must not block on concurrent Execute calls.
+       SetTarget(cloud.ExecutorTarget)
+
+       Close()
+}
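+
+// Editor's note: a minimal sketch of an Executor for tests (the
+// stubExecutor referenced by pool_test.go is defined in another file;
+// the names below are illustrative):
+//
+//     type mapExecutor map[string]string // command -> canned stdout
+//
+//     func (e mapExecutor) Execute(env map[string]string, cmd string, stdin io.Reader) ([]byte, []byte, error) {
+//             out, ok := e[cmd]
+//             if !ok {
+//                     return nil, []byte("command not found"), errors.New("exit 1")
+//             }
+//             return []byte(out), nil, nil
+//     }
+//     func (mapExecutor) SetTarget(cloud.ExecutorTarget) {}
+//     func (mapExecutor) Close()                         {}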
+
+const (
+       defaultSyncInterval       = time.Minute
+       defaultProbeInterval      = time.Second * 10
+       defaultMaxProbesPerSecond = 10
+       defaultTimeoutIdle        = time.Minute
+       defaultTimeoutBooting     = time.Minute * 10
+       defaultTimeoutProbe       = time.Minute * 10
+       defaultTimeoutShutdown    = time.Second * 10
+
+       // Time after a quota error to try again anyway, even if no
+       // instances have been shut down.
+       quotaErrorTTL = time.Minute
+
+       // Time between "X failed because rate limiting" messages
+       logRateLimitErrorInterval = time.Second * 10
+)
+
+func duration(conf arvados.Duration, def time.Duration) time.Duration {
+       if conf > 0 {
+               return time.Duration(conf)
+       } else {
+               return def
+       }
+}
+
+// NewPool creates a Pool of workers backed by instanceSet.
+//
+// New instances are configured and set up according to the given
+// cluster configuration.
+func NewPool(logger logrus.FieldLogger, arvClient *arvados.Client, reg *prometheus.Registry, instanceSet cloud.InstanceSet, newExecutor func(cloud.Instance) Executor, installPublicKey ssh.PublicKey, cluster *arvados.Cluster) *Pool {
+       wp := &Pool{
+               logger:             logger,
+               arvClient:          arvClient,
+               instanceSet:        &throttledInstanceSet{InstanceSet: instanceSet},
+               newExecutor:        newExecutor,
+               bootProbeCommand:   cluster.CloudVMs.BootProbeCommand,
+               imageID:            cloud.ImageID(cluster.CloudVMs.ImageID),
+               instanceTypes:      cluster.InstanceTypes,
+               maxProbesPerSecond: cluster.Dispatch.MaxProbesPerSecond,
+               probeInterval:      duration(cluster.Dispatch.ProbeInterval, defaultProbeInterval),
+               syncInterval:       duration(cluster.CloudVMs.SyncInterval, defaultSyncInterval),
+               timeoutIdle:        duration(cluster.CloudVMs.TimeoutIdle, defaultTimeoutIdle),
+               timeoutBooting:     duration(cluster.CloudVMs.TimeoutBooting, defaultTimeoutBooting),
+               timeoutProbe:       duration(cluster.CloudVMs.TimeoutProbe, defaultTimeoutProbe),
+               timeoutShutdown:    duration(cluster.CloudVMs.TimeoutShutdown, defaultTimeoutShutdown),
+               installPublicKey:   installPublicKey,
+               stop:               make(chan bool),
+       }
+       wp.registerMetrics(reg)
+       go func() {
+               wp.setupOnce.Do(wp.setup)
+               go wp.runMetrics()
+               go wp.runProbes()
+               go wp.runSync()
+       }()
+       return wp
+}
+
+// Pool is a resizable worker pool backed by a cloud.InstanceSet. A
+// zero Pool should not be used. Call NewPool to create a new Pool.
+type Pool struct {
+       // configuration
+       logger             logrus.FieldLogger
+       arvClient          *arvados.Client
+       instanceSet        *throttledInstanceSet
+       newExecutor        func(cloud.Instance) Executor
+       bootProbeCommand   string
+       imageID            cloud.ImageID
+       instanceTypes      map[string]arvados.InstanceType
+       syncInterval       time.Duration
+       probeInterval      time.Duration
+       maxProbesPerSecond int
+       timeoutIdle        time.Duration
+       timeoutBooting     time.Duration
+       timeoutProbe       time.Duration
+       timeoutShutdown    time.Duration
+       installPublicKey   ssh.PublicKey
+
+       // private state
+       subscribers  map[<-chan struct{}]chan<- struct{}
+       creating     map[string]createCall // unfinished (cloud.InstanceSet)Create calls (key is instance secret)
+       workers      map[cloud.InstanceID]*worker
+       loaded       bool                 // loaded list of instances from InstanceSet at least once
+       exited       map[string]time.Time // containers whose crunch-run proc has exited, but KillContainer has not been called
+       atQuotaUntil time.Time
+       atQuotaErr   cloud.QuotaError
+       stop         chan bool
+       mtx          sync.RWMutex
+       setupOnce    sync.Once
+
+       throttleCreate    throttle
+       throttleInstances throttle
+
+       mContainersRunning prometheus.Gauge
+       mInstances         *prometheus.GaugeVec
+       mInstancesPrice    *prometheus.GaugeVec
+       mVCPUs             *prometheus.GaugeVec
+       mMemory            *prometheus.GaugeVec
+}
+
+type createCall struct {
+       time         time.Time
+       instanceType arvados.InstanceType
+}
+
+// Subscribe returns a buffered channel that becomes ready after any
+// change to the pool's state that could have scheduling implications:
+// a worker's state changes, a new worker appears, the cloud
+// provider's API rate limiting period ends, etc.
+//
+// Additional events that occur while the channel is already ready
+// will be dropped, so it is OK if the caller services the channel
+// slowly.
+//
+// Example:
+//
+//     ch := wp.Subscribe()
+//     defer wp.Unsubscribe(ch)
+//     for range ch {
+//             tryScheduling(wp)
+//             if done {
+//                     break
+//             }
+//     }
+func (wp *Pool) Subscribe() <-chan struct{} {
+       wp.setupOnce.Do(wp.setup)
+       wp.mtx.Lock()
+       defer wp.mtx.Unlock()
+       ch := make(chan struct{}, 1)
+       wp.subscribers[ch] = ch
+       return ch
+}
+
+// Unsubscribe stops sending updates to the given channel.
+func (wp *Pool) Unsubscribe(ch <-chan struct{}) {
+       wp.setupOnce.Do(wp.setup)
+       wp.mtx.Lock()
+       defer wp.mtx.Unlock()
+       delete(wp.subscribers, ch)
+}
+
+// Unallocated returns the number of unallocated (creating + booting +
+// idle + unknown) workers for each instance type.  Workers in
+// hold/drain mode are not included.
+func (wp *Pool) Unallocated() map[arvados.InstanceType]int {
+       wp.setupOnce.Do(wp.setup)
+       wp.mtx.RLock()
+       defer wp.mtx.RUnlock()
+       unalloc := map[arvados.InstanceType]int{}
+       creating := map[arvados.InstanceType]int{}
+       oldestCreate := map[arvados.InstanceType]time.Time{}
+       for _, cc := range wp.creating {
+               it := cc.instanceType
+               creating[it]++
+               if t, ok := oldestCreate[it]; !ok || t.After(cc.time) {
+                       oldestCreate[it] = cc.time
+               }
+       }
+       for _, wkr := range wp.workers {
+               // Skip workers that are not expected to become
+               // available soon. Note len(wkr.running)>0 is not
+               // redundant here: it can be true even in
+               // StateUnknown.
+               if wkr.state == StateShutdown ||
+                       wkr.state == StateRunning ||
+                       wkr.idleBehavior != IdleBehaviorRun ||
+                       len(wkr.running) > 0 {
+                       continue
+               }
+               it := wkr.instType
+               unalloc[it]++
+               if wkr.state == StateUnknown && creating[it] > 0 && wkr.appeared.After(oldestCreate[it]) {
+                       // If up to N new workers appear in
+                       // Instances() while we are waiting for N
+                       // Create() calls to complete, we assume we're
+                       // just seeing a race between Instances() and
+                       // Create() responses.
+                       //
+                       // The other common reason why nodes have
+                       // state==Unknown is that they appeared at
+                       // startup, before any Create calls. They
+                       // don't match the above timing condition, so
+                       // we never mistakenly attribute them to
+                       // pending Create calls.
+                       creating[it]--
+               }
+       }
+       for it, c := range creating {
+               unalloc[it] += c
+       }
+       return unalloc
+}
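+
+// Editor's note: a worked example of the adjustment above. Suppose
+// two Create() calls for type t are pending and three idle t workers
+// are counted, one of which is in StateUnknown and appeared after the
+// oldest pending Create. That worker is assumed to correspond to a
+// pending Create, so creating[t] drops from 2 to 1 and
+// Unallocated()[t] reports 3+1=4 instead of double-counting to 5.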
+
+// Create a new instance with the given type, and add it to the worker
+// pool. The worker is added immediately; instance creation runs in
+// the background.
+//
+// Create returns false if a pre-existing error state prevents it from
+// even attempting to create a new instance. Those errors are logged
+// by the Pool, so the caller does not need to log anything in such
+// cases.
+func (wp *Pool) Create(it arvados.InstanceType) bool {
+       logger := wp.logger.WithField("InstanceType", it.Name)
+       wp.setupOnce.Do(wp.setup)
+       wp.mtx.Lock()
+       defer wp.mtx.Unlock()
+       if time.Now().Before(wp.atQuotaUntil) || wp.throttleCreate.Error() != nil {
+               return false
+       }
+       now := time.Now()
+       secret := randomHex(instanceSecretLength)
+       wp.creating[secret] = createCall{time: now, instanceType: it}
+       go func() {
+               defer wp.notify()
+               tags := cloud.InstanceTags{
+                       tagKeyInstanceType:   it.Name,
+                       tagKeyIdleBehavior:   string(IdleBehaviorRun),
+                       tagKeyInstanceSecret: secret,
+               }
+               initCmd := cloud.InitCommand(fmt.Sprintf("umask 0177 && echo -n %q >%s", secret, instanceSecretFilename))
+               inst, err := wp.instanceSet.Create(it, wp.imageID, tags, initCmd, wp.installPublicKey)
+               wp.mtx.Lock()
+               defer wp.mtx.Unlock()
+               // delete() is deferred so the updateWorker() call
+               // below knows to use StateBooting when adding a new
+               // worker.
+               defer delete(wp.creating, secret)
+               if err != nil {
+                       if err, ok := err.(cloud.QuotaError); ok && err.IsQuotaError() {
+                               wp.atQuotaErr = err
+                               wp.atQuotaUntil = time.Now().Add(quotaErrorTTL)
+                               time.AfterFunc(quotaErrorTTL, wp.notify)
+                       }
+                       logger.WithError(err).Error("create failed")
+                       wp.instanceSet.throttleCreate.CheckRateLimitError(err, wp.logger, "create instance", wp.notify)
+                       return
+               }
+               wp.updateWorker(inst, it)
+       }()
+       return true
+}
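+
+// Editor's note: for illustration, with secret "abc123" the init
+// command built above expands to
+//
+//     umask 0177 && echo -n "abc123" >/var/run/arvados-instance-secret
+//
+// and the same secret is stored in the InstanceSecret tag, which
+// worker/verify.go later checks over SSH.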
+
+// AtQuota returns true if Create is not expected to work at the
+// moment.
+func (wp *Pool) AtQuota() bool {
+       wp.mtx.Lock()
+       defer wp.mtx.Unlock()
+       return time.Now().Before(wp.atQuotaUntil)
+}
+
+// SetIdleBehavior determines how the indicated instance will behave
+// when it has no containers running.
+func (wp *Pool) SetIdleBehavior(id cloud.InstanceID, idleBehavior IdleBehavior) error {
+       wp.mtx.Lock()
+       defer wp.mtx.Unlock()
+       wkr, ok := wp.workers[id]
+       if !ok {
+               return errors.New("requested instance does not exist")
+       }
+       wkr.idleBehavior = idleBehavior
+       wkr.saveTags()
+       wkr.shutdownIfIdle()
+       return nil
+}
+
+// Add or update worker attached to the given instance.
+//
+// The second return value is true if a new worker is created.
+//
+// A newly added instance has state=StateBooting if its tags match an
+// entry in wp.creating, otherwise StateUnknown.
+//
+// Caller must have lock.
+func (wp *Pool) updateWorker(inst cloud.Instance, it arvados.InstanceType) (*worker, bool) {
+       inst = tagVerifier{inst}
+       id := inst.ID()
+       if wkr := wp.workers[id]; wkr != nil {
+               wkr.executor.SetTarget(inst)
+               wkr.instance = inst
+               wkr.updated = time.Now()
+               wkr.saveTags()
+               return wkr, false
+       }
+
+       state := StateUnknown
+       if _, ok := wp.creating[inst.Tags()[tagKeyInstanceSecret]]; ok {
+               state = StateBooting
+       }
+
+       // If an instance has a valid IdleBehavior tag when it first
+       // appears, initialize the new worker accordingly (this is how
+       // we restore IdleBehavior that was set by a prior dispatch
+       // process); otherwise, default to "run". After this,
+       // wkr.idleBehavior is the source of truth, and will only be
+       // changed via SetIdleBehavior().
+       idleBehavior := IdleBehavior(inst.Tags()[tagKeyIdleBehavior])
+       if !validIdleBehavior[idleBehavior] {
+               idleBehavior = IdleBehaviorRun
+       }
+
+       logger := wp.logger.WithFields(logrus.Fields{
+               "InstanceType": it.Name,
+               "Instance":     inst.ID(),
+               "Address":      inst.Address(),
+       })
+       logger.WithFields(logrus.Fields{
+               "State":        state,
+               "IdleBehavior": idleBehavior,
+       }).Info("instance appeared in cloud")
+       now := time.Now()
+       wkr := &worker{
+               mtx:          &wp.mtx,
+               wp:           wp,
+               logger:       logger,
+               executor:     wp.newExecutor(inst),
+               state:        state,
+               idleBehavior: idleBehavior,
+               instance:     inst,
+               instType:     it,
+               appeared:     now,
+               probed:       now,
+               busy:         now,
+               updated:      now,
+               running:      make(map[string]struct{}),
+               starting:     make(map[string]struct{}),
+               probing:      make(chan struct{}, 1),
+       }
+       wp.workers[id] = wkr
+       return wkr, true
+}
+
+// caller must have lock.
+func (wp *Pool) notifyExited(uuid string, t time.Time) {
+       wp.exited[uuid] = t
+}
+
+// Shutdown shuts down a worker with the given type, or returns false
+// if all workers with the given type are busy.
+func (wp *Pool) Shutdown(it arvados.InstanceType) bool {
+       wp.setupOnce.Do(wp.setup)
+       wp.mtx.Lock()
+       defer wp.mtx.Unlock()
+       logger := wp.logger.WithField("InstanceType", it.Name)
+       logger.Info("shutdown requested")
+       for _, tryState := range []State{StateBooting, StateIdle} {
+               // TODO: shut down the worker with the longest idle
+               // time (Idle) or the earliest create time (Booting)
+               for _, wkr := range wp.workers {
+                       if wkr.idleBehavior != IdleBehaviorHold && wkr.state == tryState && wkr.instType == it {
+                               logger.WithField("Instance", wkr.instance).Info("shutting down")
+                               wkr.shutdown()
+                               return true
+                       }
+               }
+       }
+       return false
+}
+
+// CountWorkers returns the current number of workers in each state.
+func (wp *Pool) CountWorkers() map[State]int {
+       wp.setupOnce.Do(wp.setup)
+       wp.mtx.Lock()
+       defer wp.mtx.Unlock()
+       r := map[State]int{}
+       for _, w := range wp.workers {
+               r[w.state]++
+       }
+       return r
+}
+
+// Running returns the container UUIDs being prepared/run on workers.
+//
+// In the returned map, the time value indicates when the Pool
+// observed that the container process had exited. A container that
+// has not yet exited has a zero time value. The caller should use
+// KillContainer() to garbage-collect the entries for exited
+// containers.
+func (wp *Pool) Running() map[string]time.Time {
+       wp.setupOnce.Do(wp.setup)
+       wp.mtx.Lock()
+       defer wp.mtx.Unlock()
+       r := map[string]time.Time{}
+       for _, wkr := range wp.workers {
+               for uuid := range wkr.running {
+                       r[uuid] = time.Time{}
+               }
+               for uuid := range wkr.starting {
+                       r[uuid] = time.Time{}
+               }
+       }
+       for uuid, exited := range wp.exited {
+               r[uuid] = exited
+       }
+       return r
+}
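+
+// Editor's note: a sketch of the garbage-collection contract
+// described above:
+//
+//     for uuid, exited := range wp.Running() {
+//             if !exited.IsZero() {
+//                     wp.KillContainer(uuid) // clears the exited placeholder
+//             }
+//     }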
+
+// StartContainer starts a container on an idle worker immediately if
+// possible, otherwise returns false.
+func (wp *Pool) StartContainer(it arvados.InstanceType, ctr arvados.Container) bool {
+       wp.setupOnce.Do(wp.setup)
+       wp.mtx.Lock()
+       defer wp.mtx.Unlock()
+       var wkr *worker
+       for _, w := range wp.workers {
+               if w.instType == it && w.state == StateIdle {
+                       if wkr == nil || w.busy.After(wkr.busy) {
+                               wkr = w
+                       }
+               }
+       }
+       if wkr == nil {
+               return false
+       }
+       wkr.startContainer(ctr)
+       return true
+}
+
+// KillContainer kills the crunch-run process for the given container
+// UUID, if it's running on any worker.
+//
+// KillContainer returns immediately; the act of killing the container
+// takes some time, and runs in the background.
+func (wp *Pool) KillContainer(uuid string) {
+       wp.mtx.Lock()
+       defer wp.mtx.Unlock()
+       if _, ok := wp.exited[uuid]; ok {
+               wp.logger.WithField("ContainerUUID", uuid).Debug("clearing placeholder for exited crunch-run process")
+               delete(wp.exited, uuid)
+               return
+       }
+       for _, wkr := range wp.workers {
+               if _, ok := wkr.running[uuid]; ok {
+                       go wp.kill(wkr, uuid)
+                       return
+               }
+       }
+       wp.logger.WithField("ContainerUUID", uuid).Debug("cannot kill: already disappeared")
+}
+
+func (wp *Pool) kill(wkr *worker, uuid string) {
+       logger := wp.logger.WithFields(logrus.Fields{
+               "ContainerUUID": uuid,
+               "Instance":      wkr.instance.ID(),
+       })
+       logger.Debug("killing process")
+       cmd := "crunch-run --kill 15 " + uuid
+       if u := wkr.instance.RemoteUser(); u != "root" {
+               cmd = "sudo " + cmd
+       }
+       stdout, stderr, err := wkr.executor.Execute(nil, cmd, nil)
+       if err != nil {
+               logger.WithFields(logrus.Fields{
+                       "stderr": string(stderr),
+                       "stdout": string(stdout),
+                       "error":  err,
+               }).Warn("kill failed")
+               return
+       }
+       logger.Debug("killing process succeeded")
+       wp.mtx.Lock()
+       defer wp.mtx.Unlock()
+       if _, ok := wkr.running[uuid]; ok {
+               delete(wkr.running, uuid)
+               if wkr.state == StateRunning && len(wkr.running)+len(wkr.starting) == 0 {
+                       wkr.state = StateIdle
+               }
+               wkr.updated = time.Now()
+               go wp.notify()
+       }
+}
+
+func (wp *Pool) registerMetrics(reg *prometheus.Registry) {
+       if reg == nil {
+               reg = prometheus.NewRegistry()
+       }
+       wp.mContainersRunning = prometheus.NewGauge(prometheus.GaugeOpts{
+               Namespace: "arvados",
+               Subsystem: "dispatchcloud",
+               Name:      "containers_running",
+               Help:      "Number of containers reported running by cloud VMs.",
+       })
+       reg.MustRegister(wp.mContainersRunning)
+       wp.mInstances = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+               Namespace: "arvados",
+               Subsystem: "dispatchcloud",
+               Name:      "instances_total",
+               Help:      "Number of cloud VMs.",
+       }, []string{"category"})
+       reg.MustRegister(wp.mInstances)
+       wp.mInstancesPrice = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+               Namespace: "arvados",
+               Subsystem: "dispatchcloud",
+               Name:      "instances_price",
+               Help:      "Price of cloud VMs.",
+       }, []string{"category"})
+       reg.MustRegister(wp.mInstancesPrice)
+       wp.mVCPUs = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+               Namespace: "arvados",
+               Subsystem: "dispatchcloud",
+               Name:      "vcpus_total",
+               Help:      "Total VCPUs on all cloud VMs.",
+       }, []string{"category"})
+       reg.MustRegister(wp.mVCPUs)
+       wp.mMemory = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+               Namespace: "arvados",
+               Subsystem: "dispatchcloud",
+               Name:      "memory_bytes_total",
+               Help:      "Total memory on all cloud VMs.",
+       }, []string{"category"})
+       reg.MustRegister(wp.mMemory)
+}
+
+func (wp *Pool) runMetrics() {
+       ch := wp.Subscribe()
+       defer wp.Unsubscribe(ch)
+       wp.updateMetrics()
+       for range ch {
+               wp.updateMetrics()
+       }
+}
+
+func (wp *Pool) updateMetrics() {
+       wp.mtx.RLock()
+       defer wp.mtx.RUnlock()
+
+       instances := map[string]int64{}
+       price := map[string]float64{}
+       cpu := map[string]int64{}
+       mem := map[string]int64{}
+       var running int64
+       for _, wkr := range wp.workers {
+               var cat string
+               switch {
+               case len(wkr.running)+len(wkr.starting) > 0:
+                       cat = "inuse"
+               case wkr.idleBehavior == IdleBehaviorHold:
+                       cat = "hold"
+               case wkr.state == StateBooting:
+                       cat = "booting"
+               case wkr.state == StateUnknown:
+                       cat = "unknown"
+               default:
+                       cat = "idle"
+               }
+               instances[cat]++
+               price[cat] += wkr.instType.Price
+               cpu[cat] += int64(wkr.instType.VCPUs)
+               mem[cat] += int64(wkr.instType.RAM)
+               running += int64(len(wkr.running) + len(wkr.starting))
+       }
+       for _, cat := range []string{"inuse", "hold", "booting", "unknown", "idle"} {
+               wp.mInstances.WithLabelValues(cat).Set(float64(instances[cat]))
+               wp.mInstancesPrice.WithLabelValues(cat).Set(price[cat])
+               wp.mVCPUs.WithLabelValues(cat).Set(float64(cpu[cat]))
+               wp.mMemory.WithLabelValues(cat).Set(float64(mem[cat]))
+       }
+       wp.mContainersRunning.Set(float64(running))
+}
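+
+// Editor's note: a sketch of exposing these metrics over HTTP,
+// assuming the same *prometheus.Registry that was passed to NewPool
+// (net/http and promhttp imports not shown):
+//
+//     reg := prometheus.NewRegistry()
+//     pool := NewPool(logger, arvClient, reg, instanceSet, newExecutor, installPublicKey, cluster)
+//     http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))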
+
+func (wp *Pool) runProbes() {
+       maxPPS := wp.maxProbesPerSecond
+       if maxPPS < 1 {
+               maxPPS = defaultMaxProbesPerSecond
+       }
+       limitticker := time.NewTicker(time.Second / time.Duration(maxPPS))
+       defer limitticker.Stop()
+
+       probeticker := time.NewTicker(wp.probeInterval)
+       defer probeticker.Stop()
+
+       workers := []cloud.InstanceID{}
+       for range probeticker.C {
+               workers = workers[:0]
+               wp.mtx.Lock()
+               for id, wkr := range wp.workers {
+                       if wkr.state == StateShutdown || wkr.shutdownIfIdle() {
+                               continue
+                       }
+                       workers = append(workers, id)
+               }
+               wp.mtx.Unlock()
+
+               for _, id := range workers {
+                       wp.mtx.Lock()
+                       wkr, ok := wp.workers[id]
+                       wp.mtx.Unlock()
+                       if !ok {
+                               // Deleted while we were probing
+                               // others
+                               continue
+                       }
+                       go wkr.ProbeAndUpdate()
+                       select {
+                       case <-wp.stop:
+                               return
+                       case <-limitticker.C:
+                       }
+               }
+       }
+}
+
+func (wp *Pool) runSync() {
+       // sync once immediately, then wait syncInterval, sync again,
+       // etc.
+       timer := time.NewTimer(1)
+       for {
+               select {
+               case <-timer.C:
+                       err := wp.getInstancesAndSync()
+                       if err != nil {
+                               wp.logger.WithError(err).Warn("sync failed")
+                       }
+                       timer.Reset(wp.syncInterval)
+               case <-wp.stop:
+                       wp.logger.Debug("worker.Pool stopped")
+                       return
+               }
+       }
+}
+
+// Stop stops synchronizing with the InstanceSet.
+func (wp *Pool) Stop() {
+       wp.setupOnce.Do(wp.setup)
+       close(wp.stop)
+}
+
+// Instances returns an InstanceView for each worker in the pool,
+// summarizing its current state and recent activity.
+func (wp *Pool) Instances() []InstanceView {
+       var r []InstanceView
+       wp.setupOnce.Do(wp.setup)
+       wp.mtx.Lock()
+       for _, w := range wp.workers {
+               r = append(r, InstanceView{
+                       Instance:             w.instance.ID(),
+                       Address:              w.instance.Address(),
+                       Price:                w.instType.Price,
+                       ArvadosInstanceType:  w.instType.Name,
+                       ProviderInstanceType: w.instType.ProviderType,
+                       LastContainerUUID:    w.lastUUID,
+                       LastBusy:             w.busy,
+                       WorkerState:          w.state.String(),
+                       IdleBehavior:         w.idleBehavior,
+               })
+       }
+       wp.mtx.Unlock()
+       sort.Slice(r, func(i, j int) bool {
+               return strings.Compare(string(r[i].Instance), string(r[j].Instance)) < 0
+       })
+       return r
+}
+
+func (wp *Pool) setup() {
+       wp.creating = map[string]createCall{}
+       wp.exited = map[string]time.Time{}
+       wp.workers = map[cloud.InstanceID]*worker{}
+       wp.subscribers = map[<-chan struct{}]chan<- struct{}{}
+}
+
+func (wp *Pool) notify() {
+       wp.mtx.RLock()
+       defer wp.mtx.RUnlock()
+       for _, send := range wp.subscribers {
+               select {
+               case send <- struct{}{}:
+               default:
+               }
+       }
+}
+
+func (wp *Pool) getInstancesAndSync() error {
+       wp.setupOnce.Do(wp.setup)
+       if err := wp.instanceSet.throttleInstances.Error(); err != nil {
+               return err
+       }
+       wp.logger.Debug("getting instance list")
+       threshold := time.Now()
+       instances, err := wp.instanceSet.Instances(cloud.InstanceTags{})
+       if err != nil {
+               wp.instanceSet.throttleInstances.CheckRateLimitError(err, wp.logger, "list instances", wp.notify)
+               return err
+       }
+       wp.sync(threshold, instances)
+       wp.logger.Debug("sync done")
+       return nil
+}
+
+// Add/remove/update workers based on instances, which were obtained
+// from the instanceSet. However, don't clobber any other updates that
+// already happened after threshold.
+func (wp *Pool) sync(threshold time.Time, instances []cloud.Instance) {
+       wp.mtx.Lock()
+       defer wp.mtx.Unlock()
+       wp.logger.WithField("Instances", len(instances)).Debug("sync instances")
+       notify := false
+
+       for _, inst := range instances {
+               itTag := inst.Tags()[tagKeyInstanceType]
+               it, ok := wp.instanceTypes[itTag]
+               if !ok {
+                       wp.logger.WithField("Instance", inst).Errorf("unknown InstanceType tag %q --- ignoring", itTag)
+                       continue
+               }
+               if wkr, isNew := wp.updateWorker(inst, it); isNew {
+                       notify = true
+               } else if wkr.state == StateShutdown && time.Since(wkr.destroyed) > wp.timeoutShutdown {
+                       wp.logger.WithField("Instance", inst).Info("worker still listed after shutdown; retrying")
+                       wkr.shutdown()
+               }
+       }
+
+       for id, wkr := range wp.workers {
+               if wkr.updated.After(threshold) {
+                       continue
+               }
+               logger := wp.logger.WithFields(logrus.Fields{
+                       "Instance":    wkr.instance.ID(),
+                       "WorkerState": wkr.state,
+               })
+               logger.Info("instance disappeared in cloud")
+               delete(wp.workers, id)
+               go wkr.executor.Close()
+               notify = true
+       }
+
+       if !wp.loaded {
+               wp.loaded = true
+               wp.logger.WithField("N", len(wp.workers)).Info("loaded initial instance list")
+       }
+
+       if notify {
+               go wp.notify()
+       }
+}
+
+// randomHex returns a random string of n hexadecimal digits (n*4
+// random bits). n must be even.
+func randomHex(n int) string {
+       buf := make([]byte, n/2)
+       _, err := rand.Read(buf)
+       if err != nil {
+               panic(err)
+       }
+       return fmt.Sprintf("%x", buf)
+}
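+
+// Editor's note: e.g. randomHex(instanceSecretLength) reads 20 bytes
+// from crypto/rand and returns a 40-character hex string, used by
+// Create as the per-instance secret.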
diff --git a/lib/dispatchcloud/worker/pool_test.go b/lib/dispatchcloud/worker/pool_test.go
new file mode 100644 (file)
index 0000000..fc33a7a
--- /dev/null
@@ -0,0 +1,290 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package worker
+
+import (
+       "sort"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/test"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "github.com/prometheus/client_golang/prometheus"
+       check "gopkg.in/check.v1"
+)
+
+const GiB arvados.ByteSize = 1 << 30
+
+var _ = check.Suite(&PoolSuite{})
+
+type lessChecker struct {
+       *check.CheckerInfo
+}
+
+func (*lessChecker) Check(params []interface{}, names []string) (result bool, error string) {
+       return params[0].(int) < params[1].(int), ""
+}
+
+var less = &lessChecker{&check.CheckerInfo{Name: "less", Params: []string{"obtained", "expected"}}}
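+
+// Editor's note: a custom gocheck checker is used like the built-in
+// ones, e.g. (sketch):
+//
+//     c.Check(countA, less, countB)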
+
+type PoolSuite struct{}
+
+func (suite *PoolSuite) TestResumeAfterRestart(c *check.C) {
+       type1 := test.InstanceType(1)
+       type2 := test.InstanceType(2)
+       type3 := test.InstanceType(3)
+       waitForIdle := func(pool *Pool, notify <-chan struct{}) {
+               timeout := time.NewTimer(time.Second)
+               for {
+                       instances := pool.Instances()
+                       sort.Slice(instances, func(i, j int) bool {
+                               return strings.Compare(instances[i].ArvadosInstanceType, instances[j].ArvadosInstanceType) < 0
+                       })
+                       if len(instances) == 3 &&
+                               instances[0].ArvadosInstanceType == type1.Name &&
+                               instances[0].WorkerState == StateIdle.String() &&
+                               instances[1].ArvadosInstanceType == type1.Name &&
+                               instances[1].WorkerState == StateIdle.String() &&
+                               instances[2].ArvadosInstanceType == type2.Name &&
+                               instances[2].WorkerState == StateIdle.String() {
+                               return
+                       }
+                       select {
+                       case <-timeout.C:
+                               c.Logf("pool.Instances() == %#v", instances)
+                               c.Error("timed out")
+                               return
+                       case <-notify:
+                       }
+               }
+       }
+
+       logger := ctxlog.TestLogger(c)
+       driver := &test.StubDriver{}
+       is, err := driver.InstanceSet(nil, "", logger)
+       c.Assert(err, check.IsNil)
+
+       newExecutor := func(cloud.Instance) Executor {
+               return stubExecutor{
+                       "crunch-run --list": stubResp{},
+                       "true":              stubResp{},
+               }
+       }
+
+       cluster := &arvados.Cluster{
+               Dispatch: arvados.Dispatch{
+                       MaxProbesPerSecond: 1000,
+                       ProbeInterval:      arvados.Duration(time.Millisecond * 10),
+               },
+               CloudVMs: arvados.CloudVMs{
+                       BootProbeCommand: "true",
+                       SyncInterval:     arvados.Duration(time.Millisecond * 10),
+               },
+               InstanceTypes: arvados.InstanceTypeMap{
+                       type1.Name: type1,
+                       type2.Name: type2,
+                       type3.Name: type3,
+               },
+       }
+
+       pool := NewPool(logger, arvados.NewClientFromEnv(), prometheus.NewRegistry(), is, newExecutor, nil, cluster)
+       notify := pool.Subscribe()
+       defer pool.Unsubscribe(notify)
+       pool.Create(type1)
+       pool.Create(type1)
+       pool.Create(type2)
+       waitForIdle(pool, notify)
+       var heldInstanceID cloud.InstanceID
+       for _, inst := range pool.Instances() {
+               if inst.ArvadosInstanceType == type2.Name {
+                       heldInstanceID = cloud.InstanceID(inst.Instance)
+                       pool.SetIdleBehavior(heldInstanceID, IdleBehaviorHold)
+               }
+       }
+       // Wait for the tags to save to the cloud provider
+       deadline := time.Now().Add(time.Second)
+       for !func() bool {
+               pool.mtx.RLock()
+               defer pool.mtx.RUnlock()
+               for _, wkr := range pool.workers {
+                       if wkr.instType == type2 {
+                               return wkr.instance.Tags()[tagKeyIdleBehavior] == string(IdleBehaviorHold)
+                       }
+               }
+               return false
+       }() {
+               if time.Now().After(deadline) {
+                       c.Fatal("timeout")
+               }
+               time.Sleep(time.Millisecond * 10)
+       }
+       pool.Stop()
+
+       c.Log("------- starting new pool, waiting to recover state")
+
+       pool2 := NewPool(logger, arvados.NewClientFromEnv(), prometheus.NewRegistry(), is, newExecutor, nil, cluster)
+       notify2 := pool2.Subscribe()
+       defer pool2.Unsubscribe(notify2)
+       waitForIdle(pool2, notify2)
+       for _, inst := range pool2.Instances() {
+               if inst.ArvadosInstanceType == type2.Name {
+                       c.Check(inst.Instance, check.Equals, heldInstanceID)
+                       c.Check(inst.IdleBehavior, check.Equals, IdleBehaviorHold)
+               } else {
+                       c.Check(inst.IdleBehavior, check.Equals, IdleBehaviorRun)
+               }
+       }
+       pool2.Stop()
+}
+
+func (suite *PoolSuite) TestCreateUnallocShutdown(c *check.C) {
+       logger := ctxlog.TestLogger(c)
+       driver := test.StubDriver{HoldCloudOps: true}
+       instanceSet, err := driver.InstanceSet(nil, "", logger)
+       c.Assert(err, check.IsNil)
+
+       type1 := arvados.InstanceType{Name: "a1s", ProviderType: "a1.small", VCPUs: 1, RAM: 1 * GiB, Price: .01}
+       type2 := arvados.InstanceType{Name: "a2m", ProviderType: "a2.medium", VCPUs: 2, RAM: 2 * GiB, Price: .02}
+       type3 := arvados.InstanceType{Name: "a2l", ProviderType: "a2.large", VCPUs: 4, RAM: 4 * GiB, Price: .04}
+       pool := &Pool{
+               logger:      logger,
+               newExecutor: func(cloud.Instance) Executor { return stubExecutor{} },
+               instanceSet: &throttledInstanceSet{InstanceSet: instanceSet},
+               instanceTypes: arvados.InstanceTypeMap{
+                       type1.Name: type1,
+                       type2.Name: type2,
+                       type3.Name: type3,
+               },
+       }
+       notify := pool.Subscribe()
+       defer pool.Unsubscribe(notify)
+       notify2 := pool.Subscribe()
+       defer pool.Unsubscribe(notify2)
+
+       c.Check(pool.Unallocated()[type1], check.Equals, 0)
+       c.Check(pool.Unallocated()[type2], check.Equals, 0)
+       c.Check(pool.Unallocated()[type3], check.Equals, 0)
+       pool.Create(type2)
+       pool.Create(type1)
+       pool.Create(type2)
+       pool.Create(type3)
+       c.Check(pool.Unallocated()[type1], check.Equals, 1)
+       c.Check(pool.Unallocated()[type2], check.Equals, 2)
+       c.Check(pool.Unallocated()[type3], check.Equals, 1)
+
+       // Unblock the pending Create calls.
+       go driver.ReleaseCloudOps(4)
+
+       // Wait for each instance to either return from its Create
+       // call, or show up in a poll.
+       suite.wait(c, pool, notify, func() bool {
+               pool.mtx.RLock()
+               defer pool.mtx.RUnlock()
+               return len(pool.workers) == 4
+       })
+
+       // Place type3 node on admin-hold
+       ivs := suite.instancesByType(pool, type3)
+       c.Assert(ivs, check.HasLen, 1)
+       type3instanceID := ivs[0].Instance
+       err = pool.SetIdleBehavior(type3instanceID, IdleBehaviorHold)
+       c.Check(err, check.IsNil)
+
+       // Check admin-hold behavior: refuse to shutdown, and don't
+       // report as Unallocated ("available now or soon").
+       c.Check(pool.Shutdown(type3), check.Equals, false)
+       suite.wait(c, pool, notify, func() bool {
+               return pool.Unallocated()[type3] == 0
+       })
+       c.Check(suite.instancesByType(pool, type3), check.HasLen, 1)
+
+       // Shutdown both type2 nodes
+       c.Check(pool.Shutdown(type2), check.Equals, true)
+       suite.wait(c, pool, notify, func() bool {
+               return pool.Unallocated()[type1] == 1 && pool.Unallocated()[type2] == 1
+       })
+       c.Check(pool.Shutdown(type2), check.Equals, true)
+       suite.wait(c, pool, notify, func() bool {
+               return pool.Unallocated()[type1] == 1 && pool.Unallocated()[type2] == 0
+       })
+       c.Check(pool.Shutdown(type2), check.Equals, false)
+       for {
+               // Consume any waiting notifications to ensure the
+               // next one we get is from Shutdown.
+               select {
+               case <-notify:
+                       continue
+               default:
+               }
+               break
+       }
+
+       // Shutdown type1 node
+       c.Check(pool.Shutdown(type1), check.Equals, true)
+       suite.wait(c, pool, notify, func() bool {
+               return pool.Unallocated()[type1] == 0 && pool.Unallocated()[type2] == 0 && pool.Unallocated()[type3] == 0
+       })
+       select {
+       case <-notify2:
+       case <-time.After(time.Second):
+               c.Error("notify did not receive")
+       }
+
+       // Put type3 node back in service.
+       err = pool.SetIdleBehavior(type3instanceID, IdleBehaviorRun)
+       c.Check(err, check.IsNil)
+       suite.wait(c, pool, notify, func() bool {
+               return pool.Unallocated()[type3] == 1
+       })
+
+       // Check admin-drain behavior: shut down right away, and don't
+       // report as Unallocated.
+       err = pool.SetIdleBehavior(type3instanceID, IdleBehaviorDrain)
+       c.Check(err, check.IsNil)
+       suite.wait(c, pool, notify, func() bool {
+               return pool.Unallocated()[type3] == 0
+       })
+       suite.wait(c, pool, notify, func() bool {
+               ivs := suite.instancesByType(pool, type3)
+               return len(ivs) == 1 && ivs[0].WorkerState == StateShutdown.String()
+       })
+
+       // Unblock all pending Destroy calls. Pool calls Destroy again
+       // if a node still appears in the provider list after a
+       // previous attempt, so there might be more than 4 Destroy
+       // calls to unblock.
+       go driver.ReleaseCloudOps(4444)
+
+       // Sync until all instances disappear from the provider list.
+       suite.wait(c, pool, notify, func() bool {
+               pool.getInstancesAndSync()
+               return len(pool.Instances()) == 0
+       })
+}
+
+func (suite *PoolSuite) instancesByType(pool *Pool, it arvados.InstanceType) []InstanceView {
+       var ivs []InstanceView
+       for _, iv := range pool.Instances() {
+               if iv.ArvadosInstanceType == it.Name {
+                       ivs = append(ivs, iv)
+               }
+       }
+       return ivs
+}
+
+func (suite *PoolSuite) wait(c *check.C, pool *Pool, notify <-chan struct{}, ready func() bool) {
+       timeout := time.NewTimer(time.Second).C
+       for !ready() {
+               select {
+               case <-notify:
+                       continue
+               case <-timeout:
+               }
+               break
+       }
+       c.Check(ready(), check.Equals, true)
+}
diff --git a/lib/dispatchcloud/worker/throttle.go b/lib/dispatchcloud/worker/throttle.go
new file mode 100644 (file)
index 0000000..c5ea793
--- /dev/null
@@ -0,0 +1,68 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package worker
+
+import (
+       "fmt"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "github.com/sirupsen/logrus"
+)
+
+type throttle struct {
+       err   error
+       until time.Time
+       mtx   sync.Mutex
+}
+
+// CheckRateLimitError checks whether the given error is a
+// cloud.RateLimitError, and if so, ensures Error() returns a non-nil
+// error until the rate limiting holdoff period expires.
+//
+// If a notify func is given, it will be called after the holdoff
+// period expires.
+func (thr *throttle) CheckRateLimitError(err error, logger logrus.FieldLogger, callType string, notify func()) {
+       rle, ok := err.(cloud.RateLimitError)
+       if !ok {
+               return
+       }
+       until := rle.EarliestRetry()
+       if !until.After(time.Now()) {
+               return
+       }
+       dur := until.Sub(time.Now())
+       logger.WithFields(logrus.Fields{
+               "CallType": callType,
+               "Duration": dur,
+               "ResumeAt": until,
+       }).Info("suspending remote calls due to rate-limit error")
+       thr.ErrorUntil(fmt.Errorf("remote calls are suspended for %s, until %s", dur, until), until, notify)
+}
+
+func (thr *throttle) ErrorUntil(err error, until time.Time, notify func()) {
+       thr.mtx.Lock()
+       defer thr.mtx.Unlock()
+       thr.err, thr.until = err, until
+       if notify != nil {
+               time.AfterFunc(until.Sub(time.Now()), notify)
+       }
+}
+
+func (thr *throttle) Error() error {
+       thr.mtx.Lock()
+       defer thr.mtx.Unlock()
+       if thr.err != nil && time.Now().After(thr.until) {
+               thr.err = nil
+       }
+       return thr.err
+}
+
+type throttledInstanceSet struct {
+       cloud.InstanceSet
+       throttleCreate    throttle
+       throttleInstances throttle
+}
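+
+// Editor's note: a sketch of the pattern Pool uses with this type
+// (compare Create and getInstancesAndSync in pool.go):
+//
+//     if err := is.throttleInstances.Error(); err != nil {
+//             return err // still inside a rate-limit holdoff period
+//     }
+//     instances, err := is.Instances(cloud.InstanceTags{})
+//     if err != nil {
+//             is.throttleInstances.CheckRateLimitError(err, logger, "list instances", notify)
+//             return err
+//     }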
diff --git a/lib/dispatchcloud/worker/throttle_test.go b/lib/dispatchcloud/worker/throttle_test.go
new file mode 100644 (file)
index 0000000..045b617
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package worker
+
+import (
+       "errors"
+       "time"
+
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&ThrottleSuite{})
+
+type ThrottleSuite struct{}
+
+func (s *ThrottleSuite) TestRateLimitError(c *check.C) {
+       var t throttle
+       c.Check(t.Error(), check.IsNil)
+       t.ErrorUntil(errors.New("wait"), time.Now().Add(time.Second), nil)
+       c.Check(t.Error(), check.NotNil)
+       t.ErrorUntil(nil, time.Now(), nil)
+       c.Check(t.Error(), check.IsNil)
+
+       notified := false
+       t.ErrorUntil(errors.New("wait"), time.Now().Add(time.Millisecond), func() { notified = true })
+       c.Check(t.Error(), check.NotNil)
+       time.Sleep(time.Millisecond * 10)
+       c.Check(t.Error(), check.IsNil)
+       c.Check(notified, check.Equals, true)
+}
diff --git a/lib/dispatchcloud/worker/verify.go b/lib/dispatchcloud/worker/verify.go
new file mode 100644 (file)
index 0000000..e22c85d
--- /dev/null
@@ -0,0 +1,56 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package worker
+
+import (
+       "bytes"
+       "errors"
+       "fmt"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "golang.org/x/crypto/ssh"
+)
+
+var (
+       errBadInstanceSecret = errors.New("bad instance secret")
+
+       // filename on instance, as given to shell (quoted accordingly)
+       instanceSecretFilename = "/var/run/arvados-instance-secret"
+       instanceSecretLength   = 40 // hex digits
+)
+
+type tagVerifier struct {
+       cloud.Instance
+}
+
+func (tv tagVerifier) VerifyHostKey(pubKey ssh.PublicKey, client *ssh.Client) error {
+       expectSecret := tv.Instance.Tags()[tagKeyInstanceSecret]
+       if err := tv.Instance.VerifyHostKey(pubKey, client); err != cloud.ErrNotImplemented || expectSecret == "" {
+               // If the wrapped instance indicates it has a way to
+               // verify the key, return that decision.
+               return err
+       }
+       session, err := client.NewSession()
+       if err != nil {
+               return err
+       }
+       defer session.Close()
+       var stdout, stderr bytes.Buffer
+       session.Stdin = bytes.NewBuffer(nil)
+       session.Stdout = &stdout
+       session.Stderr = &stderr
+       cmd := fmt.Sprintf("cat %s", instanceSecretFilename)
+       if u := tv.RemoteUser(); u != "root" {
+               cmd = "sudo " + cmd
+       }
+       err = session.Run(cmd)
+       if err != nil {
+               return err
+       }
+       if stdout.String() != expectSecret {
+               return errBadInstanceSecret
+       }
+       return nil
+}
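+
+// Editor's note (illustrative sketch, not part of this commit): the
+// check above assumes the secret was planted at instance creation
+// time -- written to instanceSecretFilename by the boot process and
+// recorded in the instance tags -- so tagVerifier can compare the two
+// later. Roughly, with a hypothetical randomHex helper:
+//
+//	secret := randomHex(instanceSecretLength)
+//	tags := cloud.InstanceTags{tagKeyInstanceSecret: secret}
+//	initCmd := cloud.InitCommand(fmt.Sprintf("echo -n %s >%s", secret, instanceSecretFilename))
+//	inst, err := instanceSet.Create(itype, imageID, tags, initCmd, publicKey)
+//	// ...later, wrap the instance for SSH host-key verification:
+//	_ = tagVerifier{inst}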
diff --git a/lib/dispatchcloud/worker/worker.go b/lib/dispatchcloud/worker/worker.go
new file mode 100644 (file)
index 0000000..64e1f77
--- /dev/null
@@ -0,0 +1,470 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package worker
+
+import (
+       "bytes"
+       "encoding/json"
+       "fmt"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/stats"
+       "github.com/sirupsen/logrus"
+)
+
+const (
+       // TODO: configurable
+       maxPingFailTime = 10 * time.Minute
+)
+
+// State indicates whether a worker is available to do work, and (if
+// not) whether/when it is expected to become ready.
+type State int
+
+const (
+       StateUnknown  State = iota // might be running a container already
+       StateBooting               // instance is booting
+       StateIdle                  // instance booted, no containers are running
+       StateRunning               // instance is running one or more containers
+       StateShutdown              // worker has stopped monitoring the instance
+)
+
+var stateString = map[State]string{
+       StateUnknown:  "unknown",
+       StateBooting:  "booting",
+       StateIdle:     "idle",
+       StateRunning:  "running",
+       StateShutdown: "shutdown",
+}
+
+// String implements fmt.Stringer.
+func (s State) String() string {
+       return stateString[s]
+}
+
+// MarshalText implements encoding.TextMarshaler so a JSON encoding of
+// map[State]anything uses the state's string representation.
+func (s State) MarshalText() ([]byte, error) {
+       return []byte(stateString[s]), nil
+}
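+
+// Editor's note (illustrative, not part of this commit): with
+// MarshalText in place, a JSON encoding of a map keyed by State uses
+// the readable names, e.g.
+//
+//	buf, _ := json.Marshal(map[State]int{StateIdle: 3})
+//	// string(buf) == `{"idle":3}`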
+
+// IdleBehavior indicates the behavior desired when a node becomes idle.
+type IdleBehavior string
+
+const (
+       IdleBehaviorRun   IdleBehavior = "run"   // run containers, or shutdown on idle timeout
+       IdleBehaviorHold  IdleBehavior = "hold"  // don't shutdown or run more containers
+       IdleBehaviorDrain IdleBehavior = "drain" // shutdown immediately when idle
+)
+
+var validIdleBehavior = map[IdleBehavior]bool{
+       IdleBehaviorRun:   true,
+       IdleBehaviorHold:  true,
+       IdleBehaviorDrain: true,
+}
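+
+// Editor's note (illustrative, not part of this commit): code that
+// accepts an IdleBehavior from config or an API request can reject
+// unknown values with:
+//
+//	if !validIdleBehavior[idleBehavior] {
+//		return fmt.Errorf("invalid idle behavior %q", idleBehavior)
+//	}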
+
+type worker struct {
+       logger   logrus.FieldLogger
+       executor Executor
+       wp       *Pool
+
+       mtx          sync.Locker // must be wp's Locker.
+       state        State
+       idleBehavior IdleBehavior
+       instance     cloud.Instance
+       instType     arvados.InstanceType
+       vcpus        int64
+       memory       int64
+       appeared     time.Time
+       probed       time.Time
+       updated      time.Time
+       busy         time.Time
+       destroyed    time.Time
+       lastUUID     string
+       running      map[string]struct{} // remember to update state idle<->running when this changes
+       starting     map[string]struct{} // remember to update state idle<->running when this changes
+       probing      chan struct{}
+}
+
+// caller must have lock.
+func (wkr *worker) startContainer(ctr arvados.Container) {
+       logger := wkr.logger.WithFields(logrus.Fields{
+               "ContainerUUID": ctr.UUID,
+               "Priority":      ctr.Priority,
+       })
+       logger = logger.WithField("Instance", wkr.instance.ID())
+       logger.Debug("starting container")
+       wkr.starting[ctr.UUID] = struct{}{}
+       if wkr.state != StateRunning {
+               wkr.state = StateRunning
+               go wkr.wp.notify()
+       }
+       go func() {
+               env := map[string]string{
+                       "ARVADOS_API_HOST":  wkr.wp.arvClient.APIHost,
+                       "ARVADOS_API_TOKEN": wkr.wp.arvClient.AuthToken,
+               }
+               if wkr.wp.arvClient.Insecure {
+                       env["ARVADOS_API_HOST_INSECURE"] = "1"
+               }
+               envJSON, err := json.Marshal(env)
+               if err != nil {
+                       panic(err)
+               }
+               stdin := bytes.NewBuffer(envJSON)
+               cmd := "crunch-run --detach --stdin-env '" + ctr.UUID + "'"
+               if u := wkr.instance.RemoteUser(); u != "root" {
+                       cmd = "sudo " + cmd
+               }
+               stdout, stderr, err := wkr.executor.Execute(nil, cmd, stdin)
+               wkr.mtx.Lock()
+               defer wkr.mtx.Unlock()
+               now := time.Now()
+               wkr.updated = now
+               wkr.busy = now
+               delete(wkr.starting, ctr.UUID)
+               wkr.running[ctr.UUID] = struct{}{}
+               wkr.lastUUID = ctr.UUID
+               if err != nil {
+                       logger.WithField("stdout", string(stdout)).
+                               WithField("stderr", string(stderr)).
+                               WithError(err).
+                               Error("error starting crunch-run process")
+                       // Leave uuid in wkr.running, though: it's
+                       // possible the error was just a communication
+                       // failure and the process was in fact
+                       // started.  Wait for next probe to find out.
+                       return
+               }
+               logger.Info("crunch-run process started")
+               wkr.lastUUID = ctr.UUID
+       }()
+}
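+
+// Editor's note (illustrative, not part of this commit): the JSON
+// document piped to "crunch-run --stdin-env" above has the shape
+//
+//	{"ARVADOS_API_HOST":"zzzzz.example.com","ARVADOS_API_TOKEN":"..."}
+//
+// with ARVADOS_API_HOST_INSECURE set to "1" only when the client is
+// configured as insecure; hostname and token here are placeholders.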
+
+// ProbeAndUpdate conducts appropriate boot/running probes (if any)
+// for the worker's current state. If a previous probe is still
+// running, it does nothing.
+//
+// It should be called in a new goroutine.
+func (wkr *worker) ProbeAndUpdate() {
+       select {
+       case wkr.probing <- struct{}{}:
+               wkr.probeAndUpdate()
+               <-wkr.probing
+       default:
+               wkr.logger.Debug("still waiting for last probe to finish")
+       }
+}
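+
+// Editor's note (illustrative sketch, not part of this commit):
+// ProbeAndUpdate relies on wkr.probing being a buffered channel,
+// assumed to be created with capacity 1, which turns the select into
+// a non-blocking try-lock:
+//
+//	sem := make(chan struct{}, 1)
+//	select {
+//	case sem <- struct{}{}: // acquired
+//		doWork()
+//		<-sem // released
+//	default: // another goroutine holds it; skip
+//	}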
+
+// probeAndUpdate calls probeBooted and/or probeRunning if needed, and
+// updates state accordingly.
+//
+// In StateUnknown: Call both probeBooted and probeRunning.
+// In StateBooting: Call probeBooted; if successful, call probeRunning.
+// In StateRunning: Call probeRunning.
+// In StateIdle: Call probeRunning.
+// In StateShutdown: Do nothing.
+//
+// If both probes succeed, wkr.state changes to
+// StateIdle/StateRunning.
+//
+// If probeRunning succeeds, wkr.running is updated. (This means
+// wkr.running might be non-empty even in StateUnknown, if the boot
+// probe failed.)
+//
+// probeAndUpdate should be called in a new goroutine.
+func (wkr *worker) probeAndUpdate() {
+       wkr.mtx.Lock()
+       updated := wkr.updated
+       initialState := wkr.state
+       wkr.mtx.Unlock()
+
+       var (
+               booted   bool
+               ctrUUIDs []string
+               ok       bool
+               stderr   []byte // from probeBooted
+       )
+
+       switch initialState {
+       case StateShutdown:
+               return
+       case StateIdle, StateRunning:
+               booted = true
+       case StateUnknown, StateBooting:
+       default:
+               panic(fmt.Sprintf("unknown state %s", initialState))
+       }
+
+       probeStart := time.Now()
+       logger := wkr.logger.WithField("ProbeStart", probeStart)
+
+       if !booted {
+               booted, stderr = wkr.probeBooted()
+               if !booted {
+                       // Pretend this probe succeeded if another
+                       // concurrent attempt succeeded.
+                       wkr.mtx.Lock()
+                       booted = wkr.state == StateRunning || wkr.state == StateIdle
+                       wkr.mtx.Unlock()
+               }
+               if booted {
+                       logger.Info("instance booted; will try probeRunning")
+               }
+       }
+       if booted || wkr.state == StateUnknown {
+               ctrUUIDs, ok = wkr.probeRunning()
+       }
+       wkr.mtx.Lock()
+       defer wkr.mtx.Unlock()
+       if !ok || (!booted && len(ctrUUIDs) == 0 && len(wkr.running) == 0) {
+               if wkr.state == StateShutdown && wkr.updated.After(updated) {
+                       // Skip the logging noise if shutdown was
+                       // initiated during probe.
+                       return
+               }
+               // Using the start time of the probe as the timeout
+               // threshold ensures we always initiate at least one
+               // probe attempt after the boot/probe timeout expires
+               // (otherwise, a slow probe failure could cause us to
+               // shutdown an instance even though it did in fact
+               // boot/recover before the timeout expired).
+               dur := probeStart.Sub(wkr.probed)
+               if wkr.shutdownIfBroken(dur) {
+                       // stderr from failed run-probes will have
+                       // been logged already, but boot-probe
+                       // failures are normal so they are logged only
+                       // at Debug level. This is our chance to log
+                       // some evidence about why the node never
+                       // booted, even in non-debug mode.
+                       if !booted {
+                               logger.WithFields(logrus.Fields{
+                                       "Duration": dur,
+                                       "stderr":   string(stderr),
+                               }).Info("boot failed")
+                       }
+               }
+               return
+       }
+
+       updateTime := time.Now()
+       wkr.probed = updateTime
+
+       if updated != wkr.updated {
+               // Worker was updated after the probe began, so
+               // wkr.running might have a container UUID that was
+               // not yet running when ctrUUIDs was generated. Leave
+               // wkr.running alone and wait for the next probe to
+               // catch up on any changes.
+               return
+       }
+
+       if len(ctrUUIDs) > 0 {
+               wkr.busy = updateTime
+               wkr.lastUUID = ctrUUIDs[0]
+       } else if len(wkr.running) > 0 {
+               // Actual last-busy time was sometime between wkr.busy
+               // and now. Now is the earliest opportunity to take
+               // advantage of the non-busy state, though.
+               wkr.busy = updateTime
+       }
+       changed := false
+
+       // Build a new "running" map. Set changed=true if it differs
+       // from the existing map (wkr.running) to ensure the scheduler
+       // gets notified below.
+       running := map[string]struct{}{}
+       for _, uuid := range ctrUUIDs {
+               running[uuid] = struct{}{}
+               if _, ok := wkr.running[uuid]; !ok {
+                       if _, ok := wkr.starting[uuid]; !ok {
+                               // We didn't start it -- it must have
+                               // been started by a previous
+                               // dispatcher process.
+                               logger.WithField("ContainerUUID", uuid).Info("crunch-run process detected")
+                       }
+                       changed = true
+               }
+       }
+       for uuid := range wkr.running {
+               if _, ok := running[uuid]; !ok {
+                       logger.WithField("ContainerUUID", uuid).Info("crunch-run process ended")
+                       wkr.wp.notifyExited(uuid, updateTime)
+                       changed = true
+               }
+       }
+
+       // Update state if this was the first successful boot-probe.
+       if booted && (wkr.state == StateUnknown || wkr.state == StateBooting) {
+               // Note: this will change again below if
+               // len(wkr.starting)+len(wkr.running) > 0.
+               wkr.state = StateIdle
+               changed = true
+       }
+
+       // If wkr.state and wkr.running aren't changing then there's
+       // no need to log anything, notify the scheduler, move state
+       // back and forth between idle/running, etc.
+       if !changed {
+               return
+       }
+
+       // Log whenever a run-probe reveals crunch-run processes
+       // appearing/disappearing before boot-probe succeeds.
+       if wkr.state == StateUnknown && len(running) != len(wkr.running) {
+               logger.WithFields(logrus.Fields{
+                       "RunningContainers": len(running),
+                       "State":             wkr.state,
+               }).Info("crunch-run probe succeeded, but boot probe is still failing")
+       }
+
+       wkr.running = running
+       if wkr.state == StateIdle && len(wkr.starting)+len(wkr.running) > 0 {
+               wkr.state = StateRunning
+       } else if wkr.state == StateRunning && len(wkr.starting)+len(wkr.running) == 0 {
+               wkr.state = StateIdle
+       }
+       wkr.updated = updateTime
+       if booted && (initialState == StateUnknown || initialState == StateBooting) {
+               logger.WithFields(logrus.Fields{
+                       "RunningContainers": len(running),
+                       "State":             wkr.state,
+               }).Info("probes succeeded, instance is in service")
+       }
+       go wkr.wp.notify()
+}
+
+func (wkr *worker) probeRunning() (running []string, ok bool) {
+       cmd := "crunch-run --list"
+       if u := wkr.instance.RemoteUser(); u != "root" {
+               cmd = "sudo " + cmd
+       }
+       stdout, stderr, err := wkr.executor.Execute(nil, cmd, nil)
+       if err != nil {
+               wkr.logger.WithFields(logrus.Fields{
+                       "Command": cmd,
+                       "stdout":  string(stdout),
+                       "stderr":  string(stderr),
+               }).WithError(err).Warn("probe failed")
+               return nil, false
+       }
+       stdout = bytes.TrimRight(stdout, "\n")
+       if len(stdout) == 0 {
+               return nil, true
+       }
+       return strings.Split(string(stdout), "\n"), true
+}
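+
+// Editor's note (illustrative, not part of this commit): given
+// stdout "uuid1\nuuid2\n", probeRunning returns
+// ([]string{"uuid1", "uuid2"}, true); empty stdout means "probe
+// succeeded, nothing running" and returns (nil, true).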
+
+func (wkr *worker) probeBooted() (ok bool, stderr []byte) {
+       cmd := wkr.wp.bootProbeCommand
+       if cmd == "" {
+               cmd = "true"
+       }
+       stdout, stderr, err := wkr.executor.Execute(nil, cmd, nil)
+       logger := wkr.logger.WithFields(logrus.Fields{
+               "Command": cmd,
+               "stdout":  string(stdout),
+               "stderr":  string(stderr),
+       })
+       if err != nil {
+               logger.WithError(err).Debug("boot probe failed")
+               return false, stderr
+       }
+       logger.Info("boot probe succeeded")
+       return true, stderr
+}
+
+// caller must have lock.
+func (wkr *worker) shutdownIfBroken(dur time.Duration) bool {
+       if wkr.idleBehavior == IdleBehaviorHold {
+               // Never shut down.
+               return false
+       }
+       label, threshold := "", wkr.wp.timeoutProbe
+       if wkr.state == StateUnknown || wkr.state == StateBooting {
+               label, threshold = "new ", wkr.wp.timeoutBooting
+       }
+       if dur < threshold {
+               return false
+       }
+       wkr.logger.WithFields(logrus.Fields{
+               "Duration": dur,
+               "Since":    wkr.probed,
+               "State":    wkr.state,
+       }).Warnf("%sinstance unresponsive, shutting down", label)
+       wkr.shutdown()
+       return true
+}
+
+// caller must have lock.
+func (wkr *worker) shutdownIfIdle() bool {
+       if wkr.idleBehavior == IdleBehaviorHold {
+               // Never shut down.
+               return false
+       }
+       age := time.Since(wkr.busy)
+
+       old := age >= wkr.wp.timeoutIdle
+       draining := wkr.idleBehavior == IdleBehaviorDrain
+       shouldShutdown := ((old || draining) && wkr.state == StateIdle) ||
+               (draining && wkr.state == StateBooting)
+       if !shouldShutdown {
+               return false
+       }
+
+       wkr.logger.WithFields(logrus.Fields{
+               "State":        wkr.state,
+               "IdleDuration": stats.Duration(age),
+               "IdleBehavior": wkr.idleBehavior,
+       }).Info("shutdown idle worker")
+       wkr.shutdown()
+       return true
+}
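+
+// Editor's note (illustrative, not part of this commit): the
+// shouldShutdown rule above, written as a decision table:
+//
+//	State    IdleBehavior  idle timeout exceeded  shutdown?
+//	Idle     run           yes                    yes
+//	Idle     drain         any                    yes
+//	Booting  drain         any                    yes
+//	any      hold          any                    no
+//
+// All other combinations leave the worker running.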
+
+// caller must have lock.
+func (wkr *worker) shutdown() {
+       now := time.Now()
+       wkr.updated = now
+       wkr.destroyed = now
+       wkr.state = StateShutdown
+       go wkr.wp.notify()
+       go func() {
+               err := wkr.instance.Destroy()
+               if err != nil {
+                       wkr.logger.WithError(err).Warn("shutdown failed")
+                       return
+               }
+       }()
+}
+
+// Save worker tags to cloud provider metadata, if they don't already
+// match. Caller must have lock.
+func (wkr *worker) saveTags() {
+       instance := wkr.instance
+       tags := instance.Tags()
+       update := cloud.InstanceTags{
+               tagKeyInstanceType: wkr.instType.Name,
+               tagKeyIdleBehavior: string(wkr.idleBehavior),
+       }
+       save := false
+       for k, v := range update {
+               if tags[k] != v {
+                       tags[k] = v
+                       save = true
+               }
+       }
+       if save {
+               go func() {
+                       err := instance.SetTags(tags)
+                       if err != nil {
+                               wkr.wp.logger.WithField("Instance", instance.ID()).WithError(err).Warn("error updating tags")
+                       }
+               }()
+       }
+}
diff --git a/lib/dispatchcloud/worker/worker_test.go b/lib/dispatchcloud/worker/worker_test.go
new file mode 100644 (file)
index 0000000..3bc33b6
--- /dev/null
@@ -0,0 +1,240 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package worker
+
+import (
+       "errors"
+       "io"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/cloud"
+       "git.curoverse.com/arvados.git/lib/dispatchcloud/test"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&WorkerSuite{})
+
+type WorkerSuite struct{}
+
+func (suite *WorkerSuite) TestProbeAndUpdate(c *check.C) {
+       logger := ctxlog.TestLogger(c)
+       bootTimeout := time.Minute
+       probeTimeout := time.Second
+
+       is, err := (&test.StubDriver{}).InstanceSet(nil, "", logger)
+       c.Assert(err, check.IsNil)
+       inst, err := is.Create(arvados.InstanceType{}, "", nil, "echo InitCommand", nil)
+       c.Assert(err, check.IsNil)
+
+       type trialT struct {
+               testCaseComment string // displayed in test output to help identify failure case
+               age             time.Duration
+               state           State
+               running         int
+               starting        int
+               respBoot        stubResp // zero value is success
+               respRun         stubResp // zero value is success + nothing running
+               expectState     State
+               expectRunning   int
+       }
+
+       errFail := errors.New("failed")
+       respFail := stubResp{"", "command failed\n", errFail}
+       respContainerRunning := stubResp{"zzzzz-dz642-abcdefghijklmno\n", "", nil}
+       for _, trial := range []trialT{
+               {
+                       testCaseComment: "Unknown, probes fail",
+                       state:           StateUnknown,
+                       respBoot:        respFail,
+                       respRun:         respFail,
+                       expectState:     StateUnknown,
+               },
+               {
+                       testCaseComment: "Unknown, boot probe fails, but one container is running",
+                       state:           StateUnknown,
+                       respBoot:        respFail,
+                       respRun:         respContainerRunning,
+                       expectState:     StateUnknown,
+                       expectRunning:   1,
+               },
+               {
+                       testCaseComment: "Unknown, boot probe fails, previously running container has exited",
+                       state:           StateUnknown,
+                       running:         1,
+                       respBoot:        respFail,
+                       expectState:     StateUnknown,
+                       expectRunning:   0,
+               },
+               {
+                       testCaseComment: "Unknown, boot timeout exceeded, boot probe fails",
+                       state:           StateUnknown,
+                       age:             bootTimeout + time.Second,
+                       respBoot:        respFail,
+                       respRun:         respFail,
+                       expectState:     StateShutdown,
+               },
+               {
+                       testCaseComment: "Unknown, boot timeout exceeded, boot probe succeeds but crunch-run fails",
+                       state:           StateUnknown,
+                       age:             bootTimeout * 2,
+                       respRun:         respFail,
+                       expectState:     StateShutdown,
+               },
+               {
+                       testCaseComment: "Unknown, boot timeout exceeded, boot probe fails but crunch-run succeeds",
+                       state:           StateUnknown,
+                       age:             bootTimeout * 2,
+                       respBoot:        respFail,
+                       expectState:     StateShutdown,
+               },
+               {
+                       testCaseComment: "Unknown, boot timeout exceeded, boot probe fails but container is running",
+                       state:           StateUnknown,
+                       age:             bootTimeout * 2,
+                       respBoot:        respFail,
+                       respRun:         respContainerRunning,
+                       expectState:     StateUnknown,
+                       expectRunning:   1,
+               },
+               {
+                       testCaseComment: "Booting, boot probe fails, run probe fails",
+                       state:           StateBooting,
+                       respBoot:        respFail,
+                       respRun:         respFail,
+                       expectState:     StateBooting,
+               },
+               {
+                       testCaseComment: "Booting, boot probe fails, run probe succeeds (but isn't expected to be called)",
+                       state:           StateBooting,
+                       respBoot:        respFail,
+                       expectState:     StateBooting,
+               },
+               {
+                       testCaseComment: "Booting, boot probe succeeds, run probe fails",
+                       state:           StateBooting,
+                       respRun:         respFail,
+                       expectState:     StateBooting,
+               },
+               {
+                       testCaseComment: "Booting, boot probe succeeds, run probe succeeds",
+                       state:           StateBooting,
+                       expectState:     StateIdle,
+               },
+               {
+                       testCaseComment: "Booting, boot probe succeeds, run probe succeeds, container is running",
+                       state:           StateBooting,
+                       respRun:         respContainerRunning,
+                       expectState:     StateRunning,
+                       expectRunning:   1,
+               },
+               {
+                       testCaseComment: "Booting, boot timeout exceeded",
+                       state:           StateBooting,
+                       age:             bootTimeout * 2,
+                       respRun:         respFail,
+                       expectState:     StateShutdown,
+               },
+               {
+                       testCaseComment: "Idle, probe timeout exceeded, one container running",
+                       state:           StateIdle,
+                       age:             probeTimeout * 2,
+                       respRun:         respContainerRunning,
+                       expectState:     StateRunning,
+                       expectRunning:   1,
+               },
+               {
+                       testCaseComment: "Idle, probe timeout exceeded, one container running, probe fails",
+                       state:           StateIdle,
+                       age:             probeTimeout * 2,
+                       running:         1,
+                       respRun:         respFail,
+                       expectState:     StateShutdown,
+                       expectRunning:   1,
+               },
+               {
+                       testCaseComment: "Idle, probe timeout exceeded, nothing running, probe fails",
+                       state:           StateIdle,
+                       age:             probeTimeout * 2,
+                       respRun:         respFail,
+                       expectState:     StateShutdown,
+               },
+               {
+                       testCaseComment: "Running, one container still running",
+                       state:           StateRunning,
+                       running:         1,
+                       respRun:         respContainerRunning,
+                       expectState:     StateRunning,
+                       expectRunning:   1,
+               },
+               {
+                       testCaseComment: "Running, container has exited",
+                       state:           StateRunning,
+                       running:         1,
+                       expectState:     StateIdle,
+                       expectRunning:   0,
+               },
+               {
+                       testCaseComment: "Running, probe timeout exceeded, nothing running, new container being started",
+                       state:           StateRunning,
+                       age:             probeTimeout * 2,
+                       starting:        1,
+                       expectState:     StateRunning,
+               },
+       } {
+               c.Logf("------- %#v", trial)
+               ctime := time.Now().Add(-trial.age)
+               exr := stubExecutor{
+                       "bootprobe":         trial.respBoot,
+                       "crunch-run --list": trial.respRun,
+               }
+               wp := &Pool{
+                       newExecutor:      func(cloud.Instance) Executor { return exr },
+                       bootProbeCommand: "bootprobe",
+                       timeoutBooting:   bootTimeout,
+                       timeoutProbe:     probeTimeout,
+                       exited:           map[string]time.Time{},
+               }
+               wkr := &worker{
+                       logger:   logger,
+                       executor: exr,
+                       wp:       wp,
+                       mtx:      &wp.mtx,
+                       state:    trial.state,
+                       instance: inst,
+                       appeared: ctime,
+                       busy:     ctime,
+                       probed:   ctime,
+                       updated:  ctime,
+               }
+               if trial.running > 0 {
+                       wkr.running = map[string]struct{}{"zzzzz-dz642-abcdefghijklmno": {}}
+               }
+               if trial.starting > 0 {
+                       wkr.starting = map[string]struct{}{"zzzzz-dz642-abcdefghijklmno": {}}
+               }
+               wkr.probeAndUpdate()
+               c.Check(wkr.state, check.Equals, trial.expectState)
+               c.Check(len(wkr.running), check.Equals, trial.expectRunning)
+       }
+}
+
+type stubResp struct {
+       stdout string
+       stderr string
+       err    error
+}
+type stubExecutor map[string]stubResp
+
+func (se stubExecutor) SetTarget(cloud.ExecutorTarget) {}
+func (se stubExecutor) Close()                         {}
+func (se stubExecutor) Execute(env map[string]string, cmd string, stdin io.Reader) (stdout, stderr []byte, err error) {
+       resp, ok := se[cmd]
+       if !ok {
+               return nil, []byte("command not found\n"), errors.New("command not found")
+       }
+       return []byte(resp.stdout), []byte(resp.stderr), resp.err
+}
diff --git a/lib/service/cmd.go b/lib/service/cmd.go
new file mode 100644 (file)
index 0000000..d99af0e
--- /dev/null
@@ -0,0 +1,122 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Package service provides a cmd.Handler that brings up a system service.
+package service
+
+import (
+       "context"
+       "flag"
+       "fmt"
+       "io"
+       "net/http"
+       "os"
+
+       "git.curoverse.com/arvados.git/lib/cmd"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "github.com/coreos/go-systemd/daemon"
+       "github.com/sirupsen/logrus"
+)
+
+type Handler interface {
+       http.Handler
+       CheckHealth() error
+}
+
+type NewHandlerFunc func(context.Context, *arvados.Cluster, *arvados.NodeProfile) Handler
+
+type command struct {
+       newHandler NewHandlerFunc
+       svcName    arvados.ServiceName
+}
+
+// Command returns a cmd.Handler that loads site config, calls
+// newHandler with the current cluster and node configs, and brings up
+// an http server with the returned handler.
+//
+// The handler is wrapped with server middleware (adding X-Request-ID
+// headers, logging requests/responses, etc).
+func Command(svcName arvados.ServiceName, newHandler NewHandlerFunc) cmd.Handler {
+       return &command{
+               newHandler: newHandler,
+               svcName:    svcName,
+       }
+}
+
+func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       log := ctxlog.New(stderr, "json", "info")
+
+       var err error
+       defer func() {
+               if err != nil {
+                       log.WithError(err).Info("exiting")
+               }
+       }()
+       flags := flag.NewFlagSet("", flag.ContinueOnError)
+       flags.SetOutput(stderr)
+       configFile := flags.String("config", arvados.DefaultConfigFile, "Site configuration `file`")
+       nodeProfile := flags.String("node-profile", "", "`Name` of NodeProfiles config entry to use (if blank, use $ARVADOS_NODE_PROFILE or hostname reported by OS)")
+       err = flags.Parse(args)
+       if err == flag.ErrHelp {
+               err = nil
+               return 0
+       } else if err != nil {
+               return 2
+       }
+       cfg, err := arvados.GetConfig(*configFile)
+       if err != nil {
+               return 1
+       }
+       cluster, err := cfg.GetCluster("")
+       if err != nil {
+               return 1
+       }
+       log = ctxlog.New(stderr, cluster.Logging.Format, cluster.Logging.Level).WithFields(logrus.Fields{
+               "PID": os.Getpid(),
+       })
+       ctx := ctxlog.Context(context.Background(), log)
+       profileName := *nodeProfile
+       if profileName == "" {
+               profileName = os.Getenv("ARVADOS_NODE_PROFILE")
+       }
+       profile, err := cluster.GetNodeProfile(profileName)
+       if err != nil {
+               return 1
+       }
+       listen := profile.ServicePorts()[c.svcName]
+       if listen == "" {
+               err = fmt.Errorf("configuration does not enable the %s service on this host", c.svcName)
+               return 1
+       }
+       handler := c.newHandler(ctx, cluster, profile)
+       if err = handler.CheckHealth(); err != nil {
+               return 1
+       }
+       srv := &httpserver.Server{
+               Server: http.Server{
+                       Handler: httpserver.AddRequestIDs(httpserver.LogRequests(log, handler)),
+               },
+               Addr: listen,
+       }
+       err = srv.Start()
+       if err != nil {
+               return 1
+       }
+       log.WithFields(logrus.Fields{
+               "Listen":  srv.Addr,
+               "Service": c.svcName,
+       }).Info("listening")
+       if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
+               log.WithError(err).Error("error notifying init daemon")
+       }
+       err = srv.Wait()
+       if err != nil {
+               return 1
+       }
+       return 0
+}
+
+const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
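+
+// Editor's note (hypothetical usage, not part of this commit): a
+// service's main package would typically wire its handler constructor
+// into Command and delegate to the returned cmd.Handler. Sketch with
+// placeholder names (newHandler, the service name; imports elided):
+//
+//	var theCmd = service.Command(arvados.ServiceNameController,
+//		func(ctx context.Context, cluster *arvados.Cluster, np *arvados.NodeProfile) service.Handler {
+//			return newHandler(ctx, cluster, np)
+//		})
+//
+//	func main() {
+//		os.Exit(theCmd.RunCommand(os.Args[0], os.Args[1:], os.Stdin, os.Stdout, os.Stderr))
+//	}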
diff --git a/sdk/R/.Rbuildignore b/sdk/R/.Rbuildignore
new file mode 100644 (file)
index 0000000..b9136fe
--- /dev/null
@@ -0,0 +1,4 @@
+^.*\.Rproj$
+^\.Rproj\.user$
+^docs$
+^pkgdown$
diff --git a/sdk/R/ArvadosR.Rproj b/sdk/R/ArvadosR.Rproj
new file mode 100644 (file)
index 0000000..a648ce1
--- /dev/null
@@ -0,0 +1,20 @@
+Version: 1.0
+
+RestoreWorkspace: Default
+SaveWorkspace: Default
+AlwaysSaveHistory: Default
+
+EnableCodeIndexing: Yes
+UseSpacesForTab: Yes
+NumSpacesForTab: 4
+Encoding: UTF-8
+
+RnwWeave: Sweave
+LaTeX: pdfLaTeX
+
+AutoAppendNewline: Yes
+StripTrailingWhitespace: Yes
+
+BuildType: Package
+PackageUseDevtools: Yes
+PackageInstallArgs: --no-multiarch --with-keep.source
diff --git a/sdk/R/DESCRIPTION b/sdk/R/DESCRIPTION
new file mode 100644 (file)
index 0000000..878a709
--- /dev/null
@@ -0,0 +1,20 @@
+Package: ArvadosR
+Type: Package
+Title: Arvados R SDK
+Version: 0.0.5
+Authors@R: person("Fuad", "Muhic", role = c("aut", "cre"), email = "fmuhic@capeannenterprises.com")
+Maintainer: Ward Vandewege <wvandewege@veritasgenetics.com>
+Description: This is the Arvados R SDK.
+URL: http://doc.arvados.org
+License: Apache-2.0
+Encoding: UTF-8
+LazyData: true
+RoxygenNote: 6.0.1.9000
+Imports:
+    R6,
+    httr,
+    stringr,
+    jsonlite,
+    curl,
+    XML
+Suggests: testthat
diff --git a/sdk/R/NAMESPACE b/sdk/R/NAMESPACE
new file mode 100644 (file)
index 0000000..1cc6768
--- /dev/null
@@ -0,0 +1,11 @@
+# Generated by roxygen2: do not edit by hand
+
+S3method(print,ArvadosFile)
+S3method(print,Collection)
+S3method(print,Subcollection)
+export(Arvados)
+export(ArvadosFile)
+export(Collection)
+export(Subcollection)
+export(generateAPI)
+export(listAll)
diff --git a/sdk/R/R/Arvados.R b/sdk/R/R/Arvados.R
new file mode 100644 (file)
index 0000000..744cb3c
--- /dev/null
@@ -0,0 +1,5724 @@
+#' users.get
+#' 
+#' users.get is a method defined in Arvados class.
+#' 
+#' @usage arv$users.get(uuid)
+#' @param uuid The UUID of the User in question.
+#' @return User object.
+#' @name users.get
+NULL
+
+#' users.create
+#' 
+#' users.create is a method defined in Arvados class.
+#' 
+#' @usage arv$users.create(user, ensure_unique_name = "false")
+#' @param user User object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return User object.
+#' @name users.create
+NULL
+
+#' users.update
+#' 
+#' users.update is a method defined in Arvados class.
+#' 
+#' @usage arv$users.update(user, uuid)
+#' @param user User object.
+#' @param uuid The UUID of the User in question.
+#' @return User object.
+#' @name users.update
+NULL
+
+#' users.delete
+#' 
+#' users.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$users.delete(uuid)
+#' @param uuid The UUID of the User in question.
+#' @return User object.
+#' @name users.delete
+NULL
+
+#' users.current
+#' 
+#' users.current is a method defined in Arvados class.
+#' 
+#' @usage arv$users.current(NULL)
+#' @return User object.
+#' @name users.current
+NULL
+
+#' users.system
+#' 
+#' users.system is a method defined in Arvados class.
+#' 
+#' @usage arv$users.system(NULL)
+#' @return User object.
+#' @name users.system
+NULL
+
+#' users.activate
+#' 
+#' users.activate is a method defined in Arvados class.
+#' 
+#' @usage arv$users.activate(uuid)
+#' @param uuid 
+#' @return User object.
+#' @name users.activate
+NULL
+
+#' users.setup
+#' 
+#' users.setup is a method defined in Arvados class.
+#' 
+#' @usage arv$users.setup(user = NULL, openid_prefix = NULL,
+#'     repo_name = NULL, vm_uuid = NULL, send_notification_email = "false")
+#' @param user 
+#' @param openid_prefix 
+#' @param repo_name 
+#' @param vm_uuid 
+#' @param send_notification_email 
+#' @return User object.
+#' @name users.setup
+NULL
+
+#' users.unsetup
+#' 
+#' users.unsetup is a method defined in Arvados class.
+#' 
+#' @usage arv$users.unsetup(uuid)
+#' @param uuid 
+#' @return User object.
+#' @name users.unsetup
+NULL
+
+#' users.update_uuid
+#' 
+#' users.update_uuid is a method defined in Arvados class.
+#' 
+#' @usage arv$users.update_uuid(uuid, new_uuid)
+#' @param uuid 
+#' @param new_uuid 
+#' @return User object.
+#' @name users.update_uuid
+NULL
+
+#' users.merge
+#' 
+#' users.merge is a method defined in Arvados class.
+#' 
+#' @usage arv$users.merge(new_owner_uuid,
+#'     new_user_token, redirect_to_new_user = NULL)
+#' @param new_owner_uuid 
+#' @param new_user_token 
+#' @param redirect_to_new_user 
+#' @return User object.
+#' @name users.merge
+NULL
+
+#' users.list
+#' 
+#' users.list is a method defined in Arvados class.
+#' 
+#' @usage arv$users.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return UserList object.
+#' @name users.list
+NULL
+
+#' api_client_authorizations.get
+#' 
+#' api_client_authorizations.get is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.get(uuid)
+#' @param uuid The UUID of the ApiClientAuthorization in question.
+#' @return ApiClientAuthorization object.
+#' @name api_client_authorizations.get
+NULL
+
+#' api_client_authorizations.create
+#' 
+#' api_client_authorizations.create is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.create(apiclientauthorization,
+#'     ensure_unique_name = "false")
+#' @param apiClientAuthorization ApiClientAuthorization object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return ApiClientAuthorization object.
+#' @name api_client_authorizations.create
+NULL
+
+#' api_client_authorizations.update
+#' 
+#' api_client_authorizations.update is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.update(apiclientauthorization,
+#'     uuid)
+#' @param apiClientAuthorization ApiClientAuthorization object.
+#' @param uuid The UUID of the ApiClientAuthorization in question.
+#' @return ApiClientAuthorization object.
+#' @name api_client_authorizations.update
+NULL
+
+#' api_client_authorizations.delete
+#' 
+#' api_client_authorizations.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.delete(uuid)
+#' @param uuid The UUID of the ApiClientAuthorization in question.
+#' @return ApiClientAuthorization object.
+#' @name api_client_authorizations.delete
+NULL
+
+#' api_client_authorizations.create_system_auth
+#' 
+#' api_client_authorizations.create_system_auth is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.create_system_auth(api_client_id = NULL,
+#'     scopes = NULL)
+#' @param api_client_id 
+#' @param scopes 
+#' @return ApiClientAuthorization object.
+#' @name api_client_authorizations.create_system_auth
+NULL
+
+#' api_client_authorizations.current
+#' 
+#' api_client_authorizations.current is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.current(NULL)
+#' @return ApiClientAuthorization object.
+#' @name api_client_authorizations.current
+NULL
+
+#' api_client_authorizations.list
+#' 
+#' api_client_authorizations.list is a method defined in Arvados class.
+#' 
+#' @usage arv$api_client_authorizations.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return ApiClientAuthorizationList object.
+#' @name api_client_authorizations.list
+NULL
+
+#' containers.get
+#' 
+#' containers.get is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.get(uuid)
+#' @param uuid The UUID of the Container in question.
+#' @return Container object.
+#' @name containers.get
+NULL
+
+#' containers.create
+#' 
+#' containers.create is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.create(container,
+#'     ensure_unique_name = "false")
+#' @param container Container object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Container object.
+#' @name containers.create
+NULL
+
+#' containers.update
+#' 
+#' containers.update is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.update(container,
+#'     uuid)
+#' @param container Container object.
+#' @param uuid The UUID of the Container in question.
+#' @return Container object.
+#' @name containers.update
+NULL
+
+#' containers.delete
+#' 
+#' containers.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.delete(uuid)
+#' @param uuid The UUID of the Container in question.
+#' @return Container object.
+#' @name containers.delete
+NULL
+
+#' containers.auth
+#' 
+#' containers.auth is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.auth(uuid)
+#' @param uuid 
+#' @return Container object.
+#' @name containers.auth
+NULL
+
+#' containers.lock
+#' 
+#' containers.lock is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.lock(uuid)
+#' @param uuid 
+#' @return Container object.
+#' @name containers.lock
+NULL
+
+#' containers.unlock
+#' 
+#' containers.unlock is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.unlock(uuid)
+#' @param uuid 
+#' @return Container object.
+#' @name containers.unlock
+NULL
+
+#' containers.secret_mounts
+#' 
+#' containers.secret_mounts is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.secret_mounts(uuid)
+#' @param uuid 
+#' @return Container object.
+#' @name containers.secret_mounts
+NULL
+
+#' containers.current
+#' 
+#' containers.current is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.current(NULL)
+#' @return Container object.
+#' @name containers.current
+NULL
+
+#' containers.list
+#' 
+#' containers.list is a method defined in Arvados class.
+#' 
+#' @usage arv$containers.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return ContainerList object.
+#' @name containers.list
+NULL
+
+#' api_clients.get
+#' 
+#' api_clients.get is a method defined in Arvados class.
+#' 
+#' @usage arv$api_clients.get(uuid)
+#' @param uuid The UUID of the ApiClient in question.
+#' @return ApiClient object.
+#' @name api_clients.get
+NULL
+
+#' api_clients.create
+#' 
+#' api_clients.create is a method defined in Arvados class.
+#' 
+#' @usage arv$api_clients.create(apiclient,
+#'     ensure_unique_name = "false")
+#' @param apiClient ApiClient object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return ApiClient object.
+#' @name api_clients.create
+NULL
+
+#' api_clients.update
+#' 
+#' api_clients.update is a method defined in Arvados class.
+#' 
+#' @usage arv$api_clients.update(apiclient,
+#'     uuid)
+#' @param apiClient ApiClient object.
+#' @param uuid The UUID of the ApiClient in question.
+#' @return ApiClient object.
+#' @name api_clients.update
+NULL
+
+#' api_clients.delete
+#' 
+#' api_clients.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$api_clients.delete(uuid)
+#' @param uuid The UUID of the ApiClient in question.
+#' @return ApiClient object.
+#' @name api_clients.delete
+NULL
+
+#' api_clients.list
+#' 
+#' api_clients.list is a method defined in Arvados class.
+#' 
+#' @usage arv$api_clients.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return ApiClientList object.
+#' @name api_clients.list
+NULL
+
+#' container_requests.get
+#' 
+#' container_requests.get is a method defined in Arvados class.
+#' 
+#' @usage arv$container_requests.get(uuid)
+#' @param uuid The UUID of the ContainerRequest in question.
+#' @return ContainerRequest object.
+#' @name container_requests.get
+NULL
+
+#' container_requests.create
+#' 
+#' container_requests.create is a method defined in Arvados class.
+#' 
+#' @usage arv$container_requests.create(containerrequest,
+#'     ensure_unique_name = "false")
+#' @param containerRequest ContainerRequest object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return ContainerRequest object.
+#' @name container_requests.create
+NULL
+
+#' container_requests.update
+#' 
+#' container_requests.update is a method defined in Arvados class.
+#' 
+#' @usage arv$container_requests.update(containerrequest,
+#'     uuid)
+#' @param containerRequest ContainerRequest object.
+#' @param uuid The UUID of the ContainerRequest in question.
+#' @return ContainerRequest object.
+#' @name container_requests.update
+NULL
+
+#' container_requests.delete
+#' 
+#' container_requests.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$container_requests.delete(uuid)
+#' @param uuid The UUID of the ContainerRequest in question.
+#' @return ContainerRequest object.
+#' @name container_requests.delete
+NULL
+
+#' container_requests.list
+#' 
+#' container_requests.list is a method defined in Arvados class.
+#' 
+#' @usage arv$container_requests.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return ContainerRequestList object.
+#' @name container_requests.list
+NULL
+
+#' authorized_keys.get
+#' 
+#' authorized_keys.get is a method defined in Arvados class.
+#' 
+#' @usage arv$authorized_keys.get(uuid)
+#' @param uuid The UUID of the AuthorizedKey in question.
+#' @return AuthorizedKey object.
+#' @name authorized_keys.get
+NULL
+
+#' authorized_keys.create
+#' 
+#' authorized_keys.create is a method defined in Arvados class.
+#' 
+#' @usage arv$authorized_keys.create(authorizedkey,
+#'     ensure_unique_name = "false")
+#' @param authorizedKey AuthorizedKey object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return AuthorizedKey object.
+#' @name authorized_keys.create
+NULL
+
+#' authorized_keys.update
+#' 
+#' authorized_keys.update is a method defined in Arvados class.
+#' 
+#' @usage arv$authorized_keys.update(authorizedkey,
+#'     uuid)
+#' @param authorizedKey AuthorizedKey object.
+#' @param uuid The UUID of the AuthorizedKey in question.
+#' @return AuthorizedKey object.
+#' @name authorized_keys.update
+NULL
+
+#' authorized_keys.delete
+#' 
+#' authorized_keys.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$authorized_keys.delete(uuid)
+#' @param uuid The UUID of the AuthorizedKey in question.
+#' @return AuthorizedKey object.
+#' @name authorized_keys.delete
+NULL
+
+#' authorized_keys.list
+#' 
+#' authorized_keys.list is a method defined in Arvados class.
+#' 
+#' @usage arv$authorized_keys.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return AuthorizedKeyList object.
+#' @name authorized_keys.list
+NULL
+
+#' collections.get
+#' 
+#' collections.get is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.get(uuid)
+#' @param uuid The UUID of the Collection in question.
+#' @return Collection object.
+#' @name collections.get
+NULL
+
+#' collections.create
+#' 
+#' collections.create is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.create(collection,
+#'     ensure_unique_name = "false")
+#' @param collection Collection object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Collection object.
+#' @name collections.create
+NULL
+
+#' collections.update
+#' 
+#' collections.update is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.update(collection,
+#'     uuid)
+#' @param collection Collection object.
+#' @param uuid The UUID of the Collection in question.
+#' @return Collection object.
+#' @name collections.update
+NULL
+
+#' collections.delete
+#' 
+#' collections.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.delete(uuid)
+#' @param uuid The UUID of the Collection in question.
+#' @return Collection object.
+#' @name collections.delete
+NULL
+
+#' collections.provenance
+#' 
+#' collections.provenance is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.provenance(uuid)
+#' @param uuid 
+#' @return Collection object.
+#' @name collections.provenance
+NULL
+
+#' collections.used_by
+#' 
+#' collections.used_by is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.used_by(uuid)
+#' @param uuid 
+#' @return Collection object.
+#' @name collections.used_by
+NULL
+
+#' collections.trash
+#' 
+#' collections.trash is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.trash(uuid)
+#' @param uuid 
+#' @return Collection object.
+#' @name collections.trash
+NULL
+
+#' collections.untrash
+#' 
+#' collections.untrash is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.untrash(uuid)
+#' @param uuid 
+#' @return Collection object.
+#' @name collections.untrash
+NULL
+
+#' collections.list
+#' 
+#' collections.list is a method defined in Arvados class.
+#' 
+#' @usage arv$collections.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact", include_trash = NULL)
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @param include_trash Include collections whose is_trashed attribute is true.
+#' @return CollectionList object.
+#' @name collections.list
+NULL
+
+#' humans.get
+#' 
+#' humans.get is a method defined in Arvados class.
+#' 
+#' @usage arv$humans.get(uuid)
+#' @param uuid The UUID of the Human in question.
+#' @return Human object.
+#' @name humans.get
+NULL
+
+#' humans.create
+#' 
+#' humans.create is a method defined in Arvados class.
+#' 
+#' @usage arv$humans.create(human, ensure_unique_name = "false")
+#' @param human Human object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Human object.
+#' @name humans.create
+NULL
+
+#' humans.update
+#' 
+#' humans.update is a method defined in Arvados class.
+#' 
+#' @usage arv$humans.update(human, uuid)
+#' @param human Human object.
+#' @param uuid The UUID of the Human in question.
+#' @return Human object.
+#' @name humans.update
+NULL
+
+#' humans.delete
+#' 
+#' humans.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$humans.delete(uuid)
+#' @param uuid The UUID of the Human in question.
+#' @return Human object.
+#' @name humans.delete
+NULL
+
+#' humans.list
+#' 
+#' humans.list is a method defined in Arvados class.
+#' 
+#' @usage arv$humans.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return HumanList object.
+#' @name humans.list
+NULL
+
+#' job_tasks.get
+#' 
+#' job_tasks.get is a method defined in Arvados class.
+#' 
+#' @usage arv$job_tasks.get(uuid)
+#' @param uuid The UUID of the JobTask in question.
+#' @return JobTask object.
+#' @name job_tasks.get
+NULL
+
+#' job_tasks.create
+#' 
+#' job_tasks.create is a method defined in Arvados class.
+#' 
+#' @usage arv$job_tasks.create(jobtask, ensure_unique_name = "false")
+#' @param jobTask JobTask object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return JobTask object.
+#' @name job_tasks.create
+NULL
+
+#' job_tasks.update
+#' 
+#' job_tasks.update is a method defined in Arvados class.
+#' 
+#' @usage arv$job_tasks.update(jobtask, uuid)
+#' @param jobTask JobTask object.
+#' @param uuid The UUID of the JobTask in question.
+#' @return JobTask object.
+#' @name job_tasks.update
+NULL
+
+#' job_tasks.delete
+#' 
+#' job_tasks.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$job_tasks.delete(uuid)
+#' @param uuid The UUID of the JobTask in question.
+#' @return JobTask object.
+#' @name job_tasks.delete
+NULL
+
+#' job_tasks.list
+#' 
+#' job_tasks.list is a method defined in Arvados class.
+#' 
+#' @usage arv$job_tasks.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return JobTaskList object.
+#' @name job_tasks.list
+NULL
+
+#' jobs.get
+#' 
+#' jobs.get is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.get(uuid)
+#' @param uuid The UUID of the Job in question.
+#' @return Job object.
+#' @name jobs.get
+NULL
+
+#' jobs.create
+#' 
+#' jobs.create is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.create(job, ensure_unique_name = "false",
+#'     find_or_create = "false", filters = NULL,
+#'     minimum_script_version = NULL, exclude_script_versions = NULL)
+#' @param job Job object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @param find_or_create 
+#' @param filters 
+#' @param minimum_script_version 
+#' @param exclude_script_versions 
+#' @return Job object.
+#' @name jobs.create
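+#' 
+#' @examples
+#' \dontrun{
+#' # Minimal sketch of the legacy jobs API; the script, script_version
+#' # and repository values below are placeholders for a real crunch script.
+#' job <- arv$jobs.create(list(script = "hash",
+#'                             script_version = "master",
+#'                             repository = "arvados"))
+#' }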
+NULL
+
+#' jobs.update
+#' 
+#' jobs.update is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.update(job, uuid)
+#' @param job Job object.
+#' @param uuid The UUID of the Job in question.
+#' @return Job object.
+#' @name jobs.update
+NULL
+
+#' jobs.delete
+#' 
+#' jobs.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.delete(uuid)
+#' @param uuid The UUID of the Job in question.
+#' @return Job object.
+#' @name jobs.delete
+NULL
+
+#' jobs.queue
+#' 
+#' jobs.queue is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.queue(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return Job object.
+#' @name jobs.queue
+NULL
+
+#' jobs.queue_size
+#' 
+#' jobs.queue_size is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.queue_size(NULL)
+#' @return Job object.
+#' @name jobs.queue_size
+NULL
+
+#' jobs.cancel
+#' 
+#' jobs.cancel is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.cancel(uuid)
+#' @param uuid The UUID of the Job in question.
+#' @return Job object.
+#' @name jobs.cancel
+NULL
+
+#' jobs.lock
+#' 
+#' jobs.lock is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.lock(uuid)
+#' @param uuid The UUID of the Job in question.
+#' @return Job object.
+#' @name jobs.lock
+NULL
+
+#' jobs.list
+#' 
+#' jobs.list is a method defined in Arvados class.
+#' 
+#' @usage arv$jobs.list(filters = NULL, where = NULL,
+#'     order = NULL, select = NULL, distinct = NULL,
+#'     limit = "100", offset = "0", count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return JobList object.
+#' @name jobs.list
+NULL
+
+#' keep_disks.get
+#' 
+#' keep_disks.get is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_disks.get(uuid)
+#' @param uuid The UUID of the KeepDisk in question.
+#' @return KeepDisk object.
+#' @name keep_disks.get
+NULL
+
+#' keep_disks.create
+#' 
+#' keep_disks.create is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_disks.create(keepdisk,
+#'     ensure_unique_name = "false")
+#' @param keepdisk KeepDisk object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return KeepDisk object.
+#' @name keep_disks.create
+NULL
+
+#' keep_disks.update
+#' 
+#' keep_disks.update is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_disks.update(keepdisk,
+#'     uuid)
+#' @param keepdisk KeepDisk object.
+#' @param uuid The UUID of the KeepDisk in question.
+#' @return KeepDisk object.
+#' @name keep_disks.update
+NULL
+
+#' keep_disks.delete
+#' 
+#' keep_disks.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_disks.delete(uuid)
+#' @param uuid The UUID of the KeepDisk in question.
+#' @return KeepDisk object.
+#' @name keep_disks.delete
+NULL
+
+#' keep_disks.ping
+#' 
+#' keep_disks.ping is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_disks.ping(uuid = NULL,
+#'     ping_secret, node_uuid = NULL, filesystem_uuid = NULL,
+#'     service_host = NULL, service_port, service_ssl_flag)
+#' @param uuid 
+#' @param ping_secret 
+#' @param node_uuid 
+#' @param filesystem_uuid 
+#' @param service_host 
+#' @param service_port 
+#' @param service_ssl_flag 
+#' @return KeepDisk object.
+#' @name keep_disks.ping
+NULL
+
+#' keep_disks.list
+#' 
+#' keep_disks.list is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_disks.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return KeepDiskList object.
+#' @name keep_disks.list
+NULL
+
+#' nodes.get
+#' 
+#' nodes.get is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.get(uuid)
+#' @param uuid The UUID of the Node in question.
+#' @return Node object.
+#' @name nodes.get
+NULL
+
+#' nodes.create
+#' 
+#' nodes.create is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.create(node, ensure_unique_name = "false",
+#'     assign_slot = NULL)
+#' @param node Node object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @param assign_slot assign slot and hostname
+#' @return Node object.
+#' @name nodes.create
+NULL
+
+#' nodes.update
+#' 
+#' nodes.update is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.update(node, uuid, assign_slot = NULL)
+#' @param node Node object.
+#' @param uuid The UUID of the Node in question.
+#' @param assign_slot assign slot and hostname
+#' @return Node object.
+#' @name nodes.update
+NULL
+
+#' nodes.delete
+#' 
+#' nodes.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.delete(uuid)
+#' @param uuid The UUID of the Node in question.
+#' @return Node object.
+#' @name nodes.delete
+NULL
+
+#' nodes.ping
+#' 
+#' nodes.ping is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.ping(uuid, ping_secret)
+#' @param uuid 
+#' @param ping_secret 
+#' @return Node object.
+#' @name nodes.ping
+NULL
+
+#' nodes.list
+#' 
+#' nodes.list is a method defined in Arvados class.
+#' 
+#' @usage arv$nodes.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return NodeList object.
+#' @name nodes.list
+NULL
+
+#' links.get
+#' 
+#' links.get is a method defined in Arvados class.
+#' 
+#' @usage arv$links.get(uuid)
+#' @param uuid The UUID of the Link in question.
+#' @return Link object.
+#' @name links.get
+NULL
+
+#' links.create
+#' 
+#' links.create is a method defined in Arvados class.
+#' 
+#' @usage arv$links.create(link, ensure_unique_name = "false")
+#' @param link Link object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Link object.
+#' @name links.create
+NULL
+
+#' links.update
+#' 
+#' links.update is a method defined in Arvados class.
+#' 
+#' @usage arv$links.update(link, uuid)
+#' @param link Link object.
+#' @param uuid The UUID of the Link in question.
+#' @return Link object.
+#' @name links.update
+NULL
+
+#' links.delete
+#' 
+#' links.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$links.delete(uuid)
+#' @param uuid The UUID of the Link in question.
+#' @return Link object.
+#' @name links.delete
+NULL
+
+#' links.list
+#' 
+#' links.list is a method defined in Arvados class.
+#' 
+#' @usage arv$links.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return LinkList object.
+#' @name links.list
+NULL
+
+#' links.get_permissions
+#' 
+#' links.get_permissions is a method defined in Arvados class.
+#' 
+#' @usage arv$links.get_permissions(uuid)
+#' @param uuid 
+#' @return Link object.
+#' @name links.get_permissions
+NULL
+
+#' keep_services.get
+#' 
+#' keep_services.get is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.get(uuid)
+#' @param uuid The UUID of the KeepService in question.
+#' @return KeepService object.
+#' @name keep_services.get
+NULL
+
+#' keep_services.create
+#' 
+#' keep_services.create is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.create(keepservice,
+#'     ensure_unique_name = "false")
+#' @param keepservice KeepService object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return KeepService object.
+#' @name keep_services.create
+NULL
+
+#' keep_services.update
+#' 
+#' keep_services.update is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.update(keepservice,
+#'     uuid)
+#' @param keepservice KeepService object.
+#' @param uuid The UUID of the KeepService in question.
+#' @return KeepService object.
+#' @name keep_services.update
+NULL
+
+#' keep_services.delete
+#' 
+#' keep_services.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.delete(uuid)
+#' @param uuid The UUID of the KeepService in question.
+#' @return KeepService object.
+#' @name keep_services.delete
+NULL
+
+#' keep_services.accessible
+#' 
+#' keep_services.accessible is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.accessible(NULL)
+#' @return KeepService object.
+#' @name keep_services.accessible
+NULL
+
+#' keep_services.list
+#' 
+#' keep_services.list is a method defined in Arvados class.
+#' 
+#' @usage arv$keep_services.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return KeepServiceList object.
+#' @name keep_services.list
+NULL
+
+#' pipeline_templates.get
+#' 
+#' pipeline_templates.get is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_templates.get(uuid)
+#' @param uuid The UUID of the PipelineTemplate in question.
+#' @return PipelineTemplate object.
+#' @name pipeline_templates.get
+NULL
+
+#' pipeline_templates.create
+#' 
+#' pipeline_templates.create is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_templates.create(pipelinetemplate,
+#'     ensure_unique_name = "false")
+#' @param pipelinetemplate PipelineTemplate object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return PipelineTemplate object.
+#' @name pipeline_templates.create
+NULL
+
+#' pipeline_templates.update
+#' 
+#' pipeline_templates.update is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_templates.update(pipelinetemplate,
+#'     uuid)
+#' @param pipelinetemplate PipelineTemplate object.
+#' @param uuid The UUID of the PipelineTemplate in question.
+#' @return PipelineTemplate object.
+#' @name pipeline_templates.update
+NULL
+
+#' pipeline_templates.delete
+#' 
+#' pipeline_templates.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_templates.delete(uuid)
+#' @param uuid The UUID of the PipelineTemplate in question.
+#' @return PipelineTemplate object.
+#' @name pipeline_templates.delete
+NULL
+
+#' pipeline_templates.list
+#' 
+#' pipeline_templates.list is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_templates.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return PipelineTemplateList object.
+#' @name pipeline_templates.list
+NULL
+
+#' pipeline_instances.get
+#' 
+#' pipeline_instances.get is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_instances.get(uuid)
+#' @param uuid The UUID of the PipelineInstance in question.
+#' @return PipelineInstance object.
+#' @name pipeline_instances.get
+NULL
+
+#' pipeline_instances.create
+#' 
+#' pipeline_instances.create is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_instances.create(pipelineinstance,
+#'     ensure_unique_name = "false")
+#' @param pipelineinstance PipelineInstance object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return PipelineInstance object.
+#' @name pipeline_instances.create
+NULL
+
+#' pipeline_instances.update
+#' 
+#' pipeline_instances.update is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_instances.update(pipelineinstance,
+#'     uuid)
+#' @param pipelineinstance PipelineInstance object.
+#' @param uuid The UUID of the PipelineInstance in question.
+#' @return PipelineInstance object.
+#' @name pipeline_instances.update
+NULL
+
+#' pipeline_instances.delete
+#' 
+#' pipeline_instances.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_instances.delete(uuid)
+#' @param uuid The UUID of the PipelineInstance in question.
+#' @return PipelineInstance object.
+#' @name pipeline_instances.delete
+NULL
+
+#' pipeline_instances.cancel
+#' 
+#' pipeline_instances.cancel is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_instances.cancel(uuid)
+#' @param uuid The UUID of the PipelineInstance in question.
+#' @return PipelineInstance object.
+#' @name pipeline_instances.cancel
+NULL
+
+#' pipeline_instances.list
+#' 
+#' pipeline_instances.list is a method defined in Arvados class.
+#' 
+#' @usage arv$pipeline_instances.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return PipelineInstanceList object.
+#' @name pipeline_instances.list
+NULL
+
+#' repositories.get
+#' 
+#' repositories.get is a method defined in Arvados class.
+#' 
+#' @usage arv$repositories.get(uuid)
+#' @param uuid The UUID of the Repository in question.
+#' @return Repository object.
+#' @name repositories.get
+NULL
+
+#' repositories.create
+#' 
+#' repositories.create is a method defined in Arvados class.
+#' 
+#' @usage arv$repositories.create(repository,
+#'     ensure_unique_name = "false")
+#' @param repository Repository object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Repository object.
+#' @name repositories.create
+NULL
+
+#' repositories.update
+#' 
+#' repositories.update is a method defined in Arvados class.
+#' 
+#' @usage arv$repositories.update(repository,
+#'     uuid)
+#' @param repository Repository object.
+#' @param uuid The UUID of the Repository in question.
+#' @return Repository object.
+#' @name repositories.update
+NULL
+
+#' repositories.delete
+#' 
+#' repositories.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$repositories.delete(uuid)
+#' @param uuid The UUID of the Repository in question.
+#' @return Repository object.
+#' @name repositories.delete
+NULL
+
+#' repositories.get_all_permissions
+#' 
+#' repositories.get_all_permissions is a method defined in Arvados class.
+#' 
+#' @usage arv$repositories.get_all_permissions(NULL)
+#' @return Repository object.
+#' @name repositories.get_all_permissions
+NULL
+
+#' repositories.list
+#' 
+#' repositories.list is a method defined in Arvados class.
+#' 
+#' @usage arv$repositories.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return RepositoryList object.
+#' @name repositories.list
+NULL
+
+#' specimens.get
+#' 
+#' specimens.get is a method defined in Arvados class.
+#' 
+#' @usage arv$specimens.get(uuid)
+#' @param uuid The UUID of the Specimen in question.
+#' @return Specimen object.
+#' @name specimens.get
+NULL
+
+#' specimens.create
+#' 
+#' specimens.create is a method defined in Arvados class.
+#' 
+#' @usage arv$specimens.create(specimen,
+#'     ensure_unique_name = "false")
+#' @param specimen Specimen object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Specimen object.
+#' @name specimens.create
+NULL
+
+#' specimens.update
+#' 
+#' specimens.update is a method defined in Arvados class.
+#' 
+#' @usage arv$specimens.update(specimen,
+#'     uuid)
+#' @param specimen Specimen object.
+#' @param uuid The UUID of the Specimen in question.
+#' @return Specimen object.
+#' @name specimens.update
+NULL
+
+#' specimens.delete
+#' 
+#' specimens.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$specimens.delete(uuid)
+#' @param uuid The UUID of the Specimen in question.
+#' @return Specimen object.
+#' @name specimens.delete
+NULL
+
+#' specimens.list
+#' 
+#' specimens.list is a method defined in Arvados class.
+#' 
+#' @usage arv$specimens.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return SpecimenList object.
+#' @name specimens.list
+NULL
+
+#' logs.get
+#' 
+#' logs.get is a method defined in Arvados class.
+#' 
+#' @usage arv$logs.get(uuid)
+#' @param uuid The UUID of the Log in question.
+#' @return Log object.
+#' @name logs.get
+NULL
+
+#' logs.create
+#' 
+#' logs.create is a method defined in Arvados class.
+#' 
+#' @usage arv$logs.create(log, ensure_unique_name = "false")
+#' @param log Log object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Log object.
+#' @name logs.create
+NULL
+
+#' logs.update
+#' 
+#' logs.update is a method defined in Arvados class.
+#' 
+#' @usage arv$logs.update(log, uuid)
+#' @param log Log object.
+#' @param uuid The UUID of the Log in question.
+#' @return Log object.
+#' @name logs.update
+NULL
+
+#' logs.delete
+#' 
+#' logs.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$logs.delete(uuid)
+#' @param uuid The UUID of the Log in question.
+#' @return Log object.
+#' @name logs.delete
+NULL
+
+#' logs.list
+#' 
+#' logs.list is a method defined in Arvados class.
+#' 
+#' @usage arv$logs.list(filters = NULL, where = NULL,
+#'     order = NULL, select = NULL, distinct = NULL,
+#'     limit = "100", offset = "0", count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return LogList object.
+#' @name logs.list
+NULL
+
+#' traits.get
+#' 
+#' traits.get is a method defined in Arvados class.
+#' 
+#' @usage arv$traits.get(uuid)
+#' @param uuid The UUID of the Trait in question.
+#' @return Trait object.
+#' @name traits.get
+NULL
+
+#' traits.create
+#' 
+#' traits.create is a method defined in Arvados class.
+#' 
+#' @usage arv$traits.create(trait, ensure_unique_name = "false")
+#' @param trait Trait object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Trait object.
+#' @name traits.create
+NULL
+
+#' traits.update
+#' 
+#' traits.update is a method defined in Arvados class.
+#' 
+#' @usage arv$traits.update(trait, uuid)
+#' @param trait Trait object.
+#' @param uuid The UUID of the Trait in question.
+#' @return Trait object.
+#' @name traits.update
+NULL
+
+#' traits.delete
+#' 
+#' traits.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$traits.delete(uuid)
+#' @param uuid The UUID of the Trait in question.
+#' @return Trait object.
+#' @name traits.delete
+NULL
+
+#' traits.list
+#' 
+#' traits.list is a method defined in Arvados class.
+#' 
+#' @usage arv$traits.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return TraitList object.
+#' @name traits.list
+NULL
+
+#' virtual_machines.get
+#' 
+#' virtual_machines.get is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.get(uuid)
+#' @param uuid The UUID of the VirtualMachine in question.
+#' @return VirtualMachine object.
+#' @name virtual_machines.get
+NULL
+
+#' virtual_machines.create
+#' 
+#' virtual_machines.create is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.create(virtualmachine,
+#'     ensure_unique_name = "false")
+#' @param virtualmachine VirtualMachine object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return VirtualMachine object.
+#' @name virtual_machines.create
+NULL
+
+#' virtual_machines.update
+#' 
+#' virtual_machines.update is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.update(virtualmachine,
+#'     uuid)
+#' @param virtualmachine VirtualMachine object.
+#' @param uuid The UUID of the VirtualMachine in question.
+#' @return VirtualMachine object.
+#' @name virtual_machines.update
+NULL
+
+#' virtual_machines.delete
+#' 
+#' virtual_machines.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.delete(uuid)
+#' @param uuid The UUID of the VirtualMachine in question.
+#' @return VirtualMachine object.
+#' @name virtual_machines.delete
+NULL
+
+#' virtual_machines.logins
+#' 
+#' virtual_machines.logins is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.logins(uuid)
+#' @param uuid The UUID of the VirtualMachine in question.
+#' @return VirtualMachine object.
+#' @name virtual_machines.logins
+NULL
+
+#' virtual_machines.get_all_logins
+#' 
+#' virtual_machines.get_all_logins is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.get_all_logins(NULL)
+#' @return VirtualMachine object.
+#' @name virtual_machines.get_all_logins
+NULL
+
+#' virtual_machines.list
+#' 
+#' virtual_machines.list is a method defined in Arvados class.
+#' 
+#' @usage arv$virtual_machines.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return VirtualMachineList object.
+#' @name virtual_machines.list
+NULL
+
+#' workflows.get
+#' 
+#' workflows.get is a method defined in Arvados class.
+#' 
+#' @usage arv$workflows.get(uuid)
+#' @param uuid The UUID of the Workflow in question.
+#' @return Workflow object.
+#' @name workflows.get
+NULL
+
+#' workflows.create
+#' 
+#' workflows.create is a method defined in Arvados class.
+#' 
+#' @usage arv$workflows.create(workflow,
+#'     ensure_unique_name = "false")
+#' @param workflow Workflow object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Workflow object.
+#' @name workflows.create
+NULL
+
+#' workflows.update
+#' 
+#' workflows.update is a method defined in Arvados class.
+#' 
+#' @usage arv$workflows.update(workflow,
+#'     uuid)
+#' @param workflow Workflow object.
+#' @param uuid The UUID of the Workflow in question.
+#' @return Workflow object.
+#' @name workflows.update
+NULL
+
+#' workflows.delete
+#' 
+#' workflows.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$workflows.delete(uuid)
+#' @param uuid The UUID of the Workflow in question.
+#' @return Workflow object.
+#' @name workflows.delete
+NULL
+
+#' workflows.list
+#' 
+#' workflows.list is a method defined in Arvados class.
+#' 
+#' @usage arv$workflows.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return WorkflowList object.
+#' @name workflows.list
+NULL
+
+#' groups.get
+#' 
+#' groups.get is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.get(uuid)
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name groups.get
+NULL
+
+#' groups.create
+#' 
+#' groups.create is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.create(group, ensure_unique_name = "false")
+#' @param group Group object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Group object.
+#' @name groups.create
+NULL
+
+#' groups.update
+#' 
+#' groups.update is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.update(group, uuid)
+#' @param group Group object.
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name groups.update
+NULL
+
+#' groups.delete
+#' 
+#' groups.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.delete(uuid)
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name groups.delete
+NULL
+
+#' groups.contents
+#' 
+#' groups.contents is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.contents(filters = NULL,
+#'     where = NULL, order = NULL, distinct = NULL,
+#'     limit = "100", offset = "0", count = "exact",
+#'     include_trash = NULL, uuid = NULL, recursive = NULL)
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @param include_trash Include items whose is_trashed attribute is true.
+#' @param uuid 
+#' @param recursive Include contents from child groups recursively.
+#' @return Group object.
+#' @name groups.contents
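+#' 
+#' @examples
+#' \dontrun{
+#' # Minimal sketch with a placeholder UUID: list everything inside a
+#' # project, descending into child groups.
+#' contents <- arv$groups.contents(uuid = "uuid", recursive = "true")
+#' }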
+NULL
+
+#' groups.trash
+#' 
+#' groups.trash is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.trash(uuid)
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name groups.trash
+NULL
+
+#' groups.untrash
+#' 
+#' groups.untrash is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.untrash(uuid)
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name groups.untrash
+NULL
+
+#' groups.list
+#' 
+#' groups.list is a method defined in Arvados class.
+#' 
+#' @usage arv$groups.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact", include_trash = NULL)
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @param include_trash Include items whose is_trashed attribute is true.
+#' @return GroupList object.
+#' @name groups.list
+NULL
+
+#' user_agreements.get
+#' 
+#' user_agreements.get is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.get(uuid)
+#' @param uuid The UUID of the UserAgreement in question.
+#' @return UserAgreement object.
+#' @name user_agreements.get
+NULL
+
+#' user_agreements.create
+#' 
+#' user_agreements.create is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.create(useragreement,
+#'     ensure_unique_name = "false")
+#' @param useragreement UserAgreement object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return UserAgreement object.
+#' @name user_agreements.create
+NULL
+
+#' user_agreements.update
+#' 
+#' user_agreements.update is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.update(useragreement,
+#'     uuid)
+#' @param useragreement UserAgreement object.
+#' @param uuid The UUID of the UserAgreement in question.
+#' @return UserAgreement object.
+#' @name user_agreements.update
+NULL
+
+#' user_agreements.delete
+#' 
+#' user_agreements.delete is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.delete(uuid)
+#' @param uuid The UUID of the UserAgreement in question.
+#' @return UserAgreement object.
+#' @name user_agreements.delete
+NULL
+
+#' user_agreements.signatures
+#' 
+#' user_agreements.signatures is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.signatures(NULL)
+#' @return UserAgreement object.
+#' @name user_agreements.signatures
+NULL
+
+#' user_agreements.sign
+#' 
+#' user_agreements.sign is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.sign(NULL)
+#' @return UserAgreement object.
+#' @name user_agreements.sign
+NULL
+
+#' user_agreements.list
+#' 
+#' user_agreements.list is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact")
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @return UserAgreementList object.
+#' @name user_agreements.list
+NULL
+
+#' user_agreements.new
+#' 
+#' user_agreements.new is a method defined in Arvados class.
+#' 
+#' @usage arv$user_agreements.new(NULL)
+#' @return UserAgreement object.
+#' @name user_agreements.new
+NULL
+
+#' projects.get
+#' 
+#' projects.get is equivalent to the groups.get method.
+#' 
+#' @usage arv$projects.get(uuid)
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name projects.get
+NULL
+
+#' projects.create
+#' 
+#' projects.create wraps the groups.create method, setting the group_class attribute to "project".
+#' 
+#' @usage arv$projects.create(group, ensure_unique_name = "false")
+#' @param group Group object.
+#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.
+#' @return Group object.
+#' @name projects.create
+NULL
+
+#' projects.update
+#' 
+#' projects.update wraps the groups.update method, setting the group_class attribute to "project".
+#' 
+#' @usage arv$projects.update(group, uuid)
+#' @param group Group object.
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name projects.update
+NULL
+
+#' projects.delete
+#' 
+#' projects.delete is equivalent to the groups.delete method.
+#' 
+#' @usage arv$projects.delete(uuid)
+#' @param uuid The UUID of the Group in question.
+#' @return Group object.
+#' @name projects.delete
+NULL
+
+#' projects.list
+#' 
+#' projects.list wraps the groups.list method, adding a filter so that only groups whose group_class is "project" are returned.
+#' 
+#' @usage arv$projects.list(filters = NULL,
+#'     where = NULL, order = NULL, select = NULL,
+#'     distinct = NULL, limit = "100", offset = "0",
+#'     count = "exact", include_trash = NULL)
+#' @param filters 
+#' @param where 
+#' @param order 
+#' @param select 
+#' @param distinct 
+#' @param limit 
+#' @param offset 
+#' @param count 
+#' @param include_trash Include items whose is_trashed attribute is true.
+#' @return GroupList object.
+#' @name projects.list
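+#' 
+#' @examples
+#' \dontrun{
+#' # Minimal sketch: list projects whose name starts with "Test"; the
+#' # wrapper adds the group_class = "project" filter itself.
+#' projectList <- arv$projects.list(list(list("name", "like", "Test%")))
+#' }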
+NULL
+
+#' Arvados
+#'
+#' The Arvados class gives users the ability to access the Arvados REST API.
+#'
+#' @section Usage:
+#' \preformatted{arv = Arvados$new(authToken = NULL, hostName = NULL, numRetries = 0)}
+#'
+#' @section Arguments:
+#' \describe{
+#'     \item{authToken}{Authentication token. If not specified, the ARVADOS_API_TOKEN environment variable will be used.}
+#'     \item{hostName}{Host name. If not specified, the ARVADOS_API_HOST environment variable will be used.}
+#'     \item{numRetries}{Number of times to retry failed service requests.}
+#' }
+#'
+#' @section Methods:
+#' \describe{
+#'     \item{}{\code{\link{api_client_authorizations.create}}}
+#'     \item{}{\code{\link{api_client_authorizations.create_system_auth}}}
+#'     \item{}{\code{\link{api_client_authorizations.current}}}
+#'     \item{}{\code{\link{api_client_authorizations.delete}}}
+#'     \item{}{\code{\link{api_client_authorizations.get}}}
+#'     \item{}{\code{\link{api_client_authorizations.list}}}
+#'     \item{}{\code{\link{api_client_authorizations.update}}}
+#'     \item{}{\code{\link{api_clients.create}}}
+#'     \item{}{\code{\link{api_clients.delete}}}
+#'     \item{}{\code{\link{api_clients.get}}}
+#'     \item{}{\code{\link{api_clients.list}}}
+#'     \item{}{\code{\link{api_clients.update}}}
+#'     \item{}{\code{\link{authorized_keys.create}}}
+#'     \item{}{\code{\link{authorized_keys.delete}}}
+#'     \item{}{\code{\link{authorized_keys.get}}}
+#'     \item{}{\code{\link{authorized_keys.list}}}
+#'     \item{}{\code{\link{authorized_keys.update}}}
+#'     \item{}{\code{\link{collections.create}}}
+#'     \item{}{\code{\link{collections.delete}}}
+#'     \item{}{\code{\link{collections.get}}}
+#'     \item{}{\code{\link{collections.list}}}
+#'     \item{}{\code{\link{collections.provenance}}}
+#'     \item{}{\code{\link{collections.trash}}}
+#'     \item{}{\code{\link{collections.untrash}}}
+#'     \item{}{\code{\link{collections.update}}}
+#'     \item{}{\code{\link{collections.used_by}}}
+#'     \item{}{\code{\link{container_requests.create}}}
+#'     \item{}{\code{\link{container_requests.delete}}}
+#'     \item{}{\code{\link{container_requests.get}}}
+#'     \item{}{\code{\link{container_requests.list}}}
+#'     \item{}{\code{\link{container_requests.update}}}
+#'     \item{}{\code{\link{containers.auth}}}
+#'     \item{}{\code{\link{containers.create}}}
+#'     \item{}{\code{\link{containers.current}}}
+#'     \item{}{\code{\link{containers.delete}}}
+#'     \item{}{\code{\link{containers.get}}}
+#'     \item{}{\code{\link{containers.list}}}
+#'     \item{}{\code{\link{containers.lock}}}
+#'     \item{}{\code{\link{containers.secret_mounts}}}
+#'     \item{}{\code{\link{containers.unlock}}}
+#'     \item{}{\code{\link{containers.update}}}
+#'     \item{}{\code{\link{groups.contents}}}
+#'     \item{}{\code{\link{groups.create}}}
+#'     \item{}{\code{\link{groups.delete}}}
+#'     \item{}{\code{\link{groups.get}}}
+#'     \item{}{\code{\link{groups.list}}}
+#'     \item{}{\code{\link{groups.trash}}}
+#'     \item{}{\code{\link{groups.untrash}}}
+#'     \item{}{\code{\link{groups.update}}}
+#'     \item{}{\code{\link{humans.create}}}
+#'     \item{}{\code{\link{humans.delete}}}
+#'     \item{}{\code{\link{humans.get}}}
+#'     \item{}{\code{\link{humans.list}}}
+#'     \item{}{\code{\link{humans.update}}}
+#'     \item{}{\code{\link{jobs.cancel}}}
+#'     \item{}{\code{\link{jobs.create}}}
+#'     \item{}{\code{\link{jobs.delete}}}
+#'     \item{}{\code{\link{jobs.get}}}
+#'     \item{}{\code{\link{jobs.list}}}
+#'     \item{}{\code{\link{jobs.lock}}}
+#'     \item{}{\code{\link{jobs.queue}}}
+#'     \item{}{\code{\link{jobs.queue_size}}}
+#'     \item{}{\code{\link{jobs.update}}}
+#'     \item{}{\code{\link{job_tasks.create}}}
+#'     \item{}{\code{\link{job_tasks.delete}}}
+#'     \item{}{\code{\link{job_tasks.get}}}
+#'     \item{}{\code{\link{job_tasks.list}}}
+#'     \item{}{\code{\link{job_tasks.update}}}
+#'     \item{}{\code{\link{keep_disks.create}}}
+#'     \item{}{\code{\link{keep_disks.delete}}}
+#'     \item{}{\code{\link{keep_disks.get}}}
+#'     \item{}{\code{\link{keep_disks.list}}}
+#'     \item{}{\code{\link{keep_disks.ping}}}
+#'     \item{}{\code{\link{keep_disks.update}}}
+#'     \item{}{\code{\link{keep_services.accessible}}}
+#'     \item{}{\code{\link{keep_services.create}}}
+#'     \item{}{\code{\link{keep_services.delete}}}
+#'     \item{}{\code{\link{keep_services.get}}}
+#'     \item{}{\code{\link{keep_services.list}}}
+#'     \item{}{\code{\link{keep_services.update}}}
+#'     \item{}{\code{\link{links.create}}}
+#'     \item{}{\code{\link{links.delete}}}
+#'     \item{}{\code{\link{links.get}}}
+#'     \item{}{\code{\link{links.get_permissions}}}
+#'     \item{}{\code{\link{links.list}}}
+#'     \item{}{\code{\link{links.update}}}
+#'     \item{}{\code{\link{logs.create}}}
+#'     \item{}{\code{\link{logs.delete}}}
+#'     \item{}{\code{\link{logs.get}}}
+#'     \item{}{\code{\link{logs.list}}}
+#'     \item{}{\code{\link{logs.update}}}
+#'     \item{}{\code{\link{nodes.create}}}
+#'     \item{}{\code{\link{nodes.delete}}}
+#'     \item{}{\code{\link{nodes.get}}}
+#'     \item{}{\code{\link{nodes.list}}}
+#'     \item{}{\code{\link{nodes.ping}}}
+#'     \item{}{\code{\link{nodes.update}}}
+#'     \item{}{\code{\link{pipeline_instances.cancel}}}
+#'     \item{}{\code{\link{pipeline_instances.create}}}
+#'     \item{}{\code{\link{pipeline_instances.delete}}}
+#'     \item{}{\code{\link{pipeline_instances.get}}}
+#'     \item{}{\code{\link{pipeline_instances.list}}}
+#'     \item{}{\code{\link{pipeline_instances.update}}}
+#'     \item{}{\code{\link{pipeline_templates.create}}}
+#'     \item{}{\code{\link{pipeline_templates.delete}}}
+#'     \item{}{\code{\link{pipeline_templates.get}}}
+#'     \item{}{\code{\link{pipeline_templates.list}}}
+#'     \item{}{\code{\link{pipeline_templates.update}}}
+#'     \item{}{\code{\link{projects.create}}}
+#'     \item{}{\code{\link{projects.delete}}}
+#'     \item{}{\code{\link{projects.get}}}
+#'     \item{}{\code{\link{projects.list}}}
+#'     \item{}{\code{\link{projects.update}}}
+#'     \item{}{\code{\link{repositories.create}}}
+#'     \item{}{\code{\link{repositories.delete}}}
+#'     \item{}{\code{\link{repositories.get}}}
+#'     \item{}{\code{\link{repositories.get_all_permissions}}}
+#'     \item{}{\code{\link{repositories.list}}}
+#'     \item{}{\code{\link{repositories.update}}}
+#'     \item{}{\code{\link{specimens.create}}}
+#'     \item{}{\code{\link{specimens.delete}}}
+#'     \item{}{\code{\link{specimens.get}}}
+#'     \item{}{\code{\link{specimens.list}}}
+#'     \item{}{\code{\link{specimens.update}}}
+#'     \item{}{\code{\link{traits.create}}}
+#'     \item{}{\code{\link{traits.delete}}}
+#'     \item{}{\code{\link{traits.get}}}
+#'     \item{}{\code{\link{traits.list}}}
+#'     \item{}{\code{\link{traits.update}}}
+#'     \item{}{\code{\link{user_agreements.create}}}
+#'     \item{}{\code{\link{user_agreements.delete}}}
+#'     \item{}{\code{\link{user_agreements.get}}}
+#'     \item{}{\code{\link{user_agreements.list}}}
+#'     \item{}{\code{\link{user_agreements.new}}}
+#'     \item{}{\code{\link{user_agreements.sign}}}
+#'     \item{}{\code{\link{user_agreements.signatures}}}
+#'     \item{}{\code{\link{user_agreements.update}}}
+#'     \item{}{\code{\link{users.activate}}}
+#'     \item{}{\code{\link{users.create}}}
+#'     \item{}{\code{\link{users.current}}}
+#'     \item{}{\code{\link{users.delete}}}
+#'     \item{}{\code{\link{users.get}}}
+#'     \item{}{\code{\link{users.list}}}
+#'     \item{}{\code{\link{users.merge}}}
+#'     \item{}{\code{\link{users.setup}}}
+#'     \item{}{\code{\link{users.system}}}
+#'     \item{}{\code{\link{users.unsetup}}}
+#'     \item{}{\code{\link{users.update}}}
+#'     \item{}{\code{\link{users.update_uuid}}}
+#'     \item{}{\code{\link{virtual_machines.create}}}
+#'     \item{}{\code{\link{virtual_machines.delete}}}
+#'     \item{}{\code{\link{virtual_machines.get}}}
+#'     \item{}{\code{\link{virtual_machines.get_all_logins}}}
+#'     \item{}{\code{\link{virtual_machines.list}}}
+#'     \item{}{\code{\link{virtual_machines.logins}}}
+#'     \item{}{\code{\link{virtual_machines.update}}}
+#'     \item{}{\code{\link{workflows.create}}}
+#'     \item{}{\code{\link{workflows.delete}}}
+#'     \item{}{\code{\link{workflows.get}}}
+#'     \item{}{\code{\link{workflows.list}}}
+#'     \item{}{\code{\link{workflows.update}}}
+#' }
+#'
+#' @name Arvados
+#' @examples
+#' \dontrun{
+#' arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+#'
+#' collection <- arv$collections.get("uuid")
+#'
+#' collectionList <- arv$collections.list(list(list("name", "like", "Test%")))
+#' collectionList <- listAll(arv$collections.list, list(list("name", "like", "Test%")))
+#'
+#' deletedCollection <- arv$collections.delete("uuid")
+#'
+#' updatedCollection <- arv$collections.update(list(name = "New name", description = "New description"),
+#'                                             "uuid")
+#'
+#' createdCollection <- arv$collections.create(list(name = "Example",
+#'                                                  description = "This is a test collection"))
+#' }
+NULL
+
+#' @export
+Arvados <- R6::R6Class(
+
+       "Arvados",
+
+       public = list(
+
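+		# Explicit constructor arguments take precedence: they are written
+		# to the ARVADOS_API_HOST / ARVADOS_API_TOKEN environment variables
+		# and then read back, so both configuration paths converge below.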
+               initialize = function(authToken = NULL, hostName = NULL, numRetries = 0)
+               {
+                       if(!is.null(hostName))
+                               Sys.setenv(ARVADOS_API_HOST = hostName)
+
+                       if(!is.null(authToken))
+                               Sys.setenv(ARVADOS_API_TOKEN = authToken)
+
+                       hostName <- Sys.getenv("ARVADOS_API_HOST")
+                       token    <- Sys.getenv("ARVADOS_API_TOKEN")
+
+			if(hostName == "" || token == "")
+				stop(paste("Please provide a host name and authentication token",
+                                                  "or set ARVADOS_API_HOST and ARVADOS_API_TOKEN",
+                                                  "environment variables."))
+
+                       private$token <- token
+                       private$host  <- paste0("https://", hostName, "/arvados/v1/")
+                       private$numRetries <- numRetries
+                       private$REST <- RESTService$new(token, hostName,
+                                                       HttpRequest$new(), HttpParser$new(),
+                                                       numRetries)
+
+               },
+
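+		# The projects.* methods are thin wrappers around the corresponding
+		# groups.* methods: create and update force group_class = "project",
+		# and list filters on it, since projects are groups of that class.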
+               projects.get = function(uuid)
+               {
+                       self$groups.get(uuid)
+               },
+
+               projects.create = function(group, ensure_unique_name = "false")
+               {
+                       group <- c("group_class" = "project", group)
+                       self$groups.create(group, ensure_unique_name)
+               },
+
+               projects.update = function(group, uuid)
+               {
+                       group <- c("group_class" = "project", group)
+                       self$groups.update(group, uuid)
+               },
+
+               projects.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact",
+                       include_trash = NULL)
+               {
+                       filters[[length(filters) + 1]] <- list("group_class", "=", "project")
+                       self$groups.list(filters, where, order, select, distinct,
+                                        limit, offset, count, include_trash)
+               },
+
+               projects.delete = function(uuid)
+               {
+                       self$groups.delete(uuid)
+               },
+
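+		# The users.* methods below issue REST calls directly: each builds
+		# the endpoint URL, executes the request with retry support, parses
+		# the JSON response, and stops if the server reported an error.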
+               users.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("users/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.create = function(user, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("users")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(user) > 0)
+                               body <- jsonlite::toJSON(list(user = user), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.update = function(user, uuid)
+               {
+                       endPoint <- stringr::str_interp("users/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(user) > 0)
+                               body <- jsonlite::toJSON(list(user = user), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("users/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.current = function()
+               {
+                       endPoint <- stringr::str_interp("users/current")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.system = function()
+               {
+                       endPoint <- stringr::str_interp("users/system")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.activate = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("users/${uuid}/activate")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.setup = function(user = NULL, openid_prefix = NULL,
+                       repo_name = NULL, vm_uuid = NULL, send_notification_email = "false")
+               {
+                       endPoint <- stringr::str_interp("users/setup")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(user = user, openid_prefix = openid_prefix,
+                                                         repo_name = repo_name, vm_uuid = vm_uuid,
+                                                         send_notification_email = send_notification_email)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.unsetup = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("users/${uuid}/unsetup")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.update_uuid = function(uuid, new_uuid)
+               {
+                       endPoint <- stringr::str_interp("users/${uuid}/update_uuid")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(new_uuid = new_uuid)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               users.merge = function(new_owner_uuid, new_user_token,
+                       redirect_to_new_user = NULL)
+               {
+                       endPoint <- stringr::str_interp("users/merge")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(new_owner_uuid = new_owner_uuid,
+                                                         new_user_token = new_user_token, redirect_to_new_user = redirect_to_new_user)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
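+               # All *.list methods share these query parameters: filters/where
+               # narrow the result set, order sorts it, select picks attributes,
+               # and limit/offset page through matches (defaults: 100 rows from
+               # offset 0). Sketch, assuming the usual Arvados filter triples:
+               #   page <- arv$users.list(
+               #           filters = list(list("is_active", "=", TRUE)),
+               #           limit = 50, offset = 0)
+               #   page$items_available  # total matches when count = "exact"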
+               users.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("users")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
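+               # -- api_client_authorizations: manage API tokens. The CRUD
+               # methods follow the template above; create_system_auth and
+               # current are the endpoint-specific extras.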
+               api_client_authorizations.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_client_authorizations.create = function(apiclientauthorization,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(apiclientauthorization) > 0)
+                               body <- jsonlite::toJSON(list(apiclientauthorization = apiclientauthorization), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_client_authorizations.update = function(apiclientauthorization, uuid)
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(apiclientauthorization) > 0)
+                               body <- jsonlite::toJSON(list(apiclientauthorization = apiclientauthorization), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_client_authorizations.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_client_authorizations.create_system_auth = function(api_client_id = NULL,
+                       scopes = NULL)
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations/create_system_auth")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(api_client_id = api_client_id,
+                                                         scopes = scopes)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_client_authorizations.current = function()
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations/current")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_client_authorizations.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("api_client_authorizations")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
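+               # -- containers: besides CRUD, this group exposes lifecycle
+               # endpoints. A hedged sketch of the lock/unlock flow (normally
+               # driven by a dispatcher rather than end users):
+               #   arv$containers.lock("zzzzz-dz642-xxxxxxxxxxxxxxx")    # claim for execution
+               #   arv$containers.unlock("zzzzz-dz642-xxxxxxxxxxxxxxx")  # return to queue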
+               containers.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.create = function(container, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("containers")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(container) > 0)
+                               body <- jsonlite::toJSON(list(container = container), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.update = function(container, uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(container) > 0)
+                               body <- jsonlite::toJSON(list(container = container), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.auth = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}/auth")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.lock = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}/lock")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.unlock = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}/unlock")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.secret_mounts = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("containers/${uuid}/secret_mounts")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.current = function()
+               {
+                       endPoint <- stringr::str_interp("containers/current")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               containers.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("containers")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
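+               # -- api_clients: registry of client applications; plain CRUD
+               # plus list, same template as above.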
+               api_clients.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("api_clients/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_clients.create = function(apiclient, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("api_clients")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(apiclient) > 0)
+                               body <- jsonlite::toJSON(list(apiclient = apiclient), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_clients.update = function(apiclient, uuid)
+               {
+                       endPoint <- stringr::str_interp("api_clients/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(apiclient) > 0)
+                               body <- jsonlite::toJSON(list(apiclient = apiclient), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_clients.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("api_clients/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               api_clients.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("api_clients")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
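+               # -- container_requests: the user-facing way to run a container.
+               # Minimal creation sketch (hypothetical values; a real request
+               # also needs mounts/runtime_constraints per the API docs):
+               #   cr <- arv$container_requests.create(list(
+               #           name            = "example request",
+               #           command         = list("echo", "hello"),
+               #           container_image = "arvados/jobs",
+               #           output_path     = "/out",
+               #           state           = "Committed"))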
+               container_requests.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("container_requests/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               container_requests.create = function(containerrequest,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("container_requests")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(containerrequest) > 0)
+                               body <- jsonlite::toJSON(list(containerrequest = containerrequest), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               container_requests.update = function(containerrequest, uuid)
+               {
+                       endPoint <- stringr::str_interp("container_requests/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(containerrequest) > 0)
+                               body <- jsonlite::toJSON(list(containerrequest = containerrequest), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               container_requests.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("container_requests/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               container_requests.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("container_requests")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
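+               # -- authorized_keys: stores users' public SSH keys; standard
+               # CRUD plus list, same generated template.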
+               authorized_keys.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("authorized_keys/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               authorized_keys.create = function(authorizedkey,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("authorized_keys")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(authorizedkey) > 0)
+                               body <- jsonlite::toJSON(list(authorizedkey = authorizedkey), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               authorized_keys.update = function(authorizedkey, uuid)
+               {
+                       endPoint <- stringr::str_interp("authorized_keys/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(authorizedkey) > 0)
+                               body <- jsonlite::toJSON(list(authorizedkey = authorizedkey), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               authorized_keys.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("authorized_keys/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               authorized_keys.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("authorized_keys")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
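+               # -- collections: CRUD plus provenance/used_by introspection and
+               # soft deletion. Trash round trip (hypothetical uuid):
+               #   arv$collections.trash("zzzzz-4zz18-xxxxxxxxxxxxxxx")    # move to trash
+               #   arv$collections.untrash("zzzzz-4zz18-xxxxxxxxxxxxxxx")  # recover it
+               # Note that collections.list also accepts include_trash.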
+               collections.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.create = function(collection, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("collections")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(collection) > 0)
+                               body <- jsonlite::toJSON(list(collection = collection), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.update = function(collection, uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(collection) > 0)
+                               body <- jsonlite::toJSON(list(collection = collection), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.provenance = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}/provenance")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.used_by = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}/used_by")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.trash = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}/trash")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.untrash = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("collections/${uuid}/untrash")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               collections.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact", include_trash = NULL)
+               {
+                       endPoint <- stringr::str_interp("collections")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count,
+                                                         include_trash = include_trash)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
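+               # -- humans: apparently a legacy sample-metadata resource; plain
+               # CRUD plus list only.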
+               humans.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("humans/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               humans.create = function(human, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("humans")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(human) > 0)
+                               body <- jsonlite::toJSON(list(human = human), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               humans.update = function(human, uuid)
+               {
+                       endPoint <- stringr::str_interp("humans/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(human) > 0)
+                               body <- jsonlite::toJSON(list(human = human), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               humans.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("humans/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               humans.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("humans")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
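+               # -- job_tasks (and the jobs group that follows): part of the
+               # legacy Crunch v1 pipeline API, superseded by the containers /
+               # container_requests endpoints above; kept as generated.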
+               job_tasks.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("job_tasks/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               job_tasks.create = function(jobtask, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("job_tasks")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(jobtask) > 0)
+                               body <- jsonlite::toJSON(list(jobtask = jobtask), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               job_tasks.update = function(jobtask, uuid)
+               {
+                       endPoint <- stringr::str_interp("job_tasks/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(jobtask) > 0)
+                               body <- jsonlite::toJSON(list(jobtask = jobtask), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               job_tasks.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("job_tasks/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               job_tasks.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("job_tasks")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("jobs/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.create = function(job, ensure_unique_name = "false",
+                       find_or_create = "false", filters = NULL,
+                       minimum_script_version = NULL, exclude_script_versions = NULL)
+               {
+                       endPoint <- stringr::str_interp("jobs")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name,
+                                                         find_or_create = find_or_create, filters = filters,
+                                                         minimum_script_version = minimum_script_version,
+                                                         exclude_script_versions = exclude_script_versions)
+                       
+                       if(length(job) > 0)
+                               body <- jsonlite::toJSON(list(job = job), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.update = function(job, uuid)
+               {
+                       endPoint <- stringr::str_interp("jobs/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(job) > 0)
+                               body <- jsonlite::toJSON(list(job = job), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("jobs/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.queue = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("jobs/queue")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.queue_size = function()
+               {
+                       endPoint <- stringr::str_interp("jobs/queue_size")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.cancel = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("jobs/${uuid}/cancel")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.lock = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("jobs/${uuid}/lock")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               jobs.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("jobs")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_disks.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("keep_disks/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_disks.create = function(keepdisk, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("keep_disks")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(keepdisk) > 0)
+                               body <- jsonlite::toJSON(list(keepdisk = keepdisk), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_disks.update = function(keepdisk, uuid)
+               {
+                       endPoint <- stringr::str_interp("keep_disks/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(keepdisk) > 0)
+                               body <- jsonlite::toJSON(list(keepdisk = keepdisk), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_disks.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("keep_disks/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_disks.ping = function(uuid = NULL, ping_secret,
+                       node_uuid = NULL, filesystem_uuid = NULL,
+                       service_host = NULL, service_port, service_ssl_flag)
+               {
+                       endPoint <- stringr::str_interp("keep_disks/ping")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(uuid = uuid, ping_secret = ping_secret,
+                                                         node_uuid = node_uuid, filesystem_uuid = filesystem_uuid,
+                                                         service_host = service_host, service_port = service_port,
+                                                         service_ssl_flag = service_ssl_flag)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_disks.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("keep_disks")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               nodes.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("nodes/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               nodes.create = function(node, ensure_unique_name = "false",
+                       assign_slot = NULL)
+               {
+                       endPoint <- stringr::str_interp("nodes")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name,
+                                                         assign_slot = assign_slot)
+                       
+                       if(length(node) > 0)
+                               body <- jsonlite::toJSON(list(node = node), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               nodes.update = function(node, uuid, assign_slot = NULL)
+               {
+                       endPoint <- stringr::str_interp("nodes/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(assign_slot = assign_slot)
+                       
+                       if(length(node) > 0)
+                               body <- jsonlite::toJSON(list(node = node), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               nodes.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("nodes/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               nodes.ping = function(uuid, ping_secret)
+               {
+                       endPoint <- stringr::str_interp("nodes/${uuid}/ping")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ping_secret = ping_secret)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               nodes.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("nodes")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               links.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("links/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               links.create = function(link, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("links")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(link) > 0)
+                               body <- jsonlite::toJSON(list(link = link), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               links.update = function(link, uuid)
+               {
+                       endPoint <- stringr::str_interp("links/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(link) > 0)
+                               body <- jsonlite::toJSON(list(link = link), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               links.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("links/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               links.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("links")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               links.get_permissions = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("permissions/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_services.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("keep_services/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_services.create = function(keepservice,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("keep_services")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(keepservice) > 0)
+                               body <- jsonlite::toJSON(list(keepservice = keepservice), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_services.update = function(keepservice, uuid)
+               {
+                       endPoint <- stringr::str_interp("keep_services/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(keepservice) > 0)
+                               body <- jsonlite::toJSON(list(keepservice = keepservice), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_services.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("keep_services/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_services.accessible = function()
+               {
+                       endPoint <- stringr::str_interp("keep_services/accessible")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               keep_services.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("keep_services")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_templates.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_templates/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_templates.create = function(pipelinetemplate,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("pipeline_templates")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(pipelinetemplate) > 0)
+                               body <- jsonlite::toJSON(list(pipelinetemplate = pipelinetemplate), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_templates.update = function(pipelinetemplate, uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_templates/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(pipelinetemplate) > 0)
+                               body <- jsonlite::toJSON(list(pipelinetemplate = pipelinetemplate), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_templates.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_templates/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_templates.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("pipeline_templates")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_instances.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_instances.create = function(pipelineinstance,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("pipeline_instances")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(pipelineinstance) > 0)
+                               body <- jsonlite::toJSON(list(pipelineinstance = pipelineinstance), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_instances.update = function(pipelineinstance, uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(pipelineinstance) > 0)
+                               body <- jsonlite::toJSON(list(pipelineinstance = pipelineinstance), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_instances.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_instances.cancel = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("pipeline_instances/${uuid}/cancel")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               pipeline_instances.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("pipeline_instances")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               repositories.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("repositories/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               repositories.create = function(repository, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("repositories")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(repository) > 0)
+                               body <- jsonlite::toJSON(list(repository = repository), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               repositories.update = function(repository, uuid)
+               {
+                       endPoint <- stringr::str_interp("repositories/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(repository) > 0)
+                               body <- jsonlite::toJSON(list(repository = repository), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               repositories.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("repositories/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               repositories.get_all_permissions = function()
+               {
+                       endPoint <- stringr::str_interp("repositories/get_all_permissions")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               repositories.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("repositories")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               specimens.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("specimens/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               specimens.create = function(specimen, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("specimens")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(specimen) > 0)
+                               body <- jsonlite::toJSON(list(specimen = specimen), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               specimens.update = function(specimen, uuid)
+               {
+                       endPoint <- stringr::str_interp("specimens/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(specimen) > 0)
+                               body <- jsonlite::toJSON(list(specimen = specimen), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               specimens.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("specimens/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               specimens.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("specimens")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               logs.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("logs/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               logs.create = function(log, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("logs")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(log) > 0)
+                               body <- jsonlite::toJSON(list(log = log), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               logs.update = function(log, uuid)
+               {
+                       endPoint <- stringr::str_interp("logs/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(log) > 0)
+                               body <- jsonlite::toJSON(list(log = log), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               logs.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("logs/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               logs.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("logs")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               traits.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("traits/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               traits.create = function(trait, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("traits")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(trait) > 0)
+                               body <- jsonlite::toJSON(list(trait = trait), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               traits.update = function(trait, uuid)
+               {
+                       endPoint <- stringr::str_interp("traits/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(trait) > 0)
+                               body <- jsonlite::toJSON(list(trait = trait), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               traits.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("traits/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               traits.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact")
+               {
+                       endPoint <- stringr::str_interp("traits")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("virtual_machines/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.create = function(virtualmachine,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("virtual_machines")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(virtualmachine) > 0)
+                               body <- jsonlite::toJSON(list(virtualmachine = virtualmachine), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.update = function(virtualmachine, uuid)
+               {
+                       endPoint <- stringr::str_interp("virtual_machines/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(virtualmachine) > 0)
+                               body <- jsonlite::toJSON(list(virtualmachine = virtualmachine), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("virtual_machines/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.logins = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("virtual_machines/${uuid}/logins")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.get_all_logins = function()
+               {
+                       endPoint <- stringr::str_interp("virtual_machines/get_all_logins")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               virtual_machines.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("virtual_machines")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               workflows.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("workflows/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               workflows.create = function(workflow, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("workflows")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(workflow) > 0)
+                               body <- jsonlite::toJSON(list(workflow = workflow), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               workflows.update = function(workflow, uuid)
+               {
+                       endPoint <- stringr::str_interp("workflows/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(workflow) > 0)
+                               body <- jsonlite::toJSON(list(workflow = workflow), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               workflows.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("workflows/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               workflows.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("workflows")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("groups/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.create = function(group, ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("groups")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(group) > 0)
+                               body <- jsonlite::toJSON(list(group = group), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.update = function(group, uuid)
+               {
+                       endPoint <- stringr::str_interp("groups/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(group) > 0)
+                               body <- jsonlite::toJSON(list(group = group), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("groups/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.contents = function(filters = NULL,
+                       where = NULL, order = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact",
+                       include_trash = NULL, uuid = NULL, recursive = NULL)
+               {
+                       endPoint <- stringr::str_interp("groups/contents")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, distinct = distinct, limit = limit,
+                                                         offset = offset, count = count, include_trash = include_trash,
+                                                         uuid = uuid, recursive = recursive)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.trash = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("groups/${uuid}/trash")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.untrash = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("groups/${uuid}/untrash")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               groups.list = function(filters = NULL, where = NULL,
+                       order = NULL, select = NULL, distinct = NULL,
+                       limit = "100", offset = "0", count = "exact",
+                       include_trash = NULL)
+               {
+                       endPoint <- stringr::str_interp("groups")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count,
+                                                         include_trash = include_trash)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.get = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("user_agreements/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.create = function(useragreement,
+                       ensure_unique_name = "false")
+               {
+                       endPoint <- stringr::str_interp("user_agreements")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(ensure_unique_name = ensure_unique_name)
+                       
+                       if(length(useragreement) > 0)
+                               body <- jsonlite::toJSON(list(useragreement = useragreement), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.update = function(useragreement, uuid)
+               {
+                       endPoint <- stringr::str_interp("user_agreements/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       if(length(useragreement) > 0)
+                               body <- jsonlite::toJSON(list(useragreement = useragreement), 
+                                                        auto_unbox = TRUE)
+                       else
+                               body <- NULL
+                       
+                       response <- private$REST$http$exec("PUT", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.delete = function(uuid)
+               {
+                       endPoint <- stringr::str_interp("user_agreements/${uuid}")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("DELETE", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.signatures = function()
+               {
+                       endPoint <- stringr::str_interp("user_agreements/signatures")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.sign = function()
+               {
+                       endPoint <- stringr::str_interp("user_agreements/sign")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("POST", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.list = function(filters = NULL,
+                       where = NULL, order = NULL, select = NULL,
+                       distinct = NULL, limit = "100", offset = "0",
+                       count = "exact")
+               {
+                       endPoint <- stringr::str_interp("user_agreements")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- list(filters = filters, where = where,
+                                                         order = order, select = select, distinct = distinct,
+                                                         limit = limit, offset = offset, count = count)
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               user_agreements.new = function()
+               {
+                       endPoint <- stringr::str_interp("user_agreements/new")
+                       url <- paste0(private$host, endPoint)
+                       headers <- list(Authorization = paste("OAuth2", private$token), 
+                                       "Content-Type" = "application/json")
+                       queryArgs <- NULL
+                       
+                       body <- NULL
+                       
+                       response <- private$REST$http$exec("GET", url, headers, body,
+                                                          queryArgs, private$numRetries)
+                       resource <- private$REST$httpParser$parseJSONResponse(response)
+                       
+                       if(!is.null(resource$errors))
+                               stop(resource$errors)
+                       
+                       resource
+               },
+
+               getHostName = function() private$host,
+               getToken = function() private$token,
+               setRESTService = function(newREST) private$REST <- newREST,
+               getRESTService = function() private$REST
+       ),
+
+       private = list(
+
+               token = NULL,
+               host = NULL,
+               REST = NULL,
+               numRetries = NULL
+       ),
+
+       cloneable = FALSE
+)
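+
+# Example usage (a minimal sketch, not generated code; it assumes the
+# Arvados$new() constructor accepts an API token and host name, matching
+# the private token/host fields above):
+#
+#   arv <- Arvados$new("your_api_token", "your.arvados.host")
+#
+#   # List up to 10 workflows whose name contains "demo". Filters follow the
+#   # Arvados list(list("attribute", "operator", "operand")) convention.
+#   workflows <- arv$workflows.list(
+#       filters = list(list("name", "like", "%demo%")), limit = "10")
+#
+#   # Fetch a single record by UUID; API errors are raised via stop().
+#   wf <- arv$workflows.get(workflows$items[[1]]$uuid)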
diff --git a/sdk/R/R/ArvadosFile.R b/sdk/R/R/ArvadosFile.R
new file mode 100644 (file)
index 0000000..70bb445
--- /dev/null
@@ -0,0 +1,311 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+source("./R/util.R")
+
+#' ArvadosFile
+#'
+#' The ArvadosFile class represents a file inside an Arvados collection.
+#'
+#' @section Usage:
+#' \preformatted{file = ArvadosFile$new(name)}
+#'
+#' @section Arguments:
+#' \describe{
+#'   \item{name}{Name of the file.}
+#' }
+#'
+#' @section Methods:
+#' \describe{
+#'   \item{getName()}{Returns the name of the file.}
+#'   \item{getRelativePath()}{Returns the file path relative to the collection root.}
+#'   \item{read(contentType = "raw", offset = 0, length = 0)}{Reads the file content.}
+#'   \item{write(content, contentType = "text/html")}{Writes to the file (overwriting its current content).}
+#'   \item{connection(rw)}{Gets a connection opened in "read" or "write" mode.}
+#'   \item{flush()}{Writes the connection's content to the file (overwriting its current content).}
+#'   \item{remove(name)}{Removes the ArvadosFile or Subcollection specified by name from the subcollection.}
+#'   \item{getSizeInBytes()}{Returns the file size in bytes.}
+#'   \item{move(destination)}{Moves the file to a new location inside the collection.}
+#'   \item{copy(destination)}{Copies the file to a new location inside the collection.}
+#' }
+#'
+#' @name ArvadosFile
+#' @examples
+#' \dontrun{
+#' myFile <- ArvadosFile$new("myFile")
+#'
+#' myFile$write("This is new file content")
+#' fileContent <- myFile$read()
+#' fileContent <- myFile$read("text")
+#' fileContent <- myFile$read("raw", offset = 8, length = 4)
+#'
+#' # Write a table:
+#' arvConnection <- myFile$connection("w")
+#' write.table(mytable, arvConnection)
+#' myFile$flush()
+#'
+#' # Read a table:
+#' arvConnection <- myFile$connection("r")
+#' mytable <- read.table(arvConnection)
+#'
+#' myFile$move("newFolder/myFile")
+#' myFile$copy("newFolder/myFile")
+#' }
+NULL
+
+#' @export
+ArvadosFile <- R6::R6Class(
+
+    "ArvadosFile",
+
+    public = list(
+
+        initialize = function(name)
+        {
+            if(name == "")
+                stop("Invalid name.")
+
+            private$name <- name
+        },
+
+        getName = function() private$name,
+
+        getFileListing = function(fullpath = TRUE)
+        {
+            self$getName()
+        },
+
+        getSizeInBytes = function()
+        {
+            if(is.null(private$collection))
+                return(0)
+
+            REST <- private$collection$getRESTService()
+
+            fileSize <- REST$getResourceSize(self$getRelativePath(),
+                                             private$collection$uuid)
+            fileSize
+        },
+
+        get = function(fileLikeObjectName)
+        {
+            return(NULL)
+        },
+
+        getFirst = function()
+        {
+            return(NULL)
+        },
+
+        getCollection = function() private$collection,
+
+        setCollection = function(collection, setRecursively = TRUE)
+        {
+            private$collection <- collection
+        },
+
+        getRelativePath = function()
+        {
+            relativePath <- c(private$name)
+            parent <- private$parent
+
+            while(!is.null(parent))
+            {
+                relativePath <- c(parent$getName(), relativePath)
+                parent <- parent$getParent()
+            }
+
+            relativePath <- relativePath[relativePath != ""]
+            paste0(relativePath, collapse = "/")
+        },
+
+        getParent = function() private$parent,
+
+        setParent = function(newParent) private$parent <- newParent,
+
+        read = function(contentType = "raw", offset = 0, length = 0)
+        {
+            if(is.null(private$collection))
+                stop("ArvadosFile doesn't belong to any collection.")
+
+            if(offset < 0 || length < 0)
+                stop("Offset and length must be non-negative values.")
+
+            REST <- private$collection$getRESTService()
+
+            fileContent <- REST$read(self$getRelativePath(),
+                                     private$collection$uuid,
+                                     contentType, offset, length)
+            fileContent
+        },
+
+        connection = function(rw)
+        {
+            if (rw == "r" || rw == "rb")
+            {
+                REST <- private$collection$getRESTService()
+                return(REST$getConnection(self$getRelativePath(),
+                                          private$collection$uuid,
+                                          rw))
+            }
+            else if (rw == "w")
+            {
+                private$buffer <- textConnection(NULL, "w")
+
+                return(private$buffer)
+            }
+        },
+
+        flush = function()
+        {
+            v <- textConnectionValue(private$buffer)
+            close(private$buffer)
+            self$write(paste(v, collapse='\n'))
+        },
+
+        write = function(content, contentType = "text/html")
+        {
+            if(is.null(private$collection))
+                stop("ArvadosFile doesn't belong to any collection.")
+
+            REST <- private$collection$getRESTService()
+
+            writeResult <- REST$write(self$getRelativePath(),
+                                      private$collection$uuid,
+                                      content, contentType)
+            writeResult
+        },
+
+        move = function(destination)
+        {
+            if(is.null(private$collection))
+                stop("ArvadosFile doesn't belong to any collection.")
+
+            destination <- trimFromEnd(destination, "/")
+            nameAndPath <- splitToPathAndName(destination)
+
+            newParent <- private$collection$get(nameAndPath$path)
+
+            if(is.null(newParent))
+                stop("Unable to get destination subcollection.")
+
+            childWithSameName <- newParent$get(nameAndPath$name)
+
+            if(!is.null(childWithSameName))
+                stop("Destination already contains content with the same name.")
+
+            REST <- private$collection$getRESTService()
+            REST$move(self$getRelativePath(),
+                      paste0(newParent$getRelativePath(), "/", nameAndPath$name),
+                      private$collection$uuid)
+
+            private$dettachFromCurrentParent()
+            private$attachToNewParent(self, newParent)
+
+            private$parent <- newParent
+            private$name <- nameAndPath$name
+
+            self
+        },
+
+        copy = function(destination)
+        {
+            if(is.null(private$collection))
+                stop("ArvadosFile doesn't belong to any collection.")
+
+            destination <- trimFromEnd(destination, "/")
+            nameAndPath <- splitToPathAndName(destination)
+
+            newParent <- private$collection$get(nameAndPath$path)
+
+            if(is.null(newParent))
+                stop("Unable to get destination subcollection.")
+
+            childWithSameName <- newParent$get(nameAndPath$name)
+
+            if(!is.null(childWithSameName))
+                stop("Destination already contains content with the same name.")
+
+            REST <- private$collection$getRESTService()
+            REST$copy(self$getRelativePath(),
+                      paste0(newParent$getRelativePath(), "/", nameAndPath$name),
+                      private$collection$uuid)
+
+            newFile <- self$duplicate(nameAndPath$name)
+            newFile$setCollection(self$getCollection())
+            private$attachToNewParent(newFile, newParent)
+            newFile$setParent(newParent)
+
+            newFile
+        },
+
+        duplicate = function(newName = NULL)
+        {
+            name <- if(!is.null(newName)) newName else private$name
+            newFile <- ArvadosFile$new(name)
+            newFile
+        }
+    ),
+
+    private = list(
+
+        name       = NULL,
+        size       = NULL,
+        parent     = NULL,
+        collection = NULL,
+        buffer     = NULL,
+
+        attachToNewParent = function(content, newParent)
+        {
+            # We temporarily set the parent's collection to NULL. This ensures
+            # that the add method doesn't post this file to REST.
+            # We also need to set the content's collection to NULL because the
+            # add method throws an exception if we try to add content that
+            # already belongs to a collection.
+            parentsCollection <- newParent$getCollection()
+            content$setCollection(NULL, setRecursively = FALSE)
+            newParent$setCollection(NULL, setRecursively = FALSE)
+            newParent$add(content)
+            content$setCollection(parentsCollection, setRecursively = FALSE)
+            newParent$setCollection(parentsCollection, setRecursively = FALSE)
+        },
+
+        dettachFromCurrentParent = function()
+        {
+            # We temporarily set the parent's collection to NULL. This ensures
+            # that the remove method doesn't remove this file from REST.
+            parent <- private$parent
+            parentsCollection <- parent$getCollection()
+            parent$setCollection(NULL, setRecursively = FALSE)
+            parent$remove(private$name)
+            parent$setCollection(parentsCollection, setRecursively = FALSE)
+        }
+    ),
+
+    cloneable = FALSE
+)
+
+#' print.ArvadosFile
+#'
+#' Custom print function for ArvadosFile class
+#'
+#' @param x Instance of ArvadosFile class
+#' @param ... Optional arguments.
+#' @export
+print.ArvadosFile = function(x, ...)
+{
+    collection   <- NULL
+    relativePath <- x$getRelativePath()
+
+    if(!is.null(x$getCollection()))
+    {
+        collection <- x$getCollection()$uuid
+        relativePath <- paste0("/", relativePath)
+    }
+
+    cat(paste0("Type:          ", "\"", "ArvadosFile", "\""), sep = "\n")
+    cat(paste0("Name:          ", "\"", x$getName(),   "\""), sep = "\n")
+    cat(paste0("Relative path: ", "\"", relativePath,  "\""), sep = "\n")
+    cat(paste0("Collection:    ", "\"", collection,    "\""), sep = "\n")
+}
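+
+# Example: reading a large file in fixed-size chunks with read()'s offset and
+# length arguments. The collection UUID and file name are placeholders.
+#
+# arv        <- Arvados$new("token", "example.arvadosapi.com")
+# collection <- Collection$new(arv, "uuid")
+# myFile     <- collection$get("log.txt")
+#
+# fileSize  <- myFile$getSizeInBytes()
+# chunkSize <- 1024
+# offset    <- 0
+# while(offset < fileSize)
+# {
+#     chunk  <- myFile$read("raw", offset, min(chunkSize, fileSize - offset))
+#     offset <- offset + length(chunk)
+# }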
diff --git a/sdk/R/R/Collection.R b/sdk/R/R/Collection.R
new file mode 100644 (file)
index 0000000..8869d7b
--- /dev/null
@@ -0,0 +1,262 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+source("./R/Subcollection.R")
+source("./R/ArvadosFile.R")
+source("./R/RESTService.R")
+source("./R/util.R")
+
+#' Collection
+#'
+#' Collection class provides an interface for working with Arvados collections.
+#'
+#' @section Usage:
+#' \preformatted{collection = Collection$new(arv, uuid)}
+#'
+#' @section Arguments:
+#' \describe{
+#'   \item{arv}{Arvados object.}
+#'   \item{uuid}{UUID of a collection.}
+#' }
+#'
+#' @section Methods:
+#' \describe{
+#'   \item{add(content)}{Adds the given ArvadosFile or Subcollection to the collection.}
+#'   \item{create(files)}{Creates one or more ArvadosFiles at the given paths and adds them to the collection.}
+#'   \item{remove(fileNames)}{Removes one or more files from the collection.}
+#'   \item{move(content, destination)}{Moves an ArvadosFile or Subcollection to another location in the collection.}
+#'   \item{copy(content, destination)}{Copies an ArvadosFile or Subcollection to another location in the collection.}
+#'   \item{getFileListing()}{Returns the collection's file listing as a character vector.}
+#'   \item{get(relativePath)}{If relativePath is valid, returns the ArvadosFile or Subcollection it points to, else returns NULL.}
+#' }
+#'
+#' @name Collection
+#' @examples
+#' \dontrun{
+#' arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+#' collection <- Collection$new(arv, "uuid")
+#'
+#' createdFiles <- collection$create(c("cpp/src/main.cpp", "cpp/src/lib.dll"))
+#'
+#' collection$remove("location/to/my/file.cpp")
+#'
+#' collection$move("folder/file.cpp", "file.cpp")
+#'
+#' arvadosFile <- collection$get("location/to/my/file.cpp")
+#' arvadosSubcollection <- collection$get("location/to/my/directory/")
+#' }
+NULL
+
+#' @export
+Collection <- R6::R6Class(
+
+    "Collection",
+
+    public = list(
+
+        uuid = NULL,
+
+        initialize = function(api, uuid)
+        {
+            private$REST <- api$getRESTService()
+            self$uuid <- uuid
+        },
+
+        add = function(content, relativePath = "")
+        {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
+            if(relativePath == ""  ||
+               relativePath == "." ||
+               relativePath == "./")
+            {
+                subcollection <- private$tree$getTree()
+            }
+            else
+            {
+                relativePath <- trimFromEnd(relativePath, "/")
+                subcollection <- self$get(relativePath)
+            }
+
+            if(is.null(subcollection))
+                stop(paste("Subcollection", relativePath, "doesn't exist."))
+
+            if("ArvadosFile"   %in% class(content) ||
+               "Subcollection" %in% class(content))
+            {
+                if(!is.null(content$getCollection()))
+                    stop("Content already belongs to a collection.")
+
+                if(content$getName() == "")
+                    stop("Content has invalid name.")
+
+                subcollection$add(content)
+                content
+            }
+            else
+            {
+                stop(paste0("Expected ArvadosFile or Subcollection object, got ",
+                            paste0("(", paste0(class(content), collapse = ", "), ")"),
+                            "."))
+            }
+        },
+
+        create = function(files)
+        {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
+            if(is.character(files))
+            {
+                sapply(files, function(file)
+                {
+                    childWithSameName <- self$get(file)
+                    if(!is.null(childWithSameName))
+                        stop("Destination already contains a file with the same name.")
+
+                    newTreeBranch <- private$tree$createBranch(file)
+                    private$tree$addBranch(private$tree$getTree(), newTreeBranch)
+
+                    private$REST$create(file, self$uuid)
+                    newTreeBranch$setCollection(self)
+                })
+
+                "Created"
+            }
+            else
+            {
+                stop(paste0("Expected character vector, got ",
+                            paste0("(", paste0(class(files), collapse = ", "), ")"),
+                            "."))
+            }
+        },
+
+        remove = function(paths)
+        {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
+            if(is.character(paths))
+            {
+                sapply(paths, function(filePath)
+                {
+                    filePath <- trimFromEnd(filePath, "/")
+                    file <- self$get(filePath)
+
+                    if(is.null(file))
+                        stop(paste("File", filePath, "doesn't exist."))
+
+                    parent <- file$getParent()
+
+                    if(is.null(parent))
+                        stop("You can't delete the root folder.")
+
+                    parent$remove(file$getName())
+                })
+
+                "Content removed"
+            }
+            else
+            {
+                stop(paste0("Expected character vector, got ",
+                            paste0("(", paste0(class(paths), collapse = ", "), ")"),
+                            "."))
+            }
+        },
+
+        move = function(content, destination)
+        {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
+            content <- trimFromEnd(content, "/")
+
+            elementToMove <- self$get(content)
+
+            if(is.null(elementToMove))
+                stop("Content you want to move doesn't exist in the collection.")
+
+            elementToMove$move(destination)
+        },
+
+        copy = function(content, destination)
+        {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
+            content <- trimFromEnd(content, "/")
+
+            elementToCopy <- self$get(content)
+
+            if(is.null(elementToCopy))
+                stop("Content you want to copy doesn't exist in the collection.")
+
+            elementToCopy$copy(destination)
+        },
+
+        refresh = function()
+        {
+            if(!is.null(private$tree))
+            {
+                private$tree$getTree()$setCollection(NULL, setRecursively = TRUE)
+                private$tree <- NULL
+            }
+        },
+
+        getFileListing = function()
+        {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
+            content <- private$REST$getCollectionContent(self$uuid)
+            content[order(tolower(content))]
+        },
+
+        get = function(relativePath)
+        {
+            if(is.null(private$tree))
+                private$generateCollectionTreeStructure()
+
+            private$tree$getElement(relativePath)
+        },
+
+        getRESTService = function() private$REST,
+        setRESTService = function(newRESTService) private$REST <- newRESTService
+    ),
+
+    private = list(
+
+        REST        = NULL,
+        tree        = NULL,
+        fileContent = NULL,
+
+        generateCollectionTreeStructure = function()
+        {
+            if(is.null(self$uuid))
+                stop("Collection uuid is not defined.")
+
+            if(is.null(private$REST))
+                stop("REST service is not defined.")
+
+            private$fileContent <- private$REST$getCollectionContent(self$uuid)
+            private$tree <- CollectionTree$new(private$fileContent, self)
+        }
+    ),
+
+    cloneable = FALSE
+)
+
+#' print.Collection
+#'
+#' Custom print function for Collection class
+#'
+#' @param x Instance of Collection class
+#' @param ... Optional arguments.
+#' @export
+print.Collection = function(x, ...)
+{
+    cat(paste0("Type: ", "\"", "Arvados Collection", "\""), sep = "\n")
+    cat(paste0("uuid: ", "\"", x$uuid,               "\""), sep = "\n")
+}
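+
+# Example: a typical round trip; "uuid" stands in for a real collection UUID.
+#
+# arv        <- Arvados$new("token", "example.arvadosapi.com")
+# collection <- Collection$new(arv, "uuid")
+#
+# collection$create(c("reports/summary.txt", "archive/keep.txt"))
+# summary <- collection$get("reports/summary.txt")
+# summary$write("All tests passed.", "text/plain")
+#
+# # copy() and move() require the destination folder to exist already:
+# collection$copy("reports/summary.txt", "archive/summary.txt")
+# collection$remove("archive/keep.txt")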
diff --git a/sdk/R/R/CollectionTree.R b/sdk/R/R/CollectionTree.R
new file mode 100644 (file)
index 0000000..5f7a294
--- /dev/null
@@ -0,0 +1,115 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+source("./R/Subcollection.R")
+source("./R/ArvadosFile.R")
+source("./R/util.R")
+
+CollectionTree <- R6::R6Class(
+    "CollectionTree",
+    public = list(
+
+        pathsList = NULL,
+
+        initialize = function(fileContent, collection)
+        {
+            self$pathsList <- fileContent
+            treeBranches <- sapply(fileContent, function(filePath) self$createBranch(filePath))
+            root <- Subcollection$new("")
+            sapply(treeBranches, function(branch) self$addBranch(root, branch))
+            root$setCollection(collection)
+            private$tree <- root
+        },
+
+        createBranch = function(filePath)
+        {
+            splitPath <- unlist(strsplit(filePath, "/", fixed = TRUE))
+            branch <- NULL
+            lastElementIndex <- length(splitPath)
+
+            for(elementIndex in lastElementIndex:1)
+            {
+                if(elementIndex == lastElementIndex)
+                {
+                    branch <- ArvadosFile$new(splitPath[[elementIndex]])
+                }
+                else
+                {
+                    newFolder <- Subcollection$new(splitPath[[elementIndex]])
+                    newFolder$add(branch)
+                    branch <- newFolder
+                }
+            }
+
+            branch
+        },
+
+        addBranch = function(container, node)
+        {
+            child <- container$get(node$getName())
+
+            if(is.null(child))
+            {
+                # Make sure we don't make any REST calls while adding the child
+                collection <- container$getCollection()
+                container$setCollection(NULL, setRecursively = FALSE)
+                container$add(node)
+                container$setCollection(collection, setRecursively = FALSE)
+            }
+            else
+            {
+                # Note: REST always returns a folder's name alone before the
+                # folder's content, so on the first encounter we can't tell
+                # whether the name refers to a file or a folder, and we assume
+                # it's a file. If we encounter the same name again, we know it's
+                # a folder and replace the ArvadosFile with a Subcollection.
+                if("ArvadosFile" %in% class(child))
+                    child = private$replaceFileWithSubcollection(child)
+
+                self$addBranch(child, node$getFirst())
+            }
+        },
+
+        getElement = function(relativePath)
+        {
+            relativePath <- trimFromStart(relativePath, "./")
+            relativePath <- trimFromEnd(relativePath, "/")
+
+            if(endsWith(relativePath, "/"))
+                relativePath <- substr(relativePath, 0, nchar(relativePath) - 1)
+
+            splitPath <- unlist(strsplit(relativePath, "/", fixed = TRUE))
+            returnElement <- private$tree
+
+            for(pathFragment in splitPath)
+            {
+                returnElement <- returnElement$get(pathFragment)
+
+                if(is.null(returnElement))
+                    return(NULL)
+            }
+
+            returnElement
+        },
+
+        getTree = function() private$tree
+    ),
+
+    private = list(
+
+        tree = NULL,
+
+        replaceFileWithSubcollection = function(arvadosFile)
+        {
+            subcollection <- Subcollection$new(arvadosFile$getName())
+            fileParent <- arvadosFile$getParent()
+            fileParent$remove(arvadosFile$getName())
+            fileParent$add(subcollection)
+
+            arvadosFile$setParent(NULL)
+
+            subcollection
+        }
+    )
+)
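+
+# Example of the tree assembly above: createBranch() turns each path into a
+# chain of nodes and addBranch() merges chains that share a prefix, so both
+# files below end up under a single "folder" Subcollection.
+#
+# listing <- c("folder/a.txt", "folder/b.txt")
+# tree    <- CollectionTree$new(listing, collection = NULL)
+# tree$getElement("folder/a.txt")$getName()    # "a.txt"
+# tree$getElement("folder")$getFileListing()   # "folder/a.txt" "folder/b.txt"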
diff --git a/sdk/R/R/HttpParser.R b/sdk/R/R/HttpParser.R
new file mode 100644 (file)
index 0000000..cd49216
--- /dev/null
@@ -0,0 +1,57 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+HttpParser <- R6::R6Class(
+
+    "HttpParser",
+
+    public = list(
+
+        validContentTypes = NULL,
+
+        initialize = function()
+        {
+            self$validContentTypes <- c("text", "raw")
+        },
+
+        parseJSONResponse = function(serverResponse)
+        {
+            parsed_response <- httr::content(serverResponse,
+                                             as = "parsed",
+                                             type = "application/json")
+        },
+
+        parseResponse = function(serverResponse, outputType)
+        {
+            parsed_response <- httr::content(serverResponse, as = outputType)
+        },
+
+        getFileNamesFromResponse = function(response, uri)
+        {
+            text <- rawToChar(response$content)
+            doc <- XML::xmlParse(text, asText=TRUE)
+            base <- paste(paste("/", strsplit(uri, "/")[[1]][-1:-3], sep="", collapse=""), "/", sep="")
+            result <- unlist(
+                XML::xpathApply(doc, "//D:response/D:href", function(node) {
+                    sub(base, "", URLdecode(XML::xmlValue(node)), fixed=TRUE)
+                })
+            )
+            result <- result[result != ""]
+            result[-1]
+        },
+
+        getFileSizesFromResponse = function(response, uri)
+        {
+            text <- rawToChar(response$content)
+            doc <- XML::xmlParse(text, asText=TRUE)
+
+            base <- paste(paste("/", strsplit(uri, "/")[[1]][-1:-3], sep="", collapse=""), "/", sep="")
+            result <- XML::xpathApply(doc, "//D:response/D:propstat/D:prop/D:getcontentlength", function(node) {
+              XML::xmlValue(node)
+            })
+
+            unlist(result)
+        }
+    )
+)
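+
+# Example: the WebDAV PROPFIND shape these parsers consume. The XML below is
+# hand-built to mirror a server response; only $content is consulted here.
+# getFileNamesFromResponse() strips the collection base path from each D:href
+# and drops the first entry (the collection itself).
+#
+# xml <- paste0('<D:multistatus xmlns:D="DAV:">',
+#               '<D:response><D:href>/c=uuid</D:href></D:response>',
+#               '<D:response><D:href>/c=uuid/folder/a.txt</D:href></D:response>',
+#               '</D:multistatus>')
+# response <- list(content = charToRaw(xml))
+# HttpParser$new()$getFileNamesFromResponse(response, "https://host/c=uuid")
+# # -> "folder/a.txt"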
diff --git a/sdk/R/R/HttpRequest.R b/sdk/R/R/HttpRequest.R
new file mode 100644 (file)
index 0000000..07defca
--- /dev/null
@@ -0,0 +1,76 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+source("./R/util.R")
+
+HttpRequest <- R6::R6Class(
+
+    "HttpRequest",
+
+    public = list(
+
+        validContentTypes = NULL,
+        validVerbs = NULL,
+
+        initialize = function()
+        {
+            self$validContentTypes <- c("text", "raw")
+            self$validVerbs <- c("GET", "POST", "PUT", "DELETE", "PROPFIND", "MOVE", "COPY")
+        },
+
+        exec = function(verb, url, headers = NULL, body = NULL, queryParams = NULL,
+                        retryTimes = 0)
+        {
+            if(!(verb %in% self$validVerbs))
+                stop("Http verb is not valid.")
+
+            urlQuery <- self$createQuery(queryParams)
+            url      <- paste0(url, urlQuery)
+
+            config <- httr::add_headers(unlist(headers))
+            if(Sys.getenv("ARVADOS_API_HOST_INSECURE") == "TRUE")
+                config$options <- list(ssl_verifypeer = 0L)
+
+            # times = 1 regular call + numberOfRetries
+            response <- httr::RETRY(verb, url = url, body = body,
+                                    config = config, times = retryTimes + 1)
+        },
+
+        createQuery = function(queryParams)
+        {
+            queryParams <- Filter(Negate(is.null), queryParams)
+
+            query <- sapply(queryParams, function(param)
+            {
+                if(is.list(param) || length(param) > 1)
+                    param <- RListToPythonList(param, ",")
+
+                URLencode(as.character(param), reserved = T, repeated = T)
+
+            }, USE.NAMES = TRUE)
+
+            if(length(query) > 0)
+            {
+                query <- paste0(names(query), "=", query, collapse = "&")
+
+                return(paste0("/?", query))
+            }
+
+            return("")
+        },
+
+        getConnection = function(url, headers, openMode)
+        {
+            h <- curl::new_handle()
+            curl::handle_setheaders(h, .list = headers)
+
+            if(Sys.getenv("ARVADOS_API_HOST_INSECURE") == "TRUE")
+                curl::handle_setopt(h, ssl_verifypeer = 0L)
+
+            conn <- curl::curl(url = url, open = openMode, handle = h)
+        }
+    ),
+
+    cloneable = FALSE
+)
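+
+# Example: how createQuery() renders query parameters. Scalars are URL-encoded
+# as-is; lists (e.g. filters) are serialized with RListToPythonList() from
+# util.R. NULL parameters are dropped.
+#
+# req <- HttpRequest$new()
+# req$createQuery(list(limit = 100, offset = 0, select = NULL))
+# # -> "/?limit=100&offset=0"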
diff --git a/sdk/R/R/RESTService.R b/sdk/R/R/RESTService.R
new file mode 100644 (file)
index 0000000..78b2c35
--- /dev/null
@@ -0,0 +1,241 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+RESTService <- R6::R6Class(
+
+    "RESTService",
+
+    public = list(
+
+        token      = NULL,
+        http       = NULL,
+        httpParser = NULL,
+        numRetries = NULL,
+
+        initialize = function(token, rawHost,
+                              http, httpParser,
+                              numRetries     = 0,
+                              webDavHostName = NULL)
+        {
+            self$token      <- token
+            self$http       <- http
+            self$httpParser <- httpParser
+            self$numRetries <- numRetries
+
+            private$rawHostName    <- rawHost
+            private$webDavHostName <- webDavHostName
+        },
+
+        setNumConnRetries = function(newNumOfRetries)
+        {
+            self$numRetries <- newNumOfRetries
+        },
+
+        getWebDavHostName = function()
+        {
+            if(is.null(private$webDavHostName))
+            {
+                discoveryDocumentURL <- paste0("https://", private$rawHostName,
+                                               "/discovery/v1/apis/arvados/v1/rest")
+
+                headers <- list(Authorization = paste("OAuth2", self$token))
+
+                serverResponse <- self$http$exec("GET", discoveryDocumentURL, headers,
+                                                 retryTimes = self$numRetries)
+
+                discoveryDocument <- self$httpParser$parseJSONResponse(serverResponse)
+                private$webDavHostName <- discoveryDocument$keepWebServiceUrl
+
+                if(is.null(private$webDavHostName))
+                    stop("Unable to find WebDAV server.")
+            }
+
+            private$webDavHostName
+        },
+
+        create = function(files, uuid)
+        {
+            sapply(files, function(filePath)
+            {
+                private$createNewFile(filePath, uuid, "text/html")
+            })
+        },
+
+        delete = function(relativePath, uuid)
+        {
+            fileURL <- paste0(self$getWebDavHostName(), "c=",
+                              uuid, "/", relativePath);
+            headers <- list(Authorization = paste("OAuth2", self$token))
+
+            serverResponse <- self$http$exec("DELETE", fileURL, headers,
+                                             retryTimes = self$numRetries)
+
+            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
+                stop(paste("Server code:", serverResponse$status_code))
+
+            serverResponse
+        },
+
+        move = function(from, to, uuid)
+        {
+            collectionURL <- paste0(self$getWebDavHostName(), "c=", uuid, "/")
+            fromURL <- paste0(collectionURL, from)
+            toURL <- paste0(collectionURL, trimFromStart(to, "/"))
+
+            headers <- list("Authorization" = paste("OAuth2", self$token),
+                            "Destination" = toURL)
+
+            serverResponse <- self$http$exec("MOVE", fromURL, headers,
+                                             retryTimes = self$numRetries)
+
+            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
+                stop(paste("Server code:", serverResponse$status_code))
+
+            serverResponse
+        },
+
+        copy = function(from, to, uuid)
+        {
+            collectionURL <- paste0(self$getWebDavHostName(), "c=", uuid, "/")
+            fromURL <- paste0(collectionURL, from)
+            toURL <- paste0(collectionURL, trimFromStart(to, "/"))
+
+            headers <- list("Authorization" = paste("OAuth2", self$token),
+                            "Destination" = toURL)
+
+            serverResponse <- self$http$exec("COPY", fromURL, headers,
+                                             retryTimes = self$numRetries)
+
+            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
+                stop(paste("Server code:", serverResponse$status_code))
+
+            serverResponse
+        },
+
+        getCollectionContent = function(uuid)
+        {
+            collectionURL <- URLencode(paste0(self$getWebDavHostName(),
+                                              "c=", uuid))
+
+            headers <- list("Authorization" = paste("OAuth2", self$token))
+
+            response <- self$http$exec("PROPFIND", collectionURL, headers,
+                                       retryTimes = self$numRetries)
+
+            if(all(response == ""))
+                stop("Response is empty, request may be misconfigured")
+
+            if(response$status_code < 200 || response$status_code >= 300)
+                stop(paste("Server code:", response$status_code))
+
+            self$httpParser$getFileNamesFromResponse(response, collectionURL)
+        },
+
+        getResourceSize = function(relativePath, uuid)
+        {
+            collectionURL <- URLencode(paste0(self$getWebDavHostName(),
+                                              "c=", uuid))
+
+            subcollectionURL <- paste0(collectionURL, "/", relativePath);
+
+            headers <- list("Authorization" = paste("OAuth2", self$token))
+
+            response <- self$http$exec("PROPFIND", subcollectionURL, headers,
+                                       retryTimes = self$numRetries)
+
+            if(all(response == ""))
+                stop("Response is empty, request may be misconfigured")
+
+            if(response$status_code < 200 || response$status_code >= 300)
+                stop(paste("Server code:", response$status_code))
+
+            sizes <- self$httpParser$getFileSizesFromResponse(response,
+                                                              collectionURL)
+            as.numeric(sizes)
+        },
+
+        read = function(relativePath, uuid, contentType = "raw", offset = 0, length = 0)
+        {
+            fileURL <- paste0(self$getWebDavHostName(),
+                             "c=", uuid, "/", relativePath);
+
+            range <- paste0("bytes=", offset, "-")
+
+            if(length > 0)
+                range = paste0(range, offset + length - 1)
+
+            if(offset == 0 && length == 0)
+            {
+                headers <- list(Authorization = paste("OAuth2", self$token))
+            }
+            else
+            {
+                headers <- list(Authorization = paste("OAuth2", self$token),
+                                Range = range)
+            }
+
+            if(!(contentType %in% self$httpParser$validContentTypes))
+                stop("Invalid contentType. Please use text or raw.")
+
+            serverResponse <- self$http$exec("GET", fileURL, headers,
+                                             retryTimes = self$numRetries)
+
+            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
+                stop(paste("Server code:", serverResponse$status_code))
+
+            self$httpParser$parseResponse(serverResponse, contentType)
+        },
+
+        write = function(relativePath, uuid, content, contentType)
+        {
+            fileURL <- paste0(self$getWebDavHostName(),
+                             "c=", uuid, "/", relativePath);
+            headers <- list(Authorization = paste("OAuth2", self$token),
+                            "Content-Type" = contentType)
+            body <- content
+
+            serverResponse <- self$http$exec("PUT", fileURL, headers, body,
+                                             retryTimes = self$numRetries)
+
+            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
+                stop(paste("Server code:", serverResponse$status_code))
+
+            self$httpParser$parseResponse(serverResponse, "text")
+        },
+
+        getConnection = function(relativePath, uuid, openMode)
+        {
+            fileURL <- paste0(self$getWebDavHostName(),
+                              "c=", uuid, "/", relativePath);
+            headers <- list(Authorization = paste("OAuth2", self$token))
+
+            conn <- self$http$getConnection(fileURL, headers, openMode)
+        }
+    ),
+
+    private = list(
+
+        webDavHostName = NULL,
+        rawHostName    = NULL,
+
+        createNewFile = function(relativePath, uuid, contentType)
+        {
+            fileURL <- paste0(self$getWebDavHostName(), "c=",
+                              uuid, "/", relativePath)
+            headers <- list(Authorization = paste("OAuth2", self$token),
+                            "Content-Type" = contentType)
+            body <- NULL
+
+            serverResponse <- self$http$exec("PUT", fileURL, headers, body,
+                                             retryTimes = self$numRetries)
+
+            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
+                stop(paste("Server code:", serverResponse$status_code))
+
+            paste("File created:", relativePath)
+        }
+    ),
+
+    cloneable = FALSE
+)
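+
+# Example: the byte-range request issued by read(). For offset = 8 and
+# length = 4 the method sends the header "Range: bytes=8-11", so only that
+# slice travels over the wire. The UUID and path are placeholders; rest is
+# the RESTService instance obtained from an Arvados client.
+#
+# rest <- arv$getRESTService()
+# rest$read("folder/a.txt", "uuid", contentType = "raw", offset = 8, length = 4)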
diff --git a/sdk/R/R/Subcollection.R b/sdk/R/R/Subcollection.R
new file mode 100644 (file)
index 0000000..17a9ef3
--- /dev/null
@@ -0,0 +1,377 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+source("./R/util.R")
+
+#' Subcollection
+#'
+#' Subcollection class represents a folder inside an Arvados collection.
+#' It is essentially a composite of ArvadosFiles and other Subcollections.
+#'
+#' @section Usage:
+#' \preformatted{subcollection = Subcollection$new(name)}
+#'
+#' @section Arguments:
+#' \describe{
+#'   \item{name}{Name of the subcollection.}
+#' }
+#'
+#' @section Methods:
+#' \describe{
+#'   \item{getName()}{Returns the name of the subcollection.}
+#'   \item{getRelativePath()}{Returns the subcollection's path relative to the root.}
+#'   \item{add(content)}{Adds the given ArvadosFile or Subcollection to the subcollection.}
+#'   \item{remove(name)}{Removes the ArvadosFile or Subcollection with the given name from the subcollection.}
+#'   \item{get(relativePath)}{If relativePath is valid, returns the ArvadosFile or Subcollection it points to, else returns NULL.}
+#'   \item{getFileListing()}{Returns the subcollection's file listing as a character vector.}
+#'   \item{getSizeInBytes()}{Returns the subcollection's content size in bytes.}
+#'   \item{move(destination)}{Moves the subcollection to a new location inside the collection.}
+#'   \item{copy(destination)}{Copies the subcollection to a new location inside the collection.}
+#' }
+#'
+#' @name Subcollection
+#' @examples
+#' \dontrun{
+#' myFolder <- Subcollection$new("myFolder")
+#' myFile   <- ArvadosFile$new("myFile")
+#'
+#' myFolder$add(myFile)
+#' myFolder$get("myFile")
+#' myFolder$remove("myFile")
+#'
+#' myFolder$move("newLocation/myFolder")
+#' myFolder$copy("newLocation/myFolder")
+#' }
+NULL
+
+#' @export
+Subcollection <- R6::R6Class(
+
+    "Subcollection",
+
+    public = list(
+
+        initialize = function(name)
+        {
+            private$name <- name
+        },
+
+        getName = function() private$name,
+
+        getRelativePath = function()
+        {
+            relativePath <- c(private$name)
+            parent <- private$parent
+
+            while(!is.null(parent))
+            {
+                relativePath <- c(parent$getName(), relativePath)
+                parent <- parent$getParent()
+            }
+
+            relativePath <- relativePath[relativePath != ""]
+            paste0(relativePath, collapse = "/")
+        },
+
+        add = function(content)
+        {
+            if("ArvadosFile"   %in% class(content) ||
+               "Subcollection" %in% class(content))
+            {
+                if(!is.null(content$getCollection()))
+                    stop("Content already belongs to a collection.")
+
+                if(content$getName() == "")
+                    stop("Content has invalid name.")
+
+                childWithSameName <- self$get(content$getName())
+
+                if(!is.null(childWithSameName))
+                    stop(paste("Subcollection already contains an ArvadosFile",
+                               "or Subcollection with the same name."))
+
+                if(!is.null(private$collection))
+                {
+                    if(self$getRelativePath() != "")
+                        contentPath <- paste0(self$getRelativePath(),
+                                              "/", content$getFileListing())
+                    else
+                        contentPath <- content$getFileListing()
+
+                    REST <- private$collection$getRESTService()
+                    REST$create(contentPath, private$collection$uuid)
+                    content$setCollection(private$collection)
+                }
+
+                private$children <- c(private$children, content)
+                content$setParent(self)
+
+                "Content added successfully."
+            }
+            else
+            {
+                stop(paste0("Expected ArvadosFile or Subcollection object, got ",
+                            paste0("(", paste0(class(content), collapse = ", "), ")"),
+                            "."))
+            }
+        },
+
+        remove = function(name)
+        {
+            if(is.character(name))
+            {
+                child <- self$get(name)
+
+                if(is.null(child))
+                    stop(paste("Subcollection doesn't contain an ArvadosFile",
+                               "or Subcollection with the specified name."))
+
+                if(!is.null(private$collection))
+                {
+                    REST <- private$collection$getRESTService()
+                    REST$delete(child$getRelativePath(), private$collection$uuid)
+
+                    child$setCollection(NULL)
+                }
+
+                private$removeChild(name)
+                child$setParent(NULL)
+
+                "Content removed"
+            }
+            else
+            {
+                stop(paste0("Expected character, got ",
+                            paste0("(", paste0(class(name), collapse = ", "), ")"),
+                            "."))
+            }
+        },
+
+        getFileListing = function(fullPath = TRUE)
+        {
+            content <- private$getContentAsCharVector(fullPath)
+            content[order(tolower(content))]
+        },
+
+        getSizeInBytes = function()
+        {
+            if(is.null(private$collection))
+                return(0)
+
+            REST <- private$collection$getRESTService()
+
+            fileSizes <- REST$getResourceSize(paste0(self$getRelativePath(), "/"),
+                                              private$collection$uuid)
+            return(sum(fileSizes))
+        },
+
+        move = function(destination)
+        {
+            if(is.null(private$collection))
+                stop("Subcollection doesn't belong to any collection.")
+
+            destination <- trimFromEnd(destination, "/")
+            nameAndPath <- splitToPathAndName(destination)
+
+            newParent <- private$collection$get(nameAndPath$path)
+
+            if(is.null(newParent))
+                stop("Unable to get destination subcollection.")
+
+            childWithSameName <- newParent$get(nameAndPath$name)
+
+            if(!is.null(childWithSameName))
+                stop("Destination already contains content with the same name.")
+
+            REST <- private$collection$getRESTService()
+            REST$move(self$getRelativePath(),
+                      paste0(newParent$getRelativePath(), "/", nameAndPath$name),
+                      private$collection$uuid)
+
+            private$dettachFromCurrentParent()
+            private$attachToNewParent(self, newParent)
+
+            private$parent <- newParent
+            private$name <- nameAndPath$name
+
+            self
+        },
+
+        copy = function(destination)
+        {
+            if(is.null(private$collection))
+                stop("Subcollection doesn't belong to any collection.")
+
+            destination <- trimFromEnd(destination, "/")
+            nameAndPath <- splitToPathAndName(destination)
+
+            newParent <- private$collection$get(nameAndPath$path)
+
+            if(is.null(newParent) || !("Subcollection" %in% class(newParent)))
+                stop("Unable to get destination subcollection.")
+
+            childWithSameName <- newParent$get(nameAndPath$name)
+
+            if(!is.null(childWithSameName))
+                stop("Destination already contains content with the same name.")
+
+            REST <- private$collection$getRESTService()
+            REST$copy(self$getRelativePath(),
+                      paste0(newParent$getRelativePath(), "/", nameAndPath$name),
+                      private$collection$uuid)
+
+            newContent <- self$duplicate(nameAndPath$name)
+            newContent$setCollection(self$getCollection(), setRecursively = TRUE)
+            newContent$setParent(newParent)
+            private$attachToNewParent(newContent, newParent)
+
+            newContent
+        },
+
+        duplicate = function(newName = NULL)
+        {
+            name <- if(!is.null(newName)) newName else private$name
+            root <- Subcollection$new(name)
+            for(child in private$children)
+                root$add(child$duplicate())
+
+            root
+        },
+
+        get = function(name)
+        {
+            for(child in private$children)
+            {
+                if(child$getName() == name)
+                    return(child)
+            }
+
+            return(NULL)
+        },
+
+        getFirst = function()
+        {
+            if(length(private$children) == 0)
+               return(NULL)
+
+            private$children[[1]]
+        },
+
+        setCollection = function(collection, setRecursively = TRUE)
+        {
+            private$collection = collection
+
+            if(setRecursively)
+            {
+                for(child in private$children)
+                    child$setCollection(collection)
+            }
+        },
+
+        getCollection = function() private$collection,
+
+        getParent = function() private$parent,
+
+        setParent = function(newParent) private$parent <- newParent
+    ),
+
+    private = list(
+
+        name       = NULL,
+        children   = NULL,
+        parent     = NULL,
+        collection = NULL,
+
+        removeChild = function(name)
+        {
+            numberOfChildren = length(private$children)
+            if(numberOfChildren > 0)
+            {
+                for(childIndex in 1:numberOfChildren)
+                {
+                    if(private$children[[childIndex]]$getName() == name)
+                    {
+                        private$children = private$children[-childIndex]
+                        return()
+                    }
+                }
+            }
+        },
+
+        attachToNewParent = function(content, newParent)
+        {
+            # We temporarily set the parent's collection to NULL. This ensures
+            # that the add method doesn't post this subcollection to REST.
+            # We also need to set the content's collection to NULL because the
+            # add method throws an exception if we try to add content that
+            # already belongs to a collection.
+            parentsCollection <- newParent$getCollection()
+            content$setCollection(NULL, setRecursively = FALSE)
+            newParent$setCollection(NULL, setRecursively = FALSE)
+            newParent$add(content)
+            content$setCollection(parentsCollection, setRecursively = FALSE)
+            newParent$setCollection(parentsCollection, setRecursively = FALSE)
+        },
+
+        dettachFromCurrentParent = function()
+        {
+            # We temporarily set the parent's collection to NULL. This ensures
+            # that the remove method doesn't remove this subcollection from REST.
+            parent <- private$parent
+            parentsCollection <- parent$getCollection()
+            parent$setCollection(NULL, setRecursively = FALSE)
+            parent$remove(private$name)
+            parent$setCollection(parentsCollection, setRecursively = FALSE)
+        },
+
+        getContentAsCharVector = function(fullPath = TRUE)
+        {
+            content <- NULL
+
+            if(fullPath)
+            {
+                for(child in private$children)
+                    content <- c(content, child$getFileListing())
+
+                if(private$name != "")
+                    content <- unlist(paste0(private$name, "/", content))
+            }
+            else
+            {
+                for(child in private$children)
+                    content <- c(content, child$getName())
+            }
+
+            content
+        }
+    ),
+
+    cloneable = FALSE
+)
+
+#' print.Subcollection
+#'
+#' Custom print function for Subcollection class
+#'
+#' @param x Instance of Subcollection class
+#' @param ... Optional arguments.
+#' @export
+print.Subcollection = function(x, ...)
+{
+    collection   <- NULL
+    relativePath <- x$getRelativePath()
+
+    if(!is.null(x$getCollection()))
+    {
+        collection <- x$getCollection()$uuid
+
+        if(!x$getName() == "")
+            relativePath <- paste0("/", relativePath)
+    }
+
+    cat(paste0("Type:          ", "\"", "Arvados Subcollection", "\""), sep = "\n")
+    cat(paste0("Name:          ", "\"", x$getName(),             "\""), sep = "\n")
+    cat(paste0("Relative path: ", "\"", relativePath,            "\""), sep = "\n")
+    cat(paste0("Collection:    ", "\"", collection,              "\""), sep = "\n")
+}
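+
+# Example: the two listing modes of getFileListing(). With fullPath = TRUE
+# (the default) every entry is prefixed with the subcollection's own name;
+# with FALSE only the names of direct children are returned.
+#
+# myFolder <- Subcollection$new("myFolder")
+# sub      <- Subcollection$new("sub")
+# sub$add(ArvadosFile$new("b.txt"))
+# myFolder$add(ArvadosFile$new("a.txt"))
+# myFolder$add(sub)
+#
+# myFolder$getFileListing()        # "myFolder/a.txt" "myFolder/sub/b.txt"
+# myFolder$getFileListing(FALSE)   # "a.txt" "sub"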
diff --git a/sdk/R/R/autoGenAPI.R b/sdk/R/R/autoGenAPI.R
new file mode 100644 (file)
index 0000000..1aef20b
--- /dev/null
@@ -0,0 +1,575 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+getAPIDocument <- function(){
+    url <- "https://4xphq.arvadosapi.com/discovery/v1/apis/arvados/v1/rest"
+    serverResponse <- httr::RETRY("GET", url = url)
+
+    httr::content(serverResponse, as = "parsed", type = "application/json")
+}
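+
+# The discovery document drives everything below. The fields this generator
+# consumes look roughly like this (abridged sketch, not the full document):
+#
+# discoveryDocument$resources$collections$methods$get$httpMethod   # "GET"
+# discoveryDocument$resources$collections$methods$get$path         # "collections/{uuid}"
+# discoveryDocument$resources$collections$methods$get$parameters   # named list;
+#     # each entry has $required, $default and $location ("query" or "path")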
+
+#' @export
+generateAPI <- function()
+{
+    #TODO: Consider passing discovery document URL as parameter.
+    #TODO: Consider passing location where to create new files.
+    discoveryDocument <- getAPIDocument()
+
+    methodResources <- discoveryDocument$resources
+    resourceNames   <- names(methodResources)
+
+    methodDoc <- genMethodsDoc(methodResources, resourceNames)
+    classDoc <- genAPIClassDoc(methodResources, resourceNames)
+    arvadosAPIHeader <- genAPIClassHeader()
+    arvadosProjectMethods <- genProjectMethods()
+    arvadosClassMethods <- genClassContent(methodResources, resourceNames)
+    arvadosAPIFooter <- genAPIClassFooter()
+
+    arvadosClass <- c(methodDoc,
+                      classDoc,
+                      arvadosAPIHeader,
+                      arvadosProjectMethods,
+                      arvadosClassMethods,
+                      arvadosAPIFooter)
+
+    fileConn <- file("./R/Arvados.R", "w")
+    writeLines(unlist(arvadosClass), fileConn)
+    close(fileConn)
+    NULL
+}
+
+genAPIClassHeader <- function()
+{
+    c("Arvados <- R6::R6Class(",
+      "",
+      "\t\"Arvados\",",
+      "",
+      "\tpublic = list(",
+      "",
+      "\t\tinitialize = function(authToken = NULL, hostName = NULL, numRetries = 0)",
+      "\t\t{",
+      "\t\t\tif(!is.null(hostName))",
+      "\t\t\t\tSys.setenv(ARVADOS_API_HOST = hostName)",
+      "",
+      "\t\t\tif(!is.null(authToken))",
+      "\t\t\t\tSys.setenv(ARVADOS_API_TOKEN = authToken)",
+      "",
+      "\t\t\thostName <- Sys.getenv(\"ARVADOS_API_HOST\")",
+      "\t\t\ttoken    <- Sys.getenv(\"ARVADOS_API_TOKEN\")",
+      "",
+      "\t\t\tif(hostName == \"\" || token == \"\")",
+      "\t\t\t\tstop(paste(\"Please provide host name and authentication token\",",
+      "\t\t\t\t\t\t   \"or set ARVADOS_API_HOST and ARVADOS_API_TOKEN\",",
+      "\t\t\t\t\t\t   \"environment variables.\"))",
+      "",
+      "\t\t\tprivate$token <- token",
+      "\t\t\tprivate$host  <- paste0(\"https://\", hostName, \"/arvados/v1/\")",
+      "\t\t\tprivate$numRetries <- numRetries",
+      "\t\t\tprivate$REST <- RESTService$new(token, hostName,",
+      "\t\t\t                                HttpRequest$new(), HttpParser$new(),",
+      "\t\t\t                                numRetries)",
+      "",
+      "\t\t},\n")
+}
+
+genProjectMethods <- function()
+{
+    c("\t\tprojects.get = function(uuid)",
+      "\t\t{",
+      "\t\t\tself$groups.get(uuid)",
+      "\t\t},",
+      "",
+      "\t\tprojects.create = function(group, ensure_unique_name = \"false\")",
+      "\t\t{",
+      "\t\t\tgroup <- c(\"group_class\" = \"project\", group)",
+      "\t\t\tself$groups.create(group, ensure_unique_name)",
+      "\t\t},",
+      "",
+      "\t\tprojects.update = function(group, uuid)",
+      "\t\t{",
+      "\t\t\tgroup <- c(\"group_class\" = \"project\", group)",
+      "\t\t\tself$groups.update(group, uuid)",
+      "\t\t},",
+      "",
+      "\t\tprojects.list = function(filters = NULL, where = NULL,",
+      "\t\t\torder = NULL, select = NULL, distinct = NULL,",
+      "\t\t\tlimit = \"100\", offset = \"0\", count = \"exact\",",
+      "\t\t\tinclude_trash = NULL)",
+      "\t\t{",
+      "\t\t\tfilters[[length(filters) + 1]] <- list(\"group_class\", \"=\", \"project\")",
+      "\t\t\tself$groups.list(filters, where, order, select, distinct,",
+      "\t\t\t                 limit, offset, count, include_trash)",
+      "\t\t},",
+      "",
+      "\t\tprojects.delete = function(uuid)",
+      "\t\t{",
+      "\t\t\tself$groups.delete(uuid)",
+      "\t\t},",
+      "")
+}
+
+genClassContent <- function(methodResources, resourceNames)
+{
+    arvadosMethods <- Map(function(resource, resourceName)
+    {
+        methodNames <- names(resource$methods)
+
+        functions <- Map(function(methodMetaData, methodName)
+        {
+            #NOTE: "index", "show" and "destroy" are aliases for the preferred
+            # names "list", "get" and "delete". Until they are removed from the
+            # discovery document we filter them out here.
+            if(methodName %in% c("index", "show", "destroy"))
+               return(NULL)
+
+            methodName <- paste0(resourceName, ".", methodName)
+            createMethod(methodName, methodMetaData)
+
+        }, resource$methods, methodNames)
+
+        unlist(unname(functions))
+
+    }, methodResources, resourceNames)
+
+    arvadosMethods
+}
+
+genAPIClassFooter <- function()
+{
+    c("\t\tgetHostName = function() private$host,",
+      "\t\tgetToken = function() private$token,",
+      "\t\tsetRESTService = function(newREST) private$REST <- newREST,",
+      "\t\tgetRESTService = function() private$REST",
+      "\t),",
+      "",
+      "\tprivate = list(",
+      "",
+      "\t\ttoken = NULL,",
+      "\t\thost = NULL,",
+      "\t\tREST = NULL,",
+      "\t\tnumRetries = NULL",
+      "\t),",
+      "",
+      "\tcloneable = FALSE",
+      ")")
+}
+
+createMethod <- function(name, methodMetaData)
+{
+    args      <- getMethodArguments(methodMetaData)
+    signature <- getMethodSignature(name, args)
+    body      <- getMethodBody(methodMetaData)
+
+    c(signature,
+      "\t\t{",
+          body,
+      "\t\t},\n")
+}
+
+getMethodArguments <- function(methodMetaData)
+{
+    request <- methodMetaData$request
+    requestArgs <- NULL
+
+    if(!is.null(request))
+    {
+        resourceName <- tolower(request$properties[[1]][[1]])
+
+        if(request$required)
+            requestArgs <- resourceName
+        else
+            requestArgs <- paste(resourceName, "=", "NULL")
+    }
+
+    argNames <- names(methodMetaData$parameters)
+
+    args <- sapply(argNames, function(argName)
+    {
+        arg <- methodMetaData$parameters[[argName]]
+
+        if(!arg$required)
+        {
+            if(!is.null(arg$default))
+                return(paste0(argName, " = ", "\"", arg$default, "\""))
+            else
+                return(paste(argName, "=", "NULL"))
+        }
+
+        argName
+    })
+
+    c(requestArgs, args)
+}
+
+getMethodSignature <- function(methodName, args)
+{
+    collapsedArgs <- paste0(args, collapse = ", ")
+    lineLengthLimit <- 40
+
+    if(nchar(collapsedArgs) > lineLengthLimit)
+    {
+        return(paste0("\t\t",
+                      formatArgs(paste(methodName, "= function("),
+                                 "\t", args, ")", lineLengthLimit)))
+    }
+    else
+    {
+        return(paste0("\t\t", methodName, " = function(", collapsedArgs, ")"))
+    }
+}
+
+getMethodBody <- function(methodMetaData)
+{
+    url              <- getRequestURL(methodMetaData)
+    headers          <- getRequestHeaders()
+    requestQueryList <- getRequestQueryList(methodMetaData)
+    requestBody      <- getRequestBody(methodMetaData)
+    request          <- getRequest(methodMetaData)
+    response         <- getResponse(methodMetaData)
+    errorCheck       <- getErrorCheckingCode()
+    returnStatement  <- getReturnObject()
+
+    body <- c(url,
+              headers,
+              requestQueryList, "",
+              requestBody, "",
+              request, response, "",
+              errorCheck, "",
+              returnStatement)
+
+    paste0("\t\t\t", body)
+}
+
+getRequestURL <- function(methodMetaData)
+{
+    endPoint <- methodMetaData$path
+    endPoint <- stringr::str_replace_all(endPoint, "\\{", "${")
+    url <- c(paste0("endPoint <- stringr::str_interp(\"", endPoint, "\")"),
+             paste0("url <- paste0(private$host, endPoint)"))
+    url
+}
+
+getRequestHeaders <- function()
+{
+    c("headers <- list(Authorization = paste(\"OAuth2\", private$token), ",
+      "                \"Content-Type\" = \"application/json\")")
+}
+
+getRequestQueryList <- function(methodMetaData)
+{
+    queryArgs <- names(Filter(function(arg) arg$location == "query",
+                        methodMetaData$parameters))
+
+    if(length(queryArgs) == 0)
+        return("queryArgs <- NULL")
+
+    queryArgs <- sapply(queryArgs, function(arg) paste0(arg, " = ", arg))
+    collapsedArgs <- paste0(queryArgs, collapse = ", ")
+
+    lineLengthLimit <- 40
+
+    if(nchar(collapsedArgs) > lineLengthLimit)
+        return(formatArgs("queryArgs <- list(", "\t\t\t\t  ", queryArgs, ")",
+                          lineLengthLimit))
+    else
+        return(paste0("queryArgs <- list(", collapsedArgs, ")"))
+}
+
+getRequestBody <- function(methodMetaData)
+{
+    request <- methodMetaData$request
+
+    if(is.null(request) || !request$required)
+        return("body <- NULL")
+
+    resourceName <- tolower(request$properties[[1]][[1]])
+
+    requestParameterName <- names(request$properties)[1]
+
+    c(paste0("if(length(", resourceName, ") > 0)"),
+      paste0("\tbody <- jsonlite::toJSON(list(", resourceName, " = ", resourceName, "), "),
+             "\t                         auto_unbox = TRUE)",
+      "else",
+      "\tbody <- NULL")
+}
+
+getRequest <- function(methodMetaData)
+{
+    method <- methodMetaData$httpMethod
+    c(paste0("response <- private$REST$http$exec(\"", method, "\", url, headers, body,"),
+      "                                   queryArgs, private$numRetries)")
+}
+
+getResponse <- function(methodMetaData)
+{
+    "resource <- private$REST$httpParser$parseJSONResponse(response)"
+}
+
+getErrorCheckingCode <- function()
+{
+    c("if(!is.null(resource$errors))",
+      "\tstop(resource$errors)")
+}
+
+getReturnObject <- function()
+{
+    "resource"
+}
+
+#NOTE: Arvados class documentation:
+
+genMethodsDoc <- function(methodResources, resourceNames)
+{
+    methodsDoc <- unlist(unname(Map(function(resource, resourceName)
+    {
+        methodNames <- names(resource$methods)
+
+        methodDoc <- Map(function(methodMetaData, methodName)
+        {
+            #NOTE: Index, show and destroy are aliases for the preferred names
+            # "list", "get" and "delete". Until they are removed from discovery
+            # document we will filter them here.
+            if(methodName %in% c("index", "show", "destroy"))
+               return(NULL)
+
+            methodName <- paste0(resourceName, ".", methodName)
+            getMethodDoc(methodName, methodMetaData)
+
+        }, resource$methods, methodNames)
+
+        unlist(unname(methodDoc))
+
+    }, methodResources, resourceNames)))
+
+    projectDoc <- genProjectMethodsDoc()
+
+    c(methodsDoc, projectDoc)
+}
+
+genAPIClassDoc <- function(methodResources, resourceNames)
+{
+    c("#' Arvados",
+      "#'",
+      "#' Arvados class gives users ability to access Arvados REST API.",
+      "#'" ,
+      "#' @section Usage:",
+      "#' \\preformatted{arv = Arvados$new(authToken = NULL, hostName = NULL, numRetries = 0)}",
+      "#'",
+      "#' @section Arguments:",
+      "#' \\describe{",
+      "#' \t\\item{authToken}{Authentification token. If not specified ARVADOS_API_TOKEN environment variable will be used.}",
+      "#' \t\\item{hostName}{Host name. If not specified ARVADOS_API_HOST environment variable will be used.}",
+      "#' \t\\item{numRetries}{Number which specifies how many times to retry failed service requests.}",
+      "#' }",
+      "#'",
+      "#' @section Methods:",
+      "#' \\describe{",
+      getAPIClassMethodList(methodResources, resourceNames),
+      "#' }",
+      "#'",
+      "#' @name Arvados",
+      "#' @examples",
+      "#' \\dontrun{",
+      "#' arv <- Arvados$new(\"your Arvados token\", \"example.arvadosapi.com\")",
+      "#'",
+      "#' collection <- arv$collections.get(\"uuid\")",
+      "#'",
+      "#' collectionList <- arv$collections.list(list(list(\"name\", \"like\", \"Test%\")))",
+      "#' collectionList <- listAll(arv$collections.list, list(list(\"name\", \"like\", \"Test%\")))",
+      "#'",
+      "#' deletedCollection <- arv$collections.delete(\"uuid\")",
+      "#'",
+      "#' updatedCollection <- arv$collections.update(list(name = \"New name\", description = \"New description\"),",
+      "#'                                             \"uuid\")",
+      "#'",
+      "#' createdCollection <- arv$collections.create(list(name = \"Example\",",
+      "#'                                                  description = \"This is a test collection\"))",
+      "#' }",
+      "NULL",
+      "",
+      "#' @export")
+}
+
+getAPIClassMethodList <- function(methodResources, resourceNames)
+{
+    methodList <- unlist(unname(Map(function(resource, resourceName)
+    {
+        methodNames <- names(resource$methods)
+        paste0(resourceName,
+               ".",
+               methodNames[!(methodNames %in% c("index", "show", "destroy"))])
+
+    }, methodResources, resourceNames)))
+
+    hardcodedMethods <- c("projects.create", "projects.get",
+                          "projects.list", "projects.update", "projects.delete")
+    paste0("#' \t\\item{}{\\code{\\link{", sort(c(methodList, hardcodedMethods)), "}}}")
+}
+
+getMethodDoc <- function(methodName, methodMetaData)
+{
+    name        <- paste("#' @name", methodName)
+    usage       <- getMethodUsage(methodName, methodMetaData)
+    description <- paste("#'", methodName, "is a method defined in Arvados class.")
+    params      <- getMethodDescription(methodMetaData)
+    returnValue <- paste("#' @return", methodMetaData$response[["$ref"]], "object.")
+
+    c(paste("#'", methodName),
+      "#' ",
+      description,
+      "#' ",
+      usage,
+      params,
+      returnValue,
+      name,
+      "NULL",
+      "")
+}
+
+getMethodUsage <- function(methodName, methodMetaData)
+{
+    lineLengthLimit <- 40
+    args <- getMethodArguments(methodMetaData)
+    c(formatArgs(paste0("#' @usage arv$", methodName,
+                        "("), "#' \t", args, ")", lineLengthLimit))
+}
+
+getMethodDescription <- function(methodMetaData)
+{
+    request <- methodMetaData$request
+    requestDoc <- NULL
+
+    if(!is.null(request))
+    {
+        requestDoc <- unname(unlist(sapply(request$properties, function(prop)
+                             {
+                                 className <- sapply(prop, function(ref) ref)
+                                 objectName <- paste0(tolower(substr(className, 1, 1)),
+                                                      substr(className, 2, nchar(className)))
+                                 paste("#' @param", objectName, className, "object.")
+                             })))
+    }
+
+    argNames <- names(methodMetaData$parameters)
+
+    argsDoc <- unname(unlist(sapply(argNames, function(argName)
+    {
+        arg <- methodMetaData$parameters[[argName]]
+        argDescription <- arg$description
+        paste("#' @param", argName, argDescription)
+    })))
+
+    c(requestDoc, argsDoc)
+}
+
+genProjectMethodsDoc <- function()
+{
+    #TODO: Manually update this documentation to reflect changes in discovery document.
+    c("#' project.get",
+    "#' ",
+    "#' projects.get is equivalent to groups.get method.",
+    "#' ",
+    "#' @usage arv$projects.get(uuid)",
+    "#' @param uuid The UUID of the Group in question.",
+    "#' @return Group object.",
+    "#' @name projects.get",
+    "NULL",
+    "",
+    "#' project.create",
+    "#' ",
+    "#' projects.create wrapps groups.create method by setting group_class attribute to \"project\".",
+    "#' ",
+    "#' @usage arv$projects.create(group, ensure_unique_name = \"false\")",
+    "#' @param group Group object.",
+    "#' @param ensure_unique_name Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+    "#' @return Group object.",
+    "#' @name projects.create",
+    "NULL",
+    "",
+    "#' project.update",
+    "#' ",
+    "#' projects.update wrapps groups.update method by setting group_class attribute to \"project\".",
+    "#' ",
+    "#' @usage arv$projects.update(group, uuid)",
+    "#' @param group Group object.",
+    "#' @param uuid The UUID of the Group in question.",
+    "#' @return Group object.",
+    "#' @name projects.update",
+    "NULL",
+    "",
+    "#' project.delete",
+    "#' ",
+    "#' projects.delete is equivalent to groups.delete method.",
+    "#' ",
+    "#' @usage arv$project.delete(uuid)",
+    "#' @param uuid The UUID of the Group in question.",
+    "#' @return Group object.",
+    "#' @name projects.delete",
+    "NULL",
+    "",
+    "#' project.list",
+    "#' ",
+    "#' projects.list wrapps groups.list method by setting group_class attribute to \"project\".",
+    "#' ",
+    "#' @usage arv$projects.list(filters = NULL,",
+    "#'        where = NULL, order = NULL, distinct = NULL,",
+    "#'        limit = \"100\", offset = \"0\", count = \"exact\",",
+    "#'        include_trash = NULL, uuid = NULL, recursive = NULL)",
+    "#' @param filters ",
+    "#' @param where ",
+    "#' @param order ",
+    "#' @param distinct ",
+    "#' @param limit ",
+    "#' @param offset ",
+    "#' @param count ",
+    "#' @param include_trash Include items whose is_trashed attribute is true.",
+    "#' @param uuid ",
+    "#' @param recursive Include contents from child groups recursively.",
+    "#' @return Group object.",
+    "#' @name projects.list",
+    "NULL",
+    "")
+}
+
+#NOTE: Utility functions:
+
+# This function is used to split very long lines of code into smaller chunks.
+# This is usually the case when we pass a lot of named arguments to a function.
+formatArgs <- function(prependAtStart, prependToEachSplit,
+                       args, appendAtEnd, lineLength)
+{
+    if(length(args) > 1)
+    {
+        args[1:(length(args) - 1)] <- paste0(args[1:(length(args) - 1)], ",")
+    }
+
+    args[1] <- paste0(prependAtStart, args[1])
+    args[length(args)] <- paste0(args[length(args)], appendAtEnd)
+
+    argsLength <- length(args)
+    argLines <- list()
+    index <- 1
+
+    while(index <= argsLength)
+    {
+        line <- args[index]
+        index <- index + 1
+
+        while(nchar(line) < lineLength && index <= argsLength)
+        {
+            line <- paste(line, args[index])
+            index <- index + 1
+        }
+
+        argLines <- c(argLines, line)
+    }
+
+    argLines <- unlist(argLines)
+    argLinesLen <- length(argLines)
+
+    if(argLinesLen > 1)
+        argLines[2:argLinesLen] <- paste0(prependToEachSplit, argLines[2:argLinesLen])
+
+    argLines
+}
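+
+# Illustrative only: formatArgs packs arguments greedily onto lines, e.g.
+#   formatArgs("foo <- list(", "\t", c("a = NULL", "b = NULL", "c = NULL"), ")", 25)
+# returns c("foo <- list(a = NULL, b = NULL,", "\tc = NULL)").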
diff --git a/sdk/R/R/util.R b/sdk/R/R/util.R
new file mode 100644 (file)
index 0000000..f796cb7
--- /dev/null
@@ -0,0 +1,93 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+#' listAll
+#'
+#' List all resources even if the number of items is greater than the maximum API limit.
+#'
+#' @param fn Arvados method used to retrieve items from REST service.
+#' @param ... Optional arguments which will be passed to fn.
+#' @examples
+#' \dontrun{
+#' arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+#' cl <- listAll(arv$collections.list, filters = list(list("name", "like", "test%")))
+#' }
+#' @export 
+listAll <- function(fn, ...)
+{
+    offset <- 0
+    itemsAvailable <- .Machine$integer.max
+    items <- c()
+
+    while(length(items) < itemsAvailable)
+    {
+        serverResponse <- fn(offset = offset, ...)
+
+        if(!is.null(serverResponse$errors))
+            stop(serverResponse$errors)
+
+        items          <- c(items, serverResponse$items)
+        offset         <- length(items)
+        itemsAvailable <- serverResponse$items_available
+    }
+
+    items
+}
+
+
+#NOTE: Package private functions
+
+trimFromStart <- function(sample, trimCharacters)
+{
+    if(startsWith(sample, trimCharacters))
+        sample <- substr(sample, nchar(trimCharacters) + 1, nchar(sample))
+
+    sample
+}
+
+trimFromEnd <- function(sample, trimCharacters)
+{
+    if(endsWith(sample, trimCharacters))
+        sample <- substr(sample, 0, nchar(sample) - nchar(trimCharacters))
+
+    sample
+}
+
+RListToPythonList <- function(RList, separator = ", ")
+{
+    pythonArrayContent <- sapply(RList, function(elementInList)
+    {
+        if((is.vector(elementInList) || is.list(elementInList)) &&
+            length(elementInList) > 1)
+        {
+            return(RListToPythonList(elementInList, separator))
+        }
+        else
+        {
+            return(paste0("\"", elementInList, "\""))
+        }
+    })
+
+    pythonArray <- paste0("[", paste0(pythonArrayContent, collapse = separator), "]")
+    pythonArray
+}
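+
+# Illustrative only: RListToPythonList(list("name", "like", "Test%")) returns
+# the string "[\"name\", \"like\", \"Test%\"]", the bracketed form used by the
+# API filter syntax.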
+
+appendToStartIfNotExist <- function(sample, characters)
+{
+    if(!startsWith(sample, characters))
+        sample <- paste0(characters, sample)
+
+    sample
+}
+
+splitToPathAndName = function(path)
+{
+    path <- appendToStartIfNotExist(path, "/")
+    components <- unlist(stringr::str_split(path, "/"))
+    nameAndPath <- list()
+    nameAndPath$name <- components[length(components)]
+    nameAndPath$path <- trimFromStart(paste0(components[-length(components)], collapse = "/"),
+                                      "/")
+    nameAndPath
+}
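+
+# Illustrative only: splitToPathAndName("a/b/c.txt") returns
+# list(name = "c.txt", path = "a/b").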
diff --git a/sdk/R/R/zzz.R b/sdk/R/R/zzz.R
new file mode 100644 (file)
index 0000000..fa0cda4
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+.onLoad <- function(libName, pkgName)
+{
+    minAllowedRVersion <- "3.3.0"
+    currentRVersion <- getRversion()
+
+    if(currentRVersion < minAllowedRVersion)
+        print(paste0("Minimum R version required to run ", pkgName, " is ",
+                     minAllowedRVersion, ". Your current version is ",
+                     toString(currentRVersion), ". Please update R and try again."))
+}
diff --git a/sdk/R/README.Rmd b/sdk/R/README.Rmd
new file mode 100644 (file)
index 0000000..c1d6c7c
--- /dev/null
@@ -0,0 +1,335 @@
+[comment]: # (Copyright (c) The Arvados Authors. All rights reserved.)
+[comment]: # ()
+[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)
+
+## R SDK for Arvados
+
+This SDK focuses on providing support for accessing Arvados projects, collections, and the files within collections.
+The API is not final and feedback is solicited from users on ways in which it could be improved.
+
+### Installation
+
+```{r include=FALSE}
+knitr::opts_chunk$set(eval=FALSE)
+```
+
+```{r}
+install.packages("ArvadosR", repos=c("http://r.arvados.org", getOption("repos")["CRAN"]), dependencies=TRUE)
+```
+
+Note: on Linux, you may have to install supporting packages.
+
+On CentOS 7, this is:
+
+```{bash}
+yum install libxml2-devel openssl-devel curl-devel
+```
+
+On Debian, this is:
+
+```{bash}
+apt-get install build-essential libxml2-dev libssl-dev libcurl4-gnutls-dev
+```
+
+Minimum R version required to run ArvadosR is 3.3.0.
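+
+You can confirm the version from within R (a quick sanity check; the package performs the same comparison when it loads):
+
+```{r}
+getRversion() >= "3.3.0"
+```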
+
+
+### Usage
+
+#### Initializing API
+
+* Load Library and Initialize API:
+
+```{r}
+library('ArvadosR')
+# use environment variables ARVADOS_API_TOKEN and ARVADOS_API_HOST
+arv <- Arvados$new()
+
+# provide them explicitly
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+```
+
+Optionally, add the numRetries parameter to specify the number of times to retry failed service requests.
+The default is 0.
+
+```{r}
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com", numRetries = 3)
+```
+
+This parameter can be set at any time using setNumRetries:
+
+```{r}
+arv$setNumRetries(5)
+```
+
+
+#### Working with collections
+
+* Get a collection:
+
+```{r}
+collection <- arv$collections.get("uuid")
+```
+
+* List collections:
+
+```{r}
+# offset of 0 and default limit of 100
+collectionList <- arv$collections.list(list(list("name", "like", "Test%")))
+
+collectionList <- arv$collections.list(list(list("name", "like", "Test%")), limit = 10, offset = 2)
+```
+
+```{r}
+# count of total number of items (may be more than returned due to paging)
+collectionList$items_available
+
+# items which match the filter criteria
+collectionList$items
+```
+
+* List all collections even if the number of items is greater than the maximum API limit:
+
+```{r}
+collectionList <- listAll(arv$collections.list, list(list("name", "like", "Test%")))
+```
+
+* Delete a collection:
+
+```{r}
+deletedCollection <- arv$collections.delete("uuid")
+```
+
+* Update a collection's metadata:
+
+```{r}
+updatedCollection <- arv$collections.update(list(name = "New name", description = "New description"), "uuid")
+```
+
+* Create a collection:
+
+```{r}
+newCollection <- arv$collections.create(list(name = "Example", description = "This is a test collection"))
+```
+
+
+#### Manipulating collection content
+
+* Create collection object:
+
+```{r}
+collection <- Collection$new(arv, "uuid")
+```
+
+* Get list of files:
+
+```{r}
+files <- collection$getFileListing()
+```
+
+* Get ArvadosFile or Subcollection from internal tree-like structure:
+
+```{r}
+arvadosFile <- collection$get("location/to/my/file.cpp")
+```
+
+or
+
+```{r}
+arvadosSubcollection <- collection$get("location/to/my/directory/")
+```
+
+* Read a table:
+
+```{r}
+arvadosFile   <- collection$get("myinput.txt")
+arvConnection <- arvadosFile$connection("r")
+mytable       <- read.table(arvConnection)
+```
+
+* Write a table:
+
+```{r}
+arvadosFile   <- collection$create("myoutput.txt")
+arvConnection <- arvadosFile$connection("w")
+write.table(mytable, arvConnection)
+arvadosFile$flush()
+```
+
+* Write to an existing file (overwrites the current content of the file):
+
+```{r}
+arvadosFile <- collection$get("location/to/my/file.cpp")
+arvadosFile$write("This is new file content")
+```
+
+* Read whole file or just a portion of it:
+
+```{r}
+fileContent <- arvadosFile$read()
+fileContent <- arvadosFile$read("text")
+fileContent <- arvadosFile$read("raw", offset = 1024, length = 512)
+```
+
+* Get ArvadosFile or Subcollection size:
+
+```{r}
+size <- arvadosFile$getSizeInBytes()
+```
+
+or
+
+```{r}
+size <- arvadosSubcollection$getSizeInBytes()
+```
+
+* Create new file in a collection:
+
+```{r}
+collection$create(files)
+```
+
+Example:
+
+```{r}
+mainFile <- collection$create("cpp/src/main.cpp")
+fileList <- collection$create(c("cpp/src/main.cpp", "cpp/src/util.h"))
+```
+
+* Delete file from a collection:
+
+```{r}
+collection$remove("location/to/my/file.cpp")
+```
+
+You can remove both Subcollections and ArvadosFiles.
+If a subcollection contains files or folders, they are removed recursively.
+
+You can also remove multiple files at once:
+
+```{r}
+collection$remove(c("path/to/my/file.cpp", "path/to/other/file.cpp"))
+```
+
+* Delete file or folder from a Subcollection:
+
+```{r}
+subcollection <- collection$get("mySubcollection/")
+subcollection$remove("fileInsideSubcollection.exe")
+subcollection$remove("folderInsideSubcollection/")
+```
+
+* Move or rename a file or folder within a collection (moving between collections is currently not supported):
+
+Directly from collection
+
+```{r}
+collection$move("folder/file.cpp", "file.cpp")
+```
+
+Or from file
+
+```{r}
+file <- collection$get("location/to/my/file.cpp")
+file$move("newDestination/file.cpp")
+```
+
+Or from subcollection
+
+```{r}
+subcollection <- collection$get("location/to/folder")
+subcollection$move("newDestination/folder")
+```
+
+Make sure to include the new file name in the destination.
+In the second example above, file$move("newDestination/") will not work.
+
+* Copy file or folder within a collection (copying between collections is currently not supported):
+
+Directly from collection
+
+```{r}
+collection$copy("folder/file.cpp", "file.cpp")
+```
+
+Or from file
+
+```{r}
+file <- collection$get("location/to/my/file.cpp")
+file$copy("destination/file.cpp")
+```
+
+Or from subcollection
+
+```{r}
+subcollection <- collection$get("location/to/folder")
+subcollection$copy("destination/folder")
+```
+
+#### Working with Arvados projects
+
+* Get a project:
+
+```{r}
+project <- arv$projects.get("uuid")
+```
+
+* List projects:
+
+```{r}
+# list subprojects of a project
+projects <- arv$projects.list(list(list("owner_uuid", "=", "aaaaa-j7d0g-ccccccccccccccc")))
+
+# list projects whose names begin with Example
+examples <- arv$projects.list(list(list("name", "like", "Example%")))
+```
+
+* List all projects even if the number of items is greater than the maximum API limit:
+
+```{r}
+projects <- listAll(arv$projects.list, list(list("name","like","Example%")))
+```
+
+* Delete a project:
+
+```{r}
+deletedProject <- arv$projects.delete("uuid")
+```
+
+* Update a project:
+
+```{r}
+updatedProject <- arv$projects.update(list(name = "new_name", description = "new description"), "uuid")
+```
+
+* Create a project:
+
+```{r}
+newProject <- arv$projects.create(list(name = "project_name", description = "project description"))
+```
+
+#### Help
+
+* View the help page of Arvados classes by putting ? before the class name:
+
+```{r}
+?Arvados
+?Collection
+?Subcollection
+?ArvadosFile
+```
+
+* View the help page of any method defined in the Arvados class by putting ? before the method name:
+
+```{r}
+?collections.update
+?jobs.get
+```
+
+### Building the ArvadosR package
+
+```{bash}
+cd arvados/sdk && R CMD build R
+```
+
+This will create a tarball of the ArvadosR package in the current directory.
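+
+The tarball can then be installed from R. The file name below is illustrative; substitute the version your build produced:
+
+```{r}
+install.packages("ArvadosR_0.0.5.tar.gz", repos = NULL, type = "source")
+```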
diff --git a/sdk/R/createDoc.R b/sdk/R/createDoc.R
new file mode 100644 (file)
index 0000000..5decab9
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Run this script with: Rscript createDoc.R input.Rmd output.html
+
+require(knitr) # required for knitting from rmd to md
+require(markdown) # required for md to html
+
+args <- commandArgs(TRUE)
+
+if(length(args) != 2)
+    stop("Please provide 2 arguments corresponding to input and output file!")
+
+inputFile <- args[[1]] # .Rmd file
+outputFile <- args[[2]] # .html file
+
+# Knit the .Rmd file directly into the output file.
+knitr::knit(inputFile, outputFile)
+
+# Alternative two-step path, kept for reference: knit to a temporary .md file,
+# then convert it to HTML.
+#tempMdFile <- tempfile("tempREADME", fileext = "md")
+#knitr::knit(inputFile, tempMdFile)
+#markdown::markdownToHTML(tempMdFile, outputFile)
diff --git a/sdk/R/install_deps.R b/sdk/R/install_deps.R
new file mode 100644 (file)
index 0000000..593129b
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+options(repos=structure(c(CRAN="http://cran.wustl.edu/")))
+if (!requireNamespace("devtools")) {
+  install.packages("devtools")
+}
+if (!requireNamespace("roxygen2")) {
+  install.packages("roxygen2")
+}
+if (!requireNamespace("knitr")) {
+  install.packages("knitr")
+}
+if (!requireNamespace("markdown")) {
+  install.packages("markdown")
+}
+
+devtools::install_dev_deps()
diff --git a/sdk/R/man/Arvados.Rd b/sdk/R/man/Arvados.Rd
new file mode 100644 (file)
index 0000000..51f98d8
--- /dev/null
@@ -0,0 +1,207 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{Arvados}
+\alias{Arvados}
+\title{Arvados}
+\description{
+The Arvados class gives users the ability to access the Arvados REST API.
+}
+\section{Usage}{
+
+\preformatted{arv = Arvados$new(authToken = NULL, hostName = NULL, numRetries = 0)}
+}
+
+\section{Arguments}{
+
+\describe{
+	\item{authToken}{Authentication token. If not specified, the ARVADOS_API_TOKEN environment variable will be used.}
+	\item{hostName}{Host name. If not specified, the ARVADOS_API_HOST environment variable will be used.}
+       \item{numRetries}{Number which specifies how many times to retry failed service requests.}
+}
+}
+
+\section{Methods}{
+
+\describe{
+       \item{}{\code{\link{api_client_authorizations.create}}}
+       \item{}{\code{\link{api_client_authorizations.create_system_auth}}}
+       \item{}{\code{\link{api_client_authorizations.current}}}
+       \item{}{\code{\link{api_client_authorizations.delete}}}
+       \item{}{\code{\link{api_client_authorizations.get}}}
+       \item{}{\code{\link{api_client_authorizations.list}}}
+       \item{}{\code{\link{api_client_authorizations.update}}}
+       \item{}{\code{\link{api_clients.create}}}
+       \item{}{\code{\link{api_clients.delete}}}
+       \item{}{\code{\link{api_clients.get}}}
+       \item{}{\code{\link{api_clients.list}}}
+       \item{}{\code{\link{api_clients.update}}}
+       \item{}{\code{\link{authorized_keys.create}}}
+       \item{}{\code{\link{authorized_keys.delete}}}
+       \item{}{\code{\link{authorized_keys.get}}}
+       \item{}{\code{\link{authorized_keys.list}}}
+       \item{}{\code{\link{authorized_keys.update}}}
+       \item{}{\code{\link{collections.create}}}
+       \item{}{\code{\link{collections.delete}}}
+       \item{}{\code{\link{collections.get}}}
+       \item{}{\code{\link{collections.list}}}
+       \item{}{\code{\link{collections.provenance}}}
+       \item{}{\code{\link{collections.trash}}}
+       \item{}{\code{\link{collections.untrash}}}
+       \item{}{\code{\link{collections.update}}}
+       \item{}{\code{\link{collections.used_by}}}
+       \item{}{\code{\link{container_requests.create}}}
+       \item{}{\code{\link{container_requests.delete}}}
+       \item{}{\code{\link{container_requests.get}}}
+       \item{}{\code{\link{container_requests.list}}}
+       \item{}{\code{\link{container_requests.update}}}
+       \item{}{\code{\link{containers.auth}}}
+       \item{}{\code{\link{containers.create}}}
+       \item{}{\code{\link{containers.current}}}
+       \item{}{\code{\link{containers.delete}}}
+       \item{}{\code{\link{containers.get}}}
+       \item{}{\code{\link{containers.list}}}
+       \item{}{\code{\link{containers.lock}}}
+       \item{}{\code{\link{containers.secret_mounts}}}
+       \item{}{\code{\link{containers.unlock}}}
+       \item{}{\code{\link{containers.update}}}
+       \item{}{\code{\link{groups.contents}}}
+       \item{}{\code{\link{groups.create}}}
+       \item{}{\code{\link{groups.delete}}}
+       \item{}{\code{\link{groups.get}}}
+       \item{}{\code{\link{groups.list}}}
+       \item{}{\code{\link{groups.trash}}}
+       \item{}{\code{\link{groups.untrash}}}
+       \item{}{\code{\link{groups.update}}}
+       \item{}{\code{\link{humans.create}}}
+       \item{}{\code{\link{humans.delete}}}
+       \item{}{\code{\link{humans.get}}}
+       \item{}{\code{\link{humans.list}}}
+       \item{}{\code{\link{humans.update}}}
+       \item{}{\code{\link{jobs.cancel}}}
+       \item{}{\code{\link{jobs.create}}}
+       \item{}{\code{\link{jobs.delete}}}
+       \item{}{\code{\link{jobs.get}}}
+       \item{}{\code{\link{jobs.list}}}
+       \item{}{\code{\link{jobs.lock}}}
+       \item{}{\code{\link{jobs.queue}}}
+       \item{}{\code{\link{jobs.queue_size}}}
+       \item{}{\code{\link{jobs.update}}}
+       \item{}{\code{\link{job_tasks.create}}}
+       \item{}{\code{\link{job_tasks.delete}}}
+       \item{}{\code{\link{job_tasks.get}}}
+       \item{}{\code{\link{job_tasks.list}}}
+       \item{}{\code{\link{job_tasks.update}}}
+       \item{}{\code{\link{keep_disks.create}}}
+       \item{}{\code{\link{keep_disks.delete}}}
+       \item{}{\code{\link{keep_disks.get}}}
+       \item{}{\code{\link{keep_disks.list}}}
+       \item{}{\code{\link{keep_disks.ping}}}
+       \item{}{\code{\link{keep_disks.update}}}
+       \item{}{\code{\link{keep_services.accessible}}}
+       \item{}{\code{\link{keep_services.create}}}
+       \item{}{\code{\link{keep_services.delete}}}
+       \item{}{\code{\link{keep_services.get}}}
+       \item{}{\code{\link{keep_services.list}}}
+       \item{}{\code{\link{keep_services.update}}}
+       \item{}{\code{\link{links.create}}}
+       \item{}{\code{\link{links.delete}}}
+       \item{}{\code{\link{links.get}}}
+       \item{}{\code{\link{links.get_permissions}}}
+       \item{}{\code{\link{links.list}}}
+       \item{}{\code{\link{links.update}}}
+       \item{}{\code{\link{logs.create}}}
+       \item{}{\code{\link{logs.delete}}}
+       \item{}{\code{\link{logs.get}}}
+       \item{}{\code{\link{logs.list}}}
+       \item{}{\code{\link{logs.update}}}
+       \item{}{\code{\link{nodes.create}}}
+       \item{}{\code{\link{nodes.delete}}}
+       \item{}{\code{\link{nodes.get}}}
+       \item{}{\code{\link{nodes.list}}}
+       \item{}{\code{\link{nodes.ping}}}
+       \item{}{\code{\link{nodes.update}}}
+       \item{}{\code{\link{pipeline_instances.cancel}}}
+       \item{}{\code{\link{pipeline_instances.create}}}
+       \item{}{\code{\link{pipeline_instances.delete}}}
+       \item{}{\code{\link{pipeline_instances.get}}}
+       \item{}{\code{\link{pipeline_instances.list}}}
+       \item{}{\code{\link{pipeline_instances.update}}}
+       \item{}{\code{\link{pipeline_templates.create}}}
+       \item{}{\code{\link{pipeline_templates.delete}}}
+       \item{}{\code{\link{pipeline_templates.get}}}
+       \item{}{\code{\link{pipeline_templates.list}}}
+       \item{}{\code{\link{pipeline_templates.update}}}
+       \item{}{\code{\link{projects.create}}}
+       \item{}{\code{\link{projects.delete}}}
+       \item{}{\code{\link{projects.get}}}
+       \item{}{\code{\link{projects.list}}}
+       \item{}{\code{\link{projects.update}}}
+       \item{}{\code{\link{repositories.create}}}
+       \item{}{\code{\link{repositories.delete}}}
+       \item{}{\code{\link{repositories.get}}}
+       \item{}{\code{\link{repositories.get_all_permissions}}}
+       \item{}{\code{\link{repositories.list}}}
+       \item{}{\code{\link{repositories.update}}}
+       \item{}{\code{\link{specimens.create}}}
+       \item{}{\code{\link{specimens.delete}}}
+       \item{}{\code{\link{specimens.get}}}
+       \item{}{\code{\link{specimens.list}}}
+       \item{}{\code{\link{specimens.update}}}
+       \item{}{\code{\link{traits.create}}}
+       \item{}{\code{\link{traits.delete}}}
+       \item{}{\code{\link{traits.get}}}
+       \item{}{\code{\link{traits.list}}}
+       \item{}{\code{\link{traits.update}}}
+       \item{}{\code{\link{user_agreements.create}}}
+       \item{}{\code{\link{user_agreements.delete}}}
+       \item{}{\code{\link{user_agreements.get}}}
+       \item{}{\code{\link{user_agreements.list}}}
+       \item{}{\code{\link{user_agreements.new}}}
+       \item{}{\code{\link{user_agreements.sign}}}
+       \item{}{\code{\link{user_agreements.signatures}}}
+       \item{}{\code{\link{user_agreements.update}}}
+       \item{}{\code{\link{users.activate}}}
+       \item{}{\code{\link{users.create}}}
+       \item{}{\code{\link{users.current}}}
+       \item{}{\code{\link{users.delete}}}
+       \item{}{\code{\link{users.get}}}
+       \item{}{\code{\link{users.list}}}
+       \item{}{\code{\link{users.merge}}}
+       \item{}{\code{\link{users.setup}}}
+       \item{}{\code{\link{users.system}}}
+       \item{}{\code{\link{users.unsetup}}}
+       \item{}{\code{\link{users.update}}}
+       \item{}{\code{\link{users.update_uuid}}}
+       \item{}{\code{\link{virtual_machines.create}}}
+       \item{}{\code{\link{virtual_machines.delete}}}
+       \item{}{\code{\link{virtual_machines.get}}}
+       \item{}{\code{\link{virtual_machines.get_all_logins}}}
+       \item{}{\code{\link{virtual_machines.list}}}
+       \item{}{\code{\link{virtual_machines.logins}}}
+       \item{}{\code{\link{virtual_machines.update}}}
+       \item{}{\code{\link{workflows.create}}}
+       \item{}{\code{\link{workflows.delete}}}
+       \item{}{\code{\link{workflows.get}}}
+       \item{}{\code{\link{workflows.list}}}
+       \item{}{\code{\link{workflows.update}}}
+}
+}
+
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+
+collection <- arv$collections.get("uuid")
+
+collectionList <- arv$collections.list(list(list("name", "like", "Test\%")))
+collectionList <- listAll(arv$collections.list, list(list("name", "like", "Test\%")))
+
+deletedCollection <- arv$collections.delete("uuid")
+
+updatedCollection <- arv$collections.update(list(name = "New name", description = "New description"),
+                                            "uuid")
+
+createdCollection <- arv$collections.create(list(name = "Example",
+                                                 description = "This is a test collection"))
+}
+}
diff --git a/sdk/R/man/ArvadosFile.Rd b/sdk/R/man/ArvadosFile.Rd
new file mode 100644 (file)
index 0000000..514e9e8
--- /dev/null
@@ -0,0 +1,58 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/ArvadosFile.R
+\name{ArvadosFile}
+\alias{ArvadosFile}
+\title{ArvadosFile}
+\description{
+The ArvadosFile class represents a file inside an Arvados collection.
+}
+\section{Usage}{
+
+\preformatted{file = ArvadosFile$new(name)}
+}
+
+\section{Arguments}{
+
+\describe{
+  \item{name}{Name of the file.}
+}
+}
+
+\section{Methods}{
+
+\describe{
+  \item{getName()}{Returns name of the file.}
+  \item{getRelativePath()}{Returns file path relative to the root.}
+  \item{read(contentType = "raw", offset = 0, length = 0)}{Read file content.}
+  \item{write(content, contentType = "text/html")}{Write to file (overwrites the current content of the file).}
+  \item{connection(rw)}{Get connection opened in "read" or "write" mode.}
+  \item{flush()}{Write the connection's content to a file (overwrites the current content of the file).}
+  \item{remove(name)}{Removes ArvadosFile or Subcollection specified by name from the subcollection.}
+  \item{getSizeInBytes()}{Returns file size in bytes.}
+  \item{move(destination)}{Moves file to a new location inside collection.}
+  \item{copy(destination)}{Copies file to a new location inside collection.}
+}
+}
+
+\examples{
+\dontrun{
+myFile <- ArvadosFile$new("myFile")
+
+myFile$write("This is new file content")
+fileContent <- myFile$read()
+fileContent <- myFile$read("text")
+fileContent <- myFile$read("raw", offset = 8, length = 4)
+
+#Write a table:
+arvConnection <- myFile$connection("w")
+write.table(mytable, arvConnection)
+myFile$flush()
+
+#Read a table:
+arvConnection <- myFile$connection("r")
+mytable <- read.table(arvConnection)
+
+myFile$move("newFolder/myFile")
+myFile$copy("newFolder/myFile")
+}
+}
diff --git a/sdk/R/man/Collection.Rd b/sdk/R/man/Collection.Rd
new file mode 100644 (file)
index 0000000..fbe6038
--- /dev/null
@@ -0,0 +1,49 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Collection.R
+\name{Collection}
+\alias{Collection}
+\title{Collection}
+\description{
+The Collection class provides an interface for working with Arvados collections.
+}
+\section{Usage}{
+
+\preformatted{collection = Collection$new(arv, uuid)}
+}
+
+\section{Arguments}{
+
+\describe{
+  \item{arv}{Arvados object.}
+  \item{uuid}{UUID of a collection.}
+}
+}
+
+\section{Methods}{
+
+\describe{
+  \item{add(content)}{Adds ArvadosFile or Subcollection specified by content to the collection.}
+  \item{create(files)}{Creates one or more ArvadosFiles and adds them to the collection at specified path.}
+  \item{remove(fileNames)}{Remove one or more files from the collection.}
+  \item{move(content, destination)}{Moves ArvadosFile or Subcollection to another location in the collection.}
+  \item{copy(content, destination)}{Copies ArvadosFile or Subcollection to another location in the collection.}
+  \item{getFileListing()}{Returns the collection's file content as a character vector.}
+  \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
+}
+}
+
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+collection <- Collection$new(arv, "uuid")
+
+createdFiles <- collection$create(c("main.cpp", "lib.dll"), "cpp/src/")
+
+collection$remove("location/to/my/file.cpp")
+
+collection$move("folder/file.cpp", "file.cpp")
+
+arvadosFile <- collection$get("location/to/my/file.cpp")
+arvadosSubcollection <- collection$get("location/to/my/directory/")
+}
+}
diff --git a/sdk/R/man/Subcollection.Rd b/sdk/R/man/Subcollection.Rd
new file mode 100644 (file)
index 0000000..0b27a8b
--- /dev/null
@@ -0,0 +1,49 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Subcollection.R
+\name{Subcollection}
+\alias{Subcollection}
+\title{Subcollection}
+\description{
+The Subcollection class represents a folder inside an Arvados collection.
+It is essentially a composite of ArvadosFiles and other Subcollections.
+}
+\section{Usage}{
+
+\preformatted{subcollection = Subcollection$new(name)}
+}
+
+\section{Arguments}{
+
+\describe{
+  \item{name}{Name of the subcollection.}
+}
+}
+
+\section{Methods}{
+
+\describe{
+  \item{getName()}{Returns name of the subcollection.}
+  \item{getRelativePath()}{Returns subcollection path relative to the root.}
+  \item{add(content)}{Adds ArvadosFile or Subcollection specified by content to the subcollection.}
+  \item{remove(name)}{Removes ArvadosFile or Subcollection specified by name from the subcollection.}
+  \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
+  \item{getFileListing()}{Returns the subcollection's file content as a character vector.}
+  \item{getSizeInBytes()}{Returns the subcollection's content size in bytes.}
+  \item{move(destination)}{Moves subcollection to a new location inside collection.}
+  \item{copy(destination)}{Copies subcollection to a new location inside collection.}
+}
+}
+
+\examples{
+\dontrun{
+myFolder <- Subcollection$new("myFolder")
+myFile   <- ArvadosFile$new("myFile")
+
+myFolder$add(myFile)
+myFolder$get("myFile")
+myFolder$remove("myFile")
+
+myFolder$move("newLocation/myFolder")
+myFolder$copy("newLocation/myFolder")
+}
+}
diff --git a/sdk/R/man/api_client_authorizations.create.Rd b/sdk/R/man/api_client_authorizations.create.Rd
new file mode 100644 (file)
index 0000000..e322419
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.create}
+\alias{api_client_authorizations.create}
+\title{api_client_authorizations.create}
+\usage{
+arv$api_client_authorizations.create(apiclientauthorization,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{apiClientAuthorization}{ApiClientAuthorization object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+ApiClientAuthorization object.
+}
+\description{
+api_client_authorizations.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_client_authorizations.create_system_auth.Rd b/sdk/R/man/api_client_authorizations.create_system_auth.Rd
new file mode 100644 (file)
index 0000000..3eb172a
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.create_system_auth}
+\alias{api_client_authorizations.create_system_auth}
+\title{api_client_authorizations.create_system_auth}
+\usage{
+arv$api_client_authorizations.create_system_auth(api_client_id = NULL,
+       scopes = NULL)
+}
+\arguments{
+\item{api_client_id}{}
+
+\item{scopes}{}
+}
+\value{
+ApiClientAuthorization object.
+}
+\description{
+api_client_authorizations.create_system_auth is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_client_authorizations.current.Rd b/sdk/R/man/api_client_authorizations.current.Rd
new file mode 100644 (file)
index 0000000..c286237
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.current}
+\alias{api_client_authorizations.current}
+\title{api_client_authorizations.current}
+\usage{
+arv$api_client_authorizations.current(NULL)
+}
+\value{
+ApiClientAuthorization object.
+}
+\description{
+api_client_authorizations.current is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_client_authorizations.delete.Rd b/sdk/R/man/api_client_authorizations.delete.Rd
new file mode 100644 (file)
index 0000000..054cc79
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.delete}
+\alias{api_client_authorizations.delete}
+\title{api_client_authorizations.delete}
+\usage{
+arv$api_client_authorizations.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the ApiClientAuthorization in question.}
+}
+\value{
+ApiClientAuthorization object.
+}
+\description{
+api_client_authorizations.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_client_authorizations.get.Rd b/sdk/R/man/api_client_authorizations.get.Rd
new file mode 100644 (file)
index 0000000..3f5b630
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.get}
+\alias{api_client_authorizations.get}
+\title{api_client_authorizations.get}
+\usage{
+arv$api_client_authorizations.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the ApiClientAuthorization in question.}
+}
+\value{
+ApiClientAuthorization object.
+}
+\description{
+api_client_authorizations.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_client_authorizations.list.Rd b/sdk/R/man/api_client_authorizations.list.Rd
new file mode 100644 (file)
index 0000000..7c8ae69
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.list}
+\alias{api_client_authorizations.list}
+\title{api_client_authorizations.list}
+\usage{
+arv$api_client_authorizations.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+ApiClientAuthorizationList object.
+}
+\description{
+api_client_authorizations.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_client_authorizations.update.Rd b/sdk/R/man/api_client_authorizations.update.Rd
new file mode 100644 (file)
index 0000000..e6380cc
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_client_authorizations.update}
+\alias{api_client_authorizations.update}
+\title{api_client_authorizations.update}
+\usage{
+arv$api_client_authorizations.update(apiclientauthorization,
+       uuid)
+}
+\arguments{
+\item{apiClientAuthorization}{ApiClientAuthorization object.}
+
+\item{uuid}{The UUID of the ApiClientAuthorization in question.}
+}
+\value{
+ApiClientAuthorization object.
+}
+\description{
+api_client_authorizations.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_clients.create.Rd b/sdk/R/man/api_clients.create.Rd
new file mode 100644 (file)
index 0000000..2601168
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_clients.create}
+\alias{api_clients.create}
+\title{api_clients.create}
+\usage{
+arv$api_clients.create(apiclient,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{apiClient}{ApiClient object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+ApiClient object.
+}
+\description{
+api_clients.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_clients.delete.Rd b/sdk/R/man/api_clients.delete.Rd
new file mode 100644 (file)
index 0000000..90eaa99
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_clients.delete}
+\alias{api_clients.delete}
+\title{api_clients.delete}
+\usage{
+arv$api_clients.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the ApiClient in question.}
+}
+\value{
+ApiClient object.
+}
+\description{
+api_clients.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_clients.get.Rd b/sdk/R/man/api_clients.get.Rd
new file mode 100644 (file)
index 0000000..4a1b6c0
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_clients.get}
+\alias{api_clients.get}
+\title{api_clients.get}
+\usage{
+arv$api_clients.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the ApiClient in question.}
+}
+\value{
+ApiClient object.
+}
+\description{
+api_clients.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_clients.list.Rd b/sdk/R/man/api_clients.list.Rd
new file mode 100644 (file)
index 0000000..0679c9c
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_clients.list}
+\alias{api_clients.list}
+\title{api_clients.list}
+\usage{
+arv$api_clients.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+ApiClientList object.
+}
+\description{
+api_clients.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/api_clients.update.Rd b/sdk/R/man/api_clients.update.Rd
new file mode 100644 (file)
index 0000000..a37e533
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{api_clients.update}
+\alias{api_clients.update}
+\title{api_clients.update}
+\usage{
+arv$api_clients.update(apiclient,
+       uuid)
+}
+\arguments{
+\item{apiClient}{ApiClient object.}
+
+\item{uuid}{The UUID of the ApiClient in question.}
+}
+\value{
+ApiClient object.
+}
+\description{
+api_clients.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/authorized_keys.create.Rd b/sdk/R/man/authorized_keys.create.Rd
new file mode 100644 (file)
index 0000000..e0d226a
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{authorized_keys.create}
+\alias{authorized_keys.create}
+\title{authorized_keys.create}
+\usage{
+arv$authorized_keys.create(authorizedkey,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{authorizedKey}{AuthorizedKey object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+AuthorizedKey object.
+}
+\description{
+authorized_keys.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/authorized_keys.delete.Rd b/sdk/R/man/authorized_keys.delete.Rd
new file mode 100644 (file)
index 0000000..db1f0e7
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{authorized_keys.delete}
+\alias{authorized_keys.delete}
+\title{authorized_keys.delete}
+\usage{
+arv$authorized_keys.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the AuthorizedKey in question.}
+}
+\value{
+AuthorizedKey object.
+}
+\description{
+authorized_keys.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/authorized_keys.get.Rd b/sdk/R/man/authorized_keys.get.Rd
new file mode 100644 (file)
index 0000000..31a2dd3
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{authorized_keys.get}
+\alias{authorized_keys.get}
+\title{authorized_keys.get}
+\usage{
+arv$authorized_keys.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the AuthorizedKey in question.}
+}
+\value{
+AuthorizedKey object.
+}
+\description{
+authorized_keys.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/authorized_keys.list.Rd b/sdk/R/man/authorized_keys.list.Rd
new file mode 100644 (file)
index 0000000..cd19bc6
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{authorized_keys.list}
+\alias{authorized_keys.list}
+\title{authorized_keys.list}
+\usage{
+arv$authorized_keys.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+AuthorizedKeyList object.
+}
+\description{
+authorized_keys.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/authorized_keys.update.Rd b/sdk/R/man/authorized_keys.update.Rd
new file mode 100644 (file)
index 0000000..65d93d0
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{authorized_keys.update}
+\alias{authorized_keys.update}
+\title{authorized_keys.update}
+\usage{
+arv$authorized_keys.update(authorizedkey,
+       uuid)
+}
+\arguments{
+\item{authorizedKey}{AuthorizedKey object.}
+
+\item{uuid}{The UUID of the AuthorizedKey in question.}
+}
+\value{
+AuthorizedKey object.
+}
+\description{
+authorized_keys.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.create.Rd b/sdk/R/man/collections.create.Rd
new file mode 100644 (file)
index 0000000..af8e398
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.create}
+\alias{collections.create}
+\title{collections.create}
+\usage{
+arv$collections.create(collection,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{collection}{Collection object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Collection object.
+}
+\description{
+collections.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.delete.Rd b/sdk/R/man/collections.delete.Rd
new file mode 100644 (file)
index 0000000..28b3543
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.delete}
+\alias{collections.delete}
+\title{collections.delete}
+\usage{
+arv$collections.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Collection in question.}
+}
+\value{
+Collection object.
+}
+\description{
+collections.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.get.Rd b/sdk/R/man/collections.get.Rd
new file mode 100644 (file)
index 0000000..3878aaf
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.get}
+\alias{collections.get}
+\title{collections.get}
+\usage{
+arv$collections.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Collection in question.}
+}
+\value{
+Collection object.
+}
+\description{
+collections.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.list.Rd b/sdk/R/man/collections.list.Rd
new file mode 100644 (file)
index 0000000..87f6f78
--- /dev/null
@@ -0,0 +1,36 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.list}
+\alias{collections.list}
+\title{collections.list}
+\usage{
+arv$collections.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact", include_trash = NULL)
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+
+\item{include_trash}{Include collections whose is_trashed attribute is true.}
+}
+\value{
+CollectionList object.
+}
+\description{
+collections.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.provenance.Rd b/sdk/R/man/collections.provenance.Rd
new file mode 100644 (file)
index 0000000..001a7b4
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.provenance}
+\alias{collections.provenance}
+\title{collections.provenance}
+\usage{
+arv$collections.provenance(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Collection object.
+}
+\description{
+collections.provenance is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.trash.Rd b/sdk/R/man/collections.trash.Rd
new file mode 100644 (file)
index 0000000..4862109
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.trash}
+\alias{collections.trash}
+\title{collections.trash}
+\usage{
+arv$collections.trash(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Collection object.
+}
+\description{
+collections.trash is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.untrash.Rd b/sdk/R/man/collections.untrash.Rd
new file mode 100644 (file)
index 0000000..c41bc3d
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.untrash}
+\alias{collections.untrash}
+\title{collections.untrash}
+\usage{
+arv$collections.untrash(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Collection object.
+}
+\description{
+collections.untrash is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.update.Rd b/sdk/R/man/collections.update.Rd
new file mode 100644 (file)
index 0000000..c9b201c
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.update}
+\alias{collections.update}
+\title{collections.update}
+\usage{
+arv$collections.update(collection,
+       uuid)
+}
+\arguments{
+\item{collection}{Collection object.}
+
+\item{uuid}{The UUID of the Collection in question.}
+}
+\value{
+Collection object.
+}
+\description{
+collections.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/collections.used_by.Rd b/sdk/R/man/collections.used_by.Rd
new file mode 100644 (file)
index 0000000..53b8e49
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{collections.used_by}
+\alias{collections.used_by}
+\title{collections.used_by}
+\usage{
+arv$collections.used_by(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Collection object.
+}
+\description{
+collections.used_by is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/container_requests.create.Rd b/sdk/R/man/container_requests.create.Rd
new file mode 100644 (file)
index 0000000..e114d32
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{container_requests.create}
+\alias{container_requests.create}
+\title{container_requests.create}
+\usage{
+arv$container_requests.create(containerrequest,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{containerRequest}{ContainerRequest object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+ContainerRequest object.
+}
+\description{
+container_requests.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/container_requests.delete.Rd b/sdk/R/man/container_requests.delete.Rd
new file mode 100644 (file)
index 0000000..905739b
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{container_requests.delete}
+\alias{container_requests.delete}
+\title{container_requests.delete}
+\usage{
+arv$container_requests.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the ContainerRequest in question.}
+}
+\value{
+ContainerRequest object.
+}
+\description{
+container_requests.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/container_requests.get.Rd b/sdk/R/man/container_requests.get.Rd
new file mode 100644 (file)
index 0000000..54fe5d4
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{container_requests.get}
+\alias{container_requests.get}
+\title{container_requests.get}
+\usage{
+arv$container_requests.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the ContainerRequest in question.}
+}
+\value{
+ContainerRequest object.
+}
+\description{
+container_requests.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/container_requests.list.Rd b/sdk/R/man/container_requests.list.Rd
new file mode 100644 (file)
index 0000000..9c2412b
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{container_requests.list}
+\alias{container_requests.list}
+\title{container_requests.list}
+\usage{
+arv$container_requests.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+ContainerRequestList object.
+}
+\description{
+container_requests.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/container_requests.update.Rd b/sdk/R/man/container_requests.update.Rd
new file mode 100644 (file)
index 0000000..063417b
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{container_requests.update}
+\alias{container_requests.update}
+\title{container_requests.update}
+\usage{
+arv$container_requests.update(containerrequest,
+       uuid)
+}
+\arguments{
+\item{containerRequest}{ContainerRequest object.}
+
+\item{uuid}{The UUID of the ContainerRequest in question.}
+}
+\value{
+ContainerRequest object.
+}
+\description{
+container_requests.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.auth.Rd b/sdk/R/man/containers.auth.Rd
new file mode 100644 (file)
index 0000000..a594d2f
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.auth}
+\alias{containers.auth}
+\title{containers.auth}
+\usage{
+arv$containers.auth(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+Container object.
+}
+\description{
+containers.auth is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.create.Rd b/sdk/R/man/containers.create.Rd
new file mode 100644 (file)
index 0000000..4ce25bb
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.create}
+\alias{containers.create}
+\title{containers.create}
+\usage{
+arv$containers.create(container,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{container}{Container object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Container object.
+}
+\description{
+containers.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.current.Rd b/sdk/R/man/containers.current.Rd
new file mode 100644 (file)
index 0000000..0f6ad4e
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.current}
+\alias{containers.current}
+\title{containers.current}
+\usage{
+arv$containers.current(NULL)
+}
+\value{
+Container object.
+}
+\description{
+containers.current is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.delete.Rd b/sdk/R/man/containers.delete.Rd
new file mode 100644 (file)
index 0000000..e2e3cd7
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.delete}
+\alias{containers.delete}
+\title{containers.delete}
+\usage{
+arv$containers.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Container in question.}
+}
+\value{
+Container object.
+}
+\description{
+containers.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.get.Rd b/sdk/R/man/containers.get.Rd
new file mode 100644 (file)
index 0000000..05d97d3
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.get}
+\alias{containers.get}
+\title{containers.get}
+\usage{
+arv$containers.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Container in question.}
+}
+\value{
+Container object.
+}
+\description{
+containers.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.list.Rd b/sdk/R/man/containers.list.Rd
new file mode 100644 (file)
index 0000000..d445796
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.list}
+\alias{containers.list}
+\title{containers.list}
+\usage{
+arv$containers.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+ContainerList object.
+}
+\description{
+containers.list is a method defined in Arvados class.
+}
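+\examples{
+\dontrun{
+# Sketch: list running containers, following the filter format used in the
+# listAll example; host and token are placeholders.
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+running <- arv$containers.list(filters = list(list("state", "=", "Running")),
+       limit = "50")
+}
+}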
diff --git a/sdk/R/man/containers.lock.Rd b/sdk/R/man/containers.lock.Rd
new file mode 100644 (file)
index 0000000..72bcdf0
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.lock}
+\alias{containers.lock}
+\title{containers.lock}
+\usage{
+arv$containers.lock(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Container in question.}
+}
+\value{
+Container object.
+}
+\description{
+containers.lock is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.secret_mounts.Rd b/sdk/R/man/containers.secret_mounts.Rd
new file mode 100644 (file)
index 0000000..d0f8444
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.secret_mounts}
+\alias{containers.secret_mounts}
+\title{containers.secret_mounts}
+\usage{
+arv$containers.secret_mounts(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Container in question.}
+}
+\value{
+Container object.
+}
+\description{
+containers.secret_mounts is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.unlock.Rd b/sdk/R/man/containers.unlock.Rd
new file mode 100644 (file)
index 0000000..5c41f20
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.unlock}
+\alias{containers.unlock}
+\title{containers.unlock}
+\usage{
+arv$containers.unlock(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Container in question.}
+}
+\value{
+Container object.
+}
+\description{
+containers.unlock is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/containers.update.Rd b/sdk/R/man/containers.update.Rd
new file mode 100644 (file)
index 0000000..3a85726
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{containers.update}
+\alias{containers.update}
+\title{containers.update}
+\usage{
+arv$containers.update(container,
+       uuid)
+}
+\arguments{
+\item{container}{Container object.}
+
+\item{uuid}{The UUID of the Container in question.}
+}
+\value{
+Container object.
+}
+\description{
+containers.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.contents.Rd b/sdk/R/man/groups.contents.Rd
new file mode 100644 (file)
index 0000000..26647df
--- /dev/null
@@ -0,0 +1,38 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.contents}
+\alias{groups.contents}
+\title{groups.contents}
+\usage{
+arv$groups.contents(filters = NULL,
+       where = NULL, order = NULL, distinct = NULL,
+       limit = "100", offset = "0", count = "exact",
+       include_trash = NULL, uuid = NULL, recursive = NULL)
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+
+\item{include_trash}{Include items whose is_trashed attribute is true.}
+
+\item{uuid}{}
+
+\item{recursive}{Include contents from child groups recursively.}
+}
+\value{
+Group object.
+}
+\description{
+groups.contents is a method defined in Arvados class.
+}
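+\examples{
+\dontrun{
+# Sketch: list the contents of a project group; the UUID is a placeholder.
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+contents <- arv$groups.contents(uuid = "uuid_of_a_project_group",
+       recursive = TRUE)
+}
+}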
diff --git a/sdk/R/man/groups.create.Rd b/sdk/R/man/groups.create.Rd
new file mode 100644 (file)
index 0000000..8719603
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.create}
+\alias{groups.create}
+\title{groups.create}
+\usage{
+arv$groups.create(group, ensure_unique_name = "false")
+}
+\arguments{
+\item{group}{Group object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Group object.
+}
+\description{
+groups.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.delete.Rd b/sdk/R/man/groups.delete.Rd
new file mode 100644 (file)
index 0000000..1b4a0d9
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.delete}
+\alias{groups.delete}
+\title{groups.delete}
+\usage{
+arv$groups.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+groups.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.get.Rd b/sdk/R/man/groups.get.Rd
new file mode 100644 (file)
index 0000000..28a1872
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.get}
+\alias{groups.get}
+\title{groups.get}
+\usage{
+arv$groups.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+groups.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.list.Rd b/sdk/R/man/groups.list.Rd
new file mode 100644 (file)
index 0000000..7699f3e
--- /dev/null
@@ -0,0 +1,36 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.list}
+\alias{groups.list}
+\title{groups.list}
+\usage{
+arv$groups.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact", include_trash = NULL)
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+
+\item{include_trash}{Include items whose is_trashed attribute is true.}
+}
+\value{
+GroupList object.
+}
+\description{
+groups.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.trash.Rd b/sdk/R/man/groups.trash.Rd
new file mode 100644 (file)
index 0000000..c529618
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.trash}
+\alias{groups.trash}
+\title{groups.trash}
+\usage{
+arv$groups.trash(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+groups.trash is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.untrash.Rd b/sdk/R/man/groups.untrash.Rd
new file mode 100644 (file)
index 0000000..014190c
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.untrash}
+\alias{groups.untrash}
+\title{groups.untrash}
+\usage{
+arv$groups.untrash(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+groups.untrash is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/groups.update.Rd b/sdk/R/man/groups.update.Rd
new file mode 100644 (file)
index 0000000..47abde7
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{groups.update}
+\alias{groups.update}
+\title{groups.update}
+\usage{
+arv$groups.update(group, uuid)
+}
+\arguments{
+\item{group}{Group object.}
+
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+groups.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/humans.create.Rd b/sdk/R/man/humans.create.Rd
new file mode 100644 (file)
index 0000000..44c9aa3
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{humans.create}
+\alias{humans.create}
+\title{humans.create}
+\usage{
+arv$humans.create(human, ensure_unique_name = "false")
+}
+\arguments{
+\item{human}{Human object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Human object.
+}
+\description{
+humans.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/humans.delete.Rd b/sdk/R/man/humans.delete.Rd
new file mode 100644 (file)
index 0000000..ae66b8e
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{humans.delete}
+\alias{humans.delete}
+\title{humans.delete}
+\usage{
+arv$humans.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Human in question.}
+}
+\value{
+Human object.
+}
+\description{
+humans.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/humans.get.Rd b/sdk/R/man/humans.get.Rd
new file mode 100644 (file)
index 0000000..820c562
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{humans.get}
+\alias{humans.get}
+\title{humans.get}
+\usage{
+arv$humans.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Human in question.}
+}
+\value{
+Human object.
+}
+\description{
+humans.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/humans.list.Rd b/sdk/R/man/humans.list.Rd
new file mode 100644 (file)
index 0000000..a8db4c7
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{humans.list}
+\alias{humans.list}
+\title{humans.list}
+\usage{
+arv$humans.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+HumanList object.
+}
+\description{
+humans.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/humans.update.Rd b/sdk/R/man/humans.update.Rd
new file mode 100644 (file)
index 0000000..83956a2
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{humans.update}
+\alias{humans.update}
+\title{humans.update}
+\usage{
+arv$humans.update(human, uuid)
+}
+\arguments{
+\item{human}{Human object.}
+
+\item{uuid}{The UUID of the Human in question.}
+}
+\value{
+Human object.
+}
+\description{
+humans.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/job_tasks.create.Rd b/sdk/R/man/job_tasks.create.Rd
new file mode 100644 (file)
index 0000000..2da0b0c
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{job_tasks.create}
+\alias{job_tasks.create}
+\title{job_tasks.create}
+\usage{
+arv$job_tasks.create(jobtask, ensure_unique_name = "false")
+}
+\arguments{
+\item{jobtask}{JobTask object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+JobTask object.
+}
+\description{
+job_tasks.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/job_tasks.delete.Rd b/sdk/R/man/job_tasks.delete.Rd
new file mode 100644 (file)
index 0000000..b78a38e
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{job_tasks.delete}
+\alias{job_tasks.delete}
+\title{job_tasks.delete}
+\usage{
+arv$job_tasks.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the JobTask in question.}
+}
+\value{
+JobTask object.
+}
+\description{
+job_tasks.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/job_tasks.get.Rd b/sdk/R/man/job_tasks.get.Rd
new file mode 100644 (file)
index 0000000..07d2054
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{job_tasks.get}
+\alias{job_tasks.get}
+\title{job_tasks.get}
+\usage{
+arv$job_tasks.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the JobTask in question.}
+}
+\value{
+JobTask object.
+}
+\description{
+job_tasks.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/job_tasks.list.Rd b/sdk/R/man/job_tasks.list.Rd
new file mode 100644 (file)
index 0000000..51c4b49
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{job_tasks.list}
+\alias{job_tasks.list}
+\title{job_tasks.list}
+\usage{
+arv$job_tasks.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+JobTaskList object.
+}
+\description{
+job_tasks.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/job_tasks.update.Rd b/sdk/R/man/job_tasks.update.Rd
new file mode 100644 (file)
index 0000000..42d10bd
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{job_tasks.update}
+\alias{job_tasks.update}
+\title{job_tasks.update}
+\usage{
+arv$job_tasks.update(jobtask, uuid)
+}
+\arguments{
+\item{jobtask}{JobTask object.}
+
+\item{uuid}{The UUID of the JobTask in question.}
+}
+\value{
+JobTask object.
+}
+\description{
+job_tasks.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.cancel.Rd b/sdk/R/man/jobs.cancel.Rd
new file mode 100644 (file)
index 0000000..7399d28
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.cancel}
+\alias{jobs.cancel}
+\title{jobs.cancel}
+\usage{
+arv$jobs.cancel(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Job in question.}
+}
+\value{
+Job object.
+}
+\description{
+jobs.cancel is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.create.Rd b/sdk/R/man/jobs.create.Rd
new file mode 100644 (file)
index 0000000..4c4d61a
--- /dev/null
@@ -0,0 +1,29 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.create}
+\alias{jobs.create}
+\title{jobs.create}
+\usage{
+arv$jobs.create(job, ensure_unique_name = "false",
+       find_or_create = "false", filters = NULL,
+       minimum_script_version = NULL, exclude_script_versions = NULL)
+}
+\arguments{
+\item{job}{Job object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+
+\item{find_or_create}{}
+
+\item{filters}{}
+
+\item{minimum_script_version}{}
+
+\item{exclude_script_versions}{}
+}
+\value{
+Job object.
+}
+\description{
+jobs.create is a method defined in Arvados class.
+}
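+\examples{
+\dontrun{
+# Sketch only: assumes the job argument accepts a named list of
+# attributes; the script and repository names are placeholders.
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+job <- arv$jobs.create(list(script = "hash", repository = "arvados"),
+       find_or_create = "true")
+}
+}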
diff --git a/sdk/R/man/jobs.delete.Rd b/sdk/R/man/jobs.delete.Rd
new file mode 100644 (file)
index 0000000..7f75608
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.delete}
+\alias{jobs.delete}
+\title{jobs.delete}
+\usage{
+arv$jobs.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Job in question.}
+}
+\value{
+Job object.
+}
+\description{
+jobs.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.get.Rd b/sdk/R/man/jobs.get.Rd
new file mode 100644 (file)
index 0000000..072b613
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.get}
+\alias{jobs.get}
+\title{jobs.get}
+\usage{
+arv$jobs.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Job in question.}
+}
+\value{
+Job object.
+}
+\description{
+jobs.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.list.Rd b/sdk/R/man/jobs.list.Rd
new file mode 100644 (file)
index 0000000..53055f5
--- /dev/null
@@ -0,0 +1,33 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.list}
+\alias{jobs.list}
+\title{jobs.list}
+\usage{
+arv$jobs.list(filters = NULL, where = NULL,
+       order = NULL, select = NULL, distinct = NULL,
+       limit = "100", offset = "0", count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+JobList object.
+}
+\description{
+jobs.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.lock.Rd b/sdk/R/man/jobs.lock.Rd
new file mode 100644 (file)
index 0000000..3c2e232
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.lock}
+\alias{jobs.lock}
+\title{jobs.lock}
+\usage{
+arv$jobs.lock(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Job in question.}
+}
+\value{
+Job object.
+}
+\description{
+jobs.lock is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.queue.Rd b/sdk/R/man/jobs.queue.Rd
new file mode 100644 (file)
index 0000000..a9deaa9
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.queue}
+\alias{jobs.queue}
+\title{jobs.queue}
+\usage{
+arv$jobs.queue(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+Job object.
+}
+\description{
+jobs.queue is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.queue_size.Rd b/sdk/R/man/jobs.queue_size.Rd
new file mode 100644 (file)
index 0000000..2185820
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.queue_size}
+\alias{jobs.queue_size}
+\title{jobs.queue_size}
+\usage{
+arv$jobs.queue_size(NULL)
+}
+\value{
+Job object.
+}
+\description{
+jobs.queue_size is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/jobs.update.Rd b/sdk/R/man/jobs.update.Rd
new file mode 100644 (file)
index 0000000..666d7fd
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{jobs.update}
+\alias{jobs.update}
+\title{jobs.update}
+\usage{
+arv$jobs.update(job, uuid)
+}
+\arguments{
+\item{job}{Job object.}
+
+\item{uuid}{The UUID of the Job in question.}
+}
+\value{
+Job object.
+}
+\description{
+jobs.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_disks.create.Rd b/sdk/R/man/keep_disks.create.Rd
new file mode 100644 (file)
index 0000000..524c5b6
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_disks.create}
+\alias{keep_disks.create}
+\title{keep_disks.create}
+\usage{
+arv$keep_disks.create(keepdisk,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{keepdisk}{KeepDisk object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+KeepDisk object.
+}
+\description{
+keep_disks.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_disks.delete.Rd b/sdk/R/man/keep_disks.delete.Rd
new file mode 100644 (file)
index 0000000..80f39f3
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_disks.delete}
+\alias{keep_disks.delete}
+\title{keep_disks.delete}
+\usage{
+arv$keep_disks.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the KeepDisk in question.}
+}
+\value{
+KeepDisk object.
+}
+\description{
+keep_disks.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_disks.get.Rd b/sdk/R/man/keep_disks.get.Rd
new file mode 100644 (file)
index 0000000..1b511fe
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_disks.get}
+\alias{keep_disks.get}
+\title{keep_disks.get}
+\usage{
+arv$keep_disks.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the KeepDisk in question.}
+}
+\value{
+KeepDisk object.
+}
+\description{
+keep_disks.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_disks.list.Rd b/sdk/R/man/keep_disks.list.Rd
new file mode 100644 (file)
index 0000000..fdb599f
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_disks.list}
+\alias{keep_disks.list}
+\title{keep_disks.list}
+\usage{
+arv$keep_disks.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+KeepDiskList object.
+}
+\description{
+keep_disks.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_disks.ping.Rd b/sdk/R/man/keep_disks.ping.Rd
new file mode 100644 (file)
index 0000000..6ae5595
--- /dev/null
@@ -0,0 +1,31 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_disks.ping}
+\alias{keep_disks.ping}
+\title{keep_disks.ping}
+\usage{
+arv$keep_disks.ping(uuid = NULL,
+       ping_secret, node_uuid = NULL, filesystem_uuid = NULL,
+       service_host = NULL, service_port, service_ssl_flag)
+}
+\arguments{
+\item{uuid}{}
+
+\item{ping_secret}{}
+
+\item{node_uuid}{}
+
+\item{filesystem_uuid}{}
+
+\item{service_host}{}
+
+\item{service_port}{}
+
+\item{service_ssl_flag}{}
+}
+\value{
+KeepDisk object.
+}
+\description{
+keep_disks.ping is a method defined in Arvados class.
+}
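+\examples{
+\dontrun{
+# Sketch only; every value below is a placeholder.
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+disk <- arv$keep_disks.ping(ping_secret = "a ping secret",
+       service_host = "keep0.example.arvadosapi.com",
+       service_port = "25107", service_ssl_flag = "false")
+}
+}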
diff --git a/sdk/R/man/keep_disks.update.Rd b/sdk/R/man/keep_disks.update.Rd
new file mode 100644 (file)
index 0000000..1ca3363
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_disks.update}
+\alias{keep_disks.update}
+\title{keep_disks.update}
+\usage{
+arv$keep_disks.update(keepdisk,
+       uuid)
+}
+\arguments{
+\item{keepdisk}{KeepDisk object.}
+
+\item{uuid}{The UUID of the KeepDisk in question.}
+}
+\value{
+KeepDisk object.
+}
+\description{
+keep_disks.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_services.accessible.Rd b/sdk/R/man/keep_services.accessible.Rd
new file mode 100644 (file)
index 0000000..3caae2f
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_services.accessible}
+\alias{keep_services.accessible}
+\title{keep_services.accessible}
+\usage{
+arv$keep_services.accessible(NULL)
+}
+\value{
+KeepService object.
+}
+\description{
+keep_services.accessible is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_services.create.Rd b/sdk/R/man/keep_services.create.Rd
new file mode 100644 (file)
index 0000000..59c43ab
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_services.create}
+\alias{keep_services.create}
+\title{keep_services.create}
+\usage{
+arv$keep_services.create(keepservice,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{keepservice}{KeepService object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+KeepService object.
+}
+\description{
+keep_services.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_services.delete.Rd b/sdk/R/man/keep_services.delete.Rd
new file mode 100644 (file)
index 0000000..726771e
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_services.delete}
+\alias{keep_services.delete}
+\title{keep_services.delete}
+\usage{
+arv$keep_services.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the KeepService in question.}
+}
+\value{
+KeepService object.
+}
+\description{
+keep_services.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_services.get.Rd b/sdk/R/man/keep_services.get.Rd
new file mode 100644 (file)
index 0000000..065cf84
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_services.get}
+\alias{keep_services.get}
+\title{keep_services.get}
+\usage{
+arv$keep_services.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the KeepService in question.}
+}
+\value{
+KeepService object.
+}
+\description{
+keep_services.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_services.list.Rd b/sdk/R/man/keep_services.list.Rd
new file mode 100644 (file)
index 0000000..22aa3aa
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_services.list}
+\alias{keep_services.list}
+\title{keep_services.list}
+\usage{
+arv$keep_services.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+KeepServiceList object.
+}
+\description{
+keep_services.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/keep_services.update.Rd b/sdk/R/man/keep_services.update.Rd
new file mode 100644 (file)
index 0000000..2680a5c
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{keep_services.update}
+\alias{keep_services.update}
+\title{keep_services.update}
+\usage{
+arv$keep_services.update(keepservice,
+       uuid)
+}
+\arguments{
+\item{keepservice}{KeepService object.}
+
+\item{uuid}{The UUID of the KeepService in question.}
+}
+\value{
+KeepService object.
+}
+\description{
+keep_services.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/links.create.Rd b/sdk/R/man/links.create.Rd
new file mode 100644 (file)
index 0000000..06b012e
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{links.create}
+\alias{links.create}
+\title{links.create}
+\usage{
+arv$links.create(link, ensure_unique_name = "false")
+}
+\arguments{
+\item{link}{Link object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Link object.
+}
+\description{
+links.create is a method defined in Arvados class.
+}
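+\examples{
+\dontrun{
+# Sketch only: assumes the link argument accepts a named list of
+# attributes; the UUIDs are placeholders.
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+link <- arv$links.create(list(link_class = "permission", name = "can_read",
+       tail_uuid = "uuid_of_a_user", head_uuid = "uuid_of_a_collection"))
+}
+}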
diff --git a/sdk/R/man/links.delete.Rd b/sdk/R/man/links.delete.Rd
new file mode 100644 (file)
index 0000000..3a78b7f
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{links.delete}
+\alias{links.delete}
+\title{links.delete}
+\usage{
+arv$links.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Link in question.}
+}
+\value{
+Link object.
+}
+\description{
+links.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/links.get.Rd b/sdk/R/man/links.get.Rd
new file mode 100644 (file)
index 0000000..bf26271
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{links.get}
+\alias{links.get}
+\title{links.get}
+\usage{
+arv$links.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Link in question.}
+}
+\value{
+Link object.
+}
+\description{
+links.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/links.get_permissions.Rd b/sdk/R/man/links.get_permissions.Rd
new file mode 100644 (file)
index 0000000..982dbb9
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{links.get_permissions}
+\alias{links.get_permissions}
+\title{links.get_permissions}
+\usage{
+arv$links.get_permissions(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the object whose permission links will be returned.}
+}
+\value{
+Link object.
+}
+\description{
+links.get_permissions is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/links.list.Rd b/sdk/R/man/links.list.Rd
new file mode 100644 (file)
index 0000000..540fdc1
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{links.list}
+\alias{links.list}
+\title{links.list}
+\usage{
+arv$links.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+LinkList object.
+}
+\description{
+links.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/links.update.Rd b/sdk/R/man/links.update.Rd
new file mode 100644 (file)
index 0000000..398b6fd
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{links.update}
+\alias{links.update}
+\title{links.update}
+\usage{
+arv$links.update(link, uuid)
+}
+\arguments{
+\item{link}{Link object.}
+
+\item{uuid}{The UUID of the Link in question.}
+}
+\value{
+Link object.
+}
+\description{
+links.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/listAll.Rd b/sdk/R/man/listAll.Rd
new file mode 100644 (file)
index 0000000..2084b47
--- /dev/null
@@ -0,0 +1,22 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/util.R
+\name{listAll}
+\alias{listAll}
+\title{listAll}
+\usage{
+listAll(fn, ...)
+}
+\arguments{
+\item{fn}{Arvados method used to retrieve items from REST service.}
+
+\item{...}{Optional arguments which will be passed to fn.}
+}
+\description{
+List all resources even if the number of items is greater than maximum API limit.
+}
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+cl <- listAll(arv$collections.list, filters = list(list("name", "like", "test\%")))
+}
+}
diff --git a/sdk/R/man/logs.create.Rd b/sdk/R/man/logs.create.Rd
new file mode 100644 (file)
index 0000000..a575e5f
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{logs.create}
+\alias{logs.create}
+\title{logs.create}
+\usage{
+arv$logs.create(log, ensure_unique_name = "false")
+}
+\arguments{
+\item{log}{Log object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Log object.
+}
+\description{
+logs.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/logs.delete.Rd b/sdk/R/man/logs.delete.Rd
new file mode 100644 (file)
index 0000000..63d6a0b
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{logs.delete}
+\alias{logs.delete}
+\title{logs.delete}
+\usage{
+arv$logs.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Log in question.}
+}
+\value{
+Log object.
+}
+\description{
+logs.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/logs.get.Rd b/sdk/R/man/logs.get.Rd
new file mode 100644 (file)
index 0000000..d3053d1
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{logs.get}
+\alias{logs.get}
+\title{logs.get}
+\usage{
+arv$logs.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Log in question.}
+}
+\value{
+Log object.
+}
+\description{
+logs.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/logs.list.Rd b/sdk/R/man/logs.list.Rd
new file mode 100644 (file)
index 0000000..58dbdb7
--- /dev/null
@@ -0,0 +1,33 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{logs.list}
+\alias{logs.list}
+\title{logs.list}
+\usage{
+arv$logs.list(filters = NULL, where = NULL,
+       order = NULL, select = NULL, distinct = NULL,
+       limit = "100", offset = "0", count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+LogList object.
+}
+\description{
+logs.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/logs.update.Rd b/sdk/R/man/logs.update.Rd
new file mode 100644 (file)
index 0000000..efd670c
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{logs.update}
+\alias{logs.update}
+\title{logs.update}
+\usage{
+arv$logs.update(log, uuid)
+}
+\arguments{
+\item{log}{Log object.}
+
+\item{uuid}{The UUID of the Log in question.}
+}
+\value{
+Log object.
+}
+\description{
+logs.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/nodes.create.Rd b/sdk/R/man/nodes.create.Rd
new file mode 100644 (file)
index 0000000..eb73e69
--- /dev/null
@@ -0,0 +1,22 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{nodes.create}
+\alias{nodes.create}
+\title{nodes.create}
+\usage{
+arv$nodes.create(node, ensure_unique_name = "false",
+       assign_slot = NULL)
+}
+\arguments{
+\item{node}{Node object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+
+\item{assign_slot}{Assign slot and hostname.}
+}
+\value{
+Node object.
+}
+\description{
+nodes.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/nodes.delete.Rd b/sdk/R/man/nodes.delete.Rd
new file mode 100644 (file)
index 0000000..0591ded
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{nodes.delete}
+\alias{nodes.delete}
+\title{nodes.delete}
+\usage{
+arv$nodes.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Node in question.}
+}
+\value{
+Node object.
+}
+\description{
+nodes.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/nodes.get.Rd b/sdk/R/man/nodes.get.Rd
new file mode 100644 (file)
index 0000000..dcd7b12
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{nodes.get}
+\alias{nodes.get}
+\title{nodes.get}
+\usage{
+arv$nodes.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Node in question.}
+}
+\value{
+Node object.
+}
+\description{
+nodes.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/nodes.list.Rd b/sdk/R/man/nodes.list.Rd
new file mode 100644 (file)
index 0000000..7ccfad6
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{nodes.list}
+\alias{nodes.list}
+\title{nodes.list}
+\usage{
+arv$nodes.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+NodeList object.
+}
+\description{
+nodes.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/nodes.ping.Rd b/sdk/R/man/nodes.ping.Rd
new file mode 100644 (file)
index 0000000..e77d2b5
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{nodes.ping}
+\alias{nodes.ping}
+\title{nodes.ping}
+\usage{
+arv$nodes.ping(uuid, ping_secret)
+}
+\arguments{
+\item{uuid}{}
+
+\item{ping_secret}{}
+}
+\value{
+Node object.
+}
+\description{
+nodes.ping is a method defined in Arvados class.
+}
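+\examples{
+\dontrun{
+# Sketch only; both values are placeholders.
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+node <- arv$nodes.ping("uuid_of_a_node", ping_secret = "a ping secret")
+}
+}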
diff --git a/sdk/R/man/nodes.update.Rd b/sdk/R/man/nodes.update.Rd
new file mode 100644 (file)
index 0000000..f87245f
--- /dev/null
@@ -0,0 +1,21 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{nodes.update}
+\alias{nodes.update}
+\title{nodes.update}
+\usage{
+arv$nodes.update(node, uuid, assign_slot = NULL)
+}
+\arguments{
+\item{node}{Node object.}
+
+\item{uuid}{The UUID of the Node in question.}
+
+\item{assign_slot}{Assign slot and hostname.}
+}
+\value{
+Node object.
+}
+\description{
+nodes.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_instances.cancel.Rd b/sdk/R/man/pipeline_instances.cancel.Rd
new file mode 100644 (file)
index 0000000..026de81
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_instances.cancel}
+\alias{pipeline_instances.cancel}
+\title{pipeline_instances.cancel}
+\usage{
+arv$pipeline_instances.cancel(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the PipelineInstance in question.}
+}
+\value{
+PipelineInstance object.
+}
+\description{
+pipeline_instances.cancel is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_instances.create.Rd b/sdk/R/man/pipeline_instances.create.Rd
new file mode 100644 (file)
index 0000000..9ee5586
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_instances.create}
+\alias{pipeline_instances.create}
+\title{pipeline_instances.create}
+\usage{
+arv$pipeline_instances.create(pipelineinstance,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{pipelineinstance}{PipelineInstance object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+PipelineInstance object.
+}
+\description{
+pipeline_instances.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_instances.delete.Rd b/sdk/R/man/pipeline_instances.delete.Rd
new file mode 100644 (file)
index 0000000..7297da5
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_instances.delete}
+\alias{pipeline_instances.delete}
+\title{pipeline_instances.delete}
+\usage{
+arv$pipeline_instances.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the PipelineInstance in question.}
+}
+\value{
+PipelineInstance object.
+}
+\description{
+pipeline_instances.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_instances.get.Rd b/sdk/R/man/pipeline_instances.get.Rd
new file mode 100644 (file)
index 0000000..e500df5
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_instances.get}
+\alias{pipeline_instances.get}
+\title{pipeline_instances.get}
+\usage{
+arv$pipeline_instances.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the PipelineInstance in question.}
+}
+\value{
+PipelineInstance object.
+}
+\description{
+pipeline_instances.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_instances.list.Rd b/sdk/R/man/pipeline_instances.list.Rd
new file mode 100644 (file)
index 0000000..407f944
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_instances.list}
+\alias{pipeline_instances.list}
+\title{pipeline_instances.list}
+\usage{
+arv$pipeline_instances.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+PipelineInstanceList object.
+}
+\description{
+pipeline_instances.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_instances.update.Rd b/sdk/R/man/pipeline_instances.update.Rd
new file mode 100644 (file)
index 0000000..4a66660
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_instances.update}
+\alias{pipeline_instances.update}
+\title{pipeline_instances.update}
+\usage{
+arv$pipeline_instances.update(pipelineinstance,
+       uuid)
+}
+\arguments{
+\item{pipelineinstance}{PipelineInstance object.}
+
+\item{uuid}{The UUID of the PipelineInstance in question.}
+}
+\value{
+PipelineInstance object.
+}
+\description{
+pipeline_instances.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_templates.create.Rd b/sdk/R/man/pipeline_templates.create.Rd
new file mode 100644 (file)
index 0000000..afb1e58
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_templates.create}
+\alias{pipeline_templates.create}
+\title{pipeline_templates.create}
+\usage{
+arv$pipeline_templates.create(pipelinetemplate,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{pipelinetemplate}{PipelineTemplate object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+PipelineTemplate object.
+}
+\description{
+pipeline_templates.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_templates.delete.Rd b/sdk/R/man/pipeline_templates.delete.Rd
new file mode 100644 (file)
index 0000000..c74d88b
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_templates.delete}
+\alias{pipeline_templates.delete}
+\title{pipeline_templates.delete}
+\usage{
+arv$pipeline_templates.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the PipelineTemplate in question.}
+}
+\value{
+PipelineTemplate object.
+}
+\description{
+pipeline_templates.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_templates.get.Rd b/sdk/R/man/pipeline_templates.get.Rd
new file mode 100644 (file)
index 0000000..48ef739
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_templates.get}
+\alias{pipeline_templates.get}
+\title{pipeline_templates.get}
+\usage{
+arv$pipeline_templates.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the PipelineTemplate in question.}
+}
+\value{
+PipelineTemplate object.
+}
+\description{
+pipeline_templates.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_templates.list.Rd b/sdk/R/man/pipeline_templates.list.Rd
new file mode 100644 (file)
index 0000000..c6c7413
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_templates.list}
+\alias{pipeline_templates.list}
+\title{pipeline_templates.list}
+\usage{
+arv$pipeline_templates.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+PipelineTemplateList object.
+}
+\description{
+pipeline_templates.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/pipeline_templates.update.Rd b/sdk/R/man/pipeline_templates.update.Rd
new file mode 100644 (file)
index 0000000..25e02bf
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{pipeline_templates.update}
+\alias{pipeline_templates.update}
+\title{pipeline_templates.update}
+\usage{
+arv$pipeline_templates.update(pipelinetemplate,
+       uuid)
+}
+\arguments{
+\item{pipelinetemplate}{PipelineTemplate object.}
+
+\item{uuid}{The UUID of the PipelineTemplate in question.}
+}
+\value{
+PipelineTemplate object.
+}
+\description{
+pipeline_templates.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/print.ArvadosFile.Rd b/sdk/R/man/print.ArvadosFile.Rd
new file mode 100644 (file)
index 0000000..566ec8b
--- /dev/null
@@ -0,0 +1,16 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/ArvadosFile.R
+\name{print.ArvadosFile}
+\alias{print.ArvadosFile}
+\title{print.ArvadosFile}
+\usage{
+\method{print}{ArvadosFile}(x, ...)
+}
+\arguments{
+\item{x}{Instance of ArvadosFile class}
+
+\item{...}{Optional arguments.}
+}
+\description{
+Custom print function for ArvadosFile class
+}
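+\examples{
+\dontrun{
+# Sketch: print a file fetched from a collection; the UUID and file
+# name are placeholders.
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+collection <- Collection$new(arv, "uuid_of_a_collection")
+file <- collection$get("myFile.txt")
+print(file)
+}
+}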
diff --git a/sdk/R/man/print.Collection.Rd b/sdk/R/man/print.Collection.Rd
new file mode 100644 (file)
index 0000000..8852383
--- /dev/null
@@ -0,0 +1,16 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Collection.R
+\name{print.Collection}
+\alias{print.Collection}
+\title{print.Collection}
+\usage{
+\method{print}{Collection}(x, ...)
+}
+\arguments{
+\item{x}{Instance of Collection class}
+
+\item{...}{Optional arguments.}
+}
+\description{
+Custom print function for Collection class
+}
diff --git a/sdk/R/man/print.Subcollection.Rd b/sdk/R/man/print.Subcollection.Rd
new file mode 100644 (file)
index 0000000..621350f
--- /dev/null
@@ -0,0 +1,16 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Subcollection.R
+\name{print.Subcollection}
+\alias{print.Subcollection}
+\title{print.Subcollection}
+\usage{
+\method{print}{Subcollection}(x, ...)
+}
+\arguments{
+\item{x}{Instance of Subcollection class}
+
+\item{...}{Optional arguments.}
+}
+\description{
+Custom print function for Subcollection class
+}
diff --git a/sdk/R/man/projects.create.Rd b/sdk/R/man/projects.create.Rd
new file mode 100644 (file)
index 0000000..66b1f2a
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{projects.create}
+\alias{projects.create}
+\title{projects.create}
+\usage{
+arv$projects.create(group, ensure_unique_name = "false")
+}
+\arguments{
+\item{group}{Group object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Group object.
+}
+\description{
+projects.create wraps the groups.create method, setting the group_class attribute to "project".
+}
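+\examples{
+\dontrun{
+# Sketch only: assumes the group argument accepts a named list of
+# attributes; the project name is a placeholder.
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+project <- arv$projects.create(list(name = "my new project"),
+       ensure_unique_name = "true")
+}
+}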
diff --git a/sdk/R/man/projects.delete.Rd b/sdk/R/man/projects.delete.Rd
new file mode 100644 (file)
index 0000000..7170792
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{projects.delete}
+\alias{projects.delete}
+\title{projects.delete}
+\usage{
+arv$projects.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+projects.delete is equivalent to groups.delete method.
+}
diff --git a/sdk/R/man/projects.get.Rd b/sdk/R/man/projects.get.Rd
new file mode 100644 (file)
index 0000000..1939378
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{projects.get}
+\alias{projects.get}
+\title{projects.get}
+\usage{
+arv$projects.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+projects.get is equivalent to groups.get method.
+}
diff --git a/sdk/R/man/projects.list.Rd b/sdk/R/man/projects.list.Rd
new file mode 100644 (file)
index 0000000..ff4c1c9
--- /dev/null
@@ -0,0 +1,38 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{projects.list}
+\alias{projects.list}
+\title{projects.list}
+\usage{
+arv$projects.list(filters = NULL,
+       where = NULL, order = NULL, distinct = NULL,
+       limit = "100", offset = "0", count = "exact",
+       include_trash = NULL, uuid = NULL, recursive = NULL)
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+
+\item{include_trash}{Include items whose is_trashed attribute is true.}
+
+\item{uuid}{}
+
+\item{recursive}{Include contents from child groups recursively.}
+}
+\value{
+Group object.
+}
+\description{
+projects.list wraps the groups.list method, setting the group_class attribute to "project".
+}
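+\examples{
+\dontrun{
+# Sketch, mirroring the listAll example; the filter value is a placeholder.
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+projects <- arv$projects.list(filters = list(list("name", "like", "Example\%")))
+}
+}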
diff --git a/sdk/R/man/projects.update.Rd b/sdk/R/man/projects.update.Rd
new file mode 100644 (file)
index 0000000..824c5b5
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{projects.update}
+\alias{projects.update}
+\title{projects.update}
+\usage{
+arv$projects.update(group, uuid)
+}
+\arguments{
+\item{group}{Group object.}
+
+\item{uuid}{The UUID of the Group in question.}
+}
+\value{
+Group object.
+}
+\description{
+projects.update wraps the groups.update method, setting the group_class attribute to "project".
+}
diff --git a/sdk/R/man/repositories.create.Rd b/sdk/R/man/repositories.create.Rd
new file mode 100644 (file)
index 0000000..1603604
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{repositories.create}
+\alias{repositories.create}
+\title{repositories.create}
+\usage{
+arv$repositories.create(repository,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{repository}{Repository object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Repository object.
+}
+\description{
+repositories.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/repositories.delete.Rd b/sdk/R/man/repositories.delete.Rd
new file mode 100644 (file)
index 0000000..36fac73
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{repositories.delete}
+\alias{repositories.delete}
+\title{repositories.delete}
+\usage{
+arv$repositories.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Repository in question.}
+}
+\value{
+Repository object.
+}
+\description{
+repositories.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/repositories.get.Rd b/sdk/R/man/repositories.get.Rd
new file mode 100644 (file)
index 0000000..b855b76
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{repositories.get}
+\alias{repositories.get}
+\title{repositories.get}
+\usage{
+arv$repositories.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Repository in question.}
+}
+\value{
+Repository object.
+}
+\description{
+repositories.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/repositories.get_all_permissions.Rd b/sdk/R/man/repositories.get_all_permissions.Rd
new file mode 100644 (file)
index 0000000..f16dbd1
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{repositories.get_all_permissions}
+\alias{repositories.get_all_permissions}
+\title{repositories.get_all_permissions}
+\usage{
+arv$repositories.get_all_permissions(NULL)
+}
+\value{
+Repository object.
+}
+\description{
+repositories.get_all_permissions is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/repositories.list.Rd b/sdk/R/man/repositories.list.Rd
new file mode 100644 (file)
index 0000000..d1f4772
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{repositories.list}
+\alias{repositories.list}
+\title{repositories.list}
+\usage{
+arv$repositories.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+RepositoryList object.
+}
+\description{
+repositories.list is a method defined in Arvados class.
+}
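+\examples{
+\dontrun{
+# Sketch only; host and token are placeholders.
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+repos <- arv$repositories.list(limit = "20", order = "name asc")
+}
+}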
diff --git a/sdk/R/man/repositories.update.Rd b/sdk/R/man/repositories.update.Rd
new file mode 100644 (file)
index 0000000..1be4b61
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{repositories.update}
+\alias{repositories.update}
+\title{repositories.update}
+\usage{
+arv$repositories.update(repository,
+       uuid)
+}
+\arguments{
+\item{repository}{Repository object.}
+
+\item{uuid}{The UUID of the Repository in question.}
+}
+\value{
+Repository object.
+}
+\description{
+repositories.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/specimens.create.Rd b/sdk/R/man/specimens.create.Rd
new file mode 100644 (file)
index 0000000..12344f2
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{specimens.create}
+\alias{specimens.create}
+\title{specimens.create}
+\usage{
+arv$specimens.create(specimen,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{specimen}{Specimen object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Specimen object.
+}
+\description{
+specimens.create is a method defined in Arvados class.
+}
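+% A hypothetical usage sketch (added by hand, not roxygen2 output); the
+% specimen fields shown are illustrative assumptions, not a complete schema.
+\examples{
+\dontrun{
+arv <- Arvados$new("your_api_token", "your_host_name")
+specimen <- list(properties = list(tissue = "liver"))
+newSpecimen <- arv$specimens.create(specimen, ensure_unique_name = "true")
+}
+}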
diff --git a/sdk/R/man/specimens.delete.Rd b/sdk/R/man/specimens.delete.Rd
new file mode 100644 (file)
index 0000000..8ed2d39
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{specimens.delete}
+\alias{specimens.delete}
+\title{specimens.delete}
+\usage{
+arv$specimens.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Specimen in question.}
+}
+\value{
+Specimen object.
+}
+\description{
+specimens.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/specimens.get.Rd b/sdk/R/man/specimens.get.Rd
new file mode 100644 (file)
index 0000000..e757056
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{specimens.get}
+\alias{specimens.get}
+\title{specimens.get}
+\usage{
+arv$specimens.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Specimen in question.}
+}
+\value{
+Specimen object.
+}
+\description{
+specimens.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/specimens.list.Rd b/sdk/R/man/specimens.list.Rd
new file mode 100644 (file)
index 0000000..4e07f4a
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{specimens.list}
+\alias{specimens.list}
+\title{specimens.list}
+\usage{
+arv$specimens.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+SpecimenList object.
+}
+\description{
+specimens.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/specimens.update.Rd b/sdk/R/man/specimens.update.Rd
new file mode 100644 (file)
index 0000000..73a9010
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{specimens.update}
+\alias{specimens.update}
+\title{specimens.update}
+\usage{
+arv$specimens.update(specimen,
+       uuid)
+}
+\arguments{
+\item{specimen}{Specimen object.}
+
+\item{uuid}{The UUID of the Specimen in question.}
+}
+\value{
+Specimen object.
+}
+\description{
+specimens.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/traits.create.Rd b/sdk/R/man/traits.create.Rd
new file mode 100644 (file)
index 0000000..bf6e0c1
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{traits.create}
+\alias{traits.create}
+\title{traits.create}
+\usage{
+arv$traits.create(trait, ensure_unique_name = "false")
+}
+\arguments{
+\item{trait}{Trait object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Trait object.
+}
+\description{
+traits.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/traits.delete.Rd b/sdk/R/man/traits.delete.Rd
new file mode 100644 (file)
index 0000000..9ab9570
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{traits.delete}
+\alias{traits.delete}
+\title{traits.delete}
+\usage{
+arv$traits.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Trait in question.}
+}
+\value{
+Trait object.
+}
+\description{
+traits.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/traits.get.Rd b/sdk/R/man/traits.get.Rd
new file mode 100644 (file)
index 0000000..7d2bac5
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{traits.get}
+\alias{traits.get}
+\title{traits.get}
+\usage{
+arv$traits.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Trait in question.}
+}
+\value{
+Trait object.
+}
+\description{
+traits.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/traits.list.Rd b/sdk/R/man/traits.list.Rd
new file mode 100644 (file)
index 0000000..e91b929
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{traits.list}
+\alias{traits.list}
+\title{traits.list}
+\usage{
+arv$traits.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+TraitList object.
+}
+\description{
+traits.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/traits.update.Rd b/sdk/R/man/traits.update.Rd
new file mode 100644 (file)
index 0000000..f594434
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{traits.update}
+\alias{traits.update}
+\title{traits.update}
+\usage{
+arv$traits.update(trait, uuid)
+}
+\arguments{
+\item{trait}{Trait object.}
+
+\item{uuid}{The UUID of the Trait in question.}
+}
+\value{
+Trait object.
+}
+\description{
+traits.update is a method defined in Arvados class.
+}
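+% A hypothetical get-modify-update sketch (added by hand, not roxygen2
+% output); the UUID is a placeholder and the mutated field is assumed.
+\examples{
+\dontrun{
+arv <- Arvados$new("your_api_token", "your_host_name")
+# Fetch a trait, change one field, and write it back.
+trait <- arv$traits.get("xxxxx-xxxxx-xxxxxxxxxxxxxxx")
+trait$name <- "updated-trait-name"
+arv$traits.update(trait, trait$uuid)
+}
+}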
diff --git a/sdk/R/man/user_agreements.create.Rd b/sdk/R/man/user_agreements.create.Rd
new file mode 100644 (file)
index 0000000..7991305
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.create}
+\alias{user_agreements.create}
+\title{user_agreements.create}
+\usage{
+arv$user_agreements.create(useragreement,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{useragreement}{UserAgreement object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.delete.Rd b/sdk/R/man/user_agreements.delete.Rd
new file mode 100644 (file)
index 0000000..30c9bf8
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.delete}
+\alias{user_agreements.delete}
+\title{user_agreements.delete}
+\usage{
+arv$user_agreements.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the UserAgreement in question.}
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.get.Rd b/sdk/R/man/user_agreements.get.Rd
new file mode 100644 (file)
index 0000000..6311605
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.get}
+\alias{user_agreements.get}
+\title{user_agreements.get}
+\usage{
+arv$user_agreements.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the UserAgreement in question.}
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.list.Rd b/sdk/R/man/user_agreements.list.Rd
new file mode 100644 (file)
index 0000000..5e69861
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.list}
+\alias{user_agreements.list}
+\title{user_agreements.list}
+\usage{
+arv$user_agreements.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+UserAgreementList object.
+}
+\description{
+user_agreements.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.new.Rd b/sdk/R/man/user_agreements.new.Rd
new file mode 100644 (file)
index 0000000..c213cb4
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.new}
+\alias{user_agreements.new}
+\title{user_agreements.new}
+\usage{
+arv$user_agreements.new(NULL)
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.new is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.sign.Rd b/sdk/R/man/user_agreements.sign.Rd
new file mode 100644 (file)
index 0000000..9811610
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.sign}
+\alias{user_agreements.sign}
+\title{user_agreements.sign}
+\usage{
+arv$user_agreements.sign(NULL)
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.sign is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.signatures.Rd b/sdk/R/man/user_agreements.signatures.Rd
new file mode 100644 (file)
index 0000000..d889579
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.signatures}
+\alias{user_agreements.signatures}
+\title{user_agreements.signatures}
+\usage{
+arv$user_agreements.signatures(NULL)
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.signatures is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/user_agreements.update.Rd b/sdk/R/man/user_agreements.update.Rd
new file mode 100644 (file)
index 0000000..578e179
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{user_agreements.update}
+\alias{user_agreements.update}
+\title{user_agreements.update}
+\usage{
+arv$user_agreements.update(useragreement,
+       uuid)
+}
+\arguments{
+\item{useragreement}{UserAgreement object.}
+
+\item{uuid}{The UUID of the UserAgreement in question.}
+}
+\value{
+UserAgreement object.
+}
+\description{
+user_agreements.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.activate.Rd b/sdk/R/man/users.activate.Rd
new file mode 100644 (file)
index 0000000..201caf4
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.activate}
+\alias{users.activate}
+\title{users.activate}
+\usage{
+arv$users.activate(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+User object.
+}
+\description{
+users.activate is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.create.Rd b/sdk/R/man/users.create.Rd
new file mode 100644 (file)
index 0000000..1805c66
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.create}
+\alias{users.create}
+\title{users.create}
+\usage{
+arv$users.create(user, ensure_unique_name = "false")
+}
+\arguments{
+\item{user}{User object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+User object.
+}
+\description{
+users.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.current.Rd b/sdk/R/man/users.current.Rd
new file mode 100644 (file)
index 0000000..4e8af94
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.current}
+\alias{users.current}
+\title{users.current}
+\usage{
+arv$users.current(NULL)
+}
+\value{
+User object.
+}
+\description{
+users.current is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.delete.Rd b/sdk/R/man/users.delete.Rd
new file mode 100644 (file)
index 0000000..df9e238
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.delete}
+\alias{users.delete}
+\title{users.delete}
+\usage{
+arv$users.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the User in question.}
+}
+\value{
+User object.
+}
+\description{
+users.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.get.Rd b/sdk/R/man/users.get.Rd
new file mode 100644 (file)
index 0000000..ec2b284
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.get}
+\alias{users.get}
+\title{users.get}
+\usage{
+arv$users.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the User in question.}
+}
+\value{
+User object.
+}
+\description{
+users.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.list.Rd b/sdk/R/man/users.list.Rd
new file mode 100644 (file)
index 0000000..7131943
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.list}
+\alias{users.list}
+\title{users.list}
+\usage{
+arv$users.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+UserList object.
+}
+\description{
+users.list is a method defined in Arvados class.
+}
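+% A hypothetical paging sketch (added by hand, not roxygen2 output); note
+% that limit and offset are passed as strings, matching the defaults above.
+\examples{
+\dontrun{
+arv <- Arvados$new("your_api_token", "your_host_name")
+# Page through users 50 at a time.
+firstPage  <- arv$users.list(limit = "50", offset = "0")
+secondPage <- arv$users.list(limit = "50", offset = "50")
+}
+}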
diff --git a/sdk/R/man/users.merge.Rd b/sdk/R/man/users.merge.Rd
new file mode 100644 (file)
index 0000000..a539591
--- /dev/null
@@ -0,0 +1,22 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.merge}
+\alias{users.merge}
+\title{users.merge}
+\usage{
+arv$users.merge(new_owner_uuid,
+       new_user_token, redirect_to_new_user = NULL)
+}
+\arguments{
+\item{new_owner_uuid}{}
+
+\item{new_user_token}{}
+
+\item{redirect_to_new_user}{}
+}
+\value{
+User object.
+}
+\description{
+users.merge is a method defined in Arvados class.
+}
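+% A hypothetical call sketch (added by hand, not roxygen2 output); argument
+% meanings follow the Arvados users.merge API, and all values are placeholders.
+\examples{
+\dontrun{
+arv <- Arvados$new("your_api_token", "your_host_name")
+arv$users.merge(new_owner_uuid = "xxxxx-tpzed-xxxxxxxxxxxxxxx",
+                new_user_token = "token_of_user_being_merged",
+                redirect_to_new_user = TRUE)
+}
+}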
diff --git a/sdk/R/man/users.setup.Rd b/sdk/R/man/users.setup.Rd
new file mode 100644 (file)
index 0000000..869403d
--- /dev/null
@@ -0,0 +1,26 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.setup}
+\alias{users.setup}
+\title{users.setup}
+\usage{
+arv$users.setup(user = NULL, openid_prefix = NULL,
+       repo_name = NULL, vm_uuid = NULL, send_notification_email = "false")
+}
+\arguments{
+\item{user}{}
+
+\item{openid_prefix}{}
+
+\item{repo_name}{}
+
+\item{vm_uuid}{}
+
+\item{send_notification_email}{}
+}
+\value{
+User object.
+}
+\description{
+users.setup is a method defined in Arvados class.
+}
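+% A hypothetical setup sketch (added by hand, not roxygen2 output); the user
+% fields and repository name are illustrative placeholders.
+\examples{
+\dontrun{
+arv <- Arvados$new("your_api_token", "your_host_name")
+arv$users.setup(user = list(email = "new.user@example.com"),
+                repo_name = "newuser/demo",
+                send_notification_email = "true")
+}
+}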
diff --git a/sdk/R/man/users.system.Rd b/sdk/R/man/users.system.Rd
new file mode 100644 (file)
index 0000000..c321c23
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.system}
+\alias{users.system}
+\title{users.system}
+\usage{
+arv$users.system(NULL)
+}
+\value{
+User object.
+}
+\description{
+users.system is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.unsetup.Rd b/sdk/R/man/users.unsetup.Rd
new file mode 100644 (file)
index 0000000..85de6f9
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.unsetup}
+\alias{users.unsetup}
+\title{users.unsetup}
+\usage{
+arv$users.unsetup(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+User object.
+}
+\description{
+users.unsetup is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.update.Rd b/sdk/R/man/users.update.Rd
new file mode 100644 (file)
index 0000000..fcd9c71
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.update}
+\alias{users.update}
+\title{users.update}
+\usage{
+arv$users.update(user, uuid)
+}
+\arguments{
+\item{user}{User object.}
+
+\item{uuid}{The UUID of the User in question.}
+}
+\value{
+User object.
+}
+\description{
+users.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/users.update_uuid.Rd b/sdk/R/man/users.update_uuid.Rd
new file mode 100644 (file)
index 0000000..af62c2c
--- /dev/null
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{users.update_uuid}
+\alias{users.update_uuid}
+\title{users.update_uuid}
+\usage{
+arv$users.update_uuid(uuid, new_uuid)
+}
+\arguments{
+\item{uuid}{}
+
+\item{new_uuid}{}
+}
+\value{
+User object.
+}
+\description{
+users.update_uuid is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/virtual_machines.create.Rd b/sdk/R/man/virtual_machines.create.Rd
new file mode 100644 (file)
index 0000000..689a0f9
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.create}
+\alias{virtual_machines.create}
+\title{virtual_machines.create}
+\usage{
+arv$virtual_machines.create(virtualmachine,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{virtualmachine}{VirtualMachine object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+VirtualMachine object.
+}
+\description{
+virtual_machines.create is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/virtual_machines.delete.Rd b/sdk/R/man/virtual_machines.delete.Rd
new file mode 100644 (file)
index 0000000..c513833
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.delete}
+\alias{virtual_machines.delete}
+\title{virtual_machines.delete}
+\usage{
+arv$virtual_machines.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the VirtualMachine in question.}
+}
+\value{
+VirtualMachine object.
+}
+\description{
+virtual_machines.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/virtual_machines.get.Rd b/sdk/R/man/virtual_machines.get.Rd
new file mode 100644 (file)
index 0000000..3e56e17
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.get}
+\alias{virtual_machines.get}
+\title{virtual_machines.get}
+\usage{
+arv$virtual_machines.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the VirtualMachine in question.}
+}
+\value{
+VirtualMachine object.
+}
+\description{
+virtual_machines.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/virtual_machines.get_all_logins.Rd b/sdk/R/man/virtual_machines.get_all_logins.Rd
new file mode 100644 (file)
index 0000000..b2af1e4
--- /dev/null
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.get_all_logins}
+\alias{virtual_machines.get_all_logins}
+\title{virtual_machines.get_all_logins}
+\usage{
+arv$virtual_machines.get_all_logins(NULL)
+}
+\value{
+VirtualMachine object.
+}
+\description{
+virtual_machines.get_all_logins is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/virtual_machines.list.Rd b/sdk/R/man/virtual_machines.list.Rd
new file mode 100644 (file)
index 0000000..42ed58b
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.list}
+\alias{virtual_machines.list}
+\title{virtual_machines.list}
+\usage{
+arv$virtual_machines.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+VirtualMachineList object.
+}
+\description{
+virtual_machines.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/virtual_machines.logins.Rd b/sdk/R/man/virtual_machines.logins.Rd
new file mode 100644 (file)
index 0000000..7e25110
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.logins}
+\alias{virtual_machines.logins}
+\title{virtual_machines.logins}
+\usage{
+arv$virtual_machines.logins(uuid)
+}
+\arguments{
+\item{uuid}{}
+}
+\value{
+VirtualMachine object.
+}
+\description{
+virtual_machines.logins is a method defined in Arvados class.
+}
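+% A hypothetical usage sketch (added by hand, not roxygen2 output); the
+% virtual machine UUID is a placeholder.
+\examples{
+\dontrun{
+arv <- Arvados$new("your_api_token", "your_host_name")
+logins <- arv$virtual_machines.logins("xxxxx-xxxxx-xxxxxxxxxxxxxxx")
+}
+}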
diff --git a/sdk/R/man/virtual_machines.update.Rd b/sdk/R/man/virtual_machines.update.Rd
new file mode 100644 (file)
index 0000000..d1a07eb
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{virtual_machines.update}
+\alias{virtual_machines.update}
+\title{virtual_machines.update}
+\usage{
+arv$virtual_machines.update(virtualmachine,
+       uuid)
+}
+\arguments{
+\item{virtualmachine}{VirtualMachine object.}
+
+\item{uuid}{The UUID of the VirtualMachine in question.}
+}
+\value{
+VirtualMachine object.
+}
+\description{
+virtual_machines.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/workflows.create.Rd b/sdk/R/man/workflows.create.Rd
new file mode 100644 (file)
index 0000000..8a84e00
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{workflows.create}
+\alias{workflows.create}
+\title{workflows.create}
+\usage{
+arv$workflows.create(workflow,
+       ensure_unique_name = "false")
+}
+\arguments{
+\item{workflow}{Workflow object.}
+
+\item{ensure_unique_name}{Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.}
+}
+\value{
+Workflow object.
+}
+\description{
+workflows.create is a method defined in Arvados class.
+}
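+% A hypothetical creation sketch (added by hand, not roxygen2 output);
+% cwlDefinitionText is an assumed variable holding a CWL document as text.
+\examples{
+\dontrun{
+arv <- Arvados$new("your_api_token", "your_host_name")
+workflow <- list(name = "my-workflow",
+                 definition = cwlDefinitionText)
+arv$workflows.create(workflow, ensure_unique_name = "true")
+}
+}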
diff --git a/sdk/R/man/workflows.delete.Rd b/sdk/R/man/workflows.delete.Rd
new file mode 100644 (file)
index 0000000..96a561e
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{workflows.delete}
+\alias{workflows.delete}
+\title{workflows.delete}
+\usage{
+arv$workflows.delete(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Workflow in question.}
+}
+\value{
+Workflow object.
+}
+\description{
+workflows.delete is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/workflows.get.Rd b/sdk/R/man/workflows.get.Rd
new file mode 100644 (file)
index 0000000..8a8c3a8
--- /dev/null
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{workflows.get}
+\alias{workflows.get}
+\title{workflows.get}
+\usage{
+arv$workflows.get(uuid)
+}
+\arguments{
+\item{uuid}{The UUID of the Workflow in question.}
+}
+\value{
+Workflow object.
+}
+\description{
+workflows.get is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/workflows.list.Rd b/sdk/R/man/workflows.list.Rd
new file mode 100644 (file)
index 0000000..e24b74d
--- /dev/null
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{workflows.list}
+\alias{workflows.list}
+\title{workflows.list}
+\usage{
+arv$workflows.list(filters = NULL,
+       where = NULL, order = NULL, select = NULL,
+       distinct = NULL, limit = "100", offset = "0",
+       count = "exact")
+}
+\arguments{
+\item{filters}{}
+
+\item{where}{}
+
+\item{order}{}
+
+\item{select}{}
+
+\item{distinct}{}
+
+\item{limit}{}
+
+\item{offset}{}
+
+\item{count}{}
+}
+\value{
+WorkflowList object.
+}
+\description{
+workflows.list is a method defined in Arvados class.
+}
diff --git a/sdk/R/man/workflows.update.Rd b/sdk/R/man/workflows.update.Rd
new file mode 100644 (file)
index 0000000..d3f6186
--- /dev/null
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{workflows.update}
+\alias{workflows.update}
+\title{workflows.update}
+\usage{
+arv$workflows.update(workflow,
+       uuid)
+}
+\arguments{
+\item{workflow}{Workflow object.}
+
+\item{uuid}{The UUID of the Workflow in question.}
+}
+\value{
+Workflow object.
+}
+\description{
+workflows.update is a method defined in Arvados class.
+}
diff --git a/sdk/R/run_test.R b/sdk/R/run_test.R
new file mode 100644 (file)
index 0000000..156dde1
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Run the full testthat suite and exit with a nonzero status if any test
+# errored, so callers (e.g. CI scripts) can detect failure.
+results <- devtools::test()
+any_error <- any(as.data.frame(results)$error)
+if (any_error) {
+  q("no", 1)
+} else {
+  q("no", 0)
+}
diff --git a/sdk/R/tests/testthat.R b/sdk/R/tests/testthat.R
new file mode 100644 (file)
index 0000000..9ca4f86
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+library(testthat)
+library(ArvadosR)
+
+test_check("ArvadosR")
diff --git a/sdk/R/tests/testthat/fakes/FakeArvados.R b/sdk/R/tests/testthat/fakes/FakeArvados.R
new file mode 100644 (file)
index 0000000..4fcfd6c
--- /dev/null
@@ -0,0 +1,39 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+FakeArvados <- R6::R6Class(
+
+    "FakeArvados",
+
+    public = list(
+
+        token      = NULL,
+        host       = NULL,
+        webdavHost = NULL,
+        http       = NULL,
+        httpParser = NULL,
+        REST       = NULL,
+
+        initialize = function(token      = NULL,
+                              host       = NULL,
+                              webdavHost = NULL,
+                              http       = NULL,
+                              httpParser = NULL)
+        {
+            self$token      <- token
+            self$host       <- host
+            self$webdavHost <- webdavHost
+            self$http       <- http
+            self$httpParser <- httpParser
+        },
+
+        getToken    = function() self$token,
+        getHostName = function() self$host,
+        getHttpClient = function() self$http,
+        getHttpParser = function() self$httpParser,
+        getWebDavHostName = function() self$webdavHost
+    ),
+
+    cloneable = FALSE
+)
diff --git a/sdk/R/tests/testthat/fakes/FakeHttpParser.R b/sdk/R/tests/testthat/fakes/FakeHttpParser.R
new file mode 100644 (file)
index 0000000..c232839
--- /dev/null
@@ -0,0 +1,60 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+FakeHttpParser <- R6::R6Class(
+
+    "FakeHttrParser",
+
+    public = list(
+
+        validContentTypes = NULL,
+        parserCallCount = NULL,
+
+        initialize = function()
+        {
+            self$parserCallCount <- 0
+            self$validContentTypes <- c("text", "raw")
+        },
+
+        parseJSONResponse = function(serverResponse)
+        {
+            self$parserCallCount <- self$parserCallCount + 1
+
+            if(!is.null(serverResponse$content))
+                return(serverResponse$content)
+
+            serverResponse
+        },
+
+        parseResponse = function(serverResponse, outputType)
+        {
+            self$parserCallCount <- self$parserCallCount + 1
+
+            if(!is.null(serverResponse$content))
+                return(serverResponse$content)
+
+            serverResponse
+        },
+
+        getFileNamesFromResponse = function(serverResponse, uri)
+        {
+            self$parserCallCount <- self$parserCallCount + 1
+
+            if(!is.null(serverResponse$content))
+                return(serverResponse$content)
+
+            serverResponse
+        },
+
+        getFileSizesFromResponse = function(serverResponse, uri)
+        {
+            self$parserCallCount <- self$parserCallCount + 1
+
+            if(!is.null(serverResponse$content))
+                return(serverResponse$content)
+
+            serverResponse
+        }
+    )
+)
diff --git a/sdk/R/tests/testthat/fakes/FakeHttpRequest.R b/sdk/R/tests/testthat/fakes/FakeHttpRequest.R
new file mode 100644 (file)
index 0000000..7734e0d
--- /dev/null
@@ -0,0 +1,183 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+FakeHttpRequest <- R6::R6Class(
+
+    "FakeHttpRequest",
+
+    public = list(
+
+        serverMaxElementsPerRequest = NULL,
+
+        content                                 = NULL,
+        expectedURL                             = NULL,
+        URLIsProperlyConfigured                 = NULL,
+        expectedQueryFilters                    = NULL,
+        queryFiltersAreCorrect                  = NULL,
+        requestHeaderContainsAuthorizationField = NULL,
+        requestHeaderContainsDestinationField   = NULL,
+        requestHeaderContainsRangeField         = NULL,
+        requestHeaderContainsContentTypeField   = NULL,
+        JSONEncodedBodyIsProvided               = NULL,
+        requestBodyIsProvided                   = NULL,
+
+        numberOfGETRequests        = NULL,
+        numberOfDELETERequests     = NULL,
+        numberOfPUTRequests        = NULL,
+        numberOfPOSTRequests       = NULL,
+        numberOfMOVERequests       = NULL,
+        numberOfCOPYRequests       = NULL,
+        numberOfgetConnectionCalls = NULL,
+
+        initialize = function(expectedURL      = NULL,
+                              serverResponse   = NULL,
+                              expectedFilters  = NULL)
+        {
+            if(is.null(serverResponse))
+            {
+                self$content <- list()
+                self$content$status_code <- 200
+            }
+            else
+                self$content <- serverResponse
+
+            self$expectedURL                             <- expectedURL
+            self$URLIsProperlyConfigured                 <- FALSE
+            self$expectedQueryFilters                    <- expectedFilters
+            self$queryFiltersAreCorrect                  <- FALSE
+            self$requestHeaderContainsAuthorizationField <- FALSE
+            self$requestHeaderContainsDestinationField   <- FALSE
+            self$requestHeaderContainsRangeField         <- FALSE
+            self$requestHeaderContainsContentTypeField   <- FALSE
+            self$JSONEncodedBodyIsProvided               <- FALSE
+            self$requestBodyIsProvided                   <- FALSE
+
+            self$numberOfGETRequests    <- 0
+            self$numberOfDELETERequests <- 0
+            self$numberOfPUTRequests    <- 0
+            self$numberOfPOSTRequests   <- 0
+            self$numberOfMOVERequests   <- 0
+            self$numberOfCOPYRequests   <- 0
+
+            self$numberOfgetConnectionCalls <- 0
+
+            self$serverMaxElementsPerRequest <- 5
+        },
+
+        exec = function(verb, url, headers = NULL, body = NULL, query = NULL,
+                        limit = NULL, offset = NULL, retryTimes = 0)
+        {
+            private$validateURL(url)
+            private$validateHeaders(headers)
+            private$validateFilters(query)
+            private$validateBody(body)
+
+            if(verb == "GET")
+                self$numberOfGETRequests <- self$numberOfGETRequests + 1
+            else if(verb == "POST")
+                self$numberOfPOSTRequests <- self$numberOfPOSTRequests + 1
+            else if(verb == "PUT")
+                self$numberOfPUTRequests <- self$numberOfPUTRequests + 1
+            else if(verb == "DELETE")
+                self$numberOfDELETERequests <- self$numberOfDELETERequests + 1
+            else if(verb == "MOVE")
+                self$numberOfMOVERequests <- self$numberOfMOVERequests + 1
+            else if(verb == "COPY")
+                self$numberOfCOPYRequests <- self$numberOfCOPYRequests + 1
+            else if(verb == "PROPFIND")
+            {
+                return(self$content)
+            }
+
+            if(!is.null(self$content$items_available))
+                return(private$getElements(offset, limit))
+            else
+                return(self$content)
+        },
+
+        getConnection = function(url, headers, openMode)
+        {
+            self$numberOfgetConnectionCalls <- self$numberOfgetConnectionCalls + 1
+            c(url, headers, openMode)
+        }
+    ),
+
+    private = list(
+
+        validateURL = function(url)
+        {
+            if(!is.null(self$expectedURL) && url == self$expectedURL)
+                self$URLIsProperlyConfigured <- TRUE
+        },
+
+        validateHeaders = function(headers)
+        {
+            if(!is.null(headers$Authorization))
+                self$requestHeaderContainsAuthorizationField <- TRUE
+
+            if(!is.null(headers$Destination))
+                self$requestHeaderContainsDestinationField <- TRUE
+
+            if(!is.null(headers$Range))
+                self$requestHeaderContainsRangeField <- TRUE
+
+            if(!is.null(headers[["Content-Type"]]))
+                self$requestHeaderContainsContentTypeField <- TRUE
+        },
+
+        validateBody = function(body)
+        {
+            if(!is.null(body))
+            {
+                self$requestBodyIsProvided <- TRUE
+
+                if(class(body) == "json")
+                    self$JSONEncodedBodyIsProvided <- TRUE
+            }
+        },
+
+        validateFilters = function(filters)
+        {
+            if(!is.null(self$expectedQueryFilters) &&
+               !is.null(filters) &&
+               isTRUE(all.equal(unname(filters), self$expectedQueryFilters)))
+            {
+                self$queryFiltersAreCorrect <- TRUE
+            }
+        },
+
+        getElements = function(offset, limit)
+        {
+            start <- 1
+            elementCount <- self$serverMaxElementsPerRequest
+
+            if(!is.null(offset))
+            {
+                if(offset > self$content$items_available)
+                    stop("Invalid offset")
+
+                start <- offset + 1
+            }
+
+            if(!is.null(limit))
+                if(limit < self$serverMaxElementsPerRequest)
+                    elementCount <- limit - 1
+
+
+            serverResponse <- list()
+            serverResponse$items_available <- self$content$items_available
+            serverResponse$items <- self$content$items[start:(start + elementCount - 1)]
+
+            if(start + elementCount > self$content$items_available)
+            {
+                elementCount = self$content$items_available - start
+                serverResponse$items <- self$content$items[start:(start + elementCount)]
+            }
+
+            serverResponse
+        }
+    ),
+
+    cloneable = FALSE
+)
diff --git a/sdk/R/tests/testthat/fakes/FakeRESTService.R b/sdk/R/tests/testthat/fakes/FakeRESTService.R
new file mode 100644 (file)
index 0000000..a91da04
--- /dev/null
@@ -0,0 +1,179 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+FakeRESTService <- R6::R6Class(
+
+    "FakeRESTService",
+
+    public = list(
+
+        getResourceCallCount    = NULL,
+        createResourceCallCount = NULL,
+        listResourcesCallCount  = NULL,
+        deleteResourceCallCount = NULL,
+        updateResourceCallCount = NULL,
+        fetchAllItemsCallCount  = NULL,
+
+        createCallCount               = NULL,
+        deleteCallCount               = NULL,
+        moveCallCount                 = NULL,
+        copyCallCount                 = NULL,
+        getCollectionContentCallCount = NULL,
+        getResourceSizeCallCount      = NULL,
+        readCallCount                 = NULL,
+        writeCallCount                = NULL,
+        getConnectionCallCount        = NULL,
+        writeBuffer                   = NULL,
+        filtersAreConfiguredCorrectly = NULL,
+        bodyIsConfiguredCorrectly     = NULL,
+        expectedFilterContent         = NULL,
+
+        collectionContent = NULL,
+        returnContent     = NULL,
+
+        initialize = function(collectionContent = NULL, returnContent = NULL,
+                              expectedFilterContent = NULL)
+        {
+            self$getResourceCallCount    <- 0
+            self$createResourceCallCount <- 0
+            self$listResourcesCallCount  <- 0
+            self$deleteResourceCallCount <- 0
+            self$updateResourceCallCount <- 0
+            self$fetchAllItemsCallCount  <- 0
+
+            self$createCallCount               <- 0
+            self$deleteCallCount               <- 0
+            self$moveCallCount                 <- 0
+            self$copyCallCount                 <- 0
+            self$getCollectionContentCallCount <- 0
+            self$getResourceSizeCallCount      <- 0
+            self$readCallCount                 <- 0
+            self$writeCallCount                <- 0
+            self$getConnectionCallCount        <- 0
+            self$filtersAreConfiguredCorrectly <- FALSE
+            self$bodyIsConfiguredCorrectly     <- FALSE
+
+            self$collectionContent     <- collectionContent
+            self$returnContent         <- returnContent
+            self$expectedFilterContent <- expectedFilterContent
+        },
+
+        getWebDavHostName = function()
+        {
+        },
+
+        getResource = function(resource, uuid)
+        {
+            self$getResourceCallCount <- self$getResourceCallCount + 1
+            self$returnContent
+        },
+
+        listResources = function(resource, filters = NULL, limit = 100, offset = 0)
+        {
+            self$listResourcesCallCount <- self$listResourcesCallCount + 1
+
+            if(!is.null(self$expectedFilterContent) && !is.null(filters))
+               if(isTRUE(all.equal(filters, self$expectedFilterContent)))
+                    self$filtersAreConfiguredCorrectly <- TRUE
+
+            self$returnContent
+        },
+
+        fetchAllItems = function(resourceURL, filters)
+        {
+            self$fetchAllItemsCallCount <- self$fetchAllItemsCallCount + 1
+
+            if(!is.null(self$expectedFilterContent) && !is.null(filters))
+               if(isTRUE(all.equal(filters, self$expectedFilterContent)))
+                    self$filtersAreConfiguredCorrectly <- TRUE
+
+            self$returnContent
+        },
+
+        deleteResource = function(resource, uuid)
+        {
+            self$deleteResourceCallCount <- self$deleteResourceCallCount + 1
+            self$returnContent
+        },
+
+        updateResource = function(resource, uuid, newContent)
+        {
+            self$updateResourceCallCount <- self$updateResourceCallCount + 1
+
+            if(!is.null(self$returnContent) && !is.null(newContent))
+               if(isTRUE(all.equal(newContent, self$returnContent)))
+                    self$bodyIsConfiguredCorrectly <- TRUE
+
+            self$returnContent
+        },
+
+        createResource = function(resource, content)
+        {
+            self$createResourceCallCount <- self$createResourceCallCount + 1
+
+            if(!is.null(self$returnContent) && !is.null(content))
+               if(isTRUE(all.equal(content, self$returnContent)))
+                    self$bodyIsConfiguredCorrectly <- TRUE
+
+            self$returnContent
+        },
+
+        create = function(files, uuid)
+        {
+            self$createCallCount <- self$createCallCount + 1
+            self$returnContent
+        },
+
+        delete = function(relativePath, uuid)
+        {
+            self$deleteCallCount <- self$deleteCallCount + 1
+            self$returnContent
+        },
+
+        move = function(from, to, uuid)
+        {
+            self$moveCallCount <- self$moveCallCount + 1
+            self$returnContent
+        },
+
+        copy = function(from, to, uuid)
+        {
+            self$copyCallCount <- self$copyCallCount + 1
+            self$returnContent
+        },
+
+        getCollectionContent = function(uuid)
+        {
+            self$getCollectionContentCallCount <- self$getCollectionContentCallCount + 1
+            self$collectionContent
+        },
+
+        getResourceSize = function(uuid, relativePathToResource)
+        {
+            self$getResourceSizeCallCount <- self$getResourceSizeCallCount + 1
+            self$returnContent
+        },
+
+        read = function(relativePath, uuid, contentType = "text", offset = 0, length = 0)
+        {
+            self$readCallCount <- self$readCallCount + 1
+            self$returnContent
+        },
+
+        write = function(relativePath, uuid, content, contentType)
+        {
+            self$writeBuffer <- content
+            self$writeCallCount <- self$writeCallCount + 1
+            self$returnContent
+        },
+
+        getConnection = function(uuid, relativePath, openMode)
+        {
+            self$getConnectionCallCount <- self$getConnectionCallCount + 1
+            self$returnContent
+        }
+    ),
+
+    cloneable = FALSE
+)
diff --git a/sdk/R/tests/testthat/test-ArvadosFile.R b/sdk/R/tests/testthat/test-ArvadosFile.R
new file mode 100644 (file)
index 0000000..e3457c9
--- /dev/null
@@ -0,0 +1,353 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+source("fakes/FakeRESTService.R")
+
+context("ArvadosFile")
+
+test_that("constructor raises error if  file name is empty string", {
+
+    expect_that(ArvadosFile$new(""), throws_error("Invalid name."))
+})
+
+test_that("getFileListing always returns file name", {
+
+    dog <- ArvadosFile$new("dog")
+
+    expect_that(dog$getFileListing(), equals("dog"))
+})
+
+test_that("get always returns NULL", {
+
+    dog <- ArvadosFile$new("dog")
+
+    responseIsNull <- is.null(dog$get("something"))
+    expect_that(responseIsNull, is_true())
+})
+
+test_that("getFirst always returns NULL", {
+
+    dog <- ArvadosFile$new("dog")
+
+    responseIsNull <- is.null(dog$getFirst())
+    expect_that(responseIsNull, is_true())
+})
+
+test_that(paste("getSizeInBytes returns zero if arvadosFile",
+                "is not part of a collection"), {
+
+    dog <- ArvadosFile$new("dog")
+
+    expect_that(dog$getSizeInBytes(), equals(0))
+})
+
+test_that(paste("getSizeInBytes delegates size calculation",
+                "to REST service class"), {
+
+    collectionContent <- c("animal", "animal/fish")
+    returnSize <- 100
+    fakeREST <- FakeRESTService$new(collectionContent, returnSize)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    resourceSize <- fish$getSizeInBytes()
+
+    expect_that(resourceSize, equals(100))
+})
+
+test_that("getRelativePath returns path relative to the tree root", {
+
+    animal <- Subcollection$new("animal")
+    fish <- Subcollection$new("fish")
+    shark <- ArvadosFile$new("shark")
+
+    animal$add(fish)
+    fish$add(shark)
+
+    expect_that(shark$getRelativePath(), equals("animal/fish/shark"))
+})
+
+test_that("read raises exception if file doesn't belong to a collection", {
+
+    dog <- ArvadosFile$new("dog")
+
+    expect_that(dog$read(),
+                throws_error("ArvadosFile doesn't belong to any collection."))
+})
+
+test_that("read raises exception offset or length is negative number", {
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    expect_that(fish$read(contentType = "text", offset = -1),
+                throws_error("Offset and length must be positive values."))
+    expect_that(fish$read(contentType = "text", length = -1),
+                throws_error("Offset and length must be positive values."))
+    expect_that(fish$read(contentType = "text", offset = -1, length = -1),
+                throws_error("Offset and length must be positive values."))
+})
+
+test_that("read delegates reading operation to REST service class", {
+
+    collectionContent <- c("animal", "animal/fish")
+    readContent <- "my file"
+    fakeREST <- FakeRESTService$new(collectionContent, readContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    fileContent <- fish$read("text")
+
+    expect_that(fileContent, equals("my file"))
+    expect_that(fakeREST$readCallCount, equals(1))
+})
+
+test_that(paste("connection delegates connection creation ro RESTService class",
+                "which returns curl connection opened in read mode when",
+                "'r' of 'rb' is passed as argument"), {
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    connection <- fish$connection("r")
+
+    expect_that(fakeREST$getConnectionCallCount, equals(1))
+})
+
+test_that(paste("connection returns textConnection opened",
+                "in write mode when 'w' is passed as argument"), {
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    connection <- fish$connection("w")
+
+    writeLines("file", connection)
+    writeLines("content", connection)
+
+    writeResult <- textConnectionValue(connection)
+
+    expect_that(writeResult[1], equals("file"))
+    expect_that(writeResult[2], equals("content"))
+})
+
+test_that("flush sends data stored in a connection to a REST server", {
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    connection <- fish$connection("w")
+
+    writeLines("file content", connection)
+
+    fish$flush()
+
+    expect_that(fakeREST$writeBuffer, equals("file content"))
+})
+
+test_that("write raises exception if file doesn't belong to a collection", {
+
+    dog <- ArvadosFile$new("dog")
+
+    expect_that(dog$write(),
+                throws_error("ArvadosFile doesn't belong to any collection."))
+})
+
+test_that("write delegates writing operation to REST service class", {
+
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    fileContent <- fish$write("new file content")
+
+    expect_that(fakeREST$writeBuffer, equals("new file content"))
+})
+
+test_that(paste("move raises exception if arvados file",
+                "doesn't belong to any collection"), {
+
+    animal <- ArvadosFile$new("animal")
+
+    expect_that(animal$move("new/location"),
+                throws_error("ArvadosFile doesn't belong to any collection."))
+})
+
+test_that(paste("move raises exception if newLocationInCollection",
+                "parameter is invalid"), {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+
+    collection <- Collection$new(api, "myUUID")
+    dog <- collection$get("animal/dog")
+
+    expect_that(dog$move("objects/dog"),
+                throws_error("Unable to get destination subcollection."))
+})
+
+test_that("move raises exception if new location contains content with the same name", {
+
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "dog")
+
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    dog <- collection$get("animal/dog")
+
+    expect_that(dog$move("dog"),
+                throws_error("Destination already contains content with same name."))
+
+})
+
+test_that("move moves arvados file inside collection tree", {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    dog <- collection$get("animal/dog")
+
+    dog$move("dog")
+    dogIsNullOnOldLocation <- is.null(collection$get("animal/dog"))
+    dogExistsOnNewLocation <- !is.null(collection$get("dog"))
+
+    expect_that(dogIsNullOnOldLocation, is_true())
+    expect_that(dogExistsOnNewLocation, is_true())
+})
+
+test_that(paste("copy raises exception if arvados file",
+                "doesn't belong to any collection"), {
+
+    animal <- ArvadosFile$new("animal")
+
+    expect_that(animal$copy("new/location"),
+                throws_error("ArvadosFile doesn't belong to any collection."))
+})
+
+test_that(paste("copy raises exception if location parameter is invalid"), {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+
+    collection <- Collection$new(api, "myUUID")
+    dog <- collection$get("animal/dog")
+
+    expect_that(dog$copy("objects/dog"),
+                throws_error("Unable to get destination subcollection."))
+})
+
+test_that("copy raises exception if new location contains content with the same name", {
+
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "dog")
+
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    dog <- collection$get("animal/dog")
+
+    expect_that(dog$copy("dog"),
+                throws_error("Destination already contains content with same name."))
+
+})
+
+test_that("copy copies arvados file inside collection tree", {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    dog <- collection$get("animal/dog")
+
+    dog$copy("dog")
+    dogExistsOnOldLocation <- !is.null(collection$get("animal/dog"))
+    dogExistsOnNewLocation <- !is.null(collection$get("dog"))
+
+    expect_that(dogExistsOnOldLocation, is_true())
+    expect_that(dogExistsOnNewLocation, is_true())
+})
+
+test_that("duplicate performs deep cloning of Arvados file", {
+    arvFile <- ArvadosFile$new("foo")
+    newFile1 <- arvFile$duplicate()
+    newFile2 <- arvFile$duplicate("bar")
+
+    expect_that(newFile1$getFileListing(), equals(arvFile$getFileListing()))
+    expect_that(newFile2$getFileListing(), equals(c("bar")))
+})
diff --git a/sdk/R/tests/testthat/test-Collection.R b/sdk/R/tests/testthat/test-Collection.R
new file mode 100644 (file)
index 0000000..636359a
--- /dev/null
@@ -0,0 +1,297 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+source("fakes/FakeRESTService.R")
+
+context("Collection")
+
+test_that(paste("constructor creates file tree from text content",
+                "retreived form REST service"), {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    root <- collection$get("")
+
+    expect_that(fakeREST$getCollectionContentCallCount, equals(1))
+    expect_that(root$getName(), equals(""))
+})
+
+test_that(paste("add raises exception if passed argumet is not",
+                "ArvadosFile or Subcollection"), {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    newNumber <- 10
+
+    expect_that(collection$add(newNumber),
+    throws_error(paste("Expected AravodsFile or Subcollection",
+                       "object, got (numeric)."), fixed = TRUE))
+})
+
+test_that("add raises exception if relative path is not valid", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    newPen <- ArvadosFile$new("pen")
+
+    expect_that(collection$add(newPen, "objects"),
+                throws_error("Subcollection objects doesn't exist.",
+                              fixed = TRUE))
+})
+
+test_that("add raises exception if content name is empty string", {
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    rootFolder <- Subcollection$new("")
+
+    expect_that(collection$add(rootFolder),
+                throws_error("Content has invalid name.", fixed = TRUE))
+})
+
+test_that(paste("add adds ArvadosFile or Subcollection",
+                "to local tree structure and remote REST service"), {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    newDog <- ArvadosFile$new("dog")
+    collection$add(newDog, "animal")
+
+    dog <- collection$get("animal/dog")
+    dogExistsInCollection <- !is.null(dog) && dog$getName() == "dog"
+
+    expect_that(dogExistsInCollection, is_true())
+    expect_that(fakeREST$createCallCount, equals(1))
+})
+
+test_that("create raises exception if passed argumet is not character vector", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    expect_that(collection$create(10),
+                throws_error("Expected character vector, got (numeric).",
+                             fixed = TRUE))
+})
+
+test_that(paste("create adds files specified by fileNames",
+                "to local tree structure and remote REST service"), {
+
+    fakeREST <- FakeRESTService$new()
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    collection$create(c("animal/dog", "animal/cat"))
+
+    dog <- collection$get("animal/dog")
+    cat <- collection$get("animal/cat")
+    dogExistsInCollection <- !is.null(dog) && dog$getName() == "dog"
+    catExistsInCollection <- !is.null(cat) && cat$getName() == "cat"
+
+    expect_that(dogExistsInCollection, is_true())
+    expect_that(catExistsInCollection, is_true())
+    expect_that(fakeREST$createCallCount, equals(2))
+})
+
+test_that("remove raises exception if passed argumet is not character vector", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    expect_that(collection$remove(10),
+                throws_error("Expected character vector, got (numeric).",
+                             fixed = TRUE))
+})
+
+test_that("remove raises exception if user tries to remove root folder", {
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    expect_that(collection$remove(""),
+                throws_error("You can't delete root folder.", fixed = TRUE))
+})
+
+test_that(paste("remove removes files specified by paths",
+                "from local tree structure and from remote REST service"), {
+
+    collectionContent <- c("animal", "animal/fish", "animal/dog", "animal/cat", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    collection$remove(c("animal/dog", "animal/cat"))
+
+    dog <- collection$get("animal/dog")
+    cat <- collection$get("animal/dog")
+    dogExistsInCollection <- !is.null(dog) && dog$getName() == "dog"
+    catExistsInCollection <- !is.null(cat) && cat$getName() == "cat"
+
+    expect_that(dogExistsInCollection, is_false())
+    expect_that(catExistsInCollection, is_false())
+    expect_that(fakeREST$deleteCallCount, equals(2))
+})
+
+test_that(paste("move moves content to a new location inside file tree",
+                "and on REST service"), {
+
+    collectionContent <- c("animal", "animal/dog", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    collection$move("animal/dog", "dog")
+
+    dogIsNullOnOldLocation <- is.null(collection$get("animal/dog"))
+    dogExistsOnNewLocation <- !is.null(collection$get("dog"))
+
+    expect_that(dogIsNullOnOldLocation, is_true())
+    expect_that(dogExistsOnNewLocation, is_true())
+    expect_that(fakeREST$moveCallCount, equals(1))
+})
+
+test_that("move raises exception if new location is not valid", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    expect_that(collection$move("fish", "object"),
+                throws_error("Content you want to move doesn't exist in the collection.",
+                             fixed = TRUE))
+})
+
+test_that("getFileListing returns sorted collection content received from REST service", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    contentMatchExpected <- all(collection$getFileListing() ==
+                                c("animal", "animal/fish", "ball"))
+
+    expect_that(contentMatchExpected, is_true())
+    # 2 calls, because Collection$new already calls getFileListing once
+    expect_that(fakeREST$getCollectionContentCallCount, equals(2))
+
+})
+
+test_that("get returns arvados file or subcollection from internal tree structure", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    fish <- collection$get("animal/fish")
+    fishIsNotNull <- !is.null(fish)
+
+    expect_that(fishIsNotNull, is_true())
+    expect_that(fish$getName(), equals("fish"))
+})
+
+test_that(paste("copy copies content to a new location inside file tree",
+                "and on REST service"), {
+
+    collectionContent <- c("animal", "animal/dog", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    collection$copy("animal/dog", "dog")
+
+    dogExistsOnOldLocation <- !is.null(collection$get("animal/dog"))
+    dogExistsOnNewLocation <- !is.null(collection$get("dog"))
+
+    expect_that(dogExistsOnOldLocation, is_true())
+    expect_that(dogExistsOnNewLocation, is_true())
+    expect_that(fakeREST$copyCallCount, equals(1))
+})
+
+test_that("copy raises exception if new location is not valid", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    expect_that(collection$copy("fish", "object"),
+                throws_error("Content you want to copy doesn't exist in the collection.",
+                             fixed = TRUE))
+})
+
+test_that("refresh invalidates current tree structure", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "aaaaa-j7d0g-ccccccccccccccc")
+
+    # Before refresh
+    fish <- collection$get("animal/fish")
+    expect_that(fish$getName(), equals("fish"))
+    expect_that(fish$getCollection()$uuid, equals("aaaaa-j7d0g-ccccccccccccccc"))
+
+    collection$refresh()
+
+    # After refresh
+    expect_that(fish$getName(), equals("fish"))
+    expect_true(is.null(fish$getCollection()))
+})
diff --git a/sdk/R/tests/testthat/test-CollectionTree.R b/sdk/R/tests/testthat/test-CollectionTree.R
new file mode 100644 (file)
index 0000000..1a3aefe
--- /dev/null
@@ -0,0 +1,106 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+context("CollectionTree")
+
+test_that("constructor creates file tree from character array properly", {
+
+    collection <- "myCollection"
+    characterArray <- c("animal",
+                        "animal/dog",
+                        "boat")
+
+    collectionTree <- CollectionTree$new(characterArray, collection)
+
+    root   <- collectionTree$getTree()
+    animal <- collectionTree$getElement("animal")
+    dog    <- collectionTree$getElement("animal/dog")
+    boat   <- collectionTree$getElement("boat")
+
+    rootHasNoParent             <- is.null(root$getParent())
+    rootIsOfTypeSubcollection   <- "Subcollection" %in% class(root)
+    animalIsOfTypeSubcollection <- "Subcollection" %in% class(animal)
+    dogIsOfTypeArvadosFile      <- "ArvadosFile" %in% class(dog)
+    boatIsOfTypeArvadosFile     <- "ArvadosFile" %in% class(boat)
+    animalsParentIsRoot         <- animal$getParent()$getName() == root$getName()
+    animalContainsDog           <- animal$getFirst()$getName() == dog$getName()
+    dogsParentIsAnimal          <- dog$getParent()$getName() == animal$getName()
+    boatsParentIsRoot           <- boat$getParent()$getName() == root$getName()
+
+    allElementsBelongToSameCollection <- root$getCollection()   == "myCollection" &&
+                                         animal$getCollection() == "myCollection" &&
+                                         dog$getCollection()    == "myCollection" &&
+                                         boat$getCollection()   == "myCollection"
+
+    expect_that(root$getName(), equals(""))
+    expect_that(rootIsOfTypeSubcollection, is_true())
+    expect_that(rootHasNoParent, is_true())
+    expect_that(animalIsOfTypeSubcollection, is_true())
+    expect_that(animalsParentIsRoot, is_true())
+    expect_that(animalContainsDog, is_true())
+    expect_that(dogIsOfTypeArvadosFile, is_true())
+    expect_that(dogsParentIsAnimal, is_true())
+    expect_that(boatIsOfTypeArvadosFile, is_true())
+    expect_that(boatsParentIsRoot, is_true())
+    expect_that(allElementsBelongToSameCollection, is_true())
+})
+
+test_that("getElement returns element from tree if element exists on specified path", {
+
+    collection <- "myCollection"
+    characterArray <- c("animal",
+                        "animal/dog",
+                        "boat")
+
+    collectionTree <- CollectionTree$new(characterArray, collection)
+
+    dog <- collectionTree$getElement("animal/dog")
+
+    expect_that(dog$getName(), equals("dog"))
+})
+
+test_that("getElement returns NULL from tree if element doesn't exists on specified path", {
+
+    collection <- "myCollection"
+    characterArray <- c("animal",
+                        "animal/dog",
+                        "boat")
+
+    collectionTree <- CollectionTree$new(characterArray, collection)
+
+    fish <- collectionTree$getElement("animal/fish")
+    fishIsNULL <- is.null(fish)
+
+    expect_that(fishIsNULL, is_true())
+})
+
+test_that("getElement trims ./ from start of relativePath", {
+
+    collection <- "myCollection"
+    characterArray <- c("animal",
+                        "animal/dog",
+                        "boat")
+
+    collectionTree <- CollectionTree$new(characterArray, collection)
+
+    dog <- collectionTree$getElement("animal/dog")
+    dogWithDotSlash <- collectionTree$getElement("./animal/dog")
+
+    expect_that(dogWithDotSlash$getName(), equals(dog$getName()))
+})
+
+test_that("getElement trims / from end of relativePath", {
+
+    collection <- "myCollection"
+    characterArray <- c("animal",
+                        "animal/dog",
+                        "boat")
+
+    collectionTree <- CollectionTree$new(characterArray, collection)
+
+    animal <- collectionTree$getElement("animal")
+    animalWithSlash <- collectionTree$getElement("animal/")
+
+    expect_that(animalWithSlash$getName(), equals(animal$getName()))
+})
diff --git a/sdk/R/tests/testthat/test-HttpParser.R b/sdk/R/tests/testthat/test-HttpParser.R
new file mode 100644 (file)
index 0000000..82c0fb0
--- /dev/null
@@ -0,0 +1,96 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+context("Http Parser")
+
+
+test_that("parseJSONResponse generates and returns JSON object from server response", {
+
+    JSONContent <- "{\"bar\":{\"foo\":[10]}}"
+    serverResponse <- list()
+    serverResponse$content <- charToRaw(JSONContent)
+    serverResponse$headers[["Content-Type"]] <- "application/json; charset=utf-8"
+    class(serverResponse) <- c("response")
+
+    parser <- HttpParser$new()
+
+    result <- parser$parseJSONResponse(serverResponse)
+    barExists <- !is.null(result$bar)
+
+    expect_that(barExists, is_true())
+    expect_that(unlist(result$bar$foo), equals(10))
+})
+
+test_that(paste("parseResponse generates and returns character vector",
+                "from server response if outputType is text"), {
+
+    content <- "random text"
+    serverResponse <- list()
+    serverResponse$content <- charToRaw(content)
+    serverResponse$headers[["Content-Type"]] <- "text/plain; charset=utf-8"
+    class(serverResponse) <- c("response")
+
+    parser <- HttpParser$new()
+    parsedResponse <- parser$parseResponse(serverResponse, "text")
+
+    expect_that(parsedResponse, equals("random text"))
+})
+
+
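+# Canned WebDAV PROPFIND (multistatus) response describing a collection root
+# and a single 25-byte file, myFile.exe; used by the parser tests below.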
+webDAVResponseSample =
+    paste0("<?xml version=\"1.0\" encoding=\"UTF-8\"?><D:multistatus xmlns:",
+           "D=\"DAV:\"><D:response><D:href>/c=aaaaa-bbbbb-ccccccccccccccc</D",
+           ":href><D:propstat><D:prop><D:resourcetype><D:collection xmlns:D=",
+           "\"DAV:\"/></D:resourcetype><D:getlastmodified>Fri, 11 Jan 2018 1",
+           "1:11:11 GMT</D:getlastmodified><D:displayname></D:displayname><D",
+           ":supportedlock><D:lockentry xmlns:D=\"DAV:\"><D:lockscope><D:exc",
+           "lusive/></D:lockscope><D:locktype><D:write/></D:locktype></D:loc",
+           "kentry></D:supportedlock></D:prop><D:status>HTTP/1.1 200 OK</D:s",
+           "tatus></D:propstat></D:response><D:response><D:href>/c=aaaaa-bbb",
+           "bb-ccccccccccccccc/myFile.exe</D:href><D:propstat><D:prop><D:r",
+           "esourcetype></D:resourcetype><D:getlastmodified>Fri, 12 Jan 2018",
+           " 22:22:22 GMT</D:getlastmodified><D:getcontenttype>text/x-c++src",
+           "; charset=utf-8</D:getcontenttype><D:displayname>myFile.exe</D",
+           ":displayname><D:getcontentlength>25</D:getcontentlength><D:getet",
+           "ag>\"123b12dd1234567890\"</D:getetag><D:supportedlock><D:lockent",
+           "ry xmlns:D=\"DAV:\"><D:lockscope><D:exclusive/></D:lockscope><D:",
+           "locktype><D:write/></D:locktype></D:lockentry></D:supportedlock>",
+           "</D:prop><D:status>HTTP/1.1 200 OK</D:status></D:propstat></D:re",
+           "sponse></D:multistatus>")
+
+
+
+test_that(paste("getFileNamesFromResponse returns file names belonging to specific",
+                "collection parsed from webDAV server response"), {
+
+    serverResponse <- list()
+    serverResponse$content <- charToRaw(webDAVResponseSample)
+    serverResponse$headers[["Content-Type"]] <- "text/xml; charset=utf-8"
+    class(serverResponse) <- c("response")
+    url <- URLencode("https://webdav/c=aaaaa-bbbbb-ccccccccccccccc")
+
+    parser <- HttpParser$new()
+    result <- parser$getFileNamesFromResponse(serverResponse, url)
+    expectedResult <- "myFile.exe"
+    resultMatchExpected <- all.equal(result, expectedResult)
+
+    expect_that(resultMatchExpected, is_true())
+})
+
+test_that(paste("getFileSizesFromResponse returns file sizes",
+                "parsed from webDAV server response"), {
+
+    serverResponse <- list()
+    serverResponse$content <- charToRaw(webDAVResponseSample)
+    serverResponse$headers[["Content-Type"]] <- "text/xml; charset=utf-8"
+    class(serverResponse) <- c("response")
+    url <- URLencode("https://webdav/c=aaaaa-bbbbb-ccccccccccccccc")
+
+    parser <- HttpParser$new()
+    expectedResult <- "25"
+    result <- parser$getFileSizesFromResponse(serverResponse, url)
+    resultMatchExpected <- result == expectedResult
+
+    expect_that(resultMatchExpected, is_true())
+})
diff --git a/sdk/R/tests/testthat/test-HttpRequest.R b/sdk/R/tests/testthat/test-HttpRequest.R
new file mode 100644 (file)
index 0000000..f12463c
--- /dev/null
@@ -0,0 +1,108 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+context("Http Request")
+
+
+test_that("execute raises exception if http verb is not valid", {
+
+    http <- HttpRequest$new()
+    expect_that(http$exec("FAKE VERB", "url"),
+               throws_error("Http verb is not valid."))
+})
+
+test_that("createQuery generates and encodes query portion of http", {
+
+    http <- HttpRequest$new()
+    queryParams <- list()
+    queryParams$filters <- list(list("color", "=", "red"))
+    queryParams$limit <- 20
+    queryParams$offset <- 50
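+    # filters are serialized to JSON and percent-encoded into the query string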
+    expect_that(http$createQuery(queryParams),
+                equals(paste0("/?filters=%5B%5B%22color%22%2C%22%3D%22%2C%22red",
+                              "%22%5D%5D&limit=20&offset=50")))
+})
+
+test_that("createQuery generates and empty string when queryParams is an empty list", {
+
+    http <- HttpRequest$new()
+    expect_that(http$createQuery(list()), equals(""))
+})
+
+test_that("exec calls httr functions correctly", {
+    httrNamespace <- getNamespace("httr")
+
+    # Monkeypatch httr functions and assert that they are called later
+    add_headersCalled <- FALSE
+    unlockBinding("add_headers", httrNamespace)
+    newAddHeaders <- function(h)
+    {
+        add_headersCalled <<- TRUE
+        list()
+    }
+    httrNamespace$add_headers <- newAddHeaders
+    lockBinding("add_headers", httrNamespace)
+
+    expectedConfig <- list()
+    retryCalled <- FALSE
+    unlockBinding("RETRY", httrNamespace)
+    newRETRY <- function(verb, url, body, config, times)
+    {
+        retryCalled <<- TRUE
+        expectedConfig <<- config
+    }
+    httrNamespace$RETRY <- newRETRY
+    lockBinding("RETRY", httrNamespace)
+
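+    # With ARVADOS_API_HOST_INSECURE set, exec should disable SSL peer
+    # verification; this is asserted on the captured config below.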
+    Sys.setenv("ARVADOS_API_HOST_INSECURE" = TRUE)
+    http <- HttpRequest$new()
+    http$exec("GET", "url")
+
+    expect_that(add_headersCalled, is_true())
+    expect_that(retryCalled, is_true())
+    expect_that(expectedConfig$options, equals(list(ssl_verifypeer = 0L)))
+})
+
+test_that("getConnection calls curl functions correctly", {
+    curlNamespace <- getNamespace("curl")
+
+    # Monkeypatch curl functions and assert that they are called later
+    curlCalled <- FALSE
+    unlockBinding("curl", curlNamespace)
+    newCurl <- function(url, open, handle) curlCalled <<- TRUE
+    curlNamespace$curl <- newCurl
+    lockBinding("curl", curlNamespace)
+
+    new_handleCalled <- FALSE
+    unlockBinding("new_handle", curlNamespace)
+    newHandleFun <- function()
+    {
+        new_handleCalled <<- TRUE
+        list()
+    }
+    curlNamespace$new_handle <- newHandleFun
+    lockBinding("new_handle", curlNamespace)
+
+    handle_setheadersCalled <- FALSE
+    unlockBinding("handle_setheaders", curlNamespace)
+    newHandleSetHeaders <- function(h, .list) handle_setheadersCalled <<- TRUE
+    curlNamespace$handle_setheaders <- newHandleSetHeaders
+    lockBinding("handle_setheaders", curlNamespace)
+
+    handle_setoptCalled <- FALSE
+    unlockBinding("handle_setopt", curlNamespace)
+    newHandleSetOpt <- function(h, ssl_verifypeer) handle_setoptCalled <<- TRUE
+    curlNamespace$handle_setopt <- newHandleSetOpt
+    lockBinding("handle_setopt", curlNamespace)
+
+
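+    # getConnection should route through each stubbed curl helper
+    # (new_handle, handle_setheaders, handle_setopt, curl).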
+    Sys.setenv("ARVADOS_API_HOST_INSECURE" = TRUE)
+    http <- HttpRequest$new()
+    http$getConnection("location", list(), "r")
+
+    expect_that(new_handleCalled, is_true())
+    expect_that(handle_setheadersCalled, is_true())
+    expect_that(handle_setoptCalled, is_true())
+    expect_that(curlCalled, is_true())
+})
diff --git a/sdk/R/tests/testthat/test-RESTService.R b/sdk/R/tests/testthat/test-RESTService.R
new file mode 100644 (file)
index 0000000..64988e3
--- /dev/null
@@ -0,0 +1,426 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+source("fakes/FakeArvados.R")
+source("fakes/FakeHttpRequest.R")
+source("fakes/FakeHttpParser.R")
+
+context("REST service")
+
+test_that("getWebDavHostName calls REST service properly", {
+
+    expectedURL <- "https://host/discovery/v1/apis/arvados/v1/rest"
+    serverResponse <- list(keepWebServiceUrl = "https://myWebDavServer.com")
+    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
+
+    REST <- RESTService$new("token", "host",
+                            httpRequest, FakeHttpParser$new())
+
+    REST$getWebDavHostName()
+
+    expect_that(httpRequest$URLIsProperlyConfigured, is_true())
+    expect_that(httpRequest$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(httpRequest$numberOfGETRequests, equals(1))
+})
+
+test_that("getWebDavHostName returns webDAV host name properly", {
+
+    serverResponse <- list(keepWebServiceUrl = "https://myWebDavServer.com")
+    httpRequest <- FakeHttpRequest$new(expectedURL = NULL, serverResponse)
+
+    REST <- RESTService$new("token", "host",
+                            httpRequest, FakeHttpParser$new())
+
+    expect_that("https://myWebDavServer.com", equals(REST$getWebDavHostName()))
+})
+
+test_that("create calls REST service properly", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    fakeHttp <- FakeHttpRequest$new(expectedURL)
+    fakeHttpParser <- FakeHttpParser$new()
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$create("file", uuid)
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(fakeHttp$numberOfPUTRequests, equals(1))
+})
+
+test_that("create raises exception if server response code is not between 200 and 300", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$create("file", uuid),
+                throws_error("Server code: 404"))
+})
+
+test_that("delete calls REST service properly", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    fakeHttp <- FakeHttpRequest$new(expectedURL)
+    fakeHttpParser <- FakeHttpParser$new()
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$delete("file", uuid)
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(fakeHttp$numberOfDELETERequests, equals(1))
+})
+
+test_that("delete raises exception if server response code is not between 200 and 300", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$delete("file", uuid),
+                throws_error("Server code: 404"))
+})
+
+test_that("move calls REST service properly", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    fakeHttp <- FakeHttpRequest$new(expectedURL)
+    fakeHttpParser <- FakeHttpParser$new()
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$move("file", "newDestination/file", uuid)
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(fakeHttp$requestHeaderContainsDestinationField, is_true())
+    expect_that(fakeHttp$numberOfMOVERequests, equals(1))
+})
+
+test_that("move raises exception if server response code is not between 200 and 300", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$move("file", "newDestination/file", uuid),
+                throws_error("Server code: 404"))
+})
+
+test_that("copy calls REST service properly", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    fakeHttp <- FakeHttpRequest$new(expectedURL)
+    fakeHttpParser <- FakeHttpParser$new()
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$copy("file", "newDestination/file", uuid)
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(fakeHttp$requestHeaderContainsDestinationField, is_true())
+    expect_that(fakeHttp$numberOfCOPYRequests, equals(1))
+})
+
+test_that("copy raises exception if server response code is not between 200 and 300", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$copy("file", "newDestination/file", uuid),
+                throws_error("Server code: 404"))
+})
+
+test_that("getCollectionContent retreives correct content from WebDAV server", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc"
+    returnContent <- list()
+    returnContent$status_code <- 200
+    returnContent$content <- c("animal", "animal/dog", "ball")
+
+    fakeHttp <- FakeHttpRequest$new(expectedURL, returnContent)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    returnResult <- REST$getCollectionContent(uuid)
+    returnedContentMatchExpected <- all.equal(returnResult,
+                                              c("animal", "animal/dog", "ball"))
+
+    expect_that(returnedContentMatchExpected, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+})
+
+test_that("getCollectionContent raises exception if server returns empty response", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- ""
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$getCollectionContent(uuid),
+                throws_error("Response is empty, request may be misconfigured"))
+})
+
+test_that("getCollectionContent parses server response", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    fakeHttpParser <- FakeHttpParser$new()
+    REST <- RESTService$new("token", "https://host/",
+                            FakeHttpRequest$new(), fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$getCollectionContent(uuid)
+
+    expect_that(fakeHttpParser$parserCallCount, equals(1))
+})
+
+test_that("getCollectionContent raises exception if server returns empty response", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- ""
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$getCollectionContent(uuid),
+                throws_error("Response is empty, request may be misconfigured"))
+})
+
+test_that(paste("getCollectionContent raises exception if server",
+                "response code is not between 200 and 300"), {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$getCollectionContent(uuid),
+                throws_error("Server code: 404"))
+})
+
+
+test_that("getResourceSize calls REST service properly", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    response <- list()
+    response$status_code <- 200
+    response$content <- c(6, 2, 931, 12003)
+    fakeHttp <- FakeHttpRequest$new(expectedURL, response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    returnResult <- REST$getResourceSize("file", uuid)
+    returnedContentMatchExpected <- all.equal(returnResult,
+                                              c(6, 2, 931, 12003))
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(returnedContentMatchExpected, is_true())
+})
+
+test_that("getResourceSize raises exception if server returns empty response", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- ""
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$getResourceSize("file", uuid),
+                throws_error("Response is empty, request may be misconfigured"))
+})
+
+test_that(paste("getResourceSize raises exception if server",
+                "response code is not between 200 and 300"), {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$getResourceSize("file", uuid),
+                throws_error("Server code: 404"))
+})
+
+test_that("getResourceSize parses server response", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    fakeHttpParser <- FakeHttpParser$new()
+    REST <- RESTService$new("token", "https://host/",
+                            FakeHttpRequest$new(), fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$getResourceSize("file", uuid)
+
+    expect_that(fakeHttpParser$parserCallCount, equals(1))
+})
+
+test_that("read calls REST service properly", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    serverResponse <- list()
+    serverResponse$status_code <- 200
+    serverResponse$content <- "file content"
+
+    fakeHttp <- FakeHttpRequest$new(expectedURL, serverResponse)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
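+    # The offset/length arguments should be translated into an HTTP Range header.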
+    returnResult <- REST$read("file", uuid, "text", 1024, 512)
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(fakeHttp$requestHeaderContainsRangeField, is_true())
+    expect_that(returnResult, equals("file content"))
+})
+
+test_that("read raises exception if server response code is not between 200 and 300", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$read("file", uuid),
+                throws_error("Server code: 404"))
+})
+
+test_that("read raises exception if contentType is not valid", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    fakeHttp <- FakeHttpRequest$new()
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$read("file", uuid, "some invalid content type"),
+                throws_error("Invalid contentType. Please use text or raw."))
+})
+
+test_that("read parses server response", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    fakeHttpParser <- FakeHttpParser$new()
+    REST <- RESTService$new("token", "https://host/",
+                            FakeHttpRequest$new(), fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$read("file", uuid, "text", 1024, 512)
+
+    expect_that(fakeHttpParser$parserCallCount, equals(1))
+})
+
+test_that("write calls REST service properly", {
+
+    fileContent <- "new file content"
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    fakeHttp <- FakeHttpRequest$new(expectedURL)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    REST$write("file", uuid, fileContent, "text/html")
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestBodyIsProvided, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(fakeHttp$requestHeaderContainsContentTypeField, is_true())
+})
+
+test_that("write raises exception if server response code is not between 200 and 300", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    fileContent <- "new file content"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$write("file", uuid, fileContent, "text/html"),
+                throws_error("Server code: 404"))
+})
+
+test_that("getConnection calls REST service properly", {
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    fakeHttp <- FakeHttpRequest$new()
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    REST$getConnection("file", uuid, "r")
+
+    expect_that(fakeHttp$numberOfgetConnectionCalls, equals(1))
+})
diff --git a/sdk/R/tests/testthat/test-Subcollection.R b/sdk/R/tests/testthat/test-Subcollection.R
new file mode 100644 (file)
index 0000000..a6e4209
--- /dev/null
@@ -0,0 +1,444 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+source("fakes/FakeRESTService.R")
+
+context("Subcollection")
+
+test_that("getRelativePath returns path relative to the tree root", {
+
+    animal <- Subcollection$new("animal")
+
+    fish <- Subcollection$new("fish")
+    animal$add(fish)
+
+    expect_that(animal$getRelativePath(), equals("animal"))
+    expect_that(fish$getRelativePath(), equals("animal/fish"))
+})
+
+test_that(paste("getFileListing by default returns sorted path of all files",
+                "relative to the current subcollection"), {
+
+    animal   <- Subcollection$new("animal")
+    fish     <- Subcollection$new("fish")
+    shark    <- ArvadosFile$new("shark")
+    blueFish <- ArvadosFile$new("blueFish")
+
+    animal$add(fish)
+    fish$add(shark)
+    fish$add(blueFish)
+
+    result <- animal$getFileListing()
+
+    #expect sorted array
+    expectedResult <- c("animal/fish/blueFish", "animal/fish/shark")
+
+    resultsMatch <- length(expectedResult) == length(result) &&
+                    all(expectedResult == result)
+
+    expect_that(resultsMatch, is_true())
+})
+
+test_that(paste("getFileListing returns sorted names of all direct children",
+                "if fullPath is set to FALSE"), {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+    shark  <- ArvadosFile$new("shark")
+    dog    <- ArvadosFile$new("dog")
+
+    animal$add(fish)
+    animal$add(dog)
+    fish$add(shark)
+
+    result <- animal$getFileListing(fullPath = FALSE)
+    expectedResult <- c("dog", "fish")
+
+    resultsMatch <- length(expectedResult) == length(result) &&
+                    all(expectedResult == result)
+
+    expect_that(resultsMatch, is_true())
+})
+
+test_that("add adds content to inside collection tree", {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+    dog    <- ArvadosFile$new("dog")
+
+    animal$add(fish)
+    animal$add(dog)
+
+    animalContainsFish <- animal$get("fish")$getName() == fish$getName()
+    animalContainsDog  <- animal$get("dog")$getName()  == dog$getName()
+
+    expect_that(animalContainsFish, is_true())
+    expect_that(animalContainsDog, is_true())
+})
+
+test_that("add raises exception if content name is empty string", {
+
+    animal     <- Subcollection$new("animal")
+    rootFolder <- Subcollection$new("")
+
+    expect_that(animal$add(rootFolder),
+                throws_error("Content has invalid name.", fixed = TRUE))
+})
+
+test_that(paste("add raises exception if ArvadosFile/Subcollection",
+                "with same name already exists in the subcollection"), {
+
+    animal     <- Subcollection$new("animal")
+    fish       <- Subcollection$new("fish")
+    secondFish <- Subcollection$new("fish")
+    thirdFish  <- ArvadosFile$new("fish")
+
+    animal$add(fish)
+
+    expect_that(animal$add(secondFish),
+                throws_error(paste("Subcollection already contains ArvadosFile or",
+                                   "Subcollection with same name."), fixed = TRUE))
+    expect_that(animal$add(thirdFish),
+                throws_error(paste("Subcollection already contains ArvadosFile or",
+                                   "Subcollection with same name."), fixed = TRUE))
+})
+
+test_that(paste("add raises exception if passed argument is",
+                "not ArvadosFile or Subcollection"), {
+
+    animal <- Subcollection$new("animal")
+    number <- 10
+
+    expect_that(animal$add(number),
+                throws_error(paste("Expected AravodsFile or Subcollection object,",
+                                   "got (numeric)."), fixed = TRUE))
+})
+
+test_that(paste("add post content to a REST service",
+                "if subcollection belongs to a collection"), {
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+
+    collection <- Collection$new(api, "myUUID")
+    animal <- collection$get("animal")
+    dog <- ArvadosFile$new("dog")
+
+    animal$add(dog)
+
+    expect_that(fakeREST$createCallCount, equals(1))
+})
+
+test_that("remove removes content from subcollection", {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+
+    animal$add(fish)
+    animal$remove("fish")
+
+    returnValueAfterRemovalIsNull <- is.null(animal$get("fish"))
+
+    expect_that(returnValueAfterRemovalIsNull, is_true())
+})
+
+test_that(paste("remove raises exception",
+                "if content to remove doesn't exist in the subcollection"), {
+
+    animal <- Subcollection$new("animal")
+
+    expect_that(animal$remove("fish"),
+                throws_error(paste("Subcollection doesn't contains ArvadosFile",
+                                   "or Subcollection with specified name.")))
+})
+
+test_that("remove raises exception if passed argument is not character vector", {
+
+    animal <- Subcollection$new("animal")
+    number <- 10
+
+    expect_that(animal$remove(number),
+                throws_error(paste("Expected character,",
+                                   "got (numeric)."), fixed = TRUE))
+})
+
+test_that(paste("remove removes content from REST service",
+                "if subcollection belongs to a collection"), {
+
+    collectionContent <- c("animal", "animal/fish", "animal/dog")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    animal <- collection$get("animal")
+
+    animal$remove("fish")
+
+    expect_that(fakeREST$deleteCallCount, equals(1))
+})
+
+test_that(paste("get returns ArvadosFile or Subcollection",
+                "if file or folder with given name exists"), {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+    dog    <- ArvadosFile$new("dog")
+
+    animal$add(fish)
+    animal$add(dog)
+
+    returnedFish <- animal$get("fish")
+    returnedDog  <- animal$get("dog")
+
+    returnedFishIsSubcollection <- "Subcollection" %in% class(returnedFish)
+    returnedDogIsArvadosFile    <- "ArvadosFile"   %in% class(returnedDog)
+
+    expect_that(returnedFishIsSubcollection, is_true())
+    expect_that(returnedFish$getName(), equals("fish"))
+
+    expect_that(returnedDogIsArvadosFile, is_true())
+    expect_that(returnedDog$getName(), equals("dog"))
+})
+
+test_that(paste("get returns NULL if file or folder",
+                "with given name doesn't exists"), {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+
+    animal$add(fish)
+
+    returnedDogIsNull <- is.null(animal$get("dog"))
+
+    expect_that(returnedDogIsNull, is_true())
+})
+
+test_that("getFirst returns first child in the subcollection", {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+
+    animal$add(fish)
+
+    expect_that(animal$getFirst()$getName(), equals("fish"))
+})
+
+test_that("getFirst returns NULL if subcollection contains no children", {
+
+    animal <- Subcollection$new("animal")
+
+    returnedElementIsNull <- is.null(animal$getFirst())
+
+    expect_that(returnedElementIsNull, is_true())
+})
+
+test_that(paste("setCollection by default sets collection",
+                "filed of subcollection and all its children"), {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+    animal$add(fish)
+
+    animal$setCollection("myCollection")
+
+    expect_that(animal$getCollection(), equals("myCollection"))
+    expect_that(fish$getCollection(), equals("myCollection"))
+})
+
+test_that(paste("setCollection sets collection filed of subcollection only",
+                "if parameter setRecursively is set to FALSE"), {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+    animal$add(fish)
+
+    animal$setCollection("myCollection", setRecursively = FALSE)
+    fishCollectionIsNull <- is.null(fish$getCollection())
+
+    expect_that(animal$getCollection(), equals("myCollection"))
+    expect_that(fishCollectionIsNull, is_true())
+})
+
+test_that(paste("move raises exception if subcollection",
+                "doesn't belong to any collection"), {
+
+    animal <- Subcollection$new("animal")
+
+    expect_that(animal$move("new/location"),
+                throws_error("Subcollection doesn't belong to any collection"))
+})
+
+test_that("move raises exception if new location contains content with the same name", {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    expect_that(fish$move("fish"),
+                throws_error("Destination already contains content with same name."))
+
+})
+
+test_that(paste("move raises exception if newLocationInCollection",
+                "parameter is invalid"), {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    expect_that(fish$move("objects/dog"),
+                throws_error("Unable to get destination subcollection."))
+})
+
+test_that("move moves subcollection inside collection tree", {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    fish$move("fish")
+    fishIsNullOnOldLocation <- is.null(collection$get("animal/fish"))
+    fishExistsOnNewLocation <- !is.null(collection$get("fish"))
+
+    expect_that(fishIsNullOnOldLocation, is_true())
+    expect_that(fishExistsOnNewLocation, is_true())
+})
+
+test_that(paste("getSizeInBytes returns zero if subcollection",
+                "is not part of a collection"), {
+
+    animal <- Subcollection$new("animal")
+
+    expect_that(animal$getSizeInBytes(), equals(0))
+})
+
+test_that(paste("getSizeInBytes delegates size calculation",
+                "to REST service class"), {
+
+    collectionContent <- c("animal", "animal/fish")
+    returnSize <- 100
+    fakeREST <- FakeRESTService$new(collectionContent, returnSize)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    animal <- collection$get("animal")
+
+    resourceSize <- animal$getSizeInBytes()
+
+    expect_that(resourceSize, equals(100))
+})
+
+test_that(paste("copy raises exception if subcollection",
+                "doesn't belong to any collection"), {
+
+    animal <- Subcollection$new("animal")
+
+    expect_that(animal$copy("new/location"),
+                throws_error("Subcollection doesn't belong to any collection."))
+})
+
+test_that("copy raises exception if new location contains content with the same name", {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    expect_that(fish$copy("fish"),
+                throws_error("Destination already contains content with same name."))
+
+})
+
+test_that(paste("copy raises exception if location parameter is invalid"), {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    expect_that(fish$copy("objects/dog"),
+                throws_error("Unable to get destination subcollection."))
+})
+
+test_that("copy copies subcollection inside collection tree", {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    fish$copy("fish")
+    fishExistsOnOldLocation <- !is.null(collection$get("animal/fish"))
+    fishExistsOnNewLocation <- !is.null(collection$get("fish"))
+
+    expect_that(fishExistsOnOldLocation, is_true())
+    expect_that(fishExistsOnNewLocation, is_true())
+})
+
+test_that("duplicate performs deep cloning of Subcollection", {
+    foo <- ArvadosFile$new("foo")
+    bar <- ArvadosFile$new("bar")
+    sub <- Subcollection$new("qux")
+    sub$add(foo)
+    sub$add(bar)
+
+    newSub1 <- sub$duplicate()
+    newSub2 <- sub$duplicate("quux")
+
+    expect_that(newSub1$getFileListing(), equals(sub$getFileListing()))
+    expect_that(sort(newSub2$getFileListing()), equals(c("quux/bar", "quux/foo")))
+})
diff --git a/sdk/R/tests/testthat/test-util.R b/sdk/R/tests/testthat/test-util.R
new file mode 100644 (file)
index 0000000..419e878
--- /dev/null
@@ -0,0 +1,113 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+context("Utility function")
+
+test_that("listAll always returns all resource items from server", {
+
+    serverResponseLimit <- 3
+    itemsAvailable <- 8
+    items <- list("collection1", "collection2", "collection3", "collection4",
+                  "collection5", "collection6", "collection7", "collection8")
+
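+    # Fake paginated endpoint: serves at most serverResponseLimit items
+    # per call, so listAll must keep requesting until it has all 8.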
+    testFunction <- function(offset, ...)
+    {
+        response <- list()
+        response$items_available <- itemsAvailable
+
+        maxIndex <- offset + serverResponseLimit
+        lastElementIndex <- if(maxIndex < itemsAvailable) maxIndex else itemsAvailable
+
+        response$items <- items[(offset + 1):lastElementIndex]
+        response
+    }
+
+    result <- listAll(testFunction)
+
+    expect_that(length(result), equals(8))
+})
+
+test_that("trimFromStart trims string correctly if string starts with trimCharacters", {
+
+    sample <- "./something/random"
+    trimCharacters <- "./something/"
+
+    result <- trimFromStart(sample, trimCharacters)
+
+    expect_that(result, equals("random"))
+})
+
+test_that("trimFromStart returns original string if string doesn't starts with trimCharacters", {
+
+    sample <- "./something/random"
+    trimCharacters <- "./nothing/"
+
+    result <- trimFromStart(sample, trimCharacters)
+
+    expect_that(result, equals("./something/random"))
+})
+
+test_that("trimFromEnd trims string correctly if string ends with trimCharacters", {
+
+    sample <- "./something/random"
+    trimCharacters <- "/random"
+
+    result <- trimFromEnd(sample, trimCharacters)
+
+    expect_that(result, equals("./something"))
+})
+
+test_that("trimFromEnd returns original string if string doesn't end with trimCharacters", {
+
+    sample <- "./something/random"
+    trimCharacters <- "specific"
+
+    result <- trimFromEnd(sample, trimCharacters)
+
+    expect_that(result, equals("./something/random"))
+})
+
+test_that("RListToPythonList converts nested R list to char representation of Python list", {
+
+    sample <- list("insert", list("random", list("text")), list("here"))
+
+    result              <- RListToPythonList(sample)
+    resultWithSeparator <- RListToPythonList(sample, separator = ",+")
+
+    expect_that(result, equals("[\"insert\", [\"random\", \"text\"], \"here\"]"))
+    expect_that(resultWithSeparator,
+                equals("[\"insert\",+[\"random\",+\"text\"],+\"here\"]"))
+})
+
+test_that("appendToStartIfNotExist appends characters to beginning of a string", {
+
+    sample <- "New Year"
+    charactersToAppend <- "Happy "
+
+    result <- appendToStartIfNotExist(sample, charactersToAppend)
+
+    expect_that(result, equals("Happy New Year"))
+})
+
+test_that(paste("appendToStartIfNotExist returns original string if string",
+                "doesn't start with specified characters"), {
+
+    sample <- "Happy New Year"
+    charactersToAppend <- "Happy"
+
+    result <- appendToStartIfNotExist(sample, charactersToAppend)
+
+    expect_that(result, equals("Happy New Year"))
+})
+
+test_that(paste("splitToPathAndName splits relative path to file/folder",
+                "name and rest of the path"), {
+
+    relativePath <- "path/to/my/file.exe"
+
+    result <- splitToPathAndName(relativePath)
+
+    expect_that(result$name, equals("file.exe"))
+    expect_that(result$path, equals("path/to/my"))
+})
diff --git a/sdk/cli/.gitignore b/sdk/cli/.gitignore
new file mode 100644 (file)
index 0000000..51463cf
--- /dev/null
@@ -0,0 +1,3 @@
+arvados-cli*gem
+tmp
+Gemfile.lock
diff --git a/sdk/cli/Gemfile b/sdk/cli/Gemfile
new file mode 100644 (file)
index 0000000..f34204e
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+source 'https://rubygems.org'
+gemspec
+gem 'minitest', '>= 5.0.0'
+gem 'rake'
diff --git a/sdk/cli/LICENSE-2.0.txt b/sdk/cli/LICENSE-2.0.txt
new file mode 100644 (file)
index 0000000..d645695
--- /dev/null
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/sdk/cli/Rakefile b/sdk/cli/Rakefile
new file mode 100644 (file)
index 0000000..d9aa7ed
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require 'rake/testtask'
+
+Rake::TestTask.new do |t|
+  t.libs << 'test'
+end
+
+desc 'Run tests'
+task default: :test
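+
+# With this Rakefile in place, running plain `rake` (or `rake test`) executes
+# the Rake::TestTask suite (test/test*.rb by default) with test/ on the load
+# path.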
diff --git a/sdk/cli/arvados-cli.gemspec b/sdk/cli/arvados-cli.gemspec
new file mode 100644 (file)
index 0000000..c7e20e2
--- /dev/null
@@ -0,0 +1,45 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+if not File.exist?('/usr/bin/git') then
+  STDERR.puts "\nGit binary not found, aborting. Please install git and run gem build from a checked out copy of the git repository.\n\n"
+  exit 1
+end
+
+git_latest_tag = `git tag -l |sort -V -r |head -n1`
+git_latest_tag = git_latest_tag.encode('utf-8').strip
+git_timestamp, git_hash = `git log -n1 --first-parent --format=%ct:%H .`.chomp.split(":")
+git_timestamp = Time.at(git_timestamp.to_i).utc
+
+Gem::Specification.new do |s|
+  s.name        = 'arvados-cli'
+  s.version     = "#{git_latest_tag}.#{git_timestamp.strftime('%Y%m%d%H%M%S')}"
+  s.date        = git_timestamp.strftime("%Y-%m-%d")
+  s.summary     = "Arvados CLI tools"
+  s.description = "Arvados command line tools, git commit #{git_hash}"
+  s.authors     = ["Arvados Authors"]
+  s.email       = 'gem-dev@curoverse.com'
+  #s.bindir      = '.'
+  s.licenses    = ['Apache-2.0']
+  s.files       = ["bin/arv", "bin/arv-run-pipeline-instance",
+                   "bin/arv-crunch-job", "bin/arv-tag", "bin/crunch-job",
+                   "LICENSE-2.0.txt"]
+  s.executables << "arv"
+  s.executables << "arv-run-pipeline-instance"
+  s.executables << "arv-crunch-job"
+  s.executables << "arv-tag"
+  s.required_ruby_version = '>= 2.1.0'
+  s.add_runtime_dependency 'arvados', '~> 1.3.0', '>= 1.3.0'
+  # Our google-api-client dependency used to be < 0.9, but that could be
+  # satisfied by the buggy 0.9.pre*.  https://dev.arvados.org/issues/9213
+  s.add_runtime_dependency 'cure-google-api-client', '~> 0.6', '>= 0.6.3', '<0.8.9'
+  s.add_runtime_dependency 'activesupport', '>= 3.2.13', '< 5'
+  s.add_runtime_dependency 'json', '>= 1.7.7', '<3'
+  s.add_runtime_dependency 'optimist', '~> 3.0'
+  s.add_runtime_dependency 'andand', '~> 1.3', '>= 1.3.3'
+  s.add_runtime_dependency 'oj', '~> 3.0'
+  s.add_runtime_dependency 'curb', '~> 0.8'
+  s.homepage    = 'https://arvados.org'
+end
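+
+# The version string above is derived from the most recent git tag plus the
+# commit timestamp, so the gem is typically built from a git checkout:
+#   gem build arvados-cli.gemspec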
diff --git a/sdk/cli/bin/arv b/sdk/cli/bin/arv
new file mode 100755 (executable)
index 0000000..7110b4b
--- /dev/null
@@ -0,0 +1,746 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Arvados cli client
+#
+# Ward Vandewege <ward@curoverse.com>
+
+require 'fileutils'
+require 'shellwords'
+
+if RUBY_VERSION < '1.9.3' then
+  abort <<-EOS
+#{$0.gsub(/^\.\//,'')} requires Ruby version 1.9.3 or higher.
+  EOS
+end
+
+begin
+  require 'digest/md5'
+  require 'json'
+  require 'net/http'
+  require 'pp'
+  require 'tempfile'
+  require 'yaml'
+rescue LoadError => error
+  abort "Error loading libraries: #{error}\n"
+end
+
+begin
+  require 'rubygems'
+  # Load the gems with more requirements first, so we respect any version
+  # constraints they put on gems loaded later.
+  require 'arvados/google_api_client'
+  require 'active_support/inflector'
+  require 'andand'
+  require 'curb'
+  require 'oj'
+  require 'optimist'
+rescue LoadError => error
+  abort <<-EOS
+
+Error loading gems: #{error}
+
+Please install all required gems:
+
+  gem install arvados activesupport andand curb json oj optimist
+
+  EOS
+end
+
+# Search for 'ENTRY POINT' to see where things get going
+
+ActiveSupport::Inflector.inflections do |inflect|
+  inflect.irregular 'specimen', 'specimens'
+  inflect.irregular 'human', 'humans'
+end
+
+module Kernel
+  def suppress_warnings
+    original_verbosity = $VERBOSE
+    $VERBOSE = nil
+    result = yield
+    $VERBOSE = original_verbosity
+    return result
+  end
+end
+
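+# Thin Google::APIClient wrapper that defaults the Accept header of every
+# request to JSON.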
+class ArvadosClient < Google::APIClient
+  def execute(*args)
+    if args.last.is_a? Hash
+      args.last[:headers] ||= {}
+      args.last[:headers]['Accept'] ||= 'application/json'
+    end
+    super(*args)
+  end
+end
+
+def init_config
+  # read authentication data from arvados configuration file if present
+  lineno = 0
+  config_file = File.expand_path('~/.config/arvados/settings.conf') rescue nil
+  if not config_file.nil? and File.exist? config_file then
+    File.open(config_file, 'r').each do |line|
+      lineno = lineno + 1
+      # skip comments
+      if line.match('^\s*#') then
+        next
+      end
+      var, val = line.chomp.split('=', 2)
+      # allow environment settings to override config files.
+      if var and val
+        ENV[var] ||= val
+      else
+        warn "#{config_file}: #{lineno}: could not parse `#{line}'"
+      end
+    end
+  end
+end
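+
+# For reference, settings.conf is a list of NAME=value lines (hypothetical
+# values shown below); variables already set in the environment take
+# precedence over the file:
+#
+#   ARVADOS_API_HOST=zzzzz.arvadosapi.com
+#   ARVADOS_API_TOKEN=0123456789abcdef0123456789abcdef01234567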
+
+
+subcommands = %w(copy create edit get keep pipeline run tag ws)
+
+def exec_bin bin, opts
+  bin_path = `which #{bin.shellescape}`.strip
+  if bin_path.empty?
+    raise "#{bin}: command not found"
+  end
+  exec bin_path, *opts
+end
+
+def check_subcommands client, arvados, subcommand, global_opts, remaining_opts
+  case subcommand
+  when 'create'
+    arv_create client, arvados, global_opts, remaining_opts
+  when 'edit'
+    arv_edit client, arvados, global_opts, remaining_opts
+  when 'get'
+    arv_get client, arvados, global_opts, remaining_opts
+  when 'copy', 'tag', 'ws', 'run'
+    exec_bin "arv-#{subcommand}", remaining_opts
+  when 'keep'
+    @sub = remaining_opts.shift
+    if ['get', 'put', 'ls', 'normalize'].index @sub then
+      # Native Arvados
+      exec_bin "arv-#{@sub}", remaining_opts
+    elsif @sub == 'docker'
+      exec_bin "arv-keepdocker", remaining_opts
+    else
+      puts "Usage: arv keep [method] [--parameters]\n"
+      puts "Use 'arv keep [method] --help' to get more information about specific methods.\n\n"
+      puts "Available methods: ls, get, put, docker"
+    end
+    abort
+  when 'pipeline'
+    sub = remaining_opts.shift
+    if sub == 'run'
+      exec_bin "arv-run-pipeline-instance", remaining_opts
+    else
+      puts "Usage: arv pipeline [method] [--parameters]\n"
+      puts "Use 'arv pipeline [method] --help' to get more information about specific methods.\n\n"
+      puts "Available methods: run"
+    end
+    abort
+  end
+end
+
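+# True if +command+ is itself an executable path or is found on $PATH.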
+def command_exists?(command)
+  File.executable?(command) || ENV['PATH'].split(':').any? {|folder| File.executable?(File.join(folder, command))}
+end
+
+def run_editor path
+  pid = Process::fork
+  if pid.nil?
+    editor = nil
+    [ENV["VISUAL"], ENV["EDITOR"], "nano", "vi"].each do |e|
+      editor ||= e if e and command_exists? e
+    end
+    if editor.nil?
+      abort "Could not find any editor to use, please set $VISUAL or $EDITOR to your desired editor."
+    end
+    exec editor, path
+  else
+    Process.wait pid
+  end
+
+  if $?.exitstatus != 0
+    raise "Editor exited with status #{$?.exitstatus}"
+  end
+end
+
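+# Serialize initial_obj in the requested format, open it in the user's
+# editor, and yield the edited object to the block. If parsing or the API
+# call fails, reopen the buffer with the error prepended as comments so the
+# user can fix it and retry.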
+def edit_and_commit_object initial_obj, tmp_stem, global_opts, &block
+
+  content = get_obj_content initial_obj, global_opts
+
+  tmp_file = Tempfile.new([tmp_stem, ".#{global_opts[:format]}"])
+  tmp_file.write(content)
+  tmp_file.close
+
+  begin
+    error_text = ''
+    while true
+      begin
+        run_editor tmp_file.path
+
+        tmp_file.open
+        newcontent = tmp_file.read()
+        tmp_file.close
+
+        # Strip lines starting with '#'
+        newcontent = newcontent.lines.select {|l| !l.start_with? '#'}.join
+
+        # Load the new object
+        newobj = case global_opts[:format]
+                 when 'json'
+                   Oj.load(newcontent)
+                 when 'yaml'
+                   YAML.load(newcontent)
+                 else
+                   abort "Unrecognized format #{global_opts[:format]}"
+                 end
+
+        yield newobj
+
+        break
+      rescue => e
+        can_retry = true
+        if e.is_a? Psych::SyntaxError
+          this_error = "YAML error parsing your input: #{e}"
+        elsif e.is_a? JSON::ParserError or e.is_a? Oj::ParseError
+          this_error = "JSON error parsing your input: #{e}"
+        elsif e.is_a? ArvadosAPIError
+          this_error = "API responded with error #{e}"
+        else
+          this_error = "#{e.class}: #{e}"
+          can_retry = false
+        end
+        puts this_error
+
+        tmp_file.open
+        newcontent = tmp_file.read()
+        tmp_file.close
+
+        if newcontent == error_text or not can_retry
+          FileUtils::cp tmp_file.path, tmp_file.path + ".saved"
+          puts "File is unchanged, edit aborted." if can_retry
+          abort "Saved contents to " + tmp_file.path + ".saved"
+        else
+          tmp_file.open
+          tmp_file.truncate 0
+          error_text = this_error.to_s.lines.map {|l| '# ' + l}.join + "\n"
+          error_text += "# Please fix the error and try again.\n"
+          error_text += newcontent.lines.select {|l| !l.start_with? '#'}.join
+          tmp_file.write error_text
+          tmp_file.close
+        end
+      end
+    end
+  ensure
+    tmp_file.close(true)
+  end
+
+  nil
+end
+
+class ArvadosAPIError < RuntimeError
+end
+
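+# Parse an API response body as JSON, raising ArvadosAPIError for non-200
+# responses.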
+def check_response result
+  begin
+    results = JSON.parse result.body
+  rescue JSON::ParserError, Oj::ParseError => e
+    raise "Failed to parse server response:\n" + e.to_s
+  end
+
+  if result.response.status != 200
+    raise ArvadosAPIError.new("#{result.response.status}: #{
+                              ((results['errors'] && results['errors'].join("\n")) ||
+                                Net::HTTPResponse::CODE_TO_OBJ[result.response.status.to_s].to_s.sub(/^Net::HTTP/, '').titleize)}")
+  end
+
+  results
+end
+
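+# Map a uuid to its API resource by matching the uuid's 5-character type
+# infix against the base36-encoded MD5 tail of each resource's class name.
+# For example, collection uuids (hypothetically
+# "zzzzz-4zz18-0123456789abcde") carry the "4zz18" infix derived from
+# "Collection".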
+def lookup_uuid_rsc arvados, uuid
+  m = /([a-z0-9]{5})-([a-z0-9]{5})-([a-z0-9]{15})/.match uuid
+  if !m
+    if /^[a-f0-9]{32}/.match uuid
+      abort "Arvados collections are not editable."
+    else
+      abort "'#{uuid}' does not appear to be an Arvados uuid"
+    end
+  end
+
+  rsc = nil
+  arvados.discovery_document["resources"].each do |k,v|
+    klass = k.singularize.camelize
+    dig = Digest::MD5.hexdigest(klass).to_i(16).to_s(36)[-5..-1]
+    if dig == m[2]
+      rsc = k
+    end
+  end
+
+  if rsc.nil?
+    abort "Could not determine resource type #{m[2]}"
+  end
+
+  return rsc
+end
+
+def fetch_rsc_obj client, arvados, rsc, uuid, remaining_opts
+
+  begin
+    result = client.execute(:api_method => eval('arvados.' + rsc + '.get'),
+                            :parameters => {"uuid" => uuid},
+                            :authenticated => false,
+                            :headers => {
+                              authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                            })
+    obj = check_response result
+  rescue => e
+    abort "Server error: #{e}"
+  end
+
+  if remaining_opts.length > 0
+    obj.select! { |k, v| remaining_opts.include? k }
+  end
+
+  return obj
+end
+
+def get_obj_content obj, global_opts
+  content = case global_opts[:format]
+            when 'json'
+              Oj.dump(obj, :indent => 1)
+            when 'yaml'
+              obj.to_yaml
+            else
+              abort "Unrecognized format #{global_opts[:format]}"
+            end
+  return content
+end
+
+def arv_edit client, arvados, global_opts, remaining_opts
+  uuid = remaining_opts.shift
+  if uuid.nil? or uuid == "-h" or uuid == "--help"
+    puts head_banner
+    puts "Usage: arv edit [uuid] [fields...]\n\n"
+    puts "Fetch the specified Arvados object, select the specified fields, \n"
+    puts "open an interactive text editor on a text representation (json or\n"
+    puts "yaml, use --format) and then update the object.  Will use 'nano'\n"
+    puts "by default, customize with the EDITOR or VISUAL environment variable.\n"
+    exit 255
+  end
+
+  rsc = lookup_uuid_rsc arvados, uuid
+  oldobj = fetch_rsc_obj client, arvados, rsc, uuid, remaining_opts
+
+  edit_and_commit_object oldobj, uuid, global_opts do |newobj|
+    newobj.select! {|k| newobj[k] != oldobj[k]}
+    if !newobj.empty?
+      result = client.execute(:api_method => eval('arvados.' + rsc + '.update'),
+                     :parameters => {"uuid" => uuid},
+                     :body_object => { rsc.singularize => newobj },
+                     :authenticated => false,
+                     :headers => {
+                       authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                     })
+      results = check_response result
+      STDERR.puts "Updated object #{results['uuid']}"
+    else
+      STDERR.puts "Object is unchanged, did not update."
+    end
+  end
+
+  exit 0
+end
+
+def arv_get client, arvados, global_opts, remaining_opts
+  uuid = remaining_opts.shift
+  if uuid.nil? or uuid == "-h" or uuid == "--help"
+    puts head_banner
+    puts "Usage: arv [--format json|yaml] get [uuid] [fields...]\n\n"
+    puts "Fetch the specified Arvados object, select the specified fields,\n"
+    puts "and print a text representation.\n"
+    exit 255
+  end
+
+  rsc = lookup_uuid_rsc arvados, uuid
+  obj = fetch_rsc_obj client, arvados, rsc, uuid, remaining_opts
+  content = get_obj_content obj, global_opts
+
+  puts content
+  exit 0
+end
+
+def arv_create client, arvados, global_opts, remaining_opts
+  types = resource_types(arvados.discovery_document)
+  create_opts = Optimist::options do
+    opt :project_uuid, "Project uuid in which to create the object", :type => :string
+    stop_on resource_types(arvados.discovery_document)
+  end
+
+  object_type = remaining_opts.shift
+  if object_type.nil?
+    abort "Missing resource type, must be one of #{types.join ', '}"
+  end
+
+  rsc = arvados.discovery_document["resources"].keys.select { |k| object_type == k.singularize }
+  if rsc.empty?
+    abort "Could not determine resource type #{object_type}"
+  end
+  rsc = rsc.first
+
+  discovered_params = arvados.discovery_document["resources"][rsc]["methods"]["create"]["parameters"]
+  method_opts = Optimist::options do
+    banner head_banner
+    banner "Usage: arv create [--project-uuid] #{object_type} [create parameters]"
+    banner ""
+    banner "This method supports the following parameters:"
+    banner ""
+    discovered_params.each do |k,v|
+      opts = Hash.new()
+      opts[:type] = v["type"].to_sym if v.include?("type")
+      if [:datetime, :text, :object, :array].index opts[:type]
+        opts[:type] = :string  # Optimist doesn't support these types; treat as string
+      end
+      opts[:default] = v["default"] if v.include?("default")
+      opts[:default] = v["default"].to_i if opts[:type] == :integer
+      opts[:default] = to_boolean(v["default"]) if opts[:type] == :boolean
+      opts[:required] = true if v.include?("required") and v["required"]
+      description = ''
+      description = '  ' + v["description"] if v.include?("description")
+      opt k.to_sym, description, opts
+    end
+  end
+
+  initial_obj = {}
+  if create_opts[:project_uuid]
+    initial_obj["owner_uuid"] = create_opts[:project_uuid]
+  end
+
+  edit_and_commit_object initial_obj, "", global_opts do |newobj|
+    result = client.execute(:api_method => eval('arvados.' + rsc + '.create'),
+                   :parameters => method_opts,
+                   :body_object => {object_type => newobj},
+                   :authenticated => false,
+                   :headers => {
+                     authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                   })
+    results = check_response result
+    puts "Created object #{results['uuid']}"
+  end
+
+  exit 0
+end
+
+def to_boolean(s)
+  !!(s =~ /^(true|t|yes|y|1)$/i)
+end
+
+def head_banner
+  "Arvados command line client\n"
+end
+
+def help_methods(discovery_document, resource, method=nil)
+  banner = head_banner
+  banner += "Usage: arv #{resource} [method] [--parameters]\n"
+  banner += "Use 'arv #{resource} [method] --help' to get more information about specific methods.\n\n"
+  banner += "The #{resource} resource supports the following methods:"
+  banner += "\n\n"
+  discovery_document["resources"][resource.pluralize]["methods"].
+    each do |k,v|
+    description = ''
+    if v.include? "description"
+      # add only the first line of the discovery doc description
+      description = '  ' + v["description"].split("\n").first.chomp
+    end
+    banner += "   #{sprintf("%20s",k)}#{description}\n"
+  end
+  banner += "\n"
+  STDERR.puts banner
+
+  if not method.nil? and method != '--help' and method != '-h' then
+    abort "Unknown method #{method.inspect} " +
+                  "for resource #{resource.inspect}"
+  end
+  exit 255
+end
+
+def help_resources(option_parser, discovery_document, resource)
+  option_parser.educate
+  exit 255
+end
+
+def resource_types discovery_document
+  discovery_document["resources"].keys.map { |k| k.singularize }
+end
+
+def parse_arguments(discovery_document, subcommands)
+  resources_and_subcommands = resource_types(discovery_document) + subcommands
+
+  option_parser = Optimist::Parser.new do
+    version __FILE__
+    banner head_banner
+    banner "Usage: arv [--flags] subcommand|resource [method] [--parameters]"
+    banner ""
+    banner "Available flags:"
+
+    opt :dry_run, "Don't actually do anything", :short => "-n"
+    opt :verbose, "Print some things on stderr"
+    opt :format,
+        "Set the output format. Must be one of json (default), yaml or uuid.",
+        :type => :string,
+        :default => 'json'
+    opt :short, "Return only UUIDs (equivalent to --format=uuid)"
+
+    banner ""
+    banner "Use 'arv subcommand|resource --help' to get more information about a particular command or resource."
+    banner ""
+    banner "Available subcommands: #{subcommands.join(', ')}"
+    banner ""
+
+    banner "Available resources: #{discovery_document['resources'].keys.map { |k| k.singularize }.join(', ')}"
+
+    banner ""
+    banner "Additional options:"
+
+    conflicts :short, :format
+    stop_on resources_and_subcommands
+  end
+
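+# Example (hypothetical uuid): `arv --format yaml collection get --uuid
+# zzzzz-4zz18-0123456789abcde` resolves "collection" to the collections
+# resource and "get" to a method from the discovery document.
+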
+  global_opts = Optimist::with_standard_exception_handling option_parser do
+    o = option_parser.parse ARGV
+  end
+
+  unless %w(json yaml uuid).include?(global_opts[:format])
+    $stderr.puts "#{$0}: --format must be one of json, yaml or uuid."
+    $stderr.puts "Use #{$0} --help for more information."
+    abort
+  end
+
+  if global_opts[:short]
+    global_opts[:format] = 'uuid'
+  end
+
+  resource = ARGV.shift
+
+  if not subcommands.include? resource
+    if not resources_and_subcommands.include?(resource)
+      puts "Resource or subcommand '#{resource}' is not recognized.\n\n" if !resource.nil?
+      help_resources(option_parser, discovery_document, resource)
+    end
+
+    method = ARGV.shift
+    if not (discovery_document["resources"][resource.pluralize]["methods"].
+            include?(method))
+      help_methods(discovery_document, resource, method)
+    end
+
+    discovered_params = discovery_document\
+    ["resources"][resource.pluralize]\
+    ["methods"][method]["parameters"]
+    method_opts = Optimist::options do
+      banner head_banner
+      banner "Usage: arv #{resource} #{method} [--parameters]"
+      banner ""
+      banner "This method supports the following parameters:"
+      banner ""
+      discovered_params.each do |k,v|
+        opts = Hash.new()
+        opts[:type] = v["type"].to_sym if v.include?("type")
+        if [:datetime, :text, :object, :array].index opts[:type]
+          opts[:type] = :string  # Optimist doesn't support these types; treat as string
+        end
+        opts[:default] = v["default"] if v.include?("default")
+        opts[:default] = v["default"].to_i if opts[:type] == :integer
+        opts[:default] = to_boolean(v["default"]) if opts[:type] == :boolean
+        opts[:required] = true if v.include?("required") and v["required"]
+        description = ''
+        description = '  ' + v["description"] if v.include?("description")
+        opt k.to_sym, description, opts
+      end
+
+      body_object = discovery_document["resources"][resource.pluralize]["methods"][method]["request"]
+      if body_object and discovered_params[resource].nil?
+        is_required = true
+        if body_object["required"] == false
+          is_required = false
+        end
+        resource_opt_desc = "Either a string representing #{resource} as JSON or a filename from which to read #{resource} JSON (use '-' to read from stdin)."
+        if is_required
+          resource_opt_desc += " This option must be specified."
+        end
+        opt resource.to_sym, resource_opt_desc, {
+          required: is_required,
+          type: :string
+        }
+      end
+    end
+
+    discovered_params.merge({resource => {'type' => 'object'}}).each do |k,v|
+      k = k.to_sym
+      if ['object', 'array'].index(v["type"]) and method_opts.has_key? k
+        if method_opts[k].andand.match /^\//
+          method_opts[k] = File.open method_opts[k], 'rb' do |f| f.read end
+        end
+      end
+    end
+  end
+
+  return resource, method, method_opts, global_opts, ARGV
+end
+
+#
+# ENTRY POINT
+#
+
+init_config
+
+ENV['ARVADOS_API_VERSION'] ||= 'v1'
+
+if not ENV.include?('ARVADOS_API_HOST') or not ENV.include?('ARVADOS_API_TOKEN') then
+  abort <<-EOS
+ARVADOS_API_HOST and ARVADOS_API_TOKEN need to be defined as environment variables.
+  EOS
+end
+
+# do this if you're testing with a dev server and you don't care about SSL certificate checks:
+if ENV['ARVADOS_API_HOST_INSECURE']
+  suppress_warnings { OpenSSL::SSL::VERIFY_PEER = OpenSSL::SSL::VERIFY_NONE }
+end
+
+begin
+  client = ArvadosClient.new(:host => ENV['ARVADOS_API_HOST'], :application_name => 'arvados-cli', :application_version => '1.0')
+  arvados = client.discovered_api('arvados', ENV['ARVADOS_API_VERSION'])
+rescue Exception => e
+  puts "Failed to connect to Arvados API server: #{e}"
+  exit 1
+end
+
+# Parse arguments here
+resource_schema, method, method_opts, global_opts, remaining_opts = parse_arguments(arvados.discovery_document, subcommands)
+
+check_subcommands client, arvados, resource_schema, global_opts, remaining_opts
+
+controller = resource_schema.pluralize
+
+api_method = 'arvados.' + controller + '.' + method
+
+if global_opts[:dry_run]
+  if global_opts[:verbose]
+    $stderr.puts "#{api_method} #{method_opts.inspect}"
+  end
+  exit
+end
+
+request_parameters = {_profile:true}.merge(method_opts)
+resource_body = request_parameters.delete(resource_schema.to_sym)
+if resource_body
+  # check if resource_body is valid JSON by attempting to parse it
+  resource_body_is_json = true
+  begin
+    # we don't actually need the results of the parsing,
+    # just checking for the JSON::ParserError exception
+    JSON.parse resource_body
+  rescue JSON::ParserError => e
+    resource_body_is_json = false
+  end
+  resource_body_is_readable_file = false
+  # if resource_body is not valid JSON, it should be a filename (or '-' for stdin)
+  if resource_body == '-'
+    resource_body_is_readable_file = true
+    resource_body_file = $stdin
+  elsif File.readable? resource_body
+    resource_body_is_readable_file = true
+    resource_body_file = File.open(resource_body, 'r')
+  end
+  if resource_body_is_json and resource_body_is_readable_file
+    abort "Argument specified for option '--#{resource_schema.to_sym}' is both valid JSON and a readable file. Please consider renaming the file: '#{resource_body}'"
+  elsif !resource_body_is_json and !resource_body_is_readable_file
+    if File.exist? resource_body
+      # specified file exists but is not readable
+      abort "Argument specified for option '--#{resource_schema.to_sym}' is an existing file but is not readable. Please check permissions on: '#{resource_body}'"
+    else
+      # specified file does not exist
+      abort "Argument specified for option '--#{resource_schema.to_sym}' is neither valid JSON nor an existing file: '#{resource_body}'"
+    end
+  elsif resource_body_is_readable_file
+    resource_body = resource_body_file.read()
+    begin
+      # we don't actually need the results of the parsing,
+      # just checking for the JSON::ParserError exception
+      JSON.parse resource_body
+    rescue JSON::ParserError => e
+      abort "Contents of file '#{resource_body_file.path}' is not valid JSON: #{e}"
+    end
+    resource_body_file.close()
+  end
+  request_body = {
+    resource_schema => resource_body
+  }
+else
+  request_body = nil
+end
+
+case api_method
+when 'arvados.jobs.log_tail_follow'
+
+  # Special case for methods that respond with data streams rather
+  # than JSON (TODO: use the discovery document instead of a static
+  # list of methods)
+  uri_s = eval(api_method).generate_uri(request_parameters)
+  Curl::Easy.perform(uri_s) do |curl|
+    curl.headers['Accept'] = 'text/plain'
+    curl.headers['Authorization'] = "OAuth2 #{ENV['ARVADOS_API_TOKEN']}"
+    if ENV['ARVADOS_API_HOST_INSECURE']
+      curl.ssl_verify_peer = false
+      curl.ssl_verify_host = false
+    end
+    if global_opts[:verbose]
+      curl.on_header { |data| $stderr.write data }
+    end
+    curl.on_body { |data| $stdout.write data }
+  end
+  exit 0
+else
+  result = client.execute(:api_method => eval(api_method),
+                          :parameters => request_parameters,
+                          :body_object => request_body,
+                          :authenticated => false,
+                          :headers => {
+                            authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                          })
+end
+
+begin
+  results = JSON.parse result.body
+rescue JSON::ParserError => e
+  abort "Failed to parse server response:\n" + e.to_s
+end
+
+if results["errors"] then
+  abort "Error: #{results["errors"][0]}"
+end
+
+case global_opts[:format]
+when 'json'
+  puts Oj.dump(results, :indent => 1)
+when 'yaml'
+  puts results.to_yaml
+else
+  if results["items"] and results["kind"].match /list$/i
+    results['items'].each do |i| puts i['uuid'] end
+  elsif results['uuid'].nil?
+    abort("Response did not include a uuid:\n" +
+          Oj.dump(results, :indent => 1) +
+          "\n")
+  else
+    puts results['uuid']
+  end
+end
diff --git a/sdk/cli/bin/arv-copy b/sdk/cli/bin/arv-copy
new file mode 120000 (symlink)
index 0000000..1ad64f4
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-copy
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-crunch-job b/sdk/cli/bin/arv-crunch-job
new file mode 100755 (executable)
index 0000000..6e4b5e0
--- /dev/null
@@ -0,0 +1,6 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+exec File.join(File.dirname(File.realpath(__FILE__)), 'crunch-job'), *ARGV
diff --git a/sdk/cli/bin/arv-get b/sdk/cli/bin/arv-get
new file mode 120000 (symlink)
index 0000000..bfd8274
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-get
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-keepdocker b/sdk/cli/bin/arv-keepdocker
new file mode 120000 (symlink)
index 0000000..f35d645
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-keepdocker
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-ls b/sdk/cli/bin/arv-ls
new file mode 120000 (symlink)
index 0000000..64613d9
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-ls
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-mount b/sdk/cli/bin/arv-mount
new file mode 120000 (symlink)
index 0000000..7ad787e
--- /dev/null
@@ -0,0 +1 @@
+../../../services/fuse/bin/arv-mount
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-normalize b/sdk/cli/bin/arv-normalize
new file mode 120000 (symlink)
index 0000000..beee344
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-normalize
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-put b/sdk/cli/bin/arv-put
new file mode 120000 (symlink)
index 0000000..487caf4
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-put
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-run-pipeline-instance b/sdk/cli/bin/arv-run-pipeline-instance
new file mode 100755 (executable)
index 0000000..336b1a2
--- /dev/null
@@ -0,0 +1,781 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class WhRunPipelineInstance
+end
+
+if RUBY_VERSION < '1.9.3' then
+  abort <<-EOS
+#{$0.gsub(/^\.\//,'')} requires Ruby version 1.9.3 or higher.
+  EOS
+end
+
+begin
+  require 'arvados'
+  require 'rubygems'
+  require 'json'
+  require 'pp'
+  require 'optimist'
+  require 'google/api_client'
+rescue LoadError => l
+  $stderr.puts $:
+  abort <<-EOS
+#{$0}: fatal: #{l.message}
+Some runtime dependencies may be missing.
+Try: gem install arvados pp google-api-client json optimist
+  EOS
+end
+
+def debuglog(message, verbosity=1)
+  $stderr.puts "#{File.split($0).last} #{$$}: #{message}" if $debuglevel >= verbosity
+end
+
+# Parse command line options (the kind that control the behavior of
+# this program, that is, not the pipeline component parameters).
+
+p = Optimist::Parser.new do
+  version __FILE__
+  banner(<<EOF)
+
+Usage:
+  arv-run-pipeline-instance --template TEMPLATE_UUID [options] [--] [parameters]
+  arv-run-pipeline-instance --instance INSTANCE_UUID [options] [--] [parameters]
+
+Parameters:
+  param_name=param_value
+  param_name param_value
+                         Set (or override) the default value for every
+                         pipeline component parameter with the given
+                         name.
+
+  component_name::param_name=param_value
+  component_name::param_name param_value
+  --component_name::param_name=param_value
+  --component_name::param_name param_value
+                         Set the value of a parameter for a single
+                         pipeline component.
+
+Options:
+EOF
+  opt(:dry_run,
+      "Do not start any new jobs or wait for existing jobs to finish. Just find out whether jobs are finished, queued, or running for each component.",
+      :type => :boolean,
+      :short => :n)
+  opt(:status_text,
+      "Store plain text status in given file.",
+      :short => :none,
+      :type => :string,
+      :default => '/dev/stdout')
+  opt(:status_json,
+      "Store json-formatted pipeline in given file.",
+      :short => :none,
+      :type => :string,
+      :default => '/dev/null')
+  opt(:no_wait,
+      "Do not wait for jobs to finish. Just look up status, submit new jobs if needed, and exit.",
+      :short => :none,
+      :type => :boolean)
+  opt(:no_reuse,
+      "Do not reuse existing jobs to satisfy pipeline components. Submit a new job for every component.",
+      :short => :none,
+      :type => :boolean)
+  opt(:debug,
+      "Print extra debugging information on stderr.",
+      :type => :boolean)
+  opt(:debug_level,
+      "Set debug verbosity level.",
+      :short => :none,
+      :type => :integer)
+  opt(:template,
+      "UUID of pipeline template, or path to local pipeline template file.",
+      :short => :none,
+      :type => :string)
+  opt(:instance,
+      "UUID of pipeline instance.",
+      :short => :none,
+      :type => :string)
+  opt(:submit,
+      "Submit the pipeline instance to the server, and exit. Let the Crunch dispatch service satisfy the components by finding/running jobs.",
+      :short => :none,
+      :type => :boolean)
+  opt(:run_pipeline_here,
+      "Manage the pipeline instance in-process. Submit jobs to Crunch as needed. Do not exit until the pipeline finishes (or fails).",
+      :short => :none,
+      :type => :boolean)
+  opt(:run_jobs_here,
+      "Run jobs in the local terminal session instead of submitting them to Crunch. Implies --run-pipeline-here. Note: this results in a significantly different job execution environment, and some Crunch features are not supported. It can be necessary to modify a pipeline in order to make it run this way.",
+      :short => :none,
+      :type => :boolean)
+  opt(:run_here,
+      "Synonym for --run-jobs-here.",
+      :short => :none,
+      :type => :boolean)
+  opt(:description,
+      "Description for the pipeline instance.",
+      :short => :none,
+      :type => :string)
+  opt(:project_uuid,
+      "UUID of the project for the pipeline instance.",
+      short: :none,
+      type: :string)
+  stop_on [:'--']
+end
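+
+# Example (hypothetical uuids): run a stored template in-process, overriding
+# one pipeline parameter:
+#   arv-run-pipeline-instance --run-pipeline-here \
+#     --template zzzzz-p5p6p-0123456789abcde input=zzzzz-4zz18-0123456789abcde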
+$options = Optimist::with_standard_exception_handling p do
+  p.parse ARGV
+end
+$debuglevel = $options[:debug_level] || ($options[:debug] && 1) || 0
+
+$options[:run_jobs_here] ||= $options[:run_here] # old flag name
+$options[:run_pipeline_here] ||= $options[:run_jobs_here] # B requires A
+
+if $options[:instance]
+  if $options[:template] or $options[:submit]
+    abort "#{$0}: syntax error: --instance cannot be combined with --template or --submit."
+  end
+elsif not $options[:template]
+  $stderr.puts "error: you must supply a --template or --instance."
+  p.educate
+  abort
+end
+
+if $options[:run_pipeline_here] == $options[:submit]
+  abort "#{$0}: error: you must supply --run-pipeline-here, --run-jobs-here, or --submit."
+end
+
+# Set up the API client.
+
+$arv = Arvados.new api_version: 'v1'
+$client = $arv.client
+$arvados = $arv.arvados_api
+
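+# Thin wrapper around the pipeline_instances API that tracks locally
+# modified attributes and writes only those back on save().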
+class PipelineInstance
+  def self.find(uuid)
+    result = $client.execute(:api_method => $arvados.pipeline_instances.get,
+                             :parameters => {
+                               :uuid => uuid
+                             },
+                             :authenticated => false,
+                             :headers => {
+                               authorization: 'OAuth2 '+$arv.config['ARVADOS_API_TOKEN']
+                             })
+    j = JSON.parse result.body, :symbolize_names => true
+    unless j.is_a? Hash and j[:uuid]
+      debuglog "Failed to get pipeline_instance: #{j[:errors] rescue nil}", 0
+      nil
+    else
+      debuglog "Retrieved pipeline_instance #{j[:uuid]}"
+      self.new(j)
+    end
+  end
+  def self.create(attributes)
+    result = $client.execute(:api_method => $arvados.pipeline_instances.create,
+                             :body_object => {
+                               :pipeline_instance => attributes
+                             },
+                             :authenticated => false,
+                             :headers => {
+                               authorization: 'OAuth2 '+$arv.config['ARVADOS_API_TOKEN']
+                             })
+    j = JSON.parse result.body, :symbolize_names => true
+    unless j.is_a? Hash and j[:uuid]
+      abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nFailed to create pipeline_instance: #{j[:errors] rescue nil} #{j.inspect}"
+    end
+    debuglog "Created pipeline instance: #{j[:uuid]}"
+    self.new(j)
+  end
+  def save
+    result = $client.execute(:api_method => $arvados.pipeline_instances.update,
+                             :parameters => {
+                               :uuid => @pi[:uuid]
+                             },
+                             :body_object => {
+                               :pipeline_instance => @attributes_to_update
+                             },
+                             :authenticated => false,
+                             :headers => {
+                               authorization: 'OAuth2 '+$arv.config['ARVADOS_API_TOKEN']
+                             })
+    j = JSON.parse result.body, :symbolize_names => true
+    unless j.is_a? Hash and j[:uuid]
+      debuglog "Failed to save pipeline_instance: #{j[:errors] rescue nil}", 0
+      nil
+    else
+      @attributes_to_update = {}
+      @pi = j
+    end
+  end
+  def []=(x,y)
+    @attributes_to_update[x] = y
+    @pi[x] = y
+  end
+  def [](x)
+    @pi[x]
+  end
+
+  def log_stderr(msg)
+    $arv.log.create log: {
+      event_type: 'stderr',
+      object_uuid: self[:uuid],
+      owner_uuid: self[:owner_uuid],
+      properties: {"text" => msg},
+    }
+  end
+
+  protected
+  def initialize(j)
+    @attributes_to_update = {}
+    @pi = j
+  end
+end
+
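+# Wraps the jobs API (get/list/create) and caches job records by uuid.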
+class JobCache
+  def self.get(uuid)
+    @cache ||= {}
+    result = $client.execute(:api_method => $arvados.jobs.get,
+                             :parameters => {
+                               :uuid => uuid
+                             },
+                             :authenticated => false,
+                             :headers => {
+                               authorization: 'OAuth2 '+$arv.config['ARVADOS_API_TOKEN']
+                             })
+    @cache[uuid] = JSON.parse result.body, :symbolize_names => true
+  end
+  def self.where(conditions)
+    result = $client.execute(:api_method => $arvados.jobs.list,
+                             :parameters => {
+                               :limit => 10000,
+                               :where => conditions.to_json
+                             },
+                             :authenticated => false,
+                             :headers => {
+                               authorization: 'OAuth2 '+$arv.config['ARVADOS_API_TOKEN']
+                             })
+    list = JSON.parse result.body, :symbolize_names => true
+    if list and list[:items].is_a? Array
+      list[:items]
+    else
+      []
+    end
+  end
+
+  # create() returns [job, exception]. If both job and exception are
+  # nil, there was a non-retryable error and the call should not be
+  # attempted again.
+  def self.create(pipeline, component, job, create_params)
+    @cache ||= {}
+
+    body = {job: no_nil_values(job)}.merge(no_nil_values(create_params))
+
+    result = nil
+    begin
+      result = $client.execute(
+        :api_method => $arvados.jobs.create,
+        :body_object => body,
+        :authenticated => false,
+        :headers => {
+          authorization: 'OAuth2 '+$arv.config['ARVADOS_API_TOKEN']
+        })
+      if result.status == 429 || result.status >= 500
+        raise Exception.new("HTTP status #{result.status}")
+      end
+    rescue Exception => e
+      return nil, e
+    end
+    j = JSON.parse(result.body, :symbolize_names => true) rescue nil
+    if result.status == 200 && j.is_a?(Hash) && j[:uuid]
+      @cache[j[:uuid]] = j
+      return j, nil
+    else
+      errors = j[:errors] rescue []
+      debuglog "create job: [#{result.status}] #{errors.inspect} with attributes #{body}", 0
+
+      msg = ""
+      errors.each do |err|
+        msg += "Error creating job for component #{component}: #{err}\n"
+      end
+      msg += "Job submission was: #{body.to_json}"
+
+      pipeline.log_stderr(msg)
+      return nil, nil
+    end
+  end
+
+  protected
+
+  def self.no_nil_values(hash)
+    hash.reject { |key, value| value.nil? }
+  end
+end
+
+class WhRunPipelineInstance
+  attr_reader :instance
+
+  def initialize(_options)
+    @options = _options
+  end
+
+  def fetch_template(template)
+    if template.match /[^-0-9a-z]/
+      # Doesn't look like a uuid -- use it as a filename.
+      @template = JSON.parse File.read(template), :symbolize_names => true
+    else
+      result = $client.execute(:api_method => $arvados.pipeline_templates.get,
+                               :parameters => {
+                                 :uuid => template
+                               },
+                               :authenticated => false,
+                               :headers => {
+                                 authorization: 'OAuth2 '+$arv.config['ARVADOS_API_TOKEN']
+                               })
+      @template = JSON.parse result.body, :symbolize_names => true
+      if !@template[:uuid]
+        abort "#{$0}: fatal: failed to retrieve pipeline template #{template} #{@template[:errors].inspect rescue nil}"
+      end
+    end
+    self
+  end
+
+  def fetch_instance(instance_uuid)
+    @instance = PipelineInstance.find(instance_uuid)
+    @template = @instance
+    self
+  end
+
+  def apply_parameters(params_args)
+    params_args.shift if params_args[0] == '--'
+    params = {}
+    while !params_args.empty?
+      if (re = params_args[0].match /^(--)?([^-].*?)=(.+)/)
+        params[re[2]] = re[3]
+        params_args.shift
+      elsif params_args.size > 1
+        param = params_args.shift.sub /^--/, ''
+        params[param] = params_args.shift
+      else
+        abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nSyntax error: I do not know what to do with arg \"#{params_args[0]}\""
+      end
+    end
+
+    if not @template[:components].is_a?(Hash)
+      abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nSyntax error: Template missing \"components\" hash"
+    end
+    @components = @template[:components].dup
+
+    bad_components = @components.each_pair.select do |cname, cspec|
+      not cspec.is_a?(Hash)
+    end
+    if bad_components.any?
+      abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nSyntax error: Components not specified with hashes: #{bad_components.map(&:first).join(', ')}"
+    end
+
+    bad_components = @components.each_pair.select do |cname, cspec|
+      not cspec[:script_parameters].is_a?(Hash)
+    end
+    if bad_components.any?
+      abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nSyntax error: Components missing \"script_parameters\" hashes: #{bad_components.map(&:first).join(', ')}"
+    end
+
+    errors = []
+    @components.each do |componentname, component|
+      component[:script_parameters].each do |parametername, parameter|
+        parameter = { :value => parameter } unless parameter.is_a? Hash
+        if params.has_key?("#{componentname}::#{parametername}")
+          value = params["#{componentname}::#{parametername}"]
+        elsif parameter.has_key?(:value)
+          value = parameter[:value]
+        elsif parameter.has_key?(:output_of)
+          if !@components[parameter[:output_of].intern]
+            errors << [componentname, parametername, "output_of refers to nonexistent component '#{parameter[:output_of]}'"]
+          else
+            # value will be filled in later when the upstream
+            # component's output becomes known
+          end
+          next
+        elsif params.has_key?(parametername.to_s)
+          value = params[parametername.to_s]
+        elsif parameter.has_key?(:default)
+          value = parameter[:default]
+        elsif [false, 'false', 0, '0'].index(parameter[:required])
+          value = nil
+        else
+          errors << [componentname, parametername, "required parameter is missing"]
+          next
+        end
+        debuglog "parameter #{componentname}::#{parametername} == #{value}"
+
+        component[:script_parameters][parametername] =
+          parameter.dup.merge(value: value)
+      end
+    end
+    if !errors.empty?
+      all_errors = errors.collect do |c,p,e|
+        "#{c}::#{p} - #{e}\n"
+      end.join("")
+      abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nErrors:\n#{all_errors}"
+    end
+    debuglog "options=" + @options.pretty_inspect
+    self
+  end
+
+  def setup_instance
+    if @instance
+      @instance[:properties][:run_options] ||= {}
+      if @options[:no_reuse]
+        # override properties of existing instance
+        @instance[:properties][:run_options][:enable_job_reuse] = false
+      else
+        # Default to "enable reuse" if not specified. (This code path
+        # can go away when old clients go away.)
+        if @instance[:properties][:run_options][:enable_job_reuse].nil?
+          @instance[:properties][:run_options][:enable_job_reuse] = true
+        end
+      end
+    else
+      description = $options[:description] ||
+                    ("Created at #{Time.now.localtime}" + (@template[:name].andand.size.andand>0 ? " using the pipeline template *#{@template[:name]}*" : ""))
+      instance_body = {
+        components: @components,
+        properties: {
+          run_options: {
+            enable_job_reuse: !@options[:no_reuse]
+          }
+        },
+        pipeline_template_uuid: @template[:uuid],
+        description: description,
+        state: ($options[:submit] ? 'RunningOnServer' : 'RunningOnClient')
+      }
+      if @options[:project_uuid]
+        instance_body[:owner_uuid] = @options[:project_uuid]
+      end
+      @instance = PipelineInstance.create(instance_body)
+    end
+    self
+  end
+
+  def run
+    moretodo = true
+    interrupted = false
+
+    if @instance[:started_at].nil?
+      @instance[:started_at] = Time.now
+    end
+
+    job_creation_failed = 0
+    while moretodo
+      moretodo = false
+      @components.each do |cname, c|
+        job = nil
+        owner_uuid = @instance[:owner_uuid]
+        # Is the job satisfying this component already known to be
+        # finished? (Already meaning "before we query API server about
+        # the job's current state")
+        c_already_finished = (c[:job] &&
+                              c[:job][:uuid] &&
+                              ["Complete", "Failed", "Cancelled"].include?(c[:job][:state]))
+        if !c[:job] and
+            c[:script_parameters].select { |pname, p| p.is_a? Hash and p[:output_of]}.empty?
+          # No job is yet associated with this component, and its inputs are
+          # fully specified (any output_of script_parameters have been
+          # resolved to real values).
+          my_submit_id = "instance #{@instance[:uuid]} rand #{rand(2**64).to_s(36)}"
+          job, err = JobCache.create(@instance, cname, {
+            :script => c[:script],
+            :script_parameters => Hash[c[:script_parameters].map do |key, spec|
+                                         [key, spec[:value]]
+                                       end],
+            :script_version => c[:script_version],
+            :repository => c[:repository],
+            :nondeterministic => c[:nondeterministic],
+            :runtime_constraints => c[:runtime_constraints],
+            :owner_uuid => owner_uuid,
+            :is_locked_by_uuid => (@options[:run_jobs_here] ? owner_uuid : nil),
+            :submit_id => my_submit_id,
+            :state => (if @options[:run_jobs_here] then "Running" else "Queued" end)
+          }, {
+            # This is the right place to put these attributes when
+            # dealing with new API servers.
+            :minimum_script_version => c[:minimum_script_version],
+            :exclude_script_versions => c[:exclude_minimum_script_versions],
+            :find_or_create => (@instance[:properties][:run_options].andand[:enable_job_reuse] &&
+                                !c[:nondeterministic]),
+            :filters => c[:filters]
+          })
+          if job
+            debuglog "component #{cname} new job #{job[:uuid]}"
+            c[:job] = job
+            c[:run_in_process] = (@options[:run_jobs_here] and
+                                  job[:submit_id] == my_submit_id)
+          elsif err.nil?
+            debuglog "component #{cname} new job failed", 0
+            job_creation_failed += 1
+          else
+            debuglog "component #{cname} new job failed, err=#{err}", 0
+          end
+        end
+
+        if c[:job] and c[:run_in_process] and not ["Complete", "Failed", "Cancelled"].include? c[:job][:state]
+          report_status
+          begin
+            require 'open3'
+            Open3.popen3("arv-crunch-job", "--force-unlock",
+                         "--job", c[:job][:uuid]) do |stdin, stdout, stderr, wait_thr|
+              debuglog "arv-crunch-job pid #{wait_thr.pid} started", 0
+              stdin.close
+              while true
+                rready, wready, = IO.select([stdout, stderr], [])
+                break if !rready[0]
+                begin
+                  buf = rready[0].read_nonblock(2**20)
+                rescue EOFError
+                  break
+                end
+                (rready[0] == stdout ? $stdout : $stderr).write(buf)
+              end
+              stdout.close
+              stderr.close
+              debuglog "arv-crunch-job pid #{wait_thr.pid} exit #{wait_thr.value.to_i}", 0
+            end
+            if not $arv.job.get(uuid: c[:job][:uuid])[:finished_at]
+              raise Exception.new("arv-crunch-job did not set finished_at.")
+            end
+          rescue Exception => e
+            debuglog "Interrupted (#{e}). Failing job.", 0
+            $arv.job.update(uuid: c[:job][:uuid],
+                            job: {
+                              state: "Failed"
+                            })
+          end
+        end
+
+        if c[:job] and c[:job][:uuid]
+          if ["Running", "Queued"].include?(c[:job][:state])
+            # Job is running (or may be soon) so update copy of job record
+            c[:job] = JobCache.get(c[:job][:uuid])
+          end
+
+          if c[:job][:state] == "Complete"
+            # Populate script_parameters of other components waiting for
+            # this job
+            @components.each do |c2name, c2|
+              c2[:script_parameters].each do |pname, p|
+                if p.is_a? Hash and p[:output_of] == cname.to_s
+                  debuglog "parameter #{c2name}::#{pname} == #{c[:job][:output]}"
+                  c2[:script_parameters][pname] = {value: c[:job][:output]}
+                  moretodo = true
+                end
+              end
+            end
+            unless c_already_finished
+              # This is my first time discovering that the job
+              # succeeded. (At the top of this loop, I was still
+              # waiting for it to finish.)
+
+              if @instance[:name].andand.length.andand > 0
+                pipeline_name = @instance[:name]
+              elsif @template.andand[:name].andand.length.andand > 0
+                pipeline_name = @template[:name]
+              else
+                pipeline_name = @instance[:uuid]
+              end
+              if c[:output_name] != false
+                # Create a collection in the same project as the
+                # pipeline, containing the contents of the output.
+                portable_data_hash = c[:job][:output]
+                collections = $arv.collection.list(limit: 1,
+                                                   filters: [['portable_data_hash', '=', portable_data_hash]],
+                                                   select: ["portable_data_hash", "manifest_text"]
+                                                   )[:items]
+                if collections.any?
+                  name = c[:output_name] || "Output #{portable_data_hash[0..7]} of #{cname} of #{pipeline_name}"
+
+                  # check if there is a name collision.
+                  name_collisions = $arv.collection.list(filters: [["owner_uuid", "=", owner_uuid],
+                                                                   ["name", "=", name]])[:items]
+
+                  newcollection_actual = nil
+                  if name_collisions.any? and name_collisions.first[:portable_data_hash] == portable_data_hash
+                    # There is already a collection with the same name and the
+                    # same contents, so just point to that.
+                    newcollection_actual = name_collisions.first
+                  end
+
+                  if newcollection_actual.nil?
+                    # Did not find a collection with the same name (or the
+                    # collection has a different portable data hash) so create
+                    # a new collection with ensure_unique_name: true.
+                    newcollection = {
+                      owner_uuid: owner_uuid,
+                      name: name,
+                      portable_data_hash: collections.first[:portable_data_hash],
+                      manifest_text: collections.first[:manifest_text]
+                    }
+                    debuglog "Creating collection #{newcollection}", 0
+                    newcollection_actual = $arv.collection.create collection: newcollection, ensure_unique_name: true
+                  end
+
+                  c[:output_uuid] = newcollection_actual[:uuid]
+                else
+                  debuglog "Could not find a collection with portable data hash #{portable_data_hash}", 0
+                end
+              end
+            end
+          elsif ["Queued", "Running"].include? c[:job][:state]
+            # Job is running or queued to run, so indicate that pipeline
+            # should continue to run
+            moretodo = true
+          elsif c[:job][:state] == "Cancelled"
+            debuglog "component #{cname} job #{c[:job][:uuid]} cancelled."
+            moretodo = false
+          elsif c[:job][:state] == "Failed"
+            moretodo = false
+          end
+        end
+      end
+      @instance[:components] = @components
+      report_status
+
+      if @options[:no_wait]
+        moretodo = false
+      end
+
+      # If job creation fails, just give up on this pipeline instance.
+      if job_creation_failed > 0
+        moretodo = false
+      end
+
+      if moretodo
+        begin
+          sleep 10
+        rescue Interrupt
+          debuglog "interrupt", 0
+          interrupted = true
+          break
+        end
+      end
+    end
+
+    c_in_state = @components.values.group_by { |c|
+      c[:job] and c[:job][:state]
+    }
+    succeeded = c_in_state["Complete"].andand.count || 0
+    failed = (c_in_state["Failed"].andand.count || 0) + (c_in_state["Cancelled"].andand.count || 0)
+    ended = succeeded + failed
+
+    success = (succeeded == @components.length)
+
+    # A job create call failed. Just give up.
+    if job_creation_failed > 0
+      debuglog "job creation failed - giving up on this pipeline instance", 0
+      success = false
+      failed += 1
+    end
+
+    if interrupted
+      if success
+        @instance[:state] = 'Complete'
+      else
+        @instance[:state] = 'Paused'
+      end
+    else
+      if ended == @components.length or failed > 0
+        @instance[:state] = success ? 'Complete' : 'Failed'
+      end
+    end
+
+    if @instance[:finished_at].nil? and ['Complete', 'Failed'].include? @instance[:state]
+      @instance[:finished_at] = Time.now
+    end
+
+    debuglog "pipeline instance state is #{@instance[:state]}"
+
+    # set components_summary
+    components_summary = {"todo" => @components.length - ended, "done" => succeeded, "failed" => failed}
+    @instance[:components_summary] = components_summary
+
+    @instance.save
+  end
+
+  def cleanup
+    if @instance and @instance[:state] == 'RunningOnClient'
+      @instance[:state] = 'Paused'
+      @instance.save
+    end
+  end
+
+  def uuid
+    @instance[:uuid]
+  end
+
+  protected
+
+  def report_status
+    @instance.save
+
+    if @options[:status_json] != '/dev/null'
+      File.open(@options[:status_json], 'w') do |f|
+        f.puts @components.pretty_inspect
+      end
+    end
+
+    if @options[:status_text] != '/dev/null'
+      File.open(@options[:status_text], 'w') do |f|
+        f.puts ""
+        f.puts "#{Time.now} -- pipeline_instance #{@instance[:uuid]}"
+        namewidth = @components.collect { |cname, c| cname.size }.max
+        @components.each do |cname, c|
+          jstatus = if !c[:job]
+                      "-"
+                    else case c[:job][:state]
+                         when "Running"
+                           "#{c[:job][:tasks_summary].inspect}"
+                         when "Complete"
+                           c[:job][:output]
+                         when "Cancelled"
+                           "cancelled #{c[:job][:cancelled_at]}"
+                         when "Failed"
+                           "failed #{c[:job][:finished_at]}"
+                         when "Queued"
+                           "queued #{c[:job][:created_at]}"
+                         end
+                    end
+          f.puts "#{cname.to_s.ljust namewidth} #{c[:job] ? c[:job][:uuid] : '-'.ljust(27)} #{jstatus}"
+        end
+      end
+    end
+  end
+
+  def abort(msg)
+    if @instance
+      if ["New", "Ready", "RunningOnClient",
+          "RunningOnServer"].include?(@instance[:state])
+        @instance[:state] = "Failed"
+        @instance[:finished_at] = Time.now
+        @instance.save
+      end
+      @instance.log_stderr(msg)
+    end
+    Kernel::abort(msg)
+  end
+end
+
+runner = WhRunPipelineInstance.new($options)
+begin
+  if $options[:template]
+    runner.fetch_template($options[:template])
+  else
+    runner.fetch_instance($options[:instance])
+  end
+  runner.apply_parameters(p.leftovers)
+  runner.setup_instance
+  if $options[:submit]
+    runner.instance.save
+    puts runner.instance[:uuid]
+  else
+    runner.run
+  end
+rescue Exception => e
+  runner.cleanup
+  raise e
+end
diff --git a/sdk/cli/bin/arv-tag b/sdk/cli/bin/arv-tag
new file mode 100755 (executable)
index 0000000..f709020
--- /dev/null
@@ -0,0 +1,241 @@
+#! /usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# arv tag usage:
+#   arv tag add tag1 [tag2 ...] --object obj_uuid1 [--object obj_uuid2 ...]
+#   arv tag remove tag1 [tag2 ...] --object obj_uuid1 [--object obj_uuid2 ...]
+#   arv tag remove tag1 [tag2 ...] --all
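+#
+# For example (the object UUID below is illustrative):
+#   arv tag add favorite --object zzzzz-4zz18-0123456789abcde
+#   arv tag remove favorite --all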
+
+def usage_string
+  return "\nUsage:\n" +
+    "arv tag add tag1 [tag2 ...] --object object_uuid1 [object_uuid2...]\n" +
+    "arv tag remove tag1 [tag2 ...] --object object_uuid1 [object_uuid2...]\n" +
+    "arv tag remove --all\n"
+end
+
+def usage
+  abort usage_string
+end
+
+def api_call(method, parameters:{}, request_body:{})
+  result = $client.execute(:api_method => method,
+                           :parameters => parameters,
+                           :body_object => request_body,
+                           :authenticated => false,
+                           :headers => {
+                             authorization: "OAuth2 #{ENV['ARVADOS_API_TOKEN']}",
+                           })
+
+  begin
+    results = JSON.parse result.body
+  rescue JSON::ParserError => e
+    abort "Failed to parse server response:\n" + e.to_s
+  end
+
+  if results["errors"]
+    abort "Error: #{results["errors"][0]}"
+  end
+
+  return results
+end
+
+def tag_add(tag, obj_uuid)
+  return api_call($arvados.links.create,
+                  request_body: {
+                    :link => {
+                      :name       => tag,
+                      :link_class => :tag,
+                      :head_uuid  => obj_uuid,
+                    }
+                  })
+end
+
+def tag_remove(tag, obj_uuids=nil)
+  # If we got a list of objects to untag, look up the uuids for the
+  # links that need to be deleted.
+  link_uuids = []
+  if obj_uuids
+    obj_uuids.each do |uuid|
+      link = api_call($arvados.links.list,
+                      request_body: {
+                        :where => {
+                          :link_class => :tag,
+                          :name => tag,
+                          :head_uuid => uuid,
+                        }
+                      })
+      if link['items_available'] > 0
+        link_uuids.push link['items'][0]['uuid']
+      end
+    end
+  else
+    all_tag_links = api_call($arvados.links.list,
+                             request_body: {
+                               :where => {
+                                 :link_class => :tag,
+                                 :name => tag,
+                               }
+                             })
+    link_uuids = all_tag_links['items'].map { |obj| obj['uuid'] }
+  end
+
+  results = []
+  if link_uuids
+    link_uuids.each do |uuid|
+      results.push api_call($arvados.links.delete, parameters:{ :uuid => uuid })
+    end
+  else
+    $stderr.puts "no tags found to remove"
+  end
+
+  return results
+end
+
+if RUBY_VERSION < '1.9.3' then
+  abort <<-EOS
+#{$0.gsub(/^\.\//,'')} requires Ruby version 1.9.3 or higher.
+EOS
+end
+
+$arvados_api_version = ENV['ARVADOS_API_VERSION'] || 'v1'
+$arvados_api_host = ENV['ARVADOS_API_HOST'] or
+  abort "#{$0}: fatal: ARVADOS_API_HOST environment variable not set."
+$arvados_api_token = ENV['ARVADOS_API_TOKEN'] or
+  abort "#{$0}: fatal: ARVADOS_API_TOKEN environment variable not set."
+$arvados_api_host_insecure = %w(1 true yes).
+  include?((ENV['ARVADOS_API_HOST_INSECURE'] || "").downcase)
+
+begin
+  require 'rubygems'
+  require 'google/api_client'
+  require 'json'
+  require 'pp'
+  require 'oj'
+  require 'optimist'
+rescue LoadError
+  abort <<-EOS
+#{$0}: fatal: some runtime dependencies are missing.
+Try: gem install google-api-client json oj optimist
+  EOS
+end
+
+def debuglog(message, verbosity=1)
+  $stderr.puts "#{File.split($0).last} #{$$}: #{message}" if ($debuglevel || 0) >= verbosity
+end
+
+module Kernel
+  def suppress_warnings
+    original_verbosity = $VERBOSE
+    $VERBOSE = nil
+    result = yield
+    $VERBOSE = original_verbosity
+    return result
+  end
+end
+
+if $arvados_api_host_insecure or $arvados_api_host.match /local/
+  # You probably don't care about SSL certificate checks if you're
+  # testing with a dev server.
+  suppress_warnings { OpenSSL::SSL::VERIFY_PEER = OpenSSL::SSL::VERIFY_NONE }
+end
+
+class Google::APIClient
+  def discovery_document(api, version)
+    api = api.to_s
+    return @discovery_documents["#{api}:#{version}"] ||=
+      begin
+        response = self.execute!(
+                                 :http_method => :get,
+                                 :uri => self.discovery_uri(api, version),
+                                 :authenticated => false
+                                 )
+        response.body.class == String ? JSON.parse(response.body) : response.body
+      end
+  end
+end
+
+global_opts = Optimist::options do
+  banner usage_string
+  banner ""
+  opt :dry_run, "Don't actually do anything", :short => "-n"
+  opt :verbose, "Print some things on stderr", :short => "-v"
+  opt :uuid, "Return the UUIDs of the objects in the response, one per line (default)", :short => nil
+  opt :json, "Return the entire response received from the API server, as a JSON object", :short => "-j"
+  opt :human, "Return the response received from the API server, as a JSON object with whitespace added for human consumption", :short => "-h"
+  opt :pretty, "Synonym of --human", :short => nil
+  opt :yaml, "Return the response received from the API server, in YAML format", :short => "-y"
+  stop_on ['add', 'remove']
+end
+
+p = Optimist::Parser.new do
+  opt(:all,
+      "Remove this tag from all objects under your ownership. Only valid with `tag remove'.",
+      :short => :none)
+  opt(:object,
+      "The UUID of an object to which this tag operation should be applied.",
+      :type => :string,
+      :multi => true,
+      :short => :o)
+end
+
+$options = Optimist::with_standard_exception_handling p do
+  p.parse ARGV
+end
+
+if $options[:all] and ARGV[0] != 'remove'
+  usage
+end
+
+# Set up the API client.
+
+$client ||= Google::APIClient.
+  new(:host => $arvados_api_host,
+      :application_name => File.split($0).last,
+      :application_version => $application_version.to_s)
+$arvados = $client.discovered_api('arvados', $arvados_api_version)
+
+results = []
+cmd = ARGV.shift
+
+if ARGV.empty?
+  usage
+end
+
+case cmd
+when 'add'
+  ARGV.each do |tag|
+    $options[:object].each do |obj|
+      results.push(tag_add(tag, obj))
+    end
+  end
+when 'remove'
+  ARGV.each do |tag|
+    if $options[:all] then
+      results.concat tag_remove(tag)
+    else
+      results.concat tag_remove(tag, $options[:object])
+    end
+  end
+else
+  usage
+end
+
+if global_opts[:human] or global_opts[:pretty] then
+  puts Oj.dump(results, :indent => 1)
+elsif global_opts[:yaml] then
+  puts results.to_yaml
+elsif global_opts[:json] then
+  puts Oj.dump(results)
+else
+  results.each do |r|
+    if r['uuid'].nil?
+      abort("Response did not include a uuid:\n" +
+            Oj.dump(r, :indent => 1) +
+            "\n")
+    else
+      puts r['uuid']
+    end
+  end
+end
diff --git a/sdk/cli/bin/arv-ws b/sdk/cli/bin/arv-ws
new file mode 120000 (symlink)
index 0000000..622916b
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-ws
\ No newline at end of file
diff --git a/sdk/cli/bin/crunch-job b/sdk/cli/bin/crunch-job
new file mode 100755 (executable)
index 0000000..b98df8a
--- /dev/null
@@ -0,0 +1,2575 @@
+#!/usr/bin/env perl
+# -*- mode: perl; perl-indent-level: 2; indent-tabs-mode: nil; -*-
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+=head1 NAME
+
+crunch-job: Execute job steps, save snapshots as requested, collate output.
+
+=head1 SYNOPSIS
+
+Obtain job details from Arvados, run tasks on compute nodes (typically
+invoked by scheduler on controller):
+
+ crunch-job --job x-y-z --git-dir /path/to/repo/.git
+
+Obtain job details from command line, run tasks on local machine
+(typically invoked by application or developer on VM):
+
+ crunch-job --job '{"script_version":"/path/to/working/tree","script":"scriptname",...}'
+
+ crunch-job --job '{"repository":"https://github.com/curoverse/arvados.git","script_version":"master","script":"scriptname",...}'
+
+=head1 OPTIONS
+
+=over
+
+=item --force-unlock
+
+If the job is already locked, steal the lock and run it anyway.
+
+=item --git-dir
+
+Path to a .git directory (or a git URL) where the commit given in the
+job's C<script_version> attribute is to be found. If this is I<not>
+given, the job's C<repository> attribute will be used.
+
+=item --job-api-token
+
+Arvados API authorization token to use during the course of the job.
+
+=item --no-clear-tmp
+
+Do not clear per-job/task temporary directories during initial job
+setup. This can speed up development and debugging when running jobs
+locally.
+
+=item --job
+
+UUID of the job to run, or a JSON-encoded job resource without a
+UUID. If the latter is given, a new job object will be created.
+
+=back
+
+=head1 RUNNING JOBS LOCALLY
+
+crunch-job's log messages appear on stderr along with the job tasks'
+stderr streams. The log is saved in Keep at each checkpoint and when
+the job finishes.
+
+If the job succeeds, the job's output locator is printed on stdout.
+
+While the job is running, the following signals are accepted:
+
+=over
+
+=item control-C, SIGINT, SIGQUIT
+
+Save a checkpoint, terminate any job tasks that are running, and stop.
+
+=item SIGALRM
+
+Save a checkpoint and continue.
+
+=item SIGHUP
+
+Refresh node allocation (i.e., check whether any nodes have been added
+to or removed from the allocation) and re-read attributes of the Job
+record that should affect behavior (e.g., cancel the job if
+cancelled_at becomes non-nil).
+
+=back
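+
+For example, to ask a running crunch-job to save a checkpoint and keep
+going (the pid shown is illustrative):
+
+ kill -ALRM 12345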
+
+=cut
+
+
+use strict;
+use POSIX ':sys_wait_h';
+use POSIX qw(strftime);
+use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
+use Arvados;
+use Cwd qw(realpath);
+use Data::Dumper;
+use Digest::MD5 qw(md5_hex);
+use Getopt::Long;
+use IPC::Open2;
+use IO::Select;
+use File::Temp;
+use Fcntl ':flock';
+use File::Path qw( make_path remove_tree );
+
+use constant TASK_TEMPFAIL => 111;
+use constant EX_TEMPFAIL => 75;
+use constant EX_RETRY_UNLOCKED => 93;
+
+$ENV{"TMPDIR"} ||= "/tmp";
+unless (defined $ENV{"CRUNCH_TMP"}) {
+  $ENV{"CRUNCH_TMP"} = $ENV{"TMPDIR"} . "/crunch-job";
+  if ($ENV{"USER"} ne "crunch" && $< != 0) {
+    # use a tmp dir unique for my uid
+    $ENV{"CRUNCH_TMP"} .= "-$<";
+  }
+}
+
+# Create the tmp directory if it does not exist
+if ( ! -d $ENV{"CRUNCH_TMP"} ) {
+  make_path $ENV{"CRUNCH_TMP"} or die "Failed to create temporary working directory: " . $ENV{"CRUNCH_TMP"};
+}
+
+$ENV{"JOB_WORK"} = $ENV{"CRUNCH_TMP"} . "/work";
+$ENV{"CRUNCH_INSTALL"} = "$ENV{CRUNCH_TMP}/opt";
+$ENV{"CRUNCH_WORK"} = $ENV{"JOB_WORK"}; # deprecated
+mkdir ($ENV{"JOB_WORK"});
+
+my %proc;
+my $force_unlock;
+my $git_dir;
+my $jobspec;
+my $job_api_token;
+my $no_clear_tmp;
+my $resume_stash;
+my $cgroup_root = "/sys/fs/cgroup";
+my $docker_bin = "docker.io";
+my $docker_run_args = "";
+my $srun_sync_timeout = 15*60;
+GetOptions('force-unlock' => \$force_unlock,
+           'git-dir=s' => \$git_dir,
+           'job=s' => \$jobspec,
+           'job-api-token=s' => \$job_api_token,
+           'no-clear-tmp' => \$no_clear_tmp,
+           'resume-stash=s' => \$resume_stash,
+           'cgroup-root=s' => \$cgroup_root,
+           'docker-bin=s' => \$docker_bin,
+           'docker-run-args=s' => \$docker_run_args,
+           'srun-sync-timeout=i' => \$srun_sync_timeout,
+    );
+
+if (defined $job_api_token) {
+  $ENV{ARVADOS_API_TOKEN} = $job_api_token;
+}
+
+my $have_slurm = exists $ENV{SLURM_JOB_ID} && exists $ENV{SLURM_NODELIST};
+
+
+$SIG{'USR1'} = sub
+{
+  $main::ENV{CRUNCH_DEBUG} = 1;
+};
+$SIG{'USR2'} = sub
+{
+  $main::ENV{CRUNCH_DEBUG} = 0;
+};
+
+my $arv = Arvados->new('apiVersion' => 'v1');
+
+my $Job;
+my $job_id;
+my $dbh;
+my $sth;
+my @jobstep;
+
+my $local_job;
+if ($jobspec =~ /^[-a-z\d]+$/)
+{
+  # $jobspec is an Arvados UUID, not a JSON job specification
+  $Job = api_call("jobs/get", uuid => $jobspec);
+  $local_job = 0;
+}
+else
+{
+  $local_job = JSON::decode_json($jobspec);
+}
+
+
+# Make sure our workers (our slurm nodes, localhost, or whatever) are
+# at least able to run basic commands: they aren't down or severely
+# misconfigured.
+my $cmd = ['true'];
+if (($Job || $local_job)->{docker_image_locator}) {
+  $cmd = [$docker_bin, 'ps', '-q'];
+}
+Log(undef, "Sanity check is `@$cmd`");
+my ($exited, $stdout, $stderr, $tempfail) = srun_sync(
+  ["srun", "--nodes=\Q$ENV{SLURM_NNODES}\E", "--ntasks-per-node=1"],
+  $cmd,
+  {label => "sanity check"});
+if ($exited != 0) {
+  Log(undef, "Sanity check failed: ".exit_status_s($exited));
+  exit EX_TEMPFAIL;
+}
+Log(undef, "Sanity check OK");
+
+
+my $User = api_call("users/current");
+
+if (!$local_job) {
+  if (!$force_unlock) {
+    # Claim this job, and make sure nobody else does
+    eval { api_call("jobs/lock", uuid => $Job->{uuid}); };
+    if ($@) {
+      Log(undef, "Error while locking job, exiting ".EX_TEMPFAIL);
+      exit EX_TEMPFAIL;
+    };
+  }
+}
+else
+{
+  if (!$resume_stash)
+  {
+    map { croak ("No $_ specified") unless $local_job->{$_} }
+    qw(script script_version script_parameters);
+  }
+
+  $local_job->{'is_locked_by_uuid'} = $User->{'uuid'};
+  $local_job->{'started_at'} = gmtime;
+  $local_job->{'state'} = 'Running';
+
+  $Job = api_call("jobs/create", job => $local_job);
+}
+$job_id = $Job->{'uuid'};
+
+my $keep_logfile = $job_id . '.log.txt';
+log_writer_start($keep_logfile);
+
+$Job->{'runtime_constraints'} ||= {};
+$Job->{'runtime_constraints'}->{'max_tasks_per_node'} ||= 0;
+my $max_ncpus = $Job->{'runtime_constraints'}->{'max_tasks_per_node'};
+
+my $gem_versions = `gem list --quiet arvados-cli 2>/dev/null`;
+if ($? == 0) {
+  $gem_versions =~ s/^arvados-cli \(/ with arvados-cli Gem version(s) /;
+  chomp($gem_versions);
+  chop($gem_versions);  # Closing parentheses
+} else {
+  $gem_versions = "";
+}
+Log(undef,
+    "running from " . ((-e $0) ? realpath($0) : "stdin") . $gem_versions);
+
+Log (undef, "check slurm allocation");
+my @slot;
+my @node;
+# Should use $ENV{SLURM_TASKS_PER_NODE} instead of sinfo? (eg. "4(x3),2,4(x2)")
+my @sinfo;
+if (!$have_slurm)
+{
+  my $localcpus = 0 + `grep -cw ^processor /proc/cpuinfo` || 1;
+  push @sinfo, "$localcpus localhost";
+}
+if (exists $ENV{SLURM_NODELIST})
+{
+  push @sinfo, `sinfo -h --format='%c %N' --nodes=\Q$ENV{SLURM_NODELIST}\E`;
+}
+foreach (@sinfo)
+{
+  my ($ncpus, $slurm_nodelist) = split;
+  $ncpus = $max_ncpus if $max_ncpus && $ncpus > $max_ncpus;
+
+  my @nodelist;
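+  # Expand SLURM's compressed hostlist syntax into individual names,
+  # e.g. "compute[1-3,5]" (an illustrative nodelist) expands to
+  # compute1, compute2, compute3 and compute5.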
+  while ($slurm_nodelist =~ s/^([^\[,]+?(\[.*?\])?)(,|$)//)
+  {
+    my $nodelist = $1;
+    if ($nodelist =~ /\[((\d+)(-(\d+))?(,(\d+)(-(\d+))?)*)\]/)
+    {
+      my $ranges = $1;
+      foreach (split (",", $ranges))
+      {
+       my ($a, $b);
+       if (/(\d+)-(\d+)/)
+       {
+         $a = $1;
+         $b = $2;
+       }
+       else
+       {
+         $a = $_;
+         $b = $_;
+       }
+       push @nodelist, map {
+         my $n = $nodelist;
+         $n =~ s/\[[-,\d]+\]/$_/;
+         $n;
+       } ($a..$b);
+      }
+    }
+    else
+    {
+      push @nodelist, $nodelist;
+    }
+  }
+  foreach my $nodename (@nodelist)
+  {
+    Log (undef, "node $nodename - $ncpus slots");
+    my $node = { name => $nodename,
+                 ncpus => $ncpus,
+                 # The number of consecutive times a task has been dispatched
+                 # to this node and failed.
+                 losing_streak => 0,
+                 # The number of consecutive times that SLURM has reported
+                 # a node failure since the last successful task.
+                 fail_count => 0,
+                 # Don't dispatch work to this node until this time
+                 # (in seconds since the epoch) has passed.
+                 hold_until => 0 };
+    foreach my $cpu (1..$ncpus)
+    {
+      push @slot, { node => $node,
+                   cpu => $cpu };
+    }
+  }
+  push @node, @nodelist;
+}
+
+
+
+# Ensure that we get one jobstep running on each allocated node before
+# we start overloading nodes with concurrent steps
+
+@slot = sort { $a->{cpu} <=> $b->{cpu} } @slot;
+
+
+$Job->update_attributes(
+  'tasks_summary' => { 'failed' => 0,
+                       'todo' => 1,
+                       'running' => 0,
+                       'done' => 0 });
+
+Log (undef, "start");
+$SIG{'INT'} = sub { $main::please_freeze = 1; };
+$SIG{'QUIT'} = sub { $main::please_freeze = 1; };
+$SIG{'TERM'} = \&croak;
+$SIG{'TSTP'} = sub { $main::please_freeze = 1; };
+$SIG{'ALRM'} = sub { $main::please_info = 1; };
+$SIG{'CONT'} = sub { $main::please_continue = 1; };
+$SIG{'HUP'} = sub { $main::please_refresh = 1; };
+
+$main::please_freeze = 0;
+$main::please_info = 0;
+$main::please_continue = 0;
+$main::please_refresh = 0;
+my $jobsteps_must_output_keys = 0;     # becomes 1 when any task outputs a key
+
+grep { $ENV{$1} = $2 if /^(NOCACHE.*?)=(.*)/ } split ("\n", $$Job{knobs});
+$ENV{"CRUNCH_JOB_UUID"} = $job_id;
+$ENV{"JOB_UUID"} = $job_id;
+
+
+my @jobstep_todo = ();
+my @jobstep_done = ();
+my @jobstep_tomerge = ();
+my $jobstep_tomerge_level = 0;
+my $squeue_checked = 0;
+my $sinfo_checked = 0;
+my $latest_refresh = scalar time;
+
+
+
+if (defined $Job->{thawedfromkey})
+{
+  thaw ($Job->{thawedfromkey});
+}
+else
+{
+  my $first_task = api_call("job_tasks/create", job_task => {
+    'job_uuid' => $Job->{'uuid'},
+    'sequence' => 0,
+    'qsequence' => 0,
+    'parameters' => {},
+  });
+  push @jobstep, { 'level' => 0,
+                  'failures' => 0,
+                   'arvados_task' => $first_task,
+                };
+  push @jobstep_todo, 0;
+}
+
+
+if (!$have_slurm)
+{
+  must_lock_now("$ENV{CRUNCH_TMP}/.lock", "a job is already running here.");
+}
+
+my $build_script = handle_readall(\*DATA);
+my $nodelist = join(",", @node);
+my $git_tar_count = 0;
+
+if (!defined $no_clear_tmp) {
+  # Find FUSE mounts under $CRUNCH_TMP and unmount them.  Then clean
+  # up work directories crunch_tmp/work, crunch_tmp/opt,
+  # crunch_tmp/src*.
+  my ($exited, $stdout, $stderr, $tempfail) = srun_sync(
+    ["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
+    ['bash', '-ec', q{
+arv-mount --unmount-timeout 10 --unmount-all ${CRUNCH_TMP}
+rm -rf ${JOB_WORK} ${CRUNCH_INSTALL} ${CRUNCH_TMP}/task ${CRUNCH_TMP}/src* ${CRUNCH_TMP}/*.cid
+    }],
+    {label => "clean work dirs"});
+  if ($exited != 0) {
+    exit_retry_unlocked();
+  }
+}
+
+# If this job requires a Docker image, install that.
+my ($docker_locator, $docker_stream, $docker_hash, $docker_limitmem, $dockeruserarg);
+if ($docker_locator = $Job->{docker_image_locator}) {
+  Log (undef, "Install docker image $docker_locator");
+  ($docker_stream, $docker_hash) = find_docker_image($docker_locator);
+  if (!$docker_hash)
+  {
+    croak("No Docker image hash found from locator $docker_locator");
+  }
+  Log (undef, "docker image hash is $docker_hash");
+  $docker_stream =~ s/^\.//;
+  my $docker_install_script = qq{
+loaded() {
+  id=\$($docker_bin inspect --format="{{.ID}}" \Q$docker_hash\E) || return 1
+  echo "image ID is \$id"
+  [[ \${id} = \Q$docker_hash\E ]]
+}
+if loaded >&2 2>/dev/null; then
+  echo >&2 "image is already present"
+  exit 0
+fi
+echo >&2 "docker image is not present; loading"
+arv-get \Q$docker_locator$docker_stream/$docker_hash.tar\E | $docker_bin load
+if ! loaded >&2; then
+  echo >&2 "`docker load` exited 0, but image is not found (!)"
+  exit 1
+fi
+echo >&2 "image loaded successfully"
+};
+
+  my ($exited, $stdout, $stderr, $tempfail) = srun_sync(
+    ["srun", "--nodelist=" . join(',', @node)],
+    ["/bin/bash", "-o", "pipefail", "-ec", $docker_install_script],
+    {label => "load docker image"});
+  if ($exited != 0)
+  {
+    exit_retry_unlocked();
+  }
+
+  # Determine whether this version of Docker supports memory+swap limits.
+  ($exited, $stdout, $stderr, $tempfail) = srun_sync(
+    ["srun", "--nodes=1"],
+    [$docker_bin, 'run', '--help'],
+    {label => "check --memory-swap feature"});
+  if ($tempfail) {
+    exit_retry_unlocked();
+  }
+  $docker_limitmem = ($stdout =~ /--memory-swap/);
+
+  # Find a non-root Docker user to use.
+  # Tries the default user for the container, then 'crunch', then 'nobody',
+  # testing for whether the actual user id is non-zero.  This defends against
+  # mistakes, not malice; still, we intend to harden security in the
+  # future, so we don't want anyone getting used to their jobs running
+  # as root in their Docker containers.
+  my @tryusers = ("", "crunch", "nobody");
+  foreach my $try_user (@tryusers) {
+    my $label;
+    my $try_user_arg;
+    if ($try_user eq "") {
+      $label = "check whether default user is UID 0";
+      $try_user_arg = "";
+    } else {
+      $label = "check whether user '$try_user' is UID 0";
+      $try_user_arg = "--user=$try_user";
+    }
+    my ($exited, $stdout, $stderr, $tempfail) = srun_sync(
+      ["srun", "--nodes=1"],
+      ["/bin/sh", "-ec",
+       "$docker_bin run $docker_run_args $try_user_arg $docker_hash id --user"],
+      {label => $label});
+    chomp($stdout);
+    if ($exited == 0 && $stdout =~ /^\d+$/ && $stdout > 0) {
+      $dockeruserarg = $try_user_arg;
+      if ($try_user eq "") {
+        Log(undef, "Container will run with default user");
+      } else {
+        Log(undef, "Container will run with $dockeruserarg");
+      }
+      last;
+    } elsif ($tempfail) {
+      exit_retry_unlocked();
+    }
+  }
+
+  if (!defined $dockeruserarg) {
+    croak("Could not find a user in container that is not UID 0 (tried default user, @tryusers) or there was a problem running 'id' in the container.");
+  }
+
+  if ($Job->{arvados_sdk_version}) {
+    # The job also specifies an Arvados SDK version.  Add the SDKs to the
+    # tar file for the build script to install.
+    Log(undef, sprintf("Packing Arvados SDK version %s for installation",
+                       $Job->{arvados_sdk_version}));
+    add_git_archive("git", "--git-dir=$git_dir", "archive",
+                    "--prefix=.arvados.sdk/",
+                    $Job->{arvados_sdk_version}, "sdk");
+  }
+}
+
+if (!defined $git_dir && $Job->{'script_version'} =~ m{^/}) {
+  # If script_version looks like an absolute path, *and* the --git-dir
+  # argument was not given -- which implies we were not invoked by
+  # crunch-dispatch -- we will use the given path as a working
+  # directory instead of resolving script_version to a git commit (or
+  # doing anything else with git).
+  $ENV{"CRUNCH_SRC_COMMIT"} = $Job->{'script_version'};
+  $ENV{"CRUNCH_SRC"} = $Job->{'script_version'};
+}
+else {
+  # Resolve the given script_version to a git commit sha1. Also, if
+  # the repository is remote, clone it into our local filesystem: this
+  # ensures "git archive" will work, and is necessary to reliably
+  # resolve a symbolic script_version like "master^".
+  $ENV{"CRUNCH_SRC"} = "$ENV{CRUNCH_TMP}/src";
+
+  Log (undef, "Looking for version ".$Job->{script_version}." from repository ".$Job->{repository});
+
+  $ENV{"CRUNCH_SRC_COMMIT"} = $Job->{script_version};
+
+  # If we're running under crunch-dispatch, it will have already
+  # pulled the appropriate source tree into its own repository, and
+  # given us that repo's path as $git_dir.
+  #
+  # If we're running a "local" job, we might have to fetch content
+  # from a remote repository.
+  #
+  # (Currently crunch-dispatch gives a local path with --git-dir, but
+  # we might as well accept URLs there too in case it changes its
+  # mind.)
+  my $repo = $git_dir || $Job->{'repository'};
+
+  # Repository can be remote or local. If remote, we'll need to fetch it
+  # to a local dir before doing `git log` et al.
+  my $repo_location;
+
+  if ($repo =~ m{://|^[^/]*:}) {
+    # $repo is a git url we can clone, like git:// or https:// or
+    # file:/// or [user@]host:repo.git. Note "user/name@host:foo" is
+    # not recognized here because distinguishing that from a local
+    # path is too fragile. If you really need something strange here,
+    # use the ssh:// form.
+    $repo_location = 'remote';
+  } elsif ($repo =~ m{^\.*/}) {
+    # $repo is a local path to a git index. We'll also resolve ../foo
+    # to ../foo/.git if the latter is a directory. To help
+    # disambiguate local paths from named hosted repositories, this
+    # form must be given as ./ or ../ if it's a relative path.
+    if (-d "$repo/.git") {
+      $repo = "$repo/.git";
+    }
+    $repo_location = 'local';
+  } else {
+    # $repo is none of the above. It must be the name of a hosted
+    # repository.
+    my $arv_repo_list = api_call("repositories/list",
+                                 'filters' => [['name','=',$repo]]);
+    my @repos_found = @{$arv_repo_list->{'items'}};
+    my $n_found = $arv_repo_list->{'serverResponse'}->{'items_available'};
+    if ($n_found > 0) {
+      Log(undef, "Repository '$repo' -> "
+          . join(", ", map { $_->{'uuid'} } @repos_found));
+    }
+    if ($n_found != 1) {
+      croak("Error: Found $n_found repositories with name '$repo'.");
+    }
+    $repo = $repos_found[0]->{'fetch_url'};
+    $repo_location = 'remote';
+  }
+  Log(undef, "Using $repo_location repository '$repo'");
+  $ENV{"CRUNCH_SRC_URL"} = $repo;
+
+  # Resolve given script_version (we'll call that $treeish here) to a
+  # commit sha1 ($commit).
+  my $treeish = $Job->{'script_version'};
+  my $commit;
+  if ($repo_location eq 'remote') {
+    # We minimize excess object-fetching by re-using the same bare
+    # repository in CRUNCH_TMP/.git for multiple crunch-jobs -- we
+    # just keep adding remotes to it as needed.
+    my $local_repo = $ENV{'CRUNCH_TMP'}."/.git";
+    my $gitcmd = "git --git-dir=\Q$local_repo\E";
+
+    # Set up our local repo for caching remote objects, making
+    # archives, etc.
+    if (!-d $local_repo) {
+      make_path($local_repo) or croak("Error: could not create $local_repo");
+    }
+    # This works (exits 0 and doesn't delete fetched objects) even
+    # if $local_repo is already initialized:
+    `$gitcmd init --bare`;
+    if ($?) {
+      croak("Error: $gitcmd init --bare exited ".exit_status_s($?));
+    }
+
+    # If $treeish looks like a hash (or abbrev hash) we look it up in
+    # our local cache first, since that's cheaper. (We don't want to
+    # do that with tags/branches though -- those change over time, so
+    # they should always be resolved by the remote repo.)
+    if ($treeish =~ /^[0-9a-f]{7,40}$/s) {
+      # Hide stderr because it's normal for this to fail:
+      my $sha1 = `$gitcmd rev-list -n1 ''\Q$treeish\E 2>/dev/null`;
+      if ($? == 0 &&
+          # Careful not to resolve a branch named abcdeff to commit 1234567:
+          $sha1 =~ /^$treeish/ &&
+          $sha1 =~ /^([0-9a-f]{40})$/s) {
+        $commit = $1;
+        Log(undef, "Commit $commit already present in $local_repo");
+      }
+    }
+
+    if (!defined $commit) {
+      # If $treeish isn't just a hash or abbrev hash, or isn't here
+      # yet, we need to fetch the remote to resolve it correctly.
+
+      # First, remove all local heads. This prevents a name that does
+      # not exist on the remote from resolving to (or colliding with)
+      # a previously fetched branch or tag (possibly from a different
+      # remote).
+      remove_tree("$local_repo/refs/heads", {keep_root => 1});
+
+      Log(undef, "Fetching objects from $repo to $local_repo");
+      `$gitcmd fetch --no-progress --tags ''\Q$repo\E \Q+refs/heads/*:refs/heads/*\E`;
+      if ($?) {
+        croak("Error: `$gitcmd fetch` exited ".exit_status_s($?));
+      }
+    }
+
+    # Now that the data is all here, we will use our local repo for
+    # the rest of our git activities.
+    $repo = $local_repo;
+  }
+
+  my $gitcmd = "git --git-dir=\Q$repo\E";
+  my $sha1 = `$gitcmd rev-list -n1 ''\Q$treeish\E`;
+  unless ($? == 0 && $sha1 =~ /^([0-9a-f]{40})$/) {
+    croak("`$gitcmd rev-list` exited "
+          .exit_status_s($?)
+          .", '$treeish' not found, giving up");
+  }
+  $commit = $1;
+  Log(undef, "Version $treeish is commit $commit");
+
+  if ($commit ne $Job->{'script_version'}) {
+    # Record the real commit id in the database, frozentokey, logs,
+    # etc. -- instead of an abbreviation or a branch name which can
+    # become ambiguous or point to a different commit in the future.
+    if (!$Job->update_attributes('script_version' => $commit)) {
+      croak("Error: failed to update job's script_version attribute");
+    }
+  }
+
+  $ENV{"CRUNCH_SRC_COMMIT"} = $commit;
+  add_git_archive("$gitcmd archive ''\Q$commit\E");
+}
+
+my $git_archive = combined_git_archive();
+if (!defined $git_archive) {
+  Log(undef, "Skip install phase (no git archive)");
+  if ($have_slurm) {
+    Log(undef, "Warning: This probably means workers have no source tree!");
+  }
+}
+else {
+  my $exited;
+  my $install_script_tries_left = 3;
+  for (my $attempts = 0; $attempts < 3; $attempts++) {
+    my @srunargs = ("srun",
+                    "--nodelist=$nodelist",
+                    "-D", $ENV{'TMPDIR'}, "--job-name=$job_id");
+    my @execargs = ("sh", "-c",
+                    "mkdir -p $ENV{CRUNCH_INSTALL} && cd $ENV{CRUNCH_TMP} && perl -");
+
+    $ENV{"CRUNCH_GIT_ARCHIVE_HASH"} = md5_hex($git_archive);
+    my ($stdout, $stderr, $tempfail);
+    ($exited, $stdout, $stderr, $tempfail) = srun_sync(
+      \@srunargs, \@execargs,
+      {label => "run install script on all workers"},
+        $build_script . $git_archive);
+    if ($tempfail) {
+      exit_retry_unlocked();
+    }
+
+    my $stderr_anything_from_script = 0;
+    for my $line (split(/\n/, $stderr)) {
+      if ($line !~ /^(srun: error: |starting: \[)/) {
+        $stderr_anything_from_script = 1;
+      }
+    }
+
+    last if $exited == 0 || $main::please_freeze;
+
+    # If the install script fails but doesn't print an error message,
+    # the next thing anyone is likely to do is just run it again in
+    # case it was a transient problem like "slurm communication fails
+    # because the network isn't reliable enough". So we'll just do
+    # that ourselves (up to 3 attempts in total). OTOH, if there is an
+    # error message, the problem is more likely to have a real fix and
+    # we should fail the job so the fixing process can start, instead
+    # of doing 2 more attempts.
+    last if $stderr_anything_from_script;
+  }
+
+  foreach my $tar_filename (map { tar_filename_n($_); } (1..$git_tar_count)) {
+    unlink($tar_filename);
+  }
+
+  if ($exited != 0) {
+    croak("Giving up");
+  }
+}
+
+foreach (qw (script script_version script_parameters runtime_constraints))
+{
+  Log (undef,
+       "$_ " .
+       (ref($Job->{$_}) ? JSON::encode_json($Job->{$_}) : $Job->{$_}));
+}
+foreach (split (/\n/, $Job->{knobs}))
+{
+  Log (undef, "knob " . $_);
+}
+my $resp = api_call(
+  'nodes/list',
+  'filters' => [['hostname', 'in', \@node]],
+  'order' => 'hostname',
+  'limit' => scalar(@node),
+    );
+for my $n (@{$resp->{items}}) {
+  Log(undef, "$n->{hostname} $n->{uuid} ".JSON::encode_json($n->{properties}));
+}
+
+
+
+$main::success = undef;
+
+
+
+ONELEVEL:
+
+my $thisround_succeeded = 0;
+my $thisround_failed = 0;
+my $thisround_failed_multiple = 0;
+my $working_slot_count = scalar(@slot);
+
+@jobstep_todo = sort { $jobstep[$a]->{level} <=> $jobstep[$b]->{level}
+                      or $a <=> $b } @jobstep_todo;
+my $level = $jobstep[$jobstep_todo[0]]->{level};
+
+my $initial_tasks_this_level = 0;
+foreach my $id (@jobstep_todo) {
+  $initial_tasks_this_level++ if ($jobstep[$id]->{level} == $level);
+}
+
+# If the number of tasks scheduled at this level #T is smaller than the number
+# of slots available #S, only use the first #T slots, or the first slot on
+# each node, whichever number is greater.
+#
+# When we dispatch tasks later, we'll allocate whole-node resources like RAM
+# based on these numbers.  Using fewer slots makes more resources available
+# to each individual task, which should normally be a better strategy when
+# there are fewer of them running with less parallelism.
+#
+# Note that this calculation is not redone if the initial tasks at
+# this level queue more tasks at the same level.  This may harm
+# overall task throughput for that level.
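+#
+# Worked example (illustrative numbers): with 2 nodes of 4 slots each
+# (8 slots total), 1 initial task keeps 2 slots free for dispatch (the
+# first slot on each node), 5 initial tasks keep the first 5 slots,
+# and 10 initial tasks keep all 8.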
+my @freeslot;
+if ($initial_tasks_this_level < @node) {
+  @freeslot = (0..$#node);
+} elsif ($initial_tasks_this_level < @slot) {
+  @freeslot = (0..$initial_tasks_this_level - 1);
+} else {
+  @freeslot = (0..$#slot);
+}
+my $round_num_freeslots = scalar(@freeslot);
+print STDERR "crunch-job have ${round_num_freeslots} free slots for ${initial_tasks_this_level} initial tasks at this level, ".scalar(@node)." nodes, and ".scalar(@slot)." slots\n";
+
+my %round_max_slots = ();
+for (my $ii = $#freeslot; $ii >= 0; $ii--) {
+  my $this_slot = $slot[$freeslot[$ii]];
+  my $node_name = $this_slot->{node}->{name};
+  $round_max_slots{$node_name} ||= $this_slot->{cpu};
+  last if (scalar(keys(%round_max_slots)) >= @node);
+}
+
+Log(undef, "start level $level with $round_num_freeslots slots");
+my @holdslot;
+my %reader;
+my $progress_is_dirty = 1;
+my $progress_stats_updated = 0;
+
+update_progress_stats();
+
+
+THISROUND:
+for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
+{
+  # Don't create new tasks if we already know the job's final result.
+  last if defined($main::success);
+
+  my $id = $jobstep_todo[$todo_ptr];
+  my $Jobstep = $jobstep[$id];
+  if ($Jobstep->{level} != $level)
+  {
+    next;
+  }
+
+  pipe $reader{$id}, "writer" or croak("pipe() failed: $!");
+  set_nonblocking($reader{$id});
+
+  my $childslot = $freeslot[0];
+  my $childnode = $slot[$childslot]->{node};
+  my $childslotname = join (".",
+                           $slot[$childslot]->{node}->{name},
+                           $slot[$childslot]->{cpu});
+
+  my $childpid = fork();
+  if ($childpid == 0)
+  {
+    $SIG{'INT'} = 'DEFAULT';
+    $SIG{'QUIT'} = 'DEFAULT';
+    $SIG{'TERM'} = 'DEFAULT';
+
+    foreach (values (%reader))
+    {
+      close($_);
+    }
+    fcntl ("writer", F_SETFL, 0) or croak ($!); # no close-on-exec
+    open(STDOUT,">&writer") or croak ($!);
+    open(STDERR,">&writer") or croak ($!);
+
+    undef $dbh;
+    undef $sth;
+
+    delete $ENV{"GNUPGHOME"};
+    $ENV{"TASK_UUID"} = $Jobstep->{'arvados_task'}->{'uuid'};
+    $ENV{"TASK_QSEQUENCE"} = $id;
+    $ENV{"TASK_SEQUENCE"} = $level;
+    $ENV{"JOB_SCRIPT"} = $Job->{script};
+    while (my ($param, $value) = each %{$Job->{script_parameters}}) {
+      $param =~ tr/a-z/A-Z/;
+      $ENV{"JOB_PARAMETER_$param"} = $value;
+    }
+    $ENV{"TASK_SLOT_NODE"} = $slot[$childslot]->{node}->{name};
+    $ENV{"TASK_SLOT_NUMBER"} = $slot[$childslot]->{cpu};
+    $ENV{"TASK_WORK"} = $ENV{"CRUNCH_TMP"}."/task/$childslotname";
+    $ENV{"HOME"} = $ENV{"TASK_WORK"};
+    $ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated
+    $ENV{"CRUNCH_NODE_SLOTS"} = $round_max_slots{$ENV{TASK_SLOT_NODE}};
+    $ENV{"PATH"} = $ENV{"CRUNCH_INSTALL"} . "/bin:" . $ENV{"PATH"};
+
+    my $keep_mnt = $ENV{"TASK_WORK"}.".keep";
+
+    $ENV{"GZIP"} = "-n";
+
+    my @srunargs = (
+      "srun",
+      "--nodelist=".$childnode->{name},
+      qw(-n1 -c1 -N1 -D), $ENV{'TMPDIR'},
+      "--job-name=$job_id.$id.$$",
+       );
+
+    my $stdbuf = " stdbuf --output=0 --error=0 ";
+
+    my $arv_file_cache = "";
+    if (defined($Job->{'runtime_constraints'}->{'keep_cache_mb_per_task'})) {
+      $arv_file_cache = "--file-cache=" . ($Job->{'runtime_constraints'}->{'keep_cache_mb_per_task'} * 1024 * 1024);
+    }
+
+    my $command =
+       "if [ -e \Q$ENV{TASK_WORK}\E ]; then rm -rf \Q$ENV{TASK_WORK}\E; fi; "
+        ."mkdir -p \Q$ENV{CRUNCH_TMP}\E \Q$ENV{JOB_WORK}\E \Q$ENV{TASK_WORK}\E \Q$keep_mnt\E "
+       ."&& cd \Q$ENV{CRUNCH_TMP}\E "
+        # These environment variables get used explicitly later in
+        # $command.  No tool is expected to read these values directly.
+        .q{&& MEM=$(awk '($1 == "MemTotal:"){print $2}' </proc/meminfo) }
+        .q{&& SWAP=$(awk '($1 == "SwapTotal:"){print $2}' </proc/meminfo) }
+        ."&& MEMLIMIT=\$(( (\$MEM * 95) / ($ENV{CRUNCH_NODE_SLOTS} * 100) )) "
+        ."&& let SWAPLIMIT=\$MEMLIMIT+\$SWAP "
+        .q{&& declare -a VOLUMES=() }
+        .q{&& if which crunchrunner >/dev/null ; then VOLUMES+=("--volume=$(which crunchrunner):/usr/local/bin/crunchrunner:ro") ; fi }
+        .q{&& if test -f /etc/ssl/certs/ca-certificates.crt ; then VOLUMES+=("--volume=/etc/ssl/certs/ca-certificates.crt:/etc/arvados/ca-certificates.crt:ro") ; }
+        .q{elif test -f /etc/pki/tls/certs/ca-bundle.crt ; then VOLUMES+=("--volume=/etc/pki/tls/certs/ca-bundle.crt:/etc/arvados/ca-certificates.crt:ro") ; fi };
+
+    $command .= "&& exec arv-mount --read-write --mount-by-pdh=by_pdh --mount-tmp=tmp --crunchstat-interval=10 --allow-other $arv_file_cache \Q$keep_mnt\E --exec ";
+    $ENV{TASK_KEEPMOUNT} = "$keep_mnt/by_pdh";
+    $ENV{TASK_KEEPMOUNT_TMP} = "$keep_mnt/tmp";
+
+    if ($docker_hash)
+    {
+      my $containername = "$Jobstep->{arvados_task}->{uuid}-$Jobstep->{failures}";
+      my $cidfile = "$ENV{CRUNCH_TMP}/$containername.cid";
+      $command .= "crunchstat -cgroup-root=\Q$cgroup_root\E -cgroup-parent=docker -cgroup-cid=$cidfile -poll=10000 ";
+      $command .= "$docker_bin run $docker_run_args --name=$containername --attach=stdout --attach=stderr --attach=stdin -i \Q$dockeruserarg\E --cidfile=$cidfile --sig-proxy ";
+      # We only set memory limits if Docker lets us limit both memory and swap.
+      # Memory limits alone have been supported longer, but subprocesses tend
+      # to get SIGKILL if they exceed that without any swap limit set.
+      # See #5642 for additional background.
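+      # Rough example (illustrative numbers): with MemTotal 8388608 kB
+      # (8 GiB) and CRUNCH_NODE_SLOTS=4, the MEMLIMIT computed above is
+      # (8388608 * 95) / (4 * 100) = 1992294 kB, so each task may use
+      # about 1.9 GiB of RAM plus the node's swap.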
+      if ($docker_limitmem) {
+        $command .= "--memory=\${MEMLIMIT}k --memory-swap=\${SWAPLIMIT}k ";
+      }
+
+      # The source tree and $destdir directory (which we have
+      # installed on the worker host) are available in the container,
+      # under the same path.
+      $command .= "--volume=\Q$ENV{CRUNCH_SRC}:$ENV{CRUNCH_SRC}:ro\E ";
+      $command .= "--volume=\Q$ENV{CRUNCH_INSTALL}:$ENV{CRUNCH_INSTALL}:ro\E ";
+
+      # Currently, we make the "by_pdh" directory in arv-mount's mount
+      # point appear at /keep inside the container (instead of using
+      # the same path as the host like we do with CRUNCH_SRC and
+      # CRUNCH_INSTALL). However, crunch scripts and utilities must
+      # not rely on this. They must use $TASK_KEEPMOUNT.
+      $command .= "--volume=\Q$ENV{TASK_KEEPMOUNT}:/keep:ro\E ";
+      $ENV{TASK_KEEPMOUNT} = "/keep";
+
+      # Ditto TASK_KEEPMOUNT_TMP, as /keep_tmp.
+      $command .= "--volume=\Q$ENV{TASK_KEEPMOUNT_TMP}:/keep_tmp\E ";
+      $ENV{TASK_KEEPMOUNT_TMP} = "/keep_tmp";
+
+      # TASK_WORK is almost exactly like a docker data volume: it
+      # starts out empty, is writable, and persists until no
+      # containers use it any more. We don't use --volumes-from to
+      # share it with other containers: it is only accessible to this
+      # task, and it goes away when this task stops.
+      #
+      # However, a docker data volume is writable only by root unless
+      # the mount point already happens to exist in the container with
+      # different permissions. Therefore, we [1] assume /tmp already
+      # exists in the image and is writable by the crunch user; [2]
+      # avoid putting TASK_WORK inside CRUNCH_TMP (which won't be
+      # writable if they are created by docker while setting up the
+      # other --volumes); and [3] create $TASK_WORK inside the
+      # container using $build_script.
+      $command .= "--volume=/tmp ";
+      $ENV{"TASK_WORK"} = "/tmp/crunch-job-task-work/$childslotname";
+      $ENV{"HOME"} = $ENV{"TASK_WORK"};
+      $ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated
+
+      # TODO: Share a single JOB_WORK volume across all task
+      # containers on a given worker node, and delete it when the job
+      # ends (and, in case that doesn't work, when the next job
+      # starts).
+      #
+      # For now, use the same approach as TASK_WORK above.
+      $ENV{"JOB_WORK"} = "/tmp/crunch-job-work";
+
+      # Bind mount the crunchrunner binary and host TLS certificates file into
+      # the container.
+      $command .= '"${VOLUMES[@]}" ';
+
+      while (my ($env_key, $env_val) = each %ENV)
+      {
+        if ($env_key =~ /^(ARVADOS|CRUNCH|JOB|TASK)_/) {
+          $command .= "--env=\Q$env_key=$env_val\E ";
+        }
+      }
+      $command .= "--env=\QHOME=$ENV{HOME}\E ";
+      $command .= "\Q$docker_hash\E ";
+
+      if ($Job->{arvados_sdk_version}) {
+        $command .= $stdbuf;
+        $command .= "perl - \Q$ENV{CRUNCH_SRC}/crunch_scripts/$Job->{script}\E";
+      } else {
+        $command .= "/bin/sh -c \'python -c " .
+            '"from pkg_resources import get_distribution as get; print \"Using Arvados SDK version\", get(\"arvados-python-client\").version"' .
+            ">&2 2>/dev/null; " .
+            "mkdir -p \"$ENV{JOB_WORK}\" \"$ENV{TASK_WORK}\" && " .
+            "if which stdbuf >/dev/null ; then " .
+            "  exec $stdbuf \Q$ENV{CRUNCH_SRC}/crunch_scripts/$Job->{script}\E ;" .
+            " else " .
+            "  exec \Q$ENV{CRUNCH_SRC}/crunch_scripts/$Job->{script}\E ;" .
+            " fi\'";
+      }
+    } else {
+      # Non-docker run
+      $command .= "crunchstat -cgroup-root=\Q$cgroup_root\E -poll=10000 ";
+      $command .= $stdbuf;
+      $command .= "perl - $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
+    }
+
+    my @execargs = ('bash', '-c', $command);
+    srun (\@srunargs, \@execargs, undef, $build_script);
+    # exec() failed, we assume nothing happened.
+    die "srun() failed on build script\n";
+  }
+  close("writer");
+  if (!defined $childpid)
+  {
+    close $reader{$id};
+    delete $reader{$id};
+    next;
+  }
+  shift @freeslot;
+  $proc{$childpid} = {
+    jobstepidx => $id,
+    time => time,
+    slot => $childslot,
+    jobstepname => "$job_id.$id.$childpid",
+  };
+  croak ("assert failed: \$slot[$childslot]->{'pid'} exists") if exists $slot[$childslot]->{pid};
+  $slot[$childslot]->{pid} = $childpid;
+
+  Log ($id, "job_task ".$Jobstep->{'arvados_task'}->{'uuid'});
+  Log ($id, "child $childpid started on $childslotname");
+  $Jobstep->{starttime} = time;
+  $Jobstep->{node} = $childnode->{name};
+  $Jobstep->{slotindex} = $childslot;
+  delete $Jobstep->{stderr};
+  delete $Jobstep->{finishtime};
+  delete $Jobstep->{tempfail};
+
+  $Jobstep->{'arvados_task'}->{started_at} = strftime "%Y-%m-%dT%H:%M:%SZ", gmtime($Jobstep->{starttime});
+  retry_op(sub { $Jobstep->{'arvados_task'}->save; }, "job_tasks.update API");
+
+  splice @jobstep_todo, $todo_ptr, 1;
+  --$todo_ptr;
+
+  $progress_is_dirty = 1;
+
+  while (!@freeslot
+        ||
+        ($round_num_freeslots > @freeslot && $todo_ptr+1 > $#jobstep_todo))
+  {
+    last THISROUND if $main::please_freeze;
+    if ($main::please_info)
+    {
+      $main::please_info = 0;
+      freeze();
+      create_output_collection();
+      save_meta(1);
+      update_progress_stats();
+    }
+    my $gotsome
+       = readfrompipes ()
+       + reapchildren ();
+    if (!$gotsome || ($latest_refresh + 2 < scalar time))
+    {
+      check_refresh_wanted();
+      check_squeue();
+      update_progress_stats();
+    }
+    elsif (time - $progress_stats_updated >= 30 || $progress_is_dirty)
+    {
+      update_progress_stats();
+    }
+    if (!$gotsome) {
+      select (undef, undef, undef, 0.1);
+    }
+    $working_slot_count = scalar(grep { $_->{node}->{fail_count} == 0 &&
+                                        $_->{node}->{hold_count} < 4 } @slot);
+    if (($thisround_failed_multiple >= 8 && $thisround_succeeded == 0) ||
+       ($thisround_failed_multiple >= 16 && $thisround_failed_multiple > $thisround_succeeded))
+    {
+      my $message = "Repeated failure rate too high ($thisround_failed_multiple/"
+         .($thisround_failed+$thisround_succeeded)
+         .") -- giving up on this round";
+      Log (undef, $message);
+      last THISROUND;
+    }
+
+    # move slots from freeslot to holdslot (or back to freeslot) if necessary
+    for (my $i=$#freeslot; $i>=0; $i--) {
+      if ($slot[$freeslot[$i]]->{node}->{hold_until} > scalar time) {
+       push @holdslot, (splice @freeslot, $i, 1);
+      }
+    }
+    for (my $i=$#holdslot; $i>=0; $i--) {
+      if ($slot[$holdslot[$i]]->{node}->{hold_until} <= scalar time) {
+       push @freeslot, (splice @holdslot, $i, 1);
+      }
+    }
+
+    # give up if no nodes are succeeding
+    if ($working_slot_count < 1) {
+      Log(undef, "Every node has failed -- giving up");
+      last THISROUND;
+    }
+  }
+}
+
+
+push @freeslot, splice @holdslot;
+map { $slot[$freeslot[$_]]->{node}->{losing_streak} = 0 } (0..$#freeslot);
+
+
+Log (undef, "wait for last ".(scalar keys %proc)." children to finish");
+while (%proc)
+{
+  if ($main::please_continue) {
+    $main::please_continue = 0;
+    goto THISROUND;
+  }
+  $main::please_info = 0, freeze(), create_output_collection(), save_meta(1) if $main::please_info;
+  readfrompipes ();
+  if (!reapchildren())
+  {
+    check_refresh_wanted();
+    check_squeue();
+    update_progress_stats();
+    select (undef, undef, undef, 0.1);
+    killem (keys %proc) if $main::please_freeze;
+  }
+}
+
+update_progress_stats();
+freeze_if_want_freeze();
+
+
+if (!defined $main::success)
+{
+  if (!@jobstep_todo) {
+    $main::success = 1;
+  } elsif ($working_slot_count < 1) {
+    save_output_collection();
+    save_meta();
+    exit_retry_unlocked();
+  } elsif ($thisround_succeeded == 0 &&
+           ($thisround_failed == 0 || $thisround_failed > 4)) {
+    my $message = "stop because $thisround_failed tasks failed and none succeeded";
+    Log (undef, $message);
+    $main::success = 0;
+  }
+}
+
+goto ONELEVEL if !defined $main::success;
+
+
+release_allocation();
+freeze();
+my $collated_output = save_output_collection();
+Log (undef, "finish");
+
+my $final_log = save_meta();
+
+my $final_state;
+if ($collated_output && $final_log && $main::success) {
+  $final_state = 'Complete';
+} else {
+  $final_state = 'Failed';
+}
+$Job->update_attributes('state' => $final_state);
+
+exit (($final_state eq 'Complete') ? 0 : 1);
+
+
+
+sub update_progress_stats
+{
+  $progress_stats_updated = time;
+  return if !$progress_is_dirty;
+  my ($todo, $done, $running) = (scalar @jobstep_todo,
+                                 scalar @jobstep_done,
+                                 scalar keys(%proc));
+  $Job->{'tasks_summary'} ||= {};
+  $Job->{'tasks_summary'}->{'todo'} = $todo;
+  $Job->{'tasks_summary'}->{'done'} = $done;
+  $Job->{'tasks_summary'}->{'running'} = $running;
+  $Job->update_attributes('tasks_summary' => $Job->{'tasks_summary'});
+  Log (undef, "status: $done done, $running running, $todo todo");
+  $progress_is_dirty = 0;
+}
+
+
+
+sub reapchildren
+{
+  my $children_reaped = 0;
+  my @successful_task_uuids = ();
+
+  while((my $pid = waitpid (-1, WNOHANG)) > 0)
+  {
+    my $childstatus = $?;
+
+    my $whatslot = ($slot[$proc{$pid}->{slot}]->{node}->{name}
+                    . "."
+                    . $slot[$proc{$pid}->{slot}]->{cpu});
+    my $jobstepidx = $proc{$pid}->{jobstepidx};
+
+    readfrompipes_after_exit ($jobstepidx);
+
+    $children_reaped++;
+    my $elapsed = time - $proc{$pid}->{time};
+    my $Jobstep = $jobstep[$jobstepidx];
+
+    my $exitvalue = $childstatus >> 8;
+    my $exitinfo = "exit ".exit_status_s($childstatus);
+    $Jobstep->{'arvados_task'}->reload;
+    my $task_success = $Jobstep->{'arvados_task'}->{success};
+
+    Log ($jobstepidx, "child $pid on $whatslot $exitinfo success=$task_success");
+
+    if (!defined $task_success) {
+      # task did not indicate one way or the other --> fail
+      Log($jobstepidx, sprintf(
+            "ERROR: Task process exited %s, but never updated its task record to indicate success and record its output.",
+            exit_status_s($childstatus)));
+      $Jobstep->{'arvados_task'}->{success} = 0;
+      retry_op(sub { $Jobstep->{'arvados_task'}->save; }, "job_tasks.update API");
+      $task_success = 0;
+    }
+
+    if (!$task_success)
+    {
+      my $temporary_fail;
+      $temporary_fail ||= $Jobstep->{tempfail};
+      $temporary_fail ||= ($exitvalue == TASK_TEMPFAIL);
+
+      ++$thisround_failed;
+      ++$thisround_failed_multiple if $Jobstep->{'failures'} >= 1;
+
+      # Check for signs of a failed or misconfigured node
+      if (++$slot[$proc{$pid}->{slot}]->{node}->{losing_streak} >=
+          2+$slot[$proc{$pid}->{slot}]->{node}->{ncpus}) {
+        # Don't count this against jobstep failure thresholds if this
+        # node is already suspected faulty and srun exited quickly
+        if ($slot[$proc{$pid}->{slot}]->{node}->{hold_until} &&
+            $elapsed < 5) {
+          Log ($jobstepidx, "blaming failure on suspect node " .
+               $slot[$proc{$pid}->{slot}]->{node}->{name});
+          $temporary_fail ||= 1;
+        }
+        ban_node_by_slot($proc{$pid}->{slot});
+      }
+
+      Log ($jobstepidx, sprintf('failure (#%d, %s) after %d seconds',
+                                ++$Jobstep->{'failures'},
+                                $temporary_fail ? 'temporary' : 'permanent',
+                                $elapsed));
+
+      if (!$temporary_fail || $Jobstep->{'failures'} >= 3) {
+        # Give up on this task, and the whole job
+        $main::success = 0;
+      }
+      # Put this task back on the todo queue
+      push @jobstep_todo, $jobstepidx;
+      $Job->{'tasks_summary'}->{'failed'}++;
+    }
+    else # task_success
+    {
+      push @successful_task_uuids, $Jobstep->{'arvados_task'}->{uuid};
+      ++$thisround_succeeded;
+      $slot[$proc{$pid}->{slot}]->{node}->{losing_streak} = 0;
+      $slot[$proc{$pid}->{slot}]->{node}->{hold_until} = 0;
+      $slot[$proc{$pid}->{slot}]->{node}->{fail_count} = 0;
+      push @jobstep_done, $jobstepidx;
+      Log ($jobstepidx, "success in $elapsed seconds");
+    }
+    $Jobstep->{exitcode} = $childstatus;
+    $Jobstep->{finishtime} = time;
+    $Jobstep->{'arvados_task'}->{finished_at} = strftime "%Y-%m-%dT%H:%M:%SZ", gmtime($Jobstep->{finishtime});
+    retry_op(sub { $Jobstep->{'arvados_task'}->save; }, "job_tasks.update API");
+    Log ($jobstepidx, sprintf("task output (%d bytes): %s",
+                              length($Jobstep->{'arvados_task'}->{output}),
+                              $Jobstep->{'arvados_task'}->{output}));
+
+    close $reader{$jobstepidx};
+    delete $reader{$jobstepidx};
+    delete $slot[$proc{$pid}->{slot}]->{pid};
+    push @freeslot, $proc{$pid}->{slot};
+    delete $proc{$pid};
+
+    $progress_is_dirty = 1;
+  }
+
+  if (scalar(@successful_task_uuids) > 0)
+  {
+    Log (undef, sprintf("%d tasks exited (%d succeeded), checking for new tasks from API server.", $children_reaped, scalar(@successful_task_uuids)));
+    # Load new tasks
+    my $newtask_list = [];
+    my $newtask_results;
+    do {
+      $newtask_results = api_call(
+        "job_tasks/list",
+        'filters' => [["created_by_job_task_uuid","in",\@successful_task_uuids]],
+        'order' => 'qsequence',
+        'offset' => scalar(@$newtask_list),
+          );
+      push(@$newtask_list, @{$newtask_results->{items}});
+    } while (@{$newtask_results->{items}});
+    Log (undef, sprintf("Got %d new tasks from API server.", scalar(@$newtask_list)));
+    foreach my $arvados_task (@$newtask_list) {
+      my $jobstep = {
+        'level' => $arvados_task->{'sequence'},
+        'failures' => 0,
+        'arvados_task' => $arvados_task
+      };
+      push @jobstep, $jobstep;
+      push @jobstep_todo, $#jobstep;
+    }
+  }
+
+  return $children_reaped;
+}
+
+sub check_refresh_wanted
+{
+  my @stat = stat $ENV{"CRUNCH_REFRESH_TRIGGER"};
+  if (@stat &&
+      $stat[9] > $latest_refresh &&
+      # ...and we have actually locked the job record...
+      $job_id eq $Job->{'uuid'}) {
+    $latest_refresh = scalar time;
+    my $Job2 = api_call("jobs/get", uuid => $jobspec);
+    for my $attr ('cancelled_at',
+                  'cancelled_by_user_uuid',
+                  'cancelled_by_client_uuid',
+                  'state') {
+      $Job->{$attr} = $Job2->{$attr};
+    }
+    if ($Job->{'state'} ne "Running") {
+      if ($Job->{'state'} eq "Cancelled") {
+        Log (undef, "Job cancelled at " . $Job->{'cancelled_at'} . " by user " . $Job->{'cancelled_by_user_uuid'});
+      } else {
+        Log (undef, "Job state unexpectedly changed to " . $Job->{'state'});
+      }
+      $main::success = 0;
+      $main::please_freeze = 1;
+    }
+  }
+}
+
+sub check_squeue
+{
+  my $last_squeue_check = $squeue_checked;
+
+  # Do not call `squeue` or check the kill list more than once every
+  # 15 seconds.
+  return if $last_squeue_check > time - 15;
+  $squeue_checked = time;
+
+  # Look for children from which we haven't received stderr data since
+  # the last squeue check. If no such children exist, all procs are
+  # alive and there's no need to even look at squeue.
+  #
+  # As long as the crunchstat poll interval (10s) is shorter than the
+  # squeue check interval (15s) this should make the squeue check an
+  # infrequent event.
+  my $silent_procs = 0;
+  for my $js (map {$jobstep[$_->{jobstepidx}]} values %proc)
+  {
+    if (!exists($js->{stderr_at}))
+    {
+      $js->{stderr_at} = 0;
+    }
+    if ($js->{stderr_at} < $last_squeue_check)
+    {
+      $silent_procs++;
+    }
+  }
+  return if $silent_procs == 0;
+
+  # use killem() on procs whose killtime is reached
+  while (my ($pid, $procinfo) = each %proc)
+  {
+    my $js = $jobstep[$procinfo->{jobstepidx}];
+    if (exists $procinfo->{killtime}
+        && $procinfo->{killtime} <= time
+        && $js->{stderr_at} < $last_squeue_check)
+    {
+      my $sincewhen = "";
+      if ($js->{stderr_at}) {
+        $sincewhen = " in last " . (time - $js->{stderr_at}) . "s";
+      }
+      Log($procinfo->{jobstepidx}, "killing orphaned srun process $pid (task not in slurm queue, no stderr received$sincewhen)");
+      killem ($pid);
+    }
+  }
+
+  if (!$have_slurm)
+  {
+    # here is an opportunity to check for mysterious problems with local procs
+    return;
+  }
+
+  # Get a list of steps still running.  Note: squeue(1) says --steps
+  # selects a format (which we override anyway) and allows us to
+  # specify which steps we're interested in (which we don't).
+  # Importantly, it also changes the meaning of %j from "job name" to
+  # "step name" and (although this isn't mentioned explicitly in the
+  # docs) switches from "one line per job" mode to "one line per step"
+  # mode. Without it, we'd just get a list of one job, instead of a
+  # list of N steps.
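+  # Illustrative output: one step name per line, which we collect into
+  # %ok below and compare against each $procinfo->{jobstepname}.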
+  my @squeue = `squeue --jobs=\Q$ENV{SLURM_JOB_ID}\E --steps --format='%j' --noheader`;
+  if ($? != 0)
+  {
+    Log(undef, "warning: squeue exit status $? ($!)");
+    return;
+  }
+  chop @squeue;
+
+  # which of my jobsteps are running, according to squeue?
+  my %ok;
+  for my $jobstepname (@squeue)
+  {
+    $ok{$jobstepname} = 1;
+  }
+
+  # Check for child procs >60s old and not mentioned by squeue.
+  while (my ($pid, $procinfo) = each %proc)
+  {
+    if ($procinfo->{time} < time - 60
+        && $procinfo->{jobstepname}
+        && !exists $ok{$procinfo->{jobstepname}}
+        && !exists $procinfo->{killtime})
+    {
+      # According to slurm, this task has ended (successfully or not)
+      # -- but our srun child hasn't exited. First we must wait (30
+      # seconds) in case this is just a race between communication
+      # channels. Then, if our srun child process still hasn't
+      # terminated, we'll conclude some slurm communication
+      # error/delay has caused the task to die without notifying srun,
+      # and we'll kill srun ourselves.
+      $procinfo->{killtime} = time + 30;
+      Log($procinfo->{jobstepidx}, "notice: task is not in slurm queue but srun process $pid has not exited");
+    }
+  }
+}
+
+sub check_sinfo
+{
+  # If a node fails in a multi-node "srun" call during job setup, the call
+  # may hang instead of exiting with a nonzero code.  This function checks
+  # "sinfo" for the health of the nodes that were allocated and ensures that
+  # they are all still in the "alloc" state.  If a node that is allocated to
+  # this job is not in "alloc" state, then set please_freeze.
+  #
+  # This is only called from srun_sync() for node configuration.  If a
+  # node fails doing actual work, there are other recovery mechanisms.
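+  #
+  # Illustrative sinfo output: one state string per allocated node, e.g.
+  # "alloc" for a healthy node or "drain"/"down" for a failed one
+  # (anything other than "alloc"/"alloc*" sets please_freeze below).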
+
+  # Do not call `sinfo` more than once every 15 seconds.
+  return if $sinfo_checked > time - 15;
+  $sinfo_checked = time;
+
+  # The output format "%t" means output node states.
+  my @sinfo = `sinfo --nodes=\Q$ENV{SLURM_NODELIST}\E --noheader -o "%t"`;
+  if ($? != 0)
+  {
+    Log(undef, "warning: sinfo exit status $? ($!)");
+    return;
+  }
+  chop @sinfo;
+
+  foreach (@sinfo)
+  {
+    if ($_ != "alloc" && $_ != "alloc*") {
+      $main::please_freeze = 1;
+    }
+  }
+}
+
+sub release_allocation
+{
+  if ($have_slurm)
+  {
+    Log (undef, "release job allocation");
+    system "scancel $ENV{SLURM_JOB_ID}";
+  }
+}
+
+
+sub readfrompipes
+{
+  my $gotsome = 0;
+  my %fd_job;
+  my $sel = IO::Select->new();
+  foreach my $jobstepidx (keys %reader)
+  {
+    my $fd = $reader{$jobstepidx};
+    $sel->add($fd);
+    $fd_job{$fd} = $jobstepidx;
+
+    if (my $stdout_fd = $jobstep[$jobstepidx]->{stdout_r}) {
+      $sel->add($stdout_fd);
+      $fd_job{$stdout_fd} = $jobstepidx;
+    }
+  }
+  # select on all reader fds with 0.1s timeout
+  my @ready_fds = $sel->can_read(0.1);
+  foreach my $fd (@ready_fds)
+  {
+    my $buf;
+    if (0 < sysread ($fd, $buf, 65536))
+    {
+      $gotsome = 1;
+      print STDERR $buf if $ENV{CRUNCH_DEBUG};
+
+      my $jobstepidx = $fd_job{$fd};
+      if ($jobstep[$jobstepidx]->{stdout_r} == $fd) {
+        $jobstep[$jobstepidx]->{stdout_captured} .= $buf;
+        next;
+      }
+
+      $jobstep[$jobstepidx]->{stderr_at} = time;
+      $jobstep[$jobstepidx]->{stderr} .= $buf;
+
+      # Consume everything up to the last \n
+      preprocess_stderr ($jobstepidx);
+
+      if (length ($jobstep[$jobstepidx]->{stderr}) > 16384)
+      {
+        # If we get a lot of stderr without a newline, chop off the
+        # front to avoid letting our buffer grow indefinitely.
+        substr ($jobstep[$jobstepidx]->{stderr},
+                0, length($jobstep[$jobstepidx]->{stderr}) - 8192) = "";
+      }
+    }
+  }
+  return $gotsome;
+}
+
+
+# Consume all full lines of stderr for a jobstep. Everything after the
+# last newline will remain in $jobstep[$jobstepidx]->{stderr} after
+# returning.
+sub preprocess_stderr
+{
+  my $jobstepidx = shift;
+  # slotindex is only defined for children running Arvados job tasks.
+  # Be prepared to handle the undef case (for setup srun calls, etc.).
+  my $job_slot_index = $jobstep[$jobstepidx]->{slotindex};
+
+  while ($jobstep[$jobstepidx]->{stderr} =~ /^(.*?)\n/) {
+    my $line = $1;
+    substr $jobstep[$jobstepidx]->{stderr}, 0, 1+length($line), "";
+    Log ($jobstepidx, "stderr $line");
+    if ($line =~ /srun: error: (SLURM job $ENV{SLURM_JOB_ID} has expired|Unable to confirm allocation for job $ENV{SLURM_JOB_ID})/i) {
+      # If the allocation is revoked, we can't possibly continue, so mark all
+      # nodes as failed.  This will cause the overall exit code to be
+      # EX_RETRY_UNLOCKED instead of failure so that crunch_dispatch can re-run
+      # this job.
+      $main::please_freeze = 1;
+      foreach my $st (@slot) {
+        $st->{node}->{fail_count}++;
+      }
+    }
+    elsif ($line =~ /srun: error: .*?\b(Node failure on|Aborting, .*?\bio error\b|cannot communicate with node .* aborting job)/i) {
+      $jobstep[$jobstepidx]->{tempfail} = 1;
+      if (defined($job_slot_index)) {
+        $slot[$job_slot_index]->{node}->{fail_count}++;
+        ban_node_by_slot($job_slot_index);
+      }
+    }
+    elsif ($line =~ /srun: error: (Unable to create job step|.*?: Communication connection failure)/i) {
+      $jobstep[$jobstepidx]->{tempfail} = 1;
+      ban_node_by_slot($job_slot_index) if (defined($job_slot_index));
+    }
+    elsif ($line =~ /\bKeep(Read|Write|Request)Error:/) {
+      $jobstep[$jobstepidx]->{tempfail} = 1;
+    }
+  }
+}
+
+
+# Read whatever is still available on the given child process's
+# stderr+stdout pipes after it has exited.
+sub readfrompipes_after_exit
+{
+  my $jobstepidx = shift;
+
+  # The fact that the child has exited allows some convenient
+  # simplifications: (1) all data must have already been written, so
+  # there's no need to wait for more once sysread returns 0; (2) the
+  # total amount of data available is bounded by the pipe buffer size,
+  # so it's safe to read everything into one string.
+  my $buf;
+  while (0 < sysread ($reader{$jobstepidx}, $buf, 65536)) {
+    $jobstep[$jobstepidx]->{stderr_at} = time;
+    $jobstep[$jobstepidx]->{stderr} .= $buf;
+  }
+  if ($jobstep[$jobstepidx]->{stdout_r}) {
+    while (0 < sysread ($jobstep[$jobstepidx]->{stdout_r}, $buf, 65536)) {
+      $jobstep[$jobstepidx]->{stdout_captured} .= $buf;
+    }
+  }
+  preprocess_stderr ($jobstepidx);
+
+  map {
+    Log ($jobstepidx, "stderr $_");
+  } split ("\n", $jobstep[$jobstepidx]->{stderr});
+  $jobstep[$jobstepidx]->{stderr} = '';
+}
+
+sub fetch_block
+{
+  my $hash = shift;
+  my $keep;
+  if (!open($keep, "-|", "arv-get", "--retries", retry_count(), $hash)) {
+    Log(undef, "fetch_block run error from arv-get $hash: $!");
+    return undef;
+  }
+  my $output_block = "";
+  while (1) {
+    my $buf;
+    my $bytes = sysread($keep, $buf, 1024 * 1024);
+    if (!defined $bytes) {
+      Log(undef, "fetch_block read error from arv-get: $!");
+      $output_block = undef;
+      last;
+    } elsif ($bytes == 0) {
+      # sysread returns 0 at the end of the pipe.
+      last;
+    } else {
+      # some bytes were read into buf.
+      $output_block .= $buf;
+    }
+  }
+  close $keep;
+  if ($?) {
+    Log(undef, "fetch_block arv-get exited " . exit_status_s($?));
+    $output_block = undef;
+  }
+  return $output_block;
+}
+
+# Create a collection by concatenating the output of all tasks (each
+# task's output is either a manifest fragment, a locator for a
+# manifest fragment stored in Keep, or nothing at all). Return the
+# portable_data_hash of the new collection.
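+#
+# An individual task output (illustrative) is therefore either a block
+# locator such as "acbd18db4cc2f85cedef654fccc4a4d8+3" (fetched from Keep
+# via fetch_block) or a literal manifest fragment such as
+# ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n".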
+sub create_output_collection
+{
+  Log (undef, "collate");
+
+  my ($child_out, $child_in);
+  my $pid = open2($child_out, $child_in, 'python', '-c', q{
+import arvados
+import sys
+print (arvados.api("v1").collections().
+       create(body={"manifest_text": sys.stdin.read(),
+                    "owner_uuid": sys.argv[2]}).
+       execute(num_retries=int(sys.argv[1]))["portable_data_hash"])
+}, retry_count(), $Job->{owner_uuid});
+
+  my $task_idx = -1;
+  my $manifest_size = 0;
+  for (@jobstep)
+  {
+    ++$task_idx;
+    my $output = $_->{'arvados_task'}->{output};
+    next if (!defined($output));
+    my $next_write;
+    if ($output =~ /^[0-9a-f]{32}(\+\S+)*$/) {
+      $next_write = fetch_block($output);
+    } else {
+      $next_write = $output;
+    }
+    if (defined($next_write)) {
+      if (!defined(syswrite($child_in, $next_write))) {
+        # There's been an error writing.  Stop the loop.
+        # We'll log details about the exit code later.
+        last;
+      } else {
+        $manifest_size += length($next_write);
+      }
+    } else {
+      my $uuid = $_->{'arvados_task'}->{'uuid'};
+      Log (undef, "Error retrieving '$output' output by task $task_idx ($uuid)");
+      $main::success = 0;
+    }
+  }
+  close($child_in);
+  Log(undef, "collated output manifest text to send to API server is $manifest_size bytes with access tokens");
+
+  my $joboutput;
+  my $s = IO::Select->new($child_out);
+  if ($s->can_read(120)) {
+    sysread($child_out, $joboutput, 1024 * 1024);
+    waitpid($pid, 0);
+    if ($?) {
+      Log(undef, "output collection creation exited " . exit_status_s($?));
+      $joboutput = undef;
+    } else {
+      chomp($joboutput);
+    }
+  } else {
+    Log (undef, "timed out while creating output collection");
+    foreach my $signal (2, 2, 2, 15, 15, 9) {
+      kill($signal, $pid);
+      last if waitpid($pid, WNOHANG) == -1;
+      sleep(1);
+    }
+  }
+  close($child_out);
+
+  return $joboutput;
+}
+
+# Calls create_output_collection, logs the result, and returns it.
+# If that was successful, save that as the output in the job record.
+sub save_output_collection {
+  my $collated_output = create_output_collection();
+
+  if (!$collated_output) {
+    Log(undef, "Failed to write output collection");
+  }
+  else {
+    Log(undef, "job output $collated_output");
+    $Job->update_attributes('output' => $collated_output);
+  }
+  return $collated_output;
+}
+
+sub killem
+{
+  foreach (@_)
+  {
+    my $sig = 2;               # SIGINT first
+    if (exists $proc{$_}->{"sent_$sig"} &&
+       time - $proc{$_}->{"sent_$sig"} > 4)
+    {
+      $sig = 15;               # SIGTERM if SIGINT doesn't work
+    }
+    if (exists $proc{$_}->{"sent_$sig"} &&
+       time - $proc{$_}->{"sent_$sig"} > 4)
+    {
+      $sig = 9;                        # SIGKILL if SIGTERM doesn't work
+    }
+    if (!exists $proc{$_}->{"sent_$sig"})
+    {
+      Log ($proc{$_}->{jobstepidx}, "sending 2x signal $sig to pid $_");
+      kill $sig, $_;
+      select (undef, undef, undef, 0.1);
+      if ($sig == 2)
+      {
+       kill $sig, $_;     # srun wants two SIGINT to really interrupt
+      }
+      $proc{$_}->{"sent_$sig"} = time;
+      $proc{$_}->{"killedafter"} = time - $proc{$_}->{"time"};
+    }
+  }
+}
+
+
+sub fhbits
+{
+  my($bits);
+  for (@_) {
+    vec($bits,fileno($_),1) = 1;
+  }
+  $bits;
+}
+
+
+# Send log output to Keep via arv-put.
+#
+# $log_pipe_in and $log_pipe_out are the input and output filehandles to the arv-put pipe.
+# $log_pipe_out_buf is a string containing all output read from arv-put so far.
+# $log_pipe_out_select is an IO::Select object around $log_pipe_out.
+# $log_pipe_pid is the pid of the arv-put subprocess.
+#
+# The only functions that should access these variables directly are:
+#
+# log_writer_start($logfilename)
+#     Starts an arv-put pipe, reading data on stdin and writing it to
+#     a $logfilename file in an output collection.
+#
+# log_writer_read_output([$timeout])
+#     Read output from $log_pipe_out and append it to $log_pipe_out_buf.
+#     Passes $timeout to the select() call, with a default of 0.01.
+#     Returns the result of the last read() call on $log_pipe_out, or
+#     -1 if read() wasn't called because select() timed out.
+#     Only other log_writer_* functions should need to call this.
+#
+# log_writer_send($txt)
+#     Writes $txt to the output log collection.
+#
+# log_writer_finish()
+#     Closes the arv-put pipe and returns the output that it produces.
+#
+# log_writer_is_active()
+#     Returns a true value if there is currently a live arv-put
+#     process, false otherwise.
+#
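+# Typical lifecycle, as used elsewhere in this file (sketch; the
+# filename is illustrative):
+#
+#     log_writer_start("crunch-job.txt");
+#     log_writer_send($datetime . " " . $message);
+#     my $log_manifest = log_writer_finish();
+#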
+my ($log_pipe_in, $log_pipe_out, $log_pipe_out_buf, $log_pipe_out_select,
+    $log_pipe_pid);
+
+sub log_writer_start($)
+{
+  my $logfilename = shift;
+  $log_pipe_pid = open2($log_pipe_out, $log_pipe_in,
+                        'arv-put',
+                        '--stream',
+                        '--retries', '6',
+                        '--filename', $logfilename,
+                        '-');
+  $log_pipe_out_buf = "";
+  $log_pipe_out_select = IO::Select->new($log_pipe_out);
+}
+
+sub log_writer_read_output {
+  my $timeout = shift || 0.01;
+  my $read = -1;
+  while ($read && $log_pipe_out_select->can_read($timeout)) {
+    $read = read($log_pipe_out, $log_pipe_out_buf, 65536,
+                 length($log_pipe_out_buf));
+  }
+  if (!defined($read)) {
+    Log(undef, "error reading log manifest from arv-put: $!");
+  }
+  return $read;
+}
+
+sub log_writer_send($)
+{
+  my $txt = shift;
+  print $log_pipe_in $txt;
+  log_writer_read_output();
+}
+
+sub log_writer_finish()
+{
+  return unless $log_pipe_pid;
+
+  close($log_pipe_in);
+
+  my $logger_failed = 0;
+  my $read_result = log_writer_read_output(600);
+  if ($read_result == -1) {
+    $logger_failed = -1;
+    Log (undef, "timed out reading from 'arv-put'");
+  } elsif ($read_result != 0) {
+    $logger_failed = -2;
+    Log(undef, "failed to read arv-put log manifest to EOF");
+  }
+
+  waitpid($log_pipe_pid, 0);
+  if ($?) {
+    $logger_failed ||= $?;
+    Log(undef, "log_writer_finish: arv-put exited " . exit_status_s($?))
+  }
+
+  close($log_pipe_out);
+  my $arv_put_output = $logger_failed ? undef : $log_pipe_out_buf;
+  $log_pipe_pid = $log_pipe_in = $log_pipe_out = $log_pipe_out_buf =
+      $log_pipe_out_select = undef;
+
+  return $arv_put_output;
+}
+
+sub log_writer_is_active() {
+  return $log_pipe_pid;
+}
+
+sub Log                                # ($jobstepidx, $logmessage)
+{
+  my ($jobstepidx, $logmessage) = @_;
+  if ($logmessage =~ /\n/) {
+    for my $line (split (/\n/, $_[1])) {
+      Log ($jobstepidx, $line);
+    }
+    return;
+  }
+  my $fh = select STDERR; $|=1; select $fh;
+  my $task_qseq = '';
+  if (defined($jobstepidx) && exists($jobstep[$jobstepidx]->{arvados_task})) {
+    $task_qseq = $jobstepidx;
+  }
+  my $message = sprintf ("%s %d %s %s", $job_id, $$, $task_qseq, $logmessage);
+  $message =~ s{([^ -\176])}{"\\" . sprintf ("%03o", ord($1))}ge;
+  $message .= "\n";
+  my $datetime;
+  if (log_writer_is_active() || -t STDERR) {
+    my @gmtime = gmtime;
+    $datetime = sprintf ("%04d-%02d-%02d_%02d:%02d:%02d",
+                        $gmtime[5]+1900, $gmtime[4]+1, @gmtime[3,2,1,0]);
+  }
+  print STDERR ((-t STDERR) ? ($datetime." ".$message) : $message);
+
+  if (log_writer_is_active()) {
+    log_writer_send($datetime . " " . $message);
+  }
+}
+
+
+sub croak
+{
+  my ($package, $file, $line) = caller;
+  my $message = "@_ at $file line $line\n";
+  Log (undef, $message);
+  release_allocation();
+  freeze() if @jobstep_todo;
+  create_output_collection() if @jobstep_todo;
+  cleanup();
+  save_meta();
+  die;
+}
+
+
+sub cleanup
+{
+  return unless $Job;
+  if ($Job->{'state'} eq 'Cancelled') {
+    $Job->update_attributes('finished_at' => scalar gmtime);
+  } else {
+    $Job->update_attributes('state' => 'Failed');
+  }
+}
+
+
+sub save_meta
+{
+  my $justcheckpoint = shift; # false if this will be the last meta saved
+  return if $justcheckpoint;  # checkpointing is not relevant post-Warehouse.pm
+  return unless log_writer_is_active();
+  my $log_manifest = log_writer_finish();
+  return unless defined($log_manifest);
+
+  if ($Job->{log}) {
+    my $prev_log_coll = api_call("collections/get", uuid => $Job->{log});
+    $log_manifest = $prev_log_coll->{manifest_text} . $log_manifest;
+  }
+
+  my $log_coll = api_call(
+    "collections/create", ensure_unique_name => 1, collection => {
+      manifest_text => $log_manifest,
+      owner_uuid => $Job->{owner_uuid},
+      name => sprintf("Log from %s job %s", $Job->{script}, $Job->{uuid}),
+    });
+  Log(undef, "log collection is " . $log_coll->{portable_data_hash});
+  $Job->update_attributes('log' => $log_coll->{portable_data_hash});
+
+  return $log_coll->{portable_data_hash};
+}
+
+
+sub freeze_if_want_freeze
+{
+  if ($main::please_freeze)
+  {
+    release_allocation();
+    if (@_)
+    {
+      # kill some srun procs before freeze+stop
+      map { $proc{$_} = {} } @_;
+      while (%proc)
+      {
+       killem (keys %proc);
+       select (undef, undef, undef, 0.1);
+       my $died;
+       while (($died = waitpid (-1, WNOHANG)) > 0)
+       {
+         delete $proc{$died};
+       }
+      }
+    }
+    freeze();
+    create_output_collection();
+    cleanup();
+    save_meta();
+    exit 1;
+  }
+}
+
+
+sub freeze
+{
+  Log (undef, "Freeze not implemented");
+  return;
+}
+
+
+sub thaw
+{
+  croak ("Thaw not implemented");
+}
+
+
+sub freezequote
+{
+  my $s = shift;
+  $s =~ s/\\/\\\\/g;
+  $s =~ s/\n/\\n/g;
+  return $s;
+}
+
+
+sub freezeunquote
+{
+  my $s = shift;
+  $s =~ s{\\(.)}{$1 eq "n" ? "\n" : $1}ge;
+  return $s;
+}
+
+sub srun_sync
+{
+  my $srunargs = shift;
+  my $execargs = shift;
+  my $opts = shift || {};
+  my $stdin = shift;
+
+  my $label = exists $opts->{label} ? $opts->{label} : "@$execargs";
+  Log (undef, "$label: start");
+
+  my ($stderr_r, $stderr_w);
+  pipe $stderr_r, $stderr_w or croak("pipe() failed: $!");
+
+  my ($stdout_r, $stdout_w);
+  pipe $stdout_r, $stdout_w or croak("pipe() failed: $!");
+
+  my $started_srun = scalar time;
+
+  my $srunpid = fork();
+  if ($srunpid == 0)
+  {
+    close($stderr_r);
+    close($stdout_r);
+    fcntl($stderr_w, F_SETFL, 0) or croak($!); # no close-on-exec
+    fcntl($stdout_w, F_SETFL, 0) or croak($!);
+    open(STDERR, ">&", $stderr_w) or croak ($!);
+    open(STDOUT, ">&", $stdout_w) or croak ($!);
+    srun ($srunargs, $execargs, $opts, $stdin);
+    exit (1);
+  }
+  close($stderr_w);
+  close($stdout_w);
+
+  set_nonblocking($stderr_r);
+  set_nonblocking($stdout_r);
+
+  # Add entries to @jobstep and %proc so check_squeue() and
+  # freeze_if_want_freeze() can treat it like a job task process.
+  push @jobstep, {
+    stderr => '',
+    stderr_at => 0,
+    stderr_captured => '',
+    stdout_r => $stdout_r,
+    stdout_captured => '',
+  };
+  my $jobstepidx = $#jobstep;
+  $proc{$srunpid} = {
+    jobstepidx => $jobstepidx,
+  };
+  $reader{$jobstepidx} = $stderr_r;
+
+  while ($srunpid != waitpid ($srunpid, WNOHANG)) {
+    my $busy = readfrompipes();
+    if (!$busy || ($latest_refresh + 2 < scalar time)) {
+      check_refresh_wanted();
+      check_squeue();
+      check_sinfo();
+    }
+    if (!$busy) {
+      select(undef, undef, undef, 0.1);
+    }
+    if (($started_srun + $srun_sync_timeout) < scalar time) {
+      # Exceeded general timeout for "srun_sync" operations, likely
+      # means something got stuck on the remote node.
+      Log(undef, "srun_sync exceeded timeout, will fail.");
+      $main::please_freeze = 1;
+    }
+    killem(keys %proc) if $main::please_freeze;
+  }
+  my $exited = $?;
+
+  readfrompipes_after_exit ($jobstepidx);
+
+  Log (undef, "$label: exit ".exit_status_s($exited));
+
+  close($stdout_r);
+  close($stderr_r);
+  delete $proc{$srunpid};
+  delete $reader{$jobstepidx};
+
+  my $j = pop @jobstep;
+  # If the srun showed signs of tempfail, ensure the caller treats that as a
+  # failure case.
+  if ($main::please_freeze || $j->{tempfail}) {
+    $exited ||= 255;
+  }
+  return ($exited, $j->{stdout_captured}, $j->{stderr_captured}, $j->{tempfail});
+}
+
+
+sub srun
+{
+  my $srunargs = shift;
+  my $execargs = shift;
+  my $opts = shift || {};
+  my $stdin = shift;
+  my $args = $have_slurm ? [@$srunargs, @$execargs] : $execargs;
+
+  $Data::Dumper::Terse = 1;
+  $Data::Dumper::Indent = 0;
+  my $show_cmd = Dumper($args);
+  $show_cmd =~ s/(TOKEN\\*=)[^\s\']+/${1}[...]/g;
+  $show_cmd =~ s/\n/ /g;
+  if ($opts->{fork}) {
+    Log(undef, "starting: $show_cmd");
+  } else {
+    # This is a child process: parent is in charge of reading our
+    # stderr and copying it to Log() if needed.
+    warn "starting: $show_cmd\n";
+  }
+
+  if (defined $stdin) {
+    my $child = open STDIN, "-|";
+    defined $child or die "no fork: $!";
+    if ($child == 0) {
+      print $stdin or die $!;
+      close STDOUT or die $!;
+      exit 0;
+    }
+  }
+
+  return system (@$args) if $opts->{fork};
+
+  exec @$args;
+  warn "ENV size is ".length(join(" ",%ENV));
+  die "exec failed: $!: @$args";
+}
+
+
+sub ban_node_by_slot {
+  # Don't start any new jobsteps on this node for 60 seconds
+  my $slotid = shift;
+  $slot[$slotid]->{node}->{hold_until} = 60 + scalar time;
+  $slot[$slotid]->{node}->{hold_count}++;
+  Log (undef, "backing off node " . $slot[$slotid]->{node}->{name} . " for 60 seconds");
+}
+
+sub must_lock_now
+{
+  my ($lockfile, $error_message) = @_;
+  open L, ">", $lockfile or croak("$lockfile: $!");
+  if (!flock L, LOCK_EX|LOCK_NB) {
+    croak("Can't lock $lockfile: $error_message\n");
+  }
+}
+
+sub find_docker_image {
+  # Given a Keep locator, check to see if it contains a Docker image.
+  # If so, return its stream name and Docker hash.
+  # If not, return undef for both values.
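+  # Illustrative case: a collection holding the single file
+  # "sha256:<64 hex digits>.tar" in stream "." returns (".", "sha256:<hash>").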
+  my $locator = shift;
+  my ($streamname, $filename);
+  my $image = api_call("collections/get", uuid => $locator);
+  if ($image) {
+    foreach my $line (split(/\n/, $image->{manifest_text})) {
+      my @tokens = split(/\s+/, $line);
+      next if (!@tokens);
+      $streamname = shift(@tokens);
+      foreach my $filedata (grep(/^\d+:\d+:/, @tokens)) {
+        if (defined($filename)) {
+          return (undef, undef);  # More than one file in the Collection.
+        } else {
+          $filename = (split(/:/, $filedata, 3))[2];
+          $filename =~ s/\\([0-3][0-7][0-7])/chr(oct($1))/ge;
+        }
+      }
+    }
+  }
+  if (defined($filename) and ($filename =~ /^((?:sha256:)?[0-9A-Fa-f]{64})\.tar$/)) {
+    return ($streamname, $1);
+  } else {
+    return (undef, undef);
+  }
+}
+
+sub exit_retry_unlocked {
+  Log(undef, "Transient failure with lock acquired; asking for re-dispatch by exiting ".EX_RETRY_UNLOCKED);
+  exit(EX_RETRY_UNLOCKED);
+}
+
+sub retry_count {
+  # Calculate the number of times an operation should be retried,
+  # assuming exponential backoff, and that we're willing to retry as
+  # long as tasks have been running.  Enforce a minimum of 3 retries.
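+  # Worked example (illustrative): if tasks have been running for about
+  # 300 seconds, int(log(300)/log(2)) == 8 retries; with no tasks started
+  # yet, $timediff is 0 and the 3-retry minimum applies.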
+  my ($starttime, $endtime, $timediff, $retries);
+  if (@jobstep) {
+    $starttime = $jobstep[0]->{starttime};
+    $endtime = $jobstep[-1]->{finishtime};
+  }
+  if (!defined($starttime)) {
+    $timediff = 0;
+  } elsif (!defined($endtime)) {
+    $timediff = time - $starttime;
+  } else {
+    $timediff = ($endtime - $starttime) - (time - $endtime);
+  }
+  if ($timediff > 0) {
+    $retries = int(log($timediff) / log(2));
+  } else {
+    $retries = 1;  # Use the minimum.
+  }
+  return ($retries > 3) ? $retries : 3;
+}
+
+sub retry_op {
+  # Pass in a function reference and a short description of the operation.
+  # The function will be called with the remaining arguments.
+  # If it dies, retry it with exponential backoff until it succeeds,
+  # or until the current retry_count is exhausted.  After each failure
+  # that can be retried, a log line reports the current try count
+  # (0-based), the next try time, and the error message.
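+  #
+  # Usage, as elsewhere in this file:
+  #   retry_op(sub { $Jobstep->{'arvados_task'}->save; }, "job_tasks.update API");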
+  my $operation = shift;
+  my $op_text = shift;
+  my $retries = retry_count();
+  my $retry_callback = sub {
+    my ($try_count, $next_try_at, $errmsg) = @_;
+    $errmsg =~ s/\s*\bat \Q$0\E line \d+\.?\s*//;
+    $errmsg =~ s/\s/ /g;
+    $errmsg =~ s/\s+$//;
+    my $retry_msg;
+    if ($next_try_at < time) {
+      $retry_msg = "Retrying.";
+    } else {
+      my $next_try_fmt = strftime "%Y-%m-%dT%H:%M:%SZ", gmtime($next_try_at);
+      $retry_msg = "Retrying at $next_try_fmt.";
+    }
+    Log(undef, "$op_text failed: $errmsg. $retry_msg");
+  };
+  foreach my $try_count (0..$retries) {
+    my $next_try = time + (2 ** $try_count);
+    my $result = eval { $operation->(@_); };
+    if (!$@) {
+      return $result;
+    } elsif ($try_count < $retries) {
+      $retry_callback->($try_count, $next_try, $@);
+      my $sleep_time = $next_try - time;
+      sleep($sleep_time) if ($sleep_time > 0);
+    }
+  }
+  # Ensure the error message ends in a newline, so Perl doesn't add
+  # retry_op's line number to it.
+  chomp($@);
+  die($@ . "\n");
+}
+
+sub api_call {
+  # Pass in a /-separated API method name, and arguments for it.
+  # This function will call that method, retrying as needed until
+  # the current retry_count is exhausted, with a log on the first failure.
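+  #
+  # Usage, as elsewhere in this file:
+  #   my $Job2 = api_call("jobs/get", uuid => $jobspec);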
+  my $method_name = shift;
+  my $method = $arv;
+  foreach my $key (split(/\//, $method_name)) {
+    $method = $method->{$key};
+  }
+  return retry_op(sub { $method->execute(@_); }, "API method $method_name", @_);
+}
+
+sub exit_status_s {
+  # Given a $?, return a human-readable exit code string like "0" or
+  # "1" or "0 with signal 1" or "1 with signal 11".
+  my $exitcode = shift;
+  my $s = $exitcode >> 8;
+  if ($exitcode & 0x7f) {
+    $s .= " with signal " . ($exitcode & 0x7f);
+  }
+  if ($exitcode & 0x80) {
+    $s .= " with core dump";
+  }
+  return $s;
+}
+
+sub handle_readall {
+  # Pass in a glob reference to a file handle.
+  # Read all its contents and return them as a string.
+  my $fh_glob_ref = shift;
+  local $/ = undef;
+  return <$fh_glob_ref>;
+}
+
+sub tar_filename_n {
+  my $n = shift;
+  return sprintf("%s/git.%s.%d.tar", $ENV{CRUNCH_TMP}, $job_id, $n);
+}
+
+sub add_git_archive {
+  # Pass in a git archive command as a string or list, a la system().
+  # This method will save its output to be included in the archive sent to the
+  # build script.
+  my $git_input;
+  $git_tar_count++;
+  if (!open(GIT_ARCHIVE, ">", tar_filename_n($git_tar_count))) {
+    croak("Failed to save git archive: $!");
+  }
+  my $git_pid = open2(">&GIT_ARCHIVE", $git_input, @_);
+  close($git_input);
+  waitpid($git_pid, 0);
+  close(GIT_ARCHIVE);
+  if ($?) {
+    croak("Failed to save git archive: git exited " . exit_status_s($?));
+  }
+}
+
+sub combined_git_archive {
+  # Combine all saved tar archives into a single archive, then return its
+  # contents in a string.  Return undef if no archives have been saved.
+  if ($git_tar_count < 1) {
+    return undef;
+  }
+  my $base_tar_name = tar_filename_n(1);
+  foreach my $tar_to_append (map { tar_filename_n($_); } (2..$git_tar_count)) {
+    my $tar_exit = system("tar", "-Af", $base_tar_name, $tar_to_append);
+    if ($tar_exit != 0) {
+      croak("Error preparing build archive: tar -A exited " .
+            exit_status_s($tar_exit));
+    }
+  }
+  if (!open(GIT_TAR, "<", $base_tar_name)) {
+    croak("Could not open build archive: $!");
+  }
+  my $tar_contents = handle_readall(\*GIT_TAR);
+  close(GIT_TAR);
+  return $tar_contents;
+}
+
+sub set_nonblocking {
+  my $fh = shift;
+  my $flags = fcntl ($fh, F_GETFL, 0) or croak ($!);
+  fcntl ($fh, F_SETFL, $flags | O_NONBLOCK) or croak ($!);
+}
+
+__DATA__
+#!/usr/bin/env perl
+#
+# This is crunch-job's internal dispatch script.  crunch-job running on the API
+# server invokes this script on individual compute nodes, or localhost if we're
+# running a job locally.  It gets called in two modes:
+#
+# * No arguments: Installation mode.  Read a tar archive from the DATA
+#   file handle; it includes the Crunch script's source code, and
+#   maybe SDKs as well.  Those should be installed in the proper
+#   locations.  This runs outside of any Docker container, so don't try to
+#   introspect Crunch's runtime environment.
+#
+# * With arguments: Crunch script run mode.  This script should set up the
+#   environment, then run the command specified in the arguments.  This runs
+#   inside any Docker container.
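+#
+# Invocation sketch (illustrative): run with no arguments to install from
+# the embedded DATA archive, or with a command line such as
+# "crunch_script task_arg" to set up the environment and exec that command.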
+
+use Fcntl ':flock';
+use File::Path qw( make_path remove_tree );
+use POSIX qw(getcwd);
+
+use constant TASK_TEMPFAIL => 111;
+
+# Map SDK subdirectories to the path environments they belong to.
+my %SDK_ENVVARS = ("perl/lib" => "PERLLIB", "ruby/lib" => "RUBYLIB");
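+# e.g. "$install_dir/perl/lib", if present, is prepended to PERLLIB below
+# so the task's Perl code can load the Arvados SDK without further setup.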
+
+my $destdir = $ENV{"CRUNCH_SRC"};
+my $archive_hash = $ENV{"CRUNCH_GIT_ARCHIVE_HASH"};
+my $repo = $ENV{"CRUNCH_SRC_URL"};
+my $install_dir = $ENV{"CRUNCH_INSTALL"} || (getcwd() . "/opt");
+my $job_work = $ENV{"JOB_WORK"};
+my $task_work = $ENV{"TASK_WORK"};
+
+open(STDOUT_ORIG, ">&", STDOUT);
+open(STDERR_ORIG, ">&", STDERR);
+
+for my $dir ($destdir, $job_work, $task_work) {
+  if ($dir) {
+    make_path $dir;
+    -e $dir or die "Failed to create temporary directory ($dir): $!";
+  }
+}
+
+if ($task_work) {
+  remove_tree($task_work, {keep_root => 1});
+}
+
+### Crunch script run mode
+if (@ARGV) {
+  # We want to do routine logging during task 0 only.  This gives the user
+  # the information they need, but avoids repeating the information for every
+  # task.
+  my $Log;
+  if ($ENV{TASK_SEQUENCE} eq "0") {
+    $Log = sub {
+      my $msg = shift;
+      printf STDERR_ORIG "[Crunch] $msg\n", @_;
+    };
+  } else {
+    $Log = sub { };
+  }
+
+  my $python_src = "$install_dir/python";
+  my $venv_dir = "$job_work/.arvados.venv";
+  my $venv_built = -e "$venv_dir/bin/activate";
+  if ((!$venv_built) and (-d $python_src) and can_run("virtualenv")) {
+    shell_or_die(undef, "virtualenv", "--quiet", "--system-site-packages",
+                 "--python=python2.7", $venv_dir);
+    shell_or_die(TASK_TEMPFAIL, "$venv_dir/bin/pip", "--quiet", "install", "-I", $python_src);
+    $venv_built = 1;
+    $Log->("Built Python SDK virtualenv");
+  }
+
+  my @pysdk_version_cmd = ("python", "-c",
+    "from pkg_resources import get_distribution as get; print get('arvados-python-client').version");
+  if ($venv_built) {
+    $Log->("Running in Python SDK virtualenv");
+    @pysdk_version_cmd = ();
+    my $orig_argv = join(" ", map { quotemeta($_); } @ARGV);
+    @ARGV = ("/bin/sh", "-ec",
+             ". \Q$venv_dir/bin/activate\E; exec $orig_argv");
+  } elsif (-d $python_src) {
+    $Log->("Warning: virtualenv not found inside Docker container default " .
+           "\$PATH. Can't install Python SDK.");
+  }
+
+  if (@pysdk_version_cmd) {
+    open(my $pysdk_version_pipe, "-|", @pysdk_version_cmd);
+    my $pysdk_version = <$pysdk_version_pipe>;
+    close($pysdk_version_pipe);
+    if ($? == 0) {
+      chomp($pysdk_version);
+      $Log->("Using Arvados SDK version $pysdk_version");
+    } else {
+      # A lot could've gone wrong here, but pretty much all of it means that
+      # Python won't be able to load the Arvados SDK.
+      $Log->("Warning: Arvados SDK not found");
+    }
+  }
+
+  while (my ($sdk_dir, $sdk_envkey) = each(%SDK_ENVVARS)) {
+    my $sdk_path = "$install_dir/$sdk_dir";
+    if (-d $sdk_path) {
+      if ($ENV{$sdk_envkey}) {
+        $ENV{$sdk_envkey} = "$sdk_path:" . $ENV{$sdk_envkey};
+      } else {
+        $ENV{$sdk_envkey} = $sdk_path;
+      }
+      $Log->("Arvados SDK added to %s", $sdk_envkey);
+    }
+  }
+
+  exec(@ARGV);
+  die "Cannot exec `@ARGV`: $!";
+}
+
+### Installation mode
+open L, ">", "$destdir.lock" or die "$destdir.lock: $!";
+flock L, LOCK_EX;
+if (readlink ("$destdir.archive_hash") eq $archive_hash && -d $destdir) {
+  # This exact git archive (source + arvados sdk) is already installed
+  # here, so there's no need to reinstall it.
+
+  # We must consume our DATA section, though: otherwise the process
+  # feeding it to us will get SIGPIPE.
+  my $buf;
+  while (read(DATA, $buf, 65536)) { }
+
+  exit(0);
+}
+
+unlink "$destdir.archive_hash";
+mkdir $destdir;
+
+do {
+  # Ignore SIGPIPE: we check retval of close() instead. See perlipc(1).
+  local $SIG{PIPE} = "IGNORE";
+  warn "Extracting archive: $archive_hash\n";
+  # --ignore-zeros is necessary sometimes: depending on how much NUL
+  # padding tar -A put on our combined archive (which in turn depends
+  # on the length of the component archives) tar without
+  # --ignore-zeros will exit before consuming stdin and cause close()
+  # to fail on the resulting SIGPIPE.
+  if (!open(TARX, "|-", "tar", "--ignore-zeros", "-xC", $destdir)) {
+    die "Error launching 'tar -xC $destdir': $!";
+  }
+  # If we send too much data to tar in one write (> 4-5 MiB), it stops, and we
+  # get SIGPIPE.  We must feed it data incrementally.
+  my $tar_input;
+  while (read(DATA, $tar_input, 65536)) {
+    print TARX $tar_input;
+  }
+  if(!close(TARX)) {
+    die "'tar -xC $destdir' exited $?: $!";
+  }
+};
+
+mkdir $install_dir;
+
+my $sdk_root = "$destdir/.arvados.sdk/sdk";
+if (-d $sdk_root) {
+  foreach my $sdk_lang (("python",
+                         map { (split /\//, $_, 2)[0]; } keys(%SDK_ENVVARS))) {
+    if (-d "$sdk_root/$sdk_lang") {
+      if (!rename("$sdk_root/$sdk_lang", "$install_dir/$sdk_lang")) {
+        die "Failed to install $sdk_lang SDK: $!";
+      }
+    }
+  }
+}
+
+my $python_dir = "$install_dir/python";
+if ((-d $python_dir) and can_run("python2.7")) {
+  open(my $egg_info_pipe, "-|",
+       "python2.7 \Q$python_dir/setup.py\E egg_info 2>&1 >/dev/null");
+  my @egg_info_errors = <$egg_info_pipe>;
+  close($egg_info_pipe);
+
+  if ($?) {
+    if (@egg_info_errors and (($egg_info_errors[-1] =~ /\bgit\b/) or ($egg_info_errors[-1] =~ /\[Errno 2\]/))) {
+      # egg_info apparently failed because it couldn't ask git for a build tag.
+      # Specify no build tag.
+      open(my $pysdk_cfg, ">>", "$python_dir/setup.cfg");
+      print $pysdk_cfg "\n[egg_info]\ntag_build =\n";
+      close($pysdk_cfg);
+    } else {
+      my $egg_info_exit = $? >> 8;
+      foreach my $errline (@egg_info_errors) {
+        warn $errline;
+      }
+      warn "python setup.py egg_info failed: exit $egg_info_exit";
+      exit ($egg_info_exit || 1);
+    }
+  }
+}
+
+# Hide messages from the install script (unless it fails: shell_or_die
+# will show $destdir.log in that case).
+open(STDOUT, ">>", "$destdir.log") or die ($!);
+open(STDERR, ">&", STDOUT) or die ($!);
+
+if (-e "$destdir/crunch_scripts/install") {
+    shell_or_die (undef, "$destdir/crunch_scripts/install", $install_dir);
+} elsif (!-e "./install.sh" && -e "./tests/autotests.sh") {
+    # Old version
+    shell_or_die (undef, "./tests/autotests.sh", $install_dir);
+} elsif (-e "./install.sh") {
+    shell_or_die (undef, "./install.sh", $install_dir);
+}
+
+if ($archive_hash) {
+    unlink "$destdir.archive_hash.new";
+    symlink ($archive_hash, "$destdir.archive_hash.new") or die "$destdir.archive_hash.new: $!";
+    rename ("$destdir.archive_hash.new", "$destdir.archive_hash") or die "$destdir.archive_hash: $!";
+}
+
+close L;
+
+sub can_run {
+  my $command_name = shift;
+  open(my $which, "-|", "which", $command_name) or die ($!);
+  while (<$which>) { }
+  close($which);
+  return ($? == 0);
+}
+
+sub shell_or_die
+{
+  my $exitcode = shift;
+
+  if ($ENV{"DEBUG"}) {
+    print STDERR "@_\n";
+  }
+  if (system (@_) != 0) {
+    my $err = $!;
+    my $code = $?;
+    my $exitstatus = sprintf("exit %d signal %d", $code >> 8, $code & 0x7f);
+    open STDERR, ">&STDERR_ORIG";
+    system ("cat $destdir.log >&2");
+    warn "@_ failed ($err): $exitstatus";
+    if (defined($exitcode)) {
+      exit $exitcode;
+    }
+    else {
+      exit (($code >> 8) || 1);
+    }
+  }
+}
+
+__DATA__
diff --git a/sdk/cli/test/binstub_arv-mount/arv-mount b/sdk/cli/test/binstub_arv-mount/arv-mount
new file mode 100755 (executable)
index 0000000..c763b14
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
diff --git a/sdk/cli/test/binstub_clean_fail/arv-mount b/sdk/cli/test/binstub_clean_fail/arv-mount
new file mode 100755 (executable)
index 0000000..9c03bdc
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+echo >&2 Failing mount stub was called
+exit 44
diff --git a/sdk/cli/test/binstub_docker_noop/docker.io b/sdk/cli/test/binstub_docker_noop/docker.io
new file mode 100755 (executable)
index 0000000..b8a2723
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+true
diff --git a/sdk/cli/test/binstub_output_coll_owner/python b/sdk/cli/test/binstub_output_coll_owner/python
new file mode 100755 (executable)
index 0000000..5fed375
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+echo owner_uuid: $2 >&2
+
diff --git a/sdk/cli/test/binstub_sanity_check/docker.io b/sdk/cli/test/binstub_sanity_check/docker.io
new file mode 100755 (executable)
index 0000000..402fe9d
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+exit 8
diff --git a/sdk/cli/test/binstub_sanity_check/true b/sdk/cli/test/binstub_sanity_check/true
new file mode 100755 (executable)
index 0000000..8f19bf4
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+exit 7
diff --git a/sdk/cli/test/test_arv-collection-create.rb b/sdk/cli/test/test_arv-collection-create.rb
new file mode 100644 (file)
index 0000000..237b210
--- /dev/null
@@ -0,0 +1,60 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require 'minitest/autorun'
+require 'digest/md5'
+require 'active_support'
+require 'active_support/core_ext'
+require 'tempfile'
+
+class TestCollectionCreate < Minitest::Test
+  def setup
+  end
+
+  def test_small_collection
+    uuid = Digest::MD5.hexdigest(foo_manifest) + '+' + foo_manifest.size.to_s
+    out, err = capture_subprocess_io do
+      assert_arv('--format', 'uuid', 'collection', 'create', '--collection', {
+                   uuid: uuid,
+                   manifest_text: foo_manifest
+                 }.to_json)
+    end
+    assert /^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(out)
+    assert_equal '', err
+  end
+
+  def test_read_resource_object_from_file
+    tempfile = Tempfile.new('collection')
+    begin
+      tempfile.write({manifest_text: foo_manifest}.to_json)
+      tempfile.close
+      out, err = capture_subprocess_io do
+        assert_arv('--format', 'uuid',
+                   'collection', 'create', '--collection', tempfile.path)
+      end
+      assert /^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(out)
+      assert_equal '', err
+    ensure
+      tempfile.unlink
+    end
+  end
+
+  protected
+  def assert_arv(*args)
+    expect = case args.first
+             when true, false
+               args.shift
+             else
+               true
+             end
+    assert_equal(expect,
+                 system(['./bin/arv', 'arv'], *args),
+                 "`arv #{args.join ' '}` " +
+                 "should exit #{if expect then 0 else 'non-zero' end}")
+  end
+
+  def foo_manifest
+    ". #{Digest::MD5.hexdigest('foo')}+3 0:3:foo\n"
+  end
+end
diff --git a/sdk/cli/test/test_arv-get.rb b/sdk/cli/test/test_arv-get.rb
new file mode 100644 (file)
index 0000000..c5ddacb
--- /dev/null
@@ -0,0 +1,193 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require 'minitest/autorun'
+require 'json'
+require 'yaml'
+
+# Black box tests for 'arv get' command.
+class TestArvGet < Minitest::Test
+  # UUID for an Arvados object that does not exist
+  NON_EXISTENT_OBJECT_UUID = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+  # Name of field of Arvados object that can store any (textual) value
+  STORED_VALUE_FIELD_NAME = "name"
+  # Name of UUID field of Arvados object
+  UUID_FIELD_NAME = "uuid"
+  # Name of an invalid field of Arvados object
+  INVALID_FIELD_NAME = "invalid"
+
+  # Tests that a valid Arvados object can be retrieved in a supported format
+  # using: `arv get [uuid]`. Given all other `arv foo` commands return JSON
+  # when no format is specified, JSON should be expected in this case.
+  def test_get_valid_object_no_format_specified
+    stored_value = __method__.to_s
+    uuid = create_arv_object_with_value(stored_value)
+    out, err = capture_subprocess_io do
+      assert(arv_get_default(uuid))
+    end
+    assert_empty(err, "Error text not expected: '#{err}'")
+    arv_object = parse_json_arv_object(out)
+    assert(has_field_with_value(arv_object, STORED_VALUE_FIELD_NAME, stored_value))
+  end
+
+  # Tests that a valid Arvados object can be retrieved in JSON format using:
+  # `arv get [uuid] --format json`.
+  def test_get_valid_object_json_format_specified
+    stored_value = __method__.to_s
+    uuid = create_arv_object_with_value(stored_value)
+    out, err = capture_subprocess_io do
+      assert(arv_get_json(uuid))
+    end
+    assert_empty(err, "Error text not expected: '#{err}'")
+    arv_object = parse_json_arv_object(out)
+    assert(has_field_with_value(arv_object, STORED_VALUE_FIELD_NAME, stored_value))
+  end
+
+  # Tests that a valid Arvados object can be retrieved in YAML format using:
+  # `arv get [uuid] --format yaml`.
+  def test_get_valid_object_yaml_format_specified
+    stored_value = __method__.to_s
+    uuid = create_arv_object_with_value(stored_value)
+    out, err = capture_subprocess_io do
+      assert(arv_get_yaml(uuid))
+    end
+    assert_empty(err, "Error text not expected: '#{err}'")
+    arv_object = parse_yaml_arv_object(out)
+    assert(has_field_with_value(arv_object, STORED_VALUE_FIELD_NAME, stored_value))
+  end
+
+  # Tests that a subset of all fields of a valid Arvados object can be retrieved
+  # using: `arv get [uuid] [fields...]`.
+  def test_get_valid_object_with_valid_fields
+    stored_value = __method__.to_s
+    uuid = create_arv_object_with_value(stored_value)
+    out, err = capture_subprocess_io do
+      assert(arv_get_json(uuid, STORED_VALUE_FIELD_NAME, UUID_FIELD_NAME))
+    end
+    assert_empty(err, "Error text not expected: '#{err}'")
+    arv_object = parse_json_arv_object(out)
+    assert(has_field_with_value(arv_object, STORED_VALUE_FIELD_NAME, stored_value))
+    assert(has_field_with_value(arv_object, UUID_FIELD_NAME, uuid))
+  end
+
+  # Tests that the valid field is retrieved when both a valid and invalid field
+  # are requested from a valid Arvados object, using:
+  # `arv get [uuid] [fields...]`.
+  def test_get_valid_object_with_both_valid_and_invalid_fields
+    stored_value = __method__.to_s
+    uuid = create_arv_object_with_value(stored_value)
+    out, err = capture_subprocess_io do
+      assert(arv_get_json(uuid, STORED_VALUE_FIELD_NAME, INVALID_FIELD_NAME))
+    end
+    assert_empty(err, "Error text not expected: '#{err}'")
+    arv_object = parse_json_arv_object(out)
+    assert(has_field_with_value(arv_object, STORED_VALUE_FIELD_NAME, stored_value))
+    refute(has_field_with_value(arv_object, INVALID_FIELD_NAME, stored_value))
+  end
+
+  # Tests that no fields are retrieved when no valid fields are requested from
+  # a valid Arvados object, using: `arv get [uuid] [fields...]`.
+  def test_get_valid_object_with_no_valid_fields
+    stored_value = __method__.to_s
+    uuid = create_arv_object_with_value(stored_value)
+    out, err = capture_subprocess_io do
+      assert(arv_get_json(uuid, INVALID_FIELD_NAME))
+    end
+    assert_empty(err, "Error text not expected: '#{err}'")
+    arv_object = parse_json_arv_object(out)
+    assert_equal(0, arv_object.length)
+  end
+
+  # Tests that an invalid (non-existent) Arvados object is not retrieved using:
+  # using: `arv get [non-existent-uuid]`.
+  def test_get_invalid_object
+    out, err = capture_subprocess_io do
+      refute(arv_get_json(NON_EXISTENT_OBJECT_UUID))
+    end
+    refute_empty(err, "Expected error feedback on request for invalid object")
+    assert_empty(out)
+  end
+
+  # Tests that help text exists using: `arv get --help`.
+  def test_help_exists
+    out, err = capture_subprocess_io do
+#      assert(arv_get_default("--help"), "Expected exit code 0: #{$?}")
+       #XXX: Exit code given is 255. It probably should be 0, which seems to be
+       #     standard elsewhere. However, 255 is in line with other `arv`
+       #     commands (e.g. see `arv edit`) so ignoring the problem here.
+       arv_get_default("--help")
+    end
+    assert_empty(err, "Error text not expected: '#{err}'")
+    refute_empty(out, "Help text should be given")
+  end
+
+  protected
+  # Runs 'arv get <varargs>' with given arguments. Returns whether the exit
+  # status was 0 (i.e. success). Use $? to obtain more details on failure.
+  def arv_get_default(*args)
+    return system("arv", "get", *args)
+  end
+
+  # Runs 'arv --format json get <varargs>' with given arguments. Returns whether
+  # the exit status was 0 (i.e. success). Use $? to attain more details on
+  # failure.
+  def arv_get_json(*args)
+    return system("arv", "--format", "json", "get", *args)
+  end
+
+  # Runs 'arv --format yaml get <varargs>' with given arguments. Returns whether
+  # the exit status was 0 (i.e. success). Use $? to obtain more details on
+  # failure.
+  def arv_get_yaml(*args)
+    return system("arv", "--format", "yaml", "get", *args)
+  end
+
+  # Creates an Arvados object that stores a given value. Returns the uuid of the
+  # created object.
+  def create_arv_object_with_value(value)
+    out, err = capture_subprocess_io do
+      system("arv", "tag", "add", value, "--object", "testing")
+      assert $?.success?, "Command failure running `arv tag`: #{$?}"
+    end
+    assert_equal '', err
+    assert_operator 0, :<, out.strip.length
+    out.strip
+  end
+
+  # Parses the given JSON representation of an Arvados object, returning
+  # an equivalent Ruby representation (a hash map).
+  def parse_json_arv_object(arvObjectAsJson)
+    begin
+      parsed = JSON.parse(arvObjectAsJson)
+      assert(parsed.instance_of?(Hash))
+      return parsed
+    rescue JSON::ParserError => e
+      raise "Invalid JSON representation of Arvados object.\n" \
+            "Parse error: '#{e}'\n" \
+            "JSON: '#{arvObjectAsJson}'\n"
+    end
+  end
+
+  # Parses the given JSON representation of an Arvados object, returning
+  # an equivalent Ruby representation (a hash map).
+  def parse_yaml_arv_object(arvObjectAsYaml)
+    begin
+      parsed = YAML.load(arvObjectAsYaml)
+      assert(parsed.instance_of?(Hash))
+      return parsed
+    rescue
+      raise "Invalid YAML representation of Arvados object.\n" \
+            "YAML: '#{arvObjectAsYaml}'\n"
+    end
+  end
+
+  # Checks whether the given Arvados object has the given expected value for the
+  # specified field.
+  def has_field_with_value(arvObjectAsHash, fieldName, expectedValue)
+    if !arvObjectAsHash.has_key?(fieldName)
+      return false
+    end
+    return (arvObjectAsHash[fieldName] == expectedValue)
+  end
+end
diff --git a/sdk/cli/test/test_arv-keep-get.rb b/sdk/cli/test/test_arv-keep-get.rb
new file mode 100644 (file)
index 0000000..2aa1e67
--- /dev/null
@@ -0,0 +1,263 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require 'minitest/autorun'
+require 'digest/md5'
+
+class TestArvKeepGet < Minitest::Test
+  def setup
+    begin
+      Dir.mkdir './tmp'
+    rescue Errno::EEXIST
+    end
+    @@foo_manifest_locator ||= `echo -n foo | ./bin/arv-put --filename foo --no-progress -`.strip
+    @@baz_locator ||= `echo -n baz | ./bin/arv-put --as-raw --no-progress -`.strip
+    @@multilevel_manifest_locator ||= `echo -n baz | ./bin/arv-put --filename foo/bar/baz --no-progress -`.strip
+  end
+
+  def test_no_args
+    out, err = capture_subprocess_io do
+      assert_arv_get false
+    end
+    assert_equal '', out
+    assert_match /^usage:/, err
+  end
+
+  def test_get_version
+    out, err = capture_subprocess_io do
+      assert_arv_get '--version'
+    end
+    assert_empty(out, "STDOUT not expected: '#{out}'")
+    assert_match(/[0-9]+\.[0-9]+\.[0-9]+/, err, "Version information incorrect: '#{err}'")
+  end
+
+  def test_help
+    out, err = capture_subprocess_io do
+      assert_arv_get '-h'
+    end
+    $stderr.write err
+    assert_equal '', err
+    assert_match /^usage:/, out
+  end
+
+  def test_file_to_dev_stdout
+    test_file_to_stdout('/dev/stdout')
+  end
+
+  def test_file_to_stdout(specify_stdout_as='-')
+    out, err = capture_subprocess_io do
+      assert_arv_get @@foo_manifest_locator + '/foo', specify_stdout_as
+    end
+    assert_equal '', err
+    assert_equal 'foo', out
+  end
+
+  def test_file_to_file
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get @@foo_manifest_locator + '/foo', 'tmp/foo'
+    end
+    assert_equal '', err
+    assert_equal '', out
+    assert_equal 'foo', IO.read('tmp/foo')
+  end
+
+  def test_file_to_file_no_overwrite_file
+    File.open './tmp/foo', 'wb' do |f|
+      f.write 'baz'
+    end
+    out, err = capture_subprocess_io do
+      assert_arv_get false, @@foo_manifest_locator + '/foo', 'tmp/foo'
+    end
+    assert_match /Local file tmp\/foo already exists/, err
+    assert_equal '', out
+    assert_equal 'baz', IO.read('tmp/foo')
+  end
+
+  def test_file_to_file_no_overwrite_file_in_dir
+    File.open './tmp/foo', 'wb' do |f|
+      f.write 'baz'
+    end
+    out, err = capture_subprocess_io do
+      assert_arv_get false, @@foo_manifest_locator + '/', 'tmp/'
+    end
+    assert_match /Local file tmp\/foo already exists/, err
+    assert_equal '', out
+    assert_equal 'baz', IO.read('tmp/foo')
+  end
+
+  def test_file_to_file_force_overwrite
+    File.open './tmp/foo', 'wb' do |f|
+      f.write 'baz'
+    end
+    assert_equal 'baz', IO.read('tmp/foo')
+    out, err = capture_subprocess_io do
+      assert_arv_get '-f', @@foo_manifest_locator + '/', 'tmp/'
+    end
+    assert_match '', err
+    assert_equal '', out
+    assert_equal 'foo', IO.read('tmp/foo')
+  end
+
+  def test_file_to_file_skip_existing
+    File.open './tmp/foo', 'wb' do |f|
+      f.write 'baz'
+    end
+    assert_equal 'baz', IO.read('tmp/foo')
+    out, err = capture_subprocess_io do
+      assert_arv_get '--skip-existing', @@foo_manifest_locator + '/', 'tmp/'
+    end
+    assert_match '', err
+    assert_equal '', out
+    assert_equal 'baz', IO.read('tmp/foo')
+  end
+
+  def test_file_to_dir
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get @@foo_manifest_locator + '/foo', 'tmp/'
+    end
+    assert_equal '', err
+    assert_equal '', out
+    assert_equal 'foo', IO.read('tmp/foo')
+  end
+
+  def test_dir_to_file
+    out, err = capture_subprocess_io do
+      assert_arv_get false, @@foo_manifest_locator + '/', 'tmp/foo'
+    end
+    assert_equal '', out
+    assert_match /^usage:/, err
+  end
+
+  def test_dir_to_empty_string
+    out, err = capture_subprocess_io do
+      assert_arv_get false, @@foo_manifest_locator + '/', ''
+    end
+    assert_equal '', out
+    assert_match /^usage:/, err
+  end
+
+  def test_nonexistent_block
+    out, err = capture_subprocess_io do
+      assert_arv_get false, 'e796ab2294f3e48ec709ffa8d6daf58c'
+    end
+    assert_equal '', out
+    assert_match /ERROR:/, err
+  end
+
+  def test_nonexistent_manifest
+    out, err = capture_subprocess_io do
+      assert_arv_get false, 'acbd18db4cc2f85cedef654fccc4a4d8/', 'tmp/'
+    end
+    assert_equal '', out
+    assert_match /ERROR:/, err
+  end
+
+  def test_manifest_root_to_dir
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get '-r', @@foo_manifest_locator + '/', 'tmp/'
+    end
+    assert_equal '', err
+    assert_equal '', out
+    assert_equal 'foo', IO.read('tmp/foo')
+  end
+
+  def test_manifest_root_to_dir_noslash
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get '-r', @@foo_manifest_locator + '/', 'tmp'
+    end
+    assert_equal '', err
+    assert_equal '', out
+    assert_equal 'foo', IO.read('tmp/foo')
+  end
+
+  def test_display_md5sum
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get '-r', '--md5sum', @@foo_manifest_locator + '/', 'tmp/'
+    end
+    assert_equal "#{Digest::MD5.hexdigest('foo')}  ./foo\n", err
+    assert_equal '', out
+    assert_equal 'foo', IO.read('tmp/foo')
+  end
+
+  def test_md5sum_nowrite
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get '-n', '--md5sum', @@foo_manifest_locator + '/', 'tmp/'
+    end
+    assert_equal "#{Digest::MD5.hexdigest('foo')}  ./foo\n", err
+    assert_equal '', out
+    assert_equal false, File.exist?('tmp/foo')
+  end
+
+  def test_sha1_nowrite
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get '-n', '-r', '--hash', 'sha1', @@foo_manifest_locator+'/', 'tmp/'
+    end
+    assert_equal "#{Digest::SHA1.hexdigest('foo')}  ./foo\n", err
+    assert_equal '', out
+    assert_equal false, File.exist?('tmp/foo')
+  end
+
+  def test_block_to_file
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get @@foo_manifest_locator, 'tmp/foo'
+    end
+    assert_equal '', err
+    assert_equal '', out
+
+    # Fetching a bare collection locator yields the manifest text itself.
+    digest = Digest::MD5.hexdigest('foo')
+    assert_match(/^\. #{digest}\+3.* 0:3:foo$/, IO.read('tmp/foo'))
+  end
+
+  def test_create_directory_tree
+    `rm -rf ./tmp/arv-get-test/`
+    Dir.mkdir './tmp/arv-get-test'
+    out, err = capture_subprocess_io do
+      assert_arv_get @@multilevel_manifest_locator + '/', 'tmp/arv-get-test/'
+    end
+    assert_equal '', err
+    assert_equal '', out
+    assert_equal 'baz', IO.read('tmp/arv-get-test/foo/bar/baz')
+  end
+
+  def test_create_partial_directory_tree
+    `rm -rf ./tmp/arv-get-test/`
+    Dir.mkdir './tmp/arv-get-test'
+    out, err = capture_subprocess_io do
+      assert_arv_get(@@multilevel_manifest_locator + '/foo/',
+                     'tmp/arv-get-test/')
+    end
+    assert_equal '', err
+    assert_equal '', out
+    assert_equal 'baz', IO.read('tmp/arv-get-test/bar/baz')
+  end
+
+  protected
+  def assert_arv_get(*args)
+    expect = case args.first
+             when true, false
+               args.shift
+             else
+               true
+             end
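+    # system(['./bin/arv-get', 'arv-get'], ...) runs the local script while
+    # presenting 'arv-get' as the program name ($0) to the child process.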
+    assert_equal(expect,
+                 system(['./bin/arv-get', 'arv-get'], *args),
+                 "`arv-get #{args.join ' '}` " +
+                 "should exit #{if expect then 0 else 'non-zero' end}")
+  end
+
+  def remove_tmp_foo
+    begin
+      File.unlink('tmp/foo')
+    rescue Errno::ENOENT
+    end
+  end
+end
diff --git a/sdk/cli/test/test_arv-keep-put.rb b/sdk/cli/test/test_arv-keep-put.rb
new file mode 100644 (file)
index 0000000..9ac722f
--- /dev/null
@@ -0,0 +1,201 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require 'minitest/autorun'
+require 'digest/md5'
+
+class TestArvKeepPut < Minitest::Test
+  def setup
+    begin Dir.mkdir './tmp' rescue Errno::EEXIST end
+    begin Dir.mkdir './tmp/empty_dir' rescue Errno::EEXIST end
+    File.open './tmp/empty_file', 'wb' do
+    end
+    File.open './tmp/foo', 'wb' do |f|
+      f.write 'foo'
+    end
+  end
+
+  def test_help
+    out, err = capture_subprocess_io do
+      assert arv_put('-h'), 'arv-put -h exits zero'
+    end
+    $stderr.write err
+    assert_empty err
+    assert_match /^usage:/, out
+  end
+
+  def test_raw_stdin
+    out, err = capture_subprocess_io do
+      r,w = IO.pipe
+      wpid = fork do
+        r.close
+        w << 'foo'
+      end
+      w.close
+      assert arv_put('--raw', {in: r})
+      r.close
+      Process.waitpid wpid
+    end
+    $stderr.write err
+    assert_match '', err
+    assert_equal "acbd18db4cc2f85cedef654fccc4a4d8+3\n", out
+  end
+
+  def test_raw_file
+    out, err = capture_subprocess_io do
+      assert arv_put('--no-cache', '--raw', './tmp/foo')
+    end
+    $stderr.write err
+    assert_match '', err
+    assert_equal "acbd18db4cc2f85cedef654fccc4a4d8+3\n", out
+  end
+
+  def test_raw_empty_file
+    out, err = capture_subprocess_io do
+      assert arv_put('--raw', './tmp/empty_file')
+    end
+    $stderr.write err
+    assert_match '', err
+    assert_equal "d41d8cd98f00b204e9800998ecf8427e+0\n", out
+  end
+
+  def test_filename_arg_with_directory
+    out, err = capture_subprocess_io do
+      assert_equal(false, arv_put('--filename', 'foo', './tmp/empty_dir/.'),
+                   'arv-put --filename refuses directory')
+    end
+    assert_match /^usage:.*error:/m, err
+    assert_empty out
+  end
+
+  def test_filename_arg_with_multiple_files
+    out, err = capture_subprocess_io do
+      assert_equal(false, arv_put('--filename', 'foo',
+                                  './tmp/empty_file',
+                                  './tmp/empty_file'),
+                   'arv-put --filename refuses multiple files')
+    end
+    assert_match /^usage:.*error:/m, err
+    assert_empty out
+  end
+
+  def test_filename_arg_with_empty_file
+    out, err = capture_subprocess_io do
+      assert arv_put('--filename', 'foo', './tmp/empty_file')
+    end
+    $stderr.write err
+    assert_match '', err
+    assert match_collection_uuid(out)
+  end
+
+  def test_as_stream
+    out, err = capture_subprocess_io do
+      assert arv_put('--no-cache', '--as-stream', './tmp/foo')
+    end
+    $stderr.write err
+    assert_match '', err
+    assert_equal foo_manifest, out
+  end
+
+  def test_progress
+    out, err = capture_subprocess_io do
+      assert arv_put('--no-cache', '--manifest', '--progress', './tmp/foo')
+    end
+    assert_match /%/, err
+    assert match_collection_uuid(out)
+  end
+
+  def test_batch_progress
+    out, err = capture_subprocess_io do
+      assert arv_put('--no-cache', '--manifest', '--batch-progress', './tmp/foo')
+    end
+    assert_match /: 0 written 3 total/, err
+    assert_match /: 3 written 3 total/, err
+    assert match_collection_uuid(out)
+  end
+
+  def test_progress_and_batch_progress
+    out, err = capture_subprocess_io do
+      assert_equal(false,
+                   arv_put('--progress', '--batch-progress', './tmp/foo'),
+                   'arv-put --progress --batch-progress is contradictory')
+    end
+    assert_match /^usage:.*error:/m, err
+    assert_empty out
+  end
+
+  def test_read_from_implicit_stdin
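+    # '--manifest' is only a flag here: with no file argument left over,
+    # arv-put falls back to reading from stdin implicitly.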
+    test_read_from_stdin(specify_stdin_as='--manifest')
+  end
+
+  def test_read_from_dev_stdin
+    test_read_from_stdin(specify_stdin_as='/dev/stdin')
+  end
+
+  def test_read_from_stdin(specify_stdin_as='-')
+    out, err = capture_subprocess_io do
+      r,w = IO.pipe
+      wpid = fork do
+        r.close
+        w << 'foo'
+      end
+      w.close
+      assert arv_put('--filename', 'foo', specify_stdin_as,
+                                 { in: r })
+      r.close
+      Process.waitpid wpid
+    end
+    $stderr.write err
+    assert_match '', err
+    assert match_collection_uuid(out)
+  end
+
+  def test_read_from_implicit_stdin_implicit_manifest
+    test_read_from_stdin_implicit_manifest(specify_stdin_as=nil,
+                                           expect_filename='stdin')
+  end
+
+  def test_read_from_dev_stdin_implicit_manifest
+    test_read_from_stdin_implicit_manifest(specify_stdin_as='/dev/stdin')
+  end
+
+  def test_read_from_stdin_implicit_manifest(specify_stdin_as='-',
+                                             expect_filename=nil)
+    expect_filename = expect_filename || specify_stdin_as.split('/').last
+    out, err = capture_subprocess_io do
+      r,w = IO.pipe
+      wpid = fork do
+        r.close
+        w << 'foo'
+      end
+      w.close
+      args = []
+      args.push specify_stdin_as if specify_stdin_as
+      assert arv_put(*args, { in: r })
+      r.close
+      Process.waitpid wpid
+    end
+    $stderr.write err
+    assert_match '', err
+    assert match_collection_uuid(out)
+  end
+
+  protected
+  def arv_put(*args)
+    system ['./bin/arv-put', 'arv-put'], *args
+  end
+
+  def foo_manifest(filename='foo')
+    ". #{Digest::MD5.hexdigest('foo')}+3 0:3:#{filename}\n"
+  end
+
+  def foo_manifest_locator(filename='foo')
+    Digest::MD5.hexdigest(foo_manifest(filename)) +
+      "+#{foo_manifest(filename).length}"
+  end
+
+  def match_collection_uuid(uuid)
+    /^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(uuid)
+  end
+end
diff --git a/sdk/cli/test/test_arv-run-pipeline-instance.rb b/sdk/cli/test/test_arv-run-pipeline-instance.rb
new file mode 100644 (file)
index 0000000..b6a0328
--- /dev/null
@@ -0,0 +1,37 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require 'minitest/autorun'
+
+class TestRunPipelineInstance < Minitest::Test
+  def setup
+  end
+
+  def test_run_pipeline_instance_get_help
+    out, err = capture_subprocess_io do
+      system('arv-run-pipeline-instance -h')
+    end
+    assert_equal '', err
+  end
+
+  def test_run_pipeline_instance_with_no_such_option
+    out, err = capture_subprocess_io do
+      system('arv-run-pipeline-instance --junk')
+    end
+    refute_equal '', err
+  end
+
+  def test_run_pipeline_instance_for_bogus_template_uuid
+    out, err = capture_subprocess_io do
+      # Fails with an SSL_connect error because HOST_INSECURE is not being used:
+      # system('arv-run-pipeline-instance --template bogus-abcde-fghijklmnopqrs input=c1bad4b39ca5a924e481008009d94e32+210')
+
+      # Fails with "fatal: cannot load such file -- arvados":
+      # system('./bin/arv-run-pipeline-instance --template bogus-abcde-fghijklmnopqrs input=c1bad4b39ca5a924e481008009d94e32+210')
+    end
+    # refute_equal '', err
+    assert_equal '', err
+  end
+
+end
diff --git a/sdk/cli/test/test_arv-tag.rb b/sdk/cli/test/test_arv-tag.rb
new file mode 100644 (file)
index 0000000..16542ba
--- /dev/null
@@ -0,0 +1,116 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require 'minitest/autorun'
+require 'digest/md5'
+require 'json'
+
+def assert_failure *args
+  assert_equal false, *args
+end
+
+class TestArvTag < Minitest::Test
+
+  def test_no_args
+    skip "Waiting until #4534 is implemented"
+
+    # arv-tag exits with failure if run with no args
+    out, err = capture_subprocess_io do
+      assert_equal false, arv_tag
+    end
+    assert_empty out
+    assert_match /^usage:/i, err
+  end
+
+  # Test adding and removing a single tag on a single object.
+  def test_single_tag_single_obj
+    skip "TBD"
+
+    # Add a single tag.
+    tag_uuid, err = capture_subprocess_io do
+      assert arv_tag '--short', 'add', 'test_tag1', '--object', 'uuid1'
+    end
+    assert_empty err
+
+    out, err = capture_subprocess_io do
+      assert arv 'link', 'show', '--uuid', tag_uuid.rstrip
+    end
+
+    assert_empty err
+    link = JSON.parse out
+    assert_tag link, 'test_tag1', 'uuid1'
+
+    # Remove the tag.
+    out, err = capture_subprocess_io do
+      assert arv_tag 'remove', 'test_tag1', '--object', 'uuid1'
+    end
+
+    assert_empty err
+    links = JSON.parse out
+    assert_equal 1, links.length
+    assert_tag links[0], 'test_tag1', 'uuid1'
+
+    # Verify that the link no longer exists.
+    out, err = capture_subprocess_io do
+      assert_equal false, arv('link', 'show', '--uuid', links[0]['uuid'])
+    end
+
+    assert_equal "Error: Path not found\n", err
+  end
+
+  # Test adding and removing a single tag with multiple objects.
+  def test_single_tag_multi_objects
+    skip "TBD"
+
+    out, err = capture_subprocess_io do
+      assert arv_tag('add', 'test_tag1',
+                     '--object', 'uuid1',
+                     '--object', 'uuid2',
+                     '--object', 'uuid3')
+    end
+    assert_empty err
+
+    out, err = capture_subprocess_io do
+      assert arv 'link', 'list', '--where', '{"link_class":"tag","name":"test_tag1"}'
+    end
+
+    assert_empty err
+    json_out = JSON.parse out
+    links = json_out['items'].sort { |a,b| a['head_uuid'] <=> b['head_uuid'] }
+    assert_equal 3, links.length
+    assert_tag links[0], 'test_tag1', 'uuid1'
+    assert_tag links[1], 'test_tag1', 'uuid2'
+    assert_tag links[2], 'test_tag1', 'uuid3'
+
+    out, err = capture_subprocess_io do
+      assert arv_tag('remove', 'test_tag1',
+                     '--object', 'uuid1',
+                     '--object', 'uuid2',
+                     '--object', 'uuid3')
+    end
+    assert_empty err
+
+    out, err = capture_subprocess_io do
+      assert arv 'link', 'list', '--where', '{"link_class":"tag","name":"test_tag1"}'
+    end
+
+    assert_empty err
+    assert_empty out
+  end
+
+  protected
+  def arv_tag(*args)
+    system ['./bin/arv-tag', 'arv-tag'], *args
+  end
+
+  def arv(*args)
+    system ['./bin/arv', 'arv'], *args
+  end
+
+  def assert_tag(link, name, head_uuid)
+    assert_equal 'tag',     link['link_class']
+    assert_equal name,      link['name']
+    assert_equal head_uuid, link['head_uuid']
+  end
+end
diff --git a/sdk/cli/test/test_arv-ws.rb b/sdk/cli/test/test_arv-ws.rb
new file mode 100644 (file)
index 0000000..ffdb865
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require 'minitest/autorun'
+
+class TestArvWs < Minitest::Test
+  def setup
+  end
+
+  def test_arv_ws_get_help
+    out, err = capture_subprocess_io do
+      system('arv-ws -h')
+    end
+    assert_equal '', err
+  end
+
+  def test_arv_ws_no_such_option
+    out, err = capture_subprocess_io do
+      system('arv-ws --junk')
+    end
+    refute_equal '', err
+  end
+
+end
diff --git a/sdk/cli/test/test_crunch-job.rb b/sdk/cli/test/test_crunch-job.rb
new file mode 100644 (file)
index 0000000..c1465d8
--- /dev/null
@@ -0,0 +1,139 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require 'minitest/autorun'
+
+class TestCrunchJob < Minitest::Test
+  SPECIAL_EXIT = {
+    EX_RETRY_UNLOCKED: 93,
+    EX_TEMPFAIL: 75,
+  }
+
+  JOBSPEC = {
+    grep_local: {
+      owner_uuid: 'zzzzz-j7d0g-it30l961gq3t0oi',
+      script: 'grep',
+      script_version: 'master',
+      repository: File.absolute_path('../../../..', __FILE__),
+      script_parameters: {foo: 'bar'},
+    },
+  }
+
+  def setup
+  end
+
+  def crunchjob
+    File.absolute_path '../../bin/crunch-job', __FILE__
+  end
+
+  # Return environment suitable for running crunch-job.
+  def crunchenv opts={}
+    env = ENV.to_h
+    env['CRUNCH_REFRESH_TRIGGER'] =
+      File.absolute_path('../../../../tmp/crunch-refresh-trigger', __FILE__)
+    env
+  end
+
+  def jobspec label
+    JOBSPEC[label].dup
+  end
+
+  # Encode job record to json and run it with crunch-job.
+  #
+  # opts[:binstubs] is an array of X where ./binstub_X is added to
+  # PATH in order to mock system programs.
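+  #
+  # For example, tryjobrecord(j, binstubs: ['clean_fail']) prepends the
+  # ./binstub_clean_fail directory of mock executables to PATH.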
+  def tryjobrecord jobrecord, opts={}
+    env = crunchenv
+    (opts[:binstubs] || []).each do |binstub|
+      env['PATH'] = File.absolute_path('../binstub_'+binstub, __FILE__) + ':' + env['PATH']
+    end
+    system env, crunchjob, '--job', jobrecord.to_json
+  end
+
+  def test_bogus_json
+    out, err = capture_subprocess_io do
+      system crunchenv, crunchjob, '--job', '"}{"'
+    end
+    assert_equal false, $?.success?
+    # Must not conflict with our special exit statuses
+    assert_jobfail $?
+    assert_match /JSON/, err
+  end
+
+  def test_fail_sanity_check
+    out, err = capture_subprocess_io do
+      j = {}
+      tryjobrecord j, binstubs: ['sanity_check']
+    end
+    assert_equal 75, $?.exitstatus
+    assert_match /Sanity check failed: 7/, err
+  end
+
+  def test_fail_docker_sanity_check
+    out, err = capture_subprocess_io do
+      j = {}
+      j[:docker_image_locator] = '4d449b9d34f2e2222747ef79c53fa3ff+1234'
+      tryjobrecord j, binstubs: ['sanity_check']
+    end
+    assert_equal 75, $?.exitstatus
+    assert_match /Sanity check failed: 8/, err
+  end
+
+  def test_no_script_specified
+    out, err = capture_subprocess_io do
+      j = jobspec :grep_local
+      j.delete :script
+      tryjobrecord j
+    end
+    assert_match /No script specified/, err
+    assert_jobfail $?
+  end
+
+  def test_fail_clean_tmp
+    out, err = capture_subprocess_io do
+      j = jobspec :grep_local
+      tryjobrecord j, binstubs: ['clean_fail']
+    end
+    assert_match /Failing mount stub was called/, err
+    assert_match /clean work dirs: exit 44\n.*Transient failure.* exiting 93\n(.*arv_put.*INFO.*\n)?$/, err
+    assert_equal SPECIAL_EXIT[:EX_RETRY_UNLOCKED], $?.exitstatus
+  end
+
+  def test_output_collection_owner_uuid
+    j = jobspec :grep_local
+    out, err = capture_subprocess_io do
+      tryjobrecord j, binstubs: ['arv-mount', 'output_coll_owner']
+    end
+    assert_match /owner_uuid: #{j[:owner_uuid]}/, err
+  end
+
+  def test_docker_image_missing
+    skip 'API bug: it refuses to create this job in Running state'
+    out, err = capture_subprocess_io do
+      j = jobspec :grep_local
+      j[:docker_image_locator] = '4d449b9d34f2e2222747ef79c53fa3ff+1234'
+      tryjobrecord j, binstubs: ['docker_noop']
+    end
+    assert_match /No Docker image hash found from locator/, err
+    assert_jobfail $?
+  end
+
+  def test_script_version_not_found_in_repository
+    bogus_version = 'f8b72707c1f5f740dbf1ed56eb429a36e0dee770'
+    out, err = capture_subprocess_io do
+      j = jobspec :grep_local
+      j[:script_version] = bogus_version
+      tryjobrecord j, binstubs: ['arv-mount']
+    end
+    assert_match /'#{bogus_version}' not found, giving up/, err
+    assert_jobfail $?
+  end
+
+  # Ensure procstatus is not interpreted as a temporary infrastructure
+  # problem. Would be assert_http_4xx if this were http.
+  def assert_jobfail procstatus
+    refute_includes SPECIAL_EXIT.values, procstatus.exitstatus
+    assert_equal false, procstatus.success?
+  end
+end
diff --git a/sdk/cwl/.gitignore b/sdk/cwl/.gitignore
new file mode 120000 (symlink)
index 0000000..1399fd4
--- /dev/null
@@ -0,0 +1 @@
+../python/.gitignore
\ No newline at end of file
diff --git a/sdk/cwl/LICENSE-2.0.txt b/sdk/cwl/LICENSE-2.0.txt
new file mode 100644 (file)
index 0000000..d645695
--- /dev/null
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/sdk/cwl/MANIFEST.in b/sdk/cwl/MANIFEST.in
new file mode 100644 (file)
index 0000000..50a2923
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+include LICENSE-2.0.txt
+include README.rst
+include arvados_version.py
\ No newline at end of file
diff --git a/sdk/cwl/README.rst b/sdk/cwl/README.rst
new file mode 100644 (file)
index 0000000..45d0be6
--- /dev/null
@@ -0,0 +1,5 @@
+.. Copyright (C) The Arvados Authors. All rights reserved.
+..
+.. SPDX-License-Identifier: Apache-2.0
+
+Arvados Common Workflow Language (CWL) runner.
diff --git a/sdk/cwl/arvados_cwl/__init__.py b/sdk/cwl/arvados_cwl/__init__.py
new file mode 100644 (file)
index 0000000..834ca19
--- /dev/null
@@ -0,0 +1,327 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Implement cwl-runner interface for submitting and running work on Arvados, using
+# either the Crunch jobs API or Crunch containers API.
+
+from future.utils import viewitems
+from builtins import str
+
+import argparse
+import logging
+import os
+import sys
+import re
+import pkg_resources  # part of setuptools
+
+from schema_salad.sourceline import SourceLine
+import schema_salad.validate as validate
+import cwltool.main
+import cwltool.workflow
+import cwltool.process
+import cwltool.argparser
+from cwltool.process import shortname, UnsupportedRequirement, use_custom_schema
+from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, get_listing
+
+import arvados
+import arvados.config
+from arvados.keep import KeepClient
+from arvados.errors import ApiError
+import arvados.commands._util as arv_cmd
+from arvados.api import OrderedJsonModel
+
+from .perf import Perf
+from ._version import __version__
+from .executor import ArvCwlExecutor
+
+# These aren't used directly in this file, but
+# other code expects to import them from here.
+from .arvcontainer import ArvadosContainer
+from .arvjob import ArvadosJob
+from .arvtool import ArvadosCommandTool
+from .fsaccess import CollectionFsAccess, CollectionCache, CollectionFetcher
+from .util import get_current_container
+from .executor import RuntimeStatusLoggingHandler, DEFAULT_PRIORITY
+from .arvworkflow import ArvadosWorkflow
+
+logger = logging.getLogger('arvados.cwl-runner')
+metrics = logging.getLogger('arvados.cwl-runner.metrics')
+logger.setLevel(logging.INFO)
+
+arvados.log_handler.setFormatter(logging.Formatter(
+        '%(asctime)s %(name)s %(levelname)s: %(message)s',
+        '%Y-%m-%d %H:%M:%S'))
+
+def versionstring():
+    """Print version string of key packages for provenance and debugging."""
+
+    arvcwlpkg = pkg_resources.require("arvados-cwl-runner")
+    arvpkg = pkg_resources.require("arvados-python-client")
+    cwlpkg = pkg_resources.require("cwltool")
+
+    return "%s %s, %s %s, %s %s" % (sys.argv[0], arvcwlpkg[0].version,
+                                    "arvados-python-client", arvpkg[0].version,
+                                    "cwltool", cwlpkg[0].version)
+
+
+def arg_parser():  # type: () -> argparse.ArgumentParser
+    parser = argparse.ArgumentParser(description='Arvados executor for Common Workflow Language')
+
+    parser.add_argument("--basedir",
+                        help="Base directory used to resolve relative references in the input, default to directory of input object file or current directory (if inputs piped/provided on command line).")
+    parser.add_argument("--outdir", default=os.path.abspath('.'),
+                        help="Output directory, default current directory")
+
+    parser.add_argument("--eval-timeout",
+                        help="Time to wait for a Javascript expression to evaluate before giving an error, default 20s.",
+                        type=float,
+                        default=20)
+
+    exgroup = parser.add_mutually_exclusive_group()
+    exgroup.add_argument("--print-dot", action="store_true",
+                         help="Print workflow visualization in graphviz format and exit")
+    exgroup.add_argument("--version", action="version", help="Print version and exit", version=versionstring())
+    exgroup.add_argument("--validate", action="store_true", help="Validate CWL document only.")
+
+    exgroup = parser.add_mutually_exclusive_group()
+    exgroup.add_argument("--verbose", action="store_true", help="Default logging")
+    exgroup.add_argument("--quiet", action="store_true", help="Only print warnings and errors.")
+    exgroup.add_argument("--debug", action="store_true", help="Print even more logging")
+
+    parser.add_argument("--metrics", action="store_true", help="Print timing metrics")
+
+    parser.add_argument("--tool-help", action="store_true", help="Print command line help for tool")
+
+    exgroup = parser.add_mutually_exclusive_group()
+    exgroup.add_argument("--enable-reuse", action="store_true",
+                        default=True, dest="enable_reuse",
+                        help="Enable job or container reuse (default)")
+    exgroup.add_argument("--disable-reuse", action="store_false",
+                        default=True, dest="enable_reuse",
+                        help="Disable job or container reuse")
+
+    parser.add_argument("--project-uuid", metavar="UUID", help="Project that will own the workflow jobs, if not provided, will go to home project.")
+    parser.add_argument("--output-name", help="Name to use for collection that stores the final output.", default=None)
+    parser.add_argument("--output-tags", help="Tags for the final output collection separated by commas, e.g., '--output-tags tag0,tag1,tag2'.", default=None)
+    parser.add_argument("--ignore-docker-for-reuse", action="store_true",
+                        help="Ignore Docker image version when deciding whether to reuse past jobs.",
+                        default=False)
+
+    exgroup = parser.add_mutually_exclusive_group()
+    exgroup.add_argument("--submit", action="store_true", help="Submit workflow to run on Arvados.",
+                        default=True, dest="submit")
+    exgroup.add_argument("--local", action="store_false", help="Run workflow on local host (submits jobs to Arvados).",
+                        default=True, dest="submit")
+    exgroup.add_argument("--create-template", action="store_true", help="(Deprecated) synonym for --create-workflow.",
+                         dest="create_workflow")
+    exgroup.add_argument("--create-workflow", action="store_true", help="Create an Arvados workflow (if using the 'containers' API) or pipeline template (if using the 'jobs' API). See --api.")
+    exgroup.add_argument("--update-workflow", metavar="UUID", help="Update an existing Arvados workflow or pipeline template with the given UUID.")
+
+    exgroup = parser.add_mutually_exclusive_group()
+    exgroup.add_argument("--wait", action="store_true", help="After submitting workflow runner job, wait for completion.",
+                        default=True, dest="wait")
+    exgroup.add_argument("--no-wait", action="store_false", help="Submit workflow runner job and exit.",
+                        default=True, dest="wait")
+
+    exgroup = parser.add_mutually_exclusive_group()
+    exgroup.add_argument("--log-timestamps", action="store_true", help="Prefix logging lines with timestamp",
+                        default=True, dest="log_timestamps")
+    exgroup.add_argument("--no-log-timestamps", action="store_false", help="No timestamp on logging lines",
+                        default=True, dest="log_timestamps")
+
+    parser.add_argument("--api",
+                        default=None, dest="work_api",
+                        choices=("jobs", "containers"),
+                        help="Select work submission API.  Default is 'jobs' if that API is available, otherwise 'containers'.")
+
+    parser.add_argument("--compute-checksum", action="store_true", default=False,
+                        help="Compute checksum of contents while collecting outputs",
+                        dest="compute_checksum")
+
+    parser.add_argument("--submit-runner-ram", type=int,
+                        help="RAM (in MiB) required for the workflow runner job (default 1024)",
+                        default=None)
+
+    parser.add_argument("--submit-runner-image",
+                        help="Docker image for workflow runner job, default arvados/jobs:%s" % __version__,
+                        default=None)
+
+    parser.add_argument("--always-submit-runner", action="store_true",
+                        help="When invoked with --submit --wait, always submit a runner to manage the workflow, even when only running a single CommandLineTool",
+                        default=False)
+
+    exgroup = parser.add_mutually_exclusive_group()
+    exgroup.add_argument("--submit-request-uuid",
+                         default=None,
+                         help="Update and commit to supplied container request instead of creating a new one (containers API only).",
+                         metavar="UUID")
+    exgroup.add_argument("--submit-runner-cluster",
+                         help="Submit workflow runner to a remote cluster (containers API only)",
+                         default=None,
+                         metavar="CLUSTER_ID")
+
+    parser.add_argument("--collection-cache-size", type=int,
+                        default=None,
+                        help="Collection cache size (in MiB, default 256).")
+
+    parser.add_argument("--name",
+                        help="Name to use for workflow execution instance.",
+                        default=None)
+
+    parser.add_argument("--on-error",
+                        help="Desired workflow behavior when a step fails.  One of 'stop' (do not submit any more steps) or "
+                        "'continue' (may submit other steps that are not downstream from the error). Default is 'continue'.",
+                        default="continue", choices=("stop", "continue"))
+
+    parser.add_argument("--enable-dev", action="store_true",
+                        help="Enable loading and running development versions "
+                             "of CWL spec.", default=False)
+    parser.add_argument('--storage-classes', default="default",
+                        help="Specify comma separated list of storage classes to be used when saving workflow output to Keep.")
+
+    parser.add_argument("--intermediate-output-ttl", type=int, metavar="N",
+                        help="If N > 0, intermediate output collections will be trashed N seconds after creation.  Default is 0 (don't trash).",
+                        default=0)
+
+    parser.add_argument("--priority", type=int,
+                        help="Workflow priority (range 1..1000, higher has precedence over lower, containers api only)",
+                        default=DEFAULT_PRIORITY)
+
+    parser.add_argument("--disable-validate", dest="do_validate",
+                        action="store_false", default=True,
+                        help=argparse.SUPPRESS)
+
+    parser.add_argument("--disable-js-validation",
+                        action="store_true", default=False,
+                        help=argparse.SUPPRESS)
+
+    parser.add_argument("--thread-count", type=int,
+                        default=1, help="Number of threads to use for job submit and output collection.")
+
+    parser.add_argument("--http-timeout", type=int,
+                        default=5*60, dest="http_timeout", help="API request timeout in seconds. Default is 300 seconds (5 minutes).")
+
+    exgroup = parser.add_mutually_exclusive_group()
+    exgroup.add_argument("--trash-intermediate", action="store_true",
+                        default=False, dest="trash_intermediate",
+                         help="Immediately trash intermediate outputs on workflow success.")
+    exgroup.add_argument("--no-trash-intermediate", action="store_false",
+                        default=False, dest="trash_intermediate",
+                        help="Do not trash intermediate outputs (default).")
+
+    parser.add_argument("workflow", default=None, help="The workflow to execute")
+    parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.")
+
+    return parser
+
+def add_arv_hints():
+    cwltool.command_line_tool.ACCEPTLIST_EN_RELAXED_RE = re.compile(r".*")
+    cwltool.command_line_tool.ACCEPTLIST_RE = cwltool.command_line_tool.ACCEPTLIST_EN_RELAXED_RE
+    res = pkg_resources.resource_stream(__name__, 'arv-cwl-schema.yml')
+    use_custom_schema("v1.0", "http://arvados.org/cwl", res.read())
+    res.close()
+    cwltool.process.supportedProcessRequirements.extend([
+        "http://arvados.org/cwl#RunInSingleContainer",
+        "http://arvados.org/cwl#OutputDirType",
+        "http://arvados.org/cwl#RuntimeConstraints",
+        "http://arvados.org/cwl#PartitionRequirement",
+        "http://arvados.org/cwl#APIRequirement",
+        "http://commonwl.org/cwltool#LoadListingRequirement",
+        "http://arvados.org/cwl#IntermediateOutput",
+        "http://arvados.org/cwl#ReuseRequirement",
+        "http://arvados.org/cwl#ClusterTarget"
+    ])
+
+def exit_signal_handler(sigcode, frame):
+    logger.error(str(u"Caught signal {}, exiting.").format(sigcode))
+    sys.exit(-sigcode)
+
+def main(args, stdout, stderr, api_client=None, keep_client=None,
+         install_sig_handlers=True):
+    parser = arg_parser()
+
+    job_order_object = None
+    arvargs = parser.parse_args(args)
+
+    if len(arvargs.storage_classes.strip().split(',')) > 1:
+        logger.error(str(u"Multiple storage classes are not supported currently."))
+        return 1
+
+    arvargs.use_container = True
+    arvargs.relax_path_checks = True
+    arvargs.print_supported_versions = False
+
+    if install_sig_handlers:
+        arv_cmd.install_signal_handlers()
+
+    if arvargs.update_workflow:
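+        # Arvados UUIDs carry an object-type infix at offset 5: '-7fd4e-'
+        # marks a workflow (containers API) and '-p5p6p-' a pipeline
+        # template (jobs API).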
+        if arvargs.update_workflow.find('-7fd4e-') == 5:
+            want_api = 'containers'
+        elif arvargs.update_workflow.find('-p5p6p-') == 5:
+            want_api = 'jobs'
+        else:
+            want_api = None
+        if want_api and arvargs.work_api and want_api != arvargs.work_api:
+            logger.error(str(u'--update-workflow arg {!r} uses {!r} API, but --api={!r} specified').format(
+                arvargs.update_workflow, want_api, arvargs.work_api))
+            return 1
+        arvargs.work_api = want_api
+
+    if (arvargs.create_workflow or arvargs.update_workflow) and not arvargs.job_order:
+        job_order_object = ({}, "")
+
+    add_arv_hints()
+
+    for key, val in viewitems(cwltool.argparser.get_default_args()):
+        if not hasattr(arvargs, key):
+            setattr(arvargs, key, val)
+
+    try:
+        if api_client is None:
+            api_client = arvados.safeapi.ThreadSafeApiCache(
+                api_params={"model": OrderedJsonModel(), "timeout": arvargs.http_timeout},
+                keep_params={"num_retries": 4})
+            keep_client = api_client.keep
+            # Make an API request now so credential errors are reported early.
+            api_client.users().current().execute()
+        if keep_client is None:
+            keep_client = arvados.keep.KeepClient(api_client=api_client, num_retries=4)
+        executor = ArvCwlExecutor(api_client, arvargs, keep_client=keep_client, num_retries=4)
+    except Exception:
+        logger.exception("Error creating the Arvados CWL Executor")
+        return 1
+
+    # Note that unless in debug mode, some stack traces related to user
+    # workflow errors may be suppressed. See ArvadosJob.done().
+    if arvargs.debug:
+        logger.setLevel(logging.DEBUG)
+        logging.getLogger('arvados').setLevel(logging.DEBUG)
+
+    if arvargs.quiet:
+        logger.setLevel(logging.WARN)
+        logging.getLogger('arvados').setLevel(logging.WARN)
+        logging.getLogger('arvados.arv-run').setLevel(logging.WARN)
+
+    if arvargs.metrics:
+        metrics.setLevel(logging.DEBUG)
+        logging.getLogger("cwltool.metrics").setLevel(logging.DEBUG)
+
+    if arvargs.log_timestamps:
+        arvados.log_handler.setFormatter(logging.Formatter(
+            '%(asctime)s %(name)s %(levelname)s: %(message)s',
+            '%Y-%m-%d %H:%M:%S'))
+    else:
+        arvados.log_handler.setFormatter(logging.Formatter('%(name)s %(levelname)s: %(message)s'))
+
+    return cwltool.main.main(args=arvargs,
+                             stdout=stdout,
+                             stderr=stderr,
+                             executor=executor.arv_executor,
+                             versionfunc=versionstring,
+                             job_order_object=job_order_object,
+                             logger_handler=arvados.log_handler,
+                             custom_schema_callback=add_arv_hints,
+                             loadingContext=executor.loadingContext,
+                             runtimeContext=executor.runtimeContext)
diff --git a/sdk/cwl/arvados_cwl/arv-cwl-schema.yml b/sdk/cwl/arvados_cwl/arv-cwl-schema.yml
new file mode 100644 (file)
index 0000000..dce1bd4
--- /dev/null
@@ -0,0 +1,262 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+$base: "http://arvados.org/cwl#"
+$namespaces:
+  cwl: "https://w3id.org/cwl/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+$graph:
+- $import: https://w3id.org/cwl/CommonWorkflowLanguage.yml
+
+- name: cwltool:LoadListingRequirement
+  type: record
+  extends: cwl:ProcessRequirement
+  inVocab: false
+  fields:
+    class:
+      type: string
+      doc: "Always 'LoadListingRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    loadListing:
+      type:
+        - "null"
+        - type: enum
+          name: LoadListingEnum
+          symbols: [no_listing, shallow_listing, deep_listing]
+
+- name: cwltool:Secrets
+  type: record
+  inVocab: false
+  extends: cwl:ProcessRequirement
+  fields:
+    class:
+      type: string
+      doc: "Always 'Secrets'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    secrets:
+      type: string[]
+      doc: |
+        List one or more input parameters that are sensitive (such as passwords)
+        which will be deliberately obscured from logging.
+      jsonldPredicate:
+        "_type": "@id"
+        refScope: 0
+
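+# A usage sketch for cwltool:Secrets (hypothetical; 'db_password' stands in
+# for a real workflow input id):
+#
+#   hints:
+#     "cwltool:Secrets":
+#       secrets: [db_password]
+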
+- name: cwltool:TimeLimit
+  type: record
+  inVocab: false
+  extends: cwl:ProcessRequirement
+  doc: |
+    Set an upper limit on the execution time of a CommandLineTool or
+    ExpressionTool.  A tool execution which exceeds the time limit may
+    be preemptively terminated and considered failed.  May also be
+    used by batch systems to make scheduling decisions.
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'TimeLimit'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: timelimit
+      type: [long, string]
+      doc: |
+        The time limit, in seconds.  A time limit of zero means no
+        time limit.  Negative time limits are an error.
+
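+# A usage sketch for cwltool:TimeLimit (hypothetical one-hour limit):
+#
+#   hints:
+#     "cwltool:TimeLimit":
+#       timelimit: 3600
+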
+- name: RunInSingleContainer
+  type: record
+  extends: cwl:ProcessRequirement
+  inVocab: false
+  doc: |
+    Indicates that a subworkflow should run in a single container
+    and not be scheduled as separate steps.
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'arv:RunInSingleContainer'"
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+
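+# A usage sketch for arv:RunInSingleContainer (attached to a workflow step
+# whose run target is a subworkflow):
+#
+#   hints:
+#     arv:RunInSingleContainer: {}
+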
+- name: OutputDirType
+  type: enum
+  symbols:
+    - local_output_dir
+    - keep_output_dir
+  doc:
+    - |
+      local_output_dir: Use regular file system local to the compute node.
+      There must be sufficient local scratch space to store entire output;
+      specify this with `outdirMin` of `ResourceRequirement`.  Files are
+      batch uploaded to Keep when the process completes.  Most compatible, but
+      upload step can be time consuming for very large files.
+    - |
+      keep_output_dir: Use writable Keep mount.  Files are streamed to Keep as
+      they are written.  Does not consume local scratch space, but does consume
+      RAM for output buffers (up to 192 MiB per file simultaneously open for
+      writing.)  Best suited to processes which produce sequential output of
+      large files (non-sequential writes may produce fragmented file
+      manifests).  Supports regular files and directories, does not support
+      special files such as symlinks, hard links, named pipes, named sockets,
+      or device nodes.
+
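+# OutputDirType is consumed via arv:RuntimeConstraints, e.g. (hypothetical):
+#
+#   hints:
+#     arv:RuntimeConstraints:
+#       outputDirType: keep_output_dir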
+
+- name: RuntimeConstraints
+  type: record
+  extends: cwl:ProcessRequirement
+  inVocab: false
+  doc: |
+    Set Arvados-specific runtime hints.
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'arv:RuntimeConstraints'"
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+    - name: keep_cache
+      type: int?
+      doc: |
+        Size of file data buffer for Keep mount in MiB. Default is 256
+        MiB. Increase this to reduce cache thrashing in situations such as
+        accessing multiple large (64+ MiB) files at the same time, or
+        performing random access on a large file.
+    - name: outputDirType
+      type: OutputDirType?
+      doc: |
+        Preferred backing store for output staging.  If not specified, the
+        system may choose which one to use.
+
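+# A usage sketch for arv:RuntimeConstraints (hypothetical 1 GiB Keep cache):
+#
+#   hints:
+#     arv:RuntimeConstraints:
+#       keep_cache: 1024
+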
+- name: PartitionRequirement
+  type: record
+  extends: cwl:ProcessRequirement
+  inVocab: false
+  doc: |
+    Select preferred compute partitions on which to run jobs.
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'arv:PartitionRequirement'"
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+    - name: partition
+      type:
+        - string
+        - string[]
+
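+# A usage sketch for arv:PartitionRequirement (hypothetical partition names):
+#
+#   hints:
+#     arv:PartitionRequirement:
+#       partition: [highmem, gpu]
+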
+- name: APIRequirement
+  type: record
+  extends: cwl:ProcessRequirement
+  inVocab: false
+  doc: |
+    Indicates that the process wants access to the Arvados API.  It will be
+    granted limited network access and will have ARVADOS_API_HOST and
+    ARVADOS_API_TOKEN set in the environment.
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'arv:APIRequirement'"
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+
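+# A usage sketch for arv:APIRequirement (no fields besides the class):
+#
+#   hints:
+#     arv:APIRequirement: {}
+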
+- name: IntermediateOutput
+  type: record
+  extends: cwl:ProcessRequirement
+  inVocab: false
+  doc: |
+    Specify desired handling of intermediate output collections.
+  fields:
+    class:
+      type: string
+      doc: "Always 'arv:IntermediateOutput'"
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+    outputTTL:
+      type: int
+      doc: |
+        If the value is greater than zero, intermediate output collections
+        are considered temporary and will be automatically trashed
+        `outputTTL` seconds after creation.  A value of zero means
+        intermediate output should be retained indefinitely (this is the
+        default behavior).
+
+        Note: arvados-cwl-runner currently does not take workflow dependencies
+        into account when setting the TTL on an intermediate output
+        collection. If the TTL is too short, it is possible for a collection to
+        be trashed before downstream steps that consume it are started.  The
+        recommended minimum value for TTL is the expected duration of the
+        entire workflow.
+
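+# A usage sketch for arv:IntermediateOutput (hypothetical one-day TTL):
+#
+#   hints:
+#     arv:IntermediateOutput:
+#       outputTTL: 86400
+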
+- name: ReuseRequirement
+  type: record
+  extends: cwl:ProcessRequirement
+  inVocab: false
+  doc: |
+    Enable/disable work reuse for current process.  Default true (work reuse enabled).
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'arv:ReuseRequirement'"
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+    - name: enableReuse
+      type: boolean
+
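+# A usage sketch for arv:ReuseRequirement (disables reuse for one tool):
+#
+#   hints:
+#     arv:ReuseRequirement:
+#       enableReuse: false
+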
+- name: WorkflowRunnerResources
+  type: record
+  extends: cwl:ProcessRequirement
+  inVocab: false
+  doc: |
+    Specify memory or cores resource request for the CWL runner process itself.
+  fields:
+    class:
+      type: string
+      doc: "Always 'arv:WorkflowRunnerResources'"
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+    ramMin:
+      type: int?
+      doc: Minimum RAM, in mebibytes (2**20)
+      jsonldPredicate: "https://w3id.org/cwl/cwl#ResourceRequirement/ramMin"
+    coresMin:
+      type: int?
+      doc: Minimum cores allocated to cwl-runner
+      jsonldPredicate: "https://w3id.org/cwl/cwl#ResourceRequirement/coresMin"
+    keep_cache:
+      type: int?
+      doc: |
+        Size of collection metadata cache for the workflow runner, in
+        MiB.  Default 256 MiB.  Will be added on to the RAM request
+        when determining node size to request.
+      jsonldPredicate: "http://arvados.org/cwl#RuntimeConstraints/keep_cache"
+
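+# A usage sketch for arv:WorkflowRunnerResources (hypothetical sizes, set on
+# the top-level workflow):
+#
+#   hints:
+#     arv:WorkflowRunnerResources:
+#       ramMin: 2048
+#       coresMin: 2
+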
+- name: ClusterTarget
+  type: record
+  extends: cwl:ProcessRequirement
+  inVocab: false
+  doc: |
+    Specify where a workflow step should run.
+  fields:
+    class:
+      type: string
+      doc: "Always 'arv:ClusterTarget'"
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+    cluster_id:
+      type: string?
+      doc: The cluster on which to run the container
+    project_uuid:
+      type: string?
+      doc: The project that will own the container requests and intermediate collections
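A hypothetical instance of this hint (the cluster id and project UUID are placeholders), in the form consumed by `set_cluster_target` in arvtool.py:

    cluster_target_hint = {
        "class": "http://arvados.org/cwl#ClusterTarget",
        "cluster_id": "pirca",                          # placeholder cluster prefix
        "project_uuid": "pirca-j7d0g-0123456789abcde",  # placeholder project UUID
    }
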
diff --git a/sdk/cwl/arvados_cwl/arvcontainer.py b/sdk/cwl/arvados_cwl/arvcontainer.py
new file mode 100644 (file)
index 0000000..03b4e07
--- /dev/null
@@ -0,0 +1,532 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from future import standard_library
+standard_library.install_aliases()
+from builtins import str
+
+import logging
+import json
+import os
+import urllib.request, urllib.parse, urllib.error
+import time
+import datetime
+import ciso8601
+import uuid
+import math
+
+import arvados_cwl.util
+import ruamel.yaml as yaml
+
+from cwltool.errors import WorkflowException
+from cwltool.process import UnsupportedRequirement, shortname
+from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, visit_class
+from cwltool.utils import aslist
+from cwltool.job import JobBase
+
+import arvados.collection
+
+from .arvdocker import arv_docker_get_image
+from . import done
+from .runner import Runner, arvados_jobs_image, packed_workflow, trim_anonymous_location, remove_redundant_fields
+from .fsaccess import CollectionFetcher
+from .pathmapper import NoFollowPathMapper, trim_listing
+from .perf import Perf
+
+logger = logging.getLogger('arvados.cwl-runner')
+metrics = logging.getLogger('arvados.cwl-runner.metrics')
+
+class ArvadosContainer(JobBase):
+    """Submit and manage a Crunch container request for executing a CWL CommandLineTool."""
+
+    def __init__(self, runner, job_runtime,
+                 builder,   # type: Builder
+                 joborder,  # type: Dict[Text, Union[Dict[Text, Any], List, Text]]
+                 make_path_mapper,  # type: Callable[..., PathMapper]
+                 requirements,      # type: List[Dict[Text, Text]]
+                 hints,     # type: List[Dict[Text, Text]]
+                 name       # type: Text
+    ):
+        super(ArvadosContainer, self).__init__(builder, joborder, make_path_mapper, requirements, hints, name)
+        self.arvrunner = runner
+        self.job_runtime = job_runtime
+        self.running = False
+        self.uuid = None
+
+    def update_pipeline_component(self, r):
+        pass
+
+    def run(self, runtimeContext):
+        # ArvadosCommandTool subclasses from cwltool.CommandLineTool,
+        # which calls makeJobRunner() to get a new ArvadosContainer
+        # object.  The fields that define execution such as
+        # command_line, environment, etc are set on the
+        # ArvadosContainer object by CommandLineTool.job() before
+        # run() is called.
+
+        runtimeContext = self.job_runtime
+
+        container_request = {
+            "command": self.command_line,
+            "name": self.name,
+            "output_path": self.outdir,
+            "cwd": self.outdir,
+            "priority": runtimeContext.priority,
+            "state": "Committed",
+            "properties": {},
+        }
+        runtime_constraints = {}
+
+        if runtimeContext.project_uuid:
+            container_request["owner_uuid"] = runtimeContext.project_uuid
+
+        if self.arvrunner.secret_store.has_secret(self.command_line):
+            raise WorkflowException("Secret material leaked on command line, only file literals may contain secrets")
+
+        if self.arvrunner.secret_store.has_secret(self.environment):
+            raise WorkflowException("Secret material leaked in environment, only file literals may contain secrets")
+
+        resources = self.builder.resources
+        if resources is not None:
+            runtime_constraints["vcpus"] = math.ceil(resources.get("cores", 1))
+            runtime_constraints["ram"] = math.ceil(resources.get("ram") * 2**20)
+
+        mounts = {
+            self.outdir: {
+                "kind": "tmp",
+                "capacity": math.ceil(resources.get("outdirSize", 0) * 2**20)
+            },
+            self.tmpdir: {
+                "kind": "tmp",
+                "capacity": math.ceil(resources.get("tmpdirSize", 0) * 2**20)
+            }
+        }
+        secret_mounts = {}
+        scheduling_parameters = {}
+
+        rf = [self.pathmapper.mapper(f) for f in self.pathmapper.referenced_files]
+        rf.sort(key=lambda k: k.resolved)
+        prevdir = None
+        for resolved, target, tp, stg in rf:
+            if not stg:
+                continue
+            if prevdir and target.startswith(prevdir):
+                continue
+            if tp == "Directory":
+                targetdir = target
+            else:
+                targetdir = os.path.dirname(target)
+            sp = resolved.split("/", 1)
+            pdh = sp[0][5:]   # remove "keep:"
+            mounts[targetdir] = {
+                "kind": "collection",
+                "portable_data_hash": pdh
+            }
+            if len(sp) == 2:
+                if tp == "Directory":
+                    path = sp[1]
+                else:
+                    path = os.path.dirname(sp[1])
+                if path and path != "/":
+                    mounts[targetdir]["path"] = path
+            prevdir = targetdir + "/"
+
+        with Perf(metrics, "generatefiles %s" % self.name):
+            if self.generatefiles["listing"]:
+                vwd = arvados.collection.Collection(api_client=self.arvrunner.api,
+                                                    keep_client=self.arvrunner.keep_client,
+                                                    num_retries=self.arvrunner.num_retries)
+                generatemapper = NoFollowPathMapper(self.generatefiles["listing"], "", "",
+                                                    separateDirs=False)
+
+                sorteditems = sorted(generatemapper.items(), key=lambda n: n[1].target)
+
+                logger.debug("generatemapper is %s", sorteditems)
+
+                with Perf(metrics, "createfiles %s" % self.name):
+                    for f, p in sorteditems:
+                        if not p.target:
+                            pass
+                        elif p.type in ("File", "Directory", "WritableFile", "WritableDirectory"):
+                            if p.resolved.startswith("_:"):
+                                vwd.mkdirs(p.target)
+                            else:
+                                source, path = self.arvrunner.fs_access.get_collection(p.resolved)
+                                vwd.copy(path, p.target, source_collection=source)
+                        elif p.type == "CreateFile":
+                            if self.arvrunner.secret_store.has_secret(p.resolved):
+                                secret_mounts["%s/%s" % (self.outdir, p.target)] = {
+                                    "kind": "text",
+                                    "content": self.arvrunner.secret_store.retrieve(p.resolved)
+                                }
+                            else:
+                                with vwd.open(p.target, "w") as n:
+                                    n.write(p.resolved)
+
+                def keepemptydirs(p):
+                    if isinstance(p, arvados.collection.RichCollectionBase):
+                        if len(p) == 0:
+                            p.open(".keep", "w").close()
+                        else:
+                            for c in p:
+                                keepemptydirs(p[c])
+
+                keepemptydirs(vwd)
+
+                if not runtimeContext.current_container:
+                    runtimeContext.current_container = arvados_cwl.util.get_current_container(self.arvrunner.api, self.arvrunner.num_retries, logger)
+                info = arvados_cwl.util.get_intermediate_collection_info(self.name, runtimeContext.current_container, runtimeContext.intermediate_output_ttl)
+                vwd.save_new(name=info["name"],
+                             owner_uuid=runtimeContext.project_uuid,
+                             ensure_unique_name=True,
+                             trash_at=info["trash_at"],
+                             properties=info["properties"])
+
+                prev = None
+                for f, p in sorteditems:
+                    if (not p.target or self.arvrunner.secret_store.has_secret(p.resolved) or
+                        (prev is not None and p.target.startswith(prev))):
+                        continue
+                    mountpoint = "%s/%s" % (self.outdir, p.target)
+                    mounts[mountpoint] = {"kind": "collection",
+                                          "portable_data_hash": vwd.portable_data_hash(),
+                                          "path": p.target}
+                    if p.type.startswith("Writable"):
+                        mounts[mountpoint]["writable"] = True
+                    prev = p.target + "/"
+
+        container_request["environment"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
+        if self.environment:
+            container_request["environment"].update(self.environment)
+
+        if self.stdin:
+            sp = self.stdin[6:].split("/", 1)
+            mounts["stdin"] = {"kind": "collection",
+                                "portable_data_hash": sp[0],
+                                "path": sp[1]}
+
+        if self.stderr:
+            mounts["stderr"] = {"kind": "file",
+                                "path": "%s/%s" % (self.outdir, self.stderr)}
+
+        if self.stdout:
+            mounts["stdout"] = {"kind": "file",
+                                "path": "%s/%s" % (self.outdir, self.stdout)}
+
+        (docker_req, docker_is_req) = self.get_requirement("DockerRequirement")
+        if not docker_req:
+            docker_req = {"dockerImageId": "arvados/jobs"}
+
+        container_request["container_image"] = arv_docker_get_image(self.arvrunner.api,
+                                                                    docker_req,
+                                                                    runtimeContext.pull_image,
+                                                                    runtimeContext.project_uuid)
+
+        api_req, _ = self.get_requirement("http://arvados.org/cwl#APIRequirement")
+        if api_req:
+            runtime_constraints["API"] = True
+
+        runtime_req, _ = self.get_requirement("http://arvados.org/cwl#RuntimeConstraints")
+        if runtime_req:
+            if "keep_cache" in runtime_req:
+                runtime_constraints["keep_cache_ram"] = math.ceil(runtime_req["keep_cache"] * 2**20)
+            if "outputDirType" in runtime_req:
+                if runtime_req["outputDirType"] == "local_output_dir":
+                    # Currently the default behavior.
+                    pass
+                elif runtime_req["outputDirType"] == "keep_output_dir":
+                    mounts[self.outdir]= {
+                        "kind": "collection",
+                        "writable": True
+                    }
+
+        partition_req, _ = self.get_requirement("http://arvados.org/cwl#PartitionRequirement")
+        if partition_req:
+            scheduling_parameters["partitions"] = aslist(partition_req["partition"])
+
+        intermediate_output_req, _ = self.get_requirement("http://arvados.org/cwl#IntermediateOutput")
+        if intermediate_output_req:
+            self.output_ttl = intermediate_output_req["outputTTL"]
+        else:
+            self.output_ttl = self.arvrunner.intermediate_output_ttl
+
+        if self.output_ttl < 0:
+            raise WorkflowException("Invalid value %d for output_ttl, cannot be less than zero" % container_request["output_ttl"])
+
+        if self.timelimit is not None:
+            scheduling_parameters["max_run_time"] = self.timelimit
+
+        extra_submit_params = {}
+        if runtimeContext.submit_runner_cluster:
+            extra_submit_params["cluster_id"] = runtimeContext.submit_runner_cluster
+
+        container_request["output_name"] = "Output for step %s" % (self.name)
+        container_request["output_ttl"] = self.output_ttl
+        container_request["mounts"] = mounts
+        container_request["secret_mounts"] = secret_mounts
+        container_request["runtime_constraints"] = runtime_constraints
+        container_request["scheduling_parameters"] = scheduling_parameters
+
+        enable_reuse = runtimeContext.enable_reuse
+        if enable_reuse:
+            reuse_req, _ = self.get_requirement("http://arvados.org/cwl#ReuseRequirement")
+            if reuse_req:
+                enable_reuse = reuse_req["enableReuse"]
+        container_request["use_existing"] = enable_reuse
+
+        if runtimeContext.runnerjob.startswith("arvwf:"):
+            wfuuid = runtimeContext.runnerjob[6:runtimeContext.runnerjob.index("#")]
+            wfrecord = self.arvrunner.api.workflows().get(uuid=wfuuid).execute(num_retries=self.arvrunner.num_retries)
+            if container_request["name"] == "main":
+                container_request["name"] = wfrecord["name"]
+            container_request["properties"]["template_uuid"] = wfuuid
+
+        self.output_callback = self.arvrunner.get_wrapped_callback(self.output_callback)
+
+        try:
+            if runtimeContext.submit_request_uuid:
+                response = self.arvrunner.api.container_requests().update(
+                    uuid=runtimeContext.submit_request_uuid,
+                    body=container_request,
+                    **extra_submit_params
+                ).execute(num_retries=self.arvrunner.num_retries)
+            else:
+                response = self.arvrunner.api.container_requests().create(
+                    body=container_request,
+                    **extra_submit_params
+                ).execute(num_retries=self.arvrunner.num_retries)
+
+            self.uuid = response["uuid"]
+            self.arvrunner.process_submitted(self)
+
+            if response["state"] == "Final":
+                logger.info("%s reused container %s", self.arvrunner.label(self), response["container_uuid"])
+            else:
+                logger.info("%s %s state is %s", self.arvrunner.label(self), response["uuid"], response["state"])
+        except Exception:
+            logger.exception("%s got an error", self.arvrunner.label(self))
+            self.output_callback({}, "permanentFail")
+
+    def done(self, record):
+        outputs = {}
+        try:
+            container = self.arvrunner.api.containers().get(
+                uuid=record["container_uuid"]
+            ).execute(num_retries=self.arvrunner.num_retries)
+            if container["state"] == "Complete":
+                rcode = container["exit_code"]
+                if self.successCodes and rcode in self.successCodes:
+                    processStatus = "success"
+                elif self.temporaryFailCodes and rcode in self.temporaryFailCodes:
+                    processStatus = "temporaryFail"
+                elif self.permanentFailCodes and rcode in self.permanentFailCodes:
+                    processStatus = "permanentFail"
+                elif rcode == 0:
+                    processStatus = "success"
+                else:
+                    processStatus = "permanentFail"
+            else:
+                processStatus = "permanentFail"
+
+            if processStatus == "permanentFail":
+                logc = arvados.collection.CollectionReader(container["log"],
+                                                           api_client=self.arvrunner.api,
+                                                           keep_client=self.arvrunner.keep_client,
+                                                           num_retries=self.arvrunner.num_retries)
+                label = self.arvrunner.label(self)
+                done.logtail(
+                    logc, logger.error,
+                    "%s (%s) error log:" % (label, record["uuid"]), maxlen=40)
+
+            if record["output_uuid"]:
+                if self.arvrunner.trash_intermediate or self.arvrunner.intermediate_output_ttl:
+                    # Compute the trash time to avoid requesting the collection record.
+                    trash_at = ciso8601.parse_datetime_as_naive(record["modified_at"]) + datetime.timedelta(0, self.arvrunner.intermediate_output_ttl)
+                    aftertime = " at %s" % trash_at.strftime("%Y-%m-%d %H:%M:%S UTC") if self.arvrunner.intermediate_output_ttl else ""
+                    orpart = ", or" if self.arvrunner.trash_intermediate and self.arvrunner.intermediate_output_ttl else ""
+                    oncomplete = " upon successful completion of the workflow" if self.arvrunner.trash_intermediate else ""
+                    logger.info("%s Intermediate output %s (%s) will be trashed%s%s%s." % (
+                        self.arvrunner.label(self), record["output_uuid"], container["output"], aftertime, orpart, oncomplete))
+                self.arvrunner.add_intermediate_output(record["output_uuid"])
+
+            if container["output"]:
+                outputs = done.done_outputs(self, container, "/tmp", self.outdir, "/keep")
+        except WorkflowException as e:
+            # Only include a stack trace if in debug mode.
+            # A stack trace may obfuscate more useful output about the workflow.
+            logger.error("%s unable to collect output from %s:\n%s",
+                         self.arvrunner.label(self), container["output"], e, exc_info=(e if self.arvrunner.debug else False))
+            processStatus = "permanentFail"
+        except Exception:
+            logger.exception("%s while getting output object:", self.arvrunner.label(self))
+            processStatus = "permanentFail"
+        finally:
+            self.output_callback(outputs, processStatus)
+
+
+class RunnerContainer(Runner):
+    """Submit and manage a container that runs arvados-cwl-runner."""
+
+    def arvados_job_spec(self, runtimeContext):
+        """Create an Arvados container request for this workflow.
+
+        The returned dict can be used to create a container request, passed
+        as the +body+ argument to container_requests().create().
+        """
+
+        adjustDirObjs(self.job_order, trim_listing)
+        visit_class(self.job_order, ("File", "Directory"), trim_anonymous_location)
+        visit_class(self.job_order, ("File", "Directory"), remove_redundant_fields)
+
+        secret_mounts = {}
+        for param in sorted(self.job_order.keys()):
+            if self.secret_store.has_secret(self.job_order[param]):
+                mnt = "/secrets/s%d" % len(secret_mounts)
+                secret_mounts[mnt] = {
+                    "kind": "text",
+                    "content": self.secret_store.retrieve(self.job_order[param])
+                }
+                self.job_order[param] = {"$include": mnt}
+
+        container_req = {
+            "name": self.name,
+            "output_path": "/var/spool/cwl",
+            "cwd": "/var/spool/cwl",
+            "priority": self.priority,
+            "state": "Committed",
+            "container_image": arvados_jobs_image(self.arvrunner, self.jobs_image),
+            "mounts": {
+                "/var/lib/cwl/cwl.input.json": {
+                    "kind": "json",
+                    "content": self.job_order
+                },
+                "stdout": {
+                    "kind": "file",
+                    "path": "/var/spool/cwl/cwl.output.json"
+                },
+                "/var/spool/cwl": {
+                    "kind": "collection",
+                    "writable": True
+                }
+            },
+            "secret_mounts": secret_mounts,
+            "runtime_constraints": {
+                "vcpus": math.ceil(self.submit_runner_cores),
+                "ram": 1024*1024 * (math.ceil(self.submit_runner_ram) + math.ceil(self.collection_cache_size)),
+                "API": True
+            },
+            "use_existing": self.enable_reuse,
+            "properties": {}
+        }
+
+        if self.embedded_tool.tool.get("id", "").startswith("keep:"):
+            sp = self.embedded_tool.tool["id"].split('/')
+            workflowcollection = sp[0][5:]
+            workflowname = "/".join(sp[1:])
+            workflowpath = "/var/lib/cwl/workflow/%s" % workflowname
+            container_req["mounts"]["/var/lib/cwl/workflow"] = {
+                "kind": "collection",
+                "portable_data_hash": "%s" % workflowcollection
+            }
+        else:
+            packed = packed_workflow(self.arvrunner, self.embedded_tool, self.merged_map)
+            workflowpath = "/var/lib/cwl/workflow.json#main"
+            container_req["mounts"]["/var/lib/cwl/workflow.json"] = {
+                "kind": "json",
+                "content": packed
+            }
+            if self.embedded_tool.tool.get("id", "").startswith("arvwf:"):
+                container_req["properties"]["template_uuid"] = self.embedded_tool.tool["id"][6:33]
+
+
+        # --local means execute the workflow instead of submitting a container request
+        # --api=containers means use the containers API
+        # --no-log-timestamps means don't add timestamps (the logging infrastructure does this)
+        # --disable-validate because we already validated so don't need to do it again
+        # --eval-timeout is the timeout for javascript invocation
+        # --parallel-task-count is the number of threads to use for job submission
+        # --enable/disable-reuse sets desired job reuse
+        # --collection-cache-size sets aside memory to store collections
+        command = ["arvados-cwl-runner",
+                   "--local",
+                   "--api=containers",
+                   "--no-log-timestamps",
+                   "--disable-validate",
+                   "--eval-timeout=%s" % self.arvrunner.eval_timeout,
+                   "--thread-count=%s" % self.arvrunner.thread_count,
+                   "--enable-reuse" if self.enable_reuse else "--disable-reuse",
+                   "--collection-cache-size=%s" % self.collection_cache_size]
+
+        if self.output_name:
+            command.append("--output-name=" + self.output_name)
+            container_req["output_name"] = self.output_name
+
+        if self.output_tags:
+            command.append("--output-tags=" + self.output_tags)
+
+        if runtimeContext.debug:
+            command.append("--debug")
+
+        if runtimeContext.storage_classes != "default":
+            command.append("--storage-classes=" + runtimeContext.storage_classes)
+
+        if self.on_error:
+            command.append("--on-error=" + self.on_error)
+
+        if self.intermediate_output_ttl:
+            command.append("--intermediate-output-ttl=%d" % self.intermediate_output_ttl)
+
+        if self.arvrunner.trash_intermediate:
+            command.append("--trash-intermediate")
+
+        if self.arvrunner.project_uuid:
+            command.append("--project-uuid="+self.arvrunner.project_uuid)
+
+        command.extend([workflowpath, "/var/lib/cwl/cwl.input.json"])
+
+        container_req["command"] = command
+
+        return container_req
+
+
+    def run(self, runtimeContext):
+        runtimeContext.keepprefix = "keep:"
+        job_spec = self.arvados_job_spec(runtimeContext)
+        if self.arvrunner.project_uuid:
+            job_spec["owner_uuid"] = self.arvrunner.project_uuid
+
+        extra_submit_params = {}
+        if runtimeContext.submit_runner_cluster:
+            extra_submit_params["cluster_id"] = runtimeContext.submit_runner_cluster
+
+        if runtimeContext.submit_request_uuid:
+            if "cluster_id" in extra_submit_params:
+                # Doesn't make sense for "update" and actually fails
+                del extra_submit_params["cluster_id"]
+            response = self.arvrunner.api.container_requests().update(
+                uuid=runtimeContext.submit_request_uuid,
+                body=job_spec,
+                **extra_submit_params
+            ).execute(num_retries=self.arvrunner.num_retries)
+        else:
+            response = self.arvrunner.api.container_requests().create(
+                body=job_spec,
+                **extra_submit_params
+            ).execute(num_retries=self.arvrunner.num_retries)
+
+        self.uuid = response["uuid"]
+        self.arvrunner.process_submitted(self)
+
+        logger.info("%s submitted container_request %s", self.arvrunner.label(self), response["uuid"])
+
+    def done(self, record):
+        try:
+            container = self.arvrunner.api.containers().get(
+                uuid=record["container_uuid"]
+            ).execute(num_retries=self.arvrunner.num_retries)
+        except Exception:
+            logger.exception("%s while getting runner container", self.arvrunner.label(self))
+            self.arvrunner.output_callback({}, "permanentFail")
+        else:
+            super(RunnerContainer, self).done(container)
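For orientation, a pared-down sketch of the submission pattern implemented above (all field values are placeholders; the real request is assembled piece by piece in `ArvadosContainer.run`):

    import arvados

    api = arvados.api("v1")
    container_request = {
        "name": "example step",
        "command": ["echo", "hello"],
        "container_image": "arvados/jobs",   # resolved to a portable data hash in the real code
        "output_path": "/var/spool/cwl",
        "cwd": "/var/spool/cwl",
        "state": "Committed",
        "priority": 500,
        "mounts": {"/var/spool/cwl": {"kind": "tmp", "capacity": 2**30}},
        "runtime_constraints": {"vcpus": 1, "ram": 1024 * 2**20},
    }
    response = api.container_requests().create(body=container_request).execute()
    print(response["uuid"], response["state"])
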
diff --git a/sdk/cwl/arvados_cwl/arvdocker.py b/sdk/cwl/arvados_cwl/arvdocker.py
new file mode 100644 (file)
index 0000000..a8f56ad
--- /dev/null
@@ -0,0 +1,88 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import logging
+import sys
+import threading
+import copy
+
+from schema_salad.sourceline import SourceLine
+
+import cwltool.docker
+from cwltool.errors import WorkflowException
+import arvados.commands.keepdocker
+
+logger = logging.getLogger('arvados.cwl-runner')
+
+cached_lookups = {}
+cached_lookups_lock = threading.Lock()
+
+def arv_docker_get_image(api_client, dockerRequirement, pull_image, project_uuid):
+    """Check if a Docker image is available in Keep, if not, upload it using arv-keepdocker."""
+
+    if "http://arvados.org/cwl#dockerCollectionPDH" in dockerRequirement:
+        return dockerRequirement["http://arvados.org/cwl#dockerCollectionPDH"]
+
+    if "dockerImageId" not in dockerRequirement and "dockerPull" in dockerRequirement:
+        dockerRequirement = copy.deepcopy(dockerRequirement)
+        dockerRequirement["dockerImageId"] = dockerRequirement["dockerPull"]
+        if hasattr(dockerRequirement, 'lc'):
+            dockerRequirement.lc.data["dockerImageId"] = dockerRequirement.lc.data["dockerPull"]
+
+    global cached_lookups
+    global cached_lookups_lock
+    with cached_lookups_lock:
+        if dockerRequirement["dockerImageId"] in cached_lookups:
+            return cached_lookups[dockerRequirement["dockerImageId"]]
+
+    with SourceLine(dockerRequirement, "dockerImageId", WorkflowException, logger.isEnabledFor(logging.DEBUG)):
+        sp = dockerRequirement["dockerImageId"].split(":")
+        image_name = sp[0]
+        image_tag = sp[1] if len(sp) > 1 else "latest"
+
+        images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3,
+                                                                image_name=image_name,
+                                                                image_tag=image_tag)
+
+        if not images:
+            # Fetch Docker image if necessary.
+            try:
+                cwltool.docker.DockerCommandLineJob.get_image(dockerRequirement, pull_image)
+            except OSError as e:
+                raise WorkflowException("While trying to get Docker image '%s', failed to execute 'docker': %s" % (dockerRequirement["dockerImageId"], e))
+
+            # Upload image to Arvados
+            args = []
+            if project_uuid:
+                args.append("--project-uuid="+project_uuid)
+            args.append(image_name)
+            args.append(image_tag)
+            logger.info("Uploading Docker image %s:%s", image_name, image_tag)
+            try:
+                arvados.commands.put.api_client = api_client
+                arvados.commands.keepdocker.main(args, stdout=sys.stderr, install_sig_handlers=False, api=api_client)
+            except SystemExit as e:
+                # If e.code is None or zero, then keepdocker exited normally and we can continue
+                if e.code:
+                    raise WorkflowException("keepdocker exited with code %s" % e.code)
+
+            images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3,
+                                                                    image_name=image_name,
+                                                                    image_tag=image_tag)
+
+        if not images:
+            raise WorkflowException("Could not find Docker image %s:%s" % (image_name, image_tag))
+
+        pdh = api_client.collections().get(uuid=images[0][0]).execute()["portable_data_hash"]
+
+        with cached_lookups_lock:
+            cached_lookups[dockerRequirement["dockerImageId"]] = pdh
+
+    return pdh
+
+def arv_docker_clear_cache():
+    global cached_lookups
+    global cached_lookups_lock
+    with cached_lookups_lock:
+        cached_lookups = {}
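A usage sketch for the function above (the image name is a placeholder); it returns the portable data hash of the image collection in Keep, uploading the image via arv-keepdocker first when necessary:

    import arvados
    from arvados_cwl.arvdocker import arv_docker_get_image

    api = arvados.api("v1")
    docker_req = {"dockerPull": "ubuntu:18.04"}   # placeholder image
    pdh = arv_docker_get_image(api, docker_req, pull_image=True, project_uuid=None)
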
diff --git a/sdk/cwl/arvados_cwl/arvjob.py b/sdk/cwl/arvados_cwl/arvjob.py
new file mode 100644 (file)
index 0000000..ab2078e
--- /dev/null
@@ -0,0 +1,492 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from past.builtins import basestring
+from builtins import object
+from future.utils import viewitems
+
+import logging
+import re
+import copy
+import json
+import time
+
+from cwltool.process import shortname, UnsupportedRequirement
+from cwltool.errors import WorkflowException
+from cwltool.command_line_tool import revmap_file, CommandLineTool
+from cwltool.load_tool import fetch_document
+from cwltool.builder import Builder
+from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, visit_class
+from cwltool.job import JobBase
+
+from schema_salad.sourceline import SourceLine
+
+import arvados_cwl.util
+import ruamel.yaml as yaml
+
+import arvados.collection
+from arvados.errors import ApiError
+
+from .arvdocker import arv_docker_get_image
+from .runner import Runner, arvados_jobs_image, packed_workflow, upload_workflow_collection, trim_anonymous_location, remove_redundant_fields
+from .pathmapper import VwdPathMapper, trim_listing
+from .perf import Perf
+from . import done
+from ._version import __version__
+from .util import get_intermediate_collection_info
+
+logger = logging.getLogger('arvados.cwl-runner')
+metrics = logging.getLogger('arvados.cwl-runner.metrics')
+
+crunchrunner_re = re.compile(r"^.*crunchrunner: \$\(task\.(tmpdir|outdir|keep)\)=(.*)$")
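+# For example, a crunchrunner log line such as (hypothetical):
+#   "2016-01-01_00:00:00 ... crunchrunner: $(task.outdir)=/tmp/crunch-job/task_output"
+# matches with group(1) == "outdir" and group(2) == "/tmp/crunch-job/task_output".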
+
+crunchrunner_git_commit = 'a3f2cb186e437bfce0031b024b2157b73ed2717d'
+
+class ArvadosJob(JobBase):
+    """Submit and manage a Crunch job for executing a CWL CommandLineTool."""
+
+    def __init__(self, runner,
+                 builder,   # type: Builder
+                 joborder,  # type: Dict[Text, Union[Dict[Text, Any], List, Text]]
+                 make_path_mapper,  # type: Callable[..., PathMapper]
+                 requirements,      # type: List[Dict[Text, Text]]
+                 hints,     # type: List[Dict[Text, Text]]
+                 name       # type: Text
+    ):
+        super(ArvadosJob, self).__init__(builder, joborder, make_path_mapper, requirements, hints, name)
+        self.arvrunner = runner
+        self.running = False
+        self.uuid = None
+
+    def run(self, runtimeContext):
+        script_parameters = {
+            "command": self.command_line
+        }
+        runtime_constraints = {}
+
+        with Perf(metrics, "generatefiles %s" % self.name):
+            if self.generatefiles["listing"]:
+                vwd = arvados.collection.Collection(api_client=self.arvrunner.api,
+                                                    keep_client=self.arvrunner.keep_client,
+                                                    num_retries=self.arvrunner.num_retries)
+                script_parameters["task.vwd"] = {}
+                generatemapper = VwdPathMapper(self.generatefiles["listing"], "", "",
+                                               separateDirs=False)
+
+                with Perf(metrics, "createfiles %s" % self.name):
+                    for f, p in generatemapper.items():
+                        if p.type == "CreateFile":
+                            with vwd.open(p.target, "w") as n:
+                                n.write(p.resolved.encode("utf-8"))
+
+                if vwd:
+                    with Perf(metrics, "generatefiles.save_new %s" % self.name):
+                        info = get_intermediate_collection_info(self.name, None, runtimeContext.intermediate_output_ttl)
+                        vwd.save_new(name=info["name"],
+                                     owner_uuid=self.arvrunner.project_uuid,
+                                     ensure_unique_name=True,
+                                     trash_at=info["trash_at"],
+                                     properties=info["properties"])
+
+                for f, p in generatemapper.items():
+                    if p.type == "File":
+                        script_parameters["task.vwd"][p.target] = p.resolved
+                    if p.type == "CreateFile":
+                        script_parameters["task.vwd"][p.target] = "$(task.keep)/%s/%s" % (vwd.portable_data_hash(), p.target)
+
+        script_parameters["task.env"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
+        if self.environment:
+            script_parameters["task.env"].update(self.environment)
+
+        if self.stdin:
+            script_parameters["task.stdin"] = self.stdin
+
+        if self.stdout:
+            script_parameters["task.stdout"] = self.stdout
+
+        if self.stderr:
+            script_parameters["task.stderr"] = self.stderr
+
+        if self.successCodes:
+            script_parameters["task.successCodes"] = self.successCodes
+        if self.temporaryFailCodes:
+            script_parameters["task.temporaryFailCodes"] = self.temporaryFailCodes
+        if self.permanentFailCodes:
+            script_parameters["task.permanentFailCodes"] = self.permanentFailCodes
+
+        with Perf(metrics, "arv_docker_get_image %s" % self.name):
+            (docker_req, docker_is_req) = self.get_requirement("DockerRequirement")
+            if docker_req and runtimeContext.use_container is not False:
+                if docker_req.get("dockerOutputDirectory"):
+                    raise SourceLine(docker_req, "dockerOutputDirectory", UnsupportedRequirement).makeError(
+                        "Option 'dockerOutputDirectory' of DockerRequirement not supported.")
+                runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api,
+                                                                           docker_req,
+                                                                           runtimeContext.pull_image,
+                                                                           self.arvrunner.project_uuid)
+            else:
+                runtime_constraints["docker_image"] = "arvados/jobs"
+
+        resources = self.builder.resources
+        if resources is not None:
+            runtime_constraints["min_cores_per_node"] = resources.get("cores", 1)
+            runtime_constraints["min_ram_mb_per_node"] = resources.get("ram")
+            runtime_constraints["min_scratch_mb_per_node"] = resources.get("tmpdirSize", 0) + resources.get("outdirSize", 0)
+
+        runtime_req, _ = self.get_requirement("http://arvados.org/cwl#RuntimeConstraints")
+        if runtime_req:
+            if "keep_cache" in runtime_req:
+                runtime_constraints["keep_cache_mb_per_task"] = runtime_req["keep_cache"]
+                runtime_constraints["min_ram_mb_per_node"] += runtime_req["keep_cache"]
+            if "outputDirType" in runtime_req:
+                if runtime_req["outputDirType"] == "local_output_dir":
+                    script_parameters["task.keepTmpOutput"] = False
+                elif runtime_req["outputDirType"] == "keep_output_dir":
+                    script_parameters["task.keepTmpOutput"] = True
+
+        filters = [["repository", "=", "arvados"],
+                   ["script", "=", "crunchrunner"],
+                   ["script_version", "in git", crunchrunner_git_commit]]
+        if not self.arvrunner.ignore_docker_for_reuse:
+            filters.append(["docker_image_locator", "in docker", runtime_constraints["docker_image"]])
+
+        enable_reuse = runtimeContext.enable_reuse
+        if enable_reuse:
+            reuse_req, _ = self.get_requirement("http://arvados.org/cwl#ReuseRequirement")
+            if reuse_req:
+                enable_reuse = reuse_req["enableReuse"]
+
+        self.output_callback = self.arvrunner.get_wrapped_callback(self.output_callback)
+
+        try:
+            with Perf(metrics, "create %s" % self.name):
+                response = self.arvrunner.api.jobs().create(
+                    body={
+                        "owner_uuid": self.arvrunner.project_uuid,
+                        "script": "crunchrunner",
+                        "repository": "arvados",
+                        "script_version": "master",
+                        "minimum_script_version": crunchrunner_git_commit,
+                        "script_parameters": {"tasks": [script_parameters]},
+                        "runtime_constraints": runtime_constraints
+                    },
+                    filters=filters,
+                    find_or_create=enable_reuse
+                ).execute(num_retries=self.arvrunner.num_retries)
+
+            self.uuid = response["uuid"]
+            self.arvrunner.process_submitted(self)
+
+            self.update_pipeline_component(response)
+
+            if response["state"] == "Complete":
+                logger.info("%s reused job %s", self.arvrunner.label(self), response["uuid"])
+                # Give read permission to the desired project on reused jobs
+                if response["owner_uuid"] != self.arvrunner.project_uuid:
+                    try:
+                        self.arvrunner.api.links().create(body={
+                            'link_class': 'permission',
+                            'name': 'can_read',
+                            'tail_uuid': self.arvrunner.project_uuid,
+                            'head_uuid': response["uuid"],
+                            }).execute(num_retries=self.arvrunner.num_retries)
+                    except ApiError as e:
+                        # The user might not have "manage" access on the job: log
+                        # a message and continue.
+                        logger.info("Creating read permission on job %s: %s",
+                                    response["uuid"],
+                                    e)
+            else:
+                logger.info("%s %s is %s", self.arvrunner.label(self), response["uuid"], response["state"])
+        except Exception:
+            logger.exception("%s error" % (self.arvrunner.label(self)))
+            self.output_callback({}, "permanentFail")
+
+    def update_pipeline_component(self, record):
+        with self.arvrunner.workflow_eval_lock:
+            if self.arvrunner.pipeline:
+                self.arvrunner.pipeline["components"][self.name] = {"job": record}
+                with Perf(metrics, "update_pipeline_component %s" % self.name):
+                    self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(
+                        uuid=self.arvrunner.pipeline["uuid"],
+                        body={
+                            "components": self.arvrunner.pipeline["components"]
+                        }).execute(num_retries=self.arvrunner.num_retries)
+            if self.arvrunner.uuid:
+                try:
+                    job = self.arvrunner.api.jobs().get(uuid=self.arvrunner.uuid).execute()
+                    if job:
+                        components = job["components"]
+                        components[self.name] = record["uuid"]
+                        self.arvrunner.api.jobs().update(
+                            uuid=self.arvrunner.uuid,
+                            body={
+                                "components": components
+                            }).execute(num_retries=self.arvrunner.num_retries)
+                except Exception:
+                    logger.exception("Error adding to components")
+
+    def done(self, record):
+        try:
+            self.update_pipeline_component(record)
+        except:
+            pass
+
+        try:
+            if record["state"] == "Complete":
+                processStatus = "success"
+            else:
+                processStatus = "permanentFail"
+
+            outputs = {}
+            try:
+                if record["output"]:
+                    with Perf(metrics, "inspect log %s" % self.name):
+                        logc = arvados.collection.CollectionReader(record["log"],
+                                                                   api_client=self.arvrunner.api,
+                                                                   keep_client=self.arvrunner.keep_client,
+                                                                   num_retries=self.arvrunner.num_retries)
+                        log = logc.open(list(logc.keys())[0])
+                        dirs = {
+                            "tmpdir": "/tmpdir",
+                            "outdir": "/outdir",
+                            "keep": "/keep"
+                        }
+                        for l in log:
+                            # Determine the tmpdir, outdir and keep paths from
+                            # the job run.  Unfortunately, we can't take the first
+                            # values we find (which are expected to be near the
+                            # top) and stop scanning because if the node fails and
+                            # the job restarts on a different node, these values
+                            # will differ between runs, and we need to know about
+                            # the final run that actually produced output.
+                            g = crunchrunner_re.match(l)
+                            if g:
+                                dirs[g.group(1)] = g.group(2)
+
+                    if processStatus == "permanentFail":
+                        done.logtail(logc, logger.error, "%s (%s) error log:" % (self.arvrunner.label(self), record["uuid"]), maxlen=40)
+
+                    with Perf(metrics, "output collection %s" % self.name):
+                        outputs = done.done(self, record, dirs["tmpdir"],
+                                            dirs["outdir"], dirs["keep"])
+            except WorkflowException as e:
+                # Only include a stack trace if in debug mode.
+                # This is most likely a user workflow error and a stack trace may obfuscate more useful output.
+                logger.error("%s unable to collect output from %s:\n%s",
+                             self.arvrunner.label(self), record["output"], e, exc_info=(e if self.arvrunner.debug else False))
+                processStatus = "permanentFail"
+            except Exception:
+                logger.exception("Got unknown exception while collecting output for job %s:", self.name)
+                processStatus = "permanentFail"
+
+            # Note: Currently, on error output_callback is expecting an empty dict;
+            # anything else will fail.
+            if not isinstance(outputs, dict):
+                logger.error("Unexpected output type %s '%s'", type(outputs), outputs)
+                outputs = {}
+                processStatus = "permanentFail"
+        finally:
+            self.output_callback(outputs, processStatus)
+
+
+class RunnerJob(Runner):
+    """Submit and manage a Crunch job that runs crunch_scripts/cwl-runner."""
+
+    def arvados_job_spec(self, debug=False):
+        """Create an Arvados job specification for this workflow.
+
+        The returned dict can be used to create a job (i.e., passed as
+        the +body+ argument to jobs().create()), or as a component in
+        a pipeline template or pipeline instance.
+        """
+
+        if self.embedded_tool.tool["id"].startswith("keep:"):
+            self.job_order["cwl:tool"] = self.embedded_tool.tool["id"][5:]
+        else:
+            packed = packed_workflow(self.arvrunner, self.embedded_tool, self.merged_map)
+            wf_pdh = upload_workflow_collection(self.arvrunner, self.name, packed)
+            self.job_order["cwl:tool"] = "%s/workflow.cwl#main" % wf_pdh
+
+        adjustDirObjs(self.job_order, trim_listing)
+        visit_class(self.job_order, ("File", "Directory"), trim_anonymous_location)
+        visit_class(self.job_order, ("File", "Directory"), remove_redundant_fields)
+
+        if self.output_name:
+            self.job_order["arv:output_name"] = self.output_name
+
+        if self.output_tags:
+            self.job_order["arv:output_tags"] = self.output_tags
+
+        self.job_order["arv:enable_reuse"] = self.enable_reuse
+
+        if self.on_error:
+            self.job_order["arv:on_error"] = self.on_error
+
+        if debug:
+            self.job_order["arv:debug"] = True
+
+        return {
+            "script": "cwl-runner",
+            "script_version": "master",
+            "minimum_script_version": "570509ab4d2ef93d870fd2b1f2eab178afb1bad9",
+            "repository": "arvados",
+            "script_parameters": self.job_order,
+            "runtime_constraints": {
+                "docker_image": arvados_jobs_image(self.arvrunner, self.jobs_image),
+                "min_ram_mb_per_node": self.submit_runner_ram
+            }
+        }
+
+    def run(self, runtimeContext):
+        job_spec = self.arvados_job_spec(runtimeContext.debug)
+
+        job_spec.setdefault("owner_uuid", self.arvrunner.project_uuid)
+
+        job = self.arvrunner.api.jobs().create(
+            body=job_spec,
+            find_or_create=self.enable_reuse
+        ).execute(num_retries=self.arvrunner.num_retries)
+
+        for k,v in viewitems(job_spec["script_parameters"]):
+            if v is False or v is None or isinstance(v, dict):
+                job_spec["script_parameters"][k] = {"value": v}
+
+        del job_spec["owner_uuid"]
+        job_spec["job"] = job
+
+        instance_spec = {
+            "owner_uuid": self.arvrunner.project_uuid,
+            "name": self.name,
+            "components": {
+                "cwl-runner": job_spec,
+            },
+            "state": "RunningOnServer",
+        }
+        if not self.enable_reuse:
+            instance_spec["properties"] = {"run_options": {"enable_job_reuse": False}}
+
+        self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().create(
+            body=instance_spec).execute(num_retries=self.arvrunner.num_retries)
+        logger.info("Created pipeline %s", self.arvrunner.pipeline["uuid"])
+
+        if runtimeContext.wait is False:
+            self.uuid = self.arvrunner.pipeline["uuid"]
+            return
+
+        self.uuid = job["uuid"]
+        self.arvrunner.process_submitted(self)
+
+
+class RunnerTemplate(object):
+    """An Arvados pipeline template that invokes a CWL workflow."""
+
+    type_to_dataclass = {
+        'boolean': 'boolean',
+        'File': 'File',
+        'Directory': 'Collection',
+        'float': 'number',
+        'int': 'number',
+        'string': 'text',
+    }
+
+    def __init__(self, runner, tool, job_order, enable_reuse, uuid,
+                 submit_runner_ram=0, name=None, merged_map=None,
+                 loadingContext=None):
+        self.runner = runner
+        self.embedded_tool = tool
+        self.job = RunnerJob(
+            runner=runner,
+            tool=tool,
+            enable_reuse=enable_reuse,
+            output_name=None,
+            output_tags=None,
+            submit_runner_ram=submit_runner_ram,
+            name=name,
+            merged_map=merged_map,
+            loadingContext=loadingContext)
+        self.job.job_order = job_order
+        self.uuid = uuid
+
+    def pipeline_component_spec(self):
+        """Return a component that Workbench and a-r-p-i will understand.
+
+        Specifically, translate CWL input specs to Arvados pipeline
+        format, like {"dataclass":"File","value":"xyz"}.
+        """
+
+        spec = self.job.arvados_job_spec()
+
+        # Most of the component spec is exactly the same as the job
+        # spec (script, script_version, etc.).
+        # spec['script_parameters'] isn't right, though. A component
+        # spec's script_parameters hash is a translation of
+        # self.tool.tool['inputs'] with defaults/overrides taken from
+        # the job order. So we move the job parameters out of the way
+        # and build a new spec['script_parameters'].
+        job_params = spec['script_parameters']
+        spec['script_parameters'] = {}
+
+        for param in self.embedded_tool.tool['inputs']:
+            param = copy.deepcopy(param)
+
+            # Data type and "required" flag...
+            types = param['type']
+            if not isinstance(types, list):
+                types = [types]
+            param['required'] = 'null' not in types
+            non_null_types = [t for t in types if t != "null"]
+            if len(non_null_types) == 1:
+                the_type = non_null_types[0]
+                dataclass = None
+                if isinstance(the_type, basestring):
+                    dataclass = self.type_to_dataclass.get(the_type)
+                if dataclass:
+                    param['dataclass'] = dataclass
+            # Note: If we didn't figure out a single appropriate
+            # dataclass, we just left that attribute out.  We leave
+            # the "type" attribute there in any case, which might help
+            # downstream.
+
+            # Title and description...
+            title = param.pop('label', '')
+            descr = param.pop('doc', '').rstrip('\n')
+            if title:
+                param['title'] = title
+            if descr:
+                param['description'] = descr
+
+            # Fill in the value from the current job order, if any.
+            param_id = shortname(param.pop('id'))
+            value = job_params.get(param_id)
+            if value is None:
+                pass
+            elif not isinstance(value, dict):
+                param['value'] = value
+            elif param.get('dataclass') in ('File', 'Collection') and value.get('location'):
+                param['value'] = value['location'][5:]
+
+            spec['script_parameters'][param_id] = param
+        spec['script_parameters']['cwl:tool'] = job_params['cwl:tool']
+        return spec
+
+    def save(self):
+        body = {
+            "components": {
+                self.job.name: self.pipeline_component_spec(),
+            },
+            "name": self.job.name,
+        }
+        if self.runner.project_uuid:
+            body["owner_uuid"] = self.runner.project_uuid
+        if self.uuid:
+            self.runner.api.pipeline_templates().update(
+                uuid=self.uuid, body=body).execute(
+                    num_retries=self.runner.num_retries)
+            logger.info("Updated template %s", self.uuid)
+        else:
+            self.uuid = self.runner.api.pipeline_templates().create(
+                body=body, ensure_unique_name=True).execute(
+                    num_retries=self.runner.num_retries)['uuid']
+            logger.info("Created template %s", self.uuid)
diff --git a/sdk/cwl/arvados_cwl/arvtool.py b/sdk/cwl/arvados_cwl/arvtool.py
new file mode 100644 (file)
index 0000000..31e6be1
--- /dev/null
@@ -0,0 +1,119 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from cwltool.command_line_tool import CommandLineTool, ExpressionTool
+from cwltool.builder import Builder
+from .arvjob import ArvadosJob
+from .arvcontainer import ArvadosContainer
+from .pathmapper import ArvPathMapper
+from functools import partial
+from schema_salad.sourceline import SourceLine
+from cwltool.errors import WorkflowException
+
+def validate_cluster_target(arvrunner, runtimeContext):
+    if (runtimeContext.submit_runner_cluster and
+        runtimeContext.submit_runner_cluster not in arvrunner.api._rootDesc["remoteHosts"] and
+        runtimeContext.submit_runner_cluster != arvrunner.api._rootDesc["uuidPrefix"]):
+        raise WorkflowException("Unknown or invalid cluster id '%s' known remote clusters are %s" % (runtimeContext.submit_runner_cluster,
+                                                                                                  ", ".join(list(arvrunner.api._rootDesc["remoteHosts"].keys()))))
+def set_cluster_target(tool, arvrunner, builder, runtimeContext):
+    cluster_target_req = None
+    for field in ("hints", "requirements"):
+        if field not in tool:
+            continue
+        for item in tool[field]:
+            if item["class"] == "http://arvados.org/cwl#ClusterTarget":
+                cluster_target_req = item
+
+    if cluster_target_req is None:
+        return runtimeContext
+
+    with SourceLine(cluster_target_req, None, WorkflowException, runtimeContext.debug):
+        runtimeContext = runtimeContext.copy()
+        runtimeContext.submit_runner_cluster = builder.do_eval(cluster_target_req.get("cluster_id")) or runtimeContext.submit_runner_cluster
+        runtimeContext.project_uuid = builder.do_eval(cluster_target_req.get("project_uuid")) or runtimeContext.project_uuid
+        validate_cluster_target(arvrunner, runtimeContext)
+
+    return runtimeContext
+
+def make_builder(joborder, hints, requirements, runtimeContext):
+    return Builder(
+                 job=joborder,
+                 files=[],               # type: List[Dict[Text, Text]]
+                 bindings=[],            # type: List[Dict[Text, Any]]
+                 schemaDefs={},          # type: Dict[Text, Dict[Text, Any]]
+                 names=None,               # type: Names
+                 requirements=requirements,        # type: List[Dict[Text, Any]]
+                 hints=hints,               # type: List[Dict[Text, Any]]
+                 resources={},           # type: Dict[str, int]
+                 mutation_manager=None,    # type: Optional[MutationManager]
+                 formatgraph=None,         # type: Optional[Graph]
+                 make_fs_access=None,      # type: Type[StdFsAccess]
+                 fs_access=None,           # type: StdFsAccess
+                 job_script_provider=runtimeContext.job_script_provider, # type: Optional[Any]
+                 timeout=runtimeContext.eval_timeout,             # type: float
+                 debug=runtimeContext.debug,               # type: bool
+                 js_console=runtimeContext.js_console,          # type: bool
+                 force_docker_pull=runtimeContext.force_docker_pull,   # type: bool
+                 loadListing="",         # type: Text
+                 outdir="",              # type: Text
+                 tmpdir="",              # type: Text
+                 stagedir="",            # type: Text
+                )
+
+class ArvadosCommandTool(CommandLineTool):
+    """Wrap cwltool CommandLineTool to override selected methods."""
+
+    def __init__(self, arvrunner, toolpath_object, loadingContext):
+        super(ArvadosCommandTool, self).__init__(toolpath_object, loadingContext)
+        self.arvrunner = arvrunner
+
+    def make_job_runner(self, runtimeContext):
+        if runtimeContext.work_api == "containers":
+            return partial(ArvadosContainer, self.arvrunner, runtimeContext)
+        elif runtimeContext.work_api == "jobs":
+            return partial(ArvadosJob, self.arvrunner)
+        else:
+            raise Exception("Unsupported work_api %s", runtimeContext.work_api)
+
+    def make_path_mapper(self, reffiles, stagedir, runtimeContext, separateDirs):
+        if runtimeContext.work_api == "containers":
+            return ArvPathMapper(self.arvrunner, reffiles+runtimeContext.extra_reffiles, runtimeContext.basedir,
+                                 "/keep/%s",
+                                 "/keep/%s/%s")
+        elif runtimeContext.work_api == "jobs":
+            return ArvPathMapper(self.arvrunner, reffiles, runtimeContext.basedir,
+                                 "$(task.keep)/%s",
+                                 "$(task.keep)/%s/%s")
+
+    def job(self, joborder, output_callback, runtimeContext):
+        builder = make_builder(joborder, self.hints, self.requirements, runtimeContext)
+        runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)
+
+        if runtimeContext.work_api == "containers":
+            dockerReq, is_req = self.get_requirement("DockerRequirement")
+            if dockerReq and dockerReq.get("dockerOutputDirectory"):
+                runtimeContext.outdir = dockerReq.get("dockerOutputDirectory")
+                runtimeContext.docker_outdir = dockerReq.get("dockerOutputDirectory")
+            else:
+                runtimeContext.outdir = "/var/spool/cwl"
+                runtimeContext.docker_outdir = "/var/spool/cwl"
+        elif runtimeContext.work_api == "jobs":
+            runtimeContext.outdir = "$(task.outdir)"
+            runtimeContext.docker_outdir = "$(task.outdir)"
+            runtimeContext.tmpdir = "$(task.tmpdir)"
+            runtimeContext.docker_tmpdir = "$(task.tmpdir)"
+        return super(ArvadosCommandTool, self).job(joborder, output_callback, runtimeContext)
+
+class ArvadosExpressionTool(ExpressionTool):
+    def __init__(self, arvrunner, toolpath_object, loadingContext):
+        super(ArvadosExpressionTool, self).__init__(toolpath_object, loadingContext)
+        self.arvrunner = arvrunner
+
+    def job(self,
+            job_order,         # type: Mapping[Text, Text]
+            output_callback,  # type: Callable[[Any, Any], Any]
+            runtimeContext     # type: RuntimeContext
+           ):
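+        # Route the callback through the executor so it runs while holding
+        # workflow_eval_lock (see ArvCwlExecutor.wrapped_callback).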
+        return super(ArvadosExpressionTool, self).job(job_order, self.arvrunner.get_wrapped_callback(output_callback), runtimeContext)
diff --git a/sdk/cwl/arvados_cwl/arvworkflow.py b/sdk/cwl/arvados_cwl/arvworkflow.py
new file mode 100644 (file)
index 0000000..8e6bff4
--- /dev/null
@@ -0,0 +1,323 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from past.builtins import basestring
+from future.utils import viewitems
+
+import os
+import json
+import copy
+import logging
+
+from schema_salad.sourceline import SourceLine, cmap
+
+from cwltool.pack import pack
+from cwltool.load_tool import fetch_document
+from cwltool.process import shortname
+from cwltool.workflow import Workflow, WorkflowException, WorkflowStep
+from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, visit_class
+from cwltool.context import LoadingContext
+
+import ruamel.yaml as yaml
+
+from .runner import (upload_dependencies, packed_workflow, upload_workflow_collection,
+                     trim_anonymous_location, remove_redundant_fields, discover_secondary_files)
+from .pathmapper import ArvPathMapper, trim_listing
+from .arvtool import ArvadosCommandTool, set_cluster_target, make_builder
+from .perf import Perf
+
+logger = logging.getLogger('arvados.cwl-runner')
+metrics = logging.getLogger('arvados.cwl-runner.metrics')
+
+max_res_pars = ("coresMin", "coresMax", "ramMin", "ramMax", "tmpdirMin", "tmpdirMax")
+sum_res_pars = ("outdirMin", "outdirMax")
+
+def upload_workflow(arvRunner, tool, job_order, project_uuid, uuid=None,
+                    submit_runner_ram=0, name=None, merged_map=None):
+
+    packed = packed_workflow(arvRunner, tool, merged_map)
+
+    adjustDirObjs(job_order, trim_listing)
+    adjustFileObjs(job_order, trim_anonymous_location)
+    adjustDirObjs(job_order, trim_anonymous_location)
+
+    main = [p for p in packed["$graph"] if p["id"] == "#main"][0]
+    for inp in main["inputs"]:
+        sn = shortname(inp["id"])
+        if sn in job_order:
+            inp["default"] = job_order[sn]
+
+    if not name:
+        name = tool.tool.get("label", os.path.basename(tool.tool["id"]))
+
+    upload_dependencies(arvRunner, name, tool.doc_loader,
+                        packed, tool.tool["id"], False)
+
+    if submit_runner_ram:
+        hints = main.get("hints", [])
+        found = False
+        for h in hints:
+            if h["class"] == "http://arvados.org/cwl#WorkflowRunnerResources":
+                h["ramMin"] = submit_runner_ram
+                found = True
+                break
+        if not found:
+            hints.append({"class": "http://arvados.org/cwl#WorkflowRunnerResources",
+                          "ramMin": submit_runner_ram})
+        main["hints"] = hints
+
+    body = {
+        "workflow": {
+            "name": name,
+            "description": tool.tool.get("doc", ""),
+            "definition":json.dumps(packed, sort_keys=True, indent=4, separators=(',',': '))
+        }}
+    if project_uuid:
+        body["workflow"]["owner_uuid"] = project_uuid
+
+    if uuid:
+        call = arvRunner.api.workflows().update(uuid=uuid, body=body)
+    else:
+        call = arvRunner.api.workflows().create(body=body)
+    return call.execute(num_retries=arvRunner.num_retries)["uuid"]
+
+def dedup_reqs(reqs):
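+    # Keep one entry per requirement class; iterating in reverse means the
+    # last occurrence in reqs wins.  Arvados extension requirements are
+    # dropped here.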
+    dedup = {}
+    for r in reversed(reqs):
+        if r["class"] not in dedup and not r["class"].startswith("http://arvados.org/cwl#"):
+            dedup[r["class"]] = r
+    return [dedup[r] for r in sorted(dedup.keys())]
+
+def get_overall_res_req(res_reqs):
+    """Take the overall of a list of ResourceRequirement,
+    i.e., the max of coresMin, coresMax, ramMin, ramMax, tmpdirMin, tmpdirMax
+    and the sum of outdirMin, outdirMax."""
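+    # For example (illustrative values): [{"coresMin": 1, "outdirMin": 1024},
+    # {"coresMin": 2, "outdirMin": 2048}] yields
+    # {"coresMin": 2, "outdirMin": 3072, "class": "ResourceRequirement"}.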
+
+    all_res_req = {}
+    exception_msgs = []
+    for a in max_res_pars + sum_res_pars:
+        all_res_req[a] = []
+        for res_req in res_reqs:
+            if a in res_req:
+                if isinstance(res_req[a], int):  # a static value; an expression would be a string
+                    all_res_req[a].append(res_req[a])
+                else:
+                    msg = SourceLine(res_req, a).makeError(
+                        "Non-top-level ResourceRequirement in single container cannot have expressions")
+                    exception_msgs.append(msg)
+    if exception_msgs:
+        raise WorkflowException("\n".join(exception_msgs))
+    else:
+        overall_res_req = {}
+        for a in all_res_req:
+            if all_res_req[a]:
+                if a in max_res_pars:
+                    overall_res_req[a] = max(all_res_req[a])
+                elif a in sum_res_pars:
+                    overall_res_req[a] = sum(all_res_req[a])
+        if overall_res_req:
+            overall_res_req["class"] = "ResourceRequirement"
+        return cmap(overall_res_req)
+
+class ArvadosWorkflowStep(WorkflowStep):
+    def __init__(self,
+                 toolpath_object,      # type: Dict[Text, Any]
+                 pos,                  # type: int
+                 loadingContext,       # type: LoadingContext
+                 arvrunner,
+                 *argc,
+                 **argv
+                ):  # type: (...) -> None
+
+        super(ArvadosWorkflowStep, self).__init__(toolpath_object, pos, loadingContext, *argc, **argv)
+        self.tool["class"] = "WorkflowStep"
+        self.arvrunner = arvrunner
+
+    def job(self, joborder, output_callback, runtimeContext):
+        runtimeContext = runtimeContext.copy()
+        runtimeContext.toplevel = True  # Preserve behavior for #13365
+
+        builder = make_builder({shortname(k): v for k,v in viewitems(joborder)}, self.hints, self.requirements, runtimeContext)
+        runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)
+        return super(ArvadosWorkflowStep, self).job(joborder, output_callback, runtimeContext)
+
+
+class ArvadosWorkflow(Workflow):
+    """Wrap cwltool Workflow to override selected methods."""
+
+    def __init__(self, arvrunner, toolpath_object, loadingContext):
+        self.arvrunner = arvrunner
+        self.wf_pdh = None
+        self.dynamic_resource_req = []
+        self.static_resource_req = []
+        self.wf_reffiles = []
+        self.loadingContext = loadingContext
+        super(ArvadosWorkflow, self).__init__(toolpath_object, loadingContext)
+        self.cluster_target_req, _ = self.get_requirement("http://arvados.org/cwl#ClusterTarget")
+
+    def job(self, joborder, output_callback, runtimeContext):
+
+        builder = make_builder(joborder, self.hints, self.requirements, runtimeContext)
+        runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)
+
+        req, _ = self.get_requirement("http://arvados.org/cwl#RunInSingleContainer")
+        if not req:
+            return super(ArvadosWorkflow, self).job(joborder, output_callback, runtimeContext)
+
+        # RunInSingleContainer is true
+
+        with SourceLine(self.tool, None, WorkflowException, logger.isEnabledFor(logging.DEBUG)):
+            if "id" not in self.tool:
+                raise WorkflowException("%s object must have 'id'" % (self.tool["class"]))
+        document_loader, workflowobj, uri = (self.doc_loader, self.doc_loader.fetch(self.tool["id"]), self.tool["id"])
+
+        discover_secondary_files(self.tool["inputs"], joborder)
+
+        with Perf(metrics, "subworkflow upload_deps"):
+            upload_dependencies(self.arvrunner,
+                                os.path.basename(joborder.get("id", "#")),
+                                document_loader,
+                                joborder,
+                                joborder.get("id", "#"),
+                                False)
+
+            if self.wf_pdh is None:
+                workflowobj["requirements"] = dedup_reqs(self.requirements)
+                workflowobj["hints"] = dedup_reqs(self.hints)
+
+                packed = pack(document_loader, workflowobj, uri, self.metadata)
+
+                def visit(item):
+                    for t in ("hints", "requirements"):
+                        if t not in item:
+                            continue
+                        for req in item[t]:
+                            if req["class"] == "ResourceRequirement":
+                                dyn = False
+                                for k in max_res_pars + sum_res_pars:
+                                    if k in req:
+                                        if isinstance(req[k], basestring):
+                                            if item["id"] == "#main":
+                                                # only the top-level requirements/hints may contain expressions
+                                                self.dynamic_resource_req.append(req)
+                                                dyn = True
+                                                break
+                                            else:
+                                                with SourceLine(req, k, WorkflowException):
+                                                    raise WorkflowException("Non-top-level ResourceRequirement in single container cannot have expressions")
+                                if not dyn:
+                                    self.static_resource_req.append(req)
+                            if req["class"] == "DockerRequirement":
+                                if "http://arvados.org/cwl#dockerCollectionPDH" in req:
+                                    del req["http://arvados.org/cwl#dockerCollectionPDH"]
+
+                visit_class(packed["$graph"], ("Workflow", "CommandLineTool"), visit)
+
+                if self.static_resource_req:
+                    self.static_resource_req = [get_overall_res_req(self.static_resource_req)]
+
+                upload_dependencies(self.arvrunner,
+                                    runtimeContext.name,
+                                    document_loader,
+                                    packed,
+                                    uri,
+                                    False)
+
+                # Discover files/directories referenced by the
+                # workflow (mainly "default" values)
+                visit_class(packed, ("File", "Directory"), self.wf_reffiles.append)
+
+
+        if self.dynamic_resource_req:
+            # Evaluate dynamic resource requirements using current builder
+            rs = copy.copy(self.static_resource_req)
+            for dyn_rs in self.dynamic_resource_req:
+                eval_req = {"class": "ResourceRequirement"}
+                for a in max_res_pars + sum_res_pars:
+                    if a in dyn_rs:
+                        eval_req[a] = builder.do_eval(dyn_rs[a])
+                rs.append(eval_req)
+            job_res_reqs = [get_overall_res_req(rs)]
+        else:
+            job_res_reqs = self.static_resource_req
+
+        with Perf(metrics, "subworkflow adjust"):
+            joborder_resolved = copy.deepcopy(joborder)
+            joborder_keepmount = copy.deepcopy(joborder)
+
+            reffiles = []
+            visit_class(joborder_keepmount, ("File", "Directory"), reffiles.append)
+
+            mapper = ArvPathMapper(self.arvrunner, reffiles+self.wf_reffiles, runtimeContext.basedir,
+                                   "/keep/%s",
+                                   "/keep/%s/%s")
+
+            # For containers API, we need to make sure any extra
+            # referenced files (i.e. referenced by the workflow but
+            # not in the inputs) are included in the mounts.
+            if self.wf_reffiles:
+                runtimeContext = runtimeContext.copy()
+                runtimeContext.extra_reffiles = copy.deepcopy(self.wf_reffiles)
+
+            def keepmount(obj):
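+                # Rewrite each location to its path under the Keep mount;
+                # cached listings are dropped since the directory will be
+                # read directly from the mount.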
+                remove_redundant_fields(obj)
+                with SourceLine(obj, None, WorkflowException, logger.isEnabledFor(logging.DEBUG)):
+                    if "location" not in obj:
+                        raise WorkflowException("%s object is missing required 'location' field: %s" % (obj["class"], obj))
+                with SourceLine(obj, "location", WorkflowException, logger.isEnabledFor(logging.DEBUG)):
+                    if obj["location"].startswith("keep:"):
+                        obj["location"] = mapper.mapper(obj["location"]).target
+                        if "listing" in obj:
+                            del obj["listing"]
+                    elif obj["location"].startswith("_:"):
+                        del obj["location"]
+                    else:
+                        raise WorkflowException("Location is not a keep reference or a literal: '%s'" % obj["location"])
+
+            visit_class(joborder_keepmount, ("File", "Directory"), keepmount)
+
+            def resolved(obj):
+                if obj["location"].startswith("keep:"):
+                    obj["location"] = mapper.mapper(obj["location"]).resolved
+
+            visit_class(joborder_resolved, ("File", "Directory"), resolved)
+
+            if self.wf_pdh is None:
+                adjustFileObjs(packed, keepmount)
+                adjustDirObjs(packed, keepmount)
+                self.wf_pdh = upload_workflow_collection(self.arvrunner, shortname(self.tool["id"]), packed)
+
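+        # Synthesize a CommandLineTool that stages the packed workflow and
+        # the Keep-mounted inputs into the working directory and runs the
+        # whole subworkflow with a single cwltool invocation in one container.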
+        wf_runner = cmap({
+            "class": "CommandLineTool",
+            "baseCommand": "cwltool",
+            "inputs": self.tool["inputs"],
+            "outputs": self.tool["outputs"],
+            "stdout": "cwl.output.json",
+            "requirements": self.requirements+job_res_reqs+[
+                {"class": "InlineJavascriptRequirement"},
+                {
+                "class": "InitialWorkDirRequirement",
+                "listing": [{
+                        "entryname": "workflow.cwl",
+                        "entry": '$({"class": "File", "location": "keep:%s/workflow.cwl"})' % self.wf_pdh
+                    }, {
+                        "entryname": "cwl.input.yml",
+                        "entry": json.dumps(joborder_keepmount, indent=2, sort_keys=True, separators=(',',': ')).replace("\\", "\\\\").replace('$(', '\$(').replace('${', '\${')
+                    }]
+            }],
+            "hints": self.hints,
+            "arguments": ["--no-container", "--move-outputs", "--preserve-entire-environment", "workflow.cwl#main", "cwl.input.yml"],
+            "id": "#"
+        })
+        return ArvadosCommandTool(self.arvrunner, wf_runner, self.loadingContext).job(joborder_resolved, output_callback, runtimeContext)
+
+    def make_workflow_step(self,
+                           toolpath_object,      # type: Dict[Text, Any]
+                           pos,                  # type: int
+                           loadingContext,       # type: LoadingContext
+                           *argc,
+                           **argv
+    ):  # type: (...) -> WorkflowStep
+        return ArvadosWorkflowStep(toolpath_object, pos, loadingContext, self.arvrunner, *argc, **argv)
diff --git a/sdk/cwl/arvados_cwl/context.py b/sdk/cwl/arvados_cwl/context.py
new file mode 100644 (file)
index 0000000..8cfe22a
--- /dev/null
@@ -0,0 +1,42 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from cwltool.context import LoadingContext, RuntimeContext
+from collections import namedtuple
+
+class ArvLoadingContext(LoadingContext):
+    def __init__(self, kwargs=None):
+        super(ArvLoadingContext, self).__init__(kwargs)
+
+class ArvRuntimeContext(RuntimeContext):
+    def __init__(self, kwargs=None):
+        self.work_api = None
+        self.extra_reffiles = []
+        self.priority = 500
+        self.enable_reuse = True
+        self.runnerjob = ""
+        self.submit_request_uuid = None
+        self.project_uuid = None
+        self.trash_intermediate = False
+        self.intermediate_output_ttl = 0
+        self.update_workflow = ""
+        self.create_workflow = False
+        self.submit_runner_ram = 0
+        self.ignore_docker_for_reuse = False
+        self.submit = True
+        self.submit_runner_image = None
+        self.wait = True
+        self.cwl_runner_job = None
+        self.storage_classes = "default"
+        self.current_container = None
+        self.http_timeout = 300
+        self.submit_runner_cluster = None
+        self.cluster_target_id = 0
+        self.always_submit_runner = False
+        self.collection_cache_size = 256
+
+        super(ArvRuntimeContext, self).__init__(kwargs)
+
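+        # The first five characters of an Arvados UUID identify the cluster
+        # it belongs to, e.g. "zzzzz" in "zzzzz-xvhdp-123456789abcdef".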
+        if self.submit_request_uuid:
+            self.submit_runner_cluster = self.submit_request_uuid[0:5]
diff --git a/sdk/cwl/arvados_cwl/crunch_script.py b/sdk/cwl/arvados_cwl/crunch_script.py
new file mode 100644 (file)
index 0000000..c886550
--- /dev/null
@@ -0,0 +1,159 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Crunch script integration for running arvados-cwl-runner (importing
+# arvados_cwl module) inside a crunch job.
+#
+# This gets the job record, transforms the script parameters into a valid CWL
+# input object, then executes the CWL runner to run the underlying workflow or
+# tool.  When the workflow completes, it records the output object in an
+# output collection for this runner job.
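+#
+# For example (illustrative), a script parameter whose value is a Keep
+# locator such as "99999999999999999999999999999999+99" is rewritten to
+# {"class": "File", "location": "keep:99999999999999999999999999999999+99"}
+# before the workflow is invoked.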
+
+from past.builtins import basestring
+from future.utils import viewitems
+
+import arvados
+import arvados_cwl
+import arvados.collection
+import arvados.util
+import cwltool.main
+import logging
+import os
+import json
+import argparse
+import re
+import functools
+
+from arvados.api import OrderedJsonModel
+from cwltool.process import shortname
+from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, normalizeFilesDirs
+from cwltool.load_tool import load_tool
+from cwltool.errors import WorkflowException
+from arvados_cwl.context import ArvRuntimeContext
+
+from .fsaccess import CollectionFetcher, CollectionFsAccess
+
+logger = logging.getLogger('arvados.cwl-runner')
+
+def run():
+    # Timestamps are added by crunch-job, so don't print redundant timestamps.
+    arvados.log_handler.setFormatter(logging.Formatter('%(name)s %(levelname)s: %(message)s'))
+
+    # Print package versions
+    logger.info(arvados_cwl.versionstring())
+
+    api = arvados.api("v1")
+
+    arvados_cwl.add_arv_hints()
+
+    runner = None
+    try:
+        job_order_object = arvados.current_job()['script_parameters']
+        toolpath = "file://%s/%s" % (os.environ['TASK_KEEPMOUNT'], job_order_object.pop("cwl:tool"))
+
+        pdh_path = re.compile(r'^[0-9a-f]{32}\+\d+(/.+)?$')
+
+        def keeppath(v):
+            if pdh_path.match(v):
+                return "keep:%s" % v
+            else:
+                return v
+
+        def keeppathObj(v):
+            if "location" in v:
+                v["location"] = keeppath(v["location"])
+
+        for k,v in viewitems(job_order_object):
+            if isinstance(v, basestring) and arvados.util.keep_locator_pattern.match(v):
+                job_order_object[k] = {
+                    "class": "File",
+                    "location": "keep:%s" % v
+                }
+
+        adjustFileObjs(job_order_object, keeppathObj)
+        adjustDirObjs(job_order_object, keeppathObj)
+        normalizeFilesDirs(job_order_object)
+
+        output_name = None
+        output_tags = None
+        enable_reuse = True
+        on_error = "continue"
+        debug = False
+
+        if "arv:output_name" in job_order_object:
+            output_name = job_order_object["arv:output_name"]
+            del job_order_object["arv:output_name"]
+
+        if "arv:output_tags" in job_order_object:
+            output_tags = job_order_object["arv:output_tags"]
+            del job_order_object["arv:output_tags"]
+
+        if "arv:enable_reuse" in job_order_object:
+            enable_reuse = job_order_object["arv:enable_reuse"]
+            del job_order_object["arv:enable_reuse"]
+
+        if "arv:on_error" in job_order_object:
+            on_error = job_order_object["arv:on_error"]
+            del job_order_object["arv:on_error"]
+
+        if "arv:debug" in job_order_object:
+            debug = job_order_object["arv:debug"]
+            del job_order_object["arv:debug"]
+
+        arvargs = argparse.Namespace()
+        arvargs.work_api = "jobs"
+        arvargs.output_name = output_name
+        arvargs.output_tags = output_tags
+        arvargs.thread_count = 1
+        arvargs.collection_cache_size = None
+
+        runner = arvados_cwl.ArvCwlExecutor(api_client=arvados.safeapi.ThreadSafeApiCache(
+            api_params={"model": OrderedJsonModel()}, keep_params={"num_retries": 4}),
+                                          arvargs=arvargs)
+
+        make_fs_access = functools.partial(CollectionFsAccess,
+                                 collection_cache=runner.collection_cache)
+
+        t = load_tool(toolpath, runner.loadingContext)
+
+        if debug:
+            logger.setLevel(logging.DEBUG)
+            logging.getLogger('arvados').setLevel(logging.DEBUG)
+            logging.getLogger("cwltool").setLevel(logging.DEBUG)
+
+        args = ArvRuntimeContext(vars(arvargs))
+        args.project_uuid = arvados.current_job()["owner_uuid"]
+        args.enable_reuse = enable_reuse
+        args.on_error = on_error
+        args.submit = False
+        args.debug = debug
+        args.quiet = False
+        args.ignore_docker_for_reuse = False
+        args.basedir = os.getcwd()
+        args.name = None
+        args.cwl_runner_job = {"uuid": arvados.current_job()["uuid"], "state": arvados.current_job()["state"]}
+        args.make_fs_access = make_fs_access
+        args.trash_intermediate = False
+        args.intermediate_output_ttl = 0
+        args.priority = arvados_cwl.DEFAULT_PRIORITY
+        args.do_validate = True
+        args.disable_js_validation = False
+        args.tmp_outdir_prefix = "tmp"
+
+        runner.arv_executor(t, job_order_object, args, logger=logger)
+    except Exception as e:
+        if isinstance(e, WorkflowException):
+            logger.info("Workflow error %s", e)
+        else:
+            logger.exception("Unhandled exception")
+        if runner and runner.final_output_collection:
+            outputCollection = runner.final_output_collection.portable_data_hash()
+        else:
+            outputCollection = None
+        api.job_tasks().update(uuid=arvados.current_task()['uuid'],
+                               body={
+                                   'output': outputCollection,
+                                   'success': False,
+                                   'progress': 1.0
+                               }).execute()
diff --git a/sdk/cwl/arvados_cwl/done.py b/sdk/cwl/arvados_cwl/done.py
new file mode 100644 (file)
index 0000000..9b26ad7
--- /dev/null
@@ -0,0 +1,99 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from future.utils import viewvalues
+
+import re
+from cwltool.errors import WorkflowException
+from collections import deque
+
+def done(self, record, tmpdir, outdir, keepdir):
+    cols = [
+        ("output", "Output %s of %s" % (record["output"][0:7], self.name), record["output"]),
+        ("log", "Log of %s" % (record["uuid"]), record["log"])
+    ]
+
+    for coltype, colname, colpdh in cols:
+        # check if collection already exists with same owner, name and content
+        collection_exists = self.arvrunner.api.collections().list(
+            filters=[["owner_uuid", "=", self.arvrunner.project_uuid],
+                     ['portable_data_hash', '=', colpdh],
+                     ["name", "=", colname]]
+        ).execute(num_retries=self.arvrunner.num_retries)
+
+        if not collection_exists["items"]:
+            # Create a collection located in the same project as the
+            # pipeline with the contents of the output/log.
+            # First, get output/log record.
+            collections = self.arvrunner.api.collections().list(
+                limit=1,
+                filters=[['portable_data_hash', '=', colpdh]],
+                select=["manifest_text"]
+            ).execute(num_retries=self.arvrunner.num_retries)
+
+            if not collections["items"]:
+                raise WorkflowException(
+                    "[job %s] %s '%s' cannot be found on API server" % (
+                        self.name, coltype, colpdh))
+
+            # Create new collection in the parent project
+            # with the output/log contents.
+            self.arvrunner.api.collections().create(body={
+                "owner_uuid": self.arvrunner.project_uuid,
+                "name": colname,
+                "portable_data_hash": colpdh,
+                "manifest_text": collections["items"][0]["manifest_text"]
+            }, ensure_unique_name=True).execute(
+                num_retries=self.arvrunner.num_retries)
+
+    return done_outputs(self, record, tmpdir, outdir, keepdir)
+
+def done_outputs(self, record, tmpdir, outdir, keepdir):
+    self.builder.outdir = outdir
+    self.builder.pathmapper.keepdir = keepdir
+    return self.collect_outputs("keep:" + record["output"])
+
+crunchstat_re = re.compile(r"^\d{4}-\d\d-\d\d_\d\d:\d\d:\d\d [a-z0-9]{5}-8i9sb-[a-z0-9]{15} \d+ \d+ stderr crunchstat:")
+timestamp_re = re.compile(r"^(\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d+Z) (.*)")
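+# e.g. a containers-API log line "2019-03-14T14:11:26.123456789Z starting"
+# splits into the timestamp and message groups (illustrative).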
+
+def logtail(logcollection, logfunc, header, maxlen=25):
+    if len(logcollection) == 0:
+        logfunc("%s\n%s", header, "  ** log is empty **")
+        return
+
+    containersapi = ("crunch-run.txt" in logcollection)
+    mergelogs = {}
+
+    for log in list(logcollection):
+        if not containersapi or log in ("crunch-run.txt", "stdout.txt", "stderr.txt"):
+            logname = log[:-4]
+            logt = deque([], maxlen)
+            mergelogs[logname] = logt
+            with logcollection.open(log) as f:
+                for l in f:
+                    if containersapi:
+                        g = timestamp_re.match(l)
+                        if g:
+                            logt.append((g.group(1), g.group(2)))
+                    elif not crunchstat_re.match(l):
+                        logt.append(l)
+
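+    # Merge the per-file tails into one chronological stream by repeatedly
+    # taking the line with the earliest timestamp, then keep the last maxlen.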
+    if containersapi:
+        keys = list(mergelogs)
+        loglines = []
+        while True:
+            earliest = None
+            for k in keys:
+                if mergelogs[k]:
+                    if earliest is None or mergelogs[k][0][0] < mergelogs[earliest][0][0]:
+                        earliest = k
+            if earliest is None:
+                break
+            ts, msg = mergelogs[earliest].popleft()
+            loglines.append("%s %s %s" % (ts, earliest, msg))
+        loglines = loglines[-maxlen:]
+    else:
+        loglines = mergelogs[list(mergelogs)[0]]
+
+    logtxt = "\n  ".join(l.strip() for l in loglines)
+    logfunc("%s\n\n  %s", header, logtxt)
diff --git a/sdk/cwl/arvados_cwl/executor.py b/sdk/cwl/arvados_cwl/executor.py
new file mode 100644 (file)
index 0000000..319e8a8
--- /dev/null
@@ -0,0 +1,786 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import division
+from builtins import next
+from builtins import object
+from builtins import str
+from future.utils import viewvalues
+
+import argparse
+import logging
+import os
+import sys
+import threading
+import copy
+import json
+import re
+from functools import partial
+import time
+
+from cwltool.errors import WorkflowException
+import cwltool.workflow
+from schema_salad.sourceline import SourceLine
+import schema_salad.validate as validate
+
+import arvados
+import arvados.config
+from arvados.keep import KeepClient
+from arvados.errors import ApiError
+
+import arvados_cwl.util
+from .arvcontainer import RunnerContainer
+from .arvjob import RunnerJob, RunnerTemplate
+from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps
+from .arvtool import ArvadosCommandTool, validate_cluster_target, ArvadosExpressionTool
+from .arvworkflow import ArvadosWorkflow, upload_workflow
+from .fsaccess import CollectionFsAccess, CollectionFetcher, collectionResolver, CollectionCache, pdh_size
+from .perf import Perf
+from .pathmapper import NoFollowPathMapper
+from .task_queue import TaskQueue
+from .context import ArvLoadingContext, ArvRuntimeContext
+from ._version import __version__
+
+from cwltool.process import shortname, UnsupportedRequirement, use_custom_schema
+from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, get_listing, visit_class
+from cwltool.command_line_tool import compute_checksums
+
+logger = logging.getLogger('arvados.cwl-runner')
+metrics = logging.getLogger('arvados.cwl-runner.metrics')
+
+DEFAULT_PRIORITY = 500
+
+class RuntimeStatusLoggingHandler(logging.Handler):
+    """
+    Intercepts logging calls and reports them as runtime statuses on the
+    runner container.
+    """
+    def __init__(self, runtime_status_update_func):
+        super(RuntimeStatusLoggingHandler, self).__init__()
+        self.runtime_status_update = runtime_status_update_func
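+        # updatingRuntimeStatus guards against re-entrancy:
+        # runtime_status_update() may itself log (e.g. on API errors),
+        # which would otherwise recurse back into emit().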
+        self.updatingRuntimeStatus = False
+
+    def emit(self, record):
+        kind = None
+        if record.levelno >= logging.ERROR:
+            kind = 'error'
+        elif record.levelno >= logging.WARNING:
+            kind = 'warning'
+        if kind is not None and not self.updatingRuntimeStatus:
+            self.updatingRuntimeStatus = True
+            try:
+                log_msg = record.getMessage()
+                if '\n' in log_msg:
+                    # If the logged message is multi-line, use its first line as status
+                    # and the rest as detail.
+                    status, detail = log_msg.split('\n', 1)
+                    self.runtime_status_update(
+                        kind,
+                        "%s: %s" % (record.name, status),
+                        detail
+                    )
+                else:
+                    self.runtime_status_update(
+                        kind,
+                        "%s: %s" % (record.name, record.getMessage())
+                    )
+            finally:
+                self.updatingRuntimeStatus = False
+
+
+class ArvCwlExecutor(object):
+    """Execute a CWL tool or workflow, submit work (using either jobs or
+    containers API), wait for them to complete, and report output.
+
+    """
+
+    def __init__(self, api_client,
+                 arvargs=None,
+                 keep_client=None,
+                 num_retries=4,
+                 thread_count=4):
+
+        if arvargs is None:
+            arvargs = argparse.Namespace()
+            arvargs.work_api = None
+            arvargs.output_name = None
+            arvargs.output_tags = None
+            arvargs.thread_count = 1
+            arvargs.collection_cache_size = None
+
+        self.api = api_client
+        self.processes = {}
+        self.workflow_eval_lock = threading.Condition(threading.RLock())
+        self.final_output = None
+        self.final_status = None
+        self.num_retries = num_retries
+        self.uuid = None
+        self.stop_polling = threading.Event()
+        self.poll_api = None
+        self.pipeline = None
+        self.final_output_collection = None
+        self.output_name = arvargs.output_name
+        self.output_tags = arvargs.output_tags
+        self.project_uuid = None
+        self.intermediate_output_ttl = 0
+        self.intermediate_output_collections = []
+        self.trash_intermediate = False
+        self.thread_count = arvargs.thread_count
+        self.poll_interval = 12
+        self.loadingContext = None
+        self.should_estimate_cache_size = True
+
+        if keep_client is not None:
+            self.keep_client = keep_client
+        else:
+            self.keep_client = arvados.keep.KeepClient(api_client=self.api, num_retries=self.num_retries)
+
+        if arvargs.collection_cache_size:
+            collection_cache_size = arvargs.collection_cache_size*1024*1024
+            self.should_estimate_cache_size = False
+        else:
+            collection_cache_size = 256*1024*1024
+
+        self.collection_cache = CollectionCache(self.api, self.keep_client, self.num_retries,
+                                                cap=collection_cache_size)
+
+        self.fetcher_constructor = partial(CollectionFetcher,
+                                           api_client=self.api,
+                                           fs_access=CollectionFsAccess("", collection_cache=self.collection_cache),
+                                           num_retries=self.num_retries)
+
+        self.work_api = None
+        expected_api = ["jobs", "containers"]
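+        # Probe the API server's discovery document to determine which of
+        # the supported APIs is available, honoring an explicit --api choice.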
+        for api in expected_api:
+            try:
+                methods = self.api._rootDesc.get('resources')[api]['methods']
+                if ('httpMethod' in methods['create'] and
+                    (arvargs.work_api == api or arvargs.work_api is None)):
+                    self.work_api = api
+                    break
+            except KeyError:
+                pass
+
+        if not self.work_api:
+            if arvargs.work_api is None:
+                raise Exception("No supported APIs")
+            else:
+                raise Exception("Unsupported API '%s', expected one of %s" % (arvargs.work_api, expected_api))
+
+        if self.work_api == "jobs":
+            logger.warning("""
+*******************************
+Using the deprecated 'jobs' API.
+
+To get rid of this warning:
+
+Users: read about migrating at
+http://doc.arvados.org/user/cwl/cwl-style.html#migrate
+and use the option --api=containers
+
+Admins: configure the cluster to disable the 'jobs' API as described at:
+http://doc.arvados.org/install/install-api-server.html#disable_api_methods
+*******************************""")
+
+        self.loadingContext = ArvLoadingContext(vars(arvargs))
+        self.loadingContext.fetcher_constructor = self.fetcher_constructor
+        self.loadingContext.resolver = partial(collectionResolver, self.api, num_retries=self.num_retries)
+        self.loadingContext.construct_tool_object = self.arv_make_tool
+
+        # Add a custom logging handler to the root logger for runtime status reporting
+        # if running inside a container
+        if arvados_cwl.util.get_current_container(self.api, self.num_retries, logger):
+            root_logger = logging.getLogger('')
+
+            # Remove existing RuntimeStatusLoggingHandlers if they exist
+            handlers = [h for h in root_logger.handlers if not isinstance(h, RuntimeStatusLoggingHandler)]
+            root_logger.handlers = handlers
+
+            handler = RuntimeStatusLoggingHandler(self.runtime_status_update)
+            root_logger.addHandler(handler)
+
+        self.runtimeContext = ArvRuntimeContext(vars(arvargs))
+        self.runtimeContext.make_fs_access = partial(CollectionFsAccess,
+                                                     collection_cache=self.collection_cache)
+
+        validate_cluster_target(self, self.runtimeContext)
+
+
+    def arv_make_tool(self, toolpath_object, loadingContext):
+        if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
+            return ArvadosCommandTool(self, toolpath_object, loadingContext)
+        elif "class" in toolpath_object and toolpath_object["class"] == "Workflow":
+            return ArvadosWorkflow(self, toolpath_object, loadingContext)
+        elif "class" in toolpath_object and toolpath_object["class"] == "ExpressionTool":
+            return ArvadosExpressionTool(self, toolpath_object, loadingContext)
+        else:
+            raise Exception("Unknown tool %s" % toolpath_object.get("class"))
+
+    def output_callback(self, out, processStatus):
+        with self.workflow_eval_lock:
+            if processStatus == "success":
+                logger.info("Overall process status is %s", processStatus)
+                state = "Complete"
+            else:
+                logger.error("Overall process status is %s", processStatus)
+                state = "Failed"
+            if self.pipeline:
+                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
+                                                        body={"state": state}).execute(num_retries=self.num_retries)
+            self.final_status = processStatus
+            self.final_output = out
+            self.workflow_eval_lock.notifyAll()
+
+
+    def start_run(self, runnable, runtimeContext):
+        self.task_queue.add(partial(runnable.run, runtimeContext),
+                            self.workflow_eval_lock, self.stop_polling)
+
+    def process_submitted(self, container):
+        with self.workflow_eval_lock:
+            self.processes[container.uuid] = container
+
+    def process_done(self, uuid, record):
+        with self.workflow_eval_lock:
+            j = self.processes[uuid]
+            logger.info("%s %s is %s", self.label(j), uuid, record["state"])
+            self.task_queue.add(partial(j.done, record),
+                                self.workflow_eval_lock, self.stop_polling)
+            del self.processes[uuid]
+
+    def runtime_status_update(self, kind, message, detail=None):
+        """
+        Updates the runtime_status field on the runner container.
+        Called when there's a need to report errors, warnings or just
+        activity statuses, for example in the RuntimeStatusLoggingHandler.
+        """
+        with self.workflow_eval_lock:
+            current = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
+            if current is None:
+                return
+            runtime_status = current.get('runtime_status', {})
+            # If the status kind is an error, only report the first one.
+            if kind == 'error':
+                if not runtime_status.get('error'):
+                    runtime_status.update({
+                        'error': message
+                    })
+                    if detail is not None:
+                        runtime_status.update({
+                            'errorDetail': detail
+                        })
+                # Further errors are only mentioned as a count.
+                else:
+                    # Get anything before an optional 'and N more' string.
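+                    # e.g. "boom (and 2 more)" yields error_msg "boom" and
+                    # failure_qty 2, reported below as "boom (and 3 more)".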
+                    try:
+                        error_msg = re.match(
+                            r'^(.*?)(?=\s*\(and \d+ more\)|$)', runtime_status.get('error')).groups()[0]
+                        more_failures = re.match(
+                            r'.*\(and (\d+) more\)', runtime_status.get('error'))
+                    except TypeError:
+                        # Ignore errors stubbed out by tests
+                        return
+                    if more_failures:
+                        failure_qty = int(more_failures.groups()[0])
+                        runtime_status.update({
+                            'error': "%s (and %d more)" % (error_msg, failure_qty+1)
+                        })
+                    else:
+                        runtime_status.update({
+                            'error': "%s (and 1 more)" % error_msg
+                        })
+            elif kind in ['warning', 'activity']:
+                # Record the last warning/activity status without regard to
+                # previous occurrences.
+                runtime_status.update({
+                    kind: message
+                })
+                if detail is not None:
+                    runtime_status.update({
+                        kind+"Detail": detail
+                    })
+            else:
+                # Ignore any other status kind
+                return
+            try:
+                self.api.containers().update(uuid=current['uuid'],
+                                            body={
+                                                'runtime_status': runtime_status,
+                                            }).execute(num_retries=self.num_retries)
+            except Exception as e:
+                logger.info("Couldn't update runtime_status: %s", e)
+
+    def wrapped_callback(self, cb, obj, st):
+        with self.workflow_eval_lock:
+            cb(obj, st)
+            self.workflow_eval_lock.notifyAll()
+
+    def get_wrapped_callback(self, cb):
+        return partial(self.wrapped_callback, cb)
+
+    def on_message(self, event):
+        if event.get("object_uuid") in self.processes and event["event_type"] == "update":
+            uuid = event["object_uuid"]
+            if event["properties"]["new_attributes"]["state"] == "Running":
+                with self.workflow_eval_lock:
+                    j = self.processes[uuid]
+                    if j.running is False:
+                        j.running = True
+                        j.update_pipeline_component(event["properties"]["new_attributes"])
+                        logger.info("%s %s is Running", self.label(j), uuid)
+            elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled", "Final"):
+                self.process_done(uuid, event["properties"]["new_attributes"])
+
+    def label(self, obj):
+        return "[%s %s]" % (self.work_api[0:-1], obj.name)
+
+    def poll_states(self):
+        """Poll status of jobs or containers listed in the processes dict.
+
+        Runs in a separate thread.
+        """
+
+        try:
+            remain_wait = self.poll_interval
+            while True:
+                if remain_wait > 0:
+                    self.stop_polling.wait(remain_wait)
+                if self.stop_polling.is_set():
+                    break
+                with self.workflow_eval_lock:
+                    keys = list(self.processes)
+                if not keys:
+                    remain_wait = self.poll_interval
+                    continue
+
+                begin_poll = time.time()
+                if self.work_api == "containers":
+                    table = self.poll_api.container_requests()
+                elif self.work_api == "jobs":
+                    table = self.poll_api.jobs()
+
+                pageSize = self.poll_api._rootDesc.get('maxItemsPerResponse', 1000)
+
+                while keys:
+                    page = keys[:pageSize]
+                    keys = keys[pageSize:]
+                    try:
+                        proc_states = table.list(filters=[["uuid", "in", page]]).execute(num_retries=self.num_retries)
+                    except Exception:
+                        logger.exception("Error checking states on API server")
+                        remain_wait = self.poll_interval
+                        continue
+
+                    for p in proc_states["items"]:
+                        self.on_message({
+                            "object_uuid": p["uuid"],
+                            "event_type": "update",
+                            "properties": {
+                                "new_attributes": p
+                            }
+                        })
+                finish_poll = time.time()
+                remain_wait = self.poll_interval - (finish_poll - begin_poll)
+        except:
+            logger.exception("Fatal error in state polling thread.")
+            with self.workflow_eval_lock:
+                self.processes.clear()
+                self.workflow_eval_lock.notifyAll()
+        finally:
+            self.stop_polling.set()
+
+    def add_intermediate_output(self, uuid):
+        if uuid:
+            self.intermediate_output_collections.append(uuid)
+
+    def trash_intermediate_output(self):
+        logger.info("Cleaning up intermediate output collections")
+        for i in self.intermediate_output_collections:
+            try:
+                self.api.collections().delete(uuid=i).execute(num_retries=self.num_retries)
+            except Exception:
+                logger.warning("Failed to delete intermediate output: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
+            except (KeyboardInterrupt, SystemExit):
+                break
+
+    def check_features(self, obj):
+        if isinstance(obj, dict):
+            if obj.get("writable") and self.work_api != "containers":
+                raise SourceLine(obj, "writable", UnsupportedRequirement).makeError("InitialWorkDir feature 'writable: true' not supported with --api=jobs")
+            if obj.get("class") == "DockerRequirement":
+                if obj.get("dockerOutputDirectory"):
+                    if self.work_api != "containers":
+                        raise SourceLine(obj, "dockerOutputDirectory", UnsupportedRequirement).makeError(
+                            "Option 'dockerOutputDirectory' of DockerRequirement not supported with --api=jobs.")
+                    if not obj.get("dockerOutputDirectory").startswith('/'):
+                        raise SourceLine(obj, "dockerOutputDirectory", validate.ValidationException).makeError(
+                            "Option 'dockerOutputDirectory' must be an absolute path.")
+            if obj.get("class") == "http://commonwl.org/cwltool#Secrets" and self.work_api != "containers":
+                raise SourceLine(obj, "class", UnsupportedRequirement).makeError("Secrets not supported with --api=jobs")
+            for v in viewvalues(obj):
+                self.check_features(v)
+        elif isinstance(obj, list):
+            for i,v in enumerate(obj):
+                with SourceLine(obj, i, UnsupportedRequirement, logger.isEnabledFor(logging.DEBUG)):
+                    self.check_features(v)
+
+    def make_output_collection(self, name, storage_classes, tagsString, outputObj):
+        outputObj = copy.deepcopy(outputObj)
+
+        files = []
+        def capture(fileobj):
+            files.append(fileobj)
+
+        adjustDirObjs(outputObj, capture)
+        adjustFileObjs(outputObj, capture)
+
+        generatemapper = NoFollowPathMapper(files, "", "", separateDirs=False)
+
+        final = arvados.collection.Collection(api_client=self.api,
+                                              keep_client=self.keep_client,
+                                              num_retries=self.num_retries)
+
+        for k,v in generatemapper.items():
+            if k.startswith("_:"):
+                if v.type == "Directory":
+                    continue
+                if v.type == "CreateFile":
+                    with final.open(v.target, "wb") as f:
+                        f.write(v.resolved.encode("utf-8"))
+                    continue
+
+            if not k.startswith("keep:"):
+                raise Exception("Output source is not in keep or a literal")
+            sp = k.split("/")
+            srccollection = sp[0][5:]
+            try:
+                reader = self.collection_cache.get(srccollection)
+                srcpath = "/".join(sp[1:]) if len(sp) > 1 else "."
+                final.copy(srcpath, v.target, source_collection=reader, overwrite=False)
+            except arvados.errors.ArgumentError as e:
+                logger.error("Creating CollectionReader for '%s' '%s': %s", k, v, e)
+                raise
+            except IOError as e:
+                logger.warning("While preparing output collection: %s", e)
+
+        def rewrite(fileobj):
+            fileobj["location"] = generatemapper.mapper(fileobj["location"]).target
+            for k in ("listing", "contents", "nameext", "nameroot", "dirname"):
+                if k in fileobj:
+                    del fileobj[k]
+
+        adjustDirObjs(outputObj, rewrite)
+        adjustFileObjs(outputObj, rewrite)
+
+        with final.open("cwl.output.json", "w") as f:
+            res = str(json.dumps(outputObj, sort_keys=True, indent=4, separators=(',',': '), ensure_ascii=False))
+            f.write(res)
+
+        final.save_new(name=name, owner_uuid=self.project_uuid, storage_classes=storage_classes, ensure_unique_name=True)
+
+        logger.info("Final output collection %s \"%s\" (%s)", final.portable_data_hash(),
+                    final.api_response()["name"],
+                    final.manifest_locator())
+
+        final_uuid = final.manifest_locator()
+        tags = tagsString.split(',')
+        for tag in tags:
+             self.api.links().create(body={
+                "head_uuid": final_uuid, "link_class": "tag", "name": tag
+                }).execute(num_retries=self.num_retries)
+
+        def finalcollection(fileobj):
+            fileobj["location"] = "keep:%s/%s" % (final.portable_data_hash(), fileobj["location"])
+
+        adjustDirObjs(outputObj, finalcollection)
+        adjustFileObjs(outputObj, finalcollection)
+
+        return (outputObj, final)
+
+    def set_crunch_output(self):
+        if self.work_api == "containers":
+            current = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
+            if current is None:
+                return
+            try:
+                self.api.containers().update(uuid=current['uuid'],
+                                             body={
+                                                 'output': self.final_output_collection.portable_data_hash(),
+                                             }).execute(num_retries=self.num_retries)
+                self.api.collections().update(uuid=self.final_output_collection.manifest_locator(),
+                                              body={
+                                                  'is_trashed': True
+                                              }).execute(num_retries=self.num_retries)
+            except Exception:
+                logger.exception("Setting container output")
+                return
+        elif self.work_api == "jobs" and "TASK_UUID" in os.environ:
+            self.api.job_tasks().update(uuid=os.environ["TASK_UUID"],
+                                   body={
+                                       'output': self.final_output_collection.portable_data_hash(),
+                                       'success': self.final_status == "success",
+                                       'progress': 1.0
+                                   }).execute(num_retries=self.num_retries)
+
+    def arv_executor(self, tool, job_order, runtimeContext, logger=None):
+        self.debug = runtimeContext.debug
+
+        tool.visit(self.check_features)
+
+        self.project_uuid = runtimeContext.project_uuid
+        self.pipeline = None
+        self.fs_access = runtimeContext.make_fs_access(runtimeContext.basedir)
+        self.secret_store = runtimeContext.secret_store
+
+        self.trash_intermediate = runtimeContext.trash_intermediate
+        if self.trash_intermediate and self.work_api != "containers":
+            raise Exception("--trash-intermediate is only supported with --api=containers.")
+
+        self.intermediate_output_ttl = runtimeContext.intermediate_output_ttl
+        if self.intermediate_output_ttl and self.work_api != "containers":
+            raise Exception("--intermediate-output-ttl is only supported with --api=containers.")
+        if self.intermediate_output_ttl < 0:
+            raise Exception("Invalid value %d for --intermediate-output-ttl, cannot be less than zero" % self.intermediate_output_ttl)
+
+        if runtimeContext.submit_request_uuid and self.work_api != "containers":
+            raise Exception("--submit-request-uuid requires containers API, but using '{}' api".format(self.work_api))
+
+        if not runtimeContext.name:
+            runtimeContext.name = self.name = tool.tool.get("label") or tool.metadata.get("label") or os.path.basename(tool.tool["id"])
+
+        # Upload direct dependencies of workflow steps, get back mapping of files to keep references.
+        # Also uploads docker images.
+        merged_map = upload_workflow_deps(self, tool)
+
+        # Reload tool object which may have been updated by
+        # upload_workflow_deps
+        # Don't validate this time because it will just print redundant errors.
+        loadingContext = self.loadingContext.copy()
+        loadingContext.loader = tool.doc_loader
+        loadingContext.avsc_names = tool.doc_schema
+        loadingContext.metadata = tool.metadata
+        loadingContext.do_validate = False
+
+        tool = self.arv_make_tool(tool.doc_loader.idx[tool.tool["id"]],
+                                  loadingContext)
+
+        # Upload local file references in the job order.
+        job_order = upload_job_order(self, "%s input" % runtimeContext.name,
+                                     tool, job_order)
+
+        existing_uuid = runtimeContext.update_workflow
+        if existing_uuid or runtimeContext.create_workflow:
+            # Create a pipeline template or workflow record and exit.
+            if self.work_api == "jobs":
+                tmpl = RunnerTemplate(self, tool, job_order,
+                                      runtimeContext.enable_reuse,
+                                      uuid=existing_uuid,
+                                      submit_runner_ram=runtimeContext.submit_runner_ram,
+                                      name=runtimeContext.name,
+                                      merged_map=merged_map,
+                                      loadingContext=loadingContext)
+                tmpl.save()
+                # cwltool.main will write our return value to stdout.
+                return (tmpl.uuid, "success")
+            elif self.work_api == "containers":
+                return (upload_workflow(self, tool, job_order,
+                                        self.project_uuid,
+                                        uuid=existing_uuid,
+                                        submit_runner_ram=runtimeContext.submit_runner_ram,
+                                        name=runtimeContext.name,
+                                        merged_map=merged_map),
+                        "success")
+
+        self.ignore_docker_for_reuse = runtimeContext.ignore_docker_for_reuse
+        self.eval_timeout = runtimeContext.eval_timeout
+
+        runtimeContext = runtimeContext.copy()
+        runtimeContext.use_container = True
+        runtimeContext.tmpdir_prefix = "tmp"
+        runtimeContext.work_api = self.work_api
+
+        if self.work_api == "containers":
+            if self.ignore_docker_for_reuse:
+                raise Exception("--ignore-docker-for-reuse not supported with containers API.")
+            runtimeContext.outdir = "/var/spool/cwl"
+            runtimeContext.docker_outdir = "/var/spool/cwl"
+            runtimeContext.tmpdir = "/tmp"
+            runtimeContext.docker_tmpdir = "/tmp"
+        elif self.work_api == "jobs":
+            if runtimeContext.priority != DEFAULT_PRIORITY:
+                raise Exception("--priority not implemented for jobs API.")
+            runtimeContext.outdir = "$(task.outdir)"
+            runtimeContext.docker_outdir = "$(task.outdir)"
+            runtimeContext.tmpdir = "$(task.tmpdir)"
+
+        if runtimeContext.priority < 1 or runtimeContext.priority > 1000:
+            raise Exception("--priority must be in the range 1..1000.")
+
+        if self.should_estimate_cache_size:
+            visited = set()
+            estimated_size = [0]
+            def estimate_collection_cache(obj):
+                if obj.get("location", "").startswith("keep:"):
+                    m = pdh_size.match(obj["location"][5:])
+                    if m and m.group(1) not in visited:
+                        visited.add(m.group(1))
+                        estimated_size[0] += int(m.group(2))
+            visit_class(job_order, ("File", "Directory"), estimate_collection_cache)
+            runtimeContext.collection_cache_size = max(((estimated_size[0]*192) // (1024*1024))+1, 256)
+            self.collection_cache.set_cap(runtimeContext.collection_cache_size*1024*1024)
+
+        logger.info("Using collection cache size %s MiB", runtimeContext.collection_cache_size)
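+
+        # Worked example for the estimate above (a sketch; the locator is
+        # hypothetical).  The size suffix of a collection's portable data
+        # hash is the length of its manifest in bytes, and CollectionCache
+        # charges each cached reader at 128 bytes per manifest byte, so the
+        # 192 multiplier leaves ~50% headroom:
+        #
+        #     keep:d41d8cd98f00b204e9800998ecf8427e+46000/f -> 46000 bytes
+        #     ((46000 * 192) // (1024*1024)) + 1 == 9 MiB
+        #     max(9, 256)                        == 256 MiB (the floor)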
+
+        runnerjob = None
+        if runtimeContext.submit:
+            # Submit a runner job to run the workflow for us.
+            if self.work_api == "containers":
+                if tool.tool["class"] == "CommandLineTool" and runtimeContext.wait and (not runtimeContext.always_submit_runner):
+                    runtimeContext.runnerjob = tool.tool["id"]
+                else:
+                    tool = RunnerContainer(self, tool, loadingContext, runtimeContext.enable_reuse,
+                                                self.output_name,
+                                                self.output_tags,
+                                                submit_runner_ram=runtimeContext.submit_runner_ram,
+                                                name=runtimeContext.name,
+                                                on_error=runtimeContext.on_error,
+                                                submit_runner_image=runtimeContext.submit_runner_image,
+                                                intermediate_output_ttl=runtimeContext.intermediate_output_ttl,
+                                                merged_map=merged_map,
+                                                priority=runtimeContext.priority,
+                                                secret_store=self.secret_store,
+                                                collection_cache_size=runtimeContext.collection_cache_size,
+                                                collection_cache_is_default=self.should_estimate_cache_size)
+            elif self.work_api == "jobs":
+                tool = RunnerJob(self, tool, loadingContext, runtimeContext.enable_reuse,
+                                      self.output_name,
+                                      self.output_tags,
+                                      submit_runner_ram=runtimeContext.submit_runner_ram,
+                                      name=runtimeContext.name,
+                                      on_error=runtimeContext.on_error,
+                                      submit_runner_image=runtimeContext.submit_runner_image,
+                                      merged_map=merged_map)
+        elif runtimeContext.cwl_runner_job is None and self.work_api == "jobs":
+            # Create pipeline for local run
+            self.pipeline = self.api.pipeline_instances().create(
+                body={
+                    "owner_uuid": self.project_uuid,
+                    "name": runtimeContext.name if runtimeContext.name else shortname(tool.tool["id"]),
+                    "components": {},
+                    "state": "RunningOnClient"}).execute(num_retries=self.num_retries)
+            logger.info("Pipeline instance %s", self.pipeline["uuid"])
+
+        if runtimeContext.cwl_runner_job is not None:
+            self.uuid = runtimeContext.cwl_runner_job.get('uuid')
+
+        jobiter = tool.job(job_order,
+                           self.output_callback,
+                           runtimeContext)
+
+        if runtimeContext.submit and not runtimeContext.wait:
+            runnerjob = next(jobiter)
+            runnerjob.run(runtimeContext)
+            return (runnerjob.uuid, "success")
+
+        current_container = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
+        if current_container:
+            logger.info("Running inside container %s", current_container.get("uuid"))
+
+        self.poll_api = arvados.api('v1', timeout=runtimeContext.http_timeout)
+        self.polling_thread = threading.Thread(target=self.poll_states)
+        self.polling_thread.start()
+
+        self.task_queue = TaskQueue(self.workflow_eval_lock, self.thread_count)
+
+        try:
+            self.workflow_eval_lock.acquire()
+
+            # The lock is held while this loop runs and is released
+            # inside self.workflow_eval_lock.wait(); while waiting,
+            # on_message can update job state and process output
+            # callbacks.
+
+            loopperf = Perf(metrics, "jobiter")
+            loopperf.__enter__()
+            for runnable in jobiter:
+                loopperf.__exit__()
+
+                if self.stop_polling.is_set():
+                    break
+
+                if self.task_queue.error is not None:
+                    raise self.task_queue.error
+
+                if runnable:
+                    with Perf(metrics, "run"):
+                        self.start_run(runnable, runtimeContext)
+                else:
+                    if (self.task_queue.in_flight + len(self.processes)) > 0:
+                        self.workflow_eval_lock.wait(3)
+                    else:
+                        logger.error("Workflow is deadlocked, no runnable processes and not waiting on any pending processes.")
+                        break
+
+                if self.stop_polling.is_set():
+                    break
+
+                loopperf.__enter__()
+            loopperf.__exit__()
+
+            while (self.task_queue.in_flight + len(self.processes)) > 0:
+                if self.task_queue.error is not None:
+                    raise self.task_queue.error
+                self.workflow_eval_lock.wait(3)
+
+        except UnsupportedRequirement:
+            raise
+        except:
+            if sys.exc_info()[0] is KeyboardInterrupt or sys.exc_info()[0] is SystemExit:
+                logger.error("Interrupted, workflow will be cancelled")
+            elif isinstance(sys.exc_info()[1], WorkflowException):
+                logger.error("Workflow execution failed:\n%s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
+            else:
+                logger.exception("Workflow execution failed")
+
+            if self.pipeline:
+                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
+                                                     body={"state": "Failed"}).execute(num_retries=self.num_retries)
+            if runtimeContext.submit and isinstance(tool, Runner):
+                runnerjob = tool
+                if runnerjob.uuid and self.work_api == "containers":
+                    self.api.container_requests().update(uuid=runnerjob.uuid,
+                                                         body={"priority": "0"}).execute(num_retries=self.num_retries)
+        finally:
+            self.workflow_eval_lock.release()
+            self.task_queue.drain()
+            self.stop_polling.set()
+            self.polling_thread.join()
+            self.task_queue.join()
+
+        if self.final_status == "UnsupportedRequirement":
+            raise UnsupportedRequirement("Check log for details.")
+
+        if self.final_output is None:
+            raise WorkflowException("Workflow did not return a result.")
+
+        if runtimeContext.submit and isinstance(tool, Runner):
+            logger.info("Final output collection %s", tool.final_output)
+        else:
+            if self.output_name is None:
+                self.output_name = "Output of %s" % (shortname(tool.tool["id"]))
+            if self.output_tags is None:
+                self.output_tags = ""
+
+            storage_classes = runtimeContext.storage_classes.strip().split(",")
+            self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, storage_classes, self.output_tags, self.final_output)
+            self.set_crunch_output()
+
+        if runtimeContext.compute_checksum:
+            adjustDirObjs(self.final_output, partial(get_listing, self.fs_access))
+            adjustFileObjs(self.final_output, partial(compute_checksums, self.fs_access))
+
+        if self.trash_intermediate and self.final_status == "success":
+            self.trash_intermediate_output()
+
+        return (self.final_output, self.final_status)
diff --git a/sdk/cwl/arvados_cwl/fsaccess.py b/sdk/cwl/arvados_cwl/fsaccess.py
new file mode 100644 (file)
index 0000000..3744b4a
--- /dev/null
@@ -0,0 +1,310 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
+from builtins import str
+from future.utils import viewvalues
+
+import fnmatch
+import os
+import errno
+import urllib.parse
+import re
+import logging
+import threading
+from collections import OrderedDict
+
+import ruamel.yaml as yaml
+
+import cwltool.stdfsaccess
+from cwltool.pathmapper import abspath
+import cwltool.resolver
+
+import arvados.util
+import arvados.collection
+import arvados.arvfile
+import arvados.errors
+
+from googleapiclient.errors import HttpError
+
+from schema_salad.ref_resolver import DefaultFetcher
+
+logger = logging.getLogger('arvados.cwl-runner')
+
+pdh_size = re.compile(r'([0-9a-f]{32})\+(\d+)(\+\S+)*')
+
+class CollectionCache(object):
+    def __init__(self, api_client, keep_client, num_retries,
+                 cap=256*1024*1024,
+                 min_entries=2):
+        self.api_client = api_client
+        self.keep_client = keep_client
+        self.num_retries = num_retries
+        self.collections = OrderedDict()
+        self.lock = threading.Lock()
+        self.total = 0
+        self.cap = cap
+        self.min_entries = min_entries
+
+    def set_cap(self, cap):
+        self.cap = cap
+
+    def cap_cache(self, required):
+        # ordered dict iterates from oldest to newest
+        for pdh, v in list(self.collections.items()):
+            available = self.cap - self.total
+            if available >= required or len(self.collections) < self.min_entries:
+                return
+            # cut it loose
+            logger.debug("Evicting collection reader %s from cache (cap %s total %s required %s)", pdh, self.cap, self.total, required)
+            del self.collections[pdh]
+            self.total -= v[1]
+
+    def get(self, pdh):
+        with self.lock:
+            if pdh not in self.collections:
+                m = pdh_size.match(pdh)
+                if m:
+                    self.cap_cache(int(m.group(2)) * 128)
+                logger.debug("Creating collection reader for %s", pdh)
+                cr = arvados.collection.CollectionReader(pdh, api_client=self.api_client,
+                                                         keep_client=self.keep_client,
+                                                         num_retries=self.num_retries)
+                sz = len(cr.manifest_text()) * 128
+                self.collections[pdh] = (cr, sz)
+                self.total += sz
+            else:
+                cr, sz = self.collections[pdh]
+                # bump it to the back
+                del self.collections[pdh]
+                self.collections[pdh] = (cr, sz)
+            return cr
+
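+# Usage sketch for CollectionCache (illustrative; the PDH is made up).
+# Entries live in an OrderedDict, so iteration order is eviction order:
+#
+#     cache = CollectionCache(api_client, keep_client, num_retries=2)
+#     cr = cache.get("d41d8cd98f00b204e9800998ecf8427e+46000")
+#     assert cache.get("d41d8cd98f00b204e9800998ecf8427e+46000") is cr
+#     # the repeat get() is a cache hit and bumps the entry to the back
+#     # (most recently used); cap_cache() evicts from the front.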
+
+class CollectionFsAccess(cwltool.stdfsaccess.StdFsAccess):
+    """Implement the cwltool FsAccess interface for Arvados Collections."""
+
+    def __init__(self, basedir, collection_cache=None):
+        super(CollectionFsAccess, self).__init__(basedir)
+        self.collection_cache = collection_cache
+
+    def get_collection(self, path):
+        sp = path.split("/", 1)
+        p = sp[0]
+        if p.startswith("keep:") and arvados.util.keep_locator_pattern.match(p[5:]):
+            pdh = p[5:]
+            return (self.collection_cache.get(pdh), urllib.parse.unquote(sp[1]) if len(sp) == 2 else None)
+        else:
+            return (None, path)
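+
+    # Illustrative behavior (sketch; the PDH is made up):
+    #
+    #     fs.get_collection("keep:d41d8cd98f00b204e9800998ecf8427e+0/dir/a.txt")
+    #     # -> (<CollectionReader for the PDH>, "dir/a.txt")
+    #     fs.get_collection("/local/path")
+    #     # -> (None, "/local/path")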
+
+    def _match(self, collection, patternsegments, parent):
+        if not patternsegments:
+            return []
+
+        if not isinstance(collection, arvados.collection.RichCollectionBase):
+            return []
+
+        ret = []
+        # iterate over the files and subcollections in 'collection'
+        for filename in collection:
+            if patternsegments[0] == '.':
+                # Pattern contains something like "./foo" so just shift
+                # past the "./"
+                ret.extend(self._match(collection, patternsegments[1:], parent))
+            elif fnmatch.fnmatch(filename, patternsegments[0]):
+                cur = os.path.join(parent, filename)
+                if len(patternsegments) == 1:
+                    ret.append(cur)
+                else:
+                    ret.extend(self._match(collection[filename], patternsegments[1:], cur))
+        return ret
+
+    def glob(self, pattern):
+        collection, rest = self.get_collection(pattern)
+        if collection is not None and not rest:
+            return [pattern]
+        patternsegments = rest.split("/")
+        return sorted(self._match(collection, patternsegments, "keep:" + collection.manifest_locator()))
+
+    def open(self, fn, mode):
+        collection, rest = self.get_collection(fn)
+        if collection is not None:
+            return collection.open(rest, mode)
+        else:
+            return super(CollectionFsAccess, self).open(self._abs(fn), mode)
+
+    def exists(self, fn):
+        try:
+            collection, rest = self.get_collection(fn)
+        except HttpError as err:
+            if err.resp.status == 404:
+                return False
+            else:
+                raise
+        if collection is not None:
+            if rest:
+                return collection.exists(rest)
+            else:
+                return True
+        else:
+            return super(CollectionFsAccess, self).exists(fn)
+
+    def size(self, fn):  # type: (unicode) -> int
+        collection, rest = self.get_collection(fn)
+        if collection is not None:
+            if rest:
+                arvfile = collection.find(rest)
+                if isinstance(arvfile, arvados.arvfile.ArvadosFile):
+                    return arvfile.size()
+            raise IOError(errno.EINVAL, "Not a path to a file %s" % (fn))
+        else:
+            return super(CollectionFsAccess, self).size(fn)
+
+    def isfile(self, fn):  # type: (unicode) -> bool
+        collection, rest = self.get_collection(fn)
+        if collection is not None:
+            if rest:
+                return isinstance(collection.find(rest), arvados.arvfile.ArvadosFile)
+            else:
+                return False
+        else:
+            return super(CollectionFsAccess, self).isfile(fn)
+
+    def isdir(self, fn):  # type: (unicode) -> bool
+        collection, rest = self.get_collection(fn)
+        if collection is not None:
+            if rest:
+                return isinstance(collection.find(rest), arvados.collection.RichCollectionBase)
+            else:
+                return True
+        else:
+            return super(CollectionFsAccess, self).isdir(fn)
+
+    def listdir(self, fn):  # type: (unicode) -> List[unicode]
+        collection, rest = self.get_collection(fn)
+        if collection is not None:
+            if rest:
+                dir = collection.find(rest)
+            else:
+                dir = collection
+            if dir is None:
+                raise IOError(errno.ENOENT, "Directory '%s' in '%s' not found" % (rest, collection.portable_data_hash()))
+            if not isinstance(dir, arvados.collection.RichCollectionBase):
+                raise IOError(errno.ENOENT, "Path '%s' in '%s' is not a Directory" % (rest, collection.portable_data_hash()))
+            return [abspath(l, fn) for l in list(dir.keys())]
+        else:
+            return super(CollectionFsAccess, self).listdir(fn)
+
+    def join(self, path, *paths): # type: (unicode, *unicode) -> unicode
+        if paths and paths[-1].startswith("keep:") and arvados.util.keep_locator_pattern.match(paths[-1][5:]):
+            return paths[-1]
+        return os.path.join(path, *paths)
+
+    def realpath(self, path):
+        if path.startswith("$(task.tmpdir)") or path.startswith("$(task.outdir)"):
+            return path
+        collection, rest = self.get_collection(path)
+        if collection is not None:
+            return path
+        else:
+            return os.path.realpath(path)
+
+class CollectionFetcher(DefaultFetcher):
+    def __init__(self, cache, session, api_client=None, fs_access=None, num_retries=4):
+        super(CollectionFetcher, self).__init__(cache, session)
+        self.api_client = api_client
+        self.fsaccess = fs_access
+        self.num_retries = num_retries
+
+    def fetch_text(self, url):
+        if url.startswith("keep:"):
+            with self.fsaccess.open(url, "r") as f:
+                return f.read()
+        if url.startswith("arvwf:"):
+            record = self.api_client.workflows().get(uuid=url[6:]).execute(num_retries=self.num_retries)
+            definition = record["definition"] + ('\nlabel: "%s"\n' % record["name"].replace('"', '\\"'))
+            return definition
+        return super(CollectionFetcher, self).fetch_text(url)
+
+    def check_exists(self, url):
+        try:
+            if url.startswith("http://arvados.org/cwl"):
+                return True
+            if url.startswith("keep:"):
+                return self.fsaccess.exists(url)
+            if url.startswith("arvwf:"):
+                if self.fetch_text(url):
+                    return True
+        except arvados.errors.NotFoundError:
+            return False
+        except Exception:
+            logger.exception("Got unexpected exception checking if file exists")
+            return False
+        return super(CollectionFetcher, self).check_exists(url)
+
+    def urljoin(self, base_url, url):
+        if not url:
+            return base_url
+
+        urlsp = urllib.parse.urlsplit(url)
+        if urlsp.scheme or not base_url:
+            return url
+
+        basesp = urllib.parse.urlsplit(base_url)
+        if basesp.scheme in ("keep", "arvwf"):
+            if not basesp.path:
+                raise IOError(errno.EINVAL, "Invalid Keep locator", base_url)
+
+            baseparts = basesp.path.split("/")
+            urlparts = urlsp.path.split("/") if urlsp.path else []
+
+            pdh = baseparts.pop(0)
+
+            if basesp.scheme == "keep" and not arvados.util.keep_locator_pattern.match(pdh):
+                raise IOError(errno.EINVAL, "Invalid Keep locator", base_url)
+
+            if urlsp.path.startswith("/"):
+                baseparts = []
+                urlparts.pop(0)
+
+            if baseparts and urlsp.path:
+                baseparts.pop()
+
+            path = "/".join([pdh] + baseparts + urlparts)
+            return urllib.parse.urlunsplit((basesp.scheme, "", path, "", urlsp.fragment))
+
+        return super(CollectionFetcher, self).urljoin(base_url, url)
+
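+    # How urljoin() combines keep: URLs (illustrative; the PDH is made up):
+    #
+    #     base = "keep:d41d8cd98f00b204e9800998ecf8427e+0/wf/main.cwl"
+    #     fetcher.urljoin(base, "tool.cwl")
+    #     # -> "keep:d41d8cd98f00b204e9800998ecf8427e+0/wf/tool.cwl"
+    #     fetcher.urljoin(base, "/other.cwl")
+    #     # -> "keep:d41d8cd98f00b204e9800998ecf8427e+0/other.cwl"
+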
+    schemes = [u"file", u"http", u"https", u"mailto", u"keep", u"arvwf"]
+
+    def supported_schemes(self):  # type: () -> List[Text]
+        return self.schemes
+
+
+workflow_uuid_pattern = re.compile(r'[a-z0-9]{5}-7fd4e-[a-z0-9]{15}')
+pipeline_template_uuid_pattern = re.compile(r'[a-z0-9]{5}-p5p6p-[a-z0-9]{15}')
+
+def collectionResolver(api_client, document_loader, uri, num_retries=4):
+    if uri.startswith("keep:") or uri.startswith("arvwf:"):
+        return str(uri)
+
+    if workflow_uuid_pattern.match(uri):
+        return u"arvwf:%s#main" % (uri)
+
+    if pipeline_template_uuid_pattern.match(uri):
+        pt = api_client.pipeline_templates().get(uuid=uri).execute(num_retries=num_retries)
+        # viewvalues() returns a view, which is not subscriptable; wrap it
+        # in list() before indexing.
+        return u"keep:" + list(viewvalues(pt["components"]))[0]["script_parameters"]["cwl:tool"]
+
+    p = uri.split("/")
+    if arvados.util.keep_locator_pattern.match(p[0]):
+        return u"keep:%s" % (uri)
+
+    if arvados.util.collection_uuid_pattern.match(p[0]):
+        return u"keep:%s%s" % (api_client.collections().
+                              get(uuid=p[0]).execute()["portable_data_hash"],
+                              uri[len(p[0]):])
+
+    return cwltool.resolver.tool_resolver(document_loader, uri)
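+
+# Illustrative resolutions (sketch; UUIDs and hashes are made up):
+#
+#     "zzzzz-7fd4e-012345678901234"               workflow UUID
+#         -> "arvwf:zzzzz-7fd4e-012345678901234#main"
+#     "d41d8cd98f00b204e9800998ecf8427e+0/wf.cwl" Keep locator
+#         -> "keep:d41d8cd98f00b204e9800998ecf8427e+0/wf.cwl"
+#     Anything else falls through to cwltool.resolver.tool_resolver().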
diff --git a/sdk/cwl/arvados_cwl/http.py b/sdk/cwl/arvados_cwl/http.py
new file mode 100644 (file)
index 0000000..47a3043
--- /dev/null
@@ -0,0 +1,155 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import division
+from future import standard_library
+standard_library.install_aliases()
+
+import requests
+import email.utils
+import time
+import datetime
+import re
+import arvados
+import arvados.collection
+import urllib.parse
+import logging
+import calendar
+
+logger = logging.getLogger('arvados.cwl-runner')
+
+def my_formatdate(dt):
+    return email.utils.formatdate(timeval=calendar.timegm(dt.timetuple()),
+                                  localtime=False, usegmt=True)
+
+def my_parsedate(text):
+    parsed = email.utils.parsedate_tz(text)
+    if parsed:
+        if parsed[9]:
+            # Adjust to UTC
+            return datetime.datetime(*parsed[:6]) + datetime.timedelta(seconds=parsed[9])
+        else:
+            # TZ is zero or missing, assume UTC.
+            return datetime.datetime(*parsed[:6])
+    else:
+        return datetime.datetime(1970, 1, 1)
+
+def fresh_cache(url, properties, now):
+    pr = properties[url]
+    expires = None
+
+    logger.debug("Checking cache freshness for %s using %s", url, pr)
+
+    if "Cache-Control" in pr:
+        if re.match(r"immutable", pr["Cache-Control"]):
+            return True
+
+        g = re.match(r"(s-maxage|max-age)=(\d+)", pr["Cache-Control"])
+        if g:
+            expires = my_parsedate(pr["Date"]) + datetime.timedelta(seconds=int(g.group(2)))
+
+    if expires is None and "Expires" in pr:
+        expires = my_parsedate(pr["Expires"])
+
+    if expires is None:
+        # Use a default cache time of 24 hours if upstream didn't set
+        # any cache headers, to reduce redundant downloads.
+        expires = my_parsedate(pr["Date"]) + datetime.timedelta(hours=24)
+
+    if not expires:
+        return False
+
+    return (now < expires)
+
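+# Worked example for fresh_cache() (hypothetical header values):
+#
+#     properties = {url: {"Date": "Tue, 11 Sep 2018 00:00:00 GMT",
+#                         "Cache-Control": "max-age=3600"}}
+#     # -> fresh until 01:00:00 GMT the same day.
+#     # "Cache-Control: immutable" is always fresh; with no caching
+#     # headers at all, the 24 hour default below applies.
+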
+def remember_headers(url, properties, headers, now):
+    properties.setdefault(url, {})
+    for h in ("Cache-Control", "ETag", "Expires", "Date", "Content-Length"):
+        if h in headers:
+            properties[url][h] = headers[h]
+    if "Date" not in headers:
+        properties[url]["Date"] = my_formatdate(now)
+
+
+def changed(url, properties, now):
+    req = requests.head(url, allow_redirects=True)
+    remember_headers(url, properties, req.headers, now)
+
+    if req.status_code != 200:
+        raise Exception("Got status %s" % req.status_code)
+
+    pr = properties[url]
+    if "ETag" in pr and "ETag" in req.headers:
+        if pr["ETag"] == req.headers["ETag"]:
+            return False
+
+    return True
+
+def http_to_keep(api, project_uuid, url, utcnow=datetime.datetime.utcnow):
+    r = api.collections().list(filters=[["properties", "exists", url]]).execute()
+
+    now = utcnow()
+
+    for item in r["items"]:
+        properties = item["properties"]
+        if fresh_cache(url, properties, now):
+            # Cached copy is still fresh; reuse the existing collection.
+            cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
+            return "keep:%s/%s" % (item["portable_data_hash"], list(cr.keys())[0])
+
+        if not changed(url, properties, now):
+            # ETag didn't change, same content, just update headers
+            api.collections().update(uuid=item["uuid"], body={"collection":{"properties": properties}}).execute()
+            cr = arvados.collection.CollectionReader(item["portable_data_hash"], api_client=api)
+            return "keep:%s/%s" % (item["portable_data_hash"], list(cr.keys())[0])
+
+    properties = {}
+    req = requests.get(url, stream=True, allow_redirects=True)
+
+    if req.status_code != 200:
+        raise Exception("Failed to download '%s' got status %s " % (url, req.status_code))
+
+    remember_headers(url, properties, req.headers, now)
+
+    if "Content-Length" in properties[url]:
+        cl = int(properties[url]["Content-Length"])
+        logger.info("Downloading %s (%s bytes)", url, cl)
+    else:
+        cl = None
+        logger.info("Downloading %s (unknown size)", url)
+
+    c = arvados.collection.Collection()
+
+    if req.headers.get("Content-Disposition"):
+        grp = re.search(r'filename=("((\"|[^"])+)"|([^][()<>@,;:\"/?={} ]+))', req.headers["Content-Disposition"])
+        # Guard against a Content-Disposition header with no filename
+        # parameter, which would otherwise raise AttributeError below.
+        if grp is None:
+            name = urllib.parse.urlparse(url).path.split("/")[-1]
+        elif grp.group(2):
+            name = grp.group(2)
+        else:
+            name = grp.group(4)
+    else:
+        name = urllib.parse.urlparse(url).path.split("/")[-1]
+
+    count = 0
+    start = time.time()
+    checkpoint = start
+    with c.open(name, "wb") as f:
+        for chunk in req.iter_content(chunk_size=1024):
+            count += len(chunk)
+            f.write(chunk)
+            loopnow = time.time()
+            if (loopnow - checkpoint) > 20:
+                bps = count / (loopnow - start)
+                if cl is not None:
+                    logger.info("%2.1f%% complete, %3.2f MiB/s, %1.0f seconds left",
+                                ((count * 100) / cl),
+                                (bps / (1024*1024)),
+                                ((cl-count) / bps))
+                else:
+                    logger.info("%d downloaded, %3.2f MiB/s", count, (bps / (1024*1024)))
+                checkpoint = loopnow
+
+    c.save_new(name="Downloaded from %s" % url, owner_uuid=project_uuid, ensure_unique_name=True)
+
+    api.collections().update(uuid=c.manifest_locator(), body={"collection":{"properties": properties}}).execute()
+
+    return "keep:%s/%s" % (c.portable_data_hash(), name)
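+
+# Typical use (sketch; the URL and resulting locator are hypothetical):
+#
+#     ref = http_to_keep(api, project_uuid, "https://example.com/data.fa")
+#     # -> "keep:<portable data hash>/data.fa"
+#
+# Later calls for the same URL reuse the cached collection for as long
+# as fresh_cache() and changed() say the upstream content is unchanged.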
diff --git a/sdk/cwl/arvados_cwl/pathmapper.py b/sdk/cwl/arvados_cwl/pathmapper.py
new file mode 100644 (file)
index 0000000..e0445fe
--- /dev/null
@@ -0,0 +1,319 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from future import standard_library
+standard_library.install_aliases()
+from builtins import str
+from past.builtins import basestring
+from future.utils import viewitems
+
+import re
+import logging
+import uuid
+import os
+import urllib.request, urllib.parse, urllib.error
+
+import arvados_cwl.util
+import arvados.commands.run
+import arvados.collection
+
+from schema_salad.sourceline import SourceLine
+
+from arvados.errors import ApiError
+from cwltool.pathmapper import PathMapper, MapperEnt, abspath, adjustFileObjs, adjustDirObjs
+from cwltool.workflow import WorkflowException
+
+from .http import http_to_keep
+
+logger = logging.getLogger('arvados.cwl-runner')
+
+def trim_listing(obj):
+    """Remove 'listing' field from Directory objects that are keep references.
+
+    When Directory objects represent Keep references, it is redundant and
+    potentially very expensive to pass fully enumerated Directory objects
+    between instances of cwl-runner (e.g. when submitting a job, or using the
+    RunInSingleContainer feature), so delete the 'listing' field when it is
+    safe to do so.
+
+    """
+
+    if obj.get("location", "").startswith("keep:") and "listing" in obj:
+        del obj["listing"]
+
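+# Illustrative effect (sketch; the PDH is made up):
+#
+#     d = {"class": "Directory",
+#          "location": "keep:d41d8cd98f00b204e9800998ecf8427e+0",
+#          "listing": [{"class": "File", "location": "keep:..."}]}
+#     trim_listing(d)   # drops "listing"; a local ("file:...") or
+#                       # literal ("_:...") Directory is left untouched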
+
+class ArvPathMapper(PathMapper):
+    """Convert container-local paths to and from Keep collection ids."""
+
+    pdh_path = re.compile(r'^keep:[0-9a-f]{32}\+\d+/.+$')
+    pdh_dirpath = re.compile(r'^keep:[0-9a-f]{32}\+\d+(/.*)?$')
+
+    def __init__(self, arvrunner, referenced_files, input_basedir,
+                 collection_pattern, file_pattern, name=None, single_collection=False):
+        self.arvrunner = arvrunner
+        self.input_basedir = input_basedir
+        self.collection_pattern = collection_pattern
+        self.file_pattern = file_pattern
+        self.name = name
+        self.referenced_files = [r["location"] for r in referenced_files]
+        self.single_collection = single_collection
+        super(ArvPathMapper, self).__init__(referenced_files, input_basedir, None)
+
+    def visit(self, srcobj, uploadfiles):
+        src = srcobj["location"]
+        if "#" in src:
+            src = src[:src.index("#")]
+
+        if isinstance(src, basestring) and ArvPathMapper.pdh_dirpath.match(src):
+            self._pathmap[src] = MapperEnt(src, self.collection_pattern % urllib.parse.unquote(src[5:]), srcobj["class"], True)
+
+        debug = logger.isEnabledFor(logging.DEBUG)
+
+        if src not in self._pathmap:
+            if src.startswith("file:"):
+                # Local FS ref, may need to be uploaded or may be on keep
+                # mount.
+                ab = abspath(src, self.input_basedir)
+                st = arvados.commands.run.statfile("", ab,
+                                                   fnPattern="keep:%s/%s",
+                                                   dirPattern="keep:%s/%s",
+                                                   raiseOSError=True)
+                with SourceLine(srcobj, "location", WorkflowException, debug):
+                    if isinstance(st, arvados.commands.run.UploadFile):
+                        uploadfiles.add((src, ab, st))
+                    elif isinstance(st, arvados.commands.run.ArvFile):
+                        self._pathmap[src] = MapperEnt(st.fn, self.collection_pattern % urllib.parse.unquote(st.fn[5:]), "File", True)
+                    else:
+                        raise WorkflowException("Input file path '%s' is invalid" % st)
+            elif src.startswith("_:"):
+                if srcobj["class"] == "File" and "contents" not in srcobj:
+                    raise WorkflowException("File literal '%s' is missing `contents`" % src)
+                if srcobj["class"] == "Directory" and "listing" not in srcobj:
+                    raise WorkflowException("Directory literal '%s' is missing `listing`" % src)
+            elif src.startswith("http:") or src.startswith("https:"):
+                keepref = http_to_keep(self.arvrunner.api, self.arvrunner.project_uuid, src)
+                logger.info("%s is %s", src, keepref)
+                self._pathmap[src] = MapperEnt(keepref, keepref, srcobj["class"], True)
+            else:
+                self._pathmap[src] = MapperEnt(src, src, srcobj["class"], True)
+
+        with SourceLine(srcobj, "secondaryFiles", WorkflowException, debug):
+            for l in srcobj.get("secondaryFiles", []):
+                self.visit(l, uploadfiles)
+        with SourceLine(srcobj, "listing", WorkflowException, debug):
+            for l in srcobj.get("listing", []):
+                self.visit(l, uploadfiles)
+
+    def addentry(self, obj, c, path, remap):
+        if obj["location"] in self._pathmap:
+            src, srcpath = self.arvrunner.fs_access.get_collection(self._pathmap[obj["location"]].resolved)
+            if srcpath == "":
+                srcpath = "."
+            c.copy(srcpath, path + "/" + obj["basename"], source_collection=src, overwrite=True)
+            remap.append((obj["location"], path + "/" + obj["basename"]))
+            for l in obj.get("secondaryFiles", []):
+                self.addentry(l, c, path, remap)
+        elif obj["class"] == "Directory":
+            for l in obj.get("listing", []):
+                self.addentry(l, c, path + "/" + obj["basename"], remap)
+            remap.append((obj["location"], path + "/" + obj["basename"]))
+        elif obj["location"].startswith("_:") and "contents" in obj:
+            with c.open(path + "/" + obj["basename"], "w") as f:
+                f.write(obj["contents"])
+            remap.append((obj["location"], path + "/" + obj["basename"]))
+        else:
+            raise SourceLine(obj, "location", WorkflowException).makeError("Don't know what to do with '%s'" % obj["location"])
+
+    def needs_new_collection(self, srcobj, prefix=""):
+        """Check if files need to be staged into a new collection.
+
+        If all the files are in the same collection and in the same
+        paths they would be staged to, return False.  Otherwise, a new
+        collection is needed with files copied/created in the
+        appropriate places.
+        """
+
+        loc = srcobj["location"]
+        if loc.startswith("_:"):
+            return True
+        if prefix:
+            if loc != prefix+srcobj["basename"]:
+                return True
+        else:
+            i = loc.rfind("/")
+            if i > -1:
+                prefix = loc[:i+1]
+            else:
+                prefix = loc+"/"
+        if srcobj["class"] == "File" and loc not in self._pathmap:
+            return True
+        for s in srcobj.get("secondaryFiles", []):
+            if self.needs_new_collection(s, prefix):
+                return True
+        if srcobj.get("listing"):
+            prefix = "%s%s/" % (prefix, srcobj["basename"])
+            for l in srcobj["listing"]:
+                if self.needs_new_collection(l, prefix):
+                    return True
+        return False
+
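+    # Illustrative cases (sketch; the PDH placeholder is made up, and
+    # both locations are assumed to be present in self._pathmap):
+    #
+    #     {"class": "File", "location": "keep:<pdh>/a.bam",
+    #      "secondaryFiles": [{"class": "File", "basename": "a.bam.bai",
+    #                          "location": "keep:<pdh>/a.bam.bai"}]}
+    #     # -> False: everything is already staged in one collection.
+    #     # A literal location ("_:...") always returns True.
+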
+    def setup(self, referenced_files, basedir):
+        # type: (List[Any], unicode) -> None
+        uploadfiles = set()
+
+        collection = None
+        if self.single_collection:
+            collection = arvados.collection.Collection(api_client=self.arvrunner.api,
+                                                       keep_client=self.arvrunner.keep_client,
+                                                       num_retries=self.arvrunner.num_retries)
+
+        for srcobj in referenced_files:
+            self.visit(srcobj, uploadfiles)
+
+        arvados.commands.run.uploadfiles([u[2] for u in uploadfiles],
+                                         self.arvrunner.api,
+                                         dry_run=False,
+                                         num_retries=self.arvrunner.num_retries,
+                                         fnPattern="keep:%s/%s",
+                                         name=self.name,
+                                         project=self.arvrunner.project_uuid,
+                                         collection=collection,
+                                         packed=False)
+
+        for src, ab, st in uploadfiles:
+            self._pathmap[src] = MapperEnt(urllib.parse.quote(st.fn, "/:+@"), self.collection_pattern % st.fn[5:],
+                                           "Directory" if os.path.isdir(ab) else "File", True)
+
+        for srcobj in referenced_files:
+            remap = []
+            if srcobj["class"] == "Directory" and srcobj["location"] not in self._pathmap:
+                c = arvados.collection.Collection(api_client=self.arvrunner.api,
+                                                  keep_client=self.arvrunner.keep_client,
+                                                  num_retries=self.arvrunner.num_retries)
+                for l in srcobj.get("listing", []):
+                    self.addentry(l, c, ".", remap)
+
+                container = arvados_cwl.util.get_current_container(self.arvrunner.api, self.arvrunner.num_retries, logger)
+                info = arvados_cwl.util.get_intermediate_collection_info(None, container, self.arvrunner.intermediate_output_ttl)
+
+                c.save_new(name=info["name"],
+                           owner_uuid=self.arvrunner.project_uuid,
+                           ensure_unique_name=True,
+                           trash_at=info["trash_at"],
+                           properties=info["properties"])
+
+                ab = self.collection_pattern % c.portable_data_hash()
+                self._pathmap[srcobj["location"]] = MapperEnt("keep:"+c.portable_data_hash(), ab, "Directory", True)
+            elif srcobj["class"] == "File" and (srcobj.get("secondaryFiles") or
+                (srcobj["location"].startswith("_:") and "contents" in srcobj)):
+
+                # If all secondary files/directories are located in
+                # the same collection as the primary file, with paths
+                # and names consistent with staging, don't create a
+                # new collection.
+                if not self.needs_new_collection(srcobj):
+                    continue
+
+                c = arvados.collection.Collection(api_client=self.arvrunner.api,
+                                                  keep_client=self.arvrunner.keep_client,
+                                                  num_retries=self.arvrunner.num_retries)
+                self.addentry(srcobj, c, ".", remap)
+
+                container = arvados_cwl.util.get_current_container(self.arvrunner.api, self.arvrunner.num_retries, logger)
+                info = arvados_cwl.util.get_intermediate_collection_info(None, container, self.arvrunner.intermediate_output_ttl)
+
+                c.save_new(name=info["name"],
+                           owner_uuid=self.arvrunner.project_uuid,
+                           ensure_unique_name=True,
+                           trash_at=info["trash_at"],
+                           properties=info["properties"])
+
+                ab = self.file_pattern % (c.portable_data_hash(), srcobj["basename"])
+                self._pathmap[srcobj["location"]] = MapperEnt("keep:%s/%s" % (c.portable_data_hash(), srcobj["basename"]),
+                                                              ab, "File", True)
+                if srcobj.get("secondaryFiles"):
+                    ab = self.collection_pattern % c.portable_data_hash()
+                    self._pathmap["_:" + str(uuid.uuid4())] = MapperEnt("keep:"+c.portable_data_hash(), ab, "Directory", True)
+
+            if remap:
+                for loc, sub in remap:
+                    # subdirs start with "./"; strip it off
+                    if sub.startswith("./"):
+                        ab = self.file_pattern % (c.portable_data_hash(), sub[2:])
+                    else:
+                        ab = self.file_pattern % (c.portable_data_hash(), sub)
+                    self._pathmap[loc] = MapperEnt("keep:%s/%s" % (c.portable_data_hash(), sub[2:]),
+                                                   ab, "Directory", True)
+
+        self.keepdir = None
+
+    def reversemap(self, target):
+        p = super(ArvPathMapper, self).reversemap(target)
+        if p:
+            return p
+        elif target.startswith("keep:"):
+            return (target, target)
+        elif self.keepdir and target.startswith(self.keepdir):
+            kp = "keep:" + target[len(self.keepdir)+1:]
+            return (kp, kp)
+        else:
+            return None
+
+
+class StagingPathMapper(PathMapper):
+    _follow_dirs = True
+
+    def __init__(self, referenced_files, basedir, stagedir, separateDirs=True):
+        self.targets = set()
+        super(StagingPathMapper, self).__init__(referenced_files, basedir, stagedir, separateDirs)
+
+    def visit(self, obj, stagedir, basedir, copy=False, staged=False):
+        # type: (Dict[unicode, Any], unicode, unicode, bool, bool) -> None
+        loc = obj["location"]
+        tgt = os.path.join(stagedir, obj["basename"])
+        basetgt, baseext = os.path.splitext(tgt)
+        n = 1
+        if tgt in self.targets and (self.reversemap(tgt)[0] != loc):
+            while tgt in self.targets:
+                n += 1
+                tgt = "%s_%i%s" % (basetgt, n, baseext)
+        self.targets.add(tgt)
+        if obj["class"] == "Directory":
+            if obj.get("writable"):
+                self._pathmap[loc] = MapperEnt(loc, tgt, "WritableDirectory", staged)
+            else:
+                self._pathmap[loc] = MapperEnt(loc, tgt, "Directory", staged)
+            if loc.startswith("_:") or self._follow_dirs:
+                self.visitlisting(obj.get("listing", []), tgt, basedir)
+        elif obj["class"] == "File":
+            if loc in self._pathmap:
+                return
+            if "contents" in obj and loc.startswith("_:"):
+                self._pathmap[loc] = MapperEnt(obj["contents"], tgt, "CreateFile", staged)
+            else:
+                if copy or obj.get("writable"):
+                    self._pathmap[loc] = MapperEnt(loc, tgt, "WritableFile", staged)
+                else:
+                    self._pathmap[loc] = MapperEnt(loc, tgt, "File", staged)
+                self.visitlisting(obj.get("secondaryFiles", []), stagedir, basedir)
+
+
+class VwdPathMapper(StagingPathMapper):
+    def setup(self, referenced_files, basedir):
+        # type: (List[Any], unicode) -> None
+
+        # Go through each file and set the target to its own directory along
+        # with any secondary files.
+        self.visitlisting(referenced_files, self.stagedir, basedir)
+
+        for path, (ab, tgt, type, staged) in viewitems(self._pathmap):
+            if type in ("File", "Directory") and ab.startswith("keep:"):
+                self._pathmap[path] = MapperEnt("$(task.keep)/%s" % ab[5:], tgt, type, staged)
+
+
+class NoFollowPathMapper(StagingPathMapper):
+    _follow_dirs = False
+    def setup(self, referenced_files, basedir):
+        # type: (List[Any], unicode) -> None
+        self.visitlisting(referenced_files, self.stagedir, basedir)
diff --git a/sdk/cwl/arvados_cwl/perf.py b/sdk/cwl/arvados_cwl/perf.py
new file mode 100644 (file)
index 0000000..cc3ea96
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from builtins import object
+
+import time
+
+class Perf(object):
+    def __init__(self, logger, name):
+        self.logger = logger
+        self.name = name
+
+    def __enter__(self):
+        self.time = time.time()
+        self.logger.debug("ENTER %s %s", self.name, self.time)
+
+    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
+        now = time.time()
+        self.logger.debug("EXIT %s %s %s", self.name, now, now - self.time)
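+
+# Usage sketch: Perf is a context manager that logs wall-clock duration
+# at debug level (the logger name here is illustrative):
+#
+#     metrics = logging.getLogger("arvados.cwl-runner.metrics")
+#     with Perf(metrics, "upload"):
+#         upload_files()   # hypothetical work being timed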
diff --git a/sdk/cwl/arvados_cwl/runner.py b/sdk/cwl/arvados_cwl/runner.py
new file mode 100644 (file)
index 0000000..e515ac2
--- /dev/null
@@ -0,0 +1,485 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from future import standard_library
+standard_library.install_aliases()
+from future.utils import viewvalues, viewitems
+
+import os
+import sys
+import urllib.parse
+from functools import partial
+import logging
+import json
+from collections import namedtuple
+from io import StringIO
+
+if os.name == "posix" and sys.version_info[0] < 3:
+    import subprocess32 as subprocess
+else:
+    import subprocess
+
+from schema_salad.sourceline import SourceLine, cmap
+
+from cwltool.command_line_tool import CommandLineTool
+import cwltool.workflow
+from cwltool.process import scandeps, UnsupportedRequirement, normalizeFilesDirs, shortname, Process
+from cwltool.load_tool import fetch_document
+from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, visit_class
+from cwltool.utils import aslist
+from cwltool.builder import substitute
+from cwltool.pack import pack
+
+import arvados.collection
+import ruamel.yaml as yaml
+
+import arvados_cwl.arvdocker
+from .pathmapper import ArvPathMapper, trim_listing
+from ._version import __version__
+from . import done
+
+logger = logging.getLogger('arvados.cwl-runner')
+
+def trim_anonymous_location(obj):
+    """Remove 'location' field from File and Directory literals.
+
+    To make internal handling easier, literals are assigned a random id for
+    'location'.  However, when writing the record back out, this can break
+    reproducibility.  Since it is valid for literals not to have a 'location'
+    field, remove it.
+
+    """
+
+    if obj.get("location", "").startswith("_:"):
+        del obj["location"]
+
+
+def remove_redundant_fields(obj):
+    for field in ("path", "nameext", "nameroot", "dirname"):
+        if field in obj:
+            del obj[field]
+
+
+def find_defaults(d, op):
+    if isinstance(d, list):
+        for i in d:
+            find_defaults(i, op)
+    elif isinstance(d, dict):
+        if "default" in d:
+            op(d)
+        else:
+            for i in viewvalues(d):
+                find_defaults(i, op)
+
+def setSecondary(t, fileobj, discovered):
+    if isinstance(fileobj, dict) and fileobj.get("class") == "File":
+        if "secondaryFiles" not in fileobj:
+            fileobj["secondaryFiles"] = cmap([{"location": substitute(fileobj["location"], sf), "class": "File"} for sf in t["secondaryFiles"]])
+            if discovered is not None:
+                discovered[fileobj["location"]] = fileobj["secondaryFiles"]
+    elif isinstance(fileobj, list):
+        for e in fileobj:
+            setSecondary(t, e, discovered)
+
+def discover_secondary_files(inputs, job_order, discovered=None):
+    for t in inputs:
+        if shortname(t["id"]) in job_order and t.get("secondaryFiles"):
+            setSecondary(t, job_order[shortname(t["id"])], discovered)
+
+
+def upload_dependencies(arvrunner, name, document_loader,
+                        workflowobj, uri, loadref_run,
+                        include_primary=True, discovered_secondaryfiles=None):
+    """Upload the dependencies of the workflowobj document to Keep.
+
+    Returns a pathmapper object mapping local paths to keep references.  Also
+    does an in-place update of references in "workflowobj".
+
+    Use scandeps to find $import, $include, $schemas, run, File and Directory
+    fields that represent external references.
+
+    If workflowobj has an "id" field, this will reload the document to ensure
+    it is scanning the raw document prior to preprocessing.
+    """
+
+    loaded = set()
+    def loadref(b, u):
+        joined = document_loader.fetcher.urljoin(b, u)
+        defrg, _ = urllib.parse.urldefrag(joined)
+        if defrg not in loaded:
+            loaded.add(defrg)
+            # Use fetch_text to get raw file (before preprocessing).
+            text = document_loader.fetch_text(defrg)
+            if isinstance(text, bytes):
+                textIO = StringIO(text.decode('utf-8'))
+            else:
+                textIO = StringIO(text)
+            return yaml.safe_load(textIO)
+        else:
+            return {}
+
+    if loadref_run:
+        loadref_fields = set(("$import", "run"))
+    else:
+        loadref_fields = set(("$import",))
+
+    scanobj = workflowobj
+    if "id" in workflowobj:
+        # Need raw file content (before preprocessing) to ensure
+        # that external references in $include and $mixin are captured.
+        scanobj = loadref("", workflowobj["id"])
+
+    sc_result = scandeps(uri, scanobj,
+                  loadref_fields,
+                  set(("$include", "$schemas", "location")),
+                  loadref, urljoin=document_loader.fetcher.urljoin)
+
+    sc = []
+    def only_real(obj):
+        # Only interested in local files that need to be uploaded,
+        # don't include file literals, keep references, etc.
+        sp = obj.get("location", "").split(":")
+        if len(sp) > 1 and sp[0] in ("file", "http", "https"):
+            sc.append(obj)
+
+    visit_class(sc_result, ("File", "Directory"), only_real)
+
+    normalizeFilesDirs(sc)
+
+    if include_primary and "id" in workflowobj:
+        sc.append({"class": "File", "location": workflowobj["id"]})
+
+    if "$schemas" in workflowobj:
+        for s in workflowobj["$schemas"]:
+            sc.append({"class": "File", "location": s})
+
+    def visit_default(obj):
+        remove = [False]
+        def ensure_default_location(f):
+            if "location" not in f and "path" in f:
+                f["location"] = f["path"]
+                del f["path"]
+            if "location" in f and not arvrunner.fs_access.exists(f["location"]):
+                # Doesn't exist, remove from list of dependencies to upload
+                sc[:] = [x for x in sc if x["location"] != f["location"]]
+                # Delete "default" from workflowobj
+                remove[0] = True
+        visit_class(obj["default"], ("File", "Directory"), ensure_default_location)
+        if remove[0]:
+            del obj["default"]
+
+    find_defaults(workflowobj, visit_default)
+
+    discovered = {}
+    def discover_default_secondary_files(obj):
+        discover_secondary_files(obj["inputs"],
+                                 {shortname(t["id"]): t["default"] for t in obj["inputs"] if "default" in t},
+                                 discovered)
+
+    visit_class(workflowobj, ("CommandLineTool", "Workflow"), discover_default_secondary_files)
+
+    for d in list(discovered):
+        # Only interested in discovered secondaryFiles which are local
+        # files that need to be uploaded.
+        if d.startswith("file:"):
+            sc.extend(discovered[d])
+        else:
+            del discovered[d]
+
+    mapper = ArvPathMapper(arvrunner, sc, "",
+                           "keep:%s",
+                           "keep:%s/%s",
+                           name=name,
+                           single_collection=True)
+
+    def setloc(p):
+        if "location" in p and (not p["location"].startswith("_:")) and (not p["location"].startswith("keep:")):
+            p["location"] = mapper.mapper(p["location"]).resolved
+
+    visit_class(workflowobj, ("File", "Directory"), setloc)
+    visit_class(discovered, ("File", "Directory"), setloc)
+
+    if discovered_secondaryfiles is not None:
+        for d in discovered:
+            discovered_secondaryfiles[mapper.mapper(d).resolved] = discovered[d]
+
+    if "$schemas" in workflowobj:
+        sch = []
+        for s in workflowobj["$schemas"]:
+            sch.append(mapper.mapper(s).resolved)
+        workflowobj["$schemas"] = sch
+
+    return mapper
+
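+# Sketch of a lookup on the returned mapper (paths are hypothetical):
+#
+#     mapper = upload_dependencies(arvrunner, "wf deps", loader,
+#                                  workflowobj, workflowobj["id"], False)
+#     mapper.mapper("file:///home/me/script.py").resolved
+#     # -> "keep:<pdh>/script.py"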
+
+def upload_docker(arvrunner, tool):
+    """Uploads Docker images used in CommandLineTool objects."""
+
+    if isinstance(tool, CommandLineTool):
+        (docker_req, docker_is_req) = tool.get_requirement("DockerRequirement")
+        if docker_req:
+            if docker_req.get("dockerOutputDirectory") and arvrunner.work_api != "containers":
+                # TODO: can be supported by containers API, but not jobs API.
+                raise SourceLine(docker_req, "dockerOutputDirectory", UnsupportedRequirement).makeError(
+                    "Option 'dockerOutputDirectory' of DockerRequirement not supported.")
+            arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, docker_req, True, arvrunner.project_uuid)
+        else:
+            arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, {"dockerPull": "arvados/jobs"}, True, arvrunner.project_uuid)
+    elif isinstance(tool, cwltool.workflow.Workflow):
+        for s in tool.steps:
+            upload_docker(arvrunner, s.embedded_tool)
+
+
+def packed_workflow(arvrunner, tool, merged_map):
+    """Create a packed workflow.
+
+    A "packed" workflow is one where all the components have been combined into a single document."""
+
+    rewrites = {}
+    packed = pack(tool.doc_loader, tool.doc_loader.fetch(tool.tool["id"]),
+                  tool.tool["id"], tool.metadata, rewrite_out=rewrites)
+
+    rewrite_to_orig = {v: k for k,v in viewitems(rewrites)}
+
+    def visit(v, cur_id):
+        if isinstance(v, dict):
+            if v.get("class") in ("CommandLineTool", "Workflow"):
+                if "id" not in v:
+                    raise SourceLine(v, None, Exception).makeError("Embedded process object is missing required 'id' field")
+                cur_id = rewrite_to_orig.get(v["id"], v["id"])
+            if "location" in v and not v["location"].startswith("keep:"):
+                v["location"] = merged_map[cur_id].resolved[v["location"]]
+            if "location" in v and v["location"] in merged_map[cur_id].secondaryFiles:
+                v["secondaryFiles"] = merged_map[cur_id].secondaryFiles[v["location"]]
+            if v.get("class") == "DockerRequirement":
+                v["http://arvados.org/cwl#dockerCollectionPDH"] = arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, v, True, arvrunner.project_uuid)
+            for l in v:
+                visit(v[l], cur_id)
+        if isinstance(v, list):
+            for l in v:
+                visit(l, cur_id)
+    visit(packed, None)
+    return packed
+
+
+def tag_git_version(packed, tool):
+    # "tool" is taken as an explicit parameter; it was not otherwise in
+    # scope here, so the original one-argument signature raised NameError
+    # when called.
+    if tool.tool["id"].startswith("file://"):
+        path = os.path.dirname(tool.tool["id"][7:])
+        try:
+            githash = subprocess.check_output(['git', 'log', '--first-parent', '--max-count=1', '--format=%H'], stderr=subprocess.STDOUT, cwd=path).strip()
+        except (OSError, subprocess.CalledProcessError):
+            pass
+        else:
+            packed["http://schema.org/version"] = githash
+
+
+def upload_job_order(arvrunner, name, tool, job_order):
+    """Upload local files referenced in the input object and return updated input
+    object with 'location' updated to the proper keep references.
+    """
+
+    discover_secondary_files(tool.tool["inputs"], job_order)
+
+    jobmapper = upload_dependencies(arvrunner,
+                                    name,
+                                    tool.doc_loader,
+                                    job_order,
+                                    job_order.get("id", "#"),
+                                    False)
+
+    if "id" in job_order:
+        del job_order["id"]
+
+    # Need to filter this out; it gets added by cwltool when parameters
+    # are provided on the command line.
+    if "job_order" in job_order:
+        del job_order["job_order"]
+
+    return job_order
+
+FileUpdates = namedtuple("FileUpdates", ["resolved", "secondaryFiles"])
+
+def upload_workflow_deps(arvrunner, tool):
+    # Ensure that Docker images needed by this workflow are available
+
+    upload_docker(arvrunner, tool)
+
+    document_loader = tool.doc_loader
+
+    merged_map = {}
+
+    def upload_tool_deps(deptool):
+        if "id" in deptool:
+            discovered_secondaryfiles = {}
+            pm = upload_dependencies(arvrunner,
+                                     "%s dependencies" % (shortname(deptool["id"])),
+                                     document_loader,
+                                     deptool,
+                                     deptool["id"],
+                                     False,
+                                     include_primary=False,
+                                     discovered_secondaryfiles=discovered_secondaryfiles)
+            document_loader.idx[deptool["id"]] = deptool
+            toolmap = {}
+            for k,v in pm.items():
+                toolmap[k] = v.resolved
+            merged_map[deptool["id"]] = FileUpdates(toolmap, discovered_secondaryfiles)
+
+    tool.visit(upload_tool_deps)
+
+    return merged_map
+
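+# Shape of the returned merged_map (illustrative; the id and locator
+# are hypothetical):
+#
+#     {"file:///home/me/wf.cwl":
+#         FileUpdates(resolved={"file:///home/me/script.py":
+#                               "keep:<pdh>/script.py"},
+#                     secondaryFiles={})}
+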
+def arvados_jobs_image(arvrunner, img):
+    """Determine if the right arvados/jobs image version is available.  If not, try to pull and upload it."""
+
+    try:
+        return arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, {"dockerPull": img}, True, arvrunner.project_uuid)
+    except Exception as e:
+        raise Exception("Docker image %s is not available\n%s" % (img, e))
+
+
+def upload_workflow_collection(arvrunner, name, packed):
+    collection = arvados.collection.Collection(api_client=arvrunner.api,
+                                               keep_client=arvrunner.keep_client,
+                                               num_retries=arvrunner.num_retries)
+    with collection.open("workflow.cwl", "w") as f:
+        f.write(json.dumps(packed, indent=2, sort_keys=True, separators=(',',': ')))
+
+    filters = [["portable_data_hash", "=", collection.portable_data_hash()],
+               ["name", "like", name+"%"]]
+    if arvrunner.project_uuid:
+        filters.append(["owner_uuid", "=", arvrunner.project_uuid])
+    exists = arvrunner.api.collections().list(filters=filters).execute(num_retries=arvrunner.num_retries)
+
+    if exists["items"]:
+        logger.info("Using collection %s", exists["items"][0]["uuid"])
+    else:
+        collection.save_new(name=name,
+                            owner_uuid=arvrunner.project_uuid,
+                            ensure_unique_name=True,
+                            num_retries=arvrunner.num_retries)
+        logger.info("Uploaded to %s", collection.manifest_locator())
+
+    return collection.portable_data_hash()
+
+
+class Runner(Process):
+    """Base class for runner processes, which submit an instance of
+    arvados-cwl-runner and wait for the final result."""
+
+    def __init__(self, runner, tool, loadingContext, enable_reuse,
+                 output_name, output_tags, submit_runner_ram=0,
+                 name=None, on_error=None, submit_runner_image=None,
+                 intermediate_output_ttl=0, merged_map=None,
+                 priority=None, secret_store=None,
+                 collection_cache_size=256,
+                 collection_cache_is_default=True):
+
+        super(Runner, self).__init__(tool.tool, loadingContext)
+
+        self.arvrunner = runner
+        self.embedded_tool = tool
+        self.job_order = None
+        self.running = False
+        if enable_reuse:
+            # If reuse is permitted by command line arguments but
+            # disabled by the workflow itself, disable it.
+            reuse_req, _ = self.embedded_tool.get_requirement("http://arvados.org/cwl#ReuseRequirement")
+            if reuse_req:
+                enable_reuse = reuse_req["enableReuse"]
+        self.enable_reuse = enable_reuse
+        self.uuid = None
+        self.final_output = None
+        self.output_name = output_name
+        self.output_tags = output_tags
+        self.name = name
+        self.on_error = on_error
+        self.jobs_image = submit_runner_image or "arvados/jobs:"+__version__
+        self.intermediate_output_ttl = intermediate_output_ttl
+        self.priority = priority
+        self.secret_store = secret_store
+
+        self.submit_runner_cores = 1
+        self.submit_runner_ram = 1024  # default 1 GiB
+        self.collection_cache_size = collection_cache_size
+
+        runner_resource_req, _ = self.embedded_tool.get_requirement("http://arvados.org/cwl#WorkflowRunnerResources")
+        if runner_resource_req:
+            if runner_resource_req.get("coresMin"):
+                self.submit_runner_cores = runner_resource_req["coresMin"]
+            if runner_resource_req.get("ramMin"):
+                self.submit_runner_ram = runner_resource_req["ramMin"]
+            if runner_resource_req.get("keep_cache") and collection_cache_is_default:
+                self.collection_cache_size = runner_resource_req["keep_cache"]
+
+        if submit_runner_ram:
+            # Command line / initializer overrides default and/or spec from workflow
+            self.submit_runner_ram = submit_runner_ram
+
+        if self.submit_runner_ram <= 0:
+            raise Exception("Value of submit-runner-ram must be greater than zero")
+
+        if self.submit_runner_cores <= 0:
+            raise Exception("Value of submit-runner-cores must be greater than zero")
+
+        self.merged_map = merged_map or {}
+
+    def job(self,
+            job_order,         # type: Mapping[Text, Text]
+            output_callbacks,  # type: Callable[[Any, Any], Any]
+            runtimeContext     # type: RuntimeContext
+           ):  # type: (...) -> Generator[Any, None, None]
+        self.job_order = job_order
+        self._init_job(job_order, runtimeContext)
+        yield self
+
+    def update_pipeline_component(self, record):
+        pass
+
+    def done(self, record):
+        """Base method for handling a completed runner."""
+
+        try:
+            if record["state"] == "Complete":
+                if record.get("exit_code") is not None:
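+                    # By arvados-cwl-runner convention, exit code 33 means the
+                    # workflow used a requirement this runner does not support.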
+                    if record["exit_code"] == 33:
+                        processStatus = "UnsupportedRequirement"
+                    elif record["exit_code"] == 0:
+                        processStatus = "success"
+                    else:
+                        processStatus = "permanentFail"
+                else:
+                    processStatus = "success"
+            else:
+                processStatus = "permanentFail"
+
+            outputs = {}
+
+            if processStatus == "permanentFail":
+                logc = arvados.collection.CollectionReader(record["log"],
+                                                           api_client=self.arvrunner.api,
+                                                           keep_client=self.arvrunner.keep_client,
+                                                           num_retries=self.arvrunner.num_retries)
+                done.logtail(logc, logger.error, "%s (%s) error log:" % (self.arvrunner.label(self), record["uuid"]), maxlen=40)
+
+            self.final_output = record["output"]
+            outc = arvados.collection.CollectionReader(self.final_output,
+                                                       api_client=self.arvrunner.api,
+                                                       keep_client=self.arvrunner.keep_client,
+                                                       num_retries=self.arvrunner.num_retries)
+            if "cwl.output.json" in outc:
+                with outc.open("cwl.output.json", "rb") as f:
+                    if f.size() > 0:
+                        outputs = json.loads(f.read().decode())
+            def keepify(fileobj):
+                path = fileobj["location"]
+                if not path.startswith("keep:"):
+                    fileobj["location"] = "keep:%s/%s" % (record["output"], path)
+            adjustFileObjs(outputs, keepify)
+            adjustDirObjs(outputs, keepify)
+        except Exception:
+            logger.exception("[%s] While getting final output object", self.name)
+            self.arvrunner.output_callback({}, "permanentFail")
+        else:
+            self.arvrunner.output_callback(outputs, processStatus)
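
Taken together, these helpers form the submission path: upload file and
Docker dependencies, rewrite the input object, and verify the runner image
before a Runner instance is submitted. A minimal sketch of that wiring
(assuming `arvrunner`, `tool`, and `job_order` objects already constructed
by the executor; this is illustrative, not the actual entry point):

    # Sketch only -- names and ordering follow the helpers above.
    from arvados_cwl.runner import (upload_workflow_deps, upload_job_order,
                                    arvados_jobs_image)

    merged_map = upload_workflow_deps(arvrunner, tool)    # file deps + Docker images
    job_order = upload_job_order(arvrunner, "wf input", tool, job_order)
    jobs_image = arvados_jobs_image(arvrunner, "arvados/jobs:latest")
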
diff --git a/sdk/cwl/arvados_cwl/task_queue.py b/sdk/cwl/arvados_cwl/task_queue.py
new file mode 100644 (file)
index 0000000..d75fec6
--- /dev/null
@@ -0,0 +1,77 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from future import standard_library
+standard_library.install_aliases()
+from builtins import range
+from builtins import object
+
+import queue
+import threading
+import logging
+
+logger = logging.getLogger('arvados.cwl-runner')
+
+class TaskQueue(object):
+    def __init__(self, lock, thread_count):
+        self.thread_count = thread_count
+        self.task_queue = queue.Queue(maxsize=self.thread_count)
+        self.task_queue_threads = []
+        self.lock = lock
+        self.in_flight = 0
+        self.error = None
+
+        for _ in range(self.thread_count):
+            t = threading.Thread(target=self.task_queue_func)
+            self.task_queue_threads.append(t)
+            t.start()
+
+    def task_queue_func(self):
+        while True:
+            task = self.task_queue.get()
+            if task is None:
+                return
+            try:
+                task()
+            except Exception as e:
+                logger.exception("Unhandled exception running task")
+                self.error = e
+
+            with self.lock:
+                self.in_flight -= 1
+
+    def add(self, task, unlock, check_done):
+        if self.thread_count > 1:
+            with self.lock:
+                self.in_flight += 1
+        else:
+            task()
+            return
+
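+        # The caller holds `unlock` (a reentrant lock shared with the workers).
+        # Release it while blocking on the bounded queue so workers can finish
+        # tasks that need the lock, and reacquire it before each retry.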
+        while True:
+            try:
+                unlock.release()
+                if check_done.is_set():
+                    return
+                self.task_queue.put(task, block=True, timeout=3)
+                return
+            except queue.Full:
+                pass
+            finally:
+                unlock.acquire()
+
+
+    def drain(self):
+        try:
+            # Drain queue
+            while not self.task_queue.empty():
+                self.task_queue.get(True, .1)
+        except queue.Empty:
+            pass
+
+    def join(self):
+        for t in self.task_queue_threads:
+            self.task_queue.put(None)
+        for t in self.task_queue_threads:
+            t.join()
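
A minimal usage sketch for TaskQueue. One assumption worth noting: the
shared lock must be reentrant, because add() takes it again internally
while the caller already holds it (the executor passes a
threading.Condition backed by an RLock, which satisfies this):

    import threading
    from arvados_cwl.task_queue import TaskQueue

    lock = threading.Condition(threading.RLock())
    shutdown = threading.Event()
    tq = TaskQueue(lock, thread_count=4)

    with lock:
        for i in range(10):
            # add() releases the lock while blocking on the bounded queue,
            # then reacquires it before returning.
            tq.add(lambda i=i: print("task", i), lock, shutdown)

    tq.join()    # send one sentinel per worker and wait for them to exit
    tq.drain()   # discard anything left unprocessed
    if tq.error:
        raise tq.error
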
diff --git a/sdk/cwl/arvados_cwl/util.py b/sdk/cwl/arvados_cwl/util.py
new file mode 100644 (file)
index 0000000..776fc6b
--- /dev/null
@@ -0,0 +1,34 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import datetime
+from arvados.errors import ApiError
+
+def get_intermediate_collection_info(workflow_step_name, current_container, intermediate_output_ttl):
+    if workflow_step_name:
+        name = "Intermediate collection for step %s" % workflow_step_name
+    else:
+        name = "Intermediate collection"
+    trash_time = None
+    if intermediate_output_ttl > 0:
+        trash_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=intermediate_output_ttl)
+    container_uuid = None
+    if current_container:
+        container_uuid = current_container['uuid']
+    props = {"type": "intermediate", "container": container_uuid}
+
+    return {"name": name, "trash_at": trash_time, "properties": props}
+
+def get_current_container(api, num_retries=0, logger=None):
+    current_container = None
+    try:
+        current_container = api.containers().current().execute(num_retries=num_retries)
+    except ApiError as e:
+        # Status code 404 just means we're not running in a container.
+        if e.resp.status != 404:
+            if logger:
+                logger.info("Getting current container: %s", e)
+            raise
+
+    return current_container
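
A short sketch combining these helpers when naming an intermediate output
collection (assumes a configured `arvados.api()` client; the step name and
TTL are illustrative):

    import arvados
    from arvados_cwl.util import (get_current_container,
                                  get_intermediate_collection_info)

    api = arvados.api()
    current = get_current_container(api, num_retries=3)
    info = get_intermediate_collection_info("sort_step", current, 3600)
    # info["name"]       -> "Intermediate collection for step sort_step"
    # info["trash_at"]   -> UTC now + 1 hour (None when the TTL is 0)
    # info["properties"] -> {"type": "intermediate", "container": <uuid or None>}
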
diff --git a/sdk/cwl/arvados_version.py b/sdk/cwl/arvados_version.py
new file mode 100644 (file)
index 0000000..d13dd5e
--- /dev/null
@@ -0,0 +1,55 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import subprocess
+import time
+import os
+import re
+
+SETUP_DIR = os.path.dirname(__file__) or '.'
+
+def git_latest_tag():
+    gittags = subprocess.check_output(['git', 'tag', '-l']).split()
+    gittags.sort(key=lambda s: [int(u) for u in s.split(b'.')], reverse=True)
+    return str(next(iter(gittags)).decode('utf-8'))
+
+def choose_version_from():
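+    # Version-stamp from whichever of sdk/python or sdk/cwl changed most
+    # recently, so arvados-cwl-runner picks up SDK changes in its version.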
+    sdk_ts = subprocess.check_output(
+        ['git', 'log', '--first-parent', '--max-count=1',
+         '--format=format:%ct', os.path.join(SETUP_DIR, "../python")]).strip()
+    cwl_ts = subprocess.check_output(
+        ['git', 'log', '--first-parent', '--max-count=1',
+         '--format=format:%ct', SETUP_DIR]).strip()
+    if int(sdk_ts) > int(cwl_ts):
+        getver = os.path.join(SETUP_DIR, "../python")
+    else:
+        getver = SETUP_DIR
+    return getver
+
+def git_timestamp_tag():
+    gitinfo = subprocess.check_output(
+        ['git', 'log', '--first-parent', '--max-count=1',
+         '--format=format:%ct', choose_version_from()]).strip()
+    return str(time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo))))
+
+def save_version(setup_dir, module, v):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'w') as fp:
+        return fp.write("__version__ = '%s'\n" % v)
+
+def read_version(setup_dir, module):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'r') as fp:
+        return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+
+def get_version(setup_dir, module):
+    env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
+
+    if env_version:
+        save_version(setup_dir, module, env_version)
+    else:
+        try:
+            save_version(setup_dir, module, git_latest_tag() + git_timestamp_tag())
+        except (subprocess.CalledProcessError, OSError):
+            pass
+
+    return read_version(setup_dir, module)
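
In short: the version is the latest git tag plus a timestamp suffix derived
from whichever of sdk/python or sdk/cwl changed last, unless
ARVADOS_BUILDING_VERSION pins it. A sketch of both modes (run from the
sdk/cwl checkout; the pinned value is illustrative):

    import os
    import arvados_version

    os.environ["ARVADOS_BUILDING_VERSION"] = "1.4.0"
    v = arvados_version.get_version(".", "arvados_cwl")  # writes arvados_cwl/_version.py
    assert v == "1.4.0"

    del os.environ["ARVADOS_BUILDING_VERSION"]
    v = arvados_version.get_version(".", "arvados_cwl")
    # e.g. "1.3.0.20190314141126" -- <latest tag>.<YYYYMMDDHHMMSS of last commit>
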
diff --git a/sdk/cwl/bin/arvados-cwl-runner b/sdk/cwl/bin/arvados-cwl-runner
new file mode 100755 (executable)
index 0000000..55ce31e
--- /dev/null
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import sys
+
+from arvados_cwl import main
+
+sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
diff --git a/sdk/cwl/bin/cwl-runner b/sdk/cwl/bin/cwl-runner
new file mode 100755 (executable)
index 0000000..55ce31e
--- /dev/null
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import sys
+
+from arvados_cwl import main
+
+sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
diff --git a/sdk/cwl/fpm-info.sh b/sdk/cwl/fpm-info.sh
new file mode 100644 (file)
index 0000000..5c47532
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+case "$TARGET" in
+    debian8)
+        fpm_depends+=(libgnutls-deb0-28 libcurl3-gnutls)
+        ;;
+    debian* | ubuntu*)
+        fpm_depends+=(libcurl3-gnutls libpython2.7)
+        ;;
+esac
+
+fpm_args+=(--conflicts=python-cwltool --conflicts=cwltool)
diff --git a/sdk/cwl/gittaggers.py b/sdk/cwl/gittaggers.py
new file mode 100644 (file)
index 0000000..d6a4c24
--- /dev/null
@@ -0,0 +1,48 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from builtins import str
+from builtins import next
+
+from setuptools.command.egg_info import egg_info
+import subprocess
+import time
+import os
+
+SETUP_DIR = os.path.dirname(__file__) or '.'
+
+def choose_version_from():
+    sdk_ts = subprocess.check_output(
+        ['git', 'log', '--first-parent', '--max-count=1',
+         '--format=format:%ct', os.path.join(SETUP_DIR, "../python")]).strip()
+    cwl_ts = subprocess.check_output(
+        ['git', 'log', '--first-parent', '--max-count=1',
+         '--format=format:%ct', SETUP_DIR]).strip()
+    if int(sdk_ts) > int(cwl_ts):
+        getver = os.path.join(SETUP_DIR, "../python")
+    else:
+        getver = SETUP_DIR
+    return getver
+
+class EggInfoFromGit(egg_info):
+    """Tag the build with git commit timestamp.
+
+    If a build tag has already been set (e.g., "egg_info -b", building
+    from source package), leave it alone.
+    """
+    def git_latest_tag(self):
+        gittags = subprocess.check_output(['git', 'tag', '-l']).split()
+        gittags.sort(key=lambda s: [int(u) for u in s.split(b'.')], reverse=True)
+        return str(next(iter(gittags)).decode('utf-8'))
+
+    def git_timestamp_tag(self):
+        gitinfo = subprocess.check_output(
+            ['git', 'log', '--first-parent', '--max-count=1',
+             '--format=format:%ct', choose_version_from()]).strip()
+        return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
+
+    def tags(self):
+        if self.tag_build is None:
+            self.tag_build = self.git_latest_tag() + self.git_timestamp_tag()
+        return egg_info.tags(self)
diff --git a/sdk/cwl/setup.py b/sdk/cwl/setup.py
new file mode 100644 (file)
index 0000000..d97e742
--- /dev/null
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+import os
+import sys
+
+from setuptools import setup, find_packages
+
+SETUP_DIR = os.path.dirname(__file__) or '.'
+README = os.path.join(SETUP_DIR, 'README.rst')
+
+import arvados_version
+version = arvados_version.get_version(SETUP_DIR, "arvados_cwl")
+
+setup(name='arvados-cwl-runner',
+      version=version,
+      description='Arvados Common Workflow Language runner',
+      long_description=open(README).read(),
+      author='Arvados',
+      author_email='info@arvados.org',
+      url="https://arvados.org",
+      download_url="https://github.com/curoverse/arvados.git",
+      license='Apache 2.0',
+      packages=find_packages(),
+      package_data={'arvados_cwl': ['arv-cwl-schema.yml']},
+      scripts=[
+          'bin/cwl-runner',
+          'bin/arvados-cwl-runner',
+      ],
+      # Note that arvados/build/run-build-packages.sh looks at this
+      # file to determine what version of cwltool and schema-salad to build.
+      install_requires=[
+          'cwltool==1.0.20181217162649',
+          'schema-salad==3.0.20181129082112',
+          'typing >= 3.6.4',
+          'ruamel.yaml >=0.15.54, <=0.15.77',
+          'arvados-python-client>=1.3.0.20190205182514',
+          'setuptools',
+          'ciso8601 >= 2.0.0',
+      ],
+      extras_require={
+          ':os.name=="posix" and python_version<"3"': ['subprocess32 >= 3.5.1'],
+          ':python_version<"3"': ['pytz'],
+      },
+      data_files=[
+          ('share/doc/arvados-cwl-runner', ['LICENSE-2.0.txt', 'README.rst']),
+      ],
+      classifiers=[
+          'Programming Language :: Python :: 2',
+          'Programming Language :: Python :: 3',
+      ],
+      test_suite='tests',
+      tests_require=[
+          'mock>=1.0',
+          'subprocess32>=3.5.1',
+      ],
+      zip_safe=True
+      )
diff --git a/sdk/cwl/test_with_arvbox.sh b/sdk/cwl/test_with_arvbox.sh
new file mode 100755 (executable)
index 0000000..37eb517
--- /dev/null
@@ -0,0 +1,148 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+set -x
+
+if ! which arvbox >/dev/null ; then
+    export PATH=$PATH:$(readlink -f $(dirname $0)/../../tools/arvbox/bin)
+fi
+
+reset_container=1
+leave_running=0
+config=dev
+tag="latest"
+pythoncmd=python
+suite=conformance
+runapi=containers
+
+while test -n "$1" ; do
+    arg="$1"
+    case "$arg" in
+        --no-reset-container)
+            reset_container=0
+            shift
+            ;;
+        --leave-running)
+            leave_running=1
+            shift
+            ;;
+        --config)
+            config=$2
+            shift ; shift
+            ;;
+        --tag)
+            tag=$2
+            shift ; shift
+            ;;
+        --build)
+            build=1
+            shift
+            ;;
+        --pythoncmd)
+            pythoncmd=$2
+            shift ; shift
+            ;;
+        --suite)
+            suite=$2
+            shift ; shift
+            ;;
+        --api)
+            runapi=$2
+            shift ; shift
+            ;;
+        -h|--help)
+            echo "$0 [--no-reset-container] [--leave-running] [--config dev|localdemo] [--tag docker_tag] [--build] [--pythoncmd python(2|3)] [--suite (integration|conformance)] [--api (jobs|containers)]"
+            exit
+            ;;
+        *)
+            break
+            ;;
+    esac
+done
+
+if test -z "$ARVBOX_CONTAINER" ; then
+   export ARVBOX_CONTAINER=cwltest
+fi
+
+if test $reset_container = 1 ; then
+    arvbox stop
+    docker rm $ARVBOX_CONTAINER
+    arvbox reset -f
+fi
+
+arvbox start $config $tag
+
+arvbox pipe <<EOF
+set -eu -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+export PYCMD=$pythoncmd
+
+if test $config = dev ; then
+  cd /usr/src/arvados/sdk/cwl
+  \$PYCMD setup.py sdist
+  pip_install \$(ls -r dist/arvados-cwl-runner-*.tar.gz | head -n1)
+fi
+
+set -x
+
+if [ \$PYCMD = "python3" ]; then
+    pip3 install cwltest
+else
+    pip install cwltest
+fi
+
+mkdir -p /tmp/cwltest
+cd /tmp/cwltest
+if ! test -d common-workflow-language ; then
+  git clone https://github.com/common-workflow-language/common-workflow-language.git
+fi
+cd common-workflow-language
+git pull
+export ARVADOS_API_HOST=localhost:8000
+export ARVADOS_API_HOST_INSECURE=1
+export ARVADOS_API_TOKEN=\$(cat /var/lib/arvados/superuser_token)
+
+
+if test -n "$build" ; then
+   /usr/src/arvados/build/build-dev-docker-jobs-image.sh
+elif test "$tag" = "latest" ; then
+  arv-keepdocker --pull arvados/jobs $tag
+else
+  jobsimg=\$(curl https://versions.arvados.org/v1/commit/$tag | python -c "import json; import sys; sys.stdout.write(json.load(sys.stdin)['Versions']['Docker']['arvados/jobs'])")
+  arv-keepdocker --pull arvados/jobs \$jobsimg
+  docker tag arvados/jobs:\$jobsimg arvados/jobs:latest
+  arv-keepdocker arvados/jobs latest
+fi
+
+cat >/tmp/cwltest/arv-cwl-jobs <<EOF2
+#!/bin/sh
+exec arvados-cwl-runner --api=jobs \\\$@
+EOF2
+chmod +x /tmp/cwltest/arv-cwl-jobs
+
+cat >/tmp/cwltest/arv-cwl-containers <<EOF2
+#!/bin/sh
+exec arvados-cwl-runner --api=containers \\\$@
+EOF2
+chmod +x /tmp/cwltest/arv-cwl-containers
+
+env
+if [[ "$suite" = "conformance" ]] ; then
+   exec ./run_test.sh RUNNER=/tmp/cwltest/arv-cwl-${runapi} EXTRA=--compute-checksum $@
+elif [[ "$suite" = "integration" ]] ; then
+   cd /usr/src/arvados/sdk/cwl/tests
+   exec ./arvados-tests.sh $@
+fi
+EOF
+
+CODE=$?
+
+if test $leave_running = 0 ; then
+    arvbox stop
+fi
+
+exit $CODE
diff --git a/sdk/cwl/tests/12213-keepref-expr.cwl b/sdk/cwl/tests/12213-keepref-expr.cwl
new file mode 100644 (file)
index 0000000..697f380
--- /dev/null
@@ -0,0 +1,36 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: ExpressionTool
+requirements:
+  InlineJavascriptRequirement: {}
+inputs:
+  dir: Directory
+outputs:
+  out: Directory[]
+expression: |
+  ${
+    var samples = {};
+    var pattern = /^(.+)(_S[0-9]{1,3}_)(.+)$/;
+    inputs.dir.listing = inputs.dir.listing.sort(function(a, b) { return a.basename.localeCompare(b.basename); });
+    for (var i = 0; i < inputs.dir.listing.length; i++) {
+      var file = inputs.dir.listing[i];
+      var groups = file.basename.match(pattern);
+      if (groups) {
+        var sampleid = groups[1];
+        if (!samples[sampleid]) {
+          samples[sampleid] = [];
+        }
+        samples[sampleid].push(file);
+      }
+    }
+    var dirs = [];
+    Object.keys(samples).sort().forEach(function(sampleid, _) {
+      dirs.push({"class": "Directory",
+                 "basename": sampleid,
+                 "listing": samples[sampleid]});
+    });
+    return {"out": dirs};
+  }
diff --git a/sdk/cwl/tests/12213-keepref-job.yml b/sdk/cwl/tests/12213-keepref-job.yml
new file mode 100644 (file)
index 0000000..60c7657
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+dir:
+  class: Directory
+  location: samples
\ No newline at end of file
diff --git a/sdk/cwl/tests/12213-keepref-tool.cwl b/sdk/cwl/tests/12213-keepref-tool.cwl
new file mode 100644 (file)
index 0000000..e4730cf
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+requirements:
+  InlineJavascriptRequirement: {}
+inputs:
+  fastqsdir: Directory
+outputs:
+  out: stdout
+baseCommand: [zcat]
+stdout: $(inputs.fastqsdir.listing[0].nameroot).txt
+arguments:
+  - $(inputs.fastqsdir.listing[0].path)
+  - $(inputs.fastqsdir.listing[1].path)
diff --git a/sdk/cwl/tests/12213-keepref-wf.cwl b/sdk/cwl/tests/12213-keepref-wf.cwl
new file mode 100644 (file)
index 0000000..343df0b
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+requirements:
+  ScatterFeatureRequirement: {}
+inputs:
+  dir: Directory
+outputs:
+  out:
+    type: File[]
+    outputSource: tool/out
+steps:
+  ex:
+    in:
+      dir: dir
+    out: [out]
+    run: 12213-keepref-expr.cwl
+  tool:
+    in:
+      fastqsdir: ex/out
+    out: [out]
+    scatter: fastqsdir
+    run: 12213-keepref-tool.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/12418-glob-empty-collection.cwl b/sdk/cwl/tests/12418-glob-empty-collection.cwl
new file mode 100644 (file)
index 0000000..f5e5e70
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+{
+   "cwlVersion": "v1.0",
+      "arguments": [
+        "true"
+      ],
+      "class": "CommandLineTool",
+      "inputs": [],
+      "outputs": [
+        {
+          "id": "out",
+          "outputBinding": {
+            "glob": "*.txt"
+          },
+          "type": [
+            "null",
+            "File"
+          ]
+        }
+      ]
+}
\ No newline at end of file
diff --git a/sdk/cwl/tests/13931-size-job.yml b/sdk/cwl/tests/13931-size-job.yml
new file mode 100644 (file)
index 0000000..97b46dd
--- /dev/null
@@ -0,0 +1,3 @@
+fastq1:
+  class: File
+  location: keep:20850f01122e860fb878758ac1320877+71/sample1_S01_R1_001.fastq.gz
\ No newline at end of file
diff --git a/sdk/cwl/tests/13931-size.cwl b/sdk/cwl/tests/13931-size.cwl
new file mode 100644 (file)
index 0000000..aed1bd6
--- /dev/null
@@ -0,0 +1,10 @@
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  fastq1: File
+outputs:
+  out: stdout
+baseCommand: echo
+arguments:
+  - $(inputs.fastq1.size)
+stdout: size.txt
\ No newline at end of file
diff --git a/sdk/cwl/tests/13976-keepref-wf.cwl b/sdk/cwl/tests/13976-keepref-wf.cwl
new file mode 100644 (file)
index 0000000..7aa7b0a
--- /dev/null
@@ -0,0 +1,17 @@
+cwlVersion: v1.0
+class: CommandLineTool
+requirements:
+  - class: InlineJavascriptRequirement
+arguments:
+  - ls
+  - -l
+  - $(inputs.hello)
+inputs:
+  hello:
+    type: File
+    default:
+      class: File
+      location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt
+    secondaryFiles:
+      - .idx
+outputs: []
\ No newline at end of file
diff --git a/sdk/cwl/tests/__init__.py b/sdk/cwl/tests/__init__.py
new file mode 100644 (file)
index 0000000..ebb638e
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+
diff --git a/sdk/cwl/tests/arvados-tests.sh b/sdk/cwl/tests/arvados-tests.sh
new file mode 100755 (executable)
index 0000000..7727ebf
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+if ! arv-get d7514270f356df848477718d58308cc4+94 > /dev/null ; then
+    arv-put --portable-data-hash testdir/*
+fi
+if ! arv-get f225e6259bdd63bc7240599648dde9f1+97 > /dev/null ; then
+    arv-put --portable-data-hash hg19/*
+fi
+if ! arv-get 4d8a70b1e63b2aad6984e40e338e2373+69 > /dev/null ; then
+    arv-put --portable-data-hash secondaryFiles/hello.txt*
+fi
+if ! arv-get 20850f01122e860fb878758ac1320877+71 > /dev/null ; then
+    arv-put --portable-data-hash samples/sample1_S01_R1_001.fastq.gz
+fi
+
+exec cwltest --test arvados-tests.yml --tool arvados-cwl-runner "$@" -- --disable-reuse --compute-checksum --api=containers
diff --git a/sdk/cwl/tests/arvados-tests.yml b/sdk/cwl/tests/arvados-tests.yml
new file mode 100644 (file)
index 0000000..8b8ff28
--- /dev/null
@@ -0,0 +1,245 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+- job: dir-job.yml
+  output:
+    "outlist": {
+        "size": 20,
+        "location": "output.txt",
+        "class": "File",
+        "checksum": "sha1$13cda8661796ae241da3a18668fb552161a72592"
+    }
+  tool: keep-dir-test-input.cwl
+  doc: Test directory in keep
+
+- job: dir-job2.yml
+  output:
+    "outlist": {
+        "size": 20,
+        "location": "output.txt",
+        "class": "File",
+        "checksum": "sha1$13cda8661796ae241da3a18668fb552161a72592"
+    }
+  tool: keep-dir-test-input.cwl
+  doc: Test directory in keep
+
+- job: null
+  output:
+    "outlist": {
+        "size": 20,
+        "location": "output.txt",
+        "class": "File",
+        "checksum": "sha1$13cda8661796ae241da3a18668fb552161a72592"
+    }
+  tool: keep-dir-test-input2.cwl
+  doc: Test default directory in keep
+
+- job: null
+  output:
+    "outlist": {
+        "size": 20,
+        "location": "output.txt",
+        "class": "File",
+        "checksum": "sha1$13cda8661796ae241da3a18668fb552161a72592"
+    }
+  tool: keep-dir-test-input3.cwl
+  doc: Test default directory in keep
+
+- job: octo.yml
+  output: {}
+  tool: cat.cwl
+  doc: Test hashes in filenames
+
+- job: listing-job.yml
+  output: {
+    "out": {
+        "class": "File",
+        "location": "output.txt",
+        "size": 5,
+        "checksum": "sha1$724ba28f4a9a1b472057ff99511ed393a45552e1"
+    }
+  }
+  tool: wf/listing_shallow.cwl
+  doc: test shallow directory listing
+
+- job: listing-job.yml
+  output: {
+    "out": {
+        "class": "File",
+        "location": "output.txt",
+        "size": 5,
+        "checksum": "sha1$724ba28f4a9a1b472057ff99511ed393a45552e1"
+    }
+  }
+  tool: wf/listing_none.cwl
+  doc: test no directory listing
+
+- job: listing-job.yml
+  output: {
+    "out": {
+        "class": "File",
+        "location": "output.txt",
+        "size": 5,
+        "checksum": "sha1$724ba28f4a9a1b472057ff99511ed393a45552e1"
+    }
+  }
+  tool: wf/listing_deep.cwl
+  doc: test deep directory listing
+
+- job: null
+  output: {}
+  tool: noreuse.cwl
+  doc: "Test arv:ReuseRequirement"
+
+- job: 12213-keepref-job.yml
+  output: {
+    "out": [
+        {
+            "checksum": "sha1$1c78028c0d69163391eef89316b44a57bde3fead",
+            "location": "sample1_S01_R1_001.fastq.txt",
+            "class": "File",
+            "size": 32
+        },
+        {
+            "checksum": "sha1$5f3b4df1b0f7fdced751fc6079778600ad9fdb45",
+            "location": "sample2_S01_R1_001.fastq.txt",
+            "class": "File",
+            "size": 32
+        }
+    ]
+  }
+  tool: 12213-keepref-wf.cwl
+  doc: "Test manipulating keep references with expression tools"
+
+- job: null
+  output:
+    out: null
+  tool: 12418-glob-empty-collection.cwl
+  doc: "Test glob output on empty collection"
+
+- job: null
+  output:
+    out: null
+  tool: 13976-keepref-wf.cwl
+  doc: "Test issue 13976"
+
+- job: null
+  output:
+    out: out
+  tool: wf/runin-wf.cwl
+  doc: "RunInSingleContainer cwl.input.json needs to be consistent with pathmapper manipulations"
+
+- job: secondary/wf-job.yml
+  output: {}
+  tool: secondary/wf.cwl
+  doc: "RunInSingleContainer applies secondaryFile discovery & manipulation before generating cwl.input.yml"
+
+- job: null
+  output:
+    out: out
+  tool: wf/runin-with-ttl-wf.cwl
+  doc: "RunInSingleContainer respects outputTTL"
+
+- job: secret_test_job.yml
+  output: {
+    "out": {
+        "class": "File",
+        "location": "hashed_example.txt",
+        "size": 47,
+        "checksum": "sha1$f45341c7f03b4dd10646c402908d1aea0d580f5d"
+    }
+  }
+  tool: wf/secret_wf.cwl
+  doc: "Test secret input parameters"
+  tags: [ secrets ]
+
+- job: null
+  output:
+    out: null
+  tool: wf/runin-reqs-wf.cwl
+  doc: "RunInSingleContainer handles dynamic resource requests on step"
+
+- job: null
+  output:
+    out: null
+  tool: wf/runin-reqs-wf2.cwl
+  doc: "RunInSingleContainer handles dynamic resource requests on embedded subworkflow"
+
+- job: null
+  output:
+    out: null
+  tool: wf/runin-reqs-wf3.cwl
+  should_fail: true
+  doc: "RunInSingleContainer disallows dynamic resource request on subworkflow steps"
+
+- job: null
+  output:
+    out: null
+  tool: wf/runin-reqs-wf4.cwl
+  doc: "RunInSingleContainer discovers static resource request in subworkflow steps"
+
+- job: secondaryFiles/inp3.yml
+  output: {}
+  tool: secondaryFiles/example1.cwl
+  doc: Discover secondaryFiles at runtime if they are in keep
+
+- job: null
+  output: {}
+  tool: secondaryFiles/example3.cwl
+  doc: Discover secondaryFiles on default values
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf1.cwl
+  doc: "Can have separate default parameters including directory and file inside same directory"
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf2.cwl
+  doc: "Can have a parameter default value that is a directory literal with a file literal"
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf3.cwl
+  doc: "Do not accept a directory literal without a basename"
+  should_fail: true
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf4.cwl
+  doc: default in embedded subworkflow missing 'id' field
+  should_fail: true
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf5.cwl
+  doc: default in embedded subworkflow
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf6.cwl
+  doc: default in RunInSingleContainer step
+
+- job: null
+  output:
+    out: null
+  tool: wf-defaults/wf7.cwl
+  doc: workflow level default in RunInSingleContainer
+
+- job: 13931-size-job.yml
+  output:
+    "out": {
+        "checksum": "sha1$5bf6e5357bd42a6b1d2a3a040e16a91490064d26",
+        "location": "size.txt",
+        "class": "File",
+        "size": 3
+    }
+  tool: 13931-size.cwl
+  doc: Test that size is set for files in Keep
diff --git a/sdk/cwl/tests/cat.cwl b/sdk/cwl/tests/cat.cwl
new file mode 100644 (file)
index 0000000..d8249d4
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  - id: inp
+    type: File
+    inputBinding: {}
+outputs: []
+baseCommand: cat
diff --git a/sdk/cwl/tests/collection_per_tool/a.txt b/sdk/cwl/tests/collection_per_tool/a.txt
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/collection_per_tool/b.txt b/sdk/cwl/tests/collection_per_tool/b.txt
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/collection_per_tool/c.txt b/sdk/cwl/tests/collection_per_tool/c.txt
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/collection_per_tool/collection_per_tool.cwl b/sdk/cwl/tests/collection_per_tool/collection_per_tool.cwl
new file mode 100644 (file)
index 0000000..f864f49
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+steps:
+  step1:
+    in: []
+    out: []
+    run: step1.cwl
+  step2:
+    in: []
+    out: []
+    run: step2.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/collection_per_tool/collection_per_tool_packed.cwl b/sdk/cwl/tests/collection_per_tool/collection_per_tool_packed.cwl
new file mode 100644 (file)
index 0000000..9bf1c20
--- /dev/null
@@ -0,0 +1,85 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+{
+    "$graph": [
+        {
+            "class": "Workflow",
+            "id": "#main",
+            "inputs": [],
+            "outputs": [],
+            "steps": [
+                {
+                    "id": "#main/step1",
+                    "in": [],
+                    "out": [],
+                    "run": "#step1.cwl"
+                },
+                {
+                    "id": "#main/step2",
+                    "in": [],
+                    "out": [],
+                    "run": "#step2.cwl"
+                }
+            ]
+        },
+        {
+            "arguments": [
+                "echo",
+                "$(inputs.a)",
+                "$(inputs.b)"
+            ],
+            "class": "CommandLineTool",
+            "id": "#step1.cwl",
+            "inputs": [
+                {
+                    "default": {
+                        "class": "File",
+                        "location": "keep:b9fca8bf06b170b8507b80b2564ee72b+57/a.txt"
+                    },
+                    "id": "#step1.cwl/a",
+                    "type": "File"
+                },
+                {
+                    "default": {
+                        "class": "File",
+                        "location": "keep:b9fca8bf06b170b8507b80b2564ee72b+57/b.txt"
+                    },
+                    "id": "#step1.cwl/b",
+                    "type": "File"
+                }
+            ],
+            "outputs": []
+        },
+        {
+            "arguments": [
+                "echo",
+                "$(inputs.c)",
+                "$(inputs.b)"
+            ],
+            "class": "CommandLineTool",
+            "id": "#step2.cwl",
+            "inputs": [
+                {
+                    "default": {
+                        "class": "File",
+                        "location": "keep:8e2d09a066d96cdffdd2be41579e4e2e+57/b.txt"
+                    },
+                    "id": "#step2.cwl/b",
+                    "type": "File"
+                },
+                {
+                    "default": {
+                        "class": "File",
+                        "location": "keep:8e2d09a066d96cdffdd2be41579e4e2e+57/c.txt"
+                    },
+                    "id": "#step2.cwl/c",
+                    "type": "File"
+                }
+            ],
+            "outputs": []
+        }
+    ],
+    "cwlVersion": "v1.0"
+}
\ No newline at end of file
diff --git a/sdk/cwl/tests/collection_per_tool/step1.cwl b/sdk/cwl/tests/collection_per_tool/step1.cwl
new file mode 100644 (file)
index 0000000..e96144a
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  a:
+    type: File
+    default:
+      class: File
+      location: a.txt
+  b:
+    type: File
+    default:
+      class: File
+      location: b.txt
+outputs: []
+arguments: [echo, $(inputs.a), $(inputs.b)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/collection_per_tool/step2.cwl b/sdk/cwl/tests/collection_per_tool/step2.cwl
new file mode 100644 (file)
index 0000000..e03f9b3
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  c:
+    type: File
+    default:
+      class: File
+      location: c.txt
+  b:
+    type: File
+    default:
+      class: File
+      location: b.txt
+outputs: []
+arguments: [echo, $(inputs.c), $(inputs.b)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/dir-job.yml b/sdk/cwl/tests/dir-job.yml
new file mode 100644 (file)
index 0000000..5d03e29
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+indir:
+  class: Directory
+  location: keep:d7514270f356df848477718d58308cc4+94
\ No newline at end of file
diff --git a/sdk/cwl/tests/dir-job2.yml b/sdk/cwl/tests/dir-job2.yml
new file mode 100644 (file)
index 0000000..b1f3e46
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+indir:
+  class: Directory
+  location: keep:d7514270f356df848477718d58308cc4+94/
diff --git a/sdk/cwl/tests/federation/README b/sdk/cwl/tests/federation/README
new file mode 100644 (file)
index 0000000..e5eb04c
--- /dev/null
@@ -0,0 +1,44 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+Arvados federated workflow testing
+
+Requires cwltool 1.0.20181109150732 or later
+
+Create main-test.json:
+
+{
+    "acr": "/path/to/arvados-cwl-runner",
+    "arvado_api_host_insecure": false,
+    "arvados_api_hosts": [
+        "c97qk.arvadosapi.com",
+        "4xphq.arvadosapi.com",
+        "9tee4.arvadosapi.com"
+    ],
+    "arvados_api_token": "...",
+    "arvados_cluster_ids": [
+        "c97qk",
+        "4xphq",
+        "9tee4"
+    ]
+}
+
+Or create an arvbox test cluster:
+
+$ cwltool --enable-ext arvbox-make-federation.cwl --arvbox_base ~/.arvbox/ --in_acr /path/to/arvados-cwl-runner > main-test.json
+
+
+Run tests:
+
+$ cwltool main.cwl main-test.json
+
+
+List test cases:
+
+$ cwltool --print-targets main.cwl
+
+
+Run a specific test case:
+
+$ cwltool -t twostep-remote-copy-to-home main.cwl main-test.json
diff --git a/sdk/cwl/tests/federation/arvbox-make-federation.cwl b/sdk/cwl/tests/federation/arvbox-make-federation.cwl
new file mode 100644 (file)
index 0000000..9a08195
--- /dev/null
@@ -0,0 +1,72 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+requirements:
+  ScatterFeatureRequirement: {}
+  StepInputExpressionRequirement: {}
+  cwltool:LoadListingRequirement:
+    loadListing: no_listing
+  InlineJavascriptRequirement: {}
+inputs:
+  containers:
+    type: string[]
+    default: [fedbox1, fedbox2, fedbox3]
+  arvbox_base: Directory
+  in_acr: string?
+  insecure:
+    type: boolean
+    default: true
+outputs:
+  arvados_api_token:
+    type: string
+    outputSource: setup-user/test_user_token
+  arvados_api_hosts:
+    type: string[]
+    outputSource: start/container_host
+  arvados_cluster_ids:
+    type: string[]
+    outputSource: start/cluster_id
+  acr:
+    type: string?
+    outputSource: in_acr
+  arvado_api_host_insecure:
+    type: boolean
+    outputSource: insecure
+steps:
+  mkdir:
+    in:
+      containers: containers
+      arvbox_base: arvbox_base
+    out: [arvbox_data]
+    run: arvbox/mkdir.cwl
+  start:
+    in:
+      container_name: containers
+      arvbox_data: mkdir/arvbox_data
+    out: [cluster_id, container_host, arvbox_data_out, superuser_token]
+    scatter: [container_name, arvbox_data]
+    scatterMethod: dotproduct
+    run: arvbox/start.cwl
+  fed-config:
+    in:
+      container_name: containers
+      this_cluster_id: start/cluster_id
+      cluster_ids: start/cluster_id
+      cluster_hosts: start/container_host
+      arvbox_data: start/arvbox_data_out
+    out: []
+    scatter: [container_name, this_cluster_id, arvbox_data]
+    scatterMethod: dotproduct
+    run: arvbox/fed-config.cwl
+  setup-user:
+    in:
+      container_host: {source: start/container_host, valueFrom: "$(self[0])"}
+      superuser_token: {source: start/superuser_token, valueFrom: "$(self[0])"}
+    out: [test_user_uuid, test_user_token]
+    run: arvbox/setup-user.cwl
diff --git a/sdk/cwl/tests/federation/arvbox/fed-config.cwl b/sdk/cwl/tests/federation/arvbox/fed-config.cwl
new file mode 100644 (file)
index 0000000..77567ee
--- /dev/null
@@ -0,0 +1,66 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+inputs:
+  container_name: string
+  this_cluster_id: string
+  cluster_ids: string[]
+  cluster_hosts: string[]
+  arvbox_data: Directory
+outputs:
+  arvbox_data_out:
+    type: Directory
+    outputBinding:
+      outputEval: $(inputs.arvbox_data)
+requirements:
+  EnvVarRequirement:
+    envDef:
+      ARVBOX_CONTAINER: $(inputs.container_name)
+      ARVBOX_DATA: $(inputs.arvbox_data.path)
+  InitialWorkDirRequirement:
+    listing:
+      - entryname: cluster_config.yml.override
+        entry: >-
+          ${
+          var remoteClusters = {};
+          for (var i = 0; i < inputs.cluster_ids.length; i++) {
+            remoteClusters[inputs.cluster_ids[i]] = {
+              "Host": inputs.cluster_hosts[i],
+              "Proxy": true,
+              "Insecure": true
+            };
+          }
+          var r = {"Clusters": {}};
+          r["Clusters"][inputs.this_cluster_id] = {"RemoteClusters": remoteClusters};
+          return JSON.stringify(r);
+          }
+      - entryname: application.yml.override
+        entry: >-
+          ${
+          var remoteClusters = {};
+          for (var i = 0; i < inputs.cluster_ids.length; i++) {
+            remoteClusters[inputs.cluster_ids[i]] = inputs.cluster_hosts[i];
+          }
+          return JSON.stringify({"development": {"remote_hosts": remoteClusters}});
+          }
+  cwltool:LoadListingRequirement:
+    loadListing: no_listing
+  ShellCommandRequirement: {}
+  InlineJavascriptRequirement: {}
+  cwltool:InplaceUpdateRequirement:
+    inplaceUpdate: true
+arguments:
+  - shellQuote: false
+    valueFrom: |
+      docker cp cluster_config.yml.override $(inputs.container_name):/var/lib/arvados
+      docker cp application.yml.override $(inputs.container_name):/usr/src/arvados/services/api/config
+      arvbox sv restart api
+      arvbox sv restart controller
+      arvbox sv restart keepstore0
+      arvbox sv restart keepstore1
diff --git a/sdk/cwl/tests/federation/arvbox/mkdir.cwl b/sdk/cwl/tests/federation/arvbox/mkdir.cwl
new file mode 100644 (file)
index 0000000..727d491
--- /dev/null
@@ -0,0 +1,47 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+inputs:
+  containers:
+    type:
+      type: array
+      items: string
+      inputBinding:
+        position: 3
+        valueFrom: |
+          ${
+          return "base/"+self;
+          }
+  arvbox_base: Directory
+outputs:
+  arvbox_data:
+    type: Directory[]
+    outputBinding:
+      glob: |
+        ${
+        var r = [];
+        for (var i = 0; i < inputs.containers.length; i++) {
+          r.push("base/"+inputs.containers[i]);
+        }
+        return r;
+        }
+requirements:
+  InitialWorkDirRequirement:
+    listing:
+      - entry: $(inputs.arvbox_base)
+        entryname: base
+        writable: true
+  cwltool:LoadListingRequirement:
+    loadListing: no_listing
+  InlineJavascriptRequirement: {}
+  cwltool:InplaceUpdateRequirement:
+    inplaceUpdate: true
+arguments:
+  - mkdir
+  - "-p"
diff --git a/sdk/cwl/tests/federation/arvbox/setup-user.cwl b/sdk/cwl/tests/federation/arvbox/setup-user.cwl
new file mode 100644 (file)
index 0000000..0fddc1b
--- /dev/null
@@ -0,0 +1,34 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+requirements:
+  EnvVarRequirement:
+    envDef:
+      ARVADOS_API_HOST: $(inputs.container_host)
+      ARVADOS_API_TOKEN: $(inputs.superuser_token)
+      ARVADOS_API_HOST_INSECURE: "true"
+  cwltool:LoadListingRequirement:
+    loadListing: no_listing
+  InlineJavascriptRequirement: {}
+  cwltool:InplaceUpdateRequirement:
+    inplaceUpdate: true
+  DockerRequirement:
+    dockerPull: arvados/jobs
+inputs:
+  container_host: string
+  superuser_token: string
+  make_user_script:
+    type: File
+    default:
+      class: File
+      location: setup_user.py
+outputs:
+  test_user_uuid: string
+  test_user_token: string
+arguments: [python2, $(inputs.make_user_script)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/federation/arvbox/setup_user.py b/sdk/cwl/tests/federation/arvbox/setup_user.py
new file mode 100644 (file)
index 0000000..a456976
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import arvados.errors
+import time
+import json
+
+while True:
+    try:
+        api = arvados.api()
+        break
+    except arvados.errors.ApiError:
+        time.sleep(2)
+
+existing = api.users().list(filters=[["email", "=", "test@example.com"],
+                                     ["is_active", "=", True]], limit=1).execute()
+if existing["items"]:
+    u = existing["items"][0]
+else:
+    u = api.users().create(body={
+        'first_name': 'Test',
+        'last_name': 'User',
+        'email': 'test@example.com',
+        'is_admin': False
+    }).execute()
+    api.users().activate(uuid=u["uuid"]).execute()
+
+tok = api.api_client_authorizations().create(body={
+    "api_client_authorization": {
+        "owner_uuid": u["uuid"]
+    }
+}).execute()
+
+with open("cwl.output.json", "w") as f:
+    json.dump({
+        "test_user_uuid": u["uuid"],
+        "test_user_token": "v2/%s/%s" % (tok["uuid"], tok["api_token"])
+    }, f)
diff --git a/sdk/cwl/tests/federation/arvbox/start.cwl b/sdk/cwl/tests/federation/arvbox/start.cwl
new file mode 100644 (file)
index 0000000..f69775a
--- /dev/null
@@ -0,0 +1,72 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+inputs:
+  container_name: string
+  arvbox_data: Directory
+outputs:
+  cluster_id:
+    type: string
+    outputBinding:
+      glob: status.txt
+      loadContents: true
+      outputEval: |
+        ${
+        var sp = self[0].contents.split("\n");
+        for (var i = 0; i < sp.length; i++) {
+          if (sp[i].startsWith("Cluster id: ")) {
+            return sp[i].substr(12);
+          }
+        }
+        }
+  container_host:
+    type: string
+    outputBinding:
+      glob: status.txt
+      loadContents: true
+      outputEval: |
+        ${
+        var sp = self[0].contents.split("\n");
+        for (var i = 0; i < sp.length; i++) {
+          if (sp[i].startsWith("Container IP: ")) {
+            return sp[i].substr(14)+":8000";
+          }
+        }
+        }
+  superuser_token:
+    type: string
+    outputBinding:
+      glob: superuser_token.txt
+      loadContents: true
+      outputEval: $(self[0].contents.trim())
+  arvbox_data_out:
+    type: Directory
+    outputBinding:
+      outputEval: $(inputs.arvbox_data)
+requirements:
+  EnvVarRequirement:
+    envDef:
+      ARVBOX_CONTAINER: $(inputs.container_name)
+      ARVBOX_DATA: $(inputs.arvbox_data.path)
+  ShellCommandRequirement: {}
+  InitialWorkDirRequirement:
+    listing:
+      - entry: $(inputs.arvbox_data)
+        entryname: $(inputs.container_name)
+        writable: true
+  cwltool:InplaceUpdateRequirement:
+    inplaceUpdate: true
+  InlineJavascriptRequirement: {}
+arguments:
+  - shellQuote: false
+    valueFrom: |
+      set -e
+      arvbox start dev
+      arvbox status > status.txt
+      arvbox cat /var/lib/arvados/superuser_token > superuser_token.txt
\ No newline at end of file
diff --git a/sdk/cwl/tests/federation/arvbox/stop.cwl b/sdk/cwl/tests/federation/arvbox/stop.cwl
new file mode 100644 (file)
index 0000000..2ea4c0f
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+inputs:
+  container_name: string
+outputs: []
+requirements:
+  EnvVarRequirement:
+    envDef:
+      ARVBOX_CONTAINER: $(inputs.container_name)
+arguments: [arvbox, stop]
\ No newline at end of file
diff --git a/sdk/cwl/tests/federation/cases/base-case.cwl b/sdk/cwl/tests/federation/cases/base-case.cwl
new file mode 100644 (file)
index 0000000..4ab3b20
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+  DockerRequirement:
+    dockerPull: arvados/fed-test:base-case
+inputs:
+  inp:
+    type: File
+    inputBinding: {}
+  runOnCluster: string
+outputs:
+  hash:
+    type: File
+    outputSource: md5sum/hash
+steps:
+  md5sum:
+    in:
+      inp: inp
+      runOnCluster: runOnCluster
+    out: [hash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: md5sum.cwl
diff --git a/sdk/cwl/tests/federation/cases/cat.cwl b/sdk/cwl/tests/federation/cases/cat.cwl
new file mode 100644 (file)
index 0000000..17132fe
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  inp:
+    type: File[]
+    inputBinding: {}
+outputs:
+  joined: stdout
+stdout: joined.txt
+baseCommand: cat
diff --git a/sdk/cwl/tests/federation/cases/hint-on-tool.cwl b/sdk/cwl/tests/federation/cases/hint-on-tool.cwl
new file mode 100644 (file)
index 0000000..93e6d2c
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+  DockerRequirement:
+    dockerPull: arvados/fed-test:hint-on-tool
+inputs:
+  inp:
+    type: File
+    inputBinding: {}
+  runOnCluster: string
+outputs:
+  hash:
+    type: File
+    outputSource: md5sum/hash
+steps:
+  md5sum:
+    in:
+      inp: inp
+      runOnCluster: runOnCluster
+    out: [hash]
+    run: md5sum-tool-hint.cwl
diff --git a/sdk/cwl/tests/federation/cases/hint-on-wf.cwl b/sdk/cwl/tests/federation/cases/hint-on-wf.cwl
new file mode 100644 (file)
index 0000000..4323659
--- /dev/null
@@ -0,0 +1,30 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+  DockerRequirement:
+    dockerPull: arvados/fed-test:hint-on-wf
+hints:
+  arv:ClusterTarget:
+    cluster_id: $(inputs.runOnCluster)
+inputs:
+  inp:
+    type: File
+    inputBinding: {}
+  runOnCluster: string
+outputs:
+  hash:
+    type: File
+    outputSource: md5sum/hash
+steps:
+  md5sum:
+    in:
+      inp: inp
+    out: [hash]
+    run: md5sum.cwl
diff --git a/sdk/cwl/tests/federation/cases/md5sum-tool-hint.cwl b/sdk/cwl/tests/federation/cases/md5sum-tool-hint.cwl
new file mode 100644 (file)
index 0000000..726c33b
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+hints:
+  arv:ClusterTarget:
+    cluster_id: $(inputs.runOnCluster)
+inputs:
+  inp: File
+  runOnCluster: string
+outputs:
+  hash:
+    type: File
+    outputBinding:
+      glob: out.txt
+stdin: $(inputs.inp.path)
+stdout: out.txt
+arguments: ["md5sum", "-"]
diff --git a/sdk/cwl/tests/federation/cases/md5sum.cwl b/sdk/cwl/tests/federation/cases/md5sum.cwl
new file mode 100644 (file)
index 0000000..af11999
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+inputs:
+  inp:
+    type: File
+outputs:
+  hash:
+    type: File
+    outputBinding:
+      glob: out.txt
+stdin: $(inputs.inp.path)
+stdout: out.txt
+arguments: ["md5sum", "-"]
diff --git a/sdk/cwl/tests/federation/cases/remote-case.cwl b/sdk/cwl/tests/federation/cases/remote-case.cwl
new file mode 100644 (file)
index 0000000..6683062
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+  DockerRequirement:
+    dockerPull: arvados/fed-test:remote-case
+inputs:
+  inp:
+    type: File
+    inputBinding: {}
+  runOnCluster: string
+outputs:
+  hash:
+    type: File
+    outputSource: md5sum/hash
+steps:
+  md5sum:
+    in:
+      inp: inp
+      runOnCluster: runOnCluster
+    out: [hash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: md5sum.cwl
diff --git a/sdk/cwl/tests/federation/cases/rev-input-to-output.cwl b/sdk/cwl/tests/federation/cases/rev-input-to-output.cwl
new file mode 100644 (file)
index 0000000..0c247a8
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+  ShellCommandRequirement: {}
+inputs:
+  inp:
+    type: File
+outputs:
+  original:
+    type: File
+    outputBinding:
+      glob: $(inputs.inp.basename)
+  revhash:
+    type: stdout
+stdout: rev-$(inputs.inp.basename)
+arguments:
+  - shellQuote: false
+    valueFrom: |
+      ln -s $(inputs.inp.path) $(inputs.inp.basename) &&
+      rev $(inputs.inp.basename)
diff --git a/sdk/cwl/tests/federation/cases/rev.cwl b/sdk/cwl/tests/federation/cases/rev.cwl
new file mode 100644 (file)
index 0000000..8bbc565
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+inputs:
+  inp:
+    type: File
+outputs:
+  revhash:
+    type: File
+    outputBinding:
+      glob: out.txt
+stdout: out.txt
+arguments: [rev, $(inputs.inp)]
diff --git a/sdk/cwl/tests/federation/cases/runner-home-step-remote.cwl b/sdk/cwl/tests/federation/cases/runner-home-step-remote.cwl
new file mode 100644 (file)
index 0000000..182ca1e
--- /dev/null
@@ -0,0 +1,29 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+  DockerRequirement:
+    dockerPull: arvados/fed-test:runner-home-step-remote
+inputs:
+  inp: File
+  runOnCluster: string
+outputs:
+  hash:
+    type: File
+    outputSource: md5sum/hash
+steps:
+  md5sum:
+    in:
+      inp: inp
+      runOnCluster: runOnCluster
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    out: [hash]
+    run: md5sum.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/federation/cases/runner-remote-step-home.cwl b/sdk/cwl/tests/federation/cases/runner-remote-step-home.cwl
new file mode 100644 (file)
index 0000000..963c84f
--- /dev/null
@@ -0,0 +1,29 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+  DockerRequirement:
+    dockerPull: arvados/fed-test:runner-remote-step-home
+inputs:
+  inp: File
+  runOnCluster: string
+outputs:
+  hash:
+    type: File
+    outputSource: md5sum/hash
+steps:
+  md5sum:
+    in:
+      inp: inp
+      runOnCluster: runOnCluster
+    out: [hash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: md5sum.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/federation/cases/scatter-gather.cwl b/sdk/cwl/tests/federation/cases/scatter-gather.cwl
new file mode 100644 (file)
index 0000000..07403ed
--- /dev/null
@@ -0,0 +1,37 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+  DockerRequirement:
+    dockerPull: arvados/fed-test:scatter-gather
+  ScatterFeatureRequirement: {}
+inputs:
+  shards: File[]
+  clusters: string[]
+outputs:
+  joined:
+    type: File
+    outputSource: cat/joined
+steps:
+  md5sum:
+    in:
+      inp: shards
+      runOnCluster: clusters
+    scatter: [inp, runOnCluster]
+    scatterMethod: dotproduct
+    out: [hash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: md5sum.cwl
+  cat:
+    in:
+      inp: md5sum/hash
+    out: [joined]
+    run: cat.cwl
diff --git a/sdk/cwl/tests/federation/cases/threestep-remote.cwl b/sdk/cwl/tests/federation/cases/threestep-remote.cwl
new file mode 100644 (file)
index 0000000..8dffc18
--- /dev/null
@@ -0,0 +1,50 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+  DockerRequirement:
+    dockerPull: arvados/fed-test:threestep-remote
+  ScatterFeatureRequirement: {}
+inputs:
+  inp: File
+  clusterA: string
+  clusterB: string
+  clusterC: string
+outputs:
+  revhash:
+    type: File
+    outputSource: revC/revhash
+steps:
+  md5sum:
+    in:
+      inp: inp
+      runOnCluster: clusterA
+    out: [hash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: md5sum.cwl
+  revB:
+    in:
+      inp: md5sum/hash
+      runOnCluster: clusterB
+    out: [revhash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: rev-input-to-output.cwl
+  revC:
+    in:
+      inp: revB/revhash
+      runOnCluster: clusterC
+    out: [revhash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: rev-input-to-output.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/federation/cases/twostep-both-remote.cwl b/sdk/cwl/tests/federation/cases/twostep-both-remote.cwl
new file mode 100644 (file)
index 0000000..b924c54
--- /dev/null
@@ -0,0 +1,41 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+  DockerRequirement:
+    dockerPull: arvados/fed-test:twostep-both-remote
+inputs:
+  inp:
+    type: File
+    inputBinding: {}
+  md5sumCluster: string
+  revCluster: string
+outputs:
+  hash:
+    type: File
+    outputSource: md5sum/hash
+steps:
+  md5sum:
+    in:
+      inp: inp
+      runOnCluster: md5sumCluster
+    out: [hash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: md5sum.cwl
+  rev:
+    in:
+      inp: md5sum/hash
+      runOnCluster: revCluster
+    out: [revhash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: rev.cwl
diff --git a/sdk/cwl/tests/federation/cases/twostep-home-to-remote.cwl b/sdk/cwl/tests/federation/cases/twostep-home-to-remote.cwl
new file mode 100644 (file)
index 0000000..c74c247
--- /dev/null
@@ -0,0 +1,41 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+  DockerRequirement:
+    dockerPull: arvados/fed-test:twostep-home-to-remote
+inputs:
+  inp:
+    type: File
+    inputBinding: {}
+  md5sumCluster: string
+  revCluster: string
+outputs:
+  hash:
+    type: File
+    outputSource: md5sum/hash
+steps:
+  md5sum:
+    in:
+      inp: inp
+      runOnCluster: md5sumCluster
+    out: [hash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: md5sum.cwl
+  rev:
+    in:
+      inp: md5sum/hash
+      runOnCluster: revCluster
+    out: [revhash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: rev.cwl
diff --git a/sdk/cwl/tests/federation/cases/twostep-remote-copy-to-home.cwl b/sdk/cwl/tests/federation/cases/twostep-remote-copy-to-home.cwl
new file mode 100644 (file)
index 0000000..3722c99
--- /dev/null
@@ -0,0 +1,41 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+  DockerRequirement:
+    dockerPull: arvados/fed-test:twostep-remote-copy-to-home
+inputs:
+  inp:
+    type: File
+    inputBinding: {}
+  md5sumCluster: string
+  revCluster: string
+outputs:
+  hash:
+    type: File
+    outputSource: md5sum/hash
+steps:
+  md5sum:
+    in:
+      inp: inp
+      runOnCluster: md5sumCluster
+    out: [hash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: md5sum.cwl
+  rev:
+    in:
+      inp: md5sum/hash
+      runOnCluster: revCluster
+    out: [revhash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: rev-input-to-output.cwl
diff --git a/sdk/cwl/tests/federation/cases/twostep-remote-to-home.cwl b/sdk/cwl/tests/federation/cases/twostep-remote-to-home.cwl
new file mode 100644 (file)
index 0000000..e528914
--- /dev/null
@@ -0,0 +1,41 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  InlineJavascriptRequirement: {}
+  DockerRequirement:
+    dockerPull: arvados/fed-test:twostep-remote-to-home
+inputs:
+  inp:
+    type: File
+    inputBinding: {}
+  md5sumCluster: string
+  revCluster: string
+outputs:
+  hash:
+    type: File
+    outputSource: md5sum/hash
+steps:
+  md5sum:
+    in:
+      inp: inp
+      runOnCluster: md5sumCluster
+    out: [hash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: md5sum.cwl
+  rev:
+    in:
+      inp: md5sum/hash
+      runOnCluster: revCluster
+    out: [revhash]
+    hints:
+      arv:ClusterTarget:
+        cluster_id: $(inputs.runOnCluster)
+    run: rev.cwl
diff --git a/sdk/cwl/tests/federation/data/base-case-input.txt b/sdk/cwl/tests/federation/data/base-case-input.txt
new file mode 100644 (file)
index 0000000..761b840
--- /dev/null
@@ -0,0 +1,16 @@
+Call me base-case. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/data/hint-on-tool.txt b/sdk/cwl/tests/federation/data/hint-on-tool.txt
new file mode 100644 (file)
index 0000000..c396125
--- /dev/null
@@ -0,0 +1,16 @@
+Call me hint-on-tool. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/data/hint-on-wf.txt b/sdk/cwl/tests/federation/data/hint-on-wf.txt
new file mode 100644 (file)
index 0000000..f4aa872
--- /dev/null
@@ -0,0 +1,16 @@
+Call me hint-on-wf. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/data/remote-case-input.txt b/sdk/cwl/tests/federation/data/remote-case-input.txt
new file mode 100644 (file)
index 0000000..21e87fb
--- /dev/null
@@ -0,0 +1,16 @@
+Call me remote-case. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/data/runner-home-step-remote-input.txt b/sdk/cwl/tests/federation/data/runner-home-step-remote-input.txt
new file mode 100644 (file)
index 0000000..91ab77d
--- /dev/null
@@ -0,0 +1,16 @@
+Call me runner-home-step-remote. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/data/runner-remote-step-home-input.txt b/sdk/cwl/tests/federation/data/runner-remote-step-home-input.txt
new file mode 100644 (file)
index 0000000..e5673b8
--- /dev/null
@@ -0,0 +1,16 @@
+Call me runner-remote-step-home. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/data/scatter-gather-s1.txt b/sdk/cwl/tests/federation/data/scatter-gather-s1.txt
new file mode 100644 (file)
index 0000000..cc732e3
--- /dev/null
@@ -0,0 +1,16 @@
+Call me scatter-gather-s1. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/data/scatter-gather-s2.txt b/sdk/cwl/tests/federation/data/scatter-gather-s2.txt
new file mode 100644 (file)
index 0000000..3b57ee1
--- /dev/null
@@ -0,0 +1,16 @@
+Call me scatter-gather-s2. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/data/scatter-gather-s3.txt b/sdk/cwl/tests/federation/data/scatter-gather-s3.txt
new file mode 100644 (file)
index 0000000..06f77d2
--- /dev/null
@@ -0,0 +1,16 @@
+Call me scatter-gather-s3. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/data/threestep-remote.txt b/sdk/cwl/tests/federation/data/threestep-remote.txt
new file mode 100644 (file)
index 0000000..39dd99b
--- /dev/null
@@ -0,0 +1,16 @@
+Call me threestep-remote. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/data/twostep-both-remote.txt b/sdk/cwl/tests/federation/data/twostep-both-remote.txt
new file mode 100644 (file)
index 0000000..6218bb5
--- /dev/null
@@ -0,0 +1,16 @@
+Call me twostep-both-remote. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/data/twostep-home-to-remote.txt b/sdk/cwl/tests/federation/data/twostep-home-to-remote.txt
new file mode 100644 (file)
index 0000000..6430ad5
--- /dev/null
@@ -0,0 +1,16 @@
+Call me twostep-home-to-remote. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/data/twostep-remote-copy-to-home.txt b/sdk/cwl/tests/federation/data/twostep-remote-copy-to-home.txt
new file mode 100644 (file)
index 0000000..c0f72ef
--- /dev/null
@@ -0,0 +1,16 @@
+Call me twostep-remote-copy-to-home. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/data/twostep-remote-to-home.txt b/sdk/cwl/tests/federation/data/twostep-remote-to-home.txt
new file mode 100644 (file)
index 0000000..2318025
--- /dev/null
@@ -0,0 +1,16 @@
+Call me twostep-remote-to-home. Some years ago--never mind how long precisely--having
+little or no money in my purse, and nothing particular to interest me on
+shore, I thought I would sail about a little and see the watery part of
+the world. It is a way I have of driving off the spleen and regulating
+the circulation. Whenever I find myself growing grim about the mouth;
+whenever it is a damp, drizzly November in my soul; whenever I find
+myself involuntarily pausing before coffin warehouses, and bringing up
+the rear of every funeral I meet; and especially whenever my hypos get
+such an upper hand of me, that it requires a strong moral principle to
+prevent me from deliberately stepping into the street, and methodically
+knocking people's hats off--then, I account it high time to get to sea
+as soon as I can. This is my substitute for pistol and ball. With a
+philosophical flourish Cato throws himself upon his sword; I quietly
+take to the ship. There is nothing surprising in this. If they but knew
+it, almost all men in their degree, some time or other, cherish very
+nearly the same feelings towards the ocean with me.
diff --git a/sdk/cwl/tests/federation/framework/check-exist.cwl b/sdk/cwl/tests/federation/framework/check-exist.cwl
new file mode 100644 (file)
index 0000000..ebb0fb2
--- /dev/null
@@ -0,0 +1,43 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+requirements:
+  InitialWorkDirRequirement:
+    listing:
+      - entryname: config.json
+        entry: |-
+          ${
+          return JSON.stringify({
+            check_collections: inputs.check_collections
+          });
+          }
+  EnvVarRequirement:
+    envDef:
+      ARVADOS_API_HOST: $(inputs.arvados_api_host)
+      ARVADOS_API_TOKEN: $(inputs.arvados_api_token)
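+      # the ""+ prefix coerces the boolean input to the string "true"/"false"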
+      ARVADOS_API_HOST_INSECURE: $(""+inputs.arvado_api_host_insecure)
+  InlineJavascriptRequirement: {}
+inputs:
+  arvados_api_token: string
+  arvado_api_host_insecure: boolean
+  arvados_api_host: string
+  check_collections: string[]
+  preparescript:
+    type: File
+    default:
+      class: File
+      location: check_exist.py
+    inputBinding:
+      position: 1
+outputs:
+  success:
+    type: boolean
+    outputBinding:
+      glob: success
+      loadContents: true
+      outputEval: $(self[0].contents=="true")
+baseCommand: python2
\ No newline at end of file
diff --git a/sdk/cwl/tests/federation/framework/check_exist.py b/sdk/cwl/tests/federation/framework/check_exist.py
new file mode 100644 (file)
index 0000000..1458772
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+import arvados
+import json
+
+api = arvados.api()
+
+with open("config.json") as f:
+    config = json.load(f)
+
+success = True
+for c in config["check_collections"]:
+    try:
+        api.collections().get(uuid=c).execute()
+    except Exception as e:
+        print("Checking for %s got exception %s" % (c, e))
+        success = False
+
+with open("success", "w") as f:
+    if success:
+        f.write("true")
+    else:
+        f.write("false")
diff --git a/sdk/cwl/tests/federation/framework/dockerbuild.cwl b/sdk/cwl/tests/federation/framework/dockerbuild.cwl
new file mode 100644 (file)
index 0000000..d00b3e2
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  testcase: string
+outputs:
+  imagename:
+    type: string
+    outputBinding:
+      outputEval: $(inputs.testcase)
+requirements:
+  InitialWorkDirRequirement:
+    listing:
+      - entryname: Dockerfile
+        entry: |-
+          FROM debian@sha256:0a5fcee6f52d5170f557ee2447d7a10a5bdcf715dd7f0250be0b678c556a501b
+          LABEL org.arvados.testcase="$(inputs.testcase)"
+arguments: [docker, build, -t, $(inputs.testcase), "."]
diff --git a/sdk/cwl/tests/federation/framework/prepare.cwl b/sdk/cwl/tests/federation/framework/prepare.cwl
new file mode 100644 (file)
index 0000000..03f792c
--- /dev/null
@@ -0,0 +1,48 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+requirements:
+  InitialWorkDirRequirement:
+    listing:
+      - entryname: input.json
+        entry: $(JSON.stringify(inputs.obj))
+      - entryname: config.json
+        entry: |-
+          ${
+          return JSON.stringify({
+            arvados_cluster_ids: inputs.arvados_cluster_ids,
+            scrub_images: [inputs.scrub_image],
+            scrub_collections: inputs.scrub_collections
+          });
+          }
+  EnvVarRequirement:
+    envDef:
+      ARVADOS_API_HOST: $(inputs.arvados_api_host)
+      ARVADOS_API_TOKEN: $(inputs.arvados_api_token)
+      ARVADOS_API_HOST_INSECURE: $(""+inputs.arvado_api_host_insecure)
+  InlineJavascriptRequirement: {}
+inputs:
+  arvados_api_token: string
+  arvado_api_host_insecure: boolean
+  arvados_api_host: string
+  arvados_cluster_ids: string[]
+  wf: File
+  obj: Any
+  scrub_image: string
+  scrub_collections: string[]
+  preparescript:
+    type: File
+    default:
+      class: File
+      location: prepare.py
+    inputBinding:
+      position: 1
+outputs:
+  done:
+    type: boolean
+    outputBinding:
+      outputEval: $(true)
+baseCommand: python2
\ No newline at end of file
diff --git a/sdk/cwl/tests/federation/framework/prepare.py b/sdk/cwl/tests/federation/framework/prepare.py
new file mode 100644 (file)
index 0000000..40bb843
--- /dev/null
@@ -0,0 +1,42 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+import arvados
+import json
+
+api = arvados.api()
+
+with open("config.json") as f:
+    config = json.load(f)
+
+scrub_collections = set(config["scrub_collections"])
+
+for cluster_id in config["arvados_cluster_ids"]:
+    images = []
+    for scrub_image in config["scrub_images"]:
+        sp = scrub_image.split(":")
+        image_name = sp[0]
+        image_tag = sp[1] if len(sp) > 1 else "latest"
+        images.append('{}:{}'.format(image_name, image_tag))
+
+    search_links = api.links().list(
+        filters=[['link_class', '=', 'docker_image_repo+tag'],
+                 ['name', 'in', images]],
+        cluster_id=cluster_id).execute()
+
+    head_uuids = [lk["head_uuid"] for lk in search_links["items"]]
+    cols = api.collections().list(filters=[["uuid", "in", head_uuids]],
+                                  cluster_id=cluster_id).execute()
+    for c in cols["items"]:
+        scrub_collections.add(c["portable_data_hash"])
+    for lk in search_links["items"]:
+        api.links().delete(uuid=lk["uuid"]).execute()
+
+for cluster_id in config["arvados_cluster_ids"]:
+    matches = api.collections().list(filters=[["portable_data_hash", "in", list(scrub_collections)]],
+                                     select=["uuid", "portable_data_hash"], cluster_id=cluster_id).execute()
+    for m in matches["items"]:
+        api.collections().delete(uuid=m["uuid"]).execute()
+        print("Scrubbed %s (%s)" % (m["uuid"], m["portable_data_hash"]))
diff --git a/sdk/cwl/tests/federation/framework/run-acr.cwl b/sdk/cwl/tests/federation/framework/run-acr.cwl
new file mode 100644 (file)
index 0000000..5c8971b
--- /dev/null
@@ -0,0 +1,56 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  acr:
+    type: string?
+    default: arvados-cwl-runner
+    inputBinding:
+      position: 1
+  arvados_api_host: string
+  arvados_api_token: string
+  arvado_api_host_insecure:
+    type: boolean
+    default: false
+  runner_cluster:
+    type: string?
+    inputBinding:
+      prefix: --submit-runner-cluster
+      position: 2
+  wf:
+    type: File
+    inputBinding:
+      position: 3
+  obj: Any
+requirements:
+  InitialWorkDirRequirement:
+    listing:
+      - entryname: input.json
+        entry: $(JSON.stringify(inputs.obj))
+  EnvVarRequirement:
+    envDef:
+      ARVADOS_API_HOST: $(inputs.arvados_api_host)
+      ARVADOS_API_TOKEN: $(inputs.arvados_api_token)
+      ARVADOS_API_HOST_INSECURE: $(""+inputs.arvado_api_host_insecure)
+  InlineJavascriptRequirement: {}
+outputs:
+  out:
+    type: Any
+    outputBinding:
+      glob: output.json
+      loadContents: true
+      #outputEval: $(JSON.parse(self[0].contents))
+      outputEval: $(self[0].contents)
+stdout: output.json
+arguments:
+  - valueFrom: --disable-reuse
+    position: 2
+  - valueFrom: --always-submit-runner
+    position: 2
+  - valueFrom: --api=containers
+    position: 2
+  - valueFrom: input.json
+    position: 4
\ No newline at end of file
diff --git a/sdk/cwl/tests/federation/framework/testcase.cwl b/sdk/cwl/tests/federation/framework/testcase.cwl
new file mode 100644 (file)
index 0000000..89aa3f9
--- /dev/null
@@ -0,0 +1,80 @@
+#!/usr/bin/env cwl-runner
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+hints:
+  cwltool:Secrets:
+    secrets: [arvados_api_token]
+requirements:
+  StepInputExpressionRequirement: {}
+  InlineJavascriptRequirement: {}
+  SubworkflowFeatureRequirement: {}
+inputs:
+  arvados_api_token: string
+  arvado_api_host_insecure:
+    type: boolean
+    default: false
+  arvados_api_hosts: string[]
+  arvados_cluster_ids: string[]
+  acr: string?
+  wf: File
+  obj: Any
+  scrub_image: string
+  scrub_collections: string[]
+  runner_cluster: string?
+outputs:
+  out:
+    type: Any
+    outputSource: run-acr/out
+  success:
+    type: boolean
+    outputSource: check-result/success
+steps:
+  dockerbuild:
+    in:
+      testcase: scrub_image
+    out: [imagename]
+    run: dockerbuild.cwl
+  prepare:
+    in:
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_host: {source: arvados_api_hosts, valueFrom: "$(self[0])"}
+      arvados_cluster_ids: arvados_cluster_ids
+      wf: wf
+      obj: obj
+      scrub_image: scrub_image
+      scrub_collections: scrub_collections
+    out: [done]
+    run: prepare.cwl
+  run-acr:
+    in:
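+      # prepare and image-ready are not declared by run-acr.cwl; wiring
+      # them here only forces this step to wait for the previous two.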
+      prepare: prepare/done
+      image-ready: dockerbuild/imagename
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_host: {source: arvados_api_hosts, valueFrom: "$(self[0])"}
+      runner_cluster: runner_cluster
+      acr: acr
+      wf: wf
+      obj: obj
+    out: [out]
+    run: run-acr.cwl
+  check-result:
+    in:
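+      # acr-done likewise exists only to sequence this step after run-acr.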
+      acr-done: run-acr/out
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_host: {source: arvados_api_hosts, valueFrom: "$(self[0])"}
+      check_collections: scrub_collections
+    out: [success]
+    run: check-exist.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/federation/main.cwl b/sdk/cwl/tests/federation/main.cwl
new file mode 100755 (executable)
index 0000000..a00e6d3
--- /dev/null
@@ -0,0 +1,545 @@
+#!/usr/bin/env cwl-runner
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+hints:
+  cwltool:Secrets:
+    secrets: [arvados_api_token]
+requirements:
+  StepInputExpressionRequirement: {}
+  InlineJavascriptRequirement: {}
+  SubworkflowFeatureRequirement: {}
+inputs:
+  arvados_api_token: string
+  arvado_api_host_insecure:
+    type: boolean
+    default: false
+  arvados_api_hosts: string[]
+  arvados_cluster_ids: string[]
+  acr: string?
+  testcases:
+    type: string[]
+    default:
+      - base-case
+      - runner-home-step-remote
+      - runner-remote-step-home
+outputs:
+  base-case-success:
+    type: Any
+    outputSource: base-case/success
+  runner-home-step-remote-success:
+    type: Any
+    outputSource: runner-home-step-remote/success
+  runner-remote-step-home-success:
+    type: Any
+    outputSource: runner-remote-step-home/success
+  remote-case-success:
+    type: Any
+    outputSource: remote-case/success
+  twostep-home-to-remote-success:
+    type: Any
+    outputSource: twostep-home-to-remote/success
+  twostep-remote-to-home-success:
+    type: Any
+    outputSource: twostep-remote-to-home/success
+  twostep-both-remote-success:
+    type: Any
+    outputSource: twostep-both-remote/success
+  twostep-remote-copy-to-home-success:
+    type: Any
+    outputSource: twostep-remote-copy-to-home/success
+  scatter-gather-success:
+    type: Any
+    outputSource: scatter-gather/success
+  threestep-remote-success:
+    type: Any
+    outputSource: threestep-remote/success
+  hint-on-wf-success:
+    type: Any
+    outputSource: hint-on-wf/success
+  hint-on-tool-success:
+    type: Any
+    outputSource: hint-on-tool/success
+
+steps:
+  base-case:
+    doc: |
+      Base case (no federation), single step workflow with both the
+      runner and step on the same cluster.
+    in:
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_hosts: arvados_api_hosts
+      arvados_cluster_ids: arvados_cluster_ids
+      acr: acr
+      wf:
+        default:
+          class: File
+          location: cases/base-case.cwl
+          secondaryFiles:
+            - class: File
+              location: cases/md5sum.cwl
+      obj:
+        default:
+          inp:
+            class: File
+            location: data/base-case-input.txt
+        valueFrom: |-
+          ${
+          self["runOnCluster"] = inputs.arvados_cluster_ids[0];
+          return self;
+          }
+      scrub_image: {default: "arvados/fed-test:base-case"}
+      scrub_collections:
+        default:
+          - 031a4ced0aa99de90fb630568afc6e9b+67   # input collection
+          - eb93a6718eb1a1a8ee9f66ee7d683472+51   # md5sum output collection
+          - f654d4048612135f4a5e7707ec0fcf3e+112  # final output json
+    out: [out, success]
+    run: framework/testcase.cwl
+
+  runner-home-step-remote:
+    doc: |
+      Single step workflow with the runner on the home cluster and the
+      step on the remote cluster.  ClusterTarget hint is on the workflow step.
+    in:
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_hosts: arvados_api_hosts
+      arvados_cluster_ids: arvados_cluster_ids
+      acr: acr
+      wf:
+        default:
+          class: File
+          location: cases/runner-home-step-remote.cwl
+          secondaryFiles:
+            - class: File
+              location: cases/md5sum.cwl
+      obj:
+        default:
+          inp:
+            class: File
+            location: data/runner-home-step-remote-input.txt
+        valueFrom: |-
+          ${
+          self["runOnCluster"] = inputs.arvados_cluster_ids[1];
+          return self;
+          }
+      runner_cluster: { valueFrom: "$(inputs.arvados_cluster_ids[0])" }
+      scrub_image: {default: "arvados/fed-test:runner-home-step-remote"}
+      scrub_collections:
+        default:
+          - 3bc373e38751fe13dcbd62778d583242+81   # input collection
+          - 428e6d91e41a3af3ae287b453949e7fd+51   # md5sum output collection
+          - a4b0ddd866525655e8480f83a1ca83c6+112  # runner output json
+    out: [out, success]
+    run: framework/testcase.cwl
+
+  runner-remote-step-home:
+    doc: |
+      Single step workflow with the runner on the remote cluster and the
+      step on the home cluster.
+    in:
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_hosts: arvados_api_hosts
+      arvados_cluster_ids: arvados_cluster_ids
+      acr: acr
+      wf:
+        default:
+          class: File
+          location: cases/runner-remote-step-home.cwl
+          secondaryFiles:
+            - class: File
+              location: cases/md5sum.cwl
+      obj:
+        default:
+          inp:
+            class: File
+            location: data/runner-remote-step-home-input.txt
+        valueFrom: |-
+          ${
+          self["runOnCluster"] = inputs.arvados_cluster_ids[0];
+          return self;
+          }
+      runner_cluster: { valueFrom: "$(inputs.arvados_cluster_ids[1])" }
+      scrub_image: {default: "arvados/fed-test:runner-remote-step-home"}
+      scrub_collections:
+        default:
+          - 25fe10d8e8530329a738de69d9bc8ab5+81   # input collection
+          - 7f052d1a04b851b6f73fba77c7802e1d+51   # md5sum output collection
+          - ecb639201f454b6493757f5117f540df+112  # runner output json
+    out: [out, success]
+    run: framework/testcase.cwl
+
+  remote-case:
+    doc: |
+      Single step workflow with both the runner and the step on the
+      remote cluster.
+    in:
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_hosts: arvados_api_hosts
+      arvados_cluster_ids: arvados_cluster_ids
+      acr: acr
+      wf:
+        default:
+          class: File
+          location: cases/remote-case.cwl
+          secondaryFiles:
+            - class: File
+              location: cases/md5sum.cwl
+      obj:
+        default:
+          inp:
+            class: File
+            location: data/remote-case-input.txt
+        valueFrom: |-
+          ${
+          self["runOnCluster"] = inputs.arvados_cluster_ids[1];
+          return self;
+          }
+      runner_cluster: { valueFrom: "$(inputs.arvados_cluster_ids[1])" }
+      scrub_image: {default: "arvados/fed-test:remote-case"}
+      scrub_collections:
+        default:
+          - fccd49fdef8e452295f718208abafd88+69   # input collection
+          - 58c0e8ea6b148134ef8577ee11307eec+51   # md5sum output collection
+          - 1fd679c5ab64c123b9764024dbf560f0+112  # final output json
+    out: [out, success]
+    run: framework/testcase.cwl
+
+  twostep-home-to-remote:
+    doc: |
+      Two step workflow.  The runner is on the home cluster, the first
+      step is on the home cluster, the second step is on the remote
+      cluster.
+    in:
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_hosts: arvados_api_hosts
+      arvados_cluster_ids: arvados_cluster_ids
+      acr: acr
+      wf:
+        default:
+          class: File
+          location: cases/twostep-home-to-remote.cwl
+          secondaryFiles:
+            - class: File
+              location: cases/md5sum.cwl
+            - class: File
+              location: cases/rev.cwl
+      obj:
+        default:
+          inp:
+            class: File
+            location: data/twostep-home-to-remote.txt
+        valueFrom: |-
+          ${
+          self["md5sumCluster"] = inputs.arvados_cluster_ids[0];
+          self["revCluster"] = inputs.arvados_cluster_ids[1];
+          return self;
+          }
+      runner_cluster: { valueFrom: "$(inputs.arvados_cluster_ids[0])" }
+      scrub_image: {default: "arvados/fed-test:twostep-home-to-remote"}
+      scrub_collections:
+        default:
+          - 268a54947fb75115cfe05bb54cc62c30+74   # input collection
+          - 400f03b8c5d2dc3dcb513a21b626ef88+51   # md5sum output collection
+          - 3738166916ca5f6f6ad12bf7e06b4a21+51   # rev output collection
+          - bc37c17a37aa25229e5de1339b27fbcc+112  # runner output json
+    out: [out, success]
+    run: framework/testcase.cwl
+
+  twostep-remote-to-home:
+    doc: |
+      Two step workflow.  The runner is on the home cluster, the first
+      step is on the remote cluster, the second step is on the home
+      cluster.
+    in:
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_hosts: arvados_api_hosts
+      arvados_cluster_ids: arvados_cluster_ids
+      acr: acr
+      wf:
+        default:
+          class: File
+          location: cases/twostep-remote-to-home.cwl
+          secondaryFiles:
+            - class: File
+              location: cases/md5sum.cwl
+            - class: File
+              location: cases/rev.cwl
+      obj:
+        default:
+          inp:
+            class: File
+            location: data/twostep-remote-to-home.txt
+        valueFrom: |-
+          ${
+          self["md5sumCluster"] = inputs.arvados_cluster_ids[1];
+          self["revCluster"] = inputs.arvados_cluster_ids[0];
+          return self;
+          }
+      runner_cluster: { valueFrom: "$(inputs.arvados_cluster_ids[0])" }
+      scrub_image: {default: "arvados/fed-test:twostep-remote-to-home"}
+      scrub_collections:
+        default:
+          - cce89b9f7b6e163978144051ce5f071a+74   # input collection
+          - 0c358c3af63644c6343766feff1b7238+51   # md5sum output collection
+          - 33fb7d512bf21f04847eca58cea46e74+51   # rev output collection
+          - 912e04aa3db04aba008cf5cd46c277b2+112  # runner output json
+    out: [out, success]
+    run: framework/testcase.cwl
+
+  twostep-both-remote:
+    doc: |
+      Two step workflow.  The runner is on the home cluster, both steps are
+      on the remote cluster.
+    in:
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_hosts: arvados_api_hosts
+      arvados_cluster_ids: arvados_cluster_ids
+      acr: acr
+      wf:
+        default:
+          class: File
+          location: cases/twostep-both-remote.cwl
+          secondaryFiles:
+            - class: File
+              location: cases/md5sum.cwl
+            - class: File
+              location: cases/rev.cwl
+      obj:
+        default:
+          inp:
+            class: File
+            location: data/twostep-both-remote.txt
+        valueFrom: |-
+          ${
+          self["md5sumCluster"] = inputs.arvados_cluster_ids[1];
+          self["revCluster"] = inputs.arvados_cluster_ids[1];
+          return self;
+          }
+      runner_cluster: { valueFrom: "$(inputs.arvados_cluster_ids[0])" }
+      scrub_image: {default: "arvados/fed-test:twostep-both-remote"}
+      scrub_collections:
+        default:
+          - 3c5e39939cf197d304ac1eac20841238+71   # input collection
+          - 3edb99aa607731593969cdab663d65b4+51   # md5sum output collection
+          - a91625b7139e60fe61a88cae42fbee13+51   # rev output collection
+          - ddfa58a81953dad08436d571615dd584+112  # runner output json
+    out: [out, success]
+    run: framework/testcase.cwl
+
+  twostep-remote-copy-to-home:
+    doc: |
+      Two step workflow.  The runner is on the home cluster, the first
+      step is on the remote cluster, and the second step runs on the
+      home cluster and propagates its input file directly to its output
+      by symlinking it in the output directory.  Tests that crunch-run
+      copies blocks from the remote cluster to the local cluster when
+      preparing the output collection.
+    in:
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_hosts: arvados_api_hosts
+      arvados_cluster_ids: arvados_cluster_ids
+      acr: acr
+      wf:
+        default:
+          class: File
+          location: cases/twostep-remote-copy-to-home.cwl
+          secondaryFiles:
+            - class: File
+              location: cases/md5sum.cwl
+            - class: File
+              location: cases/rev-input-to-output.cwl
+      obj:
+        default:
+          inp:
+            class: File
+            location: data/twostep-remote-copy-to-home.txt
+        valueFrom: |-
+          ${
+          self["md5sumCluster"] = inputs.arvados_cluster_ids[1];
+          self["revCluster"] = inputs.arvados_cluster_ids[0];
+          return self;
+          }
+      runner_cluster: { valueFrom: "$(inputs.arvados_cluster_ids[0])" }
+      scrub_image: {default: "arvados/fed-test:twostep-remote-copy-to-home"}
+      scrub_collections:
+        default:
+          - 538887bc29a3098bf79abdb8536d17bd+79   # input collection
+          - 14da0e0d52d7ab2945427074b275e9ee+51   # md5sum output collection
+          - 2d3a4a840077390a0d7788f169eaba89+112  # rev output collection
+          - 2d3a4a840077390a0d7788f169eaba89+112  # runner output json
+    out: [out, success]
+    run: framework/testcase.cwl
+
+  scatter-gather:
+    doc: ""
+    in:
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_hosts: arvados_api_hosts
+      arvados_cluster_ids: arvados_cluster_ids
+      acr: acr
+      wf:
+        default:
+          class: File
+          location: cases/scatter-gather.cwl
+          secondaryFiles:
+            - class: File
+              location: cases/md5sum.cwl
+            - class: File
+              location: cases/cat.cwl
+      obj:
+        default:
+          shards:
+            - class: File
+              location: data/scatter-gather-s1.txt
+            - class: File
+              location: data/scatter-gather-s2.txt
+            - class: File
+              location: data/scatter-gather-s3.txt
+        valueFrom: |-
+          ${
+          self["clusters"] = inputs.arvados_cluster_ids;
+          return self;
+          }
+      runner_cluster: { valueFrom: "$(inputs.arvados_cluster_ids[0])" }
+      scrub_image: {default: "arvados/fed-test:scatter-gather"}
+      scrub_collections:
+        default:
+          - 99cc18329bce1b4a5fe6c4cf60477668+209  # input collection
+          - 2e570e844e03c7027baad148642d726f+51   # s1 md5sum output collection
+          - 61c88ee7811d0b849b5c06376eb065a6+51   # s2 md5sum output collection
+          - 85aaf18d638045fe609e025d3a319b2a+51   # s3 md5sum output collection
+          - ec44bcba77e65128f1a8f843d881ede4+56   # cat output collection
+          - 89de265942800ae36549109969940363+117  # runner output json
+    out: [out, success]
+    run: framework/testcase.cwl
+
+  threestep-remote:
+    doc: ""
+    in:
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_hosts: arvados_api_hosts
+      arvados_cluster_ids: arvados_cluster_ids
+      acr: acr
+      wf:
+        default:
+          class: File
+          location: cases/threestep-remote.cwl
+          secondaryFiles:
+            - class: File
+              location: cases/md5sum.cwl
+            - class: File
+              location: cases/rev-input-to-output.cwl
+      obj:
+        default:
+          inp:
+            class: File
+            location: data/threestep-remote.txt
+        valueFrom: |-
+          ${
+          self["clusterA"] = inputs.arvados_cluster_ids[0];
+          self["clusterB"] = inputs.arvados_cluster_ids[1];
+          self["clusterC"] = inputs.arvados_cluster_ids[2];
+          return self;
+          }
+      runner_cluster: { valueFrom: "$(inputs.arvados_cluster_ids[0])" }
+      scrub_image: {default: "arvados/fed-test:threestep-remote"}
+      scrub_collections:
+        default:
+          - 9fbf33e62876357fe134f619865cc5a5+68   # input collection
+          - 210c5f2a716f6689b04316acd4928c10+51   # md5sum output collection
+          - 3abea7506269d5ebf61fb17c78bbd2af+105  # revB output
+          - 9e1b3acb28949759ad07e4c9740bbaa5+113  # revC output
+          - 8c86dbec7de7948871b5e168ede417e1+120  # runner output json
+    out: [out, success]
+    run: framework/testcase.cwl
+
+  hint-on-wf:
+    doc: |
+      Single step workflow with the runner on the home cluster and the
+      step on the remote cluster.  ClusterTarget hint is at the workflow level.
+    in:
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_hosts: arvados_api_hosts
+      arvados_cluster_ids: arvados_cluster_ids
+      acr: acr
+      wf:
+        default:
+          class: File
+          location: cases/hint-on-wf.cwl
+          secondaryFiles:
+            - class: File
+              location: cases/md5sum.cwl
+      obj:
+        default:
+          inp:
+            class: File
+            location: data/hint-on-wf.txt
+        valueFrom: |-
+          ${
+          self["runOnCluster"] = inputs.arvados_cluster_ids[1];
+          return self;
+          }
+      runner_cluster: { valueFrom: "$(inputs.arvados_cluster_ids[0])" }
+      scrub_image: {default: "arvados/fed-test:hint-on-wf"}
+      scrub_collections:
+        default:
+          - 862433f328041b2525c90b1dc3c462fd+62   # input collection
+          - 9a68b0b9720977faba8a28e75a4398b7+51   # md5sum output collection
+          - 6a601cddb36ee2f766783b1aa9ff8d66+112  # runner output json
+    out: [out, success]
+    run: framework/testcase.cwl
+
+  hint-on-tool:
+    doc: |
+      Single step workflow with the runner on the home cluster and the
+      step on the remote cluster.  ClusterTarget hint is at the tool level.
+    in:
+      arvados_api_token: arvados_api_token
+      arvado_api_host_insecure: arvado_api_host_insecure
+      arvados_api_hosts: arvados_api_hosts
+      arvados_cluster_ids: arvados_cluster_ids
+      acr: acr
+      wf:
+        default:
+          class: File
+          location: cases/hint-on-tool.cwl
+          secondaryFiles:
+            - class: File
+              location: cases/md5sum-tool-hint.cwl
+      obj:
+        default:
+          inp:
+            class: File
+            location: data/hint-on-tool.txt
+        valueFrom: |-
+          ${
+          self["runOnCluster"] = inputs.arvados_cluster_ids[1];
+          return self;
+          }
+      runner_cluster: { valueFrom: "$(inputs.arvados_cluster_ids[0])" }
+      scrub_image: {default: "arvados/fed-test:hint-on-tool"}
+      scrub_collections:
+        default:
+          - 6803004a4f8db9f8d1d54f6229851599+64   # input collection
+          - cacb0d56235564b5ff485c5b31215ab5+51   # md5sum output collection
+          - 2b50af43fdd84a9e906be2d54b92cddf+112  # runner output json
+    out: [out, success]
+    run: framework/testcase.cwl
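
main.cwl is itself a CWL workflow, so the suite can be launched like
any other.  The exact invocation is not part of this diff; the
following is a hypothetical sketch, with made-up hosts, cluster ids and
token, that writes the suite inputs and runs the workflow with cwltool
(the cwltool:Secrets hint is a cwltool extension, hence --enable-ext):

    import json
    import subprocess

    # Hypothetical three-cluster federation; threestep-remote reads
    # arvados_cluster_ids[2], so provide at least three clusters.
    suite_inputs = {
        "arvados_api_hosts": ["api.home1.example", "api.remot2.example",
                              "api.remot3.example"],
        "arvados_cluster_ids": ["home1", "remot2", "remot3"],
        "arvados_api_token": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
    }
    with open("suite-input.json", "w") as f:
        json.dump(suite_inputs, f)

    subprocess.check_call(
        ["cwltool", "--enable-ext", "main.cwl", "suite-input.json"])
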
diff --git a/sdk/cwl/tests/hg19/hg19.fa b/sdk/cwl/tests/hg19/hg19.fa
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/hg19/hg19.fa.amb b/sdk/cwl/tests/hg19/hg19.fa.amb
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/hg19/hg19.fa.ann b/sdk/cwl/tests/hg19/hg19.fa.ann
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/hg19/hg19.fa.fai b/sdk/cwl/tests/hg19/hg19.fa.fai
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/hw.py b/sdk/cwl/tests/hw.py
new file mode 100644 (file)
index 0000000..e45bd72
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+print("Hello world")
diff --git a/sdk/cwl/tests/input/blorp.txt b/sdk/cwl/tests/input/blorp.txt
new file mode 100644 (file)
index 0000000..09fc24d
--- /dev/null
@@ -0,0 +1 @@
+blopper blubber
diff --git a/sdk/cwl/tests/keep-dir-test-input.cwl b/sdk/cwl/tests/keep-dir-test-input.cwl
new file mode 100644 (file)
index 0000000..d7061ef
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: CommandLineTool
+cwlVersion: v1.0
+requirements:
+  - class: ShellCommandRequirement
+inputs:
+  indir:
+    type: Directory
+    inputBinding:
+      prefix: cd
+      position: -1
+outputs:
+  outlist:
+    type: File
+    outputBinding:
+      glob: output.txt
+arguments: [
+  {shellQuote: false, valueFrom: "&&"},
+  "find", ".",
+  {shellQuote: false, valueFrom: "|"},
+  "sort"]
+stdout: output.txt
\ No newline at end of file
diff --git a/sdk/cwl/tests/keep-dir-test-input2.cwl b/sdk/cwl/tests/keep-dir-test-input2.cwl
new file mode 100644 (file)
index 0000000..b7ae262
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: CommandLineTool
+cwlVersion: v1.0
+requirements:
+  - class: ShellCommandRequirement
+inputs:
+  indir:
+    type: Directory
+    inputBinding:
+      prefix: cd
+      position: -1
+    default:
+      class: Directory
+      location: keep:d7514270f356df848477718d58308cc4+94
+outputs:
+  outlist:
+    type: File
+    outputBinding:
+      glob: output.txt
+arguments: [
+  {shellQuote: false, valueFrom: "&&"},
+  "find", ".",
+  {shellQuote: false, valueFrom: "|"},
+  "sort"]
+stdout: output.txt
\ No newline at end of file
diff --git a/sdk/cwl/tests/keep-dir-test-input3.cwl b/sdk/cwl/tests/keep-dir-test-input3.cwl
new file mode 100644 (file)
index 0000000..71f705e
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: CommandLineTool
+cwlVersion: v1.0
+requirements:
+  - class: ShellCommandRequirement
+inputs:
+  indir:
+    type: Directory
+    inputBinding:
+      prefix: cd
+      position: -1
+    default:
+      class: Directory
+      location: keep:d7514270f356df848477718d58308cc4+94/
+outputs:
+  outlist:
+    type: File
+    outputBinding:
+      glob: output.txt
+arguments: [
+  {shellQuote: false, valueFrom: "&&"},
+  "find", ".",
+  {shellQuote: false, valueFrom: "|"},
+  "sort"]
+stdout: output.txt
\ No newline at end of file
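
A note on the three keep-dir-test-input variants above: ShellCommandRequirement plus shellQuote: false lets the unquoted "&&" and "|" act as real shell operators, so the rendered command is effectively `cd <indir> && find . | sort` with stdout captured to output.txt. A rough, self-contained sketch of that composition (the directory path is a hypothetical stand-in for the Directory input):

```python
import subprocess

indir = "/tmp"  # hypothetical stand-in for the CWL Directory input
# prefix "cd" at position -1 puts `cd <indir>` first; the unquoted tokens
# splice in the shell operators; stdout: output.txt captures the listing.
result = subprocess.run(f"cd {indir} && find . | sort",
                        shell=True, capture_output=True, text=True)
print(result.stdout)
```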
diff --git a/sdk/cwl/tests/listing-job.yml b/sdk/cwl/tests/listing-job.yml
new file mode 100644 (file)
index 0000000..ad127f4
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+d:
+  class: Directory
+  location: tmp1
\ No newline at end of file
diff --git a/sdk/cwl/tests/makes_intermediates/echo.cwl b/sdk/cwl/tests/makes_intermediates/echo.cwl
new file mode 100644 (file)
index 0000000..5449bc3
--- /dev/null
@@ -0,0 +1,14 @@
+class: CommandLineTool
+cwlVersion: v1.0
+requirements:
+  InitialWorkDirRequirement:
+    listing:
+      - $(inputs.inp1)
+      - $(inputs.inp2)
+      - $(inputs.inp3)
+inputs:
+  inp1: File
+  inp2: [File, Directory]
+  inp3: Directory
+outputs: []
+arguments: [echo, $(inputs.inp1), $(inputs.inp2), $(inputs.inp3)]
diff --git a/sdk/cwl/tests/makes_intermediates/hello1.txt b/sdk/cwl/tests/makes_intermediates/hello1.txt
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/makes_intermediates/run_in_single.cwl b/sdk/cwl/tests/makes_intermediates/run_in_single.cwl
new file mode 100644 (file)
index 0000000..bb596b2
--- /dev/null
@@ -0,0 +1,38 @@
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  SubworkflowFeatureRequirement: {}
+inputs:
+  inp1:
+    type: File
+    default:
+      class: File
+      location: hello1.txt
+  inp2:
+    type: [File, Directory]
+    default:
+      class: File
+      basename: "hello2.txt"
+      contents: "Hello world"
+  inp3:
+    type: [File, Directory]
+    default:
+      class: Directory
+      basename: inp3
+      listing:
+        - class: File
+          basename: "hello3.txt"
+          contents: "hello world"
+outputs: []
+steps:
+  step1:
+    requirements:
+      arv:RunInSingleContainer: {}
+    in:
+      inp1: inp1
+      inp2: inp2
+      inp3: inp3
+    out: []
+    run: subwf.cwl
diff --git a/sdk/cwl/tests/makes_intermediates/subwf.cwl b/sdk/cwl/tests/makes_intermediates/subwf.cwl
new file mode 100644 (file)
index 0000000..1852ab4
--- /dev/null
@@ -0,0 +1,15 @@
+cwlVersion: v1.0
+class: Workflow
+inputs:
+  inp1: File
+  inp2: File
+  inp3: Directory
+outputs: []
+steps:
+  step1:
+    in:
+      inp1: inp1
+      inp2: inp2
+      inp3: inp3
+    out: []
+    run: echo.cwl
diff --git a/sdk/cwl/tests/matcher.py b/sdk/cwl/tests/matcher.py
new file mode 100644 (file)
index 0000000..04e67b7
--- /dev/null
@@ -0,0 +1,34 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from builtins import object
+
+import difflib
+import json
+import re
+
+
+class JsonDiffMatcher(object):
+    """Raise AssertionError with a readable JSON diff when not __eq__().
+
+    Used with assert_called_with() so it's possible for a human to see
+    the differences between expected and actual call arguments that
+    include non-trivial data structures.
+    """
+    def __init__(self, expected):
+        self.expected = expected
+
+    def __eq__(self, actual):
+        expected_json = json.dumps(self.expected, sort_keys=True, indent=2)
+        actual_json = json.dumps(actual, sort_keys=True, indent=2)
+        if expected_json != actual_json:
+            raise AssertionError("".join(difflib.context_diff(
+                expected_json.splitlines(1),
+                actual_json.splitlines(1),
+                fromfile="Expected", tofile="Actual")))
+        return True
+
+
+def StripYAMLComments(yml):
+    return re.sub(r'(?ms)^(#.*?\n)*\n*', '', yml)
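
A short usage sketch for the matcher above, assuming matcher.py is importable as in these tests (the payload dict is made up for illustration):

```python
import mock  # the standalone mock package, as imported by these tests
from .matcher import JsonDiffMatcher

m = mock.MagicMock()
m(body={"state": "Committed", "priority": 500})
# Wrapping the expected dict means a mismatch raises AssertionError with a
# readable context diff of the two JSON dumps instead of a bare repr.
m.assert_called_with(body=JsonDiffMatcher({"state": "Committed", "priority": 500}))
```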
diff --git a/sdk/cwl/tests/mock_discovery.py b/sdk/cwl/tests/mock_discovery.py
new file mode 100644 (file)
index 0000000..9d9104f
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import json
+import arvados
+
+_rootDesc = None
+
+def get_rootDesc():
+    global _rootDesc
+    if not _rootDesc:
+        try:
+            _rootDesc = arvados.api('v1')._rootDesc
+        except ValueError:
+            raise Exception("Test requires an running API server to fetch discovery document")
+    return _rootDesc
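
Usage note: the module-level `_rootDesc` cache means only the first caller pays for the discovery fetch. A sketch, assuming the tests package is importable and an API server is reachable (per the exception above):

```python
from .mock_discovery import get_rootDesc

rd1 = get_rootDesc()
rd2 = get_rootDesc()
assert rd1 is rd2  # later calls reuse the cached discovery document
```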
diff --git a/sdk/cwl/tests/noreuse.cwl b/sdk/cwl/tests/noreuse.cwl
new file mode 100644 (file)
index 0000000..4c95eb6
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+inputs: []
+outputs: []
+steps:
+  step1:
+    in:
+      message:
+        default: "hello world"
+    out: [output]
+    hints:
+      arv:ReuseRequirement:
+        enableReuse: false
+    run: stdout.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/octo.yml b/sdk/cwl/tests/octo.yml
new file mode 100644 (file)
index 0000000..b4b9f72
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+inp:
+  class: File
+  location: "octothorpe/item %231.txt"
\ No newline at end of file
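
The %23 in the location above is the URI escape for "#", which would otherwise be read as a fragment delimiter; a quick check:

```python
from urllib.parse import quote, unquote

assert quote("#") == "%23"
assert unquote("octothorpe/item %231.txt") == "octothorpe/item #1.txt"
```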
diff --git a/sdk/cwl/tests/octothorpe/item #1.txt b/sdk/cwl/tests/octothorpe/item #1.txt
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/order/empty_order.json b/sdk/cwl/tests/order/empty_order.json
new file mode 100644 (file)
index 0000000..0967ef4
--- /dev/null
@@ -0,0 +1 @@
+{}
diff --git a/sdk/cwl/tests/order/inputs_test_order.json b/sdk/cwl/tests/order/inputs_test_order.json
new file mode 100644 (file)
index 0000000..8830523
--- /dev/null
@@ -0,0 +1,9 @@
+{
+    "fileInput": {
+        "class": "File",
+        "path": "../input/blorp.txt"
+    },
+    "boolInput": true,
+    "floatInput": 1.234,
+    "optionalFloatInput": null
+}
diff --git a/sdk/cwl/tests/samples/sample1_S01_R1_001.fastq.gz b/sdk/cwl/tests/samples/sample1_S01_R1_001.fastq.gz
new file mode 100644 (file)
index 0000000..e98b246
Binary files /dev/null and b/sdk/cwl/tests/samples/sample1_S01_R1_001.fastq.gz differ
diff --git a/sdk/cwl/tests/samples/sample1_S01_R3_001.fastq.gz b/sdk/cwl/tests/samples/sample1_S01_R3_001.fastq.gz
new file mode 100644 (file)
index 0000000..552c0f8
Binary files /dev/null and b/sdk/cwl/tests/samples/sample1_S01_R3_001.fastq.gz differ
diff --git a/sdk/cwl/tests/samples/sample2_S01_R1_001.fastq.gz b/sdk/cwl/tests/samples/sample2_S01_R1_001.fastq.gz
new file mode 100644 (file)
index 0000000..5ef67f5
Binary files /dev/null and b/sdk/cwl/tests/samples/sample2_S01_R1_001.fastq.gz differ
diff --git a/sdk/cwl/tests/samples/sample2_S01_R3_001.fastq.gz b/sdk/cwl/tests/samples/sample2_S01_R3_001.fastq.gz
new file mode 100644 (file)
index 0000000..43342f9
Binary files /dev/null and b/sdk/cwl/tests/samples/sample2_S01_R3_001.fastq.gz differ
diff --git a/sdk/cwl/tests/secondary/dir/hg19.fa b/sdk/cwl/tests/secondary/dir/hg19.fa
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/secondary/dir/hg19.fa.amb b/sdk/cwl/tests/secondary/dir/hg19.fa.amb
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/secondary/dir/hg19.fa.ann b/sdk/cwl/tests/secondary/dir/hg19.fa.ann
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/secondary/dir/hg19.fa.fai b/sdk/cwl/tests/secondary/dir/hg19.fa.fai
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/secondary/ls.cwl b/sdk/cwl/tests/secondary/ls.cwl
new file mode 100644 (file)
index 0000000..6c49757
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  i:
+    type: File
+    inputBinding:
+      position: 1
+    secondaryFiles:
+      - .fai
+outputs: []
+arguments: [ls, $(inputs.i), $(inputs.i.path).fai]
diff --git a/sdk/cwl/tests/secondary/sub.cwl b/sdk/cwl/tests/secondary/sub.cwl
new file mode 100644 (file)
index 0000000..19e4077
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+inputs:
+  i:
+    type: File
+    secondaryFiles:
+      - .fai
+outputs: []
+steps:
+  step1:
+    in:
+      i: i
+    out: []
+    run: ls.cwl
diff --git a/sdk/cwl/tests/secondary/wf-job.yml b/sdk/cwl/tests/secondary/wf-job.yml
new file mode 100644 (file)
index 0000000..7eb6bce
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+i:
+  class: File
+  location: keep:f225e6259bdd63bc7240599648dde9f1+97/hg19.fa
diff --git a/sdk/cwl/tests/secondary/wf.cwl b/sdk/cwl/tests/secondary/wf.cwl
new file mode 100644 (file)
index 0000000..5539562
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+requirements:
+  SubworkflowFeatureRequirement: {}
+inputs:
+  i:
+    type: File
+    # secondaryFiles:
+    #   - .fai
+    #   - .ann
+    #   - .amb
+outputs: []
+steps:
+  step1:
+    in:
+      i: i
+    out: []
+    run: sub.cwl
+    requirements:
+      arv:RunInSingleContainer: {}
diff --git a/sdk/cwl/tests/secondaryFiles/example1.cwl b/sdk/cwl/tests/secondaryFiles/example1.cwl
new file mode 100644 (file)
index 0000000..20847d4
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: Workflow
+cwlVersion: v1.0
+inputs:
+  toplevel_input: File
+outputs: []
+steps:
+  step1:
+    in:
+      step_input: toplevel_input
+    out: []
+    run:
+      id: sub
+      class: CommandLineTool
+      inputs:
+        step_input:
+          type: File
+          secondaryFiles:
+            - .idx
+      outputs: []
+      baseCommand: echo
diff --git a/sdk/cwl/tests/secondaryFiles/example3.cwl b/sdk/cwl/tests/secondaryFiles/example3.cwl
new file mode 100644 (file)
index 0000000..29f58f0
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: CommandLineTool
+cwlVersion: v1.0
+inputs:
+  step_input:
+    type: File
+    secondaryFiles:
+      - .idx
+    default:
+      class: File
+      location: hello.txt
+outputs: []
+baseCommand: echo
diff --git a/sdk/cwl/tests/secondaryFiles/hello.txt b/sdk/cwl/tests/secondaryFiles/hello.txt
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/secondaryFiles/hello.txt.idx b/sdk/cwl/tests/secondaryFiles/hello.txt.idx
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/secondaryFiles/inp3.yml b/sdk/cwl/tests/secondaryFiles/inp3.yml
new file mode 100644 (file)
index 0000000..2e61ee3
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+toplevel_input:
+  class: File
+  location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt
\ No newline at end of file
diff --git a/sdk/cwl/tests/secret_test_job.yml b/sdk/cwl/tests/secret_test_job.yml
new file mode 100644 (file)
index 0000000..254ed91
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+pw: blorp
diff --git a/sdk/cwl/tests/stdout.cwl b/sdk/cwl/tests/stdout.cwl
new file mode 100644 (file)
index 0000000..2100c37
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+baseCommand: echo
+stdout: output.txt
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+hints:
+  arv:RuntimeConstraints:
+    outputDirType: local_output_dir
+inputs:
+  message:
+    type: string
+    inputBinding:
+      position: 1
+outputs:
+  output:
+    type: stdout
diff --git a/sdk/cwl/tests/submit_test_job.json b/sdk/cwl/tests/submit_test_job.json
new file mode 100644 (file)
index 0000000..49d5944
--- /dev/null
@@ -0,0 +1,23 @@
+{
+    "x": {
+        "class": "File",
+        "path": "input/blorp.txt"
+    },
+    "y": {
+        "class": "Directory",
+        "location": "keep:99999999999999999999999999999998+99",
+        "listing": [{
+            "class": "File",
+            "location": "keep:99999999999999999999999999999998+99/file1.txt"
+        }]
+    },
+    "z": {
+        "class": "Directory",
+        "basename": "anonymous",
+        "listing": [{
+            "basename": "renamed.txt",
+            "class": "File",
+            "location": "keep:99999999999999999999999999999998+99/file1.txt"
+        }]
+    }
+}
diff --git a/sdk/cwl/tests/submit_test_job_missing.json b/sdk/cwl/tests/submit_test_job_missing.json
new file mode 100644 (file)
index 0000000..02d61fa
--- /dev/null
@@ -0,0 +1,14 @@
+{
+    "x": {
+        "class": "File",
+        "path": "input/blorp.txt"
+    },
+    "y": {
+        "class": "Directory",
+        "location": "keep:99999999999999999999999999999998+99",
+        "listing": [{
+            "class": "File",
+            "location": "keep:99999999999999999999999999999998+99/file1.txt"
+        }]
+    }
+}
diff --git a/sdk/cwl/tests/test_container.py b/sdk/cwl/tests/test_container.py
new file mode 100644 (file)
index 0000000..1a57da3
--- /dev/null
@@ -0,0 +1,820 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from builtins import str
+from builtins import object
+
+import arvados_cwl
+import arvados_cwl.context
+import arvados_cwl.util
+from arvados_cwl.arvdocker import arv_docker_clear_cache
+import copy
+import arvados.config
+import logging
+import mock
+import unittest
+import os
+import functools
+import cwltool.process
+import cwltool.secrets
+from schema_salad.ref_resolver import Loader
+from schema_salad.sourceline import cmap
+
+from .matcher import JsonDiffMatcher
+from .mock_discovery import get_rootDesc
+
+if not os.getenv('ARVADOS_DEBUG'):
+    logging.getLogger('arvados.cwl-runner').setLevel(logging.WARN)
+    logging.getLogger('arvados.arv-run').setLevel(logging.WARN)
+
+class CollectionMock(object):
+    def __init__(self, vwdmock, *args, **kwargs):
+        self.vwdmock = vwdmock
+        self.count = 0
+
+    def open(self, *args, **kwargs):
+        self.count += 1
+        return self.vwdmock.open(*args, **kwargs)
+
+    def copy(self, *args, **kwargs):
+        self.count += 1
+        self.vwdmock.copy(*args, **kwargs)
+
+    def save_new(self, *args, **kwargs):
+        pass
+
+    def __len__(self):
+        return self.count
+
+    def portable_data_hash(self):
+        if self.count == 0:
+            return arvados.config.EMPTY_BLOCK_LOCATOR
+        else:
+            return "99999999999999999999999999999996+99"
+
+
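A note on the sentinel in portable_data_hash() above: until the mock records a write, it returns Keep's empty-collection locator, which is just the MD5 of zero bytes plus a zero-length suffix:

```python
import hashlib

# arvados.config.EMPTY_BLOCK_LOCATOR is "d41d8cd98f00b204e9800998ecf8427e+0"
assert hashlib.md5(b"").hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"
```
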
+class TestContainer(unittest.TestCase):
+
+    def helper(self, runner, enable_reuse=True):
+        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
+
+        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
+                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
+        loadingContext = arvados_cwl.context.ArvLoadingContext(
+            {"avsc_names": avsc_names,
+             "basedir": "",
+             "make_fs_access": make_fs_access,
+             "loader": Loader({}),
+             "metadata": {"cwlVersion": "v1.0"}})
+        runtimeContext = arvados_cwl.context.ArvRuntimeContext(
+            {"work_api": "containers",
+             "basedir": "",
+             "name": "test_run_"+str(enable_reuse),
+             "make_fs_access": make_fs_access,
+             "tmpdir": "/tmp",
+             "enable_reuse": enable_reuse,
+             "priority": 500,
+             "project_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+            })
+
+        return loadingContext, runtimeContext
+
+    # Helper function to set up the ArvCwlExecutor to use the containers API
+    # and test that the RuntimeStatusLoggingHandler is set up correctly
+    def setup_and_test_container_executor_and_logging(self, gcc_mock):
+        api = mock.MagicMock()
+        api._rootDesc = copy.deepcopy(get_rootDesc())
+        del api._rootDesc.get('resources')['jobs']['methods']['create']
+
+        # Make sure ArvCwlExecutor thinks it's running inside a container so it
+        # adds the logging handler that will call runtime_status_update() mock
+        self.assertFalse(gcc_mock.called)
+        runner = arvados_cwl.ArvCwlExecutor(api)
+        self.assertEqual(runner.work_api, 'containers')
+        root_logger = logging.getLogger('')
+        handlerClasses = [h.__class__ for h in root_logger.handlers]
+        self.assertTrue(arvados_cwl.RuntimeStatusLoggingHandler in handlerClasses)
+        return runner
+
+    # The test passes no builder.resources
+    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+    def test_run(self, keepdocker):
+        for enable_reuse in (True, False):
+            arv_docker_clear_cache()
+
+            runner = mock.MagicMock()
+            runner.ignore_docker_for_reuse = False
+            runner.intermediate_output_ttl = 0
+            runner.secret_store = cwltool.secrets.SecretStore()
+
+            keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+            runner.api.collections().get().execute.return_value = {
+                "portable_data_hash": "99999999999999999999999999999993+99"}
+
+            tool = cmap({
+                "inputs": [],
+                "outputs": [],
+                "baseCommand": "ls",
+                "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+                "id": "#",
+                "class": "CommandLineTool"
+            })
+
+            loadingContext, runtimeContext = self.helper(runner, enable_reuse)
+
+            arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
+            arvtool.formatgraph = None
+
+            for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+                j.run(runtimeContext)
+                runner.api.container_requests().create.assert_called_with(
+                    body=JsonDiffMatcher({
+                        'environment': {
+                            'HOME': '/var/spool/cwl',
+                            'TMPDIR': '/tmp'
+                        },
+                        'name': 'test_run_'+str(enable_reuse),
+                        'runtime_constraints': {
+                            'vcpus': 1,
+                            'ram': 1073741824
+                        },
+                        'use_existing': enable_reuse,
+                        'priority': 500,
+                        'mounts': {
+                            '/tmp': {'kind': 'tmp',
+                                     "capacity": 1073741824
+                                 },
+                            '/var/spool/cwl': {'kind': 'tmp',
+                                               "capacity": 1073741824 }
+                        },
+                        'state': 'Committed',
+                        'output_name': 'Output for step test_run_'+str(enable_reuse),
+                        'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+                        'output_path': '/var/spool/cwl',
+                        'output_ttl': 0,
+                        'container_image': '99999999999999999999999999999993+99',
+                        'command': ['ls', '/var/spool/cwl'],
+                        'cwd': '/var/spool/cwl',
+                        'scheduling_parameters': {},
+                        'properties': {},
+                        'secret_mounts': {}
+                    }))
+
+    # The test passes some fields in builder.resources
+    # For the remaining fields, the defaults will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+    def test_resource_requirements(self, keepdocker):
+        arv_docker_clear_cache()
+        runner = mock.MagicMock()
+        runner.ignore_docker_for_reuse = False
+        runner.intermediate_output_ttl = 3600
+        runner.secret_store = cwltool.secrets.SecretStore()
+
+        keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+        runner.api.collections().get().execute.return_value = {
+            "portable_data_hash": "99999999999999999999999999999993+99"}
+
+        tool = cmap({
+            "inputs": [],
+            "outputs": [],
+            "hints": [{
+                "class": "ResourceRequirement",
+                "coresMin": 3,
+                "ramMin": 3000,
+                "tmpdirMin": 4000,
+                "outdirMin": 5000
+            }, {
+                "class": "http://arvados.org/cwl#RuntimeConstraints",
+                "keep_cache": 512
+            }, {
+                "class": "http://arvados.org/cwl#APIRequirement",
+            }, {
+                "class": "http://arvados.org/cwl#PartitionRequirement",
+                "partition": "blurb"
+            }, {
+                "class": "http://arvados.org/cwl#IntermediateOutput",
+                "outputTTL": 7200
+            }, {
+                "class": "http://arvados.org/cwl#ReuseRequirement",
+                "enableReuse": False
+            }],
+            "baseCommand": "ls",
+            "id": "#",
+            "class": "CommandLineTool"
+        })
+
+        loadingContext, runtimeContext = self.helper(runner)
+        runtimeContext.name = "test_resource_requirements"
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
+        arvtool.formatgraph = None
+        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
+
+        call_args, call_kwargs = runner.api.container_requests().create.call_args
+
+        call_body_expected = {
+            'environment': {
+                'HOME': '/var/spool/cwl',
+                'TMPDIR': '/tmp'
+            },
+            'name': 'test_resource_requirements',
+            'runtime_constraints': {
+                'vcpus': 3,
+                'ram': 3145728000,
+                'keep_cache_ram': 536870912,
+                'API': True
+            },
+            'use_existing': False,
+            'priority': 500,
+            'mounts': {
+                '/tmp': {'kind': 'tmp',
+                         "capacity": 4194304000 },
+                '/var/spool/cwl': {'kind': 'tmp',
+                                   "capacity": 5242880000 }
+            },
+            'state': 'Committed',
+            'output_name': 'Output for step test_resource_requirements',
+            'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+            'output_path': '/var/spool/cwl',
+            'output_ttl': 7200,
+            'container_image': '99999999999999999999999999999993+99',
+            'command': ['ls'],
+            'cwd': '/var/spool/cwl',
+            'scheduling_parameters': {
+                'partitions': ['blurb']
+            },
+            'properties': {},
+            'secret_mounts': {}
+        }
+
+        call_body = call_kwargs.get('body', None)
+        self.assertNotEqual(None, call_body)
+        for key in call_body:
+            self.assertEqual(call_body_expected.get(key), call_body.get(key))
+
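The byte counts asserted above follow from CWL's convention that ResourceRequirement values are in mebibytes; a quick check of the conversions (including the 1024 MiB defaults used by the other tests):

```python
MiB = 2 ** 20
assert 1024 * MiB == 1073741824  # default ram / tmpdir / outdir
assert 3000 * MiB == 3145728000  # ramMin     -> runtime_constraints['ram']
assert  512 * MiB == 536870912   # keep_cache -> keep_cache_ram
assert 4000 * MiB == 4194304000  # tmpdirMin  -> /tmp mount capacity
assert 5000 * MiB == 5242880000  # outdirMin  -> /var/spool/cwl capacity
```
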
+
+    # The test passes some fields in builder.resources
+    # For the remaining fields, the defaults will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+    @mock.patch("arvados.collection.Collection")
+    def test_initial_work_dir(self, collection_mock, keepdocker):
+        arv_docker_clear_cache()
+        runner = mock.MagicMock()
+        runner.ignore_docker_for_reuse = False
+        runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
+
+        keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+        runner.api.collections().get().execute.return_value = {
+            "portable_data_hash": "99999999999999999999999999999993+99"}
+
+        sourcemock = mock.MagicMock()
+        def get_collection_mock(p):
+            if "/" in p:
+                return (sourcemock, p.split("/", 1)[1])
+            else:
+                return (sourcemock, "")
+        runner.fs_access.get_collection.side_effect = get_collection_mock
+
+        vwdmock = mock.MagicMock()
+        collection_mock.side_effect = lambda *args, **kwargs: CollectionMock(vwdmock, *args, **kwargs)
+
+        tool = cmap({
+            "inputs": [],
+            "outputs": [],
+            "hints": [{
+                "class": "InitialWorkDirRequirement",
+                "listing": [{
+                    "class": "File",
+                    "basename": "foo",
+                    "location": "keep:99999999999999999999999999999995+99/bar"
+                },
+                {
+                    "class": "Directory",
+                    "basename": "foo2",
+                    "location": "keep:99999999999999999999999999999995+99"
+                },
+                {
+                    "class": "File",
+                    "basename": "filename",
+                    "location": "keep:99999999999999999999999999999995+99/baz/filename"
+                },
+                {
+                    "class": "Directory",
+                    "basename": "subdir",
+                    "location": "keep:99999999999999999999999999999995+99/subdir"
+                }]
+            }],
+            "baseCommand": "ls",
+            "id": "#",
+            "class": "CommandLineTool"
+        })
+
+        loadingContext, runtimeContext = self.helper(runner)
+        runtimeContext.name = "test_initial_work_dir"
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
+        arvtool.formatgraph = None
+        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
+
+        call_args, call_kwargs = runner.api.container_requests().create.call_args
+
+        vwdmock.copy.assert_has_calls([mock.call('bar', 'foo', source_collection=sourcemock)])
+        vwdmock.copy.assert_has_calls([mock.call('', 'foo2', source_collection=sourcemock)])
+        vwdmock.copy.assert_has_calls([mock.call('baz/filename', 'filename', source_collection=sourcemock)])
+        vwdmock.copy.assert_has_calls([mock.call('subdir', 'subdir', source_collection=sourcemock)])
+
+        call_body_expected = {
+            'environment': {
+                'HOME': '/var/spool/cwl',
+                'TMPDIR': '/tmp'
+            },
+            'name': 'test_initial_work_dir',
+            'runtime_constraints': {
+                'vcpus': 1,
+                'ram': 1073741824
+            },
+            'use_existing': True,
+            'priority': 500,
+            'mounts': {
+                '/tmp': {'kind': 'tmp',
+                         "capacity": 1073741824 },
+                '/var/spool/cwl': {'kind': 'tmp',
+                                   "capacity": 1073741824 },
+                '/var/spool/cwl/foo': {
+                    'kind': 'collection',
+                    'path': 'foo',
+                    'portable_data_hash': '99999999999999999999999999999996+99'
+                },
+                '/var/spool/cwl/foo2': {
+                    'kind': 'collection',
+                    'path': 'foo2',
+                    'portable_data_hash': '99999999999999999999999999999996+99'
+                },
+                '/var/spool/cwl/filename': {
+                    'kind': 'collection',
+                    'path': 'filename',
+                    'portable_data_hash': '99999999999999999999999999999996+99'
+                },
+                '/var/spool/cwl/subdir': {
+                    'kind': 'collection',
+                    'path': 'subdir',
+                    'portable_data_hash': '99999999999999999999999999999996+99'
+                }
+            },
+            'state': 'Committed',
+            'output_name': 'Output for step test_initial_work_dir',
+            'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+            'output_path': '/var/spool/cwl',
+            'output_ttl': 0,
+            'container_image': '99999999999999999999999999999993+99',
+            'command': ['ls'],
+            'cwd': '/var/spool/cwl',
+            'scheduling_parameters': {
+            },
+            'properties': {},
+            'secret_mounts': {}
+        }
+
+        call_body = call_kwargs.get('body', None)
+        self.assertNotEqual(None, call_body)
+        for key in call_body:
+            self.assertEqual(call_body_expected.get(key), call_body.get(key))
+
+
+    # Test redirecting stdin/stdout/stderr
+    @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+    def test_redirects(self, keepdocker):
+        arv_docker_clear_cache()
+
+        runner = mock.MagicMock()
+        runner.ignore_docker_for_reuse = False
+        runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
+
+        keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+        runner.api.collections().get().execute.return_value = {
+            "portable_data_hash": "99999999999999999999999999999993+99"}
+
+        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
+
+        tool = cmap({
+            "inputs": [],
+            "outputs": [],
+            "baseCommand": "ls",
+            "stdout": "stdout.txt",
+            "stderr": "stderr.txt",
+            "stdin": "/keep/99999999999999999999999999999996+99/file.txt",
+            "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+            "id": "#",
+            "class": "CommandLineTool"
+        })
+
+        loadingContext, runtimeContext = self.helper(runner)
+        runtimeContext.name = "test_run_redirect"
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
+        arvtool.formatgraph = None
+        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
+            runner.api.container_requests().create.assert_called_with(
+                body=JsonDiffMatcher({
+                    'environment': {
+                        'HOME': '/var/spool/cwl',
+                        'TMPDIR': '/tmp'
+                    },
+                    'name': 'test_run_redirect',
+                    'runtime_constraints': {
+                        'vcpus': 1,
+                        'ram': 1073741824
+                    },
+                    'use_existing': True,
+                    'priority': 500,
+                    'mounts': {
+                        '/tmp': {'kind': 'tmp',
+                                 "capacity": 1073741824 },
+                        '/var/spool/cwl': {'kind': 'tmp',
+                                           "capacity": 1073741824 },
+                        "stderr": {
+                            "kind": "file",
+                            "path": "/var/spool/cwl/stderr.txt"
+                        },
+                        "stdin": {
+                            "kind": "collection",
+                            "path": "file.txt",
+                            "portable_data_hash": "99999999999999999999999999999996+99"
+                        },
+                        "stdout": {
+                            "kind": "file",
+                            "path": "/var/spool/cwl/stdout.txt"
+                        },
+                    },
+                    'state': 'Committed',
+                    "output_name": "Output for step test_run_redirect",
+                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+                    'output_path': '/var/spool/cwl',
+                    'output_ttl': 0,
+                    'container_image': '99999999999999999999999999999993+99',
+                    'command': ['ls', '/var/spool/cwl'],
+                    'cwd': '/var/spool/cwl',
+                    'scheduling_parameters': {},
+                    'properties': {},
+                    'secret_mounts': {}
+                }))
+
+    @mock.patch("arvados.collection.Collection")
+    def test_done(self, col):
+        api = mock.MagicMock()
+
+        runner = mock.MagicMock()
+        runner.api = api
+        runner.num_retries = 0
+        runner.ignore_docker_for_reuse = False
+        runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
+
+        runner.api.containers().get().execute.return_value = {"state":"Complete",
+                                                              "output": "abc+123",
+                                                              "exit_code": 0}
+
+        col().open.return_value = []
+
+        loadingContext, runtimeContext = self.helper(runner)
+
+        arvjob = arvados_cwl.ArvadosContainer(runner,
+                                              runtimeContext,
+                                              mock.MagicMock(),
+                                              {},
+                                              None,
+                                              [],
+                                              [],
+                                              "testjob")
+        arvjob.output_callback = mock.MagicMock()
+        arvjob.collect_outputs = mock.MagicMock()
+        arvjob.successCodes = [0]
+        arvjob.outdir = "/var/spool/cwl"
+        arvjob.output_ttl = 3600
+
+        arvjob.collect_outputs.return_value = {"out": "stuff"}
+
+        arvjob.done({
+            "state": "Final",
+            "log_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz1",
+            "output_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz2",
+            "uuid": "zzzzz-xvhdp-zzzzzzzzzzzzzzz",
+            "container_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz",
+            "modified_at": "2017-05-26T12:01:22Z"
+        })
+
+        self.assertFalse(api.collections().create.called)
+        self.assertFalse(runner.runtime_status_error.called)
+
+        arvjob.collect_outputs.assert_called_with("keep:abc+123")
+        arvjob.output_callback.assert_called_with({"out": "stuff"}, "success")
+        runner.add_intermediate_output.assert_called_with("zzzzz-4zz18-zzzzzzzzzzzzzz2")
+
+    # Test to make sure we don't call runtime_status_update if we already did
+    # somewhere higher up in the call stack
+    @mock.patch("arvados_cwl.util.get_current_container")
+    def test_recursive_runtime_status_update(self, gcc_mock):
+        self.setup_and_test_container_executor_and_logging(gcc_mock)
+        root_logger = logging.getLogger('')
+
+        # get_current_container is invoked when we call runtime_status_update
+        # so try and log again!
+        gcc_mock.side_effect = lambda *args: root_logger.error("Second Error")
+        try:
+            root_logger.error("First Error")
+        except RuntimeError:
+            self.fail("RuntimeStatusLoggingHandler should not be called recursively")
+
+    @mock.patch("arvados_cwl.ArvCwlExecutor.runtime_status_update")
+    @mock.patch("arvados_cwl.util.get_current_container")
+    @mock.patch("arvados.collection.CollectionReader")
+    @mock.patch("arvados.collection.Collection")
+    def test_child_failure(self, col, reader, gcc_mock, rts_mock):
+        runner = self.setup_and_test_container_executor_and_logging(gcc_mock)
+
+        gcc_mock.return_value = {"uuid" : "zzzzz-dz642-zzzzzzzzzzzzzzz"}
+        self.assertTrue(gcc_mock.called)
+
+        runner.num_retries = 0
+        runner.ignore_docker_for_reuse = False
+        runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
+        runner.label = mock.MagicMock()
+        runner.label.return_value = '[container testjob]'
+
+        runner.api.containers().get().execute.return_value = {
+            "state":"Complete",
+            "output": "abc+123",
+            "exit_code": 1,
+            "log": "def+234"
+        }
+
+        col().open.return_value = []
+
+        loadingContext, runtimeContext = self.helper(runner)
+
+        arvjob = arvados_cwl.ArvadosContainer(runner,
+                                              runtimeContext,
+                                              mock.MagicMock(),
+                                              {},
+                                              None,
+                                              [],
+                                              [],
+                                              "testjob")
+        arvjob.output_callback = mock.MagicMock()
+        arvjob.collect_outputs = mock.MagicMock()
+        arvjob.successCodes = [0]
+        arvjob.outdir = "/var/spool/cwl"
+        arvjob.output_ttl = 3600
+        arvjob.collect_outputs.return_value = {"out": "stuff"}
+
+        arvjob.done({
+            "state": "Final",
+            "log_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz1",
+            "output_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz2",
+            "uuid": "zzzzz-xvhdp-zzzzzzzzzzzzzzz",
+            "container_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz",
+            "modified_at": "2017-05-26T12:01:22Z"
+        })
+
+        rts_mock.assert_called_with(
+            'error',
+            'arvados.cwl-runner: [container testjob] (zzzzz-xvhdp-zzzzzzzzzzzzzzz) error log:',
+            '  ** log is empty **'
+        )
+        arvjob.output_callback.assert_called_with({"out": "stuff"}, "permanentFail")
+
+    # The test passes no builder.resources
+    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+    def test_mounts(self, keepdocker):
+        arv_docker_clear_cache()
+
+        runner = mock.MagicMock()
+        runner.ignore_docker_for_reuse = False
+        runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
+
+        keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+        runner.api.collections().get().execute.return_value = {
+            "portable_data_hash": "99999999999999999999999999999994+99",
+            "manifest_text": ". 99999999999999999999999999999994+99 0:0:file1 0:0:file2"}
+
+        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
+
+        tool = cmap({
+            "inputs": [
+                {"id": "p1",
+                 "type": "Directory"}
+            ],
+            "outputs": [],
+            "baseCommand": "ls",
+            "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+            "id": "#",
+            "class": "CommandLineTool"
+        })
+
+        loadingContext, runtimeContext = self.helper(runner)
+        runtimeContext.name = "test_run_mounts"
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
+        arvtool.formatgraph = None
+        job_order = {
+            "p1": {
+                "class": "Directory",
+                "location": "keep:99999999999999999999999999999994+44",
+                "listing": [
+                    {
+                        "class": "File",
+                        "location": "keep:99999999999999999999999999999994+44/file1",
+                    },
+                    {
+                        "class": "File",
+                        "location": "keep:99999999999999999999999999999994+44/file2",
+                    }
+                ]
+            }
+        }
+        for j in arvtool.job(job_order, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
+            runner.api.container_requests().create.assert_called_with(
+                body=JsonDiffMatcher({
+                    'environment': {
+                        'HOME': '/var/spool/cwl',
+                        'TMPDIR': '/tmp'
+                    },
+                    'name': 'test_run_mounts',
+                    'runtime_constraints': {
+                        'vcpus': 1,
+                        'ram': 1073741824
+                    },
+                    'use_existing': True,
+                    'priority': 500,
+                    'mounts': {
+                        "/keep/99999999999999999999999999999994+44": {
+                            "kind": "collection",
+                            "portable_data_hash": "99999999999999999999999999999994+44"
+                        },
+                        '/tmp': {'kind': 'tmp',
+                                 "capacity": 1073741824 },
+                        '/var/spool/cwl': {'kind': 'tmp',
+                                           "capacity": 1073741824 }
+                    },
+                    'state': 'Committed',
+                    'output_name': 'Output for step test_run_mounts',
+                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+                    'output_path': '/var/spool/cwl',
+                    'output_ttl': 0,
+                    'container_image': '99999999999999999999999999999994+99',
+                    'command': ['ls', '/var/spool/cwl'],
+                    'cwd': '/var/spool/cwl',
+                    'scheduling_parameters': {},
+                    'properties': {},
+                    'secret_mounts': {}
+                }))
+
+    # The test passes no builder.resources
+    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+    def test_secrets(self, keepdocker):
+        arv_docker_clear_cache()
+
+        runner = mock.MagicMock()
+        runner.ignore_docker_for_reuse = False
+        runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
+
+        keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+        runner.api.collections().get().execute.return_value = {
+            "portable_data_hash": "99999999999999999999999999999993+99"}
+
+        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
+
+        tool = cmap({"arguments": ["md5sum", "example.conf"],
+                     "class": "CommandLineTool",
+                     "hints": [
+                         {
+                             "class": "http://commonwl.org/cwltool#Secrets",
+                             "secrets": [
+                                 "#secret_job.cwl/pw"
+                             ]
+                         }
+                     ],
+                     "id": "#secret_job.cwl",
+                     "inputs": [
+                         {
+                             "id": "#secret_job.cwl/pw",
+                             "type": "string"
+                         }
+                     ],
+                     "outputs": [
+                     ],
+                     "requirements": [
+                         {
+                             "class": "InitialWorkDirRequirement",
+                             "listing": [
+                                 {
+                                     "entry": "username: user\npassword: $(inputs.pw)\n",
+                                     "entryname": "example.conf"
+                                 }
+                             ]
+                         }
+                     ]})
+
+        loadingContext, runtimeContext = self.helper(runner)
+        runtimeContext.name = "test_secrets"
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
+        arvtool.formatgraph = None
+
+        job_order = {"pw": "blorp"}
+        runner.secret_store.store(["pw"], job_order)
+
+        for j in arvtool.job(job_order, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
+            runner.api.container_requests().create.assert_called_with(
+                body=JsonDiffMatcher({
+                    'environment': {
+                        'HOME': '/var/spool/cwl',
+                        'TMPDIR': '/tmp'
+                    },
+                    'name': 'test_secrets',
+                    'runtime_constraints': {
+                        'vcpus': 1,
+                        'ram': 1073741824
+                    },
+                    'use_existing': True,
+                    'priority': 500,
+                    'mounts': {
+                        '/tmp': {'kind': 'tmp',
+                                 "capacity": 1073741824
+                             },
+                        '/var/spool/cwl': {'kind': 'tmp',
+                                           "capacity": 1073741824 }
+                    },
+                    'state': 'Committed',
+                    'output_name': 'Output for step test_secrets',
+                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+                    'output_path': '/var/spool/cwl',
+                    'output_ttl': 0,
+                    'container_image': '99999999999999999999999999999993+99',
+                    'command': ['md5sum', 'example.conf'],
+                    'cwd': '/var/spool/cwl',
+                    'scheduling_parameters': {},
+                    'properties': {},
+                    "secret_mounts": {
+                        "/var/spool/cwl/example.conf": {
+                            "content": "username: user\npassword: blorp\n",
+                            "kind": "text"
+                        }
+                    }
+                }))
+
+    # The test passes no builder.resources
+    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+    def test_timelimit(self, keepdocker):
+        arv_docker_clear_cache()
+
+        runner = mock.MagicMock()
+        runner.ignore_docker_for_reuse = False
+        runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
+
+        keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+        runner.api.collections().get().execute.return_value = {
+            "portable_data_hash": "99999999999999999999999999999993+99"}
+
+        tool = cmap({
+            "inputs": [],
+            "outputs": [],
+            "baseCommand": "ls",
+            "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+            "id": "#",
+            "class": "CommandLineTool",
+            "hints": [
+                {
+                    "class": "http://commonwl.org/cwltool#TimeLimit",
+                    "timelimit": 42
+                }
+            ]
+        })
+
+        loadingContext, runtimeContext = self.helper(runner)
+        runtimeContext.name = "test_timelimit"
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
+        arvtool.formatgraph = None
+
+        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
+
+        _, kwargs = runner.api.container_requests().create.call_args
+        self.assertEqual(42, kwargs['body']['scheduling_parameters'].get('max_run_time'))
diff --git a/sdk/cwl/tests/test_fsaccess.py b/sdk/cwl/tests/test_fsaccess.py
new file mode 100644 (file)
index 0000000..f83612a
--- /dev/null
@@ -0,0 +1,108 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import functools
+import mock
+import sys
+import unittest
+import json
+import logging
+import os
+
+import arvados
+import arvados.keep
+import arvados.collection
+import arvados_cwl
+
+from cwltool.pathmapper import MapperEnt
+from .mock_discovery import get_rootDesc
+
+from arvados_cwl.fsaccess import CollectionCache
+
+class TestFsAccess(unittest.TestCase):
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_collection_cache(self, cr):
+        cache = CollectionCache(mock.MagicMock(), mock.MagicMock(), 4)
+        c1 = cache.get("99999999999999999999999999999991+99")
+        c2 = cache.get("99999999999999999999999999999991+99")
+        self.assertIs(c1, c2)
+        self.assertEqual(1, cr.call_count)
+        c3 = cache.get("99999999999999999999999999999992+99")
+        self.assertEqual(2, cr.call_count)
+
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_collection_cache_limit(self, cr):
+        cache = CollectionCache(mock.MagicMock(), mock.MagicMock(), 4)
+        cr().manifest_text.return_value = 'x' * 524289
+        self.assertEqual(0, cache.total)
+        c1 = cache.get("99999999999999999999999999999991+524289")
+        self.assertIn("99999999999999999999999999999991+524289", cache.collections)
+        self.assertNotIn("99999999999999999999999999999992+524289", cache.collections)
+        self.assertEqual((524289*128)*1, cache.total)
+
+        c2 = cache.get("99999999999999999999999999999992+524289")
+        self.assertIn("99999999999999999999999999999991+524289", cache.collections)
+        self.assertIn("99999999999999999999999999999992+524289", cache.collections)
+        self.assertEqual((524289*128)*2, cache.total)
+
+        c1 = cache.get("99999999999999999999999999999991+524289")
+        self.assertIn("99999999999999999999999999999991+524289", cache.collections)
+        self.assertIn("99999999999999999999999999999992+524289", cache.collections)
+        self.assertEqual((524289*128)*2, cache.total)
+
+        c3 = cache.get("99999999999999999999999999999993+524289")
+        self.assertIn("99999999999999999999999999999991+524289", cache.collections)
+        self.assertIn("99999999999999999999999999999992+524289", cache.collections)
+        self.assertEqual((524289*128)*3, cache.total)
+
+        c4 = cache.get("99999999999999999999999999999994+524289")
+        self.assertIn("99999999999999999999999999999991+524289", cache.collections)
+        self.assertNotIn("99999999999999999999999999999992+524289", cache.collections)
+        self.assertEqual((524289*128)*3, cache.total)
+
+        c5 = cache.get("99999999999999999999999999999995+524289")
+        self.assertNotIn("99999999999999999999999999999991+524289", cache.collections)
+        self.assertNotIn("99999999999999999999999999999992+524289", cache.collections)
+        self.assertEqual((524289*128)*3, cache.total)
+
+
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_collection_cache_limit2(self, cr):
+        cache = CollectionCache(mock.MagicMock(), mock.MagicMock(), 4)
+        cr().manifest_text.return_value = 'x' * 524287
+        self.assertEqual(0, cache.total)
+        c1 = cache.get("99999999999999999999999999999991+524287")
+        self.assertIn("99999999999999999999999999999991+524287", cache.collections)
+        self.assertNotIn("99999999999999999999999999999992+524287", cache.collections)
+        self.assertEqual((524287*128)*1, cache.total)
+
+        c2 = cache.get("99999999999999999999999999999992+524287")
+        self.assertIn("99999999999999999999999999999991+524287", cache.collections)
+        self.assertIn("99999999999999999999999999999992+524287", cache.collections)
+        self.assertEqual((524287*128)*2, cache.total)
+
+        c1 = cache.get("99999999999999999999999999999991+524287")
+        self.assertIn("99999999999999999999999999999991+524287", cache.collections)
+        self.assertIn("99999999999999999999999999999992+524287", cache.collections)
+        self.assertEqual((524287*128)*2, cache.total)
+
+        c3 = cache.get("99999999999999999999999999999993+524287")
+        self.assertIn("99999999999999999999999999999991+524287", cache.collections)
+        self.assertIn("99999999999999999999999999999992+524287", cache.collections)
+        self.assertEqual((524287*128)*3, cache.total)
+
+        c4 = cache.get("99999999999999999999999999999994+524287")
+        self.assertIn("99999999999999999999999999999991+524287", cache.collections)
+        self.assertIn("99999999999999999999999999999992+524287", cache.collections)
+        self.assertEqual((524287*128)*4, cache.total)
+
+        c5 = cache.get("99999999999999999999999999999995+524287")
+        self.assertIn("99999999999999999999999999999991+524287", cache.collections)
+        self.assertNotIn("99999999999999999999999999999992+524287", cache.collections)
+        self.assertEqual((524287*128)*4, cache.total)
+
+        c6 = cache.get("99999999999999999999999999999996+524287")
+        self.assertNotIn("99999999999999999999999999999991+524287", cache.collections)
+        self.assertNotIn("99999999999999999999999999999992+524287", cache.collections)
+        self.assertEqual((524287*128)*4, cache.total)
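
The magic numbers in the two limit tests above sit just on either side of the cache's 256 MiB cap, with each entry costed at len(manifest_text) * 128 bytes (the same factor the assertions themselves use):

```python
CAP = 256 * 1024 * 1024    # 268_435_456 bytes
big = 524289 * 128         # 67_108_992 per entry
small = 524287 * 128       # 67_108_736 per entry
assert 4 * big > CAP       # a 4th "big" entry forces LRU eviction down to 3
assert 4 * small <= CAP    # four "small" entries fit; the 5th evicts the LRU
```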
diff --git a/sdk/cwl/tests/test_http.py b/sdk/cwl/tests/test_http.py
new file mode 100644 (file)
index 0000000..4119fee
--- /dev/null
@@ -0,0 +1,289 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from future import standard_library
+standard_library.install_aliases()
+
+import copy
+import io
+import functools
+import hashlib
+import json
+import logging
+import mock
+import sys
+import unittest
+import datetime
+
+import arvados
+import arvados.collection
+import arvados_cwl
+import arvados_cwl.runner
+import arvados.keep
+
+from .matcher import JsonDiffMatcher, StripYAMLComments
+from .mock_discovery import get_rootDesc
+
+import arvados_cwl.http
+
+import ruamel.yaml as yaml
+
+
+class TestHttpToKeep(unittest.TestCase):
+
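+    # No cached copy exists, so http_to_keep should download the URL into a
+    # new collection and record the response Date header as a collection
+    # property keyed by the URL.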
+    @mock.patch("requests.get")
+    @mock.patch("arvados.collection.Collection")
+    def test_http_get(self, collectionmock, getmock):
+        api = mock.MagicMock()
+
+        api.collections().list().execute.return_value = {
+            "items": []
+        }
+
+        cm = mock.MagicMock()
+        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+        collectionmock.return_value = cm
+
+        req = mock.MagicMock()
+        req.status_code = 200
+        req.headers = {}
+        req.iter_content.return_value = ["abc"]
+        getmock.return_value = req
+
+        utcnow = mock.MagicMock()
+        utcnow.return_value = datetime.datetime(2018, 5, 15)
+
+        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
+
+        getmock.assert_called_with("http://example.com/file1.txt", stream=True, allow_redirects=True)
+
+        cm.open.assert_called_with("file1.txt", "wb")
+        cm.save_new.assert_called_with(name="Downloaded from http://example.com/file1.txt",
+                                       owner_uuid=None, ensure_unique_name=True)
+
+        api.collections().update.assert_has_calls([
+            mock.call(uuid=cm.manifest_locator(),
+                      body={"collection":{"properties": {'http://example.com/file1.txt': {'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})
+        ])
+
+
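+    # A cached copy whose recorded Expires header is still in the future
+    # (relative to the mocked clock) should be reused without any HTTP request.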
+    @mock.patch("requests.get")
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_http_expires(self, collectionmock, getmock):
+        api = mock.MagicMock()
+
+        api.collections().list().execute.return_value = {
+            "items": [{
+                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+                "portable_data_hash": "99999999999999999999999999999998+99",
+                "properties": {
+                    'http://example.com/file1.txt': {
+                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+                        'Expires': 'Tue, 17 May 2018 00:00:00 GMT'
+                    }
+                }
+            }]
+        }
+
+        cm = mock.MagicMock()
+        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+        cm.keys.return_value = ["file1.txt"]
+        collectionmock.return_value = cm
+
+        req = mock.MagicMock()
+        req.status_code = 200
+        req.headers = {}
+        req.iter_content.return_value = ["abc"]
+        getmock.return_value = req
+
+        utcnow = mock.MagicMock()
+        utcnow.return_value = datetime.datetime(2018, 5, 16)
+
+        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
+
+        getmock.assert_not_called()
+
+
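+    # Cache-Control: max-age=172800 (48 hours from the recorded Date) still
+    # covers the mocked "now", so the cached copy should be reused without a
+    # request.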
+    @mock.patch("requests.get")
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_http_cache_control(self, collectionmock, getmock):
+        api = mock.MagicMock()
+
+        api.collections().list().execute.return_value = {
+            "items": [{
+                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+                "portable_data_hash": "99999999999999999999999999999998+99",
+                "properties": {
+                    'http://example.com/file1.txt': {
+                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+                        'Cache-Control': 'max-age=172800'
+                    }
+                }
+            }]
+        }
+
+        cm = mock.MagicMock()
+        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+        cm.keys.return_value = ["file1.txt"]
+        collectionmock.return_value = cm
+
+        req = mock.MagicMock()
+        req.status_code = 200
+        req.headers = {}
+        req.iter_content.return_value = ["abc"]
+        getmock.return_value = req
+
+        utcnow = mock.MagicMock()
+        utcnow.return_value = datetime.datetime(2018, 5, 16)
+
+        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
+
+        getmock.assert_not_called()
+
+
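+    # The cached copy is stale, so the file should be downloaded again into a
+    # new collection and the recorded Date property updated.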
+    @mock.patch("requests.get")
+    @mock.patch("requests.head")
+    @mock.patch("arvados.collection.Collection")
+    def test_http_expired(self, collectionmock, headmock, getmock):
+        api = mock.MagicMock()
+
+        api.collections().list().execute.return_value = {
+            "items": [{
+                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+                "portable_data_hash": "99999999999999999999999999999998+99",
+                "properties": {
+                    'http://example.com/file1.txt': {
+                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+                        'Expires': 'Tue, 16 May 2018 00:00:00 GMT'
+                    }
+                }
+            }]
+        }
+
+        cm = mock.MagicMock()
+        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz4"
+        cm.portable_data_hash.return_value = "99999999999999999999999999999997+99"
+        cm.keys.return_value = ["file1.txt"]
+        collectionmock.return_value = cm
+
+        req = mock.MagicMock()
+        req.status_code = 200
+        req.headers = {'Date': 'Tue, 17 May 2018 00:00:00 GMT'}
+        req.iter_content.return_value = ["def"]
+        getmock.return_value = req
+        headmock.return_value = req
+
+        utcnow = mock.MagicMock()
+        utcnow.return_value = datetime.datetime(2018, 5, 17)
+
+        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+        self.assertEqual(r, "keep:99999999999999999999999999999997+99/file1.txt")
+
+        getmock.assert_called_with("http://example.com/file1.txt", stream=True, allow_redirects=True)
+
+        cm.open.assert_called_with("file1.txt", "wb")
+        cm.save_new.assert_called_with(name="Downloaded from http://example.com/file1.txt",
+                                       owner_uuid=None, ensure_unique_name=True)
+
+        api.collections().update.assert_has_calls([
+            mock.call(uuid=cm.manifest_locator(),
+                      body={"collection":{"properties": {'http://example.com/file1.txt': {'Date': 'Tue, 17 May 2018 00:00:00 GMT'}}}})
+        ])
+
+
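+    # The cached copy is stale, but a HEAD request returns the same ETag, so
+    # the existing collection should be kept (no GET, no new write) and only
+    # its freshness properties updated.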
+    @mock.patch("requests.get")
+    @mock.patch("requests.head")
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_http_etag(self, collectionmock, headmock, getmock):
+        api = mock.MagicMock()
+
+        api.collections().list().execute.return_value = {
+            "items": [{
+                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
+                "portable_data_hash": "99999999999999999999999999999998+99",
+                "properties": {
+                    'http://example.com/file1.txt': {
+                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',
+                        'Expires': 'Tue, 16 May 2018 00:00:00 GMT',
+                        'ETag': '123456'
+                    }
+                }
+            }]
+        }
+
+        cm = mock.MagicMock()
+        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+        cm.keys.return_value = ["file1.txt"]
+        collectionmock.return_value = cm
+
+        req = mock.MagicMock()
+        req.status_code = 200
+        req.headers = {
+            'Date': 'Tue, 17 May 2018 00:00:00 GMT',
+            'Expires': 'Tue, 19 May 2018 00:00:00 GMT',
+            'ETag': '123456'
+        }
+        headmock.return_value = req
+
+        utcnow = mock.MagicMock()
+        utcnow.return_value = datetime.datetime(2018, 5, 17)
+
+        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
+        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
+
+        getmock.assert_not_called()
+        cm.open.assert_not_called()
+
+        api.collections().update.assert_has_calls([
+            mock.call(uuid=cm.manifest_locator(),
+                      body={"collection":{"properties": {'http://example.com/file1.txt': {
+                          'Date': 'Tue, 17 May 2018 00:00:00 GMT',
+                          'Expires': 'Tue, 19 May 2018 00:00:00 GMT',
+                          'ETag': '123456'
+                      }}}})
+                      ])
+
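+    # The stored filename should come from the Content-Disposition header
+    # rather than from the URL path.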
+    @mock.patch("requests.get")
+    @mock.patch("arvados.collection.Collection")
+    def test_http_content_disp(self, collectionmock, getmock):
+        api = mock.MagicMock()
+
+        api.collections().list().execute.return_value = {
+            "items": []
+        }
+
+        cm = mock.MagicMock()
+        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
+        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
+        collectionmock.return_value = cm
+
+        req = mock.MagicMock()
+        req.status_code = 200
+        req.headers = {"Content-Disposition": "attachment; filename=file1.txt"}
+        req.iter_content.return_value = ["abc"]
+        getmock.return_value = req
+
+        utcnow = mock.MagicMock()
+        utcnow.return_value = datetime.datetime(2018, 5, 15)
+
+        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/download?fn=/file1.txt", utcnow=utcnow)
+        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
+
+        getmock.assert_called_with("http://example.com/download?fn=/file1.txt", stream=True, allow_redirects=True)
+
+        cm.open.assert_called_with("file1.txt", "wb")
+        cm.save_new.assert_called_with(name="Downloaded from http://example.com/download?fn=/file1.txt",
+                                       owner_uuid=None, ensure_unique_name=True)
+
+        api.collections().update.assert_has_calls([
+            mock.call(uuid=cm.manifest_locator(),
+                      body={"collection":{"properties": {"http://example.com/download?fn=/file1.txt": {'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})
+        ])
diff --git a/sdk/cwl/tests/test_job.py b/sdk/cwl/tests/test_job.py
new file mode 100644 (file)
index 0000000..022d75b
--- /dev/null
@@ -0,0 +1,530 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from future import standard_library
+standard_library.install_aliases()
+from builtins import str
+from builtins import next
+
+import functools
+import json
+import logging
+import mock
+import os
+import unittest
+import copy
+import io
+
+import arvados
+import arvados_cwl
+import arvados_cwl.executor
+import cwltool.process
+from arvados.errors import ApiError
+from schema_salad.ref_resolver import Loader
+from schema_salad.sourceline import cmap
+from .mock_discovery import get_rootDesc
+from .matcher import JsonDiffMatcher, StripYAMLComments
+from .test_container import CollectionMock
+
+if not os.getenv('ARVADOS_DEBUG'):
+    logging.getLogger('arvados.cwl-runner').setLevel(logging.WARN)
+    logging.getLogger('arvados.arv-run').setLevel(logging.WARN)
+
+class TestJob(unittest.TestCase):
+
+    def helper(self, runner, enable_reuse=True):
+        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
+
+        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
+                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
+        loadingContext = arvados_cwl.context.ArvLoadingContext(
+            {"avsc_names": avsc_names,
+             "basedir": "",
+             "make_fs_access": make_fs_access,
+             "loader": Loader({}),
+             "metadata": {"cwlVersion": "v1.0"},
+             "makeTool": runner.arv_make_tool})
+        runtimeContext = arvados_cwl.context.ArvRuntimeContext(
+            {"work_api": "jobs",
+             "basedir": "",
+             "name": "test_run_job_"+str(enable_reuse),
+             "make_fs_access": make_fs_access,
+             "enable_reuse": enable_reuse,
+             "priority": 500})
+
+        return loadingContext, runtimeContext
+
+    # The test passes no builder.resources
+    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch('arvados.commands.keepdocker.list_images_in_arv')
+    def test_run(self, list_images_in_arv):
+        for enable_reuse in (True, False):
+            runner = mock.MagicMock()
+            runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+            runner.ignore_docker_for_reuse = False
+            runner.num_retries = 0
+
+            list_images_in_arv.return_value = [["zzzzz-4zz18-zzzzzzzzzzzzzzz"]]
+            runner.api.collections().get().execute.return_value = {"portable_data_hash": "99999999999999999999999999999993+99"}
+            # Simulate a reused job from another project so that we can check
+            # that a can_read link is added.
+            runner.api.jobs().create().execute.return_value = {
+                'state': 'Complete' if enable_reuse else 'Queued',
+                'owner_uuid': 'zzzzz-tpzed-yyyyyyyyyyyyyyy' if enable_reuse else 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+                'uuid': 'zzzzz-8i9sb-yyyyyyyyyyyyyyy',
+                'output': None,
+            }
+
+            tool = cmap({
+                "inputs": [],
+                "outputs": [],
+                "baseCommand": "ls",
+                "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+                "id": "#",
+                "class": "CommandLineTool"
+            })
+
+            loadingContext, runtimeContext = self.helper(runner, enable_reuse)
+
+            arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
+            arvtool.formatgraph = None
+            for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+                j.run(runtimeContext)
+                runner.api.jobs().create.assert_called_with(
+                    body=JsonDiffMatcher({
+                        'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+                        'script_parameters': {
+                            'tasks': [{
+                                'task.env': {'HOME': '$(task.outdir)', 'TMPDIR': '$(task.tmpdir)'},
+                                'command': ['ls', '$(task.outdir)']
+                            }],
+                        },
+                        'script_version': 'master',
+                        'minimum_script_version': 'a3f2cb186e437bfce0031b024b2157b73ed2717d',
+                        'repository': 'arvados',
+                        'script': 'crunchrunner',
+                        'runtime_constraints': {
+                            'docker_image': 'arvados/jobs',
+                            'min_cores_per_node': 1,
+                            'min_ram_mb_per_node': 1024,
+                            'min_scratch_mb_per_node': 2048 # tmpdirSize + outdirSize
+                        }
+                    }),
+                    find_or_create=enable_reuse,
+                    filters=[['repository', '=', 'arvados'],
+                             ['script', '=', 'crunchrunner'],
+                             ['script_version', 'in git', 'a3f2cb186e437bfce0031b024b2157b73ed2717d'],
+                             ['docker_image_locator', 'in docker', 'arvados/jobs']]
+                )
+                if enable_reuse:
+                    runner.api.links().create.assert_called_with(
+                        body=JsonDiffMatcher({
+                            'link_class': 'permission',
+                            'name': 'can_read',
+                            "tail_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz",
+                            "head_uuid": "zzzzz-819sb-yyyyyyyyyyyyyyy",
+                        })
+                    )
+                    # Simulate an API exception when trying to create a
+                    # sharing link on the job.
+                    runner.api.links().create.side_effect = ApiError(
+                        mock.MagicMock(return_value={'status': 403}),
+                        bytes(b'Permission denied'))
+                    j.run(runtimeContext)
+                else:
+                    assert not runner.api.links().create.called
+
+    # The test passes some fields in builder.resources
+    # For the remaining fields, the defaults will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch('arvados.commands.keepdocker.list_images_in_arv')
+    def test_resource_requirements(self, list_images_in_arv):
+        runner = mock.MagicMock()
+        runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        runner.ignore_docker_for_reuse = False
+        runner.num_retries = 0
+        arvados_cwl.add_arv_hints()
+
+        list_images_in_arv.return_value = [["zzzzz-4zz18-zzzzzzzzzzzzzzz"]]
+        runner.api.collections().get().execute.return_value = {"portable_data_hash": "99999999999999999999999999999993+99"}
+
+        tool = {
+            "inputs": [],
+            "outputs": [],
+            "hints": [{
+                "class": "ResourceRequirement",
+                "coresMin": 3,
+                "ramMin": 3000,
+                "tmpdirMin": 4000
+            }, {
+                "class": "http://arvados.org/cwl#RuntimeConstraints",
+                "keep_cache": 512,
+                "outputDirType": "keep_output_dir"
+            }, {
+                "class": "http://arvados.org/cwl#APIRequirement",
+            },
+            {
+                "class": "http://arvados.org/cwl#ReuseRequirement",
+                "enableReuse": False
+            }],
+            "baseCommand": "ls",
+            "id": "#",
+            "class": "CommandLineTool"
+        }
+
+        loadingContext, runtimeContext = self.helper(runner)
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
+        arvtool.formatgraph = None
+        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
+        runner.api.jobs().create.assert_called_with(
+            body=JsonDiffMatcher({
+                'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+                'script_parameters': {
+                    'tasks': [{
+                        'task.env': {'HOME': '$(task.outdir)', 'TMPDIR': '$(task.tmpdir)'},
+                        'task.keepTmpOutput': True,
+                        'command': ['ls']
+                    }]
+                },
+                'script_version': 'master',
+                'minimum_script_version': 'a3f2cb186e437bfce0031b024b2157b73ed2717d',
+                'repository': 'arvados',
+                'script': 'crunchrunner',
+                'runtime_constraints': {
+                    'docker_image': 'arvados/jobs',
+                    'min_cores_per_node': 3,
+                    'min_ram_mb_per_node': 3512,     # ramMin + keep_cache
+                    'min_scratch_mb_per_node': 5024, # tmpdirSize + outdirSize
+                    'keep_cache_mb_per_task': 512
+                }
+            }),
+            find_or_create=False,
+            filters=[['repository', '=', 'arvados'],
+                     ['script', '=', 'crunchrunner'],
+                     ['script_version', 'in git', 'a3f2cb186e437bfce0031b024b2157b73ed2717d'],
+                     ['docker_image_locator', 'in docker', 'arvados/jobs']])
+
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_done(self, reader):
+        api = mock.MagicMock()
+
+        runner = mock.MagicMock()
+        runner.api = api
+        runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        runner.num_retries = 0
+        runner.ignore_docker_for_reuse = False
+
+        reader().keys.return_value = "log.txt"
+        reader().open.return_value = io.StringIO(
+            str(u"""2016-11-02_23:12:18 c97qk-8i9sb-cryqw2blvzy4yaj 13358 0 stderr 2016/11/02 23:12:18 crunchrunner: $(task.tmpdir)=/tmp/crunch-job-task-work/compute3.1/tmpdir
+2016-11-02_23:12:18 c97qk-8i9sb-cryqw2blvzy4yaj 13358 0 stderr 2016/11/02 23:12:18 crunchrunner: $(task.outdir)=/tmp/crunch-job-task-work/compute3.1/outdir
+2016-11-02_23:12:18 c97qk-8i9sb-cryqw2blvzy4yaj 13358 0 stderr 2016/11/02 23:12:18 crunchrunner: $(task.keep)=/keep
+        """))
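+        # Four list() responses, consumed in order: no output collection with
+        # the expected name yet, then the output manifest looked up by portable
+        # data hash; then the same two steps for the log collection.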
+        api.collections().list().execute.side_effect = ({"items": []},
+                                                        {"items": [{"manifest_text": "XYZ"}]},
+                                                        {"items": []},
+                                                        {"items": [{"manifest_text": "ABC"}]})
+
+        arvjob = arvados_cwl.ArvadosJob(runner,
+                                        mock.MagicMock(),
+                                        {},
+                                        None,
+                                        [],
+                                        [],
+                                        "testjob")
+        arvjob.output_callback = mock.MagicMock()
+        arvjob.collect_outputs = mock.MagicMock()
+        arvjob.collect_outputs.return_value = {"out": "stuff"}
+
+        arvjob.done({
+            "state": "Complete",
+            "output": "99999999999999999999999999999993+99",
+            "log": "99999999999999999999999999999994+99",
+            "uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        })
+
+        api.collections().list.assert_has_calls([
+            mock.call(),
+            # Output collection check
+            mock.call(filters=[['owner_uuid', '=', 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'],
+                          ['portable_data_hash', '=', '99999999999999999999999999999993+99'],
+                          ['name', '=', 'Output 9999999 of testjob']]),
+            mock.call().execute(num_retries=0),
+            mock.call(limit=1, filters=[['portable_data_hash', '=', '99999999999999999999999999999993+99']],
+                 select=['manifest_text']),
+            mock.call().execute(num_retries=0),
+            # Log collection's turn
+            mock.call(filters=[['owner_uuid', '=', 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'],
+                          ['portable_data_hash', '=', '99999999999999999999999999999994+99'],
+                          ['name', '=', 'Log of zzzzz-8i9sb-zzzzzzzzzzzzzzz']]),
+            mock.call().execute(num_retries=0),
+            mock.call(limit=1, filters=[['portable_data_hash', '=', '99999999999999999999999999999994+99']],
+                 select=['manifest_text']),
+            mock.call().execute(num_retries=0)])
+
+        api.collections().create.assert_has_calls([
+            mock.call(ensure_unique_name=True,
+                      body={'portable_data_hash': '99999999999999999999999999999993+99',
+                            'manifest_text': 'XYZ',
+                            'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+                            'name': 'Output 9999999 of testjob'}),
+            mock.call().execute(num_retries=0),
+            mock.call(ensure_unique_name=True,
+                      body={'portable_data_hash': '99999999999999999999999999999994+99',
+                            'manifest_text': 'ABC',
+                            'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+                            'name': 'Log of zzzzz-8i9sb-zzzzzzzzzzzzzzz'}),
+            mock.call().execute(num_retries=0),
+        ])
+
+        arvjob.output_callback.assert_called_with({"out": "stuff"}, "success")
+
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_done_use_existing_collection(self, reader):
+        api = mock.MagicMock()
+
+        runner = mock.MagicMock()
+        runner.api = api
+        runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        runner.num_retries = 0
+
+        reader().keys.return_value = "log.txt"
+        reader().open.return_value = io.StringIO(
+            str(u"""2016-11-02_23:12:18 c97qk-8i9sb-cryqw2blvzy4yaj 13358 0 stderr 2016/11/02 23:12:18 crunchrunner: $(task.tmpdir)=/tmp/crunch-job-task-work/compute3.1/tmpdir
+2016-11-02_23:12:18 c97qk-8i9sb-cryqw2blvzy4yaj 13358 0 stderr 2016/11/02 23:12:18 crunchrunner: $(task.outdir)=/tmp/crunch-job-task-work/compute3.1/outdir
+2016-11-02_23:12:18 c97qk-8i9sb-cryqw2blvzy4yaj 13358 0 stderr 2016/11/02 23:12:18 crunchrunner: $(task.keep)=/keep
+        """))
+
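+        # Both lookups find an existing collection, so done() should reuse
+        # them instead of creating new ones.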
+        api.collections().list().execute.side_effect = (
+            {"items": [{"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz2"}]},
+            {"items": [{"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz2"}]},
+        )
+
+        arvjob = arvados_cwl.ArvadosJob(runner,
+                                        mock.MagicMock(),
+                                        {},
+                                        None,
+                                        [],
+                                        [],
+                                        "testjob")
+        arvjob.output_callback = mock.MagicMock()
+        arvjob.collect_outputs = mock.MagicMock()
+        arvjob.collect_outputs.return_value = {"out": "stuff"}
+
+        arvjob.done({
+            "state": "Complete",
+            "output": "99999999999999999999999999999993+99",
+            "log": "99999999999999999999999999999994+99",
+            "uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        })
+
+        api.collections().list.assert_has_calls([
+            mock.call(),
+            # Output collection
+            mock.call(filters=[['owner_uuid', '=', 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'],
+                               ['portable_data_hash', '=', '99999999999999999999999999999993+99'],
+                               ['name', '=', 'Output 9999999 of testjob']]),
+            mock.call().execute(num_retries=0),
+            # Log collection
+            mock.call(filters=[['owner_uuid', '=', 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'],
+                               ['portable_data_hash', '=', '99999999999999999999999999999994+99'],
+                               ['name', '=', 'Log of zzzzz-8i9sb-zzzzzzzzzzzzzzz']]),
+            mock.call().execute(num_retries=0)
+        ])
+
+        self.assertFalse(api.collections().create.called)
+
+        arvjob.output_callback.assert_called_with({"out": "stuff"}, "success")
+
+
+class TestWorkflow(unittest.TestCase):
+    def helper(self, runner, enable_reuse=True):
+        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
+
+        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
+                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
+
+        document_loader.fetcher_constructor = functools.partial(arvados_cwl.CollectionFetcher, api_client=runner.api, fs_access=make_fs_access(""))
+        document_loader.fetcher = document_loader.fetcher_constructor(document_loader.cache, document_loader.session)
+        document_loader.fetch_text = document_loader.fetcher.fetch_text
+        document_loader.check_exists = document_loader.fetcher.check_exists
+
+        loadingContext = arvados_cwl.context.ArvLoadingContext(
+            {"avsc_names": avsc_names,
+             "basedir": "",
+             "make_fs_access": make_fs_access,
+             "loader": document_loader,
+             "metadata": {"cwlVersion": "v1.0"},
+             "construct_tool_object": runner.arv_make_tool})
+        runtimeContext = arvados_cwl.context.ArvRuntimeContext(
+            {"work_api": "jobs",
+             "basedir": "",
+             "name": "test_run_wf_"+str(enable_reuse),
+             "make_fs_access": make_fs_access,
+             "enable_reuse": enable_reuse,
+             "priority": 500})
+
+        return loadingContext, runtimeContext
+
+    # The test passes no builder.resources
+    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch("arvados.collection.CollectionReader")
+    @mock.patch("arvados.collection.Collection")
+    @mock.patch('arvados.commands.keepdocker.list_images_in_arv')
+    def test_run(self, list_images_in_arv, mockcollection, mockcollectionreader):
+        arvados_cwl.add_arv_hints()
+
+        api = mock.MagicMock()
+        api._rootDesc = get_rootDesc()
+
+        runner = arvados_cwl.executor.ArvCwlExecutor(api)
+        self.assertEqual(runner.work_api, 'jobs')
+
+        list_images_in_arv.return_value = [["zzzzz-4zz18-zzzzzzzzzzzzzzz"]]
+        runner.api.collections().get().execute.return_value = {"portable_data_hash": "99999999999999999999999999999993+99"}
+        runner.api.collections().list().execute.return_value = {"items": [{"portable_data_hash": "99999999999999999999999999999993+99"}]}
+
+        runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        runner.ignore_docker_for_reuse = False
+        runner.num_retries = 0
+
+        loadingContext, runtimeContext = self.helper(runner)
+
+        tool, metadata = loadingContext.loader.resolve_ref("tests/wf/scatter2.cwl")
+        metadata["cwlVersion"] = tool["cwlVersion"]
+
+        mockc = mock.MagicMock()
+        mockcollection.side_effect = lambda *args, **kwargs: CollectionMock(mockc, *args, **kwargs)
+        mockcollectionreader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), "token.txt")
+
+        arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, loadingContext)
+        arvtool.formatgraph = None
+        it = arvtool.job({}, mock.MagicMock(), runtimeContext)
+
+        next(it).run(runtimeContext)
+        next(it).run(runtimeContext)
+
+        with open("tests/wf/scatter2_subwf.cwl") as f:
+            subwf = StripYAMLComments(f.read())
+
+        runner.api.jobs().create.assert_called_with(
+            body=JsonDiffMatcher({
+                'minimum_script_version': 'a3f2cb186e437bfce0031b024b2157b73ed2717d',
+                'repository': 'arvados',
+                'script_version': 'master',
+                'script': 'crunchrunner',
+                'script_parameters': {
+                    'tasks': [{'task.env': {
+                        'HOME': '$(task.outdir)',
+                        'TMPDIR': '$(task.tmpdir)'},
+                               'task.vwd': {
+                                   'workflow.cwl': '$(task.keep)/99999999999999999999999999999996+99/workflow.cwl',
+                                   'cwl.input.yml': '$(task.keep)/99999999999999999999999999999996+99/cwl.input.yml'
+                               },
+                    'command': [u'cwltool', u'--no-container', u'--move-outputs', u'--preserve-entire-environment', u'workflow.cwl#main', u'cwl.input.yml'],
+                    'task.stdout': 'cwl.output.json'}]},
+                'runtime_constraints': {
+                    'min_scratch_mb_per_node': 2048,
+                    'min_cores_per_node': 1,
+                    'docker_image': 'arvados/jobs',
+                    'min_ram_mb_per_node': 1024
+                },
+                'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'}),
+            filters=[['repository', '=', 'arvados'],
+                     ['script', '=', 'crunchrunner'],
+                     ['script_version', 'in git', 'a3f2cb186e437bfce0031b024b2157b73ed2717d'],
+                     ['docker_image_locator', 'in docker', 'arvados/jobs']],
+            find_or_create=True)
+
+        mockc.open().__enter__().write.assert_has_calls([mock.call(subwf)])
+        mockc.open().__enter__().write.assert_has_calls([mock.call(
+bytes(b'''{
+  "fileblub": {
+    "basename": "token.txt",
+    "class": "File",
+    "location": "/keep/99999999999999999999999999999999+118/token.txt",
+    "size": 0
+  },
+  "sleeptime": 5
+}'''))])
+
+    # The test passes no builder.resources
+    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch("arvados.collection.CollectionReader")
+    @mock.patch("arvados.collection.Collection")
+    @mock.patch('arvados.commands.keepdocker.list_images_in_arv')
+    def test_overall_resource_singlecontainer(self, list_images_in_arv, mockcollection, mockcollectionreader):
+        arvados_cwl.add_arv_hints()
+
+        api = mock.MagicMock()
+        api._rootDesc = get_rootDesc()
+
+        runner = arvados_cwl.executor.ArvCwlExecutor(api)
+        self.assertEqual(runner.work_api, 'jobs')
+
+        list_images_in_arv.return_value = [["zzzzz-4zz18-zzzzzzzzzzzzzzz"]]
+        runner.api.collections().get().execute.return_value = {"portable_data_hash": "99999999999999999999999999999993+99"}
+        runner.api.collections().list().execute.return_value = {"items": [{"portable_data_hash": "99999999999999999999999999999993+99"}]}
+
+        runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        runner.ignore_docker_for_reuse = False
+        runner.num_retries = 0
+
+        loadingContext, runtimeContext = self.helper(runner)
+
+        tool, metadata = loadingContext.loader.resolve_ref("tests/wf/echo-wf.cwl")
+        metadata["cwlVersion"] = tool["cwlVersion"]
+
+        mockcollection.side_effect = lambda *args, **kwargs: CollectionMock(mock.MagicMock(), *args, **kwargs)
+
+        arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, loadingContext)
+        arvtool.formatgraph = None
+        it = arvtool.job({}, mock.MagicMock(), runtimeContext)
+
+        next(it).run(runtimeContext)
+        next(it).run(runtimeContext)
+
+        with open("tests/wf/echo-subwf.cwl") as f:
+            subwf = StripYAMLComments(f.read())
+
+        runner.api.jobs().create.assert_called_with(
+            body=JsonDiffMatcher({
+                'minimum_script_version': 'a3f2cb186e437bfce0031b024b2157b73ed2717d',
+                'repository': 'arvados',
+                'script_version': 'master',
+                'script': 'crunchrunner',
+                'script_parameters': {
+                    'tasks': [{'task.env': {
+                        'HOME': '$(task.outdir)',
+                        'TMPDIR': '$(task.tmpdir)'},
+                               'task.vwd': {
+                                   'workflow.cwl': '$(task.keep)/99999999999999999999999999999996+99/workflow.cwl',
+                                   'cwl.input.yml': '$(task.keep)/99999999999999999999999999999996+99/cwl.input.yml'
+                               },
+                    'command': [u'cwltool', u'--no-container', u'--move-outputs', u'--preserve-entire-environment', u'workflow.cwl#main', u'cwl.input.yml'],
+                    'task.stdout': 'cwl.output.json'}]},
+                'runtime_constraints': {
+                    'min_scratch_mb_per_node': 4096,
+                    'min_cores_per_node': 3,
+                    'docker_image': 'arvados/jobs',
+                    'min_ram_mb_per_node': 1024
+                },
+                'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'}),
+            filters=[['repository', '=', 'arvados'],
+                     ['script', '=', 'crunchrunner'],
+                     ['script_version', 'in git', 'a3f2cb186e437bfce0031b024b2157b73ed2717d'],
+                     ['docker_image_locator', 'in docker', 'arvados/jobs']],
+            find_or_create=True)
+
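+    # With the jobs.create method removed from the discovery document, the
+    # executor should fall back to the containers API.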
+    def test_default_work_api(self):
+        arvados_cwl.add_arv_hints()
+
+        api = mock.MagicMock()
+        api._rootDesc = copy.deepcopy(get_rootDesc())
+        del api._rootDesc.get('resources')['jobs']['methods']['create']
+        runner = arvados_cwl.executor.ArvCwlExecutor(api)
+        self.assertEqual(runner.work_api, 'containers')
diff --git a/sdk/cwl/tests/test_make_output.py b/sdk/cwl/tests/test_make_output.py
new file mode 100644 (file)
index 0000000..562d176
--- /dev/null
@@ -0,0 +1,83 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from future import standard_library
+standard_library.install_aliases()
+
+import functools
+import json
+import logging
+import mock
+import os
+import io
+import unittest
+
+import arvados
+import arvados_cwl
+import arvados_cwl.executor
+from .mock_discovery import get_rootDesc
+
+class TestMakeOutput(unittest.TestCase):
+    def setUp(self):
+        self.api = mock.MagicMock()
+        self.api._rootDesc = get_rootDesc()
+
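+    # make_output_collection should copy the outputs into a new collection,
+    # write cwl.output.json with collection-relative locations, save the
+    # collection under the given name and storage class, and tag it with each
+    # of the given tags.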
+    @mock.patch("arvados.collection.Collection")
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_make_output_collection(self, reader, col):
+        keep_client = mock.MagicMock()
+        runner = arvados_cwl.executor.ArvCwlExecutor(self.api, keep_client=keep_client)
+        runner.project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+
+        final = mock.MagicMock()
+        col.return_value = final
+        readermock = mock.MagicMock()
+        reader.return_value = readermock
+
+        final_uuid = final.manifest_locator()
+        num_retries = runner.num_retries
+
+        cwlout = io.StringIO()
+        openmock = mock.MagicMock()
+        final.open.return_value = openmock
+        openmock.__enter__.return_value = cwlout
+
+        _, runner.final_output_collection = runner.make_output_collection("Test output", ["foo"], "tag0,tag1,tag2", {
+            "foo": {
+                "class": "File",
+                "location": "keep:99999999999999999999999999999991+99/foo.txt",
+                "size": 3,
+                "basename": "foo.txt"
+            },
+            "bar": {
+                "class": "File",
+                "location": "keep:99999999999999999999999999999992+99/bar.txt",
+                "basename": "baz.txt",
+                "size": 4
+            }
+        })
+
+        final.copy.assert_has_calls([mock.call('bar.txt', 'baz.txt', overwrite=False, source_collection=readermock)])
+        final.copy.assert_has_calls([mock.call('foo.txt', 'foo.txt', overwrite=False, source_collection=readermock)])
+        final.save_new.assert_has_calls([mock.call(ensure_unique_name=True, name='Test output', owner_uuid='zzzzz-j7d0g-zzzzzzzzzzzzzzz', storage_classes=['foo'])])
+        self.assertEqual("""{
+    "bar": {
+        "basename": "baz.txt",
+        "class": "File",
+        "location": "baz.txt",
+        "size": 4
+    },
+    "foo": {
+        "basename": "foo.txt",
+        "class": "File",
+        "location": "foo.txt",
+        "size": 3
+    }
+}""", cwlout.getvalue())
+
+        self.assertIs(final, runner.final_output_collection)
+        self.assertIs(final_uuid, runner.final_output_collection.manifest_locator())
+        self.api.links().create.assert_has_calls([mock.call(body={"head_uuid": final_uuid, "link_class": "tag", "name": "tag0"}), mock.call().execute(num_retries=num_retries)])
+        self.api.links().create.assert_has_calls([mock.call(body={"head_uuid": final_uuid, "link_class": "tag", "name": "tag1"}), mock.call().execute(num_retries=num_retries)])
+        self.api.links().create.assert_has_calls([mock.call(body={"head_uuid": final_uuid, "link_class": "tag", "name": "tag2"}), mock.call().execute(num_retries=num_retries)])
diff --git a/sdk/cwl/tests/test_pathmapper.py b/sdk/cwl/tests/test_pathmapper.py
new file mode 100644 (file)
index 0000000..b78e890
--- /dev/null
@@ -0,0 +1,233 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import functools
+import mock
+import sys
+import unittest
+import json
+import logging
+import os
+
+import arvados
+import arvados.keep
+import arvados.collection
+import arvados_cwl
+import arvados_cwl.executor
+
+from cwltool.pathmapper import MapperEnt
+from .mock_discovery import get_rootDesc
+
+from arvados_cwl.pathmapper import ArvPathMapper
+
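+# Stand-in for arvados.commands.run.uploadfiles: pretend every file landed in
+# a fixed collection and rewrite each entry's fn according to fnPattern.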
+def upload_mock(files, api, dry_run=False, num_retries=0, project=None, fnPattern="$(file %s/%s)", name=None, collection=None, packed=None):
+    pdh = "99999999999999999999999999999991+99"
+    for c in files:
+        c.keepref = "%s/%s" % (pdh, os.path.basename(c.fn))
+        c.fn = fnPattern % (pdh, os.path.basename(c.fn))
+
+class TestPathmap(unittest.TestCase):
+    def setUp(self):
+        self.api = mock.MagicMock()
+        self.api._rootDesc = get_rootDesc()
+
+    def test_keepref(self):
+        """Test direct keep references."""
+
+        arvrunner = arvados_cwl.executor.ArvCwlExecutor(self.api)
+
+        p = ArvPathMapper(arvrunner, [{
+            "class": "File",
+            "location": "keep:99999999999999999999999999999991+99/hw.py"
+        }], "", "/test/%s", "/test/%s/%s")
+
+        self.assertEqual({'keep:99999999999999999999999999999991+99/hw.py': MapperEnt(resolved='keep:99999999999999999999999999999991+99/hw.py', target='/test/99999999999999999999999999999991+99/hw.py', type='File', staged=True)},
+                         p._pathmap)
+
+    @mock.patch("arvados.commands.run.uploadfiles")
+    @mock.patch("arvados.commands.run.statfile")
+    def test_upload(self, statfile, upl):
+        """Test pathmapper uploading files."""
+
+        arvrunner = arvados_cwl.executor.ArvCwlExecutor(self.api)
+
+        def statfile_mock(prefix, fn, fnPattern="$(file %s/%s)", dirPattern="$(dir %s/%s/)", raiseOSError=False):
+            st = arvados.commands.run.UploadFile("", "tests/hw.py")
+            return st
+
+        upl.side_effect = upload_mock
+        statfile.side_effect = statfile_mock
+
+        p = ArvPathMapper(arvrunner, [{
+            "class": "File",
+            "location": "file:tests/hw.py"
+        }], "", "/test/%s", "/test/%s/%s")
+
+        self.assertEqual({'file:tests/hw.py': MapperEnt(resolved='keep:99999999999999999999999999999991+99/hw.py', target='/test/99999999999999999999999999999991+99/hw.py', type='File', staged=True)},
+                         p._pathmap)
+
+    @mock.patch("arvados.commands.run.uploadfiles")
+    @mock.patch("arvados.commands.run.statfile")
+    def test_statfile(self, statfile, upl):
+        """Test pathmapper handling ArvFile references."""
+        arvrunner = arvados_cwl.executor.ArvCwlExecutor(self.api)
+
+        # An ArvFile object returned from arvados.commands.run.statfile means the
+        # file is located on a keep mount, so we can construct a keep reference
+        # directly without uploading.
+        def statfile_mock(prefix, fn, fnPattern="$(file %s/%s)", dirPattern="$(dir %s/%s/)", raiseOSError=False):
+            st = arvados.commands.run.ArvFile("", fnPattern % ("99999999999999999999999999999991+99", "hw.py"))
+            return st
+
+        upl.side_effect = upload_mock
+        statfile.side_effect = statfile_mock
+
+        p = ArvPathMapper(arvrunner, [{
+            "class": "File",
+            "location": "file:tests/hw.py"
+        }], "", "/test/%s", "/test/%s/%s")
+
+        self.assertEqual({'file:tests/hw.py': MapperEnt(resolved='keep:99999999999999999999999999999991+99/hw.py', target='/test/99999999999999999999999999999991+99/hw.py', type='File', staged=True)},
+                         p._pathmap)
+
+    @mock.patch("os.stat")
+    def test_missing_file(self, stat):
+        """Test pathmapper handling missing references."""
+        arvrunner = arvados_cwl.executor.ArvCwlExecutor(self.api)
+
+        stat.side_effect = OSError(2, "No such file or directory")
+
+        with self.assertRaises(OSError):
+            p = ArvPathMapper(arvrunner, [{
+                "class": "File",
+                "location": "file:tests/hw.py"
+            }], "", "/test/%s", "/test/%s/%s")
+
+    def test_needs_new_collection(self):
+        arvrunner = arvados_cwl.executor.ArvCwlExecutor(self.api)
+
+        # Plain file.  Don't need a new collection.
+        a = {
+            "class": "File",
+            "location": "keep:99999999999999999999999999999991+99/hw.py",
+            "basename": "hw.py"
+        }
+        p = ArvPathMapper(arvrunner, [], "", "%s", "%s/%s")
+        p._pathmap["keep:99999999999999999999999999999991+99/hw.py"] = True
+        self.assertFalse(p.needs_new_collection(a))
+
+        # A file that isn't in the pathmap (for some reason).  Need a new collection.
+        p = ArvPathMapper(arvrunner, [], "", "%s", "%s/%s")
+        self.assertTrue(p.needs_new_collection(a))
+
+        # A file with a secondary file in the same collection.  Don't need
+        # a new collection.
+        a = {
+            "class": "File",
+            "location": "keep:99999999999999999999999999999991+99/hw.py",
+            "basename": "hw.py",
+            "secondaryFiles": [{
+                "class": "File",
+                "location": "keep:99999999999999999999999999999991+99/hw.pyc",
+                "basename": "hw.pyc"
+            }]
+        }
+        p = ArvPathMapper(arvrunner, [], "", "%s", "%s/%s")
+        p._pathmap["keep:99999999999999999999999999999991+99/hw.py"] = True
+        p._pathmap["keep:99999999999999999999999999999991+99/hw.pyc"] = True
+        self.assertFalse(p.needs_new_collection(a))
+
+        # Secondary file is in a different collection from the primary.
+        # Need a new collection.
+        a = {
+            "class": "File",
+            "location": "keep:99999999999999999999999999999991+99/hw.py",
+            "basename": "hw.py",
+            "secondaryFiles": [{
+                "class": "File",
+                "location": "keep:99999999999999999999999999999992+99/hw.pyc",
+                "basename": "hw.pyc"
+            }]
+        }
+        p = ArvPathMapper(arvrunner, [], "", "%s", "%s/%s")
+        p._pathmap["keep:99999999999999999999999999999991+99/hw.py"] = True
+        p._pathmap["keep:99999999999999999999999999999992+99/hw.pyc"] = True
+        self.assertTrue(p.needs_new_collection(a))
+
+        # Secondary file should be staged to a different name than the
+        # path in its location.  Need a new collection.
+        a = {
+            "class": "File",
+            "location": "keep:99999999999999999999999999999991+99/hw.py",
+            "basename": "hw.py",
+            "secondaryFiles": [{
+                "class": "File",
+                "location": "keep:99999999999999999999999999999991+99/hw.pyc",
+                "basename": "hw.other"
+            }]
+        }
+        p = ArvPathMapper(arvrunner, [], "", "%s", "%s/%s")
+        p._pathmap["keep:99999999999999999999999999999991+99/hw.py"] = True
+        p._pathmap["keep:99999999999999999999999999999991+99/hw.pyc"] = True
+        self.assertTrue(p.needs_new_collection(a))
+
+        # Secondary file is a directory.  Do not need a new collection.
+        a = {
+            "class": "File",
+            "location": "keep:99999999999999999999999999999991+99/hw.py",
+            "basename": "hw.py",
+            "secondaryFiles": [{
+                "class": "Directory",
+                "location": "keep:99999999999999999999999999999991+99/hw",
+                "basename": "hw",
+                "listing": [{
+                    "class": "File",
+                    "location": "keep:99999999999999999999999999999991+99/hw/h2",
+                    "basename": "h2"
+                }]
+            }]
+        }
+        p = ArvPathMapper(arvrunner, [], "", "%s", "%s/%s")
+        p._pathmap["keep:99999999999999999999999999999991+99/hw.py"] = True
+        p._pathmap["keep:99999999999999999999999999999991+99/hw"] = True
+        p._pathmap["keep:99999999999999999999999999999991+99/hw/h2"] = True
+        self.assertFalse(p.needs_new_collection(a))
+
+        # Secondary file is a renamed directory.  Need a new collection.
+        a = {
+            "class": "File",
+            "location": "keep:99999999999999999999999999999991+99/hw.py",
+            "basename": "hw.py",
+            "secondaryFiles": [{
+                "class": "Directory",
+                "location": "keep:99999999999999999999999999999991+99/hw",
+                "basename": "wh",
+                "listing": [{
+                    "class": "File",
+                    "location": "keep:99999999999999999999999999999991+99/hw/h2",
+                    "basename": "h2"
+                }]
+            }]
+        }
+        p = ArvPathMapper(arvrunner, [], "", "%s", "%s/%s")
+        p._pathmap["keep:99999999999999999999999999999991+99/hw.py"] = True
+        p._pathmap["keep:99999999999999999999999999999991+99/hw"] = True
+        p._pathmap["keep:99999999999999999999999999999991+99/hw/h2"] = True
+        self.assertTrue(p.needs_new_collection(a))
+
+        # Secondary file is a file literal.  Need a new collection.
+        a = {
+            "class": "File",
+            "location": "keep:99999999999999999999999999999991+99/hw.py",
+            "basename": "hw.py",
+            "secondaryFiles": [{
+                "class": "File",
+                "location": "_:123",
+                "basename": "hw.pyc",
+                "contents": "123"
+            }]
+        }
+        p = ArvPathMapper(arvrunner, [], "", "%s", "%s/%s")
+        p._pathmap["keep:99999999999999999999999999999991+99/hw.py"] = True
+        p._pathmap["_:123"] = True
+        self.assertTrue(p.needs_new_collection(a))
diff --git a/sdk/cwl/tests/test_submit.py b/sdk/cwl/tests/test_submit.py
new file mode 100644 (file)
index 0000000..39117d8
--- /dev/null
@@ -0,0 +1,1779 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
+from builtins import str
+from future.utils import viewvalues
+
+import copy
+import io
+import functools
+import hashlib
+import json
+import logging
+import mock
+import sys
+import unittest
+
+from io import BytesIO, StringIO
+
+import arvados
+import arvados.collection
+import arvados_cwl
+import arvados_cwl.executor
+import arvados_cwl.runner
+import arvados.keep
+
+from .matcher import JsonDiffMatcher, StripYAMLComments
+from .mock_discovery import get_rootDesc
+
+import ruamel.yaml as yaml
+
+_rootDesc = None
+
+def stubs(func):
+    @functools.wraps(func)
+    @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+    @mock.patch("arvados.collection.KeepClient")
+    @mock.patch("arvados.keep.KeepClient")
+    @mock.patch("arvados.events.subscribe")
+    def wrapped(self, events, keep_client1, keep_client2, keepdocker, *args, **kwargs):
+        class Stubs(object):
+            pass
+        stubs = Stubs()
+        stubs.events = events
+        stubs.keepdocker = keepdocker
+
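+        # Keep put() stub: return a fake "md5hash+size" locator so uploaded
+        # content gets deterministic portable data hashes.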
+        def putstub(p, **kwargs):
+            return "%s+%i" % (hashlib.md5(p).hexdigest(), len(p))
+        keep_client1().put.side_effect = putstub
+        keep_client1.put.side_effect = putstub
+        keep_client2().put.side_effect = putstub
+        keep_client2.put.side_effect = putstub
+
+        stubs.keep_client = keep_client2
+        stubs.docker_images = {
+            "arvados/jobs:"+arvados_cwl.__version__: [("zzzzz-4zz18-zzzzzzzzzzzzzd3", "")],
+            "debian:8": [("zzzzz-4zz18-zzzzzzzzzzzzzd4", "")],
+            "arvados/jobs:123": [("zzzzz-4zz18-zzzzzzzzzzzzzd5", "")],
+            "arvados/jobs:latest": [("zzzzz-4zz18-zzzzzzzzzzzzzd6", "")],
+        }
+        def kd(a, b, image_name=None, image_tag=None):
+            return stubs.docker_images.get("%s:%s" % (image_name, image_tag), [])
+        stubs.keepdocker.side_effect = kd
+
+        stubs.fake_user_uuid = "zzzzz-tpzed-zzzzzzzzzzzzzzz"
+        stubs.fake_container_uuid = "zzzzz-dz642-zzzzzzzzzzzzzzz"
+
+        if sys.version_info[0] < 3:
+            stubs.capture_stdout = BytesIO()
+        else:
+            stubs.capture_stdout = StringIO()
+
+        stubs.api = mock.MagicMock()
+        stubs.api._rootDesc = get_rootDesc()
+
+        stubs.api.users().current().execute.return_value = {
+            "uuid": stubs.fake_user_uuid,
+        }
+        stubs.api.collections().list().execute.return_value = {"items": []}
+        stubs.api.containers().current().execute.return_value = {
+            "uuid": stubs.fake_container_uuid,
+        }
+
+        class CollectionExecute(object):
+            def __init__(self, exe):
+                self.exe = exe
+            def execute(self, num_retries=None):
+                return self.exe
+
+        def collection_createstub(created_collections, body, ensure_unique_name=None):
+            mt = body["manifest_text"].encode('utf-8')
+            uuid = "zzzzz-4zz18-zzzzzzzzzzzzzx%d" % len(created_collections)
+            pdh = "%s+%i" % (hashlib.md5(mt).hexdigest(), len(mt))
+            created_collections[uuid] = {
+                "uuid": uuid,
+                "portable_data_hash": pdh,
+                "manifest_text": mt.decode('utf-8')
+            }
+            return CollectionExecute(created_collections[uuid])
+
+        def collection_getstub(created_collections, uuid):
+            for v in viewvalues(created_collections):
+                if uuid in (v["uuid"], v["portable_data_hash"]):
+                    return CollectionExecute(v)
+
+        created_collections = {
+            "99999999999999999999999999999998+99": {
+                "uuid": "",
+                "portable_data_hash": "99999999999999999999999999999998+99",
+                "manifest_text": ". 99999999999999999999999999999998+99 0:0:file1.txt"
+            },
+            "99999999999999999999999999999994+99": {
+                "uuid": "",
+                "portable_data_hash": "99999999999999999999999999999994+99",
+                "manifest_text": ". 99999999999999999999999999999994+99 0:0:expect_arvworkflow.cwl"
+            },
+            "zzzzz-4zz18-zzzzzzzzzzzzzd3": {
+                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzd3",
+                "portable_data_hash": "999999999999999999999999999999d3+99",
+                "manifest_text": ""
+            },
+            "zzzzz-4zz18-zzzzzzzzzzzzzd4": {
+                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzd4",
+                "portable_data_hash": "999999999999999999999999999999d4+99",
+                "manifest_text": ""
+            },
+            "zzzzz-4zz18-zzzzzzzzzzzzzd5": {
+                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzd5",
+                "portable_data_hash": "999999999999999999999999999999d5+99",
+                "manifest_text": ""
+            },
+            "zzzzz-4zz18-zzzzzzzzzzzzzd6": {
+                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzd6",
+                "portable_data_hash": "999999999999999999999999999999d6+99",
+                "manifest_text": ""
+            }
+        }
+        stubs.api.collections().create.side_effect = functools.partial(collection_createstub, created_collections)
+        stubs.api.collections().get.side_effect = functools.partial(collection_getstub, created_collections)
+
+        stubs.expect_job_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        stubs.api.jobs().create().execute.return_value = {
+            "uuid": stubs.expect_job_uuid,
+            "state": "Queued",
+        }
+
+        stubs.expect_container_request_uuid = "zzzzz-xvhdp-zzzzzzzzzzzzzzz"
+        stubs.api.container_requests().create().execute.return_value = {
+            "uuid": stubs.expect_container_request_uuid,
+            "container_uuid": "zzzzz-dz642-zzzzzzzzzzzzzzz",
+            "state": "Queued"
+        }
+
+        stubs.expect_pipeline_template_uuid = "zzzzz-d1hrv-zzzzzzzzzzzzzzz"
+        stubs.api.pipeline_templates().create().execute.return_value = {
+            "uuid": stubs.expect_pipeline_template_uuid,
+        }
+        stubs.expect_job_spec = {
+            'runtime_constraints': {
+                'docker_image': '999999999999999999999999999999d3+99',
+                'min_ram_mb_per_node': 1024
+            },
+            'script_parameters': {
+                'x': {
+                    'basename': 'blorp.txt',
+                    'location': 'keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt',
+                    'class': 'File'
+                },
+                'y': {
+                    'basename': '99999999999999999999999999999998+99',
+                    'location': 'keep:99999999999999999999999999999998+99',
+                    'class': 'Directory'
+                },
+                'z': {
+                    'basename': 'anonymous',
+                    "listing": [{
+                        "basename": "renamed.txt",
+                        "class": "File",
+                        "location": "keep:99999999999999999999999999999998+99/file1.txt",
+                        "size": 0
+                    }],
+                    'class': 'Directory'
+                },
+                'cwl:tool': '57ad063d64c60dbddc027791f0649211+60/workflow.cwl#main'
+            },
+            'repository': 'arvados',
+            'script_version': 'master',
+            'minimum_script_version': '570509ab4d2ef93d870fd2b1f2eab178afb1bad9',
+            'script': 'cwl-runner'
+        }
+        stubs.pipeline_component = stubs.expect_job_spec.copy()
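+        # Expected pipeline instance: the job spec above wrapped in a
+        # "cwl-runner" component with the default runner options.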
+        stubs.expect_pipeline_instance = {
+            'name': 'submit_wf.cwl',
+            'state': 'RunningOnServer',
+            'owner_uuid': None,
+            "components": {
+                "cwl-runner": {
+                    'runtime_constraints': {'docker_image': '999999999999999999999999999999d3+99', 'min_ram_mb_per_node': 1024},
+                    'script_parameters': {
+                        'y': {"value": {'basename': '99999999999999999999999999999998+99', 'location': 'keep:99999999999999999999999999999998+99', 'class': 'Directory'}},
+                        'x': {"value": {
+                            'basename': 'blorp.txt',
+                            'class': 'File',
+                            'location': 'keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt',
+                            "size": 16
+                        }},
+                        'z': {"value": {'basename': 'anonymous', 'class': 'Directory',
+                              'listing': [
+                                  {
+                                      'basename': 'renamed.txt',
+                                      'class': 'File', 'location':
+                                      'keep:99999999999999999999999999999998+99/file1.txt',
+                                      'size': 0
+                                  }
+                              ]}},
+                        'cwl:tool': '57ad063d64c60dbddc027791f0649211+60/workflow.cwl#main',
+                        'arv:debug': True,
+                        'arv:enable_reuse': True,
+                        'arv:on_error': 'continue'
+                    },
+                    'repository': 'arvados',
+                    'script_version': 'master',
+                    'minimum_script_version': '570509ab4d2ef93d870fd2b1f2eab178afb1bad9',
+                    'script': 'cwl-runner',
+                    'job': {'state': 'Queued', 'uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'}
+                }
+            }
+        }
+        stubs.pipeline_create = copy.deepcopy(stubs.expect_pipeline_instance)
+        stubs.expect_pipeline_uuid = "zzzzz-d1hrv-zzzzzzzzzzzzzzz"
+        stubs.pipeline_create["uuid"] = stubs.expect_pipeline_uuid
+        stubs.pipeline_with_job = copy.deepcopy(stubs.pipeline_create)
+        stubs.pipeline_with_job["components"]["cwl-runner"]["job"] = {
+            "uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz",
+            "state": "Queued"
+        }
+        stubs.api.pipeline_instances().create().execute.return_value = stubs.pipeline_create
+        stubs.api.pipeline_instances().get().execute.return_value = stubs.pipeline_with_job
+
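+        # The packed workflow document that the runner is expected to
+        # embed as the workflow.json mount.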
+        with open("tests/wf/submit_wf_packed.cwl") as f:
+            expect_packed_workflow = yaml.round_trip_load(f)
+
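+        # Expected container request for --api=containers submissions.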
+        stubs.expect_container_spec = {
+            'priority': 500,
+            'mounts': {
+                '/var/spool/cwl': {
+                    'writable': True,
+                    'kind': 'collection'
+                },
+                '/var/lib/cwl/workflow.json': {
+                    'content': expect_packed_workflow,
+                    'kind': 'json'
+                },
+                'stdout': {
+                    'path': '/var/spool/cwl/cwl.output.json',
+                    'kind': 'file'
+                },
+                '/var/lib/cwl/cwl.input.json': {
+                    'kind': 'json',
+                    'content': {
+                        'y': {
+                            'basename': '99999999999999999999999999999998+99',
+                            'location': 'keep:99999999999999999999999999999998+99',
+                            'class': 'Directory'},
+                        'x': {
+                            'basename': u'blorp.txt',
+                            'class': 'File',
+                            'location': u'keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt',
+                            "size": 16
+                        },
+                        'z': {'basename': 'anonymous', 'class': 'Directory', 'listing': [
+                            {'basename': 'renamed.txt',
+                             'class': 'File',
+                             'location': 'keep:99999999999999999999999999999998+99/file1.txt',
+                             'size': 0
+                            }
+                        ]}
+                    },
+                }
+            },
+            'secret_mounts': {},
+            'state': 'Committed',
+            'command': ['arvados-cwl-runner', '--local', '--api=containers',
+                        '--no-log-timestamps', '--disable-validate',
+                        '--eval-timeout=20', '--thread-count=1',
+                        '--enable-reuse', "--collection-cache-size=256", '--debug', '--on-error=continue',
+                        '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json'],
+            'name': 'submit_wf.cwl',
+            'container_image': '999999999999999999999999999999d3+99',
+            'output_path': '/var/spool/cwl',
+            'cwd': '/var/spool/cwl',
+            'runtime_constraints': {
+                'API': True,
+                'vcpus': 1,
+                'ram': (1024+256)*1024*1024
+            },
+            'use_existing': True,
+            'properties': {},
+        }
+
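+        # workflows().update() records the uuid it was called with and
+        # echoes it back, so the update path can be asserted on.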
+        stubs.expect_workflow_uuid = "zzzzz-7fd4e-zzzzzzzzzzzzzzz"
+        stubs.api.workflows().create().execute.return_value = {
+            "uuid": stubs.expect_workflow_uuid,
+        }
+        def update_mock(**kwargs):
+            stubs.updated_uuid = kwargs.get('uuid')
+            return mock.DEFAULT
+        stubs.api.workflows().update.side_effect = update_mock
+        stubs.api.workflows().update().execute.side_effect = lambda **kwargs: {
+            "uuid": stubs.updated_uuid,
+        }
+
+        return func(self, stubs, *args, **kwargs)
+    return wrapped
+
+
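+# Each test drives arvados_cwl.main() against the stubbed API client and
+# asserts on the exact request bodies sent to the relevant endpoint.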
+class TestSubmit(unittest.TestCase):
+    @mock.patch("arvados_cwl.arvdocker.arv_docker_get_image")
+    @mock.patch("time.sleep")
+    @stubs
+    def test_submit(self, stubs, tm, arvdock):
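+        # Resolve dockerPull references to the stubbed image collections.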
+        def get_image(api_client, dockerRequirement, pull_image, project_uuid):
+            if dockerRequirement["dockerPull"] == 'arvados/jobs:'+arvados_cwl.__version__:
+                return '999999999999999999999999999999d3+99'
+            elif dockerRequirement["dockerPull"] == "debian:8":
+                return '999999999999999999999999999999d4+99'
+        arvdock.side_effect = get_image
+
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=jobs", "--debug",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        stubs.api.collections().create.assert_has_calls([
+            mock.call(body=JsonDiffMatcher({
+                'manifest_text':
+                '. 5bcc9fe8f8d5992e6cf418dc7ce4dbb3+16 0:16:blub.txt\n',
+                'replication_desired': None,
+                'name': 'submit_tool.cwl dependencies (5d373e7629203ce39e7c22af98a0f881+52)',
+            }), ensure_unique_name=False),
+            mock.call(body=JsonDiffMatcher({
+                'manifest_text':
+                '. 979af1245a12a1fed634d4222473bfdc+16 0:16:blorp.txt\n',
+                'replication_desired': None,
+                'name': 'submit_wf.cwl input (169f39d466a5438ac4a90e779bf750c7+53)',
+            }), ensure_unique_name=False),
+            mock.call(body=JsonDiffMatcher({
+                'manifest_text':
+                ". 68089141fbf7e020ac90a9d6a575bc8f+1312 0:1312:workflow.cwl\n",
+                'replication_desired': None,
+                'name': 'submit_wf.cwl',
+            }), ensure_unique_name=True)])
+
+        arvdock.assert_has_calls([
+            mock.call(stubs.api, {"class": "DockerRequirement", "dockerPull": "debian:8"}, True, None),
+            mock.call(stubs.api, {"class": "DockerRequirement", "dockerPull": "debian:8", 'http://arvados.org/cwl#dockerCollectionPDH': '999999999999999999999999999999d4+99'}, True, None),
+            mock.call(stubs.api, {'dockerPull': 'arvados/jobs:'+arvados_cwl.__version__}, True, None)
+        ])
+
+        expect_pipeline = copy.deepcopy(stubs.expect_pipeline_instance)
+        stubs.api.pipeline_instances().create.assert_called_with(
+            body=JsonDiffMatcher(expect_pipeline))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_pipeline_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @mock.patch("time.sleep")
+    @stubs
+    def test_submit_no_reuse(self, stubs, tm):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=jobs", "--debug", "--disable-reuse",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        expect_pipeline = copy.deepcopy(stubs.expect_pipeline_instance)
+        expect_pipeline["components"]["cwl-runner"]["script_parameters"]["arv:enable_reuse"] = {"value": False}
+        expect_pipeline["properties"] = {"run_options": {"enable_job_reuse": False}}
+
+        stubs.api.pipeline_instances().create.assert_called_with(
+            body=JsonDiffMatcher(expect_pipeline))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_pipeline_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_error_when_multiple_storage_classes_specified(self, stubs):
+        storage_classes = "foo,bar"
+        exited = arvados_cwl.main(
+                ["--debug", "--storage-classes", storage_classes,
+                 "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+                sys.stdin, sys.stderr, api_client=stubs.api)
+        self.assertEqual(exited, 1)
+
+    @mock.patch("time.sleep")
+    @stubs
+    def test_submit_on_error(self, stubs, tm):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=jobs", "--debug", "--on-error=stop",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        expect_pipeline = copy.deepcopy(stubs.expect_pipeline_instance)
+        expect_pipeline["components"]["cwl-runner"]["script_parameters"]["arv:on_error"] = "stop"
+
+        stubs.api.pipeline_instances().create.assert_called_with(
+            body=JsonDiffMatcher(expect_pipeline))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_pipeline_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @mock.patch("time.sleep")
+    @stubs
+    def test_submit_runner_ram(self, stubs, tm):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--debug", "--submit-runner-ram=2048",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        expect_pipeline = copy.deepcopy(stubs.expect_pipeline_instance)
+        expect_pipeline["components"]["cwl-runner"]["runtime_constraints"]["min_ram_mb_per_node"] = 2048
+
+        stubs.api.pipeline_instances().create.assert_called_with(
+            body=JsonDiffMatcher(expect_pipeline))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_pipeline_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @mock.patch("time.sleep")
+    @stubs
+    def test_submit_invalid_runner_ram(self, stubs, tm):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--debug", "--submit-runner-ram=-2048",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+        self.assertEqual(exited, 1)
+
+    @mock.patch("time.sleep")
+    @stubs
+    def test_submit_output_name(self, stubs, tm):
+        output_name = "test_output_name"
+
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--debug", "--output-name", output_name,
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        expect_pipeline = copy.deepcopy(stubs.expect_pipeline_instance)
+        expect_pipeline["components"]["cwl-runner"]["script_parameters"]["arv:output_name"] = output_name
+
+        stubs.api.pipeline_instances().create.assert_called_with(
+            body=JsonDiffMatcher(expect_pipeline))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_pipeline_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @mock.patch("time.sleep")
+    @stubs
+    def test_submit_pipeline_name(self, stubs, tm):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--debug", "--name=hello job 123",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+        self.assertEqual(exited, 0)
+
+        expect_pipeline = copy.deepcopy(stubs.expect_pipeline_instance)
+        expect_pipeline["name"] = "hello job 123"
+
+        stubs.api.pipeline_instances().create.assert_called_with(
+            body=JsonDiffMatcher(expect_pipeline))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_pipeline_uuid + '\n')
+
+    @mock.patch("time.sleep")
+    @stubs
+    def test_submit_output_tags(self, stubs, tm):
+        output_tags = "tag0,tag1,tag2"
+
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--debug", "--output-tags", output_tags,
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+        self.assertEqual(exited, 0)
+
+        expect_pipeline = copy.deepcopy(stubs.expect_pipeline_instance)
+        expect_pipeline["components"]["cwl-runner"]["script_parameters"]["arv:output_tags"] = output_tags
+
+        stubs.api.pipeline_instances().create.assert_called_with(
+            body=JsonDiffMatcher(expect_pipeline))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_pipeline_uuid + '\n')
+
+    @mock.patch("time.sleep")
+    @stubs
+    def test_submit_with_project_uuid(self, stubs, tm):
+        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--debug",
+             "--project-uuid", project_uuid,
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            sys.stdout, sys.stderr, api_client=stubs.api)
+        self.assertEqual(exited, 0)
+
+        expect_pipeline = copy.deepcopy(stubs.expect_pipeline_instance)
+        expect_pipeline["owner_uuid"] = project_uuid
+        stubs.api.pipeline_instances().create.assert_called_with(
+            body=JsonDiffMatcher(expect_pipeline))
+
+    @stubs
+    def test_submit_container(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        stubs.api.collections().create.assert_has_calls([
+            mock.call(body=JsonDiffMatcher({
+                'manifest_text':
+                '. 5bcc9fe8f8d5992e6cf418dc7ce4dbb3+16 0:16:blub.txt\n',
+                'replication_desired': None,
+                'name': 'submit_tool.cwl dependencies (5d373e7629203ce39e7c22af98a0f881+52)',
+            }), ensure_unique_name=False),
+            mock.call(body=JsonDiffMatcher({
+                'manifest_text':
+                '. 979af1245a12a1fed634d4222473bfdc+16 0:16:blorp.txt\n',
+                'replication_desired': None,
+                'name': 'submit_wf.cwl input (169f39d466a5438ac4a90e779bf750c7+53)',
+            }), ensure_unique_name=False)])
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_container_no_reuse(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--disable-reuse",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["command"] = [
+            'arvados-cwl-runner', '--local', '--api=containers',
+            '--no-log-timestamps', '--disable-validate',
+            '--eval-timeout=20', '--thread-count=1',
+            '--disable-reuse', "--collection-cache-size=256",
+            '--debug', '--on-error=continue',
+            '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+        expect_container["use_existing"] = False
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_container_reuse_disabled_by_workflow(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug",
+             "tests/wf/submit_wf_no_reuse.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+        self.assertEqual(exited, 0)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["command"] = [
+            'arvados-cwl-runner', '--local', '--api=containers',
+            '--no-log-timestamps', '--disable-validate',
+            '--eval-timeout=20', '--thread-count=1',
+            '--disable-reuse', "--collection-cache-size=256", '--debug', '--on-error=continue',
+            '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+        expect_container["use_existing"] = False
+        expect_container["name"] = "submit_wf_no_reuse.cwl"
+        expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][1]["hints"] = [
+            {
+                "class": "http://arvados.org/cwl#ReuseRequirement",
+                "enableReuse": False,
+            },
+        ]
+        expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["$namespaces"] = {
+            "arv": "http://arvados.org/cwl#",
+            "cwltool": "http://commonwl.org/cwltool#"
+        }
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+
+    @stubs
+    def test_submit_container_on_error(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--on-error=stop",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+                                       '--no-log-timestamps', '--disable-validate',
+                                       '--eval-timeout=20', '--thread-count=1',
+                                       '--enable-reuse', "--collection-cache-size=256",
+                                       '--debug', '--on-error=stop',
+                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_container_output_name(self, stubs):
+        output_name = "test_output_name"
+
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--output-name", output_name,
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+                                       '--no-log-timestamps', '--disable-validate',
+                                       '--eval-timeout=20', '--thread-count=1',
+                                       '--enable-reuse', "--collection-cache-size=256",
+                                       "--output-name="+output_name, '--debug', '--on-error=continue',
+                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+        expect_container["output_name"] = output_name
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_storage_classes(self, stubs):
+        exited = arvados_cwl.main(
+            ["--debug", "--submit", "--no-wait", "--api=containers", "--storage-classes=foo",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+                                       '--no-log-timestamps', '--disable-validate',
+                                       '--eval-timeout=20', '--thread-count=1',
+                                       '--enable-reuse', "--collection-cache-size=256", "--debug",
+                                       "--storage-classes=foo", '--on-error=continue',
+                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @mock.patch("arvados_cwl.task_queue.TaskQueue")
+    @mock.patch("arvados_cwl.arvworkflow.ArvadosWorkflow.job")
+    @mock.patch("arvados_cwl.executor.ArvCwlExecutor.make_output_collection", return_value = (None, None))
+    @stubs
+    def test_storage_classes_correctly_propagate_to_make_output_collection(self, stubs, make_output, job, tq):
+        def set_final_output(job_order, output_callback, runtimeContext):
+            output_callback("zzzzz-4zz18-zzzzzzzzzzzzzzzz", "success")
+            return []
+        job.side_effect = set_final_output
+
+        exited = arvados_cwl.main(
+            ["--debug", "--local", "--storage-classes=foo",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            sys.stdin, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        make_output.assert_called_with(u'Output of submit_wf.cwl', ['foo'], '', 'zzzzz-4zz18-zzzzzzzzzzzzzzzz')
+        self.assertEqual(exited, 0)
+
+    @mock.patch("arvados_cwl.task_queue.TaskQueue")
+    @mock.patch("arvados_cwl.arvworkflow.ArvadosWorkflow.job")
+    @mock.patch("arvados_cwl.executor.ArvCwlExecutor.make_output_collection", return_value = (None, None))
+    @stubs
+    def test_default_storage_classes_correctly_propagate_to_make_output_collection(self, stubs, make_output, job, tq):
+        def set_final_output(job_order, output_callback, runtimeContext):
+            output_callback("zzzzz-4zz18-zzzzzzzzzzzzzzzz", "success")
+            return []
+        job.side_effect = set_final_output
+
+        exited = arvados_cwl.main(
+            ["--debug", "--local",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            sys.stdin, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        make_output.assert_called_with(u'Output of submit_wf.cwl', ['default'], '', 'zzzzz-4zz18-zzzzzzzzzzzzzzzz')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_container_output_ttl(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--intermediate-output-ttl", "3600",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+                                       '--no-log-timestamps', '--disable-validate',
+                                       '--eval-timeout=20', '--thread-count=1',
+                                       '--enable-reuse', "--collection-cache-size=256", '--debug',
+                                       '--on-error=continue',
+                                       "--intermediate-output-ttl=3600",
+                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_container_trash_intermediate(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--trash-intermediate",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+                                       '--no-log-timestamps', '--disable-validate',
+                                       '--eval-timeout=20', '--thread-count=1',
+                                       '--enable-reuse', "--collection-cache-size=256",
+                                       '--debug', '--on-error=continue',
+                                       "--trash-intermediate",
+                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_container_output_tags(self, stubs):
+        output_tags = "tag0,tag1,tag2"
+
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--output-tags", output_tags,
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+                                       '--no-log-timestamps', '--disable-validate',
+                                       '--eval-timeout=20', '--thread-count=1',
+                                       '--enable-reuse', "--collection-cache-size=256",
+                                       "--output-tags="+output_tags, '--debug', '--on-error=continue',
+                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_container_runner_ram(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--submit-runner-ram=2048",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["runtime_constraints"]["ram"] = (2048+256)*1024*1024
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @mock.patch("arvados.collection.CollectionReader")
+    @mock.patch("time.sleep")
+    @stubs
+    def test_submit_file_keepref(self, stubs, tm, collectionReader):
+        collectionReader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), "blorp.txt")
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug",
+             "tests/wf/submit_keepref_wf.cwl"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+        self.assertEqual(exited, 0)
+
+    @mock.patch("arvados.collection.CollectionReader")
+    @mock.patch("time.sleep")
+    @stubs
+    def test_submit_keepref(self, stubs, tm, reader):
+        with open("tests/wf/expect_arvworkflow.cwl") as f:
+            reader().open().__enter__().read.return_value = f.read()
+
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug",
+             "keep:99999999999999999999999999999994+99/expect_arvworkflow.cwl#main", "-x", "XxX"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        expect_container = {
+            'priority': 500,
+            'mounts': {
+                '/var/spool/cwl': {
+                    'writable': True,
+                    'kind': 'collection'
+                },
+                'stdout': {
+                    'path': '/var/spool/cwl/cwl.output.json',
+                    'kind': 'file'
+                },
+                '/var/lib/cwl/workflow': {
+                    'portable_data_hash': '99999999999999999999999999999994+99',
+                    'kind': 'collection'
+                },
+                '/var/lib/cwl/cwl.input.json': {
+                    'content': {
+                        'x': 'XxX'
+                    },
+                    'kind': 'json'
+                }
+            }, 'state': 'Committed',
+            'output_path': '/var/spool/cwl',
+            'name': 'expect_arvworkflow.cwl#main',
+            'container_image': '999999999999999999999999999999d3+99',
+            'command': ['arvados-cwl-runner', '--local', '--api=containers',
+                        '--no-log-timestamps', '--disable-validate',
+                        '--eval-timeout=20', '--thread-count=1',
+                        '--enable-reuse', "--collection-cache-size=256", '--debug', '--on-error=continue',
+                        '/var/lib/cwl/workflow/expect_arvworkflow.cwl#main', '/var/lib/cwl/cwl.input.json'],
+            'cwd': '/var/spool/cwl',
+            'runtime_constraints': {
+                'API': True,
+                'vcpus': 1,
+                'ram': 1342177280
+            },
+            'use_existing': True,
+            'properties': {},
+            'secret_mounts': {}
+        }
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @mock.patch("arvados.collection.CollectionReader")
+    @mock.patch("time.sleep")
+    @stubs
+    def test_submit_jobs_keepref(self, stubs, tm, reader):
+        with open("tests/wf/expect_arvworkflow.cwl") as f:
+            reader().open().__enter__().read.return_value = f.read()
+
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=jobs", "--debug",
+             "keep:99999999999999999999999999999994+99/expect_arvworkflow.cwl#main", "-x", "XxX"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        expect_pipeline = copy.deepcopy(stubs.expect_pipeline_instance)
+        expect_pipeline["components"]["cwl-runner"]["script_parameters"]["x"] = "XxX"
+        del expect_pipeline["components"]["cwl-runner"]["script_parameters"]["y"]
+        del expect_pipeline["components"]["cwl-runner"]["script_parameters"]["z"]
+        expect_pipeline["components"]["cwl-runner"]["script_parameters"]["cwl:tool"] = "99999999999999999999999999999994+99/expect_arvworkflow.cwl#main"
+        expect_pipeline["name"] = "expect_arvworkflow.cwl#main"
+        stubs.api.pipeline_instances().create.assert_called_with(
+            body=JsonDiffMatcher(expect_pipeline))
+        self.assertEqual(exited, 0)
+
+    @mock.patch("time.sleep")
+    @stubs
+    def test_submit_arvworkflow(self, stubs, tm):
+        with open("tests/wf/expect_arvworkflow.cwl") as f:
+            stubs.api.workflows().get().execute.return_value = {"definition": f.read(), "name": "a test workflow"}
+
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug",
+             "962eh-7fd4e-gkbzl62qqtfig37", "-x", "XxX"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        expect_container = {
+            'priority': 500,
+            'mounts': {
+                '/var/spool/cwl': {
+                    'writable': True,
+                    'kind': 'collection'
+                },
+                'stdout': {
+                    'path': '/var/spool/cwl/cwl.output.json',
+                    'kind': 'file'
+                },
+                '/var/lib/cwl/workflow.json': {
+                    'kind': 'json',
+                    'content': {
+                        'cwlVersion': 'v1.0',
+                        '$graph': [
+                            {
+                                'id': '#main',
+                                'inputs': [
+                                    {'type': 'string', 'id': '#main/x'}
+                                ],
+                                'steps': [
+                                    {'in': [{'source': '#main/x', 'id': '#main/step1/x'}],
+                                     'run': '#submit_tool.cwl',
+                                     'id': '#main/step1',
+                                     'out': []}
+                                ],
+                                'class': 'Workflow',
+                                'outputs': []
+                            },
+                            {
+                                'inputs': [
+                                    {
+                                        'inputBinding': {'position': 1},
+                                        'type': 'string',
+                                        'id': '#submit_tool.cwl/x'}
+                                ],
+                                'requirements': [
+                                    {
+                                        'dockerPull': 'debian:8',
+                                        'class': 'DockerRequirement',
+                                        "http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
+                                    }
+                                ],
+                                'id': '#submit_tool.cwl',
+                                'outputs': [],
+                                'baseCommand': 'cat',
+                                'class': 'CommandLineTool'
+                            }
+                        ]
+                    }
+                },
+                '/var/lib/cwl/cwl.input.json': {
+                    'content': {
+                        'x': 'XxX'
+                    },
+                    'kind': 'json'
+                }
+            }, 'state': 'Committed',
+            'output_path': '/var/spool/cwl',
+            'name': 'a test workflow',
+            'container_image': "999999999999999999999999999999d3+99",
+            'command': ['arvados-cwl-runner', '--local', '--api=containers',
+                        '--no-log-timestamps', '--disable-validate',
+                        '--eval-timeout=20', '--thread-count=1',
+                        '--enable-reuse', "--collection-cache-size=256", '--debug', '--on-error=continue',
+                        '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json'],
+            'cwd': '/var/spool/cwl',
+            'runtime_constraints': {
+                'API': True,
+                'vcpus': 1,
+                'ram': 1342177280
+            },
+            'use_existing': True,
+            'properties': {
+                "template_uuid": "962eh-7fd4e-gkbzl62qqtfig37"
+            },
+            'secret_mounts': {}
+        }
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_container_name(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--name=hello container 123",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["name"] = "hello container 123"
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_missing_input(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+        self.assertEqual(exited, 0)
+
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job_missing.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+        self.assertEqual(exited, 1)
+
+    @stubs
+    def test_submit_container_project(self, stubs):
+        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--project-uuid="+project_uuid,
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["owner_uuid"] = project_uuid
+        expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+                                       '--no-log-timestamps', '--disable-validate',
+                                       "--eval-timeout=20", "--thread-count=1",
+                                       '--enable-reuse', "--collection-cache-size=256", '--debug',
+                                       '--on-error=continue',
+                                       '--project-uuid='+project_uuid,
+                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_container_eval_timeout(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--eval-timeout=60",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+                                       '--no-log-timestamps', '--disable-validate',
+                                       '--eval-timeout=60.0', '--thread-count=1',
+                                       '--enable-reuse', "--collection-cache-size=256",
+                                       '--debug', '--on-error=continue',
+                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_container_collection_cache(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--collection-cache-size=500",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+                                       '--no-log-timestamps', '--disable-validate',
+                                       '--eval-timeout=20', '--thread-count=1',
+                                       '--enable-reuse', "--collection-cache-size=500",
+                                       '--debug', '--on-error=continue',
+                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+        expect_container["runtime_constraints"]["ram"] = (1024+500)*1024*1024
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_container_thread_count(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--thread-count=20",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+                                       '--no-log-timestamps', '--disable-validate',
+                                       '--eval-timeout=20', '--thread-count=20',
+                                       '--enable-reuse', "--collection-cache-size=256",
+                                       '--debug', '--on-error=continue',
+                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_job_runner_image(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=jobs", "--debug", "--submit-runner-image=arvados/jobs:123",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        stubs.expect_pipeline_instance["components"]["cwl-runner"]["runtime_constraints"]["docker_image"] = "999999999999999999999999999999d5+99"
+
+        expect_pipeline = copy.deepcopy(stubs.expect_pipeline_instance)
+        stubs.api.pipeline_instances().create.assert_called_with(
+            body=JsonDiffMatcher(expect_pipeline))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_pipeline_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_container_runner_image(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--submit-runner-image=arvados/jobs:123",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        stubs.expect_container_spec["container_image"] = "999999999999999999999999999999d5+99"
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_priority(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--priority=669",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        stubs.expect_container_spec["priority"] = 669
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_wf_runner_resources(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug",
+                "tests/wf/submit_wf_runner_resources.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+        expect_container["runtime_constraints"] = {
+            "API": True,
+            "vcpus": 2,
+            "ram": (2000+512) * 2**20
+        }
+        expect_container["name"] = "submit_wf_runner_resources.cwl"
+        expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][1]["hints"] = [
+            {
+                "class": "http://arvados.org/cwl#WorkflowRunnerResources",
+                "coresMin": 2,
+                "ramMin": 2000,
+                "keep_cache": 512
+            }
+        ]
+        expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["$namespaces"] = {
+            "arv": "http://arvados.org/cwl#",
+        }
+        expect_container['command'] = ['arvados-cwl-runner', '--local', '--api=containers',
+                        '--no-log-timestamps', '--disable-validate',
+                        '--eval-timeout=20', '--thread-count=1',
+                        '--enable-reuse', "--collection-cache-size=512", '--debug', '--on-error=continue',
+                        '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
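+    # Reset arvdocker's image lookup cache so stubbed lookups don't leak
+    # between test cases.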
+    def tearDown(self):
+        arvados_cwl.arvdocker.arv_docker_clear_cache()
+
+    @mock.patch("arvados.commands.keepdocker.find_one_image_hash")
+    @mock.patch("cwltool.docker.DockerCommandLineJob.get_image")
+    @mock.patch("arvados.api")
+    def test_arvados_jobs_image(self, api, get_image, find_one_image_hash):
+        arvados_cwl.arvdocker.arv_docker_clear_cache()
+
+        arvrunner = mock.MagicMock()
+        arvrunner.project_uuid = ""
+        api.return_value = mock.MagicMock()
+        arvrunner.api = api.return_value
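+        # links().list() is called twice in sequence: once to find the
+        # docker_image_repo+tag link, then the docker_image_hash link.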
+        arvrunner.api.links().list().execute.side_effect = ({"items": [{"created_at": "",
+                                                                        "head_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzb",
+                                                                        "link_class": "docker_image_repo+tag",
+                                                                        "name": "arvados/jobs:"+arvados_cwl.__version__,
+                                                                        "owner_uuid": "",
+                                                                        "properties": {"image_timestamp": ""}}], "items_available": 1, "offset": 0},
+                                                            {"items": [{"created_at": "",
+                                                                        "head_uuid": "",
+                                                                        "link_class": "docker_image_hash",
+                                                                        "name": "123456",
+                                                                        "owner_uuid": "",
+                                                                        "properties": {"image_timestamp": ""}}], "items_available": 1, "offset": 0}
+        )
+        find_one_image_hash.return_value = "123456"
+
+        arvrunner.api.collections().list().execute.side_effect = ({"items": [{"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzb",
+                                                                              "owner_uuid": "",
+                                                                              "manifest_text": "",
+                                                                              "properties": ""
+                                                                          }], "items_available": 1, "offset": 0},)
+        arvrunner.api.collections().create().execute.return_value = {"uuid": ""}
+        arvrunner.api.collections().get().execute.return_value = {"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzb",
+                                                                  "portable_data_hash": "9999999999999999999999999999999b+99"}
+        self.assertEqual("9999999999999999999999999999999b+99",
+                         arvados_cwl.runner.arvados_jobs_image(arvrunner, "arvados/jobs:"+arvados_cwl.__version__))
+
+    @stubs
+    def test_submit_secrets(self, stubs):
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug",
+                "tests/wf/secret_wf.cwl", "tests/secret_test_job.yml"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
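+        # The secret input "pw" must not appear in cwl.input.json directly;
+        # it is replaced with a $include reference into /secrets.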
+        expect_container = {
+            "command": [
+                "arvados-cwl-runner",
+                "--local",
+                "--api=containers",
+                "--no-log-timestamps",
+                "--disable-validate",
+                "--eval-timeout=20",
+                '--thread-count=1',
+                "--enable-reuse",
+                "--collection-cache-size=256",
+                '--debug',
+                "--on-error=continue",
+                "/var/lib/cwl/workflow.json#main",
+                "/var/lib/cwl/cwl.input.json"
+            ],
+            "container_image": "999999999999999999999999999999d3+99",
+            "cwd": "/var/spool/cwl",
+            "mounts": {
+                "/var/lib/cwl/cwl.input.json": {
+                    "content": {
+                        "pw": {
+                            "$include": "/secrets/s0"
+                        }
+                    },
+                    "kind": "json"
+                },
+                "/var/lib/cwl/workflow.json": {
+                    "content": {
+                        "$graph": [
+                            {
+                                "$namespaces": {
+                                    "cwltool": "http://commonwl.org/cwltool#"
+                                },
+                                "arguments": [
+                                    "md5sum",
+                                    "example.conf"
+                                ],
+                                "class": "CommandLineTool",
+                                "hints": [
+                                    {
+                                        "class": "http://commonwl.org/cwltool#Secrets",
+                                        "secrets": [
+                                            "#secret_job.cwl/pw"
+                                        ]
+                                    }
+                                ],
+                                "id": "#secret_job.cwl",
+                                "inputs": [
+                                    {
+                                        "id": "#secret_job.cwl/pw",
+                                        "type": "string"
+                                    }
+                                ],
+                                "outputs": [
+                                    {
+                                        "id": "#secret_job.cwl/out",
+                                        "type": "stdout"
+                                    }
+                                ],
+                                "stdout": "hashed_example.txt",
+                                "requirements": [
+                                    {
+                                        "class": "InitialWorkDirRequirement",
+                                        "listing": [
+                                            {
+                                                "entry": "username: user\npassword: $(inputs.pw)\n",
+                                                "entryname": "example.conf"
+                                            }
+                                        ]
+                                    }
+                                ]
+                            },
+                            {
+                                "class": "Workflow",
+                                "hints": [
+                                    {
+                                        "class": "DockerRequirement",
+                                        "dockerPull": "debian:8",
+                                        "http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
+                                    },
+                                    {
+                                        "class": "http://commonwl.org/cwltool#Secrets",
+                                        "secrets": [
+                                            "#main/pw"
+                                        ]
+                                    }
+                                ],
+                                "id": "#main",
+                                "inputs": [
+                                    {
+                                        "id": "#main/pw",
+                                        "type": "string"
+                                    }
+                                ],
+                                "outputs": [
+                                    {
+                                        "id": "#main/out",
+                                        "outputSource": "#main/step1/out",
+                                        "type": "File"
+                                    }
+                                ],
+                                "steps": [
+                                    {
+                                        "id": "#main/step1",
+                                        "in": [
+                                            {
+                                                "id": "#main/step1/pw",
+                                                "source": "#main/pw"
+                                            }
+                                        ],
+                                        "out": [
+                                            "#main/step1/out"
+                                        ],
+                                        "run": "#secret_job.cwl"
+                                    }
+                                ]
+                            }
+                        ],
+                        "cwlVersion": "v1.0"
+                    },
+                    "kind": "json"
+                },
+                "/var/spool/cwl": {
+                    "kind": "collection",
+                    "writable": True
+                },
+                "stdout": {
+                    "kind": "file",
+                    "path": "/var/spool/cwl/cwl.output.json"
+                }
+            },
+            "name": "secret_wf.cwl",
+            "output_path": "/var/spool/cwl",
+            "priority": 500,
+            "properties": {},
+            "runtime_constraints": {
+                "API": True,
+                "ram": 1342177280,
+                "vcpus": 1
+            },
+            "secret_mounts": {
+                "/secrets/s0": {
+                    "content": "blorp",
+                    "kind": "text"
+                }
+            },
+            "state": "Committed",
+            "use_existing": True
+        }
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_request_uuid(self, stubs):
+        stubs.expect_container_request_uuid = "zzzzz-xvhdp-yyyyyyyyyyyyyyy"
+
+        stubs.api.container_requests().update().execute.return_value = {
+            "uuid": stubs.expect_container_request_uuid,
+            "container_uuid": "zzzzz-dz642-zzzzzzzzzzzzzzz",
+            "state": "Queued"
+        }
+
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--submit-request-uuid=zzzzz-xvhdp-yyyyyyyyyyyyyyy",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        stubs.api.container_requests().update.assert_called_with(
+            uuid="zzzzz-xvhdp-yyyyyyyyyyyyyyy", body=JsonDiffMatcher(stubs.expect_container_spec))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_container_cluster_id(self, stubs):
+        stubs.api._rootDesc["remoteHosts"]["zbbbb"] = "123"
+
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--submit-runner-cluster=zbbbb",
+                "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+        expect_container = copy.deepcopy(stubs.expect_container_spec)
+
+        stubs.api.container_requests().create.assert_called_with(
+            body=JsonDiffMatcher(expect_container), cluster_id="zbbbb")
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_container_request_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_submit_validate_cluster_id(self, stubs):
+        stubs.api._rootDesc["remoteHosts"]["zbbbb"] = "123"
+        exited = arvados_cwl.main(
+            ["--submit", "--no-wait", "--api=containers", "--debug", "--submit-runner-cluster=zcccc",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+        self.assertEqual(exited, 1)
+
+
+class TestCreateTemplate(unittest.TestCase):
+    existing_template_uuid = "zzzzz-d1hrv-validworkfloyml"
+
+    def _adjust_script_params(self, expect_component):
+        expect_component['script_parameters']['x'] = {
+            'dataclass': 'File',
+            'required': True,
+            'type': 'File',
+            'value': '169f39d466a5438ac4a90e779bf750c7+53/blorp.txt',
+        }
+        expect_component['script_parameters']['y'] = {
+            'dataclass': 'Collection',
+            'required': True,
+            'type': 'Directory',
+            'value': '99999999999999999999999999999998+99',
+        }
+        expect_component['script_parameters']['z'] = {
+            'dataclass': 'Collection',
+            'required': True,
+            'type': 'Directory',
+        }
+
+    @stubs
+    def test_create(self, stubs):
+        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+
+        exited = arvados_cwl.main(
+            ["--create-workflow", "--debug",
+             "--api=jobs",
+             "--project-uuid", project_uuid,
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        stubs.api.pipeline_instances().create.assert_not_called()
+        stubs.api.jobs().create.assert_not_called()
+
+        expect_component = copy.deepcopy(stubs.expect_job_spec)
+        self._adjust_script_params(expect_component)
+        expect_template = {
+            "components": {
+                "submit_wf.cwl": expect_component,
+            },
+            "name": "submit_wf.cwl",
+            "owner_uuid": project_uuid,
+        }
+        stubs.api.pipeline_templates().create.assert_called_with(
+            body=JsonDiffMatcher(expect_template), ensure_unique_name=True)
+
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_pipeline_template_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_create_name(self, stubs):
+        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+
+        exited = arvados_cwl.main(
+            ["--create-workflow", "--debug",
+             "--project-uuid", project_uuid,
+             "--api=jobs",
+             "--name", "testing 123",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        stubs.api.pipeline_instances().create.assert_not_called()
+        stubs.api.jobs().create.assert_not_called()
+
+        expect_component = copy.deepcopy(stubs.expect_job_spec)
+        self._adjust_script_params(expect_component)
+        expect_template = {
+            "components": {
+                "testing 123": expect_component,
+            },
+            "name": "testing 123",
+            "owner_uuid": project_uuid,
+        }
+        stubs.api.pipeline_templates().create.assert_called_with(
+            body=JsonDiffMatcher(expect_template), ensure_unique_name=True)
+
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_pipeline_template_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_update_name(self, stubs):
+        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+
+        exited = arvados_cwl.main(
+            ["--update-workflow", self.existing_template_uuid,
+             "--debug",
+             "--project-uuid", project_uuid,
+             "--api=jobs",
+             "--name", "testing 123",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        stubs.api.pipeline_instances().create.assert_not_called()
+        stubs.api.jobs().create.assert_not_called()
+
+        expect_component = copy.deepcopy(stubs.expect_job_spec)
+        self._adjust_script_params(expect_component)
+        expect_template = {
+            "components": {
+                "testing 123": expect_component,
+            },
+            "name": "testing 123",
+            "owner_uuid": project_uuid,
+        }
+        stubs.api.pipeline_templates().create.assert_not_called()
+        stubs.api.pipeline_templates().update.assert_called_with(
+            body=JsonDiffMatcher(expect_template), uuid=self.existing_template_uuid)
+
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         self.existing_template_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+
+class TestCreateWorkflow(unittest.TestCase):
+    existing_workflow_uuid = "zzzzz-7fd4e-validworkfloyml"
+    expect_workflow = StripYAMLComments(
+        open("tests/wf/expect_packed.cwl").read())
+
+    @stubs
+    def test_create(self, stubs):
+        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+
+        exited = arvados_cwl.main(
+            ["--create-workflow", "--debug",
+             "--api=containers",
+             "--project-uuid", project_uuid,
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        stubs.api.pipeline_templates().create.assert_not_called()
+        stubs.api.container_requests().create.assert_not_called()
+
+        body = {
+            "workflow": {
+                "owner_uuid": project_uuid,
+                "name": "submit_wf.cwl",
+                "description": "",
+                "definition": self.expect_workflow,
+            }
+        }
+        stubs.api.workflows().create.assert_called_with(
+            body=JsonDiffMatcher(body))
+
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_workflow_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_create_name(self, stubs):
+        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+
+        exited = arvados_cwl.main(
+            ["--create-workflow", "--debug",
+             "--api=containers",
+             "--project-uuid", project_uuid,
+             "--name", "testing 123",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        stubs.api.pipeline_templates().create.assert_not_called()
+        stubs.api.container_requests().create.assert_not_called()
+
+        body = {
+            "workflow": {
+                "owner_uuid": project_uuid,
+                "name": "testing 123",
+                "description": "",
+                "definition": self.expect_workflow,
+            }
+        }
+        stubs.api.workflows().create.assert_called_with(
+            body=JsonDiffMatcher(body))
+
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_workflow_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_incompatible_api(self, stubs):
+        capture_stderr = io.StringIO()
+        acr_logger = logging.getLogger('arvados.cwl-runner')
+        stderr_logger = logging.StreamHandler(capture_stderr)
+        acr_logger.addHandler(stderr_logger)
+
+        exited = arvados_cwl.main(
+            ["--update-workflow", self.existing_workflow_uuid,
+             "--api=jobs",
+             "--debug",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            sys.stderr, sys.stderr, api_client=stubs.api)
+        self.assertEqual(exited, 1)
+        self.assertRegexpMatches(
+            capture_stderr.getvalue(),
+            "--update-workflow arg '{}' uses 'containers' API, but --api='jobs' specified".format(self.existing_workflow_uuid))
+        acr_logger.removeHandler(stderr_logger)
+
+    @stubs
+    def test_update(self, stubs):
+        exited = arvados_cwl.main(
+            ["--update-workflow", self.existing_workflow_uuid,
+             "--debug",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        body = {
+            "workflow": {
+                "name": "submit_wf.cwl",
+                "description": "",
+                "definition": self.expect_workflow,
+            }
+        }
+        stubs.api.workflows().update.assert_called_with(
+            uuid=self.existing_workflow_uuid,
+            body=JsonDiffMatcher(body))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         self.existing_workflow_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_update_name(self, stubs):
+        exited = arvados_cwl.main(
+            ["--update-workflow", self.existing_workflow_uuid,
+             "--debug", "--name", "testing 123",
+             "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        body = {
+            "workflow": {
+                "name": "testing 123",
+                "description": "",
+                "definition": self.expect_workflow,
+            }
+        }
+        stubs.api.workflows().update.assert_called_with(
+            uuid=self.existing_workflow_uuid,
+            body=JsonDiffMatcher(body))
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         self.existing_workflow_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_create_collection_per_tool(self, stubs):
+        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+
+        exited = arvados_cwl.main(
+            ["--create-workflow", "--debug",
+             "--api=containers",
+             "--project-uuid", project_uuid,
+             "tests/collection_per_tool/collection_per_tool.cwl"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        toolfile = "tests/collection_per_tool/collection_per_tool_packed.cwl"
+        expect_workflow = StripYAMLComments(open(toolfile).read())
+
+        body = {
+            "workflow": {
+                "owner_uuid": project_uuid,
+                "name": "collection_per_tool.cwl",
+                "description": "",
+                "definition": expect_workflow,
+            }
+        }
+        stubs.api.workflows().create.assert_called_with(
+            body=JsonDiffMatcher(body))
+
+        self.assertEqual(stubs.capture_stdout.getvalue(),
+                         stubs.expect_workflow_uuid + '\n')
+        self.assertEqual(exited, 0)
+
+class TestTemplateInputs(unittest.TestCase):
+    expect_template = {
+        "components": {
+            "inputs_test.cwl": {
+                'runtime_constraints': {
+                    'docker_image': '999999999999999999999999999999d3+99',
+                    'min_ram_mb_per_node': 1024
+                },
+                'script_parameters': {
+                    'cwl:tool':
+                    'a2de777156fb700f1363b1f2e370adca+60/workflow.cwl#main',
+                    'fileInput': {
+                        'type': 'File',
+                        'dataclass': 'File',
+                        'required': True,
+                        'title': "It's a file; we expect to find some characters in it.",
+                        'description': 'If there were anything further to say, it would be said here,\nor here.'
+                    },
+                    'floatInput': {
+                        'type': 'float',
+                        'dataclass': 'number',
+                        'required': True,
+                        'title': 'Floats like a duck',
+                        'default': 0.1,
+                        'value': 0.1,
+                    },
+                    'optionalFloatInput': {
+                        'type': ['null', 'float'],
+                        'dataclass': 'number',
+                        'required': False,
+                    },
+                    'boolInput': {
+                        'type': 'boolean',
+                        'dataclass': 'boolean',
+                        'required': True,
+                        'title': 'True or false?',
+                    },
+                },
+                'repository': 'arvados',
+                'script_version': 'master',
+                'minimum_script_version': '570509ab4d2ef93d870fd2b1f2eab178afb1bad9',
+                'script': 'cwl-runner',
+            },
+        },
+        "name": "inputs_test.cwl",
+    }
+
+    @stubs
+    def test_inputs_empty(self, stubs):
+        exited = arvados_cwl.main(
+            ["--create-template",
+             "tests/wf/inputs_test.cwl", "tests/order/empty_order.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        stubs.api.pipeline_templates().create.assert_called_with(
+            body=JsonDiffMatcher(self.expect_template), ensure_unique_name=True)
+
+        self.assertEqual(exited, 0)
+
+    @stubs
+    def test_inputs(self, stubs):
+        exited = arvados_cwl.main(
+            ["--create-template",
+             "tests/wf/inputs_test.cwl", "tests/order/inputs_test_order.json"],
+            stubs.capture_stdout, sys.stderr, api_client=stubs.api)
+
+        expect_template = copy.deepcopy(self.expect_template)
+        params = expect_template[
+            "components"]["inputs_test.cwl"]["script_parameters"]
+        params["fileInput"]["value"] = '169f39d466a5438ac4a90e779bf750c7+53/blorp.txt'
+        params["cwl:tool"] = 'a2de777156fb700f1363b1f2e370adca+60/workflow.cwl#main'
+        params["floatInput"]["value"] = 1.234
+        params["boolInput"]["value"] = True
+
+        stubs.api.pipeline_templates().create.assert_called_with(
+            body=JsonDiffMatcher(expect_template), ensure_unique_name=True)
+        self.assertEqual(exited, 0)
diff --git a/sdk/cwl/tests/test_tq.py b/sdk/cwl/tests/test_tq.py
new file mode 100644 (file)
index 0000000..a094890
--- /dev/null
@@ -0,0 +1,58 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import functools
+import mock
+import sys
+import unittest
+import json
+import logging
+import os
+import threading
+
+from arvados_cwl.task_queue import TaskQueue
+
+def success_task():
+    pass
+
+def fail_task():
+    raise Exception("Testing error handling")
+
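+# Behavior relied on below (inferred from these assertions, not a statement
+# of the TaskQueue internals): queued callables run on worker threads, and a
+# task's exception is captured in tq.error rather than propagated, so
+# tq.error stays None only when every task succeeds.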
+class TestTaskQueue(unittest.TestCase):
+    def test_tq(self):
+        tq = TaskQueue(threading.Lock(), 2)
+        try:
+            self.assertIsNone(tq.error)
+
+            unlock = threading.Lock()
+            unlock.acquire()
+            check_done = threading.Event()
+
+            tq.add(success_task, unlock, check_done)
+            tq.add(success_task, unlock, check_done)
+            tq.add(success_task, unlock, check_done)
+            tq.add(success_task, unlock, check_done)
+        finally:
+            tq.join()
+
+        self.assertIsNone(tq.error)
+
+
+    def test_tq_error(self):
+        tq = TaskQueue(threading.Lock(), 2)
+        try:
+            self.assertIsNone(tq.error)
+
+            unlock = threading.Lock()
+            unlock.acquire()
+            check_done = threading.Event()
+
+            tq.add(success_task, unlock, check_done)
+            tq.add(success_task, unlock, check_done)
+            tq.add(fail_task, unlock, check_done)
+            tq.add(success_task, unlock, check_done)
+        finally:
+            tq.join()
+
+        self.assertIsNotNone(tq.error)
diff --git a/sdk/cwl/tests/test_urljoin.py b/sdk/cwl/tests/test_urljoin.py
new file mode 100644 (file)
index 0000000..86a053e
--- /dev/null
@@ -0,0 +1,60 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import functools
+import mock
+import sys
+import unittest
+import json
+import logging
+import os
+
+import arvados
+import arvados.keep
+import arvados.collection
+import arvados_cwl
+
+from arvados_cwl.fsaccess import CollectionFetcher
+
+class TestUrljoin(unittest.TestCase):
+    def test_urljoin(self):
+        """Test path joining for keep references."""
+
+        cf = CollectionFetcher({}, None)
+
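+        # Semantics exercised below: a relative name resolves against the
+        # base's directory, "#frag" attaches a fragment to the base, a
+        # leading "/" replaces the whole path within the collection, and an
+        # absolute keep: URI replaces the base entirely.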
+        self.assertEqual("keep:99999999999999999999999999999991+99/hw.py",
+                          cf.urljoin("keep:99999999999999999999999999999991+99", "hw.py"))
+
+        self.assertEqual("keep:99999999999999999999999999999991+99/hw.py",
+                          cf.urljoin("keep:99999999999999999999999999999991+99/", "hw.py"))
+
+        self.assertEqual("keep:99999999999999999999999999999991+99/hw.py#main",
+                          cf.urljoin("keep:99999999999999999999999999999991+99", "hw.py#main"))
+
+        self.assertEqual("keep:99999999999999999999999999999991+99/hw.py#main",
+                          cf.urljoin("keep:99999999999999999999999999999991+99/hw.py", "#main"))
+
+        self.assertEqual("keep:99999999999999999999999999999991+99/dir/hw.py#main",
+                          cf.urljoin("keep:99999999999999999999999999999991+99/dir/hw.py", "#main"))
+
+        self.assertEqual("keep:99999999999999999999999999999991+99/dir/wh.py",
+                          cf.urljoin("keep:99999999999999999999999999999991+99/dir/hw.py", "wh.py"))
+
+        self.assertEqual("keep:99999999999999999999999999999991+99/wh.py",
+                          cf.urljoin("keep:99999999999999999999999999999991+99/dir/hw.py", "/wh.py"))
+
+        self.assertEqual("keep:99999999999999999999999999999991+99/wh.py#main",
+                          cf.urljoin("keep:99999999999999999999999999999991+99/dir/hw.py", "/wh.py#main"))
+
+        self.assertEqual("keep:99999999999999999999999999999991+99/wh.py",
+                          cf.urljoin("keep:99999999999999999999999999999991+99/hw.py#main", "wh.py"))
+
+        self.assertEqual("keep:99999999999999999999999999999992+99",
+                          cf.urljoin("keep:99999999999999999999999999999991+99", "keep:99999999999999999999999999999992+99"))
+
+        self.assertEqual("keep:99999999999999999999999999999991+99/dir/wh.py",
+                          cf.urljoin("keep:99999999999999999999999999999991+99/dir/", "wh.py"))
+
+    def test_resolver(self):
+        pass
diff --git a/sdk/cwl/tests/test_util.py b/sdk/cwl/tests/test_util.py
new file mode 100644 (file)
index 0000000..3ca02c7
--- /dev/null
@@ -0,0 +1,56 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from builtins import bytes
+
+import unittest
+import mock
+import datetime
+import httplib2
+
+from arvados_cwl.util import *
+from arvados.errors import ApiError
+
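+# Rebind datetime.datetime below to freeze utcnow() for the whole module, so
+# the trash_at assertion is deterministic (2018-01-01 00:00:00 plus the TTL).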
+class MockDateTime(datetime.datetime):
+    @classmethod
+    def utcnow(cls):
+        return datetime.datetime(2018, 1, 1, 0, 0, 0, 0)
+
+datetime.datetime = MockDateTime
+
+class TestUtil(unittest.TestCase):
+    def test_get_intermediate_collection_info(self):
+        name = "one"
+        current_container = {"uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz"}
+        intermediate_output_ttl = 120
+
+        info = get_intermediate_collection_info(name, current_container, intermediate_output_ttl)
+
+        self.assertEqual(info["name"], "Intermediate collection for step one")
+        self.assertEqual(info["trash_at"], datetime.datetime(2018, 1, 1, 0, 2, 0, 0))
+        self.assertEqual(info["properties"], {"type" : "intermediate", "container" : "zzzzz-8i9sb-zzzzzzzzzzzzzzz"})
+
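+    # For orientation, a minimal sketch of what the helper is expected to
+    # return, reconstructed from the assertions above (hypothetical, not the
+    # actual arvados_cwl.util implementation):
+    #
+    #     def get_intermediate_collection_info(name, current_container, ttl):
+    #         return {
+    #             "name": "Intermediate collection for step " + name,
+    #             "trash_at": datetime.datetime.utcnow() + datetime.timedelta(seconds=ttl),
+    #             "properties": {"type": "intermediate",
+    #                            "container": current_container["uuid"]},
+    #         }
+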
+    def test_get_current_container_success(self):
+        api = mock.MagicMock()
+        api.containers().current().execute.return_value = {"uuid" : "zzzzz-8i9sb-zzzzzzzzzzzzzzz"}
+
+        current_container = get_current_container(api)
+
+        self.assertEqual(current_container, {"uuid" : "zzzzz-8i9sb-zzzzzzzzzzzzzzz"})
+
+    def test_get_current_container_error(self):
+        api = mock.MagicMock()
+        api.containers().current().execute.side_effect = ApiError(httplib2.Response({"status": 300}), bytes(b""))
+        logger = mock.MagicMock()
+
+        with self.assertRaises(ApiError):
+            get_current_container(api, num_retries=0, logger=logger)
+
+    def test_get_current_container_404_error(self):
+        api = mock.MagicMock()
+        api.containers().current().execute.side_effect = ApiError(httplib2.Response({"status": 404}), bytes(b""))
+        logger = mock.MagicMock()
+
+        current_container = get_current_container(api, num_retries=0, logger=logger)
+        self.assertIsNone(current_container)
\ No newline at end of file
diff --git a/sdk/cwl/tests/testdir/a b/sdk/cwl/tests/testdir/a
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/testdir/b b/sdk/cwl/tests/testdir/b
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/testdir/c/d b/sdk/cwl/tests/testdir/c/d
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/tmp1/tmp2/tmp3/.gitkeep b/sdk/cwl/tests/tmp1/tmp2/tmp3/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/tool/blub.txt b/sdk/cwl/tests/tool/blub.txt
new file mode 100644 (file)
index 0000000..f12927b
--- /dev/null
@@ -0,0 +1 @@
+blibber blubber
diff --git a/sdk/cwl/tests/tool/submit_tool.cwl b/sdk/cwl/tests/tool/submit_tool.cwl
new file mode 100644 (file)
index 0000000..aadbd56
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Test case for arvados-cwl-runner
+#
+# Used to test that scanning a tool file for dependencies (e.g. the default
+# value blub.txt) and uploading them to Keep work as intended.
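+#
+# After upload, the packed workflow is expected to refer to the file by its
+# Keep locator instead; see expect_packed.cwl, which pins it to
+# keep:5d373e7629203ce39e7c22af98a0f881+52/blub.txt.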
+
+class: CommandLineTool
+cwlVersion: v1.0
+requirements:
+  - class: DockerRequirement
+    dockerPull: debian:8
+inputs:
+  - id: x
+    type: File
+    default:
+      class: File
+      location: blub.txt
+    inputBinding:
+      position: 1
+outputs: []
+baseCommand: cat
diff --git a/sdk/cwl/tests/wf-defaults/default-dir1.cwl b/sdk/cwl/tests/wf-defaults/default-dir1.cwl
new file mode 100644 (file)
index 0000000..fdd56be
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  inp2:
+    type: Directory
+    default:
+      class: Directory
+      location: inp1
+  inp1:
+    type: File
+    default:
+      class: File
+      location: inp1/hello.txt
+outputs: []
+arguments: [echo, $(inputs.inp1), $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir2.cwl b/sdk/cwl/tests/wf-defaults/default-dir2.cwl
new file mode 100644 (file)
index 0000000..98931ab
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  inp2:
+    type: Directory
+    default:
+      class: Directory
+      basename: inp2
+      listing:
+        - class: File
+          basename: "hello.txt"
+          contents: "hello world"
+outputs: []
+arguments: [echo, $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir3.cwl b/sdk/cwl/tests/wf-defaults/default-dir3.cwl
new file mode 100644 (file)
index 0000000..3d0fe22
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  inp2:
+    type: Directory
+    default:
+      class: Directory
+      listing:
+        - class: File
+          location: "inp1/hello.txt"
+outputs: []
+arguments: [echo, $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir4.cwl b/sdk/cwl/tests/wf-defaults/default-dir4.cwl
new file mode 100644 (file)
index 0000000..8bfc5d6
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+steps:
+  step1:
+    in: []
+    out: []
+    run:
+      class: CommandLineTool
+      inputs:
+        inp2:
+          type: Directory
+          default:
+            class: Directory
+            location: inp1
+      outputs: []
+      arguments: [echo, $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir5.cwl b/sdk/cwl/tests/wf-defaults/default-dir5.cwl
new file mode 100644 (file)
index 0000000..2e66b10
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+steps:
+  step1:
+    in: []
+    out: []
+    run:
+      id: stepid
+      class: CommandLineTool
+      inputs:
+        inp2:
+          type: Directory
+          default:
+            class: Directory
+            location: inp1
+      outputs: []
+      arguments: [echo, $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir6.cwl b/sdk/cwl/tests/wf-defaults/default-dir6.cwl
new file mode 100644 (file)
index 0000000..f779aef
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+steps:
+  step1:
+    in: []
+    out: []
+    run: default-dir6a.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir6a.cwl b/sdk/cwl/tests/wf-defaults/default-dir6a.cwl
new file mode 100644 (file)
index 0000000..ccc0ceb
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  inp2:
+    type: Directory
+    default:
+      class: Directory
+      location: inp1
+outputs: []
+arguments: [echo, $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir7.cwl b/sdk/cwl/tests/wf-defaults/default-dir7.cwl
new file mode 100644 (file)
index 0000000..5c74ef0
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs:
+  inp2:
+    type: Directory
+    default:
+      class: Directory
+      location: inp1
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+steps:
+  step1:
+    in:
+      inp2: inp2
+    out: []
+    run: default-dir7a.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/default-dir7a.cwl b/sdk/cwl/tests/wf-defaults/default-dir7a.cwl
new file mode 100644 (file)
index 0000000..4b71c13
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  inp2:
+    type: Directory
+outputs: []
+arguments: [echo, $(inputs.inp2)]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/inp1/hello.txt b/sdk/cwl/tests/wf-defaults/inp1/hello.txt
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/wf-defaults/wf1.cwl b/sdk/cwl/tests/wf-defaults/wf1.cwl
new file mode 100644 (file)
index 0000000..0133c7a
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+steps:
+  step1:
+    in: []
+    out: []
+    run: default-dir1.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/wf2.cwl b/sdk/cwl/tests/wf-defaults/wf2.cwl
new file mode 100644 (file)
index 0000000..ffe8731
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+steps:
+  step1:
+    in: []
+    out: []
+    run: default-dir2.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/wf3.cwl b/sdk/cwl/tests/wf-defaults/wf3.cwl
new file mode 100644 (file)
index 0000000..0292d13
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+steps:
+  step1:
+    in: []
+    out: []
+    run: default-dir3.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/wf4.cwl b/sdk/cwl/tests/wf-defaults/wf4.cwl
new file mode 100644 (file)
index 0000000..6e562e4
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  SubworkflowFeatureRequirement: {}
+steps:
+  step1:
+    in: []
+    out: []
+    run: default-dir4.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/wf5.cwl b/sdk/cwl/tests/wf-defaults/wf5.cwl
new file mode 100644 (file)
index 0000000..de2748c
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  SubworkflowFeatureRequirement: {}
+steps:
+  step1:
+    in: []
+    out: []
+    run: default-dir5.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/wf6.cwl b/sdk/cwl/tests/wf-defaults/wf6.cwl
new file mode 100644 (file)
index 0000000..6bcf69e
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  SubworkflowFeatureRequirement: {}
+steps:
+  step1:
+    requirements:
+      arv:RunInSingleContainer: {}
+    in: []
+    out: []
+    run: default-dir6.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf-defaults/wf7.cwl b/sdk/cwl/tests/wf-defaults/wf7.cwl
new file mode 100644 (file)
index 0000000..715f1ef
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs: []
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  SubworkflowFeatureRequirement: {}
+steps:
+  step1:
+    requirements:
+      arv:RunInSingleContainer: {}
+    in: []
+    out: []
+    run: default-dir7.cwl
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf/check_mem.py b/sdk/cwl/tests/wf/check_mem.py
new file mode 100644 (file)
index 0000000..b4322a8
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+from __future__ import division
+
+import arvados
+import sys
+import os
+
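+# Compare the memory actually granted by the API against the expected value
+# passed as argv[1]: the jobs API reports min_ram_mb_per_node in MiB, while
+# the containers API reports ram in bytes, hence the // (1024*1024).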
+if "JOB_UUID" in os.environ:
+    requested = arvados.api().jobs().get(uuid=os.environ["JOB_UUID"]).execute()["runtime_constraints"]["min_ram_mb_per_node"]
+else:
+    requested = arvados.api().containers().current().execute()["runtime_constraints"]["ram"] // (1024*1024)
+
+print("Requested %d expected %d" % (requested, int(sys.argv[1])))
+
+exit(0 if requested == int(sys.argv[1]) else 1)
diff --git a/sdk/cwl/tests/wf/echo-subwf.cwl b/sdk/cwl/tests/wf/echo-subwf.cwl
new file mode 100644 (file)
index 0000000..d7c8037
--- /dev/null
@@ -0,0 +1,23 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+requirements:
+  ResourceRequirement:
+    coresMin: 1
+
+inputs: []
+
+outputs: []
+
+steps:
+  echo_a:
+    run: echo_a.cwl
+    in: []
+    out: []
+  echo_b:
+    run: echo_b.cwl
+    in: []
+    out: []
diff --git a/sdk/cwl/tests/wf/echo-wf.cwl b/sdk/cwl/tests/wf/echo-wf.cwl
new file mode 100644 (file)
index 0000000..5cdd80d
--- /dev/null
@@ -0,0 +1,22 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+requirements:
+  SubworkflowFeatureRequirement: {}
+
+inputs: []
+
+outputs: []
+
+steps:
+  echo-subwf:
+    requirements:
+      arv:RunInSingleContainer: {}
+    run: echo-subwf.cwl
+    in: []
+    out: []
diff --git a/sdk/cwl/tests/wf/echo_a.cwl b/sdk/cwl/tests/wf/echo_a.cwl
new file mode 100644 (file)
index 0000000..0a734b3
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+requirements:
+  ResourceRequirement:
+    coresMin: 2
+    outdirMin: 1024
+inputs: []
+outputs: []
+baseCommand: echo
+arguments:
+  - "a"
diff --git a/sdk/cwl/tests/wf/echo_b.cwl b/sdk/cwl/tests/wf/echo_b.cwl
new file mode 100644 (file)
index 0000000..7a052f8
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: CommandLineTool
+requirements:
+  ResourceRequirement:
+    coresMin: 3
+    outdirMin: 2048
+inputs: []
+outputs: []
+baseCommand: echo
+arguments:
+  - "b"
diff --git a/sdk/cwl/tests/wf/expect_arvworkflow.cwl b/sdk/cwl/tests/wf/expect_arvworkflow.cwl
new file mode 100644 (file)
index 0000000..5739ddc
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+$graph:
+- class: Workflow
+  id: '#main'
+  inputs:
+  - id: '#main/x'
+    type: string
+  outputs: []
+  steps:
+  - id: '#main/step1'
+    in:
+    - {id: '#main/step1/x', source: '#main/x'}
+    out: []
+    run: '#submit_tool.cwl'
+- baseCommand: cat
+  class: CommandLineTool
+  id: '#submit_tool.cwl'
+  inputs:
+  - id: '#submit_tool.cwl/x'
+    inputBinding: {position: 1}
+    type: string
+  outputs: []
+  requirements:
+  - {class: DockerRequirement, dockerPull: 'debian:8'}
diff --git a/sdk/cwl/tests/wf/expect_packed.cwl b/sdk/cwl/tests/wf/expect_packed.cwl
new file mode 100644 (file)
index 0000000..cb2e5ff
--- /dev/null
@@ -0,0 +1,94 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+{
+    "$graph": [
+        {
+            "baseCommand": "cat",
+            "class": "CommandLineTool",
+            "id": "#submit_tool.cwl",
+            "inputs": [
+                {
+                    "default": {
+                        "class": "File",
+                        "location": "keep:5d373e7629203ce39e7c22af98a0f881+52/blub.txt"
+                    },
+                    "id": "#submit_tool.cwl/x",
+                    "inputBinding": {
+                        "position": 1
+                    },
+                    "type": "File"
+                }
+            ],
+            "outputs": [],
+            "requirements": [
+                {
+                    "class": "DockerRequirement",
+                    "dockerPull": "debian:8",
+                    "http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
+                }
+            ]
+        },
+        {
+            "class": "Workflow",
+            "id": "#main",
+            "inputs": [
+                {
+                    "default": {
+                        "basename": "blorp.txt",
+                        "class": "File",
+                        "location": "keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt",
+                        "nameext": ".txt",
+                        "nameroot": "blorp",
+                        "size": 16
+                    },
+                    "id": "#main/x",
+                    "type": "File"
+                },
+                {
+                    "default": {
+                        "basename": "99999999999999999999999999999998+99",
+                        "class": "Directory",
+                        "location": "keep:99999999999999999999999999999998+99"
+                    },
+                    "id": "#main/y",
+                    "type": "Directory"
+                },
+                {
+                    "default": {
+                        "basename": "anonymous",
+                        "class": "Directory",
+                        "listing": [
+                            {
+                                "basename": "renamed.txt",
+                                "class": "File",
+                                "location": "keep:99999999999999999999999999999998+99/file1.txt",
+                                "nameext": ".txt",
+                                "nameroot": "renamed",
+                                "size": 0
+                            }
+                        ]
+                    },
+                    "id": "#main/z",
+                    "type": "Directory"
+                }
+            ],
+            "outputs": [],
+            "steps": [
+                {
+                    "id": "#main/step1",
+                    "in": [
+                        {
+                            "id": "#main/step1/x",
+                            "source": "#main/x"
+                        }
+                    ],
+                    "out": [],
+                    "run": "#submit_tool.cwl"
+                }
+            ]
+        }
+    ],
+    "cwlVersion": "v1.0"
+}
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf/inputs_test.cwl b/sdk/cwl/tests/wf/inputs_test.cwl
new file mode 100644 (file)
index 0000000..668f86a
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Test case for arvados-cwl-runner. Used to test propagation of
+# various input types as script_parameters in pipeline templates.
+
+class: Workflow
+cwlVersion: v1.0
+inputs:
+  - id: "#fileInput"
+    type: File
+    label: It's a file; we expect to find some characters in it.
+    doc: |
+      If there were anything further to say, it would be said here,
+      or here.
+  - id: "#boolInput"
+    type: boolean
+    label: True or false?
+  - id: "#floatInput"
+    type: float
+    label: Floats like a duck
+    default: 0.1
+  - id: "#optionalFloatInput"
+    type: ["null", float]
+outputs: []
+steps:
+  - id: step1
+    in:
+      - { id: x, source: "#fileInput" }
+    out: []
+    run: ../tool/submit_tool.cwl
diff --git a/sdk/cwl/tests/wf/listing_deep.cwl b/sdk/cwl/tests/wf/listing_deep.cwl
new file mode 100644 (file)
index 0000000..86ff985
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: CommandLineTool
+cwlVersion: v1.0
+$namespaces:
+  cwltool: "http://commonwl.org/cwltool#"
+requirements:
+  cwltool:LoadListingRequirement:
+    loadListing: deep_listing
+  InlineJavascriptRequirement: {}
+inputs:
+  d: Directory
+outputs:
+  out: stdout
+stdout: output.txt
+arguments:
+  [echo, "${if(inputs.d.listing[0].class === 'Directory' && inputs.d.listing[0].listing[0].class === 'Directory') {return 'true';} else {return 'false';}}"]
diff --git a/sdk/cwl/tests/wf/listing_none.cwl b/sdk/cwl/tests/wf/listing_none.cwl
new file mode 100644 (file)
index 0000000..8277344
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: CommandLineTool
+cwlVersion: v1.0
+$namespaces:
+  cwltool: http://commonwl.org/cwltool#
+requirements:
+  cwltool:LoadListingRequirement:
+    loadListing: no_listing
+  InlineJavascriptRequirement: {}
+inputs:
+  d: Directory
+outputs:
+  out: stdout
+stdout: output.txt
+arguments:
+  [echo, "${if(inputs.d.listing === undefined) {return 'true';} else {return 'false';}}"]
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf/listing_shallow.cwl b/sdk/cwl/tests/wf/listing_shallow.cwl
new file mode 100644 (file)
index 0000000..154a6df
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: CommandLineTool
+cwlVersion: v1.0
+$namespaces:
+  cwltool: http://commonwl.org/cwltool#
+requirements:
+  cwltool:LoadListingRequirement:
+    loadListing: shallow_listing
+  InlineJavascriptRequirement: {}
+inputs:
+  d: Directory
+outputs:
+  out: stdout
+stdout: output.txt
+arguments:
+  [echo, "${if(inputs.d.listing[0].class === 'Directory' && inputs.d.listing[0].listing === undefined) {return 'true';} else {return 'false';}}"]
diff --git a/sdk/cwl/tests/wf/runin-reqs-wf.cwl b/sdk/cwl/tests/wf/runin-reqs-wf.cwl
new file mode 100644 (file)
index 0000000..acaebb5
--- /dev/null
@@ -0,0 +1,58 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+inputs:
+  count:
+    type: int[]
+    default: [1, 2, 3, 4]
+  script:
+    type: File
+    default:
+      class: File
+      location: check_mem.py
+outputs:
+  out: []
+requirements:
+  SubworkflowFeatureRequirement: {}
+  ScatterFeatureRequirement: {}
+  InlineJavascriptRequirement: {}
+  StepInputExpressionRequirement: {}
+steps:
+  substep:
+    in:
+      count: count
+      script: script
+    out: []
+    hints:
+      - class: arv:RunInSingleContainer
+      - class: ResourceRequirement
+        ramMin: $(inputs.count*128)
+      - class: arv:APIRequirement
+    scatter: count
+    run:
+      class: Workflow
+      id: mysub
+      inputs:
+        count: int
+        script: File
+      outputs: []
+      steps:
+        sleep1:
+          in:
+            count: count
+            script: script
+          out: []
+          run:
+            class: CommandLineTool
+            id: subtool
+            inputs:
+              count:
+                type: int
+              script: File
+            outputs: []
+            arguments: [python, $(inputs.script), $(inputs.count * 128)]
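+# The wf2/wf3/wf4 variants that follow move this dynamic ResourceRequirement
+# hint onto the inner Workflow, onto the inner tool, and replace it with a
+# static ramMin: 128, respectively.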
diff --git a/sdk/cwl/tests/wf/runin-reqs-wf2.cwl b/sdk/cwl/tests/wf/runin-reqs-wf2.cwl
new file mode 100644 (file)
index 0000000..5795759
--- /dev/null
@@ -0,0 +1,59 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+inputs:
+  count:
+    type: int[]
+    default: [1, 2, 3, 4]
+  script:
+    type: File
+    default:
+      class: File
+      location: check_mem.py
+outputs:
+  out: []
+requirements:
+  SubworkflowFeatureRequirement: {}
+  ScatterFeatureRequirement: {}
+  InlineJavascriptRequirement: {}
+  StepInputExpressionRequirement: {}
+steps:
+  substep:
+    in:
+      count: count
+      script: script
+    out: []
+    hints:
+      - class: arv:RunInSingleContainer
+      - class: arv:APIRequirement
+    scatter: count
+    run:
+      class: Workflow
+      id: mysub
+      inputs:
+        count: int
+        script: File
+      outputs: []
+      hints:
+        - class: ResourceRequirement
+          ramMin: $(inputs.count*128)
+      steps:
+        sleep1:
+          in:
+            count: count
+            script: script
+          out: []
+          run:
+            class: CommandLineTool
+            id: subtool
+            inputs:
+              count:
+                type: int
+              script: File
+            outputs: []
+            arguments: [python, $(inputs.script), $(inputs.count * 128)]
diff --git a/sdk/cwl/tests/wf/runin-reqs-wf3.cwl b/sdk/cwl/tests/wf/runin-reqs-wf3.cwl
new file mode 100644 (file)
index 0000000..3accb32
--- /dev/null
@@ -0,0 +1,59 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+inputs:
+  count:
+    type: int[]
+    default: [1, 2, 3, 4]
+  script:
+    type: File
+    default:
+      class: File
+      location: check_mem.py
+outputs:
+  out: []
+requirements:
+  SubworkflowFeatureRequirement: {}
+  ScatterFeatureRequirement: {}
+  InlineJavascriptRequirement: {}
+  StepInputExpressionRequirement: {}
+steps:
+  substep:
+    in:
+      count: count
+      script: script
+    out: []
+    hints:
+      - class: arv:RunInSingleContainer
+      - class: arv:APIRequirement
+    scatter: count
+    run:
+      class: Workflow
+      id: mysub
+      inputs:
+        count: int
+        script: File
+      outputs: []
+      steps:
+        sleep1:
+          in:
+            count: count
+            script: script
+          out: []
+          run:
+            class: CommandLineTool
+            id: subtool
+            hints:
+              - class: ResourceRequirement
+                ramMin: $(inputs.count*128)
+            inputs:
+              count:
+                type: int
+              script: File
+            outputs: []
+            arguments: [python, $(inputs.script), $(inputs.count * 128)]
diff --git a/sdk/cwl/tests/wf/runin-reqs-wf4.cwl b/sdk/cwl/tests/wf/runin-reqs-wf4.cwl
new file mode 100644 (file)
index 0000000..fc06fb3
--- /dev/null
@@ -0,0 +1,59 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+inputs:
+  count:
+    type: int[]
+    default: [1, 2, 3, 4]
+  script:
+    type: File
+    default:
+      class: File
+      location: check_mem.py
+outputs:
+  out: []
+requirements:
+  SubworkflowFeatureRequirement: {}
+  ScatterFeatureRequirement: {}
+  InlineJavascriptRequirement: {}
+  StepInputExpressionRequirement: {}
+steps:
+  substep:
+    in:
+      count: count
+      script: script
+    out: []
+    hints:
+      - class: arv:RunInSingleContainer
+      - class: arv:APIRequirement
+    scatter: count
+    run:
+      class: Workflow
+      id: mysub
+      inputs:
+        count: int
+        script: File
+      outputs: []
+      steps:
+        sleep1:
+          in:
+            count: count
+            script: script
+          out: []
+          run:
+            class: CommandLineTool
+            id: subtool
+            hints:
+              - class: ResourceRequirement
+                ramMin: 128
+            inputs:
+              count:
+                type: int
+              script: File
+            outputs: []
+            arguments: [python, $(inputs.script), "128"]
diff --git a/sdk/cwl/tests/wf/runin-wf.cwl b/sdk/cwl/tests/wf/runin-wf.cwl
new file mode 100644 (file)
index 0000000..a192b86
--- /dev/null
@@ -0,0 +1,64 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+inputs:
+  sleeptime:
+    type: int
+    default: 5
+  fileblub:
+    type: File
+    default:
+      class: File
+      location: keep:d7514270f356df848477718d58308cc4+94/a
+      secondaryFiles:
+        - class: File
+          location: keep:d7514270f356df848477718d58308cc4+94/b
+outputs:
+  out:
+    type: string
+    outputSource: substep/out
+requirements:
+  SubworkflowFeatureRequirement: {}
+  ScatterFeatureRequirement: {}
+  InlineJavascriptRequirement: {}
+  StepInputExpressionRequirement: {}
+steps:
+  substep:
+    in:
+      sleeptime: sleeptime
+      fileblub: fileblub
+    out: [out]
+    hints:
+      - class: arv:RunInSingleContainer
+    run:
+      class: Workflow
+      id: mysub
+      inputs:
+        fileblub: File
+      outputs:
+        out:
+          type: string
+          outputSource: sleep1/out
+      steps:
+        sleep1:
+          in:
+            fileblub: fileblub
+          out: [out]
+          run:
+            class: CommandLineTool
+            id: subtool
+            inputs:
+              fileblub:
+                type: File
+                inputBinding: {position: 1}
+            outputs:
+              out:
+                type: string
+                outputBinding:
+                  outputEval: "out"
+            baseCommand: cat
diff --git a/sdk/cwl/tests/wf/runin-with-ttl-wf.cwl b/sdk/cwl/tests/wf/runin-with-ttl-wf.cwl
new file mode 100644 (file)
index 0000000..713e0c0
--- /dev/null
@@ -0,0 +1,63 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+inputs:
+  fileblub:
+    type: File
+    default:
+      class: File
+      location: keep:d7514270f356df848477718d58308cc4+94/a
+      secondaryFiles:
+        - class: File
+          location: keep:d7514270f356df848477718d58308cc4+94/b
+outputs:
+  out:
+    type: string
+    outputSource: substep/out
+requirements:
+  SubworkflowFeatureRequirement: {}
+  ScatterFeatureRequirement: {}
+  InlineJavascriptRequirement: {}
+  StepInputExpressionRequirement: {}
+hints:
+  arv:IntermediateOutput:
+    outputTTL: 60
+steps:
+  substep:
+    in:
+      fileblub: fileblub
+    out: [out]
+    hints:
+      - class: arv:RunInSingleContainer
+    run:
+      class: Workflow
+      id: mysub
+      inputs:
+        fileblub: File
+      outputs:
+        out:
+          type: string
+          outputSource: cat1/out
+      steps:
+        cat1:
+          in:
+            fileblub: fileblub
+          out: [out]
+          run:
+            class: CommandLineTool
+            id: subtool
+            inputs:
+              fileblub:
+                type: File
+                inputBinding: {position: 1}
+            outputs:
+              out:
+                type: string
+                outputBinding:
+                  outputEval: "out"
+            baseCommand: cat
diff --git a/sdk/cwl/tests/wf/scatter2.cwl b/sdk/cwl/tests/wf/scatter2.cwl
new file mode 100644 (file)
index 0000000..79c3854
--- /dev/null
@@ -0,0 +1,68 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
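+# Test case for arvados-cwl-runner: scatters over the sleeptime
+# array, running the embedded subworkflow for each element in a
+# single container (arv:RunInSingleContainer).
+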
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+inputs:
+  sleeptime:
+    type: int[]
+    default: [5]
+  fileblub:
+    type: File
+    default:
+      class: File
+      location: keep:99999999999999999999999999999999+118/token.txt
+outputs:
+  out:
+    type: string[]
+    outputSource: scatterstep/out
+requirements:
+  SubworkflowFeatureRequirement: {}
+  ScatterFeatureRequirement: {}
+  InlineJavascriptRequirement: {}
+  StepInputExpressionRequirement: {}
+steps:
+  scatterstep:
+    in:
+      sleeptime: sleeptime
+      fileblub: fileblub
+    out: [out]
+    scatter: sleeptime
+    hints:
+      - class: arv:RunInSingleContainer
+    run:
+      class: Workflow
+      id: mysub
+      inputs:
+        sleeptime: int
+        fileblub: File
+      outputs:
+        out:
+          type: string
+          outputSource: sleep1/out
+      steps:
+        sleep1:
+          in:
+            sleeptime: sleeptime
+            blurb:
+              valueFrom: |
+                ${
+                  return String(inputs.sleeptime) + "b";
+                }
+          out: [out]
+          run:
+            class: CommandLineTool
+            id: subtool
+            inputs:
+              sleeptime:
+                type: int
+                inputBinding: {position: 1}
+            outputs:
+              out:
+                type: string
+                outputBinding:
+                  outputEval: "out"
+            baseCommand: sleep
diff --git a/sdk/cwl/tests/wf/scatter2_subwf.cwl b/sdk/cwl/tests/wf/scatter2_subwf.cwl
new file mode 100644 (file)
index 0000000..2af1155
--- /dev/null
@@ -0,0 +1,87 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
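+# Packed ($graph) form of the subworkflow embedded in scatter2.cwl,
+# used by the test suite as an expected value.
+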
+{
+  "$graph": [
+    {
+      "class": "Workflow",
+      "cwlVersion": "v1.0",
+      "hints": [],
+      "id": "#main",
+      "inputs": [
+        {
+          "id": "#main/fileblub",
+          "type": "File"
+        },
+        {
+          "id": "#main/sleeptime",
+          "type": "int"
+        }
+      ],
+      "outputs": [
+        {
+          "id": "#main/out",
+          "outputSource": "#main/sleep1/out",
+          "type": "string"
+        }
+      ],
+      "requirements": [
+        {
+          "class": "InlineJavascriptRequirement"
+        },
+        {
+          "class": "ScatterFeatureRequirement"
+        },
+        {
+          "class": "StepInputExpressionRequirement"
+        },
+        {
+          "class": "SubworkflowFeatureRequirement"
+        }
+      ],
+      "steps": [
+        {
+          "id": "#main/sleep1",
+          "in": [
+            {
+              "id": "#main/sleep1/blurb",
+              "valueFrom": "${\n  return String(inputs.sleeptime) + \"b\";\n}\n"
+            },
+            {
+              "id": "#main/sleep1/sleeptime",
+              "source": "#main/sleeptime"
+            }
+          ],
+          "out": [
+            "#main/sleep1/out"
+          ],
+          "run": {
+            "baseCommand": "sleep",
+            "class": "CommandLineTool",
+            "id": "#main/sleep1/subtool",
+            "inputs": [
+              {
+                "id": "#main/sleep1/subtool/sleeptime",
+                "inputBinding": {
+                  "position": 1
+                },
+                "type": "int"
+              }
+            ],
+            "outputs": [
+              {
+                "id": "#main/sleep1/subtool/out",
+                "outputBinding": {
+                  "outputEval": "out"
+                },
+                "type": "string"
+              }
+            ]
+          }
+        }
+      ]
+    }
+  ],
+  "cwlVersion": "v1.0"
+}
\ No newline at end of file
diff --git a/sdk/cwl/tests/wf/secret_job.cwl b/sdk/cwl/tests/wf/secret_job.cwl
new file mode 100644 (file)
index 0000000..2be74b2
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
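+# The cwltool:Secrets hint below marks the "pw" input as a secret:
+# its value is provided to the tool out-of-band and is not stored in
+# the container record or logs.
+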
+cwlVersion: v1.0
+class: CommandLineTool
+$namespaces:
+  cwltool: http://commonwl.org/cwltool#
+hints:
+  "cwltool:Secrets":
+    secrets: [pw]
+requirements:
+  InitialWorkDirRequirement:
+    listing:
+      - entryname: example.conf
+        entry: |
+          username: user
+          password: $(inputs.pw)
+inputs:
+  pw: string
+outputs:
+  out: stdout
+stdout: hashed_example.txt
+arguments: [md5sum, example.conf]
diff --git a/sdk/cwl/tests/wf/secret_wf.cwl b/sdk/cwl/tests/wf/secret_wf.cwl
new file mode 100644 (file)
index 0000000..05d950d
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+  cwltool: http://commonwl.org/cwltool#
+hints:
+  "cwltool:Secrets":
+    secrets: [pw]
+  DockerRequirement:
+    dockerPull: debian:8
+inputs:
+  pw: string
+outputs:
+  out:
+    type: File
+    outputSource: step1/out
+steps:
+  step1:
+    in:
+      pw: pw
+    out: [out]
+    run: secret_job.cwl
diff --git a/sdk/cwl/tests/wf/submit_keepref_wf.cwl b/sdk/cwl/tests/wf/submit_keepref_wf.cwl
new file mode 100644 (file)
index 0000000..b34ba6a
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Test case for arvados-cwl-runner
+#
+# Used to test whether scanning a workflow file for dependencies
+# (e.g. submit_tool.cwl) and uploading them to Keep work as intended.
+
+class: Workflow
+cwlVersion: v1.0
+inputs:
+  x:
+    type: File
+    default:
+      class: File
+      location: keep:99999999999999999999999999999994+99/blorp.txt
+outputs: []
+steps:
+  step1:
+    in:
+      x: x
+    out: []
+    run: ../tool/submit_tool.cwl
diff --git a/sdk/cwl/tests/wf/submit_wf.cwl b/sdk/cwl/tests/wf/submit_wf.cwl
new file mode 100644 (file)
index 0000000..6856e54
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Test case for arvados-cwl-runner
+#
+# Used to test whether scanning a workflow file for dependencies
+# (e.g. submit_tool.cwl) and uploading them to Keep work as intended.
+
+class: Workflow
+cwlVersion: v1.0
+inputs:
+  - id: x
+    type: File
+  - id: y
+    type: Directory
+  - id: z
+    type: Directory
+outputs: []
+steps:
+  - id: step1
+    in:
+      - { id: x, source: "#x" }
+    out: []
+    run: ../tool/submit_tool.cwl
diff --git a/sdk/cwl/tests/wf/submit_wf_no_reuse.cwl b/sdk/cwl/tests/wf/submit_wf_no_reuse.cwl
new file mode 100644 (file)
index 0000000..636b850
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Test case for arvados-cwl-runner. Disables job/container reuse.
+
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+  cwltool: "http://commonwl.org/cwltool#"
+inputs:
+  - id: x
+    type: File
+  - id: y
+    type: Directory
+  - id: z
+    type: Directory
+outputs: []
+steps:
+  - id: step1
+    in:
+      - { id: x, source: "#x" }
+    out: []
+    run: ../tool/submit_tool.cwl
+hints:
+  arv:ReuseRequirement:
+    enableReuse: false
diff --git a/sdk/cwl/tests/wf/submit_wf_packed.cwl b/sdk/cwl/tests/wf/submit_wf_packed.cwl
new file mode 100644 (file)
index 0000000..83ba584
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
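+# Packed ($graph) form of submit_wf.cwl with its tool dependency
+# inlined, used by the test suite as an expected value.
+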
+cwlVersion: v1.0
+$graph:
+- class: CommandLineTool
+  requirements:
+  - class: DockerRequirement
+    dockerPull: debian:8
+    'http://arvados.org/cwl#dockerCollectionPDH': 999999999999999999999999999999d4+99
+  inputs:
+  - id: '#submit_tool.cwl/x'
+    type: File
+    default:
+      class: File
+      location: keep:5d373e7629203ce39e7c22af98a0f881+52/blub.txt
+    inputBinding:
+      position: 1
+  outputs: []
+  baseCommand: cat
+  id: '#submit_tool.cwl'
+- class: Workflow
+  inputs:
+  - id: '#main/x'
+    type: File
+  - id: '#main/y'
+    type: Directory
+  - id: '#main/z'
+    type: Directory
+  outputs: []
+  steps:
+  - id: '#main/step1'
+    in:
+    - {id: '#main/step1/x', source: '#main/x'}
+    out: []
+    run: '#submit_tool.cwl'
+  id: '#main'
diff --git a/sdk/cwl/tests/wf/submit_wf_runner_resources.cwl b/sdk/cwl/tests/wf/submit_wf_runner_resources.cwl
new file mode 100644 (file)
index 0000000..814cd07
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Test case for arvados-cwl-runner
+#
+# Used to test whether scanning a workflow file for dependencies
+# (e.g. submit_tool.cwl) and uploading them to Keep work as intended.
+
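+# The arv:WorkflowRunnerResources hint below sets the RAM, cores, and
+# Keep cache size requested for the workflow runner container itself.
+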
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+hints:
+  arv:WorkflowRunnerResources:
+    ramMin: 2000
+    coresMin: 2
+    keep_cache: 512
+inputs:
+  - id: x
+    type: File
+  - id: y
+    type: Directory
+  - id: z
+    type: Directory
+outputs: []
+steps:
+  - id: step1
+    in:
+      - { id: x, source: "#x" }
+    out: []
+    run: ../tool/submit_tool.cwl
diff --git a/sdk/dev-jobs.dockerfile b/sdk/dev-jobs.dockerfile
new file mode 100644 (file)
index 0000000..f9e3707
--- /dev/null
@@ -0,0 +1,53 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Dockerfile for building an arvados/jobs Docker image from local git tree.
+#
+# Intended for use by developers working on arvados-python-client or
+# arvados-cwl-runner who need to run a crunch job with a custom package
+# version.
+#
+# Use arvados/build/build-dev-docker-jobs-image.sh to build.
+#
+# (This Dockerfile must be located in the arvados/sdk/ directory
+#  because that directory is the Docker build context root.)
+
+FROM debian:jessie
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+ARG pythoncmd=python
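+# (Pass, e.g., --build-arg pythoncmd=python3 to "docker build" to
+# create a Python 3 image.)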
+
+RUN apt-get update -q && apt-get install -qy --no-install-recommends \
+    git ${pythoncmd}-pip ${pythoncmd}-virtualenv ${pythoncmd}-dev libcurl4-gnutls-dev \
+    libgnutls28-dev nodejs ${pythoncmd}-pyasn1-modules build-essential
+
+RUN if [ "$pythoncmd" = "python3" ]; then \
+       pip3 install -U setuptools six requests ; \
+    else \
+       pip install -U setuptools six requests ; \
+    fi
+
+ARG sdk
+ARG runner
+ARG salad
+ARG cwltool
+
+ADD python/dist/$sdk /tmp/
+ADD cwl/salad_dist/$salad /tmp/
+ADD cwl/cwltool_dist/$cwltool /tmp/
+ADD cwl/dist/$runner /tmp/
+
+RUN cd /tmp/arvados-python-client-* && $pythoncmd setup.py install
+RUN if test -d /tmp/schema-salad-* ; then cd /tmp/schema-salad-* && $pythoncmd setup.py install ; fi
+RUN if test -d /tmp/cwltool-* ; then cd /tmp/cwltool-* && $pythoncmd setup.py install ; fi
+RUN cd /tmp/arvados-cwl-runner-* && $pythoncmd setup.py install
+
+# Install dependencies and set up system.
+RUN /usr/sbin/adduser --disabled-password \
+      --gecos 'Crunch execution user' crunch && \
+    /usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job
+
+USER crunch
diff --git a/sdk/go/arvados/api_client_authorization.go b/sdk/go/arvados/api_client_authorization.go
new file mode 100644 (file)
index 0000000..17cff23
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+// APIClientAuthorization is an arvados#apiClientAuthorization resource.
+type APIClientAuthorization struct {
+       UUID      string   `json:"uuid,omitempty"`
+       APIToken  string   `json:"api_token,omitempty"`
+       ExpiresAt string   `json:"expires_at,omitempty"`
+       Scopes    []string `json:"scopes,omitempty"`
+}
+
+// APIClientAuthorizationList is an arvados#apiClientAuthorizationList resource.
+type APIClientAuthorizationList struct {
+       Items []APIClientAuthorization `json:"items"`
+}
+
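+// TokenV2 returns the token in "v2/{uuid}/{secret}" format, which
+// identifies the API client authorization record as well as carrying
+// the secret itself.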
+func (aca APIClientAuthorization) TokenV2() string {
+       return "v2/" + aca.UUID + "/" + aca.APIToken
+}
diff --git a/sdk/go/arvados/byte_size.go b/sdk/go/arvados/byte_size.go
new file mode 100644 (file)
index 0000000..08cc83e
--- /dev/null
@@ -0,0 +1,91 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "encoding/json"
+       "fmt"
+       "math"
+       "strings"
+)
+
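+// ByteSize is an integer number of bytes. When unmarshaled from JSON
+// or YAML, it accepts either a plain number or a string with an
+// optional SI ("4KB") or binary ("4KiB") unit suffix.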
+type ByteSize int64
+
+var prefixValue = map[string]int64{
+       "":   1,
+       "K":  1000,
+       "Ki": 1 << 10,
+       "M":  1000000,
+       "Mi": 1 << 20,
+       "G":  1000000000,
+       "Gi": 1 << 30,
+       "T":  1000000000000,
+       "Ti": 1 << 40,
+       "P":  1000000000000000,
+       "Pi": 1 << 50,
+       "E":  1000000000000000000,
+       "Ei": 1 << 60,
+}
+
+func (n *ByteSize) UnmarshalJSON(data []byte) error {
+       if len(data) == 0 || data[0] != '"' {
+               var i int64
+               err := json.Unmarshal(data, &i)
+               if err != nil {
+                       return err
+               }
+               *n = ByteSize(i)
+               return nil
+       }
+       var s string
+       err := json.Unmarshal(data, &s)
+       if err != nil {
+               return err
+       }
+       split := strings.LastIndexAny(s, "0123456789.+-eE") + 1
+       if split == 0 {
+               return fmt.Errorf("invalid byte size %q", s)
+       }
+       if s[split-1] == 'E' {
+               // We accepted an E as if it started the exponent part
+               // of a json number, but if the next char isn't +, -,
+               // or digit, then the E must have meant Exa. Instead
+               // of "4.5E"+"iB" we want "4.5"+"EiB".
+               split--
+       }
+       var val json.Number
+       dec := json.NewDecoder(strings.NewReader(s[:split]))
+       dec.UseNumber()
+       err = dec.Decode(&val)
+       if err != nil {
+               return err
+       }
+       // (A string with no unit suffix, e.g., "100", falls through:
+       // prefix will be "" and prefixValue[""] is 1.)
+       prefix := strings.Trim(s[split:], " ")
+       if strings.HasSuffix(prefix, "B") {
+               prefix = prefix[:len(prefix)-1]
+       }
+       pval, ok := prefixValue[prefix]
+       if !ok {
+               return fmt.Errorf("invalid unit %q", strings.Trim(s[split:], " "))
+       }
+       if intval, err := val.Int64(); err == nil {
+               if pval > 1 && (intval*pval)/pval != intval {
+                       return fmt.Errorf("size %q overflows int64", s)
+               }
+               *n = ByteSize(intval * pval)
+               return nil
+       } else if floatval, err := val.Float64(); err == nil {
+               if floatval*float64(pval) > math.MaxInt64 {
+                       return fmt.Errorf("size %q overflows int64", s)
+               }
+               *n = ByteSize(int64(floatval * float64(pval)))
+               return nil
+       } else {
+               return fmt.Errorf("bug: json.Number for %q is not int64 or float64: %s", s, err)
+       }
+}
diff --git a/sdk/go/arvados/byte_size_test.go b/sdk/go/arvados/byte_size_test.go
new file mode 100644 (file)
index 0000000..7c4aff2
--- /dev/null
@@ -0,0 +1,70 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "github.com/ghodss/yaml"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&ByteSizeSuite{})
+
+type ByteSizeSuite struct{}
+
+func (s *ByteSizeSuite) TestUnmarshal(c *check.C) {
+       for _, testcase := range []struct {
+               in  string
+               out int64
+       }{
+               {"0", 0},
+               {"5", 5},
+               {"5B", 5},
+               {"5 B", 5},
+               {" 4 KiB ", 4096},
+               {"0K", 0},
+               {"0Ki", 0},
+               {"0 KiB", 0},
+               {"4K", 4000},
+               {"4KB", 4000},
+               {"4Ki", 4096},
+               {"4KiB", 4096},
+               {"4MB", 4000000},
+               {"4MiB", 4194304},
+               {"4GB", 4000000000},
+               {"4 GiB", 4294967296},
+               {"4TB", 4000000000000},
+               {"4TiB", 4398046511104},
+               {"4PB", 4000000000000000},
+               {"4PiB", 4503599627370496},
+               {"4EB", 4000000000000000000},
+               {"4EiB", 4611686018427387904},
+               {"4.5EiB", 5188146770730811392},
+               {"1.5 GB", 1500000000},
+               {"1.5 GiB", 1610612736},
+               {"1.234 GiB", 1324997410}, // rounds down from 1324997410.816
+               {"1e2 KB", 100000},
+               {"20E-1 KiB", 2048},
+               {"1E0EB", 1000000000000000000},
+               {"1E-1EB", 100000000000000000},
+               {"1E-1EiB", 115292150460684704},
+               {"4.5E15 K", 4500000000000000000},
+       } {
+               var n ByteSize
+               err := yaml.Unmarshal([]byte(testcase.in+"\n"), &n)
+               c.Logf("%v => %v: %v", testcase.in, testcase.out, n)
+               c.Check(err, check.IsNil)
+               c.Check(int64(n), check.Equals, testcase.out)
+       }
+       for _, testcase := range []string{
+               "B", "K", "KB", "KiB", "4BK", "4iB", "4A", "b", "4b", "4mB", "4m", "4mib", "4KIB", "4K iB", "4Ki B", "BB", "4BB",
+               "400000 EB", // overflows int64
+               "4.11e4 EB", // ok as float64, but overflows int64
+       } {
+               var n ByteSize
+               err := yaml.Unmarshal([]byte(testcase+"\n"), &n)
+               c.Logf("%v => error: %v", n, err)
+               c.Check(err, check.NotNil)
+       }
+}
diff --git a/sdk/go/arvados/client.go b/sdk/go/arvados/client.go
new file mode 100644 (file)
index 0000000..787e01a
--- /dev/null
@@ -0,0 +1,392 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "bytes"
+       "context"
+       "crypto/tls"
+       "encoding/json"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "math"
+       "net/http"
+       "net/url"
+       "os"
+       "regexp"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+)
+
+// A Client is an HTTP client with an API endpoint and a set of
+// Arvados credentials.
+//
+// It offers methods for accessing individual Arvados APIs, and
+// methods that implement common patterns like fetching multiple pages
+// of results using List APIs.
+type Client struct {
+       // HTTP client used to make requests. If nil,
+       // DefaultSecureClient or InsecureHTTPClient will be used.
+       Client *http.Client `json:"-"`
+
+       // Hostname (or host:port) of Arvados API server.
+       APIHost string
+
+       // User authentication token.
+       AuthToken string
+
+       // Accept unverified certificates. This works only if the
+       // Client field is nil: otherwise, it has no effect.
+       Insecure bool
+
+       // Override keep service discovery with a list of base
+       // URIs. (Currently there are no Client methods for
+       // discovering keep services so this is just a convenience for
+       // callers who use a Client to initialize an
+       // arvadosclient.ArvadosClient.)
+       KeepServiceURIs []string `json:",omitempty"`
+
+       dd *DiscoveryDocument
+
+       ctx context.Context
+}
+
+// InsecureHTTPClient is the default http.Client used by a Client
+// with Insecure==true and Client==nil.
+var InsecureHTTPClient = &http.Client{
+       Transport: &http.Transport{
+               TLSClientConfig: &tls.Config{
+                       InsecureSkipVerify: true}},
+       Timeout: 5 * time.Minute}
+
+// DefaultSecureClient is the default http.Client used by a Client
+// otherwise.
+var DefaultSecureClient = &http.Client{
+       Timeout: 5 * time.Minute}
+
+// NewClientFromEnv creates a new Client that uses the default HTTP
+// client with the API endpoint and credentials given by the
+// ARVADOS_API_* environment variables.
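+//
+// A minimal usage sketch (assuming the User type defined elsewhere
+// in this package):
+//
+//     c := NewClientFromEnv()
+//     var u User
+//     err := c.RequestAndDecode(&u, "GET", "arvados/v1/users/current", nil, nil)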
+func NewClientFromEnv() *Client {
+       var svcs []string
+       for _, s := range strings.Split(os.Getenv("ARVADOS_KEEP_SERVICES"), " ") {
+               if s == "" {
+                       continue
+               } else if u, err := url.Parse(s); err != nil {
+                       log.Printf("ARVADOS_KEEP_SERVICES: %q: %s", s, err)
+               } else if !u.IsAbs() {
+                       log.Printf("ARVADOS_KEEP_SERVICES: %q: not an absolute URI", s)
+               } else {
+                       svcs = append(svcs, s)
+               }
+       }
+       var insecure bool
+       if s := strings.ToLower(os.Getenv("ARVADOS_API_HOST_INSECURE")); s == "1" || s == "yes" || s == "true" {
+               insecure = true
+       }
+       return &Client{
+               APIHost:         os.Getenv("ARVADOS_API_HOST"),
+               AuthToken:       os.Getenv("ARVADOS_API_TOKEN"),
+               Insecure:        insecure,
+               KeepServiceURIs: svcs,
+       }
+}
+
+var reqIDGen = httpserver.IDGenerator{Prefix: "req-"}
+
+// Do adds Authorization and X-Request-Id headers and then calls
+// (*http.Client).Do().
+func (c *Client) Do(req *http.Request) (*http.Response, error) {
+       if c.AuthToken != "" {
+               req.Header.Add("Authorization", "OAuth2 "+c.AuthToken)
+       }
+
+       if req.Header.Get("X-Request-Id") == "" {
+               reqid, _ := c.context().Value(contextKeyRequestID).(string)
+               if reqid == "" {
+                       reqid = reqIDGen.Next()
+               }
+               if req.Header == nil {
+                       req.Header = http.Header{"X-Request-Id": {reqid}}
+               } else {
+                       req.Header.Set("X-Request-Id", reqid)
+               }
+       }
+       return c.httpClient().Do(req)
+}
+
+// DoAndDecode performs req and unmarshals the response (which must be
+// JSON) into dst. Use this instead of RequestAndDecode if you need
+// more control of the http.Request object.
+func (c *Client) DoAndDecode(dst interface{}, req *http.Request) error {
+       resp, err := c.Do(req)
+       if err != nil {
+               return err
+       }
+       defer resp.Body.Close()
+       buf, err := ioutil.ReadAll(resp.Body)
+       if err != nil {
+               return err
+       }
+       if resp.StatusCode != 200 {
+               return newTransactionError(req, resp, buf)
+       }
+       if dst == nil {
+               return nil
+       }
+       return json.Unmarshal(buf, dst)
+}
+
+// anythingToValues converts an arbitrary struct to url.Values. For
+// example,
+//
+//     Foo{Bar: []int{1,2,3}, Baz: "waz"}
+//
+// becomes
+//
+//     url.Values{"Bar": {"[1,2,3]"}, "Baz": {"waz"}}
+//
+// params itself is returned if it is already a url.Values.
+func anythingToValues(params interface{}) (url.Values, error) {
+       if v, ok := params.(url.Values); ok {
+               return v, nil
+       }
+       // TODO: Do this more efficiently, possibly using
+       // json.Decode/Encode, so the whole thing doesn't have to get
+       // encoded, decoded, and re-encoded.
+       j, err := json.Marshal(params)
+       if err != nil {
+               return nil, err
+       }
+       var generic map[string]interface{}
+       err = json.Unmarshal(j, &generic)
+       if err != nil {
+               return nil, err
+       }
+       urlValues := url.Values{}
+       for k, v := range generic {
+               if v, ok := v.(string); ok {
+                       urlValues.Set(k, v)
+                       continue
+               }
+               if v, ok := v.(float64); ok {
+                       // Unmarshal decodes all numbers as float64,
+                       // which can be written as 1.2345e4 in JSON,
+                       // but this form is not accepted for ints in
+                       // url params. If a number fits in an int64,
+                       // encode it as int64 rather than float64.
+                       if v, frac := math.Modf(v); frac == 0 && v <= math.MaxInt64 && v >= math.MinInt64 {
+                               urlValues.Set(k, fmt.Sprintf("%d", int64(v)))
+                               continue
+                       }
+               }
+               j, err := json.Marshal(v)
+               if err != nil {
+                       return nil, err
+               }
+               urlValues.Set(k, string(j))
+       }
+       return urlValues, nil
+}
+
+// RequestAndDecode performs an API request and unmarshals the
+// response (which must be JSON) into dst. Method and body arguments
+// are the same as for http.NewRequest(). The given path is added to
+// the server's scheme/host/port to form the request URL. The given
+// params are passed via POST form or query string.
+//
+// path must not contain a query string.
+func (c *Client) RequestAndDecode(dst interface{}, method, path string, body io.Reader, params interface{}) error {
+       if body, ok := body.(io.Closer); ok {
+               // Ensure body is closed even if we error out early
+               defer body.Close()
+       }
+       urlString := c.apiURL(path)
+       urlValues, err := anythingToValues(params)
+       if err != nil {
+               return err
+       }
+       if urlValues == nil {
+               // Nothing to send
+       } else if method == "GET" || method == "HEAD" || body != nil {
+               // Must send params in query part of URL (FIXME: what
+               // if resulting URL is too long?)
+               u, err := url.Parse(urlString)
+               if err != nil {
+                       return err
+               }
+               u.RawQuery = urlValues.Encode()
+               urlString = u.String()
+       } else {
+               body = strings.NewReader(urlValues.Encode())
+       }
+       req, err := http.NewRequest(method, urlString, body)
+       if err != nil {
+               return err
+       }
+       req.Header.Set("Content-type", "application/x-www-form-urlencoded")
+       return c.DoAndDecode(dst, req)
+}
+
+type resource interface {
+       resourceName() string
+}
+
+// UpdateBody returns an io.Reader suitable for use as an http.Request
+// Body for a create or update API call.
+func (c *Client) UpdateBody(rsc resource) io.Reader {
+       j, err := json.Marshal(rsc)
+       if err != nil {
+               // Return a reader that returns errors.
+               r, w := io.Pipe()
+               w.CloseWithError(err)
+               return r
+       }
+       v := url.Values{rsc.resourceName(): {string(j)}}
+       return bytes.NewBufferString(v.Encode())
+}
+
+type contextKey string
+
+var contextKeyRequestID contextKey = "X-Request-Id"
+
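+// WithRequestID returns a copy of c that sends the given value in
+// the X-Request-Id header of each request (unless the request
+// already has that header set).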
+func (c *Client) WithRequestID(reqid string) *Client {
+       cc := *c
+       cc.ctx = context.WithValue(cc.context(), contextKeyRequestID, reqid)
+       return &cc
+}
+
+func (c *Client) context() context.Context {
+       if c.ctx == nil {
+               return context.Background()
+       }
+       return c.ctx
+}
+
+func (c *Client) httpClient() *http.Client {
+       switch {
+       case c.Client != nil:
+               return c.Client
+       case c.Insecure:
+               return InsecureHTTPClient
+       default:
+               return DefaultSecureClient
+       }
+}
+
+func (c *Client) apiURL(path string) string {
+       return "https://" + c.APIHost + "/" + path
+}
+
+// DiscoveryDocument is the Arvados server's description of itself.
+type DiscoveryDocument struct {
+       BasePath                     string              `json:"basePath"`
+       DefaultCollectionReplication int                 `json:"defaultCollectionReplication"`
+       BlobSignatureTTL             int64               `json:"blobSignatureTtl"`
+       GitURL                       string              `json:"gitUrl"`
+       Schemas                      map[string]Schema   `json:"schemas"`
+       Resources                    map[string]Resource `json:"resources"`
+}
+
+type Resource struct {
+       Methods map[string]ResourceMethod `json:"methods"`
+}
+
+type ResourceMethod struct {
+       HTTPMethod string         `json:"httpMethod"`
+       Path       string         `json:"path"`
+       Response   MethodResponse `json:"response"`
+}
+
+type MethodResponse struct {
+       Ref string `json:"$ref"`
+}
+
+type Schema struct {
+       UUIDPrefix string `json:"uuidPrefix"`
+}
+
+// DiscoveryDocument returns a *DiscoveryDocument. The returned object
+// should not be modified: the same object may be returned by
+// subsequent calls.
+func (c *Client) DiscoveryDocument() (*DiscoveryDocument, error) {
+       if c.dd != nil {
+               return c.dd, nil
+       }
+       var dd DiscoveryDocument
+       err := c.RequestAndDecode(&dd, "GET", "discovery/v1/apis/arvados/v1/rest", nil, nil)
+       if err != nil {
+               return nil, err
+       }
+       c.dd = &dd
+       return c.dd, nil
+}
+
+var pdhRegexp = regexp.MustCompile(`^[0-9a-f]{32}\+\d+$`)
+
+func (c *Client) modelForUUID(dd *DiscoveryDocument, uuid string) (string, error) {
+       if pdhRegexp.MatchString(uuid) {
+               return "Collection", nil
+       }
+       if len(uuid) != 27 {
+               return "", fmt.Errorf("invalid UUID: %q", uuid)
+       }
+       infix := uuid[6:11]
+       var model string
+       for m, s := range dd.Schemas {
+               if s.UUIDPrefix == infix {
+                       model = m
+                       break
+               }
+       }
+       if model == "" {
+               return "", fmt.Errorf("unrecognized type portion %q in UUID %q", infix, uuid)
+       }
+       return model, nil
+}
+
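+// KindForUUID returns the "arvados#..." kind string (e.g.,
+// "arvados#collection") for the resource type indicated by the given
+// UUID or portable data hash.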
+func (c *Client) KindForUUID(uuid string) (string, error) {
+       dd, err := c.DiscoveryDocument()
+       if err != nil {
+               return "", err
+       }
+       model, err := c.modelForUUID(dd, uuid)
+       if err != nil {
+               return "", err
+       }
+       return "arvados#" + strings.ToLower(model[:1]) + model[1:], nil
+}
+
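+// PathForUUID returns the API request path for the given method
+// (e.g., "get") and UUID, according to the discovery document.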
+func (c *Client) PathForUUID(method, uuid string) (string, error) {
+       dd, err := c.DiscoveryDocument()
+       if err != nil {
+               return "", err
+       }
+       model, err := c.modelForUUID(dd, uuid)
+       if err != nil {
+               return "", err
+       }
+       var resource string
+       for r, rsc := range dd.Resources {
+               if rsc.Methods["get"].Response.Ref == model {
+                       resource = r
+                       break
+               }
+       }
+       if resource == "" {
+               return "", fmt.Errorf("no resource for model: %q", model)
+       }
+       m, ok := dd.Resources[resource].Methods[method]
+       if !ok {
+               return "", fmt.Errorf("no method %q for resource %q", method, resource)
+       }
+       path := dd.BasePath + strings.Replace(m.Path, "{uuid}", uuid, -1)
+       if path[0] == '/' {
+               path = path[1:]
+       }
+       return path, nil
+}
diff --git a/sdk/go/arvados/client_test.go b/sdk/go/arvados/client_test.go
new file mode 100644 (file)
index 0000000..df93800
--- /dev/null
@@ -0,0 +1,174 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "bytes"
+       "fmt"
+       "io/ioutil"
+       "net/http"
+       "net/url"
+       "sync"
+       "testing"
+       "testing/iotest"
+)
+
+type stubTransport struct {
+       Responses map[string]string
+       Requests  []http.Request
+       sync.Mutex
+}
+
+func (stub *stubTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+       stub.Lock()
+       stub.Requests = append(stub.Requests, *req)
+       stub.Unlock()
+
+       resp := &http.Response{
+               Status:     "200 OK",
+               StatusCode: 200,
+               Proto:      "HTTP/1.1",
+               ProtoMajor: 1,
+               ProtoMinor: 1,
+               Request:    req,
+       }
+       str := stub.Responses[req.URL.Path]
+       if str == "" {
+               resp.Status = "404 Not Found"
+               resp.StatusCode = 404
+               str = "{}"
+       }
+       buf := bytes.NewBufferString(str)
+       resp.Body = ioutil.NopCloser(buf)
+       resp.ContentLength = int64(buf.Len())
+       return resp, nil
+}
+
+type errorTransport struct{}
+
+func (stub *errorTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+       return nil, fmt.Errorf("something awful happened")
+}
+
+type timeoutTransport struct {
+       response []byte
+}
+
+func (stub *timeoutTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+       return &http.Response{
+               Status:     "200 OK",
+               StatusCode: 200,
+               Proto:      "HTTP/1.1",
+               ProtoMajor: 1,
+               ProtoMinor: 1,
+               Request:    req,
+               Body:       ioutil.NopCloser(iotest.TimeoutReader(bytes.NewReader(stub.response))),
+       }, nil
+}
+
+func TestCurrentUser(t *testing.T) {
+       t.Parallel()
+       stub := &stubTransport{
+               Responses: map[string]string{
+                       "/arvados/v1/users/current": `{"uuid":"zzzzz-abcde-012340123401234"}`,
+               },
+       }
+       c := &Client{
+               Client: &http.Client{
+                       Transport: stub,
+               },
+               APIHost:   "zzzzz.arvadosapi.com",
+               AuthToken: "xyzzy",
+       }
+       u, err := c.CurrentUser()
+       if err != nil {
+               t.Fatal(err)
+       }
+       if x := "zzzzz-abcde-012340123401234"; u.UUID != x {
+               t.Errorf("got uuid %q, expected %q", u.UUID, x)
+       }
+       if len(stub.Requests) < 1 {
+               t.Fatal("empty stub.Requests")
+       }
+       hdr := stub.Requests[len(stub.Requests)-1].Header
+       if hdr.Get("Authorization") != "OAuth2 xyzzy" {
+               t.Errorf("got headers %+q, expected Authorization header", hdr)
+       }
+
+       c.Client.Transport = &errorTransport{}
+       u, err = c.CurrentUser()
+       if err == nil {
+               t.Errorf("got nil error, expected something awful")
+       }
+}
+
+func TestAnythingToValues(t *testing.T) {
+       type testCase struct {
+               in interface{}
+               // ok==nil means anythingToValues should return an
+               // error, otherwise it's a func that returns true if
+               // out is correct
+               ok func(out url.Values) bool
+       }
+       for _, tc := range []testCase{
+               {
+                       in: map[string]interface{}{"foo": "bar"},
+                       ok: func(out url.Values) bool {
+                               return out.Get("foo") == "bar"
+                       },
+               },
+               {
+                       in: map[string]interface{}{"foo": 2147483647},
+                       ok: func(out url.Values) bool {
+                               return out.Get("foo") == "2147483647"
+                       },
+               },
+               {
+                       in: map[string]interface{}{"foo": 1.234},
+                       ok: func(out url.Values) bool {
+                               return out.Get("foo") == "1.234"
+                       },
+               },
+               {
+                       in: map[string]interface{}{"foo": "1.234"},
+                       ok: func(out url.Values) bool {
+                               return out.Get("foo") == "1.234"
+                       },
+               },
+               {
+                       in: map[string]interface{}{"foo": map[string]interface{}{"bar": 1.234}},
+                       ok: func(out url.Values) bool {
+                               return out.Get("foo") == `{"bar":1.234}`
+                       },
+               },
+               {
+                       in: url.Values{"foo": {"bar"}},
+                       ok: func(out url.Values) bool {
+                               return out.Get("foo") == "bar"
+                       },
+               },
+               {
+                       in: 1234,
+                       ok: nil,
+               },
+               {
+                       in: []string{"foo"},
+                       ok: nil,
+               },
+       } {
+               t.Logf("%#v", tc.in)
+               out, err := anythingToValues(tc.in)
+               switch {
+               case tc.ok == nil:
+                       if err == nil {
+                               t.Errorf("got %#v, expected error", out)
+                       }
+               case err != nil:
+                       t.Errorf("got err %#v, expected nil", err)
+               case !tc.ok(out):
+                       t.Errorf("got %#v but tc.ok() says that is wrong", out)
+               }
+       }
+}
diff --git a/sdk/go/arvados/collection.go b/sdk/go/arvados/collection.go
new file mode 100644 (file)
index 0000000..5b61300
--- /dev/null
@@ -0,0 +1,82 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "bufio"
+       "fmt"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/blockdigest"
+)
+
+// Collection is an arvados#collection resource.
+type Collection struct {
+       UUID                      string     `json:"uuid,omitempty"`
+       OwnerUUID                 string     `json:"owner_uuid,omitempty"`
+       TrashAt                   *time.Time `json:"trash_at,omitempty"`
+       ManifestText              string     `json:"manifest_text"`
+       UnsignedManifestText      string     `json:"unsigned_manifest_text,omitempty"`
+       Name                      string     `json:"name,omitempty"`
+       CreatedAt                 *time.Time `json:"created_at,omitempty"`
+       ModifiedAt                *time.Time `json:"modified_at,omitempty"`
+       PortableDataHash          string     `json:"portable_data_hash,omitempty"`
+       ReplicationConfirmed      *int       `json:"replication_confirmed,omitempty"`
+       ReplicationConfirmedAt    *time.Time `json:"replication_confirmed_at,omitempty"`
+       ReplicationDesired        *int       `json:"replication_desired,omitempty"`
+       StorageClassesDesired     []string   `json:"storage_classes_desired,omitempty"`
+       StorageClassesConfirmed   []string   `json:"storage_classes_confirmed,omitempty"`
+       StorageClassesConfirmedAt *time.Time `json:"storage_classes_confirmed_at,omitempty"`
+       DeleteAt                  *time.Time `json:"delete_at,omitempty"`
+       IsTrashed                 bool       `json:"is_trashed,omitempty"`
+}
+
+func (c Collection) resourceName() string {
+       return "collection"
+}
+
+// SizedDigests returns the hash+size part of each data block
+// referenced by the collection.
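+//
+// For reference, an (illustrative, not real) manifest stream line
+// looks like:
+//
+//     . acbd18db4cc2f85cedef654fccc4a4d8+3+A<signature>@<expiry> 0:3:foo.txt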
+func (c *Collection) SizedDigests() ([]SizedDigest, error) {
+       manifestText := c.ManifestText
+       if manifestText == "" {
+               manifestText = c.UnsignedManifestText
+       }
+       if manifestText == "" && c.PortableDataHash != "d41d8cd98f00b204e9800998ecf8427e+0" {
+               // TODO: Check more subtle forms of corruption, too
+               return nil, fmt.Errorf("manifest is missing")
+       }
+       var sds []SizedDigest
+       scanner := bufio.NewScanner(strings.NewReader(manifestText))
+       scanner.Buffer(make([]byte, 1048576), len(manifestText))
+       for scanner.Scan() {
+               line := scanner.Text()
+               tokens := strings.Split(line, " ")
+               if len(tokens) < 3 {
+                       return nil, fmt.Errorf("Invalid stream (<3 tokens): %q", line)
+               }
+               for _, token := range tokens[1:] {
+                       if !blockdigest.LocatorPattern.MatchString(token) {
+                               // FIXME: ensure it's a file token
+                               break
+                       }
+                       // FIXME: shouldn't assume 32 char hash
+                       if i := strings.IndexRune(token[33:], '+'); i >= 0 {
+                               token = token[:33+i]
+                       }
+                       sds = append(sds, SizedDigest(token))
+               }
+       }
+       return sds, scanner.Err()
+}
+
+// CollectionList is an arvados#collectionList resource.
+type CollectionList struct {
+       Items          []Collection `json:"items"`
+       ItemsAvailable int          `json:"items_available"`
+       Offset         int          `json:"offset"`
+       Limit          int          `json:"limit"`
+}
diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go
new file mode 100644 (file)
index 0000000..f16f98a
--- /dev/null
@@ -0,0 +1,302 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "encoding/json"
+       "errors"
+       "fmt"
+       "os"
+
+       "git.curoverse.com/arvados.git/sdk/go/config"
+)
+
+const DefaultConfigFile = "/etc/arvados/config.yml"
+
+type Config struct {
+       Clusters map[string]Cluster
+}
+
+// GetConfig returns the current system config, loading it from
+// configFile if needed.
+func GetConfig(configFile string) (*Config, error) {
+       var cfg Config
+       err := config.LoadFile(&cfg, configFile)
+       return &cfg, err
+}
+
+// GetCluster returns the config for the given cluster (with its
+// ClusterID field filled in), or the default/only configured cluster
+// if clusterID is "".
+func (sc *Config) GetCluster(clusterID string) (*Cluster, error) {
+       if clusterID == "" {
+               if len(sc.Clusters) == 0 {
+                       return nil, fmt.Errorf("no clusters configured")
+               } else if len(sc.Clusters) > 1 {
+                       return nil, fmt.Errorf("multiple clusters configured, cannot choose")
+               } else {
+                       for id, cc := range sc.Clusters {
+                               cc.ClusterID = id
+                               return &cc, nil
+                       }
+               }
+       }
+       if cc, ok := sc.Clusters[clusterID]; !ok {
+               return nil, fmt.Errorf("cluster %q is not configured", clusterID)
+       } else {
+               cc.ClusterID = clusterID
+               return &cc, nil
+       }
+}
+
+type RequestLimits struct {
+       MaxItemsPerResponse            int
+       MultiClusterRequestConcurrency int
+}
+
+type Cluster struct {
+       ClusterID          string `json:"-"`
+       ManagementToken    string
+       NodeProfiles       map[string]NodeProfile
+       InstanceTypes      InstanceTypeMap
+       CloudVMs           CloudVMs
+       Dispatch           Dispatch
+       HTTPRequestTimeout Duration
+       RemoteClusters     map[string]RemoteCluster
+       PostgreSQL         PostgreSQL
+       RequestLimits      RequestLimits
+       Logging            Logging
+}
+
+type Logging struct {
+       Level  string
+       Format string
+}
+
+type PostgreSQL struct {
+       Connection     PostgreSQLConnection
+       ConnectionPool int
+}
+
+type PostgreSQLConnection map[string]string
+
+type RemoteCluster struct {
+       // API endpoint host or host:port; default is {id}.arvadosapi.com
+       Host string
+       // Perform a proxy request when a local client requests an
+       // object belonging to this remote.
+       Proxy bool
+       // Scheme, default "https". Can be set to "http" for testing.
+       Scheme string
+       // Disable TLS verify. Can be set to true for testing.
+       Insecure bool
+}
+
+type InstanceType struct {
+       Name            string
+       ProviderType    string
+       VCPUs           int
+       RAM             ByteSize
+       Scratch         ByteSize
+       IncludedScratch ByteSize
+       AddedScratch    ByteSize
+       Price           float64
+       Preemptible     bool
+}
+
+type Dispatch struct {
+       // PEM encoded SSH key (RSA, DSA, or ECDSA) able to log in to
+       // cloud VMs.
+       PrivateKey string
+
+       // Max time for workers to come up before abandoning stale
+       // locks from previous run
+       StaleLockTimeout Duration
+
+       // Interval between queue polls
+       PollInterval Duration
+
+       // Interval between probes to each worker
+       ProbeInterval Duration
+
+       // Maximum total worker probes per second
+       MaxProbesPerSecond int
+}
+
+type CloudVMs struct {
+       // Shell command that exits zero IFF the VM is fully booted
+       // and ready to run containers, e.g., "mount | grep
+       // /encrypted-tmp"
+       BootProbeCommand string
+
+       // Listening port (name or number) of SSH servers on worker
+       // VMs
+       SSHPort string
+
+       SyncInterval Duration
+
+       // Maximum idle time before automatic shutdown
+       TimeoutIdle Duration
+
+       // Maximum booting time before automatic shutdown
+       TimeoutBooting Duration
+
+       // Maximum time with no successful probes before automatic shutdown
+       TimeoutProbe Duration
+
+       // Time after shutdown to retry shutdown
+       TimeoutShutdown Duration
+
+       ImageID string
+
+       Driver           string
+       DriverParameters json.RawMessage
+}
+
+type InstanceTypeMap map[string]InstanceType
+
+var errDuplicateInstanceTypeName = errors.New("duplicate instance type name")
+
+// UnmarshalJSON handles old config files that provide an array of
+// instance types instead of a hash.
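+//
+// For example, both of these YAML forms are accepted:
+//
+//     InstanceTypes:
+//     - Name: foo
+//
+//     InstanceTypes:
+//       foo:
+//         ProviderType: bar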
+func (it *InstanceTypeMap) UnmarshalJSON(data []byte) error {
+       if len(data) > 0 && data[0] == '[' {
+               var arr []InstanceType
+               err := json.Unmarshal(data, &arr)
+               if err != nil {
+                       return err
+               }
+               if len(arr) == 0 {
+                       *it = nil
+                       return nil
+               }
+               *it = make(map[string]InstanceType, len(arr))
+               for _, t := range arr {
+                       if _, ok := (*it)[t.Name]; ok {
+                               return errDuplicateInstanceTypeName
+                       }
+                       if t.ProviderType == "" {
+                               t.ProviderType = t.Name
+                       }
+                       if t.Scratch == 0 {
+                               t.Scratch = t.IncludedScratch + t.AddedScratch
+                       } else if t.AddedScratch == 0 {
+                               t.AddedScratch = t.Scratch - t.IncludedScratch
+                       } else if t.IncludedScratch == 0 {
+                               t.IncludedScratch = t.Scratch - t.AddedScratch
+                       }
+
+                       if t.Scratch != (t.IncludedScratch + t.AddedScratch) {
+                               return fmt.Errorf("%v: Scratch != (IncludedScratch + AddedScratch)", t.Name)
+                       }
+                       (*it)[t.Name] = t
+               }
+               return nil
+       }
+       var hash map[string]InstanceType
+       err := json.Unmarshal(data, &hash)
+       if err != nil {
+               return err
+       }
+       // Fill in Name field (and ProviderType field, if not
+       // specified) using hash key.
+       *it = InstanceTypeMap(hash)
+       for name, t := range *it {
+               t.Name = name
+               if t.ProviderType == "" {
+                       t.ProviderType = name
+               }
+               (*it)[name] = t
+       }
+       return nil
+}
+
+// GetNodeProfile returns a NodeProfile for the given hostname. An
+// error is returned if the appropriate configuration can't be
+// determined (e.g., this does not appear to be a system node). If
+// node is empty, use the OS-reported hostname.
+func (cc *Cluster) GetNodeProfile(node string) (*NodeProfile, error) {
+       if node == "" {
+               hostname, err := os.Hostname()
+               if err != nil {
+                       return nil, err
+               }
+               node = hostname
+       }
+       if cfg, ok := cc.NodeProfiles[node]; ok {
+               return &cfg, nil
+       }
+       // If node is not listed, but "*" gives a default system node
+       // config, use the default config.
+       if cfg, ok := cc.NodeProfiles["*"]; ok {
+               return &cfg, nil
+       }
+       return nil, fmt.Errorf("config does not provision host %q as a system node", node)
+}
+
+type NodeProfile struct {
+       Controller    SystemServiceInstance `json:"arvados-controller"`
+       Health        SystemServiceInstance `json:"arvados-health"`
+       Keepbalance   SystemServiceInstance `json:"keep-balance"`
+       Keepproxy     SystemServiceInstance `json:"keepproxy"`
+       Keepstore     SystemServiceInstance `json:"keepstore"`
+       Keepweb       SystemServiceInstance `json:"keep-web"`
+       Nodemanager   SystemServiceInstance `json:"arvados-node-manager"`
+       DispatchCloud SystemServiceInstance `json:"arvados-dispatch-cloud"`
+       RailsAPI      SystemServiceInstance `json:"arvados-api-server"`
+       Websocket     SystemServiceInstance `json:"arvados-ws"`
+       Workbench     SystemServiceInstance `json:"arvados-workbench"`
+}
+
+type ServiceName string
+
+const (
+       ServiceNameRailsAPI      ServiceName = "arvados-api-server"
+       ServiceNameController    ServiceName = "arvados-controller"
+       ServiceNameDispatchCloud ServiceName = "arvados-dispatch-cloud"
+       ServiceNameNodemanager   ServiceName = "arvados-node-manager"
+       ServiceNameWorkbench     ServiceName = "arvados-workbench"
+       ServiceNameWebsocket     ServiceName = "arvados-ws"
+       ServiceNameKeepbalance   ServiceName = "keep-balance"
+       ServiceNameKeepweb       ServiceName = "keep-web"
+       ServiceNameKeepproxy     ServiceName = "keepproxy"
+       ServiceNameKeepstore     ServiceName = "keepstore"
+)
+
+// ServicePorts returns the configured listening address (or "" if
+// disabled) for each service on the node.
+func (np *NodeProfile) ServicePorts() map[ServiceName]string {
+       return map[ServiceName]string{
+               ServiceNameRailsAPI:      np.RailsAPI.Listen,
+               ServiceNameController:    np.Controller.Listen,
+               ServiceNameDispatchCloud: np.DispatchCloud.Listen,
+               ServiceNameNodemanager:   np.Nodemanager.Listen,
+               ServiceNameWorkbench:     np.Workbench.Listen,
+               ServiceNameWebsocket:     np.Websocket.Listen,
+               ServiceNameKeepbalance:   np.Keepbalance.Listen,
+               ServiceNameKeepweb:       np.Keepweb.Listen,
+               ServiceNameKeepproxy:     np.Keepproxy.Listen,
+               ServiceNameKeepstore:     np.Keepstore.Listen,
+       }
+}
+
+func (h RequestLimits) GetMultiClusterRequestConcurrency() int {
+       if h.MultiClusterRequestConcurrency == 0 {
+               return 4
+       }
+       return h.MultiClusterRequestConcurrency
+}
+
+func (h RequestLimits) GetMaxItemsPerResponse() int {
+       if h.MaxItemsPerResponse == 0 {
+               return 1000
+       }
+       return h.MaxItemsPerResponse
+}
+
+type SystemServiceInstance struct {
+       Listen   string
+       TLS      bool
+       Insecure bool
+}
diff --git a/sdk/go/arvados/config_test.go b/sdk/go/arvados/config_test.go
new file mode 100644 (file)
index 0000000..59c7432
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "github.com/ghodss/yaml"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&ConfigSuite{})
+
+type ConfigSuite struct{}
+
+func (s *ConfigSuite) TestInstanceTypesAsArray(c *check.C) {
+       var cluster Cluster
+       yaml.Unmarshal([]byte("InstanceTypes:\n- Name: foo\n"), &cluster)
+       c.Check(len(cluster.InstanceTypes), check.Equals, 1)
+       c.Check(cluster.InstanceTypes["foo"].Name, check.Equals, "foo")
+}
+
+func (s *ConfigSuite) TestInstanceTypesAsHash(c *check.C) {
+       var cluster Cluster
+       yaml.Unmarshal([]byte("InstanceTypes:\n  foo:\n    ProviderType: bar\n"), &cluster)
+       c.Check(len(cluster.InstanceTypes), check.Equals, 1)
+       c.Check(cluster.InstanceTypes["foo"].Name, check.Equals, "foo")
+       c.Check(cluster.InstanceTypes["foo"].ProviderType, check.Equals, "bar")
+}
+
+func (s *ConfigSuite) TestInstanceTypeSize(c *check.C) {
+       var it InstanceType
+       err := yaml.Unmarshal([]byte("Name: foo\nScratch: 4GB\nRAM: 4GiB\n"), &it)
+       c.Check(err, check.IsNil)
+       c.Check(int64(it.Scratch), check.Equals, int64(4000000000))
+       c.Check(int64(it.RAM), check.Equals, int64(4294967296))
+}
diff --git a/sdk/go/arvados/container.go b/sdk/go/arvados/container.go
new file mode 100644 (file)
index 0000000..fb09548
--- /dev/null
@@ -0,0 +1,123 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "time"
+
+// Container is an arvados#container resource.
+type Container struct {
+       UUID                 string                 `json:"uuid"`
+       CreatedAt            time.Time              `json:"created_at"`
+       Command              []string               `json:"command"`
+       ContainerImage       string                 `json:"container_image"`
+       Cwd                  string                 `json:"cwd"`
+       Environment          map[string]string      `json:"environment"`
+       LockedByUUID         string                 `json:"locked_by_uuid"`
+       Mounts               map[string]Mount       `json:"mounts"`
+       Output               string                 `json:"output"`
+       OutputPath           string                 `json:"output_path"`
+       Priority             int64                  `json:"priority"`
+       RuntimeConstraints   RuntimeConstraints     `json:"runtime_constraints"`
+       State                ContainerState         `json:"state"`
+       SchedulingParameters SchedulingParameters   `json:"scheduling_parameters"`
+       ExitCode             int                    `json:"exit_code"`
+       RuntimeStatus        map[string]interface{} `json:"runtime_status"`
+}
+
+// ContainerRequest is an arvados#containerRequest resource.
+type ContainerRequest struct {
+       UUID                    string                 `json:"uuid"`
+       OwnerUUID               string                 `json:"owner_uuid"`
+       CreatedAt               time.Time              `json:"created_at"`
+       ModifiedByClientUUID    string                 `json:"modified_by_client_uuid"`
+       ModifiedByUserUUID      string                 `json:"modified_by_user_uuid"`
+       ModifiedAt              time.Time              `json:"modified_at"`
+       Href                    string                 `json:"href"`
+       Kind                    string                 `json:"kind"`
+       Etag                    string                 `json:"etag"`
+       Name                    string                 `json:"name"`
+       Description             string                 `json:"description"`
+       Properties              map[string]interface{} `json:"properties"`
+       State                   ContainerRequestState  `json:"state"`
+       RequestingContainerUUID string                 `json:"requesting_container_uuid"`
+       ContainerUUID           string                 `json:"container_uuid"`
+       ContainerCountMax       int                    `json:"container_count_max"`
+       Mounts                  map[string]Mount       `json:"mounts"`
+       RuntimeConstraints      RuntimeConstraints     `json:"runtime_constraints"`
+       SchedulingParameters    SchedulingParameters   `json:"scheduling_parameters"`
+       ContainerImage          string                 `json:"container_image"`
+       Environment             map[string]string      `json:"environment"`
+       Cwd                     string                 `json:"cwd"`
+       Command                 []string               `json:"command"`
+       OutputPath              string                 `json:"output_path"`
+       OutputName              string                 `json:"output_name"`
+       OutputTTL               int                    `json:"output_ttl"`
+       Priority                int                    `json:"priority"`
+       UseExisting             bool                   `json:"use_existing"`
+       LogUUID                 string                 `json:"log_uuid"`
+       OutputUUID              string                 `json:"output_uuid"`
+       RuntimeToken            string                 `json:"runtime_token"`
+}
+
+// Mount describes special behavior attached to a filesystem path or
+// device in a container.
+type Mount struct {
+       Kind              string      `json:"kind"`
+       Writable          bool        `json:"writable"`
+       PortableDataHash  string      `json:"portable_data_hash"`
+       UUID              string      `json:"uuid"`
+       DeviceType        string      `json:"device_type"`
+       Path              string      `json:"path"`
+       Content           interface{} `json:"content"`
+       ExcludeFromOutput bool        `json:"exclude_from_output"`
+       Capacity          int64       `json:"capacity"`
+       Commit            string      `json:"commit"`          // only if kind=="git_tree"
+       RepositoryName    string      `json:"repository_name"` // only if kind=="git_tree"
+       GitURL            string      `json:"git_url"`         // only if kind=="git_tree"
+}
+
+// RuntimeConstraints specify a container's compute resources (RAM,
+// CPU) and network connectivity.
+type RuntimeConstraints struct {
+       API          *bool
+       RAM          int64 `json:"ram"`
+       VCPUs        int   `json:"vcpus"`
+       KeepCacheRAM int64 `json:"keep_cache_ram"`
+}
+
+// SchedulingParameters specify a container's scheduling parameters,
+// such as the cluster partitions it may run on.
+type SchedulingParameters struct {
+       Partitions  []string `json:"partitions"`
+       Preemptible bool     `json:"preemptible"`
+       MaxRunTime  int      `json:"max_run_time"`
+}
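+
+// For example (illustrative values only), a preemptible request limited
+// to one hour of runtime on an "fpga" partition would serialize as:
+//
+//     {"partitions": ["fpga"], "preemptible": true, "max_run_time": 3600}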
+
+// ContainerList is an arvados#containerList resource.
+type ContainerList struct {
+       Items          []Container `json:"items"`
+       ItemsAvailable int         `json:"items_available"`
+       Offset         int         `json:"offset"`
+       Limit          int         `json:"limit"`
+}
+
+// ContainerState is a string corresponding to a valid Container state.
+type ContainerState string
+
+const (
+       ContainerStateQueued    = ContainerState("Queued")
+       ContainerStateLocked    = ContainerState("Locked")
+       ContainerStateRunning   = ContainerState("Running")
+       ContainerStateComplete  = ContainerState("Complete")
+       ContainerStateCancelled = ContainerState("Cancelled")
+)
+
+// ContainerRequestState is a string corresponding to a valid ContainerRequest state.
+type ContainerRequestState string
+
+const (
+       ContainerRequestStateUncommitted = ContainerRequestState("Uncommitted")
+       ContainerRequestStateCommitted   = ContainerRequestState("Committed")
+       ContainerRequestStateFinal       = ContainerRequestState("Final")
+)
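+
+// Example (a sketch; client is any implementation of the apiClient
+// interface in fs_backend.go, and uuid is a hypothetical container UUID):
+//
+//     var c Container
+//     err := client.RequestAndDecode(&c, "GET",
+//             "arvados/v1/containers/"+uuid, nil, nil)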
diff --git a/sdk/go/arvados/contextgroup.go b/sdk/go/arvados/contextgroup.go
new file mode 100644 (file)
index 0000000..fa0de24
--- /dev/null
@@ -0,0 +1,95 @@
+package arvados
+
+import (
+       "context"
+       "sync"
+)
+
+// A contextGroup is a context-aware variation on sync.WaitGroup. It
+// provides a child context for the added funcs to use, so they can
+// exit early if another added func returns an error. Its Wait()
+// method returns the first error returned by any added func.
+//
+// Example:
+//
+//     err := errors.New("oops")
+//     cg := newContextGroup(context.Background())
+//     defer cg.Cancel()
+//     cg.Go(func() error {
+//             someFuncWithContext(cg.Context())
+//             return nil
+//     })
+//     cg.Go(func() error {
+//             return err // this cancels cg.Context()
+//     })
+//     return cg.Wait() // returns err after both goroutines have ended
+type contextGroup struct {
+       ctx    context.Context
+       cancel context.CancelFunc
+       wg     sync.WaitGroup
+       err    error
+       mtx    sync.Mutex
+}
+
+// newContextGroup returns a new contextGroup. The caller must
+// eventually call the Cancel() method of the returned contextGroup.
+func newContextGroup(ctx context.Context) *contextGroup {
+       ctx, cancel := context.WithCancel(ctx)
+       return &contextGroup{
+               ctx:    ctx,
+               cancel: cancel,
+       }
+}
+
+// Cancel cancels the context group.
+func (cg *contextGroup) Cancel() {
+       cg.cancel()
+}
+
+// Context returns a context.Context which is canceled when any added
+// func returns an error, or when the caller calls Cancel().
+func (cg *contextGroup) Context() context.Context {
+       return cg.ctx
+}
+
+// Go calls f in a new goroutine. If f returns an error, the
+// contextGroup is canceled.
+//
+// If f notices cg.Context() is done, it should abandon further work
+// and return. In this case, f's return value will be ignored.
+func (cg *contextGroup) Go(f func() error) {
+       cg.mtx.Lock()
+       defer cg.mtx.Unlock()
+       if cg.err != nil {
+               return
+       }
+       cg.wg.Add(1)
+       go func() {
+               defer cg.wg.Done()
+               err := f()
+               cg.mtx.Lock()
+               defer cg.mtx.Unlock()
+               if err != nil && cg.err == nil {
+                       cg.err = err
+                       cg.cancel()
+               }
+       }()
+}
+
+// Wait waits for all added funcs to return, and returns the first
+// non-nil error.
+//
+// If the parent context is canceled before a func returns an error,
+// Wait returns the parent context's Err().
+//
+// Wait returns nil if all funcs return nil before the parent context
+// is canceled.
+func (cg *contextGroup) Wait() error {
+       cg.wg.Wait()
+       cg.mtx.Lock()
+       defer cg.mtx.Unlock()
+       if cg.err != nil {
+               return cg.err
+       }
+       return cg.ctx.Err()
+}
diff --git a/sdk/go/arvados/doc.go b/sdk/go/arvados/doc.go
new file mode 100644 (file)
index 0000000..3761ffc
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Package arvados is a client library for Arvados.
+//
+// The API is not stable: it should be considered experimental
+// pre-release.
+//
+// The intent is to offer model types and API call functions that can
+// be generated automatically (or at least mostly automatically) from
+// a discovery document. For the time being, there is a manually
+// generated subset of those types and API calls with (approximately)
+// the right signatures, plus client/authentication support and some
+// convenience functions.
+package arvados
diff --git a/sdk/go/arvados/duration.go b/sdk/go/arvados/duration.go
new file mode 100644 (file)
index 0000000..25eed01
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "encoding/json"
+       "fmt"
+       "time"
+)
+
+// Duration is time.Duration but looks like "12s" in JSON, rather than
+// a number of nanoseconds.
+type Duration time.Duration
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (d *Duration) UnmarshalJSON(data []byte) error {
+       if data[0] == '"' {
+               return d.Set(string(data[1 : len(data)-1]))
+       }
+       return fmt.Errorf("duration must be given as a string like \"600s\" or \"1h30m\"")
+}
+
+// MarshalJSON implements json.Marshaler.
+func (d *Duration) MarshalJSON() ([]byte, error) {
+       return json.Marshal(d.String())
+}
+
+// String implements fmt.Stringer.
+func (d Duration) String() string {
+       return time.Duration(d).String()
+}
+
+// Duration returns a time.Duration.
+func (d Duration) Duration() time.Duration {
+       return time.Duration(d)
+}
+
+// Set implements the flag.Value interface: it parses s with
+// time.ParseDuration and stores the result.
+func (d *Duration) Set(s string) error {
+       dur, err := time.ParseDuration(s)
+       *d = Duration(dur)
+       return err
+}
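+
+// Example (a minimal sketch) of round-tripping a Duration through JSON:
+//
+//     var d Duration
+//     _ = json.Unmarshal([]byte(`"1h30m"`), &d)
+//     fmt.Println(d.Duration())  // 1h30m0s
+//     buf, _ := json.Marshal(&d) // buf is `"1h30m0s"`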
diff --git a/sdk/go/arvados/error.go b/sdk/go/arvados/error.go
new file mode 100644 (file)
index 0000000..9a04855
--- /dev/null
@@ -0,0 +1,47 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "encoding/json"
+       "fmt"
+       "net/http"
+       "net/url"
+       "strings"
+)
+
+type TransactionError struct {
+       Method     string
+       URL        url.URL
+       StatusCode int
+       Status     string
+       Errors     []string
+}
+
+func (e TransactionError) Error() (s string) {
+       s = fmt.Sprintf("request failed: %s", e.URL.String())
+       if e.Status != "" {
+               s = s + ": " + e.Status
+       }
+       if len(e.Errors) > 0 {
+               s = s + ": " + strings.Join(e.Errors, "; ")
+       }
+       return
+}
+
+func newTransactionError(req *http.Request, resp *http.Response, buf []byte) *TransactionError {
+       var e TransactionError
+       if json.Unmarshal(buf, &e) != nil {
+               // No JSON-formatted error response
+               e.Errors = nil
+       }
+       e.Method = req.Method
+       e.URL = *req.URL
+       if resp != nil {
+               e.Status = resp.Status
+               e.StatusCode = resp.StatusCode
+       }
+       return &e
+}
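+
+// Example (a sketch): the HTTP status of a failed request can be
+// recovered by type-asserting the returned error:
+//
+//     if txErr, ok := err.(*TransactionError); ok {
+//             log.Printf("%s %s: HTTP %d", txErr.Method, txErr.URL.String(), txErr.StatusCode)
+//     }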
diff --git a/sdk/go/arvados/fs_backend.go b/sdk/go/arvados/fs_backend.go
new file mode 100644 (file)
index 0000000..9ae0fc3
--- /dev/null
@@ -0,0 +1,30 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "io"
+
+type fsBackend interface {
+       keepClient
+       apiClient
+}
+
+// Ideally *Client would do everything; meanwhile keepBackend
+// implements fsBackend by merging the two kinds of arvados client.
+type keepBackend struct {
+       keepClient
+       apiClient
+}
+
+type keepClient interface {
+       ReadAt(locator string, p []byte, off int) (int, error)
+       PutB(p []byte) (string, int, error)
+       LocalLocator(locator string) (string, error)
+}
+
+type apiClient interface {
+       RequestAndDecode(dst interface{}, method, path string, body io.Reader, params interface{}) error
+       UpdateBody(rsc resource) io.Reader
+}
diff --git a/sdk/go/arvados/fs_base.go b/sdk/go/arvados/fs_base.go
new file mode 100644 (file)
index 0000000..3058a76
--- /dev/null
@@ -0,0 +1,595 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "errors"
+       "fmt"
+       "io"
+       "log"
+       "net/http"
+       "os"
+       "path"
+       "strings"
+       "sync"
+       "time"
+)
+
+var (
+       ErrReadOnlyFile      = errors.New("read-only file")
+       ErrNegativeOffset    = errors.New("cannot seek to negative offset")
+       ErrFileExists        = errors.New("file exists")
+       ErrInvalidOperation  = errors.New("invalid operation")
+       ErrInvalidArgument   = errors.New("invalid argument")
+       ErrDirectoryNotEmpty = errors.New("directory not empty")
+       ErrWriteOnlyMode     = errors.New("file is O_WRONLY")
+       ErrSyncNotSupported  = errors.New("O_SYNC flag is not supported")
+       ErrIsDirectory       = errors.New("cannot rename file to overwrite existing directory")
+       ErrNotADirectory     = errors.New("not a directory")
+       ErrPermission        = os.ErrPermission
+)
+
+// A File is an *os.File-like interface for reading and writing files
+// in a FileSystem.
+type File interface {
+       io.Reader
+       io.Writer
+       io.Closer
+       io.Seeker
+       Size() int64
+       Readdir(int) ([]os.FileInfo, error)
+       Stat() (os.FileInfo, error)
+       Truncate(int64) error
+       Sync() error
+}
+
+// A FileSystem is an http.FileSystem plus Stat() and support for
+// opening writable files. All methods are safe to call from multiple
+// goroutines.
+type FileSystem interface {
+       http.FileSystem
+       fsBackend
+
+       rootnode() inode
+
+       // filesystem-wide lock: used by Rename() to prevent deadlock
+       // while locking multiple inodes.
+       locker() sync.Locker
+
+       // create a new node with nil parent.
+       newNode(name string, perm os.FileMode, modTime time.Time) (node inode, err error)
+
+       // analogous to os.Stat()
+       Stat(name string) (os.FileInfo, error)
+
+       // analogous to os.Create(): create/truncate a file and open it O_RDWR.
+       Create(name string) (File, error)
+
+       // Like os.OpenFile(): create or open a file or directory.
+       //
+       // If flag&os.O_EXCL==0, it opens an existing file or
+       // directory if one exists. If flag&os.O_CREATE!=0, it creates
+       // a new empty file or directory if one does not already
+       // exist.
+       //
+       // When creating a new item, perm&os.ModeDir determines
+       // whether it is a file or a directory.
+       //
+       // A file can be opened multiple times and used concurrently
+       // from multiple goroutines. However, each File object should
+       // be used by only one goroutine at a time.
+       OpenFile(name string, flag int, perm os.FileMode) (File, error)
+
+       Mkdir(name string, perm os.FileMode) error
+       Remove(name string) error
+       RemoveAll(name string) error
+       Rename(oldname, newname string) error
+       Sync() error
+}
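+
+// Example (a sketch; fs is any FileSystem, error handling elided):
+//
+//     f, err := fs.OpenFile("dir/file.txt", os.O_CREATE|os.O_WRONLY, 0644)
+//     if err == nil {
+//             defer f.Close()
+//             f.Write([]byte("hello\n"))
+//     }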
+
+type inode interface {
+       SetParent(parent inode, name string)
+       Parent() inode
+       FS() FileSystem
+       Read([]byte, filenodePtr) (int, filenodePtr, error)
+       Write([]byte, filenodePtr) (int, filenodePtr, error)
+       Truncate(int64) error
+       IsDir() bool
+       Readdir() ([]os.FileInfo, error)
+       Size() int64
+       FileInfo() os.FileInfo
+
+       // Child() performs lookups and updates of named child nodes.
+       //
+       // (The term "child" here is used strictly. This means name is
+       // not "." or "..", and name does not contain "/".)
+       //
+       // If replace is non-nil, Child calls replace(x) where x is
+       // the current child inode with the given name. If possible,
+       // the child inode is replaced with the one returned by
+       // replace().
+       //
+       // If replace(x) returns an inode (besides x or nil) that is
+       // subsequently returned by Child(), then Child()'s caller
+       // must ensure the new child's name and parent are set/updated
+       // to Child()'s name argument and its receiver respectively.
+       // This is not necessarily done before replace(x) returns, but
+       // it must be done before Child()'s caller releases the
+       // parent's lock.
+       //
+       // Nil represents "no child". replace(nil) signifies that no
+       // child with this name exists yet. If replace() returns nil,
+       // the existing child should be deleted if possible.
+       //
+       // An implementation of Child() is permitted to ignore
+       // replace() or its return value. For example, a regular file
+       // inode does not have children, so Child() always returns
+       // nil.
+       //
+       // Child() returns the child, if any, with the given name: if
+       // a child was added or changed, the new child is returned.
+       //
+       // Caller must have lock (or rlock if replace is nil).
+       Child(name string, replace func(inode) (inode, error)) (inode, error)
+
+       sync.Locker
+       RLock()
+       RUnlock()
+}
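+
+// For example (illustrative; the name "newname" is hypothetical), a
+// caller holding the parent's lock can create a child only if none
+// exists yet:
+//
+//     child, err := parent.Child("newname", func(old inode) (inode, error) {
+//             if old != nil {
+//                     return old, nil // keep the existing child
+//             }
+//             repl, err := parent.FS().newNode("newname", 0644, time.Now())
+//             if err != nil {
+//                     return nil, err
+//             }
+//             repl.SetParent(parent, "newname")
+//             return repl, nil
+//     })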
+
+type fileinfo struct {
+       name    string
+       mode    os.FileMode
+       size    int64
+       modTime time.Time
+}
+
+// Name implements os.FileInfo.
+func (fi fileinfo) Name() string {
+       return fi.name
+}
+
+// ModTime implements os.FileInfo.
+func (fi fileinfo) ModTime() time.Time {
+       return fi.modTime
+}
+
+// Mode implements os.FileInfo.
+func (fi fileinfo) Mode() os.FileMode {
+       return fi.mode
+}
+
+// IsDir implements os.FileInfo.
+func (fi fileinfo) IsDir() bool {
+       return fi.mode&os.ModeDir != 0
+}
+
+// Size implements os.FileInfo.
+func (fi fileinfo) Size() int64 {
+       return fi.size
+}
+
+// Sys implements os.FileInfo.
+func (fi fileinfo) Sys() interface{} {
+       return nil
+}
+
+type nullnode struct{}
+
+func (*nullnode) Mkdir(string, os.FileMode) error {
+       return ErrInvalidOperation
+}
+
+func (*nullnode) Read([]byte, filenodePtr) (int, filenodePtr, error) {
+       return 0, filenodePtr{}, ErrInvalidOperation
+}
+
+func (*nullnode) Write([]byte, filenodePtr) (int, filenodePtr, error) {
+       return 0, filenodePtr{}, ErrInvalidOperation
+}
+
+func (*nullnode) Truncate(int64) error {
+       return ErrInvalidOperation
+}
+
+func (*nullnode) FileInfo() os.FileInfo {
+       return fileinfo{}
+}
+
+func (*nullnode) IsDir() bool {
+       return false
+}
+
+func (*nullnode) Readdir() ([]os.FileInfo, error) {
+       return nil, ErrInvalidOperation
+}
+
+func (*nullnode) Child(name string, replace func(inode) (inode, error)) (inode, error) {
+       return nil, ErrNotADirectory
+}
+
+type treenode struct {
+       fs       FileSystem
+       parent   inode
+       inodes   map[string]inode
+       fileinfo fileinfo
+       sync.RWMutex
+       nullnode
+}
+
+func (n *treenode) FS() FileSystem {
+       return n.fs
+}
+
+func (n *treenode) SetParent(p inode, name string) {
+       n.Lock()
+       defer n.Unlock()
+       n.parent = p
+       n.fileinfo.name = name
+}
+
+func (n *treenode) Parent() inode {
+       n.RLock()
+       defer n.RUnlock()
+       return n.parent
+}
+
+func (n *treenode) IsDir() bool {
+       return true
+}
+
+func (n *treenode) Child(name string, replace func(inode) (inode, error)) (child inode, err error) {
+       child = n.inodes[name]
+       if name == "" || name == "." || name == ".." {
+               err = ErrInvalidArgument
+               return
+       }
+       if replace == nil {
+               return
+       }
+       newchild, err := replace(child)
+       if err != nil {
+               return
+       }
+       if newchild == nil {
+               delete(n.inodes, name)
+       } else if newchild != child {
+               n.inodes[name] = newchild
+               n.fileinfo.modTime = time.Now()
+               child = newchild
+       }
+       return
+}
+
+func (n *treenode) Size() int64 {
+       return n.FileInfo().Size()
+}
+
+func (n *treenode) FileInfo() os.FileInfo {
+       n.Lock()
+       defer n.Unlock()
+       n.fileinfo.size = int64(len(n.inodes))
+       return n.fileinfo
+}
+
+func (n *treenode) Readdir() (fi []os.FileInfo, err error) {
+       n.RLock()
+       defer n.RUnlock()
+       fi = make([]os.FileInfo, 0, len(n.inodes))
+       for _, inode := range n.inodes {
+               fi = append(fi, inode.FileInfo())
+       }
+       return
+}
+
+type fileSystem struct {
+       root inode
+       fsBackend
+       mutex sync.Mutex
+}
+
+func (fs *fileSystem) rootnode() inode {
+       return fs.root
+}
+
+func (fs *fileSystem) locker() sync.Locker {
+       return &fs.mutex
+}
+
+// OpenFile is analogous to os.OpenFile().
+func (fs *fileSystem) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+       return fs.openFile(name, flag, perm)
+}
+
+func (fs *fileSystem) openFile(name string, flag int, perm os.FileMode) (*filehandle, error) {
+       if flag&os.O_SYNC != 0 {
+               return nil, ErrSyncNotSupported
+       }
+       dirname, name := path.Split(name)
+       parent, err := rlookup(fs.root, dirname)
+       if err != nil {
+               return nil, err
+       }
+       var readable, writable bool
+       switch flag & (os.O_RDWR | os.O_RDONLY | os.O_WRONLY) {
+       case os.O_RDWR:
+               readable = true
+               writable = true
+       case os.O_RDONLY:
+               readable = true
+       case os.O_WRONLY:
+               writable = true
+       default:
+               return nil, fmt.Errorf("invalid flags 0x%x", flag)
+       }
+       if !writable && parent.IsDir() {
+               // A directory can be opened via "foo/", "foo/.", or
+               // "foo/..".
+               switch name {
+               case ".", "":
+                       return &filehandle{inode: parent}, nil
+               case "..":
+                       return &filehandle{inode: parent.Parent()}, nil
+               }
+       }
+       createMode := flag&os.O_CREATE != 0
+       if createMode {
+               parent.Lock()
+               defer parent.Unlock()
+       } else {
+               parent.RLock()
+               defer parent.RUnlock()
+       }
+       n, err := parent.Child(name, nil)
+       if err != nil {
+               return nil, err
+       } else if n == nil {
+               if !createMode {
+                       return nil, os.ErrNotExist
+               }
+               n, err = parent.Child(name, func(inode) (repl inode, err error) {
+                       repl, err = parent.FS().newNode(name, perm|0755, time.Now())
+                       if err != nil {
+                               return
+                       }
+                       repl.SetParent(parent, name)
+                       return
+               })
+               if err != nil {
+                       return nil, err
+               } else if n == nil {
+                       // Parent rejected new child, but returned no error
+                       return nil, ErrInvalidArgument
+               }
+       } else if flag&os.O_EXCL != 0 {
+               return nil, ErrFileExists
+       } else if flag&os.O_TRUNC != 0 {
+               if !writable {
+                       return nil, fmt.Errorf("invalid flag O_TRUNC in read-only mode")
+               } else if n.IsDir() {
+                       return nil, fmt.Errorf("invalid flag O_TRUNC when opening directory")
+               } else if err := n.Truncate(0); err != nil {
+                       return nil, err
+               }
+       }
+       return &filehandle{
+               inode:    n,
+               append:   flag&os.O_APPEND != 0,
+               readable: readable,
+               writable: writable,
+       }, nil
+}
+
+func (fs *fileSystem) Open(name string) (http.File, error) {
+       return fs.OpenFile(name, os.O_RDONLY, 0)
+}
+
+func (fs *fileSystem) Create(name string) (File, error) {
+       return fs.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0)
+}
+
+func (fs *fileSystem) Mkdir(name string, perm os.FileMode) error {
+       dirname, name := path.Split(name)
+       n, err := rlookup(fs.root, dirname)
+       if err != nil {
+               return err
+       }
+       n.Lock()
+       defer n.Unlock()
+       if child, err := n.Child(name, nil); err != nil {
+               return err
+       } else if child != nil {
+               return os.ErrExist
+       }
+
+       _, err = n.Child(name, func(inode) (repl inode, err error) {
+               repl, err = n.FS().newNode(name, perm|os.ModeDir, time.Now())
+               if err != nil {
+                       return
+               }
+               repl.SetParent(n, name)
+               return
+       })
+       return err
+}
+
+func (fs *fileSystem) Stat(name string) (os.FileInfo, error) {
+       node, err := rlookup(fs.root, name)
+       if err != nil {
+               return nil, err
+       }
+       return node.FileInfo(), nil
+}
+
+func (fs *fileSystem) Rename(oldname, newname string) error {
+       olddir, oldname := path.Split(oldname)
+       if oldname == "" || oldname == "." || oldname == ".." {
+               return ErrInvalidArgument
+       }
+       olddirf, err := fs.openFile(olddir+".", os.O_RDONLY, 0)
+       if err != nil {
+               return fmt.Errorf("%q: %s", olddir, err)
+       }
+       defer olddirf.Close()
+
+       newdir, newname := path.Split(newname)
+       if newname == "." || newname == ".." {
+               return ErrInvalidArgument
+       } else if newname == "" {
+               // Rename("a/b", "c/") means Rename("a/b", "c/b")
+               newname = oldname
+       }
+       newdirf, err := fs.openFile(newdir+".", os.O_RDONLY, 0)
+       if err != nil {
+               return fmt.Errorf("%q: %s", newdir, err)
+       }
+       defer newdirf.Close()
+
+       // TODO: If the nearest common ancestor ("nca") of olddirf and
+       // newdirf is on a different filesystem than fs, we should
+       // call nca.FS().Rename() instead of proceeding. Until then
+       // it's awkward for filesystems to implement their own Rename
+       // methods effectively: the only one that runs is the one on
+       // the root FileSystem exposed to the caller (webdav, fuse,
+       // etc).
+
+       // When acquiring locks on multiple inodes, avoid deadlock by
+       // locking the entire containing filesystem first.
+       cfs := olddirf.inode.FS()
+       cfs.locker().Lock()
+       defer cfs.locker().Unlock()
+
+       if cfs != newdirf.inode.FS() {
+               // Moving inodes across filesystems is not (yet)
+               // supported. Locking inodes from different
+               // filesystems could deadlock, so we must error out
+               // now.
+               return ErrInvalidArgument
+       }
+
+       // To ensure we can test reliably whether we're about to move
+       // a directory into itself, lock all potential common
+       // ancestors of olddir and newdir.
+       needLock := []sync.Locker{}
+       for _, node := range []inode{olddirf.inode, newdirf.inode} {
+               needLock = append(needLock, node)
+               for node.Parent() != node && node.Parent().FS() == node.FS() {
+                       node = node.Parent()
+                       needLock = append(needLock, node)
+               }
+       }
+       locked := map[sync.Locker]bool{}
+       for i := len(needLock) - 1; i >= 0; i-- {
+               if n := needLock[i]; !locked[n] {
+                       n.Lock()
+                       defer n.Unlock()
+                       locked[n] = true
+               }
+       }
+
+       _, err = olddirf.inode.Child(oldname, func(oldinode inode) (inode, error) {
+               if oldinode == nil {
+                       return oldinode, os.ErrNotExist
+               }
+               if locked[oldinode] {
+                       // oldinode cannot become a descendant of itself.
+                       return oldinode, ErrInvalidArgument
+               }
+               if oldinode.FS() != cfs && newdirf.inode != olddirf.inode {
+                       // moving a mount point to a different parent
+                       // is not (yet) supported.
+                       return oldinode, ErrInvalidArgument
+               }
+               accepted, err := newdirf.inode.Child(newname, func(existing inode) (inode, error) {
+                       if existing != nil && existing.IsDir() {
+                               return existing, ErrIsDirectory
+                       }
+                       return oldinode, nil
+               })
+               if err != nil {
+                       // Leave oldinode in olddir.
+                       return oldinode, err
+               }
+               accepted.SetParent(newdirf.inode, newname)
+               return nil, nil
+       })
+       return err
+}
+
+func (fs *fileSystem) Remove(name string) error {
+       return fs.remove(strings.TrimRight(name, "/"), false)
+}
+
+func (fs *fileSystem) RemoveAll(name string) error {
+       err := fs.remove(strings.TrimRight(name, "/"), true)
+       if os.IsNotExist(err) {
+               // "If the path does not exist, RemoveAll returns
+               // nil." (see "os" pkg)
+               err = nil
+       }
+       return err
+}
+
+func (fs *fileSystem) remove(name string, recursive bool) error {
+       dirname, name := path.Split(name)
+       if name == "" || name == "." || name == ".." {
+               return ErrInvalidArgument
+       }
+       dir, err := rlookup(fs.root, dirname)
+       if err != nil {
+               return err
+       }
+       dir.Lock()
+       defer dir.Unlock()
+       _, err = dir.Child(name, func(node inode) (inode, error) {
+               if node == nil {
+                       return nil, os.ErrNotExist
+               }
+               if !recursive && node.IsDir() && node.Size() > 0 {
+                       return node, ErrDirectoryNotEmpty
+               }
+               return nil, nil
+       })
+       return err
+}
+
+func (fs *fileSystem) Sync() error {
+       log.Printf("TODO: sync fileSystem")
+       return ErrInvalidOperation
+}
+
+// rlookup (recursive lookup) returns the inode for the file/directory
+// with the given name (which may contain "/" separators). If no such
+// file/directory exists, it returns a nil node and os.ErrNotExist.
+func rlookup(start inode, path string) (node inode, err error) {
+       node = start
+       for _, name := range strings.Split(path, "/") {
+               if node.IsDir() {
+                       if name == "." || name == "" {
+                               continue
+                       }
+                       if name == ".." {
+                               node = node.Parent()
+                               continue
+                       }
+               }
+               node, err = func() (inode, error) {
+                       node.RLock()
+                       defer node.RUnlock()
+                       return node.Child(name, nil)
+               }()
+               if node == nil || err != nil {
+                       break
+               }
+       }
+       if node == nil && err == nil {
+               err = os.ErrNotExist
+       }
+       return
+}
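+
+// Example (a sketch): resolving a slash-separated path relative to the
+// filesystem root:
+//
+//     node, err := rlookup(fs.root, "foo/bar/baz.txt")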
+
+func permittedName(name string) bool {
+       return name != "" && name != "." && name != ".." && !strings.Contains(name, "/")
+}
diff --git a/sdk/go/arvados/fs_collection.go b/sdk/go/arvados/fs_collection.go
new file mode 100644 (file)
index 0000000..6644f4c
--- /dev/null
@@ -0,0 +1,1171 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "context"
+       "encoding/json"
+       "fmt"
+       "io"
+       "os"
+       "path"
+       "regexp"
+       "sort"
+       "strconv"
+       "strings"
+       "sync"
+       "time"
+)
+
+var (
+       maxBlockSize      = 1 << 26
+       concurrentWriters = 4 // max goroutines writing to Keep during sync()
+       writeAheadBlocks  = 1 // max background jobs flushing to Keep before blocking writes
+)
+
+// A CollectionFileSystem is a FileSystem that can be serialized as a
+// manifest and stored as a collection.
+type CollectionFileSystem interface {
+       FileSystem
+
+       // Flush all file data to Keep and return a snapshot of the
+       // filesystem suitable for saving as (Collection)ManifestText.
+       // Prefix (normally ".") is a top level directory, effectively
+       // prepended to all paths in the returned manifest.
+       MarshalManifest(prefix string) (string, error)
+
+       // Total data bytes in all files.
+       Size() int64
+}
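+
+// Example (a minimal sketch; error handling elided, client and kc satisfy
+// the apiClient and keepClient interfaces in fs_backend.go):
+//
+//     cfs, _ := coll.FileSystem(client, kc)
+//     f, _ := cfs.OpenFile("hello.txt", os.O_CREATE|os.O_WRONLY, 0644)
+//     f.Write([]byte("hello\n"))
+//     f.Close()
+//     mtxt, _ := cfs.MarshalManifest(".")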
+
+type collectionFileSystem struct {
+       fileSystem
+       uuid string
+}
+
+// FileSystem returns a CollectionFileSystem for the collection.
+func (c *Collection) FileSystem(client apiClient, kc keepClient) (CollectionFileSystem, error) {
+       var modTime time.Time
+       if c.ModifiedAt == nil {
+               modTime = time.Now()
+       } else {
+               modTime = *c.ModifiedAt
+       }
+       fs := &collectionFileSystem{
+               uuid: c.UUID,
+               fileSystem: fileSystem{
+                       fsBackend: keepBackend{apiClient: client, keepClient: kc},
+               },
+       }
+       root := &dirnode{
+               fs: fs,
+               treenode: treenode{
+                       fileinfo: fileinfo{
+                               name:    ".",
+                               mode:    os.ModeDir | 0755,
+                               modTime: modTime,
+                       },
+                       inodes: make(map[string]inode),
+               },
+       }
+       root.SetParent(root, ".")
+       if err := root.loadManifest(c.ManifestText); err != nil {
+               return nil, err
+       }
+       backdateTree(root, modTime)
+       fs.root = root
+       return fs, nil
+}
+
+func backdateTree(n inode, modTime time.Time) {
+       switch n := n.(type) {
+       case *filenode:
+               n.fileinfo.modTime = modTime
+       case *dirnode:
+               n.fileinfo.modTime = modTime
+               for _, n := range n.inodes {
+                       backdateTree(n, modTime)
+               }
+       }
+}
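+
+// For example (illustrative), a read-only mount of the empty collection
+// by portable data hash would serialize as:
+//
+//     {"kind": "collection", "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0", "writable": false}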
+
+func (fs *collectionFileSystem) newNode(name string, perm os.FileMode, modTime time.Time) (node inode, err error) {
+       if name == "" || name == "." || name == ".." {
+               return nil, ErrInvalidArgument
+       }
+       if perm.IsDir() {
+               return &dirnode{
+                       fs: fs,
+                       treenode: treenode{
+                               fileinfo: fileinfo{
+                                       name:    name,
+                                       mode:    perm | os.ModeDir,
+                                       modTime: modTime,
+                               },
+                               inodes: make(map[string]inode),
+                       },
+               }, nil
+       } else {
+               return &filenode{
+                       fs: fs,
+                       fileinfo: fileinfo{
+                               name:    name,
+                               mode:    perm & ^os.ModeDir,
+                               modTime: modTime,
+                       },
+               }, nil
+       }
+}
+
+func (fs *collectionFileSystem) Sync() error {
+       if fs.uuid == "" {
+               return nil
+       }
+       txt, err := fs.MarshalManifest(".")
+       if err != nil {
+               return fmt.Errorf("sync failed: %s", err)
+       }
+       coll := &Collection{
+               UUID:         fs.uuid,
+               ManifestText: txt,
+       }
+       err = fs.RequestAndDecode(nil, "PUT", "arvados/v1/collections/"+fs.uuid, fs.UpdateBody(coll), map[string]interface{}{"select": []string{"uuid"}})
+       if err != nil {
+               return fmt.Errorf("sync failed: update %s: %s", fs.uuid, err)
+       }
+       return nil
+}
+
+func (fs *collectionFileSystem) MarshalManifest(prefix string) (string, error) {
+       fs.fileSystem.root.Lock()
+       defer fs.fileSystem.root.Unlock()
+       return fs.fileSystem.root.(*dirnode).marshalManifest(context.TODO(), prefix, newThrottle(concurrentWriters))
+}
+
+func (fs *collectionFileSystem) Size() int64 {
+       return fs.fileSystem.root.(*dirnode).TreeSize()
+}
+
+// filenodePtr is an offset into a file that is (usually) efficient to
+// seek to. Specifically, if filenode.repacked==filenodePtr.repacked
+// then
+// filenode.segments[filenodePtr.segmentIdx][filenodePtr.segmentOff]
+// corresponds to file offset filenodePtr.off. Otherwise, it is
+// necessary to reexamine len(filenode.segments[0]) etc. to find the
+// correct segment and offset.
+type filenodePtr struct {
+       off        int64
+       segmentIdx int
+       segmentOff int
+       repacked   int64
+}
+
+// seek returns a ptr that is consistent with both startPtr.off and
+// the current state of fn. The caller must already hold fn.RLock() or
+// fn.Lock().
+//
+// If startPtr is beyond EOF, ptr.segment* will indicate precisely
+// EOF.
+//
+// After seeking:
+//
+//     ptr.segmentIdx == len(filenode.segments) // i.e., at EOF
+//     ||
+//     filenode.segments[ptr.segmentIdx].Len() > ptr.segmentOff
+func (fn *filenode) seek(startPtr filenodePtr) (ptr filenodePtr) {
+       ptr = startPtr
+       if ptr.off < 0 {
+               // meaningless anyway
+               return
+       } else if ptr.off >= fn.fileinfo.size {
+               ptr.segmentIdx = len(fn.segments)
+               ptr.segmentOff = 0
+               ptr.repacked = fn.repacked
+               return
+       } else if ptr.repacked == fn.repacked {
+               // segmentIdx and segmentOff accurately reflect
+               // ptr.off, but might have fallen off the end of a
+               // segment
+               if ptr.segmentOff >= fn.segments[ptr.segmentIdx].Len() {
+                       ptr.segmentIdx++
+                       ptr.segmentOff = 0
+               }
+               return
+       }
+       defer func() {
+               ptr.repacked = fn.repacked
+       }()
+       if ptr.off >= fn.fileinfo.size {
+               ptr.segmentIdx, ptr.segmentOff = len(fn.segments), 0
+               return
+       }
+       // Recompute segmentIdx and segmentOff.  We have already
+       // established fn.fileinfo.size > ptr.off >= 0, so we don't
+       // have to deal with edge cases here.
+       var off int64
+       for ptr.segmentIdx, ptr.segmentOff = 0, 0; off < ptr.off; ptr.segmentIdx++ {
+               // This would panic (index out of range) if
+               // fn.fileinfo.size were larger than
+               // sum(fn.segments[i].Len()) -- but that can't happen
+               // because we have ensured fn.fileinfo.size is always
+               // accurate.
+               segLen := int64(fn.segments[ptr.segmentIdx].Len())
+               if off+segLen > ptr.off {
+                       ptr.segmentOff = int(ptr.off - off)
+                       break
+               }
+               off += segLen
+       }
+       return
+}
+
+// filenode implements inode.
+type filenode struct {
+       parent   inode
+       fs       FileSystem
+       fileinfo fileinfo
+       segments []segment
+       // number of times `segments` has changed in a
+       // way that might invalidate a filenodePtr
+       repacked int64
+       memsize  int64 // bytes in memSegments
+       sync.RWMutex
+       nullnode
+       throttle *throttle
+}
+
+// caller must have lock
+func (fn *filenode) appendSegment(e segment) {
+       fn.segments = append(fn.segments, e)
+       fn.fileinfo.size += int64(e.Len())
+}
+
+func (fn *filenode) SetParent(p inode, name string) {
+       fn.Lock()
+       defer fn.Unlock()
+       fn.parent = p
+       fn.fileinfo.name = name
+}
+
+func (fn *filenode) Parent() inode {
+       fn.RLock()
+       defer fn.RUnlock()
+       return fn.parent
+}
+
+func (fn *filenode) FS() FileSystem {
+       return fn.fs
+}
+
+// Read reads file data from a single segment, starting at startPtr,
+// into p. startPtr is assumed not to be up-to-date. Caller must have
+// RLock or Lock.
+func (fn *filenode) Read(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) {
+       ptr = fn.seek(startPtr)
+       if ptr.off < 0 {
+               err = ErrNegativeOffset
+               return
+       }
+       if ptr.segmentIdx >= len(fn.segments) {
+               err = io.EOF
+               return
+       }
+       n, err = fn.segments[ptr.segmentIdx].ReadAt(p, int64(ptr.segmentOff))
+       if n > 0 {
+               ptr.off += int64(n)
+               ptr.segmentOff += n
+               if ptr.segmentOff == fn.segments[ptr.segmentIdx].Len() {
+                       ptr.segmentIdx++
+                       ptr.segmentOff = 0
+                       if ptr.segmentIdx < len(fn.segments) && err == io.EOF {
+                               err = nil
+                       }
+               }
+       }
+       return
+}
+
+func (fn *filenode) Size() int64 {
+       fn.RLock()
+       defer fn.RUnlock()
+       return fn.fileinfo.Size()
+}
+
+func (fn *filenode) FileInfo() os.FileInfo {
+       fn.RLock()
+       defer fn.RUnlock()
+       return fn.fileinfo
+}
+
+func (fn *filenode) Truncate(size int64) error {
+       fn.Lock()
+       defer fn.Unlock()
+       return fn.truncate(size)
+}
+
+func (fn *filenode) truncate(size int64) error {
+       if size == fn.fileinfo.size {
+               return nil
+       }
+       fn.repacked++
+       if size < fn.fileinfo.size {
+               ptr := fn.seek(filenodePtr{off: size})
+               for i := ptr.segmentIdx; i < len(fn.segments); i++ {
+                       if seg, ok := fn.segments[i].(*memSegment); ok {
+                               fn.memsize -= int64(seg.Len())
+                       }
+               }
+               if ptr.segmentOff == 0 {
+                       fn.segments = fn.segments[:ptr.segmentIdx]
+               } else {
+                       fn.segments = fn.segments[:ptr.segmentIdx+1]
+                       switch seg := fn.segments[ptr.segmentIdx].(type) {
+                       case *memSegment:
+                               seg.Truncate(ptr.segmentOff)
+                               fn.memsize += int64(seg.Len())
+                       default:
+                               fn.segments[ptr.segmentIdx] = seg.Slice(0, ptr.segmentOff)
+                       }
+               }
+               fn.fileinfo.size = size
+               return nil
+       }
+       for size > fn.fileinfo.size {
+               grow := size - fn.fileinfo.size
+               var seg *memSegment
+               var ok bool
+               if len(fn.segments) == 0 {
+                       seg = &memSegment{}
+                       fn.segments = append(fn.segments, seg)
+               } else if seg, ok = fn.segments[len(fn.segments)-1].(*memSegment); !ok || seg.Len() >= maxBlockSize {
+                       seg = &memSegment{}
+                       fn.segments = append(fn.segments, seg)
+               }
+               if maxgrow := int64(maxBlockSize - seg.Len()); maxgrow < grow {
+                       grow = maxgrow
+               }
+               seg.Truncate(seg.Len() + int(grow))
+               fn.fileinfo.size += grow
+               fn.memsize += grow
+       }
+       return nil
+}
+
+// Write writes data from p to the file, starting at startPtr,
+// extending the file size if necessary. Caller must have Lock.
+func (fn *filenode) Write(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) {
+       if startPtr.off > fn.fileinfo.size {
+               if err = fn.truncate(startPtr.off); err != nil {
+                       return 0, startPtr, err
+               }
+       }
+       ptr = fn.seek(startPtr)
+       if ptr.off < 0 {
+               err = ErrNegativeOffset
+               return
+       }
+       for len(p) > 0 && err == nil {
+               cando := p
+               if len(cando) > maxBlockSize {
+                       cando = cando[:maxBlockSize]
+               }
+               // Rearrange/grow fn.segments (and shrink cando if
+               // needed) such that cando can be copied to
+               // fn.segments[ptr.segmentIdx] at offset
+               // ptr.segmentOff.
+               cur := ptr.segmentIdx
+               prev := ptr.segmentIdx - 1
+               var curWritable bool
+               if cur < len(fn.segments) {
+                       _, curWritable = fn.segments[cur].(*memSegment)
+               }
+               var prevAppendable bool
+               if prev >= 0 && fn.segments[prev].Len() < maxBlockSize {
+                       _, prevAppendable = fn.segments[prev].(*memSegment)
+               }
+               if ptr.segmentOff > 0 && !curWritable {
+                       // Split a non-writable block.
+                       if max := fn.segments[cur].Len() - ptr.segmentOff; max <= len(cando) {
+                               // Truncate cur, and insert a new
+                               // segment after it.
+                               cando = cando[:max]
+                               fn.segments = append(fn.segments, nil)
+                               copy(fn.segments[cur+1:], fn.segments[cur:])
+                       } else {
+                               // Split cur into two copies, truncate
+                               // the one on the left, shift the one
+                               // on the right, and insert a new
+                               // segment between them.
+                               fn.segments = append(fn.segments, nil, nil)
+                               copy(fn.segments[cur+2:], fn.segments[cur:])
+                               fn.segments[cur+2] = fn.segments[cur+2].Slice(ptr.segmentOff+len(cando), -1)
+                       }
+                       cur++
+                       prev++
+                       seg := &memSegment{}
+                       seg.Truncate(len(cando))
+                       fn.memsize += int64(len(cando))
+                       fn.segments[cur] = seg
+                       fn.segments[prev] = fn.segments[prev].Slice(0, ptr.segmentOff)
+                       ptr.segmentIdx++
+                       ptr.segmentOff = 0
+                       fn.repacked++
+                       ptr.repacked++
+               } else if curWritable {
+                       if fit := int(fn.segments[cur].Len()) - ptr.segmentOff; fit < len(cando) {
+                               cando = cando[:fit]
+                       }
+               } else {
+                       if prevAppendable {
+                               // Shrink cando if needed to fit in
+                               // prev segment.
+                               if cangrow := maxBlockSize - fn.segments[prev].Len(); cangrow < len(cando) {
+                                       cando = cando[:cangrow]
+                               }
+                       }
+
+                       if cur == len(fn.segments) {
+                               // ptr is at EOF, filesize is changing.
+                               fn.fileinfo.size += int64(len(cando))
+                       } else if el := fn.segments[cur].Len(); el <= len(cando) {
+                               // cando is long enough that we won't
+                               // need cur any more. shrink cando to
+                               // be exactly as long as cur
+                               // (otherwise we'd accidentally shift
+                               // the effective position of all
+                               // segments after cur).
+                               cando = cando[:el]
+                               copy(fn.segments[cur:], fn.segments[cur+1:])
+                               fn.segments = fn.segments[:len(fn.segments)-1]
+                       } else {
+                               // shrink cur by the same #bytes we're growing prev
+                               fn.segments[cur] = fn.segments[cur].Slice(len(cando), -1)
+                       }
+
+                       if prevAppendable {
+                               // Grow prev.
+                               ptr.segmentIdx--
+                               ptr.segmentOff = fn.segments[prev].Len()
+                               fn.segments[prev].(*memSegment).Truncate(ptr.segmentOff + len(cando))
+                               fn.memsize += int64(len(cando))
+                               ptr.repacked++
+                               fn.repacked++
+                       } else {
+                               // Insert a segment between prev and
+                               // cur, and advance prev/cur.
+                               fn.segments = append(fn.segments, nil)
+                               if cur < len(fn.segments) {
+                                       copy(fn.segments[cur+1:], fn.segments[cur:])
+                                       ptr.repacked++
+                                       fn.repacked++
+                               } else {
+                                       // appending a new segment does
+                                       // not invalidate any ptrs
+                               }
+                               seg := &memSegment{}
+                               seg.Truncate(len(cando))
+                               fn.memsize += int64(len(cando))
+                               fn.segments[cur] = seg
+                               cur++
+                               prev++
+                       }
+               }
+
+               // Finally we can copy bytes from cando to the current segment.
+               fn.segments[ptr.segmentIdx].(*memSegment).WriteAt(cando, ptr.segmentOff)
+               n += len(cando)
+               p = p[len(cando):]
+
+               ptr.off += int64(len(cando))
+               ptr.segmentOff += len(cando)
+               if ptr.segmentOff >= maxBlockSize {
+                       fn.pruneMemSegments()
+               }
+               if fn.segments[ptr.segmentIdx].Len() == ptr.segmentOff {
+                       ptr.segmentOff = 0
+                       ptr.segmentIdx++
+               }
+
+               fn.fileinfo.modTime = time.Now()
+       }
+       return
+}
+
+// Write some data out to disk to reduce memory use. Caller must have
+// write lock.
+func (fn *filenode) pruneMemSegments() {
+       // TODO: share code with (*dirnode)sync()
+       // TODO: pack/flush small blocks too, when fragmented
+       if fn.throttle == nil {
+               // TODO: share a throttle with filesystem
+               fn.throttle = newThrottle(writeAheadBlocks)
+       }
+       for idx, seg := range fn.segments {
+               seg, ok := seg.(*memSegment)
+               if !ok || seg.Len() < maxBlockSize || seg.flushing != nil {
+                       continue
+               }
+               // Setting seg.flushing guarantees seg.buf will not be
+               // modified in place: WriteAt and Truncate will
+               // allocate a new buf instead, if necessary.
+               idx, buf := idx, seg.buf
+               done := make(chan struct{})
+               seg.flushing = done
+               // If lots of background writes are already in
+               // progress, block here until one finishes, rather
+               // than pile up an unlimited number of buffered writes
+               // and network flush operations.
+               fn.throttle.Acquire()
+               go func() {
+                       defer close(done)
+                       locator, _, err := fn.FS().PutB(buf)
+                       fn.throttle.Release()
+                       fn.Lock()
+                       defer fn.Unlock()
+                       if curbuf := seg.buf[:1]; &curbuf[0] != &buf[0] {
+                               // A new seg.buf has been allocated.
+                               return
+                       }
+                       seg.flushing = nil
+                       if err != nil {
+                               // TODO: stall (or return errors from)
+                               // subsequent writes until flushing
+                               // starts to succeed.
+                               return
+                       }
+                       if len(fn.segments) <= idx || fn.segments[idx] != seg || len(seg.buf) != len(buf) {
+                               // Segment has been dropped/moved/resized.
+                               return
+                       }
+                       fn.memsize -= int64(len(buf))
+                       fn.segments[idx] = storedSegment{
+                               kc:      fn.FS(),
+                               locator: locator,
+                               size:    len(buf),
+                               offset:  0,
+                               length:  len(buf),
+                       }
+               }()
+       }
+}
+
+// Block until all pending pruneMemSegments work is finished. Caller
+// must NOT have lock.
+func (fn *filenode) waitPrune() {
+       var pending []<-chan struct{}
+       fn.Lock()
+       for _, seg := range fn.segments {
+               if seg, ok := seg.(*memSegment); ok && seg.flushing != nil {
+                       pending = append(pending, seg.flushing)
+               }
+       }
+       fn.Unlock()
+       for _, p := range pending {
+               <-p
+       }
+}
+
+type dirnode struct {
+       fs *collectionFileSystem
+       treenode
+}
+
+func (dn *dirnode) FS() FileSystem {
+       return dn.fs
+}
+
+func (dn *dirnode) Child(name string, replace func(inode) (inode, error)) (inode, error) {
+       if dn == dn.fs.rootnode() && name == ".arvados#collection" {
+               gn := &getternode{Getter: func() ([]byte, error) {
+                       var coll Collection
+                       var err error
+                       coll.ManifestText, err = dn.fs.MarshalManifest(".")
+                       if err != nil {
+                               return nil, err
+                       }
+                       data, err := json.Marshal(&coll)
+                       if err == nil {
+                               data = append(data, '\n')
+                       }
+                       return data, err
+               }}
+               gn.SetParent(dn, name)
+               return gn, nil
+       }
+       return dn.treenode.Child(name, replace)
+}
+
+type fnSegmentRef struct {
+       fn  *filenode
+       idx int
+}
+
+// commitBlock concatenates the data from the given filenode segments
+// (which must be *memSegments), writes the data out to Keep as a
+// single block, and replaces the filenodes' *memSegments with
+// storedSegments that reference the relevant portions of the new
+// block.
+//
+// Caller must have write lock.
+func (dn *dirnode) commitBlock(ctx context.Context, throttle *throttle, refs []fnSegmentRef) error {
+       if len(refs) == 0 {
+               return nil
+       }
+       throttle.Acquire()
+       defer throttle.Release()
+       if err := ctx.Err(); err != nil {
+               return err
+       }
+       block := make([]byte, 0, maxBlockSize)
+       for _, ref := range refs {
+               block = append(block, ref.fn.segments[ref.idx].(*memSegment).buf...)
+       }
+       locator, _, err := dn.fs.PutB(block)
+       if err != nil {
+               return err
+       }
+       off := 0
+       for _, ref := range refs {
+               data := ref.fn.segments[ref.idx].(*memSegment).buf
+               ref.fn.segments[ref.idx] = storedSegment{
+                       kc:      dn.fs,
+                       locator: locator,
+                       size:    len(block),
+                       offset:  off,
+                       length:  len(data),
+               }
+               off += len(data)
+               ref.fn.memsize -= int64(len(data))
+       }
+       return nil
+}
+
+// sync flushes in-memory data and remote block references (for the
+// children with the given names, which must be children of dn) to
+// local persistent storage. Caller must have write lock on dn and the
+// named children.
+func (dn *dirnode) sync(ctx context.Context, throttle *throttle, names []string) error {
+       cg := newContextGroup(ctx)
+       defer cg.Cancel()
+
+       goCommit := func(refs []fnSegmentRef) {
+               cg.Go(func() error {
+                       return dn.commitBlock(cg.Context(), throttle, refs)
+               })
+       }
+
+       var pending []fnSegmentRef
+       var pendingLen int
+       localLocator := map[string]string{}
+       for _, name := range names {
+               fn, ok := dn.inodes[name].(*filenode)
+               if !ok {
+                       continue
+               }
+               for idx, seg := range fn.segments {
+                       switch seg := seg.(type) {
+                       case storedSegment:
+                               loc, ok := localLocator[seg.locator]
+                               if !ok {
+                                       var err error
+                                       loc, err = dn.fs.LocalLocator(seg.locator)
+                                       if err != nil {
+                                               return err
+                                       }
+                                       localLocator[seg.locator] = loc
+                               }
+                               seg.locator = loc
+                               fn.segments[idx] = seg
+                       case *memSegment:
+                               if seg.Len() > maxBlockSize/2 {
+                                       goCommit([]fnSegmentRef{{fn, idx}})
+                                       continue
+                               }
+                               if pendingLen+seg.Len() > maxBlockSize {
+                                       goCommit(pending)
+                                       pending = nil
+                                       pendingLen = 0
+                               }
+                               pending = append(pending, fnSegmentRef{fn, idx})
+                               pendingLen += seg.Len()
+                       default:
+                               panic(fmt.Sprintf("can't sync segment type %T", seg))
+                       }
+               }
+       }
+       goCommit(pending)
+       return cg.Wait()
+}
+
+// caller must have write lock.
+func (dn *dirnode) marshalManifest(ctx context.Context, prefix string, throttle *throttle) (string, error) {
+       cg := newContextGroup(ctx)
+       defer cg.Cancel()
+
+       if len(dn.inodes) == 0 {
+               if prefix == "." {
+                       return "", nil
+               }
+               // Express the existence of an empty directory by
+               // adding an empty file named `\056`, which (unlike
+               // the more obvious spelling `.`) is accepted by the
+               // API's manifest validator.
+               return manifestEscape(prefix) + " d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\n", nil
+       }
+
+       names := make([]string, 0, len(dn.inodes))
+       for name := range dn.inodes {
+               names = append(names, name)
+       }
+       sort.Strings(names)
+
+       // Wait for children to finish any pending write operations
+       // before locking them.
+       for _, name := range names {
+               node := dn.inodes[name]
+               if fn, ok := node.(*filenode); ok {
+                       fn.waitPrune()
+               }
+       }
+
+       var dirnames []string
+       var filenames []string
+       for _, name := range names {
+               node := dn.inodes[name]
+               node.Lock()
+               defer node.Unlock()
+               switch node := node.(type) {
+               case *dirnode:
+                       dirnames = append(dirnames, name)
+               case *filenode:
+                       filenames = append(filenames, name)
+               default:
+                       panic(fmt.Sprintf("can't marshal inode type %T", node))
+               }
+       }
+
+       subdirs := make([]string, len(dirnames))
+       rootdir := ""
+       for i, name := range dirnames {
+               i, name := i, name
+               cg.Go(func() error {
+                       txt, err := dn.inodes[name].(*dirnode).marshalManifest(cg.Context(), prefix+"/"+name, throttle)
+                       subdirs[i] = txt
+                       return err
+               })
+       }
+
+       cg.Go(func() error {
+               var streamLen int64
+               type filepart struct {
+                       name   string
+                       offset int64
+                       length int64
+               }
+
+               var fileparts []filepart
+               var blocks []string
+               if err := dn.sync(cg.Context(), throttle, names); err != nil {
+                       return err
+               }
+               for _, name := range filenames {
+                       node := dn.inodes[name].(*filenode)
+                       if len(node.segments) == 0 {
+                               fileparts = append(fileparts, filepart{name: name})
+                               continue
+                       }
+                       for _, seg := range node.segments {
+                               switch seg := seg.(type) {
+                               case storedSegment:
+                                       if len(blocks) > 0 && blocks[len(blocks)-1] == seg.locator {
+                                               streamLen -= int64(seg.size)
+                                       } else {
+                                               blocks = append(blocks, seg.locator)
+                                       }
+                                       next := filepart{
+                                               name:   name,
+                                               offset: streamLen + int64(seg.offset),
+                                               length: int64(seg.length),
+                                       }
+                                       if prev := len(fileparts) - 1; prev >= 0 &&
+                                               fileparts[prev].name == name &&
+                                               fileparts[prev].offset+fileparts[prev].length == next.offset {
+                                               fileparts[prev].length += next.length
+                                       } else {
+                                               fileparts = append(fileparts, next)
+                                       }
+                                       streamLen += int64(seg.size)
+                               default:
+                                       // This can't happen: we
+                                       // haven't unlocked since
+                                       // calling sync().
+                                       panic(fmt.Sprintf("can't marshal segment type %T", seg))
+                               }
+                       }
+               }
+               var filetokens []string
+               for _, s := range fileparts {
+                       filetokens = append(filetokens, fmt.Sprintf("%d:%d:%s", s.offset, s.length, manifestEscape(s.name)))
+               }
+               if len(filetokens) == 0 {
+                       return nil
+               } else if len(blocks) == 0 {
+                       blocks = []string{"d41d8cd98f00b204e9800998ecf8427e+0"}
+               }
+               rootdir = manifestEscape(prefix) + " " + strings.Join(blocks, " ") + " " + strings.Join(filetokens, " ") + "\n"
+               return nil
+       })
+       err := cg.Wait()
+       return rootdir + strings.Join(subdirs, ""), err
+}
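+
+// For reference, each stream line emitted above has the form
+// "<escaped dirname> <block locators> <position:length:filename
+// tokens>", e.g.:
+//
+//	./dir1 3858f62230ac3c915f300c664312c63f+6 3:3:bar 0:3:foo
+//
+// Adjacent fileparts of the same file are merged into one token, and
+// a block that repeats the previous segment's locator is not listed
+// twice.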
+
+func (dn *dirnode) loadManifest(txt string) error {
+       var dirname string
+       streams := strings.Split(txt, "\n")
+       if streams[len(streams)-1] != "" {
+               return fmt.Errorf("line %d: no trailing newline", len(streams))
+       }
+       streams = streams[:len(streams)-1]
+       segments := []storedSegment{}
+       for i, stream := range streams {
+               lineno := i + 1
+               var anyFileTokens bool
+               var pos int64
+               var segIdx int
+               segments = segments[:0]
+               for i, token := range strings.Split(stream, " ") {
+                       if i == 0 {
+                               dirname = manifestUnescape(token)
+                               continue
+                       }
+                       if !strings.Contains(token, ":") {
+                               if anyFileTokens {
+                                       return fmt.Errorf("line %d: bad file segment %q", lineno, token)
+                               }
+                               toks := strings.SplitN(token, "+", 3)
+                               if len(toks) < 2 {
+                                       return fmt.Errorf("line %d: bad locator %q", lineno, token)
+                               }
+                               length, err := strconv.ParseInt(toks[1], 10, 32)
+                               if err != nil || length < 0 {
+                                       return fmt.Errorf("line %d: bad locator %q", lineno, token)
+                               }
+                               segments = append(segments, storedSegment{
+                                       locator: token,
+                                       size:    int(length),
+                                       offset:  0,
+                                       length:  int(length),
+                               })
+                               continue
+                       } else if len(segments) == 0 {
+                               return fmt.Errorf("line %d: bad locator %q", lineno, token)
+                       }
+
+                       toks := strings.SplitN(token, ":", 3)
+                       if len(toks) != 3 {
+                               return fmt.Errorf("line %d: bad file segment %q", lineno, token)
+                       }
+                       anyFileTokens = true
+
+                       offset, err := strconv.ParseInt(toks[0], 10, 64)
+                       if err != nil || offset < 0 {
+                               return fmt.Errorf("line %d: bad file segment %q", lineno, token)
+                       }
+                       length, err := strconv.ParseInt(toks[1], 10, 64)
+                       if err != nil || length < 0 {
+                               return fmt.Errorf("line %d: bad file segment %q", lineno, token)
+                       }
+                       name := dirname + "/" + manifestUnescape(toks[2])
+                       fnode, err := dn.createFileAndParents(name)
+                       if fnode == nil && err == nil && length == 0 {
+                               // Special case: an empty file used as
+                               // a marker to preserve an otherwise
+                               // empty directory in a manifest.
+                               continue
+                       }
+                       if err != nil || (fnode == nil && length != 0) {
+                               return fmt.Errorf("line %d: cannot use path %q with length %d: %s", lineno, name, length, err)
+                       }
+                       // Map the stream offset/range coordinates to
+                       // block/offset/range coordinates and add
+                       // corresponding storedSegments to the filenode
+                       if pos > offset {
+                               // Can't continue where we left off.
+                               // TODO: binary search instead of
+                               // rewinding all the way (but this
+                               // situation might be rare anyway)
+                               segIdx, pos = 0, 0
+                       }
+                       for next := int64(0); segIdx < len(segments); segIdx++ {
+                               seg := segments[segIdx]
+                               next = pos + int64(seg.Len())
+                               if next <= offset || seg.Len() == 0 {
+                                       pos = next
+                                       continue
+                               }
+                               if pos >= offset+length {
+                                       break
+                               }
+                               var blkOff int
+                               if pos < offset {
+                                       blkOff = int(offset - pos)
+                               }
+                               blkLen := seg.Len() - blkOff
+                               if pos+int64(blkOff+blkLen) > offset+length {
+                                       blkLen = int(offset + length - pos - int64(blkOff))
+                               }
+                               fnode.appendSegment(storedSegment{
+                                       kc:      dn.fs,
+                                       locator: seg.locator,
+                                       size:    seg.size,
+                                       offset:  blkOff,
+                                       length:  blkLen,
+                               })
+                               if next > offset+length {
+                                       break
+                               } else {
+                                       pos = next
+                               }
+                       }
+                       if segIdx == len(segments) && pos < offset+length {
+                               return fmt.Errorf("line %d: invalid segment in %d-byte stream: %q", lineno, pos, token)
+                       }
+               }
+               if !anyFileTokens {
+                       return fmt.Errorf("line %d: no file segments", lineno)
+               } else if len(segments) == 0 {
+                       return fmt.Errorf("line %d: no locators", lineno)
+               } else if dirname == "" {
+                       return fmt.Errorf("line %d: no stream name", lineno)
+               }
+       }
+       return nil
+}
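+
+// To illustrate the coordinate mapping above: given the stream
+// ". aaa+3 bbb+3 1:4:f" (locators abbreviated), file f covers
+// stream bytes [1,5), so it is loaded as two storedSegments: block
+// aaa with offset=1 length=2, then block bbb with offset=0 length=2.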
+
+// only safe to call from loadManifest -- no locking.
+//
+// If path is a "parent directory exists" marker (the last path
+// component is "."), the returned values are both nil.
+func (dn *dirnode) createFileAndParents(path string) (fn *filenode, err error) {
+       var node inode = dn
+       names := strings.Split(path, "/")
+       basename := names[len(names)-1]
+       for _, name := range names[:len(names)-1] {
+               switch name {
+               case "", ".":
+                       continue
+               case "..":
+                       if node == dn {
+                               // can't be sure parent will be a *dirnode
+                               return nil, ErrInvalidArgument
+                       }
+                       node = node.Parent()
+                       continue
+               }
+               node, err = node.Child(name, func(child inode) (inode, error) {
+                       if child == nil {
+                               child, err := node.FS().newNode(name, 0755|os.ModeDir, node.Parent().FileInfo().ModTime())
+                               if err != nil {
+                                       return nil, err
+                               }
+                               child.SetParent(node, name)
+                               return child, nil
+                       } else if !child.IsDir() {
+                               return child, ErrFileExists
+                       } else {
+                               return child, nil
+                       }
+               })
+               if err != nil {
+                       return
+               }
+       }
+       if basename == "." {
+               return
+       } else if !permittedName(basename) {
+               err = fmt.Errorf("invalid file part %q in path %q", basename, path)
+               return
+       }
+       _, err = node.Child(basename, func(child inode) (inode, error) {
+               switch child := child.(type) {
+               case nil:
+                       child, err = node.FS().newNode(basename, 0755, node.FileInfo().ModTime())
+                       if err != nil {
+                               return nil, err
+                       }
+                       child.SetParent(node, basename)
+                       fn = child.(*filenode)
+                       return child, nil
+               case *filenode:
+                       fn = child
+                       return child, nil
+               case *dirnode:
+                       return child, ErrIsDirectory
+               default:
+                       return child, ErrInvalidArgument
+               }
+       })
+       return
+}
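+
+// For example, createFileAndParents("./a/b/c") creates directory
+// nodes a and a/b as needed (with mode 0755) and returns the
+// *filenode for c, while createFileAndParents("./a/b/.") just
+// ensures a/b exists and returns (nil, nil).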
+
+func (dn *dirnode) TreeSize() (bytes int64) {
+       dn.RLock()
+       defer dn.RUnlock()
+       for _, i := range dn.inodes {
+               switch i := i.(type) {
+               case *filenode:
+                       bytes += i.Size()
+               case *dirnode:
+                       bytes += i.TreeSize()
+               }
+       }
+       return
+}
+
+type segment interface {
+       io.ReaderAt
+       Len() int
+       // Return a new segment with a subsection of the data from this
+       // one. length<0 means length=Len()-off.
+       Slice(off int, length int) segment
+}
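+
+// For example, slicing a 6-byte segment with Slice(1, 2) yields a
+// 2-byte segment covering bytes [1,3) of the original, and
+// Slice(1, -1) yields the remaining 5 bytes.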
+
+type memSegment struct {
+       buf []byte
+       // If flushing is not nil, then a) buf is being shared by a
+       // pruneMemSegments goroutine, and must be copied on write;
+       // and b) the flushing channel will close when the goroutine
+       // finishes, whether it succeeds or not.
+       flushing <-chan struct{}
+}
+
+func (me *memSegment) Len() int {
+       return len(me.buf)
+}
+
+func (me *memSegment) Slice(off, length int) segment {
+       if length < 0 {
+               length = len(me.buf) - off
+       }
+       buf := make([]byte, length)
+       copy(buf, me.buf[off:])
+       return &memSegment{buf: buf}
+}
+
+func (me *memSegment) Truncate(n int) {
+       if n > cap(me.buf) || (me.flushing != nil && n > len(me.buf)) {
+               newsize := 1024
+               for newsize < n {
+                       newsize = newsize << 2
+               }
+               newbuf := make([]byte, n, newsize)
+               copy(newbuf, me.buf)
+               me.buf, me.flushing = newbuf, nil
+       } else {
+               // reclaim existing capacity, and zero reclaimed part
+               oldlen := len(me.buf)
+               me.buf = me.buf[:n]
+               for i := oldlen; i < n; i++ {
+                       me.buf[i] = 0
+               }
+       }
+}
+
+func (me *memSegment) WriteAt(p []byte, off int) {
+       if off+len(p) > len(me.buf) {
+               panic("overflowed segment")
+       }
+       if me.flushing != nil {
+               me.buf, me.flushing = append([]byte(nil), me.buf...), nil
+       }
+       copy(me.buf[off:], p)
+}
+
+func (me *memSegment) ReadAt(p []byte, off int64) (n int, err error) {
+       if off > int64(me.Len()) {
+               err = io.EOF
+               return
+       }
+       n = copy(p, me.buf[int(off):])
+       if n < len(p) {
+               err = io.EOF
+       }
+       return
+}
+
+type storedSegment struct {
+       kc      fsBackend
+       locator string
+       size    int // size of stored block (also encoded in locator)
+       offset  int // position of segment within the stored block
+       length  int // bytes in this segment (offset + length <= size)
+}
+
+func (se storedSegment) Len() int {
+       return se.length
+}
+
+func (se storedSegment) Slice(n, size int) segment {
+       se.offset += n
+       se.length -= n
+       if size >= 0 && se.length > size {
+               se.length = size
+       }
+       return se
+}
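+
+// Unlike memSegment.Slice, which copies the requested byte range,
+// storedSegment.Slice is a cheap value copy that merely narrows the
+// offset/length window into the same stored block.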
+
+func (se storedSegment) ReadAt(p []byte, off int64) (n int, err error) {
+       if off > int64(se.length) {
+               return 0, io.EOF
+       }
+       maxlen := se.length - int(off)
+       if len(p) > maxlen {
+               p = p[:maxlen]
+               n, err = se.kc.ReadAt(se.locator, p, int(off)+se.offset)
+               if err == nil {
+                       err = io.EOF
+               }
+               return
+       }
+       return se.kc.ReadAt(se.locator, p, int(off)+se.offset)
+}
+
+func canonicalName(name string) string {
+       name = path.Clean("/" + name)
+       if name == "/" || name == "./" {
+               name = "."
+       } else if strings.HasPrefix(name, "/") {
+               name = "." + name
+       }
+       return name
+}
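+
+// For example, canonicalName("///dir1//.//") == "./dir1" and
+// canonicalName("/./..") == ".".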
+
+var manifestEscapeSeq = regexp.MustCompile(`\\([0-7]{3}|\\)`)
+
+func manifestUnescapeFunc(seq string) string {
+       if seq == `\\` {
+               return `\`
+       }
+       i, err := strconv.ParseUint(seq[1:], 8, 8)
+       if err != nil {
+               // Invalid escape sequence: can't unescape.
+               return seq
+       }
+       return string([]byte{byte(i)})
+}
+
+func manifestUnescape(s string) string {
+       return manifestEscapeSeq.ReplaceAllStringFunc(s, manifestUnescapeFunc)
+}
+
+var manifestEscapedChar = regexp.MustCompile(`[\000-\040:\s\\]`)
+
+func manifestEscapeFunc(seq string) string {
+       return fmt.Sprintf("\\%03o", byte(seq[0]))
+}
+
+func manifestEscape(s string) string {
+       return manifestEscapedChar.ReplaceAllStringFunc(s, manifestEscapeFunc)
+}
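+
+// For example, manifestEscape("new-file 1") == `new-file\0401` and
+// manifestUnescape(`new-file\0401`) == "new-file 1". A literal
+// backslash escapes to `\134`; manifestUnescape also accepts the
+// `\\` spelling.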
diff --git a/sdk/go/arvados/fs_collection_test.go b/sdk/go/arvados/fs_collection_test.go
new file mode 100644 (file)
index 0000000..2ae2bd8
--- /dev/null
@@ -0,0 +1,1221 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "bytes"
+       "crypto/md5"
+       "crypto/sha1"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "math/rand"
+       "net/http"
+       "os"
+       "regexp"
+       "runtime"
+       "strings"
+       "sync"
+       "sync/atomic"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&CollectionFSSuite{})
+
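+// keepClientStub is an in-memory stand-in for the Keep client: PutB
+// stores each block in a map keyed by the md5 part of its locator
+// (locator[:32]), ReadAt serves blocks back from that map, and
+// LocalLocator exchanges any +A/+R signature for a fresh fake +A
+// signature.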
+type keepClientStub struct {
+       blocks      map[string][]byte
+       refreshable map[string]bool
+       onPut       func(bufcopy []byte) // called from PutB, before acquiring lock
+       sync.RWMutex
+}
+
+var errStub404 = errors.New("404 block not found")
+
+func (kcs *keepClientStub) ReadAt(locator string, p []byte, off int) (int, error) {
+       kcs.RLock()
+       defer kcs.RUnlock()
+       buf := kcs.blocks[locator[:32]]
+       if buf == nil {
+               return 0, errStub404
+       }
+       return copy(p, buf[off:]), nil
+}
+
+func (kcs *keepClientStub) PutB(p []byte) (string, int, error) {
+       locator := fmt.Sprintf("%x+%d+A12345@abcde", md5.Sum(p), len(p))
+       buf := make([]byte, len(p))
+       copy(buf, p)
+       if kcs.onPut != nil {
+               kcs.onPut(buf)
+       }
+       kcs.Lock()
+       defer kcs.Unlock()
+       kcs.blocks[locator[:32]] = buf
+       return locator, 1, nil
+}
+
+var localOrRemoteSignature = regexp.MustCompile(`\+[AR][^+]*`)
+
+func (kcs *keepClientStub) LocalLocator(locator string) (string, error) {
+       kcs.Lock()
+       defer kcs.Unlock()
+       if strings.Contains(locator, "+R") {
+               if len(locator) < 32 {
+                       return "", fmt.Errorf("bad locator: %q", locator)
+               }
+               if _, ok := kcs.blocks[locator[:32]]; !ok && !kcs.refreshable[locator[:32]] {
+                       return "", fmt.Errorf("kcs.refreshable[%q]==false", locator)
+               }
+       }
+       fakeSig := fmt.Sprintf("+A%x@%x", sha1.Sum(nil), time.Now().Add(time.Hour*24*14).Unix())
+       return localOrRemoteSignature.ReplaceAllLiteralString(locator, fakeSig), nil
+}
+
+type CollectionFSSuite struct {
+       client *Client
+       coll   Collection
+       fs     CollectionFileSystem
+       kc     *keepClientStub
+}
+
+func (s *CollectionFSSuite) SetUpTest(c *check.C) {
+       s.client = NewClientFromEnv()
+       err := s.client.RequestAndDecode(&s.coll, "GET", "arvados/v1/collections/"+arvadostest.FooAndBarFilesInDirUUID, nil, nil)
+       c.Assert(err, check.IsNil)
+       s.kc = &keepClientStub{
+               blocks: map[string][]byte{
+                       "3858f62230ac3c915f300c664312c63f": []byte("foobar"),
+               }}
+       s.fs, err = s.coll.FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+}
+
+func (s *CollectionFSSuite) TestHttpFileSystemInterface(c *check.C) {
+       _, ok := s.fs.(http.FileSystem)
+       c.Check(ok, check.Equals, true)
+}
+
+func (s *CollectionFSSuite) TestColonInFilename(c *check.C) {
+       fs, err := (&Collection{
+               ManifestText: "./foo:foo 3858f62230ac3c915f300c664312c63f+3 0:3:bar:bar\n",
+       }).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+
+       f, err := fs.Open("/foo:foo")
+       c.Assert(err, check.IsNil)
+
+       fis, err := f.Readdir(0)
+       c.Check(err, check.IsNil)
+       c.Check(len(fis), check.Equals, 1)
+       c.Check(fis[0].Name(), check.Equals, "bar:bar")
+}
+
+func (s *CollectionFSSuite) TestReaddirFull(c *check.C) {
+       f, err := s.fs.Open("/dir1")
+       c.Assert(err, check.IsNil)
+
+       st, err := f.Stat()
+       c.Assert(err, check.IsNil)
+       c.Check(st.Size(), check.Equals, int64(2))
+       c.Check(st.IsDir(), check.Equals, true)
+
+       fis, err := f.Readdir(0)
+       c.Check(err, check.IsNil)
+       c.Check(len(fis), check.Equals, 2)
+       if len(fis) > 0 {
+               c.Check(fis[0].Size(), check.Equals, int64(3))
+       }
+}
+
+func (s *CollectionFSSuite) TestReaddirLimited(c *check.C) {
+       f, err := s.fs.Open("./dir1")
+       c.Assert(err, check.IsNil)
+
+       fis, err := f.Readdir(1)
+       c.Check(err, check.IsNil)
+       c.Check(len(fis), check.Equals, 1)
+       if len(fis) > 0 {
+               c.Check(fis[0].Size(), check.Equals, int64(3))
+       }
+
+       fis, err = f.Readdir(1)
+       c.Check(err, check.IsNil)
+       c.Check(len(fis), check.Equals, 1)
+       if len(fis) > 0 {
+               c.Check(fis[0].Size(), check.Equals, int64(3))
+       }
+
+       fis, err = f.Readdir(1)
+       c.Check(len(fis), check.Equals, 0)
+       c.Check(err, check.NotNil)
+       c.Check(err, check.Equals, io.EOF)
+
+       f, err = s.fs.Open("dir1")
+       c.Assert(err, check.IsNil)
+       fis, err = f.Readdir(1)
+       c.Check(len(fis), check.Equals, 1)
+       c.Assert(err, check.IsNil)
+       fis, err = f.Readdir(2)
+       c.Check(len(fis), check.Equals, 1)
+       c.Assert(err, check.IsNil)
+       fis, err = f.Readdir(2)
+       c.Check(len(fis), check.Equals, 0)
+       c.Assert(err, check.Equals, io.EOF)
+}
+
+func (s *CollectionFSSuite) TestPathMunge(c *check.C) {
+       for _, path := range []string{".", "/", "./", "///", "/../", "/./.."} {
+               f, err := s.fs.Open(path)
+               c.Assert(err, check.IsNil)
+
+               st, err := f.Stat()
+               c.Assert(err, check.IsNil)
+               c.Check(st.Size(), check.Equals, int64(1))
+               c.Check(st.IsDir(), check.Equals, true)
+       }
+       for _, path := range []string{"/dir1", "dir1", "./dir1", "///dir1//.//", "../dir1/../dir1/"} {
+               c.Logf("%q", path)
+               f, err := s.fs.Open(path)
+               c.Assert(err, check.IsNil)
+
+               st, err := f.Stat()
+               c.Assert(err, check.IsNil)
+               c.Check(st.Size(), check.Equals, int64(2))
+               c.Check(st.IsDir(), check.Equals, true)
+       }
+}
+
+func (s *CollectionFSSuite) TestNotExist(c *check.C) {
+       for _, path := range []string{"/no", "no", "./no", "n/o", "/n/o"} {
+               f, err := s.fs.Open(path)
+               c.Assert(f, check.IsNil)
+               c.Assert(err, check.NotNil)
+               c.Assert(os.IsNotExist(err), check.Equals, true)
+       }
+}
+
+func (s *CollectionFSSuite) TestReadOnlyFile(c *check.C) {
+       f, err := s.fs.OpenFile("/dir1/foo", os.O_RDONLY, 0)
+       c.Assert(err, check.IsNil)
+       st, err := f.Stat()
+       c.Assert(err, check.IsNil)
+       c.Check(st.Size(), check.Equals, int64(3))
+       n, err := f.Write([]byte("bar"))
+       c.Check(n, check.Equals, 0)
+       c.Check(err, check.Equals, ErrReadOnlyFile)
+}
+
+func (s *CollectionFSSuite) TestCreateFile(c *check.C) {
+       f, err := s.fs.OpenFile("/new-file 1", os.O_RDWR|os.O_CREATE, 0)
+       c.Assert(err, check.IsNil)
+       st, err := f.Stat()
+       c.Assert(err, check.IsNil)
+       c.Check(st.Size(), check.Equals, int64(0))
+
+       n, err := f.Write([]byte("bar"))
+       c.Check(n, check.Equals, 3)
+       c.Check(err, check.IsNil)
+
+       c.Check(f.Close(), check.IsNil)
+
+       f, err = s.fs.OpenFile("/new-file 1", os.O_RDWR|os.O_CREATE|os.O_EXCL, 0)
+       c.Check(f, check.IsNil)
+       c.Assert(err, check.NotNil)
+
+       f, err = s.fs.OpenFile("/new-file 1", os.O_RDWR, 0)
+       c.Assert(err, check.IsNil)
+       st, err = f.Stat()
+       c.Assert(err, check.IsNil)
+       c.Check(st.Size(), check.Equals, int64(3))
+
+       c.Check(f.Close(), check.IsNil)
+
+       m, err := s.fs.MarshalManifest(".")
+       c.Assert(err, check.IsNil)
+       c.Check(m, check.Matches, `. 37b51d194a7513e45b56f6524f2d51f2\+3\+\S+ 0:3:new-file\\0401\n./dir1 .* 3:3:bar 0:3:foo\n`)
+}
+
+func (s *CollectionFSSuite) TestReadWriteFile(c *check.C) {
+       maxBlockSize = 8
+       defer func() { maxBlockSize = 2 << 26 }()
+
+       f, err := s.fs.OpenFile("/dir1/foo", os.O_RDWR, 0)
+       c.Assert(err, check.IsNil)
+       defer f.Close()
+       st, err := f.Stat()
+       c.Assert(err, check.IsNil)
+       c.Check(st.Size(), check.Equals, int64(3))
+
+       f2, err := s.fs.OpenFile("/dir1/foo", os.O_RDWR, 0)
+       c.Assert(err, check.IsNil)
+       defer f2.Close()
+
+       buf := make([]byte, 64)
+       n, err := f.Read(buf)
+       c.Check(n, check.Equals, 3)
+       c.Check(err, check.Equals, io.EOF)
+       c.Check(string(buf[:3]), check.DeepEquals, "foo")
+
+       pos, err := f.Seek(-2, io.SeekCurrent)
+       c.Check(pos, check.Equals, int64(1))
+       c.Check(err, check.IsNil)
+
+       // Split a storedSegment in two, and insert a memSegment
+       n, err = f.Write([]byte("*"))
+       c.Check(n, check.Equals, 1)
+       c.Check(err, check.IsNil)
+
+       pos, err = f.Seek(0, io.SeekCurrent)
+       c.Check(pos, check.Equals, int64(2))
+       c.Check(err, check.IsNil)
+
+       pos, err = f.Seek(0, io.SeekStart)
+       c.Check(pos, check.Equals, int64(0))
+       c.Check(err, check.IsNil)
+
+       rbuf, err := ioutil.ReadAll(f)
+       c.Check(len(rbuf), check.Equals, 3)
+       c.Check(err, check.IsNil)
+       c.Check(string(rbuf), check.Equals, "f*o")
+
+       // Write multiple blocks in one call
+       f.Seek(1, io.SeekStart)
+       n, err = f.Write([]byte("0123456789abcdefg"))
+       c.Check(n, check.Equals, 17)
+       c.Check(err, check.IsNil)
+       pos, err = f.Seek(0, io.SeekCurrent)
+       c.Check(pos, check.Equals, int64(18))
+       c.Check(err, check.IsNil)
+       pos, err = f.Seek(-18, io.SeekCurrent)
+       c.Check(pos, check.Equals, int64(0))
+       c.Check(err, check.IsNil)
+       n, err = io.ReadFull(f, buf)
+       c.Check(n, check.Equals, 18)
+       c.Check(err, check.Equals, io.ErrUnexpectedEOF)
+       c.Check(string(buf[:n]), check.Equals, "f0123456789abcdefg")
+
+       buf2, err := ioutil.ReadAll(f2)
+       c.Check(err, check.IsNil)
+       c.Check(string(buf2), check.Equals, "f0123456789abcdefg")
+
+       // truncate to current size
+       err = f.Truncate(18)
+       c.Check(err, check.IsNil)
+       f2.Seek(0, io.SeekStart)
+       buf2, err = ioutil.ReadAll(f2)
+       c.Check(err, check.IsNil)
+       c.Check(string(buf2), check.Equals, "f0123456789abcdefg")
+
+       // shrink, discarding some data
+       f.Truncate(15)
+       f2.Seek(0, io.SeekStart)
+       buf2, err = ioutil.ReadAll(f2)
+       c.Check(err, check.IsNil)
+       c.Check(string(buf2), check.Equals, "f0123456789abcd")
+
+       // grow to partial block/segment
+       f.Truncate(20)
+       f2.Seek(0, io.SeekStart)
+       buf2, err = ioutil.ReadAll(f2)
+       c.Check(err, check.IsNil)
+       c.Check(string(buf2), check.Equals, "f0123456789abcd\x00\x00\x00\x00\x00")
+
+       f.Truncate(0)
+       f2.Seek(0, io.SeekStart)
+       f2.Write([]byte("12345678abcdefghijkl"))
+
+       // grow to block/segment boundary
+       f.Truncate(64)
+       f2.Seek(0, io.SeekStart)
+       buf2, err = ioutil.ReadAll(f2)
+       c.Check(err, check.IsNil)
+       c.Check(len(buf2), check.Equals, 64)
+       c.Check(len(f.(*filehandle).inode.(*filenode).segments), check.Equals, 8)
+
+       // shrink to block/segment boundary
+       err = f.Truncate(32)
+       c.Check(err, check.IsNil)
+       f2.Seek(0, io.SeekStart)
+       buf2, err = ioutil.ReadAll(f2)
+       c.Check(err, check.IsNil)
+       c.Check(len(buf2), check.Equals, 32)
+       c.Check(len(f.(*filehandle).inode.(*filenode).segments), check.Equals, 4)
+
+       // shrink to partial block/segment
+       err = f.Truncate(15)
+       c.Check(err, check.IsNil)
+       f2.Seek(0, io.SeekStart)
+       buf2, err = ioutil.ReadAll(f2)
+       c.Check(err, check.IsNil)
+       c.Check(string(buf2), check.Equals, "12345678abcdefg")
+       c.Check(len(f.(*filehandle).inode.(*filenode).segments), check.Equals, 2)
+
+       // Force flush to ensure the block "12345678" gets stored, so
+       // we know what to expect in the final manifest below.
+       _, err = s.fs.MarshalManifest(".")
+       c.Check(err, check.IsNil)
+
+       // Truncate to size=3 while f2's ptr is at 15
+       err = f.Truncate(3)
+       c.Check(err, check.IsNil)
+       buf2, err = ioutil.ReadAll(f2)
+       c.Check(err, check.IsNil)
+       c.Check(string(buf2), check.Equals, "")
+       f2.Seek(0, io.SeekStart)
+       buf2, err = ioutil.ReadAll(f2)
+       c.Check(err, check.IsNil)
+       c.Check(string(buf2), check.Equals, "123")
+       c.Check(len(f.(*filehandle).inode.(*filenode).segments), check.Equals, 1)
+
+       m, err := s.fs.MarshalManifest(".")
+       c.Check(err, check.IsNil)
+       m = regexp.MustCompile(`\+A[^\+ ]+`).ReplaceAllLiteralString(m, "")
+       c.Check(m, check.Equals, "./dir1 3858f62230ac3c915f300c664312c63f+6 25d55ad283aa400af464c76d713c07ad+8 3:3:bar 6:3:foo\n")
+       c.Check(s.fs.Size(), check.Equals, int64(6))
+}
+
+func (s *CollectionFSSuite) TestSeekSparse(c *check.C) {
+       fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+       f, err := fs.OpenFile("test", os.O_CREATE|os.O_RDWR, 0755)
+       c.Assert(err, check.IsNil)
+       defer f.Close()
+
+       checkSize := func(size int64) {
+               fi, err := f.Stat()
+               c.Assert(err, check.IsNil)
+               c.Check(fi.Size(), check.Equals, size)
+
+               f, err := fs.OpenFile("test", os.O_CREATE|os.O_RDWR, 0755)
+               c.Assert(err, check.IsNil)
+               defer f.Close()
+               fi, err = f.Stat()
+               c.Check(err, check.IsNil)
+               c.Check(fi.Size(), check.Equals, size)
+               pos, err := f.Seek(0, io.SeekEnd)
+               c.Check(err, check.IsNil)
+               c.Check(pos, check.Equals, size)
+       }
+
+       f.Seek(2, io.SeekEnd)
+       checkSize(0)
+       f.Write([]byte{1})
+       checkSize(3)
+
+       f.Seek(2, io.SeekCurrent)
+       checkSize(3)
+       f.Write([]byte{})
+       checkSize(5)
+
+       f.Seek(8, io.SeekStart)
+       checkSize(5)
+       n, err := f.Read(make([]byte, 1))
+       c.Check(n, check.Equals, 0)
+       c.Check(err, check.Equals, io.EOF)
+       checkSize(5)
+       f.Write([]byte{1, 2, 3})
+       checkSize(11)
+}
+
+func (s *CollectionFSSuite) TestMarshalCopiesRemoteBlocks(c *check.C) {
+       foo := "foo"
+       bar := "bar"
+       hash := map[string]string{
+               foo: fmt.Sprintf("%x", md5.Sum([]byte(foo))),
+               bar: fmt.Sprintf("%x", md5.Sum([]byte(bar))),
+       }
+
+       fs, err := (&Collection{
+               ManifestText: ". " + hash[foo] + "+3+Rzaaaa-foo@bab " + hash[bar] + "+3+A12345@ffffff 0:2:fo.txt 2:4:obar.txt\n",
+       }).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+       manifest, err := fs.MarshalManifest(".")
+       c.Check(manifest, check.Equals, "")
+       c.Check(err, check.NotNil)
+
+       s.kc.refreshable = map[string]bool{hash[bar]: true}
+
+       for _, sigIn := range []string{"Rzaaaa-foo@bab", "A12345@abcde"} {
+               fs, err = (&Collection{
+                       ManifestText: ". " + hash[foo] + "+3+A12345@fffff " + hash[bar] + "+3+" + sigIn + " 0:2:fo.txt 2:4:obar.txt\n",
+               }).FileSystem(s.client, s.kc)
+               c.Assert(err, check.IsNil)
+               manifest, err := fs.MarshalManifest(".")
+               c.Check(err, check.IsNil)
+               // Both blocks should now have +A signatures.
+               c.Check(manifest, check.Matches, `.*\+A.* .*\+A.*\n`)
+               c.Check(manifest, check.Not(check.Matches), `.*\+R.*\n`)
+       }
+}
+
+func (s *CollectionFSSuite) TestMarshalSmallBlocks(c *check.C) {
+       maxBlockSize = 8
+       defer func() { maxBlockSize = 2 << 26 }()
+
+       var err error
+       s.fs, err = (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+       for _, name := range []string{"foo", "bar", "baz"} {
+               f, err := s.fs.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0)
+               c.Assert(err, check.IsNil)
+               f.Write([]byte(name))
+               f.Close()
+       }
+
+       m, err := s.fs.MarshalManifest(".")
+       c.Check(err, check.IsNil)
+       m = regexp.MustCompile(`\+A[^\+ ]+`).ReplaceAllLiteralString(m, "")
+       c.Check(m, check.Equals, ". c3c23db5285662ef7172373df0003206+6 acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:bar 3:3:baz 6:3:foo\n")
+}
+
+func (s *CollectionFSSuite) TestMkdir(c *check.C) {
+       err := s.fs.Mkdir("foo/bar", 0755)
+       c.Check(err, check.Equals, os.ErrNotExist)
+
+       f, err := s.fs.OpenFile("foo/bar", os.O_CREATE, 0)
+       c.Check(err, check.Equals, os.ErrNotExist)
+
+       err = s.fs.Mkdir("foo", 0755)
+       c.Check(err, check.IsNil)
+
+       f, err = s.fs.OpenFile("foo/bar", os.O_CREATE|os.O_WRONLY, 0)
+       c.Check(err, check.IsNil)
+       if err == nil {
+               defer f.Close()
+               f.Write([]byte("foo"))
+       }
+
+       // mkdir fails if a file already exists with that name
+       err = s.fs.Mkdir("foo/bar", 0755)
+       c.Check(err, check.NotNil)
+
+       err = s.fs.Remove("foo/bar")
+       c.Check(err, check.IsNil)
+
+       // mkdir succeeds after the file is deleted
+       err = s.fs.Mkdir("foo/bar", 0755)
+       c.Check(err, check.IsNil)
+
+       // creating a file in a nonexistent subdir should still fail
+       f, err = s.fs.OpenFile("foo/bar/baz/foo.txt", os.O_CREATE|os.O_WRONLY, 0)
+       c.Check(err, check.Equals, os.ErrNotExist)
+
+       f, err = s.fs.OpenFile("foo/bar/foo.txt", os.O_CREATE|os.O_WRONLY, 0)
+       c.Check(err, check.IsNil)
+       if err == nil {
+               defer f.Close()
+               f.Write([]byte("foo"))
+       }
+
+       // creating foo/bar as a regular file should fail
+       f, err = s.fs.OpenFile("foo/bar", os.O_CREATE|os.O_EXCL, 0)
+       c.Check(err, check.NotNil)
+
+       // creating foo/bar as a directory should fail
+       f, err = s.fs.OpenFile("foo/bar", os.O_CREATE|os.O_EXCL, os.ModeDir)
+       c.Check(err, check.NotNil)
+       err = s.fs.Mkdir("foo/bar", 0755)
+       c.Check(err, check.NotNil)
+
+       m, err := s.fs.MarshalManifest(".")
+       c.Check(err, check.IsNil)
+       m = regexp.MustCompile(`\+A[^\+ ]+`).ReplaceAllLiteralString(m, "")
+       c.Check(m, check.Equals, "./dir1 3858f62230ac3c915f300c664312c63f+6 3:3:bar 0:3:foo\n./foo/bar acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n")
+}
+
+func (s *CollectionFSSuite) TestConcurrentWriters(c *check.C) {
+       if testing.Short() {
+               c.Skip("slow")
+       }
+
+       maxBlockSize = 8
+       defer func() { maxBlockSize = 2 << 26 }()
+
+       var wg sync.WaitGroup
+       for n := 0; n < 128; n++ {
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       f, err := s.fs.OpenFile("/dir1/foo", os.O_RDWR, 0)
+                       c.Assert(err, check.IsNil)
+                       defer f.Close()
+                       for i := 0; i < 1024; i++ {
+                               r := rand.Uint32()
+                               switch {
+                               case r%11 == 0:
+                                       _, err := s.fs.MarshalManifest(".")
+                                       c.Check(err, check.IsNil)
+                               case r&3 == 0:
+                                       f.Truncate(int64(rand.Intn(64)))
+                               case r&3 == 1:
+                                       f.Seek(int64(rand.Intn(64)), io.SeekStart)
+                               case r&3 == 2:
+                                       _, err := f.Write([]byte("beep boop"))
+                                       c.Check(err, check.IsNil)
+                               case r&3 == 3:
+                                       _, err := ioutil.ReadAll(f)
+                                       c.Check(err, check.IsNil)
+                               }
+                       }
+               }()
+       }
+       wg.Wait()
+
+       f, err := s.fs.OpenFile("/dir1/foo", os.O_RDWR, 0)
+       c.Assert(err, check.IsNil)
+       defer f.Close()
+       buf, err := ioutil.ReadAll(f)
+       c.Check(err, check.IsNil)
+       c.Logf("after lots of random r/w/seek/trunc, buf is %q", buf)
+}
+
+func (s *CollectionFSSuite) TestRandomWrites(c *check.C) {
+       maxBlockSize = 40
+       defer func() { maxBlockSize = 2 << 26 }()
+
+       var err error
+       s.fs, err = (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+
+       const nfiles = 256
+       const ngoroutines = 256
+
+       var wg sync.WaitGroup
+       for n := 0; n < ngoroutines; n++ {
+               wg.Add(1)
+               go func(n int) {
+                       defer wg.Done()
+                       expect := make([]byte, 0, 64)
+                       wbytes := []byte("there's no simple explanation for anything important that any of us do")
+                       f, err := s.fs.OpenFile(fmt.Sprintf("random-%d", n), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0)
+                       c.Assert(err, check.IsNil)
+                       defer f.Close()
+                       for i := 0; i < nfiles; i++ {
+                               trunc := rand.Intn(65)
+                               woff := rand.Intn(trunc + 1)
+                               wbytes = wbytes[:rand.Intn(64-woff+1)]
+                               for buf, i := expect[:cap(expect)], len(expect); i < trunc; i++ {
+                                       buf[i] = 0
+                               }
+                               expect = expect[:trunc]
+                               if trunc < woff+len(wbytes) {
+                                       expect = expect[:woff+len(wbytes)]
+                               }
+                               copy(expect[woff:], wbytes)
+                               f.Truncate(int64(trunc))
+                               pos, err := f.Seek(int64(woff), io.SeekStart)
+                               c.Check(pos, check.Equals, int64(woff))
+                               c.Check(err, check.IsNil)
+                               n, err := f.Write(wbytes)
+                               c.Check(n, check.Equals, len(wbytes))
+                               c.Check(err, check.IsNil)
+                               pos, err = f.Seek(0, io.SeekStart)
+                               c.Check(pos, check.Equals, int64(0))
+                               c.Check(err, check.IsNil)
+                               buf, err := ioutil.ReadAll(f)
+                               c.Check(string(buf), check.Equals, string(expect))
+                               c.Check(err, check.IsNil)
+                       }
+               }(n)
+       }
+       wg.Wait()
+
+       for n := 0; n < ngoroutines; n++ {
+               f, err := s.fs.OpenFile(fmt.Sprintf("random-%d", n), os.O_RDONLY, 0)
+               c.Assert(err, check.IsNil)
+               f.(*filehandle).inode.(*filenode).waitPrune()
+               s.checkMemSize(c, f)
+               defer f.Close()
+       }
+
+       root, err := s.fs.Open("/")
+       c.Assert(err, check.IsNil)
+       defer root.Close()
+       fi, err := root.Readdir(-1)
+       c.Check(err, check.IsNil)
+       c.Check(len(fi), check.Equals, nfiles)
+
+       _, err = s.fs.MarshalManifest(".")
+       c.Check(err, check.IsNil)
+       // TODO: check manifest content
+}
+
+func (s *CollectionFSSuite) TestRemove(c *check.C) {
+       fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+       err = fs.Mkdir("dir0", 0755)
+       c.Assert(err, check.IsNil)
+       err = fs.Mkdir("dir1", 0755)
+       c.Assert(err, check.IsNil)
+       err = fs.Mkdir("dir1/dir2", 0755)
+       c.Assert(err, check.IsNil)
+       err = fs.Mkdir("dir1/dir3", 0755)
+       c.Assert(err, check.IsNil)
+
+       err = fs.Remove("dir0")
+       c.Check(err, check.IsNil)
+       err = fs.Remove("dir0")
+       c.Check(err, check.Equals, os.ErrNotExist)
+
+       err = fs.Remove("dir1/dir2/.")
+       c.Check(err, check.Equals, ErrInvalidArgument)
+       err = fs.Remove("dir1/dir2/..")
+       c.Check(err, check.Equals, ErrInvalidArgument)
+       err = fs.Remove("dir1")
+       c.Check(err, check.Equals, ErrDirectoryNotEmpty)
+       err = fs.Remove("dir1/dir2/../../../dir1")
+       c.Check(err, check.Equals, ErrDirectoryNotEmpty)
+       err = fs.Remove("dir1/dir3/")
+       c.Check(err, check.IsNil)
+       err = fs.RemoveAll("dir1")
+       c.Check(err, check.IsNil)
+       err = fs.RemoveAll("dir1")
+       c.Check(err, check.IsNil)
+}
+
+func (s *CollectionFSSuite) TestRenameError(c *check.C) {
+       fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+       err = fs.Mkdir("first", 0755)
+       c.Assert(err, check.IsNil)
+       err = fs.Mkdir("first/second", 0755)
+       c.Assert(err, check.IsNil)
+       f, err := fs.OpenFile("first/second/file", os.O_CREATE|os.O_WRONLY, 0755)
+       c.Assert(err, check.IsNil)
+       f.Write([]byte{1, 2, 3, 4, 5})
+       f.Close()
+       err = fs.Rename("first", "first/second/third")
+       c.Check(err, check.Equals, ErrInvalidArgument)
+       err = fs.Rename("first", "first/third")
+       c.Check(err, check.Equals, ErrInvalidArgument)
+       err = fs.Rename("first/second", "second")
+       c.Check(err, check.IsNil)
+       f, err = fs.OpenFile("second/file", 0, 0)
+       c.Assert(err, check.IsNil)
+       data, err := ioutil.ReadAll(f)
+       c.Check(err, check.IsNil)
+       c.Check(data, check.DeepEquals, []byte{1, 2, 3, 4, 5})
+}
+
+func (s *CollectionFSSuite) TestRenameDirectory(c *check.C) {
+       fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+       err = fs.Mkdir("foo", 0755)
+       c.Assert(err, check.IsNil)
+       err = fs.Mkdir("bar", 0755)
+       c.Assert(err, check.IsNil)
+       err = fs.Rename("bar", "baz")
+       c.Check(err, check.IsNil)
+       err = fs.Rename("foo", "baz")
+       c.Check(err, check.NotNil)
+       err = fs.Rename("foo", "baz/")
+       c.Check(err, check.IsNil)
+       err = fs.Rename("baz/foo", ".")
+       c.Check(err, check.Equals, ErrInvalidArgument)
+       err = fs.Rename("baz/foo/", ".")
+       c.Check(err, check.Equals, ErrInvalidArgument)
+}
+
+func (s *CollectionFSSuite) TestRename(c *check.C) {
+       fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+       const (
+               outer = 16
+               inner = 16
+       )
+       for i := 0; i < outer; i++ {
+               err = fs.Mkdir(fmt.Sprintf("dir%d", i), 0755)
+               c.Assert(err, check.IsNil)
+               for j := 0; j < inner; j++ {
+                       err = fs.Mkdir(fmt.Sprintf("dir%d/dir%d", i, j), 0755)
+                       c.Assert(err, check.IsNil)
+                       for _, fnm := range []string{
+                               fmt.Sprintf("dir%d/file%d", i, j),
+                               fmt.Sprintf("dir%d/dir%d/file%d", i, j, j),
+                       } {
+                               f, err := fs.OpenFile(fnm, os.O_CREATE|os.O_WRONLY, 0755)
+                               c.Assert(err, check.IsNil)
+                               _, err = f.Write([]byte("beep"))
+                               c.Assert(err, check.IsNil)
+                               f.Close()
+                       }
+               }
+       }
+       var wg sync.WaitGroup
+       for i := 0; i < outer; i++ {
+               for j := 0; j < inner; j++ {
+                       wg.Add(1)
+                       go func(i, j int) {
+                               defer wg.Done()
+                               oldname := fmt.Sprintf("dir%d/dir%d/file%d", i, j, j)
+                               newname := fmt.Sprintf("dir%d/newfile%d", i, inner-j-1)
+                               _, err := fs.Open(newname)
+                               c.Check(err, check.Equals, os.ErrNotExist)
+                               err = fs.Rename(oldname, newname)
+                               c.Check(err, check.IsNil)
+                               f, err := fs.Open(newname)
+                               c.Check(err, check.IsNil)
+                               f.Close()
+                       }(i, j)
+
+                       wg.Add(1)
+                       go func(i, j int) {
+                               defer wg.Done()
+                               // oldname does not exist
+                               err := fs.Rename(
+                                       fmt.Sprintf("dir%d/dir%d/missing", i, j),
+                                       fmt.Sprintf("dir%d/dir%d/file%d", outer-i-1, j, j))
+                               c.Check(err, check.ErrorMatches, `.*does not exist`)
+
+                               // newname parent dir does not exist
+                               err = fs.Rename(
+                                       fmt.Sprintf("dir%d/dir%d", i, j),
+                                       fmt.Sprintf("dir%d/missing/irrelevant", outer-i-1))
+                               c.Check(err, check.ErrorMatches, `.*does not exist`)
+
+                               // oldname parent dir is a file
+                               err = fs.Rename(
+                                       fmt.Sprintf("dir%d/file%d/patherror", i, j),
+                                       fmt.Sprintf("dir%d/irrelevant", i))
+                               c.Check(err, check.ErrorMatches, `.*not a directory`)
+
+                               // newname parent dir is a file
+                               err = fs.Rename(
+                                       fmt.Sprintf("dir%d/dir%d/file%d", i, j, j),
+                                       fmt.Sprintf("dir%d/file%d/patherror", i, inner-j-1))
+                               c.Check(err, check.ErrorMatches, `.*not a directory`)
+                       }(i, j)
+               }
+       }
+       wg.Wait()
+
+       f, err := fs.OpenFile("dir1/newfile3", 0, 0)
+       c.Assert(err, check.IsNil)
+       c.Check(f.Size(), check.Equals, int64(4))
+       buf, err := ioutil.ReadAll(f)
+       c.Check(buf, check.DeepEquals, []byte("beep"))
+       c.Check(err, check.IsNil)
+       _, err = fs.Open("dir1/dir1/file1")
+       c.Check(err, check.Equals, os.ErrNotExist)
+}
+
+func (s *CollectionFSSuite) TestPersist(c *check.C) {
+       maxBlockSize = 1024
+       defer func() { maxBlockSize = 2 << 26 }()
+
+       var err error
+       s.fs, err = (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+       err = s.fs.Mkdir("d:r", 0755)
+       c.Assert(err, check.IsNil)
+
+       expect := map[string][]byte{}
+
+       var wg sync.WaitGroup
+       for _, name := range []string{"random 1", "random:2", "random\\3", "d:r/random4"} {
+               buf := make([]byte, 500)
+               rand.Read(buf)
+               expect[name] = buf
+
+               f, err := s.fs.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0)
+               c.Assert(err, check.IsNil)
+               // Note: we don't close the file until after the test
+               // is done. Writes to unclosed files should persist.
+               defer f.Close()
+
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       for i := 0; i < len(buf); i += 5 {
+                               _, err := f.Write(buf[i : i+5])
+                               c.Assert(err, check.IsNil)
+                       }
+               }()
+       }
+       wg.Wait()
+
+       m, err := s.fs.MarshalManifest(".")
+       c.Check(err, check.IsNil)
+       c.Logf("%q", m)
+
+       root, err := s.fs.Open("/")
+       c.Assert(err, check.IsNil)
+       defer root.Close()
+       fi, err := root.Readdir(-1)
+       c.Check(err, check.IsNil)
+       c.Check(len(fi), check.Equals, 4)
+
+       persisted, err := (&Collection{ManifestText: m}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+
+       root, err = persisted.Open("/")
+       c.Assert(err, check.IsNil)
+       defer root.Close()
+       fi, err = root.Readdir(-1)
+       c.Check(err, check.IsNil)
+       c.Check(len(fi), check.Equals, 4)
+
+       for name, content := range expect {
+               c.Logf("read %q", name)
+               f, err := persisted.Open(name)
+               c.Assert(err, check.IsNil)
+               defer f.Close()
+               buf, err := ioutil.ReadAll(f)
+               c.Check(err, check.IsNil)
+               c.Check(buf, check.DeepEquals, content)
+       }
+}
+
+func (s *CollectionFSSuite) TestPersistEmptyFilesAndDirs(c *check.C) {
+       var err error
+       s.fs, err = (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+       for _, name := range []string{"dir", "dir/zerodir", "empty", "not empty", "not empty/empty", "zero", "zero/zero"} {
+               err = s.fs.Mkdir(name, 0755)
+               c.Assert(err, check.IsNil)
+       }
+
+       expect := map[string][]byte{
+               "0":                nil,
+               "00":               {},
+               "one":              {1},
+               "dir/0":            nil,
+               "dir/two":          {1, 2},
+               "dir/zero":         nil,
+               "dir/zerodir/zero": nil,
+               "zero/zero/zero":   nil,
+       }
+       for name, data := range expect {
+               f, err := s.fs.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0)
+               c.Assert(err, check.IsNil)
+               if data != nil {
+                       _, err := f.Write(data)
+                       c.Assert(err, check.IsNil)
+               }
+               f.Close()
+       }
+
+       m, err := s.fs.MarshalManifest(".")
+       c.Check(err, check.IsNil)
+       c.Logf("%q", m)
+
+       persisted, err := (&Collection{ManifestText: m}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+
+       for name, data := range expect {
+               _, err = persisted.Open("bogus-" + name)
+               c.Check(err, check.NotNil)
+
+               f, err := persisted.Open(name)
+               c.Assert(err, check.IsNil)
+
+               if data == nil {
+                       data = []byte{}
+               }
+               buf, err := ioutil.ReadAll(f)
+               c.Check(err, check.IsNil)
+               c.Check(buf, check.DeepEquals, data)
+       }
+
+       expectDir := map[string]int{
+               "empty":           0,
+               "not empty":       1,
+               "not empty/empty": 0,
+       }
+       for name, expectLen := range expectDir {
+               _, err := persisted.Open(name + "/bogus")
+               c.Check(err, check.NotNil)
+
+               d, err := persisted.Open(name)
+               defer d.Close()
+               c.Check(err, check.IsNil)
+               fi, err := d.Readdir(-1)
+               c.Check(err, check.IsNil)
+               c.Check(fi, check.HasLen, expectLen)
+       }
+}
+
+func (s *CollectionFSSuite) TestOpenFileFlags(c *check.C) {
+       fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+
+       f, err := fs.OpenFile("missing", os.O_WRONLY, 0)
+       c.Check(f, check.IsNil)
+       c.Check(err, check.ErrorMatches, `file does not exist`)
+
+       f, err = fs.OpenFile("new", os.O_CREATE|os.O_RDONLY, 0)
+       c.Assert(err, check.IsNil)
+       defer f.Close()
+       n, err := f.Write([]byte{1, 2, 3})
+       c.Check(n, check.Equals, 0)
+       c.Check(err, check.ErrorMatches, `read-only file`)
+       n, err = f.Read(make([]byte, 1))
+       c.Check(n, check.Equals, 0)
+       c.Check(err, check.Equals, io.EOF)
+       f, err = fs.OpenFile("new", os.O_RDWR, 0)
+       c.Assert(err, check.IsNil)
+       defer f.Close()
+       _, err = f.Write([]byte{4, 5, 6})
+       c.Check(err, check.IsNil)
+       fi, err := f.Stat()
+       c.Assert(err, check.IsNil)
+       c.Check(fi.Size(), check.Equals, int64(3))
+
+       f, err = fs.OpenFile("new", os.O_TRUNC|os.O_RDWR, 0)
+       c.Assert(err, check.IsNil)
+       defer f.Close()
+       pos, err := f.Seek(0, io.SeekEnd)
+       c.Check(pos, check.Equals, int64(0))
+       c.Check(err, check.IsNil)
+       fi, err = f.Stat()
+       c.Assert(err, check.IsNil)
+       c.Check(fi.Size(), check.Equals, int64(0))
+       fs.Remove("new")
+
+       buf := make([]byte, 64)
+       f, err = fs.OpenFile("append", os.O_EXCL|os.O_CREATE|os.O_RDWR|os.O_APPEND, 0)
+       c.Assert(err, check.IsNil)
+       f.Write([]byte{1, 2, 3})
+       f.Seek(0, io.SeekStart)
+       n, _ = f.Read(buf[:1])
+       c.Check(n, check.Equals, 1)
+       c.Check(buf[:1], check.DeepEquals, []byte{1})
+       pos, err = f.Seek(0, io.SeekCurrent)
+       c.Assert(err, check.IsNil)
+       c.Check(pos, check.Equals, int64(1))
+       f.Write([]byte{4, 5, 6})
+       pos, err = f.Seek(0, io.SeekCurrent)
+       c.Assert(err, check.IsNil)
+       c.Check(pos, check.Equals, int64(6))
+       f.Seek(0, io.SeekStart)
+       n, err = f.Read(buf)
+       c.Check(buf[:n], check.DeepEquals, []byte{1, 2, 3, 4, 5, 6})
+       c.Check(err, check.Equals, io.EOF)
+       f.Close()
+
+       f, err = fs.OpenFile("append", os.O_RDWR|os.O_APPEND, 0)
+       c.Assert(err, check.IsNil)
+       pos, err = f.Seek(0, io.SeekCurrent)
+       c.Check(pos, check.Equals, int64(0))
+       c.Check(err, check.IsNil)
+       f.Read(buf[:3])
+       pos, _ = f.Seek(0, io.SeekCurrent)
+       c.Check(pos, check.Equals, int64(3))
+       f.Write([]byte{7, 8, 9})
+       pos, err = f.Seek(0, io.SeekCurrent)
+       c.Check(err, check.IsNil)
+       c.Check(pos, check.Equals, int64(9))
+       f.Close()
+
+       f, err = fs.OpenFile("wronly", os.O_CREATE|os.O_WRONLY, 0)
+       c.Assert(err, check.IsNil)
+       n, err = f.Write([]byte{3, 2, 1})
+       c.Check(n, check.Equals, 3)
+       c.Check(err, check.IsNil)
+       pos, _ = f.Seek(0, io.SeekCurrent)
+       c.Check(pos, check.Equals, int64(3))
+       pos, _ = f.Seek(0, io.SeekStart)
+       c.Check(pos, check.Equals, int64(0))
+       n, err = f.Read(buf)
+       c.Check(n, check.Equals, 0)
+       c.Check(err, check.ErrorMatches, `.*O_WRONLY.*`)
+       f, err = fs.OpenFile("wronly", os.O_RDONLY, 0)
+       c.Assert(err, check.IsNil)
+       n, _ = f.Read(buf)
+       c.Check(buf[:n], check.DeepEquals, []byte{3, 2, 1})
+
+       f, err = fs.OpenFile("unsupported", os.O_CREATE|os.O_SYNC, 0)
+       c.Check(f, check.IsNil)
+       c.Check(err, check.NotNil)
+
+       f, err = fs.OpenFile("append", os.O_RDWR|os.O_WRONLY, 0)
+       c.Check(f, check.IsNil)
+       c.Check(err, check.ErrorMatches, `invalid flag.*`)
+}
+
+func (s *CollectionFSSuite) TestFlushFullBlocks(c *check.C) {
+       defer func(wab, mbs int) {
+               writeAheadBlocks = wab
+               maxBlockSize = mbs
+       }(writeAheadBlocks, maxBlockSize)
+       writeAheadBlocks = 2
+       maxBlockSize = 1024
+
+       proceed := make(chan struct{})
+       var started, concurrent int32
+       blk2done := false
+       s.kc.onPut = func([]byte) {
+               atomic.AddInt32(&concurrent, 1)
+               switch atomic.AddInt32(&started, 1) {
+               case 1:
+                       // Wait until block 2 starts and finishes, and block 3 starts
+                       select {
+                       case <-proceed:
+                               c.Check(blk2done, check.Equals, true)
+                       case <-time.After(time.Second):
+                               c.Error("timed out")
+                       }
+               case 2:
+                       time.Sleep(time.Millisecond)
+                       blk2done = true
+               case 3:
+                       close(proceed)
+               default:
+                       time.Sleep(time.Millisecond)
+               }
+               c.Check(atomic.AddInt32(&concurrent, -1) < int32(writeAheadBlocks), check.Equals, true)
+       }
+
+       fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+       f, err := fs.OpenFile("50K", os.O_WRONLY|os.O_CREATE, 0)
+       c.Assert(err, check.IsNil)
+       defer f.Close()
+
+       data := make([]byte, 500)
+       rand.Read(data)
+
+       for i := 0; i < 100; i++ {
+               n, err := f.Write(data)
+               c.Assert(n, check.Equals, len(data))
+               c.Assert(err, check.IsNil)
+       }
+
+       currentMemExtents := func() (memExtents []int) {
+               for idx, e := range f.(*filehandle).inode.(*filenode).segments {
+                       switch e.(type) {
+                       case *memSegment:
+                               memExtents = append(memExtents, idx)
+                       }
+               }
+               return
+       }
+       f.(*filehandle).inode.(*filenode).waitPrune()
+       c.Check(currentMemExtents(), check.HasLen, 1)
+
+       m, err := fs.MarshalManifest(".")
+       c.Check(m, check.Matches, `[^:]* 0:50000:50K\n`)
+       c.Check(err, check.IsNil)
+       c.Check(currentMemExtents(), check.HasLen, 0)
+}
+
+func (s *CollectionFSSuite) TestBrokenManifests(c *check.C) {
+       for _, txt := range []string{
+               "\n",
+               ".\n",
+               ". \n",
+               ". d41d8cd98f00b204e9800998ecf8427e+0\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+0 \n",
+               ". 0:0:foo\n",
+               ".  0:0:foo\n",
+               ". 0:0:foo 0:0:bar\n",
+               ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+0 :0:0:foo\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+0 foo:0:foo\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+0 0:foo:foo\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+1 0:1:foo 1:1:bar\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+1 0:1:\\056\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+1 0:1:\\056\\057\\056\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+1 0:1:.\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+1 0:1:..\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:..\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/..\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+1 0:0:foo\n./foo d41d8cd98f00b204e9800998ecf8427e+1 0:0:bar\n",
+               "./foo d41d8cd98f00b204e9800998ecf8427e+1 0:0:bar\n. d41d8cd98f00b204e9800998ecf8427e+1 0:0:foo\n",
+       } {
+               c.Logf("<-%q", txt)
+               fs, err := (&Collection{ManifestText: txt}).FileSystem(s.client, s.kc)
+               c.Check(fs, check.IsNil)
+               c.Logf("-> %s", err)
+               c.Check(err, check.NotNil)
+       }
+}
+
+func (s *CollectionFSSuite) TestEdgeCaseManifests(c *check.C) {
+       for _, txt := range []string{
+               "",
+               ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:...\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:. 0:0:. 0:0:\\056 0:0:\\056\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/. 0:0:. 0:0:foo\\057bar\\057\\056\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo 0:0:foo 0:0:bar\n",
+               ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/bar\n./foo d41d8cd98f00b204e9800998ecf8427e+0 0:0:bar\n",
+       } {
+               c.Logf("<-%q", txt)
+               fs, err := (&Collection{ManifestText: txt}).FileSystem(s.client, s.kc)
+               c.Check(err, check.IsNil)
+               c.Check(fs, check.NotNil)
+       }
+}
+
+func (s *CollectionFSSuite) checkMemSize(c *check.C, f File) {
+       fn := f.(*filehandle).inode.(*filenode)
+       var memsize int64
+       for _, seg := range fn.segments {
+               if e, ok := seg.(*memSegment); ok {
+                       memsize += int64(len(e.buf))
+               }
+       }
+       c.Check(fn.memsize, check.Equals, memsize)
+}
+
+type CollectionFSUnitSuite struct{}
+
+var _ = check.Suite(&CollectionFSUnitSuite{})
+
+// expect ~2 seconds to load a manifest with 256K files (512 dirs x 512 files)
+func (s *CollectionFSUnitSuite) TestLargeManifest(c *check.C) {
+       if testing.Short() {
+               c.Skip("slow")
+       }
+
+       const (
+               dirCount  = 512
+               fileCount = 512
+       )
+
+       mb := bytes.NewBuffer(make([]byte, 0, 40000000))
+       for i := 0; i < dirCount; i++ {
+               fmt.Fprintf(mb, "./dir%d", i)
+               for j := 0; j <= fileCount; j++ {
+                       fmt.Fprintf(mb, " %032x+42+A%040x@%08x", j, j, j)
+               }
+               for j := 0; j < fileCount; j++ {
+                       fmt.Fprintf(mb, " %d:%d:dir%d/file%d", j*42+21, 42, j, j)
+               }
+               mb.Write([]byte{'\n'})
+       }
+       coll := Collection{ManifestText: mb.String()}
+       c.Logf("%s built", time.Now())
+
+       var memstats runtime.MemStats
+       runtime.ReadMemStats(&memstats)
+       c.Logf("%s Alloc=%d Sys=%d", time.Now(), memstats.Alloc, memstats.Sys)
+
+       f, err := coll.FileSystem(nil, nil)
+       c.Check(err, check.IsNil)
+       c.Logf("%s loaded", time.Now())
+       c.Check(f.Size(), check.Equals, int64(42*dirCount*fileCount))
+
+       for i := 0; i < dirCount; i++ {
+               for j := 0; j < fileCount; j++ {
+                       f.Stat(fmt.Sprintf("./dir%d/dir%d/file%d", i, j, j))
+               }
+       }
+       c.Logf("%s Stat() x %d", time.Now(), dirCount*fileCount)
+
+       runtime.ReadMemStats(&memstats)
+       c.Logf("%s Alloc=%d Sys=%d", time.Now(), memstats.Alloc, memstats.Sys)
+}
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
diff --git a/sdk/go/arvados/fs_deferred.go b/sdk/go/arvados/fs_deferred.go
new file mode 100644 (file)
index 0000000..a84f64f
--- /dev/null
@@ -0,0 +1,103 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "log"
+       "os"
+       "sync"
+       "time"
+)
+
+func deferredCollectionFS(fs FileSystem, parent inode, coll Collection) inode {
+       var modTime time.Time
+       if coll.ModifiedAt != nil {
+               modTime = *coll.ModifiedAt
+       } else {
+               modTime = time.Now()
+       }
+       placeholder := &treenode{
+               fs:     fs,
+               parent: parent,
+               inodes: nil,
+               fileinfo: fileinfo{
+                       name:    coll.Name,
+                       modTime: modTime,
+                       mode:    0755 | os.ModeDir,
+               },
+       }
+       return &deferrednode{wrapped: placeholder, create: func() inode {
+               err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
+               if err != nil {
+                       log.Printf("BUG: unhandled error: %s", err)
+                       return placeholder
+               }
+               cfs, err := coll.FileSystem(fs, fs)
+               if err != nil {
+                       log.Printf("BUG: unhandled error: %s", err)
+                       return placeholder
+               }
+               root := cfs.rootnode()
+               root.SetParent(parent, coll.Name)
+               return root
+       }}
+}
+
+// A deferrednode wraps an inode that's expensive to build. Initially,
+// it responds to basic directory functions by proxying to the given
+// placeholder. If a caller uses a read/write/lock operation,
+// deferrednode calls the create() func to create the real inode, and
+// proxies to the real inode from then on.
+//
+// In practice, this means a deferrednode's parent's directory listing
+// can be generated using only the placeholder, instead of waiting for
+// create().
+type deferrednode struct {
+       wrapped inode
+       create  func() inode
+       mtx     sync.Mutex
+       created bool
+}
+
+func (dn *deferrednode) realinode() inode {
+       dn.mtx.Lock()
+       defer dn.mtx.Unlock()
+       if !dn.created {
+               dn.wrapped = dn.create()
+               dn.created = true
+       }
+       return dn.wrapped
+}
+
+func (dn *deferrednode) currentinode() inode {
+       dn.mtx.Lock()
+       defer dn.mtx.Unlock()
+       return dn.wrapped
+}
+
+func (dn *deferrednode) Read(p []byte, pos filenodePtr) (int, filenodePtr, error) {
+       return dn.realinode().Read(p, pos)
+}
+
+func (dn *deferrednode) Write(p []byte, pos filenodePtr) (int, filenodePtr, error) {
+       return dn.realinode().Write(p, pos)
+}
+
+func (dn *deferrednode) Child(name string, replace func(inode) (inode, error)) (inode, error) {
+       return dn.realinode().Child(name, replace)
+}
+
+func (dn *deferrednode) Truncate(size int64) error       { return dn.realinode().Truncate(size) }
+func (dn *deferrednode) SetParent(p inode, name string)  { dn.realinode().SetParent(p, name) }
+func (dn *deferrednode) IsDir() bool                     { return dn.currentinode().IsDir() }
+func (dn *deferrednode) Readdir() ([]os.FileInfo, error) { return dn.realinode().Readdir() }
+func (dn *deferrednode) Size() int64                     { return dn.currentinode().Size() }
+func (dn *deferrednode) FileInfo() os.FileInfo           { return dn.currentinode().FileInfo() }
+func (dn *deferrednode) Lock()                           { dn.realinode().Lock() }
+func (dn *deferrednode) Unlock()                         { dn.realinode().Unlock() }
+func (dn *deferrednode) RLock()                          { dn.realinode().RLock() }
+func (dn *deferrednode) RUnlock()                        { dn.realinode().RUnlock() }
+func (dn *deferrednode) FS() FileSystem                  { return dn.currentinode().FS() }
+func (dn *deferrednode) Parent() inode                   { return dn.currentinode().Parent() }
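+
+// exampleDeferredListing is an illustrative sketch (assuming a parent
+// inode and a Collection record as used above): FileInfo() is served
+// by the cheap placeholder, and only reading the directory forces
+// create() to fetch the real collection.
+func exampleDeferredListing(fs FileSystem, parent inode, coll Collection) error {
+       child := deferredCollectionFS(fs, parent, coll)
+       _ = child.FileInfo() // placeholder answers; no API request yet
+       _, err := child.Readdir() // realinode(): fetches and mounts the collection
+       return err
+}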
diff --git a/sdk/go/arvados/fs_filehandle.go b/sdk/go/arvados/fs_filehandle.go
new file mode 100644 (file)
index 0000000..9af8d0a
--- /dev/null
@@ -0,0 +1,112 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "io"
+       "os"
+)
+
+type filehandle struct {
+       inode
+       ptr        filenodePtr
+       append     bool
+       readable   bool
+       writable   bool
+       unreaddirs []os.FileInfo
+}
+
+func (f *filehandle) Read(p []byte) (n int, err error) {
+       if !f.readable {
+               return 0, ErrWriteOnlyMode
+       }
+       f.inode.RLock()
+       defer f.inode.RUnlock()
+       n, f.ptr, err = f.inode.Read(p, f.ptr)
+       return
+}
+
+func (f *filehandle) Seek(off int64, whence int) (pos int64, err error) {
+       size := f.inode.Size()
+       ptr := f.ptr
+       switch whence {
+       case io.SeekStart:
+               ptr.off = off
+       case io.SeekCurrent:
+               ptr.off += off
+       case io.SeekEnd:
+               ptr.off = size + off
+       }
+       if ptr.off < 0 {
+               return f.ptr.off, ErrNegativeOffset
+       }
+       if ptr.off != f.ptr.off {
+               f.ptr = ptr
+               // force filenode to recompute f.ptr fields on next
+               // use
+               f.ptr.repacked = -1
+       }
+       return f.ptr.off, nil
+}
+
+func (f *filehandle) Truncate(size int64) error {
+       return f.inode.Truncate(size)
+}
+
+func (f *filehandle) Write(p []byte) (n int, err error) {
+       if !f.writable {
+               return 0, ErrReadOnlyFile
+       }
+       f.inode.Lock()
+       defer f.inode.Unlock()
+       if fn, ok := f.inode.(*filenode); ok && f.append {
+               f.ptr = filenodePtr{
+                       off:        fn.fileinfo.size,
+                       segmentIdx: len(fn.segments),
+                       segmentOff: 0,
+                       repacked:   fn.repacked,
+               }
+       }
+       n, f.ptr, err = f.inode.Write(p, f.ptr)
+       return
+}
+
+func (f *filehandle) Readdir(count int) ([]os.FileInfo, error) {
+       if !f.inode.IsDir() {
+               return nil, ErrInvalidOperation
+       }
+       if count <= 0 {
+               return f.inode.Readdir()
+       }
+       if f.unreaddirs == nil {
+               var err error
+               f.unreaddirs, err = f.inode.Readdir()
+               if err != nil {
+                       return nil, err
+               }
+       }
+       if len(f.unreaddirs) == 0 {
+               return nil, io.EOF
+       }
+       if count > len(f.unreaddirs) {
+               count = len(f.unreaddirs)
+       }
+       ret := f.unreaddirs[:count]
+       f.unreaddirs = f.unreaddirs[count:]
+       return ret, nil
+}
+
+func (f *filehandle) Stat() (os.FileInfo, error) {
+       return f.inode.FileInfo(), nil
+}
+
+func (f *filehandle) Close() error {
+       return nil
+}
+
+func (f *filehandle) Sync() error {
+       // Sync the containing filesystem.
+       return f.FS().Sync()
+}
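+
+// exampleReaddirPaging is an illustrative sketch of the Readdir
+// contract above: with count > 0 entries arrive in batches, and the
+// final call returns io.EOF, as with os.File.
+func exampleReaddirPaging(dir *filehandle) ([]os.FileInfo, error) {
+       var all []os.FileInfo
+       for {
+               batch, err := dir.Readdir(64) // at most 64 entries per call
+               all = append(all, batch...)
+               if err == io.EOF {
+                       return all, nil
+               } else if err != nil {
+                       return all, err
+               }
+       }
+}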
diff --git a/sdk/go/arvados/fs_getternode.go b/sdk/go/arvados/fs_getternode.go
new file mode 100644 (file)
index 0000000..966fe9d
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "bytes"
+       "os"
+       "time"
+)
+
+// A getternode is a read-only virtual file whose content is whatever
+// the supplied Getter function returns.
+type getternode struct {
+       Getter func() ([]byte, error)
+
+       treenode
+       data *bytes.Reader
+}
+
+func (*getternode) IsDir() bool {
+       return false
+}
+
+func (*getternode) Child(string, func(inode) (inode, error)) (inode, error) {
+       return nil, ErrInvalidArgument
+}
+
+func (gn *getternode) get() error {
+       if gn.data != nil {
+               return nil
+       }
+       data, err := gn.Getter()
+       if err != nil {
+               return err
+       }
+       gn.data = bytes.NewReader(data)
+       return nil
+}
+
+func (gn *getternode) Size() int64 {
+       return gn.FileInfo().Size()
+}
+
+func (gn *getternode) FileInfo() os.FileInfo {
+       gn.Lock()
+       defer gn.Unlock()
+       var size int64
+       if gn.get() == nil {
+               size = gn.data.Size()
+       }
+       return fileinfo{
+               modTime: time.Now(),
+               mode:    0444,
+               size:    size,
+       }
+}
+
+func (gn *getternode) Read(p []byte, ptr filenodePtr) (int, filenodePtr, error) {
+       if err := gn.get(); err != nil {
+               return 0, ptr, err
+       }
+       n, err := gn.data.ReadAt(p, ptr.off)
+       return n, filenodePtr{off: ptr.off + int64(n)}, err
+}
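+
+// exampleGetterNode is an illustrative sketch: a getternode turns an
+// arbitrary data-producing function into a read-only file, here one
+// whose content is generated on first read.
+func exampleGetterNode() *getternode {
+       return &getternode{Getter: func() ([]byte, error) {
+               return []byte("generated on demand\n"), nil
+       }}
+}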
diff --git a/sdk/go/arvados/fs_lookup.go b/sdk/go/arvados/fs_lookup.go
new file mode 100644 (file)
index 0000000..42322a1
--- /dev/null
@@ -0,0 +1,73 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "os"
+       "sync"
+       "time"
+)
+
+// lookupnode is a caching tree node that is initially empty and calls
+// loadOne and loadAll to load/update child nodes as needed.
+//
+// See (*customFileSystem)MountUsers for example usage.
+type lookupnode struct {
+       inode
+       loadOne func(parent inode, name string) (inode, error)
+       loadAll func(parent inode) ([]inode, error)
+       stale   func(time.Time) bool
+
+       // internal fields
+       staleLock sync.Mutex
+       staleAll  time.Time
+       staleOne  map[string]time.Time
+}
+
+func (ln *lookupnode) Readdir() ([]os.FileInfo, error) {
+       ln.staleLock.Lock()
+       defer ln.staleLock.Unlock()
+       checkTime := time.Now()
+       if ln.stale(ln.staleAll) {
+               all, err := ln.loadAll(ln)
+               if err != nil {
+                       return nil, err
+               }
+               for _, child := range all {
+                       _, err = ln.inode.Child(child.FileInfo().Name(), func(inode) (inode, error) {
+                               return child, nil
+                       })
+                       if err != nil {
+                               return nil, err
+                       }
+               }
+               ln.staleAll = checkTime
+               // No value in ln.staleOne can make a difference to an
+               // "entry is stale?" test now, because no value is
+               // newer than ln.staleAll. Reclaim memory.
+               ln.staleOne = nil
+       }
+       return ln.inode.Readdir()
+}
+
+func (ln *lookupnode) Child(name string, replace func(inode) (inode, error)) (inode, error) {
+       ln.staleLock.Lock()
+       defer ln.staleLock.Unlock()
+       checkTime := time.Now()
+       if ln.stale(ln.staleAll) && ln.stale(ln.staleOne[name]) {
+               _, err := ln.inode.Child(name, func(inode) (inode, error) {
+                       return ln.loadOne(ln, name)
+               })
+               if err != nil {
+                       return nil, err
+               }
+               if ln.staleOne == nil {
+                       ln.staleOne = map[string]time.Time{name: checkTime}
+               } else {
+                       ln.staleOne[name] = checkTime
+               }
+       }
+       return ln.inode.Child(name, replace)
+}
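+
+// exampleLookupNode is an illustrative sketch of wiring a lookupnode:
+// the wrapped inode caches children, and loadOne/loadAll refresh them
+// whenever stale() reports the previous load is too old -- here,
+// older than one minute.
+func exampleLookupNode(wrapped inode, one func(inode, string) (inode, error), all func(inode) ([]inode, error)) *lookupnode {
+       return &lookupnode{
+               inode:   wrapped,
+               loadOne: one,
+               loadAll: all,
+               stale:   func(t time.Time) bool { return time.Since(t) > time.Minute },
+       }
+}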
diff --git a/sdk/go/arvados/fs_project.go b/sdk/go/arvados/fs_project.go
new file mode 100644 (file)
index 0000000..9299551
--- /dev/null
@@ -0,0 +1,117 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "log"
+       "os"
+       "strings"
+)
+
+func (fs *customFileSystem) defaultUUID(uuid string) (string, error) {
+       if uuid != "" {
+               return uuid, nil
+       }
+       var resp User
+       err := fs.RequestAndDecode(&resp, "GET", "arvados/v1/users/current", nil, nil)
+       if err != nil {
+               return "", err
+       }
+       return resp.UUID, nil
+}
+
+// projectsLoadOne loads only the named child, if it exists.
+func (fs *customFileSystem) projectsLoadOne(parent inode, uuid, name string) (inode, error) {
+       uuid, err := fs.defaultUUID(uuid)
+       if err != nil {
+               return nil, err
+       }
+
+       var contents CollectionList
+       err = fs.RequestAndDecode(&contents, "GET", "arvados/v1/groups/"+uuid+"/contents", nil, ResourceListParams{
+               Count: "none",
+               Filters: []Filter{
+                       {"name", "=", name},
+                       {"uuid", "is_a", []string{"arvados#collection", "arvados#group"}},
+                       {"groups.group_class", "=", "project"},
+               },
+       })
+       if err != nil {
+               return nil, err
+       }
+       if len(contents.Items) == 0 {
+               return nil, os.ErrNotExist
+       }
+       coll := contents.Items[0]
+
+       if strings.Contains(coll.UUID, "-j7d0g-") {
+               // Group item was loaded into a Collection var -- but
+               // we only need the Name and UUID anyway, so it's OK.
+               return fs.newProjectNode(parent, coll.Name, coll.UUID), nil
+       } else if strings.Contains(coll.UUID, "-4zz18-") {
+               return deferredCollectionFS(fs, parent, coll), nil
+       } else {
+               log.Printf("projectnode: unrecognized UUID in response: %q", coll.UUID)
+               return nil, ErrInvalidArgument
+       }
+}
+
+func (fs *customFileSystem) projectsLoadAll(parent inode, uuid string) ([]inode, error) {
+       uuid, err := fs.defaultUUID(uuid)
+       if err != nil {
+               return nil, err
+       }
+
+       var inodes []inode
+
+       // Note: the "filters" slice's backing array might be reused
+       // by append(filters,...) below. This isn't goroutine safe,
+       // but all accesses are in the same goroutine, so it's OK.
+       filters := []Filter{{"owner_uuid", "=", uuid}}
+       params := ResourceListParams{
+               Count:   "none",
+               Filters: filters,
+               Order:   "uuid",
+       }
+       for {
+               var resp CollectionList
+               err = fs.RequestAndDecode(&resp, "GET", "arvados/v1/collections", nil, params)
+               if err != nil {
+                       return nil, err
+               }
+               if len(resp.Items) == 0 {
+                       break
+               }
+               for _, i := range resp.Items {
+                       coll := i
+                       if !permittedName(coll.Name) {
+                               continue
+                       }
+                       inodes = append(inodes, deferredCollectionFS(fs, parent, coll))
+               }
+               params.Filters = append(filters, Filter{"uuid", ">", resp.Items[len(resp.Items)-1].UUID})
+       }
+
+       filters = append(filters, Filter{"group_class", "=", "project"})
+       params.Filters = filters
+       for {
+               var resp GroupList
+               err = fs.RequestAndDecode(&resp, "GET", "arvados/v1/groups", nil, params)
+               if err != nil {
+                       return nil, err
+               }
+               if len(resp.Items) == 0 {
+                       break
+               }
+               for _, group := range resp.Items {
+                       if !permittedName(group.Name) {
+                               continue
+                       }
+                       inodes = append(inodes, fs.newProjectNode(parent, group.Name, group.UUID))
+               }
+               params.Filters = append(filters, Filter{"uuid", ">", resp.Items[len(resp.Items)-1].UUID})
+       }
+       return inodes, nil
+}
diff --git a/sdk/go/arvados/fs_project_test.go b/sdk/go/arvados/fs_project_test.go
new file mode 100644 (file)
index 0000000..1a06ce1
--- /dev/null
@@ -0,0 +1,201 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "bytes"
+       "encoding/json"
+       "io"
+       "os"
+       "path/filepath"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+type spiedRequest struct {
+       method string
+       path   string
+       params map[string]interface{}
+}
+
+type spyingClient struct {
+       *Client
+       calls []spiedRequest
+}
+
+func (sc *spyingClient) RequestAndDecode(dst interface{}, method, path string, body io.Reader, params interface{}) error {
+       var paramsCopy map[string]interface{}
+       var buf bytes.Buffer
+       json.NewEncoder(&buf).Encode(params)
+       json.NewDecoder(&buf).Decode(&paramsCopy)
+       sc.calls = append(sc.calls, spiedRequest{
+               method: method,
+               path:   path,
+               params: paramsCopy,
+       })
+       return sc.Client.RequestAndDecode(dst, method, path, body, params)
+}
+
+func (s *SiteFSSuite) TestCurrentUserHome(c *check.C) {
+       s.fs.MountProject("home", "")
+       s.testHomeProject(c, "/home")
+}
+
+func (s *SiteFSSuite) TestUsersDir(c *check.C) {
+       s.testHomeProject(c, "/users/active")
+}
+
+func (s *SiteFSSuite) testHomeProject(c *check.C, path string) {
+       f, err := s.fs.Open(path)
+       c.Assert(err, check.IsNil)
+       fis, err := f.Readdir(-1)
+       c.Assert(err, check.IsNil)
+       c.Check(len(fis), check.Not(check.Equals), 0)
+
+       ok := false
+       for _, fi := range fis {
+               c.Check(fi.Name(), check.Not(check.Equals), "")
+               if fi.Name() == "A Project" {
+                       ok = true
+               }
+       }
+       c.Check(ok, check.Equals, true)
+
+       f, err = s.fs.Open(path + "/A Project/..")
+       c.Assert(err, check.IsNil)
+       fi, err := f.Stat()
+       c.Assert(err, check.IsNil)
+       c.Check(fi.IsDir(), check.Equals, true)
+       _, basename := filepath.Split(path)
+       c.Check(fi.Name(), check.Equals, basename)
+
+       f, err = s.fs.Open(path + "/A Project/A Subproject")
+       c.Assert(err, check.IsNil)
+       fi, err = f.Stat()
+       c.Assert(err, check.IsNil)
+       c.Check(fi.IsDir(), check.Equals, true)
+
+       for _, nx := range []string{
+               path + "/Unrestricted public data",
+               path + "/Unrestricted public data/does not exist",
+               path + "/A Project/does not exist",
+       } {
+               c.Log(nx)
+               f, err = s.fs.Open(nx)
+               c.Check(err, check.NotNil)
+               c.Check(os.IsNotExist(err), check.Equals, true)
+       }
+}
+
+func (s *SiteFSSuite) TestProjectReaddirAfterLoadOne(c *check.C) {
+       f, err := s.fs.Open("/users/active/A Project/A Subproject")
+       c.Assert(err, check.IsNil)
+       defer f.Close()
+       f, err = s.fs.Open("/users/active/A Project/Project does not exist")
+       c.Assert(err, check.NotNil)
+       f, err = s.fs.Open("/users/active/A Project/A Subproject")
+       c.Assert(err, check.IsNil)
+       defer f.Close()
+       f, err = s.fs.Open("/users/active/A Project")
+       c.Assert(err, check.IsNil)
+       defer f.Close()
+       fis, err := f.Readdir(-1)
+       c.Assert(err, check.IsNil)
+       c.Logf("%#v", fis)
+       var foundSubproject, foundCollection bool
+       for _, fi := range fis {
+               switch fi.Name() {
+               case "A Subproject":
+                       foundSubproject = true
+               case "collection_to_move_around":
+                       foundCollection = true
+               }
+       }
+       c.Check(foundSubproject, check.Equals, true)
+       c.Check(foundCollection, check.Equals, true)
+}
+
+func (s *SiteFSSuite) TestSlashInName(c *check.C) {
+       badCollection := Collection{
+               Name:      "bad/collection",
+               OwnerUUID: arvadostest.AProjectUUID,
+       }
+       err := s.client.RequestAndDecode(&badCollection, "POST", "arvados/v1/collections", s.client.UpdateBody(&badCollection), nil)
+       c.Assert(err, check.IsNil)
+       defer s.client.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+badCollection.UUID, nil, nil)
+
+       badProject := Group{
+               Name:       "bad/project",
+               GroupClass: "project",
+               OwnerUUID:  arvadostest.AProjectUUID,
+       }
+       err = s.client.RequestAndDecode(&badProject, "POST", "arvados/v1/groups", s.client.UpdateBody(&badProject), nil)
+       c.Assert(err, check.IsNil)
+       defer s.client.RequestAndDecode(nil, "DELETE", "arvados/v1/groups/"+badProject.UUID, nil, nil)
+
+       dir, err := s.fs.Open("/users/active/A Project")
+       c.Assert(err, check.IsNil)
+       fis, err := dir.Readdir(-1)
+       c.Check(err, check.IsNil)
+       for _, fi := range fis {
+               c.Logf("fi.Name() == %q", fi.Name())
+               c.Check(strings.Contains(fi.Name(), "/"), check.Equals, false)
+       }
+}
+
+func (s *SiteFSSuite) TestProjectUpdatedByOther(c *check.C) {
+       s.fs.MountProject("home", "")
+
+       project, err := s.fs.OpenFile("/home/A Project", 0, 0)
+       c.Assert(err, check.IsNil)
+
+       _, err = s.fs.Open("/home/A Project/oob")
+       c.Check(err, check.NotNil)
+
+       oob := Collection{
+               Name:      "oob",
+               OwnerUUID: arvadostest.AProjectUUID,
+       }
+       err = s.client.RequestAndDecode(&oob, "POST", "arvados/v1/collections", s.client.UpdateBody(&oob), nil)
+       c.Assert(err, check.IsNil)
+       defer s.client.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+oob.UUID, nil, nil)
+
+       err = project.Sync()
+       c.Check(err, check.IsNil)
+       f, err := s.fs.Open("/home/A Project/oob")
+       c.Assert(err, check.IsNil)
+       fi, err := f.Stat()
+       c.Assert(err, check.IsNil)
+       c.Check(fi.IsDir(), check.Equals, true)
+       f.Close()
+
+       wf, err := s.fs.OpenFile("/home/A Project/oob/test.txt", os.O_CREATE|os.O_RDWR, 0700)
+       c.Assert(err, check.IsNil)
+       _, err = wf.Write([]byte("hello oob\n"))
+       c.Check(err, check.IsNil)
+       err = wf.Close()
+       c.Check(err, check.IsNil)
+
+       // Delete test.txt behind s.fs's back by updating the
+       // collection record with the old (empty) ManifestText.
+       err = s.client.RequestAndDecode(nil, "PATCH", "arvados/v1/collections/"+oob.UUID, s.client.UpdateBody(&oob), nil)
+       c.Assert(err, check.IsNil)
+
+       err = project.Sync()
+       c.Check(err, check.IsNil)
+       _, err = s.fs.Open("/home/A Project/oob/test.txt")
+       c.Check(err, check.NotNil)
+       _, err = s.fs.Open("/home/A Project/oob")
+       c.Check(err, check.IsNil)
+
+       err = s.client.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+oob.UUID, nil, nil)
+       c.Assert(err, check.IsNil)
+
+       err = project.Sync()
+       c.Check(err, check.IsNil)
+       _, err = s.fs.Open("/home/A Project/oob")
+       c.Check(err, check.NotNil)
+}
diff --git a/sdk/go/arvados/fs_site.go b/sdk/go/arvados/fs_site.go
new file mode 100644 (file)
index 0000000..82114e2
--- /dev/null
@@ -0,0 +1,200 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "os"
+       "strings"
+       "sync"
+       "time"
+)
+
+type CustomFileSystem interface {
+       FileSystem
+       MountByID(mount string)
+       MountProject(mount, uuid string)
+       MountUsers(mount string)
+}
+
+type customFileSystem struct {
+       fileSystem
+       root *vdirnode
+
+       staleThreshold time.Time
+       staleLock      sync.Mutex
+}
+
+func (c *Client) CustomFileSystem(kc keepClient) CustomFileSystem {
+       root := &vdirnode{}
+       fs := &customFileSystem{
+               root: root,
+               fileSystem: fileSystem{
+                       fsBackend: keepBackend{apiClient: c, keepClient: kc},
+                       root:      root,
+               },
+       }
+       root.inode = &treenode{
+               fs:     fs,
+               parent: root,
+               fileinfo: fileinfo{
+                       name:    "/",
+                       mode:    os.ModeDir | 0755,
+                       modTime: time.Now(),
+               },
+               inodes: make(map[string]inode),
+       }
+       return fs
+}
+
+func (fs *customFileSystem) MountByID(mount string) {
+       fs.root.inode.Child(mount, func(inode) (inode, error) {
+               return &vdirnode{
+                       inode: &treenode{
+                               fs:     fs,
+                               parent: fs.root,
+                               inodes: make(map[string]inode),
+                               fileinfo: fileinfo{
+                                       name:    mount,
+                                       modTime: time.Now(),
+                                       mode:    0755 | os.ModeDir,
+                               },
+                       },
+                       create: fs.mountByID,
+               }, nil
+       })
+}
+
+func (fs *customFileSystem) MountProject(mount, uuid string) {
+       fs.root.inode.Child(mount, func(inode) (inode, error) {
+               return fs.newProjectNode(fs.root, mount, uuid), nil
+       })
+}
+
+func (fs *customFileSystem) MountUsers(mount string) {
+       fs.root.inode.Child(mount, func(inode) (inode, error) {
+               return &lookupnode{
+                       stale:   fs.Stale,
+                       loadOne: fs.usersLoadOne,
+                       loadAll: fs.usersLoadAll,
+                       inode: &treenode{
+                               fs:     fs,
+                               parent: fs.root,
+                               inodes: make(map[string]inode),
+                               fileinfo: fileinfo{
+                                       name:    mount,
+                                       modTime: time.Now(),
+                                       mode:    0755 | os.ModeDir,
+                               },
+                       },
+               }, nil
+       })
+}
+
+// SiteFileSystem returns a FileSystem that maps collections and other
+// Arvados objects onto a filesystem layout.
+//
+// This is experimental: the filesystem layout is not stable, and
+// there are significant known bugs and shortcomings. For example,
+// writes are not persisted until Sync() is called.
+func (c *Client) SiteFileSystem(kc keepClient) CustomFileSystem {
+       fs := c.CustomFileSystem(kc)
+       fs.MountByID("by_id")
+       fs.MountUsers("users")
+       return fs
+}
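+
+// exampleSiteFS is an illustrative sketch of typical use (the
+// collection UUID here is a placeholder): open a file inside a
+// collection via the by_id mount, then call Sync so directories
+// opened afterwards reflect server-side changes.
+func exampleSiteFS(c *Client, kc keepClient) error {
+       fs := c.SiteFileSystem(kc)
+       f, err := fs.Open("/by_id/zzzzz-4zz18-xxxxxxxxxxxxxxx/foo.txt")
+       if err != nil {
+               return err
+       }
+       f.Close()
+       return fs.Sync()
+}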
+
+func (fs *customFileSystem) Sync() error {
+       fs.staleLock.Lock()
+       defer fs.staleLock.Unlock()
+       fs.staleThreshold = time.Now()
+       return nil
+}
+
+// Stale returns true if information obtained at time t should be
+// considered stale.
+func (fs *customFileSystem) Stale(t time.Time) bool {
+       fs.staleLock.Lock()
+       defer fs.staleLock.Unlock()
+       return !fs.staleThreshold.Before(t)
+}
+
+func (fs *customFileSystem) newNode(name string, perm os.FileMode, modTime time.Time) (node inode, err error) {
+       return nil, ErrInvalidOperation
+}
+
+func (fs *customFileSystem) mountByID(parent inode, id string) inode {
+       if strings.Contains(id, "-4zz18-") || pdhRegexp.MatchString(id) {
+               return fs.mountCollection(parent, id)
+       } else if strings.Contains(id, "-j7d0g-") {
+               return fs.newProjectNode(fs.root, id, id)
+       } else {
+               return nil
+       }
+}
+
+func (fs *customFileSystem) mountCollection(parent inode, id string) inode {
+       var coll Collection
+       err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+id, nil, nil)
+       if err != nil {
+               return nil
+       }
+       cfs, err := coll.FileSystem(fs, fs)
+       if err != nil {
+               return nil
+       }
+       root := cfs.rootnode()
+       root.SetParent(parent, id)
+       return root
+}
+
+func (fs *customFileSystem) newProjectNode(root inode, name, uuid string) inode {
+       return &lookupnode{
+               stale:   fs.Stale,
+               loadOne: func(parent inode, name string) (inode, error) { return fs.projectsLoadOne(parent, uuid, name) },
+               loadAll: func(parent inode) ([]inode, error) { return fs.projectsLoadAll(parent, uuid) },
+               inode: &treenode{
+                       fs:     fs,
+                       parent: root,
+                       inodes: make(map[string]inode),
+                       fileinfo: fileinfo{
+                               name:    name,
+                               modTime: time.Now(),
+                               mode:    0755 | os.ModeDir,
+                       },
+               },
+       }
+}
+
+// vdirnode wraps an inode by ignoring any requests to add/replace
+// children, and calling a create() func when a non-existing child is
+// looked up.
+//
+// create() can return either a new node, which will be added to the
+// treenode, or nil for ENOENT.
+type vdirnode struct {
+       inode
+       create func(parent inode, name string) inode
+}
+
+func (vn *vdirnode) Child(name string, replace func(inode) (inode, error)) (inode, error) {
+       return vn.inode.Child(name, func(existing inode) (inode, error) {
+               if existing == nil && vn.create != nil {
+                       existing = vn.create(vn, name)
+                       if existing != nil {
+                               existing.SetParent(vn, name)
+                               vn.inode.(*treenode).fileinfo.modTime = time.Now()
+                       }
+               }
+               if replace == nil {
+                       return existing, nil
+               } else if tryRepl, err := replace(existing); err != nil {
+                       return existing, err
+               } else if tryRepl != existing {
+                       return existing, ErrInvalidArgument
+               } else {
+                       return existing, nil
+               }
+       })
+}
diff --git a/sdk/go/arvados/fs_site_test.go b/sdk/go/arvados/fs_site_test.go
new file mode 100644 (file)
index 0000000..80028dc
--- /dev/null
@@ -0,0 +1,99 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "net/http"
+       "os"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&SiteFSSuite{})
+
+type SiteFSSuite struct {
+       client *Client
+       fs     CustomFileSystem
+       kc     keepClient
+}
+
+func (s *SiteFSSuite) SetUpTest(c *check.C) {
+       s.client = &Client{
+               APIHost:   os.Getenv("ARVADOS_API_HOST"),
+               AuthToken: arvadostest.ActiveToken,
+               Insecure:  true,
+       }
+       s.kc = &keepClientStub{
+               blocks: map[string][]byte{
+                       "3858f62230ac3c915f300c664312c63f": []byte("foobar"),
+               }}
+       s.fs = s.client.SiteFileSystem(s.kc)
+}
+
+func (s *SiteFSSuite) TestHttpFileSystemInterface(c *check.C) {
+       _, ok := s.fs.(http.FileSystem)
+       c.Check(ok, check.Equals, true)
+}
+
+func (s *SiteFSSuite) TestByIDEmpty(c *check.C) {
+       f, err := s.fs.Open("/by_id")
+       c.Assert(err, check.IsNil)
+       fis, err := f.Readdir(-1)
+       c.Check(err, check.IsNil)
+       c.Check(len(fis), check.Equals, 0)
+}
+
+func (s *SiteFSSuite) TestByUUIDAndPDH(c *check.C) {
+       f, err := s.fs.Open("/by_id")
+       c.Assert(err, check.IsNil)
+       fis, err := f.Readdir(-1)
+       c.Check(err, check.IsNil)
+       c.Check(len(fis), check.Equals, 0)
+
+       err = s.fs.Mkdir("/by_id/"+arvadostest.FooCollection, 0755)
+       c.Check(err, check.Equals, os.ErrExist)
+
+       f, err = s.fs.Open("/by_id/" + arvadostest.NonexistentCollection)
+       c.Assert(err, check.Equals, os.ErrNotExist)
+
+       for _, path := range []string{
+               arvadostest.FooCollection,
+               arvadostest.FooPdh,
+               arvadostest.AProjectUUID + "/" + arvadostest.FooCollectionName,
+       } {
+               f, err = s.fs.Open("/by_id/" + path)
+               c.Assert(err, check.IsNil)
+               fis, err = f.Readdir(-1)
+               c.Assert(err, check.IsNil)
+               var names []string
+               for _, fi := range fis {
+                       names = append(names, fi.Name())
+               }
+               c.Check(names, check.DeepEquals, []string{"foo"})
+       }
+
+       f, err = s.fs.Open("/by_id/" + arvadostest.AProjectUUID + "/A Subproject/baz_file")
+       c.Assert(err, check.IsNil)
+       fis, err = f.Readdir(-1)
+       c.Assert(err, check.IsNil)
+       var names []string
+       for _, fi := range fis {
+               names = append(names, fi.Name())
+       }
+       c.Check(names, check.DeepEquals, []string{"baz"})
+
+       _, err = s.fs.OpenFile("/by_id/"+arvadostest.NonexistentCollection, os.O_RDWR|os.O_CREATE, 0755)
+       c.Check(err, check.Equals, ErrInvalidOperation)
+       err = s.fs.Rename("/by_id/"+arvadostest.FooCollection, "/by_id/beep")
+       c.Check(err, check.Equals, ErrInvalidArgument)
+       err = s.fs.Rename("/by_id/"+arvadostest.FooCollection+"/foo", "/by_id/beep")
+       c.Check(err, check.Equals, ErrInvalidArgument)
+       _, err = s.fs.Stat("/by_id/beep")
+       c.Check(err, check.Equals, os.ErrNotExist)
+       err = s.fs.Rename("/by_id/"+arvadostest.FooCollection+"/foo", "/by_id/"+arvadostest.FooCollection+"/bar")
+       c.Check(err, check.IsNil)
+
+       err = s.fs.Rename("/by_id", "/beep")
+       c.Check(err, check.Equals, ErrInvalidArgument)
+}
diff --git a/sdk/go/arvados/fs_users.go b/sdk/go/arvados/fs_users.go
new file mode 100644 (file)
index 0000000..00f7036
--- /dev/null
@@ -0,0 +1,48 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "os"
+)
+
+func (fs *customFileSystem) usersLoadOne(parent inode, name string) (inode, error) {
+       var resp UserList
+       err := fs.RequestAndDecode(&resp, "GET", "arvados/v1/users", nil, ResourceListParams{
+               Count:   "none",
+               Filters: []Filter{{"username", "=", name}},
+       })
+       if err != nil {
+               return nil, err
+       } else if len(resp.Items) == 0 {
+               return nil, os.ErrNotExist
+       }
+       user := resp.Items[0]
+       return fs.newProjectNode(parent, user.Username, user.UUID), nil
+}
+
+func (fs *customFileSystem) usersLoadAll(parent inode) ([]inode, error) {
+       params := ResourceListParams{
+               Count: "none",
+               Order: "uuid",
+       }
+       var inodes []inode
+       for {
+               var resp UserList
+               err := fs.RequestAndDecode(&resp, "GET", "arvados/v1/users", nil, params)
+               if err != nil {
+                       return nil, err
+               } else if len(resp.Items) == 0 {
+                       return inodes, nil
+               }
+               for _, user := range resp.Items {
+                       if user.Username == "" {
+                               continue
+                       }
+                       inodes = append(inodes, fs.newProjectNode(parent, user.Username, user.UUID))
+               }
+               params.Filters = []Filter{{"uuid", ">", resp.Items[len(resp.Items)-1].UUID}}
+       }
+}
diff --git a/sdk/go/arvados/group.go b/sdk/go/arvados/group.go
new file mode 100644 (file)
index 0000000..6b5718a
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+// Group is an arvados#group record
+type Group struct {
+       UUID       string `json:"uuid,omitempty"`
+       Name       string `json:"name,omitempty"`
+       OwnerUUID  string `json:"owner_uuid,omitempty"`
+       GroupClass string `json:"group_class"`
+}
+
+// GroupList is an arvados#groupList resource.
+type GroupList struct {
+       Items          []Group `json:"items"`
+       ItemsAvailable int     `json:"items_available"`
+       Offset         int     `json:"offset"`
+       Limit          int     `json:"limit"`
+}
+
+func (g Group) resourceName() string {
+       return "group"
+}
diff --git a/sdk/go/arvados/integration_test_cluster.go b/sdk/go/arvados/integration_test_cluster.go
new file mode 100644 (file)
index 0000000..ebf93f8
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "os"
+       "path/filepath"
+)
+
+// IntegrationTestCluster returns the cluster that has been set up by
+// the integration test framework (see /build/run-tests.sh). It panics
+// on error.
+func IntegrationTestCluster() *Cluster {
+       config, err := GetConfig(filepath.Join(os.Getenv("WORKSPACE"), "tmp", "arvados.yml"))
+       if err != nil {
+               panic(err)
+       }
+       cluster, err := config.GetCluster("")
+       if err != nil {
+               panic(err)
+       }
+       return cluster
+}
diff --git a/sdk/go/arvados/keep_block.go b/sdk/go/arvados/keep_block.go
new file mode 100644 (file)
index 0000000..de2b91a
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "strconv"
+       "strings"
+)
+
+// SizedDigest is a minimal Keep block locator: hash+size
+type SizedDigest string
+
+// Size returns the size of the data block, in bytes.
+func (sd SizedDigest) Size() int64 {
+       n, _ := strconv.ParseInt(strings.Split(string(sd), "+")[1], 10, 64)
+       return n
+}
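+
+// exampleSizedDigest is an illustrative sketch: Size parses the +N
+// suffix of a locator, so the empty-block locator below (the MD5 of
+// zero bytes) reports size 0.
+func exampleSizedDigest() int64 {
+       return SizedDigest("d41d8cd98f00b204e9800998ecf8427e+0").Size()
+}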
diff --git a/sdk/go/arvados/keep_service.go b/sdk/go/arvados/keep_service.go
new file mode 100644 (file)
index 0000000..0c86635
--- /dev/null
@@ -0,0 +1,173 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "bufio"
+       "fmt"
+       "net/http"
+       "strconv"
+       "strings"
+)
+
+// KeepService is an arvados#keepService record
+type KeepService struct {
+       UUID           string `json:"uuid"`
+       ServiceHost    string `json:"service_host"`
+       ServicePort    int    `json:"service_port"`
+       ServiceSSLFlag bool   `json:"service_ssl_flag"`
+       ServiceType    string `json:"service_type"`
+       ReadOnly       bool   `json:"read_only"`
+}
+
+type KeepMount struct {
+       UUID           string   `json:"uuid"`
+       DeviceID       string   `json:"device_id"`
+       ReadOnly       bool     `json:"read_only"`
+       Replication    int      `json:"replication"`
+       StorageClasses []string `json:"storage_classes"`
+}
+
+// KeepServiceList is an arvados#keepServiceList record
+type KeepServiceList struct {
+       Items          []KeepService `json:"items"`
+       ItemsAvailable int           `json:"items_available"`
+       Offset         int           `json:"offset"`
+       Limit          int           `json:"limit"`
+}
+
+// KeepServiceIndexEntry is what a keep service's index response tells
+// us about a stored block.
+type KeepServiceIndexEntry struct {
+       SizedDigest
+       // Time of last write, in nanoseconds since Unix epoch
+       Mtime int64
+}
+
+// EachKeepService calls f once for every readable
+// KeepService. EachKeepService stops if it encounters an
+// error, such as f returning a non-nil error.
+func (c *Client) EachKeepService(f func(KeepService) error) error {
+       params := ResourceListParams{}
+       for {
+               var page KeepServiceList
+               err := c.RequestAndDecode(&page, "GET", "arvados/v1/keep_services", nil, params)
+               if err != nil {
+                       return err
+               }
+               for _, item := range page.Items {
+                       err = f(item)
+                       if err != nil {
+                               return err
+                       }
+               }
+               params.Offset = params.Offset + len(page.Items)
+               if params.Offset >= page.ItemsAvailable {
+                       return nil
+               }
+       }
+}
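+
+// exampleEachKeepService is an illustrative sketch: collect the UUID
+// of every keep service, stopping early if the callback returns an
+// error.
+func exampleEachKeepService(c *Client) ([]string, error) {
+       var uuids []string
+       err := c.EachKeepService(func(ks KeepService) error {
+               uuids = append(uuids, ks.UUID)
+               return nil
+       })
+       return uuids, err
+}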
+
+func (s *KeepService) url(path string) string {
+       var f string
+       if s.ServiceSSLFlag {
+               f = "https://%s:%d/%s"
+       } else {
+               f = "http://%s:%d/%s"
+       }
+       return fmt.Sprintf(f, s.ServiceHost, s.ServicePort, path)
+}
+
+// String implements fmt.Stringer
+func (s *KeepService) String() string {
+       return s.UUID
+}
+
+func (s *KeepService) Mounts(c *Client) ([]KeepMount, error) {
+       url := s.url("mounts")
+       req, err := http.NewRequest("GET", url, nil)
+       if err != nil {
+               return nil, err
+       }
+       var mounts []KeepMount
+       err = c.DoAndDecode(&mounts, req)
+       if err != nil {
+               return nil, fmt.Errorf("GET %v: %v", url, err)
+       }
+       return mounts, nil
+}
+
+// IndexMount returns an unsorted list of blocks at the given mount point.
+func (s *KeepService) IndexMount(c *Client, mountUUID string, prefix string) ([]KeepServiceIndexEntry, error) {
+       return s.index(c, s.url("mounts/"+mountUUID+"/blocks?prefix="+prefix))
+}
+
+// Index returns an unsorted list of blocks that can be retrieved from
+// this server.
+func (s *KeepService) Index(c *Client, prefix string) ([]KeepServiceIndexEntry, error) {
+       return s.index(c, s.url("index/"+prefix))
+}
+
+func (s *KeepService) index(c *Client, url string) ([]KeepServiceIndexEntry, error) {
+       req, err := http.NewRequest("GET", url, nil)
+       if err != nil {
+               return nil, fmt.Errorf("NewRequest(%v): %v", url, err)
+       }
+       resp, err := c.Do(req)
+       if err != nil {
+               return nil, fmt.Errorf("Do(%v): %v", url, err)
+       } else if resp.StatusCode != 200 {
+               return nil, fmt.Errorf("%v: %d %v", url, resp.StatusCode, resp.Status)
+       }
+       defer resp.Body.Close()
+
+       var entries []KeepServiceIndexEntry
+       scanner := bufio.NewScanner(resp.Body)
+       sawEOF := false
+       for scanner.Scan() {
+               if scanner.Err() != nil {
+                       // If we encounter a read error (timeout,
+                       // connection failure), stop now and return it
+                       // below, so it doesn't get masked by the
+                       // ensuing "badly formatted response" error.
+                       break
+               }
+               if sawEOF {
+                       return nil, fmt.Errorf("Index response contained non-terminal blank line")
+               }
+               line := scanner.Text()
+               if line == "" {
+                       sawEOF = true
+                       continue
+               }
+               fields := strings.Split(line, " ")
+               if len(fields) != 2 {
+                       return nil, fmt.Errorf("Malformed index line %q: %d fields", line, len(fields))
+               }
+               mtime, err := strconv.ParseInt(fields[1], 10, 64)
+               if err != nil {
+                       return nil, fmt.Errorf("Malformed index line %q: mtime: %v", line, err)
+               }
+               if mtime < 1e12 {
+                       // An old version of keepstore is giving us
+                       // timestamps in seconds instead of
+                       // nanoseconds. (This threshold correctly
+                       // handles all times between 1970-01-02 and
+                       // 33658-09-27.)
+                       mtime = mtime * 1e9
+               }
+               entries = append(entries, KeepServiceIndexEntry{
+                       SizedDigest: SizedDigest(fields[0]),
+                       Mtime:       mtime,
+               })
+       }
+       if err := scanner.Err(); err != nil {
+               return nil, fmt.Errorf("Error scanning index response: %v", err)
+       }
+       if !sawEOF {
+               return nil, fmt.Errorf("Index response had no EOF marker")
+       }
+       return entries, nil
+}
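+
+// exampleIndexSize is an illustrative sketch: each index entry is one
+// "hash+size mtime" line (a trailing blank line is the EOF marker
+// checked above), so the total stored bytes on a server can be summed
+// from the entry sizes.
+func exampleIndexSize(c *Client, ks *KeepService) (int64, error) {
+       entries, err := ks.Index(c, "")
+       if err != nil {
+               return 0, err
+       }
+       var total int64
+       for _, e := range entries {
+               total += e.Size()
+       }
+       return total, nil
+}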
diff --git a/sdk/go/arvados/keep_service_test.go b/sdk/go/arvados/keep_service_test.go
new file mode 100644 (file)
index 0000000..8715f74
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "net/http"
+
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&KeepServiceSuite{})
+
+type KeepServiceSuite struct{}
+
+func (*KeepServiceSuite) TestIndexTimeout(c *check.C) {
+       client := &Client{
+               Client: &http.Client{
+                       Transport: &timeoutTransport{response: []byte("\n")},
+               },
+               APIHost:   "zzzzz.arvadosapi.com",
+               AuthToken: "xyzzy",
+       }
+       _, err := (&KeepService{}).IndexMount(client, "fake", "")
+       c.Check(err, check.ErrorMatches, `.*timeout.*`)
+}
diff --git a/sdk/go/arvados/link.go b/sdk/go/arvados/link.go
new file mode 100644 (file)
index 0000000..dee1355
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+// Link is an arvados#link record
+type Link struct {
+       UUID      string `json:"uuid,omitempty"`
+       OwnerUUID string `json:"owner_uuid,omitempty"`
+       Name      string `json:"name,omitempty"`
+       LinkClass string `json:"link_class,omitempty"`
+       HeadUUID  string `json:"head_uuid,omitempty"`
+       HeadKind  string `json:"head_kind,omitempty"`
+       TailUUID  string `json:"tail_uuid,omitempty"`
+       TailKind  string `json:"tail_kind,omitempty"`
+}
+
+// LinkList is an arvados#linkList resource.
+type LinkList struct {
+       Items          []Link `json:"items"`
+       ItemsAvailable int    `json:"items_available"`
+       Offset         int    `json:"offset"`
+       Limit          int    `json:"limit"`
+}
diff --git a/sdk/go/arvados/log.go b/sdk/go/arvados/log.go
new file mode 100644 (file)
index 0000000..6f72bf7
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "time"
+)
+
+// Log is an arvados#log record
+type Log struct {
+       ID              uint64                 `json:"id,omitempty"`
+       UUID            string                 `json:"uuid,omitempty"`
+       ObjectUUID      string                 `json:"object_uuid,omitempty"`
+       ObjectOwnerUUID string                 `json:"object_owner_uuid,omitempty"`
+       EventType       string                 `json:"event_type,omitempty"`
+       EventAt         *time.Time             `json:"event_at,omitempty"`
+       Properties      map[string]interface{} `json:"properties,omitempty"`
+       CreatedAt       *time.Time             `json:"created_at,omitempty"`
+}
+
+// LogList is an arvados#logList resource.
+type LogList struct {
+       Items          []Log `json:"items"`
+       ItemsAvailable int   `json:"items_available"`
+       Offset         int   `json:"offset"`
+       Limit          int   `json:"limit"`
+}
diff --git a/sdk/go/arvados/node.go b/sdk/go/arvados/node.go
new file mode 100644 (file)
index 0000000..cc844fe
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "time"
+
+// Node is an arvados#node resource.
+type Node struct {
+       UUID       string         `json:"uuid"`
+       Domain     string         `json:"domain"`
+       Hostname   string         `json:"hostname"`
+       IPAddress  string         `json:"ip_address"`
+       LastPingAt *time.Time     `json:"last_ping_at,omitempty"`
+       SlotNumber int            `json:"slot_number"`
+       Status     string         `json:"status"`
+       JobUUID    string         `json:"job_uuid,omitempty"`
+       Properties NodeProperties `json:"properties"`
+}
+
+type NodeProperties struct {
+       CloudNode      NodePropertiesCloudNode `json:"cloud_node"`
+       TotalCPUCores  int                     `json:"total_cpu_cores,omitempty"`
+       TotalScratchMB int64                   `json:"total_scratch_mb,omitempty"`
+       TotalRAMMB     int64                   `json:"total_ram_mb,omitempty"`
+}
+
+type NodePropertiesCloudNode struct {
+       Size  string  `json:"size,omitempty"`
+       Price float64 `json:"price"`
+}
+
+func (c Node) resourceName() string {
+       return "node"
+}
+
+// NodeList is an arvados#nodeList resource.
+type NodeList struct {
+       Items          []Node `json:"items"`
+       ItemsAvailable int    `json:"items_available"`
+       Offset         int    `json:"offset"`
+       Limit          int    `json:"limit"`
+}
diff --git a/sdk/go/arvados/postgresql.go b/sdk/go/arvados/postgresql.go
new file mode 100644 (file)
index 0000000..47953ce
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "strings"
+
+func (c PostgreSQLConnection) String() string {
+       s := ""
+       for k, v := range c {
+               s += strings.ToLower(k)
+               s += "='"
+               s += strings.Replace(
+                       strings.Replace(v, `\`, `\\`, -1),
+                       `'`, `\'`, -1)
+               s += "' "
+       }
+       return s
+}
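
The PostgreSQLConnection type itself is declared elsewhere in this commit; assuming it is a string-keyed map of libpq parameters, String() renders each value single-quoted with backslashes and quotes escaped. A minimal sketch:

    c := arvados.PostgreSQLConnection{
        "dbname":   "arvados_production",
        "password": `secret's\`,
    }
    fmt.Println(c.String())
    // dbname='arvados_production' password='secret\'s\\'
    // (map iteration order is random, so key order may vary)
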
diff --git a/sdk/go/arvados/resource_list.go b/sdk/go/arvados/resource_list.go
new file mode 100644 (file)
index 0000000..14ce098
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "encoding/json"
+
+// ResourceListParams expresses which results are requested in a
+// list/index API.
+type ResourceListParams struct {
+       Select             []string `json:"select,omitempty"`
+       Filters            []Filter `json:"filters,omitempty"`
+       IncludeTrash       bool     `json:"include_trash,omitempty"`
+       IncludeOldVersions bool     `json:"include_old_versions,omitempty"`
+       Limit              *int     `json:"limit,omitempty"`
+       Offset             int      `json:"offset,omitempty"`
+       Order              string   `json:"order,omitempty"`
+       Distinct           bool     `json:"distinct,omitempty"`
+       Count              string   `json:"count,omitempty"`
+}
+
+// A Filter restricts the set of records returned by a list/index API.
+type Filter struct {
+       Attr     string
+       Operator string
+       Operand  interface{}
+}
+
+// MarshalJSON encodes a Filter in the form expected by the API.
+func (f *Filter) MarshalJSON() ([]byte, error) {
+       return json.Marshal([]interface{}{f.Attr, f.Operator, f.Operand})
+}
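
Note that MarshalJSON has a pointer receiver: marshaling a []Filter works (slice elements are addressable, as the test below exercises), but marshaling a bare Filter value would encode it as a plain struct. A sketch of the intended wire format:

    f := arvados.Filter{Attr: "uuid", Operator: "=", Operand: "zzzzz-4zz18-fy296fx3hot09f7"}
    buf, _ := json.Marshal(&f)
    fmt.Println(string(buf)) // ["uuid","=","zzzzz-4zz18-fy296fx3hot09f7"]

    params := arvados.ResourceListParams{
        Filters: []arvados.Filter{f},
        Order:   "created_at desc",
    }
    buf, _ = json.Marshal(params)
    // {"filters":[["uuid","=","zzzzz-4zz18-fy296fx3hot09f7"]],"order":"created_at desc"}
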
diff --git a/sdk/go/arvados/resource_list_test.go b/sdk/go/arvados/resource_list_test.go
new file mode 100644 (file)
index 0000000..5642599
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "bytes"
+       "encoding/json"
+       "testing"
+       "time"
+)
+
+func TestMarshalFiltersWithNanoseconds(t *testing.T) {
+       t0 := time.Now()
+       t0str := t0.Format(time.RFC3339Nano)
+       buf, err := json.Marshal([]Filter{
+               {Attr: "modified_at", Operator: "=", Operand: t0}})
+       if err != nil {
+               t.Fatal(err)
+       }
+       if expect := []byte(`[["modified_at","=","` + t0str + `"]]`); !bytes.Equal(buf, expect) {
+               t.Errorf("Encoded as %q, expected %q", buf, expect)
+       }
+}
diff --git a/sdk/go/arvados/throttle.go b/sdk/go/arvados/throttle.go
new file mode 100644 (file)
index 0000000..464b73b
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+type throttle struct {
+       c chan struct{}
+}
+
+func newThrottle(n int) *throttle {
+       return &throttle{c: make(chan struct{}, n)}
+}
+
+func (t *throttle) Acquire() {
+       t.c <- struct{}{}
+}
+
+func (t *throttle) Release() {
+       <-t.c
+}
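
throttle is a counting semaphore built on a buffered channel: the first n Acquire calls fill the channel, and later calls block until a Release drains a slot. A minimal usage sketch (the type is unexported, so this only compiles inside package arvados; doWork is hypothetical):

    t := newThrottle(4) // allow at most 4 concurrent operations
    var wg sync.WaitGroup
    for i := 0; i < 20; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            t.Acquire() // blocks while 4 operations are in flight
            defer t.Release()
            doWork(i) // hypothetical rate-limited operation
        }(i)
    }
    wg.Wait()
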
diff --git a/sdk/go/arvados/user.go b/sdk/go/arvados/user.go
new file mode 100644 (file)
index 0000000..3a36e5e
--- /dev/null
@@ -0,0 +1,30 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+// User is an arvados#user record
+type User struct {
+       UUID     string `json:"uuid,omitempty"`
+       IsActive bool   `json:"is_active"`
+       IsAdmin  bool   `json:"is_admin"`
+       Username string `json:"username,omitempty"`
+       Email    string `json:"email,omitempty"`
+}
+
+// UserList is an arvados#userList resource.
+type UserList struct {
+       Items          []User `json:"items"`
+       ItemsAvailable int    `json:"items_available"`
+       Offset         int    `json:"offset"`
+       Limit          int    `json:"limit"`
+}
+
+// CurrentUser calls arvados.v1.users.current, and returns the User
+// record corresponding to this client's credentials.
+func (c *Client) CurrentUser() (User, error) {
+       var u User
+       err := c.RequestAndDecode(&u, "GET", "arvados/v1/users/current", nil, nil)
+       return u, err
+}
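
Typical use of CurrentUser, assuming credentials come from the standard environment variables via arvados.NewClientFromEnv (defined elsewhere in this commit):

    client := arvados.NewClientFromEnv() // reads ARVADOS_API_HOST, ARVADOS_API_TOKEN, ...
    u, err := client.CurrentUser()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s <%s> active=%v admin=%v\n", u.Username, u.Email, u.IsActive, u.IsAdmin)
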
diff --git a/sdk/go/arvados/workflow.go b/sdk/go/arvados/workflow.go
new file mode 100644 (file)
index 0000000..09c8c71
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "time"
+
+// Workflow is an arvados#workflow resource.
+type Workflow struct {
+       UUID        string     `json:"uuid,omitempty"`
+       OwnerUUID   string     `json:"owner_uuid,omitempty"`
+       Name        string     `json:"name,omitempty"`
+       Description string     `json:"description,omitempty"`
+       Definition  string     `json:"definition,omitempty"`
+       CreatedAt   *time.Time `json:"created_at,omitempty"`
+       ModifiedAt  *time.Time `json:"modified_at,omitempty"`
+}
+
+// WorkflowList is an arvados#workflowList resource.
+type WorkflowList struct {
+       Items          []Workflow `json:"items"`
+       ItemsAvailable int        `json:"items_available"`
+       Offset         int        `json:"offset"`
+       Limit          int        `json:"limit"`
+}
diff --git a/sdk/go/arvadosclient/arvadosclient.go b/sdk/go/arvadosclient/arvadosclient.go
new file mode 100644 (file)
index 0000000..e3a9f4a
--- /dev/null
@@ -0,0 +1,443 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Package arvadosclient is a simple Arvados Go SDK for
+// communicating with the API server.
+package arvadosclient
+
+import (
+       "bytes"
+       "crypto/tls"
+       "crypto/x509"
+       "encoding/json"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "net/http"
+       "net/url"
+       "os"
+       "regexp"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+type StringMatcher func(string) bool
+
+var UUIDMatch StringMatcher = regexp.MustCompile(`^[a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}$`).MatchString
+var PDHMatch StringMatcher = regexp.MustCompile(`^[0-9a-f]{32}\+\d+$`).MatchString
+
+var MissingArvadosApiHost = errors.New("Missing required environment variable ARVADOS_API_HOST")
+var MissingArvadosApiToken = errors.New("Missing required environment variable ARVADOS_API_TOKEN")
+var ErrInvalidArgument = errors.New("Invalid argument")
+
+// A common failure mode is to reuse a keepalive connection that has been
+// terminated (in a way that we can't detect) for being idle too long.
+// POST and DELETE are not safe to retry automatically, so we minimize
+// such failures by always using a new or recently active socket.
+var MaxIdleConnectionDuration = 30 * time.Second
+
+var RetryDelay = 2 * time.Second
+
+var (
+       defaultInsecureHTTPClient *http.Client
+       defaultSecureHTTPClient   *http.Client
+       defaultHTTPClientMtx      sync.Mutex
+)
+
+// APIServerError indicates an error that was returned by the API server.
+type APIServerError struct {
+       // Address of server returning error, of the form "host:port".
+       ServerAddress string
+
+       // Components of server response.
+       HttpStatusCode    int
+       HttpStatusMessage string
+
+       // Additional error details from response body.
+       ErrorDetails []string
+}
+
+func (e APIServerError) Error() string {
+       if len(e.ErrorDetails) > 0 {
+               return fmt.Sprintf("arvados API server error: %s (%d: %s) returned by %s",
+                       strings.Join(e.ErrorDetails, "; "),
+                       e.HttpStatusCode,
+                       e.HttpStatusMessage,
+                       e.ServerAddress)
+       } else {
+               return fmt.Sprintf("arvados API server error: %d: %s returned by %s",
+                       e.HttpStatusCode,
+                       e.HttpStatusMessage,
+                       e.ServerAddress)
+       }
+}
+
+// StringBool tests whether s is suggestive of true. It returns true
+// if s is a mixed/upper/lower-case variant of "1", "yes", or "true".
+func StringBool(s string) bool {
+       s = strings.ToLower(s)
+       return s == "1" || s == "yes" || s == "true"
+}
+
+// Dict is a helper type so we don't have to write out 'map[string]interface{}' every time.
+type Dict map[string]interface{}
+
+// An ArvadosClient contains the information needed to contact an
+// Arvados API server.
+type ArvadosClient struct {
+       // Scheme used to contact the API server, e.g., "https"
+       Scheme string
+
+       // Arvados API server, form "host:port"
+       ApiServer string
+
+       // Arvados API token for authentication
+       ApiToken string
+
+       // Whether to require a valid SSL certificate or not
+       ApiInsecure bool
+
+       // Client object shared by client requests.  Supports HTTP KeepAlive.
+       Client *http.Client
+
+       // If true, sets the X-External-Client header to indicate
+       // the client is outside the cluster.
+       External bool
+
+       // Base URIs of Keep services, e.g., {"https://host1:8443",
+       // "https://host2:8443"}.  If this is nil, Keep clients will
+       // use the arvados.v1.keep_services.accessible API to discover
+       // available services.
+       KeepServiceURIs []string
+
+       // Discovery document
+       DiscoveryDoc Dict
+
+       lastClosedIdlesAt time.Time
+
+       // Number of retries
+       Retries int
+
+       // X-Request-Id for outgoing requests
+       RequestID string
+}
+
+var CertFiles = []string{
+       "/etc/arvados/ca-certificates.crt",
+       "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
+       "/etc/pki/tls/certs/ca-bundle.crt",   // Fedora/RHEL
+}
+
+// MakeTLSConfig sets up TLS configuration for communicating with
+// Arvados and Keep services.
+func MakeTLSConfig(insecure bool) *tls.Config {
+       tlsconfig := tls.Config{InsecureSkipVerify: insecure}
+
+       if !insecure {
+               // Use the first entry in CertFiles that we can read
+               // certificates from. If none of those work out, use
+               // the Go defaults.
+               certs := x509.NewCertPool()
+               for _, file := range CertFiles {
+                       data, err := ioutil.ReadFile(file)
+                       if err != nil {
+                               if !os.IsNotExist(err) {
+                                       log.Printf("error reading %q: %s", file, err)
+                               }
+                               continue
+                       }
+                       if !certs.AppendCertsFromPEM(data) {
+                               log.Printf("unable to load any certificates from %v", file)
+                               continue
+                       }
+                       tlsconfig.RootCAs = certs
+                       break
+               }
+       }
+
+       return &tlsconfig
+}
+
+// New returns an ArvadosClient using the given arvados.Client
+// configuration. This is useful for callers who load arvados.Client
+// fields from configuration files but still need to use the
+// arvadosclient.ArvadosClient package.
+func New(c *arvados.Client) (*ArvadosClient, error) {
+       ac := &ArvadosClient{
+               Scheme:      "https",
+               ApiServer:   c.APIHost,
+               ApiToken:    c.AuthToken,
+               ApiInsecure: c.Insecure,
+               Client: &http.Client{
+                       Timeout: 5 * time.Minute,
+                       Transport: &http.Transport{
+                               TLSClientConfig: MakeTLSConfig(c.Insecure)},
+               },
+               External:          false,
+               Retries:           2,
+               KeepServiceURIs:   c.KeepServiceURIs,
+               lastClosedIdlesAt: time.Now(),
+       }
+
+       return ac, nil
+}
+
+// MakeArvadosClient creates a new ArvadosClient using the standard
+// environment variables ARVADOS_API_HOST, ARVADOS_API_TOKEN,
+// ARVADOS_API_HOST_INSECURE, ARVADOS_EXTERNAL_CLIENT, and
+// ARVADOS_KEEP_SERVICES.
+func MakeArvadosClient() (ac *ArvadosClient, err error) {
+       ac, err = New(arvados.NewClientFromEnv())
+       if err != nil {
+               return
+       }
+       ac.External = StringBool(os.Getenv("ARVADOS_EXTERNAL_CLIENT"))
+       return
+}
+
+// CallRaw is the same as Call() but returns a Reader that reads the
+// response body, instead of taking an output object.
+func (c *ArvadosClient) CallRaw(method string, resourceType string, uuid string, action string, parameters Dict) (reader io.ReadCloser, err error) {
+       scheme := c.Scheme
+       if scheme == "" {
+               scheme = "https"
+       }
+       u := url.URL{
+               Scheme: scheme,
+               Host:   c.ApiServer}
+
+       if resourceType != API_DISCOVERY_RESOURCE {
+               u.Path = "/arvados/v1"
+       }
+
+       if resourceType != "" {
+               u.Path = u.Path + "/" + resourceType
+       }
+       if uuid != "" {
+               u.Path = u.Path + "/" + uuid
+       }
+       if action != "" {
+               u.Path = u.Path + "/" + action
+       }
+
+       if parameters == nil {
+               parameters = make(Dict)
+       }
+
+       vals := make(url.Values)
+       for k, v := range parameters {
+               if s, ok := v.(string); ok {
+                       vals.Set(k, s)
+               } else if m, err := json.Marshal(v); err == nil {
+                       vals.Set(k, string(m))
+               }
+       }
+
+       retryable := false
+       switch method {
+       case "GET", "HEAD", "PUT", "OPTIONS", "DELETE":
+               retryable = true
+       }
+
+       // POST and other non-idempotent methods are not safe to retry
+       // automatically, so we minimize such failures by always using a new or recently active socket.
+       if !retryable {
+               if time.Since(c.lastClosedIdlesAt) > MaxIdleConnectionDuration {
+                       c.lastClosedIdlesAt = time.Now()
+                       c.Client.Transport.(*http.Transport).CloseIdleConnections()
+               }
+       }
+
+       // Make the request
+       var req *http.Request
+       var resp *http.Response
+
+       for attempt := 0; attempt <= c.Retries; attempt++ {
+               if method == "GET" || method == "HEAD" {
+                       u.RawQuery = vals.Encode()
+                       if req, err = http.NewRequest(method, u.String(), nil); err != nil {
+                               return nil, err
+                       }
+               } else {
+                       if req, err = http.NewRequest(method, u.String(), bytes.NewBufferString(vals.Encode())); err != nil {
+                               return nil, err
+                       }
+                       req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+               }
+
+               // Add api token header
+               req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", c.ApiToken))
+               if c.RequestID != "" {
+                       req.Header.Add("X-Request-Id", c.RequestID)
+               }
+               if c.External {
+                       req.Header.Add("X-External-Client", "1")
+               }
+
+               resp, err = c.Client.Do(req)
+               if err != nil {
+                       if retryable {
+                               time.Sleep(RetryDelay)
+                               continue
+                       } else {
+                               return nil, err
+                       }
+               }
+
+               if resp.StatusCode == http.StatusOK {
+                       return resp.Body, nil
+               }
+
+               defer resp.Body.Close()
+
+               switch resp.StatusCode {
+               case 408, 409, 422, 423, 500, 502, 503, 504:
+                       time.Sleep(RetryDelay)
+                       continue
+               default:
+                       return nil, newAPIServerError(c.ApiServer, resp)
+               }
+       }
+
+       if resp != nil {
+               return nil, newAPIServerError(c.ApiServer, resp)
+       }
+       return nil, err
+}
+
+func newAPIServerError(serverAddress string, resp *http.Response) APIServerError {
+
+       ase := APIServerError{
+               ServerAddress:     serverAddress,
+               HttpStatusCode:    resp.StatusCode,
+               HttpStatusMessage: resp.Status}
+
+       // If the response body has {"errors":["reason1","reason2"]}
+       // then return those reasons.
+       var errInfo = Dict{}
+       if err := json.NewDecoder(resp.Body).Decode(&errInfo); err == nil {
+               if errorList, ok := errInfo["errors"]; ok {
+                       if errArray, ok := errorList.([]interface{}); ok {
+                               for _, errItem := range errArray {
+                                       // We expect an array of strings here.
+                                       // Non-strings will be passed along
+                                       // JSON-encoded.
+                                       if s, ok := errItem.(string); ok {
+                                               ase.ErrorDetails = append(ase.ErrorDetails, s)
+                                       } else if j, err := json.Marshal(errItem); err == nil {
+                                               ase.ErrorDetails = append(ase.ErrorDetails, string(j))
+                                       }
+                               }
+                       }
+               }
+       }
+       return ase
+}
+
+// Call an API endpoint and parse the JSON response into an object.
+//
+//   method - HTTP method: GET, HEAD, PUT, POST, PATCH or DELETE.
+//   resourceType - the type of arvados resource to act on (e.g., "collections", "pipeline_instances").
+//   uuid - the uuid of the specific item to access. May be empty.
+//   action - API method name (e.g., "lock"). This is often empty if implied by method and uuid.
+//   parameters - method parameters.
+//   output - a map or annotated struct which is a legal target for encoding/json/Decoder.
+//
+// Returns a non-nil error if an error occurs making the API call, the
+// API responds with a non-successful HTTP status, or an error occurs
+// parsing the response body.
+func (c *ArvadosClient) Call(method, resourceType, uuid, action string, parameters Dict, output interface{}) error {
+       reader, err := c.CallRaw(method, resourceType, uuid, action, parameters)
+       if reader != nil {
+               defer reader.Close()
+       }
+       if err != nil {
+               return err
+       }
+
+       if output != nil {
+               dec := json.NewDecoder(reader)
+               if err = dec.Decode(output); err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+// Create creates a new resource. See Call for argument descriptions.
+func (c *ArvadosClient) Create(resourceType string, parameters Dict, output interface{}) error {
+       return c.Call("POST", resourceType, "", "", parameters, output)
+}
+
+// Delete deletes a resource. See Call for argument descriptions.
+func (c *ArvadosClient) Delete(resource string, uuid string, parameters Dict, output interface{}) (err error) {
+       return c.Call("DELETE", resource, uuid, "", parameters, output)
+}
+
+// Update modifies attributes of a resource. See Call for argument descriptions.
+func (c *ArvadosClient) Update(resourceType string, uuid string, parameters Dict, output interface{}) (err error) {
+       return c.Call("PUT", resourceType, uuid, "", parameters, output)
+}
+
+// Get fetches a resource. See Call for argument descriptions.
+func (c *ArvadosClient) Get(resourceType string, uuid string, parameters Dict, output interface{}) (err error) {
+       if !UUIDMatch(uuid) && !(resourceType == "collections" && PDHMatch(uuid)) {
+               // No object has uuid == "": there is no need to make
+               // an API call. Furthermore, the HTTP request for such
+               // an API call would be "GET /arvados/v1/type/", which
+               // is liable to be misinterpreted as the List API.
+               return ErrInvalidArgument
+       }
+       return c.Call("GET", resourceType, uuid, "", parameters, output)
+}
+
+// List lists resources of a given type. See Call for argument descriptions.
+func (c *ArvadosClient) List(resource string, parameters Dict, output interface{}) (err error) {
+       return c.Call("GET", resource, "", "", parameters, output)
+}
+
+const API_DISCOVERY_RESOURCE = "discovery/v1/apis/arvados/v1/rest"
+
+// Discovery returns the value of the given parameter in the discovery
+// document. Returns a non-nil error if the discovery document cannot
+// be retrieved/decoded. Returns ErrInvalidArgument if the requested
+// parameter is not found in the discovery document.
+func (c *ArvadosClient) Discovery(parameter string) (value interface{}, err error) {
+       if len(c.DiscoveryDoc) == 0 {
+               c.DiscoveryDoc = make(Dict)
+               err = c.Call("GET", API_DISCOVERY_RESOURCE, "", "", nil, &c.DiscoveryDoc)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       var found bool
+       value, found = c.DiscoveryDoc[parameter]
+       if found {
+               return value, nil
+       } else {
+               return value, ErrInvalidArgument
+       }
+}
+
+func (ac *ArvadosClient) httpClient() *http.Client {
+       if ac.Client != nil {
+               return ac.Client
+       }
+       c := &defaultSecureHTTPClient
+       if ac.ApiInsecure {
+               c = &defaultInsecureHTTPClient
+       }
+       if *c == nil {
+               defaultHTTPClientMtx.Lock()
+               defer defaultHTTPClientMtx.Unlock()
+               *c = &http.Client{Transport: &http.Transport{
+                       TLSClientConfig: MakeTLSConfig(ac.ApiInsecure)}}
+       }
+       return *c
+}
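
Putting the pieces together, a sketch of typical CRUD use against a cluster reachable via the standard environment variables (the UUID is the FooCollection test fixture; error checks after each call are elided for brevity):

    arv, err := arvadosclient.MakeArvadosClient()
    if err != nil {
        log.Fatal(err)
    }

    // List the five most recently modified collections. String
    // parameter values pass through as-is; other types are JSON-encoded.
    var list arvadosclient.Dict
    err = arv.List("collections", arvadosclient.Dict{
        "limit": 5,
        "order": []string{"modified_at desc"},
    }, &list)

    // Fetch a single collection by UUID.
    var coll arvadosclient.Dict
    err = arv.Get("collections", "zzzzz-4zz18-fy296fx3hot09f7", nil, &coll)
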
diff --git a/sdk/go/arvadosclient/arvadosclient_test.go b/sdk/go/arvadosclient/arvadosclient_test.go
new file mode 100644 (file)
index 0000000..372f09d
--- /dev/null
@@ -0,0 +1,391 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvadosclient
+
+import (
+       "fmt"
+       "net"
+       "net/http"
+       "os"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       . "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       TestingT(t)
+}
+
+var _ = Suite(&ServerRequiredSuite{})
+var _ = Suite(&UnitSuite{})
+var _ = Suite(&MockArvadosServerSuite{})
+
+// Tests that require the Keep server running
+type ServerRequiredSuite struct{}
+
+func (s *ServerRequiredSuite) SetUpSuite(c *C) {
+       arvadostest.StartAPI()
+       arvadostest.StartKeep(2, false)
+       RetryDelay = 0
+}
+
+func (s *ServerRequiredSuite) TearDownSuite(c *C) {
+       arvadostest.StopKeep(2)
+       arvadostest.StopAPI()
+}
+
+func (s *ServerRequiredSuite) SetUpTest(c *C) {
+       arvadostest.ResetEnv()
+}
+
+func (s *ServerRequiredSuite) TestMakeArvadosClientSecure(c *C) {
+       os.Setenv("ARVADOS_API_HOST_INSECURE", "")
+       ac, err := MakeArvadosClient()
+       c.Assert(err, Equals, nil)
+       c.Check(ac.ApiServer, Equals, os.Getenv("ARVADOS_API_HOST"))
+       c.Check(ac.ApiToken, Equals, os.Getenv("ARVADOS_API_TOKEN"))
+       c.Check(ac.ApiInsecure, Equals, false)
+}
+
+func (s *ServerRequiredSuite) TestMakeArvadosClientInsecure(c *C) {
+       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+       ac, err := MakeArvadosClient()
+       c.Assert(err, Equals, nil)
+       c.Check(ac.ApiInsecure, Equals, true)
+       c.Check(ac.ApiServer, Equals, os.Getenv("ARVADOS_API_HOST"))
+       c.Check(ac.ApiToken, Equals, os.Getenv("ARVADOS_API_TOKEN"))
+       c.Check(ac.Client.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, Equals, true)
+}
+
+func (s *ServerRequiredSuite) TestGetInvalidUUID(c *C) {
+       arv, err := MakeArvadosClient()
+
+       getback := make(Dict)
+       err = arv.Get("collections", "", nil, &getback)
+       c.Assert(err, Equals, ErrInvalidArgument)
+       c.Assert(len(getback), Equals, 0)
+
+       err = arv.Get("collections", "zebra-moose-unicorn", nil, &getback)
+       c.Assert(err, Equals, ErrInvalidArgument)
+       c.Assert(len(getback), Equals, 0)
+
+       err = arv.Get("collections", "acbd18db4cc2f85cedef654fccc4a4d8", nil, &getback)
+       c.Assert(err, Equals, ErrInvalidArgument)
+       c.Assert(len(getback), Equals, 0)
+}
+
+func (s *ServerRequiredSuite) TestGetValidUUID(c *C) {
+       arv, err := MakeArvadosClient()
+
+       getback := make(Dict)
+       err = arv.Get("collections", "zzzzz-4zz18-abcdeabcdeabcde", nil, &getback)
+       c.Assert(err, FitsTypeOf, APIServerError{})
+       c.Assert(err.(APIServerError).HttpStatusCode, Equals, http.StatusNotFound)
+       c.Assert(len(getback), Equals, 0)
+
+       err = arv.Get("collections", "acbd18db4cc2f85cedef654fccc4a4d8+3", nil, &getback)
+       c.Assert(err, FitsTypeOf, APIServerError{})
+       c.Assert(err.(APIServerError).HttpStatusCode, Equals, http.StatusNotFound)
+       c.Assert(len(getback), Equals, 0)
+}
+
+func (s *ServerRequiredSuite) TestInvalidResourceType(c *C) {
+       arv, err := MakeArvadosClient()
+
+       getback := make(Dict)
+       err = arv.Get("unicorns", "zzzzz-zebra-unicorn7unicorn", nil, &getback)
+       c.Assert(err, FitsTypeOf, APIServerError{})
+       c.Assert(err.(APIServerError).HttpStatusCode, Equals, http.StatusNotFound)
+       c.Assert(len(getback), Equals, 0)
+
+       err = arv.Update("unicorns", "zzzzz-zebra-unicorn7unicorn", nil, &getback)
+       c.Assert(err, FitsTypeOf, APIServerError{})
+       c.Assert(err.(APIServerError).HttpStatusCode, Equals, http.StatusNotFound)
+       c.Assert(len(getback), Equals, 0)
+
+       err = arv.List("unicorns", nil, &getback)
+       c.Assert(err, FitsTypeOf, APIServerError{})
+       c.Assert(err.(APIServerError).HttpStatusCode, Equals, http.StatusNotFound)
+       c.Assert(len(getback), Equals, 0)
+}
+
+func (s *ServerRequiredSuite) TestCreatePipelineTemplate(c *C) {
+       arv, err := MakeArvadosClient()
+
+       for _, idleConnections := range []bool{
+               false,
+               true,
+       } {
+               if idleConnections {
+                       arv.lastClosedIdlesAt = time.Now().Add(-time.Minute)
+               } else {
+                       arv.lastClosedIdlesAt = time.Now()
+               }
+
+               getback := make(Dict)
+               err = arv.Create("pipeline_templates",
+                       Dict{"pipeline_template": Dict{
+                               "name": "tmp",
+                               "components": Dict{
+                                       "c1": map[string]string{"script": "script1"},
+                                       "c2": map[string]string{"script": "script2"}}}},
+                       &getback)
+               c.Assert(err, Equals, nil)
+               c.Assert(getback["name"], Equals, "tmp")
+               c.Assert(getback["components"].(map[string]interface{})["c2"].(map[string]interface{})["script"], Equals, "script2")
+
+               uuid := getback["uuid"].(string)
+
+               getback = make(Dict)
+               err = arv.Get("pipeline_templates", uuid, nil, &getback)
+               c.Assert(err, Equals, nil)
+               c.Assert(getback["name"], Equals, "tmp")
+               c.Assert(getback["components"].(map[string]interface{})["c1"].(map[string]interface{})["script"], Equals, "script1")
+
+               getback = make(Dict)
+               err = arv.Update("pipeline_templates", uuid,
+                       Dict{
+                               "pipeline_template": Dict{"name": "tmp2"}},
+                       &getback)
+               c.Assert(err, Equals, nil)
+               c.Assert(getback["name"], Equals, "tmp2")
+
+               c.Assert(getback["uuid"].(string), Equals, uuid)
+               getback = make(Dict)
+               err = arv.Delete("pipeline_templates", uuid, nil, &getback)
+               c.Assert(err, Equals, nil)
+               c.Assert(getback["name"], Equals, "tmp2")
+       }
+}
+
+func (s *ServerRequiredSuite) TestErrorResponse(c *C) {
+       arv, _ := MakeArvadosClient()
+
+       getback := make(Dict)
+
+       {
+               err := arv.Create("logs",
+                       Dict{"log": Dict{"bogus_attr": "foo"}},
+                       &getback)
+               c.Assert(err, ErrorMatches, "arvados API server error: .*")
+               c.Assert(err, ErrorMatches, ".*unknown attribute(: | ')bogus_attr.*")
+               c.Assert(err, FitsTypeOf, APIServerError{})
+               c.Assert(err.(APIServerError).HttpStatusCode, Equals, 422)
+       }
+
+       {
+               err := arv.Create("bogus",
+                       Dict{"bogus": Dict{}},
+                       &getback)
+               c.Assert(err, ErrorMatches, "arvados API server error: .*")
+               c.Assert(err, ErrorMatches, ".*Path not found.*")
+               c.Assert(err, FitsTypeOf, APIServerError{})
+               c.Assert(err.(APIServerError).HttpStatusCode, Equals, 404)
+       }
+}
+
+func (s *ServerRequiredSuite) TestAPIDiscovery_Get_defaultCollectionReplication(c *C) {
+       arv, err := MakeArvadosClient()
+       value, err := arv.Discovery("defaultCollectionReplication")
+       c.Assert(err, IsNil)
+       c.Assert(value, NotNil)
+}
+
+func (s *ServerRequiredSuite) TestAPIDiscovery_Get_noSuchParameter(c *C) {
+       arv, err := MakeArvadosClient()
+       value, err := arv.Discovery("noSuchParameter")
+       c.Assert(err, NotNil)
+       c.Assert(value, IsNil)
+}
+
+type UnitSuite struct{}
+
+func (s *UnitSuite) TestUUIDMatch(c *C) {
+       c.Assert(UUIDMatch("zzzzz-tpzed-000000000000000"), Equals, true)
+       c.Assert(UUIDMatch("zzzzz-zebra-000000000000000"), Equals, true)
+       c.Assert(UUIDMatch("00000-00000-zzzzzzzzzzzzzzz"), Equals, true)
+       c.Assert(UUIDMatch("ZEBRA-HORSE-AFRICANELEPHANT"), Equals, false)
+       c.Assert(UUIDMatch(" zzzzz-tpzed-000000000000000"), Equals, false)
+       c.Assert(UUIDMatch("d41d8cd98f00b204e9800998ecf8427e"), Equals, false)
+       c.Assert(UUIDMatch("d41d8cd98f00b204e9800998ecf8427e+0"), Equals, false)
+       c.Assert(UUIDMatch(""), Equals, false)
+}
+
+func (s *UnitSuite) TestPDHMatch(c *C) {
+       c.Assert(PDHMatch("zzzzz-tpzed-000000000000000"), Equals, false)
+       c.Assert(PDHMatch("d41d8cd98f00b204e9800998ecf8427e"), Equals, false)
+       c.Assert(PDHMatch("d41d8cd98f00b204e9800998ecf8427e+0"), Equals, true)
+       c.Assert(PDHMatch("d41d8cd98f00b204e9800998ecf8427e+12345"), Equals, true)
+       c.Assert(PDHMatch("d41d8cd98f00b204e9800998ecf8427e 12345"), Equals, false)
+       c.Assert(PDHMatch("D41D8CD98F00B204E9800998ECF8427E+12345"), Equals, false)
+       c.Assert(PDHMatch("d41d8cd98f00b204e9800998ecf8427e+12345 "), Equals, false)
+       c.Assert(PDHMatch("d41d8cd98f00b204e9800998ecf8427e+abcdef"), Equals, false)
+       c.Assert(PDHMatch("da39a3ee5e6b4b0d3255bfef95601890afd80709"), Equals, false)
+       c.Assert(PDHMatch("da39a3ee5e6b4b0d3255bfef95601890afd80709+0"), Equals, false)
+       c.Assert(PDHMatch("d41d8cd98f00b204e9800998ecf8427+12345"), Equals, false)
+       c.Assert(PDHMatch("d41d8cd98f00b204e9800998ecf8427e+12345\n"), Equals, false)
+       c.Assert(PDHMatch("+12345"), Equals, false)
+       c.Assert(PDHMatch(""), Equals, false)
+}
+
+// Tests that use mock arvados server
+type MockArvadosServerSuite struct{}
+
+func (s *MockArvadosServerSuite) SetUpSuite(c *C) {
+       RetryDelay = 0
+}
+
+func (s *MockArvadosServerSuite) SetUpTest(c *C) {
+       arvadostest.ResetEnv()
+}
+
+type APIServer struct {
+       listener net.Listener
+       url      string
+}
+
+func RunFakeArvadosServer(st http.Handler) (api APIServer, err error) {
+       api.listener, err = net.ListenTCP("tcp", &net.TCPAddr{Port: 0})
+       if err != nil {
+               return
+       }
+       api.url = api.listener.Addr().String()
+       go http.Serve(api.listener, st)
+       return
+}
+
+type APIStub struct {
+       method        string
+       retryAttempts int
+       expected      int
+       respStatus    []int
+       responseBody  []string
+}
+
+func (h *APIStub) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       if req.URL.Path == "/redirect-loop" {
+               http.Redirect(resp, req, "/redirect-loop", http.StatusFound)
+               return
+       }
+       if h.respStatus[h.retryAttempts] < 0 {
+               // Fail the client's Do() by starting a redirect loop
+               http.Redirect(resp, req, "/redirect-loop", http.StatusFound)
+       } else {
+               resp.WriteHeader(h.respStatus[h.retryAttempts])
+               resp.Write([]byte(h.responseBody[h.retryAttempts]))
+       }
+       h.retryAttempts++
+}
+
+func (s *MockArvadosServerSuite) TestWithRetries(c *C) {
+       for _, stub := range []APIStub{
+               {
+                       "get", 0, 200, []int{200, 500}, []string{`{"ok":"ok"}`, ``},
+               },
+               {
+                       "create", 0, 200, []int{200, 500}, []string{`{"ok":"ok"}`, ``},
+               },
+               {
+                       "get", 0, 500, []int{500, 500, 500, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
+               },
+               {
+                       "create", 0, 500, []int{500, 500, 500, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
+               },
+               {
+                       "update", 0, 500, []int{500, 500, 500, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
+               },
+               {
+                       "delete", 0, 500, []int{500, 500, 500, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
+               },
+               {
+                       "get", 0, 502, []int{500, 500, 502, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
+               },
+               {
+                       "create", 0, 502, []int{500, 500, 502, 200}, []string{``, ``, ``, `{"ok":"ok"}`},
+               },
+               {
+                       "get", 0, 200, []int{500, 500, 200}, []string{``, ``, `{"ok":"ok"}`},
+               },
+               {
+                       "create", 0, 200, []int{500, 500, 200}, []string{``, ``, `{"ok":"ok"}`},
+               },
+               {
+                       "delete", 0, 200, []int{500, 500, 200}, []string{``, ``, `{"ok":"ok"}`},
+               },
+               {
+                       "update", 0, 200, []int{500, 500, 200}, []string{``, ``, `{"ok":"ok"}`},
+               },
+               {
+                       "get", 0, 401, []int{401, 200}, []string{``, `{"ok":"ok"}`},
+               },
+               {
+                       "create", 0, 401, []int{401, 200}, []string{``, `{"ok":"ok"}`},
+               },
+               {
+                       "get", 0, 404, []int{404, 200}, []string{``, `{"ok":"ok"}`},
+               },
+               {
+                       "get", 0, 401, []int{500, 401, 200}, []string{``, ``, `{"ok":"ok"}`},
+               },
+
+               // Response code -1 simulates an HTTP/network error
+               // (i.e., Do() returns an error; there is no HTTP
+               // response status code).
+
+               // Succeed on second retry
+               {
+                       "get", 0, 200, []int{-1, -1, 200}, []string{``, ``, `{"ok":"ok"}`},
+               },
+               // "POST" is not safe to retry: fail after one error
+               {
+                       "create", 0, -1, []int{-1, 200}, []string{``, `{"ok":"ok"}`},
+               },
+       } {
+               api, err := RunFakeArvadosServer(&stub)
+               c.Check(err, IsNil)
+
+               defer api.listener.Close()
+
+               arv := ArvadosClient{
+                       Scheme:      "http",
+                       ApiServer:   api.url,
+                       ApiToken:    "abc123",
+                       ApiInsecure: true,
+                       Client:      &http.Client{Transport: &http.Transport{}},
+                       Retries:     2}
+
+               getback := make(Dict)
+               switch stub.method {
+               case "get":
+                       err = arv.Get("collections", "zzzzz-4zz18-znfnqtbbv4spc3w", nil, &getback)
+               case "create":
+                       err = arv.Create("collections",
+                               Dict{"collection": Dict{"name": "testing"}},
+                               &getback)
+               case "update":
+                       err = arv.Update("collections", "zzzzz-4zz18-znfnqtbbv4spc3w",
+                               Dict{"collection": Dict{"name": "testing"}},
+                               &getback)
+               case "delete":
+                       err = arv.Delete("pipeline_templates", "zzzzz-4zz18-znfnqtbbv4spc3w", nil, &getback)
+               }
+
+               switch stub.expected {
+               case 200:
+                       c.Check(err, IsNil)
+                       c.Check(getback["ok"], Equals, "ok")
+               case -1:
+                       c.Check(err, NotNil)
+                       c.Check(err, ErrorMatches, `.*stopped after \d+ redirects`)
+               default:
+                       c.Check(err, NotNil)
+                       c.Check(err, ErrorMatches, fmt.Sprintf("arvados API server error: %d.*", stub.expected))
+                       c.Check(err.(APIServerError).HttpStatusCode, Equals, stub.expected)
+               }
+       }
+}
diff --git a/sdk/go/arvadosclient/pool.go b/sdk/go/arvadosclient/pool.go
new file mode 100644 (file)
index 0000000..7320807
--- /dev/null
@@ -0,0 +1,68 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvadosclient
+
+import (
+       "sync"
+)
+
+// A ClientPool is a pool of ArvadosClients. This is useful for
+// applications that make API calls using a dynamic set of tokens,
+// like web services that pass through their own clients'
+// credentials. See arvados-git-httpd for an example, and sync.Pool
+// for more information about garbage collection.
+type ClientPool struct {
+       // Initialize new clients by copying this one.
+       Prototype *ArvadosClient
+
+       pool      *sync.Pool
+       lastErr   error
+       setupOnce sync.Once
+}
+
+// MakeClientPool returns a new empty ClientPool, using environment
+// variables to initialize the prototype.
+func MakeClientPool() *ClientPool {
+       proto, err := MakeArvadosClient()
+       return &ClientPool{
+               Prototype: proto,
+               lastErr:   err,
+       }
+}
+
+func (p *ClientPool) setup() {
+       p.pool = &sync.Pool{New: func() interface{} {
+               if p.lastErr != nil {
+                       return nil
+               }
+               c := *p.Prototype
+               return &c
+       }}
+}
+
+// Err returns the error that was encountered last time Get returned
+// nil.
+func (p *ClientPool) Err() error {
+       return p.lastErr
+}
+
+// Get returns an ArvadosClient taken from the pool, or a new one if
+// the pool is empty. If an existing client is returned, its state
+// (including its ApiToken) will be just as it was when it was Put
+// back in the pool.
+func (p *ClientPool) Get() *ArvadosClient {
+       p.setupOnce.Do(p.setup)
+       c, ok := p.pool.Get().(*ArvadosClient)
+       if !ok {
+               return nil
+       }
+       return c
+}
+
+// Put puts an ArvadosClient back in the pool.
+func (p *ClientPool) Put(c *ArvadosClient) {
+       p.setupOnce.Do(p.setup)
+       p.pool.Put(c)
+}
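
A sketch of the pass-through-credentials pattern the doc comment describes, in the style of arvados-git-httpd (the handler and header parsing are simplified illustrations):

    pool := arvadosclient.MakeClientPool()

    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        ac := pool.Get()
        if ac == nil {
            http.Error(w, "client pool: "+pool.Err().Error(), http.StatusInternalServerError)
            return
        }
        defer pool.Put(ac)
        // Use the caller's token instead of the prototype's.
        ac.ApiToken = strings.TrimPrefix(r.Header.Get("Authorization"), "OAuth2 ")
        // ... make API calls on the caller's behalf with ac ...
    })
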
diff --git a/sdk/go/arvadostest/fixtures.go b/sdk/go/arvadostest/fixtures.go
new file mode 100644 (file)
index 0000000..4f648e9
--- /dev/null
@@ -0,0 +1,81 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvadostest
+
+// IDs of API server's test fixtures
+const (
+       SpectatorToken          = "zw2f4gwx8hw8cjre7yp6v1zylhrhn3m5gvjq73rtpwhmknrybu"
+       ActiveToken             = "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
+       ActiveTokenUUID         = "zzzzz-gj3su-077z32aux8dg2s1"
+       ActiveTokenV2           = "v2/zzzzz-gj3su-077z32aux8dg2s1/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
+       AdminToken              = "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h"
+       AnonymousToken          = "4kg6k6lzmp9kj4cpkcoxie964cmvjahbt4fod9zru44k4jqdmi"
+       DataManagerToken        = "320mkve8qkswstz7ff61glpk3mhgghmg67wmic7elw4z41pke1"
+       ManagementToken         = "jg3ajndnq63sywcd50gbs5dskdc9ckkysb0nsqmfz08nwf17nl"
+       ActiveUserUUID          = "zzzzz-tpzed-xurymjxw79nv3jz"
+       FederatedActiveUserUUID = "zbbbb-tpzed-xurymjxw79nv3jz"
+       SpectatorUserUUID       = "zzzzz-tpzed-l1s2piq4t4mps8r"
+       UserAgreementCollection = "zzzzz-4zz18-uukreo9rbgwsujr" // user_agreement_in_anonymously_accessible_project
+       FooCollectionName       = "zzzzz-4zz18-fy296fx3hot09f7 added sometime"
+       FooCollection           = "zzzzz-4zz18-fy296fx3hot09f7"
+       FooCollectionPDH        = "1f4b0bc7583c2a7f9102c395f4ffc5e3+45"
+       NonexistentCollection   = "zzzzz-4zz18-totallynotexist"
+       HelloWorldCollection    = "zzzzz-4zz18-4en62shvi99lxd4"
+       FooBarDirCollection     = "zzzzz-4zz18-foonbarfilesdir"
+       WazVersion1Collection   = "zzzzz-4zz18-25k12570yk1ver1"
+       UserAgreementPDH        = "b519d9cb706a29fc7ea24dbea2f05851+93"
+       FooPdh                  = "1f4b0bc7583c2a7f9102c395f4ffc5e3+45"
+       HelloWorldPdh           = "55713e6a34081eb03609e7ad5fcad129+62"
+
+       AProjectUUID    = "zzzzz-j7d0g-v955i6s2oi1cbso"
+       ASubprojectUUID = "zzzzz-j7d0g-axqo7eu9pwvna1x"
+
+       FooAndBarFilesInDirUUID = "zzzzz-4zz18-foonbarfilesdir"
+       FooAndBarFilesInDirPDH  = "6bbac24198d09a93975f60098caf0bdf+62"
+
+       Dispatch1Token    = "kwi8oowusvbutahacwk2geulqewy5oaqmpalczfna4b6bb0hfw"
+       Dispatch1AuthUUID = "zzzzz-gj3su-k9dvestay1plssr"
+
+       QueuedContainerRequestUUID = "zzzzz-xvhdp-cr4queuedcontnr"
+       QueuedContainerUUID        = "zzzzz-dz642-queuedcontainer"
+
+       RunningContainerUUID = "zzzzz-dz642-runningcontainr"
+
+       CompletedContainerUUID = "zzzzz-dz642-compltcontainer"
+
+       ArvadosRepoUUID = "zzzzz-s0uqq-arvadosrepo0123"
+       ArvadosRepoName = "arvados"
+       FooRepoUUID     = "zzzzz-s0uqq-382brsig8rp3666"
+       FooRepoName     = "active/foo"
+       Repository2UUID = "zzzzz-s0uqq-382brsig8rp3667"
+       Repository2Name = "active/foo2"
+
+       FooCollectionSharingTokenUUID = "zzzzz-gj3su-gf02tdm4g1z3e3u"
+       FooCollectionSharingToken     = "iknqgmunrhgsyfok8uzjlwun9iscwm3xacmzmg65fa1j1lpdss"
+
+       WorkflowWithDefinitionYAMLUUID = "zzzzz-7fd4e-validworkfloyml"
+)
+
+// PathologicalManifest is a valid manifest designed to test
+// various edge cases and parsing requirements.
+const PathologicalManifest = ". acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 73feffa4b7f6bb68e44cf984c85f6e88+3+Z+K@xyzzy acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:zero@0 0:1:f 1:0:zero@1 1:4:ooba 4:0:zero@4 5:1:r 5:4:rbaz 9:0:zero@9\n" +
+       "./overlapReverse acbd18db4cc2f85cedef654fccc4a4d8+3 acbd18db4cc2f85cedef654fccc4a4d8+3 5:1:o 4:2:oo 2:4:ofoo\n" +
+       "./segmented acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 0:1:frob 5:1:frob 1:1:frob 1:2:oof 0:1:oof 5:0:frob 3:1:frob\n" +
+       `./foo\040b\141r acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:baz` + "\n" +
+       `./foo\040b\141r acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:b\141z\040w\141z` + "\n" +
+       "./foo acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:zero 0:3:foo\n" +
+       ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:foo/zero 0:3:foo/foo\n"
+
+// An MD5 collision.
+var (
+       MD5CollisionData = [][]byte{
+               []byte("\x0e0eaU\x9a\xa7\x87\xd0\x0b\xc6\xf7\x0b\xbd\xfe4\x04\xcf\x03e\x9epO\x854\xc0\x0f\xfbe\x9cL\x87@\xcc\x94/\xeb-\xa1\x15\xa3\xf4\x15\\\xbb\x86\x07Is\x86em}\x1f4\xa4 Y\xd7\x8fZ\x8d\xd1\xef"),
+               []byte("\x0e0eaU\x9a\xa7\x87\xd0\x0b\xc6\xf7\x0b\xbd\xfe4\x04\xcf\x03e\x9etO\x854\xc0\x0f\xfbe\x9cL\x87@\xcc\x94/\xeb-\xa1\x15\xa3\xf4\x15\xdc\xbb\x86\x07Is\x86em}\x1f4\xa4 Y\xd7\x8fZ\x8d\xd1\xef"),
+       }
+       MD5CollisionMD5 = "cee9a457e790cf20d4bdaa6d69f01e41"
+)
+
+// BlobSigningKey used by the test servers
+const BlobSigningKey = "zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc"
diff --git a/sdk/go/arvadostest/run_servers.go b/sdk/go/arvadostest/run_servers.go
new file mode 100644 (file)
index 0000000..490a7f3
--- /dev/null
@@ -0,0 +1,178 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvadostest
+
+import (
+       "bufio"
+       "bytes"
+       "fmt"
+       "io/ioutil"
+       "log"
+       "os"
+       "os/exec"
+       "path"
+       "strconv"
+       "strings"
+)
+
+var authSettings = make(map[string]string)
+
+// ResetEnv restores the environment variables recorded by ParseAuthSettings.
+func ResetEnv() {
+       for k, v := range authSettings {
+               os.Setenv(k, v)
+       }
+}
+
+// APIHost returns the address:port of the current test server.
+func APIHost() string {
+       h := authSettings["ARVADOS_API_HOST"]
+       if h == "" {
+               log.Fatal("arvadostest.APIHost() was called but authSettings is not populated")
+       }
+       return h
+}
+
+// ParseAuthSettings parses the "export VAR=value" lines emitted by
+// run_test_server.py and records them in authSettings.
+func ParseAuthSettings(authScript []byte) {
+       scanner := bufio.NewScanner(bytes.NewReader(authScript))
+       for scanner.Scan() {
+               line := scanner.Text()
+               if !strings.HasPrefix(line, "export ") {
+                       log.Printf("Ignoring: %v", line)
+                       continue
+               }
+               toks := strings.SplitN(strings.Replace(line, "export ", "", 1), "=", 2)
+               if len(toks) == 2 {
+                       authSettings[toks[0]] = toks[1]
+               } else {
+                       log.Fatalf("Could not parse: %v", line)
+               }
+       }
+       log.Printf("authSettings: %v", authSettings)
+}
+
+var pythonTestDir string
+
+func chdirToPythonTests() {
+       if pythonTestDir != "" {
+               if err := os.Chdir(pythonTestDir); err != nil {
+                       log.Fatalf("chdir %s: %s", pythonTestDir, err)
+               }
+               return
+       }
+       for {
+               if err := os.Chdir("sdk/python/tests"); err == nil {
+                       pythonTestDir, err = os.Getwd()
+                       if err != nil {
+                               log.Fatal(err)
+                       }
+                       return
+               }
+               if parent, err := os.Getwd(); err != nil || parent == "/" {
+                       log.Fatalf("sdk/python/tests/ not found in any ancestor")
+               }
+               if err := os.Chdir(".."); err != nil {
+                       log.Fatal(err)
+               }
+       }
+}
+
+// StartAPI starts test API server
+func StartAPI() {
+       cwd, _ := os.Getwd()
+       defer os.Chdir(cwd)
+       chdirToPythonTests()
+
+       cmd := exec.Command("python", "run_test_server.py", "start", "--auth", "admin")
+       cmd.Stdin = nil
+       cmd.Stderr = os.Stderr
+
+       authScript, err := cmd.Output()
+       if err != nil {
+               log.Fatalf("%+v: %s", cmd.Args, err)
+       }
+       ParseAuthSettings(authScript)
+       ResetEnv()
+}
+
+// StopAPI stops test API server
+func StopAPI() {
+       cwd, _ := os.Getwd()
+       defer os.Chdir(cwd)
+       chdirToPythonTests()
+
+       cmd := exec.Command("python", "run_test_server.py", "stop")
+       bgRun(cmd)
+       // Without Wait, "go test" in go1.10.1 tends to hang. https://github.com/golang/go/issues/24050
+       cmd.Wait()
+}
+
+// StartKeep starts the given number of keep servers,
+// optionally with -enforce-permissions enabled.
+// Use numKeepServers = 2 and enforcePermissions = false under all normal circumstances.
+func StartKeep(numKeepServers int, enforcePermissions bool) {
+       cwd, _ := os.Getwd()
+       defer os.Chdir(cwd)
+       chdirToPythonTests()
+
+       cmdArgs := []string{"run_test_server.py", "start_keep", "--num-keep-servers", strconv.Itoa(numKeepServers)}
+       if enforcePermissions {
+               cmdArgs = append(cmdArgs, "--keep-enforce-permissions")
+       }
+
+       bgRun(exec.Command("python", cmdArgs...))
+}
+
+// StopKeep stops keep servers that were started with StartKeep.
+// numkeepServers should be the same value that was passed to StartKeep,
+// which is 2 under all normal circumstances.
+func StopKeep(numKeepServers int) {
+       cwd, _ := os.Getwd()
+       defer os.Chdir(cwd)
+       chdirToPythonTests()
+
+       cmd := exec.Command("python", "run_test_server.py", "stop_keep", "--num-keep-servers", strconv.Itoa(numKeepServers))
+       bgRun(cmd)
+       // Without Wait, "go test" in go1.10.1 tends to hang. https://github.com/golang/go/issues/24050
+       cmd.Wait()
+}
+
+// bgRun starts cmd with stderr and stdout redirected to our own
+// stderr. It returns when the process exits, but does not wait for
+// its stderr and stdout to close: any grandchild processes will
+// continue writing to our stderr.
+func bgRun(cmd *exec.Cmd) {
+       cmd.Stdin = nil
+       cmd.Stderr = os.Stderr
+       cmd.Stdout = os.Stderr
+       if err := cmd.Start(); err != nil {
+               log.Fatalf("%+v: %s", cmd.Args, err)
+       }
+       if _, err := cmd.Process.Wait(); err != nil {
+               log.Fatalf("%+v: %s", cmd.Args, err)
+       }
+}
+
+// CreateBadPath creates a temporary directory and returns a path
+// inside it that is guaranteed not to exist.
+func CreateBadPath() (badpath string, err error) {
+       tempdir, err := ioutil.TempDir("", "bad")
+       if err != nil {
+               return "", fmt.Errorf("Could not create temporary directory for bad path: %v", err)
+       }
+       badpath = path.Join(tempdir, "bad")
+       return badpath, nil
+}
+
+// DestroyBadPath removes the temporary directory created by a previous CreateBadPath call.
+func DestroyBadPath(badpath string) error {
+       tempdir := path.Join(badpath, "..")
+       err := os.Remove(tempdir)
+       if err != nil {
+               return fmt.Errorf("Could not remove bad path temporary directory %v: %v", tempdir, err)
+       }
+       return nil
+}
diff --git a/sdk/go/arvadostest/stub.go b/sdk/go/arvadostest/stub.go
new file mode 100644 (file)
index 0000000..89925a9
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvadostest
+
+import (
+       "net/http"
+)
+
+// StubResponse defines a canned response status and body.
+type StubResponse struct {
+       Status int
+       Body   string
+}
+
+// ServerStub is an http.Handler that returns a canned StubResponse per request path.
+// Ex:  /arvados/v1/keep_services = arvadostest.StubResponse{200, string(`{}`)}
+type ServerStub struct {
+       Responses map[string]StubResponse
+}
+
+func (stub *ServerStub) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       if req.URL.Path == "/redirect-loop" {
+               http.Redirect(resp, req, "/redirect-loop", http.StatusFound)
+               return
+       }
+
+       pathResponse := stub.Responses[req.URL.Path]
+       if pathResponse.Status == -1 {
+               http.Redirect(resp, req, "/redirect-loop", http.StatusFound)
+       } else if pathResponse.Body != "" {
+               resp.WriteHeader(pathResponse.Status)
+               resp.Write([]byte(pathResponse.Body))
+       } else {
+               resp.WriteHeader(500)
+               resp.Write([]byte(``))
+       }
+}
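
Because ServerStub implements http.Handler, it plugs straight into net/http/httptest. A hedged sketch (the response body is a made-up fragment; paths without an entry in Responses get a 500):

    stub := &arvadostest.ServerStub{
        Responses: map[string]arvadostest.StubResponse{
            "/arvados/v1/users/current": {Status: 200, Body: `{"uuid":"zzzzz-tpzed-xurymjxw79nv3jz"}`},
        },
    }
    srv := httptest.NewServer(stub)
    defer srv.Close()
    // Point a client at srv.Listener.Addr().String() (or srv.URL).
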
diff --git a/sdk/go/asyncbuf/buf.go b/sdk/go/asyncbuf/buf.go
new file mode 100644 (file)
index 0000000..05af02f
--- /dev/null
@@ -0,0 +1,108 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package asyncbuf
+
+import (
+       "bytes"
+       "io"
+       "sync"
+)
+
+// A Buffer is an io.Writer that distributes written data
+// asynchronously to multiple concurrent readers.
+//
+// NewReader() can be called at any time. In all cases, every returned
+// io.Reader reads all data written to the Buffer.
+//
+// Behavior is undefined if Write is called after Close or
+// CloseWithError.
+type Buffer interface {
+       io.WriteCloser
+
+       // NewReader() returns an io.Reader that reads all data
+       // written to the Buffer.
+       NewReader() io.Reader
+
+       // Close, but return the given error (instead of io.EOF) to
+       // all readers when they reach the end of the buffer.
+       //
+       // CloseWithError(nil) is equivalent to
+       // CloseWithError(io.EOF).
+       CloseWithError(error) error
+}
+
+type buffer struct {
+       data *bytes.Buffer
+       cond sync.Cond
+       err  error // nil if there might be more writes
+}
+
+// NewBuffer creates a new Buffer using buf as its initial
+// contents. The new Buffer takes ownership of buf, and the caller
+// should not use buf after this call.
+func NewBuffer(buf []byte) Buffer {
+       return &buffer{
+               data: bytes.NewBuffer(buf),
+               cond: sync.Cond{L: &sync.Mutex{}},
+       }
+}
+
+func (b *buffer) Write(p []byte) (int, error) {
+       defer b.cond.Broadcast()
+       b.cond.L.Lock()
+       defer b.cond.L.Unlock()
+       if b.err != nil {
+               return 0, b.err
+       }
+       return b.data.Write(p)
+}
+
+func (b *buffer) Close() error {
+       return b.CloseWithError(nil)
+}
+
+func (b *buffer) CloseWithError(err error) error {
+       defer b.cond.Broadcast()
+       b.cond.L.Lock()
+       defer b.cond.L.Unlock()
+       if err == nil {
+               b.err = io.EOF
+       } else {
+               b.err = err
+       }
+       return nil
+}
+
+func (b *buffer) NewReader() io.Reader {
+       return &reader{b: b}
+}
+
+type reader struct {
+       b    *buffer
+       read int // # bytes already read
+}
+
+func (r *reader) Read(p []byte) (int, error) {
+       r.b.cond.L.Lock()
+       for {
+               switch {
+               case r.read < r.b.data.Len():
+                       buf := r.b.data.Bytes()
+                       r.b.cond.L.Unlock()
+                       n := copy(p, buf[r.read:])
+                       r.read += n
+                       return n, nil
+               case r.b.err != nil || len(p) == 0:
+                       // r.b.err != nil means we reached EOF.  And
+                       // even if we're not at EOF, there's no need
+                       // to block if len(p)==0.
+                       err := r.b.err
+                       r.b.cond.L.Unlock()
+                       return 0, err
+               default:
+                       r.b.cond.Wait()
+               }
+       }
+}
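
A sketch of the one-writer/many-readers pattern this buffer supports, mirroring the tests that follow:

    b := asyncbuf.NewBuffer(nil)
    r1 := b.NewReader()
    r2 := b.NewReader()

    go func() {
        b.Write([]byte("hello, "))
        b.Write([]byte("world"))
        b.Close() // readers see io.EOF after the last byte
    }()

    buf1, _ := ioutil.ReadAll(r1)
    buf2, _ := ioutil.ReadAll(r2)
    fmt.Println(string(buf1), string(buf2)) // both readers see all the data
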
diff --git a/sdk/go/asyncbuf/buf_test.go b/sdk/go/asyncbuf/buf_test.go
new file mode 100644 (file)
index 0000000..cc742a8
--- /dev/null
@@ -0,0 +1,245 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package asyncbuf
+
+import (
+       "crypto/md5"
+       "errors"
+       "io"
+       "io/ioutil"
+       "math/rand"
+       "sync"
+       "sync/atomic"
+       "testing"
+       "time"
+
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&Suite{})
+
+type Suite struct{}
+
+func (s *Suite) TestNoWrites(c *check.C) {
+       b := NewBuffer(nil)
+       r1 := b.NewReader()
+       r2 := b.NewReader()
+       b.Close()
+       s.checkReader(c, r1, []byte{}, nil, nil)
+       s.checkReader(c, r2, []byte{}, nil, nil)
+}
+
+func (s *Suite) TestNoReaders(c *check.C) {
+       b := NewBuffer(nil)
+       n, err := b.Write([]byte("foobar"))
+       err2 := b.Close()
+       c.Check(n, check.Equals, 6)
+       c.Check(err, check.IsNil)
+       c.Check(err2, check.IsNil)
+}
+
+func (s *Suite) TestWriteReadClose(c *check.C) {
+       done := make(chan bool, 2)
+       b := NewBuffer(nil)
+       n, err := b.Write([]byte("foobar"))
+       c.Check(n, check.Equals, 6)
+       c.Check(err, check.IsNil)
+       r1 := b.NewReader()
+       r2 := b.NewReader()
+       go s.checkReader(c, r1, []byte("foobar"), nil, done)
+       go s.checkReader(c, r2, []byte("foobar"), nil, done)
+       time.Sleep(time.Millisecond)
+       c.Check(len(done), check.Equals, 0)
+       b.Close()
+       <-done
+       <-done
+}
+
+func (s *Suite) TestPrefillWriteCloseRead(c *check.C) {
+       done := make(chan bool, 2)
+       b := NewBuffer([]byte("baz"))
+       n, err := b.Write([]byte("waz"))
+       c.Check(n, check.Equals, 3)
+       c.Check(err, check.IsNil)
+       b.Close()
+       r1 := b.NewReader()
+       go s.checkReader(c, r1, []byte("bazwaz"), nil, done)
+       r2 := b.NewReader()
+       go s.checkReader(c, r2, []byte("bazwaz"), nil, done)
+       <-done
+       <-done
+}
+
+func (s *Suite) TestWriteReadCloseRead(c *check.C) {
+       done := make(chan bool, 1)
+       b := NewBuffer(nil)
+       r1 := b.NewReader()
+       go s.checkReader(c, r1, []byte("bazwazqux"), nil, done)
+
+       b.Write([]byte("bazwaz"))
+
+       r2 := b.NewReader()
+       r2.Read(make([]byte, 3))
+
+       b.Write([]byte("qux"))
+       b.Close()
+
+       s.checkReader(c, r2, []byte("wazqux"), nil, nil)
+       <-done
+}
+
+func (s *Suite) TestReadAtEOF(c *check.C) {
+       buf := make([]byte, 8)
+
+       b := NewBuffer([]byte{1, 2, 3})
+
+       r := b.NewReader()
+       n, err := r.Read(buf)
+       c.Check(n, check.Equals, 3)
+       c.Check(err, check.IsNil)
+
+       // Reading zero bytes at EOF, but before Close(), doesn't
+       // block or error
+       done := make(chan bool)
+       go func() {
+               defer close(done)
+               n, err = r.Read(buf[:0])
+               c.Check(n, check.Equals, 0)
+               c.Check(err, check.IsNil)
+       }()
+       select {
+       case <-done:
+       case <-time.After(time.Second):
+               c.Error("timeout")
+       }
+
+       b.Close()
+
+       // Reading zero bytes after Close() returns EOF
+       n, err = r.Read(buf[:0])
+       c.Check(n, check.Equals, 0)
+       c.Check(err, check.Equals, io.EOF)
+
+       // Reading from start after Close() returns 3 bytes, then EOF
+       r = b.NewReader()
+       n, err = r.Read(buf)
+       c.Check(n, check.Equals, 3)
+       if err != nil {
+               c.Check(err, check.Equals, io.EOF)
+       }
+       n, err = r.Read(buf[:0])
+       c.Check(n, check.Equals, 0)
+       c.Check(err, check.Equals, io.EOF)
+       n, err = r.Read(buf)
+       c.Check(n, check.Equals, 0)
+       c.Check(err, check.Equals, io.EOF)
+}
+
+func (s *Suite) TestCloseWithError(c *check.C) {
+       errFake := errors.New("it's not even a real error")
+
+       done := make(chan bool, 1)
+       b := NewBuffer(nil)
+       r1 := b.NewReader()
+       go s.checkReader(c, r1, []byte("bazwazqux"), errFake, done)
+
+       b.Write([]byte("bazwaz"))
+
+       r2 := b.NewReader()
+       r2.Read(make([]byte, 3))
+
+       b.Write([]byte("qux"))
+       b.CloseWithError(errFake)
+
+       s.checkReader(c, r2, []byte("wazqux"), errFake, nil)
+       <-done
+}
+
+// Write n*n bytes, n at a time; read them into n goroutines using
+// varying buffer sizes; compare checksums.
+func (s *Suite) TestManyReaders(c *check.C) {
+       const n = 256
+
+       b := NewBuffer(nil)
+
+       expectSum := make(chan []byte)
+       go func() {
+               hash := md5.New()
+               buf := make([]byte, n)
+               for i := 0; i < n; i++ {
+                       time.Sleep(10 * time.Nanosecond)
+                       rand.Read(buf)
+                       b.Write(buf)
+                       hash.Write(buf)
+               }
+               expectSum <- hash.Sum(nil)
+               b.Close()
+       }()
+
+       gotSum := make(chan []byte)
+       for i := 0; i < n; i++ {
+               go func(bufSize int) {
+                       got := md5.New()
+                       io.CopyBuffer(got, b.NewReader(), make([]byte, bufSize))
+                       gotSum <- got.Sum(nil)
+               }(i + n/2)
+       }
+
+       expect := <-expectSum
+       for i := 0; i < n; i++ {
+               c.Check(expect, check.DeepEquals, <-gotSum)
+       }
+}
+
+func (s *Suite) BenchmarkOneReader(c *check.C) {
+       s.benchmarkReaders(c, 1)
+}
+
+func (s *Suite) BenchmarkManyReaders(c *check.C) {
+       s.benchmarkReaders(c, 100)
+}
+
+func (s *Suite) benchmarkReaders(c *check.C, readers int) {
+       var n int64
+       t0 := time.Now()
+
+       buf := make([]byte, 10000)
+       rand.Read(buf)
+       for i := 0; i < 10; i++ {
+               b := NewBuffer(nil)
+               go func() {
+                       for i := 0; i < c.N; i++ {
+                               b.Write(buf)
+                       }
+                       b.Close()
+               }()
+
+               var wg sync.WaitGroup
+               for i := 0; i < readers; i++ {
+                       wg.Add(1)
+                       go func() {
+                               defer wg.Done()
+                               nn, _ := io.Copy(ioutil.Discard, b.NewReader())
+                               atomic.AddInt64(&n, int64(nn))
+                       }()
+               }
+               wg.Wait()
+       }
+       c.Logf("%d bytes, %.0f MB/s", n, float64(n)/time.Since(t0).Seconds()/1000000)
+}
+
+func (s *Suite) checkReader(c *check.C, r io.Reader, expectData []byte, expectError error, done chan bool) {
+       buf, err := ioutil.ReadAll(r)
+       c.Check(err, check.Equals, expectError)
+       c.Check(buf, check.DeepEquals, expectData)
+       if done != nil {
+               done <- true
+       }
+}
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
diff --git a/sdk/go/auth/auth.go b/sdk/go/auth/auth.go
new file mode 100644 (file)
index 0000000..3c266e0
--- /dev/null
@@ -0,0 +1,107 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package auth
+
+import (
+       "encoding/base64"
+       "net/http"
+       "net/url"
+       "strings"
+)
+
+type Credentials struct {
+       Tokens []string
+}
+
+func NewCredentials() *Credentials {
+       return &Credentials{Tokens: []string{}}
+}
+
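+// CredentialsFromRequest returns the credentials the LoadToken
+// middleware has already attached to the request context, if any;
+// otherwise it returns new Credentials loaded from the request
+// itself.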
+func CredentialsFromRequest(r *http.Request) *Credentials {
+       if c, ok := r.Context().Value(contextKeyCredentials).(*Credentials); ok {
+               // preloaded by middleware
+               return c
+       }
+       c := NewCredentials()
+       c.LoadTokensFromHTTPRequest(r)
+       return c
+}
+
+// EncodeTokenCookie accepts a token and returns a byte slice suitable
+// for use as a cookie value, such that it will be decoded correctly
+// by LoadTokensFromHTTPRequest.
+var EncodeTokenCookie func([]byte) string = base64.URLEncoding.EncodeToString
+
+// DecodeTokenCookie accepts a cookie value and returns the encoded
+// token.
+var DecodeTokenCookie func(string) ([]byte, error) = base64.URLEncoding.DecodeString
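+
+// A hedged sketch of how a server might set the cookie that
+// loadTokenFromCookie expects; "w" and "token" here are illustrative
+// placeholders, not part of this package:
+//
+//     http.SetCookie(w, &http.Cookie{
+//             Name:  "arvados_api_token",
+//             Value: EncodeTokenCookie([]byte(token)),
+//     })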
+
+// LoadTokensFromHTTPRequest loads all tokens it can find in the
+// headers and query string of an http query.
+func (a *Credentials) LoadTokensFromHTTPRequest(r *http.Request) {
+       // Load plain token from "Authorization: OAuth2 ..." header
+       // (typically used by smart API clients)
+       if toks := strings.SplitN(r.Header.Get("Authorization"), " ", 2); len(toks) == 2 && (toks[0] == "OAuth2" || toks[0] == "Bearer") {
+               a.Tokens = append(a.Tokens, toks[1])
+       }
+
+       // Load base64-encoded token from "Authorization: Basic ..."
+       // header (typically used by git via credential helper)
+       if _, password, ok := r.BasicAuth(); ok {
+               a.Tokens = append(a.Tokens, password)
+       }
+
+       // Load tokens from query string. It's generally not a good
+       // idea to pass tokens around this way, but passing a narrowly
+       // scoped token is a reasonable way to implement "secret link
+       // to an object" in a generic way.
+       //
+       // ParseQuery always returns a non-nil map which might have
+       // valid parameters, even when a decoding error causes it to
+       // return a non-nil err. We ignore err; hopefully the caller
+       // will also need to parse the query string for
+       // application-specific purposes and will therefore
+       // find/report decoding errors in a suitable way.
+       qvalues, _ := url.ParseQuery(r.URL.RawQuery)
+       if val, ok := qvalues["api_token"]; ok {
+               a.Tokens = append(a.Tokens, val...)
+       }
+
+       a.loadTokenFromCookie(r)
+
+       // TODO: Load token from Rails session cookie (if Rails site
+       // secret is known)
+}
+
+func (a *Credentials) loadTokenFromCookie(r *http.Request) {
+       cookie, err := r.Cookie("arvados_api_token")
+       if err != nil || len(cookie.Value) == 0 {
+               return
+       }
+       token, err := DecodeTokenCookie(cookie.Value)
+       if err != nil {
+               return
+       }
+       a.Tokens = append(a.Tokens, string(token))
+}
+
+// LoadTokensFromHTTPRequestBody loads credentials from the request
+// body.
+//
+// This is separate from LoadTokensFromHTTPRequest because it is not
+// always desirable to read the request body; the application must
+// request it explicitly.
+func (a *Credentials) LoadTokensFromHTTPRequestBody(r *http.Request) error {
+       if r.Header.Get("Content-Type") != "application/x-www-form-urlencoded" {
+               return nil
+       }
+       if err := r.ParseForm(); err != nil {
+               return err
+       }
+       if t := r.PostFormValue("api_token"); t != "" {
+               a.Tokens = append(a.Tokens, t)
+       }
+       return nil
+}
diff --git a/sdk/go/auth/handlers.go b/sdk/go/auth/handlers.go
new file mode 100644 (file)
index 0000000..ad1fa51
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package auth
+
+import (
+       "context"
+       "net/http"
+)
+
+type contextKey string
+
+var contextKeyCredentials contextKey = "credentials"
+
+// LoadToken wraps the next handler, adding credentials to the request
+// context so subsequent handlers can access them efficiently via
+// CredentialsFromRequest.
+func LoadToken(next http.Handler) http.Handler {
+       return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+               if _, ok := r.Context().Value(contextKeyCredentials).(*Credentials); !ok {
+                       r = r.WithContext(context.WithValue(r.Context(), contextKeyCredentials, CredentialsFromRequest(r)))
+               }
+               next.ServeHTTP(w, r)
+       })
+}
+
+// RequireLiteralToken wraps the next handler, rejecting any request
+// that doesn't supply the given token. If the given token is empty,
+// RequireLiteralToken returns next (i.e., no auth checks are
+// performed).
+func RequireLiteralToken(token string, next http.Handler) http.Handler {
+       if token == "" {
+               return next
+       }
+       return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+               c := CredentialsFromRequest(r)
+               if len(c.Tokens) == 0 {
+                       http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
+                       return
+               }
+               for _, t := range c.Tokens {
+                       if t == token {
+                               next.ServeHTTP(w, r)
+                               return
+                       }
+               }
+               http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
+       })
+}
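+
+// A minimal wiring sketch (illustrative; "mux" and the token value
+// are assumptions, not part of this package):
+//
+//     mux := http.NewServeMux()
+//     handler := LoadToken(RequireLiteralToken("example-token", mux))
+//     http.ListenAndServe(":8080", handler)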
diff --git a/sdk/go/auth/handlers_test.go b/sdk/go/auth/handlers_test.go
new file mode 100644 (file)
index 0000000..362aeb7
--- /dev/null
@@ -0,0 +1,79 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package auth
+
+import (
+       "net/http"
+       "net/http/httptest"
+       "testing"
+
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&HandlersSuite{})
+
+type HandlersSuite struct {
+       served         int
+       gotCredentials *Credentials
+}
+
+func (s *HandlersSuite) SetUpTest(c *check.C) {
+       s.served = 0
+       s.gotCredentials = nil
+}
+
+func (s *HandlersSuite) TestLoadToken(c *check.C) {
+       handler := LoadToken(s)
+       handler.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest("GET", "/foo/bar?api_token=xyzzy", nil))
+       c.Assert(s.gotCredentials, check.NotNil)
+       c.Assert(s.gotCredentials.Tokens, check.HasLen, 1)
+       c.Check(s.gotCredentials.Tokens[0], check.Equals, "xyzzy")
+}
+
+func (s *HandlersSuite) TestRequireLiteralTokenEmpty(c *check.C) {
+       handler := RequireLiteralToken("", s)
+
+       w := httptest.NewRecorder()
+       handler.ServeHTTP(w, httptest.NewRequest("GET", "/foo/bar?api_token=abcdef", nil))
+       c.Check(s.served, check.Equals, 1)
+       c.Check(w.Code, check.Equals, http.StatusOK)
+
+       w = httptest.NewRecorder()
+       handler.ServeHTTP(w, httptest.NewRequest("GET", "/foo/bar", nil))
+       c.Check(s.served, check.Equals, 2)
+       c.Check(w.Code, check.Equals, http.StatusOK)
+}
+
+func (s *HandlersSuite) TestRequireLiteralToken(c *check.C) {
+       handler := RequireLiteralToken("xyzzy", s)
+
+       w := httptest.NewRecorder()
+       handler.ServeHTTP(w, httptest.NewRequest("GET", "/foo/bar?api_token=abcdef", nil))
+       c.Check(s.served, check.Equals, 0)
+       c.Check(w.Code, check.Equals, http.StatusForbidden)
+
+       w = httptest.NewRecorder()
+       handler.ServeHTTP(w, httptest.NewRequest("GET", "/foo/bar", nil))
+       c.Check(s.served, check.Equals, 0)
+       c.Check(w.Code, check.Equals, http.StatusUnauthorized)
+
+       w = httptest.NewRecorder()
+       handler.ServeHTTP(w, httptest.NewRequest("GET", "/foo/bar?api_token=xyzzy", nil))
+       c.Check(s.served, check.Equals, 1)
+       c.Check(w.Code, check.Equals, http.StatusOK)
+       c.Assert(s.gotCredentials, check.NotNil)
+       c.Assert(s.gotCredentials.Tokens, check.HasLen, 1)
+       c.Check(s.gotCredentials.Tokens[0], check.Equals, "xyzzy")
+}
+
+func (s *HandlersSuite) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+       s.served++
+       s.gotCredentials = CredentialsFromRequest(r)
+}
diff --git a/sdk/go/auth/salt.go b/sdk/go/auth/salt.go
new file mode 100644 (file)
index 0000000..667a30f
--- /dev/null
@@ -0,0 +1,48 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package auth
+
+import (
+       "crypto/hmac"
+       "crypto/sha1"
+       "errors"
+       "fmt"
+       "io"
+       "regexp"
+       "strings"
+)
+
+var (
+       reObsoleteToken  = regexp.MustCompile(`^[0-9a-z]{41,}$`)
+       ErrObsoleteToken = errors.New("obsolete token format")
+       ErrTokenFormat   = errors.New("badly formatted token")
+       ErrSalted        = errors.New("token already salted")
+)
+
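+// SaltToken returns the given "v2" format token with its secret
+// replaced by an HMAC-SHA1 digest that binds the secret to the given
+// remote cluster. A token whose secret is already 40 hex digits is
+// assumed to be salted: it is returned unchanged if it was salted
+// for the desired remote, and rejected with ErrSalted otherwise.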
+func SaltToken(token, remote string) (string, error) {
+       parts := strings.Split(token, "/")
+       if len(parts) < 3 || parts[0] != "v2" {
+               if reObsoleteToken.MatchString(token) {
+                       return "", ErrObsoleteToken
+               } else {
+                       return "", ErrTokenFormat
+               }
+       }
+       uuid := parts[1]
+       secret := parts[2]
+       if len(secret) != 40 {
+               // not already salted
+               mac := hmac.New(sha1.New, []byte(secret))
+               io.WriteString(mac, remote)
+               secret = fmt.Sprintf("%x", mac.Sum(nil))
+               return "v2/" + uuid + "/" + secret, nil
+       } else if strings.HasPrefix(uuid, remote) {
+               // already salted for the desired remote
+               return token, nil
+       } else {
+               // salted for a different remote, can't be used
+               return "", ErrSalted
+       }
+}
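+
+// A hedged usage sketch; the token below is a made-up placeholder in
+// the "v2/<uuid>/<secret>" format:
+//
+//     salted, err := SaltToken("v2/zzzzz-gj3su-000000000000000/abcdef", "abcde")
+//     if err != nil {
+//             // ErrObsoleteToken, ErrTokenFormat, or ErrSalted
+//     }
+//     _ = salted // "v2/zzzzz-gj3su-000000000000000/" + 40 hex digits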
diff --git a/sdk/go/blockdigest/blockdigest.go b/sdk/go/blockdigest/blockdigest.go
new file mode 100644 (file)
index 0000000..b9ecc45
--- /dev/null
@@ -0,0 +1,93 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Package blockdigest stores a Block Locator Digest compactly, in a
+// form that can be used as a map key.
+package blockdigest
+
+import (
+       "fmt"
+       "regexp"
+       "strconv"
+       "strings"
+)
+
+var LocatorPattern = regexp.MustCompile(
+       "^[0-9a-fA-F]{32}\\+[0-9]+(\\+[A-Z][A-Za-z0-9@_-]*)*$")
+
+// Stores a Block Locator Digest compactly, up to 128 bits.
+// Can be used as a map key.
+type BlockDigest struct {
+       H uint64
+       L uint64
+}
+
+type DigestWithSize struct {
+       Digest BlockDigest
+       Size   uint32
+}
+
+type BlockLocator struct {
+       Digest BlockDigest
+       Size   int
+       Hints  []string
+}
+
+func (d BlockDigest) String() string {
+       return fmt.Sprintf("%016x%016x", d.H, d.L)
+}
+
+func (w DigestWithSize) String() string {
+       return fmt.Sprintf("%s+%d", w.Digest.String(), w.Size)
+}
+
+// FromString creates a new BlockDigest from a 32-character hex
+// string, or returns an error if the string is not a valid digest.
+func FromString(s string) (dig BlockDigest, err error) {
+       if len(s) != 32 {
+               err = fmt.Errorf("Block digest should be exactly 32 characters but this one is %d: %s", len(s), s)
+               return
+       }
+
+       var d BlockDigest
+       d.H, err = strconv.ParseUint(s[:16], 16, 64)
+       if err != nil {
+               return
+       }
+       d.L, err = strconv.ParseUint(s[16:], 16, 64)
+       if err != nil {
+               return
+       }
+       dig = d
+       return
+}
+
+func IsBlockLocator(s string) bool {
+       return LocatorPattern.MatchString(s)
+}
+
+func ParseBlockLocator(s string) (b BlockLocator, err error) {
+       if !LocatorPattern.MatchString(s) {
+               err = fmt.Errorf("String \"%s\" does not match BlockLocator pattern "+
+                       "\"%s\".",
+                       s,
+                       LocatorPattern.String())
+       } else {
+               tokens := strings.Split(s, "+")
+               var blockSize int64
+               var blockDigest BlockDigest
+               // We expect both of the following to succeed since LocatorPattern
+               // restricts the strings appropriately.
+               blockDigest, err = FromString(tokens[0])
+               if err != nil {
+                       return
+               }
+               blockSize, err = strconv.ParseInt(tokens[1], 10, 0)
+               if err != nil {
+                       return
+               }
+               b.Digest = blockDigest
+               b.Size = int(blockSize)
+               b.Hints = tokens[2:]
+       }
+       return
+}
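+
+// A brief usage sketch with a made-up locator (digest+size+hint):
+//
+//     loc, err := ParseBlockLocator("d41d8cd98f00b204e9800998ecf8427e+0+A12345@67890")
+//     if err == nil {
+//             fmt.Println(loc.Digest, loc.Size, loc.Hints)
+//     }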
diff --git a/sdk/go/blockdigest/blockdigest_test.go b/sdk/go/blockdigest/blockdigest_test.go
new file mode 100644 (file)
index 0000000..a9994f7
--- /dev/null
@@ -0,0 +1,181 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package blockdigest
+
+import (
+       "fmt"
+       "runtime"
+       "strings"
+       "testing"
+)
+
+func getStackTrace() string {
+       buf := make([]byte, 1000)
+       bytesWritten := runtime.Stack(buf, false)
+       return "Stack Trace:\n" + string(buf[:bytesWritten])
+}
+
+func expectEqual(t *testing.T, actual interface{}, expected interface{}) {
+       if actual != expected {
+               t.Fatalf("Expected %v but received %v instead. %s",
+                       expected,
+                       actual,
+                       getStackTrace())
+       }
+}
+
+func expectStringSlicesEqual(t *testing.T, actual []string, expected []string) {
+       if len(actual) != len(expected) {
+               t.Fatalf("Expected %v (length %d), but received %v (length %d) instead. %s", expected, len(expected), actual, len(actual), getStackTrace())
+       }
+       for i := range actual {
+               if actual[i] != expected[i] {
+                       t.Fatalf("Expected %v but received %v instead (first disagreement at position %d). %s", expected, actual, i, getStackTrace())
+               }
+       }
+}
+
+func expectValidDigestString(t *testing.T, s string) {
+       bd, err := FromString(s)
+       if err != nil {
+               t.Fatalf("Expected %s to produce a valid BlockDigest but instead got error: %v", s, err)
+       }
+
+       expected := strings.ToLower(s)
+
+       if expected != bd.String() {
+               t.Fatalf("Expected %s to be returned by FromString(%s).String() but instead we received %s", expected, s, bd.String())
+       }
+}
+
+func expectInvalidDigestString(t *testing.T, s string) {
+       _, err := FromString(s)
+       if err == nil {
+               t.Fatalf("Expected %s to be an invalid BlockDigest, but did not receive an error", s)
+       }
+}
+
+func expectBlockLocator(t *testing.T, actual BlockLocator, expected BlockLocator) {
+       expectEqual(t, actual.Digest, expected.Digest)
+       expectEqual(t, actual.Size, expected.Size)
+       expectStringSlicesEqual(t, actual.Hints, expected.Hints)
+}
+
+func expectLocatorPatternMatch(t *testing.T, s string) {
+       if !LocatorPattern.MatchString(s) {
+               t.Fatalf("Expected \"%s\" to match locator pattern but it did not.",
+                       s)
+       }
+}
+
+func expectLocatorPatternFail(t *testing.T, s string) {
+       if LocatorPattern.MatchString(s) {
+               t.Fatalf("Expected \"%s\" to fail locator pattern but it passed.",
+                       s)
+       }
+}
+
+func TestValidDigestStrings(t *testing.T) {
+       expectValidDigestString(t, "01234567890123456789abcdefabcdef")
+       expectValidDigestString(t, "01234567890123456789ABCDEFABCDEF")
+       expectValidDigestString(t, "01234567890123456789AbCdEfaBcDeF")
+}
+
+func TestInvalidDigestStrings(t *testing.T) {
+       expectInvalidDigestString(t, "01234567890123456789abcdefabcdeg")
+       expectInvalidDigestString(t, "01234567890123456789abcdefabcde")
+       expectInvalidDigestString(t, "01234567890123456789abcdefabcdefa")
+       expectInvalidDigestString(t, "g1234567890123456789abcdefabcdef")
+}
+
+func TestBlockDigestWorksAsMapKey(t *testing.T) {
+       m := make(map[BlockDigest]int)
+       bd, err := FromString("01234567890123456789abcdefabcdef")
+       if err != nil {
+               t.Fatalf("Unexpected error during FromString for block: %v", err)
+       }
+       m[bd] = 5
+}
+
+func TestBlockDigestGetsPrettyPrintedByPrintf(t *testing.T) {
+       input := "01234567890123456789abcdefabcdef"
+       fromString, err := FromString(input)
+       if err != nil {
+               t.Fatalf("Unexpected error during FromString: %v", err)
+       }
+       prettyPrinted := fmt.Sprintf("%v", fromString)
+       if prettyPrinted != input {
+               t.Fatalf("Expected blockDigest produced from \"%s\" to be printed as "+
+                       "\"%s\", but instead it was printed as %s",
+                       input, input, prettyPrinted)
+       }
+}
+
+func TestBlockDigestGetsPrettyPrintedByPrintfInNestedStructs(t *testing.T) {
+       input, err := FromString("01234567890123456789abcdefabcdef")
+       if err != nil {
+               t.Fatalf("Unexpected error during FromString for block: %v", err)
+       }
+       value := 42
+       nested := struct {
+               // Fun trivia fact: If this field was called "digest" instead of
+               // "Digest", then it would not be exported and String() would
+               // never get called on it and our output would look very
+               // different.
+               Digest BlockDigest
+               value  int
+       }{
+               input,
+               value,
+       }
+       prettyPrinted := fmt.Sprintf("%+v", nested)
+       expected := fmt.Sprintf("{Digest:%s value:%d}", input, value)
+       if prettyPrinted != expected {
+               t.Fatalf("Expected blockDigest produced from \"%s\" to be printed as "+
+                       "\"%s\", but instead it was printed as %s",
+                       input, expected, prettyPrinted)
+       }
+}
+
+func TestLocatorPatternBasic(t *testing.T) {
+       expectLocatorPatternMatch(t, "12345678901234567890123456789012+12345")
+       expectLocatorPatternMatch(t, "A2345678901234abcdefababdeffdfdf+12345")
+       expectLocatorPatternMatch(t, "12345678901234567890123456789012+12345+A1")
+       expectLocatorPatternMatch(t,
+               "12345678901234567890123456789012+12345+A1+B123wxyz@_-")
+       expectLocatorPatternMatch(t,
+               "12345678901234567890123456789012+12345+A1+B123wxyz@_-+C@")
+       expectLocatorPatternMatch(t, "12345678901234567890123456789012+12345+A")
+       expectLocatorPatternMatch(t, "12345678901234567890123456789012+12345+A1+B")
+       expectLocatorPatternMatch(t, "12345678901234567890123456789012+12345+A+B2")
+
+       expectLocatorPatternFail(t, "12345678901234567890123456789012")
+       expectLocatorPatternFail(t, "12345678901234567890123456789012+")
+       expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+")
+       expectLocatorPatternFail(t, "1234567890123456789012345678901+12345")
+       expectLocatorPatternFail(t, "123456789012345678901234567890123+12345")
+       expectLocatorPatternFail(t, "g2345678901234abcdefababdeffdfdf+12345")
+       expectLocatorPatternFail(t, "12345678901234567890123456789012+12345 ")
+       expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+1")
+       expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+1A")
+       expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+a1")
+       expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+A1+")
+}
+
+func TestParseBlockLocatorSimple(t *testing.T) {
+       b, err := ParseBlockLocator("365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf")
+       if err != nil {
+               t.Fatalf("Unexpected error parsing block locator: %v", err)
+       }
+       d, err := FromString("365f83f5f808896ec834c8b595288735")
+       if err != nil {
+               t.Fatalf("Unexpected error during FromString for block: %v", err)
+       }
+       expectBlockLocator(t, b, BlockLocator{Digest: d,
+               Size: 2310,
+               Hints: []string{"K@qr1hi",
+                       "Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf"}})
+}
diff --git a/sdk/go/blockdigest/testing.go b/sdk/go/blockdigest/testing.go
new file mode 100644 (file)
index 0000000..7716a71
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code used for testing only.
+
+package blockdigest
+
+// MakeTestBlockDigest returns a distinct BlockDigest for test code
+// that needs one.
+func MakeTestBlockDigest(i int) BlockDigest {
+       return BlockDigest{L: uint64(i)}
+}
+
+func MakeTestDigestSpecifySize(i int, s int) DigestWithSize {
+       return DigestWithSize{Digest: BlockDigest{L: uint64(i)}, Size: uint32(s)}
+}
+
+func MakeTestDigestWithSize(i int) DigestWithSize {
+       return MakeTestDigestSpecifySize(i, i)
+}
diff --git a/sdk/go/config/dump.go b/sdk/go/config/dump.go
new file mode 100644 (file)
index 0000000..e835dee
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package config
+
+import (
+       "errors"
+       "os"
+
+       "github.com/ghodss/yaml"
+)
+
+// DumpAndExit writes the given config to stdout as YAML. If an error
+// occurs, that error is returned. Otherwise, the program exits 0.
+//
+// Example:
+//
+//     log.Fatal(DumpAndExit(cfg))
+func DumpAndExit(cfg interface{}) error {
+       y, err := yaml.Marshal(cfg)
+       if err != nil {
+               return err
+       }
+       _, err = os.Stdout.Write(y)
+       if err != nil {
+               return err
+       }
+       os.Exit(0)
+       return errors.New("exit failed!?")
+}
diff --git a/sdk/go/config/load.go b/sdk/go/config/load.go
new file mode 100644 (file)
index 0000000..cab09c7
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package config
+
+import (
+       "fmt"
+       "io/ioutil"
+
+       "github.com/ghodss/yaml"
+)
+
+// LoadFile loads configuration from the file given by configPath and
+// decodes it into cfg.
+//
+// YAML and JSON formats are supported.
+func LoadFile(cfg interface{}, configPath string) error {
+       buf, err := ioutil.ReadFile(configPath)
+       if err != nil {
+               return err
+       }
+       err = yaml.Unmarshal(buf, cfg)
+       if err != nil {
+               return fmt.Errorf("Error decoding config %q: %v", configPath, err)
+       }
+       return nil
+}
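+
+// A hedged usage sketch; the struct fields and file path below are
+// illustrative only:
+//
+//     var cfg struct {
+//             Client struct {
+//                     APIHost string
+//             }
+//     }
+//     if err := LoadFile(&cfg, "/etc/example/config.yml"); err != nil {
+//             log.Fatal(err)
+//     }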
+
+// Dump returns a YAML representation of cfg.
+func Dump(cfg interface{}) ([]byte, error) {
+       return yaml.Marshal(cfg)
+}
diff --git a/sdk/go/crunchrunner/crunchrunner.go b/sdk/go/crunchrunner/crunchrunner.go
new file mode 100644 (file)
index 0000000..ca16fc6
--- /dev/null
@@ -0,0 +1,439 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package main
+
+import (
+       "encoding/json"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "os"
+       "os/exec"
+       "os/signal"
+       "strings"
+       "syscall"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+)
+
+type TaskDef struct {
+       Command            []string          `json:"command"`
+       Env                map[string]string `json:"task.env"`
+       Stdin              string            `json:"task.stdin"`
+       Stdout             string            `json:"task.stdout"`
+       Stderr             string            `json:"task.stderr"`
+       Vwd                map[string]string `json:"task.vwd"`
+       SuccessCodes       []int             `json:"task.successCodes"`
+       PermanentFailCodes []int             `json:"task.permanentFailCodes"`
+       TemporaryFailCodes []int             `json:"task.temporaryFailCodes"`
+       KeepTmpOutput      bool              `json:"task.keepTmpOutput"`
+}
+
+type Tasks struct {
+       Tasks []TaskDef `json:"tasks"`
+}
+
+type Job struct {
+       ScriptParameters Tasks `json:"script_parameters"`
+}
+
+type Task struct {
+       JobUUID              string  `json:"job_uuid"`
+       CreatedByJobTaskUUID string  `json:"created_by_job_task_uuid"`
+       Parameters           TaskDef `json:"parameters"`
+       Sequence             int     `json:"sequence"`
+       Output               string  `json:"output"`
+       Success              bool    `json:"success"`
+       Progress             float32 `json:"progress"`
+}
+
+type IArvadosClient interface {
+       Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
+       Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error)
+}
+
+func setupDirectories(crunchtmpdir, taskUUID string, keepTmp bool) (tmpdir, outdir string, err error) {
+       tmpdir = crunchtmpdir + "/tmpdir"
+       err = os.Mkdir(tmpdir, 0700)
+       if err != nil {
+               return "", "", err
+       }
+
+       if keepTmp {
+               outdir = os.Getenv("TASK_KEEPMOUNT_TMP")
+       } else {
+               outdir = crunchtmpdir + "/outdir"
+               err = os.Mkdir(outdir, 0700)
+               if err != nil {
+                       return "", "", err
+               }
+       }
+
+       return tmpdir, outdir, nil
+}
+
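+// checkOutputFilename rejects paths that start or end with "/" or
+// contain "../", and creates any parent directories named in fn.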
+func checkOutputFilename(outdir, fn string) error {
+       if strings.HasPrefix(fn, "/") || strings.HasSuffix(fn, "/") {
+               return fmt.Errorf("Path must not start or end with '/'")
+       }
+       if strings.Index("../", fn) != -1 {
+               return fmt.Errorf("Path must not contain '../'")
+       }
+
+       sl := strings.LastIndex(fn, "/")
+       if sl != -1 {
+               os.MkdirAll(outdir+"/"+fn[0:sl], 0777)
+       }
+       return nil
+}
+
+func copyFile(dst, src string) error {
+       in, err := os.Open(src)
+       if err != nil {
+               return err
+       }
+       defer in.Close()
+
+       out, err := os.Create(dst)
+       if err != nil {
+               return err
+       }
+       defer out.Close()
+
+       _, err = io.Copy(out, in)
+       return err
+}
+
+func setupCommand(cmd *exec.Cmd, taskp TaskDef, outdir string, replacements map[string]string) (stdin, stdout, stderr string, err error) {
+       if taskp.Vwd != nil {
+               for k, v := range taskp.Vwd {
+                       v = substitute(v, replacements)
+                       err = checkOutputFilename(outdir, k)
+                       if err != nil {
+                               return "", "", "", err
+                       }
+                       if taskp.KeepTmpOutput {
+                               err = copyFile(v, outdir+"/"+k)
+                       } else {
+                               err = os.Symlink(v, outdir+"/"+k)
+                       }
+                       if err != nil {
+                               return "", "", "", err
+                       }
+               }
+       }
+
+       if taskp.Stdin != "" {
+               // Set up stdin redirection
+               stdin = substitute(taskp.Stdin, replacements)
+               cmd.Stdin, err = os.Open(stdin)
+               if err != nil {
+                       return "", "", "", err
+               }
+       }
+
+       if taskp.Stdout != "" {
+               err = checkOutputFilename(outdir, taskp.Stdout)
+               if err != nil {
+                       return "", "", "", err
+               }
+               // Set up stdout redirection
+               stdout = outdir + "/" + taskp.Stdout
+               cmd.Stdout, err = os.Create(stdout)
+               if err != nil {
+                       return "", "", "", err
+               }
+       } else {
+               cmd.Stdout = os.Stdout
+       }
+
+       if taskp.Stderr != "" {
+               err = checkOutputFilename(outdir, taskp.Stderr)
+               if err != nil {
+                       return "", "", "", err
+               }
+               // Set up stderr redirection
+               stderr = outdir + "/" + taskp.Stderr
+               cmd.Stderr, err = os.Create(stderr)
+               if err != nil {
+                       return "", "", "", err
+               }
+       } else {
+               cmd.Stderr = os.Stderr
+       }
+
+       if taskp.Env != nil {
+               // Set up subprocess environment
+               cmd.Env = os.Environ()
+               for k, v := range taskp.Env {
+                       v = substitute(v, replacements)
+                       cmd.Env = append(cmd.Env, k+"="+v)
+               }
+       }
+       return stdin, stdout, stderr, nil
+}
+
+// Set up signal handlers.  Go sends signal notifications to a "signal
+// channel".
+func setupSignals(cmd *exec.Cmd) chan os.Signal {
+       sigChan := make(chan os.Signal, 1)
+       signal.Notify(sigChan, syscall.SIGTERM)
+       signal.Notify(sigChan, syscall.SIGINT)
+       signal.Notify(sigChan, syscall.SIGQUIT)
+       return sigChan
+}
+
+func inCodes(code int, codes []int) bool {
+       if codes != nil {
+               for _, c := range codes {
+                       if code == c {
+                               return true
+                       }
+               }
+       }
+       return false
+}
+
+const TASK_TEMPFAIL = 111
+
+type TempFail struct{ error }
+type PermFail struct{}
+
+func (s PermFail) Error() string {
+       return "PermFail"
+}
+
+func substitute(inp string, subst map[string]string) string {
+       for k, v := range subst {
+               inp = strings.Replace(inp, k, v, -1)
+       }
+       return inp
+}
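+
+// A brief example using the replacements map built in runner() below
+// (the expanded path is illustrative):
+//
+//     substitute("$(task.outdir)/out.txt", replacements)
+//     // => "<crunchtmpdir>/outdir/out.txt"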
+
+func getKeepTmp(outdir string) (manifest string, err error) {
+       fn, err := os.Open(outdir + "/.arvados#collection")
+       if err != nil {
+               return "", err
+       }
+       defer fn.Close()
+
+       buf, err := ioutil.ReadAll(fn)
+       if err != nil {
+               return "", err
+       }
+       collection := arvados.Collection{}
+       err = json.Unmarshal(buf, &collection)
+       return collection.ManifestText, err
+}
+
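+// runner executes a single task. For task 0 of a multi-task job it
+// creates the subtasks and marks task 0 successful; otherwise it
+// runs the task's command with the configured redirections, uploads
+// the output directory to Keep, and records success or failure via
+// the API.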
+func runner(api IArvadosClient,
+       kc IKeepClient,
+       jobUUID, taskUUID, crunchtmpdir, keepmount string,
+       jobStruct Job, taskStruct Task) error {
+
+       var err error
+       taskp := taskStruct.Parameters
+
+       // If this is task 0 and there are multiple tasks, dispatch subtasks
+       // and exit.
+       if taskStruct.Sequence == 0 {
+               if len(jobStruct.ScriptParameters.Tasks) == 1 {
+                       taskp = jobStruct.ScriptParameters.Tasks[0]
+               } else {
+                       for _, task := range jobStruct.ScriptParameters.Tasks {
+                               err := api.Create("job_tasks",
+                                       map[string]interface{}{
+                                               "job_task": Task{
+                                                       JobUUID:              jobUUID,
+                                                       CreatedByJobTaskUUID: taskUUID,
+                                                       Sequence:             1,
+                                                       Parameters:           task}},
+                                       nil)
+                               if err != nil {
+                                       return TempFail{err}
+                               }
+                       }
+                       err = api.Update("job_tasks", taskUUID,
+                               map[string]interface{}{
+                                       "job_task": map[string]interface{}{
+                                               "output":   "",
+                                               "success":  true,
+                                               "progress": 1.0}},
+                               nil)
+                       if err != nil {
+                               return TempFail{err}
+                       }
+                       return nil
+               }
+       }
+
+       var tmpdir, outdir string
+       tmpdir, outdir, err = setupDirectories(crunchtmpdir, taskUUID, taskp.KeepTmpOutput)
+       if err != nil {
+               return TempFail{err}
+       }
+
+       replacements := map[string]string{
+               "$(task.tmpdir)": tmpdir,
+               "$(task.outdir)": outdir,
+               "$(task.keep)":   keepmount}
+
+       log.Printf("crunchrunner: $(task.tmpdir)=%v", tmpdir)
+       log.Printf("crunchrunner: $(task.outdir)=%v", outdir)
+       log.Printf("crunchrunner: $(task.keep)=%v", keepmount)
+
+       // Set up subprocess
+       for k, v := range taskp.Command {
+               taskp.Command[k] = substitute(v, replacements)
+       }
+
+       cmd := exec.Command(taskp.Command[0], taskp.Command[1:]...)
+
+       cmd.Dir = outdir
+
+       var stdin, stdout, stderr string
+       stdin, stdout, stderr, err = setupCommand(cmd, taskp, outdir, replacements)
+       if err != nil {
+               return err
+       }
+
+       // Run subprocess and wait for it to complete
+       if stdin != "" {
+               stdin = " < " + stdin
+       }
+       if stdout != "" {
+               stdout = " > " + stdout
+       }
+       if stderr != "" {
+               stderr = " 2> " + stderr
+       }
+       log.Printf("Running %v%v%v%v", cmd.Args, stdin, stdout, stderr)
+
+       var caughtSignal os.Signal
+       sigChan := setupSignals(cmd)
+
+       err = cmd.Start()
+       if err != nil {
+               signal.Stop(sigChan)
+               return TempFail{err}
+       }
+
+       finishedSignalNotify := make(chan struct{})
+       go func(sigChan <-chan os.Signal) {
+               for sig := range sigChan {
+                       caughtSignal = sig
+                       cmd.Process.Signal(caughtSignal)
+               }
+               close(finishedSignalNotify)
+       }(sigChan)
+
+       err = cmd.Wait()
+       signal.Stop(sigChan)
+
+       close(sigChan)
+       <-finishedSignalNotify
+
+       if caughtSignal != nil {
+               log.Printf("Caught signal %v", caughtSignal)
+               return PermFail{}
+       }
+
+       if err != nil {
+               // Wait() returns an *exec.ExitError on non-zero exit codes, but
+               // exit codes are handled below, so only return other errors here.
+               if _, ok := err.(*exec.ExitError); !ok {
+                       return TempFail{err}
+               }
+       }
+
+       var success bool
+
+       exitCode := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+
+       log.Printf("Completed with exit code %v", exitCode)
+
+       if inCodes(exitCode, taskp.PermanentFailCodes) {
+               success = false
+       } else if inCodes(exitCode, taskp.TemporaryFailCodes) {
+               return TempFail{fmt.Errorf("Process tempfail with exit code %v", exitCode)}
+       } else if inCodes(exitCode, taskp.SuccessCodes) || cmd.ProcessState.Success() {
+               success = true
+       } else {
+               success = false
+       }
+
+       // Upload output directory
+       var manifest string
+       if taskp.KeepTmpOutput {
+               manifest, err = getKeepTmp(outdir)
+       } else {
+               manifest, err = WriteTree(kc, outdir)
+       }
+       if err != nil {
+               return TempFail{err}
+       }
+
+       // Set status
+       err = api.Update("job_tasks", taskUUID,
+               map[string]interface{}{
+                       "job_task": Task{
+                               Output:   manifest,
+                               Success:  success,
+                               Progress: 1}},
+               nil)
+       if err != nil {
+               return TempFail{err}
+       }
+
+       if success {
+               return nil
+       } else {
+               return PermFail{}
+       }
+}
+
+func main() {
+       api, err := arvadosclient.MakeArvadosClient()
+       if err != nil {
+               log.Fatal(err)
+       }
+
+       jobUUID := os.Getenv("JOB_UUID")
+       taskUUID := os.Getenv("TASK_UUID")
+       tmpdir := os.Getenv("TASK_WORK")
+       keepmount := os.Getenv("TASK_KEEPMOUNT")
+
+       var jobStruct Job
+       var taskStruct Task
+
+       err = api.Get("jobs", jobUUID, nil, &jobStruct)
+       if err != nil {
+               log.Fatal(err)
+       }
+       err = api.Get("job_tasks", taskUUID, nil, &taskStruct)
+       if err != nil {
+               log.Fatal(err)
+       }
+
+       var kc IKeepClient
+       kc, err = keepclient.MakeKeepClient(api)
+       if err != nil {
+               log.Fatal(err)
+       }
+
+       syscall.Umask(0022)
+       err = runner(api, kc, jobUUID, taskUUID, tmpdir, keepmount, jobStruct, taskStruct)
+
+       if err == nil {
+               os.Exit(0)
+       } else if _, ok := err.(TempFail); ok {
+               log.Print(err)
+               os.Exit(TASK_TEMPFAIL)
+       } else if _, ok := err.(PermFail); ok {
+               os.Exit(1)
+       } else {
+               log.Fatal(err)
+       }
+}
diff --git a/sdk/go/crunchrunner/crunchrunner_test.go b/sdk/go/crunchrunner/crunchrunner_test.go
new file mode 100644 (file)
index 0000000..f2827c6
--- /dev/null
@@ -0,0 +1,478 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package main
+
+import (
+       "io"
+       "io/ioutil"
+       "log"
+       "os"
+       "syscall"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       . "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       TestingT(t)
+}
+
+type TestSuite struct{}
+
+// Gocheck boilerplate
+var _ = Suite(&TestSuite{})
+
+type ArvTestClient struct {
+       c        *C
+       manifest string
+       success  bool
+}
+
+func (t ArvTestClient) Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error {
+       return nil
+}
+
+func (t ArvTestClient) Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error) {
+       t.c.Check(resourceType, Equals, "job_tasks")
+       t.c.Check(parameters, DeepEquals, arvadosclient.Dict{"job_task": Task{
+               Output:   t.manifest,
+               Success:  t.success,
+               Progress: 1}})
+       return nil
+}
+
+func (s *TestSuite) TestSimpleRun(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(ArvTestClient{c, "", true},
+               KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command: []string{"echo", "foo"}}}}},
+               Task{Sequence: 0})
+       c.Check(err, IsNil)
+}
+
+func checkOutput(c *C, tmpdir string) {
+       file, err := os.Open(tmpdir + "/outdir/output.txt")
+       c.Assert(err, IsNil)
+
+       data := make([]byte, 100)
+       var count int
+       err = nil
+       offset := 0
+       for err == nil {
+               count, err = file.Read(data[offset:])
+               offset += count
+       }
+       c.Assert(err, Equals, io.EOF)
+       c.Check(string(data[0:offset]), Equals, "foo\n")
+}
+
+func (s *TestSuite) TestSimpleRunSubtask(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(ArvTestClient{c,
+               ". d3b07384d113edec49eaa6238ad5ff00+4 0:4:output.txt\n", true},
+               KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{
+                       {Command: []string{"echo", "bar"}},
+                       {Command: []string{"echo", "foo"}}}}},
+               Task{Parameters: TaskDef{
+                       Command: []string{"echo", "foo"},
+                       Stdout:  "output.txt"},
+                       Sequence: 1})
+       c.Check(err, IsNil)
+
+       checkOutput(c, tmpdir)
+}
+
+func (s *TestSuite) TestRedirect(c *C) {
+       tmpfile, _ := ioutil.TempFile("", "")
+       tmpfile.Write([]byte("foo\n"))
+       tmpfile.Close()
+       defer os.Remove(tmpfile.Name())
+
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(ArvTestClient{c,
+               ". d3b07384d113edec49eaa6238ad5ff00+4 0:4:output.txt\n", true},
+               KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command: []string{"cat"},
+                       Stdout:  "output.txt",
+                       Stdin:   tmpfile.Name()}}}},
+               Task{Sequence: 0})
+       c.Check(err, IsNil)
+
+       checkOutput(c, tmpdir)
+}
+
+func (s *TestSuite) TestEnv(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(ArvTestClient{c, ". d3b07384d113edec49eaa6238ad5ff00+4 0:4:output.txt\n", true},
+               KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command: []string{"/bin/sh", "-c", "echo $BAR"},
+                       Stdout:  "output.txt",
+                       Env:     map[string]string{"BAR": "foo"}}}}},
+               Task{Sequence: 0})
+       c.Check(err, IsNil)
+       checkOutput(c, tmpdir)
+}
+
+func (s *TestSuite) TestEnvSubstitute(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(ArvTestClient{c, ". d3b07384d113edec49eaa6238ad5ff00+4 0:4:output.txt\n", true},
+               KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "foo\n",
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command: []string{"/bin/sh", "-c", "echo $BAR"},
+                       Stdout:  "output.txt",
+                       Env:     map[string]string{"BAR": "$(task.keep)"}}}}},
+               Task{Sequence: 0})
+       c.Check(err, IsNil)
+       checkOutput(c, tmpdir)
+}
+
+func (s *TestSuite) TestEnvReplace(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(ArvTestClient{c, ". d3b07384d113edec49eaa6238ad5ff00+4 0:4:output.txt\n", true},
+               KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command: []string{"/bin/sh", "-c", "echo $PATH"},
+                       Stdout:  "output.txt",
+                       Env:     map[string]string{"PATH": "foo"}}}}},
+               Task{Sequence: 0})
+       c.Check(err, IsNil)
+       checkOutput(c, tmpdir)
+}
+
+type SubtaskTestClient struct {
+       c     *C
+       parms []Task
+       i     int
+}
+
+func (t *SubtaskTestClient) Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error {
+       t.c.Check(resourceType, Equals, "job_tasks")
+       t.c.Check(parameters, DeepEquals, arvadosclient.Dict{"job_task": t.parms[t.i]})
+       t.i++
+       return nil
+}
+
+func (t SubtaskTestClient) Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error) {
+       return nil
+}
+
+func (s *TestSuite) TestScheduleSubtask(c *C) {
+       api := SubtaskTestClient{c, []Task{
+               {JobUUID: "zzzz-8i9sb-111111111111111",
+                       CreatedByJobTaskUUID: "zzzz-ot0gb-111111111111111",
+                       Sequence:             1,
+                       Parameters: TaskDef{
+                               Command: []string{"echo", "bar"}}},
+               {JobUUID: "zzzz-8i9sb-111111111111111",
+                       CreatedByJobTaskUUID: "zzzz-ot0gb-111111111111111",
+                       Sequence:             1,
+                       Parameters: TaskDef{
+                               Command: []string{"echo", "foo"}}}},
+               0}
+
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(&api, KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{
+                       {Command: []string{"echo", "bar"}},
+                       {Command: []string{"echo", "foo"}}}}},
+               Task{Sequence: 0})
+       c.Check(err, IsNil)
+}
+
+func (s *TestSuite) TestRunFail(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(ArvTestClient{c, "", false}, KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command: []string{"/bin/sh", "-c", "exit 1"}}}}},
+               Task{Sequence: 0})
+       c.Check(err, FitsTypeOf, PermFail{})
+}
+
+func (s *TestSuite) TestRunSuccessCode(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(ArvTestClient{c, "", true}, KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command:      []string{"/bin/sh", "-c", "exit 1"},
+                       SuccessCodes: []int{0, 1}}}}},
+               Task{Sequence: 0})
+       c.Check(err, IsNil)
+}
+
+func (s *TestSuite) TestRunFailCode(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(ArvTestClient{c, "", false}, KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command:            []string{"/bin/sh", "-c", "exit 0"},
+                       PermanentFailCodes: []int{0, 1}}}}},
+               Task{Sequence: 0})
+       c.Check(err, FitsTypeOf, PermFail{})
+}
+
+func (s *TestSuite) TestRunTempFailCode(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(ArvTestClient{c, "", false}, KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command:            []string{"/bin/sh", "-c", "exit 1"},
+                       TemporaryFailCodes: []int{1}}}}},
+               Task{Sequence: 0})
+       c.Check(err, FitsTypeOf, TempFail{})
+}
+
+func (s *TestSuite) TestVwd(c *C) {
+       tmpfile, _ := ioutil.TempFile("", "")
+       tmpfile.Write([]byte("foo\n"))
+       tmpfile.Close()
+       defer os.Remove(tmpfile.Name())
+
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(ArvTestClient{c, ". d3b07384d113edec49eaa6238ad5ff00+4 0:4:output.txt\n", true},
+               KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command: []string{"ls", "output.txt"},
+                       Vwd: map[string]string{
+                               "output.txt": tmpfile.Name()}}}}},
+               Task{Sequence: 0})
+       c.Check(err, IsNil)
+       checkOutput(c, tmpdir)
+}
+
+func (s *TestSuite) TestSubstitutionStdin(c *C) {
+       keepmount, _ := ioutil.TempDir("", "")
+       ioutil.WriteFile(keepmount+"/"+"file1.txt", []byte("foo\n"), 0600)
+       defer func() {
+               os.RemoveAll(keepmount)
+       }()
+
+       log.Print("Keepmount is ", keepmount)
+
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       log.Print("tmpdir is ", tmpdir)
+
+       err := runner(ArvTestClient{c,
+               ". d3b07384d113edec49eaa6238ad5ff00+4 0:4:output.txt\n", true},
+               KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               keepmount,
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command: []string{"cat"},
+                       Stdout:  "output.txt",
+                       Stdin:   "$(task.keep)/file1.txt"}}}},
+               Task{Sequence: 0})
+       c.Check(err, IsNil)
+       checkOutput(c, tmpdir)
+}
+
+func (s *TestSuite) TestSubstitutionCommandLine(c *C) {
+       keepmount, _ := ioutil.TempDir("", "")
+       ioutil.WriteFile(keepmount+"/"+"file1.txt", []byte("foo\n"), 0600)
+       defer func() {
+               os.RemoveAll(keepmount)
+       }()
+
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(ArvTestClient{c,
+               ". d3b07384d113edec49eaa6238ad5ff00+4 0:4:output.txt\n", true},
+               KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               keepmount,
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command: []string{"cat", "$(task.keep)/file1.txt"},
+                       Stdout:  "output.txt"}}}},
+               Task{Sequence: 0})
+       c.Check(err, IsNil)
+
+       checkOutput(c, tmpdir)
+}
+
+func (s *TestSuite) TestSignal(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       go func() {
+               time.Sleep(1 * time.Second)
+               self, _ := os.FindProcess(os.Getpid())
+               self.Signal(syscall.SIGINT)
+       }()
+
+       err := runner(ArvTestClient{c,
+               "", false},
+               KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command: []string{"sleep", "4"}}}}},
+               Task{Sequence: 0})
+       c.Check(err, FitsTypeOf, PermFail{})
+}
+
+func (s *TestSuite) TestQuoting(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       err := runner(ArvTestClient{c,
+               "./s\\040ub:dir d3b07384d113edec49eaa6238ad5ff00+4 0:4::e\\040vil\n", true},
+               KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command: []string{"echo", "foo"},
+                       Stdout:  "s ub:dir/:e vi\nl"}}}},
+               Task{Sequence: 0})
+       c.Check(err, IsNil)
+}
+
+func (s *TestSuite) TestKeepTmp(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       os.Setenv("TASK_KEEPMOUNT_TMP", tmpdir)
+       defer os.Setenv("TASK_KEEPMOUNT_TMP", "")
+
+       fn, err := os.Create(tmpdir + "/.arvados#collection")
+       c.Assert(err, IsNil)
+       fn.Write([]byte("{\"manifest_text\":\". unparsed 0:3:foo\\n\",\"uuid\":null}"))
+       defer fn.Close()
+
+       err = runner(ArvTestClient{c,
+               ". unparsed 0:3:foo\n", true},
+               KeepTestClient{},
+               "zzzz-8i9sb-111111111111111",
+               "zzzz-ot0gb-111111111111111",
+               tmpdir,
+               "",
+               Job{ScriptParameters: Tasks{[]TaskDef{{
+                       Command:       []string{"echo", "foo"},
+                       KeepTmpOutput: true}}}},
+               Task{Sequence: 0})
+       c.Check(err, IsNil)
+}
diff --git a/sdk/go/crunchrunner/upload.go b/sdk/go/crunchrunner/upload.go
new file mode 100644 (file)
index 0000000..2848d10
--- /dev/null
@@ -0,0 +1,241 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package main
+
+import (
+       "bytes"
+       "crypto/md5"
+       "errors"
+       "fmt"
+       "io"
+       "log"
+       "os"
+       "path/filepath"
+       "sort"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       "git.curoverse.com/arvados.git/sdk/go/manifest"
+)
+
+type Block struct {
+       data   []byte
+       offset int64
+}
+
+type ManifestStreamWriter struct {
+       *ManifestWriter
+       *manifest.ManifestStream
+       offset int64
+       *Block
+       uploader chan *Block
+       finish   chan []error
+}
+
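+// IKeepClient is the subset of the keepclient API used by this
+// package, factored out as an interface so tests can substitute a
+// stub (see KeepTestClient in upload_test.go).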
+type IKeepClient interface {
+       PutHB(hash string, buf []byte) (string, int, error)
+}
+
+func (m *ManifestStreamWriter) Write(p []byte) (int, error) {
+       n, err := m.ReadFrom(bytes.NewReader(p))
+       return int(n), err
+}
+
+func (m *ManifestStreamWriter) ReadFrom(r io.Reader) (n int64, err error) {
+       var total int64
+       var count int
+
+       for err == nil {
+               if m.Block == nil {
+                       m.Block = &Block{make([]byte, keepclient.BLOCKSIZE), 0}
+               }
+               count, err = r.Read(m.Block.data[m.Block.offset:])
+               total += int64(count)
+               m.Block.offset += int64(count)
+               if m.Block.offset == keepclient.BLOCKSIZE {
+                       m.uploader <- m.Block
+                       m.Block = nil
+               }
+       }
+
+       if err == io.EOF {
+               return total, nil
+       }
+       return total, err
+}
+
+func (m *ManifestStreamWriter) goUpload() {
+       var errors []error
+       uploader := m.uploader
+       finish := m.finish
+       for block := range uploader {
+               hash := fmt.Sprintf("%x", md5.Sum(block.data[0:block.offset]))
+               signedHash, _, err := m.ManifestWriter.IKeepClient.PutHB(hash, block.data[0:block.offset])
+               if err != nil {
+                       errors = append(errors, err)
+               } else {
+                       m.ManifestStream.Blocks = append(m.ManifestStream.Blocks, signedHash)
+               }
+       }
+       finish <- errors
+}
+
+type ManifestWriter struct {
+       IKeepClient
+       stripPrefix string
+       Streams     map[string]*ManifestStreamWriter
+}
+
+func (m *ManifestWriter) WalkFunc(path string, info os.FileInfo, err error) error {
+       if err != nil {
+               return err
+       }
+
+       targetPath, targetInfo := path, info
+       if info.Mode()&os.ModeSymlink != 0 {
+               // Update targetpath/info to reflect the symlink
+               // target, not the symlink itself
+               targetPath, err = filepath.EvalSymlinks(path)
+               if err != nil {
+                       return err
+               }
+               targetInfo, err = os.Stat(targetPath)
+               if err != nil {
+                       return fmt.Errorf("stat symlink %q target %q: %s", path, targetPath, err)
+               }
+       }
+
+       if targetInfo.Mode()&os.ModeType != 0 {
+               // Skip directories, pipes, other non-regular files
+               return nil
+       }
+
+       var dir string
+       if len(path) > (len(m.stripPrefix) + len(info.Name()) + 1) {
+               dir = path[len(m.stripPrefix)+1 : (len(path) - len(info.Name()) - 1)]
+       }
+       if dir == "" {
+               dir = "."
+       }
+
+       fn := path[(len(path) - len(info.Name())):]
+
+       if m.Streams[dir] == nil {
+               m.Streams[dir] = &ManifestStreamWriter{
+                       m,
+                       &manifest.ManifestStream{StreamName: dir},
+                       0,
+                       nil,
+                       make(chan *Block),
+                       make(chan []error)}
+               go m.Streams[dir].goUpload()
+       }
+
+       stream := m.Streams[dir]
+
+       fileStart := stream.offset
+
+       file, err := os.Open(path)
+       if err != nil {
+               return err
+       }
+       defer file.Close()
+
+       log.Printf("Uploading %v/%v (%v bytes)", dir, fn, info.Size())
+
+       var count int64
+       count, err = io.Copy(stream, file)
+       if err != nil {
+               return err
+       }
+
+       stream.offset += count
+
+       stream.ManifestStream.FileStreamSegments = append(stream.ManifestStream.FileStreamSegments,
+               manifest.FileStreamSegment{uint64(fileStart), uint64(count), fn})
+
+       return nil
+}
+
+func (m *ManifestWriter) Finish() error {
+       var errstring string
+       for _, stream := range m.Streams {
+               if stream.uploader == nil {
+                       continue
+               }
+               if stream.Block != nil {
+                       stream.uploader <- stream.Block
+               }
+               close(stream.uploader)
+               stream.uploader = nil
+
+               errors := <-stream.finish
+               close(stream.finish)
+               stream.finish = nil
+
+               for _, r := range errors {
+                       errstring = fmt.Sprintf("%v%v\n", errstring, r.Error())
+               }
+       }
+       if errstring != "" {
+               return errors.New(errstring)
+       } else {
+               return nil
+       }
+}
+
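+// ManifestText flushes any pending uploads and renders the collected
+// streams as collection manifest text: one line per stream, of the
+// form "<stream name> <block locator> ... <pos>:<len>:<filename> ...",
+// with spaces in names escaped as "\040".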
+func (m *ManifestWriter) ManifestText() string {
+       m.Finish()
+       var buf bytes.Buffer
+
+       dirs := make([]string, len(m.Streams))
+       i := 0
+       for k := range m.Streams {
+               dirs[i] = k
+               i++
+       }
+       sort.Strings(dirs)
+
+       for _, k := range dirs {
+               v := m.Streams[k]
+
+               if k == "." {
+                       buf.WriteString(".")
+               } else {
+                       k = strings.Replace(k, " ", "\\040", -1)
+                       k = strings.Replace(k, "\n", "", -1)
+                       buf.WriteString("./" + k)
+               }
+               for _, b := range v.Blocks {
+                       buf.WriteString(" ")
+                       buf.WriteString(b)
+               }
+               for _, f := range v.FileStreamSegments {
+                       buf.WriteString(" ")
+                       name := strings.Replace(f.Name, " ", "\\040", -1)
+                       name = strings.Replace(name, "\n", "", -1)
+                       buf.WriteString(fmt.Sprintf("%d:%d:%s", f.SegPos, f.SegLen, name))
+               }
+               buf.WriteString("\n")
+       }
+       return buf.String()
+}
+
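+// WriteTree uploads the contents of the directory tree rooted at root
+// to Keep via kc, and returns the resulting collection manifest text.
+// A minimal usage sketch (construction of the real keep client is
+// elided; the path is a placeholder):
+//
+//     manifest, err := WriteTree(kc, "/tmp/outdir")
+//     if err != nil {
+//             log.Fatal(err)
+//     }
+//     fmt.Print(manifest) // e.g. ". acbd18db...+3 0:3:file1.txt\n"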
+func WriteTree(kc IKeepClient, root string) (manifest string, err error) {
+       mw := ManifestWriter{kc, root, map[string]*ManifestStreamWriter{}}
+       err = filepath.Walk(root, mw.WalkFunc)
+
+       if err != nil {
+               return "", err
+       }
+
+       err = mw.Finish()
+       if err != nil {
+               return "", err
+       }
+
+       return mw.ManifestText(), nil
+}
diff --git a/sdk/go/crunchrunner/upload_test.go b/sdk/go/crunchrunner/upload_test.go
new file mode 100644 (file)
index 0000000..5bc7492
--- /dev/null
@@ -0,0 +1,152 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package main
+
+import (
+       "crypto/md5"
+       "errors"
+       "fmt"
+       "io/ioutil"
+       "os"
+       "syscall"
+
+       . "gopkg.in/check.v1"
+)
+
+type UploadTestSuite struct{}
+
+// Gocheck boilerplate
+var _ = Suite(&UploadTestSuite{})
+
+type KeepTestClient struct {
+}
+
+func (k KeepTestClient) PutHB(hash string, buf []byte) (string, int, error) {
+       return fmt.Sprintf("%x+%v", md5.Sum(buf), len(buf)), len(buf), nil
+}
+
+func (s *TestSuite) TestSimpleUpload(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
+
+       str, err := WriteTree(KeepTestClient{}, tmpdir)
+       c.Check(err, IsNil)
+       c.Check(str, Equals, ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:file1.txt\n")
+}
+
+func (s *TestSuite) TestSimpleUploadThreeFiles(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       for _, err := range []error{
+               ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600),
+               ioutil.WriteFile(tmpdir+"/"+"file2.txt", []byte("bar"), 0600),
+               os.Symlink("./file2.txt", tmpdir+"/file3.txt"),
+               syscall.Mkfifo(tmpdir+"/ignore.fifo", 0600),
+       } {
+               c.Assert(err, IsNil)
+       }
+
+       str, err := WriteTree(KeepTestClient{}, tmpdir)
+       c.Check(err, IsNil)
+       c.Check(str, Equals, ". aa65a413921163458c52fea478d5d3ee+9 0:3:file1.txt 3:3:file2.txt 6:3:file3.txt\n")
+}
+
+func (s *TestSuite) TestSimpleUploadSubdir(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       os.Mkdir(tmpdir+"/subdir", 0700)
+
+       ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
+       ioutil.WriteFile(tmpdir+"/subdir/file2.txt", []byte("bar"), 0600)
+
+       str, err := WriteTree(KeepTestClient{}, tmpdir)
+       c.Check(err, IsNil)
+       c.Check(str, Equals, `. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:file1.txt
+./subdir 37b51d194a7513e45b56f6524f2d51f2+3 0:3:file2.txt
+`)
+}
+
+func (s *TestSuite) TestSimpleUploadLarge(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       file, _ := os.Create(tmpdir + "/" + "file1.txt")
+       data := make([]byte, 1024*1024-1)
+       for i := range data {
+               data[i] = byte(i % 10)
+       }
+       for i := 0; i < 65; i++ {
+               file.Write(data)
+       }
+       file.Close()
+
+       ioutil.WriteFile(tmpdir+"/"+"file2.txt", []byte("bar"), 0600)
+
+       str, err := WriteTree(KeepTestClient{}, tmpdir)
+       c.Check(err, IsNil)
+       c.Check(str, Equals, ". 00ecf01e0d93385115c9f8bed757425d+67108864 485cd630387b6b1846fe429f261ea05f+1048514 0:68157375:file1.txt 68157375:3:file2.txt\n")
+}
+
+func (s *TestSuite) TestUploadEmptySubdir(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       os.Mkdir(tmpdir+"/subdir", 0700)
+
+       ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
+
+       str, err := WriteTree(KeepTestClient{}, tmpdir)
+       c.Check(err, IsNil)
+       c.Check(str, Equals, `. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:file1.txt
+`)
+}
+
+func (s *TestSuite) TestUploadEmptyFile(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte(""), 0600)
+
+       str, err := WriteTree(KeepTestClient{}, tmpdir)
+       c.Check(err, IsNil)
+       c.Check(str, Equals, `. d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1.txt
+`)
+}
+
+type KeepErrorTestClient struct {
+}
+
+func (k KeepErrorTestClient) PutHB(hash string, buf []byte) (string, int, error) {
+       return "", 0, errors.New("Failed!")
+}
+
+func (s *TestSuite) TestUploadError(c *C) {
+       tmpdir, _ := ioutil.TempDir("", "")
+       defer func() {
+               os.RemoveAll(tmpdir)
+       }()
+
+       ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
+
+       str, err := WriteTree(KeepErrorTestClient{}, tmpdir)
+       c.Check(err, NotNil)
+       c.Check(str, Equals, "")
+}
diff --git a/sdk/go/ctxlog/log.go b/sdk/go/ctxlog/log.go
new file mode 100644 (file)
index 0000000..e66eead
--- /dev/null
@@ -0,0 +1,109 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package ctxlog
+
+import (
+       "bytes"
+       "context"
+       "io"
+       "os"
+
+       "github.com/sirupsen/logrus"
+       check "gopkg.in/check.v1"
+)
+
+var (
+       loggerCtxKey = new(int)
+       rootLogger   = logrus.New()
+)
+
+const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// Context returns a new child context such that FromContext(child)
+// returns the given logger.
+func Context(ctx context.Context, logger logrus.FieldLogger) context.Context {
+       return context.WithValue(ctx, loggerCtxKey, logger)
+}
+
+// FromContext returns the logger suitable for the given context: the
+// one attached by Context() if applicable, otherwise the top-level
+// logger with no fields/values.
+func FromContext(ctx context.Context) logrus.FieldLogger {
+       if ctx != nil {
+               if logger, ok := ctx.Value(loggerCtxKey).(logrus.FieldLogger); ok {
+                       return logger
+               }
+       }
+       return rootLogger.WithFields(nil)
+}
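+
+// Typical use of Context/FromContext is to attach a request-scoped
+// logger near the top of a call chain and retrieve it further down
+// (a sketch; "requestID" and id are illustrative):
+//
+//     ctx = ctxlog.Context(ctx, ctxlog.FromContext(ctx).WithField("requestID", id))
+//     ...
+//     ctxlog.FromContext(ctx).Info("handling request")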
+
+// New returns a new logger with the indicated format and
+// level.
+func New(out io.Writer, format, level string) logrus.FieldLogger {
+       logger := logrus.New()
+       logger.Out = out
+       setFormat(logger, format)
+       setLevel(logger, level)
+       return logger
+}
+
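+// TestLogger returns a text-format logger that writes through
+// gocheck's c.Log, so output is collected with the test that produced
+// it. Setting ARVADOS_DEBUG in the environment raises the level to
+// debug.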
+func TestLogger(c *check.C) logrus.FieldLogger {
+       logger := logrus.New()
+       logger.Out = &logWriter{c.Log}
+       setFormat(logger, "text")
+       if d := os.Getenv("ARVADOS_DEBUG"); d != "0" && d != "" {
+               setLevel(logger, "debug")
+       } else {
+               setLevel(logger, "info")
+       }
+       return logger
+}
+
+// SetLevel sets the current logging level. See logrus for level
+// names.
+func SetLevel(level string) {
+       setLevel(rootLogger, level)
+}
+
+func setLevel(logger *logrus.Logger, level string) {
+       if level == "" {
+               return
+       }
+       lvl, err := logrus.ParseLevel(level)
+       if err != nil {
+               logrus.WithField("Level", level).Fatal("unknown log level")
+       }
+       logger.Level = lvl
+}
+
+// SetFormat sets the current logging format to "json" or "text".
+func SetFormat(format string) {
+       setFormat(rootLogger, format)
+}
+
+func setFormat(logger *logrus.Logger, format string) {
+       switch format {
+       case "text":
+               logger.Formatter = &logrus.TextFormatter{
+                       FullTimestamp:   true,
+                       TimestampFormat: rfc3339NanoFixed,
+               }
+       case "json", "":
+               logger.Formatter = &logrus.JSONFormatter{
+                       TimestampFormat: rfc3339NanoFixed,
+               }
+       default:
+               logrus.WithField("Format", format).Fatal("unknown log format")
+       }
+}
+
+// logWriter is an io.Writer that writes by calling a "write log"
+// function, typically (*check.C)Log().
+type logWriter struct {
+       logfunc func(...interface{})
+}
+
+func (tl *logWriter) Write(buf []byte) (int, error) {
+       tl.logfunc(string(bytes.TrimRight(buf, "\n")))
+       return len(buf), nil
+}
diff --git a/sdk/go/dispatch/dispatch.go b/sdk/go/dispatch/dispatch.go
new file mode 100644 (file)
index 0000000..fdb52e5
--- /dev/null
@@ -0,0 +1,353 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Package dispatch is a helper library for building Arvados container
+// dispatchers.
+package dispatch
+
+import (
+       "context"
+       "fmt"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "github.com/sirupsen/logrus"
+)
+
+const (
+       Queued    = arvados.ContainerStateQueued
+       Locked    = arvados.ContainerStateLocked
+       Running   = arvados.ContainerStateRunning
+       Complete  = arvados.ContainerStateComplete
+       Cancelled = arvados.ContainerStateCancelled
+)
+
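+// Logger is the logging interface used by Dispatcher. logrus's
+// StandardLogger satisfies it and is used by default.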
+type Logger interface {
+       Printf(string, ...interface{})
+       Warnf(string, ...interface{})
+       Debugf(string, ...interface{})
+}
+
+// A Dispatcher watches the container queue and invokes RunContainer
+// for each container it should handle.
+type Dispatcher struct {
+       Arv *arvadosclient.ArvadosClient
+
+       Logger Logger
+
+       // Batch size for container queries
+       BatchSize int64
+
+       // Queue polling frequency
+       PollPeriod time.Duration
+
+       // Time to wait between successive attempts to run the same container
+       MinRetryPeriod time.Duration
+
+       // Func that implements the container lifecycle. Must be set
+       // to a non-nil DispatchFunc before calling Run().
+       RunContainer DispatchFunc
+
+       auth     arvados.APIClientAuthorization
+       mtx      sync.Mutex
+       trackers map[string]*runTracker
+       throttle throttle
+}
+
+// A DispatchFunc executes a container (if the container record is
+// Locked) or resumes monitoring an already-running container, and
+// waits until that container exits.
+//
+// While the container runs, the DispatchFunc should listen for
+// updated container records on the provided channel. When the channel
+// closes, the DispatchFunc should stop the container if it's still
+// running, and return.
+//
+// The DispatchFunc should not return until the container is finished.
+type DispatchFunc func(*Dispatcher, arvados.Container, <-chan arvados.Container)
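+
+// A minimal RunContainer might look like the following sketch (from
+// outside the package; starting the container and error handling are
+// left out):
+//
+//     d.RunContainer = func(d *dispatch.Dispatcher, c arvados.Container, status <-chan arvados.Container) {
+//             // Start c here, then watch for updated container
+//             // records until the channel closes.
+//             for c := range status {
+//                     if c.State == dispatch.Cancelled {
+//                             // Stop the container.
+//                     }
+//             }
+//             // Channel closed: clean up and return.
+//     }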
+
+// Run watches the API server's queue for containers that are either
+// ready to run and available to lock, or are already locked by this
+// dispatcher's token. When a new one appears, Run calls RunContainer
+// in a new goroutine.
+func (d *Dispatcher) Run(ctx context.Context) error {
+       if d.Logger == nil {
+               d.Logger = logrus.StandardLogger()
+       }
+
+       err := d.Arv.Call("GET", "api_client_authorizations", "", "current", nil, &d.auth)
+       if err != nil {
+               return fmt.Errorf("error getting my token UUID: %v", err)
+       }
+
+       d.throttle.hold = d.MinRetryPeriod
+
+       poll := time.NewTicker(d.PollPeriod)
+       defer poll.Stop()
+
+       if d.BatchSize == 0 {
+               d.BatchSize = 100
+       }
+
+       for {
+               select {
+               case <-poll.C:
+               case <-ctx.Done():
+                       return ctx.Err()
+               }
+
+               todo := make(map[string]*runTracker)
+               d.mtx.Lock()
+               // Make a copy of trackers
+               for uuid, tracker := range d.trackers {
+                       todo[uuid] = tracker
+               }
+               d.mtx.Unlock()
+
+               // Containers I currently own (Locked/Running)
+               querySuccess := d.checkForUpdates([][]interface{}{
+                       {"locked_by_uuid", "=", d.auth.UUID}}, todo)
+
+               // Containers I should try to dispatch
+               querySuccess = d.checkForUpdates([][]interface{}{
+                       {"state", "=", Queued},
+                       {"priority", ">", "0"}}, todo) && querySuccess
+
+               if !querySuccess {
+                       // There was an error in one of the previous queries,
+                       // we probably didn't get updates for all the
+                       // containers we should have.  Don't check them
+                       // individually because it may be expensive.
+                       continue
+               }
+
+               // Containers I know about but didn't fall into the
+               // above two categories (probably Complete/Cancelled)
+               var missed []string
+               for uuid := range todo {
+                       missed = append(missed, uuid)
+               }
+
+               for len(missed) > 0 {
+                       var batch []string
+                       if len(missed) > 20 {
+                               batch = missed[0:20]
+                               missed = missed[20:]
+                       } else {
+                               batch = missed
+                               missed = missed[0:0]
+                       }
+                       querySuccess = d.checkForUpdates([][]interface{}{
+                               {"uuid", "in", batch}}, todo) && querySuccess
+               }
+
+               if !querySuccess {
+                       // There was an error in one of the previous queries, we probably
+                       // didn't see all the containers we should have, so don't shut down
+                       // the missed containers.
+                       continue
+               }
+
+               // Containers that I know about that didn't show up in any
+               // query should be let go.
+               for uuid, tracker := range todo {
+                       d.Logger.Printf("Container %q not returned by any query, stopping tracking.", uuid)
+                       tracker.close()
+               }
+
+       }
+}
+
+// Start a runner in a new goroutine, and send the initial container
+// record to its updates channel.
+func (d *Dispatcher) start(c arvados.Container) *runTracker {
+       tracker := &runTracker{
+               updates: make(chan arvados.Container, 1),
+               logger:  d.Logger,
+       }
+       tracker.updates <- c
+       go func() {
+               d.RunContainer(d, c, tracker.updates)
+               // RunContainer blocks for the lifetime of the container.  When
+               // it returns, the tracker should delete itself.
+               d.mtx.Lock()
+               delete(d.trackers, c.UUID)
+               d.mtx.Unlock()
+       }()
+       return tracker
+}
+
+func (d *Dispatcher) checkForUpdates(filters [][]interface{}, todo map[string]*runTracker) bool {
+       var countList arvados.ContainerList
+       params := arvadosclient.Dict{
+               "filters": filters,
+               "count":   "exact",
+               "limit":   0,
+               "order":   []string{"priority desc"}}
+       err := d.Arv.List("containers", params, &countList)
+       if err != nil {
+               d.Logger.Warnf("error getting count of containers: %q", err)
+               return false
+       }
+       itemsAvailable := countList.ItemsAvailable
+       params = arvadosclient.Dict{
+               "filters": filters,
+               "count":   "none",
+               "limit":   d.BatchSize,
+               "order":   []string{"priority desc"}}
+       offset := 0
+       for {
+               params["offset"] = offset
+
+               // This list variable must be a new one declared
+               // inside the loop: otherwise, items in the API
+               // response would get deep-merged into the items
+               // loaded in previous iterations.
+               var list arvados.ContainerList
+
+               err := d.Arv.List("containers", params, &list)
+               if err != nil {
+                       d.Logger.Warnf("error getting list of containers: %q", err)
+                       return false
+               }
+               d.checkListForUpdates(list.Items, todo)
+               offset += len(list.Items)
+               if len(list.Items) == 0 || itemsAvailable <= offset {
+                       return true
+               }
+       }
+}
+
+func (d *Dispatcher) checkListForUpdates(containers []arvados.Container, todo map[string]*runTracker) {
+       d.mtx.Lock()
+       defer d.mtx.Unlock()
+       if d.trackers == nil {
+               d.trackers = make(map[string]*runTracker)
+       }
+
+       for _, c := range containers {
+               tracker, alreadyTracking := d.trackers[c.UUID]
+               delete(todo, c.UUID)
+
+               if c.LockedByUUID != "" && c.LockedByUUID != d.auth.UUID {
+                       d.Logger.Debugf("ignoring %s locked by %s", c.UUID, c.LockedByUUID)
+               } else if alreadyTracking {
+                       switch c.State {
+                       case Queued:
+                               tracker.close()
+                       case Locked, Running:
+                               tracker.update(c)
+                       case Cancelled, Complete:
+                               tracker.close()
+                       }
+               } else {
+                       switch c.State {
+                       case Queued:
+                               if !d.throttle.Check(c.UUID) {
+                                       break
+                               }
+                               err := d.lock(c.UUID)
+                               if err != nil {
+                                       d.Logger.Warnf("error locking container %s: %s", c.UUID, err)
+                                       break
+                               }
+                               c.State = Locked
+                               d.trackers[c.UUID] = d.start(c)
+                       case Locked, Running:
+                               if !d.throttle.Check(c.UUID) {
+                                       break
+                               }
+                               d.trackers[c.UUID] = d.start(c)
+                       case Cancelled, Complete:
+                               // no-op (we already stopped monitoring)
+                       }
+               }
+       }
+}
+
+// UpdateState makes an API call to change the state of a container.
+func (d *Dispatcher) UpdateState(uuid string, state arvados.ContainerState) error {
+       err := d.Arv.Update("containers", uuid,
+               arvadosclient.Dict{
+                       "container": arvadosclient.Dict{"state": state},
+               }, nil)
+       if err != nil {
+               d.Logger.Warnf("error updating container %s to state %q: %s", uuid, state, err)
+       }
+       return err
+}
+
+// lock makes the lock API call, which updates the state of a container to Locked.
+func (d *Dispatcher) lock(uuid string) error {
+       return d.Arv.Call("POST", "containers", uuid, "lock", nil, nil)
+}
+
+// Unlock makes the unlock API call which updates the state of a container to Queued.
+func (d *Dispatcher) Unlock(uuid string) error {
+       return d.Arv.Call("POST", "containers", uuid, "unlock", nil, nil)
+}
+
+// TrackContainer ensures a tracker is running for the given UUID,
+// regardless of the current state of the container (except: if the
+// container is locked by a different dispatcher, a tracker will not
+// be started). If the container is not in Locked or Running state,
+// the new tracker will close down immediately.
+//
+// This allows the dispatcher to put its own RunContainer func into a
+// cleanup phase (for example, to kill local processes created by a
+// previous dispatch process that are still running even though the
+// container state is final) without the risk of having multiple
+// goroutines monitoring the same UUID.
+func (d *Dispatcher) TrackContainer(uuid string) error {
+       var cntr arvados.Container
+       err := d.Arv.Call("GET", "containers", uuid, "", nil, &cntr)
+       if err != nil {
+               return err
+       }
+       if cntr.LockedByUUID != "" && cntr.LockedByUUID != d.auth.UUID {
+               return nil
+       }
+
+       d.mtx.Lock()
+       defer d.mtx.Unlock()
+       if _, alreadyTracking := d.trackers[uuid]; alreadyTracking {
+               return nil
+       }
+       if d.trackers == nil {
+               d.trackers = make(map[string]*runTracker)
+       }
+       d.trackers[uuid] = d.start(cntr)
+       switch cntr.State {
+       case Queued, Cancelled, Complete:
+               d.trackers[uuid].close()
+       }
+       return nil
+}
+
+type runTracker struct {
+       closing bool
+       updates chan arvados.Container
+       logger  Logger
+}
+
+func (tracker *runTracker) close() {
+       if !tracker.closing {
+               close(tracker.updates)
+       }
+       tracker.closing = true
+}
+
+func (tracker *runTracker) update(c arvados.Container) {
+       if tracker.closing {
+               return
+       }
+       select {
+       case <-tracker.updates:
+               tracker.logger.Debugf("runner is handling updates slowly, discarded previous update for %s", c.UUID)
+       default:
+       }
+       tracker.updates <- c
+}
diff --git a/sdk/go/dispatch/dispatch_test.go b/sdk/go/dispatch/dispatch_test.go
new file mode 100644 (file)
index 0000000..c7e4938
--- /dev/null
@@ -0,0 +1,47 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package dispatch
+
+import (
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       . "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+var _ = Suite(&suite{})
+
+type suite struct{}
+
+func (s *suite) SetUpSuite(c *C) {
+       arvadostest.StartAPI()
+}
+
+func (s *suite) TearDownSuite(c *C) {
+       arvadostest.StopAPI()
+}
+
+func (s *suite) TestTrackContainer(c *C) {
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, Equals, nil)
+       arv.ApiToken = arvadostest.Dispatch1Token
+
+       done := make(chan bool, 1)
+       time.AfterFunc(10*time.Second, func() { done <- false })
+       d := &Dispatcher{
+               Arv: arv,
+               RunContainer: func(dsp *Dispatcher, ctr arvados.Container, status <-chan arvados.Container) {
+                       for ctr := range status {
+                               c.Logf("%#v", ctr)
+                       }
+                       done <- true
+               },
+       }
+       d.TrackContainer(arvadostest.QueuedContainerUUID)
+       c.Assert(<-done, Equals, true)
+}
diff --git a/sdk/go/dispatch/throttle.go b/sdk/go/dispatch/throttle.go
new file mode 100644 (file)
index 0000000..fada6ff
--- /dev/null
@@ -0,0 +1,62 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package dispatch
+
+import (
+       "sync"
+       "time"
+)
+
+type throttleEnt struct {
+       last time.Time // last attempt that was allowed
+}
+
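+// throttle rate-limits attempts per UUID: once an attempt is allowed,
+// further attempts for the same UUID are refused until the hold
+// period has elapsed. The zero value (hold == 0) allows everything.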
+type throttle struct {
+       hold      time.Duration
+       seen      map[string]*throttleEnt
+       updated   sync.Cond
+       setupOnce sync.Once
+       mtx       sync.Mutex
+}
+
+// Check checks whether there have been too many recent attempts with
+// the given uuid, and returns true if it's OK to attempt [again] now.
+func (t *throttle) Check(uuid string) bool {
+       if t.hold == 0 {
+               return true
+       }
+       t.setupOnce.Do(t.setup)
+       t.mtx.Lock()
+       defer t.updated.Broadcast()
+       defer t.mtx.Unlock()
+       ent, ok := t.seen[uuid]
+       if !ok {
+               t.seen[uuid] = &throttleEnt{last: time.Now()}
+               return true
+       }
+       if time.Since(ent.last) < t.hold {
+               return false
+       }
+       ent.last = time.Now()
+       return true
+}
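+
+// For example (a sketch; the dispatcher wires MinRetryPeriod into
+// t.hold before use):
+//
+//     t := throttle{hold: time.Minute}
+//     t.Check(uuid) // true: first attempt allowed
+//     t.Check(uuid) // false: within the hold period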
+
+func (t *throttle) setup() {
+       t.seen = make(map[string]*throttleEnt)
+       t.updated.L = &t.mtx
+       go func() {
+               for range time.NewTicker(t.hold).C {
+                       t.mtx.Lock()
+                       for uuid, ent := range t.seen {
+                               if time.Since(ent.last) >= t.hold {
+                                       delete(t.seen, uuid)
+                               }
+                       }
+                       // don't bother cleaning again until the next update
+                       t.updated.Wait()
+                       t.mtx.Unlock()
+               }
+       }()
+}
diff --git a/sdk/go/dispatch/throttle_test.go b/sdk/go/dispatch/throttle_test.go
new file mode 100644 (file)
index 0000000..d126596
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package dispatch
+
+import (
+       "testing"
+       "time"
+
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&ThrottleTestSuite{})
+
+type ThrottleTestSuite struct{}
+
+func (*ThrottleTestSuite) TestThrottle(c *check.C) {
+       uuid := "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+       t0 := throttle{}
+       c.Check(t0.Check(uuid), check.Equals, true)
+       c.Check(t0.Check(uuid), check.Equals, true)
+
+       tNs := throttle{hold: time.Nanosecond}
+       c.Check(tNs.Check(uuid), check.Equals, true)
+       time.Sleep(time.Microsecond)
+       c.Check(tNs.Check(uuid), check.Equals, true)
+
+       tMin := throttle{hold: time.Minute}
+       c.Check(tMin.Check(uuid), check.Equals, true)
+       c.Check(tMin.Check(uuid), check.Equals, false)
+       c.Check(tMin.Check(uuid), check.Equals, false)
+       tMin.seen[uuid].last = time.Now().Add(-time.Hour)
+       c.Check(tMin.Check(uuid), check.Equals, true)
+       c.Check(tMin.Check(uuid), check.Equals, false)
+}
diff --git a/sdk/go/health/aggregator.go b/sdk/go/health/aggregator.go
new file mode 100644 (file)
index 0000000..5643313
--- /dev/null
@@ -0,0 +1,227 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package health
+
+import (
+       "context"
+       "encoding/json"
+       "errors"
+       "fmt"
+       "net"
+       "net/http"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/auth"
+)
+
+const defaultTimeout = arvados.Duration(2 * time.Second)
+
+// Aggregator implements http.Handler. It handles "GET /_health/all"
+// by checking the health of all configured services on the cluster
+// and responding 200 if everything is healthy.
+type Aggregator struct {
+       setupOnce  sync.Once
+       httpClient *http.Client
+       timeout    arvados.Duration
+
+       Config *arvados.Config
+
+       // If non-nil, Log is called after handling each request.
+       Log func(*http.Request, error)
+}
+
+func (agg *Aggregator) setup() {
+       agg.httpClient = http.DefaultClient
+       if agg.timeout == 0 {
+               // this is always the case, except in the test suite
+               agg.timeout = defaultTimeout
+       }
+}
+
+func (agg *Aggregator) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       agg.setupOnce.Do(agg.setup)
+       sendErr := func(statusCode int, err error) {
+               resp.WriteHeader(statusCode)
+               json.NewEncoder(resp).Encode(map[string]string{"error": err.Error()})
+               if agg.Log != nil {
+                       agg.Log(req, err)
+               }
+       }
+
+       resp.Header().Set("Content-Type", "application/json")
+
+       cluster, err := agg.Config.GetCluster("")
+       if err != nil {
+               err = fmt.Errorf("arvados.GetCluster(): %s", err)
+               sendErr(http.StatusInternalServerError, err)
+               return
+       }
+       if !agg.checkAuth(req, cluster) {
+               sendErr(http.StatusUnauthorized, errUnauthorized)
+               return
+       }
+       if req.URL.Path != "/_health/all" {
+               sendErr(http.StatusNotFound, errNotFound)
+               return
+       }
+       json.NewEncoder(resp).Encode(agg.ClusterHealth(cluster))
+       if agg.Log != nil {
+               agg.Log(req, nil)
+       }
+}
+
+type ClusterHealthResponse struct {
+       // "OK" if all needed services are OK, otherwise "ERROR".
+       Health string `json:"health"`
+
+       // An entry for each known health check of each known instance
+       // of each needed component: "instance of service S on node N
+       // reports health-check C is OK."
+       Checks map[string]CheckResult `json:"checks"`
+
+       // An entry for each service type: "service S is OK." This
+       // exposes problems that can't be expressed in Checks, like
+       // "service S is needed, but isn't configured to run
+       // anywhere."
+       Services map[arvados.ServiceName]ServiceHealth `json:"services"`
+}
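+
+// A response might look like this (abridged; values and the keepstore
+// port are illustrative):
+//
+//     {
+//       "health": "OK",
+//       "checks": {
+//         "keepstore+http://node1:25107/_health/ping": {"health": "OK", ...}
+//       },
+//       "services": {
+//         "keepstore": {"health": "OK", "n": 1}
+//       }
+//     }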
+
+type CheckResult struct {
+       Health         string                 `json:"health"`
+       Error          string                 `json:"error,omitempty"`
+       HTTPStatusCode int                    `json:",omitempty"`
+       HTTPStatusText string                 `json:",omitempty"`
+       Response       map[string]interface{} `json:"response"`
+       ResponseTime   json.Number            `json:"responseTime"`
+}
+
+type ServiceHealth struct {
+       Health string `json:"health"`
+       N      int    `json:"n"`
+}
+
+func (agg *Aggregator) ClusterHealth(cluster *arvados.Cluster) ClusterHealthResponse {
+       resp := ClusterHealthResponse{
+               Health:   "OK",
+               Checks:   make(map[string]CheckResult),
+               Services: make(map[arvados.ServiceName]ServiceHealth),
+       }
+
+       mtx := sync.Mutex{}
+       wg := sync.WaitGroup{}
+       for profileName, profile := range cluster.NodeProfiles {
+               for svc, addr := range profile.ServicePorts() {
+                       // Ensure svc is listed in resp.Services.
+                       mtx.Lock()
+                       if _, ok := resp.Services[svc]; !ok {
+                               resp.Services[svc] = ServiceHealth{Health: "ERROR"}
+                       }
+                       mtx.Unlock()
+
+                       if addr == "" {
+                               // svc is not expected on this node.
+                               continue
+                       }
+
+                       wg.Add(1)
+                       go func(profileName string, svc arvados.ServiceName, addr string) {
+                               defer wg.Done()
+                               var result CheckResult
+                               url, err := agg.pingURL(profileName, addr)
+                               if err != nil {
+                                       result = CheckResult{
+                                               Health: "ERROR",
+                                               Error:  err.Error(),
+                                       }
+                               } else {
+                                       result = agg.ping(url, cluster)
+                               }
+
+                               mtx.Lock()
+                               defer mtx.Unlock()
+                               resp.Checks[fmt.Sprintf("%s+%s", svc, url)] = result
+                               if result.Health == "OK" {
+                                       h := resp.Services[svc]
+                                       h.N++
+                                       h.Health = "OK"
+                                       resp.Services[svc] = h
+                               } else {
+                                       resp.Health = "ERROR"
+                               }
+                       }(profileName, svc, addr)
+               }
+       }
+       wg.Wait()
+
+       // A service that isn't configured to run anywhere produces no
+       // checks at all, so it can't have failed any -- but it should
+       // still be reported as ERROR.
+       for _, sh := range resp.Services {
+               if sh.Health != "OK" {
+                       resp.Health = "ERROR"
+                       break
+               }
+       }
+       return resp
+}
+
+func (agg *Aggregator) pingURL(node, addr string) (string, error) {
+       _, port, err := net.SplitHostPort(addr)
+       return "http://" + node + ":" + port + "/_health/ping", err
+}
+
+func (agg *Aggregator) ping(url string, cluster *arvados.Cluster) (result CheckResult) {
+       t0 := time.Now()
+
+       var err error
+       defer func() {
+               result.ResponseTime = json.Number(fmt.Sprintf("%.6f", time.Since(t0).Seconds()))
+               if err != nil {
+                       result.Health, result.Error = "ERROR", err.Error()
+               } else {
+                       result.Health = "OK"
+               }
+       }()
+
+       req, err := http.NewRequest("GET", url, nil)
+       if err != nil {
+               return
+       }
+       req.Header.Set("Authorization", "Bearer "+cluster.ManagementToken)
+
+       ctx, cancel := context.WithTimeout(req.Context(), time.Duration(agg.timeout))
+       defer cancel()
+       req = req.WithContext(ctx)
+       resp, err := agg.httpClient.Do(req)
+       if err != nil {
+               return
+       }
+       result.HTTPStatusCode = resp.StatusCode
+       result.HTTPStatusText = resp.Status
+       err = json.NewDecoder(resp.Body).Decode(&result.Response)
+       if err != nil {
+               err = fmt.Errorf("cannot decode response: %s", err)
+       } else if resp.StatusCode != http.StatusOK {
+               err = fmt.Errorf("HTTP %d %s", resp.StatusCode, resp.Status)
+       } else if h, _ := result.Response["health"].(string); h != "OK" {
+               if e, ok := result.Response["error"].(string); ok && e != "" {
+                       err = errors.New(e)
+               } else {
+                       err = fmt.Errorf("health=%q in ping response", h)
+               }
+       }
+       return
+}
+
+func (agg *Aggregator) checkAuth(req *http.Request, cluster *arvados.Cluster) bool {
+       creds := auth.CredentialsFromRequest(req)
+       for _, token := range creds.Tokens {
+               if token != "" && token == cluster.ManagementToken {
+                       return true
+               }
+       }
+       return false
+}
diff --git a/sdk/go/health/aggregator_test.go b/sdk/go/health/aggregator_test.go
new file mode 100644 (file)
index 0000000..122355b
--- /dev/null
@@ -0,0 +1,212 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package health
+
+import (
+       "encoding/json"
+       "net/http"
+       "net/http/httptest"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "gopkg.in/check.v1"
+)
+
+type AggregatorSuite struct {
+       handler *Aggregator
+       req     *http.Request
+       resp    *httptest.ResponseRecorder
+}
+
+// Gocheck boilerplate
+var _ = check.Suite(&AggregatorSuite{})
+
+func (s *AggregatorSuite) TestInterface(c *check.C) {
+       var _ http.Handler = &Aggregator{}
+}
+
+func (s *AggregatorSuite) SetUpTest(c *check.C) {
+       s.handler = &Aggregator{Config: &arvados.Config{
+               Clusters: map[string]arvados.Cluster{
+                       "zzzzz": {
+                               ManagementToken: arvadostest.ManagementToken,
+                               NodeProfiles:    map[string]arvados.NodeProfile{},
+                       },
+               },
+       }}
+       s.req = httptest.NewRequest("GET", "/_health/all", nil)
+       s.req.Header.Set("Authorization", "Bearer "+arvadostest.ManagementToken)
+       s.resp = httptest.NewRecorder()
+}
+
+func (s *AggregatorSuite) TestNoAuth(c *check.C) {
+       s.req.Header.Del("Authorization")
+       s.handler.ServeHTTP(s.resp, s.req)
+       s.checkError(c)
+       c.Check(s.resp.Code, check.Equals, http.StatusUnauthorized)
+}
+
+func (s *AggregatorSuite) TestBadAuth(c *check.C) {
+       s.req.Header.Set("Authorization", "xyzzy")
+       s.handler.ServeHTTP(s.resp, s.req)
+       s.checkError(c)
+       c.Check(s.resp.Code, check.Equals, http.StatusUnauthorized)
+}
+
+func (s *AggregatorSuite) TestEmptyConfig(c *check.C) {
+       s.handler.ServeHTTP(s.resp, s.req)
+       s.checkOK(c)
+}
+
+func (s *AggregatorSuite) stubServer(handler http.Handler) (*httptest.Server, string) {
+       srv := httptest.NewServer(handler)
+       var port string
+       if parts := strings.Split(srv.URL, ":"); len(parts) < 3 {
+               panic(srv.URL)
+       } else {
+               port = parts[len(parts)-1]
+       }
+       return srv, ":" + port
+}
+
+type unhealthyHandler struct{}
+
+func (*unhealthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       if req.URL.Path == "/_health/ping" {
+               resp.Write([]byte(`{"health":"ERROR","error":"the bends"}`))
+       } else {
+               http.Error(resp, "not found", http.StatusNotFound)
+       }
+}
+
+func (s *AggregatorSuite) TestUnhealthy(c *check.C) {
+       srv, listen := s.stubServer(&unhealthyHandler{})
+       defer srv.Close()
+       s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
+               Keepstore: arvados.SystemServiceInstance{Listen: listen},
+       }
+       s.handler.ServeHTTP(s.resp, s.req)
+       s.checkUnhealthy(c)
+}
+
+type healthyHandler struct{}
+
+func (*healthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       if req.URL.Path == "/_health/ping" {
+               resp.Write([]byte(`{"health":"OK"}`))
+       } else {
+               http.Error(resp, "not found", http.StatusNotFound)
+       }
+}
+
+func (s *AggregatorSuite) TestHealthy(c *check.C) {
+       srv, listen := s.stubServer(&healthyHandler{})
+       defer srv.Close()
+       s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
+               Controller:    arvados.SystemServiceInstance{Listen: listen},
+               DispatchCloud: arvados.SystemServiceInstance{Listen: listen},
+               Keepbalance:   arvados.SystemServiceInstance{Listen: listen},
+               Keepproxy:     arvados.SystemServiceInstance{Listen: listen},
+               Keepstore:     arvados.SystemServiceInstance{Listen: listen},
+               Keepweb:       arvados.SystemServiceInstance{Listen: listen},
+               Nodemanager:   arvados.SystemServiceInstance{Listen: listen},
+               RailsAPI:      arvados.SystemServiceInstance{Listen: listen},
+               Websocket:     arvados.SystemServiceInstance{Listen: listen},
+               Workbench:     arvados.SystemServiceInstance{Listen: listen},
+       }
+       s.handler.ServeHTTP(s.resp, s.req)
+       resp := s.checkOK(c)
+       svc := "keepstore+http://localhost" + listen + "/_health/ping"
+       c.Logf("%#v", resp)
+       ep := resp.Checks[svc]
+       c.Check(ep.Health, check.Equals, "OK")
+       c.Check(ep.HTTPStatusCode, check.Equals, 200)
+}
+
+func (s *AggregatorSuite) TestHealthyAndUnhealthy(c *check.C) {
+       srvH, listenH := s.stubServer(&healthyHandler{})
+       defer srvH.Close()
+       srvU, listenU := s.stubServer(&unhealthyHandler{})
+       defer srvU.Close()
+       s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
+               Controller:    arvados.SystemServiceInstance{Listen: listenH},
+               DispatchCloud: arvados.SystemServiceInstance{Listen: listenH},
+               Keepbalance:   arvados.SystemServiceInstance{Listen: listenH},
+               Keepproxy:     arvados.SystemServiceInstance{Listen: listenH},
+               Keepstore:     arvados.SystemServiceInstance{Listen: listenH},
+               Keepweb:       arvados.SystemServiceInstance{Listen: listenH},
+               Nodemanager:   arvados.SystemServiceInstance{Listen: listenH},
+               RailsAPI:      arvados.SystemServiceInstance{Listen: listenH},
+               Websocket:     arvados.SystemServiceInstance{Listen: listenH},
+               Workbench:     arvados.SystemServiceInstance{Listen: listenH},
+       }
+       s.handler.Config.Clusters["zzzzz"].NodeProfiles["127.0.0.1"] = arvados.NodeProfile{
+               Keepstore: arvados.SystemServiceInstance{Listen: listenU},
+       }
+       s.handler.ServeHTTP(s.resp, s.req)
+       resp := s.checkUnhealthy(c)
+       ep := resp.Checks["keepstore+http://localhost"+listenH+"/_health/ping"]
+       c.Check(ep.Health, check.Equals, "OK")
+       c.Check(ep.HTTPStatusCode, check.Equals, 200)
+       ep = resp.Checks["keepstore+http://127.0.0.1"+listenU+"/_health/ping"]
+       c.Check(ep.Health, check.Equals, "ERROR")
+       c.Check(ep.HTTPStatusCode, check.Equals, 200)
+       c.Logf("%#v", ep)
+}
+
+func (s *AggregatorSuite) checkError(c *check.C) {
+       c.Check(s.resp.Code, check.Not(check.Equals), http.StatusOK)
+       var resp ClusterHealthResponse
+       err := json.NewDecoder(s.resp.Body).Decode(&resp)
+       c.Check(err, check.IsNil)
+       c.Check(resp.Health, check.Not(check.Equals), "OK")
+}
+
+func (s *AggregatorSuite) checkUnhealthy(c *check.C) ClusterHealthResponse {
+       return s.checkResult(c, "ERROR")
+}
+
+func (s *AggregatorSuite) checkOK(c *check.C) ClusterHealthResponse {
+       return s.checkResult(c, "OK")
+}
+
+func (s *AggregatorSuite) checkResult(c *check.C, health string) ClusterHealthResponse {
+       c.Check(s.resp.Code, check.Equals, http.StatusOK)
+       var resp ClusterHealthResponse
+       err := json.NewDecoder(s.resp.Body).Decode(&resp)
+       c.Check(err, check.IsNil)
+       c.Check(resp.Health, check.Equals, health)
+       return resp
+}
+
+type slowHandler struct{}
+
+func (*slowHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       if req.URL.Path == "/_health/ping" {
+               time.Sleep(3 * time.Second)
+               resp.Write([]byte(`{"health":"OK"}`))
+       } else {
+               http.Error(resp, "not found", http.StatusNotFound)
+       }
+}
+
+func (s *AggregatorSuite) TestPingTimeout(c *check.C) {
+       s.handler.timeout = arvados.Duration(100 * time.Millisecond)
+       srv, listen := s.stubServer(&slowHandler{})
+       defer srv.Close()
+       s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
+               Keepstore: arvados.SystemServiceInstance{Listen: listen},
+       }
+       s.handler.ServeHTTP(s.resp, s.req)
+       resp := s.checkUnhealthy(c)
+       ep := resp.Checks["keepstore+http://localhost"+listen+"/_health/ping"]
+       c.Check(ep.Health, check.Equals, "ERROR")
+       c.Check(ep.HTTPStatusCode, check.Equals, 0)
+       rt, err := ep.ResponseTime.Float64()
+       c.Check(err, check.IsNil)
+       c.Check(rt > 0.005, check.Equals, true)
+}
diff --git a/sdk/go/health/handler.go b/sdk/go/health/handler.go
new file mode 100644 (file)
index 0000000..81b9587
--- /dev/null
@@ -0,0 +1,109 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package health
+
+import (
+       "encoding/json"
+       "errors"
+       "net/http"
+       "strings"
+       "sync"
+)
+
+// Func is a health-check function: it returns nil when healthy, an
+// error when not.
+type Func func() error
+
+// Routes is a map of URI path to health-check function.
+type Routes map[string]Func
+
+// Handler is an http.Handler that responds to authenticated
+// health-check requests with JSON responses like {"health":"OK"} or
+// {"health":"ERROR","error":"error text"}.
+//
+// Fields of a Handler should not be changed after the Handler is
+// first used.
+type Handler struct {
+       setupOnce sync.Once
+       mux       *http.ServeMux
+
+       // Authentication token. If empty, all requests will return 404.
+       Token string
+
+       // Route prefix, typically "/_health/".
+       Prefix string
+
+       // Map of URI paths to health-check Func. The prefix is
+       // omitted: Routes["foo"] is the health check invoked by a
+       // request to "{Prefix}/foo".
+       //
+       // If "ping" is not listed here, it will be added
+       // automatically and will always return a "healthy" response.
+       Routes Routes
+
+       // If non-nil, Log is called after handling each request. The
+       // error argument is nil if the request was successfully
+       // authenticated and served, even if the health check itself
+       // failed.
+       Log func(*http.Request, error)
+}
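+
+// A typical setup looks like this (a sketch; the token and check func
+// are placeholders, and db is assumed to be e.g. an *sql.DB):
+//
+//     h := &health.Handler{
+//             Token:  "xyzzy",
+//             Prefix: "/_health/",
+//             Routes: health.Routes{
+//                     "db": func() error { return db.Ping() },
+//             },
+//     }
+//     http.ListenAndServe(":9999", h)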
+
+// ServeHTTP implements http.Handler.
+func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+       h.setupOnce.Do(h.setup)
+       h.mux.ServeHTTP(w, r)
+}
+
+func (h *Handler) setup() {
+       h.mux = http.NewServeMux()
+       prefix := h.Prefix
+       if !strings.HasSuffix(prefix, "/") {
+               prefix = prefix + "/"
+       }
+       for name, fn := range h.Routes {
+               h.mux.Handle(prefix+name, h.healthJSON(fn))
+       }
+       if _, ok := h.Routes["ping"]; !ok {
+               h.mux.Handle(prefix+"ping", h.healthJSON(func() error { return nil }))
+       }
+}
+
+var (
+       healthyBody     = []byte(`{"health":"OK"}` + "\n")
+       errNotFound     = errors.New(http.StatusText(http.StatusNotFound))
+       errUnauthorized = errors.New(http.StatusText(http.StatusUnauthorized))
+       errForbidden    = errors.New(http.StatusText(http.StatusForbidden))
+)
+
+func (h *Handler) healthJSON(fn Func) http.Handler {
+       return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+               var err error
+               defer func() {
+                       if h.Log != nil {
+                               h.Log(r, err)
+                       }
+               }()
+               if h.Token == "" {
+                       http.Error(w, "disabled", http.StatusNotFound)
+                       err = errNotFound
+               } else if ah := r.Header.Get("Authorization"); ah == "" {
+                       http.Error(w, "authorization required", http.StatusUnauthorized)
+                       err = errUnauthorized
+               } else if ah != "Bearer "+h.Token {
+                       http.Error(w, "authorization error", http.StatusForbidden)
+                       err = errForbidden
+               } else if err = fn(); err == nil {
+                       w.Header().Set("Content-Type", "application/json")
+                       w.Write(healthyBody)
+               } else {
+                       w.Header().Set("Content-Type", "application/json")
+                       enc := json.NewEncoder(w)
+                       err = enc.Encode(map[string]string{
+                               "health": "ERROR",
+                               "error":  err.Error(),
+                       })
+               }
+       })
+}
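
For orientation, a minimal sketch of wiring this handler into a service; the port, token, and the "db" check are hypothetical, not part of the commit:

package main

import (
	"log"
	"net/http"

	"git.curoverse.com/arvados.git/sdk/go/health"
)

func main() {
	h := &health.Handler{
		Token:  "example-token", // hypothetical shared secret
		Prefix: "/_health/",
		Routes: health.Routes{
			// hypothetical check; nil means healthy
			"db": func() error { return nil },
		},
		Log: func(r *http.Request, err error) {
			if err != nil {
				log.Printf("health %s: %v", r.URL.Path, err)
			}
		},
	}
	// GET /_health/ping and /_health/db respond when the client
	// sends "Authorization: Bearer example-token".
	log.Fatal(http.ListenAndServe(":8080", h))
}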
diff --git a/sdk/go/health/handler_test.go b/sdk/go/health/handler_test.go
new file mode 100644
index 0000000..c9f6a0b
--- /dev/null
+++ b/sdk/go/health/handler_test.go
@@ -0,0 +1,137 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package health
+
+import (
+       "encoding/json"
+       "errors"
+       "net/http"
+       "net/http/httptest"
+       "net/url"
+       "testing"
+
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+var _ = check.Suite(&Suite{})
+
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+type Suite struct{}
+
+const (
+       goodToken = "supersecret"
+       badToken  = "pwn"
+)
+
+func (s *Suite) TestPassFailRefuse(c *check.C) {
+       h := &Handler{
+               Token:  goodToken,
+               Prefix: "/_health/",
+               Routes: Routes{
+                       "success": func() error { return nil },
+                       "miracle": func() error { return errors.New("unimplemented") },
+               },
+       }
+
+       resp := httptest.NewRecorder()
+       h.ServeHTTP(resp, s.request("/_health/ping", goodToken))
+       s.checkHealthy(c, resp)
+
+       resp = httptest.NewRecorder()
+       h.ServeHTTP(resp, s.request("/_health/success", goodToken))
+       s.checkHealthy(c, resp)
+
+       resp = httptest.NewRecorder()
+       h.ServeHTTP(resp, s.request("/_health/miracle", goodToken))
+       s.checkUnhealthy(c, resp)
+
+       resp = httptest.NewRecorder()
+       h.ServeHTTP(resp, s.request("/_health/miracle", badToken))
+       c.Check(resp.Code, check.Equals, http.StatusForbidden)
+
+       resp = httptest.NewRecorder()
+       h.ServeHTTP(resp, s.request("/_health/miracle", ""))
+       c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
+
+       resp = httptest.NewRecorder()
+       h.ServeHTTP(resp, s.request("/_health/theperthcountyconspiracy", ""))
+       c.Check(resp.Code, check.Equals, http.StatusNotFound)
+
+       resp = httptest.NewRecorder()
+       h.ServeHTTP(resp, s.request("/x/miracle", ""))
+       c.Check(resp.Code, check.Equals, http.StatusNotFound)
+
+       resp = httptest.NewRecorder()
+       h.ServeHTTP(resp, s.request("/miracle", ""))
+       c.Check(resp.Code, check.Equals, http.StatusNotFound)
+}
+
+func (s *Suite) TestPingOverride(c *check.C) {
+       var ok bool
+       h := &Handler{
+               Token: goodToken,
+               Routes: Routes{
+                       "ping": func() error {
+                               ok = !ok
+                               if ok {
+                                       return nil
+                               } else {
+                                       return errors.New("good error")
+                               }
+                       },
+               },
+       }
+       resp := httptest.NewRecorder()
+       h.ServeHTTP(resp, s.request("/ping", goodToken))
+       s.checkHealthy(c, resp)
+
+       resp = httptest.NewRecorder()
+       h.ServeHTTP(resp, s.request("/ping", goodToken))
+       s.checkUnhealthy(c, resp)
+}
+
+func (s *Suite) TestZeroValueIsDisabled(c *check.C) {
+       resp := httptest.NewRecorder()
+       (&Handler{}).ServeHTTP(resp, s.request("/ping", goodToken))
+       c.Check(resp.Code, check.Equals, http.StatusNotFound)
+
+       resp = httptest.NewRecorder()
+       (&Handler{}).ServeHTTP(resp, s.request("/ping", ""))
+       c.Check(resp.Code, check.Equals, http.StatusNotFound)
+}
+
+func (s *Suite) request(path, token string) *http.Request {
+       u, _ := url.Parse("http://foo.local" + path)
+       req := &http.Request{
+               Method:     "GET",
+               Host:       u.Host,
+               URL:        u,
+               RequestURI: u.RequestURI(),
+       }
+       if token != "" {
+               req.Header = http.Header{
+                       "Authorization": {"Bearer " + token},
+               }
+       }
+       return req
+}
+
+func (s *Suite) checkHealthy(c *check.C, resp *httptest.ResponseRecorder) {
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       c.Check(resp.Body.String(), check.Equals, `{"health":"OK"}`+"\n")
+}
+
+func (s *Suite) checkUnhealthy(c *check.C, resp *httptest.ResponseRecorder) {
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       var result map[string]interface{}
+       err := json.Unmarshal(resp.Body.Bytes(), &result)
+       c.Assert(err, check.IsNil)
+       c.Check(result["health"], check.Equals, "ERROR")
+       c.Check(result["error"].(string), check.Not(check.Equals), "")
+}
diff --git a/sdk/go/httpserver/error.go b/sdk/go/httpserver/error.go
new file mode 100644
index 0000000..1ccf8c0
--- /dev/null
+++ b/sdk/go/httpserver/error.go
@@ -0,0 +1,28 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package httpserver
+
+import (
+       "encoding/json"
+       "net/http"
+)
+
+type ErrorResponse struct {
+       Errors []string `json:"errors"`
+}
+
+func Error(w http.ResponseWriter, error string, code int) {
+       w.Header().Set("Content-Type", "application/json")
+       w.Header().Set("X-Content-Type-Options", "nosniff")
+       w.WriteHeader(code)
+       json.NewEncoder(w).Encode(ErrorResponse{Errors: []string{error}})
+}
+
+func Errors(w http.ResponseWriter, errors []string, code int) {
+       w.Header().Set("Content-Type", "application/json")
+       w.Header().Set("X-Content-Type-Options", "nosniff")
+       w.WriteHeader(code)
+       json.NewEncoder(w).Encode(ErrorResponse{Errors: errors})
+}
diff --git a/sdk/go/httpserver/httpserver.go b/sdk/go/httpserver/httpserver.go
new file mode 100644
index 0000000..a94146f
--- /dev/null
+++ b/sdk/go/httpserver/httpserver.go
@@ -0,0 +1,91 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package httpserver
+
+import (
+       "net"
+       "net/http"
+       "sync"
+       "time"
+)
+
+type Server struct {
+       http.Server
+       Addr     string // host:port where the server is listening.
+       err      error
+       cond     *sync.Cond
+       running  bool
+       listener *net.TCPListener
+       wantDown bool
+}
+
+// Start is essentially (*http.Server).ListenAndServe() with two more
+// features: (1) by the time Start() returns, Addr is changed to the
+// address:port we ended up listening on -- which makes listening on
+// ":0" useful in test suites -- and (2) the server can be shut down
+// without killing the process -- which is useful in test cases, and
+// makes it possible to shut down gracefully on SIGTERM without
+// killing active connections.
+func (srv *Server) Start() error {
+       addr, err := net.ResolveTCPAddr("tcp", srv.Addr)
+       if err != nil {
+               return err
+       }
+       srv.listener, err = net.ListenTCP("tcp", addr)
+       if err != nil {
+               return err
+       }
+       srv.Addr = srv.listener.Addr().String()
+
+       mutex := &sync.RWMutex{}
+       srv.cond = sync.NewCond(mutex.RLocker())
+       srv.running = true
+       go func() {
+               err = srv.Serve(tcpKeepAliveListener{srv.listener})
+               if !srv.wantDown {
+                       srv.err = err
+               }
+               mutex.Lock()
+               srv.running = false
+               srv.cond.Broadcast()
+               mutex.Unlock()
+       }()
+       return nil
+}
+
+// Close shuts down the server and returns when it has stopped.
+func (srv *Server) Close() error {
+       srv.wantDown = true
+       srv.listener.Close()
+       return srv.Wait()
+}
+
+// Wait returns when the server has shut down.
+func (srv *Server) Wait() error {
+       if srv.cond == nil {
+               return nil
+       }
+       srv.cond.L.Lock()
+       defer srv.cond.L.Unlock()
+       for srv.running {
+               srv.cond.Wait()
+       }
+       return srv.err
+}
+
+// tcpKeepAliveListener is copied from net/http because it is not exported there.
+type tcpKeepAliveListener struct {
+       *net.TCPListener
+}
+
+func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
+       tc, err := ln.AcceptTCP()
+       if err != nil {
+               return
+       }
+       tc.SetKeepAlive(true)
+       tc.SetKeepAlivePeriod(3 * time.Minute)
+       return tc, nil
+}
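
A compilable sketch of the Start/Close lifecycle described above; the trivial handler and the use of ":0" are illustrative assumptions:

package main

import (
	"log"
	"net/http"

	"git.curoverse.com/arvados.git/sdk/go/httpserver"
)

func main() {
	srv := &httpserver.Server{}
	srv.Addr = ":0" // any free port; Start() rewrites Addr with the real address
	srv.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	if err := srv.Start(); err != nil {
		log.Fatal(err)
	}
	log.Printf("listening on %s", srv.Addr)
	// Shut down without killing the process, then wait for Serve to return.
	if err := srv.Close(); err != nil {
		log.Print(err)
	}
}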
diff --git a/sdk/go/httpserver/id_generator.go b/sdk/go/httpserver/id_generator.go
new file mode 100644
index 0000000..14d8987
--- /dev/null
+++ b/sdk/go/httpserver/id_generator.go
@@ -0,0 +1,59 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package httpserver
+
+import (
+       "math/rand"
+       "net/http"
+       "strconv"
+       "sync"
+       "time"
+)
+
+const (
+       HeaderRequestID = "X-Request-Id"
+)
+
+// IDGenerator generates alphanumeric strings suitable for use as
+// unique IDs (a given IDGenerator will never return the same ID
+// twice).
+type IDGenerator struct {
+       // Prefix is prepended to each returned ID.
+       Prefix string
+
+       mtx sync.Mutex
+       src rand.Source
+}
+
+// Next returns a new ID string. It is safe to call Next from multiple
+// goroutines.
+func (g *IDGenerator) Next() string {
+       g.mtx.Lock()
+       defer g.mtx.Unlock()
+       if g.src == nil {
+               g.src = rand.NewSource(time.Now().UnixNano())
+       }
+       a, b := g.src.Int63(), g.src.Int63()
+       id := strconv.FormatInt(a, 36) + strconv.FormatInt(b, 36)
+       if len(id) > 20 {
+               id = id[:20]
+       }
+       return g.Prefix + id
+}
+
+// AddRequestIDs wraps an http.Handler, adding an X-Request-Id header
+// to each request that doesn't already have one.
+func AddRequestIDs(h http.Handler) http.Handler {
+       gen := &IDGenerator{Prefix: "req-"}
+       return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+               if req.Header.Get(HeaderRequestID) == "" {
+                       if req.Header == nil {
+                               req.Header = http.Header{}
+                       }
+                       req.Header.Set(HeaderRequestID, gen.Next())
+               }
+               h.ServeHTTP(w, req)
+       })
+}
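
A brief sketch of AddRequestIDs in a handler stack; the route and port are hypothetical:

package main

import (
	"fmt"
	"log"
	"net/http"

	"git.curoverse.com/arvados.git/sdk/go/httpserver"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// AddRequestIDs has already filled in X-Request-Id if
		// the client didn't send one.
		fmt.Fprintln(w, r.Header.Get(httpserver.HeaderRequestID))
	})
	log.Fatal(http.ListenAndServe(":8080", httpserver.AddRequestIDs(mux)))
}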
diff --git a/sdk/go/httpserver/log.go b/sdk/go/httpserver/log.go
new file mode 100644
index 0000000..a6d0478
--- /dev/null
+++ b/sdk/go/httpserver/log.go
@@ -0,0 +1,28 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package httpserver
+
+import (
+       "fmt"
+       "log"
+)
+
+// Log calls log.Println but first transforms strings so they are
+// safer to write in logs (e.g., 'foo"bar' becomes
+// '"foo\"bar"'). Arguments that aren't strings and don't have a
+// (String() string) method are left alone.
+func Log(args ...interface{}) {
+       newargs := make([]interface{}, len(args))
+       for i, arg := range args {
+               if s, ok := arg.(string); ok {
+                       newargs[i] = fmt.Sprintf("%+q", s)
+               } else if s, ok := arg.(fmt.Stringer); ok {
+                       newargs[i] = fmt.Sprintf("%+q", s.String())
+               } else {
+                       newargs[i] = arg
+               }
+       }
+       log.Println(newargs...)
+}
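
A small sketch of the quoting behavior; the sample arguments are made up, and the output shown in the comment is approximate:

package main

import "git.curoverse.com/arvados.git/sdk/go/httpserver"

func main() {
	// Strings (and fmt.Stringers) are %+q-quoted so control
	// characters and quotes can't corrupt the log line; the int
	// is passed through unchanged.
	httpserver.Log("remote", "evil\"input\n", "status", 200)
	// Prints roughly: "remote" "evil\"input\n" "status" 200
}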
diff --git a/sdk/go/httpserver/logger.go b/sdk/go/httpserver/logger.go
new file mode 100644
index 0000000..357daee
--- /dev/null
+++ b/sdk/go/httpserver/logger.go
@@ -0,0 +1,113 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package httpserver
+
+import (
+       "context"
+       "net/http"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/stats"
+       "github.com/sirupsen/logrus"
+)
+
+type contextKey struct {
+       name string
+}
+
+var (
+       requestTimeContextKey = contextKey{"requestTime"}
+       loggerContextKey      = contextKey{"logger"}
+)
+
+// LogRequests wraps an http.Handler, logging each request and
+// response via logger.
+func LogRequests(logger logrus.FieldLogger, h http.Handler) http.Handler {
+       if logger == nil {
+               logger = logrus.StandardLogger()
+       }
+       return http.HandlerFunc(func(wrapped http.ResponseWriter, req *http.Request) {
+               w := &responseTimer{ResponseWriter: WrapResponseWriter(wrapped)}
+               lgr := logger.WithFields(logrus.Fields{
+                       "RequestID":       req.Header.Get("X-Request-Id"),
+                       "remoteAddr":      req.RemoteAddr,
+                       "reqForwardedFor": req.Header.Get("X-Forwarded-For"),
+                       "reqMethod":       req.Method,
+                       "reqHost":         req.Host,
+                       "reqPath":         req.URL.Path[1:],
+                       "reqQuery":        req.URL.RawQuery,
+                       "reqBytes":        req.ContentLength,
+               })
+               ctx := req.Context()
+               ctx = context.WithValue(ctx, &requestTimeContextKey, time.Now())
+               ctx = context.WithValue(ctx, &loggerContextKey, lgr)
+               req = req.WithContext(ctx)
+
+               logRequest(w, req, lgr)
+               defer logResponse(w, req, lgr)
+               h.ServeHTTP(w, req)
+       })
+}
+
+// Logger returns the request's logger as installed by LogRequests.
+func Logger(req *http.Request) logrus.FieldLogger {
+       if lgr, ok := req.Context().Value(&loggerContextKey).(logrus.FieldLogger); ok {
+               return lgr
+       }
+       return logrus.StandardLogger()
+}
+
+func logRequest(w *responseTimer, req *http.Request, lgr *logrus.Entry) {
+       lgr.Info("request")
+}
+
+func logResponse(w *responseTimer, req *http.Request, lgr *logrus.Entry) {
+       if tStart, ok := req.Context().Value(&requestTimeContextKey).(time.Time); ok {
+               tDone := time.Now()
+               lgr = lgr.WithFields(logrus.Fields{
+                       "timeTotal":     stats.Duration(tDone.Sub(tStart)),
+                       "timeToStatus":  stats.Duration(w.writeTime.Sub(tStart)),
+                       "timeWriteBody": stats.Duration(tDone.Sub(w.writeTime)),
+               })
+       }
+       respCode := w.WroteStatus()
+       if respCode == 0 {
+               respCode = http.StatusOK
+       }
+       lgr.WithFields(logrus.Fields{
+               "respStatusCode": respCode,
+               "respStatus":     http.StatusText(respCode),
+               "respBytes":      w.WroteBodyBytes(),
+       }).Info("response")
+}
+
+type responseTimer struct {
+       ResponseWriter
+       wrote     bool
+       writeTime time.Time
+}
+
+func (rt *responseTimer) CloseNotify() <-chan bool {
+       if cn, ok := rt.ResponseWriter.(http.CloseNotifier); ok {
+               return cn.CloseNotify()
+       }
+       return nil
+}
+
+func (rt *responseTimer) WriteHeader(code int) {
+       if !rt.wrote {
+               rt.wrote = true
+               rt.writeTime = time.Now()
+       }
+       rt.ResponseWriter.WriteHeader(code)
+}
+
+func (rt *responseTimer) Write(p []byte) (int, error) {
+       if !rt.wrote {
+               rt.wrote = true
+               rt.writeTime = time.Now()
+       }
+       return rt.ResponseWriter.Write(p)
+}
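
A sketch combining LogRequests with the Logger accessor; the handler body and port are hypothetical:

package main

import (
	"log"
	"net/http"

	"git.curoverse.com/arvados.git/sdk/go/httpserver"
	"github.com/sirupsen/logrus"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Logger(r) returns the per-request logger installed by
		// LogRequests, already tagged with RequestID etc.
		httpserver.Logger(r).Info("doing work")
		w.Write([]byte("done"))
	})
	stack := httpserver.AddRequestIDs(httpserver.LogRequests(logrus.StandardLogger(), h))
	log.Fatal(http.ListenAndServe(":8080", stack))
}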
diff --git a/sdk/go/httpserver/logger_test.go b/sdk/go/httpserver/logger_test.go
new file mode 100644
index 0000000..8386db9
--- /dev/null
+++ b/sdk/go/httpserver/logger_test.go
@@ -0,0 +1,68 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package httpserver
+
+import (
+       "bytes"
+       "encoding/json"
+       "net/http"
+       "net/http/httptest"
+       "testing"
+       "time"
+
+       "github.com/sirupsen/logrus"
+       check "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&Suite{})
+
+type Suite struct{}
+
+func (s *Suite) TestLogRequests(c *check.C) {
+       captured := &bytes.Buffer{}
+       log := logrus.New()
+       log.Out = captured
+       log.Formatter = &logrus.JSONFormatter{
+               TimestampFormat: time.RFC3339Nano,
+       }
+
+       h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+               w.Write([]byte("hello world"))
+       })
+       req, err := http.NewRequest("GET", "https://foo.example/bar", nil)
+       c.Assert(err, check.IsNil)
+       req.Header.Set("X-Forwarded-For", "1.2.3.4:12345")
+       resp := httptest.NewRecorder()
+       AddRequestIDs(LogRequests(log, h)).ServeHTTP(resp, req)
+
+       dec := json.NewDecoder(captured)
+
+       gotReq := make(map[string]interface{})
+       err = dec.Decode(&gotReq)
+       c.Logf("%#v", gotReq)
+       c.Check(gotReq["RequestID"], check.Matches, "req-[a-z0-9]{20}")
+       c.Check(gotReq["reqForwardedFor"], check.Equals, "1.2.3.4:12345")
+       c.Check(gotReq["msg"], check.Equals, "request")
+
+       gotResp := make(map[string]interface{})
+       err = dec.Decode(&gotResp)
+       c.Logf("%#v", gotResp)
+       c.Check(gotResp["RequestID"], check.Equals, gotReq["RequestID"])
+       c.Check(gotResp["reqForwardedFor"], check.Equals, "1.2.3.4:12345")
+       c.Check(gotResp["msg"], check.Equals, "response")
+
+       c.Assert(gotResp["time"], check.FitsTypeOf, "")
+       _, err = time.Parse(time.RFC3339Nano, gotResp["time"].(string))
+       c.Check(err, check.IsNil)
+
+       for _, key := range []string{"timeToStatus", "timeWriteBody", "timeTotal"} {
+               c.Assert(gotResp[key], check.FitsTypeOf, float64(0))
+               c.Check(gotResp[key].(float64), check.Not(check.Equals), float64(0))
+       }
+}
diff --git a/sdk/go/httpserver/metrics.go b/sdk/go/httpserver/metrics.go
new file mode 100644
index 0000000..032093f
--- /dev/null
+++ b/sdk/go/httpserver/metrics.go
@@ -0,0 +1,141 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package httpserver
+
+import (
+       "net/http"
+       "strconv"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/auth"
+       "git.curoverse.com/arvados.git/sdk/go/stats"
+       "github.com/gogo/protobuf/jsonpb"
+       "github.com/prometheus/client_golang/prometheus"
+       "github.com/prometheus/client_golang/prometheus/promhttp"
+       "github.com/sirupsen/logrus"
+)
+
+type Handler interface {
+       http.Handler
+
+       // Returns an http.Handler that serves the Handler's metrics
+       // data at /metrics and /metrics.json, and passes other
+       // requests through to next.
+       ServeAPI(token string, next http.Handler) http.Handler
+}
+
+type metrics struct {
+       next         http.Handler
+       logger       *logrus.Logger
+       registry     *prometheus.Registry
+       reqDuration  *prometheus.SummaryVec
+       timeToStatus *prometheus.SummaryVec
+       exportProm   http.Handler
+}
+
+func (*metrics) Levels() []logrus.Level {
+       return logrus.AllLevels
+}
+
+// Fire implements logrus.Hook in order to collect data points from
+// request logs.
+func (m *metrics) Fire(ent *logrus.Entry) error {
+       if tts, ok := ent.Data["timeToStatus"].(stats.Duration); !ok {
+       } else if method, ok := ent.Data["reqMethod"].(string); !ok {
+       } else if code, ok := ent.Data["respStatusCode"].(int); !ok {
+       } else {
+               m.timeToStatus.WithLabelValues(strconv.Itoa(code), strings.ToLower(method)).Observe(time.Duration(tts).Seconds())
+       }
+       return nil
+}
+
+func (m *metrics) exportJSON(w http.ResponseWriter, req *http.Request) {
+       jm := jsonpb.Marshaler{Indent: "  "}
+       mfs, _ := m.registry.Gather()
+       w.Write([]byte{'['})
+       for i, mf := range mfs {
+               if i > 0 {
+                       w.Write([]byte{','})
+               }
+               jm.Marshal(w, mf)
+       }
+       w.Write([]byte{']'})
+}
+
+// ServeHTTP implements http.Handler.
+func (m *metrics) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+       m.next.ServeHTTP(w, req)
+}
+
+// ServeAPI returns a new http.Handler that serves current data at
+// metrics API endpoints (currently "GET /metrics(.json)?") and passes
+// other requests through to next.
+//
+// If the given token is not empty, that token must be supplied by a
+// client in order to access the metrics endpoints.
+//
+// Typical example:
+//
+//     m := Instrument(...)
+//     srv := http.Server{Handler: m.ServeAPI("secrettoken", m)}
+func (m *metrics) ServeAPI(token string, next http.Handler) http.Handler {
+       jsonMetrics := auth.RequireLiteralToken(token, http.HandlerFunc(m.exportJSON))
+       plainMetrics := auth.RequireLiteralToken(token, m.exportProm)
+       return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+               switch {
+               case req.Method != "GET" && req.Method != "HEAD":
+                       next.ServeHTTP(w, req)
+               case req.URL.Path == "/metrics.json":
+                       jsonMetrics.ServeHTTP(w, req)
+               case req.URL.Path == "/metrics":
+                       plainMetrics.ServeHTTP(w, req)
+               default:
+                       next.ServeHTTP(w, req)
+               }
+       })
+}
+
+// Instrument returns a new Handler that passes requests through to
+// the next handler in the stack, and tracks metrics of those
+// requests.
+//
+// For the metrics to be accurate, the caller must ensure every
+// request passed to the Handler also passes through
+// LogRequests(logger, ...), and vice versa.
+//
+// If registry is nil, a new registry is created.
+//
+// If logger is nil, logrus.StandardLogger() is used.
+func Instrument(registry *prometheus.Registry, logger *logrus.Logger, next http.Handler) Handler {
+       if logger == nil {
+               logger = logrus.StandardLogger()
+       }
+       if registry == nil {
+               registry = prometheus.NewRegistry()
+       }
+       reqDuration := prometheus.NewSummaryVec(prometheus.SummaryOpts{
+               Name: "request_duration_seconds",
+               Help: "Summary of request duration.",
+       }, []string{"code", "method"})
+       timeToStatus := prometheus.NewSummaryVec(prometheus.SummaryOpts{
+               Name: "time_to_status_seconds",
+               Help: "Summary of request TTFB.",
+       }, []string{"code", "method"})
+       registry.MustRegister(timeToStatus)
+       registry.MustRegister(reqDuration)
+       m := &metrics{
+               next:         promhttp.InstrumentHandlerDuration(reqDuration, next),
+               logger:       logger,
+               registry:     registry,
+               reqDuration:  reqDuration,
+               timeToStatus: timeToStatus,
+               exportProm: promhttp.HandlerFor(registry, promhttp.HandlerOpts{
+                       ErrorLog: logger,
+               }),
+       }
+       m.logger.AddHook(m)
+       return m
+}
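
Expanding the doc comment's "Typical example" into a compilable sketch; the token and port are hypothetical, and LogRequests sits inside the instrumented stack so the logrus hook installed by Instrument sees the timing fields:

package main

import (
	"log"
	"net/http"

	"git.curoverse.com/arvados.git/sdk/go/httpserver"
	"github.com/sirupsen/logrus"
)

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})
	logger := logrus.StandardLogger()
	// nil registry: Instrument creates its own.
	m := httpserver.Instrument(nil, logger, httpserver.LogRequests(logger, app))
	// /metrics and /metrics.json require the token; everything else
	// falls through to the instrumented app.
	log.Fatal(http.ListenAndServe(":8080", m.ServeAPI("secrettoken", m)))
}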
diff --git a/sdk/go/httpserver/request_limiter.go b/sdk/go/httpserver/request_limiter.go
new file mode 100644
index 0000000..e7192d5
--- /dev/null
+++ b/sdk/go/httpserver/request_limiter.go
@@ -0,0 +1,57 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package httpserver
+
+import (
+       "net/http"
+)
+
+// RequestCounter is an http.Handler that tracks the number of
+// requests in progress.
+type RequestCounter interface {
+       http.Handler
+
+       // Current() returns the number of requests in progress.
+       Current() int
+
+       // Max() returns the maximum number of concurrent requests
+       // that will be accepted.
+       Max() int
+}
+
+type limiterHandler struct {
+       requests chan struct{}
+       handler  http.Handler
+}
+
+// NewRequestLimiter returns a RequestCounter that delegates up to
+// maxRequests at a time to the given handler, and responds 503 to all
+// incoming requests beyond that limit.
+func NewRequestLimiter(maxRequests int, handler http.Handler) RequestCounter {
+       return &limiterHandler{
+               requests: make(chan struct{}, maxRequests),
+               handler:  handler,
+       }
+}
+
+func (h *limiterHandler) Current() int {
+       return len(h.requests)
+}
+
+func (h *limiterHandler) Max() int {
+       return cap(h.requests)
+}
+
+func (h *limiterHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       select {
+       case h.requests <- struct{}{}:
+       default:
+               // reached max requests
+               resp.WriteHeader(http.StatusServiceUnavailable)
+               return
+       }
+       h.handler.ServeHTTP(resp, req)
+       <-h.requests
+}
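
A short sketch of the limiter in front of an expensive handler; the limit of 100 and the handler are illustrative:

package main

import (
	"log"
	"net/http"
	"time"

	"git.curoverse.com/arvados.git/sdk/go/httpserver"
)

func main() {
	slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(time.Second) // stand-in for expensive work
		w.Write([]byte("done"))
	})
	// At most 100 requests run concurrently; excess requests get an
	// immediate 503 instead of queueing.
	log.Fatal(http.ListenAndServe(":8080", httpserver.NewRequestLimiter(100, slow)))
}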
diff --git a/sdk/go/httpserver/request_limiter_test.go b/sdk/go/httpserver/request_limiter_test.go
new file mode 100644
index 0000000..afa4e3f
--- /dev/null
+++ b/sdk/go/httpserver/request_limiter_test.go
@@ -0,0 +1,110 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package httpserver
+
+import (
+       "net/http"
+       "net/http/httptest"
+       "sync"
+       "testing"
+       "time"
+)
+
+type testHandler struct {
+       inHandler   chan struct{}
+       okToProceed chan struct{}
+}
+
+func (h *testHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       h.inHandler <- struct{}{}
+       <-h.okToProceed
+}
+
+func newTestHandler() *testHandler {
+       return &testHandler{
+               inHandler:   make(chan struct{}),
+               okToProceed: make(chan struct{}),
+       }
+}
+
+func TestRequestLimiter1(t *testing.T) {
+       h := newTestHandler()
+       l := NewRequestLimiter(1, h)
+       var wg sync.WaitGroup
+       resps := make([]*httptest.ResponseRecorder, 10)
+       for i := 0; i < 10; i++ {
+               wg.Add(1)
+               resps[i] = httptest.NewRecorder()
+               go func(i int) {
+                       l.ServeHTTP(resps[i], &http.Request{})
+                       wg.Done()
+               }(i)
+       }
+       done := make(chan struct{})
+       go func() {
+               // Make sure one request has entered the handler
+               <-h.inHandler
+               // Make sure all unsuccessful requests finish (but don't wait
+               // for the one that's still waiting for okToProceed)
+               wg.Add(-1)
+               wg.Wait()
+               // Wait for the last goroutine
+               wg.Add(1)
+               h.okToProceed <- struct{}{}
+               wg.Wait()
+               done <- struct{}{}
+       }()
+       select {
+       case <-done:
+       case <-time.After(10 * time.Second):
+               t.Fatal("test timed out, probably deadlocked")
+       }
+       n200 := 0
+       n503 := 0
+       for i := 0; i < 10; i++ {
+               switch resps[i].Code {
+               case 200:
+                       n200++
+               case 503:
+                       n503++
+               default:
+                       t.Fatalf("Unexpected response code %d", resps[i].Code)
+               }
+       }
+       if n200 != 1 || n503 != 9 {
+               t.Fatalf("Got %d 200 responses, %d 503 responses (expected 1, 9)", n200, n503)
+       }
+       // Now that all 10 are finished, an 11th request should
+       // succeed.
+       go func() {
+               <-h.inHandler
+               h.okToProceed <- struct{}{}
+       }()
+       resp := httptest.NewRecorder()
+       l.ServeHTTP(resp, &http.Request{})
+       if resp.Code != 200 {
+               t.Errorf("Got status %d on 11th request, want 200", resp.Code)
+       }
+}
+
+func TestRequestLimiter10(t *testing.T) {
+       h := newTestHandler()
+       l := NewRequestLimiter(10, h)
+       var wg sync.WaitGroup
+       for i := 0; i < 10; i++ {
+               wg.Add(1)
+               go func() {
+                       l.ServeHTTP(httptest.NewRecorder(), &http.Request{})
+                       wg.Done()
+               }()
+               // Make sure the handler starts before we initiate the
+               // next request, but don't let it finish yet.
+               <-h.inHandler
+       }
+       for i := 0; i < 10; i++ {
+               h.okToProceed <- struct{}{}
+       }
+       wg.Wait()
+}
diff --git a/sdk/go/httpserver/responsewriter.go b/sdk/go/httpserver/responsewriter.go
new file mode 100644
index 0000000..8dea759
--- /dev/null
+++ b/sdk/go/httpserver/responsewriter.go
@@ -0,0 +1,63 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package httpserver
+
+import (
+       "net/http"
+)
+
+type ResponseWriter interface {
+       http.ResponseWriter
+       WroteStatus() int
+       WroteBodyBytes() int
+}
+
+// responseWriter wraps http.ResponseWriter and exposes the status
+// sent, the number of bytes sent to the client, and the last write
+// error.
+type responseWriter struct {
+       http.ResponseWriter
+       wroteStatus    int   // Last status given to WriteHeader()
+       wroteBodyBytes int   // Bytes successfully written
+       err            error // Last error returned from Write()
+}
+
+func WrapResponseWriter(orig http.ResponseWriter) ResponseWriter {
+       return &responseWriter{ResponseWriter: orig}
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+       if cn, ok := w.ResponseWriter.(http.CloseNotifier); ok {
+               return cn.CloseNotify()
+       }
+       return nil
+}
+
+func (w *responseWriter) WriteHeader(s int) {
+       w.wroteStatus = s
+       w.ResponseWriter.WriteHeader(s)
+}
+
+func (w *responseWriter) Write(data []byte) (n int, err error) {
+       if w.wroteStatus == 0 {
+               w.WriteHeader(http.StatusOK)
+       }
+       n, err = w.ResponseWriter.Write(data)
+       w.wroteBodyBytes += n
+       w.err = err
+       return
+}
+
+func (w *responseWriter) WroteStatus() int {
+       return w.wroteStatus
+}
+
+func (w *responseWriter) WroteBodyBytes() int {
+       return w.wroteBodyBytes
+}
+
+func (w *responseWriter) Err() error {
+       return w.err
+}
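
A sketch of WrapResponseWriter used from hypothetical logging middleware (countBytes is not part of this commit):

package main

import (
	"log"
	"net/http"

	"git.curoverse.com/arvados.git/sdk/go/httpserver"
)

// countBytes is a hypothetical middleware that reports the status and
// body size of each response, using the wrapper's accounting.
func countBytes(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ww := httpserver.WrapResponseWriter(w)
		next.ServeHTTP(ww, r)
		log.Printf("%s -> %d (%d bytes)", r.URL.Path, ww.WroteStatus(), ww.WroteBodyBytes())
	})
}

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})
	log.Fatal(http.ListenAndServe(":8080", countBytes(h)))
}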
diff --git a/sdk/go/keepclient/block_cache.go b/sdk/go/keepclient/block_cache.go
new file mode 100644
index 0000000..bac4a24
--- /dev/null
+++ b/sdk/go/keepclient/block_cache.go
@@ -0,0 +1,138 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package keepclient
+
+import (
+       "io"
+       "sort"
+       "strconv"
+       "strings"
+       "sync"
+       "time"
+)
+
+var DefaultBlockCache = &BlockCache{}
+
+type BlockCache struct {
+       // Maximum number of blocks to keep in the cache. If 0, a
+       // default size (currently 4) is used instead.
+       MaxBlocks int
+
+       cache map[string]*cacheBlock
+       mtx   sync.Mutex
+}
+
+const defaultMaxBlocks = 4
+
+// Sweep deletes the least recently used blocks from the cache until
+// there are no more than MaxBlocks left.
+func (c *BlockCache) Sweep() {
+       max := c.MaxBlocks
+       if max == 0 {
+               max = defaultMaxBlocks
+       }
+       c.mtx.Lock()
+       defer c.mtx.Unlock()
+       if len(c.cache) <= max {
+               return
+       }
+       lru := make([]time.Time, 0, len(c.cache))
+       for _, b := range c.cache {
+               lru = append(lru, b.lastUse)
+       }
+       sort.Sort(sort.Reverse(timeSlice(lru)))
+       threshold := lru[max]
+       for loc, b := range c.cache {
+               if !b.lastUse.After(threshold) {
+                       delete(c.cache, loc)
+               }
+       }
+}
+
+// ReadAt copies data from the cached block into p, starting at byte
+// offset off, first retrieving the block from Keep if necessary.
+func (c *BlockCache) ReadAt(kc *KeepClient, locator string, p []byte, off int) (int, error) {
+       buf, err := c.Get(kc, locator)
+       if err != nil {
+               return 0, err
+       }
+       if off > len(buf) {
+               return 0, io.ErrUnexpectedEOF
+       }
+       return copy(p, buf[off:]), nil
+}
+
+// Get returns data from the cache, first retrieving it from Keep if
+// necessary.
+func (c *BlockCache) Get(kc *KeepClient, locator string) ([]byte, error) {
+       cacheKey := locator[:32]
+       bufsize := BLOCKSIZE
+       if parts := strings.SplitN(locator, "+", 3); len(parts) >= 2 {
+               datasize, err := strconv.ParseInt(parts[1], 10, 32)
+               if err == nil && datasize >= 0 {
+                       bufsize = int(datasize)
+               }
+       }
+       c.mtx.Lock()
+       if c.cache == nil {
+               c.cache = make(map[string]*cacheBlock)
+       }
+       b, ok := c.cache[cacheKey]
+       if !ok || b.err != nil {
+               b = &cacheBlock{
+                       fetched: make(chan struct{}),
+                       lastUse: time.Now(),
+               }
+               c.cache[cacheKey] = b
+               go func() {
+                       rdr, size, _, err := kc.Get(locator)
+                       var data []byte
+                       if err == nil {
+                               data = make([]byte, size, bufsize)
+                               _, err = io.ReadFull(rdr, data)
+                               err2 := rdr.Close()
+                               if err == nil {
+                                       err = err2
+                               }
+                       }
+                       c.mtx.Lock()
+                       b.data, b.err = data, err
+                       c.mtx.Unlock()
+                       close(b.fetched)
+                       go c.Sweep()
+               }()
+       }
+       c.mtx.Unlock()
+
+       // Wait (with mtx unlocked) for the fetch goroutine to finish,
+       // in case it hasn't already.
+       <-b.fetched
+
+       c.mtx.Lock()
+       b.lastUse = time.Now()
+       c.mtx.Unlock()
+       return b.data, b.err
+}
+
+func (c *BlockCache) Clear() {
+       c.mtx.Lock()
+       c.cache = nil
+       c.mtx.Unlock()
+}
+
+type timeSlice []time.Time
+
+func (ts timeSlice) Len() int { return len(ts) }
+
+func (ts timeSlice) Less(i, j int) bool { return ts[i].Before(ts[j]) }
+
+func (ts timeSlice) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] }
+
+type cacheBlock struct {
+       data    []byte
+       err     error
+       fetched chan struct{}
+       lastUse time.Time
+}
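
A sketch of the cache in use; it assumes ARVADOS_API_HOST and ARVADOS_API_TOKEN point at a reachable cluster:

package main

import (
	"log"

	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
	"git.curoverse.com/arvados.git/sdk/go/keepclient"
)

func main() {
	arv, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		log.Fatal(err)
	}
	kc, err := keepclient.MakeKeepClient(arv)
	if err != nil {
		log.Fatal(err)
	}
	kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 16}
	loc, _, err := kc.PutB([]byte("foo"))
	if err != nil {
		log.Fatal(err)
	}
	// First Get fetches from Keep; repeated Gets are served from the
	// cache until Sweep evicts the block.
	buf, err := kc.BlockCache.Get(kc, loc)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes via cache", len(buf))
}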
diff --git a/sdk/go/keepclient/collectionreader.go b/sdk/go/keepclient/collectionreader.go
new file mode 100644
index 0000000..fa309f6
--- /dev/null
+++ b/sdk/go/keepclient/collectionreader.go
@@ -0,0 +1,41 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package keepclient
+
+import (
+       "errors"
+       "os"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/manifest"
+)
+
+// ErrNoManifest indicates the given collection has no manifest
+// information (e.g., manifest_text was excluded by a "select"
+// parameter when retrieving the collection record).
+var ErrNoManifest = errors.New("Collection has no manifest")
+
+// CollectionFileReader returns a Reader that reads content from a single file
+// in the collection. The filename must be relative to the root of the
+// collection.  A leading prefix of "/" or "./" in the filename is ignored.
+func (kc *KeepClient) CollectionFileReader(collection map[string]interface{}, filename string) (arvados.File, error) {
+       mText, ok := collection["manifest_text"].(string)
+       if !ok {
+               return nil, ErrNoManifest
+       }
+       fs, err := (&arvados.Collection{ManifestText: mText}).FileSystem(nil, kc)
+       if err != nil {
+               return nil, err
+       }
+       return fs.OpenFile(filename, os.O_RDONLY, 0)
+}
+
+func (kc *KeepClient) ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error) {
+       fs, err := (&arvados.Collection{ManifestText: m.Text}).FileSystem(nil, kc)
+       if err != nil {
+               return nil, err
+       }
+       return fs.OpenFile(filename, os.O_RDONLY, 0)
+}
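
A sketch of reading one file from a manifest; it assumes a configured cluster (ARVADOS_API_HOST, ARVADOS_API_TOKEN) so the "foo" block can be written first:

package main

import (
	"io"
	"log"
	"os"

	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
	"git.curoverse.com/arvados.git/sdk/go/keepclient"
)

func main() {
	arv, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		log.Fatal(err)
	}
	kc, err := keepclient.MakeKeepClient(arv)
	if err != nil {
		log.Fatal(err)
	}
	if _, _, err := kc.PutB([]byte("foo")); err != nil {
		log.Fatal(err)
	}
	// In real use the collection record would come from an API call;
	// CollectionFileReader only needs its manifest_text field.
	coll := map[string]interface{}{
		"manifest_text": ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n",
	}
	f, err := kc.CollectionFileReader(coll, "foo.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	io.Copy(os.Stdout, f) // prints "foo"
}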
diff --git a/sdk/go/keepclient/collectionreader_test.go b/sdk/go/keepclient/collectionreader_test.go
new file mode 100644
index 0000000..4d7846d
--- /dev/null
+++ b/sdk/go/keepclient/collectionreader_test.go
@@ -0,0 +1,291 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package keepclient
+
+import (
+       "crypto/md5"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "math/rand"
+       "net/http"
+       "os"
+       "strconv"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&CollectionReaderUnit{})
+
+type CollectionReaderUnit struct {
+       arv     *arvadosclient.ArvadosClient
+       kc      *KeepClient
+       handler SuccessHandler
+}
+
+func (s *CollectionReaderUnit) SetUpTest(c *check.C) {
+       var err error
+       s.arv, err = arvadosclient.MakeArvadosClient()
+       c.Assert(err, check.IsNil)
+       s.arv.ApiToken = arvadostest.ActiveToken
+
+       s.kc, err = MakeKeepClient(s.arv)
+       c.Assert(err, check.IsNil)
+
+       s.handler = SuccessHandler{
+               disk: make(map[string][]byte),
+               lock: make(chan struct{}, 1),
+               ops:  new(int),
+       }
+       localRoots := make(map[string]string)
+       for i, k := range RunSomeFakeKeepServers(s.handler, 4) {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+       }
+       s.kc.SetServiceRoots(localRoots, localRoots, nil)
+}
+
+type SuccessHandler struct {
+       disk map[string][]byte
+       lock chan struct{} // channel with buffer==1: full when an operation is in progress.
+       ops  *int          // number of operations completed
+}
+
+func (h SuccessHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       switch req.Method {
+       case "PUT":
+               buf, err := ioutil.ReadAll(req.Body)
+               if err != nil {
+                       resp.WriteHeader(500)
+                       return
+               }
+               pdh := fmt.Sprintf("%x+%d", md5.Sum(buf), len(buf))
+               h.lock <- struct{}{}
+               h.disk[pdh] = buf
+               if h.ops != nil {
+                       (*h.ops)++
+               }
+               <-h.lock
+               resp.Write([]byte(pdh))
+       case "GET":
+               pdh := req.URL.Path[1:]
+               h.lock <- struct{}{}
+               buf, ok := h.disk[pdh]
+               if h.ops != nil {
+                       (*h.ops)++
+               }
+               <-h.lock
+               if !ok {
+                       resp.WriteHeader(http.StatusNotFound)
+               } else {
+                       resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(buf)))
+                       resp.Write(buf)
+               }
+       default:
+               resp.WriteHeader(http.StatusMethodNotAllowed)
+       }
+}
+
+type rdrTest struct {
+       mt   string      // manifest text
+       f    string      // filename
+       want interface{} // error or string to expect
+}
+
+func (s *CollectionReaderUnit) TestCollectionReaderContent(c *check.C) {
+       s.kc.PutB([]byte("foo"))
+       s.kc.PutB([]byte("bar"))
+       s.kc.PutB([]byte("Hello world\n"))
+       s.kc.PutB([]byte(""))
+
+       mt := arvadostest.PathologicalManifest
+
+       for _, testCase := range []rdrTest{
+               {mt: mt, f: "zzzz", want: os.ErrNotExist},
+               {mt: mt, f: "frob", want: os.ErrNotExist},
+               {mt: mt, f: "/segmented/frob", want: "frob"},
+               {mt: mt, f: "./segmented/frob", want: "frob"},
+               {mt: mt, f: "/f", want: "f"},
+               {mt: mt, f: "./f", want: "f"},
+               {mt: mt, f: "foo bar//baz", want: "foo"},
+               {mt: mt, f: "foo/zero", want: ""},
+               {mt: mt, f: "zero@0", want: ""},
+               {mt: mt, f: "zero@1", want: ""},
+               {mt: mt, f: "zero@4", want: ""},
+               {mt: mt, f: "zero@9", want: ""},
+               {mt: mt, f: "f", want: "f"},
+               {mt: mt, f: "ooba", want: "ooba"},
+               {mt: mt, f: "overlapReverse/o", want: "o"},
+               {mt: mt, f: "overlapReverse/oo", want: "oo"},
+               {mt: mt, f: "overlapReverse/ofoo", want: "ofoo"},
+               {mt: mt, f: "foo bar/baz", want: "foo"},
+               {mt: mt, f: "segmented/frob", want: "frob"},
+               {mt: mt, f: "segmented/oof", want: "oof"},
+       } {
+               c.Logf("%#v", testCase)
+               rdr, err := s.kc.CollectionFileReader(map[string]interface{}{"manifest_text": testCase.mt}, testCase.f)
+               switch want := testCase.want.(type) {
+               case error:
+                       c.Check(rdr, check.IsNil)
+                       c.Check(err, check.Equals, want)
+               case string:
+                       buf := make([]byte, len(want))
+                       n, err := io.ReadFull(rdr, buf)
+                       c.Check(err, check.IsNil)
+                       for i := 0; i < 4; i++ {
+                               c.Check(string(buf), check.Equals, want)
+                               n, err = rdr.Read(buf)
+                               c.Check(n, check.Equals, 0)
+                               c.Check(err, check.Equals, io.EOF)
+                       }
+
+                       for a := len(want) - 2; a >= 0; a-- {
+                               for b := a + 1; b <= len(want); b++ {
+                                       offset, err := rdr.Seek(int64(a), io.SeekStart)
+                                       c.Logf("...a=%d, b=%d", a, b)
+                                       c.Check(err, check.IsNil)
+                                       c.Check(offset, check.Equals, int64(a))
+                                       buf := make([]byte, b-a)
+                                       n, err := io.ReadFull(rdr, buf)
+                                       c.Check(err, check.IsNil)
+                                       c.Check(n, check.Equals, b-a)
+                                       c.Check(string(buf), check.Equals, want[a:b])
+                               }
+                       }
+                       offset, err := rdr.Seek(-1, io.SeekStart)
+                       c.Check(err, check.NotNil)
+                       c.Check(offset, check.Equals, int64(len(want)))
+
+                       c.Check(rdr.Close(), check.Equals, nil)
+               }
+       }
+}
+
+func (s *CollectionReaderUnit) TestCollectionReaderManyBlocks(c *check.C) {
+       h := md5.New()
+       buf := make([]byte, 4096)
+       locs := make([]string, len(buf))
+       testdata := make([]byte, 0, len(buf)*len(buf))
+       filesize := 0
+       for i := range locs {
+               _, err := rand.Read(buf[:i])
+               h.Write(buf[:i])
+               locs[i], _, err = s.kc.PutB(buf[:i])
+               c.Assert(err, check.IsNil)
+               filesize += i
+               testdata = append(testdata, buf[:i]...)
+       }
+       manifest := "./random " + strings.Join(locs, " ") + " 0:" + strconv.Itoa(filesize) + ":bytes.bin\n"
+       dataMD5 := h.Sum(nil)
+
+       checkMD5 := md5.New()
+       rdr, err := s.kc.CollectionFileReader(map[string]interface{}{"manifest_text": manifest}, "random/bytes.bin")
+       c.Assert(err, check.IsNil)
+       defer rdr.Close()
+
+       _, err = io.Copy(checkMD5, rdr)
+       c.Check(err, check.IsNil)
+       _, err = rdr.Read(make([]byte, 1))
+       c.Check(err, check.Equals, io.EOF)
+       c.Check(checkMD5.Sum(nil), check.DeepEquals, dataMD5)
+
+       size, err := rdr.Seek(0, io.SeekEnd)
+       c.Check(err, check.IsNil)
+       buf = make([]byte, len(testdata))
+       copy(buf, testdata)
+       curPos := size
+       for i := 0; i < 16; i++ {
+               offset := rand.Intn(len(buf) - 1)
+               count := rand.Intn(len(buf) - offset)
+               if rand.Intn(2) == 0 {
+                       curPos, err = rdr.Seek(int64(offset)-curPos, io.SeekCurrent)
+               } else {
+                       curPos, err = rdr.Seek(int64(offset), io.SeekStart)
+               }
+               c.Check(curPos, check.Equals, int64(offset))
+               for count > 0 {
+                       n, err := rdr.Read(buf[offset : offset+count])
+                       c.Assert(err, check.IsNil)
+                       c.Assert(n > 0, check.Equals, true)
+                       offset += n
+                       count -= n
+               }
+               curPos, err = rdr.Seek(0, io.SeekCurrent)
+               c.Check(curPos, check.Equals, int64(offset))
+       }
+       c.Check(md5.Sum(buf), check.DeepEquals, md5.Sum(testdata))
+       c.Check(buf[:1000], check.DeepEquals, testdata[:1000])
+
+       expectPos := curPos + size + 12345
+       curPos, err = rdr.Seek(size+12345, io.SeekCurrent)
+       c.Check(err, check.IsNil)
+       c.Check(curPos, check.Equals, expectPos)
+
+       curPos, err = rdr.Seek(8-curPos, io.SeekCurrent)
+       c.Check(err, check.IsNil)
+       c.Check(curPos, check.Equals, int64(8))
+
+       curPos, err = rdr.Seek(-9, io.SeekCurrent)
+       c.Check(err, check.NotNil)
+       c.Check(curPos, check.Equals, int64(8))
+}
+
+func (s *CollectionReaderUnit) TestCollectionReaderCloseEarly(c *check.C) {
+       s.kc.BlockCache = &BlockCache{}
+       s.kc.PutB([]byte("foo"))
+       s.kc.PutB([]byte("bar"))
+       s.kc.PutB([]byte("baz"))
+
+       mt := ". "
+       for i := 0; i < 300; i++ {
+               mt += "acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 73feffa4b7f6bb68e44cf984c85f6e88+3 "
+       }
+       mt += "0:2700:foo900.txt\n"
+
+       // Grab the stub server's lock, ensuring our reader doesn't
+       // get anything back from its first call to kc.Get() before we
+       // have a chance to call Close().
+       s.handler.lock <- struct{}{}
+       opsBeforeRead := *s.handler.ops
+
+       rdr, err := s.kc.CollectionFileReader(map[string]interface{}{"manifest_text": mt}, "foo900.txt")
+       c.Assert(err, check.IsNil)
+
+       firstReadDone := make(chan struct{})
+       go func() {
+               n, err := rdr.Read(make([]byte, 3))
+               c.Check(n, check.Equals, 3)
+               c.Check(err, check.IsNil)
+               close(firstReadDone)
+       }()
+
+       // Release the stub server's lock. The first GET operation will proceed.
+       <-s.handler.lock
+
+       // Make sure our first read operation consumes the data
+       // received from the first GET.
+       <-firstReadDone
+
+       err = rdr.Close()
+       c.Check(err, check.IsNil)
+
+       // Stub should have handled exactly one GET request.
+       c.Check(*s.handler.ops, check.Equals, opsBeforeRead+1)
+}
+
+func (s *CollectionReaderUnit) TestCollectionReaderDataError(c *check.C) {
+       manifest := ". ffffffffffffffffffffffffffffffff+1 0:1:notfound.txt\n"
+       buf := make([]byte, 1)
+       rdr, err := s.kc.CollectionFileReader(map[string]interface{}{"manifest_text": manifest}, "notfound.txt")
+       c.Check(err, check.IsNil)
+       for i := 0; i < 2; i++ {
+               _, err = io.ReadFull(rdr, buf)
+               c.Check(err, check.NotNil)
+               c.Check(err, check.Not(check.Equals), io.EOF)
+       }
+       c.Check(rdr.Close(), check.IsNil)
+}
diff --git a/sdk/go/keepclient/discover.go b/sdk/go/keepclient/discover.go
new file mode 100644
index 0000000..2392fcd
--- /dev/null
+++ b/sdk/go/keepclient/discover.go
@@ -0,0 +1,224 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package keepclient
+
+import (
+       "encoding/json"
+       "fmt"
+       "log"
+       "os"
+       "os/signal"
+       "strings"
+       "sync"
+       "syscall"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+)
+
+// RefreshServiceDiscovery clears the Keep service discovery cache.
+func RefreshServiceDiscovery() {
+       var wg sync.WaitGroup
+       defer wg.Wait()
+       svcListCacheMtx.Lock()
+       defer svcListCacheMtx.Unlock()
+       for _, ent := range svcListCache {
+               wg.Add(1)
+               go func(ent cachedSvcList) {
+                       ent.clear <- struct{}{}
+                       wg.Done()
+               }(ent)
+       }
+}
+
+// RefreshServiceDiscoveryOnSIGHUP installs a signal handler that
+// calls RefreshServiceDiscovery when SIGHUP is received.
+func RefreshServiceDiscoveryOnSIGHUP() {
+       svcListCacheMtx.Lock()
+       defer svcListCacheMtx.Unlock()
+       if svcListCacheSignal != nil {
+               return
+       }
+       svcListCacheSignal = make(chan os.Signal, 1)
+       signal.Notify(svcListCacheSignal, syscall.SIGHUP)
+       go func() {
+               for range svcListCacheSignal {
+                       RefreshServiceDiscovery()
+               }
+       }()
+}
+
+var (
+       svcListCache       = map[string]cachedSvcList{}
+       svcListCacheSignal chan os.Signal
+       svcListCacheMtx    sync.Mutex
+)
+
+type cachedSvcList struct {
+       arv    *arvadosclient.ArvadosClient
+       latest chan svcList
+       clear  chan struct{}
+}
+
+// Check for a new services list every few minutes. Send the latest list
+// to the "latest" channel as needed.
+func (ent *cachedSvcList) poll() {
+       wakeup := make(chan struct{})
+
+       replace := make(chan svcList)
+       go func() {
+               wakeup <- struct{}{}
+               current := <-replace
+               for {
+                       select {
+                       case <-ent.clear:
+                               wakeup <- struct{}{}
+                               // Wait here for the next success, in
+                               // order to avoid returning stale
+                               // results on the "latest" channel.
+                               current = <-replace
+                       case current = <-replace:
+                       case ent.latest <- current:
+                       }
+               }
+       }()
+
+       okDelay := 5 * time.Minute
+       errDelay := 3 * time.Second
+       timer := time.NewTimer(okDelay)
+       for {
+               select {
+               case <-timer.C:
+               case <-wakeup:
+                       if !timer.Stop() {
+                               // Lost race stopping timer; skip extra firing
+                               <-timer.C
+                       }
+               }
+               var next svcList
+               err := ent.arv.Call("GET", "keep_services", "", "accessible", nil, &next)
+               if err != nil {
+                       log.Printf("WARNING: Error retrieving services list: %v (retrying in %v)", err, errDelay)
+                       timer.Reset(errDelay)
+                       continue
+               }
+               replace <- next
+               timer.Reset(okDelay)
+       }
+}
+
+// discoverServices gets the list of available keep services from
+// the API server.
+//
+// If a list of services is provided in the arvadosclient (e.g., from
+// an environment variable or local config), that list is used
+// instead.
+//
+// If an API call is made, the result is cached for 5 minutes or until
+// RefreshServiceDiscovery() is called; during this interval it is reused by
+// other KeepClients that use the same API server host.
+func (kc *KeepClient) discoverServices() error {
+       if kc.disableDiscovery {
+               return nil
+       }
+
+       if kc.Arvados.KeepServiceURIs != nil {
+               kc.disableDiscovery = true
+               kc.foundNonDiskSvc = true
+               kc.replicasPerService = 0
+               roots := make(map[string]string)
+               for i, uri := range kc.Arvados.KeepServiceURIs {
+                       roots[fmt.Sprintf("00000-bi6l4-%015d", i)] = uri
+               }
+               kc.setServiceRoots(roots, roots, roots)
+               return nil
+       }
+
+       svcListCacheMtx.Lock()
+       cacheEnt, ok := svcListCache[kc.Arvados.ApiServer]
+       if !ok {
+               arv := *kc.Arvados
+               cacheEnt = cachedSvcList{
+                       latest: make(chan svcList),
+                       clear:  make(chan struct{}),
+                       arv:    &arv,
+               }
+               go cacheEnt.poll()
+               svcListCache[kc.Arvados.ApiServer] = cacheEnt
+       }
+       svcListCacheMtx.Unlock()
+
+       return kc.loadKeepServers(<-cacheEnt.latest)
+}
+
+func (kc *KeepClient) RefreshServiceDiscovery() {
+       svcListCacheMtx.Lock()
+       ent, ok := svcListCache[kc.Arvados.ApiServer]
+       svcListCacheMtx.Unlock()
+       if !ok || kc.Arvados.KeepServiceURIs != nil || kc.disableDiscovery {
+               return
+       }
+       ent.clear <- struct{}{}
+}
+
+// LoadKeepServicesFromJSON loads the list of available Keep services
+// from the given JSON and disables automatic service discovery.
+func (kc *KeepClient) LoadKeepServicesFromJSON(services string) error {
+       kc.disableDiscovery = true
+
+       var list svcList
+       dec := json.NewDecoder(strings.NewReader(services))
+       if err := dec.Decode(&list); err != nil {
+               return err
+       }
+
+       return kc.loadKeepServers(list)
+}
+
+func (kc *KeepClient) loadKeepServers(list svcList) error {
+       listed := make(map[string]bool)
+       localRoots := make(map[string]string)
+       gatewayRoots := make(map[string]string)
+       writableLocalRoots := make(map[string]string)
+
+       // replicasPerService is 1 for disks; unknown or unlimited otherwise
+       kc.replicasPerService = 1
+
+       for _, service := range list.Items {
+               scheme := "http"
+               if service.SSL {
+                       scheme = "https"
+               }
+               url := fmt.Sprintf("%s://%s:%d", scheme, service.Hostname, service.Port)
+
+               // Skip duplicates
+               if listed[url] {
+                       continue
+               }
+               listed[url] = true
+
+               localRoots[service.Uuid] = url
+               if !service.ReadOnly {
+                       writableLocalRoots[service.Uuid] = url
+                       if service.SvcType != "disk" {
+                               kc.replicasPerService = 0
+                       }
+               }
+
+               if service.SvcType != "disk" {
+                       kc.foundNonDiskSvc = true
+               }
+
+               // Gateway services are only used when specified by
+               // UUID, so there's nothing to gain by filtering them
+               // by service type. Including all accessible services
+               // (gateway and otherwise) merely accommodates more
+               // service configurations.
+               gatewayRoots[service.Uuid] = url
+       }
+
+       kc.setServiceRoots(localRoots, writableLocalRoots, gatewayRoots)
+       return nil
+}
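
A sketch of enabling SIGHUP-driven refresh in a long-running daemon; cluster credentials are assumed to be configured in the environment:

package main

import (
	"log"

	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
	"git.curoverse.com/arvados.git/sdk/go/keepclient"
)

func main() {
	// SIGHUP clears the shared discovery cache, so a long-running
	// daemon picks up keepstore topology changes without a restart.
	keepclient.RefreshServiceDiscoveryOnSIGHUP()

	arv, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		log.Fatal(err)
	}
	kc, err := keepclient.MakeKeepClient(arv)
	if err != nil {
		log.Fatal(err)
	}
	_ = kc // ...serve requests using kc...
}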
diff --git a/sdk/go/keepclient/discover_test.go b/sdk/go/keepclient/discover_test.go
new file mode 100644
index 0000000..95a84c0
--- /dev/null
+++ b/sdk/go/keepclient/discover_test.go
@@ -0,0 +1,57 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package keepclient
+
+import (
+       "crypto/md5"
+       "fmt"
+       "net/http"
+       "os"
+
+       "gopkg.in/check.v1"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+)
+
+func (s *ServerRequiredSuite) TestOverrideDiscovery(c *check.C) {
+       defer os.Setenv("ARVADOS_KEEP_SERVICES", "")
+
+       data := []byte("TestOverrideDiscovery")
+       hash := fmt.Sprintf("%x+%d", md5.Sum(data), len(data))
+       st := StubGetHandler{
+               c,
+               hash,
+               arvadostest.ActiveToken,
+               http.StatusOK,
+               data}
+       ks := RunSomeFakeKeepServers(st, 2)
+
+       os.Setenv("ARVADOS_KEEP_SERVICES", "")
+       arv1, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, check.IsNil)
+       arv1.ApiToken = arvadostest.ActiveToken
+
+       os.Setenv("ARVADOS_KEEP_SERVICES", ks[0].url+"  "+ks[1].url+" ")
+       arv2, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, check.IsNil)
+       arv2.ApiToken = arvadostest.ActiveToken
+
+       // ARVADOS_KEEP_SERVICES was empty when we created arv1, but
+       // it pointed to our stub servers when we created
+       // arv2. Regardless of what it's set to now, a keepclient for
+       // arv2 should use our stub servers, but one created for arv1
+       // should not.
+
+       kc1, err := MakeKeepClient(arv1)
+       c.Assert(err, check.IsNil)
+       kc2, err := MakeKeepClient(arv2)
+       c.Assert(err, check.IsNil)
+
+       _, _, _, err = kc1.Get(hash)
+       c.Check(err, check.NotNil)
+       _, _, _, err = kc2.Get(hash)
+       c.Check(err, check.IsNil)
+}
diff --git a/sdk/go/keepclient/hashcheck.go b/sdk/go/keepclient/hashcheck.go
new file mode 100644 (file)
index 0000000..9295c14
--- /dev/null
@@ -0,0 +1,87 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package keepclient
+
+import (
+       "errors"
+       "fmt"
+       "hash"
+       "io"
+)
+
+var BadChecksum = errors.New("Reader failed checksum")
+
+// HashCheckingReader is an io.ReadCloser that checks the contents
+// read from the underlying io.Reader against the provided hash.
+type HashCheckingReader struct {
+       // The underlying data source
+       io.Reader
+
+       // The hash function to use
+       hash.Hash
+
+       // The hash value to check against.  Must be a hex-encoded lowercase string.
+       Check string
+}
+
+// Read reads from the underlying reader, updates the hashing function,
+// and passes the results through. It returns BadChecksum (instead of
+// EOF) on the last read if the checksum doesn't match.
+func (this HashCheckingReader) Read(p []byte) (n int, err error) {
+       n, err = this.Reader.Read(p)
+       if n > 0 {
+               this.Hash.Write(p[:n])
+       }
+       if err == io.EOF {
+               sum := this.Hash.Sum(nil)
+               if fmt.Sprintf("%x", sum) != this.Check {
+                       err = BadChecksum
+               }
+       }
+       return n, err
+}
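+
+// Illustrative usage sketch (not part of the original source): wrap a
+// reader so its contents are verified against an md5 locator hash.
+//
+//   hcr := HashCheckingReader{Reader: resp.Body, Hash: md5.New(), Check: locator[0:32]}
+//   buf, err := ioutil.ReadAll(hcr) // err == BadChecksum on a mismatch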
+
+// WriteTo writes the entire contents of this.Reader to dest. Returns
+// BadChecksum if writing is successful but the checksum doesn't
+// match.
+func (this HashCheckingReader) WriteTo(dest io.Writer) (written int64, err error) {
+       if writeto, ok := this.Reader.(io.WriterTo); ok {
+               written, err = writeto.WriteTo(io.MultiWriter(dest, this.Hash))
+       } else {
+               written, err = io.Copy(io.MultiWriter(dest, this.Hash), this.Reader)
+       }
+
+       if err != nil {
+               return written, err
+       }
+
+       sum := this.Hash.Sum(nil)
+       if fmt.Sprintf("%x", sum) != this.Check {
+               return written, BadChecksum
+       }
+
+       return written, nil
+}
+
+// Close reads all remaining data from the underlying Reader and
+// returns BadChecksum if the checksum doesn't match. It also closes
+// the underlying Reader if it implements io.ReadCloser.
+func (this HashCheckingReader) Close() (err error) {
+       _, err = io.Copy(this.Hash, this.Reader)
+
+       if closer, ok := this.Reader.(io.Closer); ok {
+               closeErr := closer.Close()
+               if err == nil {
+                       err = closeErr
+               }
+       }
+       if err != nil {
+               return err
+       }
+       if fmt.Sprintf("%x", this.Hash.Sum(nil)) != this.Check {
+               return BadChecksum
+       }
+       return nil
+}
diff --git a/sdk/go/keepclient/hashcheck_test.go b/sdk/go/keepclient/hashcheck_test.go
new file mode 100644 (file)
index 0000000..44345af
--- /dev/null
@@ -0,0 +1,103 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package keepclient
+
+import (
+       "bytes"
+       "crypto/md5"
+       "fmt"
+       "io"
+       "io/ioutil"
+
+       . "gopkg.in/check.v1"
+)
+
+type HashcheckSuite struct{}
+
+// Gocheck boilerplate
+var _ = Suite(&HashcheckSuite{})
+
+func (h *HashcheckSuite) TestRead(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       {
+               r, w := io.Pipe()
+               hcr := HashCheckingReader{r, md5.New(), hash}
+               go func() {
+                       w.Write([]byte("foo"))
+                       w.Close()
+               }()
+               p, err := ioutil.ReadAll(hcr)
+               c.Check(len(p), Equals, 3)
+               c.Check(err, Equals, nil)
+       }
+
+       {
+               r, w := io.Pipe()
+               hcr := HashCheckingReader{r, md5.New(), hash}
+               go func() {
+                       w.Write([]byte("bar"))
+                       w.Close()
+               }()
+               p, err := ioutil.ReadAll(hcr)
+               c.Check(len(p), Equals, 3)
+               c.Check(err, Equals, BadChecksum)
+       }
+}
+
+func (h *HashcheckSuite) TestWriteTo(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       {
+               bb := bytes.NewBufferString("foo")
+               hcr := HashCheckingReader{bb, md5.New(), hash}
+               r, w := io.Pipe()
+               done := make(chan bool)
+               go func() {
+                       p, err := ioutil.ReadAll(r)
+                       c.Check(len(p), Equals, 3)
+                       c.Check(err, Equals, nil)
+                       done <- true
+               }()
+
+               n, err := hcr.WriteTo(w)
+               w.Close()
+               c.Check(n, Equals, int64(3))
+               c.Check(err, Equals, nil)
+               <-done
+       }
+
+       {
+               bb := bytes.NewBufferString("bar")
+               hcr := HashCheckingReader{bb, md5.New(), hash}
+               r, w := io.Pipe()
+               done := make(chan bool)
+               go func() {
+                       p, err := ioutil.ReadAll(r)
+                       c.Check(len(p), Equals, 3)
+                       c.Check(err, Equals, nil)
+                       done <- true
+               }()
+
+               n, err := hcr.WriteTo(w)
+               w.Close()
+               c.Check(n, Equals, int64(3))
+               c.Check(err, Equals, BadChecksum)
+               <-done
+       }
+
+       // If WriteTo stops early due to a write error, return the
+       // write error (not "bad checksum").
+       {
+               input := bytes.NewBuffer(make([]byte, 1<<26))
+               hcr := HashCheckingReader{input, md5.New(), hash}
+               r, w := io.Pipe()
+               r.Close()
+               n, err := hcr.WriteTo(w)
+               c.Check(n, Equals, int64(0))
+               c.Check(err, NotNil)
+               c.Check(err, Not(Equals), BadChecksum)
+       }
+}
diff --git a/sdk/go/keepclient/keepclient.go b/sdk/go/keepclient/keepclient.go
new file mode 100644 (file)
index 0000000..ab610d6
--- /dev/null
@@ -0,0 +1,617 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Package keepclient provides low-level Get/Put primitives for
+// accessing Arvados Keep blocks.
+package keepclient
+
+import (
+       "bytes"
+       "crypto/md5"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net"
+       "net/http"
+       "regexp"
+       "strconv"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/asyncbuf"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+)
+
+// A Keep "block" is 64MB.
+const BLOCKSIZE = 64 * 1024 * 1024
+
+var (
+       DefaultRequestTimeout      = 20 * time.Second
+       DefaultConnectTimeout      = 2 * time.Second
+       DefaultTLSHandshakeTimeout = 4 * time.Second
+       DefaultKeepAlive           = 180 * time.Second
+
+       DefaultProxyRequestTimeout      = 300 * time.Second
+       DefaultProxyConnectTimeout      = 30 * time.Second
+       DefaultProxyTLSHandshakeTimeout = 10 * time.Second
+       DefaultProxyKeepAlive           = 120 * time.Second
+)
+
+// Error interface with an error and boolean indicating whether the error is temporary
+type Error interface {
+       error
+       Temporary() bool
+}
+
+// multipleResponseError is of type Error
+type multipleResponseError struct {
+       error
+       isTemp bool
+}
+
+func (e *multipleResponseError) Temporary() bool {
+       return e.isTemp
+}
+
+// BlockNotFound is a *ErrNotFound whose Temporary() result is false.
+var BlockNotFound = &ErrNotFound{multipleResponseError{
+       error:  errors.New("Block not found"),
+       isTemp: false,
+}}
+
+// ErrNotFound embeds a multipleResponseError; its Temporary() result may be either true or false.
+type ErrNotFound struct {
+       multipleResponseError
+}
+
+type InsufficientReplicasError error
+
+type OversizeBlockError error
+
+var ErrOversizeBlock = OversizeBlockError(errors.New("Exceeded maximum block size (" + strconv.Itoa(BLOCKSIZE) + ")"))
+var MissingArvadosApiHost = errors.New("Missing required environment variable ARVADOS_API_HOST")
+var MissingArvadosApiToken = errors.New("Missing required environment variable ARVADOS_API_TOKEN")
+var InvalidLocatorError = errors.New("Invalid locator")
+
+// ErrNoSuchKeepServer is returned when GetIndex is invoked with a UUID that has no matching keep server
+var ErrNoSuchKeepServer = errors.New("No keep server matching the given UUID is found")
+
+// ErrIncompleteIndex is returned when the Index response does not end with a blank line
+var ErrIncompleteIndex = errors.New("Got incomplete index")
+
+const X_Keep_Desired_Replicas = "X-Keep-Desired-Replicas"
+const X_Keep_Replicas_Stored = "X-Keep-Replicas-Stored"
+
+type HTTPClient interface {
+       Do(*http.Request) (*http.Response, error)
+}
+
+// KeepClient holds information about Arvados and Keep servers.
+type KeepClient struct {
+       Arvados            *arvadosclient.ArvadosClient
+       Want_replicas      int
+       localRoots         map[string]string
+       writableLocalRoots map[string]string
+       gatewayRoots       map[string]string
+       lock               sync.RWMutex
+       HTTPClient         HTTPClient
+       Retries            int
+       BlockCache         *BlockCache
+       RequestID          string
+       StorageClasses     []string
+
+       // set to 1 if all writable services are of disk type, otherwise 0
+       replicasPerService int
+
+       // Any non-disk typed services found in the list of keepservers?
+       foundNonDiskSvc bool
+
+       // Disable automatic discovery of keep services
+       disableDiscovery bool
+}
+
+// MakeKeepClient creates a new KeepClient, calls discoverServices(),
+// and returns when the client is ready to use.
+func MakeKeepClient(arv *arvadosclient.ArvadosClient) (*KeepClient, error) {
+       kc := New(arv)
+       return kc, kc.discoverServices()
+}
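+
+// Illustrative usage sketch (assumes ARVADOS_API_HOST and
+// ARVADOS_API_TOKEN are set in the environment):
+//
+//   arv, err := arvadosclient.MakeArvadosClient()
+//   if err != nil { /* handle error */ }
+//   kc, err := MakeKeepClient(arv)
+//   if err != nil { /* handle error */ }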
+
+// New creates a new KeepClient. Service discovery will occur on the
+// next read/write operation.
+func New(arv *arvadosclient.ArvadosClient) *KeepClient {
+       defaultReplicationLevel := 2
+       value, err := arv.Discovery("defaultCollectionReplication")
+       if err == nil {
+               v, ok := value.(float64)
+               if ok && v > 0 {
+                       defaultReplicationLevel = int(v)
+               }
+       }
+       return &KeepClient{
+               Arvados:       arv,
+               Want_replicas: defaultReplicationLevel,
+               Retries:       2,
+       }
+}
+
+// PutHR puts a block given the block hash, a reader, and the number of
+// bytes to read from the reader (which must be between 0 and BLOCKSIZE).
+//
+// Returns the locator for the written block, the number of replicas
+// written, and an error.
+//
+// Returns an InsufficientReplicasError if 0 <= replicas <
+// kc.Want_replicas.
+func (kc *KeepClient) PutHR(hash string, r io.Reader, dataBytes int64) (string, int, error) {
+       // Buffer for reads from 'r'
+       var bufsize int
+       if dataBytes > 0 {
+               if dataBytes > BLOCKSIZE {
+                       return "", 0, ErrOversizeBlock
+               }
+               bufsize = int(dataBytes)
+       } else {
+               bufsize = BLOCKSIZE
+       }
+
+       buf := asyncbuf.NewBuffer(make([]byte, 0, bufsize))
+       go func() {
+               _, err := io.Copy(buf, HashCheckingReader{r, md5.New(), hash})
+               buf.CloseWithError(err)
+       }()
+       return kc.putReplicas(hash, buf.NewReader, dataBytes)
+}
+
+// PutHB writes a block to Keep. The hash of the bytes is given in
+// hash, and the data is given in buf.
+//
+// Return values are the same as for PutHR.
+func (kc *KeepClient) PutHB(hash string, buf []byte) (string, int, error) {
+       newReader := func() io.Reader { return bytes.NewBuffer(buf) }
+       return kc.putReplicas(hash, newReader, int64(len(buf)))
+}
+
+// PutB writes a block to Keep. It computes the hash itself.
+//
+// Return values are the same as for PutHR.
+func (kc *KeepClient) PutB(buffer []byte) (string, int, error) {
+       hash := fmt.Sprintf("%x", md5.Sum(buffer))
+       return kc.PutHB(hash, buffer)
+}
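+
+// Illustrative sketch: on success the returned locator is the block's
+// md5 hash plus a size hint (and, on clusters that sign locators, a +A
+// permission hint):
+//
+//   locator, replicas, err := kc.PutB([]byte("foo"))
+//   // locator ~ "acbd18db4cc2f85cedef654fccc4a4d8+3+A...", replicas >= kc.Want_replicas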
+
+// PutR writes a block to Keep. It first reads all data from r into a buffer
+// in order to compute the hash.
+//
+// Return values are the same as for PutHR.
+//
+// If the block hash and data size are known, PutHR is more efficient.
+func (kc *KeepClient) PutR(r io.Reader) (locator string, replicas int, err error) {
+       buffer, err := ioutil.ReadAll(r)
+       if err != nil {
+               return "", 0, err
+       }
+       return kc.PutB(buffer)
+}
+
+func (kc *KeepClient) getOrHead(method string, locator string, header http.Header) (io.ReadCloser, int64, string, http.Header, error) {
+       if strings.HasPrefix(locator, "d41d8cd98f00b204e9800998ecf8427e+0") {
+               return ioutil.NopCloser(bytes.NewReader(nil)), 0, "", nil, nil
+       }
+
+       reqid := kc.getRequestID()
+
+       var expectLength int64
+       if parts := strings.SplitN(locator, "+", 3); len(parts) < 2 {
+               expectLength = -1
+       } else if n, err := strconv.ParseInt(parts[1], 10, 64); err != nil {
+               expectLength = -1
+       } else {
+               expectLength = n
+       }
+
+       var errs []string
+
+       triesRemaining := 1 + kc.Retries
+
+       serversToTry := kc.getSortedRoots(locator)
+
+       numServers := len(serversToTry)
+       count404 := 0
+
+       var retryList []string
+
+       for triesRemaining > 0 {
+               triesRemaining--
+               retryList = nil
+
+               for _, host := range serversToTry {
+                       url := host + "/" + locator
+
+                       req, err := http.NewRequest(method, url, nil)
+                       if err != nil {
+                               errs = append(errs, fmt.Sprintf("%s: %v", url, err))
+                               continue
+                       }
+                       for k, v := range header {
+                               req.Header[k] = append([]string(nil), v...)
+                       }
+                       if req.Header.Get("Authorization") == "" {
+                               req.Header.Set("Authorization", "OAuth2 "+kc.Arvados.ApiToken)
+                       }
+                       if req.Header.Get("X-Request-Id") == "" {
+                               req.Header.Set("X-Request-Id", reqid)
+                       }
+                       resp, err := kc.httpClient().Do(req)
+                       if err != nil {
+                               // Probably a network error, may be transient,
+                               // can try again.
+                               errs = append(errs, fmt.Sprintf("%s: %v", url, err))
+                               retryList = append(retryList, host)
+                               continue
+                       }
+                       if resp.StatusCode != http.StatusOK {
+                               var respbody []byte
+                               respbody, _ = ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: 4096})
+                               resp.Body.Close()
+                               errs = append(errs, fmt.Sprintf("%s: HTTP %d %q",
+                                       url, resp.StatusCode, bytes.TrimSpace(respbody)))
+
+                               if resp.StatusCode == 408 ||
+                                       resp.StatusCode == 429 ||
+                                       resp.StatusCode >= 500 {
+                                       // Timeout, too many requests, or
+                                       // other server-side failure: a
+                                       // transient error, so try again.
+                                       retryList = append(retryList, host)
+                               } else if resp.StatusCode == 404 {
+                                       count404++
+                               }
+                               continue
+                       }
+                       if expectLength < 0 {
+                               if resp.ContentLength < 0 {
+                                       resp.Body.Close()
+                                       return nil, 0, "", nil, fmt.Errorf("error reading %q: no size hint, no Content-Length header in response", locator)
+                               }
+                               expectLength = resp.ContentLength
+                       } else if resp.ContentLength >= 0 && expectLength != resp.ContentLength {
+                               resp.Body.Close()
+                               return nil, 0, "", nil, fmt.Errorf("error reading %q: size hint %d != Content-Length %d", locator, expectLength, resp.ContentLength)
+                       }
+                       // Success
+                       if method == "GET" {
+                               return HashCheckingReader{
+                                       Reader: resp.Body,
+                                       Hash:   md5.New(),
+                                       Check:  locator[0:32],
+                               }, expectLength, url, resp.Header, nil
+                       } else {
+                               resp.Body.Close()
+                               return nil, expectLength, url, resp.Header, nil
+                       }
+               }
+               serversToTry = retryList
+       }
+       DebugPrintf("DEBUG: %s %s failed: %v", method, locator, errs)
+
+       var err error
+       if count404 == numServers {
+               err = BlockNotFound
+       } else {
+               err = &ErrNotFound{multipleResponseError{
+                       error:  fmt.Errorf("%s %s failed: %v", method, locator, errs),
+                       isTemp: len(serversToTry) > 0,
+               }}
+       }
+       return nil, 0, "", nil, err
+}
+
+// LocalLocator returns a locator equivalent to the one supplied, but
+// with a valid signature from the local cluster. If the given locator
+// already has a local signature, it is returned unchanged.
+func (kc *KeepClient) LocalLocator(locator string) (string, error) {
+       if !strings.Contains(locator, "+R") {
+               // Either it has +A, or it's unsigned and we assume
+               // it's a local locator on a site with signatures
+               // disabled.
+               return locator, nil
+       }
+       sighdr := fmt.Sprintf("local, time=%s", time.Now().UTC().Format(time.RFC3339))
+       _, _, url, hdr, err := kc.getOrHead("HEAD", locator, http.Header{"X-Keep-Signature": []string{sighdr}})
+       if err != nil {
+               return "", err
+       }
+       loc := hdr.Get("X-Keep-Locator")
+       if loc == "" {
+               return "", fmt.Errorf("missing X-Keep-Locator header in HEAD response from %s", url)
+       }
+       return loc, nil
+}
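+
+// Illustrative sketch: a remotely signed locator such as
+//
+//   "acbd18db4cc2f85cedef654fccc4a4d8+3+Rzzzzz-abcde@12345678"
+//
+// is exchanged, via the signed HEAD request above, for the locator in
+// the X-Keep-Locator response header, which carries a local signature.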
+
+// Get() retrieves a block, given a locator. Returns a reader, the
+// expected data length, the URL the block is being fetched from, and
+// an error.
+//
+// If the block checksum does not match, the final Read() on the
+// reader returned by this method will return a BadChecksum error
+// instead of EOF.
+func (kc *KeepClient) Get(locator string) (io.ReadCloser, int64, string, error) {
+       rdr, size, url, _, err := kc.getOrHead("GET", locator, nil)
+       return rdr, size, url, err
+}
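+
+// Illustrative usage sketch:
+//
+//   rdr, size, url, err := kc.Get("acbd18db4cc2f85cedef654fccc4a4d8+3")
+//   if err == nil {
+//           data, err := ioutil.ReadAll(rdr) // final Read reports BadChecksum on corruption
+//           rdr.Close()
+//   }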
+
+// ReadAt() retrieves a portion of a block from the cache if it's
+// present, otherwise from the network.
+func (kc *KeepClient) ReadAt(locator string, p []byte, off int) (int, error) {
+       return kc.cache().ReadAt(kc, locator, p, off)
+}
+
+// Ask() verifies that a block with the given hash is available and
+// readable, according to at least one Keep service. Unlike Get, it
+// does not retrieve the data or verify that the data content matches
+// the hash specified by the locator.
+//
+// Returns the data size (content length) reported by the Keep service,
+// and the URI of the service that reported it.
+func (kc *KeepClient) Ask(locator string) (int64, string, error) {
+       _, size, url, _, err := kc.getOrHead("HEAD", locator, nil)
+       return size, url, err
+}
+
+// GetIndex retrieves a list of blocks stored on the given server whose hashes
+// begin with the given prefix. It returns ErrIncompleteIndex if the complete
+// index cannot be retrieved.
+//
+// This is meant to be used only by system components and admin tools.
+// It will return an error unless the client is using a "data manager token"
+// recognized by the Keep services.
+func (kc *KeepClient) GetIndex(keepServiceUUID, prefix string) (io.Reader, error) {
+       url := kc.LocalRoots()[keepServiceUUID]
+       if url == "" {
+               return nil, ErrNoSuchKeepServer
+       }
+
+       url += "/index"
+       if prefix != "" {
+               url += "/" + prefix
+       }
+
+       req, err := http.NewRequest("GET", url, nil)
+       if err != nil {
+               return nil, err
+       }
+
+       req.Header.Add("Authorization", "OAuth2 "+kc.Arvados.ApiToken)
+       req.Header.Set("X-Request-Id", kc.getRequestID())
+       resp, err := kc.httpClient().Do(req)
+       if err != nil {
+               return nil, err
+       }
+
+       defer resp.Body.Close()
+
+       if resp.StatusCode != http.StatusOK {
+               return nil, fmt.Errorf("Got http status code: %d", resp.StatusCode)
+       }
+
+       var respBody []byte
+       respBody, err = ioutil.ReadAll(resp.Body)
+       if err != nil {
+               return nil, err
+       }
+
+       // Got index; verify that it is complete
+       // The response should be "\n" if no locators matched the prefix
+       // Else, it should be a list of locators followed by a blank line
+       if !bytes.Equal(respBody, []byte("\n")) && !bytes.HasSuffix(respBody, []byte("\n\n")) {
+               return nil, ErrIncompleteIndex
+       }
+
+       // Got complete index; strip the trailing newline and send
+       return bytes.NewReader(respBody[0 : len(respBody)-1]), nil
+}
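+
+// Illustrative sketch (line format assumed from Keep server behavior,
+// not defined in this file): each index line is a block locator
+// followed by a timestamp, and the index ends with a blank line:
+//
+//   acbd18db4cc2f85cedef654fccc4a4d8+3 1552575600000000000
+//   37b51d194a7513e45b56f6524f2d51f2+3 1552575601000000000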
+
+// LocalRoots() returns the map of local (i.e., disk and proxy) Keep
+// services: uuid -> baseURI.
+func (kc *KeepClient) LocalRoots() map[string]string {
+       kc.discoverServices()
+       kc.lock.RLock()
+       defer kc.lock.RUnlock()
+       return kc.localRoots
+}
+
+// GatewayRoots() returns the map of Keep remote gateway services:
+// uuid -> baseURI.
+func (kc *KeepClient) GatewayRoots() map[string]string {
+       kc.discoverServices()
+       kc.lock.RLock()
+       defer kc.lock.RUnlock()
+       return kc.gatewayRoots
+}
+
+// WritableLocalRoots() returns the map of writable local Keep services:
+// uuid -> baseURI.
+func (kc *KeepClient) WritableLocalRoots() map[string]string {
+       kc.discoverServices()
+       kc.lock.RLock()
+       defer kc.lock.RUnlock()
+       return kc.writableLocalRoots
+}
+
+// SetServiceRoots disables service discovery and updates the
+// localRoots and gatewayRoots maps, without disrupting operations
+// that are already in progress.
+//
+// The supplied maps must not be modified after calling
+// SetServiceRoots.
+func (kc *KeepClient) SetServiceRoots(locals, writables, gateways map[string]string) {
+       kc.disableDiscovery = true
+       kc.setServiceRoots(locals, writables, gateways)
+}
+
+func (kc *KeepClient) setServiceRoots(locals, writables, gateways map[string]string) {
+       kc.lock.Lock()
+       defer kc.lock.Unlock()
+       kc.localRoots = locals
+       kc.writableLocalRoots = writables
+       kc.gatewayRoots = gateways
+}
+
+// getSortedRoots returns a list of base URIs of Keep services, in the
+// order they should be tried when retrieving content for the given
+// locator.
+func (kc *KeepClient) getSortedRoots(locator string) []string {
+       var found []string
+       for _, hint := range strings.Split(locator, "+") {
+               if len(hint) < 7 || hint[0:2] != "K@" {
+                       // Not a service hint.
+                       continue
+               }
+               if len(hint) == 7 {
+                       // +K@abcde means fetch from proxy at
+                       // keep.abcde.arvadosapi.com
+                       found = append(found, "https://keep."+hint[2:]+".arvadosapi.com")
+               } else if len(hint) == 29 {
+                       // +K@abcde-abcde-abcdeabcdeabcde means fetch
+                       // from gateway with given uuid
+                       if gwURI, ok := kc.GatewayRoots()[hint[2:]]; ok {
+                               found = append(found, gwURI)
+                       }
+                       // else this hint is no use to us; carry on.
+               }
+       }
+       // After trying all usable service hints, fall back to local roots.
+       found = append(found, NewRootSorter(kc.LocalRoots(), locator[0:32]).GetSortedRoots()...)
+       return found
+}
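+
+// Illustrative sketch of the hint forms handled above:
+//
+//   "+K@zzzzz"                       -> https://keep.zzzzz.arvadosapi.com (proxy)
+//   "+K@zzzzz-bi6l4-xxxxxxxxxxxxxxx" -> kc.GatewayRoots()["zzzzz-bi6l4-xxxxxxxxxxxxxxx"]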
+
+func (kc *KeepClient) cache() *BlockCache {
+       if kc.BlockCache != nil {
+               return kc.BlockCache
+       } else {
+               return DefaultBlockCache
+       }
+}
+
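+// ClearBlockCache discards all blocks cached by this client (the
+// client's own BlockCache, or DefaultBlockCache if none is set).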
+func (kc *KeepClient) ClearBlockCache() {
+       kc.cache().Clear()
+}
+
+var (
+       // There are four global http.Client objects for the four
+       // possible permutations of TLS behavior (verify/skip-verify)
+       // and timeout settings (proxy/non-proxy).
+       defaultClient = map[bool]map[bool]HTTPClient{
+               // defaultClient[false] is used for verified TLS reqs
+               false: {},
+               // defaultClient[true] is used for unverified
+               // (insecure) TLS reqs
+               true: {},
+       }
+       defaultClientMtx sync.Mutex
+)
+
+// httpClient returns the HTTPClient field if it's not nil, otherwise
+// whichever of the four global http.Client objects is suitable for
+// the current environment (i.e., TLS verification on/off, keep
+// services are/aren't proxies).
+func (kc *KeepClient) httpClient() HTTPClient {
+       if kc.HTTPClient != nil {
+               return kc.HTTPClient
+       }
+       defaultClientMtx.Lock()
+       defer defaultClientMtx.Unlock()
+       if c, ok := defaultClient[kc.Arvados.ApiInsecure][kc.foundNonDiskSvc]; ok {
+               return c
+       }
+
+       var requestTimeout, connectTimeout, keepAlive, tlsTimeout time.Duration
+       if kc.foundNonDiskSvc {
+               // Use longer timeouts when connecting to a proxy,
+               // because this usually means the intervening network
+               // is slower.
+               requestTimeout = DefaultProxyRequestTimeout
+               connectTimeout = DefaultProxyConnectTimeout
+               tlsTimeout = DefaultProxyTLSHandshakeTimeout
+               keepAlive = DefaultProxyKeepAlive
+       } else {
+               requestTimeout = DefaultRequestTimeout
+               connectTimeout = DefaultConnectTimeout
+               tlsTimeout = DefaultTLSHandshakeTimeout
+               keepAlive = DefaultKeepAlive
+       }
+
+       c := &http.Client{
+               Timeout: requestTimeout,
+               // It's not safe to copy *http.DefaultTransport
+               // because it has a mutex (which might be locked)
+               // protecting a private map (which might not be nil).
+               // So we build our own, using the Go 1.10 default
+               // values, ignoring any changes the application has
+               // made to http.DefaultTransport.
+               Transport: &http.Transport{
+                       DialContext: (&net.Dialer{
+                               Timeout:   connectTimeout,
+                               KeepAlive: keepAlive,
+                               DualStack: true,
+                       }).DialContext,
+                       MaxIdleConns:          100,
+                       IdleConnTimeout:       90 * time.Second,
+                       TLSHandshakeTimeout:   tlsTimeout,
+                       ExpectContinueTimeout: time.Second,
+                       TLSClientConfig:       arvadosclient.MakeTLSConfig(kc.Arvados.ApiInsecure),
+               },
+       }
+       defaultClient[kc.Arvados.ApiInsecure][kc.foundNonDiskSvc] = c
+       return c
+}
+
+var reqIDGen = httpserver.IDGenerator{Prefix: "req-"}
+
+func (kc *KeepClient) getRequestID() string {
+       if kc.RequestID != "" {
+               return kc.RequestID
+       } else {
+               return reqIDGen.Next()
+       }
+}
+
+type Locator struct {
+       Hash  string
+       Size  int      // -1 if data size is not known
+       Hints []string // Including the size hint, if any
+}
+
+func (loc *Locator) String() string {
+       s := loc.Hash
+       if len(loc.Hints) > 0 {
+               s = s + "+" + strings.Join(loc.Hints, "+")
+       }
+       return s
+}
+
+var locatorMatcher = regexp.MustCompile("^([0-9a-f]{32})([+](.*))?$")
+
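+// MakeLocator parses path into a Locator, returning
+// InvalidLocatorError if it is not a valid locator string.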
+func MakeLocator(path string) (*Locator, error) {
+       sm := locatorMatcher.FindStringSubmatch(path)
+       if sm == nil {
+               return nil, InvalidLocatorError
+       }
+       loc := Locator{Hash: sm[1], Size: -1}
+       if sm[2] != "" {
+               loc.Hints = strings.Split(sm[3], "+")
+       } else {
+               loc.Hints = []string{}
+       }
+       if len(loc.Hints) > 0 {
+               if size, err := strconv.Atoi(loc.Hints[0]); err == nil {
+                       loc.Size = size
+               }
+       }
+       return &loc, nil
+}
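+
+// Illustrative sketch, derived from the regexp above:
+//
+//   loc, _ := MakeLocator("acbd18db4cc2f85cedef654fccc4a4d8+3+Aabcdef@12345678")
+//   // loc.Hash  == "acbd18db4cc2f85cedef654fccc4a4d8"
+//   // loc.Size  == 3
+//   // loc.Hints == []string{"3", "Aabcdef@12345678"}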
diff --git a/sdk/go/keepclient/keepclient_test.go b/sdk/go/keepclient/keepclient_test.go
new file mode 100644 (file)
index 0000000..176ad65
--- /dev/null
@@ -0,0 +1,1296 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package keepclient
+
+import (
+       "bytes"
+       "crypto/md5"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "net"
+       "net/http"
+       "os"
+       "strings"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       . "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       TestingT(t)
+}
+
+// Gocheck boilerplate
+var _ = Suite(&ServerRequiredSuite{})
+var _ = Suite(&StandaloneSuite{})
+
+// Tests that require the Keep server running
+type ServerRequiredSuite struct{}
+
+// Standalone tests
+type StandaloneSuite struct{}
+
+func (s *StandaloneSuite) SetUpTest(c *C) {
+       RefreshServiceDiscovery()
+}
+
+func pythonDir() string {
+       cwd, _ := os.Getwd()
+       return fmt.Sprintf("%s/../../python/tests", cwd)
+}
+
+func (s *ServerRequiredSuite) SetUpSuite(c *C) {
+       arvadostest.StartAPI()
+       arvadostest.StartKeep(2, false)
+}
+
+func (s *ServerRequiredSuite) TearDownSuite(c *C) {
+       arvadostest.StopKeep(2)
+       arvadostest.StopAPI()
+}
+
+func (s *ServerRequiredSuite) SetUpTest(c *C) {
+       RefreshServiceDiscovery()
+}
+
+func (s *ServerRequiredSuite) TestMakeKeepClient(c *C) {
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, Equals, nil)
+
+       kc, err := MakeKeepClient(arv)
+
+       c.Assert(err, Equals, nil)
+       c.Check(len(kc.LocalRoots()), Equals, 2)
+       for _, root := range kc.LocalRoots() {
+               c.Check(root, Matches, "http://localhost:\\d+")
+       }
+}
+
+func (s *ServerRequiredSuite) TestDefaultReplications(c *C) {
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, Equals, nil)
+
+       kc, err := MakeKeepClient(arv)
+       c.Assert(kc.Want_replicas, Equals, 2)
+
+       arv.DiscoveryDoc["defaultCollectionReplication"] = 3.0
+       kc, err = MakeKeepClient(arv)
+       c.Assert(kc.Want_replicas, Equals, 3)
+
+       arv.DiscoveryDoc["defaultCollectionReplication"] = 1.0
+       kc, err = MakeKeepClient(arv)
+       c.Check(err, IsNil)
+       c.Assert(kc.Want_replicas, Equals, 1)
+}
+
+type StubPutHandler struct {
+       c                  *C
+       expectPath         string
+       expectApiToken     string
+       expectBody         string
+       expectStorageClass string
+       handled            chan string
+}
+
+func (sph StubPutHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       sph.c.Check(req.URL.Path, Equals, "/"+sph.expectPath)
+       sph.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", sph.expectApiToken))
+       sph.c.Check(req.Header.Get("X-Keep-Storage-Classes"), Equals, sph.expectStorageClass)
+       body, err := ioutil.ReadAll(req.Body)
+       sph.c.Check(err, Equals, nil)
+       sph.c.Check(body, DeepEquals, []byte(sph.expectBody))
+       resp.WriteHeader(200)
+       sph.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+func RunFakeKeepServer(st http.Handler) (ks KeepServer) {
+       var err error
+       // If we don't explicitly bind to localhost, the listener will bind to
+       // 0.0.0.0 or [::], and ks.listener.Addr() won't be a valid address for Dial()
+       ks.listener, err = net.ListenTCP("tcp", &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 0})
+       if err != nil {
+               panic("Could not listen on any port")
+       }
+       ks.url = fmt.Sprintf("http://%s", ks.listener.Addr().String())
+       go http.Serve(ks.listener, st)
+       return
+}
+
+func UploadToStubHelper(c *C, st http.Handler, f func(*KeepClient, string,
+       io.ReadCloser, io.WriteCloser, chan uploadStatus)) {
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, _ := arvadosclient.MakeArvadosClient()
+       arv.ApiToken = "abc123"
+
+       kc, _ := MakeKeepClient(arv)
+
+       reader, writer := io.Pipe()
+       upload_status := make(chan uploadStatus)
+
+       f(kc, ks.url, reader, writer, upload_status)
+}
+
+func (s *StandaloneSuite) TestUploadToStubKeepServer(c *C) {
+       log.Printf("TestUploadToStubKeepServer")
+
+       st := StubPutHandler{
+               c,
+               "acbd18db4cc2f85cedef654fccc4a4d8",
+               "abc123",
+               "foo",
+               "hot",
+               make(chan string)}
+
+       UploadToStubHelper(c, st,
+               func(kc *KeepClient, url string, reader io.ReadCloser, writer io.WriteCloser, upload_status chan uploadStatus) {
+                       kc.StorageClasses = []string{"hot"}
+                       go kc.uploadToKeepServer(url, st.expectPath, reader, upload_status, int64(len("foo")), kc.getRequestID())
+
+                       writer.Write([]byte("foo"))
+                       writer.Close()
+
+                       <-st.handled
+                       status := <-upload_status
+                       c.Check(status, DeepEquals, uploadStatus{nil, fmt.Sprintf("%s/%s", url, st.expectPath), 200, 1, ""})
+               })
+}
+
+func (s *StandaloneSuite) TestUploadToStubKeepServerBufferReader(c *C) {
+       st := StubPutHandler{
+               c,
+               "acbd18db4cc2f85cedef654fccc4a4d8",
+               "abc123",
+               "foo",
+               "",
+               make(chan string)}
+
+       UploadToStubHelper(c, st,
+               func(kc *KeepClient, url string, _ io.ReadCloser, _ io.WriteCloser, upload_status chan uploadStatus) {
+                       go kc.uploadToKeepServer(url, st.expectPath, bytes.NewBuffer([]byte("foo")), upload_status, 3, kc.getRequestID())
+
+                       <-st.handled
+
+                       status := <-upload_status
+                       c.Check(status, DeepEquals, uploadStatus{nil, fmt.Sprintf("%s/%s", url, st.expectPath), 200, 1, ""})
+               })
+}
+
+type FailHandler struct {
+       handled chan string
+}
+
+func (fh FailHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       resp.WriteHeader(500)
+       fh.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+type FailThenSucceedHandler struct {
+       handled        chan string
+       count          int
+       successhandler http.Handler
+       reqIDs         []string
+}
+
+func (fh *FailThenSucceedHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       fh.reqIDs = append(fh.reqIDs, req.Header.Get("X-Request-Id"))
+       if fh.count == 0 {
+               resp.WriteHeader(500)
+               fh.count++
+               fh.handled <- fmt.Sprintf("http://%s", req.Host)
+       } else {
+               fh.successhandler.ServeHTTP(resp, req)
+       }
+}
+
+type Error404Handler struct {
+       handled chan string
+}
+
+func (fh Error404Handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       resp.WriteHeader(404)
+       fh.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+func (s *StandaloneSuite) TestFailedUploadToStubKeepServer(c *C) {
+       st := FailHandler{
+               make(chan string)}
+
+       hash := "acbd18db4cc2f85cedef654fccc4a4d8"
+
+       UploadToStubHelper(c, st,
+               func(kc *KeepClient, url string, reader io.ReadCloser,
+                       writer io.WriteCloser, upload_status chan uploadStatus) {
+
+                       go kc.uploadToKeepServer(url, hash, reader, upload_status, 3, kc.getRequestID())
+
+                       writer.Write([]byte("foo"))
+                       writer.Close()
+
+                       <-st.handled
+
+                       status := <-upload_status
+                       c.Check(status.url, Equals, fmt.Sprintf("%s/%s", url, hash))
+                       c.Check(status.statusCode, Equals, 500)
+               })
+}
+
+type KeepServer struct {
+       listener net.Listener
+       url      string
+}
+
+func RunSomeFakeKeepServers(st http.Handler, n int) (ks []KeepServer) {
+       ks = make([]KeepServer, n)
+
+       for i := 0; i < n; i++ {
+               ks[i] = RunFakeKeepServer(st)
+       }
+
+       return ks
+}
+
+func (s *StandaloneSuite) TestPutB(c *C) {
+       hash := Md5String("foo")
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               "",
+               make(chan string, 5)}
+
+       arv, _ := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+
+       kc.Want_replicas = 2
+       arv.ApiToken = "abc123"
+       localRoots := make(map[string]string)
+       writableLocalRoots := make(map[string]string)
+
+       ks := RunSomeFakeKeepServers(st, 5)
+
+       for i, k := range ks {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
+
+       kc.PutB([]byte("foo"))
+
+       shuff := NewRootSorter(
+               kc.LocalRoots(), Md5String("foo")).GetSortedRoots()
+
+       s1 := <-st.handled
+       s2 := <-st.handled
+       c.Check((s1 == shuff[0] && s2 == shuff[1]) ||
+               (s1 == shuff[1] && s2 == shuff[0]),
+               Equals,
+               true)
+}
+
+func (s *StandaloneSuite) TestPutHR(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               "",
+               make(chan string, 5)}
+
+       arv, _ := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+
+       kc.Want_replicas = 2
+       arv.ApiToken = "abc123"
+       localRoots := make(map[string]string)
+       writableLocalRoots := make(map[string]string)
+
+       ks := RunSomeFakeKeepServers(st, 5)
+
+       for i, k := range ks {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
+
+       reader, writer := io.Pipe()
+
+       go func() {
+               writer.Write([]byte("foo"))
+               writer.Close()
+       }()
+
+       kc.PutHR(hash, reader, 3)
+
+       shuff := NewRootSorter(kc.LocalRoots(), hash).GetSortedRoots()
+
+       s1 := <-st.handled
+       s2 := <-st.handled
+
+       c.Check((s1 == shuff[0] && s2 == shuff[1]) ||
+               (s1 == shuff[1] && s2 == shuff[0]),
+               Equals,
+               true)
+}
+
+func (s *StandaloneSuite) TestPutWithFail(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               "",
+               make(chan string, 4)}
+
+       fh := FailHandler{
+               make(chan string, 1)}
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+
+       kc.Want_replicas = 2
+       arv.ApiToken = "abc123"
+       localRoots := make(map[string]string)
+       writableLocalRoots := make(map[string]string)
+
+       ks1 := RunSomeFakeKeepServers(st, 4)
+       ks2 := RunSomeFakeKeepServers(fh, 1)
+
+       for i, k := range ks1 {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+       for i, k := range ks2 {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i+len(ks1))] = k.url
+               writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i+len(ks1))] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
+
+       shuff := NewRootSorter(
+               kc.LocalRoots(), Md5String("foo")).GetSortedRoots()
+       c.Logf("%+v", shuff)
+
+       phash, replicas, err := kc.PutB([]byte("foo"))
+
+       <-fh.handled
+
+       c.Check(err, Equals, nil)
+       c.Check(phash, Equals, "")
+       c.Check(replicas, Equals, 2)
+
+       s1 := <-st.handled
+       s2 := <-st.handled
+
+       c.Check((s1 == shuff[1] && s2 == shuff[2]) ||
+               (s1 == shuff[2] && s2 == shuff[1]),
+               Equals,
+               true)
+}
+
+func (s *StandaloneSuite) TestPutWithTooManyFail(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               "",
+               make(chan string, 1)}
+
+       fh := FailHandler{
+               make(chan string, 4)}
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+
+       kc.Want_replicas = 2
+       kc.Retries = 0
+       arv.ApiToken = "abc123"
+       localRoots := make(map[string]string)
+       writableLocalRoots := make(map[string]string)
+
+       ks1 := RunSomeFakeKeepServers(st, 1)
+       ks2 := RunSomeFakeKeepServers(fh, 4)
+
+       for i, k := range ks1 {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+       for i, k := range ks2 {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i+len(ks1))] = k.url
+               writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i+len(ks1))] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
+
+       _, replicas, err := kc.PutB([]byte("foo"))
+
+       c.Check(err, FitsTypeOf, InsufficientReplicasError(errors.New("")))
+       c.Check(replicas, Equals, 1)
+       c.Check(<-st.handled, Equals, ks1[0].url)
+}
+
+type StubGetHandler struct {
+       c              *C
+       expectPath     string
+       expectApiToken string
+       httpStatus     int
+       body           []byte
+}
+
+func (sgh StubGetHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       sgh.c.Check(req.URL.Path, Equals, "/"+sgh.expectPath)
+       sgh.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", sgh.expectApiToken))
+       // Headers must be set before WriteHeader; the original order
+       // silently dropped the Content-Length header.
+       resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(sgh.body)))
+       resp.WriteHeader(sgh.httpStatus)
+       resp.Write(sgh.body)
+}
+
+func (s *StandaloneSuite) TestGet(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubGetHandler{
+               c,
+               hash,
+               "abc123",
+               http.StatusOK,
+               []byte("foo")}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+
+       r, n, url2, err := kc.Get(hash)
+       defer r.Close()
+       c.Check(err, Equals, nil)
+       c.Check(n, Equals, int64(3))
+       c.Check(url2, Equals, fmt.Sprintf("%s/%s", ks.url, hash))
+
+       content, err2 := ioutil.ReadAll(r)
+       c.Check(err2, Equals, nil)
+       c.Check(content, DeepEquals, []byte("foo"))
+}
+
+func (s *StandaloneSuite) TestGet404(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := Error404Handler{make(chan string, 1)}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+
+       r, n, url2, err := kc.Get(hash)
+       c.Check(err, Equals, BlockNotFound)
+       c.Check(n, Equals, int64(0))
+       c.Check(url2, Equals, "")
+       c.Check(r, Equals, nil)
+}
+
+func (s *StandaloneSuite) TestGetEmptyBlock(c *C) {
+       st := Error404Handler{make(chan string, 1)}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+
+       r, n, url2, err := kc.Get("d41d8cd98f00b204e9800998ecf8427e+0")
+       c.Check(err, IsNil)
+       c.Check(n, Equals, int64(0))
+       c.Check(url2, Equals, "")
+       c.Assert(r, NotNil)
+       buf, err := ioutil.ReadAll(r)
+       c.Check(err, IsNil)
+       c.Check(buf, DeepEquals, []byte{})
+}
+
+func (s *StandaloneSuite) TestGetFail(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := FailHandler{make(chan string, 1)}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+       kc.Retries = 0
+
+       r, n, url2, err := kc.Get(hash)
+       errNotFound, _ := err.(*ErrNotFound)
+       c.Check(errNotFound, NotNil)
+       c.Check(strings.Contains(errNotFound.Error(), "HTTP 500"), Equals, true)
+       c.Check(errNotFound.Temporary(), Equals, true)
+       c.Check(n, Equals, int64(0))
+       c.Check(url2, Equals, "")
+       c.Check(r, Equals, nil)
+}
+
+func (s *StandaloneSuite) TestGetFailRetry(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := &FailThenSucceedHandler{
+               handled: make(chan string, 1),
+               successhandler: StubGetHandler{
+                       c,
+                       hash,
+                       "abc123",
+                       http.StatusOK,
+                       []byte("foo")}}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+
+       r, n, url2, err := kc.Get(hash)
+       defer r.Close()
+       c.Check(err, Equals, nil)
+       c.Check(n, Equals, int64(3))
+       c.Check(url2, Equals, fmt.Sprintf("%s/%s", ks.url, hash))
+
+       content, err2 := ioutil.ReadAll(r)
+       c.Check(err2, Equals, nil)
+       c.Check(content, DeepEquals, []byte("foo"))
+
+       c.Logf("%q", st.reqIDs)
+       c.Assert(len(st.reqIDs) > 1, Equals, true)
+       for _, reqid := range st.reqIDs {
+               c.Check(reqid, Not(Equals), "")
+               c.Check(reqid, Equals, st.reqIDs[0])
+       }
+}
+
+func (s *StandaloneSuite) TestGetNetError(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": "http://localhost:62222"}, nil, nil)
+
+       r, n, url2, err := kc.Get(hash)
+       errNotFound, _ := err.(*ErrNotFound)
+       c.Check(errNotFound, NotNil)
+       c.Check(strings.Contains(errNotFound.Error(), "connection refused"), Equals, true)
+       c.Check(errNotFound.Temporary(), Equals, true)
+       c.Check(n, Equals, int64(0))
+       c.Check(url2, Equals, "")
+       c.Check(r, Equals, nil)
+}
+
+func (s *StandaloneSuite) TestGetWithServiceHint(c *C) {
+       uuid := "zzzzz-bi6l4-123451234512345"
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       // This one shouldn't be used:
+       ks0 := RunFakeKeepServer(StubGetHandler{
+               c,
+               "error if used",
+               "abc123",
+               http.StatusOK,
+               []byte("foo")})
+       defer ks0.listener.Close()
+       // This one should be used:
+       ks := RunFakeKeepServer(StubGetHandler{
+               c,
+               hash + "+K@" + uuid,
+               "abc123",
+               http.StatusOK,
+               []byte("foo")})
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(
+               map[string]string{"x": ks0.url},
+               nil,
+               map[string]string{uuid: ks.url})
+
+       r, n, uri, err := kc.Get(hash + "+K@" + uuid)
+       defer r.Close()
+       c.Check(err, Equals, nil)
+       c.Check(n, Equals, int64(3))
+       c.Check(uri, Equals, fmt.Sprintf("%s/%s", ks.url, hash+"+K@"+uuid))
+
+       content, err := ioutil.ReadAll(r)
+       c.Check(err, Equals, nil)
+       c.Check(content, DeepEquals, []byte("foo"))
+}
+
+// Use a service hint to fetch from a local disk service, overriding
+// rendezvous probe order.
+func (s *StandaloneSuite) TestGetWithLocalServiceHint(c *C) {
+       uuid := "zzzzz-bi6l4-zzzzzzzzzzzzzzz"
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       // This one shouldn't be used, although it appears first in
+       // rendezvous probe order:
+       ks0 := RunFakeKeepServer(StubGetHandler{
+               c,
+               "error if used",
+               "abc123",
+               http.StatusOK,
+               []byte("foo")})
+       defer ks0.listener.Close()
+       // This one should be used:
+       ks := RunFakeKeepServer(StubGetHandler{
+               c,
+               hash + "+K@" + uuid,
+               "abc123",
+               http.StatusOK,
+               []byte("foo")})
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(
+               map[string]string{
+                       "zzzzz-bi6l4-yyyyyyyyyyyyyyy": ks0.url,
+                       "zzzzz-bi6l4-xxxxxxxxxxxxxxx": ks0.url,
+                       "zzzzz-bi6l4-wwwwwwwwwwwwwww": ks0.url,
+                       uuid: ks.url},
+               nil,
+               map[string]string{
+                       "zzzzz-bi6l4-yyyyyyyyyyyyyyy": ks0.url,
+                       "zzzzz-bi6l4-xxxxxxxxxxxxxxx": ks0.url,
+                       "zzzzz-bi6l4-wwwwwwwwwwwwwww": ks0.url,
+                       uuid: ks.url},
+       )
+
+       r, n, uri, err := kc.Get(hash + "+K@" + uuid)
+       defer r.Close()
+       c.Check(err, Equals, nil)
+       c.Check(n, Equals, int64(3))
+       c.Check(uri, Equals, fmt.Sprintf("%s/%s", ks.url, hash+"+K@"+uuid))
+
+       content, err := ioutil.ReadAll(r)
+       c.Check(err, Equals, nil)
+       c.Check(content, DeepEquals, []byte("foo"))
+}
+
+func (s *StandaloneSuite) TestGetWithServiceHintFailoverToLocals(c *C) {
+       uuid := "zzzzz-bi6l4-123451234512345"
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       ksLocal := RunFakeKeepServer(StubGetHandler{
+               c,
+               hash + "+K@" + uuid,
+               "abc123",
+               http.StatusOK,
+               []byte("foo")})
+       defer ksLocal.listener.Close()
+       ksGateway := RunFakeKeepServer(StubGetHandler{
+               c,
+               hash + "+K@" + uuid,
+               "abc123",
+               http.StatusInternalServerError,
+               []byte("Error")})
+       defer ksGateway.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(
+               map[string]string{"zzzzz-bi6l4-keepdisk0000000": ksLocal.url},
+               nil,
+               map[string]string{uuid: ksGateway.url})
+
+       r, n, uri, err := kc.Get(hash + "+K@" + uuid)
+       c.Assert(err, Equals, nil)
+       defer r.Close()
+       c.Check(n, Equals, int64(3))
+       c.Check(uri, Equals, fmt.Sprintf("%s/%s", ksLocal.url, hash+"+K@"+uuid))
+
+       content, err := ioutil.ReadAll(r)
+       c.Check(err, Equals, nil)
+       c.Check(content, DeepEquals, []byte("foo"))
+}
+
+type BarHandler struct {
+       handled chan string
+}
+
+func (this BarHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       resp.Write([]byte("bar"))
+       this.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+func (s *StandaloneSuite) TestChecksum(c *C) {
+       foohash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+       barhash := fmt.Sprintf("%x", md5.Sum([]byte("bar")))
+
+       st := BarHandler{make(chan string, 1)}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+
+       r, n, _, err := kc.Get(barhash)
+       _, err = ioutil.ReadAll(r)
+       c.Check(n, Equals, int64(3))
+       c.Check(err, Equals, nil)
+
+       <-st.handled
+
+       r, n, _, err = kc.Get(foohash)
+       _, err = ioutil.ReadAll(r)
+       c.Check(n, Equals, int64(3))
+       c.Check(err, Equals, BadChecksum)
+
+       <-st.handled
+}
+
+func (s *StandaloneSuite) TestGetWithFailures(c *C) {
+       content := []byte("waz")
+       hash := fmt.Sprintf("%x", md5.Sum(content))
+
+       fh := Error404Handler{
+               make(chan string, 4)}
+
+       st := StubGetHandler{
+               c,
+               hash,
+               "abc123",
+               http.StatusOK,
+               content}
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       localRoots := make(map[string]string)
+       writableLocalRoots := make(map[string]string)
+
+       ks1 := RunSomeFakeKeepServers(st, 1)
+       ks2 := RunSomeFakeKeepServers(fh, 4)
+
+       for i, k := range ks1 {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+       for i, k := range ks2 {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i+len(ks1))] = k.url
+               writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i+len(ks1))] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
+       kc.Retries = 0
+
+       // This test works only if one of the failing services is
+       // attempted before the succeeding service. Otherwise,
+       // <-fh.handled below will just hang! (Probe order depends on
+       // the choice of block content "waz" and the UUIDs of the fake
+       // servers, so we just tried different strings until we found
+       // an example that passes this Assert.)
+       c.Assert(NewRootSorter(localRoots, hash).GetSortedRoots()[0], Not(Equals), ks1[0].url)
+
+       r, n, url2, err := kc.Get(hash)
+
+       <-fh.handled
+       c.Check(err, Equals, nil)
+       c.Check(n, Equals, int64(3))
+       c.Check(url2, Equals, fmt.Sprintf("%s/%s", ks1[0].url, hash))
+
+       read_content, err2 := ioutil.ReadAll(r)
+       c.Check(err2, Equals, nil)
+       c.Check(read_content, DeepEquals, content)
+}
+
+func (s *ServerRequiredSuite) TestPutGetHead(c *C) {
+       content := []byte("TestPutGetHead")
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, err := MakeKeepClient(arv)
+       c.Assert(err, Equals, nil)
+
+       hash := fmt.Sprintf("%x", md5.Sum(content))
+
+       {
+               n, _, err := kc.Ask(hash)
+               c.Check(err, Equals, BlockNotFound)
+               c.Check(n, Equals, int64(0))
+       }
+       {
+               hash2, replicas, err := kc.PutB(content)
+               c.Check(hash2, Matches, fmt.Sprintf(`%s\+%d\b.*`, hash, len(content)))
+               c.Check(replicas, Equals, 2)
+               c.Check(err, Equals, nil)
+       }
+       {
+               r, n, url2, err := kc.Get(hash)
+               c.Check(err, Equals, nil)
+               c.Check(n, Equals, int64(len(content)))
+               c.Check(url2, Matches, fmt.Sprintf("http://localhost:\\d+/%s", hash))
+
+               read_content, err2 := ioutil.ReadAll(r)
+               c.Check(err2, Equals, nil)
+               c.Check(read_content, DeepEquals, content)
+       }
+       {
+               n, url2, err := kc.Ask(hash)
+               c.Check(err, Equals, nil)
+               c.Check(n, Equals, int64(len(content)))
+               c.Check(url2, Matches, fmt.Sprintf("http://localhost:\\d+/%s", hash))
+       }
+       {
+               loc, err := kc.LocalLocator(hash)
+               c.Check(err, Equals, nil)
+               c.Assert(len(loc) >= 32, Equals, true)
+               c.Check(loc[:32], Equals, hash[:32])
+       }
+       {
+               content := []byte("the perth county conspiracy")
+               loc, err := kc.LocalLocator(fmt.Sprintf("%x+%d+Rzaaaa-abcde@12345", md5.Sum(content), len(content)))
+               c.Check(loc, Equals, "")
+               c.Check(err, ErrorMatches, `.*HEAD .*\+R.*`)
+               c.Check(err, ErrorMatches, `.*HTTP 400.*`)
+       }
+}
+
+type StubProxyHandler struct {
+       handled chan string
+}
+
+func (this StubProxyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       resp.Header().Set("X-Keep-Replicas-Stored", "2")
+       this.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+func (s *StandaloneSuite) TestPutProxy(c *C) {
+       st := StubProxyHandler{make(chan string, 1)}
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+
+       kc.Want_replicas = 2
+       arv.ApiToken = "abc123"
+       localRoots := make(map[string]string)
+       writableLocalRoots := make(map[string]string)
+
+       ks1 := RunSomeFakeKeepServers(st, 1)
+
+       for i, k := range ks1 {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
+
+       _, replicas, err := kc.PutB([]byte("foo"))
+       <-st.handled
+
+       c.Check(err, Equals, nil)
+       c.Check(replicas, Equals, 2)
+}
+
+func (s *StandaloneSuite) TestPutProxyInsufficientReplicas(c *C) {
+       st := StubProxyHandler{make(chan string, 1)}
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+
+       kc.Want_replicas = 3
+       arv.ApiToken = "abc123"
+       localRoots := make(map[string]string)
+       writableLocalRoots := make(map[string]string)
+
+       ks1 := RunSomeFakeKeepServers(st, 1)
+
+       for i, k := range ks1 {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+       kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
+
+       _, replicas, err := kc.PutB([]byte("foo"))
+       <-st.handled
+
+       c.Check(err, FitsTypeOf, InsufficientReplicasError(errors.New("")))
+       c.Check(replicas, Equals, 2)
+}
+
+func (s *StandaloneSuite) TestMakeLocator(c *C) {
+       l, err := MakeLocator("91f372a266fe2bf2823cb8ec7fda31ce+3+Aabcde@12345678")
+       c.Check(err, Equals, nil)
+       c.Check(l.Hash, Equals, "91f372a266fe2bf2823cb8ec7fda31ce")
+       c.Check(l.Size, Equals, 3)
+       c.Check(l.Hints, DeepEquals, []string{"3", "Aabcde@12345678"})
+}
+
+func (s *StandaloneSuite) TestMakeLocatorNoHints(c *C) {
+       l, err := MakeLocator("91f372a266fe2bf2823cb8ec7fda31ce")
+       c.Check(err, Equals, nil)
+       c.Check(l.Hash, Equals, "91f372a266fe2bf2823cb8ec7fda31ce")
+       c.Check(l.Size, Equals, -1)
+       c.Check(l.Hints, DeepEquals, []string{})
+}
+
+func (s *StandaloneSuite) TestMakeLocatorNoSizeHint(c *C) {
+       l, err := MakeLocator("91f372a266fe2bf2823cb8ec7fda31ce+Aabcde@12345678")
+       c.Check(err, Equals, nil)
+       c.Check(l.Hash, Equals, "91f372a266fe2bf2823cb8ec7fda31ce")
+       c.Check(l.Size, Equals, -1)
+       c.Check(l.Hints, DeepEquals, []string{"Aabcde@12345678"})
+}
+
+func (s *StandaloneSuite) TestMakeLocatorPreservesUnrecognizedHints(c *C) {
+       str := "91f372a266fe2bf2823cb8ec7fda31ce+3+Unknown+Kzzzzz+Afoobar"
+       l, err := MakeLocator(str)
+       c.Check(err, Equals, nil)
+       c.Check(l.Hash, Equals, "91f372a266fe2bf2823cb8ec7fda31ce")
+       c.Check(l.Size, Equals, 3)
+       c.Check(l.Hints, DeepEquals, []string{"3", "Unknown", "Kzzzzz", "Afoobar"})
+       c.Check(l.String(), Equals, str)
+}
+
+func (s *StandaloneSuite) TestMakeLocatorInvalidInput(c *C) {
+       _, err := MakeLocator("91f372a266fe2bf2823cb8ec7fda31c")
+       c.Check(err, Equals, InvalidLocatorError)
+}
+
+func (s *StandaloneSuite) TestPutBWant2ReplicasWithOnlyOneWritableLocalRoot(c *C) {
+       hash := Md5String("foo")
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               "",
+               make(chan string, 5)}
+
+       arv, _ := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+
+       kc.Want_replicas = 2
+       arv.ApiToken = "abc123"
+       localRoots := make(map[string]string)
+       writableLocalRoots := make(map[string]string)
+
+       ks := RunSomeFakeKeepServers(st, 5)
+
+       for i, k := range ks {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               if i == 0 {
+                       writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               }
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
+
+       _, replicas, err := kc.PutB([]byte("foo"))
+
+       c.Check(err, FitsTypeOf, InsufficientReplicasError(errors.New("")))
+       c.Check(replicas, Equals, 1)
+
+       c.Check(<-st.handled, Equals, localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", 0)])
+}
+
+func (s *StandaloneSuite) TestPutBWithNoWritableLocalRoots(c *C) {
+       hash := Md5String("foo")
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               "",
+               make(chan string, 5)}
+
+       arv, _ := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+
+       kc.Want_replicas = 2
+       arv.ApiToken = "abc123"
+       localRoots := make(map[string]string)
+       writableLocalRoots := make(map[string]string)
+
+       ks := RunSomeFakeKeepServers(st, 5)
+
+       for i, k := range ks {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
+
+       _, replicas, err := kc.PutB([]byte("foo"))
+
+       c.Check(err, FitsTypeOf, InsufficientReplicasError(errors.New("")))
+       c.Check(replicas, Equals, 0)
+}
+
+type StubGetIndexHandler struct {
+       c              *C
+       expectPath     string
+       expectAPIToken string
+       httpStatus     int
+       body           []byte
+}
+
+func (h StubGetIndexHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       h.c.Check(req.URL.Path, Equals, h.expectPath)
+       h.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", h.expectAPIToken))
+       // Set the Content-Length header before calling WriteHeader;
+       // headers changed after WriteHeader are silently ignored.
+       resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(h.body)))
+       resp.WriteHeader(h.httpStatus)
+       resp.Write(h.body)
+}
+
+func (s *StandaloneSuite) TestGetIndexWithNoPrefix(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubGetIndexHandler{
+               c,
+               "/index",
+               "abc123",
+               http.StatusOK,
+               []byte(hash + "+3 1443559274\n\n")}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, IsNil)
+       kc, err := MakeKeepClient(arv)
+       c.Assert(err, IsNil)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+
+       r, err := kc.GetIndex("x", "")
+       c.Check(err, IsNil)
+
+       content, err2 := ioutil.ReadAll(r)
+       c.Check(err2, Equals, nil)
+       c.Check(content, DeepEquals, st.body[0:len(st.body)-1])
+}
+
+func (s *StandaloneSuite) TestGetIndexWithPrefix(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubGetIndexHandler{
+               c,
+               "/index/" + hash[0:3],
+               "abc123",
+               http.StatusOK,
+               []byte(hash + "+3 1443559274\n\n")}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+
+       r, err := kc.GetIndex("x", hash[0:3])
+       c.Assert(err, Equals, nil)
+
+       content, err2 := ioutil.ReadAll(r)
+       c.Check(err2, Equals, nil)
+       c.Check(content, DeepEquals, st.body[0:len(st.body)-1])
+}
+
+func (s *StandaloneSuite) TestGetIndexIncomplete(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubGetIndexHandler{
+               c,
+               "/index/" + hash[0:3],
+               "abc123",
+               http.StatusOK,
+               []byte(hash)}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+
+       _, err = kc.GetIndex("x", hash[0:3])
+       c.Check(err, Equals, ErrIncompleteIndex)
+}
+
+func (s *StandaloneSuite) TestGetIndexWithNoSuchServer(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubGetIndexHandler{
+               c,
+               "/index/" + hash[0:3],
+               "abc123",
+               http.StatusOK,
+               []byte(hash)}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+
+       _, err = kc.GetIndex("y", hash[0:3])
+       c.Check(err, Equals, ErrNoSuchKeepServer)
+}
+
+func (s *StandaloneSuite) TestGetIndexWithNoSuchPrefix(c *C) {
+       st := StubGetIndexHandler{
+               c,
+               "/index/abcd",
+               "abc123",
+               http.StatusOK,
+               []byte("\n")}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+
+       r, err := kc.GetIndex("x", "abcd")
+       c.Check(err, Equals, nil)
+
+       content, err2 := ioutil.ReadAll(r)
+       c.Check(err2, Equals, nil)
+       c.Check(content, DeepEquals, st.body[0:len(st.body)-1])
+}
+
+func (s *StandaloneSuite) TestPutBRetry(c *C) {
+       st := &FailThenSucceedHandler{
+               handled: make(chan string, 1),
+               successhandler: StubPutHandler{
+                       c,
+                       Md5String("foo"),
+                       "abc123",
+                       "foo",
+                       "",
+                       make(chan string, 5)}}
+
+       arv, _ := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(arv)
+
+       kc.Want_replicas = 2
+       arv.ApiToken = "abc123"
+       localRoots := make(map[string]string)
+       writableLocalRoots := make(map[string]string)
+
+       ks := RunSomeFakeKeepServers(st, 2)
+
+       for i, k := range ks {
+               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
+
+       hash, replicas, err := kc.PutB([]byte("foo"))
+
+       c.Check(err, Equals, nil)
+       c.Check(hash, Equals, "")
+       c.Check(replicas, Equals, 2)
+}
+
+func (s *ServerRequiredSuite) TestMakeKeepClientWithNonDiskTypeService(c *C) {
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, Equals, nil)
+
+       // Add an additional "testblobstore" keepservice
+       blobKeepService := make(arvadosclient.Dict)
+       err = arv.Create("keep_services",
+               arvadosclient.Dict{"keep_service": arvadosclient.Dict{
+                       "service_host": "localhost",
+                       "service_port": "21321",
+                       "service_type": "testblobstore"}},
+               &blobKeepService)
+       c.Assert(err, Equals, nil)
+       defer func() { arv.Delete("keep_services", blobKeepService["uuid"].(string), nil, nil) }()
+       RefreshServiceDiscovery()
+
+       // Make a keepclient and ensure that the testblobstore is included
+       kc, err := MakeKeepClient(arv)
+       c.Assert(err, Equals, nil)
+
+       // verify kc.LocalRoots
+       c.Check(len(kc.LocalRoots()), Equals, 3)
+       for _, root := range kc.LocalRoots() {
+               c.Check(root, Matches, "http://localhost:\\d+")
+       }
+       c.Assert(kc.LocalRoots()[blobKeepService["uuid"].(string)], Not(Equals), "")
+
+       // verify kc.GatewayRoots
+       c.Check(len(kc.GatewayRoots()), Equals, 3)
+       for _, root := range kc.GatewayRoots() {
+               c.Check(root, Matches, "http://localhost:\\d+")
+       }
+       c.Assert(kc.GatewayRoots()[blobKeepService["uuid"].(string)], Not(Equals), "")
+
+       // verify kc.WritableLocalRoots
+       c.Check(len(kc.WritableLocalRoots()), Equals, 3)
+       for _, root := range kc.WritableLocalRoots() {
+               c.Check(root, Matches, "http://localhost:\\d+")
+       }
+       c.Assert(kc.WritableLocalRoots()[blobKeepService["uuid"].(string)], Not(Equals), "")
+
+       c.Assert(kc.replicasPerService, Equals, 0)
+       c.Assert(kc.foundNonDiskSvc, Equals, true)
+       c.Assert(kc.httpClient().(*http.Client).Timeout, Equals, 300*time.Second)
+}
diff --git a/sdk/go/keepclient/perms.go b/sdk/go/keepclient/perms.go
new file mode 100644 (file)
index 0000000..a779833
--- /dev/null
@@ -0,0 +1,109 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Generate and verify permission signatures for Keep locators.
+//
+// See https://dev.arvados.org/projects/arvados/wiki/Keep_locator_format
+
+package keepclient
+
+import (
+       "crypto/hmac"
+       "crypto/sha1"
+       "errors"
+       "fmt"
+       "regexp"
+       "strconv"
+       "strings"
+       "time"
+)
+
+var (
+       // ErrSignatureExpired - a signature was rejected because the
+       // expiry time has passed.
+       ErrSignatureExpired = errors.New("Signature expired")
+       // ErrSignatureInvalid - a signature was rejected because it
+       // was badly formatted or did not match the given secret key.
+       ErrSignatureInvalid = errors.New("Invalid signature")
+       // ErrSignatureMissing - the given locator does not have a
+       // signature hint.
+       ErrSignatureMissing = errors.New("Missing signature")
+)
+
+// makePermSignature generates a SHA-1 HMAC digest for the given blob,
+// token, expiry, and site secret.
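+// The signed message is the concatenation
+// blobHash + "@" + apiToken + "@" + expiry + "@" + blobSignatureTTL.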
+func makePermSignature(blobHash, apiToken, expiry, blobSignatureTTL string, permissionSecret []byte) string {
+       hmac := hmac.New(sha1.New, permissionSecret)
+       hmac.Write([]byte(blobHash))
+       hmac.Write([]byte("@"))
+       hmac.Write([]byte(apiToken))
+       hmac.Write([]byte("@"))
+       hmac.Write([]byte(expiry))
+       hmac.Write([]byte("@"))
+       hmac.Write([]byte(blobSignatureTTL))
+       digest := hmac.Sum(nil)
+       return fmt.Sprintf("%x", digest)
+}
+
+// SignLocator returns blobLocator with a permission signature
+// added. If either permissionSecret or apiToken is empty, blobLocator
+// is returned untouched.
+//
+// This function is intended to be used by system components and admin
+// utilities: userland programs do not know the permissionSecret.
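+//
+// For example (an illustrative sketch; the token, secret, and TTL
+// below are made up):
+//
+//     signed := SignLocator("acbd18db4cc2f85cedef654fccc4a4d8+3",
+//             "my-api-token", time.Now().Add(24*time.Hour),
+//             1209600*time.Second, []byte("my-permission-secret"))
+//     // signed == "acbd18db4cc2f85cedef654fccc4a4d8+3+A<40 hex digits>@<8 hex digits>"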
+func SignLocator(blobLocator, apiToken string, expiry time.Time, blobSignatureTTL time.Duration, permissionSecret []byte) string {
+       if len(permissionSecret) == 0 || apiToken == "" {
+               return blobLocator
+       }
+       // Strip off all hints: only the hash is used to sign.
+       blobHash := strings.Split(blobLocator, "+")[0]
+       timestampHex := fmt.Sprintf("%08x", expiry.Unix())
+       blobSignatureTTLHex := strconv.FormatInt(int64(blobSignatureTTL.Seconds()), 16)
+       return blobLocator +
+               "+A" + makePermSignature(blobHash, apiToken, timestampHex, blobSignatureTTLHex, permissionSecret) +
+               "@" + timestampHex
+}
+
+var SignedLocatorRe = regexp.MustCompile(
+       //1                 2          34                         5   6                  7                 89
+       `^([[:xdigit:]]{32})(\+[0-9]+)?((\+[B-Z][A-Za-z0-9@_-]*)*)(\+A([[:xdigit:]]{40})@([[:xdigit:]]{8}))((\+[B-Z][A-Za-z0-9@_-]*)*)$`)
+
+// VerifySignature returns nil if the signature on the signedLocator
+// can be verified using the given apiToken. Otherwise it returns
+// ErrSignatureExpired (if the signature's expiry time has passed,
+// which is something the client could have figured out
+// independently), ErrSignatureMissing (if there is no signature hint
+// at all), or ErrSignatureInvalid (if the signature is present but
+// badly formatted or incorrect).
+//
+// This function is intended to be used by system components and admin
+// utilities: userland programs do not know the permissionSecret.
+func VerifySignature(signedLocator, apiToken string, blobSignatureTTL time.Duration, permissionSecret []byte) error {
+       matches := SignedLocatorRe.FindStringSubmatch(signedLocator)
+       if matches == nil {
+               return ErrSignatureMissing
+       }
+       blobHash := matches[1]
+       signatureHex := matches[6]
+       expiryHex := matches[7]
+       if expiryTime, err := parseHexTimestamp(expiryHex); err != nil {
+               return ErrSignatureInvalid
+       } else if expiryTime.Before(time.Now()) {
+               return ErrSignatureExpired
+       }
+       blobSignatureTTLHex := strconv.FormatInt(int64(blobSignatureTTL.Seconds()), 16)
+       if signatureHex != makePermSignature(blobHash, apiToken, expiryHex, blobSignatureTTLHex, permissionSecret) {
+               return ErrSignatureInvalid
+       }
+       return nil
+}
+
+func parseHexTimestamp(timestampHex string) (ts time.Time, err error) {
+       if tsInt, e := strconv.ParseInt(timestampHex, 16, 0); e == nil {
+               ts = time.Unix(tsInt, 0)
+       } else {
+               err = e
+       }
+       return ts, err
+}
diff --git a/sdk/go/keepclient/perms_test.go b/sdk/go/keepclient/perms_test.go
new file mode 100644 (file)
index 0000000..f8107f4
--- /dev/null
@@ -0,0 +1,103 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package keepclient
+
+import (
+       "testing"
+       "time"
+)
+
+const (
+       knownHash    = "acbd18db4cc2f85cedef654fccc4a4d8"
+       knownLocator = knownHash + "+3"
+       knownToken   = "hocfupkn2pjhrpgp2vxv8rsku7tvtx49arbc9s4bvu7p7wxqvk"
+       knownKey     = "13u9fkuccnboeewr0ne3mvapk28epf68a3bhj9q8sb4l6e4e5mkk" +
+               "p6nhj2mmpscgu1zze5h5enydxfe3j215024u16ij4hjaiqs5u4pzsl3nczmaoxnc" +
+               "ljkm4875xqn4xv058koz3vkptmzhyheiy6wzevzjmdvxhvcqsvr5abhl15c2d4o4" +
+               "jhl0s91lojy1mtrzqqvprqcverls0xvy9vai9t1l1lvvazpuadafm71jl4mrwq2y" +
+               "gokee3eamvjy8qq1fvy238838enjmy5wzy2md7yvsitp5vztft6j4q866efym7e6" +
+               "vu5wm9fpnwjyxfldw3vbo01mgjs75rgo7qioh8z8ij7jpyp8508okhgbbex3ceei" +
+               "786u5rw2a9gx743dj3fgq2irk"
+       knownSignature     = "89118b78732c33104a4d6231e8b5a5fa1e4301e3"
+       knownTimestamp     = "7fffffff"
+       knownSigHint       = "+A" + knownSignature + "@" + knownTimestamp
+       knownSignedLocator = knownLocator + knownSigHint
+       blobSignatureTTL   = 1209600 * time.Second
+)
+
+func TestSignLocator(t *testing.T) {
+       if ts, err := parseHexTimestamp(knownTimestamp); err != nil {
+               t.Errorf("bad knownTimestamp %s", knownTimestamp)
+       } else {
+               if knownSignedLocator != SignLocator(knownLocator, knownToken, ts, blobSignatureTTL, []byte(knownKey)) {
+                       t.Fail()
+               }
+       }
+}
+
+func TestVerifySignature(t *testing.T) {
+       if VerifySignature(knownSignedLocator, knownToken, blobSignatureTTL, []byte(knownKey)) != nil {
+               t.Fail()
+       }
+}
+
+func TestVerifySignatureExtraHints(t *testing.T) {
+       if VerifySignature(knownLocator+"+K@xyzzy"+knownSigHint, knownToken, blobSignatureTTL, []byte(knownKey)) != nil {
+               t.Fatal("Verify cannot handle hint before permission signature")
+       }
+
+       if VerifySignature(knownLocator+knownSigHint+"+Zfoo", knownToken, blobSignatureTTL, []byte(knownKey)) != nil {
+               t.Fatal("Verify cannot handle hint after permission signature")
+       }
+
+       if VerifySignature(knownLocator+"+K@xyzzy"+knownSigHint+"+Zfoo", knownToken, blobSignatureTTL, []byte(knownKey)) != nil {
+               t.Fatal("Verify cannot handle hints around permission signature")
+       }
+}
+
+// The size hint on the locator string should not affect signature validation.
+func TestVerifySignatureWrongSize(t *testing.T) {
+       if VerifySignature(knownHash+"+999999"+knownSigHint, knownToken, blobSignatureTTL, []byte(knownKey)) != nil {
+               t.Fatal("Verify cannot handle incorrect size hint")
+       }
+
+       if VerifySignature(knownHash+knownSigHint, knownToken, blobSignatureTTL, []byte(knownKey)) != nil {
+               t.Fatal("Verify cannot handle missing size hint")
+       }
+}
+
+func TestVerifySignatureBadSig(t *testing.T) {
+       badLocator := knownLocator + "+Aaaaaaaaaaaaaaaa@" + knownTimestamp
+       if VerifySignature(badLocator, knownToken, blobSignatureTTL, []byte(knownKey)) != ErrSignatureMissing {
+               t.Fail()
+       }
+}
+
+func TestVerifySignatureBadTimestamp(t *testing.T) {
+       badLocator := knownLocator + "+A" + knownSignature + "@OOOOOOOl"
+       if VerifySignature(badLocator, knownToken, blobSignatureTTL, []byte(knownKey)) != ErrSignatureMissing {
+               t.Fail()
+       }
+}
+
+func TestVerifySignatureBadSecret(t *testing.T) {
+       if VerifySignature(knownSignedLocator, knownToken, blobSignatureTTL, []byte("00000000000000000000")) != ErrSignatureInvalid {
+               t.Fail()
+       }
+}
+
+func TestVerifySignatureBadToken(t *testing.T) {
+       if VerifySignature(knownSignedLocator, "00000000", blobSignatureTTL, []byte(knownKey)) != ErrSignatureInvalid {
+               t.Fail()
+       }
+}
+
+func TestVerifySignatureExpired(t *testing.T) {
+       yesterday := time.Now().AddDate(0, 0, -1)
+       expiredLocator := SignLocator(knownHash, knownToken, yesterday, blobSignatureTTL, []byte(knownKey))
+       if VerifySignature(expiredLocator, knownToken, blobSignatureTTL, []byte(knownKey)) != ErrSignatureExpired {
+               t.Fail()
+       }
+}
diff --git a/sdk/go/keepclient/root_sorter.go b/sdk/go/keepclient/root_sorter.go
new file mode 100644 (file)
index 0000000..afeb802
--- /dev/null
@@ -0,0 +1,61 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package keepclient
+
+import (
+       "sort"
+)
+
+type RootSorter struct {
+       root   []string
+       weight []string
+       order  []int
+}
+
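+// NewRootSorter sorts the given service roots for one block hash using
+// highest-random-weight (rendezvous) hashing: each root's weight is the
+// MD5 of the hash concatenated with the unique part of the service
+// UUID, and the heaviest root sorts first. Every client that computes
+// this ordering therefore probes the same servers in the same order
+// for a given block.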
+func NewRootSorter(serviceRoots map[string]string, hash string) *RootSorter {
+       rs := new(RootSorter)
+       rs.root = make([]string, len(serviceRoots))
+       rs.weight = make([]string, len(serviceRoots))
+       rs.order = make([]int, len(serviceRoots))
+       i := 0
+       for uuid, root := range serviceRoots {
+               rs.root[i] = root
+               rs.weight[i] = rs.getWeight(hash, uuid)
+               rs.order[i] = i
+               i++
+       }
+       sort.Sort(rs)
+       return rs
+}
+
+func (rs RootSorter) getWeight(hash string, uuid string) string {
+       if len(uuid) == 27 {
+               return Md5String(hash + uuid[12:])
+       } else {
+               // Non-standard UUID lengths are only useful for testing,
+               // e.g., a set of one service root keyed by an arbitrary string.
+               return Md5String(hash + uuid)
+       }
+}
+
+func (rs RootSorter) GetSortedRoots() []string {
+       sorted := make([]string, len(rs.order))
+       for i := range rs.order {
+               sorted[i] = rs.root[rs.order[i]]
+       }
+       return sorted
+}
+
+// Less is really More here: the heaviest root will be at the front of the list.
+func (rs RootSorter) Less(i, j int) bool {
+       return rs.weight[rs.order[j]] < rs.weight[rs.order[i]]
+}
+
+func (rs RootSorter) Len() int {
+       return len(rs.order)
+}
+
+func (rs RootSorter) Swap(i, j int) {
+       sort.IntSlice(rs.order).Swap(i, j)
+}
diff --git a/sdk/go/keepclient/root_sorter_test.go b/sdk/go/keepclient/root_sorter_test.go
new file mode 100644 (file)
index 0000000..bd3bb0b
--- /dev/null
@@ -0,0 +1,63 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package keepclient
+
+import (
+       "fmt"
+       . "gopkg.in/check.v1"
+       "strconv"
+       "strings"
+)
+
+type RootSorterSuite struct{}
+
+var _ = Suite(&RootSorterSuite{})
+
+func FakeSvcRoot(i uint64) string {
+       return fmt.Sprintf("https://%x.svc/", i)
+}
+
+func FakeSvcUuid(i uint64) string {
+       return fmt.Sprintf("zzzzz-bi6l4-%015x", i)
+}
+
+func FakeServiceRoots(n uint64) map[string]string {
+       sr := map[string]string{}
+       for i := uint64(0); i < n; i++ {
+               sr[FakeSvcUuid(i)] = FakeSvcRoot(i)
+       }
+       return sr
+}
+
+func (*RootSorterSuite) TestEmptyRoots(c *C) {
+       rs := NewRootSorter(map[string]string{}, Md5String("foo"))
+       c.Check(rs.GetSortedRoots(), DeepEquals, []string{})
+}
+
+func (*RootSorterSuite) TestJustOneRoot(c *C) {
+       rs := NewRootSorter(FakeServiceRoots(1), Md5String("foo"))
+       c.Check(rs.GetSortedRoots(), DeepEquals, []string{FakeSvcRoot(0)})
+}
+
+func (*RootSorterSuite) TestReferenceSet(c *C) {
+       fakeroots := FakeServiceRoots(16)
+       // These reference probe orders are explained further in
+       // ../../python/tests/test_keep_client.py:
+       expected_orders := []string{
+               "3eab2d5fc9681074",
+               "097dba52e648f1c3",
+               "c5b4e023f8a7d691",
+               "9d81c02e76a3bf54",
+       }
+       for h, expected_order := range expected_orders {
+               hash := Md5String(fmt.Sprintf("%064x", h))
+               roots := NewRootSorter(fakeroots, hash).GetSortedRoots()
+               for i, svc_id_s := range strings.Split(expected_order, "") {
+                       svc_id, err := strconv.ParseUint(svc_id_s, 16, 64)
+                       c.Assert(err, Equals, nil)
+                       c.Check(roots[i], Equals, FakeSvcRoot(svc_id))
+               }
+       }
+}
diff --git a/sdk/go/keepclient/support.go b/sdk/go/keepclient/support.go
new file mode 100644 (file)
index 0000000..e589593
--- /dev/null
@@ -0,0 +1,225 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package keepclient
+
+import (
+       "crypto/md5"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "net/http"
+       "os"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+)
+
+// DebugPrintf is the function used to emit debug messages. The easiest
+// way to enable keepclient debug messages in your application is to
+// assign log.Printf to DebugPrintf.
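+//
+// For example (an illustrative sketch; assumes the caller imports the
+// standard library "log" package):
+//
+//     keepclient.DebugPrintf = log.Printf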
+var DebugPrintf = func(string, ...interface{}) {}
+
+func init() {
+       if arvadosclient.StringBool(os.Getenv("ARVADOS_DEBUG")) {
+               DebugPrintf = log.Printf
+       }
+}
+
+type keepService struct {
+       Uuid     string `json:"uuid"`
+       Hostname string `json:"service_host"`
+       Port     int    `json:"service_port"`
+       SSL      bool   `json:"service_ssl_flag"`
+       SvcType  string `json:"service_type"`
+       ReadOnly bool   `json:"read_only"`
+}
+
+// Md5String returns the hex-encoded MD5 hash of the given string.
+func Md5String(s string) string {
+       return fmt.Sprintf("%x", md5.Sum([]byte(s)))
+}
+
+type svcList struct {
+       Items []keepService `json:"items"`
+}
+
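+// uploadStatus reports the outcome of a single uploadToKeepServer call.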
+type uploadStatus struct {
+       err             error
+       url             string
+       statusCode      int
+       replicas_stored int
+       response        string
+}
+
+func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.Reader,
+       upload_status chan<- uploadStatus, expectedLength int64, reqid string) {
+
+       var req *http.Request
+       var err error
+       var url = fmt.Sprintf("%s/%s", host, hash)
+       if req, err = http.NewRequest("PUT", url, nil); err != nil {
+               DebugPrintf("DEBUG: [%s] Error creating request PUT %v error: %v", reqid, url, err.Error())
+               upload_status <- uploadStatus{err, url, 0, 0, ""}
+               return
+       }
+
+       req.ContentLength = expectedLength
+       if expectedLength > 0 {
+               req.Body = ioutil.NopCloser(body)
+       } else {
+               // "For client requests, a value of 0 means unknown if
+               // Body is not nil."  In this case we do want the body
+               // to be empty, so don't set req.Body.
+       }
+
+       req.Header.Add("X-Request-Id", reqid)
+       req.Header.Add("Authorization", "OAuth2 "+this.Arvados.ApiToken)
+       req.Header.Add("Content-Type", "application/octet-stream")
+       req.Header.Add(X_Keep_Desired_Replicas, fmt.Sprint(this.Want_replicas))
+       if len(this.StorageClasses) > 0 {
+               req.Header.Add("X-Keep-Storage-Classes", strings.Join(this.StorageClasses, ", "))
+       }
+
+       var resp *http.Response
+       if resp, err = this.httpClient().Do(req); err != nil {
+               DebugPrintf("DEBUG: [%s] Upload failed %v error: %v", reqid, url, err.Error())
+               upload_status <- uploadStatus{err, url, 0, 0, err.Error()}
+               return
+       }
+
+       rep := 1
+       if xr := resp.Header.Get(X_Keep_Replicas_Stored); xr != "" {
+               fmt.Sscanf(xr, "%d", &rep)
+       }
+
+       defer resp.Body.Close()
+       defer io.Copy(ioutil.Discard, resp.Body)
+
+       respbody, err2 := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: 4096})
+       response := strings.TrimSpace(string(respbody))
+       if err2 != nil && err2 != io.EOF {
+               DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, err2.Error(), response)
+               upload_status <- uploadStatus{err2, url, resp.StatusCode, rep, response}
+       } else if resp.StatusCode == http.StatusOK {
+               DebugPrintf("DEBUG: [%s] Upload %v success", reqid, url)
+               upload_status <- uploadStatus{nil, url, resp.StatusCode, rep, response}
+       } else {
+               if resp.StatusCode >= 300 && response == "" {
+                       response = resp.Status
+               }
+               DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, resp.StatusCode, response)
+               upload_status <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, response}
+       }
+}
+
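+// putReplicas uploads the block identified by hash to writable services
+// in rendezvous order until Want_replicas replicas have been stored,
+// retrying transient failures up to Retries times. getReader must
+// return a fresh reader for each upload attempt.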
+func (this *KeepClient) putReplicas(
+       hash string,
+       getReader func() io.Reader,
+       expectedLength int64) (locator string, replicas int, err error) {
+
+       reqid := this.getRequestID()
+
+       // Calculate the ordering for uploading to servers
+       sv := NewRootSorter(this.WritableLocalRoots(), hash).GetSortedRoots()
+
+       // The next server to try contacting
+       next_server := 0
+
+       // The number of active writers
+       active := 0
+
+       // Used to communicate status from the upload goroutines
+       upload_status := make(chan uploadStatus)
+       defer func() {
+               // Wait for any abandoned uploads (e.g., we started
+               // two uploads and the first replied with replicas=2)
+               // to finish before closing the status channel.
+               go func() {
+                       for active > 0 {
+                               <-upload_status
+                       }
+                       close(upload_status)
+               }()
+       }()
+
+       replicasDone := 0
+       replicasTodo := this.Want_replicas
+
+       replicasPerThread := this.replicasPerService
+       if replicasPerThread < 1 {
+               // unlimited or unknown
+               replicasPerThread = replicasTodo
+       }
+
+       retriesRemaining := 1 + this.Retries
+       var retryServers []string
+
+       lastError := make(map[string]string)
+
+       for retriesRemaining > 0 {
+               retriesRemaining -= 1
+               next_server = 0
+               retryServers = []string{}
+               for replicasTodo > 0 {
+                       for active*replicasPerThread < replicasTodo {
+                               // Start some upload requests
+                               if next_server < len(sv) {
+                                       DebugPrintf("DEBUG: [%s] Begin upload %s to %s", reqid, hash, sv[next_server])
+                                       go this.uploadToKeepServer(sv[next_server], hash, getReader(), upload_status, expectedLength, reqid)
+                                       next_server += 1
+                                       active += 1
+                               } else {
+                                       if active == 0 && retriesRemaining == 0 {
+                                               msg := "Could not write sufficient replicas: "
+                                               for _, resp := range lastError {
+                                                       msg += resp + "; "
+                                               }
+                                               msg = msg[:len(msg)-2]
+                                               return locator, replicasDone, InsufficientReplicasError(errors.New(msg))
+                                       } else {
+                                               break
+                                       }
+                               }
+                       }
+                       DebugPrintf("DEBUG: [%s] Replicas remaining to write: %v active uploads: %v",
+                               reqid, replicasTodo, active)
+
+                       // Now wait for something to happen.
+                       if active > 0 {
+                               status := <-upload_status
+                               active -= 1
+
+                               if status.statusCode == 200 {
+                                       // good news!
+                                       replicasDone += status.replicas_stored
+                                       replicasTodo -= status.replicas_stored
+                                       locator = status.response
+                                       delete(lastError, status.url)
+                               } else {
+                                       msg := fmt.Sprintf("[%d] %s", status.statusCode, status.response)
+                                       if len(msg) > 100 {
+                                               msg = msg[:100]
+                                       }
+                                       lastError[status.url] = msg
+                               }
+
+                               if status.statusCode == 0 || status.statusCode == 408 || status.statusCode == 429 ||
+                                       (status.statusCode >= 500 && status.statusCode != 503) {
+                                       // Timeout, too many requests, or other server side failure
+                                       // Do not retry when status code is 503, which means the keep server is full
+                                       retryServers = append(retryServers, status.url[0:strings.LastIndex(status.url, "/")])
+                               }
+                       } else {
+                               break
+                       }
+               }
+
+               sv = retryServers
+       }
+
+       return locator, replicasDone, nil
+}
diff --git a/sdk/go/manifest/manifest.go b/sdk/go/manifest/manifest.go
new file mode 100644 (file)
index 0000000..a517c06
--- /dev/null
@@ -0,0 +1,556 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+/* Deals with parsing Manifest Text. */
+
+// Inspired by the Manifest class in arvados/sdk/ruby/lib/arvados/keep.rb
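+//
+// Each manifest line ("stream") has the form
+//
+//     <stream name> <block locator> ... <position:size:filename> ...
+//
+// for example: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt"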
+
+package manifest
+
+import (
+       "errors"
+       "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/blockdigest"
+       "path"
+       "regexp"
+       "sort"
+       "strconv"
+       "strings"
+)
+
+var ErrInvalidToken = errors.New("Invalid token")
+
+type Manifest struct {
+       Text string
+       Err  error
+}
+
+type BlockLocator struct {
+       Digest blockdigest.BlockDigest
+       Size   int
+       Hints  []string
+}
+
+// FileSegment is a portion of a file that is contained within a
+// single block.
+type FileSegment struct {
+       Locator string
+       // Offset (within this block) of this data segment
+       Offset int
+       Len    int
+}
+
+// FileStreamSegment is a portion of a file described as a segment of a stream.
+type FileStreamSegment struct {
+       SegPos uint64
+       SegLen uint64
+       Name   string
+}
+
+// ManifestStream represents a single line ("stream") from a manifest.
+type ManifestStream struct {
+       StreamName         string
+       Blocks             []string
+       blockOffsets       []uint64
+       FileStreamSegments []FileStreamSegment
+       Err                error
+}
+
+// Array of segments referencing file content
+type segmentedFile []FileSegment
+
+// Map of files to list of file segments referencing file content
+type segmentedStream map[string]segmentedFile
+
+// Map of streams
+type segmentedManifest map[string]segmentedStream
+
+var escapeSeq = regexp.MustCompile(`\\([0-9]{3}|\\)`)
+
+func unescapeSeq(seq string) string {
+       if seq == `\\` {
+               return `\`
+       }
+       i, err := strconv.ParseUint(seq[1:], 8, 8)
+       if err != nil {
+               // Invalid escape sequence: can't unescape.
+               return seq
+       }
+       return string([]byte{byte(i)})
+}
+
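+// EscapeName escapes characters that would break manifest tokenization:
+// every byte with ASCII code <= 32 (space, newline, etc.) is rewritten
+// as a three-digit octal escape, so "a b" becomes `a\040b`.
+// UnescapeName reverses the transformation.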
+func EscapeName(s string) string {
+       raw := []byte(s)
+       escaped := make([]byte, 0, len(s))
+       for _, c := range raw {
+               if c <= 32 {
+                       oct := fmt.Sprintf("\\%03o", c)
+                       escaped = append(escaped, []byte(oct)...)
+               } else {
+                       escaped = append(escaped, c)
+               }
+       }
+       return string(escaped)
+}
+
+func UnescapeName(s string) string {
+       return escapeSeq.ReplaceAllStringFunc(s, unescapeSeq)
+}
+
+func ParseBlockLocator(s string) (b BlockLocator, err error) {
+       if !blockdigest.LocatorPattern.MatchString(s) {
+               err = fmt.Errorf("String \"%s\" does not match BlockLocator pattern "+
+                       "\"%s\".",
+                       s,
+                       blockdigest.LocatorPattern.String())
+       } else {
+               tokens := strings.Split(s, "+")
+               var blockSize int64
+               var blockDigest blockdigest.BlockDigest
+               // We expect both of the following to succeed since LocatorPattern
+               // restricts the strings appropriately.
+               blockDigest, err = blockdigest.FromString(tokens[0])
+               if err != nil {
+                       return
+               }
+               blockSize, err = strconv.ParseInt(tokens[1], 10, 0)
+               if err != nil {
+                       return
+               }
+               b.Digest = blockDigest
+               b.Size = int(blockSize)
+               b.Hints = tokens[2:]
+       }
+       return
+}
+
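+// parseFileStreamSegment parses a manifest file token of the form
+// "position:size:filename" (e.g. "0:3:foo.txt"), unescaping the
+// filename.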
+func parseFileStreamSegment(tok string) (ft FileStreamSegment, err error) {
+       parts := strings.SplitN(tok, ":", 3)
+       if len(parts) != 3 {
+               err = ErrInvalidToken
+               return
+       }
+       ft.SegPos, err = strconv.ParseUint(parts[0], 10, 64)
+       if err != nil {
+               return
+       }
+       ft.SegLen, err = strconv.ParseUint(parts[1], 10, 64)
+       if err != nil {
+               return
+       }
+       ft.Name = UnescapeName(parts[2])
+       return
+}
+
+func (s *ManifestStream) FileSegmentIterByName(filepath string) <-chan *FileSegment {
+       ch := make(chan *FileSegment, 64)
+       go func() {
+               s.sendFileSegmentIterByName(filepath, ch)
+               close(ch)
+       }()
+       return ch
+}
+
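+// firstBlock returns the index of the block whose byte range contains
+// range_start. offsets must hold len(blocks)+1 monotonically increasing
+// values, the last entry being the total stream length; -1 is returned
+// when range_start falls outside the stream.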
+func firstBlock(offsets []uint64, range_start uint64) int {
+       // range_start/block_start is the inclusive lower bound
+       // range_end/block_end is the exclusive upper bound
+
+       hi := len(offsets) - 1
+       var lo int
+       i := ((hi + lo) / 2)
+       block_start := offsets[i]
+       block_end := offsets[i+1]
+
+       // perform a binary search for the first block
+       // assumes that all of the blocks are contiguous, so range_start is guaranteed
+       // to either fall into the range of a block or be outside the block range entirely
+       for !(range_start >= block_start && range_start < block_end) {
+               if lo == i {
+                       // must be out of range, fail
+                       return -1
+               }
+               if range_start > block_start {
+                       lo = i
+               } else {
+                       hi = i
+               }
+               i = ((hi + lo) / 2)
+               block_start = offsets[i]
+               block_end = offsets[i+1]
+       }
+       return i
+}
+
+func (s *ManifestStream) sendFileSegmentIterByName(filepath string, ch chan<- *FileSegment) {
+       // This is what streamName+"/"+fileName will look like:
+       target := fixStreamName(filepath)
+       for _, fTok := range s.FileStreamSegments {
+               wantPos := fTok.SegPos
+               wantLen := fTok.SegLen
+               name := fTok.Name
+
+               if s.StreamName+"/"+name != target {
+                       continue
+               }
+               if wantLen == 0 {
+                       ch <- &FileSegment{Locator: "d41d8cd98f00b204e9800998ecf8427e+0", Offset: 0, Len: 0}
+                       continue
+               }
+
+               // Binary search to determine first block in the stream
+               i := firstBlock(s.blockOffsets, wantPos)
+               if i == -1 {
+                       // Shouldn't happen, file segments are checked in parseManifestStream
+                       panic(fmt.Sprintf("File segment %v extends past end of stream", fTok))
+               }
+               for ; i < len(s.Blocks); i++ {
+                       blockPos := s.blockOffsets[i]
+                       blockEnd := s.blockOffsets[i+1]
+                       if blockEnd <= wantPos {
+                               // Shouldn't happen: firstBlock() should start
+                               // us on the right block, so if this triggers
+                               // there is a bug.
+                               panic(fmt.Sprintf("Block end %v comes before start of file segment %v", blockEnd, wantPos))
+                       }
+                       if blockPos >= wantPos+wantLen {
+                               // current block comes after current file span
+                               break
+                       }
+
+                       fseg := FileSegment{
+                               Locator: s.Blocks[i],
+                               Offset:  0,
+                               Len:     int(blockEnd - blockPos),
+                       }
+                       if blockPos < wantPos {
+                               fseg.Offset = int(wantPos - blockPos)
+                               fseg.Len -= fseg.Offset
+                       }
+                       if blockEnd > wantPos+wantLen {
+                               fseg.Len = int(wantPos+wantLen-blockPos) - fseg.Offset
+                       }
+                       ch <- &fseg
+               }
+       }
+}
+
+func parseManifestStream(s string) (m ManifestStream) {
+       tokens := strings.Split(s, " ")
+
+       m.StreamName = UnescapeName(tokens[0])
+       if m.StreamName != "." && !strings.HasPrefix(m.StreamName, "./") {
+               m.Err = fmt.Errorf("Invalid stream name: %s", m.StreamName)
+               return
+       }
+
+       tokens = tokens[1:]
+       var i int
+       for i = 0; i < len(tokens); i++ {
+               if !blockdigest.IsBlockLocator(tokens[i]) {
+                       break
+               }
+       }
+       m.Blocks = tokens[:i]
+       fileTokens := tokens[i:]
+
+       if len(m.Blocks) == 0 {
+               m.Err = fmt.Errorf("No block locators found")
+               return
+       }
+
+       m.blockOffsets = make([]uint64, len(m.Blocks)+1)
+       var streamoffset uint64
+       for i, b := range m.Blocks {
+               bl, err := ParseBlockLocator(b)
+               if err != nil {
+                       m.Err = err
+                       return
+               }
+               m.blockOffsets[i] = streamoffset
+               streamoffset += uint64(bl.Size)
+       }
+       m.blockOffsets[len(m.Blocks)] = streamoffset
+
+       if len(fileTokens) == 0 {
+               m.Err = fmt.Errorf("No file tokens found")
+               return
+       }
+
+       for _, ft := range fileTokens {
+               pft, err := parseFileStreamSegment(ft)
+               if err != nil {
+                       m.Err = fmt.Errorf("Invalid file token: %s", ft)
+                       break
+               }
+               if pft.SegPos+pft.SegLen > streamoffset {
+                       m.Err = fmt.Errorf("File segment %s extends past end of stream %d", ft, streamoffset)
+                       break
+               }
+               m.FileStreamSegments = append(m.FileStreamSegments, pft)
+       }
+
+       return
+}
+
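+// fixStreamName canonicalizes a stream name so that it is either "."
+// or starts with "./": both "/foo/bar" and "foo/bar" become
+// "./foo/bar".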
+func fixStreamName(sn string) string {
+       sn = path.Clean(sn)
+       if strings.HasPrefix(sn, "/") {
+               sn = "." + sn
+       } else if sn != "." {
+               sn = "./" + sn
+       }
+       return sn
+}
+
+func splitPath(srcpath string) (streamname, filename string) {
+       pathIdx := strings.LastIndex(srcpath, "/")
+       if pathIdx >= 0 {
+               streamname = srcpath[0:pathIdx]
+               filename = srcpath[pathIdx+1:]
+       } else {
+               streamname = srcpath
+               filename = ""
+       }
+       return
+}
+
+func (m *Manifest) segment() (*segmentedManifest, error) {
+       files := make(segmentedManifest)
+
+       for stream := range m.StreamIter() {
+               if stream.Err != nil {
+                       // Stream has an error
+                       return nil, stream.Err
+               }
+               currentStreamfiles := make(map[string]bool)
+               for _, f := range stream.FileStreamSegments {
+                       sn := stream.StreamName
+                       if strings.HasSuffix(sn, "/") {
+                               sn = sn[0 : len(sn)-1]
+                       }
+                       path := sn + "/" + f.Name
+                       streamname, filename := splitPath(path)
+                       if files[streamname] == nil {
+                               files[streamname] = make(segmentedStream)
+                       }
+                       if !currentStreamfiles[path] {
+                               segs := files[streamname][filename]
+                               for seg := range stream.FileSegmentIterByName(path) {
+                                       if seg.Len > 0 {
+                                               segs = append(segs, *seg)
+                                       }
+                               }
+                               files[streamname][filename] = segs
+                               currentStreamfiles[path] = true
+                       }
+               }
+       }
+
+       return &files, nil
+}
+
+func (stream segmentedStream) normalizedText(name string) string {
+       var sortedfiles []string
+       for k := range stream {
+               sortedfiles = append(sortedfiles, k)
+       }
+       sort.Strings(sortedfiles)
+
+       stream_tokens := []string{EscapeName(name)}
+
+       blocks := make(map[blockdigest.BlockDigest]int64)
+       var streamoffset int64
+
+       // Go through each file and add each referenced block exactly once.
+       for _, streamfile := range sortedfiles {
+               for _, segment := range stream[streamfile] {
+                       b, _ := ParseBlockLocator(segment.Locator)
+                       if _, ok := blocks[b.Digest]; !ok {
+                               stream_tokens = append(stream_tokens, segment.Locator)
+                               blocks[b.Digest] = streamoffset
+                               streamoffset += int64(b.Size)
+                       }
+               }
+       }
+
+       if len(stream_tokens) == 1 {
+               stream_tokens = append(stream_tokens, "d41d8cd98f00b204e9800998ecf8427e+0")
+       }
+
+       for _, streamfile := range sortedfiles {
+               // Add in file segments
+               span_start := int64(-1)
+               span_end := int64(0)
+               fout := EscapeName(streamfile)
+               for _, segment := range stream[streamfile] {
+                       // Collapse adjacent segments
+                       b, _ := ParseBlockLocator(segment.Locator)
+                       streamoffset = blocks[b.Digest] + int64(segment.Offset)
+                       if span_start == -1 {
+                               span_start = streamoffset
+                               span_end = streamoffset + int64(segment.Len)
+                       } else {
+                               if streamoffset == span_end {
+                                       span_end += int64(segment.Len)
+                               } else {
+                                       stream_tokens = append(stream_tokens, fmt.Sprintf("%d:%d:%s", span_start, span_end-span_start, fout))
+                                       span_start = streamoffset
+                                       span_end = streamoffset + int64(segment.Len)
+                               }
+                       }
+               }
+
+               if span_start != -1 {
+                       stream_tokens = append(stream_tokens, fmt.Sprintf("%d:%d:%s", span_start, span_end-span_start, fout))
+               }
+
+               if len(stream[streamfile]) == 0 {
+                       stream_tokens = append(stream_tokens, fmt.Sprintf("0:0:%s", fout))
+               }
+       }
+
+       return strings.Join(stream_tokens, " ") + "\n"
+}
+
+func (m segmentedManifest) manifestTextForPath(srcpath, relocate string) string {
+       srcpath = fixStreamName(srcpath)
+
+       var suffix string
+       if strings.HasSuffix(relocate, "/") {
+               suffix = "/"
+       }
+       relocate = fixStreamName(relocate) + suffix
+
+       streamname, filename := splitPath(srcpath)
+
+       if stream, ok := m[streamname]; ok {
+               // check if it refers to a single file in a stream
+               filesegs, okfile := stream[filename]
+               if okfile {
+                       newstream := make(segmentedStream)
+                       relocate_stream, relocate_filename := splitPath(relocate)
+                       if relocate_filename == "" {
+                               relocate_filename = filename
+                       }
+                       newstream[relocate_filename] = filesegs
+                       return newstream.normalizedText(relocate_stream)
+               }
+       }
+
+       // Going to extract multiple streams
+       prefix := srcpath + "/"
+
+       if strings.HasSuffix(relocate, "/") {
+               relocate = relocate[0 : len(relocate)-1]
+       }
+
+       var sortedstreams []string
+       for k := range m {
+               sortedstreams = append(sortedstreams, k)
+       }
+       sort.Strings(sortedstreams)
+
+       manifest := ""
+       for _, k := range sortedstreams {
+               if strings.HasPrefix(k, prefix) || k == srcpath {
+                       manifest += m[k].normalizedText(relocate + k[len(srcpath):])
+               }
+       }
+       return manifest
+}
+
+// Extract extracts some or all of the manifest and returns the extracted
+// portion as a normalized manifest.  This is a Swiss Army knife function
+// that can be used in several ways:
+//
+// If 'srcpath' and 'relocate' are '.' it simply returns an equivalent manifest
+// in normalized form.
+//
+//   Extract(".", ".")  // return the entire normalized manifest text
+//
+// If 'srcpath' points to a single file, it returns manifest text for just that file.
+// The value of "relocate" can be used to rename the file or change its stream.
+//
+//   Extract("./foo", ".")          // extract file "foo" and put it in stream "."
+//   Extract("./foo", "./bar")      // extract file "foo", rename it to "bar" in stream "."
+//   Extract("./foo", "./bar/")     // extract file "foo", rename it to "./bar/foo"
+//   Extract("./foo", "./bar/baz")  // extract file "foo", rename it to "./bar/baz"
+//
+// Otherwise it returns the manifest text for all streams with the prefix
+// "srcpath" and places them under the path in "relocate".
+//
+//   Extract("./stream", ".")      // extract "./stream" to "." and "./stream/subdir" to "./subdir"
+//   Extract("./stream", "./bar")  // extract "./stream" to "./bar" and "./stream/subdir" to "./bar/subdir"
+func (m Manifest) Extract(srcpath, relocate string) (ret Manifest) {
+       segmented, err := m.segment()
+       if err != nil {
+               ret.Err = err
+               return
+       }
+       ret.Text = segmented.manifestTextForPath(srcpath, relocate)
+       return
+}
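+
+// extractFileText is an illustrative sketch, not part of the original
+// change: it shows the intended calling pattern for Extract, including the
+// required check of Err on the returned Manifest.
+func extractFileText(m Manifest, srcpath string) (string, error) {
+       out := m.Extract(srcpath, ".")
+       if out.Err != nil {
+               return "", out.Err
+       }
+       return out.Text, nil
+}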
+
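+// StreamIter parses the manifest text one line at a time and sends each
+// parsed stream on the returned channel. Blank lines are skipped, and the
+// channel is closed once the whole manifest has been consumed.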
+func (m *Manifest) StreamIter() <-chan ManifestStream {
+       ch := make(chan ManifestStream)
+       go func(input string) {
+               // This slice holds the current line and the remainder of the
+               // manifest.  We parse one line at a time, to save effort if we
+               // only need the first few lines.
+               lines := []string{"", input}
+               for {
+                       lines = strings.SplitN(lines[1], "\n", 2)
+                       if len(lines[0]) > 0 {
+                               // Only parse non-blank lines
+                               ch <- parseManifestStream(lines[0])
+                       }
+                       if len(lines) == 1 {
+                               break
+                       }
+               }
+               close(ch)
+       }(m.Text)
+       return ch
+}
+
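+// FileSegmentIterByName sends the file segments that make up the named
+// file on the returned channel, in manifest order, then closes it.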
+func (m *Manifest) FileSegmentIterByName(filepath string) <-chan *FileSegment {
+       ch := make(chan *FileSegment, 64)
+       filepath = fixStreamName(filepath)
+       go func() {
+               for stream := range m.StreamIter() {
+                       if !strings.HasPrefix(filepath, stream.StreamName+"/") {
+                               continue
+                       }
+                       stream.sendFileSegmentIterByName(filepath, ch)
+               }
+               close(ch)
+       }()
+       return ch
+}
+
+// BlockIterWithDuplicates returns a channel of all block locators in the
+// manifest. A block that is used by multiple files appears in the manifest
+// multiple times, and is sent on the channel once per appearance.
+//
+// In order to detect parse errors, the caller must check m.Err after the returned channel closes.
+func (m *Manifest) BlockIterWithDuplicates() <-chan blockdigest.BlockLocator {
+       blockChannel := make(chan blockdigest.BlockLocator)
+       go func(streamChannel <-chan ManifestStream) {
+               for ms := range streamChannel {
+                       if ms.Err != nil {
+                               m.Err = ms.Err
+                               continue
+                       }
+                       for _, block := range ms.Blocks {
+                               if b, err := blockdigest.ParseBlockLocator(block); err == nil {
+                                       blockChannel <- b
+                               } else {
+                                       m.Err = err
+                               }
+                       }
+               }
+               close(blockChannel)
+       }(m.StreamIter())
+       return blockChannel
+}
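+
+// countBlocks is an illustrative sketch, not part of the original change:
+// it drains BlockIterWithDuplicates completely and only then consults
+// m.Err, as required to detect parse errors.
+func countBlocks(m *Manifest) (int, error) {
+       n := 0
+       for range m.BlockIterWithDuplicates() {
+               n++
+       }
+       return n, m.Err
+}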
diff --git a/sdk/go/manifest/manifest_test.go b/sdk/go/manifest/manifest_test.go
new file mode 100644 (file)
index 0000000..1f0f85a
--- /dev/null
@@ -0,0 +1,375 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package manifest
+
+import (
+       "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/blockdigest"
+       "io/ioutil"
+       "reflect"
+       "regexp"
+       "runtime"
+       "testing"
+)
+
+func getStackTrace() string {
+       buf := make([]byte, 1000)
+       bytesWritten := runtime.Stack(buf, false)
+       return "Stack Trace:\n" + string(buf[:bytesWritten])
+}
+
+func expectFromChannel(t *testing.T, c <-chan string, expected string) {
+       actual, ok := <-c
+       if !ok {
+               t.Fatalf("Expected to receive %s but channel was closed. %s",
+                       expected,
+                       getStackTrace())
+       }
+       if actual != expected {
+               t.Fatalf("Expected %s but got %s instead. %s",
+                       expected,
+                       actual,
+                       getStackTrace())
+       }
+}
+
+func expectChannelClosed(t *testing.T, c <-chan interface{}) {
+       received, ok := <-c
+       if ok {
+               t.Fatalf("Expected channel to be closed, but received %v instead. %s",
+                       received,
+                       getStackTrace())
+       }
+}
+
+func expectEqual(t *testing.T, actual interface{}, expected interface{}) {
+       if actual != expected {
+               t.Fatalf("Expected %v but received %v instead. %s",
+                       expected,
+                       actual,
+                       getStackTrace())
+       }
+}
+
+func expectStringSlicesEqual(t *testing.T, actual []string, expected []string) {
+       if len(actual) != len(expected) {
+               t.Fatalf("Expected %v (length %d), but received %v (length %d) instead. %s", expected, len(expected), actual, len(actual), getStackTrace())
+       }
+       for i := range actual {
+               if actual[i] != expected[i] {
+                       t.Fatalf("Expected %v but received %v instead (first disagreement at position %d). %s", expected, actual, i, getStackTrace())
+               }
+       }
+}
+
+func expectFileStreamSegmentsEqual(t *testing.T, actual []FileStreamSegment, expected []FileStreamSegment) {
+       if !reflect.DeepEqual(actual, expected) {
+               t.Fatalf("Expected %v but received %v instead. %s", expected, actual, getStackTrace())
+       }
+}
+
+func expectManifestStream(t *testing.T, actual ManifestStream, expected ManifestStream) {
+       expectEqual(t, actual.StreamName, expected.StreamName)
+       expectStringSlicesEqual(t, actual.Blocks, expected.Blocks)
+       expectFileStreamSegmentsEqual(t, actual.FileStreamSegments, expected.FileStreamSegments)
+}
+
+func expectBlockLocator(t *testing.T, actual blockdigest.BlockLocator, expected blockdigest.BlockLocator) {
+       expectEqual(t, actual.Digest, expected.Digest)
+       expectEqual(t, actual.Size, expected.Size)
+       expectStringSlicesEqual(t, actual.Hints, expected.Hints)
+}
+
+func TestParseManifestStreamSimple(t *testing.T) {
+       m := parseManifestStream(". 365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf 0:2310:qr1hi-8i9sb-ienvmpve1a0vpoi.log.txt")
+       expectManifestStream(t, m, ManifestStream{StreamName: ".",
+               Blocks:             []string{"365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf"},
+               FileStreamSegments: []FileStreamSegment{{0, 2310, "qr1hi-8i9sb-ienvmpve1a0vpoi.log.txt"}}})
+}
+
+func TestParseBlockLocatorSimple(t *testing.T) {
+       b, err := ParseBlockLocator("365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf")
+       if err != nil {
+               t.Fatalf("Unexpected error parsing block locator: %v", err)
+       }
+       d, err := blockdigest.FromString("365f83f5f808896ec834c8b595288735")
+       if err != nil {
+               t.Fatalf("Unexpected error during FromString for block locator: %v", err)
+       }
+       expectBlockLocator(t, blockdigest.BlockLocator{Digest: b.Digest, Size: b.Size, Hints: b.Hints},
+               blockdigest.BlockLocator{Digest: d,
+                       Size: 2310,
+                       Hints: []string{"K@qr1hi",
+                               "Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf"}})
+}
+
+func TestStreamIterShortManifestWithBlankStreams(t *testing.T) {
+       content, err := ioutil.ReadFile("testdata/short_manifest")
+       if err != nil {
+               t.Fatalf("Unexpected error reading manifest from file: %v", err)
+       }
+       manifest := Manifest{Text: string(content)}
+       streamIter := manifest.StreamIter()
+
+       firstStream := <-streamIter
+       expectManifestStream(t,
+               firstStream,
+               ManifestStream{StreamName: ".",
+                       Blocks:             []string{"b746e3d2104645f2f64cd3cc69dd895d+15693477+E2866e643690156651c03d876e638e674dcd79475@5441920c"},
+                       FileStreamSegments: []FileStreamSegment{{0, 15693477, "chr10_band0_s0_e3000000.fj"}}})
+
+       received, ok := <-streamIter
+       if ok {
+               t.Fatalf("Expected streamIter to be closed, but received %v instead.",
+                       received)
+       }
+}
+
+func TestBlockIterLongManifest(t *testing.T) {
+       content, err := ioutil.ReadFile("testdata/long_manifest")
+       if err != nil {
+               t.Fatalf("Unexpected error reading manifest from file: %v", err)
+       }
+       manifest := Manifest{Text: string(content)}
+       blockChannel := manifest.BlockIterWithDuplicates()
+
+       firstBlock := <-blockChannel
+       d, err := blockdigest.FromString("b746e3d2104645f2f64cd3cc69dd895d")
+       if err != nil {
+               t.Fatalf("Unexpected error during FromString for block: %v", err)
+       }
+       expectBlockLocator(t,
+               firstBlock,
+               blockdigest.BlockLocator{Digest: d,
+                       Size:  15693477,
+                       Hints: []string{"E2866e643690156651c03d876e638e674dcd79475@5441920c"}})
+       blocksRead := 1
+       var lastBlock blockdigest.BlockLocator
+       for lastBlock = range blockChannel {
+               blocksRead++
+       }
+       expectEqual(t, blocksRead, 853)
+
+       d, err = blockdigest.FromString("f9ce82f59e5908d2d70e18df9679b469")
+       if err != nil {
+               t.Fatalf("Unexpected error during FromString for block: %v", err)
+       }
+       expectBlockLocator(t,
+               lastBlock,
+               blockdigest.BlockLocator{Digest: d,
+                       Size:  31367794,
+                       Hints: []string{"E53f903684239bcc114f7bf8ff9bd6089f33058db@5441920c"}})
+}
+
+func TestUnescape(t *testing.T) {
+       for _, testCase := range [][]string{
+               {`\040`, ` `},
+               {`\009`, `\009`},
+               {`\\\040\\`, `\ \`},
+               {`\\040\`, `\040\`},
+       } {
+               in := testCase[0]
+               expect := testCase[1]
+               got := UnescapeName(in)
+               if expect != got {
+                       t.Errorf("For '%s' got '%s' instead of '%s'", in, got, expect)
+               }
+       }
+}
+
+type fsegtest struct {
+       mt   string        // manifest text
+       f    string        // filename
+       want []FileSegment // segments should be received on channel
+}
+
+func TestFileSegmentIterByName(t *testing.T) {
+       mt := arvadostest.PathologicalManifest
+       for _, testCase := range []fsegtest{
+               {mt: mt, f: "zzzz", want: nil},
+               // This case is too sensitive: it would be acceptable
+               // (even preferable) to return only one empty segment.
+               {mt: mt, f: "foo/zero", want: []FileSegment{{"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}, {"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}}},
+               {mt: mt, f: "zero@0", want: []FileSegment{{"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}}},
+               {mt: mt, f: "zero@1", want: []FileSegment{{"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}}},
+               {mt: mt, f: "zero@4", want: []FileSegment{{"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}}},
+               {mt: mt, f: "zero@9", want: []FileSegment{{"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}}},
+               {mt: mt, f: "f", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 0, 1}}},
+               {mt: mt, f: "ooba", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 1, 2}, {"37b51d194a7513e45b56f6524f2d51f2+3", 0, 2}}},
+               {mt: mt, f: "overlapReverse/o", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 2, 1}}},
+               {mt: mt, f: "overlapReverse/oo", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 1, 2}}},
+               {mt: mt, f: "overlapReverse/ofoo", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 2, 1}, {"acbd18db4cc2f85cedef654fccc4a4d8+3", 0, 3}}},
+               {mt: mt, f: "foo bar/baz", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 0, 3}}},
+               // This case is too sensitive: it would be better to
+               // omit the empty segment.
+               {mt: mt, f: "segmented/frob", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 0, 1}, {"37b51d194a7513e45b56f6524f2d51f2+3", 2, 1}, {"acbd18db4cc2f85cedef654fccc4a4d8+3", 1, 1}, {"d41d8cd98f00b204e9800998ecf8427e+0", 0, 0}, {"37b51d194a7513e45b56f6524f2d51f2+3", 0, 1}}},
+               {mt: mt, f: "segmented/oof", want: []FileSegment{{"acbd18db4cc2f85cedef654fccc4a4d8+3", 1, 2}, {"acbd18db4cc2f85cedef654fccc4a4d8+3", 0, 1}}},
+       } {
+               m := Manifest{Text: testCase.mt}
+               var got []FileSegment
+               for fs := range m.FileSegmentIterByName(testCase.f) {
+                       got = append(got, *fs)
+               }
+               if !reflect.DeepEqual(got, testCase.want) {
+                       t.Errorf("For %#v:\n got  %#v\n want %#v", testCase.f, got, testCase.want)
+               }
+       }
+}
+
+func TestBlockIterWithBadManifest(t *testing.T) {
+       testCases := [][]string{
+               {"badstream acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1.txt", "Invalid stream name: badstream"},
+               {"/badstream acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1.txt", "Invalid stream name: /badstream"},
+               {". acbd18db4cc2f85cedef654fccc4a4d8+3 file1.txt", "Invalid file token: file1.txt"},
+               {". acbd18db4cc2f85cedef654fccc4a4+3 0:1:file1.txt", "No block locators found"},
+               {". acbd18db4cc2f85cedef654fccc4a4d8 0:1:file1.txt", "No block locators found"},
+               {". acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1.txt file2.txt 1:2:file3.txt", "Invalid file token: file2.txt"},
+               {". acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1.txt. bcde18db4cc2f85cedef654fccc4a4d8+3 1:2:file3.txt", "Invalid file token: bcde18db4cc2f85cedef654fccc4a4d8.*"},
+               {". acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1.txt\n. acbd18db4cc2f85cedef654fccc4a4d8+3 ::file2.txt\n", "Invalid file token: ::file2.txt"},
+               {". acbd18db4cc2f85cedef654fccc4a4d8+3 bcde18db4cc2f85cedef654fccc4a4d8+3\n", "No file tokens found"},
+               {". acbd18db4cc2f85cedef654fccc4a4d8+3 ", "Invalid file token"},
+               {". acbd18db4cc2f85cedef654fccc4a4d8+3", "No file tokens found"},
+               {". 0:1:file1.txt\n", "No block locators found"},
+               {".\n", "No block locators found"},
+       }
+
+       for _, testCase := range testCases {
+               manifest := Manifest{Text: testCase[0]}
+               blockChannel := manifest.BlockIterWithDuplicates()
+
+               for range blockChannel {
+               }
+
+               // completed reading from blockChannel; now check for errors
+               if manifest.Err == nil {
+                       t.Fatalf("Expected error")
+               }
+
+               matched, _ := regexp.MatchString(testCase[1], manifest.Err.Error())
+               if !matched {
+                       t.Fatalf("Expected error not found. Expected: %v; Found: %v", testCase[1], manifest.Err.Error())
+               }
+       }
+}
+
+func TestNormalizeManifest(t *testing.T) {
+       m1 := Manifest{Text: `. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
+`}
+       expectEqual(t, m1.Extract(".", ".").Text,
+               `. 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:127:md5sum.txt
+`)
+
+       m2 := Manifest{Text: `. 204e43b8a1185621ca55a94839582e6f+67108864 b9677abbac956bd3e86b1deb28dfac03+67108864 fc15aff2a762b13f521baf042140acec+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:227212247:var-GS000016015-ASM.tsv.bz2
+`}
+       expectEqual(t, m2.Extract(".", ".").Text, m2.Text)
+
+       m3 := Manifest{Text: `. 5348b82a029fd9e971a811ce1f71360b+43 3:40:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
+`}
+       expectEqual(t, m3.Extract(".", ".").Text, `. 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 3:124:md5sum.txt
+`)
+       expectEqual(t, m3.Extract("/md5sum.txt", "/wiggle.txt").Text, `. 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 3:124:wiggle.txt
+`)
+
+       m4 := Manifest{Text: `. 204e43b8a1185621ca55a94839582e6f+67108864 0:3:foo/bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+./foo 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
+`}
+
+       expectEqual(t, m4.Extract(".", ".").Text,
+               `./foo 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+`)
+
+       expectEqual(t, m4.Extract("./foo", ".").Text, ". 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar\n")
+       expectEqual(t, m4.Extract("./foo", "./baz").Text, "./baz 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar\n")
+       expectEqual(t, m4.Extract("./foo/bar", ".").Text, ". 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar\n")
+       expectEqual(t, m4.Extract("./foo/bar", "./baz").Text, ". 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:baz 67108864:3:baz\n")
+       expectEqual(t, m4.Extract("./foo/bar", "./quux/").Text, "./quux 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar\n")
+       expectEqual(t, m4.Extract("./foo/bar", "./quux/baz").Text, "./quux 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:baz 67108864:3:baz\n")
+       expectEqual(t, m4.Extract(".", ".").Text, `./foo 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+`)
+       expectEqual(t, m4.Extract(".", "./zip").Text, `./zip/foo 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar
+./zip/zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+`)
+
+       expectEqual(t, m4.Extract("foo/.//bar/../../zzz/", "/waz/").Text, `./waz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+`)
+
+       m5 := Manifest{Text: `. 204e43b8a1185621ca55a94839582e6f+67108864 0:3:foo/bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+./foo 204e43b8a1185621ca55a94839582e6f+67108864 3:3:bar
+`}
+       expectEqual(t, m5.Extract(".", ".").Text,
+               `./foo 204e43b8a1185621ca55a94839582e6f+67108864 0:6:bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+`)
+
+       m8 := Manifest{Text: `./a\040b\040c 59ca0efa9f5633cb0371bbc0355478d8+13 0:13:hello\040world.txt
+`}
+       expectEqual(t, m8.Extract(".", ".").Text, m8.Text)
+
+       m9 := Manifest{Text: ". acbd18db4cc2f85cedef654fccc4a4d8+40 0:10:one 20:10:two 10:10:one 30:10:two\n"}
+       expectEqual(t, m9.Extract("", "").Text, ". acbd18db4cc2f85cedef654fccc4a4d8+40 0:20:one 20:20:two\n")
+
+       m10 := Manifest{Text: ". acbd18db4cc2f85cedef654fccc4a4d8+40 0:10:one 20:10:two 10:10:one 30:10:two\n"}
+       expectEqual(t, m10.Extract("./two", "./three").Text, ". acbd18db4cc2f85cedef654fccc4a4d8+40 20:20:three\n")
+
+       m11 := Manifest{Text: arvadostest.PathologicalManifest}
+       expectEqual(t, m11.Extract(".", ".").Text, `. acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 73feffa4b7f6bb68e44cf984c85f6e88+3+Z+K@xyzzy 0:1:f 1:4:ooba 5:1:r 5:4:rbaz 0:0:zero@0 0:0:zero@1 0:0:zero@4 0:0:zero@9
+./foo acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo 0:3:foo 0:0:zero
+./foo\040bar acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:baz 0:3:baz\040waz
+./overlapReverse acbd18db4cc2f85cedef654fccc4a4d8+3 2:1:o 2:1:ofoo 0:3:ofoo 1:2:oo
+./segmented acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 0:1:frob 5:1:frob 1:1:frob 3:1:frob 1:2:oof 0:1:oof
+`)
+
+       m12 := Manifest{Text: `./foo 204e43b8a1185621ca55a94839582e6f+67108864 0:3:bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+./foo/baz 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
+`}
+
+       expectEqual(t, m12.Extract("./foo", ".").Text, `. 204e43b8a1185621ca55a94839582e6f+67108864 0:3:bar
+./baz 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
+`)
+       expectEqual(t, m12.Extract("./foo", "./blub").Text, `./blub 204e43b8a1185621ca55a94839582e6f+67108864 0:3:bar
+./blub/baz 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
+`)
+       expectEqual(t, m12.Extract("./foo", "./blub/").Text, `./blub 204e43b8a1185621ca55a94839582e6f+67108864 0:3:bar
+./blub/baz 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
+`)
+       expectEqual(t, m12.Extract("./foo/", "./blub/").Text, `./blub 204e43b8a1185621ca55a94839582e6f+67108864 0:3:bar
+./blub/baz 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
+`)
+
+       m13 := Manifest{Text: `foo 204e43b8a1185621ca55a94839582e6f+67108864 0:3:bar
+`}
+
+       expectEqual(t, m13.Extract(".", ".").Text, ``)
+       expectEqual(t, m13.Extract(".", ".").Err.Error(), "Invalid stream name: foo")
+
+       m14 := Manifest{Text: `./foo 204e43b8a1185621ca55a94839582e6f+67108864 67108863:3:bar
+`}
+
+       expectEqual(t, m14.Extract(".", ".").Text, ``)
+       expectEqual(t, m14.Extract(".", ".").Err.Error(), "File segment 67108863:3:bar extends past end of stream 67108864")
+
+       m15 := Manifest{Text: `./foo 204e43b8a1185621ca55a94839582e6f+67108864 0:3bar
+`}
+
+       expectEqual(t, m15.Extract(".", ".").Text, ``)
+       expectEqual(t, m15.Extract(".", ".").Err.Error(), "Invalid file token: 0:3bar")
+}
+
+func TestFirstBlock(t *testing.T) {
+       fmt.Println("ZZZ")
+       expectEqual(t, firstBlock([]uint64{1, 2, 3, 4}, 3), 2)
+       expectEqual(t, firstBlock([]uint64{1, 2, 3, 4, 5, 6}, 4), 3)
+}
diff --git a/sdk/go/manifest/testdata/long_manifest b/sdk/go/manifest/testdata/long_manifest
new file mode 100644 (file)
index 0000000..a7949e6
--- /dev/null
@@ -0,0 +1 @@
+. b746e3d2104645f2f64cd3cc69dd895d+15693477+E2866e643690156651c03d876e638e674dcd79475@5441920c 109cd35b4d3f83266b63fb46c6943454+6770629+Ed0c0561b669237162996223b813b811d248ff9b0@5441920c 1455890e7b56831edff40738856e4194+15962669+Ec298b770d14205b5185d0e2b016ddd940c745446@5441920c 8c87f1c69c6f302c8c05e7d0e740d233+16342794+Ec432f4c24e63b840c1f12976b9edf396d70b8f67@5441920c 451cfce8c67bf92b67b5c6190d45d4f5+5067634+E406821d6ceb1d16ec638e66b7603c69f3482d895@5441920c f963d174978dc966910be6240e8602c7+4264756+E00241238e18635fdb583dd0c6d6561b672996467@5441920c 33be2d8cdd100eec6e842f644556d031+16665404+E6c773004b8296523014b9d23ed066ec72387485e@5441920c 6db13c2df6342b52d72df469c065b675+13536792+E6011e6057857f68d9b1b486571f239614b0707be@5441920c fb7ccc93e86187c519f6716c26474cb3+13714429+Ec4677bfcbe8689621d1b2d4f1bdce5b52f379f98@5441920c 972f24d216684646dfb9e266b7166f63+44743112+E1706fe89133bcd3625cc88de1035681c2d179770@5441920c 16f8df1595811cf9823c30254e6d58e6+17555223+E0febd567bf630b656dcfef01e90d3878c66eed36@5441920c d25b29289e6632728bf485eff6dde9c5+4366647+E7071644d29dd00be350e2e6fb7496346555fb4e9@5441920c 11dffe40608763462b5d89d5ccf33779+32161952+E7f110261b4b0d628396ff782f86966c17569c249@5441920c 0d36936536e85c28c233c6dfb856863b+22400265+Eee3966f1088f96d4fde6e4ec6b9b85cd65ff0c56@5441920c 03f293686e7c22b852b1f94b3490d781+14026139+Ef27fdfb40d6f9bd7bf8f639bcb2608365e002761@5441920c 185863e4c8fb666bc67b5b6666067094+22042495+Ee1164ffe4bffb0c2f29e1767688fbc468b326007@5441920c 4c7368ed41d2266df698176d0483e0be+31053569+E527d607c348f45ede4d8d6340f6079dd044c554d@5441920c ef75be5688e570564269596833866420+7357223+Eb27e68b0dc1674c515646c79280269779f2fb9ed@5441920c cc178064be26076266896d7b9bd91363+17709624+Ed64b0f5e023578cc2d23de434de9ec95debf6c4c@5441920c 5721f0964f9fb339066c176ce6d819c4+6146416+E5df3e33404b589fd4f2f827b86200fe3507c669b@5441920c 53df2cf91db94f57e7d67e4bc568d102+14669912+E64ddcf065630c72e281d0760fe11475b11614382@5441920c 3b045d987f9e1d03d9f3764223394f7f+11964610+E667868e60686bb6fc49609f2d61cb5b4e990dc4c@5441920c 1b83050279df8c6bfd2d7f675ecc6cc0+14904735+E91b1576015021d4debb5465dc449037bed0efc60@5441920c 16c366b5e44bd6d3f01600776e65076b+13400037+E6ded42f36469b5996e60c3415094d93b98d58d17@5441920c 6e7c59c345714f8d20176086c53d128f+5665774+Ef4c5716bb8c535d1335886f4ed8792e28829f531@5441920c 47c20b212e917be6923d6040053b6199+9646905+E875b5786fe08f40d5655ec0731368085d2059fe7@5441920c 6d56fc2964ee717fb3d168e0433652e5+4640161+E59be5ce3d0188761859f8f723bdfbf6f6cfc58b6@5441920c b62899c71fbf5ee6b3c59777480393b1+32455363+E2bfbdc56d6b66b7709f99466e733c1389cd8c952@5441920c 5c0390fc6f76631ec906792975d36d09+15940309+E0671c8fd6b2d8e05827cf400b6e6f7be76955dbf@5441920c 19be066d6bb9de09cb171c92efb62613+22466671+E2230614c0ccc69fd2669ce65738de68dbff3c867@5441920c 4c8396101d3fc596400d63121db853d0+13741614+Ecf2839221feb3d070b074fb1500544572dc5256b@5441920c cd29406297ffb7f637c058efbf305236+7619567+Ec063b1c180b6dfef7462c65dc2c7fc34b5756598@5441920c f68b644c6c02d36658e6f006f07b8ff0+23222064+E67594b67317452786c664f26808697d343d3316c@5441920c 42f58fb009502ec82e1d5cc076e79e4c+29666907+E2e27c6bef691333b19269570bc175be262e7b2ec@5441920c 384e1e7642d928660bc90950570071b7+16511641+E44951c3c7b111f06d566b686fc78dc430744549e@5441920c e200de735365bd89d42e70b469023076+26095352+Ef9566086c4526e88e4694b55cbeb2ed3d229198d@5441920c e809638508b9c667f7fbd2fde654c4b7+26536426+Eedb7bd609b7d22df73bc5b6031663824ff106f5f@5441920c c6e13cc51e2354c0346d4564c1b22138+5595242+Ef4eb609230d6644f1d8626e186f95f9b784186e3@5441920c 
fc6e075d862372e6dd4d438f0c339647+524636+E28e5d58c5feed7ef5e11869e16b00666424f3963@5441920c 654066ef6cd1b9ec3010d864800dd1c8+20166756+E655b286e729e5cb164646314031f45628c914761@5441920c dfe8df7f1f6d8f37667f275fb0f16fe4+10195576+Ec7b5272532230b29ce176629dbe6c9098f482062@5441920c 0b3e18ed791e551bbde5653487cd9e0c+26057104+E95309d4ec6c56d6490946103224e8e6d35622e12@5441920c 9f453ed53b8be18d3538b9564c9d6e2f+14129943+Ede61011c6d265c59417889db12301c712ef6e375@5441920c fd919cb4313d5c4d3e6d36ddecb39d9f+27262406+Ee7dcc78b62b26b179f6cd05bb6c56b6d932f01f8@5441920c 2371986d9b195513d56d7d8b6888fd13+11366564+E487076c1c0dbbfe05439e9b7506b3d79dff8e3d7@5441920c 19cc39fb80e4cf65dd9c36888261bf6c+4264756+E5d56331cc97d68d9cd7d1f942b04be3fd808c640@5441920c 622c38578f1913e0d1ce5db993821c89+6746610+E95f98718306714835df471b43393f45e27ddd9b9@5441920c 3836977b216b56d36b456fc07bd53664+21620366+Ed358c40e313e1cc97d3692eec180e45684dc21e5@5441920c 738636b97bc221e7d028bdb06347dc16+9166469+E76e010db792235b2fe1f56f26037638570191f5d@5441920c 56605f61b621650d3df04831649d2588+6326193+E1d9d0567e8fcb93990f7c4365f92742983e6f69c@5441920c 2125e15df79813c69497ef6c0f0f3c6c+12757371+E30cbe534f649db7301496eb203711dd9eb3e9ee9@5441920c c61de805f19928e6561c96f511fedbb4+12157116+E756df376e5bcc65319d062bd10685df117957004@5441920c e32dc879179c2d507bb75ebd015d4d26+10261919+E2250d07188228888c8052e774d68e2918f6c4c2e@5441920c 6d2d0e3b6984940858e36864d571eb96+40669605+E2bd8434ddf794691166b1556e47ef8f7b636c920@5441920c 65603431e7ded48b401b866d4c8d1d93+24190274+Ed2c84b40dde45d8b4df9c696651c4d8cbe02e019@5441920c 1228e02f7cbf807d8ed8b1823fe779b3+10020619+Eef06c59626f88b5dc9b741f777841845549d956d@5441920c 7367b338b16c64312146e65701605876+44636330+Ee6d463f6d719b0f684b7c8911f9cdcf6c272fec5@5441920c cd8d61ee8e4e2ce0717396093b6f39eb+13920977+Eb6c4f61e78b10c045b0dfd82d9635e45b6b01b5f@5441920c 28079dc5488123e5f9f3dcd323b7b560+22369141+E077f18b49d62e4d88ccc78dcc0008e4021d7342b@5441920c 56bf3c8e6c6064f6cb91600d29155b2b+22616366+E920d258e698cd2e7e66d9f78de12c87f62d472d1@5441920c 49f686994d4cb5967d19641e284733c6+26439412+E9dcd733412c06841ded126efdb30542c4f932587@5441920c 1ef6646ce8917186e1752eb65d26856c+4173314+Ed60dc1dc4b9ed74166619d66109f6eb546c86342@5441920c b24076cf2d292b60e6f8634e92b95db9+39664156+Edf615c5203845de38c846c2620560664ee6cb083@5441920c 576e06066d91f6ecb6f9b135926e271c+11123032+E9d147b4b89c947956f0c99b36c98f7026c2d6b05@5441920c 7642676de1dccb14cc2617522f27eb4e+10756630+E55cb4ed690976381c9f60e2666641c16f7cf5dc2@5441920c 77580fe91cd86342165fb0b3115ecc66+10560316+E99463b8815868992449668e59e41644b33c00244@5441920c 1c506d050783c30b8cd6b3e80668e468+35565426+E67c9d75c946c5c6e603867c66ccfcdb45266fc34@5441920c b0d8e3bf2d6fc9c9d067467749639c31+14197061+Ecdbb94e40090d099c847952d2f21de89803f3169@5441920c 01605bdb27b06992636d635b584c5c2f+20756432+E36de4fe4eb01fdd1b9226810d21c8f62f1d65643@5441920c 0c27885b49cf5589619bd6ff07d02fb2+15792191+E23bd16d3bd20d3bed3660d6fd035086d6d5146d7@5441920c b0149371ff6e4b097561cb6de4b5018d+22249239+E4f207f62d04d6d847c27e2463f69b847676344ed@5441920c d6fb819c6039468f36141e1344675379+16449706+Ecfb1156101edfeb2e7f62d074f52686d215def86@5441920c 09d34633511ddbcc6646d275d6f8446d+29052525+E6bd7fe2d67cec4ed4e303e5f75343e4b45656699@5441920c ed798723d587058615b6940434924f17+23966312+E97c78dcf692c99b1432839029c311b9e66ec51e9@5441920c 29f64c166e005e21d9ff612d6345886d+5944461+E004b7cdd000e8b6b82cde77f618d416953ef5f76@5441920c 8610cd2d6fb638467035fdf43f6c056d+20155513+E76b2453644c8624f5352098d3976bd41ccd81152@5441920c 
64fbf1f692c85396dffd0497048ff655+26292374+E3d479e00158992e9d770632ed7fe613b801c536d@5441920c e7db466023228e000877117bf40898d5+37776620+E8268e86cf6d614e31b3f89dfcb73cfd1f7b4472d@5441920c 26f844c3000746d76150e474e838876c+16720695+Ecd248063ec976663774bb5102068672f6db25dc8@5441920c d631188d8c5318efbb5966d96567162b+13059459+Ee8e8b625c936d9ed4e5bfdd5031e99d60ec606e6@5441920c 75e196c3ff8c902f0357406573c27969+7673046+E3fde8dc65682eccb43637129dbb2efb2122f6677@5441920c 90d0f062f153d749dc548f5f924e16c7+5625767+Eecd6284d567555146616cf6dc6cc596e76e30e62@5441920c cc3f072f71cc6b1366f8406c613361f6+42976743+E55561d73068c4816945df0039e80863880128997@5441920c e74b79c0cbd84059178c60e8016d113d+13609906+E74850d9197693f46e640df4c7bf631f5cd6fe7db@5441920c 186706b6c31f83b07e7c60eb358e93bf+11966262+Ee4e0e578278e9288bcfc546355e16dd07c71854b@5441920c f85c6bc762c46d2b6245637bfe3f3144+17595626+E780515682f0279edf3bc7638e69dde8d5c87eb5f@5441920c 80fb6eed15dbf3f3d88fb45f2d1e70bb+6567336+E61709663412711e6bcccd1e82e02c207d65083e6@5441920c 55d586d9b4e661654d46201c77047949+7406969+Ef65e6ef6de723634d7ebc04b8e8c787760940948@5441920c 6fc45eb907446762169d58fb66dfc806+26345033+Ebf58596e6096dd76c9ec7579e5803e82ec7ccf66@5441920c e398725534cbe4b9875f184d383fc73e+11140026+E54668ebd22937e69e288657134242770c1fdc699@5441920c 69b586521b967c388b1f6ecf3727f274+9977002+E6eb4b63de4d17b50866bc5d38b0ec26df48be564@5441920c 2e293570b864703f5f1320426762c24e+13651023+Ef6640563ec496df42bcfc696986b6e4f6edccc68@5441920c 462b1eb00f462e161f4e5ce2bbf23515+19646309+E47ec8fb615747c6104f7463ffe65d1f6738c2e67@5441920c 7f8eb265855e458e6bfc13789dd696b7+22406679+Ef3cf31dbb3fefef455f62d6b5c2486500f327398@5441920c 36659b0e79c69296927b418919561e89+24370117+E66e94cf0be13046deb186302cd666d5300908029@5441920c bf6dd822cfbc6b90f986e5f43d500c6c+34354522+Edff8be044ebd69391cf282451659660d5dc6dc12@5441920c 2267fb579f99df6b23290bd2e939bcd6+12153797+Ed3de8875c91d6f346fe320b20670c410f46e7ede@5441920c dd66288e4f7ef394f6ed7e9b73ff5178+19120741+E3860d5c83e021eb3646e5884018ec3dd59d806b7@5441920c 7f86957074e677328be7538ccbcc747f+16676462+Ef6492f2cb4dbf9d73c1e58e2d0d85b0dd2f18402@5441920c d7363e073e178f502b92e053369f40fb+26125462+Ecf329f93efd1ec34f17edb991de264b9590c88f6@5441920c 6d64dde62f62d6febdf6f2c66c0220d8+23263164+Ecc22f32322cd039cce602e155bb530ebedce7b49@5441920c 7b70bebe42067024d360b7216c55d7e6+11436933+E7b70998697b46b0840836219c8e37e6d74906656@5441920c 3e6201706ff76745189f1636d5827578+27434607+E5204e6cf46e581b019661ed794674b877f7d3c26@5441920c 1b1968d7d8bb0d850e15bf8c122b1185+13431932+E28e98b072607648f73c5f09616c0be88d68111dc@5441920c f8ddc22888e3fff1491fdfc81327d8cf+2633555+E1b55c1417c2c0bb2fff5e77dbd6ce09e7f5d68bd@5441920c 9f200cd59000566dd3c5b606c8bd4899+10166739+E88797b1c2d44d6c6b6c16b6e2dfe76812494df2c@5441920c 65f26cbde744d142d8561b715f5dffc7+13335963+E13e86ebb6b426b1f4b6546320f95b63d558678f9@5441920c c89cbf812dd061873fdbeefcbb7bf344+6763176+E13b1765c5d3f3709605ef703c5c41bc46f25ffb4@5441920c 99f663066b7d0dc6f6e355eefbc64726+13444650+E8f607654b8d1fb72109b2e3eb64645202111ef2e@5441920c 6804c29fd6b3ec351dc36bf66146610c+26266416+E106283d64058d0c8b15061eee6d2059095767f7d@5441920c c23c67b4d1123fee2d8ed636c4817fd5+16376964+E392625bf396b887186e8200d94d8c7e392352618@5441920c 3f7640ed561971609025b37696c38236+14116164+E55239788883085d7f854058e090177fd10436258@5441920c 4f4014cf7cf09694c6bc5050d08d6861+23692725+Eb40f77014747eb8756606581bb6cef6665bc1e92@5441920c 0f46b1e0e8e69d0ec0546666b21f1c23+10507763+E173fc49b601c3c699d7cfce8c8871e44b371e6cf@5441920c 
24385b164f3913fb234c6e3d8cbf6e55+27625276+Ed26e6d9e6eb59b6cf51c01d4b8909dc648338906@5441920c 0ec3f2ecf85f63886962b33d4785dd19+7026139+E43ec8f5ee2bf4f3b639ed66313c2363965702052@5441920c 674e2b084199c6be0566c29f512ce264+27711533+E1752f5c20c69cd33e669012632cfb2b93e1febf8@5441920c 8de5446ce99c95842b63dd62f2836e35+6793207+E808e94501ce9cf2f0b694f16ff261d42792dfc34@5441920c ecc3b274850405ec6531982414c634c2+15405916+E3c45d5ec865de3c34bb7e14e5577b7ec99d50268@5441920c 4c3b28e830f55707601378f6b314bb36+9160724+E6c42dd49736833326cfeb59003340d99d336b85c@5441920c f217e6338e5be409b309bc05768cd692+9467601+E33296cb0476d39648eb3518265241d2e58667c69@5441920c 1c33d278e00d838960c35365e8b211f3+7969532+E976bbcb318e35b425276d16640687cd30c0f6513@5441920c 45fdc6257f4601f5e6ddf2c3f3249453+24739014+E37fc9116462386d43647d43b1f24301fc2b3d2ff@5441920c 42c619bd934e4ee7876e6e62bb013c8d+26941562+E22061d93633689db860c97d09c2d428e0bc26318@5441920c cef567d31d5e889fc38f0b1c8e10603c+3036311+Eff049d2e8b04646603c7307d8427ec384dd5636e@5441920c 6d919324cfd4489696661b0c3bd2046e+7761096+E3d0ccb506d66c4621d1563e7f301d9de5e306ed0@5441920c 4631f15b56631ddf066623240ef60ecf+16709476+E125d603e61f05573e9bc6d15d64038548be25646@5441920c 6c897d794f5e90b15ee08634c3bfbef1+22602265+E65c0d239fe02411d4e688b0ff35b54b5fbf861e6@5441920c 26e1e7c8d16d0ec9335c8edb01556e74+23405696+Ed77c8c87b739992b6e2f4f0bd813e3877c029646@5441920c de5607856bc6965b3d689d9f6c739dc6+14457362+E16b373fe771865bec4e26e0c5b86e3241be55416@5441920c 9c96247f87d27cdf351d10424fb65154+11220750+E5666f47b25b3667bf32b17cf06202016edd96078@5441920c 6bb96d31bb0766150fbc94ff08ec1e50+16561466+Ef617977d6fc4b3b7606056e7744f61508e1f6dfd@5441920c 290806849f83631376637e012d63c055+15634314+Ef56d98c07c837800ef7653b9e74b1c868911c512@5441920c 917ff996f786819bc13747d05796db8d+26147265+Ebd9eb6985b39beb62d7cee1675dc88bc469786be@5441920c e3c8b5f953857082274364d3867fb56c+11193151+E39798993b68bcde100412e41e046f716cb576fd4@5441920c b0ce9f0bf1db246f83f961be4789b2db+9599462+E9d8bd12dc40e9e4665e4f33206ce9d4144b5c48e@5441920c 77d5f68866703cc369796f6d56c4d564+9625154+E6076126e1811c6e7b05c8959558fd35be4d9336e@5441920c 7b861b04ecef1e4260f42febc076dd48+46677445+E979196bd9bbd7456963e8f55564ecbe16ff3745f@5441920c ffb4f46254cfc652517e153438489038+12795653+E43e6ec68c5276d6422c66b077266230772849035@5441920c 7699462d29f00f611f35891127e16031+27123199+E09eeec5c1612c40246b21e26b65766ecc59bcc9b@5441920c df706e0400506e210565939e04539eb8+16632721+E3d404cd76de417682560ecf97b5c7f821c18148f@5441920c 1c9d96048b663c625fd02658f6f75c7f+12652756+E97cb664d41f2b9c69f9fe5667c12bcc266b6d492@5441920c ed360b6b945be71391e803353132c5fb+5706666+E7e4162c6cc3862322792cf91d76c719c84896c74@5441920c 24b7bf83c6b60fe6cf9746c8d16b86d6+12566075+E0d0b95ee04f865f5db70e2c80d35ed7742d20619@5441920c 9deef070820c1ecff87d109852443e97+16946677+E288515ff55d2b49754bffbde646d6b9f08981b66@5441920c 5e57630e60dd29658e61165360404fb5+12209370+E0762d4cee56b876c85ee0d2fd468649640561070@5441920c 61c7e19f7e96bcf59bff036887e5e755+17916606+E92d286ed713f8cb36d44f6b0346db71b5156648d@5441920c 878e7f227305c5c89ddc057bdc56ede5+24643337+E214637662b794717e65860d89ef5bc35f3f43d10@5441920c ef1514658c8f004fe640b59d376fdb06+3264756+E2b6eb6625c08c54758676006f634f9d09d9218b6@5441920c 485e4d6249b959b57226eec66268d074+4102134+E1118dbb1517f7323387bf970ddd5457c852353ef@5441920c 06d4b5ce44510d68dd154ff45203448c+19703325+E65bff4376436dff5c5601120e7c7138cc78eee61@5441920c 6d6616d27e10b3d0b562d154b6934eb7+11554223+E814476dfc3d4839453633b5538f76e11d365cdf2@5441920c 
f81f6f1ee2b866edf1e866c360c9decc+12130664+E3f3c05664668c4573244d3ce9ebb32356ec78d00@5441920c 66fb6db666667e6fe4b644d414643225+5642000+Ed3db35e5034c66e26323c3711b3bdd9e0c30b9e1@5441920c 5bedd5d1813136695b744e6696bd444b+17354621+Ed6c692158452b91b00e4f7065fb4d57945c6544f@5441920c 041391d37c47b66c064f216c76967c1d+7546724+E225d15c0700689d941be9216136d5159e57617bf@5441920c 0b3936e98635485dc5c39c091b1e141b+30306549+Ed8201dc4b2f19c6436b27200cc661160880f53e1@5441920c 87c955bc76e6dcd602074cd0b61ef669+19466657+Edce058995064b4c6d2ee4b5fd77634ef612fc4e2@5441920c 5863cf41b6d842606191f91266766ecf+19566732+E35547d8c39d6ddf6f0fd663ef6207d369121fd2c@5441920c 4b2cfe879bfdd4f5592b2948e1f12f80+16726166+E0c34f334513cfc42834f2f1b8bf3c2ec320bf9cc@5441920c 18fed9e859f59e23181668e4143c216d+7297044+E77384d2014fc7f1e460436175b45bb23678c0f70@5441920c dd1ee9df0750267ee5bc9ef6f29b0632+13453405+E45879d6d0f51bd868f7361809df00e383b2d83eb@5441920c f3e82d6578cc5172dd9f264f50d8bb42+20691242+E246dff090584102969751374c13e36510ef96feb@5441920c d68c62d920b706612d32f31727654479+13969727+E0428790ccc219305dd026886526fc5f41505ef67@5441920c 672f554d523e6939c88956610d8d66d9+15929956+Eb0468436beee5f8614d96765e75c628443d04832@5441920c 03690d1333904fdc508c57f33c715c3b+12006715+E3dfb288e160d2920cf92e3cef145d82d8636d807@5441920c d7d5d48c6ecbfff8edf63e21c8ee1680+6976746+Eee6cf6450806f2d68c7ff61d16ff0b9b09bee55b@5441920c b206cce6b38d71c626fc6260d60de055+16617309+E5bd96be2db6bc7692b8e7166fef6741635fe71c1@5441920c f82bc9fb241fc9bb1e9403660f31e963+26602130+E23677fb52377535f6f4d98371640701007467dd3@5441920c 60909d87315fc866ce54161907883f86+22761626+E222d02645d114b88836267760cc5599064dd8937@5441920c 5938d2c975658ed73f676cdf8e2df648+7096657+E6d5533fbcdc0f54dd094cf4de638c4cd3020bf04@5441920c 4b8c87889c09deee98b01bf9ec143964+26067196+Ebcb681616efd85c46893be63dd6663f5b45695c4@5441920c 4e7f06d06fd613f5d50dc3b9626d01de+10673992+E66fe9d65f3f18ef2fc74c6c766e04c6826060c21@5441920c e016be89b3607dc2c6d84703446096c6+14647560+E67d21749bf35c936546c2816e658c8ce4fd4863e@5441920c 65663576005d0735780d7783d27fd612+6567442+E3eeb256c414f59c671484666608019515b6d66e8@5441920c 8184bfb40466690c3c7bd33cf2001b7d+27369311+Ed3b2d4e52f16cf2c20b95e1650f0b69671b6767b@5441920c 28210e98e4bccfc0c8c881ee65dbccd7+9264693+E6780fef94c00c22364661b4df03db1894b65b279@5441920c 7d635728d6d3f0654491e73d06e2760b+16320752+E89b121f6c09e7f188397cedd9ce53064630e4197@5441920c c355555c484c0d41d31c1496bb0f88d4+4140293+Ed2ec40601643f992424e6042610ceeec4f926202@5441920c eee46de26c233081986fcc63036f6e87+17266099+E643f07bc7496eb97beb2bbdd74f78d9c7c40632e@5441920c 6bf27eb8b36619050c0246b26d541397+3060756+E9ed96e63725bb226e6717733062d92c38d0dd416@5441920c 17e7810c048bbbd3837c74253576c064+3260426+E660edf2b267bd1dfb1c70d25ce1173d99b572435@5441920c 633b2f33c40f13b691d59f8b64543ee9+26136225+E65975c79c76fedc2d8b92c2d8095845996c656c8@5441920c e5588b19938ee85458f1008b6155ff80+45662056+E5fe59f043d3b8e6f1ccc6d92e19ff6c6bd6e2d2c@5441920c 14b6ece5c233ed08c8343665bbc435fc+10447960+E6009d59e556cf6379ed6bc849f180d1cc33b3068@5441920c 1064ee1f9f687c0461c5bd686b612ce4+6564566+E7cbf7c65eb90855372605b5452b6265366e64841@5441920c c073866fd327e646c556d748027d6cc6+6396676+E8c404153f6d5010756968c6b9ff619bcddb1e1d7@5441920c 1dd987d82e5f8d23659cf23db99f6517+7956724+E18d666c504486712bddb5f8173658650c7708182@5441920c c4eb6d77298d6964f9e862e809463521+34269266+E1e466382fe93e2103395fedbb57bc5e2826f482f@5441920c 5c621f017e2e17260b15e13d6d6102be+13762411+E5293993d8891eed812c1829096775c9129d66d86@5441920c 
706beecbdb9f413d8456e05b6744f6eb+3947613+Ecce55b46196c75ccfb06eb9b392e53d9f1c71c18@5441920c d498f6f76978747843767963f5064309+5537714+E2885742de6412d62b47c33bec68d8d9f81f9c09c@5441920c 2266396b65b97e348973206358673f66+24305632+E2e0ec28566c629333dce5f41e47488f4d736f018@5441920c d91969572c86d6b14636f6e3460bcb24+17507515+E96fb6850f7fbb4d9c2e0954be44635896879976f@5441920c 11b46690ee6e9bfef0c4026d856f4670+32626524+E361d099f561efd303d2e24182ee09327ec51657f@5441920c 2361c32669d0564e52d336f85923b61e+1010299+E45038369c554e6b30b60f3ec580898792163d919@5441920c 858bd2ddeb56d69038b78d289ddfde15+23454636+Ebb767b2668b5f9f61c4de733265595f1c074e606@5441920c 91618b31768711ec4b19dbfcfc7bb98c+16017355+E876f5f62b67613de0f79e60f245cb0f02f017220@5441920c 1bb9feb4c6ecd90cf1d8e061fe9967b1+9792746+Ebee666de05c3811c76620f4d9f49cc7103f0690f@5441920c f76ed53563936eb324feb4fcf9d2e44d+533647+E59361b31266d7566c00ce339629b5d1d86863cb6@5441920c 47f61e664eb4d68364d94971f0446206+1064656+Ef226fc40f66666690e640c125f636b37c6e75682@5441920c 155b75f465771d25168cc2f723426908+27465637+Ef6d455ccdd7350f6d8eb036675b046bd531f694b@5441920c 189e6923d3e6810634475b6563ff42d0+12707353+E218987c1f65753c694feecf176253ccc353268e6@5441920c 345957000ebe671b86130e51896d8694+6632970+E76eb72461dffd0b03ebd0287b4bd4df60fff6019@5441920c bb8830d56f6e8b0463c1897f9c6c9b68+6746794+Ee569093960e68f65b8bfcf0660c0d51d8e316507@5441920c c1c82dbc3246d4994c7110536532bd3f+17732191+Efb0bdf49337261801bd36e7f961cc766bb258d6c@5441920c 3469b89f618cf43d6964c89cb7557360+15491375+Efb4f84bd36776264d5b66193cbe06700c9c36986@5441920c 1c6c8cdd2b55b59763484fc736fcb2cb+20295749+Efd1b1e16c26825e6be2f0086e5956ffc2cb86186@5441920c 425eeb625e0e6f78640cd646b81ff96c+27117670+E6c651bc6fbf0911c5f0cfb13cf46643234cfd962@5441920c 467b40e186cbe66e68e27b497c146989+14464752+E6661978e64f282c9673fbf76c8c28d447de95571@5441920c 215e9957c31b9786166166d3066dc8c1+22592925+E24ec6bec163688076c95e6d575cc43c4d2185d25@5441920c 8e6d9566f2e6b368629c336c9fd6e0c1+21043993+E60f9744737815de11b5cbbf7d2b9bc26197710c6@5441920c 6903b3ef7b72b5c437121c8d75ee5f00+6526756+Eed896e26d13830cd0de7271e986638655bf936f6@5441920c e99d862823e5647d428cf03c85510dff+4646274+E7f7e0d272568f9d8353432e1d1284c6e99179ee1@5441920c de8752933c71e8e4912367c396286d59+19571326+Ed6eb12d8d1ec809bc6636806c89f0fc31b76e49b@5441920c 42b9673e467681dd1b75622d5871022d+12923669+E6638266df36f80ccee9b177392378fe0174654ed@5441920c 6738766901e6522d122065eb602706f8+9921926+Ee0506f3116684358651481b6f6766b6d61e4df36@5441920c 25ed8c9f9b7fc61b3f66936f6d22e966+2695507+E24986eb797bd7e2ce75f8cd7fd13502bd1db0900@5441920c 5f63716d6964f6346be68e50eb3477fd+11292446+E6d40765c1ee54fd31d239e1e96c25d6d964e6e33@5441920c 646ed63541be7c4b197e74200fc58563+40629656+E3228f646ef6d86dfb63090bc1f4540534fb12809@5441920c 2bc96d464c08c774950465b994786463+4060756+Ef6418662f5bf612877bc0334972769d5c364bbbe@5441920c 074f412860c7143944662f3579e8cc96+16610667+E7d989e4216744576f348473d58cb5102cd3b57cb@5441920c fdf162c24e1b743db60644c910bfcf26+29170320+Ec6c6b955e0fe664690d2364446326c2f16279321@5441920c d1e6d9e6512687494cb66788d97d6b76+21574362+E9e9f63bb64f611c623604e6f6f0222e0c8105236@5441920c debdb22c0be9d5cf661539bfdd628421+3619563+Eb95f6d2052bbc63bb931d21fb518f89531168e2d@5441920c 1b3b785b6f585c9f46c8b932ce5ceb26+49161531+E2f15232081e450fd4efe9368bfd8bf8162046667@5441920c e336b53894f0543d59963105e9678367+19746144+Ebf3c79b229c275ee7e1201257605016278153d7d@5441920c 782f48c017169e53d2c726d046dcc6ec+10946735+E9e78046511c67ebe2b39f5b21622bddfb87069c5@5441920c 
fdeb6225b7463435cebe00e7f86df276+6376465+Ef4599c2d6e757f7f66579b373e9e6ef0ed74b62d@5441920c 32d626f756c4cdf566533c6b2df652f2+26661567+Ed4671f20388d6576565fd26bc00d53f0e38b6c51@5441920c 14c4e60bd3fbded9dc8d11d6e970f666+13661669+E0d589b83806594837ed672319ddfd74f3cc39ff9@5441920c 77886771777c50587e02dd08866b75eb+13501427+E01866f494dcd7dd4fbe7541df16529447e52ef6c@5441920c 8b3bf3e5f6b6be1d667f36d1784367eb+13677551+E6b241697c8d0c97c142fb695936589c1945e9ebe@5441920c e12686bd46818f07614c0143b68802fe+15666076+E24458761c577527694bb99ff659b96c954dbc3e4@5441920c c710454601fb0f6e4d03d6461fce5f17+7996490+E8e9cc9e865e420e3e0cb0987f106665e80e7184e@5441920c 316eb301c1ee9cd9b38c6544cb7bf941+6053236+E04118416885186189d00220842078fdd82b105bc@5441920c 1946863de487f91790e10ce2d63deb4f+10726254+E1613e538b89d50e662650196b2bb46060e46b325@5441920c 7e6debd8e9fe0f58f0c0ee19225e4664+11356746+E15749f35c8f636eb7666f8d62d32f179c7f2b443@5441920c 62d6d9202fc0cd2099157526b4977b6c+7600427+E5363fc1d6f6c9ec60576c454be6e0e026c638644@5441920c 80f767764063d69fb042e73741108330+20722736+E79223662b666f482c76c074de7c948d9b81e9eee@5441920c 7de230cb3c601ffdc306c656d729e766+13729019+Ed6839fff29b73d5b54c16855f0cb57ef1f0d5dee@5441920c 566eb88cf65d80f8def689999ef64367+20246913+E4868dc526d88506ced164b48b2cb6ce669820484@5441920c 27250e8f350f3b51c756d68e47e2c980+26945676+E8c606e26b483c6e93227776776b116e63c7b6607@5441920c bb9e9cd086ee769366229cd0b32b5c09+3364670+Ef63125e4676b66d764234e76f314863e7769e3f5@5441920c 50e50111ef9bfff37663d6932f9b72fd+16155754+E056360cc57665896b629cd38fe14715621363de6@5441920c 72e864cd512f786c54b9f07646e66e37+12762477+E6bd9bff5c2926b09dfd6b66c2e969dbce9f53669@5441920c c339c751cf7d5166c30b8b21dbefb69c+16572364+Ef279e41366b796bbfb333ee55631cd9dfb6e097f@5441920c cbb37c74cd1f688d1c9756cffbfee897+12456663+E523b778eb6355bb66c2f5d4773d775bc6df25dfb@5441920c 819066f13ed2c71947e3f647656b576f+14524669+E62b3c65fee64e372239593516c64d60fcb850d75@5441920c e3635e4290543563388e94e1e6109729+36661662+E767f7d2e1298f1ef565e967e6170f88f7d6ec9f1@5441920c 2ce76730ceec8d843946f809c16f6f46+3149045+E882e0ecf259166b860f68dc6fd844cdce3fe49f9@5441920c fb2814493d1c484625bee373d5369cb9+13700211+E6be1eee5409d867cf0327d762d7ede7fbc296f25@5441920c 6ef39899b0ce52e83964c55f466f7021+7529724+E3095671946451ef2d9b129106c26f1e9515eb60b@5441920c 36e914556f2c8d21b82b63578764e811+7950542+E329cd0c0b244ff75d31782f2dbb7741619b24861@5441920c 895c6d874d1245d8e66455604fc45d3d+14756600+E764966661b47eb9946f1964e5ed060f623240695@5441920c b66ff865ef7d09bb19966902e62429e5+16443596+Ee56f4778f3b067103eb6bb8e0249fed5133749b0@5441920c 3e76f1361961466b0b95d3b6f8ece285+20106669+E01c84b2e28e91ebfe917067bb6671061c8db49e2@5441920c 63953f84933eef8bc8bd15c5d560c522+20056363+E4c6bd626c3b008116064f13694d49844e6e656ff@5441920c 5964964ef7c947f1c185073125669465+2567406+E064d861f4630b32521588b17290264c70f3cd71d@5441920c 379733627e446179436f327832659951+30547504+E76c3833c4d3698066d4eb966d179b85bc889e628@5441920c 3358c02673c23b84c37d83e469c72f66+21562054+E6008936e0c5343533bfc19f5c81ff58c3e2925b3@5441920c 46cb194289db37ee376f4f3346de0e04+27395356+E6db539216c1b433314f27bded4c6cf0078bfce37@5441920c 94310de101827648d6b3bc3c89708c59+26365676+Ee319940fb28fc2b11801e3019bd84937e2248074@5441920c 42220345631c336b5194ce9b573ed40b+269200+Efe4d5267e1d56103455663b90c06d54622e0641d@5441920c 2263e6126061fd7681b1d7e22b9f6e14+5237174+E51317e2730be6fb316f2b2b6e31d2913f4f37676@5441920c 07642351234b816b15e150bb6bd637ec+29727146+E66325bb50e67ef4de1d94737653dbb98761c1e66@5441920c 
2fd5ccf86cbbd0e3c3f366d7bfee56de+30907674+Eedfe3e86d6243ddf6d5ede6c86604e7e310283d4@5441920c d6859cec4d9fb1c68e391840579b56de+1504656+E69c673e18f46659560ce19e24cd642d7ec4cb3b7@5441920c 49620b9c06ec234288fe02c59e694928+14943044+E4557ff4e2cd1800c94b296ee059f895660b0d38b@5441920c 1e7664d9f69c30178124676004c5622c+33721037+E16ec6ff518bc86565f4c9dcfc0656e38cf2d47fc@5441920c 17ebf9c6bc4ec665ce79750639272662+24605551+E636c8155632762d667d6c9004f6738f927dd5979@5441920c 342b663668c683fb488c62ce8568b618+29376907+E156b0293e6de6662cebb0703b9e2b37386fd116e@5441920c f0ff7321084e5fb26b047c29b787166f+12633635+Eb57428f2bbc765e1391c660e6592684e76f624f8@5441920c edfc2352776c326d1425c8f75206518b+14797426+E136b15d57166c3791c3cec25f2606868be3bbdc7@5441920c 19556e814b8696d174614d2635efce37+13760102+E8e64c18124f98b3f0d615b89b4bcc0db5345471c@5441920c 25764e17398bb530336f104fe1f16fe6+26794272+E6fc3ce18868166e546e46d72fe289455cfc70834@5441920c ed98ddfbf7181c16fc299ee261fbfc82+10201924+E2de330f0e91b386d0d779d21c3918e998cdde6ce@5441920c 77eecfed3522b3b96d26b645e3367fb1+24124636+E2332473f67efcc195ef87657368074fc7b600642@5441920c 9bc03661300986db109ef2626d3742c8+26615557+E68ed6cff0f9894c2ec3e940e0c676ccf99b6c0ff@5441920c 152316dbdb21124ed53e3eb985b94dc0+22145236+E658096d502e9136b69b1fcf50d5064613dcc7d0e@5441920c 561c751762166c7b8fb609601b9f2f48+7311346+E81d4d07984d6c5e974c15008d4f92d663c710388@5441920c 012c01572b943bb8466fb8116e57e60c+12577740+E1c98f4cd9f1760b062bbb20bcc0131eb9cbf5821@5441920c 4dd985e1e9728f9d676d9d526c0225cb+21506140+E7ef21dd62f372fbf66c17e6164064bc9c1283863@5441920c 626622416232e782cd0874f9fc41e170+52369+E8ec7e615f231dfe25b603f3c178460c06e624f6f@5441920c 6b7e084ec85bdb5633ee1355933517eb+5076969+E3251c561406ffccf6f6678054cc66308160672b9@5441920c d944332019b54e4213694d720652f837+31190176+E51c7c1b974617f8711d31f1ed3d554dd69708b92@5441920c bc35e4ec4f310481df053878c99e2028+41160366+E72e6fc6c8996446f8428889039d6382c3187ee57@5441920c 32b116162e37fe261fbf44699d161bdc+23615045+E7616236b140e626104830c0bf9b63c3632defc9c@5441920c 3260853d69d0f6b96ce5b079b1f1037c+34031699+E614c898376081ef614581fcf012196259b247f1c@5441920c 9024866876926291e291e983816cb080+13651503+E44d2c5f757e5ce51df4bed90d213e67280c08cbf@5441920c 7f7352234c5c86d70eed25447b6f6e51+1996046+E68d1c68b7d65e0697e6c47285061b36474bc9848@5441920c 0866e053769fee5e5eb4c9315d6bc5f2+22692591+E44f353b0622fe8378168c3cc6684ee351e0105cb@5441920c db74b6286949f3b1fc69be2083982e48+6672354+E2546bb731323d421439cd1c6e426dfdc0e6f3184@5441920c 1f6e9090bce4972b5371f66be3dbe365+13749361+E6276f45e81bdbc0eee34e591e76b38385ed87108@5441920c 8495966c987b24d64e8f23261e40773c+16660930+E6b7e063904d76d68b68ce542095408b362230e93@5441920c 222648c113cd8d52179954bf684d5626+11036031+Eb563b11617cf4f44d7c31e51e50d17e0f398f063@5441920c d878ec2cecd3470c7dbf4291653e6c90+13412650+E0dbd46d19e8b6f8c66064196cdceccf5b762727d@5441920c 7658c35e0b91464508f7133dce6e60cb+14313555+E54e6f8e766224090ec6c74f776d30ceccc3de46b@5441920c e8789734411f44661e0fc74c1c0d36f6+33635703+E470928dfe26c643e0603f7630526232621ddc4c7@5441920c 8825382969eb6c5066fe78997e0c7bbf+469634+Ed6dec1e7f6886d2bd1efbd8c6edfb22edff74bc1@5441920c f5552117005f6c9d736496e2f9030f5d+11377056+E0c2be5653d1776957700311e5de86764c636bdc8@5441920c de62f65e30719327fdeb326c2f16d698+16346545+E5d6e6d619f640363f167467b2de9c64347e63768@5441920c 3cc990452997b05b51ed170d291f9f1d+21127772+E281769d6ed0579760f4f2342c1f9bc76618c8cc3@5441920c 13c43c4e049c7d067f0f1dde01648303+1059366+E3f19eee97b53b375756ef3367b86deb6077c593c@5441920c 
7d62d6e36364e35252710e47b06c54ed+6964270+E8030563b53b8d7d6c4d127c2e527e6f2ef56e98f@5441920c 7255f3e557e3be60e6bd18054b360f99+20073973+E2d6e29ce1c66668b02f075d99194392b83bb67eb@5441920c 32289c50f7dee66d59260463d7b85c7c+15769669+Ecd9f070e6f3b0555848c8506610997600db07b15@5441920c 6603201c20e0c24b9602169b3547381c+9756229+E3268c74ff8f0f67d1cf1d10c01dd9e2332dcec21@5441920c 061c6b2528256682c7b205b0f0f9d69c+11469333+E960692b62d3d34902fc765048d36081bd58b0e75@5441920c 80b649545f654616348cf66f4dff90f0+11074951+E49c53fcd4deed6b62e3d292e66e2948716e7e1cc@5441920c e736c8b66c29160f42d7ee5bd649e636+26145091+E436426d265d3d4d65658e6b39405b82d308639fd@5441920c 961e212b3d7f9464c268692761090f6c+20545569+E4606f1cfe9cedef085404f8465b915190c8cce76@5441920c 761fe39e125f6f19585464b661706631+13562476+E33877770d62273e62e345f52b755f73fd56c59e7@5441920c 241691bc053966df9f226e308c46e36c+19737049+E86d7f2325737cbb78d6ff61b583ec96fc4c8d0f4@5441920c 6e367eef8cb34400d2b43368893c81b3+27529030+Edff31b6b50ccddb0954c28c8cf38ecbc86417510@5441920c 05555c5fb49bedcf63ff878f1cfbe3c8+15452164+Eefb61e71fb4066dc56c247904be42015ef755861@5441920c 354e0c970b39c6956fc9660eb7367b61+12062565+E27cdb80616591bf8781d75f2349c12c7261338b6@5441920c b7c109d474fecd5b568fd8e460e81d02+39769591+E66648208f40b52f1822c01c3d61c374b1b656055@5441920c 6b6b334d6fc6ce94572fdcc96dbbb204+15604669+E690739f9742699cd09db1fb6b7c8f864916663d6@5441920c 796452beff88c6c0b46efc4b93f14ee2+4141622+E864cee574b2995464159f65fcb48768275ec1649@5441920c 61b6165606d625f9e2f5d22966e9f6c6+67106664+Ef0ce0c9615bf03becf58b76695fcfbf57596d5d9@5441920c c3c76c86ffbdb4331c4d29705f7bf508+13102561+Ecc8d369181f0836cfc5964c61e1e36945eb163cd@5441920c 40b30d29c63466c5e6239f6be673e456+16343642+E2c9d43453c0772ddd2619efbec822e08dcc33967@5441920c 386dc864e33f0436b915d5fe99e568fe+16664730+E20655b581566fdfcc78c0210d212eebddf4ee191@5441920c 6310b937f32c88e68c99d1065dd562ed+13661616+Ee8f93e9678226b32596883f5283d6271b57cee3d@5441920c 9dfc7371d62085d018c01f6e734d7666+26472421+E6b56e878337cbd25dffd733e1613722630682615@5441920c 2b26b973d557149726460d0c84dce8ee+13161766+Efdf846b7114c9dbe0f464dd7fe5226600d66decd@5441920c 4822d05c1f1061302f5e90ed3e33eb32+26136564+E293c1b58e1dc2eefd8ecd3dd99357b837c2d1165@5441920c 802f5e0e9957fb6fef23f77d1826e5b5+23561374+E05b6f3952dffd11ecf83c61f3eb2dff941cf0d48@5441920c 6f81c5950c9bc67d7bc82566e8735fe5+22349651+Ecf7818e4574828536cf5416cc67b87e5233b1586@5441920c 969c688f7267f6e313e4f0fe1c97d3d4+9400437+E697ddb147c825d4b09d2f56466ed5e61cccb10c7@5441920c 23d33d2b86f5be5e60626f213453696e+6696401+E5476e70ce697f686bb63f6927c758653123c7926@5441920c 01fb6544531e50d5dc982f54c6945839+14463365+E796ffc2fb3492cd5f70b9e46b09e7b904e86c186@5441920c 5e10cce37c60cc768ece04794589b362+2797932+Ec7bc4352c6c25f73fe54b62f671673701b676488@5441920c d27e35b3168f6fe30d6446d469cfb82e+7140760+E8e0e1d27865edf69d6f162f262f418267864b716@5441920c 79ce0bcc5f565e689c44df3b2f299690+7956760+E6b405440347634c4d780d9cd2f751b1b74801821@5441920c 6429667e76cfd6c7049b9f2dc83d2e02+26100130+E1c67439fc75bbe8822c11f6be411228c75474346@5441920c f63d64c68edf1058f8042054d9e608c1+15570132+Ef59753bed1608c150b463db19e0b824c56180472@5441920c 3d76591c1fbc9b1cd43216b53037d3b8+12079936+E3659f239292e2cb4c86885b44c6669507140f5b9@5441920c f438cd1e753312868038166908b7746d+23646496+E2b187c62f3015562691904e717f0b766b1d119f4@5441920c 476b689f6d00f5d5c94d4bf89d2d6f26+7320072+E7e4d35700d55497f8cc8188559d256f046d0bf16@5441920c ccbf6b908e6d39954627695372c66646+10249929+Ec220ef724e48c90b31d0c396802df409203f44e6@5441920c 
e25bc8599399b2b9c174d0b866633d63+13622024+E736877b72407836e424889479e46e60506db8c6b@5441920c 573e08705ff70f79d328c60c0dfe1151+7329647+E978fffec456ecd2633409ee866f9bb9311d976be@5441920c 5f2772d86c6567de1c03fb9b1535e6b5+25915639+E26e094692d34cd8e6e51f964bb8f147be4825d0e@5441920c 6c5cc886928952bd46f1e0432e966c39+6902437+E22272c74f82664339e62651c6373fcd997684ebd@5441920c d7581c3bf65327e93bf6cb536650063c+19367309+Eb904b6e6c9337464e0bb3e3b1fbcd0bf4228726f@5441920c c4dd8646b372463c3ce23c3604418ebc+10334901+E680573c727b403b3c7d364e9076479e6c68ff635@5441920c 16fe696306debe5906c75fbfb4f35e82+15956391+E68cb974c31829f20f4381d605c396ddd9021502f@5441920c b457f19b1c560665968f580861bb5519+22361464+Ee0e5f7040fe15c3d1138046b4204e2d81ffb09ef@5441920c 6283c883239d206dc8d7bd20439ff2fe+26762910+Eb6598d1d22ec11840f06949940cd671e16f54d66@5441920c fec697c5e865cc4e4587d9e2bf4b1df4+27462517+E6cfee6c054636f17309efc8185cd86cd1d0f2f28@5441920c e369d98390996c5b6d124db79d188615+17696144+Eebb1069fe1f6f406c36e2bdd4ced45961d1f63fb@5441920c 6566d9d439d70e07e6590b7232bc6dc6+16115379+E4615bf36e6691866358c30874be71993dc04c491@5441920c 78ebc34f2f582b1e58e52b36cb9b9fd3+26603399+Eb868f96c8010eb08b8bf48fd6689d884962fe856@5441920c 7b306f84f006e652f346640314e565ef+42767332+E8696fcf20e694d7e3190d2263dd0013486d9e286@5441920c 6164704991bcf25741294e26fb6f1033+22519054+Ed30ed601783b6f824d96968157e0ff69d0199301@5441920c f7942548dd956c6c02c1eedfd2755947+15623994+E8dc622096c66e7e459680b0466c97c08e968247c@5441920c 55496f6870b58c20c61c69e329f86b18+50651137+E4d15666681f614d666ffc6033cb2564fd498b422@5441920c 511635872c2be5d2773ebd578167369b+2340763+Ee6f8d691449ec061efdb7db6e67e686446660060@5441920c 5e4db617b4b314863d3df7f5f7d40b46+12296366+Ec52619bfb7bed7e38283cec6c31c629f3b43609e@5441920c 2c1c155e211f8615f348f56cd4e2eb68+19160541+E4d61dff6db10bcdd89f30333c6f416c4dcb10050@5441920c 26793ecd9d648d83017188676d1e468f+21150112+Ebe666f5f9db78499070dd5cce17f1801f5856395@5441920c 5c19db5f2feb0ec6cc247077326132b7+15934102+Ef2c65268fb7556e5008c1ed147e6cb62fc23b8b6@5441920c 18924490df2fe7c8bf536710b6fd6766+9572247+E7606e9814ef7776e16c3661693f0490c94195225@5441920c 640e94c562bf36ddeb0dd226029eb0b9+37063925+E6ef54638f818d4fe8c3dd65c8f3366c7e5d74607@5441920c 14cd1cc7e24f6f166bb26dcfe4143ed9+26279656+E0e62c48482369497792441dd4672849654fb0616@5441920c 606fb1c0c699c7ccd315576b02e692bf+21312663+E05ee10d5f8cc07fcc3cc665d0efe3d1b297cc615@5441920c b42c6410199f3c4b0e54cbf94ee88980+17966553+E6b79e87c11e7fe96d5d960fd875261711e66f06c@5441920c c286916e594c40952556b7857d67e889+20502272+E1b528c0bb53c020dcd3581d629845bc1c25316d0@5441920c e285ce576d5090b707f24dd699667c27+10454346+E6384d44e091f0b6379d8523d6defc6cb6975eedc@5441920c d530986cfed06e608fecb1191df8c11e+26240932+Ee061638e4f42024ef17e01b02e67383f15c14593@5441920c 3536b5d45d919cb866d1569d96f9e939+11477343+E19f085e4dbd379e83c9856956386bccb26495d6b@5441920c 956fd18076397dd9602e5c01ef76623c+16121702+E976fc641f109ceb585672eb795e964c6b8f2f509@5441920c ef33ee876d98646e6fdcb3867518b6cf+21665969+Ebc9b108234b28642df30c976460016486d27f2c1@5441920c 7426c8c56917966f5e7d867133c104c4+2106601+E63d93fe162433e6744e8bf1f63613d3994d46615@5441920c d12b745d9bb4de069124635140d94e66+22234696+E39d77b9c6db4180d930e44d7e77594b7328cb8e6@5441920c 7cc94de0506c1800b23456081e828694+17466445+E3ee18b1031435b6c714cd132d53324f3ec004ee0@5441920c 1984fed8feecb6697671f6c7629736c6+27353500+E69652dcf6edd66d6f7f223c87526ee683550ebed@5441920c f2ff43078422e101efb31546d513d917+7951115+E08c6d30fce60f953131ee9639397d6b9f361b6f3@5441920c 
386b40bcf914276c970f642f66521be1+10132647+E69d7529f917e1874c44c21e3c1d391261690268f@5441920c e09636e09cd7d8c32b6663e95678f4b4+24122390+E115e9506efd385e3c03b51d274b136cc283cdb61@5441920c 5dcf989f58765256e745395de2c16d69+21750606+Efb7615104f94c7b4bf48ed8ec84e6ce1f884632e@5441920c 686e5915cd9858003f6822546d6b6d4d+15546705+Ee8d6933f60c51411f136b86962dd7b30c27f466b@5441920c 01486366fe6d0482971666c98fc70766+30792695+E49c08c45d856d386f968485e4505e36fc823ec2e@5441920c bed62e9e6bb42eb6006f7065e6990e18+17604912+E9cb886387c324b05c6be038882bd29434cc49e7c@5441920c e2859d677d7c237974c872784e13e6d3+5164960+Ec46db70c565633fc267dd6d133be6bb5891b6c4c@5441920c e757577865ddb690336d4cecc386c3e6+17296739+E7d1dc238f71762ccf46766627eb215be08b3d5b3@5441920c e2d2f7dd057617592cf9e4317535b11e+3301773+Ecd6413cb8c4e5b795dcc5680693d623b91744107@5441920c 0622825df321b6b36886ce672217feb9+3676756+E46d666c70e222477b3337606dc209e5f6cde7625@5441920c 23566113cc3c2891f84df717b6b5ceb5+13263209+E6f1d139cd24c47f6b5bdbe5636d49d2140745175@5441920c dee0ecf366b0e469126252646cd78667+5712724+E67405ebe84168df10534466699ff60c899055389@5441920c ebe97bb12f1656d3176bb8ce05dcf62d+10516666+E79769813ddcc30681b29180676df6666c06b5164@5441920c e16b6468eb876f7582d666b4538796d4+10144603+E6681f4fef94f71787c6bdf60f73ffc31dcecc444@5441920c 908963806d665f6692de6131cb688e3d+15620599+E19d63801835710d6fe726dfd3002226d59d1e6c2@5441920c e55d7c07c1351d8d52db166be6b8c09b+26940326+Ef7468138dee02cf621b8869c9b9e50476fec05bd@5441920c d4c83966648b99e8dcf6c6494d8d763c+6160746+E24c413dd9f4c938f3234023714c5dde6dde24d2e@5441920c 530f167ef4dc42e18714b4d6fc79bed3+11144267+Ebb553f3bd952cf396b654261465b55bbcf814826@5441920c 16c2c511d3066e6032f465bee26cb26c+1431977+Eb914580870bb2b6bd01dfdb14bd83331470484f2@5441920c 0b71c2164058821f852fc4456876c7b6+2244756+E6c07eef683e14f34fe3f7f066e33c3333304e6d3@5441920c e65eb96f4c91605b01158d56374663dc+9266561+E0b5bc104933b16464fb9d3f15555b6eb321ff820@5441920c bf69063c2fc34526666771506f68bf5d+41245659+Eed1f87b918f56236ecff73bde704699ef23d9fe2@5441920c 6056068b9e9989c1c3260c3501865930+15344510+E04e7df1e225c11c83512b4029fbd2c018b256c45@5441920c 3e5b8e59d577b16b6e84786242521806+24932791+E74c4d89582d84340cdf5465fec29706076667669@5441920c 634d6b1d6146338e38344547047643bd+22442446+Ed776175e050fd858036380649d6482d49287d096@5441920c fb5b4283359e7e5e366c3606cd8894b6+7752724+E680f4054419d6fbe710970d65d33bcc466613cec@5441920c 9b2911bb7fd67f6cd4f64337664d831f+26224360+E20cf4b6c243f160fd6f86253cc6377cdf46873d6@5441920c 63f6692b0fcbe33870031f8687547dbb+17304639+E92ec56f25f729945fb30562bef77f6684645658b@5441920c d96d6835e084f2c1eff67c52f566f6cf+16113075+E454fc1c125573183c69bd5e5cbe26f4bd4412670@5441920c 1d2eb0963b1fdfc11f6ff534162728d6+22233411+E60b2eb26e8d067f3d7612bd3cd6fffc46de1fdd9@5441920c 694b1b84ebddbe61749d6c7744e2e2f3+5524922+Ed932398d61660693e39554b50b2212f8d4960971@5441920c 0b42c92d97c0877b04d33666f22509c5+9664262+E9ce27760d3e7e05b965366f712b5e5f349638f54@5441920c 2cfe498b5b41ff5586b3c18fbf175d68+9160746+Ed2db55d98c2efbef816f30972eefb7f366705618@5441920c 6607b727ee38d0151e22927e8432e2f3+7956752+E20e43f0628df779e08c742dc2651861e4644b161@5441920c d782d2966fe60ceb61760be1891ee909+5100756+E35634fe29d03c35d26f2dc6c03f60272f0674160@5441920c 606030c626d5e94c4062618c3f652b38+9937902+E5662749f1f2e19249023941e760f73fd6df66334@5441920c d62d663092ce6f4d50361f36c0232049+19546232+E6316b6c1b16bc310863d18e7e387e35e4e001d27@5441920c 748d7e6865bd463de20915f53be86056+6663394+E7609edc173c34c9e36112f163563762933d1d284@5441920c 
cd0b2e572966bee981f066b967c25558+11752445+E224d2e284600166f66b0dd65562f01e7f6bb495d@5441920c ce1806850f36d94b326506b6d9763515+19256022+Eb4b964d8cf18b298376b5b42e1745c925fb6b568@5441920c 3496386d8279d2519c237764d914f862+12954653+E94e21b2f6c32195f270ec96567f6135e4c9d9f7f@5441920c 2eb661f4b584753660883614c14650e2+23233415+E22c6fb1b5e3d821378772485590ccf287b46153c@5441920c b9465b26065de0b6fb1fe66660060fc3+10667296+E95b5997d369dc93fc0bb8646217870cd50110f4b@5441920c 70485d53084d944674663bbe07336639+16966664+E5438d9ef89f2512b426cf230e9ed03461e490566@5441920c d29c2195fe6226f7cbc596264b1ecb9f+31566677+E9107c836bd1436d9d8f06e0fe58f74c36619eec8@5441920c 170d605b14e135f717cded781b3659c4+26966370+E2e596e0187c64ccdcdbb7ee379d59139eb84fe7b@5441920c 0836cfb1122130ecf01d5d2bf06def42+10993650+E4076b5402f7e21f1639dd22370286d76b7c67565@5441920c 851e496848b92598c551313836610426+33045521+Edb0688f6969e275c687f66bfc3be0318e674b13f@5441920c 6e4834f41842034f423626cbc8c2684f+14150927+Ec802066b016ec6ce60661b2b46c10e1663b0915d@5441920c 90e67d4209cbf05e9df318e63f645b54+23576635+E3b614e8f064e641468dc25c8340edf12d10cfe33@5441920c d933cbcedefd194be06338cd661d666b+14665552+Ebeb1c374bd74c56625c164d2e9660134d3623069@5441920c 76922bc107807f84b39fe3c763def0d0+10410131+E62f638476bffb582fd56696696ffddc344d55417@5441920c 7f523f4e5b7fe74b758f084c68f4cc3d+14156634+Eeeebe9b69ffb2424771669467f0c53ee18323294@5441920c bff64b2d1466d691b7967513cbd13dfb+7576172+E95bb1660e199d76d1fcc3bf756844d334bcb5ffb@5441920c 5346366f8e228259192b1fd25fb03174+44109465+E0cc4fc81f3e00e2626ecd5990388e38de4758611@5441920c 23c9d6b46c17b44f615072641d7f1ce4+31254935+E3b51142315f65f48b8e9cf29299712b55469bd9d@5441920c 18c9077006641766e2b0fe36b698011b+5169067+E083cf08445fb6c789417280f85938d6dff9ce4c3@5441920c db7d18f27edc3c6ddfee633731c2be53+10366921+E08619bee42652e512c63090e464049ec58f5502f@5441920c 6407825d48318f5626e310333b4b968e+29052271+Ec14b855cc4525b3d31542216d7b03c74d301068f@5441920c b96e133b4557374be68806e19132e43f+17612627+E775c6d28d896678b02f419e1215405db04db1dbe@5441920c 61f26b19764e01221b6f589b46f2589f+7641759+E4ebf4463513e7ec8326e77eef8b443b7deb6fd30@5441920c 580506ccc464195d925e3bd2d37c2b89+13411716+E534f2d36ec36702153d2ffebc88bde6d16681314@5441920c 0ff594047bfc075755fb6f6d368d4446+17757245+E686ff1689039794f81c901ddceb5bf96b002f471@5441920c ce16005db1fdd8fb664184e4897ed848+26954567+Eb1925567366e0b994d07507992215736ef795c6d@5441920c 668b8606e229bf34b265b5889cc2555e+23246223+Ef8b4096de374b658d17c366314fb2b78d604055d@5441920c 606b63de08276f2fe96e97839f80c725+19074161+E8864426fc98765090568d788fb706bf66eb2025d@5441920c 6f8436bd2f31836f58cec1bb0ee05636+39449695+Ee8933dbb69c24d6d37c456817370dc907b5c95ce@5441920c ec05d19ce5eb8f336eb13e2557d63124+11696577+E964d26d1560036f27bf8776572362c03e4e9f7f6@5441920c 44385cb347d456b6c56885e8de160e6f+13249663+E46f5be980854e6bfc58c55668db28c4090cc627e@5441920c 706fc99f3855ffb94b6d81c5c62b069d+6706592+E1053643769f364b1dc5f766789b747f0bd383d4f@5441920c 6652574701024f5e2f18edc6fd036681+29162964+Ee1b80f61e7971c1f4b3e55260337cd33266c0f00@5441920c 14763654521f8d4f6bc427d3b52e1121+10264945+E69bf6eb86179653066b92c30d288d34e698fc996@5441920c 8d6f406ec665e80c3986fb2d0dc39ce9+24601643+Ec7326d895814e06dcd041c099e25f26cd4e3c214@5441920c cb32b5ed637e57de216603c266184249+5951761+E16690fdf9283ebfd35e13f67307432b23448fe63@5441920c 565e8136fe7ed38f038b4236b645375e+23795506+Ed68ece3056f828c9e5d7fde3f240c404d1c472f3@5441920c 710dd03c65252f2187e76e7666d7d120+3150007+E19fc4021fdd7040bef16d9760fcb94c312665319@5441920c 
9bfdf090c2776f258c39e549391c1612+23077469+E7637186506886b0e54b1e3e32cc578ec49c6f3b0@5441920c 7792c138d461748e9e5627e9b0c76c09+25966072+E82c28076635440613cd55d6993fecb03580f4cb6@5441920c e8f20b6349d80e1107ebee500169b8c4+20640325+Eefd81f4895e4ed54edb560bc5586d8df43456678@5441920c 4b61f2ce70111127b33f6249665c3d47+31996632+E388eb8561b2724c225c873e14421df59ecf09fc6@5441920c 661fbce63c16de3520d44e033134c6bb+43632512+E6ebc3f333f55fd0f69c7c5bee2687d1228933e8c@5441920c 5e6df4d8cbb16dd620f373369bc8c9d8+13731959+E16052995d0bc657b3759b020207ff0e3e41369b2@5441920c 24f521b431f2d770f1338700bc6c6917+12656172+E6e287eb610cc483fec0218d27632b69c546d01d0@5441920c ce0c0138e32619861616966d61be5915+34247127+Eebe1b981633c8728f6c69815fd3b88678266ee96@5441920c d870092429833d18585b6f4ec01dc640+13266016+E2d1df1258701d32d0d01f7613b88cb196f410262@5441920c d0728b8d923894b266361b90f06c047d+13161256+E8f4ec81d6944833c3dbc3cb1de240cdcd5f32ddc@5441920c 4c434f76f60d949088278498e5512652+29663052+E7bde3f3f63c9ddd47d466442e2c7116bdd26c9ce@5441920c 953b273715e8c9c1e89155300fc76183+30634366+Ecf3b53ed7d6bf5fb62e0861446615623596fe359@5441920c 432cd2317e2e713880e73660431c3648+6075493+E4b8e179f8c779951f5697e454dedc3b2d5dc0498@5441920c 3251f5bb90f3e37db864b31661297db2+21661204+E69bb7d26df5775651fc098e530697067ef65e343@5441920c 1c6168d1547d01601c45dc5485d6c8d0+33606107+Edf5663b3b04e139dd68bbe6960d6761d14e7d96f@5441920c 0482dc465426bd763b346e8474e3306e+32791910+E716d0c2d56e2b3ebe74922786f5526cd85650c05@5441920c cff6ce7521e98176cd89b1996cc6b2b4+19669112+E33760661c08ff206847f837f6629f2246eb24f90@5441920c 66b606bf24b864bc3e165cdec26cb2b6+4741605+E4175e96fec0c423932d88eb21ec0c63f077e9683@5441920c 63c8d322dc1998841b3b5461070969b5+25904705+Ebce6d95c6e12c00ed08efe4de69856ff194fbeb7@5441920c 4c672b0f4d22e5b6eb0fce791981b5c4+23619321+Ef803e6ce60d662931e60c1e7eb0e0307cf719639@5441920c 212c8b6286520d28024171b6316919f0+25423194+E15c722ef20ee1217c4d25e7be6382e10887c474c@5441920c e9477cfd645d0e20e85ddfbc827b65e2+9119290+Ebf9394269e1967cb3206c86d19f7806bbe48786c@5441920c 9cff6d241881d3fce55ed434be59b717+30796914+E7e404f9edddd288648e0b656bcc8c651e42eff34@5441920c 80666518360e529e9d92c201ff716b4b+19924674+Eb76685753d3cf6e3f358c407e48f038d7c351613@5441920c bef263464b2c50b53b2f6869099c0461+11135309+E6951358e80152e40712796312f6643ee63017b3e@5441920c fe8d5b36b6408eb475c4e1e446649058+10940177+Eb66e396c194c25e7e9fe16f92e25521db80e4036@5441920c 68d36b133c7dc81f5934d70c0617575c+23560116+E2cf474592b605bf861651e09cff2f62e166b351b@5441920c 6332b32b6617659b3840e701167c8222+14661122+E5627d019f688cddeb6b8960629d88973232dddff@5441920c 4e4e6b6b40d792e7c70362b47c968b2e+22609615+E4db8cb76e66db83f0806649bc2101046d4e6b254@5441920c 94668c2206155f7949de7c58f61404b7+15046616+E0f9495622572b5ff0cfc988436bbdc76b6bec4f9@5441920c d69777eeec1b5cb599d6614629b5142e+22166262+E27b789796f685c194502ce65578e5b7149f65b1c@5441920c 95edb266c0f8249c9fd662bb2b15454b+2056060+E8e86762f71e0637c7ff02fe61ce0e4e75f07286c@5441920c 6262cd49c44b278f3dc3c26626811601+521252+E23d8b5757618266f67ce07c658f9b85832c2d162@5441920c 00d93614557937f44de6f05f32c52790+32234144+E0f9f4506067b8b03b2c341f52315fef5bdc7848f@5441920c 3e815000b466f885958480d629962711+6441932+E9736c4dcd0db18be05035b497394832e06c46263@5441920c bde54d38f7b66830c212f9fd8215dcbb+10946699+E1be619426673d73606614825b965d48c72335766@5441920c 6240c463730c510e1d4c78899ce6d1fb+21772696+E480d296eff2c33873c7636eb723e8731c6959ce2@5441920c 2643b11d023f4f0f7614363664ed94e7+27069700+E0634917365618b24d91c37c3297140430ccb2556@5441920c 
b8cc10de6847ec2063bdb661f54906ed+6545313+E4490384364cc607681f6977b50863dedc73607c6@5441920c e20c9288b641899dbf4cebfb56ccd9c8+31767795+E44cc68804efc112c269c26c4369c67ee6c3193c9@5441920c 3b20e469d187234df0465b86666599df+23275612+Ee611c6930c20756cb200b2cde45057c242457cb1@5441920c 255f4954bcdfcbd1d4e0defd92985d29+29739564+E63b62f61cbe87116d56397b35bcdc3b91114db38@5441920c 5e64c3dc3fbb136403f47c18b06d9cfd+20035093+Eddc50bf137ee36fbd1f1fe0546533e97536926d5@5441920c 600fc94c672bde2472e2447effc449ee+20162106+E47b5116de964dff76b87bdefd81e56666f020f98@5441920c 057e9f3925259bf349d351cd75e01146+3767564+Ec46b4b19673ec8e14b697f77fb76c9822e51b100@5441920c 6d3c76220e433618c237ef6dbee0b20e+13561503+Eb1d89b38199393599fc613ed23e359eecf5880e6@5441920c d227c3f6355fedec0507675b7103e86d+7002557+E461eb64220cc189b14f845c1c696e7020c63e1fc@5441920c dcd0ed86431d171f935c2f86d6102166+23576165+E381238e16b036dc6df7614b6ed2f87c4324c8fd7@5441920c 2666135948e62048f269cf807ee5039d+6615671+E724406f1702c8b9d4b54865b7b197de08e68d057@5441920c 0bf0328bf9c3e9ccc6c386febe043cb7+24662143+E11355cc83b85db6d6d743f608c9db4e3777378e7@5441920c 186647736b93302dd61610f424c8b366+4697534+E40386c60866141c75cc84c3ec39772920b7e0196@5441920c e4ce7d1409ee024c1646e2bd9116d96f+9636940+E88d6ce1c9b866c666e86336e9d406b678efe9802@5441920c 463bb99ff72e344514e00683836dd4c4+16496116+E16301e6dc3b6d434214ebe82fed7333d2d201661@5441920c 3c8e07176c59686e683938069424ff92+9016631+Ef746d69e963c8380dc3b93f3cece96f202893f9f@5441920c e35874b80f896ef662c8ef10509d8612+17929166+E60132dcbf4fc5851d3c387420d13255457dfe9cb@5441920c 0f6973694123d553842f312ee1e7f9e0+11594711+E6ec26bffb5c736d335bd77d57d8c8e2f1be866dd@5441920c 9000642526f472c77960bdf873fd01c1+20306666+E8d23013d5065109c13fffdf723bd66fbb608b949@5441920c 7b237e32669f8636b96ddc6bd4bf63e7+9030401+E3c2be036b14dc4d7ebbb5e86e08eb8075cccb8e5@5441920c 36b36df49e07763417f98881786d8559+14696627+E339168621266131471b452731dc9f62f73bf056d@5441920c dc5694ddff5394218744de5398156668+10006611+Efef3e681202d6b2c713e659ecdd26df28e73867b@5441920c 06f25d6f4944c65bff12b41f65b9ef6e+15979710+Ef66d40d2473b250e733bce068b416ce526cb3f53@5441920c 3e333726fb6849b943b4484c1861599b+10116166+E3e7426053bff164299d4d397165b0fd6220b8dec@5441920c 12de047e2ccc029d5e058db8fc27468c+17606797+Ee906d267962f6f0836c36e376deec2676189060b@5441920c 14beb6bccd174c0d46c298320e76bdcb+23227207+E234249e7c4f2bc3e62fd6f6b0443f0cc543f1147@5441920c d18bb654f006323febde3039f09553bf+10556009+E8817cf8fe57546d649f5d4505d43f7e3f179e3fc@5441920c 83f045513b4f3f4021f65e845bb6ddb5+15162933+Ec1dd1156d1363c11f887cfc8ed001bf86835b2c5@5441920c 648336f5f4936526b645d64725621826+21307590+E6de931671f89b44940b760352e2d0ee530442267@5441920c 08f67de336c072c6d662b6cf2c583861+32759712+E8d66d86d62c0cb53f7b7b45d2f5b6e363622e2f1@5441920c c80193521c5914f978569b404b5d641c+16676434+Ee05b6346211c4f606521ce55234127eb763d5bf9@5441920c 436322cbbf425f6618b10c6d369ede1f+15100163+E60064882f33e83e1c12473c20957cf176fb53e73@5441920c 0bc877878b00f4dc96e0b93be374d5c7+10434017+E1e5016dfe71330e966d163b325c738b126533ff6@5441920c e6b152369e78d29bf8251e0995bcbc17+7765476+E2e56fe08f21e08d1693903bd29722e3f679bb7e2@5441920c 4e6f2124dd087175cf26b53d70f2f816+10542610+E481819516d33f267c0811e007365904f907644e1@5441920c 61664890644031d2e269f642f2669466+6767004+E3ccc64d7245c916b473b8065418527f5def1209d@5441920c 358cf83e673e0b5c9b9ef4316367b369+37253134+Ed1329475fd3b9680fef7648dc3cd0b086b4fdbf7@5441920c 189308e56578d296d153b6d7cfc8ffd6+17163652+Ef2342c38299bd6c8033cd9066d94f9f0966216cc@5441920c 
480cc0e63c7d3c15621bd664fd67b509+25746921+E0c75c64e567f7cc675633894b80d2c1f55534c9f@5441920c 6967c4f89e8b866d18e8015d7786fe54+17296262+E6ccb6666326b056be3b82ce978b61656c55e81b8@5441920c cb619e0608d05d333ee66dd60133c26b+19044617+E582576631693e5f9f6463c79285c6399f10dfebd@5441920c 5dbcff65061e1859129467d669376e57+9216326+E66b33696790ee67567724fd77bb6f081ceb0b1b6@5441920c b5cd4c03fd56ce74f47534305f41966d+7709647+E369d45c56d9120b7f54c926cd465b6508c061168@5441920c 021c56603b593fe7b0b9341d3e69dbd5+9992471+E5904c358669ebf85d9672d96b1f05562be4cc1fd@5441920c 25e6d8e21638046d71fdd9236b5c3bbc+16105743+E0b235dd4d6ecf49ee503194de09e83995bbb8b37@5441920c 1035df4f548660343340651661d54861+23723049+E860501658363d21944c46d2861dcf27375cdbe2e@5441920c 66c958453bc3c0536053228807554242+26740659+E6bf768f4c64b76980e71f3986f653dc17dff3fb4@5441920c ee27ffe36861e4d610769c1ef36e81bf+39101465+E07f66cf945d7636bbcf48500ed84f6fede43066c@5441920c 366f8f5b1b4ecd36c3d4bc6e28454223+13179037+E0ed961bc4edefbdd47c9659b746fc485361b3866@5441920c 4545c0483335c548e5454620e8087531+23659026+E16019b3165d481b46fedf5506606dce182507e93@5441920c 27df881c9c906dc3fd04c2ff68d7f69b+6320674+E6e2799fed96fb6f5f8ebf32e18680b691d9528df@5441920c 28b9b320074163fc02b034e862246754+22624620+Ec26668860c2e9b956b8bdb06b69536b65f34d974@5441920c 5968de9783406e3cf1585824f3068095+19209706+Edc1ec66fc64dce19c967735840c19791e0c7b9d1@5441920c dd6b0299fe83e269b456540618bd4837+6364513+Ee18b3f6bfdc9e1ff8f927e61e1d3c9990bf259e4@5441920c 56cfe9b7296366840fbf3c9d0cb2bdb9+4766253+E5296479852716963f5749f7866dd919322e284c8@5441920c 368b66c5265db5c5613316566f7f9652+35016116+E8d446e66889974311f73c0b7cd682ed4dc4163be@5441920c cddb3f3bbf9d3c545fd68d76983b45bf+36549974+E6f26c14bf26fc4c57903f698475076cbdcdf1f07@5441920c 7cef19426bed80ff83f9d900c8178667+20373460+E5c6cb47d34cc6495fd887903d6d6666d9505d761@5441920c 8f32865c19f724438e2d9b648c6640ee+29919661+E16c07cb08b67feb34fb9c76b36206644f898976e@5441920c 09043bdd449b7946c4fec913e4217364+13493460+Eeefc5c2c41ceb676feb866380ef68062579196c6@5441920c b5d1be0d12754de15166479f927dd02f+16466490+E28ec14599eb0db52480483f68be739f4ec638686@5441920c f483fb8e54f6e1763799e9df42f08950+6660416+E962653dc7f63c1f1fbb6856633f1c2b857de4cf1@5441920c d4dbcb0d851764f4f94e4d62996d7261+7796021+E91f5483255146fb4f1eb66c2b797b6e924b8b108@5441920c 6691b09543c044060441936ee10221f5+14575657+E09fb9c6678805f0e7b29e290177f1d2f3916f0f8@5441920c b19b9c3869e39becd78c8053ff63c6fe+5634479+Ee35b3b397624685016de4586c9d96f57fec9fb4e@5441920c 94208330d58de63b7b603355845e2e9f+29716269+E0d811ef3d936670d06e74b9fe6fec5c86ed5004c@5441920c f024f736cff9618312339bd9847f08f0+7363995+E4b972de47eeff1038c3be68b28c652c6750cf1c1@5441920c 8b66166d236682687322174e707f5bf1+19715177+Effe9b54cf6b0b4156cc78334b71ed29cefe2fbce@5441920c 687925fb3f001f6eb17e262f7f3bf6c9+11922350+E6e45db64c158b168d9866928667ed8c5e400dc37@5441920c 811bc86929c6cbe690e68e712f81df76+34696356+E1d9dbd19b5b1f6d16697890d4136646e0b250567@5441920c f88343667b26669cebdb91160bde17e6+33645974+Ef0e4ccb520cdd1fd51f4008b596e370b8920fc63@5441920c b9e2c0b204645f0b5ee13776052de068+35567370+E78b5f0cc1d71b91ee13763613c715b5c0d946874@5441920c 516603449b0e68dbe1f10916626c66cf+15611642+E35b47868610850f2b866fbc566936872708ec8d0@5441920c 5ee193db01448f87063d7b854d07986c+27146461+E8c3b7df08f26c457d654c4d90c956b75d3856660@5441920c 928de1604d0f709c62e23fb2f6c1d3f6+26736354+Ec954bcb4e6951f3fd82e89f4d675d0d6655b5ff5@5441920c 17c99db9e4d850c53408ff6593bf4e6d+12053649+E6418566222886b6e5003c6804f92327c66059e3e@5441920c 
b28d67d5c60e0165d639695ccce06c60+45621670+E39f873b0cc620266d04f5b32482c68e3ff3fc15c@5441920c f7d21c881b4ecdc6105befd96983d442+10457142+Eb98210f27687e94d9f8921502c56bfd5b0606e8f@5441920c 0bf6beeb6097903930dbfb6f397363f7+27163032+Ec0d77f7bc0182f422df918f597e01f9e6b7715be@5441920c e7170d8075f74f96bb230515214907c2+6901657+E9e0b89feb40e2267f5d94df2d1993ec640268b53@5441920c 0e59c8753f30cc7cc9fd19f8e11dc5f6+13650247+E0b663b68366d8df921269fb04bb7f72770352066@5441920c 29f864d900551cf85dc33c850f49061f+23602906+Ed6f0120d02dd26216c5510c1e46bd109bebf6681@5441920c c8f4528bd47bddb5b26e4006d9cc89f0+33300672+Ef1b055439022dcb8e5b60721226028cf60b9660e@5441920c b84644525493d6b827f166d0edb616de+14622270+E56972e6be0dfd68fc0362332ce43cdc55f9c30be@5441920c 04f6c936ef65edd854c2105b246c7d0b+20760162+E8410cc9b133b6082efe33c6f42996d304708984d@5441920c ce1c666fc933026cfcf39c0221987462+29757577+E8b297663f7f1b63191103790dd7060374535f380@5441920c d6369df6628f2d7c48ecc5726e544004+9439391+E4d6b43d61290d3383d50668b68ed1b1d3b86cb9e@5441920c 3092d05f3b8f55ec765c8c95b6b40622+23690991+E269654b67d14c41bdd9920303500003f0e930cd8@5441920c 50b159ef9c1213116d947c92285d4983+6504376+E8e941656effc51485f2f6419e6f76d6b0619cd65@5441920c c8543b693d01c8fe6cb3728ddcdbdb22+31429424+E96e17b3cf08bb6494f841556d6037b6df5cb4842@5441920c 4846c99e6bb5b179de4fd46edf46e31b+20667266+E89858f30656fe456b6b4c2271fb1f5fd98b4e9dd@5441920c df48275087655f67867539949f52cc01+21542259+E6d2796f768f4621eb6b7b74c3322d1bd2b3d981c@5441920c e07398c19366fc4f876b23bf79049f1f+13635330+E24336044e23d35569f51f466c47b1c0e3090666d@5441920c d27072861c18d3d61663bb359e61d1e4+16645443+E57c58c03f56666fded7e95596e6017f6458f5e8e@5441920c 868b525fc41c185415fee9ede35c9b7f+33763672+Ed656e64e7f08cd2363648f29446f84f0013f3662@5441920c 634d4d1116c85199b4c8837667126628+43935944+E51dc654d2602ee26618e60c8842112225ec2bf48@5441920c 6e8cec6b84b340b746f63f7368339430+26173344+E7b16866c76fd11f50f6768172453cbe3c83385b6@5441920c 1c7746f9733e0ece7923ed3537dd2966+17960379+E2c7f5850549c1662d20c09e330fb173c243f4f47@5441920c 331557d6b124e16eb4de307655c40882+23669100+Ed8c6c496bef0c4fd0866922cf4d7762b2b9390e6@5441920c e200e83c9304eec0022b7521c3d8f256+21475297+Ee99dd49f41fec9566f6fb75301236673737db243@5441920c 9c76d58e16d65c05325d12318189b06d+16293653+E862e595b7100806e3036dd94df563646226bd766@5441920c 515e08e4b23d320644267cb4946d5e3e+1514377+E67d27dec368dd978e2fc48ee90f59711e5dbc2e0@5441920c 1d943f36d01f35f0f1bf9663b506e924+6509364+E59408bb9f6fd9f3c537000dd213e628f656ef976@5441920c 6d234083cee3e2efe81455d863cc5dc4+43017690+E6b1e7c6b5c44860e1fecbf135b7e9f662801cce6@5441920c d1922cc8dc266b6f6eb0c95cd8b2f417+20665117+E962b126b99c4c8cc216450937bc8c1dbd8e2d2dc@5441920c c3f2e5d04b3346545c2584cbcc9969f2+1591467+E6d7e5836b23ec1ff6336d62f6037e9d3cb92693d@5441920c 867de2fc66953e25bf15c61378ebb781+16146759+Ed18d2b753e63678546bce0bb1196b4ccc23207e6@5441920c d8c6f838e3d60b0f1f7dd7e9bc896cc2+6656200+E2db7f97705c6c32e14562c2776776bc80fc97d63@5441920c 0cdb1913f6fb98e1680d101dec9c07cc+20707621+E9178d6b652d68bc7f61dbbfc942673d523c7d86e@5441920c 413e0b5537cb3d5ce03f9e9cec4f62c6+9656450+E7f00c2344edbd7683c37d786c0c7cdb9168d1cec@5441920c f8083c6ec29669d7ee607223e3ed584d+16425621+E8083b2db35f09487c86c03c0165716144f68112d@5441920c d57b00fff01f31e839921b4109151f30+23196332+E663e94940799968e43e632cb56d75fee8b418677@5441920c c9d80bdb4b75c42ef1154bc13e11021e+7300691+E8681d5461b3d984ed09eed8fb41917b9e7bcbe17@5441920c e4d78db5894943cd403b6b3147c7321f+49692537+E12239bf4e933dfb24292001dcdb3b074969ded00@5441920c 
4902623e0f182b4f31fbbf6c1b917ec6+30721960+Ec98bb316ded2bdc765967b66218227f45e4ecdcf@5441920c 5f0238db354266526666793b0b228312+23666340+E0b679d71662d6387682430b11bbbc47737356e93@5441920c 8c0db777b2b8b95785766bc1b47733b6+9229611+E5869e889bf157f2612f20b6d765bdee03476e9c0@5441920c b53c6b59d74ec6dd58b56152519274e5+27421753+E42b4c33532ff2638983b21548b50f8d77b40cef9@5441920c 65bf786bf68c762e3fe62c2357896c7d+9699436+E8f2b22716ef79f09748948eec2c610118f7576eb@5441920c 894c39bec02f51f622e4b1bf202be8cd+6406659+E6b56fe4f277d784ce1d3d3c279763690f19d576b@5441920c 32dbd624968d15ccf65b3d26bcf3e0bb+16694996+E38435585ce3658c50de109653661fd661968fd2e@5441920c 5218162dc863d88d27c8088b7fe0db3e+11026527+E70b1e8d389f0b1b1e635bd5f0219635976f53586@5441920c 2664546541d516425ef812bd00e4e549+32909679+E56d0d0f602d8d2e240dc6ccc4d69d9353030ce9b@5441920c 24400617c269c4ddc9bef64256865245+30963436+E7eb3c301800f63d66ed0755b1858ebf488464166@5441920c 80e8650bf6f2101d6526e85cbf1669c1+17266919+Ec9e10be5668d905fcc4fed6e5856281c4e2d64b5@5441920c 659e40465fc1d4d93b9596d6902258b6+26996009+Ee6ebb57415fd1b8b668e276f34f9b5b891d3f526@5441920c 940b65f21799e622371662b8c543f280+16704607+E2eb498f04302367895cb3ec665eb7941bc62dd82@5441920c e5bdddf3051f3e66608008750c46f2d7+26045175+E6661b5d76f3253e1044d6b266174b6d27fd7b65b@5441920c c4810116de72ffbf10295ed9c07e7685+27916575+E6ef1302d90884fbf836712f1fd74d61f612f536e@5441920c 64fc98e6841b185fc0d82fb136b663f1+15050054+Ef795fd6e80365f9ff17767c6231327463433e9bd@5441920c 6456c7ef22d529c812b5622668f1f84d+15603577+E5577c12c3563684bb5600b4e9be014dec6b06c33@5441920c 687405d9d700bc374b30029cf8d4be59+27716393+Eb0b3748363bb867ddd6dc8c3c8c08105741864d4@5441920c d284d714348344645242506366129f16+22019757+Eee039cdcd2e2630126ed862ff4e697bb1b93637d@5441920c 69b48566c2663ef36086d9db2f990136+45797643+E1c80842642f746545dd1405229f35c3b3dc6b19d@5441920c e32e44b17e16fc2e2113466ff867e26c+22514360+E56d768865cbdc8b4c3c56965ed282e1fee305906@5441920c c8b2896f824744f6569b88fefdd216ce+19253951+E322903880e688b62d3bc146765c5c1750e43f45f@5441920c eb2576903cced0150e92eb028603f228+21229495+Ec266378d59606199c6e5294f1d400b196904859f@5441920c c9d128d476e5c463452d08d4ce0efe6f+17559372+E465f5926d3711b9b1dc8266666fb7ced402c9c78@5441920c 565cd1f686914644b63dfeb72e9d041f+25526673+E49e77665901ddf4f98fb5d61de73edd66b43fdcb@5441920c f293c29e91b111e9330209f3d94dec55+7096070+E3ef2322ce8517189616069206c266c66c16ce39c@5441920c c243edbd633d9795c9008457e7f64c24+23411651+E0fd1066e77be25015675fbf8e338364bd404d16e@5441920c 66959d9139f6de12ec00f9dd486fb30c+26119054+E04bd73dc60b645f68239df27e8707d342cc5be4f@5441920c 6b633b3567fcf12293e447f2f535f68e+23290349+E8e76c6686bbc756c2b966ed43e9fe1dd4f9bbbc8@5441920c 8c6b77dd767ff6e4e4bcb5645db9c267+11654057+Ec5e46e815801c11f9d0b13200539d5b8c05c6b90@5441920c 3377c8e76b7eb9f04d30063e468fe4e1+6496414+Eecd1131c353c78259036e2c36205d71e695ef6b5@5441920c 4026556790fdb1739541e13e97c58e9d+7220726+E01078c064ef4477876ee0d730ccb97c695f72d9b@5441920c 265fc5b76cbd9cbb3fb0ce49dd4ee2b6+15666346+E831444667901b15497b4b1850fb5df76f5098681@5441920c d6f158bdeec1c3cbe0df75466d5e0691+20565771+E66517714fb6121c25260c6d766080d107136b199@5441920c c99404c36f55be9285d6ce7f6c398728+29696076+E46bde61dd962c7659b6bf58c5f24f3c4b0295fc4@5441920c 3162d76defe7c44544707f52d67b4770+29661960+E88d05cc566b526ef6cb76626bd386ed468eccddf@5441920c ebcf6967d9e4c232e2876786f86e3cd8+5667660+E21dd3f48b8116e7824b2fd342e7d1300663ee33f@5441920c 7507d4647d3bf526be5e64f86fc24740+21602934+E6729c2617d186d5d1f828bf0126c67ce3c67534e@5441920c 
64745b39622be74bc8c6bf8d566f7f64+6125690+E9e94137358c821d701932363b415c35511b41009@5441920c 8ed366614cf657d3d654f181b698d28d+345265+Ef9f24b9b4b39e13c0859033f00ded590e89e9eb5@5441920c 10b4f10368dd17556465f21dc66c9d62+10003929+E4b200cf2ee068279d431940f687d300e4741c76d@5441920c 748847d8c44cc3c6ec068161d13d8269+6997133+E117d6fb869e6138b8c9cef842f2dc2f60b9b8cb3@5441920c 332d07334b64462499c6fd664e3ce8e4+550060+Ef1062eb63d03656f3368fb088c6e152667662c00@5441920c 386e6796b1756f14b906d6496667666b+35514556+E398d66e1be91928eb0f6725e40096f34c1566bc8@5441920c f699169634840b6c5c22032f04662885+30770003+E23066b0c9607b5c0c848716251bccfff57f57644@5441920c e3328568c69cb7833368c53660bf7778+41661599+E934b4b27473595d61ee61381b912dd1b15f69b3e@5441920c b68239278c162b35f59dd11b26c7bedb+5032660+E13f6f6039e1954d72bcfe87cf714144b71f3c9ec@5441920c 63b57755c3cd8069e5e2626dee6c93b7+15730167+Ee3c7d8676fc6461df3dd9b78c30931b29c569485@5441920c 18dc31ccb81062638626639d1c7bdf60+26696961+E61e3bf0e745b644443647e287252e84061f20838@5441920c 37ff6375ed894106e4365c5c6416d067+33670066+E6d626346726f50f7f186c6602f6cb1166dd7506f@5441920c 11365f54371b62654524fc65e34ee36f+5763371+E315283978f461c40e5641556f19e630c2256046b@5441920c d830c420616b1e0ceecd1361e07575fc+15201350+E8260833cce68c73846008c810d7e910821f6fffd@5441920c 93f16b759b1cb023344dcf15cc7b199c+26316506+E9fe3d33fc9c66091ddfd6eedf752442bb988254f@5441920c c23706e6fed66ded615945667300d388+47367411+E8b073cc5777d624909c6bd3e65b61ed303d5423e@5441920c e24b474d105f11b6573565fe54862860+19419515+E4cb466d568e663b386014080e11bdd9e22db32b6@5441920c 3632b61819853b163036e1f402638c44+1079105+E8db9e3652cc1780bebf800344e250feb52ff1f11@5441920c 22c064b8de458f72fb77e43f73cf3123+40594325+E672dbd4b2b496ef28b64ed6910948c68b607e491@5441920c 3c5710662d8d0e6f12b2071426d48644+5163249+Ec6517c5bd09f4b081dd95ebee4bd869f89b1441c@5441920c c3f52592b3f97416b23f689536e37693+17064012+Ecbcee67800458b6df98ff689187446966d821f39@5441920c 8456c6cfb316bf82c7934d3ced09b5b7+4703673+E916ece3d7117b6dce14e2e1621566cbd7766de3e@5441920c cbb9f866c562655729c4bf5f67666e46+20937649+E2d2396b6587f6f4ebf76295728070c835d55bfcb@5441920c 7eb39636607c3c8b726d67e928d0c950+19766577+E16676995d471f8d36806e8065063e9144e612d6c@5441920c 6873797eedef2fcd226686765b83cd84+16520799+E4de63c7082f1d2ccfc77457150423562d9346b62@5441920c 16c2450d3153b864de69e362c16ec6d4+20064956+E3b681fe29ee6c47e19ed1bc08947576d38c1b1c6@5441920c ce951e19024eb6ef606629527d03657f+14563555+Ebb1556509137131bec2c637946440d2e39f2dfeb@5441920c 968f09f24f240b0ee3de2615905d284c+17666235+Ec08966e756c198656d867620466617cd3d1021b7@5441920c 01264be9f7569fc6d446c6658c68c7ff+16200150+Efb257061168310d1334e51db7d064d13f053b7db@5441920c 3354d97c0c7368349cb167d463ffff3c+15699664+Ecdfe2e0b6c86dc12f8ed0c035056143dfdd16bc7@5441920c 685b0eb9860422e6c308197912f9cf89+26566964+Ee1c91b8b47f186dee67bc0bfc8581487f1734841@5441920c 10d6fde917d2f67974c342f2bdb99810+16666607+Ed9d736c3598219d786bccdc4480ccb6574fe65d4@5441920c 7436e9b3dc58d16c4f7fec9558e2e3f4+34299516+E84f366efbc7f687c37bd4006c80bf867606bbc2d@5441920c 0c7dc3f9be85bf7f02b6986369e15396+5167494+E3491f6f0157be9976e8f52f48f427068efef2041@5441920c 853860b5e3d7d68db870d64887eee036+9550459+E22296006714611dbf6ff2100006d14f7ee49274b@5441920c f4b3b1b8c22d36c1b2efdd626c3f7353+9425652+E502bd665962785f678f6e33e9b79ff5c09dbb892@5441920c 46dd6e718e7bc94962d460890d532d46+52257569+Ebe11c76489465f24695550366c829b11679d9f4c@5441920c b72f5e0756c4b5fd3ebdd71812f3ee56+6929925+E242c9f670807672f3b9cc681b47140529f436874@5441920c 
ed7f65db39984d581c595cd0e1e9d056+17556391+E8486351c5ec074e0b844c186d66fe701e44e3763@5441920c 63006056e077e0dc7716bf3425010ecc+14247713+E664b6f21cf6debc0095276164d3091f20b752597@5441920c d5116b69973d889d6f298f4738deb498+11066306+E3b4646b51c989f65567426664efde6e6c341f66c@5441920c 13e923c021e62ee066ef759d74e32d92+32067066+E3effb2cb94884161514b9cef413cc81b178be806@5441920c 65078f352fbbd13b8b56ebd0defb5cc1+10666222+E68f108063ebebded3649026b960f55f646c9b3f4@5441920c 3d4be63f60347bb56626be3969de967b+16626376+Ed8f748f6f073e373f71126f1c984815b26607dc3@5441920c f90b18dee1e00c275c2e238eb0393064+7956919+Ebd4f603319ed5e1f01c3e5875391688de2627899@5441920c b203671ec56f6d0bd72ff3f8415091e5+16713509+E5e929bb716e7dd51eef64530b23257d64dd06d64@5441920c 73b1ec2c6b3358b8190bf6c23e4569b0+16935900+E66185731649cf69f5d192b084b03dbef866ded63@5441920c 18697cdc7111d7dcf188dc222dc236d8+261032+E4b9dd594df44bdf4eb8850bb2f7dd1154b2fc5c8@5441920c 466b81f8768877dbc09fced3669fe11b+4269160+Ed51deef9b87c6c468b8cee2f1c7354f15117df62@5441920c 56719bc3f4db387ee926e85f9c017bb2+22617045+E182150854d000ed3316429530534337731b1c888@5441920c b6560f6d5e974464461f5d996cc16160+6953071+E26ddf53265f8146ed70f620d46f56667fb6e6411@5441920c 61280c9751d006c822044302870516f6+26475163+Ebe9b8b46c367107632bdf064fb80566ec8175e10@5441920c cd36ebee7f20ebb43dbe61351c9e33d8+21260557+E84b512df1b769c965f796560616566d36ed612d9@5441920c eeb7dd91c17fe167d870676648891ee1+47650592+Ed857b3e69858537d67766568e6e43d8b6487108c@5441920c 62ed43cd619e5307f96ec7294634ff81+9264520+Ef6ebc37b8877018c46c43f31b322f46b8676096f@5441920c 637d2d6c28466b2fbcc3596e4e48e925+15247646+E69510297f56571313d71633767be496d6ee5bce7@5441920c 1955419f9b01bd0e663edc61ef23fb44+6560616+E8eb1266643ff694fb8b2b2062469736302211e84@5441920c ed3b97847e816871458df4097897f666+26610427+E91d463cbc8639e2982d83e5ef6d6676130112c29@5441920c cc560418ec87624942f357bd6e349f11+27671122+E6326b03674bd7db8bd951d008d8e8617c64b959b@5441920c 346d8222e5e662b9d52d535e6354b571+3665630+E971cd85296175789601c66de54420bc0b04e58dc@5441920c 974beb90b949dc76258e7d73b6505e86+14940321+Ed73ffbc16e630c20b681536c7c6446c6fb6253ef@5441920c 6f88628c91767558e376f10d6eefb559+12957633+Eee6b1166f4c4fd01b9b39d84f85fc8bc68c7fb52@5441920c 50016d6fe13d9dc340cc27b6c20d6040+36096753+E6027b3b3d25bcb41de50469c6d8f6576613e6cf7@5441920c 66c0d207c66e6fbc509f52b8bf20b664+14674016+E206b24966155d3ce169076f32c91ef17c3bf7c85@5441920c 467e1966e64f17f6c68be65561c1dd6d+19901201+E8f59b255569d94803750e9c98e29c335218bde60@5441920c 3669c6c63e8ec00fde480790eff80647+14314479+E857690ec233c9c4b6436ef04590e21ceb26e7606@5441920c f924d760468f7dd3d28940f57f77193c+17691663+Ef8d9fbb6446528ff1c84bfbd61bfb4e2e9074fcf@5441920c 437442184168c16035eee82204cdf366+10632652+E715f84e7ff6f6d9d6b456d8854ec6b78403205f9@5441920c 4b2ebfccf47699f59192894db210d37e+5606647+Eb16336fe5036868c36db8cfbe0be660c4611676e@5441920c 9bd8ccfe078b86dc475656911667dc24+11677064+E1245c040830dc83b3e8ff5648296ff1e0bec36e7@5441920c 628384646666c65c0c67f9e671e67262+29615252+E3efcec0c36df19663d3c7240634740feb051d6cb@5441920c 04676d96e3b5dcf5e36f633c124b366c+24913006+E6461330125be80553808e043915b51c31567be17@5441920c 2e5cc96bf6d6c81674364bf74534d96d+35077014+E408515ceee93c3781b012517294560695b2d3ff8@5441920c 625d6dcc95bf40d68d42bc4f0d8c8135+23967236+Ef8f0869bdbfd658032d59d02692f6b4671d67b16@5441920c f7f176fd25d26d69057e4259f7164280+23454742+Eb05113506e03958136f1e77b659c805e481346b6@5441920c e0c006f2dfd4542e2b6b9d66c065975c+12476502+E6e617df6b90f7f1633b960cb710e5d639f507684@5441920c 
65476f45e197d0cb38bcb0e5c3eb88c6+21624407+E75496e0662883f622ceb1166426172b11f066049@5441920c 46d6d4e5356fff3df952c006c60e605b+14556946+E2478fe7b5334c38c666519e26085f8f879f2e5ff@5441920c c06b49d30b4c58d6c407d0f01d8c9134+23503963+E6f100f01e53944593d14668b674e766eff14d626@5441920c 9ee892068c2c07664f13e519e2356f65+23959972+E5e5566d7092060b6e573d9b6058569967936f8f1@5441920c 8063f4732047fd6290456863c75355f8+13023330+Ef5e6fd0bd4d9e3e619769122400e6699ef42ffb6@5441920c 6133e2fd32713cd037fee9ee60193360+4304022+Eb9181ee3eb8d773d3929b1e58f605ef46158b668@5441920c 06cb6265f8ec862ee6c405b9c5185ff5+32133100+E2d828480e29d35725418721e6d969066fe6310c3@5441920c f9ed6b5c26c126de295bf1232ccedbdb+11110146+E8c90f9ddd3b6cd1620f48e34b7490534c43f7e74@5441920c 6bb1e5ccf8c4ef2583efe96bccf70491+17322291+E196662fc86919b65b4d92890cb6d758510fe0d6b@5441920c 32543c512ede456e6d526780b88b6b15+3523440+E4c9067c126d27c21558d66b676b9050c020feccd@5441920c 3884892318510d6e5e52e6194686d19d+6666009+Ebdb05b4e5c10f28d5507ce38c81c268cd3457084@5441920c 42bf30ee736e0e254b04e9e6913e06be+21279449+E1b145961d7c6637bfb1916eb42f503b4680636d8@5441920c 05f010fe0e3687f66986159dd1916699+2064644+Ed52318d3e26290f188266514dbd9602e779bb4d3@5441920c 662c56b8642e26265093b826e0689288+14665421+E36cdfe7b5656d9b7158e8ee7883886d1d34d52f8@5441920c 6789d9df635d40dbdb42cf201796619c+31742006+E496c7611143e3960f1c1c6c82fb16d8620dc7cd3@5441920c dcfe9fd63c4f21c8eedcdf27d60477bb+3139055+E91e6d07fccb75cc809bf6d5bf06bb2585ff0e8eb@5441920c f69eb3f93d80bf4f7221794964081e46+20025220+Ee596640d0963153b29b2edf074fd8c431c4b3f90@5441920c 967d6d4ed233c610896fc1d98c3b28b0+10164650+E81e99e39e3557b64076e433547541397b1478685@5441920c 9039b674c5217c1c2cd912f6e5028439+22530516+Ebb526c19b663ce48e1c6016634928684b7374b63@5441920c 14d6d4915152e2ce983496774fcde566+13062626+E22f59b087fbbe5eb5de5f7f66176b6bdcecb4940@5441920c e7060435500d777946081cd8df43c78b+17612660+Ec32fdd296301d5673c5415ef79c25e7fcdf1dfde@5441920c 9e31b4720c6ed8d60c4757065491f965+23191965+E67b454334b6140627c3fe06fdbe6028499970f63@5441920c d26d94682c04132c5c42709463ccc26e+9532662+Eb2cc826866265f6b9d72606fd6f5b250c61e9c7e@5441920c 4077d037e9c6096d6388e967236fc9e3+23463610+E93c7660c4f0defb357f973e15663e50b016ed35c@5441920c 939efcf9d66d076f1f52dbee0b46d886+11316441+E193158f0e66c8369d87e5561c720f355ee77c987@5441920c 78edd8c79e4911de2266411947056c8d+33406606+E632b24bd669f2c39ef56540fdc4fd90f5460303c@5441920c 2ff0eb23186b6db3c3f296363de5180c+29555310+E0139db36506e657b6995fd4771019d393f82885c@5441920c 5b0fe6d6604951cd5f6c78f66f359c66+13315726+E295b6db03207f03143e00edc9964739cffc195ce@5441920c 22d56132b728471e2712561b5e683548+24175332+E426de3f93b2bcbde645b57970b0663c1b7fb665f@5441920c f5092e3541b32c9b8553c18fc75deb59+30526779+E5f26e773f219367c550c67c6902b7e929d65e7d3@5441920c d43ec61d1ff5e63b9076626671b0c038+22764166+E915f309339169f6e23c4336240c6e592e08313e6@5441920c e6f4908ec7ec6060c8d466beb076d87d+6501650+Ed2502bebd746dc46382c87cf196c4bb468939782@5441920c 3b0d1c2743570c67ef61cc5940e60bff+30459907+Ef4258964867743bcb19f8ec64e66d018e865f452@5441920c fd47e84763e40011d00b23eb19cfe0d6+7937153+Edfc4e6bf57c0f1df165661393f86b1d355bd4d44@5441920c 34987c6857bce32f07bf4ee618772df9+17300095+Ed03738e8fdf1041662244f4270066862ff1fb197@5441920c b5356f66d692218e690bb94561c047fd+25176211+E0643cebe06c4ebe5fe1c5dbf746e2868e24b9c6f@5441920c cb9e15151f73d81f6864d55758496cc5+22053170+E6962e9ee716ce012e916f2cc93cc9624ce090225@5441920c 687f6d297b07065c613e6192963e82f0+26170716+Edd7828372c78ec4964d9363891068e13f339124f@5441920c 
b3e5368d95d1916d5653b90cbe4c5166+16915964+Ee39ebeb6e92096ee58648c66d373e5035b19fb96@5441920c 028f92d2328d0b82662e85c504389ebf+33011109+E56f2d7d585901e563e6223b63444806861778686@5441920c 8fee741b744b81dc001986562c76e2e3+34615593+E1e92ec6070bb4561382d33d7049dec660d71f87d@5441920c 0561c687debcbf5cd5bb661d576d88dd+22721666+E5fb1596528cbb8bc42d1d18cc8f0b46c5e0de6f5@5441920c b41703b42df6d3cd992599e9677b7c0c+21656716+E888d9325f666683427068d89bfc54806d296b684@5441920c e88dee4736fb1332fd0624d886984839+7279296+Ee91142f0e8f0fd9678e3e188effdc440e1d23408@5441920c 219dc65085f06379262dd3b4727b6efe+40036264+E9865f1497fd07c02915697e223727563686663ff@5441920c 8632264fbc7898588dc38e4639496636+9223066+E9c265e5654eccb4def668d87e0614465b4ec44bb@5441920c 10c33b498107392c30e1c5f3494e602d+7265467+E417be03290b37248c4c91fccd30b1b4692266255@5441920c c21959f5756d956456bf8f9eb955c4f0+11169673+E85e840f2b4870c83264233dc08fc3b396c9d1de2@5441920c 96db807ff3e36722740dddc5c1bf62e0+15396551+E1e8f4b9270dcf3638cb6182254c9df3d0e26221e@5441920c bbbf2e45768169c6699cccd60655b635+10659291+Ed47d66136074c5764fe655cb7d96b251ccbc4561@5441920c bfd61f58437b8b8021132efec2446665+6096913+E916f0429b8e8cc046dded7b8c07f4837ff021b5f@5441920c 37604e70419598750ec924d525267f3c+7660539+E6c4d798bf8eb3122157d31e0306c1e545611b259@5441920c de66fe2dcdb83b476990ecf1fbb27c26+11204600+Eb81c3d896180706f813d67882bd113fd69cf6528@5441920c 342eef125628740b9562673bfd2b4d96+54366+E70098b304b0d8975c36075254076223fb73f2eeb@5441920c 1b9215689eb58c256528bd2865c2d626+3466752+E127268472ff8bde9ee818669f4629298e8086bed@5441920c 086034465e0316e2648f8e4802604f51+31370255+E6f34120d24c1f3665846204865b4bcceb47686fe@5441920c 2e234e3f091bec21456c9cff0bc761b6+6507254+E6fb62e6b9e0e734d152106d060ee6692b6c9ff16@5441920c d6c63e474cf6b5385104b0b78677ec67+13175993+E9809c18b776b605849b2c321928863c362988576@5441920c 4ddfb1ccc7611289d7264bd70cc93dcf+12766634+E3b762696bee4dd0569d0302f957868fc20f51652@5441920c 0bdf4063360d6b021e7bd3ccdef516b7+9460636+Ec07113cdb6264b0821504ccf3b0e2604c1870916@5441920c 491e0110808fffddc6b9d2576c19dc1c+27323515+E11f2c06e4f100f75448161462cb693e6debd5178@5441920c 4f16e91e756de766b9bc8ce99900623e+9640416+E06208cbec42bfc0d55661e91036cdb4cb5dce80b@5441920c 5605f670b81c38565483275336c3eb92+15604264+E7ec502fe6fb951904769d67621100686144d56de@5441920c 176d89f315f6d49fee66317f081634c6+14067052+Ec6345f6d94364e6e310655435b476c46087e6746@5441920c 89b45833d2c19fd864538c2ec1d39db0+17922209+E6679c164b6f598d5d0630b678297cd068c9c9262@5441920c b42f550fc4f7c3987413b19e69ec784d+6951532+E589684c52326864cd096ce66dc61e30ebe4130d8@5441920c 70462101bfe861f600cff25705738683+10179374+Ee8735f8d2e55946d6dbd3622bfd0b52ed4ff5645@5441920c e5639f1ce89f30d7647d43d92f3749c9+13769767+Ecd88f922b0db4102564b81fc91c7b74f66112656@5441920c 922c4f0efe4d505f216cf6b16e0c74f0+13596264+E521d6ccf9306e12e3976c9169c122220b1cd702d@5441920c f75c903f8d88d6969e7ff2c72e5b31b8+22691146+E0f3dfdb223b828723e3017ff77e3f66b493b86dd@5441920c 6788dc29696632e5f39b668e84337147+16625559+Eff07ef424de0e25ff25316c43ecb9620c8ff6cd6@5441920c 195872d8776fb7df2699106f22de52eb+29592637+E3796e413486c57e1671b9066cf91fb6f358e1b8c@5441920c 1872d8876c16d6c72b1915486c996f51+16101070+E299d2660721262061d20e5421d387c966595f396@5441920c 6d54122d77b2246369d35f699220bd41+16610443+E763f085d9b08d8333e5d95f295028d5b848fc7b7@5441920c 9987e8842471d306ff54b68333fc94bc+14696064+E66641522d69ffb1b990be462106b248c99506b55@5441920c 37ed73f77c77c6c8ec8666e753cbbf7b+25736556+E87781259d92d670966e1654d369ee46d86d5ce66@5441920c 
de0dbcf70d224c16dcf92905ec10e261+17151669+E4228816e8d6d28e8835d8dfb46e54dc1f63c7c67@5441920c 709e8ee867526b180b619b682159c277+23262243+E93f00c26b28e85d26e8cec6d916de796e6e3333f@5441920c 374f74d9f4f0409b19ef96d00b267868+15933520+Eb4f5933760625f77d172235bb2fd62b5d46c1b6c@5441920c 51202e99c801cfc3062bd9610c00f063+2539339+E26c714978b06906d7144158b6ebb1fbe36d56344@5441920c b6fd759cb167c94557649cb3f7482d49+26353605+Ee642b25b5c00040520fe3dddd988c146e632cc14@5441920c 94669028355369bfe0db926846bb56f2+9695904+E94bd0c5fbe063be26b5d37061e0d5e13666b67d5@5441920c 2350567d203eb82066ef6dd59351990f+7647526+E27c8f695b3d508984bb35cdb78f75b0b690e5078@5441920c ed1f536d97255d9b3287612ed4833026+19420966+E35cd0f0303cfc68077376266e3117c72b369b10c@5441920c bf202c423c2f658db116976b3866c622+12634376+E7086c00ef933ccf0f07f0c9d00377797f337fefe@5441920c 17055b910c95c42619109362966c8fbf+10157396+Efd6d11c193bd32c7c69df08d8217ec63cf8414e3@5441920c f11c6144838cb9b4d67351d6626d1802+7156443+E4d5dcdd8ed1c174076dfd46767996651f38c4903@5441920c 9cb47df53d1cff53cd5b4796d0bc23f3+29952199+Eed564950d188541356161227068fd9f40fb5933d@5441920c 2ede654beb747fe9ee17be9dd5d3949c+12640911+Ee99ffeb440dd729067c606762ec076e524d592f5@5441920c 9be26457c84576c7e66e3168fd979607+26005247+Ec94b90868305fb875497f3b687655fd096e95296@5441920c 38b141749fdcc96dc28f725593486bec+61264+Ebe8cc5cfd0bdd54732ff1c62f6620ce4c797cbc8@5441920c 853b659766fbc96f641bc6923d5694bb+14544713+Eb567982c333b291b2d72467b6c431cee1bfcb6de@5441920c c6ecf79b145527c6cd62b8b6cf6f51b8+24455427+Ebc7bd846b936b266f7985111223eb1fb73d99cd5@5441920c 753b0b93996c2970915290ebb7eebd27+19979357+E3c0604c2ec64edbbc8360e568601e1c6ecbeebc1@5441920c 8fcfbb2b43c14680bdff1e514210632d+15760934+Ed0265cebdd6c614709e8cb4295f353e36083b32f@5441920c 11e006c41883660d19e68df266fe4636+22066346+E08206865313e13d29662e02927f424c7c8ebf265@5441920c d55b9e552b90d8f54c84f620ecb73e2b+15463950+E31701420310310e677b648926644c3234d52f472@5441920c c6b76f0b30d2e1c48c345608961c6603+2245652+E1248cd543d2160eef37fe460402ff946e75d8d64@5441920c 4806216f9c2638b63e678d0d660d2409+6206011+E1e94089dbb7c14d892d7f6521fc36764f5c6d761@5441920c 47f36ce735ff98996762cb1245e2d97b+7456077+Ec7ddc386d614f46ce7bbd4db8fc8e2e0261f923d@5441920c 7e8e73b8655f80b3583507fb666c77e1+15544112+Edfdd6ce9b54bf6fd3d2e8116783289dd77532b9f@5441920c 675b0fd88758c546376314872801576e+19435511+E30b4e596f663f0b826b5208370246bd321bbd856@5441920c d69769253bb145bc162c6158e9675316+6640055+Ef9c9c2ccbb6e964c05fb79b1250b606d57e59164@5441920c 0e357377b64fe29c3806fbf96c946645+9325419+E1b8609b20f5fef67fc46ffe5046b9f86e883d6e7@5441920c 9f57b97c259fed92f637d5232dde6104+9611496+E7e2b4cd0562494cbec77f3f67eb55414266d8d50@5441920c 09f60e940e2b603004d6337b32665beb+42415433+E93636b065e97d59bbdb24bc7dff5145f618f64d9@5441920c 6276b65424d63984f8015782060647b6+6046575+Ecc7e42155e92667eb8499956d012fc67b674301e@5441920c c9ce65d27ed164502366f9f5ec6e3fdf+22647045+Ebbff16b79dd826b687464f496f630db769e4f267@5441920c c16f091009b6f237366d5554137509c0+7507452+E3099761fe738fd5ee6368dcb8f1871d9bc018673@5441920c e06b96b906460dc628310477ec136ed7+24532176+E467927670673306f4186e4298f594c2584625137@5441920c f4ff1289c81b231be38907b88e82e975+20702445+Eb06cd9434e0292e6650453656986dbee2e5517b6@5441920c 8ed4167cbc6998f76847f4504cc21655+5393310+E3216b6f606602517fc6102e663746762e348b261@5441920c ed96eee78bcd599609bccb890d19d1c0+25036697+E2855c621547f6508f06862739b1d3c98d502f60f@5441920c b10905f5fbde35f7764492472ef1296c+17526792+E2387540056d68b4f5370bf7cb01d8439c83fc571@5441920c 
762ef6d6e967ef7de65eb2095005664c+39123936+E366b9e4e438991d75f6cbc63d66d4671b62dc13b@5441920c 58686918bf8226496969555356830d50+21530262+E08415f6366061839595597edf078cc42764ec929@5441920c 987cc9c5c66e600676ccb76827266b69+39763257+Eb8e06991c83ec041e86f2e563656c869b6237cd7@5441920c c5c010572d6fd5f3683b3f7452e88b2d+6637631+Efb665b8364468f891bf42622099c643c558534f1@5441920c 076d7008f20864612f7f5132c66b84ce+16073436+Ec6cf748b16cc57f7168c989e661346495224f661@5441920c 81115023d44583e3dd80c630e9eb3b95+21766601+E4456d3c5e1cedc36461269e8c84fe32e8882f0b7@5441920c 26e15cef932e661c163d65c53f3d7596+11316659+Ed328777b54e6570d8fb1067f00847290be9642d7@5441920c 2e9c846ce77c8d62e58728d948f32301+6626151+E6742654b169c78c2636ee26bfbbbd246f86ec811@5441920c 86d19f8cc3be48b90501605017b36579+25421420+Edebc6387dd9f7fed0d4bcf6696220087381e5404@5441920c 27e6162bc2c14c183953fe682fdf1525+36360466+E7c6ece51c0fbd20f6647230bbdbdc66c66860beb@5441920c c03d55167fb6714d78880dc460574091+36766715+E140799f4146c60857050b56e4ffc66693b576ec2@5441920c 631c6b6f09985860c7fed6048e76b716+11066673+E5db6df91202e3100c4577f4bb665474382f8811c@5441920c d62dd2616f00f463681e15ec3647cd58+13126734+E609f8229cdf8c9e9642dfd6e3167ffd076dedbb8@5441920c 8749dd87c0d6b1377909c58fbc45dded+15236795+E461ee6611937f46654806754353bd32961666056@5441920c df7e5e5e1dd4d9dc09d8bf35b5fe3f24+22561443+E8fffe5863e071f5becb24e9c4de0569c1d864ec9@5441920c 4738611fe367691dd44e18f3c8857839+11364640+Ef171c946e87f52ec2877c74964d6c05115724fd6@5441920c f9ce82f59e5908d2d70e18df9679b469+31367794+E53f903684239bcc114f7bf8ff9bd6089f33058db@5441920c 0:15893477:chr10_band0_s0_e3000000.fj 15893477:8770829:chr10_band10_s29600000_e31300000.fj 24664306:15962689:chr10_band11_s31300000_e34400000.fj 40626995:18342794:chr10_band12_s34400000_e38000000.fj 58969789:5087834:chr10_band13_s38000000_e40200000.fj 64057623:4284756:chr10_band14_s40200000_e42300000.fj 68342379:18665404:chr10_band15_s42300000_e46100000.fj 87007783:13536792:chr10_band16_s46100000_e49900000.fj 100544575:13714429:chr10_band17_s49900000_e52900000.fj 114259004:44743112:chr10_band18_s52900000_e61200000.fj 159002116:17555223:chr10_band19_s61200000_e64500000.fj 176557339:4386647:chr10_band1_s3000000_e3800000.fj 180943986:32161952:chr10_band20_s64500000_e70600000.fj 213105938:22400285:chr10_band21_s70600000_e74900000.fj 235506223:14028139:chr10_band22_s74900000_e77700000.fj 249534362:22042495:chr10_band23_s77700000_e82000000.fj 271576857:31053589:chr10_band24_s82000000_e87900000.fj 302630446:7357223:chr10_band25_s87900000_e89500000.fj 309987669:17709824:chr10_band26_s89500000_e92900000.fj 327697493:6148418:chr10_band27_s92900000_e94100000.fj 333845911:14689912:chr10_band28_s94100000_e97000000.fj 348535823:11964810:chr10_band29_s97000000_e99300000.fj 360500633:14904735:chr10_band2_s3800000_e6600000.fj 375405368:13400037:chr10_band30_s99300000_e101900000.fj 388805405:5685774:chr10_band31_s101900000_e103000000.fj 394491179:9646905:chr10_band32_s103000000_e104900000.fj 404138084:4640161:chr10_band33_s104900000_e105800000.fj 408778245:32455363:chr10_band34_s105800000_e111900000.fj 441233608:15940309:chr10_band35_s111900000_e114900000.fj 457173917:22488871:chr10_band36_s114900000_e119100000.fj 479662788:13741614:chr10_band37_s119100000_e121700000.fj 493404402:7619587:chr10_band38_s121700000_e123100000.fj 501023989:23222084:chr10_band39_s123100000_e127500000.fj 524246073:29868907:chr10_band3_s6600000_e12200000.fj 554114980:16511841:chr10_band40_s127500000_e130600000.fj 570626821:26095352:chr10_band41_s130600000_e135534747.fj 
596722173:26538428:chr10_band4_s12200000_e17300000.fj 623260601:5595242:chr10_band5_s17300000_e18600000.fj 628855843:524638:chr10_band6_s18600000_e18700000.fj 629380481:20166758:chr10_band7_s18700000_e22600000.fj 649547239:10195576:chr10_band8_s22600000_e24600000.fj 659742815:26057104:chr10_band9_s24600000_e29600000.fj 685799919:14129943:chr11_band0_s0_e2800000.fj 699929862:27262406:chr11_band10_s43500000_e48800000.fj 727192268:11366584:chr11_band11_s48800000_e51600000.fj 738558852:4284756:chr11_band12_s51600000_e53700000.fj 742843608:6746810:chr11_band13_s53700000_e55700000.fj 749590418:21620368:chr11_band14_s55700000_e59900000.fj 771210786:9186489:chr11_band15_s59900000_e61700000.fj 780397275:8326193:chr11_band16_s61700000_e63400000.fj 788723468:12757371:chr11_band17_s63400000_e65900000.fj 801480839:12157116:chr11_band18_s65900000_e68400000.fj 813637955:10261919:chr11_band19_s68400000_e70400000.fj 823899874:40669605:chr11_band1_s2800000_e10700000.fj 864569479:24190274:chr11_band20_s70400000_e75200000.fj 888759753:10020619:chr11_band21_s75200000_e77100000.fj 898780372:44638330:chr11_band22_s77100000_e85600000.fj 943418702:13920977:chr11_band23_s85600000_e88300000.fj 957339679:22389141:chr11_band24_s88300000_e92800000.fj 979728820:22616388:chr11_band25_s92800000_e97200000.fj 1002345208:26439412:chr11_band26_s97200000_e102100000.fj 1028784620:4173314:chr11_band27_s102100000_e102900000.fj 1032957934:39884156:chr11_band28_s102900000_e110400000.fj 1072842090:11123032:chr11_band29_s110400000_e112500000.fj 1083965122:10756630:chr11_band2_s10700000_e12700000.fj 1094721752:10580316:chr11_band30_s112500000_e114500000.fj 1105302068:35565428:chr11_band31_s114500000_e121200000.fj 1140867496:14197081:chr11_band32_s121200000_e123900000.fj 1155064577:20758432:chr11_band33_s123900000_e127800000.fj 1175823009:15792191:chr11_band34_s127800000_e130800000.fj 1191615200:22249239:chr11_band35_s130800000_e135006516.fj 1213864439:18449708:chr11_band3_s12700000_e16200000.fj 1232314147:29052525:chr11_band4_s16200000_e21700000.fj 1261366672:23968312:chr11_band5_s21700000_e26100000.fj 1285334984:5944481:chr11_band6_s26100000_e27200000.fj 1291279465:20155513:chr11_band7_s27200000_e31000000.fj 1311434978:28292374:chr11_band8_s31000000_e36400000.fj 1339727352:37778620:chr11_band9_s36400000_e43500000.fj 1377505972:16720695:chr12_band0_s0_e3300000.fj 1394226667:13059459:chr12_band10_s30700000_e33300000.fj 1407286126:7673046:chr12_band11_s33300000_e35800000.fj 1414959172:5825767:chr12_band12_s35800000_e38200000.fj 1420784939:42976743:chr12_band13_s38200000_e46400000.fj 1463761682:13809906:chr12_band14_s46400000_e49100000.fj 1477571588:11988262:chr12_band15_s49100000_e51500000.fj 1489559850:17595626:chr12_band16_s51500000_e54900000.fj 1507155476:8587338:chr12_band17_s54900000_e56600000.fj 1515742814:7408989:chr12_band18_s56600000_e58100000.fj 1523151803:26345033:chr12_band19_s58100000_e63100000.fj 1549496836:11140028:chr12_band1_s3300000_e5400000.fj 1560636864:9977002:chr12_band20_s63100000_e65100000.fj 1570613866:13651023:chr12_band21_s65100000_e67700000.fj 1584264889:19846309:chr12_band22_s67700000_e71500000.fj 1604111198:22406679:chr12_band23_s71500000_e75700000.fj 1626517877:24370117:chr12_band24_s75700000_e80300000.fj 1650887994:34354522:chr12_band25_s80300000_e86700000.fj 1685242516:12153797:chr12_band26_s86700000_e89000000.fj 1697396313:19120741:chr12_band27_s89000000_e92600000.fj 1716517054:18678462:chr12_band28_s92600000_e96200000.fj 1735195516:28125462:chr12_band29_s96200000_e101600000.fj 
1763320978:23263164:chr12_band2_s5400000_e10100000.fj 1786584142:11438933:chr12_band30_s101600000_e103800000.fj 1798023075:27434807:chr12_band31_s103800000_e109000000.fj 1825457882:13431932:chr12_band32_s109000000_e111700000.fj 1838889814:2833555:chr12_band33_s111700000_e112300000.fj 1841723369:10166739:chr12_band34_s112300000_e114300000.fj 1851890108:13335983:chr12_band35_s114300000_e116800000.fj 1865226091:6763178:chr12_band36_s116800000_e118100000.fj 1871989269:13444650:chr12_band37_s118100000_e120700000.fj 1885433919:26286416:chr12_band38_s120700000_e125900000.fj 1911720335:18376984:chr12_band39_s125900000_e129300000.fj 1930097319:14118184:chr12_band3_s10100000_e12800000.fj 1944215503:23892725:chr12_band40_s129300000_e133851895.fj 1968108228:10507783:chr12_band4_s12800000_e14800000.fj 1978616011:27625276:chr12_band5_s14800000_e20000000.fj 2006241287:7026139:chr12_band6_s20000000_e21300000.fj 2013267426:27711533:chr12_band7_s21300000_e26500000.fj 2040978959:6793207:chr12_band8_s26500000_e27800000.fj 2047772166:15405916:chr12_band9_s27800000_e30700000.fj 2063178082:9180724:chr13_band0_s0_e4500000.fj 2072358806:9467601:chr13_band10_s32200000_e34000000.fj 2081826407:7989532:chr13_band11_s34000000_e35500000.fj 2089815939:24739014:chr13_band12_s35500000_e40100000.fj 2114554953:26941582:chr13_band13_s40100000_e45200000.fj 2141496535:3036311:chr13_band14_s45200000_e45800000.fj 2144532846:7761096:chr13_band15_s45800000_e47300000.fj 2152293942:18709476:chr13_band16_s47300000_e50900000.fj 2171003418:22602285:chr13_band17_s50900000_e55300000.fj 2193605703:23405896:chr13_band18_s55300000_e59600000.fj 2217011599:14457382:chr13_band19_s59600000_e62300000.fj 2231468981:11220750:chr13_band1_s4500000_e10000000.fj 2242689731:18581486:chr13_band20_s62300000_e65700000.fj 2261271217:15834314:chr13_band21_s65700000_e68600000.fj 2277105531:26147285:chr13_band22_s68600000_e73300000.fj 2303252816:11193151:chr13_band23_s73300000_e75400000.fj 2314445967:9599462:chr13_band24_s75400000_e77200000.fj 2324045429:9625154:chr13_band25_s77200000_e79000000.fj 2333670583:46677445:chr13_band26_s79000000_e87700000.fj 2380348028:12795853:chr13_band27_s87700000_e90000000.fj 2393143881:27123199:chr13_band28_s90000000_e95000000.fj 2420267080:16832721:chr13_band29_s95000000_e98200000.fj 2437099801:12852756:chr13_band2_s10000000_e16300000.fj 2449952557:5708668:chr13_band30_s98200000_e99300000.fj 2455661225:12588075:chr13_band31_s99300000_e101700000.fj 2468249300:16946677:chr13_band32_s101700000_e104800000.fj 2485195977:12209370:chr13_band33_s104800000_e107000000.fj 2497405347:17916606:chr13_band34_s107000000_e110300000.fj 2515321953:24643337:chr13_band35_s110300000_e115169878.fj 2539965290:3264756:chr13_band3_s16300000_e17900000.fj 2543230046:4102134:chr13_band4_s17900000_e19500000.fj 2547332180:19703325:chr13_band5_s19500000_e23300000.fj 2567035505:11554223:chr13_band6_s23300000_e25500000.fj 2578589728:12130664:chr13_band7_s25500000_e27800000.fj 2590720392:5842000:chr13_band8_s27800000_e28900000.fj 2596562392:17354821:chr13_band9_s28900000_e32200000.fj 2613917213:7548724:chr14_band0_s0_e3700000.fj 2621465937:30306549:chr14_band10_s37800000_e43500000.fj 2651772486:19488657:chr14_band11_s43500000_e47200000.fj 2671261143:19588732:chr14_band12_s47200000_e50900000.fj 2690849875:16728188:chr14_band13_s50900000_e54100000.fj 2707578063:7297044:chr14_band14_s54100000_e55500000.fj 2714875107:13453405:chr14_band15_s55500000_e58100000.fj 2728328512:20891242:chr14_band16_s58100000_e62100000.fj 
2749219754:13969727:chr14_band17_s62100000_e64800000.fj 2763189481:15929958:chr14_band18_s64800000_e67900000.fj 2779119439:12006715:chr14_band19_s67900000_e70200000.fj 2791126154:8976748:chr14_band1_s3700000_e8100000.fj 2800102902:18617309:chr14_band20_s70200000_e73800000.fj 2818720211:28602130:chr14_band21_s73800000_e79300000.fj 2847322341:22781826:chr14_band22_s79300000_e83600000.fj 2870104167:7096857:chr14_band23_s83600000_e84900000.fj 2877201024:26087198:chr14_band24_s84900000_e89800000.fj 2903288222:10873992:chr14_band25_s89800000_e91900000.fj 2914162214:14647560:chr14_band26_s91900000_e94700000.fj 2928809774:8587442:chr14_band27_s94700000_e96300000.fj 2937397216:27389311:chr14_band28_s96300000_e101400000.fj 2964786527:9264693:chr14_band29_s101400000_e103200000.fj 2974051220:16320752:chr14_band2_s8100000_e16100000.fj 2990371972:4140293:chr14_band30_s103200000_e104000000.fj 2994512265:17268099:chr14_band31_s104000000_e107349540.fj 3011780364:3060756:chr14_band3_s16100000_e17600000.fj 3014841120:3260428:chr14_band4_s17600000_e19100000.fj 3018101548:26138225:chr14_band5_s19100000_e24600000.fj 3044239773:45862056:chr14_band6_s24600000_e33300000.fj 3090101829:10447980:chr14_band7_s33300000_e35300000.fj 3100549809:6564588:chr14_band8_s35300000_e36600000.fj 3107114397:6398876:chr14_band9_s36600000_e37800000.fj 3113513273:7956724:chr15_band0_s0_e3900000.fj 3121469997:34269266:chr15_band10_s33600000_e40100000.fj 3155739263:13762411:chr15_band11_s40100000_e42800000.fj 3169501674:3947813:chr15_band12_s42800000_e43600000.fj 3173449487:5537714:chr15_band13_s43600000_e44800000.fj 3178987201:24305832:chr15_band14_s44800000_e49500000.fj 3203293033:17507515:chr15_band15_s49500000_e52900000.fj 3220800548:32826524:chr15_band16_s52900000_e59100000.fj 3253627072:1010299:chr15_band17_s59100000_e59300000.fj 3254637371:23454838:chr15_band18_s59300000_e63700000.fj 3278092209:18017355:chr15_band19_s63700000_e67200000.fj 3296109564:9792748:chr15_band1_s3900000_e8700000.fj 3305902312:533847:chr15_band20_s67200000_e67300000.fj 3306436159:1084858:chr15_band21_s67300000_e67500000.fj 3307521017:27465637:chr15_band22_s67500000_e72700000.fj 3334986654:12707353:chr15_band23_s72700000_e75200000.fj 3347694007:6832970:chr15_band24_s75200000_e76600000.fj 3354526977:8748794:chr15_band25_s76600000_e78300000.fj 3363275771:17732191:chr15_band26_s78300000_e81700000.fj 3381007962:15491375:chr15_band27_s81700000_e85200000.fj 3396499337:20295749:chr15_band28_s85200000_e89100000.fj 3416795086:27117670:chr15_band29_s89100000_e94300000.fj 3443912756:14484752:chr15_band2_s8700000_e15800000.fj 3458397508:22592925:chr15_band30_s94300000_e98500000.fj 3480990433:21043993:chr15_band31_s98500000_e102531392.fj 3502034426:6528756:chr15_band3_s15800000_e19000000.fj 3508563182:4646274:chr15_band4_s19000000_e20700000.fj 3513209456:19571328:chr15_band5_s20700000_e25700000.fj 3532780784:12923689:chr15_band6_s25700000_e28100000.fj 3545704473:9921926:chr15_band7_s28100000_e30300000.fj 3555626399:2895507:chr15_band8_s30300000_e31200000.fj 3558521906:11292446:chr15_band9_s31200000_e33600000.fj 3569814352:40629656:chr16_band0_s0_e7900000.fj 3610444008:4080756:chr16_band10_s36600000_e38600000.fj 3614524764:18810667:chr16_band11_s38600000_e47000000.fj 3633335431:29170320:chr16_band12_s47000000_e52600000.fj 3662505751:21574362:chr16_band13_s52600000_e56700000.fj 3684080113:3619563:chr16_band14_s56700000_e57400000.fj 3687699676:49161531:chr16_band15_s57400000_e66700000.fj 3736861207:19748144:chr16_band16_s66700000_e70800000.fj 
3756609351:10946735:chr16_band17_s70800000_e72900000.fj 3767556086:6378485:chr16_band18_s72900000_e74100000.fj 3773934571:26881587:chr16_band19_s74100000_e79200000.fj 3800816158:13661669:chr16_band1_s7900000_e10500000.fj 3814477827:13501427:chr16_band20_s79200000_e81700000.fj 3827979254:13677551:chr16_band21_s81700000_e84200000.fj 3841656805:15666076:chr16_band22_s84200000_e87100000.fj 3857322881:7998490:chr16_band23_s87100000_e88700000.fj 3865321371:8053236:chr16_band24_s88700000_e90354753.fj 3873374607:10728254:chr16_band2_s10500000_e12600000.fj 3884102861:11356748:chr16_band3_s12600000_e14800000.fj 3895459609:7600427:chr16_band4_s14800000_e16800000.fj 3903060036:20722736:chr16_band5_s16800000_e21200000.fj 3923782772:13729019:chr16_band6_s21200000_e24200000.fj 3937511791:20246913:chr16_band7_s24200000_e28100000.fj 3957758704:26945678:chr16_band8_s28100000_e34600000.fj 3984704382:3384870:chr16_band9_s34600000_e36600000.fj 3988089252:16155754:chr17_band0_s0_e3300000.fj 4004245006:12762477:chr17_band10_s38400000_e40900000.fj 4017007483:18572384:chr17_band11_s40900000_e44900000.fj 4035579867:12458663:chr17_band12_s44900000_e47400000.fj 4048038530:14524689:chr17_band13_s47400000_e50200000.fj 4062563219:38661662:chr17_band14_s50200000_e57600000.fj 4101224881:3149045:chr17_band15_s57600000_e58300000.fj 4104373926:13700211:chr17_band16_s58300000_e61100000.fj 4118074137:7529724:chr17_band17_s61100000_e62600000.fj 4125603861:7950542:chr17_band18_s62600000_e64200000.fj 4133554403:14756800:chr17_band19_s64200000_e67100000.fj 4148311203:16443598:chr17_band1_s3300000_e6500000.fj 4164754801:20108889:chr17_band20_s67100000_e70900000.fj 4184863690:20058363:chr17_band21_s70900000_e74800000.fj 4204922053:2587408:chr17_band22_s74800000_e75300000.fj 4207509461:30547504:chr17_band23_s75300000_e81195210.fj 4238056965:21562054:chr17_band2_s6500000_e10700000.fj 4259619019:27395356:chr17_band3_s10700000_e16000000.fj 4287014375:28365678:chr17_band4_s16000000_e22200000.fj 4315380053:289200:chr17_band5_s22200000_e24000000.fj 4315669253:5237174:chr17_band6_s24000000_e25800000.fj 4320906427:29727146:chr17_band7_s25800000_e31800000.fj 4350633573:30907874:chr17_band8_s31800000_e38100000.fj 4381541447:1504858:chr17_band9_s38100000_e38400000.fj 4383046305:14943044:chr18_band0_s0_e2900000.fj 4397989349:33721037:chr18_band10_s37200000_e43500000.fj 4431710386:24805551:chr18_band11_s43500000_e48200000.fj 4456515937:29378907:chr18_band12_s48200000_e53800000.fj 4485894844:12633635:chr18_band13_s53800000_e56200000.fj 4498528479:14797428:chr18_band14_s56200000_e59000000.fj 4513325907:13780102:chr18_band15_s59000000_e61600000.fj 4527106009:28794272:chr18_band16_s61600000_e66800000.fj 4555900281:10201924:chr18_band17_s66800000_e68700000.fj 4566102205:24124836:chr18_band18_s68700000_e73100000.fj 4590227041:26615557:chr18_band19_s73100000_e78077248.fj 4616842598:22145236:chr18_band1_s2900000_e7100000.fj 4638987834:7311348:chr18_band2_s7100000_e8500000.fj 4646299182:12577740:chr18_band3_s8500000_e10900000.fj 4658876922:21508140:chr18_band4_s10900000_e15400000.fj 4680385062:52389:chr18_band5_s15400000_e17200000.fj 4680437451:5076969:chr18_band6_s17200000_e19000000.fj 4685514420:31190178:chr18_band7_s19000000_e25000000.fj 4716704598:41160388:chr18_band8_s25000000_e32700000.fj 4757864986:23815045:chr18_band9_s32700000_e37200000.fj 4781680031:34031899:chr19_band0_s0_e6900000.fj 4815711930:13851503:chr19_band10_s35500000_e38300000.fj 4829563433:1998048:chr19_band11_s38300000_e38700000.fj 
4831561481:22892591:chr19_band12_s38700000_e43400000.fj 4854454072:8872354:chr19_band13_s43400000_e45200000.fj 4863326426:13749381:chr19_band14_s45200000_e48000000.fj 4877075807:16660930:chr19_band15_s48000000_e51400000.fj 4893736737:11038031:chr19_band16_s51400000_e53600000.fj 4904774768:13412850:chr19_band17_s53600000_e56300000.fj 4918187618:14313555:chr19_band18_s56300000_e59128983.fj 4932501173:33635703:chr19_band1_s6900000_e13900000.fj 4966136876:489834:chr19_band2_s13900000_e14000000.fj 4966626710:11377056:chr19_band3_s14000000_e16300000.fj 4978003766:18348545:chr19_band4_s16300000_e20000000.fj 4996352311:21127772:chr19_band5_s20000000_e24400000.fj 5017480083:1059388:chr19_band6_s24400000_e26500000.fj 5018539471:6984270:chr19_band7_s26500000_e28600000.fj 5025523741:20073973:chr19_band8_s28600000_e32400000.fj 5045597714:15769669:chr19_band9_s32400000_e35500000.fj 5061367383:9756229:chr1_band0_s0_e2300000.fj 5071123612:11489333:chr1_band10_s30200000_e32400000.fj 5082612945:11074951:chr1_band11_s32400000_e34600000.fj 5093687896:28145091:chr1_band12_s34600000_e40100000.fj 5121832987:20545569:chr1_band13_s40100000_e44100000.fj 5142378556:13582476:chr1_band14_s44100000_e46800000.fj 5155961032:19737049:chr1_band15_s46800000_e50700000.fj 5175698081:27529030:chr1_band16_s50700000_e56100000.fj 5203227111:15452164:chr1_band17_s56100000_e59000000.fj 5218679275:12082565:chr1_band18_s59000000_e61300000.fj 5230761840:39789591:chr1_band19_s61300000_e68900000.fj 5270551431:15804689:chr1_band1_s2300000_e5400000.fj 5286356120:4141822:chr1_band20_s68900000_e69700000.fj 5290497942:80211445:chr1_band21_s69700000_e84900000.fj 5370709387:18343642:chr1_band22_s84900000_e88400000.fj 5389053029:18664730:chr1_band23_s88400000_e92000000.fj 5407717759:13861818:chr1_band24_s92000000_e94700000.fj 5421579577:26472421:chr1_band25_s94700000_e99700000.fj 5448051998:13161786:chr1_band26_s99700000_e102200000.fj 5461213784:26136584:chr1_band27_s102200000_e107200000.fj 5487350368:23561374:chr1_band28_s107200000_e111800000.fj 5510911742:22349851:chr1_band29_s111800000_e116100000.fj 5533261593:9400437:chr1_band2_s5400000_e7200000.fj 5542662030:8898401:chr1_band30_s116100000_e117800000.fj 5551560431:14463385:chr1_band31_s117800000_e120600000.fj 5566023816:2797932:chr1_band32_s120600000_e121500000.fj 5568821748:7140760:chr1_band33_s121500000_e125000000.fj 5575962508:7956760:chr1_band34_s125000000_e128900000.fj 5583919268:28100130:chr1_band35_s128900000_e142600000.fj 5612019398:15570132:chr1_band36_s142600000_e147000000.fj 5627589530:12079936:chr1_band37_s147000000_e150300000.fj 5639669466:23848498:chr1_band38_s150300000_e155000000.fj 5663517964:7320072:chr1_band39_s155000000_e156500000.fj 5670838036:10249929:chr1_band3_s7200000_e9200000.fj 5681087965:13622024:chr1_band40_s156500000_e159100000.fj 5694709989:7329847:chr1_band41_s159100000_e160500000.fj 5702039836:25915639:chr1_band42_s160500000_e165500000.fj 5727955475:8902437:chr1_band43_s165500000_e167200000.fj 5736857912:19387309:chr1_band44_s167200000_e170900000.fj 5756245221:10334901:chr1_band45_s170900000_e172900000.fj 5766580122:15956391:chr1_band46_s172900000_e176000000.fj 5782536513:22381464:chr1_band47_s176000000_e180300000.fj 5804917977:28762910:chr1_band48_s180300000_e185800000.fj 5833680887:27482517:chr1_band49_s185800000_e190800000.fj 5861163404:17698144:chr1_band4_s9200000_e12700000.fj 5878861548:16115379:chr1_band50_s190800000_e193800000.fj 5894976927:26603399:chr1_band51_s193800000_e198700000.fj 5921580326:42767332:chr1_band52_s198700000_e207200000.fj 
5964347658:22519054:chr1_band53_s207200000_e211500000.fj 5986866712:15623994:chr1_band54_s211500000_e214500000.fj 6002490706:50651137:chr1_band55_s214500000_e224100000.fj 6053141843:2340783:chr1_band56_s224100000_e224600000.fj 6055482626:12296366:chr1_band57_s224600000_e227000000.fj 6067778992:19160541:chr1_band58_s227000000_e230700000.fj 6086939533:21150112:chr1_band59_s230700000_e234700000.fj 6108089645:15934102:chr1_band5_s12700000_e16200000.fj 6124023747:9572247:chr1_band60_s234700000_e236600000.fj 6133595994:37063925:chr1_band61_s236600000_e243700000.fj 6170659919:28279658:chr1_band62_s243700000_e249250621.fj 6198939577:21312883:chr1_band6_s16200000_e20400000.fj 6220252460:17968553:chr1_band7_s20400000_e23900000.fj 6238221013:20502272:chr1_band8_s23900000_e28000000.fj 6258723285:10454348:chr1_band9_s28000000_e30200000.fj 6269177633:26240932:chr20_band0_s0_e5100000.fj 6295418565:11477343:chr20_band10_s32100000_e34400000.fj 6306895908:16121702:chr20_band11_s34400000_e37600000.fj 6323017610:21665969:chr20_band12_s37600000_e41700000.fj 6344683579:2106601:chr20_band13_s41700000_e42100000.fj 6346790180:22234896:chr20_band14_s42100000_e46400000.fj 6369025076:17466445:chr20_band15_s46400000_e49800000.fj 6386491521:27353500:chr20_band16_s49800000_e55000000.fj 6413845021:7951115:chr20_band17_s55000000_e56500000.fj 6421796136:10132647:chr20_band18_s56500000_e58400000.fj 6431928783:24122390:chr20_band19_s58400000_e63025520.fj 6456051173:21750808:chr20_band1_s5100000_e9200000.fj 6477801981:15548705:chr20_band2_s9200000_e12100000.fj 6493350686:30792695:chr20_band3_s12100000_e17900000.fj 6524143381:17804912:chr20_band4_s17900000_e21300000.fj 6541948293:5184960:chr20_band5_s21300000_e22300000.fj 6547133253:17298739:chr20_band6_s22300000_e25600000.fj 6564431992:3301773:chr20_band7_s25600000_e27500000.fj 6567733765:3876756:chr20_band8_s27500000_e29400000.fj 6571610521:13283209:chr20_band9_s29400000_e32100000.fj 6584893730:5712724:chr21_band0_s0_e2800000.fj 6590606454:10518888:chr21_band10_s35800000_e37800000.fj 6601125342:10144603:chr21_band11_s37800000_e39700000.fj 6611269945:15620599:chr21_band12_s39700000_e42600000.fj 6626890544:28940326:chr21_band13_s42600000_e48129895.fj 6655830870:8160748:chr21_band1_s2800000_e6800000.fj 6663991618:11144287:chr21_band2_s6800000_e10900000.fj 6675135905:1431977:chr21_band3_s10900000_e13200000.fj 6676567882:2244756:chr21_band4_s13200000_e14300000.fj 6678812638:9266581:chr21_band5_s14300000_e16400000.fj 6688079219:41245659:chr21_band6_s16400000_e24000000.fj 6729324878:15344510:chr21_band7_s24000000_e26800000.fj 6744669388:24932791:chr21_band8_s26800000_e31500000.fj 6769602179:22442446:chr21_band9_s31500000_e35800000.fj 6792044625:7752724:chr22_band0_s0_e3800000.fj 6799797349:28224380:chr22_band10_s32200000_e37600000.fj 6828021729:17304839:chr22_band11_s37600000_e41000000.fj 6845326568:16113075:chr22_band12_s41000000_e44200000.fj 6861439643:22233411:chr22_band13_s44200000_e48400000.fj 6883673054:5524922:chr22_band14_s48400000_e49400000.fj 6889197976:9664262:chr22_band15_s49400000_e51304566.fj 6898862238:9180748:chr22_band1_s3800000_e8300000.fj 6908042986:7956752:chr22_band2_s8300000_e12200000.fj 6915999738:5100756:chr22_band3_s12200000_e14700000.fj 6921100494:9937902:chr22_band4_s14700000_e17900000.fj 6931038396:19548232:chr22_band5_s17900000_e22200000.fj 6950586628:6683394:chr22_band6_s22200000_e23500000.fj 6957270022:11752445:chr22_band7_s23500000_e25900000.fj 6969022467:19256022:chr22_band8_s25900000_e29600000.fj 
6988278489:12954853:chr22_band9_s29600000_e32200000.fj 7001233342:23233415:chr2_band0_s0_e4400000.fj 7024466757:10667298:chr2_band10_s36600000_e38600000.fj 7035134055:16966684:chr2_band11_s38600000_e41800000.fj 7052100739:31586877:chr2_band12_s41800000_e47800000.fj 7083687616:26968370:chr2_band13_s47800000_e52900000.fj 7110655986:10993850:chr2_band14_s52900000_e55000000.fj 7121649836:33045521:chr2_band15_s55000000_e61300000.fj 7154695357:14150927:chr2_band16_s61300000_e64100000.fj 7168846284:23578835:chr2_band17_s64100000_e68600000.fj 7192425119:14885552:chr2_band18_s68600000_e71500000.fj 7207310671:10410131:chr2_band19_s71500000_e73500000.fj 7217720802:14156834:chr2_band1_s4400000_e7100000.fj 7231877636:7578172:chr2_band20_s73500000_e75000000.fj 7239455808:44109485:chr2_band21_s75000000_e83300000.fj 7283565293:31254935:chr2_band22_s83300000_e90500000.fj 7314820228:5169067:chr2_band23_s90500000_e93300000.fj 7319989295:10368921:chr2_band24_s93300000_e96800000.fj 7330358216:29052271:chr2_band25_s96800000_e102700000.fj 7359410487:17612827:chr2_band26_s102700000_e106000000.fj 7377023314:7641759:chr2_band27_s106000000_e107500000.fj 7384665073:13411716:chr2_band28_s107500000_e110200000.fj 7398076789:17757245:chr2_band29_s110200000_e114400000.fj 7415834034:26954567:chr2_band2_s7100000_e12200000.fj 7442788601:23246223:chr2_band30_s114400000_e118800000.fj 7466034824:19074161:chr2_band31_s118800000_e122400000.fj 7485108985:39449695:chr2_band32_s122400000_e129900000.fj 7524558680:11696577:chr2_band33_s129900000_e132500000.fj 7536255257:13249863:chr2_band34_s132500000_e135100000.fj 7549505120:8708592:chr2_band35_s135100000_e136800000.fj 7558213712:29182964:chr2_band36_s136800000_e142200000.fj 7587396676:10264945:chr2_band37_s142200000_e144100000.fj 7597661621:24601843:chr2_band38_s144100000_e148700000.fj 7622263464:5951781:chr2_band39_s148700000_e149900000.fj 7628215245:23795508:chr2_band3_s12200000_e16700000.fj 7652010753:3150007:chr2_band40_s149900000_e150500000.fj 7655160760:23077469:chr2_band41_s150500000_e154900000.fj 7678238229:25968072:chr2_band42_s154900000_e159800000.fj 7704206301:20640325:chr2_band43_s159800000_e163700000.fj 7724846626:31998832:chr2_band44_s163700000_e169700000.fj 7756845458:43632512:chr2_band45_s169700000_e178000000.fj 7800477970:13731959:chr2_band46_s178000000_e180600000.fj 7814209929:12856172:chr2_band47_s180600000_e183000000.fj 7827066101:34247127:chr2_band48_s183000000_e189400000.fj 7861313228:13286018:chr2_band49_s189400000_e191900000.fj 7874599246:13181256:chr2_band4_s16700000_e19200000.fj 7887780502:29663052:chr2_band50_s191900000_e197400000.fj 7917443554:30634366:chr2_band51_s197400000_e203300000.fj 7948077920:8075493:chr2_band52_s203300000_e204900000.fj 7956153413:21661204:chr2_band53_s204900000_e209000000.fj 7977814617:33806107:chr2_band54_s209000000_e215300000.fj 8011620724:32791910:chr2_band55_s215300000_e221500000.fj 8044412634:19689112:chr2_band56_s221500000_e225200000.fj 8064101746:4741805:chr2_band57_s225200000_e226100000.fj 8068843551:25904705:chr2_band58_s226100000_e231000000.fj 8094748256:23619321:chr2_band59_s231000000_e235600000.fj 8118367577:25423194:chr2_band5_s19200000_e24000000.fj 8143790771:9119290:chr2_band60_s235600000_e237300000.fj 8152910061:30796914:chr2_band61_s237300000_e243199373.fj 8183706975:19924674:chr2_band6_s24000000_e27900000.fj 8203631649:11135309:chr2_band7_s27900000_e30000000.fj 8214766958:10940177:chr2_band8_s30000000_e32100000.fj 8225707135:23560118:chr2_band9_s32100000_e36600000.fj 8249267253:14861122:chr3_band0_s0_e2800000.fj 
8264128375:22809815:chr3_band10_s32100000_e36500000.fj 8286938190:15046818:chr3_band11_s36500000_e39400000.fj 8301985008:22186262:chr3_band12_s39400000_e43700000.fj 8324171270:2058080:chr3_band13_s43700000_e44100000.fj 8326229350:521252:chr3_band14_s44100000_e44200000.fj 8326750602:32234144:chr3_band15_s44200000_e50600000.fj 8358984746:8441932:chr3_band16_s50600000_e52300000.fj 8367426678:10948899:chr3_band17_s52300000_e54400000.fj 8378375577:21772898:chr3_band18_s54400000_e58600000.fj 8400148475:27069700:chr3_band19_s58600000_e63700000.fj 8427218175:6545313:chr3_band1_s2800000_e4000000.fj 8433763488:31787795:chr3_band20_s63700000_e69800000.fj 8465551283:23275812:chr3_band21_s69800000_e74200000.fj 8488827095:29739564:chr3_band22_s74200000_e79800000.fj 8518566659:20035093:chr3_band23_s79800000_e83500000.fj 8538601752:20162108:chr3_band24_s83500000_e87200000.fj 8558763860:3767584:chr3_band25_s87200000_e87900000.fj 8562531444:13581503:chr3_band26_s87900000_e91000000.fj 8576112947:7002557:chr3_band27_s91000000_e93900000.fj 8583115504:23576185:chr3_band28_s93900000_e98300000.fj 8606691689:8815871:chr3_band29_s98300000_e100000000.fj 8615507560:24882143:chr3_band2_s4000000_e8700000.fj 8640389703:4697534:chr3_band30_s100000000_e100900000.fj 8645087237:9838940:chr3_band31_s100900000_e102800000.fj 8654926177:18496118:chr3_band32_s102800000_e106200000.fj 8673422295:9018631:chr3_band33_s106200000_e107900000.fj 8682440926:17929166:chr3_band34_s107900000_e111300000.fj 8700370092:11594711:chr3_band35_s111300000_e113500000.fj 8711964803:20308668:chr3_band36_s113500000_e117300000.fj 8732273471:9030401:chr3_band37_s117300000_e119000000.fj 8741303872:14898827:chr3_band38_s119000000_e121900000.fj 8756202699:10008811:chr3_band39_s121900000_e123800000.fj 8766211510:15979710:chr3_band3_s8700000_e11800000.fj 8782191220:10116188:chr3_band40_s123800000_e125800000.fj 8792307408:17806797:chr3_band41_s125800000_e129200000.fj 8810114205:23227207:chr3_band42_s129200000_e133700000.fj 8833341412:10556009:chr3_band43_s133700000_e135700000.fj 8843897421:15182933:chr3_band44_s135700000_e138700000.fj 8859080354:21307590:chr3_band45_s138700000_e142800000.fj 8880387944:32759712:chr3_band46_s142800000_e148900000.fj 8913147656:16878434:chr3_band47_s148900000_e152100000.fj 8930026090:15100163:chr3_band48_s152100000_e155000000.fj 8945126253:10434017:chr3_band49_s155000000_e157000000.fj 8955560270:7785476:chr3_band4_s11800000_e13300000.fj 8963345746:10542610:chr3_band50_s157000000_e159000000.fj 8973888356:8787004:chr3_band51_s159000000_e160700000.fj 8982675360:37253134:chr3_band52_s160700000_e167600000.fj 9019928494:17183652:chr3_band53_s167600000_e170900000.fj 9037112146:25746921:chr3_band54_s170900000_e175700000.fj 9062859067:17296262:chr3_band55_s175700000_e179000000.fj 9080155329:19044817:chr3_band56_s179000000_e182700000.fj 9099200146:9216326:chr3_band57_s182700000_e184500000.fj 9108416472:7709847:chr3_band58_s184500000_e186000000.fj 9116126319:9992471:chr3_band59_s186000000_e187900000.fj 9126118790:16105743:chr3_band5_s13300000_e16400000.fj 9142224533:23723049:chr3_band60_s187900000_e192300000.fj 9165947582:28740659:chr3_band61_s192300000_e198022430.fj 9194688241:39101485:chr3_band6_s16400000_e23900000.fj 9233789726:13179037:chr3_band7_s23900000_e26400000.fj 9246968763:23659026:chr3_band8_s26400000_e30900000.fj 9270627789:6320874:chr3_band9_s30900000_e32100000.fj 9276948663:22624820:chr4_band0_s0_e4500000.fj 9299573483:19209706:chr4_band10_s44600000_e48200000.fj 9318783189:6384513:chr4_band11_s48200000_e50400000.fj 
9325167702:4766253:chr4_band12_s50400000_e52700000.fj 9329933955:35018116:chr4_band13_s52700000_e59500000.fj 9364952071:38549974:chr4_band14_s59500000_e66600000.fj 9403502045:20373460:chr4_band15_s66600000_e70500000.fj 9423875505:29919881:chr4_band16_s70500000_e76300000.fj 9453795386:13493480:chr4_band17_s76300000_e78900000.fj 9467288866:18466490:chr4_band18_s78900000_e82400000.fj 9485755356:8860418:chr4_band19_s82400000_e84100000.fj 9494615774:7798021:chr4_band1_s4500000_e6000000.fj 9502413795:14575657:chr4_band20_s84100000_e86900000.fj 9516989452:5634479:chr4_band21_s86900000_e88000000.fj 9522623931:29718269:chr4_band22_s88000000_e93700000.fj 9552342200:7383995:chr4_band23_s93700000_e95100000.fj 9559726195:19715177:chr4_band24_s95100000_e98800000.fj 9579441372:11922350:chr4_band25_s98800000_e101100000.fj 9591363722:34698356:chr4_band26_s101100000_e107700000.fj 9626062078:33645974:chr4_band27_s107700000_e114100000.fj 9659708052:35587370:chr4_band28_s114100000_e120800000.fj 9695295422:15811642:chr4_band29_s120800000_e123800000.fj 9711107064:27146461:chr4_band2_s6000000_e11300000.fj 9738253525:26736354:chr4_band30_s123800000_e128800000.fj 9764989879:12053649:chr4_band31_s128800000_e131100000.fj 9777043528:45621870:chr4_band32_s131100000_e139500000.fj 9822665398:10457142:chr4_band33_s139500000_e141500000.fj 9833122540:27183032:chr4_band34_s141500000_e146800000.fj 9860305572:8901657:chr4_band35_s146800000_e148500000.fj 9869207229:13650247:chr4_band36_s148500000_e151100000.fj 9882857476:23802908:chr4_band37_s151100000_e155600000.fj 9906660384:33300872:chr4_band38_s155600000_e161800000.fj 9939961256:14822270:chr4_band39_s161800000_e164500000.fj 9954783526:20780182:chr4_band3_s11300000_e15200000.fj 9975563708:29757577:chr4_band40_s164500000_e170100000.fj 10005321285:9439391:chr4_band41_s170100000_e171900000.fj 10014760676:23890991:chr4_band42_s171900000_e176300000.fj 10038651667:6504378:chr4_band43_s176300000_e177500000.fj 10045156045:31429424:chr4_band44_s177500000_e183200000.fj 10076585469:20867286:chr4_band45_s183200000_e187100000.fj 10097452755:21542259:chr4_band46_s187100000_e191154276.fj 10118995014:13635330:chr4_band4_s15200000_e17800000.fj 10132630344:18645443:chr4_band5_s17800000_e21300000.fj 10151275787:33763872:chr4_band6_s21300000_e27700000.fj 10185039659:43935944:chr4_band7_s27700000_e35800000.fj 10228975603:28173344:chr4_band8_s35800000_e41200000.fj 10257148947:17960379:chr4_band9_s41200000_e44600000.fj 10275109326:23869100:chr5_band0_s0_e4500000.fj 10298978426:21475297:chr5_band10_s38400000_e42500000.fj 10320453723:18293853:chr5_band11_s42500000_e46100000.fj 10338747576:1514377:chr5_band12_s46100000_e48400000.fj 10340261953:8509364:chr5_band13_s48400000_e50700000.fj 10348771317:43017890:chr5_band14_s50700000_e58900000.fj 10391789207:20665117:chr5_band15_s58900000_e62900000.fj 10412454324:1591467:chr5_band16_s62900000_e63200000.fj 10414045791:18148759:chr5_band17_s63200000_e66700000.fj 10432194550:8856200:chr5_band18_s66700000_e68400000.fj 10441050750:20707621:chr5_band19_s68400000_e73300000.fj 10461758371:9656450:chr5_band1_s4500000_e6300000.fj 10471414821:18425621:chr5_band20_s73300000_e76900000.fj 10489840442:23196332:chr5_band21_s76900000_e81400000.fj 10513036774:7300891:chr5_band22_s81400000_e82800000.fj 10520337665:49892537:chr5_band23_s82800000_e92300000.fj 10570230202:30721980:chr5_band24_s92300000_e98200000.fj 10600952182:23888340:chr5_band25_s98200000_e102800000.fj 10624840522:9229611:chr5_band26_s102800000_e104500000.fj 
10634070133:27421753:chr5_band27_s104500000_e109600000.fj 10661491886:9899436:chr5_band28_s109600000_e111500000.fj 10671391322:8406659:chr5_band29_s111500000_e113100000.fj 10679797981:18694996:chr5_band2_s6300000_e9800000.fj 10698492977:11028527:chr5_band30_s113100000_e115200000.fj 10709521504:32909679:chr5_band31_s115200000_e121400000.fj 10742431183:30963436:chr5_band32_s121400000_e127300000.fj 10773394619:17266919:chr5_band33_s127300000_e130600000.fj 10790661538:28998009:chr5_band34_s130600000_e136200000.fj 10819659547:16704607:chr5_band35_s136200000_e139500000.fj 10836364154:26045175:chr5_band36_s139500000_e144500000.fj 10862409329:27918575:chr5_band37_s144500000_e149800000.fj 10890327904:15050054:chr5_band38_s149800000_e152700000.fj 10905377958:15603577:chr5_band39_s152700000_e155700000.fj 10920981535:27716393:chr5_band3_s9800000_e15000000.fj 10948697928:22019757:chr5_band40_s155700000_e159900000.fj 10970717685:45797643:chr5_band41_s159900000_e168500000.fj 11016515328:22514380:chr5_band42_s168500000_e172800000.fj 11039029708:19253951:chr5_band43_s172800000_e176600000.fj 11058283659:21229495:chr5_band44_s176600000_e180915260.fj 11079513154:17559372:chr5_band4_s15000000_e18400000.fj 11097072526:25526673:chr5_band5_s18400000_e23300000.fj 11122599199:7096070:chr5_band6_s23300000_e24600000.fj 11129695269:23411851:chr5_band7_s24600000_e28900000.fj 11153107120:26119054:chr5_band8_s28900000_e33800000.fj 11179226174:23290349:chr5_band9_s33800000_e38400000.fj 11202516523:11854057:chr6_band0_s0_e2300000.fj 11214370580:8496414:chr6_band10_s30400000_e32100000.fj 11222866994:7220728:chr6_band11_s32100000_e33500000.fj 11230087722:15866348:chr6_band12_s33500000_e36600000.fj 11245954070:20565771:chr6_band13_s36600000_e40500000.fj 11266519841:29696078:chr6_band14_s40500000_e46200000.fj 11296215919:29661980:chr6_band15_s46200000_e51800000.fj 11325877899:5687860:chr6_band16_s51800000_e52900000.fj 11331565759:21802934:chr6_band17_s52900000_e57000000.fj 11353368693:8125890:chr6_band18_s57000000_e58700000.fj 11361494583:345265:chr6_band19_s58700000_e61000000.fj 11361839848:10003929:chr6_band1_s2300000_e4200000.fj 11371843777:8997133:chr6_band20_s61000000_e63300000.fj 11380840910:550060:chr6_band21_s63300000_e63400000.fj 11381390970:35514558:chr6_band22_s63400000_e70000000.fj 11416905528:30770003:chr6_band23_s70000000_e75900000.fj 11447675531:41661599:chr6_band24_s75900000_e83900000.fj 11489337130:5032680:chr6_band25_s83900000_e84900000.fj 11494369810:15730167:chr6_band26_s84900000_e88000000.fj 11510099977:26698981:chr6_band27_s88000000_e93100000.fj 11536798958:33870086:chr6_band28_s93100000_e99500000.fj 11570669044:5783371:chr6_band29_s99500000_e100600000.fj 11576452415:15201350:chr6_band2_s4200000_e7100000.fj 11591653765:26318508:chr6_band30_s100600000_e105500000.fj 11617972273:47367411:chr6_band31_s105500000_e114600000.fj 11665339684:19419515:chr6_band32_s114600000_e118300000.fj 11684759199:1079105:chr6_band33_s118300000_e118500000.fj 11685838304:40594325:chr6_band34_s118500000_e126100000.fj 11726432629:5183249:chr6_band35_s126100000_e127100000.fj 11731615878:17064012:chr6_band36_s127100000_e130300000.fj 11748679890:4703673:chr6_band37_s130300000_e131200000.fj 11753383563:20937849:chr6_band38_s131200000_e135200000.fj 11774321412:19768577:chr6_band39_s135200000_e139000000.fj 11794089989:18520799:chr6_band3_s7100000_e10600000.fj 11812610788:20084958:chr6_band40_s139000000_e142800000.fj 11832695746:14583555:chr6_band41_s142800000_e145600000.fj 11847279301:17888235:chr6_band42_s145600000_e149000000.fj 
11865167536:18200150:chr6_band43_s149000000_e152500000.fj 11883367686:15899684:chr6_band44_s152500000_e155500000.fj 11899267370:28588964:chr6_band45_s155500000_e161000000.fj 11927856334:18688807:chr6_band46_s161000000_e164500000.fj 11946545141:34299518:chr6_band47_s164500000_e171115067.fj 11980844659:5187494:chr6_band4_s10600000_e11600000.fj 11986032153:9550459:chr6_band5_s11600000_e13400000.fj 11995582612:9425852:chr6_band6_s13400000_e15200000.fj 12005008464:52257569:chr6_band7_s15200000_e25200000.fj 12057266033:8929925:chr6_band8_s25200000_e27000000.fj 12066195958:17556391:chr6_band9_s27000000_e30400000.fj 12083752349:14247713:chr7_band0_s0_e2800000.fj 12098000062:11066306:chr7_band10_s35000000_e37200000.fj 12109066368:32087088:chr7_band11_s37200000_e43300000.fj 12141153456:10668222:chr7_band12_s43300000_e45400000.fj 12151821678:18626376:chr7_band13_s45400000_e49000000.fj 12170448054:7958919:chr7_band14_s49000000_e50500000.fj 12178406973:18713509:chr7_band15_s50500000_e54000000.fj 12197120482:18935900:chr7_band16_s54000000_e58000000.fj 12216056382:261032:chr7_band17_s58000000_e59900000.fj 12216317414:4289180:chr7_band18_s59900000_e61700000.fj 12220606594:22817045:chr7_band19_s61700000_e67000000.fj 12243423639:8953071:chr7_band1_s2800000_e4500000.fj 12252376710:26475183:chr7_band20_s67000000_e72200000.fj 12278851893:21260557:chr7_band21_s72200000_e77500000.fj 12300112450:47850592:chr7_band22_s77500000_e86400000.fj 12347963042:9284520:chr7_band23_s86400000_e88200000.fj 12357247562:15247848:chr7_band24_s88200000_e91100000.fj 12372495410:8580818:chr7_band25_s91100000_e92800000.fj 12381076228:26810427:chr7_band26_s92800000_e98000000.fj 12407886655:27671122:chr7_band27_s98000000_e103800000.fj 12435557777:3665630:chr7_band28_s103800000_e104500000.fj 12439223407:14940321:chr7_band29_s104500000_e107400000.fj 12454163728:12957633:chr7_band2_s4500000_e7300000.fj 12467121361:38098753:chr7_band30_s107400000_e114600000.fj 12505220114:14874016:chr7_band31_s114600000_e117400000.fj 12520094130:19901201:chr7_band32_s117400000_e121100000.fj 12539995331:14314479:chr7_band33_s121100000_e123800000.fj 12554309810:17691683:chr7_band34_s123800000_e127100000.fj 12572001493:10632852:chr7_band35_s127100000_e129200000.fj 12582634345:5806847:chr7_band36_s129200000_e130400000.fj 12588441192:11677084:chr7_band37_s130400000_e132600000.fj 12600118276:29615252:chr7_band38_s132600000_e138200000.fj 12629733528:24913008:chr7_band39_s138200000_e143100000.fj 12654646536:35077014:chr7_band3_s7300000_e13800000.fj 12689723550:23967238:chr7_band40_s143100000_e147900000.fj 12713690788:23454742:chr7_band41_s147900000_e152600000.fj 12737145530:12478502:chr7_band42_s152600000_e155100000.fj 12749624032:21624407:chr7_band43_s155100000_e159138663.fj 12771248439:14556948:chr7_band4_s13800000_e16500000.fj 12785805387:23503963:chr7_band5_s16500000_e20900000.fj 12809309350:23959972:chr7_band6_s20900000_e25500000.fj 12833269322:13023330:chr7_band7_s25500000_e28000000.fj 12846292652:4304022:chr7_band8_s28000000_e28800000.fj 12850596674:32133100:chr7_band9_s28800000_e35000000.fj 12882729774:11110146:chr8_band0_s0_e2200000.fj 12893839920:17322291:chr8_band10_s39700000_e43100000.fj 12911162211:3523440:chr8_band11_s43100000_e45600000.fj 12914685651:8688009:chr8_band12_s45600000_e48100000.fj 12923373660:21279449:chr8_band13_s48100000_e52200000.fj 12944653109:2084644:chr8_band14_s52200000_e52600000.fj 12946737753:14865421:chr8_band15_s52600000_e55500000.fj 12961603174:31742008:chr8_band16_s55500000_e61600000.fj 
12993345182:3139055:chr8_band17_s61600000_e62200000.fj 12996484237:20025220:chr8_band18_s62200000_e66000000.fj 13016509457:10184650:chr8_band19_s66000000_e68000000.fj 13026694107:22530516:chr8_band1_s2200000_e6200000.fj 13049224623:13062826:chr8_band20_s68000000_e70500000.fj 13062287449:17612880:chr8_band21_s70500000_e73900000.fj 13079900329:23191965:chr8_band22_s73900000_e78300000.fj 13103092294:9532882:chr8_band23_s78300000_e80100000.fj 13112625176:23483810:chr8_band24_s80100000_e84600000.fj 13136108986:11316441:chr8_band25_s84600000_e86900000.fj 13147425427:33406608:chr8_band26_s86900000_e93300000.fj 13180832035:29555310:chr8_band27_s93300000_e99000000.fj 13210387345:13315728:chr8_band28_s99000000_e101600000.fj 13223703073:24175332:chr8_band29_s101600000_e106200000.fj 13247878405:30528779:chr8_band2_s6200000_e12700000.fj 13278407184:22764188:chr8_band30_s106200000_e110500000.fj 13301171372:8501850:chr8_band31_s110500000_e112100000.fj 13309673222:30459907:chr8_band32_s112100000_e117700000.fj 13340133129:7937153:chr8_band33_s117700000_e119200000.fj 13348070282:17300095:chr8_band34_s119200000_e122500000.fj 13365370377:25178211:chr8_band35_s122500000_e127300000.fj 13390548588:22053170:chr8_band36_s127300000_e131500000.fj 13412601758:26170716:chr8_band37_s131500000_e136400000.fj 13438772474:18915984:chr8_band38_s136400000_e139900000.fj 13457688458:33011109:chr8_band39_s139900000_e146364022.fj 13490699567:34615593:chr8_band3_s12700000_e19000000.fj 13525315160:22721686:chr8_band4_s19000000_e23300000.fj 13548036846:21858716:chr8_band5_s23300000_e27400000.fj 13569895562:7279298:chr8_band6_s27400000_e28800000.fj 13577174860:40036264:chr8_band7_s28800000_e36500000.fj 13617211124:9223086:chr8_band8_s36500000_e38300000.fj 13626434210:7285487:chr8_band9_s38300000_e39700000.fj 13633719697:11189873:chr9_band0_s0_e2200000.fj 13644909570:15398551:chr9_band10_s33200000_e36300000.fj 13660308121:10859291:chr9_band11_s36300000_e38400000.fj 13671167412:8098913:chr9_band12_s38400000_e41000000.fj 13679266325:7680539:chr9_band13_s41000000_e43600000.fj 13686946864:11204600:chr9_band14_s43600000_e47300000.fj 13698151464:54388:chr9_band15_s47300000_e49000000.fj 13698205852:3468752:chr9_band16_s49000000_e50700000.fj 13701674604:31370255:chr9_band17_s50700000_e65900000.fj 13733044859:8507254:chr9_band18_s65900000_e68700000.fj 13741552113:13175993:chr9_band19_s68700000_e72200000.fj 13754728106:12788834:chr9_band1_s2200000_e4600000.fj 13767516940:9460838:chr9_band20_s72200000_e74000000.fj 13776977778:27323515:chr9_band21_s74000000_e79200000.fj 13804301293:9840418:chr9_band22_s79200000_e81100000.fj 13814141711:15804284:chr9_band23_s81100000_e84100000.fj 13829945995:14087052:chr9_band24_s84100000_e86900000.fj 13844033047:17922209:chr9_band25_s86900000_e90400000.fj 13861955256:6951532:chr9_band26_s90400000_e91800000.fj 13868906788:10179374:chr9_band27_s91800000_e93900000.fj 13879086162:13769787:chr9_band28_s93900000_e96600000.fj 13892855949:13598284:chr9_band29_s96600000_e99300000.fj 13906454233:22891146:chr9_band2_s4600000_e9000000.fj 13929345379:16625559:chr9_band30_s99300000_e102600000.fj 13945970938:29592637:chr9_band31_s102600000_e108200000.fj 13975563575:16101070:chr9_band32_s108200000_e111300000.fj 13991664645:18810443:chr9_band33_s111300000_e114900000.fj 14010475088:14696084:chr9_band34_s114900000_e117700000.fj 14025171172:25738558:chr9_band35_s117700000_e122500000.fj 14050909730:17151669:chr9_band36_s122500000_e125800000.fj 14068061399:23282243:chr9_band37_s125800000_e130300000.fj 
14091343642:15933520:chr9_band38_s130300000_e133500000.fj 14107277162:2539339:chr9_band39_s133500000_e134000000.fj 14109816501:28353605:chr9_band3_s9000000_e14200000.fj 14138170106:9895904:chr9_band40_s134000000_e135900000.fj 14148066010:7847526:chr9_band41_s135900000_e137400000.fj 14155913536:19420968:chr9_band42_s137400000_e141213431.fj 14175334504:12634378:chr9_band4_s14200000_e16600000.fj 14187968882:10157396:chr9_band5_s16600000_e18500000.fj 14198126278:7156443:chr9_band6_s18500000_e19900000.fj 14205282721:29952199:chr9_band7_s19900000_e25600000.fj 14235234920:12640911:chr9_band8_s25600000_e28000000.fj 14247875831:28005247:chr9_band9_s28000000_e33200000.fj 14275881078:61284:chrM_band0_s0_e16571.fj 14275942362:14544713:chrX_band0_s0_e4300000.fj 14290487075:24455427:chrX_band10_s37600000_e42400000.fj 14314942502:19979357:chrX_band11_s42400000_e46400000.fj 14334921859:15780934:chrX_band12_s46400000_e49800000.fj 14350702793:22068346:chrX_band13_s49800000_e54800000.fj 14372771139:15483950:chrX_band14_s54800000_e58100000.fj 14388255089:2245852:chrX_band15_s58100000_e60600000.fj 14390500941:8206011:chrX_band16_s60600000_e63000000.fj 14398706952:7456077:chrX_band17_s63000000_e64600000.fj 14406163029:15544112:chrX_band18_s64600000_e67800000.fj 14421707141:19435511:chrX_band19_s67800000_e71800000.fj 14441142652:8840055:chrX_band1_s4300000_e6000000.fj 14449982707:9325419:chrX_band20_s71800000_e73900000.fj 14459308126:9811498:chrX_band21_s73900000_e76000000.fj 14469119624:42415433:chrX_band22_s76000000_e84600000.fj 14511535057:8048575:chrX_band23_s84600000_e86200000.fj 14519583632:22647045:chrX_band24_s86200000_e91800000.fj 14542230677:7507452:chrX_band25_s91800000_e93500000.fj 14549738129:24532176:chrX_band26_s93500000_e98300000.fj 14574270305:20702445:chrX_band27_s98300000_e102600000.fj 14594972750:5393310:chrX_band28_s102600000_e103700000.fj 14600366060:25038697:chrX_band29_s103700000_e108700000.fj 14625404757:17528792:chrX_band2_s6000000_e9500000.fj 14642933549:39123936:chrX_band30_s108700000_e116500000.fj 14682057485:21530282:chrX_band31_s116500000_e120900000.fj 14703587767:39763257:chrX_band32_s120900000_e128700000.fj 14743351024:8637631:chrX_band33_s128700000_e130400000.fj 14751988655:16073438:chrX_band34_s130400000_e133600000.fj 14768062093:21768801:chrX_band35_s133600000_e138000000.fj 14789830894:11318859:chrX_band36_s138000000_e140300000.fj 14801149753:8828151:chrX_band37_s140300000_e142100000.fj
diff --git a/sdk/go/manifest/testdata/short_manifest b/sdk/go/manifest/testdata/short_manifest
new file mode 100644 (file)
index 0000000..e8a0e43
--- /dev/null
@@ -0,0 +1 @@
+. b746e3d2104645f2f64cd3cc69dd895d+15693477+E2866e643690156651c03d876e638e674dcd79475@5441920c 0:15693477:chr10_band0_s0_e3000000.fj
diff --git a/sdk/go/stats/duration.go b/sdk/go/stats/duration.go
new file mode 100644 (file)
index 0000000..cf91726
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package stats
+
+import (
+       "fmt"
+       "strconv"
+       "time"
+)
+
+// Duration is a duration that is displayed as a number of seconds in
+// fixed-point notation.
+type Duration time.Duration
+
+// MarshalJSON implements json.Marshaler.
+func (d Duration) MarshalJSON() ([]byte, error) {
+       return []byte(d.String()), nil
+}
+
+// String implements fmt.Stringer.
+func (d Duration) String() string {
+       return fmt.Sprintf("%.6f", time.Duration(d).Seconds())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (d *Duration) UnmarshalJSON(data []byte) error {
+       return d.Set(string(data))
+}
+
+// Set implements flag.Value.
+func (d *Duration) Set(s string) error {
+       sec, err := strconv.ParseFloat(s, 64)
+       if err == nil {
+               *d = Duration(sec * float64(time.Second))
+       }
+       return err
+}
diff --git a/sdk/go/stats/duration_test.go b/sdk/go/stats/duration_test.go
new file mode 100644 (file)
index 0000000..d9fe9c5
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package stats
+
+import (
+       "testing"
+       "time"
+)
+
+func TestString(t *testing.T) {
+       d := Duration(123123123123 * time.Nanosecond)
+       if s, expect := d.String(), "123.123123"; s != expect {
+               t.Errorf("got %s, expect %s", s, expect)
+       }
+}
+
+func TestSet(t *testing.T) {
+       var d Duration
+       if err := d.Set("123.456"); err != nil {
+               t.Fatal(err)
+       }
+       if got, expect := time.Duration(d).Nanoseconds(), int64(123456000000); got != expect {
+               t.Errorf("got %d, expect %d", got, expect)
+       }
+}
diff --git a/sdk/java-v2/.gitignore b/sdk/java-v2/.gitignore
new file mode 100644 (file)
index 0000000..c928081
--- /dev/null
@@ -0,0 +1,9 @@
+/.gradle/
+/bin/
+/build/
+.project
+.classpath
+/.settings/
+.DS_Store
+/.idea/
+/out/
diff --git a/sdk/java-v2/.licenseignore b/sdk/java-v2/.licenseignore
new file mode 100644 (file)
index 0000000..ecee9c7
--- /dev/null
@@ -0,0 +1,4 @@
+.licenseignore
+agpl-3.0.txt
+apache-2.0.txt
+COPYING
\ No newline at end of file
diff --git a/sdk/java-v2/COPYING b/sdk/java-v2/COPYING
new file mode 100644 (file)
index 0000000..27d8c81
--- /dev/null
@@ -0,0 +1,15 @@
+Unless indicated otherwise in the header of the file, the files in this
+repository are dual-licensed AGPL-3.0 and Apache-2.0
+
+Individual files contain an SPDX tag that indicates the license for the file.
+dual-licensed files use the following tag:
+
+    SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+
+This enables machine processing of license information based on the SPDX
+License Identifiers that are available here: http://spdx.org/licenses/
+
+The full license text for each license is available in this directory:
+
+  AGPL-3.0:     agpl-3.0.txt
+  Apache-2.0:   apache-2.0.txt
diff --git a/sdk/java-v2/README.md b/sdk/java-v2/README.md
new file mode 100644 (file)
index 0000000..ca5aef9
--- /dev/null
@@ -0,0 +1,115 @@
+```
+Copyright (C) The Arvados Authors. All rights reserved.
+SPDX-License-Identifier: CC-BY-SA-3.0
+```
+
+# Arvados Java SDK
+
+##### About
+The Arvados Java client provides access to Arvados servers and uses two APIs:
+* the lower-level [Keep Server API](https://doc.arvados.org/api/index.html)
+* the higher-level [Keep-Web API](https://godoc.org/github.com/curoverse/arvados/services/keep-web) (when needed)
+
+##### Required Java version
+This SDK requires Java 8+
+
+##### Logging
+
+SLF4J is used for logging. A concrete logging framework and its configuration must be provided by the client.
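+
+For example, with any SLF4J binding on the classpath, application code can obtain
+a logger in the usual SLF4J way (this is plain SLF4J usage, not an API specific
+to this SDK; MyApp is a placeholder class name):
+
+```java
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MyApp {
+    private static final Logger log = LoggerFactory.getLogger(MyApp.class);
+
+    public static void main(String[] args) {
+        log.info("Arvados Java client starting");
+    }
+}
+```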
+
+##### Configuration
+
+[TypeSafe Configuration](https://github.com/lightbend/config) is used for configuring this library.
+
+Please have a look at java/resources/reference.conf for the default values provided with this library.
+
+* **keepweb-host** - set to the host of your Keep-Web installation
+* **keepweb-port** - set to the port of your Keep-Web installation
+* **host** - set to the host of your Arvados installation
+* **port** - set to the port of your Arvados installation
+* **token** - authenticates a registered user; provide a
+  [token obtained from Arvados Workbench](https://doc.arvados.org/user/reference/api-tokens.html)
+* **protocol** - don't change unless really needed
+* **host-insecure** - insecure communication with Arvados (ignores SSL certificate verification);
+  don't change to *true* unless really needed
+* **split-size** - size of chunk files in megabytes
+* **temp-dir** - storage location for temporary chunk files
+* **copies** - number of duplicates of each chunk file per Keep server
+* **retries** - in case sending a chunk file fails, allows the send to be retried
+  (*NOTE*: this parameter is not used at the moment but was left for future improvements)
+
+To override the default settings, create an application.conf file in your application.
+For an example, see src/test/resources/application.conf.
+
+Alternatively, the ExternalConfigProvider class can be used to pass configuration via code.
+ExternalConfigProvider comes with a builder, and all of the above values must be provided in order for it to work properly.
+
+ArvadosFacade has two constructors: one without arguments that uses the values from reference.conf, and a second
+one that takes an ExternalConfigProvider as an argument.
+
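+As a rough illustration, building the configuration in code could look like the
+sketch below. The builder setter names here are assumptions for illustration,
+not the verified API; check ExternalConfigProvider's builder for the exact names.
+
+```java
+// Hypothetical sketch -- the setter names below are assumed, not verified API.
+ExternalConfigProvider config = ExternalConfigProvider.builder()
+        .apiHost("arvados.example.com")       // "host" in reference.conf
+        .apiPort(443)                         // "port"
+        .apiToken("token-from-workbench")     // "token"
+        .apiProtocol("https")                 // "protocol"
+        .apiHostInsecure(false)               // "host-insecure"
+        .keepWebHost("keep.example.com")      // "keepweb-host"
+        .keepWebPort(443)                     // "keepweb-port"
+        .fileSplitSize(64)                    // "split-size" (megabytes)
+        .fileSplitDirectory(new File("/tmp/chunks")) // "temp-dir"
+        .numberOfCopies(2)                    // "copies"
+        .numberOfRetries(0)                   // "retries" (currently unused)
+        .build();
+
+ArvadosFacade facade = new ArvadosFacade(config);
+```
+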
+##### API clients
+
+All API clients inherit from BaseStandardApiClient. This class implements all of the
+common methods described in http://doc.arvados.org/api/methods.html.
+
+Parameters provided to the common or specific methods are either String UUIDs or fields wrapped in Java objects. For example:
+
+```java
+String uuid = "ardev-4zz18-rxcql7qwyakg1r1";
+
+Collection actual = client.get(uuid);
+```
+
+```java
+ListArgument listArgument = ListArgument.builder()
+        .filters(Arrays.asList(
+                Filter.of("owner_uuid", Operator.LIKE, "ardev%"),
+                Filter.of("name", Operator.LIKE, "Super%"),
+                Filter.of("portable_data_hash", Operator.IN, Lists.newArrayList("54f6d9f59065d3c009d4306660989379+65")
+            )))
+        .build();
+
+CollectionList actual = client.list(listArgument);
+```
+
+Non-standard API clients must inherit from BaseApiClient.
+For example, KeepServerApiClient communicates directly with Keep servers, using exclusively non-common methods.
+
+##### Business logic
+
+More advanced API data handling can be implemented in *Facade* classes.
+In the current version, the functionalities provided by the SDK are handled by *ArvadosFacade*.
+They include (see the sketch after this list):
+* **downloading a single file from a collection** - using Keep-Web
+* **downloading a whole collection** - using Keep-Web or the Keep Server API
+* **listing file info from a certain collection** - information is returned as a list of *FileTokens* providing file details
+* **uploading a single file** - to either a new or an existing collection
+* **uploading a list of files** - to either a new or an existing collection
+* **creating an empty collection**
+* **getting current user info**
+* **listing the current user's collections**
+* **creating a new project**
+* **deleting a certain collection**
+
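+A minimal usage sketch is shown below. The method names are illustrative
+assumptions rather than the verified API, so check ArvadosFacade for the
+actual signatures:
+
+```java
+// Hypothetical sketch -- the method names are assumed, not verified API.
+ArvadosFacade facade = new ArvadosFacade(); // uses reference.conf defaults
+
+// Upload a local file to a new collection in a given project.
+Collection created = facade.upload(Arrays.asList(new File("data.fj")),
+        "my-collection", "ardev-j7d0g-xxxxxxxxxxxxxxx");
+
+// List the files in that collection, then fetch one of them via Keep-Web.
+List<FileToken> files = facade.listFileInfoFromCollection(created.getUuid());
+File downloaded = facade.downloadFile(files.get(0).getFullPath(),
+        created.getUuid(), "/tmp");
+```
+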
+##### Note regarding Keep-Web
+
+The current version requires both Keep-Web and the standard Keep Server API to be configured in order to use the Keep-Web functionalities.
+
+##### Integration tests
+
+In order to run the integration tests, all fields within the following configuration file must be provided:
+```
+src/test/resources/integration-test-appliation.conf
+```
+The parameter **integration-tests.project-uuid** should contain the UUID of a project available to the user
+whose token was provided in the configuration file.
+
+The integration tests require a connection to a real Arvados server.
+
+##### Note regarding file naming
+
+When uploading via this SDK, all files within a single collection must have different names.
+This also applies when uploading files to an already existing collection.
+Renaming files with duplicate names is not implemented in the current version.
+
diff --git a/sdk/java-v2/agpl-3.0.txt b/sdk/java-v2/agpl-3.0.txt
new file mode 100644 (file)
index 0000000..dba13ed
--- /dev/null
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/sdk/java-v2/apache-2.0.txt b/sdk/java-v2/apache-2.0.txt
new file mode 100644 (file)
index 0000000..d645695
--- /dev/null
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
similarity index 100%
rename from build.gradle
rename to sdk/java-v2/build.gradle
similarity index 100%
rename from gradlew
rename to sdk/java-v2/gradlew
similarity index 100%
rename from gradlew.bat
rename to sdk/java-v2/gradlew.bat
similarity index 100%
rename from settings.gradle
rename to sdk/java-v2/settings.gradle
diff --git a/sdk/java/.classpath b/sdk/java/.classpath
new file mode 100644 (file)
index 0000000..27d14a1
--- /dev/null
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+       <classpathentry including="**/*.java" kind="src" output="target/test-classes" path="src/test/java"/>
+       <classpathentry including="**/*.java" kind="src" path="src/main/java"/>
+       <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+       <classpathentry kind="var" path="M2_REPO/com/google/apis/google-api-services-discovery/v1-rev42-1.18.0-rc/google-api-services-discovery-v1-rev42-1.18.0-rc.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/google/api-client/google-api-client/1.18.0-rc/google-api-client-1.18.0-rc.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/google/http-client/google-http-client/1.18.0-rc/google-http-client-1.18.0-rc.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar"/>
+       <classpathentry kind="var" path="M2_REPO/org/apache/httpcomponents/httpclient/4.0.1/httpclient-4.0.1.jar"/>
+       <classpathentry kind="var" path="M2_REPO/org/apache/httpcomponents/httpcore/4.0.1/httpcore-4.0.1.jar"/>
+       <classpathentry kind="var" path="M2_REPO/commons-logging/commons-logging/1.1.1/commons-logging-1.1.1.jar"/>
+       <classpathentry kind="var" path="M2_REPO/commons-codec/commons-codec/1.3/commons-codec-1.3.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/google/http-client/google-http-client-jackson2/1.18.0-rc/google-http-client-jackson2-1.18.0-rc.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/fasterxml/jackson/core/jackson-core/2.1.3/jackson-core-2.1.3.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/google/guava/guava/r05/guava-r05.jar"/>
+       <classpathentry kind="var" path="M2_REPO/log4j/log4j/1.2.16/log4j-1.2.16.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/googlecode/json-simple/json-simple/1.1.1/json-simple-1.1.1.jar"/>
+       <classpathentry kind="var" path="M2_REPO/junit/junit/4.8.1/junit-4.8.1.jar"/>
+       <classpathentry kind="output" path="target/classes"/>
+</classpath>
diff --git a/sdk/java/.project b/sdk/java/.project
new file mode 100644 (file)
index 0000000..40c2bdf
--- /dev/null
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+  <name>java</name>
+  <comment>NO_M2ECLIPSE_SUPPORT: Project files created with the maven-eclipse-plugin are not supported in M2Eclipse.</comment>
+  <projects/>
+  <buildSpec>
+    <buildCommand>
+      <name>org.eclipse.jdt.core.javabuilder</name>
+    </buildCommand>
+  </buildSpec>
+  <natures>
+    <nature>org.eclipse.jdt.core.javanature</nature>
+  </natures>
+</projectDescription>
\ No newline at end of file
diff --git a/sdk/java/.settings/org.eclipse.jdt.core.prefs b/sdk/java/.settings/org.eclipse.jdt.core.prefs
new file mode 100644 (file)
index 0000000..f4f19ea
--- /dev/null
@@ -0,0 +1,5 @@
+#Mon Apr 28 10:33:40 EDT 2014
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.source=1.6
+org.eclipse.jdt.core.compiler.compliance=1.6
diff --git a/sdk/java/ArvadosSDKJavaExample.java b/sdk/java/ArvadosSDKJavaExample.java
new file mode 100644 (file)
index 0000000..ded6ce9
--- /dev/null
@@ -0,0 +1,84 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+/**
+ * This sample program is useful for getting started with the Arvados Java SDK.
+ * @author radhika
+ *
+ */
+
+import org.arvados.sdk.Arvados;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+public class ArvadosSDKJavaExample {
+  /** Make sure the following environment variables are set before using Arvados:
+   *      ARVADOS_API_TOKEN, ARVADOS_API_HOST and ARVADOS_API_HOST_INSECURE.
+   *      Set ARVADOS_API_HOST_INSECURE to true if you are using self-signed
+   *      certificates in development and want to bypass certificate validation.
+   *
+   *  If you are not using env variables, you can pass them to the Arvados constructor.
+   *
+   *  Please refer to http://doc.arvados.org/api/index.html for a complete list
+   *      of the available API methods.
+   */
+  public static void main(String[] args) throws Exception {
+    String apiName = "arvados";
+    String apiVersion = "v1";
+
+    Arvados arv = new Arvados(apiName, apiVersion);
+
+    // Make a users.list call; "list" is the method being invoked on the "users" resource.
+    // Expect a Map containing the list of users as the response.
+    System.out.println("Making an arvados users.list api call");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("users", "list", params);
+    System.out.println("Arvados users.list:\n");
+    printResponse(response);
+    
+    // get uuid of the first user from the response
+    List items = (List)response.get("items");
+
+    Map firstUser = (Map)items.get(0);
+    String userUuid = (String)firstUser.get("uuid");
+    
+    // Make a users get call on the uuid obtained above
+    System.out.println("\n\n\nMaking a users.get call for " + userUuid);
+    params = new HashMap<String, Object>();
+    params.put("uuid", userUuid);
+    response = arv.call("users", "get", params);
+    System.out.println("Arvados users.get:\n");
+    printResponse(response);
+
+    // Make a pipeline_templates list call
+    System.out.println("\n\n\nMaking a pipeline_templates.list call.");
+
+    params = new HashMap<String, Object>();
+    response = arv.call("pipeline_templates", "list", params);
+
+    System.out.println("Arvados pipeline_templates.list:\n");
+    printResponse(response);
+  }
+  
+  private static void printResponse(Map response){
+    Set<Entry<String,Object>> entrySet = (Set<Entry<String,Object>>)response.entrySet();
+    for (Map.Entry<String, Object> entry : entrySet) {
+      if ("items".equals(entry.getKey())) {
+        List items = (List)entry.getValue();
+        for (Object item : items) {
+          System.out.println("    " + item);
+        }            
+      } else {
+        System.out.println(entry.getKey() + " = " + entry.getValue());
+      }
+    }
+  }
+}
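
The example above shows the full calling pattern; boiled down, every request has the same three-part shape. The following is a minimal sketch, assuming the ARVADOS_API_* environment variables are set (or that credentials are passed through the five-argument constructor defined in Arvados.java below) and that the API server is reachable:

    import org.arvados.sdk.Arvados;

    import java.util.HashMap;
    import java.util.Map;

    public class MinimalArvadosCall {
      public static void main(String[] args) throws Exception {
        // Reads ARVADOS_API_TOKEN, ARVADOS_API_HOST and ARVADOS_API_HOST_INSECURE
        // from the environment...
        Arvados arv = new Arvados("arvados", "v1");
        // ...or pass credentials explicitly (apiName, apiVersion, token, host, hostInsecure):
        // Arvados arv = new Arvados("arvados", "v1", token, host, "false");

        // Every request has the same shape: resource name, method name, parameter map.
        Map<String, Object> params = new HashMap<String, Object>();
        Map response = arv.call("users", "list", params);
        System.out.println(response.get("items"));
      }
    }
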
diff --git a/sdk/java/ArvadosSDKJavaExampleWithPrompt.java b/sdk/java/ArvadosSDKJavaExampleWithPrompt.java
new file mode 100644 (file)
index 0000000..d9608ed
--- /dev/null
@@ -0,0 +1,127 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+/**
+ * This sample program is useful for getting started with the Arvados Java SDK.
+ * This program creates an Arvados instance using the configured environment variables.
+ * It then provides a prompt for entering a method name and input parameters.
+ * The program then invokes the API server to execute the specified method.
+ * 
+ * @author radhika
+ */
+
+import org.arvados.sdk.Arvados;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+
+public class ArvadosSDKJavaExampleWithPrompt {
+  /**
+   * Make sure the following environment variables are set before using Arvados:
+   * ARVADOS_API_TOKEN, ARVADOS_API_HOST and ARVADOS_API_HOST_INSECURE. Set
+   * ARVADOS_API_HOST_INSECURE to true if you are using self-signed certificates
+   * in development and want to bypass certificate validation.
+   * 
+   * Please refer to http://doc.arvados.org/api/index.html for a complete list
+   * of the available API methods.
+   */
+  public static void main(String[] args) throws Exception {
+    String apiName = "arvados";
+    String apiVersion = "v1";
+
+    System.out.print("Welcome to the Arvados Java SDK.");
+    System.out.println("\nYou can use this example to call API methods interactively.");
+    System.out.println("\nPlease refer to http://doc.arvados.org/api/index.html for API documentation");
+    System.out.println("\nTo make the calls, enter input data at the prompt.");
+    System.out.println("When entering parameters, you may enter a simple string or well-formed JSON.");
+    System.out.println("For example, to get a user you may enter:  uuid, zzzzz-12345-67890");
+    System.out.println("Or to filter links, you may enter:  filters, [[ \"name\", \"=\", \"can_manage\"]]");
+
+    System.out.println("\nEnter ^C when you want to quit");
+
+    // use configured env variables for API TOKEN, HOST and HOST_INSECURE
+    Arvados arv = new Arvados(apiName, apiVersion);
+
+    while (true) {
+      try {
+        // prompt for resource
+        System.out.println("\n\nEnter Resource name (for example users)");
+        System.out.println("\nAvailable resources are: " + arv.getAvailableResourses());
+        System.out.print("\n>>> ");
+
+        // read resource name
+        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
+        String resourceName = in.readLine().trim();
+        if ("".equals(resourceName)) {
+          throw (new Exception("No resource name entered"));
+        }
+        // read method name
+        System.out.println("\nEnter method name (for example get)");
+        System.out.println("\nAvailable methods are: " + arv.getAvailableMethodsForResourse(resourceName));
+        System.out.print("\n>>> ");
+        String methodName = in.readLine().trim();
+        if ("".equals(methodName)) {
+          throw (new Exception("No method name entered"));
+        }
+
+        // read method parameters
+        System.out.println("\nEnter parameter name, value (for example uuid, uuid-value)");
+        System.out.println("\nAvailable parameters are: " + 
+              arv.getAvailableParametersForMethod(resourceName, methodName));
+        
+        System.out.print("\n>>> ");
+        Map paramsMap = new HashMap();
+        String param = "";
+        try {
+          do {
+            param = in.readLine();
+            if (param.isEmpty())
+              break;
+            int index = param.indexOf(","); // first comma
+            String paramName = param.substring(0, index);
+            String paramValue = param.substring(index+1);
+            paramsMap.put(paramName.trim(), paramValue.trim());
+
+            System.out.println("\nEnter parameter name, value (for example uuid, uuid-value)");
+            System.out.print("\n>>> ");
+          } while (!param.isEmpty());
+        } catch (Exception e) {
+          System.out.println (e.getMessage());
+          System.out.println ("\nSet up a new call");
+          continue;
+        }
+
+        // Make a "call" for the given resource name and method name
+        try {
+          System.out.println ("Making a call for " + resourceName + " " + methodName);
+          Map response = arv.call(resourceName, methodName, paramsMap);
+
+          Set<Entry<String,Object>> entrySet = (Set<Entry<String,Object>>)response.entrySet();
+          for (Map.Entry<String, Object> entry : entrySet) {
+            if ("items".equals(entry.getKey())) {
+              List items = (List)entry.getValue();
+              for (Object item : items) {
+                System.out.println("    " + item);
+              }            
+            } else {
+              System.out.println(entry.getKey() + " = " + entry.getValue());
+            }
+          }
+        } catch (Exception e){
+          System.out.println (e.getMessage());
+          System.out.println ("\nSet up a new call");
+        }
+      } catch (Exception e) {
+        System.out.println (e.getMessage());
+        System.out.println ("\nSet up a new call");
+      }
+    }
+  }
+}
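
The prompt accepts a parameter value either as a plain string or as well-formed JSON (the filters example above); whatever is typed is stored as a string and forwarded unchanged by Arvados.call(). A minimal sketch of the equivalent programmatic call, under the same environment-variable assumptions and using the filter value from the prompt example:

    import org.arvados.sdk.Arvados;

    import java.util.HashMap;
    import java.util.Map;

    public class FilteredLinksCall {
      public static void main(String[] args) throws Exception {
        Arvados arv = new Arvados("arvados", "v1");

        // A JSON-formatted string is forwarded to the server as-is,
        // exactly as when it is typed at the interactive prompt.
        Map<String, Object> params = new HashMap<String, Object>();
        params.put("filters", "[[\"name\", \"=\", \"can_manage\"]]");
        Map response = arv.call("links", "list", params);
        System.out.println(response.get("items"));
      }
    }
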
diff --git a/sdk/java/README b/sdk/java/README
new file mode 100644 (file)
index 0000000..0933b88
--- /dev/null
@@ -0,0 +1,4 @@
+Welcome to the Arvados Java SDK.
+
+Please refer to http://doc.arvados.org/sdk/java/index.html to get started
+with the Arvados Java SDK.
diff --git a/sdk/java/pom.xml b/sdk/java/pom.xml
new file mode 100644 (file)
index 0000000..13e1c6a
--- /dev/null
@@ -0,0 +1,106 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.arvados.sdk</groupId>
+  <artifactId>arvados</artifactId>
+  <packaging>jar</packaging>
+  <version>1.1</version>
+  <name>arvados-sdk</name>
+  <url>http://arvados.org</url>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.apis</groupId>
+      <artifactId>google-api-services-discovery</artifactId>
+      <version>v1-rev42-1.18.0-rc</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api-client</groupId>
+      <artifactId>google-api-client</artifactId>
+      <version>1.18.0-rc</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.http-client</groupId>
+      <artifactId>google-http-client-jackson2</artifactId>
+      <version>1.18.0-rc</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>r05</version>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <version>1.2.16</version>
+    </dependency>
+    <dependency>
+      <groupId>com.googlecode.json-simple</groupId>
+      <artifactId>json-simple</artifactId>
+      <version>1.1.1</version>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.8.1</version>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <finalName>arvados-sdk-1.1</finalName>
+
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.1</version>
+        <configuration>
+          <source>1.6</source>
+          <target>1.6</target>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>attached</goal>
+            </goals>
+            <phase>package</phase>
+            <configuration>
+              <descriptorRefs>
+                <descriptorRef>jar-with-dependencies</descriptorRef>
+              </descriptorRefs>
+              <archive>
+                <manifest>
+                  <mainClass>org.arvados.sdk.Arvados</mainClass>
+                </manifest>
+                <manifestEntries>
+                  <!--<Premain-Class>Your.agent.class</Premain-Class> <Agent-Class>Your.agent.class</Agent-Class> -->
+                  <Can-Redefine-Classes>true</Can-Redefine-Classes>
+                  <Can-Retransform-Classes>true</Can-Retransform-Classes>
+                </manifestEntries>
+              </archive>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+    <resources>
+      <resource>
+        <directory>src/main/resources</directory>
+        <targetPath>${basedir}/target/classes</targetPath>
+        <includes>
+          <include>log4j.properties</include>
+        </includes>
+        <filtering>true</filtering>
+      </resource>
+      <resource>
+        <directory>src/test/resources</directory>
+        <filtering>true</filtering>
+      </resource>
+    </resources>
+  </build>
+</project>
diff --git a/sdk/java/src/main/java/org/arvados/sdk/Arvados.java b/sdk/java/src/main/java/org/arvados/sdk/Arvados.java
new file mode 100644 (file)
index 0000000..2b8bbee
--- /dev/null
@@ -0,0 +1,465 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package org.arvados.sdk;
+
+import com.google.api.client.http.javanet.*;
+import com.google.api.client.http.ByteArrayContent;
+import com.google.api.client.http.GenericUrl;
+import com.google.api.client.http.HttpBackOffIOExceptionHandler;
+import com.google.api.client.http.HttpContent;
+import com.google.api.client.http.HttpRequest;
+import com.google.api.client.http.HttpRequestFactory;
+import com.google.api.client.http.HttpTransport;
+import com.google.api.client.http.UriTemplate;
+import com.google.api.client.json.JsonFactory;
+import com.google.api.client.json.jackson2.JacksonFactory;
+import com.google.api.client.util.ExponentialBackOff;
+import com.google.api.client.util.Maps;
+import com.google.api.services.discovery.Discovery;
+import com.google.api.services.discovery.model.JsonSchema;
+import com.google.api.services.discovery.model.RestDescription;
+import com.google.api.services.discovery.model.RestMethod;
+import com.google.api.services.discovery.model.RestMethod.Request;
+import com.google.api.services.discovery.model.RestResource;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.log4j.Logger;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+
+/**
+ * This class provides a Java SDK interface to the Arvados API server.
+ *
+ * Please refer to http://doc.arvados.org/api/ to learn about the
+ *  various resources and methods exposed by the API server.
+ *
+ * @author radhika
+ */
+public class Arvados {
+  // HttpTransport and JsonFactory are thread-safe. So, use global instances.
+  private HttpTransport httpTransport;
+  private final JsonFactory jsonFactory = JacksonFactory.getDefaultInstance();
+
+  private String arvadosApiToken;
+  private String arvadosApiHost;
+  private boolean arvadosApiHostInsecure;
+
+  private String arvadosRootUrl;
+
+  private static final Logger logger = Logger.getLogger(Arvados.class);
+
+  // Get it once and reuse on the call requests
+  RestDescription restDescription = null;
+  String apiName = null;
+  String apiVersion = null;
+
+  public Arvados (String apiName, String apiVersion) throws Exception {
+    this (apiName, apiVersion, null, null, null);
+  }
+
+  public Arvados (String apiName, String apiVersion, String token,
+      String host, String hostInsecure) throws Exception {
+    this.apiName = apiName;
+    this.apiVersion = apiVersion;
+
+    // Read the needed environment variables if they are not passed in
+    if (token != null) {
+      arvadosApiToken = token;
+    } else {
+      arvadosApiToken = System.getenv().get("ARVADOS_API_TOKEN");
+      if (arvadosApiToken == null) {
+        throw new Exception("Missing environment variable: ARVADOS_API_TOKEN");
+      }
+    }
+
+    if (host != null) {
+      arvadosApiHost = host;
+    } else {
+      arvadosApiHost = System.getenv().get("ARVADOS_API_HOST");
+      if (arvadosApiHost == null) {
+        throw new Exception("Missing environment variable: ARVADOS_API_HOST");
+      }
+    }
+    arvadosRootUrl = "https://" + arvadosApiHost;
+
+    if (hostInsecure != null) {
+      arvadosApiHostInsecure = Boolean.valueOf(hostInsecure);
+    } else {
+      arvadosApiHostInsecure =
+          "true".equals(System.getenv().get("ARVADOS_API_HOST_INSECURE"));
+    }
+
+    // Create HTTP_TRANSPORT object
+    NetHttpTransport.Builder builder = new NetHttpTransport.Builder();
+    if (arvadosApiHostInsecure) {
+      builder.doNotValidateCertificate();
+    }
+    httpTransport = builder.build();
+
+    // initialize rest description
+    restDescription = loadArvadosApi();
+  }
+
+  /**
+   * Make a call to the API server with the provided call information.
+   * @param resourceName
+   * @param methodName
+   * @param paramsMap
+   * @return Map
+   * @throws Exception
+   */
+  public Map call(String resourceName, String methodName,
+      Map<String, Object> paramsMap) throws Exception {
+    RestMethod method = getMatchingMethod(resourceName, methodName);
+
+    HashMap<String, Object> parameters = loadParameters(paramsMap, method);
+
+    GenericUrl url = new GenericUrl(UriTemplate.expand(
+        arvadosRootUrl + restDescription.getBasePath() + method.getPath(),
+        parameters, true));
+
+    try {
+      // construct the request
+      HttpRequestFactory requestFactory;
+      requestFactory = httpTransport.createRequestFactory();
+
+      // possibly required content
+      HttpContent content = null;
+
+      if (!method.getHttpMethod().equals("GET") &&
+          !method.getHttpMethod().equals("DELETE")) {
+        String objectName = resourceName.substring(0, resourceName.length()-1);
+        Object requestBody = paramsMap.get(objectName);
+        if (requestBody == null) {
+          error("POST method requires content object " + objectName);
+        }
+
+        content = new ByteArrayContent("application/json",((String)requestBody).getBytes());
+      }
+
+      HttpRequest request =
+          requestFactory.buildRequest(method.getHttpMethod(), url, content);
+
+      // Set read timeout to 120 seconds (up from default of 20 seconds)
+      request.setReadTimeout(120 * 1000);
+
+      // Add retry behavior
+      request.setIOExceptionHandler(new HttpBackOffIOExceptionHandler(new ExponentialBackOff()));
+
+      // make the request
+      List<String> authHeader = new ArrayList<String>();
+      authHeader.add("OAuth2 " + arvadosApiToken);
+      request.getHeaders().put("Authorization", authHeader);
+      String response = request.execute().parseAsString();
+
+      Map responseMap = jsonFactory.createJsonParser(response).parse(HashMap.class);
+
+      logger.debug(responseMap);
+
+      return responseMap;
+    } catch (Exception e) {
+      e.printStackTrace();
+      throw e;
+    }
+  }
+
+  /**
+   * Get all resources supported by the API
+   * @return Set
+   */
+  public Set<String> getAvailableResourses() {
+    return (restDescription.getResources().keySet());
+  }
+
+  /**
+   * Get all supported method names for the given resource
+   * @param resourceName
+   * @return Set
+   * @throws Exception
+   */
+  public Set<String> getAvailableMethodsForResourse(String resourceName)
+      throws Exception {
+    Map<String, RestMethod> methodMap = getMatchingMethodMap (resourceName);
+    return (methodMap.keySet());
+  }
+
+  /**
+   * Get the required and optional parameters for the given method of the given resource.
+   * @param resourceName
+   * @param methodName
+   * @return Map
+   * @throws Exception
+   */
+  public Map<String,List<String>> getAvailableParametersForMethod(String resourceName, String methodName)
+      throws Exception {
+    RestMethod method = getMatchingMethod(resourceName, methodName);
+    Map<String, List<String>> parameters = new HashMap<String, List<String>>();
+    List<String> requiredParameters = new ArrayList<String>();
+    List<String> optionalParameters = new ArrayList<String>();
+    parameters.put ("required", requiredParameters);
+    parameters.put("optional", optionalParameters);
+
+    try {
+      // get any request parameters
+      Request request = method.getRequest();
+      if (request != null) {
+        Object required = request.get("required");
+        Object requestProperties = request.get("properties");
+        if (requestProperties != null) {
+          if (requestProperties instanceof Map) {
+            Map properties = (Map)requestProperties;
+            Set<String> propertyKeys = properties.keySet();
+            for (String property : propertyKeys) {
+              if (Boolean.TRUE.equals(required)) {
+                requiredParameters.add(property);
+              } else {
+                optionalParameters.add(property);
+              }
+            }
+          }
+        }
+      }
+
+      // get other listed parameters
+      Map<String,JsonSchema> methodParameters = method.getParameters();
+      for (Map.Entry<String, JsonSchema> entry : methodParameters.entrySet()) {
+        if (Boolean.TRUE.equals(entry.getValue().getRequired())) {
+          requiredParameters.add(entry.getKey());
+        } else {
+          optionalParameters.add(entry.getKey());
+        }
+      }
+    } catch (Exception e){
+      logger.error(e);
+    }
+
+    return parameters;
+  }
+
+  private HashMap<String, Object> loadParameters(Map<String, Object> paramsMap,
+      RestMethod method) throws Exception {
+    HashMap<String, Object> parameters = Maps.newHashMap();
+
+    // required parameters
+    if (method.getParameterOrder() != null) {
+      for (String parameterName : method.getParameterOrder()) {
+        JsonSchema parameter = method.getParameters().get(parameterName);
+        if (Boolean.TRUE.equals(parameter.getRequired())) {
+          Object parameterValue = paramsMap.get(parameterName);
+          if (parameterValue == null) {
+            error("missing required parameter: " + parameter);
+          } else {
+            putParameter(null, parameters, parameterName, parameter, parameterValue);
+          }
+        }
+      }
+    }
+
+    for (Map.Entry<String, Object> entry : paramsMap.entrySet()) {
+      String parameterName = entry.getKey();
+      Object parameterValue = entry.getValue();
+
+      if (parameterName.equals("contentType")) {
+        if (method.getHttpMethod().equals("GET") || method.getHttpMethod().equals("DELETE")) {
+          error("HTTP content type cannot be specified for this method: " + parameterName);
+        }
+      } else {
+        JsonSchema parameter = null;
+        if (restDescription.getParameters() != null) {
+          parameter = restDescription.getParameters().get(parameterName);
+        }
+        if (parameter == null && method.getParameters() != null) {
+          parameter = method.getParameters().get(parameterName);
+        }
+        putParameter(parameterName, parameters, parameterName, parameter, parameterValue);
+      }
+    }
+
+    return parameters;
+  }
+
+  private RestMethod getMatchingMethod(String resourceName, String methodName)
+      throws Exception {
+    Map<String, RestMethod> methodMap = getMatchingMethodMap(resourceName);
+
+    if (methodName == null) {
+      error("missing method name");
+    }
+
+    RestMethod method =
+        methodMap == null ? null : methodMap.get(methodName);
+    if (method == null) {
+      error("method not found: " + methodName);
+    }
+
+    return method;
+  }
+
+  private Map<String, RestMethod> getMatchingMethodMap(String resourceName)
+      throws Exception {
+    if (resourceName == null) {
+      error("missing resource name");
+    }
+
+    Map<String, RestMethod> methodMap = null;
+    Map<String, RestResource> resources = restDescription.getResources();
+    RestResource resource = resources.get(resourceName);
+    if (resource == null) {
+      error("resource not found: " + resourceName);
+    }
+    methodMap = resource.getMethods();
+    return methodMap;
+  }
+
+  /**
+   * Loads the API discovery document for the configured apiName and apiVersion.
+   * The underlying Discovery builder is not thread-safe, so a new one is
+   * created for each request.
+   * @return RestDescription
+   * @throws Exception
+   */
+  private RestDescription loadArvadosApi()
+      throws Exception {
+    try {
+      Discovery discovery;
+
+      Discovery.Builder discoveryBuilder =
+          new Discovery.Builder(httpTransport, jsonFactory, null);
+
+      discoveryBuilder.setRootUrl(arvadosRootUrl);
+      discoveryBuilder.setApplicationName(apiName);
+
+      discovery = discoveryBuilder.build();
+
+      return discovery.apis().getRest(apiName, apiVersion).execute();
+    } catch (Exception e) {
+      logger.error("Failed to load the API discovery document", e);
+      throw e;
+    }
+  }
+
+  /**
+   * Convert the input parameter into its equivalent json string.
+   * Add this json string value to the parameters map to be sent to server.
+   * @param argName argument name (currently unused; may be null)
+   * @param parameters map of parameters to be sent to the server
+   * @param parameterName name under which the converted value is stored
+   * @param parameter schema describing the parameter type, or null if unknown
+   * @param parameterValue the value to convert
+   * @throws Exception
+   */
+  private void putParameter(String argName, Map<String, Object> parameters,
+      String parameterName, JsonSchema parameter, Object parameterValue)
+          throws Exception {
+    Object value = parameterValue;
+    if (parameter != null) {
+      if ("boolean".equals(parameter.getType())) {
+        value = Boolean.valueOf(parameterValue.toString());
+      } else if ("number".equals(parameter.getType())) {
+        value = new BigDecimal(parameterValue.toString());
+      } else if ("integer".equals(parameter.getType())) {
+        value = new BigInteger(parameterValue.toString());
+      } else if ("float".equals(parameter.getType())) {
+        value = new BigDecimal(parameterValue.toString());
+      } else if ("Java.util.Calendar".equals(parameter.getType())) {
+        value = new BigDecimal(parameterValue.toString());
+      } else if (("array".equals(parameter.getType())) ||
+          ("Array".equals(parameter.getType()))) {
+        if (parameterValue.getClass().isArray()){
+          value = getJsonValueFromArrayType(parameterValue);
+        } else if (List.class.isAssignableFrom(parameterValue.getClass())) {
+          value = getJsonValueFromListType(parameterValue);
+        }
+      } else if (("Hash".equals(parameter.getType())) ||
+          ("hash".equals(parameter.getType()))) {
+        value = getJsonValueFromMapType(parameterValue);
+      } else {
+        if (parameterValue.getClass().isArray()){
+          value = getJsonValueFromArrayType(parameterValue);
+        } else if (List.class.isAssignableFrom(parameterValue.getClass())) {
+          value = getJsonValueFromListType(parameterValue);
+        } else if (Map.class.isAssignableFrom(parameterValue.getClass())) {
+          value = getJsonValueFromMapType(parameterValue);
+        }
+      }
+    }
+
+    parameters.put(parameterName, value);
+  }
+
+  /**
+   * Convert the given input array into json string before sending to server.
+   * @param parameterValue the array to convert
+   * @return json string representation of the array
+   */
+  private String getJsonValueFromArrayType (Object parameterValue) {
+    String arrayStr = Arrays.deepToString((Object[])parameterValue);
+
+    // we can expect either an array of arrays or a flat array of objects
+    if (arrayStr.startsWith("[[") && arrayStr.endsWith("]]")) {
+      arrayStr = arrayStr.substring(2, arrayStr.length()-2);
+      return "[" + getJsonStringForArrayStr(arrayStr) + "]";
+    } else {
+      arrayStr = arrayStr.substring(1, arrayStr.length()-1);
+      return getJsonStringForArrayStr(arrayStr);
+    }
+  }
+
+  private String getJsonStringForArrayStr(String arrayStr) {
+    String[] array = arrayStr.split(",");
+    Object[] trimmedArray = new Object[array.length];
+    for (int i=0; i<array.length; i++){
+      trimmedArray[i] = array[i].trim();
+    }
+    return JSONArray.toJSONString(Arrays.asList(trimmedArray));
+  }
+
+  /**
+   * Convert the given input List into json string before sending to server.
+   * @param parameterValue the list to convert
+   * @return json string representation of the list
+   */
+  private String getJsonValueFromListType (Object parameterValue) {
+    List paramList = (List)parameterValue;
+    Object[] array = paramList.toArray(new Object[paramList.size()]);
+    return getJsonValueFromArrayType(array);
+  }
+
+  /**
+   * Convert the given input map into json string before sending to server.
+   * @param parameterValue the map to convert
+   * @return json string representation of the map
+   */
+  private String getJsonValueFromMapType (Object parameterValue) {
+    JSONObject json = new JSONObject((Map)parameterValue);
+    return json.toString();
+  }
+
+  private static void error(String detail) throws Exception {
+    String errorDetail = "ERROR: " + detail;
+
+    logger.debug(errorDetail);
+    throw new Exception(errorDetail);
+  }
+
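+  /**
+   * Example usage, as a minimal sketch (assumes ARVADOS_API_HOST and
+   * ARVADOS_API_TOKEN are set in the environment, as in ArvadosTest):
+   *
+   * <pre>
+   *   Arvados arv = new Arvados("arvados", "v1");
+   *   Map&lt;String, Object&gt; params = new HashMap&lt;String, Object&gt;();
+   *   Map response = arv.call("users", "list", params);
+   *   System.out.println(response.get("kind"));   // "arvados#userList"
+   * </pre>
+   */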
+  public static void main(String[] args){
+    System.out.println("Welcome to Arvados Java SDK.");
+    System.out.println("Please refer to http://doc.arvados.org/sdk/java/index.html to get started with the the SDK.");
+  }
+
+}
diff --git a/sdk/java/src/main/java/org/arvados/sdk/MethodDetails.java b/sdk/java/src/main/java/org/arvados/sdk/MethodDetails.java
new file mode 100644 (file)
index 0000000..fccc100
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package org.arvados.sdk;
+
+import com.google.api.client.util.Lists;
+import com.google.api.client.util.Sets;
+
+import java.util.ArrayList;
+import java.util.SortedSet;
+
+public class MethodDetails implements Comparable<MethodDetails> {
+    String name;
+    ArrayList<String> requiredParameters = Lists.newArrayList();
+    SortedSet<String> optionalParameters = Sets.newTreeSet();
+    boolean hasContent;
+
+    @Override
+    public int compareTo(MethodDetails o) {
+      if (o == this) {
+        return 0;
+      }
+      return name.compareTo(o.name);
+    }
+}
diff --git a/sdk/java/src/main/resources/log4j.properties b/sdk/java/src/main/resources/log4j.properties
new file mode 100644 (file)
index 0000000..89a9b93
--- /dev/null
@@ -0,0 +1,11 @@
+# To change log location, change log4j.appender.fileAppender.File 
+
+log4j.rootLogger=DEBUG, fileAppender
+
+log4j.appender.fileAppender=org.apache.log4j.RollingFileAppender
+log4j.appender.fileAppender.File=${basedir}/log/arvados_sdk_java.log
+log4j.appender.fileAppender.Append=true
+log4j.appender.fileAppender.MaxFileSize=10MB
+log4j.appender.fileAppender.MaxBackupIndex=10
+log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.fileAppender.layout.ConversionPattern=[%d] %-5p %c %L %x - %m%n
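+
+# To reduce verbosity, lower the root logger level, e.g.:
+#   log4j.rootLogger=INFO, fileAppender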
diff --git a/sdk/java/src/test/java/org/arvados/sdk/java/ArvadosTest.java b/sdk/java/src/test/java/org/arvados/sdk/java/ArvadosTest.java
new file mode 100644 (file)
index 0000000..9de8946
--- /dev/null
@@ -0,0 +1,467 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package org.arvados.sdk;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.GregorianCalendar;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/**
+ * Unit test for Arvados.
+ */
+public class ArvadosTest {
+
+  /**
+   * Test users.list api
+   * @throws Exception
+   */
+  @Test
+  public void testCallUsersList() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("users", "list", params);
+    assertEquals("Expected kind to be users.list", "arvados#userList", response.get("kind"));
+
+    List items = (List)response.get("items");
+    assertNotNull("expected users list items", items);
+    assertTrue("expected at least one item in users list", items.size()>0);
+
+    Map firstUser = (Map)items.get(0);
+    assertNotNull ("Expcted at least one user", firstUser);
+
+    assertEquals("Expected kind to be user", "arvados#user", firstUser.get("kind"));
+    assertNotNull("Expected uuid for first user", firstUser.get("uuid"));
+  }
+
+  /**
+   * Test users.get <uuid> api
+   * @throws Exception
+   */
+  @Test
+  public void testCallUsersGet() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    // call users.list and get the uuid of the first user
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("users", "list", params);
+
+    assertNotNull("expected users list", response);
+    List items = (List)response.get("items");
+    assertNotNull("expected users list items", items);
+
+    Map firstUser = (Map)items.get(0);
+    String userUuid = (String)firstUser.get("uuid");
+
+    // invoke users.get with the first user's uuid
+    params = new HashMap<String, Object>();
+    params.put("uuid", userUuid);
+
+    response = arv.call("users", "get", params);
+
+    assertNotNull("Expected uuid for first user", response.get("uuid"));
+    assertEquals("Expected system user uuid", userUuid, response.get("uuid"));
+  }
+
+  /**
+   * Test users.create api
+   * @throws Exception
+   */
+  @Test
+  public void testCreateUser() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+    params.put("user", "{}");
+    Map response = arv.call("users", "create", params);
+
+    assertEquals("Expected kind to be user", "arvados#user", response.get("kind"));
+
+    Object uuid = response.get("uuid");
+    assertNotNull("Expected uuid for first user", uuid);
+
+    // delete the object
+    params = new HashMap<String, Object>();
+    params.put("uuid", uuid);
+    response = arv.call("users", "delete", params);
+
+    // invoke users.get with the now-deleted user's uuid
+    params = new HashMap<String, Object>();
+    params.put("uuid", uuid);
+
+    Exception caught = null;
+    try {
+      arv.call("users", "get", params);
+    } catch (Exception e) {
+      caught = e;
+    }
+
+    assertNotNull ("expected exception", caught);
+    assertTrue ("Expected 404", caught.getMessage().contains("Path not found"));
+  }
+
+  @Test
+  public void testCreateUserWithMissingRequiredParam() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Exception caught = null;
+    try {
+      arv.call("users", "create", params);
+    } catch (Exception e) {
+      caught = e;
+    }
+
+    assertNotNull ("expected exception", caught);
+    assertTrue ("Expected POST method requires content object user", 
+        caught.getMessage().contains("ERROR: POST method requires content object user"));
+  }
+
+  /**
+   * Test users.create and users.update apis
+   * @throws Exception
+   */
+  @Test
+  public void testCreateAndUpdateUser() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+    params.put("user", "{}");
+    Map response = arv.call("users", "create", params);
+
+    assertEquals("Expected kind to be user", "arvados#user", response.get("kind"));
+
+    Object uuid = response.get("uuid");
+    assertNotNull("Expected uuid for first user", uuid);
+
+    // update this user
+    params = new HashMap<String, Object>();
+    params.put("user", "{}");
+    params.put("uuid", uuid);
+    response = arv.call("users", "update", params);
+
+    assertEquals("Expected kind to be user", "arvados#user", response.get("kind"));
+
+    uuid = response.get("uuid");
+    assertNotNull("Expected uuid for first user", uuid);
+
+    // delete the object
+    params = new HashMap<String, Object>();
+    params.put("uuid", uuid);
+    response = arv.call("users", "delete", params);
+  }
+
+  /**
+   * Test constructing Arvados with an unsupported api name
+   * @throws Exception
+   */
+  @Test
+  public void testUnsupportedApiName() throws Exception {
+    Exception caught = null;
+    try {
+      Arvados arv = new Arvados("not_arvados", "v1");
+    } catch (Exception e) {
+      caught = e;
+    }
+
+    assertNotNull ("expected exception", caught);
+    assertTrue ("Expected 404 when unsupported api is used", caught.getMessage().contains("404 Not Found"));
+  }
+
+  /**
+   * Test constructing Arvados with an unsupported api version
+   * @throws Exception
+   */
+  @Test
+  public void testUnsupportedVersion() throws Exception {
+    Exception caught = null;
+    try {
+      Arvados arv = new Arvados("arvados", "v2");
+    } catch (Exception e) {
+      caught = e;
+    }
+
+    assertNotNull ("expected exception", caught);
+    assertTrue ("Expected 404 when unsupported version is used", caught.getMessage().contains("404 Not Found"));
+  }
+
+  /**
+   * Test calling a nonexistent resource
+   * @throws Exception
+   */
+  @Test
+  public void testCallForNoSuchResource() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Exception caught = null;
+    try {
+      arv.call("abcd", "list", null);
+    } catch (Exception e) {
+      caught = e;
+    }
+
+    assertNotNull ("expected exception", caught);
+    assertTrue ("Expected ERROR: 404 not found", caught.getMessage().contains("ERROR: resource not found"));
+  }
+
+  /**
+   * Test calling a nonexistent method on a resource
+   * @throws Exception
+   */
+  @Test
+  public void testCallForNoSuchResourceMethod() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Exception caught = null;
+    try {
+      arv.call("users", "abcd", null);
+    } catch (Exception e) {
+      caught = e;
+    }
+
+    assertNotNull ("expected exception", caught);
+    assertTrue ("Expected ERROR: 404 not found", caught.getMessage().contains("ERROR: method not found"));
+  }
+
+  /**
+   * Test pipeline_templates.create api
+   * @throws Exception
+   */
+  @Test
+  public void testCreateAndGetPipelineTemplate() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    File file = new File(getClass().getResource( "/first_pipeline.json" ).toURI());
+    byte[] data = new byte[(int)file.length()];
+    FileInputStream is = new FileInputStream(file);
+    try {
+      is.read(data);
+    } finally {
+      is.close();
+    }
+
+    Map<String, Object> params = new HashMap<String, Object>();
+    params.put("pipeline_template", new String(data));
+    Map response = arv.call("pipeline_templates", "create", params);
+    assertEquals("Expected kind to be user", "arvados#pipelineTemplate", response.get("kind"));
+    String uuid = (String)response.get("uuid");
+    assertNotNull("Expected uuid for pipeline template", uuid);
+
+    // get the pipeline
+    params = new HashMap<String, Object>();
+    params.put("uuid", uuid);
+    response = arv.call("pipeline_templates", "get", params);
+
+    assertEquals("Expected kind to be user", "arvados#pipelineTemplate", response.get("kind"));
+    assertEquals("Expected uuid for pipeline template", uuid, response.get("uuid"));
+
+    // delete the object
+    params = new HashMap<String, Object>();
+    params.put("uuid", uuid);
+    response = arv.call("pipeline_templates", "delete", params);
+  }
+
+  /**
+   * Test users.list api with token, host, and insecure flag passed explicitly
+   * @throws Exception
+   */
+  @Test
+  public void testArvadosWithTokenPassed() throws Exception {
+    String token = System.getenv().get("ARVADOS_API_TOKEN");
+    String host = System.getenv().get("ARVADOS_API_HOST");      
+    String hostInsecure = System.getenv().get("ARVADOS_API_HOST_INSECURE");
+
+    Arvados arv = new Arvados("arvados", "v1", token, host, hostInsecure);
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("users", "list", params);
+    assertEquals("Expected kind to be users.list", "arvados#userList", response.get("kind"));
+  }
+
+  /**
+   * Test users.list api with a limit parameter
+   * @throws Exception
+   */
+  @Test
+  public void testCallUsersListWithLimit() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("users", "list", params);
+    assertEquals("Expected users.list in response", "arvados#userList", response.get("kind"));
+
+    List items = (List)response.get("items");
+    assertNotNull("expected users list items", items);
+    assertTrue("expected at least one item in users list", items.size()>0);
+
+    int numUsersListItems = items.size();
+
+    // make the request again with limit
+    params = new HashMap<String, Object>();
+    params.put("limit", numUsersListItems-1);
+
+    response = arv.call("users", "list", params);
+
+    assertEquals("Expected kind to be users.list", "arvados#userList", response.get("kind"));
+
+    items = (List)response.get("items");
+    assertNotNull("expected users list items", items);
+    assertTrue("expected at least one item in users list", items.size()>0);
+
+    int numUsersListItems2 = items.size();
+    assertEquals ("Got more users than requested", numUsersListItems-1, numUsersListItems2);
+  }
+
+  @Test
+  public void testGetLinksWithFilters() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("links", "list", params);
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+
+    String[][] filters = new String[1][];
+    String[] condition = new String[3];
+    condition[0] = "name";
+    condition[1] = "=";
+    condition[2] = "can_manage";
+    filters[0] = condition;
+    params.put("filters", filters);
+    
+    response = arv.call("links", "list", params);
+    
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+    assertFalse("Expected no can_manage in response", response.toString().contains("\"name\":\"can_manage\""));
+  }
+
+  @Test
+  public void testGetLinksWithFiltersAsList() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("links", "list", params);
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+
+    List<List> filters = new ArrayList<List>();
+    List<String> condition = new ArrayList<String>();
+    condition.add("name");
+    condition.add("is_a");
+    condition.add("can_manage");
+    filters.add(condition);
+    params.put("filters", filters);
+    
+    response = arv.call("links", "list", params);
+    
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+    assertFalse("Expected no can_manage in response", response.toString().contains("\"name\":\"can_manage\""));
+  }
+
+  @Test
+  public void testGetLinksWithTimestampFilters() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("links", "list", params);
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+
+    // get links created "tomorrow". Expect none in response
+    Calendar calendar = new GregorianCalendar();
+    calendar.setTime(new Date());
+    calendar.add(Calendar.DAY_OF_MONTH, 1);
+    
+    Object[][] filters = new Object[1][];
+    Object[] condition = new Object[3];
+    condition[0] = "created_at";
+    condition[1] = ">";
+    condition[2] = calendar.get(Calendar.YEAR) + "-" + (calendar.get(Calendar.MONTH)+1) + "-" + calendar.get(Calendar.DAY_OF_MONTH);
+    filters[0] = condition;
+    params.put("filters", filters);
+    
+    response = arv.call("links", "list", params);
+    
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+    int items_avail = ((BigDecimal)response.get("items_available")).intValue();
+    assertEquals("Expected zero links", items_avail, 0);
+  }
+
+  @Test
+  public void testGetLinksWithWhereClause() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map<String, String> where = new HashMap<String, String>();
+    where.put("where", "updated_at > '2014-05-01'");
+    
+    params.put("where", where);
+    
+    Map response = arv.call("links", "list", params);
+    
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+  }
+
+  @Test
+  public void testGetAvailableResources() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+    Set<String> resources = arv.getAvailableResourses();
+    assertNotNull("Expected resources", resources);
+    assertTrue("Excected users in resrouces", resources.contains("users"));
+  }
+
+  @Test
+  public void testGetAvailableMethodsResources() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+    Set<String> methods = arv.getAvailableMethodsForResourse("users");
+    assertNotNull("Expected resources", methods);
+    assertTrue("Excected create method for users", methods.contains("create"));
+  }
+
+  @Test
+  public void testGetAvailableParametersForUsersGetMethod() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+    Map<String,List<String>> parameters = arv.getAvailableParametersForMethod("users", "get");
+    assertNotNull("Expected parameters", parameters);
+    assertTrue("Excected uuid parameter for get method for users", parameters.get("required").contains("uuid"));
+  }
+
+  @Test
+  public void testGetAvailableParametersForUsersCreateMethod() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+    Map<String,List<String>> parameters = arv.getAvailableParametersForMethod("users", "create");
+    assertNotNull("Expected parameters", parameters);
+    assertTrue("Excected user parameter for get method for users", parameters.get("required").contains("user"));
+  }
+
+  @Test
+  public void testGetAvailableParametersForUsersListMethod() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+    Map<String,List<String>> parameters = arv.getAvailableParametersForMethod("users", "list");
+    assertNotNull("Expected parameters", parameters);
+    assertTrue("Excected no required parameter for list method for users", parameters.get("required").size() == 0);
+    assertTrue("Excected some optional parameters for list method for users", parameters.get("optional").contains("filters"));
+  }
+
+}
diff --git a/sdk/java/src/test/resources/first_pipeline.json b/sdk/java/src/test/resources/first_pipeline.json
new file mode 100644 (file)
index 0000000..dc3b080
--- /dev/null
@@ -0,0 +1,15 @@
+{
+  "components":{
+    "do_hash":{
+      "script":"hash.py",
+      "script_parameters":{
+        "input":{
+          "required": true,
+          "dataclass": "Collection"
+        }
+      },
+      "script_version":"master",
+      "output_is_persistent":true
+    }
+  }
+}
diff --git a/sdk/pam/.dockerignore b/sdk/pam/.dockerignore
new file mode 100644 (file)
index 0000000..922b80e
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+*~
+*.pyc
+.eggs
+*.egg_info
+build
+tmp
diff --git a/sdk/pam/.gitignore b/sdk/pam/.gitignore
new file mode 120000 (symlink)
index 0000000..1399fd4
--- /dev/null
@@ -0,0 +1 @@
+../python/.gitignore
\ No newline at end of file
diff --git a/sdk/pam/Dockerfile b/sdk/pam/Dockerfile
new file mode 100644 (file)
index 0000000..ff450d8
--- /dev/null
@@ -0,0 +1,56 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# These tests assume you have a real API server running on the docker host.
+#
+# Build the test container:
+#   First, replace 3000 below with your api server's port number if necessary.
+#   host$ python setup.py sdist rotate --keep=1 --match .tar.gz
+#   host$ docker build --tag=arvados/pam_test .
+#
+# Automated integration test:
+#   host$ docker run -it --add-host zzzzz.arvadosapi.com:"$(hostname -I |awk '{print $1}')" arvados/pam_test
+# You should see "=== OK ===", followed by a Perl stack trace due to a
+# yet-unidentified pam_python.so bug.
+#
+# Manual integration test:
+#   host$ docker run -it --add-host zzzzz.arvadosapi.com:"$(hostname -I |awk '{print $1}')" arvados/pam_test bash -c 'rsyslogd & tail -F /var/log/auth.log & sleep 1 & bash'
+#   container# login
+#   login: active
+#   Arvados API token: 3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi
+# You should now be logged in to the "active" shell account in the
+# container. You should also see arvados_pam log entries in
+# /var/log/auth.log (and in your terminal, thanks to "tail -F").
+
+FROM debian:wheezy
+RUN apt-get update
+RUN apt-get -qy dist-upgrade
+RUN apt-get -qy install python python-virtualenv libpam-python rsyslog
+# Packages required by pycurl, ciso8601
+RUN apt-get -qy install libcurl4-gnutls-dev python2.7-dev
+
+# for jessie (which also has other snags)
+# RUN apt-get -qy install python-pip libgnutls28-dev
+
+RUN pip install --upgrade setuptools
+RUN pip install python-pam
+ADD dist /dist
+RUN pip install /dist/arvados-pam-*.tar.gz
+
+# Configure and enable the module (hopefully vendor packages will offer a neater way)
+RUN perl -pi -e 's{api.example}{zzzzz.arvadosapi.com:3000}; s{shell\.example}{testvm2.shell insecure};' /usr/share/pam-configs/arvados
+RUN DEBIAN_FRONTEND=noninteractive pam-auth-update arvados --remove unix
+
+# Add a user account matching the fixture
+RUN useradd -ms /bin/bash active
+
+# Test with python (SIGSEGV during tests)
+#ADD . /pam
+#WORKDIR /pam
+#CMD rsyslogd & tail -F /var/log/auth.log & python setup.py test --test-suite integration_tests
+
+# Test with perl (SIGSEGV when program exits)
+RUN apt-get install -qy libauthen-pam-perl
+ADD tests/integration_test.pl /integration_test.pl
+CMD rsyslogd & tail -F /var/log/auth.log & sleep 1 && /integration_test.pl
diff --git a/sdk/pam/LICENSE-2.0.txt b/sdk/pam/LICENSE-2.0.txt
new file mode 100644 (file)
index 0000000..d645695
--- /dev/null
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/sdk/pam/MANIFEST.in b/sdk/pam/MANIFEST.in
new file mode 100644 (file)
index 0000000..48892fa
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+include LICENSE-2.0.txt
+include README.rst
+include examples/shellinabox
+include lib/libpam_arvados.py
+include pam-configs/arvados
+include arvados_version.py
\ No newline at end of file
diff --git a/sdk/pam/README.rst b/sdk/pam/README.rst
new file mode 100644 (file)
index 0000000..81be331
--- /dev/null
@@ -0,0 +1,25 @@
+.. Copyright (C) The Arvados Authors. All rights reserved.
+..
+.. SPDX-License-Identifier: Apache-2.0
+
+==================
+Arvados PAM Module
+==================
+
+Overview
+--------
+
+Accept Arvados API tokens to authenticate to shell accounts.
+
+.. _Arvados: https://arvados.org
+
+Installation
+------------
+
+See http://doc.arvados.org
+
+Testing and Development
+-----------------------
+
+https://arvados.org/projects/arvados/wiki/Hacking
+describes how to set up a development environment and run tests.
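+
+Example
+-------
+
+A minimal sketch of a PAM stack entry using this module ("api.example"
+and "shell.example" are placeholders for your API host and this VM's
+hostname; see ``examples/shellinabox`` in this package for a complete
+file)::
+
+    auth [success=1 default=ignore] pam_python.so /usr/local/lib/security/libpam_arvados.py api.example shell.example noprompt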
diff --git a/sdk/pam/arvados_pam/__init__.py b/sdk/pam/arvados_pam/__init__.py
new file mode 100644 (file)
index 0000000..7f400ce
--- /dev/null
@@ -0,0 +1,55 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import sys
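+# pam_python's embedded interpreter does not set sys.argv, and modules
+# imported via the arvados client apparently expect it to exist, so
+# provide a placeholder.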
+sys.argv=['']
+
+from . import auth_event
+
+def pam_sm_authenticate(pamh, flags, argv):
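+    # argv comes from the PAM config line (see pam-configs/arvados), e.g.:
+    #   pam_python.so /usr/local/lib/security/libpam_arvados.py api.example shell.example [noprompt] [insecure]
+    # argv[1] is the Arvados API host, argv[2] this VM's hostname; any
+    # remaining arguments become boolean flags in config.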
+    config = {}
+    config['arvados_api_host'] = argv[1]
+    config['virtual_machine_hostname'] = argv[2]
+    if len(argv) > 3:
+        for k in argv[3:]:
+            config[k] = True
+
+    try:
+        username = pamh.get_user(None)
+    except pamh.exception as e:
+        return e.pam_result
+
+    if not username:
+        return pamh.PAM_USER_UNKNOWN
+
+    try:
+        prompt = '' if config.get('noprompt') else 'Arvados API token: '
+        token = pamh.conversation(pamh.Message(pamh.PAM_PROMPT_ECHO_OFF, prompt)).resp
+    except pamh.exception as e:
+        return e.pam_result
+
+    if auth_event.AuthEvent(
+            config=config,
+            service=pamh.service,
+            client_host=pamh.rhost,
+            username=username,
+            token=token).can_login():
+        return pamh.PAM_SUCCESS
+    else:
+        return pamh.PAM_AUTH_ERR
+
+def pam_sm_setcred(pamh, flags, argv):
+    return pamh.PAM_SUCCESS
+
+def pam_sm_acct_mgmt(pamh, flags, argv):
+    return pamh.PAM_SUCCESS
+
+def pam_sm_open_session(pamh, flags, argv):
+    return pamh.PAM_SUCCESS
+
+def pam_sm_close_session(pamh, flags, argv):
+    return pamh.PAM_SUCCESS
+
+def pam_sm_chauthtok(pamh, flags, argv):
+    return pamh.PAM_SUCCESS
diff --git a/sdk/pam/arvados_pam/auth_event.py b/sdk/pam/arvados_pam/auth_event.py
new file mode 100644 (file)
index 0000000..4f2663c
--- /dev/null
@@ -0,0 +1,92 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados
+import syslog
+
+def auth_log(msg):
+    """Log an authentication result to syslogd"""
+    syslog.openlog(facility=syslog.LOG_AUTH)
+    syslog.syslog('arvados_pam: ' + msg)
+    syslog.closelog()
+
+class AuthEvent(object):
+    def __init__(self, config, service, client_host, username, token):
+        self.config = config
+        self.service = service
+        self.client_host = client_host
+        self.username = username
+        self.token = token
+
+        self.api_host = None
+        self.vm_uuid = None
+        self.user = None
+
+    def can_login(self):
+        """Return truthy IFF credentials should be accepted."""
+        ok = False
+        try:
+            self.api_host = self.config['arvados_api_host']
+            self.arv = arvados.api('v1', host=self.api_host, token=self.token,
+                                   insecure=self.config.get('insecure', False),
+                                   cache=False)
+
+            vmname = self.config['virtual_machine_hostname']
+            vms = self.arv.virtual_machines().list(filters=[['hostname','=',vmname]]).execute()
+            if vms['items_available'] > 1:
+                raise Exception("lookup hostname %s returned %d records" % (vmname, vms['items_available']))
+            if vms['items_available'] == 0:
+                raise Exception("lookup hostname %s not found" % vmname)
+            vm = vms['items'][0]
+            if vm['hostname'] != vmname:
+                raise Exception("lookup hostname %s returned hostname %s" % (vmname, vm['hostname']))
+            self.vm_uuid = vm['uuid']
+
+            self.user = self.arv.users().current().execute()
+
+            filters = [
+                ['link_class','=','permission'],
+                ['name','=','can_login'],
+                ['head_uuid','=',self.vm_uuid],
+                ['tail_uuid','=',self.user['uuid']]]
+            for l in self.arv.links().list(filters=filters, limit=10000).execute()['items']:
+                if (l['properties']['username'] == self.username and
+                    l['tail_uuid'] == self.user['uuid'] and
+                    l['head_uuid'] == self.vm_uuid and
+                    l['link_class'] == 'permission' and
+                    l['name'] == 'can_login'):
+                    return self._report(True)
+
+            return self._report(False)
+
+        except Exception as e:
+            return self._report(e)
+
+    def _report(self, result):
+        """Log the result. Return truthy IFF result is True.
+
+        result must be True, False, or an exception.
+        """
+        self.result = result
+        auth_log(self.message())
+        return result == True
+
+    def message(self):
+        """Return a log message describing the event and its outcome."""
+        if isinstance(self.result, Exception):
+            outcome = 'Error: ' + repr(self.result)
+        elif self.result == True:
+            outcome = 'Allow'
+        else:
+            outcome = 'Deny'
+
+        if len(self.token) > 40:
+            log_token = self.token[0:15]
+        else:
+            log_token = '<invalid>'
+
+        log_label = [self.service, self.api_host, self.vm_uuid, self.client_host, self.username, log_token]
+        if self.user:
+            log_label += [self.user.get('uuid'), self.user.get('full_name')]
+        return str(log_label) + ': ' + outcome
diff --git a/sdk/pam/arvados_version.py b/sdk/pam/arvados_version.py
new file mode 100644 (file)
index 0000000..2e6484c
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import subprocess
+import time
+import os
+import re
+
+def git_latest_tag():
+    gittags = subprocess.check_output(['git', 'tag', '-l']).split()
+    gittags.sort(key=lambda s: [int(u) for u in s.split(b'.')],reverse=True)
+    return str(next(iter(gittags)).decode('utf-8'))
+
+def git_timestamp_tag():
+    gitinfo = subprocess.check_output(
+        ['git', 'log', '--first-parent', '--max-count=1',
+         '--format=format:%ct', '.']).strip()
+    return str(time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo))))
+
+def save_version(setup_dir, module, v):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'w') as fp:
+        return fp.write("__version__ = '%s'\n" % v)
+
+def read_version(setup_dir, module):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'r') as fp:
+        return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+
+def get_version(setup_dir, module):
+    env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
+
+    if env_version:
+        save_version(setup_dir, module, env_version)
+    else:
+        try:
+            save_version(setup_dir, module, git_latest_tag() + git_timestamp_tag())
+        except subprocess.CalledProcessError:
+            pass
+
+    return read_version(setup_dir, module)
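+
+# Typical use, as in this package's setup.py:
+#   import arvados_version
+#   version = arvados_version.get_version(SETUP_DIR, "arvados_pam")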
diff --git a/sdk/pam/examples/shellinabox b/sdk/pam/examples/shellinabox
new file mode 100644 (file)
index 0000000..2d91ccb
--- /dev/null
@@ -0,0 +1,27 @@
+# This example is a stock Debian "login" file with libpam_arvados
+# replacing pam_unix, and the "noprompt" option in use. It can be
+# installed as /etc/pam.d/shellinabox.
+
+auth       optional   pam_faildelay.so  delay=3000000
+auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
+auth       requisite  pam_nologin.so
+session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
+session       required   pam_env.so readenv=1
+session       required   pam_env.so readenv=1 envfile=/etc/default/locale
+
+auth [success=1 default=ignore] pam_python.so /usr/local/lib/security/libpam_arvados.py api.example shell.example noprompt
+auth   requisite                       pam_deny.so
+auth   required                        pam_permit.so
+
+auth       optional   pam_group.so
+session    required   pam_limits.so
+session    optional   pam_lastlog.so
+session    optional   pam_motd.so  motd=/run/motd.dynamic
+session    optional   pam_motd.so
+session    optional   pam_mail.so standard
+
+@include common-account
+@include common-session
+@include common-password
+
+session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
diff --git a/sdk/pam/fpm-info.sh b/sdk/pam/fpm-info.sh
new file mode 100644 (file)
index 0000000..6c323f5
--- /dev/null
@@ -0,0 +1,22 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+case "$TARGET" in
+    debian* | ubuntu*)
+        fpm_depends+=('libpam-python' 'libcurl3-gnutls')
+        ;;
+    centos*)
+        fpm_depends+=('python-pam')
+        ;;
+    *)
+        echo >&2 "ERROR: $PACKAGE: pam_python.so dependency unavailable in $TARGET."
+        return 1
+        ;;
+esac
+
+case "$FORMAT" in
+    deb)
+        fpm_args+=('--deb-recommends=system-log-daemon')
+        ;;
+esac
diff --git a/sdk/pam/gittaggers.py b/sdk/pam/gittaggers.py
new file mode 120000 (symlink)
index 0000000..d59c02c
--- /dev/null
@@ -0,0 +1 @@
+../python/gittaggers.py
\ No newline at end of file
diff --git a/sdk/pam/integration_tests/__init__.py b/sdk/pam/integration_tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/pam/integration_tests/test_pam.py b/sdk/pam/integration_tests/test_pam.py
new file mode 100644 (file)
index 0000000..32ae38d
--- /dev/null
@@ -0,0 +1,30 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""These tests assume we are running (in a docker container) with
+arvados_pam configured and a test API server running.
+"""
+import pam
+import unittest
+
+# From services/api/test/fixtures/api_client_authorizations.yml
+# because that file is not available during integration tests:
+ACTIVE_TOKEN = '3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi'
+SPECTATOR_TOKEN = 'zw2f4gwx8hw8cjre7yp6v1zylhrhn3m5gvjq73rtpwhmknrybu'
+
+class IntegrationTest(unittest.TestCase):
+    def setUp(self):
+        self.p = pam.pam()
+
+    def test_allow(self):
+        self.assertTrue(self.p.authenticate('active', ACTIVE_TOKEN, service='login'))
+
+    def test_deny_bad_token(self):
+        self.assertFalse(self.p.authenticate('active', 'thisisaverybadtoken', service='login'))
+
+    def test_deny_empty_token(self):
+        self.assertFalse(self.p.authenticate('active', '', service='login'))
+
+    def test_deny_permission(self):
+        self.assertFalse(self.p.authenticate('spectator', SPECTATOR_TOKEN, service='login'))
diff --git a/sdk/pam/lib/libpam_arvados.py b/sdk/pam/lib/libpam_arvados.py
new file mode 100644 (file)
index 0000000..7c3406d
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import sys
+sys.path.append('/usr/share/python2.7/dist/libpam-arvados/lib/python2.7/site-packages')
+from arvados_pam import *
diff --git a/sdk/pam/pam-configs/arvados b/sdk/pam/pam-configs/arvados
new file mode 100644 (file)
index 0000000..086e176
--- /dev/null
@@ -0,0 +1,14 @@
+# 1. Change "api.example" to your ARVADOS_API_HOST
+# 2. Change "shell.example" to this host's hostname
+#    (as it appears in the Arvados virtual_machines list)
+# 3. Install in /usr/share/pam-configs/arvados
+# 4. Run `pam-auth-update arvados`
+
+Name: Arvados authentication
+Default: yes
+Priority: 256
+Auth-Type: Primary
+Auth:
+       [success=end default=ignore]    pam_python.so /usr/local/lib/security/libpam_arvados.py api.example shell.example
+Auth-Initial:
+       [success=end default=ignore]    pam_python.so /usr/local/lib/security/libpam_arvados.py api.example shell.example
diff --git a/sdk/pam/setup.py b/sdk/pam/setup.py
new file mode 100755 (executable)
index 0000000..c94f5b4
--- /dev/null
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+import glob
+import os
+import sys
+import re
+import subprocess
+
+from setuptools import setup, find_packages
+
+SETUP_DIR = os.path.dirname(__file__) or '.'
+README = os.path.join(SETUP_DIR, 'README.rst')
+
+import arvados_version
+version = arvados_version.get_version(SETUP_DIR, "arvados_pam")
+
+short_tests_only = False
+if '--short-tests-only' in sys.argv:
+    short_tests_only = True
+    sys.argv.remove('--short-tests-only')
+
+setup(name='arvados-pam',
+      version=version,
+      description='Arvados PAM module',
+      long_description=open(README).read(),
+      author='Arvados',
+      author_email='info@arvados.org',
+      url='https://arvados.org',
+      download_url='https://github.com/curoverse/arvados.git',
+      license='Apache 2.0',
+      packages=[
+          'arvados_pam',
+      ],
+      scripts=[
+      ],
+      data_files=[
+          ('lib/security', ['lib/libpam_arvados.py']),
+          ('share/pam-configs', ['pam-configs/arvados']),
+          ('share/doc/arvados-pam', ['LICENSE-2.0.txt', 'README.rst']),
+          ('share/doc/arvados-pam/examples', glob.glob('examples/*')),
+
+          # The arvados build scripts used to install data files to
+          # "/usr/data/*" but now install them to "/usr/*". Here, we
+          # install an extra copy in the old location so existing pam
+          # configs can still work. When old systems have had a chance
+          # to update to the new paths, this line can be removed.
+          ('data/lib/security', ['lib/libpam_arvados.py']),
+      ],
+      install_requires=[
+          'arvados-python-client>=0.1.20150801000000',
+      ],
+      test_suite='tests',
+      tests_require=['pbr<1.7.0', 'mock>=1.0', 'python-pam'],
+      zip_safe=False
+      )
diff --git a/sdk/pam/tests/__init__.py b/sdk/pam/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/pam/tests/integration_test.pl b/sdk/pam/tests/integration_test.pl
new file mode 100755 (executable)
index 0000000..cbe9b0a
--- /dev/null
@@ -0,0 +1,49 @@
+#!/usr/bin/env perl
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+$ENV{ARVADOS_API_HOST_INSECURE} = 1;
+use Authen::PAM qw(:constants);
+
+for my $case (['good', 1, 'active', '3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi'],
+              ['badtoken', 0, 'active', 'badtokenmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi'],
+              ['badusername', 0, 'baduser', '3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi']) {
+    dotest(@$case);
+}
+print "=== OK ===\n";
+
+sub dotest {
+    my ($label, $expect_ok, $user, $token) = @_;
+    print "$label: ";
+    my $service_name = 'login';
+    $main::Token = $token;
+    my $pamh = new Authen::PAM($service_name, $user, \&token_conv_func);
+    ref($pamh) || die "Error code $pamh during PAM init!";
+    $pamh->pam_set_item(PAM_RHOST(), '::1');
+    $pamh->pam_set_item(PAM_RUSER(), 'none');
+    $pamh->pam_set_item(PAM_TTY(), '/dev/null');
+    my $flags = PAM_SILENT();
+    $res = $pamh->pam_authenticate($flags);
+    $msg = $pamh->pam_strerror($res);
+    print "Result (code $res): $msg\n";
+    if (($res == 0) != ($expect_ok == 1)) {
+        die "*** FAIL ***\n";
+    }
+}
+
+sub token_conv_func {
+    my @res;
+    while ( @_ ) {
+        my $code = shift;
+        my $msg = shift;
+        my $ans;
+        print "Message (type $code): $msg\n";
+        if ($code == PAM_PROMPT_ECHO_OFF() || $code == PAM_PROMPT_ECHO_ON()) {
+            $ans = $main::Token;
+        }
+        push @res, (0,$ans);
+    }
+    push @res, PAM_SUCCESS();
+    return @res;
+}
diff --git a/sdk/pam/tests/mocker.py b/sdk/pam/tests/mocker.py
new file mode 100644 (file)
index 0000000..ec6f064
--- /dev/null
@@ -0,0 +1,63 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import mock
+import unittest
+
+class Mocker(unittest.TestCase):
+    ACTIVE_TOKEN = '3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi'
+
+    default_config = {
+        'arvados_api_host': 'zzzzz.api_host.example',
+        'virtual_machine_hostname': 'testvm2.shell',
+    }
+    default_request = {
+        'client_host': '::1',
+        'token': ACTIVE_TOKEN,
+        'username': 'active',
+    }
+    default_response = {
+        'links': {
+            'items': [{
+                'uuid': 'zzzzz-o0j2j-rah2ya1ohx9xaev',
+                'tail_uuid': 'zzzzz-tpzed-xurymjxw79nv3jz',
+                'head_uuid': 'zzzzz-2x53u-382brsig8rp3065',
+                'link_class': 'permission',
+                'name': 'can_login',
+                'properties': {
+                    'username': 'active',
+                },
+            }],
+        },
+        'users': {
+            'uuid': 'zzzzz-tpzed-xurymjxw79nv3jz',
+            'full_name': 'Active User',
+        },
+        'virtual_machines': {
+            'items': [{
+                'uuid': 'zzzzz-2x53u-382brsig8rp3065',
+                'hostname': 'testvm2.shell',
+            }],
+            'items_available': 1,
+        },
+    }
+
+    def setUp(self):
+        self.config = self.default_config.copy()
+        self.request = self.default_request.copy()
+        self.response = self.default_response.copy()
+        self.api_client = mock.MagicMock(name='api_client')
+        self.api_client.users().current().execute.side_effect = lambda: self.response['users']
+        self.api_client.virtual_machines().list().execute.side_effect = lambda: self.response['virtual_machines']
+        self.api_client.links().list().execute.side_effect = lambda: self.response['links']
+        patcher = mock.patch('arvados.api')
+        self.api = patcher.start()
+        self.addCleanup(patcher.stop)
+        self.api.side_effect = [self.api_client]
+
+        self.syslogged = []
+        patcher = mock.patch('syslog.syslog')
+        self.syslog = patcher.start()
+        self.addCleanup(patcher.stop)
+        self.syslog.side_effect = lambda s: self.syslogged.append(s)
diff --git a/sdk/pam/tests/test_auth_event.py b/sdk/pam/tests/test_auth_event.py
new file mode 100644 (file)
index 0000000..f907b31
--- /dev/null
@@ -0,0 +1,99 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados_pam
+import re
+from . import mocker
+
+class AuthEventTest(mocker.Mocker):
+    def attempt(self):
+        return arvados_pam.auth_event.AuthEvent(config=self.config, service='test_service', **self.request).can_login()
+
+    def test_success(self):
+        self.assertTrue(self.attempt())
+
+        self.api_client.virtual_machines().list.assert_called_with(
+            filters=[['hostname','=',self.config['virtual_machine_hostname']]])
+        self.api.assert_called_with(
+            'v1',
+            host=self.config['arvados_api_host'], token=self.request['token'],
+            insecure=False,
+            cache=False)
+        self.assertEqual(1, len(self.syslogged))
+        for i in ['test_service',
+                  self.request['username'],
+                  self.config['arvados_api_host'],
+                  self.response['virtual_machines']['items'][0]['uuid']]:
+            self.assertRegexpMatches(self.syslogged[0], re.escape(i))
+        self.assertRegexpMatches(self.syslogged[0], re.escape(self.request['token'][0:15]), 'token prefix not logged')
+        self.assertNotRegexpMatches(self.syslogged[0], re.escape(self.request['token'][15:30]), 'too much token logged')
+
+    def test_fail_vm_lookup(self):
+        self.api_client.virtual_machines().list().execute.side_effect = Exception("Test-induced failure")
+        self.assertFalse(self.attempt())
+        self.assertRegexpMatches(self.syslogged[0], 'Test-induced failure')
+
+    def test_vm_hostname_not_found(self):
+        self.response['virtual_machines'] = {
+            'items': [],
+            'items_available': 0,
+        }
+        self.assertFalse(self.attempt())
+
+    def test_vm_hostname_ambiguous(self):
+        self.response['virtual_machines'] = {
+            'items': [
+                {
+                    'uuid': 'zzzzz-2x53u-382brsig8rp3065',
+                    'hostname': 'testvm2.shell',
+                },
+                {
+                    'uuid': 'zzzzz-2x53u-382brsig8rp3065',
+                    'hostname': 'testvm2.shell',
+                },
+            ],
+            'items_available': 2,
+        }
+        self.assertFalse(self.attempt())
+
+    def test_server_ignores_vm_filters(self):
+        self.response['virtual_machines'] = {
+            'items': [
+                {
+                    'uuid': 'zzzzz-2x53u-382brsig8rp3065',
+                    'hostname': 'testvm22.shell', # <-----
+                },
+            ],
+            'items_available': 1,
+        }
+        self.assertFalse(self.attempt())
+
+    def test_fail_user_lookup(self):
+        self.api_client.users().current().execute.side_effect = Exception("Test-induced failure")
+        self.assertFalse(self.attempt())
+
+    def test_fail_permission_check(self):
+        self.api_client.links().list().execute.side_effect = Exception("Test-induced failure")
+        self.assertFalse(self.attempt())
+
+    def test_no_login_permission(self):
+        self.response['links'] = {
+            'items': [],
+        }
+        self.assertFalse(self.attempt())
+
+    def test_server_ignores_permission_filters(self):
+        self.response['links'] = {
+            'items': [{
+                'uuid': 'zzzzz-o0j2j-rah2ya1ohx9xaev',
+                'tail_uuid': 'zzzzz-tpzed-xurymjxw79nv3jz',
+                'head_uuid': 'zzzzz-2x53u-382brsig8rp3065',
+                'link_class': 'permission',
+                'name': 'CANT_login', # <-----
+                'properties': {
+                    'username': 'active',
+                },
+            }],
+        }
+        self.assertFalse(self.attempt())
diff --git a/sdk/pam/tests/test_pam_sm.py b/sdk/pam/tests/test_pam_sm.py
new file mode 100644 (file)
index 0000000..53597c0
--- /dev/null
@@ -0,0 +1,30 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados_pam
+import mock
+from . import mocker
+
+class PamSMTest(mocker.Mocker):
+    def attempt(self):
+        return arvados_pam.pam_sm_authenticate(self.pamh, 0, self.argv)
+
+    def test_success(self):
+        self.assertEqual(self.pamh.PAM_SUCCESS, self.attempt())
+
+    def test_bad_user(self):
+        self.pamh.get_user = mock.MagicMock(return_value='badusername')
+        self.assertEqual(self.pamh.PAM_AUTH_ERR, self.attempt())
+
+    def test_bad_vm(self):
+        self.argv[2] = 'testvm22.shell'
+        self.assertEqual(self.pamh.PAM_AUTH_ERR, self.attempt())
+
+    def setUp(self):
+        super(PamSMTest, self).setUp()
+        self.pamh = mock.MagicMock()
+        self.pamh.get_user = mock.MagicMock(return_value='active')
+        self.pamh.PAM_SUCCESS = 12345
+        self.pamh.PAM_AUTH_ERR = 54321
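+        # Arbitrary sentinel values: pam_sm_authenticate must return whatever
+        # constants the PAM handle defines, so hardcoded values would fail here.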
+        self.argv = [__file__, 'zzzzz.arvadosapi.com', 'testvm2.shell']
diff --git a/sdk/perl/.gitignore b/sdk/perl/.gitignore
new file mode 100644 (file)
index 0000000..7c32f55
--- /dev/null
@@ -0,0 +1 @@
+install
diff --git a/sdk/perl/Makefile.PL b/sdk/perl/Makefile.PL
new file mode 100644 (file)
index 0000000..ec903f3
--- /dev/null
@@ -0,0 +1,18 @@
+#! /usr/bin/perl
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+use strict;
+
+use ExtUtils::MakeMaker;
+
+WriteMakefile(
+    NAME            => 'Arvados',
+    VERSION_FROM    => 'lib/Arvados.pm',
+    PREREQ_PM       => {
+        'JSON'     => 0,
+        'LWP'      => 0,
+        'Net::SSL' => 0,
+    },
+);
diff --git a/sdk/perl/lib/Arvados.pm b/sdk/perl/lib/Arvados.pm
new file mode 100644 (file)
index 0000000..9eb04b4
--- /dev/null
@@ -0,0 +1,165 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+=head1 NAME
+
+Arvados -- client library for Arvados services
+
+=head1 SYNOPSIS
+
+  use Arvados;
+  $arv = Arvados->new(apiHost => 'arvados.local');
+
+  my $instances = $arv->{'pipeline_instances'}->{'list'}->execute();
+  print "UUID is ", $instances->{'items'}->[0]->{'uuid'}, "\n";
+
+  $uuid = 'eiv0u-arx5y-2c5ovx43zw90gvh';
+  $instance = $arv->{'pipeline_instances'}->{'get'}->execute('uuid' => $uuid);
+  print "ETag is ", $instance->{'etag'}, "\n";
+
+  $instance->{'active'} = 1;
+  $instance->{'name'} = '';
+  $instance->save();
+  print "ETag is ", $instance->{'etag'}, "\n";
+
+=head1 METHODS
+
+=head2 new()
+
+ my $arv = Arvados->new( %OPTIONS );
+
+Set up a client and retrieve the schema from the server.
+
+=head3 Options
+
+=over
+
+=item apiHost
+
+Hostname of API discovery service. Default: C<ARVADOS_API_HOST>
+environment variable, or C<arvados>
+
+=item apiProtocolScheme
+
+Protocol scheme. Default: C<ARVADOS_API_PROTOCOL_SCHEME> environment
+variable, or C<https>
+
+=item authToken
+
+Authorization token. Default: C<ARVADOS_API_TOKEN> environment variable
+
+=item apiService
+
+Default: C<arvados>
+
+=item apiVersion
+
+Default: C<v1>
+
+=back
+
+=cut
+
+package Arvados;
+
+use Net::SSL (); # From Crypt-SSLeay
+BEGIN {
+  $Net::HTTPS::SSL_SOCKET_CLASS = "Net::SSL"; # Force use of Net::SSL
+}
+
+use JSON;
+use Carp;
+use Arvados::ResourceAccessor;
+use Arvados::ResourceMethod;
+use Arvados::ResourceProxy;
+use Arvados::ResourceProxyList;
+use Arvados::Request;
+use Data::Dumper;
+
+$Arvados::VERSION = 0.1;
+
+sub new
+{
+    my $class = shift;
+    my %self = @_;
+    my $self = \%self;
+    bless ($self, $class);
+    return $self->build(@_);
+}
+
+sub build
+{
+    my $self = shift;
+
+    my $config = load_config_file("$ENV{HOME}/.config/arvados/settings.conf");
+
+    $self->{'authToken'} ||=
+       $ENV{ARVADOS_API_TOKEN} || $config->{ARVADOS_API_TOKEN};
+
+    $self->{'apiHost'} ||=
+       $ENV{ARVADOS_API_HOST} || $config->{ARVADOS_API_HOST};
+
+    $self->{'noVerifyHostname'} ||=
+       $ENV{ARVADOS_API_HOST_INSECURE};
+
+    $self->{'apiProtocolScheme'} ||=
+       $ENV{ARVADOS_API_PROTOCOL_SCHEME} ||
+       $config->{ARVADOS_API_PROTOCOL_SCHEME};
+
+    $self->{'ua'} = new Arvados::Request;
+
+    my $host = $self->{'apiHost'} || 'arvados';
+    my $service = $self->{'apiService'} || 'arvados';
+    my $version = $self->{'apiVersion'} || 'v1';
+    my $scheme = $self->{'apiProtocolScheme'} || 'https';
+    my $uri = "$scheme://$host/discovery/v1/apis/$service/$version/rest";
+    my $r = $self->new_request;
+    $r->set_uri($uri);
+    $r->set_method("GET");
+    $r->process_request();
+    my ($data, $headers);
+    my ($status_number, $status_phrase) = $r->get_status();
+    $data = $r->get_body() if $status_number == 200;
+    $headers = $r->get_headers();
+    if ($data) {
+        my $doc = $self->{'discoveryDocument'} = JSON::decode_json($data);
+        print STDERR Dumper $doc if $ENV{'DEBUG_ARVADOS_API_DISCOVERY'};
+        my ($k, $v);
+        while (($k, $v) = each %{$doc->{'resources'}}) {
+            $self->{$k} = Arvados::ResourceAccessor->new($self, $k);
+        }
+    } else {
+        croak "No discovery doc at $uri - $status_number $status_phrase";
+    }
+    $self;
+}
+
+sub new_request
+{
+    my $self = shift;
+    local $ENV{'PERL_LWP_SSL_VERIFY_HOSTNAME'};
+    if ($self->{'noVerifyHostname'} || (($self->{'apiHost'} || '') =~ /\.local$/)) {
+        $ENV{'PERL_LWP_SSL_VERIFY_HOSTNAME'} = 0;
+    }
+    Arvados::Request->new();
+}
+
+sub load_config_file ($)
+{
+    my $config_file = shift;
+    my %config;
+
+    if (open (CONF, $config_file)) {
+	while (<CONF>) {
+	    next if /^\s*#/ || /^\s*$/;  # skip comments and blank lines
+	    chomp;
+	    my ($key, $val) = split /\s*=\s*/, $_, 2;
+	    $config{$key} = $val;
+	}
+	close CONF;  # only close the handle if open() succeeded
+    }
+    return \%config;
+}
+
+1;
diff --git a/sdk/perl/lib/Arvados/Request.pm b/sdk/perl/lib/Arvados/Request.pm
new file mode 100644 (file)
index 0000000..4523f7d
--- /dev/null
@@ -0,0 +1,104 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+package Arvados::Request;
+use Data::Dumper;
+use LWP::UserAgent;
+use URI::Escape;
+use Encode;
+use strict;
+@Arvados::HTTP::ISA = qw(LWP::UserAgent);
+
+sub new
+{
+    my $class = shift;
+    my $self = {};
+    bless ($self, $class);
+    return $self->_init(@_);
+}
+
+sub _init
+{
+    my $self = shift;
+    $self->{'ua'} = new LWP::UserAgent(@_);
+    $self->{'ua'}->agent ("libarvados-perl/".$Arvados::VERSION);
+    $self;
+}
+
+sub set_uri
+{
+    my $self = shift;
+    $self->{'uri'} = shift;
+}
+
+sub process_request
+{
+    my $self = shift;
+    my %req;
+    my %content;
+    my $method = $self->{'method'};
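+    # Tunnel GET and HEAD through POST with a '_method' parameter, so query
+    # parameters can travel in the request body instead of the URI.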
+    if ($method eq 'GET' || $method eq 'HEAD') {
+        $content{'_method'} = $method;
+        $method = 'POST';
+    }
+    $req{$method} = $self->{'uri'};
+    $self->{'req'} = new HTTP::Request (%req);
+    $self->{'req'}->header('Authorization' => ('OAuth2 ' . $self->{'authToken'})) if $self->{'authToken'};
+    $self->{'req'}->header('Accept' => 'application/json');
+
+    # allow_nonref lets us encode JSON::true and JSON::false, see #12078
+    my $json = JSON->new->allow_nonref;
+    my ($p, $v);
+    while (($p, $v) = each %{$self->{'queryParams'}}) {
+        $content{$p} = (ref($v) eq "") ? $v : $json->encode($v);
+    }
+    my $content = '';
+    while (($p, $v) = each %content) {
+        $content .= '&' unless $content eq '';
+        $content .= uri_escape($p);
+        $content .= '=';
+        $content .= uri_escape($v);
+    }
+    $self->{'req'}->content_type("application/x-www-form-urlencoded; charset='utf8'");
+    $self->{'req'}->content(Encode::encode('utf8', $content));
+    $self->{'res'} = $self->{'ua'}->request ($self->{'req'});
+}
+
+sub get_status
+{
+    my $self = shift;
+    return ($self->{'res'}->code(),
+           $self->{'res'}->message());
+}
+
+sub get_body
+{
+    my $self = shift;
+    return $self->{'res'}->content;
+}
+
+sub set_method
+{
+    my $self = shift;
+    $self->{'method'} = shift;
+}
+
+sub set_query_params
+{
+    my $self = shift;
+    $self->{'queryParams'} = shift;
+}
+
+sub set_auth_token
+{
+    my $self = shift;
+    $self->{'authToken'} = shift;
+}
+
+sub get_headers
+{
+    ""
+}
+
+1;
diff --git a/sdk/perl/lib/Arvados/ResourceAccessor.pm b/sdk/perl/lib/Arvados/ResourceAccessor.pm
new file mode 100644 (file)
index 0000000..8b235fc
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+package Arvados::ResourceAccessor;
+use Carp;
+use Data::Dumper;
+
+sub new
+{
+    my $class = shift;
+    my $self = {};
+    bless ($self, $class);
+
+    $self->{'api'} = shift;
+    $self->{'resourcesName'} = shift;
+    $self->{'methods'} = $self->{'api'}->{'discoveryDocument'}->{'resources'}->{$self->{'resourcesName'}}->{'methods'};
+    my ($method_name, $method);
+    while (($method_name, $method) = each %{$self->{'methods'}}) {
+        $self->{$method_name} = Arvados::ResourceMethod->new($self, $method);
+    }
+    $self;
+}
+
+1;
diff --git a/sdk/perl/lib/Arvados/ResourceMethod.pm b/sdk/perl/lib/Arvados/ResourceMethod.pm
new file mode 100644 (file)
index 0000000..d7e86ff
--- /dev/null
@@ -0,0 +1,124 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+package Arvados::ResourceMethod;
+use Carp;
+use Data::Dumper;
+
+sub new
+{
+    my $class = shift;
+    my $self = {};
+    bless ($self, $class);
+    return $self->_init(@_);
+}
+
+sub _init
+{
+    my $self = shift;
+    $self->{'resourceAccessor'} = shift;
+    $self->{'method'} = shift;
+    return $self;
+}
+
+sub execute
+{
+    my $self = shift;
+    my $method = $self->{'method'};
+
+    my $path = $method->{'path'};
+
+    my %body_params;
+    my %given_params = @_;
+    my %extra_params = %given_params;
+    my %method_params = %{$method->{'parameters'}};
+    if ($method->{'request'}->{'properties'}) {
+        while (my ($prop_name, $prop_value) =
+               each %{$method->{'request'}->{'properties'}}) {
+            if (ref($prop_value) eq 'HASH' && $prop_value->{'$ref'}) {
+                $method_params{$prop_name} = { 'type' => 'object' };
+            }
+        }
+    }
+    while (my ($param_name, $param) = each %method_params) {
+        delete $extra_params{$param_name};
+        if ($param->{'required'} && !exists $given_params{$param_name}) {
+            croak("Required parameter not supplied: $param_name");
+        }
+        elsif ($param->{'location'} eq 'path') {
+            $path =~ s/{\Q$param_name\E}/$given_params{$param_name}/eg;
+        }
+        elsif (!exists $given_params{$param_name}) {
+            ;
+        }
+        elsif ($param->{'type'} eq 'object') {
+            my %param_value;
+            my ($p, $v);
+            if (exists $param->{'properties'}) {
+                while (my ($property_name, $property) =
+                       each %{$param->{'properties'}}) {
+                    # if the discovery doc specifies object structure,
+                    # convert to true/false depending on supplied type
+                    if (!exists $given_params{$param_name}->{$property_name}) {
+                        ;
+                    }
+                    elsif (!defined $given_params{$param_name}->{$property_name}) {
+                        $param_value{$property_name} = JSON::null;
+                    }
+                    elsif ($property->{'type'} eq 'boolean') {
+                        $param_value{$property_name} = $given_params{$param_name}->{$property_name} ? JSON::true : JSON::false;
+                    }
+                    else {
+                        $param_value{$property_name} = $given_params{$param_name}->{$property_name};
+                    }
+                }
+            }
+            else {
+                while (my ($property_name, $property) =
+                       each %{$given_params{$param_name}}) {
+                    if (!ref($property) || !defined($property)) {
+                        $param_value{$property_name} = $property;
+                    }
+                    elsif (ref $property eq 'HASH') {
+                        $param_value{$property_name} = {};
+                        while (my ($k, $v) = each %$property) {
+                            $param_value{$property_name}->{$k} = $v;
+                        }
+                    }
+                }
+            }
+            $body_params{$param_name} = \%param_value;
+        } elsif ($param->{'type'} eq 'boolean') {
+            $body_params{$param_name} = $given_params{$param_name} ? JSON::true : JSON::false;
+        } else {
+            $body_params{$param_name} = $given_params{$param_name};
+        }
+    }
+    if (%extra_params) {
+        croak("Unsupported parameter(s) passed to API call /$path: \"" . join('", "', keys %extra_params) . '"');
+    }
+    my $r = $self->{'resourceAccessor'}->{'api'}->new_request;
+    my $base_uri = $self->{'resourceAccessor'}->{'api'}->{'discoveryDocument'}->{'baseUrl'};
+    $base_uri =~ s:/$::;
+    $r->set_uri($base_uri . "/" . $path);
+    $r->set_method($method->{'httpMethod'});
+    $r->set_auth_token($self->{'resourceAccessor'}->{'api'}->{'authToken'});
+    $r->set_query_params(\%body_params) if %body_params;
+    $r->process_request();
+    my ($data, $headers);
+    my ($status_number, $status_phrase) = $r->get_status();
+    if ($status_number != 200) {
+        croak("API call /$path failed: $status_number $status_phrase\n". $r->get_body());
+    }
+    $data = $r->get_body();
+    $headers = $r->get_headers();
+    my $result = JSON::decode_json($data);
+    if ($method->{'response'}->{'$ref'} =~ /List$/) {
+        Arvados::ResourceProxyList->new($result, $self->{'resourceAccessor'});
+    } else {
+        Arvados::ResourceProxy->new($result, $self->{'resourceAccessor'});
+    }
+}
+
+1;
diff --git a/sdk/perl/lib/Arvados/ResourceProxy.pm b/sdk/perl/lib/Arvados/ResourceProxy.pm
new file mode 100644 (file)
index 0000000..d3be468
--- /dev/null
@@ -0,0 +1,61 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+package Arvados::ResourceProxy;
+
+sub new
+{
+    my $class = shift;
+    my $self = shift;
+    $self->{'resourceAccessor'} = shift;
+    bless ($self, $class);
+    $self;
+}
+
+sub save
+{
+    my $self = shift;
+    my $response = $self->{'resourceAccessor'}->{'update'}->execute('uuid' => $self->{'uuid'}, $self->resource_parameter_name() => $self);
+    foreach my $param (keys %$self) {
+        if (exists $response->{$param}) {
+            $self->{$param} = $response->{$param};
+        }
+    }
+    $self;
+}
+
+sub update_attributes
+{
+    my $self = shift;
+    my %updates = @_;
+    my $response = $self->{'resourceAccessor'}->{'update'}->execute('uuid' => $self->{'uuid'}, $self->resource_parameter_name() => \%updates);
+    foreach my $param (keys %updates) {
+        if (exists $response->{$param}) {
+            $self->{$param} = $response->{$param};
+        }
+    }
+    $self;
+}
+
+sub reload
+{
+    my $self = shift;
+    my $response = $self->{'resourceAccessor'}->{'get'}->execute('uuid' => $self->{'uuid'});
+    foreach my $param (keys %$self) {
+        if (exists $response->{$param}) {
+            $self->{$param} = $response->{$param};
+        }
+    }
+    $self;
+}
+
+sub resource_parameter_name
+{
+    my $self = shift;
+    my $pname = $self->{'resourceAccessor'}->{'resourcesName'};
+    $pname =~ s/s$//;           # XXX not a very good singularize()
+    $pname;
+}
+
+1;
diff --git a/sdk/perl/lib/Arvados/ResourceProxyList.pm b/sdk/perl/lib/Arvados/ResourceProxyList.pm
new file mode 100644 (file)
index 0000000..7d8e187
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+package Arvados::ResourceProxyList;
+
+sub new
+{
+    my $class = shift;
+    my $self = {};
+    bless ($self, $class);
+    $self->_init(@_);
+}
+
+sub _init
+{
+    my $self = shift;
+    $self->{'serverResponse'} = shift;
+    $self->{'resourceAccessor'} = shift;
+    $self->{'items'} = [ map { Arvados::ResourceProxy->new($_, $self->{'resourceAccessor'}) } @{$self->{'serverResponse'}->{'items'}} ];
+    $self;
+}
+
+1;
diff --git a/sdk/python/.gitignore b/sdk/python/.gitignore
new file mode 100644 (file)
index 0000000..ab21552
--- /dev/null
@@ -0,0 +1,7 @@
+*.pyc
+/build/
+/dist/
+*.egg
+*.egg-info
+/tests/tmp
+.eggs
diff --git a/sdk/python/LICENSE-2.0.txt b/sdk/python/LICENSE-2.0.txt
new file mode 100644 (file)
index 0000000..d645695
--- /dev/null
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/sdk/python/MANIFEST.in b/sdk/python/MANIFEST.in
new file mode 100644 (file)
index 0000000..50a2923
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+include LICENSE-2.0.txt
+include README.rst
+include arvados_version.py
\ No newline at end of file
diff --git a/sdk/python/README.rst b/sdk/python/README.rst
new file mode 100644 (file)
index 0000000..a03d6af
--- /dev/null
@@ -0,0 +1,67 @@
+.. Copyright (C) The Arvados Authors. All rights reserved.
+..
+.. SPDX-License-Identifier: Apache-2.0
+
+=====================
+Arvados Python Client
+=====================
+
+Overview
+--------
+
+This package provides the ``arvados`` module, an API client for
+Arvados_.  It also includes higher-level functions to help you write
+Crunch scripts, and command-line tools to store and retrieve data in
+the Keep storage server.
+
+.. _Arvados: https://arvados.org/
+
+Installation
+------------
+
+Installing under your user account
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This method lets you install the package without root access.
+However, other users on the same system won't be able to use it.
+
+1. Run ``pip install --user arvados-python-client``.
+
+2. In your shell configuration, make sure you add ``$HOME/.local/bin``
+   to your PATH environment variable.  For example, you could add the
+   command ``PATH=$PATH:$HOME/.local/bin`` to your ``.bashrc`` file.
+
+3. Reload your shell configuration.  For example, bash users could run
+   ``source ~/.bashrc``.
+
+Installing on Debian systems
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+1. Add this Arvados repository to your sources list::
+
+     deb http://apt.arvados.org/ stretch main
+
+2. Update your package list.
+
+3. Install the ``python-arvados-python-client`` package.
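+
+A sketch of the equivalent shell commands (assuming ``apt-get`` and the
+repository line shown above)::
+
+     echo 'deb http://apt.arvados.org/ stretch main' | sudo tee /etc/apt/sources.list.d/arvados.list
+     sudo apt-get update
+     sudo apt-get install python-arvados-python-client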
+
+Configuration
+-------------
+
+This client software needs two pieces of information to connect to
+Arvados: the DNS name of the API server, and an API authorization
+token.  You can set these in environment variables, or the file
+``$HOME/.config/arvados/settings.conf``.  `The Arvados user
+documentation
+<http://doc.arvados.org/user/reference/api-tokens.html>`_ describes
+how to find this information in the Arvados Workbench, and install it
+on your system.
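+
+A minimal ``settings.conf`` sketch (the host and token here are
+placeholders, not real credentials)::
+
+     ARVADOS_API_HOST=zzzzz.arvadosapi.com
+     ARVADOS_API_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx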
+
+Testing and Development
+-----------------------
+
+This package is one part of the Arvados source package, and it has
+integration tests to check interoperability with other Arvados
+components.  Our `hacking guide
+<https://arvados.org/projects/arvados/wiki/Hacking_Python_SDK>`_
+describes how to set up a development environment and run tests.
diff --git a/sdk/python/arvados/__init__.py b/sdk/python/arvados/__init__.py
new file mode 100644 (file)
index 0000000..c8c7029
--- /dev/null
@@ -0,0 +1,173 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
+import bz2
+import fcntl
+import hashlib
+import http.client
+import httplib2
+import json
+import logging
+import os
+import pprint
+import re
+import string
+import sys
+import time
+import types
+import zlib
+
+if sys.version_info >= (3, 0):
+    from collections import UserDict
+else:
+    from UserDict import UserDict
+
+from .api import api, api_from_config, http_cache
+from .collection import CollectionReader, CollectionWriter, ResumableCollectionWriter
+from arvados.keep import *
+from arvados.stream import *
+from .arvfile import StreamFileReader
+from .retry import RetryLoop
+import arvados.errors as errors
+import arvados.util as util
+# `config` and `retry` are used below; import them explicitly rather than
+# relying on the star imports above to bring them into scope.
+from . import config
+from . import retry
+
+# Set up Arvados logging based on the user's configuration.
+# All Arvados code should log under the arvados hierarchy.
+log_format = '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s'
+log_date_format = '%Y-%m-%d %H:%M:%S'
+log_handler = logging.StreamHandler()
+log_handler.setFormatter(logging.Formatter(log_format, log_date_format))
+logger = logging.getLogger('arvados')
+logger.addHandler(log_handler)
+logger.setLevel(logging.DEBUG if config.get('ARVADOS_DEBUG')
+                else logging.WARNING)
+
+def task_set_output(self, s, num_retries=5):
+    for tries_left in RetryLoop(num_retries=num_retries, backoff_start=0):
+        try:
+            return api('v1').job_tasks().update(
+                uuid=self['uuid'],
+                body={
+                    'output':s,
+                    'success':True,
+                    'progress':1.0
+                }).execute()
+        except errors.ApiError as error:
+            if retry.check_http_response_success(error.resp.status) is None and tries_left > 0:
+                logger.debug("task_set_output: job_tasks().update() raised {}, retrying with {} tries left".format(repr(error),tries_left))
+            else:
+                raise
+
+_current_task = None
+def current_task(num_retries=5):
+    global _current_task
+    if _current_task:
+        return _current_task
+
+    for tries_left in RetryLoop(num_retries=num_retries, backoff_start=2):
+        try:
+            task = api('v1').job_tasks().get(uuid=os.environ['TASK_UUID']).execute()
+            task = UserDict(task)
+            task.set_output = types.MethodType(task_set_output, task)
+            task.tmpdir = os.environ['TASK_WORK']
+            _current_task = task
+            return task
+        except errors.ApiError as error:
+            if retry.check_http_response_success(error.resp.status) is None and tries_left > 0:
+                logger.debug("current_task: job_tasks().get() raised {}, retrying with {} tries left".format(repr(error),tries_left))
+            else:
+                raise
+
+_current_job = None
+def current_job(num_retries=5):
+    global _current_job
+    if _current_job:
+        return _current_job
+
+    for tries_left in RetryLoop(num_retries=num_retries, backoff_start=2):
+        try:
+            job = api('v1').jobs().get(uuid=os.environ['JOB_UUID']).execute()
+            job = UserDict(job)
+            job.tmpdir = os.environ['JOB_WORK']
+            _current_job = job
+            return job
+        except errors.ApiError as error:
+            if retry.check_http_response_success(error.resp.status) is None and tries_left > 0:
+                logger.debug("current_job: jobs().get() raised {}, retrying with {} tries left".format(repr(error),tries_left))
+            else:
+                raise
+
+def getjobparam(*args):
+    return current_job()['script_parameters'].get(*args)
+
+def get_job_param_mount(*args):
+    return os.path.join(os.environ['TASK_KEEPMOUNT'], current_job()['script_parameters'].get(*args))
+
+def get_task_param_mount(*args):
+    return os.path.join(os.environ['TASK_KEEPMOUNT'], current_task()['parameters'].get(*args))
+
+class JobTask(object):
+    def __init__(self, parameters=dict(), runtime_constraints=dict()):
+        print("init jobtask %s %s" % (parameters, runtime_constraints))
+
+class job_setup(object):
+    @staticmethod
+    def one_task_per_input_file(if_sequence=0, and_end_task=True, input_as_path=False, api_client=None):
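+        """Queue one new job task per file in this job's input collection.
+
+        Creates a task at sequence if_sequence + 1 for each input file;
+        if and_end_task is true, marks the current task successful and
+        exits the process.
+        """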
+        if if_sequence != current_task()['sequence']:
+            return
+
+        if not api_client:
+            api_client = api('v1')
+
+        job_input = current_job()['script_parameters']['input']
+        cr = CollectionReader(job_input, api_client=api_client)
+        cr.normalize()
+        for s in cr.all_streams():
+            for f in s.all_files():
+                if input_as_path:
+                    task_input = os.path.join(job_input, s.name(), f.name())
+                else:
+                    task_input = f.as_manifest()
+                new_task_attrs = {
+                    'job_uuid': current_job()['uuid'],
+                    'created_by_job_task_uuid': current_task()['uuid'],
+                    'sequence': if_sequence + 1,
+                    'parameters': {
+                        'input':task_input
+                        }
+                    }
+                api_client.job_tasks().create(body=new_task_attrs).execute()
+        if and_end_task:
+            api_client.job_tasks().update(uuid=current_task()['uuid'],
+                                       body={'success':True}
+                                       ).execute()
+            exit(0)
+
+    @staticmethod
+    def one_task_per_input_stream(if_sequence=0, and_end_task=True):
+        if if_sequence != current_task()['sequence']:
+            return
+        job_input = current_job()['script_parameters']['input']
+        cr = CollectionReader(job_input)
+        for s in cr.all_streams():
+            task_input = s.tokens()
+            new_task_attrs = {
+                'job_uuid': current_job()['uuid'],
+                'created_by_job_task_uuid': current_task()['uuid'],
+                'sequence': if_sequence + 1,
+                'parameters': {
+                    'input':task_input
+                    }
+                }
+            api('v1').job_tasks().create(body=new_task_attrs).execute()
+        if and_end_task:
+            api('v1').job_tasks().update(uuid=current_task()['uuid'],
+                                       body={'success':True}
+                                       ).execute()
+            exit(0)
diff --git a/sdk/python/arvados/_normalize_stream.py b/sdk/python/arvados/_normalize_stream.py
new file mode 100644 (file)
index 0000000..485c757
--- /dev/null
@@ -0,0 +1,67 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from . import config
+
+import re
+
+def escape(path):
+    path = re.sub('\\\\', lambda m: '\\134', path)
+    path = re.sub('[:\000-\040]', lambda m: "\\%03o" % ord(m.group(0)), path)
+    return path
+
+def normalize_stream(stream_name, stream):
+    """Take manifest stream and return a list of tokens in normalized format.
+
+    :stream_name:
+      The name of the stream.
+
+    :stream:
+      A dict mapping each filename to a list of `_range.LocatorAndRange` objects.
+
+    """
+
+    stream_name = escape(stream_name)
+    stream_tokens = [stream_name]
+    sortedfiles = list(stream.keys())
+    sortedfiles.sort()
+
+    blocks = {}
+    streamoffset = 0
+    # Go through each file and add each referenced block exactly once.
+    for streamfile in sortedfiles:
+        for segment in stream[streamfile]:
+            if segment.locator not in blocks:
+                stream_tokens.append(segment.locator)
+                blocks[segment.locator] = streamoffset
+                streamoffset += segment.block_size
+
+    # Add the empty block if the stream is otherwise empty.
+    if len(stream_tokens) == 1:
+        stream_tokens.append(config.EMPTY_BLOCK_LOCATOR)
+
+    for streamfile in sortedfiles:
+        # Add in file segments
+        current_span = None
+        fout = escape(streamfile)
+        for segment in stream[streamfile]:
+            # Collapse adjacent segments
+            streamoffset = blocks[segment.locator] + segment.segment_offset
+            if current_span is None:
+                current_span = [streamoffset, streamoffset + segment.segment_size]
+            else:
+                if streamoffset == current_span[1]:
+                    current_span[1] += segment.segment_size
+                else:
+                    stream_tokens.append(u"{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
+                    current_span = [streamoffset, streamoffset + segment.segment_size]
+
+        if current_span is not None:
+            stream_tokens.append(u"{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
+
+        if not stream[streamfile]:
+            stream_tokens.append(u"0:0:{0}".format(fout))
+
+    return stream_tokens
diff --git a/sdk/python/arvados/_ranges.py b/sdk/python/arvados/_ranges.py
new file mode 100644 (file)
index 0000000..bb245ab
--- /dev/null
@@ -0,0 +1,227 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import division
+from builtins import object
+import logging
+
+_logger = logging.getLogger('arvados.ranges')
+
+# Log level below 'debug' !
+RANGES_SPAM = 9
+
+class Range(object):
+    __slots__ = ("locator", "range_start", "range_size", "segment_offset")
+
+    def __init__(self, locator, range_start, range_size, segment_offset=0):
+        self.locator = locator
+        self.range_start = range_start
+        self.range_size = range_size
+        self.segment_offset = segment_offset
+
+    def __repr__(self):
+        return "Range(%r, %r, %r, %r)" % (self.locator, self.range_start, self.range_size, self.segment_offset)
+
+    def __eq__(self, other):
+        return (self.locator == other.locator and
+                self.range_start == other.range_start and
+                self.range_size == other.range_size and
+                self.segment_offset == other.segment_offset)
+
+def first_block(data_locators, range_start):
+    block_start = 0
+
+    # range_start/block_start is the inclusive lower bound
+    # range_end/block_end is the exclusive upper bound
+
+    hi = len(data_locators)
+    lo = 0
+    i = (hi + lo) // 2
+    block_size = data_locators[i].range_size
+    block_start = data_locators[i].range_start
+    block_end = block_start + block_size
+
+    # perform a binary search for the first block
+    # assumes that all of the blocks are contiguous, so range_start is guaranteed
+    # to either fall into the range of a block or be outside the block range entirely
+    while not (range_start >= block_start and range_start < block_end):
+        if lo == i:
+            # must be out of range, fail
+            return None
+        if range_start > block_start:
+            lo = i
+        else:
+            hi = i
+        i = (hi + lo) // 2
+        block_size = data_locators[i].range_size
+        block_start = data_locators[i].range_start
+        block_end = block_start + block_size
+
+    return i
+
+class LocatorAndRange(object):
+    __slots__ = ("locator", "block_size", "segment_offset", "segment_size")
+
+    def __init__(self, locator, block_size, segment_offset, segment_size):
+        self.locator = locator
+        self.block_size = block_size
+        self.segment_offset = segment_offset
+        self.segment_size = segment_size
+
+    def __eq__(self, other):
+        return  (self.locator == other.locator and
+                 self.block_size == other.block_size and
+                 self.segment_offset == other.segment_offset and
+                 self.segment_size == other.segment_size)
+
+    def __repr__(self):
+        return "LocatorAndRange(%r, %r, %r, %r)" % (self.locator, self.block_size, self.segment_offset, self.segment_size)
+
+def locators_and_ranges(data_locators, range_start, range_size, limit=None):
+    """Get blocks that are covered by a range.
+
+    Returns a list of LocatorAndRange objects.
+
+    :data_locators:
+      list of Range objects, assumes that blocks are in order and contiguous
+
+    :range_start:
+      start of range
+
+    :range_size:
+      size of range
+
+    :limit:
+      Maximum segments to return, default None (unlimited).  Will truncate the
+      result if there are more segments needed to cover the range than the
+      limit.
+
+    """
+    if range_size == 0:
+        return []
+    resp = []
+    range_end = range_start + range_size
+
+    i = first_block(data_locators, range_start)
+    if i is None:
+        return []
+
+    # We should always start at the first segment due to the binary
+    # search.
+    while i < len(data_locators) and len(resp) != limit:
+        dl = data_locators[i]
+        block_start = dl.range_start
+        block_size = dl.range_size
+        block_end = block_start + block_size
+        _logger.log(RANGES_SPAM,
+            "L&R %s range_start %s block_start %s range_end %s block_end %s",
+            dl.locator, range_start, block_start, range_end, block_end)
+        if range_end <= block_start:
+            # range ends before this block starts, so don't look at any more locators
+            break
+
+        if range_start >= block_start and range_end <= block_end:
+            # range starts and ends in this block
+            resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset + (range_start - block_start), range_size))
+        elif range_start >= block_start and range_end > block_end:
+            # range starts in this block
+            resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset + (range_start - block_start), block_end - range_start))
+        elif range_start < block_start and range_end > block_end:
+            # range starts in a previous block and extends to further blocks
+            resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset, block_size))
+        elif range_start < block_start and range_end <= block_end:
+            # range starts in a previous block and ends in this block
+            resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset, range_end - block_start))
+        block_start = block_end
+        i += 1
+    return resp
+
+def replace_range(data_locators, new_range_start, new_range_size, new_locator, new_segment_offset):
+    """
+    Replace a file segment range with a new segment.
+
+    NOTE::
+    data_locators will be updated in place
+
+    :data_locators:
+      list of Range objects, assumes that segments are in order and contiguous
+
+    :new_range_start:
+      start of range to replace in data_locators
+
+    :new_range_size:
+      size of range to replace in data_locators
+
+    :new_locator:
+      locator for new segment to be inserted
+
+    :new_segment_offset:
+      segment offset within the locator
+
+    """
+    if new_range_size == 0:
+        return
+
+    new_range_end = new_range_start + new_range_size
+
+    if len(data_locators) == 0:
+        data_locators.append(Range(new_locator, new_range_start, new_range_size, new_segment_offset))
+        return
+
+    last = data_locators[-1]
+    if (last.range_start+last.range_size) == new_range_start:
+        if last.locator == new_locator and (last.segment_offset+last.range_size) == new_segment_offset:
+            # extend last segment
+            last.range_size += new_range_size
+        else:
+            data_locators.append(Range(new_locator, new_range_start, new_range_size, new_segment_offset))
+        return
+
+    i = first_block(data_locators, new_range_start)
+    if i is None:
+        return
+
+    # We should always start at the first segment due to the binary
+    # search.
+    while i < len(data_locators):
+        dl = data_locators[i]
+        old_segment_start = dl.range_start
+        old_segment_end = old_segment_start + dl.range_size
+        _logger.log(RANGES_SPAM,
+            "RR %s range_start %s segment_start %s range_end %s segment_end %s",
+            dl, new_range_start, old_segment_start, new_range_end,
+            old_segment_end)
+        if new_range_end <= old_segment_start:
+            # range ends before this segment starts, so don't look at any more locators
+            break
+
+        if old_segment_start <= new_range_start and new_range_end <= old_segment_end:
+            # new range starts and ends in old segment
+            # split segment into up to 3 pieces
+            if (new_range_start-old_segment_start) > 0:
+                data_locators[i] = Range(dl.locator, old_segment_start, (new_range_start-old_segment_start), dl.segment_offset)
+                data_locators.insert(i+1, Range(new_locator, new_range_start, new_range_size, new_segment_offset))
+            else:
+                data_locators[i] = Range(new_locator, new_range_start, new_range_size, new_segment_offset)
+                i -= 1
+            if (old_segment_end-new_range_end) > 0:
+                data_locators.insert(i+2, Range(dl.locator, new_range_end, (old_segment_end-new_range_end), dl.segment_offset + (new_range_start-old_segment_start) + new_range_size))
+            return
+        elif old_segment_start <= new_range_start and new_range_end > old_segment_end:
+            # range starts in this segment
+            # split segment into 2 pieces
+            data_locators[i] = Range(dl.locator, old_segment_start, (new_range_start-old_segment_start), dl.segment_offset)
+            data_locators.insert(i+1, Range(new_locator, new_range_start, new_range_size, new_segment_offset))
+            i += 1
+        elif new_range_start < old_segment_start and new_range_end >= old_segment_end:
+            # range starts in a previous segment and extends to further segments
+            # delete this segment
+            del data_locators[i]
+            i -= 1
+        elif new_range_start < old_segment_start and new_range_end < old_segment_end:
+            # range starts in a previous segment and ends in this segment
+            # move the starting point of this segment up, and shrink it.
+            data_locators[i] = Range(dl.locator, new_range_end, (old_segment_end-new_range_end), dl.segment_offset + (new_range_end-old_segment_start))
+            return
+        i += 1
diff --git a/sdk/python/arvados/api.py b/sdk/python/arvados/api.py
new file mode 100644 (file)
index 0000000..b18ce25
--- /dev/null
@@ -0,0 +1,277 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
+from builtins import range
+import collections
+import http.client
+import httplib2
+import json
+import logging
+import os
+import re
+import socket
+import time
+import types
+
+import apiclient
+from apiclient import discovery as apiclient_discovery
+from apiclient import errors as apiclient_errors
+from . import config
+from . import errors
+from . import util
+from . import cache
+
+_logger = logging.getLogger('arvados.api')
+
+MAX_IDLE_CONNECTION_DURATION = 30
+RETRY_DELAY_INITIAL = 2
+RETRY_DELAY_BACKOFF = 2
+RETRY_COUNT = 2
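+# With these defaults, a retryable request is attempted up to RETRY_COUNT + 1
+# times, sleeping RETRY_DELAY_INITIAL seconds before the first retry and
+# multiplying the delay by RETRY_DELAY_BACKOFF after each attempt.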
+
+class OrderedJsonModel(apiclient.model.JsonModel):
+    """Model class for JSON that preserves the contents' order.
+
+    API clients that care about preserving the order of fields in API
+    server responses can use this model to do so, like this::
+
+        from arvados.api import OrderedJsonModel
+        client = arvados.api('v1', ..., model=OrderedJsonModel())
+    """
+
+    def deserialize(self, content):
+        # This is a very slightly modified version of the parent class'
+        # implementation.  Copyright (c) 2010 Google.
+        content = content.decode('utf-8')
+        body = json.loads(content, object_pairs_hook=collections.OrderedDict)
+        if self._data_wrapper and isinstance(body, dict) and 'data' in body:
+            body = body['data']
+        return body
+
+
+def _intercept_http_request(self, uri, method="GET", headers=None, **kwargs):
+    if headers is None:
+        headers = {}
+    if (self.max_request_size and
+        kwargs.get('body') and
+        self.max_request_size < len(kwargs['body'])):
+        raise apiclient_errors.MediaUploadSizeError("Request size %i bytes exceeds published limit of %i bytes" % (len(kwargs['body']), self.max_request_size))
+
+    if config.get("ARVADOS_EXTERNAL_CLIENT", "") == "true":
+        headers['X-External-Client'] = '1'
+
+    headers['Authorization'] = 'OAuth2 %s' % self.arvados_api_token
+    if not headers.get('X-Request-Id'):
+        headers['X-Request-Id'] = self._request_id()
+
+    retryable = method in [
+        'DELETE', 'GET', 'HEAD', 'OPTIONS', 'PUT']
+    retry_count = self._retry_count if retryable else 0
+
+    if (not retryable and
+        time.time() - self._last_request_time > self._max_keepalive_idle):
+        # High probability of failure due to connection atrophy. Make
+        # sure this request [re]opens a new connection by closing and
+        # forgetting all cached connections first.
+        for conn in self.connections.values():
+            conn.close()
+        self.connections.clear()
+
+    delay = self._retry_delay_initial
+    for _ in range(retry_count):
+        self._last_request_time = time.time()
+        try:
+            return self.orig_http_request(uri, method, headers=headers, **kwargs)
+        except http.client.HTTPException:
+            _logger.debug("Retrying API request in %d s after HTTP error",
+                          delay, exc_info=True)
+        except socket.error:
+            # This is the one case where httplib2 doesn't close the
+            # underlying connection first.  Close all open
+            # connections, expecting this object only has the one
+            # connection to the API server.  This is safe because
+            # httplib2 reopens connections when needed.
+            _logger.debug("Retrying API request in %d s after socket error",
+                          delay, exc_info=True)
+            for conn in self.connections.values():
+                conn.close()
+        except httplib2.SSLHandshakeError as e:
+            # Intercept and re-raise with a better error message.
+            raise httplib2.SSLHandshakeError("Could not connect to %s\n%s\nPossible causes: remote SSL/TLS certificate expired, or was issued by an untrusted certificate authority." % (uri, e))
+
+        time.sleep(delay)
+        delay = delay * self._retry_delay_backoff
+
+    self._last_request_time = time.time()
+    return self.orig_http_request(uri, method, headers=headers, **kwargs)
+
+def _patch_http_request(http, api_token):
+    http.arvados_api_token = api_token
+    http.max_request_size = 0
+    http.orig_http_request = http.request
+    http.request = types.MethodType(_intercept_http_request, http)
+    http._last_request_time = 0
+    http._max_keepalive_idle = MAX_IDLE_CONNECTION_DURATION
+    http._retry_delay_initial = RETRY_DELAY_INITIAL
+    http._retry_delay_backoff = RETRY_DELAY_BACKOFF
+    http._retry_count = RETRY_COUNT
+    http._request_id = util.new_request_id
+    return http
+
+# Monkey patch discovery._cast() so objects and arrays get serialized
+# with json.dumps() instead of str().
+_cast_orig = apiclient_discovery._cast
+def _cast_objects_too(value, schema_type):
+    global _cast_orig
+    if (type(value) != type('') and
+        type(value) != type(b'') and
+        (schema_type == 'object' or schema_type == 'array')):
+        return json.dumps(value)
+    else:
+        return _cast_orig(value, schema_type)
+apiclient_discovery._cast = _cast_objects_too
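+# For example, a request parameter like filters=[["name", "=", "foo"]] is
+# now sent as the JSON text '[["name", "=", "foo"]]' rather than Python's
+# str() form, which the API server could not parse.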
+
+# Convert apiclient's HttpErrors into our own API error subclass for better
+# error reporting.
+# Reassigning apiclient_errors.HttpError is not sufficient because most of the
+# apiclient submodules import the class into their own namespace.
+def _new_http_error(cls, *args, **kwargs):
+    return super(apiclient_errors.HttpError, cls).__new__(
+        errors.ApiError, *args, **kwargs)
+apiclient_errors.HttpError.__new__ = staticmethod(_new_http_error)
+
+def http_cache(data_type):
+    homedir = os.environ.get('HOME')
+    if not homedir:
+        return None
+    path = homedir + '/.cache/arvados/' + data_type
+    try:
+        util.mkdir_dash_p(path)
+    except OSError:
+        return None
+    return cache.SafeHTTPCache(path, max_age=60*60*24*2)
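+
+# For example, http_cache('discovery') returns a SafeHTTPCache rooted at
+# ~/.cache/arvados/discovery with a two-day max_age, or None when the
+# cache directory cannot be created.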
+
+def api(version=None, cache=True, host=None, token=None, insecure=False,
+        request_id=None, timeout=5*60, **kwargs):
+    """Return an apiclient Resources object for an Arvados instance.
+
+    :version:
+      A string naming the version of the Arvados API to use (for
+      example, 'v1').
+
+    :cache:
+      Use a cache (~/.cache/arvados/discovery) for the discovery
+      document.
+
+    :host:
+      The Arvados API server host (and optional :port) to connect to.
+
+    :token:
+      The authentication token to send with each API call.
+
+    :insecure:
+      If True, ignore SSL certificate validation errors.
+
+    :timeout:
+      A timeout value for http requests.
+
+    :request_id:
+      Default X-Request-Id header value for outgoing requests that
+      don't already provide one. If None or omitted, generate a random
+      ID. When retrying failed requests, the same ID is used on all
+      attempts.
+
+    Additional keyword arguments will be passed directly to
+    `apiclient_discovery.build` if a new Resource object is created.
+    If the `discoveryServiceUrl` or `http` keyword arguments are
+    missing, this function will set default values for them, based on
+    the current Arvados configuration settings.
+
+    """
+
+    if not version:
+        version = 'v1'
+        _logger.info("Using default API version. " +
+                     "Call arvados.api('%s') instead." %
+                     version)
+    if 'discoveryServiceUrl' in kwargs:
+        if host:
+            raise ValueError("both discoveryServiceUrl and host provided")
+        # Here we can't use a token from environment, config file,
+        # etc. Those probably have nothing to do with the host
+        # provided by the caller.
+        if not token:
+            raise ValueError("discoveryServiceUrl provided, but token missing")
+    elif host and token:
+        pass
+    elif not host and not token:
+        return api_from_config(
+            version=version, cache=cache, request_id=request_id, **kwargs)
+    else:
+        # Caller provided one but not the other
+        if not host:
+            raise ValueError("token argument provided, but host missing.")
+        else:
+            raise ValueError("host argument provided, but token missing.")
+
+    if host:
+        # Caller wants us to build the discoveryServiceUrl
+        kwargs['discoveryServiceUrl'] = (
+            'https://%s/discovery/v1/apis/{api}/{apiVersion}/rest' % (host,))
+
+    if 'http' not in kwargs:
+        http_kwargs = {'ca_certs': util.ca_certs_path()}
+        if cache:
+            http_kwargs['cache'] = http_cache('discovery')
+        if insecure:
+            http_kwargs['disable_ssl_certificate_validation'] = True
+        kwargs['http'] = httplib2.Http(**http_kwargs)
+
+    if kwargs['http'].timeout is None:
+        kwargs['http'].timeout = timeout
+
+    kwargs['http'] = _patch_http_request(kwargs['http'], token)
+
+    svc = apiclient_discovery.build('arvados', version, cache_discovery=False, **kwargs)
+    svc.api_token = token
+    svc.insecure = insecure
+    svc.request_id = request_id
+    kwargs['http'].max_request_size = svc._rootDesc.get('maxRequestSize', 0)
+    kwargs['http'].cache = None
+    kwargs['http']._request_id = lambda: svc.request_id or util.new_request_id()
+    return svc
+
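+# A usage sketch (hedged): with no host/token arguments the call falls
+# through to api_from_config(), so ARVADOS_API_HOST and ARVADOS_API_TOKEN
+# are assumed to be set in the environment or config file:
+#
+#     import arvados
+#     api = arvados.api('v1')
+#     me = api.users().current().execute()
+#     print(me['uuid'])
+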
+def api_from_config(version=None, apiconfig=None, **kwargs):
+    """Return an apiclient Resources object enabling access to an Arvados server
+    instance.
+
+    :version:
+      A string naming the version of the Arvados REST API to use (for
+      example, 'v1').
+
+    :apiconfig:
+      If provided, this should be a dict-like object (must support the get()
+      method) with entries for ARVADOS_API_HOST, ARVADOS_API_TOKEN, and
+      optionally ARVADOS_API_HOST_INSECURE.  If not provided, use
+      arvados.config (which gets these parameters from the environment by
+      default.)
+
+    Other keyword arguments such as `cache` will be passed along to `api()`.
+
+    """
+    # Load from user configuration or environment
+    if apiconfig is None:
+        apiconfig = config.settings()
+
+    errors = []
+    for x in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']:
+        if x not in apiconfig:
+            errors.append(x)
+    if errors:
+        raise ValueError(" and ".join(errors)+" not set.\nPlease set in %s or export environment variable." % config.default_config_file)
+    host = apiconfig.get('ARVADOS_API_HOST')
+    token = apiconfig.get('ARVADOS_API_TOKEN')
+    insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE', apiconfig)
+
+    return api(version=version, host=host, token=token, insecure=insecure, **kwargs)
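+
+# A hedged example with an explicit apiconfig mapping (the host and token
+# below are placeholders, not real credentials):
+#
+#     import arvados
+#     client = arvados.api_from_config('v1', apiconfig={
+#         'ARVADOS_API_HOST': 'zzzzz.arvadosapi.com',
+#         'ARVADOS_API_TOKEN': 'placeholder-token',
+#     })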
diff --git a/sdk/python/arvados/arvfile.py b/sdk/python/arvados/arvfile.py
new file mode 100644 (file)
index 0000000..37666eb
--- /dev/null
@@ -0,0 +1,1390 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from __future__ import division
+from future import standard_library
+from future.utils import listitems, listvalues
+standard_library.install_aliases()
+from builtins import range
+from builtins import object
+import bz2
+import collections
+import copy
+import errno
+import functools
+import hashlib
+import logging
+import os
+import queue
+import re
+import sys
+import threading
+import uuid
+import zlib
+
+from . import config
+from .errors import KeepWriteError, AssertionError, ArgumentError
+from .keep import KeepLocator
+from ._normalize_stream import normalize_stream
+from ._ranges import locators_and_ranges, replace_range, Range, LocatorAndRange
+from .retry import retry_method
+
+MOD = "mod"
+WRITE = "write"
+
+_logger = logging.getLogger('arvados.arvfile')
+
+def split(path):
+    """split(path) -> streamname, filename
+
+    Separate the stream name and file name in a /-separated stream path and
+    return a tuple (stream_name, file_name).  If no stream name is available,
+    assume '.'.
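+
+    For example, split("foo/bar/baz.txt") returns ("foo/bar", "baz.txt"),
+    and split("baz.txt") returns (".", "baz.txt").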
+
+    """
+    try:
+        stream_name, file_name = path.rsplit('/', 1)
+    except ValueError:  # No / in string
+        stream_name, file_name = '.', path
+    return stream_name, file_name
+
+
+class UnownedBlockError(Exception):
+    """Raised when there's an writable block without an owner on the BlockManager."""
+    pass
+
+
+class _FileLikeObjectBase(object):
+    def __init__(self, name, mode):
+        self.name = name
+        self.mode = mode
+        self.closed = False
+
+    @staticmethod
+    def _before_close(orig_func):
+        @functools.wraps(orig_func)
+        def before_close_wrapper(self, *args, **kwargs):
+            if self.closed:
+                raise ValueError("I/O operation on closed stream file")
+            return orig_func(self, *args, **kwargs)
+        return before_close_wrapper
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        try:
+            self.close()
+        except Exception:
+            if exc_type is None:
+                raise
+
+    def close(self):
+        self.closed = True
+
+
+class ArvadosFileReaderBase(_FileLikeObjectBase):
+    def __init__(self, name, mode, num_retries=None):
+        super(ArvadosFileReaderBase, self).__init__(name, mode)
+        self._filepos = 0
+        self.num_retries = num_retries
+        self._readline_cache = (None, None)
+
+    def __iter__(self):
+        while True:
+            data = self.readline()
+            if not data:
+                break
+            yield data
+
+    def decompressed_name(self):
+        return re.sub(r'\.(bz2|gz)$', '', self.name)
+
+    @_FileLikeObjectBase._before_close
+    def seek(self, pos, whence=os.SEEK_SET):
+        if whence == os.SEEK_CUR:
+            pos += self._filepos
+        elif whence == os.SEEK_END:
+            pos += self.size()
+        if pos < 0:
+            raise IOError(errno.EINVAL, "Tried to seek to negative file offset.")
+        self._filepos = pos
+        return self._filepos
+
+    def tell(self):
+        return self._filepos
+
+    def readable(self):
+        return True
+
+    def writable(self):
+        return False
+
+    def seekable(self):
+        return True
+
+    @_FileLikeObjectBase._before_close
+    @retry_method
+    def readall(self, size=2**20, num_retries=None):
+        while True:
+            data = self.read(size, num_retries=num_retries)
+            if len(data) == 0:
+                break
+            yield data
+
+    @_FileLikeObjectBase._before_close
+    @retry_method
+    def readline(self, size=float('inf'), num_retries=None):
+        cache_pos, cache_data = self._readline_cache
+        if self.tell() == cache_pos:
+            data = [cache_data]
+            self._filepos += len(cache_data)
+        else:
+            data = [b'']
+        data_size = len(data[-1])
+        while (data_size < size) and (b'\n' not in data[-1]):
+            next_read = self.read(2 ** 20, num_retries=num_retries)
+            if not next_read:
+                break
+            data.append(next_read)
+            data_size += len(next_read)
+        data = b''.join(data)
+        try:
+            nextline_index = data.index(b'\n') + 1
+        except ValueError:
+            nextline_index = len(data)
+        nextline_index = min(nextline_index, size)
+        self._filepos -= len(data) - nextline_index
+        self._readline_cache = (self.tell(), data[nextline_index:])
+        return data[:nextline_index].decode()
+
+    @_FileLikeObjectBase._before_close
+    @retry_method
+    def decompress(self, decompress, size, num_retries=None):
+        for segment in self.readall(size, num_retries=num_retries):
+            data = decompress(segment)
+            if data:
+                yield data
+
+    @_FileLikeObjectBase._before_close
+    @retry_method
+    def readall_decompressed(self, size=2**20, num_retries=None):
+        self.seek(0)
+        if self.name.endswith('.bz2'):
+            dc = bz2.BZ2Decompressor()
+            return self.decompress(dc.decompress, size,
+                                   num_retries=num_retries)
+        elif self.name.endswith('.gz'):
+            dc = zlib.decompressobj(16+zlib.MAX_WBITS)
+            return self.decompress(lambda segment: dc.decompress(dc.unconsumed_tail + segment),
+                                   size, num_retries=num_retries)
+        else:
+            return self.readall(size, num_retries=num_retries)
+
+    @_FileLikeObjectBase._before_close
+    @retry_method
+    def readlines(self, sizehint=float('inf'), num_retries=None):
+        data = []
+        data_size = 0
+        for s in self.readall(num_retries=num_retries):
+            data.append(s)
+            data_size += len(s)
+            if data_size >= sizehint:
+                break
+        return b''.join(data).decode().splitlines(True)
+
+    def size(self):
+        raise IOError(errno.ENOSYS, "Not implemented")
+
+    def read(self, size, num_retries=None):
+        raise IOError(errno.ENOSYS, "Not implemented")
+
+    def readfrom(self, start, size, num_retries=None):
+        raise IOError(errno.ENOSYS, "Not implemented")
+
+
+class StreamFileReader(ArvadosFileReaderBase):
+    class _NameAttribute(str):
+        # The Python file API provides a plain .name attribute.
+        # Older SDK provided a name() method.
+        # This class provides both, for maximum compatibility.
+        def __call__(self):
+            return self
+
+    def __init__(self, stream, segments, name):
+        super(StreamFileReader, self).__init__(self._NameAttribute(name), 'rb', num_retries=stream.num_retries)
+        self._stream = stream
+        self.segments = segments
+
+    def stream_name(self):
+        return self._stream.name()
+
+    def size(self):
+        n = self.segments[-1]
+        return n.range_start + n.range_size
+
+    @_FileLikeObjectBase._before_close
+    @retry_method
+    def read(self, size, num_retries=None):
+        """Read up to 'size' bytes from the stream, starting at the current file position"""
+        if size == 0:
+            return b''
+
+        data = b''
+        available_chunks = locators_and_ranges(self.segments, self._filepos, size)
+        if available_chunks:
+            lr = available_chunks[0]
+            data = self._stream.readfrom(lr.locator+lr.segment_offset,
+                                         lr.segment_size,
+                                         num_retries=num_retries)
+
+        self._filepos += len(data)
+        return data
+
+    @_FileLikeObjectBase._before_close
+    @retry_method
+    def readfrom(self, start, size, num_retries=None):
+        """Read up to 'size' bytes from the stream, starting at 'start'"""
+        if size == 0:
+            return b''
+
+        data = []
+        for lr in locators_and_ranges(self.segments, start, size):
+            data.append(self._stream.readfrom(lr.locator+lr.segment_offset, lr.segment_size,
+                                              num_retries=num_retries))
+        return b''.join(data)
+
+    def as_manifest(self):
+        segs = []
+        for r in self.segments:
+            segs.extend(self._stream.locators_and_ranges(r.locator, r.range_size))
+        return " ".join(normalize_stream(".", {self.name: segs})) + "\n"
+
+
+def synchronized(orig_func):
+    @functools.wraps(orig_func)
+    def synchronized_wrapper(self, *args, **kwargs):
+        with self.lock:
+            return orig_func(self, *args, **kwargs)
+    return synchronized_wrapper
+
+
+class StateChangeError(Exception):
+    def __init__(self, message, state, nextstate):
+        super(StateChangeError, self).__init__(message)
+        self.state = state
+        self.nextstate = nextstate
+
+class _BufferBlock(object):
+    """A stand-in for a Keep block that is in the process of being written.
+
+    Writers can append to it, get the size, and compute the Keep locator.
+    There are five valid states:
+
+    WRITABLE
+      Can append to block.
+
+    PENDING
+      Block is in the process of being uploaded to Keep, append is an error.
+
+    COMMITTED
+      The block has been written to Keep, its internal buffer has been
+      released, fetching the block will fetch it via keep client (since we
+      discarded the internal copy), and identifiers referring to the BufferBlock
+      can be replaced with the block locator.
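+
+    ERROR
+      The upload failed; the exception that caused the failure is stored
+      in .error.
+
+    DELETED
+      The buffer has been released, e.g. after its contents were repacked
+      into another block (see clear() and repack_small_blocks()).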
+
+    """
+
+    WRITABLE = 0
+    PENDING = 1
+    COMMITTED = 2
+    ERROR = 3
+    DELETED = 4
+
+    def __init__(self, blockid, starting_capacity, owner):
+        """
+        :blockid:
+          the identifier for this block
+
+        :starting_capacity:
+          the initial buffer capacity
+
+        :owner:
+          ArvadosFile that owns this block
+
+        """
+        self.blockid = blockid
+        self.buffer_block = bytearray(starting_capacity)
+        self.buffer_view = memoryview(self.buffer_block)
+        self.write_pointer = 0
+        self._state = _BufferBlock.WRITABLE
+        self._locator = None
+        self.owner = owner
+        self.lock = threading.Lock()
+        self.wait_for_commit = threading.Event()
+        self.error = None
+
+    @synchronized
+    def append(self, data):
+        """Append some data to the buffer.
+
+        Only valid if the block is in WRITABLE state.  Implements an expanding
+        buffer, doubling capacity as needed to accommodate all the data.
+
+        """
+        if self._state == _BufferBlock.WRITABLE:
+            if not isinstance(data, bytes) and not isinstance(data, memoryview):
+                data = data.encode()
+            while (self.write_pointer+len(data)) > len(self.buffer_block):
+                new_buffer_block = bytearray(len(self.buffer_block) * 2)
+                new_buffer_block[0:self.write_pointer] = self.buffer_block[0:self.write_pointer]
+                self.buffer_block = new_buffer_block
+                self.buffer_view = memoryview(self.buffer_block)
+            self.buffer_view[self.write_pointer:self.write_pointer+len(data)] = data
+            self.write_pointer += len(data)
+            self._locator = None
+        else:
+            raise AssertionError("Buffer block is not writable")
+
+    STATE_TRANSITIONS = frozenset([
+            (WRITABLE, PENDING),
+            (PENDING, COMMITTED),
+            (PENDING, ERROR),
+            (ERROR, PENDING)])
+
+    @synchronized
+    def set_state(self, nextstate, val=None):
+        if (self._state, nextstate) not in self.STATE_TRANSITIONS:
+            raise StateChangeError("Invalid state change from %s to %s" % (self._state, nextstate), self._state, nextstate)
+        self._state = nextstate
+
+        if self._state == _BufferBlock.PENDING:
+            self.wait_for_commit.clear()
+
+        if self._state == _BufferBlock.COMMITTED:
+            self._locator = val
+            self.buffer_view = None
+            self.buffer_block = None
+            self.wait_for_commit.set()
+
+        if self._state == _BufferBlock.ERROR:
+            self.error = val
+            self.wait_for_commit.set()
+
+    @synchronized
+    def state(self):
+        return self._state
+
+    def size(self):
+        """The amount of data written to the buffer."""
+        return self.write_pointer
+
+    @synchronized
+    def locator(self):
+        """The Keep locator for this buffer's contents."""
+        if self._locator is None:
+            self._locator = "%s+%i" % (hashlib.md5(self.buffer_view[0:self.write_pointer]).hexdigest(), self.size())
+        return self._locator
+
+    @synchronized
+    def clone(self, new_blockid, owner):
+        if self._state == _BufferBlock.COMMITTED:
+            raise AssertionError("Cannot duplicate committed buffer block")
+        bufferblock = _BufferBlock(new_blockid, self.size(), owner)
+        bufferblock.append(self.buffer_view[0:self.size()])
+        return bufferblock
+
+    @synchronized
+    def clear(self):
+        self._state = _BufferBlock.DELETED
+        self.owner = None
+        self.buffer_block = None
+        self.buffer_view = None
+
+    @synchronized
+    def repack_writes(self):
+        """Optimize buffer block by repacking segments in file sequence.
+
+        When the client makes random writes, they appear in the buffer block in
+        the sequence they were written rather than the sequence they appear in
+        the file.  This makes for inefficient, fragmented manifests.  Attempt
+        to optimize by repacking writes in file sequence.
+
+        """
+        if self._state != _BufferBlock.WRITABLE:
+            raise AssertionError("Cannot repack non-writable block")
+
+        segs = self.owner.segments()
+
+        # Collect the segments that reference the buffer block.
+        bufferblock_segs = [s for s in segs if s.locator == self.blockid]
+
+        # Collect total data referenced by segments (could be smaller than
+        # bufferblock size if a portion of the file was written and
+        # then overwritten).
+        write_total = sum([s.range_size for s in bufferblock_segs])
+
+        if write_total < self.size() or len(bufferblock_segs) > 1:
+            # If there's more than one segment referencing this block, it is
+            # due to out-of-order writes and will produce a fragmented
+            # manifest, so try to optimize by re-packing into a new buffer.
+            contents = self.buffer_view[0:self.write_pointer].tobytes()
+            new_bb = _BufferBlock(None, write_total, None)
+            for t in bufferblock_segs:
+                new_bb.append(contents[t.segment_offset:t.segment_offset+t.range_size])
+                t.segment_offset = new_bb.size() - t.range_size
+
+            self.buffer_block = new_bb.buffer_block
+            self.buffer_view = new_bb.buffer_view
+            self.write_pointer = new_bb.write_pointer
+            self._locator = None
+            new_bb.clear()
+            self.owner.set_segments(segs)
+
+    def __repr__(self):
+        return "<BufferBlock %s>" % (self.blockid)
+
+
+class NoopLock(object):
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        pass
+
+    def acquire(self, blocking=False):
+        pass
+
+    def release(self):
+        pass
+
+
+def must_be_writable(orig_func):
+    @functools.wraps(orig_func)
+    def must_be_writable_wrapper(self, *args, **kwargs):
+        if not self.writable():
+            raise IOError(errno.EROFS, "Collection is read-only.")
+        return orig_func(self, *args, **kwargs)
+    return must_be_writable_wrapper
+
+
+class _BlockManager(object):
+    """BlockManager handles buffer blocks.
+
+    Also handles background block uploads, and background block prefetch for a
+    Collection of ArvadosFiles.
+
+    """
+
+    DEFAULT_PUT_THREADS = 2
+    DEFAULT_GET_THREADS = 2
+
+    def __init__(self, keep, copies=None, put_threads=None):
+        """keep: KeepClient object to use"""
+        self._keep = keep
+        self._bufferblocks = collections.OrderedDict()
+        self._put_queue = None
+        self._put_threads = None
+        self._prefetch_queue = None
+        self._prefetch_threads = None
+        self.lock = threading.Lock()
+        self.prefetch_enabled = True
+        if put_threads:
+            self.num_put_threads = put_threads
+        else:
+            self.num_put_threads = _BlockManager.DEFAULT_PUT_THREADS
+        self.num_get_threads = _BlockManager.DEFAULT_GET_THREADS
+        self.copies = copies
+        self._pending_write_size = 0
+        self.threads_lock = threading.Lock()
+        self.padding_block = None
+
+    @synchronized
+    def alloc_bufferblock(self, blockid=None, starting_capacity=2**14, owner=None):
+        """Allocate a new, empty bufferblock in WRITABLE state and return it.
+
+        :blockid:
+          optional block identifier, otherwise one will be automatically assigned
+
+        :starting_capacity:
+          optional capacity, otherwise will use default capacity
+
+        :owner:
+          ArvadosFile that owns this block
+
+        """
+        return self._alloc_bufferblock(blockid, starting_capacity, owner)
+
+    def _alloc_bufferblock(self, blockid=None, starting_capacity=2**14, owner=None):
+        if blockid is None:
+            blockid = str(uuid.uuid4())
+        bufferblock = _BufferBlock(blockid, starting_capacity=starting_capacity, owner=owner)
+        self._bufferblocks[bufferblock.blockid] = bufferblock
+        return bufferblock
+
+    @synchronized
+    def dup_block(self, block, owner):
+        """Create a new bufferblock initialized with the content of an existing bufferblock.
+
+        :block:
+          the buffer block to copy.
+
+        :owner:
+          ArvadosFile that owns the new block
+
+        """
+        new_blockid = str(uuid.uuid4())
+        bufferblock = block.clone(new_blockid, owner)
+        self._bufferblocks[bufferblock.blockid] = bufferblock
+        return bufferblock
+
+    @synchronized
+    def is_bufferblock(self, locator):
+        return locator in self._bufferblocks
+
+    def _commit_bufferblock_worker(self):
+        """Background uploader thread."""
+
+        while True:
+            try:
+                bufferblock = self._put_queue.get()
+                if bufferblock is None:
+                    return
+
+                if self.copies is None:
+                    loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes())
+                else:
+                    loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes(), copies=self.copies)
+                bufferblock.set_state(_BufferBlock.COMMITTED, loc)
+            except Exception as e:
+                bufferblock.set_state(_BufferBlock.ERROR, e)
+            finally:
+                if self._put_queue is not None:
+                    self._put_queue.task_done()
+
+    def start_put_threads(self):
+        with self.threads_lock:
+            if self._put_threads is None:
+                # Start uploader threads.
+
+                # If we don't limit the Queue size, the upload queue can quickly
+                # grow to take up gigabytes of RAM if the writing process is
+                # generating data more quickly than it can be sent to the Keep
+                # servers.
+                #
+                # With two upload threads and a queue size of 2, this means up to 4
+                # blocks pending.  If they are full 64 MiB blocks, that means up to
+                # 256 MiB of internal buffering, which is the same size as the
+                # default download block cache in KeepClient.
+                self._put_queue = queue.Queue(maxsize=2)
+
+                self._put_threads = []
+                for i in range(0, self.num_put_threads):
+                    thread = threading.Thread(target=self._commit_bufferblock_worker)
+                    self._put_threads.append(thread)
+                    thread.daemon = True
+                    thread.start()
+
+    def _block_prefetch_worker(self):
+        """The background downloader thread."""
+        while True:
+            try:
+                b = self._prefetch_queue.get()
+                if b is None:
+                    return
+                self._keep.get(b)
+            except Exception:
+                _logger.exception("Exception doing block prefetch")
+
+    @synchronized
+    def start_get_threads(self):
+        if self._prefetch_threads is None:
+            self._prefetch_queue = queue.Queue()
+            self._prefetch_threads = []
+            for i in range(0, self.num_get_threads):
+                thread = threading.Thread(target=self._block_prefetch_worker)
+                self._prefetch_threads.append(thread)
+                thread.daemon = True
+                thread.start()
+
+
+    @synchronized
+    def stop_threads(self):
+        """Shut down and wait for background upload and download threads to finish."""
+
+        if self._put_threads is not None:
+            for t in self._put_threads:
+                self._put_queue.put(None)
+            for t in self._put_threads:
+                t.join()
+        self._put_threads = None
+        self._put_queue = None
+
+        if self._prefetch_threads is not None:
+            for t in self._prefetch_threads:
+                self._prefetch_queue.put(None)
+            for t in self._prefetch_threads:
+                t.join()
+        self._prefetch_threads = None
+        self._prefetch_queue = None
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.stop_threads()
+
+    @synchronized
+    def repack_small_blocks(self, force=False, sync=False, closed_file_size=0):
+        """Packs small blocks together before uploading"""
+
+        self._pending_write_size += closed_file_size
+
+        # Check whether there are enough small blocks to fill one full block
+        if not (force or (self._pending_write_size >= config.KEEP_BLOCK_SIZE)):
+            return
+
+        # Find blocks that are ready to be packed together before being
+        # committed to Keep.
+        # A WRITABLE block always has an owner.
+        # A WRITABLE block with its owner.closed() implies that its
+        # size is <= KEEP_BLOCK_SIZE/2.
+        try:
+            small_blocks = [b for b in listvalues(self._bufferblocks)
+                            if b.state() == _BufferBlock.WRITABLE and b.owner.closed()]
+        except AttributeError:
+            # Writable blocks without owner shouldn't exist.
+            raise UnownedBlockError()
+
+        if len(small_blocks) <= 1:
+            # Not enough small blocks for repacking
+            return
+
+        for bb in small_blocks:
+            bb.repack_writes()
+
+        # Update the pending write size count with its true value, just in case
+        # some small file was opened, written and closed several times.
+        self._pending_write_size = sum([b.size() for b in small_blocks])
+
+        if self._pending_write_size < config.KEEP_BLOCK_SIZE and not force:
+            return
+
+        new_bb = self._alloc_bufferblock()
+        new_bb.owner = []
+        files = []
+        while len(small_blocks) > 0 and (new_bb.write_pointer + small_blocks[0].size()) <= config.KEEP_BLOCK_SIZE:
+            bb = small_blocks.pop(0)
+            new_bb.owner.append(bb.owner)
+            self._pending_write_size -= bb.size()
+            new_bb.append(bb.buffer_view[0:bb.write_pointer].tobytes())
+            files.append((bb, new_bb.write_pointer - bb.size()))
+
+        self.commit_bufferblock(new_bb, sync=sync)
+
+        for bb, new_bb_segment_offset in files:
+            newsegs = bb.owner.segments()
+            for s in newsegs:
+                if s.locator == bb.blockid:
+                    s.locator = new_bb.blockid
+                    s.segment_offset = new_bb_segment_offset+s.segment_offset
+            bb.owner.set_segments(newsegs)
+            self._delete_bufferblock(bb.blockid)
+
+    def commit_bufferblock(self, block, sync):
+        """Initiate a background upload of a bufferblock.
+
+        :block:
+          The block object to upload
+
+        :sync:
+          If `sync` is True, upload the block synchronously.
+          If `sync` is False, upload the block asynchronously.  This will
+          return immediately unless the upload queue is at capacity, in
+          which case it will wait on an upload queue slot.
+
+        """
+        try:
+            # Mark the block as PENDING to disallow any further appends.
+            block.set_state(_BufferBlock.PENDING)
+        except StateChangeError as e:
+            if e.state == _BufferBlock.PENDING:
+                if sync:
+                    block.wait_for_commit.wait()
+                else:
+                    return
+            if block.state() == _BufferBlock.COMMITTED:
+                return
+            elif block.state() == _BufferBlock.ERROR:
+                raise block.error
+            else:
+                raise
+
+        if sync:
+            try:
+                if self.copies is None:
+                    loc = self._keep.put(block.buffer_view[0:block.write_pointer].tobytes())
+                else:
+                    loc = self._keep.put(block.buffer_view[0:block.write_pointer].tobytes(), copies=self.copies)
+                block.set_state(_BufferBlock.COMMITTED, loc)
+            except Exception as e:
+                block.set_state(_BufferBlock.ERROR, e)
+                raise
+        else:
+            self.start_put_threads()
+            self._put_queue.put(block)
+
+    @synchronized
+    def get_bufferblock(self, locator):
+        return self._bufferblocks.get(locator)
+
+    @synchronized
+    def get_padding_block(self):
+        """Get a bufferblock 64 MB in size consisting of all zeros, used as padding
+        when using truncate() to extend the size of a file.
+
+        For reference (and possible future optimization), the md5sum of the
+        padding block is: 7f614da9329cd3aebf59b91aadc30bf0+67108864
+
+        """
+
+        if self.padding_block is None:
+            self.padding_block = self._alloc_bufferblock(starting_capacity=config.KEEP_BLOCK_SIZE)
+            self.padding_block.write_pointer = config.KEEP_BLOCK_SIZE
+            self.commit_bufferblock(self.padding_block, False)
+        return self.padding_block
+
+    @synchronized
+    def delete_bufferblock(self, locator):
+        self._delete_bufferblock(locator)
+
+    def _delete_bufferblock(self, locator):
+        bb = self._bufferblocks[locator]
+        bb.clear()
+        del self._bufferblocks[locator]
+
+    def get_block_contents(self, locator, num_retries, cache_only=False):
+        """Fetch a block.
+
+        First checks whether the locator refers to a BufferBlock and, if so,
+        returns its contents; otherwise passes the request through to
+        KeepClient.get().
+
+        """
+        with self.lock:
+            if locator in self._bufferblocks:
+                bufferblock = self._bufferblocks[locator]
+                if bufferblock.state() != _BufferBlock.COMMITTED:
+                    return bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes()
+                else:
+                    locator = bufferblock._locator
+        if cache_only:
+            return self._keep.get_from_cache(locator)
+        else:
+            return self._keep.get(locator, num_retries=num_retries)
+
+    def commit_all(self):
+        """Commit all outstanding buffer blocks.
+
+        This is a synchronous call, and will not return until all buffer blocks
+        are uploaded.  Raises KeepWriteError() if any blocks failed to upload.
+
+        """
+        self.repack_small_blocks(force=True, sync=True)
+
+        with self.lock:
+            items = listitems(self._bufferblocks)
+
+        for k,v in items:
+            if v.state() != _BufferBlock.COMMITTED and v.owner:
+                # Ignore blocks whose owner is a list: if they're not in
+                # COMMITTED state, they're already being committed
+                # asynchronously by repack_small_blocks().
+                if isinstance(v.owner, ArvadosFile):
+                    v.owner.flush(sync=False)
+
+        with self.lock:
+            if self._put_queue is not None:
+                self._put_queue.join()
+
+                err = []
+                for k,v in items:
+                    if v.state() == _BufferBlock.ERROR:
+                        err.append((v.locator(), v.error))
+                if err:
+                    raise KeepWriteError("Error writing some blocks", err, label="block")
+
+        for k,v in items:
+            # flush again with sync=True to remove committed bufferblocks from
+            # the segments.
+            if v.owner:
+                if isinstance(v.owner, ArvadosFile):
+                    v.owner.flush(sync=True)
+                elif isinstance(v.owner, list) and len(v.owner) > 0:
+                    # This bufferblock is referenced by many files as a result
+                    # of repacking small blocks, so don't delete it when flushing
+                    # its owners, just do it after flushing them all.
+                    for owner in v.owner:
+                        owner.flush(sync=True)
+                    self.delete_bufferblock(k)
+
+    def block_prefetch(self, locator):
+        """Initiate a background download of a block.
+
+        This assumes that the underlying KeepClient implements a block cache,
+        so repeated requests for the same block will not result in repeated
+        downloads (unless the block is evicted from the cache).  This method
+        does not block.
+
+        """
+
+        if not self.prefetch_enabled:
+            return
+
+        if self._keep.get_from_cache(locator) is not None:
+            return
+
+        with self.lock:
+            if locator in self._bufferblocks:
+                return
+
+        self.start_get_threads()
+        self._prefetch_queue.put(locator)
+
+
+class ArvadosFile(object):
+    """Represent a file in a Collection.
+
+    ArvadosFile manages the underlying representation of a file in Keep as a
+    sequence of segments spanning a set of blocks, and implements random
+    read/write access.
+
+    This object may be accessed from multiple threads.
+
+    """
+
+    __slots__ = ('parent', 'name', '_writers', '_committed',
+                 '_segments', 'lock', '_current_bblock', 'fuse_entry')
+
+    def __init__(self, parent, name, stream=[], segments=[]):
+        """
+        ArvadosFile constructor.
+
+        :stream:
+          a list of Range objects representing a block stream
+
+        :segments:
+          a list of Range objects representing segments
+        """
+        self.parent = parent
+        self.name = name
+        self._writers = set()
+        self._committed = False
+        self._segments = []
+        self.lock = parent.root_collection().lock
+        for s in segments:
+            self._add_segment(stream, s.locator, s.range_size)
+        self._current_bblock = None
+
+    def writable(self):
+        return self.parent.writable()
+
+    @synchronized
+    def permission_expired(self, as_of_dt=None):
+        """Returns True if any of the segment's locators is expired"""
+        for r in self._segments:
+            if KeepLocator(r.locator).permission_expired(as_of_dt):
+                return True
+        return False
+
+    @synchronized
+    def has_remote_blocks(self):
+        """Returns True if any of the segment's locators has a +R signature"""
+
+        for s in self._segments:
+            if '+R' in s.locator:
+                return True
+        return False
+
+    @synchronized
+    def _copy_remote_blocks(self, remote_blocks={}):
+        """Ask Keep to copy remote blocks and point to their local copies.
+
+        This is called from the parent Collection.
+
+        :remote_blocks:
+            Shared cache of remote to local block mappings. This is used to avoid
+            doing extra work when blocks are shared by more than one file in
+            different subdirectories.
+        """
+
+        for s in self._segments:
+            if '+R' in s.locator:
+                try:
+                    loc = remote_blocks[s.locator]
+                except KeyError:
+                    loc = self.parent._my_keep().refresh_signature(s.locator)
+                    remote_blocks[s.locator] = loc
+                s.locator = loc
+                self.parent.set_committed(False)
+        return remote_blocks
+
+    @synchronized
+    def segments(self):
+        return copy.copy(self._segments)
+
+    @synchronized
+    def clone(self, new_parent, new_name):
+        """Make a copy of this file."""
+        cp = ArvadosFile(new_parent, new_name)
+        cp.replace_contents(self)
+        return cp
+
+    @must_be_writable
+    @synchronized
+    def replace_contents(self, other):
+        """Replace segments of this file with segments from another `ArvadosFile` object."""
+
+        map_loc = {}
+        self._segments = []
+        for other_segment in other.segments():
+            new_loc = other_segment.locator
+            if other.parent._my_block_manager().is_bufferblock(other_segment.locator):
+                if other_segment.locator not in map_loc:
+                    bufferblock = other.parent._my_block_manager().get_bufferblock(other_segment.locator)
+                    if bufferblock.state() != _BufferBlock.WRITABLE:
+                        map_loc[other_segment.locator] = bufferblock.locator()
+                    else:
+                        map_loc[other_segment.locator] = self.parent._my_block_manager().dup_block(bufferblock, self).blockid
+                new_loc = map_loc[other_segment.locator]
+
+            self._segments.append(Range(new_loc, other_segment.range_start, other_segment.range_size, other_segment.segment_offset))
+
+        self.set_committed(False)
+
+    def __eq__(self, other):
+        if other is self:
+            return True
+        if not isinstance(other, ArvadosFile):
+            return False
+
+        othersegs = other.segments()
+        with self.lock:
+            if len(self._segments) != len(othersegs):
+                return False
+            for i in range(0, len(othersegs)):
+                seg1 = self._segments[i]
+                seg2 = othersegs[i]
+                loc1 = seg1.locator
+                loc2 = seg2.locator
+
+                if self.parent._my_block_manager().is_bufferblock(loc1):
+                    loc1 = self.parent._my_block_manager().get_bufferblock(loc1).locator()
+
+                if other.parent._my_block_manager().is_bufferblock(loc2):
+                    loc2 = other.parent._my_block_manager().get_bufferblock(loc2).locator()
+
+                if (KeepLocator(loc1).stripped() != KeepLocator(loc2).stripped() or
+                    seg1.range_start != seg2.range_start or
+                    seg1.range_size != seg2.range_size or
+                    seg1.segment_offset != seg2.segment_offset):
+                    return False
+
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    @synchronized
+    def set_segments(self, segs):
+        self._segments = segs
+
+    @synchronized
+    def set_committed(self, value=True):
+        """Set committed flag.
+
+        If value is True, set committed to be True.
+
+        If value is False, set committed to be False for this and all parents.
+        """
+        if value == self._committed:
+            return
+        self._committed = value
+        if self._committed is False and self.parent is not None:
+            self.parent.set_committed(False)
+
+    @synchronized
+    def committed(self):
+        """Get whether this is committed or not."""
+        return self._committed
+
+    @synchronized
+    def add_writer(self, writer):
+        """Add an ArvadosFileWriter reference to the list of writers"""
+        if isinstance(writer, ArvadosFileWriter):
+            self._writers.add(writer)
+
+    @synchronized
+    def remove_writer(self, writer, flush):
+        """
+        Called from ArvadosFileWriter.close(). Remove a writer reference from the list
+        and do some block maintenance tasks.
+        """
+        self._writers.remove(writer)
+
+        if flush or self.size() > config.KEEP_BLOCK_SIZE // 2:
+            # File writer closed, not small enough for repacking
+            self.flush()
+        elif self.closed():
+            # All writers closed and size is adequate for repacking
+            self.parent._my_block_manager().repack_small_blocks(closed_file_size=self.size())
+
+    def closed(self):
+        """
+        Get whether this file is closed or not. The file is considered closed
+        when the writers list is empty.
+        """
+        return len(self._writers) == 0
+
+    @must_be_writable
+    @synchronized
+    def truncate(self, size):
+        """Shrink or expand the size of the file.
+
+        If `size` is less than the size of the file, the file contents after
+        `size` will be discarded.  If `size` is greater than the current size
+        of the file, it will be filled with zero bytes.
+
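+        For example, truncate(0) discards every segment, while truncating
+        past EOF appends segments that reference the shared zero-filled
+        padding block (see _BlockManager.get_padding_block).
+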
+        """
+        if size < self.size():
+            new_segs = []
+            for r in self._segments:
+                range_end = r.range_start+r.range_size
+                if r.range_start >= size:
+                    # segment is past the truncate size, all done
+                    break
+                elif size < range_end:
+                    nr = Range(r.locator, r.range_start, size - r.range_start, 0)
+                    nr.segment_offset = r.segment_offset
+                    new_segs.append(nr)
+                    break
+                else:
+                    new_segs.append(r)
+
+            self._segments = new_segs
+            self.set_committed(False)
+        elif size > self.size():
+            padding = self.parent._my_block_manager().get_padding_block()
+            diff = size - self.size()
+            while diff > config.KEEP_BLOCK_SIZE:
+                self._segments.append(Range(padding.blockid, self.size(), config.KEEP_BLOCK_SIZE, 0))
+                diff -= config.KEEP_BLOCK_SIZE
+            if diff > 0:
+                self._segments.append(Range(padding.blockid, self.size(), diff, 0))
+            self.set_committed(False)
+        else:
+            # size == self.size()
+            pass
+
+    def readfrom(self, offset, size, num_retries, exact=False):
+        """Read up to `size` bytes from the file starting at `offset`.
+
+        :exact:
+         If False (default), return less data than requested if the read
+         crosses a block boundary and the next block isn't cached.  If True,
+         only return less data than requested when hitting EOF.
+        """
+
+        with self.lock:
+            if size == 0 or offset >= self.size():
+                return b''
+            readsegs = locators_and_ranges(self._segments, offset, size)
+            prefetch = locators_and_ranges(self._segments, offset + size, config.KEEP_BLOCK_SIZE, limit=32)
+
+        locs = set()
+        data = []
+        for lr in readsegs:
+            block = self.parent._my_block_manager().get_block_contents(lr.locator, num_retries=num_retries, cache_only=(bool(data) and not exact))
+            if block:
+                blockview = memoryview(block)
+                data.append(blockview[lr.segment_offset:lr.segment_offset+lr.segment_size].tobytes())
+                locs.add(lr.locator)
+            else:
+                break
+
+        for lr in prefetch:
+            if lr.locator not in locs:
+                self.parent._my_block_manager().block_prefetch(lr.locator)
+                locs.add(lr.locator)
+
+        return b''.join(data)
+
+    @must_be_writable
+    @synchronized
+    def writeto(self, offset, data, num_retries):
+        """Write `data` to the file starting at `offset`.
+
+        This will update existing bytes and/or extend the size of the file as
+        necessary.
+
+        """
+        if not isinstance(data, bytes) and not isinstance(data, memoryview):
+            data = data.encode()
+        if len(data) == 0:
+            return
+
+        if offset > self.size():
+            self.truncate(offset)
+
+        if len(data) > config.KEEP_BLOCK_SIZE:
+            # Chunk it up into smaller writes
+            n = 0
+            dataview = memoryview(data)
+            while n < len(data):
+                self.writeto(offset+n, dataview[n:n + config.KEEP_BLOCK_SIZE].tobytes(), num_retries)
+                n += config.KEEP_BLOCK_SIZE
+            return
+
+        self.set_committed(False)
+
+        if self._current_bblock is None or self._current_bblock.state() != _BufferBlock.WRITABLE:
+            self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)
+
+        if (self._current_bblock.size() + len(data)) > config.KEEP_BLOCK_SIZE:
+            self._current_bblock.repack_writes()
+            if (self._current_bblock.size() + len(data)) > config.KEEP_BLOCK_SIZE:
+                self.parent._my_block_manager().commit_bufferblock(self._current_bblock, sync=False)
+                self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)
+
+        self._current_bblock.append(data)
+
+        replace_range(self._segments, offset, len(data), self._current_bblock.blockid, self._current_bblock.write_pointer - len(data))
+
+        self.parent.notify(WRITE, self.parent, self.name, (self, self))
+
+        return len(data)
+
+    @synchronized
+    def flush(self, sync=True, num_retries=0):
+        """Flush the current bufferblock to Keep.
+
+        :sync:
+          If True, commit the block synchronously and wait until the buffer
+          block has been written.  If False, commit the block asynchronously
+          and return immediately after putting it on the Keep put queue.
+        """
+        if self.committed():
+            return
+
+        if self._current_bblock and self._current_bblock.state() != _BufferBlock.COMMITTED:
+            if self._current_bblock.state() == _BufferBlock.WRITABLE:
+                self._current_bblock.repack_writes()
+            if self._current_bblock.state() != _BufferBlock.DELETED:
+                self.parent._my_block_manager().commit_bufferblock(self._current_bblock, sync=sync)
+
+        if sync:
+            to_delete = set()
+            for s in self._segments:
+                bb = self.parent._my_block_manager().get_bufferblock(s.locator)
+                if bb:
+                    if bb.state() != _BufferBlock.COMMITTED:
+                        self.parent._my_block_manager().commit_bufferblock(bb, sync=True)
+                    to_delete.add(s.locator)
+                    s.locator = bb.locator()
+            for s in to_delete:
+                # Don't delete the bufferblock if it's owned by many files. It'll be
+                # deleted after all of its owners are flush()ed.
+                if self.parent._my_block_manager().get_bufferblock(s).owner is self:
+                    self.parent._my_block_manager().delete_bufferblock(s)
+
+        self.parent.notify(MOD, self.parent, self.name, (self, self))
+
+    @must_be_writable
+    @synchronized
+    def add_segment(self, blocks, pos, size):
+        """Add a segment to the end of the file.
+
+        `pos` and `size` reference a section of the stream described by
+        `blocks` (a list of Range objects).
+
+        """
+        self._add_segment(blocks, pos, size)
+
+    def _add_segment(self, blocks, pos, size):
+        """Internal implementation of add_segment."""
+        self.set_committed(False)
+        for lr in locators_and_ranges(blocks, pos, size):
+            last = self._segments[-1] if self._segments else Range(0, 0, 0, 0)
+            r = Range(lr.locator, last.range_start+last.range_size, lr.segment_size, lr.segment_offset)
+            self._segments.append(r)
+
+    @synchronized
+    def size(self):
+        """Get the file size."""
+        if self._segments:
+            n = self._segments[-1]
+            return n.range_start + n.range_size
+        else:
+            return 0
+
+    @synchronized
+    def manifest_text(self, stream_name=".", portable_locators=False,
+                      normalize=False, only_committed=False):
+        buf = ""
+        filestream = []
+        for segment in self._segments:
+            loc = segment.locator
+            if self.parent._my_block_manager().is_bufferblock(loc):
+                if only_committed:
+                    continue
+                loc = self.parent._my_block_manager().get_bufferblock(loc).locator()
+            if portable_locators:
+                loc = KeepLocator(loc).stripped()
+            filestream.append(LocatorAndRange(loc, KeepLocator(loc).size,
+                                 segment.segment_offset, segment.range_size))
+        buf += ' '.join(normalize_stream(stream_name, {self.name: filestream}))
+        buf += "\n"
+        return buf
+
+    @must_be_writable
+    @synchronized
+    def _reparent(self, newparent, newname):
+        self.set_committed(False)
+        self.flush(sync=True)
+        self.parent.remove(self.name)
+        self.parent = newparent
+        self.name = newname
+        self.lock = self.parent.root_collection().lock
+
+
+class ArvadosFileReader(ArvadosFileReaderBase):
+    """Wraps ArvadosFile in a file-like object supporting reading only.
+
+    Be aware that this class is NOT thread safe, as there is no locking
+    around updates to the file pointer.
+
+    """
+
+    def __init__(self, arvadosfile, mode="r", num_retries=None):
+        super(ArvadosFileReader, self).__init__(arvadosfile.name, mode=mode, num_retries=num_retries)
+        self.arvadosfile = arvadosfile
+
+    def size(self):
+        return self.arvadosfile.size()
+
+    def stream_name(self):
+        return self.arvadosfile.parent.stream_name()
+
+    def readinto(self, b):
+        data = self.read(len(b))
+        b[:len(data)] = data
+        return len(data)
+
+    @_FileLikeObjectBase._before_close
+    @retry_method
+    def read(self, size=None, num_retries=None):
+        """Read up to `size` bytes from the file and return the result.
+
+        Starts at the current file position.  If `size` is None, read the
+        entire remainder of the file.
+        """
+        if size is None:
+            data = []
+            rd = self.arvadosfile.readfrom(self._filepos, config.KEEP_BLOCK_SIZE, num_retries)
+            while rd:
+                data.append(rd)
+                self._filepos += len(rd)
+                rd = self.arvadosfile.readfrom(self._filepos, config.KEEP_BLOCK_SIZE, num_retries)
+            return b''.join(data)
+        else:
+            data = self.arvadosfile.readfrom(self._filepos, size, num_retries, exact=True)
+            self._filepos += len(data)
+            return data
+
+    @_FileLikeObjectBase._before_close
+    @retry_method
+    def readfrom(self, offset, size, num_retries=None):
+        """Read up to `size` bytes from the stream, starting at the specified file offset.
+
+        This method does not change the file position.
+        """
+        return self.arvadosfile.readfrom(offset, size, num_retries)
+
+    def flush(self):
+        pass
+
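+# A hedged reading sketch (assumes the Collection API from
+# arvados.collection; the collection UUID below is a placeholder):
+#
+#     import arvados.collection
+#     c = arvados.collection.CollectionReader("zzzzz-4zz18-xxxxxxxxxxxxxxx")
+#     with c.open("input.txt", "rb") as f:  # reader backed by ArvadosFileReader
+#         print(f.read(128))
+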
+
+class ArvadosFileWriter(ArvadosFileReader):
+    """Wraps ArvadosFile in a file-like object supporting both reading and writing.
+
+    Be aware that this class is NOT thread safe, as there is no locking
+    around updates to the file pointer.
+
+    """
+
+    def __init__(self, arvadosfile, mode, num_retries=None):
+        super(ArvadosFileWriter, self).__init__(arvadosfile, mode=mode, num_retries=num_retries)
+        self.arvadosfile.add_writer(self)
+
+    def writable(self):
+        return True
+
+    @_FileLikeObjectBase._before_close
+    @retry_method
+    def write(self, data, num_retries=None):
+        if self.mode[0] == "a":
+            self._filepos = self.size()
+        self.arvadosfile.writeto(self._filepos, data, num_retries)
+        self._filepos += len(data)
+        return len(data)
+
+    @_FileLikeObjectBase._before_close
+    @retry_method
+    def writelines(self, seq, num_retries=None):
+        for s in seq:
+            self.write(s, num_retries=num_retries)
+
+    @_FileLikeObjectBase._before_close
+    def truncate(self, size=None):
+        if size is None:
+            size = self._filepos
+        self.arvadosfile.truncate(size)
+
+    @_FileLikeObjectBase._before_close
+    def flush(self):
+        self.arvadosfile.flush()
+
+    def close(self, flush=True):
+        if not self.closed:
+            self.arvadosfile.remove_writer(self, flush)
+            super(ArvadosFileWriter, self).close()
+
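+# A minimal usage sketch (illustrative): writers are normally obtained from
+# a writable Collection rather than constructed directly.
+#
+#   with collection.open("log.bin", "ab") as writer:   # ArvadosFileWriter
+#       writer.write(b"appended record\n")  # "a" mode writes at end-of-file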
+
+class WrappableFile(object):
+    """An interface to an Arvados file that's compatible with io wrappers.
+
+    """
+    def __init__(self, f):
+        self.f = f
+        self.closed = False
+    def close(self):
+        self.closed = True
+        return self.f.close()
+    def flush(self):
+        return self.f.flush()
+    def read(self, *args, **kwargs):
+        return self.f.read(*args, **kwargs)
+    def readable(self):
+        return self.f.readable()
+    def readinto(self, *args, **kwargs):
+        return self.f.readinto(*args, **kwargs)
+    def seek(self, *args, **kwargs):
+        return self.f.seek(*args, **kwargs)
+    def seekable(self):
+        return self.f.seekable()
+    def tell(self):
+        return self.f.tell()
+    def writable(self):
+        return self.f.writable()
+    def write(self, *args, **kwargs):
+        return self.f.write(*args, **kwargs)
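+
+# A minimal sketch of how WrappableFile is used (illustrative): it lets an
+# Arvados file object sit under the standard io wrappers, mirroring what
+# RichCollectionBase.open() does for text-mode opens.
+#
+#   import io
+#   buffered = io.BufferedReader(WrappableFile(reader))
+#   text = io.TextIOWrapper(buffered, encoding="utf-8")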
diff --git a/sdk/python/arvados/cache.py b/sdk/python/arvados/cache.py
new file mode 100644 (file)
index 0000000..85f2b89
--- /dev/null
@@ -0,0 +1,76 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from builtins import object
+import errno
+import hashlib
+import os
+import tempfile
+import time
+
+class SafeHTTPCache(object):
+    """Thread-safe replacement for httplib2.FileCache"""
+
+    def __init__(self, path=None, max_age=None):
+        self._dir = path
+        if max_age is not None:
+            try:
+                self._clean(threshold=time.time() - max_age)
+            except Exception:
+                # Cache cleanup is best-effort; never fail construction.
+                pass
+
+    def _clean(self, threshold=0):
+        for ent in os.listdir(self._dir):
+            fnm = os.path.join(self._dir, ent)
+            if os.path.isdir(fnm) or not fnm.endswith('.tmp'):
+                continue
+            stat = os.lstat(fnm)
+            if stat.st_mtime < threshold:
+                try:
+                    os.unlink(fnm)
+                except OSError as err:
+                    if err.errno != errno.ENOENT:
+                        raise
+
+    def __str__(self):
+        return self._dir
+
+    def _filename(self, url):
+        return os.path.join(self._dir, hashlib.md5(url.encode('utf-8')).hexdigest()+'.tmp')
+
+    def get(self, url):
+        filename = self._filename(url)
+        try:
+            with open(filename, 'rb') as f:
+                return f.read()
+        except (IOError, OSError):
+            return None
+
+    def set(self, url, content):
+        try:
+            fd, tempname = tempfile.mkstemp(dir=self._dir)
+        except Exception:
+            # Couldn't create a temp file (e.g. cache dir missing):
+            # treat the write as a cache miss.
+            return None
+        try:
+            try:
+                f = os.fdopen(fd, 'wb')
+            except Exception:
+                os.close(fd)
+                raise
+            try:
+                f.write(content)
+            finally:
+                f.close()
+            os.rename(tempname, self._filename(url))
+            tempname = None
+        finally:
+            if tempname:
+                os.unlink(tempname)
+
+    def delete(self, url):
+        try:
+            os.unlink(self._filename(url))
+        except OSError as err:
+            if err.errno != errno.ENOENT:
+                raise
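+
+# A minimal usage sketch (illustrative; the cache directory is an assumed
+# path and must already exist):
+#
+#   import httplib2
+#   cache = SafeHTTPCache('/tmp/arvados-http-cache', max_age=7*24*60*60)
+#   http = httplib2.Http(cache=cache)  # httplib2 accepts any object with
+#                                      # get/set/delete methods as its cache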
diff --git a/sdk/python/arvados/collection.py b/sdk/python/arvados/collection.py
new file mode 100644 (file)
index 0000000..cf1a36f
--- /dev/null
@@ -0,0 +1,1921 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from future.utils import listitems, listvalues, viewkeys
+from builtins import str
+from past.builtins import basestring
+from builtins import object
+import ciso8601
+import datetime
+import errno
+import functools
+import hashlib
+import io
+import logging
+import os
+import re
+import sys
+import threading
+import time
+
+from collections import deque
+from stat import *
+
+from .arvfile import split, _FileLikeObjectBase, ArvadosFile, ArvadosFileWriter, ArvadosFileReader, WrappableFile, _BlockManager, synchronized, must_be_writable, NoopLock
+from .keep import KeepLocator, KeepClient
+from .stream import StreamReader
+from ._normalize_stream import normalize_stream, escape
+from ._ranges import Range, LocatorAndRange
+from .safeapi import ThreadSafeApiCache
+import arvados.config as config
+import arvados.errors as errors
+import arvados.util
+import arvados.events as events
+from arvados.retry import retry_method
+
+_logger = logging.getLogger('arvados.collection')
+
+
+if sys.version_info >= (3, 0):
+    TextIOWrapper = io.TextIOWrapper
+else:
+    class TextIOWrapper(io.TextIOWrapper):
+        """To maintain backward compatibility, cast str to unicode in
+        write('foo').
+
+        """
+        def write(self, data):
+            if isinstance(data, basestring):
+                data = unicode(data)
+            return super(TextIOWrapper, self).write(data)
+
+
+class CollectionBase(object):
+    """Abstract base class for Collection classes."""
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        pass
+
+    def _my_keep(self):
+        if self._keep_client is None:
+            self._keep_client = KeepClient(api_client=self._api_client,
+                                           num_retries=self.num_retries)
+        return self._keep_client
+
+    def stripped_manifest(self):
+        """Get the manifest with locator hints stripped.
+
+        Return the manifest for the current collection with all
+        non-portable hints (i.e., permission signatures and any other
+        hints except size hints) removed from the locators.
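+
+        For example, a signed locator of the form
+        ``d41d8cd98f00b204e9800998ecf8427e+0+A<signature>@<expiry>`` is
+        reduced to ``d41d8cd98f00b204e9800998ecf8427e+0`` (hash and size
+        hint only; the hash shown is illustrative).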
+        """
+        raw = self.manifest_text()
+        clean = []
+        for line in raw.split("\n"):
+            fields = line.split()
+            if fields:
+                clean_fields = fields[:1] + [
+                    (re.sub(r'\+[^\d][^\+]*', '', x)
+                     if re.match(arvados.util.keep_locator_pattern, x)
+                     else x)
+                    for x in fields[1:]]
+                clean += [' '.join(clean_fields), "\n"]
+        return ''.join(clean)
+
+
+class _WriterFile(_FileLikeObjectBase):
+    def __init__(self, coll_writer, name):
+        super(_WriterFile, self).__init__(name, 'wb')
+        self.dest = coll_writer
+
+    def close(self):
+        super(_WriterFile, self).close()
+        self.dest.finish_current_file()
+
+    @_FileLikeObjectBase._before_close
+    def write(self, data):
+        self.dest.write(data)
+
+    @_FileLikeObjectBase._before_close
+    def writelines(self, seq):
+        for data in seq:
+            self.write(data)
+
+    @_FileLikeObjectBase._before_close
+    def flush(self):
+        self.dest.flush_data()
+
+
+class CollectionWriter(CollectionBase):
+    """Deprecated, use Collection instead."""
+
+    def __init__(self, api_client=None, num_retries=0, replication=None):
+        """Instantiate a CollectionWriter.
+
+        CollectionWriter lets you build a new Arvados Collection from scratch.
+        Write files to it.  The CollectionWriter will upload data to Keep as
+        appropriate, and provide you with the Collection manifest text when
+        you're finished.
+
+        Arguments:
+        * api_client: The API client to use to look up Collections.  If not
+          provided, CollectionWriter will build one from available Arvados
+          configuration.
+        * num_retries: The default number of times to retry failed
+          service requests.  Default 0.  You may change this value
+          after instantiation, but note those changes may not
+          propagate to related objects like the Keep client.
+        * replication: The number of copies of each block to store.
+          If this argument is None or not supplied, replication is
+          the server-provided default if available, otherwise 2.
+        """
+        self._api_client = api_client
+        self.num_retries = num_retries
+        self.replication = (2 if replication is None else replication)
+        self._keep_client = None
+        self._data_buffer = []
+        self._data_buffer_len = 0
+        self._current_stream_files = []
+        self._current_stream_length = 0
+        self._current_stream_locators = []
+        self._current_stream_name = '.'
+        self._current_file_name = None
+        self._current_file_pos = 0
+        self._finished_streams = []
+        self._close_file = None
+        self._queued_file = None
+        self._queued_dirents = deque()
+        self._queued_trees = deque()
+        self._last_open = None
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if exc_type is None:
+            self.finish()
+
+    def do_queued_work(self):
+        # The work queue consists of three pieces:
+        # * _queued_file: The file object we're currently writing to the
+        #   Collection.
+        # * _queued_dirents: Entries under the current directory
+        #   (_queued_trees[0]) that we want to write or recurse through.
+        #   This may contain files from subdirectories if
+        #   max_manifest_depth == 0 for this directory.
+        # * _queued_trees: Directories that should be written as separate
+        #   streams to the Collection.
+        # This function handles the smallest piece of work currently queued
+        # (current file, then current directory, then next directory) until
+        # no work remains.  The _work_THING methods each do a unit of work on
+        # THING.  _queue_THING methods add a THING to the work queue.
+        while True:
+            if self._queued_file:
+                self._work_file()
+            elif self._queued_dirents:
+                self._work_dirents()
+            elif self._queued_trees:
+                self._work_trees()
+            else:
+                break
+
+    def _work_file(self):
+        while True:
+            buf = self._queued_file.read(config.KEEP_BLOCK_SIZE)
+            if not buf:
+                break
+            self.write(buf)
+        self.finish_current_file()
+        if self._close_file:
+            self._queued_file.close()
+        self._close_file = None
+        self._queued_file = None
+
+    def _work_dirents(self):
+        path, stream_name, max_manifest_depth = self._queued_trees[0]
+        if stream_name != self.current_stream_name():
+            self.start_new_stream(stream_name)
+        while self._queued_dirents:
+            dirent = self._queued_dirents.popleft()
+            target = os.path.join(path, dirent)
+            if os.path.isdir(target):
+                self._queue_tree(target,
+                                 os.path.join(stream_name, dirent),
+                                 max_manifest_depth - 1)
+            else:
+                self._queue_file(target, dirent)
+                break
+        if not self._queued_dirents:
+            self._queued_trees.popleft()
+
+    def _work_trees(self):
+        path, stream_name, max_manifest_depth = self._queued_trees[0]
+        d = arvados.util.listdir_recursive(
+            path, max_depth = (None if max_manifest_depth == 0 else 0))
+        if d:
+            self._queue_dirents(stream_name, d)
+        else:
+            self._queued_trees.popleft()
+
+    def _queue_file(self, source, filename=None):
+        assert (self._queued_file is None), "tried to queue more than one file"
+        if not hasattr(source, 'read'):
+            source = open(source, 'rb')
+            self._close_file = True
+        else:
+            self._close_file = False
+        if filename is None:
+            filename = os.path.basename(source.name)
+        self.start_new_file(filename)
+        self._queued_file = source
+
+    def _queue_dirents(self, stream_name, dirents):
+        assert (not self._queued_dirents), "tried to queue more than one tree"
+        self._queued_dirents = deque(sorted(dirents))
+
+    def _queue_tree(self, path, stream_name, max_manifest_depth):
+        self._queued_trees.append((path, stream_name, max_manifest_depth))
+
+    def write_file(self, source, filename=None):
+        self._queue_file(source, filename)
+        self.do_queued_work()
+
+    def write_directory_tree(self,
+                             path, stream_name='.', max_manifest_depth=-1):
+        self._queue_tree(path, stream_name, max_manifest_depth)
+        self.do_queued_work()
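+
+    # A minimal usage sketch (illustrative paths):
+    #
+    #   cwriter = CollectionWriter()
+    #   cwriter.write_file('/tmp/input.txt')           # one file
+    #   cwriter.write_directory_tree('/tmp/results')   # a whole directory
+    #   manifest = cwriter.manifest_text()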
+
+    def write(self, newdata):
+        if isinstance(newdata, bytes):
+            pass
+        elif isinstance(newdata, str):
+            newdata = newdata.encode()
+        elif hasattr(newdata, '__iter__'):
+            for s in newdata:
+                self.write(s)
+            return
+        self._data_buffer.append(newdata)
+        self._data_buffer_len += len(newdata)
+        self._current_stream_length += len(newdata)
+        while self._data_buffer_len >= config.KEEP_BLOCK_SIZE:
+            self.flush_data()
+
+    def open(self, streampath, filename=None):
+        """open(streampath[, filename]) -> file-like object
+
+        Pass in the path of a file to write to the Collection, either as a
+        single string or as two separate stream name and file name arguments.
+        This method returns a file-like object you can write to add it to the
+        Collection.
+
+        You may only have one file object from the Collection open at a time,
+        so be sure to close the object when you're done.  Using the object in
+        a with statement makes that easy::
+
+          with cwriter.open('./doc/page1.txt') as outfile:
+              outfile.write(page1_data)
+          with cwriter.open('./doc/page2.txt') as outfile:
+              outfile.write(page2_data)
+        """
+        if filename is None:
+            streampath, filename = split(streampath)
+        if self._last_open and not self._last_open.closed:
+            raise errors.AssertionError(
+                u"can't open '{}' when '{}' is still open".format(
+                    filename, self._last_open.name))
+        if streampath != self.current_stream_name():
+            self.start_new_stream(streampath)
+        self.set_current_file_name(filename)
+        self._last_open = _WriterFile(self, filename)
+        return self._last_open
+
+    def flush_data(self):
+        data_buffer = b''.join(self._data_buffer)
+        if data_buffer:
+            self._current_stream_locators.append(
+                self._my_keep().put(
+                    data_buffer[0:config.KEEP_BLOCK_SIZE],
+                    copies=self.replication))
+            self._data_buffer = [data_buffer[config.KEEP_BLOCK_SIZE:]]
+            self._data_buffer_len = len(self._data_buffer[0])
+
+    def start_new_file(self, newfilename=None):
+        self.finish_current_file()
+        self.set_current_file_name(newfilename)
+
+    def set_current_file_name(self, newfilename):
+        if re.search(r'[\t\n]', newfilename):
+            raise errors.AssertionError(
+                "Manifest filenames cannot contain whitespace: %s" %
+                newfilename)
+        elif re.search(r'\x00', newfilename):
+            raise errors.AssertionError(
+                "Manifest filenames cannot contain NUL characters: %s" %
+                newfilename)
+        self._current_file_name = newfilename
+
+    def current_file_name(self):
+        return self._current_file_name
+
+    def finish_current_file(self):
+        if self._current_file_name is None:
+            if self._current_file_pos == self._current_stream_length:
+                return
+            raise errors.AssertionError(
+                "Cannot finish an unnamed file " +
+                "(%d bytes at offset %d in '%s' stream)" %
+                (self._current_stream_length - self._current_file_pos,
+                 self._current_file_pos,
+                 self._current_stream_name))
+        self._current_stream_files.append([
+                self._current_file_pos,
+                self._current_stream_length - self._current_file_pos,
+                self._current_file_name])
+        self._current_file_pos = self._current_stream_length
+        self._current_file_name = None
+
+    def start_new_stream(self, newstreamname='.'):
+        self.finish_current_stream()
+        self.set_current_stream_name(newstreamname)
+
+    def set_current_stream_name(self, newstreamname):
+        if re.search(r'[\t\n]', newstreamname):
+            raise errors.AssertionError(
+                "Manifest stream names cannot contain whitespace: '%s'" %
+                (newstreamname))
+        self._current_stream_name = '.' if newstreamname=='' else newstreamname
+
+    def current_stream_name(self):
+        return self._current_stream_name
+
+    def finish_current_stream(self):
+        self.finish_current_file()
+        self.flush_data()
+        if not self._current_stream_files:
+            pass
+        elif self._current_stream_name is None:
+            raise errors.AssertionError(
+                "Cannot finish an unnamed stream (%d bytes in %d files)" %
+                (self._current_stream_length, len(self._current_stream_files)))
+        else:
+            if not self._current_stream_locators:
+                self._current_stream_locators.append(config.EMPTY_BLOCK_LOCATOR)
+            self._finished_streams.append([self._current_stream_name,
+                                           self._current_stream_locators,
+                                           self._current_stream_files])
+        self._current_stream_files = []
+        self._current_stream_length = 0
+        self._current_stream_locators = []
+        self._current_stream_name = None
+        self._current_file_pos = 0
+        self._current_file_name = None
+
+    def finish(self):
+        """Store the manifest in Keep and return its locator.
+
+        This is useful for storing manifest fragments (task outputs)
+        temporarily in Keep during a Crunch job.
+
+        In other cases you should make a collection instead, by
+        sending manifest_text() to the API server's "create
+        collection" endpoint.
+        """
+        return self._my_keep().put(self.manifest_text().encode(),
+                                   copies=self.replication)
+
+    def portable_data_hash(self):
+        stripped = self.stripped_manifest().encode()
+        return '{}+{}'.format(hashlib.md5(stripped).hexdigest(), len(stripped))
+
+    def manifest_text(self):
+        self.finish_current_stream()
+        manifest = ''
+
+        for stream in self._finished_streams:
+            if not re.search(r'^\.(/.*)?$', stream[0]):
+                manifest += './'
+            manifest += stream[0].replace(' ', '\\040')
+            manifest += ' ' + ' '.join(stream[1])
+            manifest += ' ' + ' '.join("%d:%d:%s" % (sfile[0], sfile[1], sfile[2].replace(' ', '\\040')) for sfile in stream[2])
+            manifest += "\n"
+
+        return manifest
+
+    def data_locators(self):
+        ret = []
+        for name, locators, files in self._finished_streams:
+            ret += locators
+        return ret
+
+    def save_new(self, name=None):
+        return self._api_client.collections().create(
+            ensure_unique_name=True,
+            body={
+                'name': name,
+                'manifest_text': self.manifest_text(),
+            }).execute(num_retries=self.num_retries)
+
+
+class ResumableCollectionWriter(CollectionWriter):
+    """Deprecated, use Collection instead."""
+
+    STATE_PROPS = ['_current_stream_files', '_current_stream_length',
+                   '_current_stream_locators', '_current_stream_name',
+                   '_current_file_name', '_current_file_pos', '_close_file',
+                   '_data_buffer', '_dependencies', '_finished_streams',
+                   '_queued_dirents', '_queued_trees']
+
+    def __init__(self, api_client=None, **kwargs):
+        self._dependencies = {}
+        super(ResumableCollectionWriter, self).__init__(api_client, **kwargs)
+
+    @classmethod
+    def from_state(cls, state, *init_args, **init_kwargs):
+        # Try to build a new writer from scratch with the given state.
+        # If the state is not suitable to resume (because files have changed,
+        # been deleted, aren't predictable, etc.), raise a
+        # StaleWriterStateError.  Otherwise, return the initialized writer.
+        # The caller is responsible for calling writer.do_queued_work()
+        # appropriately after it's returned.
+        writer = cls(*init_args, **init_kwargs)
+        for attr_name in cls.STATE_PROPS:
+            attr_value = state[attr_name]
+            attr_class = getattr(writer, attr_name).__class__
+            # Coerce the value into the same type as the initial value, if
+            # needed.
+            if attr_class not in (type(None), attr_value.__class__):
+                attr_value = attr_class(attr_value)
+            setattr(writer, attr_name, attr_value)
+        # Check dependencies before we try to resume anything.
+        if any(KeepLocator(ls).permission_expired()
+               for ls in writer._current_stream_locators):
+            raise errors.StaleWriterStateError(
+                "locators include expired permission hint")
+        writer.check_dependencies()
+        if state['_current_file'] is not None:
+            path, pos = state['_current_file']
+            try:
+                writer._queued_file = open(path, 'rb')
+                writer._queued_file.seek(pos)
+            except IOError as error:
+                raise errors.StaleWriterStateError(
+                    u"failed to reopen active file {}: {}".format(path, error))
+        return writer
+
+    def check_dependencies(self):
+        for path, orig_stat in listitems(self._dependencies):
+            if not S_ISREG(orig_stat[ST_MODE]):
+                raise errors.StaleWriterStateError(u"{} not file".format(path))
+            try:
+                now_stat = tuple(os.stat(path))
+            except OSError as error:
+                raise errors.StaleWriterStateError(
+                    u"failed to stat {}: {}".format(path, error))
+            if ((not S_ISREG(now_stat[ST_MODE])) or
+                (orig_stat[ST_MTIME] != now_stat[ST_MTIME]) or
+                (orig_stat[ST_SIZE] != now_stat[ST_SIZE])):
+                raise errors.StaleWriterStateError(u"{} changed".format(path))
+
+    def dump_state(self, copy_func=lambda x: x):
+        state = {attr: copy_func(getattr(self, attr))
+                 for attr in self.STATE_PROPS}
+        if self._queued_file is None:
+            state['_current_file'] = None
+        else:
+            state['_current_file'] = (os.path.realpath(self._queued_file.name),
+                                      self._queued_file.tell())
+        return state
+
+    def _queue_file(self, source, filename=None):
+        try:
+            src_path = os.path.realpath(source)
+        except Exception:
+            raise errors.AssertionError(u"{} not a file path".format(source))
+        try:
+            path_stat = os.stat(src_path)
+            stat_error = None
+        except OSError as error:
+            # Save the exception object: in Python 3 the name bound by
+            # `except ... as` is cleared when the block exits, and the
+            # error is needed for the message below.
+            stat_error = error
+            path_stat = None
+        super(ResumableCollectionWriter, self)._queue_file(source, filename)
+        fd_stat = os.fstat(self._queued_file.fileno())
+        if not S_ISREG(fd_stat.st_mode):
+            # We won't be able to resume from this cache anyway, so don't
+            # worry about further checks.
+            self._dependencies[source] = tuple(fd_stat)
+        elif path_stat is None:
+            raise errors.AssertionError(
+                u"could not stat {}: {}".format(source, stat_error))
+        elif path_stat.st_ino != fd_stat.st_ino:
+            raise errors.AssertionError(
+                u"{} changed between open and stat calls".format(source))
+        else:
+            self._dependencies[src_path] = tuple(fd_stat)
+
+    def write(self, data):
+        if self._queued_file is None:
+            raise errors.AssertionError(
+                "resumable writer can't accept unsourced data")
+        return super(ResumableCollectionWriter, self).write(data)
+
+
+ADD = "add"
+DEL = "del"
+MOD = "mod"
+TOK = "tok"
+FILE = "file"
+COLLECTION = "collection"
+
+class RichCollectionBase(CollectionBase):
+    """Base class for Collections and Subcollections.
+
+    Implements the majority of functionality relating to accessing items in the
+    Collection.
+
+    """
+
+    def __init__(self, parent=None):
+        self.parent = parent
+        self._committed = False
+        self._has_remote_blocks = False
+        self._callback = None
+        self._items = {}
+
+    def _my_api(self):
+        raise NotImplementedError()
+
+    def _my_keep(self):
+        raise NotImplementedError()
+
+    def _my_block_manager(self):
+        raise NotImplementedError()
+
+    def writable(self):
+        raise NotImplementedError()
+
+    def root_collection(self):
+        raise NotImplementedError()
+
+    def notify(self, event, collection, name, item):
+        raise NotImplementedError()
+
+    def stream_name(self):
+        raise NotImplementedError()
+
+
+    @synchronized
+    def has_remote_blocks(self):
+        """Recursively check for a +R segment locator signature."""
+
+        if self._has_remote_blocks:
+            return True
+        for item in self:
+            if self[item].has_remote_blocks():
+                return True
+        return False
+
+    @synchronized
+    def set_has_remote_blocks(self, val):
+        self._has_remote_blocks = val
+        if self.parent:
+            self.parent.set_has_remote_blocks(val)
+
+    @must_be_writable
+    @synchronized
+    def find_or_create(self, path, create_type):
+        """Recursively search the specified file path.
+
+        May return either a `Collection` or `ArvadosFile`.  If not found, will
+        create a new item at the specified path based on `create_type`.  Will
+        create intermediate subcollections needed to contain the final item in
+        the path.
+
+        :create_type:
+          One of `arvados.collection.FILE` or
+          `arvados.collection.COLLECTION`.  If the path is not found and
+          create_type is FILE, create and return a new ArvadosFile for the
+          last path component.  If COLLECTION, create and return a new
+          Subcollection for the last path component.
+
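+        A minimal sketch (the path is illustrative)::
+
+          f = c.find_or_create("results/output.txt", arvados.collection.FILE)
+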
+        """
+
+        pathcomponents = path.split("/", 1)
+        if pathcomponents[0]:
+            item = self._items.get(pathcomponents[0])
+            if len(pathcomponents) == 1:
+                if item is None:
+                    # create new file
+                    if create_type == COLLECTION:
+                        item = Subcollection(self, pathcomponents[0])
+                    else:
+                        item = ArvadosFile(self, pathcomponents[0])
+                    self._items[pathcomponents[0]] = item
+                    self.set_committed(False)
+                    self.notify(ADD, self, pathcomponents[0], item)
+                return item
+            else:
+                if item is None:
+                    # create new collection
+                    item = Subcollection(self, pathcomponents[0])
+                    self._items[pathcomponents[0]] = item
+                    self.set_committed(False)
+                    self.notify(ADD, self, pathcomponents[0], item)
+                if isinstance(item, RichCollectionBase):
+                    return item.find_or_create(pathcomponents[1], create_type)
+                else:
+                    raise IOError(errno.ENOTDIR, "Not a directory", pathcomponents[0])
+        else:
+            return self
+
+    @synchronized
+    def find(self, path):
+        """Recursively search the specified file path.
+
+        May return either a Collection or ArvadosFile.  Returns None if not
+        found.  If the path is invalid (e.g., starts with '/'), an IOError is
+        raised.
+
+        """
+        if not path:
+            raise errors.ArgumentError("Parameter 'path' is empty.")
+
+        pathcomponents = path.split("/", 1)
+        if pathcomponents[0] == '':
+            raise IOError(errno.ENOTDIR, "Not a directory", pathcomponents[0])
+
+        item = self._items.get(pathcomponents[0])
+        if item is None:
+            return None
+        elif len(pathcomponents) == 1:
+            return item
+        else:
+            if isinstance(item, RichCollectionBase):
+                if pathcomponents[1]:
+                    return item.find(pathcomponents[1])
+                else:
+                    return item
+            else:
+                raise IOError(errno.ENOTDIR, "Not a directory", pathcomponents[0])
+
+    @synchronized
+    def mkdirs(self, path):
+        """Recursive subcollection create.
+
+        Like `os.makedirs()`.  Will create intermediate subcollections needed
+        to contain the leaf subcollection path.
+
+        """
+
+        if self.find(path) is not None:
+            raise IOError(errno.EEXIST, "Directory or file exists", path)
+
+        return self.find_or_create(path, COLLECTION)
+
+    def open(self, path, mode="r", encoding=None):
+        """Open a file-like object for access.
+
+        :path:
+          path to a file in the collection
+        :mode:
+          a string consisting of "r", "w", or "a", optionally followed
+          by "b" or "t", optionally followed by "+".
+          :"b":
+            binary mode: write() accepts bytes, read() returns bytes.
+          :"t":
+            text mode (default): write() accepts strings, read() returns strings.
+          :"r":
+            opens for reading
+          :"r+":
+            opens for reading and writing.  Reads/writes share a file pointer.
+          :"w", "w+":
+            truncates to 0 and opens for reading and writing.  Reads/writes share a file pointer.
+          :"a", "a+":
+            opens for reading and writing.  All writes are appended to
+            the end of the file.  Writing does not affect the file pointer for
+            reading.
+
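+        A minimal sketch (assuming `c` is a writable collection)::
+
+          with c.open("dir/notes.txt", "w") as f:
+              f.write("hello")
+          with c.open("dir/notes.txt", "r") as f:
+              print(f.read())
+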
+        """
+
+        if not re.search(r'^[rwa][bt]?\+?$', mode):
+            raise errors.ArgumentError("Invalid mode {!r}".format(mode))
+
+        if mode[0] == 'r' and '+' not in mode:
+            fclass = ArvadosFileReader
+            arvfile = self.find(path)
+        elif not self.writable():
+            raise IOError(errno.EROFS, "Collection is read only")
+        else:
+            fclass = ArvadosFileWriter
+            arvfile = self.find_or_create(path, FILE)
+
+        if arvfile is None:
+            raise IOError(errno.ENOENT, "File not found", path)
+        if not isinstance(arvfile, ArvadosFile):
+            raise IOError(errno.EISDIR, "Is a directory", path)
+
+        if mode[0] == 'w':
+            arvfile.truncate(0)
+
+        binmode = mode[0] + 'b' + re.sub('[bt]', '', mode[1:])
+        f = fclass(arvfile, mode=binmode, num_retries=self.num_retries)
+        if 'b' not in mode:
+            bufferclass = io.BufferedRandom if f.writable() else io.BufferedReader
+            f = TextIOWrapper(bufferclass(WrappableFile(f)), encoding=encoding)
+        return f
+
+    def modified(self):
+        """Determine if the collection has been modified since last commited."""
+        return not self.committed()
+
+    @synchronized
+    def committed(self):
+        """Determine if the collection has been committed to the API server."""
+        return self._committed
+
+    @synchronized
+    def set_committed(self, value=True):
+        """Recursively set committed flag.
+
+        If value is True, set committed to be True for this and all children.
+
+        If value is False, set committed to be False for this and all parents.
+        """
+        if value == self._committed:
+            return
+        if value:
+            for k,v in listitems(self._items):
+                v.set_committed(True)
+            self._committed = True
+        else:
+            self._committed = False
+            if self.parent is not None:
+                self.parent.set_committed(False)
+
+    @synchronized
+    def __iter__(self):
+        """Iterate over names of files and collections contained in this collection."""
+        return iter(viewkeys(self._items))
+
+    @synchronized
+    def __getitem__(self, k):
+        """Get a file or collection that is directly contained by this collection.
+
+        If you want to search a path, use `find()` instead.
+
+        """
+        return self._items[k]
+
+    @synchronized
+    def __contains__(self, k):
+        """Test if there is a file or collection a directly contained by this collection."""
+        return k in self._items
+
+    @synchronized
+    def __len__(self):
+        """Get the number of items directly contained in this collection."""
+        return len(self._items)
+
+    @must_be_writable
+    @synchronized
+    def __delitem__(self, p):
+        """Delete an item by name which is directly contained by this collection."""
+        del self._items[p]
+        self.set_committed(False)
+        self.notify(DEL, self, p, None)
+
+    @synchronized
+    def keys(self):
+        """Get a list of names of files and collections directly contained in this collection."""
+        return self._items.keys()
+
+    @synchronized
+    def values(self):
+        """Get a list of files and collection objects directly contained in this collection."""
+        return listvalues(self._items)
+
+    @synchronized
+    def items(self):
+        """Get a list of (name, object) tuples directly contained in this collection."""
+        return listitems(self._items)
+
+    def exists(self, path):
+        """Test if there is a file or collection at `path`."""
+        return self.find(path) is not None
+
+    @must_be_writable
+    @synchronized
+    def remove(self, path, recursive=False):
+        """Remove the file or subcollection (directory) at `path`.
+
+        :recursive:
+          Specify whether to remove non-empty subcollections (True), or raise an error (False).
+        """
+
+        if not path:
+            raise errors.ArgumentError("Parameter 'path' is empty.")
+
+        pathcomponents = path.split("/", 1)
+        item = self._items.get(pathcomponents[0])
+        if item is None:
+            raise IOError(errno.ENOENT, "File not found", path)
+        if len(pathcomponents) == 1:
+            if isinstance(self._items[pathcomponents[0]], RichCollectionBase) and len(self._items[pathcomponents[0]]) > 0 and not recursive:
+                raise IOError(errno.ENOTEMPTY, "Directory not empty", path)
+            deleteditem = self._items[pathcomponents[0]]
+            del self._items[pathcomponents[0]]
+            self.set_committed(False)
+            self.notify(DEL, self, pathcomponents[0], deleteditem)
+        else:
+            item.remove(pathcomponents[1])
+
+    def _clonefrom(self, source):
+        for k,v in listitems(source):
+            self._items[k] = v.clone(self, k)
+
+    def clone(self):
+        raise NotImplementedError()
+
+    @must_be_writable
+    @synchronized
+    def add(self, source_obj, target_name, overwrite=False, reparent=False):
+        """Copy or move a file or subcollection to this collection.
+
+        :source_obj:
+          An ArvadosFile or Subcollection object.
+
+        :target_name:
+          Destination item name.  If the target name already exists and is a
+          file, this will raise an error unless you specify `overwrite=True`.
+
+        :overwrite:
+          Whether to overwrite target file if it already exists.
+
+        :reparent:
+          If True, source_obj will be moved from its parent collection to this collection.
+          If False, source_obj will be copied and the parent collection will be
+          unmodified.
+
+        """
+
+        if target_name in self and not overwrite:
+            raise IOError(errno.EEXIST, "File already exists", target_name)
+
+        modified_from = None
+        if target_name in self:
+            modified_from = self[target_name]
+
+        # Actually make the move or copy.
+        if reparent:
+            source_obj._reparent(self, target_name)
+            item = source_obj
+        else:
+            item = source_obj.clone(self, target_name)
+
+        self._items[target_name] = item
+        self.set_committed(False)
+        if not self._has_remote_blocks and source_obj.has_remote_blocks():
+            self.set_has_remote_blocks(True)
+
+        if modified_from:
+            self.notify(MOD, self, target_name, (modified_from, item))
+        else:
+            self.notify(ADD, self, target_name, item)
+
+    def _get_src_target(self, source, target_path, source_collection, create_dest):
+        if source_collection is None:
+            source_collection = self
+
+        # Find the object
+        if isinstance(source, basestring):
+            source_obj = source_collection.find(source)
+            if source_obj is None:
+                raise IOError(errno.ENOENT, "File not found", source)
+            sourcecomponents = source.split("/")
+        else:
+            source_obj = source
+            sourcecomponents = None
+
+        # Find the parent collection of the target path
+        targetcomponents = target_path.split("/")
+
+        # Determine the name to use.
+        target_name = targetcomponents[-1] if targetcomponents[-1] else sourcecomponents[-1]
+
+        if not target_name:
+            raise errors.ArgumentError("Target path is empty and source is an object.  Cannot determine destination filename to use.")
+
+        if create_dest:
+            target_dir = self.find_or_create("/".join(targetcomponents[0:-1]), COLLECTION)
+        else:
+            if len(targetcomponents) > 1:
+                target_dir = self.find("/".join(targetcomponents[0:-1]))
+            else:
+                target_dir = self
+
+        if target_dir is None:
+            raise IOError(errno.ENOENT, "Target directory not found", target_name)
+
+        if target_name in target_dir and isinstance(target_dir[target_name], RichCollectionBase) and sourcecomponents:
+            target_dir = target_dir[target_name]
+            target_name = sourcecomponents[-1]
+
+        return (source_obj, target_dir, target_name)
+
+    @must_be_writable
+    @synchronized
+    def copy(self, source, target_path, source_collection=None, overwrite=False):
+        """Copy a file or subcollection to a new path in this collection.
+
+        :source:
+          A string with a path to source file or subcollection, or an actual ArvadosFile or Subcollection object.
+
+        :target_path:
+          Destination file or path.  If the target path already exists and is a
+          subcollection, the item will be placed inside the subcollection.  If
+          the target path already exists and is a file, this will raise an error
+          unless you specify `overwrite=True`.
+
+        :source_collection:
+          Collection to copy `source` from (default `self`).
+
+        :overwrite:
+          Whether to overwrite target file if it already exists.
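+
+        A minimal sketch (paths are illustrative)::
+
+          c.copy("inputs/a.txt", "backup/a.txt", overwrite=True)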
+        """
+
+        source_obj, target_dir, target_name = self._get_src_target(source, target_path, source_collection, True)
+        target_dir.add(source_obj, target_name, overwrite, False)
+
+    @must_be_writable
+    @synchronized
+    def rename(self, source, target_path, source_collection=None, overwrite=False):
+        """Move a file or subcollection from `source_collection` to a new path in this collection.
+
+        :source:
+          A string with a path to source file or subcollection.
+
+        :target_path:
+          Destination file or path.  If the target path already exists and is a
+          subcollection, the item will be placed inside the subcollection.  If
+          the target path already exists and is a file, this will raise an error
+          unless you specify `overwrite=True`.
+
+        :source_collection:
+          Collection to move `source` from (default `self`).
+
+        :overwrite:
+          Whether to overwrite target file if it already exists.
+        """
+
+        source_obj, target_dir, target_name = self._get_src_target(source, target_path, source_collection, False)
+        if not source_obj.writable():
+            raise IOError(errno.EROFS, "Source collection is read only", source)
+        target_dir.add(source_obj, target_name, overwrite, True)
+
+    def portable_manifest_text(self, stream_name="."):
+        """Get the manifest text for this collection, sub collections and files.
+
+        This method does not flush outstanding blocks to Keep.  It will return
+        a normalized manifest with access tokens stripped.
+
+        :stream_name:
+          Name to use for this stream (directory)
+
+        """
+        return self._get_manifest_text(stream_name, True, True)
+
+    @synchronized
+    def manifest_text(self, stream_name=".", strip=False, normalize=False,
+                      only_committed=False):
+        """Get the manifest text for this collection, sub collections and files.
+
+        This method will flush outstanding blocks to Keep.  By default, it will
+        not normalize an unmodified manifest or strip access tokens.
+
+        :stream_name:
+          Name to use for this stream (directory)
+
+        :strip:
+          If True, remove signing tokens from block locators if present.
+          If False (default), block locators are left unchanged.
+
+        :normalize:
+          If True, always export the manifest text in normalized form
+          even if the Collection is not modified.  If False (default) and the collection
+          is not modified, return the original manifest text even if it is not
+          in normalized form.
+
+        :only_committed:
+          If True, don't commit pending blocks.
+
+        """
+
+        if not only_committed:
+            self._my_block_manager().commit_all()
+        return self._get_manifest_text(stream_name, strip, normalize,
+                                       only_committed=only_committed)
+
+    @synchronized
+    def _get_manifest_text(self, stream_name, strip, normalize, only_committed=False):
+        """Get the manifest text for this collection, sub collections and files.
+
+        :stream_name:
+          Name to use for this stream (directory)
+
+        :strip:
+          If True, remove signing tokens from block locators if present.
+          If False (default), block locators are left unchanged.
+
+        :normalize:
+          If True, always export the manifest text in normalized form
+          even if the Collection is not modified.  If False (default) and the collection
+          is not modified, return the original manifest text even if it is not
+          in normalized form.
+
+        :only_committed:
+          If True, only include blocks that were already committed to Keep.
+
+        """
+
+        if not self.committed() or self._manifest_text is None or normalize:
+            stream = {}
+            buf = []
+            sorted_keys = sorted(self.keys())
+            for filename in [s for s in sorted_keys if isinstance(self[s], ArvadosFile)]:
+                # Create a stream per file `k`
+                arvfile = self[filename]
+                filestream = []
+                for segment in arvfile.segments():
+                    loc = segment.locator
+                    if arvfile.parent._my_block_manager().is_bufferblock(loc):
+                        if only_committed:
+                            continue
+                        loc = arvfile.parent._my_block_manager().get_bufferblock(loc).locator()
+                    if strip:
+                        loc = KeepLocator(loc).stripped()
+                    filestream.append(LocatorAndRange(loc, KeepLocator(loc).size,
+                                         segment.segment_offset, segment.range_size))
+                stream[filename] = filestream
+            if stream:
+                buf.append(" ".join(normalize_stream(stream_name, stream)) + "\n")
+            for dirname in [s for s in sorted_keys if isinstance(self[s], RichCollectionBase)]:
+                buf.append(self[dirname].manifest_text(
+                    stream_name=os.path.join(stream_name, dirname),
+                    strip=strip, normalize=True, only_committed=only_committed))
+            return "".join(buf)
+        else:
+            if strip:
+                return self.stripped_manifest()
+            else:
+                return self._manifest_text
+
+    @synchronized
+    def _copy_remote_blocks(self, remote_blocks=None):
+        """Scan through the entire collection and ask Keep to copy remote blocks.
+
+        When accessing a remote collection, blocks will have a remote signature
+        (+R instead of +A). Collect these signatures and request Keep to copy the
+        blocks to the local cluster, returning local (+A) signatures.
+
+        :remote_blocks:
+          Shared cache of remote to local block mappings. This is used to avoid
+          doing extra work when blocks are shared by more than one file in
+          different subdirectories.
+
+        """
+        if remote_blocks is None:
+            # Use None as the default to avoid a shared mutable default dict.
+            remote_blocks = {}
+        for item in self:
+            remote_blocks = self[item]._copy_remote_blocks(remote_blocks)
+        return remote_blocks
+
+    @synchronized
+    def diff(self, end_collection, prefix=".", holding_collection=None):
+        """Generate list of add/modify/delete actions.
+
+        When given to `apply`, will change `self` to match `end_collection`
+
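+        A minimal sketch (names are illustrative)::
+
+          changes = mine.diff(theirs)  # mine, theirs are Collection objects
+          mine.apply(changes)          # mine now matches theirs
+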
+        """
+        changes = []
+        if holding_collection is None:
+            holding_collection = Collection(api_client=self._my_api(), keep_client=self._my_keep())
+        for k in self:
+            if k not in end_collection:
+                changes.append((DEL, os.path.join(prefix, k), self[k].clone(holding_collection, "")))
+        for k in end_collection:
+            if k in self:
+                if isinstance(end_collection[k], Subcollection) and isinstance(self[k], Subcollection):
+                    changes.extend(self[k].diff(end_collection[k], os.path.join(prefix, k), holding_collection))
+                elif end_collection[k] != self[k]:
+                    changes.append((MOD, os.path.join(prefix, k), self[k].clone(holding_collection, ""), end_collection[k].clone(holding_collection, "")))
+                else:
+                    changes.append((TOK, os.path.join(prefix, k), self[k].clone(holding_collection, ""), end_collection[k].clone(holding_collection, "")))
+            else:
+                changes.append((ADD, os.path.join(prefix, k), end_collection[k].clone(holding_collection, "")))
+        return changes
+
+    @must_be_writable
+    @synchronized
+    def apply(self, changes):
+        """Apply changes from `diff`.
+
+        If a change conflicts with a local change, it will be saved to an
+        alternate path indicating the conflict.
+
+        """
+        if changes:
+            self.set_committed(False)
+        for change in changes:
+            event_type = change[0]
+            path = change[1]
+            initial = change[2]
+            local = self.find(path)
+            conflictpath = "%s~%s~conflict~" % (path, time.strftime("%Y%m%d-%H%M%S",
+                                                                    time.gmtime()))
+            if event_type == ADD:
+                if local is None:
+                    # No local file at path, safe to copy over new file
+                    self.copy(initial, path)
+                elif local is not None and local != initial:
+                    # There is already local file and it is different:
+                    # save change to conflict file.
+                    self.copy(initial, conflictpath)
+            elif event_type == MOD or event_type == TOK:
+                final = change[3]
+                if local == initial:
+                    # Local matches the "initial" item so it has not
+                    # changed locally and is safe to update.
+                    if isinstance(local, ArvadosFile) and isinstance(final, ArvadosFile):
+                        # Replace contents of local file with new contents
+                        local.replace_contents(final)
+                    else:
+                        # Overwrite path with new item; this can happen if
+                        # path was a file and is now a collection or vice versa
+                        self.copy(final, path, overwrite=True)
+                else:
+                    # Local is missing (presumably deleted) or local doesn't
+                    # match the "start" value, so save change to conflict file
+                    self.copy(final, conflictpath)
+            elif event_type == DEL:
+                if local == initial:
+                    # Local item matches "initial" value, so it is safe to remove.
+                    self.remove(path, recursive=True)
+                # else, the file is modified or already removed, in either
+                # case we don't want to try to remove it.
+
+    def portable_data_hash(self):
+        """Get the portable data hash for this collection's manifest."""
+        if self._manifest_locator and self.committed():
+            # If the collection is already saved on the API server and
+            # committed, return the API server's PDH response.
+            return self._portable_data_hash
+        else:
+            stripped = self.portable_manifest_text().encode()
+            return '{}+{}'.format(hashlib.md5(stripped).hexdigest(), len(stripped))
+
+    @synchronized
+    def subscribe(self, callback):
+        if self._callback is None:
+            self._callback = callback
+        else:
+            raise errors.ArgumentError("A callback is already set on this collection.")
+
+    @synchronized
+    def unsubscribe(self):
+        if self._callback is not None:
+            self._callback = None
+
+    @synchronized
+    def notify(self, event, collection, name, item):
+        if self._callback:
+            self._callback(event, collection, name, item)
+        self.root_collection().notify(event, collection, name, item)
+
+    @synchronized
+    def __eq__(self, other):
+        if other is self:
+            return True
+        if not isinstance(other, RichCollectionBase):
+            return False
+        if len(self._items) != len(other):
+            return False
+        for k in self._items:
+            if k not in other:
+                return False
+            if self._items[k] != other[k]:
+                return False
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    @synchronized
+    def flush(self):
+        """Flush bufferblocks to Keep."""
+        for e in listvalues(self):
+            e.flush()
+
+
+class Collection(RichCollectionBase):
+    """Represents the root of an Arvados Collection.
+
+    This class is threadsafe.  The root collection object, all subcollections
+    and files are protected by a single lock (i.e. each access locks the entire
+    collection).
+
+    Brief summary of useful methods:
+
+    :To read an existing file:
+      `c.open("myfile", "r")`
+
+    :To write a new file:
+      `c.open("myfile", "w")`
+
+    :To determine if a file exists:
+      `c.find("myfile") is not None`
+
+    :To copy a file:
+      `c.copy("source", "dest")`
+
+    :To delete a file:
+      `c.remove("myfile")`
+
+    :To save to an existing collection record:
+      `c.save()`
+
+    :To save a new collection record:
+      `c.save_new()`
+
+    :To merge remote changes into this object:
+      `c.update()`
+
+    Must be associated with an API server Collection record (during
+    initialization, or using `save_new`) to use `save` or `update`.
+
+    """
+
+    def __init__(self, manifest_locator_or_text=None,
+                 api_client=None,
+                 keep_client=None,
+                 num_retries=None,
+                 parent=None,
+                 apiconfig=None,
+                 block_manager=None,
+                 replication_desired=None,
+                 put_threads=None):
+        """Collection constructor.
+
+        :manifest_locator_or_text:
+          An Arvados collection UUID, portable data hash, raw manifest
+          text, or (if creating an empty collection) None.
+
+        :parent:
+          the parent Collection, may be None.
+
+        :apiconfig:
+          A dict containing keys for ARVADOS_API_HOST and ARVADOS_API_TOKEN.
+          Prefer this over supplying your own api_client and keep_client (except in testing).
+          Will use default config settings if not specified.
+
+        :api_client:
+          The API client object to use for requests.  If not specified, create one using `apiconfig`.
+
+        :keep_client:
+          the Keep client to use for requests.  If not specified, create one using `apiconfig`.
+
+        :num_retries:
+          the number of retries for API and Keep requests.
+
+        :block_manager:
+          the block manager to use.  If not specified, create one.
+
+        :replication_desired:
+          How many copies should Arvados maintain. If None, API server default
+          configuration applies. If not None, this value will also be used
+          for determining the number of block copies being written.
+
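+        A minimal sketch (assumes default Arvados configuration is available
+        in the environment)::
+
+          c = Collection()                  # new, empty collection
+          with c.open("hello.txt", "w") as f:
+              f.write("hello world")
+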
+        """
+        super(Collection, self).__init__(parent)
+        self._api_client = api_client
+        self._keep_client = keep_client
+        self._block_manager = block_manager
+        self.replication_desired = replication_desired
+        self.put_threads = put_threads
+
+        if apiconfig:
+            self._config = apiconfig
+        else:
+            self._config = config.settings()
+
+        self.num_retries = num_retries if num_retries is not None else 0
+        self._manifest_locator = None
+        self._manifest_text = None
+        self._portable_data_hash = None
+        self._api_response = None
+        self._past_versions = set()
+
+        self.lock = threading.RLock()
+        self.events = None
+
+        if manifest_locator_or_text:
+            if re.match(arvados.util.keep_locator_pattern, manifest_locator_or_text):
+                self._manifest_locator = manifest_locator_or_text
+            elif re.match(arvados.util.collection_uuid_pattern, manifest_locator_or_text):
+                self._manifest_locator = manifest_locator_or_text
+                if not self._has_local_collection_uuid():
+                    self._has_remote_blocks = True
+            elif re.match(arvados.util.manifest_pattern, manifest_locator_or_text):
+                self._manifest_text = manifest_locator_or_text
+                if '+R' in self._manifest_text:
+                    self._has_remote_blocks = True
+            else:
+                raise errors.ArgumentError(
+                    "Argument to CollectionReader is not a manifest or a collection UUID")
+
+            try:
+                self._populate()
+            except (IOError, errors.SyntaxError) as e:
+                raise errors.ArgumentError("Error processing manifest text: %s", e)
+
+    def root_collection(self):
+        return self
+
+    def get_properties(self):
+        if self._api_response and self._api_response["properties"]:
+            return self._api_response["properties"]
+        else:
+            return {}
+
+    def get_trash_at(self):
+        if self._api_response and self._api_response["trash_at"]:
+            try:
+                return ciso8601.parse_datetime(self._api_response["trash_at"])
+            except ValueError:
+                return None
+        else:
+            return None
+
+    def stream_name(self):
+        return "."
+
+    def writable(self):
+        return True
+
+    @synchronized
+    def known_past_version(self, modified_at_and_portable_data_hash):
+        return modified_at_and_portable_data_hash in self._past_versions
+
+    @synchronized
+    @retry_method
+    def update(self, other=None, num_retries=None):
+        """Merge the latest collection on the API server with the current collection."""
+
+        if other is None:
+            if self._manifest_locator is None:
+                raise errors.ArgumentError("`other` is None but collection does not have a manifest_locator uuid")
+            response = self._my_api().collections().get(uuid=self._manifest_locator).execute(num_retries=num_retries)
+            if (self.known_past_version((response.get("modified_at"), response.get("portable_data_hash"))) and
+                response.get("portable_data_hash") != self.portable_data_hash()):
+                # The record on the server is different from our current one, but we've seen it before,
+                # so ignore it because it's already been merged.
+                # However, if it's the same as our current record, proceed with the update, because we want to update
+                # our tokens.
+                return
+            else:
+                self._past_versions.add((response.get("modified_at"), response.get("portable_data_hash")))
+            other = CollectionReader(response["manifest_text"])
+        baseline = CollectionReader(self._manifest_text)
+        self.apply(baseline.diff(other))
+        self._manifest_text = self.manifest_text()
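
A sketch of a typical call (assumes `c` is a writable Collection bound to an
API record, as above; the file name is hypothetical):

    c.update()                            # merge changes saved on the server
    with c.open('notes.txt', 'w') as f:   # then make further local edits
        f.write('hello\n')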
+
+    @synchronized
+    def _my_api(self):
+        if self._api_client is None:
+            self._api_client = ThreadSafeApiCache(self._config)
+            if self._keep_client is None:
+                self._keep_client = self._api_client.keep
+        return self._api_client
+
+    @synchronized
+    def _my_keep(self):
+        if self._keep_client is None:
+            if self._api_client is None:
+                self._my_api()
+            else:
+                self._keep_client = KeepClient(api_client=self._api_client)
+        return self._keep_client
+
+    @synchronized
+    def _my_block_manager(self):
+        if self._block_manager is None:
+            copies = (self.replication_desired or
+                      self._my_api()._rootDesc.get('defaultCollectionReplication',
+                                                   2))
+            self._block_manager = _BlockManager(self._my_keep(), copies=copies, put_threads=self.put_threads)
+        return self._block_manager
+
+    def _remember_api_response(self, response):
+        self._api_response = response
+        self._past_versions.add((response.get("modified_at"), response.get("portable_data_hash")))
+
+    def _populate_from_api_server(self):
+        # As in KeepClient itself, we must wait until the last
+        # possible moment to instantiate an API client, in order to
+        # avoid tripping up clients that don't have access to an API
+        # server.  If we do build one, make sure our Keep client uses
+        # it.  If instantiation fails, the exception propagates to the
+        # caller's except clause, just like any other Collection lookup
+        # failure.
+        self._remember_api_response(self._my_api().collections().get(
+            uuid=self._manifest_locator).execute(
+                num_retries=self.num_retries))
+        self._manifest_text = self._api_response['manifest_text']
+        self._portable_data_hash = self._api_response['portable_data_hash']
+        # If not overridden via kwargs, try to load replication_desired
+        # from the API server record.
+        if self.replication_desired is None:
+            self.replication_desired = self._api_response.get('replication_desired', None)
+
+    def _populate(self):
+        if self._manifest_text is None:
+            if self._manifest_locator is None:
+                return
+            else:
+                self._populate_from_api_server()
+        self._baseline_manifest = self._manifest_text
+        self._import_manifest(self._manifest_text)
+
+    def _has_collection_uuid(self):
+        return self._manifest_locator is not None and re.match(arvados.util.collection_uuid_pattern, self._manifest_locator)
+
+    def _has_local_collection_uuid(self):
+        return self._has_collection_uuid() and \
+            self._my_api()._rootDesc['uuidPrefix'] == self._manifest_locator.split('-')[0]
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        """Support scoped auto-commit in a with: block."""
+        if exc_type is None:
+            if self.writable() and self._has_collection_uuid():
+                self.save()
+        self.stop_threads()
+
+    def stop_threads(self):
+        if self._block_manager is not None:
+            self._block_manager.stop_threads()
+
+    @synchronized
+    def manifest_locator(self):
+        """Get the manifest locator, if any.
+
+        The manifest locator will be set when the collection is loaded from an
+        API server record or the portable data hash of a manifest.
+
+        The manifest locator will be None if the collection is newly created or
+        was created directly from manifest text.  The method `save_new()` will
+        assign a manifest locator.
+
+        """
+        return self._manifest_locator
+
+    @synchronized
+    def clone(self, new_parent=None, new_name=None, readonly=False, new_config=None):
+        if new_config is None:
+            new_config = self._config
+        if readonly:
+            newcollection = CollectionReader(parent=new_parent, apiconfig=new_config)
+        else:
+            newcollection = Collection(parent=new_parent, apiconfig=new_config)
+
+        newcollection._clonefrom(self)
+        return newcollection
+
+    @synchronized
+    def api_response(self):
+        """Returns information about this Collection fetched from the API server.
+
+        If the Collection exists in Keep but not the API server, currently
+        returns None.  Future versions may provide a synthetic response.
+
+        """
+        return self._api_response
+
+    def find_or_create(self, path, create_type):
+        """See `RichCollectionBase.find_or_create`"""
+        if path == ".":
+            return self
+        else:
+            return super(Collection, self).find_or_create(path[2:] if path.startswith("./") else path, create_type)
+
+    def find(self, path):
+        """See `RichCollectionBase.find`"""
+        if path == ".":
+            return self
+        else:
+            return super(Collection, self).find(path[2:] if path.startswith("./") else path)
+
+    def remove(self, path, recursive=False):
+        """See `RichCollectionBase.remove`"""
+        if path == ".":
+            raise errors.ArgumentError("Cannot remove '.'")
+        else:
+            return super(Collection, self).remove(path[2:] if path.startswith("./") else path, recursive)
+
+    @must_be_writable
+    @synchronized
+    @retry_method
+    def save(self,
+             properties=None,
+             storage_classes=None,
+             trash_at=None,
+             merge=True,
+             num_retries=None):
+        """Save collection to an existing collection record.
+
+        Commit pending buffer blocks to Keep, merge with remote record (if
+        merge=True, the default), and update the collection record. Returns
+        the current manifest text.
+
+        Will raise AssertionError if not associated with a collection record on
+        the API server.  If you want to save a manifest to Keep only, see
+        `save_new()`.
+
+        :properties:
+          Additional properties of the collection.  This value will replace
+          any existing properties of the collection.
+
+        :storage_classes:
+          Specify desirable storage classes to be used when writing data to Keep.
+
+        :trash_at:
+          A collection is *expiring* when it has a *trash_at* time in the future.
+          An expiring collection can be accessed as normal,
+          but is scheduled to be trashed automatically at the *trash_at* time.
+
+        :merge:
+          Update and merge remote changes before saving.  Otherwise, any
+          remote changes will be ignored and overwritten.
+
+        :num_retries:
+          Retry count on API calls (if None, use the collection default).
+
+        """
+        if properties and type(properties) is not dict:
+            raise errors.ArgumentError("properties must be dictionary type.")
+
+        if storage_classes and type(storage_classes) is not list:
+            raise errors.ArgumentError("storage_classes must be list type.")
+
+        if trash_at and type(trash_at) is not datetime.datetime:
+            raise errors.ArgumentError("trash_at must be datetime type.")
+
+        body = {}
+        if properties:
+            body["properties"] = properties
+        if storage_classes:
+            body["storage_classes_desired"] = storage_classes
+        if trash_at:
+            t = trash_at.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+            body["trash_at"] = t
+
+        if not self.committed():
+            if self._has_remote_blocks:
+                # Copy any remote blocks to the local cluster.
+                self._copy_remote_blocks(remote_blocks={})
+                self._has_remote_blocks = False
+            if not self._has_collection_uuid():
+                raise AssertionError("Collection manifest_locator is not a collection uuid.  Use save_new() for new collections.")
+            elif not self._has_local_collection_uuid():
+                raise AssertionError("Collection manifest_locator is from a remote cluster. Use save_new() to save it on the local cluster.")
+
+            self._my_block_manager().commit_all()
+
+            if merge:
+                self.update()
+
+            text = self.manifest_text(strip=False)
+            body['manifest_text'] = text
+
+            self._remember_api_response(self._my_api().collections().update(
+                uuid=self._manifest_locator,
+                body=body
+                ).execute(num_retries=num_retries))
+            self._manifest_text = self._api_response["manifest_text"]
+            self._portable_data_hash = self._api_response["portable_data_hash"]
+            self.set_committed(True)
+        elif body:
+            self._remember_api_response(self._my_api().collections().update(
+                uuid=self._manifest_locator,
+                body=body
+                ).execute(num_retries=num_retries))
+
+        return self._manifest_text
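
A hedged example of committing changes back to an existing record (assumes
`c` is writable and bound to a record on the local cluster; the property name
and storage class are illustrative):

    import datetime

    c.save(
        properties={'category': 'analysis'},  # replaces existing properties
        storage_classes=['default'],
        trash_at=datetime.datetime.utcnow() + datetime.timedelta(days=7),
        merge=True)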
+
+
+    @must_be_writable
+    @synchronized
+    @retry_method
+    def save_new(self, name=None,
+                 create_collection_record=True,
+                 owner_uuid=None,
+                 properties=None,
+                 storage_classes=None,
+                 trash_at=None,
+                 ensure_unique_name=False,
+                 num_retries=None):
+        """Save collection to a new collection record.
+
+        Commit pending buffer blocks to Keep and, when create_collection_record
+        is True (the default), create a new collection record.  After a new
+        record is created, this Collection object is associated with it, and
+        subsequent calls to `save()` will update that record.  Returns the
+        current manifest text.
+
+        :name:
+          The collection name.
+
+        :create_collection_record:
+           If True, create a collection record on the API server.
+           If False, only commit blocks to Keep and return the manifest text.
+
+        :owner_uuid:
+          The user or project UUID that will own this collection.
+          If None, defaults to the current user.
+
+        :properties:
+          Additional properties of the collection.  This value will replace
+          any existing properties of the collection.
+
+        :storage_classes:
+          Specify desirable storage classes to be used when writing data to Keep.
+
+        :trash_at:
+          A collection is *expiring* when it has a *trash_at* time in the future.
+          An expiring collection can be accessed as normal,
+          but is scheduled to be trashed automatically at the *trash_at* time.
+
+        :ensure_unique_name:
+          If True, ask the API server to rename the collection
+          if it conflicts with a collection with the same name and owner.  If
+          False, a name conflict will result in an error.
+
+        :num_retries:
+          Retry count on API calls (if None, use the collection default).
+
+        """
+        if properties and type(properties) is not dict:
+            raise errors.ArgumentError("properties must be dictionary type.")
+
+        if storage_classes and type(storage_classes) is not list:
+            raise errors.ArgumentError("storage_classes must be list type.")
+
+        if trash_at and type(trash_at) is not datetime.datetime:
+            raise errors.ArgumentError("trash_at must be datetime type.")
+
+        if self._has_remote_blocks:
+            # Copy any remote blocks to the local cluster.
+            self._copy_remote_blocks(remote_blocks={})
+            self._has_remote_blocks = False
+
+        self._my_block_manager().commit_all()
+        text = self.manifest_text(strip=False)
+
+        if create_collection_record:
+            if name is None:
+                name = "New collection"
+                ensure_unique_name = True
+
+            body = {"manifest_text": text,
+                    "name": name,
+                    "replication_desired": self.replication_desired}
+            if owner_uuid:
+                body["owner_uuid"] = owner_uuid
+            if properties:
+                body["properties"] = properties
+            if storage_classes:
+                body["storage_classes_desired"] = storage_classes
+            if trash_at:
+                t = trash_at.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+                body["trash_at"] = t
+
+            self._remember_api_response(self._my_api().collections().create(ensure_unique_name=ensure_unique_name, body=body).execute(num_retries=num_retries))
+            text = self._api_response["manifest_text"]
+
+            self._manifest_locator = self._api_response["uuid"]
+            self._portable_data_hash = self._api_response["portable_data_hash"]
+
+            self._manifest_text = text
+            self.set_committed(True)
+
+        return text
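
A sketch of creating a fresh record (the name is illustrative):

    manifest = c.save_new(name='analysis results', ensure_unique_name=True)

    # Or commit blocks to Keep only, without creating an API record:
    manifest = c.save_new(create_collection_record=False)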
+
+    _token_re = re.compile(r'(\S+)(\s+|$)')
+    _block_re = re.compile(r'[0-9a-f]{32}\+(\d+)(\+\S+)*')
+    _segment_re = re.compile(r'(\d+):(\d+):(\S+)')
+
+    def _unescape_manifest_path(self, path):
+        return re.sub(r'\\([0-3][0-7][0-7])', lambda m: chr(int(m.group(1), 8)), path)
+
+    @synchronized
+    def _import_manifest(self, manifest_text):
+        """Import a manifest into a `Collection`.
+
+        :manifest_text:
+          The manifest text to import from.
+
+        """
+        if len(self) > 0:
+            raise errors.ArgumentError("Can only import manifest into an empty collection")
+
+        STREAM_NAME = 0
+        BLOCKS = 1
+        SEGMENTS = 2
+
+        stream_name = None
+        state = STREAM_NAME
+
+        for token_and_separator in self._token_re.finditer(manifest_text):
+            tok = token_and_separator.group(1)
+            sep = token_and_separator.group(2)
+
+            if state == STREAM_NAME:
+                # starting a new stream
+                stream_name = self._unescape_manifest_path(tok)
+                blocks = []
+                segments = []
+                streamoffset = 0
+                state = BLOCKS
+                self.find_or_create(stream_name, COLLECTION)
+                continue
+
+            if state == BLOCKS:
+                block_locator = self._block_re.match(tok)
+                if block_locator:
+                    blocksize = int(block_locator.group(1))
+                    blocks.append(Range(tok, streamoffset, blocksize, 0))
+                    streamoffset += blocksize
+                else:
+                    state = SEGMENTS
+
+            if state == SEGMENTS:
+                file_segment = self._segment_re.match(tok)
+                if file_segment:
+                    pos = int(file_segment.group(1))
+                    size = int(file_segment.group(2))
+                    name = self._unescape_manifest_path(file_segment.group(3))
+                    if name.split('/')[-1] == '.':
+                        # placeholder for persisting an empty directory, not a real file
+                        if len(name) > 2:
+                            self.find_or_create(os.path.join(stream_name, name[:-2]), COLLECTION)
+                    else:
+                        filepath = os.path.join(stream_name, name)
+                        afile = self.find_or_create(filepath, FILE)
+                        if isinstance(afile, ArvadosFile):
+                            afile.add_segment(blocks, pos, size)
+                        else:
+                            raise errors.SyntaxError("File %s conflicts with stream of the same name." % filepath)
+                else:
+                    # error!
+                    raise errors.SyntaxError("Invalid manifest format, expected file segment but did not match format: '%s'" % tok)
+
+            if sep == "\n":
+                stream_name = None
+                state = STREAM_NAME
+
+        self.set_committed(True)
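
To make the parser's three states concrete, a small manifest has one stream
per line: a stream name, one or more block locators, then
position:size:filename segments (the locator and sizes below are
illustrative):

    . 930625b054ce894ac40596c3f5a0d947+33 0:33:md5sum.txt
    ./subdir 930625b054ce894ac40596c3f5a0d947+33 0:15:a.txt 15:18:b.txt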
+
+    @synchronized
+    def notify(self, event, collection, name, item):
+        if self._callback:
+            self._callback(event, collection, name, item)
+
+
+class Subcollection(RichCollectionBase):
+    """This is a subdirectory within a collection that doesn't have its own API
+    server record.
+
+    Subcollection locking falls under the umbrella lock of its root collection.
+
+    """
+
+    def __init__(self, parent, name):
+        super(Subcollection, self).__init__(parent)
+        self.lock = self.root_collection().lock
+        self._manifest_text = None
+        self.name = name
+        self.num_retries = parent.num_retries
+
+    def root_collection(self):
+        return self.parent.root_collection()
+
+    def writable(self):
+        return self.root_collection().writable()
+
+    def _my_api(self):
+        return self.root_collection()._my_api()
+
+    def _my_keep(self):
+        return self.root_collection()._my_keep()
+
+    def _my_block_manager(self):
+        return self.root_collection()._my_block_manager()
+
+    def stream_name(self):
+        return os.path.join(self.parent.stream_name(), self.name)
+
+    @synchronized
+    def clone(self, new_parent, new_name):
+        c = Subcollection(new_parent, new_name)
+        c._clonefrom(self)
+        return c
+
+    @must_be_writable
+    @synchronized
+    def _reparent(self, newparent, newname):
+        self.set_committed(False)
+        self.flush()
+        self.parent.remove(self.name, recursive=True)
+        self.parent = newparent
+        self.name = newname
+        self.lock = self.parent.root_collection().lock
+
+    @synchronized
+    def _get_manifest_text(self, stream_name, strip, normalize, only_committed=False):
+        """Encode empty directories by using an \056-named (".") empty file"""
+        if len(self._items) == 0:
+            return "%s %s 0:0:\\056\n" % (
+                escape(stream_name), config.EMPTY_BLOCK_LOCATOR)
+        return super(Subcollection, self)._get_manifest_text(stream_name,
+                                                             strip, normalize,
+                                                             only_committed)
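
The emitted placeholder line consists of the escaped stream name, the
zero-byte block locator, and a zero-length file whose name is the octal
escape for ".", e.g.:

    ./emptydir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\056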
+
+
+class CollectionReader(Collection):
+    """A read-only collection object.
+
+    Initialize from a collection UUID or portable data hash, or raw
+    manifest text.  See `Collection` constructor for detailed options.
+
+    """
+    def __init__(self, manifest_locator_or_text, *args, **kwargs):
+        self._in_init = True
+        super(CollectionReader, self).__init__(manifest_locator_or_text, *args, **kwargs)
+        self._in_init = False
+
+        # Forgo any locking, since the collection should never change once initialized.
+        self.lock = NoopLock()
+
+        # Backwards compatibility with old CollectionReader
+        # all_streams() and all_files()
+        self._streams = None
+
+    def writable(self):
+        return self._in_init
+
+    def _populate_streams(orig_func):
+        @functools.wraps(orig_func)
+        def populate_streams_wrapper(self, *args, **kwargs):
+            # Defer populating self._streams until needed since it creates a copy of the manifest.
+            if self._streams is None:
+                if self._manifest_text:
+                    self._streams = [sline.split()
+                                     for sline in self._manifest_text.split("\n")
+                                     if sline]
+                else:
+                    self._streams = []
+            return orig_func(self, *args, **kwargs)
+        return populate_streams_wrapper
+
+    @_populate_streams
+    def normalize(self):
+        """Normalize the streams returned by `all_streams`.
+
+        This method is kept for backwards compatibility and only affects the
+        behavior of `all_streams()` and `all_files()`.
+
+        """
+
+        # Rearrange streams
+        streams = {}
+        for s in self.all_streams():
+            for f in s.all_files():
+                streamname, filename = split(s.name() + "/" + f.name())
+                if streamname not in streams:
+                    streams[streamname] = {}
+                if filename not in streams[streamname]:
+                    streams[streamname][filename] = []
+                for r in f.segments:
+                    streams[streamname][filename].extend(s.locators_and_ranges(r.locator, r.range_size))
+
+        self._streams = [normalize_stream(s, streams[s])
+                         for s in sorted(streams)]
+
+    @_populate_streams
+    def all_streams(self):
+        return [StreamReader(s, self._my_keep(), num_retries=self.num_retries)
+                for s in self._streams]
+
+    @_populate_streams
+    def all_files(self):
+        for s in self.all_streams():
+            for f in s.all_files():
+                yield f
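
A read-only sketch using the legacy iteration API retained above (the UUID is
made up):

    from arvados.collection import CollectionReader

    reader = CollectionReader('zzzzz-4zz18-xxxxxxxxxxxxxxx')
    for f in reader.all_files():
        print(f.name(), f.size())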
diff --git a/sdk/python/arvados/commands/__init__.py b/sdk/python/arvados/commands/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/python/arvados/commands/_util.py b/sdk/python/arvados/commands/_util.py
new file mode 100644 (file)
index 0000000..d10d38e
--- /dev/null
@@ -0,0 +1,65 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import argparse
+import errno
+import os
+import logging
+import signal
+from future.utils import listitems, listvalues
+import sys
+
+def _pos_int(s):
+    num = int(s)
+    if num < 0:
+        raise ValueError("can't accept negative value: %s" % (num,))
+    return num
+
+retry_opt = argparse.ArgumentParser(add_help=False)
+retry_opt.add_argument('--retries', type=_pos_int, default=3, help="""
+Maximum number of times to retry server requests that encounter temporary
+failures (e.g., server down).  Default 3.""")
+
+def _ignore_error(error):
+    return None
+
+def _raise_error(error):
+    raise error
+
+def make_home_conf_dir(path, mode=None, errors='ignore'):
+    # Make the directory path under the user's home directory, making parent
+    # directories as needed.
+    # If the directory is newly created, and a mode is specified, chmod it
+    # with those permissions.
+    # If there's an error, return None if errors is 'ignore', else raise an
+    # exception.
+    error_handler = _ignore_error if (errors == 'ignore') else _raise_error
+    tilde_path = os.path.join('~', path)
+    abs_path = os.path.expanduser(tilde_path)
+    if abs_path == tilde_path:
+        return error_handler(ValueError("no home directory available"))
+    try:
+        os.makedirs(abs_path)
+    except OSError as error:
+        if error.errno != errno.EEXIST:
+            return error_handler(error)
+    else:
+        if mode is not None:
+            os.chmod(abs_path, mode)
+    return abs_path
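
For instance, ensuring a per-user cache directory exists (the path and mode
are illustrative):

    # Returns the absolute path, or None if no home directory is available
    # (errors defaults to 'ignore').
    cache_dir = make_home_conf_dir(os.path.join('.cache', 'arvados'), 0o700)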
+
+CAUGHT_SIGNALS = [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM]
+
+def exit_signal_handler(sigcode, frame):
+    logging.getLogger('arvados').error("Caught signal {}, exiting.".format(sigcode))
+    sys.exit(-sigcode)
+
+def install_signal_handlers():
+    global orig_signal_handlers
+    orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
+                            for sigcode in CAUGHT_SIGNALS}
+
+def restore_signal_handlers():
+    for sigcode, orig_handler in listitems(orig_signal_handlers):
+        signal.signal(sigcode, orig_handler)
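
Typical usage brackets a long-running operation, roughly:

    install_signal_handlers()
    try:
        do_long_running_work()    # hypothetical work function
    finally:
        restore_signal_handlers()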
diff --git a/sdk/python/arvados/commands/arv_copy.py b/sdk/python/arvados/commands/arv_copy.py
new file mode 100755 (executable)
index 0000000..8850d0b
--- /dev/null
@@ -0,0 +1,979 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# arv-copy [--recursive] [--no-recursive] --src src --dst dst object-uuid
+#
+# Copies an object from Arvados instance src to instance dst.
+#
+# By default, arv-copy recursively copies any dependent objects
+# necessary to make the object functional in the new instance
+# (e.g. for a pipeline instance, arv-copy copies the pipeline
+# template, input collection, docker images, git repositories). If
+# --no-recursive is given, arv-copy copies only the single record
+# identified by object-uuid.
+#
+# The user must have files $HOME/.config/arvados/{src}.conf and
+# $HOME/.config/arvados/{dst}.conf with valid login credentials for
+# instances src and dst.  If either of these files is not found,
+# arv-copy will issue an error.
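
An illustrative invocation (all identifiers are made up):

    arv-copy --src foo --dst bar --project-uuid bar-j7d0g-xxxxxxxxxxxxxxx \
        zzzzz-4zz18-xxxxxxxxxxxxxxx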
+
+from __future__ import division
+from future import standard_library
+from future.utils import listvalues
+standard_library.install_aliases()
+from past.builtins import basestring
+from builtins import object
+import argparse
+import contextlib
+import getpass
+import os
+import re
+import shutil
+import sys
+import logging
+import tempfile
+import urllib.parse
+
+import arvados
+import arvados.config
+import arvados.keep
+import arvados.util
+import arvados.commands._util as arv_cmd
+import arvados.commands.keepdocker
+import ruamel.yaml as yaml
+
+from arvados.api import OrderedJsonModel
+from arvados._version import __version__
+
+COMMIT_HASH_RE = re.compile(r'^[0-9a-f]{1,40}$')
+
+logger = logging.getLogger('arvados.arv-copy')
+
+# local_repo_dir records which git repositories from the Arvados source
+# instance have been checked out locally during this run, and to which
+# directories.
+# e.g. if repository 'twp' from src_arv has been cloned into
+# /tmp/gitfHkV9lu44A then local_repo_dir['twp'] = '/tmp/gitfHkV9lu44A'
+#
+local_repo_dir = {}
+
+# List of collections that have been copied in this session, and their
+# destination collection UUIDs.
+collections_copied = {}
+
+# Set of (repository, script_version) two-tuples of commits copied in git.
+scripts_copied = set()
+
+# The owner_uuid of the object being copied
+src_owner_uuid = None
+
+def main():
+    copy_opts = argparse.ArgumentParser(add_help=False)
+
+    copy_opts.add_argument(
+        '--version', action='version', version="%s %s" % (sys.argv[0], __version__),
+        help='Print version and exit.')
+    copy_opts.add_argument(
+        '-v', '--verbose', dest='verbose', action='store_true',
+        help='Verbose output.')
+    copy_opts.add_argument(
+        '--progress', dest='progress', action='store_true',
+        help='Report progress on copying collections. (default)')
+    copy_opts.add_argument(
+        '--no-progress', dest='progress', action='store_false',
+        help='Do not report progress on copying collections.')
+    copy_opts.add_argument(
+        '-f', '--force', dest='force', action='store_true',
+        help='Perform copy even if the object appears to exist at the remote destination.')
+    copy_opts.add_argument(
+        '--force-filters', action='store_true', default=False,
+        help="Copy pipeline template filters verbatim, even if they act differently on the destination cluster.")
+    copy_opts.add_argument(
+        '--src', dest='source_arvados', required=True,
+        help='The name of the source Arvados instance (required) - points at an Arvados config file. May be either a pathname to a config file, or (for example) "foo" as shorthand for $HOME/.config/arvados/foo.conf.')
+    copy_opts.add_argument(
+        '--dst', dest='destination_arvados', required=True,
+        help='The name of the destination Arvados instance (required) - points at an Arvados config file. May be either a pathname to a config file, or (for example) "foo" as shorthand for $HOME/.config/arvados/foo.conf.')
+    copy_opts.add_argument(
+        '--recursive', dest='recursive', action='store_true',
+        help='Recursively copy any dependencies for this object. (default)')
+    copy_opts.add_argument(
+        '--no-recursive', dest='recursive', action='store_false',
+        help='Do not copy any dependencies. NOTE: if this option is given, the copied object will need to be updated manually in order to be functional.')
+    copy_opts.add_argument(
+        '--dst-git-repo', dest='dst_git_repo',
+        help='The name of the destination git repository. Required when copying a pipeline recursively.')
+    copy_opts.add_argument(
+        '--project-uuid', dest='project_uuid',
+        help='The UUID of the project at the destination to which the pipeline should be copied.')
+    copy_opts.add_argument(
+        '--allow-git-http-src', action="store_true",
+        help='Allow cloning git repositories over insecure http')
+    copy_opts.add_argument(
+        '--allow-git-http-dst', action="store_true",
+        help='Allow pushing git repositories over insecure http')
+
+    copy_opts.add_argument(
+        'object_uuid',
+        help='The UUID of the object to be copied.')
+    copy_opts.set_defaults(progress=True)
+    copy_opts.set_defaults(recursive=True)
+
+    parser = argparse.ArgumentParser(
+        description='Copy a pipeline instance, template, workflow, or collection from one Arvados instance to another.',
+        parents=[copy_opts, arv_cmd.retry_opt])
+    args = parser.parse_args()
+
+    if args.verbose:
+        logger.setLevel(logging.DEBUG)
+    else:
+        logger.setLevel(logging.INFO)
+
+    # Create API clients for the source and destination instances
+    src_arv = api_for_instance(args.source_arvados)
+    dst_arv = api_for_instance(args.destination_arvados)
+
+    if not args.project_uuid:
+        args.project_uuid = dst_arv.users().current().execute(num_retries=args.retries)["uuid"]
+
+    # Identify the kind of object we have been given, and begin copying.
+    t = uuid_type(src_arv, args.object_uuid)
+    if t == 'Collection':
+        set_src_owner_uuid(src_arv.collections(), args.object_uuid, args)
+        result = copy_collection(args.object_uuid,
+                                 src_arv, dst_arv,
+                                 args)
+    elif t == 'PipelineInstance':
+        set_src_owner_uuid(src_arv.pipeline_instances(), args.object_uuid, args)
+        result = copy_pipeline_instance(args.object_uuid,
+                                        src_arv, dst_arv,
+                                        args)
+    elif t == 'PipelineTemplate':
+        set_src_owner_uuid(src_arv.pipeline_templates(), args.object_uuid, args)
+        result = copy_pipeline_template(args.object_uuid,
+                                        src_arv, dst_arv, args)
+    elif t == 'Workflow':
+        set_src_owner_uuid(src_arv.workflows(), args.object_uuid, args)
+        result = copy_workflow(args.object_uuid, src_arv, dst_arv, args)
+    else:
+        abort("cannot copy object {} of type {}".format(args.object_uuid, t))
+
+    # Clean up any outstanding temp git repositories.
+    for d in listvalues(local_repo_dir):
+        shutil.rmtree(d, ignore_errors=True)
+
+    # If no exception was thrown and the response does not have an
+    # error_token field, presume success
+    if not result or 'error_token' in result or 'uuid' not in result:
+        logger.error("API server returned an error result: {}".format(result))
+        exit(1)
+
+    logger.info("")
+    logger.info("Success: created copy with uuid {}".format(result['uuid']))
+    exit(0)
+
+def set_src_owner_uuid(resource, uuid, args):
+    global src_owner_uuid
+    c = resource.get(uuid=uuid).execute(num_retries=args.retries)
+    src_owner_uuid = c.get("owner_uuid")
+
+# api_for_instance(instance_name)
+#
+#     Creates an API client for the Arvados instance identified by
+#     instance_name.
+#
+#     If instance_name contains a slash, it is presumed to be a path
+#     (either relative or absolute) to a file with Arvados configuration
+#     settings.
+#
+#     Otherwise, it is presumed to name the configuration file
+#     $HOME/.config/arvados/instance_name.conf.
+#
+def api_for_instance(instance_name):
+    if '/' in instance_name:
+        config_file = instance_name
+    else:
+        config_file = os.path.join(os.environ['HOME'], '.config', 'arvados', "{}.conf".format(instance_name))
+
+    try:
+        cfg = arvados.config.load(config_file)
+    except (IOError, OSError) as e:
+        abort(("Could not open config file {}: {}\n" +
+               "You must make sure that your configuration tokens\n" +
+               "for Arvados instance {} are in {} and that this\n" +
+               "file is readable.").format(
+                   config_file, e, instance_name, config_file))
+
+    if 'ARVADOS_API_HOST' in cfg and 'ARVADOS_API_TOKEN' in cfg:
+        api_is_insecure = (
+            cfg.get('ARVADOS_API_HOST_INSECURE', '').lower() in set(
+                ['1', 't', 'true', 'y', 'yes']))
+        client = arvados.api('v1',
+                             host=cfg['ARVADOS_API_HOST'],
+                             token=cfg['ARVADOS_API_TOKEN'],
+                             insecure=api_is_insecure,
+                             model=OrderedJsonModel())
+    else:
+        abort('need ARVADOS_API_HOST and ARVADOS_API_TOKEN for {}'.format(instance_name))
+    return client
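
The config file read here is a set of KEY=VALUE lines, e.g.
$HOME/.config/arvados/foo.conf with made-up values:

    ARVADOS_API_HOST=foo.example.com
    ARVADOS_API_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    ARVADOS_API_HOST_INSECURE=false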
+
+# Check if git is available
+def check_git_availability():
+    try:
+        arvados.util.run_command(['git', '--help'])
+    except Exception:
+        abort('git command is not available. Please ensure git is installed.')
+
+# copy_pipeline_instance(pi_uuid, src, dst, args)
+#
+#    Copies a pipeline instance identified by pi_uuid from src to dst.
+#
+#    If the args.recursive option is set:
+#      1. Copies all input collections
+#           * For each component in the pipeline, include all collections
+#             listed as job dependencies for that component
+#      2. Copy docker images
+#      3. Copy git repositories
+#      4. Copy the pipeline template
+#
+#    The only changes made to the copied pipeline instance are:
+#      1. The original pipeline instance UUID is preserved in
+#         the 'properties' hash as 'copied_from_pipeline_instance_uuid'.
+#      2. The pipeline_template_uuid is changed to the new template uuid.
+#      3. The owner_uuid of the instance is changed to the user who
+#         copied it.
+#
+def copy_pipeline_instance(pi_uuid, src, dst, args):
+    # Fetch the pipeline instance record.
+    pi = src.pipeline_instances().get(uuid=pi_uuid).execute(num_retries=args.retries)
+
+    if args.recursive:
+        check_git_availability()
+
+        if not args.dst_git_repo:
+            abort('--dst-git-repo is required when copying a pipeline recursively.')
+        # Copy the pipeline template and save the copied template.
+        if pi.get('pipeline_template_uuid', None):
+            pt = copy_pipeline_template(pi['pipeline_template_uuid'],
+                                        src, dst, args)
+
+        # Copy input collections, docker images and git repos.
+        pi = copy_collections(pi, src, dst, args)
+        copy_git_repos(pi, src, dst, args.dst_git_repo, args)
+        copy_docker_images(pi, src, dst, args)
+
+        # Update the fields of the pipeline instance with the copied
+        # pipeline template.
+        if pi.get('pipeline_template_uuid', None):
+            pi['pipeline_template_uuid'] = pt['uuid']
+
+    else:
+        # not recursive
+        logger.info("Copying only pipeline instance %s.", pi_uuid)
+        logger.info("You are responsible for making sure all pipeline dependencies have been updated.")
+
+    # Update the pipeline instance properties, and create the new
+    # instance at dst.
+    pi['properties']['copied_from_pipeline_instance_uuid'] = pi_uuid
+    pi['description'] = "Pipeline copied from {}\n\n{}".format(
+        pi_uuid,
+        pi['description'] if pi.get('description', None) else '')
+
+    pi['owner_uuid'] = args.project_uuid
+
+    del pi['uuid']
+
+    new_pi = dst.pipeline_instances().create(body=pi, ensure_unique_name=True).execute(num_retries=args.retries)
+    return new_pi
+
+def filter_iter(arg):
+    """Iterate a filter string-or-list.
+
+    Pass in a filter field that can either be a string or list.
+    This will iterate elements as if the field had been written as a list.
+    """
+    if isinstance(arg, basestring):
+        return iter((arg,))
+    else:
+        return iter(arg)
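
For example:

    >>> list(filter_iter('script_version'))
    ['script_version']
    >>> list(filter_iter(['repository', 'script_version']))
    ['repository', 'script_version']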
+
+def migrate_repository_filter(repo_filter, src_repository, dst_repository):
+    """Update a single repository filter in-place for the destination.
+
+    If the filter checks that the repository is src_repository, it is
+    updated to check that the repository is dst_repository.  If it does
+    anything else, this function raises ValueError.
+    """
+    if src_repository is None:
+        raise ValueError("component does not specify a source repository")
+    elif dst_repository is None:
+        raise ValueError("no destination repository specified to update repository filter")
+    elif repo_filter[1:] == ['=', src_repository]:
+        repo_filter[2] = dst_repository
+    elif repo_filter[1:] == ['in', [src_repository]]:
+        repo_filter[2] = [dst_repository]
+    else:
+        raise ValueError("repository filter is not a simple source match")
+
+def migrate_script_version_filter(version_filter):
+    """Update a single script_version filter in-place for the destination.
+
+    Currently this function checks that all the filter operands are Git
+    commit hashes.  If they're not, it raises ValueError to indicate that
+    the filter is not portable.  It could be extended to make other
+    transformations in the future.
+    """
+    if not all(COMMIT_HASH_RE.match(v) for v in filter_iter(version_filter[2])):
+        raise ValueError("script_version filter is not limited to commit hashes")
+
+def attr_filtered(filter_, *attr_names):
+    """Return True if filter_ applies to any of attr_names, else False."""
+    return any((name == 'any') or (name in attr_names)
+               for name in filter_iter(filter_[0]))
+
+@contextlib.contextmanager
+def exception_handler(handler, *exc_types):
+    """If any exc_types are raised in the block, call handler on the exception."""
+    try:
+        yield
+    except exc_types as error:
+        handler(error)
+
+def migrate_components_filters(template_components, dst_git_repo):
+    """Update template component filters in-place for the destination.
+
+    template_components is a dictionary of components in a pipeline template.
+    This method walks over each component's filters, and updates them to have
+    identical semantics on the destination cluster.  It returns a list of
+    error strings that describe what filters could not be updated safely.
+
+    dst_git_repo is the name of the destination Git repository, which can
+    be None if that is not known.
+    """
+    errors = []
+    for cname, cspec in template_components.items():
+        def add_error(errmsg):
+            errors.append("{}: {}".format(cname, errmsg))
+        if not isinstance(cspec, dict):
+            add_error("value is not a component definition")
+            continue
+        src_repository = cspec.get('repository')
+        filters = cspec.get('filters', [])
+        if not isinstance(filters, list):
+            add_error("filters are not a list")
+            continue
+        for cfilter in filters:
+            if not (isinstance(cfilter, list) and (len(cfilter) == 3)):
+                add_error("malformed filter {!r}".format(cfilter))
+                continue
+            if attr_filtered(cfilter, 'repository'):
+                with exception_handler(add_error, ValueError):
+                    migrate_repository_filter(cfilter, src_repository, dst_git_repo)
+            if attr_filtered(cfilter, 'script_version'):
+                with exception_handler(add_error, ValueError):
+                    migrate_script_version_filter(cfilter)
+    return errors
+
+# copy_pipeline_template(pt_uuid, src, dst, args)
+#
+#    Copies a pipeline template identified by pt_uuid from src to dst.
+#
+#    If args.recursive is True, also copy any collections, docker
+#    images and git repositories that this template references.
+#
+#    The owner_uuid of the new template is changed to that of the user
+#    who copied the template.
+#
+#    Returns the copied pipeline template object.
+#
+def copy_pipeline_template(pt_uuid, src, dst, args):
+    # fetch the pipeline template from the source instance
+    pt = src.pipeline_templates().get(uuid=pt_uuid).execute(num_retries=args.retries)
+
+    if not args.force_filters:
+        filter_errors = migrate_components_filters(pt['components'], args.dst_git_repo)
+        if filter_errors:
+            abort("Template filters cannot be copied safely. Use --force-filters to copy anyway.\n" +
+                  "\n".join(filter_errors))
+
+    if args.recursive:
+        check_git_availability()
+
+        if not args.dst_git_repo:
+            abort('--dst-git-repo is required when copying a pipeline recursively.')
+        # Copy input collections, docker images and git repos.
+        pt = copy_collections(pt, src, dst, args)
+        copy_git_repos(pt, src, dst, args.dst_git_repo, args)
+        copy_docker_images(pt, src, dst, args)
+
+    pt['description'] = "Pipeline template copied from {}\n\n{}".format(
+        pt_uuid,
+        pt['description'] if pt.get('description', None) else '')
+    pt['name'] = "{} copied from {}".format(pt.get('name', ''), pt_uuid)
+    del pt['uuid']
+
+    pt['owner_uuid'] = args.project_uuid
+
+    return dst.pipeline_templates().create(body=pt, ensure_unique_name=True).execute(num_retries=args.retries)
+
+# copy_workflow(wf_uuid, src, dst, args)
+#
+#    Copies a workflow identified by wf_uuid from src to dst.
+#
+#    If args.recursive is True, also copy any collections
+#      referenced in the workflow definition yaml.
+#
+#    The owner_uuid of the new workflow is set to any given
+#      project_uuid or the user who copied the template.
+#
+#    Returns the copied workflow object.
+#
+def copy_workflow(wf_uuid, src, dst, args):
+    # fetch the workflow from the source instance
+    wf = src.workflows().get(uuid=wf_uuid).execute(num_retries=args.retries)
+
+    # copy collections and docker images
+    if args.recursive:
+        wf_def = yaml.safe_load(wf["definition"])
+        if wf_def is not None:
+            locations = []
+            docker_images = {}
+            graph = wf_def.get('$graph', None)
+            if graph is not None:
+                workflow_collections(graph, locations, docker_images)
+            else:
+                workflow_collections(wf_def, locations, docker_images)
+
+            if locations:
+                copy_collections(locations, src, dst, args)
+
+            for image in docker_images:
+                copy_docker_image(image, docker_images[image], src, dst, args)
+
+    # copy the workflow itself
+    del wf['uuid']
+    wf['owner_uuid'] = args.project_uuid
+    return dst.workflows().create(body=wf).execute(num_retries=args.retries)
+
+def workflow_collections(obj, locations, docker_images):
+    if isinstance(obj, dict):
+        loc = obj.get('location', None)
+        if loc is not None:
+            if loc.startswith("keep:"):
+                locations.append(loc[5:])
+
+        docker_image = obj.get('dockerImageId', None) or obj.get('dockerPull', None)
+        if docker_image is not None:
+            ds = docker_image.split(":", 1)
+            tag = ds[1] if len(ds) == 2 else 'latest'
+            docker_images[ds[0]] = tag
+
+        for x in obj:
+            workflow_collections(obj[x], locations, docker_images)
+    elif isinstance(obj, list):
+        for x in obj:
+            workflow_collections(x, locations, docker_images)
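
For example (illustrative values), this walk collects Keep references and
Docker images from a parsed workflow definition:

    locations = []
    docker_images = {}
    workflow_collections(
        {'steps': [{'location': 'keep:930625b054ce894ac40596c3f5a0d947+33/input.txt',
                    'dockerPull': 'ubuntu:16.04'}]},
        locations, docker_images)
    # locations     -> ['930625b054ce894ac40596c3f5a0d947+33/input.txt']
    # docker_images -> {'ubuntu': '16.04'}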
+
+# copy_collections(obj, src, dst, args)
+#
+#    Recursively copies all collections referenced by 'obj' from src
+#    to dst.  obj may be a dict or a list, in which case we run
+#    copy_collections on every value it contains. If it is a string,
+#    search it for any substring that matches a collection hash or uuid
+#    (this will find hidden references to collections like
+#      "input0": "$(file 3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq)")
+#
+#    Returns a copy of obj with any old collection uuids replaced by
+#    the new ones.
+#
+def copy_collections(obj, src, dst, args):
+
+    def copy_collection_fn(collection_match):
+        """Helper function for regex substitution: copies a single collection,
+        identified by the collection_match MatchObject, to the
+        destination.  Returns the destination collection uuid (or the
+        portable data hash if that's what src_id is).
+
+        """
+        src_id = collection_match.group(0)
+        if src_id not in collections_copied:
+            dst_col = copy_collection(src_id, src, dst, args)
+            if src_id in [dst_col['uuid'], dst_col['portable_data_hash']]:
+                collections_copied[src_id] = src_id
+            else:
+                collections_copied[src_id] = dst_col['uuid']
+        return collections_copied[src_id]
+
+    if isinstance(obj, basestring):
+        # Copy any collections identified in this string to dst, replacing
+        # them with the dst uuids as necessary.
+        obj = arvados.util.portable_data_hash_pattern.sub(copy_collection_fn, obj)
+        obj = arvados.util.collection_uuid_pattern.sub(copy_collection_fn, obj)
+        return obj
+    elif isinstance(obj, dict):
+        return type(obj)((v, copy_collections(obj[v], src, dst, args))
+                         for v in obj)
+    elif isinstance(obj, list):
+        return type(obj)(copy_collections(v, src, dst, args) for v in obj)
+    return obj
+
+def migrate_jobspec(jobspec, src, dst, dst_repo, args):
+    """Copy a job's script to the destination repository, and update its record.
+
+    Given a jobspec dictionary, this function finds the referenced script from
+    src and copies it to dst and dst_repo.  It also updates jobspec in place to
+    refer to names on the destination.
+    """
+    repo = jobspec.get('repository')
+    if repo is None:
+        return
+    # script_version is the "script_version" parameter from the source
+    # component or job.  If no script_version was supplied in the
+    # component or job, it is a mistake in the pipeline, but for the
+    # purposes of copying the repository, default to "master".
+    script_version = jobspec.get('script_version') or 'master'
+    script_key = (repo, script_version)
+    if script_key not in scripts_copied:
+        copy_git_repo(repo, src, dst, dst_repo, script_version, args)
+        scripts_copied.add(script_key)
+    jobspec['repository'] = dst_repo
+    repo_dir = local_repo_dir[repo]
+    for version_key in ['script_version', 'supplied_script_version']:
+        if version_key in jobspec:
+            jobspec[version_key] = git_rev_parse(jobspec[version_key], repo_dir)
+
+# copy_git_repos(p, src, dst, dst_repo, args)
+#
+#    Copies all git repositories referenced by pipeline instance or
+#    template 'p' from src to dst.
+#
+#    For each component c in the pipeline:
+#      * Copy git repositories named in c['repository'] and c['job']['repository'] if present
+#      * Rename script versions:
+#          * c['script_version']
+#          * c['job']['script_version']
+#          * c['job']['supplied_script_version']
+#        to the commit hashes they resolve to, since any symbolic
+#        names (tags, branches) are not preserved in the destination repo.
+#
+#    The pipeline object is updated in place with the new repository
+#    names.  The return value is undefined.
+#
+def copy_git_repos(p, src, dst, dst_repo, args):
+    for component in p['components'].values():
+        migrate_jobspec(component, src, dst, dst_repo, args)
+        if 'job' in component:
+            migrate_jobspec(component['job'], src, dst, dst_repo, args)
+
+def total_collection_size(manifest_text):
+    """Return the total number of bytes in this collection (excluding
+    duplicate blocks)."""
+
+    total_bytes = 0
+    locators_seen = {}
+    for line in manifest_text.splitlines():
+        words = line.split()
+        for word in words[1:]:
+            try:
+                loc = arvados.KeepLocator(word)
+            except ValueError:
+                continue  # this word isn't a locator, skip it
+            if loc.md5sum not in locators_seen:
+                locators_seen[loc.md5sum] = True
+                total_bytes += loc.size
+
+    return total_bytes
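
For example, each unique locator is counted once (the locator is
illustrative):

    manifest = (". 930625b054ce894ac40596c3f5a0d947+33 0:33:md5sum.txt\n"
                "./b 930625b054ce894ac40596c3f5a0d947+33 0:33:md5sum.txt\n")
    total_collection_size(manifest)   # -> 33, not 66: the block is shared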
+
+def create_collection_from(c, src, dst, args):
+    """Create a new collection record on dst, and copy Docker metadata if
+    available."""
+
+    collection_uuid = c['uuid']
+    del c['uuid']
+
+    if not c["name"]:
+        c['name'] = "copied from " + collection_uuid
+
+    if 'properties' in c:
+        del c['properties']
+
+    c['owner_uuid'] = args.project_uuid
+
+    dst_collection = dst.collections().create(body=c, ensure_unique_name=True).execute(num_retries=args.retries)
+
+    # Create docker_image_repo+tag and docker_image_hash links
+    # at the destination.
+    for link_class in ("docker_image_repo+tag", "docker_image_hash"):
+        docker_links = src.links().list(filters=[["head_uuid", "=", collection_uuid], ["link_class", "=", link_class]]).execute(num_retries=args.retries)['items']
+
+        for src_link in docker_links:
+            body = {key: src_link[key]
+                    for key in ['link_class', 'name', 'properties']}
+            body['head_uuid'] = dst_collection['uuid']
+            body['owner_uuid'] = args.project_uuid
+
+            lk = dst.links().create(body=body).execute(num_retries=args.retries)
+            logger.debug('created dst link {}'.format(lk))
+
+    return dst_collection
+
+# copy_collection(obj_uuid, src, dst, args)
+#
+#    Copies the collection identified by obj_uuid from src to dst.
+#    Returns the collection object created at dst.
+#
+#    If args.progress is True, produce a human-friendly progress
+#    report.
+#
+#    If a collection with the desired portable_data_hash already
+#    exists at dst, and args.force is False, copy_collection returns
+#    the existing collection without copying any blocks.  Otherwise
+#    (if no collection exists or if args.force is True)
+#    copy_collection copies all of the collection data blocks from src
+#    to dst.
+#
+#    For this application, it is critical to preserve the
+#    collection's manifest hash, which is not guaranteed with the
+#    arvados.CollectionReader and arvados.CollectionWriter classes.
+#    Copying each block in the collection manually, followed by
+#    the manifest block, ensures that the collection's manifest
+#    hash will not change.
+#
+def copy_collection(obj_uuid, src, dst, args):
+    if arvados.util.keep_locator_pattern.match(obj_uuid):
+        # If the obj_uuid is a portable data hash, it might not be uniquely
+        # identified with a particular collection.  As a result, it is
+        # ambiguous as to what name to use for the copy.  Apply some heuristics
+        # to pick which collection to get the name from.
+        srccol = src.collections().list(
+            filters=[['portable_data_hash', '=', obj_uuid]],
+            order="created_at asc"
+            ).execute(num_retries=args.retries)
+
+        items = srccol.get("items")
+
+        if not items:
+            logger.warning("Could not find collection with portable data hash %s", obj_uuid)
+            return
+
+        c = None
+
+        if len(items) == 1:
+            # There's only one collection with the PDH, so use that.
+            c = items[0]
+        if not c:
+            # See if there is a collection that's in the same project
+            # as the root item (usually a pipeline) being copied.
+            for i in items:
+                if i.get("owner_uuid") == src_owner_uuid and i.get("name"):
+                    c = i
+                    break
+        if not c:
+            # Didn't find any collections located in the same project, so
+            # pick the oldest collection that has a name assigned to it.
+            for i in items:
+                if i.get("name"):
+                    c = i
+                    break
+        if not c:
+            # None of the collections have names (?!), so just pick the
+            # first one.
+            c = items[0]
+
+        # list() doesn't return manifest text (and we don't want it to,
+        # because we don't need the same manifest text sent to us 50
+        # times) so go and retrieve the collection object directly
+        # which will include the manifest text.
+        c = src.collections().get(uuid=c["uuid"]).execute(num_retries=args.retries)
+    else:
+        # Assume this is an actual collection uuid, so fetch it directly.
+        c = src.collections().get(uuid=obj_uuid).execute(num_retries=args.retries)
+
+    # If a collection with this hash already exists at the
+    # destination, and 'force' is not true, just return that
+    # collection.
+    if not args.force:
+        if 'portable_data_hash' in c:
+            colhash = c['portable_data_hash']
+        else:
+            colhash = c['uuid']
+        dstcol = dst.collections().list(
+            filters=[['portable_data_hash', '=', colhash]]
+        ).execute(num_retries=args.retries)
+        if dstcol['items_available'] > 0:
+            for d in dstcol['items']:
+                if ((args.project_uuid == d['owner_uuid']) and
+                    (c.get('name') == d['name']) and
+                    (c['portable_data_hash'] == d['portable_data_hash'])):
+                    return d
+            c['manifest_text'] = dst.collections().get(
+                uuid=dstcol['items'][0]['uuid']
+            ).execute(num_retries=args.retries)['manifest_text']
+            return create_collection_from(c, src, dst, args)
+
+    # Fetch the collection's manifest.
+    manifest = c['manifest_text']
+    logger.debug("Copying collection %s with manifest: <%s>", obj_uuid, manifest)
+
+    # Copy each block from src_keep to dst_keep.
+    # Use the newly signed locators returned from dst_keep to build
+    # a new manifest as we go.
+    src_keep = arvados.keep.KeepClient(api_client=src, num_retries=args.retries)
+    dst_keep = arvados.keep.KeepClient(api_client=dst, num_retries=args.retries)
+    dst_manifest = ""
+    dst_locators = {}
+    bytes_written = 0
+    bytes_expected = total_collection_size(manifest)
+    if args.progress:
+        progress_writer = ProgressWriter(human_progress)
+    else:
+        progress_writer = None
+
+    for line in manifest.splitlines():
+        words = line.split()
+        dst_manifest += words[0]
+        for word in words[1:]:
+            try:
+                loc = arvados.KeepLocator(word)
+            except ValueError:
+                # If 'word' can't be parsed as a locator,
+                # presume it's a filename.
+                dst_manifest += ' ' + word
+                continue
+            blockhash = loc.md5sum
+            # copy this block if we haven't seen it before
+            # (otherwise, just reuse the existing dst_locator)
+            if blockhash not in dst_locators:
+                logger.debug("Copying block %s (%s bytes)", blockhash, loc.size)
+                if progress_writer:
+                    progress_writer.report(obj_uuid, bytes_written, bytes_expected)
+                data = src_keep.get(word)
+                dst_locator = dst_keep.put(data)
+                dst_locators[blockhash] = dst_locator
+                bytes_written += loc.size
+            dst_manifest += ' ' + dst_locators[blockhash]
+        dst_manifest += "\n"
+
+    if progress_writer:
+        progress_writer.report(obj_uuid, bytes_written, bytes_expected)
+        progress_writer.finish()
+
+    # Copy the manifest and save the collection.
+    logger.debug('saving %s with manifest: <%s>', obj_uuid, dst_manifest)
+
+    c['manifest_text'] = dst_manifest
+    return create_collection_from(c, src, dst, args)
+
+def select_git_url(api, repo_name, retries, allow_insecure_http, allow_insecure_http_opt):
+    r = api.repositories().list(
+        filters=[['name', '=', repo_name]]).execute(num_retries=retries)
+    if r['items_available'] != 1:
+        raise Exception('cannot identify repo {}; {} repos found'
+                        .format(repo_name, r['items_available']))
+
+    https_url = [c for c in r['items'][0]["clone_urls"] if c.startswith("https:")]
+    http_url = [c for c in r['items'][0]["clone_urls"] if c.startswith("http:")]
+    other_url = [c for c in r['items'][0]["clone_urls"] if not c.startswith("http")]
+
+    priority = https_url + other_url + http_url
+
+    git_config = []
+    git_url = None
+    for url in priority:
+        if url.startswith("http"):
+            u = urllib.parse.urlsplit(url)
+            baseurl = urllib.parse.urlunsplit((u.scheme, u.netloc, "", "", ""))
+            git_config = ["-c", "credential.%s/.username=none" % baseurl,
+                          "-c", "credential.%s/.helper=!cred(){ cat >/dev/null; if [ \"$1\" = get ]; then echo password=$ARVADOS_API_TOKEN; fi; };cred" % baseurl]
+        else:
+            git_config = []
+
+        try:
+            logger.debug("trying %s", url)
+            arvados.util.run_command(["git"] + git_config + ["ls-remote", url],
+                                      env={"HOME": os.environ["HOME"],
+                                           "ARVADOS_API_TOKEN": api.api_token,
+                                           "GIT_ASKPASS": "/bin/false"})
+        except arvados.errors.CommandFailedError:
+            pass
+        else:
+            git_url = url
+            break
+
+    if not git_url:
+        raise Exception('Cannot access git repository, tried {}'
+                        .format(priority))
+
+    if git_url.startswith("http:"):
+        if allow_insecure_http:
+            logger.warning("Using insecure git url %s but will allow this because %s", git_url, allow_insecure_http_opt)
+        else:
+            raise Exception("Refusing to use insecure git url %s, use %s if you really want this." % (git_url, allow_insecure_http_opt))
+
+    return (git_url, git_config)
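+# For an https URL like https://git.example.com (hypothetical), the
+# git_config returned above looks like:
+#   ["-c", "credential.https://git.example.com/.username=none",
+#    "-c", "credential.https://git.example.com/.helper=!cred(){ ... };cred"]
+# i.e. git authenticates as user "none" with $ARVADOS_API_TOKEN as the
+# password, so no credentials are stored on disk.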
+
+
+# copy_git_repo(src_git_repo, src, dst, dst_git_repo, script_version, args)
+#
+#    Copies commits from git repository 'src_git_repo' on Arvados
+#    instance 'src' to 'dst_git_repo' on 'dst'.  Both src_git_repo
+#    and dst_git_repo are repository names, not UUIDs (e.g. "arvados"
+#    or "jsmith").
+#
+#    All commits will be copied to a destination branch named for the
+#    source repository URL.
+#
+#    The destination repository must already exist.
+#
+#    The user running this command must be authenticated
+#    to both repositories.
+#
+def copy_git_repo(src_git_repo, src, dst, dst_git_repo, script_version, args):
+    # Identify the fetch and push URLs for the git repositories.
+
+    (src_git_url, src_git_config) = select_git_url(src, src_git_repo, args.retries, args.allow_git_http_src, "--allow-git-http-src")
+    (dst_git_url, dst_git_config) = select_git_url(dst, dst_git_repo, args.retries, args.allow_git_http_dst, "--allow-git-http-dst")
+
+    logger.debug('src_git_url: {}'.format(src_git_url))
+    logger.debug('dst_git_url: {}'.format(dst_git_url))
+
+    dst_branch = re.sub(r'\W+', '_', "{}_{}".format(src_git_url, script_version))
+
+    # Copy git commits from src repo to dst repo.
+    if src_git_repo not in local_repo_dir:
+        local_repo_dir[src_git_repo] = tempfile.mkdtemp()
+        arvados.util.run_command(
+            ["git"] + src_git_config + ["clone", "--bare", src_git_url,
+             local_repo_dir[src_git_repo]],
+            cwd=os.path.dirname(local_repo_dir[src_git_repo]),
+            env={"HOME": os.environ["HOME"],
+                 "ARVADOS_API_TOKEN": src.api_token,
+                 "GIT_ASKPASS": "/bin/false"})
+        arvados.util.run_command(
+            ["git", "remote", "add", "dst", dst_git_url],
+            cwd=local_repo_dir[src_git_repo])
+    arvados.util.run_command(
+        ["git", "branch", dst_branch, script_version],
+        cwd=local_repo_dir[src_git_repo])
+    arvados.util.run_command(["git"] + dst_git_config + ["push", "dst", dst_branch],
+                             cwd=local_repo_dir[src_git_repo],
+                             env={"HOME": os.environ["HOME"],
+                                  "ARVADOS_API_TOKEN": dst.api_token,
+                                  "GIT_ASKPASS": "/bin/false"})
+
+def copy_docker_images(pipeline, src, dst, args):
+    """Copy any docker images named in the pipeline components'
+    runtime_constraints field from src to dst."""
+
+    logger.debug('copy_docker_images: {}'.format(pipeline['uuid']))
+    for c_name, c_info in pipeline['components'].items():
+        if ('runtime_constraints' in c_info and
+            'docker_image' in c_info['runtime_constraints']):
+            copy_docker_image(
+                c_info['runtime_constraints']['docker_image'],
+                c_info['runtime_constraints'].get('docker_image_tag', 'latest'),
+                src, dst, args)
+
+
+def copy_docker_image(docker_image, docker_image_tag, src, dst, args):
+    """Copy the docker image identified by docker_image and
+    docker_image_tag from src to dst. Create appropriate
+    docker_image_repo+tag and docker_image_hash links at dst.
+
+    """
+
+    logger.debug('copying docker image {}:{}'.format(docker_image, docker_image_tag))
+
+    # Find the link identifying this docker image.
+    docker_image_list = arvados.commands.keepdocker.list_images_in_arv(
+        src, args.retries, docker_image, docker_image_tag)
+    if docker_image_list:
+        image_uuid, image_info = docker_image_list[0]
+        logger.debug('copying collection {} {}'.format(image_uuid, image_info))
+
+        # Copy the collection it refers to.
+        dst_image_col = copy_collection(image_uuid, src, dst, args)
+    elif arvados.util.keep_locator_pattern.match(docker_image):
+        dst_image_col = copy_collection(docker_image, src, dst, args)
+    else:
+        logger.warning('Could not find docker image {}:{}'.format(docker_image, docker_image_tag))
+
+# git_rev_parse(rev, repo)
+#
+#    Returns the 40-character commit hash corresponding to 'rev' in
+#    git repository 'repo' (which must be the path of a local git
+#    repository)
+#
+def git_rev_parse(rev, repo):
+    gitout, giterr = arvados.util.run_command(
+        ['git', 'rev-parse', rev], cwd=repo)
+    return gitout.strip()
+
+# uuid_type(api, object_uuid)
+#
+#    Returns the name of the class that object_uuid belongs to, based on
+#    the second field of the uuid.  This function consults the api's
+#    schema to identify the object class.
+#
+#    It returns a string such as 'Collection', 'PipelineInstance', etc.
+#
+#    Special case: if handed a Keep locator hash, return 'Collection'.
+#
+def uuid_type(api, object_uuid):
+    if re.match(r'^[a-f0-9]{32}\+[0-9]+(\+[A-Za-z0-9+-]+)?$', object_uuid):
+        return 'Collection'
+    p = object_uuid.split('-')
+    if len(p) == 3:
+        type_prefix = p[1]
+        for k in api._schema.schemas:
+            obj_class = api._schema.schemas[k].get('uuidPrefix', None)
+            if type_prefix == obj_class:
+                return k
+    return None
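+# Examples (assuming the standard Arvados schema, where the collection
+# uuidPrefix is "4zz18"; values are hypothetical):
+#   uuid_type(api, "zzzzz-4zz18-zzzzzzzzzzzzzzz")          # -> 'Collection'
+#   uuid_type(api, "930625b054ce894ac40596c908e1fecf+33")  # -> 'Collection'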
+
+def abort(msg, code=1):
+    logger.info("arv-copy: %s", msg)
+    sys.exit(code)
+
+
+# Code for reporting on the progress of a collection upload.
+# Stolen from arvados.commands.put.ArvPutCollectionWriter
+# TODO(twp): figure out how to refactor into a shared library
+# (may involve refactoring some arvados.commands.arv_copy.copy_collection
+# code)
+
+def machine_progress(obj_uuid, bytes_written, bytes_expected):
+    return "{} {}: {} {} written {} total\n".format(
+        sys.argv[0],
+        os.getpid(),
+        obj_uuid,
+        bytes_written,
+        -1 if (bytes_expected is None) else bytes_expected)
+
+def human_progress(obj_uuid, bytes_written, bytes_expected):
+    if bytes_expected:
+        return "\r{}: {}M / {}M {:.1%} ".format(
+            obj_uuid,
+            bytes_written >> 20, bytes_expected >> 20,
+            float(bytes_written) / bytes_expected)
+    else:
+        return "\r{}: {} ".format(obj_uuid, bytes_written)
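+# e.g. human_progress("zzzzz-4zz18-zzzzzzzzzzzzzzz", 5 << 20, 10 << 20)
+# returns "\rzzzzz-4zz18-zzzzzzzzzzzzzzz: 5M / 10M 50.0% ".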
+
+class ProgressWriter(object):
+    _progress_func = None
+    outfile = sys.stderr
+
+    def __init__(self, progress_func):
+        self._progress_func = progress_func
+
+    def report(self, obj_uuid, bytes_written, bytes_expected):
+        if self._progress_func is not None:
+            self.outfile.write(
+                self._progress_func(obj_uuid, bytes_written, bytes_expected))
+
+    def finish(self):
+        self.outfile.write("\n")
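+
+# Minimal usage sketch (hypothetical uuid), mirroring how
+# copy_collection drives progress reporting:
+#   pw = ProgressWriter(human_progress)
+#   pw.report("zzzzz-4zz18-zzzzzzzzzzzzzzz", 0, 1 << 20)
+#   pw.finish()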
+
+if __name__ == '__main__':
+    main()
diff --git a/sdk/python/arvados/commands/get.py b/sdk/python/arvados/commands/get.py
new file mode 100755 (executable)
index 0000000..1e52714
--- /dev/null
@@ -0,0 +1,321 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import argparse
+import hashlib
+import os
+import re
+import string
+import sys
+import logging
+
+import arvados
+import arvados.commands._util as arv_cmd
+import arvados.util as util
+
+from arvados._version import __version__
+
+api_client = None
+logger = logging.getLogger('arvados.arv-get')
+
+parser = argparse.ArgumentParser(
+    description='Copy data from Keep to a local file or pipe.',
+    parents=[arv_cmd.retry_opt])
+parser.add_argument('--version', action='version',
+                    version="%s %s" % (sys.argv[0], __version__),
+                    help='Print version and exit.')
+parser.add_argument('locator', type=str,
+                    help="""
+Collection locator, optionally with a file path or prefix.
+""")
+parser.add_argument('destination', type=str, nargs='?', default='-',
+                    help="""
+Local file or directory where the data is to be written. Default: stdout.
+""")
+group = parser.add_mutually_exclusive_group()
+group.add_argument('--progress', action='store_true',
+                   help="""
+Display human-readable progress on stderr (bytes and, if possible,
+percentage of total data size). This is the default behavior when it
+is not expected to interfere with the output: specifically, stderr is
+a tty _and_ either stdout is not a tty, or output is being written to
+named files rather than stdout.
+""")
+group.add_argument('--no-progress', action='store_true',
+                   help="""
+Do not display human-readable progress on stderr.
+""")
+group.add_argument('--batch-progress', action='store_true',
+                   help="""
+Display machine-readable progress on stderr (bytes and, if known,
+total data size).
+""")
+group = parser.add_mutually_exclusive_group()
+group.add_argument('--hash',
+                    help="""
+Display the hash of each file as it is read from Keep, using the given
+hash algorithm. Supported algorithms include md5, sha1, sha224,
+sha256, sha384, and sha512.
+""")
+group.add_argument('--md5sum', action='store_const',
+                    dest='hash', const='md5',
+                    help="""
+Display the MD5 hash of each file as it is read from Keep.
+""")
+parser.add_argument('-n', action='store_true',
+                    help="""
+Do not write any data -- just read from Keep, and report md5sums if
+requested.
+""")
+parser.add_argument('-r', action='store_true',
+                    help="""
+Retrieve all files in the specified collection/prefix. This is the
+default behavior if the "locator" argument ends with a forward slash.
+""")
+group = parser.add_mutually_exclusive_group()
+group.add_argument('-f', action='store_true',
+                   help="""
+Overwrite existing files while writing. The default behavior is to
+refuse to write *anything* if any of the output files already
+exist. As a special case, -f is not needed to write to stdout.
+""")
+parser.add_argument('-v', action='count', default=0,
+                    help="""
+Once for verbose mode, twice for debug mode.
+""")
+group.add_argument('--skip-existing', action='store_true',
+                   help="""
+Skip files that already exist. The default behavior is to refuse to
+write *anything* if any files exist that would have to be
+overwritten. This option causes even devices, sockets, and fifos to be
+skipped.
+""")
+group.add_argument('--strip-manifest', action='store_true', default=False,
+                   help="""
+When getting a collection manifest, strip its access tokens before writing
+it.
+""")
+
+def parse_arguments(arguments, stdout, stderr):
+    args = parser.parse_args(arguments)
+
+    if args.locator[-1] == os.sep:
+        args.r = True
+    if (args.r and
+        not args.n and
+        not (args.destination and
+             os.path.isdir(args.destination))):
+        parser.error('Destination is not a directory.')
+    if not args.r and (os.path.isdir(args.destination) or
+                       args.destination[-1] == os.path.sep):
+        args.destination = os.path.join(args.destination,
+                                        os.path.basename(args.locator))
+        logger.debug("Appended source file name to destination directory: %s",
+                     args.destination)
+
+    if args.destination == '/dev/stdout':
+        args.destination = "-"
+
+    if args.destination == '-':
+        # Normally you have to use -f to write to a file (or device) that
+        # already exists, but "-" and "/dev/stdout" are common enough to
+        # merit a special exception.
+        args.f = True
+    else:
+        args.destination = args.destination.rstrip(os.sep)
+
+    # Turn on --progress by default if stderr is a tty and output is
+    # either going to a named file, or going (via stdout) to something
+    # that isn't a tty.
+    if (not (args.batch_progress or args.no_progress)
+        and stderr.isatty()
+        and (args.destination != '-'
+             or not stdout.isatty())):
+        args.progress = True
+    return args
+
+def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
+    global api_client
+
+    if stdout is sys.stdout and hasattr(stdout, 'buffer'):
+        # in Python 3, write to stdout as binary
+        stdout = stdout.buffer
+
+    args = parse_arguments(arguments, stdout, stderr)
+    logger.setLevel(logging.WARNING - 10 * args.v)
+
+    request_id = arvados.util.new_request_id()
+    logger.info('X-Request-Id: '+request_id)
+
+    if api_client is None:
+        api_client = arvados.api('v1', request_id=request_id)
+
+    r = re.search(r'^(.*?)(/.*)?$', args.locator)
+    col_loc = r.group(1)
+    get_prefix = r.group(2)
+    if args.r and not get_prefix:
+        get_prefix = os.sep
+
+    # User asked to download the collection's manifest
+    if not get_prefix:
+        if not args.n:
+            open_flags = os.O_CREAT | os.O_WRONLY
+            if not args.f:
+                open_flags |= os.O_EXCL
+            try:
+                if args.destination == "-":
+                    write_block_or_manifest(
+                        dest=stdout, src=col_loc,
+                        api_client=api_client, args=args)
+                else:
+                    out_fd = os.open(args.destination, open_flags)
+                    with os.fdopen(out_fd, 'wb') as out_file:
+                        write_block_or_manifest(
+                            dest=out_file, src=col_loc,
+                            api_client=api_client, args=args)
+            except (IOError, OSError) as error:
+                logger.error("can't write to '{}': {}".format(args.destination, error))
+                return 1
+            except (arvados.errors.ApiError, arvados.errors.KeepReadError) as error:
+                logger.error("failed to download '{}': {}".format(col_loc, error))
+                return 1
+            except arvados.errors.ArgumentError as error:
+                if 'Argument to CollectionReader' in str(error):
+                    logger.error("error reading collection: {}".format(error))
+                    return 1
+                else:
+                    raise
+        return 0
+
+    try:
+        reader = arvados.CollectionReader(
+            col_loc, api_client=api_client, num_retries=args.retries)
+    except Exception as error:
+        logger.error("failed to read collection: {}".format(error))
+        return 1
+
+    # Scan the collection. Make an array of (stream, file, local
+    # destination filename) tuples, and add up total size to extract.
+    todo = []
+    todo_bytes = 0
+    try:
+        if get_prefix == os.sep:
+            item = reader
+        else:
+            item = reader.find('.' + get_prefix)
+
+        if isinstance(item, (arvados.collection.Subcollection, arvados.collection.CollectionReader)):
+            # If the user asked for a file and we got a subcollection, error out.
+            if get_prefix[-1] != os.sep:
+                logger.error("requested file '{}' is in fact a subcollection. Append a trailing '/' to download it.".format('.' + get_prefix))
+                return 1
+            # If the user asked for stdout as a destination, error out.
+            elif args.destination == '-':
+                logger.error("cannot use 'stdout' as destination when downloading multiple files.")
+                return 1
+            # User asked for a subcollection, and that's what was found. Add up total size
+            # to download.
+            for s, f in files_in_collection(item):
+                dest_path = os.path.join(
+                    args.destination,
+                    os.path.join(s.stream_name(), f.name)[len(get_prefix)+1:])
+                if (not (args.n or args.f or args.skip_existing) and
+                    os.path.exists(dest_path)):
+                    logger.error('Local file %s already exists.' % (dest_path,))
+                    return 1
+                todo += [(s, f, dest_path)]
+                todo_bytes += f.size()
+        elif isinstance(item, arvados.arvfile.ArvadosFile):
+            todo += [(item.parent, item, args.destination)]
+            todo_bytes += item.size()
+        else:
+            logger.error("'{}' not found.".format('.' + get_prefix))
+            return 1
+    except (IOError, arvados.errors.NotFoundError) as e:
+        logger.error(e)
+        return 1
+
+    out_bytes = 0
+    for s, f, outfilename in todo:
+        outfile = None
+        digestor = None
+        if not args.n:
+            if outfilename == "-":
+                outfile = stdout
+            else:
+                if args.skip_existing and os.path.exists(outfilename):
+                    logger.debug('Local file %s exists. Skipping.', outfilename)
+                    continue
+                elif not args.f and (os.path.isfile(outfilename) or
+                                     os.path.isdir(outfilename)):
+                    # Good thing we looked again: apparently this file wasn't
+                    # here yet when we checked earlier.
+                    logger.error('Local file %s already exists.' % (outfilename,))
+                    return 1
+                if args.r:
+                    arvados.util.mkdir_dash_p(os.path.dirname(outfilename))
+                try:
+                    outfile = open(outfilename, 'wb')
+                except Exception as error:
+                    logger.error('Open(%s) failed: %s' % (outfilename, error))
+                    return 1
+        if args.hash:
+            digestor = hashlib.new(args.hash)
+        try:
+            with s.open(f.name, 'rb') as file_reader:
+                for data in file_reader.readall():
+                    if outfile:
+                        outfile.write(data)
+                    if digestor:
+                        digestor.update(data)
+                    out_bytes += len(data)
+                    if args.progress:
+                        stderr.write('\r%d MiB / %d MiB %.1f%%' %
+                                     (out_bytes >> 20,
+                                      todo_bytes >> 20,
+                                      (100
+                                       if todo_bytes==0
+                                       else 100.0*out_bytes/todo_bytes)))
+                    elif args.batch_progress:
+                        stderr.write('%s %d read %d total\n' %
+                                     (sys.argv[0], os.getpid(),
+                                      out_bytes, todo_bytes))
+            if digestor:
+                stderr.write("%s  %s/%s\n"
+                             % (digestor.hexdigest(), s.stream_name(), f.name))
+        except KeyboardInterrupt:
+            if outfile and (outfile.fileno() > 2) and not outfile.closed:
+                os.unlink(outfile.name)
+            break
+        finally:
+            if outfile is not None and outfile is not stdout:
+                outfile.close()
+
+    if args.progress:
+        stderr.write('\n')
+    return 0
+
+def files_in_collection(c):
+    # Sort first by file type, then alphabetically by file path.
+    for i in sorted(c.keys(),
+                    key=lambda k: (
+                        isinstance(c[k], arvados.collection.Subcollection),
+                        k.upper())):
+        if isinstance(c[i], arvados.arvfile.ArvadosFile):
+            yield (c, c[i])
+        elif isinstance(c[i], arvados.collection.Subcollection):
+            for s, f in files_in_collection(c[i]):
+                yield (s, f)
+
+def write_block_or_manifest(dest, src, api_client, args):
+    if '+A' in src:
+        # block locator
+        kc = arvados.keep.KeepClient(api_client=api_client)
+        dest.write(kc.get(src, num_retries=args.retries))
+    else:
+        # collection UUID or portable data hash
+        reader = arvados.CollectionReader(
+            src, api_client=api_client, num_retries=args.retries)
+        dest.write(reader.manifest_text(strip=args.strip_manifest).encode())
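+
+# A signed block locator carries a '+A<signature>@<expiry>' permission
+# hint, e.g. (hypothetical):
+#   930625b054ce894ac40596c908e1fecf+33+A0f1e2d3c...@5a7c9e33
+# Anything without '+A' is treated as a collection UUID or portable
+# data hash and read via CollectionReader.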
diff --git a/sdk/python/arvados/commands/keepdocker.py b/sdk/python/arvados/commands/keepdocker.py
new file mode 100644 (file)
index 0000000..d4fecc4
--- /dev/null
@@ -0,0 +1,544 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from builtins import next
+import argparse
+import collections
+import datetime
+import errno
+import json
+import os
+import re
+import sys
+import tarfile
+import tempfile
+import shutil
+import _strptime  # pre-import to avoid a deadlock when strptime is first used inside a thread
+import fcntl
+from operator import itemgetter
+from stat import ST_MTIME, ST_SIZE
+
+if os.name == "posix" and sys.version_info[0] < 3:
+    import subprocess32 as subprocess
+else:
+    import subprocess
+
+import arvados
+import arvados.util
+import arvados.commands._util as arv_cmd
+import arvados.commands.put as arv_put
+from arvados.collection import CollectionReader
+import ciso8601
+import logging
+import arvados.config
+
+from arvados._version import __version__
+
+logger = logging.getLogger('arvados.keepdocker')
+logger.setLevel(logging.DEBUG if arvados.config.get('ARVADOS_DEBUG')
+                else logging.INFO)
+
+EARLIEST_DATETIME = datetime.datetime(datetime.MINYEAR, 1, 1, 0, 0, 0)
+STAT_CACHE_ERRORS = (IOError, OSError, ValueError)
+
+DockerImage = collections.namedtuple(
+    'DockerImage', ['repo', 'tag', 'hash', 'created', 'vsize'])
+
+keepdocker_parser = argparse.ArgumentParser(add_help=False)
+keepdocker_parser.add_argument(
+    '--version', action='version', version="%s %s" % (sys.argv[0], __version__),
+    help='Print version and exit.')
+keepdocker_parser.add_argument(
+    '-f', '--force', action='store_true', default=False,
+    help="Re-upload the image even if it already exists on the server")
+keepdocker_parser.add_argument(
+    '--force-image-format', action='store_true', default=False,
+    help="Proceed even if the image format is not supported by the server")
+
+_group = keepdocker_parser.add_mutually_exclusive_group()
+_group.add_argument(
+    '--pull', action='store_true', default=False,
+    help="Try to pull the latest image from Docker registry")
+_group.add_argument(
+    '--no-pull', action='store_false', dest='pull',
+    help="Use locally installed image only, don't pull image from Docker registry (default)")
+
+# Combine keepdocker options listed above with run_opts options of arv-put.
+# The options inherited from arv-put include --name, --project-uuid,
+# --progress/--no-progress/--batch-progress and --resume/--no-resume.
+arg_parser = argparse.ArgumentParser(
+        description="Upload or list Docker images in Arvados",
+        parents=[keepdocker_parser, arv_put.run_opts, arv_cmd.retry_opt])
+
+arg_parser.add_argument(
+    'image', nargs='?',
+    help="Docker image to upload: repo, repo:tag, or hash")
+arg_parser.add_argument(
+    'tag', nargs='?',
+    help="Tag of the Docker image to upload (default 'latest'), if image is given as an untagged repo name")
+
+class DockerError(Exception):
+    pass
+
+
+def popen_docker(cmd, *args, **kwargs):
+    manage_stdin = ('stdin' not in kwargs)
+    kwargs.setdefault('stdin', subprocess.PIPE)
+    kwargs.setdefault('stdout', sys.stderr)
+    try:
+        docker_proc = subprocess.Popen(['docker.io'] + cmd, *args, **kwargs)
+    except OSError:  # No docker.io in $PATH
+        docker_proc = subprocess.Popen(['docker'] + cmd, *args, **kwargs)
+    if manage_stdin:
+        docker_proc.stdin.close()
+    return docker_proc
+
+def check_docker(proc, description):
+    proc.wait()
+    if proc.returncode != 0:
+        raise DockerError("docker {} returned status code {}".
+                          format(description, proc.returncode))
+
+def docker_image_format(image_hash):
+    """Return the registry format ('v1' or 'v2') of the given image."""
+    cmd = popen_docker(['inspect', '--format={{.Id}}', image_hash],
+                        stdout=subprocess.PIPE)
+    try:
+        image_id = next(cmd.stdout).decode().strip()
+        if image_id.startswith('sha256:'):
+            return 'v2'
+        elif ':' not in image_id:
+            return 'v1'
+        else:
+            return 'unknown'
+    finally:
+        check_docker(cmd, "inspect")
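+# Example ids as printed by `docker inspect --format={{.Id}}`
+# (hypothetical digests):
+#   v2: sha256:4bcdf2...   (hash-algorithm prefix present)
+#   v1: 4bcdf2...          (bare hex, no prefix)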
+
+def docker_image_compatible(api, image_hash):
+    supported = api._rootDesc.get('dockerImageFormats', [])
+    if not supported:
+        logger.warning("server does not specify supported image formats (see docker_image_formats in server config).")
+        return False
+
+    fmt = docker_image_format(image_hash)
+    if fmt in supported:
+        return True
+    else:
+        logger.error("image format is {!r} " \
+            "but server supports only {!r}".format(fmt, supported))
+        return False
+
+def docker_images():
+    # Yield a DockerImage tuple for each installed image.
+    list_proc = popen_docker(['images', '--no-trunc'], stdout=subprocess.PIPE)
+    list_output = iter(list_proc.stdout)
+    next(list_output)  # Ignore the header line
+    for line in list_output:
+        words = line.split()
+        words = [word.decode() for word in words]
+        size_index = len(words) - 2
+        repo, tag, imageid = words[:3]
+        ctime = ' '.join(words[3:size_index])
+        vsize = ' '.join(words[size_index:])
+        yield DockerImage(repo, tag, imageid, ctime, vsize)
+    list_proc.stdout.close()
+    check_docker(list_proc, "images")
+
+def find_image_hashes(image_search, image_tag=None):
+    # Given one argument, search for Docker images with matching hashes,
+    # and return their full hashes in a set.
+    # Given two arguments, also search for a Docker image with the
+    # same repository and tag.  If one is found, return its hash in a
+    # set; otherwise, fall back to the one-argument hash search.
+    # Returns None if no match is found, or a hash search is ambiguous.
+    hash_search = image_search.lower()
+    hash_matches = set()
+    for image in docker_images():
+        if (image.repo == image_search) and (image.tag == image_tag):
+            return set([image.hash])
+        elif image.hash.startswith(hash_search):
+            hash_matches.add(image.hash)
+    return hash_matches
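+# e.g. find_image_hashes("ubuntu", "latest") returns {<full hash>} when
+# that repo:tag is installed, while find_image_hashes("4bcd") returns
+# every installed image hash starting with "4bcd" (possibly several,
+# which find_one_image_hash below treats as ambiguous).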
+
+def find_one_image_hash(image_search, image_tag=None):
+    hashes = find_image_hashes(image_search, image_tag)
+    hash_count = len(hashes)
+    if hash_count == 1:
+        return hashes.pop()
+    elif hash_count == 0:
+        raise DockerError("no matching image found")
+    else:
+        raise DockerError("{} images match {}".format(hash_count, image_search))
+
+def stat_cache_name(image_file):
+    return getattr(image_file, 'name', image_file) + '.stat'
+
+def pull_image(image_name, image_tag):
+    check_docker(popen_docker(['pull', '{}:{}'.format(image_name, image_tag)]),
+                 "pull")
+
+def save_image(image_hash, image_file):
+    # Save the specified Docker image to image_file, then try to save its
+    # stats so we can try to resume after interruption.
+    check_docker(popen_docker(['save', image_hash], stdout=image_file),
+                 "save")
+    image_file.flush()
+    try:
+        with open(stat_cache_name(image_file), 'w') as statfile:
+            json.dump(tuple(os.fstat(image_file.fileno())), statfile)
+    except STAT_CACHE_ERRORS:
+        pass  # We won't resume from this cache.  No big deal.
+
+def get_cache_dir():
+    return arv_cmd.make_home_conf_dir(
+        os.path.join('.cache', 'arvados', 'docker'), 0o700)
+
+def prep_image_file(filename):
+    # Return a file object ready to save a Docker image,
+    # and a boolean indicating whether or not we need to actually save the
+    # image (False if a cached save is available).
+    cache_dir = get_cache_dir()
+    if cache_dir is None:
+        image_file = tempfile.NamedTemporaryFile(suffix='.tar')
+        need_save = True
+    else:
+        file_path = os.path.join(cache_dir, filename)
+        try:
+            with open(stat_cache_name(file_path)) as statfile:
+                prev_stat = json.load(statfile)
+            now_stat = os.stat(file_path)
+            need_save = any(prev_stat[field] != now_stat[field]
+                            for field in [ST_MTIME, ST_SIZE])
+        except STAT_CACHE_ERRORS + (AttributeError, IndexError):
+            need_save = True  # We couldn't compare against old stats
+        image_file = open(file_path, 'w+b' if need_save else 'rb')
+    return image_file, need_save
+
+def make_link(api_client, num_retries, link_class, link_name, **link_attrs):
+    link_attrs.update({'link_class': link_class, 'name': link_name})
+    return api_client.links().create(body=link_attrs).execute(
+        num_retries=num_retries)
+
+def docker_link_sort_key(link):
+    """Build a sort key to find the latest available Docker image.
+
+    To find one source collection for a Docker image referenced by
+    name or image id, the API server looks for a link with the most
+    recent `image_timestamp` property; then the most recent
+    `created_at` timestamp.  This method generates a sort key for
+    Docker metadata links to sort them from least to most preferred.
+    """
+    try:
+        image_timestamp = ciso8601.parse_datetime_as_naive(
+            link['properties']['image_timestamp'])
+    except (KeyError, ValueError):
+        image_timestamp = EARLIEST_DATETIME
+    try:
+        created_timestamp = ciso8601.parse_datetime_as_naive(link['created_at'])
+    except ValueError:
+        created_timestamp = None
+    return (image_timestamp, created_timestamp)
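+# e.g. a link with properties {'image_timestamp': '2016-02-04T00:21:49Z'}
+# sorts on that timestamp, while a link without one falls back to
+# (EARLIEST_DATETIME, created_at), so explicitly timestamped images
+# always sort as more preferred.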
+
+def _get_docker_links(api_client, num_retries, **kwargs):
+    links = arvados.util.list_all(api_client.links().list,
+                                  num_retries, **kwargs)
+    for link in links:
+        link['_sort_key'] = docker_link_sort_key(link)
+    links.sort(key=itemgetter('_sort_key'), reverse=True)
+    return links
+
+def _new_image_listing(link, dockerhash, repo='<none>', tag='<none>'):
+    timestamp_index = 1 if (link['_sort_key'][0] is EARLIEST_DATETIME) else 0
+    return {
+        '_sort_key': link['_sort_key'],
+        'timestamp': link['_sort_key'][timestamp_index],
+        'collection': link['head_uuid'],
+        'dockerhash': dockerhash,
+        'repo': repo,
+        'tag': tag,
+        }
+
+def list_images_in_arv(api_client, num_retries, image_name=None, image_tag=None):
+    """List all Docker images known to the api_client with image_name and
+    image_tag.  If no image_name is given, defaults to listing all
+    Docker images.
+
+    Returns a list of tuples representing matching Docker images,
+    sorted in preference order (i.e. the first collection in the list
+    is the one that the API server would use). Each tuple is a
+    (collection_uuid, collection_info) pair, where collection_info is
+    a dict with fields "dockerhash", "repo", "tag", and "timestamp".
+
+    """
+    search_filters = []
+    repo_links = None
+    hash_links = None
+    if image_name:
+        # Find images with the name the user specified.
+        search_links = _get_docker_links(
+            api_client, num_retries,
+            filters=[['link_class', '=', 'docker_image_repo+tag'],
+                     ['name', '=',
+                      '{}:{}'.format(image_name, image_tag or 'latest')]])
+        if search_links:
+            repo_links = search_links
+        else:
+            # Fall back to finding images with the specified image hash.
+            search_links = _get_docker_links(
+                api_client, num_retries,
+                filters=[['link_class', '=', 'docker_image_hash'],
+                         ['name', 'ilike', image_name + '%']])
+            hash_links = search_links
+        # Only list information about images that were found in the search.
+        search_filters.append(['head_uuid', 'in',
+                               [link['head_uuid'] for link in search_links]])
+
+    # It should be reasonable to expect that each collection only has one
+    # image hash (though there may be many links specifying this).  Find
+    # the API server's most preferred image hash link for each collection.
+    if hash_links is None:
+        hash_links = _get_docker_links(
+            api_client, num_retries,
+            filters=search_filters + [['link_class', '=', 'docker_image_hash']])
+    hash_link_map = {link['head_uuid']: link for link in reversed(hash_links)}
+
+    # Each collection may have more than one name (though again, one name
+    # may be specified more than once).  Build an image listing from name
+    # tags, sorted by API server preference.
+    if repo_links is None:
+        repo_links = _get_docker_links(
+            api_client, num_retries,
+            filters=search_filters + [['link_class', '=',
+                                       'docker_image_repo+tag']])
+    seen_image_names = collections.defaultdict(set)
+    images = []
+    for link in repo_links:
+        collection_uuid = link['head_uuid']
+        if link['name'] in seen_image_names[collection_uuid]:
+            continue
+        seen_image_names[collection_uuid].add(link['name'])
+        try:
+            dockerhash = hash_link_map[collection_uuid]['name']
+        except KeyError:
+            dockerhash = '<unknown>'
+        name_parts = link['name'].split(':', 1)
+        images.append(_new_image_listing(link, dockerhash, *name_parts))
+
+    # Find any image hash links that did not have a corresponding name link,
+    # and add image listings for them, retaining the API server preference
+    # sorting.
+    images_start_size = len(images)
+    for collection_uuid, link in hash_link_map.items():
+        if not seen_image_names[collection_uuid]:
+            images.append(_new_image_listing(link, link['name']))
+    if len(images) > images_start_size:
+        images.sort(key=itemgetter('_sort_key'), reverse=True)
+
+    # Remove any image listings that refer to unknown collections.
+    existing_coll_uuids = {coll['uuid'] for coll in arvados.util.list_all(
+            api_client.collections().list, num_retries,
+            filters=[['uuid', 'in', [im['collection'] for im in images]]],
+            select=['uuid'])}
+    return [(image['collection'], image) for image in images
+            if image['collection'] in existing_coll_uuids]
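+# Usage sketch (hypothetical image name):
+#   for coll_uuid, info in list_images_in_arv(api_client, 3, 'ubuntu', 'latest'):
+#       print(coll_uuid, info['repo'], info['tag'], info['dockerhash'])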
+
+def items_owned_by(owner_uuid, arv_items):
+    return (item for item in arv_items if item['owner_uuid'] == owner_uuid)
+
+def _uuid2pdh(api, uuid):
+    return api.collections().list(
+        filters=[['uuid', '=', uuid]],
+        select=['portable_data_hash'],
+    ).execute()['items'][0]['portable_data_hash']
+
+def main(arguments=None, stdout=sys.stdout, install_sig_handlers=True, api=None):
+    args = arg_parser.parse_args(arguments)
+    if api is None:
+        api = arvados.api('v1')
+
+    if args.image is None or args.image == 'images':
+        fmt = "{:30}  {:10}  {:12}  {:29}  {:20}\n"
+        stdout.write(fmt.format("REPOSITORY", "TAG", "IMAGE ID", "COLLECTION", "CREATED"))
+        try:
+            for i, j in list_images_in_arv(api, args.retries):
+                stdout.write(fmt.format(j["repo"], j["tag"], j["dockerhash"][0:12], i, j["timestamp"].strftime("%c")))
+        except IOError as e:
+            if e.errno == errno.EPIPE:
+                pass
+            else:
+                raise
+        sys.exit(0)
+
+    if re.search(r':\w[-.\w]{0,127}$', args.image):
+        # image ends with :valid-tag
+        if args.tag is not None:
+            logger.error(
+                "image %r already includes a tag, cannot add tag argument %r",
+                args.image, args.tag)
+            sys.exit(1)
+        # rsplit() accommodates "myrepo.example:8888/repo/image:tag"
+        args.image, args.tag = args.image.rsplit(':', 1)
+    elif args.tag is None:
+        args.tag = 'latest'
+
+    # Pull the image if requested, unless the image is specified as a hash
+    # that we already have.
+    if args.pull and not find_image_hashes(args.image):
+        pull_image(args.image, args.tag)
+
+    try:
+        image_hash = find_one_image_hash(args.image, args.tag)
+    except DockerError as error:
+        logger.error(str(error))
+        sys.exit(1)
+
+    if not docker_image_compatible(api, image_hash):
+        if args.force_image_format:
+            logger.warning("forcing incompatible image")
+        else:
+            logger.error("refusing to store " \
+                "incompatible format (use --force-image-format to override)")
+            sys.exit(1)
+
+    image_repo_tag = '{}:{}'.format(args.image, args.tag) if not image_hash.startswith(args.image.lower()) else None
+
+    if args.name is None:
+        if image_repo_tag:
+            collection_name = 'Docker image {} {}'.format(image_repo_tag, image_hash[0:12])
+        else:
+            collection_name = 'Docker image {}'.format(image_hash[0:12])
+    else:
+        collection_name = args.name
+
+    # Acquire a lock so that only one arv-keepdocker process will
+    # dump/upload a particular docker image at a time.  Do this before
+    # checking if the image already exists in Arvados so that if there
+    # is an upload already underway, when that upload completes and
+    # this process gets a turn, it will discover the Docker image is
+    # already available and exit quickly.
+    outfile_name = '{}.tar'.format(image_hash)
+    lockfile_name = '{}.lock'.format(outfile_name)
+    lockfile = None
+    cache_dir = get_cache_dir()
+    if cache_dir:
+        lockfile = open(os.path.join(cache_dir, lockfile_name), 'w+')
+        fcntl.flock(lockfile, fcntl.LOCK_EX)
+
+    try:
+        if not args.force:
+            # Check if this image is already in Arvados.
+
+            # Project where everything should be owned
+            parent_project_uuid = args.project_uuid or api.users().current().execute(
+                num_retries=args.retries)['uuid']
+
+            # Find image hash tags
+            existing_links = _get_docker_links(
+                api, args.retries,
+                filters=[['link_class', '=', 'docker_image_hash'],
+                         ['name', '=', image_hash]])
+            if existing_links:
+                # get readable collections
+                collections = api.collections().list(
+                    filters=[['uuid', 'in', [link['head_uuid'] for link in existing_links]]],
+                    select=["uuid", "owner_uuid", "name", "manifest_text"]
+                    ).execute(num_retries=args.retries)['items']
+
+                if collections:
+                    # check for repo+tag links on these collections
+                    if image_repo_tag:
+                        existing_repo_tag = _get_docker_links(
+                            api, args.retries,
+                            filters=[['link_class', '=', 'docker_image_repo+tag'],
+                                     ['name', '=', image_repo_tag],
+                                     ['head_uuid', 'in', [c["uuid"] for c in collections]]])
+                    else:
+                        existing_repo_tag = []
+
+                    try:
+                        coll_uuid = next(items_owned_by(parent_project_uuid, collections))['uuid']
+                    except StopIteration:
+                        # create new collection owned by the project
+                        coll_uuid = api.collections().create(
+                            body={"manifest_text": collections[0]['manifest_text'],
+                                  "name": collection_name,
+                                  "owner_uuid": parent_project_uuid},
+                            ensure_unique_name=True
+                            ).execute(num_retries=args.retries)['uuid']
+
+                    link_base = {'owner_uuid': parent_project_uuid,
+                                 'head_uuid':  coll_uuid,
+                                 'properties': existing_links[0]['properties']}
+
+                    if not any(items_owned_by(parent_project_uuid, existing_links)):
+                        # create image link owned by the project
+                        make_link(api, args.retries,
+                                  'docker_image_hash', image_hash, **link_base)
+
+                    if image_repo_tag and not any(items_owned_by(parent_project_uuid, existing_repo_tag)):
+                        # create repo+tag link owned by the project
+                        make_link(api, args.retries, 'docker_image_repo+tag',
+                                  image_repo_tag, **link_base)
+
+                    stdout.write(coll_uuid + "\n")
+
+                    sys.exit(0)
+
+        # Open a file for the saved image, and write it if needed.
+        image_file, need_save = prep_image_file(outfile_name)
+        if need_save:
+            save_image(image_hash, image_file)
+
+        # Call arv-put with switches we inherited from it
+        # (a.k.a., switches that aren't our own).
+        if arguments is None:
+            arguments = sys.argv[1:]
+        arguments = [i for i in arguments if i not in (args.image, args.tag, image_repo_tag)]
+        put_args = keepdocker_parser.parse_known_args(arguments)[1]
+
+        if args.name is None:
+            put_args += ['--name', collection_name]
+
+        coll_uuid = arv_put.main(
+            put_args + ['--filename', outfile_name, image_file.name], stdout=stdout,
+            install_sig_handlers=install_sig_handlers).strip()
+
+        # Read the image metadata and make Arvados links from it.
+        image_file.seek(0)
+        image_tar = tarfile.open(fileobj=image_file)
+        image_hash_type, _, raw_image_hash = image_hash.rpartition(':')
+        if image_hash_type:
+            json_filename = raw_image_hash + '.json'
+        else:
+            json_filename = raw_image_hash + '/json'
+        json_file = image_tar.extractfile(image_tar.getmember(json_filename))
+        image_metadata = json.loads(json_file.read().decode())
+        json_file.close()
+        image_tar.close()
+        link_base = {'head_uuid': coll_uuid, 'properties': {}}
+        if 'created' in image_metadata:
+            link_base['properties']['image_timestamp'] = image_metadata['created']
+        if args.project_uuid is not None:
+            link_base['owner_uuid'] = args.project_uuid
+
+        make_link(api, args.retries, 'docker_image_hash', image_hash, **link_base)
+        if image_repo_tag:
+            make_link(api, args.retries,
+                      'docker_image_repo+tag', image_repo_tag, **link_base)
+
+        # Clean up.
+        image_file.close()
+        for filename in [stat_cache_name(image_file), image_file.name]:
+            try:
+                os.unlink(filename)
+            except OSError as error:
+                if error.errno != errno.ENOENT:
+                    raise
+    finally:
+        if lockfile is not None:
+            # Closing the lockfile unlocks it.
+            lockfile.close()
+
+if __name__ == '__main__':
+    main()
diff --git a/sdk/python/arvados/commands/ls.py b/sdk/python/arvados/commands/ls.py
new file mode 100644 (file)
index 0000000..86e728e
--- /dev/null
@@ -0,0 +1,98 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+from __future__ import division
+
+import argparse
+import collections
+import logging
+import re
+import sys
+
+import arvados
+import arvados.commands._util as arv_cmd
+
+from arvados._version import __version__
+
+FileInfo = collections.namedtuple('FileInfo', ['stream_name', 'name', 'size'])
+
+def parse_args(args):
+    parser = argparse.ArgumentParser(
+        description='List contents of a manifest',
+        parents=[arv_cmd.retry_opt])
+
+    parser.add_argument('locator', type=str,
+                        help="""Collection UUID or locator, optionally with a subdir path.""")
+    parser.add_argument('-s', action='store_true',
+                        help="""List file sizes, in KiB.""")
+    parser.add_argument('--version', action='version',
+                        version="%s %s" % (sys.argv[0], __version__),
+                        help='Print version and exit.')
+
+    return parser.parse_args(args)
+
+def size_formatter(coll_file):
+    return "{:>10}".format((coll_file.size + 1023) // 1024)
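+# e.g. a 1-byte file formats as "         1": sizes are rounded up to
+# whole KiB and right-aligned in a 10-column field.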
+
+def name_formatter(coll_file):
+    return "{}/{}".format(coll_file.stream_name, coll_file.name)
+
+def main(args, stdout, stderr, api_client=None, logger=None):
+    args = parse_args(args)
+
+    if api_client is None:
+        api_client = arvados.api('v1')
+
+    if logger is None:
+        logger = logging.getLogger('arvados.arv-ls')
+
+    try:
+        r = re.search(r'^(.*?)(/.*)?$', args.locator)
+        collection = r.group(1)
+        get_prefix = r.group(2)
+
+        cr = arvados.CollectionReader(collection, api_client=api_client,
+                                      num_retries=args.retries)
+        if get_prefix:
+            if get_prefix[-1] == '/':
+                get_prefix = get_prefix[:-1]
+            stream_name = '.' + get_prefix
+            reader = cr.find(stream_name)
+            if not (isinstance(reader, arvados.CollectionReader) or
+                    isinstance(reader, arvados.collection.Subcollection)):
+                logger.error("'{}' is not a subdirectory".format(get_prefix))
+                return 1
+        else:
+            stream_name = '.'
+            reader = cr
+    except (arvados.errors.ApiError,
+            arvados.errors.ArgumentError,
+            arvados.errors.NotFoundError) as error:
+        logger.error("error fetching collection: {}".format(error))
+        return 1
+
+    formatters = []
+    if args.s:
+        formatters.append(size_formatter)
+    formatters.append(name_formatter)
+
+    for f in files_in_collection(reader, stream_name):
+        print(*(info_func(f) for info_func in formatters), file=stdout)
+
+    return 0
+
+def files_in_collection(c, stream_name='.'):
+    # Sort first by file type, then alphabetically by file path.
+    for i in sorted(c.keys(),
+                    key=lambda k: (
+                        isinstance(c[k], arvados.collection.Subcollection),
+                        k.upper())):
+        if isinstance(c[i], arvados.arvfile.ArvadosFile):
+            yield FileInfo(stream_name=stream_name,
+                           name=i,
+                           size=c[i].size())
+        elif isinstance(c[i], arvados.collection.Subcollection):
+            for f in files_in_collection(c[i], "{}/{}".format(stream_name, i)):
+                yield f
diff --git a/sdk/python/arvados/commands/migrate19.py b/sdk/python/arvados/commands/migrate19.py
new file mode 100644 (file)
index 0000000..3ce47b2
--- /dev/null
@@ -0,0 +1,287 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+from __future__ import division
+import argparse
+import time
+import sys
+import logging
+import shutil
+import tempfile
+import os
+import subprocess
+import re
+
+import arvados
+import arvados.commands.keepdocker
+from arvados._version import __version__
+from arvados.collection import CollectionReader
+
+logger = logging.getLogger('arvados.migrate-docker19')
+logger.setLevel(logging.DEBUG if arvados.config.get('ARVADOS_DEBUG')
+                else logging.INFO)
+
+_migration_link_class = 'docker_image_migration'
+_migration_link_name = 'migrate_1.9_1.10'
+
+class MigrationFailed(Exception):
+    pass
+
+def main(arguments=None):
+    """Docker image format migration tool for Arvados.
+
+    This converts Docker images stored in Arvados from image format v1
+    (Docker <= 1.9) to image format v2 (Docker >= 1.10).
+
+    Requires Docker running on the local host.
+
+    Usage:
+
+    1) Run arvados/docker/migrate-docker19/build.sh to create
+    arvados/migrate-docker19 Docker image.
+
+    2) Set ARVADOS_API_HOST and ARVADOS_API_TOKEN to the cluster you want to migrate.
+
+    3) Run arv-migrate-docker19 from the Arvados Python SDK on the host (not in a container).
+
+    This will query Arvados for v1 format Docker images.  For each image that
+    does not already have a corresponding v2 format image (as indicated by a
+    docker_image_migration tag) it will perform the following process:
+
+    i) download the image from Arvados
+    ii) load it into Docker
+    iii) update the Docker version, which updates the image
+    iv) save the v2 format image and upload to Arvados
+    v) create a migration link
+
+    """
+
+    migrate19_parser = argparse.ArgumentParser()
+    migrate19_parser.add_argument(
+        '--version', action='version', version="%s %s" % (sys.argv[0], __version__),
+        help='Print version and exit.')
+    migrate19_parser.add_argument(
+        '--verbose', action="store_true", help="Print stdout/stderr even on success")
+    migrate19_parser.add_argument(
+        '--force', action="store_true", help="Try to migrate even if there isn't enough space")
+
+    migrate19_parser.add_argument(
+        '--storage-driver', type=str, default="overlay",
+        help="Docker storage driver, e.g. aufs, overlay, vfs")
+
+    exgroup = migrate19_parser.add_mutually_exclusive_group()
+    exgroup.add_argument(
+        '--dry-run', action='store_true', help="Print number of pending migrations.")
+    exgroup.add_argument(
+        '--print-unmigrated', action='store_true',
+        default=False, help="Print list of images needing migration.")
+
+    migrate19_parser.add_argument('--tempdir', help="Set temporary directory")
+
+    migrate19_parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
+                                  default=None, help="List of images to be migrated")
+
+    args = migrate19_parser.parse_args(arguments)
+
+    if args.tempdir:
+        tempfile.tempdir = args.tempdir
+
+    if args.verbose:
+        logger.setLevel(logging.DEBUG)
+
+    only_migrate = None
+    if args.infile:
+        only_migrate = set()
+        for l in args.infile:
+            only_migrate.add(l.strip())
+
+    api_client = arvados.api()
+
+    user = api_client.users().current().execute()
+    if not user['is_admin']:
+        raise Exception("This command requires an admin token")
+    sys_uuid = user['uuid'][:12] + '000000000000000'
+
+    images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3)
+
+    def is_new(img):
+        return img['dockerhash'].startswith('sha256:')
+
+    old_images = []
+    for uuid, img in images:
+        # Skip images that are already in the v2 (sha256) format.
+        if is_new(img):
+            continue
+        old_images.append(img)
+
+    migration_links = arvados.util.list_all(api_client.links().list, filters=[
+        ['link_class', '=', _migration_link_class],
+        ['name', '=', _migration_link_name],
+    ])
+
+    already_migrated = set()
+    for m in migration_links:
+        already_migrated.add(m["tail_uuid"])
+
+    items = arvados.util.list_all(api_client.collections().list,
+                                  filters=[["uuid", "in", [img["collection"] for img in old_images]]],
+                                  select=["uuid", "portable_data_hash", "manifest_text", "owner_uuid"])
+    uuid_to_collection = {i["uuid"]: i for i in items}
+
+    need_migrate = {}
+    totalbytes = 0
+    biggest = 0
+    biggest_pdh = None
+    for img in old_images:
+        i = uuid_to_collection[img["collection"]]
+        pdh = i["portable_data_hash"]
+        if pdh not in already_migrated and pdh not in need_migrate and (only_migrate is None or pdh in only_migrate):
+            need_migrate[pdh] = img
+            with CollectionReader(i["manifest_text"]) as c:
+                size = list(c.values())[0].size()
+                if size > biggest:
+                    biggest = size
+                    biggest_pdh = pdh
+                totalbytes += size
+
+
+    if args.storage_driver == "vfs":
+        will_need = (biggest*20)
+    else:
+        will_need = (biggest*2.5)
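+    # Rough headroom rule encoded above: the vfs driver copies every
+    # layer, so budget ~20x the biggest image; other drivers need about
+    # 2.5x for the load/upgrade/save round trip.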
+
+    if args.print_unmigrated:
+        only_migrate = set()
+        for pdh in need_migrate:
+            print(pdh)
+        return
+
+    logger.info("Already migrated %i images", len(already_migrated))
+    logger.info("Need to migrate %i images", len(need_migrate))
+    logger.info("Using tempdir %s", tempfile.gettempdir())
+    logger.info("Biggest image %s is about %i MiB", biggest_pdh, biggest>>20)
+    logger.info("Total data to migrate about %i MiB", totalbytes>>20)
+
+    df_out = subprocess.check_output(["df", "-B1", tempfile.gettempdir()]).decode()
+    ln = df_out.splitlines()[1]
+    filesystem, blocks, used, available, use_pct, mounted = re.match(r"^([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+)", ln).groups()
+    if int(available) <= will_need:
+        logger.warning("Temp filesystem mounted at %s does not have enough space for biggest image (has %i MiB, needs %i MiB)", mounted, int(available)>>20, int(will_need)>>20)
+        if not args.force:
+            sys.exit(1)
+        else:
+            logger.warning("--force provided, will migrate anyway")
+
+    if args.dry_run:
+        return
+
+    success = []
+    failures = []
+    count = 1
+    for old_image in list(need_migrate.values()):
+        if uuid_to_collection[old_image["collection"]]["portable_data_hash"] in already_migrated:
+            continue
+
+        oldcol = CollectionReader(uuid_to_collection[old_image["collection"]]["manifest_text"])
+        tarfile = list(oldcol.keys())[0]
+
+        logger.info("[%i/%i] Migrating %s:%s (%s) (%i MiB)", count, len(need_migrate), old_image["repo"],
+                    old_image["tag"], old_image["collection"], list(oldcol.values())[0].size()>>20)
+        count += 1
+        start = time.time()
+
+        varlibdocker = tempfile.mkdtemp()
+        dockercache = tempfile.mkdtemp()
+        try:
+            with tempfile.NamedTemporaryFile(mode="w") as envfile:
+                envfile.write("ARVADOS_API_HOST=%s\n" % (arvados.config.get("ARVADOS_API_HOST")))
+                envfile.write("ARVADOS_API_TOKEN=%s\n" % (arvados.config.get("ARVADOS_API_TOKEN")))
+                if arvados.config.get("ARVADOS_API_HOST_INSECURE"):
+                    envfile.write("ARVADOS_API_HOST_INSECURE=%s\n" % (arvados.config.get("ARVADOS_API_HOST_INSECURE")))
+                envfile.flush()
+
+                dockercmd = ["docker", "run",
+                             "--privileged",
+                             "--rm",
+                             "--env-file", envfile.name,
+                             "--volume", "%s:/var/lib/docker" % varlibdocker,
+                             "--volume", "%s:/root/.cache/arvados/docker" % dockercache,
+                             "arvados/migrate-docker19:1.0",
+                             "/root/migrate.sh",
+                             "%s/%s" % (old_image["collection"], tarfile),
+                             tarfile[0:40],
+                             old_image["repo"],
+                             old_image["tag"],
+                             uuid_to_collection[old_image["collection"]]["owner_uuid"],
+                             args.storage_driver]
+
+                # universal_newlines=True so communicate() returns text rather
+                # than bytes under Python 3 (the regexes below expect str).
+                proc = subprocess.Popen(dockercmd, stdout=subprocess.PIPE,
+                                        stderr=subprocess.PIPE,
+                                        universal_newlines=True)
+                out, err = proc.communicate()
+
+                initial_space = re.search(r"Initial available space is (\d+)", out)
+                imgload_space = re.search(r"Available space after image load is (\d+)", out)
+                imgupgrade_space = re.search(r"Available space after image upgrade is (\d+)", out)
+                keepdocker_space = re.search(r"Available space after arv-keepdocker is (\d+)", out)
+                cleanup_space = re.search(r"Available space after cleanup is (\d+)", out)
+
+                if initial_space:
+                    isp = int(initial_space.group(1))
+                    logger.info("Available space initially: %i MiB", (isp)/(2**20))
+                    if imgload_space:
+                        sp = int(imgload_space.group(1))
+                        logger.debug("Used after load: %i MiB", (isp-sp)/(2**20))
+                    if imgupgrade_space:
+                        sp = int(imgupgrade_space.group(1))
+                        logger.debug("Used after upgrade: %i MiB", (isp-sp)/(2**20))
+                    if keepdocker_space:
+                        sp = int(keepdocker_space.group(1))
+                        logger.info("Used after upload: %i MiB", (isp-sp)/(2**20))
+
+                if cleanup_space:
+                    sp = int(cleanup_space.group(1))
+                    logger.debug("Available after cleanup: %i MiB", (sp)/(2**20))
+
+                if proc.returncode != 0:
+                    logger.error("Failed with return code %i", proc.returncode)
+                    logger.error("--- Stdout ---\n%s", out)
+                    logger.error("--- Stderr ---\n%s", err)
+                    raise MigrationFailed()
+
+                if args.verbose:
+                    logger.info("--- Stdout ---\n%s", out)
+                    logger.info("--- Stderr ---\n%s", err)
+
+            migrated = re.search(r"Migrated uuid is ([a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15})", out)
+            if migrated:
+                newcol = CollectionReader(migrated.group(1))
+
+                api_client.links().create(body={"link": {
+                    'owner_uuid': sys_uuid,
+                    'link_class': _migration_link_class,
+                    'name': _migration_link_name,
+                    'tail_uuid': oldcol.portable_data_hash(),
+                    'head_uuid': newcol.portable_data_hash()
+                    }}).execute(num_retries=3)
+
+                logger.info("Migrated '%s' (%s) to '%s' (%s) in %is",
+                            oldcol.portable_data_hash(), old_image["collection"],
+                            newcol.portable_data_hash(), migrated.group(1),
+                            time.time() - start)
+                already_migrated.add(oldcol.portable_data_hash())
+                success.append(old_image["collection"])
+            else:
+                logger.error("Error migrating '%s'", old_image["collection"])
+                failures.append(old_image["collection"])
+        except Exception as e:
+            logger.error("Failed to migrate %s in %is", old_image["collection"], time.time() - start,
+                         exc_info=(not isinstance(e, MigrationFailed)))
+            failures.append(old_image["collection"])
+        finally:
+            shutil.rmtree(varlibdocker)
+            shutil.rmtree(dockercache)
+
+    logger.info("Successfully migrated %i images", len(success))
+    if failures:
+        logger.error("Failed to migrate %i images", len(failures))
diff --git a/sdk/python/arvados/commands/put.py b/sdk/python/arvados/commands/put.py
new file mode 100644 (file)
index 0000000..afe75b3
--- /dev/null
@@ -0,0 +1,1270 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import division
+from future.utils import listitems, listvalues
+from builtins import str
+from builtins import object
+import argparse
+import arvados
+import arvados.collection
+import base64
+import copy
+import datetime
+import errno
+import fcntl
+import fnmatch
+import hashlib
+import json
+import logging
+import os
+import pwd
+import re
+import signal
+import socket
+import sys
+import tempfile
+import threading
+import time
+import traceback
+
+from apiclient import errors as apiclient_errors
+from arvados._version import __version__
+from arvados.util import keep_locator_pattern
+
+import arvados.commands._util as arv_cmd
+
+api_client = None
+
+upload_opts = argparse.ArgumentParser(add_help=False)
+
+upload_opts.add_argument('--version', action='version',
+                         version="%s %s" % (sys.argv[0], __version__),
+                         help='Print version and exit.')
+upload_opts.add_argument('paths', metavar='path', type=str, nargs='*',
+                         help="""
+Local file or directory. If path is a directory reference with a trailing
+slash, then just upload the directory's contents; otherwise upload the
+directory itself. Default: read from standard input.
+""")
+
+_group = upload_opts.add_mutually_exclusive_group()
+
+_group.add_argument('--max-manifest-depth', type=int, metavar='N',
+                    default=-1, help=argparse.SUPPRESS)
+
+_group.add_argument('--normalize', action='store_true',
+                    help="""
+Normalize the manifest by re-ordering files and streams after writing
+data.
+""")
+
+_group.add_argument('--dry-run', action='store_true', default=False,
+                    help="""
+Don't actually upload files, but only check if any file should be
+uploaded. Exit with code=2 when files are pending for upload.
+""")
+
+_group = upload_opts.add_mutually_exclusive_group()
+
+_group.add_argument('--as-stream', action='store_true', dest='stream',
+                    help="""
+Synonym for --stream.
+""")
+
+_group.add_argument('--stream', action='store_true',
+                    help="""
+Store the file content and display the resulting manifest on
+stdout. Do not write the manifest to Keep or save a Collection object
+in Arvados.
+""")
+
+_group.add_argument('--as-manifest', action='store_true', dest='manifest',
+                    help="""
+Synonym for --manifest.
+""")
+
+_group.add_argument('--in-manifest', action='store_true', dest='manifest',
+                    help="""
+Synonym for --manifest.
+""")
+
+_group.add_argument('--manifest', action='store_true',
+                    help="""
+Store the file data and resulting manifest in Keep, save a Collection
+object in Arvados, and display the manifest locator (Collection uuid)
+on stdout. This is the default behavior.
+""")
+
+_group.add_argument('--as-raw', action='store_true', dest='raw',
+                    help="""
+Synonym for --raw.
+""")
+
+_group.add_argument('--raw', action='store_true',
+                    help="""
+Store the file content and display the data block locators on stdout,
+separated by commas, with a trailing newline. Do not store a
+manifest.
+""")
+
+upload_opts.add_argument('--update-collection', type=str, default=None,
+                         dest='update_collection', metavar="UUID", help="""
+Update an existing collection identified by the given Arvados collection
+UUID. All new local files will be uploaded.
+""")
+
+upload_opts.add_argument('--use-filename', type=str, default=None,
+                         dest='filename', help="""
+Synonym for --filename.
+""")
+
+upload_opts.add_argument('--filename', type=str, default=None,
+                         help="""
+Use the given filename in the manifest, instead of the name of the
+local file. This is useful when "-" or "/dev/stdin" is given as an
+input file. It can be used only if there is exactly one path given and
+it is not a directory. Implies --manifest.
+""")
+
+upload_opts.add_argument('--portable-data-hash', action='store_true',
+                         help="""
+Print the portable data hash instead of the Arvados UUID for the collection
+created by the upload.
+""")
+
+upload_opts.add_argument('--replication', type=int, metavar='N', default=None,
+                         help="""
+Set the replication level for the new collection: how many different
+physical storage devices (e.g., disks) should have a copy of each data
+block. Default is to use the server-provided default (if any) or 2.
+""")
+
+upload_opts.add_argument('--storage-classes', help="""
+Specify comma separated list of storage classes to be used when saving data to Keep.
+""")
+
+upload_opts.add_argument('--threads', type=int, metavar='N', default=None,
+                         help="""
+Set the number of upload threads to be used. Take into account that
+using lots of threads will increase the RAM requirements. Default is
+to use 2 threads.
+On high latency installations, using a greater number will improve
+overall throughput.
+""")
+
+upload_opts.add_argument('--exclude', metavar='PATTERN', default=[],
+                      action='append', help="""
+Exclude files and directories whose names match the given glob pattern. When
+using a path-like pattern like 'subdir/*.txt', all text files inside the
+'subdir' directory (relative to the provided input dirs) will be excluded.
+When using a filename pattern like '*.txt', any text file will be excluded
+no matter where it is placed.
+For the special case of needing to exclude only files or dirs directly below
+the given input directory, you can use a pattern like './exclude_this.gif'.
+You can specify multiple patterns by using this argument more than once.
+""")
+
+_group = upload_opts.add_mutually_exclusive_group()
+_group.add_argument('--follow-links', action='store_true', default=True,
+                    dest='follow_links', help="""
+Follow file and directory symlinks (default).
+""")
+_group.add_argument('--no-follow-links', action='store_false', dest='follow_links',
+                    help="""
+Do not follow file and directory symlinks.
+""")
+
+
+run_opts = argparse.ArgumentParser(add_help=False)
+
+run_opts.add_argument('--project-uuid', metavar='UUID', help="""
+Store the collection in the specified project, instead of your Home
+project.
+""")
+
+run_opts.add_argument('--name', help="""
+Save the collection with the specified name.
+""")
+
+_group = run_opts.add_mutually_exclusive_group()
+_group.add_argument('--progress', action='store_true',
+                    help="""
+Display human-readable progress on stderr (bytes and, if possible,
+percentage of total data size). This is the default behavior when
+stderr is a tty.
+""")
+
+_group.add_argument('--no-progress', action='store_true',
+                    help="""
+Do not display human-readable progress on stderr, even if stderr is a
+tty.
+""")
+
+_group.add_argument('--batch-progress', action='store_true',
+                    help="""
+Display machine-readable progress on stderr (bytes and, if known,
+total data size).
+""")
+
+run_opts.add_argument('--silent', action='store_true',
+                      help="""
+Do not print any debug messages to console. (Any error messages will
+still be displayed.)
+""")
+
+_group = run_opts.add_mutually_exclusive_group()
+_group.add_argument('--resume', action='store_true', default=True,
+                    help="""
+Continue interrupted uploads from cached state (default).
+""")
+_group.add_argument('--no-resume', action='store_false', dest='resume',
+                    help="""
+Do not continue interrupted uploads from cached state.
+""")
+
+_group = run_opts.add_mutually_exclusive_group()
+_group.add_argument('--cache', action='store_true', dest='use_cache', default=True,
+                    help="""
+Save upload state in a cache file for resuming (default).
+""")
+_group.add_argument('--no-cache', action='store_false', dest='use_cache',
+                    help="""
+Do not save upload state in a cache file for resuming.
+""")
+
+arg_parser = argparse.ArgumentParser(
+    description='Copy data from the local filesystem to Keep.',
+    parents=[upload_opts, run_opts, arv_cmd.retry_opt])
+
+def parse_arguments(arguments):
+    args = arg_parser.parse_args(arguments)
+
+    if len(args.paths) == 0:
+        args.paths = ['-']
+
+    args.paths = ["-" if x == "/dev/stdin" else x for x in args.paths]
+
+    if len(args.paths) != 1 or os.path.isdir(args.paths[0]):
+        if args.filename:
+            arg_parser.error("""
+    --filename argument cannot be used when storing a directory or
+    multiple files.
+    """)
+
+    # Turn on --progress by default if stderr is a tty.
+    if (not (args.batch_progress or args.no_progress or args.silent)
+        and os.isatty(sys.stderr.fileno())):
+        args.progress = True
+
+    # Turn off --resume (default) if --no-cache is used.
+    if not args.use_cache:
+        args.resume = False
+
+    if args.paths == ['-']:
+        if args.update_collection:
+            arg_parser.error("""
+    --update-collection cannot be used when reading from stdin.
+    """)
+        args.resume = False
+        args.use_cache = False
+        if not args.filename:
+            args.filename = 'stdin'
+
+    # Remove possible duplicated patterns
+    if len(args.exclude) > 0:
+        args.exclude = list(set(args.exclude))
+
+    return args
+
+
+class PathDoesNotExistError(Exception):
+    pass
+
+
+class CollectionUpdateError(Exception):
+    pass
+
+
+class ResumeCacheConflict(Exception):
+    pass
+
+
+class ResumeCacheInvalidError(Exception):
+    pass
+
+class ArvPutArgumentConflict(Exception):
+    pass
+
+
+class ArvPutUploadIsPending(Exception):
+    pass
+
+
+class ArvPutUploadNotPending(Exception):
+    pass
+
+
+class FileUploadList(list):
+    def __init__(self, dry_run=False):
+        list.__init__(self)
+        self.dry_run = dry_run
+
+    def append(self, other):
+        if self.dry_run:
+            raise ArvPutUploadIsPending()
+        super(FileUploadList, self).append(other)
+
+
+# Appends the X-Request-Id to the log message when log level is ERROR or DEBUG
+class ArvPutLogFormatter(logging.Formatter):
+    std_fmtr = logging.Formatter(arvados.log_format, arvados.log_date_format)
+    err_fmtr = None
+    request_id_informed = False
+
+    def __init__(self, request_id):
+        self.err_fmtr = logging.Formatter(
+            arvados.log_format+' (X-Request-Id: {})'.format(request_id),
+            arvados.log_date_format)
+
+    def format(self, record):
+        if (not self.request_id_informed) and (record.levelno in (logging.DEBUG, logging.ERROR)):
+            self.request_id_informed = True
+            return self.err_fmtr.format(record)
+        return self.std_fmtr.format(record)
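+
+    # Illustrative formatted output, assuming the default arvados.log_format;
+    # the X-Request-Id suffix is added only to the first DEBUG/ERROR record:
+    #   2019-03-14 14:11:26 arvados.arv_put[1234] ERROR: ... (X-Request-Id: req-xxxxxxxxxxxxxxxxxxxx)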
+
+
+class ResumeCache(object):
+    CACHE_DIR = '.cache/arvados/arv-put'
+
+    def __init__(self, file_spec):
+        self.cache_file = open(file_spec, 'a+')
+        self._lock_file(self.cache_file)
+        self.filename = self.cache_file.name
+
+    @classmethod
+    def make_path(cls, args):
+        md5 = hashlib.md5()
+        md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost').encode())
+        realpaths = sorted(os.path.realpath(path) for path in args.paths)
+        md5.update(b'\0'.join([p.encode() for p in realpaths]))
+        if any(os.path.isdir(path) for path in realpaths):
+            md5.update(b'-1')
+        elif args.filename:
+            md5.update(args.filename.encode())
+        return os.path.join(
+            arv_cmd.make_home_conf_dir(cls.CACHE_DIR, 0o700, 'raise'),
+            md5.hexdigest())
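+
+    # The resulting path is typically ~/.cache/arvados/arv-put/<md5-hex>; the
+    # hash covers the API host and the sorted real paths (plus --filename for
+    # single-file uploads), so repeated invocations with the same inputs find
+    # the same cache file.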
+
+    def _lock_file(self, fileobj):
+        try:
+            fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
+        except IOError:
+            raise ResumeCacheConflict(u"{} locked".format(fileobj.name))
+
+    def load(self):
+        self.cache_file.seek(0)
+        return json.load(self.cache_file)
+
+    def check_cache(self, api_client=None, num_retries=0):
+        try:
+            state = self.load()
+            locator = None
+            try:
+                if "_finished_streams" in state and len(state["_finished_streams"]) > 0:
+                    locator = state["_finished_streams"][0][1][0]
+                elif "_current_stream_locators" in state and len(state["_current_stream_locators"]) > 0:
+                    locator = state["_current_stream_locators"][0]
+                if locator is not None:
+                    kc = arvados.keep.KeepClient(api_client=api_client)
+                    kc.head(locator, num_retries=num_retries)
+            except Exception:
+                self.restart()
+        except ValueError:
+            pass
+
+    def save(self, data):
+        try:
+            new_cache_fd, new_cache_name = tempfile.mkstemp(
+                dir=os.path.dirname(self.filename))
+            self._lock_file(new_cache_fd)
+            new_cache = os.fdopen(new_cache_fd, 'r+')
+            json.dump(data, new_cache)
+            os.rename(new_cache_name, self.filename)
+        except (IOError, OSError, ResumeCacheConflict):
+            try:
+                os.unlink(new_cache_name)
+            except NameError:  # mkstemp failed.
+                pass
+        else:
+            self.cache_file.close()
+            self.cache_file = new_cache
+
+    def close(self):
+        self.cache_file.close()
+
+    def destroy(self):
+        try:
+            os.unlink(self.filename)
+        except OSError as error:
+            if error.errno != errno.ENOENT:  # That's what we wanted anyway.
+                raise
+        self.close()
+
+    def restart(self):
+        self.destroy()
+        self.__init__(self.filename)
+
+
+class ArvPutUploadJob(object):
+    CACHE_DIR = '.cache/arvados/arv-put'
+    EMPTY_STATE = {
+        'manifest' : None, # Last saved manifest checkpoint
+        'files' : {} # Previous run file list: {path : {size, mtime}}
+    }
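+    # A populated state looks roughly like this (illustrative):
+    # {
+    #     'manifest': '. d41d8cd98f00b204e9800998ecf8427e+0 0:0:file.txt\n',
+    #     'files': {'/abs/path/file.txt': {'mtime': 1552572686.0, 'size': 0}}
+    # }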
+
+    def __init__(self, paths, resume=True, use_cache=True, reporter=None,
+                 name=None, owner_uuid=None, api_client=None,
+                 ensure_unique_name=False, num_retries=None,
+                 put_threads=None, replication_desired=None, filename=None,
+                 update_time=60.0, update_collection=None, storage_classes=None,
+                 logger=logging.getLogger('arvados.arv_put'), dry_run=False,
+                 follow_links=True, exclude_paths=[], exclude_names=None):
+        self.paths = paths
+        self.resume = resume
+        self.use_cache = use_cache
+        self.update = False
+        self.reporter = reporter
+        # This will be set to 0 before we start counting, if no special files
+        # are going to be read.
+        self.bytes_expected = None
+        self.bytes_written = 0
+        self.bytes_skipped = 0
+        self.name = name
+        self.owner_uuid = owner_uuid
+        self.ensure_unique_name = ensure_unique_name
+        self.num_retries = num_retries
+        self.replication_desired = replication_desired
+        self.put_threads = put_threads
+        self.filename = filename
+        self.storage_classes = storage_classes
+        self._api_client = api_client
+        self._state_lock = threading.Lock()
+        self._state = None # Previous run state (file list & manifest)
+        self._current_files = [] # Current run file list
+        self._cache_file = None
+        self._collection_lock = threading.Lock()
+        self._remote_collection = None # Collection being updated (if asked)
+        self._local_collection = None # Collection from previous run manifest
+        self._file_paths = set() # Files to be updated in remote collection
+        self._stop_checkpointer = threading.Event()
+        self._checkpointer = threading.Thread(target=self._update_task)
+        self._checkpointer.daemon = True
+        self._update_task_time = update_time  # How many seconds to wait between update runs
+        self._files_to_upload = FileUploadList(dry_run=dry_run)
+        self._upload_started = False
+        self.logger = logger
+        self.dry_run = dry_run
+        self._checkpoint_before_quit = True
+        self.follow_links = follow_links
+        self.exclude_paths = exclude_paths
+        self.exclude_names = exclude_names
+
+        if not self.use_cache and self.resume:
+            raise ArvPutArgumentConflict('resume cannot be True when use_cache is False')
+
+        # Check for obvious dry-run responses
+        if self.dry_run and (not self.use_cache or not self.resume):
+            raise ArvPutUploadIsPending()
+
+        # Load cached data if any and if needed
+        self._setup_state(update_collection)
+
+        # Build the upload file list, excluding requested files and counting the
+        # bytes expected to be uploaded.
+        self._build_upload_list()
+
+    def _build_upload_list(self):
+        """
+        Scan the requested paths to count file sizes, excluding requested files
+        and dirs and building the upload file list.
+        """
+        # If there are no special files to be read, reset the total bytes
+        # count to zero and start counting.
+        if not any([p for p in self.paths
+                    if not (os.path.isfile(p) or os.path.isdir(p))]):
+            self.bytes_expected = 0
+
+        for path in self.paths:
+            # Test for stdin first, in case some file named '-' exists
+            if path == '-':
+                if self.dry_run:
+                    raise ArvPutUploadIsPending()
+                self._write_stdin(self.filename or 'stdin')
+            elif not os.path.exists(path):
+                raise PathDoesNotExistError(u"file or directory '{}' does not exist.".format(path))
+            elif os.path.isdir(path):
+                # Use absolute paths on cache index so CWD doesn't interfere
+                # with the caching logic.
+                orig_path = path
+                path = os.path.abspath(path)
+                if orig_path[-1:] == os.sep:
+                    # When passing a directory reference with a trailing slash,
+                    # its contents should be uploaded directly to the
+                    # collection's root.
+                    prefixdir = path
+                else:
+                    # When passing a directory reference with no trailing slash,
+                    # upload the directory to the collection's root.
+                    prefixdir = os.path.dirname(path)
+                prefixdir += os.sep
+                for root, dirs, files in os.walk(path,
+                                                 followlinks=self.follow_links):
+                    root_relpath = os.path.relpath(root, path)
+                    if root_relpath == '.':
+                        root_relpath = ''
+                    # Exclude files/dirs by full path matching pattern
+                    if self.exclude_paths:
+                        dirs[:] = [d for d in dirs
+                                   if not any(pathname_match(
+                                           os.path.join(root_relpath, d), pat)
+                                              for pat in self.exclude_paths)]
+                        files = [f for f in files
+                                 if not any(pathname_match(
+                                         os.path.join(root_relpath, f), pat)
+                                            for pat in self.exclude_paths)]
+                    # Exclude files/dirs by name matching pattern
+                    if self.exclude_names is not None:
+                        dirs[:] = [d for d in dirs
+                                   if not self.exclude_names.match(d)]
+                        files = [f for f in files
+                                 if not self.exclude_names.match(f)]
+                    # Make os.walk()'s dir traversing order deterministic
+                    dirs.sort()
+                    files.sort()
+                    for f in files:
+                        filepath = os.path.join(root, f)
+                        # Add its size to the total bytes count (if applicable)
+                        if self.follow_links or (not os.path.islink(filepath)):
+                            if self.bytes_expected is not None:
+                                self.bytes_expected += os.path.getsize(filepath)
+                        self._check_file(filepath,
+                                         os.path.join(root[len(prefixdir):], f))
+            else:
+                filepath = os.path.abspath(path)
+                # Add its size to the total bytes count (if applicable)
+                if self.follow_links or (not os.path.islink(filepath)):
+                    if self.bytes_expected is not None:
+                        self.bytes_expected += os.path.getsize(filepath)
+                self._check_file(filepath,
+                                 self.filename or os.path.basename(path))
+        # If dry-run mode is on and we got up to this point, notify that
+        # there aren't any files to upload.
+        if self.dry_run:
+            raise ArvPutUploadNotPending()
+        # Remove local_collection's files that don't exist locally anymore, so the
+        # bytes_written count is correct.
+        for f in self.collection_file_paths(self._local_collection,
+                                            path_prefix=""):
+            if f != 'stdin' and f != self.filename and f not in self._file_paths:
+                self._local_collection.remove(f)
+
+    def start(self, save_collection):
+        """
+        Start supporting thread & file uploading
+        """
+        self._checkpointer.start()
+        try:
+            # Update bytes_written from current local collection and
+            # report initial progress.
+            self._update()
+            # Actual file upload
+            self._upload_started = True # Used by the update thread to start checkpointing
+            self._upload_files()
+        except (SystemExit, Exception) as e:
+            self._checkpoint_before_quit = False
+            # Log stack trace only when Ctrl-C isn't pressed (SIGINT)
+            # Note: We're expecting SystemExit instead of
+            # KeyboardInterrupt because we have a custom signal
+            # handler in place that raises SystemExit with the caught
+            # signal's code.
+            if isinstance(e, PathDoesNotExistError):
+                # We aren't interested in the traceback for this case
+                pass
+            elif not isinstance(e, SystemExit) or e.code != -2:
+                self.logger.warning("Abnormal termination:\n{}".format(
+                    traceback.format_exc()))
+            raise
+        finally:
+            if not self.dry_run:
+                # Stop the thread before doing anything else
+                self._stop_checkpointer.set()
+                self._checkpointer.join()
+                if self._checkpoint_before_quit:
+                    # Commit all pending blocks & one last _update()
+                    self._local_collection.manifest_text()
+                    self._update(final=True)
+                    if save_collection:
+                        self.save_collection()
+            if self.use_cache:
+                self._cache_file.close()
+
+    def save_collection(self):
+        if self.update:
+            # Check if files should be updated on the remote collection.
+            for fp in self._file_paths:
+                remote_file = self._remote_collection.find(fp)
+                if not remote_file:
+                    # File doesn't exist on the remote collection, copy it.
+                    self._remote_collection.copy(fp, fp, self._local_collection)
+                elif remote_file != self._local_collection.find(fp):
+                    # A different file exists on the remote collection, overwrite it.
+                    self._remote_collection.copy(fp, fp, self._local_collection, overwrite=True)
+                else:
+                    # The file already exists on the remote collection, skip it.
+                    pass
+            self._remote_collection.save(storage_classes=self.storage_classes,
+                                         num_retries=self.num_retries)
+        else:
+            if self.storage_classes is None:
+                self.storage_classes = ['default']
+            self._local_collection.save_new(
+                name=self.name, owner_uuid=self.owner_uuid,
+                storage_classes=self.storage_classes,
+                ensure_unique_name=self.ensure_unique_name,
+                num_retries=self.num_retries)
+
+    def destroy_cache(self):
+        if self.use_cache:
+            try:
+                os.unlink(self._cache_filename)
+            except OSError as error:
+                # That's what we wanted anyway.
+                if error.errno != errno.ENOENT:
+                    raise
+            self._cache_file.close()
+
+    def _collection_size(self, collection):
+        """
+        Recursively get the total size of the collection
+        """
+        size = 0
+        for item in listvalues(collection):
+            if isinstance(item, arvados.collection.Collection) or isinstance(item, arvados.collection.Subcollection):
+                size += self._collection_size(item)
+            else:
+                size += item.size()
+        return size
+
+    def _update_task(self):
+        """
+        Periodically called support task. File uploading is
+        asynchronous so we poll status from the collection.
+        """
+        while not self._stop_checkpointer.wait(1 if not self._upload_started else self._update_task_time):
+            self._update()
+
+    def _update(self, final=False):
+        """
+        Update cached manifest text and report progress.
+        """
+        if self._upload_started:
+            with self._collection_lock:
+                self.bytes_written = self._collection_size(self._local_collection)
+                if self.use_cache:
+                    if final:
+                        manifest = self._local_collection.manifest_text()
+                    else:
+                        # Get the manifest text without committing pending blocks
+                        manifest = self._local_collection.manifest_text(strip=False,
+                                                                        normalize=False,
+                                                                        only_committed=True)
+                    # Update cache
+                    with self._state_lock:
+                        self._state['manifest'] = manifest
+            if self.use_cache:
+                try:
+                    self._save_state()
+                except Exception as e:
+                    self.logger.error("Unexpected error trying to save cache file: {}".format(e))
+        else:
+            self.bytes_written = self.bytes_skipped
+        # Call the reporter, if any
+        self.report_progress()
+
+    def report_progress(self):
+        if self.reporter is not None:
+            self.reporter(self.bytes_written, self.bytes_expected)
+
+    def _write_stdin(self, filename):
+        output = self._local_collection.open(filename, 'wb')
+        self._write(sys.stdin, output)
+        output.close()
+
+    def _check_file(self, source, filename):
+        """
+        Check if this file needs to be uploaded
+        """
+        # Ignore symlinks when requested
+        if (not self.follow_links) and os.path.islink(source):
+            return
+        resume_offset = 0
+        should_upload = False
+        new_file_in_cache = False
+        # Record file path for updating the remote collection before exiting
+        self._file_paths.add(filename)
+
+        with self._state_lock:
+            # If no previous cached data on this file, store it for an eventual
+            # repeated run.
+            if source not in self._state['files']:
+                self._state['files'][source] = {
+                    'mtime': os.path.getmtime(source),
+                    'size' : os.path.getsize(source)
+                }
+                new_file_in_cache = True
+            cached_file_data = self._state['files'][source]
+
+        # Check if file was already uploaded (at least partially)
+        file_in_local_collection = self._local_collection.find(filename)
+
+        # If not resuming, upload the full file.
+        if not self.resume:
+            should_upload = True
+        # New file detected from last run, upload it.
+        elif new_file_in_cache:
+            should_upload = True
+        # Local file didn't change from last run.
+        elif cached_file_data['mtime'] == os.path.getmtime(source) and cached_file_data['size'] == os.path.getsize(source):
+            if not file_in_local_collection:
+                # File not uploaded yet, upload it completely
+                should_upload = True
+            elif file_in_local_collection.permission_expired():
+                # Permission token expired, re-upload file. This will change
+                # whenever we have an API for refreshing tokens.
+                self.logger.warning(u"Uploaded file '{}' access token expired, will re-upload it from scratch".format(filename))
+                should_upload = True
+                self._local_collection.remove(filename)
+            elif cached_file_data['size'] == file_in_local_collection.size():
+                # File already there, skip it.
+                self.bytes_skipped += cached_file_data['size']
+            elif cached_file_data['size'] > file_in_local_collection.size():
+                # File partially uploaded, resume!
+                resume_offset = file_in_local_collection.size()
+                self.bytes_skipped += resume_offset
+                should_upload = True
+            else:
+                # Inconsistent cache, re-upload the file
+                should_upload = True
+                self._local_collection.remove(filename)
+                self.logger.warning(u"Uploaded version of file '{}' is bigger than local version, will re-upload it from scratch.".format(source))
+        # Local file differs from cached data, re-upload it.
+        else:
+            if file_in_local_collection:
+                self._local_collection.remove(filename)
+            should_upload = True
+
+        if should_upload:
+            try:
+                self._files_to_upload.append((source, resume_offset, filename))
+            except ArvPutUploadIsPending:
+                # This could happen when running in dry-run mode; close the
+                # cache file to avoid locking issues.
+                self._cache_file.close()
+                raise
+
+    def _upload_files(self):
+        for source, resume_offset, filename in self._files_to_upload:
+            with open(source, 'rb') as source_fd:
+                with self._state_lock:
+                    self._state['files'][source]['mtime'] = os.path.getmtime(source)
+                    self._state['files'][source]['size'] = os.path.getsize(source)
+                if resume_offset > 0:
+                    # Start upload where we left off
+                    output = self._local_collection.open(filename, 'ab')
+                    source_fd.seek(resume_offset)
+                else:
+                    # Start from scratch
+                    output = self._local_collection.open(filename, 'wb')
+                self._write(source_fd, output)
+                output.close(flush=False)
+
+    def _write(self, source_fd, output):
+        while True:
+            data = source_fd.read(arvados.config.KEEP_BLOCK_SIZE)
+            if not data:
+                break
+            output.write(data)
+
+    def _my_collection(self):
+        return self._remote_collection if self.update else self._local_collection
+
+    def _get_cache_filepath(self):
+        # Set up cache file name from input paths.
+        md5 = hashlib.md5()
+        md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost').encode())
+        realpaths = sorted(os.path.realpath(path) for path in self.paths)
+        md5.update(b'\0'.join([p.encode() for p in realpaths]))
+        if self.filename:
+            md5.update(self.filename.encode())
+        cache_filename = md5.hexdigest()
+        cache_filepath = os.path.join(
+            arv_cmd.make_home_conf_dir(self.CACHE_DIR, 0o700, 'raise'),
+            cache_filename)
+        return cache_filepath
+
+    def _setup_state(self, update_collection):
+        """
+        Create a new cache file or load a previously existing one.
+        """
+        # Load an already existing collection for update
+        if update_collection and re.match(arvados.util.collection_uuid_pattern,
+                                          update_collection):
+            try:
+                self._remote_collection = arvados.collection.Collection(
+                    update_collection, api_client=self._api_client)
+            except arvados.errors.ApiError as error:
+                raise CollectionUpdateError("Cannot read collection {} ({})".format(update_collection, error))
+            else:
+                self.update = True
+        elif update_collection:
+            # Collection locator provided, but unknown format
+            raise CollectionUpdateError("Collection locator unknown: '{}'".format(update_collection))
+
+        if self.use_cache:
+            cache_filepath = self._get_cache_filepath()
+            if self.resume and os.path.exists(cache_filepath):
+                self.logger.info(u"Resuming upload from cache file {}".format(cache_filepath))
+                self._cache_file = open(cache_filepath, 'a+')
+            else:
+                # --no-resume means start with an empty cache file.
+                self.logger.info(u"Creating new cache file at {}".format(cache_filepath))
+                self._cache_file = open(cache_filepath, 'w+')
+            self._cache_filename = self._cache_file.name
+            self._lock_file(self._cache_file)
+            self._cache_file.seek(0)
+
+        with self._state_lock:
+            if self.use_cache:
+                try:
+                    self._state = json.load(self._cache_file)
+                    if not set(['manifest', 'files']).issubset(set(self._state.keys())):
+                        # Cache at least partially incomplete, set up new cache
+                        self._state = copy.deepcopy(self.EMPTY_STATE)
+                except ValueError:
+                    # Cache file empty, set up new cache
+                    self._state = copy.deepcopy(self.EMPTY_STATE)
+            else:
+                self.logger.info("No cache usage requested for this run.")
+                # No cache file, set empty state
+                self._state = copy.deepcopy(self.EMPTY_STATE)
+            if not self._cached_manifest_valid():
+                raise ResumeCacheInvalidError()
+            # Load the previous manifest so we can check if files were modified remotely.
+            self._local_collection = arvados.collection.Collection(
+                self._state['manifest'],
+                replication_desired=self.replication_desired,
+                put_threads=self.put_threads,
+                api_client=self._api_client)
+
+    def _cached_manifest_valid(self):
+        """
+        Validate the oldest non-expired block signature to check if the cached
+        manifest is usable: that is, check that the cached manifest was not
+        created with a different Arvados account's credentials.
+        """
+        if self._state.get('manifest', None) is None:
+            # No cached manifest yet, all good.
+            return True
+        now = datetime.datetime.utcnow()
+        oldest_exp = None
+        oldest_loc = None
+        block_found = False
+        for m in keep_locator_pattern.finditer(self._state['manifest']):
+            loc = m.group(0)
+            try:
+                exp = datetime.datetime.utcfromtimestamp(int(loc.split('@')[1], 16))
+            except IndexError:
+                # Locator without signature
+                continue
+            block_found = True
+            if exp > now and (oldest_exp is None or exp < oldest_exp):
+                oldest_exp = exp
+                oldest_loc = loc
+        if not block_found:
+            # No block signatures found => no invalid block signatures.
+            return True
+        if oldest_loc is None:
+            # Locator signatures found, but all have expired.
+            # Reset the cache and move on.
+            self.logger.info('Cache expired, starting from scratch.')
+            self._state['manifest'] = ''
+            return True
+        kc = arvados.KeepClient(api_client=self._api_client,
+                                num_retries=self.num_retries)
+        try:
+            kc.head(oldest_loc)
+        except arvados.errors.KeepRequestError:
+            # Something is wrong, cached manifest is not valid.
+            return False
+        return True
+
+    def collection_file_paths(self, col, path_prefix='.'):
+        """Return a list of file paths by recursively go through the entire collection `col`"""
+        file_paths = []
+        for name, item in listitems(col):
+            if isinstance(item, arvados.arvfile.ArvadosFile):
+                file_paths.append(os.path.join(path_prefix, name))
+            elif isinstance(item, arvados.collection.Subcollection):
+                new_prefix = os.path.join(path_prefix, name)
+                file_paths += self.collection_file_paths(item, path_prefix=new_prefix)
+        return file_paths
+
+    def _lock_file(self, fileobj):
+        try:
+            fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
+        except IOError:
+            raise ResumeCacheConflict(u"{} locked".format(fileobj.name))
+
+    def _save_state(self):
+        """
+        Atomically save current state into cache.
+        """
+        with self._state_lock:
+            # We're not using copy.deepcopy() here because it's a lot slower
+            # than json.dumps(), and we already need the state in JSON format
+            # to save it on disk.
+            state = json.dumps(self._state)
+        try:
+            new_cache = tempfile.NamedTemporaryFile(
+                mode='w+',
+                dir=os.path.dirname(self._cache_filename), delete=False)
+            self._lock_file(new_cache)
+            new_cache.write(state)
+            new_cache.flush()
+            os.fsync(new_cache)
+            os.rename(new_cache.name, self._cache_filename)
+        except (IOError, OSError, ResumeCacheConflict) as error:
+            self.logger.error("There was a problem while saving the cache file: {}".format(error))
+            try:
+                os.unlink(new_cache.name)
+            except NameError:  # NamedTemporaryFile() failed.
+                pass
+        else:
+            self._cache_file.close()
+            self._cache_file = new_cache
+
+    def collection_name(self):
+        return self._my_collection().api_response()['name'] if self._my_collection().api_response() else None
+
+    def manifest_locator(self):
+        return self._my_collection().manifest_locator()
+
+    def portable_data_hash(self):
+        pdh = self._my_collection().portable_data_hash()
+        m = self._my_collection().stripped_manifest().encode()
+        local_pdh = '{}+{}'.format(hashlib.md5(m).hexdigest(), len(m))
+        if pdh != local_pdh:
+            self.logger.warning("\n".join([
+                "arv-put: API server provided PDH differs from local manifest.",
+                "         This should not happen; showing API server version."]))
+        return pdh
+
+    def manifest_text(self, stream_name=".", strip=False, normalize=False):
+        return self._my_collection().manifest_text(stream_name, strip, normalize)
+
+    def _datablocks_on_item(self, item):
+        """
+        Return a list of datablock locators, recursively navigating
+        through subcollections
+        """
+        if isinstance(item, arvados.arvfile.ArvadosFile):
+            if item.size() == 0:
+                # Empty file locator
+                return ["d41d8cd98f00b204e9800998ecf8427e+0"]
+            else:
+                locators = []
+                for segment in item.segments():
+                    loc = segment.locator
+                    locators.append(loc)
+                return locators
+        elif isinstance(item, arvados.collection.Collection):
+            l = [self._datablocks_on_item(x) for x in listvalues(item)]
+            # Fast list flattener method taken from:
+            # http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
+            return [loc for sublist in l for loc in sublist]
+        else:
+            return None
+
+    def data_locators(self):
+        with self._collection_lock:
+            # Make sure all datablocks are flushed before getting the locators
+            self._my_collection().manifest_text()
+            datablocks = self._datablocks_on_item(self._my_collection())
+        return datablocks
+
+_machine_format = "{} {}: {{}} written {{}} total\n".format(sys.argv[0],
+                                                            os.getpid())
+
+# Simulate glob.glob() matching behavior without the need to scan the filesystem
+# Note: fnmatch() doesn't work correctly when used with pathnames. For example the
+# pattern 'tests/*.py' will match 'tests/run_test.py' and also 'tests/subdir/run_test.py',
+# so instead we're using it on every path component.
+def pathname_match(pathname, pattern):
+    name = pathname.split(os.sep)
+    # Fix patterns like 'some/subdir/' or 'some//subdir'
+    pat = [x for x in pattern.split(os.sep) if x != '' and x != '.']
+    if len(name) != len(pat):
+        return False
+    for i in range(len(name)):
+        if not fnmatch.fnmatch(name[i], pat[i]):
+            return False
+    return True
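+
+# For example (illustrative):
+#   pathname_match('tests/run_test.py', 'tests/*.py')         -> True
+#   pathname_match('tests/subdir/run_test.py', 'tests/*.py')  -> False
+#   pathname_match('tests/run_test.py', 'tests//*.py')        -> True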
+
+def machine_progress(bytes_written, bytes_expected):
+    return _machine_format.format(
+        bytes_written, -1 if (bytes_expected is None) else bytes_expected)
+
+def human_progress(bytes_written, bytes_expected):
+    if bytes_expected:
+        return "\r{}M / {}M {:.1%} ".format(
+            bytes_written >> 20, bytes_expected >> 20,
+            float(bytes_written) / bytes_expected)
+    else:
+        return "\r{} ".format(bytes_written)
+
+def progress_writer(progress_func, outfile=sys.stderr):
+    def write_progress(bytes_written, bytes_expected):
+        outfile.write(progress_func(bytes_written, bytes_expected))
+    return write_progress
+
+def desired_project_uuid(api_client, project_uuid, num_retries):
+    if not project_uuid:
+        query = api_client.users().current()
+    elif arvados.util.user_uuid_pattern.match(project_uuid):
+        query = api_client.users().get(uuid=project_uuid)
+    elif arvados.util.group_uuid_pattern.match(project_uuid):
+        query = api_client.groups().get(uuid=project_uuid)
+    else:
+        raise ValueError("Not a valid project UUID: {}".format(project_uuid))
+    return query.execute(num_retries=num_retries)['uuid']
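+
+# Illustrative UUIDs (hypothetical cluster prefix 'zzzzz'): a user UUID looks
+# like 'zzzzz-tpzed-xxxxxxxxxxxxxxx' and a group/project UUID like
+# 'zzzzz-j7d0g-xxxxxxxxxxxxxxx'.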
+
+def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr,
+         install_sig_handlers=True):
+    global api_client
+
+    args = parse_arguments(arguments)
+    logger = logging.getLogger('arvados.arv_put')
+    if args.silent:
+        logger.setLevel(logging.WARNING)
+    else:
+        logger.setLevel(logging.INFO)
+    status = 0
+
+    request_id = arvados.util.new_request_id()
+
+    formatter = ArvPutLogFormatter(request_id)
+    logging.getLogger('arvados').handlers[0].setFormatter(formatter)
+
+    if api_client is None:
+        api_client = arvados.api('v1', request_id=request_id)
+
+    if install_sig_handlers:
+        arv_cmd.install_signal_handlers()
+
+    # Determine the name to use
+    if args.name:
+        if args.stream or args.raw:
+            logger.error("Cannot use --name with --stream or --raw")
+            sys.exit(1)
+        elif args.update_collection:
+            logger.error("Cannot use --name with --update-collection")
+            sys.exit(1)
+        collection_name = args.name
+    else:
+        collection_name = "Saved at {} by {}@{}".format(
+            datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC"),
+            pwd.getpwuid(os.getuid()).pw_name,
+            socket.gethostname())
+
+    if args.project_uuid and (args.stream or args.raw):
+        logger.error("Cannot use --project-uuid with --stream or --raw")
+        sys.exit(1)
+
+    # Determine the parent project
+    try:
+        project_uuid = desired_project_uuid(api_client, args.project_uuid,
+                                            args.retries)
+    except (apiclient_errors.Error, ValueError) as error:
+        logger.error(error)
+        sys.exit(1)
+
+    if args.progress:
+        reporter = progress_writer(human_progress)
+    elif args.batch_progress:
+        reporter = progress_writer(machine_progress)
+    else:
+        reporter = None
+
+    #  Split storage-classes argument
+    storage_classes = None
+    if args.storage_classes:
+        storage_classes = args.storage_classes.strip().split(',')
+        if len(storage_classes) > 1:
+            logger.error("Multiple storage classes are not supported currently.")
+            sys.exit(1)
+
+
+    # Setup exclude regex from all the --exclude arguments provided
+    name_patterns = []
+    exclude_paths = []
+    exclude_names = None
+    if len(args.exclude) > 0:
+        # We're supporting 2 kinds of exclusion patterns:
+        # 1)   --exclude '*.jpg'    (file/dir name patterns, will only match
+        #                            the name, wherever the file is on the tree)
+        # 2.1) --exclude 'foo/bar'  (file/dir path patterns, will match the
+        #                            entire path, and should be relative to
+        #                            any input dir argument)
+        # 2.2) --exclude './*.jpg'  (Special case for excluding files/dirs
+        #                            placed directly underneath the input dir)
+        for p in args.exclude:
+            # Only relative path patterns are allowed
+            if p.startswith(os.sep):
+                logger.error("Cannot use absolute paths with --exclude")
+                sys.exit(1)
+            if os.path.dirname(p):
+                # We don't support path patterns with '..'
+                p_parts = p.split(os.sep)
+                if '..' in p_parts:
+                    logger.error(
+                        "Cannot use path patterns that include or '..'")
+                    sys.exit(1)
+                # Path search pattern
+                exclude_paths.append(p)
+            else:
+                # Name-only search pattern
+                name_patterns.append(p)
+        # For name only matching, we can combine all patterns into a single
+        # regexp, for better performance.
+        exclude_names = re.compile('|'.join(
+            [fnmatch.translate(p) for p in name_patterns]
+        )) if len(name_patterns) > 0 else None
+        # Show the user the patterns to be used, just in case they weren't
+        # specified inside quotes and got changed by the shell expansion.
+        logger.info("Exclude patterns: {}".format(args.exclude))
+
+    # If this is used by a human, and there's at least one directory to be
+    # uploaded, the expected bytes calculation can take a moment.
+    if args.progress and any([os.path.isdir(f) for f in args.paths]):
+        logger.info("Calculating upload size, this could take some time...")
+    try:
+        writer = ArvPutUploadJob(paths = args.paths,
+                                 resume = args.resume,
+                                 use_cache = args.use_cache,
+                                 filename = args.filename,
+                                 reporter = reporter,
+                                 api_client = api_client,
+                                 num_retries = args.retries,
+                                 replication_desired = args.replication,
+                                 put_threads = args.threads,
+                                 name = collection_name,
+                                 owner_uuid = project_uuid,
+                                 ensure_unique_name = True,
+                                 update_collection = args.update_collection,
+                                 storage_classes=storage_classes,
+                                 logger=logger,
+                                 dry_run=args.dry_run,
+                                 follow_links=args.follow_links,
+                                 exclude_paths=exclude_paths,
+                                 exclude_names=exclude_names)
+    except ResumeCacheConflict:
+        logger.error("\n".join([
+            "arv-put: Another process is already uploading this data.",
+            "         Use --no-cache if this is really what you want."]))
+        sys.exit(1)
+    except ResumeCacheInvalidError:
+        logger.error("\n".join([
+            "arv-put: Resume cache contains invalid signature: it may have expired",
+            "         or been created with another Arvados user's credentials.",
+            "         Switch user or use one of the following options to restart upload:",
+            "         --no-resume to start a new resume cache.",
+            "         --no-cache to disable resume cache."]))
+        sys.exit(1)
+    except CollectionUpdateError as error:
+        logger.error("\n".join([
+            "arv-put: %s" % str(error)]))
+        sys.exit(1)
+    except ArvPutUploadIsPending:
+        # Dry run check successful, return proper exit code.
+        sys.exit(2)
+    except ArvPutUploadNotPending:
+        # No files pending for upload
+        sys.exit(0)
+    except PathDoesNotExistError as error:
+        logger.error("\n".join([
+            "arv-put: %s" % str(error)]))
+        sys.exit(1)
+
+    if not args.dry_run and not args.update_collection and args.resume and writer.bytes_written > 0:
+        logger.warning("\n".join([
+            "arv-put: Resuming previous upload from last checkpoint.",
+            "         Use the --no-resume option to start over."]))
+
+    if not args.dry_run:
+        writer.report_progress()
+    output = None
+    try:
+        writer.start(save_collection=not(args.stream or args.raw))
+    except arvados.errors.ApiError as error:
+        logger.error("\n".join([
+            "arv-put: %s" % str(error)]))
+        sys.exit(1)
+
+    if args.progress:  # Print newline to split stderr from stdout for humans.
+        logger.info("\n")
+
+    if args.stream:
+        if args.normalize:
+            output = writer.manifest_text(normalize=True)
+        else:
+            output = writer.manifest_text()
+    elif args.raw:
+        output = ','.join(writer.data_locators())
+    else:
+        try:
+            if args.update_collection:
+                logger.info(u"Collection updated: '{}'".format(writer.collection_name()))
+            else:
+                logger.info(u"Collection saved as '{}'".format(writer.collection_name()))
+            if args.portable_data_hash:
+                output = writer.portable_data_hash()
+            else:
+                output = writer.manifest_locator()
+        except apiclient_errors.Error as error:
+            logger.error(
+                "arv-put: Error creating Collection on project: {}.".format(
+                    error))
+            status = 1
+
+    # Print the locator (uuid) of the new collection.
+    if output is None:
+        status = status or 1
+    elif not args.silent:
+        stdout.write(output)
+        if not output.endswith('\n'):
+            stdout.write('\n')
+
+    if install_sig_handlers:
+        arv_cmd.restore_signal_handlers()
+
+    if status != 0:
+        sys.exit(status)
+
+    # Success!
+    return output
+
+
+if __name__ == '__main__':
+    main()
diff --git a/sdk/python/arvados/commands/run.py b/sdk/python/arvados/commands/run.py
new file mode 100644 (file)
index 0000000..b17ed29
--- /dev/null
@@ -0,0 +1,433 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+# Copyright (C) 2018 Genome Research Ltd.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+from __future__ import absolute_import
+from builtins import range
+from past.builtins import basestring
+from builtins import object
+import arvados
+import arvados.commands.ws as ws
+import argparse
+import json
+import re
+import os
+import stat
+from . import put
+import time
+import subprocess
+import logging
+import sys
+import errno
+import arvados.commands._util as arv_cmd
+import arvados.collection
+import arvados.config as config
+
+from arvados._version import __version__
+
+logger = logging.getLogger('arvados.arv-run')
+logger.setLevel(logging.INFO)
+
+arvrun_parser = argparse.ArgumentParser(parents=[arv_cmd.retry_opt])
+arvrun_parser.add_argument('--dry-run', action="store_true",
+                           help="Print out the pipeline that would be submitted and exit")
+arvrun_parser.add_argument('--local', action="store_true",
+                           help="Run locally using arv-run-pipeline-instance")
+arvrun_parser.add_argument('--docker-image', type=str,
+                           help="Docker image to use, otherwise use instance default.")
+arvrun_parser.add_argument('--ignore-rcode', action="store_true",
+                           help="Commands that return non-zero return codes should not be considered failed.")
+arvrun_parser.add_argument('--no-reuse', action="store_true",
+                           help="Do not reuse past jobs.")
+arvrun_parser.add_argument('--no-wait', action="store_true",
+                           help="Do not wait and display logs after submitting command, just exit.")
+arvrun_parser.add_argument('--project-uuid', type=str,
+                           help="Parent project of the pipeline")
+arvrun_parser.add_argument('--git-dir', type=str, default="",
+                           help="Git repository passed to arv-crunch-job when using --local")
+arvrun_parser.add_argument('--repository', type=str, default="arvados",
+                           help="repository field of component, default 'arvados'")
+arvrun_parser.add_argument('--script-version', type=str, default="master",
+                           help="script_version field of component, default 'master'")
+arvrun_parser.add_argument('--version', action='version',
+                           version="%s %s" % (sys.argv[0], __version__),
+                           help='Print version and exit.')
+arvrun_parser.add_argument('args', nargs=argparse.REMAINDER)
+
+class ArvFile(object):
+    def __init__(self, prefix, fn):
+        self.prefix = prefix
+        self.fn = fn
+
+    def __hash__(self):
+        return (self.prefix+self.fn).__hash__()
+
+    def __eq__(self, other):
+        return (self.prefix == other.prefix) and (self.fn == other.fn)
+
+class UploadFile(ArvFile):
+    pass
+
+# Determine if a file is in a collection, and return a tuple consisting of the
+# portable data hash and the path relative to the root of the collection.
+# Return (None, None) if the path isn't within an arv-mount collection or an
+# error occurs.
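+# For example, the ".arvados#collection" pseudofile for a mounted collection
+# holds JSON such as (hypothetical contents, other keys omitted):
+#   {"portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0"}
+# and only the "portable_data_hash" key is read here.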
+def is_in_collection(root, branch):
+    try:
+        if root == "/":
+            return (None, None)
+        fn = os.path.join(root, ".arvados#collection")
+        if os.path.exists(fn):
+            with open(fn, 'r') as f:
+                c = json.load(f)
+            return (c["portable_data_hash"], branch)
+        else:
+            sp = os.path.split(root)
+            return is_in_collection(sp[0], os.path.join(sp[1], branch))
+    except (IOError, OSError):
+        return (None, None)
+
+# Determine the project in which to place the output of this command by
+# searching upward for an arv-mount pseudofile indicating the project.  If the
+# cwd isn't within
+# an arv-mount project or there is an error, return current_user.
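+# The ".arvados#project" pseudofile is expected to hold the project record as
+# JSON, e.g. (hypothetical uuid, other keys omitted):
+#   {"uuid": "zzzzz-j7d0g-0123456789abcde", "writable_by": [<user uuids>]}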
+def determine_project(root, current_user):
+    try:
+        if root == "/":
+            return current_user
+        fn = os.path.join(root, ".arvados#project")
+        if os.path.exists(fn):
+            with open(fn, 'r') as f:
+                c = json.load(f)
+            if 'writable_by' in c and current_user in c['writable_by']:
+                return c["uuid"]
+            else:
+                return current_user
+        else:
+            sp = os.path.split(root)
+            return determine_project(sp[0], current_user)
+    except (IOError, OSError):
+        return current_user
+
+# Determine if a string corresponds to a file, and if that file is part of an
+# arv-mounted collection or only local to the machine.  Returns one of
+# ArvFile() (file already exists in a collection), UploadFile() (file needs to
+# be uploaded to a collection), or simply returns prefix+fn (which yields the
+# original parameter string).
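+# A hedged sketch of the possible outcomes (hypothetical paths):
+#   statfile('--input=', './in.txt')
+# returns UploadFile('--input=', '<abspath minus leading slash>') for a plain
+# local file, ArvFile('--input=', '$(file <pdh>/in.txt)') if the file lives in
+# an arv-mount collection, and the string '--input=./in.txt' unchanged if no
+# such file exists.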
+def statfile(prefix, fn, fnPattern="$(file %s/%s)", dirPattern="$(dir %s/%s/)", raiseOSError=False):
+    absfn = os.path.abspath(fn)
+    try:
+        st = os.stat(absfn)
+        sp = os.path.split(absfn)
+        (pdh, branch) = is_in_collection(sp[0], sp[1])
+        if pdh:
+            if stat.S_ISREG(st.st_mode):
+                return ArvFile(prefix, fnPattern % (pdh, branch))
+            elif stat.S_ISDIR(st.st_mode):
+                return ArvFile(prefix, dirPattern % (pdh, branch))
+            else:
+                raise Exception("%s is not a regular file or directory" % absfn)
+        else:
+            # trim leading '/' for path prefix test later
+            return UploadFile(prefix, absfn[1:])
+    except OSError as e:
+        if e.errno == errno.ENOENT and not raiseOSError:
+            pass
+        else:
+            raise
+
+    return prefix+fn
+
+def write_file(collection, pathprefix, fn, flush=False):
+    with open(os.path.join(pathprefix, fn), "rb") as src:
+        dst = collection.open(fn, "wb")
+        r = src.read(1024*128)
+        while r:
+            dst.write(r)
+            r = src.read(1024*128)
+        dst.close(flush=flush)
+
+def uploadfiles(files, api, dry_run=False, num_retries=0,
+                project=None,
+                fnPattern="$(file %s/%s)",
+                name=None,
+                collection=None,
+                packed=True):
+    # Find the smallest path prefix that includes all the files that need to be uploaded.
+    # This starts at the root and iteratively removes common parent directory prefixes
+    # until all file paths no longer have a common parent.
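+    # Worked example (hypothetical paths): for files "tmp/a/b.txt" and
+    # "tmp/c.txt" the loop below settles on pathprefix "/tmp/" and rewrites
+    # the names to "a/b.txt" and "c.txt".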
+    if files:
+        n = True
+        pathprefix = "/"
+        while n:
+            pathstep = None
+            for c in files:
+                if pathstep is None:
+                    sp = c.fn.split('/')
+                    if len(sp) < 2:
+                        # no parent directories left
+                        n = False
+                        break
+                    # pathstep is the next directory level to test
+                    pathstep = sp[0] + "/"
+                else:
+                    # check if pathstep is common prefix for all files
+                    if not c.fn.startswith(pathstep):
+                        n = False
+                        break
+            if n:
+                # pathstep is common parent directory for all files, so remove the prefix
+                # from each path
+                pathprefix += pathstep
+                for c in files:
+                    c.fn = c.fn[len(pathstep):]
+
+        logger.info("Upload local files: \"%s\"", '" "'.join([c.fn for c in files]))
+
+    if dry_run:
+        logger.info("$(input) is %s", pathprefix.rstrip('/'))
+        pdh = "$(input)"
+    else:
+        files = sorted(files, key=lambda x: x.fn)
+        if collection is None:
+            collection = arvados.collection.Collection(api_client=api, num_retries=num_retries)
+        prev = ""
+        for f in files:
+            localpath = os.path.join(pathprefix, f.fn)
+            if prev and localpath.startswith(prev+"/"):
+                # If this path is inside an already uploaded subdirectory,
+                # don't redundantly re-upload it.
+                # e.g. we uploaded /tmp/foo and the next file is /tmp/foo/bar
+                # skip it because it starts with "/tmp/foo/"
+                continue
+            prev = localpath
+            if os.path.isfile(localpath):
+                write_file(collection, pathprefix, f.fn, not packed)
+            elif os.path.isdir(localpath):
+                for root, dirs, iterfiles in os.walk(localpath):
+                    root = root[len(pathprefix):]
+                    for src in iterfiles:
+                        write_file(collection, pathprefix, os.path.join(root, src), not packed)
+
+        pdh = None
+        if len(collection) > 0:
+            # non-empty collection
+            filters = [["portable_data_hash", "=", collection.portable_data_hash()]]
+            name_pdh = "%s (%s)" % (name, collection.portable_data_hash())
+            if name:
+                filters.append(["name", "=", name_pdh])
+            if project:
+                filters.append(["owner_uuid", "=", project])
+
+            # Do the list/create in a loop with up to 2 tries.  Because we use
+            # `ensure_unique_name=False`, there is a potential race with other
+            # workflows that may create the collection between our list call
+            # (which finds nothing) and our attempt to create it.
+            tries = 2
+            while pdh is None and tries > 0:
+                exists = api.collections().list(filters=filters, limit=1).execute(num_retries=num_retries)
+
+                if exists["items"]:
+                    item = exists["items"][0]
+                    pdh = item["portable_data_hash"]
+                    logger.info("Using collection %s (%s)", pdh, item["uuid"])
+                else:
+                    try:
+                        collection.save_new(name=name_pdh, owner_uuid=project, ensure_unique_name=False)
+                        pdh = collection.portable_data_hash()
+                        logger.info("Uploaded to %s (%s)", pdh, collection.manifest_locator())
+                    except arvados.errors.ApiError as ae:
+                        tries -= 1
+            if pdh is None:
+                # Something weird is going on here, probably a collection with
+                # a conflicting name but the wrong PDH.  We won't be able to
+                # reuse it, but we still need to save our collection, so save
+                # it with a unique name.
+                logger.info("Name conflict on '%s', existing collection has an unexpected portable data hash", name_pdh)
+                collection.save_new(name=name_pdh, owner_uuid=project, ensure_unique_name=True)
+                pdh = collection.portable_data_hash()
+                logger.info("Uploaded to %s (%s)", pdh, collection.manifest_locator())
+        else:
+            # empty collection
+            pdh = collection.portable_data_hash()
+            assert (pdh == config.EMPTY_BLOCK_LOCATOR), "Empty collection portable_data_hash did not have expected locator, was %s" % pdh
+            logger.info("Using empty collection %s", pdh)
+
+    for c in files:
+        c.keepref = "%s/%s" % (pdh, c.fn)
+        c.fn = fnPattern % (pdh, c.fn)
+
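+# A hedged usage sketch for uploadfiles() (hypothetical names): given
+#   files = [UploadFile('--in=', 'tmp/x.txt')]
+# a call such as
+#   uploadfiles(files, api, num_retries=3, project=project_uuid, name='inputs')
+# rewrites each entry so that .fn becomes "$(file <pdh>/x.txt)" and .keepref
+# becomes "<pdh>/x.txt", where <pdh> is the collection's portable data hash.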
+
+def main(arguments=None):
+    args = arvrun_parser.parse_args(arguments)
+
+    if len(args.args) == 0:
+        arvrun_parser.print_help()
+        return
+
+    starting_args = args.args
+
+    reading_into = 2
+
+    # Parse the command arguments into 'slots'.
+    # All words following '>' are output arguments and are collected into slots[0].
+    # All words following '<' are input arguments and are collected into slots[1].
+    # slots[2..] store the parameters of each command in the pipeline.
+    #
+    # e.g. arv-run foo arg1 arg2 '|' bar arg3 arg4 '<' input1 input2 input3 '>' output.txt
+    # will be parsed into:
+    #   [['output.txt'],
+    #    ['input1', 'input2', 'input3'],
+    #    ['foo', 'arg1', 'arg2'],
+    #    ['bar', 'arg3', 'arg4']]
+    slots = [[], [], []]
+    for c in args.args:
+        if c.startswith('>'):
+            reading_into = 0
+            if len(c) > 1:
+                slots[reading_into].append(c[1:])
+        elif c.startswith('<'):
+            reading_into = 1
+            if len(c) > 1:
+                slots[reading_into].append(c[1:])
+        elif c == '|':
+            reading_into = len(slots)
+            slots.append([])
+        else:
+            slots[reading_into].append(c)
+
+    if slots[0] and len(slots[0]) > 1:
+        logger.error("Can only specify a single stdout file (run-command substitutions are permitted)")
+        return
+
+    if not args.dry_run:
+        api = arvados.api('v1')
+        if args.project_uuid:
+            project = args.project_uuid
+        else:
+            project = determine_project(os.getcwd(), api.users().current().execute()["uuid"])
+
+    # Identify input files.  Look at each parameter and test to see if there is
+    # a file by that name.  This uses 'patterns' to find file names embedded
+    # within command line arguments, such as --foo=file.txt or -lfile.txt
+    patterns = [re.compile("([^=]+=)(.*)"),
+                re.compile("(-[A-Za-z])(.+)")]
+    for j, command in enumerate(slots[1:]):
+        for i, a in enumerate(command):
+            if j > 0 and i == 0:
+                # j == 0 is stdin, j > 0 is commands
+                # always skip program executable (i == 0) in commands
+                pass
+            elif a.startswith('\\'):
+                # if it starts with a \ then don't do any interpretation
+                command[i] = a[1:]
+            else:
+                # See if it looks like a file
+                command[i] = statfile('', a)
+
+                # If a file named command[i] was found, it would now be an
+                # ArvFile or UploadFile.  If command[i] is a basestring, that
+                # means it doesn't correspond exactly to a file, so do some
+                # pattern matching.
+                if isinstance(command[i], basestring):
+                    for p in patterns:
+                        m = p.match(a)
+                        if m:
+                            command[i] = statfile(m.group(1), m.group(2))
+                            break
+
+    files = [c for command in slots[1:] for c in command if isinstance(c, UploadFile)]
+    if files:
+        uploadfiles(files, api, dry_run=args.dry_run, num_retries=args.retries, project=project)
+
+    for i in range(1, len(slots)):
+        slots[i] = [("%s%s" % (c.prefix, c.fn)) if isinstance(c, ArvFile) else c for c in slots[i]]
+
+    component = {
+        "script": "run-command",
+        "script_version": args.script_version,
+        "repository": args.repository,
+        "script_parameters": {
+        },
+        "runtime_constraints": {}
+    }
+
+    if args.docker_image:
+        component["runtime_constraints"]["docker_image"] = args.docker_image
+
+    task_foreach = []
+    group_parser = argparse.ArgumentParser()
+    group_parser.add_argument('-b', '--batch-size', type=int)
+    group_parser.add_argument('args', nargs=argparse.REMAINDER)
+
+    for s in range(2, len(slots)):
+        for i in range(0, len(slots[s])):
+            if slots[s][i] == '--':
+                inp = "input%i" % (s-2)
+                # Parse the arguments following '--' in this command's slot.
+                groupargs = group_parser.parse_args(slots[s][i+1:])
+                if groupargs.batch_size:
+                    component["script_parameters"][inp] = {"value": {"batch":groupargs.args, "size":groupargs.batch_size}}
+                    slots[s] = slots[s][0:i] + [{"foreach": inp, "command": "$(%s)" % inp}]
+                else:
+                    component["script_parameters"][inp] = groupargs.args
+                    slots[s] = slots[s][0:i] + ["$(%s)" % inp]
+                task_foreach.append(inp)
+                break
+            if slots[s][i] == r'\--':
+                slots[s][i] = '--'
+
+    if slots[0]:
+        component["script_parameters"]["task.stdout"] = slots[0][0]
+    if slots[1]:
+        task_foreach.append("stdin")
+        component["script_parameters"]["stdin"] = slots[1]
+        component["script_parameters"]["task.stdin"] = "$(stdin)"
+
+    if task_foreach:
+        component["script_parameters"]["task.foreach"] = task_foreach
+
+    component["script_parameters"]["command"] = slots[2:]
+    if args.ignore_rcode:
+        component["script_parameters"]["task.ignore_rcode"] = args.ignore_rcode
+
+    pipeline = {
+        "name": "arv-run " + " | ".join([s[0] for s in slots[2:]]),
+        "description": "@" + " ".join(starting_args) + "@",
+        "components": {
+            "command": component
+        },
+        "state": "RunningOnClient" if args.local else "RunningOnServer"
+    }
+
+    if args.dry_run:
+        print(json.dumps(pipeline, indent=4))
+    else:
+        pipeline["owner_uuid"] = project
+        pi = api.pipeline_instances().create(body=pipeline, ensure_unique_name=True).execute()
+        logger.info("Running pipeline %s", pi["uuid"])
+
+        if args.local:
+            subprocess.call(["arv-run-pipeline-instance", "--instance", pi["uuid"], "--run-jobs-here"] + (["--no-reuse"] if args.no_reuse else []))
+        elif not args.no_wait:
+            ws.main(["--pipeline", pi["uuid"]])
+
+        pi = api.pipeline_instances().get(uuid=pi["uuid"]).execute()
+        logger.info("Pipeline is %s", pi["state"])
+        if "output_uuid" in pi["components"]["command"]:
+            logger.info("Output is %s", pi["components"]["command"]["output_uuid"])
+        else:
+            logger.info("No output")
+
+if __name__ == '__main__':
+    main()
diff --git a/sdk/python/arvados/commands/ws.py b/sdk/python/arvados/commands/ws.py
new file mode 100644 (file)
index 0000000..37dab55
--- /dev/null
@@ -0,0 +1,122 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+import sys
+import logging
+import argparse
+import arvados
+import json
+from arvados.events import subscribe
+from arvados._version import __version__
+import signal
+
+def main(arguments=None):
+    logger = logging.getLogger('arvados.arv-ws')
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--version', action='version',
+                        version="%s %s" % (sys.argv[0], __version__),
+                        help='Print version and exit.')
+    parser.add_argument('-u', '--uuid', type=str, default="", help="Filter events on object_uuid")
+    parser.add_argument('-f', '--filters', type=str, default="", help="Arvados query filter to apply to log events (JSON encoded)")
+    parser.add_argument('-s', '--start-time', type=str, default="", help="Arvados query filter to fetch log events created at or after this time. This will be server time in UTC. Allowed format: YYYY-MM-DD or YYYY-MM-DD hh:mm:ss")
+    parser.add_argument('-i', '--id', type=int, default=None, help="Start from given log id.")
+
+    group = parser.add_mutually_exclusive_group()
+    group.add_argument('--poll-interval', default=15, type=int, help="If websockets are not available, specify the polling interval; default is every 15 seconds")
+    group.add_argument('--no-poll', action='store_false', dest='poll_interval', help="Do not poll if websockets are not available, just fail")
+
+    group = parser.add_mutually_exclusive_group()
+    group.add_argument('-p', '--pipeline', type=str, default="", help="Supply a pipeline uuid; print log output from the pipeline and its jobs")
+    group.add_argument('-j', '--job', type=str, default="", help="Supply a job uuid; print log output from that job")
+
+    args = parser.parse_args(arguments)
+
+    global filters
+    global known_component_jobs
+    global ws
+
+    filters = []
+    known_component_jobs = set()
+    ws = None
+
+    def update_subscribed_components(components):
+        global known_component_jobs
+        global filters
+        pipeline_jobs = set()
+        for c in components:
+            if "job" in components[c]:
+                pipeline_jobs.add(components[c]["job"]["uuid"])
+        if known_component_jobs != pipeline_jobs:
+            new_filters = [['object_uuid', 'in', [args.pipeline] + list(pipeline_jobs)]]
+            ws.subscribe(new_filters)
+            ws.unsubscribe(filters)
+            filters = new_filters
+            known_component_jobs = pipeline_jobs
+
+    api = arvados.api('v1')
+
+    if args.uuid:
+        filters += [ ['object_uuid', '=', args.uuid] ]
+
+    if args.filters:
+        filters += json.loads(args.filters)
+
+    if args.job:
+        filters += [ ['object_uuid', '=', args.job] ]
+
+    if args.pipeline:
+        filters += [ ['object_uuid', '=', args.pipeline] ]
+
+    if args.start_time:
+        last_log_id = 1
+        filters += [ ['created_at', '>=', args.start_time] ]
+    else:
+        last_log_id = None
+
+    if args.id:
+        last_log_id = args.id-1
+
+    def on_message(ev):
+        global filters
+        global ws
+
+        logger.debug(ev)
+        if 'event_type' in ev and (args.pipeline or args.job):
+            if ev['event_type'] in ('stderr', 'stdout'):
+                sys.stdout.write(ev["properties"]["text"])
+            elif ev["event_type"] in ("create", "update"):
+                if ev["object_kind"] == "arvados#pipelineInstance":
+                    c = api.pipeline_instances().get(uuid=ev["object_uuid"]).execute()
+                    update_subscribed_components(c["components"])
+
+                if ev["object_kind"] == "arvados#pipelineInstance" and args.pipeline:
+                    if ev["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Paused"):
+                        ws.close()
+
+                if ev["object_kind"] == "arvados#job" and args.job:
+                    if ev["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled"):
+                        ws.close()
+        elif 'status' in ev and ev['status'] == 200:
+            pass
+        else:
+            print(json.dumps(ev))
+
+    try:
+        ws = subscribe(arvados.api('v1'), filters, on_message, poll_fallback=args.poll_interval, last_log_id=last_log_id)
+        if ws:
+            if args.pipeline:
+                c = api.pipeline_instances().get(uuid=args.pipeline).execute()
+                update_subscribed_components(c["components"])
+                if c["state"] in ("Complete", "Failed", "Paused"):
+                    ws.close()
+            ws.run_forever()
+    except KeyboardInterrupt:
+        pass
+    except Exception as e:
+        logger.error(e)
+    finally:
+        if ws:
+            ws.close()
diff --git a/sdk/python/arvados/config.py b/sdk/python/arvados/config.py
new file mode 100644 (file)
index 0000000..e17eb1f
--- /dev/null
@@ -0,0 +1,60 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# config.py - configuration settings and global variables for Arvados clients
+#
+# Arvados configuration settings are taken from $HOME/.config/arvados.
+# Environment variables override settings in the config file.
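+#
+# For example, a settings.conf might contain (hypothetical values):
+#   ARVADOS_API_HOST=zzzzz.arvadosapi.com
+#   ARVADOS_API_TOKEN=xxxxxxxxxxxxxxxxxxxxx
+#   ARVADOS_API_HOST_INSECURE=no
+# Blank lines and lines starting with '#' are ignored by load().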
+
+import os
+import re
+
+_settings = None
+if os.environ.get('HOME') is not None:
+    default_config_file = os.environ['HOME'] + '/.config/arvados/settings.conf'
+else:
+    default_config_file = ''
+
+KEEP_BLOCK_SIZE = 2**26
+EMPTY_BLOCK_LOCATOR = 'd41d8cd98f00b204e9800998ecf8427e+0'
+
+def initialize(config_file=default_config_file):
+    global _settings
+    _settings = {}
+
+    # load the specified config file if available
+    try:
+        _settings = load(config_file)
+    except IOError:
+        pass
+
+    # override any settings with environment vars
+    for var in os.environ:
+        if var.startswith('ARVADOS_'):
+            _settings[var] = os.environ[var]
+
+def load(config_file):
+    cfg = {}
+    with open(config_file, "r") as f:
+        for config_line in f:
+            if re.match(r'^\s*$', config_line):
+                continue
+            if re.match(r'^\s*#', config_line):
+                continue
+            # Split on the first '=' only, so values may themselves contain '='.
+            var, val = config_line.rstrip().split('=', 1)
+            cfg[var] = val
+    return cfg
+
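+# For example, with the setting ARVADOS_API_HOST_INSECURE=yes,
+# flag_is_true('ARVADOS_API_HOST_INSECURE') returns True; any value outside
+# {'1', 't', 'true', 'y', 'yes'} (case-insensitive), or an unset key, is False.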
+def flag_is_true(key, d=None):
+    if d is None:
+        d = settings()
+    return d.get(key, '').lower() in set(['1', 't', 'true', 'y', 'yes'])
+
+def get(key, default_val=None):
+    return settings().get(key, default_val)
+
+def settings():
+    if _settings is None:
+        initialize()
+    return _settings
diff --git a/sdk/python/arvados/crunch.py b/sdk/python/arvados/crunch.py
new file mode 100644 (file)
index 0000000..70b8b44
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from builtins import object
+import json
+import os
+
+class TaskOutputDir(object):
+    """Keep-backed directory for staging outputs of Crunch tasks.
+
+    Example, in a crunch task whose output is a file called "out.txt"
+    containing "42":
+
+        import arvados
+        import arvados.crunch
+        import os
+
+        out = arvados.crunch.TaskOutputDir()
+        with open(os.path.join(out.path, 'out.txt'), 'w') as f:
+            f.write('42')
+        arvados.current_task().set_output(out.manifest_text())
+    """
+    def __init__(self):
+        self.path = os.environ['TASK_KEEPMOUNT_TMP']
+
+    def __str__(self):
+        return self.path
+
+    def manifest_text(self):
+        snapshot = os.path.join(self.path, '.arvados#collection')
+        with open(snapshot) as f:
+            return json.load(f)['manifest_text']
diff --git a/sdk/python/arvados/errors.py b/sdk/python/arvados/errors.py
new file mode 100644 (file)
index 0000000..4fe1f76
--- /dev/null
@@ -0,0 +1,94 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# errors.py - Arvados-specific exceptions.
+
+import json
+
+from apiclient import errors as apiclient_errors
+from collections import OrderedDict
+
+class ApiError(apiclient_errors.HttpError):
+    def _get_reason(self):
+        try:
+            return '; '.join(json.loads(self.content.decode('utf-8'))['errors'])
+        except (KeyError, TypeError, ValueError):
+            return super(ApiError, self)._get_reason()
+
+
+class KeepRequestError(Exception):
+    """Base class for errors accessing Keep services."""
+    def __init__(self, message='', request_errors=(), label=""):
+        """KeepRequestError(message='', request_errors=(), label="")
+
+        :message:
+          A human-readable message describing what Keep operation
+          failed.
+
+        :request_errors:
+          An iterable of 2-tuples mapping a key (identifying the operation
+          that was attempted) to the error encountered when performing it,
+          either an exception or an HTTP response object.  These will be
+          packed into an OrderedDict, available through the request_errors()
+          method.
+
+        :label:
+          A label indicating the type of value in the 'key' position of request_errors.
+
+        """
+        self.label = label
+        self._request_errors = OrderedDict(request_errors)
+        if self._request_errors:
+            exc_reports = [self._format_error(*err_pair)
+                           for err_pair in self._request_errors.items()]
+            base_msg = "{}: {}".format(message, "; ".join(exc_reports))
+        else:
+            base_msg = message
+        super(KeepRequestError, self).__init__(base_msg)
+        self.message = message
+
+    def _format_error(self, key, error):
+        if isinstance(error, HttpError):
+            err_fmt = "{} {} responded with {e.status_code} {e.reason}"
+        else:
+            err_fmt = "{} {} raised {e.__class__.__name__} ({e})"
+        return err_fmt.format(self.label, key, e=error)
+
+    def request_errors(self):
+        """request_errors() -> OrderedDict
+
+        The keys of the dictionary are described by `self.label`.
+        The corresponding value is the exception raised when sending the
+        request to it."""
+        return self._request_errors
+
+
+class HttpError(Exception):
+    def __init__(self, status_code, reason):
+        self.status_code = status_code
+        self.reason = reason
+
+
+class ArgumentError(Exception):
+    pass
+class SyntaxError(Exception):
+    pass
+class AssertionError(Exception):
+    pass
+class CommandFailedError(Exception):
+    pass
+class KeepReadError(KeepRequestError):
+    pass
+class KeepWriteError(KeepRequestError):
+    pass
+class NotFoundError(KeepReadError):
+    pass
+class NotImplementedError(Exception):
+    pass
+class NoKeepServersError(Exception):
+    pass
+class StaleWriterStateError(Exception):
+    pass
+class FeatureNotEnabledError(Exception):
+    pass
diff --git a/sdk/python/arvados/events.py b/sdk/python/arvados/events.py
new file mode 100644 (file)
index 0000000..c308750
--- /dev/null
@@ -0,0 +1,332 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
+from builtins import str
+from builtins import object
+import arvados
+from . import config
+from . import errors
+from .retry import RetryLoop
+
+import logging
+import json
+import _thread
+import threading
+import time
+import os
+import re
+import ssl
+from ws4py.client.threadedclient import WebSocketClient
+
+_logger = logging.getLogger('arvados.events')
+
+
+class _EventClient(WebSocketClient):
+    def __init__(self, url, filters, on_event, last_log_id, on_closed):
+        ssl_options = {'ca_certs': arvados.util.ca_certs_path()}
+        if config.flag_is_true('ARVADOS_API_HOST_INSECURE'):
+            ssl_options['cert_reqs'] = ssl.CERT_NONE
+        else:
+            ssl_options['cert_reqs'] = ssl.CERT_REQUIRED
+
+        # Warning: If the host part of url resolves to both IPv6 and
+        # IPv4 addresses (common with "localhost"), only one of them
+        # will be attempted -- and it might not be the right one. See
+        # ws4py's WebSocketBaseClient.__init__.
+        super(_EventClient, self).__init__(url, ssl_options=ssl_options)
+
+        self.filters = filters
+        self.on_event = on_event
+        self.last_log_id = last_log_id
+        self._closing_lock = threading.RLock()
+        self._closing = False
+        self._closed = threading.Event()
+        self.on_closed = on_closed
+
+    def opened(self):
+        for f in self.filters:
+            self.subscribe(f, self.last_log_id)
+
+    def closed(self, code, reason=None):
+        self._closed.set()
+        self.on_closed()
+
+    def received_message(self, m):
+        with self._closing_lock:
+            if not self._closing:
+                self.on_event(json.loads(str(m)))
+
+    def close(self, code=1000, reason='', timeout=0):
+        """Close event client and optionally wait for it to finish.
+
+        :timeout: is the number of seconds to wait for ws4py to
+        indicate that the connection has closed.
+        """
+        super(_EventClient, self).close(code, reason)
+        with self._closing_lock:
+            # make sure we don't process any more messages.
+            self._closing = True
+        # wait for ws4py to tell us the connection is closed.
+        self._closed.wait(timeout=timeout)
+
+    def subscribe(self, f, last_log_id=None):
+        m = {"method": "subscribe", "filters": f}
+        if last_log_id is not None:
+            m["last_log_id"] = last_log_id
+        self.send(json.dumps(m))
+
+    def unsubscribe(self, f):
+        self.send(json.dumps({"method": "unsubscribe", "filters": f}))
+
+
+class EventClient(object):
+    def __init__(self, url, filters, on_event_cb, last_log_id):
+        self.url = url
+        if filters:
+            self.filters = [filters]
+        else:
+            self.filters = [[]]
+        self.on_event_cb = on_event_cb
+        self.last_log_id = last_log_id
+        self.is_closed = threading.Event()
+        self._setup_event_client()
+
+    def _setup_event_client(self):
+        self.ec = _EventClient(self.url, self.filters, self.on_event,
+                               self.last_log_id, self.on_closed)
+        self.ec.daemon = True
+        try:
+            self.ec.connect()
+        except Exception:
+            self.ec.close_connection()
+            raise
+
+    def subscribe(self, f, last_log_id=None):
+        self.filters.append(f)
+        self.ec.subscribe(f, last_log_id)
+
+    def unsubscribe(self, f):
+        del self.filters[self.filters.index(f)]
+        self.ec.unsubscribe(f)
+
+    def close(self, code=1000, reason='', timeout=0):
+        self.is_closed.set()
+        self.ec.close(code, reason, timeout)
+
+    def on_event(self, m):
+        if m.get('id') is not None:
+            self.last_log_id = m.get('id')
+        try:
+            self.on_event_cb(m)
+        except Exception as e:
+            _logger.exception("Unexpected exception from event callback.")
+            _thread.interrupt_main()
+
+    def on_closed(self):
+        if not self.is_closed.is_set():
+            _logger.warning("Unexpected close. Reconnecting.")
+            for tries_left in RetryLoop(num_retries=25, backoff_start=.1, max_wait=15):
+                try:
+                    self._setup_event_client()
+                    _logger.warning("Reconnect successful.")
+                    break
+                except Exception as e:
+                    _logger.warning("Error '%s' during websocket reconnect.", e)
+            if tries_left == 0:
+                _logger.exception("EventClient thread could not contact websocket server.")
+                self.is_closed.set()
+                _thread.interrupt_main()
+                return
+
+    def run_forever(self):
+        # Have to poll here to let KeyboardInterrupt get raised.
+        while not self.is_closed.wait(1):
+            pass
+
+
+class PollClient(threading.Thread):
+    def __init__(self, api, filters, on_event, poll_time, last_log_id):
+        super(PollClient, self).__init__()
+        self.api = api
+        if filters:
+            self.filters = [filters]
+        else:
+            self.filters = [[]]
+        self.on_event = on_event
+        self.poll_time = poll_time
+        self.daemon = True
+        self.last_log_id = last_log_id
+        self._closing = threading.Event()
+        self._closing_lock = threading.RLock()
+
+    def run(self):
+        if self.last_log_id is not None:
+            # Caller supplied the last-seen event ID from a previous
+            # connection
+            skip_old_events = [["id", ">", str(self.last_log_id)]]
+        else:
+            # We need to do a reverse-order query to find the most
+            # recent event ID (see "if not skip_old_events" below).
+            skip_old_events = False
+
+        self.on_event({'status': 200})
+
+        while not self._closing.is_set():
+            moreitems = False
+            for f in self.filters:
+                for tries_left in RetryLoop(num_retries=25, backoff_start=.1, max_wait=self.poll_time):
+                    try:
+                        if not skip_old_events:
+                            # If the caller didn't provide a known
+                            # recent ID, our first request will ask
+                            # for the single most recent event from
+                            # the last 2 hours (the time restriction
+                            # avoids doing an expensive database
+                            # query, and leaves a big enough margin to
+                            # account for clock skew). If we do find a
+                            # recent event, we remember its ID but
+                            # then discard it (we are supposed to be
+                            # returning new/current events, not old
+                            # ones).
+                            #
+                            # Subsequent requests will get multiple
+                            # events in chronological order, and
+                            # filter on that same cutoff time, or
+                            # (once we see our first matching event)
+                            # the ID of the last-seen event.
+                            skip_old_events = [[
+                                "created_at", ">=",
+                                time.strftime(
+                                    "%Y-%m-%dT%H:%M:%SZ",
+                                    time.gmtime(time.time()-7200))]]
+                            items = self.api.logs().list(
+                                order="id desc",
+                                limit=1,
+                                filters=f+skip_old_events).execute()
+                            if items["items"]:
+                                skip_old_events = [
+                                    ["id", ">", str(items["items"][0]["id"])]]
+                                items = {
+                                    "items": [],
+                                    "items_available": 0,
+                                }
+                        else:
+                            # In this case, either we know the most
+                            # recent matching ID, or we know there
+                            # were no matching events in the 2-hour
+                            # window before subscribing. Either way we
+                            # can safely ask for events in ascending
+                            # order.
+                            items = self.api.logs().list(
+                                order="id asc",
+                                filters=f+skip_old_events).execute()
+                        break
+                    except errors.ApiError as error:
+                        # Transient API error; let RetryLoop back off and retry.
+                        # (The try block always ends with break on success, so
+                        # no else clause is needed here.)
+                        pass
+                if tries_left == 0:
+                    _logger.exception("PollClient thread could not contact API server.")
+                    with self._closing_lock:
+                        self._closing.set()
+                    _thread.interrupt_main()
+                    return
+                for i in items["items"]:
+                    skip_old_events = [["id", ">", str(i["id"])]]
+                    with self._closing_lock:
+                        if self._closing.is_set():
+                            return
+                        try:
+                            self.on_event(i)
+                        except Exception as e:
+                            _logger.exception("Unexpected exception from event callback.")
+                            _thread.interrupt_main()
+                if items["items_available"] > len(items["items"]):
+                    moreitems = True
+            if not moreitems:
+                self._closing.wait(self.poll_time)
+
+    def run_forever(self):
+        # Have to poll here, otherwise KeyboardInterrupt will never get processed.
+        while not self._closing.is_set():
+            self._closing.wait(1)
+
+    def close(self, code=None, reason=None, timeout=0):
+        """Close poll client and optionally wait for it to finish.
+
+        If an :on_event: handler is running in a different thread,
+        first wait (indefinitely) for it to return.
+
+        After closing, wait up to :timeout: seconds for the thread to
+        finish the poll request in progress (if any).
+
+        :code: and :reason: are ignored. They are present for
+        interface compatibility with EventClient.
+        """
+
+        with self._closing_lock:
+            self._closing.set()
+        try:
+            self.join(timeout=timeout)
+        except RuntimeError:
+            # "join() raises a RuntimeError if an attempt is made to join the
+            # current thread as that would cause a deadlock. It is also an
+            # error to join() a thread before it has been started and attempts
+            # to do so raises the same exception."
+            pass
+
+    def subscribe(self, f):
+        self.on_event({'status': 200})
+        self.filters.append(f)
+
+    def unsubscribe(self, f):
+        del self.filters[self.filters.index(f)]
+
+
+def _subscribe_websocket(api, filters, on_event, last_log_id=None):
+    endpoint = api._rootDesc.get('websocketUrl', None)
+    if not endpoint:
+        raise errors.FeatureNotEnabledError(
+            "Server does not advertise a websocket endpoint")
+    uri_with_token = "{}?api_token={}".format(endpoint, api.api_token)
+    try:
+        client = EventClient(uri_with_token, filters, on_event, last_log_id)
+    except Exception:
+        _logger.warning("Failed to connect to websockets on %s" % endpoint)
+        raise
+    else:
+        return client
+
+
+def subscribe(api, filters, on_event, poll_fallback=15, last_log_id=None):
+    """
+    :api:
+      a client object retrieved from arvados.api(). The caller should not use this client object for anything else after calling subscribe().
+    :filters:
+      Initial subscription filters.
+    :on_event:
+      The callback when a message is received.
+    :poll_fallback:
+      If websockets are not available, fall back to polling every N seconds.  If poll_fallback=False, this will return None if websockets are not available.
+    :last_log_id:
+      Only deliver log events with an id newer than this value (used to resume after a previously seen event).
+    """
+
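+    # A minimal usage sketch (hypothetical uuid; the callback should return
+    # quickly, since it runs on the event client's thread):
+    #
+    #   api = arvados.api('v1')
+    #   ws = subscribe(api, [['object_uuid', '=', uuid]], print)
+    #   try:
+    #       ws.run_forever()
+    #   finally:
+    #       ws.close()
+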
+    if not poll_fallback:
+        return _subscribe_websocket(api, filters, on_event, last_log_id)
+
+    try:
+        if not config.flag_is_true('ARVADOS_DISABLE_WEBSOCKETS'):
+            return _subscribe_websocket(api, filters, on_event, last_log_id)
+        else:
+            _logger.info("Using polling because ARVADOS_DISABLE_WEBSOCKETS is true")
+    except Exception as e:
+        _logger.warning("Falling back to polling after websocket error: %s" % e)
+    p = PollClient(api, filters, on_event, poll_fallback, last_log_id)
+    p.start()
+    return p
diff --git a/sdk/python/arvados/keep.py b/sdk/python/arvados/keep.py
new file mode 100644 (file)
index 0000000..4354ced
--- /dev/null
@@ -0,0 +1,1247 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from __future__ import division
+from future import standard_library
+from future.utils import native_str
+standard_library.install_aliases()
+from builtins import next
+from builtins import str
+from builtins import range
+from builtins import object
+import collections
+import datetime
+import hashlib
+import io
+import logging
+import math
+import os
+import pycurl
+import queue
+import re
+import socket
+import ssl
+import sys
+import threading
+from . import timer
+import urllib.parse
+
+if sys.version_info >= (3, 0):
+    from io import BytesIO
+else:
+    from cStringIO import StringIO as BytesIO
+
+import arvados
+import arvados.config as config
+import arvados.errors
+import arvados.retry as retry
+import arvados.util
+
+_logger = logging.getLogger('arvados.keep')
+global_client_object = None
+
+
+# Monkey patch TCP constants when not available (apple). Values sourced from:
+# http://www.opensource.apple.com/source/xnu/xnu-2422.115.4/bsd/netinet/tcp.h
+if sys.platform == 'darwin':
+    if not hasattr(socket, 'TCP_KEEPALIVE'):
+        socket.TCP_KEEPALIVE = 0x010
+    if not hasattr(socket, 'TCP_KEEPINTVL'):
+        socket.TCP_KEEPINTVL = 0x101
+    if not hasattr(socket, 'TCP_KEEPCNT'):
+        socket.TCP_KEEPCNT = 0x102
+
+
+class KeepLocator(object):
+    EPOCH_DATETIME = datetime.datetime.utcfromtimestamp(0)
+    HINT_RE = re.compile(r'^[A-Z][A-Za-z0-9@_-]+$')
+
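+    # A locator string looks like "<md5>+<size>" followed by optional hints,
+    # e.g. (hypothetical values):
+    #   acbd18db4cc2f85cedef654fccc4a4d8+3+A<40-hex signature>@<hex expiry>
+    # where the "A..." permission hint is parsed by parse_permission_hint().
+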
+    def __init__(self, locator_str):
+        self.hints = []
+        self._perm_sig = None
+        self._perm_expiry = None
+        pieces = iter(locator_str.split('+'))
+        self.md5sum = next(pieces)
+        try:
+            self.size = int(next(pieces))
+        except StopIteration:
+            self.size = None
+        for hint in pieces:
+            if self.HINT_RE.match(hint) is None:
+                raise ValueError("invalid hint format: {}".format(hint))
+            elif hint.startswith('A'):
+                self.parse_permission_hint(hint)
+            else:
+                self.hints.append(hint)
+
+    def __str__(self):
+        return '+'.join(
+            native_str(s)
+            for s in [self.md5sum, self.size,
+                      self.permission_hint()] + self.hints
+            if s is not None)
+
+    def stripped(self):
+        if self.size is not None:
+            return "%s+%i" % (self.md5sum, self.size)
+        else:
+            return self.md5sum
+
+    def _make_hex_prop(name, length):
+        # Build and return a new property with the given name that
+        # must be a hex string of the given length.
+        data_name = '_{}'.format(name)
+        def getter(self):
+            return getattr(self, data_name)
+        def setter(self, hex_str):
+            if not arvados.util.is_hex(hex_str, length):
+                raise ValueError("{} is not a {}-digit hex string: {!r}".
+                                 format(name, length, hex_str))
+            setattr(self, data_name, hex_str)
+        return property(getter, setter)
+
+    md5sum = _make_hex_prop('md5sum', 32)
+    perm_sig = _make_hex_prop('perm_sig', 40)
+
+    @property
+    def perm_expiry(self):
+        return self._perm_expiry
+
+    @perm_expiry.setter
+    def perm_expiry(self, value):
+        if not arvados.util.is_hex(value, 1, 8):
+            raise ValueError(
+                "permission timestamp must be a hex Unix timestamp: {}".
+                format(value))
+        self._perm_expiry = datetime.datetime.utcfromtimestamp(int(value, 16))
+
+    def permission_hint(self):
+        data = [self.perm_sig, self.perm_expiry]
+        if None in data:
+            return None
+        data[1] = int((data[1] - self.EPOCH_DATETIME).total_seconds())
+        return "A{}@{:08x}".format(*data)
+
+    def parse_permission_hint(self, s):
+        try:
+            self.perm_sig, self.perm_expiry = s[1:].split('@', 1)
+        except (IndexError, ValueError):
+            raise ValueError("bad permission hint {}".format(s))
+
+    def permission_expired(self, as_of_dt=None):
+        if self.perm_expiry is None:
+            return False
+        elif as_of_dt is None:
+            as_of_dt = datetime.datetime.now()
+        return self.perm_expiry <= as_of_dt
+
+
+class Keep(object):
+    """Simple interface to a global KeepClient object.
+
+    THIS CLASS IS DEPRECATED.  Please instantiate your own KeepClient with your
+    own API client.  The global KeepClient will build an API client from the
+    current Arvados configuration, which may not match the one you built.
+    """
+    _last_key = None
+
+    @classmethod
+    def global_client_object(cls):
+        global global_client_object
+        # Previously, KeepClient would change its behavior at runtime based
+        # on these configuration settings.  We simulate that behavior here
+        # by checking the values and returning a new KeepClient if any of
+        # them have changed.
+        key = (config.get('ARVADOS_API_HOST'),
+               config.get('ARVADOS_API_TOKEN'),
+               config.flag_is_true('ARVADOS_API_HOST_INSECURE'),
+               config.get('ARVADOS_KEEP_PROXY'),
+               config.get('ARVADOS_EXTERNAL_CLIENT') == 'true',
+               os.environ.get('KEEP_LOCAL_STORE'))
+        if (global_client_object is None) or (cls._last_key != key):
+            global_client_object = KeepClient()
+            cls._last_key = key
+        return global_client_object
+
+    @staticmethod
+    def get(locator, **kwargs):
+        return Keep.global_client_object().get(locator, **kwargs)
+
+    @staticmethod
+    def put(data, **kwargs):
+        return Keep.global_client_object().put(data, **kwargs)
+
+class KeepBlockCache(object):
+    # Default RAM cache is 256MiB
+    def __init__(self, cache_max=(256 * 1024 * 1024)):
+        self.cache_max = cache_max
+        self._cache = []
+        self._cache_lock = threading.Lock()
+
+    class CacheSlot(object):
+        __slots__ = ("locator", "ready", "content")
+
+        def __init__(self, locator):
+            self.locator = locator
+            self.ready = threading.Event()
+            self.content = None
+
+        def get(self):
+            self.ready.wait()
+            return self.content
+
+        def set(self, value):
+            self.content = value
+            self.ready.set()
+
+        def size(self):
+            if self.content is None:
+                return 0
+            else:
+                return len(self.content)
+
+    def cap_cache(self):
+        '''Cap the cache size to self.cache_max'''
+        with self._cache_lock:
+            # Select all slots except those where ready.is_set() and content is
+            # None (that means there was an error reading the block).
+            self._cache = [c for c in self._cache if not (c.ready.is_set() and c.content is None)]
+            sm = sum([slot.size() for slot in self._cache])
+            while len(self._cache) > 0 and sm > self.cache_max:
+                for i in range(len(self._cache)-1, -1, -1):
+                    if self._cache[i].ready.is_set():
+                        del self._cache[i]
+                        break
+                sm = sum([slot.size() for slot in self._cache])
+
+    def _get(self, locator):
+        # Test if the locator is already in the cache
+        for i in range(0, len(self._cache)):
+            if self._cache[i].locator == locator:
+                n = self._cache[i]
+                if i != 0:
+                    # move it to the front
+                    del self._cache[i]
+                    self._cache.insert(0, n)
+                return n
+        return None
+
+    def get(self, locator):
+        with self._cache_lock:
+            return self._get(locator)
+
+    def reserve_cache(self, locator):
+        '''Reserve a cache slot for the specified locator,
+        or return the existing slot.'''
+        with self._cache_lock:
+            n = self._get(locator)
+            if n:
+                return n, False
+            else:
+                # Add a new cache slot for the locator
+                n = KeepBlockCache.CacheSlot(locator)
+                self._cache.insert(0, n)
+                return n, True
+
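+# A hedged usage sketch for KeepBlockCache (hypothetical locator/fetch_block):
+#
+#   slot, first = cache.reserve_cache(locator)
+#   if first:
+#       slot.set(fetch_block(locator))  # populate and wake any waiting readers
+#   data = slot.get()                   # blocks until set() has been called
+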
+class Counter(object):
+    def __init__(self, v=0):
+        self._lk = threading.Lock()
+        self._val = v
+
+    def add(self, v):
+        with self._lk:
+            self._val += v
+
+    def get(self):
+        with self._lk:
+            return self._val
+
+
+class KeepClient(object):
+
+    # Default Keep server connection timeout:  2 seconds
+    # Default Keep server read timeout:       256 seconds
+    # Default Keep server bandwidth minimum:  32768 bytes per second
+    # Default Keep proxy connection timeout:  20 seconds
+    # Default Keep proxy read timeout:        256 seconds
+    # Default Keep proxy bandwidth minimum:   32768 bytes per second
+    DEFAULT_TIMEOUT = (2, 256, 32768)
+    DEFAULT_PROXY_TIMEOUT = (20, 256, 32768)
+
+
+    class KeepService(object):
+        """Make requests to a single Keep service, and track results.
+
+        A KeepService is intended to last long enough to perform one
+        transaction (GET or PUT) against one Keep service. This can
+        involve calling either get() or put() multiple times in order
+        to retry after transient failures. However, calling both get()
+        and put() on a single instance -- or using the same instance
+        to access two different Keep services -- will not produce
+        sensible behavior.
+        """
+
+        HTTP_ERRORS = (
+            socket.error,
+            ssl.SSLError,
+            arvados.errors.HttpError,
+        )
+
+        def __init__(self, root, user_agent_pool=queue.LifoQueue(),
+                     upload_counter=None,
+                     download_counter=None,
+                     headers={},
+                     insecure=False):
+            self.root = root
+            self._user_agent_pool = user_agent_pool
+            self._result = {'error': None}
+            self._usable = True
+            self._session = None
+            self._socket = None
+            self.get_headers = {'Accept': 'application/octet-stream'}
+            self.get_headers.update(headers)
+            self.put_headers = headers
+            self.upload_counter = upload_counter
+            self.download_counter = download_counter
+            self.insecure = insecure
+
+        def usable(self):
+            """Is it worth attempting a request?"""
+            return self._usable
+
+        def finished(self):
+            """Did the request succeed or encounter permanent failure?"""
+            return self._result['error'] == False or not self._usable
+
+        def last_result(self):
+            return self._result
+
+        def _get_user_agent(self):
+            try:
+                return self._user_agent_pool.get(block=False)
+            except queue.Empty:
+                return pycurl.Curl()
+
+        def _put_user_agent(self, ua):
+            try:
+                ua.reset()
+                self._user_agent_pool.put(ua, block=False)
+            except:
+                ua.close()
+
+        def _socket_open(self, *args, **kwargs):
+            if len(args) + len(kwargs) == 2:
+                return self._socket_open_pycurl_7_21_5(*args, **kwargs)
+            else:
+                return self._socket_open_pycurl_7_19_3(*args, **kwargs)
+
+        def _socket_open_pycurl_7_19_3(self, family, socktype, protocol, address=None):
+            return self._socket_open_pycurl_7_21_5(
+                purpose=None,
+                address=collections.namedtuple(
+                    'Address', ['family', 'socktype', 'protocol', 'addr'],
+                )(family, socktype, protocol, address))
+
+        def _socket_open_pycurl_7_21_5(self, purpose, address):
+            """Because pycurl doesn't have CURLOPT_TCP_KEEPALIVE"""
+            s = socket.socket(address.family, address.socktype, address.protocol)
+            s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+            # Will throw invalid protocol error on mac. This test prevents that.
+            if hasattr(socket, 'TCP_KEEPIDLE'):
+                s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 75)
+            s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 75)
+            self._socket = s
+            return s
+
+        def get(self, locator, method="GET", timeout=None):
+            # locator is a KeepLocator object.
+            url = self.root + str(locator)
+            _logger.debug("Request: %s %s", method, url)
+            curl = self._get_user_agent()
+            ok = None
+            try:
+                with timer.Timer() as t:
+                    self._headers = {}
+                    response_body = BytesIO()
+                    curl.setopt(pycurl.NOSIGNAL, 1)
+                    curl.setopt(pycurl.OPENSOCKETFUNCTION,
+                                lambda *args, **kwargs: self._socket_open(*args, **kwargs))
+                    curl.setopt(pycurl.URL, url.encode('utf-8'))
+                    curl.setopt(pycurl.HTTPHEADER, [
+                        '{}: {}'.format(k,v) for k,v in self.get_headers.items()])
+                    curl.setopt(pycurl.WRITEFUNCTION, response_body.write)
+                    curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+                    if self.insecure:
+                        curl.setopt(pycurl.SSL_VERIFYPEER, 0)
+                    if method == "HEAD":
+                        curl.setopt(pycurl.NOBODY, True)
+                    self._setcurltimeouts(curl, timeout, method=="HEAD")
+
+                    try:
+                        curl.perform()
+                    except Exception as e:
+                        raise arvados.errors.HttpError(0, str(e))
+                    finally:
+                        if self._socket:
+                            self._socket.close()
+                            self._socket = None
+                    self._result = {
+                        'status_code': curl.getinfo(pycurl.RESPONSE_CODE),
+                        'body': response_body.getvalue(),
+                        'headers': self._headers,
+                        'error': False,
+                    }
+
+                ok = retry.check_http_response_success(self._result['status_code'])
+                if not ok:
+                    self._result['error'] = arvados.errors.HttpError(
+                        self._result['status_code'],
+                        self._headers.get('x-status-line', 'Error'))
+            except self.HTTP_ERRORS as e:
+                self._result = {
+                    'error': e,
+                }
+            self._usable = ok != False
+            if self._result.get('status_code', None):
+                # The client worked well enough to get an HTTP status
+                # code, so presumably any problems are just on the
+                # server side and it's OK to reuse the client.
+                self._put_user_agent(curl)
+            else:
+                # Don't return this client to the pool, in case it's
+                # broken.
+                curl.close()
+            if not ok:
+                _logger.debug("Request fail: GET %s => %s: %s",
+                              url, type(self._result['error']), str(self._result['error']))
+                return None
+            if method == "HEAD":
+                _logger.info("HEAD %s: %s bytes",
+                         self._result['status_code'],
+                         self._result['headers'].get('content-length'))
+                if self._result['headers'].get('x-keep-locator'):
+                    # This is a response to a remote block copy request, return
+                    # the local copy block locator.
+                    return self._result['headers'].get('x-keep-locator')
+                return True
+
+            _logger.info("GET %s: %s bytes in %s msec (%.3f MiB/sec)",
+                         self._result['status_code'],
+                         len(self._result['body']),
+                         t.msecs,
+                         1.0*len(self._result['body'])/2**20/t.secs if t.secs > 0 else 0)
+
+            if self.download_counter:
+                self.download_counter.add(len(self._result['body']))
+            resp_md5 = hashlib.md5(self._result['body']).hexdigest()
+            if resp_md5 != locator.md5sum:
+                _logger.warning("Checksum fail: md5(%s) = %s",
+                                url, resp_md5)
+                self._result['error'] = arvados.errors.HttpError(
+                    0, 'Checksum fail')
+                return None
+            return self._result['body']
+
+        def put(self, hash_s, body, timeout=None):
+            url = self.root + hash_s
+            _logger.debug("Request: PUT %s", url)
+            curl = self._get_user_agent()
+            ok = None
+            try:
+                with timer.Timer() as t:
+                    self._headers = {}
+                    body_reader = BytesIO(body)
+                    response_body = BytesIO()
+                    curl.setopt(pycurl.NOSIGNAL, 1)
+                    curl.setopt(pycurl.OPENSOCKETFUNCTION,
+                                lambda *args, **kwargs: self._socket_open(*args, **kwargs))
+                    curl.setopt(pycurl.URL, url.encode('utf-8'))
+                    # Using UPLOAD tells cURL to wait for a "go ahead" from the
+                    # Keep server (in the form of an HTTP/1.1 "100 Continue"
+                    # response) instead of sending the request body immediately.
+                    # This allows the server to reject the request if the request
+                    # is invalid or the server is read-only, without waiting for
+                    # the client to send the entire block.
+                    curl.setopt(pycurl.UPLOAD, True)
+                    curl.setopt(pycurl.INFILESIZE, len(body))
+                    curl.setopt(pycurl.READFUNCTION, body_reader.read)
+                    curl.setopt(pycurl.HTTPHEADER, [
+                        '{}: {}'.format(k,v) for k,v in self.put_headers.items()])
+                    curl.setopt(pycurl.WRITEFUNCTION, response_body.write)
+                    curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+                    if self.insecure:
+                        curl.setopt(pycurl.SSL_VERIFYPEER, 0)
+                    self._setcurltimeouts(curl, timeout)
+                    try:
+                        curl.perform()
+                    except Exception as e:
+                        raise arvados.errors.HttpError(0, str(e))
+                    finally:
+                        if self._socket:
+                            self._socket.close()
+                            self._socket = None
+                    self._result = {
+                        'status_code': curl.getinfo(pycurl.RESPONSE_CODE),
+                        'body': response_body.getvalue().decode('utf-8'),
+                        'headers': self._headers,
+                        'error': False,
+                    }
+                ok = retry.check_http_response_success(self._result['status_code'])
+                if not ok:
+                    self._result['error'] = arvados.errors.HttpError(
+                        self._result['status_code'],
+                        self._headers.get('x-status-line', 'Error'))
+            except self.HTTP_ERRORS as e:
+                self._result = {
+                    'error': e,
+                }
+            self._usable = ok != False # still usable if ok is True or None
+            if self._result.get('status_code', None):
+                # Client is functional. See comment in get().
+                self._put_user_agent(curl)
+            else:
+                curl.close()
+            if not ok:
+                _logger.debug("Request fail: PUT %s => %s: %s",
+                              url, type(self._result['error']), str(self._result['error']))
+                return False
+            _logger.info("PUT %s: %s bytes in %s msec (%.3f MiB/sec)",
+                         self._result['status_code'],
+                         len(body),
+                         t.msecs,
+                         1.0*len(body)/2**20/t.secs if t.secs > 0 else 0)
+            if self.upload_counter:
+                self.upload_counter.add(len(body))
+            return True
+
+        def _setcurltimeouts(self, curl, timeouts, ignore_bandwidth=False):
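+            # `timeouts` may be a single number, a (connect, transfer)
+            # pair, or a (connect, transfer, bandwidth) triple; a missing
+            # bandwidth term falls back to KeepClient.DEFAULT_TIMEOUT[2].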
+            if not timeouts:
+                return
+            elif isinstance(timeouts, tuple):
+                if len(timeouts) == 2:
+                    conn_t, xfer_t = timeouts
+                    bandwidth_bps = KeepClient.DEFAULT_TIMEOUT[2]
+                else:
+                    conn_t, xfer_t, bandwidth_bps = timeouts
+            else:
+                conn_t, xfer_t = (timeouts, timeouts)
+                bandwidth_bps = KeepClient.DEFAULT_TIMEOUT[2]
+            curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(conn_t*1000))
+            if not ignore_bandwidth:
+                curl.setopt(pycurl.LOW_SPEED_TIME, int(math.ceil(xfer_t)))
+                curl.setopt(pycurl.LOW_SPEED_LIMIT, int(math.ceil(bandwidth_bps)))
+
+        def _headerfunction(self, header_line):
+            if isinstance(header_line, bytes):
+                header_line = header_line.decode('iso-8859-1')
+            if ':' in header_line:
+                name, value = header_line.split(':', 1)
+                name = name.strip().lower()
+                value = value.strip()
+            elif self._headers:
+                name = self._lastheadername
+                value = self._headers[name] + ' ' + header_line.strip()
+            elif header_line.startswith('HTTP/'):
+                name = 'x-status-line'
+                value = header_line
+            else:
+                _logger.error("Unexpected header line: %s", header_line)
+                return
+            self._lastheadername = name
+            self._headers[name] = value
+            # Returning None implies all bytes were written
+
+
+    class KeepWriterQueue(queue.Queue):
+        def __init__(self, copies):
+            queue.Queue.__init__(self) # Old-style superclass
+            self.wanted_copies = copies
+            self.successful_copies = 0
+            self.response = None
+            self.successful_copies_lock = threading.Lock()
+            self.pending_tries = copies
+            self.pending_tries_notification = threading.Condition()
+
+        def write_success(self, response, replicas_nr):
+            with self.successful_copies_lock:
+                self.successful_copies += replicas_nr
+                self.response = response
+            with self.pending_tries_notification:
+                self.pending_tries_notification.notify_all()
+
+        def write_fail(self, ks):
+            with self.pending_tries_notification:
+                self.pending_tries += 1
+                self.pending_tries_notification.notify()
+
+        def pending_copies(self):
+            with self.successful_copies_lock:
+                return self.wanted_copies - self.successful_copies
+
+        def get_next_task(self):
+            with self.pending_tries_notification:
+                while True:
+                    if self.pending_copies() < 1:
+                        # This notify_all() is unnecessary --
+                        # write_success() already called notify_all()
+                        # when pending<1 became true, so it's not
+                        # possible for any other thread to be in
+                        # wait() now -- but it's cheap insurance
+                        # against deadlock so we do it anyway:
+                        self.pending_tries_notification.notify_all()
+                        # Drain the queue and then raise Queue.Empty
+                        while True:
+                            self.get_nowait()
+                            self.task_done()
+                    elif self.pending_tries > 0:
+                        service, service_root = self.get_nowait()
+                        if service.finished():
+                            self.task_done()
+                            continue
+                        self.pending_tries -= 1
+                        return service, service_root
+                    elif self.empty():
+                        self.pending_tries_notification.notify_all()
+                        raise queue.Empty
+                    else:
+                        self.pending_tries_notification.wait()
+
+
+    class KeepWriterThreadPool(object):
+        def __init__(self, data, data_hash, copies, max_service_replicas, timeout=None):
+            self.total_task_nr = 0
+            self.wanted_copies = copies
+            if (not max_service_replicas) or (max_service_replicas >= copies):
+                num_threads = 1
+            else:
+                num_threads = int(math.ceil(1.0*copies/max_service_replicas))
+            _logger.debug("Pool max threads is %d", num_threads)
+            self.workers = []
+            self.queue = KeepClient.KeepWriterQueue(copies)
+            # Create workers
+            for _ in range(num_threads):
+                w = KeepClient.KeepWriterThread(self.queue, data, data_hash, timeout)
+                self.workers.append(w)
+
+        def add_task(self, ks, service_root):
+            self.queue.put((ks, service_root))
+            self.total_task_nr += 1
+
+        def done(self):
+            return self.queue.successful_copies
+
+        def join(self):
+            # Start workers
+            for worker in self.workers:
+                worker.start()
+            # Wait for finished work
+            self.queue.join()
+
+        def response(self):
+            return self.queue.response
+
+
+    class KeepWriterThread(threading.Thread):
+        TaskFailed = RuntimeError()
+
+        def __init__(self, queue, data, data_hash, timeout=None):
+            super(KeepClient.KeepWriterThread, self).__init__()
+            self.timeout = timeout
+            self.queue = queue
+            self.data = data
+            self.data_hash = data_hash
+            self.daemon = True
+
+        def run(self):
+            while True:
+                try:
+                    service, service_root = self.queue.get_next_task()
+                except queue.Empty:
+                    return
+                try:
+                    locator, copies = self.do_task(service, service_root)
+                except Exception as e:
+                    if e is not self.TaskFailed:
+                        _logger.exception("Exception in KeepWriterThread")
+                    self.queue.write_fail(service)
+                else:
+                    self.queue.write_success(locator, copies)
+                finally:
+                    self.queue.task_done()
+
+        def do_task(self, service, service_root):
+            success = bool(service.put(self.data_hash,
+                                        self.data,
+                                        timeout=self.timeout))
+            result = service.last_result()
+
+            if not success:
+                if result.get('status_code', None):
+                    _logger.debug("Request fail: PUT %s => %s %s",
+                                  self.data_hash,
+                                  result['status_code'],
+                                  result['body'])
+                raise self.TaskFailed
+
+            _logger.debug("KeepWriterThread %s succeeded %s+%i %s",
+                          str(threading.current_thread()),
+                          self.data_hash,
+                          len(self.data),
+                          service_root)
+            try:
+                replicas_stored = int(result['headers']['x-keep-replicas-stored'])
+            except (KeyError, ValueError):
+                replicas_stored = 1
+
+            return result['body'].strip(), replicas_stored
+
+
+    def __init__(self, api_client=None, proxy=None,
+                 timeout=DEFAULT_TIMEOUT, proxy_timeout=DEFAULT_PROXY_TIMEOUT,
+                 api_token=None, local_store=None, block_cache=None,
+                 num_retries=0, session=None):
+        """Initialize a new KeepClient.
+
+        Arguments:
+        :api_client:
+          The API client to use to find Keep services.  If not
+          provided, KeepClient will build one from available Arvados
+          configuration.
+
+        :proxy:
+          If specified, this KeepClient will send requests to this Keep
+          proxy.  Otherwise, KeepClient will fall back to the setting of the
+          ARVADOS_KEEP_SERVICES or ARVADOS_KEEP_PROXY configuration settings.
+          If you want to ensure KeepClient does not use a proxy, pass in
+          an empty string.
+
+        :timeout:
+          The initial timeout (in seconds) for HTTP requests to Keep
+          non-proxy servers.  A tuple of three floats is interpreted as
+          (connection_timeout, read_timeout, minimum_bandwidth). A connection
+          will be aborted if the average traffic rate falls below
+          minimum_bandwidth bytes per second over an interval of read_timeout
+          seconds. Because timeouts are often a result of transient server
+          load, the actual connection timeout will be increased by a factor
+          of two on each retry.
+          Default: (2, 256, 32768).
+
+        :proxy_timeout:
+          The initial timeout (in seconds) for HTTP requests to
+          Keep proxies. A tuple of three floats is interpreted as
+          (connection_timeout, read_timeout, minimum_bandwidth). The behavior
+          described above for adjusting connection timeouts on retry also
+          applies.
+          Default: (20, 256, 32768).
+
+        :api_token:
+          If you're not using an API client, but only talking
+          directly to a Keep proxy, this parameter specifies an API token
+          to authenticate Keep requests.  It is an error to specify both
+          api_client and api_token.  If you specify neither, KeepClient
+          will use one available from the Arvados configuration.
+
+        :local_store:
+          If specified, this KeepClient will bypass Keep
+          services, and save data to the named directory.  If unspecified,
+          KeepClient will fall back to the setting of the $KEEP_LOCAL_STORE
+          environment variable.  If you want to ensure KeepClient does not
+          use local storage, pass in an empty string.  This is primarily
+          intended to mock a server for testing.
+
+        :num_retries:
+          The default number of times to retry failed requests.
+          This will be used as the default num_retries value when get() and
+          put() are called.  Default 0.
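+
+        Example (a sketch; assumes ARVADOS_API_HOST and ARVADOS_API_TOKEN
+        are configured in the environment):
+
+            kc = KeepClient(num_retries=2)
+            loc = kc.put(b'hello')   # store one block, get a signed locator
+            kc.get(loc)              # returns b'hello'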
+        """
+        self.lock = threading.Lock()
+        if proxy is None:
+            if config.get('ARVADOS_KEEP_SERVICES'):
+                proxy = config.get('ARVADOS_KEEP_SERVICES')
+            else:
+                proxy = config.get('ARVADOS_KEEP_PROXY')
+        if api_token is None:
+            if api_client is None:
+                api_token = config.get('ARVADOS_API_TOKEN')
+            else:
+                api_token = api_client.api_token
+        elif api_client is not None:
+            raise ValueError(
+                "can't build KeepClient with both API client and token")
+        if local_store is None:
+            local_store = os.environ.get('KEEP_LOCAL_STORE')
+
+        if api_client is None:
+            self.insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE')
+        else:
+            self.insecure = api_client.insecure
+
+        self.block_cache = block_cache if block_cache else KeepBlockCache()
+        self.timeout = timeout
+        self.proxy_timeout = proxy_timeout
+        self._user_agent_pool = queue.LifoQueue()
+        self.upload_counter = Counter()
+        self.download_counter = Counter()
+        self.put_counter = Counter()
+        self.get_counter = Counter()
+        self.hits_counter = Counter()
+        self.misses_counter = Counter()
+
+        if local_store:
+            self.local_store = local_store
+            self.head = self.local_store_head
+            self.get = self.local_store_get
+            self.put = self.local_store_put
+        else:
+            self.num_retries = num_retries
+            self.max_replicas_per_service = None
+            if proxy:
+                proxy_uris = proxy.split()
+                for i in range(len(proxy_uris)):
+                    if not proxy_uris[i].endswith('/'):
+                        proxy_uris[i] += '/'
+                    # URL validation
+                    url = urllib.parse.urlparse(proxy_uris[i])
+                    if not (url.scheme and url.netloc):
+                        raise arvados.errors.ArgumentError("Invalid proxy URI: {}".format(proxy_uris[i]))
+                self.api_token = api_token
+                self._gateway_services = {}
+                self._keep_services = [{
+                    'uuid': "00000-bi6l4-%015d" % idx,
+                    'service_type': 'proxy',
+                    '_service_root': uri,
+                    } for idx, uri in enumerate(proxy_uris)]
+                self._writable_services = self._keep_services
+                self.using_proxy = True
+                self._static_services_list = True
+            else:
+                # It's important to avoid instantiating an API client
+                # unless we actually need one, for testing's sake.
+                if api_client is None:
+                    api_client = arvados.api('v1')
+                self.api_client = api_client
+                self.api_token = api_client.api_token
+                self._gateway_services = {}
+                self._keep_services = None
+                self._writable_services = None
+                self.using_proxy = None
+                self._static_services_list = False
+
+    def current_timeout(self, attempt_number):
+        """Return the appropriate timeout to use for this client.
+
+        The proxy timeout setting if the backend service is currently a proxy,
+        the regular timeout setting otherwise.  The `attempt_number` indicates
+        how many times the operation has been tried already (starting from 0
+        for the first try), and scales the connection timeout portion of the
+        return value accordingly.
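+
+        For example, with the default timeout of (2, 256, 32768), the third
+        try (attempt_number=2) uses (8, 256, 32768).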
+
+        """
+        # TODO(twp): the timeout should be a property of a
+        # KeepService, not a KeepClient. See #4488.
+        t = self.proxy_timeout if self.using_proxy else self.timeout
+        if len(t) == 2:
+            return (t[0] * (1 << attempt_number), t[1])
+        else:
+            return (t[0] * (1 << attempt_number), t[1], t[2])
+
+    def _any_nondisk_services(self, service_list):
+        return any(ks.get('service_type', 'disk') != 'disk'
+                   for ks in service_list)
+
+    def build_services_list(self, force_rebuild=False):
+        if (self._static_services_list or
+              (self._keep_services and not force_rebuild)):
+            return
+        with self.lock:
+            try:
+                keep_services = self.api_client.keep_services().accessible()
+            except Exception:  # API server predates Keep services.
+                keep_services = self.api_client.keep_disks().list()
+
+            # Gateway services are only used when specified by UUID,
+            # so there's nothing to gain by filtering them by
+            # service_type.
+            self._gateway_services = {ks['uuid']: ks for ks in
+                                      keep_services.execute()['items']}
+            if not self._gateway_services:
+                raise arvados.errors.NoKeepServersError()
+
+            # Precompute the base URI for each service.
+            for r in self._gateway_services.values():
+                host = r['service_host']
+                if not host.startswith('[') and host.find(':') >= 0:
+                    # IPv6 URIs must be formatted like http://[::1]:80/...
+                    host = '[' + host + ']'
+                r['_service_root'] = "{}://{}:{:d}/".format(
+                    'https' if r['service_ssl_flag'] else 'http',
+                    host,
+                    r['service_port'])
+
+            _logger.debug(str(self._gateway_services))
+            self._keep_services = [
+                ks for ks in self._gateway_services.values()
+                if not ks.get('service_type', '').startswith('gateway:')]
+            self._writable_services = [ks for ks in self._keep_services
+                                       if not ks.get('read_only')]
+
+            # For disk type services, max_replicas_per_service is 1
+            # It is unknown (unlimited) for other service types.
+            if self._any_nondisk_services(self._writable_services):
+                self.max_replicas_per_service = None
+            else:
+                self.max_replicas_per_service = 1
+
+    def _service_weight(self, data_hash, service_uuid):
+        """Compute the weight of a Keep service endpoint for a data
+        block with a known hash.
+
+        The weight is md5(h + u) where u is the last 15 characters of
+        the service endpoint's UUID.
+        """
+        return hashlib.md5((data_hash + service_uuid[-15:]).encode()).hexdigest()
+
+    def weighted_service_roots(self, locator, force_rebuild=False, need_writable=False):
+        """Return an array of Keep service endpoints, in the order in
+        which they should be probed when reading or writing data with
+        the given hash+hints.
+        """
+        self.build_services_list(force_rebuild)
+
+        sorted_roots = []
+        # Use the services indicated by the given +K@... remote
+        # service hints, if any are present and can be resolved to a
+        # URI.
+        for hint in locator.hints:
+            if hint.startswith('K@'):
+                if len(hint) == 7:
+                    sorted_roots.append(
+                        "https://keep.{}.arvadosapi.com/".format(hint[2:]))
+                elif len(hint) == 29:
+                    svc = self._gateway_services.get(hint[2:])
+                    if svc:
+                        sorted_roots.append(svc['_service_root'])
+
+        # Sort the available local services by weight (heaviest first)
+        # for this locator, and return their service_roots (base URIs)
+        # in that order.
+        use_services = self._keep_services
+        if need_writable:
+            use_services = self._writable_services
+        self.using_proxy = self._any_nondisk_services(use_services)
+        sorted_roots.extend([
+            svc['_service_root'] for svc in sorted(
+                use_services,
+                reverse=True,
+                key=lambda svc: self._service_weight(locator.md5sum, svc['uuid']))])
+        _logger.debug("{}: {}".format(locator, sorted_roots))
+        return sorted_roots
+
+    def map_new_services(self, roots_map, locator, force_rebuild, need_writable, headers):
+        # roots_map is a dictionary, mapping Keep service root strings
+        # to KeepService objects.  Poll for Keep services, and add any
+        # new ones to roots_map.  Return the current list of local
+        # root strings.
+        headers.setdefault('Authorization', "OAuth2 %s" % (self.api_token,))
+        local_roots = self.weighted_service_roots(locator, force_rebuild, need_writable)
+        for root in local_roots:
+            if root not in roots_map:
+                roots_map[root] = self.KeepService(
+                    root, self._user_agent_pool,
+                    upload_counter=self.upload_counter,
+                    download_counter=self.download_counter,
+                    headers=headers,
+                    insecure=self.insecure)
+        return local_roots
+
+    @staticmethod
+    def _check_loop_result(result):
+        # KeepClient RetryLoops should save results as a 2-tuple: the
+        # actual result of the request, and the number of servers available
+        # to receive the request this round.
+        # This method returns True if there's a real result, False if
+        # there are no more servers available, otherwise None.
+        if isinstance(result, Exception):
+            return None
+        result, tried_server_count = result
+        if (result is not None) and (result is not False):
+            return True
+        elif tried_server_count < 1:
+            _logger.info("No more Keep services to try; giving up")
+            return False
+        else:
+            return None
+
+    def get_from_cache(self, loc):
+        """Fetch a block only if is in the cache, otherwise return None."""
+        slot = self.block_cache.get(loc)
+        if slot is not None and slot.ready.is_set():
+            return slot.get()
+        else:
+            return None
+
+    def refresh_signature(self, loc):
+        """Ask Keep to get the remote block and return its local signature"""
+        now = datetime.datetime.utcnow().isoformat("T") + 'Z'
+        return self.head(loc, headers={'X-Keep-Signature': 'local, {}'.format(now)})
+
+    @retry.retry_method
+    def head(self, loc_s, **kwargs):
+        return self._get_or_head(loc_s, method="HEAD", **kwargs)
+
+    @retry.retry_method
+    def get(self, loc_s, **kwargs):
+        return self._get_or_head(loc_s, method="GET", **kwargs)
+
+    def _get_or_head(self, loc_s, method="GET", num_retries=None, request_id=None, headers=None):
+        """Get data from Keep.
+
+        This method fetches one or more blocks of data from Keep.  It
+        sends a request to each Keep service registered with the API
+        server (or the proxy provided when this client was
+        instantiated), then each service named in location hints, in
+        sequence.  As soon as one service provides the data, it's
+        returned.
+
+        Arguments:
+        * loc_s: A string of one or more comma-separated locators to fetch.
+          This method returns the concatenation of these blocks.
+        * num_retries: The number of times to retry GET requests to
+          *each* Keep server if it returns temporary failures, with
+          exponential backoff.  Note that, in each loop, the method may try
+          to fetch data from every available Keep service, along with any
+          that are named in location hints in the locator.  The default value
+          is set when the KeepClient is initialized.
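+
+        Example (illustrative; the locator shown is md5('foo') plus size):
+
+            data = keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3')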
+        """
+        if ',' in loc_s:
+            return b''.join(self.get(x) for x in loc_s.split(','))
+
+        self.get_counter.add(1)
+
+        slot = None
+        blob = None
+        try:
+            locator = KeepLocator(loc_s)
+            if method == "GET":
+                slot, first = self.block_cache.reserve_cache(locator.md5sum)
+                if not first:
+                    self.hits_counter.add(1)
+                    blob = slot.get()
+                    if blob is None:
+                        raise arvados.errors.KeepReadError(
+                            "failed to read {}".format(loc_s))
+                    return blob
+
+            self.misses_counter.add(1)
+
+            if headers is None:
+                headers = {}
+            headers['X-Request-Id'] = (request_id or
+                                        (hasattr(self, 'api_client') and self.api_client.request_id) or
+                                        arvados.util.new_request_id())
+
+            # If the locator has hints specifying a prefix (indicating a
+            # remote keepproxy) or the UUID of a local gateway service,
+            # read data from the indicated service(s) instead of the usual
+            # list of local disk services.
+            hint_roots = ['http://keep.{}.arvadosapi.com/'.format(hint[2:])
+                          for hint in locator.hints if hint.startswith('K@') and len(hint) == 7]
+            hint_roots.extend([self._gateway_services[hint[2:]]['_service_root']
+                               for hint in locator.hints if (
+                                       hint.startswith('K@') and
+                                       len(hint) == 29 and
+                                       self._gateway_services.get(hint[2:])
+                                       )])
+            # Map root URLs to their KeepService objects.
+            roots_map = {
+                root: self.KeepService(root, self._user_agent_pool,
+                                       upload_counter=self.upload_counter,
+                                       download_counter=self.download_counter,
+                                       headers=headers,
+                                       insecure=self.insecure)
+                for root in hint_roots
+            }
+
+            # See #3147 for a discussion of the loop implementation.  Highlights:
+            # * Refresh the list of Keep services after each failure, in case
+            #   it's being updated.
+            # * Retry until we succeed, we're out of retries, or every available
+            #   service has returned permanent failure.
+            # Note: roots_map is not reset here -- it already holds
+            # KeepService objects for any hint roots built above.
+            sorted_roots = []
+            loop = retry.RetryLoop(num_retries, self._check_loop_result,
+                                   backoff_start=2)
+            for tries_left in loop:
+                try:
+                    sorted_roots = self.map_new_services(
+                        roots_map, locator,
+                        force_rebuild=(tries_left < num_retries),
+                        need_writable=False,
+                        headers=headers)
+                except Exception as error:
+                    loop.save_result(error)
+                    continue
+
+                # Query KeepService objects that haven't returned
+                # permanent failure, in our specified shuffle order.
+                services_to_try = [roots_map[root]
+                                   for root in sorted_roots
+                                   if roots_map[root].usable()]
+                for keep_service in services_to_try:
+                    blob = keep_service.get(locator, method=method, timeout=self.current_timeout(num_retries-tries_left))
+                    if blob is not None:
+                        break
+                loop.save_result((blob, len(services_to_try)))
+
+            # Always cache the result, then return it if we succeeded.
+            if loop.success():
+                return blob
+        finally:
+            if slot is not None:
+                slot.set(blob)
+                self.block_cache.cap_cache()
+
+        # Q: Including 403 is necessary for the Keep tests to continue
+        # passing, but maybe they should expect KeepReadError instead?
+        not_founds = sum(1 for key in sorted_roots
+                         if roots_map[key].last_result().get('status_code', None) in {403, 404, 410})
+        service_errors = ((key, roots_map[key].last_result()['error'])
+                          for key in sorted_roots)
+        if not roots_map:
+            raise arvados.errors.KeepReadError(
+                "failed to read {}: no Keep services available ({})".format(
+                    loc_s, loop.last_result()))
+        elif not_founds == len(sorted_roots):
+            raise arvados.errors.NotFoundError(
+                "{} not found".format(loc_s), service_errors)
+        else:
+            raise arvados.errors.KeepReadError(
+                "failed to read {}".format(loc_s), service_errors, label="service")
+
+    @retry.retry_method
+    def put(self, data, copies=2, num_retries=None, request_id=None):
+        """Save data in Keep.
+
+        This method will get a list of Keep services from the API server, and
+        send the data to each one simultaneously in a new thread.  Once the
+        uploads are finished, if enough copies are saved, this method returns
+        the most recent HTTP response body.  If requests fail to upload
+        enough copies, this method raises KeepWriteError.
+
+        Arguments:
+        * data: The string of data to upload.
+        * copies: The number of copies that the user requires be saved.
+          Default 2.
+        * num_retries: The number of times to retry PUT requests to
+          *each* Keep server if it returns temporary failures, with
+          exponential backoff.  The default value is set when the
+          KeepClient is initialized.
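+
+        Example (illustrative; the locator shown is md5('foo') plus size
+        and a signature hint):
+
+            locator = keep_client.put(b'foo', copies=2)
+            # e.g. 'acbd18db4cc2f85cedef654fccc4a4d8+3+A<signature>@<expiry>'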
+        """
+
+        if not isinstance(data, bytes):
+            data = data.encode()
+
+        self.put_counter.add(1)
+
+        data_hash = hashlib.md5(data).hexdigest()
+        loc_s = data_hash + '+' + str(len(data))
+        if copies < 1:
+            return loc_s
+        locator = KeepLocator(loc_s)
+
+        headers = {
+            'X-Request-Id': (request_id or
+                             (hasattr(self, 'api_client') and self.api_client.request_id) or
+                             arvados.util.new_request_id()),
+            'X-Keep-Desired-Replicas': str(copies),
+        }
+        roots_map = {}
+        loop = retry.RetryLoop(num_retries, self._check_loop_result,
+                               backoff_start=2)
+        done = 0
+        for tries_left in loop:
+            try:
+                sorted_roots = self.map_new_services(
+                    roots_map, locator,
+                    force_rebuild=(tries_left < num_retries),
+                    need_writable=True,
+                    headers=headers)
+            except Exception as error:
+                loop.save_result(error)
+                continue
+
+            writer_pool = KeepClient.KeepWriterThreadPool(data=data,
+                                                        data_hash=data_hash,
+                                                        copies=copies - done,
+                                                        max_service_replicas=self.max_replicas_per_service,
+                                                        timeout=self.current_timeout(num_retries - tries_left))
+            for service_root, ks in [(root, roots_map[root])
+                                     for root in sorted_roots]:
+                if ks.finished():
+                    continue
+                writer_pool.add_task(ks, service_root)
+            writer_pool.join()
+            done += writer_pool.done()
+            loop.save_result((done >= copies, writer_pool.total_task_nr))
+
+        if loop.success():
+            return writer_pool.response()
+        if not roots_map:
+            raise arvados.errors.KeepWriteError(
+                "failed to write {}: no Keep services available ({})".format(
+                    data_hash, loop.last_result()))
+        else:
+            service_errors = ((key, roots_map[key].last_result()['error'])
+                              for key in sorted_roots
+                              if roots_map[key].last_result()['error'])
+            raise arvados.errors.KeepWriteError(
+                "failed to write {} (wanted {} copies but wrote {})".format(
+                    data_hash, copies, writer_pool.done()), service_errors, label="service")
+
+    def local_store_put(self, data, copies=1, num_retries=None):
+        """A stub for put().
+
+        This method is used in place of the real put() method when
+        using local storage (see constructor's local_store argument).
+
+        copies and num_retries arguments are ignored: they are here
+        only for the sake of offering the same call signature as
+        put().
+
+        Data stored this way can be retrieved via local_store_get().
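+
+        Example (a sketch; '/tmp/keepstore' is an arbitrary writable
+        directory):
+
+            kc = KeepClient(local_store='/tmp/keepstore')
+            loc = kc.put(b'hello')   # writes /tmp/keepstore/<md5>
+            kc.get(loc)              # returns b'hello'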
+        """
+        md5 = hashlib.md5(data).hexdigest()
+        locator = '%s+%d' % (md5, len(data))
+        with open(os.path.join(self.local_store, md5 + '.tmp'), 'wb') as f:
+            f.write(data)
+        os.rename(os.path.join(self.local_store, md5 + '.tmp'),
+                  os.path.join(self.local_store, md5))
+        return locator
+
+    def local_store_get(self, loc_s, num_retries=None):
+        """Companion to local_store_put()."""
+        try:
+            locator = KeepLocator(loc_s)
+        except ValueError:
+            raise arvados.errors.NotFoundError(
+                "Invalid data locator: '%s'" % loc_s)
+        if locator.md5sum == config.EMPTY_BLOCK_LOCATOR.split('+')[0]:
+            return b''
+        with open(os.path.join(self.local_store, locator.md5sum), 'rb') as f:
+            return f.read()
+
+    def local_store_head(self, loc_s, num_retries=None):
+        """Companion to local_store_put()."""
+        try:
+            locator = KeepLocator(loc_s)
+        except ValueError:
+            raise arvados.errors.NotFoundError(
+                "Invalid data locator: '%s'" % loc_s)
+        if locator.md5sum == config.EMPTY_BLOCK_LOCATOR.split('+')[0]:
+            return True
+        if os.path.exists(os.path.join(self.local_store, locator.md5sum)):
+            return True
+
+    def is_cached(self, locator):
+        return self.block_cache.reserve_cache(locator)
diff --git a/sdk/python/arvados/retry.py b/sdk/python/arvados/retry.py
new file mode 100644 (file)
index 0000000..3f62ab7
--- /dev/null
@@ -0,0 +1,163 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from builtins import range
+from builtins import object
+import functools
+import inspect
+import pycurl
+import time
+
+from collections import deque
+
+import arvados.errors
+
+_HTTP_SUCCESSES = set(range(200, 300))
+_HTTP_CAN_RETRY = set([408, 409, 422, 423, 500, 502, 503, 504])
+
+class RetryLoop(object):
+    """Coordinate limited retries of code.
+
+    RetryLoop coordinates a loop that runs until it records a
+    successful result or tries too many times, whichever comes first.
+    Typical use looks like:
+
+        loop = RetryLoop(num_retries=2)
+        for tries_left in loop:
+            try:
+                result = do_something()
+            except TemporaryError as error:
+                log("error: {} ({} tries left)".format(error, tries_left))
+            else:
+                loop.save_result(result)
+        if loop.success():
+            return loop.last_result()
+    """
+    def __init__(self, num_retries, success_check=lambda r: True,
+                 backoff_start=0, backoff_growth=2, save_results=1,
+                 max_wait=60):
+        """Construct a new RetryLoop.
+
+        Arguments:
+        * num_retries: The maximum number of times to retry the loop if it
+          doesn't succeed.  This means the loop could run at most 1+N times.
+        * success_check: This is a function that will be called each
+          time the loop saves a result.  The function should return
+          True if the result indicates loop success, False if it
+          represents a permanent failure state, and None if the loop
+          should continue.  If no function is provided, the loop will
+          end as soon as it records any result.
+        * backoff_start: The number of seconds that must pass before the
+          loop's second iteration.  Default 0, which disables all waiting.
+        * backoff_growth: The wait time multiplier after each iteration.
+          Default 2 (i.e., double the wait time each time).
+        * save_results: Specify a number to save the last N results
+          that the loop recorded.  These records are available through
+          the results attribute, oldest first.  Default 1.
+        * max_wait: Maximum number of seconds to wait between retries.
+        """
+        self.tries_left = num_retries + 1
+        self.check_result = success_check
+        self.backoff_wait = backoff_start
+        self.backoff_growth = backoff_growth
+        self.max_wait = max_wait
+        self.next_start_time = 0
+        self.results = deque(maxlen=save_results)
+        self._running = None
+        self._success = None
+
+    def __iter__(self):
+        return self
+
+    def running(self):
+        return self._running and (self._success is None)
+
+    def __next__(self):
+        if self._running is None:
+            self._running = True
+        if (self.tries_left < 1) or not self.running():
+            self._running = False
+            raise StopIteration
+        else:
+            wait_time = max(0, self.next_start_time - time.time())
+            time.sleep(wait_time)
+            self.backoff_wait *= self.backoff_growth
+            if self.backoff_wait > self.max_wait:
+                self.backoff_wait = self.max_wait
+        self.next_start_time = time.time() + self.backoff_wait
+        self.tries_left -= 1
+        return self.tries_left
+
+    def save_result(self, result):
+        """Record a loop result.
+
+        Save the given result, and end the loop if it indicates
+        success or permanent failure.  See __init__'s documentation
+        about success_check to learn how to make that indication.
+        """
+        if not self.running():
+            raise arvados.errors.AssertionError(
+                "recorded a loop result after the loop finished")
+        self.results.append(result)
+        self._success = self.check_result(result)
+
+    def success(self):
+        """Return the loop's end state.
+
+        Returns True if the loop obtained a successful result, False if it
+        encountered permanent failure, or else None.
+        """
+        return self._success
+
+    def last_result(self):
+        """Return the most recent result the loop recorded."""
+        try:
+            return self.results[-1]
+        except IndexError:
+            raise arvados.errors.AssertionError(
+                "queried loop results before any were recorded")
+
+
+def check_http_response_success(status_code):
+    """Convert an HTTP status code to a loop control flag.
+
+    Pass this method a numeric HTTP status code.  It returns True if
+    the code indicates success, None if it indicates temporary
+    failure, and False otherwise.  You can use this as the
+    success_check for a RetryLoop.
+
+    Implementation details:
+    * Any 2xx result returns True.
+    * A select few status codes, or any malformed responses, return None.
+      422 Unprocessable Entity is in this category.  This may not meet the
+      letter of the HTTP specification, but the Arvados API server will
+      use it for various server-side problems like database connection
+      errors.
+    * Everything else returns False.  Note that this includes 1xx and
+      3xx status codes.  They don't indicate success, and you can't
+      retry those requests verbatim.
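+
+    For example (illustrative):
+
+        >>> check_http_response_success(200)
+        True
+        >>> check_http_response_success(503) is None
+        True
+        >>> check_http_response_success(404)
+        False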
+    """
+    if status_code in _HTTP_SUCCESSES:
+        return True
+    elif status_code in _HTTP_CAN_RETRY:
+        return None
+    elif 100 <= status_code < 600:
+        return False
+    else:
+        return None  # Get well soon, server.
+
+
+def retry_method(orig_func):
+    """Provide a default value for a method's num_retries argument.
+
+    This is a decorator for instance and class methods that accept a
+    num_retries argument, with a None default.  When the method is called
+    without a value for num_retries, it will be set from the underlying
+    instance or class' num_retries attribute.
+    """
+    @functools.wraps(orig_func)
+    def num_retries_setter(self, *args, **kwargs):
+        if kwargs.get('num_retries') is None:
+            kwargs['num_retries'] = self.num_retries
+        return orig_func(self, *args, **kwargs)
+    return num_retries_setter
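+
+
+# Example use of retry_method (an illustrative sketch; `Fetcher` is a
+# hypothetical class, not part of this SDK):
+#
+#     class Fetcher(object):
+#         def __init__(self, num_retries=0):
+#             self.num_retries = num_retries
+#
+#         @retry_method
+#         def fetch(self, num_retries=None):
+#             # called as fetcher.fetch(): num_retries == self.num_retries
+#             return num_retries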
diff --git a/sdk/python/arvados/safeapi.py b/sdk/python/arvados/safeapi.py
new file mode 100644 (file)
index 0000000..c6e17ca
--- /dev/null
@@ -0,0 +1,47 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+
+from builtins import object
+import copy
+import threading
+
+import arvados
+import arvados.keep as keep
+import arvados.config as config
+
+class ThreadSafeApiCache(object):
+    """Threadsafe wrapper for API objects.
+
+    This stores and returns a different api object per thread, because
+    httplib2, which underlies apiclient, is not threadsafe.
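+
+    Example (a sketch; assumes ARVADOS_API_HOST and ARVADOS_API_TOKEN are
+    configured; `loc` is a placeholder block locator):
+
+        api = ThreadSafeApiCache()
+        api.users().current().execute()  # proxied to a per-thread client
+        api.keep.get(loc)                # shared, thread-safe KeepClient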
+
+    """
+
+    def __init__(self, apiconfig=None, keep_params={}, api_params={}):
+        if apiconfig is None:
+            apiconfig = config.settings()
+        self.apiconfig = copy.copy(apiconfig)
+        self.api_params = api_params
+        self.local = threading.local()
+
+        # Initialize an API object for this thread before creating
+        # KeepClient, this will report if ARVADOS_API_HOST or
+        # ARVADOS_API_TOKEN are missing.
+        self.localapi()
+
+        self.keep = keep.KeepClient(api_client=self, **keep_params)
+
+    def localapi(self):
+        if 'api' not in self.local.__dict__:
+            self.local.api = arvados.api_from_config('v1', apiconfig=self.apiconfig,
+                                                     **self.api_params)
+        return self.local.api
+
+    def __getattr__(self, name):
+        # Proxy nonexistent attributes to the thread-local API client.
+        if name == "api_token":
+            return self.apiconfig['ARVADOS_API_TOKEN']
+        return getattr(self.localapi(), name)
diff --git a/sdk/python/arvados/stream.py b/sdk/python/arvados/stream.py
new file mode 100644 (file)
index 0000000..edfb771
--- /dev/null
@@ -0,0 +1,107 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+from __future__ import absolute_import
+from future.utils import listvalues
+from builtins import object
+import collections
+import hashlib
+import os
+import re
+import threading
+import functools
+import copy
+
+from ._ranges import locators_and_ranges, Range
+from .arvfile import StreamFileReader
+from arvados.retry import retry_method
+from arvados.keep import *
+from . import config
+from . import errors
+from ._normalize_stream import normalize_stream
+
+class StreamReader(object):
+    def __init__(self, tokens, keep=None, debug=False, _empty=False,
+                 num_retries=0):
+        self._stream_name = None
+        self._data_locators = []
+        self._files = collections.OrderedDict()
+        self._keep = keep
+        self.num_retries = num_retries
+
+        streamoffset = 0
+
+        # parse stream
+        for tok in tokens:
+            if debug: print('tok', tok)
+            if self._stream_name is None:
+                self._stream_name = tok.replace('\\040', ' ')
+                continue
+
+            s = re.match(r'^[0-9a-f]{32}\+(\d+)(\+\S+)*$', tok)
+            if s:
+                blocksize = int(s.group(1))
+                self._data_locators.append(Range(tok, streamoffset, blocksize, 0))
+                streamoffset += blocksize
+                continue
+
+            s = re.search(r'^(\d+):(\d+):(\S+)', tok)
+            if s:
+                pos = int(s.group(1))
+                size = int(s.group(2))
+                name = s.group(3).replace('\\040', ' ')
+                if name not in self._files:
+                    self._files[name] = StreamFileReader(self, [Range(pos, 0, size, 0)], name)
+                else:
+                    filereader = self._files[name]
+                    filereader.segments.append(Range(pos, filereader.size(), size))
+                continue
+
+            raise errors.SyntaxError("Invalid manifest format")
+
+    def name(self):
+        return self._stream_name
+
+    def files(self):
+        return self._files
+
+    def all_files(self):
+        return listvalues(self._files)
+
+    def size(self):
+        n = self._data_locators[-1]
+        return n.range_start + n.range_size
+
+    def locators_and_ranges(self, range_start, range_size):
+        return locators_and_ranges(self._data_locators, range_start, range_size)
+
+    @retry_method
+    def _keepget(self, locator, num_retries=None):
+        return self._keep.get(locator, num_retries=num_retries)
+
+    @retry_method
+    def readfrom(self, start, size, num_retries=None):
+        """Read up to 'size' bytes from the stream, starting at 'start'"""
+        if size == 0:
+            return b''
+        if self._keep is None:
+            self._keep = KeepClient(num_retries=self.num_retries)
+        data = []
+        for lr in locators_and_ranges(self._data_locators, start, size):
+            data.append(self._keepget(lr.locator, num_retries=num_retries)[lr.segment_offset:lr.segment_offset+lr.segment_size])
+        return b''.join(data)
+
+    def manifest_text(self, strip=False):
+        manifest_text = [self.name().replace(' ', '\\040')]
+        if strip:
+            for d in self._data_locators:
+                m = re.match(r'^[0-9a-f]{32}\+\d+', d.locator)
+                manifest_text.append(m.group(0))
+        else:
+            manifest_text.extend([d.locator for d in self._data_locators])
+        manifest_text.extend([' '.join(["{}:{}:{}".format(seg.locator, seg.range_size, f.name.replace(' ', '\\040'))
+                                        for seg in f.segments])
+                              for f in listvalues(self._files)])
+        return ' '.join(manifest_text) + '\n'
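+
+
+# Example (an illustrative sketch): parsing a one-file manifest stream.
+# The block locator shown is md5('foo') plus the block size.
+#
+#     tokens = '. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt'.split()
+#     reader = StreamReader(tokens)
+#     reader.name()                # '.'
+#     list(reader.files())         # ['foo.txt']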
diff --git a/sdk/python/arvados/timer.py b/sdk/python/arvados/timer.py
new file mode 100644 (file)
index 0000000..97bc38a
--- /dev/null
@@ -0,0 +1,22 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+from builtins import object
+import time
+
+class Timer(object):
+    def __init__(self, verbose=False):
+        self.verbose = verbose
+
+    def __enter__(self):
+        self.start = time.time()
+        return self
+
+    def __exit__(self, *args):
+        self.end = time.time()
+        self.secs = self.end - self.start
+        self.msecs = self.secs * 1000  # millisecs
+        if self.verbose:
+            print('elapsed time: %f ms' % self.msecs)
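+
+
+# Typical usage (an illustrative sketch; do_work is a placeholder):
+#
+#     with Timer(verbose=True) as t:
+#         do_work()
+#     # t.secs / t.msecs hold the elapsed time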
diff --git a/sdk/python/arvados/util.py b/sdk/python/arvados/util.py
new file mode 100644 (file)
index 0000000..66da2d1
--- /dev/null
@@ -0,0 +1,418 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import division
+from builtins import range
+
+import fcntl
+import hashlib
+import httplib2
+import os
+import random
+import re
+import subprocess
+import errno
+import sys
+
+import arvados
+from arvados.collection import CollectionReader
+
+HEX_RE = re.compile(r'^[0-9a-fA-F]+$')
+
+keep_locator_pattern = re.compile(r'[0-9a-f]{32}\+\d+(\+\S+)*')
+signed_locator_pattern = re.compile(r'[0-9a-f]{32}\+\d+(\+\S+)*\+A\S+(\+\S+)*')
+portable_data_hash_pattern = re.compile(r'[0-9a-f]{32}\+\d+')
+uuid_pattern = re.compile(r'[a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}')
+collection_uuid_pattern = re.compile(r'[a-z0-9]{5}-4zz18-[a-z0-9]{15}')
+group_uuid_pattern = re.compile(r'[a-z0-9]{5}-j7d0g-[a-z0-9]{15}')
+user_uuid_pattern = re.compile(r'[a-z0-9]{5}-tpzed-[a-z0-9]{15}')
+link_uuid_pattern = re.compile(r'[a-z0-9]{5}-o0j2j-[a-z0-9]{15}')
+job_uuid_pattern = re.compile(r'[a-z0-9]{5}-8i9sb-[a-z0-9]{15}')
+container_uuid_pattern = re.compile(r'[a-z0-9]{5}-dz642-[a-z0-9]{15}')
+manifest_pattern = re.compile(r'((\S+)( +[a-f0-9]{32}(\+\d+)(\+\S+)*)+( +\d+:\d+:\S+)+$)+', flags=re.MULTILINE)
+
+def clear_tmpdir(path=None):
+    """
+    Ensure the given directory (or TASK_TMPDIR if none given)
+    exists and is empty.
+    """
+    if path is None:
+        path = arvados.current_task().tmpdir
+    if os.path.exists(path):
+        p = subprocess.Popen(['rm', '-rf', path])
+        stdout, stderr = p.communicate(None)
+        if p.returncode != 0:
+            raise Exception('rm -rf %s: %s' % (path, stderr))
+    os.mkdir(path)
+
+def run_command(execargs, **kwargs):
+    kwargs.setdefault('stdin', subprocess.PIPE)
+    kwargs.setdefault('stdout', subprocess.PIPE)
+    kwargs.setdefault('stderr', sys.stderr)
+    kwargs.setdefault('close_fds', True)
+    kwargs.setdefault('shell', False)
+    p = subprocess.Popen(execargs, **kwargs)
+    stdoutdata, stderrdata = p.communicate(None)
+    if p.returncode != 0:
+        raise arvados.errors.CommandFailedError(
+            "run_command %s exit %d:\n%s" %
+            (execargs, p.returncode, stderrdata))
+    return stdoutdata, stderrdata
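+
+# Example (an illustrative sketch):
+#
+#     stdout, stderr = run_command(['echo', 'hello'])
+#     # stdout == b'hello\n'; stderr is None (it goes to sys.stderr)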
+
+def git_checkout(url, version, path):
+    if not re.search('^/', path):
+        path = os.path.join(arvados.current_job().tmpdir, path)
+    if not os.path.exists(path):
+        run_command(["git", "clone", url, path],
+                    cwd=os.path.dirname(path))
+    run_command(["git", "checkout", version],
+                cwd=path)
+    return path
+
+def tar_extractor(path, decompress_flag):
+    return subprocess.Popen(["tar",
+                             "-C", path,
+                             ("-x%sf" % decompress_flag),
+                             "-"],
+                            stdout=None,
+                            stdin=subprocess.PIPE, stderr=sys.stderr,
+                            shell=False, close_fds=True)
+
+def tarball_extract(tarball, path):
+    """Retrieve a tarball from Keep and extract it to a local
+    directory.  Return the absolute path where the tarball was
+    extracted. If the top level of the tarball contained just one
+    file or directory, return the absolute path of that single
+    item.
+
+    tarball -- collection locator
+    path -- where to extract the tarball: absolute, or relative to job tmp
+    """
+    if not re.search('^/', path):
+        path = os.path.join(arvados.current_job().tmpdir, path)
+    lockfile = open(path + '.lock', 'w')
+    fcntl.flock(lockfile, fcntl.LOCK_EX)
+    try:
+        os.stat(path)
+    except OSError:
+        os.mkdir(path)
+    already_have_it = False
+    try:
+        if os.readlink(os.path.join(path, '.locator')) == tarball:
+            already_have_it = True
+    except OSError:
+        pass
+    if not already_have_it:
+
+        # emulate "rm -f" (i.e., if the file does not exist, we win)
+        try:
+            os.unlink(os.path.join(path, '.locator'))
+        except OSError:
+            if os.path.exists(os.path.join(path, '.locator')):
+                os.unlink(os.path.join(path, '.locator'))
+
+        for f in CollectionReader(tarball).all_files():
+            if re.search(r'\.(tbz|tar\.bz2)$', f.name()):
+                p = tar_extractor(path, 'j')
+            elif re.search(r'\.(tgz|tar\.gz)$', f.name()):
+                p = tar_extractor(path, 'z')
+            elif re.search(r'\.tar$', f.name()):
+                p = tar_extractor(path, '')
+            else:
+                raise arvados.errors.AssertionError(
+                    "tarball_extract cannot handle filename %s" % f.name())
+            while True:
+                buf = f.read(2**20)
+                if len(buf) == 0:
+                    break
+                p.stdin.write(buf)
+            p.stdin.close()
+            p.wait()
+            if p.returncode != 0:
+                lockfile.close()
+                raise arvados.errors.CommandFailedError(
+                    "tar exited %d" % p.returncode)
+        os.symlink(tarball, os.path.join(path, '.locator'))
+    tld_extracts = [f for f in os.listdir(path) if f != '.locator']
+    lockfile.close()
+    if len(tld_extracts) == 1:
+        return os.path.join(path, tld_extracts[0])
+    return path
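+
+# Illustrative call (locator is made up): extracts under the job tmpdir and
+# records the locator in a .locator symlink, so repeated calls with the same
+# tarball skip the download.
+#   tarball_extract('d41d8cd98f00b204e9800998ecf8427e+0', 'ref')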
+
+def zipball_extract(zipball, path):
+    """Retrieve a zip archive from Keep and extract it to a local
+    directory.  Return the absolute path where the archive was
+    extracted. If the top level of the archive contained just one
+    file or directory, return the absolute path of that single
+    item.
+
+    zipball -- collection locator
+    path -- where to extract the archive: absolute, or relative to job tmp
+    """
+    if not re.search('^/', path):
+        path = os.path.join(arvados.current_job().tmpdir, path)
+    lockfile = open(path + '.lock', 'w')
+    fcntl.flock(lockfile, fcntl.LOCK_EX)
+    try:
+        os.stat(path)
+    except OSError:
+        os.mkdir(path)
+    already_have_it = False
+    try:
+        if os.readlink(os.path.join(path, '.locator')) == zipball:
+            already_have_it = True
+    except OSError:
+        pass
+    if not already_have_it:
+
+        # emulate "rm -f" (i.e., if the file does not exist, we win)
+        try:
+            os.unlink(os.path.join(path, '.locator'))
+        except OSError:
+            if os.path.exists(os.path.join(path, '.locator')):
+                os.unlink(os.path.join(path, '.locator'))
+
+        for f in CollectionReader(zipball).all_files():
+            if not re.search(r'\.zip$', f.name()):
+                raise arvados.errors.NotImplementedError(
+                    "zipball_extract cannot handle filename %s" % f.name())
+            zip_filename = os.path.join(path, os.path.basename(f.name()))
+            zip_file = open(zip_filename, 'wb')
+            while True:
+                buf = f.read(2**20)
+                if len(buf) == 0:
+                    break
+                zip_file.write(buf)
+            zip_file.close()
+
+            p = subprocess.Popen(["unzip",
+                                  "-q", "-o",
+                                  "-d", path,
+                                  zip_filename],
+                                 stdout=None,
+                                 stdin=None, stderr=sys.stderr,
+                                 shell=False, close_fds=True)
+            p.wait()
+            if p.returncode != 0:
+                lockfile.close()
+                raise arvados.errors.CommandFailedError(
+                    "unzip exited %d" % p.returncode)
+            os.unlink(zip_filename)
+        os.symlink(zipball, os.path.join(path, '.locator'))
+    tld_extracts = [f for f in os.listdir(path) if f != '.locator']
+    lockfile.close()
+    if len(tld_extracts) == 1:
+        return os.path.join(path, tld_extracts[0])
+    return path
+
+def collection_extract(collection, path, files=[], decompress=True):
+    """Retrieve a collection from Keep and extract it to a local
+    directory.  Return the absolute path where the collection was
+    extracted.
+
+    collection -- collection locator
+    path -- where to extract: absolute, or relative to job tmp
+    files -- list of file names to extract; the default (empty list)
+      extracts every file in the collection
+    decompress -- if True, store gzipped files under their
+      decompressed names
+    """
+    matches = re.search(r'^([0-9a-f]+)(\+[\w@]+)*$', collection)
+    if matches:
+        collection_hash = matches.group(1)
+    else:
+        collection_hash = hashlib.md5(collection.encode()).hexdigest()
+    if not re.search('^/', path):
+        path = os.path.join(arvados.current_job().tmpdir, path)
+    lockfile = open(path + '.lock', 'w')
+    fcntl.flock(lockfile, fcntl.LOCK_EX)
+    try:
+        os.stat(path)
+    except OSError:
+        os.mkdir(path)
+    already_have_it = False
+    try:
+        if os.readlink(os.path.join(path, '.locator')) == collection_hash:
+            already_have_it = True
+    except OSError:
+        pass
+
+    # emulate "rm -f" (i.e., if the file does not exist, we win)
+    try:
+        os.unlink(os.path.join(path, '.locator'))
+    except OSError:
+        if os.path.exists(os.path.join(path, '.locator')):
+            os.unlink(os.path.join(path, '.locator'))
+
+    files_got = []
+    for s in CollectionReader(collection).all_streams():
+        stream_name = s.name()
+        for f in s.all_files():
+            if (files == [] or
+                ((f.name() not in files_got) and
+                 (f.name() in files or
+                  (decompress and f.decompressed_name() in files)))):
+                outname = f.decompressed_name() if decompress else f.name()
+                files_got += [outname]
+                if os.path.exists(os.path.join(path, stream_name, outname)):
+                    continue
+                mkdir_dash_p(os.path.dirname(os.path.join(path, stream_name, outname)))
+                outfile = open(os.path.join(path, stream_name, outname), 'wb')
+                for buf in (f.readall_decompressed() if decompress
+                            else f.readall()):
+                    outfile.write(buf)
+                outfile.close()
+    if len(files_got) < len(files):
+        raise arvados.errors.AssertionError(
+            "Wanted files %s but only got %s from %s" %
+            (files, files_got,
+             [z.name() for z in CollectionReader(collection).all_files()]))
+    os.symlink(collection_hash, os.path.join(path, '.locator'))
+
+    lockfile.close()
+    return path
+
+def mkdir_dash_p(path):
+    if not os.path.isdir(path):
+        try:
+            os.makedirs(path)
+        except OSError as e:
+            if e.errno == errno.EEXIST and os.path.isdir(path):
+                # It is not an error if someone else creates the
+                # directory between our isdir() and makedirs() calls.
+                pass
+            else:
+                raise
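+
+# Self-contained check (not part of the upstream module): like `mkdir -p`,
+# repeated calls and pre-existing directories are not errors.
+def _mkdir_dash_p_example():
+    import tempfile
+    d = os.path.join(tempfile.mkdtemp(), 'a', 'b')
+    mkdir_dash_p(d)
+    mkdir_dash_p(d)  # no-op the second time
+    return os.path.isdir(d)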
+
+def stream_extract(stream, path, files=[], decompress=True):
+    """Retrieve a stream from Keep and extract it to a local
+    directory.  Return the absolute path where the stream was
+    extracted.
+
+    stream -- StreamReader object
+    path -- where to extract: absolute, or relative to job tmp
+    files -- list of file names to extract; the default (empty list)
+      extracts every file in the stream
+    decompress -- if True, store gzipped files under their
+      decompressed names
+    """
+    if not re.search('^/', path):
+        path = os.path.join(arvados.current_job().tmpdir, path)
+    lockfile = open(path + '.lock', 'w')
+    fcntl.flock(lockfile, fcntl.LOCK_EX)
+    try:
+        os.stat(path)
+    except OSError:
+        os.mkdir(path)
+
+    files_got = []
+    for f in stream.all_files():
+        if (files == [] or
+            ((f.name() not in files_got) and
+             (f.name() in files or
+              (decompress and f.decompressed_name() in files)))):
+            outname = f.decompressed_name() if decompress else f.name()
+            files_got += [outname]
+            if os.path.exists(os.path.join(path, outname)):
+                os.unlink(os.path.join(path, outname))
+            mkdir_dash_p(os.path.dirname(os.path.join(path, outname)))
+            outfile = open(os.path.join(path, outname), 'wb')
+            for buf in (f.readall_decompressed() if decompress
+                        else f.readall()):
+                outfile.write(buf)
+            outfile.close()
+    if len(files_got) < len(files):
+        raise arvados.errors.AssertionError(
+            "Wanted files %s but only got %s from %s" %
+            (files, files_got, [z.name() for z in stream.all_files()]))
+    lockfile.close()
+    return path
+
+def listdir_recursive(dirname, base=None, max_depth=None):
+    """listdir_recursive(dirname, base, max_depth)
+
+    Return a list of file and directory names found under dirname.
+
+    If base is not None, prepend "{base}/" to each returned name.
+
+    If max_depth is None, descend into directories and return only the
+    names of files found in the directory tree.
+
+    If max_depth is a non-negative integer, stop descending into
+    directories at the given depth, and at that point return directory
+    names instead.
+
+    If max_depth==0 (and base is None) this is equivalent to
+    sorted(os.listdir(dirname)).
+    """
+    allfiles = []
+    for ent in sorted(os.listdir(dirname)):
+        ent_path = os.path.join(dirname, ent)
+        ent_base = os.path.join(base, ent) if base else ent
+        if os.path.isdir(ent_path) and max_depth != 0:
+            allfiles += listdir_recursive(
+                ent_path, base=ent_base,
+                max_depth=(max_depth-1 if max_depth else None))
+        else:
+            allfiles += [ent_base]
+    return allfiles
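+
+# Behavior sketch (POSIX paths; not part of the upstream module):
+def _listdir_recursive_example():
+    import tempfile
+    root = tempfile.mkdtemp()
+    os.makedirs(os.path.join(root, 'sub'))
+    open(os.path.join(root, 'sub', 'f.txt'), 'w').close()
+    assert listdir_recursive(root) == ['sub/f.txt']          # descends into dirs
+    assert listdir_recursive(root, max_depth=0) == ['sub']   # stops at top level
+    return True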
+
+def is_hex(s, *length_args):
+    """is_hex(s[, length[, max_length]]) -> boolean
+
+    Return True if s is a string of hexadecimal digits.
+    If one length argument is given, the string must contain exactly
+    that number of digits.
+    If two length arguments are given, the string must contain a number of
+    digits between those two lengths, inclusive.
+    Return False otherwise.
+    """
+    num_length_args = len(length_args)
+    if num_length_args > 2:
+        raise arvados.errors.ArgumentError(
+            "is_hex accepts up to 3 arguments ({} given)".format(1 + num_length_args))
+    elif num_length_args == 2:
+        good_len = (length_args[0] <= len(s) <= length_args[1])
+    elif num_length_args == 1:
+        good_len = (len(s) == length_args[0])
+    else:
+        good_len = True
+    return bool(good_len and HEX_RE.match(s))
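+
+# Quick examples (not part of the upstream module):
+def _is_hex_example():
+    assert is_hex('cafe')            # hex digits only
+    assert is_hex('cafe', 4)         # exact length
+    assert not is_hex('cafe', 5)
+    assert is_hex('cafe', 2, 8)      # length within [2, 8]
+    assert not is_hex('xyz')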
+
+def list_all(fn, num_retries=0, **kwargs):
+    """Call an API list method repeatedly, following offset-based
+    pagination, and return the items from every page as one list."""
+    # Default limit to (effectively) api server's MAX_LIMIT
+    kwargs.setdefault('limit', sys.maxsize)
+    items = []
+    offset = 0
+    items_available = sys.maxsize
+    while len(items) < items_available:
+        c = fn(offset=offset, **kwargs).execute(num_retries=num_retries)
+        items += c['items']
+        items_available = c['items_available']
+        offset = c['offset'] + len(c['items'])
+    return items
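+
+# Pagination sketch (illustrative; `api` stands for an Arvados API client):
+# list_all() keeps requesting pages, advancing `offset` by the size of each
+# page, until it has collected `items_available` items.
+#   every_collection = list_all(api.collections().list, num_retries=3)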
+
+def ca_certs_path(fallback=httplib2.CA_CERTS):
+    """Return the path of the best available CA certs source.
+
+    This function searches for various distribution sources of CA
+    certificates, and returns the first it finds.  If it doesn't find any,
+    it returns the value of `fallback` (httplib2's CA certs by default).
+    """
+    for ca_certs_path in [
+        # Arvados specific:
+        '/etc/arvados/ca-certificates.crt',
+        # Debian:
+        '/etc/ssl/certs/ca-certificates.crt',
+        # Red Hat:
+        '/etc/pki/tls/certs/ca-bundle.crt',
+        ]:
+        if os.path.exists(ca_certs_path):
+            return ca_certs_path
+    return fallback
+
+def new_request_id():
+    rid = "req-"
+    # 2**104 > 36**20 > 2**103
+    n = random.getrandbits(104)
+    for _ in range(20):
+        c = n % 36
+        if c < 10:
+            rid += chr(c+ord('0'))
+        else:
+            rid += chr(c+ord('a')-10)
+        n = n // 36
+    return rid
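+
+# Sanity sketch (not part of the upstream module): per the comment above,
+# 104 random bits cover all 36**20 possible 20-character base-36 suffixes.
+def _new_request_id_example():
+    rid = new_request_id()
+    assert rid.startswith('req-') and len(rid) == 24
+    assert all(c in '0123456789abcdefghijklmnopqrstuvwxyz' for c in rid[4:])
+    return rid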
diff --git a/sdk/python/arvados_version.py b/sdk/python/arvados_version.py
new file mode 100644 (file)
index 0000000..2e6484c
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import subprocess
+import time
+import os
+import re
+
+def git_latest_tag():
+    gittags = subprocess.check_output(['git', 'tag', '-l']).split()
+    gittags.sort(key=lambda s: [int(u) for u in s.split(b'.')], reverse=True)
+    return str(next(iter(gittags)).decode('utf-8'))
+
+def git_timestamp_tag():
+    gitinfo = subprocess.check_output(
+        ['git', 'log', '--first-parent', '--max-count=1',
+         '--format=format:%ct', '.']).strip()
+    return str(time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo))))
+
+def save_version(setup_dir, module, v):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'w') as fp:
+        return fp.write("__version__ = '%s'\n" % v)
+
+def read_version(setup_dir, module):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'r') as fp:
+        return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+
+def get_version(setup_dir, module):
+    env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
+
+    if env_version:
+        save_version(setup_dir, module, env_version)
+    else:
+        try:
+            save_version(setup_dir, module, git_latest_tag() + git_timestamp_tag())
+        except subprocess.CalledProcessError:
+            pass
+
+    return read_version(setup_dir, module)
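+
+# Typical use, as in sdk/python/setup.py below (the module argument names
+# the package directory that receives _version.py):
+#   version = get_version(SETUP_DIR, "arvados")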
diff --git a/sdk/python/bin/arv-copy b/sdk/python/bin/arv-copy
new file mode 100755 (executable)
index 0000000..ad020d7
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from arvados.commands.arv_copy import main
+main()
diff --git a/sdk/python/bin/arv-get b/sdk/python/bin/arv-get
new file mode 100755 (executable)
index 0000000..3216374
--- /dev/null
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import sys
+
+from arvados.commands.get import main
+
+sys.exit(main(sys.argv[1:]))
diff --git a/sdk/python/bin/arv-keepdocker b/sdk/python/bin/arv-keepdocker
new file mode 100755 (executable)
index 0000000..c90bb03
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from arvados.commands.keepdocker import main
+main()
diff --git a/sdk/python/bin/arv-ls b/sdk/python/bin/arv-ls
new file mode 100755 (executable)
index 0000000..b612fda
--- /dev/null
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import sys
+
+from arvados.commands.ls import main
+
+sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
diff --git a/sdk/python/bin/arv-migrate-docker19 b/sdk/python/bin/arv-migrate-docker19
new file mode 100755 (executable)
index 0000000..6995c01
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from arvados.commands.migrate19 import main
+main()
diff --git a/sdk/python/bin/arv-normalize b/sdk/python/bin/arv-normalize
new file mode 100755 (executable)
index 0000000..eab21f1
--- /dev/null
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import argparse
+import sys
+
+import arvados
+from arvados._version import __version__
+
+parser = argparse.ArgumentParser(
+    description='Read manifest on standard input and put normalized manifest on standard output.')
+
+parser.add_argument('--extract', type=str,
+                    help="The file to extract from the input manifest")
+parser.add_argument('--strip', action='store_true',
+                    help="Strip authorization tokens")
+parser.add_argument('--version', action='version',
+                    version="%s %s" % (sys.argv[0], __version__),
+                    help='Print version and exit.')
+
+args = parser.parse_args()
+
+r = sys.stdin.read()
+
+cr = arvados.CollectionReader(r)
+
+if args.extract:
+    i = args.extract.rfind('/')
+    if i == -1:
+        stream = '.'
+        fn = args.extract
+    else:
+        stream = args.extract[:i]
+        fn = args.extract[(i+1):]
+    for s in cr.all_streams():
+        if s.name() == stream:
+            if fn in s.files():
+                sys.stdout.write(s.files()[fn].as_manifest())
+else:
+    sys.stdout.write(cr.manifest_text(strip=args.strip, normalize=True))
diff --git a/sdk/python/bin/arv-put b/sdk/python/bin/arv-put
new file mode 100755 (executable)
index 0000000..eaeecfb
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from arvados.commands.put import main
+main()
diff --git a/sdk/python/bin/arv-run b/sdk/python/bin/arv-run
new file mode 100755 (executable)
index 0000000..ebba201
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from arvados.commands.run import main
+main()
diff --git a/sdk/python/bin/arv-ws b/sdk/python/bin/arv-ws
new file mode 100755 (executable)
index 0000000..4e84918
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from arvados.commands.ws import main
+main()
diff --git a/sdk/python/fpm-info.sh b/sdk/python/fpm-info.sh
new file mode 100644 (file)
index 0000000..7a89cf0
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+case "$TARGET" in
+    debian* | ubuntu*)
+        fpm_depends+=(libcurl3-gnutls)
+        ;;
+esac
diff --git a/sdk/python/gittaggers.py b/sdk/python/gittaggers.py
new file mode 100644 (file)
index 0000000..f3278fc
--- /dev/null
@@ -0,0 +1,29 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from setuptools.command.egg_info import egg_info
+import subprocess
+import time
+
+class EggInfoFromGit(egg_info):
+    """Tag the build with git commit timestamp.
+
+    If a build tag has already been set (e.g., "egg_info -b", building
+    from source package), leave it alone.
+    """
+    def git_latest_tag(self):
+        gittags = subprocess.check_output(['git', 'tag', '-l']).split()
+        gittags.sort(key=lambda s: [int(u) for u in s.split(b'.')], reverse=True)
+        return str(next(iter(gittags)).decode('utf-8'))
+
+    def git_timestamp_tag(self):
+        gitinfo = subprocess.check_output(
+            ['git', 'log', '--first-parent', '--max-count=1',
+             '--format=format:%ct', '.']).strip()
+        return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
+
+    def tags(self):
+        if self.tag_build is None:
+            self.tag_build = self.git_latest_tag()+self.git_timestamp_tag()
+        return egg_info.tags(self)
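+
+# Hooked into a build via setuptools cmdclass (hypothetical snippet):
+#   setup(..., cmdclass={'egg_info': EggInfoFromGit})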
diff --git a/sdk/python/setup.py b/sdk/python/setup.py
new file mode 100644 (file)
index 0000000..ffca234
--- /dev/null
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+import os
+import sys
+import re
+
+from setuptools import setup, find_packages
+
+SETUP_DIR = os.path.dirname(__file__) or '.'
+README = os.path.join(SETUP_DIR, 'README.rst')
+
+import arvados_version
+version = arvados_version.get_version(SETUP_DIR, "arvados")
+
+short_tests_only = False
+if '--short-tests-only' in sys.argv:
+    short_tests_only = True
+    sys.argv.remove('--short-tests-only')
+
+setup(name='arvados-python-client',
+      version=version,
+      description='Arvados client library',
+      long_description=open(README).read(),
+      author='Arvados',
+      author_email='info@arvados.org',
+      url="https://arvados.org",
+      download_url="https://github.com/curoverse/arvados.git",
+      license='Apache 2.0',
+      packages=find_packages(),
+      scripts=[
+          'bin/arv-copy',
+          'bin/arv-get',
+          'bin/arv-keepdocker',
+          'bin/arv-ls',
+          'bin/arv-migrate-docker19',
+          'bin/arv-normalize',
+          'bin/arv-put',
+          'bin/arv-run',
+          'bin/arv-ws'
+      ],
+      data_files=[
+          ('share/doc/arvados-python-client', ['LICENSE-2.0.txt', 'README.rst']),
+      ],
+      install_requires=[
+          'ciso8601 >=2.0.0',
+          'future',
+          'google-api-python-client >=1.6.2, <1.7',
+          'httplib2 >=0.9.2',
+          'pycurl >=7.19.5.1',
+          'ruamel.yaml >=0.15.54, <=0.15.77',
+          'setuptools',
+          'ws4py >=0.4.2',
+      ],
+      extras_require={
+          ':os.name=="posix" and python_version<"3"': ['subprocess32 >= 3.5.1'],
+          ':python_version<"3"': ['pytz'],
+      },
+      classifiers=[
+          'Programming Language :: Python :: 2',
+          'Programming Language :: Python :: 3',
+      ],
+      test_suite='tests',
+      tests_require=['pbr<1.7.0', 'mock>=1.0', 'PyYAML'],
+      zip_safe=False
+      )
diff --git a/sdk/python/tests/__init__.py b/sdk/python/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/python/tests/arvados_testutil.py b/sdk/python/tests/arvados_testutil.py
new file mode 100644 (file)
index 0000000..21b3f15
--- /dev/null
@@ -0,0 +1,276 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from future import standard_library
+standard_library.install_aliases()
+from builtins import str
+from builtins import range
+from builtins import object
+import arvados
+import contextlib
+import errno
+import hashlib
+import http.client
+import httplib2
+import io
+import mock
+import os
+import pycurl
+import queue
+import shutil
+import sys
+import tempfile
+import unittest
+
+if sys.version_info >= (3, 0):
+    from io import StringIO, BytesIO
+else:
+    from cStringIO import StringIO
+    BytesIO = StringIO
+
+# Use this hostname when you want to make sure the traffic will be
+# instantly refused.  100::/64 is a dedicated black hole.
+TEST_HOST = '100::'
+
+skip_sleep = mock.patch('time.sleep', lambda n: None)  # clown'll eat me
+
+def queue_with(items):
+    """Return a thread-safe iterator that yields the given items.
+
+    +items+ can be given as an array or an iterator. If an iterator is
+    given, it will be consumed to fill the queue before queue_with()
+    returns.
+    """
+    q = queue.Queue()
+    for val in items:
+        q.put(val)
+    return lambda *args, **kwargs: q.get(block=False)
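+
+# Behavior sketch (not part of the upstream module): the returned callable
+# ignores its arguments and pops one queued item per call.
+def _queue_with_example():
+    fake = queue_with([1, 2])
+    assert fake() == 1
+    assert fake('any', kw='args') == 2  # a third call raises queue.Empty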
+
+# fake_httplib2_response and mock_responses
+# mock calls to httplib2.Http.request()
+def fake_httplib2_response(code, **headers):
+    headers.update(status=str(code),
+                   reason=http.client.responses.get(code, "Unknown Response"))
+    return httplib2.Response(headers)
+
+def mock_responses(body, *codes, **headers):
+    if not isinstance(body, bytes) and hasattr(body, 'encode'):
+        body = body.encode()
+    return mock.patch('httplib2.Http.request', side_effect=queue_with((
+        (fake_httplib2_response(code, **headers), body) for code in codes)))
+
+def mock_api_responses(api_client, body, codes, headers={}):
+    if not isinstance(body, bytes) and hasattr(body, 'encode'):
+        body = body.encode()
+    return mock.patch.object(api_client._http, 'request', side_effect=queue_with((
+        (fake_httplib2_response(code, **headers), body) for code in codes)))
+
+def str_keep_locator(s):
+    return '{}+{}'.format(hashlib.md5(s if isinstance(s, bytes) else s.encode()).hexdigest(), len(s))
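+
+# e.g. str_keep_locator('') == 'd41d8cd98f00b204e9800998ecf8427e+0'
+# (md5 of the empty string, plus the data length).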
+
+@contextlib.contextmanager
+def redirected_streams(stdout=None, stderr=None):
+    if stdout == StringIO:
+        stdout = StringIO()
+    if stderr == StringIO:
+        stderr = StringIO()
+    orig_stdout, sys.stdout = sys.stdout, stdout or sys.stdout
+    orig_stderr, sys.stderr = sys.stderr, stderr or sys.stderr
+    try:
+        yield (stdout, stderr)
+    finally:
+        sys.stdout = orig_stdout
+        sys.stderr = orig_stderr
+
+
+class VersionChecker(object):
+    def assertVersionOutput(self, out, err):
+        if sys.version_info >= (3, 0):
+            self.assertEqual(err.getvalue(), '')
+            v = out.getvalue()
+        else:
+            # Python 2 writes version info on stderr.
+            self.assertEqual(out.getvalue(), '')
+            v = err.getvalue()
+        self.assertRegex(v, r"[0-9]+\.[0-9]+\.[0-9]+$\n")
+
+
+class FakeCurl(object):
+    @classmethod
+    def make(cls, code, body=b'', headers={}):
+        if not isinstance(body, bytes) and hasattr(body, 'encode'):
+            body = body.encode()
+        return mock.Mock(spec=cls, wraps=cls(code, body, headers))
+
+    def __init__(self, code=200, body=b'', headers={}):
+        self._opt = {}
+        self._got_url = None
+        self._writer = None
+        self._headerfunction = None
+        self._resp_code = code
+        self._resp_body = body
+        self._resp_headers = headers
+
+    def getopt(self, opt):
+        return self._opt.get(str(opt), None)
+
+    def setopt(self, opt, val):
+        self._opt[str(opt)] = val
+        if opt == pycurl.WRITEFUNCTION:
+            self._writer = val
+        elif opt == pycurl.HEADERFUNCTION:
+            self._headerfunction = val
+
+    def perform(self):
+        if not isinstance(self._resp_code, int):
+            raise self._resp_code
+        if self.getopt(pycurl.URL) is None:
+            raise ValueError
+        if self._writer is None:
+            raise ValueError
+        if self._headerfunction:
+            self._headerfunction("HTTP/1.1 {} Status".format(self._resp_code))
+            for k, v in self._resp_headers.items():
+                self._headerfunction(k + ': ' + str(v))
+        if type(self._resp_body) is not bool:
+            self._writer(self._resp_body)
+
+    def close(self):
+        pass
+
+    def reset(self):
+        """Prevent fake UAs from going back into the user agent pool."""
+        raise Exception
+
+    def getinfo(self, opt):
+        if opt == pycurl.RESPONSE_CODE:
+            return self._resp_code
+        raise Exception
+
+def mock_keep_responses(body, *codes, **headers):
+    """Patch pycurl to return fake responses and raise exceptions.
+
+    body can be a string to return as the response body, or an exception
+    to raise when perform() is called.  To vary the body per response,
+    pass a (body, code) tuple as the first positional argument; every
+    element of codes must then be such a (body, code) pair.
+    """
+    cm = mock.MagicMock()
+    if isinstance(body, tuple):
+        codes = list(codes)
+        codes.insert(0, body)
+        responses = [
+            FakeCurl.make(code=code, body=b, headers=headers)
+            for b, code in codes
+        ]
+    else:
+        responses = [
+            FakeCurl.make(code=code, body=body, headers=headers)
+            for code in codes
+        ]
+    cm.side_effect = queue_with(responses)
+    cm.responses = responses
+    return mock.patch('pycurl.Curl', cm)
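+
+# Typical test usage (illustrative): each queued response satisfies one
+# Keep request made by the code under test.
+#   with mock_keep_responses('foo', 200):
+#       ...  # a Keep read here sees body 'foo' with HTTP 200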
+
+
+class MockStreamReader(object):
+    def __init__(self, name='.', *data):
+        self._name = name
+        self._data = b''.join([
+            b if isinstance(b, bytes) else b.encode()
+            for b in data])
+        self._data_locators = [str_keep_locator(d) for d in data]
+        self.num_retries = 0
+
+    def name(self):
+        return self._name
+
+    def readfrom(self, start, size, num_retries=None):
+        return self._data[start:start + size]
+
+class ApiClientMock(object):
+    def api_client_mock(self):
+        return mock.MagicMock(name='api_client_mock')
+
+    def mock_keep_services(self, api_mock=None, status=200, count=12,
+                           service_type='disk',
+                           service_host=None,
+                           service_port=None,
+                           service_ssl_flag=False,
+                           additional_services=[],
+                           read_only=False):
+        if api_mock is None:
+            api_mock = self.api_client_mock()
+        body = {
+            'items_available': count,
+            'items': [{
+                'uuid': 'zzzzz-bi6l4-{:015x}'.format(i),
+                'owner_uuid': 'zzzzz-tpzed-000000000000000',
+                'service_host': service_host or 'keep0x{:x}'.format(i),
+                'service_port': service_port or 65535-i,
+                'service_ssl_flag': service_ssl_flag,
+                'service_type': service_type,
+                'read_only': read_only,
+            } for i in range(0, count)] + additional_services
+        }
+        self._mock_api_call(api_mock.keep_services().accessible, status, body)
+        return api_mock
+
+    def _mock_api_call(self, mock_method, code, body):
+        mock_method = mock_method().execute
+        if code == 200:
+            mock_method.return_value = body
+        else:
+            mock_method.side_effect = arvados.errors.ApiError(
+                fake_httplib2_response(code), b"{}")
+
+
+class ArvadosBaseTestCase(unittest.TestCase):
+    # This class provides common utility functions for our tests.
+
+    def setUp(self):
+        self._tempdirs = []
+
+    def tearDown(self):
+        for workdir in self._tempdirs:
+            shutil.rmtree(workdir, ignore_errors=True)
+
+    def make_tmpdir(self):
+        self._tempdirs.append(tempfile.mkdtemp())
+        return self._tempdirs[-1]
+
+    def data_file(self, filename):
+        try:
+            basedir = os.path.dirname(__file__)
+        except NameError:
+            basedir = '.'
+        return open(os.path.join(basedir, 'data', filename))
+
+    def build_directory_tree(self, tree):
+        tree_root = self.make_tmpdir()
+        for leaf in tree:
+            path = os.path.join(tree_root, leaf)
+            try:
+                os.makedirs(os.path.dirname(path))
+            except OSError as error:
+                if error.errno != errno.EEXIST:
+                    raise
+            with open(path, 'w') as tmpfile:
+                tmpfile.write(leaf)
+        return tree_root
+
+    def make_test_file(self, text=b"test"):
+        testfile = tempfile.NamedTemporaryFile()
+        testfile.write(text)
+        testfile.flush()
+        return testfile
+
+if sys.version_info < (3, 0):
+    # There is no assert[Not]Regex that works in both Python 2 and 3,
+    # so we backport Python 3 style to Python 2.
+    def assertRegex(self, *args, **kwargs):
+        return self.assertRegexpMatches(*args, **kwargs)
+    def assertNotRegex(self, *args, **kwargs):
+        return self.assertNotRegexpMatches(*args, **kwargs)
+    unittest.TestCase.assertRegex = assertRegex
+    unittest.TestCase.assertNotRegex = assertNotRegex
diff --git a/sdk/python/tests/data/1000G_ref_manifest b/sdk/python/tests/data/1000G_ref_manifest
new file mode 100644 (file)
index 0000000..d0fe113
--- /dev/null
@@ -0,0 +1 @@
+. 231e69ef8840dcdb883b934a008f0eeb+67108864+K@qr1hi e14a59b578206d2d32dd858715645e0b+67108864+K@qr1hi 27a4b87e4cf1f85dc3fd917d2c388641+67108864+K@qr1hi 06b3ff80cf45bda52aca0711059a0bd6+67108864+K@qr1hi b036f1120ca429d0a148a5e8312663d9+67108864+K@qr1hi 83dc6b43bf27ce28da50967cd7dd23c3+67108864+K@qr1hi 3f6a4512b125bca64e1fa3d82f1e638d+67108864+K@qr1hi c0a8af66954841dae178e9417a82b710+67108864+K@qr1hi b3b4fb7120fae8b8f804849e36de9b55+67108864+K@qr1hi 2323ea3c93cc9664f35b6f90493ad01e+67108864+K@qr1hi e0b0f131d6f4669d0eaafbd4d72e0268+67108864+K@qr1hi 4274ff53c12dd5821c9ff6b12c4678f2+67108864+K@qr1hi 5d7af6348037a8161b1f932edcf32fae+67108864+K@qr1hi b7d88946691cc0d0f3c22dc3619b2ef3+67108864+K@qr1hi 799dd7f25556ad3a90604e094b538149+67108864+K@qr1hi 41b4d1c38afbcc48c0d463ad37adbdb7+67108864+K@qr1hi 6adcba8494cb0a6f6563e4c92a5c002a+67108864+K@qr1hi d0b7417a3872a5889cdc66daff4da326+67108864+K@qr1hi 7a5aeeb69132524c3e35cf454683fabe+67108864+K@qr1hi 9b09a732903086533e58e1acefc5df1f+67108864+K@qr1hi cdd07f6573f9e1239ef83ba8a02d6bf4+67108864+K@qr1hi 3d802b3e5b532210397b6992d9d9caff+67108864+K@qr1hi 5ce57221bdb69beb1376479a00dc839c+67108864+K@qr1hi 9173b38ec40c457fa3bd36ef89e562fc+67108864+K@qr1hi 05c712dee07f2115b657bb83d18f77dc+67108864+K@qr1hi 5764176e6aa0e8dd1195eb37c10ff921+67108864+K@qr1hi ce5d4f465c761cdfce6a7075e75e4c8d+67108864+K@qr1hi 2f594f1a5028e5954b14aba3cc7edc5d+67108864+K@qr1hi 9cb165ce899a80dc0aba79290d054f2e+67108864+K@qr1hi 9c5c8f5ad6dd0a23dbdd7bae47d9b77c+67108864+K@qr1hi b7afd688ca053e6cdd44f7c7add74c88+67108864+K@qr1hi 0b6c92b166993dbf4ebe65b130a18531+67108864+K@qr1hi 9c0e13bc1825573446cdb2e0d2a13057+67108864+K@qr1hi a64e372054ceb89494e7da42e1869a19+67108864+K@qr1hi 04b9acc199d01058c413c0b61474fa42+67108864+K@qr1hi 4d3f26dd05c50bbc5dac61ffc45f4a36+67108864+K@qr1hi f7f4400a463b1950a9422d3d798ae4e4+67108864+K@qr1hi 88814bec594bc5207ed70b52da08c964+67108864+K@qr1hi e645deb6e2cb633d175636327432b547+67108864+K@qr1hi 82249761efba1a759a94533a0f0225ee+67108864+K@qr1hi ff981edca6999dddb04e2b3528be376a+67108864+K@qr1hi d6e5f14509a26363492aab3775b070a7+67108864+K@qr1hi 20395032fda158a8fddd68281559b706+67108864+K@qr1hi f8dce2bf8d30bfa2306715f355e14702+67108864+K@qr1hi 937e9736f130276bd99c5a3cfd419f01+67108864+K@qr1hi c5ce8d646b2e1f8ceda6a88bdaa42354+67108864+K@qr1hi e82f42c6458660ba3e4aba0bf9f2fa83+67108864+K@qr1hi d2940d4c7155a94dfb31caf6951d9a59+67108864+K@qr1hi 165ffca3eae18d12aebfc395f4565547+67108864+K@qr1hi 8500ebaa55b2b377414c1d627e9a1982+67108864+K@qr1hi 594501b0e794179d7a04170d668358a8+67108864+K@qr1hi cd353db66ea7bba1ce4aa9fcd78fbf47+67108864+K@qr1hi 63b0e03e3a11c00f2b36de1384325756+67108864+K@qr1hi ac811ad87a8a5bc4e23130d2cc8ab588+67108864+K@qr1hi bb2c282f29b1ff680e1ecf82ba2902a9+67108864+K@qr1hi 5d3dbfa93769c1cba4406e616cc15d92+67108864+K@qr1hi 4f70517fc34def9e912ca98c127e8568+67108864+K@qr1hi 7da6aeb8a23b001ef8324ac28996f79d+67108864+K@qr1hi bcaba7f5eb10668c62c366527c5b7225+67108864+K@qr1hi 6fe50b949e8ace7e7bbe4a443b15cd89+67108864+K@qr1hi 7f104154192f4f15ef6760cd8ef836b4+67108864+K@qr1hi 9e528cc6e61ac04ab4806b1eca14473b+67108864+K@qr1hi b9af9ac127033b9639a32b4b4d21a033+67108864+K@qr1hi 724d08ad32a6de2dea78c357367f4de8+67108864+K@qr1hi 8afc63f0435da749a1a58544c290a792+67108864+K@qr1hi 7b6ea56cbf9bfce245fab717b02bc5c7+67108864+K@qr1hi c5903ce244515bb870dbad04acd7b482+67108864+K@qr1hi 491f4228b75e5d9af5c10eb4c7960d63+67108864+K@qr1hi 252bcf6db697723c6bcea11f601be4b0+67108864+K@qr1hi 81244819a14415d52b26a340d0b9a367+67108864+K@qr1hi 40336ab12de8a3c192dc4275c65e0b16+67108864+K@qr1hi 
991ffe3960f9510b26352f5f925c23ba+67108864+K@qr1hi 0331ed4cfbc01ce07053691a31568c55+67108864+K@qr1hi b7d63f0ae6507c7f828c376ef2861e1c+67108864+K@qr1hi 548c0041481a1795913f62321944a81b+67108864+K@qr1hi b804bd2af77b7a3a32ac9512380d94e2+67108864+K@qr1hi a1702dad1c2354c9c85b02191800d5df+67108864+K@qr1hi 8e314c6cbda431e328e2de30ed0bacf0+67108864+K@qr1hi 8930eda1c1c868067fe4514c86cb0006+67108864+K@qr1hi 8ab6885d8e7a65fdff1a64fae78f83cb+67108864+K@qr1hi bb9ca8c63316097110e34d49f9b1a551+67108864+K@qr1hi 280e1ceb75a5db7a72d23b0705e2de94+67108864+K@qr1hi 47172b0b71e03d88fe560c68c3ca5b13+67108864+K@qr1hi 3f985d99b0929ab8869c2d6ee78ebe06+67108864+K@qr1hi f99ce1f7543bf5d6b92bbf1d74bf341b+67108864+K@qr1hi 2d1073b508cde7fdb47d3e0045f12ba1+67108864+K@qr1hi 66d40bfdb5d04f954745e47bd958c959+67108864+K@qr1hi 423753509d9fb12a3b99eb463a8e6441+67108864+K@qr1hi 5d7676bf15ff2c73c84da0d6a1b21ec9+67108864+K@qr1hi 17dd674093f3ec22acb79db4d3c958e8+67108864+K@qr1hi 792f960e48650f1ff4397400d61e4868+67108864+K@qr1hi e44a4987cc0df77331131a4b7ee5408e+67108864+K@qr1hi 1d5b68a793bde89b35afd0abfffa1e91+67108864+K@qr1hi 93474190338acbc799a96e095c1ce6e1+67108864+K@qr1hi 19ad3d61a0729bd779608cefcefd3f17+67108864+K@qr1hi 4effea732567c4def12cb91616fcadab+67108864+K@qr1hi 00313b354ef436c75e89b7d9abb83d4f+67108864+K@qr1hi df5e63c2a4f060d462436f512c54858a+67108864+K@qr1hi ce8b016b1c46ebc3cf3872b26a799ec1+67108864+K@qr1hi 87163cb8078652223da89aa3d2f902f3+67108864+K@qr1hi f85e64b456e64bbb679dfaa2c462a033+67108864+K@qr1hi de8050929d7d89303e809d8f27c422dc+67108864+K@qr1hi adf643ec5085bf757fcc59a47813288a+67108864+K@qr1hi 7d2b01ba0f9be07644927cca7c459e43+67108864+K@qr1hi a5dc9a1d620b49be2da2098bef54ab18+67108864+K@qr1hi d24da6679ba48e4cbe29af40b5f36968+67108864+K@qr1hi 7d9f73079ff9aeeda056e21e64cddadc+67108864+K@qr1hi 47e45f31917f41385dd79c9316e1cefc+67108864+K@qr1hi a8f9a8ca17809b7c2f64aba9a0c13b1f+67108864+K@qr1hi d8d534661a3af310b085323d496f6d44+67108864+K@qr1hi 899756d962d283156f681a461c0588ce+67108864+K@qr1hi 0f28d102f6e05dca547d5a019fe85b61+67108864+K@qr1hi ddd31b0c369f60219e0a65a071772018+67108864+K@qr1hi 7ff925b478d6a51806982f1a32ecdb5a+67108864+K@qr1hi b7afe2d94145776bc7fecb7a8385527d+67108864+K@qr1hi c77b5ee8577c6c89533114803b5efc9b+67108864+K@qr1hi 884661029ffb441e16ca87a248bcf394+67108864+K@qr1hi cd22c4e3fd707f54818545004da24a1c+67108864+K@qr1hi a90b6f132d31540d6a1c9a5e426997f7+67108864+K@qr1hi c9f699cdb3ab822dd0bd9c345926c86d+67108864+K@qr1hi 4b0abce1b72d97eebb652fb438c1b3be+67108864+K@qr1hi d4568db71967719cda57374a9963a6dc+67108864+K@qr1hi cfb15250fb5eb9ac69a18f64eaeb3e31+67108864+K@qr1hi 4d3ac3e4c5a278b91cda0d890db84e3a+67108864+K@qr1hi 40b137b57f8d83e4d508a1a4b95ac134+67108864+K@qr1hi c86b47517a22f1303bb519a7a244e57d+67108864+K@qr1hi 712681dd526dd49add64af4f168254d0+67108864+K@qr1hi b5abb03c2b4fbd01e8997f8c755bf347+67108864+K@qr1hi 50245663fc28f000c536a9b85ae05d52+67108864+K@qr1hi 89e2b1ece4b8a34700a48bd5625c1ddc+67108864+K@qr1hi a06799afa99163b7ba2471f4bc2da4a2+67108864+K@qr1hi 4cecb752f0015ca1b70a95c1b4497c7b+67108864+K@qr1hi e0b110c9e392c0126787a7b1635f4923+67108864+K@qr1hi f32788adf3c77d67b09abc43f2922012+67108864+K@qr1hi fb6e48fa0feb6ee3d6356beb765a98a4+67108864+K@qr1hi 52a464c2209f6e262dc72e485b5df16c+67108864+K@qr1hi 50ed10e5d3dfedbd18f1fbff62048487+67108864+K@qr1hi b4d649a55f537c628bc337729857a2cd+67108864+K@qr1hi d8d5fa84e7199021ed7f3daa9e1d3c6a+67108864+K@qr1hi 7ea980aec5a6d9bc7ae8353b34145daa+67108864+K@qr1hi f9b989d982bbc5b3587a2a06df2f7bdc+67108864+K@qr1hi 27c8ba6cc8208fa1c220657768e8780e+67108864+K@qr1hi 
3ec1187853bc27c62ecc02c7a27f0587+67108864+K@qr1hi 9b34e5a81de59c416255d6e23de498fc+67108864+K@qr1hi 469f6398ee90d5dd398c21ad06b05fe3+67108864+K@qr1hi d0969eae2340c1e1d34d311cc4815fde+67108864+K@qr1hi 2e3e6b8823e39e08d5e697adc2120339+67108864+K@qr1hi 224fc36688f7adb2ec5cc088b09e8463+42902639+K@qr1hi 0:51549666:1000G_omni2.5.b37.vcf.gz 51549666:95:1000G_omni2.5.b37.vcf.gz.md5 51549761:475087:1000G_omni2.5.b37.vcf.idx.gz 52024848:99:1000G_omni2.5.b37.vcf.idx.gz.md5 52024947:45036197:1000G_phase1.indels.b37.vcf.gz 97061144:101:1000G_phase1.indels.b37.vcf.gz.md5 97061245:333605:1000G_phase1.indels.b37.vcf.idx.gz 97394850:105:1000G_phase1.indels.b37.vcf.idx.gz.md5 97394955:550102132:CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.gz 647497087:124:CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.gz.md5 647497211:3555843:CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.idx.gz 651053054:128:CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.idx.gz.md5 651053182:19868212:Mills_and_1000G_gold_standard.indels.b37.vcf.gz 670921394:118:Mills_and_1000G_gold_standard.indels.b37.vcf.gz.md5 670921512:547962:Mills_and_1000G_gold_standard.indels.b37.vcf.idx.gz 671469474:122:Mills_and_1000G_gold_standard.indels.b37.vcf.idx.gz.md5 671469596:29993649:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.gz 701463245:128:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.gz.md5 701463373:578447:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.idx.gz 702041820:132:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.idx.gz.md5 702041952:38839441:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.gz 740881393:122:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.gz.md5 740881515:605289:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.idx.gz 741486804:126:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.idx.gz.md5 741486930:6040047539:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam 6781534469:113749:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.bai.gz 6781648218:124:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.bai.gz.md5 6781648342:117:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.md5 6781648459:3928395:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.gz 6785576854:120:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.gz.md5 6785576974:66113:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.idx.gz 6785643087:124:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.idx.gz.md5 6785643211:282374229:dbsnp_137.b37.excluding_sites_after_129.vcf.gz 7068017440:117:dbsnp_137.b37.excluding_sites_after_129.vcf.gz.md5 7068017557:3824375:dbsnp_137.b37.excluding_sites_after_129.vcf.idx.gz 7071841932:121:dbsnp_137.b37.excluding_sites_after_129.vcf.idx.gz.md5 7071842053:1022107667:dbsnp_137.b37.vcf.gz 8093949720:91:dbsnp_137.b37.vcf.gz.md5 8093949811:3982568:dbsnp_137.b37.vcf.idx.gz 8097932379:95:dbsnp_137.b37.vcf.idx.gz.md5 8097932474:59819710:hapmap_3.3.b37.vcf.gz 8157752184:92:hapmap_3.3.b37.vcf.gz.md5 8157752276:1022297:hapmap_3.3.b37.vcf.idx.gz 8158774573:96:hapmap_3.3.b37.vcf.idx.gz.md5 8158774669:2597:human_g1k_v37.dict.gz 8158777266:92:human_g1k_v37.dict.gz.md5 8158777358:1044:human_g1k_v37.fasta.fai.gz 8158778402:97:human_g1k_v37.fasta.fai.gz.md5 8158778499:869925027:human_g1k_v37.fasta.gz 9028703526:93:human_g1k_v37.fasta.gz.md5 9028703619:85:human_g1k_v37.stats.gz 9028703704:93:human_g1k_v37.stats.gz.md5 9028703797:2689:human_g1k_v37_decoy.dict.gz 9028706486:98:human_g1k_v37_decoy.dict.gz.md5 9028706584:1095:human_g1k_v37_decoy.fasta.fai.gz 9028707679:103:human_g1k_v37_decoy.fasta.fai.gz.md5 
9028707782:879197576:human_g1k_v37_decoy.fasta.gz 9907905358:99:human_g1k_v37_decoy.fasta.gz.md5 9907905457:91:human_g1k_v37_decoy.stats.gz 9907905548:99:human_g1k_v37_decoy.stats.gz.md5
diff --git a/sdk/python/tests/data/jlake_manifest b/sdk/python/tests/data/jlake_manifest
new file mode 100644 (file)
index 0000000..5da24dd
--- /dev/null
@@ -0,0 +1,20 @@
+./PG0002577-DNA-jlake-germline ec9ae2be7620af8b6efd96809e5e75e0+1917833 0:6148:.DS_Store 6148:4096:._.DS_Store 10244:1904161:PG0002577-DNA.pdf 1914405:76:PersonalGenome_Mac.command 1914481:704:PersonalGenome_Windows.bat 1915185:2648:md5sum.txt
+./PG0002577-DNA-jlake-germline/Assembly 42a43977e2303a4337129331577d122a+10244 0:6148:.DS_Store 6148:4096:._.DS_Store
+./PG0002577-DNA-jlake-germline/Assembly/conf 95747f01cc916acc1f385ea2d018841a+165976 0:61440:dirs.tar 61440:11245:project.conf 72685:61440:project.dirs.tar 134125:31851:run.conf.xml
+./PG0002577-DNA-jlake-germline/Assembly/genome 6762e2e508fc8eb451f7ec0ea0faee33+10244 0:6148:.DS_Store 6148:4096:._.DS_Store
+./PG0002577-DNA-jlake-germline/Assembly/genome/bam 5a854b86da65e1ff3572f17e088633fe+67108864 9c6c77dd7511dada2e631a3b038b5fa1+67108864 54c6025d7dd4cd8ecccfe573499a2176+67108864 c9cfd86fe5df4b83c6ad8dddd901becc+67108864 c9d7be11850c860e2d5cb0c9e1a44915+67108864 3fb42e18373c8710aed8620fa81db3c1+67108864 f419d215df8195df08394865f719fbca+67108864 d263423f327da62e24867f22fc490706+67108864 40ef57add0dfbee66836365ae0fd7d06+67108864 0fea4b759d870ab6decbb632176fbeb5+67108864 c109c9e484cdafee14c53198c0d6b610+67108864 103f902681b7d40dcde1dce0b79b17c8+67108864 b6b93a2b846620c39e38302f1f7ffe10+67108864 e38be01100ab8d220f06b800b7cd62dd+67108864 f5812ce719898a4d2cb06eebcafdacc0+67108864 6f5fc3c25e8abeb9c079ef801d0f9450+67108864 d9ad4e7e17a79487817ae2ad8e534498+67108864 e5a15e06712a2090e74f78e678b490f6+67108864 69d57fee8eb8dd67906c9a7e9e3e5cac+67108864 68395295a64f0d0b75117a2400a4c1ee+67108864 497dbca0f9dd89eeb2e8dd8aed9796fa+67108864 d14e96347585f714ed527fb0e2e78be5+67108864 03bcad873029c5778e59dd4c4109f5d7+67108864 0bb736900002c0c2b2ab31a6f8009fdb+67108864 22509a92aca7813b2363661ab233772f+67108864 c23528b87c5581e6d6ad538ff007e3a8+67108864 784f055bea23e35078c9437eaa233e0e+67108864 76a724595ce0b01bb51c6a948fa2f5a4+67108864 c52e5205c23728d439e90378267ee382+67108864 e4f2400071c39af9661de0662018e984+67108864 66b8a86b349902d359dc6ac9788e0246+67108864 98353a4694dbc5eae4c3960d2cd7baf9+67108864 130d8c29af7d0a5468b854bb1758dd4d+67108864 9741fb84b07c6282bd6b6a002ecdc15e+67108864 7eaa6674cf09efc2adfd14a7b169e357+67108864 4071ca2915995af71dd9da03e8c6c4c1+67108864 888673ec3ca2787511dca770f090c138+67108864 21d21d390663b32c3738bebdd97c32e8+67108864 f575e7b15bc194bf98e346bb4b2788a4+67108864 b5d983d92f990f90622be2232ced743b+67108864 0e6bd01b5b9e49b1969b5d3817499476+67108864 481144b5d5ab8a1ab7235fe64bdc872b+67108864 6c8f5fe6d45e5719fc6270748015db12+67108864 68966f5d47a96aa8731c5950fd1f467d+67108864 4d48317b33738dfa96174bd5a1dcc841+67108864 24f75fafbe546fe366f7a6d465240f8d+67108864 8711ad611384bd7f0e89c400c90cfb7e+67108864 2ae54914c5bf92a8cbdad755e8135b1b+67108864 e11116820a72cfed92d64bced7172187+67108864 4cf0a16d830cb8c91837b3f24093ca2a+67108864 196a74cb26cd1bd8e540d22e46fa4302+67108864 92982486ba3b6d370cc31ccfddfd8110+67108864 6c5e160bb75c99ed338bdbbe9c968955+67108864 d0a48c19f2a00ad5b3709326c428b3d9+67108864 877890001e22c2d88375892890e634fa+67108864 f3cad932b80e8d20c8f67f4239a5c279+67108864 15d980c196e5d7690de660a3c3400154+67108864 964bb3d46fe32c7b3ff9077c96a32a0d+67108864 60107013728b9a04d9822588613cc1bb+67108864 e265d4a367e48f740f6fc1c881fcb053+67108864 4808ac7dbb6393908ebf47f453e659c2+67108864 1442fbee68d6d39576d7de798d293859+67108864 c9a16c84c579e83b7423358b7141d66f+67108864 36874b0c65f95e43a0656adad0734d8f+67108864 b174dadbcc463d64b80980a4cdd3fe41+67108864 518c5f4c27b2d5d2b42863e2132e0320+67108864 ab658f08b58b199860913a4b55d68d1d+67108864 92d9f01091907ec183981cedf2f26b37+67108864 539a6fe604549facefb68c4cfc7b204f+67108864 31e641b640af914fe6a2e4bcc31c1948+67108864 ba1e9349e65c068170b62f655a300d3b+67108864 131ff6d51b65974c9d366e81cd608d57+67108864 4ff4d286591293acdd3610373bce6f11+67108864 07cf4b86c65559b68689849c85d8cd64+67108864 051cf1cbf95a08df5c18fc2e98f705a5+67108864 2737c7e7ea93ff9a2c5b2cfd76c06022+67108864 60c00403a73e9b0c40d4b137870abe53+67108864 64068170004d472feb843e4448d8d121+67108864 e955ab9f87979be9d09753e843d36b10+67108864 180692a3f95576c265370680a2a2e052+67108864 cd7a1d16bdef3fa9474ca0a5b21264a2+67108864 4ac9f4d038b4be59a9a2808fef0fea43+67108864 c4947ab9755e190b5ebc9ee9bdc609da+67108864 
96ff83a8ceb80097fa25b893b88104f0+67108864 e893caeea6fc8b2f39695b48bbc16362+67108864 254628d570c39863ee1c7fd1a0750196+67108864 2e24dd5c0c0696e79e674e85a3f12934+67108864 1990250dc54ed2e708dca7b4f9a41c18+67108864 8e6e40e53a663018974749ceebb1b98a+67108864 d84c21e61a9cb82dc9c2a7674a51657f+67108864 95f61628504580fa779eeeb96c0577cd+67108864 19f4baba83495e69c94ad52d22ccc2e7+67108864 0b9a91c910d6bee96f8c1caf129a10c9+67108864 0fb342be2b7e50536485fad5fd98280d+67108864 482ccfbf6dc6d3cec1c3a58982f40db4+67108864 b51612ee241fc06eeac1d823abd70858+67108864 14ff4c77a9a151b0062338c1d5b30242+67108864 9c37ab2ca3179d71544b2793cab38918+67108864 3947527126983bccbcc795c2a922dbb3+67108864 df6e54c39b918a1a26a3f557fc61c69c+67108864 94ae725934b1aeee78db89dec7e629b9+67108864 b08756a57d798709bcbb60d448cfe703+67108864 50de90bf9d8cf7fdc8ff54a5a6ff6f5b+67108864 24c451aa9481b72c0f989ed264198730+67108864 b1f79cc8b5729ae7523bd5d0d7c840b0+67108864 9b5f5392fb4d814ae13d5106bec28696+67108864 55d49c99f24cd778295f3637daddbd4f+67108864 e18e2c5dac882f932e1e4e4cf557b572+67108864 453f3aa0d1a9832e2dd99c44b30cd3ed+67108864 57f19b58f8da911138af4351c6aea5a9+67108864 5e3843ce5bfaf6170a16f26b4e4b1aac+67108864 597919fdf8ff48500c264d0c9f7439f5+67108864 b51bb4c6be8618ab377787f180560048+67108864 ff6a60a658e75febf565ca3cecdd90fb+67108864 9c993d598ac6a696591249ca115169b0+67108864 283ecc93e81dbae4a4cc8523e0260fbf+67108864 ad8ad32aec1b8df99ae91a3580908f96+67108864 d3c0d337118c727705fc5de57e05c235+67108864 7fc7f73d3ccd5c6205c80f845aae7ed3+67108864 4e555e6ce17d792bb5982232fcf70405+67108864 320bd68a5462576ad726c129f6941844+67108864 7ffbde2e853e1e9b0fbfdae63be37dab+67108864 ae25a4cc941178b62929aae15831ef8b+67108864 ba24657691267443b2e7bf6b6964e051+67108864 1e9e12b68f26db8c8f7c7cc788945df0+67108864 edf4acc2527cddf0e20d009f4486d7d2+67108864 baefb0589e1516ed42fced78370f8710+67108864 c037629c703f4a2c1df194202cecff0a+67108864 944ceef59fc6dc1e2dac2d8bc71ef6fb+67108864 d955e2eee34d9873d36065870fe6904b+67108864 eb4b9530be56e0bb33b9827279990dd6+67108864 559d40ebfeb35b1d88e797a3e9f8fdd7+67108864 623e4c4f5a5ae8c466a7b52bcf6e830c+67108864 5790327fc52b0d57357ec948331ca387+67108864 232a548bccb18ba351c681ddb0f84d4a+67108864 e4797b5eea62b9980478d01e7092a05c+67108864 6b888368142e02359ae8241daa0ba99c+67108864 57969f6dd6f8f56064c57ed4929d6c09+67108864 10f07bd7b6c3b8bd55e990cc6b65de37+67108864 ce126a25374dfe6e4cd91d27239e3606+67108864 536c1ff25f7334b3eee12fd61cd4c88c+67108864 07f03c33b0a0ca3d76d31e65aec87a05+67108864 156cc2e702d5706c6dcbfc65937e0ced+67108864 ed19220354dd45366dda185aa6fcdda9+67108864 f8f3c1650d6d9fd1fb81defcef4f5e37+67108864 157836f6e84cb522a520b8e00cbdddb1+67108864 ea34080f4a2c67c8eac2d204adae0214+67108864 ceff9152067f3579158d38f67ccfe3ee+67108864 d42fd7b0f9afa30622f19cdad886e510+67108864 ca252dacf27733bcd1bbb0fee73dbcf1+67108864 74bd8949c0a3ad016dd2fe0dff389217+67108864 c198a863ca7030e39d8876e0f84ac8fe+67108864 6c63203260faceabf16fed782bee1e6a+67108864 d5d3f52e5615f06c301d8ffd43bdcaf1+67108864 921c874804339001b4122f0302cef8cc+67108864 e0c59c1f377fe5cd9d6ba345edb4f519+67108864 fced978955c7d8526a6dbc344eeff8ed+67108864 4da3bdacd3c86355934b49b053f09650+67108864 973880f2f3c56727b95dedcaa4d0a60e+67108864 caf2b7a8550df7b241e371efa2fe692d+67108864 d7cddaecf10aa58a54daa2b7b2a02e11+67108864 2bc6e177da10b99fcd6d091fd23fb155+67108864 de817551ab1a9cd5dd51c6e81e0e44bf+67108864 f137d5fffb9fd4cfcf7aa4926f3e99c0+67108864 9d17da7cf8b872e2a826fff2c5374142+67108864 5830325f09fd13cd12bce3f691aad968+67108864 8215a5983c95e23a63c687b642f13083+67108864 
11f76f54f3d0552c10299367a10cdb22+67108864 6efb7fad52edf9859561e9499cf510e0+67108864 a088c12c4f3927e74ce4a4c5f8760621+67108864 e0f969e633f92e756b1094e28839bd5e+67108864 ab0b88f96c9ee1916797ef424b683167+67108864 dbb6ddb82e96ffecd55bf7b7f7a512ed+67108864 9a055029ec6bd836c85dc954fdf7bbc9+67108864 c5f10798cc176c312c605fff373b3a17+67108864 05ed65cc2bb638c886bcf4ffce6dfeac+67108864 c4a3be0b33972b566228dbd5f47d7e5b+67108864 57ef58cc15ea5d8b8ebbe8420cd06b55+67108864 274c1514d474e64f3de02c179a82956f+67108864 810a77d1d7ba6a20e82c8910f5e70763+67108864 42a4f5e30a2f86bf11fe622f7a618ae6+67108864 9ff7eeca6c1879bd22dd86ce89349276+67108864 913bb4a677d4b4c7c131fd3b1cbebf06+67108864 9ba24586a78d824ace0337f321d2c100+67108864 2f892d650527a819ed7d499a7e09a3f3+67108864 b25bda0cfe2c4344d1703fcf3a6d4b8a+67108864 2784662ce216490c73fe67fbd52130fa+67108864 e79a6367f486e2fae9e54d0020b0db17+67108864 2f24bfab56da9e223c37b44e1f1ce9b2+67108864 b50f659dd903cb6f9c707f699d21af72+67108864 b94162503a41517f5a419ee397340005+67108864 6231562ab94c3be051ea9087fb8401c9+67108864 ef51cf3cd05f7810b6f322adc187ba6d+67108864 5d621ca36e6f35cb4f057b117d372e50+67108864 8b8239bd23a8adc2cee1f8a156d85e85+67108864 66676e37602149cc7d37ccebe484fc03+67108864 f91cb726e6ac09145c242752c4404e1f+67108864 ce73c7682c4fe26b98ee72a730010746+67108864 8f1642675b8f47e6cef4ce6a65e7dddb+67108864 a0c772eee3f139b1831cca60ce49b347+67108864 547699429dc8c3cfcb1e77e7618f2f17+67108864 02df0efc955879c3f2501fa8b0bcc801+67108864 a701fcf88e400f11220ddc2cbd83a73e+67108864 4c1721a042b1a9437c3eef71a58754dd+67108864 0a5fe95394f9f497e66217c117bfeb42+67108864 f23e04ce1d188c78e33f258b45c5ac8d+67108864 f3c7badf1ec40e33b375e62971320278+67108864 d2242648880ab86690ccea9f98275e43+67108864 8b397dfb9224fdfed3859ce86d74ad0e+67108864 09153d492860fc9d412c01785340798a+67108864 c3350ac90cf4d559a7295a730f18d20a+67108864 baebf81afe44cb6b42f5f6e6290ec0fe+67108864 96cd500b282c3113007c697612444603+67108864 a73c14020805ae3c1925ce55eb2e5971+67108864 433ae5411010c6213478f28ad6cab58a+67108864 3e042ee7b88f2d28bfe5b98b44f3be6a+67108864 fd9dde59a76b04cef85e85d61d02e6ad+67108864 d16f4bbcf2a0e8fdf932b54e35695844+67108864 efe1099652662006f509b99df819f1b7+67108864 613fef2dfc96ad3305006cdb8dea0728+67108864 b3eedbfe69eb52fb6bf7d21ea1c3bd5e+67108864 2f1784343b9d2cc4682ee293d051500d+67108864 62ac15c401cabc308f91639667229e0d+67108864 2afc3e34c284f72faded649c0ec3b72c+67108864 1399e4bff224e69402319a61c6db3dd2+67108864 533d737e352255f8f1b65622d92fe9c7+67108864 203eeb4e4410b7a59230eeac6d8e6e71+67108864 c8bacf1552def30bf0a3525bb90d4ee3+67108864 de0d669dd7c9cc7c394eade1f4a11100+67108864 b995714c0553fb41ae5ab417579381f6+67108864 5f6be59b124ec208b9e4d461fe0e5321+67108864 fd8f0a64895e2d28f52b94e980d4a6e3+67108864 776afe2ccc11fc9b9b9bcd1637ef346d+67108864 95b8cdfe70f64a49bc570565107ecf5f+67108864 26479dc9fe565e63bfff3f5850d77998+67108864 8da85395f1b08cd52869d77a7364249f+67108864 66f76a05e19e0580251ede71e2958e92+67108864 5ff92a30a1aa13ab0a42320b77cfea45+67108864 6f3f8613d13412c9530801a9a3f35c1b+67108864 50dc0b3a14dbeffd71b69058275b2d37+67108864 cf19857fbd6aa6f6c3c258431674a106+67108864 e0b084b9493be695c18f5d3cb42fbaf6+67108864 3aef149ddbecaa8ade6fa0afbc9de9e9+67108864 3854fb0bc82e15b33cb2966f5be46239+67108864 06b43ce1c20a4bd948750f7214e40f4e+67108864 2ee5b68ddb703c975e18319a67621c7d+67108864 2306d047cf9aa09caa1b2a9a0c541896+67108864 af2c70d38e589ebd8c7649a90cb065fa+67108864 71bc4c0a4c2d00cc34988fb0ecd1285c+67108864 0c2c0cbfe580ec70c9cdf1b8ef31d4f9+67108864 cb15da81b8aab0167e184f2ee2b78b2e+67108864 
003e15c44f959ec8e46327b05e79d599+67108864 6ff440ea12517ebcc7636b07f840b7ab+67108864 7a40e62a496ee3bb3b5fdadf6d2af3ae+67108864 db1e814bd3ae2f39083a62aff4381f90+67108864 7c68aa83cb27f585291a835b4ac5ced3+67108864 c7a7efb80d3347d4f7a13618ba5aa3f0+67108864 1411ce7884539214a1e92568231f235b+67108864 02a2b2122889b53f45667af2a120d01f+67108864 ad22b2a76908ccde5aa8adc29df538c0+67108864 280e52e831b65105f06a38a4f67b6402+67108864 a5f9439149211f3a7d25fa11de178fd1+67108864 0bb88f24287ad72b8d58c536c2e34663+67108864 417d3b1862cb8df75a7e8a210f15e3a0+67108864 a441aa0f23faba7c3cf66def56e66004+67108864 f55322a307d173bb797924fc5f4afbc3+67108864 5a9a3ad6dd8ae415a0e696dcd40a2b7f+67108864 0bab46dbc9501d08a4625629f4d5b860+67108864 67893ccbe30016c1eabe3d2c8a80692f+67108864 cc8357e24fe32c5cb15b5bc8057b9f4a+67108864 9e9673069303440d7f9fbfbbd4092044+67108864 f0c3f8cddd5792cf5eebe8b79dcc1bbe+67108864 21ce8204bbdb6f016f11fe9ae5f6020a+67108864 0f8a1598ee8c418b9ce7c34c175bf9c1+67108864 d7c5a77e5397635ce1430ed537183d5f+67108864 56b7099cd423ebba5cbbf10d5e0d2e7c+67108864 1c9804af0c110daa81ce966a7fbf41ea+67108864 163197488a7121c0e9a33bc0fde5ec51+67108864 374b8b0e3949983e1ac9fba65a3e4024+67108864 ced30f4e0c46e5659f364248955c1667+67108864 5988fa40843d6a1ca07b957e77c8ec12+67108864 fe2111482b8ba2c58e2accfb309b3b72+67108864 87a7d5a127d9273d55f5dd0ff4760a13+67108864 a64150d31725405d33e689ab39377efe+67108864 c8abd310e6150bd4afadfe16e7d256ac+67108864 027fe6eb0b09a4cb755d1d62b0a1423f+67108864 a9b655be082bfc1c61e21e0510153776+67108864 aa2a094dca5481091ce7ba08852b0903+67108864 eab0206180b5c06bdc9202aa6d7e0eaf+67108864 d84e88e3bf76cb3a4a44bf98eee1d262+67108864 9d39e5294e994cda2d993e06770d9b7f+67108864 427b9a850025c42f01274fd92bc4ad51+67108864 6155de11d88a0a5e81c04d75e240bf37+67108864 b700a6e431b551630b28368317194aa7+67108864 2d5a90b6e65546928056048116c45dd9+67108864 c9ed6f6d6dbf42ffb3b377973ca7dba7+67108864 08302c1433b0fb80ca8dfff9e6f74681+67108864 92db4d53c6888a14602556bbb79f835c+67108864 c21fba4899004a955ef88e4ab3d38941+67108864 de1b9dbb2f511d2ece9d9837323101f1+67108864 54180d64a3cb966eca0356ffaea49f26+67108864 381354a3022c66550fb4e44d8034a141+67108864 597876a4a4464de636f8a5fbf3e543f5+67108864 c29963efcbdeee5b08f5f1575a1d6092+67108864 fce9d2b920f580ea8779f72fbea8d964+67108864 2effefe2d35c5ba29c9e2e6acc03ed4c+67108864 e04da28dc87c175eb01704e4b51fb4c7+67108864 f4adcd9d6ecdbdb8c2b91ac2ad591d59+67108864 53f8e4d2781414ad30e4479605b589c3+67108864 301c286433798a684f2d721c7d1f5c03+67108864 4f5e4d9c261ec572be8c117135ccd8fb+67108864 d9feb7b8981f846190b4e4f8c3075164+67108864 ff9343ec031079a66cd0a5560e4ae9ac+67108864 c4abd8ab5a655d3bdded0516ee442db1+67108864 db082ef8f2e3ee42677b34ccca86b5e3+67108864 6fc83a81ea15c177c41b2a56a736a098+67108864 c09ed63c14f57b8218470f6b0ba1435b+67108864 3c87cc66c90c9f0d995408df69305209+67108864 9c9e9b96e4d2970f55ff3f5930495d44+67108864 7572a36b461e6c3fee3512d8582aeb61+67108864 948d1d1e7c8ddf237fa910623c963568+67108864 84fea0e98e7718e039bc9d6c1632586c+67108864 9eb6eee60f28e1d075f71d5cc0447bae+67108864 3f36fc2738992c1135a4e80a28b28b75+67108864 db81cde123e9bcd6c291209d385129c4+67108864 e14d1a2b49e62d23b81fc9383322dd19+67108864 c79fa6a3288e27bcedee730a1f0d54f3+67108864 80679d686e1a55b5b22a9d250023c2f0+67108864 114434b3a95875165370385cd5adfcb3+67108864 83c5ee9bf0da6e2419913d4954d28e35+67108864 8c26ea89fb32a840068c2b049380ac15+67108864 afd1f950225dc30b68260e0a85297e72+67108864 35ca9657c4d2ad957b3d5f9f56a3f2c1+67108864 6bbffec1991d0308e6fefaa7a260097f+67108864 0e9f68eab1ff4f688863e192a374846d+67108864 
eb672833bd531bfe24f1246628a3dd8e+67108864 7a3eb4e715458a189579184ebdec2215+67108864 1f34c4808c3503e1993ccd80ce850ed1+67108864 041b3c92294ed8be56bad1009ebde213+67108864 a88c8e6b793f189af93f8c2bae915bc8+67108864 af77c7d4cadc116f4c78f46f9f648edc+67108864 4890bb9d239182d6b11b12f4322b3097+67108864 4707a2b5d2951423b9cb5cdf32141783+67108864 35ba9cc88e7ad5b5c98e43a921f7ff56+67108864 2eb75b8978046345cde58d2176af7170+67108864 add1039d192c6e42526cf2b6afa9bd42+67108864 9a477df1480fd7592fd2ce32c39f7406+67108864 6fcace189ae0cf89e39c98ae8738a85d+67108864 5bf37955e0e45794d0390850401964a0+67108864 446e566fd032785f676ebdd3ba1dfafa+67108864 fdf9c3e975893f7c3df285eaaef9d69c+67108864 9befa5fbe2f43cbcd728b81508369f97+67108864 d862dfb30026a97111e96504a3058e04+67108864 78f0cd0b88f301624b5f104fa30858f5+67108864 4ab8f486d99a3b4fe66c9c348d6fd907+67108864 580601b7db8d4a0c9adbb2e4fbfdd486+67108864 9cff8f97cc34a751dd58feb19e416e37+67108864 63ddfe988e3fc9c4b88365d69996f4e1+67108864 b56ecf4f9b696c9453ee8dc948fe17ad+67108864 81c2eebdfa8ea6950cf57b863175e2a9+67108864 da2e52c5fa244bff3d5c86f7b702bb25+67108864 462e9f5d6031574b2a22ad60f8cab54d+67108864 c445d18f7531e24f0a160a7630b1b8ff+67108864 1974531544580a294bc8ed247b5786a4+67108864 21e93b14a1f1e35264a62d1ad97a3501+67108864 5c634b78ab66b25b93a6c019dd10307e+67108864 40c7314f60f524b551a121a98a3b89e5+67108864 1677007ea98f9147329aa04fd618fe2b+67108864 a79c00d0291a392abe73c0c162649ac3+67108864 15bf1499acf1c24c8b66ddb5c2f7588c+67108864 2eaea836bff8cde4519db6cf2e69b05e+67108864 1e5ec5554dce9eede58caf8933ce3596+67108864 ac1078460e08af47d6c5d2ae1dd3b746+67108864 a6dea03259087ab7207a5f5a1ef679d0+67108864 78ed9f9db2e9643daf166658b952c127+67108864 c5d12b1cff4bca0c8d15e1bd73dae1d0+67108864 c883939a7cf896bcd2f85d850e71b07d+67108864 fc16e276e1588daaee99de8018df0571+67108864 7a21a92e38106e93ee8ae7a7381329a7+67108864 8eb5b51ffdc62129fe0eb28e9e38b489+67108864 d9c70a717133085d230e29c83b495676+67108864 2eedcd7fe3d79c6a9f2b4aaeaad04164+67108864 da5e919680b6ea8d41b8758e7086133e+67108864 d1cc0d339784a72a4f0d263464e071d8+67108864 124967dc391185cd924e86e2d9ff3745+67108864 e6b68f6b97dc9e2426b699b3140564a9+67108864 6cc20ee8b272deff8980d2b3a4477e90+67108864 dad069a6ba6c47b35483cc9fa3885d53+67108864 b0eda0c60528e29f5b216012d4616b1f+67108864 eb5e3a335f47c063adfef05de7b4667c+67108864 b545b5ad590984a3f478998f5696a72b+67108864 f73ddc8d5914626ddd50bcaae6abdeef+67108864 62793b6bdb743f9448ccd5740b33680f+67108864 1a84b04f18a2bf3cbf38ee71592361c6+67108864 518bbaeeb370f12d449c8d63946966e2+67108864 5457694f602380c630b15f36ec30981b+67108864 b8b72eacb6589d8103d1bd9c05759d9c+67108864 122d43c6dd8f63428580de4e2a3b7e31+67108864 5f5bf06a26037d6e683b088559a7853e+67108864 487e0c8983c7aa7a9660dced20fd75a4+67108864 0c4bab937d54d2b41d64874d05266304+67108864 e585812acfbdcef8b863cc1c55d3f070+67108864 a636fd1e57ba72c9710a20b0615c9c36+67108864 7ec1d0e5a83afbfe0848b1d11a192977+67108864 6206fde59d76ea51aa58dea598f35df7+67108864 5541b1f4b2ea70f386f99fe2bec7ca8e+67108864 2765d0875069650b3ded5bcf1057748b+67108864 76fc47f77e38cd9e600e0eeaf9e30b6b+67108864 c158c77c833392aa67e8499812e5f83e+67108864 b180e70bae15ed646c3f53e3e741651c+67108864 c9270bff3ed8f8b6676299d470ca10b5+67108864 50e72e004bc6fafcc55c95af20b38cb9+67108864 c4b5047e53e0330b9312be395cae7de8+67108864 8471c29a2e2a64144cfcf81102567306+67108864 edce7e124a06247a4eeaa5961def2fc8+67108864 83623c9a99c45b228a04d4099be110ff+67108864 98c5f2deab5676fc460e553c792fd5d5+67108864 583fd68a679773af986ddea8c2bb2323+67108864 2bcdc93ccfb5601210573e985f54dfb1+67108864 
b36651e5cd042a6d221dbbb370fdf595+67108864 e815d8da6a24f58b6079500dcb6dde92+67108864 3ecbd8481a063f405a5b87ada486e229+67108864 559159e2bbe19ec78551d4f04ca71219+67108864 207965435d3dfa7dd6b872205ecee670+67108864 09c39df8dd0ed895f2f06f5fdf331f44+67108864 222acaaf41033f052ace00e4f633f8de+67108864 3bfca4b0dddb38adf015657390aa84b7+67108864 ca141e093060662c7fda108ce8467b9f+67108864 b72c6eda282461590d16f05cc5b6b6be+67108864 ee2d50ded50608024eeb6e2a375775c4+67108864 97aa77b8d241de99d3ca107876e59577+67108864 5fbe7fb61babb4c6ea78353db046577e+67108864 22eeffd26c93bc1832c08664e567f2c5+67108864 ee46575fb609641bfc79f45d0a552263+67108864 af222b7594ef2a21968e8f5a355f602f+67108864 a3196fe7b6d2a992ca31d59eb86a4884+67108864 f86726c23cb938537bda8f12bed90a0b+67108864 92f65da548b38c97fb87baeb414b985a+67108864 6a379105ad86d53af712cf270cf201c2+67108864 a6342eb8f300adce14b3dceb65c48042+67108864 68c0b82d38f8f55f234c8b864f3124aa+67108864 9f836e76ec05bc98614f88104805ce94+67108864 9698568c75af9181b50fb1aaa248a17d+67108864 ba20012679f7e77e55d5c0c8d4c9795e+67108864 ca7637fe43241795a070542007cc0fb1+67108864 73b586d5f610791131e73f5e5e2bbb33+67108864 ebaa2386120e44e5c3b003ed9afb1a1a+67108864 fe94d8e9d03089abdc49b5a91bd0a5f4+67108864 549a252c97994224d3f1878bdb2d3bd2+67108864 b4f2c8c6c2a363005dd59ed295f7a372+67108864 4d3df1f5848bad821d9aa9ba4b906e5d+67108864 29214f6eba324b577f3e57eeed313d8a+67108864 c2b2b768109ee7a185389bb2c03c9463+67108864 430763efd090fd67b9d34648ec769ff3+67108864 0b45e8e8635ce07dbe95511a0f73a58c+67108864 9672ae65f691a9906a30eaaf1f412456+67108864 2790ff8d6a65162c64a8020737c056ca+67108864 71deb0da8ab7b2cb8ac31bbbd2153b7a+67108864 0176831f5ced03159f23d1605f09c0b5+67108864 a214e08c45eb9659412f7e1da66f8393+67108864 082f6320725c4353e1713e5543896ceb+67108864 2ea08c52cc0a421cdb787af6093d6a8f+67108864 293b457808839b5fb1c96b4ad5307f64+67108864 a8bd1439b2bf08516f48867f2649c92b+67108864 7fd29dc64c3dd3945eaff1cb9159fbbb+67108864 e9de19748265de7ae96d9b7d9bc4f021+67108864 2990043a6a02592178b3d515cebbeabb+67108864 3e22d44e8668c64b812e6a69e3fc9ba1+67108864 17a9aedc84771d8b0b2a3a8004f1b5af+67108864 58015b5a67f8bd5924732d5821c0cf6c+67108864 00b48587ac18be30d2ee3be476f753c7+67108864 8c6e61d5713cbe261f9bda849810b113+67108864 6687b061aedcac8b3c0397ca06cd7454+67108864 a680a002f3165cea16d65e66ae764e6f+67108864 6a5f349fc52033d3e94f98904ab425a7+67108864 51eb722ed0402ac5da6cca70a5ec5c17+67108864 0b71fc77e9ac56ad924716419c4b14b9+67108864 cfd35aa4526e8ace42fe58ae778310ba+67108864 c189e3b91cf3fdedd4f16420da6edada+67108864 90414e8a86c002ad919e3fca22698f24+67108864 4548a56cc0285c88b12cbd149a8c33b9+67108864 57ad0742333b640abaf99d2d76251bbb+67108864 c96c7bfb6e4e5e91eaa95ef4a7f0063b+67108864 0a2432298689a7930310d505a8b72b25+67108864 68bf60dbfd64b9e42a4ffe985ec0ed1c+67108864 82af1dfa30b23d296c8c492ff49d363d+67108864 52012e0f2502a53d1a7e6bd0b53c1f7f+67108864 b7a38a5eb68c133166608d6894c40323+67108864 99a474347fe2aa2f885d3e03b41c5a13+67108864 3a7812b49e18c60e8ffc7e06f3f611c1+67108864 bbf3832ed3621c86917153a27bcd6c96+67108864 f7c80aa5a5c8912c7b374d17e53f8812+67108864 31d27c0506420d4dae49bf3500171123+67108864 5bf3a9a223b785637a976d8a2314d15b+67108864 0e2e4d0f02b414d814bd8f142a63c611+67108864 e0cd118bff0722657e8af9d6f7ab9387+67108864 aafbd2e68aaddc80b1b2c9a58d34d912+67108864 31bf19492fca47885d25ff1f03458eed+67108864 22ec120fd0738b9f641e45b0062eba27+67108864 218c385c10060cb8ce8d1af4df3199f9+67108864 1662e09426794ba9017f0b2ee3bbe056+67108864 62814cac58ffa10896b0e1efe404ab07+67108864 aaf6128b4e32d506c09fc2ff789e4659+67108864 
fa9132261ef770571d448da6e820f05c+67108864 7a155a0ecd60a1a3802f9a3dbbdac312+67108864 b5c9b5769ba7f38b516dbad74e4b85f5+67108864 7738f2fa92dbe1dfdb7baf21db826cbd+67108864 07d0c63d6265000dbb3900229e27ff72+67108864 ad08ac30caaa64b0b1c6ae78ef1eeb70+67108864 14373867dd0ea7c9c7ef2f5c8f6ab326+67108864 6bdce1b8e03734102e07a68a1cedafe6+67108864 582c8263766cceff15f0e1aff84998b1+67108864 e71f57598751ea906c68ff46c478c76b+67108864 a4804871f087cd8fe93b888e274ee024+67108864 144967b90aacf70d4265a83656223e35+67108864 4da6c6e79f38ceca986d250f6e932096+67108864 9bae8b1ff4292019c53888841f72a982+67108864 f0ee2e68deba6b63c248003cb82f71c7+67108864 a5ec67f66515b948dfa49ebf4c85222f+67108864 04c73a8657409844f66619031721cf93+67108864 1da9010012af695023de6802e1f4cd96+67108864 d7e4035ba647ca2aeb4f218a5fba7f65+67108864 ea989512fa8076eb0fd44a028d460924+67108864 c8e3fe48f7b331d0b264f302275818b4+67108864 a1f498f4b71bf600f5416509cfb3e65a+67108864 24b0c787a8d21be9435748465ec26515+67108864 d11ca8b28ae1b8bdba9ea71a78ff73e7+67108864 0017d2c46a2e0250eb5c6d75a9045f92+67108864 bc156095174bc4814efae2ed49367fe8+67108864 7f127376f0cf418c605c5950a4bfec42+67108864 bb9e9b067b0363c6f1502b444c0b4472+67108864 2f6b3a746f5e8cc2254ed9cc08450a3d+67108864 196f05634db6db0b069ba240e6a646cf+67108864 5493c7d92b9710993ae12ae788904d3a+67108864 845f7edc40ff2731224713ec3deb2575+67108864 f050395dfec775d38db5b0d57ea0a581+67108864 4e5b768d18d85067d93a6a519ffd10ef+67108864 5cf888ffe5474f587318b36460ce0ad7+67108864 c3be66ebe0fd039a13cf0fae05774afe+67108864 303023470389e4048aae95d8bb34fe7e+67108864 af06698e1893e2704e03f722959d7831+67108864 57e7124fdd21f6c580c4ac374861a026+67108864 7dbb565e8fbae60e3597eb711921acdd+67108864 3e6550d9f748d441dca47757cf98ef22+67108864 178423073bb92104feabc8f7708164f0+67108864 98eeab6311f4d210e230573b9d5d43aa+67108864 8e3feb1831e7ad494dab2ba877831944+67108864 32ad56f602dcee3ecd0b26b33f439705+67108864 13fc3c90af586e0171cff2c1ced46af5+67108864 15cab69619aca7efbd9dc84dda97c2be+67108864 f734bc52b02ff5e7d00ff35469495084+67108864 2eae852de4af35923e335c11ab9a707f+67108864 fcfcdae7f7368cd3f4cbdf5bbe4c9218+67108864 da45807d889e3a9c7fff8d5184101bfd+67108864 9b877fb32436c4b8b207244e01e1874a+67108864 fb1c79d9d336df91ee9e8f16acab2866+67108864 cc45d4bc6c390fcc65ac41cb478414ea+67108864 abe9501379c619480d0894455896c4bf+67108864 8272118879a5ba38ade3942a7678dfbb+67108864 e94ed5c5ae2b7b30088b5b0f15f7b070+67108864 56586f0d6388bf0a191685aea2884b17+67108864 2f78d776307e364cac16d82b7ce20c3f+67108864 819cbd489af5fa54f388dc255e156012+67108864 023ea9e60186ddf1cf51b39abd8d9b89+67108864 52aa46153ceef085ac1c81b22f8fc7d6+67108864 e79234af9de91a7a55c5c96097fde9a2+67108864 eaeb9ddcac5b8cb42cd5c311995e5923+67108864 3c98cf812311fbd27ca8562640969aee+67108864 b8b1496bfe192d3863aed28169a1a5a1+67108864 7c0055922279d093c461459d93be5d6c+67108864 89109de3468e0c004a5df2e6ab4aaa5f+67108864 9fdcec9a7189a195b82946f477e8bcf6+67108864 2e924831c2241f3f8a9deeb44013e8d3+67108864 1cf6e8b97738a95881063cff3c1d11d5+67108864 7cc135fab1e7d88c70034bef501ff218+67108864 b1e2ed6915c6103dd5b854cc064271d7+67108864 7734fa966c13cea05fc1d67fec540a83+67108864 983e5f5a2f2632e9ac307f343ab18362+67108864 7d0fc77105fc9c1326e81baedadc2611+67108864 8e623fe4072c4e9ebd146d836c725076+67108864 06494a14aa546919573fde57a86ba94c+67108864 ba0b2cba826fae4c98c807b96fda418e+67108864 ed01c980866649a1a8dee6d43017aac6+67108864 f65de1fbbaa627a0e8388905bb9c2272+67108864 ce6ce58654540a18de19258cccf39b0d+67108864 a496b4869c4b258a443a9ef49073838f+67108864 d5522d98d48cc32f511b54efc4b85076+67108864 
d82c6c2cfcdcacbf396f782a66f5a0b5+67108864 e16c8092f0a4ea3b0810f3d428581d36+67108864 a3882f1484c0817d39c5cf4a9e5f5cb6+67108864 c047df254090031ac754fd1518dda335+67108864 adf67c2a1461a1f3d31825563e9f591a+67108864 490c76dad1f70cd18fb91d4ae73227f3+67108864 6b5df83749324e88e4201030f97c203b+67108864 428d3fc9733f6833c773d207bc48b3bb+67108864 ecfcfcd10eeadeb5d551475d6ca99d8d+67108864 c79936bfbe83976274d8841a55edc0a0+67108864 3690834378f8713ddb8b209ca5dbdc60+67108864 7995163ac59ad78033956399183c6f80+67108864 19a11565a6e351a70b1f081490adee4f+67108864 0e629545f3e092533656aae73161ef7a+67108864 1c89e4d1ead0d3962c6a5a476ab7259d+67108864 94bb9ba880fc950bf008fdfe31926d18+67108864 ef92797ef7501d6748c0565527b78e13+67108864 1ceae0b2ee74bfd514655fbff18f185a+67108864 08fb4aa7c2e95ecf51a7b9edb6ca8abf+67108864 0005f0aae91d1b8aafa7cdf811aba9ad+67108864 821b28d84dd2717999becb924400b2a1+67108864 62fc209aa8daf7ed5e7d3e368a959715+67108864 a51feb570bc08c90109f072a00a86e6a+67108864 3a43bf7c8329e803d5a5b946f3d2eeb2+67108864 5fbaefefd0e33c0561d2d1ad02243cc2+67108864 70125926487b5e14434f4ee652e57f5c+67108864 266db112d5741f7cc0033a9a5d892a02+67108864 6402d4e9e089275626ada0a304569b08+67108864 ac89f489e384afdeca6f675efc065d03+67108864 7d712d4ce185e805bdc1f4824a2c2013+67108864 5365fec63b04a937ce25921cc74fe5f4+67108864 6ad1ceacaac14aa728bc5a0dbbb1cb8e+67108864 34435c0dedd729486c3629f8cf9f577d+67108864 79bdaa7e0686a43759a8bc554afe288f+67108864 976edc9593c14a6bd89b17d64b960e39+67108864 64b9bf319d1625a1ab43bcf702beb1a9+67108864 10f927dec0765488bc0c4e4ad5864fa4+67108864 f49bef52b8d0fb6b617da9c2e7f9670c+67108864 455129b3c82cc5883065979d29eee373+67108864 021ab29d986daf4d3eac5016202f7133+67108864 7a6b5fa82b5504061c240832980b5fb3+67108864 66ef0cabcdb0a0597c1d0cda9ea80ccd+67108864 7162f8592aaef6ba81eaec040513748b+67108864 616a218103d3bb29469d3a114a161b28+67108864 943d9da1352428c411e9dc7481a048bb+67108864 63afb9a885c0eeca44dc27b09bd5e08d+67108864 c4a6f3d00323af98bb03f4036d534f66+67108864 0caf3534294ff0737818091a223b39c1+67108864 8ff5a74aa3014ff730a4b2cc7fa19f8d+67108864 ea60fb9aadfd2b7dd2c2ec8311a78917+67108864 664f1859521ffa626e2180080a86da49+67108864 39c2b35af70b0cbbd185e525b6811d12+67108864 9cd080c53f594249856240fd8fe3befd+67108864 46c00423ce315e452ef072f1fd2f446c+67108864 791ec75ea6e41fb6569b906340c8359a+67108864 d6171b1a4a7cd7e7151181da6e355389+67108864 db9124b010b6d85517bf6c19df1fbcea+67108864 9d342295e2fa66cab7a9bf525e423cad+67108864 ebfb6d5d8da2c885ab89ccaab22addee+67108864 edc6f46d9e98132e8a70a5dba6d0a524+67108864 c3a3a1a69c7d06724eb506231ebaa4ad+67108864 a5efeed9314f0ed8d8cd098d27899fdc+67108864 6357216e884f403729f9ed85a1e156ff+67108864 4e2e7af2debd0ac7334125eb148736ea+67108864 ee9885bbe6fabb1d20ecf5075c3ef1ce+67108864 2e082c8cee844f71be6d3a8dc63a3275+67108864 8204308ba14a9b7cf1cd14e26ae231c9+67108864 49656b0e77291783b56dff27f69054f3+67108864 bce59a39ca6e7171f413c737a80771dc+67108864 90818e71eec72d6ba69f87a2ae552885+67108864 713752842990e7e98f4c6aaf4174847b+67108864 c9c4f210937340bc0741740dc0bfed2c+67108864 37abdd6893d4b1104ed0aab3031e28d8+67108864 29da5ad57f46c5105f97b87db4e080b4+67108864 e482af928333956a09b440dbddf00088+67108864 08b9e84d1f6841abd8a994210db3c69a+67108864 213b94ad5bbc43168711864b441349ad+67108864 c703506d76acb781ec09c3285fb0381f+67108864 31c7fb70b7eb1832377c81509aac937a+67108864 6bfc148caa4b734b423d889d3b0602b0+67108864 110e00453a504570e90799b4daa042a0+67108864 6bc9e6edf15a1168249ed54795464f41+67108864 0bfe1332f9773d4056009cb1f1a8eab7+67108864 ba552c6d88ca5c6005143c6a7e85e7c0+67108864 
11f93451e28ff65ce40de89091b62f8f+67108864 6acdc268644d8ad70e90e68210e8852d+67108864 7eabb04f5237edf30c0152c632ccdbcf+67108864 a038c58f9dda274d06736df758a29436+67108864 9aa9974c9fbc581261e09d59e9224680+67108864 6e5398224bfc952fe566de3ea89076d7+67108864 265eb4465984d2f68fc639657fdde4db+67108864 0b2b8a7e74cc58cb8594cfbf75e5cf7a+67108864 7d19acd58426a5b4d7cd2a8cd4e4ce55+67108864 7ac5458db9dc2cff8f961850d4959771+67108864 eedd1feb836fce4707d7f44ece26b086+67108864 82e86b19ac612bbcc31206c40fa9b883+67108864 41d7129bb2145abcdeaa257b3b31312d+67108864 6e1240f3fda156181fe0a1d52ca5fd05+67108864 3180a5269907a15aa42c4e448724f816+67108864 ddeb2b08aeda71f0655c6cc786f2a85a+67108864 7db290b6db7f6d60386f08bf00518e68+67108864 4379e303cb12df9a9f28a0f86756737c+67108864 5621082de6125cae13913cad7cd546e3+67108864 be1af8242fce966ba091fdcdf5d3d769+67108864 1e714951ba26e3b7a010ba78ac5dc9ca+67108864 68921acf53a6b458cd6e5b8e0ed1a374+67108864 7c6d8b960e16168a32805ee8373f8eb4+67108864 5a3b518f9b1529a7c144d569521b7731+67108864 a0bae5564daf95e14cce3b8842b15535+67108864 4406c2de1a9787f1b589789ce2f4e768+67108864 118f08b4245eb3857a46276e45bab1e9+67108864 3d8d6effccf3b0342af688187dc4f3e6+67108864 d192c8d6741a6adee036f5aba554edef+67108864 965ae2d62cada40f496ac05b403e827b+67108864 81258bfeaf7a9fc15666ae10af305953+67108864 ae23e489f9d9eaaf7d2945855e972c75+67108864 297187be9f6fa75f3445720cf961ac77+67108864 2bcb1194581501e8c20f00264256ed58+67108864 16bafb9d0cc7730239d180101520734c+67108864 976cc5ddaee712bede5542e7b003b0bd+67108864 bcd02f6a3b00f4550730440a5aabfa9a+67108864 a455a0dd014194ae08007ea6e8ffc203+67108864 6495f63085f1eda582bb0475a44bb34b+67108864 76f39563689b46c281fc5af603fe744c+67108864 3c730aa83b0c01bc36b5cd8d666a35c4+67108864 637a2ec9a7840d248259466fd9025b05+67108864 0fc5ed24a204865033809c785cb7c5cf+67108864 4a3908945ac82bff0c9c133a42a60035+67108864 a2bd743f3304c65fedbdb7904f76c6db+67108864 b4b548164626fb0fb5bd4e1027c65634+67108864 b8ada412683bdaded98f4d772bfc0b72+67108864 b6230517ca29374241efe1fe500f3bf4+67108864 c2219a2c649a50b03e63447d7cf76290+67108864 7d5c44bcd5631650e79c979faa3ff1f5+67108864 e8589cfe03cac2b0958c8d8182005944+67108864 2e2260880ffdf589dd731679669d4162+67108864 ce30ca4d2ee870a4b645641fe46f4bb4+67108864 222aa06cfae2d5fdc4c54e31e0a2f7cf+67108864 7a5e6c5b9e962ad698ef50a3546e50f7+67108864 cbaa5957827447ed2f2548c044c98c2e+67108864 1316d790c4a0c56c96042c27c774d417+67108864 539b1ff5b0cf43d24364c8a2ddada430+67108864 d297a6c4ce2c3b5738afb24bfaf4112c+67108864 9c28f3fe301bb8359609a38272b99166+67108864 b14881938bd2985ee9780c543d79cd0a+67108864 b9131f8ccc3d3fb4d7879d3f6ca6ba8c+67108864 7b9bd91dff75a3da7898c54341a84797+67108864 d729676cb8f2032aa5068c0b6f3edded+67108864 36fd3979f82c521e0ac49591c38a1399+67108864 621f831d2103670dd086fbf74a2baeeb+67108864 434b0edbde1d7c8ccc524754ca511336+67108864 6191761726b291039971013e456860b2+67108864 81f56aa9beee03bf95e2d96e10331e6a+67108864 478af52460b6c572c4586067e621ecb7+67108864 d1b46bdc141e6beb00f573c7aa2eb76a+67108864 95e9acc5d676161bd4011c915d35cebb+67108864 31fb0b0f97de60f84cbbb9c6bae275fd+67108864 68383b28acd96e9a23087138fd3f67c9+67108864 06e7e4d9dd82b4dd307805302f59b6a8+67108864 f01d7a68a50b47c25636a61615fb2dcb+67108864 3d92fe7f228b5db0eb83a955ac765284+67108864 22543080bfb2cafc6276f067344b6c95+67108864 c777be42a3b970dbbfae050a59db63c0+67108864 523ed14e70faffe938d9562bda81b006+67108864 52e0dd73df65a5c28727088a336ca9f8+67108864 5d3735e054e694a998cd7d656afc498b+67108864 1a8a3f179210a12402a6308381e78a1f+67108864 8406bc8043e9a249e9575354b90420ec+67108864 
a8aafb1fc4c208d712afd441c10ce110+67108864 7ef66e1c5a9aa8ddf12e37881a3fad5b+67108864 1cc5c7bebb389cb5d7cb2768aaae7a8d+67108864 de67652071ea4c8bd9fca0485d687406+67108864 bccb14bacc9d2bf516903dafeba159ce+67108864 208b6c5356a84735b523bdf1ee8c3352+67108864 cc6678ac9df8cfd452e5cf84e7382119+67108864 d05d3afe72f85020cbf1c47619e08137+67108864 811a2caffe5295fda0c20cfe07eb9e7c+67108864 91b6e3aad7844631e03a4b4691b7467e+67108864 3b36c286695fd13293960956a1d9ec79+67108864 e7cf08e2fa2b68acefe9c7ed126b1924+67108864 01756f89511027e9f7afdc51155f9c68+67108864 b63bc9636a6bf211183727f9e124888b+67108864 b36a38a8b9a5b2bc0c42ed1f58e75175+67108864 059efb6daa914416834e244e34b2f7ad+67108864 30cdfb63916ff1e609bf2e53895b77d1+67108864 9ccff5d27dbbdcc68dd1805afb4a0015+67108864 9bdbbec05d44b0c1ef1198a474dc0ef2+67108864 e98c4216d9976c63f8a7237d8aac1732+67108864 a8d8205ea2e1356fad587c2e7bc8c8b2+67108864 49d5ecb84c5327966e083984c43d218f+67108864 702343649c578c8961b92373eb5e6324+67108864 81b31e5e42d1e0dc2499686be84a5a0f+67108864 92e94d7be66176d50ef2c7b75df8388b+67108864 e052099368f8175e3cfd319a69b88e5f+67108864 50e2cacd47f698549ee368fe0b86af2f+67108864 067a888f2fcf0e334a59b76efb988f16+67108864 ea973e82d757f6027d79169dec882267+67108864 d6ba6ba9cd7ed076c6ca75a666c1cde0+67108864 f970793e593c3b7b273dd64f3278b06b+67108864 181e4e39f6fe1eff305c7ca124de8df9+67108864 81d3e93e5fed970958ba33baf6514018+67108864 d94d8d656d73e7c22a9fc21d61814021+67108864 cba3f4ddd207d8d6550929d3d6f59e6b+67108864 ec09778d02b57020f1df80e19fc43df7+67108864 31f7f1ac9de231b22b0756f9f90cd1c2+67108864 f7f69f67fc00841c81539220938061ec+67108864 9a4e943bbc8002c7973b34877d3a8832+67108864 8b2dbd591511b111aade4ac6dbe7e9ba+67108864 3f048fe65ab2388790762965f79018a6+67108864 7295219eaa859a1050df9fd769a109a2+67108864 d414303c3465d3c676cbd88bf62751de+67108864 e2cc7f0c2a6f8f2be16566448187fb9f+67108864 67d498ecf74a5b2bffef378249de6986+67108864 3dc7cde461783f6603ae130c886747b7+67108864 20bb90dab534b0fe6740d0f9a91b69ef+67108864 c341fcf8c17f3f0bc8fb77b1c38a87dc+67108864 4f6b22463d3bbbcd1ec223769a991c35+67108864 49b7d5714200d8fc8065cb7867feedc2+67108864 3fe374cf036f57b052f5bfac5e777536+67108864 849a12126e6e77ef89c6a6fd40b3b50e+67108864 d6597ebb50acf6026937d7e34e0fe5f5+67108864 1db60c0d626a7dd6931ad9a8f012a7a3+67108864 c541492c1dd39a9652a12929de6c132d+67108864 133456ad3ef89c02c15255894c8b4c38+67108864 ac762257261e25124d0f3bff34d87e5a+67108864 902a5877d0ca144be4e64e49ddbc6a84+67108864 4bed9dae329bb5a139487c26f77beadf+67108864 cd57fdb99ce8f02a528e55adf394c3c9+67108864 ddded9688f0f1d644ffdd0713a418434+67108864 c78069eef75782c7ca73f2cd4b222e5b+67108864 770652d2d600ea9467c19cd6b25318f0+67108864 e15caa477a7f2919ac7f50421fd4add1+67108864 4313b9b47b2af2652e84b57faff175cc+67108864 c7d7e55181b1614e5db644302b89d7b2+67108864 be4e2c5abe2b975abbfbbe05481205c9+67108864 0eebb9ad647ca490eecc8fd85f9e3130+67108864 b81284f74640253d18110baf27b2edb1+67108864 1bf994344e46cefef0ef528a65b16616+67108864 5b4e51780ab0d2c620dd4a09607c914e+67108864 a369023a52d53a580c9ac2e5b6414298+67108864 1ca1cc6b116631ef9042b7fcc1a71069+67108864 4c238ef73d518fe846db3da59e7f3371+67108864 9cff0ccf69f6718db3e2a2c814d5e93d+67108864 7c08baa884bceb5a721a2d1c3631303a+67108864 3c4632c93c3d96608d76a1835fb7c53f+67108864 21a1e400a720f29ad5ef0c398447ab96+67108864 e03a0738f567dd01ead1cdf5195482c2+67108864 31b21120078f7cfa2ed8f365e7d4ae67+67108864 72b9f9dfd2dcf67316d397d592515465+67108864 ec2acd07100379e405ee88c69baf8700+67108864 ed0e5cf5845d7b4d029579396edaa93f+67108864 2d6e8325ae138e53a69daf270a6a7143+67108864 
165b7b0a9b59a0779078b237a0f43859+67108864 5426931979acf526c7dac829940b4744+67108864 a19f2f3722ba8b743f37ad4fc1de7efd+67108864 7899a1f6dba4a7561613ac8c346be233+67108864 d4af28e803d3acf8baf02343704771e9+67108864 c0acd238a240fe5d887796ebf2c59928+67108864 81c9046e203b9ea21a3854a08be3b6c7+67108864 19be9dc41ce37336f95cdfdcb9bcb1bc+67108864 592955756e6e716301b545670dee9519+67108864 664791aa59678b7f35d91490ef0e98e9+67108864 871796aec9cfe0957e02a973e6929aae+67108864 a29cbb99c16228863e2b94aeb52a4d9b+67108864 21529a807f3ca8c79de6985f65c229e6+67108864 65d73e789e7e2afb2398350501c94582+67108864 ab0f11f1bbf28776c8a942cc66a9fa24+67108864 f26f4e283ff6b79edb5b518483eb5698+67108864 eac4a53e663ffe1ba44350304e4f80eb+67108864 4bc565e82bb421fb178737d8b35e4de7+67108864 d6c94182c341a2cefb3c471fbca1aa5a+67108864 93053012095770ee0ec3efbbe5250dfa+67108864 861d66ba010c5138d68feb386a8929a6+67108864 c7ef775c4324deba52152a63d56e22ba+67108864 120c437c1605010561ae054991e0f06d+67108864 62fcc121f247bddeb2e4690e8794ce04+67108864 b219871e84e96e5e15caf19df374730e+67108864 7a3b52d2d071fe20ec7e4d1ebc3181c7+67108864 462271a7aace014a37e2278bad547c57+67108864 cac6ec7d98ff4b14bc19090ad1e7b5b8+67108864 dda917590a670289dba64ebf4ec81e7c+67108864 d8ed3cb70550899546ade41043264a91+67108864 5476c343b293ceb581ff79577c026993+67108864 bdd564f80a3ccf32c745be5e1b379712+67108864 674c08db645cb5081699beb789581124+67108864 de7b66c8448497df3b29bd147a6a372e+67108864 ff6d3e917e61990074f704fd680c8795+67108864 188c19e8e02c3d94e78a8e87ffc3d808+67108864 84c8d082ff79b3358a206846466a5bc3+67108864 3908b9e0a2e7cccf8d84e417e9742311+67108864 b03f0a17946803d7ac479ed9e24de52e+67108864 bad1eefd25bde16ef8234917d2200934+67108864 05b1547d6a2c9ccbc8ca098ceb9cd56e+67108864 227b71d3bc344620f24ca2bdd096d1f9+67108864 4e3a911661430007959d4d3c8195683b+67108864 e003b58fda6183184169d6cc75ddce25+67108864 7fad6f2dbdccee6cbbdb08cf1baec185+67108864 5b7acabe0b10aef1faf11f84606ee771+67108864 9fd97f1a0b485c0fec8b1b3719e7c077+67108864 31032fec03d16b41492a4f4be3dd436c+67108864 084d081193ca6c8ffecca056ea8f49c1+67108864 dae3aab6f390dc7e5356e53c9fd96e4d+67108864 f3f9b46cb5fe5960dc39ae0aff326805+67108864 c1c90111b2966da38021e23ef7624d2d+67108864 58d9d76b23af01b0b8a79ed6c8adaeb0+67108864 1b7ae7ec0e2a8eecf3b104c6a69b4557+67108864 0a32a1d91c8ce9e4139f1175eaaecdc4+67108864 31ca474df7cc3539f1811768bc88b1ab+67108864 1096ff9961700c784e2d8602f32be34d+67108864 85baad3a44ea3d1e052f9f065cbae2ef+67108864 e1fbe64ae53e81697580c7fe706dd637+67108864 00af57013678b23cd6701d2462c7cb45+67108864 66ed4630ae4c7f5650b77019778501d7+67108864 c53eba1250783154d4d6c796519b0743+67108864 2509db5b0048a95a99c8a05141d25a08+67108864 7bca0e2f4ed3f7975da1eb96e5e991b7+67108864 6dbc441cf6a53df1e0ccb2c458147187+67108864 9e68be13f859009d018bc89f013e2a5d+67108864 0b3260c834bcb8b24957ec1bdd834828+67108864 8fc3818ed7233f087dff6035d0d0e68c+67108864 af889dedf0f936ab5cc7751a3d8e273c+67108864 9619e43d50be73c63668dbe0143d7848+67108864 b172946e97fe8377517616b10aa6f5cd+67108864 483a1c541268bda2ae26f5babd1a3e4e+67108864 b849c2251770ba0a27b8cde071a7a8ea+67108864 21ad98835da14f125e0ddf3e5f641d1b+67108864 e746bc97b5689c1f05824139e6268be3+67108864 4bc2175232d8e03caf985f0de1d57ef5+67108864 0770fd6f7d5be477b1cd57357c65f2f3+67108864 215aeaa9a9883a9608751c8426340bfb+67108864 c30527b55a4841c52206b2ac388fd09c+67108864 109da8a7a4f5d899cfc2aec78f10417d+67108864 cc6b5a96b78c9f72186ab904b53bf6cb+67108864 45d85dd5079605ea72b2956e178936b7+67108864 511290d203b744c2f205f8e391565aee+67108864 292c5c76e08184652dc6b016cdf7bd69+67108864 
b6b9f3aa332b32172b12227452fb9a0d+67108864 3faf30b6e440a7e63ec5b33026c4e8f2+67108864 287c54f2ededd394a348db4a51f31d73+67108864 86ea3ab570a70fcfb97cad610f30b2ad+67108864 b0ca221dd6800789a2b0e3ea3b3735ce+67108864 bf170401d62007080905447557163bd0+67108864 845e02d0645ac0bcc74f12573af46cbf+67108864 a7074ebcbd3bc6e477d74789565cb2b6+67108864 dfed9499e9f1aabe61841af5db7e32d4+67108864 aac55aad3070c0f1decd3648bb8dc75d+67108864 1e430b86330b7c4f20235ad50934e29d+67108864 8e6952bca44fa90f55d7df7efa35c24b+67108864 c9bd6e4d596358098d46e9c9852ba08a+67108864 f0481b0838851da2d672fecb8bed0ed1+67108864 39351d86071a678a156bd6685830852f+67108864 6f405976b4fd4b9f0d0fc482f11cdbb2+67108864 b99d1829308454f6a6d4c1f7fa67fc22+67108864 a65479a2abf49e006fff628f524c64d8+67108864 384fc792211710c36090828945f633b3+67108864 1b0ef2edf16035aa787a2234a2773678+67108864 2997683ebe6be9bbdf19777e6dcb7f85+67108864 a97176ce1a49dcead6f98c5d21e21f4f+67108864 fd8b3633b4da408c5348c901ee991c5f+67108864 05846a8b6bac781a0d5f0ae93da95c31+67108864 60bd664d28ab876ab2e4561f45a81413+67108864 4b2333ef5f5d87b933c943f993baf855+67108864 6b6cb6b3c49d63ac5b8c27a7287ffaa5+67108864 5c4c18633f8ca72ce9e4762b8e39c5ed+67108864 cda8a7e76d682bf2724dc36dfd628fd0+67108864 251af4c3c4c0297371c9cbc18f5391b7+67108864 3d9cb1ee432726b6704f4f062242a15f+67108864 d9be281b494fda2b2e097ecee73951ae+67108864 64097a37bcb45de638b1d1f3d02525e8+67108864 ad7c99315b582d3eee02a188986bbb06+67108864 7dbbcd083f126383c604d25a36318240+67108864 953dd0cc0f65581a304a69d1d8c6c074+67108864 6a4717fd23889c9e57e2fc16702b2ddc+67108864 ce2cc9afe4e799d38ac872ada6dd1c76+67108864 4874f67fe3f584f4bceb296e08e902fe+67108864 2eb3f1c36065e4d1356e30f849ef3887+67108864 8224b179ede992888735c59abe61208d+67108864 3bfb71ec8ce2fe06abdc1c072d2109f9+67108864 95b077361de7df58fac636a8c63f084d+67108864 34ce1eabb9b94098ffdde1971958457b+67108864 f6dcbab5c3c046737e2603deab056786+67108864 97381d8b85c75d95e4060e90ed0429cb+67108864 8009035222650ae111fc04889f6a91a0+67108864 7de4cff45fb41d2ab43da4ca4787a1b6+67108864 56e158d2d96c87c61ab07aec79a3f83d+67108864 80446cb5b295a6f943c345d60ca60fc4+67108864 014e252601f01f531ccca2232756c299+67108864 4521c10e47a426b0c8837cc808e20877+67108864 acc6f83179ba69237c125d8fbb85f5fb+67108864 b3c6f417342f1b43da8d644c10d2728f+67108864 f7d991cf5cfb8923681dd841fec2c3a6+67108864 81a497293d206956365b5e5994acd53f+67108864 6bbec259f46456e0ecdbd4cb4b3e38a8+67108864 6920db353f03e0b89a17c6eba721024d+67108864 11e1346974f9c5c99ab4e116c8b6e721+67108864 dc85c5e79ca12b3ce241634db0887a63+67108864 02ba2753b14df08c6065fe3d8067f520+67108864 512295f97d8196f4a3ee96fa7fbf28a8+67108864 167e357fef386e6fba3be79338c6b242+67108864 be1203699a729599cb62682276d7912f+67108864 57af384e85fbf8f3794f7187dd66da47+67108864 47551bb1052fd5456d8146be45ed0553+67108864 b96a537acec586386129a748be6210f0+67108864 5d9ebd409950ea6daacbf4fc81f0b8d7+67108864 efde5b728f3dcf9f88d791b2796d155f+67108864 2d0b282d815567c51d601a70fcddd68d+67108864 c4ceff02ed6b0513cafc9e3246d1457c+67108864 d84624307b9c2ff7948a79ab9f98646b+67108864 c1a0198f44e5bb6317988e9bdd613650+67108864 41e517c6cfc94a2f01f4f3d8391a9c66+67108864 c88cb112ac786c60539be3892adfa4e8+67108864 5ccbe64ef42d17e7d9a5efb4e555bcbd+67108864 98d2f0a55f84aeb228eac5d2f1bfb880+67108864 c065beef0ef2df2a3e046c13d2f13c21+67108864 6d029e69045be3737979e2711bd50ea7+67108864 384231d48e990757125f8ed75de83b6b+67108864 0a12b9a19ca1cfdb25ffee4cc3b79f33+67108864 5b8cb8cc4670684db3308db01c15e426+67108864 de0a828daef9037a9c79f2c631c1e0c5+67108864 02673dc624408ab64b89325ed9758ccf+67108864 
7e3379b52437c0063d65dec9c0d425e4+67108864 fb6874cbe8aad09ac938aec059f8a64e+67108864 6f7e3055a1d27198d59b9db66e7f9fdf+67108864 592c577ce395168ab1e2b7f169f2810d+67108864 8dc6e1551248f7597693d128f0099708+67108864 8e59539a8b9c76e300824cb4c6e77400+67108864 e06cfaba57b978a4a60ed0d92830019c+67108864 fdc786f66bbe0ed72d008a9a93507238+67108864 df834d931b666e9892346816d1e023dd+67108864 5abd6568f3f49075aec50be5deb08b24+67108864 51c3db0547419829724af016a34376f4+67108864 5532f28919c3e73e4b311532f5173885+67108864 9f35d38d94c833e4bbecd64e2a5d820f+67108864 b9a010eb74aaea97ec508e46d2724635+67108864 96c729f98c1cc39b094e65e9912bd47b+67108864 37e46f23c8deef3b89bf4003034d3d0d+67108864 9bdfb8ab69c3f939c089658364e408b7+67108864 36e9ee64d69e5e766673cc0ff3568784+67108864 141e5f5a866f40a332481f8f0b5a02c4+67108864 13759de2988ae2c61bdc0646bd3f20c9+67108864 cbaffeea577a3e3022dd528979dd66a5+67108864 47d71f445f8a913639c2de35748ca1ac+67108864 c96f556870b01298a200c984c1b6a485+67108864 9101f4c9077a4999b25a816c5c7d4577+67108864 ac67846ccbaf979a06869dbe9b339b67+67108864 ae04dd6dc18bccc506d4a6c154ab0984+67108864 a7b1d2031b7790d6285ec6ceb9bc7caa+67108864 51cc682f1e4ca25e919b805449c99db0+67108864 dd675b1ce02a4cdb5010c23b8b8effc8+67108864 5402750a6d0d6b745bfb62cbc7a0ef41+67108864 abb173de47c177a5513b6eafc8e067fd+67108864 821c634696374641e713d263226f585f+67108864 312745c720c1dc832b990494ec21d328+67108864 ec41d509395490b699cbb5371748dc0e+67108864 571af59f889abd5666ebddc4acbf26e1+67108864 d13ec87ec18d2fa7c8594a7a683b2b87+67108864 c27783a42ed1b39611c5b4e56b898f89+67108864 256e0bd60301b630d4d9ae492fdac4ca+67108864 25e79caf8013afb12230195662f86195+67108864 06a2e7c753f64011e34974e7c2190b41+67108864 534e4134093ef3418c0546c6e4365783+67108864 4f973a9f34db9004c06c99a3e74b3586+67108864 37f3d57d8b0d46fd66be22d20504c3e4+67108864 f36e3a3343c70e8f6754d037b029ceef+67108864 8998b57cb2f3a4d4478b18b1a915ef22+67108864 e9a0de38f943bbf35ee736689b88ca2d+67108864 9f4b3c74f27a057b41ea1409c4da74e3+67108864 06b20eaa64f9d3dab4c529a8272248f8+67108864 6aea7ee9338fd608ce1ca71c8138dd16+67108864 add48912237a5d2310566a14e713fb5f+67108864 58471508500ae0331ebed69b053b3c1f+67108864 0f9c0aedb6cac897b66f462066aad408+67108864 7ae5b8003a518e6e73e0789744698564+67108864 f4997614ed3b49007c5ab44d55f9b2db+67108864 dee630fde59d34a16f13e06d696ff961+67108864 b24b9f7e6ef3ce54460e0c642229d8d5+67108864 cb98440ebce12bccf65c6b0d03dda757+67108864 1657ddc3534304afa69ecf8aba171de8+67108864 8136646433203d0f92305094371a044a+67108864 86980ec972ce5b94fd523b0005ae531e+67108864 25dc54c2c9ef76bf73baef782e9426ef+67108864 426c837fdc0e955984bd4abd2af4bf49+67108864 41fa3f238068fbebfeb9b5158ac624b0+67108864 c8d5838a82ffe2b1d160d9ecdac6dddf+67108864 cb8d6dd2800967ce59054dd528a34523+67108864 9d217fcc11e3c1cc0db31410b9725c2b+67108864 11fa34b137060862b6066b0c38583c64+67108864 47bc8a59acaac90e96cd61fa2c0f7dae+67108864 26c46447138d5f96e00b9a3501c1fdf7+67108864 3d28f5a3dfbdc118eaf758d5ff3e4d06+67108864 ff609548cd6699e0c060fedff5bc0ed4+67108864 9a6590c4f9bc00bbf1da8e0f06a5e152+67108864 7ebaf211e7365e86f04e9a8580c64ed6+67108864 0ff0d02833920ca107acb6f023f561bf+67108864 ba4f70a5ac053173f79d60ce3b03930f+67108864 055da2c4cee6215dea4d53f147b23004+67108864 b5e1a6ec785aba278cef39e0a6448a44+67108864 5512c19387b1dbf55b0966fea0996859+67108864 8a7d3fe39533cc44993f6f7021b101b4+67108864 b4b285ea1691d70835d477003691f40d+67108864 fb143f5264c383b1f9bb0aecd311040a+67108864 d990e38d43a13dbd04583b2509464048+67108864 66589506c3fdb72c89b4d57d2010f82d+67108864 fd266e7a1f8a3075f258c1884e8aa28c+67108864 
148463ea6fed5dda988f4dd6cca2386e+67108864 a486f29b29d8b941513c8c0fb1c6e228+67108864 8cae319912b2b63bac4ca1ff0293cdd6+67108864 c2175b5c46aad00cae0dc874fa78d735+67108864 94fe5a4d987ae23d32ddd942654dbf34+67108864 366fee4ebcc1c4436e09886307d594a7+67108864 145819f02f913ab57b95aa6e687c7b65+67108864 021b8921b8a0be01fcfaf6a8f432ed1e+67108864 44aa8530fcda59a7386ab753d852af90+67108864 84c95c93dba1c2335c5132aea1bfbde9+67108864 ccad6eed58375535d5888a94dfacf051+67108864 8c70c1572583289f4f886b3e7e36f4ac+67108864 1f282e0018a1ad18505c191cddea2cab+67108864 c95e3457eeeda4ebe2db198892985d53+67108864 0cadf623f5acbd4c9bd62125de6a7a84+67108864 88bbe2bd406b21002e577196b7dec646+67108864 01a85e867d8f17994c267ae9f28802de+67108864 254a198d49a7cd9abc7ae6f9472ff938+67108864 765c620ae8b180b36e1dd15b9e46415f+67108864 b3bf18d1866f90046c14b21e1a49aeb6+67108864 64586178bfbf5919d7331c5417f343d5+67108864 b9780d2f2273ff02a535f9aa1d3ebd85+67108864 3e91ea1b91d943edcdc02ec2d2748f41+67108864 9b98df65c1d3b5b33e49b97177ec1b00+67108864 06c841928cb336b39a3fbef2ec27a411+67108864 f8d137eaf5bda678bf7822fa07e1d5f9+67108864 69843b1ca92cf826a802693ef2b2ae44+67108864 992a200bb822df303b43a2351bf302a0+67108864 3d20c7b9643d676eb2d2618d34c98270+67108864 31a88741befd885c16bcadebfd5a8cee+67108864 49ad328a8d37103e54a16dbae4c3055c+67108864 215e0e9cd05d16694b8cccd48dfa5abb+67108864 987fa48895459fc6f33853ab57fb6dc1+67108864 a65ac062423a006d76fcad051d72a65d+67108864 a7f805ac6a34145fc3b8d667ec43d576+67108864 dadabefe4c732ddf56e988a0ce034093+67108864 00e73b019e926773bd8b44e55ebfe89b+67108864 87869dda7d5df3ccda8050b44416b584+67108864 76c6be6d066a24b1b9b0aba1e3673f70+67108864 2c754bd71ef5bd4f72ade61d4434a7d4+67108864 7550d3c5d929e2433f167307593e468c+67108864 38113aec6f56566d7e9567c3a17f28f7+67108864 f7be1e07c0d71923a78e80fe90404bd3+67108864 7cd4833b1898db8aa453b9004e25c4ba+67108864 c68a382c523a60f0fbb0ab6073ea910e+67108864 0b771a04b9796e9f96c8da6c0e6b0fd9+67108864 8693722335dbf3c7b959c615e0fe6924+67108864 5cb47ebfba5a7db4b42dcc7fca2bb870+67108864 636be78a2c685b4da74e97cbcc124105+67108864 551b759b58ac10312ec353b7b0a267e0+67108864 ceeffeb5315ea9e54ee2050526b23bb8+67108864 c4e5252e8b0c6089c22ff9da672467dc+67108864 ebbf8c423715faf70b3af0077088bc6e+67108864 991c7c2448f265e70b0ac16ca43472e2+67108864 7c47a1a33a8d8e9588fa45f42518cc20+67108864 245c2168cde621737996c3fbd6c470ba+67108864 79047629ccbfef51995de48da7f3725e+67108864 e4bc4d51564c761c1f0fcdf1c80c6699+67108864 f14f733fe67b971cd66fd8b36564e29a+67108864 7eb7a28632a62a963e209bf8bd38f332+67108864 32ac8dcc12935fdd2d9171bcdfb015de+67108864 618f64f3ab4219f2b2923086be90d457+67108864 c2ce069535fe3beca8f628f18f73e653+67108864 39f9feacd8ee8a9165529930e6fa944d+67108864 6b97fba9adeab93c0021133ba08b2b47+67108864 5fad45965eea86e0f32f7631f852ced7+67108864 5c2fd36a54b250d15c4a04d6f4c9ef1a+67108864 46e18f793a73586043daafe0e013799b+67108864 1f5d09b11cd22a8f39d80e6162754274+67108864 a7333383f35cfe90ea57639ecd5e0795+67108864 2092efd510100ab4a93d08b240d4e551+67108864 6c8aa179f44bfd647c47eb5b5e04face+67108864 848379844fc6af50f86090526fe222e0+67108864 aff62043ce5de2aedcb03544e6e5db57+67108864 cd791e50bb80cefe40b2ef1b2ca85526+67108864 2ccde8a4341f0c8775c3539877e53fd8+67108864 7e2c4ca905113146e57920b996f8938b+67108864 c1ab3da1fccde2e04b990b3f54e7e7e0+67108864 2f152fe6cd43ff4bc5f9870189c5fd91+67108864 362763218d47cb2ad1be8827c7b51aea+67108864 9525519796bea462fd67a883c547d60f+67108864 13689b46f56b81757265379951f4bbb3+67108864 b9a0e158c2ff00011baff7d0db7e0e35+67108864 ba1a9edd255ca7957fa64b6c578f0de4+67108864 
9b63e5f3b4d733d33de31dc2a6a79b48+67108864 5ef12f13166b39bed94977b68310fcd5+67108864 a515cd79b3361536326bcf2abe9b48f7+67108864 cf2eb5da8e95bda57de0836808c3b40b+67108864 65570f99a823ad70b69fb603c400f355+67108864 02c5238434e15f45094ca29aeda3c35c+67108864 596b6c24fd2f6414fb2d29b4bea5b104+67108864 3c017435093274bcb039d83568a7131a+67108864 47c1b455a5604fea60e6b906fa91488d+67108864 e074b3d5b5c478986b821cbc88f9dd77+67108864 4408bb38d2012b2cc8d856a83f8d9dc3+67108864 7d5a0460cc1d38fe20c194a673d6461d+67108864 3e79d38393f59cf7eaf7eeeb4b5ee82d+67108864 452a7aefccfad5289e42a3a3f8709ef4+67108864 6d0398bd5641472117545815d7906eb4+67108864 63f43499c4fdd70f9ca016a997777d63+67108864 ea130df56228d6d0e99397a1e5213ba3+67108864 da9b41f4c338e33f0a913e925f5ceaab+67108864 e1c79b8efd129398d6e8dc84daf0c65f+67108864 727b14298f8be46bae6553c7705eaef2+67108864 d7d9955f80c1b8f7f22d75ab64599aa9+67108864 fec64254c1e06da83c769861a48e0abf+67108864 2cc3c9076ca84d0b3d1424ff23f5e18d+67108864 be0315b12077acb758fb6d607ec0d9db+67108864 609a5aed43714ac588ceef16be5cf0bd+67108864 e8528687518544546f15d104e42ab36c+67108864 7590add63fc192d47386d025f96a8bf4+67108864 010654a6ae811cf3c911c34a7c306282+67108864 ff72a1acc0072ed5dfdd967da0830b6b+67108864 df6f44a67ada6387036e07449b2def8c+67108864 14405c4605e26f7114cfee29afc12e8e+67108864 e3f76e3d0906afeb8e3491e7890386ef+67108864 04df6b259de0b2cab861456dcff4707c+67108864 e9c9c7dfa8627d8b69a3d37361965a8b+67108864 a8f2caecea074252e7d7268f161c3ad6+67108864 a3f0b7cf9f5285990bd09a991e9046fb+67108864 5f08577621f017b31fcb40955fc95fdb+67108864 e4f5c8e2127f0e6c3387acb41b84f331+67108864 eaab7103ab17f9ef849287e24857b9c6+67108864 01dfbcc8bc5d7689f5f9b0969b1f50d7+67108864 dd0e627ba4e7cd97cfd07a89f34ab2d3+67108864 628e500b4a1e5739827242184dcc55f0+67108864 c1bd7e3cd8f6cb893c6691a1c132e41d+67108864 a8ce57e72e494f6ff16c01416a715388+67108864 e5e4d21216c2be92faead68dd4a7cbb8+67108864 fba18a980ef743d02623bdac1fdc11bf+67108864 fd35ea347e664d2bba5a06cc5b5859ae+67108864 c7c3772463ffddc941f864cb561196cf+67108864 e3d4d8d4c0582df9c4d9d7df34eb7254+67108864 0e96910a0a2a49a887f7c4afd7c6111f+67108864 d7d0ad947a996de69cf6d20f34dd0a75+67108864 7513ee25f082164fc1e2dd9303fd6dfa+67108864 ba35f2c1e66448e18ee94a7567e901ca+67108864 ab894212dd52e4048db5db39267af038+67108864 0fe5d7c56c2cf526a0fefff4361cc41b+67108864 6a72fbbf322b4efaffec4c17e16f0337+67108864 7d7ec2f367837d0a99c45f13e2a72ec4+67108864 e531e5a0f983b625b97dbeb6c8c75e39+67108864 141cca3d2ef79428c45e31b8196680ef+67108864 f5797709703765504615673f3a7798a1+67108864 42a7d894d9bb750583981d721fc0b43a+67108864 3538a8dbec2f3bfe2752d45a71e70b89+67108864 c80e07e5d0ca589894bf774edc001f37+67108864 47337a94882b8e782b9f52f5cbe47df7+67108864 989387d2308d63dfa14f1984764daa1d+67108864 ec0611f2664f96a0a146e82efe340723+67108864 fdfe2ea11dcc789a16d09e7f57bd724e+67108864 ed1c5b025df4101ce2d0453c52bcf44d+67108864 c871836dba44d682c2e9bd5068184083+67108864 da3e03f6d48247957ddf1138cffcb069+67108864 f0a4baf3d0c7b390005017316d5f2466+67108864 6c4a4f37c8d1c2e96ebed82c2112f2a5+67108864 b16a1edfe99ff8fac55049dd1838e3a9+67108864 830da62d52b0970e8486e09146d3c380+67108864 4f8285c1c54f0bb0e2a3548a709ca801+67108864 715993ca1791faa1c2318927f99e7cde+67108864 4620f01f02827a655dc211e6d19da234+67108864 ff6e868275e6ad24c573e4c158b5d36f+67108864 6cc04d5bd2dbf95a8bf51c3b4514e756+67108864 c81fcfdc4df6edb054225b043a3d0006+67108864 7eb19468c9690ff9f5ed0606e76f1ecb+67108864 df142d2444521375a0b3419e4ddf289c+67108864 85839ad5cd690b60fc3c9ffde0b16a48+67108864 a48acdc808f3f6871c5b1ddb61b856e7+67108864 
0702af55adf78844d8447ae2edc612b8+67108864 6df29acca1f5512741144462c9965066+67108864 2772528da2379d504ee0a2f2f8f6d654+67108864 9a48c99615d21cc3803bab1109d01b74+67108864 c43285f8335705ff5db5c273649c1abc+67108864 95f810aee86d91c03e830b0b07c35802+67108864 fe441d6f0df7600e82f39bb8b783d689+67108864 a5fb5cd4c8bafefe7295d832d54d153f+67108864 fc3d46569b39762465fe0a52a4794e53+67108864 604abe47fe48c7ee596be8cd8593a48b+67108864 b363fd001c92188e289b26d54a1b2ff6+67108864 fe882c56c35ff21dcc34f96531254c5b+67108864 76ef911b4fdd4f71d93b74bdcbc9c090+67108864 e2ea51150b31146e112e909f64164267+67108864 d2c5b720e097815e664f6c87f0cf4ed2+67108864 97e7507df7dd00de336a42dabfc2c2a3+67108864 4491090d96143ccd1901779e9f00aeab+67108864 931d4a0940ec2eb34fef01cfb8dd9595+67108864 b7b81ce4af050cd0a01a0dc86a794805+67108864 77b4e0d34dbc3684fc6cca5120bc3456+67108864 ae2f71e229dc701bb379bcbfd00fed30+67108864 03e3daacfd7829c88c098033ccab1fa6+67108864 a6304b94717ba7d063489a01fec40e1b+67108864 cd4755b79ef2912a216fcac641af5635+67108864 3add1f551929e2bb8465bf103566afe2+67108864 575f0da37b5e9d2041faa60d5949af03+67108864 ab5a61475c7255b70370fcee5dd9d94f+67108864 30634d01c7f400d31d611cc7a7673d92+67108864 e31a7d521389ce6891f698cf268437b5+67108864 846f9a6630755b7a8b882effb86d2020+67108864 52e1eaf4662a1bed7e63d5310a893a43+67108864 8d09fac511f0037c94037b818268a2b6+67108864 774e3872523f78fdfd6b6ebce2b68c2c+67108864 ea5f1d513ec4f40d70c8f8e0a8726002+67108864 ac296ba33f17bdb4e9159024f8e4cdd1+67108864 c9a195ffbfd1259b22153695b36a2bbd+67108864 38b4931a017a78fcb6fc16b0e72f3ce0+67108864 702d41530330c10a587400342185ed59+67108864 05ab5fde413857fed335ff0deb326a30+67108864 2651b1e0e66b3014f752bede5e011723+67108864 a12615fc13b38ebdca36487bb3591724+67108864 e2992d9c680a9f7e11270af1b133e529+67108864 fae3470df006aae3f2bb2cfbb0953ed3+67108864 701c0290354a64872ad219a9244fb8c2+67108864 b93e0aee1e4f38b7f7fb4e16ff8d1ffc+67108864 8964e1e6e989bcafe111522b00d47713+67108864 51e9e5dab456ab0eb7b59279c4ec06ca+67108864 725c44c44536b8ac3fa0a9ae25d9baa1+67108864 4f39315c0645812f34179bde2133fa19+67108864 7bd82c0b2754702ec71a0634a9dc1d17+67108864 5de934e6e85aca1a204a89c3e99824a2+67108864 7bfe74cb7feff036a0ee21f370712821+67108864 0fc5fcddb3c53f5b53808cd958295800+67108864 d3f20cd29545736bbd57bd50f07b1579+67108864 0c64435faf1ab3845fcbb0e32d82e999+67108864 ea441e811b3520c2822c6a953144413a+67108864 d2b394a3ab92e0856a230fc11ce9065e+67108864 aa0c5f125034b3dba21fdfa3695d6a01+67108864 a8f0b4118ce8b8cbd6b7774679032512+67108864 339ee569c3993742cf8f7be8d874045d+67108864 1b38e6bebb07ff094362ee79fcbb6989+67108864 7c0e35f1ecceaf8a7588dfc8d3aa076c+67108864 fe15358161d85528453800ca110a449f+67108864 e13a94ecade9c1334056f51001161f0e+67108864 5ca9d755a0db3f62206b87f8489fa84c+67108864 1005fd30a8e26c4c04f5b39911722e7e+67108864 21d7603eb5492c507fca121f14b8d132+67108864 b1c8f98dee262a8a2084e1382454942a+67108864 1be9511623dfe333fe39bd2677811406+67108864 e6439cbf2bd93d45b11141be27fa80fd+67108864 12e41768e400ded7a86e78ed33446b65+67108864 3d99f810b556b5eac4247c3317b8616d+67108864 23632aa22a1a08cfe7a899f568511e87+67108864 42b10f75f25df8d105919717d081a850+67108864 aed75190ef2bd0c0fb2fad3e8a943934+67108864 fc57fc90c814367b5b34429fd52b4ec1+67108864 53da1e5576aacde23c1c86e1624d7b18+67108864 ebaf7a0946e3fc47a636389faed47d09+67108864 7066d5399a64e1dd669506972b53bdf9+67108864 077a6247a557f0f648fb492308f4cd74+67108864 0e0a5887a578b38577464d267e2b6df9+67108864 577478f28f10be523db58f30a56c13cd+67108864 df2840943cf215aea832ffe6252eda34+67108864 da6f29900f7677fe60efb9729da88c66+67108864 
fcb41e115ba071a96b0a3b49e8bf0103+67108864 8a753ce795051334874fdae207855829+67108864 b13343e612361dd8e39ae29e41176e5b+67108864 704361fc939d60722e7bf633ae91d53d+67108864 a1cc9ac3b719fd41cfc2bc0d94c1bd2a+67108864 cd40ded00769b3b4bd845841c20c9b34+67108864 7c72ce73e8a5ad438ba8cdf99a65f9e9+67108864 d932108c4d3a8476491092cd581a905f+67108864 910a317116b2dcba43584187fdf33ad2+67108864 71178cdee4f382d77af80021d908457d+67108864 847b3fbb23de4510d6a10f216b1a210a+67108864 3950c35ae2852796a55eeb319f22f6f3+67108864 bfb8801bef84493b831ae34a5f73ca4f+67108864 955a59ef5011ab222a40de7cf3b507a7+67108864 e519cb7c85edabb54261110d8060de80+67108864 da074887680e187ef1db4ba128997cf1+67108864 b18567fc4c64b4aa0c0c0f204a01ff97+67108864 a41418794f481bf2b8409ec6e4267ffe+67108864 eb9b344f48720cefc217da3cce124e8a+67108864 ff92e78105ed0cb6cc6cdc3270faabf2+67108864 a2d0285e1207083dd58bf9002abc7fc2+67108864 147a9ed33d92f99fb89b685140a732d4+67108864 3f0138f29d10d2a73cdfa2a83d5653e4+67108864 4247f786d38761001e7fcdaa703e9b6b+67108864 e2974a5009436d37873f17608e406605+67108864 d491febe0a56b11ce2278deec0f617b0+67108864 8d9439a2a051307fd164906bd5c6bda2+67108864 8c5cfbbbfb6c3666eb4459a98921c954+67108864 d5ce682bd4f98463fdb3d773f518acae+67108864 e9dd69a37fc45ee1db12d0162fc7e165+67108864 52eeca7caec19ca02a391ce6bfa9be07+67108864 d64c37f9d21f14bf9487d5628a30eca1+67108864 f033519daee269196bfe92d70af67b39+67108864 c1282ca6aceba771a171f726d6a6ac16+67108864 1f2ca1e52852d72e5a57a1b16cbc3f74+67108864 54636fff02ff0270fa05e4d8cf29d7d2+67108864 0d39d0910ead42e48d1961bddf7ff5b2+67108864 44da2d272ba8acd54826bf012eb40e59+67108864 d80fe9f849963e7eeae297a546fcb845+67108864 0b9047663ab22c2a7dead55b79a368ea+67108864 0e0fd7eb8ae183106e9983da4f46532e+67108864 aa6d7435fdb5b00a690e3582f82cd6df+67108864 59b2fd0c4db35ecb13fa5e4dd3160df6+67108864 d1c103ecd75bdae037a7e4697a41333e+67108864 1d8679726c26461224e0a2e5cb0acc5f+67108864 f61e8db26fd4758f7cc84569ab2b028a+67108864 a2cb1b000b5332b21f725178bba5c4b5+67108864 0251aba53ffd3f3ba6da0fe53e80f315+67108864 0adc72a031deea5b21caeb4d49dbfd1a+67108864 da64eff1540d79841d656ca7191aad04+67108864 fb2b14ddb10660379ce55f727527ff2b+67108864 d4338af18b258bb8ce4931ca7ed05064+67108864 b59bccdac8c48e1a690fec35b6f8a8a4+67108864 e44379d4229ff8d166628aa4ad441afc+67108864 a2edd9b7190d04989e9ace2fad760116+67108864 efb094f97d5e1e44ea6534fabc8bc67d+67108864 8cfbbe4f5451f1a9a7667413b67abb00+67108864 8801376aba9e6efec6549b0c3e29c978+67108864 f8fcc193b53dd209a896d007327bee7d+67108864 7de9ca296a062cf33faf9ea8f312ccb3+67108864 8d69bae5e32e7a4ae1f6dd57e0d4bc27+67108864 09cb36b4aac4a4230b0f1933ff032e99+67108864 1b69f061dd6daa99762976d61c707490+67108864 b54b276356bf5e28514a9224be56eb30+67108864 2d95083444f68bd2384ab2482c545a93+67108864 8169dcd201cdabc17a0c651d69ddb5b9+67108864 8fea21ea4fde472cf5b9222bd2966fb4+67108864 bbf9e95ef3f9cd4eb43f143091ea7d31+67108864 081f91f057e44a5703a96560bb310e23+67108864 b696d01611c9215f4a93fc7684638ef9+67108864 498b72c742dc776c2b0e1525dbe16c2b+67108864 56b958bd8492a7d864ce60438db57311+67108864 333bf76155e916a6e4d72531c12b3b61+67108864 512c2e49f16514c68dd06a242c3006f4+67108864 0fb48df3a292cd784d3a4e1df2efea3b+67108864 b43562e4baf12fda2ef2f88e655d24ca+67108864 13b293acaea889c5672a10096d0b70b6+67108864 87232993894f0684d3071118528acb30+67108864 f7ebc10f8e47b12167c3b18a9b40af4a+67108864 7aba76f27f35b5c0c1ca8750c5895f45+67108864 889d9bb6f270501204b48e052c763e3f+67108864 8b574864a6d5ce579793d4d232bff6b3+67108864 e19cf0756dc6b54f2ad8bf750415772e+67108864 88e85afb7736347d28df444a048af72a+67108864 
06bda3da34e66eb63f1699fef8c70047+67108864 a06ad6b51927d11c35f8cfa86980a713+67108864 68b3204eba15d00312166c99fa33eeab+67108864 98b12cada97a8843dae7448ff380298c+67108864 5ce5c167e9f03631d6d30cb6e45a52a2+67108864 348633f0b38db8610b435ce4f53fd049+67108864 8651641cdca73876e3ee249973888a4a+67108864 ba2e2453070570ecee21e549cdba4c4c+67108864 4c14365d045edf5f7db25a6077cd18d1+67108864 199d38530de4d2bbdf644082a5cbfaa3+67108864 ea8e9092d88d7a2ded26dddaea3e35bb+67108864 a981d1b9cc878eb2825e6ac397204ced+67108864 6c8a6a15f249927de8c391bfa9a13887+67108864 5fb6f06f70251fab1474bd5cf827c4fb+67108864 51336a8ac8e66b4cf183a87f83184a58+67108864 5e7324e999bfc640e1e4b8e193c2704f+67108864 ed0372472af294f0a70ec98f4b12eac5+67108864 97dc3c2bf973b650dcc1b14e4e307049+67108864 ac2dbf604592e32443f698243cf21b43+67108864 4f6b487a03217b6e2db48fd609ac3fdf+67108864 19ce146e7d279ace45a056b40c8b0e2c+67108864 7d22fb7947fc9a8f389ae5aa2681b767+67108864 9dcebdba72dfd50659747073415dce98+67108864 10982d30d47b5909b6ae40d073e2b7b8+67108864 bae58060b6dd999976b61149d05b2d9f+67108864 0fd4d4d81968a7e683c1e736af26ca08+67108864 ef25f3edcc83b407d40b406187c7e73a+67108864 befaf13aae2e584e9e3ad1c7a5e98f64+67108864 6880080067e109394df71b712d201f4c+67108864 485c60ad6add65d4eac035768e43db0f+67108864 902c2519a1d327098a8936486f237c3b+67108864 e0a1ca44d4a9b71efcb95a8690283a77+67108864 5cb09dac3177616dea86b0683c63d379+67108864 5a08f945265debf4ec6cc61e51efa702+67108864 f0179759fd17199bc13902de79374753+67108864 835e93df3ad2cfcc3b6b0b0ba26ecd52+67108864 7ffd15a96469d15bb821573bcb406900+67108864 282105da606f63b2bae3bf91b8703ad1+67108864 0e67f64306d1949d07c301077279a257+67108864 1ff5dc38a6e08ac8737f7b048883ba57+67108864 0c48efc7f081f704b5e81b37d2fcf78d+67108864 48cb575c08d65b849e336a528fd1fdef+67108864 2f89ed035978f3eda7402d3a352f172a+67108864 ee96f8e4c159fecd3167f2d5f89cd1f2+67108864 44c196f7b06576ca183baba37c07ed0b+67108864 c394702750ec25144bbd50864817f412+67108864 ecb5f5dfaa3ecbeb44f850c3f39e75ee+67108864 9189dae667bd9bb7efc65a468657e108+67108864 cf32ab8783731037450e3b3bb1a7e24c+67108864 970130f22be6713fe29ab1b4654508ef+67108864 3eec0cba1a8e599c776331fce72eb135+67108864 40f024255335221fc9b5e35147956717+67108864 0a23f0e47c7aff96458df2d8a4337b43+67108864 aa4dbb73141da37741ef87f25908de08+67108864 b4374046f4aa89c1bc56ab4ff243712a+67108864 5b3b59037994aa980b394d984a107364+67108864 1856eed7f374fef9a6a2d54855646dc0+67108864 c58269b0c2137b24b4ab73d017382af2+67108864 3ef84af204b89170bde51ff5b2251580+67108864 47eca65588f21d57d6826ab968ce0ee1+67108864 c1c8a064e8c5cb4998db2f63fbe38aa8+67108864 7989eaff82ce0c9ce44b07d30004f736+67108864 5c550ab03a78a8e95591466c98e73c35+67108864 a9bbfb746766c189c72e01f9c5f06893+67108864 df809e32f246382bd14dfaba10f796f7+67108864 c08575ebfe83afdb9d99c8b0a20f204d+67108864 342a1fff271f3b35fb476136ed1046aa+67108864 be53838e3da15290c9450dbf615a23a8+67108864 3d15a3114e259ce18e60f7d162b23b49+67108864 3fe7e7a6840c16fe922ee285073a5e34+67108864 0a98281ef6382ece1136ed68f1276e4a+67108864 d8242a4c2212e41de5d3871407813a04+67108864 89161d238c95e16e74ff7e98df4dae3d+67108864 2cd9e980b0c040903d723bcc097873fd+67108864 58c233f19a14fcf85763483343f68ed5+67108864 cd9ea5cb906ebbfe11989f6b2178b1e5+67108864 1ed5d50de85e8b1aa8a9fe351c17ffb3+67108864 b207e05f95076c307944bcf66e8271b5+67108864 1e16d3914e27d93a30a1fe14900d5bd2+67108864 3dd41cb336b5dba6ce1ec12a12d70838+67108864 0553a8a876bcd08a9e59b672dea3cadf+67108864 3270098cdd3a653802b64b97e2c013d3+67108864 74a4d14216f2e95c3c762fa899810245+67108864 c8f6bfd2f49cb68d2c12bfcddb7a3a14+67108864 
d57828eafb6113e1378635fb7d811ef7+67108864 4ea5da07cc79e707cfbd46a05e71f626+67108864 fcf7ed7fd390b222f75879caeddc10bf+67108864 a9b52d652eb5b057507664e55d8c2065+67108864 1952492c2a5e553538b01b8c68754fba+67108864 1876ea6728d7f5135308bfbb2f907f7c+67108864 637eb85d012094cdce43454ac707b186+67108864 6146cbfbbde640b0ba81721ad730518d+67108864 339994a175ee6af23dbbadffabf5abaf+67108864 2512f64ca51feb66de4943bd63de1389+67108864 d45850194dffeeace32b87161df8b637+67108864 70e746b2c7a2785abd140bcefb359d5d+67108864 a675ca70dfc82b1295b8e934777c2963+67108864 89b608bcd8e1ae97b1b54a4ce6534f25+67108864 93fafcf8ad42d73962773d45aa703e48+67108864 b2ee61842cb6035d392cb39a9f9bf3d1+67108864 65201c355aec6c098ecbc096d807a94e+67108864 efc47cc3b58120afaa5b12f9847bd9b9+67108864 815b18871706defdbb32f52f79888b6b+67108864 cb212280920fd14d7c083e244fae8f5a+67108864 0563576707787553605de7c2c3b43f69+67108864 706b06048a3c162df3eb8c76fa4b9a1c+67108864 39ce8aefa48b3e1ed7387d9374ebe607+67108864 b68c296491338234d53ee8d7b80e20fd+67108864 6a604b76699c7b5d1f28c783ba7b785f+67108864 29ef63b9fb1236065d93d649c14a7696+67108864 3c42c2f540d3961241f8ca697ce4a15e+67108864 0f2212e11f9abffbc05b49c7085e6692+67108864 66bdf3d4ba4a002e86e81a0ef235c6e3+67108864 84dc9f05a6292ae7a1a368b39ec5ba61+67108864 3a9889f10bf0f526c8de4d9ad875cd25+67108864 8953d1e682dd25c8df3cd5b659db5d4f+67108864 ab7be59c61570efa5dc2b78cf83676bd+67108864 dba9ac0eb05c49a6cb308074a19206d8+67108864 f06d174cb6ec6a6ce10d0149c62d1a0f+67108864 11d2f482b141766b0ba4f53e59c2188d+67108864 9fb4a1a3ddf95e2c7135947af3114e2e+67108864 275bfb636251eda218715c1aff06e503+67108864 a0c89242b3edf53737070abf6ad87afd+67108864 8b4763a22fa4a3a9f045d22e4b11270a+67108864 2cca0708700a877794c8590619f2968e+67108864 b9d7eb594832c2b65a93d83ac35a19cd+67108864 d9f5beeed66607e179827b0fc6185139+67108864 f97887998347fd8619bd35cb03da2a4f+67108864 52c088791a443a992cba0ea3fbf12e1c+67108864 78f71f4bb45320072b2fbad6e5725055+67108864 652e98cbe0e04a9dd5d2b6ec2fc7c04e+67108864 550cf40ee7d2487066f44d7925c7b709+67108864 00d1a4f4532f950c85668a7748d38cad+67108864 f085dda612a3afa91c9582b0c5d4aaaa+67108864 fe1b1abfdc6bb203c38909ed0d124709+67108864 7316d6a08a14e2d840d524ec3700e837+67108864 e00f6b79c66e8e1e23f52c97467fb161+67108864 a3a1206eac6c1439baece870f922c446+67108864 fc677aeb5ae37d160ce7f658672d0352+67108864 b306ac3b86b18e3df67c12af3432f0f8+67108864 de6bcace6f2bbafbadab0830f970c581+67108864 30fb4d875646e3ee8b9f284e9761de9f+67108864 c2c9e0be88875d7fe8b93ea622f9b44e+67108864 dec0fac2abc6f1eaa399e5640819a000+67108864 2aaf7d4f5cfec013454efc6b1e702d3b+67108864 5a9d28e7cbf92495289d4719810580bf+67108864 5f618fe336664eb82b35dcd1a3527305+67108864 52b997bc9b934183e5b69029d3c90dc6+67108864 fdd35a04ec9d5938f7ba040422f09b50+67108864 b8eec7e11bfee019bede22b20fe0b59e+67108864 fd03ab91847277757e25c4214545378d+67108864 0cebfabcbdc53c4366f9fdb3c58da369+67108864 37fc9e297586cbe133823b0dcd74151b+67108864 5413d6e80ab9b9a4995238f56387f920+67108864 b65fbffd1cd35708767b822d354fb6c5+67108864 369d2566eabb30c55223261f52561e05+67108864 f6d5f3b3c0a22b93e1dda7a569813098+67108864 2097a5b95515dcab7170d6e45f15644f+67108864 651a39b728d858e73a97317f28f01570+67108864 1fffdd51f56f1df30310ed957e7b6940+67108864 1f3ebeb7f6da888bbc10daefbb389d86+67108864 0446df2210a9bb8aa78424ea8890dbfd+67108864 79ba0e4df90bd1aeb12f1521fb29150d+67108864 5eef6de0afe26994aa2c7cfd31c97c85+67108864 250e64306df679e7a1abca0302c796ec+67108864 64e0ef4c14f7c913b48a2a2ee2610077+67108864 8207b3d6919395a87fd4705d83033dad+67108864 673ff19dd35b2c519c4bcfb158976f56+67108864 
ff2885cbdb66c872885e44c43bd8e68d+67108864 5a2e771e4b0338c1dd7647d693da555b+67108864 ee9f602d74e0fb02bf26d60025517511+67108864 b53d146b9ad8d8470a9ae1f8982f693e+67108864 0ec4753c965090fb02b2d4881cfded65+67108864 258255645bdc327b3f5aff2f5f308b86+67108864 eca1fb054e55cbbb106ec7b77987e4f9+67108864 67ff2f4aa9246afeee983520baa9f28f+67108864 9088128cd4b27cd3e49f2cd16605cf3b+67108864 eef9ef8b7d292d53b512844eeb1f8c14+67108864 82fb940c018616328ad5b1f40573d0d0+67108864 d91badac0cf1090d6b9e0c4835095d41+67108864 8cf02691ad8e84eab3f166cbf21fe456+67108864 d046af96ff4ea58d232fc61a0cec06c7+67108864 90fc539b57ac863cfbaa510a6d7a8831+67108864 c56a89de61ab11e03b57fe0652a9f824+67108864 ac049420424f3a7320ffd25124e73097+67108864 b331df4098dd950ed2127110a77d69cb+67108864 4169576a334d9d6d0ad1acf785de4beb+67108864 8417499360a366a19e803eb4bad782cb+67108864 ce9c12f73a782f7d8c4adb1955f5041b+67108864 67ac94f8d9252daa60fd83fdd485719d+67108864 b6745ef3760ed93c751c2a6ad463c453+67108864 3e8ca95376b731465cfdad3137debd5b+67108864 3597ed23a677c8510948fcffcaa32174+67108864 e183a236e71b541e936697989c953fdf+67108864 adb1b761eab093ccf4381e483767ad87+67108864 61fe4674dea00f7dfffb572981da4eb6+67108864 59d5521497d7004b1734f8f12d50995c+67108864 708ab80ea89529c7a828b27240bae2c9+67108864 d5546b0d36c4618401eb534f8ef9ae67+67108864 146b5cfb47f585add7fb91b3eb792b3f+67108864 98af89fbfc592389f4ee90001b73b7c0+67108864 f1f7e7a38ae792b1676958c38a079302+67108864 dee5b4e934ad4d820084abe03ec82bb0+67108864 233ac5529b4bddd3357ba114cecb8265+67108864 2ea7c26b43543828173d265429e5e9a3+67108864 554279d68a2ba24f37960716668f57c0+67108864 a652774854ea89b90addf1e5558b03db+67108864 2f32839c01a3178754b207ccf2cf774f+67108864 19e1085e4a9428f54516d2c9763fc69b+67108864 b08e52aa55ce36adddc8c827a7b56a90+67108864 ca5a2cfa551dcc4171a8548b2e75737c+67108864 07d0c5d3b9da6f020c49a09ddab88c11+67108864 297c14087d08799e3cc758bebb35a998+67108864 36745bcc5ab5e3b6db6d73d685486632+67108864 145570316045be126fb77dce478c9e7b+67108864 7ae40af44f48b1940ae5f1eaf7149d8e+67108864 a040da7c89a88d383f6351a09881e5c1+67108864 347090b7962725c04375d35b1426a922+67108864 969ae2ca582911e37549ab48999596f2+67108864 b3d6aff2b8fcca4b97080ec1a393d4ae+67108864 f2cfa7ddb99bb01b7fe0e380bbbeb743+67108864 459a7ea1a6c0b502ae06f87bc8a22d85+67108864 d6c18cc1df3b7973d1fc1dc994e90e7f+67108864 7585f0be7bce0bde7246e93f26d9663f+67108864 e75e85ea0ca03510ac43f793a012f71b+67108864 69b99706e27164ddd6bbebcc34704e2f+67108864 6d54e0c179d902173ec3550561baf014+67108864 6b8d7b1fc0b2db4a2eb4a62026df6f07+67108864 454dcf5308ef0d3cb8d50963536b47cf+67108864 00934415280fbe98d2f428ba54694504+67108864 8ce63e908997f169795175c2aecc2800+67108864 d2adddeec7da9fb3b9eb973b7dd62301+67108864 df334feb04844764f8d6deb418c8809c+67108864 c400cefe791d3885f256df23f156b790+67108864 3fe67e7bc24ff07ee05f1d5010f65e59+67108864 52dea4a8d0b2076971c0d8662f4012c0+67108864 4a361fd451f8f4f645f300880d6fbea1+67108864 a47ed8eebf37a796eaf28fd5b8a448f3+67108864 6333c635f5f8efd608837c9c43a54809+67108864 c5218502430171c40ff8329ea81d6142+67108864 e40b9b1935f2f8242ecd722c87876905+67108864 0fa2bd707cf80dd38937420c88ad07e3+67108864 68a7f964eb4f4f32f42d2fc0e4b4c168+67108864 d7031b0d93a9e6ae52fefb38ea359d0b+67108864 a8cb2892fdaa1ab06152ba266271bf6a+67108864 dde2ae6293a78d523de73934580adb98+67108864 64b31b76d0b3f567cabdb19ec8a6b5bb+67108864 5722cd27250a7b4f387f689eccf1387b+67108864 9a5a56f5ad46ae4cc425b52f5f66b0e9+67108864 2c26af8c2644c3130ce2630c11b55f16+67108864 b15cc2d99d1814ea3269caefe5aeed1f+67108864 845936a2571203663e760fe969cb7cd4+67108864 
cfea2e5fd2d21b39a0279c0ad8166b0a+67108864 012754e502a0a7ec6ffe98299592e496+67108864 ca04a3214136dca30208e625288d50c0+67108864 fb015bda0345f425717e8e2da41515e1+67108864 6375587f9cd8b52f461e457d0ba5e6a5+67108864 18ac65bc4f38174709f34a54cb0bfdc3+67108864 fd3afea501ea6dc765eba676c7668d1c+67108864 9e576d0ff9d0f7ef7c55a9142627f48c+67108864 aafb3340fb59fd4e1ecd15b71486cf71+67108864 f8579bc6ec704e334984c5ed7626061d+67108864 67798d6de9a3a161763560a25cd5fdad+67108864 0895eb15394b8f3eb87630ced9c54374+67108864 e65992f5b348cf08f7f94c83abdbce9b+67108864 6e3a01b14054b64f85bee2b089c82183+67108864 5a02dd377136f74a88f37478d1465e4d+67108864 6d6cecaaf9e0446e484448dd62ee963e+67108864 4e1cbc7a34dc967a034ed3e9cba67b64+67108864 a7cc7e1328300bd8f32504136b084c80+67108864 648ce1fecf63126e094fdfc123f549be+67108864 164e1083eabbad57b7b4f72810a655db+67108864 452a5be2713d4931248299a6c563cef1+67108864 0ef85624c448f6e4ffbf5c69f0df6a63+67108864 636f3cf22e32d186b16700b34b93f34b+67108864 1005aeb33da4e9fc72bb3a0fafd0c3f4+67108864 6ce50685ad27cfbf0b83cbde29adf870+67108864 f8079b077ed19ab55860c83f0a5891d1+67108864 1e4471af55b217dbb89bcbebd476dbc1+67108864 0941ab2b20017001185b8f7d96b3da45+67108864 218c586c39817c072148fc1c9323fccd+67108864 c6ae2f238bec21d75c463888eb9eb34e+67108864 17e43a11af424ed19ec3e37208ccd7c2+67108864 088975b838d40639b140cacb48f24582+67108864 d3499a99641de103b50eec6391c8bb85+67108864 3a3925e9ce09b308b7d686bd35ded3ad+67108864 4be279b24000df6b90f3b21346a95ec0+67108864 dff78a508fe2d378362073a634fc8568+67108864 7e030eed13b4d92570bf27cea056de2b+67108864 e575167c80666518e1a1fcc37e5be977+67108864 59bf806d56b3f1da60d7e1ea1e326732+67108864 7ab1600fd8e0ea33d4a1522ccb4b16ec+67108864 b486802203e725f884e67662d60a4bc9+67108864 e2747727cd64c67213008cb997a182af+67108864 bbcd31beb88d98d982fae985212cd467+67108864 63e43c3dbf8b07f8823d6d6d8dca4e5a+67108864 1aca07c1a84f7370026399e36371059a+67108864 59da7470ed1691daa40159076f00aacc+67108864 2c67c1687441dfe2dd335aeace7960a3+67108864 148352f31cd89f167ebecb37709763e9+67108864 b15ebf827a05848de50be331fb5480ff+67108864 5a5e03fb89bf5dc0e4b2ea691944f772+67108864 8625d91e288b99dccb7b56561ee74876+67108864 3656bca4544924b720b82283a91cb76b+67108864 5fd2f6f1fad7d20c0e973703ac6e3669+67108864 b33f494062f3cb679c36337ffd306507+67108864 707dba27750617a509a96a2000893ef3+67108864 c9db9063eefa8c0c352ef3deb05af1f4+67108864 77ad729701812bfe2cc39ba1750c2051+67108864 632b1bef7f64c435bb783d1ebf397196+67108864 b9137b8556e1d9d3c47c79d756647182+67108864 146b878dac4ca3bb0a728d395b42a74e+67108864 0e65fdf451b17e75be0f91786077f848+67108864 2926796ae841754b2dd1815cbb707043+67108864 c0cae36e934007991f9d56a0caca8158+67108864 b8ec664a45a543ff21447729cc4799ee+67108864 4d3ea3381baa8a14c71628a95c4b2d82+67108864 a1ee68e9f6381a4c1723d2b221d182fc+67108864 51383ce68788f81c011a8b658f425b8d+67108864 39738b40415353cf36bcaebbddae28b0+67108864 49a9a08e0f163045fb4bdb87d94c7470+67108864 bf896038315fcb8c40a628ea135cdc31+67108864 3e5ca3e8307931fed9854d8bc8d6b04c+67108864 1c7a2e1225976dd6ad6b6c57d6089da9+67108864 8d970c8f25369d3ddee7897a801b3600+67108864 4099f0a3b81e8f24097fd8778a230cd6+67108864 1190633b75127dc6dcf564a81d903afc+67108864 aabcc49889ba529bbdde745f7a4e2a06+67108864 4006b0b18f997427d6b01d7488875b0a+67108864 6c02ecaf633dc323c688d0ac7c6a42cc+67108864 341c3efd284477d4540fd53938af4a40+67108864 070d5b5a0274690de3b95bc09ffb78dd+67108864 ebdc0b5a4395cebc98e0716965c47a3f+67108864 2ef29fc6ddb8118b08ee3960029506bd+67108864 5958bffe4d6506019c1596d8ed1ba6a1+67108864 9c15577caab6a3ebed7ea299e32e4dce+67108864 
6cb435f99aaa250079c37629628d5520+67108864 4b3c90f76918b88372745d89eeb9e084+67108864 837c2eb79a671d35aa881aab520bbf4f+67108864 92f0463f8c9dae280f9a162870679502+67108864 95f70fcce64cfabc5b8707b62e6d245f+67108864 64ea34ea306f87277008613ffc46e4f7+67108864 de302fcd65dafb52be9e0488631b0de1+67108864 feb4e1556edf524f8020b14e432fb73d+67108864 f7cd9b1aa21cede7078a6dde4a9a6d91+67108864 061f4e1dc36630d27d4ce11239aba66f+44772532 0:6148:.DS_Store 6148:4096:._.DS_Store 10244:117550693231:PG0002577-DNA.bam 117550703475:8805224:PG0002577-DNA.bam.bai 117559508699:936266194:PG0002577-DNA.bam.fa.gz 118495774893:465904647:PG0002577-DNA.bam.tdf
+./PG0002577-DNA-jlake-germline/Assembly/genome/bam/realigned e3d7a51e4c0ead03e3e3896342f471e0+67108864 42ba06408eb1234fae80cec40e58427c+67108864 1af5af879cc2eb0a208852e0211e7625+67108864 30a07562440868426251ca573f2d9ec1+67108864 e9e0de7a6665939596b778ac72043926+67108864 e0ef7fc66317baaf856a3f9145731d98+67108864 b2ca4708a9098cdafcb7416705031238+67108864 1bbbafc913e7312f709f2cbe02037bef+67108864 1d4e3218c9bf91ee9b6de69f0e5559fa+67108864 8a089574e5a9a8ce2882b06bab082314+67108864 7efed976b32544fa933c38b1c810521a+67108864 cbf8af44acde3ecc1cca2bd98777832f+67108864 19d0c9bccdf28f7cae5dd51cb6f3883b+67108864 3220af4613dd7dde52fbf72208dedf65+67108864 41447742354119644391fa6e8ea27a92+67108864 1b003e733ed594760ede9079ab5f0997+67108864 5283d84d614113b2732c931d01f47ddd+67108864 db29a5faa61e4301c6bf71012ad1d4d5+67108864 ac0387a2bc339dca235c1f0eb1d6e3d9+24178596 0:1226675356:PG0002577-DNA.realigned.bam 1226675356:5462792:PG0002577-DNA.realigned.bam.bai
+./PG0002577-DNA-jlake-germline/Assembly/stats a3d360c03f346f830fe8c247fb6b6969+37754 0:17936:Reads.idx 17936:17936:Reads.idx.bu 35872:1249:coverage.summary.txt 37121:633:dupCount.summary.txt
+./PG0002577-DNA-jlake-germline/Docs 5b614c0ac2f1b7be407433860ee2a553+5625667 0:277564:1_IGS_Deliverable_Layout_gVCF.pdf 277564:304833:2_gVCF_Conventions_CASAVA_Release.pdf 582397:332651:3_Illumina_Annotation_Pipeline_Documentation.pdf 915048:3765366:S1_CASAVA_UG_15011196B.PDF 4680414:903872:S2_CASAVA_QRG_15011197B.PDF 5584286:41381:S3_bam2fastq.pdf
+./PG0002577-DNA-jlake-germline/Genotyping c73ad23d5e2b04a552ba4172a5efb1af+67108864 6bfe5a7cb20aceadc14486a51eaa81b8+67108864 6c7822f824a85de9c6e5a2e1e222777c+4019621 0:138237349:FinalReport_HumanOmni2.5-8v1_PG0002577.txt
+./PG0002577-DNA-jlake-germline/IGV 3ab66c4b117c21a43e1b3e921d32cf49+15934904 0:2898:.igv_session.xml 2898:2898:.mac_igv_session.xml 5796:2526:GID_session.xml 8322:171972:batik-codec.jar 180294:202:igv.bat 180496:43:igv.command 180539:15725768:igv.jar 15906307:42:igv.sh 15906349:1150:illumina.ico 15907499:26167:license_LGPL-2.1.txt 15933666:1238:readme.txt
+./PG0002577-DNA-jlake-germline/Variations c8be77db5bd5c21d538743aca8977c14+67108864 c7fe6cd59645d31b4de811ff627ef4b5+67108864 014529a882403f6d7f425015980848c6+67108864 dd7715f352e78bf2da032fa1ce2b1377+67108864 7967b6e51b1331f996358898955a0baa+67108864 f7491669a7747705bf63780078a8da60+67108864 ae300af114c7a1adbf251db77ef188b8+67108864 a5d6beb9c88ba5abf7003d34ff44bbe2+67108864 f1cc90f7192ffd09197c429d57400f07+67108864 13051c9b1052878f8a49992355c48860+67108864 eb964b9fc0515b5f6248fcc83a6af015+67108864 30bef0e73685998399da830c54ac64b4+67108864 0b3c289822fda2d7837bdc140ef18b62+67108864 ec49a958f1ec1bc94fd40b99d3baa0f9+67108864 39bf6e6bb4bbac8e98e46eee0751cb56+67108864 f6d35577522c66c11d96b0bce2359b87+67108864 3ce124d6ce052da22b79aa82277879a9+67108864 a6aa4f7b73c9bebc05b9f2d71a118dcb+67108864 de2132d64ee96ca4fa5c8f0bcf29529a+67108864 2305a45a0a9a219b0b992612f1954808+67108864 2eb2714435221611c91295d4b4036cc9+3233741 0:1209008791:PG0002577-DNA.genome.block.anno.vcf.gz 1209008791:3563086:PG0002577-DNA.genome.block.anno.vcf.gz.tbi 1212571877:131236358:PG0002577-DNA.snps.vcf.gz 1343808235:1602786:PG0002577-DNA.snps.vcf.gz.tbi
+./PG0002578-DNA-jlake-tumor 70ff27bf1044858b30152546a861370d+1848377 0:6148:.DS_Store 6148:4096:._.DS_Store 10244:1834705:PG0002578-DNA.pdf 1844949:76:PersonalGenome_Mac.command 1845025:704:PersonalGenome_Windows.bat 1845729:2648:md5sum.txt
+./PG0002578-DNA-jlake-tumor/Assembly/conf d6de8e335f14bb99df74e40d37649876+159230 0:61440:dirs.tar 61440:11017:project.conf 72457:61440:project.dirs.tar 133897:25333:run.conf.xml
+./PG0002578-DNA-jlake-tumor/Assembly/genome/bam b27667638ac1b82166eb4158a276203d+67108864 58eeb2e10cd0859bb96886a205bd4aaa+67108864 667e5de545718d41403ace0a1e52572b+67108864 1c5a59fcef82d558aea7f8402652c97f+67108864 f89963d6b7d2dda6c5859ec38fbd39ac+67108864 8446e283f77664263ba79f6c3d84b965+67108864 d3b974ed13fd0e3f4daa88611e9cf97a+67108864 cd6f8cd41438efa36f836963cabc14ee+67108864 dc4c3d32095842b4542cdbf678f8638e+67108864 e4163f2f75001d911c55291c6557fee9+67108864 a152baede8c988a2b79edc26d92859e5+67108864 203df769bf775c7a903427b47cd50058+67108864 2e1fd6d98da402f7057d9e42f31a6855+67108864 15694d0809b5cb0b9b62e278030fb9ba+67108864 c5659c15bf0a35bdd7c9ce9521caaf17+67108864 4b331f97efb322bab7823f60dc33cbaa+67108864 ef5df5e9be8cebe67752c1ec7b699360+67108864 195e68b05f0175fe26abfe17a52133c4+67108864 d1df9508f48008dd0897e986d5e0a095+67108864 cb9bb9701109b42927d21dc0c441c106+67108864 3c544db7889c09ad04a683ab65dbf905+67108864 b3059eeaa6c4be575ef6442d7d686044+67108864 6f7d0657e6dcacb680e2c18c7ea969f4+67108864 fd19754ef54a96294fa0be3c235190d3+67108864 d403cf9deb5d300471cf491270728769+67108864 c11e3a5138e7e73a8d462a817284a578+67108864 31a4eb0282b6a20b1db03ab537f27d90+67108864 d2925bfcd512c54f774b28371321e80a+67108864 b10985d7e9eb98adc7bcd47fd51d74c7+67108864 cd6611da61290e6320c85aa2e64b573c+67108864 790643150532ca3b87fc87b398463bda+67108864 d73db36a115a9d62d3dc624a68263299+67108864 428b2cb5bf196058426fa5cfb0ab1f55+67108864 0882d387d810173e5466f1659f8ac150+67108864 c5602579109a9527c299cf23ff8e90c0+67108864 1e51d370499018e399e9da092061c13c+67108864 e49696b1edd093737fc543f594f55f6e+67108864 19b700f5e309bf97a4bfe5cfbea39768+67108864 bf0f28dc79bce5d2c5f1562558f27447+67108864 4be9d66602af08697c4a1bfc3c31841f+67108864 b6ab46ebedea1f2232120c1d390bf39c+67108864 ce12df5c0931d07646ba92f9c97ddbf5+67108864 d4afb665643b3b448998e68bbc9a91a1+67108864 92ebdbd5ffe74cfc842bd6ae44d11bd5+67108864 57faf1813558e0a6c25dc13bea541fe9+67108864 20851e6a10cfeda7062427e52035dd8c+67108864 25a70e81b78029f0be4fd632f57aa7be+67108864 419283b83cbd3202499bd4cefb0a1cdb+67108864 1a4a73a0778b404a2436684ef85a97bd+67108864 917f420880916197600f18d403b8173d+67108864 bd83a677cfc43cb03a6abfbc30b43d8a+67108864 f76cf33b00cddbd5e19d18bbf0fbaa2e+67108864 8d580e6c30d80cedf72fd3b60fca4a64+67108864 ba25a6f251eeb30321f807c9429f51c9+67108864 d003d7b82f5778e576a4a6367b5117d0+67108864 f37d3c41b2fdba068359b21666c4c81a+67108864 ae93098d87574a5f0b86413e8cf5bf40+67108864 7c1d4fc584f7adc3b778852776eb00f9+67108864 e1db975356914650ba3e29d13cd125ef+67108864 853f89dad1549ae9b0ba44899d4ccc8e+67108864 8a182ad3a2444ae6d6da7f6f7f4df167+67108864 a473f2377653cf34cc216528c883a12d+67108864 f0614b5c5954fefc1b2e6e2c4eeb5114+67108864 095fab3d4eb93eaf3662e3d307bdca46+67108864 cc5ef7122b2566b67fc5576a8f5f1f21+67108864 5e495a56cc1dd12d2349a71a066d392b+67108864 ac7834fe596d03c10adf8b689f6f151b+67108864 ee09ce2a05fb11431d2682631b702e26+67108864 e8893b94fdcafbee3257c27255512899+67108864 ef566dc982a25f7723106d859971ae15+67108864 bef22febb5bb8d15b8760795bcc70dd6+67108864 736d1b506a8e928e1911c58eeb5de21b+67108864 8e21577e579ed20b42f0feea41d8bade+67108864 51ae0fbd5bec7e4cf8ffef8a32c1ce40+67108864 66c69c19a031d37c5ce3cc256dfc702d+67108864 503d43e6803675b8387234ee64564ac9+67108864 a29454d0d616e51caa2b352a968d7dd9+67108864 f8c68ced290518c6e754b0e2c45c64f2+67108864 1bbcedb7c16ee806a82bd8b4a80ce339+67108864 7742487b63e051606b7f15765025e69f+67108864 ce33f57c6e8405d27c611991f55aea65+67108864 97e7ac624da63ef827ab9622ca1f5013+67108864 25ff51e2abedd04c71e7337e49ddd2cc+67108864 
d92bcb904e10595be0dc35838bfd01b3+67108864 b5107a1d8064062dfc20cf915aac6691+67108864 1a35c6f5f345d085c5b1ce13733b6c3a+67108864 aaf44b9e1927563d13da3e9667624594+67108864 eccece5552f7a4aaf887fcf1a9445fab+67108864 e842b55a948fa0b492b69fd2c029f3d1+67108864 67c57be4efda877f9b44f4d1a2069a1f+67108864 6ab8cdae41408d27e4da3682450177a7+67108864 7aa6e963e26b2b987ffcdf223d42be5b+67108864 999b6b6ce0fcea94a4261395eff99038+67108864 acf4418cf27bd8ab5e40e2d6004ee9dc+67108864 ae3f390d34b4a92c23053c44a39f0980+67108864 b2df89979a416c6703222503a8722b7d+67108864 325a9362025c52c3a93daf3a411ff54e+67108864 b6b885513511313a706768655b770470+67108864 7924347c70e23e71392ac3be1a8dd202+67108864 81b852cfa07cdedbc467204835f346ec+67108864 c14a909737d44c5ddbb31d8bbc2301aa+67108864 8eaaa6ccd27b0433a16379fbd46a46b1+67108864 7b515b1799a09df1c53ba8a42afaaa44+67108864 da78adc26bc2739f4778ec04cf4eb545+67108864 f89e9a83a12f6c20c99e2a8128b24480+67108864 32cba62bd7e4e0d6c669059d9e9e8187+67108864 9c98cde5507452e6e66a92a47f0b1177+67108864 3ec80b900ddc0aa5351ed295c2519506+67108864 3e440a25f00805e345904f712459d05e+67108864 c6d5b9c92034e7a3ae676db0f9440410+67108864 a459a2ef3c7a523d03641d4eb5b19b9a+67108864 38af85e2bfdd36f39e871d8ec77e99f1+67108864 3ef6789bfbe65b73ea91aabaf2814e51+67108864 71d11439333ded47932b26a0c1291ee6+67108864 ace062e48a19df1816c912cf14ec73e3+67108864 96c0bd7cd898216e1de2de81ba431fe5+67108864 b7b0bb102bb512b191d57dbac648a9ac+67108864 1beb107a743ddb783f1df684a770d3b3+67108864 dbb7ccff44d8a1fc3e2c2f8b56feef69+67108864 376856dc7222a5d06fba46edb8d421af+67108864 936cde3ac43bcc6fabc0dcccbd8c0bb7+67108864 fa0064d0ba4b197ea1e043b39214ecf8+67108864 faff642c6445bbff6766b2173caf52d8+67108864 329226588c99b28febbfafcd465a0506+67108864 b1053c66901f8c9ababa599a215f5865+67108864 52a1e0723389874dbe26cda89a7ba074+67108864 651fc0d719d0ecf654fb68fcead40fa7+67108864 11a5aae83b1855f665d1fb649ca77551+67108864 49fe9f4b9374b20cb104ce019597458c+67108864 ee63c03171c5ae71845f9ec36b5d7810+67108864 dd0eaced2856c38ca14f4b4e7cd1283a+67108864 031b5092c185f12485b6fb60f12f1bd9+67108864 52624c84bd220136bd295b728fb39ee7+67108864 619ce6edd5b950b97b0144fd8cc0d846+67108864 2a326f8467fcc97e00feb58a2d282e60+67108864 afe28a3567c3d8e22357e54767a66284+67108864 43ddb4ef932cd779efa7803a8c682ff3+67108864 d714bd4ea327fdc41bd030ba0dacfcb7+67108864 cd0b60019265606af1630fda58d7fe76+67108864 245bf3c257e85ecc61c94b21f199c3ba+67108864 cd57ef218d5dab7230df2ed458400f4a+67108864 05c0056dc807fae33dec61b84c21e72e+67108864 a5dbee91918ee2fe8ca60b3935a36f66+67108864 1763cd31e4ce10548f0cfefcdd389b27+67108864 f27c37a2d77439dfb851a797df640be1+67108864 5c2afc1352ac67ca338a2bea4daacb9b+67108864 26b65a719765253a2248f1cbce10ae42+67108864 ccd5dc52fb8f92f467e99bba643e962b+67108864 d6f46c28ec827a571d03d44c1b3f0be6+67108864 835810e6e8acfe67f9cc53f12058f3f5+67108864 fc04ac6ac1bc8f85696cbf34d47e067c+67108864 180ef12c3f768a9347368a3d68f83a76+67108864 6e2cd549271844576032f654ac3cade1+67108864 b0063ee8160312761b2d95ecbe61add1+67108864 5601decb3b13f98a1246f4841254d949+67108864 c2ebf8433eaa439e8a7101e8ce735b30+67108864 acb6486284c8ac5ddc3a5cabe10f6ec2+67108864 f2d534128b117a03e6a1b528fd7739bf+67108864 3aebf3617fcf25db123b20011a96e9d6+67108864 dcef10edb820661ba7e708c61789d9ef+67108864 115ec18ea7aa397236ab039332c60ea5+67108864 8fe654a590ccc870c5aefcd848768480+67108864 d343ec7f640ce4a8b1e9bb49d9c16f09+67108864 3a24e80c488cee7f138a7c9357236997+67108864 3f42b92c722326b05303aac018ccf3dd+67108864 b91bc0cc81c1a2886034555e0de77cb9+67108864 b04373680126dcd17847013f0a1cdc5e+67108864 
771305894ea402b6275fb079baf1e176+67108864 83b902682a699d2e509f6148be44cc81+67108864 2f2dba45cbc0d0a3b039bbb15e642806+67108864 28c35daa91968426f743e02c1df09ecb+67108864 ae9c053be0004a45a04bd7c684816eeb+67108864 b779e49cfe032ea858f04a174d5f7915+67108864 1cb18ea4dfffb211cf2e1b3b89684cd0+67108864 700a140d8be71a3a4111efa7676ce751+67108864 0e39fc71ea3aef669e46273e879a9cb5+67108864 eb075d86d17bbe570daee899e531a0d6+67108864 1209317403b766b79db6e691aa51d807+67108864 06511508a5f7af30d2b1a25355709ddd+67108864 23415542c3528187dcaa667ab27e910a+67108864 b21c149732221ddbb710146b6e0dc641+67108864 d6cb43cea2c25237ff24c957ed305c4d+67108864 130240a70807018d8353a49ecd9a114a+67108864 02ee615b4caabd6b65be38fecde69e36+67108864 a44590e6dbeee583ec4e2e556fb4ebc1+67108864 6690720a79e990e60bb53330f2381b60+67108864 938b829c29b361dd9df3345183ad8a2e+67108864 c0979fc234519c48ff714b533b0ac84f+67108864 2925fd6e9204dd1e120fa0b1f81b3d79+67108864 824dacbcbab1e6e4bb3bf19f0998bd07+67108864 31f33dd52e63b4d1915be40521805ac7+67108864 75721f7a0f736b2e89c3660dcdebd669+67108864 8b5ec2f053d27d497a02bbd4430fd680+67108864 ef45bf06c76c11c1e018508ddfbea33e+67108864 46ec11a6412019631b342baf9bd8cb1b+67108864 a936e7b58f6453dea8eeb240f78436f7+67108864 cdc0790f7b0e0fb948d2fd72eadcc1b4+67108864 e4e68a5f78554687704ed9a6e02c587e+67108864 92be6241e5980eeed5c52922e5fb506e+67108864 cbadb73e5d13adc78f1cf5128cf2f071+67108864 a7d0e1b8cfdd33fb776e040a6d6c95b3+67108864 22c7264616a56cb9051f42d601d59e6c+67108864 47518cfef61f5870fc537240738134cc+67108864 57d873bc31e939b64fbde571a81807b4+67108864 e39e5c68e7527446b4569cc06d4173c1+67108864 445a9a6abb15410f43093da9ef20d14a+67108864 c03b8fa6fcfe2a4952410468e684052b+67108864 fee487e4e6e506f97fd349b827a973a2+67108864 2b13ead8d886deaf68c310893dace3d6+67108864 ad5e842788cd9cacaf6380cce4666e4b+67108864 818508d6056786e1705ae6b9c5135752+67108864 53f0b9bc1be7cb1b7a329c22f2ac4b92+67108864 61e1042f92891deb1a3ca198ed66111d+67108864 be5addb257cfa18b00ca9b2e394cb289+67108864 b527844cfeee11e254b667885581f483+67108864 67b373734db9d69ec644c1c7db2bf5f0+67108864 8b23c1262fd32884dcfee4a801af3e5d+67108864 000bbb9236cc0ceb98913aa92fff5e99+67108864 7fc2ffc8c1b6009d078595b74a62cb85+67108864 5de77a68570a727b13666ef7aca9366a+67108864 54bb5594fb272171844bd33cb42ad9b1+67108864 590f8787314bf797bbbcb2787f613835+67108864 fde9b2191cbf07223459ad210a5c428f+67108864 503e56d97295d6e0a12045c0e12002ee+67108864 03a5f9b183dc9569721ed453644ddc18+67108864 2be6fc27fc31381276862d2abe1ac5dd+67108864 5947b0d663c0a52c05875ec5ce61bd84+67108864 4ef80e9e4dae16a5aae51a9501c616ec+67108864 153df94cd79dbb2dc40303907ee33b80+67108864 09104bb5a00744a104ef3400b21f20f0+67108864 2c124e205a229a9e99c2b304be89294c+67108864 854ba0a363dabefa28e8d360250b2ba7+67108864 95ff4f4b90489ddbc14aa33c7c867e06+67108864 f9373ff3a5039c6a5d2257ebef59dc3a+67108864 7f1abc805d8f6324c34b912af0ed0493+67108864 86ed37c6ba43db6e1b73df095d2767a5+67108864 09b0a46851e882230861ffc4177b4ed2+67108864 1092b56cc1a2172b7084a0d99ec6d0b7+67108864 f70c26b2a62dd5a914208db67d341064+67108864 830ea49cee6d701200b3c08b39be989b+67108864 4255d52ed73e6e02f8bd4c431831adca+67108864 b43115fe604e7741234d9d816a29e89b+67108864 e5ac0b97870c3cadc72b85097df78157+67108864 db9645ed90daada987e1d19ff8ec56ae+67108864 ad29235faaf4367473e2af9fda3d5080+67108864 191eb8c2cd7957b63474c53121e79e70+67108864 8503282417733089b18da8530ccedbb7+67108864 5e0889c82ed4c2a16d34c7ae9b87a5c2+67108864 d45d6abe6f8dee6cd021619e017fd63b+67108864 2892b58d187bcab6e8878b849f43445d+67108864 dc541d6a0af76dfb86a6770bab90e408+67108864 
d1d42ce6c4d0499eb502f2b413b84e8f+67108864 b14d93850fc3eeca8d6daab0b577a408+67108864 593d932d2839b798fe3dbdbbf8c3a7ea+67108864 f384a78c5361f354cff3d6abbc08de67+67108864 6cf3d9317a0c58fcaa96a06edbb4c34d+67108864 c2ac341f1ffe67c5f729e0f3318ed272+67108864 08b4299b0bb7afbe68cf2af3aebe1a58+67108864 7dacdb347bcc696d50683b5947de2941+67108864 bf115a0e40b82478001cc3337d05f4a5+67108864 9772c379171a80eaf9167a5229eadb18+67108864 b2999f960a78a00649e7775949e7348f+67108864 3d99cb6768c58bc98dc55785440cebd0+67108864 b41f747b1d5f7428faead1391f9aaca4+67108864 1287df2ffb45e0415a9a368d52f8d508+67108864 7d6e7553dfe6ba49963519eb068713af+67108864 14a8e108d5fdfd7eef34851c695bcba7+67108864 8eb7f420eb13a536f0392adb709885ef+67108864 559e61efc2da01fdb0986679bb1dbc37+67108864 2cec25bda3ad1d892fbee59af1163685+67108864 315616c4097c448b9cd9945744cf6cc7+67108864 6c7fcb4dae7790a43a4cc22204bb6a84+67108864 ed2551b5c8662c3a57dcc0c712e4fcc7+67108864 15b54667bae409ea2d760f93ae26bbd4+67108864 f95a0fa90b01a5ce5e6ade63c331305e+67108864 1cdd6995b63579938be9aec26195d995+67108864 23215d1a26ac2696e27f03418ce138a5+67108864 7b611d9a0f689a1544df8132347a56f6+67108864 8dd426c07fe5fb724dcba9c2a15b2931+67108864 2ce2ba723ce6ebe056066a234bac46ba+67108864 c4e8594a8b7ce23b3da167825c8de7d1+67108864 4e7e54bc389c33c968c46f1a124610e3+67108864 f4f9aceea1e08862d46dd717d8b4f8a8+67108864 a2aa43ceba29057bc66cdb9a361fb419+67108864 5e886dc968de49b8e6abe6d151d53fd0+67108864 99d00caf3c14fd72f776c383c90ab631+67108864 08544e774a050e67f03e202514d1ac42+67108864 b9d6faa9b8f2811ccb451d2910a6a589+67108864 f977340bde7d733ba35fc998963fa8b4+67108864 135ac1eee41a279e3d000a146adb1fce+67108864 b30fa80fdd9ff4691d796fdac74f0093+67108864 a41417fec9fee6f189d834402a9ebc8a+67108864 4ab7b6f850183e8d1af3dc0f94a1c83f+67108864 6c1635db222313655d1a61cd9264299e+67108864 348a8d7f292e600ee53dc9c0cf48358d+67108864 2c2101c9815e64d46a38a5c24720b233+67108864 be8acf03fe71d9d43c3cf59e77ef3f92+67108864 a82c39cf815336c4d230429c9e53ea30+67108864 7c3c4b4dc62efd8c2f3597e2ca1b4c4b+67108864 15f7175e532a484f4be80d6f7e15b808+67108864 d44d3e0dfa084fc5e888e1503fb2d28f+67108864 3fbea3c0a4c7876ce9358de20795b751+67108864 735b8c3b26ea942e166321f4e0bc5127+67108864 286e2637c88ccf5187dde287a4ca9990+67108864 bbf981a586017a3a8c323d9d0fdf1a43+67108864 5a809f908b75792c8589eb2b23a9b8c3+67108864 d6e897e6dd3ee464553d3c835cafae94+67108864 6ee1aef6e62e295bc7dd79e2ea6e0ccf+67108864 f3baa399c5e8bb8b3db494b0107e5af1+67108864 99e401e71cd20430f26df9cabd414658+67108864 f1ba18255163e40e1a7738d5107fa214+67108864 9be7a2b18f11ada848d5586e353cc2ff+67108864 4f453ae5dfe7744f1f355c8c360cb7e9+67108864 70348d7e74ba8dc70519172645d98b51+67108864 1149a22ebe1be33590c5d531a974e79c+67108864 379397789b87b9c0e44d46dd9a7b0927+67108864 0bc9781da20de560b00f9492bf9d26b1+67108864 965ef8040f95c779b567a3c2c9319e7e+67108864 d86b8775c719b63188321560cca2b076+67108864 6ce9dfda8310aefd648aedf750c23900+67108864 68b06472825519dfb5f743d759708c8a+67108864 9617ea0542a1ebf7496efd4477bb67bc+67108864 6335e90273f14ca152e88a986ec40935+67108864 35f1d34a24d889cd1d86b10b1b233c42+67108864 49ef382f65fe1ec46cb2a8a4f8a960cf+67108864 a3f0fec2599be5ecd19b360b203e5876+67108864 743dc90628f546541daf0cf89710fdde+67108864 d167ec2708343b4b13fba5c2f12bf9b0+67108864 bd0bb42bd6a7bf267b00e81479ce3de0+67108864 c92c45f21773d3fa66afe741fc40c79b+67108864 b31bcb75bf11f2a25a7829cf0d8050bc+67108864 63b72753cfd53c2cd9ab94bcb5902969+67108864 3ba309561299c6547c52ee967fb315df+67108864 aab873f87359a7dfc85b0fe51f01b5a9+67108864 a150926f927cc3e60d45b8b18f3f57a3+67108864 
f195bee4c6bfba67434e6204ba7c082f+67108864 96c83f753f9b3ff62b1d097d7ab5161d+67108864 85ae48853801003271b8b7a113e3c439+67108864 7279992bc5df537682a25710e83cb6e0+67108864 9799fdb7db707cebfc2fb23279d1328b+67108864 28b4a1cb9b7d06db17482190b37eb014+67108864 bc8887be40fda9cec1cd4e405dee6784+67108864 67b3c19c0cfc1f897d082fc32558c0a2+67108864 66b5d0f197a5d622cc8a46ebfb91c831+67108864 aaa8c95f2e065adb74a630951644b69a+67108864 1df2ba81a4082700248ed31b49a78d3c+67108864 c641804541401992e665b6d71b08676e+67108864 43a751e4dd67528826b1c8b48a208899+67108864 12e4cfcbdac91438cae64b2cd7c9780f+67108864 0d3aae2988c4714fa91537613c5e616b+67108864 2f95606f37db2656ced4b009d6905ca0+67108864 a3053b1d8868bcdd108d9bbdf8987667+67108864 f4c6f20e3e6f4e9fbd11325b33c81f28+67108864 079ae4f891032ca44ccad706c3210fc5+67108864 aeb92132131d06d085946802bc63379f+67108864 0c51a101a9015707f28be913c549ea2d+67108864 a3adbdde43e893554b73304849163d31+67108864 1124b485aa2be68ec06067db378caadf+67108864 105a90949ef8a77743739cf6912ddecf+67108864 2d6eb744f7babcbd30e17f709cefc92d+67108864 c6e8581198ad5ba878697e95b4c55dc7+67108864 6633b9c706e53b649cc5d0eb5d7474bf+67108864 a0a8055971c9c2698af9cae2dcfc1de5+67108864 7afa27ead5036742aff637977742bce2+67108864 2a4689ec1a63cf5ccb4ae3754ad143a3+67108864 c50953205baa8180b24c33b3430e1895+67108864 43fa314ee5b342bc7aed72517cdff282+67108864 a3df0148323a0004acfe8387c67cc7d5+67108864 1c29892044dc4854651c73296d0e0c41+67108864 9d75a69bb257970ffaeb4ffbdb890728+67108864 b1b22a2463ee4106c480505247037743+67108864 7072621df84141b650e222049e3c3d4c+67108864 638e4160832bf12ac9eed8dc2d1aa57d+67108864 b88afc09886457810ba20ed4bc9725a3+67108864 45bce56d25f3612a8fa4d15adad97539+67108864 5421b2945c952a99b69ef6a6a70cf453+67108864 1f6f2dfb6224182059688f5d46e61ff9+67108864 359ac6d0f79d5e29b465fa12547d45f9+67108864 7b7c538226d537326ae3c8a334494425+67108864 86cffb84184aab85f109d155f45a6289+67108864 1913f3176b9441fa5adb2b5feb112a36+67108864 6020e46282904da6b914d6a37756c9ec+67108864 e7fb9206f7d4eafb469b3619a37f64cf+67108864 5c4eb1a7bc507b0c4fd9147bd6fe5b3d+67108864 39511f0f76cd426f1a8b55563a393090+67108864 228589eaa12f741ae3d63964e552ceb9+67108864 e71fd43848d544eea1440595225383bc+67108864 5d9d7af54c2b43e70331e74346de7be6+67108864 11f889041c34528b4f32b717fcef37f4+67108864 41f9c5578d0b11e7d4e96d951407a495+67108864 3164503cb8eed5eda0f929e89b4ba41f+67108864 e8232889c0ebeb8b95efda8a0be0fed5+67108864 f831bb088817b20fc665cb152ffcb7f7+67108864 c317567be4a749a38cee367bcb4650be+67108864 535ff483eea4aae8db6693c6fcbf4c96+67108864 4c3667b429aeab2cbd1c3764808beadc+67108864 f2d7e4f9220a5b144ab935441ee5a4c4+67108864 46376eaaac4261635cc8d4c421ad989c+67108864 8ad8c6ba6994354296efe3b271c83d68+67108864 f3ccadadfcc75bb032d33af3f218136d+67108864 fb7f377672c6ab9a6d7ee640de0708da+67108864 455e5b21427a1e0c1166a77888e3db84+67108864 56c1bd2a46e9c510c0f9df14a5e35005+67108864 729f094efa399f560639698946c924a5+67108864 f3597659df39a5489bf09e9fee9e5f18+67108864 6eb0404e6575741c52f564ed127a9e58+67108864 15b2142e9955e2a7f2597942e891d800+67108864 24a2e5ad926d978f93f1a055bac2ae82+67108864 6da3be627ed06f3302422a3571912798+67108864 faf50ff859c11e516516a396fa90e370+67108864 f30c246988782a97f8fb045f8562cd66+67108864 3dcfac0f2b47e7b54dd42b7593c11b3c+67108864 8c0cca153ed7b0298c272a38659dcd35+67108864 24d100860a2522d686b11e841e6bb3fc+67108864 a0fa91ad649ba8bfbd39edc032164aa4+67108864 4873a584c9bb9b86df2dda047977223e+67108864 71af7580293bbe70550235da201bc5b7+67108864 95246229f010e909205f973c336ef913+67108864 760718000bfa43f87faced458fef8a81+67108864 
b2bad6b04dc56bf6d486a389c3a89f69+67108864 f131bbb77c16d5e8bb37241b38aed448+67108864 e93e069d21f6b5c65ad59f54e10954fa+67108864 8edd8526ceeae5a2f73898ea94a2637d+67108864 89998b502934966809d3c3c5e9fcae09+67108864 2fce7d4eb38c05370d1695cb5fe2b283+67108864 19d601c0232c10ba5dbfd1ad65e26dd2+67108864 9c7b6580cb0a2b603d05117f559453c8+67108864 72726a8bee94ed66806cce11af1e2371+67108864 2b5db38f1eab185a91777d57ee86d2c6+67108864 ec6db4e314d2fcd9740bb0fb6a05204b+67108864 94edf1746c8aa716099d6cb69c065a40+67108864 4836f61b8d29f86f2cc8b7f449587ae9+67108864 436d54ff31569849e9b3b861a8d7a2c2+67108864 7ab27b51efccbb501ec8b0524d862005+67108864 0c23eb2a65db550177f8a03356b7e726+67108864 fae904ea4bded822adba6a676481440a+67108864 07f9c8f81344f0cd051bde9c3bb20398+67108864 57f5fabdd15e3deefe7e8670c015ab49+67108864 27812f4f1bf76cac4467e94bc517e2b0+67108864 eeb1e8dc2b9efb3ebab313e848ba415d+67108864 f2827412ad0b5ca07628a44c28fce28d+67108864 27127550ebc9c8be6cd30f08c20d3e77+67108864 827a8e19e46b575e698a03f2f159929a+67108864 d6da49faa10324de4f345e0ba7eb963f+67108864 48ee9edb8e9be2824651378cd71bcf60+67108864 d42fd051025373f0b395f9ee413e133b+67108864 8f72b8ed9aacb48b4d008fba4da38055+67108864 4c382522d1fb5e5e70a7641b11cbbc5e+67108864 dc40ff7e5dc3419dbf06ce5d89db77b6+67108864 b178799b404d1a0f2405c45f853bb87e+67108864 aad7711a0c588e389e1d898837c77975+67108864 dc92a289df1ee20c2a50c5cb34b9ee9f+67108864 c0b506a6540b190c8de760845d89a20d+67108864 95d57277bef8851c91053f8930557783+67108864 f1fbd42cb8a45b97fd3e1befbebbd141+67108864 343af058b01d8c9cb6deab3c7041be9e+67108864 ff56bd3a487164a446760ba4691ec93e+67108864 4ff72bbf0fbd4429341cd6c0633adfac+67108864 9b08953e5766d8cc5665c73f3066b9d4+67108864 c26eb39cffb14c5b29464552158f6f9e+67108864 2597fd02083990464f66d77950854dad+67108864 da1df4d16834f62ee21f8f414b63264a+67108864 9ee6fbfd042c057d1d700b51a3d6620b+67108864 1728c736a8598ede8333ef2359d3c9bd+67108864 a1a10371aa32905f2ec8ddbcba5fb065+67108864 ba2ad4e1729d11bdce0129d87f2a999b+67108864 c41113602add81f97ffdc93b5c3ee306+67108864 ee449a445b76a3f2590fe5ccc313cd55+67108864 7190f9a48c2b16ad0b93eccd0ea07e5f+67108864 d825ce686c183120f818411877161caf+67108864 8696a1e8f5542fef24a8e7a1b4eb667d+67108864 eac9cc77117da7eca4e691815d440570+67108864 4eb5252425e2342053e27721ef419033+67108864 bb6ecfd5c6c3404d9a4c1f44e75397cd+67108864 7dd9c32e758ac7a2a6adbcce675ea9b8+67108864 711f558f7ccf54b12910e7f4646d3ed4+67108864 7867a303bc4095bcde227661553682af+67108864 9370f355a3193b46bdecdafa6cbb9c8a+67108864 016b4584b44296de38f1d102937c59d5+67108864 07261c25f8ca01b3a384f590673a3380+67108864 e6994e3638f2e12ed52a104c14d88b1d+67108864 16e57ee081f408f8dc74e909efb63879+67108864 235866ac3b7d83e27bf8f4a6bdb56c02+67108864 25efb32940871da66f4ea5465334825d+67108864 f79d26bb161e83bd2a26b8c35b1d42b4+67108864 2783ea5a5721654486aaa5efbc83fb74+67108864 461881362fa60250adae60d3c49dc131+67108864 f4b68164f31c286249f1e5d2d7237c35+67108864 659eac0ad8a3e7e597458709bb795680+67108864 034ebd61fd1a2c7b8a283fffbdbb6ca4+67108864 50d93209850a3d6bbfe509ef813ecafe+67108864 42a97fd6584b9ca3cbc7dcb74cd405a3+67108864 3d0aa7902822df7f39feae96b5344f1a+67108864 f5dec5ba880a6d6392e4cb7605ffac0d+67108864 567809b2e7769647651efca42fef2e8d+67108864 6b12c06ff734e6e58b9eccf64bf1f911+67108864 7bc4c900e5d59443b0096866a80a700e+67108864 b8add59db3b248dafa75beb2700136cc+67108864 6ce79cc7c998349ff12c651f464b4d42+67108864 6cbe5e5e6b19de4a0b715e0d845d5719+67108864 e59a47b51498f2ce7f276a9a0d63fa66+67108864 4f5ba032de58d21a0cbd20ff0c5ca2cb+67108864 9d5fdbdf3cf4cc4fe2c32e5678417bcb+67108864 
606d871a7812d8d61717bb0d6de1d3bb+67108864 a7f59b847684061ae94b5e0d9bf70cb0+67108864 7704eacf857396f10a46a5141067f26a+67108864 bee69de8466c8b7ed1d8729658a40027+67108864 3c3c55ff9c9cd05f1fa4104140fcc901+67108864 4456b7aef7afd6689d3570b66ef0182e+67108864 50ba9fd74ef8d07e23dd96b20e1c1067+67108864 ff2c79e2bcae5c9204cd64cdf372d3b1+67108864 2ce25d963ba37dd5b24c2df65dd1295f+67108864 20b81f528f70c9e5f920e7534afc2bb2+67108864 a0b0e42685869944d3768dc337580428+67108864 fb0d0bc80635bda7b81030998c5bdec3+67108864 bfe86f22ef97ee20388699c8d4e824fc+67108864 5041aa0dc3a9b4b86f5aca145314477d+67108864 68e608344390a442be79c53395223e36+67108864 86bce8f646770b66dff7cf43751b77fc+67108864 de0b67d42b4b6a7825a3bef956737e5f+67108864 4b88b81edeb09740420ec66d79249c6a+67108864 755132bfea966ea29dec0b5703174ff2+67108864 c80f48e88aae10238b1c28647c7ed739+67108864 e001ea8aa95caab94f82df29fdf27e4f+67108864 8fccfa89d4af86d60e9f97af9936ecf2+67108864 bb01dafd921f67f6cd859d329eaa5463+67108864 931a95676fd8ba998400abeb98ad293a+67108864 3e0b9148a86dc17ee1ed7b5d44f394bd+67108864 63b29d2a9bee8cf79d63ce337feee7a9+67108864 bdc851ade507ac37df5fc3d3d869a996+67108864 fe054d07167887c82d439c841ae352c1+67108864 0107f8ff057ec92321c25060cd4d865d+67108864 ea6bb18766a302faa044cc2357ad1f3e+67108864 487fcc9b756d31eeb5ca6c2006bebf91+67108864 233f7ea448da931b0385a63aa0b3c1a3+67108864 fe1d37c64f7e48c69e4565428bc78857+67108864 4990ba572fc9afa7f325052b51d0a47a+67108864 423266abfb6cd3571e8e66c2b30d972c+67108864 8a83c116090deefd716b983e63ecd5af+67108864 8e9b6bb307f89902d414223e9cbd7d5d+67108864 e18c8fd868d41face78005a8b255bd3b+67108864 4a60796fad1c71dc21c23a74ee658421+67108864 2d27fff66b21eb557946c09e9b930deb+67108864 524dd7eb012fd8b3b3f64b31f43115ce+67108864 512765c1f1685768a0d8bb47ae5225fe+67108864 1e11f5b0124b427d1bf462e7632296b4+67108864 41faf74a022e8b3a583cee0410be3c32+67108864 0b4a883486509fa81c8c19bcc51532e2+67108864 f708c01017b33cdc5f4d373da7fc90ff+67108864 602de9b6714a2d9b55b81670256bdfb3+67108864 87346a79929afc2fb5029dac7a2279ae+67108864 e1251b9b403eba79863fc82fdfbd943b+67108864 189117239220e41f0d02e8d7620a04a6+67108864 736705421734ba02be110da2808cfa81+67108864 295f181993c67d894145c2c071b4301a+67108864 ff84adf713707bb22c1eee95e4e27dce+67108864 d3c2aa31c1d8d790741e828df484fe17+67108864 7aa19cd3f187fa5b7dd623c2c09e4236+67108864 779595e6c757024f5fb2fef69d9b048b+67108864 eb3fc60541f45f8ed8983af5c2bf3b95+67108864 b8fe527fc513cd3f60237c6aa87b0573+67108864 1c9d80f46710684355577738e5a07a61+67108864 98a2d14f30435bd37e68e18fd13879a5+67108864 dc464b8e7cc454900bd1af2f73167ed1+67108864 3fc4b913ade156db548d8db91e03389c+67108864 677c1763fe8b931e01b5d76c66119051+67108864 33bb1e8bc7548970a4b3f7055fed6cec+67108864 43857b257e194d9a0ba6b68e16d81dbc+67108864 4662626ec479033f2c67f00c10f4f3cc+67108864 82b053bffc39afd1835da5593026c5ed+67108864 f090aa4a714e943af26fbcabfc5a8be8+67108864 6a655ff89d9a9e1f034837de7291fc2c+67108864 f74ae1321c93c6d3d0ac64cee951518f+67108864 d4c8f4b673100f15076763b856e6261e+67108864 fc516957b488877fd52fb5723aaf1d9f+67108864 6729a81638a2518956a3eb5a6a18fd91+67108864 f13eb8762ad4935d6cdf6c872a5afc0e+67108864 8f0d2b097917b4f052e5f9533a55fe58+67108864 fdc3ecfd8eb73392e1fe1aa8edc916bd+67108864 337387bd132aaa183ed5843f272a49f4+67108864 6a9db804b6f2087c186fbc8ff77c79f0+67108864 5fe678e3a9e49f4d8debcc56a4d04dc8+67108864 89f2ad15d704ded3af481430d26c00f3+67108864 f803716b4143c361087ab2a034e9abe6+67108864 4edf94e4dd6c5c6bfb72da385c2b981e+67108864 c5f51291f77b31cf93fff9e294ac9659+67108864 d938cf9124ca3f39eff966a98668b3cb+67108864 
aff6467b5d36539c8c6444107f08e959+67108864 da8a0ad7cd02d4f78df8ef8c8b61f8f7+67108864 9d33faec81bf171b5dae27fb380dfd8f+67108864 e4093751a7136d8d17ccb5034aced419+67108864 94fc879d3bcb98f726b2cfe018f3be3b+67108864 474ab1d4763a090f4a361d660544d8a6+67108864 8af4c1580f8c0bed92d3b860238a37f9+67108864 63b45b164b25c4457f672ea2095bb234+67108864 3ebf649e0b24f32487778caa41f24442+67108864 9719bd418db412587e6d66c75c3a6205+67108864 346d35f73e586a9e5e2573b336f194a4+67108864 efc845c053c037d89d9bcc059a025db6+67108864 186c5aa2db96fc6b9393d7d675a11e7f+67108864 631b5eced91f74bbfc206cf5856e812e+67108864 916386facb14f03f9f3251ca7b07476c+67108864 12844c228114136ab850dd20e11ace4e+67108864 ed3255e55123b2ef21f4a7d2f2bdd644+67108864 0e31381b92d55f99ced79c14281ab52a+67108864 fed7f83c927dc4e5b2ba63e21b1736b5+67108864 22ab11bc0606f0d041e34f4c1139a209+67108864 20bbbc8375b015e4a89df74a1566ef47+67108864 7d7752212e5226ae570e03c9b2700fdc+67108864 cd22f0b787d25927062cbee33bbe3371+67108864 40baadfd1426510073508dc0628f0794+67108864 181e4bf30a8f77da23c8959e1be70fe8+67108864 622a02d725eb682b81dda117da700ed1+67108864 8d9f0c71149e9c1199a951d452ea0d02+67108864 1814f3c34ea4c42495a79205816f169d+67108864 4f4298fbaf1dcc46ed7c3f410a065d45+67108864 960e592059027311a32d1c1e97bd7260+67108864 de9ffdb227add673b59b10bc19796360+67108864 2736cf83c1bdfbbfa742472b5af5e651+67108864 1269c0373263126cf938de481aa9b897+67108864 fe3fbc87e7a23f0160a69c0ed626082d+67108864 e276c8357f973f36b45f51f0c71d5aed+67108864 1fc4414ab833f2b0cb43c5c067aeb39a+67108864 652af913139e3e10555bdfc56ae16b9a+67108864 c3119097a1400ec0897abf917ef8a108+67108864 69a8272c1d1f14078e1d550b611bc985+67108864 23675ce9f0120b2d89289e253e92ba8f+67108864 f959c9a7b0272c49ed1f084208694276+67108864 b7e9bebb22287722cd96cadcc4eea2fe+67108864 15acb83d077045ffc1b8458a48a6398c+67108864 c4f0640ca93090b7a06df55c66110547+67108864 275e73ff5e9266d5d07fd4c9f0b6256c+67108864 2532625da1f8a0a444ef53dd0d196db2+67108864 7f05e653e318b82ff410b83e6bb0b556+67108864 d6e133ca3f00f63184ce80a53fec3107+67108864 b8daa3bb4165f81d53aae24183eb6608+67108864 0a8d711d220316af33b0ec0c1818f403+67108864 f9c34cdb6b97d2c5dd58430d324e7f05+67108864 ddcc459d76f637cec850257ab5fd711f+67108864 a532803027b7545065855b827ca760ba+67108864 c5109e5fbf248ae1145b45ff2e137a86+67108864 088bba73713732e628ed664b45a80878+67108864 720af32dd389f70a969d6e75c8442d05+67108864 fdb232e6af0bd7d28e2b5b07c62e1aa5+67108864 7a50af59afbed238c6bb61dc3ac22675+67108864 e3831c594aff61c21fbdca24228c70f5+67108864 b64ee908abe5dc9b59db0e928833f81e+67108864 7cd627c229001bfe2a2771412125ad9c+67108864 2eb73d895bbaa6e6f704712c5dfca62f+67108864 5fbe4f7adcb52ef8afe2770c3f444d57+67108864 cb00a9a59fe9adae4c19c55d0f67cb23+67108864 21309b59a02bf3956b350c33f5ce016c+67108864 fdc90f4032fbf373b66fd1a1d16cda0a+67108864 1fb14dbb013e05b7df0ec7275dac673e+67108864 a287d100a50408ba94ad3c1956445e44+67108864 bbf5b0e3c7d1f855d43c1b3ae5713cc7+67108864 66364beb6181a393943f61726e06a63f+67108864 71a199035090172c2c48dfca851fb77b+67108864 c85f3d46426e932c3ae5f20929f1d412+67108864 413053233729159c37ca20b2d325729e+67108864 16ea95b5143fac3d4894db0f6761bc07+67108864 65e3ea0a4c2192ba1b3dde341beb8b8e+67108864 9be1589739885d435b8208ec3a5608e1+67108864 19a2d0b16817c6e11cbfa36f0fa17cc5+67108864 e5b38b84741d4def69309c9c66532c92+67108864 1cef5fb113965f16ff73f0d74ac5f6f2+67108864 57a91e408fc820b8cfec6d2733f5b573+67108864 bbadd120a6bce9d182009445d90bf403+67108864 d498136e6a6b6821df6241c945245a72+67108864 a3a7f8dbc36741da81350f2fdc6f33c6+67108864 0459e23c9c447e5d6cf4a733a04de961+67108864 
2dda2d49a05aa1fbc58c3ab040281bfa+67108864 30b7695c33688f894a83f3cce0b67da7+67108864 8689b86276aa67441da2602d13484a16+67108864 a77f3e13c2744bd912ecbc94ad5aade4+67108864 10da7c2337eb393cfd600368862ba662+67108864 ce7e2fde61772e393ed0d7c31e855be3+67108864 096b521bab8c247f74789d070e771807+67108864 1019c2b793990d6aaf7d81bbb2176ee9+67108864 54ed19ddec9c9c553f59ebab3a7d5dbf+67108864 094fdf5c30c3d805c38e769d5ee928cb+67108864 1d7ebd59cc5d0cc9400d6f2fb9e29594+67108864 d5cab35aba572e070cd870a19c0beec1+67108864 e418e25ea0e2d1dda947ff716d916529+67108864 a568b248c881c740a444dabe4a8cba1b+67108864 25688b79ead2563dbaa5d21e512c1aa7+67108864 4fa44475f1dfb2fab6dfa8347f43abd5+67108864 c37860c180fafb91feb9187acd355c2d+67108864 585dfedd5202b1084484ee85dbaa5ed1+67108864 ac00a4785238f69fb5005977d36d02a0+67108864 a97c8edd59cea65c679a80966ca03482+67108864 2b1207bbb44508f0738dfc60881c2e99+67108864 f0f698122fcff73e939f6b6bf22f9cf9+67108864 792a5130188e5504e23bb19dcd748fb5+67108864 8a50dd1f26086f2eba93a7b6e7035b11+67108864 7fba73104b7d27b8ca512746161aa95c+67108864 52f549dd074baf3fef610d2a81e8d1e2+67108864 e8ba8562c268fcd5b2edf5bdad76d319+67108864 6fae9fa89a358ce5c80c339e6b576d78+67108864 48f1f2fc7d52618658bb8f652fddf9a9+67108864 5c6a4bdc74ef2581d3fbf391a8d587ae+67108864 411afdc89fc1435d7ecd1569e474d6a8+67108864 a546bb78c4913f26cc4b1c8ab91ad277+67108864 fa45ef65dc70d14160f1efaa0a8e2687+67108864 481dfbeb675e469846f0f69cb7d36d73+67108864 1ceb17f51950b94673950f088842f33d+67108864 d1e4769eca2b7eb98a84a87cf6803cf2+67108864 f13d8deca0cbccb51fcd1a4ab40ab0da+67108864 20873c5e390a5552511f8ed98e2d49de+67108864 767eadf955a415988e7e1f83c74f7a9b+67108864 85562d1acff2915f0ff6d45ad87fdef1+67108864 12547b8f30465624e8883e25e870892a+67108864 f2dd149abc688678a5132be30af317b6+67108864 a5190593f6608e3747e929105fd601c8+67108864 101835f2b47c37fa0740525295019838+67108864 107c3a97bcc74c54b565f025068ee9c9+67108864 bd04bae6fa6e5a8fbfa9dccd2e467952+67108864 cf907435f4f5c0446fe211049e211984+67108864 16d183f23f46c6cb4e492c2a496901d1+67108864 c831967b607bcc15f73a8334ea4f1501+67108864 edee1bbe2a9f0126e45526d9bb421861+67108864 011ffbc947b2aeca6a75939ef16e4b1b+67108864 68d847961bed44f8d89bb26f1bd0af35+67108864 b6d56c74fe7d25dcf0c28df056090306+67108864 24da1fd3c8d4b91e3ae907a25d156a54+67108864 4a61fdabde6a07f7e5b2a4cdcc26d013+67108864 b861eeaa168860e759d5c8f24712f7ee+67108864 a266e80c9a70f7d4937384aaeb5ee360+67108864 f8a5c5d06023f4bf75eeaa1eb5bb3374+67108864 f228202d9f58ccdc24c1dca57fa5d77d+67108864 4e61e6dba9ff5a6a4051d48b907be4ba+67108864 2be5bf4cee45ab1c4400a680df60f81b+67108864 dd66f99ff5054cbc8cf54d3c8fefc5e6+67108864 dff0cfbd2ff138d4f9fea60a96fb2df6+67108864 500ca6e6c15d85070900b6fdf3979b76+67108864 6f36698535293c13016b1abd0ff46139+67108864 e9dcd509836c212d2f6d8274891c3c41+67108864 70d0934888c6c587addcc3927bad38a2+67108864 0c50fe89bb20e3e9efffd1a0cab6d084+67108864 d4e349a2a674497462202d061932508a+67108864 6bc71e5e68fc5f83370173073ab4ae1e+67108864 a00a2da32f91785cf75e6482e59892e7+67108864 397510c84c36e9c234e4c280b06862ab+67108864 e0c00e4cab678098571417c9fda1cf87+67108864 124092481a8bcc63fd71fcc8cfa84795+67108864 0e87753f045eeb278f2b257fa53ab510+67108864 8168fbb0188889addbb3e9b68d3b3593+67108864 f6a005c9723b4ddf4cd5bc1811279fc6+67108864 beca840b56f1ee8eab55366750fde36a+67108864 944d4724fdf35af10976624297ff3343+67108864 d82f71afcdf80d43ac70d43d18d039cd+67108864 7068a4db9fbe9a06e59350f634fd8855+67108864 bda72fa7ed6c0f07f149deae16120901+67108864 b134a0ad3152051241bb63ddb0ffa0ee+67108864 afd1344cd6247583b2e08b42a5b495c0+67108864 
6d71dcd93c01a8bdedb1a384cc1b47d9+67108864 525f86b84d3f9602f1308d2c82206019+67108864 3c5edb580b5d56b8fc92b49681e22db6+67108864 8fb6f50d1873a1974b7c5947ad8ffd5b+67108864 fd510dc5ad3116a7b4424a405c9e0333+67108864 a5e463f2679cb6f4bab936e0e372b8ad+67108864 7fa2c65ea82c0bac249684bbe4ee3b33+67108864 d3c23cb68b91c0d1852e133c82881eeb+67108864 0d23ac035da7e96d139f6b6724fe6d94+67108864 e4bdc1e7327eaa9457860179765b1a2a+67108864 3838edbbd807bbe5c75379e6f7ef53c2+67108864 ee09b2bb5466e520d98348f0b4e5a272+67108864 4f5015d61f01512653964ae3d58cf2a3+67108864 c05fd85f363942029b17fabca47b21fc+67108864 93ba1a2da4ef91a9460fd65895b867c4+67108864 d6430bc6ff93632f3c571c57ced084ba+67108864 a59c6864154616b9704d769f4437fe48+67108864 29f7d541b7fd363162189ff4be354ec3+67108864 142b095b33338c13fe6a0cba04ab8982+67108864 9e4fff3f8f5a272c4e55d79e899acfed+67108864 5d782433b0615d0c81f40d45b5ecf4ac+67108864 a1ce4f9cf8db339593655aaf432dc372+67108864 814f4ba561bd3459a7da97a7fca5f3a2+67108864 9a663484b49fd5a5465906760f4677b1+67108864 b4fe2e513982a4efb17bf442ae8abf30+67108864 36ab4fbd80eb1525f5f078c0bad27961+67108864 888389ff68d5b71d3a38d44d2a5f5321+67108864 af94eb7d687c5d973c3ef6826986a377+67108864 7ff861173bc6125ac2729f82fb4f76cf+67108864 f04b119f090e7dae55e982c94dfa5197+67108864 67b095174cd6d4d5a97c4c10926488c6+67108864 54963c6c726ae113457ac8628493bb36+67108864 94b063d6778371dbdf74c81ec6e00705+67108864 efd856b38a4127578ef3b30cf2f8111b+67108864 e9a78aaf8290e786d31b52e3d280f25b+67108864 acf39db50c314e81d77063e1363b0d44+67108864 f5ed13b763e78ee4d4ef45c754ddf2c6+67108864 f0a522930f01ccd16a21c4c836d81486+67108864 638503d44d5f70646c90cd9c1e5ddad4+67108864 32375ece74a773563332b435121f1f69+67108864 206a739575b9f0864abd4f01c57e7a6c+67108864 4b8ce29ba1e51c563b67abf72ba2559b+67108864 5ae2edd2acd049b2bb6d2bf398e0ea9b+67108864 f142e90252904e409bf460290d3ef7de+67108864 c168023fec9fc0d340f647a31b49d68a+67108864 d46018096ee511db7264de70de2de130+67108864 a4ec756f5ec69838074d7c51e3ce9366+67108864 657b7662f9fa7ce790eb6b6788bf4fdb+67108864 88b268dbcd8cfc68e322a6593a3f85a9+67108864 f6abb1603254a9c704c214208e45ecd8+67108864 53ba009b6a5ccd6e5001c099c97fc3e2+67108864 5304881bcd7ae73781a53a4fdb8f013c+67108864 fcf742e76f89d28953997eb339c4d2ae+67108864 e732f6fc4718c7ab9d6832641ed9850c+67108864 de4c892de00874bc9cd07918393ef6e2+67108864 9abadd8f9c1dcd464f17c45d65e6c4ab+67108864 7f1971a1fd92794421c07d50771b404a+67108864 349595e945b11955f64fd8899611d58e+67108864 d58073ab66e3f676a560748bf9898ba5+67108864 8f8b26f5d44408c19d0a3cab03ef67cc+67108864 38bc3e0c4925c47fef039917090f69a0+67108864 0ca5058c7c829ff871ca3868332b662c+67108864 5ae9155c4b1a25c37ea00d671324c6c4+67108864 7028834108fafc13240ffe2021cf7027+67108864 f2e194ea64bc8a0dbd5a86f085f1e934+67108864 298bffc3352973f28307d993a8973c1d+67108864 5e43691e50baacabf4a3c295649f2b2e+67108864 727520dafcd59f806f52ffaeaff02bc1+67108864 706789ae77dc03640546c8e83e129528+67108864 b5eeaf5cd6a94607ff7247eac7eb6455+67108864 7f8b151b8ffe117beea3b1934f9f419b+67108864 bab5b9d0cfb54f33e79227643fbf0b65+67108864 a59d082d7207bef94b4991215b134a10+67108864 785056e6a4426076b734e608cae86737+67108864 47117a5988bb128528b528e4d541eb1a+67108864 1b7085a666ecc988bd37eccf3e93c4ed+67108864 bfcbe4d5f5d7e4678f198d60996f13b9+67108864 3f4f1303a528c16d0f930da14a16b230+67108864 9e9140f33f9ee9b5fc3907b58eb051c8+67108864 4fcb1c3af4d85fcf0ac1095e14a42bd4+67108864 a6be2e79bff044c13ce21c2aa9c95e08+67108864 9e03214d420e916b21e2324a432dbbe6+67108864 1f0b58dc5d2b1497cd83fb9b1a2176f1+67108864 44c835860f76c96f9e0837e5bae8dd54+67108864 
f9d33a3817dc95252c3385a9b620de96+67108864 c9fb1942eb1ca6f2df777a537bd717f4+67108864 e56f8e279cf628f2575b9f0b8df32df6+67108864 febf0b9a281b32a2a20b9611264f93e9+67108864 e6d5ad53ed0acd1ff102f22fe495a7ac+67108864 75435e66e302aba8c7da88b547a2517d+67108864 3bae3dca82efdf59e588345138b08498+67108864 e2e2c0f5e15558e7dce55f20fb52e254+67108864 f4c5394e402d38d8d4c5725ba7eef920+67108864 2580fe3b582a0b4c17a4ff4bb9ebd673+67108864 ed5992cc4a9cf4daabd5cfcbfeb4b896+67108864 488d4c76a978e3265bdddfcde78fcc82+67108864 04fe93c825d53638d60d38953b372557+67108864 ba6e44ea4f79f61f907e36253c0bf958+67108864 41213b819b4bfff9f17581950193ebe0+67108864 269b6e49b5593ef54b3d49032e4edfd4+67108864 e267c0d9870a736ac13c5a2a01cb23fe+67108864 804a37500b781cfa66441090c323c677+67108864 33195f727565738c7f2e4a70c6920d7c+67108864 e7c94488ea6b2408b8a81d404bab5408+67108864 48b336d69d1188f5ef6523151afee6f3+67108864 6cad6bf178aeb3f6279911a1153b0bce+67108864 9a674b1c710c52a4994a0670610cbf4c+67108864 f6a38265f9473e02057ac4be6bee7170+67108864 833b1f028e71ab42dbecc4d183da4b07+67108864 6839ceaa235277adae36d5587640df0f+67108864 c4835d24adb63f08ca4311469d82587f+67108864 040e2f87dedfc549528ce3de53688dda+67108864 2984c84d1d32b7f52ae49bd838b8c016+67108864 33fce73dee1e203326ffff8fece4816a+67108864 f59f8f10cfa7548df73184ff86acd661+67108864 fa82b6721c15118eee3ee95b41ebc19f+67108864 7b6db4c134dcc09f5a851d81d6fe2fcd+67108864 178ba3074b954c0aca5910dfa78e341c+67108864 f44afa76eca4fac8b8bfaca8b14401fb+67108864 90d717e686fc0cfecc8b92bff5e3dc3a+67108864 4c497db5abb4e6c08762c66b51be2864+67108864 3f5f220f3749bec9664f2a5c22714f0c+67108864 36165d8271554ff80e9ad204d6f41c86+67108864 0cde3c478fe6d14dfc7238f017af453d+67108864 d22407672d47ebf442725a0365a4f9f1+67108864 9ee04e14feb275234673409d2efa97ee+67108864 1431699e15fa4cef7dd54e6ed69d9cd5+67108864 a91b69896d9cfe2be169f4dd1e585b24+67108864 8c35d02a5b3961f7717828239b49d21d+67108864 c4d11536015b05bf84926e0cef77dcbe+67108864 b14f3f000f174c9b63de76d6b7f7e2c3+67108864 2732612254c0b8fec0c533017bf946bb+67108864 87b760ef4b34ac9d5ecf0c32a2d07aea+67108864 7566c2a133f0f8c5dd31d49a6ba8d5f5+67108864 28de06e61335558872188129073eca90+67108864 384ec6e77e4581804ef5ebed60c49806+67108864 460f0d01ee80f6936a36202c52d1367a+67108864 f475c729d470dba96ad5a355223fa3f8+67108864 75951867b767478687a0ed04031005dd+67108864 988e9032fc26f255d5e65f896f97c386+67108864 c72d6fe3a565167215c4b8725c71d20c+67108864 03f2a74dc6d301e7eb1440e7af0e8aab+67108864 0cad7f4c65c691932c7490ec31f32a2c+67108864 b86eaab28570a18618fcc5551589535a+67108864 5c2e755aa09e7eb494fe2f380cfc18bb+67108864 7b0314633fd0276ba4a7cfb18faf2a63+67108864 805596626907ffea2d90c29d1a8bd183+67108864 bbe42cc13151e29901f2ff81cd16ac10+67108864 96f514038c76fbc50b3325ce0a50e260+67108864 73ec6c69f79cb39a8c6813d6f27082db+67108864 9ab13bcf77c6874f6843b2ce9bb18cb0+67108864 4570482366c5811d199f47a2e92ec727+67108864 dc1c567a96316d293feb2fae90a7b8ee+67108864 7b2bfabe105b5c19e70bc18cf7c62320+67108864 513593f9a7b1b1465c8e20a4daaa8e2f+67108864 6442521c08fd3b7b6772e71404c09d9d+67108864 601eec17c302eef232cdc707534a6c61+67108864 2a7822d5f641502312296d507301ebca+67108864 0805d7819623d6af391ea9e072ce9bc4+67108864 152c643d2ccad0674c38a23368a4a1f9+67108864 b1a4f45eba2a13984d9b095ab5a16e73+67108864 9cf64a5302f8b9c21c01dc5f068bdcd0+67108864 0e82e1e3f2f1390be5ca4560b18bd96e+67108864 6213cb25efa2a9bb524be9a4f273355b+67108864 a04a71578d23bf26517ae21d101f9ce7+67108864 c875cbbd27c30d3e2be026fd11b28011+67108864 1375d33fc6949a826a321ba2c2d21d05+67108864 aec41b74054aac99980f2047d462757d+67108864 
e672ff1a260ac97238a25a95747a346e+67108864 1cf87c401a5084c740466f8a724132b0+67108864 771c370c42eb026842a56f533af4b8e5+67108864 5139242530c8feb094f7effef56e7305+67108864 1d4bd13959bb21b28b48363fd367fbbf+67108864 e920e35f2fb67d9b34b012b95d8e2c89+67108864 8facf133a1ded0c098fa41c953f63b89+67108864 6c32393c950f9062a02ca4c13bd858ae+67108864 6b2e28a4fd767ef70b56589f3d2f29f5+67108864 f2b0bc69c431be32a87fc6e1c097e456+67108864 adf88e147e6089df2e1f44472d4523fe+67108864 74aded23685105b0a360e006946a403a+67108864 62b19d7d4c3342d653f780bdeba26e90+67108864 0ec5b45cd3a722baf10c683c3be0f1e9+67108864 9da6829a21b6b75cef6b5c6f17f5e07b+67108864 826ffa05850817239f4fe38d60c01ea5+67108864 adb1cfacdf09d7041a55433f32d32c4f+67108864 29e8f3c27f97a7ee5f00a15196000f29+67108864 7a5753093a96c622e7bc5bb762bf65c4+67108864 1307b7902955bc0588261c48d684dafe+67108864 88d27c9d828382625103e1b1b75ef6e5+67108864 7345011181afb92c1990ed7656a0eb9e+67108864 22ab24fe279e19a3773a70f42a4b37d2+67108864 27446b06a710072e7439388ef138749d+67108864 27353af04e039140bf5560bfb12b49f0+67108864 2cf867ed85d88a901025580c0d42c163+67108864 a2cc7481a86b8577cd9b1c36d81f9c54+67108864 63fc71a9df52da41f1879db40ae4683c+67108864 cba5ff71d3b6c3499e8c9f9ef99484d7+67108864 621818664122058078b2669d0efb60d9+67108864 544590a66aeff05521be3c0c51009803+67108864 a56212f4d9e0072dd8b2c6eadf7b2d9a+67108864 2b19824bd9c1f9bac0c87163f671c56f+67108864 c039c43ad2190e1be0cf68376bef9e76+67108864 6c616beba20069803ac30b1be56aeae6+67108864 28c4d7d0e0f45cbc9bfac0bf72beca95+67108864 a3b74f9d7d729e0443b2acac559aebe2+67108864 67d7c31bb19b81d190cdf58c92a0762d+67108864 6ba1fa534fb5ebd4382d2f37ab9bc16c+67108864 5c8e4e4404b987be63f37942f38b4d17+67108864 b3fdd9a8d97720199e9fc8bdf5464ce8+67108864 9e41a0725995633c4199fb5c79705146+67108864 d6ed794aaa78d7e338c9b88980798d57+67108864 bec7ca93e898b711f8705ed27815bfed+67108864 73d9523c774903cce44f37b36e350b33+67108864 3bb892044528096fb01358fbcb5689d2+67108864 ba6a6d4afd22b52213df85f79a30d251+67108864 b1b5fddd4d4dfc8de7b6e9edd888f24e+67108864 ad1cfd8c575b1f4df0d9481bb25bfe10+67108864 eb42bc382bad6c31248953175c57fbc3+67108864 f7ff3fbcddfdeff776851bf7f0b91708+67108864 096d0a35baf62c6267a588ec20b0b295+67108864 213f9667d41a694c24edeb6723817425+67108864 a1a3102d7223910a50f343bfa9f567fd+67108864 cb20b98b9cfa88ab1cf0cde7e92a963c+67108864 cfd396b2c4fdb101d220385445bdcc6f+67108864 feec74cb51b17c68e03caeaf520a013d+67108864 3b0eb66cd74ca88137ca5f3ba474c653+67108864 4e564d3d903811897e5970f31e20cff2+67108864 4b811ae252ba5070d60c6dfe5f6374ed+67108864 139ab8e8f46ac3bf236c23de49925413+67108864 e8fe58e8624815a50badebfa4d6bec2a+67108864 3015e8601d6eb6e42e9cdbc64664f32b+67108864 d77de37630cf3560ad50d02352d0c514+67108864 abc01fbe76ef45bdf2835ef06e4541cc+67108864 4dfb8d4243e6049028dd1fe43fd2180d+67108864 3220ab3a86613f63e0e5d4bc5a02afde+67108864 fd77865c46669253284d781a553b8a57+67108864 851ca8a64dca3edf8eb645a0007aa9c1+67108864 16b554ca3194d8b58cd9be8df8b7a321+67108864 9d0b9cfe67898e7c9f58050a5231e4b0+67108864 757a90985b5b92b16c02642f7c36fd74+67108864 3857a258764c94340920f45da85541fd+67108864 40326fff73456df9b895e6ff137fdccc+67108864 63f1d9a514d48754916bd33722911176+67108864 4820a4a74d6d4be30f4cca1eea2fb507+67108864 636d058d10bf9791c3092a8443915c9a+67108864 916f4f6dd7635b871325c288fbde0475+67108864 e7b2a6379ba0d49e7017bae5d4e5469e+67108864 c17c35e312dba5bd13ffd23cc2c7d939+67108864 7d80a58afa6735d113ffee5228a87775+67108864 99ecaa8b85b99e85427fa76657681647+67108864 602ae0f4abd8be52e76e9749ab8d8766+67108864 546e66b992c13e1f42fb03ebbe21d20e+67108864 
46be78022b380d3b1ad9a7d6d48d4ffd+67108864 632596cde5b56aa0ad4e5a364669bf30+67108864 df8e7efd77d29fc70afcdfb355bb7002+67108864 ea0b3c954abe83b54c8c2b82b4b29217+67108864 1b7ff88f8491f39efd6bf74caa1b8ca2+67108864 208469377ac08802354de7a56d216c99+67108864 b467a3def115f956b617a4b5c066bd38+67108864 1208f0cf2e1953ff2d218525a0e5267f+67108864 44d7cc36d50d20860983f5a0869248b5+67108864 ac5c5552be8df8b790863e15bb50f3d5+67108864 0eee3089f9e662f5593a5ebc27b00532+67108864 5debd7be66b6480762a64e42f03aba81+67108864 e6a2bf3e08fd8ae2532b5dfc07e61f4c+67108864 64cb0b5d826b614aef5d95f17da8303b+67108864 3df707ea75c92e036c1185f046d5a2a6+67108864 50a7f1474a09c58d74d64605bddea6ae+67108864 874d107937f315b0b8169346b0630074+67108864 8e67dea16e96a62bc9902954d98a0f66+67108864 8d97c8703fac17ff05d2493233a67db9+67108864 0b21830cbcf73b7742837500b4c815c4+67108864 f2d055d36a926d74b87235c598cb4bf8+67108864 8aaf0b8589579bdc8da894fe167df948+67108864 fb64252c12e1fe1b64762c7f2c476dbf+67108864 3e23aaebfe46253e265f1cdbe55e9ca7+67108864 3ec7f774ce71e3d5b05a1b428357879f+67108864 870c74fbba8c1c5ca855374e34227770+67108864 c847245ea26999c205496a2edadd1f0e+67108864 c9c01dbd3df4a60e85d49a09c7a78cd6+67108864 1c5409de5ac0dc6c56cbcadc39de2c88+67108864 73081aa5140b4be500081ff8097478ef+67108864 edd831590a9ea992557dfae2ced26ec3+67108864 45d83cbc0c27d34f54e8ab01dac78acd+67108864 29d0340872b053b07c17781af001d700+67108864 6c707bdab47d7f86926f3ca83d46a5b3+67108864 c83a0779a209c7b6d36008b3b5bcb094+67108864 022481f50f42cf63f4931425ce841e6c+67108864 3727c014ebdfa8fd044c11fa7e5fcdc4+67108864 2cc1df6a91333b4b9b1a1752d298d7f5+67108864 6455c34e8cb08007b5b538aa96335cd4+67108864 c611c113c44dbc38f2b7dc0e2c8e789e+67108864 72d91fa8c3fd9ca40fe9ce4abd6aef3d+67108864 269ba993ba237bf6fb38573d129e29be+67108864 c2647496d2d5e294a4f59d0b321e597c+67108864 8652c87dd888540694383e6c37fe61e6+67108864 ba173508a36e70695d67f82bd002ee75+67108864 6b24483f7cba1ee77e92965c96eee803+67108864 7dcc165fac7975c3e390ab595ea84215+67108864 34de3797f59cb379bd7bc54eb07c2159+67108864 2adc45907715d755fd4f422b841609ef+67108864 7a46f799c381c18cb2ff2812b5b845fa+67108864 1986cd180cd8e948cfff091ccbb656ad+67108864 622cd1910b7f81f0c7170da1a3362bca+67108864 3aa879b74c7dc6bb0858636638d2b5e5+67108864 0dbf410b5be606ff739a480dec8161ae+67108864 f4086a5dac293be83bd889b04b7fd312+67108864 1f63c6f777a762cbb96169b2278ada67+67108864 96b5c1defe59dabbe1a380a8bc6056d6+67108864 84b13f87fa6c3ab2d04eb93f3505c98d+67108864 dfdc2aae1c1f49fbc36b5ab9573cb64e+67108864 318d865d048de4b484291b88bf36af44+67108864 651188cc20eb0df1c8945c533fa2892b+67108864 a8256ef0104dad714359c01c8059499f+67108864 5534cf3170d3cca00e8926e811ed31a9+67108864 35097b20dae2e31750cebc3a4046ff7a+67108864 08146f4dbf346719da9351d806363c3a+67108864 dda60404102211f6f8958f1f044e2a4a+67108864 f0353b389211bb2f3787147643e0293e+67108864 42a778507d32f3d16563d23b17c836e3+67108864 770af5bcc4f42e26bfd94f11b2de1111+67108864 f1d6738a1979688177fb392dbc3ea8c8+67108864 40be76d3cc1e1f0700ae7bee5b084b88+67108864 8560b693afb6ec7ab75032819e1a0737+67108864 9ff467396156130788b0de8b3abbc835+67108864 fb81044f46ee376b4ea7067c0e9474ad+67108864 5aa3aa18a8eb9fb635b218b5ba85713c+67108864 4bf897273b900fc96efaec8b27266257+67108864 601cb544933cb27f9b8322d609c24379+67108864 0f845c76f2cebf936abd4a8acee42076+67108864 771a5507849272544dd28835aee1baa0+67108864 82ad48d9641075f08789f7e9dc4fba26+67108864 30e0fbcc1fd402d6d4ee077dbbed9985+67108864 9c3f4cff8a0292cabccfe492cc978c6f+67108864 6413966c9aa54f96e2426600936d72d3+67108864 cf903ebc0656b705c26d869d455405ee+67108864 
0ed47f82adc7a5e5787715e6a43d9fcc+67108864 2a92d60161816494b4ccbe4d727e021f+67108864 6d4c65315c56f53ad4955b2cd402286d+67108864 3c6d8a7ccd14d9aa75d4ad2e74b7bb28+67108864 982dc1ddfea2915a10dde69ec06dfa76+67108864 03b9c546519fb29aaac3a4bee52414b1+67108864 7803ff4e38dfa625745189e39876c155+67108864 844ea317c0f0095e2f51182702c393f5+67108864 2ea4a37141cf743d79fd884b70f4de05+67108864 c7e2a60485c19ffd6ae4cd5b61b1cf48+67108864 791b84800ed2cc0869bf33b7d85cee77+67108864 6f2b1d1f71f26c1f5bb606034db357a7+67108864 ac185d4ac6b4a85d2320662aef29f060+67108864 2c30efc1fb13aa0043decfb8bc1d1463+67108864 b421b08e5745d876953098e2f2baacd4+67108864 307e458977fd32d61445babb4310030b+67108864 3a6d5736dcc1ef9db6c694dd36e6cfa8+67108864 7ee43e9000accce3462428668a696cde+67108864 a15a30673558f6070996b77e5f975285+67108864 d358ddad7d06cf521cf9d6657b51cec2+67108864 eac6f5c1a8e2cb363c89305c7bfdbd6b+67108864 e5a87afb73e69adc46d79f57b192b7da+67108864 bc7cf84f4d954131ac5e2df31b31144a+67108864 5a9e7bcc8bbf16f3d64545f77e446642+67108864 b1bca9126d855cff7680d960f7a67a45+67108864 15eefaf5212822bc3e0e06916681fe4f+67108864 2ec224ffa681bb8ebaf65b984fe5ac2d+67108864 c5a94822c9c89f4f6627d288747f9b66+67108864 52aaeee57527a75a3bbbe911100c0361+67108864 80fa3e2b96c7823d8e9979d67f41e98c+67108864 b361c3601325ae469be51356e5804d72+67108864 78a933d60f6ca62c1df56b9b11a19160+67108864 98c26a9f728291a50055330fa4880f93+67108864 4f5030e434da715a0e5e2b6f85f0fd46+67108864 06f2acf241fc5269f94d85533b7417fa+67108864 8e39b0432f784ab407735d0d9667db20+67108864 3d6d01636a230edc77fe2d2cfa8c822b+67108864 af54747b06874508412e92815d05fd54+67108864 5f9beb261ee7800d96cf0068707b1419+67108864 8f2cfef40aceb50cd10e37d3693d340e+67108864 e8871ce0905050da9fdd140f96219b34+67108864 1c907eabee3b1eb051aa5bd16ea8c40c+67108864 aa50715b07f837cc3f9e0047ea08ba4d+67108864 d4f89f368239b077b28902df2291ec7d+67108864 513c7bc368665fbbe551708bce375f54+67108864 b5ec7a608fdf46f63b2aace86b22ecbe+67108864 37180c46fdbb4da4ebf7fc85604cb6dc+67108864 5594f89127ce8cd5b5b65b15076d0017+67108864 85ea487408db514720f43e8e61013a9e+67108864 a58bf72af3f393a3bca9f78502f39f09+67108864 3d63ead9a1ca876fcf472a36e437d5d3+67108864 e437dcbb269983af98f515fdff1b8c7d+67108864 76e1b803f60d796a15457fd006290246+67108864 eb4473b76001e5c65ac412099965d200+67108864 18e85dad25031b80d870089342d3fb30+67108864 35a7541d53a64feb7eddeeef2535586f+67108864 c2fbf1f0f1fa1359db0899683894f33d+67108864 6794ee809f5e58111792320303f1a8b2+67108864 d848092b6abefd75c4af817805da1cfc+67108864 2f7471f842739e00be1bdee2c3af96de+67108864 33d3bf343d5703851cf571cc0169acf4+67108864 73d934696c7ef7d873c13a61c6e93610+67108864 eec4ab22ea8289a12fede9936d06c47f+67108864 f280a1f0d2afe437f560e2d37b3d6f98+67108864 d636a57ea0e9b1018424a2acaa6d6a6d+67108864 4025879d6bf913a95dd289efbbc1a289+67108864 d7a116cdc7c52a7e05658d3ed9e24124+67108864 e3221ca9702dc74d06f168840663f73f+67108864 4165ee4fd1dbdca4c8879aed8a2b47a8+67108864 81c0cbde0e9cb9b1b141b48ea1a308cd+67108864 bff9166a75515e5f10af5538dc303212+67108864 45b4ca425e17296da803a51e0fed70bf+67108864 60e983493191b0df26b65c8e7446e9f0+67108864 93590b8affca8daf37b17844b5bf9040+67108864 8621fb9f9c1023d9faaa6eb74fc54a4e+67108864 23275e63a4acd8c7af597fcb76df373e+67108864 f6c082b0018e2c7cb81b97dbc76d5276+67108864 c1de2367ebbb8f821924cc9829caf433+67108864 28d7fe0b01343515c98152458e7469ba+67108864 d30ecd76efca9a45fb6541bf982bb043+67108864 c19fc0b7c6131911ec86f395b32942a1+67108864 fee25c10fa170ef91d6716546c325503+67108864 ba852a7069532ec2b50d30a2b8680369+67108864 eb6f12e52f9742085af5b111263a8a31+67108864 
a3c5a938fe579707f06bbe71a445f57f+67108864 103fef8dfcc65c29b0c4c348d6b7cd5c+67108864 891dce541cc4c9fc0cff89231b7081ad+67108864 63434747d0b4b7677fc028fc842a01f8+67108864 4debf4aae27725e624d95e8d8f092790+67108864 b8b34bd97e512895a509222db622ed34+67108864 37e2b5b9f31dc0ed14f45af72827b850+67108864 fd1093368c508a9ccb7faa058087080d+67108864 3ec967bb33e9b55fcf32b63986399481+67108864 608325c1df26b43099ff3f539eaa8534+67108864 b3bb50a8d7a2af13df953ccd64770037+67108864 85debc01db61ef231b17d5adac609899+67108864 8e088c719257d21ca6c9c14c437ef72c+67108864 fd1c96ccde6743a911fb60a4433fecb7+67108864 a8f426581b54a0591d2bded643fde0f0+67108864 19ceaaaacbd811496d178ab8be3d721a+67108864 603b2fcacaaa2903771c5c0a7be15c6a+67108864 ee44b1f92ff9204befa52460d6ac6dda+67108864 366c305dd4378452ab3e9c7ed7b75e5e+67108864 ee826c5bf9d9d25c12d74cca0cde7cde+67108864 cf9ac2e024028adaed51a54e6dc70cd6+67108864 eca814155c1024562187292140170356+67108864 ccfcc357515eb46b9fbadbe80e8ff20b+67108864 3699ec34d183a068422d10b40aae0631+67108864 8daacf3c17a7273ce86e75fc555ecf70+67108864 b23cdd04b9fa1e61b972c2caef1577a2+67108864 990eeff6f56cbb12ecd6df721cac3115+67108864 2de9439dbf73f13efcdad3bb1909d5d3+67108864 e2cb816e92cff445a13fa9754c2bd2d4+67108864 892fec0f10624ae2eab5e4a833a53e3c+67108864 838ae0ca9da00f5c2621af49ec562448+67108864 7278188b1c82dd51460dc4b8c4557cd5+67108864 3c9d0798f3827f8363ae677bafbe9a69+67108864 910df3804a999d1414eb4806bd3b91fe+67108864 5ec3deace8be01db561690bf2ad6fcd9+67108864 42245551b2225e0185aee98a3bbc96e5+67108864 024d1b37f61012e8b11bce86417c5b66+67108864 bf9d48c28504eae8ad723c1e723e6c1e+67108864 daae1d560067bf2c461707a8063fd284+67108864 f9663b2d732c9e954a7cb728978f15fa+67108864 fa95a4fdcbd2128496b72c4ad304065a+67108864 bede18218d8c3c019b3bd2c873b94543+67108864 5ff434025926d30ae45c2114dc816257+67108864 b2cde4f1fffd89abdeb9f585108ed9e7+67108864 d54e21fbd16a3e8c38055a1c10ff6c1a+67108864 d49e4c6e77ee68fe23d34a679c792535+67108864 c30788084c79acf5bdfbf2a44a6486c7+67108864 142696943bd0289004de141776a4ebf3+67108864 23253a95877919a9e15b9496d171a990+67108864 a9fc4555af8829d2720a2b308772ff19+67108864 4d97246ede727b2dfdb72568397a127b+67108864 55b1cc3eb96ef940a8872d2a828abf64+67108864 70fa925c5553dd08873444c7d0ef1b8e+67108864 76c3db8722b92364f03f87eeed8fe53d+67108864 43ee24bc7c125362797acd869881fdb2+67108864 6c28ceb03969b041c1c189068f12bab4+67108864 7078ef25b761e30d51aee9116c5c59fd+67108864 88cf2cb45ee2711c1477e290a6cda42b+67108864 6cbf85a79fdeb004196684e63e45de3f+67108864 0dee67d87e5628caad9c3cd19e8bb38c+67108864 5313f75103edb9ab1eef9d0d9e987609+67108864 3d8454608337215314df099fcde636d9+67108864 2864e53673cadb6e1ed4fed72ef6360d+67108864 ef77259b68a64e9db567fcedf9709612+67108864 8bd0df0b2999c64d2866152d868dd4a6+67108864 3853cdaf6af0a85b0cfec54cd0d2e7bf+67108864 26d95f4758780a9eadfb0fa4b9fa9e85+67108864 bfaba46d337bfc45ae3dff8a5eed0d2d+67108864 52098bd03d910f2947747f744a7e7cf6+67108864 4cbb3854d9975b3e3d3105257aba7ef3+67108864 e06499f3a15a3dd2699f6942c2c0620d+67108864 efc4c80437412a4d5b0baf65a3b5550a+67108864 c574766c117b409c08bd12d82229f8c1+67108864 52af621352517bc7b5f7a37bba1e9a7f+67108864 f2258eae9b00f0837441c0e9ca024308+67108864 e57077ada195464b45117e44ce43292f+67108864 50d87f4c461824c081eeea80f9405783+67108864 2b538fca02bbe2e7d32bd0b58921e82f+67108864 2db2d0e983e1e3507de4cd9976a31017+67108864 7fb8c5435dd3eb83ce94e88278123613+67108864 67e61af3278ebd337d0155843024df19+67108864 9b65d127cf41e54ca6eb215806c3c78a+67108864 76b9faf2c24bc448817c1ff94f7e0aad+67108864 c04f7e671684c411d736a4f217e8c285+67108864 
c14a4e1dbef1112fe60bbc8515e0d6d6+67108864 851819a0f279d5b0958e5d718c5865f0+67108864 d1986a6720da6e1abe012637b5547d57+67108864 11b0f93f0f5373729978dbc17362ef97+67108864 8c1f042f1c7f465019182de726f02350+67108864 2a455b07fbb041db3cbf54ade540752e+67108864 647e3a6dc6b99ac5866d0d1c4e2e7352+67108864 7cafb10d4d750b6a70b6de5f064eeb0c+67108864 d6ae66882a0ca9bba70d847e66797896+67108864 0d01a0eee71ef4bee88f9453f632134b+67108864 ae2140209ba681364275a26e8eac217e+67108864 40b675b2b11e70de68407f0f70f1f03f+67108864 130d9d5d51e8cc5096ba667aa9364c97+67108864 380a783cf03f5a3cfeebef66aee5b28d+67108864 e56360b5cda00f91cc3ba4b51633fdb4+67108864 24d4b59098f119b5b9fc5818f16d6aab+67108864 0b593d049f80991ae59cd330123d42be+67108864 30826cd5f49acf23070a1ded8022808b+67108864 cff3def8b5cbd55e5a1679f9c8934923+67108864 2d4a11a94554b1040cf8904396b0680a+67108864 eb9a072fa8e648ff5ef10b288af5fb6c+67108864 8046144d1a921d88683205b893c9aebc+67108864 7730f957a9be36db7abd4962afb3054f+67108864 742a9f4fcde793b644c13fbb4ee3e975+67108864 a35832757441d24aa04836af07b4c3b5+67108864 0ba07a50ae988c3506b7fa07e509f5bb+67108864 b39743548f0a682c65f277764ff9b097+67108864 21098bc8efe18127c66082e6222bf7ba+67108864 6f21d4c116fdd221692ee09036c8e985+67108864 4f3e46fe795367a58ddb917439850237+67108864 3531ce816ce7ff198f668c7119d7e339+67108864 799778902558e13feecdb35008f64335+67108864 773a7ea46671db445dcff7cdb3641fa6+67108864 97e489ed1177fc6db932af81478b9a6d+67108864 987e799850a5a5027ec095af93e53250+67108864 3a531df05dbe8224cec6fec6f411baef+67108864 d2ee610b5ebc5e705a6029c74d86710a+67108864 672a73b64ae5009c06438b755549241a+67108864 1e7185a9cf5ba378826474bccc51370b+67108864 e489af9d0c3ffef982125ac3e016391e+67108864 5dc7dc12e9cf09150aa39798f16dda0c+67108864 764fae2a1a5c53bc9b7725b8d48e17a9+67108864 4fcbea54f6867700008205dee1a55ee1+67108864 2d7af75fae4b8fafc9198432a127d3e7+67108864 71c3f297de8c416025af0cde00cefa6d+67108864 cad383c3f7341c192398820fbe86e665+67108864 d3ee90d3f0e49c76fe7d5ded5fd8a13d+67108864 52b2a629ea9a32e9b3cd3bd153989b6f+67108864 267fdcb1dcfb1b721352462501444d53+67108864 283ac9aded198745e16d3e2172fe59f7+67108864 57025c826ecb06f0321ce3e0ed7b2e4b+67108864 7cbe3a01c92f13807b15ce870a160fec+67108864 de7f59f08ea07551bdf2eac2b5b346ae+67108864 e70e70e3dd37ec0e59e1109ac6f1b3b8+67108864 a64486d04c1bf082e27e323f3edbb6b2+67108864 3af2e24eddb3f079a43222f2c521a4b7+67108864 4a3ec05b28d6c7540660400f0b5e2132+67108864 d34ae3feefb83c1921cba43e1902a624+67108864 aa0b774639159abcdfd4e87cdbb22501+67108864 bb06b89f2760bae1e0205fa5030827af+67108864 e5f240b2ddec9bf3f9b685bcfe095dfb+67108864 120b67b713bdb4092424f299143ee075+67108864 f9ffd0f5696ed790a2cb0a3bfe7eac57+67108864 6c8d86f09658c53d0b0b30ee42482271+67108864 d9288b957174ccb7fba72fc4ea3d761c+67108864 ca4aa05a80f2a62000d32b9b3bec7e77+67108864 ce9809f906a6c05a0ab0fd87238d083b+67108864 ab0c00cda4e5e7e5ab426c40dff7b705+67108864 ec73f7e70dc8a41524c25bb3a48c34a7+67108864 1f073a660524c25e2613feb6f05eb72f+67108864 09542461e2d26915c2d6ad6d2ce37c22+67108864 2ae249fa2dadd829eb68f9706c6fdd5a+67108864 34a60d677721a4f6106aeda9486731c7+67108864 d5c3016a253edd4be3df2a2d9e205fa6+67108864 aefca039b38116199e66f3ca685ff711+67108864 a952a7a8e82db93aa5ece90dbfeb558c+67108864 df3e8d76a91b95534365ad17c7fd80ca+67108864 683acf860d1b972852697e75e82d4377+67108864 4b48aa46659ccc66948b1948920c8bb8+67108864 75921520ff8976da048e4f48b65713fc+67108864 b1f9b9541ca3df1ff6d644527853a56e+67108864 f482ed10f9bcc8839226c13374dc24fd+67108864 89f6a2018a94e0dc75efebbfb3298ea2+67108864 1116d391c6aa0168db01ae84e4a6974e+67108864 
3638536201513086e8a6ab97f6b28013+67108864 e0ae416fbd2edbea670fd4ab33f0dd53+67108864 cab05291139f2f9695f80fd4f56eb9a7+67108864 2074dc1e5b43560a7f0dba097f798599+67108864 499e50cc59a0282465d7f5d7ab00ca3c+67108864 c8d5ea8ac6383e69951ae03b92f4ef6a+67108864 028684dd6cc229bc52bc421784696473+67108864 9a05421a96b3657e72306ac3cc91a14c+67108864 c28927241aa6dbad0408cad3384d4887+67108864 37e3154fa87dbfdfc1508a17d9a539d8+67108864 bd57b88617da02c1c547a534d4cea6ac+67108864 cced51fa202f8410d4b0c0cdbd839e43+67108864 b2a3991e08ed41183e4e3693b3de7871+67108864 6442a997170bbf2f64c2f423c3e58a02+67108864 afd77bcbb63d84d2a59a6ca20921187b+67108864 54eab8b24e4e9fa88256f15c302a77d7+67108864 d3affc2d9a34a998d7d189b9e5a7055d+67108864 c457931914fa11b139fb745f900bcc5f+67108864 51ad6a806b0eb57d4f9e43d713a8eb23+67108864 42a1e629c0fb3f26eabfd15560f97b32+67108864 f878ee0ae55ae5e0f57611ddcef39b1e+67108864 fcb4593f27accd8efdf42afe593a29a8+67108864 d61ab1521fec675ce644ec3426f49833+67108864 7844dcc6c4a53d645d8d6be11fdde7db+67108864 cab883dc96650743eea243f830ddfa99+67108864 e412e3496b8e288b0f84f3915f78682b+67108864 3c9f0bbfa22aea749ad1003759f90871+67108864 6476e0c1c60207f0c5f82179cbb1eb55+67108864 39803ea71680efa53d93ecf071da128a+67108864 746567896b262ea67fb15702772272ae+67108864 3ebba6eaf8d320520c1ae04c69c51c54+67108864 60ec5ed8a833afb4065e39dbf649ce3b+67108864 378ed3ee18558e18b02e9d864c740192+67108864 496ee9cfb30dae1cb7bb7d7be99b4f9d+67108864 32c5ca216b69a9024cd785f3c6f5727a+67108864 101aaf4e6cc6fea9372116cbc91295dc+67108864 26da7649915f412dc9e353ae77439e16+67108864 81f8dff12b4aeabe610806080b56cdf4+67108864 b305bf18afbd40bd57fdcbfe80a1f6b0+67108864 34dcac449fd85a37e468b5b747d860ec+67108864 dd7296825e900f1a5c48cc77887b6d26+67108864 778bec1f5aec275d7a0747fc2f1edb69+67108864 a1c96dd0819cf0eac95edd0ab5c05a58+67108864 2bd7d0ca76c8f4d0efda3935575cc45c+67108864 1aabfb4e8d4143022443335ff4326f1e+67108864 ab939e65a4d32b305261829f2abb8b2a+67108864 d336dba0e326ae1e24000c00c10a3e3e+67108864 91da82d2cc82f55b859e2233c76e60f3+67108864 5e8665e03aab339a9f7da3c529a2b612+67108864 d13a86b4688790e57a32e87bcc209ec4+67108864 05712bd5603728d3f9016791d46f18a0+67108864 0b9a1a50d053843bcaa8c2c92d2738d3+67108864 a155912e0e45b54521429e739096d7ed+67108864 dedad0d2a05e7f7786e90370e2f4aae7+67108864 bb3b9f9b905ea40b6aa7360ff26c251b+67108864 f1c1dbf5e0ca7bc398ace6d215eb1b0f+67108864 56ce951826892637bcf276bd7e234587+67108864 b1b20e8b062f0a22375754b6f3170876+67108864 942532b6c96f7b51859d39ed123b2509+67108864 c4ead5a64baa6a6fd16c6d9d639ab84c+67108864 d7d4b85f843e2f4a5576fc7d9b6f8c4a+67108864 7eff16af07bdf8470b0b19f843c22ec5+67108864 1dd070d54d64b07d27fb7625a94b47bb+67108864 b33c17ab40dc191636dbbff51c1b0aab+67108864 d8cdd7ca5fdc00505f405c9ae54947d3+67108864 a2a68c3d5ffa4d9660e1644fc784c595+67108864 6419d072f83937007be14ade52127892+67108864 a21e2c355fc15c5aed0ec8bbdd72ce57+67108864 5a39145dd181812527c28ef26515ec05+67108864 fce2f7b28fac75292ad68c9e158a13e5+67108864 ad8a8fcea664c1f0e73e68f1747a926c+67108864 6a0475795138c21b49a7f64a83830786+67108864 fdb58610320a6c1ac6b7a65621d0e1cf+67108864 7b00d44d90b7e2c303442017f4c90669+67108864 05d5186afda38885eb02558047b4de5a+67108864 1fd3dadea7b7325a1679a3bd64a0677b+67108864 02873b880b7c81807450431f38578adf+67108864 92f94a574cf8f897b0d5cdcf81a36757+67108864 ab23dc595b61727226997bbfc81d0ab5+67108864 d6641c1132d04737d040216305a9dcbc+67108864 0ef5d9a5a279b482fa6b97aa2039e423+67108864 aae63d13b1d9d8405d55c79f38a562d2+67108864 c676127582c2f5324575e7044e8e8e13+67108864 f4828a91ed7679c403f0711910adb8e9+67108864 
9dc446c9842db3d526567b1b07a55c4d+67108864 c4192c0f0cb5a42c1bdb49530e3560bd+67108864 98cbdb65458086ad86164661fe3113fd+67108864 4d4c5ad721ba1ec22aa044fb5cec743c+67108864 7ea9aa9309936c10635dd5a5d1b986f3+67108864 7dc4a6d69b3ae0704a25fe6d0fd75272+67108864 f749b92411e4f602907a4c2e0853d914+67108864 89384497e0c6b5ae900d504e6f4e7348+67108864 01a97fdc4c69bb61d00bb0e0f175ee9c+67108864 f2920e0ad57f64b1c2eb48af5387321b+67108864 a8fa1e0e5aaafb37766bd135c818a3a4+67108864 7d7bec841a2c0c4214727cc83787733c+67108864 73d6603ce67a74b1b4d7346ce5442669+67108864 43e728dd53e31a4c6e24ae69e38d5482+67108864 427dd6b12aec18c38c5b8a8276160cf0+67108864 313574f4f22e7dc8b4d3f6d1b5594146+67108864 d0720964a956512960d5c147bf8dd280+67108864 8a35b2b23f9cf7fd81c6739a13c26add+36834981 0:95527142234:PG0002578-DNA.bam 95527142234:8761328:PG0002578-DNA.bam.bai 95535903562:936266194:PG0002578-DNA.bam.fa.gz 96472169756:469864841:PG0002578-DNA.bam.tdf
+./PG0002578-DNA-jlake-tumor/Assembly/genome/bam/realigned 39f862840d200c3dbc166ad29dea6807+67108864 34c3d868d8bd1134dbbde930b5a0161a+67108864 0c44e83c212416b1845cc0b292bafa9c+67108864 3f9bfd80e8eccd4fe3d26f3a74f3de35+67108864 9021a4781b43910f1952000e72100689+67108864 48083a918c1e46c64b9387a334c1faa2+67108864 1a3e290d82c15d770ab030a806d4166b+67108864 16aacdcffb8fb09724e517e76984463a+67108864 c4b69566ee2213c9da499588e27f9b37+67108864 77616a26e1711f0f0af2afe361f07559+67108864 23afc19d408c9e2b918eef3254899dff+67108864 ef820a07ec1446e36dcfb54f4b1bb4d9+67108864 85804989c1a609229f2100aca2a5164b+67108864 056c41fdc0e02e32e6a480ce2c401e78+67108864 d79df4b7582af576371a42cb17de617c+62074987 0:996151435:PG0002578-DNA.realigned.bam 996151435:5447648:PG0002578-DNA.realigned.bam.bai
+./PG0002578-DNA-jlake-tumor/Assembly/stats 6798533a7dfcc926ab0d03ca4b1fe9bb+30924 0:14525:Reads.idx 14525:14525:Reads.idx.bu 29050:1247:coverage.summary.txt 30297:627:dupCount.summary.txt
+./PG0002578-DNA-jlake-tumor/Docs 5b614c0ac2f1b7be407433860ee2a553+5625667 0:277564:1_IGS_Deliverable_Layout_gVCF.pdf 277564:304833:2_gVCF_Conventions_CASAVA_Release.pdf 582397:332651:3_Illumina_Annotation_Pipeline_Documentation.pdf 915048:3765366:S1_CASAVA_UG_15011196B.PDF 4680414:903872:S2_CASAVA_QRG_15011197B.PDF 5584286:41381:S3_bam2fastq.pdf
+./PG0002578-DNA-jlake-tumor/Genotyping 74a29b8642eafbee843c51f46fb526c0+67108864 c659bb8049be3ac13768227a5ac78228+67108864 388f348c50d59fc16bfb566f088e85aa+4019621 0:138237349:FinalReport_HumanOmni2.5-8v1_PG0002578.txt
+./PG0002578-DNA-jlake-tumor/IGV 1ae944d18638508fece1b0a8bfcf8c9e+15934904 0:2898:.igv_session.xml 2898:2898:.mac_igv_session.xml 5796:2526:GID_session.xml 8322:171972:batik-codec.jar 180294:202:igv.bat 180496:43:igv.command 180539:15725768:igv.jar 15906307:42:igv.sh 15906349:1150:illumina.ico 15907499:26167:license_LGPL-2.1.txt 15933666:1238:readme.txt
+./PG0002578-DNA-jlake-tumor/Variations c16493faff63b09072395f9131e38152+67108864 f8f61d884faf2a66f162af422c2749ee+67108864 75b165151b45b7aee32b4690f0644b38+67108864 a33a13617e75d300715e46c57162fc33+67108864 ca46c7a4c725d0efe0568dce26bc5213+67108864 18f8e870d93a271f8a894fc4c4351e60+67108864 adb2c4f3e1e99be24e3ab19cf5db7631+67108864 fefc2d2bfafc240776c7b06ddc8d475b+67108864 adf6ee298b575c59143eda731f83c1cf+67108864 78cf4992604945f97209402eabb2eb7d+67108864 8bf9394ad0435a51e2b2e57a841300c1+67108864 a6f8c5582335c30d06d111ecd79d85bf+67108864 7765bf35e97576b071639d17d9bdd090+67108864 8b9b746b76b18e32ba8047176e9f74d2+67108864 56590e49063b1f814005d9c02000ddfd+67108864 10a639f273764b31451b5e8d95a8e98d+67108864 1b1c8235e66633bb95d2a2a2bede6f3f+67108864 4cc650efa04c38fd0502287a92786737+67108864 4433bac63122a8f9058334633904e988+67108864 3b470c2e8bc8d0ed981ea14353f08eaa+67108864 13de261edeba63d0c330f85bf3544656+67108864 7aee17fb67ed1a622f2bed2b3a50ff78+67108864 8c7ecacdbd99ec2cb3cbd276984bb361+67108864 4bee2d90998217971ef72b2f94054966+12779495 0:1432964600:PG0002578-DNA.genome.block.anno.vcf.gz 1432964600:3650551:PG0002578-DNA.genome.block.anno.vcf.gz.tbi 1436615151:118084986:PG0002578-DNA.snps.vcf.gz 1554700137:1583230:PG0002578-DNA.snps.vcf.gz.tbi
diff --git a/sdk/python/tests/keepstub.py b/sdk/python/tests/keepstub.py
new file mode 100644 (file)
index 0000000..6be8d8b
--- /dev/null
@@ -0,0 +1,231 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import division
+from future import standard_library
+standard_library.install_aliases()
+from builtins import str
+import http.server
+import hashlib
+import os
+import re
+import socket
+import socketserver
+import sys
+import threading
+import time
+
+from . import arvados_testutil as tutil
+
+_debug = os.environ.get('ARVADOS_DEBUG', None)
+
+
+class StubKeepServers(tutil.ApiClientMock):
+
+    def setUp(self):
+        super(StubKeepServers, self).setUp()
+        sock = socket.socket()
+        sock.bind(('0.0.0.0', 0))
+        self.port = sock.getsockname()[1]
+        sock.close()
+        self.server = Server(('0.0.0.0', self.port), Handler)
+        self.thread = threading.Thread(target=self.server.serve_forever)
+        self.thread.daemon = True # Exit thread if main proc exits
+        self.thread.start()
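+        # Build a mock API client whose Keep service list points at
+        # the stub server started above.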
+        self.api_client = self.mock_keep_services(
+            count=1,
+            service_host='localhost',
+            service_port=self.port,
+        )
+
+    def tearDown(self):
+        self.server.shutdown()
+        super(StubKeepServers, self).tearDown()
+
+
+class Server(socketserver.ThreadingMixIn, http.server.HTTPServer, object):
+
+    allow_reuse_address = 1
+
+    def __init__(self, *args, **kwargs):
+        self.store = {}
+        self.delays = {
+            # before reading request headers
+            'request': 0,
+            # before reading request body
+            'request_body': 0,
+            # before setting response status and headers
+            'response': 0,
+            # before sending response body
+            'response_body': 0,
+            # before returning from handler (thus setting response EOF)
+            'response_close': 0,
+            # after writing over 1s worth of data at self.bandwidth
+            'mid_write': 0,
+            # after reading over 1s worth of data at self.bandwidth
+            'mid_read': 0,
+        }
+        self.bandwidth = None
+        super(Server, self).__init__(*args, **kwargs)
+
+    def setdelays(self, **kwargs):
+        """In future requests, induce delays at the given checkpoints."""
+        for (k, v) in kwargs.items():
+            if k not in self.delays:
+                raise KeyError('unknown delay checkpoint: {!r}'.format(k))
+            self.delays[k] = v
+
+    def setbandwidth(self, bandwidth):
+        """For future requests, set the maximum bandwidth (number of bytes per
+        second) to operate at. If setbandwidth is never called, function at
+        maximum bandwidth possible"""
+        self.bandwidth = float(bandwidth)
+
+    def _sleep_at_least(self, seconds):
+        """Sleep for given time, even if signals are received."""
+        wake = time.time() + seconds
+        todo = seconds
+        while todo > 0:
+            time.sleep(todo)
+            todo = wake - time.time()
+
+    def _do_delay(self, k):
+        self._sleep_at_least(self.delays[k])
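+
+# Illustrative usage sketch (the test and variable names below are
+# hypothetical): a test built on StubKeepServers can reach the running
+# stub via self.server and shape its behavior before making requests:
+#
+#   class SlowKeepTest(StubKeepServers):
+#       def test_slow_response(self):
+#           self.server.setbandwidth(1024)          # ~1 KiB/s
+#           self.server.setdelays(response_body=2)  # 2s pause before body
+#           # ...exercise a Keep client against localhost:self.port...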
+
+
+class Handler(http.server.BaseHTTPRequestHandler, object):
+
+    protocol_version = 'HTTP/1.1'
+
+    def wfile_bandwidth_write(self, data_to_write):
+        if self.server.bandwidth is None and self.server.delays['mid_write'] == 0:
+            self.wfile.write(data_to_write)
+        else:
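+            # Throttle in roughly quarter-second chunks: bandwidth/4
+            # bytes per write, falling back to 32 KiB when no bandwidth
+            # is configured.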
+            BYTES_PER_WRITE = int((self.server.bandwidth or 0)/4) or 32768
+            outage_happened = False
+            num_bytes = len(data_to_write)
+            num_sent_bytes = 0
+            target_time = time.time()
+            while num_sent_bytes < num_bytes:
+                if num_sent_bytes > (self.server.bandwidth or 0) and not outage_happened:
+                    self.server._do_delay('mid_write')
+                    target_time += self.server.delays['mid_write']
+                    outage_happened = True
+                num_write_bytes = min(BYTES_PER_WRITE,
+                    num_bytes - num_sent_bytes)
+                self.wfile.write(data_to_write[
+                    num_sent_bytes:num_sent_bytes+num_write_bytes])
+                num_sent_bytes += num_write_bytes
+                if self.server.bandwidth is not None:
+                    target_time += num_write_bytes / self.server.bandwidth
+                    self.server._sleep_at_least(target_time - time.time())
+        return None
+
+    def rfile_bandwidth_read(self, bytes_to_read):
+        if self.server.bandwidth is None and self.server.delays['mid_read'] == 0:
+            return self.rfile.read(bytes_to_read)
+        else:
+            BYTES_PER_READ = int((self.server.bandwidth or 0)/4) or 32768
+            data = b''
+            outage_happened = False
+            bytes_read = 0
+            target_time = time.time()
+            while bytes_to_read > bytes_read:
+                if bytes_read > (self.server.bandwidth or 0) and not outage_happened:
+                    self.server._do_delay('mid_read')
+                    target_time += self.server.delays['mid_read']
+                    outage_happened = True
+                next_bytes_to_read = min(BYTES_PER_READ,
+                    bytes_to_read - bytes_read)
+                data += self.rfile.read(next_bytes_to_read)
+                bytes_read += next_bytes_to_read
+                if self.server.bandwidth is not None:
+                    target_time += next_bytes_to_read / self.server.bandwidth
+                    self.server._sleep_at_least(target_time - time.time())
+        return data
+
+    def finish(self, *args, **kwargs):
+        try:
+            return super(Handler, self).finish(*args, **kwargs)
+        except Exception:
+            if _debug:
+                raise
+
+    def handle(self, *args, **kwargs):
+        try:
+            return super(Handler, self).handle(*args, **kwargs)
+        except Exception:
+            if _debug:
+                raise
+
+    def handle_one_request(self, *args, **kwargs):
+        self._sent_continue = False
+        self.server._do_delay('request')
+        return super(Handler, self).handle_one_request(*args, **kwargs)
+
+    def handle_expect_100(self):
+        self.server._do_delay('request_body')
+        self._sent_continue = True
+        return super(Handler, self).handle_expect_100()
+
+    def do_GET(self):
+        self.server._do_delay('response')
+        r = re.search(r'[0-9a-f]{32}', self.path)
+        if not r:
+            return self.send_response(422)
+        datahash = r.group(0)
+        if datahash not in self.server.store:
+            return self.send_response(404)
+        self.send_response(200)
+        self.send_header('Connection', 'close')
+        self.send_header('Content-type', 'application/octet-stream')
+        self.end_headers()
+        self.server._do_delay('response_body')
+        self.wfile_bandwidth_write(self.server.store[datahash])
+        self.server._do_delay('response_close')
+
+    def do_HEAD(self):
+        self.server._do_delay('response')
+        r = re.search(r'[0-9a-f]{32}', self.path)
+        if not r:
+            return self.send_response(422)
+        datahash = r.group(0)
+        if datahash not in self.server.store:
+            return self.send_response(404)
+        self.send_response(200)
+        self.send_header('Connection', 'close')
+        self.send_header('Content-type', 'application/octet-stream')
+        self.send_header('Content-length', str(len(self.server.store[datahash])))
+        self.end_headers()
+        self.server._do_delay('response_close')
+        self.close_connection = True
+
+    def do_PUT(self):
+        if not self._sent_continue and self.headers.get('expect') == '100-continue':
+            # The comments at https://bugs.python.org/issue1491
+            # imply that Python 2.7's BaseHTTPRequestHandler was
+            # patched to support 100 Continue, but the actual code
+            # that ships in Debian clearly was not, so we need to
+            # send the response on the socket directly.
+            self.server._do_delay('request_body')
+            self.wfile.write("{} {} {}\r\n\r\n".format(
+                self.protocol_version, 100, "Continue").encode())
+        data = self.rfile_bandwidth_read(
+            int(self.headers.get('content-length')))
+        datahash = hashlib.md5(data).hexdigest()
+        self.server.store[datahash] = data
+        resp = '{}+{}\n'.format(datahash, len(data)).encode()
+        self.server._do_delay('response')
+        self.send_response(200)
+        self.send_header('Connection', 'close')
+        self.send_header('Content-type', 'text/plain')
+        self.send_header('Content-length', str(len(resp)))
+        self.end_headers()
+        self.server._do_delay('response_body')
+        self.wfile_bandwidth_write(resp)
+        self.server._do_delay('response_close')
+        self.close_connection = True
+
+    def log_request(self, *args, **kwargs):
+        if _debug:
+            super(Handler, self).log_request(*args, **kwargs)
diff --git a/sdk/python/tests/manifest_examples.py b/sdk/python/tests/manifest_examples.py
new file mode 100644 (file)
index 0000000..050d690
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from builtins import range
+from builtins import object
+import arvados
+from . import arvados_testutil as tutil
+
+class ManifestExamples(object):
+    def make_manifest(self,
+                      bytes_per_block=1,
+                      blocks_per_file=1,
+                      files_per_stream=1,
+                      streams=1):
+        datablip = 'x' * bytes_per_block
+        data_loc = tutil.str_keep_locator(datablip)
+        with tutil.mock_keep_responses(data_loc, 200):
+            coll = arvados.CollectionWriter()
+            for si in range(0, streams):
+                for fi in range(0, files_per_stream):
+                    with coll.open("stream{}/file{}.txt".format(si, fi)) as f:
+                        for bi in range(0, blocks_per_file):
+                            f.write(datablip)
+            return coll.manifest_text()
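+
+# Illustrative usage (hypothetical numbers): a test case mixing in
+# ManifestExamples can synthesize a large manifest without writing any
+# real data, because the Keep PUTs are mocked:
+#
+#   text = self.make_manifest(bytes_per_block=2**26,
+#                             blocks_per_file=4,
+#                             files_per_stream=2,
+#                             streams=10)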
diff --git a/sdk/python/tests/nginx.conf b/sdk/python/tests/nginx.conf
new file mode 100644 (file)
index 0000000..130d8c9
--- /dev/null
@@ -0,0 +1,137 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
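+# Template used by the Python SDK test harness: run_nginx() in
+# run_test_server.py substitutes the {{TOKEN}} placeholders (ports,
+# certificate paths, log paths) before launching nginx.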
+daemon off;
+error_log "{{ERRORLOG}}" info;          # Yes, must be specified here _and_ cmdline
+events {
+}
+http {
+  log_format customlog
+    '[$time_local] $server_name $status $body_bytes_sent $request_time $request_method "$scheme://$http_host$request_uri" $remote_addr:$remote_port '
+    '"$http_referer" "$http_user_agent"';
+  access_log "{{ACCESSLOG}}" customlog;
+  client_body_temp_path "{{TMPDIR}}";
+  upstream arv-git-http {
+    server localhost:{{GITPORT}};
+  }
+  server {
+    listen *:{{GITSSLPORT}} ssl default_server;
+    server_name arv-git-http;
+    ssl_certificate "{{SSLCERT}}";
+    ssl_certificate_key "{{SSLKEY}}";
+    location  / {
+      proxy_pass http://arv-git-http;
+      proxy_set_header Host $http_host;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+    }
+  }
+  upstream keepproxy {
+    server localhost:{{KEEPPROXYPORT}};
+  }
+  server {
+    listen *:{{KEEPPROXYSSLPORT}} ssl default_server;
+    server_name keepproxy;
+    ssl_certificate "{{SSLCERT}}";
+    ssl_certificate_key "{{SSLKEY}}";
+    location  / {
+      proxy_pass http://keepproxy;
+      proxy_set_header Host $http_host;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+
+      proxy_http_version 1.1;
+      proxy_request_buffering off;
+    }
+  }
+  upstream keep-web {
+    server localhost:{{KEEPWEBPORT}};
+  }
+  server {
+    listen *:{{KEEPWEBSSLPORT}} ssl default_server;
+    server_name keep-web;
+    ssl_certificate "{{SSLCERT}}";
+    ssl_certificate_key "{{SSLKEY}}";
+    location  / {
+      proxy_pass http://keep-web;
+      proxy_set_header Host $http_host;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+
+      client_max_body_size 0;
+      proxy_http_version 1.1;
+      proxy_request_buffering off;
+    }
+  }
+  server {
+    listen *:{{KEEPWEBDLSSLPORT}} ssl default_server;
+    server_name keep-web-dl ~.*;
+    ssl_certificate "{{SSLCERT}}";
+    ssl_certificate_key "{{SSLKEY}}";
+    location  / {
+      proxy_pass http://keep-web;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+
+      client_max_body_size 0;
+      proxy_http_version 1.1;
+      proxy_request_buffering off;
+
+      # Unlike other proxy sections, here we need to override the
+      # requested Host header and use proxy_redirect because of the
+      # way the test suite orchestrates services. Keep-web's "download
+      # only" behavior relies on the Host header matching a configured
+      # value, but when run_test_servers.py writes keep-web's command
+      # line, the keep-web-dl TLS port (which clients will connect to
+      # and include in their Host header) has not yet been assigned.
+      #
+      # In production, "proxy_set_header Host $http_host;
+      # proxy_redirect off;" works: keep-web's redirect URLs will
+      # match the request URL received by Nginx.
+      #
+      # Here, keep-web will issue redirects to https://download/ and
+      # Nginx will rewrite them.
+      #
+      proxy_set_header Host  download;
+      proxy_redirect https://download/ https://$host:{{KEEPWEBDLSSLPORT}}/;
+    }
+  }
+  upstream ws {
+    server localhost:{{WSPORT}};
+  }
+  server {
+    listen *:{{WSSPORT}} ssl default_server;
+    server_name websocket;
+    ssl_certificate "{{SSLCERT}}";
+    ssl_certificate_key "{{SSLKEY}}";
+    location  / {
+      proxy_pass http://ws;
+      proxy_set_header Upgrade $http_upgrade;
+      proxy_set_header Connection "upgrade";
+      proxy_set_header Host $http_host;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+    }
+  }
+  upstream controller {
+    server localhost:{{CONTROLLERPORT}};
+  }
+  server {
+    listen *:{{CONTROLLERSSLPORT}} ssl default_server;
+    server_name controller;
+    ssl_certificate "{{SSLCERT}}";
+    ssl_certificate_key "{{SSLKEY}}";
+    location  / {
+      proxy_pass http://controller;
+      proxy_set_header Host $http_host;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+    }
+  }
+}
diff --git a/sdk/python/tests/performance/__init__.py b/sdk/python/tests/performance/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/python/tests/performance/performance_profiler.py b/sdk/python/tests/performance/performance_profiler.py
new file mode 100644 (file)
index 0000000..3be00c4
--- /dev/null
@@ -0,0 +1,49 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Use the "profiled" decorator on a test to get profiling data.
+#
+# Usage:
+#   from performance_profiler import profiled
+#
+#   # See report in tmp/profile/foobar
+#   @profiled
+#   def foobar():
+#       baz = 1
+#
+#   See "test_a_sample.py" for a working example.
+#
+# Performance tests run as part of the regular test suite.
+# You can also run only the performance tests using one of the following:
+#     python -m unittest discover tests.performance
+#     ./run-tests.sh WORKSPACE=~/arvados --only sdk/python sdk/python_test="--test-suite=tests.performance"
+
+import functools
+import os
+import pstats
+import sys
+import unittest
+try:
+    import cProfile as profile
+except ImportError:
+    import profile
+
+output_dir = os.path.abspath(os.path.join('tmp', 'profile'))
+if not os.path.exists(output_dir):
+    os.makedirs(output_dir)
+
+def profiled(function):
+    @functools.wraps(function)
+    def profiled_function(*args, **kwargs):
+        pr = profile.Profile()
+        pr.enable()
+        try:
+            return function(*args, **kwargs)
+        finally:
+            pr.disable()
+            # Write a text report sorted by internal time.
+            with open(os.path.join(output_dir, function.__name__), "w") as outfile:
+                ps = pstats.Stats(pr, stream=outfile)
+                ps.sort_stats('time').print_stats()
+    return profiled_function
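+
+# Each report in tmp/profile/<function name> is a plain-text pstats
+# listing sorted by internal time; view it with any pager, e.g.
+# `less tmp/profile/test_profiled_decorator`.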
diff --git a/sdk/python/tests/performance/test_a_sample.py b/sdk/python/tests/performance/test_a_sample.py
new file mode 100644 (file)
index 0000000..65015dc
--- /dev/null
@@ -0,0 +1,22 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+from __future__ import absolute_import
+from builtins import range
+import unittest
+
+from .performance_profiler import profiled
+
+class PerformanceTestSample(unittest.TestCase):
+    def foo(self):
+        bar = 64
+
+    @profiled
+    def test_profiled_decorator(self):
+        j = 0
+        for i in range(0,2**20):
+            j += i
+        self.foo()
+        print('Hello')
diff --git a/sdk/python/tests/run_test_server.py b/sdk/python/tests/run_test_server.py
new file mode 100644 (file)
index 0000000..7b1f605
--- /dev/null
@@ -0,0 +1,912 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+from __future__ import division
+from builtins import str
+from builtins import range
+import argparse
+import atexit
+import errno
+import glob
+import httplib2
+import os
+import pipes
+import random
+import re
+import shutil
+import signal
+import socket
+import string
+import subprocess
+import sys
+import tempfile
+import time
+import unittest
+import yaml
+
+MY_DIRNAME = os.path.dirname(os.path.realpath(__file__))
+if __name__ == '__main__' and os.path.exists(
+      os.path.join(MY_DIRNAME, '..', 'arvados', '__init__.py')):
+    # We're being launched to support another test suite.
+    # Add the Python SDK source to the library path.
+    sys.path.insert(1, os.path.dirname(MY_DIRNAME))
+
+import arvados
+import arvados.config
+
+ARVADOS_DIR = os.path.realpath(os.path.join(MY_DIRNAME, '../../..'))
+SERVICES_SRC_DIR = os.path.join(ARVADOS_DIR, 'services')
+if 'GOPATH' in os.environ:
+    # Add all GOPATH bin dirs to PATH -- but insert them after the
+    # ruby gems bin dir, to ensure "bundle" runs the Ruby bundler
+    # command, not the golang.org/x/tools/cmd/bundle command.
+    gopaths = os.environ['GOPATH'].split(':')
+    addbins = [os.path.join(path, 'bin') for path in gopaths]
+    newbins = []
+    for path in os.environ['PATH'].split(':'):
+        newbins.append(path)
+        if os.path.exists(os.path.join(path, 'bundle')):
+            newbins += addbins
+            addbins = []
+    newbins += addbins
+    os.environ['PATH'] = ':'.join(newbins)
+
+TEST_TMPDIR = os.path.join(ARVADOS_DIR, 'tmp')
+if not os.path.exists(TEST_TMPDIR):
+    os.mkdir(TEST_TMPDIR)
+
+my_api_host = None
+_cached_config = {}
+_cached_db_config = {}
+
+def find_server_pid(PID_PATH, wait=10):
+    now = time.time()
+    timeout = now + wait
+    good_pid = False
+    while (not good_pid) and (now <= timeout):
+        time.sleep(0.2)
+        try:
+            with open(PID_PATH, 'r') as f:
+                server_pid = int(f.read())
+            good_pid = (os.kill(server_pid, 0) is None)
+        except EnvironmentError:
+            good_pid = False
+        now = time.time()
+
+    if not good_pid:
+        return None
+
+    return server_pid
+
+def kill_server_pid(pidfile, wait=10, passenger_root=False):
+    # Must re-import modules in order to work during atexit
+    import os
+    import signal
+    import subprocess
+    import time
+
+    now = time.time()
+    startTERM = now
+    deadline = now + wait
+
+    if passenger_root:
+        # First try to shut down nicely
+        restore_cwd = os.getcwd()
+        os.chdir(passenger_root)
+        subprocess.call([
+            'bundle', 'exec', 'passenger', 'stop', '--pid-file', pidfile])
+        os.chdir(restore_cwd)
+        # Use up to half of the +wait+ period waiting for "passenger
+        # stop" to work. If the process hasn't exited by then, start
+        # sending TERM signals.
+        startTERM += wait//2
+
+    server_pid = None
+    while now <= deadline and server_pid is None:
+        try:
+            with open(pidfile, 'r') as f:
+                server_pid = int(f.read())
+        except IOError:
+            # No pidfile = nothing to kill.
+            return
+        except ValueError as error:
+            # Pidfile exists, but we can't parse it. Perhaps the
+            # server has created the file but hasn't written its PID
+            # yet?
+            print("Parse error reading pidfile {}: {}".format(pidfile, error),
+                  file=sys.stderr)
+            time.sleep(0.1)
+            now = time.time()
+
+    while now <= deadline:
+        try:
+            exited, _ = os.waitpid(server_pid, os.WNOHANG)
+            if exited > 0:
+                _remove_pidfile(pidfile)
+                return
+        except OSError:
+            # already exited, or isn't our child process
+            pass
+        try:
+            if now >= startTERM:
+                os.kill(server_pid, signal.SIGTERM)
+                print("Sent SIGTERM to {} ({})".format(server_pid, pidfile),
+                      file=sys.stderr)
+        except OSError as error:
+            if error.errno == errno.ESRCH:
+                # Thrown by os.getpgid() or os.kill() if the process
+                # does not exist, i.e., our work here is done.
+                _remove_pidfile(pidfile)
+                return
+            raise
+        time.sleep(0.1)
+        now = time.time()
+
+    print("Server PID {} ({}) did not exit, giving up after {}s".
+          format(server_pid, pidfile, wait),
+          file=sys.stderr)
+
+def _remove_pidfile(pidfile):
+    try:
+        os.unlink(pidfile)
+    except OSError:
+        if os.path.lexists(pidfile):
+            raise
+
+def find_available_port():
+    """Return an IPv4 port number that is not in use right now.
+
+    We assume whoever needs to use the returned port is able to reuse
+    a recently used port without waiting for TIME_WAIT (see
+    SO_REUSEADDR / SO_REUSEPORT).
+
+    Some opportunity for races here, but it's better than choosing
+    something at random and not checking at all. If all of our servers
+    (hey Passenger) knew that listening on port 0 was a thing, the OS
+    would take care of the races, and this wouldn't be needed at all.
+    """
+
+    sock = socket.socket()
+    sock.bind(('0.0.0.0', 0))
+    port = sock.getsockname()[1]
+    sock.close()
+    return port
+
+def _wait_until_port_listens(port, timeout=10, warn=True):
+    """Wait for a process to start listening on the given port.
+
+    Return True once something is listening. If nothing listens on the
+    port within the specified timeout (given in seconds), return False,
+    printing a warning on stderr first if +warn+ is true.
+    """
+    try:
+        subprocess.check_output(['which', 'lsof'])
+    except subprocess.CalledProcessError:
+        print("WARNING: No `lsof` -- cannot wait for port to listen. "+
+              "Sleeping 0.5 and hoping for the best.",
+              file=sys.stderr)
+        time.sleep(0.5)
+        return
+    deadline = time.time() + timeout
+    while time.time() < deadline:
+        try:
+            subprocess.check_output(
+                ['lsof', '-t', '-i', 'tcp:'+str(port)])
+        except subprocess.CalledProcessError:
+            time.sleep(0.1)
+            continue
+        return True
+    if warn:
+        print(
+            "WARNING: Nothing is listening on port {} (waited {} seconds).".
+            format(port, timeout),
+            file=sys.stderr)
+    return False
+
+def _logfilename(label):
+    """Set up a labelled log file, and return a path to write logs to.
+
+    Normally, the returned path is {tmpdir}/{label}.log.
+
+    In debug mode, logs are also written to stderr, with [label]
+    prepended to each line. The returned path is a FIFO.
+
+    +label+ should contain only alphanumerics: it is also used as part
+    of the FIFO filename.
+
+    """
+    logfilename = os.path.join(TEST_TMPDIR, label+'.log')
+    if not os.environ.get('ARVADOS_DEBUG', ''):
+        return logfilename
+    fifo = os.path.join(TEST_TMPDIR, label+'.fifo')
+    try:
+        os.remove(fifo)
+    except OSError as error:
+        if error.errno != errno.ENOENT:
+            raise
+    os.mkfifo(fifo, 0o700)
+    stdbuf = ['stdbuf', '-i0', '-oL', '-eL']
+    # open(fifo, 'r') would block waiting for someone to open the fifo
+    # for writing, so we need a separate cat process to open it for
+    # us.
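+    # The resulting pipeline is, roughly:
+    #   cat {label}.fifo | tee -a {label}.log | sed -e 's/^/[label] /' >&2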
+    cat = subprocess.Popen(
+        stdbuf+['cat', fifo],
+        stdin=open('/dev/null'),
+        stdout=subprocess.PIPE)
+    tee = subprocess.Popen(
+        stdbuf+['tee', '-a', logfilename],
+        stdin=cat.stdout,
+        stdout=subprocess.PIPE)
+    subprocess.Popen(
+        stdbuf+['sed', '-e', 's/^/['+label+'] /'],
+        stdin=tee.stdout,
+        stdout=sys.stderr)
+    return fifo
+
+def run(leave_running_atexit=False):
+    """Ensure an API server is running, and ARVADOS_API_* env vars have
+    admin credentials for it.
+
+    If ARVADOS_TEST_API_HOST is set, a parent process has started a
+    test server for us to use: we just need to reset() it using the
+    admin token fixture.
+
+    If a previous call to run() started a new server process, and it
+    is still running, we just need to reset() it to fixture state and
+    return.
+
+    If neither of those options works out, we'll really start a new
+    server.
+    """
+    global my_api_host
+
+    # Delete cached discovery documents.
+    #
+    # This will clear cached docs that belong to other processes (like
+    # concurrent test suites) even if they're still running. They should
+    # be able to tolerate that.
+    for fn in glob.glob(os.path.join(
+            str(arvados.http_cache('discovery')),
+            '*,arvados,v1,rest,*')):
+        os.unlink(fn)
+
+    pid_file = _pidfile('api')
+    pid_file_ok = find_server_pid(pid_file, 0)
+
+    existing_api_host = os.environ.get('ARVADOS_TEST_API_HOST', my_api_host)
+    if existing_api_host and pid_file_ok:
+        if existing_api_host == my_api_host:
+            try:
+                return reset()
+            except:
+                # Fall through to shutdown-and-start case.
+                pass
+        else:
+            # Server was provided by parent. Can't recover if it's
+            # unresettable.
+            return reset()
+
+    # Before trying to start up our own server, call stop() to avoid
+    # "Phusion Passenger Standalone is already running on PID 12345".
+    # (If we've gotten this far, ARVADOS_TEST_API_HOST isn't set, so
+    # we know the server is ours to kill.)
+    stop(force=True)
+
+    restore_cwd = os.getcwd()
+    api_src_dir = os.path.join(SERVICES_SRC_DIR, 'api')
+    os.chdir(api_src_dir)
+
+    # Either we haven't started a server of our own yet, or it has
+    # died, or we have lost our credentials, or something else is
+    # preventing us from calling reset(). Start a new one.
+
+    if not os.path.exists('tmp'):
+        os.makedirs('tmp')
+
+    if not os.path.exists('tmp/api'):
+        os.makedirs('tmp/api')
+
+    if not os.path.exists('tmp/logs'):
+        os.makedirs('tmp/logs')
+
+    # Install the git repository fixtures.
+    gitdir = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'git')
+    gittarball = os.path.join(SERVICES_SRC_DIR, 'api', 'test', 'test.git.tar')
+    if not os.path.isdir(gitdir):
+        os.makedirs(gitdir)
+    subprocess.check_output(['tar', '-xC', gitdir, '-f', gittarball])
+
+    # The nginx proxy isn't listening here yet, but we need to choose
+    # the wss:// port now so we can write the API server config file.
+    wss_port = find_available_port()
+    _setport('wss', wss_port)
+
+    port = find_available_port()
+    env = os.environ.copy()
+    env['RAILS_ENV'] = 'test'
+    env['ARVADOS_TEST_WSS_PORT'] = str(wss_port)
+    env.pop('ARVADOS_WEBSOCKETS', None)
+    env.pop('ARVADOS_TEST_API_HOST', None)
+    env.pop('ARVADOS_API_HOST', None)
+    env.pop('ARVADOS_API_HOST_INSECURE', None)
+    env.pop('ARVADOS_API_TOKEN', None)
+    start_msg = subprocess.check_output(
+        ['bundle', 'exec',
+         'passenger', 'start', '-d', '-p{}'.format(port),
+         '--pid-file', pid_file,
+         '--log-file', os.path.join(os.getcwd(), 'log/test.log'),
+         '--ssl',
+         '--ssl-certificate', 'tmp/self-signed.pem',
+         '--ssl-certificate-key', 'tmp/self-signed.key'],
+        env=env)
+
+    if not leave_running_atexit:
+        atexit.register(kill_server_pid, pid_file, passenger_root=api_src_dir)
+
+    match = re.search(r'Accessible via: https://(.*?)/', start_msg)
+    if not match:
+        raise Exception(
+            "Passenger did not report endpoint: {}".format(start_msg))
+    my_api_host = match.group(1)
+    os.environ['ARVADOS_API_HOST'] = my_api_host
+
+    # Make sure the server has written its pid file and started
+    # listening on its TCP port
+    find_server_pid(pid_file)
+    _wait_until_port_listens(port)
+
+    reset()
+    os.chdir(restore_cwd)
+
+def reset():
+    """Reset the test server to fixture state.
+
+    This resets the ARVADOS_TEST_API_HOST provided by a parent process
+    if any, otherwise the server started by run().
+
+    It also resets ARVADOS_* environment vars to point to the test
+    server with admin credentials.
+    """
+    existing_api_host = os.environ.get('ARVADOS_TEST_API_HOST', my_api_host)
+    token = auth_token('admin')
+    httpclient = httplib2.Http(ca_certs=os.path.join(
+        SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.pem'))
+    httpclient.request(
+        'https://{}/database/reset'.format(existing_api_host),
+        'POST',
+        headers={'Authorization': 'OAuth2 {}'.format(token)})
+    os.environ['ARVADOS_API_HOST_INSECURE'] = 'true'
+    os.environ['ARVADOS_API_TOKEN'] = token
+    if _wait_until_port_listens(_getport('controller-ssl'), timeout=0.5, warn=False):
+        os.environ['ARVADOS_API_HOST'] = '0.0.0.0:'+str(_getport('controller-ssl'))
+    else:
+        os.environ['ARVADOS_API_HOST'] = existing_api_host
+
+def stop(force=False):
+    """Stop the API server, if one is running.
+
+    If force==False, kill it only if we started it ourselves. (This
+    supports the use case where a Python test suite calls run(), but
+    run() just uses the ARVADOS_TEST_API_HOST provided by the parent
+    process, and the test suite cleans up after itself by calling
+    stop(). In this case the test server provided by the parent
+    process should be left alone.)
+
+    If force==True, kill it even if we didn't start it
+    ourselves. (This supports the use case in __main__, where "run"
+    and "stop" happen in different processes.)
+    """
+    global my_api_host
+    if force or my_api_host is not None:
+        kill_server_pid(_pidfile('api'))
+        my_api_host = None
+
+def run_controller():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    stop_controller()
+    rails_api_port = int(os.environ.get('ARVADOS_TEST_API_HOST', my_api_host).split(':')[-1])
+    port = find_available_port()
+    conf = os.path.join(TEST_TMPDIR, 'arvados.yml')
+    with open(conf, 'w') as f:
+        f.write("""
+Clusters:
+  zzzzz:
+    HTTPRequestTimeout: 30s
+    PostgreSQL:
+      ConnectionPool: 32
+      Connection:
+        host: {}
+        dbname: {}
+        user: {}
+        password: {}
+    NodeProfiles:
+      "*":
+        "arvados-controller":
+          Listen: ":{}"
+        "arvados-api-server":
+          Listen: ":{}"
+          TLS: true
+          Insecure: true
+        """.format(
+            _dbconfig('host'),
+            _dbconfig('database'),
+            _dbconfig('username'),
+            _dbconfig('password'),
+            port,
+            rails_api_port,
+        ))
+    logf = open(_logfilename('controller'), 'a')
+    controller = subprocess.Popen(
+        ["arvados-server", "controller", "-config", conf],
+        stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+    with open(_pidfile('controller'), 'w') as f:
+        f.write(str(controller.pid))
+    _wait_until_port_listens(port)
+    _setport('controller', port)
+    return port
+
+def stop_controller():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    kill_server_pid(_pidfile('controller'))
+
+def run_ws():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    stop_ws()
+    port = find_available_port()
+    conf = os.path.join(TEST_TMPDIR, 'ws.yml')
+    with open(conf, 'w') as f:
+        f.write("""
+Client:
+  APIHost: {}
+  Insecure: true
+Listen: :{}
+LogLevel: {}
+Postgres:
+  host: {}
+  dbname: {}
+  user: {}
+  password: {}
+  sslmode: require
+        """.format(os.environ['ARVADOS_API_HOST'],
+                   port,
+                   ('info' if os.environ.get('ARVADOS_DEBUG', '') in ['','0'] else 'debug'),
+                   _dbconfig('host'),
+                   _dbconfig('database'),
+                   _dbconfig('username'),
+                   _dbconfig('password')))
+    logf = open(_logfilename('ws'), 'a')
+    ws = subprocess.Popen(
+        ["ws", "-config", conf],
+        stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+    with open(_pidfile('ws'), 'w') as f:
+        f.write(str(ws.pid))
+    _wait_until_port_listens(port)
+    _setport('ws', port)
+    return port
+
+def stop_ws():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    kill_server_pid(_pidfile('ws'))
+
+def _start_keep(n, keep_args):
+    keep0 = tempfile.mkdtemp()
+    port = find_available_port()
+    keep_cmd = ["keepstore",
+                "-volume={}".format(keep0),
+                "-listen=:{}".format(port),
+                "-pid="+_pidfile('keep{}'.format(n))]
+
+    for arg, val in keep_args.items():
+        keep_cmd.append("{}={}".format(arg, val))
+
+    logf = open(_logfilename('keep{}'.format(n)), 'a')
+    kp0 = subprocess.Popen(
+        keep_cmd, stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+
+    with open(_pidfile('keep{}'.format(n)), 'w') as f:
+        f.write(str(kp0.pid))
+
+    with open("{}/keep{}.volume".format(TEST_TMPDIR, n), 'w') as f:
+        f.write(keep0)
+
+    _wait_until_port_listens(port)
+
+    return port
+
+def run_keep(blob_signing_key=None, enforce_permissions=False, num_servers=2):
+    stop_keep(num_servers)
+
+    keep_args = {}
+    if not blob_signing_key:
+        blob_signing_key = 'zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc'
+    with open(os.path.join(TEST_TMPDIR, "keep.blob_signing_key"), "w") as f:
+        keep_args['-blob-signing-key-file'] = f.name
+        f.write(blob_signing_key)
+    keep_args['-enforce-permissions'] = str(enforce_permissions).lower()
+    with open(os.path.join(TEST_TMPDIR, "keep.data-manager-token-file"), "w") as f:
+        keep_args['-data-manager-token-file'] = f.name
+        f.write(auth_token('data_manager'))
+    keep_args['-never-delete'] = 'false'
+
+    api = arvados.api(
+        version='v1',
+        host=os.environ['ARVADOS_API_HOST'],
+        token=os.environ['ARVADOS_API_TOKEN'],
+        insecure=True)
+
+    for d in api.keep_services().list(filters=[['service_type','=','disk']]).execute()['items']:
+        api.keep_services().delete(uuid=d['uuid']).execute()
+    for d in api.keep_disks().list().execute()['items']:
+        api.keep_disks().delete(uuid=d['uuid']).execute()
+
+    for d in range(0, num_servers):
+        port = _start_keep(d, keep_args)
+        svc = api.keep_services().create(body={'keep_service': {
+            'uuid': 'zzzzz-bi6l4-keepdisk{:07d}'.format(d),
+            'service_host': 'localhost',
+            'service_port': port,
+            'service_type': 'disk',
+            'service_ssl_flag': False,
+        }}).execute()
+        api.keep_disks().create(body={
+            'keep_disk': {'keep_service_uuid': svc['uuid'] }
+        }).execute()
+
+    # If keepproxy and/or keep-web is running, send SIGHUP to make
+    # them discover the new keepstore services.
+    for svc in ('keepproxy', 'keep-web'):
+        pidfile = _pidfile(svc)
+        if os.path.exists(pidfile):
+            try:
+                os.kill(int(open(pidfile).read()), signal.SIGHUP)
+            except OSError:
+                os.remove(pidfile)
+
+def _stop_keep(n):
+    kill_server_pid(_pidfile('keep{}'.format(n)))
+    if os.path.exists("{}/keep{}.volume".format(TEST_TMPDIR, n)):
+        with open("{}/keep{}.volume".format(TEST_TMPDIR, n), 'r') as r:
+            shutil.rmtree(r.read(), True)
+        os.unlink("{}/keep{}.volume".format(TEST_TMPDIR, n))
+    if os.path.exists(os.path.join(TEST_TMPDIR, "keep.blob_signing_key")):
+        os.remove(os.path.join(TEST_TMPDIR, "keep.blob_signing_key"))
+
+def stop_keep(num_servers=2):
+    for n in range(0, num_servers):
+        _stop_keep(n)
+
+def run_keep_proxy():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    stop_keep_proxy()
+
+    port = find_available_port()
+    env = os.environ.copy()
+    env['ARVADOS_API_TOKEN'] = auth_token('anonymous')
+    logf = open(_logfilename('keepproxy'), 'a')
+    kp = subprocess.Popen(
+        ['keepproxy',
+         '-pid='+_pidfile('keepproxy'),
+         '-listen=:{}'.format(port)],
+        env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+
+    api = arvados.api(
+        version='v1',
+        host=os.environ['ARVADOS_API_HOST'],
+        token=auth_token('admin'),
+        insecure=True)
+    for d in api.keep_services().list(
+            filters=[['service_type','=','proxy']]).execute()['items']:
+        api.keep_services().delete(uuid=d['uuid']).execute()
+    api.keep_services().create(body={'keep_service': {
+        'service_host': 'localhost',
+        'service_port': port,
+        'service_type': 'proxy',
+        'service_ssl_flag': False,
+    }}).execute()
+    os.environ["ARVADOS_KEEP_SERVICES"] = "http://localhost:{}".format(port)
+    _setport('keepproxy', port)
+    _wait_until_port_listens(port)
+
+def stop_keep_proxy():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    kill_server_pid(_pidfile('keepproxy'))
+
+def run_arv_git_httpd():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    stop_arv_git_httpd()
+
+    gitdir = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'git')
+    gitport = find_available_port()
+    env = os.environ.copy()
+    env.pop('ARVADOS_API_TOKEN', None)
+    logf = open(_logfilename('arv-git-httpd'), 'a')
+    agh = subprocess.Popen(
+        ['arv-git-httpd',
+         '-repo-root='+gitdir+'/test',
+         '-address=:'+str(gitport)],
+        env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
+    with open(_pidfile('arv-git-httpd'), 'w') as f:
+        f.write(str(agh.pid))
+    _setport('arv-git-httpd', gitport)
+    _wait_until_port_listens(gitport)
+
+def stop_arv_git_httpd():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    kill_server_pid(_pidfile('arv-git-httpd'))
+
+def run_keep_web():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    stop_keep_web()
+
+    keepwebport = find_available_port()
+    env = os.environ.copy()
+    env['ARVADOS_API_TOKEN'] = auth_token('anonymous')
+    logf = open(_logfilename('keep-web'), 'a')
+    keepweb = subprocess.Popen(
+        ['keep-web',
+         '-allow-anonymous',
+         '-attachment-only-host=download',
+         '-listen=:'+str(keepwebport)],
+        env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
+    with open(_pidfile('keep-web'), 'w') as f:
+        f.write(str(keepweb.pid))
+    _setport('keep-web', keepwebport)
+    _wait_until_port_listens(keepwebport)
+
+def stop_keep_web():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    kill_server_pid(_pidfile('keep-web'))
+
+def run_nginx():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    stop_nginx()
+    nginxconf = {}
+    nginxconf['CONTROLLERPORT'] = _getport('controller')
+    nginxconf['CONTROLLERSSLPORT'] = find_available_port()
+    nginxconf['KEEPWEBPORT'] = _getport('keep-web')
+    nginxconf['KEEPWEBDLSSLPORT'] = find_available_port()
+    nginxconf['KEEPWEBSSLPORT'] = find_available_port()
+    nginxconf['KEEPPROXYPORT'] = _getport('keepproxy')
+    nginxconf['KEEPPROXYSSLPORT'] = find_available_port()
+    nginxconf['GITPORT'] = _getport('arv-git-httpd')
+    nginxconf['GITSSLPORT'] = find_available_port()
+    nginxconf['WSPORT'] = _getport('ws')
+    nginxconf['WSSPORT'] = _getport('wss')
+    nginxconf['SSLCERT'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.pem')
+    nginxconf['SSLKEY'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.key')
+    nginxconf['ACCESSLOG'] = _logfilename('nginx_access')
+    nginxconf['ERRORLOG'] = _logfilename('nginx_error')
+    nginxconf['TMPDIR'] = TEST_TMPDIR
+
+    conftemplatefile = os.path.join(MY_DIRNAME, 'nginx.conf')
+    conffile = os.path.join(TEST_TMPDIR, 'nginx.conf')
+    with open(conffile, 'w') as f:
+        f.write(re.sub(
+            r'{{([A-Z]+)}}',
+            lambda match: str(nginxconf.get(match.group(1))),
+            open(conftemplatefile).read()))
+
+    env = os.environ.copy()
+    env['PATH'] = env['PATH']+':/sbin:/usr/sbin:/usr/local/sbin'
+
+    nginx = subprocess.Popen(
+        ['nginx',
+         '-g', 'error_log stderr info;',
+         '-g', 'pid '+_pidfile('nginx')+';',
+         '-c', conffile],
+        env=env, stdin=open('/dev/null'), stdout=sys.stderr)
+    _setport('controller-ssl', nginxconf['CONTROLLERSSLPORT'])
+    _setport('keep-web-dl-ssl', nginxconf['KEEPWEBDLSSLPORT'])
+    _setport('keep-web-ssl', nginxconf['KEEPWEBSSLPORT'])
+    _setport('keepproxy-ssl', nginxconf['KEEPPROXYSSLPORT'])
+    _setport('arv-git-httpd-ssl', nginxconf['GITSSLPORT'])
+
+def stop_nginx():
+    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        return
+    kill_server_pid(_pidfile('nginx'))
+
+def _pidfile(program):
+    return os.path.join(TEST_TMPDIR, program + '.pid')
+
+def _portfile(program):
+    return os.path.join(TEST_TMPDIR, program + '.port')
+
+def _setport(program, port):
+    with open(_portfile(program), 'w') as f:
+        f.write(str(port))
+
+# Returns 9 if program is not up.
+def _getport(program):
+    try:
+        return int(open(_portfile(program)).read())
+    except IOError:
+        return 9
+
+def _dbconfig(key):
+    global _cached_db_config
+    if not _cached_db_config:
+        _cached_db_config = yaml.load(open(os.path.join(
+            SERVICES_SRC_DIR, 'api', 'config', 'database.yml')))
+    return _cached_db_config['test'][key]
+
+def _apiconfig(key):
+    global _cached_config
+    if _cached_config:
+        return _cached_config[key]
+    def _load(f, required=True):
+        fullpath = os.path.join(SERVICES_SRC_DIR, 'api', 'config', f)
+        if not required and not os.path.exists(fullpath):
+            return {}
+        with open(fullpath) as conf:
+            return yaml.load(conf)
+    cdefault = _load('application.default.yml')
+    csite = _load('application.yml', required=False)
+    _cached_config = {}
+    for section in [cdefault.get('common',{}), cdefault.get('test',{}),
+                    csite.get('common',{}), csite.get('test',{})]:
+        _cached_config.update(section)
+    return _cached_config[key]
+
+def fixture(fix):
+    '''load a fixture yaml file'''
+    with open(os.path.join(SERVICES_SRC_DIR, 'api', "test", "fixtures",
+                           fix + ".yml")) as f:
+        yaml_file = f.read()
+        try:
+            trim_index = yaml_file.index("# Test Helper trims the rest of the file")
+            yaml_file = yaml_file[0:trim_index]
+        except ValueError:
+            pass
+        return yaml.load(yaml_file)
+
+def auth_token(token_name):
+    return fixture("api_client_authorizations")[token_name]["api_token"]
+
+def authorize_with(token_name):
+    '''token_name is the symbolic name of the token from the api_client_authorizations fixture'''
+    arvados.config.settings()["ARVADOS_API_TOKEN"] = auth_token(token_name)
+    arvados.config.settings()["ARVADOS_API_HOST"] = os.environ.get("ARVADOS_API_HOST")
+    arvados.config.settings()["ARVADOS_API_HOST_INSECURE"] = "true"
+
+class TestCaseWithServers(unittest.TestCase):
+    """TestCase to start and stop supporting Arvados servers.
+
+    Define any of the MAIN_SERVER, WS_SERVER, KEEP_SERVER,
+    KEEP_PROXY_SERVER, and/or KEEP_WEB_SERVER class variables as a
+    dictionary of keyword arguments.  If you do, setUpClass will start
+    the corresponding servers by passing these keyword arguments to the
+    run, run_ws, run_keep, run_keep_proxy, and/or run_keep_web
+    functions, respectively.  It will also set Arvados environment
+    variables to point to these servers appropriately.  If you don't
+    run a Keep or Keep proxy server, setUpClass will set up a
+    temporary directory for Keep local storage, and set it as
+    KEEP_LOCAL_STORE.
+
+    tearDownClass will stop any servers started, and restore the
+    original environment.
+    """
+    MAIN_SERVER = None
+    WS_SERVER = None
+    KEEP_SERVER = None
+    KEEP_PROXY_SERVER = None
+    KEEP_WEB_SERVER = None
+
+    @staticmethod
+    def _restore_dict(src, dest):
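+        # Make dest match src exactly: delete keys that were added after the
+        # snapshot was taken, then restore the original values.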
+        for key in list(dest.keys()):
+            if key not in src:
+                del dest[key]
+        dest.update(src)
+
+    @classmethod
+    def setUpClass(cls):
+        cls._orig_environ = os.environ.copy()
+        cls._orig_config = arvados.config.settings().copy()
+        cls._cleanup_funcs = []
+        os.environ.pop('ARVADOS_KEEP_SERVICES', None)
+        os.environ.pop('ARVADOS_EXTERNAL_CLIENT', None)
+        for server_kwargs, start_func, stop_func in (
+                (cls.MAIN_SERVER, run, reset),
+                (cls.WS_SERVER, run_ws, stop_ws),
+                (cls.KEEP_SERVER, run_keep, stop_keep),
+                (cls.KEEP_PROXY_SERVER, run_keep_proxy, stop_keep_proxy),
+                (cls.KEEP_WEB_SERVER, run_keep_web, stop_keep_web)):
+            if server_kwargs is not None:
+                start_func(**server_kwargs)
+                cls._cleanup_funcs.append(stop_func)
+        if (cls.KEEP_SERVER is None) and (cls.KEEP_PROXY_SERVER is None):
+            cls.local_store = tempfile.mkdtemp()
+            os.environ['KEEP_LOCAL_STORE'] = cls.local_store
+            cls._cleanup_funcs.append(
+                lambda: shutil.rmtree(cls.local_store, ignore_errors=True))
+        else:
+            os.environ.pop('KEEP_LOCAL_STORE', None)
+        arvados.config.initialize()
+
+    @classmethod
+    def tearDownClass(cls):
+        for clean_func in cls._cleanup_funcs:
+            clean_func()
+        cls._restore_dict(cls._orig_environ, os.environ)
+        cls._restore_dict(cls._orig_config, arvados.config.settings())
+
+
+if __name__ == "__main__":
+    actions = [
+        'start', 'stop',
+        'start_ws', 'stop_ws',
+        'start_controller', 'stop_controller',
+        'start_keep', 'stop_keep',
+        'start_keep_proxy', 'stop_keep_proxy',
+        'start_keep-web', 'stop_keep-web',
+        'start_arv-git-httpd', 'stop_arv-git-httpd',
+        'start_nginx', 'stop_nginx',
+    ]
+    parser = argparse.ArgumentParser()
+    parser.add_argument('action', type=str, help="one of {}".format(actions))
+    parser.add_argument('--auth', type=str, metavar='FIXTURE_NAME', help='Print authorization info for given api_client_authorizations fixture')
+    parser.add_argument('--num-keep-servers', metavar='int', type=int, default=2, help="Number of keep servers desired")
+    parser.add_argument('--keep-enforce-permissions', action="store_true", help="Enforce keep permissions")
+
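+    # Typical invocations (illustrative; 'active' is one of the token names
+    # in the api_client_authorizations fixture):
+    #   python run_test_server.py start --auth active
+    #   python run_test_server.py stop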
+    args = parser.parse_args()
+
+    if args.action not in actions:
+        print("Unrecognized action '{}'. Actions are: {}.".
+              format(args.action, actions),
+              file=sys.stderr)
+        sys.exit(1)
+    if args.action == 'start':
+        stop(force=('ARVADOS_TEST_API_HOST' not in os.environ))
+        run(leave_running_atexit=True)
+        host = os.environ['ARVADOS_API_HOST']
+        if args.auth is not None:
+            token = auth_token(args.auth)
+            print("export ARVADOS_API_TOKEN={}".format(pipes.quote(token)))
+            print("export ARVADOS_API_HOST={}".format(pipes.quote(host)))
+            print("export ARVADOS_API_HOST_INSECURE=true")
+        else:
+            print(host)
+    elif args.action == 'stop':
+        stop(force=('ARVADOS_TEST_API_HOST' not in os.environ))
+    elif args.action == 'start_ws':
+        run_ws()
+    elif args.action == 'stop_ws':
+        stop_ws()
+    elif args.action == 'start_controller':
+        run_controller()
+    elif args.action == 'stop_controller':
+        stop_controller()
+    elif args.action == 'start_keep':
+        run_keep(enforce_permissions=args.keep_enforce_permissions, num_servers=args.num_keep_servers)
+    elif args.action == 'stop_keep':
+        stop_keep(num_servers=args.num_keep_servers)
+    elif args.action == 'start_keep_proxy':
+        run_keep_proxy()
+    elif args.action == 'stop_keep_proxy':
+        stop_keep_proxy()
+    elif args.action == 'start_arv-git-httpd':
+        run_arv_git_httpd()
+    elif args.action == 'stop_arv-git-httpd':
+        stop_arv_git_httpd()
+    elif args.action == 'start_keep-web':
+        run_keep_web()
+    elif args.action == 'stop_keep-web':
+        stop_keep_web()
+    elif args.action == 'start_nginx':
+        run_nginx()
+        print("export ARVADOS_API_HOST=0.0.0.0:{}".format(_getport('controller-ssl')))
+    elif args.action == 'stop_nginx':
+        stop_nginx()
+    else:
+        raise Exception("action recognized but not implemented!?")
diff --git a/sdk/python/tests/slow_test.py b/sdk/python/tests/slow_test.py
new file mode 100644 (file)
index 0000000..ae46f4e
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import __main__
+import os
+import unittest
+
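+# slow_test ignores its argument and evaluates to a unittest.skipIf decorator
+# that skips the test when the suite was invoked with --short (i.e. when
+# __main__.short_tests_only is true).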
+slow_test = lambda _: unittest.skipIf(
+    __main__.short_tests_only,
+    "running --short tests only")
diff --git a/sdk/python/tests/test_api.py b/sdk/python/tests/test_api.py
new file mode 100644 (file)
index 0000000..8d3142a
--- /dev/null
@@ -0,0 +1,222 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
+import arvados
+import collections
+import httplib2
+import itertools
+import json
+import mimetypes
+import os
+import socket
+import string
+import unittest
+
+import mock
+from . import run_test_server
+
+from apiclient import errors as apiclient_errors
+from apiclient import http as apiclient_http
+from arvados.api import OrderedJsonModel, RETRY_DELAY_INITIAL, RETRY_DELAY_BACKOFF, RETRY_COUNT
+from .arvados_testutil import fake_httplib2_response, queue_with
+
+if not mimetypes.inited:
+    mimetypes.init()
+
+class ArvadosApiTest(run_test_server.TestCaseWithServers):
+    MAIN_SERVER = {}
+    ERROR_HEADERS = {'Content-Type': mimetypes.types_map['.json']}
+
+    def api_error_response(self, code, *errors):
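+        # Build the (response, content) pair that RequestMockBuilder expects,
+        # shaped like an Arvados API error payload with an error_token.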
+        return (fake_httplib2_response(code, **self.ERROR_HEADERS),
+                json.dumps({'errors': errors,
+                            'error_token': '1234567890+12345678'}).encode())
+
+    def test_new_api_objects_with_cache(self):
+        clients = [arvados.api('v1', cache=True) for index in [0, 1]]
+        self.assertIsNot(*clients)
+
+    def test_empty_list(self):
+        answer = arvados.api('v1').humans().list(
+            filters=[['uuid', '=', None]]).execute()
+        self.assertEqual(answer['items_available'], len(answer['items']))
+
+    def test_nonempty_list(self):
+        answer = arvados.api('v1').collections().list().execute()
+        self.assertNotEqual(0, answer['items_available'])
+        self.assertNotEqual(0, len(answer['items']))
+
+    def test_timestamp_inequality_filter(self):
+        api = arvados.api('v1')
+        new_item = api.specimens().create(body={}).execute()
+        for operator, should_include in [
+                ['<', False], ['>', False],
+                ['<=', True], ['>=', True], ['=', True]]:
+            response = api.specimens().list(filters=[
+                ['created_at', operator, new_item['created_at']],
+                # Also filter by uuid to ensure (if it matches) it's on page 0
+                ['uuid', '=', new_item['uuid']]]).execute()
+            uuids = [item['uuid'] for item in response['items']]
+            did_include = new_item['uuid'] in uuids
+            self.assertEqual(
+                did_include, should_include,
+                "'%s %s' filter should%s have matched '%s'" % (
+                    operator, new_item['created_at'],
+                    ('' if should_include else ' not'),
+                    new_item['created_at']))
+
+    def test_exceptions_include_errors(self):
+        mock_responses = {
+            'arvados.humans.get': self.api_error_response(
+                422, "Bad UUID format", "Bad output format"),
+            }
+        req_builder = apiclient_http.RequestMockBuilder(mock_responses)
+        api = arvados.api('v1', requestBuilder=req_builder)
+        with self.assertRaises(apiclient_errors.HttpError) as err_ctx:
+            api.humans().get(uuid='xyz-xyz-abcdef').execute()
+        err_s = str(err_ctx.exception)
+        for msg in ["Bad UUID format", "Bad output format"]:
+            self.assertIn(msg, err_s)
+
+    def test_exceptions_without_errors_have_basic_info(self):
+        mock_responses = {
+            'arvados.humans.delete': (
+                fake_httplib2_response(500, **self.ERROR_HEADERS),
+                b"")
+            }
+        req_builder = apiclient_http.RequestMockBuilder(mock_responses)
+        api = arvados.api('v1', requestBuilder=req_builder)
+        with self.assertRaises(apiclient_errors.HttpError) as err_ctx:
+            api.humans().delete(uuid='xyz-xyz-abcdef').execute()
+        self.assertIn("500", str(err_ctx.exception))
+
+    def test_request_too_large(self):
+        api = arvados.api('v1')
+        maxsize = api._rootDesc.get('maxRequestSize', 0)
+        with self.assertRaises(apiclient_errors.MediaUploadSizeError):
+            text = "X" * maxsize
+            arvados.api('v1').collections().create(body={"manifest_text": text}).execute()
+
+    def test_ordered_json_model(self):
+        mock_responses = {
+            'arvados.humans.get': (
+                None,
+                json.dumps(collections.OrderedDict(
+                    (c, int(c, 16)) for c in string.hexdigits
+                )).encode(),
+            ),
+        }
+        req_builder = apiclient_http.RequestMockBuilder(mock_responses)
+        api = arvados.api('v1',
+                          requestBuilder=req_builder, model=OrderedJsonModel())
+        result = api.humans().get(uuid='test').execute()
+        self.assertEqual(string.hexdigits, ''.join(list(result.keys())))
+
+
+class RetryREST(unittest.TestCase):
+    def setUp(self):
+        self.api = arvados.api('v1')
+        self.assertTrue(hasattr(self.api._http, 'orig_http_request'),
+                        "test doesn't know how to intercept HTTP requests")
+        self.mock_response = {'user': 'person'}
+        self.request_success = (fake_httplib2_response(200),
+                                json.dumps(self.mock_response))
+        self.api._http.orig_http_request = mock.MagicMock()
+        # All requests succeed by default. Tests override as needed.
+        self.api._http.orig_http_request.return_value = self.request_success
+
+    @mock.patch('time.sleep')
+    def test_socket_error_retry_get(self, sleep):
+        self.api._http.orig_http_request.side_effect = (
+            socket.error('mock error'),
+            self.request_success,
+        )
+        self.assertEqual(self.api.users().current().execute(),
+                         self.mock_response)
+        self.assertGreater(self.api._http.orig_http_request.call_count, 1,
+                           "client got the right response without retrying")
+        self.assertEqual(sleep.call_args_list,
+                         [mock.call(RETRY_DELAY_INITIAL)])
+
+    @mock.patch('time.sleep')
+    def test_same_automatic_request_id_on_retry(self, sleep):
+        self.api._http.orig_http_request.side_effect = (
+            socket.error('mock error'),
+            self.request_success,
+        )
+        self.api.users().current().execute()
+        calls = self.api._http.orig_http_request.call_args_list
+        self.assertEqual(len(calls), 2)
+        self.assertEqual(
+            calls[0][1]['headers']['X-Request-Id'],
+            calls[1][1]['headers']['X-Request-Id'])
+        self.assertRegex(calls[0][1]['headers']['X-Request-Id'], r'^req-[a-z0-9]{20}$')
+
+    @mock.patch('time.sleep')
+    def test_provided_request_id_on_retry(self, sleep):
+        self.api.request_id = 'fake-request-id'
+        self.api._http.orig_http_request.side_effect = (
+            socket.error('mock error'),
+            self.request_success,
+        )
+        self.api.users().current().execute()
+        calls = self.api._http.orig_http_request.call_args_list
+        self.assertEqual(len(calls), 2)
+        for call in calls:
+            self.assertEqual(call[1]['headers']['X-Request-Id'], 'fake-request-id')
+
+    @mock.patch('time.sleep')
+    def test_socket_error_retry_delay(self, sleep):
+        self.api._http.orig_http_request.side_effect = socket.error('mock')
+        self.api._http._retry_count = 3
+        with self.assertRaises(socket.error):
+            self.api.users().current().execute()
+        self.assertEqual(self.api._http.orig_http_request.call_count, 4)
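+        # The three sleeps between attempts should back off exponentially
+        # from RETRY_DELAY_INITIAL by a factor of RETRY_DELAY_BACKOFF.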
+        self.assertEqual(sleep.call_args_list, [
+            mock.call(RETRY_DELAY_INITIAL),
+            mock.call(RETRY_DELAY_INITIAL * RETRY_DELAY_BACKOFF),
+            mock.call(RETRY_DELAY_INITIAL * RETRY_DELAY_BACKOFF**2),
+        ])
+
+    @mock.patch('time.time', side_effect=[i*2**20 for i in range(99)])
+    def test_close_old_connections_non_retryable(self, mock_time):
+        self._test_connection_close(expect=1)
+
+    @mock.patch('time.time', side_effect=itertools.count())
+    def test_no_close_fresh_connections_non_retryable(self, mock_time):
+        self._test_connection_close(expect=0)
+
+    @mock.patch('time.time', side_effect=itertools.count())
+    def test_override_max_idle_time(self, mock_time):
+        self.api._http._max_keepalive_idle = 0
+        self._test_connection_close(expect=1)
+
+    def _test_connection_close(self, expect=0):
+        # Do two POST requests. The second one must close each cached
+        # connection `expect` times.
+        self.api.users().create(body={}).execute()
+        mock_conns = {str(i): mock.MagicMock() for i in range(2)}
+        self.api._http.connections = mock_conns.copy()
+        self.api.users().create(body={}).execute()
+        for c in mock_conns.values():
+            self.assertEqual(c.close.call_count, expect)
+
+    @mock.patch('time.sleep')
+    def test_socket_error_no_retry_post(self, sleep):
+        self.api._http.orig_http_request.side_effect = (
+            socket.error('mock error'),
+            self.request_success,
+        )
+        with self.assertRaises(socket.error):
+            self.api.users().create(body={}).execute()
+        self.assertEqual(self.api._http.orig_http_request.call_count, 1,
+                         "client should try non-retryable method exactly once")
+        self.assertEqual(sleep.call_args_list, [])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/sdk/python/tests/test_arv_copy.py b/sdk/python/tests/test_arv_copy.py
new file mode 100644 (file)
index 0000000..324d6e0
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+import os
+import sys
+import tempfile
+import unittest
+
+import arvados.commands.arv_copy as arv_copy
+from . import arvados_testutil as tutil
+
+class ArvCopyTestCase(unittest.TestCase, tutil.VersionChecker):
+    def run_copy(self, args):
+        sys.argv = ['arv-copy'] + args
+        return arv_copy.main()
+
+    def test_unsupported_arg(self):
+        with self.assertRaises(SystemExit):
+            self.run_copy(['-x=unknown'])
+
+    def test_version_argument(self):
+        with tutil.redirected_streams(
+                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):
+            with self.assertRaises(SystemExit):
+                self.run_copy(['--version'])
+        self.assertVersionOutput(out, err)
diff --git a/sdk/python/tests/test_arv_get.py b/sdk/python/tests/test_arv_get.py
new file mode 100644 (file)
index 0000000..733cd64
--- /dev/null
@@ -0,0 +1,208 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from future.utils import listitems
+import io
+import logging
+import mock
+import os
+import re
+import shutil
+import tempfile
+
+import arvados
+import arvados.collection as collection
+import arvados.commands.get as arv_get
+from . import run_test_server
+
+from . import arvados_testutil as tutil
+from .arvados_testutil import ArvadosBaseTestCase
+
+class ArvadosGetTestCase(run_test_server.TestCaseWithServers,
+                         tutil.VersionChecker,
+                         ArvadosBaseTestCase):
+    MAIN_SERVER = {}
+    KEEP_SERVER = {}
+
+    def setUp(self):
+        super(ArvadosGetTestCase, self).setUp()
+        self.tempdir = tempfile.mkdtemp()
+        self.col_loc, self.col_pdh, self.col_manifest = self.write_test_collection()
+
+        self.stdout = tutil.BytesIO()
+        self.stderr = tutil.StringIO()
+        self.loggingHandler = logging.StreamHandler(self.stderr)
+        self.loggingHandler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
+        logging.getLogger().addHandler(self.loggingHandler)
+
+    def tearDown(self):
+        logging.getLogger().removeHandler(self.loggingHandler)
+        super(ArvadosGetTestCase, self).tearDown()
+        shutil.rmtree(self.tempdir)
+
+    def write_test_collection(self,
+                              strip_manifest=False,
+                              contents={
+                                  'foo.txt': 'foo',
+                                  'bar.txt': 'bar',
+                                  'subdir/baz.txt': 'baz',
+                              }):
+        c = collection.Collection()
+        for path, data in listitems(contents):
+            with c.open(path, 'wb') as f:
+                f.write(data)
+        c.save_new()
+
+        return (c.manifest_locator(),
+                c.portable_data_hash(),
+                c.manifest_text(strip=strip_manifest))
+
+    def run_get(self, args):
+        self.stdout.seek(0, 0)
+        self.stdout.truncate(0)
+        self.stderr.seek(0, 0)
+        self.stderr.truncate(0)
+        return arv_get.main(args, self.stdout, self.stderr)
+
+    def test_version_argument(self):
+        with tutil.redirected_streams(
+                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):
+            with self.assertRaises(SystemExit):
+                self.run_get(['--version'])
+        self.assertVersionOutput(out, err)
+
+    def test_get_single_file(self):
+        # Get the file using the collection's locator
+        r = self.run_get(["{}/subdir/baz.txt".format(self.col_loc), '-'])
+        self.assertEqual(0, r)
+        self.assertEqual(b'baz', self.stdout.getvalue())
+        # Then, try by PDH
+        r = self.run_get(["{}/subdir/baz.txt".format(self.col_pdh), '-'])
+        self.assertEqual(0, r)
+        self.assertEqual(b'baz', self.stdout.getvalue())
+
+    def test_get_block(self):
+        # Get raw data using a block locator
+        blk = re.search(r' (acbd18\S+\+A\S+) ', self.col_manifest).group(1)
+        r = self.run_get([blk, '-'])
+        self.assertEqual(0, r)
+        self.assertEqual(b'foo', self.stdout.getvalue())
+
+    def test_get_multiple_files(self):
+        # Download the entire collection to the temp directory
+        r = self.run_get(["{}/".format(self.col_loc), self.tempdir])
+        self.assertEqual(0, r)
+        with open(os.path.join(self.tempdir, "foo.txt"), "r") as f:
+            self.assertEqual("foo", f.read())
+        with open(os.path.join(self.tempdir, "bar.txt"), "r") as f:
+            self.assertEqual("bar", f.read())
+        with open(os.path.join(self.tempdir, "subdir", "baz.txt"), "r") as f:
+            self.assertEqual("baz", f.read())
+
+    def test_get_collection_unstripped_manifest(self):
+        dummy_token = "+Axxxxxxx"
+        # Get the collection manifest by UUID
+        r = self.run_get([self.col_loc, self.tempdir])
+        self.assertEqual(0, r)
+        m_from_collection = re.sub(r"\+A[0-9a-f@]+", dummy_token, self.col_manifest)
+        with open(os.path.join(self.tempdir, self.col_loc), "r") as f:
+            # Replace manifest tokens before comparison to avoid races
+            m_from_file = re.sub(r"\+A[0-9a-f@]+", dummy_token, f.read())
+            self.assertEqual(m_from_collection, m_from_file)
+        # Get the collection manifest by PDH
+        r = self.run_get([self.col_pdh, self.tempdir])
+        self.assertEqual(0, r)
+        with open(os.path.join(self.tempdir, self.col_pdh), "r") as f:
+            # Replace manifest tokens before comparison to avoid races
+            m_from_file = re.sub(r"\+A[0-9a-f@]+", dummy_token, f.read())
+            self.assertEqual(m_from_collection, m_from_file)
+
+    def test_get_collection_stripped_manifest(self):
+        col_loc, col_pdh, col_manifest = self.write_test_collection(
+            strip_manifest=True)
+        # Get the collection manifest by UUID
+        r = self.run_get(['--strip-manifest', col_loc, self.tempdir])
+        self.assertEqual(0, r)
+        with open(os.path.join(self.tempdir, col_loc), "r") as f:
+            self.assertEqual(col_manifest, f.read())
+        # Get the collection manifest by PDH
+        r = self.run_get(['--strip-manifest', col_pdh, self.tempdir])
+        self.assertEqual(0, r)
+        with open(os.path.join(self.tempdir, col_pdh), "r") as f:
+            self.assertEqual(col_manifest, f.read())
+
+    def test_invalid_collection(self):
+        # Asking for an invalid collection should generate an error.
+        r = self.run_get(['this-uuid-seems-to-be-fake', self.tempdir])
+        self.assertNotEqual(0, r)
+
+    def test_invalid_file_request(self):
+        # Asking for a nonexistent file within a collection should generate an error.
+        r = self.run_get(["{}/im-not-here.txt".format(self.col_loc), self.tempdir])
+        self.assertNotEqual(0, r)
+
+    def test_invalid_destination(self):
+        # Asking to place the collection's files in a nonexistent directory
+        # should generate an error.
+        r = self.run_get([self.col_loc, "/fake/subdir/"])
+        self.assertNotEqual(0, r)
+
+    def test_preexistent_destination(self):
+        # Asking to place a file with the same path as a local one should
+        # generate an error and avoid overwrites.
+        with open(os.path.join(self.tempdir, "foo.txt"), "w") as f:
+            f.write("another foo")
+        r = self.run_get(["{}/foo.txt".format(self.col_loc), self.tempdir])
+        self.assertNotEqual(0, r)
+        with open(os.path.join(self.tempdir, "foo.txt"), "r") as f:
+            self.assertEqual("another foo", f.read())
+
+    def test_no_progress_when_stderr_not_a_tty(self):
+        # Create a collection with a big file (>64MB) to force the progress
+        # to be printed
+        c = collection.Collection()
+        with c.open('bigfile.txt', 'wb') as f:
+            for _ in range(65):
+                f.write("x" * 1024 * 1024)
+        c.save_new()
+        tmpdir = self.make_tmpdir()
+        # Simulate a TTY stderr
+        stderr = mock.MagicMock()
+        stdout = tutil.BytesIO()
+
+        # Confirm that progress is written to stderr when it is a tty
+        stderr.isatty.return_value = True
+        r = arv_get.main(['{}/bigfile.txt'.format(c.manifest_locator()),
+                          '{}/bigfile.txt'.format(tmpdir)],
+                         stdout, stderr)
+        self.assertEqual(0, r)
+        self.assertEqual(b'', stdout.getvalue())
+        self.assertTrue(stderr.write.called)
+
+        # Clean up and reset stderr mock
+        os.remove('{}/bigfile.txt'.format(tmpdir))
+        stderr = mock.MagicMock()
+        stdout = tutil.BytesIO()
+
+        # Confirm that progress is not written to stderr when it isn't a tty
+        stderr.isatty.return_value = False
+        r = arv_get.main(['{}/bigfile.txt'.format(c.manifest_locator()),
+                          '{}/bigfile.txt'.format(tmpdir)],
+                         stdout, stderr)
+        self.assertEqual(0, r)
+        self.assertEqual(b'', stdout.getvalue())
+        self.assertFalse(stderr.write.called)
+
+    request_id_regex = r'INFO: X-Request-Id: req-[a-z0-9]{20}\n'
+
+    def test_request_id_logging_on(self):
+        r = self.run_get(["-v", "{}/".format(self.col_loc), self.tempdir])
+        self.assertEqual(0, r)
+        self.assertRegex(self.stderr.getvalue(), self.request_id_regex)
+
+    def test_request_id_logging_off(self):
+        r = self.run_get(["{}/".format(self.col_loc), self.tempdir])
+        self.assertEqual(0, r)
+        self.assertNotRegex(self.stderr.getvalue(), self.request_id_regex)
diff --git a/sdk/python/tests/test_arv_keepdocker.py b/sdk/python/tests/test_arv_keepdocker.py
new file mode 100644 (file)
index 0000000..695a038
--- /dev/null
@@ -0,0 +1,151 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+import arvados
+import hashlib
+import mock
+import os
+import subprocess
+import sys
+import tempfile
+import unittest
+import logging
+
+import arvados.commands.keepdocker as arv_keepdocker
+from . import arvados_testutil as tutil
+from . import run_test_server
+
+
+class StopTest(Exception):
+    pass
+
+
+class ArvKeepdockerTestCase(unittest.TestCase, tutil.VersionChecker):
+    def run_arv_keepdocker(self, args, err):
+        sys.argv = ['arv-keepdocker'] + args
+        log_handler = logging.StreamHandler(err)
+        arv_keepdocker.logger.addHandler(log_handler)
+        try:
+            return arv_keepdocker.main()
+        finally:
+            arv_keepdocker.logger.removeHandler(log_handler)
+
+    def test_unsupported_arg(self):
+        out = tutil.StringIO()
+        with tutil.redirected_streams(stdout=out, stderr=out), \
+             self.assertRaises(SystemExit):
+            self.run_arv_keepdocker(['-x=unknown'], sys.stderr)
+        self.assertRegex(out.getvalue(), 'unrecognized arguments')
+
+    def test_version_argument(self):
+        with tutil.redirected_streams(
+                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):
+            with self.assertRaises(SystemExit):
+                self.run_arv_keepdocker(['--version'], sys.stderr)
+        self.assertVersionOutput(out, err)
+
+    @mock.patch('arvados.commands.keepdocker.find_image_hashes',
+                return_value=['abc123'])
+    @mock.patch('arvados.commands.keepdocker.find_one_image_hash',
+                return_value='abc123')
+    def test_image_format_compatibility(self, _1, _2):
+        old_id = hashlib.sha256(b'old').hexdigest()
+        new_id = 'sha256:'+hashlib.sha256(b'new').hexdigest()
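+        # Each triple is (image formats the server claims to support, image
+        # id, whether keepdocker should proceed). Old-style (v1) image ids
+        # are bare sha256 hex digests; new-style (v2) ids carry a 'sha256:'
+        # prefix.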
+        for supported, img_id, expect_ok in [
+                (['v1'], old_id, True),
+                (['v1'], new_id, False),
+                (None, old_id, False),
+                ([], old_id, False),
+                ([], new_id, False),
+                (['v1', 'v2'], new_id, True),
+                (['v1'], new_id, False),
+                (['v2'], new_id, True)]:
+
+            fakeDD = arvados.api('v1')._rootDesc
+            if supported is None:
+                del fakeDD['dockerImageFormats']
+            else:
+                fakeDD['dockerImageFormats'] = supported
+
+            err = tutil.StringIO()
+            out = tutil.StringIO()
+
+            with tutil.redirected_streams(stdout=out), \
+                 mock.patch('arvados.api') as api, \
+                 mock.patch('arvados.commands.keepdocker.popen_docker',
+                            return_value=subprocess.Popen(
+                                ['echo', img_id],
+                                stdout=subprocess.PIPE)), \
+                 mock.patch('arvados.commands.keepdocker.prep_image_file',
+                            side_effect=StopTest), \
+                 self.assertRaises(StopTest if expect_ok else SystemExit):
+
+                api()._rootDesc = fakeDD
+                self.run_arv_keepdocker(['--force', 'testimage'], err)
+
+            self.assertEqual(out.getvalue(), '')
+            if expect_ok:
+                self.assertNotRegex(
+                    err.getvalue(), "refusing to store",
+                    msg=repr((supported, img_id)))
+            else:
+                self.assertRegex(
+                    err.getvalue(), "refusing to store",
+                    msg=repr((supported, img_id)))
+            if not supported:
+                self.assertRegex(
+                    err.getvalue(),
+                    "server does not specify supported image formats",
+                    msg=repr((supported, img_id)))
+
+        fakeDD = arvados.api('v1')._rootDesc
+        fakeDD['dockerImageFormats'] = ['v1']
+        err = tutil.StringIO()
+        out = tutil.StringIO()
+        with tutil.redirected_streams(stdout=out), \
+             mock.patch('arvados.api') as api, \
+             mock.patch('arvados.commands.keepdocker.popen_docker',
+                        return_value=subprocess.Popen(
+                            ['echo', new_id],
+                            stdout=subprocess.PIPE)), \
+             mock.patch('arvados.commands.keepdocker.prep_image_file',
+                        side_effect=StopTest), \
+             self.assertRaises(StopTest):
+            api()._rootDesc = fakeDD
+            self.run_arv_keepdocker(
+                ['--force', '--force-image-format', 'testimage'], err)
+        self.assertRegex(err.getvalue(), "forcing incompatible image")
+
+    def test_tag_given_twice(self):
+        with tutil.redirected_streams(stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):
+            with self.assertRaises(SystemExit):
+                self.run_arv_keepdocker(['myrepo:mytag', 'extratag'], sys.stderr)
+            self.assertRegex(err.getvalue(), "cannot add tag argument 'extratag'")
+
+    def test_image_given_as_repo_colon_tag(self):
+        with self.assertRaises(StopTest), \
+             mock.patch('arvados.commands.keepdocker.find_one_image_hash',
+                        side_effect=StopTest) as find_image_mock:
+            self.run_arv_keepdocker(['repo:tag'], sys.stderr)
+        find_image_mock.assert_called_with('repo', 'tag')
+
+        with self.assertRaises(StopTest), \
+             mock.patch('arvados.commands.keepdocker.find_one_image_hash',
+                        side_effect=StopTest) as find_image_mock:
+            self.run_arv_keepdocker(['myreg.example:8888/repo/img:tag'], sys.stderr)
+        find_image_mock.assert_called_with('myreg.example:8888/repo/img', 'tag')
+
+    def test_image_has_colons(self):
+        with self.assertRaises(StopTest), \
+             mock.patch('arvados.commands.keepdocker.find_one_image_hash',
+                        side_effect=StopTest) as find_image_mock:
+            self.run_arv_keepdocker(['[::1]:8888/repo/img'], sys.stderr)
+        find_image_mock.assert_called_with('[::1]:8888/repo/img', 'latest')
+
+        with self.assertRaises(StopTest), \
+             mock.patch('arvados.commands.keepdocker.find_one_image_hash',
+                        side_effect=StopTest) as find_image_mock:
+            self.run_arv_keepdocker(['[::1]/repo/img'], sys.stderr)
+        find_image_mock.assert_called_with('[::1]/repo/img', 'latest')
diff --git a/sdk/python/tests/test_arv_ls.py b/sdk/python/tests/test_arv_ls.py
new file mode 100644 (file)
index 0000000..635c625
--- /dev/null
@@ -0,0 +1,98 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
+import os
+import random
+import sys
+import mock
+import tempfile
+
+import arvados.errors as arv_error
+import arvados.commands.ls as arv_ls
+from . import run_test_server
+
+from . import arvados_testutil as tutil
+from .arvados_testutil import str_keep_locator, redirected_streams, StringIO
+
+class ArvLsTestCase(run_test_server.TestCaseWithServers, tutil.VersionChecker):
+    FAKE_UUID = 'zzzzz-4zz18-12345abcde12345'
+
+    def newline_join(self, seq):
+        return '\n'.join(seq) + '\n'
+
+    def random_blocks(self, *sizes):
+        return ' '.join('{:032x}+{:d}'.format(
+                  random.randint(0, (16 ** 32) - 1), size
+                ) for size in sizes)
+
+    def mock_api_for_manifest(self, manifest_lines, uuid=FAKE_UUID):
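+        # Build a canned collection record plus a MagicMock API client whose
+        # collections().get().execute() returns that record.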
+        manifest_text = self.newline_join(manifest_lines)
+        pdh = str_keep_locator(manifest_text)
+        coll_info = {'uuid': uuid,
+                     'portable_data_hash': pdh,
+                     'manifest_text': manifest_text}
+        api_client = mock.MagicMock(name='mock_api_client')
+        api_client.collections().get().execute.return_value = coll_info
+        return coll_info, api_client
+
+    def run_ls(self, args, api_client, logger=None):
+        self.stdout = StringIO()
+        self.stderr = StringIO()
+        return arv_ls.main(args, self.stdout, self.stderr, api_client, logger)
+
+    def test_plain_listing(self):
+        collection, api_client = self.mock_api_for_manifest(
+            ['. {} 0:3:one.txt 3:4:two.txt'.format(self.random_blocks(5, 2)),
+             './dir {} 1:5:sub.txt'.format(self.random_blocks(8))])
+        self.assertEqual(0, self.run_ls([collection['uuid']], api_client))
+        self.assertEqual(
+            self.newline_join(['./one.txt', './two.txt', './dir/sub.txt']),
+            self.stdout.getvalue())
+        self.assertEqual('', self.stderr.getvalue())
+
+    def test_size_listing(self):
+        collection, api_client = self.mock_api_for_manifest(
+            ['. {} 0:0:0.txt 0:1000:1.txt 1000:2000:2.txt'.format(
+                    self.random_blocks(3000))])
+        self.assertEqual(0, self.run_ls(['-s', collection['uuid']], api_client))
+        self.stdout.seek(0, 0)
+        for expected in range(3):
+            actual_size, actual_name = self.stdout.readline().split()
+            # But she seems much bigger to me...
+            self.assertEqual(str(expected), actual_size)
+            self.assertEqual('./{}.txt'.format(expected), actual_name)
+        self.assertEqual('', self.stdout.read(-1))
+        self.assertEqual('', self.stderr.getvalue())
+
+    def test_nonnormalized_manifest(self):
+        collection, api_client = self.mock_api_for_manifest(
+            ['. {} 0:1010:non.txt'.format(self.random_blocks(1010)),
+             '. {} 0:2020:non.txt'.format(self.random_blocks(2020))])
+        self.assertEqual(0, self.run_ls(['-s', collection['uuid']], api_client))
+        self.stdout.seek(0, 0)
+        self.assertEqual(['3', './non.txt'], self.stdout.readline().split())
+        self.assertEqual('', self.stdout.read(-1))
+        self.assertEqual('', self.stderr.getvalue())
+
+    def test_locator_failure(self):
+        api_client = mock.MagicMock(name='mock_api_client')
+        error_mock = mock.MagicMock()
+        logger = mock.MagicMock()
+        logger.error = error_mock
+        api_client.collections().get().execute.side_effect = (
+            arv_error.NotFoundError)
+        self.assertNotEqual(0, self.run_ls([self.FAKE_UUID], api_client, logger))
+        self.assertEqual(1, error_mock.call_count)
+
+    def test_version_argument(self):
+        if sys.version_info >= (3, 0):
+            import warnings
+            warnings.simplefilter("ignore")
+        with redirected_streams(stdout=StringIO, stderr=StringIO) as (out, err):
+            with self.assertRaises(SystemExit):
+                self.run_ls(['--version'], None)
+        self.assertVersionOutput(out, err)
diff --git a/sdk/python/tests/test_arv_normalize.py b/sdk/python/tests/test_arv_normalize.py
new file mode 100644 (file)
index 0000000..80d1594
--- /dev/null
@@ -0,0 +1,34 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import subprocess
+import sys
+import tempfile
+import unittest
+
+from . import arvados_testutil as tutil
+
+
+class ArvNormalizeTestCase(unittest.TestCase, tutil.VersionChecker):
+    def run_arv_normalize(self, args=[]):
+        p = subprocess.Popen([sys.executable, 'bin/arv-normalize'] + args,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+        out, err = p.communicate()
+        sys.stdout.write(out.decode())
+        sys.stderr.write(err.decode())
+        return p.returncode
+
+    def test_unsupported_arg(self):
+        with tutil.redirected_streams(
+                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):
+            returncode = self.run_arv_normalize(['-x=unknown'])
+        self.assertNotEqual(0, returncode)
+
+    def test_version_argument(self):
+        with tutil.redirected_streams(
+                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):
+            returncode = self.run_arv_normalize(['--version'])
+        self.assertVersionOutput(out, err)
+        self.assertEqual(0, returncode)
diff --git a/sdk/python/tests/test_arv_put.py b/sdk/python/tests/test_arv_put.py
new file mode 100644 (file)
index 0000000..01a52a5
--- /dev/null
@@ -0,0 +1,1327 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from __future__ import division
+from future import standard_library
+standard_library.install_aliases()
+from builtins import str
+from builtins import range
+from functools import partial
+import apiclient
+import datetime
+import hashlib
+import json
+import logging
+import mock
+import os
+import pwd
+import random
+import re
+import select
+import shutil
+import signal
+import subprocess
+import sys
+import tempfile
+import time
+import unittest
+import uuid
+import yaml
+
+import arvados
+import arvados.commands.put as arv_put
+from . import arvados_testutil as tutil
+
+from .arvados_testutil import ArvadosBaseTestCase, fake_httplib2_response
+from . import run_test_server
+
+class ArvadosPutResumeCacheTest(ArvadosBaseTestCase):
+    CACHE_ARGSET = [
+        [],
+        ['/dev/null'],
+        ['/dev/null', '--filename', 'empty'],
+        ['/tmp']
+        ]
+
+    def tearDown(self):
+        super(ArvadosPutResumeCacheTest, self).tearDown()
+        try:
+            self.last_cache.destroy()
+        except AttributeError:
+            pass
+
+    def cache_path_from_arglist(self, arglist):
+        return arv_put.ResumeCache.make_path(arv_put.parse_arguments(arglist))
+
+    def test_cache_names_stable(self):
+        for argset in self.CACHE_ARGSET:
+            self.assertEqual(self.cache_path_from_arglist(argset),
+                              self.cache_path_from_arglist(argset),
+                              "cache name changed for {}".format(argset))
+
+    def test_cache_names_unique(self):
+        results = []
+        for argset in self.CACHE_ARGSET:
+            path = self.cache_path_from_arglist(argset)
+            self.assertNotIn(path, results)
+            results.append(path)
+
+    def test_cache_names_simple(self):
+        # The goal here is to make sure the filename doesn't use characters
+        # reserved by the filesystem.  Feel free to adjust this regexp as
+        # long as it still does that.
+        bad_chars = re.compile(r'[^-\.\w]')
+        for argset in self.CACHE_ARGSET:
+            path = self.cache_path_from_arglist(argset)
+            self.assertFalse(bad_chars.search(os.path.basename(path)),
+                             "path too exotic: {}".format(path))
+
+    def test_cache_names_ignore_argument_order(self):
+        self.assertEqual(
+            self.cache_path_from_arglist(['a', 'b', 'c']),
+            self.cache_path_from_arglist(['c', 'a', 'b']))
+        self.assertEqual(
+            self.cache_path_from_arglist(['-', '--filename', 'stdin']),
+            self.cache_path_from_arglist(['--filename', 'stdin', '-']))
+
+    def test_cache_names_differ_for_similar_paths(self):
+        # This test needs names at / that don't exist on the real filesystem.
+        self.assertNotEqual(
+            self.cache_path_from_arglist(['/_arvputtest1', '/_arvputtest2']),
+            self.cache_path_from_arglist(['/_arvputtest1/_arvputtest2']))
+
+    def test_cache_names_ignore_irrelevant_arguments(self):
+        # Workaround: parse_arguments bails on --filename with a directory.
+        path1 = self.cache_path_from_arglist(['/tmp'])
+        args = arv_put.parse_arguments(['/tmp'])
+        args.filename = 'tmp'
+        path2 = arv_put.ResumeCache.make_path(args)
+        self.assertEqual(path1, path2,
+                         "cache path considered --filename for directory")
+        self.assertEqual(
+            self.cache_path_from_arglist(['-']),
+            self.cache_path_from_arglist(['-', '--max-manifest-depth', '1']),
+            "cache path considered --max-manifest-depth for file")
+
+    def test_cache_names_treat_negative_manifest_depths_identically(self):
+        base_args = ['/tmp', '--max-manifest-depth']
+        self.assertEqual(
+            self.cache_path_from_arglist(base_args + ['-1']),
+            self.cache_path_from_arglist(base_args + ['-2']))
+
+    def test_cache_names_treat_stdin_consistently(self):
+        self.assertEqual(
+            self.cache_path_from_arglist(['-', '--filename', 'test']),
+            self.cache_path_from_arglist(['/dev/stdin', '--filename', 'test']))
+
+    def test_cache_names_identical_for_synonymous_names(self):
+        self.assertEqual(
+            self.cache_path_from_arglist(['.']),
+            self.cache_path_from_arglist([os.path.realpath('.')]))
+        testdir = self.make_tmpdir()
+        looplink = os.path.join(testdir, 'loop')
+        os.symlink(testdir, looplink)
+        self.assertEqual(
+            self.cache_path_from_arglist([testdir]),
+            self.cache_path_from_arglist([looplink]))
+
+    def test_cache_names_different_by_api_host(self):
+        config = arvados.config.settings()
+        orig_host = config.get('ARVADOS_API_HOST')
+        try:
+            name1 = self.cache_path_from_arglist(['.'])
+            config['ARVADOS_API_HOST'] = 'x' + (orig_host or 'localhost')
+            self.assertNotEqual(name1, self.cache_path_from_arglist(['.']))
+        finally:
+            if orig_host is None:
+                del config['ARVADOS_API_HOST']
+            else:
+                config['ARVADOS_API_HOST'] = orig_host
+
+    @mock.patch('arvados.keep.KeepClient.head')
+    def test_resume_cache_with_current_stream_locators(self, keep_client_head):
+        keep_client_head.side_effect = [True]
+        thing = {}
+        thing['_current_stream_locators'] = ['098f6bcd4621d373cade4e832627b4f6+4', '1f253c60a2306e0ee12fb6ce0c587904+6']
+        with tempfile.NamedTemporaryFile() as cachefile:
+            self.last_cache = arv_put.ResumeCache(cachefile.name)
+        self.last_cache.save(thing)
+        self.last_cache.close()
+        resume_cache = arv_put.ResumeCache(self.last_cache.filename)
+        self.assertNotEqual(None, resume_cache)
+
+    @mock.patch('arvados.keep.KeepClient.head')
+    def test_resume_cache_with_finished_streams(self, keep_client_head):
+        keep_client_head.side_effect = [True]
+        thing = {}
+        thing['_finished_streams'] = [['.', ['098f6bcd4621d373cade4e832627b4f6+4', '1f253c60a2306e0ee12fb6ce0c587904+6']]]
+        with tempfile.NamedTemporaryFile() as cachefile:
+            self.last_cache = arv_put.ResumeCache(cachefile.name)
+        self.last_cache.save(thing)
+        self.last_cache.close()
+        resume_cache = arv_put.ResumeCache(self.last_cache.filename)
+        self.assertNotEqual(None, resume_cache)
+
+    @mock.patch('arvados.keep.KeepClient.head')
+    def test_resume_cache_with_finished_streams_error_on_head(self, keep_client_head):
+        keep_client_head.side_effect = Exception('Locator not found')
+        thing = {}
+        thing['_finished_streams'] = [['.', ['098f6bcd4621d373cade4e832627b4f6+4', '1f253c60a2306e0ee12fb6ce0c587904+6']]]
+        with tempfile.NamedTemporaryFile() as cachefile:
+            self.last_cache = arv_put.ResumeCache(cachefile.name)
+        self.last_cache.save(thing)
+        self.last_cache.close()
+        resume_cache = arv_put.ResumeCache(self.last_cache.filename)
+        self.assertNotEqual(None, resume_cache)
+        resume_cache.check_cache()
+
+    def test_basic_cache_storage(self):
+        thing = ['test', 'list']
+        with tempfile.NamedTemporaryFile() as cachefile:
+            self.last_cache = arv_put.ResumeCache(cachefile.name)
+        self.last_cache.save(thing)
+        self.assertEqual(thing, self.last_cache.load())
+
+    def test_empty_cache(self):
+        with tempfile.NamedTemporaryFile() as cachefile:
+            cache = arv_put.ResumeCache(cachefile.name)
+        self.assertRaises(ValueError, cache.load)
+
+    def test_cache_persistent(self):
+        thing = ['test', 'list']
+        path = os.path.join(self.make_tmpdir(), 'cache')
+        cache = arv_put.ResumeCache(path)
+        cache.save(thing)
+        cache.close()
+        self.last_cache = arv_put.ResumeCache(path)
+        self.assertEqual(thing, self.last_cache.load())
+
+    def test_multiple_cache_writes(self):
+        thing = ['short', 'list']
+        with tempfile.NamedTemporaryFile() as cachefile:
+            self.last_cache = arv_put.ResumeCache(cachefile.name)
+        # Start writing an object longer than the one we test, to make
+        # sure the cache file gets truncated.
+        self.last_cache.save(['long', 'long', 'list'])
+        self.last_cache.save(thing)
+        self.assertEqual(thing, self.last_cache.load())
+
+    def test_cache_is_locked(self):
+        with tempfile.NamedTemporaryFile() as cachefile:
+            cache = arv_put.ResumeCache(cachefile.name)
+            self.assertRaises(arv_put.ResumeCacheConflict,
+                              arv_put.ResumeCache, cachefile.name)
+
+    def test_cache_stays_locked(self):
+        with tempfile.NamedTemporaryFile() as cachefile:
+            self.last_cache = arv_put.ResumeCache(cachefile.name)
+            path = cachefile.name
+        self.last_cache.save('test')
+        self.assertRaises(arv_put.ResumeCacheConflict,
+                          arv_put.ResumeCache, path)
+
+    def test_destroy_cache(self):
+        cachefile = tempfile.NamedTemporaryFile(delete=False)
+        try:
+            cache = arv_put.ResumeCache(cachefile.name)
+            cache.save('test')
+            cache.destroy()
+            try:
+                arv_put.ResumeCache(cachefile.name)
+            except arv_put.ResumeCacheConflict:
+                self.fail("could not load cache after destroying it")
+            self.assertRaises(ValueError, cache.load)
+        finally:
+            if os.path.exists(cachefile.name):
+                os.unlink(cachefile.name)
+
+    def test_restart_cache(self):
+        path = os.path.join(self.make_tmpdir(), 'cache')
+        cache = arv_put.ResumeCache(path)
+        cache.save('test')
+        cache.restart()
+        self.assertRaises(ValueError, cache.load)
+        self.assertRaises(arv_put.ResumeCacheConflict,
+                          arv_put.ResumeCache, path)
+
+
+class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
+                          ArvadosBaseTestCase):
+
+    def setUp(self):
+        super(ArvPutUploadJobTest, self).setUp()
+        run_test_server.authorize_with('active')
+        # Temp files creation
+        self.tempdir = tempfile.mkdtemp()
+        subdir = os.path.join(self.tempdir, 'subdir')
+        os.mkdir(subdir)
+        data = "x" * 1024 # 1 KB
+        for i in range(1, 5):
+            with open(os.path.join(self.tempdir, str(i)), 'w') as f:
+                f.write(data * i)
+        with open(os.path.join(subdir, 'otherfile'), 'w') as f:
+            f.write(data * 5)
+        # Large temp file for resume test
+        _, self.large_file_name = tempfile.mkstemp()
+        fileobj = open(self.large_file_name, 'w')
+        # Make sure to write just a little more than one block
+        for _ in range((arvados.config.KEEP_BLOCK_SIZE>>20)+1):
+            data = random.choice(['x', 'y', 'z']) * 1024 * 1024 # 1 MiB
+            fileobj.write(data)
+        fileobj.close()
+        # Temp dir containing small files to be repacked
+        self.small_files_dir = tempfile.mkdtemp()
+        data = 'y' * 1024 * 1024 # 1 MiB
+        for i in range(1, 70):
+            with open(os.path.join(self.small_files_dir, str(i)), 'w') as f:
+                f.write(data + str(i))
+        self.arvfile_write = getattr(arvados.arvfile.ArvadosFileWriter, 'write')
+        # Temp dir to hold a symlink to other temp dir
+        self.tempdir_with_symlink = tempfile.mkdtemp()
+        os.symlink(self.tempdir, os.path.join(self.tempdir_with_symlink, 'linkeddir'))
+        os.symlink(os.path.join(self.tempdir, '1'),
+                   os.path.join(self.tempdir_with_symlink, 'linkedfile'))
+
+    def tearDown(self):
+        super(ArvPutUploadJobTest, self).tearDown()
+        shutil.rmtree(self.tempdir)
+        os.unlink(self.large_file_name)
+        shutil.rmtree(self.small_files_dir)
+        shutil.rmtree(self.tempdir_with_symlink)
+
+    def test_symlinks_are_followed_by_default(self):
+        cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink])
+        cwriter.start(save_collection=False)
+        self.assertIn('linkeddir', cwriter.manifest_text())
+        self.assertIn('linkedfile', cwriter.manifest_text())
+        cwriter.destroy_cache()
+
+    def test_symlinks_are_not_followed_when_requested(self):
+        cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink],
+                                          follow_links=False)
+        cwriter.start(save_collection=False)
+        self.assertNotIn('linkeddir', cwriter.manifest_text())
+        self.assertNotIn('linkedfile', cwriter.manifest_text())
+        cwriter.destroy_cache()
+
+    def test_passing_nonexistent_path_raises_exception(self):
+        uuid_str = str(uuid.uuid4())
+        with self.assertRaises(arv_put.PathDoesNotExistError):
+            cwriter = arv_put.ArvPutUploadJob(["/this/path/does/not/exist/{}".format(uuid_str)])
+
+    def test_writer_works_without_cache(self):
+        cwriter = arv_put.ArvPutUploadJob(['/dev/null'], resume=False)
+        cwriter.start(save_collection=False)
+        self.assertEqual(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
+
+    def test_writer_works_with_cache(self):
+        with tempfile.NamedTemporaryFile() as f:
+            f.write(b'foo')
+            f.flush()
+            cwriter = arv_put.ArvPutUploadJob([f.name])
+            cwriter.start(save_collection=False)
+            self.assertEqual(0, cwriter.bytes_skipped)
+            self.assertEqual(3, cwriter.bytes_written)
+            # Don't destroy the cache, and start another upload
+            cwriter_new = arv_put.ArvPutUploadJob([f.name])
+            cwriter_new.start(save_collection=False)
+            cwriter_new.destroy_cache()
+            self.assertEqual(3, cwriter_new.bytes_skipped)
+            self.assertEqual(3, cwriter_new.bytes_written)
+
+    def make_progress_tester(self):
+        progression = []
+        def record_func(written, expected):
+            progression.append((written, expected))
+        return progression, record_func
+
+    def test_progress_reporting(self):
+        with tempfile.NamedTemporaryFile() as f:
+            f.write(b'foo')
+            f.flush()
+            for expect_count in (None, 8):
+                progression, reporter = self.make_progress_tester()
+                cwriter = arv_put.ArvPutUploadJob([f.name],
+                                                  reporter=reporter)
+                cwriter.bytes_expected = expect_count
+                cwriter.start(save_collection=False)
+                cwriter.destroy_cache()
+                self.assertIn((3, expect_count), progression)
+
+    def test_writer_upload_directory(self):
+        cwriter = arv_put.ArvPutUploadJob([self.tempdir])
+        cwriter.start(save_collection=False)
+        cwriter.destroy_cache()
+        self.assertEqual(1024*(1+2+3+4+5), cwriter.bytes_written)
+
+    def test_resume_large_file_upload(self):
+        def wrapped_write(*args, **kwargs):
+            data = args[1]
+            # Exit only on last block
+            if len(data) < arvados.config.KEEP_BLOCK_SIZE:
+                # Simulate a checkpoint before quitting. Ensure block commit.
+                self.writer._update(final=True)
+                raise SystemExit("Simulated error")
+            return self.arvfile_write(*args, **kwargs)
+
+        with mock.patch('arvados.arvfile.ArvadosFileWriter.write',
+                        autospec=True) as mocked_write:
+            mocked_write.side_effect = wrapped_write
+            writer = arv_put.ArvPutUploadJob([self.large_file_name],
+                                             replication_desired=1)
+            # We'll be accessing from inside the wrapper
+            self.writer = writer
+            with self.assertRaises(SystemExit):
+                writer.start(save_collection=False)
+            # Confirm that the file was partially uploaded
+            self.assertGreater(writer.bytes_written, 0)
+            self.assertLess(writer.bytes_written,
+                            os.path.getsize(self.large_file_name))
+        # Retry the upload
+        writer2 = arv_put.ArvPutUploadJob([self.large_file_name],
+                                          replication_desired=1)
+        writer2.start(save_collection=False)
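+        # Bytes written by the two runs, minus what the second run skipped as
+        # already uploaded, should add up to the full file size.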
+        self.assertEqual(writer.bytes_written + writer2.bytes_written - writer2.bytes_skipped,
+                         os.path.getsize(self.large_file_name))
+        writer2.destroy_cache()
+        del self.writer
+
+    # Test for bug #11002
+    def test_graceful_exit_while_repacking_small_blocks(self):
+        def wrapped_commit(*args, **kwargs):
+            raise SystemExit("Simulated error")
+
+        with mock.patch('arvados.arvfile._BlockManager.commit_bufferblock',
+                        autospec=True) as mocked_commit:
+            mocked_commit.side_effect = wrapped_commit
+            # Upload a little more than 1 block, wrapped_commit will make the first block
+            # commit to fail.
+            # arv-put should not exit with an exception by trying to commit the collection
+            # as it's in an inconsistent state.
+            writer = arv_put.ArvPutUploadJob([self.small_files_dir],
+                                             replication_desired=1)
+            try:
+                with self.assertRaises(SystemExit):
+                    writer.start(save_collection=False)
+            except arvados.arvfile.UnownedBlockError:
+                self.fail("arv-put command is trying to use a corrupted BlockManager. See https://dev.arvados.org/issues/11002")
+        writer.destroy_cache()
+
+    def test_no_resume_when_asked(self):
+        def wrapped_write(*args, **kwargs):
+            data = args[1]
+            # Exit only on last block
+            if len(data) < arvados.config.KEEP_BLOCK_SIZE:
+                # Simulate a checkpoint before quitting.
+                self.writer._update()
+                raise SystemExit("Simulated error")
+            return self.arvfile_write(*args, **kwargs)
+
+        with mock.patch('arvados.arvfile.ArvadosFileWriter.write',
+                        autospec=True) as mocked_write:
+            mocked_write.side_effect = wrapped_write
+            writer = arv_put.ArvPutUploadJob([self.large_file_name],
+                                             replication_desired=1)
+            # We'll be accessing from inside the wrapper
+            self.writer = writer
+            with self.assertRaises(SystemExit):
+                writer.start(save_collection=False)
+            # Confirm that the file was partially uploaded
+            self.assertGreater(writer.bytes_written, 0)
+            self.assertLess(writer.bytes_written,
+                            os.path.getsize(self.large_file_name))
+        # Retry the upload, this time without resume
+        writer2 = arv_put.ArvPutUploadJob([self.large_file_name],
+                                          replication_desired=1,
+                                          resume=False)
+        writer2.start(save_collection=False)
+        self.assertEqual(writer2.bytes_skipped, 0)
+        self.assertEqual(writer2.bytes_written,
+                         os.path.getsize(self.large_file_name))
+        writer2.destroy_cache()
+        del self.writer
+
+    def test_no_resume_when_no_cache(self):
+        def wrapped_write(*args, **kwargs):
+            data = args[1]
+            # Exit only on the last block
+            if len(data) < arvados.config.KEEP_BLOCK_SIZE:
+                # Simulate a checkpoint before quitting.
+                self.writer._update()
+                raise SystemExit("Simulated error")
+            return self.arvfile_write(*args, **kwargs)
+
+        with mock.patch('arvados.arvfile.ArvadosFileWriter.write',
+                        autospec=True) as mocked_write:
+            mocked_write.side_effect = wrapped_write
+            writer = arv_put.ArvPutUploadJob([self.large_file_name],
+                                             replication_desired=1)
+            # We'll access the writer from inside the wrapper
+            self.writer = writer
+            with self.assertRaises(SystemExit):
+                writer.start(save_collection=False)
+            # Confirm that the file was partially uploaded
+            self.assertGreater(writer.bytes_written, 0)
+            self.assertLess(writer.bytes_written,
+                            os.path.getsize(self.large_file_name))
+        # Retry the upload, this time without cache usage
+        writer2 = arv_put.ArvPutUploadJob([self.large_file_name],
+                                          replication_desired=1,
+                                          resume=False,
+                                          use_cache=False)
+        writer2.start(save_collection=False)
+        self.assertEqual(writer2.bytes_skipped, 0)
+        self.assertEqual(writer2.bytes_written,
+                         os.path.getsize(self.large_file_name))
+        writer2.destroy_cache()
+        del self.writer
+
+    def test_dry_run_feature(self):
+        def wrapped_write(*args, **kwargs):
+            data = args[1]
+            # Exit only on the last block
+            if len(data) < arvados.config.KEEP_BLOCK_SIZE:
+                # Simulate a checkpoint before quitting.
+                self.writer._update()
+                raise SystemExit("Simulated error")
+            return self.arvfile_write(*args, **kwargs)
+
+        with mock.patch('arvados.arvfile.ArvadosFileWriter.write',
+                        autospec=True) as mocked_write:
+            mocked_write.side_effect = wrapped_write
+            writer = arv_put.ArvPutUploadJob([self.large_file_name],
+                                             replication_desired=1)
+            # We'll access the writer from inside the wrapper
+            self.writer = writer
+            with self.assertRaises(SystemExit):
+                writer.start(save_collection=False)
+            # Confirm that the file was partially uploaded
+            self.assertGreater(writer.bytes_written, 0)
+            self.assertLess(writer.bytes_written,
+                            os.path.getsize(self.large_file_name))
+        with self.assertRaises(arv_put.ArvPutUploadIsPending):
+            # Retry the upload using dry_run to check if there is a pending upload
+            writer2 = arv_put.ArvPutUploadJob([self.large_file_name],
+                                              replication_desired=1,
+                                              dry_run=True)
+        # Complete the pending upload
+        writer3 = arv_put.ArvPutUploadJob([self.large_file_name],
+                                          replication_desired=1)
+        writer3.start(save_collection=False)
+        with self.assertRaises(arv_put.ArvPutUploadNotPending):
+            # Confirm there's no pending upload with dry_run=True
+            writer4 = arv_put.ArvPutUploadJob([self.large_file_name],
+                                              replication_desired=1,
+                                              dry_run=True)
+        # Obvious cases: with resume and/or cache disabled, a dry run must
+        # still report the upload as pending
+        with self.assertRaises(arv_put.ArvPutUploadIsPending):
+            arv_put.ArvPutUploadJob([self.large_file_name],
+                                    replication_desired=1,
+                                    dry_run=True,
+                                    resume=False,
+                                    use_cache=False)
+        with self.assertRaises(arv_put.ArvPutUploadIsPending):
+            arv_put.ArvPutUploadJob([self.large_file_name],
+                                    replication_desired=1,
+                                    dry_run=True,
+                                    resume=False)
+        del self.writer
+
+class CachedManifestValidationTest(ArvadosBaseTestCase):
+    class MockedPut(arv_put.ArvPutUploadJob):
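+        # Minimal stand-in that skips the parent constructor so the tests
+        # can exercise _cached_manifest_valid() against a synthetic state.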
+        def __init__(self, cached_manifest=None):
+            self._state = arv_put.ArvPutUploadJob.EMPTY_STATE
+            self._state['manifest'] = cached_manifest
+            self._api_client = mock.MagicMock()
+            self.logger = mock.MagicMock()
+            self.num_retries = 1
+
+    def datetime_to_hex(self, dt):
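+        # Keep block signatures carry their expiry date as a hex-encoded
+        # Unix timestamp, so the tests build manifests with this helper.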
+        return hex(int(time.mktime(dt.timetuple())))[2:]
+
+    def setUp(self):
+        super(CachedManifestValidationTest, self).setUp()
+        self.block1 = "fdba98970961edb29f88241b9d99d890" # foo
+        self.block2 = "37b51d194a7513e45b56f6524f2d51f2" # bar
+        self.template = ". "+self.block1+"+3+Asignature@%s "+self.block2+"+3+Anothersignature@%s 0:3:foofile.txt 3:6:barfile.txt\n"
+
+    def test_empty_cached_manifest_is_valid(self):
+        put_mock = self.MockedPut()
+        self.assertEqual(None, put_mock._state.get('manifest'))
+        self.assertTrue(put_mock._cached_manifest_valid())
+        put_mock._state['manifest'] = ''
+        self.assertTrue(put_mock._cached_manifest_valid())
+
+    def test_signature_cases(self):
+        now = datetime.datetime.utcnow()
+        yesterday = now - datetime.timedelta(days=1)
+        lastweek = now - datetime.timedelta(days=7)
+        tomorrow = now + datetime.timedelta(days=1)
+        nextweek = now + datetime.timedelta(days=7)
+
+        def mocked_head(blocks={}, loc=None):
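+            # Stand-in for KeepClient.head(): succeed for blocks marked
+            # valid, raise KeepRequestError for everything else.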
+            blk = loc.split('+', 1)[0]
+            if blocks.get(blk):
+                return True
+            raise arvados.errors.KeepRequestError("mocked error - block invalid")
+
+        # Block1_expiration, Block2_expiration, Block1_HEAD, Block2_HEAD, Expectation
+        cases = [
+            # All expired, reset cache - OK
+            (yesterday, lastweek, False, False, True),
+            (lastweek, yesterday, False, False, True),
+            # All non-expired valid blocks - OK
+            (tomorrow, nextweek, True, True, True),
+            (nextweek, tomorrow, True, True, True),
+            # All non-expired invalid blocks - Not OK
+            (tomorrow, nextweek, False, False, False),
+            (nextweek, tomorrow, False, False, False),
+            # One non-expired valid block - OK
+            (tomorrow, yesterday, True, False, True),
+            (yesterday, tomorrow, False, True, True),
+            # One non-expired invalid block - Not OK
+            (tomorrow, yesterday, False, False, False),
+            (yesterday, tomorrow, False, False, False),
+        ]
+        for case in cases:
+            b1_expiration, b2_expiration, b1_valid, b2_valid, outcome = case
+            head_responses = {
+                self.block1: b1_valid,
+                self.block2: b2_valid,
+            }
+            cached_manifest = self.template % (
+                self.datetime_to_hex(b1_expiration),
+                self.datetime_to_hex(b2_expiration),
+            )
+            arvput = self.MockedPut(cached_manifest)
+            with mock.patch('arvados.collection.KeepClient.head') as head_mock:
+                head_mock.side_effect = partial(mocked_head, head_responses)
+                self.assertEqual(outcome, arvput._cached_manifest_valid(),
+                    "Case '%s' should have produced outcome '%s'" % (case, outcome)
+                )
+                if b1_expiration > now or b2_expiration > now:
+                    # A HEAD request should have been done
+                    head_mock.assert_called_once()
+                else:
+                    head_mock.assert_not_called()
+
+
+class ArvadosExpectedBytesTest(ArvadosBaseTestCase):
+    TEST_SIZE = os.path.getsize(__file__)
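+    # The tests use this source file itself as an input of known size.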
+
+    def test_expected_bytes_for_file(self):
+        writer = arv_put.ArvPutUploadJob([__file__])
+        self.assertEqual(self.TEST_SIZE,
+                         writer.bytes_expected)
+
+    def test_expected_bytes_for_tree(self):
+        tree = self.make_tmpdir()
+        shutil.copyfile(__file__, os.path.join(tree, 'one'))
+        shutil.copyfile(__file__, os.path.join(tree, 'two'))
+
+        writer = arv_put.ArvPutUploadJob([tree])
+        self.assertEqual(self.TEST_SIZE * 2,
+                         writer.bytes_expected)
+        writer = arv_put.ArvPutUploadJob([tree, __file__])
+        self.assertEqual(self.TEST_SIZE * 3,
+                         writer.bytes_expected)
+
+    def test_expected_bytes_for_device(self):
+        writer = arv_put.ArvPutUploadJob(['/dev/null'], use_cache=False, resume=False)
+        self.assertIsNone(writer.bytes_expected)
+        writer = arv_put.ArvPutUploadJob([__file__, '/dev/null'])
+        self.assertIsNone(writer.bytes_expected)
+
+
+class ArvadosPutReportTest(ArvadosBaseTestCase):
+    def test_machine_progress(self):
+        for count, total in [(0, 1), (0, None), (1, None), (235, 9283)]:
+            expect = ": {} written {} total\n".format(
+                count, -1 if (total is None) else total)
+            self.assertTrue(
+                arv_put.machine_progress(count, total).endswith(expect))
+
+    def test_known_human_progress(self):
+        for count, total in [(0, 1), (2, 4), (45, 60)]:
+            expect = '{:.1%}'.format(1.0*count/total)
+            actual = arv_put.human_progress(count, total)
+            self.assertTrue(actual.startswith('\r'))
+            self.assertIn(expect, actual)
+
+    def test_unknown_human_progress(self):
+        for count in [1, 20, 300, 4000, 50000]:
+            self.assertTrue(re.search(r'\b{}\b'.format(count),
+                                      arv_put.human_progress(count, None)))
+
+
+class ArvPutLogFormatterTest(ArvadosBaseTestCase):
+    matcher = r'\(X-Request-Id: req-[a-z0-9]{20}\)'
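+    # ArvPutLogFormatter appends the request ID to the first log line only,
+    # which is what the tests below assert.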
+
+    def setUp(self):
+        super(ArvPutLogFormatterTest, self).setUp()
+        self.stderr = tutil.StringIO()
+        self.loggingHandler = logging.StreamHandler(self.stderr)
+        self.loggingHandler.setFormatter(
+            arv_put.ArvPutLogFormatter(arvados.util.new_request_id()))
+        self.logger = logging.getLogger()
+        self.logger.addHandler(self.loggingHandler)
+        self.logger.setLevel(logging.DEBUG)
+
+    def tearDown(self):
+        self.logger.removeHandler(self.loggingHandler)
+        self.stderr.close()
+        self.stderr = None
+        super(ArvPutLogFormatterTest, self).tearDown()
+
+    def test_request_id_logged_only_once_on_error(self):
+        self.logger.error('Ooops, something bad happened.')
+        self.logger.error('Another bad thing just happened.')
+        log_lines = self.stderr.getvalue().split('\n')[:-1]
+        self.assertEqual(2, len(log_lines))
+        self.assertRegex(log_lines[0], self.matcher)
+        self.assertNotRegex(log_lines[1], self.matcher)
+
+    def test_request_id_logged_only_once_on_debug(self):
+        self.logger.debug('This is just a debug message.')
+        self.logger.debug('Another message, move along.')
+        log_lines = self.stderr.getvalue().split('\n')[:-1]
+        self.assertEqual(2, len(log_lines))
+        self.assertRegex(log_lines[0], self.matcher)
+        self.assertNotRegex(log_lines[1], self.matcher)
+
+    def test_request_id_not_logged_on_info(self):
+        self.logger.info('This should be a useful message')
+        log_lines = self.stderr.getvalue().split('\n')[:-1]
+        self.assertEqual(1, len(log_lines))
+        self.assertNotRegex(log_lines[0], self.matcher)
+
+class ArvadosPutTest(run_test_server.TestCaseWithServers,
+                     ArvadosBaseTestCase,
+                     tutil.VersionChecker):
+    MAIN_SERVER = {}
+    Z_UUID = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
+
+    def call_main_with_args(self, args):
+        self.main_stdout.seek(0, 0)
+        self.main_stdout.truncate(0)
+        self.main_stderr.seek(0, 0)
+        self.main_stderr.truncate(0)
+        return arv_put.main(args, self.main_stdout, self.main_stderr)
+
+    def call_main_on_test_file(self, args=[]):
+        with self.make_test_file() as testfile:
+            path = testfile.name
+            self.call_main_with_args(['--stream', '--no-progress'] + args + [path])
+        self.assertTrue(
+            os.path.exists(os.path.join(os.environ['KEEP_LOCAL_STORE'],
+                                        '098f6bcd4621d373cade4e832627b4f6')),
+            "did not find file stream in Keep store")
+
+    def setUp(self):
+        super(ArvadosPutTest, self).setUp()
+        run_test_server.authorize_with('active')
+        arv_put.api_client = None
+        self.main_stdout = tutil.StringIO()
+        self.main_stderr = tutil.StringIO()
+        self.loggingHandler = logging.StreamHandler(self.main_stderr)
+        self.loggingHandler.setFormatter(
+            arv_put.ArvPutLogFormatter(arvados.util.new_request_id()))
+        logging.getLogger().addHandler(self.loggingHandler)
+
+    def tearDown(self):
+        logging.getLogger().removeHandler(self.loggingHandler)
+        for outbuf in ['main_stdout', 'main_stderr']:
+            if hasattr(self, outbuf):
+                getattr(self, outbuf).close()
+                delattr(self, outbuf)
+        super(ArvadosPutTest, self).tearDown()
+
+    def test_version_argument(self):
+        with tutil.redirected_streams(
+                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):
+            with self.assertRaises(SystemExit):
+                self.call_main_with_args(['--version'])
+        self.assertVersionOutput(out, err)
+
+    def test_simple_file_put(self):
+        self.call_main_on_test_file()
+
+    def test_put_with_unwritable_cache_dir(self):
+        orig_cachedir = arv_put.ResumeCache.CACHE_DIR
+        cachedir = self.make_tmpdir()
+        os.chmod(cachedir, 0o0)
+        arv_put.ResumeCache.CACHE_DIR = cachedir
+        try:
+            self.call_main_on_test_file()
+        finally:
+            arv_put.ResumeCache.CACHE_DIR = orig_cachedir
+            os.chmod(cachedir, 0o700)
+
+    def test_put_with_unwritable_cache_subdir(self):
+        orig_cachedir = arv_put.ResumeCache.CACHE_DIR
+        cachedir = self.make_tmpdir()
+        os.chmod(cachedir, 0o0)
+        arv_put.ResumeCache.CACHE_DIR = os.path.join(cachedir, 'cachedir')
+        try:
+            self.call_main_on_test_file()
+        finally:
+            arv_put.ResumeCache.CACHE_DIR = orig_cachedir
+            os.chmod(cachedir, 0o700)
+
+    def test_put_block_replication(self):
+        self.call_main_on_test_file()
+        with mock.patch('arvados.collection.KeepClient.local_store_put') as put_mock:
+            put_mock.return_value = 'acbd18db4cc2f85cedef654fccc4a4d8+3'
+            self.call_main_on_test_file(['--replication', '1'])
+            self.call_main_on_test_file(['--replication', '4'])
+            self.call_main_on_test_file(['--replication', '5'])
+            self.assertEqual(
+                [x[-1].get('copies') for x in put_mock.call_args_list],
+                [1, 4, 5])
+
+    def test_normalize(self):
+        testfile1 = self.make_test_file()
+        testfile2 = self.make_test_file()
+        test_paths = [testfile1.name, testfile2.name]
+        # Reverse-sort the paths, so normalization must change their order.
+        test_paths.sort(reverse=True)
+        self.call_main_with_args(['--stream', '--no-progress', '--normalize'] +
+                                 test_paths)
+        manifest = self.main_stdout.getvalue()
+        # Assert the second file we specified appears first in the manifest.
+        file_indices = [manifest.find(':' + os.path.basename(path))
+                        for path in test_paths]
+        self.assertGreater(*file_indices)
+
+    def test_error_name_without_collection(self):
+        self.assertRaises(SystemExit, self.call_main_with_args,
+                          ['--name', 'test without Collection',
+                           '--stream', '/dev/null'])
+
+    def test_error_when_project_not_found(self):
+        self.assertRaises(SystemExit,
+                          self.call_main_with_args,
+                          ['--project-uuid', self.Z_UUID])
+
+    def test_error_bad_project_uuid(self):
+        self.assertRaises(SystemExit,
+                          self.call_main_with_args,
+                          ['--project-uuid', self.Z_UUID, '--stream'])
+
+    def test_error_when_multiple_storage_classes_specified(self):
+        self.assertRaises(SystemExit,
+                          self.call_main_with_args,
+                          ['--storage-classes', 'hot,cold'])
+
+    def test_error_when_excluding_absolute_path(self):
+        tmpdir = self.make_tmpdir()
+        self.assertRaises(SystemExit,
+                          self.call_main_with_args,
+                          ['--exclude', '/some/absolute/path/*',
+                           tmpdir])
+
+    def test_api_error_handling(self):
+        coll_save_mock = mock.Mock(name='arv.collection.Collection().save_new()')
+        coll_save_mock.side_effect = arvados.errors.ApiError(
+            fake_httplib2_response(403), b'{}')
+        with mock.patch('arvados.collection.Collection.save_new',
+                        new=coll_save_mock):
+            with self.assertRaises(SystemExit) as exc_test:
+                self.call_main_with_args(['/dev/null'])
+            self.assertLess(0, exc_test.exception.args[0])
+            self.assertLess(0, coll_save_mock.call_count)
+            self.assertEqual("", self.main_stdout.getvalue())
+
+    def test_request_id_logging_on_error(self):
+        matcher = r'\(X-Request-Id: req-[a-z0-9]{20}\)\n'
+        coll_save_mock = mock.Mock(name='arv.collection.Collection().save_new()')
+        coll_save_mock.side_effect = arvados.errors.ApiError(
+            fake_httplib2_response(403), b'{}')
+        with mock.patch('arvados.collection.Collection.save_new',
+                        new=coll_save_mock):
+            with self.assertRaises(SystemExit) as exc_test:
+                self.call_main_with_args(['/dev/null'])
+            self.assertRegex(
+                self.main_stderr.getvalue(), matcher)
+
+
+class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
+                            ArvadosBaseTestCase):
+    def _getKeepServerConfig():
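+        # Runs at class-definition time (hence no 'self'): reads the API
+        # server's Rails config to find the blob signing key, so the test
+        # Keep server can enforce signed locators when one is configured.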
+        for config_file, mandatory in [
+                ['application.yml', False], ['application.default.yml', True]]:
+            path = os.path.join(run_test_server.SERVICES_SRC_DIR,
+                                "api", "config", config_file)
+            if not mandatory and not os.path.exists(path):
+                continue
+            with open(path) as f:
+                rails_config = yaml.safe_load(f)
+                for config_section in ['test', 'common']:
+                    try:
+                        key = rails_config[config_section]["blob_signing_key"]
+                    except (KeyError, TypeError):
+                        pass
+                    else:
+                        return {'blob_signing_key': key,
+                                'enforce_permissions': True}
+        return {'blob_signing_key': None, 'enforce_permissions': False}
+
+    MAIN_SERVER = {}
+    KEEP_SERVER = _getKeepServerConfig()
+    PROJECT_UUID = run_test_server.fixture('groups')['aproject']['uuid']
+
+    @classmethod
+    def setUpClass(cls):
+        super(ArvPutIntegrationTest, cls).setUpClass()
+        cls.ENVIRON = os.environ.copy()
+        cls.ENVIRON['PYTHONPATH'] = ':'.join(sys.path)
+
+    def datetime_to_hex(self, dt):
+        return hex(int(time.mktime(dt.timetuple())))[2:]
+
+    def setUp(self):
+        super(ArvPutIntegrationTest, self).setUp()
+        arv_put.api_client = None
+
+    def authorize_with(self, token_name):
+        run_test_server.authorize_with(token_name)
+        for v in ["ARVADOS_API_HOST",
+                  "ARVADOS_API_HOST_INSECURE",
+                  "ARVADOS_API_TOKEN"]:
+            self.ENVIRON[v] = arvados.config.settings()[v]
+        arv_put.api_client = arvados.api('v1')
+
+    def current_user(self):
+        return arv_put.api_client.users().current().execute()
+
+    def test_check_real_project_found(self):
+        self.authorize_with('active')
+        self.assertTrue(arv_put.desired_project_uuid(arv_put.api_client, self.PROJECT_UUID, 0),
+                        "did not correctly find test fixture project")
+
+    def test_check_error_finding_nonexistent_uuid(self):
+        BAD_UUID = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
+        self.authorize_with('active')
+        try:
+            result = arv_put.desired_project_uuid(arv_put.api_client, BAD_UUID,
+                                                  0)
+        except ValueError as error:
+            self.assertIn(BAD_UUID, str(error))
+        else:
+            self.assertFalse(result, "incorrectly found nonexistent project")
+
+    def test_check_error_finding_nonexistent_project(self):
+        BAD_UUID = 'zzzzz-tpzed-zzzzzzzzzzzzzzz'
+        self.authorize_with('active')
+        with self.assertRaises(apiclient.errors.HttpError):
+            result = arv_put.desired_project_uuid(arv_put.api_client, BAD_UUID,
+                                                  0)
+
+    def test_short_put_from_stdin(self):
+        # Have to run this as an integration test since arv-put can't
+        # read from the tests' stdin.
+        # arv-put usually can't stat(os.path.realpath('/dev/stdin')) in this
+        # case, because the /proc entry is already gone by the time it tries.
+        pipe = subprocess.Popen(
+            [sys.executable, arv_put.__file__, '--stream'],
+            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT, env=self.ENVIRON)
+        pipe.stdin.write(b'stdin test\n')
+        pipe.stdin.close()
+        deadline = time.time() + 5
+        while (pipe.poll() is None) and (time.time() < deadline):
+            time.sleep(.1)
+        returncode = pipe.poll()
+        if returncode is None:
+            pipe.terminate()
+            self.fail("arv-put did not PUT from stdin within 5 seconds")
+        elif returncode != 0:
+            sys.stdout.write(pipe.stdout.read().decode())
+            self.fail("arv-put returned exit code {}".format(returncode))
+        self.assertIn('4a9c8b735dce4b5fa3acf221a0b13628+11',
+                      pipe.stdout.read().decode())
+
+    def test_sigint_logs_request_id(self):
+        # Start arv-put, give it a chance to start up, send SIGINT,
+        # and check that its output includes the X-Request-Id.
+        input_stream = subprocess.Popen(
+            ['sleep', '10'],
+            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        pipe = subprocess.Popen(
+            [sys.executable, arv_put.__file__, '--stream'],
+            stdin=input_stream.stdout, stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT, env=self.ENVIRON)
+        # Wait for arv-put child process to print something (i.e., a
+        # log message) so we know its signal handler is installed.
+        select.select([pipe.stdout], [], [], 10)
+        pipe.send_signal(signal.SIGINT)
+        deadline = time.time() + 5
+        while (pipe.poll() is None) and (time.time() < deadline):
+            time.sleep(.1)
+        returncode = pipe.poll()
+        input_stream.terminate()
+        if returncode is None:
+            pipe.terminate()
+            self.fail("arv-put did not exit within 5 seconds")
+        self.assertRegex(pipe.stdout.read().decode(), r'\(X-Request-Id: req-[a-z0-9]{20}\)')
+
+    def test_ArvPutSignedManifest(self):
+        # ArvPutSignedManifest runs "arv-put foo" and then attempts to get
+        # the newly created manifest from the API server, testing to confirm
+        # that the block locators in the returned manifest are signed.
+        self.authorize_with('active')
+
+        # Before doing anything, demonstrate that the collection
+        # we're about to create is not present in our test fixture.
+        manifest_uuid = "00b4e9f40ac4dd432ef89749f1c01e74+47"
+        with self.assertRaises(apiclient.errors.HttpError):
+            notfound = arv_put.api_client.collections().get(
+                uuid=manifest_uuid).execute()
+
+        datadir = self.make_tmpdir()
+        with open(os.path.join(datadir, "foo"), "w") as f:
+            f.write("The quick brown fox jumped over the lazy dog")
+        p = subprocess.Popen([sys.executable, arv_put.__file__,
+                              os.path.join(datadir, 'foo')],
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE,
+                             env=self.ENVIRON)
+        (out, err) = p.communicate()
+        self.assertRegex(err.decode(), r'INFO: Collection saved as ')
+        self.assertEqual(p.returncode, 0)
+
+        # The manifest text stored in the API server under the same
+        # manifest UUID must use signed locators.
+        c = arv_put.api_client.collections().get(uuid=manifest_uuid).execute()
+        self.assertRegex(
+            c['manifest_text'],
+            r'^\. 08a008a01d498c404b0c30852b39d3b8\+44\+A[0-9a-f]+@[0-9a-f]+ 0:44:foo\n')
+
+        os.remove(os.path.join(datadir, "foo"))
+        os.rmdir(datadir)
+
+    def run_and_find_collection(self, text, extra_args=[]):
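+        # Run arv-put as a subprocess, feed it 'text' on stdin, then look up
+        # the resulting collection record via the API and return it.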
+        self.authorize_with('active')
+        pipe = subprocess.Popen(
+            [sys.executable, arv_put.__file__] + extra_args,
+            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE, env=self.ENVIRON)
+        stdout, stderr = pipe.communicate(text.encode())
+        self.assertRegex(stderr.decode(), r'INFO: Collection (updated:|saved as)')
+        search_key = ('portable_data_hash'
+                      if '--portable-data-hash' in extra_args else 'uuid')
+        collection_list = arvados.api('v1').collections().list(
+            filters=[[search_key, '=', stdout.decode().strip()]]
+        ).execute().get('items', [])
+        self.assertEqual(1, len(collection_list))
+        return collection_list[0]
+
+    def test_all_expired_signatures_invalidates_cache(self):
+        self.authorize_with('active')
+        tmpdir = self.make_tmpdir()
+        with open(os.path.join(tmpdir, 'somefile.txt'), 'w') as f:
+            f.write('foo')
+        # Upload a directory and get the cache file name
+        p = subprocess.Popen([sys.executable, arv_put.__file__, tmpdir],
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE,
+                             env=self.ENVIRON)
+        (out, err) = p.communicate()
+        self.assertRegex(err.decode(), r'INFO: Creating new cache file at ')
+        self.assertEqual(p.returncode, 0)
+        cache_filepath = re.search(r'INFO: Creating new cache file at (.*)',
+                                   err.decode()).groups()[0]
+        self.assertTrue(os.path.isfile(cache_filepath))
+        # Load the cache file contents and modify the manifest to simulate
+        # an expired access token
+        with open(cache_filepath, 'r') as c:
+            cache = json.load(c)
+        self.assertRegex(cache['manifest'], r'\+A\S+\@')
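+        # Signed locators look like <hash>+<size>+A<signature>@<hex expiry>;
+        # rewrite every expiry to a timestamp 30 days in the past.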
+        a_month_ago = datetime.datetime.now() - datetime.timedelta(days=30)
+        cache['manifest'] = re.sub(
+            r'\@.*? ',
+            "@{} ".format(self.datetime_to_hex(a_month_ago)),
+            cache['manifest'])
+        with open(cache_filepath, 'w') as c:
+            c.write(json.dumps(cache))
+        # Re-run the upload and expect the cache to be discarded as expired
+        p = subprocess.Popen([sys.executable, arv_put.__file__, tmpdir],
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE,
+                             env=self.ENVIRON)
+        (out, err) = p.communicate()
+        self.assertRegex(
+            err.decode(),
+            r'INFO: Cache expired, starting from scratch.*')
+        self.assertEqual(p.returncode, 0)
+
+    def test_invalid_signature_invalidates_cache(self):
+        self.authorize_with('active')
+        tmpdir = self.make_tmpdir()
+        with open(os.path.join(tmpdir, 'somefile.txt'), 'w') as f:
+            f.write('foo')
+        # Upload a directory and get the cache file name
+        p = subprocess.Popen([sys.executable, arv_put.__file__, tmpdir],
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE,
+                             env=self.ENVIRON)
+        (out, err) = p.communicate()
+        self.assertRegex(err.decode(), r'INFO: Creating new cache file at ')
+        self.assertEqual(p.returncode, 0)
+        cache_filepath = re.search(r'INFO: Creating new cache file at (.*)',
+                                   err.decode()).groups()[0]
+        self.assertTrue(os.path.isfile(cache_filepath))
+        # Load the cache file contents and modify the manifest to simulate
+        # an invalid access token
+        with open(cache_filepath, 'r') as c:
+            cache = json.load(c)
+        self.assertRegex(cache['manifest'], r'\+A\S+\@')
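+        # Swap the real signature for a well-formed but bogus one; the HEAD
+        # check on the cached blocks should then fail.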
+        cache['manifest'] = re.sub(
+            r'\+A.*\@',
+            "+Aabcdef0123456789abcdef0123456789abcdef01@",
+            cache['manifest'])
+        with open(cache_filepath, 'w') as c:
+            c.write(json.dumps(cache))
+        # Re-run the upload and expect it to fail with an invalid signature error
+        p = subprocess.Popen([sys.executable, arv_put.__file__, tmpdir],
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE,
+                             env=self.ENVIRON)
+        (out, err) = p.communicate()
+        self.assertRegex(
+            err.decode(),
+            r'ERROR: arv-put: Resume cache contains invalid signature.*')
+        self.assertEqual(p.returncode, 1)
+
+    def test_single_expired_signature_reuploads_file(self):
+        self.authorize_with('active')
+        tmpdir = self.make_tmpdir()
+        with open(os.path.join(tmpdir, 'foofile.txt'), 'w') as f:
+            f.write('foo')
+        # Write a second file on its own subdir to force a new stream
+        os.mkdir(os.path.join(tmpdir, 'bar'))
+        with open(os.path.join(tmpdir, 'bar', 'barfile.txt'), 'w') as f:
+            f.write('bar')
+        # Upload a directory and get the cache file name
+        p = subprocess.Popen([sys.executable, arv_put.__file__, tmpdir],
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE,
+                             env=self.ENVIRON)
+        (out, err) = p.communicate()
+        self.assertRegex(err.decode(), r'INFO: Creating new cache file at ')
+        self.assertEqual(p.returncode, 0)
+        cache_filepath = re.search(r'INFO: Creating new cache file at (.*)',
+                                   err.decode()).groups()[0]
+        self.assertTrue(os.path.isfile(cache_filepath))
+        # Load the cache file contents and modify the manifest to simulate
+        # an expired access token
+        with open(cache_filepath, 'r') as c:
+            cache = json.load(c)
+        self.assertRegex(cache['manifest'], r'\+A\S+\@')
+        a_month_ago = datetime.datetime.now() - datetime.timedelta(days=30)
+        # Make one of the signatures appear to have expired
+        cache['manifest'] = re.sub(
+            r'\@.*? 3:3:barfile.txt',
+            "@{} 3:3:barfile.txt".format(self.datetime_to_hex(a_month_ago)),
+            cache['manifest'])
+        with open(cache_filepath, 'w') as c:
+            c.write(json.dumps(cache))
+        # Re-run the upload and expect the file with the expired token to be re-uploaded
+        p = subprocess.Popen([sys.executable, arv_put.__file__, tmpdir],
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE,
+                             env=self.ENVIRON)
+        (out, err) = p.communicate()
+        self.assertRegex(
+            err.decode(),
+            r'WARNING: Uploaded file \'.*barfile.txt\' access token expired, will re-upload it from scratch')
+        self.assertEqual(p.returncode, 0)
+        # Confirm that the resulting cache is different from the last run.
+        with open(cache_filepath, 'r') as c2:
+            new_cache = json.load(c2)
+        self.assertNotEqual(cache['manifest'], new_cache['manifest'])
+
+    def test_put_collection_with_later_update(self):
+        tmpdir = self.make_tmpdir()
+        with open(os.path.join(tmpdir, 'file1'), 'w') as f:
+            f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')
+        col = self.run_and_find_collection("", ['--no-progress', tmpdir])
+        self.assertNotEqual(None, col['uuid'])
+        # Add a new file to the directory
+        with open(os.path.join(tmpdir, 'file2'), 'w') as f:
+            f.write('The quick brown fox jumped over the lazy dog')
+        updated_col = self.run_and_find_collection("", ['--no-progress', '--update-collection', col['uuid'], tmpdir])
+        self.assertEqual(col['uuid'], updated_col['uuid'])
+        # Get the manifest and check that the new file is being included
+        c = arv_put.api_client.collections().get(uuid=updated_col['uuid']).execute()
+        self.assertRegex(c['manifest_text'], r'^\..* .*:44:file2\n')
+
+    def test_upload_directory_reference_without_trailing_slash(self):
+        tmpdir1 = self.make_tmpdir()
+        tmpdir2 = self.make_tmpdir()
+        with open(os.path.join(tmpdir1, 'foo'), 'w') as f:
+            f.write('This is foo')
+        with open(os.path.join(tmpdir2, 'bar'), 'w') as f:
+            f.write('This is not foo')
+        # Upload one directory and one file
+        col = self.run_and_find_collection("", ['--no-progress',
+                                                tmpdir1,
+                                                os.path.join(tmpdir2, 'bar')])
+        self.assertNotEqual(None, col['uuid'])
+        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+        # Check that 'foo' was written inside a subcollection, while 'bar'
+        # was uploaded directly into the root collection
+        self.assertRegex(c['manifest_text'], r'^\. .*:15:bar\n\./.+ .*:11:foo\n')
+
+    def test_upload_directory_reference_with_trailing_slash(self):
+        tmpdir1 = self.make_tmpdir()
+        tmpdir2 = self.make_tmpdir()
+        with open(os.path.join(tmpdir1, 'foo'), 'w') as f:
+            f.write('This is foo')
+        with open(os.path.join(tmpdir2, 'bar'), 'w') as f:
+            f.write('This is not foo')
+        # Upload one directory (with trailing slash) and one file
+        col = self.run_and_find_collection("", ['--no-progress',
+                                                tmpdir1 + os.sep,
+                                                os.path.join(tmpdir2, 'bar')])
+        self.assertNotEqual(None, col['uuid'])
+        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+        # Check that 'foo' and 'bar' were written at the same level
+        self.assertRegex(c['manifest_text'], r'^\. .*:15:bar .*:11:foo\n')
+
+    def test_put_collection_with_high_redundancy(self):
+        # Write empty data: we're not testing CollectionWriter, just
+        # making sure collections.create tells the API server what our
+        # desired replication level is.
+        collection = self.run_and_find_collection("", ['--replication', '4'])
+        self.assertEqual(4, collection['replication_desired'])
+
+    def test_put_collection_with_default_redundancy(self):
+        collection = self.run_and_find_collection("")
+        self.assertEqual(None, collection['replication_desired'])
+
+    def test_put_collection_with_unnamed_project_link(self):
+        link = self.run_and_find_collection(
+            "Test unnamed collection",
+            ['--portable-data-hash', '--project-uuid', self.PROJECT_UUID])
+        username = pwd.getpwuid(os.getuid()).pw_name
+        self.assertRegex(
+            link['name'],
+            r'^Saved at .* by {}@'.format(re.escape(username)))
+
+    def test_put_collection_with_name_and_no_project(self):
+        link_name = 'Test Collection Link in home project'
+        collection = self.run_and_find_collection(
+            "Test named collection in home project",
+            ['--portable-data-hash', '--name', link_name])
+        self.assertEqual(link_name, collection['name'])
+        my_user_uuid = self.current_user()['uuid']
+        self.assertEqual(my_user_uuid, collection['owner_uuid'])
+
+    def test_put_collection_with_named_project_link(self):
+        link_name = 'Test auto Collection Link'
+        collection = self.run_and_find_collection("Test named collection",
+                                      ['--portable-data-hash',
+                                       '--name', link_name,
+                                       '--project-uuid', self.PROJECT_UUID])
+        self.assertEqual(link_name, collection['name'])
+
+    def test_put_collection_with_storage_classes_specified(self):
+        collection = self.run_and_find_collection("", ['--storage-classes', 'hot'])
+
+        self.assertEqual(len(collection['storage_classes_desired']), 1)
+        self.assertEqual(collection['storage_classes_desired'][0], 'hot')
+
+    def test_put_collection_without_storage_classes_specified(self):
+        collection = self.run_and_find_collection("")
+
+        self.assertEqual(len(collection['storage_classes_desired']), 1)
+        self.assertEqual(collection['storage_classes_desired'][0], 'default')
+
+    def test_exclude_filename_pattern(self):
+        tmpdir = self.make_tmpdir()
+        tmpsubdir = os.path.join(tmpdir, 'subdir')
+        os.mkdir(tmpsubdir)
+        for fname in ['file1', 'file2', 'file3']:
+            with open(os.path.join(tmpdir, "%s.txt" % fname), 'w') as f:
+                f.write("This is %s" % fname)
+            with open(os.path.join(tmpsubdir, "%s.txt" % fname), 'w') as f:
+                f.write("This is %s" % fname)
+        col = self.run_and_find_collection("", ['--no-progress',
+                                                '--exclude', '*2.txt',
+                                                '--exclude', 'file3.*',
+                                                 tmpdir])
+        self.assertNotEqual(None, col['uuid'])
+        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+        # Neither file2.txt nor file3.txt should have been uploaded
+        self.assertRegex(c['manifest_text'], r'^.*:file1.txt')
+        self.assertNotRegex(c['manifest_text'], r'^.*:file2.txt')
+        self.assertNotRegex(c['manifest_text'], r'^.*:file3.txt')
+
+    def test_exclude_filepath_pattern(self):
+        tmpdir = self.make_tmpdir()
+        tmpsubdir = os.path.join(tmpdir, 'subdir')
+        os.mkdir(tmpsubdir)
+        for fname in ['file1', 'file2', 'file3']:
+            with open(os.path.join(tmpdir, "%s.txt" % fname), 'w') as f:
+                f.write("This is %s" % fname)
+            with open(os.path.join(tmpsubdir, "%s.txt" % fname), 'w') as f:
+                f.write("This is %s" % fname)
+        col = self.run_and_find_collection("", ['--no-progress',
+                                                '--exclude', 'subdir/*2.txt',
+                                                '--exclude', './file1.*',
+                                                 tmpdir])
+        self.assertNotEqual(None, col['uuid'])
+        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+        # Only tmpdir/file1.txt & tmpdir/subdir/file2.txt should have been excluded
+        self.assertNotRegex(c['manifest_text'],
+                            r'^\./%s.*:file1.txt' % os.path.basename(tmpdir))
+        self.assertNotRegex(c['manifest_text'],
+                            r'^\./%s/subdir.*:file2.txt' % os.path.basename(tmpdir))
+        self.assertRegex(c['manifest_text'],
+                         r'^\./%s.*:file2.txt' % os.path.basename(tmpdir))
+        self.assertRegex(c['manifest_text'], r'^.*:file3.txt')
+
+    def test_unicode_on_filename(self):
+        tmpdir = self.make_tmpdir()
+        fname = u"i❤arvados.txt"
+        with open(os.path.join(tmpdir, fname), 'w') as f:
+            f.write("This is a unicode named file")
+        col = self.run_and_find_collection("", ['--no-progress', tmpdir])
+        self.assertNotEqual(None, col['uuid'])
+        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+        self.assertTrue(fname in c['manifest_text'], u"{} does not include {}".format(c['manifest_text'], fname))
+
+    def test_silent_mode_no_errors(self):
+        self.authorize_with('active')
+        tmpdir = self.make_tmpdir()
+        with open(os.path.join(tmpdir, 'test.txt'), 'w') as f:
+            f.write('hello world')
+        pipe = subprocess.Popen(
+            [sys.executable, arv_put.__file__] + ['--silent', tmpdir],
+            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE, env=self.ENVIRON)
+        stdout, stderr = pipe.communicate()
+        # No console output should occur on normal operations
+        self.assertNotRegex(stderr.decode(), r'.+')
+        self.assertNotRegex(stdout.decode(), r'.+')
+
+    def test_silent_mode_does_not_avoid_error_messages(self):
+        self.authorize_with('active')
+        pipe = subprocess.Popen(
+            [sys.executable, arv_put.__file__] + ['--silent',
+                                                  '/path/not/existent'],
+            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE, env=self.ENVIRON)
+        stdout, stderr = pipe.communicate()
+        # Error message should be displayed when errors happen
+        self.assertRegex(stderr.decode(), r'.*ERROR:.*')
+        self.assertNotRegex(stdout.decode(), r'.+')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/sdk/python/tests/test_arv_run.py b/sdk/python/tests/test_arv_run.py
new file mode 100644 (file)
index 0000000..1afc120
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+import os
+import sys
+import tempfile
+import unittest
+
+import arvados.commands.run as arv_run
+from . import arvados_testutil as tutil
+
+class ArvRunTestCase(unittest.TestCase, tutil.VersionChecker):
+    def run_arv_run(self, args):
+        sys.argv = ['arv-run'] + args
+        return arv_run.main()
+
+    def test_unsupported_arg(self):
+        with self.assertRaises(SystemExit):
+            self.run_arv_run(['-x=unknown'])
+
+    def test_version_argument(self):
+        with tutil.redirected_streams(
+                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):
+            with self.assertRaises(SystemExit):
+                self.run_arv_run(['--version'])
+        self.assertVersionOutput(out, err)
diff --git a/sdk/python/tests/test_arv_ws.py b/sdk/python/tests/test_arv_ws.py
new file mode 100644 (file)
index 0000000..521c46e
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+import os
+import sys
+import tempfile
+import unittest
+
+import arvados.errors as arv_error
+import arvados.commands.ws as arv_ws
+from . import arvados_testutil as tutil
+
+class ArvWsTestCase(unittest.TestCase, tutil.VersionChecker):
+    def run_ws(self, args):
+        return arv_ws.main(args)
+
+    def test_unsupported_arg(self):
+        with self.assertRaises(SystemExit):
+            self.run_ws(['-x=unknown'])
+
+    def test_version_argument(self):
+        with tutil.redirected_streams(
+                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):
+            with self.assertRaises(SystemExit):
+                self.run_ws(['--version'])
+        self.assertVersionOutput(out, err)
diff --git a/sdk/python/tests/test_arvfile.py b/sdk/python/tests/test_arvfile.py
new file mode 100644 (file)
index 0000000..a760255
--- /dev/null
@@ -0,0 +1,894 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from builtins import hex
+from builtins import str
+from builtins import range
+from builtins import object
+import bz2
+import datetime
+import gzip
+import io
+import mock
+import os
+import unittest
+import time
+
+import arvados
+from arvados._ranges import Range
+from arvados.keep import KeepLocator
+from arvados.collection import Collection, CollectionReader
+from arvados.arvfile import ArvadosFile, ArvadosFileReader
+
+from . import arvados_testutil as tutil
+from .test_stream import StreamFileReaderTestCase, StreamRetryTestMixin
+
+class ArvadosFileWriterTestCase(unittest.TestCase):
+    class MockKeep(object):
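+        # In-memory stand-in for KeepClient: serves blocks from a dict keyed
+        # by locator and records every request for later inspection.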
+        def __init__(self, blocks):
+            self.blocks = blocks
+            self.requests = []
+        def get(self, locator, num_retries=0):
+            self.requests.append(locator)
+            return self.blocks.get(locator)
+        def get_from_cache(self, locator):
+            self.requests.append(locator)
+            return self.blocks.get(locator)
+        def put(self, data, num_retries=None, copies=None):
+            pdh = tutil.str_keep_locator(data)
+            self.blocks[pdh] = bytes(data)
+            return pdh
+
+    class MockApi(object):
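+        # Fake API client: collections().create() verifies it receives the
+        # expected body and returns a canned response.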
+        def __init__(self, b, r):
+            self.body = b
+            self.response = r
+            self._schema = ArvadosFileWriterTestCase.MockApi.MockSchema()
+            self._rootDesc = {}
+        class MockSchema(object):
+            def __init__(self):
+                self.schemas = {'Collection': {'properties': {'replication_desired': {'type':'integer'}}}}
+        class MockCollections(object):
+            def __init__(self, b, r):
+                self.body = b
+                self.response = r
+            class Execute(object):
+                def __init__(self, r):
+                    self.response = r
+                def execute(self, num_retries=None):
+                    return self.response
+            def create(self, ensure_unique_name=False, body=None):
+                if body != self.body:
+                    raise Exception("Body %s does not match expectation %s" % (body, self.body))
+                return ArvadosFileWriterTestCase.MockApi.MockCollections.Execute(self.response)
+            def update(self, uuid=None, body=None):
+                return ArvadosFileWriterTestCase.MockApi.MockCollections.Execute(self.response)
+        def collections(self):
+            return ArvadosFileWriterTestCase.MockApi.MockCollections(self.body, self.response)
+
+
+    def test_truncate(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({
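+            # Locator format: <md5 of the data>+<data length>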
+            "781e5e245d69b566979b86e28d23f2c7+10": b"0123456789",
+        })
+        api = ArvadosFileWriterTestCase.MockApi({
+            "name": "test_truncate",
+            "manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\n",
+            "replication_desired": None,
+        }, {
+            "uuid": "zzzzz-4zz18-mockcollection0",
+            "manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\n",
+            "portable_data_hash":"7fcd0eaac3aad4c31a6a0e756475da92+52",
+        })
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+                        api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            self.assertEqual(writer.size(), 10)
+            self.assertEqual(b"0123456789", writer.read(12))
+
+            writer.truncate(8)
+
+            # Make sure reading off the end doesn't break
+            self.assertEqual(b"", writer.read(12))
+
+            self.assertEqual(writer.size(), 8)
+            writer.seek(0, os.SEEK_SET)
+            self.assertEqual(b"01234567", writer.read(12))
+
+            self.assertIsNone(c.manifest_locator())
+            self.assertTrue(c.modified())
+            c.save_new("test_truncate")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c.manifest_locator())
+            self.assertFalse(c.modified())
+
+
+    def test_truncate2(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({
+            "781e5e245d69b566979b86e28d23f2c7+10": b"0123456789",
+        })
+        api = ArvadosFileWriterTestCase.MockApi({
+            "name": "test_truncate2",
+            "manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 7f614da9329cd3aebf59b91aadc30bf0+67108864 0:12:count.txt\n",
+            "replication_desired": None,
+        }, {
+            "uuid": "zzzzz-4zz18-mockcollection0",
+            "manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 7f614da9329cd3aebf59b91aadc30bf0+67108864 0:12:count.txt\n",
+            "portable_data_hash": "272da898abdf86ddc71994835e3155f8+95",
+        })
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+                        api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            self.assertEqual(writer.size(), 10)
+            self.assertEqual(b"0123456789", writer.read(12))
+
+            # extend file size
+            writer.truncate(12)
+
+            self.assertEqual(writer.size(), 12)
+            writer.seek(0, os.SEEK_SET)
+            self.assertEqual(b"0123456789\x00\x00", writer.read(12))
+
+            self.assertIsNone(c.manifest_locator())
+            self.assertTrue(c.modified())
+            c.save_new("test_truncate2")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c.manifest_locator())
+            self.assertFalse(c.modified())
+
+    def test_truncate3(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({
+            "781e5e245d69b566979b86e28d23f2c7+10": b"0123456789",
+            "a925576942e94b2ef57a066101b48876+10": b"abcdefghij",
+        })
+        api = ArvadosFileWriterTestCase.MockApi({
+            "name": "test_truncate",
+            "manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\n",
+            "replication_desired": None,
+        }, {
+            "uuid": "zzzzz-4zz18-mockcollection0",
+            "manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\n",
+            "portable_data_hash": "7fcd0eaac3aad4c31a6a0e756475da92+52",
+        })
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 a925576942e94b2ef57a066101b48876+10 0:20:count.txt\n',
+                        api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            self.assertEqual(writer.size(), 20)
+            self.assertEqual(b"0123456789ab", writer.read(12))
+            self.assertEqual(12, writer.tell())
+
+            writer.truncate(8)
+
+            # Make sure reading off the end doesn't break
+            self.assertEqual(12, writer.tell())
+            self.assertEqual(b"", writer.read(12))
+
+            self.assertEqual(writer.size(), 8)
+            self.assertEqual(2, writer.seek(-10, os.SEEK_CUR))
+            self.assertEqual(b"234567", writer.read(12))
+
+            self.assertIsNone(c.manifest_locator())
+            self.assertTrue(c.modified())
+            c.save_new("test_truncate")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c.manifest_locator())
+            self.assertFalse(c.modified())
+
+
+
+    def test_write_to_end(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({
+            "781e5e245d69b566979b86e28d23f2c7+10": b"0123456789",
+        })
+        api = ArvadosFileWriterTestCase.MockApi({
+            "name": "test_append",
+            "manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:13:count.txt\n",
+            "replication_desired": None,
+        }, {
+            "uuid": "zzzzz-4zz18-mockcollection0",
+            "manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:13:count.txt\n",
+            "portable_data_hash": "c5c3af76565c8efb6a806546bcf073f3+88",
+        })
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+                             api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            self.assertEqual(writer.size(), 10)
+
+            self.assertEqual(5, writer.seek(5, os.SEEK_SET))
+            self.assertEqual(b"56789", writer.read(8))
+
+            writer.seek(10, os.SEEK_SET)
+            writer.write("foo")
+            self.assertEqual(writer.size(), 13)
+
+            writer.seek(5, os.SEEK_SET)
+            self.assertEqual(b"56789foo", writer.read(8))
+
+            self.assertIsNone(c.manifest_locator())
+            self.assertTrue(c.modified())
+            self.assertIsNone(keep.get("acbd18db4cc2f85cedef654fccc4a4d8+3"))
+
+            c.save_new("test_append")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c.manifest_locator())
+            self.assertFalse(c.modified())
+            self.assertEqual(b"foo", keep.get("acbd18db4cc2f85cedef654fccc4a4d8+3"))
+
+
+    def test_append(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({
+            "781e5e245d69b566979b86e28d23f2c7+10": b"0123456789",
+        })
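+        # Exercise append mode as text ('a+', 'at+') and binary ('ab+');
+        # convert() maps the reference bytes to the type each mode handles.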
+        for (mode, convert) in (
+                ('a+', lambda data: data.decode(encoding='utf-8')),
+                ('at+', lambda data: data.decode(encoding='utf-8')),
+                ('ab+', lambda data: data)):
+            c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n', keep_client=keep)
+            writer = c.open("count.txt", mode)
+            self.assertEqual(writer.read(20), convert(b"0123456789"))
+
+            writer.seek(0, os.SEEK_SET)
+            writer.write(convert(b"hello"))
+            self.assertEqual(writer.read(), convert(b""))
+            if 'b' in mode:
+                writer.seek(-5, os.SEEK_CUR)
+                self.assertEqual(writer.read(3), convert(b"hel"))
+                self.assertEqual(writer.read(), convert(b"lo"))
+            else:
+                with self.assertRaises(IOError):
+                    writer.seek(-5, os.SEEK_CUR)
+                with self.assertRaises(IOError):
+                    writer.seek(-3, os.SEEK_END)
+            writer.seek(0, os.SEEK_SET)
+            writer.read(7)
+            self.assertEqual(7, writer.tell())
+            self.assertEqual(7, writer.seek(7, os.SEEK_SET))
+
+            writer.seek(0, os.SEEK_SET)
+            self.assertEqual(writer.read(), convert(b"0123456789hello"))
+
+            writer.seek(0)
+            writer.write(convert(b"world"))
+            self.assertEqual(writer.read(), convert(b""))
+            writer.seek(0)
+            self.assertEqual(writer.read(), convert(b"0123456789helloworld"))
+
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 fc5e038d38a57032085441e7fe7010b0+10 0:20:count.txt\n", c.portable_manifest_text())
+
+    def test_write_at_beginning(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({
+            "781e5e245d69b566979b86e28d23f2c7+10": b"0123456789",
+        })
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            self.assertEqual(b"0123456789", writer.readfrom(0, 13))
+            writer.seek(0, os.SEEK_SET)
+            writer.write("foo")
+            self.assertEqual(writer.size(), 10)
+            self.assertEqual(b"foo3456789", writer.readfrom(0, 13))
+            self.assertEqual(". acbd18db4cc2f85cedef654fccc4a4d8+3 781e5e245d69b566979b86e28d23f2c7+10 0:3:count.txt 6:7:count.txt\n", c.portable_manifest_text())
+
+    def test_write_empty(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        with Collection(keep_client=keep) as c:
+            writer = c.open("count.txt", "wb")
+            self.assertEqual(writer.size(), 0)
+            self.assertEqual(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:count.txt\n", c.portable_manifest_text())
+
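+    # portable_manifest_text() only renders the manifest and does not
+    # flush buffered data to Keep; save_new() (and manifest_text(), in the
+    # next test) commit the buffered blocks first.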
+    def test_save_manifest_text(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        with Collection(keep_client=keep) as c:
+            writer = c.open("count.txt", "wb")
+            writer.write(b"0123456789")
+            self.assertEqual('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n', c.portable_manifest_text())
+            self.assertNotIn('781e5e245d69b566979b86e28d23f2c7+10', keep.blocks)
+
+            self.assertEqual('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n', c.save_new(create_collection_record=False))
+            self.assertIn('781e5e245d69b566979b86e28d23f2c7+10', keep.blocks)
+
+    def test_get_manifest_text_commits(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        with Collection(keep_client=keep) as c:
+            writer = c.open("count.txt", "wb")
+            writer.write(b"0123456789")
+            self.assertEqual('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n', c.portable_manifest_text())
+            self.assertNotIn('781e5e245d69b566979b86e28d23f2c7+10', keep.blocks)
+            self.assertEqual('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n', c.manifest_text())
+            self.assertIn('781e5e245d69b566979b86e28d23f2c7+10', keep.blocks)
+
+
+    def test_write_in_middle(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": b"0123456789"})
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            self.assertEqual(b"0123456789", writer.readfrom(0, 13))
+            writer.seek(3, os.SEEK_SET)
+            writer.write("foo")
+            self.assertEqual(writer.size(), 10)
+            self.assertEqual(b"012foo6789", writer.readfrom(0, 13))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:count.txt 10:3:count.txt 6:4:count.txt\n", c.portable_manifest_text())
+
+    def test_write_at_end(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": b"0123456789"})
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            self.assertEqual(b"0123456789", writer.readfrom(0, 13))
+            writer.seek(7, os.SEEK_SET)
+            writer.write("foo")
+            self.assertEqual(writer.size(), 10)
+            self.assertEqual(b"0123456foo", writer.readfrom(0, 13))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:7:count.txt 10:3:count.txt\n", c.portable_manifest_text())
+
+    def test_write_across_segment_boundary(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": b"0123456789"})
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt 0:10:count.txt\n',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            self.assertEqual(b"012345678901234", writer.readfrom(0, 15))
+            writer.seek(7, os.SEEK_SET)
+            writer.write("foobar")
+            self.assertEqual(writer.size(), 20)
+            self.assertEqual(b"0123456foobar34", writer.readfrom(0, 15))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 3858f62230ac3c915f300c664312c63f+6 0:7:count.txt 10:6:count.txt 3:7:count.txt\n", c.portable_manifest_text())
+
+    def test_write_across_several_segments(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": b"0123456789"})
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:4:count.txt 0:4:count.txt 0:4:count.txt',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            self.assertEqual(b"012301230123", writer.readfrom(0, 15))
+            writer.seek(2, os.SEEK_SET)
+            writer.write("abcdefg")
+            self.assertEqual(writer.size(), 12)
+            self.assertEqual(b"01abcdefg123", writer.readfrom(0, 15))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 7ac66c0f148de9519b8bd264312c4d64+7 0:2:count.txt 10:7:count.txt 1:3:count.txt\n", c.portable_manifest_text())
+
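+    # 100 MB arrives in 1,000-byte writes; a write that would overflow the
+    # 64 MiB KEEP_BLOCK_SIZE evidently starts a new buffer block, so the
+    # data packs as 67,108,000 + 32,892,000 bytes instead of splitting at
+    # exactly 64 MiB (contrast test_large_write below, where one large
+    # write is split at the exact block boundary).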
+    def test_write_large(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({"name":"test_write_large",
+                                                 "manifest_text": ". a5de24f4417cfba9d5825eadc2f4ca49+67108000 598cc1a4ccaef8ab6e4724d87e675d78+32892000 0:100000000:count.txt\n",
+                                                 "replication_desired":None},
+                                                {"uuid":"zzzzz-4zz18-mockcollection0",
+                                                 "manifest_text": ". a5de24f4417cfba9d5825eadc2f4ca49+67108000 598cc1a4ccaef8ab6e4724d87e675d78+32892000 0:100000000:count.txt\n",
+                                                 "portable_data_hash":"9132ca8e3f671c76103a38f5bc24328c+108"})
+        with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
+                             api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            text = "0123456789" * 100
+            for b in range(0, 100000):
+                writer.write(text)
+            self.assertEqual(writer.size(), 100000000)
+
+            self.assertIsNone(c.manifest_locator())
+            self.assertTrue(c.modified())
+            c.save_new("test_write_large")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c.manifest_locator())
+            self.assertFalse(c.modified())
+
+
+    def test_large_write(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({}, {})
+        with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
+                             api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            self.assertEqual(writer.size(), 0)
+
+            text = "0123456789"
+            writer.write(text)
+            text = "0123456789" * 9999999
+            writer.write(text)
+            self.assertEqual(writer.size(), 100000000)
+
+            self.assertEqual(c.manifest_text(), ". 781e5e245d69b566979b86e28d23f2c7+10 48dd23ea1645fd47d789804d71b5bb8e+67108864 77c57dc6ac5a10bb2205caaa73187994+32891126 0:100000000:count.txt\n")
+
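+    # Seeking past EOF and writing zero-fills the gap: the hole is
+    # represented by pointing into what is evidently a preallocated 64 MiB
+    # all-zero block (7f614da9...) rather than by buffering literal zeros.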
+    def test_sparse_write(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({}, {})
+        with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
+                             api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            self.assertEqual(writer.size(), 0)
+
+            text = b"0123456789"
+            writer.seek(2)
+            writer.write(text)
+            self.assertEqual(writer.size(), 12)
+            writer.seek(0, os.SEEK_SET)
+            self.assertEqual(writer.read(), b"\x00\x00"+text)
+
+            self.assertEqual(c.manifest_text(), ". 7f614da9329cd3aebf59b91aadc30bf0+67108864 781e5e245d69b566979b86e28d23f2c7+10 0:2:count.txt 67108864:10:count.txt\n")
+
+
+    def test_sparse_write2(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({}, {})
+        with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
+                             api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            self.assertEqual(writer.size(), 0)
+
+            text = "0123456789"
+            writer.seek((arvados.config.KEEP_BLOCK_SIZE*2) + 2)
+            writer.write(text)
+            self.assertEqual(writer.size(), (arvados.config.KEEP_BLOCK_SIZE*2) + 12)
+            writer.seek(0, os.SEEK_SET)
+
+            self.assertEqual(c.manifest_text(), ". 7f614da9329cd3aebf59b91aadc30bf0+67108864 781e5e245d69b566979b86e28d23f2c7+10 0:67108864:count.txt 0:67108864:count.txt 0:2:count.txt 67108864:10:count.txt\n")
+
+
+    def test_sparse_write3(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({}, {})
+        for r in [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0], [3, 2, 0, 4, 1]]:
+            with Collection() as c:
+                writer = c.open("count.txt", "rb+")
+                self.assertEqual(writer.size(), 0)
+
+                for i in r:
+                    w = ("%s" % i) * 10
+                    writer.seek(i*10)
+                    writer.write(w.encode())
+                writer.seek(0)
+                self.assertEqual(writer.read(), b"00000000001111111111222222222233333333334444444444")
+
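+    # Same as above, but slot 3 is never written: the 10-byte hole reads
+    # back as NUL bytes.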
+    def test_sparse_write4(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({}, {})
+        for r in [[0, 1, 2, 4], [4, 2, 1, 0], [2, 0, 4, 1]]:
+            with Collection() as c:
+                writer = c.open("count.txt", "rb+")
+                self.assertEqual(writer.size(), 0)
+
+                for i in r:
+                    w = ("%s" % i) * 10
+                    writer.seek(i*10)
+                    writer.write(w.encode())
+                writer.seek(0)
+                self.assertEqual(writer.read(), b"000000000011111111112222222222\x00\x00\x00\x00\x00\x00\x00\x00\x00\x004444444444")
+
+
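+    # Each rewrite appends to the in-memory buffer block, so ten writes
+    # leave a 100-byte block of which only the last ten bytes are
+    # referenced; flush() repacks the file so the manifest references a
+    # single 10-byte block.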
+    def test_rewrite_on_empty_file(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            for b in range(0, 10):
+                writer.seek(0, os.SEEK_SET)
+                writer.write("0123456789")
+
+            self.assertEqual(writer.size(), 10)
+            self.assertEqual(b"0123456789", writer.readfrom(0, 20))
+            self.assertEqual(". 7a08b07e84641703e5f2c836aa59a170+100 90:10:count.txt\n", c.portable_manifest_text())
+            writer.flush()
+            self.assertEqual(writer.size(), 10)
+            self.assertEqual(b"0123456789", writer.readfrom(0, 20))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n", c.portable_manifest_text())
+
+    def test_rewrite_append_existing_file(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({
+            "781e5e245d69b566979b86e28d23f2c7+10": b"0123456789",
+        })
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            for b in range(0, 10):
+                writer.seek(10, os.SEEK_SET)
+                writer.write("abcdefghij")
+
+            self.assertEqual(writer.size(), 20)
+            self.assertEqual(b"0123456789abcdefghij", writer.readfrom(0, 20))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 ae5f43bab79cf0be33f025fa97ae7398+100 0:10:count.txt 100:10:count.txt\n", c.portable_manifest_text())
+
+            writer.arvadosfile.flush()
+            self.assertEqual(writer.size(), 20)
+            self.assertEqual(b"0123456789abcdefghij", writer.readfrom(0, 20))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 a925576942e94b2ef57a066101b48876+10 0:20:count.txt\n", c.portable_manifest_text())
+
+    def test_rewrite_over_existing_file(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({
+            "781e5e245d69b566979b86e28d23f2c7+10": b"0123456789",
+        })
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            for b in range(0, 10):
+                writer.seek(5, os.SEEK_SET)
+                writer.write("abcdefghij")
+
+            self.assertEqual(writer.size(), 15)
+            self.assertEqual(b"01234abcdefghij", writer.readfrom(0, 20))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 ae5f43bab79cf0be33f025fa97ae7398+100 0:5:count.txt 100:10:count.txt\n", c.portable_manifest_text())
+
+            writer.arvadosfile.flush()
+
+            self.assertEqual(writer.size(), 15)
+            self.assertEqual(b"01234abcdefghij", writer.readfrom(0, 20))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 a925576942e94b2ef57a066101b48876+10 0:5:count.txt 10:10:count.txt\n", c.portable_manifest_text())
+
+    def test_write_large_rewrite(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({"name":"test_write_large",
+                                                 "manifest_text": ". 3dc0d4bc21f48060bedcb2c91af4f906+32892003 a5de24f4417cfba9d5825eadc2f4ca49+67108000 0:3:count.txt 32892006:67107997:count.txt 3:32892000:count.txt\n",
+                                                 "replication_desired":None},
+                                                {"uuid":"zzzzz-4zz18-mockcollection0",
+                                                 "manifest_text": ". 3dc0d4bc21f48060bedcb2c91af4f906+32892003 a5de24f4417cfba9d5825eadc2f4ca49+67108000 0:3:count.txt 32892006:67107997:count.txt 3:32892000:count.txt\n",
+                                                 "portable_data_hash":"217665c6b713e1b78dfba7ebd42344db+156"})
+        with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
+                             api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "rb+")
+            text = b"0123456789" * 100
+            for b in range(0, 100000):
+                writer.write(text)
+            writer.seek(0, os.SEEK_SET)
+            writer.write("foo")
+            self.assertEqual(writer.size(), 100000000)
+
+            self.assertIsNone(c.manifest_locator())
+            self.assertTrue(c.modified())
+            c.save_new("test_write_large")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c.manifest_locator())
+            self.assertFalse(c.modified())
+
+    def test_create(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({
+            "name":"test_create",
+            "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n",
+            "replication_desired":None,
+        }, {
+            "uuid":"zzzzz-4zz18-mockcollection0",
+            "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n",
+            "portable_data_hash":"7a461a8c58601798f690f8b368ac4423+51",
+        })
+        with Collection(api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "wb+")
+            self.assertEqual(writer.size(), 0)
+            writer.write("01234567")
+            self.assertEqual(writer.size(), 8)
+
+            self.assertIsNone(c.manifest_locator())
+            self.assertTrue(c.modified())
+            self.assertIsNone(keep.get("2e9ec317e197819358fbc43afca7d837+8"))
+            c.save_new("test_create")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c.manifest_locator())
+            self.assertFalse(c.modified())
+            self.assertEqual(b"01234567", keep.get("2e9ec317e197819358fbc43afca7d837+8"))
+
+
+    def test_create_subdir(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({"name":"test_create",
+                                                 "manifest_text":"./foo/bar 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n",
+                                                 "replication_desired":None},
+                                                {"uuid":"zzzzz-4zz18-mockcollection0",
+                                                 "manifest_text":"./foo/bar 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n",
+                                                 "portable_data_hash":"1b02aaa62528d28a5be41651cbb9d7c7+59"})
+        with Collection(api_client=api, keep_client=keep) as c:
+            self.assertIsNone(c.api_response())
+            writer = c.open("foo/bar/count.txt", "wb+")
+            writer.write("01234567")
+            self.assertFalse(c.committed())
+            c.save_new("test_create")
+            self.assertTrue(c.committed())
+            self.assertEqual(c.api_response(), api.response)
+
+    def test_overwrite(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+        api = ArvadosFileWriterTestCase.MockApi({"name":"test_overwrite",
+                                                 "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n",
+                                                 "replication_desired":None},
+                                                {"uuid":"zzzzz-4zz18-mockcollection0",
+                                                 "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n",
+                                                 "portable_data_hash":"7a461a8c58601798f690f8b368ac4423+51"})
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+                             api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "wb+")
+            self.assertEqual(writer.size(), 0)
+            writer.write("01234567")
+            self.assertEqual(writer.size(), 8)
+
+            self.assertIsNone(c.manifest_locator())
+            self.assertTrue(c.modified())
+            c.save_new("test_overwrite")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c.manifest_locator())
+            self.assertFalse(c.modified())
+
+    def test_file_not_found(self):
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n') as c:
+            with self.assertRaises(IOError):
+                writer = c.open("nocount.txt", "rb")
+
+    def test_cannot_open_directory(self):
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n') as c:
+            with self.assertRaises(IOError):
+                writer = c.open(".", "rb")
+
+    def test_create_multiple(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({"name":"test_create_multiple",
+                                                 "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:8:count1.txt 8:8:count2.txt\n",
+                                                 "replication_desired":None},
+                                                {"uuid":"zzzzz-4zz18-mockcollection0",
+                                                 "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:8:count1.txt 8:8:count2.txt\n",
+                                                 "portable_data_hash":"71e7bb6c00d31fc2b4364199fd97be08+102"})
+        with Collection(api_client=api, keep_client=keep) as c:
+            w1 = c.open("count1.txt", "wb")
+            w2 = c.open("count2.txt", "wb")
+            w1.write("01234567")
+            w2.write("abcdefgh")
+            self.assertEqual(w1.size(), 8)
+            self.assertEqual(w2.size(), 8)
+
+            self.assertIsNone(c.manifest_locator())
+            self.assertTrue(c.modified())
+            self.assertIsNone(keep.get("2e9ec317e197819358fbc43afca7d837+8"))
+            c.save_new("test_create_multiple")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c.manifest_locator())
+            self.assertFalse(c.modified())
+            self.assertEqual(b"01234567", keep.get("2e9ec317e197819358fbc43afca7d837+8"))
+
+
+class ArvadosFileReaderTestCase(StreamFileReaderTestCase):
+    class MockParent(object):
+        class MockBlockMgr(object):
+            def __init__(self, blocks, nocache):
+                self.blocks = blocks
+                self.nocache = nocache
+
+            def block_prefetch(self, loc):
+                pass
+
+            def get_block_contents(self, loc, num_retries=0, cache_only=False):
+                if self.nocache and cache_only:
+                    return None
+                return self.blocks[loc]
+
+        def __init__(self, blocks, nocache):
+            self.blocks = blocks
+            self.nocache = nocache
+            self.lock = arvados.arvfile.NoopLock()
+
+        def root_collection(self):
+            return self
+
+        def _my_block_manager(self):
+            return ArvadosFileReaderTestCase.MockParent.MockBlockMgr(self.blocks, self.nocache)
+
+
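+    # Builds a 9-byte file, b"123456789", from three 3-byte segments cut
+    # from the middle of three 5-byte blocks, so any read longer than
+    # three bytes has to cross a block boundary.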
+    def make_count_reader(self, nocache=False):
+        stream = []
+        n = 0
+        blocks = {}
+        for d in [b'01234', b'34567', b'67890']:
+            loc = tutil.str_keep_locator(d)
+            blocks[loc] = d
+            stream.append(Range(loc, n, len(d)))
+            n += len(d)
+        af = ArvadosFile(ArvadosFileReaderTestCase.MockParent(blocks, nocache), "count.txt", stream=stream, segments=[Range(1, 0, 3), Range(6, 3, 3), Range(11, 6, 3)])
+        return ArvadosFileReader(af, mode="rb")
+
+    def test_read_block_crossing_behavior(self):
+        # read() needs to return all the data requested if possible, even if it
+        # crosses uncached blocks: https://arvados.org/issues/5856
+        sfile = self.make_count_reader(nocache=True)
+        self.assertEqual(b'12345678', sfile.read(8))
+
+    def test_successive_reads(self):
+        # Override StreamFileReaderTestCase.test_successive_reads
+        sfile = self.make_count_reader(nocache=True)
+        self.assertEqual(b'1234', sfile.read(4))
+        self.assertEqual(b'5678', sfile.read(4))
+        self.assertEqual(b'9', sfile.read(4))
+        self.assertEqual(b'', sfile.read(4))
+
+    def test_tell_after_block_read(self):
+        # Override StreamFileReaderTestCase.test_tell_after_block_read
+        sfile = self.make_count_reader(nocache=True)
+        self.assertEqual(b'12345678', sfile.read(8))
+        self.assertEqual(8, sfile.tell())
+
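+    # Reading the start of the file should also prefetch the following
+    # block, so both locators appear in the mock Keep request log.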
+    def test_prefetch(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({
+            "2e9ec317e197819358fbc43afca7d837+8": b"01234567",
+            "e8dc4081b13434b45189a720b77b6818+8": b"abcdefgh",
+        })
+        with Collection(". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:16:count.txt\n", keep_client=keep) as c:
+            r = c.open("count.txt", "rb")
+            self.assertEqual(b"0123", r.read(4))
+        self.assertIn("2e9ec317e197819358fbc43afca7d837+8", keep.requests)
+        self.assertIn("e8dc4081b13434b45189a720b77b6818+8", keep.requests)
+
+    def test__eq__from_manifest(self):
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c1:
+            with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c2:
+                self.assertTrue(c1["count1.txt"] == c2["count1.txt"])
+                self.assertFalse(c1["count1.txt"] != c2["count1.txt"])
+
+    def test__eq__from_writes(self):
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c1:
+            with Collection() as c2:
+                f = c2.open("count1.txt", "wb")
+                f.write("0123456789")
+
+                self.assertTrue(c1["count1.txt"] == c2["count1.txt"])
+                self.assertFalse(c1["count1.txt"] != c2["count1.txt"])
+
+    def test__ne__(self):
+        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c1:
+            with Collection() as c2:
+                f = c2.open("count1.txt", "wb")
+                f.write("1234567890")
+
+                self.assertTrue(c1["count1.txt"] != c2["count1.txt"])
+                self.assertFalse(c1["count1.txt"] == c2["count1.txt"])
+
+
+class ArvadosFileReadTestCase(unittest.TestCase, StreamRetryTestMixin):
+    def reader_for(self, coll_name, **kwargs):
+        stream = []
+        segments = []
+        n = 0
+        for d in self.manifest_for(coll_name).split():
+            try:
+                k = KeepLocator(d)
+                segments.append(Range(n, n, k.size))
+                stream.append(Range(d, n, k.size))
+                n += k.size
+            except ValueError:
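+                # Not a block locator (stream name or position:size:name
+                # token); skip it.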
+                pass
+
+        blockmanager = arvados.arvfile._BlockManager(self.keep_client())
+        blockmanager.prefetch_enabled = False
+        col = Collection(keep_client=self.keep_client(), block_manager=blockmanager)
+        af = ArvadosFile(col, "test",
+                         stream=stream,
+                         segments=segments)
+        kwargs.setdefault('mode', 'rb')
+        return ArvadosFileReader(af, **kwargs)
+
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return reader.read(byte_count, **kwargs)
+
+
+class ArvadosFileReadFromTestCase(ArvadosFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return reader.readfrom(0, byte_count, **kwargs)
+
+
+class ArvadosFileReadAllTestCase(ArvadosFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return b''.join(reader.readall(**kwargs))
+
+
+class ArvadosFileReadAllDecompressedTestCase(ArvadosFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return b''.join(reader.readall_decompressed(**kwargs))
+
+
+class ArvadosFileReadlinesTestCase(ArvadosFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return ''.join(reader.readlines(**kwargs)).encode()
+
+
+class ArvadosFileTestCase(unittest.TestCase):
+    def datetime_to_hex(self, dt):
+        return hex(int(time.mktime(dt.timetuple())))[2:]
+
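+    # Signed block locators carry a +A<signature>@<expiry> hint in which
+    # <expiry> is a hex Unix timestamp; permission_expired() compares it
+    # with the current time, or with a time passed as an argument.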
+    def test_permission_expired(self):
+        base_manifest = ". 781e5e245d69b566979b86e28d23f2c7+10+A715fd31f8111894f717eb1003c1b0216799dd9ec@{} 0:10:count.txt\n"
+        now = datetime.datetime.now()
+        a_week_ago = now - datetime.timedelta(days=7)
+        a_month_ago = now - datetime.timedelta(days=30)
+        a_week_from_now = now + datetime.timedelta(days=7)
+        with Collection(base_manifest.format(self.datetime_to_hex(a_week_from_now))) as c:
+            self.assertFalse(c.find('count.txt').permission_expired())
+        with Collection(base_manifest.format(self.datetime_to_hex(a_week_ago))) as c:
+            f = c.find('count.txt')
+            self.assertTrue(f.permission_expired())
+            self.assertTrue(f.permission_expired(a_week_from_now))
+            self.assertFalse(f.permission_expired(a_month_ago))
+
+
+class BlockManagerTest(unittest.TestCase):
+    def test_bufferblock_append(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        with arvados.arvfile._BlockManager(keep) as blockmanager:
+            bufferblock = blockmanager.alloc_bufferblock()
+            bufferblock.append("foo")
+
+            self.assertEqual(bufferblock.size(), 3)
+            self.assertEqual(bufferblock.buffer_view[0:3], b"foo")
+            self.assertEqual(bufferblock.locator(), "acbd18db4cc2f85cedef654fccc4a4d8+3")
+
+            bufferblock.append("bar")
+
+            self.assertEqual(bufferblock.size(), 6)
+            self.assertEqual(bufferblock.buffer_view[0:6], b"foobar")
+            self.assertEqual(bufferblock.locator(), "3858f62230ac3c915f300c664312c63f+6")
+
+            bufferblock.set_state(arvados.arvfile._BufferBlock.PENDING)
+            with self.assertRaises(arvados.errors.AssertionError):
+                bufferblock.append("bar")
+
+    def test_bufferblock_dup(self):
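+        # A PENDING block can no longer be appended to (see
+        # test_bufferblock_append); dup_block() returns a writable copy
+        # with a fresh blockid while the original's contents and locator
+        # stay unchanged.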
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        with arvados.arvfile._BlockManager(keep) as blockmanager:
+            bufferblock = blockmanager.alloc_bufferblock()
+            bufferblock.append("foo")
+
+            self.assertEqual(bufferblock.size(), 3)
+            self.assertEqual(bufferblock.buffer_view[0:3], b"foo")
+            self.assertEqual(bufferblock.locator(), "acbd18db4cc2f85cedef654fccc4a4d8+3")
+            bufferblock.set_state(arvados.arvfile._BufferBlock.PENDING)
+
+            bufferblock2 = blockmanager.dup_block(bufferblock, None)
+            self.assertNotEqual(bufferblock.blockid, bufferblock2.blockid)
+
+            bufferblock2.append("bar")
+
+            self.assertEqual(bufferblock2.size(), 6)
+            self.assertEqual(bufferblock2.buffer_view[0:6], b"foobar")
+            self.assertEqual(bufferblock2.locator(), "3858f62230ac3c915f300c664312c63f+6")
+
+            self.assertEqual(bufferblock.size(), 3)
+            self.assertEqual(bufferblock.buffer_view[0:3], b"foo")
+            self.assertEqual(bufferblock.locator(), "acbd18db4cc2f85cedef654fccc4a4d8+3")
+
+    def test_bufferblock_get(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({
+            "781e5e245d69b566979b86e28d23f2c7+10": b"0123456789",
+        })
+        with arvados.arvfile._BlockManager(keep) as blockmanager:
+            bufferblock = blockmanager.alloc_bufferblock()
+            bufferblock.append("foo")
+
+            self.assertEqual(blockmanager.get_block_contents("781e5e245d69b566979b86e28d23f2c7+10", 1), b"0123456789")
+            self.assertEqual(blockmanager.get_block_contents(bufferblock.blockid, 1), b"foo")
+
+    def test_bufferblock_commit(self):
+        mockkeep = mock.MagicMock()
+        with arvados.arvfile._BlockManager(mockkeep) as blockmanager:
+            bufferblock = blockmanager.alloc_bufferblock()
+            bufferblock.owner = mock.MagicMock(spec=arvados.arvfile.ArvadosFile)
+            def flush(sync=None):
+                blockmanager.commit_bufferblock(bufferblock, sync)
+            bufferblock.owner.flush.side_effect = flush
+            bufferblock.append("foo")
+            blockmanager.commit_all()
+            self.assertTrue(bufferblock.owner.flush.called)
+            self.assertTrue(mockkeep.put.called)
+            self.assertEqual(bufferblock.state(), arvados.arvfile._BufferBlock.COMMITTED)
+            self.assertIsNone(bufferblock.buffer_view)
+
+    def test_bufferblock_commit_pending(self):
+        # Test for bug #7225
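+        # commit_bufferblock(..., False) returns while the slow Keep PUT
+        # is still in flight, leaving the block PENDING; a second commit
+        # with sync=True must wait for that PUT to finish instead of
+        # starting another upload.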
+        mockkeep = mock.MagicMock()
+        mockkeep.put.side_effect = lambda x: time.sleep(1)
+        with arvados.arvfile._BlockManager(mockkeep) as blockmanager:
+            bufferblock = blockmanager.alloc_bufferblock()
+            bufferblock.append("foo")
+
+            blockmanager.commit_bufferblock(bufferblock, False)
+            self.assertEqual(bufferblock.state(), arvados.arvfile._BufferBlock.PENDING)
+
+            blockmanager.commit_bufferblock(bufferblock, True)
+            self.assertEqual(bufferblock.state(), arvados.arvfile._BufferBlock.COMMITTED)
+
+
+    def test_bufferblock_commit_with_error(self):
+        mockkeep = mock.MagicMock()
+        mockkeep.put.side_effect = arvados.errors.KeepWriteError("fail")
+        with arvados.arvfile._BlockManager(mockkeep) as blockmanager:
+            bufferblock = blockmanager.alloc_bufferblock()
+            bufferblock.owner = mock.MagicMock(spec=arvados.arvfile.ArvadosFile)
+            def flush(sync=None):
+                blockmanager.commit_bufferblock(bufferblock, sync)
+            bufferblock.owner.flush.side_effect = flush
+            bufferblock.append("foo")
+            with self.assertRaises(arvados.errors.KeepWriteError) as err:
+                blockmanager.commit_all()
+            self.assertTrue(bufferblock.owner.flush.called)
+            self.assertEqual(str(err.exception), "Error writing some blocks: block acbd18db4cc2f85cedef654fccc4a4d8+3 raised KeepWriteError (fail)")
+            self.assertEqual(bufferblock.state(), arvados.arvfile._BufferBlock.ERROR)
diff --git a/sdk/python/tests/test_benchmark_collections.py b/sdk/python/tests/test_benchmark_collections.py
new file mode 100644 (file)
index 0000000..fc062e7
--- /dev/null
@@ -0,0 +1,102 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+import arvados
+import sys
+
+from . import run_test_server
+from . import arvados_testutil as tutil
+from . import manifest_examples
+from .performance.performance_profiler import profiled
+
+class CollectionBenchmark(run_test_server.TestCaseWithServers,
+                          tutil.ArvadosBaseTestCase,
+                          manifest_examples.ManifestExamples):
+    MAIN_SERVER = {}
+    TEST_BLOCK_SIZE = 0
+
+    @classmethod
+    def list_recursive(cls, coll, parent_name=None):
+        if parent_name is None:
+            current_name = coll.stream_name()
+        else:
+            current_name = '{}/{}'.format(parent_name, coll.name)
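+        # Subcollections iterate over their entry names; files do not, so
+        # the TypeError below marks a leaf whose path is yielded.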
+        try:
+            for name in coll:
+                for item in cls.list_recursive(coll[name], current_name):
+                    yield item
+        except TypeError:
+            yield current_name
+
+    @classmethod
+    def setUpClass(cls):
+        super(CollectionBenchmark, cls).setUpClass()
+        run_test_server.authorize_with('active')
+        cls.api_client = arvados.api('v1')
+        cls.keep_client = arvados.KeepClient(api_client=cls.api_client,
+                                             local_store=cls.local_store)
+
+    @profiled
+    def profile_new_collection_from_manifest(self, manifest_text):
+        return arvados.collection.Collection(manifest_text)
+
+    @profiled
+    def profile_new_collection_from_server(self, uuid):
+        return arvados.collection.Collection(uuid)
+
+    @profiled
+    def profile_new_collection_copying_bytes_from_collection(self, src):
+        dst = arvados.collection.Collection()
+        with tutil.mock_keep_responses('x'*self.TEST_BLOCK_SIZE, 200):
+            for name in self.list_recursive(src):
+                with src.open(name, 'rb') as srcfile, dst.open(name, 'wb') as dstfile:
+                    dstfile.write(srcfile.read())
+            dst.save_new()
+
+    @profiled
+    def profile_new_collection_copying_files_from_collection(self, src):
+        dst = arvados.collection.Collection()
+        with tutil.mock_keep_responses('x'*self.TEST_BLOCK_SIZE, 200):
+            for name in self.list_recursive(src):
+                dst.copy(name, name, src)
+            dst.save_new()
+
+    @profiled
+    def profile_collection_list_files(self, coll):
+        return sum(1 for name in self.list_recursive(coll))
+
+    def test_medium_sized_manifest(self):
+        """Exercise manifest-handling code.
+
+        Currently, this test puts undue emphasis on some code paths
+        that don't reflect typical use because the contrived example
+        manifest has some unusual characteristics:
+
+        * Block size is zero.
+
+        * Every block is identical, so block caching patterns are
+          unrealistic.
+
+        * Every file begins and ends at a block boundary.
+        """
+        specs = {
+            'streams': 100,
+            'files_per_stream': 100,
+            'blocks_per_file': 20,
+            'bytes_per_block': self.TEST_BLOCK_SIZE,
+        }
+        my_manifest = self.make_manifest(**specs)
+
+        coll = self.profile_new_collection_from_manifest(my_manifest)
+
+        coll.save_new()
+        self.profile_new_collection_from_server(coll.manifest_locator())
+
+        num_items = self.profile_collection_list_files(coll)
+        self.assertEqual(num_items, specs['streams'] * specs['files_per_stream'])
+
+        self.profile_new_collection_copying_bytes_from_collection(coll)
+
+        self.profile_new_collection_copying_files_from_collection(coll)
diff --git a/sdk/python/tests/test_cache.py b/sdk/python/tests/test_cache.py
new file mode 100644 (file)
index 0000000..259acd0
--- /dev/null
@@ -0,0 +1,103 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+from builtins import str
+from builtins import range
+import hashlib
+import mock
+import os
+import random
+import shutil
+import sys
+import tempfile
+import threading
+import unittest
+
+import arvados
+import arvados.cache
+from . import run_test_server
+
+
+def _random(n):
+    return bytearray(random.getrandbits(8) for _ in range(n))
+
+
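+# Each thread hammers a single cache entry, storing the payload prefixed
+# with its own md5 so that a torn or interleaved read shows up as a digest
+# mismatch.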
+class CacheTestThread(threading.Thread):
+    def __init__(self, dir):
+        super(CacheTestThread, self).__init__()
+        self._dir = dir
+
+    def run(self):
+        c = arvados.cache.SafeHTTPCache(self._dir)
+        url = 'http://example.com/foo'
+        self.ok = True
+        for x in range(16):
+            try:
+                data_in = _random(128)
+                data_in = hashlib.md5(data_in).hexdigest().encode() + b"\n" + data_in
+                c.set(url, data_in)
+                data_out = c.get(url)
+                digest, _, content = data_out.partition(b"\n")
+                if digest != hashlib.md5(content).hexdigest().encode():
+                    self.ok = False
+            except Exception as err:
+                self.ok = False
+                print("cache failed: {}: {}".format(type(err), err), file=sys.stderr)
+                raise
+
+
+class CacheTest(unittest.TestCase):
+    def setUp(self):
+        self._dir = tempfile.mkdtemp()
+
+    def tearDown(self):
+        shutil.rmtree(self._dir)
+
+    def test_cache_create_error(self):
+        _, filename = tempfile.mkstemp()
+        home_was = os.environ['HOME']
+        os.environ['HOME'] = filename
+        try:
+            c = arvados.http_cache('test')
+            self.assertEqual(None, c)
+        finally:
+            os.environ['HOME'] = home_was
+            os.unlink(filename)
+
+    def test_cache_crud(self):
+        c = arvados.cache.SafeHTTPCache(self._dir, max_age=0)
+        url = 'https://example.com/foo?bar=baz'
+        data1 = _random(256)
+        data2 = _random(128)
+        self.assertEqual(None, c.get(url))
+        c.delete(url)
+        c.set(url, data1)
+        self.assertEqual(data1, c.get(url))
+        c.delete(url)
+        self.assertEqual(None, c.get(url))
+        c.set(url, data1)
+        c.set(url, data2)
+        self.assertEqual(data2, c.get(url))
+
+    def test_cache_threads(self):
+        threads = []
+        for _ in range(64):
+            t = CacheTestThread(dir=self._dir)
+            t.start()
+            threads.append(t)
+        for t in threads:
+            t.join()
+            self.assertTrue(t.ok)
+
+
+class CacheIntegrationTest(run_test_server.TestCaseWithServers):
+    MAIN_SERVER = {}
+
+    def test_cache_used_by_default_client(self):
+        with mock.patch('arvados.cache.SafeHTTPCache.get') as getter:
+            arvados.api('v1')._rootDesc.get('foobar')
+            getter.assert_called()
diff --git a/sdk/python/tests/test_collections.py b/sdk/python/tests/test_collections.py
new file mode 100644 (file)
index 0000000..66f062c
--- /dev/null
@@ -0,0 +1,1552 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+
+from builtins import object
+import arvados
+import copy
+import mock
+import os
+import pprint
+import random
+import re
+import sys
+import tempfile
+import datetime
+import ciso8601
+import time
+import unittest
+
+from . import run_test_server
+from arvados._ranges import Range, LocatorAndRange
+from arvados.collection import Collection, CollectionReader
+from . import arvados_testutil as tutil
+
+class TestResumableWriter(arvados.ResumableCollectionWriter):
+    KEEP_BLOCK_SIZE = 1024  # PUT to Keep every 1K.
+
+    def current_state(self):
+        return self.dump_state(copy.deepcopy)
+
+
+class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
+                             tutil.ArvadosBaseTestCase):
+    MAIN_SERVER = {}
+
+    @classmethod
+    def setUpClass(cls):
+        super(ArvadosCollectionsTest, cls).setUpClass()
+        # need admin privileges to make collections with unsigned blocks
+        run_test_server.authorize_with('admin')
+        cls.api_client = arvados.api('v1')
+        cls.keep_client = arvados.KeepClient(api_client=cls.api_client,
+                                             local_store=cls.local_store)
+
+    def write_foo_bar_baz(self):
+        cw = arvados.CollectionWriter(self.api_client)
+        self.assertEqual(cw.current_stream_name(), '.',
+                         'current_stream_name() should be "." now')
+        cw.set_current_file_name('foo.txt')
+        cw.write(b'foo')
+        self.assertEqual(cw.current_file_name(), 'foo.txt',
+                         'current_file_name() should be foo.txt now')
+        cw.start_new_file('bar.txt')
+        cw.write(b'bar')
+        cw.start_new_stream('baz')
+        cw.write(b'baz')
+        cw.set_current_file_name('baz.txt')
+        self.assertEqual(cw.manifest_text(),
+                         ". 3858f62230ac3c915f300c664312c63f+6 0:3:foo.txt 3:3:bar.txt\n" +
+                         "./baz 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz.txt\n",
+                         "wrong manifest: got {}".format(cw.manifest_text()))
+        cw.save_new()
+        return cw.portable_data_hash()
+
+    def test_pdh_is_native_str(self):
+        pdh = self.write_foo_bar_baz()
+        self.assertEqual(type(''), type(pdh))
+
+    def test_keep_local_store(self):
+        self.assertEqual(self.keep_client.put(b'foo'), 'acbd18db4cc2f85cedef654fccc4a4d8+3', 'wrong md5 hash from Keep.put')
+        self.assertEqual(self.keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3'), b'foo', 'wrong data from Keep.get')
+
+    def test_local_collection_writer(self):
+        self.assertEqual(self.write_foo_bar_baz(),
+                         '23ca013983d6239e98931cc779e68426+114',
+                         'wrong locator hash: ' + self.write_foo_bar_baz())
+
+    def test_local_collection_reader(self):
+        foobarbaz = self.write_foo_bar_baz()
+        cr = arvados.CollectionReader(
+            foobarbaz + '+Xzizzle', self.api_client)
+        got = []
+        for s in cr.all_streams():
+            for f in s.all_files():
+                got += [[f.size(), f.stream_name(), f.name(), f.read(2**26)]]
+        expected = [[3, '.', 'foo.txt', b'foo'],
+                    [3, '.', 'bar.txt', b'bar'],
+                    [3, './baz', 'baz.txt', b'baz']]
+        self.assertEqual(got,
+                         expected)
+        stream0 = cr.all_streams()[0]
+        self.assertEqual(stream0.readfrom(0, 0),
+                         b'',
+                         'reading zero bytes should have returned empty string')
+        self.assertEqual(stream0.readfrom(0, 2**26),
+                         b'foobar',
+                         'reading entire stream failed')
+        self.assertEqual(stream0.readfrom(2**26, 0),
+                         b'',
+                         'reading zero bytes should have returned empty string')
+        self.assertEqual(3, len(cr))
+        self.assertTrue(cr)
+
+    def _test_subset(self, collection, expected):
+        cr = arvados.CollectionReader(collection, self.api_client)
+        for s in cr.all_streams():
+            for ex in expected:
+                # expected entries are [size, stream_name, file_name, data];
+                # match on the stream name.
+                if ex[1] == s.name():
+                    f = s.files()[ex[2]]
+                    got = [f.size(), f.stream_name(), f.name(), b"".join(f.readall(2**26))]
+                    self.assertEqual(got,
+                                     ex,
+                                     'all_files|as_manifest did not preserve manifest contents: got %s expected %s' % (got, ex))
+
+    def test_collection_manifest_subset(self):
+        foobarbaz = self.write_foo_bar_baz()
+        self._test_subset(foobarbaz,
+                          [[3, '.',     'bar.txt', b'bar'],
+                           [3, '.',     'foo.txt', b'foo'],
+                           [3, './baz', 'baz.txt', b'baz']])
+        self._test_subset((". %s %s 0:3:foo.txt 3:3:bar.txt\n" %
+                           (self.keep_client.put(b"foo"),
+                            self.keep_client.put(b"bar"))),
+                          [[3, '.', 'bar.txt', b'bar'],
+                           [3, '.', 'foo.txt', b'foo']])
+        self._test_subset((". %s %s 0:2:fo.txt 2:4:obar.txt\n" %
+                           (self.keep_client.put(b"foo"),
+                            self.keep_client.put(b"bar"))),
+                          [[2, '.', 'fo.txt', b'fo'],
+                           [4, '.', 'obar.txt', b'obar']])
+        self._test_subset((". %s %s 0:2:fo.txt 2:0:zero.txt 2:2:ob.txt 4:2:ar.txt\n" %
+                           (self.keep_client.put(b"foo"),
+                            self.keep_client.put(b"bar"))),
+                          [[2, '.', 'ar.txt', b'ar'],
+                           [2, '.', 'fo.txt', b'fo'],
+                           [2, '.', 'ob.txt', b'ob'],
+                           [0, '.', 'zero.txt', b'']])
+
+    def test_collection_empty_file(self):
+        cw = arvados.CollectionWriter(self.api_client)
+        cw.start_new_file('zero.txt')
+        cw.write(b'')
+
+        self.assertEqual(cw.manifest_text(), ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:zero.txt\n")
+        self.check_manifest_file_sizes(cw.manifest_text(), [0])
+        cw = arvados.CollectionWriter(self.api_client)
+        cw.start_new_file('zero.txt')
+        cw.write(b'')
+        cw.start_new_file('one.txt')
+        cw.write(b'1')
+        cw.start_new_stream('foo')
+        cw.start_new_file('zero.txt')
+        cw.write(b'')
+        self.check_manifest_file_sizes(cw.manifest_text(), [0,1,0])
+
+    def test_no_implicit_normalize(self):
+        cw = arvados.CollectionWriter(self.api_client)
+        cw.start_new_file('b')
+        cw.write(b'b')
+        cw.start_new_file('a')
+        cw.write(b'')
+        self.check_manifest_file_sizes(cw.manifest_text(), [1,0])
+        self.check_manifest_file_sizes(
+            arvados.CollectionReader(
+                cw.manifest_text()).manifest_text(normalize=True),
+            [0,1])
+
+    def check_manifest_file_sizes(self, manifest_text, expect_sizes):
+        cr = arvados.CollectionReader(manifest_text, self.api_client)
+        got_sizes = []
+        for f in cr.all_files():
+            got_sizes += [f.size()]
+        self.assertEqual(got_sizes, expect_sizes, "got wrong file sizes %s, expected %s" % (got_sizes, expect_sizes))
+
+    def test_normalized_collection(self):
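+        # manifest_text(normalize=True) merges all lines for a stream into
+        # one, concatenates their block lists, and joins repeated file
+        # names into a single file with coalesced segments; here three
+        # md5sum.txt fragments become one 0:127 segment.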
+        m1 = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
+"""
+        self.assertEqual(arvados.CollectionReader(m1, self.api_client).manifest_text(normalize=True),
+                         """. 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:127:md5sum.txt
+""")
+
+        m2 = """. 204e43b8a1185621ca55a94839582e6f+67108864 b9677abbac956bd3e86b1deb28dfac03+67108864 fc15aff2a762b13f521baf042140acec+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:227212247:var-GS000016015-ASM.tsv.bz2
+"""
+        self.assertEqual(arvados.CollectionReader(m2, self.api_client).manifest_text(normalize=True), m2)
+
+        m3 = """. 5348b82a029fd9e971a811ce1f71360b+43 3:40:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
+"""
+        self.assertEqual(arvados.CollectionReader(m3, self.api_client).manifest_text(normalize=True),
+                         """. 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 3:124:md5sum.txt
+""")
+
+        m4 = """. 204e43b8a1185621ca55a94839582e6f+67108864 0:3:foo/bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+./foo 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
+"""
+        self.assertEqual(arvados.CollectionReader(m4, self.api_client).manifest_text(normalize=True),
+                         """./foo 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+""")
+
+        m5 = """. 204e43b8a1185621ca55a94839582e6f+67108864 0:3:foo/bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+./foo 204e43b8a1185621ca55a94839582e6f+67108864 3:3:bar
+"""
+        self.assertEqual(arvados.CollectionReader(m5, self.api_client).manifest_text(normalize=True),
+                         """./foo 204e43b8a1185621ca55a94839582e6f+67108864 0:6:bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+""")
+
+        with self.data_file('1000G_ref_manifest') as f6:
+            m6 = f6.read()
+            self.assertEqual(arvados.CollectionReader(m6, self.api_client).manifest_text(normalize=True), m6)
+
+        with self.data_file('jlake_manifest') as f7:
+            m7 = f7.read()
+            self.assertEqual(arvados.CollectionReader(m7, self.api_client).manifest_text(normalize=True), m7)
+
+        m8 = """./a\\040b\\040c 59ca0efa9f5633cb0371bbc0355478d8+13 0:13:hello\\040world.txt
+"""
+        self.assertEqual(arvados.CollectionReader(m8, self.api_client).manifest_text(normalize=True), m8)
+
+    def test_locators_and_ranges(self):
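+        # locators_and_ranges(blocks, offset, size) maps a byte range of
+        # the concatenated blocks onto LocatorAndRange(locator, block_size,
+        # segment_offset, segment_size) tuples, splitting the range at
+        # block boundaries.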
+        blocks2 = [Range('a', 0, 10),
+                   Range('b', 10, 10),
+                   Range('c', 20, 10),
+                   Range('d', 30, 10),
+                   Range('e', 40, 10),
+                   Range('f', 50, 10)]
+
+        self.assertEqual(arvados.locators_and_ranges(blocks2,  2,  2), [LocatorAndRange('a', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 12, 2), [LocatorAndRange('b', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 22, 2), [LocatorAndRange('c', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 32, 2), [LocatorAndRange('d', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 42, 2), [LocatorAndRange('e', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 52, 2), [LocatorAndRange('f', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 62, 2), [])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, -2, 2), [])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks2,  0,  2), [LocatorAndRange('a', 10, 0, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 10, 2), [LocatorAndRange('b', 10, 0, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 20, 2), [LocatorAndRange('c', 10, 0, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 30, 2), [LocatorAndRange('d', 10, 0, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 40, 2), [LocatorAndRange('e', 10, 0, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 50, 2), [LocatorAndRange('f', 10, 0, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 60, 2), [])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, -2, 2), [])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks2,  9,  2), [LocatorAndRange('a', 10, 9, 1), LocatorAndRange('b', 10, 0, 1)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 19, 2), [LocatorAndRange('b', 10, 9, 1), LocatorAndRange('c', 10, 0, 1)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 29, 2), [LocatorAndRange('c', 10, 9, 1), LocatorAndRange('d', 10, 0, 1)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 39, 2), [LocatorAndRange('d', 10, 9, 1), LocatorAndRange('e', 10, 0, 1)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 49, 2), [LocatorAndRange('e', 10, 9, 1), LocatorAndRange('f', 10, 0, 1)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 59, 2), [LocatorAndRange('f', 10, 9, 1)])
+
+
+        blocks3 = [Range('a', 0, 10),
+                   Range('b', 10, 10),
+                   Range('c', 20, 10),
+                   Range('d', 30, 10),
+                   Range('e', 40, 10),
+                   Range('f', 50, 10),
+                   Range('g', 60, 10)]
+
+        self.assertEqual(arvados.locators_and_ranges(blocks3,  2,  2), [LocatorAndRange('a', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 12, 2), [LocatorAndRange('b', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 22, 2), [LocatorAndRange('c', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 32, 2), [LocatorAndRange('d', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 42, 2), [LocatorAndRange('e', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 52, 2), [LocatorAndRange('f', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 62, 2), [LocatorAndRange('g', 10, 2, 2)])
+
+
+        blocks = [Range('a', 0, 10),
+                  Range('b', 10, 15),
+                  Range('c', 25, 5)]
+        self.assertEqual(arvados.locators_and_ranges(blocks, 1, 0), [])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 5), [LocatorAndRange('a', 10, 0, 5)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 3, 5), [LocatorAndRange('a', 10, 3, 5)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 10), [LocatorAndRange('a', 10, 0, 10)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 11), [LocatorAndRange('a', 10, 0, 10),
+                                                                      LocatorAndRange('b', 15, 0, 1)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 1, 11), [LocatorAndRange('a', 10, 1, 9),
+                                                                      LocatorAndRange('b', 15, 0, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 25), [LocatorAndRange('a', 10, 0, 10),
+                                                                      LocatorAndRange('b', 15, 0, 15)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 30), [LocatorAndRange('a', 10, 0, 10),
+                                                                      LocatorAndRange('b', 15, 0, 15),
+                                                                      LocatorAndRange('c', 5, 0, 5)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 1, 30), [LocatorAndRange('a', 10, 1, 9),
+                                                                      LocatorAndRange('b', 15, 0, 15),
+                                                                      LocatorAndRange('c', 5, 0, 5)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 31), [LocatorAndRange('a', 10, 0, 10),
+                                                                      LocatorAndRange('b', 15, 0, 15),
+                                                                      LocatorAndRange('c', 5, 0, 5)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 15, 5), [LocatorAndRange('b', 15, 5, 5)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 8, 17), [LocatorAndRange('a', 10, 8, 2),
+                                                                      LocatorAndRange('b', 15, 0, 15)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 8, 20), [LocatorAndRange('a', 10, 8, 2),
+                                                                      LocatorAndRange('b', 15, 0, 15),
+                                                                      LocatorAndRange('c', 5, 0, 3)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 26, 2), [LocatorAndRange('c', 5, 1, 2)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 9, 15), [LocatorAndRange('a', 10, 9, 1),
+                                                                      LocatorAndRange('b', 15, 0, 14)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 10, 15), [LocatorAndRange('b', 15, 0, 15)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 11, 15), [LocatorAndRange('b', 15, 1, 14),
+                                                                       LocatorAndRange('c', 5, 0, 1)])
+
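+    # Minimal stand-in for the Keep client used by test_stream_reader below:
+    # it maps locators to preloaded content and mirrors the real client's
+    # get(locator, num_retries=...) signature.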
+    class MockKeep(object):
+        def __init__(self, content, num_retries=0):
+            self.content = content
+
+        def get(self, locator, num_retries=0):
+            return self.content[locator]
+
+    def test_stream_reader(self):
+        keepblocks = {
+            'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+10': b'abcdefghij',
+            'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+15': b'klmnopqrstuvwxy',
+            'cccccccccccccccccccccccccccccccc+5': b'z0123',
+        }
+        mk = self.MockKeep(keepblocks)
+
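+        # Stream token layout, as used throughout these tests: a stream name
+        # ('.'), one or more block locators of the form <md5>+<size>, and
+        # file segment tokens <offset>:<length>:<name> addressed against the
+        # concatenated blocks; here 0:30:foo spans all 30 bytes of the three
+        # blocks above.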
+        sr = arvados.StreamReader([".", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+10", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+15", "cccccccccccccccccccccccccccccccc+5", "0:30:foo"], mk)
+
+        content = b'abcdefghijklmnopqrstuvwxyz0123456789'
+
+        self.assertEqual(sr.readfrom(0, 30), content[0:30])
+        self.assertEqual(sr.readfrom(2, 30), content[2:30])
+
+        self.assertEqual(sr.readfrom(2, 8), content[2:10])
+        self.assertEqual(sr.readfrom(0, 10), content[0:10])
+
+        self.assertEqual(sr.readfrom(0, 5), content[0:5])
+        self.assertEqual(sr.readfrom(5, 5), content[5:10])
+        self.assertEqual(sr.readfrom(10, 5), content[10:15])
+        self.assertEqual(sr.readfrom(15, 5), content[15:20])
+        self.assertEqual(sr.readfrom(20, 5), content[20:25])
+        self.assertEqual(sr.readfrom(25, 5), content[25:30])
+        self.assertEqual(sr.readfrom(30, 5), b'')
+
+    def test_extract_file(self):
+        m1 = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md6sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md7sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 5348b82a029fd9e971a811ce1f71360b+43 8b22da26f9f433dea0a10e5ec66d73ba+43 47:80:md8sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 5348b82a029fd9e971a811ce1f71360b+43 8b22da26f9f433dea0a10e5ec66d73ba+43 40:80:md9sum.txt
+"""
+
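+        # normalize=True rewrites the manifest into a canonical form: the
+        # blocks are merged onto a single stream line, file names are sorted,
+        # and each file's segments are re-expressed against the merged block
+        # list, as the expected text below shows.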
+        m2 = arvados.CollectionReader(m1, self.api_client).manifest_text(normalize=True)
+
+        self.assertEqual(m2,
+                         ". 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt 43:41:md6sum.txt 84:43:md7sum.txt 6:37:md8sum.txt 84:43:md8sum.txt 83:1:md9sum.txt 0:43:md9sum.txt 84:36:md9sum.txt\n")
+        files = arvados.CollectionReader(
+            m2, self.api_client).all_streams()[0].files()
+
+        self.assertEqual(files['md5sum.txt'].as_manifest(),
+                         ". 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt\n")
+        self.assertEqual(files['md6sum.txt'].as_manifest(),
+                         ". 085c37f02916da1cad16f93c54d899b7+41 0:41:md6sum.txt\n")
+        self.assertEqual(files['md7sum.txt'].as_manifest(),
+                         ". 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md7sum.txt\n")
+        self.assertEqual(files['md9sum.txt'].as_manifest(),
+                         ". 085c37f02916da1cad16f93c54d899b7+41 5348b82a029fd9e971a811ce1f71360b+43 8b22da26f9f433dea0a10e5ec66d73ba+43 40:80:md9sum.txt\n")
+
+    def test_write_directory_tree(self):
+        cwriter = arvados.CollectionWriter(self.api_client)
+        cwriter.write_directory_tree(self.build_directory_tree(
+                ['basefile', 'subdir/subfile']))
+        self.assertEqual(cwriter.manifest_text(),
+                         """. c5110c5ac93202d8e0f9e381f22bac0f+8 0:8:basefile
+./subdir 1ca4dec89403084bf282ad31e6cf7972+14 0:14:subfile\n""")
+
+    def test_write_named_directory_tree(self):
+        cwriter = arvados.CollectionWriter(self.api_client)
+        cwriter.write_directory_tree(self.build_directory_tree(
+                ['basefile', 'subdir/subfile']), 'root')
+        self.assertEqual(
+            cwriter.manifest_text(),
+            """./root c5110c5ac93202d8e0f9e381f22bac0f+8 0:8:basefile
+./root/subdir 1ca4dec89403084bf282ad31e6cf7972+14 0:14:subfile\n""")
+
+    def test_write_directory_tree_in_one_stream(self):
+        cwriter = arvados.CollectionWriter(self.api_client)
+        cwriter.write_directory_tree(self.build_directory_tree(
+                ['basefile', 'subdir/subfile']), max_manifest_depth=0)
+        self.assertEqual(cwriter.manifest_text(),
+                         """. 4ace875ffdc6824a04950f06858f4465+22 0:8:basefile 8:14:subdir/subfile\n""")
+
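+    # max_manifest_depth, as exercised by the next two tests, bounds how many
+    # directory levels get their own manifest streams; below the cutoff,
+    # subdirectory files are folded into the parent stream under
+    # slash-containing names (e.g. d2/f3), and depth 0 flattens everything
+    # into a single stream.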
+    def test_write_directory_tree_with_limited_recursion(self):
+        cwriter = arvados.CollectionWriter(self.api_client)
+        cwriter.write_directory_tree(
+            self.build_directory_tree(['f1', 'd1/f2', 'd1/d2/f3']),
+            max_manifest_depth=1)
+        self.assertEqual(cwriter.manifest_text(),
+                         """. bd19836ddb62c11c55ab251ccaca5645+2 0:2:f1
+./d1 50170217e5b04312024aa5cd42934494+13 0:8:d2/f3 8:5:f2\n""")
+
+    def test_write_directory_tree_with_zero_recursion(self):
+        cwriter = arvados.CollectionWriter(self.api_client)
+        content = 'd1/d2/f3d1/f2f1'
+        blockhash = tutil.str_keep_locator(content)
+        cwriter.write_directory_tree(
+            self.build_directory_tree(['f1', 'd1/f2', 'd1/d2/f3']),
+            max_manifest_depth=0)
+        self.assertEqual(
+            cwriter.manifest_text(),
+            ". {} 0:8:d1/d2/f3 8:5:d1/f2 13:2:f1\n".format(blockhash))
+
+    def test_write_one_file(self):
+        cwriter = arvados.CollectionWriter(self.api_client)
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name)
+            self.assertEqual(
+                cwriter.manifest_text(),
+                ". 098f6bcd4621d373cade4e832627b4f6+4 0:4:{}\n".format(
+                    os.path.basename(testfile.name)))
+
+    def test_write_named_file(self):
+        cwriter = arvados.CollectionWriter(self.api_client)
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name, 'foo')
+            self.assertEqual(cwriter.manifest_text(),
+                             ". 098f6bcd4621d373cade4e832627b4f6+4 0:4:foo\n")
+
+    def test_write_multiple_files(self):
+        cwriter = arvados.CollectionWriter(self.api_client)
+        for letter in 'ABC':
+            with self.make_test_file(letter.encode()) as testfile:
+                cwriter.write_file(testfile.name, letter)
+        self.assertEqual(
+            cwriter.manifest_text(),
+            ". 902fbdd2b1df0c4f70b4a5d23525e932+3 0:1:A 1:1:B 2:1:C\n")
+
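+    # The resume tests below rely on TestResumableWriter checkpointing its
+    # progress via current_state() and rebuilding a writer with from_state();
+    # StaleWriterStateError signals that a dependency changed between the
+    # checkpoint and the resume attempt.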
+    def test_basic_resume(self):
+        cwriter = TestResumableWriter()
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name, 'test')
+            resumed = TestResumableWriter.from_state(cwriter.current_state())
+        self.assertEqual(cwriter.manifest_text(), resumed.manifest_text(),
+                         "resumed CollectionWriter had different manifest")
+
+    def test_resume_fails_when_missing_dependency(self):
+        cwriter = TestResumableWriter()
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name, 'test')
+        self.assertRaises(arvados.errors.StaleWriterStateError,
+                          TestResumableWriter.from_state,
+                          cwriter.current_state())
+
+    def test_resume_fails_when_dependency_mtime_changed(self):
+        cwriter = TestResumableWriter()
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name, 'test')
+            os.utime(testfile.name, (0, 0))
+            self.assertRaises(arvados.errors.StaleWriterStateError,
+                              TestResumableWriter.from_state,
+                              cwriter.current_state())
+
+    def test_resume_fails_when_dependency_is_nonfile(self):
+        cwriter = TestResumableWriter()
+        cwriter.write_file('/dev/null', 'empty')
+        self.assertRaises(arvados.errors.StaleWriterStateError,
+                          TestResumableWriter.from_state,
+                          cwriter.current_state())
+
+    def test_resume_fails_when_dependency_size_changed(self):
+        cwriter = TestResumableWriter()
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name, 'test')
+            orig_mtime = os.fstat(testfile.fileno()).st_mtime
+            testfile.write(b'extra')
+            testfile.flush()
+            os.utime(testfile.name, (orig_mtime, orig_mtime))
+            self.assertRaises(arvados.errors.StaleWriterStateError,
+                              TestResumableWriter.from_state,
+                              cwriter.current_state())
+
+    def test_resume_fails_with_expired_locator(self):
+        cwriter = TestResumableWriter()
+        state = cwriter.current_state()
+        # Add an expired locator to the state.
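+        # The locator follows the signed-locator layout seen elsewhere in
+        # these tests, <md5>+<size>+A<signature>@<expiry>, with the hex
+        # expiry timestamp '10000000' far in the past.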
+        state['_current_stream_locators'].append(''.join([
+                    'a' * 32, '+1+A', 'b' * 40, '@', '10000000']))
+        self.assertRaises(arvados.errors.StaleWriterStateError,
+                          TestResumableWriter.from_state, state)
+
+    def test_arbitrary_objects_not_resumable(self):
+        cwriter = TestResumableWriter()
+        with open('/dev/null') as badfile:
+            self.assertRaises(arvados.errors.AssertionError,
+                              cwriter.write_file, badfile)
+
+    def test_arbitrary_writes_not_resumable(self):
+        cwriter = TestResumableWriter()
+        self.assertRaises(arvados.errors.AssertionError,
+                          cwriter.write, "badtext")
+
+
+class CollectionTestMixin(tutil.ApiClientMock):
+    API_COLLECTIONS = run_test_server.fixture('collections')
+    DEFAULT_COLLECTION = API_COLLECTIONS['foo_file']
+    DEFAULT_DATA_HASH = DEFAULT_COLLECTION['portable_data_hash']
+    DEFAULT_MANIFEST = DEFAULT_COLLECTION['manifest_text']
+    DEFAULT_UUID = DEFAULT_COLLECTION['uuid']
+    ALT_COLLECTION = API_COLLECTIONS['bar_file']
+    ALT_DATA_HASH = ALT_COLLECTION['portable_data_hash']
+    ALT_MANIFEST = ALT_COLLECTION['manifest_text']
+
+    def api_client_mock(self, status=200):
+        client = super(CollectionTestMixin, self).api_client_mock()
+        self.mock_keep_services(client, status=status, service_type='proxy', count=1)
+        return client
+
+
+@tutil.skip_sleep
+class CollectionReaderTestCase(unittest.TestCase, CollectionTestMixin):
+    def mock_get_collection(self, api_mock, code, fixturename):
+        body = self.API_COLLECTIONS.get(fixturename)
+        self._mock_api_call(api_mock.collections().get, code, body)
+
+    def api_client_mock(self, status=200):
+        client = super(CollectionReaderTestCase, self).api_client_mock()
+        self.mock_get_collection(client, status, 'foo_file')
+        return client
+
+    def test_init_no_default_retries(self):
+        client = self.api_client_mock(200)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+        reader.manifest_text()
+        client.collections().get().execute.assert_called_with(num_retries=0)
+
+    def test_uuid_init_success(self):
+        client = self.api_client_mock(200)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client,
+                                          num_retries=3)
+        self.assertEqual(self.DEFAULT_COLLECTION['manifest_text'],
+                         reader.manifest_text())
+        client.collections().get().execute.assert_called_with(num_retries=3)
+
+    def test_uuid_init_failure_raises_api_error(self):
+        client = self.api_client_mock(500)
+        with self.assertRaises(arvados.errors.ApiError):
+            reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+
+    def test_locator_init(self):
+        client = self.api_client_mock(200)
+        # Ensure Keep will not return anything if asked.
+        with tutil.mock_keep_responses(None, 404):
+            reader = arvados.CollectionReader(self.DEFAULT_DATA_HASH,
+                                              api_client=client)
+            self.assertEqual(self.DEFAULT_MANIFEST, reader.manifest_text())
+
+    def test_init_no_fallback_to_keep(self):
+        # Do not look up a collection UUID or PDH in Keep.
+        for key in [self.DEFAULT_UUID, self.DEFAULT_DATA_HASH]:
+            client = self.api_client_mock(404)
+            with tutil.mock_keep_responses(self.DEFAULT_MANIFEST, 200):
+                with self.assertRaises(arvados.errors.ApiError):
+                    reader = arvados.CollectionReader(key, api_client=client)
+
+    def test_init_num_retries_propagated(self):
+        # More of an integration test...
+        client = self.api_client_mock(200)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client,
+                                          num_retries=3)
+        with tutil.mock_keep_responses('foo', 500, 500, 200):
+            self.assertEqual(b'foo',
+                             b''.join(f.read(9) for f in reader.all_files()))
+
+    def test_read_nonnormalized_manifest_with_collection_reader(self):
+        # The client should be able to use CollectionReader on a manifest without normalizing it.
+        client = self.api_client_mock(500)
+        nonnormal = ". acbd18db4cc2f85cedef654fccc4a4d8+3+Aabadbadbee@abeebdee 0:3:foo.txt 1:0:bar.txt 0:3:foo.txt\n"
+        reader = arvados.CollectionReader(
+            nonnormal,
+            api_client=client, num_retries=0)
+        # Ensure stripped_manifest() doesn't mangle our manifest in
+        # any way other than stripping hints.
+        self.assertEqual(
+            re.sub(r'\+[^\d\s\+]+', '', nonnormal),
+            reader.stripped_manifest())
+        # Ensure stripped_manifest() didn't mutate our reader.
+        self.assertEqual(nonnormal, reader.manifest_text())
+        # Ensure the files appear in the order given in the manifest.
+        self.assertEqual(
+            [[6, '.', 'foo.txt'],
+             [0, '.', 'bar.txt']],
+            [[f.size(), f.stream_name(), f.name()]
+             for f in reader.all_streams()[0].all_files()])
+
+    def test_read_empty_collection(self):
+        client = self.api_client_mock(200)
+        self.mock_get_collection(client, 200, 'empty')
+        reader = arvados.CollectionReader('d41d8cd98f00b204e9800998ecf8427e+0',
+                                          api_client=client)
+        self.assertEqual('', reader.manifest_text())
+        self.assertEqual(0, len(reader))
+        self.assertFalse(reader)
+
+    def test_api_response(self):
+        client = self.api_client_mock()
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+        self.assertEqual(self.DEFAULT_COLLECTION, reader.api_response())
+
+    def check_open_file(self, coll_file, stream_name, file_name, file_size):
+        self.assertFalse(coll_file.closed, "returned file is not open")
+        self.assertEqual(stream_name, coll_file.stream_name())
+        self.assertEqual(file_name, coll_file.name)
+        self.assertEqual(file_size, coll_file.size())
+
+    def test_open_collection_file_one_argument(self):
+        client = self.api_client_mock(200)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+        cfile = reader.open('./foo', 'rb')
+        self.check_open_file(cfile, '.', 'foo', 3)
+
+    def test_open_deep_file(self):
+        coll_name = 'collection_with_files_in_subdir'
+        client = self.api_client_mock(200)
+        self.mock_get_collection(client, 200, coll_name)
+        reader = arvados.CollectionReader(
+            self.API_COLLECTIONS[coll_name]['uuid'], api_client=client)
+        cfile = reader.open('./subdir2/subdir3/file2_in_subdir3.txt', 'rb')
+        self.check_open_file(cfile, './subdir2/subdir3', 'file2_in_subdir3.txt',
+                             32)
+
+    def test_open_nonexistent_stream(self):
+        client = self.api_client_mock(200)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+        self.assertRaises(IOError, reader.open, './nonexistent/foo')
+
+    def test_open_nonexistent_file(self):
+        client = self.api_client_mock(200)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+        self.assertRaises(IOError, reader.open, 'nonexistent')
+
+
+@tutil.skip_sleep
+class CollectionWriterTestCase(unittest.TestCase, CollectionTestMixin):
+    def mock_keep(self, body, *codes, **headers):
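+        # Every mocked Keep response advertises how many replicas were
+        # stored via the x-keep-replicas-stored header; the replication
+        # tests below check that count against the writer's requested
+        # replication level.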
+        headers.setdefault('x-keep-replicas-stored', 2)
+        return tutil.mock_keep_responses(body, *codes, **headers)
+
+    def foo_writer(self, **kwargs):
+        kwargs.setdefault('api_client', self.api_client_mock())
+        writer = arvados.CollectionWriter(**kwargs)
+        writer.start_new_file('foo')
+        writer.write(b'foo')
+        return writer
+
+    def test_write_whole_collection(self):
+        writer = self.foo_writer()
+        with self.mock_keep(self.DEFAULT_DATA_HASH, 200, 200):
+            self.assertEqual(self.DEFAULT_DATA_HASH, writer.finish())
+
+    def test_write_no_default(self):
+        writer = self.foo_writer()
+        with self.mock_keep(None, 500):
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                writer.finish()
+
+    def test_write_insufficient_replicas_via_proxy(self):
+        writer = self.foo_writer(replication=3)
+        with self.mock_keep(None, 200, **{'x-keep-replicas-stored': 2}):
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                writer.manifest_text()
+
+    def test_write_insufficient_replicas_via_disks(self):
+        client = mock.MagicMock(name='api_client')
+        with self.mock_keep(
+                None, 200, 200,
+                **{'x-keep-replicas-stored': 1}) as keepmock:
+            self.mock_keep_services(client, status=200, service_type='disk', count=2)
+            writer = self.foo_writer(api_client=client, replication=3)
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                writer.manifest_text()
+
+    def test_write_three_replicas(self):
+        client = mock.MagicMock(name='api_client')
+        with self.mock_keep(
+                "", 500, 500, 500, 200, 200, 200,
+                **{'x-keep-replicas-stored': 1}) as keepmock:
+            self.mock_keep_services(client, status=200, service_type='disk', count=6)
+            writer = self.foo_writer(api_client=client, replication=3)
+            writer.manifest_text()
+            self.assertEqual(6, keepmock.call_count)
+
+    def test_write_whole_collection_through_retries(self):
+        writer = self.foo_writer(num_retries=2)
+        with self.mock_keep(self.DEFAULT_DATA_HASH,
+                            500, 500, 200, 500, 500, 200):
+            self.assertEqual(self.DEFAULT_DATA_HASH, writer.finish())
+
+    def test_flush_data_retries(self):
+        writer = self.foo_writer(num_retries=2)
+        foo_hash = self.DEFAULT_MANIFEST.split()[1]
+        with self.mock_keep(foo_hash, 500, 200):
+            writer.flush_data()
+        self.assertEqual(self.DEFAULT_MANIFEST, writer.manifest_text())
+
+    def test_one_open(self):
+        client = self.api_client_mock()
+        writer = arvados.CollectionWriter(client)
+        with writer.open('out') as out_file:
+            self.assertEqual('.', writer.current_stream_name())
+            self.assertEqual('out', writer.current_file_name())
+            out_file.write(b'test data')
+            data_loc = tutil.str_keep_locator('test data')
+        self.assertTrue(out_file.closed, "writer file not closed after context")
+        self.assertRaises(ValueError, out_file.write, 'extra text')
+        with self.mock_keep(data_loc, 200) as keep_mock:
+            self.assertEqual(". {} 0:9:out\n".format(data_loc),
+                             writer.manifest_text())
+
+    def test_open_writelines(self):
+        client = self.api_client_mock()
+        writer = arvados.CollectionWriter(client)
+        with writer.open('six') as out_file:
+            out_file.writelines(['12', '34', '56'])
+            data_loc = tutil.str_keep_locator('123456')
+        with self.mock_keep(data_loc, 200) as keep_mock:
+            self.assertEqual(". {} 0:6:six\n".format(data_loc),
+                             writer.manifest_text())
+
+    def test_open_flush(self):
+        client = self.api_client_mock()
+        data_loc1 = tutil.str_keep_locator('flush1')
+        data_loc2 = tutil.str_keep_locator('flush2')
+        with self.mock_keep((data_loc1, 200), (data_loc2, 200)) as keep_mock:
+            writer = arvados.CollectionWriter(client)
+            with writer.open('flush_test') as out_file:
+                out_file.write(b'flush1')
+                out_file.flush()
+                out_file.write(b'flush2')
+            self.assertEqual(". {} {} 0:12:flush_test\n".format(data_loc1,
+                                                                data_loc2),
+                             writer.manifest_text())
+
+    def test_two_opens_same_stream(self):
+        client = self.api_client_mock()
+        writer = arvados.CollectionWriter(client)
+        with writer.open('.', '1') as out_file:
+            out_file.write(b'1st')
+        with writer.open('.', '2') as out_file:
+            out_file.write(b'2nd')
+        data_loc = tutil.str_keep_locator('1st2nd')
+        with self.mock_keep(data_loc, 200) as keep_mock:
+            self.assertEqual(". {} 0:3:1 3:3:2\n".format(data_loc),
+                             writer.manifest_text())
+
+    def test_two_opens_two_streams(self):
+        client = self.api_client_mock()
+        data_loc1 = tutil.str_keep_locator('file')
+        data_loc2 = tutil.str_keep_locator('indir')
+        with self.mock_keep((data_loc1, 200), (data_loc2, 200)) as keep_mock:
+            writer = arvados.CollectionWriter(client)
+            with writer.open('file') as out_file:
+                out_file.write(b'file')
+            with writer.open('./dir', 'indir') as out_file:
+                out_file.write(b'indir')
+            expected = ". {} 0:4:file\n./dir {} 0:5:indir\n".format(
+                data_loc1, data_loc2)
+            self.assertEqual(expected, writer.manifest_text())
+
+    def test_dup_open_fails(self):
+        client = self.api_client_mock()
+        writer = arvados.CollectionWriter(client)
+        file1 = writer.open('one')
+        self.assertRaises(arvados.errors.AssertionError, writer.open, 'two')
+
+
+class CollectionMethods(run_test_server.TestCaseWithServers):
+
+    def test_keys_values_items_support_indexing(self):
+        c = Collection()
+        with c.open('foo', 'wb') as f:
+            f.write(b'foo')
+        with c.open('bar', 'wb') as f:
+            f.write(b'bar')
+        self.assertEqual(2, len(c.keys()))
+        if sys.version_info < (3, 0):
+            # keys() supports indexing only for Python 2 callers.
+            fn0 = c.keys()[0]
+            fn1 = c.keys()[1]
+        else:
+            fn0, fn1 = c.keys()
+        self.assertEqual(2, len(c.values()))
+        f0 = c.values()[0]
+        f1 = c.values()[1]
+        self.assertEqual(2, len(c.items()))
+        self.assertEqual(fn0, c.items()[0][0])
+        self.assertEqual(fn1, c.items()[1][0])
+
+    def test_get_properties(self):
+        c = Collection()
+        self.assertEqual(c.get_properties(), {})
+        c.save_new(properties={"foo":"bar"})
+        self.assertEqual(c.get_properties(), {"foo":"bar"})
+
+    def test_get_trash_at(self):
+        c = Collection()
+        self.assertEqual(c.get_trash_at(), None)
+        c.save_new(trash_at=datetime.datetime(2111, 1, 1, 11, 11, 11, 111111))
+        self.assertEqual(c.get_trash_at(), ciso8601.parse_datetime('2111-01-01T11:11:11.111111000Z'))
+
+
+class CollectionOpenModes(run_test_server.TestCaseWithServers):
+
+    def test_open_binary_modes(self):
+        c = Collection()
+        for mode in ['wb', 'wb+', 'ab', 'ab+']:
+            with c.open('foo', mode) as f:
+                f.write(b'foo')
+
+    def test_open_invalid_modes(self):
+        c = Collection()
+        for mode in ['+r', 'aa', '++', 'r+b', 'beer', '', None]:
+            with self.assertRaises(Exception):
+                c.open('foo', mode)
+
+    def test_open_text_modes(self):
+        c = Collection()
+        with c.open('foo', 'wb') as f:
+            f.write(b'foo')
+        for mode in ['r', 'rt', 'r+', 'rt+', 'w', 'wt', 'a', 'at']:
+            with c.open('foo', mode) as f:
+                if mode[0] == 'r' and '+' not in mode:
+                    self.assertEqual('foo', f.read(3))
+                else:
+                    f.write('bar')
+                    f.seek(0, os.SEEK_SET)
+                    self.assertEqual('bar', f.read(3))
+
+
+class TextModes(run_test_server.TestCaseWithServers):
+
+    def setUp(self):
+        arvados.config.KEEP_BLOCK_SIZE = 4
+        if sys.version_info < (3, 0):
+            import unicodedata
+            self.sailboat = unicodedata.lookup('SAILBOAT')
+            self.snowman = unicodedata.lookup('SNOWMAN')
+        else:
+            self.sailboat = '\N{SAILBOAT}'
+            self.snowman = '\N{SNOWMAN}'
+
+    def tearDown(self):
+        arvados.config.KEEP_BLOCK_SIZE = 2 ** 26
+
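+    # With KEEP_BLOCK_SIZE forced down to 4 in setUp, the 3-byte UTF-8
+    # encodings of SAILBOAT and SNOWMAN necessarily straddle block
+    # boundaries, which is what the +4/+3 block-size hints in the regexes
+    # below are checking.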
+    def test_read_sailboat_across_block_boundary(self):
+        c = Collection()
+        f = c.open('sailboats', 'wb')
+        data = self.sailboat.encode('utf-8')
+        f.write(data)
+        f.write(data[:1])
+        f.write(data[1:])
+        f.write(b'\n')
+        f.close()
+        self.assertRegex(c.portable_manifest_text(), r'\+4 .*\+3 ')
+
+        f = c.open('sailboats', 'r')
+        string = f.readline()
+        self.assertEqual(string, self.sailboat+self.sailboat+'\n')
+        f.close()
+
+    def test_write_snowman_across_block_boundary(self):
+        c = Collection()
+        f = c.open('snowmany', 'w')
+        data = self.snowman
+        f.write(data+data+'\n'+data+'\n')
+        f.close()
+        self.assertRegex(c.portable_manifest_text(), r'\+4 .*\+4 .*\+3 ')
+
+        f = c.open('snowmany', 'r')
+        self.assertEqual(f.readline(), self.snowman+self.snowman+'\n')
+        self.assertEqual(f.readline(), self.snowman+'\n')
+        f.close()
+
+
+class NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):
+
+    def test_replication_desired_kept_on_load(self):
+        m = '. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n'
+        c1 = Collection(m, replication_desired=1)
+        c1.save_new()
+        loc = c1.manifest_locator()
+        c2 = Collection(loc)
+        self.assertEqual(c1.manifest_text, c2.manifest_text)
+        self.assertEqual(c1.replication_desired, c2.replication_desired)
+
+    def test_replication_desired_not_loaded_if_provided(self):
+        m = '. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n'
+        c1 = Collection(m, replication_desired=1)
+        c1.save_new()
+        loc = c1.manifest_locator()
+        c2 = Collection(loc, replication_desired=2)
+        self.assertEqual(c1.manifest_text, c2.manifest_text)
+        self.assertNotEqual(c1.replication_desired, c2.replication_desired)
+
+    def test_init_manifest(self):
+        m1 = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
+"""
+        self.assertEqual(m1, CollectionReader(m1).manifest_text(normalize=False))
+        self.assertEqual(". 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:127:md5sum.txt\n", CollectionReader(m1).manifest_text(normalize=True))
+
+    def test_init_manifest_with_collision(self):
+        m1 = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+./md5sum.txt 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+"""
+        with self.assertRaises(arvados.errors.ArgumentError):
+            self.assertEqual(m1, CollectionReader(m1))
+
+    def test_init_manifest_with_error(self):
+        m1 = """. 0:43:md5sum.txt"""
+        with self.assertRaises(arvados.errors.ArgumentError):
+            self.assertEqual(m1, CollectionReader(m1))
+
+    def test_remove(self):
+        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n')
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n", c.portable_manifest_text())
+        self.assertIn("count1.txt", c)
+        c.remove("count1.txt")
+        self.assertNotIn("count1.txt", c)
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n", c.portable_manifest_text())
+        with self.assertRaises(arvados.errors.ArgumentError):
+            c.remove("")
+
+    def test_find(self):
+        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n')
+        self.assertIs(c.find("."), c)
+        self.assertIs(c.find("./count1.txt"), c["count1.txt"])
+        self.assertIs(c.find("count1.txt"), c["count1.txt"])
+        with self.assertRaises(IOError):
+            c.find("/.")
+        with self.assertRaises(arvados.errors.ArgumentError):
+            c.find("")
+        self.assertIs(c.find("./nonexistant.txt"), None)
+        self.assertIs(c.find("./nonexistantsubdir/nonexistant.txt"), None)
+
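+    # Manifest name tokens escape special bytes as three-digit octal
+    # sequences (\040 for space, \134 for backslash), so a file literally
+    # named '\056' is stored as '\134056'; the next few tests check that
+    # this escaping survives round trips through manifests and listings.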
+    def test_escaped_paths_dont_get_unescaped_on_manifest(self):
+        # Dir & file names are literally '\056' (escaped form: \134056)
+        manifest = './\\134056\\040Test d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\134056\n'
+        c = Collection(manifest)
+        self.assertEqual(c.portable_manifest_text(), manifest)
+
+    def test_other_special_chars_on_file_token(self):
+        cases = [
+            ('\\000', '\0'),
+            ('\\011', '\t'),
+            ('\\012', '\n'),
+            ('\\072', ':'),
+            ('\\134400', '\\400'),
+        ]
+        for encoded, decoded in cases:
+            manifest = '. d41d8cd98f00b204e9800998ecf8427e+0 0:0:some%sfile.txt\n' % encoded
+            c = Collection(manifest)
+            self.assertEqual(c.portable_manifest_text(), manifest)
+            self.assertIn('some%sfile.txt' % decoded, c.keys())
+
+    def test_escaped_paths_do_get_unescaped_on_listing(self):
+        # Dir & file names are literally '\056' (escaped form: \134056)
+        manifest = './\\134056\\040Test d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\134056\n'
+        c = Collection(manifest)
+        self.assertIn('\\056 Test', c.keys())
+        self.assertIn('\\056', c['\\056 Test'].keys())
+
+    def test_make_empty_dir_with_escaped_chars(self):
+        c = Collection()
+        c.mkdirs('./Empty\\056Dir')
+        self.assertEqual(c.portable_manifest_text(),
+                         './Empty\\134056Dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\n')
+
+    def test_make_empty_dir_with_spaces(self):
+        c = Collection()
+        c.mkdirs('./foo bar/baz waz')
+        self.assertEqual(c.portable_manifest_text(),
+                         './foo\\040bar/baz\\040waz d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\n')
+
+    def test_remove_in_subdir(self):
+        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+        c.remove("foo/count2.txt")
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\n", c.portable_manifest_text())
+
+    def test_remove_empty_subdir(self):
+        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+        c.remove("foo/count2.txt")
+        c.remove("foo")
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n", c.portable_manifest_text())
+
+    def test_remove_nonempty_subdir(self):
+        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+        with self.assertRaises(IOError):
+            c.remove("foo")
+        c.remove("foo", recursive=True)
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n", c.portable_manifest_text())
+
+    def test_copy_to_file_in_dir(self):
+        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c.copy("count1.txt", "foo/count2.txt")
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n", c.portable_manifest_text())
+
+    def test_copy_file(self):
+        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c.copy("count1.txt", "count2.txt")
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n", c.portable_manifest_text())
+
+    def test_copy_to_existing_dir(self):
+        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+        c.copy("count1.txt", "foo")
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n", c.portable_manifest_text())
+
+    def test_copy_to_new_dir(self):
+        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c.copy("count1.txt", "foo/")
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n", c.portable_manifest_text())
+
+    def test_rename_file(self):
+        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c.rename("count1.txt", "count2.txt")
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n", c.manifest_text())
+
+    def test_move_file_to_dir(self):
+        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c.mkdirs("foo")
+        c.rename("count1.txt", "foo/count2.txt")
+        self.assertEqual("./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n", c.manifest_text())
+
+    def test_move_file_to_other(self):
+        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c2 = Collection()
+        c2.rename("count1.txt", "count2.txt", source_collection=c1)
+        self.assertEqual("", c1.manifest_text())
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n", c2.manifest_text())
+
+    def test_clone(self):
+        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+        cl = c.clone()
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n", cl.portable_manifest_text())
+
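+    # In the diff/apply tests below, a.diff(b) describes the changes needed
+    # to turn a into b as ('add'|'del'|'mod'|'tok', path, ...) tuples ('tok'
+    # apparently marking unchanged entries), and apply() replays them, so
+    # applying a.diff(b) to a makes its manifest match b's.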
+    def test_diff_del_add(self):
+        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+        d = c2.diff(c1)
+        self.assertEqual(sorted(d), [
+            ('add', './count1.txt', c1["count1.txt"]),
+            ('del', './count2.txt', c2["count2.txt"]),
+        ])
+        d = c1.diff(c2)
+        self.assertEqual(sorted(d), [
+            ('add', './count2.txt', c2["count2.txt"]),
+            ('del', './count1.txt', c1["count1.txt"]),
+        ])
+        self.assertNotEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+
+    def test_diff_same(self):
+        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        d = c2.diff(c1)
+        self.assertEqual(d, [('tok', './count1.txt', c2["count1.txt"], c1["count1.txt"])])
+        d = c1.diff(c2)
+        self.assertEqual(d, [('tok', './count1.txt', c2["count1.txt"], c1["count1.txt"])])
+
+        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+
+    def test_diff_mod(self):
+        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt\n')
+        d = c2.diff(c1)
+        self.assertEqual(d, [('mod', './count1.txt', c2["count1.txt"], c1["count1.txt"])])
+        d = c1.diff(c2)
+        self.assertEqual(d, [('mod', './count1.txt', c1["count1.txt"], c2["count1.txt"])])
+
+        self.assertNotEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+
+    def test_diff_add(self):
+        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt 10:20:count2.txt\n')
+        d = c2.diff(c1)
+        self.assertEqual(sorted(d), [
+            ('del', './count2.txt', c2["count2.txt"]),
+            ('tok', './count1.txt', c2["count1.txt"], c1["count1.txt"]),
+        ])
+        d = c1.diff(c2)
+        self.assertEqual(sorted(d), [
+            ('add', './count2.txt', c2["count2.txt"]),
+            ('tok', './count1.txt', c2["count1.txt"], c1["count1.txt"]),
+        ])
+
+        self.assertNotEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+
+    def test_diff_add_in_subcollection(self):
+        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+        d = c2.diff(c1)
+        self.assertEqual(sorted(d), [
+            ('del', './foo', c2["foo"]),
+            ('tok', './count1.txt', c2["count1.txt"], c1["count1.txt"]),
+        ])
+        d = c1.diff(c2)
+        self.assertEqual(sorted(d), [
+            ('add', './foo', c2["foo"]),
+            ('tok', './count1.txt', c2["count1.txt"], c1["count1.txt"]),
+        ])
+        self.assertNotEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+
+    def test_diff_del_add_in_subcollection(self):
+        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+        c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:3:count3.txt\n')
+        d = c2.diff(c1)
+        self.assertEqual(sorted(d), [
+            ('add', './foo/count2.txt', c1.find("foo/count2.txt")),
+            ('del', './foo/count3.txt', c2.find("foo/count3.txt")),
+            ('tok', './count1.txt', c2["count1.txt"], c1["count1.txt"]),
+        ])
+        d = c1.diff(c2)
+        self.assertEqual(sorted(d), [
+            ('add', './foo/count3.txt', c2.find("foo/count3.txt")),
+            ('del', './foo/count2.txt', c1.find("foo/count2.txt")),
+            ('tok', './count1.txt', c2["count1.txt"], c1["count1.txt"]),
+        ])
+
+        self.assertNotEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+
+    def test_diff_mod_in_subcollection(self):
+        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+        c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:3:foo\n')
+        d = c2.diff(c1)
+        self.assertEqual(sorted(d), [
+            ('mod', './foo', c2["foo"], c1["foo"]),
+            ('tok', './count1.txt', c2["count1.txt"], c1["count1.txt"]),
+        ])
+        d = c1.diff(c2)
+        self.assertEqual(sorted(d), [
+            ('mod', './foo', c1["foo"], c2["foo"]),
+            ('tok', './count1.txt', c2["count1.txt"], c1["count1.txt"]),
+        ])
+
+        self.assertNotEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())
+
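+    # Conflict handling, per the regexes below: when apply() would clobber a
+    # local change, the incoming version is kept under a mangled name of the
+    # form name~YYYYMMDD-HHMMSS~conflict~ rather than overwriting the local
+    # content.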
+    def test_conflict_keep_local_change(self):
+        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+        d = c1.diff(c2)
+        self.assertEqual(sorted(d), [
+            ('add', './count2.txt', c2["count2.txt"]),
+            ('del', './count1.txt', c1["count1.txt"]),
+        ])
+        f = c1.open("count1.txt", "wb")
+        f.write(b"zzzzz")
+
+        # c1 changed, so it should not be deleted.
+        c1.apply(d)
+        self.assertEqual(c1.portable_manifest_text(), ". 95ebc3c7b3b9f1d2c40fec14415d3cb8+5 5348b82a029fd9e971a811ce1f71360b+43 0:5:count1.txt 5:10:count2.txt\n")
+
+    def test_conflict_mod(self):
+        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt')
+        c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt')
+        d = c1.diff(c2)
+        self.assertEqual(d, [('mod', './count1.txt', c1["count1.txt"], c2["count1.txt"])])
+        f = c1.open("count1.txt", "wb")
+        f.write(b"zzzzz")
+
+        # c1 changed, so c2 mod will go to a conflict file
+        c1.apply(d)
+        self.assertRegex(
+            c1.portable_manifest_text(),
+            r"\. 95ebc3c7b3b9f1d2c40fec14415d3cb8\+5 5348b82a029fd9e971a811ce1f71360b\+43 0:5:count1\.txt 5:10:count1\.txt~\d\d\d\d\d\d\d\d-\d\d\d\d\d\d~conflict~$")
+
+    def test_conflict_add(self):
+        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+        c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt\n')
+        d = c1.diff(c2)
+        self.assertEqual(sorted(d), [
+            ('add', './count1.txt', c2["count1.txt"]),
+            ('del', './count2.txt', c1["count2.txt"]),
+        ])
+        f = c1.open("count1.txt", "wb")
+        f.write(b"zzzzz")
+
+        # c1 added count1.txt, so c2 add will go to a conflict file
+        c1.apply(d)
+        self.assertRegex(
+            c1.portable_manifest_text(),
+            r"\. 95ebc3c7b3b9f1d2c40fec14415d3cb8\+5 5348b82a029fd9e971a811ce1f71360b\+43 0:5:count1\.txt 5:10:count1\.txt~\d\d\d\d\d\d\d\d-\d\d\d\d\d\d~conflict~$")
+
+    def test_conflict_del(self):
+        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt')
+        c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt')
+        d = c1.diff(c2)
+        self.assertEqual(d, [('mod', './count1.txt', c1["count1.txt"], c2["count1.txt"])])
+        c1.remove("count1.txt")
+
+        # c1 deleted, so c2 mod will go to a conflict file
+        c1.apply(d)
+        self.assertRegex(
+            c1.portable_manifest_text(),
+            r"\. 5348b82a029fd9e971a811ce1f71360b\+43 0:10:count1\.txt~\d\d\d\d\d\d\d\d-\d\d\d\d\d\d~conflict~$")
+
+    def test_notify(self):
+        c1 = Collection()
+        events = []
+        c1.subscribe(lambda event, collection, name, item: events.append((event, collection, name, item)))
+        f = c1.open("foo.txt", "wb")
+        self.assertEqual(events[0], (arvados.collection.ADD, c1, "foo.txt", f.arvadosfile))
+
+    def test_open_w(self):
+        c1 = Collection(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n")
+        self.assertEqual(c1["count1.txt"].size(), 10)
+        c1.open("count1.txt", "wb").close()
+        self.assertEqual(c1["count1.txt"].size(), 0)
+
+
+class NewCollectionTestCaseWithServersAndTokens(run_test_server.TestCaseWithServers):
+    MAIN_SERVER = {}
+    KEEP_SERVER = {}
+    local_locator_re = r"[0-9a-f]{32}\+\d+\+A[a-f0-9]{40}@[a-f0-9]{8}"
+    remote_locator_re = r"[0-9a-f]{32}\+\d+\+R[a-z]{5}-[a-f0-9]{40}@[a-f0-9]{8}"
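+    # The two patterns distinguish locator permission hints: +A<sig>@<ts> is
+    # a locally signed block, while +R<cluster>-<sig>@<ts> marks a block that
+    # still lives on a remote cluster and is expected to be copied on save,
+    # as the tests below verify.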
+
+    def setUp(self):
+        self.keep_put = getattr(arvados.keep.KeepClient, 'put')
+
+    @mock.patch('arvados.keep.KeepClient.put', autospec=True)
+    def test_repacked_block_submission_get_permission_token(self, mocked_put):
+        '''
+        Make sure that blocks committed after repacking small ones get their
+        permission tokens assigned in the collection manifest.
+        '''
+        def wrapped_keep_put(*args, **kwargs):
+            # Simulate slow put operations
+            time.sleep(1)
+            return self.keep_put(*args, **kwargs)
+
+        mocked_put.side_effect = wrapped_keep_put
+        c = Collection()
+        # Write 70 files of ~1 MiB each so that the writer is forced to
+        # produce one big block by repacking small ones before finishing
+        # the upload.
+        for i in range(70):
+            f = c.open("file_{}.txt".format(i), 'wb')
+            f.write((random.choice('abcdefghijklmnopqrstuvwxyz') * (2**20+i)).encode())
+            f.close(flush=False)
+        # We should get 2 blocks with their tokens
+        self.assertEqual(len(re.findall(self.local_locator_re, c.manifest_text())), 2)
+
+    @mock.patch('arvados.keep.KeepClient.refresh_signature')
+    def test_copy_remote_blocks_on_save_new(self, rs_mock):
+        remote_block_loc = "acbd18db4cc2f85cedef654fccc4a4d8+3+Remote-" + "a" * 40 + "@abcdef01"
+        local_block_loc = "acbd18db4cc2f85cedef654fccc4a4d8+3+A" + "b" * 40 + "@abcdef01"
+        rs_mock.return_value = local_block_loc
+        c = Collection(". " + remote_block_loc + " 0:3:foofile.txt\n")
+        self.assertEqual(
+            len(re.findall(self.remote_locator_re, c.manifest_text())), 1)
+        self.assertEqual(
+            len(re.findall(self.local_locator_re, c.manifest_text())), 0)
+        c.save_new()
+        rs_mock.assert_called()
+        self.assertEqual(
+            len(re.findall(self.remote_locator_re, c.manifest_text())), 0)
+        self.assertEqual(
+            len(re.findall(self.local_locator_re, c.manifest_text())), 1)
+
+    @mock.patch('arvados.keep.KeepClient.refresh_signature')
+    def test_copy_remote_blocks_on_save(self, rs_mock):
+        remote_block_loc = "acbd18db4cc2f85cedef654fccc4a4d8+3+Remote-" + "a" * 40 + "@abcdef01"
+        local_block_loc = "acbd18db4cc2f85cedef654fccc4a4d8+3+A" + "b" * 40 + "@abcdef01"
+        rs_mock.return_value = local_block_loc
+        # Remote collection
+        remote_c = Collection(". " + remote_block_loc + " 0:3:foofile.txt\n")
+        self.assertEqual(
+            len(re.findall(self.remote_locator_re, remote_c.manifest_text())), 1)
+        # Local collection
+        local_c = Collection()
+        with local_c.open('barfile.txt', 'wb') as f:
+            f.write(b'bar')
+        local_c.save_new()
+        self.assertEqual(
+            len(re.findall(self.local_locator_re, local_c.manifest_text())), 1)
+        self.assertEqual(
+            len(re.findall(self.remote_locator_re, local_c.manifest_text())), 0)
+        # Copy remote file to local collection
+        local_c.copy('./foofile.txt', './copied/foofile.txt', remote_c)
+        self.assertEqual(
+            len(re.findall(self.local_locator_re, local_c.manifest_text())), 1)
+        self.assertEqual(
+            len(re.findall(self.remote_locator_re, local_c.manifest_text())), 1)
+        # Save local collection: remote block should be copied
+        local_c.save()
+        rs_mock.assert_called()
+        self.assertEqual(
+            len(re.findall(self.local_locator_re, local_c.manifest_text())), 2)
+        self.assertEqual(
+            len(re.findall(self.remote_locator_re, local_c.manifest_text())), 0)
+
+
+class NewCollectionTestCaseWithServers(run_test_server.TestCaseWithServers):
+    def test_get_manifest_text_only_committed(self):
+        c = Collection()
+        with c.open("count.txt", "wb") as f:
+            # One file committed
+            with c.open("foo.txt", "wb") as foo:
+                foo.write(b"foo")
+                foo.flush() # Force block commit
+            f.write(b"0123456789")
+            # Other file not committed. Block not written to Keep yet.
+            self.assertEqual(
+                c._get_manifest_text(".",
+                                     strip=False,
+                                     normalize=False,
+                                     only_committed=True),
+                '. acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:count.txt 0:3:foo.txt\n')
+            # And now with the file closed...
+            f.flush() # Force block commit
+        self.assertEqual(
+            c._get_manifest_text(".",
+                                 strip=False,
+                                 normalize=False,
+                                 only_committed=True),
+            ". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:10:count.txt 10:3:foo.txt\n")
+
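+    # The packing tests below close files with flush=False so their small
+    # buffers stay uncommitted; the writer then packs them into one shared
+    # block (the +13 block covering the 10-byte and 3-byte files), while
+    # anything larger than half a block is committed on its own.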
+    def test_only_small_blocks_are_packed_together(self):
+        c = Collection()
+        # Write a couple of small files,
+        f = c.open("count.txt", "wb")
+        f.write(b"0123456789")
+        f.close(flush=False)
+        foo = c.open("foo.txt", "wb")
+        foo.write(b"foo")
+        foo.close(flush=False)
+        # Then write a big file; it shouldn't be packed with the ones above
+        big = c.open("bigfile.txt", "wb")
+        big.write(b"x" * 1024 * 1024 * 33) # 33 MB > KEEP_BLOCK_SIZE/2
+        big.close(flush=False)
+        self.assertEqual(
+            c.manifest_text("."),
+            '. 2d303c138c118af809f39319e5d507e9+34603008 a8430a058b8fbf408e1931b794dbd6fb+13 0:34603008:bigfile.txt 34603008:10:count.txt 34603018:3:foo.txt\n')
+
+    def test_flush_after_small_block_packing(self):
+        c = Collection()
+        # Write a couple of small files,
+        f = c.open("count.txt", "wb")
+        f.write(b"0123456789")
+        f.close(flush=False)
+        foo = c.open("foo.txt", "wb")
+        foo.write(b"foo")
+        foo.close(flush=False)
+
+        self.assertEqual(
+            c.manifest_text(),
+            '. a8430a058b8fbf408e1931b794dbd6fb+13 0:10:count.txt 10:3:foo.txt\n')
+
+        f = c.open("count.txt", "rb+")
+        f.close(flush=True)
+
+        self.assertEqual(
+            c.manifest_text(),
+            '. a8430a058b8fbf408e1931b794dbd6fb+13 0:10:count.txt 10:3:foo.txt\n')
+
+    def test_write_after_small_block_packing2(self):
+        c = Collection()
+        # Write a couple of small files,
+        f = c.open("count.txt", "wb")
+        f.write(b"0123456789")
+        f.close(flush=False)
+        foo = c.open("foo.txt", "wb")
+        foo.write(b"foo")
+        foo.close(flush=False)
+
+        self.assertEqual(
+            c.manifest_text(),
+            '. a8430a058b8fbf408e1931b794dbd6fb+13 0:10:count.txt 10:3:foo.txt\n')
+
+        f = c.open("count.txt", "rb+")
+        f.write(b"abc")
+        f.close(flush=False)
+
+        self.assertEqual(
+            c.manifest_text(),
+            '. 900150983cd24fb0d6963f7d28e17f72+3 a8430a058b8fbf408e1931b794dbd6fb+13 0:3:count.txt 6:7:count.txt 13:3:foo.txt\n')
+
+    def test_small_block_packing_with_overwrite(self):
+        c = Collection()
+        c.open("b1", "wb").close()
+        c["b1"].writeto(0, b"b1", 0)
+
+        c.open("b2", "wb").close()
+        c["b2"].writeto(0, b"b2", 0)
+
+        c["b1"].writeto(0, b"1b", 0)
+
+        self.assertEqual(c.manifest_text(), ". ed4f3f67c70b02b29c50ce1ea26666bd+4 0:2:b1 2:2:b2\n")
+        self.assertEqual(c["b1"].manifest_text(), ". ed4f3f67c70b02b29c50ce1ea26666bd+4 0:2:b1\n")
+        self.assertEqual(c["b2"].manifest_text(), ". ed4f3f67c70b02b29c50ce1ea26666bd+4 2:2:b2\n")
+
+
+class CollectionCreateUpdateTest(run_test_server.TestCaseWithServers):
+    MAIN_SERVER = {}
+    KEEP_SERVER = {}
+
+    def create_count_txt(self):
+        # Create an empty collection, save it to the API server, then write a
+        # file, but don't save it.
+
+        c = Collection()
+        c.save_new("CollectionCreateUpdateTest", ensure_unique_name=True)
+        self.assertEqual(c.portable_data_hash(), "d41d8cd98f00b204e9800998ecf8427e+0")
+        self.assertEqual(c.api_response()["portable_data_hash"], "d41d8cd98f00b204e9800998ecf8427e+0" )
+
+        with c.open("count.txt", "wb") as f:
+            f.write(b"0123456789")
+
+        self.assertEqual(c.portable_manifest_text(), ". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n")
+
+        return c
+
+    def test_create_and_save(self):
+        c = self.create_count_txt()
+        c.save(properties={'type' : 'Intermediate'},
+               storage_classes=['archive'],
+               trash_at=datetime.datetime(2111, 1, 1, 11, 11, 11, 111111))
+
+        self.assertRegex(
+            c.manifest_text(),
+            r"^\. 781e5e245d69b566979b86e28d23f2c7\+10\+A[a-f0-9]{40}@[a-f0-9]{8} 0:10:count\.txt$",)
+        self.assertEqual(c.api_response()["storage_classes_desired"], ['archive'])
+        self.assertEqual(c.api_response()["properties"], {'type' : 'Intermediate'})
+        self.assertEqual(c.api_response()["trash_at"], '2111-01-01T11:11:11.111111000Z')
+
+    def test_create_and_save_new(self):
+        c = self.create_count_txt()
+        c.save_new(properties={'type' : 'Intermediate'},
+                   storage_classes=['archive'],
+                   trash_at=datetime.datetime(2111, 1, 1, 11, 11, 11, 111111))
+
+        self.assertRegex(
+            c.manifest_text(),
+            r"^\. 781e5e245d69b566979b86e28d23f2c7\+10\+A[a-f0-9]{40}@[a-f0-9]{8} 0:10:count\.txt$",)
+        self.assertEqual(c.api_response()["storage_classes_desired"], ['archive'])
+        self.assertEqual(c.api_response()["properties"], {'type' : 'Intermediate'})
+        self.assertEqual(c.api_response()["trash_at"], '2111-01-01T11:11:11.111111000Z')
+
+    def test_create_and_save_after_committing(self):
+        c = self.create_count_txt()
+        c.save(properties={'type' : 'Intermediate'},
+               storage_classes=['hot'],
+               trash_at=datetime.datetime(2111, 1, 1, 11, 11, 11, 111111))
+        c.save(properties={'type' : 'Output'},
+               storage_classes=['cold'],
+               trash_at=datetime.datetime(2222, 2, 2, 22, 22, 22, 222222))
+
+        self.assertEqual(c.api_response()["storage_classes_desired"], ['cold'])
+        self.assertEqual(c.api_response()["properties"], {'type' : 'Output'})
+        self.assertEqual(c.api_response()["trash_at"], '2222-02-02T22:22:22.222222000Z')
+
+    def test_create_diff_apply(self):
+        c1 = self.create_count_txt()
+        c1.save()
+
+        c2 = Collection(c1.manifest_locator())
+        with c2.open("count.txt", "wb") as f:
+            f.write(b"abcdefg")
+
+        diff = c1.diff(c2)
+
+        self.assertEqual(diff[0], (arvados.collection.MOD, u'./count.txt', c1["count.txt"], c2["count.txt"]))
+
+        c1.apply(diff)
+        self.assertEqual(c1.portable_data_hash(), c2.portable_data_hash())
+
+    def test_diff_apply_with_token(self):
+        baseline = CollectionReader(". 781e5e245d69b566979b86e28d23f2c7+10+A715fd31f8111894f717eb1003c1b0216799dd9ec@54f5dd1a 0:10:count.txt\n")
+        c = Collection(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n")
+        other = CollectionReader(". 7ac66c0f148de9519b8bd264312c4d64+7+A715fd31f8111894f717eb1003c1b0216799dd9ec@54f5dd1a 0:7:count.txt\n")
+
+        diff = baseline.diff(other)
+        self.assertEqual(diff, [('mod', u'./count.txt', c["count.txt"], other["count.txt"])])
+
+        c.apply(diff)
+
+        self.assertEqual(c.manifest_text(), ". 7ac66c0f148de9519b8bd264312c4d64+7+A715fd31f8111894f717eb1003c1b0216799dd9ec@54f5dd1a 0:7:count.txt\n")
+
+    def test_create_and_update(self):
+        c1 = self.create_count_txt()
+        c1.save()
+
+        c2 = arvados.collection.Collection(c1.manifest_locator())
+        with c2.open("count.txt", "wb") as f:
+            f.write(b"abcdefg")
+
+        c2.save()
+
+        self.assertNotEqual(c1.portable_data_hash(), c2.portable_data_hash())
+        c1.update()
+        self.assertEqual(c1.portable_data_hash(), c2.portable_data_hash())
+
+
+    def test_create_and_update_with_conflict(self):
+        c1 = self.create_count_txt()
+        c1.save()
+
+        with c1.open("count.txt", "wb") as f:
+            f.write(b"XYZ")
+
+        c2 = arvados.collection.Collection(c1.manifest_locator())
+        with c2.open("count.txt", "wb") as f:
+            f.write(b"abcdefg")
+
+        c2.save()
+
+        c1.update()
+        self.assertRegex(
+            c1.manifest_text(),
+            r"\. e65075d550f9b5bf9992fa1d71a131be\+3\S* 7ac66c0f148de9519b8bd264312c4d64\+7\S* 0:3:count\.txt 3:7:count\.txt~\d\d\d\d\d\d\d\d-\d\d\d\d\d\d~conflict~$")
+
+    def test_pdh_is_native_str(self):
+        c1 = self.create_count_txt()
+        pdh = c1.portable_data_hash()
+        self.assertEqual(type(''), type(pdh))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/sdk/python/tests/test_crunch.py b/sdk/python/tests/test_crunch.py
new file mode 100644 (file)
index 0000000..809e229
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import arvados.crunch
+import os
+import shutil
+import tempfile
+import unittest
+
+class TaskOutputDirTest(unittest.TestCase):
+    def setUp(self):
+        self.tmp = tempfile.mkdtemp()
+        os.environ['TASK_KEEPMOUNT_TMP'] = self.tmp
+
+    def tearDown(self):
+        os.environ.pop('TASK_KEEPMOUNT_TMP')
+        shutil.rmtree(self.tmp)
+
+    def test_env_var(self):
+        out = arvados.crunch.TaskOutputDir()
+        self.assertEqual(out.path, self.tmp)
+
+        with open(os.path.join(self.tmp, '.arvados#collection'), 'w') as f:
+            f.write('{\n  "manifest_text":"",\n  "uuid":null\n}\n')
+        self.assertEqual(out.manifest_text(), '')
+
+        # Special file must be re-read on each call to manifest_text().
+        with open(os.path.join(self.tmp, '.arvados#collection'), 'w') as f:
+            f.write(r'{"manifest_text":". unparsed 0:3:foo\n","uuid":null}')
+        self.assertEqual(out.manifest_text(), ". unparsed 0:3:foo\n")
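+
+# A hedged usage sketch (not part of the original file): a crunch task
+# writes its output under TaskOutputDir.path, then reads the resulting
+# manifest back via the '.arvados#collection' special file, as exercised
+# in the test above. Outside a real crunch task environment this would
+# fail, since nothing maintains the special file.
+def example_task_output_usage():
+    out = arvados.crunch.TaskOutputDir()
+    with open(os.path.join(out.path, 'result.txt'), 'w') as f:
+        f.write('data')
+    return out.manifest_text()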
diff --git a/sdk/python/tests/test_errors.py b/sdk/python/tests/test_errors.py
new file mode 100644 (file)
index 0000000..4ee68ba
--- /dev/null
@@ -0,0 +1,75 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+import traceback
+import unittest
+
+import arvados.errors as arv_error
+from . import arvados_testutil as tutil
+
+class KeepRequestErrorTestCase(unittest.TestCase):
+    REQUEST_ERRORS = [
+        ('http://keep1.zzzzz.example.org/', IOError("test IOError")),
+        ('http://keep3.zzzzz.example.org/', MemoryError("test MemoryError")),
+        ('http://keep5.zzzzz.example.org/',
+         arv_error.HttpError(500, "Internal Server Error")),
+        ('http://keep7.zzzzz.example.org/', IOError("second test IOError")),
+        ]
+
+    def check_get_message(self, *args):
+        test_exc = arv_error.KeepRequestError("test message", *args)
+        self.assertEqual("test message", test_exc.message)
+
+    def test_get_message_with_request_errors(self):
+        self.check_get_message(self.REQUEST_ERRORS[:])
+
+    def test_get_message_without_request_errors(self):
+        self.check_get_message()
+
+    def check_get_request_errors(self, *args):
+        expected = dict(args[0]) if args else {}
+        test_exc = arv_error.KeepRequestError("test service exceptions", *args)
+        self.assertEqual(expected, test_exc.request_errors())
+
+    def test_get_request_errors(self):
+        self.check_get_request_errors(self.REQUEST_ERRORS[:])
+
+    def test_get_request_errors_none(self):
+        self.check_get_request_errors({})
+
+    def test_empty_exception(self):
+        test_exc = arv_error.KeepRequestError()
+        self.assertFalse(test_exc.message)
+        self.assertEqual({}, test_exc.request_errors())
+
+    def traceback_str(self, exc):
+        return traceback.format_exception_only(type(exc), exc)[-1]
+
+    def test_traceback_str_without_request_errors(self):
+        message = "test plain traceback string"
+        test_exc = arv_error.KeepRequestError(message)
+        exc_report = self.traceback_str(test_exc)
+        self.assertRegex(exc_report, r"^(arvados\.errors\.)?KeepRequestError: ")
+        self.assertIn(message, exc_report)
+
+    def test_traceback_str_with_request_errors(self):
+        message = "test traceback shows Keep services"
+        test_exc = arv_error.KeepRequestError(message, self.REQUEST_ERRORS[:])
+        exc_report = self.traceback_str(test_exc)
+        self.assertRegex(exc_report, r"^(arvados\.errors\.)?KeepRequestError: ")
+        self.assertIn(message, exc_report)
+        for expect_re in [
+                r"raised (IOError|OSError)", # IOError in Python2, OSError in Python3
+                r"raised MemoryError",
+                r"test MemoryError",
+                r"second test IOError",
+                r"responded with 500 Internal Server Error"]:
+            self.assertRegex(exc_report, expect_re)
+        # Assert the report maintains order of listed services.
+        last_index = -1
+        for service_key, _ in self.REQUEST_ERRORS:
+            service_index = exc_report.find(service_key)
+            self.assertGreater(service_index, last_index)
+            last_index = service_index
diff --git a/sdk/python/tests/test_events.py b/sdk/python/tests/test_events.py
new file mode 100644 (file)
index 0000000..3561434
--- /dev/null
@@ -0,0 +1,408 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+from __future__ import absolute_import
+from __future__ import division
+from future import standard_library
+standard_library.install_aliases()
+from builtins import range
+from builtins import object
+import logging
+import mock
+import queue
+import sys
+import threading
+import time
+import unittest
+
+import arvados
+from . import arvados_testutil as tutil
+from . import run_test_server
+
+
+class WebsocketTest(run_test_server.TestCaseWithServers):
+    MAIN_SERVER = {}
+
+    TIME_PAST = time.time()-3600
+    TIME_FUTURE = time.time()+3600
+    MOCK_WS_URL = 'wss://[{}]/'.format(tutil.TEST_HOST)
+
+    TEST_TIMEOUT = 10.0
+
+    def setUp(self):
+        self.ws = None
+
+    def tearDown(self):
+        try:
+            if self.ws:
+                self.ws.close()
+        except Exception as e:
+            print("Error in teardown: ", e)
+        super(WebsocketTest, self).tearDown()
+        run_test_server.reset()
+
+    def _test_subscribe(self, poll_fallback, expect_type, start_time=None, expected=1):
+        run_test_server.authorize_with('active')
+        events = queue.Queue(100)
+
+        # Create an ancestor object before subscribing. When listening
+        # with start_time in the past, its creation event should also be
+        # retrieved; when start_time is omitted from the subscription, it
+        # should not be.
+        ancestor = arvados.api('v1').humans().create(body={}).execute()
+
+        filters = [['object_uuid', 'is_a', 'arvados#human']]
+        if start_time:
+            filters.append(['created_at', '>=', start_time])
+
+        self.ws = arvados.events.subscribe(
+            arvados.api('v1'), filters,
+            events.put_nowait,
+            poll_fallback=poll_fallback,
+            last_log_id=(1 if start_time else None))
+        self.assertIsInstance(self.ws, expect_type)
+        self.assertEqual(200, events.get(True, 5)['status'])
+        human = arvados.api('v1').humans().create(body={}).execute()
+
+        want_uuids = []
+        if expected > 0:
+            want_uuids.append(human['uuid'])
+        if expected > 1:
+            want_uuids.append(ancestor['uuid'])
+        log_object_uuids = []
+        while set(want_uuids) - set(log_object_uuids):
+            log_object_uuids.append(events.get(True, 5)['object_uuid'])
+
+        if expected < 2:
+            with self.assertRaises(queue.Empty):
+                # assertEqual just serves to show us what unexpected
+                # thing comes out of the queue when the assertRaises
+                # fails; when the test passes, this assertEqual
+                # doesn't get called.
+                self.assertEqual(events.get(True, 2), None)
+
+    def test_subscribe_websocket(self):
+        self._test_subscribe(
+            poll_fallback=False, expect_type=arvados.events.EventClient, expected=1)
+
+    @mock.patch('arvados.events.EventClient.__init__')
+    def test_subscribe_poll(self, event_client_constr):
+        event_client_constr.side_effect = Exception('All is well')
+        self._test_subscribe(
+            poll_fallback=0.25, expect_type=arvados.events.PollClient, expected=1)
+
+    def test_subscribe_poll_retry(self):
+        api_mock = mock.MagicMock()
+        n = []
+        def on_ev(ev):
+            n.append(ev)
+
+        error_mock = mock.MagicMock()
+        error_mock.resp.status = 0
+        error_mock._get_reason.return_value = "testing"
+        api_mock.logs().list().execute.side_effect = (
+            arvados.errors.ApiError(error_mock, b""),
+            {"items": [{"id": 1}], "items_available": 1},
+            arvados.errors.ApiError(error_mock, b""),
+            {"items": [{"id": 1}], "items_available": 1},
+        )
+        pc = arvados.events.PollClient(api_mock, [], on_ev, 15, None)
+        pc.start()
+        while len(n) < 2:
+            time.sleep(.1)
+        pc.close()
+
+    def test_subscribe_websocket_with_start_time_past(self):
+        self._test_subscribe(
+            poll_fallback=False, expect_type=arvados.events.EventClient,
+            start_time=self.localiso(self.TIME_PAST),
+            expected=2)
+
+    @mock.patch('arvados.events.EventClient.__init__')
+    def test_subscribe_poll_with_start_time_past(self, event_client_constr):
+        event_client_constr.side_effect = Exception('All is well')
+        self._test_subscribe(
+            poll_fallback=0.25, expect_type=arvados.events.PollClient,
+            start_time=self.localiso(self.TIME_PAST),
+            expected=2)
+
+    def test_subscribe_websocket_with_start_time_future(self):
+        self._test_subscribe(
+            poll_fallback=False, expect_type=arvados.events.EventClient,
+            start_time=self.localiso(self.TIME_FUTURE),
+            expected=0)
+
+    @mock.patch('arvados.events.EventClient.__init__')
+    def test_subscribe_poll_with_start_time_future(self, event_client_constr):
+        event_client_constr.side_effect = Exception('All is well')
+        self._test_subscribe(
+            poll_fallback=0.25, expect_type=arvados.events.PollClient,
+            start_time=self.localiso(self.TIME_FUTURE),
+            expected=0)
+
+    def test_subscribe_websocket_with_start_time_past_utc(self):
+        self._test_subscribe(
+            poll_fallback=False, expect_type=arvados.events.EventClient,
+            start_time=self.utciso(self.TIME_PAST),
+            expected=2)
+
+    def test_subscribe_websocket_with_start_time_future_utc(self):
+        self._test_subscribe(
+            poll_fallback=False, expect_type=arvados.events.EventClient,
+            start_time=self.utciso(self.TIME_FUTURE),
+            expected=0)
+
+    def utciso(self, t):
+        return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(t))
+
+    def localiso(self, t):
+        return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(t)) + self.isotz(-time.timezone//60)
+
+    def isotz(self, offset):
+        """Convert minutes-east-of-UTC to RFC3339- and ISO-compatible time zone designator"""
+        return '{:+03d}:{:02d}'.format(offset//60, offset%60)
+
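+    # Worked example (illustrative, not from the original source): for US
+    # Eastern standard time, time.timezone == 18000 seconds west of UTC,
+    # so -time.timezone//60 == -300 and isotz(-300) == '-05:00'.
+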
+    # Test websocket reconnection on (un)expected close
+    def _test_websocket_reconnect(self, close_unexpected):
+        run_test_server.authorize_with('active')
+        events = queue.Queue(100)
+
+        logstream = tutil.StringIO()
+        rootLogger = logging.getLogger()
+        streamHandler = logging.StreamHandler(logstream)
+        rootLogger.addHandler(streamHandler)
+
+        filters = [['object_uuid', 'is_a', 'arvados#human']]
+        filters.append(['created_at', '>=', self.localiso(self.TIME_PAST)])
+        self.ws = arvados.events.subscribe(
+            arvados.api('v1'), filters,
+            events.put_nowait,
+            poll_fallback=False,
+            last_log_id=None)
+        self.assertIsInstance(self.ws, arvados.events.EventClient)
+        self.assertEqual(200, events.get(True, 5)['status'])
+
+        # create obj
+        human = arvados.api('v1').humans().create(body={}).execute()
+
+        # expect an event
+        self.assertIn(human['uuid'], events.get(True, 5)['object_uuid'])
+        with self.assertRaises(queue.Empty):
+            self.assertEqual(events.get(True, 2), None)
+
+        # close (im)properly
+        if close_unexpected:
+            self.ws.ec.close_connection()
+        else:
+            self.ws.close()
+
+        # create one more obj
+        human2 = arvados.api('v1').humans().create(body={}).execute()
+
+        # (un)expect the object creation event
+        if close_unexpected:
+            log_object_uuids = []
+            for i in range(0, 2):
+                event = events.get(True, 5)
+                if event.get('object_uuid') is not None:
+                    log_object_uuids.append(event['object_uuid'])
+            with self.assertRaises(queue.Empty):
+                self.assertEqual(events.get(True, 2), None)
+            self.assertNotIn(human['uuid'], log_object_uuids)
+            self.assertIn(human2['uuid'], log_object_uuids)
+        else:
+            with self.assertRaises(queue.Empty):
+                self.assertEqual(events.get(True, 2), None)
+
+        # Verify log messages to check whether an (un)expected close happened.
+        log_messages = logstream.getvalue()
+        closeLogFound = log_messages.find("Unexpected close. Reconnecting.")
+        retryLogFound = log_messages.find("Error during websocket reconnect. Will retry")
+        if close_unexpected:
+            self.assertNotEqual(closeLogFound, -1)
+        else:
+            self.assertEqual(closeLogFound, -1)
+        rootLogger.removeHandler(streamHandler)
+
+    def test_websocket_reconnect_on_unexpected_close(self):
+        self._test_websocket_reconnect(True)
+
+    def test_websocket_no_reconnect_on_close_by_user(self):
+        self._test_websocket_reconnect(False)
+
+    # Test websocket reconnection retry
+    @mock.patch('arvados.events._EventClient.connect')
+    def test_websocket_reconnect_retry(self, event_client_connect):
+        event_client_connect.side_effect = [None, Exception('EventClient.connect error'), None]
+
+        logstream = tutil.StringIO()
+        rootLogger = logging.getLogger()
+        streamHandler = logging.StreamHandler(logstream)
+        rootLogger.addHandler(streamHandler)
+
+        run_test_server.authorize_with('active')
+        events = queue.Queue(100)
+
+        filters = [['object_uuid', 'is_a', 'arvados#human']]
+        self.ws = arvados.events.subscribe(
+            arvados.api('v1'), filters,
+            events.put_nowait,
+            poll_fallback=False,
+            last_log_id=None)
+        self.assertIsInstance(self.ws, arvados.events.EventClient)
+
+        # simulate improper close
+        self.ws.on_closed()
+
+        # verify log messages to ensure retry happened
+        log_messages = logstream.getvalue()
+        found = log_messages.find("Error 'EventClient.connect error' during websocket reconnect.")
+        self.assertNotEqual(found, -1)
+        rootLogger.removeHandler(streamHandler)
+
+    @mock.patch('arvados.events._EventClient')
+    def test_subscribe_method(self, websocket_client):
+        filters = [['object_uuid', 'is_a', 'arvados#human']]
+        client = arvados.events.EventClient(
+            self.MOCK_WS_URL, [], lambda event: None, None)
+        client.subscribe(filters[:], 99)
+        websocket_client().subscribe.assert_called_with(filters, 99)
+
+    @mock.patch('arvados.events._EventClient')
+    def test_unsubscribe(self, websocket_client):
+        filters = [['object_uuid', 'is_a', 'arvados#human']]
+        client = arvados.events.EventClient(
+            self.MOCK_WS_URL, filters[:], lambda event: None, None)
+        client.unsubscribe(filters[:])
+        websocket_client().unsubscribe.assert_called_with(filters)
+
+    @mock.patch('arvados.events._EventClient')
+    def test_run_forever_survives_reconnects(self, websocket_client):
+        connected = threading.Event()
+        websocket_client().connect.side_effect = connected.set
+        client = arvados.events.EventClient(
+            self.MOCK_WS_URL, [], lambda event: None, None)
+        forever_thread = threading.Thread(target=client.run_forever)
+        forever_thread.start()
+        # Simulate an unexpected disconnect, and wait for reconnect.
+        close_thread = threading.Thread(target=client.on_closed)
+        close_thread.start()
+        self.assertTrue(connected.wait(timeout=self.TEST_TIMEOUT))
+        close_thread.join()
+        run_forever_alive = forever_thread.is_alive()
+        client.close()
+        forever_thread.join()
+        self.assertTrue(run_forever_alive)
+        self.assertEqual(2, websocket_client().connect.call_count)
+
+
+class PollClientTestCase(unittest.TestCase):
+    TEST_TIMEOUT = 10.0
+
+    class MockLogs(object):
+
+        def __init__(self):
+            self.logs = []
+            self.lock = threading.Lock()
+            self.api_called = threading.Event()
+
+        def add(self, log):
+            with self.lock:
+                self.logs.append(log)
+
+        def return_list(self, num_retries=None):
+            self.api_called.set()
+            args, kwargs = self.list_func.call_args_list[-1]
+            filters = kwargs.get('filters', [])
+            if not any(True for f in filters if f[0] == 'id' and f[1] == '>'):
+                # No 'id' filter was given -- this must be the probe
+                # to determine the most recent id.
+                return {'items': [{'id': 1}], 'items_available': 1}
+            with self.lock:
+                retval = self.logs
+                self.logs = []
+            return {'items': retval, 'items_available': len(retval)}
+
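+    # (Sketch of the protocol mocked above, inferred from the filter
+    # handling in return_list(): PollClient's first logs().list() call has
+    # no 'id' filter and only probes for the newest log id; subsequent
+    # calls filter on ['id', '>', last_id] and drain the queued events.)
+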
+    def setUp(self):
+        self.logs = self.MockLogs()
+        self.arv = mock.MagicMock(name='arvados.api()')
+        self.arv.logs().list().execute.side_effect = self.logs.return_list
+        # our MockLogs object's "execute" stub will need to inspect
+        # the call history to determine X in
+        # ....logs().list(filters=X).execute():
+        self.logs.list_func = self.arv.logs().list
+        self.status_ok = threading.Event()
+        self.event_received = threading.Event()
+        self.recv_events = []
+
+    def tearDown(self):
+        if hasattr(self, 'client'):
+            self.client.close(timeout=None)
+
+    def callback(self, event):
+        if event.get('status') == 200:
+            self.status_ok.set()
+        else:
+            self.recv_events.append(event)
+            self.event_received.set()
+
+    def build_client(self, filters=None, callback=None, last_log_id=None, poll_time=99):
+        if filters is None:
+            filters = []
+        if callback is None:
+            callback = self.callback
+        self.client = arvados.events.PollClient(
+            self.arv, filters, callback, poll_time, last_log_id)
+
+    def was_filter_used(self, target):
+        return any(target in call[-1].get('filters', [])
+                   for call in self.arv.logs().list.call_args_list)
+
+    def test_callback(self):
+        test_log = {'id': 12345, 'testkey': 'testtext'}
+        self.logs.add({'id': 123})
+        self.build_client(poll_time=.01)
+        self.client.start()
+        self.assertTrue(self.status_ok.wait(self.TEST_TIMEOUT))
+        self.assertTrue(self.event_received.wait(self.TEST_TIMEOUT))
+        self.event_received.clear()
+        self.logs.add(test_log.copy())
+        self.assertTrue(self.event_received.wait(self.TEST_TIMEOUT))
+        self.assertIn(test_log, self.recv_events)
+
+    def test_subscribe(self):
+        client_filter = ['kind', '=', 'arvados#test']
+        self.build_client()
+        self.client.unsubscribe([])
+        self.client.subscribe([client_filter[:]])
+        self.client.start()
+        self.assertTrue(self.status_ok.wait(self.TEST_TIMEOUT))
+        self.assertTrue(self.logs.api_called.wait(self.TEST_TIMEOUT))
+        self.assertTrue(self.was_filter_used(client_filter))
+
+    def test_unsubscribe(self):
+        should_filter = ['foo', '=', 'foo']
+        should_not_filter = ['foo', '=', 'bar']
+        self.build_client(poll_time=0.01)
+        self.client.unsubscribe([])
+        self.client.subscribe([should_not_filter[:]])
+        self.client.subscribe([should_filter[:]])
+        self.client.unsubscribe([should_not_filter[:]])
+        self.client.start()
+        self.logs.add({'id': 123})
+        self.assertTrue(self.status_ok.wait(self.TEST_TIMEOUT))
+        self.assertTrue(self.event_received.wait(self.TEST_TIMEOUT))
+        self.assertTrue(self.was_filter_used(should_filter))
+        self.assertFalse(self.was_filter_used(should_not_filter))
+
+    def test_run_forever(self):
+        self.build_client()
+        self.client.start()
+        forever_thread = threading.Thread(target=self.client.run_forever)
+        forever_thread.start()
+        self.assertTrue(self.status_ok.wait(self.TEST_TIMEOUT))
+        self.assertTrue(forever_thread.is_alive())
+        self.client.close()
+        forever_thread.join()
+        del self.client
diff --git a/sdk/python/tests/test_keep_client.py b/sdk/python/tests/test_keep_client.py
new file mode 100644 (file)
index 0000000..d6b3a2a
--- /dev/null
@@ -0,0 +1,1356 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from __future__ import division
+from future import standard_library
+standard_library.install_aliases()
+from builtins import str
+from builtins import range
+from builtins import object
+import hashlib
+import mock
+import os
+import pycurl
+import random
+import re
+import socket
+import sys
+import time
+import unittest
+import urllib.parse
+
+import arvados
+import arvados.retry
+import arvados.util
+from . import arvados_testutil as tutil
+from . import keepstub
+from . import run_test_server
+
+class KeepTestCase(run_test_server.TestCaseWithServers):
+    MAIN_SERVER = {}
+    KEEP_SERVER = {}
+
+    @classmethod
+    def setUpClass(cls):
+        super(KeepTestCase, cls).setUpClass()
+        run_test_server.authorize_with("admin")
+        cls.api_client = arvados.api('v1')
+        cls.keep_client = arvados.KeepClient(api_client=cls.api_client,
+                                             proxy='', local_store='')
+
+    def test_KeepBasicRWTest(self):
+        self.assertEqual(0, self.keep_client.upload_counter.get())
+        foo_locator = self.keep_client.put('foo')
+        self.assertRegex(
+            foo_locator,
+            r'^acbd18db4cc2f85cedef654fccc4a4d8\+3',
+            'wrong md5 hash from Keep.put("foo"): ' + foo_locator)
+
+        # 6 bytes because uploaded 2 copies
+        self.assertEqual(6, self.keep_client.upload_counter.get())
+
+        self.assertEqual(0, self.keep_client.download_counter.get())
+        self.assertEqual(self.keep_client.get(foo_locator),
+                         b'foo',
+                         'wrong content from Keep.get(md5("foo"))')
+        self.assertEqual(3, self.keep_client.download_counter.get())
+
+    def test_KeepBinaryRWTest(self):
+        blob_str = b'\xff\xfe\xf7\x00\x01\x02'
+        blob_locator = self.keep_client.put(blob_str)
+        self.assertRegex(
+            blob_locator,
+            r'^7fc7c53b45e53926ba52821140fef396\+6',
+            ('wrong locator from Keep.put(<binarydata>):' + blob_locator))
+        self.assertEqual(self.keep_client.get(blob_locator),
+                         blob_str,
+                         'wrong content from Keep.get(md5(<binarydata>))')
+
+    def test_KeepLongBinaryRWTest(self):
+        blob_data = b'\xff\xfe\xfd\xfc\x00\x01\x02\x03'
+        for i in range(0,23):
+            blob_data = blob_data + blob_data
+        blob_locator = self.keep_client.put(blob_data)
+        self.assertRegex(
+            blob_locator,
+            r'^84d90fc0d8175dd5dcfab04b999bc956\+67108864',
+            ('wrong locator from Keep.put(<binarydata>): ' + blob_locator))
+        self.assertEqual(self.keep_client.get(blob_locator),
+                         blob_data,
+                         'wrong content from Keep.get(md5(<binarydata>))')
+
+    @unittest.skip("unreliable test - please fix and close #8752")
+    def test_KeepSingleCopyRWTest(self):
+        blob_data = b'\xff\xfe\xfd\xfc\x00\x01\x02\x03'
+        blob_locator = self.keep_client.put(blob_data, copies=1)
+        self.assertRegex(
+            blob_locator,
+            r'^c902006bc98a3eb4a3663b65ab4a6fab\+8',
+            ('wrong locator from Keep.put(<binarydata>): ' + blob_locator))
+        self.assertEqual(self.keep_client.get(blob_locator),
+                         blob_data,
+                         'wrong content from Keep.get(md5(<binarydata>))')
+
+    def test_KeepEmptyCollectionTest(self):
+        blob_locator = self.keep_client.put('', copies=1)
+        self.assertRegex(
+            blob_locator,
+            r'^d41d8cd98f00b204e9800998ecf8427e\+0',
+            ('wrong locator from Keep.put(""): ' + blob_locator))
+
+    def test_unicode_must_be_ascii(self):
+        # If unicode type, must only consist of valid ASCII
+        foo_locator = self.keep_client.put(u'foo')
+        self.assertRegex(
+            foo_locator,
+            r'^acbd18db4cc2f85cedef654fccc4a4d8\+3',
+            'wrong md5 hash from Keep.put("foo"): ' + foo_locator)
+
+        if sys.version_info < (3, 0):
+            with self.assertRaises(UnicodeEncodeError):
+                # Error if it is not ASCII
+                self.keep_client.put(u'\xe2')
+
+        with self.assertRaises(AttributeError):
+            # Must be bytes or have an encode() method
+            self.keep_client.put({})
+
+    def test_KeepHeadTest(self):
+        locator = self.keep_client.put('test_head')
+        self.assertRegex(
+            locator,
+            r'^b9a772c7049325feb7130fff1f8333e9\+9',
+            'wrong md5 hash from Keep.put for "test_head": ' + locator)
+        self.assertEqual(True, self.keep_client.head(locator))
+        self.assertEqual(self.keep_client.get(locator),
+                         b'test_head',
+                         'wrong content from Keep.get for "test_head"')
+
+class KeepPermissionTestCase(run_test_server.TestCaseWithServers):
+    MAIN_SERVER = {}
+    KEEP_SERVER = {'blob_signing_key': 'abcdefghijk0123456789',
+                   'enforce_permissions': True}
+
+    def test_KeepBasicRWTest(self):
+        run_test_server.authorize_with('active')
+        keep_client = arvados.KeepClient()
+        foo_locator = keep_client.put('foo')
+        self.assertRegex(
+            foo_locator,
+            r'^acbd18db4cc2f85cedef654fccc4a4d8\+3\+A[a-f0-9]+@[a-f0-9]+$',
+            'invalid locator from Keep.put("foo"): ' + foo_locator)
+        self.assertEqual(keep_client.get(foo_locator),
+                         b'foo',
+                         'wrong content from Keep.get(md5("foo"))')
+
+        # GET with an unsigned locator => NotFound
+        bar_locator = keep_client.put('bar')
+        unsigned_bar_locator = "37b51d194a7513e45b56f6524f2d51f2+3"
+        self.assertRegex(
+            bar_locator,
+            r'^37b51d194a7513e45b56f6524f2d51f2\+3\+A[a-f0-9]+@[a-f0-9]+$',
+            'invalid locator from Keep.put("bar"): ' + bar_locator)
+        self.assertRaises(arvados.errors.NotFoundError,
+                          keep_client.get,
+                          unsigned_bar_locator)
+
+        # GET from a different user => NotFound
+        run_test_server.authorize_with('spectator')
+        self.assertRaises(arvados.errors.NotFoundError,
+                          arvados.Keep.get,
+                          bar_locator)
+
+        # Unauthenticated GET for a signed locator => NotFound
+        # Unauthenticated GET for an unsigned locator => NotFound
+        keep_client.api_token = ''
+        self.assertRaises(arvados.errors.NotFoundError,
+                          keep_client.get,
+                          bar_locator)
+        self.assertRaises(arvados.errors.NotFoundError,
+                          keep_client.get,
+                          unsigned_bar_locator)
+
+
+# KeepOptionalPermission: starts Keep with --permission-key-file
+# but not --enforce-permissions (i.e. generate signatures on PUT
+# requests, but do not require them for GET requests)
+#
+# All of these requests should succeed when permissions are optional:
+# * authenticated request, signed locator
+# * authenticated request, unsigned locator
+# * unauthenticated request, signed locator
+# * unauthenticated request, unsigned locator
+class KeepOptionalPermission(run_test_server.TestCaseWithServers):
+    MAIN_SERVER = {}
+    KEEP_SERVER = {'blob_signing_key': 'abcdefghijk0123456789',
+                   'enforce_permissions': False}
+
+    @classmethod
+    def setUpClass(cls):
+        super(KeepOptionalPermission, cls).setUpClass()
+        run_test_server.authorize_with("admin")
+        cls.api_client = arvados.api('v1')
+
+    def setUp(self):
+        super(KeepOptionalPermission, self).setUp()
+        self.keep_client = arvados.KeepClient(api_client=self.api_client,
+                                              proxy='', local_store='')
+
+    def _put_foo_and_check(self):
+        signed_locator = self.keep_client.put('foo')
+        self.assertRegex(
+            signed_locator,
+            r'^acbd18db4cc2f85cedef654fccc4a4d8\+3\+A[a-f0-9]+@[a-f0-9]+$',
+            'invalid locator from Keep.put("foo"): ' + signed_locator)
+        return signed_locator
+
+    def test_KeepAuthenticatedSignedTest(self):
+        signed_locator = self._put_foo_and_check()
+        self.assertEqual(self.keep_client.get(signed_locator),
+                         b'foo',
+                         'wrong content from Keep.get(md5("foo"))')
+
+    def test_KeepAuthenticatedUnsignedTest(self):
+        signed_locator = self._put_foo_and_check()
+        self.assertEqual(self.keep_client.get("acbd18db4cc2f85cedef654fccc4a4d8"),
+                         b'foo',
+                         'wrong content from Keep.get(md5("foo"))')
+
+    def test_KeepUnauthenticatedSignedTest(self):
+        # Check that signed GET requests work even when permissions
+        # enforcement is off.
+        signed_locator = self._put_foo_and_check()
+        self.keep_client.api_token = ''
+        self.assertEqual(self.keep_client.get(signed_locator),
+                         b'foo',
+                         'wrong content from Keep.get(md5("foo"))')
+
+    def test_KeepUnauthenticatedUnsignedTest(self):
+        # Since --enforce-permissions is not in effect, GET requests
+        # need not be authenticated.
+        signed_locator = self._put_foo_and_check()
+        self.keep_client.api_token = ''
+        self.assertEqual(self.keep_client.get("acbd18db4cc2f85cedef654fccc4a4d8"),
+                         b'foo',
+                         'wrong content from Keep.get(md5("foo"))')
+
+
+class KeepProxyTestCase(run_test_server.TestCaseWithServers):
+    MAIN_SERVER = {}
+    KEEP_SERVER = {}
+    KEEP_PROXY_SERVER = {}
+
+    @classmethod
+    def setUpClass(cls):
+        super(KeepProxyTestCase, cls).setUpClass()
+        run_test_server.authorize_with('active')
+        cls.api_client = arvados.api('v1')
+
+    def tearDown(self):
+        arvados.config.settings().pop('ARVADOS_EXTERNAL_CLIENT', None)
+        super(KeepProxyTestCase, self).tearDown()
+
+    def test_KeepProxyTest1(self):
+        # Will use ARVADOS_KEEP_SERVICES environment variable that
+        # is set by setUpClass().
+        keep_client = arvados.KeepClient(api_client=self.api_client,
+                                         local_store='')
+        baz_locator = keep_client.put('baz')
+        self.assertRegex(
+            baz_locator,
+            r'^73feffa4b7f6bb68e44cf984c85f6e88\+3',
+            'wrong md5 hash from Keep.put("baz"): ' + baz_locator)
+        self.assertEqual(keep_client.get(baz_locator),
+                         b'baz',
+                         'wrong content from Keep.get(md5("baz"))')
+        self.assertTrue(keep_client.using_proxy)
+
+    def test_KeepProxyTest2(self):
+        # Don't instantiate the proxy directly, but set the X-External-Client
+        # header.  The API server should direct us to the proxy.
+        arvados.config.settings()['ARVADOS_EXTERNAL_CLIENT'] = 'true'
+        keep_client = arvados.KeepClient(api_client=self.api_client,
+                                         proxy='', local_store='')
+        baz_locator = keep_client.put('baz2')
+        self.assertRegex(
+            baz_locator,
+            r'^91f372a266fe2bf2823cb8ec7fda31ce\+4',
+            'wrong md5 hash from Keep.put("baz2"): ' + baz_locator)
+        self.assertEqual(keep_client.get(baz_locator),
+                         b'baz2',
+                         'wrong content from Keep.get(md5("baz2"))')
+        self.assertTrue(keep_client.using_proxy)
+
+    def test_KeepProxyTestMultipleURIs(self):
+        # Test using ARVADOS_KEEP_SERVICES env var overriding any
+        # existing proxy setting and setting multiple proxies
+        arvados.config.settings()['ARVADOS_KEEP_SERVICES'] = 'http://10.0.0.1 https://foo.example.org:1234/'
+        keep_client = arvados.KeepClient(api_client=self.api_client,
+                                         local_store='')
+        uris = [x['_service_root'] for x in keep_client._keep_services]
+        self.assertEqual(uris, ['http://10.0.0.1/',
+                                'https://foo.example.org:1234/'])
+
+    def test_KeepProxyTestInvalidURI(self):
+        arvados.config.settings()['ARVADOS_KEEP_SERVICES'] = 'bad.uri.org'
+        with self.assertRaises(arvados.errors.ArgumentError):
+            keep_client = arvados.KeepClient(api_client=self.api_client,
+                                             local_store='')
+
+
+class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
+    def get_service_roots(self, api_client):
+        keep_client = arvados.KeepClient(api_client=api_client)
+        services = keep_client.weighted_service_roots(arvados.KeepLocator('0'*32))
+        return [urllib.parse.urlparse(url) for url in sorted(services)]
+
+    def test_ssl_flag_respected_in_roots(self):
+        for ssl_flag in [False, True]:
+            services = self.get_service_roots(self.mock_keep_services(
+                service_ssl_flag=ssl_flag))
+            self.assertEqual(
+                ('https' if ssl_flag else 'http'), services[0].scheme)
+
+    def test_correct_ports_with_ipv6_addresses(self):
+        service = self.get_service_roots(self.mock_keep_services(
+            service_type='proxy', service_host='100::1', service_port=10, count=1))[0]
+        self.assertEqual('100::1', service.hostname)
+        self.assertEqual(10, service.port)
+
+    def test_insecure_disables_tls_verify(self):
+        api_client = self.mock_keep_services(count=1)
+        force_timeout = socket.timeout("timed out")
+
+        api_client.insecure = True
+        with tutil.mock_keep_responses(b'foo', 200) as mock:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3')
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.SSL_VERIFYPEER),
+                0)
+
+        api_client.insecure = False
+        with tutil.mock_keep_responses(b'foo', 200) as mock:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3')
+            # getopt()==None here means we didn't change the
+            # default. If we were using real pycurl instead of a mock,
+            # it would return the default value 1.
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.SSL_VERIFYPEER),
+                None)
+
+    def test_refresh_signature(self):
+        blk_digest = '6f5902ac237024bdd0c176cb93063dc4+11'
+        blk_sig = 'da39a3ee5e6b4b0d3255bfef95601890afd80709@53bed294'
+        local_loc = blk_digest+'+A'+blk_sig
+        remote_loc = blk_digest+'+R'+blk_sig
+        api_client = self.mock_keep_services(count=1)
+        headers = {'X-Keep-Locator':local_loc}
+        with tutil.mock_keep_responses('', 200, **headers):
+            # Check that the translated locator gets returned
+            keep_client = arvados.KeepClient(api_client=api_client)
+            self.assertEqual(local_loc, keep_client.refresh_signature(remote_loc))
+            # Check that refresh_signature() uses the correct method and headers
+            keep_client._get_or_head = mock.MagicMock()
+            keep_client.refresh_signature(remote_loc)
+            args, kwargs = keep_client._get_or_head.call_args_list[0]
+            self.assertIn(remote_loc, args)
+            self.assertEqual("HEAD", kwargs['method'])
+            self.assertIn('X-Keep-Signature', kwargs['headers'])
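+
+    # (Locator anatomy assumed by the test above: '<md5>+<size>' followed
+    # by a permission hint -- '+A<sig>@<expiry>' for a signature usable
+    # locally, '+R<sig>@<expiry>' for a remote signature that
+    # refresh_signature() exchanges, via a HEAD request carrying an
+    # X-Keep-Signature header, for a local '+A' signature.)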
+
+    # test_*_timeout verify that KeepClient instructs pycurl to use
+    # the appropriate connection and read timeouts. They don't care
+    # whether pycurl actually exhibits the expected timeout behavior
+    # -- those tests are in the KeepClientTimeout test class.
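+    #
+    # (Illustrative mapping, assuming DEFAULT_TIMEOUT is a (connect,
+    # low-speed-time, low-speed-limit) triple such as (2, 256, 32768):
+    # pycurl would get CONNECTTIMEOUT_MS=2000, LOW_SPEED_TIME=256 and
+    # LOW_SPEED_LIMIT=32768, i.e. "fail if slower than 32 KiB/s for 256s".)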
+
+    def test_get_timeout(self):
+        api_client = self.mock_keep_services(count=1)
+        force_timeout = socket.timeout("timed out")
+        with tutil.mock_keep_responses(force_timeout, 0) as mock:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            with self.assertRaises(arvados.errors.KeepReadError):
+                keep_client.get('ffffffffffffffffffffffffffffffff')
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),
+                int(arvados.KeepClient.DEFAULT_TIMEOUT[0]*1000))
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),
+                int(arvados.KeepClient.DEFAULT_TIMEOUT[1]))
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),
+                int(arvados.KeepClient.DEFAULT_TIMEOUT[2]))
+
+    def test_put_timeout(self):
+        api_client = self.mock_keep_services(count=1)
+        force_timeout = socket.timeout("timed out")
+        with tutil.mock_keep_responses(force_timeout, 0) as mock:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                keep_client.put(b'foo')
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),
+                int(arvados.KeepClient.DEFAULT_TIMEOUT[0]*1000))
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),
+                int(arvados.KeepClient.DEFAULT_TIMEOUT[1]))
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),
+                int(arvados.KeepClient.DEFAULT_TIMEOUT[2]))
+
+    def test_head_timeout(self):
+        api_client = self.mock_keep_services(count=1)
+        force_timeout = socket.timeout("timed out")
+        with tutil.mock_keep_responses(force_timeout, 0) as mock:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            with self.assertRaises(arvados.errors.KeepReadError):
+                keep_client.head('ffffffffffffffffffffffffffffffff')
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),
+                int(arvados.KeepClient.DEFAULT_TIMEOUT[0]*1000))
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),
+                None)
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),
+                None)
+
+    def test_proxy_get_timeout(self):
+        api_client = self.mock_keep_services(service_type='proxy', count=1)
+        force_timeout = socket.timeout("timed out")
+        with tutil.mock_keep_responses(force_timeout, 0) as mock:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            with self.assertRaises(arvados.errors.KeepReadError):
+                keep_client.get('ffffffffffffffffffffffffffffffff')
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),
+                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[0]*1000))
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),
+                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[1]))
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),
+                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[2]))
+
+    def test_proxy_head_timeout(self):
+        api_client = self.mock_keep_services(service_type='proxy', count=1)
+        force_timeout = socket.timeout("timed out")
+        with tutil.mock_keep_responses(force_timeout, 0) as mock:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            with self.assertRaises(arvados.errors.KeepReadError):
+                keep_client.head('ffffffffffffffffffffffffffffffff')
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),
+                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[0]*1000))
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),
+                None)
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),
+                None)
+
+    def test_proxy_put_timeout(self):
+        api_client = self.mock_keep_services(service_type='proxy', count=1)
+        force_timeout = socket.timeout("timed out")
+        with tutil.mock_keep_responses(force_timeout, 0) as mock:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                keep_client.put('foo')
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),
+                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[0]*1000))
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),
+                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[1]))
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),
+                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[2]))
+
+    def check_no_services_error(self, verb, exc_class):
+        api_client = mock.MagicMock(name='api_client')
+        api_client.keep_services().accessible().execute.side_effect = (
+            arvados.errors.ApiError)
+        keep_client = arvados.KeepClient(api_client=api_client)
+        with self.assertRaises(exc_class) as err_check:
+            getattr(keep_client, verb)('d41d8cd98f00b204e9800998ecf8427e+0')
+        self.assertEqual(0, len(err_check.exception.request_errors()))
+
+    def test_get_error_with_no_services(self):
+        self.check_no_services_error('get', arvados.errors.KeepReadError)
+
+    def test_head_error_with_no_services(self):
+        self.check_no_services_error('head', arvados.errors.KeepReadError)
+
+    def test_put_error_with_no_services(self):
+        self.check_no_services_error('put', arvados.errors.KeepWriteError)
+
+    def check_errors_from_last_retry(self, verb, exc_class):
+        api_client = self.mock_keep_services(count=2)
+        req_mock = tutil.mock_keep_responses(
+            "retry error reporting test", 500, 500, 403, 403)
+        with req_mock, tutil.skip_sleep, \
+                self.assertRaises(exc_class) as err_check:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            getattr(keep_client, verb)('d41d8cd98f00b204e9800998ecf8427e+0',
+                                       num_retries=3)
+        self.assertEqual([403, 403], [
+                getattr(error, 'status_code', None)
+                for error in err_check.exception.request_errors().values()])
+
+    def test_get_error_reflects_last_retry(self):
+        self.check_errors_from_last_retry('get', arvados.errors.KeepReadError)
+
+    def test_head_error_reflects_last_retry(self):
+        self.check_errors_from_last_retry('head', arvados.errors.KeepReadError)
+
+    def test_put_error_reflects_last_retry(self):
+        self.check_errors_from_last_retry('put', arvados.errors.KeepWriteError)
+
+    def test_put_error_does_not_include_successful_puts(self):
+        data = 'partial failure test'
+        data_loc = tutil.str_keep_locator(data)
+        api_client = self.mock_keep_services(count=3)
+        with tutil.mock_keep_responses(data_loc, 200, 500, 500) as req_mock, \
+                self.assertRaises(arvados.errors.KeepWriteError) as exc_check:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            keep_client.put(data)
+        self.assertEqual(2, len(exc_check.exception.request_errors()))
+
+    def test_proxy_put_with_no_writable_services(self):
+        data = 'test with no writable services'
+        data_loc = tutil.str_keep_locator(data)
+        api_client = self.mock_keep_services(service_type='proxy', read_only=True, count=1)
+        with tutil.mock_keep_responses(data_loc, 200, 500, 500) as req_mock, \
+                self.assertRaises(arvados.errors.KeepWriteError) as exc_check:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            keep_client.put(data)
+        self.assertIn("no Keep services available", str(exc_check.exception))
+        self.assertEqual(0, len(exc_check.exception.request_errors()))
+
+    def test_oddball_service_get(self):
+        body = b'oddball service get'
+        api_client = self.mock_keep_services(service_type='fancynewblobstore')
+        with tutil.mock_keep_responses(body, 200):
+            keep_client = arvados.KeepClient(api_client=api_client)
+            actual = keep_client.get(tutil.str_keep_locator(body))
+        self.assertEqual(body, actual)
+
+    def test_oddball_service_put(self):
+        body = b'oddball service put'
+        pdh = tutil.str_keep_locator(body)
+        api_client = self.mock_keep_services(service_type='fancynewblobstore')
+        with tutil.mock_keep_responses(pdh, 200):
+            keep_client = arvados.KeepClient(api_client=api_client)
+            actual = keep_client.put(body, copies=1)
+        self.assertEqual(pdh, actual)
+
+    def test_oddball_service_writer_count(self):
+        body = b'oddball service writer count'
+        pdh = tutil.str_keep_locator(body)
+        api_client = self.mock_keep_services(service_type='fancynewblobstore',
+                                             count=4)
+        headers = {'x-keep-replicas-stored': 3}
+        with tutil.mock_keep_responses(pdh, 200, 418, 418, 418,
+                                       **headers) as req_mock:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            actual = keep_client.put(body, copies=2)
+        self.assertEqual(pdh, actual)
+        self.assertEqual(1, req_mock.call_count)
+
+
+@tutil.skip_sleep
+class KeepClientCacheTestCase(unittest.TestCase, tutil.ApiClientMock):
+    def setUp(self):
+        self.api_client = self.mock_keep_services(count=2)
+        self.keep_client = arvados.KeepClient(api_client=self.api_client)
+        self.data = b'xyzzy'
+        self.locator = '1271ed5ef305aadabc605b1609e24c52'
+
+    @mock.patch('arvados.KeepClient.KeepService.get')
+    def test_get_request_cache(self, get_mock):
+        with tutil.mock_keep_responses(self.data, 200, 200):
+            self.keep_client.get(self.locator)
+            self.keep_client.get(self.locator)
+        # Request already cached, don't require more than one request
+        get_mock.assert_called_once()
+
+    @mock.patch('arvados.KeepClient.KeepService.get')
+    def test_head_request_cache(self, get_mock):
+        with tutil.mock_keep_responses(self.data, 200, 200):
+            self.keep_client.head(self.locator)
+            self.keep_client.head(self.locator)
+        # Don't cache HEAD requests so that they're not confused with GET reqs
+        self.assertEqual(2, get_mock.call_count)
+
+    @mock.patch('arvados.KeepClient.KeepService.get')
+    def test_head_and_then_get_return_different_responses(self, get_mock):
+        head_resp = None
+        get_resp = None
+        get_mock.side_effect = ['first response', 'second response']
+        with tutil.mock_keep_responses(self.data, 200, 200):
+            head_resp = self.keep_client.head(self.locator)
+            get_resp = self.keep_client.get(self.locator)
+        self.assertEqual('first response', head_resp)
+        # First response was not cached because it was from a HEAD request.
+        self.assertNotEqual(head_resp, get_resp)
+
+
+@tutil.skip_sleep
+class KeepXRequestIdTestCase(unittest.TestCase, tutil.ApiClientMock):
+    def setUp(self):
+        self.api_client = self.mock_keep_services(count=2)
+        self.keep_client = arvados.KeepClient(api_client=self.api_client)
+        self.data = b'xyzzy'
+        self.locator = '1271ed5ef305aadabc605b1609e24c52'
+        self.test_id = arvados.util.new_request_id()
+        self.assertRegex(self.test_id, r'^req-[a-z0-9]{20}$')
+        # If we don't set request_id to None explicitly here, it will
+        # return <MagicMock name='api_client_mock.request_id'
+        # id='123456789'>:
+        self.api_client.request_id = None
+
+    def test_default_to_api_client_request_id(self):
+        self.api_client.request_id = self.test_id
+        with tutil.mock_keep_responses(self.locator, 200, 200) as mock:
+            self.keep_client.put(self.data)
+        self.assertEqual(2, len(mock.responses))
+        for resp in mock.responses:
+            self.assertProvidedRequestId(resp)
+
+        with tutil.mock_keep_responses(self.data, 200) as mock:
+            self.keep_client.get(self.locator)
+        self.assertProvidedRequestId(mock.responses[0])
+
+        with tutil.mock_keep_responses(b'', 200) as mock:
+            self.keep_client.head(self.locator)
+        self.assertProvidedRequestId(mock.responses[0])
+
+    def test_explicit_request_id(self):
+        with tutil.mock_keep_responses(self.locator, 200, 200) as mock:
+            self.keep_client.put(self.data, request_id=self.test_id)
+        self.assertEqual(2, len(mock.responses))
+        for resp in mock.responses:
+            self.assertProvidedRequestId(resp)
+
+        with tutil.mock_keep_responses(self.data, 200) as mock:
+            self.keep_client.get(self.locator, request_id=self.test_id)
+        self.assertProvidedRequestId(mock.responses[0])
+
+        with tutil.mock_keep_responses(b'', 200) as mock:
+            self.keep_client.head(self.locator, request_id=self.test_id)
+        self.assertProvidedRequestId(mock.responses[0])
+
+    def test_automatic_request_id(self):
+        with tutil.mock_keep_responses(self.locator, 200, 200) as mock:
+            self.keep_client.put(self.data)
+        self.assertEqual(2, len(mock.responses))
+        for resp in mock.responses:
+            self.assertAutomaticRequestId(resp)
+
+        with tutil.mock_keep_responses(self.data, 200) as mock:
+            self.keep_client.get(self.locator)
+        self.assertAutomaticRequestId(mock.responses[0])
+
+        with tutil.mock_keep_responses(b'', 200) as mock:
+            self.keep_client.head(self.locator)
+        self.assertAutomaticRequestId(mock.responses[0])
+
+    def assertAutomaticRequestId(self, resp):
+        hdr = [x for x in resp.getopt(pycurl.HTTPHEADER)
+               if x.startswith('X-Request-Id: ')][0]
+        self.assertNotEqual(hdr, 'X-Request-Id: '+self.test_id)
+        self.assertRegex(hdr, r'^X-Request-Id: req-[a-z0-9]{20}$')
+
+    def assertProvidedRequestId(self, resp):
+        self.assertIn('X-Request-Id: '+self.test_id,
+                      resp.getopt(pycurl.HTTPHEADER))
+
+
+@tutil.skip_sleep
+class KeepClientRendezvousTestCase(unittest.TestCase, tutil.ApiClientMock):
+
+    def setUp(self):
+        # expected_order[i] is the probe order for
+        # hash=md5(sprintf("%064x",i)) where there are 16 services
+        # with uuid sprintf("anything-%015x",j) with j in 0..15. E.g.,
+        # the first probe for the block consisting of 64 "0"
+        # characters is the service whose uuid is
+        # "zzzzz-bi6l4-000000000000003", so expected_order[0][0]=='3'.
+        self.services = 16
+        self.expected_order = [
+            list('3eab2d5fc9681074'),
+            list('097dba52e648f1c3'),
+            list('c5b4e023f8a7d691'),
+            list('9d81c02e76a3bf54'),
+            ]
+        self.blocks = [
+            "{:064x}".format(x).encode()
+            for x in range(len(self.expected_order))]
+        self.hashes = [
+            hashlib.md5(self.blocks[x]).hexdigest()
+            for x in range(len(self.expected_order))]
+        self.api_client = self.mock_keep_services(count=self.services)
+        self.keep_client = arvados.KeepClient(api_client=self.api_client)
+
+    def test_weighted_service_roots_against_reference_set(self):
+        # Confirm weighted_service_roots() returns the correct order
+        for i, hash in enumerate(self.hashes):
+            roots = self.keep_client.weighted_service_roots(arvados.KeepLocator(hash))
+            got_order = [
+                re.search(r'//\[?keep0x([0-9a-f]+)', root).group(1)
+                for root in roots]
+            self.assertEqual(self.expected_order[i], got_order)
+
+    def test_get_probe_order_against_reference_set(self):
+        self._test_probe_order_against_reference_set(
+            lambda i: self.keep_client.get(self.hashes[i], num_retries=1))
+
+    def test_head_probe_order_against_reference_set(self):
+        self._test_probe_order_against_reference_set(
+            lambda i: self.keep_client.head(self.hashes[i], num_retries=1))
+
+    def test_put_probe_order_against_reference_set(self):
+        # copies=1 prevents the test from being sensitive to races
+        # between writer threads.
+        self._test_probe_order_against_reference_set(
+            lambda i: self.keep_client.put(self.blocks[i], num_retries=1, copies=1))
+
+    def _test_probe_order_against_reference_set(self, op):
+        for i in range(len(self.blocks)):
+            with tutil.mock_keep_responses('', *[500 for _ in range(self.services*2)]) as mock, \
+                 self.assertRaises(arvados.errors.KeepRequestError):
+                op(i)
+            got_order = [
+                re.search(r'//\[?keep0x([0-9a-f]+)', resp.getopt(pycurl.URL).decode()).group(1)
+                for resp in mock.responses]
+            self.assertEqual(self.expected_order[i]*2, got_order)
+
+    def test_put_probe_order_multiple_copies(self):
+        for copies in range(2, 4):
+            for i in range(len(self.blocks)):
+                with tutil.mock_keep_responses('', *[500 for _ in range(self.services*3)]) as mock, \
+                     self.assertRaises(arvados.errors.KeepWriteError):
+                    self.keep_client.put(self.blocks[i], num_retries=2, copies=copies)
+                got_order = [
+                    re.search(r'//\[?keep0x([0-9a-f]+)', resp.getopt(pycurl.URL).decode()).group(1)
+                    for resp in mock.responses]
+                # With T threads racing to make requests, the position
+                # of a given server in the sequence of HTTP requests
+                # (got_order) cannot be more than T-1 positions
+                # earlier than that server's position in the reference
+                # probe sequence (expected_order).
+                #
+                # Loop invariant: we have accounted for +pos+ expected
+                # probes, either by seeing them in +got_order+ or by
+                # putting them in +pending+ in the hope of seeing them
+                # later. As long as +len(pending)<T+, we haven't
+                # started a request too early.
+                pending = []
+                for pos, expected in enumerate(self.expected_order[i]*3):
+                    got = got_order[pos-len(pending)]
+                    while got in pending:
+                        del pending[pending.index(got)]
+                        got = got_order[pos-len(pending)]
+                    if got != expected:
+                        pending.append(expected)
+                        self.assertLess(
+                            len(pending), copies,
+                            "pending={}, with copies={}, got {}, expected {}".format(
+                                pending, copies, repr(got_order), repr(self.expected_order[i]*3)))
+
+    def test_probe_waste_adding_one_server(self):
+        hashes = [
+            hashlib.md5("{:064x}".format(x).encode()).hexdigest() for x in range(100)]
+        initial_services = 12
+        self.api_client = self.mock_keep_services(count=initial_services)
+        self.keep_client = arvados.KeepClient(api_client=self.api_client)
+        probes_before = [
+            self.keep_client.weighted_service_roots(arvados.KeepLocator(hash)) for hash in hashes]
+        for added_services in range(1, 12):
+            api_client = self.mock_keep_services(count=initial_services+added_services)
+            keep_client = arvados.KeepClient(api_client=api_client)
+            total_penalty = 0
+            for hash_index in range(len(hashes)):
+                probe_after = keep_client.weighted_service_roots(
+                    arvados.KeepLocator(hashes[hash_index]))
+                penalty = probe_after.index(probes_before[hash_index][0])
+                self.assertLessEqual(penalty, added_services)
+                total_penalty += penalty
+            # The total penalty should stay close to the ideal
+            # N(added)*N(blocks)/N(orig): at most ~20% above it, with
+            # the tolerance tightening as more services are added, and
+            # at least 80% of it.
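+            # (Reasoning: each added service outweighs a block's
+            # previous first choice with probability ~1/N(orig), so the
+            # expected penalty is ~N(added)/N(orig) per block, summed
+            # over N(blocks) blocks.)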
+            expect_penalty = (
+                added_services *
+                len(hashes) / initial_services)
+            max_penalty = (
+                expect_penalty *
+                (120 - added_services)/100)
+            min_penalty = (
+                expect_penalty * 8/10)
+            self.assertTrue(
+                min_penalty <= total_penalty <= max_penalty,
+                "With {}+{} services, {} blocks, penalty {} but expected {}..{}".format(
+                    initial_services,
+                    added_services,
+                    len(hashes),
+                    total_penalty,
+                    min_penalty,
+                    max_penalty))
+
+    def check_64_zeros_error_order(self, verb, exc_class):
+        data = b'0' * 64
+        if verb == 'get':
+            data = tutil.str_keep_locator(data)
+        # Arbitrary port number:
+        aport = random.randint(1024,65535)
+        api_client = self.mock_keep_services(service_port=aport, count=self.services)
+        keep_client = arvados.KeepClient(api_client=api_client)
+        with mock.patch('pycurl.Curl') as curl_mock, \
+             self.assertRaises(exc_class) as err_check:
+            curl_mock.return_value = tutil.FakeCurl.make(code=500, body=b'')
+            getattr(keep_client, verb)(data)
+        urls = [urllib.parse.urlparse(url)
+                for url in err_check.exception.request_errors()]
+        self.assertEqual([('keep0x' + c, aport) for c in '3eab2d5fc9681074'],
+                         [(url.hostname, url.port) for url in urls])
+
+    def test_get_error_shows_probe_order(self):
+        self.check_64_zeros_error_order('get', arvados.errors.KeepReadError)
+
+    def test_put_error_shows_probe_order(self):
+        self.check_64_zeros_error_order('put', arvados.errors.KeepWriteError)
+
+
+class KeepClientTimeout(keepstub.StubKeepServers, unittest.TestCase):
+    # BANDWIDTH_LOW_LIM must be less than len(DATA) so we can transfer
+    # 1s worth of data and then trigger bandwidth errors before running
+    # out of data.
+    DATA = b'x'*2**11
+    BANDWIDTH_LOW_LIM = 1024
+    TIMEOUT_TIME = 1.0
+
+    class assertTakesBetween(unittest.TestCase):
+        def __init__(self, tmin, tmax):
+            self.tmin = tmin
+            self.tmax = tmax
+
+        def __enter__(self):
+            self.t0 = time.time()
+
+        def __exit__(self, *args, **kwargs):
+            # Round times to milliseconds, like CURL. Otherwise, we
+            # fail when CURL reaches a 1s timeout at 0.9998s.
+            delta = round(time.time() - self.t0, 3)
+            self.assertGreaterEqual(delta, self.tmin)
+            self.assertLessEqual(delta, self.tmax)
+
+    class assertTakesGreater(unittest.TestCase):
+        def __init__(self, tmin):
+            self.tmin = tmin
+
+        def __enter__(self):
+            self.t0 = time.time()
+
+        def __exit__(self, *args, **kwargs):
+            delta = round(time.time() - self.t0, 3)
+            self.assertGreaterEqual(delta, self.tmin)
+
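+    # The timeout triple passed to KeepClient below is taken here to
+    # mean (connection timeout, response timeout, low-speed limit in
+    # bytes/sec): the third element is the bandwidth floor below which
+    # a transfer is treated as timed out.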
+    def keepClient(self, timeouts=(0.1, TIMEOUT_TIME, BANDWIDTH_LOW_LIM)):
+        return arvados.KeepClient(
+            api_client=self.api_client,
+            timeout=timeouts)
+
+    def test_timeout_slow_connect(self):
+        # Can't simulate TCP delays with our own socket. Leave our
+        # stub server running uselessly, and try to connect to an
+        # unroutable IP address instead.
+        self.api_client = self.mock_keep_services(
+            count=1,
+            service_host='240.0.0.0',
+        )
+        with self.assertTakesBetween(0.1, 0.5):
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                self.keepClient().put(self.DATA, copies=1, num_retries=0)
+
+    def test_low_bandwidth_no_delays_success(self):
+        self.server.setbandwidth(2*self.BANDWIDTH_LOW_LIM)
+        kc = self.keepClient()
+        loc = kc.put(self.DATA, copies=1, num_retries=0)
+        self.assertEqual(self.DATA, kc.get(loc, num_retries=0))
+
+    def test_too_low_bandwidth_no_delays_failure(self):
+        # Check that dropping bandwidth below the low-speed limit
+        # makes reads and writes fail.
+        kc = self.keepClient()
+        loc = kc.put(self.DATA, copies=1, num_retries=0)
+        self.server.setbandwidth(0.5*self.BANDWIDTH_LOW_LIM)
+        with self.assertTakesGreater(self.TIMEOUT_TIME):
+            with self.assertRaises(arvados.errors.KeepReadError):
+                kc.get(loc, num_retries=0)
+        with self.assertTakesGreater(self.TIMEOUT_TIME):
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                kc.put(self.DATA, copies=1, num_retries=0)
+
+    def test_low_bandwidth_with_server_response_delay_failure(self):
+        kc = self.keepClient()
+        loc = kc.put(self.DATA, copies=1, num_retries=0)
+        self.server.setbandwidth(self.BANDWIDTH_LOW_LIM)
+        self.server.setdelays(response=self.TIMEOUT_TIME)
+        with self.assertTakesGreater(self.TIMEOUT_TIME):
+            with self.assertRaises(arvados.errors.KeepReadError):
+                kc.get(loc, num_retries=0)
+        with self.assertTakesGreater(self.TIMEOUT_TIME):
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                kc.put(self.DATA, copies=1, num_retries=0)
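+        # head() transfers no response body, so it is expected to
+        # succeed here (presumably never tripping the low-speed
+        # limit), just slowly.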
+        with self.assertTakesGreater(self.TIMEOUT_TIME):
+            kc.head(loc, num_retries=0)
+
+    def test_low_bandwidth_with_server_mid_delay_failure(self):
+        kc = self.keepClient()
+        loc = kc.put(self.DATA, copies=1, num_retries=0)
+        self.server.setbandwidth(self.BANDWIDTH_LOW_LIM)
+        self.server.setdelays(mid_write=self.TIMEOUT_TIME, mid_read=self.TIMEOUT_TIME)
+        with self.assertTakesGreater(self.TIMEOUT_TIME):
+            with self.assertRaises(arvados.errors.KeepReadError):
+                kc.get(loc, num_retries=0)
+        with self.assertTakesGreater(self.TIMEOUT_TIME):
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                kc.put(self.DATA, copies=1, num_retries=0)
+
+    def test_timeout_slow_request(self):
+        loc = self.keepClient().put(self.DATA, copies=1, num_retries=0)
+        self.server.setdelays(request=.2)
+        self._test_connect_timeout_under_200ms(loc)
+        self.server.setdelays(request=2)
+        self._test_response_timeout_under_2s(loc)
+
+    def test_timeout_slow_response(self):
+        loc = self.keepClient().put(self.DATA, copies=1, num_retries=0)
+        self.server.setdelays(response=.2)
+        self._test_connect_timeout_under_200ms(loc)
+        self.server.setdelays(response=2)
+        self._test_response_timeout_under_2s(loc)
+
+    def test_timeout_slow_response_body(self):
+        loc = self.keepClient().put(self.DATA, copies=1, num_retries=0)
+        self.server.setdelays(response_body=.2)
+        self._test_connect_timeout_under_200ms(loc)
+        self.server.setdelays(response_body=2)
+        self._test_response_timeout_under_2s(loc)
+
+    def _test_connect_timeout_under_200ms(self, loc):
+        # Allow 100ms to connect, then 1s for response. Everything
+        # should work, and everything should take at least 200ms to
+        # return.
+        kc = self.keepClient(timeouts=(.1, 1))
+        with self.assertTakesBetween(.2, .3):
+            kc.put(self.DATA, copies=1, num_retries=0)
+        with self.assertTakesBetween(.2, .3):
+            self.assertEqual(self.DATA, kc.get(loc, num_retries=0))
+
+    def _test_response_timeout_under_2s(self, loc):
+        # Allow 10s to connect, then 1s for response. Nothing should
+        # work, and everything should take at least 1s to return.
+        kc = self.keepClient(timeouts=(10, 1))
+        with self.assertTakesBetween(1, 9):
+            with self.assertRaises(arvados.errors.KeepReadError):
+                kc.get(loc, num_retries=0)
+        with self.assertTakesBetween(1, 9):
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                kc.put(self.DATA, copies=1, num_retries=0)
+
+
+class KeepClientGatewayTestCase(unittest.TestCase, tutil.ApiClientMock):
+    def mock_disks_and_gateways(self, disks=3, gateways=1):
+        self.gateways = [{
+                'uuid': 'zzzzz-bi6l4-gateway{:08d}'.format(i),
+                'owner_uuid': 'zzzzz-tpzed-000000000000000',
+                'service_host': 'gatewayhost{}'.format(i),
+                'service_port': 12345,
+                'service_ssl_flag': True,
+                'service_type': 'gateway:test',
+        } for i in range(gateways)]
+        self.gateway_roots = [
+            "https://{service_host}:{service_port}/".format(**gw)
+            for gw in self.gateways]
+        self.api_client = self.mock_keep_services(
+            count=disks, additional_services=self.gateways)
+        self.keepClient = arvados.KeepClient(api_client=self.api_client)
+
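+    # Locator hints drive routing in these tests: "+K@<gateway uuid>"
+    # names a specific gateway service to try first, while a bare
+    # "+K@<cluster id>" (e.g. "+K@xyzzy") resolves to that cluster's
+    # Keep proxy at https://keep.<cluster id>.arvadosapi.com/.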
+    @mock.patch('pycurl.Curl')
+    def test_get_with_gateway_hint_first(self, MockCurl):
+        MockCurl.return_value = tutil.FakeCurl.make(
+            code=200, body='foo', headers={'Content-Length': 3})
+        self.mock_disks_and_gateways()
+        locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3+K@' + self.gateways[0]['uuid']
+        self.assertEqual(b'foo', self.keepClient.get(locator))
+        self.assertEqual(self.gateway_roots[0]+locator,
+                         MockCurl.return_value.getopt(pycurl.URL).decode())
+        self.assertEqual(True, self.keepClient.head(locator))
+
+    @mock.patch('pycurl.Curl')
+    def test_get_with_gateway_hints_in_order(self, MockCurl):
+        gateways = 4
+        disks = 3
+        mocks = [
+            tutil.FakeCurl.make(code=404, body='')
+            for _ in range(gateways+disks)
+        ]
+        MockCurl.side_effect = tutil.queue_with(mocks)
+        self.mock_disks_and_gateways(gateways=gateways, disks=disks)
+        locator = '+'.join(['acbd18db4cc2f85cedef654fccc4a4d8+3'] +
+                           ['K@'+gw['uuid'] for gw in self.gateways])
+        with self.assertRaises(arvados.errors.NotFoundError):
+            self.keepClient.get(locator)
+        # Gateways are tried first, in the order given.
+        for i, root in enumerate(self.gateway_roots):
+            self.assertEqual(root+locator,
+                             mocks[i].getopt(pycurl.URL).decode())
+        # Disk services are tried next.
+        for i in range(gateways, gateways+disks):
+            self.assertRegex(
+                mocks[i].getopt(pycurl.URL).decode(),
+                r'keep0x')
+
+    @mock.patch('pycurl.Curl')
+    def test_head_with_gateway_hints_in_order(self, MockCurl):
+        gateways = 4
+        disks = 3
+        mocks = [
+            tutil.FakeCurl.make(code=404, body=b'')
+            for _ in range(gateways+disks)
+        ]
+        MockCurl.side_effect = tutil.queue_with(mocks)
+        self.mock_disks_and_gateways(gateways=gateways, disks=disks)
+        locator = '+'.join(['acbd18db4cc2f85cedef654fccc4a4d8+3'] +
+                           ['K@'+gw['uuid'] for gw in self.gateways])
+        with self.assertRaises(arvados.errors.NotFoundError):
+            self.keepClient.head(locator)
+        # Gateways are tried first, in the order given.
+        for i, root in enumerate(self.gateway_roots):
+            self.assertEqual(root+locator,
+                             mocks[i].getopt(pycurl.URL).decode())
+        # Disk services are tried next.
+        for i in range(gateways, gateways+disks):
+            self.assertRegex(
+                mocks[i].getopt(pycurl.URL).decode(),
+                r'keep0x')
+
+    @mock.patch('pycurl.Curl')
+    def test_get_with_remote_proxy_hint(self, MockCurl):
+        MockCurl.return_value = tutil.FakeCurl.make(
+            code=200, body=b'foo', headers={'Content-Length': 3})
+        self.mock_disks_and_gateways()
+        locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3+K@xyzzy'
+        self.assertEqual(b'foo', self.keepClient.get(locator))
+        self.assertEqual('https://keep.xyzzy.arvadosapi.com/'+locator,
+                         MockCurl.return_value.getopt(pycurl.URL).decode())
+
+    @mock.patch('pycurl.Curl')
+    def test_head_with_remote_proxy_hint(self, MockCurl):
+        MockCurl.return_value = tutil.FakeCurl.make(
+            code=200, body=b'foo', headers={'Content-Length': 3})
+        self.mock_disks_and_gateways()
+        locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3+K@xyzzy'
+        self.assertEqual(True, self.keepClient.head(locator))
+        self.assertEqual('https://keep.xyzzy.arvadosapi.com/'+locator,
+                         MockCurl.return_value.getopt(pycurl.URL).decode())
+
+
+class KeepClientRetryTestMixin(object):
+    # Testing with a local Keep store won't exercise the retry behavior.
+    # Instead, our strategy is:
+    # * Create a client with one proxy specified (pointed at a black
+    #   hole), so there's no need to instantiate an API client, and
+    #   all HTTP requests come from one place.
+    # * Mock the HTTP transport layer to provide simulated responses.
+    # This lets us test the retry logic extensively without relying on any
+    # supporting servers, and prevents side effects in case something hiccups.
+    # To use this mixin, define DEFAULT_EXPECT, DEFAULT_EXCEPTION,
+    # run_method(), and TEST_PATCHER (a context manager that mocks
+    # out the appropriate HTTP request methods in the client).
+
+    PROXY_ADDR = 'http://[%s]:65535/' % (tutil.TEST_HOST,)
+    TEST_DATA = b'testdata'
+    TEST_LOCATOR = 'ef654c40ab4f1747fc699915d4f70902+8'
+
+    def setUp(self):
+        self.client_kwargs = {'proxy': self.PROXY_ADDR, 'local_store': ''}
+
+    def new_client(self, **caller_kwargs):
+        kwargs = self.client_kwargs.copy()
+        kwargs.update(caller_kwargs)
+        return arvados.KeepClient(**kwargs)
+
+    def run_method(self, *args, **kwargs):
+        raise NotImplementedError("test subclasses must define run_method")
+
+    def check_success(self, expected=None, *args, **kwargs):
+        if expected is None:
+            expected = self.DEFAULT_EXPECT
+        self.assertEqual(expected, self.run_method(*args, **kwargs))
+
+    def check_exception(self, error_class=None, *args, **kwargs):
+        if error_class is None:
+            error_class = self.DEFAULT_EXCEPTION
+        self.assertRaises(error_class, self.run_method, *args, **kwargs)
+
+    def test_immediate_success(self):
+        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 200):
+            self.check_success()
+
+    def test_retry_then_success(self):
+        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 200):
+            self.check_success(num_retries=3)
+
+    def test_exception_then_success(self):
+        with self.TEST_PATCHER(self.DEFAULT_EXPECT, Exception('mock err'), 200):
+            self.check_success(num_retries=3)
+
+    def test_no_default_retry(self):
+        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 200):
+            self.check_exception()
+
+    def test_no_retry_after_permanent_error(self):
+        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 403, 200):
+            self.check_exception(num_retries=3)
+
+    def test_error_after_retries_exhausted(self):
+        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 500, 200):
+            self.check_exception(num_retries=1)
+
+    def test_num_retries_instance_fallback(self):
+        self.client_kwargs['num_retries'] = 3
+        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 200):
+            self.check_success()
+
+
+@tutil.skip_sleep
+class KeepClientRetryGetTestCase(KeepClientRetryTestMixin, unittest.TestCase):
+    DEFAULT_EXPECT = KeepClientRetryTestMixin.TEST_DATA
+    DEFAULT_EXCEPTION = arvados.errors.KeepReadError
+    HINTED_LOCATOR = KeepClientRetryTestMixin.TEST_LOCATOR + '+K@xyzzy'
+    TEST_PATCHER = staticmethod(tutil.mock_keep_responses)
+
+    def run_method(self, locator=KeepClientRetryTestMixin.TEST_LOCATOR,
+                   *args, **kwargs):
+        return self.new_client().get(locator, *args, **kwargs)
+
+    def test_specific_exception_when_not_found(self):
+        with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 200):
+            self.check_exception(arvados.errors.NotFoundError, num_retries=3)
+
+    def test_general_exception_with_mixed_errors(self):
+        # get() should raise NotFoundError only when a high threshold
+        # of servers affirmatively report that the block was not
+        # found. This test rigs up 50/50 disagreement between two
+        # servers (one 404, one 500) and checks that the resulting
+        # error is a generic KeepReadError, not a NotFoundError.
+        client = self.new_client()
+        with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 500):
+            with self.assertRaises(arvados.errors.KeepReadError) as exc_check:
+                client.get(self.HINTED_LOCATOR)
+            self.assertNotIsInstance(
+                exc_check.exception, arvados.errors.NotFoundError,
+                "mixed errors raised NotFoundError")
+
+    def test_hint_server_can_succeed_without_retries(self):
+        with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 200, 500):
+            self.check_success(locator=self.HINTED_LOCATOR)
+
+    def test_try_next_server_after_timeout(self):
+        with tutil.mock_keep_responses(
+                (socket.timeout("timed out"), 200),
+                (self.DEFAULT_EXPECT, 200)):
+            self.check_success(locator=self.HINTED_LOCATOR)
+
+    def test_retry_data_with_wrong_checksum(self):
+        with tutil.mock_keep_responses(
+                ('baddata', 200),
+                (self.DEFAULT_EXPECT, 200)):
+            self.check_success(locator=self.HINTED_LOCATOR)
+
+
+@tutil.skip_sleep
+class KeepClientRetryHeadTestCase(KeepClientRetryTestMixin, unittest.TestCase):
+    DEFAULT_EXPECT = True
+    DEFAULT_EXCEPTION = arvados.errors.KeepReadError
+    HINTED_LOCATOR = KeepClientRetryTestMixin.TEST_LOCATOR + '+K@xyzzy'
+    TEST_PATCHER = staticmethod(tutil.mock_keep_responses)
+
+    def run_method(self, locator=KeepClientRetryTestMixin.TEST_LOCATOR,
+                   *args, **kwargs):
+        return self.new_client().head(locator, *args, **kwargs)
+
+    def test_specific_exception_when_not_found(self):
+        with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 200):
+            self.check_exception(arvados.errors.NotFoundError, num_retries=3)
+
+    def test_general_exception_with_mixed_errors(self):
+        # head() should raise NotFoundError only when a high threshold
+        # of servers affirmatively report that the block was not
+        # found. This test rigs up 50/50 disagreement between two
+        # servers (one 404, one 500) and checks that the resulting
+        # error is a generic KeepReadError, not a NotFoundError.
+        client = self.new_client()
+        with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 500):
+            with self.assertRaises(arvados.errors.KeepReadError) as exc_check:
+                client.head(self.HINTED_LOCATOR)
+            self.assertNotIsInstance(
+                exc_check.exception, arvados.errors.NotFoundError,
+                "mixed errors raised NotFoundError")
+
+    def test_hint_server_can_succeed_without_retries(self):
+        with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 200, 500):
+            self.check_success(locator=self.HINTED_LOCATOR)
+
+    def test_try_next_server_after_timeout(self):
+        with tutil.mock_keep_responses(
+                (socket.timeout("timed out"), 200),
+                (self.DEFAULT_EXPECT, 200)):
+            self.check_success(locator=self.HINTED_LOCATOR)
+
+
+@tutil.skip_sleep
+class KeepClientRetryPutTestCase(KeepClientRetryTestMixin, unittest.TestCase):
+    DEFAULT_EXPECT = KeepClientRetryTestMixin.TEST_LOCATOR
+    DEFAULT_EXCEPTION = arvados.errors.KeepWriteError
+    TEST_PATCHER = staticmethod(tutil.mock_keep_responses)
+
+    def run_method(self, data=KeepClientRetryTestMixin.TEST_DATA,
+                   copies=1, *args, **kwargs):
+        return self.new_client().put(data, copies, *args, **kwargs)
+
+    def test_do_not_send_multiple_copies_to_same_server(self):
+        with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 200):
+            self.check_exception(copies=2, num_retries=3)
+
+
+class AvoidOverreplication(unittest.TestCase, tutil.ApiClientMock):
+
+    class FakeKeepService(object):
+        def __init__(self, delay, will_succeed=False, will_raise=None, replicas=1):
+            self.delay = delay
+            self.will_succeed = will_succeed
+            self.will_raise = will_raise
+            self._result = {}
+            self._result['headers'] = {}
+            self._result['headers']['x-keep-replicas-stored'] = str(replicas)
+            self._result['body'] = 'foobar'
+
+        def put(self, data_hash, data, timeout):
+            time.sleep(self.delay)
+            if self.will_raise is not None:
+                raise self.will_raise
+            return self.will_succeed
+
+        def last_result(self):
+            if self.will_succeed:
+                return self._result
+
+        def finished(self):
+            return False
+
+    def setUp(self):
+        self.copies = 3
+        self.pool = arvados.KeepClient.KeepWriterThreadPool(
+            data = 'foo',
+            data_hash = 'acbd18db4cc2f85cedef654fccc4a4d8+3',
+            max_service_replicas = self.copies,
+            copies = self.copies
+        )
+
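+    # KeepWriterThreadPool should stop scheduling writes once `copies`
+    # replicas are confirmed: done() reports the replica count
+    # achieved, which these tests pin to self.copies on success and to
+    # self.copies-1 when too many services crash.
+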
+    def test_only_write_enough_on_success(self):
+        for i in range(10):
+            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
+            self.pool.add_task(ks, None)
+        self.pool.join()
+        self.assertEqual(self.pool.done(), self.copies)
+
+    def test_only_write_enough_on_partial_success(self):
+        for i in range(5):
+            ks = self.FakeKeepService(delay=i/10.0, will_succeed=False)
+            self.pool.add_task(ks, None)
+            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
+            self.pool.add_task(ks, None)
+        self.pool.join()
+        self.assertEqual(self.pool.done(), self.copies)
+
+    def test_only_write_enough_when_some_crash(self):
+        for i in range(5):
+            ks = self.FakeKeepService(delay=i/10.0, will_raise=Exception())
+            self.pool.add_task(ks, None)
+            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
+            self.pool.add_task(ks, None)
+        self.pool.join()
+        self.assertEqual(self.pool.done(), self.copies)
+
+    def test_fail_when_too_many_crash(self):
+        for i in range(self.copies+1):
+            ks = self.FakeKeepService(delay=i/10.0, will_raise=Exception())
+            self.pool.add_task(ks, None)
+        for i in range(self.copies-1):
+            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
+            self.pool.add_task(ks, None)
+        self.pool.join()
+        self.assertEqual(self.pool.done(), self.copies-1)
+
+
+@tutil.skip_sleep
+class RetryNeedsMultipleServices(unittest.TestCase, tutil.ApiClientMock):
+    # Test put()s that need two distinct servers to succeed, possibly
+    # requiring multiple passes through the retry loop.
+
+    def setUp(self):
+        self.api_client = self.mock_keep_services(count=2)
+        self.keep_client = arvados.KeepClient(api_client=self.api_client)
+
+    def test_success_after_exception(self):
+        with tutil.mock_keep_responses(
+                'acbd18db4cc2f85cedef654fccc4a4d8+3',
+                Exception('mock err'), 200, 200) as req_mock:
+            self.keep_client.put('foo', num_retries=1, copies=2)
+        self.assertEqual(3, req_mock.call_count)
+
+    def test_success_after_retryable_error(self):
+        with tutil.mock_keep_responses(
+                'acbd18db4cc2f85cedef654fccc4a4d8+3',
+                500, 200, 200) as req_mock:
+            self.keep_client.put('foo', num_retries=1, copies=2)
+        self.assertEqual(3, req_mock.call_count)
+
+    def test_fail_after_final_error(self):
+        # First retry loop gets a 200 (can't achieve replication by
+        # storing again on that server) and a 400 (can't retry that
+        # server at all), so we shouldn't try a third request.
+        with tutil.mock_keep_responses(
+                'acbd18db4cc2f85cedef654fccc4a4d8+3',
+                200, 400, 200) as req_mock:
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                self.keep_client.put('foo', num_retries=1, copies=2)
+        self.assertEqual(2, req_mock.call_count)
+
+
+class KeepClientAPIErrorTest(unittest.TestCase):
+    def test_api_fail(self):
+        class ApiMock(object):
+            def __getattr__(self, r):
+                if r == "api_token":
+                    return "abc"
+                elif r == "insecure":
+                    return False
+                else:
+                    raise arvados.errors.KeepReadError()
+        keep_client = arvados.KeepClient(api_client=ApiMock(),
+                                         proxy='', local_store='')
+
+        # The bug this is testing for is that if an API (not
+        # keepstore) exception is thrown as part of a get(), the next
+        # attempt to get that same block will result in a deadlock.
+        # This is why there are two get()s in a row.  Unfortunately,
+        # the failure mode for this test is that the test suite
+        # deadlocks; there isn't a good way to avoid that without
+        # adding a special case that has no use except for this test.
+
+        with self.assertRaises(arvados.errors.KeepReadError):
+            keep_client.get("acbd18db4cc2f85cedef654fccc4a4d8+3")
+        with self.assertRaises(arvados.errors.KeepReadError):
+            keep_client.get("acbd18db4cc2f85cedef654fccc4a4d8+3")
diff --git a/sdk/python/tests/test_keep_locator.py b/sdk/python/tests/test_keep_locator.py
new file mode 100644 (file)
index 0000000..e47d64d
--- /dev/null
@@ -0,0 +1,93 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from builtins import next
+from builtins import zip
+from builtins import str
+from builtins import range
+import datetime
+import itertools
+import random
+import unittest
+
+from arvados.keep import KeepLocator
+
+class ArvadosKeepLocatorTest(unittest.TestCase):
+    DEFAULT_TEST_COUNT = 10
+
+    def numstrs(fmtstr, base, exponent):
+        def genstrs(self, count=None):
+            return (fmtstr.format(random.randint(0, base ** exponent))
+                    for c in range(count or self.DEFAULT_TEST_COUNT))
+        return genstrs
+
+    checksums = numstrs('{:032x}', 16, 32)
+    sizes = numstrs('{:d}', 2, 26)
+    signatures = numstrs('{:040x}', 16, 40)
+    timestamps = numstrs('{:08x}', 16, 8)
+
+    def base_locators(self, count=DEFAULT_TEST_COUNT):
+        return ('+'.join(pair) for pair in
+                zip(self.checksums(count), self.sizes(count)))
+
+    def perm_hints(self, count=DEFAULT_TEST_COUNT):
+        for sig, ts in zip(self.signatures(count),
+                           self.timestamps(count)):
+            yield 'A{}@{}'.format(sig, ts)
+
+    def test_good_locators_returned(self):
+        for hint_gens in [(), (self.sizes(),),
+                          (self.sizes(), self.perm_hints())]:
+            for loc_data in zip(self.checksums(), *hint_gens):
+                locator = '+'.join(loc_data)
+                self.assertEqual(locator, str(KeepLocator(locator)))
+
+    def test_nonchecksum_rejected(self):
+        for badstr in ['', 'badbadbad', '8f9e68d957b504a29ba76c526c3145dj',
+                       '+8f9e68d957b504a29ba76c526c3145d9',
+                       '3+8f9e68d957b504a29ba76c526c3145d9']:
+            self.assertRaises(ValueError, KeepLocator, badstr)
+
+    def test_unknown_hints_accepted(self):
+        base = next(self.base_locators(1))
+        for weirdhint in ['Zfoo', 'Ybar234', 'Xa@b_c-372', 'W99']:
+            locator = '+'.join([base, weirdhint])
+            self.assertEqual(locator, str(KeepLocator(locator)))
+
+    def test_bad_hints_rejected(self):
+        base = next(self.base_locators(1))
+        for badhint in ['', 'A', 'lowercase', '+32']:
+            self.assertRaises(ValueError, KeepLocator,
+                              '+'.join([base, badhint]))
+
+    def test_multiple_locator_hints_accepted(self):
+        base = next(self.base_locators(1))
+        for loc_hints in itertools.permutations(['Kab1cd', 'Kef2gh', 'Kij3kl']):
+            locator = '+'.join((base,) + loc_hints)
+            self.assertEqual(locator, str(KeepLocator(locator)))
+
+    def test_str_type(self):
+        base = next(self.base_locators(1))
+        locator = KeepLocator(base)
+        self.assertEqual(type(''), type(locator.__str__()))
+
+    def test_expiry_passed(self):
+        base = next(self.base_locators(1))
+        signature = next(self.signatures(1))
+        dt1980 = datetime.datetime(1980, 1, 1)
+        dt2000 = datetime.datetime(2000, 2, 2)
+        dt2080 = datetime.datetime(2080, 3, 3)
+        locator = KeepLocator(base)
+        self.assertFalse(locator.permission_expired())
+        self.assertFalse(locator.permission_expired(dt1980))
+        self.assertFalse(locator.permission_expired(dt2080))
+        # Timestamped to 1987-01-05 18:48:32.
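+        # (0x20000000 == 536870912 seconds after the Unix epoch.)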
+        locator = KeepLocator('{}+A{}@20000000'.format(base, signature))
+        self.assertTrue(locator.permission_expired())
+        self.assertTrue(locator.permission_expired(dt2000))
+        self.assertFalse(locator.permission_expired(dt1980))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/sdk/python/tests/test_pipeline_template.py b/sdk/python/tests/test_pipeline_template.py
new file mode 100644 (file)
index 0000000..88138f3
--- /dev/null
@@ -0,0 +1,62 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+# usage example:
+#
+# ARVADOS_API_TOKEN=abc ARVADOS_API_HOST=arvados.local python -m unittest discover
+
+import unittest
+import arvados
+import apiclient
+from . import run_test_server
+
+class PipelineTemplateTest(run_test_server.TestCaseWithServers):
+    MAIN_SERVER = {}
+    KEEP_SERVER = {}
+
+    def runTest(self):
+        run_test_server.authorize_with("admin")
+        pt_uuid = arvados.api('v1').pipeline_templates().create(
+            body={'name':__file__}
+            ).execute()['uuid']
+        self.assertEqual(len(pt_uuid), 27,
+                         'Unexpected format of pipeline template UUID ("%s")'
+                         % pt_uuid)
+        components = {
+            'x': 'x',
+            '-x-': [1,2,{'foo':'bar'}],
+            'Boggis': {'Bunce': '[\'Bean\']'},
+            'SpassBox': True,
+            'spass_box': False,
+            'spass-box': [True, 'Maybe', False]
+            }
+        update_response = arvados.api('v1').pipeline_templates().update(
+            uuid=pt_uuid,
+            body={'components':components}
+            ).execute()
+        self.assertEqual('uuid' in update_response, True,
+                         'update() response did not include a uuid')
+        self.assertEqual(update_response['uuid'], pt_uuid,
+                         'update() response has a different uuid (%s, not %s)'
+                         % (update_response['uuid'], pt_uuid))
+        self.assertEqual(update_response['name'], __file__,
+                         'update() response has a different name (%s, not %s)'
+                         % (update_response['name'], __file__))
+        get_response = arvados.api('v1').pipeline_templates().get(
+            uuid=pt_uuid
+            ).execute()
+        self.assertEqual(get_response['components'], components,
+                         'components got munged by server (%s -> %s)'
+                         % (components, get_response['components']))
+        delete_response = arvados.api('v1').pipeline_templates().delete(
+            uuid=pt_uuid
+            ).execute()
+        self.assertEqual(delete_response['uuid'], pt_uuid,
+                         'delete() response has wrong uuid (%s, not %s)'
+                         % (delete_response['uuid'], pt_uuid))
+        with self.assertRaises(apiclient.errors.HttpError):
+            arvados.api('v1').pipeline_templates().get(
+                uuid=pt_uuid
+                ).execute()
diff --git a/sdk/python/tests/test_retry.py b/sdk/python/tests/test_retry.py
new file mode 100644 (file)
index 0000000..2d02005
--- /dev/null
@@ -0,0 +1,229 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from builtins import zip
+from builtins import range
+from builtins import object
+import itertools
+import unittest
+
+import arvados.errors as arv_error
+import arvados.retry as arv_retry
+import mock
+
+class RetryLoopTestMixin(object):
+    @staticmethod
+    def loop_success(result):
+        # During the tests, we use integers that look like HTTP status
+        # codes as loop results.  Then we define simplified HTTP
+        # heuristics here to decide whether the result is success (True),
+        # permanent failure (False), or temporary failure (None).
+        if result < 400:
+            return True
+        elif result < 500:
+            return False
+        else:
+            return None
+
+    def run_loop(self, num_retries, *results, **kwargs):
+        responses = itertools.chain(results, itertools.repeat(None))
+        retrier = arv_retry.RetryLoop(num_retries, self.loop_success,
+                                      **kwargs)
+        for tries_left, response in zip(retrier, responses):
+            retrier.save_result(response)
+        return retrier
+
+    def check_result(self, retrier, expect_success, last_code):
+        self.assertIs(retrier.success(), expect_success,
+                      "loop success flag is incorrect")
+        self.assertEqual(last_code, retrier.last_result())
+
+
+class RetryLoopTestCase(unittest.TestCase, RetryLoopTestMixin):
+    def test_zero_retries_and_success(self):
+        retrier = self.run_loop(0, 200)
+        self.check_result(retrier, True, 200)
+
+    def test_zero_retries_and_tempfail(self):
+        retrier = self.run_loop(0, 500, 501)
+        self.check_result(retrier, None, 500)
+
+    def test_zero_retries_and_permfail(self):
+        retrier = self.run_loop(0, 400, 201)
+        self.check_result(retrier, False, 400)
+
+    def test_one_retry_with_immediate_success(self):
+        retrier = self.run_loop(1, 200, 201)
+        self.check_result(retrier, True, 200)
+
+    def test_one_retry_with_delayed_success(self):
+        retrier = self.run_loop(1, 500, 201)
+        self.check_result(retrier, True, 201)
+
+    def test_one_retry_with_no_success(self):
+        retrier = self.run_loop(1, 500, 501, 502)
+        self.check_result(retrier, None, 501)
+
+    def test_one_retry_but_permfail(self):
+        retrier = self.run_loop(1, 400, 201)
+        self.check_result(retrier, False, 400)
+
+    def test_two_retries_with_immediate_success(self):
+        retrier = self.run_loop(2, 200, 201, 202)
+        self.check_result(retrier, True, 200)
+
+    def test_two_retries_with_success_after_one(self):
+        retrier = self.run_loop(2, 500, 201, 502)
+        self.check_result(retrier, True, 201)
+
+    def test_two_retries_with_success_after_two(self):
+        retrier = self.run_loop(2, 500, 501, 202, 503)
+        self.check_result(retrier, True, 202)
+
+    def test_two_retries_with_no_success(self):
+        retrier = self.run_loop(2, 500, 501, 502, 503)
+        self.check_result(retrier, None, 502)
+
+    def test_two_retries_with_permfail(self):
+        retrier = self.run_loop(2, 500, 401, 202)
+        self.check_result(retrier, False, 401)
+
+    def test_save_result_before_start_is_error(self):
+        retrier = arv_retry.RetryLoop(0)
+        self.assertRaises(arv_error.AssertionError, retrier.save_result, 1)
+
+    def test_save_result_after_end_is_error(self):
+        retrier = arv_retry.RetryLoop(0)
+        for count in retrier:
+            pass
+        self.assertRaises(arv_error.AssertionError, retrier.save_result, 1)
+
+
+@mock.patch('time.time', side_effect=itertools.count())
+@mock.patch('time.sleep')
+class RetryLoopBackoffTestCase(unittest.TestCase, RetryLoopTestMixin):
+    def run_loop(self, num_retries, *results, **kwargs):
+        kwargs.setdefault('backoff_start', 8)
+        return super(RetryLoopBackoffTestCase, self).run_loop(
+            num_retries, *results, **kwargs)
+
+    def check_backoff(self, sleep_mock, sleep_count, multiplier=1):
+        # Figure out how much time we actually spent sleeping.
+        sleep_times = [arglist[0][0] for arglist in sleep_mock.call_args_list
+                       if arglist[0][0] > 0]
+        self.assertEqual(sleep_count, len(sleep_times),
+                         "loop did not back off correctly")
+        last_wait = 0
+        for this_wait in sleep_times:
+            self.assertGreater(this_wait, last_wait * multiplier,
+                               "loop did not grow backoff times correctly")
+            last_wait = this_wait
+
+    def test_no_backoff_with_no_retries(self, sleep_mock, time_mock):
+        self.run_loop(0, 500, 201)
+        self.check_backoff(sleep_mock, 0)
+
+    def test_no_backoff_after_success(self, sleep_mock, time_mock):
+        self.run_loop(1, 200, 501)
+        self.check_backoff(sleep_mock, 0)
+
+    def test_no_backoff_after_permfail(self, sleep_mock, time_mock):
+        self.run_loop(1, 400, 201)
+        self.check_backoff(sleep_mock, 0)
+
+    def test_backoff_before_success(self, sleep_mock, time_mock):
+        self.run_loop(5, 500, 501, 502, 203, 504)
+        self.check_backoff(sleep_mock, 3)
+
+    def test_backoff_before_permfail(self, sleep_mock, time_mock):
+        self.run_loop(5, 500, 501, 502, 403, 504)
+        self.check_backoff(sleep_mock, 3)
+
+    def test_backoff_all_tempfail(self, sleep_mock, time_mock):
+        self.run_loop(3, 500, 501, 502, 503, 504)
+        self.check_backoff(sleep_mock, 3)
+
+    def test_backoff_multiplier(self, sleep_mock, time_mock):
+        self.run_loop(5, 500, 501, 502, 503, 504, 505,
+                      backoff_start=5, backoff_growth=10, max_wait=1000000000)
+        self.check_backoff(sleep_mock, 5, 9)
+
+
+class CheckHTTPResponseSuccessTestCase(unittest.TestCase):
+    def results_map(self, *codes):
+        for code in codes:
+            yield code, arv_retry.check_http_response_success(code)
+
+    def check(assert_name):
+        def check_method(self, expected, *codes):
+            assert_func = getattr(self, assert_name)
+            for code, actual in self.results_map(*codes):
+                assert_func(expected, actual,
+                            "{} status flagged {}".format(code, actual))
+                if assert_name != 'assertIs':
+                    self.assertTrue(
+                        actual is True or actual is False or actual is None,
+                        "{} status returned {}".format(code, actual))
+        return check_method
+
+    check_is = check('assertIs')
+    check_is_not = check('assertIsNot')
+
+    def test_obvious_successes(self):
+        self.check_is(True, *list(range(200, 207)))
+
+    def test_obvious_stops(self):
+        self.check_is(False, 424, 426, 428, 431,
+                      *list(range(400, 408)) + list(range(410, 420)))
+
+    def test_obvious_retries(self):
+        self.check_is(None, 500, 502, 503, 504)
+
+    def test_4xx_retries(self):
+        self.check_is(None, 408, 409, 422, 423)
+
+    def test_5xx_failures(self):
+        self.check_is(False, 501, *list(range(505, 512)))
+
+    def test_1xx_not_retried(self):
+        self.check_is_not(None, 100, 101)
+
+    def test_redirects_not_retried(self):
+        self.check_is_not(None, *list(range(300, 309)))
+
+    def test_wacky_code_retries(self):
+        self.check_is(None, 0, 99, 600, -200)
+
+
+class RetryMethodTestCase(unittest.TestCase):
+    class Tester(object):
+        def __init__(self):
+            self.num_retries = 1
+
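+        # retry_method substitutes self.num_retries when the caller
+        # leaves num_retries as None; it must be passed by keyword, as
+        # the tests below exercise.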
+        @arv_retry.retry_method
+        def check(self, a, num_retries=None, z=0):
+            return (a, num_retries, z)
+
+
+    def test_positional_arg_raises(self):
+        # unsupported use -- make sure we raise rather than ignore
+        with self.assertRaises(TypeError):
+            self.assertEqual((3, 2, 0), self.Tester().check(3, 2))
+
+    def test_keyword_arg_passed(self):
+        self.assertEqual((4, 3, 0), self.Tester().check(num_retries=3, a=4))
+
+    def test_not_specified(self):
+        self.assertEqual((0, 1, 0), self.Tester().check(0))
+
+    def test_not_specified_with_other_kwargs(self):
+        self.assertEqual((1, 1, 1), self.Tester().check(1, z=1))
+
+    def test_bad_call(self):
+        with self.assertRaises(TypeError):
+            self.Tester().check(num_retries=2)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/sdk/python/tests/test_retry_job_helpers.py b/sdk/python/tests/test_retry_job_helpers.py
new file mode 100644 (file)
index 0000000..b9c87a6
--- /dev/null
@@ -0,0 +1,109 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from builtins import object
+import mock
+import os
+import unittest
+import hashlib
+from . import run_test_server
+import json
+import arvados
+from . import arvados_testutil as tutil
+from apiclient import http as apiclient_http
+
+
+@tutil.skip_sleep
+class ApiClientRetryTestMixin(object):
+
+    TEST_UUID = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
+    TEST_LOCATOR = 'd41d8cd98f00b204e9800998ecf8427e+0'
+
+    @classmethod
+    def setUpClass(cls):
+        run_test_server.run()
+
+    def setUp(self):
+        # Patch arvados.api() to return our mock API, so we can mock
+        # its http requests.
+        self.api_client = arvados.api('v1', cache=False)
+        self.api_patch = mock.patch('arvados.api', return_value=self.api_client)
+        self.api_patch.start()
+
+    def tearDown(self):
+        self.api_patch.stop()
+
+    def run_method(self):
+        raise NotImplementedError("test subclasses must define run_method")
+
+    def test_immediate_success(self):
+        with tutil.mock_api_responses(self.api_client, '{}', [200]):
+            self.run_method()
+
+    def test_immediate_failure(self):
+        with tutil.mock_api_responses(self.api_client, '{}', [400]), \
+             self.assertRaises(self.DEFAULT_EXCEPTION):
+            self.run_method()
+
+    def test_retry_then_success(self):
+        with tutil.mock_api_responses(self.api_client, '{}', [500, 200]):
+            self.run_method()
+
+    def test_error_after_default_retries_exhausted(self):
+        with tutil.mock_api_responses(
+                self.api_client, '{}',
+                [500, 500, 500, 500, 500, 500, 200]), \
+             self.assertRaises(self.DEFAULT_EXCEPTION):
+            self.run_method()
+
+    def test_no_retry_after_immediate_success(self):
+        with tutil.mock_api_responses(self.api_client, '{}', [200, 400]):
+            self.run_method()
+
+
+class CurrentJobTestCase(ApiClientRetryTestMixin, unittest.TestCase):
+
+    DEFAULT_EXCEPTION = arvados.errors.ApiError
+
+    def setUp(self):
+        super(CurrentJobTestCase, self).setUp()
+        os.environ['JOB_UUID'] = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
+        os.environ['JOB_WORK'] = '.'
+
+    def tearDown(self):
+        del os.environ['JOB_UUID']
+        del os.environ['JOB_WORK']
+        arvados._current_job = None
+        super(CurrentJobTestCase, self).tearDown()
+
+    def run_method(self):
+        arvados.current_job()
+
+
+class CurrentTaskTestCase(ApiClientRetryTestMixin, unittest.TestCase):
+
+    DEFAULT_EXCEPTION = arvados.errors.ApiError
+
+    def setUp(self):
+        super(CurrentTaskTestCase, self).setUp()
+        os.environ['TASK_UUID'] = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
+        os.environ['TASK_WORK'] = '.'
+
+    def tearDown(self):
+        del os.environ['TASK_UUID']
+        del os.environ['TASK_WORK']
+        arvados._current_task = None
+        super(CurrentTaskTestCase, self).tearDown()
+
+    def run_method(self):
+        arvados.current_task()
+
+
+class TaskSetOutputTestCase(CurrentTaskTestCase, unittest.TestCase):
+
+    DEFAULT_EXCEPTION = arvados.errors.ApiError
+
+    def tearDown(self):
+        super(TaskSetOutputTestCase, self).tearDown()
+        run_test_server.reset()
+
+    def run_method(self, locator=ApiClientRetryTestMixin.TEST_LOCATOR):
+        arvados.task_set_output({'uuid': self.TEST_UUID}, s=locator)
diff --git a/sdk/python/tests/test_sdk.py b/sdk/python/tests/test_sdk.py
new file mode 100644 (file)
index 0000000..41add57
--- /dev/null
@@ -0,0 +1,47 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import mock
+import os
+import unittest
+
+import arvados
+import arvados.collection
+
+class TestSDK(unittest.TestCase):
+
+    @mock.patch('arvados.current_task')
+    @mock.patch('arvados.current_job')
+    def test_one_task_per_input_file_normalize(self, mock_job, mock_task):
+        mock_api = mock.MagicMock()
+
+        # This manifest will be reduced from three lines to one when it is
+        # normalized.
+        nonnormalized_manifest = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
+"""
+        dummy_hash = 'ffffffffffffffffffffffffffffffff+0'
+
+        mock_job.return_value = {
+            'uuid': 'none',
+            'script_parameters': {
+                'input': dummy_hash
+            }
+        }
+        mock_task.return_value = {
+            'uuid': 'none',
+            'sequence': 0,
+        }
+        # Mock the API client to return a collection with a nonnormalized manifest.
+        mock_api.collections().get().execute.return_value = {
+            'uuid': 'zzzzz-4zz18-mockcollection0',
+            'portable_data_hash': dummy_hash,
+            'manifest_text': nonnormalized_manifest,
+        }
+
+        # Because one_task_per_input_file normalizes this collection,
+        # it should now create only one job task and not three.
+        arvados.job_setup.one_task_per_input_file(and_end_task=False, api_client=mock_api)
+        mock_api.job_tasks().create().execute.assert_called_once_with()
diff --git a/sdk/python/tests/test_stream.py b/sdk/python/tests/test_stream.py
new file mode 100644 (file)
index 0000000..dc84a03
--- /dev/null
@@ -0,0 +1,312 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import
+from builtins import object
+import bz2
+import gzip
+import io
+import mock
+import os
+import unittest
+import hashlib
+
+import arvados
+from arvados import StreamReader, StreamFileReader
+from arvados._ranges import Range
+
+from . import arvados_testutil as tutil
+from . import run_test_server
+
+class StreamFileReaderTestCase(unittest.TestCase):
+    def make_count_reader(self):
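+        # Each Range is read here as (offset in the concatenated
+        # stream, offset in the file, length), so the file assembles
+        # to '123456789' from the three 5-byte blocks below. (Field
+        # meanings inferred from the seek/read tests in this class.)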
+        stream = tutil.MockStreamReader('.', '01234', '34567', '67890')
+        return StreamFileReader(stream, [Range(1, 0, 3), Range(6, 3, 3), Range(11, 6, 3)],
+                                'count.txt')
+
+    def test_read_block_crossing_behavior(self):
+        # read() calls will be aligned on block boundaries - see #3663.
+        sfile = self.make_count_reader()
+        self.assertEqual(b'123', sfile.read(10))
+
+    def test_small_read(self):
+        sfile = self.make_count_reader()
+        self.assertEqual(b'12', sfile.read(2))
+
+    def test_successive_reads(self):
+        sfile = self.make_count_reader()
+        for expect in [b'123', b'456', b'789', b'']:
+            self.assertEqual(expect, sfile.read(10))
+
+    def test_readfrom_spans_blocks(self):
+        sfile = self.make_count_reader()
+        self.assertEqual(b'6789', sfile.readfrom(5, 12))
+
+    def test_small_readfrom_spanning_blocks(self):
+        sfile = self.make_count_reader()
+        self.assertEqual(b'2345', sfile.readfrom(1, 4))
+
+    def test_readall(self):
+        sfile = self.make_count_reader()
+        self.assertEqual(b'123456789', b''.join(sfile.readall()))
+
+    def test_one_arg_seek(self):
+        self.test_absolute_seek([])
+
+    def test_absolute_seek(self, args=[os.SEEK_SET]):
+        sfile = self.make_count_reader()
+        sfile.seek(6, *args)
+        self.assertEqual(b'78', sfile.read(2))
+        sfile.seek(4, *args)
+        self.assertEqual(b'56', sfile.read(2))
+
+    def test_relative_seek(self, args=[os.SEEK_CUR]):
+        sfile = self.make_count_reader()
+        self.assertEqual(b'12', sfile.read(2))
+        sfile.seek(2, *args)
+        self.assertEqual(b'56', sfile.read(2))
+
+    def test_end_seek(self):
+        sfile = self.make_count_reader()
+        sfile.seek(-6, os.SEEK_END)
+        self.assertEqual(b'45', sfile.read(2))
+
+    def test_seek_min_zero(self):
+        sfile = self.make_count_reader()
+        self.assertEqual(0, sfile.tell())
+        with self.assertRaises(IOError):
+            sfile.seek(-2, os.SEEK_SET)
+        self.assertEqual(0, sfile.tell())
+
+    def test_seek_max_size(self):
+        sfile = self.make_count_reader()
+        sfile.seek(2, os.SEEK_END)
+        # POSIX permits seeking past end of file.
+        self.assertEqual(11, sfile.tell())
+
+    def test_size(self):
+        self.assertEqual(9, self.make_count_reader().size())
+
+    def test_tell_after_block_read(self):
+        sfile = self.make_count_reader()
+        sfile.read(5)
+        self.assertEqual(3, sfile.tell())
+
+    def test_tell_after_small_read(self):
+        sfile = self.make_count_reader()
+        sfile.read(1)
+        self.assertEqual(1, sfile.tell())
+
+    def test_no_read_after_close(self):
+        sfile = self.make_count_reader()
+        sfile.close()
+        self.assertRaises(ValueError, sfile.read, 2)
+
+    def test_context(self):
+        with self.make_count_reader() as sfile:
+            self.assertFalse(sfile.closed, "reader is closed inside context")
+            self.assertEqual(b'12', sfile.read(2))
+        self.assertTrue(sfile.closed, "reader is open after context")
+
+    def make_newlines_reader(self):
+        stream = tutil.MockStreamReader('.', 'one\ntwo\n\nth', 'ree\nfour\n\n')
+        return StreamFileReader(stream, [Range(0, 0, 11), Range(11, 11, 10)], 'count.txt')
+
+    def check_lines(self, actual):
+        self.assertEqual(['one\n', 'two\n', '\n', 'three\n', 'four\n', '\n'],
+                         actual)
+
+    def test_readline(self):
+        reader = self.make_newlines_reader()
+        actual = []
+        while True:
+            data = reader.readline()
+            if not data:
+                break
+            actual.append(data)
+        self.check_lines(actual)
+
+    def test_readlines(self):
+        self.check_lines(self.make_newlines_reader().readlines())
+
+    def test_iteration(self):
+        self.check_lines(list(iter(self.make_newlines_reader())))
+
+    def test_readline_size(self):
+        reader = self.make_newlines_reader()
+        self.assertEqual('on', reader.readline(2))
+        self.assertEqual('e\n', reader.readline(4))
+        self.assertEqual('two\n', reader.readline(6))
+        self.assertEqual('\n', reader.readline(8))
+        self.assertEqual('thre', reader.readline(4))
+
+    def test_readlines_sizehint(self):
+        result = self.make_newlines_reader().readlines(8)
+        self.assertEqual(['one\n', 'two\n'], result[:2])
+        self.assertNotIn('three\n', result)
+
+    def test_name_attribute(self):
+        # Test both .name and .name() (for backward compatibility)
+        stream = tutil.MockStreamReader()
+        sfile = StreamFileReader(stream, [Range(0, 0, 0)], 'nametest')
+        self.assertEqual('nametest', sfile.name)
+        self.assertEqual('nametest', sfile.name())
+
+    def check_decompressed_name(self, filename, expect):
+        stream = tutil.MockStreamReader('.', '')
+        reader = StreamFileReader(stream, [Range(0, 0, 0)], filename)
+        self.assertEqual(expect, reader.decompressed_name())
+
+    def test_decompressed_name_uncompressed_file(self):
+        self.check_decompressed_name('test.log', 'test.log')
+
+    def test_decompressed_name_gzip_file(self):
+        self.check_decompressed_name('test.log.gz', 'test.log')
+
+    def test_decompressed_name_bz2_file(self):
+        self.check_decompressed_name('test.log.bz2', 'test.log')
+
+    def check_decompression(self, compress_ext, compress_func):
+        test_text = b'decompression\ntest\n'
+        test_data = compress_func(test_text)
+        stream = tutil.MockStreamReader('.', test_data)
+        reader = StreamFileReader(stream, [Range(0, 0, len(test_data))],
+                                  'test.' + compress_ext)
+        self.assertEqual(test_text, b''.join(reader.readall_decompressed()))
+
+    @staticmethod
+    def gzip_compress(data):
+        compressed_data = io.BytesIO()
+        with gzip.GzipFile(fileobj=compressed_data, mode='wb') as gzip_file:
+            gzip_file.write(data)
+        return compressed_data.getvalue()
+
+    def test_no_decompression(self):
+        self.check_decompression('log', lambda s: s)
+
+    def test_gzip_decompression(self):
+        self.check_decompression('gz', self.gzip_compress)
+
+    def test_bz2_decompression(self):
+        self.check_decompression('bz2', bz2.compress)
+
+    def test_readline_then_readlines(self):
+        reader = self.make_newlines_reader()
+        data = reader.readline()
+        self.assertEqual('one\n', data)
+        data = reader.readlines()
+        self.assertEqual(['two\n', '\n', 'three\n', 'four\n', '\n'], data)
+
+    def test_readline_then_readall(self):
+        reader = self.make_newlines_reader()
+        data = reader.readline()
+        self.assertEqual('one\n', data)
+        self.assertEqual(b''.join([b'two\n', b'\n', b'three\n', b'four\n', b'\n']), b''.join(reader.readall()))
+
+
+class StreamRetryTestMixin(object):
+    # Define reader_for(coll_name, **kwargs)
+    # and read_for_test(reader, size, **kwargs).
+    API_COLLECTIONS = run_test_server.fixture('collections')
+
+    def keep_client(self):
+        return arvados.KeepClient(proxy='http://[%s]:1' % (tutil.TEST_HOST,),
+                                  local_store='')
+
+    def manifest_for(self, coll_name):
+        return self.API_COLLECTIONS[coll_name]['manifest_text']
+
+    @tutil.skip_sleep
+    def test_success_without_retries(self):
+        with tutil.mock_keep_responses('bar', 200):
+            reader = self.reader_for('bar_file')
+            self.assertEqual(b'bar', self.read_for_test(reader, 3))
+
+    @tutil.skip_sleep
+    def test_read_no_default_retry(self):
+        with tutil.mock_keep_responses('', 500):
+            reader = self.reader_for('user_agreement')
+            with self.assertRaises(arvados.errors.KeepReadError):
+                self.read_for_test(reader, 10)
+
+    @tutil.skip_sleep
+    def test_read_with_instance_retries(self):
+        with tutil.mock_keep_responses('foo', 500, 200):
+            reader = self.reader_for('foo_file', num_retries=3)
+            self.assertEqual(b'foo', self.read_for_test(reader, 3))
+
+    @tutil.skip_sleep
+    def test_read_with_method_retries(self):
+        with tutil.mock_keep_responses('foo', 500, 200):
+            reader = self.reader_for('foo_file')
+            self.assertEqual(b'foo',
+                             self.read_for_test(reader, 3, num_retries=3))
+
+    @tutil.skip_sleep
+    def test_read_instance_retries_exhausted(self):
+        with tutil.mock_keep_responses('bar', 500, 500, 500, 500, 200):
+            reader = self.reader_for('bar_file', num_retries=3)
+            with self.assertRaises(arvados.errors.KeepReadError):
+                self.read_for_test(reader, 3)
+
+    @tutil.skip_sleep
+    def test_read_method_retries_exhausted(self):
+        with tutil.mock_keep_responses('bar', 500, 500, 500, 500, 200):
+            reader = self.reader_for('bar_file')
+            with self.assertRaises(arvados.errors.KeepReadError):
+                self.read_for_test(reader, 3, num_retries=3)
+
+    @tutil.skip_sleep
+    def test_method_retries_take_precedence(self):
+        with tutil.mock_keep_responses('', 500, 500, 500, 200):
+            reader = self.reader_for('user_agreement', num_retries=10)
+            with self.assertRaises(arvados.errors.KeepReadError):
+                self.read_for_test(reader, 10, num_retries=1)
+
+
+class StreamReaderTestCase(unittest.TestCase, StreamRetryTestMixin):
+    def reader_for(self, coll_name, **kwargs):
+        return StreamReader(self.manifest_for(coll_name).split(),
+                            self.keep_client(), **kwargs)
+
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return reader.readfrom(0, byte_count, **kwargs)
+
+    def test_manifest_text_without_keep_client(self):
+        mtext = self.manifest_for('multilevel_collection_1')
+        for line in mtext.rstrip('\n').split('\n'):
+            reader = StreamReader(line.split())
+            self.assertEqual(line + '\n', reader.manifest_text())
+
+
+class StreamFileReadTestCase(unittest.TestCase, StreamRetryTestMixin):
+    def reader_for(self, coll_name, **kwargs):
+        return StreamReader(self.manifest_for(coll_name).split(),
+                            self.keep_client(), **kwargs).all_files()[0]
+
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return reader.read(byte_count, **kwargs)
+
+
+class StreamFileReadFromTestCase(StreamFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return reader.readfrom(0, byte_count, **kwargs)
+
+
+class StreamFileReadAllTestCase(StreamFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return b''.join(reader.readall(**kwargs))
+
+
+class StreamFileReadAllDecompressedTestCase(StreamFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return b''.join(reader.readall_decompressed(**kwargs))
+
+
+class StreamFileReadlinesTestCase(StreamFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return ''.join(reader.readlines(**kwargs)).encode()
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/sdk/python/tests/test_util.py b/sdk/python/tests/test_util.py
new file mode 100644 (file)
index 0000000..87074db
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import subprocess
+import unittest
+
+import arvados
+
+class MkdirDashPTest(unittest.TestCase):
+    def setUp(self):
+        try:
+            os.mkdir('./tmp')
+        except OSError:
+            pass
+    def tearDown(self):
+        try:
+            os.unlink('./tmp/bar')
+            os.rmdir('./tmp/foo')
+            os.rmdir('./tmp')
+        except OSError:
+            pass
+    def runTest(self):
+        arvados.util.mkdir_dash_p('./tmp/foo')
+        with open('./tmp/bar', 'wb') as f:
+            f.write(b'bar')
+        self.assertRaises(OSError, arvados.util.mkdir_dash_p, './tmp/bar')
+
+
+class RunCommandTestCase(unittest.TestCase):
+    def test_success(self):
+        stdout, stderr = arvados.util.run_command(['echo', 'test'],
+                                                  stderr=subprocess.PIPE)
+        self.assertEqual("test\n".encode(), stdout)
+        self.assertEqual("".encode(), stderr)
+
+    def test_failure(self):
+        with self.assertRaises(arvados.errors.CommandFailedError):
+            arvados.util.run_command(['false'])
diff --git a/sdk/ruby/.gitignore b/sdk/ruby/.gitignore
new file mode 100644 (file)
index 0000000..1a58eb0
--- /dev/null
@@ -0,0 +1,2 @@
+Gemfile.lock
+arvados*gem
diff --git a/sdk/ruby/Gemfile b/sdk/ruby/Gemfile
new file mode 100644 (file)
index 0000000..4ea8d11
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+source 'https://rubygems.org'
+gemspec
+gem 'rake'
+gem 'minitest', '>= 5.0.0'
diff --git a/sdk/ruby/LICENSE-2.0.txt b/sdk/ruby/LICENSE-2.0.txt
new file mode 100644 (file)
index 0000000..d645695
--- /dev/null
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/sdk/ruby/README b/sdk/ruby/README
new file mode 100644 (file)
index 0000000..f72a3d1
--- /dev/null
@@ -0,0 +1,28 @@
+
+This directory contains the Ruby SDK.
+
+## Installation instructions
+
+You can build the gem with the following command:
+
+  gem build arvados.gemspec
+
+and install it like this:
+
+  gem install ./arvados-0.1.0.gem
+
+## Code example
+
+  #!/usr/bin/env ruby
+
+  ENV['ARVADOS_API_HOST'] = 'arvados.local'
+  ENV['ARVADOS_API_TOKEN'] = 'qwertyuiopasdfghjklzxcvbnm1234567890abcdefghijklmn'
+
+  require 'arvados'
+  arv = Arvados.new( { :suppress_ssl_warnings => false } )
+
+  pt_list = arv.pipeline_template.list(where:{})
+  puts pt_list[:items].first.inspect
+
+  pt = arv.pipeline_template.get(uuid:"9zb4a-p5p6p-fkkbrl98u3pk87m")
+  puts pt.inspect
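+
+(The API host, token, and pipeline template UUID above are placeholders;
+substitute values from your own Arvados cluster.)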
diff --git a/sdk/ruby/Rakefile b/sdk/ruby/Rakefile
new file mode 100644 (file)
index 0000000..d9aa7ed
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require 'rake/testtask'
+
+Rake::TestTask.new do |t|
+  t.libs << 'test'
+end
+
+desc 'Run tests'
+task default: :test
diff --git a/sdk/ruby/arvados.gemspec b/sdk/ruby/arvados.gemspec
new file mode 100644 (file)
index 0000000..da91930
--- /dev/null
@@ -0,0 +1,39 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+if not File.exist?('/usr/bin/git') then
+  STDERR.puts "\nGit binary not found, aborting. Please install git and run gem build from a checked out copy of the git repository.\n\n"
+  exit 1
+end
+
+git_latest_tag = `git tag -l |sort -V -r |head -n1`
+git_latest_tag = git_latest_tag.encode('utf-8').strip
+git_timestamp, git_hash = `git log -n1 --first-parent --format=%ct:%H .`.chomp.split(":")
+git_timestamp = Time.at(git_timestamp.to_i).utc
+
+Gem::Specification.new do |s|
+  s.name        = 'arvados'
+  s.version     = "#{git_latest_tag}.#{git_timestamp.strftime('%Y%m%d%H%M%S')}"
+  s.date        = git_timestamp.strftime("%Y-%m-%d")
+  s.summary     = "Arvados client library"
+  s.description = "Arvados client library, git commit #{git_hash}"
+  s.authors     = ["Arvados Authors"]
+  s.email       = 'gem-dev@curoverse.com'
+  s.licenses    = ['Apache-2.0']
+  s.files       = ["lib/arvados.rb", "lib/arvados/google_api_client.rb",
+                   "lib/arvados/collection.rb", "lib/arvados/keep.rb",
+                   "README", "LICENSE-2.0.txt"]
+  s.required_ruby_version = '>= 1.8.7'
+  s.add_dependency('activesupport', '>= 3')
+  s.add_dependency('andand', '~> 1.3', '>= 1.3.3')
+  # Our google-api-client dependency used to be < 0.9, but that could be
+  # satisfied by the buggy 0.9.pre*.  https://dev.arvados.org/issues/9213
+  s.add_dependency('cure-google-api-client', '>= 0.7', '< 0.8.9')
+  # work around undeclared dependency on i18n in some activesupport 3.x.x:
+  s.add_dependency('i18n', '~> 0')
+  s.add_dependency('json', '>= 1.7.7', '<3')
+  s.add_runtime_dependency('jwt', '<2', '>= 0.1.5')
+  s.homepage    =
+    'https://arvados.org'
+end
diff --git a/sdk/ruby/lib/arvados.rb b/sdk/ruby/lib/arvados.rb
new file mode 100644 (file)
index 0000000..a89c21b
--- /dev/null
@@ -0,0 +1,270 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require 'rubygems'
+require 'active_support/inflector'
+require 'json'
+require 'fileutils'
+require 'andand'
+
+require 'arvados/google_api_client'
+
+ActiveSupport::Inflector.inflections do |inflect|
+  inflect.irregular 'specimen', 'specimens'
+  inflect.irregular 'human', 'humans'
+end
+
+class Arvados
+  class TransactionFailedError < StandardError
+  end
+
+  @@config = nil
+  @@debuglevel = 0
+  class << self
+    attr_accessor :debuglevel
+  end
+
+  def initialize(opts={})
+    @application_version ||= 0.0
+    @application_name ||= File.split($0).last
+
+    @arvados_api_version = opts[:api_version] || 'v1'
+
+    @arvados_api_host = opts[:api_host] ||
+      config['ARVADOS_API_HOST'] or
+      raise "#{$0}: no :api_host or ENV[ARVADOS_API_HOST] provided."
+    @arvados_api_token = opts[:api_token] ||
+      config['ARVADOS_API_TOKEN'] or
+      raise "#{$0}: no :api_token or ENV[ARVADOS_API_TOKEN] provided."
+
+    if (opts[:suppress_ssl_warnings] or
+        %w(1 true yes).index(config['ARVADOS_API_HOST_INSECURE'].
+                             andand.downcase))
+      suppress_warnings do
+        OpenSSL::SSL.const_set 'VERIFY_PEER', OpenSSL::SSL::VERIFY_NONE
+      end
+    end
+
+    # Define a class and an Arvados instance method for each Arvados
+    # resource. After this, self.job will return Arvados::Job;
+    # self.job.new() and self.job.find() will do what you want.
+    _arvados = self
+    namespace_class = Arvados.const_set "A#{self.object_id}", Class.new
+    self.arvados_api.schemas.each do |classname, schema|
+      next if classname.match /List$/
+      klass = Class.new(Arvados::Model) do
+        def self.arvados
+          @arvados
+        end
+        def self.api_models_sym
+          @api_models_sym
+        end
+        def self.api_model_sym
+          @api_model_sym
+        end
+      end
+
+      # Define the resource methods (create, get, update, delete, ...)
+      self.
+        arvados_api.
+        send(classname.underscore.split('/').last.pluralize.to_sym).
+        discovered_methods.
+        each do |method|
+        class << klass; self; end.class_eval do
+          define_method method.name do |*params|
+            self.api_exec method, *params
+          end
+        end
+      end
+
+      # Give the new class access to the API
+      klass.instance_eval do
+        @arvados = _arvados
+        # TODO: Pull these from the discovery document instead.
+        @api_models_sym = classname.underscore.split('/').last.pluralize.to_sym
+        @api_model_sym = classname.underscore.split('/').last.to_sym
+      end
+
+      # Create the new class in namespace_class so it doesn't
+      # interfere with classes created by other Arvados objects. The
+      # result looks like Arvados::A26949680::Job.
+      namespace_class.const_set classname, klass
+
+      self.class.class_eval do
+        define_method classname.underscore do
+          klass
+        end
+      end
+    end
+  end
+
+  def client
+    @client ||= Google::APIClient.
+      new(:host => @arvados_api_host,
+          :application_name => @application_name,
+          :application_version => @application_version.to_s)
+  end
+
+  def arvados_api
+    @arvados_api ||= self.client.discovered_api('arvados', @arvados_api_version)
+  end
+
+  def self.debuglog(message, verbosity=1)
+    $stderr.puts "#{File.split($0).last} #{$$}: #{message}" if @@debuglevel >= verbosity
+  end
+
+  def debuglog *args
+    self.class.debuglog *args
+  end
+
+  def config(config_file_path="~/.config/arvados/settings.conf")
+    return @@config if @@config
+
+    # Initialize config settings with environment variables.
+    config = {}
+    config['ARVADOS_API_HOST']          = ENV['ARVADOS_API_HOST']
+    config['ARVADOS_API_TOKEN']         = ENV['ARVADOS_API_TOKEN']
+    config['ARVADOS_API_HOST_INSECURE'] = ENV['ARVADOS_API_HOST_INSECURE']
+
+    if config['ARVADOS_API_HOST'] and config['ARVADOS_API_TOKEN']
+      # Environment variables take precedence over the config file, so
+      # there is no point reading the config file. If the environment
+      # specifies a _HOST without asking for _INSECURE, we certainly
+      # shouldn't give the config file a chance to create a
+      # system-wide _INSECURE state for this user.
+      #
+      # Note: If we start using additional configuration settings from
+      # this file in the future, we might have to read the file anyway
+      # instead of returning here.
+      return (@@config = config)
+    end
+
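+    # The settings file holds one VAR=VALUE assignment per line; `#`
+    # comments and blank lines are ignored, as the parsing loop below
+    # shows.  A hypothetical example:
+    #
+    #   ARVADOS_API_HOST=arvados.example.com
+    #   ARVADOS_API_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx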
+    begin
+      expanded_path = File.expand_path config_file_path
+      if File.exist? expanded_path
+        # Load settings from the config file.
+        lineno = 0
+        File.open(expanded_path).each do |line|
+          lineno = lineno + 1
+          # skip comments and blank lines
+          next if line.match('^\s*#') or not line.match('\S')
+          var, val = line.chomp.split('=', 2)
+          var.strip!
+          val.strip!
+          # allow environment settings to override config files.
+          if !var.empty? and val
+            config[var] ||= val
+          else
+            debuglog "#{expanded_path}: #{lineno}: could not parse `#{line}'", 0
+          end
+        end
+      end
+    rescue StandardError => e
+      debuglog "Ignoring error reading #{config_file_path}: #{e}", 0
+    end
+
+    @@config = config
+  end
+
+  class Model
+    def self.arvados_api
+      arvados.arvados_api
+    end
+    def self.client
+      arvados.client
+    end
+    def self.debuglog(*args)
+      arvados.class.debuglog *args
+    end
+    def debuglog(*args)
+      self.class.arvados.class.debuglog *args
+    end
+    def self.api_exec(method, parameters={})
+      api_method = arvados_api.send(api_models_sym).send(method.name.to_sym)
+      parameters.each do |k,v|
+        parameters[k] = v.to_json if v.is_a? Array or v.is_a? Hash
+      end
+      # Look for objects expected by request.properties.(key).$ref and
+      # move them from parameters (query string) to request body.
+      body = nil
+      method.discovery_document['request'].
+        andand['properties'].
+        andand.each do |k,v|
+        if v.is_a? Hash and v['$ref']
+          body ||= {}
+          body[k] = parameters.delete k.to_sym
+        end
+      end
+      result = client.
+        execute(:api_method => api_method,
+                :authenticated => false,
+                :parameters => parameters,
+                :body_object => body,
+                :headers => {
+                  :authorization => 'OAuth2 '+arvados.config['ARVADOS_API_TOKEN']
+                })
+      resp = JSON.parse result.body, :symbolize_names => true
+      if resp[:errors]
+        raise Arvados::TransactionFailedError.new(resp[:errors])
+      elsif resp[:uuid] and resp[:etag]
+        self.new(resp)
+      elsif resp[:items].is_a? Array
+        resp.merge(:items => resp[:items].collect do |i|
+                     self.new(i)
+                   end)
+      else
+        resp
+      end
+    end
+
+    def []=(x,y)
+      @attributes_to_update[x] = y
+      @attributes[x] = y
+    end
+    def [](x)
+      if @attributes[x].is_a? Hash or @attributes[x].is_a? Array
+        # We won't be notified via []= if these change, so we'll just
+        # assume they are going to get changed, and submit them if
+        # save() is called.
+        @attributes_to_update[x] = @attributes[x]
+      end
+      @attributes[x]
+    end
+    def save
+      @attributes_to_update.keys.each do |k|
+        @attributes_to_update[k] = @attributes[k]
+      end
+      j = self.class.api_exec :update, {
+        :uuid => @attributes[:uuid],
+        self.class.api_model_sym => @attributes_to_update.to_json
+      }
+      unless j.respond_to? :[] and j[:uuid]
+        debuglog "Failed to save #{self.to_s}: #{j[:errors] rescue nil}", 0
+        nil
+      else
+        @attributes_to_update = {}
+        @attributes = j
+      end
+    end
+
+    protected
+
+    def initialize(j)
+      @attributes_to_update = {}
+      @attributes = j
+    end
+  end
+
+  protected
+
+  def suppress_warnings
+    original_verbosity = $VERBOSE
+    begin
+      $VERBOSE = nil
+      yield
+    ensure
+      $VERBOSE = original_verbosity
+    end
+  end
+end
diff --git a/sdk/ruby/lib/arvados/collection.rb b/sdk/ruby/lib/arvados/collection.rb
new file mode 100644 (file)
index 0000000..f236ce8
--- /dev/null
@@ -0,0 +1,558 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require "arvados/keep"
+
+module Arv
+  class Collection
+    def initialize(manifest_text="")
+      @manifest_text = manifest_text
+      @modified = false
+      @root = CollectionRoot.new
+      manifest = Keep::Manifest.new(manifest_text)
+      manifest.each_line do |stream_root, locators, file_specs|
+        if stream_root.empty? or locators.empty? or file_specs.empty?
+          raise ArgumentError.new("manifest text includes malformed line")
+        end
+        loc_list = LocatorList.new(locators)
+        file_specs.map { |s| manifest.split_file_token(s) }.
+            each do |file_start, file_len, file_path|
+          begin
+            @root.file_at(normalize_path(stream_root, file_path)).
+              add_segment(loc_list.segment(file_start, file_len))
+          rescue Errno::ENOTDIR, Errno::EISDIR => error
+            raise ArgumentError.new("%p is both a stream and file" %
+                                    error.to_s.partition(" - ").last)
+          end
+        end
+      end
+    end
+
+    def manifest_text
+      @manifest_text ||= @root.manifest_text
+    end
+
+    def modified?
+      @modified
+    end
+
+    def unmodified
+      @modified = false
+      self
+    end
+
+    def normalize
+      @manifest_text = @root.manifest_text
+      self
+    end
+
+    def cp_r(source, target, source_collection=nil)
+      opts = {:descend_target => !source.end_with?("/")}
+      copy(:merge, source.chomp("/"), target, source_collection, opts)
+    end
+
+    def each_file_path(&block)
+      @root.each_file_path(&block)
+    end
+
+    def exist?(path)
+      begin
+        substream, item = find(path)
+        not (substream.leaf? or substream[item].nil?)
+      rescue Errno::ENOENT, Errno::ENOTDIR
+        false
+      end
+    end
+
+    def rename(source, target)
+      copy(:add_copy, source, target) { rm_r(source) }
+    end
+
+    def rm(source)
+      remove(source)
+    end
+
+    def rm_r(source)
+      remove(source, :recursive => true)
+    end
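+
+    # A minimal usage sketch of the public methods above (hypothetical
+    # manifest text; d41d8... is the MD5 of the empty string):
+    #
+    #   coll = Arv::Collection.new(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:a.txt\n")
+    #   coll.exist?("./a.txt")            # => true
+    #   coll.rename("./a.txt", "./b.txt")
+    #   coll.rm("./b.txt")
+    #   coll.manifest_text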
+
+    protected
+
+    def find(*parts)
+      @root.find(normalize_path(*parts))
+    end
+
+    private
+
+    def modified
+      @manifest_text = nil
+      @modified = true
+      self
+    end
+
+    def normalize_path(*parts)
+      path = File.join(*parts)
+      if path.empty?
+        raise ArgumentError.new("empty path")
+      elsif (path == ".") or path.start_with?("./")
+        path
+      else
+        "./#{path}"
+      end
+    end
+
+    def copy(copy_method, source, target, source_collection=nil, opts={})
+      # Find the item at path `source` in `source_collection`, find the
+      # destination stream at path `target`, and use `copy_method` to copy
+      # the found object there.  If a block is passed in, it will be called
+      # right before we do the actual copy, after we confirm that everything
+      # is found and can be copied.
+      source_collection = self if source_collection.nil?
+      src_stream, src_tail = source_collection.find(source)
+      dst_stream_path, _, dst_tail = normalize_path(target).rpartition("/")
+      if dst_stream_path.empty?
+        dst_stream, dst_tail = @root.find(dst_tail)
+        dst_tail ||= src_tail
+      else
+        dst_stream = @root.stream_at(dst_stream_path)
+        dst_tail = src_tail if dst_tail.empty?
+      end
+      if (source_collection.equal?(self) and
+          (src_stream.path == dst_stream.path) and (src_tail == dst_tail))
+        return self
+      end
+      src_item = src_stream[src_tail]
+      check_method = "check_can_#{copy_method}".to_sym
+      target_name = nil
+      if opts.fetch(:descend_target, true)
+        begin
+          # Find out if `target` refers to a stream we should copy into.
+          tail_stream = dst_stream[dst_tail]
+          tail_stream.send(check_method, src_item, src_tail)
+          # Yes it does.  Copy the item at `source` into it with the same name.
+          dst_stream = tail_stream
+          target_name = src_tail
+        rescue Errno::ENOENT, Errno::ENOTDIR
+          # It does not.  We'll fall back to writing to `target` below.
+        end
+      end
+      if target_name.nil?
+        dst_stream.send(check_method, src_item, dst_tail)
+        target_name = dst_tail
+      end
+      # At this point, we know the operation will work.  Call any block as
+      # a pre-copy hook.
+      if block_given?
+        yield
+        # Re-find the destination stream, in case the block removed
+        # the original (that's how rename is implemented).
+        dst_stream = @root.stream_at(dst_stream.path)
+      end
+      dst_stream.send(copy_method, src_item, target_name)
+      modified
+    end
+
+    def remove(path, opts={})
+      stream, name = find(path)
+      stream.delete(name, opts)
+      modified
+    end
+
+    Struct.new("LocatorSegment", :locators, :start_pos, :length)
+
+    class LocatorRange < Range
+      attr_reader :locator
+
+      def initialize(loc_s, start)
+        @locator = loc_s
+        range_end = start + Keep::Locator.parse(loc_s).size.to_i
+        super(start, range_end, false)
+      end
+    end
+
+    class LocatorList
+      # LocatorList efficiently builds LocatorSegments from a stream manifest.
+      def initialize(locators)
+        next_start = 0
+        @ranges = locators.map do |loc_s|
+          new_range = LocatorRange.new(loc_s, next_start)
+          next_start = new_range.end
+          new_range
+        end
+      end
+
+      def segment(start_pos, length)
+        # Return a LocatorSegment that captures `length` bytes from `start_pos`.
+        start_index = search_for_byte(start_pos)
+        if length == 0
+          end_index = start_index
+        else
+          end_index = search_for_byte(start_pos + length - 1, start_index)
+        end
+        seg_ranges = @ranges[start_index..end_index]
+        Struct::LocatorSegment.new(seg_ranges.map(&:locator),
+                                   start_pos - seg_ranges.first.begin,
+                                   length)
+      end
+
+      private
+
+      def search_for_byte(target, start_index=0)
+        # Do a binary search for byte `target` in the list of locators,
+        # starting from `start_index`.  Return the index of the range in
+        # @ranges that contains the byte.
+        lo = start_index
+        hi = @ranges.size
+        loop do
+          ii = (lo + hi) / 2
+          range = @ranges[ii]
+          if range.include?(target)
+            return ii
+          elsif ii == lo
+            raise RangeError.new("%i not in segment" % target)
+          elsif target < range.begin
+            hi = ii
+          else
+            lo = ii
+          end
+        end
+      end
+    end
+
+    class CollectionItem
+      attr_reader :path, :name
+
+      def initialize(path)
+        @path = path
+        @name = File.basename(path)
+      end
+    end
+
+    class CollectionFile < CollectionItem
+      def initialize(path)
+        super
+        @segments = []
+      end
+
+      def self.human_name
+        "file"
+      end
+
+      def file?
+        true
+      end
+
+      def leaf?
+        true
+      end
+
+      def add_segment(segment)
+        @segments << segment
+      end
+
+      def each_segment(&block)
+        @segments.each(&block)
+      end
+
+      def check_can_add_copy(src_item, name)
+        raise Errno::ENOTDIR.new(path)
+      end
+
+      alias_method :check_can_merge, :check_can_add_copy
+
+      def copy_named(copy_path)
+        copy = self.class.new(copy_path)
+        each_segment { |segment| copy.add_segment(segment) }
+        copy
+      end
+    end
+
+    class CollectionStream < CollectionItem
+      def initialize(path)
+        super
+        @items = {}
+      end
+
+      def self.human_name
+        "stream"
+      end
+
+      def file?
+        false
+      end
+
+      def leaf?
+        items.empty?
+      end
+
+      def [](key)
+        items[key] or
+          raise Errno::ENOENT.new("%p not found in %p" % [key, path])
+      end
+
+      def delete(name, opts={})
+        item = self[name]
+        if item.file? or opts[:recursive]
+          items.delete(name)
+        else
+          raise Errno::EISDIR.new(path)
+        end
+      end
+
+      def each_file_path
+        return to_enum(__method__) unless block_given?
+        items.each_value do |item|
+          if item.file?
+            yield item.path
+          else
+            item.each_file_path { |path| yield path }
+          end
+        end
+      end
+
+      def find(find_path)
+        # Given a POSIX-style path, return the CollectionStream that
+        # contains the object at that path, and the name of the object
+        # inside it.
+        components = find_path.split("/")
+        tail = components.pop
+        [components.reduce(self, :[]), tail]
+      end
+
+      def stream_at(find_path)
+        key, rest = find_path.split("/", 2)
+        next_stream = get_or_new(key, CollectionStream, Errno::ENOTDIR)
+        if rest.nil?
+          next_stream
+        else
+          next_stream.stream_at(rest)
+        end
+      end
+
+      def file_at(find_path)
+        stream_path, _, file_name = find_path.rpartition("/")
+        if stream_path.empty?
+          get_or_new(file_name, CollectionFile, Errno::EISDIR)
+        else
+          stream_at(stream_path).file_at(file_name)
+        end
+      end
+
+      def manifest_text
+        # Return a string with the normalized manifest text for this stream,
+        # including all substreams.
+        file_keys, stream_keys = items.keys.sort.partition do |key|
+          items[key].file?
+        end
+        my_line = StreamManifest.new(path)
+        file_keys.each do |file_name|
+          my_line.add_file(items[file_name])
+        end
+        sub_lines = stream_keys.map do |sub_name|
+          items[sub_name].manifest_text
+        end
+        my_line.to_s + sub_lines.join("")
+      end
+
+      def check_can_add_copy(src_item, key)
+        if existing = check_can_merge(src_item, key) and not existing.leaf?
+          raise Errno::ENOTEMPTY.new(existing.path)
+        end
+      end
+
+      def check_can_merge(src_item, key)
+        if existing = items[key] and (existing.class != src_item.class)
+          raise Errno::ENOTDIR.new(existing.path)
+        end
+        existing
+      end
+
+      def add_copy(src_item, key)
+        if key == "."
+          self[key] = src_item.copy_named("#{path}")
+        else
+          self[key] = src_item.copy_named("#{path}/#{key}")
+        end
+      end
+
+      def merge(src_item, key)
+        # Do a recursive copy of the collection item `src_item` to destination
+        # `key`.  If a simple copy is safe, do that; otherwise, recursively
+        # merge the contents of the stream `src_item` into the stream at
+        # `key`.
+        begin
+          check_can_add_copy(src_item, key)
+          add_copy(src_item, key)
+        rescue Errno::ENOTEMPTY
+          dest = self[key]
+          error = nil
+          # Copy as much as possible, then raise any error encountered.
+          # Start with streams for a depth-first merge.
+          src_items = src_item.items.each_pair.sort_by do |_, sub_item|
+            (sub_item.file?) ? 1 : 0
+          end
+          src_items.each do |sub_key, sub_item|
+            begin
+              dest.merge(sub_item, sub_key)
+            rescue Errno::ENOTDIR => error
+            end
+          end
+          raise error unless error.nil?
+        end
+      end
+
+      def copy_named(copy_path)
+        copy = self.class.new(copy_path)
+        items.each_pair do |key, item|
+          copy.add_copy(item, key)
+        end
+        copy
+      end
+
+      protected
+
+      attr_reader :items
+
+      private
+
+      def []=(key, item)
+        items[key] = item
+      end
+
+      def get_or_new(key, klass, err_class)
+        # Return the collection item at `key` and ensure that it's a `klass`.
+        # If `key` does not exist, create a new `klass` there.
+        # If the value for `key` is not a `klass`, raise an `err_class`.
+        item = items[key]
+        if item.nil?
+          self[key] = klass.new("#{path}/#{key}")
+        elsif not item.is_a?(klass)
+          raise err_class.new(item.path)
+        else
+          item
+        end
+      end
+    end
+
+    class CollectionRoot < CollectionStream
+      def initialize
+        super("")
+        setup
+      end
+
+      def delete(name, opts={})
+        super
+        # If that didn't fail, it deleted the . stream.  Recreate it.
+        setup
+      end
+
+      def check_can_merge(src_item, key)
+        if items.include?(key)
+          super
+        else
+          raise_root_write_error(key)
+        end
+      end
+
+      private
+
+      def setup
+        items["."] = CollectionStream.new(".")
+      end
+
+      def add_copy(src_item, key)
+        items["."].add_copy(src_item, key)
+      end
+
+      def raise_root_write_error(key)
+        raise ArgumentError.new("can't write to %p at collection root" % key)
+      end
+
+      def []=(key, item)
+        raise_root_write_error(key)
+      end
+    end
+
+    class StreamManifest
+      # Build a manifest text for a single stream, without substreams.
+      # The manifest includes files in the order they're added.  If you want
+      # a normalized manifest, add files in lexical order by name.
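+      #
+      # For example, a stream named "./dir" holding one 3-byte file "f"
+      # backed by a single block (hypothetical locator; acbd1... is the
+      # MD5 of "foo") serializes as:
+      #
+      #   ./dir acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:f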
+
+      def initialize(name)
+        @name = name
+        @loc_ranges = {}
+        @loc_range_start = 0
+        @file_specs = []
+      end
+
+      def add_file(coll_file)
+        coll_file.each_segment do |segment|
+          extend_locator_ranges(segment.locators)
+          extend_file_specs(coll_file.name, segment)
+        end
+      end
+
+      def to_s
+        if @file_specs.empty?
+          ""
+        else
+          "%s %s %s\n" % [escape_name(@name),
+                          @loc_ranges.keys.join(" "),
+                          @file_specs.join(" ")]
+        end
+      end
+
+      private
+
+      def extend_locator_ranges(locators)
+        locators.
+            select { |loc_s| not @loc_ranges.include?(loc_s) }.
+            each do |loc_s|
+          @loc_ranges[loc_s] = LocatorRange.new(loc_s, @loc_range_start)
+          @loc_range_start = @loc_ranges[loc_s].end
+        end
+      end
+
+      def extend_file_specs(filename, segment)
+        # Given a filename and a LocatorSegment, add the smallest
+        # possible array of file spec strings to @file_specs that
+        # builds the file from available locators.
+        filename = escape_name(filename)
+        start_pos = segment.start_pos
+        length = segment.length
+        start_loc = segment.locators.first
+        prev_loc = start_loc
+        # Build a list of file specs by iterating through the segment's
+        # locators and preparing a file spec for each contiguous range.
+        segment.locators[1..-1].each do |loc_s|
+          range = @loc_ranges[loc_s]
+          if range.begin != @loc_ranges[prev_loc].end
+            range_start, range_length =
+              start_and_length_at(start_loc, prev_loc, start_pos, length)
+            @file_specs << "#{range_start}:#{range_length}:#{filename}"
+            start_pos = 0
+            length -= range_length
+            start_loc = loc_s
+          end
+          prev_loc = loc_s
+        end
+        range_start, range_length =
+          start_and_length_at(start_loc, prev_loc, start_pos, length)
+        @file_specs << "#{range_start}:#{range_length}:#{filename}"
+      end
+
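+      # Escape a name for inclusion in a manifest line: each backslash
+      # doubles to "\\" and each whitespace byte becomes a three-digit
+      # octal escape (e.g. " " => "\040").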
+      def escape_name(name)
+        name.gsub(/\\/, "\\\\\\\\").gsub(/\s/) do |s|
+          s.each_byte.map { |c| "\\%03o" % c }.join("")
+        end
+      end
+
+      def start_and_length_at(start_key, end_key, start_pos, length)
+        range_begin = @loc_ranges[start_key].begin + start_pos
+        range_length = [@loc_ranges[end_key].end - range_begin, length].min
+        [range_begin, range_length]
+      end
+    end
+  end
+end
diff --git a/sdk/ruby/lib/arvados/google_api_client.rb b/sdk/ruby/lib/arvados/google_api_client.rb
new file mode 100644 (file)
index 0000000..69383d1
--- /dev/null
@@ -0,0 +1,64 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require 'google/api_client'
+# Monkeypatch google-api-client gem to avoid sending newline characters
+# on headers to make ruby-2.3.7+ happy.
+# See: https://dev.arvados.org/issues/13920
+Google::APIClient::ENV::OS_VERSION.strip!
+
+require 'json'
+require 'tempfile'
+
+class Google::APIClient
+  def discovery_document(api, version)
+    api = api.to_s
+    discovery_uri = self.discovery_uri(api, version)
+    discovery_uri_hash = Digest::MD5.hexdigest(discovery_uri)
+    discovery_cache_path =
+      File.expand_path("~/.cache/arvados/discovery-#{discovery_uri_hash}.json")
+    @discovery_documents[discovery_uri_hash] ||=
+      disk_cached_discovery_document(discovery_cache_path) or
+      fetched_discovery_document(discovery_uri, discovery_cache_path)
+  end
+
+  private
+
+  def disk_cached_discovery_document(cache_path)
+    begin
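+      # Trust the on-disk cache only while it is less than one day
+      # (86400 seconds) old.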
+      if (Time.now - File.mtime(cache_path)) < 86400
+        open(cache_path) do |cache_file|
+          return JSON.load(cache_file)
+        end
+      end
+    rescue IOError, SystemCallError, JSON::JSONError
+      # Error reading the cache.  Act like it doesn't exist.
+    end
+    nil
+  end
+
+  def write_cached_discovery_document(cache_path, body)
+    cache_dir = File.dirname(cache_path)
+    cache_file = nil
+    begin
+      FileUtils.makedirs(cache_dir)
+      cache_file = Tempfile.new("discovery", cache_dir)
+      cache_file.write(body)
+      cache_file.flush
+      File.rename(cache_file.path, cache_path)
+    rescue IOError, SystemCallError
+      # Failure to write the cache is non-fatal.  Do nothing.
+    ensure
+      cache_file.close! unless cache_file.nil?
+    end
+  end
+
+  def fetched_discovery_document(uri, cache_path)
+    response = self.execute!(:http_method => :get,
+                             :uri => uri,
+                             :authenticated => false)
+    write_cached_discovery_document(cache_path, response.body)
+    JSON.load(response.body)
+  end
+end
diff --git a/sdk/ruby/lib/arvados/keep.rb b/sdk/ruby/lib/arvados/keep.rb
new file mode 100644 (file)
index 0000000..458af53
--- /dev/null
@@ -0,0 +1,312 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+module Keep
+  class Locator
+    # A Locator is used to parse and manipulate Keep locator strings.
+    #
+    # Locators obey the following syntax:
+    #
+    #   locator      ::= address hint*
+    #   address      ::= digest size-hint
+    #   digest       ::= <32 hexadecimal digits>
+    #   size-hint    ::= "+" [0-9]+
+    #   hint         ::= "+" hint-type hint-content
+    #   hint-type    ::= [A-Z]
+    #   hint-content ::= [A-Za-z0-9@_-]+
+    #
+    # Individual hints may have their own required format:
+    #
+    #   sign-hint      ::= "+A" <40 lowercase hex digits> "@" sign-timestamp
+    #   sign-timestamp ::= <8 lowercase hex digits>
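+    #
+    # A hypothetical example (d41d8... is the MD5 of the empty string):
+    #
+    #   loc = Keep::Locator.parse("d41d8cd98f00b204e9800998ecf8427e+0")
+    #   loc.hash  # => "d41d8cd98f00b204e9800998ecf8427e"
+    #   loc.size  # => "0" (kept as a string)
+    #   loc.to_s  # => "d41d8cd98f00b204e9800998ecf8427e+0"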
+    attr_reader :hash, :hints, :size
+
+    LOCATOR_REGEXP = /^([[:xdigit:]]{32})(\+([[:digit:]]+))?((\+([[:upper:]][[:alnum:]@_-]*))+)?\z/
+
+    def initialize(hasharg, sizearg, hintarg)
+      @hash = hasharg
+      @size = sizearg
+      @hints = hintarg
+    end
+
+    def self.valid? tok
+      !!(LOCATOR_REGEXP.match tok)
+    end
+
+    # Locator.parse returns a Locator object parsed from the string tok.
+    # Returns nil if tok could not be parsed as a valid locator.
+    def self.parse(tok)
+      begin
+        Locator.parse!(tok)
+      rescue ArgumentError
+        nil
+      end
+    end
+
+    # Locator.parse! returns a Locator object parsed from the string tok,
+    # raising an ArgumentError if tok cannot be parsed.
+    def self.parse!(tok)
+      if tok.nil? or tok.empty?
+        raise ArgumentError.new "locator is nil or empty"
+      end
+
+      m = LOCATOR_REGEXP.match(tok)
+      unless m
+        raise ArgumentError.new "not a valid locator #{tok}"
+      end
+
+      tokhash, _, toksize, _, _, trailer = m[1..6]
+      tokhints = []
+      if trailer
+        trailer.split('+').each do |hint|
+          if hint =~ /^[[:upper:]][[:alnum:]@_-]*$/
+            tokhints.push(hint)
+          else
+            raise ArgumentError.new "invalid hint #{hint}"
+          end
+        end
+      end
+
+      Locator.new(tokhash, toksize, tokhints)
+    end
+
+    # Returns the signature hint supplied with this locator,
+    # or nil if the locator was not signed.
+    def signature
+      @hints.grep(/^A/).first
+    end
+
+    # Returns an unsigned Locator.
+    def without_signature
+      Locator.new(@hash, @size, @hints.reject { |o| o.start_with?("A") })
+    end
+
+    def strip_hints
+      Locator.new(@hash, @size, [])
+    end
+
+    def strip_hints!
+      @hints = []
+      self
+    end
+
+    def to_s
+      if @size
+        [ @hash, @size, *@hints ].join('+')
+      else
+        [ @hash, *@hints ].join('+')
+      end
+    end
+  end
+
+  class Manifest
+    STREAM_TOKEN_REGEXP = /^([^\000-\040\\]|\\[0-3][0-7][0-7])+$/
+    STREAM_NAME_REGEXP = /^(\.)(\/[^\/]+)*$/
+
+    EMPTY_DIR_TOKEN_REGEXP = /^0:0:\.$/ # The exception when a file can have '.' as a name
+    FILE_TOKEN_REGEXP = /^[[:digit:]]+:[[:digit:]]+:([^\000-\040\\]|\\[0-3][0-7][0-7])+$/
+    FILE_NAME_REGEXP = /^[[:digit:]]+:[[:digit:]]+:([^\/]+(\/[^\/]+)*)$/
+
+    NON_8BIT_ENCODED_CHAR = /[^\\]\\[4-7][0-7][0-7]/
+
+    # Class to parse a manifest text and provide common views of that data.
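+    #
+    # A hypothetical usage sketch (acbd1... is the MD5 of "foo"):
+    #
+    #   m = Keep::Manifest.new(". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n")
+    #   m.files                   # => [[".", "foo.txt", 3]]
+    #   m.files_size              # => 3
+    #   m.has_file?("./foo.txt")  # => true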
+    def initialize(manifest_text)
+      @text = manifest_text
+      @files = nil
+    end
+
+    def each_line
+      return to_enum(__method__) unless block_given?
+      @text.each_line do |line|
+        stream_name = nil
+        block_tokens = []
+        file_tokens = []
+        line.scan(/\S+/) do |token|
+          if stream_name.nil?
+            stream_name = unescape token
+          elsif file_tokens.empty? and Locator.valid? token
+            block_tokens << token
+          else
+            file_tokens << unescape(token)
+          end
+        end
+        # Ignore blank lines
+        next if stream_name.nil?
+        yield [stream_name, block_tokens, file_tokens]
+      end
+    end
+
+    def self.unescape(s)
+      return nil if s.nil?
+
+      # Parse backslash escapes in a Keep manifest stream or file name.
+      s.gsub(/\\(\\|[0-7]{3})/) do |_|
+        case $1
+        when '\\'
+          '\\'
+        else
+          $1.to_i(8).chr
+        end
+      end
+    end
+
+    def unescape(s)
+      self.class.unescape(s)
+    end
+
+    def split_file_token token
+      start_pos, filesize, filename = token.split(':', 3)
+      if filename.nil?
+        raise ArgumentError.new "Invalid file token '#{token}'"
+      end
+      [start_pos.to_i, filesize.to_i, unescape(filename)]
+    end
+
+    def each_file_spec
+      return to_enum(__method__) unless block_given?
+      @text.each_line do |line|
+        stream_name = nil
+        in_file_tokens = false
+        line.scan(/\S+/) do |token|
+          if stream_name.nil?
+            stream_name = unescape token
+          elsif in_file_tokens or not Locator.valid? token
+            in_file_tokens = true
+
+            start_pos, file_size, file_name = split_file_token(token)
+            stream_name_adjuster = ''
+            if file_name.include?('/')                # '/' in filename
+              dirname, sep, basename = file_name.rpartition('/')
+              stream_name_adjuster = sep + dirname   # /dir_parts
+              file_name = basename
+            end
+
+            yield [stream_name + stream_name_adjuster, start_pos, file_size, file_name]
+          end
+        end
+      end
+      true
+    end
+
+    def files
+      if @files.nil?
+        file_sizes = Hash.new(0)
+        each_file_spec do |streamname, _, filesize, filename|
+          file_sizes[[streamname, filename]] += filesize
+        end
+        @files = file_sizes.each_pair.map do |(streamname, filename), size|
+          [streamname, filename, size]
+        end
+      end
+      @files
+    end
+
+    def files_count(stop_after=nil)
+      # Return the number of files represented in this manifest.
+      # If stop_after is provided, files_count will read the manifest
+      # incrementally, and return immediately when it counts that number of
+      # files.  This can help you avoid parsing the entire manifest if you
+      # just want to check if a small number of files are specified.
+      if stop_after.nil? or not @files.nil?
+        # Avoid counting empty dir placeholders
+        return files.reject{|_, name, size| name == '.' and size == 0}.size
+      end
+      seen_files = {}
+      each_file_spec do |streamname, _, filesize, filename|
+        # Avoid counting empty dir placeholders
+        next if filename == "." and filesize == 0
+        seen_files[[streamname, filename]] = true
+        return stop_after if (seen_files.size >= stop_after)
+      end
+      seen_files.size
+    end
+
+    def files_size
+      # Return the total size of all files in this manifest.
+      files.reduce(0) { |total, (_, _, size)| total + size }
+    end
+
+    def exact_file_count?(want_count)
+      files_count(want_count + 1) == want_count
+    end
+
+    def minimum_file_count?(want_count)
+      files_count(want_count) >= want_count
+    end
+
+    def has_file?(want_stream, want_file=nil)
+      if want_file.nil?
+        want_stream, want_file = File.split(want_stream)
+      end
+      each_file_spec do |streamname, _, _, name|
+        if streamname == want_stream and name == want_file
+          return true
+        end
+      end
+      false
+    end
+
+    # Verify that a given manifest is valid according to
+    # https://arvados.org/projects/arvados/wiki/Keep_manifest_format
+    def self.validate! manifest
+      raise ArgumentError.new "No manifest found" if !manifest
+
+      return true if manifest.empty?
+
+      raise ArgumentError.new "Invalid manifest: does not end with newline" if !manifest.end_with?("\n")
+      line_count = 0
+      manifest.each_line do |line|
+        line_count += 1
+
+        words = line[0..-2].split(/ /)
+        raise ArgumentError.new "Manifest invalid for stream #{line_count}: missing stream name" if words.empty?
+
+        count = 0
+
+        word = words.shift
+        raise ArgumentError.new "Manifest invalid for stream #{line_count}: >8-bit encoded chars not allowed on stream token #{word.inspect}" if word =~ NON_8BIT_ENCODED_CHAR
+        unescaped_word = unescape(word)
+        count += 1 if word =~ STREAM_TOKEN_REGEXP and unescaped_word =~ STREAM_NAME_REGEXP and unescaped_word !~ /\/\.\.?(\/|$)/
+        raise ArgumentError.new "Manifest invalid for stream #{line_count}: missing or invalid stream name #{word.inspect if word}" if count != 1
+
+        count = 0
+        word = words.shift
+        while word =~ Locator::LOCATOR_REGEXP
+          word = words.shift
+          count += 1
+        end
+        raise ArgumentError.new "Manifest invalid for stream #{line_count}: missing or invalid locator #{word.inspect if word}" if count == 0
+
+        count = 0
+        raise ArgumentError.new "Manifest invalid for stream #{line_count}: >8-bit encoded chars not allowed on file token #{word.inspect}" if word =~ NON_8BIT_ENCODED_CHAR
+        while unescape(word) =~ EMPTY_DIR_TOKEN_REGEXP or
+          (word =~ FILE_TOKEN_REGEXP and unescape(word) =~ FILE_NAME_REGEXP and ($~[1].split('/') & ['..', '.']).empty?)
+          word = words.shift
+          count += 1
+        end
+
+        if word
+          raise ArgumentError.new "Manifest invalid for stream #{line_count}: invalid file token #{word.inspect}"
+        elsif count == 0
+          raise ArgumentError.new "Manifest invalid for stream #{line_count}: no file tokens"
+        end
+
+        # Ruby's split() method silently drops trailing empty tokens
+        # (which are not allowed by the manifest format) so we have to
+        # check trailing spaces manually.
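+        # For example, "0:0:a \n"[0..-2].split(/ /) yields ["0:0:a"],
+        # silently hiding the trailing empty token.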
+        raise ArgumentError.new "Manifest invalid for stream #{line_count}: trailing space" if line.end_with? " \n"
+      end
+      true
+    end
+
+    def self.valid? manifest
+      begin
+        validate! manifest
+        true
+      rescue ArgumentError
+        false
+      end
+    end
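+
+    # Usage sketch (illustrative, consistent with the test suite):
+    #
+    #   Keep::Manifest.valid?(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:a\n")
+    #   # => true
+    #   Keep::Manifest.validate!("no trailing newline")  # raises ArgumentError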
+  end
+end
diff --git a/sdk/ruby/test/sdk_fixtures.rb b/sdk/ruby/test/sdk_fixtures.rb
new file mode 100644 (file)
index 0000000..0f385e2
--- /dev/null
@@ -0,0 +1,74 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require "yaml"
+
+module SDKFixtures
+  module StaticMethods
+    # SDKFixtures will use these as class methods, and install them as
+    # instance methods on the test classes.
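+    # random_block returns a plausible fake Keep locator: 32 hex digits,
+    # "+", then a size in bytes, e.g. "d41d8cd98f00b204e9800998ecf8427e+9"
+    # (illustrative value; the digits are random).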
+    def random_block(size=nil)
+      sprintf("%032x+%d", rand(16 ** 32), size || rand(64 * 1024 * 1024))
+    end
+
+    def random_blocks(count, size=nil)
+      (0...count).map { |_| random_block(size) }
+    end
+  end
+
+  extend StaticMethods
+
+  def self.included(base)
+    base.include(StaticMethods)
+  end
+
+  @@fixtures = {}
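+  # Load a fixture YAML file from the API server's test suite, memoized
+  # in @@fixtures; anything after the test helper's trim marker line is
+  # dropped before parsing.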
+  def fixtures name
+    @@fixtures[name] ||=
+      begin
+        path = File.
+          expand_path("../../../../services/api/test/fixtures/#{name}.yml",
+                      __FILE__)
+        file = IO.read(path)
+        trim_index = file.index('# Test Helper trims the rest of the file')
+        file = file[0, trim_index] if trim_index
+        YAML.load(file)
+      end
+  end
+
+  ### Valid manifests
+  SIMPLEST_MANIFEST = ". #{random_block(9)} 0:9:simple.txt\n"
+  MULTIBLOCK_FILE_MANIFEST =
+    [". #{random_block(8)} 0:4:repfile 4:4:uniqfile",
+     "./s1 #{random_block(6)} 0:3:repfile 3:3:uniqfile",
+     ". #{random_block(8)} 0:7:uniqfile2 7:1:repfile\n"].join("\n")
+  MULTILEVEL_MANIFEST =
+    [". #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\n",
+     "./dir0 #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\n",
+     "./dir0/subdir #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\n",
+     "./dir1 #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\n",
+     "./dir1/subdir #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\n",
+     "./dir2 #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\n"].join("")
+  COLON_FILENAME_MANIFEST = ". #{random_block(9)} 0:9:file:test.txt\n"
+  # Filename is `a a.txt`.
+  ESCAPED_FILENAME_MANIFEST = ". #{random_block(9)} 0:9:a\\040\\141.txt\n"
+  MANY_ESCAPES_MANIFEST =
+    "./dir\\040name #{random_block(9)} 0:9:file\\\\name\\011\\here.txt\n"
+  NONNORMALIZED_MANIFEST =
+    ["./dir2 #{random_block} 0:0:z 0:0:y 0:0:x",
+     "./dir1 #{random_block} 0:0:p 0:0:o 0:0:n\n"].join("\n")
+  MANIFEST_WITH_DIRS_IN_FILENAMES =
+    [". #{random_block(10)} 0:3:file1 3:3:dir1/file1 6:3:dir1/dir2/file1\n"].join("")
+  MULTILEVEL_MANIFEST_WITH_DIRS_IN_FILENAMES =
+    [". #{random_block(10)} 0:3:file1 3:3:dir1/file1 6:4:dir1/dir2/file1\n",
+     "./dir1 #{random_block(10)} 0:3:file1 3:7:dir2/file1\n"].join("")
+
+  ### Non-tree manifests
+  # These manifests follow the spec, but they express a structure that can't
+  # can't be represented by a POSIX filesystem tree.  For example, there's a
+  # name conflict between a stream and a filename.
+  NAME_CONFLICT_MANIFEST =
+    [". #{random_block(9)} 0:9:conflict",
+     "./conflict #{random_block} 0:0:name\n"].join("\n")
+end
diff --git a/sdk/ruby/test/test_big_request.rb b/sdk/ruby/test/test_big_request.rb
new file mode 100644 (file)
index 0000000..d90382c
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require 'minitest/autorun'
+require 'arvados'
+require 'digest/md5'
+
+class TestBigRequest < Minitest::Test
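+  # Builds a manifest whose only file is zero bytes long but which lists
+  # the empty block nblocks+1 times, so its text grows with nblocks.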
+  def boring_manifest nblocks
+    x = '.'
+    (0..nblocks).each do |z|
+      x += ' d41d8cd98f00b204e9800998ecf8427e+0'
+    end
+    x += " 0:0:foo.txt\n"
+    x
+  end
+
+  def test_create_manifest nblocks=1
+    skip "Test needs an API server to run against"
+    manifest_text = boring_manifest nblocks
+    uuid = Digest::MD5.hexdigest(manifest_text) + '+' + manifest_text.size.to_s
+    c = Arvados.new.collection.create(collection: {
+                                        uuid: uuid,
+                                        manifest_text: manifest_text,
+                                      })
+    assert_equal uuid, c[:portable_data_hash]
+  end
+
+  def test_create_big_manifest
+    # This ensures that manifest_text is passed in the request body:
+    # it's too large to fit in the query string.
+    test_create_manifest 9999
+  end
+end
diff --git a/sdk/ruby/test/test_collection.rb b/sdk/ruby/test/test_collection.rb
new file mode 100644 (file)
index 0000000..288fd26
--- /dev/null
@@ -0,0 +1,738 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require "arvados/collection"
+require "minitest/autorun"
+require "sdk_fixtures"
+
+class CollectionTest < Minitest::Test
+  include SDKFixtures
+
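+  # Fixture: two streams ("." and "./s1"), each one 9-byte block holding
+  # f1 (5 bytes) plus a 4-byte file (f2 in the root, f3 in ./s1).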
+  TWO_BY_TWO_BLOCKS = SDKFixtures.random_blocks(2, 9)
+  TWO_BY_TWO_MANIFEST_A =
+    [". #{TWO_BY_TWO_BLOCKS.first} 0:5:f1 5:4:f2\n",
+     "./s1 #{TWO_BY_TWO_BLOCKS.last} 0:5:f1 5:4:f3\n"]
+  TWO_BY_TWO_MANIFEST_S = TWO_BY_TWO_MANIFEST_A.join("")
+
+  ### .new
+
+  def test_empty_construction
+    coll = Arv::Collection.new
+    assert_equal("", coll.manifest_text)
+  end
+
+  def test_successful_construction
+    [:SIMPLEST_MANIFEST, :MULTIBLOCK_FILE_MANIFEST, :MULTILEVEL_MANIFEST].
+        each do |manifest_name|
+      manifest_text = SDKFixtures.const_get(manifest_name)
+      coll = Arv::Collection.new(manifest_text)
+      assert_equal(manifest_text, coll.manifest_text,
+                   "did not get same manifest back out from #{manifest_name}")
+    end
+  end
+
+  def test_non_manifest_construction_error
+    ["word", ". abc def", ". #{random_block} 0:", ". / !"].each do |m_text|
+      assert_raises(ArgumentError,
+                    "built collection from manifest #{m_text.inspect}") do
+        Arv::Collection.new(m_text)
+      end
+    end
+  end
+
+  def test_file_directory_conflict_construction_error
+    assert_raises(ArgumentError) do
+      Arv::Collection.new(NAME_CONFLICT_MANIFEST)
+    end
+  end
+
+  def test_no_implicit_normalization
+    coll = Arv::Collection.new(NONNORMALIZED_MANIFEST)
+    assert_equal(NONNORMALIZED_MANIFEST, coll.manifest_text)
+  end
+
+  ### .normalize
+
+  def test_non_posix_path_handling
+    m_text = "./.. #{random_block(9)} 0:5:. 5:4:..\n"
+    coll = Arv::Collection.new(m_text.dup)
+    coll.normalize
+    assert_equal(m_text, coll.manifest_text)
+  end
+
+  def test_escaping_through_normalization
+    coll = Arv::Collection.new(MANY_ESCAPES_MANIFEST)
+    coll.normalize
+    # The result should simply duplicate the file spec.
+    # The source file spec has an unescaped backslash in it.
+    # It's OK for the Collection class to properly escape that.
+    expect_text = MANY_ESCAPES_MANIFEST.sub(/ \d+:\d+:\S+/) do |file_spec|
+      file_spec.gsub(/([^\\])(\\[^\\\d])/, '\1\\\\\2')
+    end
+    assert_equal(expect_text, coll.manifest_text)
+  end
+
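+  # Note: several tests below declare parameters with defaults; Minitest
+  # runs the zero-arg form, and sibling tests re-invoke them with other
+  # arguments to cover variants.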
+  def test_concatenation_with_locator_overlap(over_index=0)
+    blocks = random_blocks(4, 2)
+    blocks_s = blocks.join(" ")
+    coll = Arv::Collection.new(". %s 0:8:file\n. %s 0:4:file\n" %
+                               [blocks_s, blocks[over_index, 2].join(" ")])
+    coll.normalize
+    assert_equal(". #{blocks_s} 0:8:file #{over_index * 2}:4:file\n",
+                 coll.manifest_text)
+  end
+
+  def test_concatenation_with_middle_locator_overlap
+    test_concatenation_with_locator_overlap(1)
+  end
+
+  def test_concatenation_with_end_locator_overlap
+    test_concatenation_with_locator_overlap(2)
+  end
+
+  def test_concatenation_with_partial_locator_overlap
+    blocks = random_blocks(3, 3)
+    coll = Arv::Collection
+      .new(". %s 0:6:overlap\n. %s 0:6:overlap\n" %
+           [blocks[0, 2].join(" "), blocks[1, 2].join(" ")])
+    coll.normalize
+    assert_equal(". #{blocks.join(' ')} 0:6:overlap 3:6:overlap\n",
+                 coll.manifest_text)
+  end
+
+  def test_normalize
+    block = random_block
+    coll = Arv::Collection.new(". #{block} 0:0:f2 0:0:f1\n")
+    coll.normalize
+    assert_equal(". #{block} 0:0:f1 0:0:f2\n", coll.manifest_text)
+  end
+
+  def test_normalization_file_spans_two_whole_blocks(file_specs="0:10:f1",
+                                                     num_blocks=2)
+    blocks = random_blocks(num_blocks, 5)
+    m_text = ". #{blocks.join(' ')} #{file_specs}\n"
+    coll = Arv::Collection.new(m_text.dup)
+    coll.normalize
+    assert_equal(m_text, coll.manifest_text)
+  end
+
+  def test_normalization_file_fits_beginning_block
+    test_normalization_file_spans_two_whole_blocks("0:7:f1")
+  end
+
+  def test_normalization_file_fits_end_block
+    test_normalization_file_spans_two_whole_blocks("3:7:f1")
+  end
+
+  def test_normalization_file_spans_middle
+    test_normalization_file_spans_two_whole_blocks("3:5:f1")
+  end
+
+  def test_normalization_file_spans_three_whole_blocks
+    test_normalization_file_spans_two_whole_blocks("0:15:f1", 3)
+  end
+
+  def test_normalization_file_skips_bytes
+    test_normalization_file_spans_two_whole_blocks("0:3:f1 5:5:f1")
+  end
+
+  def test_normalization_file_inserts_bytes
+    test_normalization_file_spans_two_whole_blocks("0:3:f1 5:3:f1 3:2:f1")
+  end
+
+  def test_normalization_file_duplicates_bytes
+    test_normalization_file_spans_two_whole_blocks("2:3:f1 2:3:f1", 1)
+  end
+
+  def test_normalization_dedups_locators
+    blocks = random_blocks(2, 5)
+    coll = Arv::Collection.new(". %s %s 1:8:f1 11:8:f1\n" %
+                               [blocks.join(" "), blocks.reverse.join(" ")])
+    coll.normalize
+    assert_equal(". #{blocks.join(' ')} 1:8:f1 6:4:f1 0:4:f1\n",
+                 coll.manifest_text)
+  end
+
+  ### .cp_r
+
+  def test_simple_file_copy
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    coll.cp_r("./simple.txt", "./new")
+    assert_equal(SIMPLEST_MANIFEST.sub(" 0:9:", " 0:9:new 0:9:"),
+                 coll.manifest_text)
+  end
+
+  def test_copy_file_into_other_stream(target="./s1/f2", basename="f2")
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    coll.cp_r("./f2", target)
+    expected = "%s./s1 %s 0:5:f1 14:4:%s 5:4:f3\n" %
+      [TWO_BY_TWO_MANIFEST_A.first,
+       TWO_BY_TWO_BLOCKS.reverse.join(" "), basename]
+    assert_equal(expected, coll.manifest_text)
+  end
+
+  def test_implicit_copy_file_into_other_stream
+    test_copy_file_into_other_stream("./s1")
+  end
+
+  def test_copy_file_into_other_stream_with_new_name
+    test_copy_file_into_other_stream("./s1/f2a", "f2a")
+  end
+
+  def test_copy_file_over_in_other_stream(target="./s1/f1")
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    coll.cp_r("./f1", target)
+    expected = "%s./s1 %s 0:5:f1 14:4:f3\n" %
+      [TWO_BY_TWO_MANIFEST_A.first, TWO_BY_TWO_BLOCKS.join(" ")]
+    assert_equal(expected, coll.manifest_text)
+  end
+
+  def test_implicit_copy_file_over_in_other_stream
+    test_copy_file_over_in_other_stream("./s1")
+  end
+
+  def test_simple_stream_copy
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    coll.cp_r("./s1", "./sNew")
+    new_line = TWO_BY_TWO_MANIFEST_A.last.sub("./s1 ", "./sNew ")
+    assert_equal(TWO_BY_TWO_MANIFEST_S + new_line, coll.manifest_text)
+  end
+
+  def test_copy_stream_into_other_stream(target="./dir2/subdir",
+                                         basename="subdir")
+    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)
+    coll.cp_r("./dir1/subdir", target)
+    new_line = MULTILEVEL_MANIFEST.lines[4].sub("./dir1/subdir ",
+                                                "./dir2/#{basename} ")
+    assert_equal(MULTILEVEL_MANIFEST + new_line, coll.manifest_text)
+  end
+
+  def test_implicit_copy_stream_into_other_stream
+    test_copy_stream_into_other_stream("./dir2")
+  end
+
+  def test_copy_stream_into_other_stream_with_new_name
+    test_copy_stream_into_other_stream("./dir2/newsub", "newsub")
+  end
+
+  def test_copy_stream_over_empty_stream
+    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)
+    (1..3).each do |file_num|
+      coll.rm("./dir0/subdir/file#{file_num}")
+    end
+    coll.cp_r("./dir1/subdir", "./dir0")
+    expected = MULTILEVEL_MANIFEST.lines
+    expected[2] = expected[4].sub("./dir1/", "./dir0/")
+    assert_equal(expected.join(""), coll.manifest_text)
+  end
+
+  def test_copy_stream_over_file_raises_ENOTDIR(source="./s1", target="./f2")
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    assert_raises(Errno::ENOTDIR) do
+      coll.cp_r(source, target)
+    end
+  end
+
+  def test_copy_file_under_file_raises_ENOTDIR
+    test_copy_stream_over_file_raises_ENOTDIR("./f1", "./f2/newfile")
+  end
+
+  def test_copy_stream_over_nonempty_stream_merges_and_overwrites
+    blocks = random_blocks(3, 9)
+    manifest_a =
+      ["./subdir #{blocks[0]} 0:1:s1 1:2:zero\n",
+       "./zdir #{blocks[1]} 0:9:zfile\n",
+       "./zdir/subdir #{blocks[2]} 0:1:s2 1:2:zero\n"]
+    coll = Arv::Collection.new(manifest_a.join(""))
+    coll.cp_r("./subdir", "./zdir")
+    manifest_a[2] = "./zdir/subdir %s %s 0:1:s1 9:1:s2 1:2:zero\n" %
+      [blocks[0], blocks[2]]
+    assert_equal(manifest_a.join(""), coll.manifest_text)
+  end
+
+  def test_copy_stream_into_substream(source="./dir1",
+                                      target="./dir1/subdir/dir1")
+    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)
+    coll.cp_r(source, target)
+    expected = MULTILEVEL_MANIFEST.lines.flat_map do |line|
+      [line, line.gsub(/^#{Regexp.escape(source)}([\/ ])/, "#{target}\\1")].uniq
+    end
+    assert_equal(expected.sort.join(""), coll.manifest_text)
+  end
+
+  def test_copy_root
+    test_copy_stream_into_substream(".", "./root")
+  end
+
+  def test_adding_to_root_after_copy
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    coll.cp_r(".", "./root")
+    src_coll = Arv::Collection.new(COLON_FILENAME_MANIFEST)
+    coll.cp_r("./file:test.txt", ".", src_coll)
+    got_lines = coll.manifest_text.lines
+    assert_equal(2, got_lines.size)
+    assert_match(/^\. \S{33,} \S{33,} 0:9:file:test\.txt 9:9:simple\.txt\n/,
+                 got_lines.first)
+    assert_equal(SIMPLEST_MANIFEST.sub(". ", "./root "), got_lines.last)
+  end
+
+  def test_copy_chaining
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    coll.cp_r("./simple.txt", "./a").cp_r("./a", "./b")
+    assert_equal(SIMPLEST_MANIFEST.sub(" 0:9:", " 0:9:a 0:9:b 0:9:"),
+                 coll.manifest_text)
+  end
+
+  def prep_two_collections_for_copy(src_stream, dst_stream)
+    blocks = random_blocks(2, 8)
+    src_text = "#{src_stream} #{blocks.first} 0:8:f1\n"
+    dst_text = "#{dst_stream} #{blocks.last} 0:8:f2\n"
+    return [blocks, src_text, dst_text,
+            Arv::Collection.new(src_text.dup),
+            Arv::Collection.new(dst_text.dup)]
+  end
+
+  def test_copy_file_from_other_collection(src_stream=".", dst_stream="./s1")
+    blocks, src_text, dst_text, src_coll, dst_coll =
+      prep_two_collections_for_copy(src_stream, dst_stream)
+    dst_coll.cp_r("#{src_stream}/f1", dst_stream, src_coll)
+    assert_equal("#{dst_stream} #{blocks.join(' ')} 0:8:f1 8:8:f2\n",
+                 dst_coll.manifest_text)
+    assert_equal(src_text, src_coll.manifest_text)
+  end
+
+  def test_copy_file_from_other_collection_to_root
+    test_copy_file_from_other_collection("./s1", ".")
+  end
+
+  def test_copy_stream_from_other_collection
+    blocks, src_text, dst_text, src_coll, dst_coll =
+      prep_two_collections_for_copy("./s2", "./s1")
+    dst_coll.cp_r("./s2", "./s1", src_coll)
+    assert_equal(dst_text + src_text.sub("./s2 ", "./s1/s2 "),
+                 dst_coll.manifest_text)
+    assert_equal(src_text, src_coll.manifest_text)
+  end
+
+  def test_copy_stream_from_other_collection_to_root
+    blocks, src_text, dst_text, src_coll, dst_coll =
+      prep_two_collections_for_copy("./s1", ".")
+    dst_coll.cp_r("./s1", ".", src_coll)
+    assert_equal(dst_text + src_text, dst_coll.manifest_text)
+    assert_equal(src_text, src_coll.manifest_text)
+  end
+
+  def test_copy_stream_contents
+    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)
+    coll.cp_r("./dir0/subdir/", "./dir1/subdir")
+    expect_lines = MULTILEVEL_MANIFEST.lines
+    expect_lines[4] = expect_lines[2].sub("./dir0/", "./dir1/")
+    assert_equal(expect_lines.join(""), coll.manifest_text)
+  end
+
+  def test_copy_file_into_new_stream_with_implicit_filename
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    coll.cp_r("./simple.txt", "./new/")
+    assert_equal(SIMPLEST_MANIFEST + SIMPLEST_MANIFEST.sub(". ", "./new "),
+                 coll.manifest_text)
+  end
+
+  def test_copy_file_into_new_stream_with_explicit_filename
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    coll.cp_r("./simple.txt", "./new/newfile.txt")
+    new_line = SIMPLEST_MANIFEST.sub(". ", "./new ").sub(":simple", ":newfile")
+    assert_equal(SIMPLEST_MANIFEST + new_line, coll.manifest_text)
+  end
+
+  def test_copy_stream_contents_into_root
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    coll.cp_r("./s1/", ".")
+    assert_equal(". %s 0:5:f1 14:4:f2 5:4:f3\n%s" %
+                 [TWO_BY_TWO_BLOCKS.reverse.join(" "),
+                  TWO_BY_TWO_MANIFEST_A.last],
+                 coll.manifest_text)
+  end
+
+  def test_copy_root_contents_into_stream
+    # This is especially fun, because we're copying a parent into its child.
+    # Make sure that happens depth-first.
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    coll.cp_r("./", "./s1")
+    assert_equal("%s./s1 %s 0:5:f1 5:4:f2 14:4:f3\n%s" %
+                 [TWO_BY_TWO_MANIFEST_A.first, TWO_BY_TWO_BLOCKS.join(" "),
+                  TWO_BY_TWO_MANIFEST_A.last.sub("./s1 ", "./s1/s1 ")],
+                 coll.manifest_text)
+  end
+
+  def test_copy_stream_contents_across_collections
+    block = random_block(8)
+    src_coll = Arv::Collection.new("./s1 #{block} 0:8:f1\n")
+    dst_coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    dst_coll.cp_r("./s1/", "./s1", src_coll)
+    assert_equal("%s./s1 %s %s 0:8:f1 13:4:f3\n" %
+                 [TWO_BY_TWO_MANIFEST_A.first, block, TWO_BY_TWO_BLOCKS.last],
+                 dst_coll.manifest_text)
+  end
+
+  def test_copy_root_contents_across_collections
+    block = random_block(8)
+    src_coll = Arv::Collection.new(". #{block} 0:8:f1\n")
+    dst_coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    dst_coll.cp_r("./", ".", src_coll)
+    assert_equal(". %s %s 0:8:f1 13:4:f2\n%s" %
+                 [block, TWO_BY_TWO_BLOCKS.first, TWO_BY_TWO_MANIFEST_A.last],
+                 dst_coll.manifest_text)
+  end
+
+  def test_copy_root_into_empty_collection
+    block = random_block(8)
+    src_coll = Arv::Collection.new(". #{block} 0:8:f1\n")
+    dst_coll = Arv::Collection.new()
+    dst_coll.cp_r("./", ".", src_coll)
+    assert_equal(". %s 0:8:f1\n" %
+                 [block],
+                 dst_coll.manifest_text)
+  end
+
+  def test_copy_empty_source_path_raises_ArgumentError(src="", dst="./s1")
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    assert_raises(ArgumentError) do
+      coll.cp_r(src, dst)
+    end
+  end
+
+  def test_copy_empty_destination_path_raises_ArgumentError
+    test_copy_empty_source_path_raises_ArgumentError(".", "")
+  end
+
+  ### .each_file_path
+
+  def test_each_file_path
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    if block_given?
+      result = yield(coll)
+    else
+      result = []
+      coll.each_file_path { |path| result << path }
+    end
+    assert_equal(["./f1", "./f2", "./s1/f1", "./s1/f3"], result.sort)
+  end
+
+  def test_each_file_path_without_block
+    test_each_file_path { |coll| coll.each_file_path.to_a }
+  end
+
+  def test_each_file_path_empty_collection
+    assert_empty(Arv::Collection.new.each_file_path.to_a)
+  end
+
+  def test_each_file_path_after_collection_emptied
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    coll.rm("simple.txt")
+    assert_empty(coll.each_file_path.to_a)
+  end
+
+  def test_each_file_path_deduplicates_manifest_listings
+    coll = Arv::Collection.new(MULTIBLOCK_FILE_MANIFEST)
+    assert_equal(["./repfile", "./s1/repfile", "./s1/uniqfile",
+                  "./uniqfile", "./uniqfile2"],
+                 coll.each_file_path.to_a.sort)
+  end
+
+  ### .exist?
+
+  def test_exist(test_method=:assert, path="f2")
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    send(test_method, coll.exist?(path))
+  end
+
+  def test_file_not_exist
+    test_exist(:refute, "f3")
+  end
+
+  def test_stream_exist
+    test_exist(:assert, "s1")
+  end
+
+  def test_file_inside_stream_exist
+    test_exist(:assert, "s1/f1")
+  end
+
+  def test_path_inside_stream_not_exist
+    test_exist(:refute, "s1/f2")
+  end
+
+  def test_path_under_file_not_exist
+    test_exist(:refute, "f2/nonexistent")
+  end
+
+  def test_deep_substreams_not_exist
+    test_exist(:refute, "a/b/c/d/e/f/g")
+  end
+
+  ### .rename
+
+  def test_simple_file_rename
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    coll.rename("./simple.txt", "./new")
+    assert_equal(SIMPLEST_MANIFEST.sub(":simple.txt", ":new"),
+                 coll.manifest_text)
+  end
+
+  def test_rename_file_into_other_stream(target="./s1/f2", basename="f2")
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    coll.rename("./f2", target)
+    expected = ". %s 0:5:f1\n./s1 %s 0:5:f1 14:4:%s 5:4:f3\n" %
+      [TWO_BY_TWO_BLOCKS.first,
+       TWO_BY_TWO_BLOCKS.reverse.join(" "), basename]
+    assert_equal(expected, coll.manifest_text)
+  end
+
+  def test_implicit_rename_file_into_other_stream
+    test_rename_file_into_other_stream("./s1")
+  end
+
+  def test_rename_file_into_other_stream_with_new_name
+    test_rename_file_into_other_stream("./s1/f2a", "f2a")
+  end
+
+  def test_rename_file_over_in_other_stream(target="./s1/f1")
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    coll.rename("./f1", target)
+    expected = ". %s 5:4:f2\n./s1 %s 0:5:f1 14:4:f3\n" %
+      [TWO_BY_TWO_BLOCKS.first, TWO_BY_TWO_BLOCKS.join(" ")]
+    assert_equal(expected, coll.manifest_text)
+  end
+
+  def test_implicit_rename_file_over_in_other_stream
+    test_rename_file_over_in_other_stream("./s1")
+  end
+
+  def test_simple_stream_rename
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    coll.rename("./s1", "./newS")
+    assert_equal(TWO_BY_TWO_MANIFEST_S.sub("\n./s1 ", "\n./newS "),
+                 coll.manifest_text)
+  end
+
+  def test_rename_stream_into_other_stream(target="./dir2/subdir",
+                                           basename="subdir")
+    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)
+    coll.rename("./dir1/subdir", target)
+    expected = MULTILEVEL_MANIFEST.lines
+    replaced_line = expected.delete_at(4)
+    expected << replaced_line.sub("./dir1/subdir ", "./dir2/#{basename} ")
+    assert_equal(expected.join(""), coll.manifest_text)
+  end
+
+  def test_implicit_rename_stream_into_other_stream
+    test_rename_stream_into_other_stream("./dir2")
+  end
+
+  def test_rename_stream_into_other_stream_with_new_name
+    test_rename_stream_into_other_stream("./dir2/newsub", "newsub")
+  end
+
+  def test_rename_stream_over_empty_stream
+    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)
+    (1..3).each do |file_num|
+      coll.rm("./dir0/subdir/file#{file_num}")
+    end
+    coll.rename("./dir1/subdir", "./dir0")
+    expected = MULTILEVEL_MANIFEST.lines
+    expected[2] = expected.delete_at(4).sub("./dir1/", "./dir0/")
+    assert_equal(expected.sort.join(""), coll.manifest_text)
+  end
+
+  def test_rename_stream_over_file_raises_ENOTDIR
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    assert_raises(Errno::ENOTDIR) do
+      coll.rename("./s1", "./f2")
+    end
+  end
+
+  def test_rename_stream_over_nonempty_stream_raises_ENOTEMPTY
+    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)
+    assert_raises(Errno::ENOTEMPTY) do
+      coll.rename("./dir1/subdir", "./dir0")
+    end
+  end
+
+  def test_rename_stream_into_substream(source="./dir1",
+                                        target="./dir1/subdir/dir1")
+    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)
+    coll.rename(source, target)
+    assert_equal(MULTILEVEL_MANIFEST.gsub(/^#{Regexp.escape(source)}([\/ ])/m,
+                                          "#{target}\\1"),
+                 coll.manifest_text)
+  end
+
+  def test_rename_root
+    test_rename_stream_into_substream(".", "./root")
+  end
+
+  def test_adding_to_root_after_rename
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    coll.rename(".", "./root")
+    src_coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    coll.cp_r("./simple.txt", ".", src_coll)
+    assert_equal(SIMPLEST_MANIFEST + SIMPLEST_MANIFEST.sub(". ", "./root "),
+                 coll.manifest_text)
+  end
+
+  def test_rename_chaining
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    coll.rename("./simple.txt", "./x").rename("./x", "./simple.txt")
+    assert_equal(SIMPLEST_MANIFEST, coll.manifest_text)
+  end
+
+  ### .rm
+
+  def test_simple_remove
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S.dup)
+    coll.rm("./f2")
+    assert_equal(TWO_BY_TWO_MANIFEST_S.sub(" 5:4:f2", ""), coll.manifest_text)
+  end
+
+  def empty_stream_and_assert(expect_index=0)
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    yield coll
+    assert_equal(TWO_BY_TWO_MANIFEST_A[expect_index], coll.manifest_text)
+  end
+
+  def test_remove_all_files_in_substream
+    empty_stream_and_assert do |coll|
+      coll.rm("./s1/f1")
+      coll.rm("./s1/f3")
+    end
+  end
+
+  def test_remove_all_files_in_root_stream
+    empty_stream_and_assert(1) do |coll|
+      coll.rm("./f1")
+      coll.rm("./f2")
+    end
+  end
+
+  def test_chaining_removes
+    empty_stream_and_assert do |coll|
+      coll.rm("./s1/f1").rm("./s1/f3")
+    end
+  end
+
+  def test_remove_last_file
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    coll.rm("./simple.txt")
+    assert_equal("", coll.manifest_text)
+  end
+
+  def test_remove_nonexistent_file_raises_ENOENT(path="./NoSuchFile",
+                                                 method=:rm)
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    assert_raises(Errno::ENOENT) do
+      coll.send(method, path)
+    end
+  end
+
+  def test_remove_from_nonexistent_stream_raises_ENOENT
+    test_remove_nonexistent_file_raises_ENOENT("./NoSuchStream/simple.txt")
+  end
+
+  def test_remove_stream_raises_EISDIR(path="./s1")
+    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)
+    assert_raises(Errno::EISDIR) do
+      coll.rm(path)
+    end
+  end
+
+  def test_remove_root_raises_EISDIR
+    test_remove_stream_raises_EISDIR(".")
+  end
+
+  def test_remove_empty_string_raises_ArgumentError
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    assert_raises(ArgumentError) do
+      coll.rm("")
+    end
+  end
+
+  ### rm_r
+
+  def test_recursive_remove
+    empty_stream_and_assert do |coll|
+      coll.rm_r("./s1")
+    end
+  end
+
+  def test_recursive_remove_on_files
+    empty_stream_and_assert do |coll|
+      coll.rm_r("./s1/f1")
+      coll.rm_r("./s1/f3")
+    end
+  end
+
+  def test_recursive_remove_root
+    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)
+    coll.rm_r(".")
+    assert_equal("", coll.manifest_text)
+  end
+
+  def test_rm_r_nonexistent_file_raises_ENOENT(path="./NoSuchFile")
+    test_remove_nonexistent_file_raises_ENOENT("./NoSuchFile", :rm_r)
+  end
+
+  def test_rm_r_from_nonexistent_stream_raises_ENOENT
+    test_remove_nonexistent_file_raises_ENOENT("./NoSuchStream/file", :rm_r)
+  end
+
+  def test_rm_r_empty_string_raises_ArgumentError
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    assert_raises(ArgumentError) do
+      coll.rm_r("")
+    end
+  end
+
+  ### .modified?
+
+  def test_new_collection_unmodified(*args)
+    coll = Arv::Collection.new(*args)
+    yield coll if block_given?
+    refute(coll.modified?)
+  end
+
+  def test_collection_unmodified_after_instantiation
+    test_new_collection_unmodified(SIMPLEST_MANIFEST)
+  end
+
+  def test_collection_unmodified_after_mark
+    test_new_collection_unmodified(SIMPLEST_MANIFEST) do |coll|
+      coll.cp_r("./simple.txt", "./copy")
+      coll.unmodified
+    end
+  end
+
+  def check_collection_modified
+    coll = Arv::Collection.new(SIMPLEST_MANIFEST)
+    yield coll
+    assert(coll.modified?)
+  end
+
+  def test_collection_modified_after_copy
+    check_collection_modified do |coll|
+      coll.cp_r("./simple.txt", "./copy")
+    end
+  end
+
+  def test_collection_modified_after_remove
+    check_collection_modified do |coll|
+      coll.rm("./simple.txt")
+    end
+  end
+
+  def test_collection_modified_after_rename
+    check_collection_modified do |coll|
+      coll.rename("./simple.txt", "./newname")
+    end
+  end
+end
diff --git a/sdk/ruby/test/test_keep_manifest.rb b/sdk/ruby/test/test_keep_manifest.rb
new file mode 100644 (file)
index 0000000..eee8b39
--- /dev/null
@@ -0,0 +1,511 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+require "arvados/keep"
+require "minitest/autorun"
+require "sdk_fixtures"
+
+class ManifestTest < Minitest::Test
+  include SDKFixtures
+
+  def check_stream(stream, exp_name, exp_blocks, exp_files)
+    assert_equal(exp_name, stream.first)
+    assert_equal(exp_blocks, stream[1].map(&:to_s))
+    assert_equal(exp_files, stream.last)
+  end
+
+  def test_simple_each_line_array
+    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)
+    stream_name, block_s, file = SIMPLEST_MANIFEST.strip.split
+    stream_a = manifest.each_line.to_a
+    assert_equal(1, stream_a.size, "wrong number of streams")
+    check_stream(stream_a.first, stream_name, [block_s], [file])
+  end
+
+  def test_simple_each_line_block
+    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)
+    result = []
+    manifest.each_line do |stream, blocks, files|
+      result << files
+    end
+    assert_equal([[SIMPLEST_MANIFEST.split.last]], result,
+                 "wrong result from each_line block")
+  end
+
+  def test_multilevel_each_line
+    manifest = Keep::Manifest.new(MULTILEVEL_MANIFEST)
+    seen = []
+    manifest.each_line do |stream, blocks, files|
+      refute(seen.include?(stream),
+             "each_line already yielded stream #{stream}")
+      seen << stream
+      assert_equal(3, files.size, "wrong file count for stream #{stream}")
+    end
+    assert_equal(MULTILEVEL_MANIFEST.count("\n"), seen.size,
+                 "wrong number of streams")
+  end
+
+  def test_empty_each_line
+    assert_empty(Keep::Manifest.new("").each_line.to_a)
+  end
+
+  def test_empty_each_file_spec
+    assert_empty(Keep::Manifest.new("").each_file_spec.to_a)
+  end
+
+  def test_empty_files
+    assert_empty(Keep::Manifest.new("").files)
+  end
+
+  def test_empty_files_count
+    assert_equal(0, Keep::Manifest.new("").files_count)
+  end
+
+  def test_empty_dir_files_count
+    assert_equal(0,
+      Keep::Manifest.new("./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\n").files_count)
+  end
+
+  def test_empty_files_size
+    assert_equal(0, Keep::Manifest.new("").files_size)
+  end
+
+  def test_empty_has_file?
+    refute(Keep::Manifest.new("").has_file?(""))
+  end
+
+  def test_empty_line_within_manifest
+    block_s = random_block
+    manifest = Keep::Manifest.
+      new([". #{block_s} 0:1:file1 1:2:file2\n",
+           "\n",
+           ". #{block_s} 3:3:file3 6:4:file4\n"].join(""))
+    streams = manifest.each_line.to_a
+    assert_equal(2, streams.size)
+    check_stream(streams[0], ".", [block_s], ["0:1:file1", "1:2:file2"])
+    check_stream(streams[1], ".", [block_s], ["3:3:file3", "6:4:file4"])
+  end
+
+  def test_backslash_escape_parsing
+    manifest = Keep::Manifest.new(MANY_ESCAPES_MANIFEST)
+    streams = manifest.each_line.to_a
+    assert_equal(1, streams.size, "wrong number of streams with whitespace")
+    assert_equal("./dir name", streams.first.first,
+                 "wrong stream name with whitespace")
+    assert_equal(["0:9:file\\name\t\\here.txt"], streams.first.last,
+                 "wrong filename(s) with whitespace")
+  end
+
+  def test_simple_files
+    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)
+    assert_equal([[".", "simple.txt", 9]], manifest.files)
+  end
+
+  def test_multilevel_files
+    manifest = Keep::Manifest.new(MULTILEVEL_MANIFEST)
+    seen = Hash.new { |this, key| this[key] = [] }
+    manifest.files.each do |stream, basename, size|
+      refute(seen[stream].include?(basename),
+             "each_file repeated #{stream}/#{basename}")
+      seen[stream] << basename
+      assert_equal(3, size, "wrong size for #{stream}/#{basename}")
+    end
+    seen.each_pair do |stream, basenames|
+      assert_equal(%w(file1 file2 file3), basenames.sort,
+                   "wrong file list for #{stream}")
+    end
+  end
+
+  def test_files_with_colons_in_names
+    manifest = Keep::Manifest.new(COLON_FILENAME_MANIFEST)
+    assert_equal([[".", "file:test.txt", 9]], manifest.files)
+  end
+
+  def test_files_with_escape_sequence_in_filename
+    manifest = Keep::Manifest.new(ESCAPED_FILENAME_MANIFEST)
+    assert_equal([[".", "a a.txt", 9]], manifest.files)
+  end
+
+  def test_files_spanning_multiple_blocks
+    manifest = Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST)
+    assert_equal([[".", "repfile", 5],
+                  [".", "uniqfile", 4],
+                  [".", "uniqfile2", 7],
+                  ["./s1", "repfile", 3],
+                  ["./s1", "uniqfile", 3]],
+                 manifest.files.sort)
+  end
+
+  def test_minimum_file_count_simple
+    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)
+    assert(manifest.minimum_file_count?(1), "real minimum file count false")
+    refute(manifest.minimum_file_count?(2), "fake minimum file count true")
+  end
+
+  def test_minimum_file_count_multiblock
+    manifest = Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST)
+    assert(manifest.minimum_file_count?(2), "low minimum file count false")
+    assert(manifest.minimum_file_count?(5), "real minimum file count false")
+    refute(manifest.minimum_file_count?(6), "fake minimum file count true")
+  end
+
+  def test_exact_file_count_simple
+    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)
+    assert(manifest.exact_file_count?(1), "exact file count false")
+    refute(manifest.exact_file_count?(0), "-1 file count true")
+    refute(manifest.exact_file_count?(2), "+1 file count true")
+  end
+
+  def test_exact_file_count_multiblock
+    manifest = Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST)
+    assert(manifest.exact_file_count?(5), "exact file count false")
+    refute(manifest.exact_file_count?(4), "-1 file count true")
+    refute(manifest.exact_file_count?(6), "+1 file count true")
+  end
+
+  def test_files_size_multiblock
+    assert_equal(22, Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST).files_size)
+  end
+
+  def test_files_size_with_skipped_overlapping_data
+    manifest = Keep::Manifest.new(". #{random_block(9)} 3:3:f1 5:3:f2\n")
+    assert_equal(6, manifest.files_size)
+  end
+
+  def test_has_file
+    manifest = Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST)
+    assert(manifest.has_file?("./repfile"), "one-arg repfile not found")
+    assert(manifest.has_file?(".", "repfile"), "two-arg repfile not found")
+    assert(manifest.has_file?("./s1/repfile"), "one-arg s1/repfile not found")
+    assert(manifest.has_file?("./s1", "repfile"), "two-arg s1/repfile not found")
+    refute(manifest.has_file?("./s1/uniqfile2"), "one-arg missing file found")
+    refute(manifest.has_file?("./s1", "uniqfile2"), "two-arg missing file found")
+    refute(manifest.has_file?("./s2/repfile"), "one-arg missing stream found")
+    refute(manifest.has_file?("./s2", "repfile"), "two-arg missing stream found")
+  end
+
+  def test_has_file_with_spaces
+    manifest = Keep::Manifest.new(ESCAPED_FILENAME_MANIFEST)
+    assert(manifest.has_file?("./a a.txt"), "one-arg path not found")
+    assert(manifest.has_file?(".", "a a.txt"), "two-arg path not found")
+    refute(manifest.has_file?("a\\040\\141"), "one-arg unescaped found")
+    refute(manifest.has_file?(".", "a\\040\\141"), "two-arg unescaped found")
+  end
+
+  def test_parse_all_fixtures
+    fixtures('collections').each do |name, collection|
+      parse_collection_manifest name, collection
+    end
+  end
+
+  def test_raise_on_bogus_fixture
+    assert_raises ArgumentError do
+      parse_collection_manifest('bogus collection',
+                                {'manifest_text' => ". zzz 0:\n"})
+    end
+  end
+
+  def parse_collection_manifest name, collection
+    manifest = Keep::Manifest.new(collection['manifest_text'])
+    manifest.each_file_spec do |stream_name, start_pos, file_size, file_name|
+      assert_kind_of String, stream_name
+      assert_kind_of Integer, start_pos
+      assert_kind_of Integer, file_size
+      assert_kind_of String, file_name
+      assert !stream_name.empty?, "empty stream_name in #{name} fixture"
+      assert !file_name.empty?, "empty file_name in #{name} fixture"
+    end
+  end
+
+  def test_collection_with_dirs_in_filenames
+    manifest = Keep::Manifest.new(MANIFEST_WITH_DIRS_IN_FILENAMES)
+
+    seen = Hash.new { |this, key| this[key] = [] }
+
+    manifest.files.each do |stream, basename, size|
+      refute(seen[stream].include?(basename), "each_file repeated #{stream}/#{basename}")
+      assert_equal(3, size, "wrong size for #{stream}/#{basename}")
+      seen[stream] << basename
+    end
+
+    assert_equal(%w(. ./dir1 ./dir1/dir2), seen.keys)
+
+    seen.each_pair do |stream, basenames|
+      assert_equal(%w(file1), basenames.sort, "wrong file list for #{stream}")
+    end
+  end
+
+  def test_multilevel_collection_with_dirs_in_filenames
+    manifest = Keep::Manifest.new(MULTILEVEL_MANIFEST_WITH_DIRS_IN_FILENAMES)
+
+    seen = Hash.new { |this, key| this[key] = [] }
+    expected_sizes = {'.' => 3, './dir1' => 6, './dir1/dir2' => 11}
+
+    manifest.files.each do |stream, basename, size|
+      refute(seen[stream].include?(basename), "each_file repeated #{stream}/#{basename}")
+      assert_equal(expected_sizes[stream], size, "wrong size for #{stream}/#{basename}")
+      seen[stream] << basename
+    end
+
+    assert_equal(%w(. ./dir1 ./dir1/dir2), seen.keys)
+
+    seen.each_pair do |stream, basenames|
+      assert_equal(%w(file1), basenames.sort, "wrong file list for #{stream}")
+    end
+  end
+
+  [[false, nil],
+   [false, '+0'],
+   [false, 'd41d8cd98f00b204e9800998ecf8427'],
+   [false, 'd41d8cd98f00b204e9800998ecf8427+0'],
+   [false, 'd41d8cd98f00b204e9800998ecf8427e0'],
+   [false, 'd41d8cd98f00b204e9800998ecf8427e0+0'],
+   [false, 'd41d8cd98f00b204e9800998ecf8427e+0 '],
+   [false, "d41d8cd98f00b204e9800998ecf8427e+0\n"],
+   [false, ' d41d8cd98f00b204e9800998ecf8427e+0'],
+   [false, 'd41d8cd98f00b204e9800998ecf8427e+K+0'],
+   [false, 'd41d8cd98f00b204e9800998ecf8427e+0+0'],
+   [false, 'd41d8cd98f00b204e9800998ecf8427e++'],
+   [false, 'd41d8cd98f00b204e9800998ecf8427e+0+K+'],
+   [false, 'd41d8cd98f00b204e9800998ecf8427e+0++K'],
+   [false, 'd41d8cd98f00b204e9800998ecf8427e+0+K++'],
+   [false, 'd41d8cd98f00b204e9800998ecf8427e+0+K++Z'],
+   [true, 'd41d8cd98f00b204e9800998ecf8427e', nil,nil,nil],
+   [true, 'd41d8cd98f00b204e9800998ecf8427e+0', '+0','0',nil],
+   [true, 'd41d8cd98f00b204e9800998ecf8427e+0+Fizz+Buzz','+0','0','+Fizz+Buzz'],
+   [true, 'd41d8cd98f00b204e9800998ecf8427e+Fizz+Buzz', nil,nil,'+Fizz+Buzz'],
+   [true, 'd41d8cd98f00b204e9800998ecf8427e+0+Ad41d8cd98f00b204e9800998ecf8427e00000000+Foo', '+0','0','+Ad41d8cd98f00b204e9800998ecf8427e00000000+Foo'],
+   [true, 'd41d8cd98f00b204e9800998ecf8427e+Ad41d8cd98f00b204e9800998ecf8427e00000000+Foo', nil,nil,'+Ad41d8cd98f00b204e9800998ecf8427e00000000+Foo'],
+   [true, 'd41d8cd98f00b204e9800998ecf8427e+0+Z', '+0','0','+Z'],
+   [true, 'd41d8cd98f00b204e9800998ecf8427e+Z', nil,nil,'+Z'],
+  ].each do |ok, locator, match2, match3, match4|
+    define_method "test_LOCATOR_REGEXP_on_#{locator.inspect}" do
+      match = Keep::Locator::LOCATOR_REGEXP.match locator
+      assert_equal ok, !!match
+      if ok
+        assert_equal match2, match[2]
+        assert_equal match3, match[3]
+        assert_equal match4, match[4]
+      end
+    end
+    define_method "test_parse_method_on_#{locator.inspect}" do
+      loc = Keep::Locator.parse locator
+      if !ok
+        assert_nil loc
+      else
+        refute_nil loc
+        assert loc.is_a?(Keep::Locator)
+        #assert loc.hash
+        #assert loc.size
+        #assert loc.hints.is_a?(Array)
+      end
+    end
+  end
+
+  [
+    [false, nil, "No manifest found"],
+    [true, ""],
+    [false, " ", "Invalid manifest: does not end with newline"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e a41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\n"], # 2 locators
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/bar.txt\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:.foo.txt\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:.foo\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:...\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:.../.foo./.../bar\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/...\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/.../bar\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/.bar/baz.txt\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/bar./baz.txt\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 000000000000000000000000000000:0777:foo.txt\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:0:0\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\040\n"],
+    [true, ". 00000000000000000000000000000000+0 0:0:0\n"],
+    [true, ". 00000000000000000000000000000000+0 0:0:d41d8cd98f00b204e9800998ecf8427e+0+Ad41d8cd98f00b204e9800998ecf8427e00000000@ffffffff\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0+Ad41d8cd98f00b204e9800998ecf8427e00000000@ffffffff 0:0:empty.txt\n"],
+    [true, "./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:.\n"],
+    [false, '. d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt',
+      "Invalid manifest: does not end with newline"],
+    [false, "abc d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\n",
+      "invalid stream name \"abc\""],
+    [false, "abc/./foo d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\n",
+      "invalid stream name \"abc/./foo\""],
+    [false, "./abc/../foo d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\n",
+      "invalid stream name \"./abc/../foo\""],
+    [false, "./abc/. d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\n",
+      "invalid stream name \"./abc/.\""],
+    [false, "./abc/.. d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\n",
+      "invalid stream name \"./abc/..\""],
+    [false, "./abc/./foo d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\n",
+      "invalid stream name \"./abc/./foo\""],
+    # non-empty '.'-named file tokens aren't acceptable. Empty ones are used as empty dir placeholders.
+    [false, ". 8cf8463b34caa8ac871a52d5dd7ad1ef+1 0:1:.\n",
+      "invalid file token \"0:1:.\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e 0:0:..\n",
+      "invalid file token \"0:0:..\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e 0:0:./abc.txt\n",
+      "invalid file token \"0:0:./abc.txt\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e 0:0:../abc.txt\n",
+      "invalid file token \"0:0:../abc.txt\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt/.\n",
+      "invalid file token \"0:0:abc.txt/.\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt/..\n",
+      "invalid file token \"0:0:abc.txt/..\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e 0:0:a/./bc.txt\n",
+      "invalid file token \"0:0:a/./bc.txt\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e 0:0:a/../bc.txt\n",
+      "invalid file token \"0:0:a/../bc.txt\""],
+    [false, "./abc/./foo d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\n",
+      "invalid stream name \"./abc/./foo\""],
+    [false, "d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\n",
+      "invalid stream name \"d41d8cd98f00b204e9800998ecf8427e+0\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427 0:0:abc.txt\n",
+      "invalid locator \"d41d8cd98f00b204e9800998ecf8427\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e\n",
+      "Manifest invalid for stream 1: no file tokens"],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\n/dir1 d41d8cd98f00b204e9800998ecf842 0:0:abc.txt\n",
+      "Manifest invalid for stream 2: missing or invalid stream name \"/dir1\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\n./dir1 d41d8cd98f00b204e9800998ecf842 0:0:abc.txt\n",
+      "Manifest invalid for stream 2: missing or invalid locator \"d41d8cd98f00b204e9800998ecf842\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\n./dir1 a41d8cd98f00b204e9800998ecf8427e+0 abc.txt\n",
+      "Manifest invalid for stream 2: invalid file token \"abc.txt\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\n./dir1 a41d8cd98f00b204e9800998ecf8427e+0 0:abc.txt\n",
+      "Manifest invalid for stream 2: invalid file token \"0:abc.txt\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\n./dir1 a41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt xyz.txt\n",
+      "Manifest invalid for stream 2: invalid file token \"xyz.txt\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt d41d8cd98f00b204e9800998ecf8427e+0\n",
+      "Manifest invalid for stream 1: invalid file token \"d41d8cd98f00b204e9800998ecf8427e+0\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:\n",
+      "Manifest invalid for stream 1: invalid file token \"0:0:\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0\n",
+      "Manifest invalid for stream 1: no file tokens"],
+    [false, ". 0:0:foo.txt d41d8cd98f00b204e9800998ecf8427e+0\n",
+      "Manifest invalid for stream 1: missing or invalid locator \"0:0:foo.txt\""],
+    [false, ". 0:0:foo.txt\n",
+      "Manifest invalid for stream 1: missing or invalid locator \"0:0:foo.txt\""],
+    [false, ".\n", "Manifest invalid for stream 1: missing or invalid locator"],
+    [false, ".", "Invalid manifest: does not end with newline"],
+    [false, ". \n", "Manifest invalid for stream 1: missing or invalid locator"],
+    [false, ".  \n", "Manifest invalid for stream 1: missing or invalid locator"],
+    [false, " . d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n",
+      "Manifest invalid for stream 1: missing or invalid stream name"],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt \n",
+      "stream 1: trailing space"],
+    # TAB and other tricky whitespace characters:
+    [false, "\v. d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \"\\v."],
+    [false, "./foo\vbar d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \"./foo\\vbar"],
+    [false, "\t. d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \"\\t"],
+    [false, ".\td41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \".\\t"],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\t\n",
+      "stream 1: invalid file token \"0:0:foo.txt\\t\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0\t 0:0:foo.txt\n",
+      "stream 1: missing or invalid locator \"d41d8cd98f00b204e9800998ecf8427e+0\\t\""],
+    [false, "./foo\tbar d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n",
+      "stream 1: missing or invalid stream name \"./foo\\tbar\""],
+    # other whitespace errors:
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0  0:0:foo.txt\n",
+      "Manifest invalid for stream 1: invalid file token \"\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n \n",
+      "Manifest invalid for stream 2: missing stream name"],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n\n",
+      "Manifest invalid for stream 2: missing stream name"],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n ",
+      "Invalid manifest: does not end with newline"],
+    [false, "\n. d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n",
+      "Manifest invalid for stream 1: missing stream name"],
+    [false, " \n. d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n",
+      "Manifest invalid for stream 1: missing stream name"],
+    # empty file and stream name components:
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:/foo.txt\n",
+      "Manifest invalid for stream 1: invalid file token \"0:0:/foo.txt\""],
+    [false, "./ d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \"./\""],
+    [false, ".//foo d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \".//foo\""],
+    [false, "./foo/ d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \"./foo/\""],
+    [false, "./foo//bar d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \"./foo//bar\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo//bar.txt\n",
+      "Manifest invalid for stream 1: invalid file token \"0:0:foo//bar.txt\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/\n",
+      "Manifest invalid for stream 1: invalid file token \"0:0:foo/\""],
+    # escaped chars
+    [true, "./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\n"],
+    [false, "./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\\056\n",
+      "Manifest invalid for stream 1: invalid file token \"0:0:\\\\056\\\\056\""],
+    [false, "./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\\056\\057foo\n",
+      "Manifest invalid for stream 1: invalid file token \"0:0:\\\\056\\\\056\\\\057foo\""],
+    [false, "./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0\\0720\\072foo\n",
+      "Manifest invalid for stream 1: invalid file token \"0\\\\0720\\\\072foo\""],
+    [false, "./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 \\060:\\060:foo\n",
+      "Manifest invalid for stream 1: invalid file token \"\\\\060:\\\\060:foo\""],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\057bar\n"],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\072\n"],
+    [true, ".\\057Data d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n"],
+    [true, "\\056\\057Data d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n"],
+    [true, "./\\134444 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n"],
+    [false, "./\\\\444 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \"./\\\\\\\\444\""],
+    [true, "./\\011foo d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n"],
+    [false, "./\\011/.. d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \"./\\\\011/..\""],
+    [false, ".\\056\\057 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \".\\\\056\\\\057\""],
+    [false, ".\\057\\056 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \".\\\\057\\\\056\""],
+    [false, ".\\057Data d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\444\n",
+      "Manifest invalid for stream 1: >8-bit encoded chars not allowed on file token \"0:0:foo\\\\444\""],
+    [false, "./\\444 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+      "Manifest invalid for stream 1: >8-bit encoded chars not allowed on stream token \"./\\\\444\""],
+    [false, "./\tfoo d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \"./\\tfoo\""],
+    [false, "./foo\\ d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \"./foo\\\\\""],
+    [false, "./foo\\r d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \"./foo\\\\r\""],
+    [false, "./foo\\444 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+      "Manifest invalid for stream 1: >8-bit encoded chars not allowed on stream token \"./foo\\\\444\""],
+    [false, "./foo\\888 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \"./foo\\\\888\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\\n",
+      "Manifest invalid for stream 1: invalid file token \"0:0:foo\\\\\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\r\n",
+      "Manifest invalid for stream 1: invalid file token \"0:0:foo\\\\r\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\444\n",
+      "Manifest invalid for stream 1: >8-bit encoded chars not allowed on file token \"0:0:foo\\\\444\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\888\n",
+      "Manifest invalid for stream 1: invalid file token \"0:0:foo\\\\888\""],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\057/bar\n",
+      "Manifest invalid for stream 1: invalid file token \"0:0:foo\\\\057/bar\""],
+    [false, ".\\057/Data d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+      "Manifest invalid for stream 1: missing or invalid stream name \".\\\\057/Data\""],
+    [true, "./Data\\040Folder d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n"],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\057foo/bar\n",
+      "Manifest invalid for stream 1: invalid file token \"0:0:\\\\057foo/bar\""],
+    [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\134057foo/bar\n"],
+    [false, ". d41d8cd98f00b204e9800998ecf8427e+0 \\040:\\040:foo.txt\n",
+      "Manifest invalid for stream 1: invalid file token \"\\\\040:\\\\040:foo.txt\""],
+  ].each do |ok, manifest, expected_error=nil|
+    define_method "test_validate manifest #{manifest.inspect}" do
+      assert_equal ok, Keep::Manifest.valid?(manifest)
+      if ok
+        assert Keep::Manifest.validate! manifest
+      else
+        begin
+          Keep::Manifest.validate! manifest
+        rescue ArgumentError => e
+          msg = e.message
+        end
+        refute_nil msg, "Expected ArgumentError"
+        assert msg.include?(expected_error), "Did not find expected error message. Expected: #{expected_error}; Actual: #{msg}"
+      end
+    end
+  end
+end
diff --git a/services/api/.gitignore b/services/api/.gitignore
new file mode 100644 (file)
index 0000000..2cda8bc
--- /dev/null
@@ -0,0 +1,35 @@
+# Ignore the default SQLite database.
+/db/*.sqlite3
+
+# Ignore all logfiles and tempfiles.
+/log
+/tmp
+
+# Sensitive files and local configuration
+/config/database.yml
+/config/initializers/omniauth.rb
+/config/application.yml
+
+# asset cache
+/public/assets/
+
+/config/environments/development.rb
+/config/environments/production.rb
+/config/environments/test.rb
+
+# Capistrano files are coming from another repo
+/Capfile*
+/config/deploy*
+
+# SimpleCov reports
+/coverage
+
+# Dev/test SSL certificates
+/self-signed.key
+/self-signed.pem
+
+# Generated git-commit.version file
+/git-commit.version
+
+# Generated when building distribution packages
+/package-build.version
diff --git a/services/api/Gemfile b/services/api/Gemfile
new file mode 100644 (file)
index 0000000..25e441b
--- /dev/null
@@ -0,0 +1,79 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+source 'https://rubygems.org'
+
+gem 'rails', '~> 4.2'
+gem 'responders', '~> 2.0'
+gem 'protected_attributes'
+
+group :test, :development do
+  gem 'factory_bot_rails'
+  gem 'database_cleaner'
+  gem 'ruby-prof'
+  # Note: "require: false" here tells bunder not to automatically
+  # 'require' the packages during application startup. Installation is
+  # still mandatory.
+  gem 'test-unit', '~> 3.0', require: false
+  gem 'simplecov', '~> 0.7.1', require: false
+  gem 'simplecov-rcov', require: false
+  gem 'mocha', require: false
+end
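+
+# Illustrative sketch (an assumption, not part of the original file):
+# with "require: false" above, test code must load these gems itself
+# before use, e.g.
+#
+#   require 'simplecov'
+#   SimpleCov.start
+#
+# at the top of the test helper.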
+
+# We need this dependency because of crunchv1
+gem 'arvados-cli'
+
+# We'll need to update related code prior to Rails 5.
+# See: https://github.com/rails/activerecord-deprecated_finders
+gem 'activerecord-deprecated_finders', require: 'active_record/deprecated_finders'
+
+# pg is the only supported database driver.
+# Note: Rails 4.2 is not compatible with pg 1.0
+#       (See: https://github.com/rails/rails/pull/31671)
+gem 'pg', '~> 0.18'
+
+gem 'multi_json'
+gem 'oj'
+
+# for building assets
+gem 'sass-rails',   '~> 4.0'
+gem 'coffee-rails', '~> 4.0'
+gem 'therubyracer'
+gem 'uglifier', '~> 2.0'
+
+gem 'jquery-rails'
+
+gem 'rvm-capistrano', :group => :test
+
+gem 'acts_as_api'
+
+gem 'passenger'
+
+# Restricted because omniauth >= 1.5.0 requires Ruby >= 2.1.9:
+gem 'omniauth', '~> 1.4.0'
+gem 'omniauth-oauth2', '~> 1.1'
+
+gem 'andand'
+
+gem 'test_after_commit', :group => :test
+
+gem 'trollop'
+gem 'faye-websocket'
+
+gem 'themes_for_rails', git: 'https://github.com/curoverse/themes_for_rails'
+
+gem 'arvados', '>= 1.3.1.20190301212059'
+gem 'httpclient'
+
+gem 'sshkey'
+gem 'safe_yaml'
+gem 'lograge'
+gem 'logstash-event'
+
+gem 'rails-observers'
+
+# Install any plugin gems
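+# (For example, a hypothetical file at lib/my_plugin/Gemfile containing
+# ordinary "gem ..." lines would be evaluated in this Gemfile's
+# context.)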
+Dir.glob(File.join(File.dirname(__FILE__), 'lib', '**', "Gemfile")) do |f|
+    eval(IO.read(f), binding)
+end
diff --git a/services/api/Gemfile.lock b/services/api/Gemfile.lock
new file mode 100644 (file)
index 0000000..6e2fa2c
--- /dev/null
@@ -0,0 +1,325 @@
+GIT
+  remote: https://github.com/curoverse/themes_for_rails
+  revision: 61154877047d2346890bda0b7be5827cf51a6a76
+  specs:
+    themes_for_rails (0.5.1)
+      rails (>= 3.0.0)
+
+GEM
+  remote: https://rubygems.org/
+  specs:
+    actionmailer (4.2.11)
+      actionpack (= 4.2.11)
+      actionview (= 4.2.11)
+      activejob (= 4.2.11)
+      mail (~> 2.5, >= 2.5.4)
+      rails-dom-testing (~> 1.0, >= 1.0.5)
+    actionpack (4.2.11)
+      actionview (= 4.2.11)
+      activesupport (= 4.2.11)
+      rack (~> 1.6)
+      rack-test (~> 0.6.2)
+      rails-dom-testing (~> 1.0, >= 1.0.5)
+      rails-html-sanitizer (~> 1.0, >= 1.0.2)
+    actionview (4.2.11)
+      activesupport (= 4.2.11)
+      builder (~> 3.1)
+      erubis (~> 2.7.0)
+      rails-dom-testing (~> 1.0, >= 1.0.5)
+      rails-html-sanitizer (~> 1.0, >= 1.0.3)
+    activejob (4.2.11)
+      activesupport (= 4.2.11)
+      globalid (>= 0.3.0)
+    activemodel (4.2.11)
+      activesupport (= 4.2.11)
+      builder (~> 3.1)
+    activerecord (4.2.11)
+      activemodel (= 4.2.11)
+      activesupport (= 4.2.11)
+      arel (~> 6.0)
+    activerecord-deprecated_finders (1.0.4)
+    activesupport (4.2.11)
+      i18n (~> 0.7)
+      minitest (~> 5.1)
+      thread_safe (~> 0.3, >= 0.3.4)
+      tzinfo (~> 1.1)
+    acts_as_api (1.0.1)
+      activemodel (>= 3.0.0)
+      activesupport (>= 3.0.0)
+      rack (>= 1.1.0)
+    addressable (2.6.0)
+      public_suffix (>= 2.0.2, < 4.0)
+    andand (1.3.3)
+    arel (6.0.4)
+    arvados (1.3.1.20190301212059)
+      activesupport (>= 3)
+      andand (~> 1.3, >= 1.3.3)
+      cure-google-api-client (>= 0.7, < 0.8.9)
+      i18n (~> 0)
+      json (>= 1.7.7, < 3)
+      jwt (>= 0.1.5, < 2)
+    arvados-cli (1.3.1.20190211211047)
+      activesupport (>= 3.2.13, < 5)
+      andand (~> 1.3, >= 1.3.3)
+      arvados (~> 1.3.0, >= 1.3.0)
+      curb (~> 0.8)
+      cure-google-api-client (~> 0.6, >= 0.6.3, < 0.8.9)
+      json (>= 1.7.7, < 3)
+      oj (~> 3.0)
+      optimist (~> 3.0)
+    autoparse (0.3.3)
+      addressable (>= 2.3.1)
+      extlib (>= 0.9.15)
+      multi_json (>= 1.0.0)
+    builder (3.2.3)
+    capistrano (2.15.9)
+      highline
+      net-scp (>= 1.0.0)
+      net-sftp (>= 2.0.0)
+      net-ssh (>= 2.0.14)
+      net-ssh-gateway (>= 1.1.0)
+    coffee-rails (4.2.2)
+      coffee-script (>= 2.2.0)
+      railties (>= 4.0.0)
+    coffee-script (2.4.1)
+      coffee-script-source
+      execjs
+    coffee-script-source (1.12.2)
+    concurrent-ruby (1.1.4)
+    crass (1.0.4)
+    curb (0.9.8)
+    cure-google-api-client (0.8.7.1)
+      activesupport (>= 3.2, < 5.0)
+      addressable (~> 2.3)
+      autoparse (~> 0.3)
+      extlib (~> 0.9)
+      faraday (~> 0.9)
+      googleauth (~> 0.3)
+      launchy (~> 2.4)
+      multi_json (~> 1.10)
+      retriable (~> 1.4)
+      signet (~> 0.6)
+    database_cleaner (1.7.0)
+    erubis (2.7.0)
+    eventmachine (1.2.6)
+    execjs (2.7.0)
+    extlib (0.9.16)
+    factory_bot (4.11.1)
+      activesupport (>= 3.0.0)
+    factory_bot_rails (4.11.1)
+      factory_bot (~> 4.11.1)
+      railties (>= 3.0.0)
+    faraday (0.12.2)
+      multipart-post (>= 1.2, < 3)
+    faye-websocket (0.10.7)
+      eventmachine (>= 0.12.0)
+      websocket-driver (>= 0.5.1)
+    globalid (0.4.1)
+      activesupport (>= 4.2.0)
+    googleauth (0.8.0)
+      faraday (~> 0.12)
+      jwt (>= 1.4, < 3.0)
+      memoist (~> 0.16)
+      multi_json (~> 1.11)
+      os (>= 0.9, < 2.0)
+      signet (~> 0.7)
+    hashie (3.5.7)
+    highline (1.7.10)
+    hike (1.2.3)
+    httpclient (2.8.3)
+    i18n (0.9.5)
+      concurrent-ruby (~> 1.0)
+    jquery-rails (4.3.3)
+      rails-dom-testing (>= 1, < 3)
+      railties (>= 4.2.0)
+      thor (>= 0.14, < 2.0)
+    json (2.2.0)
+    jwt (1.5.6)
+    launchy (2.4.3)
+      addressable (~> 2.3)
+    libv8 (3.16.14.19)
+    lograge (0.10.0)
+      actionpack (>= 4)
+      activesupport (>= 4)
+      railties (>= 4)
+      request_store (~> 1.0)
+    logstash-event (1.2.02)
+    loofah (2.2.3)
+      crass (~> 1.0.2)
+      nokogiri (>= 1.5.9)
+    mail (2.7.1)
+      mini_mime (>= 0.1.1)
+    memoist (0.16.0)
+    metaclass (0.0.4)
+    mini_mime (1.0.1)
+    mini_portile2 (2.4.0)
+    minitest (5.11.3)
+    mocha (1.5.0)
+      metaclass (~> 0.0.1)
+    multi_json (1.13.1)
+    multi_xml (0.6.0)
+    multipart-post (2.0.0)
+    net-scp (1.2.1)
+      net-ssh (>= 2.6.5)
+    net-sftp (2.1.2)
+      net-ssh (>= 2.6.5)
+    net-ssh (4.2.0)
+    net-ssh-gateway (2.0.0)
+      net-ssh (>= 4.0.0)
+    nokogiri (1.9.1)
+      mini_portile2 (~> 2.4.0)
+    oauth2 (1.4.0)
+      faraday (>= 0.8, < 0.13)
+      jwt (~> 1.0)
+      multi_json (~> 1.3)
+      multi_xml (~> 0.5)
+      rack (>= 1.2, < 3)
+    oj (3.7.9)
+    omniauth (1.4.3)
+      hashie (>= 1.2, < 4)
+      rack (>= 1.6.2, < 3)
+    omniauth-oauth2 (1.5.0)
+      oauth2 (~> 1.1)
+      omniauth (~> 1.2)
+    optimist (3.0.0)
+    os (1.0.0)
+    passenger (5.3.0)
+      rack
+      rake (>= 0.8.1)
+    pg (0.21.0)
+    power_assert (1.1.1)
+    protected_attributes (1.1.4)
+      activemodel (>= 4.0.1, < 5.0)
+    public_suffix (3.0.3)
+    rack (1.6.11)
+    rack-test (0.6.3)
+      rack (>= 1.0)
+    rails (4.2.11)
+      actionmailer (= 4.2.11)
+      actionpack (= 4.2.11)
+      actionview (= 4.2.11)
+      activejob (= 4.2.11)
+      activemodel (= 4.2.11)
+      activerecord (= 4.2.11)
+      activesupport (= 4.2.11)
+      bundler (>= 1.3.0, < 2.0)
+      railties (= 4.2.11)
+      sprockets-rails
+    rails-deprecated_sanitizer (1.0.3)
+      activesupport (>= 4.2.0.alpha)
+    rails-dom-testing (1.0.9)
+      activesupport (>= 4.2.0, < 5.0)
+      nokogiri (~> 1.6)
+      rails-deprecated_sanitizer (>= 1.0.1)
+    rails-html-sanitizer (1.0.4)
+      loofah (~> 2.2, >= 2.2.2)
+    rails-observers (0.1.5)
+      activemodel (>= 4.0)
+    railties (4.2.11)
+      actionpack (= 4.2.11)
+      activesupport (= 4.2.11)
+      rake (>= 0.8.7)
+      thor (>= 0.18.1, < 2.0)
+    rake (12.3.2)
+    ref (2.0.0)
+    request_store (1.4.1)
+      rack (>= 1.4)
+    responders (2.4.0)
+      actionpack (>= 4.2.0, < 5.3)
+      railties (>= 4.2.0, < 5.3)
+    retriable (1.4.1)
+    ruby-prof (0.17.0)
+    rvm-capistrano (1.5.6)
+      capistrano (~> 2.15.4)
+    safe_yaml (1.0.4)
+    sass (3.2.19)
+    sass-rails (4.0.5)
+      railties (>= 4.0.0, < 5.0)
+      sass (~> 3.2.2)
+      sprockets (~> 2.8, < 3.0)
+      sprockets-rails (~> 2.0)
+    signet (0.11.0)
+      addressable (~> 2.3)
+      faraday (~> 0.9)
+      jwt (>= 1.5, < 3.0)
+      multi_json (~> 1.10)
+    simplecov (0.7.1)
+      multi_json (~> 1.0)
+      simplecov-html (~> 0.7.1)
+    simplecov-html (0.7.1)
+    simplecov-rcov (0.2.3)
+      simplecov (>= 0.4.1)
+    sprockets (2.12.5)
+      hike (~> 1.2)
+      multi_json (~> 1.0)
+      rack (~> 1.0)
+      tilt (~> 1.1, != 1.3.0)
+    sprockets-rails (2.3.3)
+      actionpack (>= 3.0)
+      activesupport (>= 3.0)
+      sprockets (>= 2.8, < 4.0)
+    sshkey (1.9.0)
+    test-unit (3.2.7)
+      power_assert
+    test_after_commit (1.1.0)
+      activerecord (>= 3.2)
+    therubyracer (0.12.3)
+      libv8 (~> 3.16.14.15)
+      ref
+    thor (0.20.3)
+    thread_safe (0.3.6)
+    tilt (1.4.1)
+    trollop (2.9.9)
+    tzinfo (1.2.5)
+      thread_safe (~> 0.1)
+    uglifier (2.7.2)
+      execjs (>= 0.3.0)
+      json (>= 1.8.0)
+    websocket-driver (0.7.0)
+      websocket-extensions (>= 0.1.0)
+    websocket-extensions (0.1.3)
+
+PLATFORMS
+  ruby
+
+DEPENDENCIES
+  activerecord-deprecated_finders
+  acts_as_api
+  andand
+  arvados (>= 1.3.1.20190301212059)
+  arvados-cli
+  coffee-rails (~> 4.0)
+  database_cleaner
+  factory_bot_rails
+  faye-websocket
+  httpclient
+  jquery-rails
+  lograge
+  logstash-event
+  mocha
+  multi_json
+  oj
+  omniauth (~> 1.4.0)
+  omniauth-oauth2 (~> 1.1)
+  passenger
+  pg (~> 0.18)
+  protected_attributes
+  rails (~> 4.2)
+  rails-observers
+  responders (~> 2.0)
+  ruby-prof
+  rvm-capistrano
+  safe_yaml
+  sass-rails (~> 4.0)
+  simplecov (~> 0.7.1)
+  simplecov-rcov
+  sshkey
+  test-unit (~> 3.0)
+  test_after_commit
+  themes_for_rails!
+  therubyracer
+  trollop
+  uglifier (~> 2.0)
+
+BUNDLED WITH
+   1.17.2
diff --git a/services/api/README b/services/api/README
new file mode 100644 (file)
index 0000000..7c36f23
--- /dev/null
@@ -0,0 +1,261 @@
+== Welcome to Rails
+
+Rails is a web-application framework that includes everything needed to create
+database-backed web applications according to the Model-View-Control pattern.
+
+This pattern splits the view (also called the presentation) into "dumb"
+templates that are primarily responsible for inserting pre-built data in between
+HTML tags. The model contains the "smart" domain objects (such as Account,
+Product, Person, Post) that hold all the business logic and know how to
+persist themselves to a database. The controller handles the incoming requests
+(such as Save New Account, Update Product, Show Post) by manipulating the model
+and directing data to the view.
+
+In Rails, the model is handled by what's called an object-relational mapping
+layer entitled Active Record. This layer allows you to present the data from
+database rows as objects and embellish these data objects with business logic
+methods. You can read more about Active Record in
+link:files/vendor/rails/activerecord/README.html.
+
+The controller and view are handled by Action Pack, which splits the work
+between its two parts: Action View and Action Controller. These two layers
+are bundled in a single package due to their heavy interdependence. This is
+unlike the relationship between Active Record and Action Pack, which are much
+more independent of each other. Each of these packages can be used
+independently outside of
+Rails. You can read more about Action Pack in
+link:files/vendor/rails/actionpack/README.html.
+
+
+== Getting Started
+
+1. At the command prompt, create a new Rails application:
+       <tt>rails new myapp</tt> (where <tt>myapp</tt> is the application name)
+
+2. Change directory to <tt>myapp</tt> and start the web server:
+       <tt>cd myapp; rails server</tt> (run with --help for options)
+
+3. Go to http://localhost:3000/ and you'll see:
+       "Welcome aboard: You're riding Ruby on Rails!"
+
+4. Follow the guidelines to start developing your application. You may find
+the following resources handy:
+
+* The Getting Started Guide: http://guides.rubyonrails.org/getting_started.html
+* Ruby on Rails Tutorial Book: http://www.railstutorial.org/
+
+
+== Debugging Rails
+
+Sometimes your application goes wrong. Fortunately there are a lot of tools that
+will help you debug it and get it back on the rails.
+
+The first place to check is the application log files. Keep "tail -f" commands
+running on server.log and development.log; Rails automatically writes
+debugging and runtime information to these files. Debugging info will also be
+shown in the browser for requests from 127.0.0.1.
+
+You can also log your own messages directly into the log file from your code
+using the Ruby logger class from inside your controllers. Example:
+
+  class WeblogController < ActionController::Base
+    def destroy
+      @weblog = Weblog.find(params[:id])
+      @weblog.destroy
+      logger.info("#{Time.now} Destroyed Weblog ID ##{@weblog.id}!")
+    end
+  end
+
+The result will be a message in your log file along the lines of:
+
+  Mon Oct 08 14:22:29 +1000 2007 Destroyed Weblog ID #1!
+
+More information on how to use the logger is at http://www.ruby-doc.org/core/
+
+Also, Ruby documentation can be found at http://www.ruby-lang.org/. There are
+several books available online as well:
+
+* Programming Ruby: http://www.ruby-doc.org/docs/ProgrammingRuby/ (Pickaxe)
+* Learn to Program: http://pine.fm/LearnToProgram/ (a beginner's guide)
+
+These two books will bring you up to speed on the Ruby language and also on
+programming in general.
+
+
+== Debugger
+
+Debugger support is available through the debugger command when you start your
+Mongrel or WEBrick server with --debugger. This means that you can break out of
+execution at any point in the code, investigate and change the model, and then
+resume execution! You need to install ruby-debug to run the server in debugging
+mode. With gems, use <tt>sudo gem install ruby-debug</tt>. Example:
+
+  class WeblogController < ActionController::Base
+    def index
+      @posts = Post.all
+      debugger
+    end
+  end
+
+So the controller will accept the action, run the first line, then present you
+with an IRB prompt in the server window. Here you can do things like:
+
+  >> @posts.inspect
+  => "[#<Post:0x14a6be8
+          @attributes={"title"=>nil, "body"=>nil, "id"=>"1"}>,
+       #<Post:0x14a6620
+          @attributes={"title"=>"Rails", "body"=>"Only ten..", "id"=>"2"}>]"
+  >> @posts.first.title = "hello from a debugger"
+  => "hello from a debugger"
+
+...and even better, you can examine how your runtime objects actually work:
+
+  >> f = @posts.first
+  => #<Post:0x13630c4 @attributes={"title"=>nil, "body"=>nil, "id"=>"1"}>
+  >> f.
+  Display all 152 possibilities? (y or n)
+
+Finally, when you're ready to resume execution, you can enter "cont".
+
+
+== Console
+
+The console is a Ruby shell, which allows you to interact with your
+application's domain model. Here you'll have all parts of the application
+configured, just as they are when the application is running. You can inspect
+domain models, change values, and save to the database. Starting the script
+without arguments will launch it in the development environment.
+
+To start the console, run <tt>rails console</tt> from the application
+directory.
+
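+An illustrative session (reusing the hypothetical Weblog model from the
+debugging examples above):
+
+  $ rails console
+  >> w = Weblog.first
+  >> w.title = "edited from the console"
+  >> w.save
+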
+Options:
+
+* Passing the <tt>-s, --sandbox</tt> argument will roll back any modifications
+  made to the database.
+* Passing an environment name as an argument will load the corresponding
+  environment. Example: <tt>rails console production</tt>.
+
+To reload your controllers and models after launching the console, run
+<tt>reload!</tt>.
+
+More information about irb can be found at:
+link:http://www.rubycentral.org/pickaxe/irb.html
+
+
+== dbconsole
+
+You can go to the command line of your database directly through <tt>rails
+dbconsole</tt>. You will be connected to the database with the credentials
+defined in database.yml. Starting the script without arguments will connect you
+to the development database. Passing an argument will connect you to a different
+database, like <tt>rails dbconsole production</tt>. Currently works for MySQL,
+PostgreSQL and SQLite 3.
+
+== Description of Contents
+
+The default directory structure of a generated Ruby on Rails application:
+
+  |-- app
+  |   |-- assets
+  |   |   |-- images
+  |   |   |-- javascripts
+  |   |   `-- stylesheets
+  |   |-- controllers
+  |   |-- helpers
+  |   |-- mailers
+  |   |-- models
+  |   `-- views
+  |       `-- layouts
+  |-- config
+  |   |-- environments
+  |   |-- initializers
+  |   `-- locales
+  |-- db
+  |-- doc
+  |-- lib
+  |   `-- tasks
+  |-- log
+  |-- public
+  |-- script
+  |-- test
+  |   |-- fixtures
+  |   |-- functional
+  |   |-- integration
+  |   |-- performance
+  |   `-- unit
+  |-- tmp
+  |   |-- cache
+  |   |-- pids
+  |   |-- sessions
+  |   `-- sockets
+  `-- vendor
+      |-- assets
+      |   `-- stylesheets
+      `-- plugins
+
+app
+  Holds all the code that's specific to this particular application.
+
+app/assets
+  Contains subdirectories for images, stylesheets, and JavaScript files.
+
+app/controllers
+  Holds controllers that should be named like weblogs_controller.rb for
+  automated URL mapping. All controllers should descend from
+  ApplicationController which itself descends from ActionController::Base.
+
+app/models
+  Holds models that should be named like post.rb. Models descend from
+  ActiveRecord::Base by default.
+
+app/views
+  Holds the template files for the view that should be named like
+  weblogs/index.html.erb for the WeblogsController#index action. All views use
+  eRuby syntax by default.
+
+app/views/layouts
+  Holds the template files for layouts to be used with views. This models the
+  common header/footer method of wrapping views. In your views, define a layout
+  using <tt>layout :default</tt> and create a file named default.html.erb.
+  Inside default.html.erb, call <% yield %> to render the view using this
+  layout.
+
+app/helpers
+  Holds view helpers that should be named like weblogs_helper.rb. These are
+  generated for you automatically when using generators for controllers.
+  Helpers can be used to wrap functionality for your views into methods.
+
+config
+  Configuration files for the Rails environment, the routing map, the database,
+  and other dependencies.
+
+db
+  Contains the database schema in schema.rb. db/migrate contains the
+  sequence of migrations for your schema.
+
+doc
+  This directory is where your application documentation will be stored when
+  generated using <tt>rake doc:app</tt>.
+
+lib
+  Application-specific libraries. Basically, any kind of custom code that
+  doesn't belong under controllers, models, or helpers. This directory is in
+  the load path.
+
+public
+  The directory available for the web server. Also contains the dispatchers and the
+  default HTML files. This should be set as the DOCUMENT_ROOT of your web
+  server.
+
+script
+  Helper scripts for automation and generation.
+
+test
+  Unit and functional tests along with fixtures. When using the rails generate
+  command, template test files will be generated for you and placed in this
+  directory.
+
+vendor
+  External libraries that the application depends on. Also includes the plugins
+  subdirectory. If the app has frozen rails, those gems also go here, under
+  vendor/rails/. This directory is in the load path.
diff --git a/services/api/Rakefile b/services/api/Rakefile
new file mode 100644 (file)
index 0000000..3de2d27
--- /dev/null
@@ -0,0 +1,89 @@
+#!/usr/bin/env rake
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Add your own tasks in files placed in lib/tasks ending in .rake,
+# for example lib/tasks/capistrano.rake, and they will automatically be available to Rake.
+
+require File.expand_path('../config/application', __FILE__)
+
+Server::Application.load_tasks
+
+namespace :test do
+  task(:run).clear
+  # Copied from the definition in Rails 3.2.
+  # This may need to be updated if we upgrade Rails.
+  task :run do
+    errors = %w(test:units test:functionals test:integration test:tasks).collect do |task|
+      begin
+        Rake::Task[task].invoke
+        nil
+      rescue => e
+        { :task => task, :exception => e }
+      end
+    end.compact
+
+    if errors.any?
+      puts errors.map { |e| "Errors running #{e[:task]}! #{e[:exception].inspect}" }.join("\n")
+      abort
+    end
+  end
+end
+
+namespace :db do
+  namespace :structure do
+    task :dump do
+      require 'tempfile'
+      origfnm = File.expand_path('../db/structure.sql', __FILE__)
+      tmpfnm = Tempfile.new 'structure.sql', File.expand_path('..', origfnm)
+      copyright_done = false
+      started = false
+      begin
+        tmpfile = File.new tmpfnm, 'w'
+        origfile = File.new origfnm
+        origfile.each_line do |line|
+          if !copyright_done
+            if !/Copyright .* Arvados/.match(line)
+               tmpfile.write "-- Copyright (C) The Arvados Authors. All rights reserved.\n--\n-- SPDX-License-Identifier: AGPL-3.0\n\n"
+            end
+            copyright_done = true
+          end
+
+          if !started && /^[^-\n]/ !~ line
+            # Ignore the "PostgreSQL database dump" comment block,
+            # which varies from one client version to the next.
+            next
+          end
+          started = true
+
+          if /^SET (lock_timeout|idle_in_transaction_session_timeout|row_security) = / =~ line
+            # Avoid edit wars between versions that do/don't write (and can/can't execute) this line.
+            next
+          elsif /^COMMENT ON EXTENSION/ =~ line
+            # Avoid warning message when loading:
+            # "structure.sql:22: ERROR:  must be owner of extension plpgsql"
+            tmpfile.write "-- "
+          end
+          tmpfile.write line
+        end
+        origfile.close
+        tmpfile.close
+        File.rename tmpfnm, origfnm
+        tmpfnm = false
+      ensure
+        File.unlink tmpfnm if tmpfnm
+      end
+    end
+  end
+end
+
+# Work around Rails3+PostgreSQL9.5 incompatibility (pg_dump used to
+# accept -i as a no-op, but now it's not accepted at all).
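+# Illustrative effect: a shell-out such as `pg_dump -i mydb` is
+# rewritten to `pg_dump mydb` before execution; all other commands pass
+# through unchanged.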
+module Kernel
+  alias_method :orig_backtick, :`
+  def `(*args) #`#` sorry, parsers
+    args[0].sub!(/\Apg_dump -i /, 'pg_dump ') rescue nil
+    orig_backtick(*args)
+  end
+end
diff --git a/services/api/app/assets/images/logo.png b/services/api/app/assets/images/logo.png
new file mode 100644 (file)
index 0000000..4db96ef
Binary files /dev/null and b/services/api/app/assets/images/logo.png differ
diff --git a/services/api/app/assets/images/rails.png b/services/api/app/assets/images/rails.png
new file mode 100644 (file)
index 0000000..d5edc04
Binary files /dev/null and b/services/api/app/assets/images/rails.png differ
diff --git a/services/api/app/assets/stylesheets/api_client_authorizations.css.scss b/services/api/app/assets/stylesheets/api_client_authorizations.css.scss
new file mode 100644 (file)
index 0000000..ec87eb2
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the ApiClientAuthorizations controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/api_clients.css.scss b/services/api/app/assets/stylesheets/api_clients.css.scss
new file mode 100644 (file)
index 0000000..61d7e53
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the ApiClients controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/application.css b/services/api/app/assets/stylesheets/application.css
new file mode 100644 (file)
index 0000000..742a575
--- /dev/null
@@ -0,0 +1,183 @@
+/* Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 */
+
+/*
+ * This is a manifest file that'll automatically include all the stylesheets available in this directory
+ * and any sub-directories. You're free to add application-wide styles to this file and they'll appear at
+ * the top of the compiled file, but it's generally better to create a new file per style scope.
+ *= require_self
+ *= require_tree . 
+*/
+
+.contain-align-left {
+    text-align: left;
+}
+
+body {
+    margin: 0;
+}
+body > div {
+    margin: 2px;
+}
+div#footer {
+    font-family: Verdana,Arial,sans-serif;
+    font-size: 12px;
+    margin-top: 24px;
+    border-top: 1px solid #ccc;
+}
+div#footer, div#footer a {
+    color: #777;
+}
+div#header {
+    margin: 0;
+    padding: .5em 1em;
+    background: #000;
+    font-weight: bold;
+    font-size: 18px;
+    font-family: Verdana,Arial,sans-serif;
+    vertical-align: middle;
+    color: #ddd;
+}
+div#header > div {
+    display: inline-block;
+    font-size: 12px;
+    line-height: 18px;
+}
+div#header > .apptitle {
+    font-size: 18px;
+}
+div#header a.logout {
+    color: #fff;
+    font-weight: normal;
+}
+div#header button {
+    font-size: 12px;
+}
+div#header span.beta {
+    opacity: 0.5;
+}
+div#header span.beta > span {
+    border-top: 1px solid #fff;
+    border-bottom: 1px solid #fff;
+    font-size: 0.8em;
+}
+img.curoverse-logo {
+    width: 221px;
+    height: 44px;
+}
+#intropage {
+    font-family: Verdana,Arial,sans-serif;
+}
+#errorpage {
+    font-family: Verdana,Arial,sans-serif;
+}
+
+div.full-page-tab-set > ul > li {
+    font-size: 14px;
+}
+.titlebanner p {
+    font-size: 16px;
+}
+p {
+    font-size: 12px;
+}
+.small-text {
+    font-size: 12px;
+}
+.autoui-icon-float-left {
+    float: left;
+    margin-right: .3em;
+}
+.autoui-pad {
+    padding: 0 1em;
+}
+table.datatablesme {
+    border: 0;
+    border-collapse: collapse;
+    width: 100%;
+}
+.loadinggif {
+    background: #fff url(/images/ajax-loader-16-fff-aad.gif) no-repeat;
+}
+.clientprogressgif {
+    /* warning: depends on 24px outer container. */
+    position: absolute;
+    left: 4px;
+    top: 4px;
+    width: 16px;
+    height: 16px;
+}
+.counttable {
+    width: 100%;
+    display: table;
+    border-collapse: collapse;
+    margin-bottom: 0.5em;
+}
+.counttable > div {
+    display: table-row;
+}
+.counttable > div > div {
+    display: table-cell;
+    text-align: center;
+    background: #ccf;
+    padding: 0 2px;
+    font-size: 0.8em;
+}
+.counttable > div > div.counter {
+    font-size: 2em;
+    padding: 4px 2px 0 2px;
+}
+table.admin_table {
+    border-collapse: collapse;
+}
+table.admin_table tbody tr {
+    height: 2.5em;
+}
+table.admin_table th,table.admin_table td {
+    text-align: left;
+    border: 1px solid #bbb;
+    padding: 3px;
+}
+table.admin_table tbody tr:hover {
+    background: #ff8;
+}
+table.admin_table tbody tr:hover td {
+    background: transparent;
+}
+
+div.helptopics {
+    position: fixed;
+}
+div.helptopics ul {
+    padding: 0;
+    margin-left: 1em;
+    list-style-type: none;
+}
+div.helptopics ul li {
+    margin: 0 0 1em 0;
+}
+div.helpcontent li {
+    margin-bottom: .5em;
+}
+
+div.preview {
+    color: red;
+    font-weight: bold;
+    text-align: center;
+}
+
+.sudo-warning {
+    padding: 4px 10px;
+    background: #ffdd00;
+    color: red;
+    -webkit-border-radius: 3px;
+    -moz-border-radius: 3px;
+    border-radius: 3px
+}
+
+div#header a.sudo-logout {
+    color: #000;
+    font-weight: bold;
+}
+
diff --git a/services/api/app/assets/stylesheets/authorized_keys.css.scss b/services/api/app/assets/stylesheets/authorized_keys.css.scss
new file mode 100644 (file)
index 0000000..9eeaa89
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the AuthorizedKeys controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/collections.css.scss b/services/api/app/assets/stylesheets/collections.css.scss
new file mode 100644 (file)
index 0000000..7510f17
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Collections controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/commit_ancestors.css.scss b/services/api/app/assets/stylesheets/commit_ancestors.css.scss
new file mode 100644 (file)
index 0000000..5004f86
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the commit_ancestors controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/commits.css.scss b/services/api/app/assets/stylesheets/commits.css.scss
new file mode 100644 (file)
index 0000000..6b4df4d
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the commits controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/groups.css.scss b/services/api/app/assets/stylesheets/groups.css.scss
new file mode 100644 (file)
index 0000000..905e72a
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Groups controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/humans.css.scss b/services/api/app/assets/stylesheets/humans.css.scss
new file mode 100644 (file)
index 0000000..29668c2
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Humans controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/job_tasks.css.scss b/services/api/app/assets/stylesheets/job_tasks.css.scss
new file mode 100644 (file)
index 0000000..0d4d260
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the JobTasks controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/jobs.css.scss b/services/api/app/assets/stylesheets/jobs.css.scss
new file mode 100644 (file)
index 0000000..53b6ca7
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Jobs controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/keep_disks.css.scss b/services/api/app/assets/stylesheets/keep_disks.css.scss
new file mode 100644 (file)
index 0000000..1996f11
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the KeepDisks controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/links.css.scss b/services/api/app/assets/stylesheets/links.css.scss
new file mode 100644 (file)
index 0000000..c2e90ad
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the links controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/logs.css.scss b/services/api/app/assets/stylesheets/logs.css.scss
new file mode 100644 (file)
index 0000000..c8b22f9
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Logs controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/nodes.css b/services/api/app/assets/stylesheets/nodes.css
new file mode 100644 (file)
index 0000000..d1ce011
--- /dev/null
@@ -0,0 +1,41 @@
+/* Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 */
+
+/*
+  Place all the styles related to the matching controller here.
+  They will automatically be included in application.css.
+*/
+.node-status {
+    /* unknown status - might be bad */
+    background: #ff8888;
+}
+.node-status-running .node-status {
+    background: #88ff88;
+}
+.node-status-missing .node-status {
+    background: #ff8888;
+}
+.node-status-terminated .node-status {
+    background: #ffffff;
+}
+
+.node-slurm-state {
+    /* unknown status - might be bad */
+    background: #ff8888;
+}
+.node-status-missing .node-slurm-state {
+    background: #ffffff;
+}
+.node-status-terminated .node-slurm-state {
+    background: #ffffff;
+}
+.node-status-running .node-slurm-state-alloc {
+    background: #88ff88;
+}
+.node-status-running .node-slurm-state-idle {
+    background: #ffbbbb;
+}
+.node-status-running .node-slurm-state-down {
+    background: #ff8888;
+}
diff --git a/services/api/app/assets/stylesheets/nodes.css.scss b/services/api/app/assets/stylesheets/nodes.css.scss
new file mode 100644 (file)
index 0000000..a7b0861
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Nodes controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/pipeline_instances.css.scss b/services/api/app/assets/stylesheets/pipeline_instances.css.scss
new file mode 100644 (file)
index 0000000..7292a9a
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the PipelineInstances controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/pipeline_templates.css.scss b/services/api/app/assets/stylesheets/pipeline_templates.css.scss
new file mode 100644 (file)
index 0000000..40c0cef
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the PipelineTemplates controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/repositories.css.scss b/services/api/app/assets/stylesheets/repositories.css.scss
new file mode 100644 (file)
index 0000000..1dd9a16
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Repositories controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/scaffolds.css.scss b/services/api/app/assets/stylesheets/scaffolds.css.scss
new file mode 100644 (file)
index 0000000..a7a606e
--- /dev/null
@@ -0,0 +1,73 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+body {
+  background-color: #fff;
+  color: #333;
+  font-family: verdana, arial, helvetica, sans-serif;
+  font-size: 13px;
+  line-height: 18px;
+}
+
+p, ol, ul, td {
+  font-family: verdana, arial, helvetica, sans-serif;
+  font-size: 13px;
+  line-height: 18px;
+}
+
+pre {
+  background-color: #eee;
+  padding: 10px;
+  font-size: 11px;
+}
+
+a {
+  color: #000;
+  &:visited {
+    color: #666;
+  }
+  &:hover {
+    color: #fff;
+    background-color: #000;
+  }
+}
+
+div {
+  &.field, &.actions {
+    margin-bottom: 10px;
+  }
+}
+
+#notice {
+  color: green;
+}
+
+.field_with_errors {
+  padding: 2px;
+  background-color: red;
+  display: table;
+}
+
+#error_explanation {
+  width: 450px;
+  border: 2px solid red;
+  padding: 7px;
+  padding-bottom: 0;
+  margin-bottom: 20px;
+  background-color: #f0f0f0;
+  h2 {
+    text-align: left;
+    font-weight: bold;
+    padding: 5px 5px 5px 15px;
+    font-size: 12px;
+    margin: -7px;
+    margin-bottom: 0px;
+    background-color: #c00;
+    color: #fff;
+  }
+  ul li {
+    font-size: 12px;
+    list-style: square;
+  }
+}
diff --git a/services/api/app/assets/stylesheets/specimens.css.scss b/services/api/app/assets/stylesheets/specimens.css.scss
new file mode 100644 (file)
index 0000000..60d630c
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Specimens controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/traits.css.scss b/services/api/app/assets/stylesheets/traits.css.scss
new file mode 100644 (file)
index 0000000..7d2f713
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the Traits controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/virtual_machines.css.scss b/services/api/app/assets/stylesheets/virtual_machines.css.scss
new file mode 100644 (file)
index 0000000..4a94d45
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Place all the styles related to the VirtualMachines controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/controllers/application_controller.rb b/services/api/app/controllers/application_controller.rb
new file mode 100644 (file)
index 0000000..6dbba1a
--- /dev/null
@@ -0,0 +1,631 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'safe_json'
+require 'request_error'
+
+module ApiTemplateOverride
+  def allowed_to_render?(fieldset, field, model, options)
+    return false if !super
+    if options[:select]
+      options[:select].include? field.to_s
+    else
+      true
+    end
+  end
+end
+
+class ActsAsApi::ApiTemplate
+  prepend ApiTemplateOverride
+end
+
+require 'load_param'
+
+class ApplicationController < ActionController::Base
+  include ThemesForRails::ActionController
+  include CurrentApiClient
+  include LoadParam
+  include DbCurrentTime
+
+  respond_to :json
+  protect_from_forgery
+
+  ERROR_ACTIONS = [:render_error, :render_not_found]
+
+  around_filter :set_current_request_id
+  before_filter :disable_api_methods
+  before_filter :set_cors_headers
+  before_filter :respond_with_json_by_default
+  before_filter :remote_ip
+  before_filter :load_read_auths
+  before_filter :require_auth_scope, except: ERROR_ACTIONS
+
+  before_filter :catch_redirect_hint
+  before_filter(:find_object_by_uuid,
+                except: [:index, :create] + ERROR_ACTIONS)
+  before_filter :load_required_parameters
+  before_filter :load_limit_offset_order_params, only: [:index, :contents]
+  before_filter :load_where_param, only: [:index, :contents]
+  before_filter :load_filters_param, only: [:index, :contents]
+  before_filter :find_objects_for_index, :only => :index
+  before_filter :reload_object_before_update, :only => :update
+  before_filter(:render_404_if_no_object,
+                except: [:index, :create] + ERROR_ACTIONS)
+
+  theme Rails.configuration.arvados_theme
+
+  attr_writer :resource_attrs
+
+  begin
+    rescue_from(Exception,
+                ArvadosModel::PermissionDeniedError,
+                :with => :render_error)
+    rescue_from(ActiveRecord::RecordNotFound,
+                ActionController::RoutingError,
+                ActionController::UnknownController,
+                AbstractController::ActionNotFound,
+                :with => :render_not_found)
+  end
+
+  def initialize *args
+    super
+    @object = nil
+    @objects = nil
+    @offset = nil
+    @limit = nil
+    @select = nil
+    @distinct = nil
+    @response_resource_name = nil
+    @attrs = nil
+    @extra_included = nil
+  end
+
+  def default_url_options
+    options = {}
+    if Rails.configuration.host
+      options[:host] = Rails.configuration.host
+    end
+    if Rails.configuration.port
+      options[:port] = Rails.configuration.port
+    end
+    if Rails.configuration.protocol
+      options[:protocol] = Rails.configuration.protocol
+    end
+    options
+  end
+
+  def index
+    if params[:eager] and params[:eager] != '0' and params[:eager] != 0 and params[:eager] != ''
+      @objects.each(&:eager_load_associations)
+    end
+    render_list
+  end
+
+  def show
+    send_json @object.as_api_response(nil, select: @select)
+  end
+
+  def create
+    @object = model_class.new resource_attrs
+
+    if @object.respond_to?(:name) && params[:ensure_unique_name]
+      @object.save_with_unique_name!
+    else
+      @object.save!
+    end
+
+    show
+  end
+
+  def update
+    attrs_to_update = resource_attrs.reject { |k,v|
+      [:kind, :etag, :href].index k
+    }
+    @object.update_attributes! attrs_to_update
+    show
+  end
+
+  def destroy
+    @object.destroy
+    show
+  end
+
+  def catch_redirect_hint
+    if !current_user
+      if params.has_key?('redirect_to') then
+        session[:redirect_to] = params[:redirect_to]
+      end
+    end
+  end
+
+  def render_404_if_no_object
+    render_not_found "Object not found" if !@object
+  end
+
+  def render_error(e)
+    logger.error e.inspect
+    if !e.is_a? RequestError and (e.respond_to? :backtrace and e.backtrace)
+      logger.error e.backtrace.collect { |x| x + "\n" }.join('')
+    end
+    if (@object.respond_to? :errors and
+        @object.errors.andand.full_messages.andand.any?)
+      errors = @object.errors.full_messages
+      logger.error errors.inspect
+    else
+      errors = [e.inspect]
+    end
+    status = e.respond_to?(:http_status) ? e.http_status : 422
+    send_error(*errors, status: status)
+  end
+
+  def render_not_found(e=ActionController::RoutingError.new("Path not found"))
+    logger.error e.inspect
+    send_error("Path not found", status: 404)
+  end
+
+  def render_accepted
+    send_json ({accepted: true}), status: 202
+  end
+
+  protected
+
+  def send_error(*args)
+    if args.last.is_a? Hash
+      err = args.pop
+    else
+      err = {}
+    end
+    err[:errors] ||= args
+    err[:error_token] = [Time.now.utc.to_i, "%08x" % rand(16 ** 8)].join("+")
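+    # Tokens look like e.g. "1552575086+89abcdef" (Unix timestamp plus
+    # 8 random hex digits), which makes individual failures easy to
+    # find in the server logs.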
+    status = err.delete(:status) || 422
+    logger.error "Error #{err[:error_token]}: #{status}"
+    send_json err, status: status
+  end
+
+  def send_json response, opts={}
+    # The obvious render(json: ...) forces a slow JSON encoder. See
+    # #3021 and commit logs. Might be fixed in Rails 4.1.
+    render({
+             text: SafeJSON.dump(response).html_safe,
+             content_type: 'application/json'
+           }.merge opts)
+  end
+
+  def find_objects_for_index
+    @objects ||= model_class.readable_by(*@read_users, {
+      :include_trash => (params[:include_trash] || 'untrash' == action_name),
+      :include_old_versions => params[:include_old_versions]
+    })
+    apply_where_limit_order_params
+  end
+
+  def apply_filters model_class=nil
+    model_class ||= self.model_class
+    @objects = model_class.apply_filters(@objects, @filters)
+  end
+
+  def apply_where_limit_order_params model_class=nil
+    model_class ||= self.model_class
+    apply_filters model_class
+
+    ar_table_name = @objects.table_name
+    if @where.is_a? Hash and @where.any?
+      conditions = ['1=1']
+      @where.each do |attr,value|
+        if attr.to_s == 'any'
+          if value.is_a?(Array) and
+              value.length == 2 and
+              value[0] == 'contains' then
+            ilikes = []
+            model_class.searchable_columns('ilike').each do |column|
+              # Including owner_uuid in an "any column" search will
+              # probably just return a lot of false positives.
+              next if column == 'owner_uuid'
+              ilikes << "#{ar_table_name}.#{column} ilike ?"
+              conditions << "%#{value[1]}%"
+            end
+            if ilikes.any?
+              conditions[0] << ' and (' + ilikes.join(' or ') + ')'
+            end
+          end
+        elsif attr.to_s.match(/^[a-z][_a-z0-9]+$/) and
+            model_class.columns.collect(&:name).index(attr.to_s)
+          if value.nil?
+            conditions[0] << " and #{ar_table_name}.#{attr} is ?"
+            conditions << nil
+          elsif value.is_a? Array
+            if value[0] == 'contains' and value.length == 2
+              conditions[0] << " and #{ar_table_name}.#{attr} like ?"
+              conditions << "%#{value[1]}%"
+            else
+              conditions[0] << " and #{ar_table_name}.#{attr} in (?)"
+              conditions << value
+            end
+          elsif value.is_a? String or value.is_a? Fixnum or value == true or value == false
+            conditions[0] << " and #{ar_table_name}.#{attr}=?"
+            conditions << value
+          elsif value.is_a? Hash
+            # Not quite the same thing as "equal?" but better than nothing?
+            value.each do |k,v|
+              if v.is_a? String
+                conditions[0] << " and #{ar_table_name}.#{attr} ilike ?"
+                conditions << "%#{k}%#{v}%"
+              end
+            end
+          end
+        end
+      end
+      if conditions.length > 1
+        conditions[0].sub!(/^1=1 and /, '')
+        @objects = @objects.
+          where(*conditions)
+      end
+    end
+
+    if @select
+      unless action_name.in? %w(create update destroy)
+        # Map attribute names in @select to real column names, resolve
+        # those to fully-qualified SQL column names, and pass the
+        # resulting string to the select method.
+        columns_list = model_class.columns_for_attributes(@select).
+          map { |s| "#{ar_table_name}.#{ActiveRecord::Base.connection.quote_column_name s}" }
+        @objects = @objects.select(columns_list.join(", "))
+      end
+
+      # This information helps clients understand what they're seeing
+      # (Workbench always expects it), but they can't select it explicitly
+      # because it's not an SQL column.  Always add it.
+      # (This is harmless, given that clients can deduce what they're
+      # looking at by the returned UUID anyway.)
+      @select |= ["kind"]
+    end
+    @objects = @objects.order(@orders.join ", ") if @orders.any?
+    @objects = @objects.limit(@limit)
+    @objects = @objects.offset(@offset)
+    @objects = @objects.uniq(@distinct) if not @distinct.nil?
+  end
+
+  # limit_database_read ensures @objects (which must be an
+  # ActiveRelation) does not return too many results to fit in memory,
+  # by previewing the results and calling @objects.limit() if
+  # necessary.
+  def limit_database_read(model_class:)
+    return if @limit == 0 || @limit == 1
+    model_class ||= self.model_class
+    limit_columns = model_class.limit_index_columns_read
+    limit_columns &= model_class.columns_for_attributes(@select) if @select
+    return if limit_columns.empty?
+    model_class.transaction do
+      limit_query = @objects.
+        except(:select, :distinct).
+        select("(%s) as read_length" %
+               limit_columns.map { |s| "octet_length(#{model_class.table_name}.#{s})" }.join(" + "))
+      new_limit = 0
+      read_total = 0
+      limit_query.each do |record|
+        new_limit += 1
+        read_total += record.read_length.to_i
+        if read_total >= Rails.configuration.max_index_database_read
+          new_limit -= 1 if new_limit > 1
+          @limit = new_limit
+          break
+        elsif new_limit >= @limit
+          break
+        end
+      end
+      @objects = @objects.limit(@limit)
+      # Force @objects to run its query inside this transaction.
+      @objects.each { |_| break }
+    end
+  end
+
+  def resource_attrs
+    return @attrs if @attrs
+    @attrs = params[resource_name]
+    if @attrs.is_a? String
+      @attrs = Oj.strict_load @attrs, symbol_keys: true
+    end
+    unless @attrs.is_a? Hash
+      message = "No #{resource_name}"
+      if resource_name.index('_')
+        message << " (or #{resource_name.camelcase(:lower)})"
+      end
+      message << " hash provided with request"
+      raise ArgumentError.new(message)
+    end
+    %w(created_at modified_by_client_uuid modified_by_user_uuid modified_at).each do |x|
+      @attrs.delete x.to_sym
+    end
+    @attrs = @attrs.symbolize_keys if @attrs.is_a? HashWithIndifferentAccess
+    @attrs
+  end
+
+  # Authentication
+  def load_read_auths
+    @read_auths = []
+    if current_api_client_authorization
+      @read_auths << current_api_client_authorization
+    end
+    # Load reader tokens if this is a read request.
+    # If there are too many reader tokens, assume the request is malicious
+    # and ignore it.
+    if request.get? and params[:reader_tokens] and
+      params[:reader_tokens].size < 100
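+      # "v2" tokens have the form "v2/<authorization-uuid>/<secret>";
+      # extract the secret component so it can be matched against
+      # api_token in the query below.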
+      secrets = params[:reader_tokens].map { |t|
+        if t.is_a? String and t.starts_with? "v2/"
+          t.split("/")[2]
+        else
+          t
+        end
+      }
+      @read_auths += ApiClientAuthorization
+        .includes(:user)
+        .where('api_token IN (?) AND
+                (expires_at IS NULL OR expires_at > CURRENT_TIMESTAMP)',
+               secrets)
+        .to_a
+    end
+    @read_auths.select! { |auth| auth.scopes_allow_request? request }
+    @read_users = @read_auths.map(&:user).uniq
+  end
+
+  def require_login
+    if not current_user
+      respond_to do |format|
+        format.json { send_error("Not logged in", status: 401) }
+        format.html { redirect_to '/auth/joshid' }
+      end
+      false
+    end
+  end
+
+  def admin_required
+    unless current_user and current_user.is_admin
+      send_error("Forbidden", status: 403)
+    end
+  end
+
+  def require_auth_scope
+    unless current_user && @read_auths.any? { |auth| auth.user.andand.uuid == current_user.uuid }
+      if require_login != false
+        send_error("Forbidden", status: 403)
+      end
+      false
+    end
+  end
+
+  def set_current_request_id
+    req_id = request.headers['X-Request-Id']
+    if !req_id || req_id.length < 1 || req_id.length > 1024
+      # Client-supplied ID is either missing or too long to be
+      # considered friendly.
+      req_id = "req-" + Random::DEFAULT.rand(2**128).to_s(36)[0..19]
+    end
+    response.headers['X-Request-Id'] = Thread.current[:request_id] = req_id
+    Rails.logger.tagged(req_id) do
+      yield
+    end
+    Thread.current[:request_id] = nil
+  end
+
+  def append_info_to_payload(payload)
+    super
+    payload[:request_id] = response.headers['X-Request-Id']
+    payload[:client_ipaddr] = @remote_ip
+    payload[:client_auth] = current_api_client_authorization.andand.uuid || nil
+  end
+
+  def disable_api_methods
+    if Rails.configuration.disable_api_methods.
+        include?(controller_name + "." + action_name)
+      send_error("Disabled", status: 404)
+    end
+  end
+
+  def set_cors_headers
+    response.headers['Access-Control-Allow-Origin'] = '*'
+    response.headers['Access-Control-Allow-Methods'] = 'GET, HEAD, PUT, POST, DELETE'
+    response.headers['Access-Control-Allow-Headers'] = 'Authorization, Content-Type'
+    response.headers['Access-Control-Max-Age'] = '86486400'
+  end
+
+  def respond_with_json_by_default
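+    # Respond with JSON when the client does not accept HTML at all,
+    # or lists JSON ahead of HTML in its Accept header.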
+    html_index = request.accepts.index(Mime::HTML)
+    if html_index.nil? or request.accepts[0...html_index].include?(Mime::JSON)
+      request.format = :json
+    end
+  end
+
+  def model_class
+    controller_name.classify.constantize
+  end
+
+  def resource_name             # params[] key used by client
+    controller_name.singularize
+  end
+
+  def table_name
+    controller_name
+  end
+
+  def find_object_by_uuid
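+    # A purely numeric :id is left alone; an :id containing any
+    # non-digit character is assumed to be a UUID and is moved to
+    # params[:uuid] before the lookup below.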
+    if params[:id] and params[:id].match(/\D/)
+      params[:uuid] = params.delete :id
+    end
+    @where = { uuid: params[:uuid] }
+    @offset = 0
+    @limit = 1
+    @orders = []
+    @filters = []
+    @objects = nil
+    find_objects_for_index
+    @object = @objects.first
+  end
+
+  def reload_object_before_update
+    # This is necessary to prevent an ActiveRecord::ReadOnlyRecord
+    # error when updating an object which was retrieved using a join.
+    if @object.andand.readonly?
+      @object = model_class.find_by_uuid(@objects.first.uuid)
+    end
+  end
+
+  def load_json_value(hash, key, must_be_class=nil)
+    if hash[key].is_a? String
+      hash[key] = SafeJSON.load(hash[key])
+      if must_be_class and !hash[key].is_a? must_be_class
+        raise TypeError.new("parameter #{key.to_s} must be a #{must_be_class.to_s}")
+      end
+    end
+  end
+
+  def self.accept_attribute_as_json(attr, must_be_class=nil)
+    before_filter lambda { accept_attribute_as_json attr, must_be_class }
+  end
+  accept_attribute_as_json :properties, Hash
+  accept_attribute_as_json :info, Hash
+  def accept_attribute_as_json(attr, must_be_class)
+    if params[resource_name] and resource_attrs.is_a? Hash
+      if resource_attrs[attr].is_a? Hash
+        # Convert symbol keys to strings (in hashes provided by
+        # resource_attrs)
+        resource_attrs[attr] = resource_attrs[attr].
+          with_indifferent_access.to_hash
+      else
+        load_json_value(resource_attrs, attr, must_be_class)
+      end
+    end
+  end
+
+  def self.accept_param_as_json(key, must_be_class=nil)
+    prepend_before_filter lambda { load_json_value(params, key, must_be_class) }
+  end
+  accept_param_as_json :reader_tokens, Array
+
+  def object_list(model_class:)
+    if @objects.respond_to?(:except)
+      limit_database_read(model_class: model_class)
+    end
+    list = {
+      :kind  => "arvados##{(@response_resource_name || resource_name).camelize(:lower)}List",
+      :etag => "",
+      :self_link => "",
+      :offset => @offset,
+      :limit => @limit,
+      :items => @objects.as_api_response(nil, {select: @select})
+    }
+    if @extra_included
+      list[:included] = @extra_included.as_api_response(nil, {select: @select})
+    end
+    case params[:count]
+    when nil, '', 'exact'
+      if @objects.respond_to? :except
+        list[:items_available] = @objects.
+          except(:limit).except(:offset).
+          count(:id, distinct: true)
+      end
+    when 'none'
+    else
+      raise ArgumentError.new("count parameter must be 'exact' or 'none'")
+    end
+    list
+  end
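+
+  # The returned envelope looks roughly like this (illustrative; field
+  # values hypothetical):
+  #   {kind: "arvados#collectionList", etag: "", self_link: "",
+  #    offset: 0, limit: 100, items_available: 1234, items: [...]}
+  # items_available is omitted when the client passes count=none.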
+
+  def render_list
+    send_json object_list(model_class: self.model_class)
+  end
+
+  def remote_ip
+    # Caveat: this is highly dependent on the proxy setup. YMMV.
+    if request.headers.key?('HTTP_X_REAL_IP') then
+      # We're behind a reverse proxy
+      @remote_ip = request.headers['HTTP_X_REAL_IP']
+    else
+      # Hopefully, we are not!
+      @remote_ip = request.env['REMOTE_ADDR']
+    end
+  end
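+
+  # Assumed setup for the X-Real-IP branch above (nginx-style sketch,
+  # hypothetical):
+  #   proxy_set_header X-Real-IP $remote_addr;
+  # Rails then exposes the header as request.headers['HTTP_X_REAL_IP'].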
+
+  def load_required_parameters
+    (self.class.send "_#{params[:action]}_requires_parameters" rescue {}).
+      each do |key, info|
+      if info[:required] and not params.include?(key)
+        raise ArgumentError.new("#{key} parameter is required")
+      elsif info[:type] == 'boolean'
+        # Make sure params[key] is either true or false -- not a
+        # string, not nil, etc.
+        if not params.include?(key)
+          params[key] = info[:default]
+        elsif [false, 'false', '0', 0].include? params[key]
+          params[key] = false
+        elsif [true, 'true', '1', 1].include? params[key]
+          params[key] = true
+        else
+          raise TypeError.new("#{key} parameter must be a boolean, true or false")
+        end
+      end
+    end
+    true
+  end
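+
+  # Sketch of the boolean coercion above, using the ensure_unique_name
+  # parameter declared below: ?ensure_unique_name=1 yields
+  # params[:ensure_unique_name] == true, "0"/"false"/0/false yield
+  # false, an absent value falls back to the declared default, and
+  # anything else raises TypeError.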
+
+  def self._create_requires_parameters
+    {
+      ensure_unique_name: {
+        type: "boolean",
+        description: "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+        location: "query",
+        required: false,
+        default: false
+      },
+      cluster_id: {
+        type: 'string',
+        description: "Create object on a remote federated cluster instead of the current one.",
+        location: "query",
+        required: false,
+      },
+    }
+  end
+
+  def self._update_requires_parameters
+    {}
+  end
+
+  def self._index_requires_parameters
+    {
+      filters: { type: 'array', required: false },
+      where: { type: 'object', required: false },
+      order: { type: 'array', required: false },
+      select: { type: 'array', required: false },
+      distinct: { type: 'boolean', required: false },
+      limit: { type: 'integer', required: false, default: DEFAULT_LIMIT },
+      offset: { type: 'integer', required: false, default: 0 },
+      count: { type: 'string', required: false, default: 'exact' },
+      cluster_id: {
+        type: 'string',
+        description: "List objects on a remote federated cluster instead of the current one.",
+        location: "query",
+        required: false,
+      },
+    }
+  end
+
+  def client_accepts_plain_text_stream
+    (request.headers['Accept'].split(' ') &
+     ['text/plain', '*/*']).count > 0
+  end
+
+  def render *opts
+    if opts.first
+      response = opts.first[:json]
+      if response.is_a?(Hash) &&
+          params[:_profile] &&
+          Thread.current[:request_starttime]
+        response[:_profile] = {
+          request_time: Time.now - Thread.current[:request_starttime]
+        }
+      end
+    end
+    super(*opts)
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb b/services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb
new file mode 100644 (file)
index 0000000..826ced1
--- /dev/null
@@ -0,0 +1,168 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'safe_json'
+
+class Arvados::V1::ApiClientAuthorizationsController < ApplicationController
+  accept_attribute_as_json :scopes, Array
+  before_filter :current_api_client_is_trusted, :except => [:current]
+  before_filter :admin_required, :only => :create_system_auth
+  skip_before_filter :render_404_if_no_object, :only => [:create_system_auth, :current]
+  skip_before_filter :find_object_by_uuid, :only => [:create_system_auth, :current]
+
+  def self._create_system_auth_requires_parameters
+    {
+      api_client_id: {type: 'integer', required: false},
+      scopes: {type: 'array', required: false}
+    }
+  end
+  def create_system_auth
+    @object = ApiClientAuthorization.
+      new(user_id: system_user.id,
+          api_client_id: params[:api_client_id] || current_api_client.andand.id,
+          created_by_ip_address: remote_ip,
+          scopes: SafeJSON.load(params[:scopes] || '["all"]'))
+    @object.save!
+    show
+  end
+
+  def create
+    # Note: the user could specify an owner_uuid for a different user, which
+    # on the surface appears to be a security hole.  However, the record will
+    # be rejected before being saved to the database by the
+    # ApiClientAuthorization model, which enforces that user_id == current
+    # user or the user is an admin.
+
+    if resource_attrs[:owner_uuid]
+      # The model has an owner_id attribute instead of owner_uuid, but
+      # we can't expect the client to know the local numeric ID. We
+      # translate UUID to numeric ID here.
+      resource_attrs[:user_id] =
+        User.where(uuid: resource_attrs.delete(:owner_uuid)).first.andand.id
+    elsif not resource_attrs[:user_id]
+      resource_attrs[:user_id] = current_user.id
+    end
+    resource_attrs[:api_client_id] = Thread.current[:api_client].id
+    super
+  end
+
+  def current
+    @object = Thread.current[:api_client_authorization]
+    show
+  end
+
+  protected
+
+  def default_orders
+    ["#{table_name}.created_at desc"]
+  end
+
+  def find_objects_for_index
+    # Here we are deliberately less helpful about searching for client
+    # authorizations.  We look up tokens belonging to the current user
+    # and filter by exact matches on uuid, api_token, and scopes.
+    wanted_scopes = []
+    if @filters
+      wanted_scopes.concat(@filters.map { |attr, operator, operand|
+        ((attr == 'scopes') and (operator == '=')) ? operand : nil
+      })
+      @filters.select! { |attr, operator, operand|
+        operator == '=' && (attr == 'uuid' || attr == 'api_token')
+      }
+    end
+    if @where
+      wanted_scopes << @where['scopes']
+      @where.select! { |attr, val|
+        # "where":{"uuid":"zzzzz-zzzzz-zzzzzzzzzzzzzzz"} is OK but
+        # "where":{"api_client_id":1} is not supported
+        # "where":{"uuid":["contains","-"]} is not supported
+        # "where":{"uuid":["uuid1","uuid2","uuid3"]} is not supported
+        val.is_a?(String) && (attr == 'uuid' || attr == 'api_token')
+      }
+    end
+    @objects = model_class.where('user_id=?', current_user.id)
+    if wanted_scopes.compact.any?
+      # We can't filter on scopes effectively using AR/postgres.
+      # Instead we get the entire result set, do our own filtering on
+      # scopes to get a list of UUIDs, then start a new query
+      # (restricted to the selected UUIDs) so super can apply the
+      # offset/limit/order params in the usual way.
+      @request_limit = @limit
+      @request_offset = @offset
+      @limit = @objects.count
+      @offset = 0
+      super
+      wanted_scopes.compact.each do |scope_list|
+        if @objects.respond_to?(:where) && scope_list.length < 2
+          @objects = @objects.
+                     where('scopes in (?)',
+                           [scope_list.to_yaml, SafeJSON.dump(scope_list)])
+        else
+          if @objects.respond_to?(:where)
+            # Eliminate rows with scopes=['all'] before doing the
+            # expensive filter. They are typically the majority of
+            # rows, and they obviously won't match given
+            # scope_list.length>=2, so loading them all into
+            # ActiveRecord objects is a huge waste of time.
+            @objects = @objects.
+                       where('scopes not in (?)',
+                             [['all'].to_yaml, SafeJSON.dump(['all'])])
+          end
+          sorted_scopes = scope_list.sort
+          @objects = @objects.select { |auth| auth.scopes.sort == sorted_scopes }
+        end
+      end
+      @limit = @request_limit
+      @offset = @request_offset
+      @objects = model_class.where('uuid in (?)', @objects.collect(&:uuid))
+    end
+    super
+  end
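+
+  # Illustrative requests (scope strings hypothetical): a filter like
+  #   [["scopes", "=", ["GET /arvados/v1/collections"]]]
+  # is matched in SQL against the serialized column (YAML or JSON),
+  # while a scope list of two or more elements takes the slow path
+  # above: rows with scopes=["all"] are excluded in SQL, and the
+  # remaining candidates are compared in Ruby after sorting both
+  # scope lists.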
+
+  def find_object_by_uuid
+    uuid_param = params[:uuid] || params[:id]
+    if (uuid_param != current_api_client_authorization.andand.uuid and
+        not Thread.current[:api_client].andand.is_trusted)
+      return forbidden
+    end
+    @limit = 1
+    @offset = 0
+    @orders = []
+    @where = {}
+    @filters = [['uuid', '=', uuid_param]]
+    find_objects_for_index
+    @object = @objects.first
+  end
+
+  def current_api_client_is_trusted
+    if Thread.current[:api_client].andand.is_trusted
+      return true
+    end
+    # A non-trusted client can do a search for its own token if it
+    # explicitly restricts the search to its own UUID or api_token.
+    # Any other kind of query must return 403, even if it matches only
+    # the current token, because that's currently how Workbench knows
+    # (after searching on scopes) the difference between "the token
+    # I'm using now *is* the only sharing token for this collection"
+    # (403) and "my token is trusted, and there is one sharing token
+    # for this collection" (200).
+    #
+    # The @filters test here also prevents a non-trusted token from
+    # filtering on its own scopes, and discovering whether any _other_
+    # equally scoped tokens exist (403=yes, 200=no).
+    return forbidden if !@objects
+    full_set = @objects.except(:limit).except(:offset) if @objects
+    if (full_set.count == 1 and
+        full_set.first.uuid == current_api_client_authorization.andand.uuid and
+        (@filters.map(&:first) & %w(uuid api_token)).any?)
+      return true
+    end
+    forbidden
+  end
+
+  def forbidden
+    send_error('Forbidden: this API client cannot manipulate other clients\' access tokens.',
+               status: 403)
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/api_clients_controller.rb b/services/api/app/controllers/arvados/v1/api_clients_controller.rb
new file mode 100644 (file)
index 0000000..5a67632
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::ApiClientsController < ApplicationController
+  before_filter :admin_required
+end
diff --git a/services/api/app/controllers/arvados/v1/authorized_keys_controller.rb b/services/api/app/controllers/arvados/v1/authorized_keys_controller.rb
new file mode 100644 (file)
index 0000000..f665d3d
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::AuthorizedKeysController < ApplicationController
+end
diff --git a/services/api/app/controllers/arvados/v1/collections_controller.rb b/services/api/app/controllers/arvados/v1/collections_controller.rb
new file mode 100644 (file)
index 0000000..5d7a7ae
--- /dev/null
@@ -0,0 +1,238 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require "arvados/keep"
+require "trashable"
+
+class Arvados::V1::CollectionsController < ApplicationController
+  include DbCurrentTime
+  include TrashableController
+
+  def self._index_requires_parameters
+    (super rescue {}).
+      merge({
+        include_trash: {
+          type: 'boolean', required: false, description: "Include collections whose is_trashed attribute is true."
+        },
+        include_old_versions: {
+          type: 'boolean', required: false, description: "Include past collection versions."
+        },
+      })
+  end
+
+  def create
+    if resource_attrs[:uuid] and (loc = Keep::Locator.parse(resource_attrs[:uuid]))
+      resource_attrs[:portable_data_hash] = loc.to_s
+      resource_attrs.delete :uuid
+    end
+    resource_attrs.delete :version
+    resource_attrs.delete :current_version_uuid
+    super
+  end
+
+  def find_objects_for_index
+    opts = {}
+    if params[:include_trash] || ['destroy', 'trash', 'untrash'].include?(action_name)
+      opts.update({include_trash: true})
+    end
+    if params[:include_old_versions] || @include_old_versions
+      opts.update({include_old_versions: true})
+    end
+    @objects = Collection.readable_by(*@read_users, opts) if !opts.empty?
+    super
+  end
+
+  def find_object_by_uuid
+    @include_old_versions = true
+
+    if loc = Keep::Locator.parse(params[:id])
+      loc.strip_hints!
+
+      # It matters which Collection object we pick because we use it to get signed_manifest_text,
+      # the value of which is affected by the value of trash_at.
+      #
+      # From postgres doc: "By default, null values sort as if larger than any non-null
+      # value; that is, NULLS FIRST is the default for DESC order, and
+      # NULLS LAST otherwise."
+      #
+      # "trash_at desc" sorts null first, then latest to earliest, so
+      # it will select the Collection object with the longest
+      # available lifetime.
+
+      if c = Collection.readable_by(*@read_users).where({ portable_data_hash: loc.to_s }).order("trash_at desc").limit(1).first
+        @object = {
+          uuid: c.portable_data_hash,
+          portable_data_hash: c.portable_data_hash,
+          manifest_text: c.signed_manifest_text,
+        }
+      end
+      true
+    else
+      super
+    end
+  end
+
+  def show
+    if @object.is_a? Collection
+      # Omit unsigned_manifest_text
+      @select ||= model_class.selectable_attributes - ["unsigned_manifest_text"]
+      super
+    else
+      send_json @object
+    end
+  end
+
+
+  def find_collections(visited, sp, &b)
+    case sp
+    when ArvadosModel
+      sp.class.columns.each do |c|
+        find_collections(visited, sp[c.name.to_sym], &b) if c.name != "log"
+      end
+    when Hash
+      sp.each do |k, v|
+        find_collections(visited, v, &b)
+      end
+    when Array
+      sp.each do |v|
+        find_collections(visited, v, &b)
+      end
+    when String
+      if m = /[a-f0-9]{32}\+\d+/.match(sp)
+        yield m[0], nil
+      elsif m = Collection.uuid_regex.match(sp)
+        yield nil, m[0]
+      end
+    end
+  end
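+
+  # Example (values hypothetical): scanning a string like
+  #   "output is acbd18db4cc2f85cedef654fccc4a4d8+3"
+  # yields the portable data hash to the block as (hash, nil), while a
+  # string containing a collection UUID is yielded as (nil, uuid).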
+
+  def search_edges(visited, uuid, direction)
+    if uuid.nil? or uuid.empty? or visited[uuid]
+      return
+    end
+
+    if loc = Keep::Locator.parse(uuid)
+      loc.strip_hints!
+      return if visited[loc.to_s]
+    end
+
+    logger.debug "visiting #{uuid}"
+
+    if loc
+      # uuid is a portable_data_hash
+      collections = Collection.readable_by(*@read_users).where(portable_data_hash: loc.to_s)
+      c = collections.limit(2).all
+      if c.size == 1
+        visited[loc.to_s] = c[0]
+      elsif c.size > 1
+        name = collections.limit(1).where("name <> ''").first
+        if name
+          visited[loc.to_s] = {
+            portable_data_hash: c[0].portable_data_hash,
+            name: "#{name.name} + #{collections.count-1} more"
+          }
+        else
+          visited[loc.to_s] = {
+            portable_data_hash: c[0].portable_data_hash,
+            name: loc.to_s
+          }
+        end
+      end
+
+      if direction == :search_up
+        # Search upstream for jobs where this locator is the output of some job
+        Job.readable_by(*@read_users).where(output: loc.to_s).each do |job|
+          search_edges(visited, job.uuid, :search_up)
+        end
+
+        Job.readable_by(*@read_users).where(log: loc.to_s).each do |job|
+          search_edges(visited, job.uuid, :search_up)
+        end
+      elsif direction == :search_down
+        if loc.to_s == "d41d8cd98f00b204e9800998ecf8427e+0"
+          # Special case, don't follow the empty collection.
+          return
+        end
+
+        # Search downstream for jobs where this locator is in script_parameters
+        Job.readable_by(*@read_users).where(["jobs.script_parameters like ?", "%#{loc.to_s}%"]).each do |job|
+          search_edges(visited, job.uuid, :search_down)
+        end
+
+        Job.readable_by(*@read_users).where(["jobs.docker_image_locator = ?", "#{loc.to_s}"]).each do |job|
+          search_edges(visited, job.uuid, :search_down)
+        end
+      end
+    else
+      # uuid is a regular Arvados UUID
+      rsc = ArvadosModel::resource_class_for_uuid uuid
+      if rsc == Job
+        Job.readable_by(*@read_users).where(uuid: uuid).each do |job|
+          visited[uuid] = job.as_api_response
+          if direction == :search_up
+            # Follow upstream collections referenced in the script parameters
+            find_collections(visited, job) do |hash, col_uuid|
+              search_edges(visited, hash, :search_up) if hash
+              search_edges(visited, col_uuid, :search_up) if col_uuid
+            end
+          elsif direction == :search_down
+            # Follow downstream job output
+            search_edges(visited, job.output, direction)
+          end
+        end
+      elsif rsc == Collection
+        if c = Collection.readable_by(*@read_users).where(uuid: uuid).limit(1).first
+          search_edges(visited, c.portable_data_hash, direction)
+          visited[c.portable_data_hash] = c.as_api_response
+        end
+      elsif rsc != nil
+        rsc.where(uuid: uuid).each do |r|
+          visited[uuid] = r.as_api_response
+        end
+      end
+    end
+
+    if direction == :search_up
+      # Search for provenance links pointing to the current uuid
+      Link.readable_by(*@read_users).
+        where(head_uuid: uuid, link_class: "provenance").
+        each do |link|
+        visited[link.uuid] = link.as_api_response
+        search_edges(visited, link.tail_uuid, direction)
+      end
+    elsif direction == :search_down
+      # Search for provenance links emanating from the current uuid
+      Link.readable_by(current_user).
+        where(tail_uuid: uuid, link_class: "provenance").
+        each do |link|
+        visited[link.uuid] = link.as_api_response
+        search_edges(visited, link.head_uuid, direction)
+      end
+    end
+  end
+
+  def provenance
+    visited = {}
+    search_edges(visited, @object[:portable_data_hash], :search_up)
+    search_edges(visited, @object[:uuid], :search_up)
+    send_json visited
+  end
+
+  def used_by
+    visited = {}
+    search_edges(visited, @object[:uuid], :search_down)
+    search_edges(visited, @object[:portable_data_hash], :search_down)
+    send_json visited
+  end
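+
+  # Both endpoints return (illustratively) a hash keyed by UUID or
+  # portable data hash, e.g.
+  #   {"zzzzz-8i9sb-xxxxxxxxxxxxxxx" => {...job...},
+  #    "acbd18db4cc2f85cedef654fccc4a4d8+3" => {...collection...}}
+  # accumulated by the graph walk in search_edges.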
+
+  protected
+
+  def load_limit_offset_order_params *args
+    super
+    if action_name == 'index'
+      # Omit manifest_text and unsigned_manifest_text from index results unless expressly selected.
+      @select ||= model_class.selectable_attributes - ["manifest_text", "unsigned_manifest_text"]
+    end
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/container_requests_controller.rb b/services/api/app/controllers/arvados/v1/container_requests_controller.rb
new file mode 100644 (file)
index 0000000..47ea16e
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::ContainerRequestsController < ApplicationController
+  accept_attribute_as_json :environment, Hash
+  accept_attribute_as_json :mounts, Hash
+  accept_attribute_as_json :runtime_constraints, Hash
+  accept_attribute_as_json :command, Array
+  accept_attribute_as_json :filters, Array
+  accept_attribute_as_json :scheduling_parameters, Hash
+  accept_attribute_as_json :secret_mounts, Hash
+end
diff --git a/services/api/app/controllers/arvados/v1/containers_controller.rb b/services/api/app/controllers/arvados/v1/containers_controller.rb
new file mode 100644 (file)
index 0000000..8542096
--- /dev/null
@@ -0,0 +1,78 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::ContainersController < ApplicationController
+  accept_attribute_as_json :environment, Hash
+  accept_attribute_as_json :mounts, Hash
+  accept_attribute_as_json :runtime_constraints, Hash
+  accept_attribute_as_json :runtime_status, Hash
+  accept_attribute_as_json :command, Array
+  accept_attribute_as_json :scheduling_parameters, Hash
+
+  skip_before_filter :find_object_by_uuid, only: [:current]
+  skip_before_filter :render_404_if_no_object, only: [:current]
+
+  def auth
+    if @object.locked_by_uuid != Thread.current[:api_client_authorization].uuid
+      raise ArvadosModel::PermissionDeniedError.new("Not locked by your token")
+    end
+    if @object.runtime_token.nil?
+      @object = @object.auth
+    else
+      @object = ApiClientAuthorization.validate(token: @object.runtime_token)
+      if @object.nil?
+        raise ArvadosModel::PermissionDeniedError.new("Invalid runtime_token")
+      end
+    end
+    show
+  end
+
+  def update
+    @object.with_lock do
+      @object.reload
+      super
+    end
+  end
+
+  def find_objects_for_index
+    super
+    if action_name == 'lock' || action_name == 'unlock'
+      # Avoid loading more fields than we need
+      @objects = @objects.select(:id, :uuid, :state, :priority, :auth_uuid, :locked_by_uuid)
+      @select = %w(uuid state priority auth_uuid locked_by_uuid)
+    end
+  end
+
+  def lock
+    @object.lock
+    show
+  end
+
+  def unlock
+    @object.unlock
+    show
+  end
+
+  def current
+    if Thread.current[:api_client_authorization].nil?
+      send_error("Not logged in", status: 401)
+    else
+      @object = Container.for_current_token
+      if @object.nil?
+        send_error("Token is not associated with a container.", status: 404)
+      else
+        show
+      end
+    end
+  end
+
+  def secret_mounts
+    c = Container.for_current_token
+    if @object && c && @object.uuid == c.uuid
+      send_json({"secret_mounts" => @object.secret_mounts})
+    else
+      send_error("Token is not associated with this container.", status: 403)
+    end
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/groups_controller.rb b/services/api/app/controllers/arvados/v1/groups_controller.rb
new file mode 100644 (file)
index 0000000..6163f89
--- /dev/null
@@ -0,0 +1,337 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require "trashable"
+
+class Arvados::V1::GroupsController < ApplicationController
+  include TrashableController
+
+  skip_before_filter :find_object_by_uuid, only: :shared
+  skip_before_filter :render_404_if_no_object, only: :shared
+
+  def self._index_requires_parameters
+    (super rescue {}).
+      merge({
+        include_trash: {
+          type: 'boolean', required: false, description: "Include items whose is_trashed attribute is true."
+        },
+      })
+  end
+
+  def self._contents_requires_parameters
+    params = _index_requires_parameters.
+      merge({
+              uuid: {
+                type: 'string', required: false, default: nil
+              },
+              recursive: {
+                type: 'boolean', required: false, description: 'Include contents from child groups recursively.'
+              },
+            })
+    params.delete(:select)
+    params
+  end
+
+  def self._create_requires_parameters
+    super.merge(
+      {
+        async: {
+          required: false,
+          type: 'boolean',
+          location: 'query',
+          default: false,
+          description: 'defer permissions update'
+        }
+      }
+    )
+  end
+
+  def self._update_requires_parameters
+    super.merge(
+      {
+        async: {
+          required: false,
+          type: 'boolean',
+          location: 'query',
+          default: false,
+          description: 'defer permissions update'
+        }
+      }
+    )
+  end
+
+  def create
+    if params[:async]
+      @object = model_class.new(resource_attrs.merge({async_permissions_update: true}))
+      @object.save!
+      render_accepted
+    else
+      super
+    end
+  end
+
+  def update
+    if params[:async]
+      attrs_to_update = resource_attrs.reject { |k, v|
+        [:kind, :etag, :href].index k
+      }.merge({async_permissions_update: true})
+      @object.update_attributes!(attrs_to_update)
+      @object.save!
+      render_accepted
+    else
+      super
+    end
+  end
+
+  def render_404_if_no_object
+    if params[:action] == 'contents'
+      if !params[:uuid]
+        # OK!
+        @object = nil
+        true
+      elsif @object
+        # Project group
+        true
+      elsif (@object = User.where(uuid: params[:uuid]).first)
+        # "Home" pseudo-project
+        true
+      else
+        super
+      end
+    else
+      super
+    end
+  end
+
+  def contents
+    load_searchable_objects
+    list = {
+      :kind => "arvados#objectList",
+      :etag => "",
+      :self_link => "",
+      :offset => @offset,
+      :limit => @limit,
+      :items_available => @items_available,
+      :items => @objects.as_api_response(nil)
+    }
+    if @extra_included
+      list[:included] = @extra_included.as_api_response(nil, {select: @select})
+    end
+    send_json(list)
+  end
+
+  def shared
+    # The purpose of this endpoint is to return the toplevel set of
+    # groups which are *not* reachable through a direct ownership
+    # chain of projects starting from the current user account.  In
+    # other words, groups to which access was granted via a
+    # permission link or chain of links.
+    #
+    # This also returns (in the "included" field) the objects that own
+    # those projects (users or non-project groups).
+    #
+    # The intended use of this endpoint is to support clients that
+    # wish to browse projects which are visible to the user but are
+    # not part of the "home" project.
+
+    load_limit_offset_order_params
+    load_filters_param
+
+    @objects = exclude_home Group.readable_by(*@read_users), Group
+
+    apply_where_limit_order_params
+
+    if params["include"] == "owner_uuid"
+      owners = @objects.map(&:owner_uuid).to_set
+      @extra_included = []
+      [Group, User].each do |klass|
+        @extra_included += klass.readable_by(*@read_users).where(uuid: owners.to_a).to_a
+      end
+    end
+
+    index
+  end
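+
+  # Usage sketch (hypothetical request):
+  #   GET /arvados/v1/groups/shared?include=owner_uuid
+  # returns the matching groups in "items" and, because of the
+  # "include" handling above, their owning users/groups in "included".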
+
+  def self._shared_requires_parameters
+    rp = self._index_requires_parameters
+    rp[:include] = { type: 'string', required: false }
+    rp
+  end
+
+  protected
+
+  def load_searchable_objects
+    all_objects = []
+    @items_available = 0
+
+    # Reload the orders param, this time without prefixing unqualified
+    # columns ("name" => "groups.name"). Here, unqualified orders
+    # apply to each table being searched, not "groups".
+    load_limit_offset_order_params(fill_table_names: false)
+
+    # Trick apply_where_limit_order_params into applying suitable
+    # per-table values. *_all are the real ones we'll apply to the
+    # aggregate set.
+    limit_all = @limit
+    offset_all = @offset
+    # save the orders from the current request as determined by load_param,
+    # but otherwise discard them because we're going to be getting objects
+    # from many models
+    request_orders = @orders.clone
+    @orders = []
+
+    request_filters = @filters
+
+    klasses = [Group,
+     Job, PipelineInstance, PipelineTemplate, ContainerRequest, Workflow,
+     Collection,
+     Human, Specimen, Trait]
+
+    table_names = Hash[klasses.collect { |k| [k, k.table_name] }]
+
+    disabled_methods = Rails.configuration.disable_api_methods
+    avail_klasses = table_names.select{|k, t| !disabled_methods.include?(t+'.index')}
+    klasses = avail_klasses.keys
+
+    request_filters.each do |col, op, val|
+      if col.index('.') && !table_names.values.include?(col.split('.', 2)[0])
+        raise ArgumentError.new("Invalid attribute '#{col}' in filter")
+      end
+    end
+
+    wanted_klasses = []
+    request_filters.each do |col,op,val|
+      if op == 'is_a'
+        (val.is_a?(Array) ? val : [val]).each do |type|
+          type = type.split('#')[-1]
+          type[0] = type[0].capitalize
+          wanted_klasses << type
+        end
+      end
+    end
+
+    filter_by_owner = {}
+    if @object
+      if params['recursive']
+        filter_by_owner[:owner_uuid] = [@object.uuid] + @object.descendant_project_uuids
+      else
+        filter_by_owner[:owner_uuid] = @object.uuid
+      end
+
+      if params['exclude_home_project']
+        raise ArgumentError.new "Cannot use 'exclude_home_project' with a parent object"
+      end
+    end
+
+    included_by_uuid = {}
+
+    seen_last_class = false
+    klasses.each do |klass|
+      @offset = 0 if seen_last_class  # reset offset for the next klass type being processed
+
+      # if the current klass is the same as params['last_object_class'], mark that fact
+      seen_last_class = true if((params['count'].andand.==('none')) and
+                                (params['last_object_class'].nil? or
+                                 params['last_object_class'].empty? or
+                                 params['last_object_class'] == klass.to_s))
+
+      # if klasses are specified, skip all other klass types
+      next if wanted_klasses.any? and !wanted_klasses.include?(klass.to_s)
+
+      # don't reprocess klass types that were already seen
+      next if params['count'] == 'none' and !seen_last_class
+
+      # don't process the rest of the object types if we already have the needed number of objects
+      break if params['count'] == 'none' and all_objects.size >= limit_all
+
+      # If the currently requested orders specifically match the
+      # table_name for the current klass, apply that order.
+      # Otherwise, order by recency.
+      request_order =
+        request_orders.andand.find { |r| r =~ /^#{klass.table_name}\./i || r !~ /\./ } ||
+        klass.default_orders.join(", ")
+
+      @select = nil
+      where_conds = filter_by_owner
+      if klass == Collection
+        @select = klass.selectable_attributes - ["manifest_text"]
+      elsif klass == Group
+        where_conds = where_conds.merge(group_class: "project")
+      end
+
+      @filters = request_filters.map do |col, op, val|
+        if !col.index('.')
+          [col, op, val]
+        elsif (col = col.split('.', 2))[0] == klass.table_name
+          [col[1], op, val]
+        else
+          nil
+        end
+      end.compact
+
+      @objects = klass.readable_by(*@read_users, {:include_trash => params[:include_trash]}).
+                 order(request_order).where(where_conds)
+
+      if params['exclude_home_project']
+        @objects = exclude_home @objects, klass
+      end
+
+      klass_limit = limit_all - all_objects.count
+      @limit = klass_limit
+      apply_where_limit_order_params klass
+      klass_object_list = object_list(model_class: klass)
+      klass_items_available = klass_object_list[:items_available] || 0
+      @items_available += klass_items_available
+      @offset = [@offset - klass_items_available, 0].max
+      all_objects += klass_object_list[:items]
+
+      if klass_object_list[:limit] < klass_limit
+        # object_list() had to reduce @limit to comply with
+        # max_index_database_read. From now on, we'll do all queries
+        # with limit=0 and just accumulate items_available.
+        limit_all = all_objects.count
+      end
+
+      if params["include"] == "owner_uuid"
+        owners = klass_object_list[:items].map {|i| i[:owner_uuid]}.to_set
+        [Group, User].each do |ownerklass|
+          ownerklass.readable_by(*@read_users).where(uuid: owners.to_a).each do |ow|
+            included_by_uuid[ow.uuid] = ow
+          end
+        end
+      end
+    end
+
+    if params["include"]
+      @extra_included = included_by_uuid.values
+    end
+
+    @objects = all_objects
+    @limit = limit_all
+    @offset = offset_all
+  end
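+
+  # Paging sketch (numbers hypothetical): with limit=100 and offset=150,
+  # a klass reporting items_available=120 contributes no items, and the
+  # offset carried into the next klass becomes max(150-120, 0) = 30, so
+  # the aggregate result pages across tables as if they were one list.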
+
+  protected
+
+  def exclude_home objectlist, klass
+    # select records that are readable by the current user AND
+    #   the owner_uuid is a user (but not the current user), OR
+    #   the owner_uuid is not readable by the current user, OR
+    #   the owner_uuid is a group whose group_class is not 'project'
+
+    read_parent_check = if current_user.is_admin
+                          ""
+                        else
+                          "NOT EXISTS(SELECT 1 FROM #{PERMISSION_VIEW} WHERE "+
+                            "user_uuid=(:user_uuid) AND target_uuid=#{klass.table_name}.owner_uuid AND perm_level >= 1) OR "
+                        end
+
+    objectlist.where("#{klass.table_name}.owner_uuid IN (SELECT users.uuid FROM users WHERE users.uuid != (:user_uuid)) OR "+
+                     read_parent_check+
+                     "EXISTS(SELECT 1 FROM groups as gp where gp.uuid=#{klass.table_name}.owner_uuid and gp.group_class != 'project')",
+                     user_uuid: current_user.uuid)
+  end
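+
+  # In rough SQL terms (illustrative), the WHERE clause above keeps a
+  # row when any of these holds:
+  #   owner_uuid IN (SELECT uuid FROM users WHERE uuid != :user_uuid)
+  #   OR the owner is not readable via the permission view (non-admins)
+  #   OR the owner is a group with group_class != 'project'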
+
+end
diff --git a/services/api/app/controllers/arvados/v1/healthcheck_controller.rb b/services/api/app/controllers/arvados/v1/healthcheck_controller.rb
new file mode 100644 (file)
index 0000000..6d55506
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::HealthcheckController < ApplicationController
+  skip_before_filter :catch_redirect_hint
+  skip_before_filter :find_objects_for_index
+  skip_before_filter :find_object_by_uuid
+  skip_before_filter :load_filters_param
+  skip_before_filter :load_limit_offset_order_params
+  skip_before_filter :load_read_auths
+  skip_before_filter :load_where_param
+  skip_before_filter :render_404_if_no_object
+  skip_before_filter :require_auth_scope
+
+  before_filter :check_auth_header
+
+  def check_auth_header
+    mgmt_token = Rails.configuration.ManagementToken
+    auth_header = request.headers['Authorization']
+
+    if !mgmt_token
+      send_json({"errors" => "disabled"}, status: 404)
+    elsif !auth_header
+      send_json({"errors" => "authorization required"}, status: 401)
+    elsif auth_header != 'Bearer '+mgmt_token
+      send_json({"errors" => "authorization error"}, status: 403)
+    end
+  end
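+
+  # Illustrative probe (path and token hypothetical):
+  #   curl -H "Authorization: Bearer $mgmt_token" https://api.example/_health/ping
+  # yields {"health":"OK"} when ManagementToken is configured and matches.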
+
+  def ping
+    resp = {"health" => "OK"}
+    send_json resp
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/humans_controller.rb b/services/api/app/controllers/arvados/v1/humans_controller.rb
new file mode 100644 (file)
index 0000000..88eee30
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::HumansController < ApplicationController
+end
diff --git a/services/api/app/controllers/arvados/v1/job_tasks_controller.rb b/services/api/app/controllers/arvados/v1/job_tasks_controller.rb
new file mode 100644 (file)
index 0000000..07bbc33
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::JobTasksController < ApplicationController
+  accept_attribute_as_json :parameters, Hash
+end
diff --git a/services/api/app/controllers/arvados/v1/jobs_controller.rb b/services/api/app/controllers/arvados/v1/jobs_controller.rb
new file mode 100644 (file)
index 0000000..23c059c
--- /dev/null
@@ -0,0 +1,163 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::JobsController < ApplicationController
+  accept_attribute_as_json :components, Hash
+  accept_attribute_as_json :script_parameters, Hash
+  accept_attribute_as_json :runtime_constraints, Hash
+  accept_attribute_as_json :tasks_summary, Hash
+  skip_before_filter :find_object_by_uuid, :only => [:queue, :queue_size]
+  skip_before_filter :render_404_if_no_object, :only => [:queue, :queue_size]
+
+  include DbCurrentTime
+
+  def create
+    [:repository, :script, :script_version, :script_parameters].each do |r|
+      if !resource_attrs[r]
+        return send_error("#{r} attribute must be specified",
+                          status: :unprocessable_entity)
+      end
+    end
+
+    # We used to ask for the minimum_, exclude_, and no_reuse params
+    # in the job resource. Now we advertise them as flags that alter
+    # the behavior of the create action.
+    [:minimum_script_version, :exclude_script_versions].each do |attr|
+      if resource_attrs.has_key? attr
+        params[attr] = resource_attrs.delete attr
+      end
+    end
+    if resource_attrs.has_key? :no_reuse
+      params[:find_or_create] = !resource_attrs.delete(:no_reuse)
+    end
+
+    return super if !params[:find_or_create]
+    return if !load_filters_param
+
+    begin
+      @object = Job.find_reusable(resource_attrs, params, @filters, @read_users)
+    rescue ArgumentError => error
+      return send_error(error.message)
+    end
+
+    if @object
+      show
+    else
+      super
+    end
+  end
+
+  def cancel
+    reload_object_before_update
+    @object.cancel cascade: params[:cascade]
+    show
+  end
+
+  def lock
+    @object.lock current_user.uuid
+    show
+  end
+
+  class LogStreamer
+    Q_UPDATE_INTERVAL = 12
+    def initialize(job, opts={})
+      @job = job
+      @opts = opts
+    end
+    def each
+      if @job.finished_at
+        yield "#{@job.uuid} finished at #{@job.finished_at}\n"
+        return
+      end
+      while not @job.started_at
+        # send a summary (job queue + available nodes) to the client
+        # every few seconds while waiting for the job to start
+        current_time = db_current_time
+        last_ack_at ||= current_time - Q_UPDATE_INTERVAL - 1
+        if current_time - last_ack_at >= Q_UPDATE_INTERVAL
+          nodes_in_state = {idle: 0, alloc: 0}
+          ActiveRecord::Base.uncached do
+            Node.where('hostname is not ?', nil).collect do |n|
+              if n.info[:slurm_state]
+                nodes_in_state[n.info[:slurm_state]] ||= 0
+                nodes_in_state[n.info[:slurm_state]] += 1
+              end
+            end
+          end
+          job_queue = Job.queue.select(:uuid)
+          n_queued_before_me = 0
+          job_queue.each do |j|
+            break if j.uuid == @job.uuid
+            n_queued_before_me += 1
+          end
+          yield "#{db_current_time}" \
+            " job #{@job.uuid}" \
+            " queue_position #{n_queued_before_me}" \
+            " queue_size #{job_queue.count}" \
+            " nodes_idle #{nodes_in_state[:idle]}" \
+            " nodes_alloc #{nodes_in_state[:alloc]}\n"
+          last_ack_at = db_current_time
+        end
+        sleep 3
+        ActiveRecord::Base.uncached do
+          @job.reload
+        end
+      end
+    end
+  end
+
+  def queue
+    params[:order] ||= ['priority desc', 'created_at']
+    load_limit_offset_order_params
+    load_where_param
+    @where.merge!({state: Job::Queued})
+    return if !load_filters_param
+    find_objects_for_index
+    index
+  end
+
+  def queue_size
+    # Users may not be allowed to see all the jobs in the queue, so provide a
+    # method that returns just the queue size, giving a sense of how busy the
+    # cluster is.
+    render :json => {:queue_size => Job.queue.size}
+  end
+
+  def self._create_requires_parameters
+    (super rescue {}).
+      merge({
+              find_or_create: {
+                type: 'boolean', required: false, default: false
+              },
+              filters: {
+                type: 'array', required: false
+              },
+              minimum_script_version: {
+                type: 'string', required: false
+              },
+              exclude_script_versions: {
+                type: 'array', required: false
+              },
+            })
+  end
+
+  def self._queue_requires_parameters
+    self._index_requires_parameters
+  end
+
+  protected
+
+  def load_filters_param
+    begin
+      super
+      attrs = resource_attrs rescue {}
+      @filters = Job.load_job_specific_filters attrs, @filters, @read_users
+    rescue ArgumentError => error
+      send_error(error.message)
+      false
+    else
+      true
+    end
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/keep_disks_controller.rb b/services/api/app/controllers/arvados/v1/keep_disks_controller.rb
new file mode 100644 (file)
index 0000000..9b4c342
--- /dev/null
@@ -0,0 +1,50 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::KeepDisksController < ApplicationController
+  skip_before_filter :require_auth_scope, only: :ping
+  skip_before_filter :render_404_if_no_object, only: :ping
+
+  def self._ping_requires_parameters
+    {
+      uuid: {required: false},
+      ping_secret: {required: true},
+      node_uuid: {required: false},
+      filesystem_uuid: {required: false},
+      service_host: {required: false},
+      service_port: {required: true},
+      service_ssl_flag: {required: true}
+    }
+  end
+
+  def ping
+    params[:service_host] ||= request.env['REMOTE_ADDR']
+    if !params[:uuid] && current_user.andand.is_admin
+      # Create a new KeepDisk and ping it.
+      @object = KeepDisk.new(filesystem_uuid: params[:filesystem_uuid])
+      @object.save!
+
+      # In the first ping from this new filesystem_uuid, we can't
+      # expect the keep node to know the ping_secret, so we required
+      # an admin token above. Here we add ping_secret to params so
+      # the ping call below is properly authenticated.
+      params[:ping_secret] = @object.ping_secret
+    end
+    act_as_system_user do
+      if !@object.andand.ping(params)
+        return render_not_found "object not found"
+      end
+      # Render the :superuser view (i.e., include the ping_secret) even
+      # if !current_user.is_admin. This is safe because @object.ping's
+      # success implies the ping_secret was already known by the client.
+      send_json @object.as_api_response(:superuser)
+    end
+  end
+
+  def find_objects_for_index
+    # all users can list all keep disks
+    @objects = model_class.where('1=1')
+    super
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/keep_services_controller.rb b/services/api/app/controllers/arvados/v1/keep_services_controller.rb
new file mode 100644 (file)
index 0000000..c7c9119
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::KeepServicesController < ApplicationController
+
+  skip_before_filter :find_object_by_uuid, only: :accessible
+  skip_before_filter :render_404_if_no_object, only: :accessible
+  skip_before_filter :require_auth_scope, only: :accessible
+
+  def find_objects_for_index
+    # all users can list all keep services
+    @objects = model_class.where('1=1')
+    super
+  end
+
+  def accessible
+    if request.headers['X-External-Client'] == '1'
+      @objects = model_class.where('service_type=?', 'proxy')
+    else
+      @objects = model_class.where(model_class.arel_table[:service_type].not_eq('proxy'))
+    end
+    render_list
+  end
+
+end
diff --git a/services/api/app/controllers/arvados/v1/links_controller.rb b/services/api/app/controllers/arvados/v1/links_controller.rb
new file mode 100644 (file)
index 0000000..f54c4a9
--- /dev/null
@@ -0,0 +1,102 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::LinksController < ApplicationController
+
+  def check_uuid_kind uuid, kind
+    if kind and ArvadosModel::resource_class_for_uuid(uuid).andand.kind != kind
+      send_error("'#{kind}' does not match uuid '#{uuid}', expected '#{ArvadosModel::resource_class_for_uuid(uuid).andand.kind}'",
+                 status: 422)
+      nil
+    else
+      true
+    end
+  end
+
+  def create
+    return if ! check_uuid_kind resource_attrs[:head_uuid], resource_attrs[:head_kind]
+    return if ! check_uuid_kind resource_attrs[:tail_uuid], resource_attrs[:tail_kind]
+
+    resource_attrs.delete :head_kind
+    resource_attrs.delete :tail_kind
+    super
+  end
+
+  def get_permissions
+    if current_user.andand.can?(manage: @object)
+      # find all links and return them
+      @objects = Link.where(link_class: "permission",
+                            head_uuid: params[:uuid])
+      @offset = 0
+      @limit = @objects.count
+      render_list
+    else
+      render :json => { errors: ['Forbidden'] }.to_json, status: 403
+    end
+  end
+
+  protected
+
+  def find_object_by_uuid
+    if action_name == 'get_permissions'
+      # get_permissions accepts a UUID for any kind of object.
+      @object = ArvadosModel::resource_class_for_uuid(params[:uuid])
+        .readable_by(*@read_users)
+        .where(uuid: params[:uuid])
+        .first
+    else
+      super
+      if @object.nil?
+        # Normally group permission links are not readable_by users.
+        # Make an exception for users with permission to manage the group.
+        # FIXME: Solve this more generally - see the controller tests.
+        link = Link.find_by_uuid(params[:uuid])
+        if (not link.nil?) and
+            (link.link_class == "permission") and
+            (@read_users.any? { |u| u.can?(manage: link.head_uuid) })
+          @object = link
+        end
+      end
+    end
+  end
+
+  # Overrides ApplicationController load_where_param
+  def load_where_param
+    super
+
+    # head_kind and tail_kind columns are now virtual,
+    # equivalent functionality is now provided by
+    # 'is_a', so fix up any old-style 'where' clauses.
+    if @where
+      @filters ||= []
+      if @where[:head_kind]
+        @filters << ['head_uuid', 'is_a', @where[:head_kind]]
+        @where.delete :head_kind
+      end
+      if @where[:tail_kind]
+        @filters << ['tail_uuid', 'is_a', @where[:tail_kind]]
+        @where.delete :tail_kind
+      end
+    end
+  end
+
+  # Overrides ApplicationController load_filters_param
+  def load_filters_param
+    super
+
+    # head_kind and tail_kind columns are now virtual,
+    # equivalent functionality is now provided by
+    # 'is_a', so fix up any old-style 'filter' clauses.
+    @filters = @filters.map do |k|
+      if k[0] == 'head_kind' and k[1] == '='
+        ['head_uuid', 'is_a', k[2]]
+      elsif k[0] == 'tail_kind' and k[1] == '='
+        ['tail_uuid', 'is_a', k[2]]
+      else
+        k
+      end
+    end
+  end
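+
+  # For instance (kind value illustrative), an old-style filter
+  #   ["head_kind", "=", "arvados#collection"]
+  # is rewritten to ["head_uuid", "is_a", "arvados#collection"].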
+
+end
diff --git a/services/api/app/controllers/arvados/v1/logs_controller.rb b/services/api/app/controllers/arvados/v1/logs_controller.rb
new file mode 100644 (file)
index 0000000..9024979
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::LogsController < ApplicationController
+  # Overrides ApplicationController load_where_param
+  def load_where_param
+    super
+
+    # The object_kind column is now virtual; equivalent functionality
+    # is now provided by 'is_a', so fix up any old-style 'where'
+    # clauses.
+    if @where
+      @filters ||= []
+      if @where[:object_kind]
+        @filters << ['object_uuid', 'is_a', @where[:object_kind]]
+        @where.delete :object_kind
+      end
+    end
+  end
+
+  # Overrides ApplicationController load_filters_param
+  def load_filters_param
+    super
+
+    # The object_kind column is now virtual; equivalent functionality
+    # is now provided by 'is_a', so fix up any old-style 'filter'
+    # clauses.
+    @filters = @filters.map do |k|
+      if k[0] == 'object_kind' and k[1] == '='
+        ['object_uuid', 'is_a', k[2]]
+      else
+        k
+      end
+    end
+  end
+
+end
diff --git a/services/api/app/controllers/arvados/v1/nodes_controller.rb b/services/api/app/controllers/arvados/v1/nodes_controller.rb
new file mode 100644 (file)
index 0000000..a2b22ea
--- /dev/null
@@ -0,0 +1,90 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::NodesController < ApplicationController
+  skip_before_filter :require_auth_scope, :only => :ping
+  skip_before_filter :find_object_by_uuid, :only => :ping
+  skip_before_filter :render_404_if_no_object, :only => :ping
+
+  include DbCurrentTime
+
+  def self._ping_requires_parameters
+    { ping_secret: {required: true} }
+  end
+
+  def self._create_requires_parameters
+    super.merge(
+      { assign_slot: {required: false, type: 'boolean', description: 'assign slot and hostname'} })
+  end
+
+  def self._update_requires_parameters
+    super.merge(
+      { assign_slot: {required: false, type: 'boolean', description: 'assign slot and hostname'} })
+  end
+
+  def create
+    @object = model_class.new(resource_attrs)
+    @object.assign_slot if params[:assign_slot]
+    @object.save!
+    show
+  end
+
+  def update
+    if resource_attrs[:job_uuid].is_a? String
+      @object.job_readable = readable_job_uuids([resource_attrs[:job_uuid]]).any?
+    end
+    attrs_to_update = resource_attrs.reject { |k,v|
+      [:kind, :etag, :href].index k
+    }
+    @object.update_attributes!(attrs_to_update)
+    @object.assign_slot if params[:assign_slot]
+    @object.save!
+    show
+  end
+
+  def ping
+    act_as_system_user do
+      @object = Node.where(uuid: (params[:id] || params[:uuid])).first
+      if !@object
+        return render_not_found
+      end
+      ping_data = {
+        ip: params[:local_ipv4] || request.remote_ip,
+        ec2_instance_id: params[:instance_id]
+      }
+      [:ping_secret, :total_cpu_cores, :total_ram_mb, :total_scratch_mb]
+        .each do |key|
+        ping_data[key] = params[key] if params[key]
+      end
+      @object.ping(ping_data)
+      if @object.info['ping_secret'] == params[:ping_secret]
+        send_json @object.as_api_response(:superuser)
+      else
+        raise "Invalid ping_secret after ping"
+      end
+    end
+  end
+
+  def find_objects_for_index
+    if !current_user.andand.is_admin && current_user.andand.is_active
+      # active non-admin users can list nodes that are (or were
+      # recently) working
+      @objects = model_class.where('last_ping_at >= ?', db_current_time - 1.hours)
+    end
+    super
+    if @select.nil? or @select.include? 'job_uuid'
+      job_uuids = @objects.map { |n| n[:job_uuid] }.compact
+      assoc_jobs = readable_job_uuids(job_uuids)
+      @objects.each do |node|
+        node.job_readable = assoc_jobs.include?(node[:job_uuid])
+      end
+    end
+  end
+
+  protected
+
+  def readable_job_uuids(uuids)
+    Job.readable_by(*@read_users).select(:uuid).where(uuid: uuids).map(&:uuid)
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/pipeline_instances_controller.rb b/services/api/app/controllers/arvados/v1/pipeline_instances_controller.rb
new file mode 100644 (file)
index 0000000..baffda1
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::PipelineInstancesController < ApplicationController
+  accept_attribute_as_json :components, Hash
+  accept_attribute_as_json :properties, Hash
+  accept_attribute_as_json :components_summary, Hash
+
+  def cancel
+    reload_object_before_update
+    @object.cancel cascade: params[:cascade]
+    show
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/pipeline_templates_controller.rb b/services/api/app/controllers/arvados/v1/pipeline_templates_controller.rb
new file mode 100644 (file)
index 0000000..a276948
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::PipelineTemplatesController < ApplicationController
+  accept_attribute_as_json :components, Hash
+end
diff --git a/services/api/app/controllers/arvados/v1/repositories_controller.rb b/services/api/app/controllers/arvados/v1/repositories_controller.rb
new file mode 100644 (file)
index 0000000..b88e10c
--- /dev/null
@@ -0,0 +1,124 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::RepositoriesController < ApplicationController
+  skip_before_filter :find_object_by_uuid, :only => :get_all_permissions
+  skip_before_filter :render_404_if_no_object, :only => :get_all_permissions
+  before_filter :admin_required, :only => :get_all_permissions
+
+  def get_all_permissions
+    # user_aks is a map of {user_uuid => array of public keys}
+    user_aks = {}
+    # admins is an array of user_uuids
+    admins = []
+    User.
+      where('users.is_active = ? or users.uuid = ?', true, anonymous_user_uuid).
+      eager_load(:authorized_keys).find_each do |u|
+      user_aks[u.uuid] = u.authorized_keys.collect do |ak|
+        {
+          public_key: ak.public_key,
+          authorized_key_uuid: ak.uuid
+        }
+      end
+      admins << u.uuid if u.is_admin
+    end
+    all_group_permissions = User.all_group_permissions
+    @repo_info = {}
+    Repository.eager_load(:permissions).find_each do |repo|
+      @repo_info[repo.uuid] = {
+        uuid: repo.uuid,
+        name: repo.name,
+        push_url: repo.push_url,
+        fetch_url: repo.fetch_url,
+        user_permissions: {},
+      }
+      # evidence is an array of {name: 'can_xxx', user_uuid: 'x-y-z'},
+      # one entry for each piece of evidence we find in the permission
+      # database that establishes that a user can access this
+      # repository. Multiple entries can be added for a given user,
+      # possibly with different access levels; these will be compacted
+      # below.
+      evidence = []
+      repo.permissions.each do |perm|
+        if ArvadosModel::resource_class_for_uuid(perm.tail_uuid) == Group
+          # A group has permission. Each user who has access to this
+          # group also has access to the repository. Access level is
+          # min(group-to-repo permission, user-to-group permission).
+          user_aks.each do |user_uuid, _|
+            perm_mask = all_group_permissions[user_uuid].andand[perm.tail_uuid]
+            if not perm_mask
+              next
+            elsif perm_mask[:manage] and perm.name == 'can_manage'
+              evidence << {name: 'can_manage', user_uuid: user_uuid}
+            elsif perm_mask[:write] and ['can_manage', 'can_write'].index perm.name
+              evidence << {name: 'can_write', user_uuid: user_uuid}
+            elsif perm_mask[:read]
+              evidence << {name: 'can_read', user_uuid: user_uuid}
+            end
+          end
+        elsif user_aks.has_key?(perm.tail_uuid)
+          # A user has permission; the user exists; and either the
+          # user is active, or it's the special case of the anonymous
+          # user which is never "active" but is allowed to read
+          # content from public repositories.
+          evidence << {name: perm.name, user_uuid: perm.tail_uuid}
+        end
+      end
+      # Owner of the repository, and all admins, can do everything.
+      ([repo.owner_uuid] | admins).each do |user_uuid|
+        # Except: no permissions for inactive users, even if they own
+        # repositories.
+        next unless user_aks.has_key?(user_uuid)
+        evidence << {name: 'can_manage', user_uuid: user_uuid}
+      end
+      # Distill all the evidence about permissions on this repository
+      # into one hash per user, of the form {'can_xxx' => true, ...}.
+      # The hash is nil for a user who has no permissions at all on
+      # this particular repository.
+      evidence.each do |perm|
+        user_uuid = perm[:user_uuid]
+        user_perms = (@repo_info[repo.uuid][:user_permissions][user_uuid] ||= {})
+        user_perms[perm[:name]] = true
+      end
+    end
+    # Revisit each {'can_xxx' => true, ...} hash for some final
+    # cleanup to make life easier for the requestor.
+    #
+    # Add a 'gitolite_permissions' key alongside the 'can_xxx' keys,
+    # for the convenience of the gitolite config file generator.
+    #
+    # Add all lesser permissions when a greater permission is
+    # present. If the requestor only wants to know who can write, it
+    # only has to test for 'can_write' in the response.
+    @repo_info.values.each do |repo|
+      repo[:user_permissions].each do |user_uuid, user_perms|
+        if user_perms['can_manage']
+          user_perms['gitolite_permissions'] = 'RW+'
+          user_perms['can_write'] = true
+          user_perms['can_read'] = true
+        elsif user_perms['can_write']
+          user_perms['gitolite_permissions'] = 'RW+'
+          user_perms['can_read'] = true
+        elsif user_perms['can_read']
+          user_perms['gitolite_permissions'] = 'R'
+        end
+      end
+    end
+    # The response looks like
+    #   {"kind":"...",
+    #    "repositories":[r1,r2,r3,...],
+    #    "user_keys":usermap}
+    # where each of r1,r2,r3 looks like
+    #   {"uuid":"repo-uuid-1",
+    #    "name":"username/reponame",
+    #    "push_url":"...",
+    #    "user_permissions":{"user-uuid-a":{"can_read":true,"gitolite_permissions":"R"}}}
+    # and usermap looks like
+    #   {"user-uuid-a":[{"public_key":"ssh-rsa g...","authorized_key_uuid":"ak-uuid-g"},...],
+    #    "user-uuid-b":[{"public_key":"ssh-rsa h...","authorized_key_uuid":"ak-uuid-h"},...],...}
+    send_json(kind: 'arvados#RepositoryPermissionSnapshot',
+              repositories: @repo_info.values,
+              user_keys: user_aks)
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/schema_controller.rb b/services/api/app/controllers/arvados/v1/schema_controller.rb
new file mode 100644 (file)
index 0000000..771ef2b
--- /dev/null
@@ -0,0 +1,407 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::SchemaController < ApplicationController
+  skip_before_filter :catch_redirect_hint
+  skip_before_filter :find_objects_for_index
+  skip_before_filter :find_object_by_uuid
+  skip_before_filter :load_filters_param
+  skip_before_filter :load_limit_offset_order_params
+  skip_before_filter :load_read_auths
+  skip_before_filter :load_where_param
+  skip_before_filter :render_404_if_no_object
+  skip_before_filter :require_auth_scope
+
+  include DbCurrentTime
+
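+  # Serve the v1 REST discovery document. The route for this action is
+  # defined in config/routes.rb; e.g. (hypothetical host):
+  #   GET https://zzzzz.arvadosapi.com/discovery/v1/apis/arvados/v1/rest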
+  def index
+    expires_in 24.hours, public: true
+    send_json discovery_doc
+  end
+
+  protected
+
+  def discovery_doc
+    Rails.cache.fetch 'arvados_v1_rest_discovery' do
+      Rails.application.eager_load!
+      discovery = {
+        kind: "discovery#restDescription",
+        discoveryVersion: "v1",
+        id: "arvados:v1",
+        name: "arvados",
+        version: "v1",
+        revision: "20131114",
+        source_version: AppVersion.hash,
+        sourceVersion: AppVersion.hash, # source_version should be deprecated in the future
+        packageVersion: AppVersion.package_version,
+        generatedAt: db_current_time.iso8601,
+        title: "Arvados API",
+        description: "The API to interact with Arvados.",
+        documentationLink: "http://doc.arvados.org/api/index.html",
+        defaultCollectionReplication: Rails.configuration.default_collection_replication,
+        protocol: "rest",
+        baseUrl: root_url + "arvados/v1/",
+        basePath: "/arvados/v1/",
+        rootUrl: root_url,
+        servicePath: "arvados/v1/",
+        batchPath: "batch",
+        uuidPrefix: Rails.application.config.uuid_prefix,
+        defaultTrashLifetime: Rails.application.config.default_trash_lifetime,
+        blobSignatureTtl: Rails.application.config.blob_signature_ttl,
+        maxRequestSize: Rails.application.config.max_request_size,
+        maxItemsPerResponse: Rails.application.config.max_items_per_response,
+        dockerImageFormats: Rails.application.config.docker_image_formats,
+        crunchLogBytesPerEvent: Rails.application.config.crunch_log_bytes_per_event,
+        crunchLogSecondsBetweenEvents: Rails.application.config.crunch_log_seconds_between_events,
+        crunchLogThrottlePeriod: Rails.application.config.crunch_log_throttle_period,
+        crunchLogThrottleBytes: Rails.application.config.crunch_log_throttle_bytes,
+        crunchLogThrottleLines: Rails.application.config.crunch_log_throttle_lines,
+        crunchLimitLogBytesPerJob: Rails.application.config.crunch_limit_log_bytes_per_job,
+        crunchLogPartialLineThrottlePeriod: Rails.application.config.crunch_log_partial_line_throttle_period,
+        crunchLogUpdatePeriod: Rails.application.config.crunch_log_update_period,
+        crunchLogUpdateSize: Rails.application.config.crunch_log_update_size,
+        remoteHosts: Rails.configuration.remote_hosts,
+        remoteHostsViaDNS: Rails.configuration.remote_hosts_via_dns,
+        websocketUrl: Rails.application.config.websocket_address,
+        workbenchUrl: Rails.application.config.workbench_address,
+        keepWebServiceUrl: Rails.application.config.keep_web_service_url,
+        gitUrl: case Rails.application.config.git_repo_https_base
+                when false
+                  ''
+                when true
+                  'https://git.%s.arvadosapi.com/' % Rails.configuration.uuid_prefix
+                else
+                  Rails.application.config.git_repo_https_base
+                end,
+        parameters: {
+          alt: {
+            type: "string",
+            description: "Data format for the response.",
+            default: "json",
+            enum: [
+                   "json"
+                  ],
+            enumDescriptions: [
+                               "Responses with Content-Type of application/json"
+                              ],
+            location: "query"
+          },
+          fields: {
+            type: "string",
+            description: "Selector specifying which fields to include in a partial response.",
+            location: "query"
+          },
+          key: {
+            type: "string",
+            description: "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+            location: "query"
+          },
+          oauth_token: {
+            type: "string",
+            description: "OAuth 2.0 token for the current user.",
+            location: "query"
+          }
+        },
+        auth: {
+          oauth2: {
+            scopes: {
+              "https://api.curoverse.com/auth/arvados" => {
+                description: "View and manage objects"
+              },
+              "https://api.curoverse.com/auth/arvados.readonly" => {
+                description: "View objects"
+              }
+            }
+          }
+        },
+        schemas: {},
+        resources: {}
+      }
+
+      ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |k|
+        begin
+          ctl_class = "Arvados::V1::#{k.to_s.pluralize}Controller".constantize
+        rescue
+          # No controller -> no discovery.
+          next
+        end
+        object_properties = {}
+        k.columns.
+          select { |col| col.name != 'id' && !col.name.start_with?('secret_') }.
+          each do |col|
+          if k.serialized_attributes.has_key? col.name
+            object_properties[col.name] = {
+              type: k.serialized_attributes[col.name].object_class.to_s
+            }
+          else
+            object_properties[col.name] = {
+              type: col.type
+            }
+          end
+        end
+        discovery[:schemas][k.to_s + 'List'] = {
+          id: k.to_s + 'List',
+          description: k.to_s + ' list',
+          type: "object",
+          properties: {
+            kind: {
+              type: "string",
+              description: "Object type. Always arvados##{k.to_s.camelcase(:lower)}List.",
+              default: "arvados##{k.to_s.camelcase(:lower)}List"
+            },
+            etag: {
+              type: "string",
+              description: "List version."
+            },
+            items: {
+              type: "array",
+              description: "The list of #{k.to_s.pluralize}.",
+              items: {
+                "$ref" => k.to_s
+              }
+            },
+            next_link: {
+              type: "string",
+              description: "A link to the next page of #{k.to_s.pluralize}."
+            },
+            next_page_token: {
+              type: "string",
+              description: "The page token for the next page of #{k.to_s.pluralize}."
+            },
+            selfLink: {
+              type: "string",
+              description: "A link back to this list."
+            }
+          }
+        }
+        discovery[:schemas][k.to_s] = {
+          id: k.to_s,
+          description: k.to_s,
+          type: "object",
+          uuidPrefix: (k.respond_to?(:uuid_prefix) ? k.uuid_prefix : nil),
+          properties: {
+            uuid: {
+              type: "string",
+              description: "Object ID."
+            },
+            etag: {
+              type: "string",
+              description: "Object version."
+            }
+          }.merge(object_properties)
+        }
+        discovery[:resources][k.to_s.underscore.pluralize] = {
+          methods: {
+            get: {
+              id: "arvados.#{k.to_s.underscore.pluralize}.get",
+              path: "#{k.to_s.underscore.pluralize}/{uuid}",
+              httpMethod: "GET",
+              description: "Gets a #{k.to_s}'s metadata by UUID.",
+              parameters: {
+                uuid: {
+                  type: "string",
+                  description: "The UUID of the #{k.to_s} in question.",
+                  required: true,
+                  location: "path"
+                }
+              },
+              parameterOrder: [
+                               "uuid"
+                              ],
+              response: {
+                "$ref" => k.to_s
+              },
+              scopes: [
+                       "https://api.curoverse.com/auth/arvados",
+                       "https://api.curoverse.com/auth/arvados.readonly"
+                      ]
+            },
+            index: {
+              id: "arvados.#{k.to_s.underscore.pluralize}.index",
+              path: k.to_s.underscore.pluralize,
+              httpMethod: "GET",
+              description:
+                 %|Index #{k.to_s.pluralize}.
+
+                   The <code>index</code> method returns a
+                   <a href="/api/resources.html">resource list</a> of
+                   matching #{k.to_s.pluralize}. For example:
+
+                   <pre>
+                   {
+                    "kind":"arvados##{k.to_s.camelcase(:lower)}List",
+                    "etag":"",
+                    "self_link":"",
+                    "next_page_token":"",
+                    "next_link":"",
+                    "items":[
+                       ...
+                    ],
+                    "items_available":745,
+                    "_profile":{
+                     "request_time":0.157236317
+                    }
+                    }
+                    </pre>|,
+              parameters: {
+              },
+              response: {
+                "$ref" => "#{k.to_s}List"
+              },
+              scopes: [
+                       "https://api.curoverse.com/auth/arvados",
+                       "https://api.curoverse.com/auth/arvados.readonly"
+                      ]
+            },
+            create: {
+              id: "arvados.#{k.to_s.underscore.pluralize}.create",
+              path: "#{k.to_s.underscore.pluralize}",
+              httpMethod: "POST",
+              description: "Create a new #{k.to_s}.",
+              parameters: {},
+              request: {
+                required: true,
+                properties: {
+                  k.to_s.underscore => {
+                    "$ref" => k.to_s
+                  }
+                }
+              },
+              response: {
+                "$ref" => k.to_s
+              },
+              scopes: [
+                       "https://api.curoverse.com/auth/arvados"
+                      ]
+            },
+            update: {
+              id: "arvados.#{k.to_s.underscore.pluralize}.update",
+              path: "#{k.to_s.underscore.pluralize}/{uuid}",
+              httpMethod: "PUT",
+              description: "Update attributes of an existing #{k.to_s}.",
+              parameters: {
+                uuid: {
+                  type: "string",
+                  description: "The UUID of the #{k.to_s} in question.",
+                  required: true,
+                  location: "path"
+                }
+              },
+              request: {
+                required: true,
+                properties: {
+                  k.to_s.underscore => {
+                    "$ref" => k.to_s
+                  }
+                }
+              },
+              response: {
+                "$ref" => k.to_s
+              },
+              scopes: [
+                       "https://api.curoverse.com/auth/arvados"
+                      ]
+            },
+            delete: {
+              id: "arvados.#{k.to_s.underscore.pluralize}.delete",
+              path: "#{k.to_s.underscore.pluralize}/{uuid}",
+              httpMethod: "DELETE",
+              description: "Delete an existing #{k.to_s}.",
+              parameters: {
+                uuid: {
+                  type: "string",
+                  description: "The UUID of the #{k.to_s} in question.",
+                  required: true,
+                  location: "path"
+                }
+              },
+              response: {
+                "$ref" => k.to_s
+              },
+              scopes: [
+                       "https://api.curoverse.com/auth/arvados"
+                      ]
+            }
+          }
+        }
+        # Check for Rails routes that don't match the usual actions
+        # listed above
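+        # (e.g. custom actions such as users.current or
+        # virtual_machines.get_all_logins get documented this way)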
+        d_methods = discovery[:resources][k.to_s.underscore.pluralize][:methods]
+        Rails.application.routes.routes.each do |route|
+          action = route.defaults[:action]
+          httpMethod = ['GET', 'POST', 'PUT', 'DELETE'].map { |method|
+            method if route.verb.match(method)
+          }.compact.first
+          if httpMethod and
+              route.defaults[:controller] == 'arvados/v1/' + k.to_s.underscore.pluralize and
+              ctl_class.action_methods.include? action
+            if !d_methods[action.to_sym]
+              method = {
+                id: "arvados.#{k.to_s.underscore.pluralize}.#{action}",
+                path: route.path.spec.to_s.sub('/arvados/v1/','').sub('(.:format)','').sub(/:(uu)?id/,'{uuid}'),
+                httpMethod: httpMethod,
+                description: "#{action} #{k.to_s.underscore.pluralize}",
+                parameters: {},
+                response: {
+                  "$ref" => (action == 'index' ? "#{k.to_s}List" : k.to_s)
+                },
+                scopes: [
+                         "https://api.curoverse.com/auth/arvados"
+                        ]
+              }
+              route.segment_keys.each do |key|
+                if key != :format
+                  key = :uuid if key == :id
+                  method[:parameters][key] = {
+                    type: "string",
+                    description: "",
+                    required: true,
+                    location: "path"
+                  }
+                end
+              end
+            else
+              # We already built a generic method description, but we
+              # might find some more required parameters through
+              # introspection.
+              method = d_methods[action.to_sym]
+            end
+            if ctl_class.respond_to? "_#{action}_requires_parameters".to_sym
+              ctl_class.send("_#{action}_requires_parameters".to_sym).each do |l, v|
+                if v.is_a? Hash
+                  method[:parameters][l] = v
+                else
+                  method[:parameters][l] = {}
+                end
+                if !method[:parameters][l][:default].nil?
+                  # The Java SDK is sensitive to all values being strings
+                  method[:parameters][l][:default] = method[:parameters][l][:default].to_s
+                end
+                method[:parameters][l][:type] ||= 'string'
+                method[:parameters][l][:description] ||= ''
+                method[:parameters][l][:location] = (route.segment_keys.include?(l) ? 'path' : 'query')
+                if method[:parameters][l][:required].nil?
+                  method[:parameters][l][:required] = v != false
+                end
+              end
+            end
+            d_methods[action.to_sym] = method
+
+            if action == 'index'
+              list_method = method.dup
+              list_method[:id].sub!('index', 'list')
+              list_method[:description].sub!('Index', 'List')
+              list_method[:description].sub!('index', 'list')
+              d_methods[:list] = list_method
+            end
+          end
+        end
+      end
+      Rails.configuration.disable_api_methods.each do |method|
+        ctrl, action = method.split('.', 2)
+        discovery[:resources][ctrl][:methods].delete(action.to_sym)
+      end
+      discovery
+    end
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/specimens_controller.rb b/services/api/app/controllers/arvados/v1/specimens_controller.rb
new file mode 100644 (file)
index 0000000..b1e50a7
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::SpecimensController < ApplicationController
+end
diff --git a/services/api/app/controllers/arvados/v1/traits_controller.rb b/services/api/app/controllers/arvados/v1/traits_controller.rb
new file mode 100644 (file)
index 0000000..7aaed5c
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::TraitsController < ApplicationController
+end
diff --git a/services/api/app/controllers/arvados/v1/user_agreements_controller.rb b/services/api/app/controllers/arvados/v1/user_agreements_controller.rb
new file mode 100644 (file)
index 0000000..dc08c6a
--- /dev/null
@@ -0,0 +1,87 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::UserAgreementsController < ApplicationController
+  before_filter :admin_required, except: [:index, :sign, :signatures]
+  skip_before_filter :find_object_by_uuid, only: [:sign, :signatures]
+  skip_before_filter :render_404_if_no_object, only: [:sign, :signatures]
+
+  def model_class
+    Link
+  end
+
+  def table_name
+    'links'
+  end
+
+  def index
+    if not current_user.is_invited
+      # New users cannot see user agreements until/unless invited to
+      # use this installation.
+      @objects = []
+    else
+      act_as_system_user do
+        uuids = Link.where("owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+                           system_user_uuid,
+                           'signature',
+                           'require',
+                           system_user_uuid,
+                           Collection.uuid_like_pattern).
+          collect(&:head_uuid)
+        @objects = Collection.where('uuid in (?)', uuids)
+      end
+    end
+    @response_resource_name = 'collection'
+    super
+  end
+
+  def signatures
+    current_user_uuid = (current_user.andand.is_admin && params[:uuid]) ||
+      current_user.uuid
+    act_as_system_user do
+      @objects = Link.where("owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+                            system_user_uuid,
+                            'signature',
+                            'click',
+                            current_user_uuid,
+                            Collection.uuid_like_pattern)
+    end
+    @response_resource_name = 'link'
+    render_list
+  end
+
+  def sign
+    current_user_uuid = current_user.uuid
+    act_as_system_user do
+      @object = Link.create(link_class: 'signature',
+                            name: 'click',
+                            tail_uuid: current_user_uuid,
+                            head_uuid: params[:uuid])
+    end
+    show
+  end
+
+  def create
+    usage_error
+  end
+
+  def new
+    usage_error
+  end
+
+  def update
+    usage_error
+  end
+
+  def destroy
+    usage_error
+  end
+
+  protected
+  def usage_error
+    raise ArgumentError.new \
+      "Manage user agreements via Collections and Links instead."
+  end
+
+end
diff --git a/services/api/app/controllers/arvados/v1/users_controller.rb b/services/api/app/controllers/arvados/v1/users_controller.rb
new file mode 100644 (file)
index 0000000..d2126ec
--- /dev/null
@@ -0,0 +1,224 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::UsersController < ApplicationController
+  accept_attribute_as_json :prefs, Hash
+
+  skip_before_filter :find_object_by_uuid, only:
+    [:activate, :current, :system, :setup, :merge]
+  skip_before_filter :render_404_if_no_object, only:
+    [:activate, :current, :system, :setup, :merge]
+  before_filter :admin_required, only: [:setup, :unsetup, :update_uuid]
+
+  def current
+    if current_user
+      @object = current_user
+      show
+    else
+      send_error("Not logged in", status: 401)
+    end
+  end
+
+  def system
+    @object = system_user
+    show
+  end
+
+  def activate
+    if current_user.andand.is_admin && params[:uuid]
+      @object = User.find params[:uuid]
+    else
+      @object = current_user
+    end
+    if not @object.is_active
+      if not (current_user.is_admin or @object.is_invited)
+        logger.warn "User #{@object.uuid} called users.activate " +
+          "but is not invited"
+        raise ArgumentError.new "Cannot activate without being invited."
+      end
+      act_as_system_user do
+        required_uuids = Link.where("owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+                                    system_user_uuid,
+                                    'signature',
+                                    'require',
+                                    system_user_uuid,
+                                    Collection.uuid_like_pattern).
+          collect(&:head_uuid)
+        signed_uuids = Link.where(owner_uuid: system_user_uuid,
+                                  link_class: 'signature',
+                                  name: 'click',
+                                  tail_uuid: @object.uuid,
+                                  head_uuid: required_uuids).
+          collect(&:head_uuid)
+        todo_uuids = required_uuids - signed_uuids
+        if todo_uuids.empty?
+          @object.update_attributes is_active: true
+          logger.info "User #{@object.uuid} activated"
+        else
+          logger.warn "User #{@object.uuid} called users.activate " +
+            "before signing agreements #{todo_uuids.inspect}"
+          raise ArvadosModel::PermissionDeniedError.new \
+            "Cannot activate without user agreements #{todo_uuids.inspect}."
+        end
+      end
+    end
+    show
+  end
+
+  # create user object and all the needed links
+  def setup
+    if params[:uuid]
+      @object = User.find_by_uuid(params[:uuid])
+      if !@object
+        return render_404_if_no_object
+      end
+    elsif !params[:user]
+      raise ArgumentError.new "Required uuid or user"
+    elsif !params[:user]['email']
+      raise ArgumentError.new "Require user email"
+    elsif !params[:openid_prefix]
+      raise ArgumentError.new "Required openid_prefix parameter is missing."
+    else
+      @object = model_class.create! resource_attrs
+    end
+
+    # It's not always possible for the client to know the user's
+    # username when submitting this request: the username might have
+    # been assigned automatically in create!() above. If the client
+    # provided a plain repository name, prefix it with the username
+    # now that we know what it is.
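+    # For example (hypothetical names), repo_name "foo" for a user whose
+    # username is "jdoe" becomes "jdoe/foo".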
+    if params[:repo_name].nil?
+      full_repo_name = nil
+    elsif @object.username.nil?
+      raise ArgumentError.
+        new("cannot setup a repository because user has no username")
+    elsif params[:repo_name].index("/")
+      full_repo_name = params[:repo_name]
+    else
+      full_repo_name = "#{@object.username}/#{params[:repo_name]}"
+    end
+
+    @response = @object.setup(repo_name: full_repo_name,
+                              vm_uuid: params[:vm_uuid],
+                              openid_prefix: params[:openid_prefix])
+
+    # Setup succeeded; send email to the user.
+    if params[:send_notification_email]
+      UserNotifier.account_is_setup(@object).deliver_now
+    end
+
+    send_json kind: "arvados#HashList", items: @response.as_api_response(nil)
+  end
+
+  # delete user agreements, vm, repository, login links; set state to inactive
+  def unsetup
+    reload_object_before_update
+    @object.unsetup
+    show
+  end
+
+  # Change UUID to a new (unused) uuid and transfer all owned/linked
+  # objects accordingly.
+  def update_uuid
+    @object.update_uuid(new_uuid: params[:new_uuid])
+    show
+  end
+
+  def merge
+    if !Thread.current[:api_client].andand.is_trusted
+      return send_error("supplied API token is not from a trusted client", status: 403)
+    elsif Thread.current[:api_client_authorization].scopes != ['all']
+      return send_error("cannot merge with a scoped token", status: 403)
+    end
+
+    new_auth = ApiClientAuthorization.validate(token: params[:new_user_token])
+    if !new_auth
+      return send_error("invalid new_user_token", status: 401)
+    end
+    if !new_auth.api_client.andand.is_trusted
+      return send_error("supplied new_user_token is not from a trusted client", status: 403)
+    elsif new_auth.scopes != ['all']
+      return send_error("supplied new_user_token has restricted scope", status: 403)
+    end
+    new_user = new_auth.user
+
+    if current_user.uuid == new_user.uuid
+      return send_error("cannot merge user to self", status: 422)
+    end
+
+    if !new_user.can?(write: params[:new_owner_uuid])
+      return send_error("cannot move objects into supplied new_owner_uuid: new user does not have write permission", status: 403)
+    end
+
+    redirect = params[:redirect_to_new_user]
+    if !redirect
+      return send_error("merge with redirect_to_new_user=false is not yet supported", status: 422)
+    end
+
+    @object = current_user
+    act_as_system_user do
+      @object.merge(new_owner_uuid: params[:new_owner_uuid], redirect_to_user_uuid: redirect && new_user.uuid)
+    end
+    show
+  end
+
+  protected
+
+  def self._merge_requires_parameters
+    {
+      new_owner_uuid: {
+        type: 'string', required: true,
+      },
+      new_user_token: {
+        type: 'string', required: true,
+      },
+      redirect_to_new_user: {
+        type: 'boolean', required: false,
+      },
+    }
+  end
+
+  def self._setup_requires_parameters
+    {
+      user: {
+        type: 'object', required: false
+      },
+      openid_prefix: {
+        type: 'string', required: false
+      },
+      repo_name: {
+        type: 'string', required: false
+      },
+      vm_uuid: {
+        type: 'string', required: false
+      },
+      send_notification_email: {
+        type: 'boolean', required: false, default: false
+      },
+    }
+  end
+
+  def self._update_uuid_requires_parameters
+    {
+      new_uuid: {
+        type: 'string', required: true,
+      },
+    }
+  end
+
+  def apply_filters(model_class=nil)
+    return super if @read_users.any?(&:is_admin)
+    if params[:uuid] != current_user.andand.uuid
+      # Non-admin index/show returns very basic information about readable users.
+      safe_attrs = ["uuid", "is_active", "email", "first_name", "last_name", "username"]
+      if @select
+        @select = @select & safe_attrs
+      else
+        @select = safe_attrs
+      end
+      @filters += [['is_active', '=', true]]
+    end
+    super
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/virtual_machines_controller.rb b/services/api/app/controllers/arvados/v1/virtual_machines_controller.rb
new file mode 100644 (file)
index 0000000..7a1c680
--- /dev/null
@@ -0,0 +1,59 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::VirtualMachinesController < ApplicationController
+  skip_before_filter :find_object_by_uuid, :only => :get_all_logins
+  skip_before_filter :render_404_if_no_object, :only => :get_all_logins
+  before_filter(:admin_required,
+                :only => [:logins, :get_all_logins])
+
+  # Get all login permissions (user uuid, login account, SSH key) for a
+  # single VM
+  def logins
+    render_logins_for VirtualMachine.where(uuid: @object.uuid)
+  end
+
+  # Get all login permissions for all VMs
+  def get_all_logins
+    render_logins_for VirtualMachine
+  end
+
+  protected
+
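+  # Emit one response entry per (login permission, authorized key) pair,
+  # e.g. (hypothetical values):
+  #   {username: "jdoe", hostname: "shell", groups: [],
+  #    public_key: "ssh-rsa ...", user_uuid: "zzzzz-tpzed-...", ...}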
+  def render_logins_for vm_query
+    @response = []
+    @vms = vm_query.eager_load :login_permissions
+    @users = {}
+    User.eager_load(:authorized_keys).
+      where('users.uuid in (?)',
+            @vms.map { |vm| vm.login_permissions.map(&:tail_uuid) }.flatten.uniq).
+      each do |u|
+      @users[u.uuid] = u
+    end
+    @vms.each do |vm|
+      vm.login_permissions.each do |perm|
+        user_uuid = perm.tail_uuid
+        next if not @users[user_uuid]
+        next if perm.properties['username'].blank?
+        aks = @users[user_uuid].authorized_keys
+        if aks.empty?
+          # We'll emit one entry, with no public key.
+          aks = [nil]
+        end
+        aks.each do |ak|
+          @response << {
+            username: perm.properties['username'],
+            hostname: vm.hostname,
+            groups: (perm.properties['groups'].to_a rescue []),
+            public_key: ak ? ak.public_key : nil,
+            user_uuid: user_uuid,
+            virtual_machine_uuid: vm.uuid,
+            authorized_key_uuid: ak ? ak.uuid : nil,
+          }
+        end
+      end
+    end
+    send_json kind: "arvados#HashList", items: @response
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/workflows_controller.rb b/services/api/app/controllers/arvados/v1/workflows_controller.rb
new file mode 100644 (file)
index 0000000..7cfdd9d
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Arvados::V1::WorkflowsController < ApplicationController
+end
diff --git a/services/api/app/controllers/database_controller.rb b/services/api/app/controllers/database_controller.rb
new file mode 100644 (file)
index 0000000..dddc340
--- /dev/null
@@ -0,0 +1,85 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class DatabaseController < ApplicationController
+  skip_before_filter :find_object_by_uuid
+  skip_before_filter :render_404_if_no_object
+  before_filter :admin_required
+  def reset
+    raise ArvadosModel::PermissionDeniedError unless Rails.env == 'test'
+
+    # Sanity check: If someone has actually logged in here, this might
+    # not really be a throwaway database. Client test suites should
+    # use @example.com email addresses when creating user records, so
+    # we can tell they're not valuable.
+    user_uuids = User.
+      where('email is null or email not like ?', '%@example.com').
+      collect(&:uuid)
+    fixture_uuids =
+      YAML::load_file(File.expand_path('../../../test/fixtures/users.yml',
+                                       __FILE__)).
+      values.collect { |u| u['uuid'] }
+    unexpected_uuids = user_uuids - fixture_uuids
+    if unexpected_uuids.any?
+      logger.error("Running in test environment, but non-fixture users exist: " +
+                   "#{unexpected_uuids}")
+      raise ArvadosModel::PermissionDeniedError
+    end
+
+    require 'active_record/fixtures'
+
+    # What kinds of fixtures do we have?
+    fixturesets = Dir.glob(Rails.root.join('test', 'fixtures', '*.yml')).
+      collect { |yml| yml.match(/([^\/]*)\.yml$/)[1] }
+
+    # Don't reset keep_services: clients need to discover our
+    # integration-testing keepstores, not test fixtures.
+    fixturesets -= %w[keep_services]
+
+    table_names = '"' + ActiveRecord::Base.connection.tables.join('","') + '"'
+
+    attempts_left = 20
+    begin
+      ActiveRecord::Base.transaction do
+        # Avoid deadlock by locking all tables before doing anything
+        # drastic.
+        ActiveRecord::Base.connection.execute \
+          "LOCK TABLE #{table_names} IN ACCESS EXCLUSIVE MODE"
+
+        # Delete existing fixtures (and everything else) from fixture
+        # tables
+        fixturesets.each do |x|
+          x.classify.constantize.unscoped.delete_all
+        end
+
+        # create_fixtures() is a no-op for cached fixture sets, so
+        # uncache them all.
+        ActiveRecord::FixtureSet.reset_cache
+        ActiveRecord::FixtureSet.
+          create_fixtures(Rails.root.join('test', 'fixtures'), fixturesets)
+
+        # Dump cache of permissions etc.
+        Rails.cache.clear
+        ActiveRecord::Base.connection.clear_query_cache
+
+        # Reload database seeds
+        DatabaseSeeds.install
+      end
+    rescue ActiveRecord::StatementInvalid => e
+      if "#{e.inspect}" =~ /deadlock detected/i and (attempts_left -= 1) > 0
+        logger.info "Waiting for lock -- #{e.inspect}"
+        sleep 0.5
+        retry
+      end
+      raise
+    end
+
+    require 'refresh_permission_view'
+
+    refresh_permission_view
+
+    # Done.
+    send_json success: true
+  end
+end
diff --git a/services/api/app/controllers/static_controller.rb b/services/api/app/controllers/static_controller.rb
new file mode 100644 (file)
index 0000000..f0992c1
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class StaticController < ApplicationController
+  respond_to :json, :html
+
+  skip_before_filter :find_object_by_uuid
+  skip_before_filter :render_404_if_no_object
+  skip_before_filter :require_auth_scope, only: [:home, :empty, :login_failure]
+
+  def home
+    respond_to do |f|
+      f.html do
+        if Rails.configuration.workbench_address
+          redirect_to Rails.configuration.workbench_address
+        else
+          render_not_found "Oops, this is an API endpoint. You probably want to point your browser to an Arvados Workbench site instead."
+        end
+      end
+      f.json do
+        render_not_found "Path not found."
+      end
+    end
+  end
+
+  def empty
+    render text: ""
+  end
+
+end
diff --git a/services/api/app/controllers/user_sessions_controller.rb b/services/api/app/controllers/user_sessions_controller.rb
new file mode 100644 (file)
index 0000000..1889d74
--- /dev/null
@@ -0,0 +1,188 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class UserSessionsController < ApplicationController
+  before_filter :require_auth_scope, :only => [ :destroy ]
+
+  skip_before_filter :set_cors_headers
+  skip_before_filter :find_object_by_uuid
+  skip_before_filter :render_404_if_no_object
+
+  respond_to :html
+
+  # omniauth callback method
+  def create
+    omniauth = env['omniauth.auth']
+
+    identity_url_ok = (omniauth['info']['identity_url'].length > 0) rescue false
+    unless identity_url_ok
+      # Whoa. This should never happen.
+      logger.error "UserSessionsController.create: omniauth object missing/invalid"
+      logger.error "omniauth: "+omniauth.pretty_inspect
+
+      return redirect_to login_failure_url
+    end
+
+    # Only local users can create sessions, hence uuid_like_pattern
+    # here.
+    user = User.unscoped.where('identity_url = ? and uuid like ?',
+                               omniauth['info']['identity_url'],
+                               User.uuid_like_pattern).first
+    if not user
+      # Check for permission to log in to an existing User record with
+      # a different identity_url
+      Link.where("link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+                 'permission',
+                 'can_login',
+                 omniauth['info']['email'],
+                 User.uuid_like_pattern).each do |link|
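+        # A can_login link's properties may include an
+        # identity_url_prefix; accept this user record when the incoming
+        # identity_url starts with that prefix.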
+        if prefix = link.properties['identity_url_prefix']
+          if prefix == omniauth['info']['identity_url'][0..prefix.size-1]
+            user = User.find_by_uuid(link.head_uuid)
+            break if user
+          end
+        end
+      end
+    end
+
+    if not user
+      # New user registration
+      user = User.new(:email => omniauth['info']['email'],
+                      :first_name => omniauth['info']['first_name'],
+                      :last_name => omniauth['info']['last_name'],
+                      :identity_url => omniauth['info']['identity_url'],
+                      :is_active => Rails.configuration.new_users_are_active,
+                      :owner_uuid => system_user_uuid)
+      if omniauth['info']['username']
+        user.set_initial_username(requested: omniauth['info']['username'])
+      end
+      act_as_system_user do
+        user.save or raise Exception.new(user.errors.messages)
+      end
+    else
+      user.email = omniauth['info']['email']
+      user.first_name = omniauth['info']['first_name']
+      user.last_name = omniauth['info']['last_name']
+      if user.identity_url.nil?
+        # First login to a pre-activated account
+        user.identity_url = omniauth['info']['identity_url']
+      end
+
+      while (uuid = user.redirect_to_user_uuid)
+        user = User.unscoped.where(uuid: uuid).first
+        if !user
+          raise Exception.new("identity_url #{omniauth['info']['identity_url']} redirects to nonexistent uuid #{uuid}")
+        end
+      end
+    end
+
+    # For the benefit of functional and integration tests:
+    @user = user
+
+    # prevent ArvadosModel#before_create and _update from throwing
+    # "unauthorized":
+    Thread.current[:user] = user
+
+    user.save or raise Exception.new(user.errors.messages)
+
+    omniauth.delete('extra')
+
+    # Give the authenticated user a cookie for direct API access
+    session[:user_id] = user.id
+    session[:api_client_uuid] = nil
+    session[:api_client_trusted] = true # full permission to see user's secrets
+
+    @redirect_to = root_path
+    if params.has_key?(:return_to)
+      # return_to param's format is 'remote,return_to_url'. This comes from login()
+      # encoding the remote=zbbbb parameter passed by a client asking for a salted
+      # token.
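+      # For example: return_to=zbbbb,https://workbench.example.com/
+      # (hypothetical URL); an empty remote part means a local login.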
+      remote, return_to_url = params[:return_to].split(',', 2)
+      if remote !~ /^[0-9a-z]{5}$/ && remote != ""
+        return send_error 'Invalid remote cluster id', status: 400
+      end
+      remote = nil if remote == ''
+      return send_api_token_to(return_to_url, user, remote)
+    end
+    redirect_to @redirect_to
+  end
+
+  # Omniauth failure callback
+  def failure
+    flash[:notice] = params[:message]
+  end
+
+  # logout - Clear our Rack session, then redirect to the SSO provider so
+  # the Devise session gets cleaned up there too.
+  def logout
+    session[:user_id] = nil
+
+    flash[:notice] = 'You have logged off'
+    return_to = params[:return_to] || root_url
+    redirect_to "#{Rails.configuration.sso_provider_url}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
+  end
+
+  # login - Just bounce to /auth/joshid. The only purpose of this function is
+  # to save the return_to parameter (if it exists; see the application
+  # controller). /auth/joshid bypasses the application controller.
+  def login
+    if params[:remote] !~ /^[0-9a-z]{5}$/ && !params[:remote].nil?
+      return send_error 'Invalid remote cluster id', status: 400
+    end
+    if current_user and params[:return_to]
+      # Already logged in; just need to send a token to the requesting
+      # API client.
+      #
+      # FIXME: if current_user has never authorized this app before,
+      # ask for confirmation here!
+
+      return send_api_token_to(params[:return_to], current_user, params[:remote])
+    end
+    p = []
+    p << "auth_provider=#{CGI.escape(params[:auth_provider])}" if params[:auth_provider]
+    if params[:return_to]
+      # Encode remote param inside callback's return_to, so that we'll get it on
+      # create() after login.
+      remote_param = params[:remote].nil? ? '' : params[:remote]
+      p << "return_to=#{CGI.escape(remote_param + ',' + params[:return_to])}"
+    end
+    redirect_to "/auth/joshid?#{p.join('&')}"
+  end
+
+  def send_api_token_to(callback_url, user, remote=nil)
+    # Give the API client a token for making API calls on behalf of
+    # the authenticated user
+
+    # Stub: automatically register all new API clients
+    api_client_url_prefix = callback_url.match(%r{^.*?://[^/]+})[0] + '/'
+    act_as_system_user do
+      @api_client = ApiClient.
+        find_or_create_by(url_prefix: api_client_url_prefix)
+    end
+
+    @api_client_auth = ApiClientAuthorization.
+      new(user: user,
+          api_client: @api_client,
+          created_by_ip_address: remote_ip,
+          scopes: ["all"])
+    @api_client_auth.save!
+
+    if callback_url.index('?')
+      callback_url += '&'
+    else
+      callback_url += '?'
+    end
+    if remote.nil?
+      token = @api_client_auth.token
+    else
+      token = @api_client_auth.salted_token(remote: remote)
+    end
+    callback_url += 'api_token=' + token
+    redirect_to callback_url
+  end
+
+  def cross_origin_forbidden
+    send_error 'Forbidden', status: 403
+  end
+end
diff --git a/services/api/app/helpers/api_client_authorizations_helper.rb b/services/api/app/helpers/api_client_authorizations_helper.rb
new file mode 100644 (file)
index 0000000..e1066ba
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module ApiClientAuthorizationsHelper
+end
diff --git a/services/api/app/helpers/api_clients_helper.rb b/services/api/app/helpers/api_clients_helper.rb
new file mode 100644 (file)
index 0000000..9604777
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module ApiClientsHelper
+end
diff --git a/services/api/app/helpers/application_helper.rb b/services/api/app/helpers/application_helper.rb
new file mode 100644 (file)
index 0000000..904674b
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module ApplicationHelper
+  include CurrentApiClient
+end
diff --git a/services/api/app/helpers/authorized_keys_helper.rb b/services/api/app/helpers/authorized_keys_helper.rb
new file mode 100644 (file)
index 0000000..665fff7
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module AuthorizedKeysHelper
+end
diff --git a/services/api/app/helpers/collections_helper.rb b/services/api/app/helpers/collections_helper.rb
new file mode 100644 (file)
index 0000000..ca44f47
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module CollectionsHelper
+end
diff --git a/services/api/app/helpers/commit_ancestors_helper.rb b/services/api/app/helpers/commit_ancestors_helper.rb
new file mode 100644 (file)
index 0000000..6def2c9
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module CommitAncestorsHelper
+end
diff --git a/services/api/app/helpers/commits_helper.rb b/services/api/app/helpers/commits_helper.rb
new file mode 100644 (file)
index 0000000..d44719f
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module CommitsHelper
+end
diff --git a/services/api/app/helpers/groups_helper.rb b/services/api/app/helpers/groups_helper.rb
new file mode 100644 (file)
index 0000000..5464d96
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module GroupsHelper
+end
diff --git a/services/api/app/helpers/humans_helper.rb b/services/api/app/helpers/humans_helper.rb
new file mode 100644 (file)
index 0000000..e9b4c96
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module HumansHelper
+end
diff --git a/services/api/app/helpers/job_tasks_helper.rb b/services/api/app/helpers/job_tasks_helper.rb
new file mode 100644 (file)
index 0000000..ae78d75
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module JobTasksHelper
+end
diff --git a/services/api/app/helpers/jobs_helper.rb b/services/api/app/helpers/jobs_helper.rb
new file mode 100644 (file)
index 0000000..ba3f715
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module JobsHelper
+end
diff --git a/services/api/app/helpers/keep_disks_helper.rb b/services/api/app/helpers/keep_disks_helper.rb
new file mode 100644 (file)
index 0000000..19386c9
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module KeepDisksHelper
+end
diff --git a/services/api/app/helpers/links_helper.rb b/services/api/app/helpers/links_helper.rb
new file mode 100644 (file)
index 0000000..422a42d
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module LinksHelper
+end
diff --git a/services/api/app/helpers/logs_helper.rb b/services/api/app/helpers/logs_helper.rb
new file mode 100644 (file)
index 0000000..9b767d3
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module LogsHelper
+end
diff --git a/services/api/app/helpers/nodes_helper.rb b/services/api/app/helpers/nodes_helper.rb
new file mode 100644 (file)
index 0000000..cd1ecc9
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module NodesHelper
+end
diff --git a/services/api/app/helpers/pipeline_instances_helper.rb b/services/api/app/helpers/pipeline_instances_helper.rb
new file mode 100644 (file)
index 0000000..79c2e09
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module PipelineInstancesHelper
+end
diff --git a/services/api/app/helpers/pipeline_templates_helper.rb b/services/api/app/helpers/pipeline_templates_helper.rb
new file mode 100644 (file)
index 0000000..135b525
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module PipelineTemplatesHelper
+end
diff --git a/services/api/app/helpers/repositories_helper.rb b/services/api/app/helpers/repositories_helper.rb
new file mode 100644 (file)
index 0000000..04ef0f1
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module RepositoriesHelper
+end
diff --git a/services/api/app/helpers/specimens_helper.rb b/services/api/app/helpers/specimens_helper.rb
new file mode 100644 (file)
index 0000000..5c7e98a
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module SpecimensHelper
+end
diff --git a/services/api/app/helpers/traits_helper.rb b/services/api/app/helpers/traits_helper.rb
new file mode 100644 (file)
index 0000000..35ae11d
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module TraitsHelper
+end
diff --git a/services/api/app/helpers/virtual_machines_helper.rb b/services/api/app/helpers/virtual_machines_helper.rb
new file mode 100644 (file)
index 0000000..7d2b08f
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module VirtualMachinesHelper
+end
diff --git a/services/api/app/mailers/.gitkeep b/services/api/app/mailers/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/app/mailers/admin_notifier.rb b/services/api/app/mailers/admin_notifier.rb
new file mode 100644 (file)
index 0000000..87a5699
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AdminNotifier < ActionMailer::Base
+  include AbstractController::Callbacks
+
+  default from: Rails.configuration.admin_notifier_email_from
+
+  def new_user(user)
+    @user = user
+    if not Rails.configuration.new_user_notification_recipients.empty?
+      @recipients = Rails.configuration.new_user_notification_recipients
+      logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
+
+      add_to_subject = ''
+      if Rails.configuration.auto_setup_new_users
+        add_to_subject = @user.is_invited ? ' and setup' : ', but not setup'
+      end
+
+      mail(to: @recipients,
+           subject: "#{Rails.configuration.email_subject_prefix}New user created#{add_to_subject} notification"
+          )
+    end
+  end
+
+  def new_inactive_user(user)
+    @user = user
+    if not Rails.configuration.new_inactive_user_notification_recipients.empty?
+      @recipients = Rails.configuration.new_inactive_user_notification_recipients
+      logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
+      mail(to: @recipients,
+           subject: "#{Rails.configuration.email_subject_prefix}New inactive user notification"
+          )
+    end
+  end
+
+end
diff --git a/services/api/app/mailers/profile_notifier.rb b/services/api/app/mailers/profile_notifier.rb
new file mode 100644 (file)
index 0000000..8c0c5ec
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ProfileNotifier < ActionMailer::Base
+  default from: Rails.configuration.admin_notifier_email_from
+
+  def profile_created(user, address)
+    @user = user
+    mail(to: address, subject: "Profile created by #{@user.email}")
+  end
+end
diff --git a/services/api/app/mailers/user_notifier.rb b/services/api/app/mailers/user_notifier.rb
new file mode 100644 (file)
index 0000000..5fb7036
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class UserNotifier < ActionMailer::Base
+  include AbstractController::Callbacks
+
+  default from: Rails.configuration.user_notifier_email_from
+
+  def account_is_setup(user)
+    @user = user
+    mail(to: user.email, subject: 'Welcome to Arvados - shell account enabled')
+  end
+
+end
diff --git a/services/api/app/middlewares/arvados_api_token.rb b/services/api/app/middlewares/arvados_api_token.rb
new file mode 100644 (file)
index 0000000..acdc485
--- /dev/null
@@ -0,0 +1,68 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Perform api_token checking very early in the request process.  We want to do
+# this in the Rack stack instead of in ApplicationController because
+# the websocket server needs access to authentication but doesn't use any of
+# the Rails ActionDispatch infrastructure.
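+#
+# Tokens can arrive as an "api_token" or "oauth_token" query parameter,
+# in an Authorization header ("Bearer <token>" or "OAuth2 <token>"), or,
+# for GET requests, in a "reader_tokens" parameter. For example
+# (hypothetical token):
+#   curl -H "Authorization: Bearer v2/zzzzz-gj3su-xxxxxxxxxxxxxxx/secret" \
+#     https://zzzzz.arvadosapi.com/arvados/v1/users/current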
+class ArvadosApiToken
+
+  # Create a new ArvadosApiToken handler
+  # +app+  The next layer of the Rack stack.
+  def initialize(app = nil, options = nil)
+    @app = app.respond_to?(:call) ? app : nil
+  end
+
+  def call env
+    request = Rack::Request.new(env)
+    params = request.params
+    remote_ip = env["action_dispatch.remote_ip"]
+
+    Thread.current[:request_starttime] = Time.now
+
+    remote = false
+    reader_tokens = nil
+    if params["remote"] && request.get? && (
+         request.path.start_with?('/arvados/v1/groups') ||
+         request.path.start_with?('/arvados/v1/users/current'))
+      # Request from a remote API server, asking to validate a salted
+      # token.
+      remote = params["remote"]
+    elsif request.get? || params["_method"] == 'GET'
+      reader_tokens = params["reader_tokens"]
+      if reader_tokens.is_a? String
+        reader_tokens = SafeJSON.load(reader_tokens)
+      end
+    end
+
+    # Set current_user etc. based on the primary session token if a
+    # valid one is present. Otherwise, use the first valid token in
+    # reader_tokens.
+    accepted = false
+    auth = nil
+    [params["api_token"],
+     params["oauth_token"],
+     env["HTTP_AUTHORIZATION"].andand.match(/(OAuth2|Bearer) ([-\/a-zA-Z0-9]+)/).andand[2],
+     *reader_tokens,
+    ].each do |supplied|
+      next if !supplied
+      try_auth = ApiClientAuthorization.
+                 validate(token: supplied, remote: remote)
+      if try_auth.andand.user
+        auth = try_auth
+        accepted = supplied
+        break
+      end
+    end
+
+    Thread.current[:api_client_ip_address] = remote_ip
+    Thread.current[:api_client_authorization] = auth
+    Thread.current[:api_client_uuid] = auth.andand.api_client.andand.uuid
+    Thread.current[:api_client] = auth.andand.api_client
+    Thread.current[:token] = accepted
+    Thread.current[:user] = auth.andand.user
+
+    @app.call env if @app
+  end
+end
diff --git a/services/api/app/middlewares/rack_socket.rb b/services/api/app/middlewares/rack_socket.rb
new file mode 100644 (file)
index 0000000..1b301e2
--- /dev/null
@@ -0,0 +1,102 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'rack'
+require 'faye/websocket'
+require 'eventmachine'
+
+# A Rack middleware to handle inbound websocket connection requests and hand
+# them over to the faye websocket library.
+class RackSocket
+
+  DEFAULT_ENDPOINT  = '/websocket'
+
+  # Stop EventMachine on signal; this should give it a chance to unwind any
+  # open connections.
+  def die_gracefully_on_signal
+    Signal.trap("INT") { EM.stop }
+    Signal.trap("TERM") { EM.stop }
+  end
+
+  # Create a new RackSocket handler
+  # +app+  The next layer of the Rack stack.
+  #
+  # Accepts options:
+  # +:handler+ (Required) A class to handle new connections.  #initialize will
+  # call handler.new to create the actual handler instance object.  When a new
+  # websocket connection is established, #on_connect on the handler instance
+  # object will be called with the new connection.
+  #
+  # +:mount+ The HTTP request path that will be recognized for websocket
+  # connect requests, defaults to '/websocket'.
+  #
+  # +:websocket_only+  If true, the server will only handle websocket requests,
+  # and all other requests will result in an error.  If false, unhandled
+  # non-websocket requests will be passed along to 'app' in the usual Rack
+  # way.
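+  #
+  # A minimal usage sketch (hypothetical handler class):
+  #   use RackSocket, mount: '/websocket', websocket_only: false,
+  #       handler: EventBusHandler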
+  def initialize(app = nil, options = nil)
+    @app = app if app.respond_to?(:call)
+    @options = [app, options].grep(Hash).first || {}
+    @endpoint = @options[:mount] || DEFAULT_ENDPOINT
+    @websocket_only = @options[:websocket_only] || false
+
+    # from https://gist.github.com/eatenbyagrue/1338545#file-eventmachine-rb
+    if defined?(PhusionPassenger)
+      PhusionPassenger.on_event(:starting_worker_process) do |forked|
+        # for passenger, we need to avoid orphaned threads
+        if forked && EM.reactor_running?
+          EM.stop
+        end
+        Thread.new do
+          begin
+            EM.run
+          ensure
+            ActiveRecord::Base.connection.close
+          end
+        end
+        die_gracefully_on_signal
+      end
+    else
+      # facilitates debugging
+      Thread.abort_on_exception = true
+      # just spawn a thread and start it up
+      Thread.new do
+        begin
+          EM.run
+        ensure
+          ActiveRecord::Base.connection.close
+        end
+      end
+    end
+
+    # Create actual handler instance object from handler class.
+    @handler = @options[:handler].new
+  end
+
+  # Handle a websocket connection request, or pass it on to the next
+  # middleware supplied as +app+ in #initialize (unless the
+  # +:websocket_only+ option is true, in which case return an error
+  # response).
+  # +env+ the Rack environment with information about the request.
+  def call env
+    request = Rack::Request.new(env)
+    if request.path_info == @endpoint and Faye::WebSocket.websocket?(env)
+      if @handler.overloaded?
+        return [503, {"Content-Type" => "text/plain"}, ["Too many connections, try again later."]]
+      end
+
+      ws = Faye::WebSocket.new(env, nil, :ping => 30)
+
+      # Notify handler about new connection
+      @handler.on_connect ws
+
+      # Return async Rack response
+      ws.rack_response
+    elsif not @websocket_only
+      @app.call env
+    else
+      [406, {"Content-Type" => "text/plain"}, ["Only websocket connections are permitted on this port."]]
+    end
+  end
+
+end
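As a usage illustration, here is a minimal, hypothetical config.ru wiring
this middleware to an invented echo handler (none of this is part of the
diff; it is a sketch assuming the faye-websocket event API):

    require 'rack'
    require_relative 'app/middlewares/rack_socket'

    # Trivial handler: echoes each message back on the same connection.
    class EchoHandler
      def overloaded?
        false
      end

      def on_connect(ws)
        ws.on(:message) { |event| ws.send(event.data) }
      end
    end

    use RackSocket, handler: EchoHandler, mount: '/websocket'
    run ->(env) { [200, {'Content-Type' => 'text/plain'}, ['ok']] }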
diff --git a/services/api/app/models/.gitkeep b/services/api/app/models/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/app/models/api_client.rb b/services/api/app/models/api_client.rb
new file mode 100644 (file)
index 0000000..1f95d78
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ApiClient < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  has_many :api_client_authorizations
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :url_prefix
+    t.add :is_trusted
+  end
+end
diff --git a/services/api/app/models/api_client_authorization.rb b/services/api/app/models/api_client_authorization.rb
new file mode 100644 (file)
index 0000000..38538cb
--- /dev/null
@@ -0,0 +1,266 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ApiClientAuthorization < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  extend CurrentApiClient
+
+  belongs_to :api_client
+  belongs_to :user
+  after_initialize :assign_random_api_token
+  serialize :scopes, Array
+
+  api_accessible :user, extend: :common do |t|
+    t.add :owner_uuid
+    t.add :user_id
+    t.add :api_client_id
+    # NB the "api_token" db column is a misnomer in that it's only the
+    # "secret" part of a token: a v1 token is just the secret, but a
+    # v2 token is "v2/uuid/secret".
+    t.add :api_token
+    t.add :created_by_ip_address
+    t.add :default_owner_uuid
+    t.add :expires_at
+    t.add :last_used_at
+    t.add :last_used_by_ip_address
+    t.add :scopes
+  end
+
+  UNLOGGED_CHANGES = ['last_used_at', 'last_used_by_ip_address', 'updated_at']
+
+  def assign_random_api_token
+    self.api_token ||= rand(2**256).to_s(36)
+  end
+
+  def owner_uuid
+    self.user.andand.uuid
+  end
+  def owner_uuid_was
+    self.user_id_changed? ? User.where(id: self.user_id_was).first.andand.uuid : self.user.andand.uuid
+  end
+  def owner_uuid_changed?
+    self.user_id_changed?
+  end
+
+  def modified_by_client_uuid
+    nil
+  end
+  def modified_by_client_uuid=(x) end
+
+  def modified_by_user_uuid
+    nil
+  end
+  def modified_by_user_uuid=(x) end
+
+  def modified_at
+    nil
+  end
+  def modified_at=(x) end
+
+  def scopes_allow?(req_s)
+    scopes.each do |scope|
+      return true if (scope == 'all') or (scope == req_s) or
+        ((scope.end_with? '/') and (req_s.start_with? scope))
+    end
+    false
+  end
+
+  def scopes_allow_request?(request)
+    method = request.request_method
+    if method == 'HEAD'
+      (scopes_allow?(['HEAD', request.path].join(' ')) ||
+       scopes_allow?(['GET', request.path].join(' ')))
+    else
+      scopes_allow?([method, request.path].join(' '))
+    end
+  end
+
+  def logged_attributes
+    super.except 'api_token'
+  end
+
+  def self.default_orders
+    ["#{table_name}.id desc"]
+  end
+
+  def self.remote_host(uuid_prefix:)
+    Rails.configuration.remote_hosts[uuid_prefix] ||
+      (Rails.configuration.remote_hosts_via_dns &&
+       uuid_prefix+".arvadosapi.com")
+  end
+
+  def self.validate(token:, remote: nil)
+    return nil if !token
+    remote ||= Rails.configuration.uuid_prefix
+
+    case token[0..2]
+    when 'v2/'
+      _, uuid, secret, optional = token.split('/')
+      unless uuid.andand.length == 27 && secret.andand.length.andand > 0
+        return nil
+      end
+
+      if !optional.nil?
+        # if "optional" is a container uuid, check that it
+        # matches expections.
+        c = Container.where(uuid: optional).first
+        if !c.nil?
+          if !c.auth_uuid.nil? and c.auth_uuid != uuid
+            # token doesn't match the container's token
+            return nil
+          end
+          if !c.runtime_token.nil? and "v2/#{uuid}/#{secret}" != c.runtime_token
+            # token doesn't match the container's token
+            return nil
+          end
+          if ![Container::Locked, Container::Running].include?(c.state)
+            # container isn't locked or running, token shouldn't be used
+            return nil
+          end
+        end
+      end
+
+      auth = ApiClientAuthorization.
+             includes(:user, :api_client).
+             where('uuid=? and (expires_at is null or expires_at > CURRENT_TIMESTAMP)', uuid).
+             first
+      if auth && auth.user &&
+         (secret == auth.api_token ||
+          secret == OpenSSL::HMAC.hexdigest('sha1', auth.api_token, remote))
+        return auth
+      end
+
+      uuid_prefix = uuid[0..4]
+      if uuid_prefix == Rails.configuration.uuid_prefix
+        # If the token were valid, we would have validated it above
+        return nil
+      elsif uuid_prefix.length != 5
+        # malformed
+        return nil
+      end
+
+      host = remote_host(uuid_prefix: uuid_prefix)
+      if !host
+        Rails.logger.warn "remote authentication rejected: no host for #{uuid_prefix.inspect}"
+        return nil
+      end
+
+      # Token was issued by a different cluster. If it's expired or
+      # missing in our database, ask the originating cluster to
+      # [re]validate it.
+      begin
+        clnt = HTTPClient.new
+        if Rails.configuration.sso_insecure
+          clnt.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
+        else
+          # Use system CA certificates
+          ["/etc/ssl/certs/ca-certificates.crt",
+           "/etc/pki/tls/certs/ca-bundle.crt"]
+            .select { |ca_path| File.readable?(ca_path) }
+            .each { |ca_path| clnt.ssl_config.add_trust_ca(ca_path) }
+        end
+        remote_user = SafeJSON.load(
+          clnt.get_content('https://' + host + '/arvados/v1/users/current',
+                           {'remote' => Rails.configuration.uuid_prefix},
+                           {'Authorization' => 'Bearer ' + token}))
+      rescue => e
+        Rails.logger.warn "remote authentication with token #{token.inspect} failed: #{e}"
+        return nil
+      end
+      if !remote_user.is_a?(Hash) || !remote_user['uuid'].is_a?(String) || remote_user['uuid'][0..4] != uuid[0..4]
+        Rails.logger.warn "remote authentication rejected: remote_user=#{remote_user.inspect}"
+        return nil
+      end
+      act_as_system_user do
+        # Add/update user and token in our database so we can
+        # validate subsequent requests faster.
+
+        user = User.find_or_create_by(uuid: remote_user['uuid']) do |user|
+          # (this block runs for the "create" case, not for "find")
+          user.is_admin = false
+          user.email = remote_user['email']
+          if remote_user['username'].andand.length.andand > 0
+            user.set_initial_username(requested: remote_user['username'])
+          end
+        end
+
+        if Rails.configuration.new_users_are_active ||
+           Rails.configuration.auto_activate_users_from.include?(remote_user['uuid'][0..4])
+          # Update is_active to whatever it is at the remote end
+          user.is_active = remote_user['is_active']
+        elsif !remote_user['is_active']
+          # Remote user is inactive; our mirror should be, too.
+          user.is_active = false
+        end
+
+        %w[first_name last_name email prefs].each do |attr|
+          user.send(attr+'=', remote_user[attr])
+        end
+
+        user.save!
+
+        auth = ApiClientAuthorization.find_or_create_by(uuid: uuid) do |auth|
+          auth.user = user
+          auth.api_token = secret
+          auth.api_client_id = 0
+        end
+
+        # Accept this token (and don't reload the user record) for
+        # 5 minutes. TODO: Request the actual api_client_auth
+        # record from the remote server in case it wants the token
+        # to expire sooner.
+        auth.update_attributes!(user: user,
+                                api_token: secret,
+                                api_client_id: 0,
+                                expires_at: Time.now + 5.minutes)
+      end
+      return auth
+    else
+      auth = ApiClientAuthorization.
+             includes(:user, :api_client).
+             where('api_token=? and (expires_at is null or expires_at > CURRENT_TIMESTAMP)', token).
+             first
+      if auth && auth.user
+        return auth
+      end
+    end
+    return nil
+  end
+
+  def token
+    v2token
+  end
+
+  def v1token
+    api_token
+  end
+
+  def v2token
+    'v2/' + uuid + '/' + api_token
+  end
+
+  def salted_token(remote:)
+    if remote.nil?
+      # Without an explicit return, the unsalted token would be discarded
+      # and the HMAC below would fail on a nil remote.
+      return token
+    end
+    'v2/' + uuid + '/' + OpenSSL::HMAC.hexdigest('sha1', api_token, remote)
+  end
+
+  protected
+
+  def permission_to_create
+    current_user.andand.is_admin or (current_user.andand.id == self.user_id)
+  end
+
+  def permission_to_update
+    permission_to_create && !uuid_changed? &&
+      (current_user.andand.is_admin || !user_id_changed?)
+  end
+
+  def log_update
+    super unless (changed - UNLOGGED_CHANGES).empty?
+  end
+end
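To make the v1/v2 token formats and scope semantics above concrete, an
illustrative console session (run as an admin or system user; all values
are invented):

    auth = ApiClientAuthorization.create!(user: some_user)
    auth.v1token  # => bare secret, e.g. "2ke4vyjol6xmy..."
    auth.v2token  # => "v2/zzzzz-gj3su-0123456789abcde/2ke4vyjol6xmy..."

    # A scope ending in '/' matches any request path under that prefix:
    auth.scopes = ['GET /arvados/v1/collections/']
    auth.scopes_allow?('GET /arvados/v1/collections/xyz')  # => true
    auth.scopes_allow?('POST /arvados/v1/collections')     # => false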
diff --git a/services/api/app/models/arvados_model.rb b/services/api/app/models/arvados_model.rb
new file mode 100644 (file)
index 0000000..2002e90
--- /dev/null
@@ -0,0 +1,843 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'arvados_model_updates'
+require 'has_uuid'
+require 'record_filters'
+require 'serializers'
+require 'request_error'
+
+class ArvadosModel < ActiveRecord::Base
+  self.abstract_class = true
+
+  include ArvadosModelUpdates
+  include CurrentApiClient      # current_user, current_api_client, etc.
+  include DbCurrentTime
+  extend RecordFilters
+
+  after_initialize :log_start_state
+  before_save :ensure_permission_to_save
+  before_save :ensure_owner_uuid_is_permitted
+  before_save :ensure_ownership_path_leads_to_user
+  before_destroy :ensure_owner_uuid_is_permitted
+  before_destroy :ensure_permission_to_destroy
+  before_create :update_modified_by_fields
+  before_update :maybe_update_modified_by_fields
+  after_create :log_create
+  after_update :log_update
+  after_destroy :log_destroy
+  after_find :convert_serialized_symbols_to_strings
+  before_validation :normalize_collection_uuids
+  before_validation :set_default_owner
+  validate :ensure_valid_uuids
+
+  # Note: This only returns permission links. It does not account for
+  # permissions obtained via user.is_admin or
+  # user.uuid==object.owner_uuid.
+  has_many(:permissions,
+           ->{where(link_class: 'permission')},
+           foreign_key: :head_uuid,
+           class_name: 'Link',
+           primary_key: :uuid)
+
+  # If async is true at create or update, the permission graph update
+  # is deferred, allowing multiple calls to be made without the
+  # performance penalty.
+  attr_accessor :async_permissions_update
+
+  class PermissionDeniedError < RequestError
+    def http_status
+      403
+    end
+  end
+
+  class AlreadyLockedError < RequestError
+    def http_status
+      422
+    end
+  end
+
+  class LockFailedError < RequestError
+    def http_status
+      422
+    end
+  end
+
+  class InvalidStateTransitionError < RequestError
+    def http_status
+      422
+    end
+  end
+
+  class UnauthorizedError < RequestError
+    def http_status
+      401
+    end
+  end
+
+  class UnresolvableContainerError < RequestError
+    def http_status
+      422
+    end
+  end
+
+  def self.kind_class(kind)
+    kind.match(/^arvados\#(.+)$/)[1].classify.safe_constantize rescue nil
+  end
+
+  def href
+    "#{current_api_base}/#{self.class.to_s.pluralize.underscore}/#{self.uuid}"
+  end
+
+  def self.permit_attribute_params raw_params
+    # strong_parameters does not provide security: permissions are
+    # implemented with before_save hooks.
+    #
+    # The following permit! is necessary even with
+    # "ActionController::Parameters.permit_all_parameters = true",
+    # because permit_all does not permit nested attributes.
+    if raw_params
+      serialized_attributes.each do |colname, coder|
+        param = raw_params[colname.to_sym]
+        if param.nil?
+          # ok
+        elsif !param.is_a?(coder.object_class)
+          raise ArgumentError.new("#{colname} parameter must be #{coder.object_class}, not #{param.class}")
+        elsif has_nonstring_keys?(param)
+          raise ArgumentError.new("#{colname} parameter cannot have non-string hash keys")
+        end
+      end
+    end
+    ActionController::Parameters.new(raw_params).permit!
+  end
+
+  def initialize raw_params={}, *args
+    super(self.class.permit_attribute_params(raw_params), *args)
+  end
+
+  # Reload "old attributes" for logging, too.
+  def reload(*args)
+    super
+    log_start_state
+  end
+
+  def self.create raw_params={}, *args
+    super(permit_attribute_params(raw_params), *args)
+  end
+
+  def update_attributes raw_params={}, *args
+    super(self.class.permit_attribute_params(raw_params), *args)
+  end
+
+  def self.selectable_attributes(template=:user)
+    # Return an array of attribute name strings that can be selected
+    # in the given template.
+    api_accessible_attributes(template).map { |attr_spec| attr_spec.first.to_s }
+  end
+
+  def self.searchable_columns operator
+    textonly_operator = !operator.match(/[<=>]/)
+    self.columns.select do |col|
+      case col.type
+      when :string, :text
+        true
+      when :datetime, :integer, :boolean
+        !textonly_operator
+      else
+        false
+      end
+    end.map(&:name)
+  end
+
+  def self.attribute_column attr
+    self.columns.select { |col| col.name == attr.to_s }.first
+  end
+
+  def self.attributes_required_columns
+    # This method returns a hash.  Each key is the name of an API attribute,
+    # and it's mapped to a list of database columns that must be fetched
+    # to generate that attribute.
+    # This implementation generates a simple map of attributes to
+    # matching column names.  Subclasses can override this method
+    # to specify that method-backed API attributes need to fetch
+    # specific columns from the database.
+    all_columns = columns.map(&:name)
+    api_column_map = Hash.new { |hash, key| hash[key] = [] }
+    methods.grep(/^api_accessible_\w+$/).each do |method_name|
+      next if method_name == :api_accessible_attributes
+      send(method_name).each_pair do |api_attr_name, col_name|
+        col_name = col_name.to_s
+        if all_columns.include?(col_name)
+          api_column_map[api_attr_name.to_s] |= [col_name]
+        end
+      end
+    end
+    api_column_map
+  end
+
+  def self.ignored_select_attributes
+    ["href", "kind", "etag"]
+  end
+
+  def self.columns_for_attributes(select_attributes)
+    if select_attributes.empty?
+      raise ArgumentError.new("Attribute selection list cannot be empty")
+    end
+    api_column_map = attributes_required_columns
+    invalid_attrs = []
+    select_attributes.each do |s|
+      next if ignored_select_attributes.include? s
+      if not s.is_a? String or not api_column_map.include? s
+        invalid_attrs << s
+      end
+    end
+    if not invalid_attrs.empty?
+      raise ArgumentError.new("Invalid attribute(s): #{invalid_attrs.inspect}")
+    end
+    # Given an array of attribute names to select, return an array of column
+    # names that must be fetched from the database to satisfy the request.
+    select_attributes.flat_map { |attr| api_column_map[attr] }.uniq
+  end
+
+  def self.default_orders
+    ["#{table_name}.modified_at desc", "#{table_name}.uuid"]
+  end
+
+  def self.unique_columns
+    ["id", "uuid"]
+  end
+
+  def self.limit_index_columns_read
+    # This method returns a list of column names.
+    # If an index request reads that column from the database,
+    # APIs that return lists will only fetch objects until reaching
+    # max_index_database_read bytes of data from those columns.
+    []
+  end
+
+  # If current user can manage the object, return an array of uuids of
+  # users and groups that have permission to write the object. The
+  # first two elements are always [self.owner_uuid, current user's
+  # uuid].
+  #
+  # If current user can write but not manage the object, return
+  # [self.owner_uuid, current user's uuid].
+  #
+  # If current user cannot write this object, just return
+  # [self.owner_uuid].
+  def writable_by
+    return [owner_uuid] if not current_user
+    unless (owner_uuid == current_user.uuid or
+            current_user.is_admin or
+            (current_user.groups_i_can(:manage) & [uuid, owner_uuid]).any?)
+      if ((current_user.groups_i_can(:write) + [current_user.uuid]) &
+          [uuid, owner_uuid]).any?
+        return [owner_uuid, current_user.uuid]
+      else
+        return [owner_uuid]
+      end
+    end
+    [owner_uuid, current_user.uuid] + permissions.collect do |p|
+      if ['can_write', 'can_manage'].index p.name
+        p.tail_uuid
+      end
+    end.compact.uniq
+  end
+
+  # Return a query with read permissions restricted to the union of the
+  # permissions of the members of users_list, i.e. if something is readable by
+  # any user in users_list, it will be readable in the query returned by this
+  # function.
+  def self.readable_by(*users_list)
+    # Get rid of troublesome nils
+    users_list.compact!
+
+    # Load optional keyword arguments, if they exist.
+    if users_list.last.is_a? Hash
+      kwargs = users_list.pop
+    else
+      kwargs = {}
+    end
+
+    # Collect the UUIDs of the authorized users.
+    sql_table = kwargs.fetch(:table_name, table_name)
+    include_trash = kwargs.fetch(:include_trash, false)
+    include_old_versions = kwargs.fetch(:include_old_versions, false)
+
+    sql_conds = nil
+    user_uuids = users_list.map { |u| u.uuid }
+
+    exclude_trashed_records = ""
+    if !include_trash and (sql_table == "groups" or sql_table == "collections") then
+      # Only include records that are not explicitly trashed
+      exclude_trashed_records = "AND #{sql_table}.is_trashed = false"
+    end
+
+    if users_list.select { |u| u.is_admin }.any?
+      # Admins skip most permission checks, but we still want to filter out trashed items.
+      if !include_trash
+        if sql_table != "api_client_authorizations"
+          # Only include records where the owner is not trashed
+          sql_conds = "#{sql_table}.owner_uuid NOT IN (SELECT target_uuid FROM #{PERMISSION_VIEW} "+
+                      "WHERE trashed = 1) #{exclude_trashed_records}"
+        end
+      end
+    else
+      trashed_check = ""
+      if !include_trash then
+        trashed_check = "AND trashed = 0"
+      end
+
+      # Note: it is possible to combine the direct_check and
+      # owner_check into a single EXISTS() clause, however it turns
+      # out the query optimizer doesn't like it and forces a sequential
+      # table scan.  Constructing the query with separate EXISTS()
+      # clauses enables it to use the index.
+      #
+      # See issue 13208 for details.
+
+      # Match a direct read permission link from the user to the record uuid
+      direct_check = "#{sql_table}.uuid IN (SELECT target_uuid FROM #{PERMISSION_VIEW} "+
+                     "WHERE user_uuid IN (:user_uuids) AND perm_level >= 1 #{trashed_check})"
+
+      # Match a read permission link from the user to the record's owner_uuid
+      owner_check = ""
+      if sql_table != "api_client_authorizations" and sql_table != "groups" then
+        owner_check = "OR #{sql_table}.owner_uuid IN (SELECT target_uuid FROM #{PERMISSION_VIEW} "+
+          "WHERE user_uuid IN (:user_uuids) AND perm_level >= 1 #{trashed_check} AND target_owner_uuid IS NOT NULL) "
+      end
+
+      links_cond = ""
+      if sql_table == "links"
+        # Match any permission link that gives one of the authorized
+        # users some permission _or_ gives anyone else permission to
+        # view one of the authorized users.
+        links_cond = "OR (#{sql_table}.link_class IN (:permission_link_classes) AND "+
+                       "(#{sql_table}.head_uuid IN (:user_uuids) OR #{sql_table}.tail_uuid IN (:user_uuids)))"
+      end
+
+      sql_conds = "(#{direct_check} #{owner_check} #{links_cond}) #{exclude_trashed_records}"
+
+    end
+
+    if !include_old_versions && sql_table == "collections"
+      exclude_old_versions = "#{sql_table}.uuid = #{sql_table}.current_version_uuid"
+      if sql_conds.nil?
+        sql_conds = exclude_old_versions
+      else
+        sql_conds += " AND #{exclude_old_versions}"
+      end
+    end
+
+    self.where(sql_conds,
+               user_uuids: user_uuids,
+               permission_link_classes: ['permission', 'resources'])
+  end
+
+  def save_with_unique_name!
+    uuid_was = uuid
+    name_was = name
+    max_retries = 2
+    transaction do
+      conn = ActiveRecord::Base.connection
+      conn.exec_query 'SAVEPOINT save_with_unique_name'
+      begin
+        save!
+      rescue ActiveRecord::RecordNotUnique => rn
+        raise if max_retries == 0
+        max_retries -= 1
+
+        conn.exec_query 'ROLLBACK TO SAVEPOINT save_with_unique_name'
+
+        # Dig into the error to determine if it is specifically calling out a
+        # (owner_uuid, name) uniqueness violation.  In this specific case, if
+        # the client requested a unique name with ensure_unique_name==true,
+        # update the name field and try to save again.  Loop as necessary to
+        # discover a unique name.  It is necessary to handle name choosing at
+        # this level (as opposed to the client) to ensure that record creation
+        # never fails due to a race condition.
+        err = rn.original_exception
+        raise unless err.is_a?(PG::UniqueViolation)
+
+        # Unfortunately ActiveRecord doesn't abstract out any of the
+        # necessary information to figure out if this error is actually
+        # the specific case where we want to apply the ensure_unique_name
+        # behavior, so the following code is specialized to Postgres.
+        detail = err.result.error_field(PG::Result::PG_DIAG_MESSAGE_DETAIL)
+        raise unless /^Key \(owner_uuid, name\)=\([a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}, .*?\) already exists\./.match detail
+
+        new_name = "#{name_was} (#{db_current_time.utc.iso8601(3)})"
+        if new_name == name
+          # If the database is fast enough to do two attempts in the
+          # same millisecond, we need to wait to ensure we try a
+          # different timestamp on each attempt.
+          sleep 0.002
+          new_name = "#{name_was} (#{db_current_time.utc.iso8601(3)})"
+        end
+
+        self[:name] = new_name
+        if uuid_was.nil? && !uuid.nil?
+          self[:uuid] = nil
+          if self.is_a? Collection
+            # Reset current_version_uuid so it is assigned the new UUID
+            self[:current_version_uuid] = nil
+          end
+        end
+        conn.exec_query 'SAVEPOINT save_with_unique_name'
+        retry
+      ensure
+        conn.exec_query 'RELEASE SAVEPOINT save_with_unique_name'
+      end
+    end
+  end
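+  # save_with_unique_name! example (names and timestamp invented): saving a
+  # second Collection with the same (owner_uuid, name) does not fail; the
+  # name gets a timestamp suffix instead, e.g.
+  #   "sequencing run" -> "sequencing run (2019-03-14T14:11:26.123Z)"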
+
+  def logged_attributes
+    attributes.except(*Rails.configuration.unlogged_attributes)
+  end
+
+  def self.full_text_searchable_columns
+    self.columns.select do |col|
+      [:string, :text, :jsonb].include?(col.type)
+    end.map(&:name)
+  end
+
+  def self.full_text_tsvector
+    parts = full_text_searchable_columns.collect do |column|
+      cast = serialized_attributes[column] ? '::text' : ''
+      "coalesce(#{column}#{cast},'')"
+    end
+    "to_tsvector('english', substr(#{parts.join(" || ' ' || ")}, 0, 8000))"
+  end
+
+  def self.apply_filters query, filters
+    ft = record_filters filters, self
+    if not ft[:cond_out].any?
+      return query
+    end
+    query.where('(' + ft[:cond_out].join(') AND (') + ')',
+                          *ft[:param_out])
+  end
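+  # apply_filters example (illustrative; assumes the standard Arvados
+  # [attribute, operator, operand] filter format):
+  #   Collection.apply_filters(Collection.all, [['name', 'like', 'data%']])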
+
+  protected
+
+  def self.deep_sort_hash(x)
+    if x.is_a? Hash
+      x.sort.collect do |k, v|
+        [k, deep_sort_hash(v)]
+      end.to_h
+    elsif x.is_a? Array
+      x.collect { |v| deep_sort_hash(v) }
+    else
+      x
+    end
+  end
+
+  def ensure_ownership_path_leads_to_user
+    if new_record? or owner_uuid_changed?
+      uuid_in_path = {owner_uuid => true, uuid => true}
+      x = owner_uuid
+      while (owner_class = ArvadosModel::resource_class_for_uuid(x)) != User
+        begin
+          if x == uuid
+            # Test for cycles with the new version, not the DB contents
+            x = owner_uuid
+          elsif !owner_class.respond_to? :find_by_uuid
+            raise ActiveRecord::RecordNotFound.new
+          else
+            x = owner_class.find_by_uuid(x).owner_uuid
+          end
+        rescue ActiveRecord::RecordNotFound => e
+          errors.add :owner_uuid, "is not owned by any user: #{e}"
+          return false
+        end
+        if uuid_in_path[x]
+          if x == owner_uuid
+            errors.add :owner_uuid, "would create an ownership cycle"
+          else
+            errors.add :owner_uuid, "has an ownership cycle"
+          end
+          return false
+        end
+        uuid_in_path[x] = true
+      end
+    end
+    true
+  end
+
+  def set_default_owner
+    if new_record? and current_user and respond_to? :owner_uuid=
+      self.owner_uuid ||= current_user.uuid
+    end
+  end
+
+  def ensure_owner_uuid_is_permitted
+    raise PermissionDeniedError if !current_user
+
+    if self.owner_uuid.nil?
+      errors.add :owner_uuid, "cannot be nil"
+      raise PermissionDeniedError
+    end
+
+    rsc_class = ArvadosModel::resource_class_for_uuid owner_uuid
+    unless rsc_class == User or rsc_class == Group
+      errors.add :owner_uuid, "must be set to User or Group"
+      raise PermissionDeniedError
+    end
+
+    if new_record? || owner_uuid_changed?
+      # Permission on owner_uuid_was is needed to move an existing
+      # object away from its previous owner (which implies permission
+      # to modify this object itself, so we don't need to check that
+      # separately). Permission on the new owner_uuid is also needed.
+      [['old', owner_uuid_was],
+       ['new', owner_uuid]
+      ].each do |which, check_uuid|
+        if check_uuid.nil?
+          # old_owner_uuid is nil? New record, no need to check.
+        elsif !current_user.can?(write: check_uuid)
+          logger.warn "User #{current_user.uuid} tried to set ownership of #{self.class.to_s} #{self.uuid} but does not have permission to write #{which} owner_uuid #{check_uuid}"
+          errors.add :owner_uuid, "cannot be set or changed without write permission on #{which} owner"
+          raise PermissionDeniedError
+        end
+      end
+    else
+      # If the object already existed and we're not changing
+      # owner_uuid, we only need write permission on the object
+      # itself.
+      if !current_user.can?(write: self.uuid)
+        logger.warn "User #{current_user.uuid} tried to modify #{self.class.to_s} #{self.uuid} without write permission"
+        errors.add :uuid, "is not writable"
+        raise PermissionDeniedError
+      end
+    end
+
+    true
+  end
+
+  def ensure_permission_to_save
+    unless (new_record? ? permission_to_create : permission_to_update)
+      raise PermissionDeniedError
+    end
+  end
+
+  def permission_to_create
+    current_user.andand.is_active
+  end
+
+  def permission_to_update
+    if !current_user
+      logger.warn "Anonymous user tried to update #{self.class.to_s} #{self.uuid_was}"
+      return false
+    end
+    if !current_user.is_active
+      logger.warn "Inactive user #{current_user.uuid} tried to update #{self.class.to_s} #{self.uuid_was}"
+      return false
+    end
+    return true if current_user.is_admin
+    if self.uuid_changed?
+      logger.warn "User #{current_user.uuid} tried to change uuid of #{self.class.to_s} #{self.uuid_was} to #{self.uuid}"
+      return false
+    end
+    return true
+  end
+
+  def ensure_permission_to_destroy
+    raise PermissionDeniedError unless permission_to_destroy
+  end
+
+  def permission_to_destroy
+    permission_to_update
+  end
+
+  def maybe_update_modified_by_fields
+    update_modified_by_fields if self.changed? or self.new_record?
+    true
+  end
+
+  def update_modified_by_fields
+    current_time = db_current_time
+    self.created_at ||= created_at_was || current_time
+    self.updated_at = current_time
+    self.owner_uuid ||= current_default_owner if self.respond_to? :owner_uuid=
+    if !anonymous_updater
+      self.modified_by_user_uuid = current_user ? current_user.uuid : nil
+    end
+    if !timeless_updater
+      self.modified_at = current_time
+    end
+    self.modified_by_client_uuid = current_api_client ? current_api_client.uuid : nil
+    true
+  end
+
+  def self.has_nonstring_keys? x
+    if x.is_a? Hash
+      x.each do |k,v|
+        return true if !(k.is_a?(String) || k.is_a?(Symbol)) || has_nonstring_keys?(v)
+      end
+    elsif x.is_a? Array
+      x.each do |v|
+        return true if has_nonstring_keys?(v)
+      end
+    end
+    false
+  end
+
+  def self.has_symbols? x
+    if x.is_a? Hash
+      x.each do |k,v|
+        return true if has_symbols?(k) or has_symbols?(v)
+      end
+    elsif x.is_a? Array
+      x.each do |k|
+        return true if has_symbols?(k)
+      end
+    elsif x.is_a? Symbol
+      return true
+    elsif x.is_a? String
+      return true if x.start_with?(':') && !x.start_with?('::')
+    end
+    false
+  end
+
+  def self.recursive_stringify x
+    if x.is_a? Hash
+      Hash[x.collect do |k,v|
+             [recursive_stringify(k), recursive_stringify(v)]
+           end]
+    elsif x.is_a? Array
+      x.collect do |k|
+        recursive_stringify k
+      end
+    elsif x.is_a? Symbol
+      x.to_s
+    elsif x.is_a? String and x.start_with?(':') and !x.start_with?('::')
+      x[1..-1]
+    else
+      x
+    end
+  end
+
+  def self.where_serialized(colname, value, md5: false)
+    colsql = colname.to_s
+    if md5
+      colsql = "md5(#{colsql})"
+    end
+    if value.empty?
+      # rails4 stores as null, rails3 stored as serialized [] or {}
+      sql = "#{colsql} is null or #{colsql} IN (?)"
+      sorted = value
+    else
+      sql = "#{colsql} IN (?)"
+      sorted = deep_sort_hash(value)
+    end
+    params = [sorted.to_yaml, SafeJSON.dump(sorted)]
+    if md5
+      params = params.map { |x| Digest::MD5.hexdigest(x) }
+    end
+    where(sql, params)
+  end
+
+  Serializer = {
+    Hash => HashSerializer,
+    Array => ArraySerializer,
+  }
+
+  def self.serialize(colname, type)
+    coder = Serializer[type]
+    @serialized_attributes ||= {}
+    @serialized_attributes[colname.to_s] = coder
+    super(colname, coder)
+  end
+
+  def self.serialized_attributes
+    @serialized_attributes ||= {}
+  end
+
+  def serialized_attributes
+    self.class.serialized_attributes
+  end
+
+  def convert_serialized_symbols_to_strings
+    # ensure_serialized_attribute_type should prevent symbols from
+    # getting into the database in the first place. If someone managed
+    # to get them into the database (perhaps using an older version)
+    # we'll convert symbols to strings when loading from the
+    # database. (Otherwise, loading and saving an object with existing
+    # symbols in a serialized field will crash.)
+    self.class.serialized_attributes.each do |colname, attr|
+      if self.class.has_symbols? attributes[colname]
+        attributes[colname] = self.class.recursive_stringify attributes[colname]
+        send(colname + '=',
+             self.class.recursive_stringify(attributes[colname]))
+      end
+    end
+  end
+
+  def foreign_key_attributes
+    attributes.keys.select { |a| a.match(/_uuid$/) }
+  end
+
+  def skip_uuid_read_permission_check
+    %w(modified_by_client_uuid)
+  end
+
+  def skip_uuid_existence_check
+    []
+  end
+
+  def normalize_collection_uuids
+    foreign_key_attributes.each do |attr|
+      attr_value = send attr
+      if attr_value.is_a? String and
+          attr_value.match(/^[0-9a-f]{32,}(\+[@\w]+)*$/)
+        begin
+          send "#{attr}=", Collection.normalize_uuid(attr_value)
+        rescue
+          # TODO: abort instead of silently accepting unnormalizable value?
+        end
+      end
+    end
+  end
+
+  @@prefixes_hash = nil
+  def self.uuid_prefixes
+    unless @@prefixes_hash
+      @@prefixes_hash = {}
+      Rails.application.eager_load!
+      ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |k|
+        if k.respond_to?(:uuid_prefix)
+          @@prefixes_hash[k.uuid_prefix] = k
+        end
+      end
+    end
+    @@prefixes_hash
+  end
+
+  def self.uuid_like_pattern
+    "#{Rails.configuration.uuid_prefix}-#{uuid_prefix}-_______________"
+  end
+
+  def self.uuid_regex
+    %r/[a-z0-9]{5}-#{uuid_prefix}-[a-z0-9]{15}/
+  end
+
+  def ensure_valid_uuids
+    specials = [system_user_uuid]
+
+    foreign_key_attributes.each do |attr|
+      if new_record? or send (attr + "_changed?")
+        next if skip_uuid_existence_check.include? attr
+        attr_value = send attr
+        next if specials.include? attr_value
+        if attr_value
+          if (r = ArvadosModel::resource_class_for_uuid attr_value)
+            unless skip_uuid_read_permission_check.include? attr
+              r = r.readable_by(current_user)
+            end
+            if r.where(uuid: attr_value).count == 0
+              errors.add(attr, "'#{attr_value}' not found")
+            end
+          end
+        end
+      end
+    end
+  end
+
+  class Email
+    def self.kind
+      "email"
+    end
+
+    def kind
+      self.class.kind
+    end
+
+    def self.readable_by (*u)
+      self
+    end
+
+    def self.where (u)
+      [{:uuid => u[:uuid]}]
+    end
+  end
+
+  def self.resource_class_for_uuid(uuid)
+    if uuid.is_a? ArvadosModel
+      return uuid.class
+    end
+    unless uuid.is_a? String
+      return nil
+    end
+
+    uuid.match HasUuid::UUID_REGEX do |re|
+      return uuid_prefixes[re[1]] if uuid_prefixes[re[1]]
+    end
+
+    if uuid.match(/.+@.+/)
+      return Email
+    end
+
+    nil
+  end
+
+  # ArvadosModel.find_by_uuid needs extra magic to allow it to return
+  # an object in any class.
+  def self.find_by_uuid uuid
+    if self == ArvadosModel
+      # If called directly as ArvadosModel.find_by_uuid rather than via subclass,
+      # delegate to the appropriate subclass based on the given uuid.
+      self.resource_class_for_uuid(uuid).find_by_uuid(uuid)
+    else
+      super
+    end
+  end
+
+  def is_audit_logging_enabled?
+    return !(Rails.configuration.max_audit_log_age.to_i == 0 &&
+             Rails.configuration.max_audit_log_delete_batch.to_i > 0)
+  end
+
+  def log_start_state
+    if is_audit_logging_enabled?
+      @old_attributes = Marshal.load(Marshal.dump(attributes))
+      @old_logged_attributes = Marshal.load(Marshal.dump(logged_attributes))
+    end
+  end
+
+  def log_change(event_type)
+    if is_audit_logging_enabled?
+      log = Log.new(event_type: event_type).fill_object(self)
+      yield log
+      log.save!
+      log_start_state
+    end
+  end
+
+  def log_create
+    if is_audit_logging_enabled?
+      log_change('create') do |log|
+        log.fill_properties('old', nil, nil)
+        log.update_to self
+      end
+    end
+  end
+
+  def log_update
+    if is_audit_logging_enabled?
+      log_change('update') do |log|
+        log.fill_properties('old', etag(@old_attributes), @old_logged_attributes)
+        log.update_to self
+      end
+    end
+  end
+
+  def log_destroy
+    if is_audit_logging_enabled?
+      log_change('delete') do |log|
+        log.fill_properties('old', etag(@old_attributes), @old_logged_attributes)
+        log.update_to nil
+      end
+    end
+  end
+end
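A short sketch of how these permission helpers are typically exercised
(illustrative only; the user and UUID values are invented):

    user = current_user

    # Queries filtered through the permission graph, excluding trash:
    Collection.readable_by(user).limit(10)
    Collection.readable_by(user, include_trash: true)

    # Dispatch on a UUID's type prefix to the right model class:
    ArvadosModel.resource_class_for_uuid('zzzzz-4zz18-0123456789abcde')
    # => Collection (assuming the standard collection type prefix)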
diff --git a/services/api/app/models/authorized_key.rb b/services/api/app/models/authorized_key.rb
new file mode 100644 (file)
index 0000000..a5c5081
--- /dev/null
@@ -0,0 +1,55 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AuthorizedKey < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  before_create :permission_to_set_authorized_user_uuid
+  before_update :permission_to_set_authorized_user_uuid
+
+  belongs_to :authorized_user, :foreign_key => :authorized_user_uuid, :class_name => 'User', :primary_key => :uuid
+
+  validate :public_key_must_be_unique
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :key_type
+    t.add :authorized_user_uuid
+    t.add :public_key
+    t.add :expires_at
+  end
+
+  def permission_to_set_authorized_user_uuid
+    # Anonymous users cannot do anything here
+    return false if !current_user
+
+    # Administrators can attach a key to any user account
+    return true if current_user.is_admin
+
+    # All users can attach keys to their own accounts
+    return true if current_user.uuid == authorized_user_uuid
+
+    # Default = deny.
+    false
+  end
+
+  def public_key_must_be_unique
+    if self.public_key
+      valid_key = SSHKey.valid_ssh_public_key? self.public_key
+
+      if not valid_key
+        errors.add(:public_key, "does not appear to be a valid ssh-rsa or dsa public key")
+      else
+        # Valid if no other rows have this public key
+        if self.class.where('uuid != ? and public_key like ?',
+                            uuid || '', "%#{self.public_key}%").any?
+          errors.add(:public_key, "already exists in the database, use a different key.")
+          return false
+        end
+      end
+    end
+    return true
+  end
+end
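Illustrative use of the model above (key text abbreviated and invented):

    k = AuthorizedKey.new(name: 'laptop',
                          key_type: 'SSH',
                          authorized_user_uuid: current_user.uuid,
                          public_key: 'ssh-rsa AAAAB3Nza... user@host')
    k.valid?  # false unless the key parses and no other row stores the same key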
diff --git a/services/api/app/models/blob.rb b/services/api/app/models/blob.rb
new file mode 100644 (file)
index 0000000..55a2578
--- /dev/null
@@ -0,0 +1,126 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'request_error'
+
+class Blob
+  extend DbCurrentTime
+
+  def initialize locator
+    @locator = locator
+  end
+
+  def empty?
+    !!@locator.match(/^d41d8cd98f00b204e9800998ecf8427e(\+.*)?$/)
+  end
+
+  # In order to get a Blob from Keep, you have to prove either
+  # [a] you have recently written it to Keep yourself, or
+  # [b] apiserver has recently decided that you should be able to read it
+  #
+  # To ensure that the requestor of a blob is authorized to read it,
+  # Keep requires clients to timestamp the blob locator with an expiry
+  # time, and to sign the timestamped locator with their API token.
+  #
+  # A signed blob locator has the form:
+  #     locator_hash +A blob_signature @ timestamp
+  # where the timestamp is a Unix time expressed as a hexadecimal value,
+  # and the blob_signature is the signed locator_hash + API token + timestamp.
+  #
+  class InvalidSignatureError < RequestError
+  end
+
+  # Blob.sign_locator: return a signed and timestamped blob locator.
+  #
+  # The 'opts' argument should include:
+  #   [required] :api_token - API token (signatures only work for this token)
+  #   [optional] :key       - the Arvados server-side blobstore key
+  #   [optional] :ttl       - number of seconds before signature should expire
+  #   [optional] :expire    - unix timestamp when signature should expire
+  #
+  def self.sign_locator blob_locator, opts
+    # We only use the hash portion for signatures.
+    blob_hash = blob_locator.split('+').first
+
+    # Generate an expiry timestamp (seconds after epoch, base 16)
+    if opts[:expire]
+      if opts[:ttl]
+        raise "Cannot specify both :ttl and :expire options"
+      end
+      timestamp = opts[:expire]
+    else
+      timestamp = db_current_time.to_i +
+        (opts[:ttl] || Rails.configuration.blob_signature_ttl)
+    end
+    timestamp_hex = timestamp.to_s(16)
+    # => "53163cb4"
+    blob_signature_ttl = Rails.configuration.blob_signature_ttl.to_s(16)
+
+    # Generate a signature.
+    signature =
+      generate_signature((opts[:key] or Rails.configuration.blob_signing_key),
+                         blob_hash, opts[:api_token], timestamp_hex, blob_signature_ttl)
+
+    blob_locator + '+A' + signature + '@' + timestamp_hex
+  end
+
+  # Blob.verify_signature
+  #   Safely verify the signature on a blob locator.
+  #   Return value: true if the locator has a valid signature, false otherwise
+  #   Arguments: signed_blob_locator, opts
+  #
+  def self.verify_signature(*args)
+    begin
+      self.verify_signature!(*args)
+      true
+    rescue Blob::InvalidSignatureError
+      false
+    end
+  end
+
+  # Blob.verify_signature!
+  #   Verify the signature on a blob locator.
+  #   Return value: true if the locator has a valid signature
+  #   Arguments: signed_blob_locator, opts
+  #   Exceptions:
+  #     Blob::InvalidSignatureError if the blob locator does not include a
+  #     valid signature
+  #
+  def self.verify_signature! signed_blob_locator, opts
+    blob_hash = signed_blob_locator.split('+').first
+    given_signature, timestamp = signed_blob_locator.
+      split('+A').last.
+      split('+').first.
+      split('@')
+
+    if !timestamp
+      raise Blob::InvalidSignatureError.new 'No signature provided.'
+    end
+    unless timestamp =~ /^[\da-f]+$/
+      raise Blob::InvalidSignatureError.new 'Timestamp is not a base16 number.'
+    end
+    if timestamp.to_i(16) < (opts[:now] or db_current_time.to_i)
+      raise Blob::InvalidSignatureError.new 'Signature expiry time has passed.'
+    end
+    blob_signature_ttl = Rails.configuration.blob_signature_ttl.to_s(16)
+
+    my_signature =
+      generate_signature((opts[:key] or Rails.configuration.blob_signing_key),
+                         blob_hash, opts[:api_token], timestamp, blob_signature_ttl)
+
+    if my_signature != given_signature
+      raise Blob::InvalidSignatureError.new 'Signature is invalid.'
+    end
+
+    true
+  end
+
+  def self.generate_signature key, blob_hash, api_token, timestamp, blob_signature_ttl
+    OpenSSL::HMAC.hexdigest('sha1', key,
+                            [blob_hash,
+                             api_token,
+                             timestamp,
+                             blob_signature_ttl].join('@'))
+  end
+end
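A sign/verify round trip, e.g. from the API server's console (the token
value is invented; the signing key comes from the server configuration):

    locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3'
    signed = Blob.sign_locator(locator, api_token: 'xyzzy', ttl: 3600)
    # => "acbd18db4cc2f85cedef654fccc4a4d8+3+A<hex signature>@<hex expiry>"

    Blob.verify_signature(signed, api_token: 'xyzzy')  # => true
    Blob.verify_signature(signed, api_token: 'wrong')  # => false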
diff --git a/services/api/app/models/collection.rb b/services/api/app/models/collection.rb
new file mode 100644 (file)
index 0000000..6147b79
--- /dev/null
@@ -0,0 +1,658 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'arvados/keep'
+require 'sweep_trashed_objects'
+require 'trashable'
+
+class Collection < ArvadosModel
+  extend CurrentApiClient
+  extend DbCurrentTime
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  include Trashable
+
+  serialize :properties, Hash
+  serialize :storage_classes_desired, Array
+  serialize :storage_classes_confirmed, Array
+
+  before_validation :default_empty_manifest
+  before_validation :default_storage_classes, on: :create
+  before_validation :check_encoding
+  before_validation :check_manifest_validity
+  before_validation :check_signatures
+  before_validation :strip_signatures_and_update_replication_confirmed
+  validate :ensure_pdh_matches_manifest_text
+  validate :ensure_storage_classes_desired_is_not_empty
+  validate :ensure_storage_classes_contain_non_empty_strings
+  validate :versioning_metadata_updates, on: :update
+  validate :past_versions_cannot_be_updated, on: :update
+  before_save :set_file_names
+  around_update :manage_versioning
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :description
+    t.add :properties
+    t.add :portable_data_hash
+    t.add :signed_manifest_text, as: :manifest_text
+    t.add :manifest_text, as: :unsigned_manifest_text
+    t.add :replication_desired
+    t.add :replication_confirmed
+    t.add :replication_confirmed_at
+    t.add :storage_classes_desired
+    t.add :storage_classes_confirmed
+    t.add :storage_classes_confirmed_at
+    t.add :delete_at
+    t.add :trash_at
+    t.add :is_trashed
+    t.add :version
+    t.add :current_version_uuid
+    t.add :preserve_version
+  end
+
+  after_initialize do
+    @signatures_checked = false
+    @computed_pdh_for_manifest_text = false
+  end
+
+  def self.attributes_required_columns
+    super.merge(
+                # If we don't list manifest_text explicitly, the
+                # params[:select] code gets confused by the way we
+                # expose signed_manifest_text as manifest_text in the
+                # API response, and never let clients select the
+                # manifest_text column.
+                #
+                # We need trash_at and is_trashed to determine the
+                # correct timestamp in signed_manifest_text.
+                'manifest_text' => ['manifest_text', 'trash_at', 'is_trashed'],
+                'unsigned_manifest_text' => ['manifest_text'],
+                )
+  end
+
+  def self.ignored_select_attributes
+    super + ["updated_at", "file_names"]
+  end
+
+  def self.limit_index_columns_read
+    ["manifest_text"]
+  end
+
+  FILE_TOKEN = /^[[:digit:]]+:[[:digit:]]+:/
+  def check_signatures
+    return false if self.manifest_text.nil?
+
+    return true if current_user.andand.is_admin
+
+    # Provided the manifest_text hasn't changed materially since an
+    # earlier validation, it's safe to pass this validation on
+    # subsequent passes without checking any signatures. This is
+    # important because the signatures have probably been stripped off
+    # by the time we get to a second validation pass!
+    if @signatures_checked && @signatures_checked == computed_pdh
+      return true
+    end
+
+    if self.manifest_text_changed?
+      # Check permissions on the collection manifest.
+      # If any signature cannot be verified, raise PermissionDeniedError
+      # which will return 403 Permission denied to the client.
+      api_token = Thread.current[:token]
+      signing_opts = {
+        api_token: api_token,
+        now: @validation_timestamp.to_i,
+      }
+      self.manifest_text.each_line do |entry|
+        entry.split.each do |tok|
+          if tok == '.' or tok.starts_with? './'
+            # Stream name token.
+          elsif tok =~ FILE_TOKEN
+            # This is a filename token, not a blob locator. Note that we
+            # keep checking tokens after this, even though manifest
+            # format dictates that all subsequent tokens will also be
+            # filenames. Safety first!
+          elsif Blob.verify_signature tok, signing_opts
+            # OK.
+          elsif Keep::Locator.parse(tok).andand.signature
+            # Signature provided, but verify_signature did not like it.
+            logger.warn "Invalid signature on locator #{tok}"
+            raise ArvadosModel::PermissionDeniedError
+          elsif Rails.configuration.permit_create_collection_with_unsigned_manifest
+            # No signature provided, but we are running in insecure mode.
+            logger.debug "Missing signature on locator #{tok} ignored"
+          elsif Blob.new(tok).empty?
+            # No signature provided -- but no data to protect, either.
+          else
+            logger.warn "Missing signature on locator #{tok}"
+            raise ArvadosModel::PermissionDeniedError
+          end
+        end
+      end
+    end
+    @signatures_checked = computed_pdh
+  end
+
+  def strip_signatures_and_update_replication_confirmed
+    if self.manifest_text_changed?
+      in_old_manifest = {}
+      if not self.replication_confirmed.nil?
+        self.class.each_manifest_locator(manifest_text_was) do |match|
+          in_old_manifest[match[1]] = true
+        end
+      end
+
+      stripped_manifest = self.class.munge_manifest_locators(manifest_text) do |match|
+        if not self.replication_confirmed.nil? and not in_old_manifest[match[1]]
+          # If the new manifest_text contains locators whose hashes
+          # weren't in the old manifest_text, storage replication is no
+          # longer confirmed.
+          self.replication_confirmed_at = nil
+          self.replication_confirmed = nil
+        end
+
+        # Return the locator with all permission signatures removed,
+        # but otherwise intact.
+        match[0].gsub(/\+A[^+]*/, '')
+      end
+
+      if @computed_pdh_for_manifest_text == manifest_text
+        # If the cached PDH was valid before stripping, it is still
+        # valid after stripping.
+        @computed_pdh_for_manifest_text = stripped_manifest.dup
+      end
+
+      self[:manifest_text] = stripped_manifest
+    end
+    true
+  end
+
+  def ensure_pdh_matches_manifest_text
+    if not manifest_text_changed? and not portable_data_hash_changed?
+      true
+    elsif portable_data_hash.nil? or not portable_data_hash_changed?
+      self.portable_data_hash = computed_pdh
+    elsif portable_data_hash !~ Keep::Locator::LOCATOR_REGEXP
+      errors.add(:portable_data_hash, "is not a valid locator")
+      false
+    elsif portable_data_hash[0..31] != computed_pdh[0..31]
+      errors.add(:portable_data_hash,
+                 "'#{portable_data_hash}' does not match computed hash '#{computed_pdh}'")
+      false
+    else
+      # Ignore the client-provided size part: always store
+      # computed_pdh in the database.
+      self.portable_data_hash = computed_pdh
+    end
+  end
+
+  def set_file_names
+    if self.manifest_text_changed?
+      self.file_names = manifest_files
+    end
+    true
+  end
+
+  def manifest_files
+    return '' if !self.manifest_text
+
+    done = {}
+    names = ''
+    self.manifest_text.scan(/ \d+:\d+:(\S+)/) do |name|
+      next if done[name]
+      done[name] = true
+      names << name.first.gsub('\040',' ') + "\n"
+    end
+    self.manifest_text.scan(/^\.\/(\S+)/m) do |stream_name|
+      next if done[stream_name]
+      done[stream_name] = true
+      names << stream_name.first.gsub('\040',' ') + "\n"
+    end
+    names
+  end
+
+  def default_empty_manifest
+    self.manifest_text ||= ''
+  end
+
+  def skip_uuid_existence_check
+    # Avoid checking the existence of current_version_uuid, as it's
+    # assigned on creation of a new 'current version' collection, so
+    # the collection's UUID only lives in memory when the validation check
+    # is performed.
+    ['current_version_uuid']
+  end
+
+  def manage_versioning
+    should_preserve_version = should_preserve_version? # Time sensitive, cache value
+    return(yield) unless (should_preserve_version || syncable_updates.any?)
+
+    # Put aside the changes because with_lock forces a record reload
+    changes = self.changes
+    snapshot = nil
+    with_lock do
+      # Copy the original state to save it as old version
+      if should_preserve_version
+        snapshot = self.dup
+        snapshot.uuid = nil # Reset UUID so it's created as a new record
+        snapshot.created_at = self.created_at
+      end
+
+      # Restore requested changes on the current version
+      changes.keys.each do |attr|
+        if attr == 'preserve_version' && changes[attr].last == false
+          next # Ignore false assignment; once true, it stays true until the next version
+        end
+        self.attributes = {attr => changes[attr].last}
+        if attr == 'uuid'
+          # Also update the current version reference
+          self.attributes = {'current_version_uuid' => changes[attr].last}
+        end
+      end
+
+      if should_preserve_version
+        self.version += 1
+        self.preserve_version = false
+      end
+
+      yield
+
+      sync_past_versions if syncable_updates.any?
+      if snapshot
+        snapshot.attributes = self.syncable_updates
+        snapshot.manifest_text = snapshot.signed_manifest_text
+        snapshot.save
+      end
+    end
+  end
+
+  def syncable_updates
+    updates = {}
+    (syncable_attrs & self.changes.keys).each do |attr|
+      if attr == 'uuid'
+        # Point old versions to current version's new UUID
+        updates['current_version_uuid'] = self.changes[attr].last
+      else
+        updates[attr] = self.changes[attr].last
+      end
+    end
+    return updates
+  end
+
+  def sync_past_versions
+    updates = self.syncable_updates
+    Collection.where('current_version_uuid = ? AND uuid != ?', self.uuid_was, self.uuid_was).each do |c|
+      c.attributes = updates
+      # Use a different validation context to skip the 'past_versions_cannot_be_updated'
+      # validator, as in this case it is legal to update some fields.
+      leave_modified_by_user_alone do
+        leave_modified_at_alone do
+          c.save(context: :update_old_versions)
+        end
+      end
+    end
+  end
+
+  def versionable_updates?(attrs)
+    (['manifest_text', 'description', 'properties', 'name'] & attrs).any?
+  end
+
+  def syncable_attrs
+    ['uuid', 'owner_uuid', 'delete_at', 'trash_at', 'is_trashed', 'replication_desired', 'storage_classes_desired']
+  end
+
+  def should_preserve_version?
+    return false unless (Rails.configuration.collection_versioning && versionable_updates?(self.changes.keys))
+
+    idle_threshold = Rails.configuration.preserve_version_if_idle
+    if !self.preserve_version_was &&
+      (idle_threshold < 0 ||
+        (idle_threshold > 0 && self.modified_at_was > db_current_time-idle_threshold.seconds))
+      return false
+    end
+    return true
+  end
+
+  def check_encoding
+    if manifest_text.encoding.name == 'UTF-8' and manifest_text.valid_encoding?
+      true
+    else
+      begin
+        # If Ruby thinks the encoding is something else, like 7-bit
+        # ASCII, but its stored bytes are equal to the (valid) UTF-8
+        # encoding of the same string, we declare it to be a UTF-8
+        # string.
+        utf8 = manifest_text
+        utf8.force_encoding Encoding::UTF_8
+        if utf8.valid_encoding? and utf8 == manifest_text.encode(Encoding::UTF_8)
+          self.manifest_text = utf8
+          return true
+        end
+      rescue
+      end
+      errors.add :manifest_text, "must use UTF-8 encoding"
+      false
+    end
+  end
+
+  def check_manifest_validity
+    begin
+      Keep::Manifest.validate! manifest_text
+      true
+    rescue ArgumentError => e
+      errors.add :manifest_text, e.message
+      false
+    end
+  end
+
+  def signed_manifest_text
+    if !has_attribute? :manifest_text
+      return nil
+    elsif is_trashed
+      return manifest_text
+    else
+      token = Thread.current[:token]
+      exp = [db_current_time.to_i + Rails.configuration.blob_signature_ttl,
+             trash_at].compact.map(&:to_i).min
+      self.class.sign_manifest manifest_text, token, exp
+    end
+  end
+
+  def self.sign_manifest manifest, token, exp=nil
+    if exp.nil?
+      exp = db_current_time.to_i + Rails.configuration.blob_signature_ttl
+    end
+    signing_opts = {
+      api_token: token,
+      expire: exp,
+    }
+    m = munge_manifest_locators(manifest) do |match|
+      Blob.sign_locator(match[0], signing_opts)
+    end
+    return m
+  end
+
+  def self.munge_manifest_locators manifest
+    # Given a manifest text and a block, yield the regexp MatchData
+    # for each locator. Return a new manifest in which each locator
+    # has been replaced by the block's return value.
+    return nil if !manifest
+    return '' if manifest == ''
+
+    new_lines = []
+    manifest.each_line do |line|
+      line.rstrip!
+      new_words = []
+      line.split(' ').each do |word|
+        if new_words.empty?
+          new_words << word
+        elsif match = Keep::Locator::LOCATOR_REGEXP.match(word)
+          new_words << yield(match)
+        else
+          new_words << word
+        end
+      end
+      new_lines << new_words.join(' ')
+    end
+    new_lines.join("\n") + "\n"
+  end
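+  # munge_manifest_locators example (locator and signature invented):
+  #
+  #   Collection.munge_manifest_locators(
+  #     ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n") do |m|
+  #     m[0] + '+Afake@12345678'
+  #   end
+  #   # => ". acbd18db4cc2f85cedef654fccc4a4d8+3+Afake@12345678 0:3:foo.txt\n"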
+
+  def self.each_manifest_locator manifest
+    # Given a manifest text and a block, yield the regexp match object
+    # for each locator.
+    manifest.each_line do |line|
+      # line will have a trailing newline, but the last token is never
+      # a locator, so it's harmless here.
+      line.split(' ').each do |word|
+        if match = Keep::Locator::LOCATOR_REGEXP.match(word)
+          yield(match)
+        end
+      end
+    end
+  end
+
+  def self.normalize_uuid uuid
+    hash_part = nil
+    size_part = nil
+    uuid.split('+').each do |token|
+      if token.match(/^[0-9a-f]{32,}$/)
+        raise "uuid #{uuid} has multiple hash parts" if hash_part
+        hash_part = token
+      elsif token.match(/^\d+$/)
+        raise "uuid #{uuid} has multiple size parts" if size_part
+        size_part = token
+      end
+    end
+    raise "uuid #{uuid} has no hash part" if !hash_part
+    [hash_part, size_part].compact.join '+'
+  end
+
+  def self.get_compatible_images(readers, pattern, collections)
+    if collections.empty?
+      return []
+    end
+
+    migrations = Hash[
+      Link.where('tail_uuid in (?) AND link_class=? AND links.owner_uuid=?',
+                 collections.map(&:portable_data_hash),
+                 'docker_image_migration',
+                 system_user_uuid).
+      order('links.created_at asc').
+      map { |l|
+        [l.tail_uuid, l.head_uuid]
+      }]
+
+    migrated_collections = Hash[
+      Collection.readable_by(*readers).
+      where('portable_data_hash in (?)', migrations.values).
+      map { |c|
+        [c.portable_data_hash, c]
+      }]
+
+    collections.map { |c|
+      # Check if the listed image is compatible first, if not, then try the
+      # migration link.
+      manifest = Keep::Manifest.new(c.manifest_text)
+      if manifest.exact_file_count?(1) and manifest.files[0][1] =~ pattern
+        c
+      elsif m = migrated_collections[migrations[c.portable_data_hash]]
+        manifest = Keep::Manifest.new(m.manifest_text)
+        if manifest.exact_file_count?(1) and manifest.files[0][1] =~ pattern
+          m
+        end
+      end
+    }.compact
+  end
+
+  # Resolve a Docker repo+tag, hash, or collection PDH to an array of
+  # Collection objects, sorted by timestamp starting with the most recent
+  # match.
+  #
+  # If filter_compatible_format is true (the default), only return image
+  # collections which are supported by the installation as indicated by
+  # Rails.configuration.docker_image_formats.  Will follow
+  # 'docker_image_migration' links if search_term resolves to an incompatible
+  # image, but an equivalent compatible image is available.
+  def self.find_all_for_docker_image(search_term, search_tag=nil, readers=nil, filter_compatible_format: true)
+    readers ||= [Thread.current[:user]]
+    base_search = Link.
+      readable_by(*readers).
+      readable_by(*readers, table_name: "collections").
+      joins("JOIN collections ON links.head_uuid = collections.uuid").
+      order("links.created_at DESC")
+
+    if (Rails.configuration.docker_image_formats.include? 'v1' and
+        Rails.configuration.docker_image_formats.include? 'v2') or filter_compatible_format == false
+      pattern = /^(sha256:)?[0-9A-Fa-f]{64}\.tar$/
+    elsif Rails.configuration.docker_image_formats.include? 'v2'
+      pattern = /^(sha256:)[0-9A-Fa-f]{64}\.tar$/
+    elsif Rails.configuration.docker_image_formats.include? 'v1'
+      pattern = /^[0-9A-Fa-f]{64}\.tar$/
+    else
+      raise "Unrecognized configuration for docker_image_formats #{Rails.configuration.docker_image_formats}"
+    end
+
+    # If the search term is a Collection locator that contains one file
+    # that looks like a Docker image, return it.
+    if loc = Keep::Locator.parse(search_term)
+      loc.strip_hints!
+      coll_match = readable_by(*readers).where(portable_data_hash: loc.to_s).limit(1)
+      if coll_match.any? or Rails.configuration.remote_hosts.length == 0
+        return get_compatible_images(readers, pattern, coll_match)
+      else
+        # Allow bare pdh that doesn't exist in the local database so
+        # that federated container requests which refer to remotely
+        # stored containers will validate.
+        return [Collection.new(portable_data_hash: loc.to_s)]
+      end
+    end
+
+    if search_tag.nil? and (n = search_term.index(":"))
+      search_tag = search_term[n+1..-1]
+      search_term = search_term[0..n-1]
+    end
+
+    # Find Collections with matching Docker image repository+tag pairs.
+    matches = base_search.
+      where(link_class: "docker_image_repo+tag",
+            name: "#{search_term}:#{search_tag || 'latest'}")
+
+    # If that didn't work, find Collections with matching Docker image hashes.
+    if matches.empty?
+      matches = base_search.
+        where("link_class = ? and links.name LIKE ?",
+              "docker_image_hash", "#{search_term}%")
+    end
+
+    # Generate an order key for each result.  We want to order the results
+    # so that anything with an image timestamp is considered more recent than
+    # anything without; then we use the link's created_at as a tiebreaker.
+    uuid_timestamps = {}
+    matches.each do |link|
+      uuid_timestamps[link.head_uuid] = [(-link.properties["image_timestamp"].to_datetime.to_i rescue 0),
+                                         -link.created_at.to_i]
+    end
+
+    sorted = Collection.where('uuid in (?)', uuid_timestamps.keys).sort_by { |c|
+      uuid_timestamps[c.uuid]
+    }
+    compatible = get_compatible_images(readers, pattern, sorted)
+    if sorted.length > 0 and compatible.empty?
+      raise ArvadosModel::UnresolvableContainerError.new "Matching Docker image is incompatible with 'docker_image_formats' configuration."
+    end
+    compatible
+  end
+
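+  # Example (illustrative, hypothetical image name):
+  #   coll = Collection.for_latest_docker_image("arvados/jobs:latest")
+  #   coll.andand.portable_data_hash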
+  def self.for_latest_docker_image(search_term, search_tag=nil, readers=nil)
+    find_all_for_docker_image(search_term, search_tag, readers).first
+  end
+
+  def self.searchable_columns operator
+    super - ["manifest_text"]
+  end
+
+  def self.full_text_searchable_columns
+    super - ["manifest_text", "storage_classes_desired", "storage_classes_confirmed", "current_version_uuid"]
+  end
+
+  def self.where *args
+    SweepTrashedObjects.sweep_if_stale
+    super
+  end
+
+  protected
+
+  # Although the defaults for these columns are already set up in the schema,
+  # collection creation from an API client seems to ignore them, causing the
+  # validation on empty desired storage classes to return an error.
+  def default_storage_classes
+    if self.storage_classes_desired.nil? || self.storage_classes_desired.empty?
+      self.storage_classes_desired = ["default"]
+    end
+    self.storage_classes_confirmed ||= []
+  end
+
+  def portable_manifest_text
+    self.class.munge_manifest_locators(manifest_text) do |match|
+      if match[2] # size
+        match[1] + match[2]
+      else
+        match[1]
+      end
+    end
+  end
+
+  def compute_pdh
+    portable_manifest = portable_manifest_text
+    (Digest::MD5.hexdigest(portable_manifest) +
+     '+' +
+     portable_manifest.bytesize.to_s)
+  end
+
+  def computed_pdh
+    if @computed_pdh_for_manifest_text == manifest_text
+      return @computed_pdh
+    end
+    @computed_pdh = compute_pdh
+    @computed_pdh_for_manifest_text = manifest_text.dup
+    @computed_pdh
+  end
+
+  def ensure_permission_to_save
+    if (not current_user.andand.is_admin)
+      if (replication_confirmed_at_changed? or replication_confirmed_changed?) and
+        not (replication_confirmed_at.nil? and replication_confirmed.nil?)
+        raise ArvadosModel::PermissionDeniedError.new("replication_confirmed and replication_confirmed_at attributes cannot be changed, except by setting both to nil")
+      end
+      if (storage_classes_confirmed_changed? or storage_classes_confirmed_at_changed?) and
+        not (storage_classes_confirmed == [] and storage_classes_confirmed_at.nil?)
+        raise ArvadosModel::PermissionDeniedError.new("storage_classes_confirmed and storage_classes_confirmed_at attributes cannot be changed, except by setting them to [] and nil respectively")
+      end
+    end
+    super
+  end
+
+  def ensure_storage_classes_desired_is_not_empty
+    if self.storage_classes_desired.empty?
+      raise ArvadosModel::InvalidStateTransitionError.new("storage_classes_desired shouldn't be empty")
+    end
+  end
+
+  def ensure_storage_classes_contain_non_empty_strings
+    (self.storage_classes_desired + self.storage_classes_confirmed).each do |c|
+      if !c.is_a?(String) || c == ''
+        raise ArvadosModel::InvalidStateTransitionError.new("storage classes should only be non-empty strings")
+      end
+    end
+  end
+
+  def past_versions_cannot_be_updated
+    # We check for the '_was' values just in case the update operation
+    # includes a change on current_version_uuid or uuid.
+    if current_version_uuid_was != uuid_was
+      errors.add(:base, "past versions cannot be updated")
+      false
+    end
+  end
+
+  def versioning_metadata_updates
+    valid = true
+    if (current_version_uuid_was == uuid_was) && current_version_uuid_changed?
+      errors.add(:current_version_uuid, "cannot be updated")
+      valid = false
+    end
+    if version_changed?
+      errors.add(:version, "cannot be updated")
+      valid = false
+    end
+    valid
+  end
+
+  def assign_uuid
+    super
+    self.current_version_uuid ||= self.uuid
+    true
+  end
+end
diff --git a/services/api/app/models/commit.rb b/services/api/app/models/commit.rb
new file mode 100644 (file)
index 0000000..921c690
--- /dev/null
@@ -0,0 +1,272 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'request_error'
+
+class Commit < ActiveRecord::Base
+  extend CurrentApiClient
+
+  class GitError < RequestError
+    def http_status
+      422
+    end
+  end
+
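+  # Example (illustrative): git_check_ref_format("master") and
+  # git_check_ref_format("v1.0") are true; git_check_ref_format("-x")
+  # and git_check_ref_format(nil) are false.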
+  def self.git_check_ref_format(e)
+    if !e or e.empty? or e[0] == '-' or e[0] == '$'
+      # definitely not valid
+      false
+    else
+      `git check-ref-format --allow-onelevel #{e.shellescape}`
+      $?.success?
+    end
+  end
+
+  # Return an array of commits (each a 40-char sha1) satisfying the
+  # given criteria.
+  #
+  # Return [] if the revisions given in minimum/maximum are invalid or
+  # don't exist in the given repository.
+  #
+  # Raise ArgumentError if the given repository is invalid, does not
+  # exist, or cannot be read for any reason. (Any transient error that
+  # prevents commit ranges from resolving must raise rather than
+  # returning an empty array.)
+  #
+  # repository can be the name of a locally hosted repository or a git
+  # URL (see git-fetch(1)). Currently http, https, and git schemes are
+  # supported.
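+  #
+  # Example (illustrative, hypothetical repo and refs):
+  #   Commit.find_commit_range("active/foo", "v1.0", "master", nil)
+  #   # => sha1s reachable from master back to (and including) v1.0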
+  def self.find_commit_range repository, minimum, maximum, exclude
+    if minimum and minimum.empty?
+      minimum = nil
+    end
+
+    if minimum and !git_check_ref_format(minimum)
+      logger.warn "find_commit_range called with invalid minimum revision: '#{minimum}'"
+      return []
+    end
+
+    if maximum and !git_check_ref_format(maximum)
+      logger.warn "find_commit_range called with invalid maximum revision: '#{maximum}'"
+      return []
+    end
+
+    if !maximum
+      maximum = "HEAD"
+    end
+
+    gitdir, is_remote = git_dir_for repository
+    fetch_remote_repository gitdir, repository if is_remote
+    ENV['GIT_DIR'] = gitdir
+
+    commits = []
+
+    # Get the commit hash for the upper bound
+    max_hash = nil
+    git_max_hash_cmd = "git rev-list --max-count=1 #{maximum.shellescape} --"
+    IO.foreach("|#{git_max_hash_cmd}") do |line|
+      max_hash = line.strip
+    end
+
+    # If not found, nothing else to do
+    if !max_hash
+      logger.warn "no refs found looking for max_hash: `GIT_DIR=#{gitdir} #{git_max_hash_cmd}` returned no output"
+      return []
+    end
+
+    # If string is invalid, nothing else to do
+    if !git_check_ref_format(max_hash)
+      logger.warn "ref returned by `GIT_DIR=#{gitdir} #{git_max_hash_cmd}` was invalid for max_hash: #{max_hash}"
+      return []
+    end
+
+    resolved_exclude = nil
+    if exclude
+      resolved_exclude = []
+      exclude.each do |e|
+        if git_check_ref_format(e)
+          IO.foreach("|git rev-list --max-count=1 #{e.shellescape} --") do |line|
+            resolved_exclude.push(line.strip)
+          end
+        else
+          logger.warn "find_commit_range called with invalid exclude invalid characters: '#{exclude}'"
+          return []
+        end
+      end
+    end
+
+    if minimum
+      # Get the commit hash for the lower bound
+      min_hash = nil
+      git_min_hash_cmd = "git rev-list --max-count=1 #{minimum.shellescape} --"
+      IO.foreach("|#{git_min_hash_cmd}") do |line|
+        min_hash = line.strip
+      end
+
+      # If not found, nothing else to do
+      if !min_hash
+        logger.warn "no refs found looking for min_hash: `GIT_DIR=#{gitdir} #{git_min_hash_cmd}` returned no output"
+        return []
+      end
+
+      # If string is invalid, nothing else to do
+      if !git_check_ref_format(min_hash)
+        logger.warn "ref returned by `GIT_DIR=#{gitdir} #{git_min_hash_cmd}` was invalid for min_hash: #{min_hash}"
+        return []
+      end
+
+      # Now find all commits between them
+      IO.foreach("|git rev-list #{min_hash.shellescape}..#{max_hash.shellescape} --") do |line|
+        hash = line.strip
+        commits.push(hash) if !resolved_exclude or !resolved_exclude.include? hash
+      end
+
+      commits.push(min_hash) if !resolved_exclude or !resolved_exclude.include? min_hash
+    else
+      commits.push(max_hash) if !resolved_exclude or !resolved_exclude.include? max_hash
+    end
+
+    commits
+  end
+
+  # Given a repository (url, or name of hosted repo) and commit sha1,
+  # copy the commit into the internal git repo (if necessary), and tag
+  # it with the given tag (typically a job UUID).
+  #
+  # The repo can be a remote url, but in this case sha1 must already
+  # be present in our local cache for that repo: e.g., sha1 was just
+  # returned by find_commit_range.
+  def self.tag_in_internal_repository repo_name, sha1, tag
+    unless git_check_ref_format tag
+      raise ArgumentError.new "invalid tag #{tag}"
+    end
+    unless /^[0-9a-f]{40}$/ =~ sha1
+      raise ArgumentError.new "invalid sha1 #{sha1}"
+    end
+    src_gitdir, _ = git_dir_for repo_name
+    unless src_gitdir
+      raise ArgumentError.new "no local repository for #{repo_name}"
+    end
+    dst_gitdir = Rails.configuration.git_internal_dir
+
+    begin
+      commit_in_dst = must_git(dst_gitdir, "log -n1 --format=%H #{sha1.shellescape}^{commit}").strip
+    rescue GitError
+      commit_in_dst = false
+    end
+
+    tag_cmd = "tag --force #{tag.shellescape} #{sha1.shellescape}^{commit}"
+    if commit_in_dst == sha1
+      must_git(dst_gitdir, tag_cmd)
+    else
+      # git-fetch is faster than pack-objects|unpack-objects, but
+      # git-fetch can't fetch by sha1. So we first try to fetch a
+      # branch that has the desired commit, and if that fails (there
+      # is no such branch, or the branch we choose changes under us in
+      # race), we fall back to pack|unpack.
+      begin
+        branches = must_git(src_gitdir,
+                            "branch --contains #{sha1.shellescape}")
+        m = branches.match(/^. (\w+)\n/)
+        if !m
+          raise GitError.new "commit is not on any branch"
+        end
+        branch = m[1]
+        must_git(dst_gitdir,
+                 "fetch file://#{src_gitdir.shellescape} #{branch.shellescape}")
+        # Even if all of the above steps succeeded, we might still not
+        # have the right commit due to a race, in which case tag_cmd
+        # will fail, and we'll need to fall back to pack|unpack. So
+        # don't be tempted to condense this tag_cmd and the one in the
+        # rescue block into a single attempt.
+        must_git(dst_gitdir, tag_cmd)
+      rescue GitError
+        must_pipe("echo #{sha1.shellescape}",
+                  "git --git-dir #{src_gitdir.shellescape} pack-objects -q --revs --stdout",
+                  "git --git-dir #{dst_gitdir.shellescape} unpack-objects -q")
+        must_git(dst_gitdir, tag_cmd)
+      end
+    end
+  end
+
+  protected
+
+  def self.remote_url? repo_name
+    /^(https?|git):\/\// =~ repo_name
+  end
+
+  # Return [local_git_dir, is_remote]. If is_remote, caller must use
+  # fetch_remote_repository to ensure content is up-to-date.
+  #
+  # Raises an exception if the latest content could not be fetched for
+  # any reason.
+  def self.git_dir_for repo_name
+    if remote_url? repo_name
+      return [cache_dir_for(repo_name), true]
+    end
+    repos = Repository.readable_by(current_user).where(name: repo_name)
+    if repos.count == 0
+      raise ArgumentError.new "Repository not found: '#{repo_name}'"
+    elsif repos.count > 1
+      logger.error "Multiple repositories with name=='#{repo_name}'!"
+      raise ArgumentError.new "Name conflict"
+    else
+      return [repos.first.server_path, false]
+    end
+  end
+
+  def self.cache_dir_for git_url
+    File.join(cache_dir_base, Digest::SHA1.hexdigest(git_url) + ".git").to_s
+  end
+
+  def self.cache_dir_base
+    Rails.root.join 'tmp', 'git-cache'
+  end
+
+  def self.fetch_remote_repository gitdir, git_url
+    # Caller decides which protocols are worth using. This is just a
+    # safety check to ensure we never use urls like "--flag" or wander
+    # into git's hardlink features by using bare "/path/foo" instead
+    # of "file:///path/foo".
+    unless /^[a-z]+:\/\// =~ git_url
+      raise ArgumentError.new "invalid git url #{git_url}"
+    end
+    begin
+      must_git gitdir, "branch"
+    rescue GitError => e
+      raise unless /Not a git repository/ =~ e.to_s
+      # OK, this just means we need to create a blank cache repository
+      # before fetching.
+      FileUtils.mkdir_p gitdir
+      must_git gitdir, "init"
+    end
+    must_git(gitdir,
+             "fetch --no-progress --tags --prune --force --update-head-ok #{git_url.shellescape} 'refs/heads/*:refs/heads/*'")
+  end
+
+  def self.must_git gitdir, *cmds
+    # Clear token in case a git helper tries to use it as a password.
+    orig_token = ENV['ARVADOS_API_TOKEN']
+    ENV['ARVADOS_API_TOKEN'] = ''
+    last_output = ''
+    begin
+      git = "git --git-dir #{gitdir.shellescape}"
+      cmds.each do |cmd|
+        last_output = must_pipe git+" "+cmd
+      end
+    ensure
+      ENV['ARVADOS_API_TOKEN'] = orig_token
+    end
+    return last_output
+  end
+
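+  # Example (illustrative):
+  #   must_pipe("echo foo", "tr a-z A-Z")  # => "FOO\n"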
+  def self.must_pipe *cmds
+    cmd = cmds.join(" 2>&1 |") + " 2>&1"
+    out = IO.read("| </dev/null #{cmd}")
+    if not $?.success?
+      raise GitError.new "#{cmd}: #{$?}: #{out}"
+    end
+    return out
+  end
+end
diff --git a/services/api/app/models/commit_ancestor.rb b/services/api/app/models/commit_ancestor.rb
new file mode 100644 (file)
index 0000000..3d5152c
--- /dev/null
@@ -0,0 +1,44 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Usage:
+#
+# x = CommitAncestor.find_or_create_by_descendant_and_ancestor(a, b)
+# "b is an ancestor of a" if x.is
+#
+
+class CommitAncestor < ActiveRecord::Base
+  before_create :ask_git_whether_is
+
+  class CommitNotFoundError < ArgumentError
+  end
+
+  protected
+
+  def ask_git_whether_is
+    @gitdirbase = Rails.configuration.git_repositories_dir
+    self.is = nil
+    Dir.foreach @gitdirbase do |repo|
+      next if repo.match(/^\./)
+      git_dir = repo.match(/\.git$/) ? repo : File.join(repo, '.git')
+      repo_name = repo.sub(/\.git$/, '')
+      ENV['GIT_DIR'] = File.join(@gitdirbase, git_dir)
+      IO.foreach("|git rev-list --format=oneline '#{self.descendant.gsub(/[^0-9a-f]/,"")}'") do |line|
+        self.is = false
+        sha1, _ = line.strip.split(" ", 2)
+        if sha1 == self.ancestor
+          self.is = true
+          break
+        end
+      end
+      if !self.is.nil?
+        self.repository_name = repo_name
+        break
+      end
+    end
+    if self.is.nil?
+      raise CommitNotFoundError.new "Specified commit was not found"
+    end
+  end
+end
diff --git a/services/api/app/models/container.rb b/services/api/app/models/container.rb
new file mode 100644 (file)
index 0000000..abcfdbd
--- /dev/null
@@ -0,0 +1,708 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'log_reuse_info'
+require 'whitelist_update'
+require 'safe_json'
+require 'update_priority'
+
+class Container < ArvadosModel
+  include ArvadosModelUpdates
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  include WhitelistUpdate
+  extend CurrentApiClient
+  extend DbCurrentTime
+  extend LogReuseInfo
+
+  serialize :environment, Hash
+  serialize :mounts, Hash
+  serialize :runtime_constraints, Hash
+  serialize :command, Array
+  serialize :scheduling_parameters, Hash
+  serialize :secret_mounts, Hash
+  serialize :runtime_status, Hash
+
+  before_validation :fill_field_defaults, :if => :new_record?
+  before_validation :set_timestamps
+  validates :command, :container_image, :output_path, :cwd, :priority, { presence: true }
+  validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
+  validate :validate_runtime_status
+  validate :validate_state_change
+  validate :validate_change
+  validate :validate_lock
+  validate :validate_output
+  after_validation :assign_auth
+  before_save :sort_serialized_attrs
+  before_save :update_secret_mounts_md5
+  before_save :scrub_secrets
+  before_save :clear_runtime_status_when_queued
+  after_save :update_cr_logs
+  after_save :handle_completed
+  after_save :propagate_priority
+  after_commit { UpdatePriority.run_update_thread }
+
+  has_many :container_requests, :foreign_key => :container_uuid, :class_name => 'ContainerRequest', :primary_key => :uuid
+  belongs_to :auth, :class_name => 'ApiClientAuthorization', :foreign_key => :auth_uuid, :primary_key => :uuid
+
+  api_accessible :user, extend: :common do |t|
+    t.add :command
+    t.add :container_image
+    t.add :cwd
+    t.add :environment
+    t.add :exit_code
+    t.add :finished_at
+    t.add :locked_by_uuid
+    t.add :log
+    t.add :mounts
+    t.add :output
+    t.add :output_path
+    t.add :priority
+    t.add :progress
+    t.add :runtime_constraints
+    t.add :runtime_status
+    t.add :started_at
+    t.add :state
+    t.add :auth_uuid
+    t.add :scheduling_parameters
+    t.add :runtime_user_uuid
+    t.add :runtime_auth_scopes
+  end
+
+  # Supported states for a container
+  States =
+    [
+     (Queued = 'Queued'),
+     (Locked = 'Locked'),
+     (Running = 'Running'),
+     (Complete = 'Complete'),
+     (Cancelled = 'Cancelled')
+    ]
+
+  State_transitions = {
+    nil => [Queued],
+    Queued => [Locked, Cancelled],
+    Locked => [Queued, Running, Cancelled],
+    Running => [Complete, Cancelled]
+  }
+
+  def self.limit_index_columns_read
+    ["mounts"]
+  end
+
+  def self.full_text_searchable_columns
+    super - ["secret_mounts", "secret_mounts_md5", "runtime_token"]
+  end
+
+  def self.searchable_columns *args
+    super - ["secret_mounts_md5", "runtime_token"]
+  end
+
+  def logged_attributes
+    super.except('secret_mounts', 'runtime_token')
+  end
+
+  def state_transitions
+    State_transitions
+  end
+
+  # Container priority is the highest "computed priority" of any
+  # matching request. The computed priority of a container-submitted
+  # request is the priority of the submitting container. The computed
+  # priority of a user-submitted request is a function of
+  # user-assigned priority and request creation time.
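+  #
+  # (Illustrative: because user priority is shifted left 50 bits, any
+  # difference in priority dominates the creation-time term, and older
+  # requests win ties.)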
+  def update_priority!
+    return if ![Queued, Locked, Running].include?(state)
+    p = ContainerRequest.
+        where('container_uuid=? and priority>0', uuid).
+        includes(:requesting_container).
+        lock(true).
+        map do |cr|
+      if cr.requesting_container
+        cr.requesting_container.priority
+      else
+        (cr.priority << 50) - (cr.created_at.to_time.to_f * 1000).to_i
+      end
+    end.max || 0
+    update_attributes!(priority: p)
+  end
+
+  def propagate_priority
+    return true unless priority_changed?
+    act_as_system_user do
+      # Update the priority of child container requests to match new
+      # priority of the parent container (ignoring requests with no
+      # container assigned, because their priority doesn't matter).
+      ContainerRequest.
+        where(requesting_container_uuid: self.uuid,
+              state: ContainerRequest::Committed).
+        where('container_uuid is not null').
+        includes(:container).
+        map(&:container).
+        map(&:update_priority!)
+    end
+  end
+
+  # Create a new container (or find an existing one) to satisfy the
+  # given container request.
+  def self.resolve(req)
+    if req.runtime_token.nil?
+      runtime_user = if req.modified_by_user_uuid.nil?
+                       current_user
+                     else
+                       User.find_by_uuid(req.modified_by_user_uuid)
+                     end
+      runtime_auth_scopes = ["all"]
+    else
+      auth = ApiClientAuthorization.validate(token: req.runtime_token)
+      if auth.nil?
+        raise ArgumentError.new "Invalid runtime token"
+      end
+      runtime_user = User.find_by_id(auth.user_id)
+      runtime_auth_scopes = auth.scopes
+    end
+    c_attrs = act_as_user runtime_user do
+      {
+        command: req.command,
+        cwd: req.cwd,
+        environment: req.environment,
+        output_path: req.output_path,
+        container_image: resolve_container_image(req.container_image),
+        mounts: resolve_mounts(req.mounts),
+        runtime_constraints: resolve_runtime_constraints(req.runtime_constraints),
+        scheduling_parameters: req.scheduling_parameters,
+        secret_mounts: req.secret_mounts,
+        runtime_token: req.runtime_token,
+        runtime_user_uuid: runtime_user.uuid,
+        runtime_auth_scopes: runtime_auth_scopes
+      }
+    end
+    act_as_system_user do
+      if req.use_existing && (reusable = find_reusable(c_attrs))
+        reusable
+      else
+        Container.create!(c_attrs)
+      end
+    end
+  end
+
+  # Return a runtime_constraints hash that complies with requested but
+  # is suitable for saving in a container record, i.e., has specific
+  # values instead of ranges.
+  #
+  # Doing this as a step separate from other resolutions, like "git
+  # revision range to commit hash", makes sense only when there is no
+  # opportunity to reuse an existing container (e.g., container reuse
+  # is not implemented yet, or we have already found that no existing
+  # containers are suitable).
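+  #
+  # Example (illustrative):
+  #   resolve_runtime_constraints({'vcpus' => [1, 4], 'ram' => 2 << 30})
+  #   # => {'keep_cache_ram' => <configured default>, 'vcpus' => 1,
+  #   #     'ram' => 2147483648}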
+  def self.resolve_runtime_constraints(runtime_constraints)
+    rc = {}
+    defaults = {
+      'keep_cache_ram' =>
+      Rails.configuration.container_default_keep_cache_ram,
+    }
+    defaults.merge(runtime_constraints).each do |k, v|
+      if v.is_a? Array
+        rc[k] = v[0]
+      else
+        rc[k] = v
+      end
+    end
+    rc
+  end
+
+  # Return a mounts hash suitable for a Container, i.e., with every
+  # readonly collection UUID resolved to a PDH.
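+  #
+  # Example (illustrative, hypothetical UUID):
+  #   resolve_mounts({"/in" => {"kind" => "collection",
+  #                             "uuid" => "zzzzz-4zz18-zzzzzzzzzzzzzzz"}})
+  #   # => {"/in" => {"kind" => "collection",
+  #   #               "portable_data_hash" => "<pdh of that collection>"}}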
+  def self.resolve_mounts(mounts)
+    c_mounts = {}
+    mounts.each do |k, mount|
+      mount = mount.dup
+      c_mounts[k] = mount
+      if mount['kind'] != 'collection'
+        next
+      end
+
+      uuid = mount.delete 'uuid'
+
+      if mount['portable_data_hash'].nil? and !uuid.nil?
+        # PDH not supplied, try by UUID
+        c = Collection.
+          readable_by(current_user).
+          where(uuid: uuid).
+          select(:portable_data_hash).
+          first
+        if !c
+          raise ArvadosModel::UnresolvableContainerError.new "cannot mount collection #{uuid.inspect}: not found"
+        end
+        mount['portable_data_hash'] = c.portable_data_hash
+      end
+    end
+    return c_mounts
+  end
+
+  # Return a container_image PDH suitable for a Container.
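+  # Example (illustrative, hypothetical image):
+  #   resolve_container_image("arvados/jobs:latest")  # => "<pdh>"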
+  def self.resolve_container_image(container_image)
+    coll = Collection.for_latest_docker_image(container_image)
+    if !coll
+      raise ArvadosModel::UnresolvableContainerError.new "docker image #{container_image.inspect} not found"
+    end
+    coll.portable_data_hash
+  end
+
+  def self.find_reusable(attrs)
+    log_reuse_info { "starting with #{Container.all.count} container records in database" }
+    candidates = Container.where_serialized(:command, attrs[:command], md5: true)
+    log_reuse_info(candidates) { "after filtering on command #{attrs[:command].inspect}" }
+
+    candidates = candidates.where('cwd = ?', attrs[:cwd])
+    log_reuse_info(candidates) { "after filtering on cwd #{attrs[:cwd].inspect}" }
+
+    candidates = candidates.where_serialized(:environment, attrs[:environment], md5: true)
+    log_reuse_info(candidates) { "after filtering on environment #{attrs[:environment].inspect}" }
+
+    candidates = candidates.where('output_path = ?', attrs[:output_path])
+    log_reuse_info(candidates) { "after filtering on output_path #{attrs[:output_path].inspect}" }
+
+    image = resolve_container_image(attrs[:container_image])
+    candidates = candidates.where('container_image = ?', image)
+    log_reuse_info(candidates) { "after filtering on container_image #{image.inspect} (resolved from #{attrs[:container_image].inspect})" }
+
+    candidates = candidates.where_serialized(:mounts, resolve_mounts(attrs[:mounts]), md5: true)
+    log_reuse_info(candidates) { "after filtering on mounts #{attrs[:mounts].inspect}" }
+
+    secret_mounts_md5 = Digest::MD5.hexdigest(SafeJSON.dump(self.deep_sort_hash(attrs[:secret_mounts])))
+    candidates = candidates.where('secret_mounts_md5 = ?', secret_mounts_md5)
+    log_reuse_info(candidates) { "after filtering on secret_mounts_md5 #{secret_mounts_md5.inspect}" }
+
+    candidates = candidates.where_serialized(:runtime_constraints, resolve_runtime_constraints(attrs[:runtime_constraints]), md5: true)
+    log_reuse_info(candidates) { "after filtering on runtime_constraints #{attrs[:runtime_constraints].inspect}" }
+
+    log_reuse_info { "checking for state=Complete with readable output and log..." }
+
+    select_readable_pdh = Collection.
+      readable_by(current_user).
+      select(:portable_data_hash).
+      to_sql
+
+    usable = candidates.where(state: Complete, exit_code: 0)
+    log_reuse_info(usable) { "with state=Complete, exit_code=0" }
+
+    usable = usable.where("log IN (#{select_readable_pdh})")
+    log_reuse_info(usable) { "with readable log" }
+
+    usable = usable.where("output IN (#{select_readable_pdh})")
+    log_reuse_info(usable) { "with readable output" }
+
+    usable = usable.order('finished_at ASC').limit(1).first
+    if usable
+      log_reuse_info { "done, reusing container #{usable.uuid} with state=Complete" }
+      return usable
+    end
+
+    # Check for non-failing Running candidates and return the most likely to finish sooner.
+    log_reuse_info { "checking for state=Running..." }
+    running = candidates.where(state: Running).
+              where("(runtime_status->'error') is null").
+              order('progress desc, started_at asc').
+              limit(1).first
+    if running
+      log_reuse_info { "done, reusing container #{running.uuid} with state=Running" }
+      return running
+    else
+      log_reuse_info { "have no containers in Running state" }
+    end
+
+    # Check for Locked or Queued ones and return the most likely to start first.
+    locked_or_queued = candidates.
+                       where("state IN (?)", [Locked, Queued]).
+                       order('state asc, priority desc, created_at asc').
+                       limit(1).first
+    if locked_or_queued
+      log_reuse_info { "done, reusing container #{locked_or_queued.uuid} with state=#{locked_or_queued.state}" }
+      return locked_or_queued
+    else
+      log_reuse_info { "have no containers in Locked or Queued state" }
+    end
+
+    log_reuse_info { "done, no reusable container found" }
+    nil
+  end
+
+  def check_lock_fail
+    if self.state != Queued
+      raise LockFailedError.new("cannot lock when #{self.state}")
+    elsif self.priority <= 0
+      raise LockFailedError.new("cannot lock when priority<=0")
+    end
+  end
+
+  def lock
+    # Check invalid state transitions once before getting the lock
+    # (because it's cheaper that way) and once after getting the lock
+    # (because state might have changed while acquiring the lock).
+    check_lock_fail
+    transaction do
+      reload
+      check_lock_fail
+      update_attributes!(state: Locked, lock_count: self.lock_count+1)
+    end
+  end
+
+  def check_unlock_fail
+    if self.state != Locked
+      raise InvalidStateTransitionError.new("cannot unlock when #{self.state}")
+    elsif self.locked_by_uuid != current_api_client_authorization.uuid
+      raise InvalidStateTransitionError.new("locked by a different token")
+    end
+  end
+
+  def unlock
+    # Check invalid state transitions twice (see lock)
+    check_unlock_fail
+    transaction do
+      reload(lock: 'FOR UPDATE')
+      check_unlock_fail
+      if self.lock_count < Rails.configuration.max_container_dispatch_attempts
+        update_attributes!(state: Queued)
+      else
+        update_attributes!(state: Cancelled,
+                           runtime_status: {
+                             error: "Container exceeded 'max_container_dispatch_attempts' (lock_count=#{self.lock_count}."
+                           })
+      end
+    end
+  end
+
+  def self.readable_by(*users_list)
+    # Load optional keyword arguments, if they exist.
+    if users_list.last.is_a? Hash
+      kwargs = users_list.pop
+    else
+      kwargs = {}
+    end
+    if users_list.select { |u| u.is_admin }.any?
+      return super
+    end
+    Container.where(ContainerRequest.readable_by(*users_list).where("containers.uuid = container_requests.container_uuid").exists)
+  end
+
+  def final?
+    [Complete, Cancelled].include?(self.state)
+  end
+
+  def self.for_current_token
+    return if !current_api_client_authorization
+    _, _, _, container_uuid = Thread.current[:token].split('/')
+    if container_uuid.nil?
+      Container.where(auth_uuid: current_api_client_authorization.uuid).first
+    else
+      Container.where('auth_uuid=? or (uuid=? and runtime_token=?)',
+                      current_api_client_authorization.uuid,
+                      container_uuid,
+                      current_api_client_authorization.token).first
+    end
+  end
+
+  protected
+
+  def fill_field_defaults
+    self.state ||= Queued
+    self.environment ||= {}
+    self.runtime_constraints ||= {}
+    self.mounts ||= {}
+    self.cwd ||= "."
+    self.priority ||= 0
+    self.scheduling_parameters ||= {}
+  end
+
+  def permission_to_create
+    current_user.andand.is_admin
+  end
+
+  def ensure_owner_uuid_is_permitted
+    # validate_change ensures owner_uuid can't be changed at all --
+    # except during create, which requires admin privileges. Checking
+    # permission here would be superfluous.
+    true
+  end
+
+  def set_timestamps
+    if self.state_changed? and self.state == Running
+      self.started_at ||= db_current_time
+    end
+
+    if self.state_changed? and [Complete, Cancelled].include? self.state
+      self.finished_at ||= db_current_time
+    end
+  end
+
+  # Check that well-known runtime status keys have desired data types
+  def validate_runtime_status
+    [
+      'error', 'errorDetail', 'warning', 'warningDetail', 'activity'
+    ].each do |k|
+      if self.runtime_status.andand.include?(k) && !self.runtime_status[k].is_a?(String)
+        errors.add(:runtime_status, "'#{k}' value must be a string")
+      end
+    end
+  end
+
+  def validate_change
+    permitted = [:state]
+    progress_attrs = [:progress, :runtime_status, :log, :output]
+    final_attrs = [:exit_code, :finished_at]
+
+    if self.new_record?
+      permitted.push(:owner_uuid, :command, :container_image, :cwd,
+                     :environment, :mounts, :output_path, :priority,
+                     :runtime_constraints, :scheduling_parameters,
+                     :secret_mounts, :runtime_token,
+                     :runtime_user_uuid, :runtime_auth_scopes)
+    end
+
+    case self.state
+    when Locked
+      permitted.push :priority, :runtime_status, :log, :lock_count
+
+    when Queued
+      permitted.push :priority
+
+    when Running
+      permitted.push :priority, *progress_attrs
+      if self.state_changed?
+        permitted.push :started_at
+      end
+
+    when Complete
+      if self.state_was == Running
+        permitted.push *final_attrs, *progress_attrs
+      end
+
+    when Cancelled
+      case self.state_was
+      when Running
+        permitted.push :finished_at, *progress_attrs
+      when Queued, Locked
+        permitted.push :finished_at, :log, :runtime_status
+      end
+
+    else
+      # The state_transitions check will add an error message for this
+      return false
+    end
+
+    if self.state == Running &&
+       !current_api_client_authorization.nil? &&
+       (current_api_client_authorization.uuid == self.auth_uuid ||
+        current_api_client_authorization.token == self.runtime_token)
+      # The contained process itself can write final attrs but can't
+      # change priority or log.
+      permitted.push *final_attrs
+      permitted = permitted - [:log, :priority]
+    elsif self.locked_by_uuid && self.locked_by_uuid != current_api_client_authorization.andand.uuid
+      # When locked, progress fields cannot be updated by the wrong
+      # dispatcher, even though it has admin privileges.
+      permitted = permitted - progress_attrs
+    end
+    check_update_whitelist permitted
+  end
+
+  def validate_lock
+    if [Locked, Running].include? self.state
+      # If the Container was already locked, locked_by_uuid must not
+      # change. Otherwise, the current auth gets the lock.
+      need_lock = locked_by_uuid_was || current_api_client_authorization.andand.uuid
+    else
+      need_lock = nil
+    end
+
+    # The caller can provide a new value for locked_by_uuid, but only
+    # if it's exactly what we expect. This allows a caller to perform
+    # an update like {"state":"Unlocked","locked_by_uuid":null}.
+    if self.locked_by_uuid_changed?
+      if self.locked_by_uuid != need_lock
+        return errors.add :locked_by_uuid, "can only change to #{need_lock}"
+      end
+    end
+    self.locked_by_uuid = need_lock
+  end
+
+  def validate_output
+    # Output must exist and be readable by the current user.  This is so
+    # that a container cannot "claim" a collection that it doesn't otherwise
+    # have access to just by setting the output field to the collection PDH.
+    if output_changed?
+      c = Collection.
+            readable_by(current_user, {include_trash: true}).
+            where(portable_data_hash: self.output).
+            first
+      if !c
+        errors.add :output, "collection must exist and be readable by current user."
+      end
+    end
+  end
+
+  def update_cr_logs
+    # If self.final?, this update is superfluous: the final log/output
+    # update will be done when handle_completed calls finalize! on
+    # each requesting CR.
+    return if self.final? || !self.log_changed?
+    leave_modified_by_user_alone do
+      ContainerRequest.where(container_uuid: self.uuid).each do |cr|
+        cr.update_collections(container: self, collections: ['log'])
+        cr.save!
+      end
+    end
+  end
+
+  def assign_auth
+    if self.auth_uuid_changed?
+      return errors.add :auth_uuid, 'is readonly'
+    end
+    if not [Locked, Running].include? self.state
+      # don't need one
+      self.auth.andand.update_attributes(expires_at: db_current_time)
+      self.auth = nil
+      return
+    elsif self.auth
+      # already have one
+      return
+    end
+    if self.runtime_token.nil?
+      if self.runtime_user_uuid.nil?
+        # Legacy behavior: we don't have a runtime_user_uuid, so get
+        # the user from the highest priority container request. This is
+        # needed when performing an upgrade while there are queued
+        # containers, and by some tests.
+        cr = ContainerRequest.
+               where('container_uuid=? and priority>0', self.uuid).
+               order('priority desc').
+               first
+        if !cr
+          return errors.add :auth_uuid, "cannot be assigned because priority <= 0"
+        end
+        self.runtime_user_uuid = cr.modified_by_user_uuid
+        self.runtime_auth_scopes = ["all"]
+      end
+
+      # generate a new token
+      self.auth = ApiClientAuthorization.
+                    create!(user_id: User.find_by_uuid(self.runtime_user_uuid).id,
+                            api_client_id: 0,
+                            scopes: self.runtime_auth_scopes)
+    end
+  end
+
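+  # (Illustrative rationale: sorting keeps the serialized forms
+  # deterministic, so the md5-based where_serialized lookups in
+  # find_reusable match for logically identical hashes.)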
+  def sort_serialized_attrs
+    if self.environment_changed?
+      self.environment = self.class.deep_sort_hash(self.environment)
+    end
+    if self.mounts_changed?
+      self.mounts = self.class.deep_sort_hash(self.mounts)
+    end
+    if self.runtime_constraints_changed?
+      self.runtime_constraints = self.class.deep_sort_hash(self.runtime_constraints)
+    end
+    if self.scheduling_parameters_changed?
+      self.scheduling_parameters = self.class.deep_sort_hash(self.scheduling_parameters)
+    end
+    if self.runtime_auth_scopes_changed?
+      self.runtime_auth_scopes = self.runtime_auth_scopes.sort
+    end
+  end
+
+  def update_secret_mounts_md5
+    if self.secret_mounts_changed?
+      self.secret_mounts_md5 = Digest::MD5.hexdigest(
+        SafeJSON.dump(self.class.deep_sort_hash(self.secret_mounts)))
+    end
+  end
+
+  def scrub_secrets
+    # this runs after update_secret_mounts_md5, so the
+    # secret_mounts_md5 will still reflect the secrets that are being
+    # scrubbed here.
+    if self.state_changed? && self.final?
+      self.secret_mounts = {}
+      self.runtime_token = nil
+    end
+  end
+
+  def clear_runtime_status_when_queued
+    # Avoid leaking status messages between different dispatch attempts
+    if self.state_was == Locked && self.state == Queued
+      self.runtime_status = {}
+    end
+  end
+
+  def handle_completed
+    # This container is finished so finalize any associated container requests
+    # that are associated with this container.
+    if self.state_changed? and self.final?
+      act_as_system_user do
+
+        if self.state == Cancelled
+          retryable_requests = ContainerRequest.where("container_uuid = ? and priority > 0 and state = 'Committed' and container_count < container_count_max", uuid)
+        else
+          retryable_requests = []
+        end
+
+        if retryable_requests.any?
+          c_attrs = {
+            command: self.command,
+            cwd: self.cwd,
+            environment: self.environment,
+            output_path: self.output_path,
+            container_image: self.container_image,
+            mounts: self.mounts,
+            runtime_constraints: self.runtime_constraints,
+            scheduling_parameters: self.scheduling_parameters,
+            secret_mounts: self.secret_mounts_was,
+            runtime_token: self.runtime_token_was,
+            runtime_user_uuid: self.runtime_user_uuid,
+            runtime_auth_scopes: self.runtime_auth_scopes
+          }
+          c = Container.create! c_attrs
+          retryable_requests.each do |cr|
+            cr.with_lock do
+              leave_modified_by_user_alone do
+                # Use row locking because this increments container_count
+                cr.container_uuid = c.uuid
+                cr.save!
+              end
+            end
+          end
+        end
+
+        # Notify container requests associated with this container
+        ContainerRequest.where(container_uuid: uuid,
+                               state: ContainerRequest::Committed).each do |cr|
+          leave_modified_by_user_alone do
+            cr.finalize!
+          end
+        end
+
+        # Cancel outstanding container requests made by this container.
+        ContainerRequest.
+          includes(:container).
+          where(requesting_container_uuid: uuid,
+                state: ContainerRequest::Committed).each do |cr|
+          leave_modified_by_user_alone do
+            cr.update_attributes!(priority: 0)
+            cr.container.reload
+            if cr.container.state == Container::Queued || cr.container.state == Container::Locked
+              # If the child container hasn't started yet, finalize the
+              # child CR now instead of leaving it "on hold", i.e.,
+              # Queued with priority 0.  (OTOH, if the child is already
+              # running, leave it alone so it can get cancelled the
+              # usual way, get a copy of the log collection, etc.)
+              cr.update_attributes!(state: ContainerRequest::Final)
+            end
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/services/api/app/models/container_request.rb b/services/api/app/models/container_request.rb
new file mode 100644 (file)
index 0000000..921d4be
--- /dev/null
@@ -0,0 +1,433 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'whitelist_update'
+require 'arvados/collection'
+
+class ContainerRequest < ArvadosModel
+  include ArvadosModelUpdates
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  include WhitelistUpdate
+
+  belongs_to :container, foreign_key: :container_uuid, primary_key: :uuid
+  belongs_to :requesting_container, {
+               class_name: 'Container',
+               foreign_key: :requesting_container_uuid,
+               primary_key: :uuid,
+             }
+
+  serialize :properties, Hash
+  serialize :environment, Hash
+  serialize :mounts, Hash
+  serialize :runtime_constraints, Hash
+  serialize :command, Array
+  serialize :scheduling_parameters, Hash
+  serialize :secret_mounts, Hash
+
+  before_validation :fill_field_defaults, :if => :new_record?
+  before_validation :validate_runtime_constraints
+  before_validation :set_default_preemptible_scheduling_parameter
+  before_validation :set_container
+  validates :command, :container_image, :output_path, :cwd, :presence => true
+  validates :output_ttl, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
+  validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 1000 }
+  validate :validate_datatypes
+  validate :validate_scheduling_parameters
+  validate :validate_state_change
+  validate :check_update_whitelist
+  validate :secret_mounts_key_conflict
+  validate :validate_runtime_token
+  before_save :scrub_secrets
+  before_create :set_requesting_container_uuid
+  before_destroy :set_priority_zero
+  after_save :update_priority
+  after_save :finalize_if_needed
+
+  api_accessible :user, extend: :common do |t|
+    t.add :command
+    t.add :container_count
+    t.add :container_count_max
+    t.add :container_image
+    t.add :container_uuid
+    t.add :cwd
+    t.add :description
+    t.add :environment
+    t.add :expires_at
+    t.add :filters
+    t.add :log_uuid
+    t.add :mounts
+    t.add :name
+    t.add :output_name
+    t.add :output_path
+    t.add :output_uuid
+    t.add :output_ttl
+    t.add :priority
+    t.add :properties
+    t.add :requesting_container_uuid
+    t.add :runtime_constraints
+    t.add :scheduling_parameters
+    t.add :state
+    t.add :use_existing
+  end
+
+  # Supported states for a container request
+  States =
+    [
+     (Uncommitted = 'Uncommitted'),
+     (Committed = 'Committed'),
+     (Final = 'Final'),
+    ]
+
+  State_transitions = {
+    nil => [Uncommitted, Committed],
+    Uncommitted => [Committed],
+    Committed => [Final]
+  }
+
+  AttrsPermittedAlways = [:owner_uuid, :state, :name, :description, :properties]
+  AttrsPermittedBeforeCommit = [:command, :container_count_max,
+  :container_image, :cwd, :environment, :filters, :mounts,
+  :output_path, :priority, :runtime_token,
+  :runtime_constraints, :state, :container_uuid, :use_existing,
+  :scheduling_parameters, :secret_mounts, :output_name, :output_ttl]
+
+  def self.limit_index_columns_read
+    ["mounts"]
+  end
+
+  def logged_attributes
+    super.except('secret_mounts', 'runtime_token')
+  end
+
+  def state_transitions
+    State_transitions
+  end
+
+  def skip_uuid_read_permission_check
+    # The uuid_read_permission_check prevents users from making
+    # references to objects they can't view.  However, in this case we
+    # don't want to do that check since there's a circular dependency
+    # where the user can't view the container until the user has
+    # constructed the container request that references the container.
+    %w(container_uuid)
+  end
+
+  def finalize_if_needed
+    if state == Committed && Container.find_by_uuid(container_uuid).final?
+      reload
+      act_as_system_user do
+        leave_modified_by_user_alone do
+          finalize!
+        end
+      end
+    end
+  end
+
+  # Finalize the container request after the container has
+  # finished/cancelled.
+  def finalize!
+    update_collections(container: Container.find_by_uuid(container_uuid))
+    update_attributes!(state: Final)
+  end
+
+  def update_collections(container:, collections: ['log', 'output'])
+    collections.each do |out_type|
+      pdh = container.send(out_type)
+      next if pdh.nil?
+      coll_name = "Container #{out_type} for request #{uuid}"
+      trash_at = nil
+      if out_type == 'output'
+        if self.output_name
+          coll_name = self.output_name
+        end
+        if self.output_ttl > 0
+          trash_at = db_current_time + self.output_ttl
+        end
+      end
+      manifest = Collection.where(portable_data_hash: pdh).first.manifest_text
+
+      coll_uuid = self.send(out_type + '_uuid')
+      coll = coll_uuid.nil? ? nil : Collection.where(uuid: coll_uuid).first
+      if !coll
+        coll = Collection.new(
+          owner_uuid: self.owner_uuid,
+          name: coll_name,
+          manifest_text: "",
+          properties: {
+            'type' => out_type,
+            'container_request' => uuid,
+          })
+      end
+
+      if out_type == "log"
+        src = Arv::Collection.new(manifest)
+        dst = Arv::Collection.new(coll.manifest_text)
+        dst.cp_r("./", ".", src)
+        dst.cp_r("./", "log for container #{container.uuid}", src)
+        manifest = dst.manifest_text
+      end
+
+      coll.assign_attributes(
+        portable_data_hash: Digest::MD5.hexdigest(manifest) + '+' + manifest.bytesize.to_s,
+        manifest_text: manifest,
+        trash_at: trash_at,
+        delete_at: trash_at)
+      coll.save_with_unique_name!
+      self.send(out_type + '_uuid=', coll.uuid)
+    end
+  end
+
+  def self.full_text_searchable_columns
+    super - ["mounts", "secret_mounts", "secret_mounts_md5", "runtime_token"]
+  end
+
+  protected
+
+  def fill_field_defaults
+    self.state ||= Uncommitted
+    self.environment ||= {}
+    self.runtime_constraints ||= {}
+    self.mounts ||= {}
+    self.cwd ||= "."
+    self.container_count_max ||= Rails.configuration.container_count_max
+    self.scheduling_parameters ||= {}
+    self.output_ttl ||= 0
+    self.priority ||= 0
+  end
+
+  def set_container
+    if (container_uuid_changed? and
+        not current_user.andand.is_admin and
+        not container_uuid.nil?)
+      errors.add :container_uuid, "can only be updated to nil."
+      return false
+    end
+    if state_changed? and state == Committed and container_uuid.nil?
+      self.container_uuid = Container.resolve(self).uuid
+    end
+    if self.container_uuid != self.container_uuid_was
+      if self.container_count_changed?
+        errors.add :container_count, "cannot be updated directly."
+        return false
+      else
+        self.container_count += 1
+        if self.container_uuid_was
+          old_container = Container.find_by_uuid(self.container_uuid_was)
+          old_logs = Collection.where(portable_data_hash: old_container.log).first
+          if old_logs
+            log_coll = self.log_uuid.nil? ? nil : Collection.where(uuid: self.log_uuid).first
+            if self.log_uuid.nil?
+              log_coll = Collection.new(
+                owner_uuid: self.owner_uuid,
+                name: "Container log for request #{uuid}",
+                manifest_text: "")
+            end
+
+            # copy logs from old container into CR's log collection
+            src = Arv::Collection.new(old_logs.manifest_text)
+            dst = Arv::Collection.new(log_coll.manifest_text)
+            dst.cp_r("./", "log for container #{old_container.uuid}", src)
+            manifest = dst.manifest_text
+
+            log_coll.assign_attributes(
+              portable_data_hash: Digest::MD5.hexdigest(manifest) + '+' + manifest.bytesize.to_s,
+              manifest_text: manifest)
+            log_coll.save_with_unique_name!
+            self.log_uuid = log_coll.uuid
+          end
+        end
+      end
+    end
+  end
+
+  def set_default_preemptible_scheduling_parameter
+    c = get_requesting_container()
+    if self.state == Committed
+      # If preemptible instances (e.g., AWS Spot Instances) are allowed,
+      # request them for child containers by default.
+      if Rails.configuration.preemptible_instances and !c.nil? and
+        self.scheduling_parameters['preemptible'].nil?
+        self.scheduling_parameters['preemptible'] = true
+      end
+    end
+  end
+
+  def validate_runtime_constraints
+    case self.state
+    when Committed
+      [['vcpus', true],
+       ['ram', true],
+       ['keep_cache_ram', false]].each do |k, required|
+        if !required && !runtime_constraints.include?(k)
+          next
+        end
+        v = runtime_constraints[k]
+        unless (v.is_a?(Integer) && v > 0)
+          errors.add(:runtime_constraints,
+                     "[#{k}]=#{v.inspect} must be a positive integer")
+        end
+      end
+    end
+  end
+
+  def validate_datatypes
+    command.each do |c|
+      if !c.is_a? String
+        errors.add(:command, "must be an array of strings but has entry #{c.class}")
+      end
+    end
+    environment.each do |k,v|
+      if !k.is_a?(String) || !v.is_a?(String)
+        errors.add(:environment, "must be an map of String to String but has entry #{k.class} to #{v.class}")
+      end
+    end
+    [:mounts, :secret_mounts].each do |m|
+      self[m].each do |k, v|
+        if !k.is_a?(String) || !v.is_a?(Hash)
+          errors.add(m, "must be an map of String to Hash but is has entry #{k.class} to #{v.class}")
+        end
+        if v["kind"].nil?
+          errors.add(m, "each item must have a 'kind' field")
+        end
+        [[String, ["kind", "portable_data_hash", "uuid", "device_type",
+                   "path", "commit", "repository_name", "git_url"]],
+         [Integer, ["capacity"]]].each do |t, fields|
+          fields.each do |f|
+            if !v[f].nil? && !v[f].is_a?(t)
+              errors.add(m, "#{k}: #{f} must be a #{t} but is #{v[f].class}")
+            end
+          end
+        end
+        ["writable", "exclude_from_output"].each do |f|
+          if !v[f].nil? && !v[f].is_a?(TrueClass) && !v[f].is_a?(FalseClass)
+            errors.add(m, "#{k}: #{f} must be a #{t} but is #{v[f].class}")
+          end
+        end
+      end
+    end
+  end
+
+  def validate_scheduling_parameters
+    if self.state == Committed
+      if scheduling_parameters.include? 'partitions' and
+         (!scheduling_parameters['partitions'].is_a?(Array) ||
+          scheduling_parameters['partitions'].reject{|x| !x.is_a?(String)}.size !=
+            scheduling_parameters['partitions'].size)
+            errors.add :scheduling_parameters, "partitions must be an array of strings"
+      end
+      if !Rails.configuration.preemptible_instances and scheduling_parameters['preemptible']
+        errors.add :scheduling_parameters, "preemptible instances are not allowed"
+      end
+      if scheduling_parameters.include? 'max_run_time' and
+        (!scheduling_parameters['max_run_time'].is_a?(Integer) ||
+          scheduling_parameters['max_run_time'] < 0)
+          errors.add :scheduling_parameters, "max_run_time must be positive integer"
+      end
+    end
+  end
+
+  def check_update_whitelist
+    permitted = AttrsPermittedAlways.dup
+
+    if self.new_record? || self.state_was == Uncommitted
+      # Allow create-and-commit in a single operation.
+      permitted.push(*AttrsPermittedBeforeCommit)
+    end
+
+    case self.state
+    when Committed
+      permitted.push :priority, :container_count_max, :container_uuid
+
+      if self.container_uuid.nil?
+        self.errors.add :container_uuid, "has not been resolved to a container."
+      end
+
+      if self.priority.nil?
+        self.errors.add :priority, "cannot be nil"
+      end
+
+      # Allow container count to increment by 1
+      if (self.container_uuid &&
+          self.container_uuid != self.container_uuid_was &&
+          self.container_count == 1 + (self.container_count_was || 0))
+        permitted.push :container_count
+      end
+
+      if current_user.andand.is_admin
+        permitted.push :log_uuid
+      end
+
+    when Final
+      if self.state_was == Committed
+        # "Cancel" means setting priority=0, state=Committed
+        permitted.push :priority
+
+        if current_user.andand.is_admin
+          permitted.push :output_uuid, :log_uuid
+        end
+      end
+
+    end
+
+    super(permitted)
+  end
+
+  def secret_mounts_key_conflict
+    secret_mounts.each do |k, v|
+      if mounts.has_key?(k)
+        errors.add(:secret_mounts, 'conflict with non-secret mounts')
+        return false
+      end
+    end
+  end
+
+  def validate_runtime_token
+    if !self.runtime_token.nil? && self.runtime_token_changed?
+      if runtime_token[0..2] != "v2/"
+        errors.add :runtime_token, "not a v2 token"
+        return
+      end
+      if ApiClientAuthorization.validate(token: runtime_token).nil?
+        errors.add :runtime_token, "failed validation"
+      end
+    end
+  end
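+
+  # A v2 token is expected to look like (hypothetical example):
+  #   v2/zzzzz-gj3su-0123456789abcde/itsasecrettoken0123456789
+  # i.e. "v2/", the ApiClientAuthorization uuid, "/", then the secret.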
+
+  def scrub_secrets
+    if self.state == Final
+      self.secret_mounts = {}
+      self.runtime_token = nil
+    end
+  end
+
+  def update_priority
+    return unless state_changed? || priority_changed? || container_uuid_changed?
+    act_as_system_user do
+      Container.
+        where('uuid in (?)', [self.container_uuid_was, self.container_uuid].compact).
+        map(&:update_priority!)
+    end
+  end
+
+  def set_priority_zero
+    self.update_attributes!(priority: 0) if self.state != Final
+  end
+
+  def set_requesting_container_uuid
+    c = get_requesting_container()
+    if !c.nil?
+      # get_requesting_container returns either the already-assigned
+      # uuid string or a Container derived from the current token;
+      # accept both.
+      self.requesting_container_uuid = c.is_a?(String) ? c : c.uuid
+      # Determine the priority of container request for the requesting
+      # container.
+      self.priority = ContainerRequest.where(container_uuid: self.requesting_container_uuid).maximum("priority") || 0
+    end
+  end
+
+  def get_requesting_container
+    return self.requesting_container_uuid if !self.requesting_container_uuid.nil?
+    Container.for_current_token
+  end
+end
diff --git a/services/api/app/models/database_seeds.rb b/services/api/app/models/database_seeds.rb
new file mode 100644 (file)
index 0000000..6e7ab9b
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class DatabaseSeeds
+  extend CurrentApiClient
+  def self.install
+    system_user
+    system_group
+    all_users_group
+    anonymous_group
+    anonymous_group_read_permission
+    anonymous_user
+    empty_collection
+  end
+end
diff --git a/services/api/app/models/group.rb b/services/api/app/models/group.rb
new file mode 100644 (file)
index 0000000..46bb447
--- /dev/null
@@ -0,0 +1,53 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'can_be_an_owner'
+require 'trashable'
+
+class Group < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  include CanBeAnOwner
+  include Trashable
+
+  serialize :properties, Hash
+
+  after_create :invalidate_permissions_cache
+  after_update :maybe_invalidate_permissions_cache
+  before_create :assign_name
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :group_class
+    t.add :description
+    t.add :writable_by
+    t.add :delete_at
+    t.add :trash_at
+    t.add :is_trashed
+    t.add :properties
+  end
+
+  def maybe_invalidate_permissions_cache
+    if uuid_changed? or owner_uuid_changed? or is_trashed_changed?
+      # This can change users' permissions on other groups as well as
+      # this one.
+      invalidate_permissions_cache
+    end
+  end
+
+  def invalidate_permissions_cache
+    # Ensure a new group can be accessed by the appropriate users
+    # immediately after being created.
+    User.invalidate_permissions_cache self.async_permissions_update
+  end
+
+  def assign_name
+    if self.new_record? and (self.name.nil? or self.name.empty?)
+      self.name = self.uuid
+    end
+    true
+  end
+
+end
diff --git a/services/api/app/models/human.rb b/services/api/app/models/human.rb
new file mode 100644 (file)
index 0000000..6897282
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Human < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :properties, Hash
+
+  api_accessible :user, extend: :common do |t|
+    t.add :properties
+  end
+end
diff --git a/services/api/app/models/job.rb b/services/api/app/models/job.rb
new file mode 100644 (file)
index 0000000..7508ead
--- /dev/null
@@ -0,0 +1,703 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'log_reuse_info'
+require 'safe_json'
+
+class Job < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  extend CurrentApiClient
+  extend LogReuseInfo
+  serialize :components, Hash
+  attr_protected :arvados_sdk_version, :docker_image_locator
+  serialize :script_parameters, Hash
+  serialize :runtime_constraints, Hash
+  serialize :tasks_summary, Hash
+  before_create :ensure_unique_submit_id
+  after_commit :trigger_crunch_dispatch_if_cancelled, :on => :update
+  before_validation :set_priority
+  before_validation :update_state_from_old_state_attrs
+  before_validation :update_script_parameters_digest
+  validate :ensure_script_version_is_commit
+  validate :find_docker_image_locator
+  validate :find_arvados_sdk_version
+  validate :validate_status
+  validate :validate_state_change
+  validate :ensure_no_collection_uuids_in_script_params
+  before_save :tag_version_in_internal_repository
+  before_save :update_timestamps_when_state_changes
+
+  has_many :commit_ancestors, :foreign_key => :descendant, :primary_key => :script_version
+  has_many(:nodes, foreign_key: :job_uuid, primary_key: :uuid)
+
+  class SubmitIdReused < RequestError
+  end
+
+  api_accessible :user, extend: :common do |t|
+    t.add :submit_id
+    t.add :priority
+    t.add :script
+    t.add :script_parameters
+    t.add :script_version
+    t.add :cancelled_at
+    t.add :cancelled_by_client_uuid
+    t.add :cancelled_by_user_uuid
+    t.add :started_at
+    t.add :finished_at
+    t.add :output
+    t.add :success
+    t.add :running
+    t.add :state
+    t.add :is_locked_by_uuid
+    t.add :log
+    t.add :runtime_constraints
+    t.add :tasks_summary
+    t.add :nondeterministic
+    t.add :repository
+    t.add :supplied_script_version
+    t.add :arvados_sdk_version
+    t.add :docker_image_locator
+    t.add :queue_position
+    t.add :node_uuids
+    t.add :description
+    t.add :components
+  end
+
+  # Supported states for a job
+  States = [
+            (Queued = 'Queued'),
+            (Running = 'Running'),
+            (Cancelled = 'Cancelled'),
+            (Failed = 'Failed'),
+            (Complete = 'Complete'),
+           ]
+
+  after_initialize do
+    @need_crunch_dispatch_trigger = false
+  end
+
+  def self.limit_index_columns_read
+    ["components"]
+  end
+
+  def assert_finished
+    update_attributes(finished_at: finished_at || db_current_time,
+                      success: success.nil? ? false : success,
+                      running: false)
+  end
+
+  def node_uuids
+    nodes.map(&:uuid)
+  end
+
+  def self.queue
+    self.where('state = ?', Queued).order('priority desc, created_at')
+  end
+
+  def queue_position
+    # We used to report this accurately, but the implementation made queue
+    # API requests O(n**2) for the size of the queue.  See #8800.
+    # We've soft-disabled it because it's not clear we even want this
+    # functionality: now that we have Node Manager with support for multiple
+    # node sizes, "queue position" tells you very little about when a job will
+    # run.
+    state == Queued ? 0 : nil
+  end
+
+  def self.running
+    self.where('running = ?', true).
+      order('priority desc, created_at')
+  end
+
+  def lock locked_by_uuid
+    with_lock do
+      unless self.state == Queued and self.is_locked_by_uuid.nil?
+        raise AlreadyLockedError
+      end
+      self.state = Running
+      self.is_locked_by_uuid = locked_by_uuid
+      self.save!
+    end
+  end
+
+  def update_script_parameters_digest
+    self.script_parameters_digest = self.class.sorted_hash_digest(script_parameters)
+  end
+
+  def self.searchable_columns operator
+    super - ["script_parameters_digest"]
+  end
+
+  def self.full_text_searchable_columns
+    super - ["script_parameters_digest"]
+  end
+
+  def self.load_job_specific_filters attrs, orig_filters, read_users
+    # Convert Job-specific @filters entries into general SQL filters.
+    script_info = {"repository" => nil, "script" => nil}
+    git_filters = Hash.new do |hash, key|
+      hash[key] = {"max_version" => "HEAD", "exclude_versions" => []}
+    end
+    filters = []
+    orig_filters.each do |attr, operator, operand|
+      if (script_info.has_key? attr) and (operator == "=")
+        if script_info[attr].nil?
+          script_info[attr] = operand
+        elsif script_info[attr] != operand
+          raise ArgumentError.new("incompatible #{attr} filters")
+        end
+      end
+      case operator
+      when "in git"
+        git_filters[attr]["min_version"] = operand
+      when "not in git"
+        git_filters[attr]["exclude_versions"] += Array.wrap(operand)
+      when "in docker", "not in docker"
+        image_hashes = Array.wrap(operand).flat_map do |search_term|
+          image_search, image_tag = search_term.split(':', 2)
+          Collection.
+            find_all_for_docker_image(image_search, image_tag, read_users, filter_compatible_format: false).
+            map(&:portable_data_hash)
+        end
+        filters << [attr, operator.sub(/ docker$/, ""), image_hashes]
+      else
+        filters << [attr, operator, operand]
+      end
+    end
+
+    # Build a real script_version filter from any "not? in git" filters.
+    git_filters.each_pair do |attr, filter|
+      case attr
+      when "script_version"
+        script_info.each_pair do |key, value|
+          if value.nil?
+            raise ArgumentError.new("script_version filter needs #{key} filter")
+          end
+        end
+        filter["repository"] = script_info["repository"]
+        if attrs[:script_version]
+          filter["max_version"] = attrs[:script_version]
+        else
+          # Using HEAD, set earlier by the hash default, is fine.
+        end
+      when "arvados_sdk_version"
+        filter["repository"] = "arvados"
+      else
+        raise ArgumentError.new("unknown attribute for git filter: #{attr}")
+      end
+      revisions = Commit.find_commit_range(filter["repository"],
+                                           filter["min_version"],
+                                           filter["max_version"],
+                                           filter["exclude_versions"])
+      if revisions.empty?
+        raise ArgumentError.
+          new("error searching #{filter['repository']} from " +
+              "'#{filter['min_version']}' to '#{filter['max_version']}', " +
+              "excluding #{filter['exclude_versions']}")
+      end
+      filters.append([attr, "in", revisions])
+    end
+
+    filters
+  end
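+
+  # For example (hypothetical operand), an input filter
+  #   ["script_version", "in git", "main"]
+  # is resolved against the repository named by the "repository" filter
+  # and rewritten as ["script_version", "in", [<matching commit sha1s>]].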
+
+  def self.find_reusable attrs, params, filters, read_users
+    if filters.empty?  # Translate older creation parameters into filters.
+      filters =
+        [["repository", "=", attrs[:repository]],
+         ["script", "=", attrs[:script]],
+         ["script_version", "not in git", params[:exclude_script_versions]],
+        ].reject { |filter| filter.last.nil? or filter.last.empty? }
+      if !params[:minimum_script_version].blank?
+        filters << ["script_version", "in git",
+                     params[:minimum_script_version]]
+      else
+        filters += default_git_filters("script_version", attrs[:repository],
+                                       attrs[:script_version])
+      end
+      if image_search = attrs[:runtime_constraints].andand["docker_image"]
+        if image_tag = attrs[:runtime_constraints]["docker_image_tag"]
+          image_search += ":#{image_tag}"
+        end
+        image_locator = Collection.
+          for_latest_docker_image(image_search).andand.portable_data_hash
+      else
+        image_locator = nil
+      end
+      filters << ["docker_image_locator", "=", image_locator]
+      if sdk_version = attrs[:runtime_constraints].andand["arvados_sdk_version"]
+        filters += default_git_filters("arvados_sdk_version", "arvados", sdk_version)
+      end
+      filters = load_job_specific_filters(attrs, filters, read_users)
+    end
+
+    # Check specified filters for some reasonableness.
+    filter_names = filters.map { |f| f.first }.uniq
+    ["repository", "script"].each do |req_filter|
+      if not filter_names.include?(req_filter)
+        return send_error("#{req_filter} filter required")
+      end
+    end
+
+    # Search for a reusable Job, and return it if found.
+    candidates = Job.readable_by(current_user)
+    log_reuse_info { "starting with #{candidates.count} jobs readable by current user #{current_user.uuid}" }
+
+    candidates = candidates.where(
+      'state = ? or (owner_uuid = ? and state in (?))',
+      Job::Complete, current_user.uuid, [Job::Queued, Job::Running])
+    log_reuse_info(candidates) { "after filtering on job state ((state=Complete) or (state=Queued/Running and (submitted by current user)))" }
+
+    digest = Job.sorted_hash_digest(attrs[:script_parameters])
+    candidates = candidates.where('script_parameters_digest = ?', digest)
+    log_reuse_info(candidates) { "after filtering on script_parameters_digest #{digest}" }
+
+    candidates = candidates.where('nondeterministic is distinct from ?', true)
+    log_reuse_info(candidates) { "after filtering on !nondeterministic" }
+
+    # prefer Running jobs over Queued
+    candidates = candidates.order('state desc, created_at')
+
+    candidates = apply_filters candidates, filters
+    log_reuse_info(candidates) { "after filtering on repo, script, and custom filters #{filters.inspect}" }
+
+    chosen = nil
+    chosen_output = nil
+    incomplete_job = nil
+    candidates.each do |j|
+      if j.state != Job::Complete
+        if !incomplete_job
+          # We'll use this if we don't find a job that has completed
+          log_reuse_info { "job #{j.uuid} is reusable, but unfinished; continuing search for completed jobs" }
+          incomplete_job = j
+        else
+          log_reuse_info { "job #{j.uuid} is unfinished and we already have #{incomplete_job.uuid}; ignoring" }
+        end
+      elsif chosen == false
+        # Ignore: we have already decided not to reuse any completed
+        # job.
+        log_reuse_info { "job #{j.uuid} with output #{j.output} ignored, see above" }
+      elsif j.output.nil?
+        log_reuse_info { "job #{j.uuid} has nil output" }
+      elsif j.log.nil?
+        log_reuse_info { "job #{j.uuid} has nil log" }
+      elsif Rails.configuration.reuse_job_if_outputs_differ
+        if !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
+          # Ignore: keep looking for an incomplete job or one whose
+          # output is readable.
+          log_reuse_info { "job #{j.uuid} output #{j.output} unavailable to user; continuing search" }
+        elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.log)
+          # Ignore: keep looking for an incomplete job or one whose
+          # log is readable.
+          log_reuse_info { "job #{j.uuid} log #{j.log} unavailable to user; continuing search" }
+        else
+          log_reuse_info { "job #{j.uuid} with output #{j.output} is reusable; decision is final." }
+          return j
+        end
+      elsif chosen_output
+        if chosen_output != j.output
+          # If two matching jobs produced different outputs, run a new
+          # job (or use one that's already running/queued) instead of
+          # choosing one arbitrarily.
+          log_reuse_info { "job #{j.uuid} output #{j.output} disagrees; forgetting about #{chosen.uuid} and ignoring any other finished jobs (see reuse_job_if_outputs_differ in application.default.yml)" }
+          chosen = false
+        else
+          log_reuse_info { "job #{j.uuid} output #{j.output} agrees with chosen #{chosen.uuid}; continuing search in case other candidates have different outputs" }
+        end
+        # ...and that's the only thing we need to do once we've chosen
+        # a job to reuse.
+      elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
+        # This user cannot read the output of this job. Any other
+        # completed job will have either the same output (making it
+        # unusable) or a different output (making it unusable because
+        # reuse_job_if_outputs_differ is turned off). Therefore,
+        # any further investigation of reusable jobs is futile.
+        log_reuse_info { "job #{j.uuid} output #{j.output} is unavailable to user; this means no finished job can be reused (see reuse_job_if_outputs_differ in application.default.yml)" }
+        chosen = false
+      elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.log)
+        # This user cannot read the log of this job, don't try to reuse the
+        # job but consider if the output is consistent.
+        log_reuse_info { "job #{j.uuid} log #{j.log} is unavailable to user; continuing search" }
+        chosen_output = j.output
+      else
+        log_reuse_info { "job #{j.uuid} with output #{j.output} can be reused; continuing search in case other candidates have different outputs" }
+        chosen = j
+        chosen_output = j.output
+      end
+    end
+    j = chosen || incomplete_job
+    if j
+      log_reuse_info { "done, #{j.uuid} was selected" }
+    else
+      log_reuse_info { "done, nothing suitable" }
+    end
+    return j
+  end
+
+  def self.default_git_filters(attr_name, repo_name, refspec)
+    # Add a filter to @filters for `attr_name` = the latest commit available
+    # in `repo_name` at `refspec`.  No filter is added if refspec can't be
+    # resolved.
+    commits = Commit.find_commit_range(repo_name, nil, refspec, nil)
+    if commit_hash = commits.first
+      [[attr_name, "=", commit_hash]]
+    else
+      []
+    end
+  end
+
+  def cancel(cascade: false, need_transaction: true)
+    if need_transaction
+      ActiveRecord::Base.transaction do
+        cancel(cascade: cascade, need_transaction: false)
+      end
+      return
+    end
+
+    if self.state.in?([Queued, Running])
+      self.state = Cancelled
+      self.save!
+    elsif self.state != Cancelled
+      raise InvalidStateTransitionError
+    end
+
+    return if !cascade
+
+    # cancel all children; they could be jobs or pipeline instances
+    children = self.components.andand.collect{|_, u| u}.compact
+
+    return if children.empty?
+
+    # cancel any child jobs
+    Job.where(uuid: children, state: [Queued, Running]).each do |job|
+      job.cancel(cascade: cascade, need_transaction: false)
+    end
+
+    # cancel any child pipelines
+    PipelineInstance.where(uuid: children, state: [PipelineInstance::RunningOnServer, PipelineInstance::RunningOnClient]).each do |pi|
+      pi.cancel(cascade: cascade, need_transaction: false)
+    end
+  end
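+
+  # Typical call (hypothetical uuid): cancel a job and everything it
+  # spawned, in a single transaction:
+  #   Job.find_by_uuid("zzzzz-8i9sb-0123456789abcde").cancel(cascade: true)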
+
+  protected
+
+  def self.sorted_hash_digest h
+    Digest::MD5.hexdigest(Oj.dump(deep_sort_hash(h)))
+  end
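+
+  # Key order does not affect the digest, e.g. (illustrative):
+  #   sorted_hash_digest({"a" => 1, "b" => 2}) ==
+  #     sorted_hash_digest({"b" => 2, "a" => 1})
+  # so equivalent script_parameters always hash to the same value.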
+
+  def foreign_key_attributes
+    super + %w(output log)
+  end
+
+  def skip_uuid_read_permission_check
+    super + %w(cancelled_by_client_uuid)
+  end
+
+  def skip_uuid_existence_check
+    super + %w(output log)
+  end
+
+  def set_priority
+    if self.priority.nil?
+      self.priority = 0
+    end
+    true
+  end
+
+  def ensure_script_version_is_commit
+    if state == Running
+      # Apparently client has already decided to go for it. This is
+      # needed to run a local job using a local working directory
+      # instead of a commit-ish.
+      return true
+    end
+    if new_record? or repository_changed? or script_version_changed?
+      sha1 = Commit.find_commit_range(repository,
+                                      nil, script_version, nil).first
+      if not sha1
+        errors.add :script_version, "#{script_version} does not resolve to a commit"
+        return false
+      end
+      if supplied_script_version.nil? or supplied_script_version.empty?
+        self.supplied_script_version = script_version
+      end
+      self.script_version = sha1
+    end
+    true
+  end
+
+  def tag_version_in_internal_repository
+    if state == Running
+      # No point now. See ensure_script_version_is_commit.
+      true
+    elsif errors.any?
+      # Won't be saved, and script_version might not even be valid.
+      true
+    elsif new_record? or repository_changed? or script_version_changed?
+      uuid_was = uuid
+      begin
+        assign_uuid
+        Commit.tag_in_internal_repository repository, script_version, uuid
+      rescue
+        self.uuid = uuid_was
+        raise
+      end
+    end
+  end
+
+  def ensure_unique_submit_id
+    if !submit_id.nil?
+      if Job.where('submit_id=?',self.submit_id).first
+        raise SubmitIdReused.new
+      end
+    end
+    true
+  end
+
+  def resolve_runtime_constraint(key, attr_sym)
+    if ((runtime_constraints.is_a? Hash) and
+        (search = runtime_constraints[key]))
+      ok, result = yield search
+    else
+      ok, result = true, nil
+    end
+    if ok
+      send("#{attr_sym}=".to_sym, result)
+    else
+      errors.add(attr_sym, result)
+    end
+    ok
+  end
+
+  def find_arvados_sdk_version
+    resolve_runtime_constraint("arvados_sdk_version",
+                               :arvados_sdk_version) do |git_search|
+      commits = Commit.find_commit_range("arvados",
+                                         nil, git_search, nil)
+      if commits.empty?
+        [false, "#{git_search} does not resolve to a commit"]
+      elsif not runtime_constraints["docker_image"]
+        [false, "cannot be specified without a Docker image constraint"]
+      else
+        [true, commits.first]
+      end
+    end
+  end
+
+  def find_docker_image_locator
+    if runtime_constraints.is_a? Hash
+      runtime_constraints['docker_image'] ||=
+        Rails.configuration.default_docker_image_for_jobs
+    end
+
+    resolve_runtime_constraint("docker_image",
+                               :docker_image_locator) do |image_search|
+      image_tag = runtime_constraints['docker_image_tag']
+      if coll = Collection.for_latest_docker_image(image_search, image_tag)
+        [true, coll.portable_data_hash]
+      else
+        [false, "not found for #{image_search}"]
+      end
+    end
+  end
+
+  def permission_to_update
+    if is_locked_by_uuid_was and !(current_user and
+                                   (current_user.uuid == is_locked_by_uuid_was or
+                                    current_user.uuid == system_user.uuid))
+      if script_changed? or
+          script_parameters_changed? or
+          script_version_changed? or
+          (!cancelled_at_was.nil? and
+           (cancelled_by_client_uuid_changed? or
+            cancelled_by_user_uuid_changed? or
+            cancelled_at_changed?)) or
+          started_at_changed? or
+          finished_at_changed? or
+          running_changed? or
+          success_changed? or
+          output_changed? or
+          log_changed? or
+          tasks_summary_changed? or
+          (state_changed? && state != Cancelled) or
+          components_changed?
+        logger.warn "User #{current_user.uuid if current_user} tried to change protected job attributes on locked #{self.class.to_s} #{uuid_was}"
+        return false
+      end
+    end
+    if !is_locked_by_uuid_changed?
+      super
+    else
+      if !current_user
+        logger.warn "Anonymous user tried to change lock on #{self.class.to_s} #{uuid_was}"
+        false
+      elsif is_locked_by_uuid_was and is_locked_by_uuid_was != current_user.uuid
+        logger.warn "User #{current_user.uuid} tried to steal lock on #{self.class.to_s} #{uuid_was} from #{is_locked_by_uuid_was}"
+        false
+      elsif !is_locked_by_uuid.nil? and is_locked_by_uuid != current_user.uuid
+        logger.warn "User #{current_user.uuid} tried to lock #{self.class.to_s} #{uuid_was} with uuid #{is_locked_by_uuid}"
+        false
+      else
+        super
+      end
+    end
+  end
+
+  def update_modified_by_fields
+    if self.cancelled_at_changed?
+      # Ensure cancelled_at cannot be set to arbitrary non-now times,
+      # or changed once it is set.
+      if self.cancelled_at and not self.cancelled_at_was
+        self.cancelled_at = db_current_time
+        self.cancelled_by_user_uuid = current_user.uuid
+        self.cancelled_by_client_uuid = current_api_client.andand.uuid
+        @need_crunch_dispatch_trigger = true
+      else
+        self.cancelled_at = self.cancelled_at_was
+        self.cancelled_by_user_uuid = self.cancelled_by_user_uuid_was
+        self.cancelled_by_client_uuid = self.cancelled_by_client_uuid_was
+      end
+    end
+    super
+  end
+
+  def trigger_crunch_dispatch_if_cancelled
+    if @need_crunch_dispatch_trigger
+      File.open(Rails.configuration.crunch_refresh_trigger, 'wb') do
+        # That's all, just create/touch a file for crunch-job to see.
+      end
+    end
+  end
+
+  def update_timestamps_when_state_changes
+    return if not (state_changed? or new_record?)
+
+    case state
+    when Running
+      self.started_at ||= db_current_time
+    when Failed, Complete
+      self.finished_at ||= db_current_time
+    when Cancelled
+      self.cancelled_at ||= db_current_time
+    end
+
+    # TODO: Remove the following case block when old "success" and
+    # "running" attrs go away. Until then, this ensures we still
+    # expose correct success/running flags to older clients, even if
+    # some new clients are writing only the new state attribute.
+    case state
+    when Queued
+      self.running = false
+      self.success = nil
+    when Running
+      self.running = true
+      self.success = nil
+    when Cancelled, Failed
+      self.running = false
+      self.success = false
+    when Complete
+      self.running = false
+      self.success = true
+    end
+    self.running ||= false # Default to false instead of nil.
+
+    @need_crunch_dispatch_trigger = true
+
+    true
+  end
+
+  def update_state_from_old_state_attrs
+    # If a client has touched the legacy state attrs, update the
+    # "state" attr to agree with the updated values of the legacy
+    # attrs.
+    #
+    # TODO: Remove this method when old "success" and "running" attrs
+    # go away.
+    if cancelled_at_changed? or
+        success_changed? or
+        running_changed? or
+        state.nil?
+      if cancelled_at
+        self.state = Cancelled
+      elsif success == false
+        self.state = Failed
+      elsif success == true
+        self.state = Complete
+      elsif running == true
+        self.state = Running
+      else
+        self.state = Queued
+      end
+    end
+    true
+  end
+
+  def validate_status
+    if self.state.in?(States)
+      true
+    else
+      errors.add :state, "#{state.inspect} must be one of: #{States.inspect}"
+      false
+    end
+  end
+
+  def validate_state_change
+    ok = true
+    if self.state_changed?
+      ok = case self.state_was
+           when nil
+             # state isn't set yet
+             true
+           when Queued
+             # Permit going from queued to any state
+             true
+           when Running
+             # From running, may only transition to a finished state
+             [Complete, Failed, Cancelled].include? self.state
+           when Complete, Failed, Cancelled
+             # Once in a finished state, don't permit any more state changes
+             false
+           else
+             # Any other state transition is also invalid
+             false
+           end
+      if not ok
+        errors.add :state, "invalid change from #{self.state_was} to #{self.state}"
+      end
+    end
+    ok
+  end
+
+  def ensure_no_collection_uuids_in_script_params
+    # Fail validation if any script_parameters field includes a string containing a
+    # collection uuid pattern.
+    if self.script_parameters_changed?
+      if recursive_hash_search(self.script_parameters, Collection.uuid_regex)
+        self.errors.add :script_parameters, "must use portable_data_hash instead of collection uuid"
+        return false
+      end
+    end
+    true
+  end
+
+  # recursive_hash_search searches recursively through hashes and
+  # arrays in 'thing' for string fields matching regular expression
+  # 'pattern'.  Returns true if pattern is found, false otherwise.
+  def recursive_hash_search thing, pattern
+    if thing.is_a? Hash
+      thing.each do |k, v|
+        return true if recursive_hash_search v, pattern
+      end
+    elsif thing.is_a? Array
+      thing.each do |k|
+        return true if recursive_hash_search k, pattern
+      end
+    elsif thing.is_a? String
+      return true if thing.match pattern
+    end
+    false
+  end
+end
diff --git a/services/api/app/models/job_task.rb b/services/api/app/models/job_task.rb
new file mode 100644 (file)
index 0000000..a960186
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class JobTask < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :parameters, Hash
+  before_create :set_default_qsequence
+  after_update :delete_created_job_tasks_if_failed
+
+  api_accessible :user, extend: :common do |t|
+    t.add :job_uuid
+    t.add :created_by_job_task_uuid
+    t.add :sequence
+    t.add :qsequence
+    t.add :parameters
+    t.add :output
+    t.add :progress
+    t.add :success
+    t.add :started_at
+    t.add :finished_at
+  end
+
+  protected
+
+  def delete_created_job_tasks_if_failed
+    if self.success == false and self.success != self.success_was
+      JobTask.delete_all ['created_by_job_task_uuid = ?', self.uuid]
+    end
+  end
+
+  def set_default_qsequence
+    self.qsequence ||= self.class.connection.
+      select_value("SELECT nextval('job_tasks_qsequence_seq')")
+  end
+end
diff --git a/services/api/app/models/keep_disk.rb b/services/api/app/models/keep_disk.rb
new file mode 100644 (file)
index 0000000..5751c13
--- /dev/null
@@ -0,0 +1,79 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class KeepDisk < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  before_validation :ensure_ping_secret
+
+  api_accessible :user, extend: :common do |t|
+    t.add :node_uuid
+    t.add :filesystem_uuid
+    t.add :bytes_total
+    t.add :bytes_free
+    t.add :is_readable
+    t.add :is_writable
+    t.add :last_read_at
+    t.add :last_write_at
+    t.add :last_ping_at
+    t.add :service_host
+    t.add :service_port
+    t.add :service_ssl_flag
+    t.add :keep_service_uuid
+  end
+  api_accessible :superuser, :extend => :user do |t|
+    t.add :ping_secret
+  end
+
+  def foreign_key_attributes
+    super.reject { |a| a == "filesystem_uuid" }
+  end
+
+  def ping(o)
+    raise "must have :service_host and :ping_secret" unless o[:service_host] and o[:ping_secret]
+
+    if o[:ping_secret] != self.ping_secret
+      logger.info "Ping: secret mismatch: received \"#{o[:ping_secret]}\" != \"#{self.ping_secret}\""
+      return nil
+    end
+
+    @bypass_arvados_authorization = true
+    self.update_attributes!(o.select { |k,v|
+                             [:bytes_total,
+                              :bytes_free,
+                              :is_readable,
+                              :is_writable,
+                              :last_read_at,
+                              :last_write_at
+                             ].collect(&:to_s).index k
+                           }.merge(last_ping_at: db_current_time))
+  end
+
+  def service_host
+    KeepService.find_by_uuid(self.keep_service_uuid).andand.service_host
+  end
+
+  def service_port
+    KeepService.find_by_uuid(self.keep_service_uuid).andand.service_port
+  end
+
+  def service_ssl_flag
+    KeepService.find_by_uuid(self.keep_service_uuid).andand.service_ssl_flag
+  end
+
+  protected
+
+  def ensure_ping_secret
+    self.ping_secret ||= rand(2**256).to_s(36)
+  end
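+
+  # rand(2**256).to_s(36) above yields a random base-36 string of up
+  # to 50 characters, i.e. 256 bits of entropy.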
+
+  def permission_to_update
+    @bypass_arvados_authorization or super
+  end
+
+  def permission_to_create
+    current_user and current_user.is_admin
+  end
+end
diff --git a/services/api/app/models/keep_service.rb b/services/api/app/models/keep_service.rb
new file mode 100644 (file)
index 0000000..bf5f3cc
--- /dev/null
@@ -0,0 +1,29 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class KeepService < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+
+  api_accessible :user, extend: :common do |t|
+    t.add  :service_host
+    t.add  :service_port
+    t.add  :service_ssl_flag
+    t.add  :service_type
+    t.add  :read_only
+  end
+  api_accessible :superuser, :extend => :user do |t|
+  end
+
+  protected
+
+  def permission_to_create
+    current_user.andand.is_admin
+  end
+
+  def permission_to_update
+    current_user.andand.is_admin
+  end
+end
diff --git a/services/api/app/models/link.rb b/services/api/app/models/link.rb
new file mode 100644 (file)
index 0000000..ac3efe3
--- /dev/null
@@ -0,0 +1,102 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Link < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :properties, Hash
+  before_create :permission_to_attach_to_objects
+  before_update :permission_to_attach_to_objects
+  after_update :maybe_invalidate_permissions_cache
+  after_create :maybe_invalidate_permissions_cache
+  after_destroy :maybe_invalidate_permissions_cache
+  validate :name_links_are_obsolete
+
+  api_accessible :user, extend: :common do |t|
+    t.add :tail_uuid
+    t.add :link_class
+    t.add :name
+    t.add :head_uuid
+    t.add :head_kind
+    t.add :tail_kind
+    t.add :properties
+  end
+
+  def head_kind
+    if k = ArvadosModel::resource_class_for_uuid(head_uuid)
+      k.kind
+    end
+  end
+
+  def tail_kind
+    if k = ArvadosModel::resource_class_for_uuid(tail_uuid)
+      k.kind
+    end
+  end
+
+  protected
+
+  def permission_to_attach_to_objects
+    # Anonymous users cannot write links
+    return false if !current_user
+
+    # All users can write links that don't affect permissions
+    return true if self.link_class != 'permission'
+
+    # Administrators can grant permissions
+    return true if current_user.is_admin
+
+    head_obj = ArvadosModel.find_by_uuid(head_uuid)
+
+    # No permission links can be pointed to past collection versions
+    return false if head_obj.is_a?(Collection) && head_obj.current_version_uuid != head_uuid
+
+    # All users can grant permissions on objects they own or can manage
+    return true if current_user.can?(manage: head_obj)
+
+    # Default = deny.
+    false
+  end
+
+  def maybe_invalidate_permissions_cache
+    if self.link_class == 'permission'
+      # Clearing the entire permissions cache can generate many
+      # unnecessary queries if many active users are not affected by
+      # this change. In such cases it would be better to search cached
+      # permissions for head_uuid and tail_uuid, and invalidate the
+      # cache for only those users. (This would require a browseable
+      # cache.)
+      User.invalidate_permissions_cache
+    end
+  end
+
+  def name_links_are_obsolete
+    if link_class == 'name'
+      errors.add('name', 'Name links are obsolete')
+      false
+    else
+      true
+    end
+  end
+
+  # A user is permitted to create, update or modify a permission link
+  # if and only if they have "manage" permission on the object
+  # indicated by the permission link's head_uuid.
+  #
+  # All other links are treated as regular ArvadosModel objects.
+  #
+  def ensure_owner_uuid_is_permitted
+    if link_class == 'permission'
+      ob = ArvadosModel.find_by_uuid(head_uuid)
+      raise PermissionDeniedError unless current_user.can?(manage: ob)
+      # All permission links should be owned by the system user.
+      self.owner_uuid = system_user_uuid
+      return true
+    else
+      super
+    end
+  end
+
+end
diff --git a/services/api/app/models/log.rb b/services/api/app/models/log.rb
new file mode 100644 (file)
index 0000000..75e1a4e
--- /dev/null
@@ -0,0 +1,109 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'audit_logs'
+
+class Log < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :properties, Hash
+  before_validation :set_default_event_at
+  after_save :send_notify
+  after_commit { AuditLogs.tidy_in_background }
+
+  api_accessible :user, extend: :common do |t|
+    t.add :id
+    t.add :object_uuid
+    t.add :object_owner_uuid
+    t.add :object_kind
+    t.add :event_at
+    t.add :event_type
+    t.add :summary
+    t.add :properties
+  end
+
+  def object_kind
+    if k = ArvadosModel::resource_class_for_uuid(object_uuid)
+      k.kind
+    end
+  end
+
+  def fill_object(thing)
+    self.object_uuid ||= thing.uuid
+    if respond_to? :object_owner_uuid=
+      # Skip this if the object_owner_uuid migration hasn't happened
+      # yet, i.e., we're in the process of migrating an old database.
+      self.object_owner_uuid = thing.owner_uuid
+    end
+    self.summary ||= "#{self.event_type} of #{thing.uuid}"
+    self
+  end
+
+  def fill_properties(age, etag_prop, attrs_prop)
+    self.properties.merge!({"#{age}_etag" => etag_prop,
+                             "#{age}_attributes" => attrs_prop})
+  end
+
+  def update_to(thing)
+    fill_properties('new', thing.andand.etag, thing.andand.logged_attributes)
+    case event_type
+    when "create"
+      self.event_at = thing.created_at
+    when "update"
+      self.event_at = thing.modified_at
+    when "delete"
+      self.event_at = db_current_time
+    end
+    self
+  end
+
+  def self.readable_by(*users_list)
+    if users_list.last.is_a? Hash
+      users_list.pop
+    end
+    if users_list.select { |u| u.is_admin }.any?
+      return self
+    end
+    user_uuids = users_list.map { |u| u.uuid }
+
+    joins("LEFT JOIN container_requests ON container_requests.container_uuid=logs.object_uuid").
+      where("EXISTS(SELECT target_uuid FROM #{PERMISSION_VIEW} "+
+            "WHERE user_uuid IN (:user_uuids) AND perm_level >= 1 AND "+
+            "target_uuid IN (container_requests.uuid, container_requests.owner_uuid, logs.object_uuid, logs.owner_uuid, logs.object_owner_uuid))",
+            user_uuids: user_uuids)
+  end
+
+  protected
+
+  def permission_to_create
+    true
+  end
+
+  def permission_to_update
+    current_user.andand.is_admin
+  end
+
+  alias_method :permission_to_delete, :permission_to_update
+
+  def set_default_event_at
+    self.event_at ||= db_current_time
+  end
+
+  def log_start_state
+    # don't log start state on logs
+  end
+
+  def log_change(event_type)
+    # Don't log changes to logs.
+  end
+
+  def ensure_valid_uuids
+    # logs can have references to deleted objects
+  end
+
+  def send_notify
+    ActiveRecord::Base.connection.execute "NOTIFY logs, '#{self.id}'"
+  end
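+
+  # Consumers can subscribe with plain PostgreSQL LISTEN, e.g.:
+  #   LISTEN logs;  -- each saved Log delivers its integer id as payload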
+end
diff --git a/services/api/app/models/node.rb b/services/api/app/models/node.rb
new file mode 100644 (file)
index 0000000..3d8b91b
--- /dev/null
@@ -0,0 +1,282 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'tempfile'
+
+class Node < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :info, Hash
+  serialize :properties, Hash
+  before_validation :ensure_ping_secret
+  after_update :dns_server_update
+
+  # Only a controller can figure out whether or not the current API tokens
+  # have access to the associated Job.  They're expected to set
+  # job_readable=true if the Job UUID can be included in the API response.
+  belongs_to(:job, foreign_key: :job_uuid, primary_key: :uuid)
+  attr_accessor :job_readable
+
+  UNUSED_NODE_IP = '127.40.4.0'
+
+  api_accessible :user, :extend => :common do |t|
+    t.add :hostname
+    t.add :domain
+    t.add :ip_address
+    t.add :last_ping_at
+    t.add :slot_number
+    t.add :status
+    t.add :api_job_uuid, as: :job_uuid
+    t.add :crunch_worker_state
+    t.add :properties
+  end
+  api_accessible :superuser, :extend => :user do |t|
+    t.add :first_ping_at
+    t.add :info
+    t.add lambda { |x| Rails.configuration.compute_node_nameservers }, :as => :nameservers
+  end
+
+  after_initialize do
+    @bypass_arvados_authorization = false
+  end
+
+  def domain
+    super || Rails.configuration.compute_node_domain
+  end
+
+  def api_job_uuid
+    job_readable ? job_uuid : nil
+  end
+
+  def crunch_worker_state
+    return 'down' if slot_number.nil?
+    case self.info.andand['slurm_state']
+    when 'alloc', 'comp', 'mix', 'drng'
+      'busy'
+    when 'idle'
+      'idle'
+    else
+      'down'
+    end
+  end
+
+  def status
+    if !self.last_ping_at
+      if db_current_time - self.created_at > 5.minutes
+        'startup-fail'
+      else
+        'pending'
+      end
+    elsif db_current_time - self.last_ping_at > 1.hours
+      'missing'
+    else
+      'running'
+    end
+  end
+
+  def ping(o)
+    raise "must have :ip and :ping_secret" unless o[:ip] and o[:ping_secret]
+
+    if o[:ping_secret] != self.info['ping_secret']
+      logger.info "Ping: secret mismatch: received \"#{o[:ping_secret]}\" != \"#{self.info['ping_secret']}\""
+      raise ArvadosModel::UnauthorizedError.new("Incorrect ping_secret")
+    end
+
+    current_time = db_current_time
+    self.last_ping_at = current_time
+
+    @bypass_arvados_authorization = true
+
+    # Record IP address
+    if self.ip_address.nil?
+      logger.info "#{self.uuid} ip_address= #{o[:ip]}"
+      self.ip_address = o[:ip]
+      self.first_ping_at = current_time
+    end
+
+    # Record instance ID if not already known
+    if o[:ec2_instance_id]
+      if !self.info['ec2_instance_id']
+        self.info['ec2_instance_id'] = o[:ec2_instance_id]
+      elsif self.info['ec2_instance_id'] != o[:ec2_instance_id]
+        logger.debug "Multiple nodes have credentials for #{self.uuid}"
+        raise "#{self.uuid} is already running at #{self.info['ec2_instance_id']} so rejecting ping from #{o[:ec2_instance_id]}"
+      end
+    end
+
+    assign_slot
+
+    # Record other basic stats
+    ['total_cpu_cores', 'total_ram_mb', 'total_scratch_mb'].each do |key|
+      if value = (o[key] or o[key.to_sym])
+        self.properties[key] = value.to_i
+      else
+        self.properties.delete(key)
+      end
+    end
+
+    save!
+  end
+
+  def assign_slot
+    return if self.slot_number.andand > 0
+    while true
+      self.slot_number = self.class.available_slot_number
+      if self.slot_number.nil?
+        raise "No available node slots"
+      end
+      begin
+        save!
+        return assign_hostname
+      rescue ActiveRecord::RecordNotUnique
+        # try again
+      end
+    end
+  end
+
+  protected
+
+  def assign_hostname
+    if self.hostname.nil? and Rails.configuration.assign_node_hostname
+      self.hostname = self.class.hostname_for_slot(self.slot_number)
+    end
+  end
+
+  def self.available_slot_number
+    # Join the sequence 1..max with the nodes table. Return the first
+    # (i.e., smallest) value that doesn't match the slot_number of any
+    # existing node.
+    connection.exec_query('SELECT n FROM generate_series(1, $1) AS slot(n)
+                          LEFT JOIN nodes ON n=slot_number
+                          WHERE slot_number IS NULL
+                          LIMIT 1',
+                          # query label:
+                          'Node.available_slot_number',
+                          # [col_id, val] for $1 vars:
+                          [[nil, Rails.configuration.max_compute_nodes]],
+                         ).rows.first.andand.first
+  end
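+
+  # E.g. (hypothetical) with max_compute_nodes = 4 and slots 1 and 3
+  # already assigned, the query above typically returns 2, the lowest
+  # free slot.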
+
+  def ensure_ping_secret
+    self.info['ping_secret'] ||= rand(2**256).to_s(36)
+  end
+
+  def dns_server_update
+    if ip_address_changed? && ip_address
+      Node.where('id != ? and ip_address = ?',
+                 id, ip_address).each do |stale_node|
+        # One or more(!) stale node records have the same IP address
+        # as the new node. Clear the ip_address field on the stale
+        # nodes. Otherwise, we (via SLURM) might inadvertently connect
+        # to the new node using the old node's hostname.
+        stale_node.update_attributes!(ip_address: nil)
+      end
+    end
+    if hostname_was && hostname_changed?
+      self.class.dns_server_update(hostname_was, UNUSED_NODE_IP)
+    end
+    if hostname && (hostname_changed? || ip_address_changed?)
+      self.class.dns_server_update(hostname, ip_address || UNUSED_NODE_IP)
+    end
+  end
+
+  def self.dns_server_update hostname, ip_address
+    ok = true
+
+    ptr_domain = ip_address.
+      split('.').reverse.join('.').concat('.in-addr.arpa')
+
+    template_vars = {
+      hostname: hostname,
+      uuid_prefix: Rails.configuration.uuid_prefix,
+      ip_address: ip_address,
+      ptr_domain: ptr_domain,
+    }
+
+    if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_conf_template
+      tmpfile = nil
+      begin
+        begin
+          template = IO.read(Rails.configuration.dns_server_conf_template)
+        rescue IOError, SystemCallError => e
+          logger.error "Reading #{Rails.configuration.dns_server_conf_template}: #{e.message}"
+          raise
+        end
+
+        hostfile = File.join Rails.configuration.dns_server_conf_dir, "#{hostname}.conf"
+        Tempfile.open(["#{hostname}-", ".conf.tmp"],
+                                 Rails.configuration.dns_server_conf_dir) do |f|
+          tmpfile = f.path
+          f.puts template % template_vars
+        end
+        File.rename tmpfile, hostfile
+      rescue IOError, SystemCallError => e
+        logger.error "Writing #{hostfile}: #{e.message}"
+        ok = false
+      ensure
+        if tmpfile and File.file? tmpfile
+          # Cleanup remaining temporary file.
+          File.unlink tmpfile
+        end
+      end
+    end
+
+    if Rails.configuration.dns_server_update_command
+      cmd = Rails.configuration.dns_server_update_command % template_vars
+      if not system cmd
+        logger.error "dns_server_update_command #{cmd.inspect} failed: #{$?}"
+        ok = false
+      end
+    end
+
+    if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_reload_command
+      restartfile = File.join(Rails.configuration.dns_server_conf_dir, 'restart.txt')
+      begin
+        File.open(restartfile, 'w') do |f|
+          # Typically, this is used to trigger a dns server restart
+          f.puts Rails.configuration.dns_server_reload_command
+        end
+      rescue IOError, SystemCallError => e
+        logger.error "Unable to write #{restartfile}: #{e.message}"
+        ok = false
+      end
+    end
+
+    ok
+  end
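+
+  # A dns_server_conf_template can reference the vars above using Ruby
+  # format references, e.g. (hypothetical template line):
+  #   host %{hostname} { fixed-address %{ip_address}; }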
+
+  def self.hostname_for_slot(slot_number)
+    config = Rails.configuration.assign_node_hostname
+
+    return nil if !config
+
+    sprintf(config, {:slot_number => slot_number})
+  end
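+
+  # E.g. (hypothetical) with assign_node_hostname = "compute%<slot_number>04d",
+  # slot 9 yields "compute0009".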
+
+  # At startup, make sure all DNS entries exist.  Otherwise, slurmctld
+  # will refuse to start.
+  if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_conf_template and Rails.configuration.assign_node_hostname
+    (0..Rails.configuration.max_compute_nodes-1).each do |slot_number|
+      hostname = hostname_for_slot(slot_number)
+      hostfile = File.join Rails.configuration.dns_server_conf_dir, "#{hostname}.conf"
+      if !File.exist? hostfile
+        n = Node.where(:slot_number => slot_number).first
+        if n.nil? or n.ip_address.nil?
+          dns_server_update(hostname, UNUSED_NODE_IP)
+        else
+          dns_server_update(hostname, n.ip_address)
+        end
+      end
+    end
+  end
+
+  def permission_to_update
+    @bypass_arvados_authorization or super
+  end
+
+  def permission_to_create
+    current_user and current_user.is_admin
+  end
+end
diff --git a/services/api/app/models/pipeline_instance.rb b/services/api/app/models/pipeline_instance.rb
new file mode 100644 (file)
index 0000000..55efa0a
--- /dev/null
@@ -0,0 +1,188 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class PipelineInstance < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :components, Hash
+  serialize :properties, Hash
+  serialize :components_summary, Hash
+  belongs_to :pipeline_template, :foreign_key => :pipeline_template_uuid, :primary_key => :uuid
+
+  before_validation :bootstrap_components
+  before_validation :update_state
+  before_validation :verify_status
+  before_validation :update_timestamps_when_state_changes
+  before_create :set_state_before_save
+  before_save :set_state_before_save
+
+  api_accessible :user, extend: :common do |t|
+    t.add :pipeline_template_uuid
+    t.add :name
+    t.add :components
+    t.add :properties
+    t.add :state
+    t.add :components_summary
+    t.add :description
+    t.add :started_at
+    t.add :finished_at
+  end
+
+  # Supported states for a pipeline instance
+  States =
+    [
+     (New = 'New'),
+     (Ready = 'Ready'),
+     (RunningOnServer = 'RunningOnServer'),
+     (RunningOnClient = 'RunningOnClient'),
+     (Paused = 'Paused'),
+     (Failed = 'Failed'),
+     (Complete = 'Complete'),
+    ]
+
+  def self.limit_index_columns_read
+    ["components"]
+  end
+
+  # if all components have input, the pipeline is Ready
+  def components_look_ready?
+    if !self.components || self.components.empty?
+      return false
+    end
+
+    all_components_have_input = true
+    self.components.each do |name, component|
+      component['script_parameters'].andand.each do |parametername, parameter|
+        parameter = { 'value' => parameter } unless parameter.is_a? Hash
+        if parameter['value'].nil? and parameter['required']
+          if parameter['output_of']
+            next
+          end
+          all_components_have_input = false
+          break
+        end
+      end
+    end
+    return all_components_have_input
+  end
+
+  def progress_table
+    begin
+      # v0 pipeline format
+      nrow = -1
+      components['steps'].collect do |step|
+        nrow += 1
+        row = [nrow, step['name']]
+        if step['complete'] and step['complete'] != 0
+          if step['output_data_locator']
+            row << 1.0
+          else
+            row << 0.0
+          end
+        else
+          row << 0.0
+          if step['failed']
+            self.state = Failed
+          end
+        end
+        row << (step['warehousejob']['id'] rescue nil)
+        row << (step['warehousejob']['revision'] rescue nil)
+        row << step['output_data_locator']
+        row << (Time.parse(step['warehousejob']['finishtime']) rescue nil)
+        row
+      end
+    rescue
+      []
+    end
+  end
+
+  def progress_ratio
+    t = progress_table
+    return 0 if t.size < 1
+    t.collect { |r| r[2] }.inject(0.0) { |sum,a| sum += a } / t.size
+  end
+
+  def self.queue
+    self.where("state = 'RunningOnServer'")
+  end
+
+  def cancel(cascade: false, need_transaction: true)
+    if need_transaction
+      ActiveRecord::Base.transaction do
+        cancel(cascade: cascade, need_transaction: false)
+      end
+      return
+    end
+
+    if self.state.in?([RunningOnServer, RunningOnClient])
+      self.state = Paused
+      self.save!
+    elsif self.state != Paused
+      raise InvalidStateTransitionError
+    end
+
+    return if !cascade
+
+    # cancel all child jobs
+    children = self.components.andand.collect{|_, c| c['job']}.compact.collect{|j| j['uuid']}.compact
+
+    return if children.empty?
+
+    Job.where(uuid: children, state: [Job::Queued, Job::Running]).each do |job|
+      job.cancel(cascade: cascade, need_transaction: false)
+    end
+  end
+
+  protected
+  def bootstrap_components
+    if pipeline_template and (!components or components.empty?)
+      self.components = pipeline_template.components.deep_dup
+    end
+  end
+
+  def update_state
+    if components and progress_ratio == 1.0
+      self.state = Complete
+    end
+  end
+
+  def verify_status
+    changed_attributes = self.changed
+
+    if new_record? or 'components'.in? changed_attributes
+      self.state ||= New
+      if (self.state == New) and self.components_look_ready?
+        self.state = Ready
+      end
+    end
+
+    if self.state.in?(States)
+      true
+    else
+      errors.add :state, "'#{state.inspect} must be one of: [#{States.join ', '}]"
+      false
+    end
+  end
+
+  def set_state_before_save
+    if self.components_look_ready? && (!self.state || self.state == New)
+      self.state = Ready
+    end
+  end
+
+  def update_timestamps_when_state_changes
+    return if not (state_changed? or new_record?)
+
+    case state
+    when RunningOnServer, RunningOnClient
+      self.started_at ||= db_current_time
+    when Failed, Complete
+      current_time = db_current_time
+      self.started_at ||= current_time
+      self.finished_at ||= current_time
+    end
+  end
+
+end
diff --git a/services/api/app/models/pipeline_template.rb b/services/api/app/models/pipeline_template.rb
new file mode 100644 (file)
index 0000000..b19a2b0
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class PipelineTemplate < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :components, Hash
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :components
+    t.add :description
+  end
+
+  def self.limit_index_columns_read
+    ["components"]
+  end
+end
diff --git a/services/api/app/models/repository.rb b/services/api/app/models/repository.rb
new file mode 100644 (file)
index 0000000..4865515
--- /dev/null
@@ -0,0 +1,119 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Repository < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+
+  # Order is important here.  We must validate the owner before we can
+  # validate the name.
+  validate :valid_owner
+  validate :name_format, :if => Proc.new { |r| r.errors[:owner_uuid].empty? }
+  validates(:name, uniqueness: true, allow_nil: false)
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :fetch_url
+    t.add :push_url
+    t.add :clone_urls
+  end
+
+  def self.attributes_required_columns
+    super.merge("clone_urls" => ["name"],
+                "fetch_url" => ["name"],
+                "push_url" => ["name"])
+  end
+
+  # Deprecated. Use clone_urls instead.
+  def push_url
+    ssh_clone_url
+  end
+
+  # Deprecated. Use clone_urls instead.
+  def fetch_url
+    ssh_clone_url
+  end
+
+  def clone_urls
+    [ssh_clone_url, https_clone_url].compact
+  end
+
+  def server_path
+    # Find where the repository is stored on the API server's filesystem,
+    # and return that path, or nil if not found.
+    # This method is only for the API server's internal use, and should not
+    # be exposed through the public API.  Following our current gitolite
+    # setup, it searches for repositories stored by UUID, then name; and it
+    # prefers bare repositories over checkouts.
+    [["%s.git"], ["%s", ".git"]].each do |repo_base, *join_args|
+      [:uuid, :name].each do |path_attr|
+        git_dir = File.join(Rails.configuration.git_repositories_dir,
+                            repo_base % send(path_attr), *join_args)
+        return git_dir if File.exist?(git_dir)
+      end
+    end
+    nil
+  end
+
+  protected
+
+  def permission_to_update
+    if not super
+      false
+    elsif current_user.is_admin
+      true
+    elsif name_changed?
+      current_user.uuid == owner_uuid
+    else
+      true
+    end
+  end
+
+  def owner
+    User.find_by_uuid(owner_uuid)
+  end
+
+  def valid_owner
+    if owner.nil? or (owner.username.nil? and (owner.uuid != system_user_uuid))
+      errors.add(:owner_uuid, "must refer to a user with a username")
+      false
+    end
+  end
+
+  def name_format
+    if owner.uuid == system_user_uuid
+      prefix_match = ""
+      errmsg_start = "must be"
+    else
+      prefix_match = Regexp.escape(owner.username + "/")
+      errmsg_start = "must be the owner's username, then '/', then"
+    end
+    if not (/^#{prefix_match}[A-Za-z][A-Za-z0-9]*$/.match(name))
+      errors.add(:name,
+                 "#{errmsg_start} a letter followed by alphanumerics")
+      false
+    end
+  end
+
+  def ssh_clone_url
+    _clone_url :git_repo_ssh_base, 'git@git.%s.arvadosapi.com:'
+  end
+
+  def https_clone_url
+    _clone_url :git_repo_https_base, 'https://git.%s.arvadosapi.com/'
+  end
+
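+  # Build a clone URL for this repository from the configured base (or
+  # the default format string) plus the repository name. For example,
+  # with uuid prefix "zzzzz" and the default SSH base, a repository
+  # named "foo" yields "git@git.zzzzz.arvadosapi.com:foo.git".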
+  def _clone_url config_var, default_base_fmt
+    configured_base = Rails.configuration.send config_var
+    return nil if configured_base == false
+    prefix = new_record? ? Rails.configuration.uuid_prefix : uuid[0,5]
+    if prefix == Rails.configuration.uuid_prefix and configured_base != true
+      base = configured_base
+    else
+      base = default_base_fmt % prefix
+    end
+    '%s%s.git' % [base, name]
+  end
+end
diff --git a/services/api/app/models/specimen.rb b/services/api/app/models/specimen.rb
new file mode 100644 (file)
index 0000000..32d5ed5
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Specimen < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :properties, Hash
+
+  api_accessible :user, extend: :common do |t|
+    t.add :material
+    t.add :properties
+  end
+
+  def properties
+    @properties ||= Hash.new
+    super
+  end
+end
diff --git a/services/api/app/models/trait.rb b/services/api/app/models/trait.rb
new file mode 100644 (file)
index 0000000..2d3556b
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Trait < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :properties, Hash
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :properties
+  end
+end
diff --git a/services/api/app/models/user.rb b/services/api/app/models/user.rb
new file mode 100644 (file)
index 0000000..8ed97e6
--- /dev/null
@@ -0,0 +1,603 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'can_be_an_owner'
+require 'refresh_permission_view'
+
+class User < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  include CanBeAnOwner
+
+  serialize :prefs, Hash
+  has_many :api_client_authorizations
+  validates(:username,
+            format: {
+              with: /\A[A-Za-z][A-Za-z0-9]*\z/,
+              message: "must begin with a letter and contain only alphanumerics",
+            },
+            uniqueness: true,
+            allow_nil: true)
+  before_update :prevent_privilege_escalation
+  before_update :prevent_inactive_admin
+  before_update :verify_repositories_empty, :if => Proc.new { |user|
+    user.username.nil? and user.username_changed?
+  }
+  before_update :setup_on_activate
+  before_create :check_auto_admin
+  before_create :set_initial_username, :if => Proc.new { |user|
+    user.username.nil? and user.email
+  }
+  after_create :setup_on_activate
+  after_create :add_system_group_permission_link
+  after_create :invalidate_permissions_cache
+  after_create :auto_setup_new_user, :if => Proc.new { |user|
+    Rails.configuration.auto_setup_new_users and
+    (user.uuid != system_user_uuid) and
+    (user.uuid != anonymous_user_uuid)
+  }
+  after_create :send_admin_notifications
+  after_update :send_profile_created_notification
+  after_update :sync_repository_names, :if => Proc.new { |user|
+    (user.uuid != system_user_uuid) and
+    user.username_changed? and
+    (not user.username_was.nil?)
+  }
+
+  has_many :authorized_keys, :foreign_key => :authorized_user_uuid, :primary_key => :uuid
+  has_many :repositories, foreign_key: :owner_uuid, primary_key: :uuid
+
+  default_scope { where('redirect_to_user_uuid is null') }
+
+  api_accessible :user, extend: :common do |t|
+    t.add :email
+    t.add :username
+    t.add :full_name
+    t.add :first_name
+    t.add :last_name
+    t.add :identity_url
+    t.add :is_active
+    t.add :is_admin
+    t.add :is_invited
+    t.add :prefs
+    t.add :writable_by
+  end
+
+  ALL_PERMISSIONS = {read: true, write: true, manage: true}
+
+  # Map numeric permission levels (see lib/create_permission_view.sql)
+  # back to read/write/manage flags.
+  PERMS_FOR_VAL =
+    [{},
+     {read: true},
+     {read: true, write: true},
+     {read: true, write: true, manage: true}]
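+  # For example, PERMS_FOR_VAL[2] is {read: true, write: true}.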
+
+  def full_name
+    "#{first_name} #{last_name}".strip
+  end
+
+  def is_invited
+    !!(self.is_active ||
+       Rails.configuration.new_users_are_active ||
+       self.groups_i_can(:read).select { |x| x.match(/-f+$/) }.first)
+  end
+
+  def groups_i_can(verb)
+    my_groups = self.group_permissions.select { |uuid, mask| mask[verb] }.keys
+    if verb == :read
+      my_groups << anonymous_group_uuid
+    end
+    my_groups
+  end
+
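+  # Check whether this user is permitted to perform the given actions,
+  # e.g., user.can?(read: group_uuid, write: collection). Targets may
+  # be given as uuids or as model objects. Returns true only if every
+  # listed action is allowed.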
+  def can?(actions)
+    return true if is_admin
+    actions.each do |action, target|
+      unless target.nil?
+        if target.respond_to? :uuid
+          target_uuid = target.uuid
+        else
+          target_uuid = target
+          target = ArvadosModel.find_by_uuid(target_uuid)
+        end
+      end
+      next if target_uuid == self.uuid
+      next if (group_permissions[target_uuid] and
+               group_permissions[target_uuid][action])
+      if target.respond_to? :owner_uuid
+        next if target.owner_uuid == self.uuid
+        next if (group_permissions[target.owner_uuid] and
+                 group_permissions[target.owner_uuid][action])
+      end
+      sufficient_perms = case action
+                         when :manage
+                           ['can_manage']
+                         when :write
+                           ['can_manage', 'can_write']
+                         when :read
+                           ['can_manage', 'can_write', 'can_read']
+                         else
+                           # (Skip this kind of permission opportunity
+                           # if action is an unknown permission type)
+                         end
+      if sufficient_perms
+        # Check permission links with head_uuid pointing directly at
+        # the target object. If target is a Group, this is redundant
+        # and will fail except [a] if permission caching is broken or
+        # [b] during a race condition, where a permission link has
+        # *just* been added.
+        if Link.where(link_class: 'permission',
+                      name: sufficient_perms,
+                      tail_uuid: groups_i_can(action) + [self.uuid],
+                      head_uuid: target_uuid).any?
+          next
+        end
+      end
+      return false
+    end
+    true
+  end
+
+  def self.invalidate_permissions_cache(async=false)
+    refresh_permission_view(async)
+  end
+
+  def invalidate_permissions_cache
+    User.invalidate_permissions_cache
+  end
+
+  # Return a hash of {user_uuid: group_perms}
+  def self.all_group_permissions
+    all_perms = {}
+    ActiveRecord::Base.connection.
+      exec_query("SELECT user_uuid, target_owner_uuid, perm_level, trashed
+                  FROM #{PERMISSION_VIEW}
+                  WHERE target_owner_uuid IS NOT NULL",
+                  # "name" arg is a query label that appears in logs:
+                  "all_group_permissions",
+                  ).rows.each do |user_uuid, group_uuid, max_p_val, trashed|
+      all_perms[user_uuid] ||= {}
+      all_perms[user_uuid][group_uuid] = PERMS_FOR_VAL[max_p_val.to_i]
+    end
+    all_perms
+  end
+
+  # Return a hash of {group_uuid: perm_hash} where perm_hash[:read]
+  # and perm_hash[:write] are true if this user can read and write
+  # objects owned by group_uuid.
+  def group_permissions
+    group_perms = {self.uuid => {:read => true, :write => true, :manage => true}}
+    ActiveRecord::Base.connection.
+      exec_query("SELECT target_owner_uuid, perm_level, trashed
+                  FROM #{PERMISSION_VIEW}
+                  WHERE user_uuid = $1
+                  AND target_owner_uuid IS NOT NULL",
+                  # "name" arg is a query label that appears in logs:
+                  "group_permissions for #{uuid}",
+                  # "binds" arg is an array of [col_id, value] for '$1' vars:
+                  [[nil, uuid]],
+                ).rows.each do |group_uuid, max_p_val, trashed|
+      group_perms[group_uuid] = PERMS_FOR_VAL[max_p_val.to_i]
+    end
+    group_perms
+  end
+
+  # create links
+  def setup(openid_prefix:, repo_name: nil, vm_uuid: nil)
+    oid_login_perm = create_oid_login_perm openid_prefix
+    repo_perm = create_user_repo_link repo_name
+    vm_login_perm = create_vm_login_permission_link(vm_uuid, username) if vm_uuid
+    group_perm = create_user_group_link
+
+    return [oid_login_perm, repo_perm, vm_login_perm, group_perm, self].compact
+  end
+
+  # delete user signatures, login, repo, and vm perms, and mark as inactive
+  def unsetup
+    # delete oid_login_perms for this user
+    Link.destroy_all(tail_uuid: self.email,
+                     link_class: 'permission',
+                     name: 'can_login')
+
+    # delete repo_perms for this user
+    Link.destroy_all(tail_uuid: self.uuid,
+                     link_class: 'permission',
+                     name: 'can_manage')
+
+    # delete vm_login_perms for this user
+    Link.destroy_all(tail_uuid: self.uuid,
+                     link_class: 'permission',
+                     name: 'can_login')
+
+    # delete "All users" group read permissions for this user
+    group = Group.where(name: 'All users').select do |g|
+      g[:uuid].match(/-f+$/)
+    end.first
+    Link.destroy_all(tail_uuid: self.uuid,
+                     head_uuid: group[:uuid],
+                     link_class: 'permission',
+                     name: 'can_read')
+
+    # delete any signatures by this user
+    Link.destroy_all(link_class: 'signature',
+                     tail_uuid: self.uuid)
+
+    # delete user preferences (including profile)
+    self.prefs = {}
+
+    # mark the user as inactive
+    self.is_active = false
+    self.save!
+  end
+
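+  # Derive a username from the email address when none is requested;
+  # e.g., "jane.doe+test@example.com" yields username "janedoe".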
+  def set_initial_username(requested: false)
+    if !requested.is_a?(String) || requested.empty?
+      email_parts = email.partition("@")
+      local_parts = email_parts.first.partition("+")
+      if email_parts.any?(&:empty?)
+        return
+      elsif not local_parts.first.empty?
+        requested = local_parts.first
+      else
+        requested = email_parts.first
+      end
+    end
+    requested.sub!(/^[^A-Za-z]+/, "")
+    requested.gsub!(/[^A-Za-z0-9]/, "")
+    unless requested.empty?
+      self.username = find_usable_username_from(requested)
+    end
+  end
+
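+  # Change this user's uuid to new_uuid, updating every *_uuid
+  # reference to it across all tables. Admin only; not allowed for
+  # system accounts.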
+  def update_uuid(new_uuid:)
+    if !current_user.andand.is_admin
+      raise PermissionDeniedError
+    end
+    if uuid == system_user_uuid || uuid == anonymous_user_uuid
+      raise "update_uuid cannot update system accounts"
+    end
+    if self.class != self.class.resource_class_for_uuid(new_uuid)
+      raise "invalid new_uuid #{new_uuid.inspect}"
+    end
+    transaction(requires_new: true) do
+      reload
+      old_uuid = self.uuid
+      self.uuid = new_uuid
+      save!(validate: false)
+      change_all_uuid_refs(old_uuid: old_uuid, new_uuid: new_uuid)
+    end
+  end
+
+  # Move this user's (i.e., self's) owned items into new_owner_uuid.
+  # Also redirect future uses of this account to
+  # redirect_to_user_uuid, i.e., when a caller authenticates to this
+  # account in the future, the account redirect_to_user_uuid account
+  # will be used instead.
+  #
+  # current_user must have admin privileges, i.e., the caller is
+  # responsible for checking permission to do this.
+  def merge(new_owner_uuid:, redirect_to_user_uuid:)
+    raise PermissionDeniedError if !current_user.andand.is_admin
+    raise "not implemented" if !redirect_to_user_uuid
+    transaction(requires_new: true) do
+      reload
+      raise "cannot merge an already merged user" if self.redirect_to_user_uuid
+
+      new_user = User.where(uuid: redirect_to_user_uuid).first
+      raise "user does not exist" if !new_user
+      raise "cannot merge to an already merged user" if new_user.redirect_to_user_uuid
+
+      # Existing API tokens are updated to authenticate to the new
+      # user.
+      ApiClientAuthorization.
+        where(user_id: id).
+        update_all(user_id: new_user.id)
+
+      # References to the old user UUID in the context of a user ID
+      # (rather than a "home project" in the project hierarchy) are
+      # updated to point to the new user.
+      [
+        [AuthorizedKey, :owner_uuid],
+        [AuthorizedKey, :authorized_user_uuid],
+        [Repository, :owner_uuid],
+        [Link, :owner_uuid],
+        [Link, :tail_uuid],
+        [Link, :head_uuid],
+      ].each do |klass, column|
+        klass.where(column => uuid).update_all(column => new_user.uuid)
+      end
+
+      # References to the merged user's "home project" are updated to
+      # point to new_owner_uuid.
+      ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|
+        next if [ApiClientAuthorization,
+                 AuthorizedKey,
+                 Link,
+                 Log,
+                 Repository].include?(klass)
+        next if !klass.columns.collect(&:name).include?('owner_uuid')
+        klass.where(owner_uuid: uuid).update_all(owner_uuid: new_owner_uuid)
+      end
+
+      update_attributes!(redirect_to_user_uuid: new_user.uuid)
+      invalidate_permissions_cache
+    end
+  end
+
+  protected
+
+  def change_all_uuid_refs(old_uuid:, new_uuid:)
+    ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|
+      klass.columns.each do |col|
+        if col.name.end_with?('_uuid')
+          column = col.name.to_sym
+          klass.where(column => old_uuid).update_all(column => new_uuid)
+        end
+      end
+    end
+  end
+
+  def ensure_ownership_path_leads_to_user
+    true
+  end
+
+  def permission_to_update
+    if username_changed? || redirect_to_user_uuid_changed?
+      current_user.andand.is_admin
+    else
+      # users must be able to update themselves (even if they are
+      # inactive) in order to create sessions
+      self == current_user or super
+    end
+  end
+
+  def permission_to_create
+    current_user.andand.is_admin or
+      (self == current_user &&
+       self.redirect_to_user_uuid.nil? &&
+       self.is_active == Rails.configuration.new_users_are_active)
+  end
+
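+  # Grant admin and active status on creation to the configured
+  # auto_admin_user address, or to the first non-system user when
+  # auto_admin_first_user is enabled and no other admins exist.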
+  def check_auto_admin
+    return if self.uuid.end_with?('anonymouspublic')
+    if (User.where("email = ?",self.email).where(:is_admin => true).count == 0 and
+        Rails.configuration.auto_admin_user and self.email == Rails.configuration.auto_admin_user) or
+       (User.where("uuid not like '%-000000000000000'").where(:is_admin => true).count == 0 and
+        Rails.configuration.auto_admin_first_user)
+      self.is_admin = true
+      self.is_active = true
+    end
+  end
+
+  def find_usable_username_from(basename)
+    # If "basename" is a usable username, return that.
+    # Otherwise, find a unique username "basenameN", where N is the
+    # smallest integer greater than 1, and return that.
+    # Return nil if a unique username can't be found after reasonable
+    # searching.
+    quoted_name = self.class.connection.quote_string(basename)
+    next_username = basename
+    next_suffix = 1
+    while Rails.configuration.auto_setup_name_blacklist.include?(next_username)
+      next_suffix += 1
+      next_username = "%s%i" % [basename, next_suffix]
+    end
+    0.upto(6).each do |suffix_len|
+      pattern = "%s%s" % [quoted_name, "_" * suffix_len]
+      self.class.unscoped.
+          where("username like '#{pattern}'").
+          select(:username).
+          order('username asc').
+          each do |other_user|
+        if other_user.username > next_username
+          break
+        elsif other_user.username == next_username
+          next_suffix += 1
+          next_username = "%s%i" % [basename, next_suffix]
+        end
+      end
+      return next_username if (next_username.size <= pattern.size)
+    end
+    nil
+  end
+
+  def prevent_privilege_escalation
+    if current_user.andand.is_admin
+      return true
+    end
+    if self.is_active_changed?
+      if self.is_active != self.is_active_was
+        logger.warn "User #{current_user.uuid} tried to change is_active from #{self.is_active_was} to #{self.is_active} for #{self.uuid}"
+        self.is_active = self.is_active_was
+      end
+    end
+    if self.is_admin_changed?
+      if self.is_admin != self.is_admin_was
+        logger.warn "User #{current_user.uuid} tried to change is_admin from #{self.is_admin_was} to #{self.is_admin} for #{self.uuid}"
+        self.is_admin = self.is_admin_was
+      end
+    end
+    true
+  end
+
+  def prevent_inactive_admin
+    if self.is_admin and not self.is_active
+      # There is no known use case for the strange set of permissions
+      # that would result from this change. It's safest to assume it's
+      # a mistake and disallow it outright.
+      raise "Admin users cannot be inactive"
+    end
+    true
+  end
+
+  def search_permissions(start, graph, merged={}, upstream_mask=nil, upstream_path={})
+    nextpaths = graph[start]
+    return merged if !nextpaths
+    return merged if upstream_path.has_key? start
+    upstream_path[start] = true
+    upstream_mask ||= ALL_PERMISSIONS
+    nextpaths.each do |head, mask|
+      merged[head] ||= {}
+      mask.each do |k,v|
+        merged[head][k] ||= v if upstream_mask[k]
+      end
+      search_permissions(head, graph, merged, upstream_mask.select { |k,v| v && merged[head][k] }, upstream_path)
+    end
+    upstream_path.delete start
+    merged
+  end
+
+  def create_oid_login_perm(openid_prefix)
+    # Check oid_login_perm
+    oid_login_perms = Link.where(tail_uuid: self.email,
+                                 head_uuid: self.uuid,
+                                 link_class: 'permission',
+                                 name: 'can_login')
+
+    if !oid_login_perms.any?
+      # create openid login permission
+      oid_login_perm = Link.create!(link_class: 'permission',
+                                   name: 'can_login',
+                                   tail_uuid: self.email,
+                                   head_uuid: self.uuid,
+                                   properties: {
+                                     "identity_url_prefix" => openid_prefix,
+                                   })
+      logger.info { "openid login permission: " + oid_login_perm[:uuid] }
+    else
+      oid_login_perm = oid_login_perms.first
+    end
+
+    return oid_login_perm
+  end
+
+  def create_user_repo_link(repo_name)
+    # repo_name is optional
+    if not repo_name
+      logger.warn ("Repository name not given for #{self.uuid}.")
+      return
+    end
+
+    repo = Repository.where(owner_uuid: uuid, name: repo_name).first_or_create!
+    logger.info { "repo uuid: " + repo[:uuid] }
+    repo_perm = Link.where(tail_uuid: uuid, head_uuid: repo.uuid,
+                           link_class: "permission",
+                           name: "can_manage").first_or_create!
+    logger.info { "repo permission: " + repo_perm[:uuid] }
+    return repo_perm
+  end
+
+  # create login permission for the given vm_uuid, if it does not already exist
+  def create_vm_login_permission_link(vm_uuid, repo_name)
+    # vm uuid is optional
+    return if !vm_uuid
+
+    vm = VirtualMachine.where(uuid: vm_uuid).first
+    if !vm
+      logger.warn "Could not find virtual machine for #{vm_uuid.inspect}"
+      raise "No vm found for #{vm_uuid}"
+    end
+
+    logger.info { "vm uuid: " + vm[:uuid] }
+    login_attrs = {
+      tail_uuid: uuid, head_uuid: vm.uuid,
+      link_class: "permission", name: "can_login",
+    }
+
+    login_perm = Link.
+      where(login_attrs).
+      select { |link| link.properties["username"] == repo_name }.
+      first
+
+    login_perm ||= Link.
+      create(login_attrs.merge(properties: {"username" => repo_name}))
+
+    logger.info { "login permission: " + login_perm[:uuid] }
+    login_perm
+  end
+
+  # add the user to the 'All users' group
+  def create_user_group_link
+    return (Link.where(tail_uuid: self.uuid,
+                       head_uuid: all_users_group[:uuid],
+                       link_class: 'permission',
+                       name: 'can_read').first or
+            Link.create(tail_uuid: self.uuid,
+                        head_uuid: all_users_group[:uuid],
+                        link_class: 'permission',
+                        name: 'can_read'))
+  end
+
+  # Give the special "System group" permission to manage this user and
+  # all of this user's stuff.
+  def add_system_group_permission_link
+    return true if uuid == system_user_uuid
+    act_as_system_user do
+      Link.create(link_class: 'permission',
+                  name: 'can_manage',
+                  tail_uuid: system_group_uuid,
+                  head_uuid: self.uuid)
+    end
+  end
+
+  # Send admin notifications
+  def send_admin_notifications
+    AdminNotifier.new_user(self).deliver_now
+    if not self.is_active
+      AdminNotifier.new_inactive_user(self).deliver_now
+    end
+  end
+
+  # Automatically setup if is_active flag turns on
+  def setup_on_activate
+    return if [system_user_uuid, anonymous_user_uuid].include?(self.uuid)
+    if is_active && (new_record? || is_active_changed?)
+      setup(openid_prefix: Rails.configuration.default_openid_prefix)
+    end
+  end
+
+  # Automatically setup new user during creation
+  def auto_setup_new_user
+    setup(openid_prefix: Rails.configuration.default_openid_prefix)
+    if username
+      create_vm_login_permission_link(Rails.configuration.auto_setup_new_users_with_vm_uuid,
+                                      username)
+      repo_name = "#{username}/#{username}"
+      if Rails.configuration.auto_setup_new_users_with_repository and
+          Repository.where(name: repo_name).first.nil?
+        repo = Repository.create!(name: repo_name, owner_uuid: uuid)
+        Link.create!(tail_uuid: uuid, head_uuid: repo.uuid,
+                     link_class: "permission", name: "can_manage")
+      end
+    end
+  end
+
+  # Send notification if the user saved profile for the first time
+  def send_profile_created_notification
+    if self.prefs_changed?
+      if self.prefs_was.andand.empty? || !self.prefs_was.andand['profile']
+        profile_notification_address = Rails.configuration.user_profile_notification_address
+        ProfileNotifier.profile_created(self, profile_notification_address).deliver_now if profile_notification_address
+      end
+    end
+  end
+
+  def verify_repositories_empty
+    unless repositories.first.nil?
+      errors.add(:username, "can't be unset when the user owns repositories")
+      false
+    end
+  end
+
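+  # After a username change, rename this user's repositories to match;
+  # e.g., "oldname/foo" becomes "newname/foo".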
+  def sync_repository_names
+    old_name_re = /^#{Regexp.escape(username_was)}\//
+    name_sub = "#{username}/"
+    repositories.find_each do |repo|
+      repo.name = repo.name.sub(old_name_re, name_sub)
+      repo.save!
+    end
+  end
+end
diff --git a/services/api/app/models/user_agreement.rb b/services/api/app/models/user_agreement.rb
new file mode 100644 (file)
index 0000000..c8f1894
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class UserAgreement < Collection
+  # This class exists so that Arvados::V1::SchemaController includes
+  # UserAgreementsController's methods in the discovery document.
+end
diff --git a/services/api/app/models/virtual_machine.rb b/services/api/app/models/virtual_machine.rb
new file mode 100644 (file)
index 0000000..0b3557e
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class VirtualMachine < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+
+  has_many(:login_permissions,
+           -> { where("link_class = 'permission' and name = 'can_login'") },
+           foreign_key: :head_uuid,
+           class_name: 'Link',
+           primary_key: :uuid)
+
+  api_accessible :user, extend: :common do |t|
+    t.add :hostname
+  end
+
+  protected
+
+  def permission_to_create
+    current_user and current_user.is_admin
+  end
+  def permission_to_update
+    current_user and current_user.is_admin
+  end
+end
diff --git a/services/api/app/models/workflow.rb b/services/api/app/models/workflow.rb
new file mode 100644 (file)
index 0000000..94890c6
--- /dev/null
@@ -0,0 +1,54 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class Workflow < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+
+  validate :validate_definition
+  before_save :set_name_and_description
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :description
+    t.add :definition
+  end
+
+  def validate_definition
+    begin
+      @definition_yaml = YAML.load self.definition if !definition.nil?
+    rescue => e
+      errors.add :definition, "is not valid yaml: #{e.message}"
+    end
+  end
+
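+  # When name or description is not set explicitly in this update,
+  # fill it in from the YAML definition (e.g., a definition containing
+  # "name: My workflow") unless the stored value was set by hand.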
+  def set_name_and_description
+    old_wf = {}
+    begin
+      old_wf = YAML.load self.definition_was if !self.definition_was.nil?
+    rescue => e
+      logger.warn "set_name_and_description error: #{e.message}"
+      return
+    end
+
+    ['name', 'description'].each do |a|
+      if !self.changes.include?(a)
+        v = self.read_attribute(a)
+        if !v.present? or v == old_wf[a]
+          val = @definition_yaml[a] if self.definition and @definition_yaml
+          self[a] = val
+        end
+      end
+    end
+  end
+
+  def self.full_text_searchable_columns
+    super - ["definition"]
+  end
+
+  def self.limit_index_columns_read
+    ["definition"]
+  end
+end
diff --git a/services/api/app/views/admin_notifier/new_inactive_user.text.erb b/services/api/app/views/admin_notifier/new_inactive_user.text.erb
new file mode 100644 (file)
index 0000000..097412c
--- /dev/null
@@ -0,0 +1,17 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+
+A new user landed on the inactive user page:
+
+  <%= @user.full_name %> <<%= @user.email %>>
+
+<% if Rails.configuration.workbench_address -%>
+Please see workbench for more information:
+
+  <%= Rails.configuration.workbench_address %>
+
+<% end -%>
+Thanks,
+Your friendly Arvados robot.
diff --git a/services/api/app/views/admin_notifier/new_user.text.erb b/services/api/app/views/admin_notifier/new_user.text.erb
new file mode 100644 (file)
index 0000000..d21513f
--- /dev/null
@@ -0,0 +1,25 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%
+  add_to_message = ''
+  if Rails.configuration.auto_setup_new_users
+    add_to_message = @user.is_invited ? ' and setup' : ', but not setup'
+  end
+%>
+A new user has been created<%=add_to_message%>:
+
+  <%= @user.full_name %> <<%= @user.email %>>
+
+This user is <%= @user.is_active ? '' : 'NOT ' %>active.
+
+<% if Rails.configuration.workbench_address -%>
+Please see workbench for more information:
+
+  <%= Rails.configuration.workbench_address %>
+
+<% end -%>
+Thanks,
+Your friendly Arvados robot.
+
diff --git a/services/api/app/views/layouts/application.html.erb b/services/api/app/views/layouts/application.html.erb
new file mode 100644 (file)
index 0000000..3028595
--- /dev/null
@@ -0,0 +1,49 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<!DOCTYPE html>
+<html>
+<head>
+  <title>Server</title>
+  <%= stylesheet_link_tag    "application" %>
+  <%= javascript_include_tag "application" %>
+  <%= csrf_meta_tags %>
+</head>
+<body>
+<div id="header">
+  <div class="apptitle">ARVADOS <span class="beta"><span>BETA</span></span></div>
+  <div style="float:right">
+    <% if current_user %>
+    <%= current_user.full_name %>
+    <% if current_user.is_admin %>
+    &nbsp;&bull;&nbsp;
+    <a class="logout" href="/admin/users">Admin</a>
+    <% end %>
+    &nbsp;&bull;&nbsp;
+    <a class="logout" href="/logout">Log out</a>
+    <% else %>
+    <a class="logout" href="/auth/joshid">Log in</a>
+    <% end %>
+
+    <% if current_user and session[:real_uid] and session[:switch_back_to] and User.find(session[:real_uid].to_i).verify_userswitch_cookie(session[:switch_back_to]) %>
+    &nbsp;&bull;&nbsp;
+    <span class="sudo-warning">Logged in as <b><%= current_user.full_name %></b>. <%= link_to "Back to #{User.find(session[:real_uid]).full_name}", switch_to_user_path(session[:real_uid]), :method => :post, :class => 'sudo-logout' %></span>
+    <% end %>
+  </div>
+</div>
+
+
+<%= yield %>
+
+<div style="clear:both"></div>
+
+<% if current_user or session['invite_code'] %>
+<div id="footer">
+  <div style="float:right">Questions &rarr; <a href="mailto:arvados@curoverse.com">arvados@curoverse.com</a></div>
+  <div style="clear:both"></div>
+</div>
+<% end %>
+
+</body>
+</html>
diff --git a/services/api/app/views/profile_notifier/profile_created.text.erb b/services/api/app/views/profile_notifier/profile_created.text.erb
new file mode 100644 (file)
index 0000000..e9ddbf0
--- /dev/null
@@ -0,0 +1,6 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+Profile created by user <%=@user.full_name%> <%=@user.email%>
+User's profile: <%=@user.prefs['profile']%>
diff --git a/services/api/app/views/static/intro.html.erb b/services/api/app/views/static/intro.html.erb
new file mode 100644 (file)
index 0000000..bdefaa5
--- /dev/null
@@ -0,0 +1,42 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :js do %>
+$(function(){
+  $('button.login').button().click(function(){window.location=$(this).attr('href')});
+});
+<% end %>
+<div id="intropage">
+  <img class="curoverse-logo" src="<%= asset_path('logo.png') %>" style="display:block; margin:2em auto"/>
+  <div style="width:30em; margin:2em auto 0 auto">
+    <h1>Welcome</h1>
+    <h4>Curoverse ARVADOS</h4>
+
+    <% if !current_user and session['invite_code'] %>
+
+    <p>Curoverse Arvados lets you manage and process human genomes and exomes.  You can start using the private beta
+    now with your Google account.</p>
+    <p style="float:right;margin-top:1em">
+      <button class="login" href="/auth/joshid">Log in and get started</button>
+    </p>
+
+    <% else %>
+
+    <p>Curoverse ARVADOS is transforming how researchers and
+    clinical geneticists use whole genome sequences. </p>
+    <p>If you&rsquo;re interested in learning more, we&rsquo;d love to hear
+    from you &mdash;
+    contact <a href="mailto:arvados@curoverse.com">arvados@curoverse.com</a>.</p>
+
+    <% if !current_user %>
+    <p style="float:right;margin-top:1em">
+      <a href="/auth/joshid">Log in here.</a>
+    </p>
+    <% end %>
+
+    <% end %>
+
+    <div style="clear:both;height:8em"></div>
+  </div>
+</div>
diff --git a/services/api/app/views/static/login_failure.html.erb b/services/api/app/views/static/login_failure.html.erb
new file mode 100644 (file)
index 0000000..0f3141e
--- /dev/null
@@ -0,0 +1,26 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% content_for :js do %>
+$(function(){
+  $('button.login').button().click(function(){window.location=$(this).attr('href')});
+});
+<% end %>
+
+
+<div id="intropage">
+  <img class="curoverse-logo" src="<%= asset_path('logo.png') rescue '/logo.png' %>" style="display:block; margin:2em auto"/>
+  <div style="width:30em; margin:2em auto 0 auto">
+
+    <h1>Error</h1>
+
+    <p>Sorry, something went wrong logging you in. Please try again.</p>
+
+    <p style="float:right;margin-top:1em">
+      <a href="/auth/joshid">Log in here.</a>
+    </p>
+
+    <div style="clear:both;height:8em"></div>
+  </div>
+</div>
diff --git a/services/api/app/views/user_notifier/account_is_setup.text.erb b/services/api/app/views/user_notifier/account_is_setup.text.erb
new file mode 100644 (file)
index 0000000..ca70827
--- /dev/null
@@ -0,0 +1,18 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% if not @user.full_name.empty? -%>
+<%= @user.full_name %>,
+<% else -%>
+Hi there,
+<% end -%>
+
+Your Arvados shell account has been set up. Please visit the virtual machines page <% if Rails.configuration.workbench_address %>at
+
+  <%= Rails.configuration.workbench_address %><%= "/" if !Rails.configuration.workbench_address.end_with?("/") %>users/<%= @user.uuid%>/virtual_machines <% else %><% end %>
+
+for connection instructions.
+
+Thanks,
+The Arvados team.
diff --git a/services/api/app/views/user_sessions/failure.html.erb b/services/api/app/views/user_sessions/failure.html.erb
new file mode 100644 (file)
index 0000000..81c5be2
--- /dev/null
@@ -0,0 +1,10 @@
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<h1>Fail</h1>
+
+<%= notice %>
+
+<br/>
+<a href="/auth/joshid">Retry Login</a>
diff --git a/services/api/config.ru b/services/api/config.ru
new file mode 100644 (file)
index 0000000..30e8281
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This file is used by Rack-based servers to start the application.
+
+require ::File.expand_path('../config/environment',  __FILE__)
+run Server::Application
diff --git a/services/api/config/application.default.yml b/services/api/config/application.default.yml
new file mode 100644 (file)
index 0000000..8f0dbf4
--- /dev/null
@@ -0,0 +1,583 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Do not use this file for site configuration. Create application.yml
+# instead (see application.yml.example).
+#
+# The order of precedence is:
+# 1. config/environments/{RAILS_ENV}.rb (deprecated)
+# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)
+# 3. Section in application.yml called "common"
+# 4. Section in application.default.yml corresponding to RAILS_ENV
+# 5. Section in application.default.yml called "common"
+
+common:
+  ###
+  ### Essential site configuration
+  ###
+
+  # The prefix used for all database identifiers to identify the record as
+  # originating from this site.  Must be exactly 5 alphanumeric characters
+  # (lowercase ASCII letters and digits).
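+  #
+  # Example:
+  #uuid_prefix: zzzzz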
+  uuid_prefix: ~
+
+  # secret_token is a string of alphanumeric characters used by Rails
+  # to sign session tokens. IMPORTANT: This is a site secret. It
+  # should be at least 50 characters.
+  secret_token: ~
+
+  # blob_signing_key is a string of alphanumeric characters used to
+  # generate permission signatures for Keep locators. It must be
+  # identical to the permission key given to Keep. IMPORTANT: This is
+  # a site secret. It should be at least 50 characters.
+  #
+  # Modifying blob_signing_key will invalidate all existing
+  # signatures, which can cause programs to fail (e.g., arv-put,
+  # arv-get, and Crunch jobs).  To avoid errors, rotate keys only when
+  # no such processes are running.
+  blob_signing_key: ~
+
+  # These settings are provided by your OAuth2 provider (e.g.,
+  # sso-provider).
+  sso_app_secret: ~
+  sso_app_id: ~
+  sso_provider_url: ~
+
+  # If this is not false, HTML requests at the API server's root URL
+  # are redirected to this location, and it is provided in the text of
+  # user activation notification email messages to remind them where
+  # to log in.
+  workbench_address: false
+
+  # Client-facing URI for websocket service. Nginx should be
+  # configured to proxy this URI to arvados-ws; see
+  # http://doc.arvados.org/install/install-ws.html
+  #
+  # If websocket_address is false (which is the default), no websocket
+  # server will be advertised to clients. This configuration is not
+  # supported.
+  #
+  # Example:
+  #websocket_address: wss://ws.zzzzz.arvadosapi.com/websocket
+  websocket_address: false
+
+  # Maximum number of websocket connections allowed
+  websocket_max_connections: 500
+
+  # Maximum number of events that may be backlogged for a single connection
+  websocket_max_notify_backlog: 1000
+
+  # Maximum number of subscriptions a single websocket connection can have
+  # active.
+  websocket_max_filters: 10
+
+  # Git repositories must be readable by api server, or you won't be
+  # able to submit crunch jobs. To pass the test suites, put a clone
+  # of the arvados tree in {git_repositories_dir}/arvados.git or
+  # {git_repositories_dir}/arvados/.git
+  git_repositories_dir: /var/lib/arvados/git/repositories
+
+  # This is a (bare) repository that stores commits used in jobs.  When a job
+  # runs, the source commits are first fetched into this repository, then this
+  # repository is used to deploy to compute nodes.  This should NOT be a
+  # subdirectory of {git_repositories_dir}.
+  git_internal_dir: /var/lib/arvados/internal.git
+
+  # Default replication level for collections. This is used when a
+  # collection's replication_desired attribute is nil.
+  default_collection_replication: 2
+
+
+  ###
+  ### Overriding default advertised hostnames/URLs
+  ###
+
+  # If not false, this is the hostname, port, and protocol that will be used
+  # for root_url and advertised in the discovery document.  By default, use
+  # the default Rails logic for deciding on a hostname.
+  host: false
+  port: false
+  protocol: false
+
+  # Base part of SSH git clone url given with repository resources. If
+  # true, the default "git@git.(uuid_prefix).arvadosapi.com:" is
+  # used. If false, SSH clone URLs are not advertised. Include a
+  # trailing ":" or "/" if needed: it will not be added automatically.
+  git_repo_ssh_base: true
+
+  # Base part of HTTPS git clone urls given with repository
+  # resources. This is expected to be an arv-git-httpd service which
+  # accepts API tokens as HTTP-auth passwords. If true, the default
+  # "https://git.(uuid_prefix).arvadosapi.com/" is used. If false,
+  # HTTPS clone URLs are not advertised. Include a trailing ":" or "/"
+  # if needed: it will not be added automatically.
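+  #
+  # Example (hypothetical host):
+  #git_repo_https_base: "https://git.example.com/"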
+  git_repo_https_base: true
+
+
+  ###
+  ### New user and & email settings
+  ###
+
+  # Config parameters to automatically set up new users.  If enabled,
+  # these users will be able to self-activate.  Enable this if you want
+  # to run an open instance where anyone can create an account and use
+  # the system without requiring manual approval.
+  #
+  # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
+  # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
+  auto_setup_new_users: false
+  auto_setup_new_users_with_vm_uuid: false
+  auto_setup_new_users_with_repository: false
+  auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+
+  # When new_users_are_active is set to true, new users will be active
+  # immediately.  This skips the "self-activate" step which enforces
+  # user agreements.  Should only be enabled for development.
+  new_users_are_active: false
+
+  # The e-mail address of the user who should automatically be marked as an
+  # admin user on their first login.
+  # In the default configuration, authentication happens through the Arvados SSO
+  # server, which uses OAuth2 against Google's servers, so in that case this
+  # should be an address associated with a Google account.
+  auto_admin_user: false
+
+  # If auto_admin_first_user is set to true, the first user to log in when no
+  # other admin users exist will automatically become an admin user.
+  auto_admin_first_user: false
+
+  # Email address to notify whenever a user creates a profile for the
+  # first time
+  user_profile_notification_address: false
+
+  admin_notifier_email_from: arvados@example.com
+  email_subject_prefix: "[ARVADOS] "
+  user_notifier_email_from: arvados@example.com
+  new_user_notification_recipients: [ ]
+  new_inactive_user_notification_recipients: [ ]
+
+
+  ###
+  ### Limits, timeouts and durations
+  ###
+
+  # Lifetime (in seconds) of blob permission signatures generated by
+  # the API server. This determines how long a client can take (after
+  # retrieving a collection record) to retrieve the collection data
+  # from Keep. If the client needs more time than that (assuming the
+  # collection still has the same content and the relevant user/token
+  # still has permission) the client can retrieve the collection again
+  # to get fresh signatures.
+  #
+  # This must be exactly equal to the -blob-signature-ttl flag used by
+  # keepstore servers.  Otherwise, reading data blocks and saving
+  # collections will fail with HTTP 403 permission errors.
+  #
+  # Modifying blob_signature_ttl invalidates existing signatures; see
+  # blob_signing_key note above.
+  #
+  # The default is 2 weeks.
+  blob_signature_ttl: 1209600
+
+  # Default lifetime for ephemeral collections: 2 weeks. This must not
+  # be less than blob_signature_ttl.
+  default_trash_lifetime: 1209600
+
+  # Interval (seconds) between trash sweeps. During a trash sweep,
+  # collections are marked as trash if their trash_at time has
+  # arrived, and deleted if their delete_at time has arrived.
+  trash_sweep_interval: 60
+
+  # Interval (seconds) between asynchronous permission view updates. Any
+  # permission-updating API called with the 'async' parameter schedules an
+  # update of the permission view in the future, if not already scheduled.
+  async_permissions_update_interval: 20
+
+  # Maximum characters of (JSON-encoded) query parameters to include
+  # in each request log entry. When params exceed this size, they will
+  # be JSON-encoded, truncated to this size, and logged as
+  # params_truncated.
+  max_request_log_params_size: 2000
+
+  # Maximum size (in bytes) allowed for a single API request.  This
+  # limit is published in the discovery document for use by clients.
+  # Note: You must separately configure the upstream web server or
+  # proxy to actually enforce the desired maximum request size on the
+  # server side.
+  max_request_size: 134217728
+
+  # Limit the number of bytes read from the database during an index
+  # request (by retrieving and returning fewer rows than would
+  # normally be returned in a single response).
+  # Note 1: This setting never reduces the number of returned rows to
+  # zero, no matter how big the first data row is.
+  # Note 2: Currently, this is only checked against a specific set of
+  # columns that tend to get large (collections.manifest_text,
+  # containers.mounts, workflows.definition). Other fields (e.g.,
+  # "properties" hashes) are not counted against this limit.
+  max_index_database_read: 134217728
+
+  # Maximum number of items to return when responding to APIs that
+  # can return partial result sets using limit and offset parameters
+  # (e.g., *.index, groups.contents). If a request specifies a "limit"
+  # parameter higher than this value, this value is used instead.
+  max_items_per_response: 1000
+
+  # When you run the db:delete_old_job_logs task, it will find jobs that
+  # have been finished for at least this many seconds, and delete their
+  # stderr logs from the logs table.
+  clean_job_log_rows_after: <%= 30.days %>
+
+  # When you run the db:delete_old_container_logs task, it will find
+  # containers that have been finished for at least this many seconds,
+  # and delete their stdout, stderr, arv-mount, crunch-run, and
+  # crunchstat logs from the logs table.
+  clean_container_log_rows_after: <%= 30.days %>
+
+  # Time to keep audit logs, in seconds. (An audit log is a row added
+  # to the "logs" table in the PostgreSQL database each time an
+  # Arvados object is created, modified, or deleted.)
+  #
+  # Currently, websocket event notifications rely on audit logs, so
+  # this should not be set lower than 300 (5 minutes).
+  max_audit_log_age: 1209600
+
+  # Maximum number of log rows to delete in a single SQL transaction.
+  #
+  # If max_audit_log_delete_batch is 0, log entries will never be
+  # deleted by Arvados. Cleanup can be done by an external process
+  # without affecting any Arvados system processes, as long as very
+  # recent (<5 minutes old) logs are not deleted.
+  #
+  # 100000 is a reasonable batch size for most sites.
+  max_audit_log_delete_batch: 0
+
+  # The maximum number of compute nodes that can be in use simultaneously.
+  # If this limit is reduced, any existing nodes with slot number >= new limit
+  # will not be counted against the new limit. In other words, the new limit
+  # won't be strictly enforced until those nodes with higher slot numbers
+  # go down.
+  max_compute_nodes: 64
+
+  # These two settings control how frequently log events are flushed to the
+  # database.  Log lines are buffered until either crunch_log_bytes_per_event
+  # has been reached or crunch_log_seconds_between_events has elapsed since
+  # the last flush.
+  crunch_log_bytes_per_event: 4096
+  crunch_log_seconds_between_events: 1
+
+  # The sample period for throttling logs, in seconds.
+  crunch_log_throttle_period: 60
+
+  # Maximum number of bytes that a job can log over crunch_log_throttle_period
+  # before being silenced until the end of the period.
+  crunch_log_throttle_bytes: 65536
+
+  # Maximum number of lines that a job can log over crunch_log_throttle_period
+  # before being silenced until the end of the period.
+  crunch_log_throttle_lines: 1024
+
+  # Maximum bytes that may be logged by a single job.  Log bytes that are
+  # silenced by throttling are not counted against this total.
+  crunch_limit_log_bytes_per_job: 67108864
+
+  crunch_log_partial_line_throttle_period: 5
+
+  # Container logs are written to Keep and saved in a collection,
+  # which is updated periodically while the container runs.  This
+  # value sets the interval (given in seconds) between collection
+  # updates.
+  crunch_log_update_period: 1800
+
+  # The log collection is also updated when the specified amount of
+  # log data (given in bytes) is produced in less than one update
+  # period.
+  crunch_log_update_size: 33554432
+
+  # Attributes to suppress in events and audit logs.  Notably,
+  # specifying ["manifest_text"] here typically makes the database
+  # smaller and faster.
+  #
+  # Warning: Using any non-empty value here can have undesirable side
+  # effects for any client or component that relies on event logs.
+  # Use at your own risk.
+  unlogged_attributes: []
+
+  # API methods to disable. Disabled methods are not listed in the
+  # discovery document, and respond 404 to all requests.
+  # Example: ["jobs.create", "pipeline_instances.create"]
+  disable_api_methods: []
+
+  # Enable the legacy Jobs API.
+  # auto -- (default) enable the Jobs API only if it has been used before
+  #         (i.e., there are job records in the database)
+  # true -- enable the Jobs API despite lack of existing records.
+  # false -- disable the Jobs API despite presence of existing records.
+  enable_legacy_jobs_api: auto
+
+  ###
+  ### Crunch, DNS & compute node management
+  ###
+
+  # Preemptible instance support (e.g. AWS Spot Instances)
+  # When true, child containers will get created with the preemptible
+  # scheduling parameter set.
+  preemptible_instances: false
+
+  # Docker image to be used when none found in runtime_constraints of a job
+  default_docker_image_for_jobs: false
+
+  # List of supported Docker Registry image formats that compute nodes
+  # are able to use. `arv keep docker` will error out if a user tries
+  # to store an image with an unsupported format. Use an empty array
+  # to skip the compatibility check (and display a warning message to
+  # that effect).
+  #
+  # Example for sites running docker < 1.10: ["v1"]
+  # Example for sites running docker >= 1.10: ["v2"]
+  # Example for disabling check: []
+  docker_image_formats: ["v2"]
+
+  # :none or :slurm_immediate
+  crunch_job_wrapper: :none
+
+  # username, or false = do not set uid when running jobs.
+  crunch_job_user: crunch
+
+  # The web service must be able to create/write this file, and
+  # crunch-job must be able to stat() it.
+  crunch_refresh_trigger: /tmp/crunch_refresh_trigger
+
+  # Path to dns server configuration directory
+  # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
+  # files or touch restart.txt (see below).
+  dns_server_conf_dir: false
+
+  # Template file for the dns server host snippets. See
+  # unbound.template in this directory for an example. If false, do
+  # not write any config files.
+  dns_server_conf_template: false
+
+  # String to write to {dns_server_conf_dir}/restart.txt (with a
+  # trailing newline) after updating local data. If false, do not
+  # open or write the restart.txt file.
+  dns_server_reload_command: false
+
+  # Command to run after each DNS update. Template variables will be
+  # substituted; see the "unbound" example below. If false, do not run
+  # a command.
+  dns_server_update_command: false
+
+  ## Example for unbound:
+  #dns_server_conf_dir: /etc/unbound/conf.d
+  #dns_server_conf_template: /path/to/your/api/server/config/unbound.template
+  ## ...plus one of the following two methods of reloading:
+  #dns_server_reload_command: unbound-control reload
+  #dns_server_update_command: echo %{hostname} %{hostname}.%{uuid_prefix} %{hostname}.%{uuid_prefix}.arvadosapi.com %{ptr_domain} | xargs -n 1 unbound-control local_data_remove && unbound-control local_data %{hostname} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix}.arvadosapi.com IN A %{ip_address} && unbound-control local_data %{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com
+
+  compute_node_domain: false
+  compute_node_nameservers:
+    - 192.168.1.1
+
+  # Hostname to assign to a compute node when it sends a "ping" and the
+  # hostname in its Node record is nil.
+  # During bootstrapping, the "ping" script is expected to notice the
+  # hostname given in the ping response, and update its unix hostname
+  # accordingly.
+  # If false, leave the hostname alone (this is appropriate if your compute
+  # nodes' hostnames are already assigned by some other mechanism).
+  #
+  # One way or another, the hostnames of your node records should agree
+  # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
+  #
+  # Example for compute0000, compute0001, ....:
+  # assign_node_hostname: compute%<slot_number>04d
+  # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
+  assign_node_hostname: compute%<slot_number>d
+
+
+  ###
+  ### Job and container reuse logic.
+  ###
+
+  # Include details about job reuse decisions in the server log. This
+  # causes additional database queries to run, so it should not be
+  # enabled unless you expect to examine the resulting logs for
+  # troubleshooting purposes.
+  log_reuse_decisions: false
+
+  # Control job reuse behavior when two completed jobs match the
+  # search criteria and have different outputs.
+  #
+  # If true, in case of a conflict, reuse the earliest job (this is
+  # similar to container reuse behavior).
+  #
+  # If false, in case of a conflict, do not reuse any completed job,
+  # but do reuse an already-running job if available (this is the
+  # original job reuse behavior, and is still the default).
+  reuse_job_if_outputs_differ: false
+
+  ###
+  ### Federation support.
+  ###
+
+  # You can enable use of this cluster by users who are authenticated
+  # by a remote Arvados site. Control which remote hosts are trusted
+  # to authenticate which user IDs by configuring remote_hosts,
+  # remote_hosts_via_dns, or both. The default configuration disables
+  # remote authentication.
+
+  # Map known prefixes to hosts. For example, if user IDs beginning
+  # with "zzzzz-" should be authenticated by the Arvados server at
+  # "zzzzz.example.com", use:
+  #
+  # remote_hosts:
+  #   zzzzz: zzzzz.example.com
+  remote_hosts: {}
+
+  # Use {prefix}.arvadosapi.com for any prefix not given in
+  # remote_hosts above.
+  remote_hosts_via_dns: false
+
+  # List of cluster prefixes.  These are "trusted" clusters, users
+  # from the clusters listed here will be automatically setup and
+  # activated.  This is separate from the settings
+  # auto_setup_new_users and new_users_are_active.
+  auto_activate_users_from: []
+
+  ###
+  ### Remaining assorted configuration options.
+  ###
+
+  arvados_theme: default
+
+  # Permit insecure (OpenSSL::SSL::VERIFY_NONE) connections to the
+  # Single Sign On (sso) server and remote Arvados sites.  Should only
+  # be enabled during development when the SSO server is using a
+  # self-signed cert.
+  sso_insecure: false
+
+  ## Set Time.zone default to the specified zone and make Active
+  ## Record auto-convert to this zone.  Run "rake -D time" for a list
+  ## of tasks for finding time zone names. Default is UTC.
+  #time_zone: Central Time (US & Canada)
+
+  ## Default encoding used in templates for Ruby 1.9.
+  encoding: utf-8
+
+  # Enable the asset pipeline
+  assets.enabled: true
+
+  # Version of your assets, change this if you want to expire all your assets
+  assets.version: "1.0"
+
+  # Allow clients to create collections by providing a manifest with
+  # unsigned data blob locators. IMPORTANT: This effectively disables
+  # access controls for data stored in Keep: a client who knows a hash
+  # can write a manifest that references the hash, pass it to
+  # collections.create (which will create a permission link), use
+  # collections.get to obtain a signature for that data locator, and
+  # use that signed locator to retrieve the data from Keep. Therefore,
+  # do not turn this on if your users expect to keep data private from
+  # one another!
+  permit_create_collection_with_unsigned_manifest: false
+
+  default_openid_prefix: https://www.google.com/accounts/o8/id
+
+  # Override the automatic version string. With the default value of
+  # false, the version string is read from git-commit.version in
+  # Rails.root (included in vendor packages) or determined by invoking
+  # "git log".
+  source_version: false
+
+  # Override the automatic package version string. With the default version of
+  # false, the package version is read from package-build.version in Rails.root
+  # (included in vendor packages).
+  package_version: false
+
+  # Default value for container_count_max for container requests.  This is the
+  # number of times Arvados will create a new container to satisfy a container
+  # request.  If a container is cancelled, a new container will be created
+  # for any associated container request whose container_count is still
+  # below container_count_max.
+  container_count_max: 3
+
+  # Default value for keep_cache_ram of a container's runtime_constraints.
+  container_default_keep_cache_ram: 268435456
+
+  # Token to be included in all healthcheck requests. Disabled by default.
+  # Server expects request header of the format "Authorization: Bearer xxx"
+  ManagementToken: false
+
+  # URL of keep-web service.  Provides read/write access to collections via
+  # HTTP and WebDAV protocols.
+  #
+  # Example:
+  # keep_web_service_url: https://download.uuid_prefix.arvadosapi.com/
+  keep_web_service_url: false
+
+  # If true, enable collection versioning.
+  # When a collection's preserve_version field is true or the current version
+  # is older than the amount of seconds defined on preserve_version_if_idle,
+  # a snapshot of the collection's previous state is created and linked to
+  # the current collection.
+  collection_versioning: false
+  #   0 = auto-create a new version on every update.
+  #  -1 = never auto-create new versions.
+  # > 0 = auto-create a new version when older than the specified number of seconds.
+  preserve_version_if_idle: -1
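+
+  # For example, to take at most one new version per hour of idle time
+  # (illustrative values only):
+  #
+  #   collection_versioning: true
+  #   preserve_version_if_idle: 3600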
+
+  # Number of times a container can be unlocked before being
+  # automatically cancelled.
+  max_container_dispatch_attempts: 5
+
+development:
+  force_ssl: false
+  cache_classes: false
+  whiny_nils: true
+  consider_all_requests_local: true
+  action_controller.perform_caching: false
+  action_mailer.raise_delivery_errors: false
+  action_mailer.perform_deliveries: false
+  active_support.deprecation: :log
+  action_dispatch.best_standards_support: :builtin
+  active_record.auto_explain_threshold_in_seconds: 0.5
+  assets.compress: false
+  assets.debug: true
+
+production:
+  force_ssl: true
+  cache_classes: true
+  consider_all_requests_local: false
+  action_controller.perform_caching: true
+  serve_static_files: false
+  assets.compress: true
+  assets.compile: false
+  assets.digest: true
+
+test:
+  force_ssl: false
+  cache_classes: true
+  serve_static_files: true
+  static_cache_control: public, max-age=3600
+  whiny_nils: true
+  consider_all_requests_local: true
+  action_controller.perform_caching: false
+  action_dispatch.show_exceptions: false
+  action_controller.allow_forgery_protection: false
+  action_mailer.delivery_method: :test
+  active_support.deprecation: :stderr
+  uuid_prefix: zzzzz
+  sso_app_id: arvados-server
+  sso_app_secret: <%= rand(2**512).to_s(36) %>
+  sso_provider_url: http://localhost:3002
+  secret_token: <%= rand(2**512).to_s(36) %>
+  blob_signing_key: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
+  user_profile_notification_address: arvados@example.com
+  workbench_address: https://localhost:3001/
+  git_repositories_dir: <%= Rails.root.join 'tmp', 'git', 'test' %>
+  git_internal_dir: <%= Rails.root.join 'tmp', 'internal.git' %>
+  websocket_address: "wss://0.0.0.0:<%= ENV['ARVADOS_TEST_WSS_PORT'] %>/websocket"
+  trash_sweep_interval: -1
+  docker_image_formats: ["v1"]
diff --git a/services/api/config/application.rb b/services/api/config/application.rb
new file mode 100644 (file)
index 0000000..24fd618
--- /dev/null
@@ -0,0 +1,89 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require File.expand_path('../boot', __FILE__)
+
+require 'rails/all'
+require 'digest'
+
+module Kernel
+  def suppress_warnings
+    verbose_orig = $VERBOSE
+    begin
+      $VERBOSE = nil
+      yield
+    ensure
+      $VERBOSE = verbose_orig
+    end
+  end
+end
+
+if defined?(Bundler)
+  suppress_warnings do
+    # If you precompile assets before deploying to production, use this line
+    Bundler.require(*Rails.groups(:assets => %w(development test)))
+    # If you want your assets lazily compiled in production, use this line
+    # Bundler.require(:default, :assets, Rails.env)
+  end
+end
+
+module Server
+  class Application < Rails::Application
+    # The following is to avoid SafeYAML's warning message
+    SafeYAML::OPTIONS[:default_mode] = :safe
+
+    # Settings in config/environments/* take precedence over those specified here.
+    # Application configuration should go into files in config/initializers
+    # -- all .rb files in that directory are automatically loaded.
+
+    # Custom directories with classes and modules you want to be autoloadable.
+    # config.autoload_paths += %W(#{config.root}/extras)
+
+    # Only load the plugins named here, in the order given (default is alphabetical).
+    # :all can be used as a placeholder for all plugins not explicitly named.
+    # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
+
+    # Activate observers that should always be running.
+    # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
+    config.active_record.schema_format = :sql
+
+    # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
+    # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s]
+    # config.i18n.default_locale = :de
+
+    # Configure sensitive parameters which will be filtered from the log file.
+    config.filter_parameters += [:password]
+
+    # Load entire application at startup.
+    config.eager_load = true
+
+    config.active_record.raise_in_transactional_callbacks = true
+
+    config.active_support.test_order = :sorted
+
+    config.action_dispatch.perform_deep_munge = false
+
+    I18n.enforce_available_locales = false
+
+    # Before using the filesystem backend for Rails.cache, check
+    # whether we own the relevant directory. If we don't, using it is
+    # likely to either fail or (if we're root) pollute it and cause
+    # other processes to fail later.
+    default_cache_path = Rails.root.join('tmp', 'cache')
+    if not File.owned?(default_cache_path)
+      if File.exist?(default_cache_path)
+        why = "owner (uid=#{File::Stat.new(default_cache_path).uid}) " +
+          "is not me (uid=#{Process.euid})"
+      else
+        why = "does not exist"
+      end
+      STDERR.puts("Defaulting to memory cache, " +
+                  "because #{default_cache_path} #{why}")
+      config.cache_store = :memory_store
+    else
+      require Rails.root.join('lib/safer_file_store')
+      config.cache_store = ::SaferFileStore.new(default_cache_path)
+    end
+  end
+end
diff --git a/services/api/config/application.yml.example b/services/api/config/application.yml.example
new file mode 100644 (file)
index 0000000..6c6ff15
--- /dev/null
@@ -0,0 +1,52 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Copy this file to application.yml and edit to suit.
+#
+# Consult application.default.yml for the full list of configuration
+# settings.
+#
+# The order of precedence is:
+# 1. config/environments/{RAILS_ENV}.rb (deprecated)
+# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)
+# 3. Section in application.yml called "common"
+# 4. Section in application.default.yml corresponding to RAILS_ENV
+# 5. Section in application.default.yml called "common"
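+#
+# A quick illustration with a hypothetical key (shown only to make the
+# precedence order concrete; it is not a real Arvados setting):
+#
+#   common:
+#     example_setting: from-common        # used in every RAILS_ENV...
+#   development:
+#     example_setting: from-development   # ...except development, where this wins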
+
+production:
+  # Mandatory site configuration.  See application.default.yml and
+  # http://doc.arvados.org/install/install-api-server.html#configure_application
+  # for more information.
+  uuid_prefix: ~
+  secret_token: ~
+  blob_signing_key: ~
+  sso_app_secret: ~
+  sso_app_id: ~
+  sso_provider_url: ~
+  workbench_address: ~
+  websocket_address: ~
+  #git_repositories_dir: ~
+  #git_internal_dir: ~
+
+development:
+  # Separate settings for development configuration.
+  uuid_prefix: ~
+  secret_token: ~
+  blob_signing_key: ~
+  sso_app_id: ~
+  sso_app_secret: ~
+  sso_provider_url: ~
+  workbench_address: ~
+  websocket_address: ~
+  #git_repositories_dir: ~
+  #git_internal_dir: ~
+
+test:
+  # Tests should be able to run without further configuration, but if you do
+  # want to change your local test configuration, this is where to do it.
+
+common:
+  # Settings in this section will be used in all environments
+  # (development, production, test) except when overridden in the
+  # environment-specific sections above.
diff --git a/services/api/config/boot.rb b/services/api/config/boot.rb
new file mode 100644 (file)
index 0000000..c70ab95
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'rubygems'
+
+# Set up gems listed in the Gemfile.
+ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', __FILE__)
+
+require 'bundler/setup' if File.exist?(ENV['BUNDLE_GEMFILE'])
diff --git a/services/api/config/database.yml.example b/services/api/config/database.yml.example
new file mode 100644 (file)
index 0000000..8087688
--- /dev/null
@@ -0,0 +1,34 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+development:
+  adapter: postgresql
+  template: template0
+  encoding: utf8
+  database: arvados_development
+  username: arvados
+  password: xxxxxxxx
+  host: localhost
+
+test:
+  adapter: postgresql
+  template: template0
+  encoding: utf8
+  database: arvados_test
+  username: arvados
+  password: xxxxxxxx
+  host: localhost
+
+production:
+  adapter: postgresql
+  template: template0
+  encoding: utf8
+  database: arvados_production
+  username: arvados
+  password: xxxxxxxx
+  host: localhost
+  # For the websockets server, prefer a larger database connection pool size,
+  # since it is multithreaded and can serve a large number of long-lived
+  # clients.  See also the websocket_max_connections configuration option.
+  pool: 50
diff --git a/services/api/config/environment.rb b/services/api/config/environment.rb
new file mode 100644 (file)
index 0000000..e24eee0
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Load the rails application
+require File.expand_path('../application', __FILE__)
+require 'josh_id'
+
+# Initialize the rails application
+Server::Application.initialize!
diff --git a/services/api/config/environments/development.rb.example b/services/api/config/environments/development.rb.example
new file mode 100644 (file)
index 0000000..56a4ed6
--- /dev/null
@@ -0,0 +1,42 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+Server::Application.configure do
+  # Settings specified here will take precedence over those in config/application.rb
+
+  # In the development environment your application's code is reloaded on
+  # every request.  This slows down response time but is perfect for development
+  # since you don't have to restart the web server when you make code changes.
+  config.cache_classes = false
+
+  # Log error messages when you accidentally call methods on nil.
+  config.whiny_nils = true
+
+  # Show full error reports and disable caching
+  config.consider_all_requests_local       = true
+  config.action_controller.perform_caching = false
+
+  # Don't care if the mailer can't send
+  config.action_mailer.raise_delivery_errors = false
+  config.action_mailer.perform_deliveries = false
+
+  # Print deprecation notices to the Rails logger
+  config.active_support.deprecation = :log
+
+  # Only use best-standards-support built into browsers
+  config.action_dispatch.best_standards_support = :builtin
+
+  # Log the query plan for queries taking more than this (works
+  # with SQLite, MySQL, and PostgreSQL)
+  config.active_record.auto_explain_threshold_in_seconds = 0.5
+
+  # Do not compress assets
+  config.assets.compress = false
+
+  # Expands the lines which load the assets
+  config.assets.debug = true
+
+  config.force_ssl = false
+
+end
diff --git a/services/api/config/environments/production.rb.example b/services/api/config/environments/production.rb.example
new file mode 100644 (file)
index 0000000..affb31d
--- /dev/null
@@ -0,0 +1,67 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+Server::Application.configure do
+  # Settings specified here will take precedence over those in config/application.rb
+
+  # Code is not reloaded between requests
+  config.cache_classes = true
+
+  # Full error reports are disabled and caching is turned on
+  config.consider_all_requests_local       = false
+  config.action_controller.perform_caching = true
+
+  # Disable Rails's static asset server (Apache or nginx will already do this)
+  config.serve_static_files = false
+
+  # Compress JavaScripts and CSS
+  config.assets.compress = true
+
+  # Don't fall back to the assets pipeline if a precompiled asset is missing
+  config.assets.compile = false
+
+  # Generate digests for assets URLs
+  config.assets.digest = true
+
+  # Defaults to Rails.root.join("public/assets")
+  # config.assets.manifest = YOUR_PATH
+
+  # Specifies the header that your server uses for sending files
+  # config.action_dispatch.x_sendfile_header = "X-Sendfile" # for apache
+  # config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for nginx
+
+  # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
+  # config.force_ssl = true
+
+  # See everything in the log (default is :info)
+  # config.log_level = :debug
+
+  # Use a different logger for distributed setups
+  # config.logger = SyslogLogger.new
+
+  # Use a different cache store in production
+  # config.cache_store = :mem_cache_store
+
+  # Enable serving of images, stylesheets, and JavaScripts from an asset server
+  # config.action_controller.asset_host = "http://assets.example.com"
+
+  # Precompile additional assets (application.js, application.css, and all non-JS/CSS are already added)
+  # config.assets.precompile += %w( search.js )
+
+  # Disable delivery errors, bad email addresses will be ignored
+  # config.action_mailer.raise_delivery_errors = false
+  # config.action_mailer.perform_deliveries = true
+
+  # Enable threaded mode
+  # config.threadsafe!
+
+  # Enable locale fallbacks for I18n (makes lookups for any locale fall back to
+  # the I18n.default_locale when a translation can not be found)
+  config.i18n.fallbacks = true
+
+  # Send deprecation notices to registered listeners
+  config.active_support.deprecation = :notify
+
+  config.log_level = :info
+end
diff --git a/services/api/config/environments/test.rb.example b/services/api/config/environments/test.rb.example
new file mode 100644 (file)
index 0000000..5ceb8f8
--- /dev/null
@@ -0,0 +1,50 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+Server::Application.configure do
+  # Settings specified here will take precedence over those in config/application.rb
+
+  # The test environment is used exclusively to run your application's
+  # test suite.  You never need to work with it otherwise.  Remember that
+  # your test database is "scratch space" for the test suite and is wiped
+  # and recreated between test runs.  Don't rely on the data there!
+  config.cache_classes = true
+
+  # Configure static asset server for tests with Cache-Control for performance
+  config.serve_static_files = true
+  config.static_cache_control = "public, max-age=3600"
+
+  # Log error messages when you accidentally call methods on nil
+  config.whiny_nils = true
+
+  # Show full error reports and disable caching
+  config.consider_all_requests_local       = true
+  config.action_controller.perform_caching = false
+
+  # Raise exceptions instead of rendering exception templates
+  config.action_dispatch.show_exceptions = false
+
+  # Disable request forgery protection in test environment
+  config.action_controller.allow_forgery_protection    = false
+
+  # Tell Action Mailer not to deliver emails to the real world.
+  # The :test delivery method accumulates sent emails in the
+  # ActionMailer::Base.deliveries array.
+  config.action_mailer.delivery_method = :test
+
+  # Use SQL instead of Active Record's schema dumper when creating the test database.
+  # This is necessary if your schema can't be completely dumped by the schema dumper,
+  # like if you have constraints or database-specific column types
+  # config.active_record.schema_format = :sql
+
+  # Print deprecation notices to the stderr
+  config.active_support.deprecation = :stderr
+
+  # No need for SSL while testing
+  config.force_ssl = false
+
+  # I18n likes to warn when this variable is not set
+  I18n.enforce_available_locales = true
+
+end
diff --git a/services/api/config/initializers/andand.rb b/services/api/config/initializers/andand.rb
new file mode 100644 (file)
index 0000000..2d41c4e
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'andand'
diff --git a/services/api/config/initializers/app_version.rb b/services/api/config/initializers/app_version.rb
new file mode 100644 (file)
index 0000000..bc3629f
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'app_version'
diff --git a/services/api/config/initializers/authorization.rb b/services/api/config/initializers/authorization.rb
new file mode 100644 (file)
index 0000000..ec80048
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+Server::Application.configure do
+  config.middleware.delete ActionDispatch::RemoteIp
+  config.middleware.insert 0, ActionDispatch::RemoteIp
+  config.middleware.insert 1, ArvadosApiToken
+end
diff --git a/services/api/config/initializers/backtrace_silencers.rb b/services/api/config/initializers/backtrace_silencers.rb
new file mode 100644 (file)
index 0000000..b9c6bce
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# You can add backtrace silencers for libraries that you're using but don't wish to see in your backtraces.
+# Rails.backtrace_cleaner.add_silencer { |line| line =~ /my_noisy_library/ }
+
+# You can also remove all the silencers if you're trying to debug a problem that might stem from framework code.
+# Rails.backtrace_cleaner.remove_silencers!
diff --git a/services/api/config/initializers/common_api_template.rb b/services/api/config/initializers/common_api_template.rb
new file mode 100644 (file)
index 0000000..efc15f5
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'common_api_template'
diff --git a/services/api/config/initializers/current_api_client.rb b/services/api/config/initializers/current_api_client.rb
new file mode 100644 (file)
index 0000000..582051e
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'current_api_client'
diff --git a/services/api/config/initializers/db_current_time.rb b/services/api/config/initializers/db_current_time.rb
new file mode 100644 (file)
index 0000000..b983490
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'db_current_time'
diff --git a/services/api/config/initializers/eventbus.rb b/services/api/config/initializers/eventbus.rb
new file mode 100644 (file)
index 0000000..eb5561a
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+if ENV['ARVADOS_WEBSOCKETS']
+  Server::Application.configure do
+    Rails.logger.error "Built-in websocket server is disabled. See note (2017-03-23, e8cc0d7) at https://dev.arvados.org/projects/arvados/wiki/Upgrading_to_master"
+
+    class EventBusRemoved
+      def overloaded?
+        false
+      end
+      def on_connect ws
+        ws.on :open do |e|
+          EM::Timer.new 1 do
+            ws.send(SafeJSON.dump({status: 501, message: "Server misconfigured? see http://doc.arvados.org/install/install-ws.html"}))
+          end
+          EM::Timer.new 3 do
+            ws.close
+          end
+        end
+      end
+    end
+
+    config.middleware.insert_after(ArvadosApiToken, RackSocket, {
+                                     handler: EventBusRemoved,
+                                     mount: "/websocket",
+                                     websocket_only: (ENV['ARVADOS_WEBSOCKETS'] == "ws-only")
+                                   })
+  end
+end
diff --git a/services/api/config/initializers/fix_www_decode.rb b/services/api/config/initializers/fix_www_decode.rb
new file mode 100644 (file)
index 0000000..713d9f3
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module URI
+  if Gem::Version.new(RUBY_VERSION) < Gem::Version.new('2.2')
+    # Rack uses the standard library method URI.decode_www_form_component to
+    # process parameters.  This method first validates the string with a
+    # regular expression, and then decodes it using another regular expression.
+    # Ruby 2.1 and earlier have a bug in the validation; the regular
+    # expression used generates many backtracking points, which results
+    # in exponential memory growth when matching large strings.  The fix is to
+    # monkey-patch in the version of the method from Ruby 2.2, which checks
+    # that the string is not invalid instead of checking that it is valid.
+    def self.decode_www_form_component(str, enc=Encoding::UTF_8)
+      raise ArgumentError, "invalid %-encoding (#{str})" if /%(?!\h\h)/ =~ str
+      str.b.gsub(/\+|%\h\h/, TBLDECWWWCOMP_).force_encoding(enc)
+    end
+  end
+end
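+
+# A minimal sanity check of the patched method (only active on Ruby < 2.2;
+# newer Rubies already behave this way):
+#
+#   URI.decode_www_form_component("a%20b+c")  # => "a b c"
+#   URI.decode_www_form_component("bad%2")    # raises ArgumentError (invalid %-encoding)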
diff --git a/services/api/config/initializers/inflections.rb b/services/api/config/initializers/inflections.rb
new file mode 100644 (file)
index 0000000..50bd0d5
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# Add new inflection rules using the following format
+# (all these examples are active by default):
+# ActiveSupport::Inflector.inflections do |inflect|
+#   inflect.plural /^(ox)$/i, '\1en'
+#   inflect.singular /^(ox)en/i, '\1'
+#   inflect.irregular 'person', 'people'
+#   inflect.uncountable %w( fish sheep )
+# end
+
+ActiveSupport::Inflector.inflections do |inflect|
+  inflect.plural(/^([Ss]pecimen)$/i, '\1s')
+  inflect.singular(/^([Ss]pecimen)s?/i, '\1')
+  inflect.plural(/^([Hh]uman)$/i, '\1s')
+  inflect.singular(/^([Hh]uman)s?/i, '\1')
+end
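+
+# With the rules above in effect (illustrative):
+#
+#   "specimens".singularize  # => "specimen"
+#   "human".pluralize        # => "humans"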
diff --git a/services/api/config/initializers/kind_and_etag.rb b/services/api/config/initializers/kind_and_etag.rb
new file mode 100644 (file)
index 0000000..693cc8d
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'kind_and_etag'
diff --git a/services/api/config/initializers/legacy_jobs_api.rb b/services/api/config/initializers/legacy_jobs_api.rb
new file mode 100644 (file)
index 0000000..9ea6b28
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Config must be done before we load model class files; otherwise they
+# won't be able to use Rails.configuration.* to initialize their
+# classes.
+require_relative 'load_config.rb'
+
+require 'enable_jobs_api'
+
+Server::Application.configure do
+  if ActiveRecord::Base.connection.tables.include?('jobs')
+    check_enable_legacy_jobs_api
+  end
+end
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
new file mode 100644 (file)
index 0000000..16059ca
--- /dev/null
@@ -0,0 +1,77 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+begin
+  # If secret_token.rb exists here, we need to load it first.
+  require_relative 'secret_token.rb'
+rescue LoadError
+  # Normally secret_token.rb is missing and the secret token is
+  # configured by application.yml (i.e., here!) instead.
+end
+
+if (File.exist?(File.expand_path '../omniauth.rb', __FILE__) and
+    not defined? WARNED_OMNIAUTH_CONFIG)
+  Rails.logger.warn <<-EOS
+DEPRECATED CONFIGURATION:
+ Please move your SSO provider config into config/application.yml
+ and delete config/initializers/omniauth.rb.
+EOS
+  # Real values will be copied from globals by omniauth_init.rb. For
+  # now, assign some strings so the generic *.yml config loader
+  # doesn't overwrite them or complain that they're missing.
+  Rails.configuration.sso_app_id = 'xxx'
+  Rails.configuration.sso_app_secret = 'xxx'
+  Rails.configuration.sso_provider_url = '//xxx'
+  WARNED_OMNIAUTH_CONFIG = true
+end
+
+$application_config = {}
+
+%w(application.default application).each do |cfgfile|
+  path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
+  if File.exist? path
+    yaml = ERB.new(IO.read path).result(binding)
+    confs = YAML.load(yaml, deserialize_symbols: true)
+    # Ignore empty YAML file:
+    next if confs == false
+    $application_config.merge!(confs['common'] || {})
+    $application_config.merge!(confs[::Rails.env.to_s] || {})
+  end
+end
+
+Server::Application.configure do
+  nils = []
+  $application_config.each do |k, v|
+    # "foo.bar: baz" --> { config.foo.bar = baz }
+    cfg = config
+    ks = k.split '.'
+    k = ks.pop
+    ks.each do |kk|
+      cfg = cfg.send(kk)
+    end
+    if cfg.respond_to?(k.to_sym) and !cfg.send(k).nil?
+      # Config must have been set already in environments/*.rb.
+      #
+      # After config files have been migrated, this mechanism should
+      # be deprecated, then removed.
+    elsif v.nil?
+      # Config variables are not allowed to be nil. Make a "naughty"
+      # list, and present it below.
+      nils << k
+    else
+      cfg.send "#{k}=", v
+    end
+  end
+  if !nils.empty?
+    raise <<EOS
+Refusing to start in #{::Rails.env.to_s} mode with missing configuration.
+
+The following configuration settings must be specified in
+config/application.yml:
+* #{nils.join "\n* "}
+
+EOS
+  end
+  config.secret_key_base = config.secret_token
+end
diff --git a/services/api/config/initializers/lograge.rb b/services/api/config/initializers/lograge.rb
new file mode 100644 (file)
index 0000000..ef4e428
--- /dev/null
@@ -0,0 +1,48 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'safe_json'
+
+Server::Application.configure do
+  config.lograge.enabled = true
+  config.lograge.formatter = Lograge::Formatters::Logstash.new
+  config.lograge.custom_options = lambda do |event|
+    payload = {
+      request_id: event.payload[:request_id],
+      client_ipaddr: event.payload[:client_ipaddr],
+      client_auth: event.payload[:client_auth],
+    }
+    exceptions = %w(controller action format id)
+    params = event.payload[:params].except(*exceptions)
+
+    # Omit secret_mounts field if supplied in create/update request
+    # body.
+    [
+      ['container', 'secret_mounts'],
+      ['container_request', 'secret_mounts'],
+    ].each do |resource, field|
+      if params[resource].is_a? Hash
+        params[resource] = params[resource].except(field)
+      end
+    end
+
+    # Redact new_user_token param in /arvados/v1/users/merge
+    # request. Log the auth UUID instead, if the token exists.
+    if params['new_user_token'].is_a? String
+      params['new_user_token_uuid'] =
+        ApiClientAuthorization.
+          where('api_token = ?', params['new_user_token']).
+          first.andand.uuid
+      params['new_user_token'] = '[...]'
+    end
+
+    params_s = SafeJSON.dump(params)
+    if params_s.length > Rails.configuration.max_request_log_params_size
+      payload[:params_truncated] = params_s[0..Rails.configuration.max_request_log_params_size] + "[...]"
+    else
+      payload[:params] = params
+    end
+    payload
+  end
+end
diff --git a/services/api/config/initializers/mime_types.rb b/services/api/config/initializers/mime_types.rb
new file mode 100644 (file)
index 0000000..36683cc
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+# Add new mime types for use in respond_to blocks:
+# Mime::Type.register "text/richtext", :rtf
+# Mime::Type.register_alias "text/html", :iphone
diff --git a/services/api/config/initializers/net_http.rb b/services/api/config/initializers/net_http.rb
new file mode 100644 (file)
index 0000000..cda803e
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'net/http'
diff --git a/services/api/config/initializers/oj_mimic_json.rb b/services/api/config/initializers/oj_mimic_json.rb
new file mode 100644 (file)
index 0000000..ce2d40c
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'oj'
+
+Oj::Rails.set_encoder()
+Oj::Rails.set_decoder()
+Oj::Rails.optimize()
+Oj::Rails.mimic_JSON()
+
diff --git a/services/api/config/initializers/omniauth_init.rb b/services/api/config/initializers/omniauth_init.rb
new file mode 100644 (file)
index 0000000..b5e9894
--- /dev/null
@@ -0,0 +1,23 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This file is called omniauth_init.rb instead of omniauth.rb because
+# older versions had site configuration in omniauth.rb.
+#
+# It must come after omniauth.rb in (lexical) load order.
+
+if defined? CUSTOM_PROVIDER_URL
+  Rails.logger.warn "Copying omniauth from globals in legacy config file."
+  Rails.configuration.sso_app_id = APP_ID
+  Rails.configuration.sso_app_secret = APP_SECRET
+  Rails.configuration.sso_provider_url = CUSTOM_PROVIDER_URL
+else
+  Rails.application.config.middleware.use OmniAuth::Builder do
+    provider(:josh_id,
+             Rails.configuration.sso_app_id,
+             Rails.configuration.sso_app_secret,
+             Rails.configuration.sso_provider_url)
+  end
+  OmniAuth.config.on_failure = StaticController.action(:login_failure)
+end
diff --git a/services/api/config/initializers/permit_all_parameters.rb b/services/api/config/initializers/permit_all_parameters.rb
new file mode 100644 (file)
index 0000000..1062884
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+ActionController::Parameters.permit_all_parameters = true
diff --git a/services/api/config/initializers/preload_all_models.rb b/services/api/config/initializers/preload_all_models.rb
new file mode 100644 (file)
index 0000000..0ab2b03
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# See http://aaronvb.com/articles/37-rails-caching-and-undefined-class-module
+
+# Config must be done before we load model class files; otherwise they
+# won't be able to use Rails.configuration.* to initialize their
+# classes.
+require_relative 'load_config.rb'
+
+if Rails.env == 'development'
+  Dir.foreach("#{Rails.root}/app/models") do |model_file|
+    require_dependency model_file if model_file.match(/\.rb$/)
+  end
+end
diff --git a/services/api/config/initializers/schema_discovery_cache.rb b/services/api/config/initializers/schema_discovery_cache.rb
new file mode 100644 (file)
index 0000000..c2cb8de
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Delete the cached discovery document during startup. Otherwise we
+# might still serve an old discovery document after updating the
+# schema and restarting the server.
+
+Rails.cache.delete 'arvados_v1_rest_discovery'
diff --git a/services/api/config/initializers/session_store.rb b/services/api/config/initializers/session_store.rb
new file mode 100644 (file)
index 0000000..5e9290c
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+
+Server::Application.config.session_store :cookie_store, :key => '_server_session'
+
+# Use the database for sessions instead of the cookie-based default,
+# which shouldn't be used to store highly confidential information
+# (create the session table with "rails generate session_migration")
+# Server::Application.config.session_store :active_record_store
diff --git a/services/api/config/initializers/time_format.rb b/services/api/config/initializers/time_format.rb
new file mode 100644 (file)
index 0000000..78cabc8
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+ActiveSupport::JSON::Encoding.time_precision = 9
+
+class ActiveSupport::TimeWithZone
+  remove_method :as_json
+  def as_json *args
+    strftime "%Y-%m-%dT%H:%M:%S.%NZ"
+  end
+end
+
+class Time
+  remove_method :as_json
+  def as_json *args
+    strftime "%Y-%m-%dT%H:%M:%S.%NZ"
+  end
+end
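+
+# Both overrides serialize timestamps with nanosecond precision, e.g.:
+#
+#   Time.at(0).utc.as_json  # => "1970-01-01T00:00:00.000000000Z"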
diff --git a/services/api/config/initializers/wrap_parameters.rb b/services/api/config/initializers/wrap_parameters.rb
new file mode 100644 (file)
index 0000000..9767777
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Be sure to restart your server when you modify this file.
+#
+# This file contains settings for ActionController::ParamsWrapper which
+# is enabled by default.
+
+# Enable parameter wrapping for JSON. You can disable this by setting :format to an empty array.
+ActiveSupport.on_load(:action_controller) do
+  wrap_parameters :format => [:json]
+end
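+
+# With wrapping enabled, a JSON body such as {"name": "x"} posted to the
+# collections controller also appears as params[:collection][:name]
+# (a sketch of ParamsWrapper's default behavior, not Arvados-specific).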
+
+# Disable root element in JSON by default.
+ActiveSupport.on_load(:active_record) do
+  self.include_root_in_json = false
+end
diff --git a/services/api/config/locales/en.yml b/services/api/config/locales/en.yml
new file mode 100644 (file)
index 0000000..e6a62cb
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Sample localization file for English. Add more files in this directory for other locales.
+# See https://github.com/svenfuchs/rails-i18n/tree/master/rails%2Flocale for starting points.
+
+en:
+  hello: "Hello world"
diff --git a/services/api/config/routes.rb b/services/api/config/routes.rb
new file mode 100644 (file)
index 0000000..b54c3c5
--- /dev/null
@@ -0,0 +1,120 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+Server::Application.routes.draw do
+  themes_for_rails
+
+  # OPTIONS requests are not allowed at routes that use cookies.
+  ['/auth/*a', '/login', '/logout'].each do |nono|
+    match nono, to: 'user_sessions#cross_origin_forbidden', via: 'OPTIONS'
+  end
+  # OPTIONS at discovery and API paths get an empty response with CORS headers.
+  match '/discovery/v1/*a', to: 'static#empty', via: 'OPTIONS'
+  match '/arvados/v1/*a', to: 'static#empty', via: 'OPTIONS'
+
+  namespace :arvados do
+    namespace :v1 do
+      resources :api_client_authorizations do
+        post 'create_system_auth', on: :collection
+        get 'current', on: :collection
+      end
+      resources :api_clients
+      resources :authorized_keys
+      resources :collections do
+        get 'provenance', on: :member
+        get 'used_by', on: :member
+        post 'trash', on: :member
+        post 'untrash', on: :member
+      end
+      resources :groups do
+        get 'contents', on: :collection
+        get 'contents', on: :member
+        get 'shared', on: :collection
+        post 'trash', on: :member
+        post 'untrash', on: :member
+      end
+      resources :humans
+      resources :job_tasks
+      resources :containers do
+        get 'auth', on: :member
+        post 'lock', on: :member
+        post 'unlock', on: :member
+        get 'secret_mounts', on: :member
+        get 'current', on: :collection
+      end
+      resources :container_requests
+      resources :jobs do
+        get 'queue', on: :collection
+        get 'queue_size', on: :collection
+        post 'cancel', on: :member
+        post 'lock', on: :member
+      end
+      resources :keep_disks do
+        post 'ping', on: :collection
+      end
+      resources :keep_services do
+        get 'accessible', on: :collection
+      end
+      resources :links
+      resources :logs
+      resources :nodes do
+        post 'ping', on: :member
+      end
+      resources :pipeline_instances do
+        post 'cancel', on: :member
+      end
+      resources :pipeline_templates
+      resources :workflows
+      resources :repositories do
+        get 'get_all_permissions', on: :collection
+      end
+      resources :specimens
+      resources :traits
+      resources :user_agreements do
+        get 'signatures', on: :collection
+        post 'sign', on: :collection
+      end
+      resources :users do
+        get 'current', on: :collection
+        get 'system', on: :collection
+        post 'activate', on: :member
+        post 'setup', on: :collection
+        post 'unsetup', on: :member
+        post 'update_uuid', on: :member
+        post 'merge', on: :collection
+      end
+      resources :virtual_machines do
+        get 'logins', on: :member
+        get 'get_all_logins', on: :collection
+      end
+      get '/permissions/:uuid', to: 'links#get_permissions'
+    end
+  end
+
+  if Rails.env == 'test'
+    post '/database/reset', to: 'database#reset'
+  end
+
+  # omniauth
+  match '/auth/:provider/callback', to: 'user_sessions#create', via: [:get, :post]
+  match '/auth/failure', to: 'user_sessions#failure', via: [:get, :post]
+  # not handled by omniauth provider -> 403 with no CORS headers.
+  get '/auth/*a', to: 'user_sessions#cross_origin_forbidden'
+
+  # Custom logout
+  match '/login', to: 'user_sessions#login', via: [:get, :post]
+  match '/logout', to: 'user_sessions#logout', via: [:get, :post]
+
+  match '/discovery/v1/apis/arvados/v1/rest', to: 'arvados/v1/schema#index', via: [:get, :post]
+
+  match '/static/login_failure', to: 'static#login_failure', as: :login_failure, via: [:get, :post]
+
+  match '/_health/ping', to: 'arvados/v1/healthcheck#ping', via: [:get]
+
+  # Send unroutable requests to an arbitrary controller
+  # (ends up at ApplicationController#render_not_found)
+  match '*a', to: 'static#render_not_found', via: [:get, :post, :put, :patch, :delete, :options]
+
+  root to: 'static#home'
+end
diff --git a/services/api/config/unbound.template b/services/api/config/unbound.template
new file mode 100644 (file)
index 0000000..0c67700
--- /dev/null
@@ -0,0 +1,4 @@
+  local-data: "%{hostname} IN A %{ip_address}"
+  local-data: "%{hostname}.%{uuid_prefix} IN A %{ip_address}"
+  local-data: "%{hostname}.%{uuid_prefix}.arvadosapi.com. IN A %{ip_address}"
+  local-data: "%{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com"
diff --git a/services/api/db/migrate/20121016005009_create_collections.rb b/services/api/db/migrate/20121016005009_create_collections.rb
new file mode 100644 (file)
index 0000000..79d7abe
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateCollections < ActiveRecord::Migration
+  def change
+    create_table :collections do |t|
+      t.string :locator
+      t.string :create_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :portable_data_hash
+      t.string :name
+      t.integer :redundancy
+      t.string :redundancy_confirmed_by_client
+      t.datetime :redundancy_confirmed_at
+      t.integer :redundancy_confirmed_as
+
+      t.timestamps
+    end
+  end
+end
diff --git a/services/api/db/migrate/20130105203021_create_metadata.rb b/services/api/db/migrate/20130105203021_create_metadata.rb
new file mode 100644 (file)
index 0000000..63ddf72
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateMetadata < ActiveRecord::Migration
+  def change
+    create_table :metadata do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :target_uuid
+      t.string :target_kind
+      t.references :native_target, :polymorphic => true
+      t.string :metadatum_class
+      t.string :key
+      t.string :value
+      t.text :info # "unlimited length" in postgresql
+
+      t.timestamps
+    end
+  end
+end
diff --git a/services/api/db/migrate/20130105224358_rename_metadata_class.rb b/services/api/db/migrate/20130105224358_rename_metadata_class.rb
new file mode 100644 (file)
index 0000000..dff192a
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameMetadataClass < ActiveRecord::Migration
+  def up
+    rename_column :metadata, :metadatum_class, :metadata_class
+  end
+
+  def down
+    rename_column :metadata, :metadata_class, :metadatum_class
+  end
+end
diff --git a/services/api/db/migrate/20130105224618_rename_collection_created_by_client.rb b/services/api/db/migrate/20130105224618_rename_collection_created_by_client.rb
new file mode 100644 (file)
index 0000000..7fba042
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameCollectionCreatedByClient < ActiveRecord::Migration
+  def up
+    rename_column :collections, :create_by_client, :created_by_client
+  end
+
+  def down
+    rename_column :collections, :created_by_client, :create_by_client
+  end
+end
diff --git a/services/api/db/migrate/20130107181109_add_uuid_to_collections.rb b/services/api/db/migrate/20130107181109_add_uuid_to_collections.rb
new file mode 100644 (file)
index 0000000..614599b
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddUuidToCollections < ActiveRecord::Migration
+  def change
+    add_column :collections, :uuid, :string
+  end
+end
diff --git a/services/api/db/migrate/20130107212832_create_nodes.rb b/services/api/db/migrate/20130107212832_create_nodes.rb
new file mode 100644 (file)
index 0000000..dd2ddb3
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateNodes < ActiveRecord::Migration
+  def up
+    create_table :nodes do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.integer :slot_number
+      t.string :hostname
+      t.string :domain
+      t.string :ip_address
+      t.datetime :first_ping_at
+      t.datetime :last_ping_at
+      t.text :info
+
+      t.timestamps
+    end
+    add_index :nodes, :uuid, :unique => true
+    add_index :nodes, :slot_number, :unique => true
+    add_index :nodes, :hostname, :unique => true
+  end
+  def down
+    drop_table :nodes
+  end
+end
diff --git a/services/api/db/migrate/20130109175700_create_pipelines.rb b/services/api/db/migrate/20130109175700_create_pipelines.rb
new file mode 100644 (file)
index 0000000..5a2d97e
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreatePipelines < ActiveRecord::Migration
+  def up
+    create_table :pipelines do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :name
+      t.text :components
+
+      t.timestamps
+    end
+    add_index :pipelines, :uuid, :unique => true
+  end
+  def down
+    drop_table :pipelines
+  end
+end
diff --git a/services/api/db/migrate/20130109220548_create_pipeline_invocations.rb b/services/api/db/migrate/20130109220548_create_pipeline_invocations.rb
new file mode 100644 (file)
index 0000000..bf65b50
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreatePipelineInvocations < ActiveRecord::Migration
+  def up
+    create_table :pipeline_invocations do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :pipeline_uuid
+      t.string :name
+      t.text :components
+      t.boolean :success, :null => true
+      t.boolean :active, :default => false
+
+      t.timestamps
+    end
+    add_index :pipeline_invocations, :uuid, :unique => true
+  end
+  def down
+    drop_table :pipeline_invocations
+  end
+end
diff --git a/services/api/db/migrate/20130113214204_add_index_to_collections_and_metadata.rb b/services/api/db/migrate/20130113214204_add_index_to_collections_and_metadata.rb
new file mode 100644 (file)
index 0000000..99eee7b
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddIndexToCollectionsAndMetadata < ActiveRecord::Migration
+  def up
+    add_index :collections, :uuid, :unique => true
+    add_index :metadata, :uuid, :unique => true
+  end
+  def down
+    remove_index :metadata, :uuid
+    remove_index :collections, :uuid
+  end
+end
diff --git a/services/api/db/migrate/20130116024233_create_specimens.rb b/services/api/db/migrate/20130116024233_create_specimens.rb
new file mode 100644 (file)
index 0000000..75a8ded
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateSpecimens < ActiveRecord::Migration
+  def up
+    create_table :specimens do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :material
+
+      t.timestamps
+    end
+    add_index :specimens, :uuid, :unique => true
+  end
+  def down
+    drop_table :specimens
+  end
+end
diff --git a/services/api/db/migrate/20130116215213_create_projects.rb b/services/api/db/migrate/20130116215213_create_projects.rb
new file mode 100644 (file)
index 0000000..032405a
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateProjects < ActiveRecord::Migration
+  def up
+    create_table :projects do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :name
+      t.text :description
+
+      t.timestamps
+    end
+    add_index :projects, :uuid, :unique => true
+  end
+  def down
+    drop_table :projects
+  end
+end
diff --git a/services/api/db/migrate/20130118002239_rename_metadata_attributes.rb b/services/api/db/migrate/20130118002239_rename_metadata_attributes.rb
new file mode 100644 (file)
index 0000000..dc4305d
--- /dev/null
@@ -0,0 +1,45 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameMetadataAttributes < ActiveRecord::Migration
+  def up
+    rename_column :metadata, :target_kind, :tail_kind
+    rename_column :metadata, :target_uuid, :tail
+    rename_column :metadata, :value, :head
+    rename_column :metadata, :key, :name
+    add_column :metadata, :head_kind, :string
+    add_index :metadata, :head
+    add_index :metadata, :head_kind
+    add_index :metadata, :tail
+    add_index :metadata, :tail_kind
+    begin
+      Metadatum.where('head like ?', 'orvos#%').each do |m|
+        kind_uuid = m.head.match /^(orvos\#.*)\#([-0-9a-z]+)$/
+        if kind_uuid
+          m.update_attributes(head_kind: kind_uuid[1],
+                              head: kind_uuid[2])
+        end
+      end
+    rescue
+    end
+  end
+
+  def down
+    begin
+      Metadatum.where('head_kind is not null and head_kind <> ? and head is not null', '').each do |m|
+        m.update_attributes(head: m.head_kind + '#' + m.head)
+      end
+    rescue
+    end
+    remove_index :metadata, :tail_kind
+    remove_index :metadata, :tail
+    remove_index :metadata, :head_kind
+    remove_index :metadata, :head
+    rename_column :metadata, :name, :key
+    remove_column :metadata, :head_kind
+    rename_column :metadata, :head, :value
+    rename_column :metadata, :tail, :target_uuid
+    rename_column :metadata, :tail_kind, :target_kind
+  end
+end
diff --git a/services/api/db/migrate/20130122020042_create_users.rb b/services/api/db/migrate/20130122020042_create_users.rb
new file mode 100644 (file)
index 0000000..8a95768
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateUsers < ActiveRecord::Migration
+  def change
+    create_table :users do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :email
+      t.string :first_name
+      t.string :last_name
+      t.string :identity_url
+      t.boolean :is_admin
+      t.text :prefs
+
+      t.timestamps
+    end
+  end
+end
diff --git a/services/api/db/migrate/20130122201442_create_logs.rb b/services/api/db/migrate/20130122201442_create_logs.rb
new file mode 100644 (file)
index 0000000..195becb
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateLogs < ActiveRecord::Migration
+  def up
+    create_table :logs do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.string :object_kind
+      t.string :object_uuid
+      t.datetime :event_at
+      t.string :event_type
+      t.text :summary
+      t.text :info
+
+      t.timestamps
+    end
+    add_index :logs, :uuid, :unique => true
+    add_index :logs, :object_kind
+    add_index :logs, :object_uuid
+    add_index :logs, :event_type
+    add_index :logs, :event_at
+    add_index :logs, :summary
+  end
+
+  def down
+    drop_table :logs
+  end
+end
diff --git a/services/api/db/migrate/20130122221616_add_modified_at_to_logs.rb b/services/api/db/migrate/20130122221616_add_modified_at_to_logs.rb
new file mode 100644 (file)
index 0000000..4e98c7e
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddModifiedAtToLogs < ActiveRecord::Migration
+  def change
+    add_column :logs, :modified_at, :datetime
+  end
+end
diff --git a/services/api/db/migrate/20130123174514_add_uuid_index_to_users.rb b/services/api/db/migrate/20130123174514_add_uuid_index_to_users.rb
new file mode 100644 (file)
index 0000000..46d87c5
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddUuidIndexToUsers < ActiveRecord::Migration
+  def change
+    add_index :users, :uuid, :unique => true
+  end
+end
diff --git a/services/api/db/migrate/20130123180224_create_api_clients.rb b/services/api/db/migrate/20130123180224_create_api_clients.rb
new file mode 100644 (file)
index 0000000..326e09d
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateApiClients < ActiveRecord::Migration
+  def change
+    create_table :api_clients do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :name
+      t.string :url_prefix
+
+      t.timestamps
+    end
+    add_index :api_clients, :uuid, :unique => true
+  end
+end
diff --git a/services/api/db/migrate/20130123180228_create_api_client_authorizations.rb b/services/api/db/migrate/20130123180228_create_api_client_authorizations.rb
new file mode 100644 (file)
index 0000000..7a18109
--- /dev/null
@@ -0,0 +1,23 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateApiClientAuthorizations < ActiveRecord::Migration
+  def change
+    create_table :api_client_authorizations do |t|
+      t.string :api_token, :null => false
+      t.references :api_client, :null => false
+      t.references :user, :null => false
+      t.string :created_by_ip_address
+      t.string :last_used_by_ip_address
+      t.datetime :last_used_at
+      t.datetime :expires_at
+
+      t.timestamps
+    end
+    add_index :api_client_authorizations, :api_token, :unique => true
+    add_index :api_client_authorizations, :api_client_id
+    add_index :api_client_authorizations, :user_id
+    add_index :api_client_authorizations, :expires_at
+  end
+end
diff --git a/services/api/db/migrate/20130125220425_rename_created_by_to_owner.rb b/services/api/db/migrate/20130125220425_rename_created_by_to_owner.rb
new file mode 100644 (file)
index 0000000..9a31222
--- /dev/null
@@ -0,0 +1,23 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameCreatedByToOwner < ActiveRecord::Migration
+  def tables
+    %w{api_clients collections logs metadata nodes pipelines pipeline_invocations projects specimens users}
+  end
+
+  def up
+    tables.each do |t|
+      remove_column t.to_sym, :created_by_client
+      rename_column t.to_sym, :created_by_user, :owner
+    end
+  end
+
+  def down
+    tables.reverse.each do |t|
+      rename_column t.to_sym, :owner, :created_by_user
+      add_column t.to_sym, :created_by_client, :string
+    end
+  end
+end
diff --git a/services/api/db/migrate/20130128202518_rename_metadata_to_links.rb b/services/api/db/migrate/20130128202518_rename_metadata_to_links.rb
new file mode 100644 (file)
index 0000000..f3e6b45
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameMetadataToLinks < ActiveRecord::Migration
+  def up
+    rename_table :metadata, :links
+    rename_column :links, :tail, :tail_uuid
+    rename_column :links, :head, :head_uuid
+    rename_column :links, :info, :properties
+    rename_column :links, :metadata_class, :link_class
+    rename_index :links, :index_metadata_on_head_kind, :index_links_on_head_kind
+    rename_index :links, :index_metadata_on_head, :index_links_on_head_uuid
+    rename_index :links, :index_metadata_on_tail_kind, :index_links_on_tail_kind
+    rename_index :links, :index_metadata_on_tail, :index_links_on_tail_uuid
+    rename_index :links, :index_metadata_on_uuid, :index_links_on_uuid
+  end
+
+  def down
+    rename_index :links, :index_links_on_uuid, :index_metadata_on_uuid
+    rename_index :links, :index_links_on_head_kind, :index_metadata_on_head_kind
+    rename_index :links, :index_links_on_head_uuid, :index_metadata_on_head
+    rename_index :links, :index_links_on_tail_kind, :index_metadata_on_tail_kind
+    rename_index :links, :index_links_on_tail_uuid, :index_metadata_on_tail
+    rename_column :links, :link_class, :metadata_class
+    rename_column :links, :properties, :info
+    rename_column :links, :head_uuid, :head
+    rename_column :links, :tail_uuid, :tail
+    rename_table :links, :metadata
+  end
+end
diff --git a/services/api/db/migrate/20130128231343_add_properties_to_specimen.rb b/services/api/db/migrate/20130128231343_add_properties_to_specimen.rb
new file mode 100644 (file)
index 0000000..78ba71e
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddPropertiesToSpecimen < ActiveRecord::Migration
+  def change
+    add_column :specimens, :properties, :text
+  end
+end
diff --git a/services/api/db/migrate/20130130205749_add_manifest_text_to_collection.rb b/services/api/db/migrate/20130130205749_add_manifest_text_to_collection.rb
new file mode 100644 (file)
index 0000000..ca2441a
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddManifestTextToCollection < ActiveRecord::Migration
+  def change
+    add_column :collections, :manifest_text, :text
+  end
+end
diff --git a/services/api/db/migrate/20130203104818_create_jobs.rb b/services/api/db/migrate/20130203104818_create_jobs.rb
new file mode 100644 (file)
index 0000000..ab06956
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateJobs < ActiveRecord::Migration
+  def change
+    create_table :jobs do |t|
+      t.string :uuid
+      t.string :owner
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :submit_id
+      t.string :command
+      t.string :command_version
+      t.text :command_parameters
+      t.string :cancelled_by_client
+      t.string :cancelled_by_user
+      t.datetime :cancelled_at
+      t.datetime :started_at
+      t.datetime :finished_at
+      t.boolean :running
+      t.boolean :success
+      t.string :output
+
+      t.timestamps
+    end
+    add_index :jobs, :uuid, :unique => true
+    add_index :jobs, :submit_id, :unique => true
+    add_index :jobs, :command
+    add_index :jobs, :finished_at
+    add_index :jobs, :started_at
+    add_index :jobs, :output
+  end
+end
diff --git a/services/api/db/migrate/20130203104824_create_job_steps.rb b/services/api/db/migrate/20130203104824_create_job_steps.rb
new file mode 100644 (file)
index 0000000..37c821c
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateJobSteps < ActiveRecord::Migration
+  def change
+    create_table :job_steps do |t|
+      t.string :uuid
+      t.string :owner
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :job_uuid
+      t.integer :sequence
+      t.text :parameters
+      t.text :output
+      t.float :progress
+      t.boolean :success
+
+      t.timestamps
+    end
+    add_index :job_steps, :uuid, :unique => true
+    add_index :job_steps, :job_uuid
+    add_index :job_steps, :sequence
+    add_index :job_steps, :success
+  end
+end
diff --git a/services/api/db/migrate/20130203115329_add_priority_to_jobs.rb b/services/api/db/migrate/20130203115329_add_priority_to_jobs.rb
new file mode 100644 (file)
index 0000000..a92e92b
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddPriorityToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :priority, :string
+  end
+end
diff --git a/services/api/db/migrate/20130207195855_add_index_on_timestamps.rb b/services/api/db/migrate/20130207195855_add_index_on_timestamps.rb
new file mode 100644 (file)
index 0000000..0a0154d
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddIndexOnTimestamps < ActiveRecord::Migration
+  def tables
+    %w{api_clients collections jobs job_steps links logs nodes pipeline_invocations pipelines projects specimens users}
+  end
+
+  def change
+    tables.each do |t|
+      add_index t.to_sym, :created_at
+      add_index t.to_sym, :modified_at
+    end
+  end
+end
diff --git a/services/api/db/migrate/20130218181504_add_properties_to_pipeline_invocations.rb b/services/api/db/migrate/20130218181504_add_properties_to_pipeline_invocations.rb
new file mode 100644 (file)
index 0000000..453e134
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddPropertiesToPipelineInvocations < ActiveRecord::Migration
+  def change
+    add_column :pipeline_invocations, :properties, :text
+  end
+end
diff --git a/services/api/db/migrate/20130226170000_remove_native_target_from_links.rb b/services/api/db/migrate/20130226170000_remove_native_target_from_links.rb
new file mode 100644 (file)
index 0000000..4cecd0e
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RemoveNativeTargetFromLinks < ActiveRecord::Migration
+  def up
+    remove_column :links, :native_target_id
+    remove_column :links, :native_target_type
+  end
+  def down
+    add_column :links, :native_target_id, :integer
+    add_column :links, :native_target_type, :string
+  end
+end
diff --git a/services/api/db/migrate/20130313175417_rename_projects_to_groups.rb b/services/api/db/migrate/20130313175417_rename_projects_to_groups.rb
new file mode 100644 (file)
index 0000000..602c8b4
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameProjectsToGroups < ActiveRecord::Migration
+  def up
+    rename_table :projects, :groups
+    rename_index :groups, :index_projects_on_created_at, :index_groups_on_created_at
+    rename_index :groups, :index_projects_on_modified_at, :index_groups_on_modified_at
+    rename_index :groups, :index_projects_on_uuid, :index_groups_on_uuid
+    Link.update_all({head_kind:'orvos#group'}, ['head_kind=?','orvos#project'])
+    Link.update_all({tail_kind:'orvos#group'}, ['tail_kind=?','orvos#project'])
+    Log.update_all({object_kind:'orvos#group'}, ['object_kind=?','orvos#project'])
+  end
+
+  def down
+    Log.update_all({object_kind:'orvos#project'}, ['object_kind=?','orvos#group'])
+    Link.update_all({tail_kind:'orvos#project'}, ['tail_kind=?','orvos#group'])
+    Link.update_all({head_kind:'orvos#project'}, ['head_kind=?','orvos#group'])
+    rename_index :groups, :index_groups_on_created_at, :index_projects_on_created_at
+    rename_index :groups, :index_groups_on_modified_at, :index_projects_on_modified_at
+    rename_index :groups, :index_groups_on_uuid, :index_projects_on_uuid
+    rename_table :groups, :projects
+  end
+end
diff --git a/services/api/db/migrate/20130315155820_add_is_locked_by_to_jobs.rb b/services/api/db/migrate/20130315155820_add_is_locked_by_to_jobs.rb
new file mode 100644 (file)
index 0000000..8ea3657
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddIsLockedByToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :is_locked_by, :string
+  end
+end
diff --git a/services/api/db/migrate/20130315183626_add_log_to_jobs.rb b/services/api/db/migrate/20130315183626_add_log_to_jobs.rb
new file mode 100644 (file)
index 0000000..aa30165
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddLogToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :log, :string
+  end
+end
diff --git a/services/api/db/migrate/20130315213205_add_tasks_summary_to_jobs.rb b/services/api/db/migrate/20130315213205_add_tasks_summary_to_jobs.rb
new file mode 100644 (file)
index 0000000..9a64fea
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddTasksSummaryToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :tasks_summary, :text
+  end
+end
diff --git a/services/api/db/migrate/20130318002138_add_resource_limits_to_jobs.rb b/services/api/db/migrate/20130318002138_add_resource_limits_to_jobs.rb
new file mode 100644 (file)
index 0000000..aae3ff2
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddResourceLimitsToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :resource_limits, :text
+  end
+end
diff --git a/services/api/db/migrate/20130319165853_rename_job_command_to_script.rb b/services/api/db/migrate/20130319165853_rename_job_command_to_script.rb
new file mode 100644 (file)
index 0000000..8606698
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameJobCommandToScript < ActiveRecord::Migration
+  def up
+    rename_column :jobs, :command, :script
+    rename_column :jobs, :command_parameters, :script_parameters
+    rename_column :jobs, :command_version, :script_version
+    rename_index :jobs, :index_jobs_on_command, :index_jobs_on_script
+  end
+
+  def down
+    rename_index :jobs, :index_jobs_on_script, :index_jobs_on_command
+    rename_column :jobs, :script_version, :command_version
+    rename_column :jobs, :script_parameters, :command_parameters
+    rename_column :jobs, :script, :command
+  end
+end
diff --git a/services/api/db/migrate/20130319180730_rename_pipeline_invocation_to_pipeline_instance.rb b/services/api/db/migrate/20130319180730_rename_pipeline_invocation_to_pipeline_instance.rb
new file mode 100644 (file)
index 0000000..61cc277
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenamePipelineInvocationToPipelineInstance < ActiveRecord::Migration
+  def up
+    rename_table :pipeline_invocations, :pipeline_instances
+    rename_index :pipeline_instances, :index_pipeline_invocations_on_created_at, :index_pipeline_instances_on_created_at
+    rename_index :pipeline_instances, :index_pipeline_invocations_on_modified_at, :index_pipeline_instances_on_modified_at
+    rename_index :pipeline_instances, :index_pipeline_invocations_on_uuid, :index_pipeline_instances_on_uuid
+    Link.update_all({head_kind:'orvos#pipeline_instance'}, ['head_kind=?','orvos#pipeline_invocation'])
+    Link.update_all({tail_kind:'orvos#pipeline_instance'}, ['tail_kind=?','orvos#pipeline_invocation'])
+    Log.update_all({object_kind:'orvos#pipeline_instance'}, ['object_kind=?','orvos#pipeline_invocation'])
+  end
+
+  def down
+    Link.update_all({head_kind:'orvos#pipeline_invocation'}, ['head_kind=?','orvos#pipeline_instance'])
+    Link.update_all({tail_kind:'orvos#pipeline_invocation'}, ['tail_kind=?','orvos#pipeline_instance'])
+    Log.update_all({object_kind:'orvos#pipeline_invocation'}, ['object_kind=?','orvos#pipeline_instance'])
+    rename_index :pipeline_instances, :index_pipeline_instances_on_created_at, :index_pipeline_invocations_on_created_at
+    rename_index :pipeline_instances, :index_pipeline_instances_on_modified_at, :index_pipeline_invocations_on_modified_at
+    rename_index :pipeline_instances, :index_pipeline_instances_on_uuid, :index_pipeline_invocations_on_uuid
+    rename_table :pipeline_instances, :pipeline_invocations
+  end
+end
diff --git a/services/api/db/migrate/20130319194637_rename_pipelines_to_pipeline_templates.rb b/services/api/db/migrate/20130319194637_rename_pipelines_to_pipeline_templates.rb
new file mode 100644 (file)
index 0000000..012a86a
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenamePipelinesToPipelineTemplates < ActiveRecord::Migration
+  def up
+    rename_column :pipeline_instances, :pipeline_uuid, :pipeline_template_uuid
+    rename_table :pipelines, :pipeline_templates
+    rename_index :pipeline_templates, :index_pipelines_on_created_at, :index_pipeline_templates_on_created_at
+    rename_index :pipeline_templates, :index_pipelines_on_modified_at, :index_pipeline_templates_on_modified_at
+    rename_index :pipeline_templates, :index_pipelines_on_uuid, :index_pipeline_templates_on_uuid
+    Link.update_all({head_kind:'orvos#pipeline_template'}, ['head_kind=?','orvos#pipeline'])
+    Link.update_all({tail_kind:'orvos#pipeline_template'}, ['tail_kind=?','orvos#pipeline'])
+    Log.update_all({object_kind:'orvos#pipeline_template'}, ['object_kind=?','orvos#pipeline'])
+  end
+
+  def down
+    Link.update_all({head_kind:'orvos#pipeline'}, ['head_kind=?','orvos#pipeline_template'])
+    Link.update_all({tail_kind:'orvos#pipeline'}, ['tail_kind=?','orvos#pipeline_template'])
+    Log.update_all({object_kind:'orvos#pipeline'}, ['object_kind=?','orvos#pipeline_template'])
+    rename_index :pipeline_templates, :index_pipeline_templates_on_created_at, :index_pipelines_on_created_at
+    rename_index :pipeline_templates, :index_pipeline_templates_on_modified_at, :index_pipelines_on_modified_at
+    rename_index :pipeline_templates, :index_pipeline_templates_on_uuid, :index_pipelines_on_uuid
+    rename_table :pipeline_templates, :pipelines
+    rename_column :pipeline_instances, :pipeline_template_uuid, :pipeline_uuid
+  end
+end
diff --git a/services/api/db/migrate/20130319201431_rename_job_steps_to_job_tasks.rb b/services/api/db/migrate/20130319201431_rename_job_steps_to_job_tasks.rb
new file mode 100644 (file)
index 0000000..05d9ffb
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameJobStepsToJobTasks < ActiveRecord::Migration
+  def up
+    rename_table :job_steps, :job_tasks
+    rename_index :job_tasks, :index_job_steps_on_created_at, :index_job_tasks_on_created_at
+    rename_index :job_tasks, :index_job_steps_on_job_uuid, :index_job_tasks_on_job_uuid
+    rename_index :job_tasks, :index_job_steps_on_modified_at, :index_job_tasks_on_modified_at
+    rename_index :job_tasks, :index_job_steps_on_sequence, :index_job_tasks_on_sequence
+    rename_index :job_tasks, :index_job_steps_on_success, :index_job_tasks_on_success
+    rename_index :job_tasks, :index_job_steps_on_uuid, :index_job_tasks_on_uuid
+  end
+
+  def down
+    rename_index :job_steps, :index_job_tasks_on_created_at, :index_job_steps_on_created_at
+    rename_index :job_steps, :index_job_tasks_on_job_uuid, :index_job_steps_on_job_uuid
+    rename_index :job_steps, :index_job_tasks_on_modified_at, :index_job_steps_on_modified_at
+    rename_index :job_steps, :index_job_tasks_on_sequence, :index_job_steps_on_sequence
+    rename_index :job_steps, :index_job_tasks_on_success, :index_job_steps_on_success
+    rename_index :job_steps, :index_job_tasks_on_uuid, :index_job_steps_on_uuid
+    rename_table :job_tasks, :job_steps
+  end
+end
diff --git a/services/api/db/migrate/20130319235957_add_default_owner_to_users.rb b/services/api/db/migrate/20130319235957_add_default_owner_to_users.rb
new file mode 100644 (file)
index 0000000..935ce46
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddDefaultOwnerToUsers < ActiveRecord::Migration
+  def change
+    add_column :users, :default_owner, :string
+  end
+end
diff --git a/services/api/db/migrate/20130320000107_add_default_owner_to_api_client_authorizations.rb b/services/api/db/migrate/20130320000107_add_default_owner_to_api_client_authorizations.rb
new file mode 100644 (file)
index 0000000..0f2b18f
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddDefaultOwnerToApiClientAuthorizations < ActiveRecord::Migration
+  def change
+    add_column :api_client_authorizations, :default_owner, :string
+  end
+end
diff --git a/services/api/db/migrate/20130326173804_create_commits.rb b/services/api/db/migrate/20130326173804_create_commits.rb
new file mode 100644 (file)
index 0000000..972a61f
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateCommits < ActiveRecord::Migration
+  def change
+    create_table :commits do |t|
+      t.string :repository_name
+      t.string :sha1
+      t.string :message
+
+      t.timestamps
+    end
+    add_index :commits, [:repository_name, :sha1], :unique => true
+  end
+end
diff --git a/services/api/db/migrate/20130326182917_create_commit_ancestors.rb b/services/api/db/migrate/20130326182917_create_commit_ancestors.rb
new file mode 100644 (file)
index 0000000..d63b2a6
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateCommitAncestors < ActiveRecord::Migration
+  def change
+    create_table :commit_ancestors do |t|
+      t.string :repository_name
+      t.string :descendant, :null => false
+      t.string :ancestor, :null => false
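+      # The tersely named "is" flag records whether "ancestor" really is an
+      # ancestor of "descendant"; the table evidently caches git ancestry lookups.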
+      t.boolean :is, :default => false, :null => false
+
+      t.timestamps
+    end
+    add_index :commit_ancestors, [:descendant, :ancestor], :unique => true
+  end
+end
diff --git a/services/api/db/migrate/20130415020241_rename_orvos_to_arvados.rb b/services/api/db/migrate/20130415020241_rename_orvos_to_arvados.rb
new file mode 100644 (file)
index 0000000..ed66bc7
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameOrvosToArvados < ActiveRecord::Migration
+  def up
+    Link.update_all("head_kind=replace(head_kind,'orvos','arvados')")
+    Link.update_all("tail_kind=replace(tail_kind,'orvos','arvados')")
+    Log.update_all("object_kind=replace(object_kind,'orvos','arvados')")
+  end
+
+  def down
+    Link.update_all("head_kind=replace(head_kind,'arvados','orvos')")
+    Link.update_all("tail_kind=replace(tail_kind,'arvados','orvos')")
+    Log.update_all("object_kind=replace(object_kind,'arvados','orvos')")
+  end
+end
diff --git a/services/api/db/migrate/20130425024459_create_keep_disks.rb b/services/api/db/migrate/20130425024459_create_keep_disks.rb
new file mode 100644 (file)
index 0000000..79c33db
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateKeepDisks < ActiveRecord::Migration
+  def change
+    create_table :keep_disks do |t|
+      t.string :uuid, :null => false
+      t.string :owner, :null => false
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :ping_secret, :null => false
+      t.string :node_uuid
+      t.string :filesystem_uuid
+      t.integer :bytes_total
+      t.integer :bytes_free
+      t.boolean :is_readable, :null => false, :default => true
+      t.boolean :is_writable, :null => false, :default => true
+      t.datetime :last_read_at
+      t.datetime :last_write_at
+      t.datetime :last_ping_at
+
+      t.timestamps
+    end
+    add_index :keep_disks, :uuid, :unique => true
+    add_index :keep_disks, :filesystem_uuid
+    add_index :keep_disks, :node_uuid
+    add_index :keep_disks, :last_ping_at
+  end
+end
diff --git a/services/api/db/migrate/20130425214427_add_service_host_and_service_port_and_service_ssl_flag_to_keep_disks.rb b/services/api/db/migrate/20130425214427_add_service_host_and_service_port_and_service_ssl_flag_to_keep_disks.rb
new file mode 100644 (file)
index 0000000..5a81512
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddServiceHostAndServicePortAndServiceSslFlagToKeepDisks < ActiveRecord::Migration
+  def change
+    add_column :keep_disks, :service_host, :string
+    add_column :keep_disks, :service_port, :integer
+    add_column :keep_disks, :service_ssl_flag, :boolean
+    add_index :keep_disks, [:service_host, :service_port, :last_ping_at],
+      name: 'keep_disks_service_host_port_ping_at_index'
+  end
+end
diff --git a/services/api/db/migrate/20130523060112_add_created_by_job_task_to_job_tasks.rb b/services/api/db/migrate/20130523060112_add_created_by_job_task_to_job_tasks.rb
new file mode 100644 (file)
index 0000000..2f10e1c
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddCreatedByJobTaskToJobTasks < ActiveRecord::Migration
+  def change
+    add_column :job_tasks, :created_by_job_task, :string
+  end
+end
diff --git a/services/api/db/migrate/20130523060213_add_qsequence_to_job_tasks.rb b/services/api/db/migrate/20130523060213_add_qsequence_to_job_tasks.rb
new file mode 100644 (file)
index 0000000..84a2df2
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddQsequenceToJobTasks < ActiveRecord::Migration
+  def change
+    add_column :job_tasks, :qsequence, :integer
+  end
+end
diff --git a/services/api/db/migrate/20130524042319_fix_job_task_qsequence_type.rb b/services/api/db/migrate/20130524042319_fix_job_task_qsequence_type.rb
new file mode 100644 (file)
index 0000000..91652f5
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class FixJobTaskQsequenceType < ActiveRecord::Migration
+  def up
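+    # :limit => 8 widens qsequence to an 8-byte integer (bigint in PostgreSQL).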
+    change_column :job_tasks, :qsequence, :integer, :limit => 8
+  end
+
+  def down
+    change_column :job_tasks, :qsequence, :integer
+  end
+end
diff --git a/services/api/db/migrate/20130528134100_update_nodes_index.rb b/services/api/db/migrate/20130528134100_update_nodes_index.rb
new file mode 100644 (file)
index 0000000..94ea67d
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class UpdateNodesIndex < ActiveRecord::Migration
+  def up
+    remove_index :nodes, :hostname
+    add_index :nodes, :hostname
+  end
+  def down
+    remove_index :nodes, :hostname
+    add_index :nodes, :hostname, :unique => true
+  end
+end
diff --git a/services/api/db/migrate/20130606183519_create_authorized_keys.rb b/services/api/db/migrate/20130606183519_create_authorized_keys.rb
new file mode 100644 (file)
index 0000000..daeabb3
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateAuthorizedKeys < ActiveRecord::Migration
+  def change
+    create_table :authorized_keys do |t|
+      t.string :uuid, :null => false
+      t.string :owner, :null => false
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :name
+      t.string :key_type
+      t.string :authorized_user
+      t.text :public_key
+      t.datetime :expires_at
+
+      t.timestamps
+    end
+    add_index :authorized_keys, :uuid, :unique => true
+    add_index :authorized_keys, [:authorized_user, :expires_at]
+  end
+end
diff --git a/services/api/db/migrate/20130608053730_create_virtual_machines.rb b/services/api/db/migrate/20130608053730_create_virtual_machines.rb
new file mode 100644 (file)
index 0000000..97ecb73
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateVirtualMachines < ActiveRecord::Migration
+  def change
+    create_table :virtual_machines do |t|
+      t.string :uuid, :null => false
+      t.string :owner, :null => false
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :hostname
+
+      t.timestamps
+    end
+    add_index :virtual_machines, :uuid, :unique => true
+    add_index :virtual_machines, :hostname
+  end
+end
diff --git a/services/api/db/migrate/20130610202538_create_repositories.rb b/services/api/db/migrate/20130610202538_create_repositories.rb
new file mode 100644 (file)
index 0000000..6e8c947
--- /dev/null
@@ -0,0 +1,22 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateRepositories < ActiveRecord::Migration
+  def change
+    create_table :repositories do |t|
+      t.string :uuid, :null => false
+      t.string :owner, :null => false
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :name
+      t.string :fetch_url
+      t.string :push_url
+
+      t.timestamps
+    end
+    add_index :repositories, :uuid, :unique => true
+    add_index :repositories, :name
+  end
+end
diff --git a/services/api/db/migrate/20130611163736_rename_authorized_key_authorized_user_to_authorized_user_uuid.rb b/services/api/db/migrate/20130611163736_rename_authorized_key_authorized_user_to_authorized_user_uuid.rb
new file mode 100644 (file)
index 0000000..1e5f6cc
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameAuthorizedKeyAuthorizedUserToAuthorizedUserUuid < ActiveRecord::Migration
+  def up
+    remove_index :authorized_keys, [:authorized_user, :expires_at]
+    rename_column :authorized_keys, :authorized_user, :authorized_user_uuid
+    add_index :authorized_keys, [:authorized_user_uuid, :expires_at]
+  end
+
+  def down
+    remove_index :authorized_keys, [:authorized_user_uuid, :expires_at]
+    rename_column :authorized_keys, :authorized_user_uuid, :authorized_user
+    add_index :authorized_keys, [:authorized_user, :expires_at]
+  end
+end
diff --git a/services/api/db/migrate/20130612042554_add_name_unique_index_to_repositories.rb b/services/api/db/migrate/20130612042554_add_name_unique_index_to_repositories.rb
new file mode 100644 (file)
index 0000000..bb5767d
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddNameUniqueIndexToRepositories < ActiveRecord::Migration
+  def up
+    remove_index :repositories, :name
+    add_index :repositories, :name, :unique => true
+  end
+
+  def down
+    remove_index :repositories, :name
+    add_index :repositories, :name
+  end
+end
diff --git a/services/api/db/migrate/20130617150007_add_is_trusted_to_api_clients.rb b/services/api/db/migrate/20130617150007_add_is_trusted_to_api_clients.rb
new file mode 100644 (file)
index 0000000..e74acbc
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddIsTrustedToApiClients < ActiveRecord::Migration
+  def change
+    add_column :api_clients, :is_trusted, :boolean, :default => false
+  end
+end
diff --git a/services/api/db/migrate/20130626002829_add_is_active_to_users.rb b/services/api/db/migrate/20130626002829_add_is_active_to_users.rb
new file mode 100644 (file)
index 0000000..754ae6a
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddIsActiveToUsers < ActiveRecord::Migration
+  def change
+    add_column :users, :is_active, :boolean, :default => false
+  end
+end
diff --git a/services/api/db/migrate/20130626022810_activate_all_admins.rb b/services/api/db/migrate/20130626022810_activate_all_admins.rb
new file mode 100644 (file)
index 0000000..9986a30
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ActivateAllAdmins < ActiveRecord::Migration
+  def up
+    User.update_all({is_active: true}, ['is_admin=?', true])
+  end
+
+  def down
+  end
+end
diff --git a/services/api/db/migrate/20130627154537_create_traits.rb b/services/api/db/migrate/20130627154537_create_traits.rb
new file mode 100644 (file)
index 0000000..6dc677d
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateTraits < ActiveRecord::Migration
+  def change
+    create_table :traits do |t|
+      t.string :uuid, :null => false
+      t.string :owner, :null => false
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :name
+      t.text :properties
+
+      t.timestamps
+    end
+    add_index :traits, :uuid, :unique => true
+    add_index :traits, :name
+  end
+end
diff --git a/services/api/db/migrate/20130627184333_create_humans.rb b/services/api/db/migrate/20130627184333_create_humans.rb
new file mode 100644 (file)
index 0000000..4d8e1d2
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateHumans < ActiveRecord::Migration
+  def change
+    create_table :humans do |t|
+      t.string :uuid, :null => false
+      t.string :owner, :null => false
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.text :properties
+
+      t.timestamps
+    end
+    add_index :humans, :uuid, :unique => true
+  end
+end
diff --git a/services/api/db/migrate/20130708163414_rename_foreign_uuid_attributes.rb b/services/api/db/migrate/20130708163414_rename_foreign_uuid_attributes.rb
new file mode 100644 (file)
index 0000000..fa179da
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameForeignUuidAttributes < ActiveRecord::Migration
+  def change
+    rename_column :api_client_authorizations, :default_owner, :default_owner_uuid
+    [:api_clients, :authorized_keys, :collections,
+     :groups, :humans, :job_tasks, :jobs, :keep_disks,
+     :links, :logs, :nodes, :pipeline_instances, :pipeline_templates,
+     :repositories, :specimens, :traits, :users, :virtual_machines].each do |t|
+      rename_column t, :owner, :owner_uuid
+      rename_column t, :modified_by_client, :modified_by_client_uuid
+      rename_column t, :modified_by_user, :modified_by_user_uuid
+    end
+    rename_column :collections, :redundancy_confirmed_by_client, :redundancy_confirmed_by_client_uuid
+    rename_column :jobs, :is_locked_by, :is_locked_by_uuid
+    rename_column :job_tasks, :created_by_job_task, :created_by_job_task_uuid
+  end
+end
diff --git a/services/api/db/migrate/20130708182912_rename_job_foreign_uuid_attributes.rb b/services/api/db/migrate/20130708182912_rename_job_foreign_uuid_attributes.rb
new file mode 100644 (file)
index 0000000..e56f313
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameJobForeignUuidAttributes < ActiveRecord::Migration
+  def change
+    rename_column :jobs, :cancelled_by_client, :cancelled_by_client_uuid
+    rename_column :jobs, :cancelled_by_user, :cancelled_by_user_uuid
+  end
+end
diff --git a/services/api/db/migrate/20130708185153_rename_user_default_owner.rb b/services/api/db/migrate/20130708185153_rename_user_default_owner.rb
new file mode 100644 (file)
index 0000000..10dadc4
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameUserDefaultOwner < ActiveRecord::Migration
+  def change
+    rename_column :users, :default_owner, :default_owner_uuid
+  end
+end
diff --git a/services/api/db/migrate/20130724153034_add_scopes_to_api_client_authorizations.rb b/services/api/db/migrate/20130724153034_add_scopes_to_api_client_authorizations.rb
new file mode 100644 (file)
index 0000000..43a3271
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddScopesToApiClientAuthorizations < ActiveRecord::Migration
+  def change
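+    # Scopes are stored as serialized YAML; the default, ['all'], grants
+    # unrestricted access.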
+    add_column :api_client_authorizations, :scopes, :text, :null => false, :default => ['all'].to_yaml
+  end
+end
diff --git a/services/api/db/migrate/20131007180607_rename_resource_limits_to_runtime_constraints.rb b/services/api/db/migrate/20131007180607_rename_resource_limits_to_runtime_constraints.rb
new file mode 100644 (file)
index 0000000..2fe28c3
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameResourceLimitsToRuntimeConstraints < ActiveRecord::Migration
+  def change
+    rename_column :jobs, :resource_limits, :runtime_constraints
+  end
+end
diff --git a/services/api/db/migrate/20140117231056_normalize_collection_uuid.rb b/services/api/db/migrate/20140117231056_normalize_collection_uuid.rb
new file mode 100644 (file)
index 0000000..c28c268
--- /dev/null
@@ -0,0 +1,95 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class NormalizeCollectionUuid < ActiveRecord::Migration
+  def count_orphans
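+    # Count links whose head or tail refers to a collection hash that has no
+    # matching collections row; printing this before and after each step below
+    # shows whether normalization is reconnecting them.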
+    %w(head tail).each do |ht|
+      results = ActiveRecord::Base.connection.execute(<<-EOS)
+SELECT COUNT(links.*)
+ FROM links
+ LEFT JOIN collections c
+   ON links.#{ht}_uuid = c.uuid
+ WHERE (#{ht}_kind='arvados#collection' or #{ht}_uuid ~ '^[0-9a-f]{32,}')
+   AND #{ht}_uuid IS NOT NULL
+   AND #{ht}_uuid NOT IN (SELECT uuid FROM collections)
+EOS
+      puts "#{results.first['count'].to_i} links with #{ht}_uuid pointing nowhere."
+    end
+  end
+
+  def up
+    # Normalize uuids in the collections table to
+    # {hash}+{size}. Existing uuids might be {hash},
+    # {hash}+{size}+K@{instance-name}, {hash}+K@{instance-name}, etc.
+
+    count_orphans
+    puts "Normalizing collection UUIDs."
+
+    update_sql <<-EOS
+UPDATE collections
+ SET uuid = regexp_replace(uuid,'\\+.*','') || '+' || length(manifest_text)
+ WHERE uuid !~ '^[0-9a-f]{32,}\\+[0-9]+$'
+   AND (regexp_replace(uuid,'\\+.*','') || '+' || length(manifest_text))
+     NOT IN (SELECT uuid FROM collections)
+EOS
+
+    count_orphans
+    puts "Updating links by stripping +K@.* from *_uuid attributes."
+
+    update_sql <<-EOS
+UPDATE links
+ SET head_uuid = regexp_replace(head_uuid,'\\+K@.*','')
+ WHERE head_uuid like '%+K@%'
+EOS
+    update_sql <<-EOS
+UPDATE links
+ SET tail_uuid = regexp_replace(tail_uuid,'\\+K@.*','')
+ WHERE tail_uuid like '%+K@%'
+EOS
+
+    count_orphans
+    puts "Updating links by searching bare collection hashes using regexp."
+
+    # Next, update {hash} (and any other non-normalized forms) to
+    # {hash}+{size}. This can only work where the corresponding
+    # collection is found in the collections table (otherwise we can't
+    # know the size).
+    %w(head tail).each do |ht|
+      update_sql <<-EOS
+UPDATE links
+ SET #{ht}_uuid = c.uuid
+ FROM collections c
+ WHERE #{ht}_uuid IS NOT NULL
+   AND (#{ht}_kind='arvados#collection' or #{ht}_uuid ~ '^[0-9a-f]{32,}')
+   AND #{ht}_uuid NOT IN (SELECT uuid FROM collections)
+   AND regexp_replace(#{ht}_uuid,'\\+.*','') = regexp_replace(c.uuid,'\\+.*','')
+   AND c.uuid ~ '^[0-9a-f]{32,}\\+[0-9]+$'
+EOS
+    end
+
+    count_orphans
+    puts "Stripping \"+K@.*\" from jobs.output, jobs.log, job_tasks.output."
+
+    update_sql <<-EOS
+UPDATE jobs
+ SET output = regexp_replace(output,'\\+K@.*','')
+ WHERE output ~ '^[0-9a-f]{32,}\\+[0-9]+\\+K@\\w+$'
+EOS
+    update_sql <<-EOS
+UPDATE jobs
+ SET log = regexp_replace(log,'\\+K@.*','')
+ WHERE log ~ '^[0-9a-f]{32,}\\+[0-9]+\\+K@\\w+$'
+EOS
+    update_sql <<-EOS
+UPDATE job_tasks
+ SET output = regexp_replace(output,'\\+K@.*','')
+ WHERE output ~ '^[0-9a-f]{32,}\\+[0-9]+\\+K@\\w+$'
+EOS
+
+    puts "Done."
+  end
+
+  def down
+  end
+end
diff --git a/services/api/db/migrate/20140124222114_fix_link_kind_underscores.rb b/services/api/db/migrate/20140124222114_fix_link_kind_underscores.rb
new file mode 100644 (file)
index 0000000..90dbd6e
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class FixLinkKindUnderscores < ActiveRecord::Migration
+  def up
+    update_sql <<-EOS
+UPDATE links
+ SET head_kind = 'arvados#virtualMachine'
+ WHERE head_kind = 'arvados#virtual_machine'
+EOS
+  end
+
+  def down
+    update_sql <<-EOS
+UPDATE links
+ SET head_kind = 'arvados#virtual_machine'
+ WHERE head_kind = 'arvados#virtualMachine'
+EOS
+  end
+end
diff --git a/services/api/db/migrate/20140129184311_normalize_collection_uuids_in_script_parameters.rb b/services/api/db/migrate/20140129184311_normalize_collection_uuids_in_script_parameters.rb
new file mode 100644 (file)
index 0000000..a54c4cc
--- /dev/null
@@ -0,0 +1,49 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class NormalizeCollectionUuidsInScriptParameters < ActiveRecord::Migration
+  include CurrentApiClient
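+  # Strip "+K@instance" location hints from collection locators embedded in
+  # pipeline instance components and job script_parameters.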
+  def up
+    act_as_system_user do
+      PipelineInstance.all.each do |pi|
+        pi.save! if fix_values_recursively(pi.components)
+      end
+      Job.all.each do |j|
+        changed = false
+        j.script_parameters.each do |p, v|
+          if v.is_a?(String) and v.match(/\+K/)
+            v.gsub!(/\+K@\w+/, '')
+            changed = true
+          end
+        end
+        j.save! if changed
+      end
+    end
+  end
+
+  def down
+  end
+
+  protected
+  def fix_values_recursively fixme
+    changed = false
+    if fixme.is_a? String
+      if fixme.match(/\+K/)
+        fixme.gsub!(/\+K@\w+/, '')
+        return true
+      else
+        return false
+      end
+    elsif fixme.is_a? Array
+      fixme.each do |v|
+        changed = fix_values_recursively(v) || changed
+      end
+    elsif fixme.is_a? Hash
+      fixme.each do |p, v|
+        changed = fix_values_recursively(v) || changed
+      end
+    end
+    changed
+  end
+end
diff --git a/services/api/db/migrate/20140317135600_add_nondeterministic_column_to_job.rb b/services/api/db/migrate/20140317135600_add_nondeterministic_column_to_job.rb
new file mode 100644 (file)
index 0000000..ccf6210
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddNondeterministicColumnToJob < ActiveRecord::Migration
+  def up
+    add_column :jobs, :nondeterministic, :boolean
+  end
+
+  def down
+    remove_column :jobs, :nondeterministic
+  end
+end
diff --git a/services/api/db/migrate/20140319160547_separate_repository_from_script_version.rb b/services/api/db/migrate/20140319160547_separate_repository_from_script_version.rb
new file mode 100644 (file)
index 0000000..fc09892
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SeparateRepositoryFromScriptVersion < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def fixup pt
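+    # Split legacy "repository:commit" script_version values into separate
+    # repository and script_version fields.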
+    c = pt.components
+    c.each do |k, v|
+      commit_ish = v["script_version"]
+      if commit_ish.andand.index(':')
+        want_repo, commit_ish = commit_ish.split(':',2)
+        v[:repository] = want_repo
+        v[:script_version] = commit_ish
+      end
+    end
+    pt.save!
+  end
+
+  def up
+    act_as_system_user do
+      PipelineTemplate.all.each do |pt|
+        fixup pt
+      end
+      PipelineInstance.all.each do |pt|
+        fixup pt
+      end
+    end
+  end
+
+  def down
+    raise ActiveRecord::IrreversibleMigration
+  end
+end
diff --git a/services/api/db/migrate/20140321191343_add_repository_column_to_job.rb b/services/api/db/migrate/20140321191343_add_repository_column_to_job.rb
new file mode 100644 (file)
index 0000000..5e2b636
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddRepositoryColumnToJob < ActiveRecord::Migration
+  def up
+    add_column :jobs, :repository, :string
+  end
+
+  def down
+    remove_column :jobs, :repository
+  end
+end
diff --git a/services/api/db/migrate/20140324024606_add_output_is_persistent_to_job.rb b/services/api/db/migrate/20140324024606_add_output_is_persistent_to_job.rb
new file mode 100644 (file)
index 0000000..a66d92e
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddOutputIsPersistentToJob < ActiveRecord::Migration
+  def change
+    add_column :jobs, :output_is_persistent, :boolean, null: false, default: false
+  end
+end
diff --git a/services/api/db/migrate/20140325175653_remove_kind_columns.rb b/services/api/db/migrate/20140325175653_remove_kind_columns.rb
new file mode 100644 (file)
index 0000000..b4085ba
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RemoveKindColumns < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    remove_column :links, :head_kind
+    remove_column :links, :tail_kind
+    remove_column :logs, :object_kind
+  end
+
+  def down
+    add_column :links, :head_kind, :string
+    add_column :links, :tail_kind, :string
+    add_column :logs, :object_kind, :string
+
+    act_as_system_user do
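+      # Recompute each kind string from the resource class of the
+      # corresponding UUID.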
+      Link.all.each do |l|
+        l.head_kind = ArvadosModel::resource_class_for_uuid(l.head_uuid).kind if l.head_uuid
+        l.tail_kind = ArvadosModel::resource_class_for_uuid(l.tail_uuid).kind if l.tail_uuid
+        l.save
+      end
+      Log.all.each do |l|
+        l.object_kind = ArvadosModel::resource_class_for_uuid(l.object_uuid).kind if l.object_uuid
+        l.save
+      end
+    end
+  end
+end
diff --git a/services/api/db/migrate/20140402001908_add_system_group.rb b/services/api/db/migrate/20140402001908_add_system_group.rb
new file mode 100644 (file)
index 0000000..33bdd41
--- /dev/null
@@ -0,0 +1,22 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddSystemGroup < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    # Make sure the system group exists.
+    system_group
+  end
+
+  def down
+    act_as_system_user do
+      system_group.destroy
+
+      # Destroy the automatically generated links giving system_group
+      # permission on all users.
+      Link.destroy_all(tail_uuid: system_group_uuid, head_kind: 'arvados#user')
+    end
+  end
+end
diff --git a/services/api/db/migrate/20140407184311_rename_log_info_to_properties.rb b/services/api/db/migrate/20140407184311_rename_log_info_to_properties.rb
new file mode 100644 (file)
index 0000000..3d93ce5
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameLogInfoToProperties < ActiveRecord::Migration
+  def change
+    rename_column :logs, :info, :properties
+  end
+end
diff --git a/services/api/db/migrate/20140421140924_add_group_class_to_groups.rb b/services/api/db/migrate/20140421140924_add_group_class_to_groups.rb
new file mode 100644 (file)
index 0000000..ef62139
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddGroupClassToGroups < ActiveRecord::Migration
+  def change
+    add_column :groups, :group_class, :string
+    add_index :groups, :group_class
+  end
+end
diff --git a/services/api/db/migrate/20140421151939_rename_auth_keys_user_index.rb b/services/api/db/migrate/20140421151939_rename_auth_keys_user_index.rb
new file mode 100644 (file)
index 0000000..fd14074
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameAuthKeysUserIndex < ActiveRecord::Migration
+  # Rails' default name for this index is so long that Rails can't modify
+  # the index later: the autogenerated temporary name exceeds
+  # PostgreSQL's 64-character limit.  This migration gives the index
+  # an explicit name to work around that issue.
+  def change
+    rename_index("authorized_keys",
+                 "index_authorized_keys_on_authorized_user_uuid_and_expires_at",
+                 "index_authkeys_on_user_and_expires_at")
+  end
+end
diff --git a/services/api/db/migrate/20140421151940_timestamps_not_null.rb b/services/api/db/migrate/20140421151940_timestamps_not_null.rb
new file mode 100644 (file)
index 0000000..4426bef
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class TimestampsNotNull < ActiveRecord::Migration
+  def up
+    ActiveRecord::Base.connection.tables.each do |t|
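+      # schema_migrations has no created_at/updated_at columns, so skip it.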
+      next if t == 'schema_migrations'
+      change_column t.to_sym, :created_at, :datetime, :null => false
+      change_column t.to_sym, :updated_at, :datetime, :null => false
+    end
+  end
+  def down
+    # There might have been a NULL constraint before this, depending
+    # on the version of Rails used to build the database.
+  end
+end
diff --git a/services/api/db/migrate/20140422011506_pipeline_instance_state.rb b/services/api/db/migrate/20140422011506_pipeline_instance_state.rb
new file mode 100644 (file)
index 0000000..db84c2c
--- /dev/null
@@ -0,0 +1,88 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class PipelineInstanceState < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    add_column :pipeline_instances, :state, :string
+    add_column :pipeline_instances, :components_summary, :text
+
+    PipelineInstance.reset_column_information
+
+    act_as_system_user do
+      PipelineInstance.all.each do |pi|
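+        # Map the legacy active/success flags onto the new state value.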
+        pi.state = PipelineInstance::New
+
+        if !pi.attribute_present? :success   # success is nil
+          if pi[:active] == true
+            pi.state = PipelineInstance::RunningOnServer
+          else
+            if pi.components_look_ready?
+              pi.state = PipelineInstance::Ready
+            else
+              pi.state = PipelineInstance::New
+            end
+          end
+        elsif pi[:success] == true
+          pi.state = PipelineInstance::Complete
+        else
+          pi.state = PipelineInstance::Failed
+        end
+
+        pi.save!
+      end
+    end
+
+    # Adding the state column and removing the active and success columns are
+    # done in two separate phases, so the removal statements below stay
+    # commented out for now.
+=begin
+    if column_exists?(:pipeline_instances, :active)
+      remove_column :pipeline_instances, :active
+    end
+
+    if column_exists?(:pipeline_instances, :success)
+      remove_column :pipeline_instances, :success
+    end
+=end
+  end
+
+  def down
+    # The active and success columns are removed in a later phase, so the
+    # statements that would restore them stay commented out here.
+=begin
+    add_column :pipeline_instances, :success, :boolean, :null => true
+    add_column :pipeline_instances, :active, :boolean, :default => false
+
+    act_as_system_user do
+      PipelineInstance.all.each do |pi|
+        case pi.state
+        when PipelineInstance::New, PipelineInstance::Ready
+          pi.active = false
+          pi.success = nil
+        when PipelineInstance::RunningOnServer
+          pi.active = true
+          pi.success = nil
+        when PipelineInstance::RunningOnClient
+          pi.active = false
+          pi.success = nil
+        when PipelineInstance::Failed
+          pi.active = false
+          pi.success = false
+        when PipelineInstance::Complete
+          pi.active = false
+          pi.success = true
+        end
+        pi.save!
+      end
+    end
+=end
+
+    if column_exists?(:pipeline_instances, :components_summary)
+      remove_column :pipeline_instances, :components_summary
+    end
+
+    if column_exists?(:pipeline_instances, :state)
+      remove_column :pipeline_instances, :state
+    end
+  end
+end
diff --git a/services/api/db/migrate/20140423132913_add_object_owner_to_logs.rb b/services/api/db/migrate/20140423132913_add_object_owner_to_logs.rb
new file mode 100644 (file)
index 0000000..428ea57
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddObjectOwnerToLogs < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    add_column :logs, :object_owner_uuid, :string
+    act_as_system_user do
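+      # Backfill object_owner_uuid from the owner_uuid recorded in each log's
+      # attribute snapshots.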
+      Log.find_in_batches(:batch_size => 500) do |batch|
+        ActiveRecord::Base.transaction do
+          batch.each do |log|
+            if log.properties["new_attributes"]
+              log.object_owner_uuid = log.properties['new_attributes']['owner_uuid']
+              log.save
+            elsif log.properties["old_attributes"]
+              log.object_owner_uuid = log.properties['old_attributes']['owner_uuid']
+              log.save
+            end
+          end
+        end
+      end
+    end
+  end
+
+  def down
+    remove_column :logs, :object_owner_uuid
+  end
+end
diff --git a/services/api/db/migrate/20140423133559_new_scope_format.rb b/services/api/db/migrate/20140423133559_new_scope_format.rb
new file mode 100644 (file)
index 0000000..b706edb
--- /dev/null
@@ -0,0 +1,52 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# At the time we introduced scopes everywhere, VirtualMachinesController
+# recognized scopes containing a VM's URL as granting access to that VM's
+# login list.  This migration converts those VM-specific scopes to the new
+# general format; migrating down converts them back.
+
+class NewScopeFormat < ActiveRecord::Migration
+  include CurrentApiClient
+
+  VM_PATH_REGEX =
+    %r{(/arvados/v1/virtual_machines/[0-9a-z]{5}-[0-9a-z]{5}-[0-9a-z]{15})}
+  OLD_SCOPE_REGEX = %r{^https?://[^/]+#{VM_PATH_REGEX.source}$}
+  NEW_SCOPE_REGEX = %r{^GET #{VM_PATH_REGEX.source}/logins$}
+
+  def fix_scopes_matching(regex)
+    act_as_system_user
+    ApiClientAuthorization.find_each do |auth|
+      auth.scopes = auth.scopes.map do |scope|
+        if match = regex.match(scope)
+          yield match
+        else
+          scope
+        end
+      end
+      auth.save!
+    end
+  end
+
+  def up
+    fix_scopes_matching(OLD_SCOPE_REGEX) do |match|
+      "GET #{match[1]}/logins"
+    end
+  end
+
+  def down
+    case Rails.env
+    when 'test'
+      hostname = 'www.example.com'
+    else
+      require 'socket'
+      hostname = Socket.gethostname
+    end
+    fix_scopes_matching(NEW_SCOPE_REGEX) do |match|
+      Rails.application.routes.url_for(controller: 'virtual_machines',
+                                       uuid: match[1].split('/').last,
+                                       host: hostname, protocol: 'https')
+    end
+  end
+end
diff --git a/services/api/db/migrate/20140501165548_add_unique_name_index_to_links.rb b/services/api/db/migrate/20140501165548_add_unique_name_index_to_links.rb
new file mode 100644 (file)
index 0000000..c7f6e7a
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddUniqueNameIndexToLinks < ActiveRecord::Migration
+  def change
+    # Make sure PgPower is here. Otherwise the "where" will be ignored
+    # and we'll end up with a far too restrictive unique
+    # constraint. (Rails4 should work without PgPower, but that isn't
+    # tested.)
+    raise "No partial column support" unless defined?(PgPower)
+
+    add_index(:links, [:tail_uuid, :name], unique: true,
+              where: "link_class='name'",
+              name: 'links_tail_name_unique_if_link_class_name')
+  end
+end
diff --git a/services/api/db/migrate/20140519205916_create_keep_services.rb b/services/api/db/migrate/20140519205916_create_keep_services.rb
new file mode 100644 (file)
index 0000000..e6cb967
--- /dev/null
@@ -0,0 +1,55 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateKeepServices < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def change
+    act_as_system_user do
+      create_table :keep_services do |t|
+        t.string :uuid, :null => false
+        t.string :owner_uuid, :null => false
+        t.string :modified_by_client_uuid
+        t.string :modified_by_user_uuid
+        t.datetime :modified_at
+        t.string   :service_host
+        t.integer  :service_port
+        t.boolean  :service_ssl_flag
+        t.string   :service_type
+
+        t.timestamps
+      end
+      add_index :keep_services, :uuid, :unique => true
+
+      add_column :keep_disks, :keep_service_uuid, :string
+
+      KeepDisk.reset_column_information
+
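+      # Collapse the per-disk service fields into one keep_services row per
+      # unique (host, port, ssl) tuple, then point each disk at its service.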
+      services = {}
+
+      KeepDisk.find_each do |k|
+        services["#{k[:service_host]}_#{k[:service_port]}_#{k[:service_ssl_flag]}"] = {
+          service_host: k[:service_host],
+          service_port: k[:service_port],
+          service_ssl_flag: k[:service_ssl_flag],
+          service_type: 'disk',
+          owner_uuid: k[:owner_uuid]
+        }
+      end
+
+      services.each do |k, v|
+        v['uuid'] = KeepService.create(v).uuid
+      end
+
+      KeepDisk.find_each do |k|
+        k.keep_service_uuid = services["#{k[:service_host]}_#{k[:service_port]}_#{k[:service_ssl_flag]}"]['uuid']
+        k.save
+      end
+
+      remove_column :keep_disks, :service_host
+      remove_column :keep_disks, :service_port
+      remove_column :keep_disks, :service_ssl_flag
+    end
+  end
+end
diff --git a/services/api/db/migrate/20140527152921_add_description_to_pipeline_templates.rb b/services/api/db/migrate/20140527152921_add_description_to_pipeline_templates.rb
new file mode 100644 (file)
index 0000000..09e2ce1
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddDescriptionToPipelineTemplates < ActiveRecord::Migration
+  def change
+    add_column :pipeline_templates, :description, :text
+  end
+end
diff --git a/services/api/db/migrate/20140530200539_add_supplied_script_version.rb b/services/api/db/migrate/20140530200539_add_supplied_script_version.rb
new file mode 100644 (file)
index 0000000..62b1df0
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddSuppliedScriptVersion < ActiveRecord::Migration
+  def up
+    add_column :jobs, :supplied_script_version, :string
+  end
+
+  def down
+    remove_column :jobs, :supplied_script_version
+  end
+end
diff --git a/services/api/db/migrate/20140601022548_remove_name_from_collections.rb b/services/api/db/migrate/20140601022548_remove_name_from_collections.rb
new file mode 100644 (file)
index 0000000..d76ac93
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RemoveNameFromCollections < ActiveRecord::Migration
+  def up
+    remove_column :collections, :name
+  end
+
+  def down
+    add_column :collections, :name, :string
+  end
+end
diff --git a/services/api/db/migrate/20140602143352_remove_active_and_success_from_pipeline_instances.rb b/services/api/db/migrate/20140602143352_remove_active_and_success_from_pipeline_instances.rb
new file mode 100644 (file)
index 0000000..511138f
--- /dev/null
@@ -0,0 +1,46 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RemoveActiveAndSuccessFromPipelineInstances < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    if column_exists?(:pipeline_instances, :active)
+      remove_column :pipeline_instances, :active
+    end
+
+    if column_exists?(:pipeline_instances, :success)
+      remove_column :pipeline_instances, :success
+    end
+  end
+
+  def down
+    if !column_exists?(:pipeline_instances, :success)
+      add_column :pipeline_instances, :success, :boolean, :null => true
+    end
+    if !column_exists?(:pipeline_instances, :active)
+      add_column :pipeline_instances, :active, :boolean, :default => false
+    end
+
+    act_as_system_user do
+      PipelineInstance.all.each do |pi|
+        case pi.state
+        when PipelineInstance::New, PipelineInstance::Ready, PipelineInstance::Paused, PipelineInstance::RunningOnClient
+          pi.active = nil
+          pi.success = nil
+        when PipelineInstance::RunningOnServer
+          pi.active = true
+          pi.success = nil
+        when PipelineInstance::Failed
+          pi.active = false
+          pi.success = false
+        when PipelineInstance::Complete
+          pi.active = false
+          pi.success = true
+        end
+        pi.save!
+      end
+    end
+  end
+end
diff --git a/services/api/db/migrate/20140607150616_rename_folder_to_project.rb b/services/api/db/migrate/20140607150616_rename_folder_to_project.rb
new file mode 100644 (file)
index 0000000..f6daa97
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameFolderToProject < ActiveRecord::Migration
+  def up
+    Group.update_all("group_class = 'project'", "group_class = 'folder'")
+  end
+
+  def down
+    Group.update_all("group_class = 'folder'", "group_class = 'project'")
+  end
+end
diff --git a/services/api/db/migrate/20140611173003_add_docker_locator_to_jobs.rb b/services/api/db/migrate/20140611173003_add_docker_locator_to_jobs.rb
new file mode 100644 (file)
index 0000000..2be03f5
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddDockerLocatorToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :docker_image_locator, :string
+  end
+end
diff --git a/services/api/db/migrate/20140627210837_anonymous_group.rb b/services/api/db/migrate/20140627210837_anonymous_group.rb
new file mode 100644 (file)
index 0000000..cd49da4
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AnonymousGroup < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    # create the anonymous group and user
+    anonymous_group
+    anonymous_user
+  end
+
+  def down
+    act_as_system_user do
+      anonymous_user.destroy
+      anonymous_group.destroy
+    end
+  end
+
+end
diff --git a/services/api/db/migrate/20140709172343_job_task_serial_qsequence.rb b/services/api/db/migrate/20140709172343_job_task_serial_qsequence.rb
new file mode 100644 (file)
index 0000000..53e087a
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class JobTaskSerialQsequence < ActiveRecord::Migration
+  SEQ_NAME = "job_tasks_qsequence_seq"
+
+  def up
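+    # OWNED BY ties the sequence to the job_tasks.qsequence column, so
+    # dropping that column (or the table) drops the sequence automatically.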
+    execute "CREATE SEQUENCE #{SEQ_NAME} OWNED BY job_tasks.qsequence;"
+  end
+
+  def down
+    execute "DROP SEQUENCE #{SEQ_NAME};"
+  end
+end
diff --git a/services/api/db/migrate/20140714184006_empty_collection.rb b/services/api/db/migrate/20140714184006_empty_collection.rb
new file mode 100644 (file)
index 0000000..0c0eba6
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class EmptyCollection < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    empty_collection
+  end
+
+  def down
+    # do nothing when migrating down (having the empty collection
+    # and a permission link for it is harmless)
+  end
+end
diff --git a/services/api/db/migrate/20140811184643_collection_use_regular_uuids.rb b/services/api/db/migrate/20140811184643_collection_use_regular_uuids.rb
new file mode 100644 (file)
index 0000000..003f74b
--- /dev/null
@@ -0,0 +1,184 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CollectionUseRegularUuids < ActiveRecord::Migration
+  def up
+    add_column :collections, :name, :string
+    add_column :collections, :description, :string
+    add_column :collections, :properties, :text
+    add_column :collections, :expires_at, :date
+    remove_column :collections, :locator
+
+    say_with_time "Step 1. Move manifest hashes into portable_data_hash field" do
+      ActiveRecord::Base.connection.execute("update collections set portable_data_hash=uuid, uuid=null")
+    end
+
+    say_with_time "Step 2. Create new collection objects from the name links in the table." do
+      from_clause = %{
+from links inner join collections on head_uuid=collections.portable_data_hash
+where link_class='name' and collections.uuid is null
+}
+      links = ActiveRecord::Base.connection.select_all %{
+select links.uuid, head_uuid, tail_uuid, links.name,
+manifest_text, links.created_at, links.modified_at, links.modified_by_client_uuid, links.modified_by_user_uuid
+#{from_clause}
+}
+      links.each do |d|
+        ActiveRecord::Base.connection.execute %{
+insert into collections (uuid, portable_data_hash, owner_uuid, name, manifest_text, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, updated_at)
+values (#{ActiveRecord::Base.connection.quote Collection.generate_uuid},
+#{ActiveRecord::Base.connection.quote d['head_uuid']},
+#{ActiveRecord::Base.connection.quote d['tail_uuid']},
+#{ActiveRecord::Base.connection.quote d['name']},
+#{ActiveRecord::Base.connection.quote d['manifest_text']},
+#{ActiveRecord::Base.connection.quote d['created_at']},
+#{ActiveRecord::Base.connection.quote d['modified_at']},
+#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_at']})
+}
+      end
+      ActiveRecord::Base.connection.execute "delete from links where links.uuid in (select links.uuid #{from_clause})"
+    end
+
+    say_with_time "Step 3. Create new collection objects from the can_read links in the table." do
+      from_clause = %{
+from links inner join collections on head_uuid=collections.portable_data_hash
+where link_class='permission' and links.name='can_read' and collections.uuid is null
+}
+      links = ActiveRecord::Base.connection.select_all %{
+select links.uuid, head_uuid, tail_uuid, manifest_text, links.created_at, links.modified_at
+#{from_clause}
+}
+      links.each do |d|
+        ActiveRecord::Base.connection.execute %{
+insert into collections (uuid, portable_data_hash, owner_uuid, manifest_text, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, updated_at)
+values (#{ActiveRecord::Base.connection.quote Collection.generate_uuid},
+#{ActiveRecord::Base.connection.quote d['head_uuid']},
+#{ActiveRecord::Base.connection.quote d['tail_uuid']},
+#{ActiveRecord::Base.connection.quote d['manifest_text']},
+#{ActiveRecord::Base.connection.quote d['created_at']},
+#{ActiveRecord::Base.connection.quote d['modified_at']},
+#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_at']})
+}
+      end
+      ActiveRecord::Base.connection.execute "delete from links where links.uuid in (select links.uuid #{from_clause})"
+    end
+
+    say_with_time "Step 4. Migrate remaining orphan collection objects" do
+      links = ActiveRecord::Base.connection.select_all %{
+select portable_data_hash, owner_uuid, manifest_text, created_at, modified_at
+from collections
+where uuid is null and portable_data_hash not in (select portable_data_hash from collections where uuid is not null)
+}
+      links.each do |d|
+        ActiveRecord::Base.connection.execute %{
+insert into collections (uuid, portable_data_hash, owner_uuid, manifest_text, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, updated_at)
+values (#{ActiveRecord::Base.connection.quote Collection.generate_uuid},
+#{ActiveRecord::Base.connection.quote d['portable_data_hash']},
+#{ActiveRecord::Base.connection.quote d['owner_uuid']},
+#{ActiveRecord::Base.connection.quote d['manifest_text']},
+#{ActiveRecord::Base.connection.quote d['created_at']},
+#{ActiveRecord::Base.connection.quote d['modified_at']},
+#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_at']})
+}
+      end
+    end
+
+    say_with_time "Step 5. Delete old collection objects." do
+      ActiveRecord::Base.connection.execute("delete from collections where uuid is null")
+    end
+
+    say_with_time "Step 6. Delete permission links where tail_uuid is a collection (invalid records)" do
+      ActiveRecord::Base.connection.execute %{
+delete from links where links.uuid in (select links.uuid
+from links
+where tail_uuid like '________________________________+%' and link_class='permission' )
+}
+    end
+
+    say_with_time "Step 7. Migrate collection -> collection provenance links to jobs" do
+      from_clause = %{
+from links
+where head_uuid like '________________________________+%' and tail_uuid like '________________________________+%' and links.link_class = 'provenance'
+}
+      links = ActiveRecord::Base.connection.select_all %{
+select links.uuid, head_uuid, tail_uuid, links.created_at, links.modified_at, links.modified_by_client_uuid, links.modified_by_user_uuid, links.owner_uuid
+#{from_clause}
+}
+      links.each do |d|
+        newuuid = Job.generate_uuid
+        ActiveRecord::Base.connection.execute %{
+insert into jobs (uuid, script_parameters, output, running, success, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, owner_uuid, updated_at)
+values (#{ActiveRecord::Base.connection.quote newuuid},
+#{ActiveRecord::Base.connection.quote "---\ninput: "+d['tail_uuid']},
+#{ActiveRecord::Base.connection.quote d['head_uuid']},
+#{ActiveRecord::Base.connection.quote false},
+#{ActiveRecord::Base.connection.quote true},
+#{ActiveRecord::Base.connection.quote d['created_at']},
+#{ActiveRecord::Base.connection.quote d['modified_at']},
+#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},
+#{ActiveRecord::Base.connection.quote d['owner_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_at']})
+}
+      end
+      ActiveRecord::Base.connection.execute "delete from links where links.uuid in (select links.uuid #{from_clause})"
+    end
+
+    say_with_time "Step 8. Migrate remaining links with head_uuid pointing to collections" do
+      from_clause = %{
+from links inner join collections on links.head_uuid=portable_data_hash
+where collections.uuid is not null
+}
+      links = ActiveRecord::Base.connection.select_all %{
+select links.uuid, collections.uuid as collectionuuid, tail_uuid, link_class, links.properties,
+links.name, links.created_at, links.modified_at, links.modified_by_client_uuid, links.modified_by_user_uuid, links.owner_uuid
+#{from_clause}
+}
+      links.each do |d|
+        ActiveRecord::Base.connection.execute %{
+insert into links (uuid, head_uuid, tail_uuid, link_class, name, properties, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, owner_uuid, updated_at)
+values (#{ActiveRecord::Base.connection.quote Link.generate_uuid},
+#{ActiveRecord::Base.connection.quote d['collectionuuid']},
+#{ActiveRecord::Base.connection.quote d['tail_uuid']},
+#{ActiveRecord::Base.connection.quote d['link_class']},
+#{ActiveRecord::Base.connection.quote d['name']},
+#{ActiveRecord::Base.connection.quote d['properties']},
+#{ActiveRecord::Base.connection.quote d['created_at']},
+#{ActiveRecord::Base.connection.quote d['modified_at']},
+#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},
+#{ActiveRecord::Base.connection.quote d['owner_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_at']})
+}
+      end
+      ActiveRecord::Base.connection.execute "delete from links where links.uuid in (select links.uuid #{from_clause})"
+    end
+
+    say_with_time "Step 9. Delete any remaining name links" do
+      ActiveRecord::Base.connection.execute("delete from links where link_class='name'")
+    end
+
+    say_with_time "Step 10. Validate links table" do
+      links = ActiveRecord::Base.connection.select_all %{
+select links.uuid, head_uuid, tail_uuid, link_class, name
+from links
+where head_uuid like '________________________________+%' or tail_uuid like '________________________________+%'
+}
+      links.each do |d|
+        raise "Bad row #{d}"
+      end
+    end
+
+  end
+
+  def down
+    raise ActiveRecord::IrreversibleMigration, "Can't reverse the changes to collections and links without potentially losing data."
+  end
+end
diff --git a/services/api/db/migrate/20140817035914_add_unique_name_constraints.rb b/services/api/db/migrate/20140817035914_add_unique_name_constraints.rb
new file mode 100644 (file)
index 0000000..a226836
--- /dev/null
@@ -0,0 +1,34 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddUniqueNameConstraints < ActiveRecord::Migration
+  def change
+    # Ensure uniqueness before adding constraints.
+    ["collections", "pipeline_templates", "groups"].each do |table|
+      rows = ActiveRecord::Base.connection.select_all %{
+select uuid, owner_uuid, name from #{table} order by owner_uuid, name
+}
+      prev = {}
+      n = 1
+      rows.each do |r|
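+        # Rows are ordered by (owner_uuid, name); rename each duplicate by
+        # appending a counter so the unique indexes below can be created.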
+        if r["owner_uuid"] == prev["owner_uuid"] and !r["name"].nil? and r["name"] == prev["name"]
+          n += 1
+          ActiveRecord::Base.connection.execute %{
+update #{table} set name='#{r["name"]} #{n}' where uuid='#{r["uuid"]}'
+}
+        else
+          n = 1
+        end
+        prev = r
+      end
+    end
+
+    add_index(:collections, [:owner_uuid, :name], unique: true,
+              name: 'collection_owner_uuid_name_unique')
+    add_index(:pipeline_templates, [:owner_uuid, :name], unique: true,
+              name: 'pipeline_template_owner_uuid_name_unique')
+    add_index(:groups, [:owner_uuid, :name], unique: true,
+              name: 'groups_owner_uuid_name_unique')
+  end
+end
diff --git a/services/api/db/migrate/20140818125735_add_not_null_constraint_to_group_name.rb b/services/api/db/migrate/20140818125735_add_not_null_constraint_to_group_name.rb
new file mode 100644 (file)
index 0000000..1322420
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddNotNullConstraintToGroupName < ActiveRecord::Migration
+  def change
+    ActiveRecord::Base.connection.execute("update groups set name=uuid where name is null or name=''")
+    change_column_null :groups, :name, false
+  end
+end
diff --git a/services/api/db/migrate/20140826180337_remove_output_is_persistent_column.rb b/services/api/db/migrate/20140826180337_remove_output_is_persistent_column.rb
new file mode 100644 (file)
index 0000000..dbb4c06
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RemoveOutputIsPersistentColumn < ActiveRecord::Migration
+  def up
+    remove_column :jobs, :output_is_persistent
+  end
+
+  def down
+    add_column :jobs, :output_is_persistent, :boolean, null: false, default: false
+  end
+end
diff --git a/services/api/db/migrate/20140828141043_job_priority_fixup.rb b/services/api/db/migrate/20140828141043_job_priority_fixup.rb
new file mode 100644 (file)
index 0000000..87f29b6
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class JobPriorityFixup < ActiveRecord::Migration
+  def up
+    remove_column :jobs, :priority
+    add_column :jobs, :priority, :integer, null: false, default: 0
+  end
+
+  def down
+    remove_column :jobs, :priority
+    add_column :jobs, :priority, :string, null: true, default: nil
+  end
+end
diff --git a/services/api/db/migrate/20140909183946_add_start_finish_time_to_tasks_and_pipelines.rb b/services/api/db/migrate/20140909183946_add_start_finish_time_to_tasks_and_pipelines.rb
new file mode 100644 (file)
index 0000000..df64127
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddStartFinishTimeToTasksAndPipelines < ActiveRecord::Migration
+  def up
+    add_column :job_tasks, :started_at, :datetime
+    add_column :job_tasks, :finished_at, :datetime
+    add_column :pipeline_instances, :started_at, :datetime
+    add_column :pipeline_instances, :finished_at, :datetime
+  end
+
+  def down
+    remove_column :job_tasks, :started_at
+    remove_column :job_tasks, :finished_at
+    remove_column :pipeline_instances, :started_at
+    remove_column :pipeline_instances, :finished_at
+  end
+end
diff --git a/services/api/db/migrate/20140911221252_add_description_to_pipeline_instances_and_jobs.rb b/services/api/db/migrate/20140911221252_add_description_to_pipeline_instances_and_jobs.rb
new file mode 100644 (file)
index 0000000..1ac030b
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddDescriptionToPipelineInstancesAndJobs < ActiveRecord::Migration
+  def up
+    add_column :pipeline_instances, :description, :text, null: true
+    add_column :jobs, :description, :text, null: true
+  end
+
+  def down
+    remove_column :jobs, :description
+    remove_column :pipeline_instances, :description
+  end
+end
diff --git a/services/api/db/migrate/20140918141529_change_user_owner_uuid_not_null.rb b/services/api/db/migrate/20140918141529_change_user_owner_uuid_not_null.rb
new file mode 100644 (file)
index 0000000..c111708
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ChangeUserOwnerUuidNotNull < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    User.update_all({owner_uuid: system_user_uuid}, 'owner_uuid is null')
+    change_column :users, :owner_uuid, :string, :null => false
+  end
+
+  def down
+    change_column :users, :owner_uuid, :string, :null => true
+  end
+end
diff --git a/services/api/db/migrate/20140918153541_add_properties_to_node.rb b/services/api/db/migrate/20140918153541_add_properties_to_node.rb
new file mode 100644 (file)
index 0000000..513ca04
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddPropertiesToNode < ActiveRecord::Migration
+  def up
+    add_column :nodes, :properties, :text
+  end
+
+  def down
+    remove_column :nodes, :properties
+  end
+end
diff --git a/services/api/db/migrate/20140918153705_add_state_to_job.rb b/services/api/db/migrate/20140918153705_add_state_to_job.rb
new file mode 100644 (file)
index 0000000..77310d3
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddStateToJob < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    ActiveRecord::Base.transaction do
+      add_column :jobs, :state, :string
+      Job.reset_column_information
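+      # Derive state from the legacy flags. Each update_all matches only
+      # rows whose state is still null, so earlier rules take precedence.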
+      Job.update_all({state: 'Cancelled'}, ['state is null and cancelled_at is not null'])
+      Job.update_all({state: 'Failed'}, ['state is null and success = ?', false])
+      Job.update_all({state: 'Complete'}, ['state is null and success = ?', true])
+      Job.update_all({state: 'Running'}, ['state is null and running = ?', true])
+      # Locked/started, but not Running/Failed/Complete? Let's assume it failed.
+      Job.update_all({state: 'Failed'}, ['state is null and (is_locked_by_uuid is not null or started_at is not null)'])
+      Job.update_all({state: 'Queued'}, ['state is null'])
+    end
+  end
+
+  def down
+    remove_column :jobs, :state
+  end
+end
diff --git a/services/api/db/migrate/20140924091559_add_job_uuid_to_nodes.rb b/services/api/db/migrate/20140924091559_add_job_uuid_to_nodes.rb
new file mode 100644 (file)
index 0000000..55378d7
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddJobUuidToNodes < ActiveRecord::Migration
+  def up
+    change_table :nodes do |t|
+      t.column :job_uuid, :string
+    end
+  end
+
+  def down
+    change_table :nodes do |t|
+      t.remove :job_uuid
+    end
+  end
+end
diff --git a/services/api/db/migrate/20141111133038_add_arvados_sdk_version_to_jobs.rb b/services/api/db/migrate/20141111133038_add_arvados_sdk_version_to_jobs.rb
new file mode 100644 (file)
index 0000000..c0e3b3b
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddArvadosSdkVersionToJobs < ActiveRecord::Migration
+  def up
+    change_table :jobs do |t|
+      t.column :arvados_sdk_version, :string
+    end
+  end
+
+  def down
+    change_table :jobs do |t|
+      t.remove :arvados_sdk_version
+    end
+  end
+end
diff --git a/services/api/db/migrate/20141208164553_owner_uuid_index.rb b/services/api/db/migrate/20141208164553_owner_uuid_index.rb
new file mode 100644 (file)
index 0000000..a6d4e76
--- /dev/null
@@ -0,0 +1,30 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class OwnerUuidIndex < ActiveRecord::Migration
+  def tables_with_owner_uuid
+    %w{api_clients authorized_keys collections groups humans
+       job_tasks jobs keep_disks keep_services links logs
+       nodes pipeline_instances pipeline_templates repositories
+       specimens traits users virtual_machines}
+  end
+
+  def up
+    tables_with_owner_uuid.each do |table|
+      add_index table.to_sym, :owner_uuid
+    end
+  end
+
+  def down
+    tables_with_owner_uuid.each do |table|
+      indexes = ActiveRecord::Base.connection.indexes(table)
+      owner_uuid_index = indexes.select do |index|
+        index.columns == ['owner_uuid']
+      end
+      if !owner_uuid_index.empty?
+        remove_index table.to_sym, :owner_uuid
+      end
+    end
+  end
+end
diff --git a/services/api/db/migrate/20141208174553_descriptions_are_strings.rb b/services/api/db/migrate/20141208174553_descriptions_are_strings.rb
new file mode 100644 (file)
index 0000000..554474d
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class DescriptionsAreStrings < ActiveRecord::Migration
+  def tables_with_description_column
+    %w{collections groups jobs pipeline_instances pipeline_templates}
+  end
+
+  def up
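+    # Allow descriptions up to 2**19 = 524288 characters.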
+    tables_with_description_column.each do |table|
+      change_column table.to_sym, :description, :string, :limit => 2**19
+    end
+  end
+
+  def down
+    tables_with_description_column.each do |table|
+      if table == 'collections'
+        change_column table.to_sym, :description, :string # implicit limit 255
+      else
+        change_column table.to_sym, :description, :text
+      end
+    end
+  end
+end
diff --git a/services/api/db/migrate/20141208174653_collection_file_names.rb b/services/api/db/migrate/20141208174653_collection_file_names.rb
new file mode 100644 (file)
index 0000000..50ab30c
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CollectionFileNames < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    add_column :collections, :file_names, :string, :limit => 2**13
+
+    act_as_system_user do
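+      # Scan in small batches to keep memory bounded while computing each
+      # collection's list of file names.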
+      Collection.find_each(batch_size: 20) do |c|
+        file_names = c.manifest_files
+        ActiveRecord::Base.connection.execute "UPDATE collections
+                    SET file_names = #{ActiveRecord::Base.connection.quote(file_names)}
+                    WHERE uuid = '#{c.uuid}'"
+      end
+    end
+  end
+
+  def down
+    remove_column :collections, :file_names
+  end
+end
diff --git a/services/api/db/migrate/20141208185217_search_index.rb b/services/api/db/migrate/20141208185217_search_index.rb
new file mode 100644 (file)
index 0000000..c8e9fef
--- /dev/null
@@ -0,0 +1,36 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SearchIndex < ActiveRecord::Migration
+  def tables_with_searchable_columns
+    {
+      "api_client_authorizations" => ["api_token", "created_by_ip_address", "last_used_by_ip_address", "default_owner_uuid"],
+      "api_clients" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name", "url_prefix"],
+      "authorized_keys" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name", "key_type", "authorized_user_uuid"],
+      "collections" => ["owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "portable_data_hash", "redundancy_confirmed_by_client_uuid", "uuid", "name", "file_names"],
+      "groups" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name", "group_class"],
+      "humans" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid"],
+      "job_tasks" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "job_uuid", "created_by_job_task_uuid"],
+      "jobs" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "submit_id", "script", "script_version", "cancelled_by_client_uuid", "cancelled_by_user_uuid", "output", "is_locked_by_uuid", "log", "repository", "supplied_script_version", "docker_image_locator", "state", "arvados_sdk_version"],
+      "keep_disks" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "ping_secret", "node_uuid", "filesystem_uuid", "keep_service_uuid"],
+      "keep_services" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "service_host", "service_type"],
+      "links" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "tail_uuid", "link_class", "name", "head_uuid"],
+      "logs" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "object_uuid", "event_type", "object_owner_uuid"],
+      "nodes" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "hostname", "domain", "ip_address", "job_uuid"],
+      "pipeline_instances" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "pipeline_template_uuid", "name", "state"],
+      "pipeline_templates" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name"],
+      "repositories" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name", "fetch_url", "push_url"],
+      "specimens" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "material"],
+      "traits" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name"],
+      "users" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "email", "first_name", "last_name", "identity_url", "default_owner_uuid"],
+      "virtual_machines" => ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "hostname"],
+    }
+  end
+
+  def change
+    tables_with_searchable_columns.each do |table, columns|
+      add_index table.to_sym, columns, name: "#{table}_search_index"
+    end
+  end
+end
diff --git a/services/api/db/migrate/20150122175935_no_description_in_search_index.rb b/services/api/db/migrate/20150122175935_no_description_in_search_index.rb
new file mode 100644 (file)
index 0000000..7371b0e
--- /dev/null
@@ -0,0 +1,34 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# If the database reflects an obsolete version of the 20141208185217
+# migration (i.e., before commit:5c1db683), revert it and reapply the
+# current version. (The down-migration is the same in both versions.)
+
+require "./db/migrate/20141208185217_search_index.rb"
+
+class NoDescriptionInSearchIndex < ActiveRecord::Migration
+  def up
+    all_tables = %w{collections groups jobs pipeline_instances pipeline_templates}
+    all_tables.each do |table|
+      indexes = ActiveRecord::Base.connection.indexes(table)
+      search_index_by_name = indexes.select do |index|
+        index.name == "#{table}_search_index"
+      end
+
+      if !search_index_by_name.empty?
+        index_columns = search_index_by_name.first.columns
+        has_description = index_columns.include? 'description'
+        if has_description
+          SearchIndex.new.migrate(:down)
+          SearchIndex.new.migrate(:up)
+          break
+        end
+      end
+    end
+  end
+
+  def down
+  end
+end
diff --git a/services/api/db/migrate/20150123142953_full_text_search.rb b/services/api/db/migrate/20150123142953_full_text_search.rb
new file mode 100644 (file)
index 0000000..b9b56a5
--- /dev/null
@@ -0,0 +1,22 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class FullTextSearch < ActiveRecord::Migration
+
+  def up
+    execute "CREATE INDEX collections_full_text_search_idx ON collections USING gin(#{Collection.full_text_tsvector});"
+    execute "CREATE INDEX groups_full_text_search_idx ON groups USING gin(#{Group.full_text_tsvector});"
+    execute "CREATE INDEX jobs_full_text_search_idx ON jobs USING gin(#{Job.full_text_tsvector});"
+    execute "CREATE INDEX pipeline_instances_full_text_search_idx ON pipeline_instances USING gin(#{PipelineInstance.full_text_tsvector});"
+    execute "CREATE INDEX pipeline_templates_full_text_search_idx ON pipeline_templates USING gin(#{PipelineTemplate.full_text_tsvector});"
+  end
+
+  def down
+    remove_index :pipeline_templates, :name => 'pipeline_templates_full_text_search_idx'
+    remove_index :pipeline_instances, :name => 'pipeline_instances_full_text_search_idx'
+    remove_index :jobs, :name => 'jobs_full_text_search_idx'
+    remove_index :groups, :name => 'groups_full_text_search_idx'
+    remove_index :collections, :name => 'collections_full_text_search_idx'
+  end
+end
diff --git a/services/api/db/migrate/20150203180223_set_group_class_on_anonymous_group.rb b/services/api/db/migrate/20150203180223_set_group_class_on_anonymous_group.rb
new file mode 100644 (file)
index 0000000..03e7fbb
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SetGroupClassOnAnonymousGroup < ActiveRecord::Migration
+  include CurrentApiClient
+  def up
+    act_as_system_user do
+      anonymous_group.update_attributes group_class: 'role', name: 'Anonymous users', description: 'Anonymous users'
+    end
+  end
+
+  def down
+    act_as_system_user do
+      anonymous_group.update_attributes group_class: nil, name: 'Anonymous group', description: 'Anonymous group'
+    end
+  end
+end
diff --git a/services/api/db/migrate/20150206210804_all_users_can_read_anonymous_group.rb b/services/api/db/migrate/20150206210804_all_users_can_read_anonymous_group.rb
new file mode 100644 (file)
index 0000000..ea9f393
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AllUsersCanReadAnonymousGroup < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    anonymous_group_read_permission
+  end
+
+  def down
+    # Do nothing: it's too dangerous to try to figure out whether the
+    # permission was created by this migration.
+  end
+end
diff --git a/services/api/db/migrate/20150206230342_rename_replication_attributes.rb b/services/api/db/migrate/20150206230342_rename_replication_attributes.rb
new file mode 100644 (file)
index 0000000..e6e5457
--- /dev/null
@@ -0,0 +1,34 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameReplicationAttributes < ActiveRecord::Migration
+  RENAME = [[:redundancy, :replication_desired],
+            [:redundancy_confirmed_as, :replication_confirmed],
+            [:redundancy_confirmed_at, :replication_confirmed_at]]
+
+  def up
+    RENAME.each do |oldname, newname|
+      rename_column :collections, oldname, newname
+    end
+    remove_column :collections, :redundancy_confirmed_by_client_uuid
+    Collection.reset_column_information
+
+    # Removing that column dropped some search indexes. Let's put them back.
+    add_index :collections, ["owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "portable_data_hash", "uuid", "name", "file_names"], name: 'collections_search_index'
+    execute "CREATE INDEX collections_full_text_search_idx ON collections USING gin(#{Collection.full_text_tsvector});"
+  end
+
+  def down
+    remove_index :collections, name: 'collections_search_index'
+    add_column :collections, :redundancy_confirmed_by_client_uuid, :string
+    RENAME.reverse.each do |oldname, newname|
+      rename_column :collections, newname, oldname
+    end
+    remove_index :collections, :name => 'collections_full_text_search_idx'
+    Collection.reset_column_information
+
+    execute "CREATE INDEX collections_full_text_search_idx ON collections USING gin(#{Collection.full_text_tsvector});"
+    add_index :collections, ["owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "portable_data_hash", "uuid", "name", "file_names", "redundancy_confirmed_by_client_uuid"], name: 'collections_search_index'
+  end
+end
diff --git a/services/api/db/migrate/20150216193428_collection_name_owner_unique_only_non_expired.rb b/services/api/db/migrate/20150216193428_collection_name_owner_unique_only_non_expired.rb
new file mode 100644 (file)
index 0000000..6e58188
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CollectionNameOwnerUniqueOnlyNonExpired < ActiveRecord::Migration
+  def find_index
+    indexes = ActiveRecord::Base.connection.indexes('collections')
+    name_owner_index = indexes.select do |index|
+      index.name == 'collection_owner_uuid_name_unique'
+    end
+    name_owner_index
+  end
+
+  def up
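+    # Recreate the unique index as a partial index, so expired collections
+    # no longer count against (owner_uuid, name) uniqueness.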
+    remove_index :collections, :name => 'collection_owner_uuid_name_unique' if !find_index.empty?
+    add_index(:collections, [:owner_uuid, :name], unique: true,
+              where: 'expires_at is null',
+              name: 'collection_owner_uuid_name_unique')
+  end
+
+  def down
+    # Drop the partial index if it exists (e.g. if "up" failed partway
+    # through) and restore the original unconditional unique index.
+    remove_index :collections, :name => 'collection_owner_uuid_name_unique' if !find_index.empty?
+    add_index(:collections, [:owner_uuid, :name], unique: true,
+              name: 'collection_owner_uuid_name_unique')
+  end
+end
diff --git a/services/api/db/migrate/20150303210106_fix_collection_portable_data_hash_with_hinted_manifest.rb b/services/api/db/migrate/20150303210106_fix_collection_portable_data_hash_with_hinted_manifest.rb
new file mode 100644 (file)
index 0000000..31fc683
--- /dev/null
@@ -0,0 +1,131 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'has_uuid'
+require 'kind_and_etag'
+
+class FixCollectionPortableDataHashWithHintedManifest < ActiveRecord::Migration
+  include CurrentApiClient
+
+  class ArvadosModel < ActiveRecord::Base
+    self.abstract_class = true
+    extend HasUuid::ClassMethods
+    include CurrentApiClient
+    include KindAndEtag
+    before_create do |record|
+      record.uuid ||= record.class.generate_uuid
+      record.owner_uuid ||= system_user_uuid
+    end
+    serialize :properties, Hash
+
+    def self.to_s
+      # Clean up the name of the stub model class so we generate correct UUIDs.
+      super.sub("FixCollectionPortableDataHashWithHintedManifest::", "")
+    end
+  end
+
+  class Collection < ArvadosModel
+  end
+
+  class Log < ArvadosModel
+    def self.log_for(thing, age="old")
+      { "#{age}_etag" => thing.etag,
+        "#{age}_attributes" => thing.attributes,
+      }
+    end
+
+    def self.log_create(thing)
+      new_log("create", thing, log_for(thing, "new"))
+    end
+
+    def self.log_update(thing, start_state)
+      new_log("update", thing, start_state.merge(log_for(thing, "new")))
+    end
+
+    def self.log_destroy(thing)
+      new_log("destroy", thing, log_for(thing, "old"))
+    end
+
+    private
+
+    def self.new_log(event_type, thing, properties)
+      create!(event_type: event_type,
+              event_at: Time.now,
+              object_uuid: thing.uuid,
+              object_owner_uuid: thing.owner_uuid,
+              properties: properties)
+    end
+  end
+
+  def each_bad_collection
+    end_coll = Collection.order("id DESC").first
+    return if end_coll.nil?
+    seen_uuids = []
+    ("A".."Z").each do |hint_char|
+      query = Collection.
+        where("id <= ? AND manifest_text LIKE '%+#{hint_char}%'", end_coll.id)
+      unless seen_uuids.empty?
+        query = query.where("uuid NOT IN (?)", seen_uuids)
+      end
+      # It's important that this query doesn't push the process into swap.
+      # The worst case scenario is finding a batch of collections that
+      # all have maximum size manifests (64MiB).  With a batch size of
+      # 50, that's about 3GiB.  Figure it will end up being 4GiB after
+      # other ActiveRecord overhead.  That's a size we're comfortable with.
+      query.find_each(batch_size: 50) do |coll|
+        seen_uuids << coll.uuid
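+        # Strip hints from each block locator, leaving only "hash+size",
+        # then recompute the portable data hash of the stripped manifest.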
+        stripped_manifest = coll.manifest_text.
+          gsub(/( [0-9a-f]{32}(\+\d+)?)\+\S+/, '\1')
+        stripped_pdh = sprintf("%s+%i",
+                               Digest::MD5.hexdigest(stripped_manifest),
+                               stripped_manifest.bytesize)
+        yield [coll, stripped_pdh] if (coll.portable_data_hash != stripped_pdh)
+      end
+    end
+  end
+
+  def up
+    Collection.reset_column_information
+    Log.reset_column_information
+    copied_attr_names =
+      [:owner_uuid, :created_at, :modified_by_client_uuid, :manifest_text,
+       :modified_by_user_uuid, :modified_at, :updated_at, :name,
+       :description, :portable_data_hash, :replication_desired,
+       :replication_confirmed, :replication_confirmed_at, :expires_at]
+    new_expiry = Date.new(2038, 1, 31)
+
+    each_bad_collection do |coll, stripped_pdh|
+      # Create a copy of the collection including bad portable data hash,
+      # with an expiration.  This makes it possible to resolve the bad
+      # portable data hash, but the expiration can hide the Collection
+      # from more user-friendly interfaces like Workbench.
+      start_log = Log.log_for(coll)
+      attributes = Hash[copied_attr_names.map { |key| [key, coll.send(key)] }]
+      attributes[:expires_at] ||= new_expiry
+      attributes[:properties] = (coll.properties.dup rescue {})
+      attributes[:properties]["migrated_from"] ||= coll.uuid
+      coll_copy = Collection.create!(attributes)
+      Log.log_create(coll_copy)
+      coll.update_attributes(portable_data_hash: stripped_pdh)
+      Log.log_update(coll, start_log)
+    end
+  end
+
+  def down
+    Collection.reset_column_information
+    Log.reset_column_information
+    each_bad_collection do |coll, stripped_pdh|
+      if ((src_uuid = coll.properties["migrated_from"]) and
+          (src_coll = Collection.where(uuid: src_uuid).first) and
+          (src_coll.portable_data_hash == stripped_pdh))
+        start_log = Log.log_for(src_coll)
+        src_coll.portable_data_hash = coll.portable_data_hash
+        src_coll.save!
+        Log.log_update(src_coll, start_log)
+        coll.destroy or raise Exception.new("failed to destroy old collection")
+        Log.log_destroy(coll)
+      end
+    end
+  end
+end
diff --git a/services/api/db/migrate/20150312151136_change_collection_expires_at_to_datetime.rb b/services/api/db/migrate/20150312151136_change_collection_expires_at_to_datetime.rb
new file mode 100644 (file)
index 0000000..96988e2
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ChangeCollectionExpiresAtToDatetime < ActiveRecord::Migration
+  def up
+    change_column :collections, :expires_at, :datetime
+  end
+
+  def down
+    change_column :collections, :expires_at, :date
+  end
+end
diff --git a/services/api/db/migrate/20150317132720_add_username_to_users.rb b/services/api/db/migrate/20150317132720_add_username_to_users.rb
new file mode 100644 (file)
index 0000000..79a076d
--- /dev/null
@@ -0,0 +1,131 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'has_uuid'
+require 'kind_and_etag'
+
+class AddUsernameToUsers < ActiveRecord::Migration
+  include CurrentApiClient
+
+  SEARCH_INDEX_COLUMNS =
+    ["uuid", "owner_uuid", "modified_by_client_uuid",
+     "modified_by_user_uuid", "email", "first_name", "last_name",
+     "identity_url", "default_owner_uuid"]
+
+  class ArvadosModel < ActiveRecord::Base
+    self.abstract_class = true
+    extend HasUuid::ClassMethods
+    include CurrentApiClient
+    include KindAndEtag
+    before_create do |record|
+      record.uuid ||= record.class.generate_uuid
+      record.owner_uuid ||= system_user_uuid
+    end
+    serialize :properties, Hash
+
+    def self.to_s
+      # Clean up the name of the stub model class so we generate correct UUIDs.
+      super.rpartition("::").last
+    end
+  end
+
+  class Log < ArvadosModel
+    def self.log_for(thing, age="old")
+      { "#{age}_etag" => thing.etag,
+        "#{age}_attributes" => thing.attributes,
+      }
+    end
+
+    def self.log_create(thing)
+      new_log("create", thing, log_for(thing, "new"))
+    end
+
+    def self.log_update(thing, start_state)
+      new_log("update", thing, start_state.merge(log_for(thing, "new")))
+    end
+
+    def self.log_destroy(thing)
+      new_log("destroy", thing, log_for(thing, "old"))
+    end
+
+    private
+
+    def self.new_log(event_type, thing, properties)
+      create!(event_type: event_type,
+              event_at: Time.now,
+              object_uuid: thing.uuid,
+              object_owner_uuid: thing.owner_uuid,
+              properties: properties)
+    end
+  end
+
+  class Link < ArvadosModel
+  end
+
+  class User < ArvadosModel
+  end
+
+  def sanitize_username(username)
+    username.
+      sub(/^[^A-Za-z]+/, "").
+      gsub(/[^A-Za-z0-9]/, "")
+  end
+
+  def usernames_wishlist(user)
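+    # Score the candidates: one point for the email's local part, two
+    # points for each can_login permission link that records a username.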
+    usernames = Hash.new(0)
+    usernames[user.email.split("@", 2).first] += 1
+    Link.
+       where(tail_uuid: user.uuid, link_class: "permission", name: "can_login").
+       find_each do |login_perm|
+      username = login_perm.properties["username"]
+      usernames[username] += 2 if (username and not username.empty?)
+    end
+    usernames.keys.
+      sort_by { |n| -usernames[n] }.
+      map { |n| sanitize_username(n) }.
+      reject(&:empty?)
+  end
+
+  def increment_username(username)
+    @username_suffixes[username] += 1
+    "%s%i" % [username, @username_suffixes[username]]
+  end
+
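+  # Yield the scored candidates in order, then fall back to numbered
+  # variants of the best candidate (suffixes start at 2).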
+  def each_wanted_username(user)
+    usernames = usernames_wishlist(user)
+    usernames.each { |n| yield n }
+    base_username = usernames.first || "arvadosuser"
+    loop { yield increment_username(base_username) }
+  end
+
+  def recreate_search_index(columns)
+    remove_index :users, name: "users_search_index"
+    add_index :users, columns, name: "users_search_index"
+  end
+
+  def up
+    @username_suffixes = Hash.new(1)
+    add_column :users, :username, :string, null: true
+    add_index :users, :username, unique: true
+    recreate_search_index(SEARCH_INDEX_COLUMNS + ["username"])
+
+    [Link, Log, User].each { |m| m.reset_column_information }
+    User.validates(:username, uniqueness: true, allow_nil: true)
+    User.where(is_active: true).order(created_at: :asc).find_each do |user|
+      start_log = Log.log_for(user)
+      each_wanted_username(user) do |username|
+        user.username = username
+        break if user.valid?
+      end
+      user.save!
+      Log.log_update(user, start_log)
+    end
+  end
+
+  def down
+    remove_index :users, :username
+    recreate_search_index(SEARCH_INDEX_COLUMNS)
+    remove_column :users, :username
+  end
+end
diff --git a/services/api/db/migrate/20150324152204_backward_compatibility_for_user_repositories.rb b/services/api/db/migrate/20150324152204_backward_compatibility_for_user_repositories.rb
new file mode 100644 (file)
index 0000000..dc7c175
--- /dev/null
@@ -0,0 +1,93 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'has_uuid'
+require 'kind_and_etag'
+
+class BackwardCompatibilityForUserRepositories < ActiveRecord::Migration
+  include CurrentApiClient
+
+  class ArvadosModel < ActiveRecord::Base
+    self.abstract_class = true
+    extend HasUuid::ClassMethods
+    include CurrentApiClient
+    include KindAndEtag
+    before_create do |record|
+      record.uuid ||= record.class.generate_uuid
+      record.owner_uuid ||= system_user_uuid
+    end
+    serialize :properties, Hash
+
+    def self.to_s
+      # Clean up the name of the stub model class so we generate correct UUIDs.
+      super.rpartition("::").last
+    end
+  end
+
+  class Log < ArvadosModel
+    def self.log_for(thing, age="old")
+      { "#{age}_etag" => thing.etag,
+        "#{age}_attributes" => thing.attributes,
+      }
+    end
+
+    def self.log_create(thing)
+      new_log("create", thing, log_for(thing, "new"))
+    end
+
+    def self.log_update(thing, start_state)
+      new_log("update", thing, start_state.merge(log_for(thing, "new")))
+    end
+
+    def self.log_destroy(thing)
+      new_log("destroy", thing, log_for(thing, "old"))
+    end
+
+    private
+
+    def self.new_log(event_type, thing, properties)
+      create!(event_type: event_type,
+              event_at: Time.now,
+              object_uuid: thing.uuid,
+              object_owner_uuid: thing.owner_uuid,
+              properties: properties)
+    end
+  end
+
+  class Link < ArvadosModel
+  end
+
+  class Repository < ArvadosModel
+  end
+
+  def up
+    remove_index :repositories, name: "repositories_search_index"
+    add_index(:repositories, %w(uuid owner_uuid modified_by_client_uuid
+                                modified_by_user_uuid name),
+              name: "repositories_search_index")
+    remove_column :repositories, :fetch_url
+    remove_column :repositories, :push_url
+
+    [Link, Log, Repository].each { |m| m.reset_column_information }
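+    # For each user-owned repository, grant the previous owner an explicit
+    # can_manage permission link, then transfer the repository itself to
+    # the system user.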
+    Repository.where("owner_uuid != ?", system_user_uuid).find_each do |repo|
+      link_attrs = {
+        tail_uuid: repo.owner_uuid,
+        link_class: "permission", name: "can_manage", head_uuid: repo.uuid,
+      }
+      if Link.where(link_attrs).first.nil?
+        manage_link = Link.create!(link_attrs)
+        Log.log_create(manage_link)
+      end
+      start_log = Log.log_for(repo)
+      repo.owner_uuid = system_user_uuid
+      repo.save!
+      Log.log_update(repo, start_log)
+    end
+  end
+
+  def down
+    raise ActiveRecord::IrreversibleMigration.
+      new("can't restore prior fetch and push URLs")
+  end
+end
diff --git a/services/api/db/migrate/20150423145759_no_filenames_in_collection_search_index.rb b/services/api/db/migrate/20150423145759_no_filenames_in_collection_search_index.rb
new file mode 100644 (file)
index 0000000..4e1379e
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class NoFilenamesInCollectionSearchIndex < ActiveRecord::Migration
+  def up
+    remove_index :collections, :name => 'collections_search_index'
+    add_index :collections, ["owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "portable_data_hash", "uuid", "name"], name: 'collections_search_index'
+  end
+
+  def down
+    remove_index :collections, :name => 'collections_search_index'
+    add_index :collections, ["owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "portable_data_hash", "uuid", "name", "file_names"], name: 'collections_search_index'
+  end
+end
diff --git a/services/api/db/migrate/20150512193020_read_only_on_keep_services.rb b/services/api/db/migrate/20150512193020_read_only_on_keep_services.rb
new file mode 100644 (file)
index 0000000..a510c16
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ReadOnlyOnKeepServices < ActiveRecord::Migration
+  def change
+    add_column :keep_services, :read_only, :boolean, null: false, default: false
+  end
+end
diff --git a/services/api/db/migrate/20150526180251_leading_space_on_full_text_index.rb b/services/api/db/migrate/20150526180251_leading_space_on_full_text_index.rb
new file mode 100644 (file)
index 0000000..843837f
--- /dev/null
@@ -0,0 +1,45 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require "./db/migrate/20150123142953_full_text_search.rb"
+
+class LeadingSpaceOnFullTextIndex < ActiveRecord::Migration
+  def up
+    # Inspect one of the full-text indexes (chosen arbitrarily) to
+    # determine whether this migration is needed.
+    ft_index_name = 'jobs_full_text_search_idx'
+    ActiveRecord::Base.connection.indexes('jobs').each do |idx|
+      if idx.name == ft_index_name
+        if idx.columns.first.index "((((' '"
+          # Index is already correct. This happens if the source tree
+          # already had the new version of full_text_tsvector by the
+          # time the initial FullTextSearch migration ran.
+          $stderr.puts "This migration is not needed."
+        else
+          # Index was created using the old full_text_tsvector. Drop
+          # and re-create all full text indexes.
+          FullTextSearch.new.migrate(:down)
+          FullTextSearch.new.migrate(:up)
+        end
+        return
+      end
+    end
+    raise "Did not find index '#{ft_index_name}'. Earlier migration missed??"
+  end
+
+  def down
+    $stderr.puts <<EOS
+Down-migration is not supported for this change, and might be unnecessary.
+
+If you run a code base older than 20150526180251 against this
+database, full text search will be slow even on collections where it
+used to work well. If this is a concern, first check out the desired
+older version of the code base, and then run
+"rake db:migrate:down VERSION=20150123142953"
+followed by
+"rake db:migrate:up VERSION=20150123142953"
+.
+EOS
+  end
+end
diff --git a/services/api/db/migrate/20151202151426_create_containers_and_requests.rb b/services/api/db/migrate/20151202151426_create_containers_and_requests.rb
new file mode 100644 (file)
index 0000000..2bda546
--- /dev/null
@@ -0,0 +1,63 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateContainersAndRequests < ActiveRecord::Migration
+  def change
+    create_table :containers do |t|
+      t.string :uuid
+      t.string :owner_uuid
+      t.datetime :created_at
+      t.datetime :modified_at
+      t.string :modified_by_client_uuid
+      t.string :modified_by_user_uuid
+      t.string :state
+      t.datetime :started_at
+      t.datetime :finished_at
+      t.string :log
+      t.text :environment
+      t.string :cwd
+      t.text :command
+      t.string :output_path
+      t.text :mounts
+      t.text :runtime_constraints
+      t.string :output
+      t.string :container_image
+      t.float :progress
+      t.integer :priority
+
+      t.timestamps
+    end
+
+    create_table :container_requests do |t|
+      t.string :uuid
+      t.string :owner_uuid
+      t.datetime :created_at
+      t.datetime :modified_at
+      t.string :modified_by_client_uuid
+      t.string :modified_by_user_uuid
+      t.string :name
+      t.text :description
+      t.text :properties
+      t.string :state
+      t.string :requesting_container_uuid
+      t.string :container_uuid
+      t.integer :container_count_max
+      t.text :mounts
+      t.text :runtime_constraints
+      t.string :container_image
+      t.text :environment
+      t.string :cwd
+      t.text :command
+      t.string :output_path
+      t.integer :priority
+      t.datetime :expires_at
+      t.text :filters
+
+      t.timestamps
+    end
+
+    add_index :containers, :uuid, :unique => true
+    add_index :container_requests, :uuid, :unique => true
+  end
+end
diff --git a/services/api/db/migrate/20151215134304_fix_containers_index.rb b/services/api/db/migrate/20151215134304_fix_containers_index.rb
new file mode 100644 (file)
index 0000000..a112bea
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class FixContainersIndex < ActiveRecord::Migration
+  def up
+    execute "CREATE INDEX container_requests_full_text_search_idx ON container_requests USING gin(#{ContainerRequest.full_text_tsvector});"
+    add_index :container_requests, ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name", "state", "requesting_container_uuid", "container_uuid", "container_image", "cwd", "output_path"], name: 'container_requests_search_index'
+    add_index :containers, ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "state", "log", "cwd", "output_path", "output", "container_image"], name: 'containers_search_index'
+    add_index :container_requests, :owner_uuid
+    add_index :containers, :owner_uuid
+  end
+
+  def down
+    remove_index :container_requests, :name => 'container_requests_full_text_search_idx'
+    remove_index :container_requests, :name => 'container_requests_search_index'
+    remove_index :containers, :name => 'containers_search_index'
+    remove_index :container_requests, :owner_uuid
+    remove_index :containers, :owner_uuid
+  end
+end
diff --git a/services/api/db/migrate/20151229214707_add_exit_code_to_containers.rb b/services/api/db/migrate/20151229214707_add_exit_code_to_containers.rb
new file mode 100644 (file)
index 0000000..e28ec28
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddExitCodeToContainers < ActiveRecord::Migration
+  def change
+    add_column :containers, :exit_code, :integer
+  end
+end
diff --git a/services/api/db/migrate/20160208210629_add_uuid_to_api_client_authorization.rb b/services/api/db/migrate/20160208210629_add_uuid_to_api_client_authorization.rb
new file mode 100644 (file)
index 0000000..30f8dd0
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'has_uuid'
+
+class AddUuidToApiClientAuthorization < ActiveRecord::Migration
+  extend HasUuid::ClassMethods
+
+  def up
+    add_column :api_client_authorizations, :uuid, :string
+    add_index :api_client_authorizations, :uuid, :unique => true
+
+    prefix = Server::Application.config.uuid_prefix + '-' +
+             Digest::MD5.hexdigest('ApiClientAuthorization'.to_s).to_i(16).to_s(36)[-5..-1] + '-'
+
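+    # Build each uuid from the prefix plus 15 characters drawn from random
+    # positions of the row's own (already random) api_token.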
+    update_sql <<-EOS
+update api_client_authorizations set uuid = (select concat('#{prefix}',
+array_to_string(ARRAY (SELECT substring(api_token FROM (ceil(random()*36))::int FOR 1) FROM generate_series(1, 15)), '')
+));
+EOS
+
+    change_column_null :api_client_authorizations, :uuid, false
+  end
+
+  def down
+    if column_exists?(:api_client_authorizations, :uuid)
+      remove_index :api_client_authorizations, :uuid
+      remove_column :api_client_authorizations, :uuid
+    end
+  end
+end
diff --git a/services/api/db/migrate/20160209155729_add_uuid_to_api_token_search_index.rb b/services/api/db/migrate/20160209155729_add_uuid_to_api_token_search_index.rb
new file mode 100644 (file)
index 0000000..d5c7587
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddUuidToApiTokenSearchIndex < ActiveRecord::Migration
+  def up
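+    # Drop the old index if it exists; the bare rescue ignores the error
+    # raised when it does not.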
+    begin
+      remove_index :api_client_authorizations, :name => 'api_client_authorizations_search_index'
+    rescue
+    end
+    add_index :api_client_authorizations,
+              ["api_token", "created_by_ip_address", "last_used_by_ip_address", "default_owner_uuid", "uuid"],
+              name: "api_client_authorizations_search_index"
+  end
+
+  def down
+    begin
+      remove_index :api_client_authorizations, :name => 'api_client_authorizations_search_index'
+    rescue
+    end
+    add_index :api_client_authorizations,
+              ["api_token", "created_by_ip_address", "last_used_by_ip_address", "default_owner_uuid"],
+              name: "api_client_authorizations_search_index"
+  end
+end
diff --git a/services/api/db/migrate/20160324144017_add_components_to_job.rb b/services/api/db/migrate/20160324144017_add_components_to_job.rb
new file mode 100644 (file)
index 0000000..ea6ca63
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddComponentsToJob < ActiveRecord::Migration
+  def up
+    add_column :jobs, :components, :text
+  end
+
+  def down
+    if column_exists?(:jobs, :components)
+      remove_column :jobs, :components
+    end
+  end
+end
diff --git a/services/api/db/migrate/20160506175108_add_auths_to_container.rb b/services/api/db/migrate/20160506175108_add_auths_to_container.rb
new file mode 100644 (file)
index 0000000..163289b
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddAuthsToContainer < ActiveRecord::Migration
+  def change
+    add_column :containers, :auth_uuid, :string
+    add_column :containers, :locked_by_uuid, :string
+  end
+end
diff --git a/services/api/db/migrate/20160509143250_add_auth_and_lock_to_container_index.rb b/services/api/db/migrate/20160509143250_add_auth_and_lock_to_container_index.rb
new file mode 100644 (file)
index 0000000..3475904
--- /dev/null
@@ -0,0 +1,23 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddAuthAndLockToContainerIndex < ActiveRecord::Migration
+  Columns_were = ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "state", "log", "cwd", "output_path", "output", "container_image"]
+  Columns = Columns_were + ["auth_uuid", "locked_by_uuid"]
+  def up
+    begin
+      remove_index :containers, :name => 'containers_search_index'
+    rescue
+    end
+    add_index(:containers, Columns, name: "containers_search_index")
+  end
+
+  def down
+    begin
+      remove_index :containers, :name => 'containers_search_index'
+    rescue
+    end
+    add_index(:containers, Columns_were, name: "containers_search_index")
+  end
+end
diff --git a/services/api/db/migrate/20160808151559_create_workflows.rb b/services/api/db/migrate/20160808151559_create_workflows.rb
new file mode 100644 (file)
index 0000000..b15a8e4
--- /dev/null
@@ -0,0 +1,34 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CreateWorkflows < ActiveRecord::Migration
+  def up
+    create_table :workflows do |t|
+      t.string :uuid
+      t.string :owner_uuid
+      t.datetime :created_at
+      t.datetime :modified_at
+      t.string :modified_by_client_uuid
+      t.string :modified_by_user_uuid
+      t.string :name
+      t.text :description
+      t.text :workflow
+
+      t.timestamps
+    end
+
+    add_index :workflows, :uuid, :unique => true
+    add_index :workflows, :owner_uuid
+    add_index :workflows, ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name"], name: 'workflows_search_idx'
+    execute "CREATE INDEX workflows_full_text_search_idx ON workflows USING gin(#{Workflow.full_text_tsvector});"
+  end
+
+  def down
+    remove_index :workflows, :name => 'workflows_full_text_search_idx'
+    remove_index :workflows, :name => 'workflows_search_idx'
+    remove_index :workflows, :owner_uuid
+    remove_index :workflows, :uuid
+    drop_table :workflows
+  end
+end
diff --git a/services/api/db/migrate/20160819195557_add_script_parameters_digest_to_jobs.rb b/services/api/db/migrate/20160819195557_add_script_parameters_digest_to_jobs.rb
new file mode 100644 (file)
index 0000000..1a48076
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddScriptParametersDigestToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :script_parameters_digest, :string
+    add_index :jobs, :script_parameters_digest
+  end
+end
diff --git a/services/api/db/migrate/20160819195725_populate_script_parameters_digest.rb b/services/api/db/migrate/20160819195725_populate_script_parameters_digest.rb
new file mode 100644 (file)
index 0000000..320e083
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class PopulateScriptParametersDigest < ActiveRecord::Migration
+  def up
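+    # Work through jobs in batches of up to 200, updating every job that
+    # shares the same script_parameters in a single UPDATE, until no job
+    # is left without a digest.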
+    done = false
+    while !done
+      done = true
+      Job.
+        where('script_parameters_digest is null').
+        select([:id, :script_parameters, :script_parameters_digest]).
+        limit(200).
+        each do |j|
+        done = false
+        Job.
+          where('id=? or script_parameters=?', j.id, j.script_parameters.to_yaml).
+          update_all(script_parameters_digest: j.update_script_parameters_digest)
+      end
+    end
+  end
+
+  def down
+  end
+end
diff --git a/services/api/db/migrate/20160901210110_repair_script_parameters_digest.rb b/services/api/db/migrate/20160901210110_repair_script_parameters_digest.rb
new file mode 100644 (file)
index 0000000..949bccb
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RepairScriptParametersDigest < ActiveRecord::Migration
+  def up
+    Job.find_each do |j|
+      have = j.script_parameters_digest
+      want = j.update_script_parameters_digest
+      if have != want
+        # where().update_all() skips validations, event logging, and
+        # timestamp updates, and just runs SQL. (This change is
+        # invisible to clients.)
+        Job.where('id=?', j.id).update_all(script_parameters_digest: want)
+      end
+    end
+  end
+
+  def down
+  end
+end
diff --git a/services/api/db/migrate/20160909181442_rename_workflow_to_definition.rb b/services/api/db/migrate/20160909181442_rename_workflow_to_definition.rb
new file mode 100644 (file)
index 0000000..61d327b
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RenameWorkflowToDefinition < ActiveRecord::Migration
+  def up
+    rename_column :workflows, :workflow, :definition
+  end
+
+  def down
+    rename_column :workflows, :definition, :workflow
+  end
+end
+
diff --git a/services/api/db/migrate/20160926194129_add_container_count.rb b/services/api/db/migrate/20160926194129_add_container_count.rb
new file mode 100644 (file)
index 0000000..063ce97
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddContainerCount < ActiveRecord::Migration
+  def up
+    add_column :container_requests, :container_count, :int, :default => 0
+  end
+
+  def down
+    remove_column :container_requests, :container_count
+  end
+end
diff --git a/services/api/db/migrate/20161019171346_add_use_existing_to_container_requests.rb b/services/api/db/migrate/20161019171346_add_use_existing_to_container_requests.rb
new file mode 100644 (file)
index 0000000..0ba04f6
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddUseExistingToContainerRequests < ActiveRecord::Migration
+  def up
+    add_column :container_requests, :use_existing, :boolean, :default => true
+  end
+
+  def down
+    remove_column :container_requests, :use_existing
+  end
+end
diff --git a/services/api/db/migrate/20161111143147_add_scheduling_parameters_to_container.rb b/services/api/db/migrate/20161111143147_add_scheduling_parameters_to_container.rb
new file mode 100644 (file)
index 0000000..6820dfb
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddSchedulingParametersToContainer < ActiveRecord::Migration
+  def change
+    add_column :containers, :scheduling_parameters, :text
+    add_column :container_requests, :scheduling_parameters, :text
+  end
+end
diff --git a/services/api/db/migrate/20161115171221_add_output_and_log_uuid_to_container_request.rb b/services/api/db/migrate/20161115171221_add_output_and_log_uuid_to_container_request.rb
new file mode 100644 (file)
index 0000000..168bd4f
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'has_uuid'
+
+class AddOutputAndLogUuidToContainerRequest < ActiveRecord::Migration
+  extend HasUuid::ClassMethods
+
+  def up
+    add_column :container_requests, :output_uuid, :string
+    add_column :container_requests, :log_uuid, :string
+
+    no_such_out_coll = Server::Application.config.uuid_prefix + '-' + '4zz18' + '-xxxxxxxxxxxxxxx'
+    no_such_log_coll = Server::Application.config.uuid_prefix + '-' + '4zz18' + '-yyyyyyyyyyyyyyy'
+
+    update_sql <<-EOS
+update container_requests set output_uuid = ('#{no_such_out_coll}'), log_uuid = ('#{no_such_log_coll}');
+EOS
+  end
+
+  def down
+    remove_column :container_requests, :log_uuid
+    remove_column :container_requests, :output_uuid
+  end
+end
diff --git a/services/api/db/migrate/20161115174218_add_output_and_log_uuids_to_container_request_search_index.rb b/services/api/db/migrate/20161115174218_add_output_and_log_uuids_to_container_request_search_index.rb
new file mode 100644 (file)
index 0000000..9d1e752
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddOutputAndLogUuidsToContainerRequestSearchIndex < ActiveRecord::Migration
+  def up
+    begin
+      remove_index :container_requests, :name => 'container_requests_search_index'
+    rescue
+    end
+    add_index :container_requests,
+              ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name", "state", "requesting_container_uuid", "container_uuid", "container_image", "cwd", "output_path", "output_uuid", "log_uuid"],
+              name: "container_requests_search_index"
+  end
+
+  def down
+    begin
+      remove_index :container_requests, :name => 'container_requests_search_index'
+    rescue
+    end
+    add_index :container_requests,
+              ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name", "state", "requesting_container_uuid", "container_uuid", "container_image", "cwd", "output_path"],
+              name: "container_requests_search_index"
+  end
+end
diff --git a/services/api/db/migrate/20161213172944_full_text_search_indexes.rb b/services/api/db/migrate/20161213172944_full_text_search_indexes.rb
new file mode 100644 (file)
index 0000000..0ec5212
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class FullTextSearchIndexes < ActiveRecord::Migration
+  def fts_indexes
+    {
+      "collections" => "collections_full_text_search_idx",
+      "container_requests" => "container_requests_full_text_search_idx",
+      "groups" => "groups_full_text_search_idx",
+      "jobs" => "jobs_full_text_search_idx",
+      "pipeline_instances" => "pipeline_instances_full_text_search_idx",
+      "pipeline_templates" => "pipeline_templates_full_text_search_idx",
+      "workflows" => "workflows_full_text_search_idx",
+    }
+  end
+
+  def replace_index(t)
+    i = fts_indexes[t]
+    t.classify.constantize.reset_column_information
+    execute "DROP INDEX IF EXISTS #{i}"
+    execute "CREATE INDEX #{i} ON #{t} USING gin(#{t.classify.constantize.full_text_tsvector})"
+  end
+
+  def up
+    # Remove the existing full-text-search indexes and create up-to-date ones
+    # (the new full_text_tsvector definitions no longer have a leading space).
+    fts_indexes.keys.each do |t|
+      replace_index(t)
+    end
+  end
+
+  def down
+    fts_indexes.each do |t, i|
+      remove_index t.to_sym, :name => i
+    end
+  end
+end
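
replace_index above maps a table name to its model class with ActiveSupport
inflection before reading the model's full_text_tsvector; a quick sketch of
that lookup:

    'container_requests'.classify              # => "ContainerRequest"
    'container_requests'.classify.constantize  # => ContainerRequest (the model)
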
diff --git a/services/api/db/migrate/20161222153434_split_expiry_to_trash_and_delete.rb b/services/api/db/migrate/20161222153434_split_expiry_to_trash_and_delete.rb
new file mode 100644 (file)
index 0000000..c3bea69
--- /dev/null
@@ -0,0 +1,46 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SplitExpiryToTrashAndDelete < ActiveRecord::Migration
+  def up
+    Collection.transaction do
+      add_column(:collections, :trash_at, :datetime)
+      add_index(:collections, :trash_at)
+      add_column(:collections, :is_trashed, :boolean, null: false, default: false)
+      add_index(:collections, :is_trashed)
+      rename_column(:collections, :expires_at, :delete_at)
+      add_index(:collections, :delete_at)
+
+      Collection.reset_column_information
+      Collection.
+        where('delete_at is not null and delete_at <= statement_timestamp()').
+        delete_all
+      Collection.
+        where('delete_at is not null').
+        update_all('is_trashed = true, trash_at = statement_timestamp()')
+      add_index(:collections, [:owner_uuid, :name],
+                unique: true,
+                where: 'is_trashed = false',
+                name: 'index_collections_on_owner_uuid_and_name')
+      remove_index(:collections,
+                   name: 'collection_owner_uuid_name_unique')
+    end
+  end
+
+  def down
+    Collection.transaction do
+      remove_index(:collections, :delete_at)
+      rename_column(:collections, :delete_at, :expires_at)
+      add_index(:collections, [:owner_uuid, :name],
+                unique: true,
+                where: 'expires_at is null',
+                name: 'collection_owner_uuid_name_unique')
+      remove_index(:collections,
+                   name: 'index_collections_on_owner_uuid_and_name')
+      remove_column(:collections, :is_trashed)
+      remove_index(:collections, :trash_at)
+      remove_column(:collections, :trash_at)
+    end
+  end
+end
diff --git a/services/api/db/migrate/20161223090712_add_output_name_to_container_requests.rb b/services/api/db/migrate/20161223090712_add_output_name_to_container_requests.rb
new file mode 100644 (file)
index 0000000..aac8ade
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddOutputNameToContainerRequests < ActiveRecord::Migration
+  def up
+    add_column :container_requests, :output_name, :string, :default => nil
+  end
+
+  def down
+    remove_column :container_requests, :output_name
+  end
+end
diff --git a/services/api/db/migrate/20170102153111_add_output_name_to_container_request_search_index.rb b/services/api/db/migrate/20170102153111_add_output_name_to_container_request_search_index.rb
new file mode 100644 (file)
index 0000000..1b846fd
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddOutputNameToContainerRequestSearchIndex < ActiveRecord::Migration
+  def up
+    begin
+      remove_index :container_requests, :name => 'container_requests_search_index'
+    rescue
+    end
+    add_index :container_requests,
+              ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name", "state", "requesting_container_uuid", "container_uuid", "container_image", "cwd", "output_path", "output_uuid", "log_uuid", "output_name"],
+              name: "container_requests_search_index"
+  end
+
+  def down
+    begin
+      remove_index :container_requests, :name => 'container_requests_search_index'
+    rescue
+    end
+    add_index :container_requests,
+              ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name", "state", "requesting_container_uuid", "container_uuid", "container_image", "cwd", "output_path", "output_uuid", "log_uuid"],
+              name: "container_requests_search_index"
+  end
+end
diff --git a/services/api/db/migrate/20170105160301_add_output_name_to_cr_fts_index.rb b/services/api/db/migrate/20170105160301_add_output_name_to_cr_fts_index.rb
new file mode 100644 (file)
index 0000000..220323d
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddOutputNameToCrFtsIndex < ActiveRecord::Migration
+  def up
+    t = "container_requests"
+    i = "container_requests_full_text_search_idx"
+    t.classify.constantize.reset_column_information
+    ActiveRecord::Base.connection.indexes(t).each do |idx|
+      if idx.name == i
+        remove_index t.to_sym, :name => i
+        break
+      end
+    end
+    # By now, container_requests should have the new column "output_name", so
+    # full_text_tsvector will include it in its results.
+    execute "CREATE INDEX #{i} ON #{t} USING gin(#{t.classify.constantize.full_text_tsvector});"
+  end
+
+  def down
+    t = "container_requests"
+    i = "container_requests_full_text_search_idx"
+    remove_index t.to_sym, :name => i
+  end
+end
diff --git a/services/api/db/migrate/20170105160302_set_finished_at_on_finished_pipeline_instances.rb b/services/api/db/migrate/20170105160302_set_finished_at_on_finished_pipeline_instances.rb
new file mode 100644 (file)
index 0000000..cc56e42
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SetFinishedAtOnFinishedPipelineInstances < ActiveRecord::Migration
+  def change
+    ActiveRecord::Base.connection.execute("update pipeline_instances set finished_at=updated_at where finished_at is null and (state='Failed' or state='Complete')")
+  end
+end
diff --git a/services/api/db/migrate/20170216170823_no_cr_mounts_and_workflow_def_in_full_text_search_index.rb b/services/api/db/migrate/20170216170823_no_cr_mounts_and_workflow_def_in_full_text_search_index.rb
new file mode 100644 (file)
index 0000000..d4e13c9
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class NoCrMountsAndWorkflowDefInFullTextSearchIndex < ActiveRecord::Migration
+  def fts_indexes
+    {
+      "container_requests" => "container_requests_full_text_search_idx",
+      "workflows" => "workflows_full_text_search_idx",
+    }
+  end
+
+  def up
+    # Remove the existing fts indexes and recreate them for container_requests and workflows.
+    fts_indexes.each do |t, i|
+      t.classify.constantize.reset_column_information
+      ActiveRecord::Base.connection.indexes(t).each do |idx|
+        if idx.name == i
+          remove_index t.to_sym, :name => i
+          break
+        end
+      end
+      execute "CREATE INDEX #{i} ON #{t} USING gin(#{t.classify.constantize.full_text_tsvector});"
+    end
+  end
+
+  def down
+    fts_indexes.each do |t, i|
+      t.classify.constantize.reset_column_information
+      ActiveRecord::Base.connection.indexes(t).each do |idx|
+        if idx.name == i
+          remove_index t.to_sym, :name => i
+          break
+        end
+      end
+    end
+  end
+end
diff --git a/services/api/db/migrate/20170301225558_no_downgrade_after_json.rb b/services/api/db/migrate/20170301225558_no_downgrade_after_json.rb
new file mode 100644 (file)
index 0000000..5b8c6a7
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class NoDowngradeAfterJson < ActiveRecord::Migration
+  def up
+  end
+
+  def down
+    raise ActiveRecord::IrreversibleMigration.
+      new("cannot downgrade: older versions cannot read JSON from DB tables")
+  end
+end
diff --git a/services/api/db/migrate/20170319063406_serialized_columns_accept_null.rb b/services/api/db/migrate/20170319063406_serialized_columns_accept_null.rb
new file mode 100644 (file)
index 0000000..cc87831
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SerializedColumnsAcceptNull < ActiveRecord::Migration
+  def change
+    change_column :api_client_authorizations, :scopes, :text, null: true, default: '["all"]'
+  end
+end
diff --git a/services/api/db/migrate/20170328215436_add_portable_data_hash_index_to_collections.rb b/services/api/db/migrate/20170328215436_add_portable_data_hash_index_to_collections.rb
new file mode 100644 (file)
index 0000000..c9b0a8e
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddPortableDataHashIndexToCollections < ActiveRecord::Migration
+  def change
+    add_index :collections, :portable_data_hash
+  end
+end
diff --git a/services/api/db/migrate/20170330012505_add_output_ttl_to_container_requests.rb b/services/api/db/migrate/20170330012505_add_output_ttl_to_container_requests.rb
new file mode 100644 (file)
index 0000000..99b3629
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddOutputTtlToContainerRequests < ActiveRecord::Migration
+  def change
+    add_column :container_requests, :output_ttl, :integer, default: 0, null: false
+  end
+end
diff --git a/services/api/db/migrate/20170419173031_add_created_by_job_task_index_to_job_tasks.rb b/services/api/db/migrate/20170419173031_add_created_by_job_task_index_to_job_tasks.rb
new file mode 100644 (file)
index 0000000..e11c86f
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddCreatedByJobTaskIndexToJobTasks < ActiveRecord::Migration
+  def change
+    add_index :job_tasks, :created_by_job_task_uuid
+  end
+end
diff --git a/services/api/db/migrate/20170419173712_add_object_owner_index_to_logs.rb b/services/api/db/migrate/20170419173712_add_object_owner_index_to_logs.rb
new file mode 100644 (file)
index 0000000..44ae7e5
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddObjectOwnerIndexToLogs < ActiveRecord::Migration
+  def change
+    add_index :logs, :object_owner_uuid
+  end
+end
diff --git a/services/api/db/migrate/20170419175801_add_requesting_container_index_to_container_requests.rb b/services/api/db/migrate/20170419175801_add_requesting_container_index_to_container_requests.rb
new file mode 100644 (file)
index 0000000..2dd1a0c
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddRequestingContainerIndexToContainerRequests < ActiveRecord::Migration
+  def change
+    add_index :container_requests, :requesting_container_uuid
+  end
+end
diff --git a/services/api/db/migrate/20170628185847_jobs_yaml_to_json.rb b/services/api/db/migrate/20170628185847_jobs_yaml_to_json.rb
new file mode 100644 (file)
index 0000000..2c90c9a
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'migrate_yaml_to_json'
+
+class JobsYamlToJson < ActiveRecord::Migration
+  def up
+    [
+      'components',
+      'script_parameters',
+      'runtime_constraints',
+      'tasks_summary',
+    ].each do |column|
+      MigrateYAMLToJSON.migrate("jobs", column)
+    end
+  end
+
+  def down
+  end
+end
diff --git a/services/api/db/migrate/20170704160233_yaml_to_json.rb b/services/api/db/migrate/20170704160233_yaml_to_json.rb
new file mode 100644 (file)
index 0000000..707c3dd
--- /dev/null
@@ -0,0 +1,41 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'migrate_yaml_to_json'
+
+class YamlToJson < ActiveRecord::Migration
+  def up
+    [
+      ['collections', 'properties'],
+      ['containers', 'environment'],
+      ['containers', 'mounts'],
+      ['containers', 'runtime_constraints'],
+      ['containers', 'command'],
+      ['containers', 'scheduling_parameters'],
+      ['container_requests', 'properties'],
+      ['container_requests', 'environment'],
+      ['container_requests', 'mounts'],
+      ['container_requests', 'runtime_constraints'],
+      ['container_requests', 'command'],
+      ['container_requests', 'scheduling_parameters'],
+      ['humans', 'properties'],
+      ['job_tasks', 'parameters'],
+      ['links', 'properties'],
+      ['nodes', 'info'],
+      ['nodes', 'properties'],
+      ['pipeline_instances', 'components'],
+      ['pipeline_instances', 'properties'],
+      ['pipeline_instances', 'components_summary'],
+      ['pipeline_templates', 'components'],
+      ['specimens', 'properties'],
+      ['traits', 'properties'],
+      ['users', 'prefs'],
+    ].each do |table, column|
+      MigrateYAMLToJSON.migrate(table, column)
+    end
+  end
+
+  def down
+  end
+end
diff --git a/services/api/db/migrate/20170706141334_json_collection_properties.rb b/services/api/db/migrate/20170706141334_json_collection_properties.rb
new file mode 100644 (file)
index 0000000..921803a
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require './db/migrate/20161213172944_full_text_search_indexes'
+
+class JsonCollectionProperties < ActiveRecord::Migration
+  def up
+    # Drop the FT index before changing column type to avoid
+    # "PG::DatatypeMismatch: ERROR: COALESCE types jsonb and text
+    # cannot be matched".
+    ActiveRecord::Base.connection.execute 'DROP INDEX IF EXISTS collections_full_text_search_idx'
+    ActiveRecord::Base.connection.execute 'ALTER TABLE collections ALTER COLUMN properties TYPE jsonb USING properties::jsonb'
+    FullTextSearchIndexes.new.replace_index('collections')
+  end
+
+  def down
+    ActiveRecord::Base.connection.execute 'ALTER TABLE collections ALTER COLUMN properties TYPE text'
+  end
+end
diff --git a/services/api/db/migrate/20170824202826_trashable_groups.rb b/services/api/db/migrate/20170824202826_trashable_groups.rb
new file mode 100644 (file)
index 0000000..17fc31f
--- /dev/null
@@ -0,0 +1,42 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class TrashableGroups < ActiveRecord::Migration
+  def up
+    add_column :groups, :trash_at, :datetime
+    add_index(:groups, :trash_at)
+
+    add_column :groups, :is_trashed, :boolean, null: false, default: false
+    add_index(:groups, :is_trashed)
+
+    add_column :groups, :delete_at, :datetime
+    add_index(:groups, :delete_at)
+
+    Group.reset_column_information
+    add_index(:groups, [:owner_uuid, :name],
+              unique: true,
+              where: 'is_trashed = false',
+              name: 'index_groups_on_owner_uuid_and_name')
+    remove_index(:groups,
+                 name: 'groups_owner_uuid_name_unique')
+  end
+
+  def down
+    Group.transaction do
+      add_index(:groups, [:owner_uuid, :name], unique: true,
+                name: 'groups_owner_uuid_name_unique')
+      remove_index(:groups,
+                   name: 'index_groups_on_owner_uuid_and_name')
+
+      remove_index(:groups, :delete_at)
+      remove_column(:groups, :delete_at)
+
+      remove_index(:groups, :is_trashed)
+      remove_column(:groups, :is_trashed)
+
+      remove_index(:groups, :trash_at)
+      remove_column(:groups, :trash_at)
+    end
+  end
+end
diff --git a/services/api/db/migrate/20170906224040_materialized_permission_view.rb b/services/api/db/migrate/20170906224040_materialized_permission_view.rb
new file mode 100644 (file)
index 0000000..b864f75
--- /dev/null
@@ -0,0 +1,117 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class MaterializedPermissionView < ActiveRecord::Migration
+
+  @@idxtables = [:collections, :container_requests, :groups, :jobs, :links, :pipeline_instances, :pipeline_templates, :repositories, :users, :virtual_machines, :workflows, :logs]
+
+  def up
+
+    #
+    # Construct a materialized view for permissions.  A materialized view is
+    # derived by querying other tables, but its results are stored in a static
+    # table of their own, so they can be indexed and queried efficiently
+    # without rerunning the query.  The view is updated with "REFRESH
+    # MATERIALIZED VIEW", executed after any operation that invalidates the
+    # permission graph.
+
+    ActiveRecord::Base.connection.execute(
+"-- constructing perm_edges
+--   1. get the list of all permission links,
+--   2. any can_manage link or permission link to a group means permission should 'follow through'
+--      (as a special case, can_manage links to a user grant access to everything owned by the user,
+--       unlike can_read or can_write which only grant access to the user record)
+--   3. add all owner->owned relationships between groups as can_manage edges
+--
+-- constructing permissions
+--   1. base case: start with set of all users as the working set
+--   2. recursive case:
+--      join with edges where the tail is in the working set and 'follow' is true
+--      produce a new working set with the head (target) of each edge
+--      set permission to the least permission encountered on the path
+--      propagate trashed flag down
+
+CREATE MATERIALIZED VIEW materialized_permission_view AS
+WITH RECURSIVE
+perm_value (name, val) AS (
+     VALUES
+     ('can_read',   1::smallint),
+     ('can_login',  1),
+     ('can_write',  2),
+     ('can_manage', 3)
+     ),
+perm_edges (tail_uuid, head_uuid, val, follow, trashed) AS (
+       SELECT links.tail_uuid,
+              links.head_uuid,
+              pv.val,
+              (pv.val = 3 OR groups.uuid IS NOT NULL) AS follow,
+              0::smallint AS trashed
+              FROM links
+              LEFT JOIN perm_value pv ON pv.name = links.name
+              LEFT JOIN groups ON pv.val<3 AND groups.uuid = links.head_uuid
+              WHERE links.link_class = 'permission'
+       UNION ALL
+       SELECT owner_uuid, uuid, 3, true,
+              CASE WHEN trash_at IS NOT NULL and trash_at < clock_timestamp() THEN 1 ELSE 0 END
+              FROM groups
+       ),
+perm (val, follow, user_uuid, target_uuid, trashed) AS (
+     SELECT 3::smallint             AS val,
+            true                    AS follow,
+            users.uuid::varchar(32) AS user_uuid,
+            users.uuid::varchar(32) AS target_uuid,
+            0::smallint             AS trashed
+            FROM users
+     UNION
+     SELECT LEAST(perm.val, edges.val)::smallint  AS val,
+            edges.follow                          AS follow,
+            perm.user_uuid::varchar(32)           AS user_uuid,
+            edges.head_uuid::varchar(32)          AS target_uuid,
+            GREATEST(perm.trashed, edges.trashed)::smallint AS trashed
+            FROM perm
+            INNER JOIN perm_edges edges
+            ON perm.follow AND edges.tail_uuid = perm.target_uuid
+)
+SELECT user_uuid,
+       target_uuid,
+       MAX(val) AS perm_level,
+       CASE follow WHEN true THEN target_uuid ELSE NULL END AS target_owner_uuid,
+       MAX(trashed) AS trashed
+       FROM perm
+       GROUP BY user_uuid, target_uuid, target_owner_uuid;
+")
+    add_index :materialized_permission_view, [:trashed, :target_uuid], name: 'permission_target_trashed'
+    add_index :materialized_permission_view, [:user_uuid, :trashed, :perm_level], name: 'permission_target_user_trashed_level'
+
+    # Indexes on the other tables are essential for the query planner to
+    # construct an efficient join with the permission view.
+    #
+    # Our default query uses "ORDER BY modified_at desc, uuid"
+    #
+    # It turns out the existing simple index on modified_at can't be used
+    # because of the additional ordering on "uuid".
+    #
+    # To be able to utilize the index, the index ordering has to match the
+    # ORDER BY clause.  For more detail see:
+    #
+    # https://www.postgresql.org/docs/9.3/static/indexes-ordering.html
+    #
+    @@idxtables.each do |table|
+      ActiveRecord::Base.connection.execute("CREATE INDEX index_#{table.to_s}_on_modified_at_uuid ON #{table.to_s} USING btree (modified_at desc, uuid asc)")
+    end
+
+    create_table :permission_refresh_lock
+    ActiveRecord::Base.connection.execute("REFRESH MATERIALIZED VIEW materialized_permission_view")
+  end
+
+  def down
+    drop_table :permission_refresh_lock
+    remove_index :materialized_permission_view, name: 'permission_target_trashed'
+    remove_index :materialized_permission_view, name: 'permission_target_user_trashed_level'
+    @@idxtables.each do |table|
+      ActiveRecord::Base.connection.execute("DROP INDEX IF EXISTS index_#{table.to_s}_on_modified_at_uuid")
+    end
+    ActiveRecord::Base.connection.execute("DROP MATERIALIZED VIEW IF EXISTS materialized_permission_view")
+  end
+end
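
With this view in place, a permission check becomes a single indexed lookup.
A hypothetical query (uuids invented) asking whether a user can read a
target, using the perm_value mapping above (can_read = 1):

    SELECT 1
      FROM materialized_permission_view
     WHERE user_uuid = 'zzzzz-tpzed-xxxxxxxxxxxxxxx'
       AND target_uuid = 'zzzzz-4zz18-yyyyyyyyyyyyyyy'
       AND trashed = 0
       AND perm_level >= 1;
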
diff --git a/services/api/db/migrate/20171027183824_add_index_to_containers.rb b/services/api/db/migrate/20171027183824_add_index_to_containers.rb
new file mode 100644 (file)
index 0000000..aa42423
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddIndexToContainers < ActiveRecord::Migration
+  def up
+    ActiveRecord::Base.connection.execute("CREATE INDEX index_containers_on_modified_at_uuid ON containers USING btree (modified_at desc, uuid asc)")
+    ActiveRecord::Base.connection.execute("CREATE INDEX index_container_requests_on_container_uuid on container_requests (container_uuid)")
+  end
+
+  def down
+    ActiveRecord::Base.connection.execute("DROP INDEX IF EXISTS index_containers_on_modified_at_uuid")
+    ActiveRecord::Base.connection.execute("DROP INDEX IF EXISTS index_container_requests_on_container_uuid")
+  end
+end
diff --git a/services/api/db/migrate/20171208203841_fix_trash_flag_follow.rb b/services/api/db/migrate/20171208203841_fix_trash_flag_follow.rb
new file mode 100644 (file)
index 0000000..c9e50a6
--- /dev/null
@@ -0,0 +1,79 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class FixTrashFlagFollow < ActiveRecord::Migration
+  def change
+    ActiveRecord::Base.connection.execute("DROP MATERIALIZED VIEW materialized_permission_view")
+    ActiveRecord::Base.connection.execute(
+"-- constructing perm_edges
+--   1. get the list of all permission links,
+--   2. any can_manage link or permission link to a group means permission should 'follow through'
+--      (as a special case, can_manage links to a user grant access to everything owned by the user,
+--       unlike can_read or can_write which only grant access to the user record)
+--   3. add all owner->owned relationships between groups as can_manage edges
+--
+-- constructing permissions
+--   1. base case: start with set of all users as the working set
+--   2. recursive case:
+--      join with edges where the tail is in the working set and 'follow' is true
+--      produce a new working set with the head (target) of each edge
+--      set permission to the least permission encountered on the path
+--      propagate trashed flag down
+
+CREATE MATERIALIZED VIEW materialized_permission_view AS
+WITH RECURSIVE
+perm_value (name, val) AS (
+     VALUES
+     ('can_read',   1::smallint),
+     ('can_login',  1),
+     ('can_write',  2),
+     ('can_manage', 3)
+     ),
+perm_edges (tail_uuid, head_uuid, val, follow, trashed, followtrash) AS (
+       SELECT links.tail_uuid,
+              links.head_uuid,
+              pv.val,
+              (pv.val = 3 OR groups.uuid IS NOT NULL) AS follow,
+              0::smallint AS trashed,
+              0::smallint AS followtrash
+              FROM links
+              LEFT JOIN perm_value pv ON pv.name = links.name
+              LEFT JOIN groups ON pv.val<3 AND groups.uuid = links.head_uuid
+              WHERE links.link_class = 'permission'
+       UNION ALL
+       SELECT owner_uuid, uuid, 3, true,
+              CASE WHEN trash_at IS NOT NULL and trash_at < clock_timestamp() THEN 1 ELSE 0 END,
+              1
+              FROM groups
+       ),
+perm (val, follow, user_uuid, target_uuid, trashed) AS (
+     SELECT 3::smallint             AS val,
+            true                    AS follow,
+            users.uuid::varchar(32) AS user_uuid,
+            users.uuid::varchar(32) AS target_uuid,
+            0::smallint             AS trashed
+            FROM users
+     UNION
+     SELECT LEAST(perm.val, edges.val)::smallint  AS val,
+            edges.follow                          AS follow,
+            perm.user_uuid::varchar(32)           AS user_uuid,
+            edges.head_uuid::varchar(32)          AS target_uuid,
+            (GREATEST(perm.trashed, edges.trashed) * edges.followtrash)::smallint AS trashed
+            FROM perm
+            INNER JOIN perm_edges edges
+            ON perm.follow AND edges.tail_uuid = perm.target_uuid
+)
+SELECT user_uuid,
+       target_uuid,
+       MAX(val) AS perm_level,
+       CASE follow WHEN true THEN target_uuid ELSE NULL END AS target_owner_uuid,
+       MAX(trashed) AS trashed
+       FROM perm
+       GROUP BY user_uuid, target_uuid, target_owner_uuid;
+")
+    add_index :materialized_permission_view, [:trashed, :target_uuid], name: 'permission_target_trashed'
+    add_index :materialized_permission_view, [:user_uuid, :trashed, :perm_level], name: 'permission_target_user_trashed_level'
+    ActiveRecord::Base.connection.execute("REFRESH MATERIALIZED VIEW materialized_permission_view")
+  end
+end
diff --git a/services/api/db/migrate/20171212153352_add_gin_index_to_collection_properties.rb b/services/api/db/migrate/20171212153352_add_gin_index_to_collection_properties.rb
new file mode 100644 (file)
index 0000000..0183ef6
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddGinIndexToCollectionProperties < ActiveRecord::Migration
+  def up
+    ActiveRecord::Base.connection.execute("CREATE INDEX collection_index_on_properties ON collections USING gin (properties);")
+  end
+  def down
+    ActiveRecord::Base.connection.execute("DROP INDEX collection_index_on_properties")
+  end
+end
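
The GIN index above serves jsonb containment filters on properties; a
hypothetical example (key and value invented):

    SELECT uuid FROM collections WHERE properties @> '{"sample_id": "X123"}';
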
diff --git a/services/api/db/migrate/20180216203422_add_storage_classes_to_collections.rb b/services/api/db/migrate/20180216203422_add_storage_classes_to_collections.rb
new file mode 100644 (file)
index 0000000..112c2ba
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddStorageClassesToCollections < ActiveRecord::Migration
+  def up
+    add_column :collections, :storage_classes_desired, :jsonb, :default => ["default"]
+    add_column :collections, :storage_classes_confirmed, :jsonb, :default => []
+    add_column :collections, :storage_classes_confirmed_at, :datetime, :default => nil, :null => true
+  end
+
+  def down
+    remove_column :collections, :storage_classes_desired
+    remove_column :collections, :storage_classes_confirmed
+    remove_column :collections, :storage_classes_confirmed_at
+  end
+end
diff --git a/services/api/db/migrate/20180228220311_add_secret_mounts_to_containers.rb b/services/api/db/migrate/20180228220311_add_secret_mounts_to_containers.rb
new file mode 100644 (file)
index 0000000..a161f63
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddSecretMountsToContainers < ActiveRecord::Migration
+  def change
+    add_column :container_requests, :secret_mounts, :jsonb, default: {}
+    add_column :containers, :secret_mounts, :jsonb, default: {}
+    add_column :containers, :secret_mounts_md5, :string, default: "99914b932bd37a50b983c5e7c90ae93b"
+    add_index :containers, :secret_mounts_md5
+  end
+end
diff --git a/services/api/db/migrate/20180313180114_change_container_priority_bigint.rb b/services/api/db/migrate/20180313180114_change_container_priority_bigint.rb
new file mode 100644 (file)
index 0000000..529126b
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ChangeContainerPriorityBigint < ActiveRecord::Migration
+  def change
+    change_column :containers, :priority, :integer, limit: 8
+  end
+end
diff --git a/services/api/db/migrate/20180501182859_add_redirect_to_user_uuid_to_users.rb b/services/api/db/migrate/20180501182859_add_redirect_to_user_uuid_to_users.rb
new file mode 100644 (file)
index 0000000..10b35a7
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddRedirectToUserUuidToUsers < ActiveRecord::Migration
+  def up
+    add_column :users, :redirect_to_user_uuid, :string
+    User.reset_column_information
+    remove_index :users, name: 'users_search_index'
+    add_index :users, User.searchable_columns('ilike') - ['prefs'], name: 'users_search_index'
+  end
+
+  def down
+    remove_index :users, name: 'users_search_index'
+    remove_column :users, :redirect_to_user_uuid
+    User.reset_column_information
+    add_index :users, User.searchable_columns('ilike') - ['prefs'], name: 'users_search_index'
+  end
+end
diff --git a/services/api/db/migrate/20180514135529_add_container_auth_uuid_index.rb b/services/api/db/migrate/20180514135529_add_container_auth_uuid_index.rb
new file mode 100644 (file)
index 0000000..79e777e
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddContainerAuthUuidIndex < ActiveRecord::Migration
+  def change
+    add_index :containers, :auth_uuid
+  end
+end
diff --git a/services/api/db/migrate/20180607175050_properties_to_jsonb.rb b/services/api/db/migrate/20180607175050_properties_to_jsonb.rb
new file mode 100644 (file)
index 0000000..988227a
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require './db/migrate/20161213172944_full_text_search_indexes'
+
+class PropertiesToJsonb < ActiveRecord::Migration
+
+  @@tables_columns = [["nodes", "properties"],
+                      ["nodes", "info"],
+                      ["container_requests", "properties"],
+                      ["links", "properties"]]
+
+  def up
+    @@tables_columns.each do |table, column|
+      # Drop the FT index before changing column type to avoid
+      # "PG::DatatypeMismatch: ERROR: COALESCE types jsonb and text
+      # cannot be matched".
+      ActiveRecord::Base.connection.execute "DROP INDEX IF EXISTS #{table}_full_text_search_idx"
+      ActiveRecord::Base.connection.execute "ALTER TABLE #{table} ALTER COLUMN #{column} TYPE jsonb USING #{column}::jsonb"
+      ActiveRecord::Base.connection.execute "CREATE INDEX #{table}_index_on_#{column} ON #{table} USING gin (#{column})"
+    end
+    FullTextSearchIndexes.new.replace_index("container_requests")
+  end
+
+  def down
+    @@tables_columns.each do |table, column|
+      ActiveRecord::Base.connection.execute "DROP INDEX IF EXISTS #{table}_index_on_#{column}"
+      ActiveRecord::Base.connection.execute "ALTER TABLE #{table} ALTER COLUMN #{column} TYPE text"
+    end
+  end
+end
diff --git a/services/api/db/migrate/20180608123145_add_properties_to_groups.rb b/services/api/db/migrate/20180608123145_add_properties_to_groups.rb
new file mode 100644 (file)
index 0000000..12c6696
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require './db/migrate/20161213172944_full_text_search_indexes'
+
+class AddPropertiesToGroups < ActiveRecord::Migration
+  def up
+    add_column :groups, :properties, :jsonb, default: {}
+    ActiveRecord::Base.connection.execute("CREATE INDEX group_index_on_properties ON groups USING gin (properties);")
+    FullTextSearchIndexes.new.replace_index('groups')
+  end
+
+  def down
+    ActiveRecord::Base.connection.execute("DROP INDEX IF EXISTS group_index_on_properties")
+    remove_column :groups, :properties
+  end
+end
diff --git a/services/api/db/migrate/20180806133039_index_all_filenames.rb b/services/api/db/migrate/20180806133039_index_all_filenames.rb
new file mode 100644 (file)
index 0000000..36b155c
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class IndexAllFilenames < ActiveRecord::Migration
+  def up
+    ActiveRecord::Base.connection.execute 'ALTER TABLE collections ALTER COLUMN file_names TYPE text'
+  end
+  def down
+    ActiveRecord::Base.connection.execute 'ALTER TABLE collections ALTER COLUMN file_names TYPE varchar(8192)'
+  end
+end
diff --git a/services/api/db/migrate/20180820130357_add_pdh_and_trash_index_to_collections.rb b/services/api/db/migrate/20180820130357_add_pdh_and_trash_index_to_collections.rb
new file mode 100644 (file)
index 0000000..8d1cdf3
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddPdhAndTrashIndexToCollections < ActiveRecord::Migration
+  def change
+    add_index :collections, [:portable_data_hash, :trash_at]
+  end
+end
diff --git a/services/api/db/migrate/20180820132617_add_lock_index_to_containers.rb b/services/api/db/migrate/20180820132617_add_lock_index_to_containers.rb
new file mode 100644 (file)
index 0000000..94ca100
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddLockIndexToContainers < ActiveRecord::Migration
+  def change
+    # For the current code in sdk/go/dispatch:
+    add_index :containers, [:locked_by_uuid, :priority]
+    # For future dispatchers that use filters instead of offset for
+    # more predictable paging:
+    add_index :containers, [:locked_by_uuid, :uuid]
+  end
+end
diff --git a/services/api/db/migrate/20180820135808_drop_pdh_index_from_collections.rb b/services/api/db/migrate/20180820135808_drop_pdh_index_from_collections.rb
new file mode 100644 (file)
index 0000000..3d757e4
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class DropPdhIndexFromCollections < ActiveRecord::Migration
+  def change
+    remove_index :collections, column: :portable_data_hash
+  end
+end
diff --git a/services/api/db/migrate/20180824152014_add_md5_index_to_containers.rb b/services/api/db/migrate/20180824152014_add_md5_index_to_containers.rb
new file mode 100644 (file)
index 0000000..82b2163
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddMd5IndexToContainers < ActiveRecord::Migration
+  def up
+    ActiveRecord::Base.connection.execute 'CREATE INDEX index_containers_on_reuse_columns on containers (md5(command), cwd, md5(environment), output_path, container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints))'
+  end
+  def down
+    ActiveRecord::Base.connection.execute 'DROP INDEX index_containers_on_reuse_columns'
+  end
+end
diff --git a/services/api/db/migrate/20180824155207_add_queue_index_to_containers.rb b/services/api/db/migrate/20180824155207_add_queue_index_to_containers.rb
new file mode 100644 (file)
index 0000000..7245108
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddQueueIndexToContainers < ActiveRecord::Migration
+  def up
+    ActiveRecord::Base.connection.execute 'CREATE INDEX index_containers_on_queued_state on containers (state, (priority > 0))'
+  end
+  def down
+    ActiveRecord::Base.connection.execute 'DROP INDEX index_containers_on_queued_state'
+  end
+end
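
The expression index above matches the shape of a dispatcher's queue query;
a sketch:

    SELECT uuid FROM containers WHERE state = 'Queued' AND priority > 0;
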
diff --git a/services/api/db/migrate/20180904110712_add_runtime_status_to_containers.rb b/services/api/db/migrate/20180904110712_add_runtime_status_to_containers.rb
new file mode 100644 (file)
index 0000000..4c963e6
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddRuntimeStatusToContainers < ActiveRecord::Migration
+  def change
+    add_column :containers, :runtime_status, :jsonb, default: {}
+    add_index :containers, :runtime_status, using: :gin
+  end
+end
diff --git a/services/api/db/migrate/20180913175443_add_version_info_to_collections.rb b/services/api/db/migrate/20180913175443_add_version_info_to_collections.rb
new file mode 100644 (file)
index 0000000..a624dd9
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddVersionInfoToCollections < ActiveRecord::Migration
+  def change
+    # Do changes in bulk to save time on huge tables
+    change_table :collections, :bulk => true do |t|
+      t.string :current_version_uuid
+      t.integer :version, null: false, default: 1
+      t.index [:current_version_uuid, :version], unique: true
+    end
+  end
+end
diff --git a/services/api/db/migrate/20180915155335_set_current_version_uuid_on_collections.rb b/services/api/db/migrate/20180915155335_set_current_version_uuid_on_collections.rb
new file mode 100644 (file)
index 0000000..12a08e0
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SetCurrentVersionUuidOnCollections < ActiveRecord::Migration
+  def up
+    # Set each collection's current_version_uuid to its own uuid
+    Collection.where(current_version_uuid: nil).update_all("current_version_uuid=uuid")
+  end
+
+  def down
+  end
+end
diff --git a/services/api/db/migrate/20180917200000_replace_full_text_indexes.rb b/services/api/db/migrate/20180917200000_replace_full_text_indexes.rb
new file mode 100644 (file)
index 0000000..b0eea9e
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require './db/migrate/20161213172944_full_text_search_indexes'
+
+class ReplaceFullTextIndexes < ActiveRecord::Migration
+  def up
+    FullTextSearchIndexes.new.up
+  end
+
+  def down
+  end
+end
diff --git a/services/api/db/migrate/20180917205609_recompute_file_names_index.rb b/services/api/db/migrate/20180917205609_recompute_file_names_index.rb
new file mode 100644 (file)
index 0000000..e686096
--- /dev/null
@@ -0,0 +1,55 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RecomputeFileNamesIndex < ActiveRecord::Migration
+  def do_batch(pdhs:)
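+    # Recompute file_names from the manifest for every collection matching
+    # the given portable data hashes, all within one transaction.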
+    ActiveRecord::Base.connection.exec_query('BEGIN')
+    Collection.select(:portable_data_hash, :manifest_text).where(portable_data_hash: pdhs).distinct(:portable_data_hash).each do |c|
+      ActiveRecord::Base.connection.exec_query("update collections set file_names=$1 where portable_data_hash=$2",
+                                               "update file_names index",
+                                               [[nil, c.manifest_files], [nil, c.portable_data_hash]])
+    end
+    ActiveRecord::Base.connection.exec_query('COMMIT')
+  end
+  def up
+    # Process collections in multiple transactions, where the total
+    # size of all manifest_texts processed in a transaction is no more
+    # than batch_size_max.  Collections whose manifest_text is bigger
+    # than batch_size_max are updated in their own individual
+    # transactions.
+    batch_size_max = 1 << 28    # 256 MiB
+    batch_size = 0
+    batch_pdhs = {}
+    last_pdh = '0'
+    total = Collection.distinct.count(:portable_data_hash)
+    done = 0
+    any = true
+    while any
+      any = false
+      Collection.
+        unscoped.
+        select(:portable_data_hash).distinct.
+        order(:portable_data_hash).
+        where('portable_data_hash > ?', last_pdh).
+        limit(1000).each do |c|
+        any = true
+        last_pdh = c.portable_data_hash
+        manifest_size = c.portable_data_hash.split('+')[1].to_i
+        if batch_size > 0 && batch_size + manifest_size > batch_size_max
+          do_batch(pdhs: batch_pdhs.keys)
+          done += batch_pdhs.size
+          Rails.logger.info("RecomputeFileNamesIndex: #{done}/#{total}")
+          batch_pdhs = {}
+          batch_size = 0
+        end
+        batch_pdhs[c.portable_data_hash] = true
+        batch_size += manifest_size
+      end
+    end
+    do_batch(pdhs: batch_pdhs.keys)
+    Rails.logger.info("RecomputeFileNamesIndex: finished")
+  end
+  def down
+  end
+end
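
The batch sizing above relies on the portable data hash encoding the
manifest's byte size after the '+'. A minimal sketch (using the well-known
empty-collection hash):

    pdh = 'd41d8cd98f00b204e9800998ecf8427e+0'
    manifest_size = pdh.split('+')[1].to_i   # => 0
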
diff --git a/services/api/db/migrate/20180919001158_recreate_collection_unique_name_index.rb b/services/api/db/migrate/20180919001158_recreate_collection_unique_name_index.rb
new file mode 100644 (file)
index 0000000..6403956
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RecreateCollectionUniqueNameIndex < ActiveRecord::Migration
+  def up
+    Collection.transaction do
+      remove_index(:collections,
+                   name: 'index_collections_on_owner_uuid_and_name')
+      add_index(:collections, [:owner_uuid, :name],
+                unique: true,
+                where: 'is_trashed = false AND current_version_uuid = uuid',
+                name: 'index_collections_on_owner_uuid_and_name')
+    end
+  end
+
+  def down
+    Collection.transaction do
+      remove_index(:collections,
+                   name: 'index_collections_on_owner_uuid_and_name')
+      add_index(:collections, [:owner_uuid, :name],
+                unique: true,
+                where: 'is_trashed = false',
+                name: 'index_collections_on_owner_uuid_and_name')
+    end
+  end
+end
diff --git a/services/api/db/migrate/20181001175023_add_preserve_version_to_collections.rb b/services/api/db/migrate/20181001175023_add_preserve_version_to_collections.rb
new file mode 100644 (file)
index 0000000..fbdc397
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddPreserveVersionToCollections < ActiveRecord::Migration
+  def change
+    add_column :collections, :preserve_version, :boolean, default: false
+  end
+end
diff --git a/services/api/db/migrate/20181004131141_add_current_version_uuid_to_collection_search_index.rb b/services/api/db/migrate/20181004131141_add_current_version_uuid_to_collection_search_index.rb
new file mode 100644 (file)
index 0000000..63e9919
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddCurrentVersionUuidToCollectionSearchIndex < ActiveRecord::Migration
+  disable_ddl_transaction!
+
+  def up
+    remove_index :collections, :name => 'collections_search_index'
+    add_index :collections, ["owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "portable_data_hash", "uuid", "name", "current_version_uuid"], name: 'collections_search_index', algorithm: :concurrently
+  end
+
+  def down
+    remove_index :collections, :name => 'collections_search_index'
+    add_index :collections, ["owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "portable_data_hash", "uuid", "name"], name: 'collections_search_index', algorithm: :concurrently
+  end
+end
diff --git a/services/api/db/migrate/20181005192222_add_container_runtime_token.rb b/services/api/db/migrate/20181005192222_add_container_runtime_token.rb
new file mode 100644 (file)
index 0000000..07151cd
--- /dev/null
@@ -0,0 +1,7 @@
+class AddContainerRuntimeToken < ActiveRecord::Migration
+  def change
+    add_column :container_requests, :runtime_token, :text, :null => true
+    add_column :containers, :runtime_user_uuid, :text, :null => true
+    add_column :containers, :runtime_auth_scopes, :jsonb, :null => true
+  end
+end
diff --git a/services/api/db/migrate/20181011184200_add_runtime_token_to_container.rb b/services/api/db/migrate/20181011184200_add_runtime_token_to_container.rb
new file mode 100644 (file)
index 0000000..09201f5
--- /dev/null
@@ -0,0 +1,5 @@
+class AddRuntimeTokenToContainer < ActiveRecord::Migration
+  def change
+    add_column :containers, :runtime_token, :text, :null => true
+  end
+end
diff --git a/services/api/db/migrate/20181213183234_add_expression_index_to_links.rb b/services/api/db/migrate/20181213183234_add_expression_index_to_links.rb
new file mode 100644 (file)
index 0000000..2fdf830
--- /dev/null
@@ -0,0 +1,11 @@
+class AddExpressionIndexToLinks < ActiveRecord::Migration
+  def up
+    ActiveRecord::Base.connection.execute 'CREATE INDEX index_links_on_substring_head_uuid on links (substring(head_uuid, 7, 5))'
+    ActiveRecord::Base.connection.execute 'CREATE INDEX index_links_on_substring_tail_uuid on links (substring(tail_uuid, 7, 5))'
+  end
+
+  def down
+    ActiveRecord::Base.connection.execute 'DROP INDEX index_links_on_substring_head_uuid'
+    ActiveRecord::Base.connection.execute 'DROP INDEX index_links_on_substring_tail_uuid'
+  end
+end
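
Arvados uuids have the form 'zzzzz-4zz18-xxxxxxxxxxxxxxx'; characters 7
through 11 are the object-type code, so these expression indexes serve
type-filtered link lookups. A sketch (collections have type code '4zz18'):

    SELECT * FROM links WHERE substring(head_uuid, 7, 5) = '4zz18';
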
diff --git a/services/api/db/migrate/20190214214814_add_container_lock_count.rb b/services/api/db/migrate/20190214214814_add_container_lock_count.rb
new file mode 100644 (file)
index 0000000..a496eb0
--- /dev/null
@@ -0,0 +1,5 @@
+class AddContainerLockCount < ActiveRecord::Migration
+  def change
+    add_column :containers, :lock_count, :int, :null => false, :default => 0
+  end
+end
diff --git a/services/api/db/seeds.rb b/services/api/db/seeds.rb
new file mode 100644 (file)
index 0000000..b40bd4d
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This file seeds the database with initial/default values.
+#
+# It is invoked by `rake db:seed` and `rake db:setup`.
+
+DatabaseSeeds.install
diff --git a/services/api/db/structure.sql b/services/api/db/structure.sql
new file mode 100644 (file)
index 0000000..f766f33
--- /dev/null
@@ -0,0 +1,3222 @@
+-- Copyright (C) The Arvados Authors. All rights reserved.
+--
+-- SPDX-License-Identifier: AGPL-3.0
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+SELECT pg_catalog.set_config('search_path', '', false);
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+
+--
+-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: -
+--
+
+CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
+
+
+--
+-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: -
+--
+
+-- COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
+
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: api_client_authorizations; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.api_client_authorizations (
+    id integer NOT NULL,
+    api_token character varying(255) NOT NULL,
+    api_client_id integer NOT NULL,
+    user_id integer NOT NULL,
+    created_by_ip_address character varying(255),
+    last_used_by_ip_address character varying(255),
+    last_used_at timestamp without time zone,
+    expires_at timestamp without time zone,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    default_owner_uuid character varying(255),
+    scopes text DEFAULT '["all"]'::text,
+    uuid character varying(255) NOT NULL
+);
+
+
+--
+-- Name: api_client_authorizations_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.api_client_authorizations_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: api_client_authorizations_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.api_client_authorizations_id_seq OWNED BY public.api_client_authorizations.id;
+
+
+--
+-- Name: api_clients; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.api_clients (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    name character varying(255),
+    url_prefix character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    is_trusted boolean DEFAULT false
+);
+
+
+--
+-- Name: api_clients_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.api_clients_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: api_clients_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.api_clients_id_seq OWNED BY public.api_clients.id;
+
+
+--
+-- Name: authorized_keys; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.authorized_keys (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    name character varying(255),
+    key_type character varying(255),
+    authorized_user_uuid character varying(255),
+    public_key text,
+    expires_at timestamp without time zone,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: authorized_keys_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.authorized_keys_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: authorized_keys_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.authorized_keys_id_seq OWNED BY public.authorized_keys.id;
+
+
+--
+-- Name: collections; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.collections (
+    id integer NOT NULL,
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    portable_data_hash character varying(255),
+    replication_desired integer,
+    replication_confirmed_at timestamp without time zone,
+    replication_confirmed integer,
+    updated_at timestamp without time zone NOT NULL,
+    uuid character varying(255),
+    manifest_text text,
+    name character varying(255),
+    description character varying(524288),
+    properties jsonb,
+    delete_at timestamp without time zone,
+    file_names text,
+    trash_at timestamp without time zone,
+    is_trashed boolean DEFAULT false NOT NULL,
+    storage_classes_desired jsonb DEFAULT '["default"]'::jsonb,
+    storage_classes_confirmed jsonb DEFAULT '[]'::jsonb,
+    storage_classes_confirmed_at timestamp without time zone,
+    current_version_uuid character varying,
+    version integer DEFAULT 1 NOT NULL,
+    preserve_version boolean DEFAULT false
+);
+
+
+--
+-- Name: collections_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.collections_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: collections_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.collections_id_seq OWNED BY public.collections.id;
+
+
+--
+-- Name: commit_ancestors; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.commit_ancestors (
+    id integer NOT NULL,
+    repository_name character varying(255),
+    descendant character varying(255) NOT NULL,
+    ancestor character varying(255) NOT NULL,
+    "is" boolean DEFAULT false NOT NULL,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: commit_ancestors_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.commit_ancestors_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: commit_ancestors_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.commit_ancestors_id_seq OWNED BY public.commit_ancestors.id;
+
+
+--
+-- Name: commits; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.commits (
+    id integer NOT NULL,
+    repository_name character varying(255),
+    sha1 character varying(255),
+    message character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: commits_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.commits_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: commits_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.commits_id_seq OWNED BY public.commits.id;
+
+
+--
+-- Name: container_requests; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.container_requests (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_at timestamp without time zone,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    name character varying(255),
+    description text,
+    properties jsonb,
+    state character varying(255),
+    requesting_container_uuid character varying(255),
+    container_uuid character varying(255),
+    container_count_max integer,
+    mounts text,
+    runtime_constraints text,
+    container_image character varying(255),
+    environment text,
+    cwd character varying(255),
+    command text,
+    output_path character varying(255),
+    priority integer,
+    expires_at timestamp without time zone,
+    filters text,
+    updated_at timestamp without time zone NOT NULL,
+    container_count integer DEFAULT 0,
+    use_existing boolean DEFAULT true,
+    scheduling_parameters text,
+    output_uuid character varying(255),
+    log_uuid character varying(255),
+    output_name character varying(255) DEFAULT NULL::character varying,
+    output_ttl integer DEFAULT 0 NOT NULL,
+    secret_mounts jsonb DEFAULT '{}'::jsonb,
+    runtime_token text
+);
+
+
+--
+-- Name: container_requests_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.container_requests_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: container_requests_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.container_requests_id_seq OWNED BY public.container_requests.id;
+
+
+--
+-- Name: containers; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.containers (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_at timestamp without time zone,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    state character varying(255),
+    started_at timestamp without time zone,
+    finished_at timestamp without time zone,
+    log character varying(255),
+    environment text,
+    cwd character varying(255),
+    command text,
+    output_path character varying(255),
+    mounts text,
+    runtime_constraints text,
+    output character varying(255),
+    container_image character varying(255),
+    progress double precision,
+    priority bigint,
+    updated_at timestamp without time zone NOT NULL,
+    exit_code integer,
+    auth_uuid character varying(255),
+    locked_by_uuid character varying(255),
+    scheduling_parameters text,
+    secret_mounts jsonb DEFAULT '{}'::jsonb,
+    secret_mounts_md5 character varying DEFAULT '99914b932bd37a50b983c5e7c90ae93b'::character varying,
+    runtime_status jsonb DEFAULT '{}'::jsonb,
+    runtime_user_uuid text,
+    runtime_auth_scopes jsonb,
+    runtime_token text,
+    lock_count integer DEFAULT 0 NOT NULL
+);
+
+
+--
+-- Name: containers_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.containers_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: containers_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.containers_id_seq OWNED BY public.containers.id;
+
+
+--
+-- Name: groups; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.groups (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    name character varying(255) NOT NULL,
+    description character varying(524288),
+    updated_at timestamp without time zone NOT NULL,
+    group_class character varying(255),
+    trash_at timestamp without time zone,
+    is_trashed boolean DEFAULT false NOT NULL,
+    delete_at timestamp without time zone,
+    properties jsonb DEFAULT '{}'::jsonb
+);
+
+
+--
+-- Name: groups_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.groups_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: groups_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.groups_id_seq OWNED BY public.groups.id;
+
+
+--
+-- Name: humans; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.humans (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    properties text,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: humans_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.humans_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: humans_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.humans_id_seq OWNED BY public.humans.id;
+
+
+--
+-- Name: job_tasks; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.job_tasks (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    job_uuid character varying(255),
+    sequence integer,
+    parameters text,
+    output text,
+    progress double precision,
+    success boolean,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    created_by_job_task_uuid character varying(255),
+    qsequence bigint,
+    started_at timestamp without time zone,
+    finished_at timestamp without time zone
+);
+
+
+--
+-- Name: job_tasks_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.job_tasks_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: job_tasks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.job_tasks_id_seq OWNED BY public.job_tasks.id;
+
+
+--
+-- Name: job_tasks_qsequence_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.job_tasks_qsequence_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: job_tasks_qsequence_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.job_tasks_qsequence_seq OWNED BY public.job_tasks.qsequence;
+
+
+--
+-- Name: jobs; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.jobs (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    submit_id character varying(255),
+    script character varying(255),
+    script_version character varying(255),
+    script_parameters text,
+    cancelled_by_client_uuid character varying(255),
+    cancelled_by_user_uuid character varying(255),
+    cancelled_at timestamp without time zone,
+    started_at timestamp without time zone,
+    finished_at timestamp without time zone,
+    running boolean,
+    success boolean,
+    output character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    is_locked_by_uuid character varying(255),
+    log character varying(255),
+    tasks_summary text,
+    runtime_constraints text,
+    nondeterministic boolean,
+    repository character varying(255),
+    supplied_script_version character varying(255),
+    docker_image_locator character varying(255),
+    priority integer DEFAULT 0 NOT NULL,
+    description character varying(524288),
+    state character varying(255),
+    arvados_sdk_version character varying(255),
+    components text,
+    script_parameters_digest character varying(255)
+);
+
+
+--
+-- Name: jobs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.jobs_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: jobs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.jobs_id_seq OWNED BY public.jobs.id;
+
+
+--
+-- Name: keep_disks; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.keep_disks (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    ping_secret character varying(255) NOT NULL,
+    node_uuid character varying(255),
+    filesystem_uuid character varying(255),
+    bytes_total integer,
+    bytes_free integer,
+    is_readable boolean DEFAULT true NOT NULL,
+    is_writable boolean DEFAULT true NOT NULL,
+    last_read_at timestamp without time zone,
+    last_write_at timestamp without time zone,
+    last_ping_at timestamp without time zone,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    keep_service_uuid character varying(255)
+);
+
+
+--
+-- Name: keep_disks_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.keep_disks_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: keep_disks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.keep_disks_id_seq OWNED BY public.keep_disks.id;
+
+
+--
+-- Name: keep_services; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.keep_services (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    service_host character varying(255),
+    service_port integer,
+    service_ssl_flag boolean,
+    service_type character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    read_only boolean DEFAULT false NOT NULL
+);
+
+
+--
+-- Name: keep_services_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.keep_services_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: keep_services_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.keep_services_id_seq OWNED BY public.keep_services.id;
+
+
+--
+-- Name: links; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.links (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    tail_uuid character varying(255),
+    link_class character varying(255),
+    name character varying(255),
+    head_uuid character varying(255),
+    properties jsonb,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: links_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.links_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: links_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.links_id_seq OWNED BY public.links.id;
+
+
+--
+-- Name: logs; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.logs (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    object_uuid character varying(255),
+    event_at timestamp without time zone,
+    event_type character varying(255),
+    summary text,
+    properties text,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    modified_at timestamp without time zone,
+    object_owner_uuid character varying(255)
+);
+
+
+--
+-- Name: logs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.logs_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: logs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.logs_id_seq OWNED BY public.logs.id;
+
+
+--
+-- Name: users; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.users (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255) NOT NULL,
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    email character varying(255),
+    first_name character varying(255),
+    last_name character varying(255),
+    identity_url character varying(255),
+    is_admin boolean,
+    prefs text,
+    updated_at timestamp without time zone NOT NULL,
+    default_owner_uuid character varying(255),
+    is_active boolean DEFAULT false,
+    username character varying(255),
+    redirect_to_user_uuid character varying
+);
+
+
+--
+-- Name: materialized_permission_view; Type: MATERIALIZED VIEW; Schema: public; Owner: -
+--
+
+CREATE MATERIALIZED VIEW public.materialized_permission_view AS
+ WITH RECURSIVE perm_value(name, val) AS (
+         VALUES ('can_read'::text,(1)::smallint), ('can_login'::text,1), ('can_write'::text,2), ('can_manage'::text,3)
+        ), perm_edges(tail_uuid, head_uuid, val, follow, trashed) AS (
+         SELECT links.tail_uuid,
+            links.head_uuid,
+            pv.val,
+            ((pv.val = 3) OR (groups.uuid IS NOT NULL)) AS follow,
+            (0)::smallint AS trashed,
+            (0)::smallint AS followtrash
+           FROM ((public.links
+             LEFT JOIN perm_value pv ON ((pv.name = (links.name)::text)))
+             LEFT JOIN public.groups ON (((pv.val < 3) AND ((groups.uuid)::text = (links.head_uuid)::text))))
+          WHERE ((links.link_class)::text = 'permission'::text)
+        UNION ALL
+         SELECT groups.owner_uuid,
+            groups.uuid,
+            3,
+            true AS bool,
+                CASE
+                    WHEN ((groups.trash_at IS NOT NULL) AND (groups.trash_at < clock_timestamp())) THEN 1
+                    ELSE 0
+                END AS "case",
+            1
+           FROM public.groups
+        ), perm(val, follow, user_uuid, target_uuid, trashed) AS (
+         SELECT (3)::smallint AS val,
+            true AS follow,
+            (users.uuid)::character varying(32) AS user_uuid,
+            (users.uuid)::character varying(32) AS target_uuid,
+            (0)::smallint AS trashed
+           FROM public.users
+        UNION
+         SELECT (LEAST((perm_1.val)::integer, edges.val))::smallint AS val,
+            edges.follow,
+            perm_1.user_uuid,
+            (edges.head_uuid)::character varying(32) AS target_uuid,
+            ((GREATEST((perm_1.trashed)::integer, edges.trashed) * edges.followtrash))::smallint AS trashed
+           FROM (perm perm_1
+             JOIN perm_edges edges ON ((perm_1.follow AND ((edges.tail_uuid)::text = (perm_1.target_uuid)::text))))
+        )
+ SELECT perm.user_uuid,
+    perm.target_uuid,
+    max(perm.val) AS perm_level,
+        CASE perm.follow
+            WHEN true THEN perm.target_uuid
+            ELSE NULL::character varying
+        END AS target_owner_uuid,
+    max(perm.trashed) AS trashed
+   FROM perm
+  GROUP BY perm.user_uuid, perm.target_uuid,
+        CASE perm.follow
+            WHEN true THEN perm.target_uuid
+            ELSE NULL::character varying
+        END
+  WITH NO DATA;
+
+
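-- The recursive query above flattens the permission-link graph into
-- (user, target, level) rows, mapping link names to levels via perm_value
-- (can_read/can_login = 1, can_write = 2, can_manage = 3). Because the view
-- is created WITH NO DATA, it must be populated before first use:

REFRESH MATERIALIZED VIEW public.materialized_permission_view;

-- A hypothetical read-side check (illustrative values; the application's
-- actual queries live elsewhere in this tree):

SELECT target_uuid, perm_level
  FROM public.materialized_permission_view
 WHERE user_uuid = 'zzzzz-tpzed-000000000000000'
   AND trashed = 0;
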
+--
+-- Name: nodes; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.nodes (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    slot_number integer,
+    hostname character varying(255),
+    domain character varying(255),
+    ip_address character varying(255),
+    first_ping_at timestamp without time zone,
+    last_ping_at timestamp without time zone,
+    info jsonb,
+    updated_at timestamp without time zone NOT NULL,
+    properties jsonb,
+    job_uuid character varying(255)
+);
+
+
+--
+-- Name: nodes_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.nodes_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: nodes_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.nodes_id_seq OWNED BY public.nodes.id;
+
+
+--
+-- Name: permission_refresh_lock; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.permission_refresh_lock (
+    id integer NOT NULL
+);
+
+
+--
+-- Name: permission_refresh_lock_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.permission_refresh_lock_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: permission_refresh_lock_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.permission_refresh_lock_id_seq OWNED BY public.permission_refresh_lock.id;
+
+
+--
+-- Name: pipeline_instances; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.pipeline_instances (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    pipeline_template_uuid character varying(255),
+    name character varying(255),
+    components text,
+    updated_at timestamp without time zone NOT NULL,
+    properties text,
+    state character varying(255),
+    components_summary text,
+    started_at timestamp without time zone,
+    finished_at timestamp without time zone,
+    description character varying(524288)
+);
+
+
+--
+-- Name: pipeline_instances_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.pipeline_instances_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: pipeline_instances_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.pipeline_instances_id_seq OWNED BY public.pipeline_instances.id;
+
+
+--
+-- Name: pipeline_templates; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.pipeline_templates (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    name character varying(255),
+    components text,
+    updated_at timestamp without time zone NOT NULL,
+    description character varying(524288)
+);
+
+
+--
+-- Name: pipeline_templates_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.pipeline_templates_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: pipeline_templates_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.pipeline_templates_id_seq OWNED BY public.pipeline_templates.id;
+
+
+--
+-- Name: repositories; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.repositories (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    name character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: repositories_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.repositories_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: repositories_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.repositories_id_seq OWNED BY public.repositories.id;
+
+
+--
+-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.schema_migrations (
+    version character varying(255) NOT NULL
+);
+
+
+--
+-- Name: specimens; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.specimens (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    material character varying(255),
+    updated_at timestamp without time zone NOT NULL,
+    properties text
+);
+
+
+--
+-- Name: specimens_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.specimens_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: specimens_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.specimens_id_seq OWNED BY public.specimens.id;
+
+
+--
+-- Name: traits; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.traits (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    name character varying(255),
+    properties text,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: traits_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.traits_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: traits_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.traits_id_seq OWNED BY public.traits.id;
+
+
+--
+-- Name: users_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.users_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: users_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.users_id_seq OWNED BY public.users.id;
+
+
+--
+-- Name: virtual_machines; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.virtual_machines (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    hostname character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: virtual_machines_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.virtual_machines_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: virtual_machines_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.virtual_machines_id_seq OWNED BY public.virtual_machines.id;
+
+
+--
+-- Name: workflows; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.workflows (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_at timestamp without time zone,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    name character varying(255),
+    description text,
+    definition text,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: workflows_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.workflows_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: workflows_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.workflows_id_seq OWNED BY public.workflows.id;
+
+
+--
+-- Name: api_client_authorizations id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.api_client_authorizations ALTER COLUMN id SET DEFAULT nextval('public.api_client_authorizations_id_seq'::regclass);
+
+
+--
+-- Name: api_clients id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.api_clients ALTER COLUMN id SET DEFAULT nextval('public.api_clients_id_seq'::regclass);
+
+
+--
+-- Name: authorized_keys id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.authorized_keys ALTER COLUMN id SET DEFAULT nextval('public.authorized_keys_id_seq'::regclass);
+
+
+--
+-- Name: collections id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.collections ALTER COLUMN id SET DEFAULT nextval('public.collections_id_seq'::regclass);
+
+
+--
+-- Name: commit_ancestors id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.commit_ancestors ALTER COLUMN id SET DEFAULT nextval('public.commit_ancestors_id_seq'::regclass);
+
+
+--
+-- Name: commits id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.commits ALTER COLUMN id SET DEFAULT nextval('public.commits_id_seq'::regclass);
+
+
+--
+-- Name: container_requests id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.container_requests ALTER COLUMN id SET DEFAULT nextval('public.container_requests_id_seq'::regclass);
+
+
+--
+-- Name: containers id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.containers ALTER COLUMN id SET DEFAULT nextval('public.containers_id_seq'::regclass);
+
+
+--
+-- Name: groups id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.groups ALTER COLUMN id SET DEFAULT nextval('public.groups_id_seq'::regclass);
+
+
+--
+-- Name: humans id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.humans ALTER COLUMN id SET DEFAULT nextval('public.humans_id_seq'::regclass);
+
+
+--
+-- Name: job_tasks id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.job_tasks ALTER COLUMN id SET DEFAULT nextval('public.job_tasks_id_seq'::regclass);
+
+
+--
+-- Name: jobs id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.jobs ALTER COLUMN id SET DEFAULT nextval('public.jobs_id_seq'::regclass);
+
+
+--
+-- Name: keep_disks id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.keep_disks ALTER COLUMN id SET DEFAULT nextval('public.keep_disks_id_seq'::regclass);
+
+
+--
+-- Name: keep_services id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.keep_services ALTER COLUMN id SET DEFAULT nextval('public.keep_services_id_seq'::regclass);
+
+
+--
+-- Name: links id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.links ALTER COLUMN id SET DEFAULT nextval('public.links_id_seq'::regclass);
+
+
+--
+-- Name: logs id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.logs ALTER COLUMN id SET DEFAULT nextval('public.logs_id_seq'::regclass);
+
+
+--
+-- Name: nodes id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.nodes ALTER COLUMN id SET DEFAULT nextval('public.nodes_id_seq'::regclass);
+
+
+--
+-- Name: permission_refresh_lock id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.permission_refresh_lock ALTER COLUMN id SET DEFAULT nextval('public.permission_refresh_lock_id_seq'::regclass);
+
+
+--
+-- Name: pipeline_instances id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.pipeline_instances ALTER COLUMN id SET DEFAULT nextval('public.pipeline_instances_id_seq'::regclass);
+
+
+--
+-- Name: pipeline_templates id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.pipeline_templates ALTER COLUMN id SET DEFAULT nextval('public.pipeline_templates_id_seq'::regclass);
+
+
+--
+-- Name: repositories id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.repositories ALTER COLUMN id SET DEFAULT nextval('public.repositories_id_seq'::regclass);
+
+
+--
+-- Name: specimens id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.specimens ALTER COLUMN id SET DEFAULT nextval('public.specimens_id_seq'::regclass);
+
+
+--
+-- Name: traits id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.traits ALTER COLUMN id SET DEFAULT nextval('public.traits_id_seq'::regclass);
+
+
+--
+-- Name: users id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.users ALTER COLUMN id SET DEFAULT nextval('public.users_id_seq'::regclass);
+
+
+--
+-- Name: virtual_machines id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.virtual_machines ALTER COLUMN id SET DEFAULT nextval('public.virtual_machines_id_seq'::regclass);
+
+
+--
+-- Name: workflows id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.workflows ALTER COLUMN id SET DEFAULT nextval('public.workflows_id_seq'::regclass);
+
+
+--
+-- Name: api_client_authorizations api_client_authorizations_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.api_client_authorizations
+    ADD CONSTRAINT api_client_authorizations_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: api_clients api_clients_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.api_clients
+    ADD CONSTRAINT api_clients_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: authorized_keys authorized_keys_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.authorized_keys
+    ADD CONSTRAINT authorized_keys_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: collections collections_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.collections
+    ADD CONSTRAINT collections_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: commit_ancestors commit_ancestors_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.commit_ancestors
+    ADD CONSTRAINT commit_ancestors_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: commits commits_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.commits
+    ADD CONSTRAINT commits_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: container_requests container_requests_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.container_requests
+    ADD CONSTRAINT container_requests_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: containers containers_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.containers
+    ADD CONSTRAINT containers_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: groups groups_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.groups
+    ADD CONSTRAINT groups_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: humans humans_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.humans
+    ADD CONSTRAINT humans_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: job_tasks job_tasks_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.job_tasks
+    ADD CONSTRAINT job_tasks_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: jobs jobs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.jobs
+    ADD CONSTRAINT jobs_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: keep_disks keep_disks_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.keep_disks
+    ADD CONSTRAINT keep_disks_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: keep_services keep_services_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.keep_services
+    ADD CONSTRAINT keep_services_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: links links_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.links
+    ADD CONSTRAINT links_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: logs logs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.logs
+    ADD CONSTRAINT logs_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: nodes nodes_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.nodes
+    ADD CONSTRAINT nodes_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: permission_refresh_lock permission_refresh_lock_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.permission_refresh_lock
+    ADD CONSTRAINT permission_refresh_lock_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: pipeline_instances pipeline_instances_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.pipeline_instances
+    ADD CONSTRAINT pipeline_instances_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: pipeline_templates pipeline_templates_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.pipeline_templates
+    ADD CONSTRAINT pipeline_templates_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: repositories repositories_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.repositories
+    ADD CONSTRAINT repositories_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: specimens specimens_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.specimens
+    ADD CONSTRAINT specimens_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: traits traits_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.traits
+    ADD CONSTRAINT traits_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: users users_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.users
+    ADD CONSTRAINT users_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: virtual_machines virtual_machines_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.virtual_machines
+    ADD CONSTRAINT virtual_machines_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: workflows workflows_pkey; Type: CONSTRAINT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY public.workflows
+    ADD CONSTRAINT workflows_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: api_client_authorizations_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX api_client_authorizations_search_index ON public.api_client_authorizations USING btree (api_token, created_by_ip_address, last_used_by_ip_address, default_owner_uuid, uuid);
+
+
+--
+-- Name: api_clients_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX api_clients_search_index ON public.api_clients USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, url_prefix);
+
+
+--
+-- Name: authorized_keys_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX authorized_keys_search_index ON public.authorized_keys USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, key_type, authorized_user_uuid);
+
+
+--
+-- Name: collection_index_on_properties; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX collection_index_on_properties ON public.collections USING gin (properties);
+
+
+--
+-- Name: collections_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX collections_full_text_search_idx ON public.collections USING gin (to_tsvector('english'::regconfig, substr((((((((((((((((((COALESCE(owner_uuid, ''::character varying))::text || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(portable_data_hash, ''::character varying))::text) || ' '::text) || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || COALESCE(file_names, ''::text)), 0, 1000000)));
+
+
+--
+-- Name: collections_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX collections_search_index ON public.collections USING btree (owner_uuid, modified_by_client_uuid, modified_by_user_uuid, portable_data_hash, uuid, name, current_version_uuid);
+
+
+--
+-- Name: container_requests_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX container_requests_full_text_search_idx ON public.container_requests USING gin (to_tsvector('english'::regconfig, substr((((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text)) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(requesting_container_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(container_uuid, ''::character varying))::text) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(container_image, ''::character varying))::text) || ' '::text) || COALESCE(environment, ''::text)) || ' '::text) || (COALESCE(cwd, ''::character varying))::text) || ' '::text) || COALESCE(command, ''::text)) || ' '::text) || (COALESCE(output_path, ''::character varying))::text) || ' '::text) || COALESCE(filters, ''::text)) || ' '::text) || COALESCE(scheduling_parameters, ''::text)) || ' '::text) || (COALESCE(output_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output_name, ''::character varying))::text), 0, 1000000)));
+
+
+--
+-- Name: container_requests_index_on_properties; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX container_requests_index_on_properties ON public.container_requests USING gin (properties);
+
+
+--
+-- Name: container_requests_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX container_requests_search_index ON public.container_requests USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, state, requesting_container_uuid, container_uuid, container_image, cwd, output_path, output_uuid, log_uuid, output_name);
+
+
+--
+-- Name: containers_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX containers_search_index ON public.containers USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, state, log, cwd, output_path, output, container_image, auth_uuid, locked_by_uuid);
+
+
+--
+-- Name: group_index_on_properties; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX group_index_on_properties ON public.groups USING gin (properties);
+
+
+--
+-- Name: groups_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX groups_full_text_search_idx ON public.groups USING gin (to_tsvector('english'::regconfig, substr((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(group_class, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text)), 0, 1000000)));
+
+
+--
+-- Name: groups_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX groups_search_index ON public.groups USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, group_class);
+
+
+--
+-- Name: humans_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX humans_search_index ON public.humans USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid);
+
+
+--
+-- Name: index_api_client_authorizations_on_api_client_id; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_api_client_authorizations_on_api_client_id ON public.api_client_authorizations USING btree (api_client_id);
+
+
+--
+-- Name: index_api_client_authorizations_on_api_token; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_api_client_authorizations_on_api_token ON public.api_client_authorizations USING btree (api_token);
+
+
+--
+-- Name: index_api_client_authorizations_on_expires_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_api_client_authorizations_on_expires_at ON public.api_client_authorizations USING btree (expires_at);
+
+
+--
+-- Name: index_api_client_authorizations_on_user_id; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_api_client_authorizations_on_user_id ON public.api_client_authorizations USING btree (user_id);
+
+
+--
+-- Name: index_api_client_authorizations_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_api_client_authorizations_on_uuid ON public.api_client_authorizations USING btree (uuid);
+
+
+--
+-- Name: index_api_clients_on_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_api_clients_on_created_at ON public.api_clients USING btree (created_at);
+
+
+--
+-- Name: index_api_clients_on_modified_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_api_clients_on_modified_at ON public.api_clients USING btree (modified_at);
+
+
+--
+-- Name: index_api_clients_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_api_clients_on_owner_uuid ON public.api_clients USING btree (owner_uuid);
+
+
+--
+-- Name: index_api_clients_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_api_clients_on_uuid ON public.api_clients USING btree (uuid);
+
+
+--
+-- Name: index_authkeys_on_user_and_expires_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_authkeys_on_user_and_expires_at ON public.authorized_keys USING btree (authorized_user_uuid, expires_at);
+
+
+--
+-- Name: index_authorized_keys_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_authorized_keys_on_owner_uuid ON public.authorized_keys USING btree (owner_uuid);
+
+
+--
+-- Name: index_authorized_keys_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_authorized_keys_on_uuid ON public.authorized_keys USING btree (uuid);
+
+
+--
+-- Name: index_collections_on_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_collections_on_created_at ON public.collections USING btree (created_at);
+
+
+--
+-- Name: index_collections_on_current_version_uuid_and_version; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_collections_on_current_version_uuid_and_version ON public.collections USING btree (current_version_uuid, version);
+
+
+--
+-- Name: index_collections_on_delete_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_collections_on_delete_at ON public.collections USING btree (delete_at);
+
+
+--
+-- Name: index_collections_on_is_trashed; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_collections_on_is_trashed ON public.collections USING btree (is_trashed);
+
+
+--
+-- Name: index_collections_on_modified_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_collections_on_modified_at ON public.collections USING btree (modified_at);
+
+
+--
+-- Name: index_collections_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_collections_on_modified_at_uuid ON public.collections USING btree (modified_at DESC, uuid);
+
+
+--
+-- Name: index_collections_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_collections_on_owner_uuid ON public.collections USING btree (owner_uuid);
+
+
+--
+-- Name: index_collections_on_owner_uuid_and_name; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_collections_on_owner_uuid_and_name ON public.collections USING btree (owner_uuid, name) WHERE ((is_trashed = false) AND ((current_version_uuid)::text = (uuid)::text));
+
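-- This partial unique index enforces name uniqueness only for live,
-- current-version collections; trashed rows and past versions are exempt.
-- Illustrative only (hypothetical values):

INSERT INTO public.collections
    (uuid, current_version_uuid, owner_uuid, name, created_at, updated_at)
VALUES ('zzzzz-4zz18-000000000000001', 'zzzzz-4zz18-000000000000001',
        'zzzzz-tpzed-000000000000000', 'reads', now(), now());

-- Repeating the INSERT with a new uuid but the same owner_uuid and name fails
-- with: duplicate key value violates unique constraint
-- "index_collections_on_owner_uuid_and_name". Setting is_trashed = true, or
-- making current_version_uuid differ from uuid, takes a row out of the index.
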
+
+--
+-- Name: index_collections_on_portable_data_hash_and_trash_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_collections_on_portable_data_hash_and_trash_at ON public.collections USING btree (portable_data_hash, trash_at);
+
+
+--
+-- Name: index_collections_on_trash_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_collections_on_trash_at ON public.collections USING btree (trash_at);
+
+
+--
+-- Name: index_collections_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_collections_on_uuid ON public.collections USING btree (uuid);
+
+
+--
+-- Name: index_commit_ancestors_on_descendant_and_ancestor; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_commit_ancestors_on_descendant_and_ancestor ON public.commit_ancestors USING btree (descendant, ancestor);
+
+
+--
+-- Name: index_commits_on_repository_name_and_sha1; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_commits_on_repository_name_and_sha1 ON public.commits USING btree (repository_name, sha1);
+
+
+--
+-- Name: index_container_requests_on_container_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_container_requests_on_container_uuid ON public.container_requests USING btree (container_uuid);
+
+
+--
+-- Name: index_container_requests_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_container_requests_on_modified_at_uuid ON public.container_requests USING btree (modified_at DESC, uuid);
+
+
+--
+-- Name: index_container_requests_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_container_requests_on_owner_uuid ON public.container_requests USING btree (owner_uuid);
+
+
+--
+-- Name: index_container_requests_on_requesting_container_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_container_requests_on_requesting_container_uuid ON public.container_requests USING btree (requesting_container_uuid);
+
+
+--
+-- Name: index_container_requests_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_container_requests_on_uuid ON public.container_requests USING btree (uuid);
+
+
+--
+-- Name: index_containers_on_auth_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_auth_uuid ON public.containers USING btree (auth_uuid);
+
+
+--
+-- Name: index_containers_on_locked_by_uuid_and_priority; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_locked_by_uuid_and_priority ON public.containers USING btree (locked_by_uuid, priority);
+
+
+--
+-- Name: index_containers_on_locked_by_uuid_and_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_locked_by_uuid_and_uuid ON public.containers USING btree (locked_by_uuid, uuid);
+
+
+--
+-- Name: index_containers_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_modified_at_uuid ON public.containers USING btree (modified_at DESC, uuid);
+
+
+--
+-- Name: index_containers_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_owner_uuid ON public.containers USING btree (owner_uuid);
+
+
+--
+-- Name: index_containers_on_queued_state; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_queued_state ON public.containers USING btree (state, ((priority > 0)));
+
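-- A sketch of the queued-work scan this expression index presumably serves;
-- the query repeats the indexed expression (priority > 0) so the planner can
-- use the index:

SELECT uuid, priority
  FROM public.containers
 WHERE state = 'Queued' AND (priority > 0)
 ORDER BY priority DESC;
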
+
+--
+-- Name: index_containers_on_reuse_columns; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_reuse_columns ON public.containers USING btree (md5(command), cwd, md5(environment), output_path, container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints));
+
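-- Hashing the large text columns with md5() keeps this composite reuse key
-- within btree size limits. A hypothetical reuse probe (all literals are
-- illustrative); each md5() call must mirror the indexed expression for the
-- index to apply:

SELECT uuid
  FROM public.containers
 WHERE md5(command) = md5('["echo", "hello"]')
   AND cwd = '/tmp'
   AND md5(environment) = md5('{}')
   AND output_path = '/out'
   AND container_image = 'hypothetical-image-locator'
   AND md5(mounts) = md5('{}')
   AND secret_mounts_md5 = '99914b932bd37a50b983c5e7c90ae93b' -- md5 of '{}'
   AND md5(runtime_constraints) = md5('{"vcpus":1}')
 LIMIT 1;
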
+
+--
+-- Name: index_containers_on_runtime_status; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_runtime_status ON public.containers USING gin (runtime_status);
+
+
+--
+-- Name: index_containers_on_secret_mounts_md5; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_secret_mounts_md5 ON public.containers USING btree (secret_mounts_md5);
+
+
+--
+-- Name: index_containers_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_containers_on_uuid ON public.containers USING btree (uuid);
+
+
+--
+-- Name: index_groups_on_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_groups_on_created_at ON public.groups USING btree (created_at);
+
+
+--
+-- Name: index_groups_on_delete_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_groups_on_delete_at ON public.groups USING btree (delete_at);
+
+
+--
+-- Name: index_groups_on_group_class; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_groups_on_group_class ON public.groups USING btree (group_class);
+
+
+--
+-- Name: index_groups_on_is_trashed; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_groups_on_is_trashed ON public.groups USING btree (is_trashed);
+
+
+--
+-- Name: index_groups_on_modified_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_groups_on_modified_at ON public.groups USING btree (modified_at);
+
+
+--
+-- Name: index_groups_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_groups_on_modified_at_uuid ON public.groups USING btree (modified_at DESC, uuid);
+
+
+--
+-- Name: index_groups_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_groups_on_owner_uuid ON public.groups USING btree (owner_uuid);
+
+
+--
+-- Name: index_groups_on_owner_uuid_and_name; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_groups_on_owner_uuid_and_name ON public.groups USING btree (owner_uuid, name) WHERE (is_trashed = false);
+
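+-- Note: a partial unique index; group names need only be unique per owner
+-- among non-trashed groups, so trashing a group frees its name for reuse.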
+
+--
+-- Name: index_groups_on_trash_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_groups_on_trash_at ON public.groups USING btree (trash_at);
+
+
+--
+-- Name: index_groups_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_groups_on_uuid ON public.groups USING btree (uuid);
+
+
+--
+-- Name: index_humans_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_humans_on_owner_uuid ON public.humans USING btree (owner_uuid);
+
+
+--
+-- Name: index_humans_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_humans_on_uuid ON public.humans USING btree (uuid);
+
+
+--
+-- Name: index_job_tasks_on_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_job_tasks_on_created_at ON public.job_tasks USING btree (created_at);
+
+
+--
+-- Name: index_job_tasks_on_created_by_job_task_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_job_tasks_on_created_by_job_task_uuid ON public.job_tasks USING btree (created_by_job_task_uuid);
+
+
+--
+-- Name: index_job_tasks_on_job_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_job_tasks_on_job_uuid ON public.job_tasks USING btree (job_uuid);
+
+
+--
+-- Name: index_job_tasks_on_modified_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_job_tasks_on_modified_at ON public.job_tasks USING btree (modified_at);
+
+
+--
+-- Name: index_job_tasks_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_job_tasks_on_owner_uuid ON public.job_tasks USING btree (owner_uuid);
+
+
+--
+-- Name: index_job_tasks_on_sequence; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_job_tasks_on_sequence ON public.job_tasks USING btree (sequence);
+
+
+--
+-- Name: index_job_tasks_on_success; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_job_tasks_on_success ON public.job_tasks USING btree (success);
+
+
+--
+-- Name: index_job_tasks_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_job_tasks_on_uuid ON public.job_tasks USING btree (uuid);
+
+
+--
+-- Name: index_jobs_on_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_jobs_on_created_at ON public.jobs USING btree (created_at);
+
+
+--
+-- Name: index_jobs_on_finished_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_jobs_on_finished_at ON public.jobs USING btree (finished_at);
+
+
+--
+-- Name: index_jobs_on_modified_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_jobs_on_modified_at ON public.jobs USING btree (modified_at);
+
+
+--
+-- Name: index_jobs_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_jobs_on_modified_at_uuid ON public.jobs USING btree (modified_at DESC, uuid);
+
+
+--
+-- Name: index_jobs_on_output; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_jobs_on_output ON public.jobs USING btree (output);
+
+
+--
+-- Name: index_jobs_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_jobs_on_owner_uuid ON public.jobs USING btree (owner_uuid);
+
+
+--
+-- Name: index_jobs_on_script; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_jobs_on_script ON public.jobs USING btree (script);
+
+
+--
+-- Name: index_jobs_on_script_parameters_digest; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_jobs_on_script_parameters_digest ON public.jobs USING btree (script_parameters_digest);
+
+
+--
+-- Name: index_jobs_on_started_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_jobs_on_started_at ON public.jobs USING btree (started_at);
+
+
+--
+-- Name: index_jobs_on_submit_id; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_jobs_on_submit_id ON public.jobs USING btree (submit_id);
+
+
+--
+-- Name: index_jobs_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_jobs_on_uuid ON public.jobs USING btree (uuid);
+
+
+--
+-- Name: index_keep_disks_on_filesystem_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_keep_disks_on_filesystem_uuid ON public.keep_disks USING btree (filesystem_uuid);
+
+
+--
+-- Name: index_keep_disks_on_last_ping_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_keep_disks_on_last_ping_at ON public.keep_disks USING btree (last_ping_at);
+
+
+--
+-- Name: index_keep_disks_on_node_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_keep_disks_on_node_uuid ON public.keep_disks USING btree (node_uuid);
+
+
+--
+-- Name: index_keep_disks_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_keep_disks_on_owner_uuid ON public.keep_disks USING btree (owner_uuid);
+
+
+--
+-- Name: index_keep_disks_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_keep_disks_on_uuid ON public.keep_disks USING btree (uuid);
+
+
+--
+-- Name: index_keep_services_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_keep_services_on_owner_uuid ON public.keep_services USING btree (owner_uuid);
+
+
+--
+-- Name: index_keep_services_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_keep_services_on_uuid ON public.keep_services USING btree (uuid);
+
+
+--
+-- Name: index_links_on_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_links_on_created_at ON public.links USING btree (created_at);
+
+
+--
+-- Name: index_links_on_head_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_links_on_head_uuid ON public.links USING btree (head_uuid);
+
+
+--
+-- Name: index_links_on_modified_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_links_on_modified_at ON public.links USING btree (modified_at);
+
+
+--
+-- Name: index_links_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_links_on_modified_at_uuid ON public.links USING btree (modified_at DESC, uuid);
+
+
+--
+-- Name: index_links_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_links_on_owner_uuid ON public.links USING btree (owner_uuid);
+
+
+--
+-- Name: index_links_on_substring_head_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_links_on_substring_head_uuid ON public.links USING btree ("substring"((head_uuid)::text, 7, 5));
+
+
+--
+-- Name: index_links_on_substring_tail_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_links_on_substring_tail_uuid ON public.links USING btree ("substring"((tail_uuid)::text, 7, 5));
+
+
+--
+-- Name: index_links_on_tail_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_links_on_tail_uuid ON public.links USING btree (tail_uuid);
+
+
+--
+-- Name: index_links_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_links_on_uuid ON public.links USING btree (uuid);
+
+
+--
+-- Name: index_logs_on_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_logs_on_created_at ON public.logs USING btree (created_at);
+
+
+--
+-- Name: index_logs_on_event_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_logs_on_event_at ON public.logs USING btree (event_at);
+
+
+--
+-- Name: index_logs_on_event_type; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_logs_on_event_type ON public.logs USING btree (event_type);
+
+
+--
+-- Name: index_logs_on_modified_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_logs_on_modified_at ON public.logs USING btree (modified_at);
+
+
+--
+-- Name: index_logs_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_logs_on_modified_at_uuid ON public.logs USING btree (modified_at DESC, uuid);
+
+
+--
+-- Name: index_logs_on_object_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_logs_on_object_owner_uuid ON public.logs USING btree (object_owner_uuid);
+
+
+--
+-- Name: index_logs_on_object_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_logs_on_object_uuid ON public.logs USING btree (object_uuid);
+
+
+--
+-- Name: index_logs_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_logs_on_owner_uuid ON public.logs USING btree (owner_uuid);
+
+
+--
+-- Name: index_logs_on_summary; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_logs_on_summary ON public.logs USING btree (summary);
+
+
+--
+-- Name: index_logs_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_logs_on_uuid ON public.logs USING btree (uuid);
+
+
+--
+-- Name: index_nodes_on_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_nodes_on_created_at ON public.nodes USING btree (created_at);
+
+
+--
+-- Name: index_nodes_on_hostname; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_nodes_on_hostname ON public.nodes USING btree (hostname);
+
+
+--
+-- Name: index_nodes_on_modified_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_nodes_on_modified_at ON public.nodes USING btree (modified_at);
+
+
+--
+-- Name: index_nodes_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_nodes_on_owner_uuid ON public.nodes USING btree (owner_uuid);
+
+
+--
+-- Name: index_nodes_on_slot_number; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_nodes_on_slot_number ON public.nodes USING btree (slot_number);
+
+
+--
+-- Name: index_nodes_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_nodes_on_uuid ON public.nodes USING btree (uuid);
+
+
+--
+-- Name: index_pipeline_instances_on_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_pipeline_instances_on_created_at ON public.pipeline_instances USING btree (created_at);
+
+
+--
+-- Name: index_pipeline_instances_on_modified_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_pipeline_instances_on_modified_at ON public.pipeline_instances USING btree (modified_at);
+
+
+--
+-- Name: index_pipeline_instances_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_pipeline_instances_on_modified_at_uuid ON public.pipeline_instances USING btree (modified_at DESC, uuid);
+
+
+--
+-- Name: index_pipeline_instances_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_pipeline_instances_on_owner_uuid ON public.pipeline_instances USING btree (owner_uuid);
+
+
+--
+-- Name: index_pipeline_instances_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_pipeline_instances_on_uuid ON public.pipeline_instances USING btree (uuid);
+
+
+--
+-- Name: index_pipeline_templates_on_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_pipeline_templates_on_created_at ON public.pipeline_templates USING btree (created_at);
+
+
+--
+-- Name: index_pipeline_templates_on_modified_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_pipeline_templates_on_modified_at ON public.pipeline_templates USING btree (modified_at);
+
+
+--
+-- Name: index_pipeline_templates_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_pipeline_templates_on_modified_at_uuid ON public.pipeline_templates USING btree (modified_at DESC, uuid);
+
+
+--
+-- Name: index_pipeline_templates_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_pipeline_templates_on_owner_uuid ON public.pipeline_templates USING btree (owner_uuid);
+
+
+--
+-- Name: index_pipeline_templates_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_pipeline_templates_on_uuid ON public.pipeline_templates USING btree (uuid);
+
+
+--
+-- Name: index_repositories_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_repositories_on_modified_at_uuid ON public.repositories USING btree (modified_at DESC, uuid);
+
+
+--
+-- Name: index_repositories_on_name; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_repositories_on_name ON public.repositories USING btree (name);
+
+
+--
+-- Name: index_repositories_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_repositories_on_owner_uuid ON public.repositories USING btree (owner_uuid);
+
+
+--
+-- Name: index_repositories_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_repositories_on_uuid ON public.repositories USING btree (uuid);
+
+
+--
+-- Name: index_specimens_on_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_specimens_on_created_at ON public.specimens USING btree (created_at);
+
+
+--
+-- Name: index_specimens_on_modified_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_specimens_on_modified_at ON public.specimens USING btree (modified_at);
+
+
+--
+-- Name: index_specimens_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_specimens_on_owner_uuid ON public.specimens USING btree (owner_uuid);
+
+
+--
+-- Name: index_specimens_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_specimens_on_uuid ON public.specimens USING btree (uuid);
+
+
+--
+-- Name: index_traits_on_name; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_traits_on_name ON public.traits USING btree (name);
+
+
+--
+-- Name: index_traits_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_traits_on_owner_uuid ON public.traits USING btree (owner_uuid);
+
+
+--
+-- Name: index_traits_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_traits_on_uuid ON public.traits USING btree (uuid);
+
+
+--
+-- Name: index_users_on_created_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_users_on_created_at ON public.users USING btree (created_at);
+
+
+--
+-- Name: index_users_on_modified_at; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_users_on_modified_at ON public.users USING btree (modified_at);
+
+
+--
+-- Name: index_users_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_users_on_modified_at_uuid ON public.users USING btree (modified_at DESC, uuid);
+
+
+--
+-- Name: index_users_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_users_on_owner_uuid ON public.users USING btree (owner_uuid);
+
+
+--
+-- Name: index_users_on_username; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_users_on_username ON public.users USING btree (username);
+
+
+--
+-- Name: index_users_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_users_on_uuid ON public.users USING btree (uuid);
+
+
+--
+-- Name: index_virtual_machines_on_hostname; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_virtual_machines_on_hostname ON public.virtual_machines USING btree (hostname);
+
+
+--
+-- Name: index_virtual_machines_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_virtual_machines_on_modified_at_uuid ON public.virtual_machines USING btree (modified_at DESC, uuid);
+
+
+--
+-- Name: index_virtual_machines_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_virtual_machines_on_owner_uuid ON public.virtual_machines USING btree (owner_uuid);
+
+
+--
+-- Name: index_virtual_machines_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_virtual_machines_on_uuid ON public.virtual_machines USING btree (uuid);
+
+
+--
+-- Name: index_workflows_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_workflows_on_modified_at_uuid ON public.workflows USING btree (modified_at DESC, uuid);
+
+
+--
+-- Name: index_workflows_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_workflows_on_owner_uuid ON public.workflows USING btree (owner_uuid);
+
+
+--
+-- Name: index_workflows_on_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX index_workflows_on_uuid ON public.workflows USING btree (uuid);
+
+
+--
+-- Name: job_tasks_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX job_tasks_search_index ON public.job_tasks USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, job_uuid, created_by_job_task_uuid);
+
+
+--
+-- Name: jobs_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX jobs_full_text_search_idx ON public.jobs USING gin (to_tsvector('english'::regconfig, substr((((((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(submit_id, ''::character varying))::text) || ' '::text) || (COALESCE(script, ''::character varying))::text) || ' '::text) || (COALESCE(script_version, ''::character varying))::text) || ' '::text) || COALESCE(script_parameters, ''::text)) || ' '::text) || (COALESCE(cancelled_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(cancelled_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output, ''::character varying))::text) || ' '::text) || (COALESCE(is_locked_by_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log, ''::character varying))::text) || ' '::text) || COALESCE(tasks_summary, ''::text)) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(repository, ''::character varying))::text) || ' '::text) || (COALESCE(supplied_script_version, ''::character varying))::text) || ' '::text) || (COALESCE(docker_image_locator, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(arvados_sdk_version, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)), 0, 1000000)));
+
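+-- Note: the generated expression concatenates the COALESCE'd columns,
+-- separated by spaces, and substr(..., 0, 1000000) caps the text fed to
+-- to_tsvector at one million characters.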
+
+--
+-- Name: jobs_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX jobs_search_index ON public.jobs USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, submit_id, script, script_version, cancelled_by_client_uuid, cancelled_by_user_uuid, output, is_locked_by_uuid, log, repository, supplied_script_version, docker_image_locator, state, arvados_sdk_version);
+
+
+--
+-- Name: keep_disks_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX keep_disks_search_index ON public.keep_disks USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, ping_secret, node_uuid, filesystem_uuid, keep_service_uuid);
+
+
+--
+-- Name: keep_services_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX keep_services_search_index ON public.keep_services USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, service_host, service_type);
+
+
+--
+-- Name: links_index_on_properties; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX links_index_on_properties ON public.links USING gin (properties);
+
+
+--
+-- Name: links_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX links_search_index ON public.links USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, tail_uuid, link_class, name, head_uuid);
+
+
+--
+-- Name: links_tail_name_unique_if_link_class_name; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX links_tail_name_unique_if_link_class_name ON public.links USING btree (tail_uuid, name) WHERE ((link_class)::text = 'name'::text);
+
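+-- Note: a partial unique index; each tail object can carry at most one
+-- 'name'-class link with any given name.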
+
+--
+-- Name: logs_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX logs_search_index ON public.logs USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, object_uuid, event_type, object_owner_uuid);
+
+
+--
+-- Name: nodes_index_on_info; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX nodes_index_on_info ON public.nodes USING gin (info);
+
+
+--
+-- Name: nodes_index_on_properties; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX nodes_index_on_properties ON public.nodes USING gin (properties);
+
+
+--
+-- Name: nodes_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX nodes_search_index ON public.nodes USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname, domain, ip_address, job_uuid);
+
+
+--
+-- Name: permission_target_trashed; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX permission_target_trashed ON public.materialized_permission_view USING btree (trashed, target_uuid);
+
+
+--
+-- Name: permission_target_user_trashed_level; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX permission_target_user_trashed_level ON public.materialized_permission_view USING btree (user_uuid, trashed, perm_level);
+
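+-- Note: (user_uuid, trashed, perm_level) supports per-user permission
+-- lookups, presumably the hot path when deciding what a user may see.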
+
+--
+-- Name: pipeline_instances_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX pipeline_instances_full_text_search_idx ON public.pipeline_instances USING gin (to_tsvector('english'::regconfig, substr((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(pipeline_template_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || COALESCE(properties, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || COALESCE(components_summary, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text), 0, 1000000)));
+
+
+--
+-- Name: pipeline_instances_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX pipeline_instances_search_index ON public.pipeline_instances USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, pipeline_template_uuid, name, state);
+
+
+--
+-- Name: pipeline_template_owner_uuid_name_unique; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX pipeline_template_owner_uuid_name_unique ON public.pipeline_templates USING btree (owner_uuid, name);
+
+
+--
+-- Name: pipeline_templates_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX pipeline_templates_full_text_search_idx ON public.pipeline_templates USING gin (to_tsvector('english'::regconfig, substr((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text), 0, 1000000)));
+
+
+--
+-- Name: pipeline_templates_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX pipeline_templates_search_index ON public.pipeline_templates USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+
+
+--
+-- Name: repositories_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX repositories_search_index ON public.repositories USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+
+
+--
+-- Name: specimens_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX specimens_search_index ON public.specimens USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, material);
+
+
+--
+-- Name: traits_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX traits_search_index ON public.traits USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+
+
+--
+-- Name: unique_schema_migrations; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE UNIQUE INDEX unique_schema_migrations ON public.schema_migrations USING btree (version);
+
+
+--
+-- Name: users_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX users_search_index ON public.users USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, email, first_name, last_name, identity_url, default_owner_uuid, username, redirect_to_user_uuid);
+
+
+--
+-- Name: virtual_machines_search_index; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX virtual_machines_search_index ON public.virtual_machines USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname);
+
+
+--
+-- Name: workflows_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX workflows_full_text_search_idx ON public.workflows USING gin (to_tsvector('english'::regconfig, substr((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text)), 0, 1000000)));
+
+
+--
+-- Name: workflows_search_idx; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX workflows_search_idx ON public.workflows USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+
+
+--
+-- PostgreSQL database dump complete
+--
+
+SET search_path TO "$user", public;
+
+INSERT INTO schema_migrations (version) VALUES ('20121016005009');
+
+INSERT INTO schema_migrations (version) VALUES ('20130105203021');
+
+INSERT INTO schema_migrations (version) VALUES ('20130105224358');
+
+INSERT INTO schema_migrations (version) VALUES ('20130105224618');
+
+INSERT INTO schema_migrations (version) VALUES ('20130107181109');
+
+INSERT INTO schema_migrations (version) VALUES ('20130107212832');
+
+INSERT INTO schema_migrations (version) VALUES ('20130109175700');
+
+INSERT INTO schema_migrations (version) VALUES ('20130109220548');
+
+INSERT INTO schema_migrations (version) VALUES ('20130113214204');
+
+INSERT INTO schema_migrations (version) VALUES ('20130116024233');
+
+INSERT INTO schema_migrations (version) VALUES ('20130116215213');
+
+INSERT INTO schema_migrations (version) VALUES ('20130118002239');
+
+INSERT INTO schema_migrations (version) VALUES ('20130122020042');
+
+INSERT INTO schema_migrations (version) VALUES ('20130122201442');
+
+INSERT INTO schema_migrations (version) VALUES ('20130122221616');
+
+INSERT INTO schema_migrations (version) VALUES ('20130123174514');
+
+INSERT INTO schema_migrations (version) VALUES ('20130123180224');
+
+INSERT INTO schema_migrations (version) VALUES ('20130123180228');
+
+INSERT INTO schema_migrations (version) VALUES ('20130125220425');
+
+INSERT INTO schema_migrations (version) VALUES ('20130128202518');
+
+INSERT INTO schema_migrations (version) VALUES ('20130128231343');
+
+INSERT INTO schema_migrations (version) VALUES ('20130130205749');
+
+INSERT INTO schema_migrations (version) VALUES ('20130203104818');
+
+INSERT INTO schema_migrations (version) VALUES ('20130203104824');
+
+INSERT INTO schema_migrations (version) VALUES ('20130203115329');
+
+INSERT INTO schema_migrations (version) VALUES ('20130207195855');
+
+INSERT INTO schema_migrations (version) VALUES ('20130218181504');
+
+INSERT INTO schema_migrations (version) VALUES ('20130226170000');
+
+INSERT INTO schema_migrations (version) VALUES ('20130313175417');
+
+INSERT INTO schema_migrations (version) VALUES ('20130315155820');
+
+INSERT INTO schema_migrations (version) VALUES ('20130315183626');
+
+INSERT INTO schema_migrations (version) VALUES ('20130315213205');
+
+INSERT INTO schema_migrations (version) VALUES ('20130318002138');
+
+INSERT INTO schema_migrations (version) VALUES ('20130319165853');
+
+INSERT INTO schema_migrations (version) VALUES ('20130319180730');
+
+INSERT INTO schema_migrations (version) VALUES ('20130319194637');
+
+INSERT INTO schema_migrations (version) VALUES ('20130319201431');
+
+INSERT INTO schema_migrations (version) VALUES ('20130319235957');
+
+INSERT INTO schema_migrations (version) VALUES ('20130320000107');
+
+INSERT INTO schema_migrations (version) VALUES ('20130326173804');
+
+INSERT INTO schema_migrations (version) VALUES ('20130326182917');
+
+INSERT INTO schema_migrations (version) VALUES ('20130415020241');
+
+INSERT INTO schema_migrations (version) VALUES ('20130425024459');
+
+INSERT INTO schema_migrations (version) VALUES ('20130425214427');
+
+INSERT INTO schema_migrations (version) VALUES ('20130523060112');
+
+INSERT INTO schema_migrations (version) VALUES ('20130523060213');
+
+INSERT INTO schema_migrations (version) VALUES ('20130524042319');
+
+INSERT INTO schema_migrations (version) VALUES ('20130528134100');
+
+INSERT INTO schema_migrations (version) VALUES ('20130606183519');
+
+INSERT INTO schema_migrations (version) VALUES ('20130608053730');
+
+INSERT INTO schema_migrations (version) VALUES ('20130610202538');
+
+INSERT INTO schema_migrations (version) VALUES ('20130611163736');
+
+INSERT INTO schema_migrations (version) VALUES ('20130612042554');
+
+INSERT INTO schema_migrations (version) VALUES ('20130617150007');
+
+INSERT INTO schema_migrations (version) VALUES ('20130626002829');
+
+INSERT INTO schema_migrations (version) VALUES ('20130626022810');
+
+INSERT INTO schema_migrations (version) VALUES ('20130627154537');
+
+INSERT INTO schema_migrations (version) VALUES ('20130627184333');
+
+INSERT INTO schema_migrations (version) VALUES ('20130708163414');
+
+INSERT INTO schema_migrations (version) VALUES ('20130708182912');
+
+INSERT INTO schema_migrations (version) VALUES ('20130708185153');
+
+INSERT INTO schema_migrations (version) VALUES ('20130724153034');
+
+INSERT INTO schema_migrations (version) VALUES ('20131007180607');
+
+INSERT INTO schema_migrations (version) VALUES ('20140117231056');
+
+INSERT INTO schema_migrations (version) VALUES ('20140124222114');
+
+INSERT INTO schema_migrations (version) VALUES ('20140129184311');
+
+INSERT INTO schema_migrations (version) VALUES ('20140317135600');
+
+INSERT INTO schema_migrations (version) VALUES ('20140319160547');
+
+INSERT INTO schema_migrations (version) VALUES ('20140321191343');
+
+INSERT INTO schema_migrations (version) VALUES ('20140324024606');
+
+INSERT INTO schema_migrations (version) VALUES ('20140325175653');
+
+INSERT INTO schema_migrations (version) VALUES ('20140402001908');
+
+INSERT INTO schema_migrations (version) VALUES ('20140407184311');
+
+INSERT INTO schema_migrations (version) VALUES ('20140421140924');
+
+INSERT INTO schema_migrations (version) VALUES ('20140421151939');
+
+INSERT INTO schema_migrations (version) VALUES ('20140421151940');
+
+INSERT INTO schema_migrations (version) VALUES ('20140422011506');
+
+INSERT INTO schema_migrations (version) VALUES ('20140423132913');
+
+INSERT INTO schema_migrations (version) VALUES ('20140423133559');
+
+INSERT INTO schema_migrations (version) VALUES ('20140501165548');
+
+INSERT INTO schema_migrations (version) VALUES ('20140519205916');
+
+INSERT INTO schema_migrations (version) VALUES ('20140527152921');
+
+INSERT INTO schema_migrations (version) VALUES ('20140530200539');
+
+INSERT INTO schema_migrations (version) VALUES ('20140601022548');
+
+INSERT INTO schema_migrations (version) VALUES ('20140602143352');
+
+INSERT INTO schema_migrations (version) VALUES ('20140607150616');
+
+INSERT INTO schema_migrations (version) VALUES ('20140611173003');
+
+INSERT INTO schema_migrations (version) VALUES ('20140627210837');
+
+INSERT INTO schema_migrations (version) VALUES ('20140709172343');
+
+INSERT INTO schema_migrations (version) VALUES ('20140714184006');
+
+INSERT INTO schema_migrations (version) VALUES ('20140811184643');
+
+INSERT INTO schema_migrations (version) VALUES ('20140817035914');
+
+INSERT INTO schema_migrations (version) VALUES ('20140818125735');
+
+INSERT INTO schema_migrations (version) VALUES ('20140826180337');
+
+INSERT INTO schema_migrations (version) VALUES ('20140828141043');
+
+INSERT INTO schema_migrations (version) VALUES ('20140909183946');
+
+INSERT INTO schema_migrations (version) VALUES ('20140911221252');
+
+INSERT INTO schema_migrations (version) VALUES ('20140918141529');
+
+INSERT INTO schema_migrations (version) VALUES ('20140918153541');
+
+INSERT INTO schema_migrations (version) VALUES ('20140918153705');
+
+INSERT INTO schema_migrations (version) VALUES ('20140924091559');
+
+INSERT INTO schema_migrations (version) VALUES ('20141111133038');
+
+INSERT INTO schema_migrations (version) VALUES ('20141208164553');
+
+INSERT INTO schema_migrations (version) VALUES ('20141208174553');
+
+INSERT INTO schema_migrations (version) VALUES ('20141208174653');
+
+INSERT INTO schema_migrations (version) VALUES ('20141208185217');
+
+INSERT INTO schema_migrations (version) VALUES ('20150122175935');
+
+INSERT INTO schema_migrations (version) VALUES ('20150123142953');
+
+INSERT INTO schema_migrations (version) VALUES ('20150203180223');
+
+INSERT INTO schema_migrations (version) VALUES ('20150206210804');
+
+INSERT INTO schema_migrations (version) VALUES ('20150206230342');
+
+INSERT INTO schema_migrations (version) VALUES ('20150216193428');
+
+INSERT INTO schema_migrations (version) VALUES ('20150303210106');
+
+INSERT INTO schema_migrations (version) VALUES ('20150312151136');
+
+INSERT INTO schema_migrations (version) VALUES ('20150317132720');
+
+INSERT INTO schema_migrations (version) VALUES ('20150324152204');
+
+INSERT INTO schema_migrations (version) VALUES ('20150423145759');
+
+INSERT INTO schema_migrations (version) VALUES ('20150512193020');
+
+INSERT INTO schema_migrations (version) VALUES ('20150526180251');
+
+INSERT INTO schema_migrations (version) VALUES ('20151202151426');
+
+INSERT INTO schema_migrations (version) VALUES ('20151215134304');
+
+INSERT INTO schema_migrations (version) VALUES ('20151229214707');
+
+INSERT INTO schema_migrations (version) VALUES ('20160208210629');
+
+INSERT INTO schema_migrations (version) VALUES ('20160209155729');
+
+INSERT INTO schema_migrations (version) VALUES ('20160324144017');
+
+INSERT INTO schema_migrations (version) VALUES ('20160506175108');
+
+INSERT INTO schema_migrations (version) VALUES ('20160509143250');
+
+INSERT INTO schema_migrations (version) VALUES ('20160808151559');
+
+INSERT INTO schema_migrations (version) VALUES ('20160819195557');
+
+INSERT INTO schema_migrations (version) VALUES ('20160819195725');
+
+INSERT INTO schema_migrations (version) VALUES ('20160901210110');
+
+INSERT INTO schema_migrations (version) VALUES ('20160909181442');
+
+INSERT INTO schema_migrations (version) VALUES ('20160926194129');
+
+INSERT INTO schema_migrations (version) VALUES ('20161019171346');
+
+INSERT INTO schema_migrations (version) VALUES ('20161111143147');
+
+INSERT INTO schema_migrations (version) VALUES ('20161115171221');
+
+INSERT INTO schema_migrations (version) VALUES ('20161115174218');
+
+INSERT INTO schema_migrations (version) VALUES ('20161213172944');
+
+INSERT INTO schema_migrations (version) VALUES ('20161222153434');
+
+INSERT INTO schema_migrations (version) VALUES ('20161223090712');
+
+INSERT INTO schema_migrations (version) VALUES ('20170102153111');
+
+INSERT INTO schema_migrations (version) VALUES ('20170105160301');
+
+INSERT INTO schema_migrations (version) VALUES ('20170105160302');
+
+INSERT INTO schema_migrations (version) VALUES ('20170216170823');
+
+INSERT INTO schema_migrations (version) VALUES ('20170301225558');
+
+INSERT INTO schema_migrations (version) VALUES ('20170319063406');
+
+INSERT INTO schema_migrations (version) VALUES ('20170328215436');
+
+INSERT INTO schema_migrations (version) VALUES ('20170330012505');
+
+INSERT INTO schema_migrations (version) VALUES ('20170419173031');
+
+INSERT INTO schema_migrations (version) VALUES ('20170419173712');
+
+INSERT INTO schema_migrations (version) VALUES ('20170419175801');
+
+INSERT INTO schema_migrations (version) VALUES ('20170628185847');
+
+INSERT INTO schema_migrations (version) VALUES ('20170704160233');
+
+INSERT INTO schema_migrations (version) VALUES ('20170706141334');
+
+INSERT INTO schema_migrations (version) VALUES ('20170824202826');
+
+INSERT INTO schema_migrations (version) VALUES ('20170906224040');
+
+INSERT INTO schema_migrations (version) VALUES ('20171027183824');
+
+INSERT INTO schema_migrations (version) VALUES ('20171208203841');
+
+INSERT INTO schema_migrations (version) VALUES ('20171212153352');
+
+INSERT INTO schema_migrations (version) VALUES ('20180216203422');
+
+INSERT INTO schema_migrations (version) VALUES ('20180228220311');
+
+INSERT INTO schema_migrations (version) VALUES ('20180313180114');
+
+INSERT INTO schema_migrations (version) VALUES ('20180501182859');
+
+INSERT INTO schema_migrations (version) VALUES ('20180514135529');
+
+INSERT INTO schema_migrations (version) VALUES ('20180607175050');
+
+INSERT INTO schema_migrations (version) VALUES ('20180608123145');
+
+INSERT INTO schema_migrations (version) VALUES ('20180806133039');
+
+INSERT INTO schema_migrations (version) VALUES ('20180820130357');
+
+INSERT INTO schema_migrations (version) VALUES ('20180820132617');
+
+INSERT INTO schema_migrations (version) VALUES ('20180820135808');
+
+INSERT INTO schema_migrations (version) VALUES ('20180824152014');
+
+INSERT INTO schema_migrations (version) VALUES ('20180824155207');
+
+INSERT INTO schema_migrations (version) VALUES ('20180904110712');
+
+INSERT INTO schema_migrations (version) VALUES ('20180913175443');
+
+INSERT INTO schema_migrations (version) VALUES ('20180915155335');
+
+INSERT INTO schema_migrations (version) VALUES ('20180917200000');
+
+INSERT INTO schema_migrations (version) VALUES ('20180917205609');
+
+INSERT INTO schema_migrations (version) VALUES ('20180919001158');
+
+INSERT INTO schema_migrations (version) VALUES ('20181001175023');
+
+INSERT INTO schema_migrations (version) VALUES ('20181004131141');
+
+INSERT INTO schema_migrations (version) VALUES ('20181005192222');
+
+INSERT INTO schema_migrations (version) VALUES ('20181011184200');
+
+INSERT INTO schema_migrations (version) VALUES ('20181213183234');
+
+INSERT INTO schema_migrations (version) VALUES ('20190214214814');
+
diff --git a/services/api/fpm-info.sh b/services/api/fpm-info.sh
new file mode 100644 (file)
index 0000000..1d5891e
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+fpm_depends+=('git >= 1.7.10')
+
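+# TARGET is assumed to be set by the surrounding package-build scripts
+# (e.g. "centos7" or "debian9"); the case below adds distro-specific build
+# dependencies on top of the git requirement above.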
+case "$TARGET" in
+    centos*)
+        fpm_depends+=(libcurl-devel postgresql-devel)
+        ;;
+    debian* | ubuntu*)
+        fpm_depends+=(libcurl-ssl-dev libpq-dev g++)
+        ;;
+esac
diff --git a/services/api/lib/app_version.rb b/services/api/lib/app_version.rb
new file mode 100644 (file)
index 0000000..335608b
--- /dev/null
@@ -0,0 +1,71 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# If you change this file, you'll probably also want to make the same
+# changes in apps/workbench/lib/app_version.rb.
+
+class AppVersion
+  def self.git(*args, &block)
+    IO.popen(["git", "--git-dir", ".git"] + args, "r",
+             chdir: Rails.root.join('../..'),
+             err: "/dev/null",
+             &block)
+  end
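+  # Note: runs git against the repository two levels above the Rails app
+  # (chdir: Rails.root.join('../..')), with stderr discarded so a missing
+  # .git directory fails quietly.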
+
+  def self.forget
+    @hash = nil
+    @package_version = nil
+  end
+
+  # Return abbrev commit hash for current code version: "abc1234", or
+  # "abc1234-modified" if there are uncommitted changes. If present,
+  # return contents of {root}/git-commit.version instead.
+  def self.hash
+    if (cached = Rails.configuration.source_version || @hash)
+      return cached
+    end
+
+    # Read the version from our package's git-commit.version file, if available.
+    begin
+      @hash = IO.read(Rails.root.join("git-commit.version")).strip
+    rescue Errno::ENOENT
+    end
+
+    if @hash.nil? or @hash.empty?
+      begin
+        local_modified = false
+        git("status", "--porcelain") do |git_pipe|
+          git_pipe.each_line do |_|
+            local_modified = true
+            # Continue reading the pipe so git doesn't get SIGPIPE.
+          end
+        end
+        if $?.success?
+          git("log", "-n1", "--format=%H") do |git_pipe|
+            git_pipe.each_line do |line|
+              @hash = line.chomp[0...8] + (local_modified ? '-modified' : '')
+            end
+          end
+        end
+      rescue SystemCallError
+      end
+    end
+
+    @hash || "unknown"
+  end
+
+  def self.package_version
+    if (cached = Rails.configuration.package_version || @package_version)
+      return cached
+    end
+
+    begin
+      @package_version = IO.read(Rails.root.join("package-build.version")).strip
+    rescue Errno::ENOENT
+      @package_version = "unknown"
+    end
+
+    @package_version
+  end
+end
diff --git a/services/api/lib/arvados_model_updates.rb b/services/api/lib/arvados_model_updates.rb
new file mode 100644 (file)
index 0000000..7f0d7c2
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module ArvadosModelUpdates
+  # ArvadosModel checks this to decide whether it should update the
+  # 'modified_by_user_uuid' field.
+  def anonymous_updater
+    Thread.current[:anonymous_updater] || false
+  end
+
+  def leave_modified_by_user_alone
+    anonymous_updater_was = anonymous_updater
+    begin
+      Thread.current[:anonymous_updater] = true
+      yield
+    ensure
+      Thread.current[:anonymous_updater] = anonymous_updater_was
+    end
+  end
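+  # Typical use (hypothetical call site):
+  #   leave_modified_by_user_alone { obj.save! }
+  # so that system-initiated writes don't clobber modified_by_user_uuid.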
+
+  # ArvadosModel checks this to decide whether it should update the
+  # 'modified_at' field.
+  def timeless_updater
+    Thread.current[:timeless_updater] || false
+  end
+
+  def leave_modified_at_alone
+    timeless_updater_was = timeless_updater
+    begin
+      Thread.current[:timeless_updater] = true
+      yield
+    ensure
+      Thread.current[:timeless_updater] = timeless_updater_was
+    end
+  end
+
+end
diff --git a/services/api/lib/assets/.gitkeep b/services/api/lib/assets/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/lib/audit_logs.rb b/services/api/lib/audit_logs.rb
new file mode 100644 (file)
index 0000000..56fd935
--- /dev/null
@@ -0,0 +1,69 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'current_api_client'
+require 'db_current_time'
+
+module AuditLogs
+  extend CurrentApiClient
+  extend DbCurrentTime
+
+  def self.delete_old(max_age:, max_batch:)
+    act_as_system_user do
+      if !File.owned?(Rails.root.join('tmp'))
+        Rails.logger.warn("AuditLogs: not owner of #{Rails.root}/tmp, skipping")
+        return
+      end
+      lockfile = Rails.root.join('tmp', 'audit_logs.lock')
+      File.open(lockfile, File::RDWR|File::CREAT, 0600) do |f|
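+        # Take a non-blocking exclusive lock; if another process is already
+        # pruning audit logs, return without waiting for it.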
+        return unless f.flock(File::LOCK_NB|File::LOCK_EX)
+
+        sql = "select clock_timestamp() - interval '#{'%.9f' % max_age} seconds'"
+        threshold = ActiveRecord::Base.connection.select_value(sql).to_time.utc
+        Rails.logger.info "AuditLogs: deleting logs older than #{threshold}"
+
+        did_total = 0
+        loop do
+          sql = Log.unscoped.
+                select(:id).
+                order(:created_at).
+                where('event_type in (?)', ['create', 'update', 'destroy', 'delete']).
+                where('created_at < ?', threshold).
+                limit(max_batch).
+                to_sql
+          did = Log.unscoped.where("id in (#{sql})").delete_all
+          did_total += did
+
+          Rails.logger.info "AuditLogs: deleted batch of #{did}"
+          break if did == 0
+        end
+        Rails.logger.info "AuditLogs: deleted total #{did_total}"
+      end
+    end
+  end
+
+  def self.tidy_in_background
+    max_age = Rails.configuration.max_audit_log_age
+    max_batch = Rails.configuration.max_audit_log_delete_batch
+    return if max_age <= 0 || max_batch <= 0
+
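+    # Throttle: a cache entry that expires every max_age/14 means at most
+    # one delete_old pass per interval, however often this hook is called.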
+    exp = (max_age/14).seconds
+    need = false
+    Rails.cache.fetch('AuditLogs', expires_in: exp) do
+      need = true
+    end
+    return if !need
+
+    Thread.new do
+      Thread.current.abort_on_exception = false
+      begin
+        delete_old(max_age: max_age, max_batch: max_batch)
+      rescue => e
+        Rails.logger.error "#{e.class}: #{e}\n#{e.backtrace.join("\n\t")}"
+      ensure
+        ActiveRecord::Base.connection.close
+      end
+    end
+  end
+end
diff --git a/services/api/lib/can_be_an_owner.rb b/services/api/lib/can_be_an_owner.rb
new file mode 100644 (file)
index 0000000..1a990e1
--- /dev/null
@@ -0,0 +1,89 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Protect referential integrity of owner_uuid columns in other tables
+# that can refer to the uuid column in this table.
+
+module CanBeAnOwner
+
+  def self.included(base)
+    base.extend(ClassMethods)
+
+    # Rails' "has_many" can prevent us from destroying the owner
+    # record when other objects refer to it.
+    ActiveRecord::Base.connection.tables.each do |t|
+      next if t == base.table_name
+      next if t == 'schema_migrations'
+      next if t == 'permission_refresh_lock'
+      klass = t.classify.constantize
+      next unless klass and 'owner_uuid'.in?(klass.columns.collect(&:name))
+      base.has_many(t.to_sym,
+                    foreign_key: :owner_uuid,
+                    primary_key: :uuid,
+                    dependent: :restrict_with_exception)
+    end
+    # We need custom protection for changing an owner's primary
+    # key. (Apart from this restriction, admins are allowed to change
+    # UUIDs.)
+    base.validate :restrict_uuid_change_breaking_associations
+  end
+
+  module ClassMethods
+    def install_view(type)
+      conn = ActiveRecord::Base.connection
+      transaction do
+        # Check whether the temporary view has already been created
+        # during this connection. If not, create it.
+        conn.exec_query "SAVEPOINT check_#{type}_view"
+        begin
+          conn.exec_query("SELECT 1 FROM #{type}_view LIMIT 0")
+        rescue
+          conn.exec_query "ROLLBACK TO SAVEPOINT check_#{type}_view"
+          sql = File.read(Rails.root.join("lib", "create_#{type}_view.sql"))
+          conn.exec_query(sql)
+        ensure
+          conn.exec_query "RELEASE SAVEPOINT check_#{type}_view"
+        end
+      end
+    end
+  end
+
+  def descendant_project_uuids
+    self.class.install_view('ancestor')
+    ActiveRecord::Base.connection.
+      exec_query('SELECT ancestor_view.uuid
+                  FROM ancestor_view
+                  LEFT JOIN groups ON groups.uuid=ancestor_view.uuid
+                  WHERE ancestor_uuid = $1 AND groups.group_class = $2',
+                  # "name" arg is a query label that appears in logs:
+                  "descendant_project_uuids for #{self.uuid}",
+                  # "binds" arg is an array of [col_id, value] for '$1' vars:
+                  [[nil, self.uuid], [nil, 'project']],
+                  ).rows.map do |project_uuid,|
+      project_uuid
+    end
+  end
+
+  protected
+
+  def restrict_uuid_change_breaking_associations
+    return true if new_record? or not uuid_changed?
+
+    # Check for objects that have my old uuid listed as their owner.
+    self.class.reflect_on_all_associations(:has_many).each do |assoc|
+      next unless assoc.foreign_key == :owner_uuid
+      if assoc.klass.where(owner_uuid: uuid_was).any?
+        errors.add(:uuid,
+                   "cannot be changed on a #{self.class} that owns objects")
+        return false
+      end
+    end
+
+  # If I owned myself before, continue to own myself under the new uuid.
+    if owner_uuid == uuid_was
+      self.owner_uuid = uuid
+    end
+  end
+end
diff --git a/services/api/lib/common_api_template.rb b/services/api/lib/common_api_template.rb
new file mode 100644 (file)
index 0000000..8aac264
--- /dev/null
@@ -0,0 +1,43 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module CommonApiTemplate
+  def self.included(base)
+    base.acts_as_api
+    base.class_eval do
+      alias_method :as_api_response_orig, :as_api_response
+      include InstanceMethods
+    end
+    base.extend(ClassMethods)
+    base.api_accessible :common do |t|
+      t.add :href
+      t.add :kind
+      t.add :etag
+      t.add :uuid
+      t.add :owner_uuid
+      t.add :created_at
+      t.add :modified_by_client_uuid
+      t.add :modified_by_user_uuid
+      t.add :modified_at
+    end
+  end
+
+  module InstanceMethods
+    # choose template based on opts[:for_user]
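+    # e.g. (illustrative) obj.as_api_response(nil, for_user: an_admin)
+    # picks the :superuser template when the model defines one, else :user.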
+    def as_api_response(template=nil, opts={})
+      if template.nil?
+        user = opts[:for_user] || current_user
+        if user.andand.is_admin and self.respond_to? :api_accessible_superuser
+          template = :superuser
+        else
+          template = :user
+        end
+      end
+      self.as_api_response_orig(template, opts)
+    end
+  end
+
+  module ClassMethods
+  end
+end
diff --git a/services/api/lib/create_ancestor_view.sql b/services/api/lib/create_ancestor_view.sql
new file mode 100644 (file)
index 0000000..451491b
--- /dev/null
@@ -0,0 +1,18 @@
+-- Copyright (C) The Arvados Authors. All rights reserved.
+--
+-- SPDX-License-Identifier: AGPL-3.0
+
+CREATE TEMPORARY VIEW ancestor_view AS
+WITH RECURSIVE
+ancestor (uuid, ancestor_uuid) AS (
+     SELECT groups.uuid::varchar(32)       AS uuid,
+            groups.owner_uuid::varchar(32) AS ancestor_uuid
+            FROM groups
+     UNION
+     SELECT ancestor.uuid::varchar(32)     AS uuid,
+            groups.owner_uuid::varchar(32) AS ancestor_uuid
+            FROM ancestor
+            INNER JOIN groups
+            ON groups.uuid = ancestor.ancestor_uuid
+)
+SELECT * FROM ancestor;
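+-- Illustration: if group C is owned by group B, and B by user A, the view
+-- yields (C,B) and (B,A) from the base case, plus (C,A) via recursion.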
diff --git a/services/api/lib/create_permission_view.sql b/services/api/lib/create_permission_view.sql
new file mode 100644 (file)
index 0000000..0c4c77b
--- /dev/null
@@ -0,0 +1,48 @@
+-- Copyright (C) The Arvados Authors. All rights reserved.
+--
+-- SPDX-License-Identifier: AGPL-3.0
+
+-- Note: this is not the current code used for permission checks (that is
+-- materialized_permission_view), but is retained here for migration purposes.
+
+CREATE TEMPORARY VIEW permission_view AS
+WITH RECURSIVE
+perm_value (name, val) AS (
+     VALUES
+     ('can_read',   1::smallint),
+     ('can_login',  1),
+     ('can_write',  2),
+     ('can_manage', 3)
+     ),
+perm_edges (tail_uuid, head_uuid, val, follow) AS (
+       SELECT links.tail_uuid,
+              links.head_uuid,
+              pv.val,
+              (pv.val = 3 OR groups.uuid IS NOT NULL) AS follow
+              FROM links
+              LEFT JOIN perm_value pv ON pv.name = links.name
+              LEFT JOIN groups ON pv.val<3 AND groups.uuid = links.head_uuid
+              WHERE links.link_class = 'permission'
+       UNION ALL
+       SELECT owner_uuid, uuid, 3, true FROM groups
+       ),
+perm (val, follow, user_uuid, target_uuid) AS (
+     SELECT 3::smallint             AS val,
+            true                    AS follow,
+            users.uuid::varchar(32) AS user_uuid,
+            users.uuid::varchar(32) AS target_uuid
+            FROM users
+     UNION
+     SELECT LEAST(perm.val, edges.val)::smallint AS val,
+            edges.follow                         AS follow,
+            perm.user_uuid::varchar(32)          AS user_uuid,
+            edges.head_uuid::varchar(32)         AS target_uuid
+            FROM perm
+            INNER JOIN perm_edges edges
+            ON perm.follow AND edges.tail_uuid = perm.target_uuid
+)
+SELECT user_uuid,
+       target_uuid,
+       val AS perm_level,
+       CASE follow WHEN true THEN target_uuid ELSE NULL END AS target_owner_uuid
+       FROM perm;
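+-- Reading guide: permission links are edges weighted by perm_value;
+-- traversal continues through groups and through any can_manage edge
+-- (including implicit ownership edges), and the effective level is the
+-- minimum edge value along the path.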
diff --git a/services/api/lib/create_superuser_token.rb b/services/api/lib/create_superuser_token.rb
new file mode 100755 (executable)
index 0000000..57eac04
--- /dev/null
@@ -0,0 +1,60 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Install the supplied string (or a randomly generated token, if none
+# is given) as an API token that authenticates to the system user account.
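+#
+# Typically invoked from a maintenance script or rake task (assumed); the
+# return value is the token string itself.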
+
+module CreateSuperUserToken
+  require File.dirname(__FILE__) + '/../config/boot'
+  require File.dirname(__FILE__) + '/../config/environment'
+
+  include ApplicationHelper
+
+  def create_superuser_token supplied_token=nil
+    act_as_system_user do
+      # If token is supplied, verify that it indeed is a superuser token
+      if supplied_token
+        api_client_auth = ApiClientAuthorization.
+          where(api_token: supplied_token).
+          first
+        if !api_client_auth
+          # fall through to create a token
+        elsif !api_client_auth.user.uuid.match(/-000000000000000$/)
+          raise "Token exists but is not a superuser token."
+        elsif api_client_auth.scopes != ['all']
+          raise "Token exists but has limited scope #{api_client_auth.scopes.inspect}."
+        end
+      end
+
+      # If we still don't have a usable token, find or create one.
+      if !api_client_auth
+        # Get (or create) trusted api client
+        api_client = ApiClient.
+          find_or_create_by(url_prefix: "ssh://root@localhost/",
+                            is_trusted: true)
+
+        # Check if there is an unexpired superuser token corresponding to this api client
+        api_client_auth =
+          ApiClientAuthorization.
+          where(user_id: system_user.id).
+          where(api_client_id: api_client.id).
+          where_serialized(:scopes, ['all']).
+          where('(expires_at IS NULL OR expires_at > CURRENT_TIMESTAMP)').
+          first
+
+        # None exist; create one (a random token is generated if none was supplied).
+        if !api_client_auth
+          api_client_auth = ApiClientAuthorization.
+            new(user: system_user,
+              api_client_id: api_client.id,
+              created_by_ip_address: '::1',
+              api_token: supplied_token)
+          api_client_auth.save!
+        end
+      end
+
+      api_client_auth.api_token
+    end
+  end
+end
diff --git a/services/api/lib/crunch_dispatch.rb b/services/api/lib/crunch_dispatch.rb
new file mode 100644 (file)
index 0000000..449d7d5
--- /dev/null
@@ -0,0 +1,981 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'open3'
+require 'shellwords'
+
+class CrunchDispatch
+  extend DbCurrentTime
+  include ApplicationHelper
+  include Process
+
+  EXIT_TEMPFAIL = 75
+  EXIT_RETRY_UNLOCKED = 93
+  RETRY_UNLOCKED_LIMIT = 3
+
+  class LogTime < Time
+    def to_s
+      self.utc.strftime "%Y-%m-%d_%H:%M:%S"
+    end
+  end
+
+  def initialize
+    @crunch_job_bin = (ENV['CRUNCH_JOB_BIN'] || `which arv-crunch-job`.strip)
+    if @crunch_job_bin.empty?
+      raise "No CRUNCH_JOB_BIN env var, and crunch-job not in path."
+    end
+
+    @docker_bin = ENV['CRUNCH_JOB_DOCKER_BIN']
+    @docker_run_args = ENV['CRUNCH_JOB_DOCKER_RUN_ARGS']
+    @cgroup_root = ENV['CRUNCH_CGROUP_ROOT']
+    @srun_sync_timeout = ENV['CRUNCH_SRUN_SYNC_TIMEOUT']
+
+    @arvados_internal = Rails.configuration.git_internal_dir
+    if not File.exist? @arvados_internal
+      $stderr.puts `mkdir -p #{@arvados_internal.shellescape} && git init --bare #{@arvados_internal.shellescape}`
+      raise "No internal git repository available" unless ($? == 0)
+    end
+
+    @repo_root = Rails.configuration.git_repositories_dir
+    @arvados_repo_path = Repository.where(name: "arvados").first.server_path
+    @authorizations = {}
+    @did_recently = {}
+    @fetched_commits = {}
+    @git_tags = {}
+    @node_state = {}
+    @pipe_auth_tokens = {}
+    @running = {}
+    @todo = []
+    @todo_job_retries = {}
+    @job_retry_counts = Hash.new(0)
+    @todo_pipelines = []
+  end
+
+  def sysuser
+    return act_as_system_user
+  end
+
+  def refresh_todo
+    if @runoptions[:jobs]
+      @todo = @todo_job_retries.values + Job.queue.select(&:repository)
+    end
+    if @runoptions[:pipelines]
+      @todo_pipelines = PipelineInstance.queue
+    end
+  end
+
+  def each_slurm_line(cmd, outfmt, max_fields=nil)
+    max_fields ||= outfmt.split(":").size
+    max_fields += 1  # To accommodate the node field we add
+    @@slurm_version ||= Gem::Version.new(`sinfo --version`.match(/\b[\d\.]+\b/)[0])
+    if Gem::Version.new('2.3') <= @@slurm_version
+      `#{cmd} --noheader -o '%n:#{outfmt}'`.each_line do |line|
+        yield line.chomp.split(":", max_fields)
+      end
+    else
+      # Expand rows with hostname ranges (like "foo[1-3,5,9-12]:idle")
+      # into multiple rows with one hostname each.
+      `#{cmd} --noheader -o '%N:#{outfmt}'`.each_line do |line|
+        tokens = line.chomp.split(":", max_fields)
+        if (re = tokens[0].match(/^(.*?)\[([-,\d]+)\]$/))
+          tokens.shift
+          re[2].split(",").each do |range|
+            range = range.split("-").collect(&:to_i)
+            (range[0]..range[-1]).each do |n|
+              yield [re[1] + n.to_s] + tokens
+            end
+          end
+        else
+          yield tokens
+        end
+      end
+    end
+  end
+
+  def slurm_status
+    slurm_nodes = {}
+    each_slurm_line("sinfo", "%t") do |hostname, state|
+      # Treat nodes in idle* state as down, because the * means that slurm
+      # hasn't been able to communicate with it recently.
+      state.sub!(/^idle\*/, "down")
+      state.sub!(/\W+$/, "")
+      state = "down" unless %w(idle alloc comp mix drng down).include?(state)
+      slurm_nodes[hostname] = {state: state, job: nil}
+    end
+    each_slurm_line("squeue", "%j") do |hostname, job_uuid|
+      slurm_nodes[hostname][:job] = job_uuid if slurm_nodes[hostname]
+    end
+    slurm_nodes
+  end
+
+  def update_node_status
+    return unless Server::Application.config.crunch_job_wrapper.to_s.match(/^slurm/)
+    slurm_status.each_pair do |hostname, slurmdata|
+      next if @node_state[hostname] == slurmdata
+      begin
+        node = Node.where('hostname=?', hostname).order(:last_ping_at).last
+        if node
+          $stderr.puts "dispatch: update #{hostname} state to #{slurmdata}"
+          node.info["slurm_state"] = slurmdata[:state]
+          node.job_uuid = slurmdata[:job]
+          if node.save
+            @node_state[hostname] = slurmdata
+          else
+            $stderr.puts "dispatch: failed to update #{node.uuid}: #{node.errors.messages}"
+          end
+        elsif slurmdata[:state] != 'down'
+          $stderr.puts "dispatch: SLURM reports '#{hostname}' is not down, but no node has that name"
+        end
+      rescue => error
+        $stderr.puts "dispatch: error updating #{hostname} node status: #{error}"
+      end
+    end
+  end
+
+  def positive_int(raw_value, default=nil)
+    value = begin raw_value.to_i rescue 0 end
+    if value > 0
+      value
+    else
+      default
+    end
+  end
+
+  NODE_CONSTRAINT_MAP = {
+    # Map Job runtime_constraints keys to the corresponding Node info key.
+    'min_ram_mb_per_node' => 'total_ram_mb',
+    'min_scratch_mb_per_node' => 'total_scratch_mb',
+    'min_cores_per_node' => 'total_cpu_cores',
+  }
+
+  def nodes_available_for_job_now(job)
+    # Find Nodes that satisfy a Job's runtime constraints (by building
+    # a list of Procs and using them to test each Node).  If there
+    # are enough to run the Job, return an array of their names.
+    # Otherwise, return nil.
+    need_procs = NODE_CONSTRAINT_MAP.each_pair.map do |job_key, node_key|
+      Proc.new do |node|
+        positive_int(node.properties[node_key], 0) >=
+          positive_int(job.runtime_constraints[job_key], 0)
+      end
+    end
+    min_node_count = positive_int(job.runtime_constraints['min_nodes'], 1)
+    usable_nodes = []
+    Node.all.select do |node|
+      node.info['slurm_state'] == 'idle'
+    end.sort_by do |node|
+      # Prefer nodes with no price, then cheap nodes, then expensive nodes
+      node.properties['cloud_node']['price'].to_f rescue 0
+    end.each do |node|
+      if need_procs.select { |need_proc| not need_proc.call(node) }.any?
+        # At least one runtime constraint is not satisfied by this node
+        next
+      end
+      usable_nodes << node
+      if usable_nodes.count >= min_node_count
+        hostnames = usable_nodes.map(&:hostname)
+        log_nodes = usable_nodes.map do |n|
+          "#{n.hostname} #{n.uuid} #{n.properties.to_json}"
+        end
+        log_job = "#{job.uuid} #{job.runtime_constraints}"
+        log_text = "dispatching job #{log_job} to #{log_nodes.join(", ")}"
+        $stderr.puts log_text
+        begin
+          act_as_system_user do
+            Log.new(object_uuid: job.uuid,
+                    event_type: 'dispatch',
+                    owner_uuid: system_user_uuid,
+                    summary: "dispatching to #{hostnames.join(", ")}",
+                    properties: {'text' => log_text}).save!
+          end
+        rescue => e
+          $stderr.puts "dispatch: log.create failed: #{e}"
+        end
+        return hostnames
+      end
+    end
+    nil
+  end
+
+  def nodes_available_for_job(job)
+    # Check if there are enough idle nodes with the Job's minimum
+    # hardware requirements to run it.  If so, return an array of
+    # their names.  If not, up to once per hour, signal start_jobs to
+    # hold off launching Jobs.  This delay is meant to give the Node
+    # Manager an opportunity to make new resources available for new
+    # Jobs.
+    #
+    # The exact timing parameters here might need to be adjusted for
+    # the best balance between helping the longest-waiting Jobs run,
+    # and making efficient use of immediately available resources.
+    # These are all just first efforts until we have more data to work
+    # with.
+    nodelist = nodes_available_for_job_now(job)
+    if nodelist.nil? and not did_recently(:wait_for_available_nodes, 3600)
+      $stderr.puts "dispatch: waiting for nodes for #{job.uuid}"
+      @node_wait_deadline = Time.now + 5.minutes
+    end
+    nodelist
+  end
+
+  def fail_job job, message, skip_lock: false
+    $stderr.puts "dispatch: #{job.uuid}: #{message}"
+    begin
+      Log.new(object_uuid: job.uuid,
+              event_type: 'dispatch',
+              owner_uuid: job.owner_uuid,
+              summary: message,
+              properties: {"text" => message}).save!
+    rescue => e
+      $stderr.puts "dispatch: log.create failed: #{e}"
+    end
+
+    if not skip_lock and not have_job_lock?(job)
+      begin
+        job.lock @authorizations[job.uuid].user.uuid
+      rescue ArvadosModel::AlreadyLockedError
+        $stderr.puts "dispatch: tried to mark job #{job.uuid} as failed but it was already locked by someone else"
+        return
+      end
+    end
+
+    job.state = "Failed"
+    if not job.save
+      $stderr.puts "dispatch: save failed setting job #{job.uuid} to failed"
+    end
+  end
+
+  def stdout_s(cmd_a, opts={})
+    IO.popen(cmd_a, "r", opts) do |pipe|
+      return pipe.read.chomp
+    end
+  end
+
+  def git_cmd(*cmd_a)
+    ["git", "--git-dir=#{@arvados_internal}"] + cmd_a
+  end
+
+  def get_authorization(job)
+    if @authorizations[job.uuid] and
+        @authorizations[job.uuid].user.uuid != job.modified_by_user_uuid
+      # We already made a token for this job, but we need a new one
+      # because modified_by_user_uuid has changed (the job will run
+      # as a different user).
+      @authorizations[job.uuid].update_attributes expires_at: Time.now
+      @authorizations[job.uuid] = nil
+    end
+    if not @authorizations[job.uuid]
+      auth = ApiClientAuthorization.
+        new(user: User.where('uuid=?', job.modified_by_user_uuid).first,
+            api_client_id: 0)
+      if not auth.save
+        $stderr.puts "dispatch: auth.save failed for #{job.uuid}"
+      else
+        @authorizations[job.uuid] = auth
+      end
+    end
+    @authorizations[job.uuid]
+  end
+
+  def internal_repo_has_commit? sha1
+    if (not @fetched_commits[sha1] and
+        sha1 == stdout_s(git_cmd("rev-list", "-n1", sha1), err: "/dev/null") and
+        $? == 0)
+      @fetched_commits[sha1] = true
+    end
+    return @fetched_commits[sha1]
+  end
+
+  def get_commit src_repo, sha1
+    return true if internal_repo_has_commit? sha1
+
+    # commit does not exist in internal repository, so import the
+    # source repository using git fetch-pack
+    cmd = git_cmd("fetch-pack", "--no-progress", "--all", src_repo)
+    $stderr.puts "dispatch: #{cmd}"
+    $stderr.puts(stdout_s(cmd))
+    @fetched_commits[sha1] = ($? == 0)
+  end
+
+  def tag_commit(job, commit_hash, tag_name)
+    # @git_tags[T]==V if we know commit V has been tagged T in the
+    # arvados_internal repository.
+    if not @git_tags[tag_name]
+      cmd = git_cmd("tag", tag_name, commit_hash)
+      $stderr.puts "dispatch: #{cmd}"
+      $stderr.puts(stdout_s(cmd, err: "/dev/null"))
+      unless $? == 0
+        # git tag failed.  This may be because the tag already exists, so check for that.
+        tag_rev = stdout_s(git_cmd("rev-list", "-n1", tag_name))
+        if $? == 0
+          # We got a revision back
+          if tag_rev != commit_hash
+            # Uh oh, the tag doesn't point to the revision we were expecting.
+            # Someone has been monkeying with the job record and/or git.
+            fail_job job, "Existing tag #{tag_name} points to commit #{tag_rev} but expected commit #{commit_hash}"
+            return nil
+          end
+          # we're okay (fall through to setting @git_tags below)
+        else
+          # git rev-list failed for some reason.
+          fail_job job, "'git tag' for #{tag_name} failed but did not find any existing tag using 'git rev-list'"
+          return nil
+        end
+      end
+      # 'git tag' was successful, or there is an existing tag that points to the same revision.
+      @git_tags[tag_name] = commit_hash
+    elsif @git_tags[tag_name] != commit_hash
+      fail_job job, "Existing tag #{tag_name} points to commit #{@git_tags[tag_name]} but this job uses commit #{commit_hash}"
+      return nil
+    end
+    @git_tags[tag_name]
+  end
+
+  def start_jobs
+    @todo.each do |job|
+      next if @running[job.uuid]
+
+      cmd_args = nil
+      case Server::Application.config.crunch_job_wrapper
+      when :none
+        if @running.size > 0
+            # Don't run more than one at a time.
+            return
+        end
+        cmd_args = []
+      when :slurm_immediate
+        nodelist = nodes_available_for_job(job)
+        if nodelist.nil?
+          if Time.now < @node_wait_deadline
+            break
+          else
+            next
+          end
+        end
+        cmd_args = ["salloc",
+                    "--chdir=/",
+                    "--immediate",
+                    "--exclusive",
+                    "--no-kill",
+                    "--job-name=#{job.uuid}",
+                    "--nodelist=#{nodelist.join(',')}"]
+      else
+        raise "Unknown crunch_job_wrapper: #{Server::Application.config.crunch_job_wrapper}"
+      end
+
+      cmd_args = sudo_preface + cmd_args
+
+      next unless get_authorization job
+
+      ready = internal_repo_has_commit? job.script_version
+
+      if not ready
+        # Import the commit from the specified repository into the
+        # internal repository. This should have been done already when
+        # the job was created/updated; this code is obsolete except to
+        # avoid deployment races. Failing the job would be a
+        # reasonable thing to do at this point.
+        repo = Repository.where(name: job.repository).first
+        if repo.nil? or repo.server_path.nil?
+          fail_job job, "Repository #{job.repository} not found under #{@repo_root}"
+          next
+        end
+        ready &&= get_commit repo.server_path, job.script_version
+        ready &&= tag_commit job, job.script_version, job.uuid
+      end
+
+      # This should be unnecessary, because the API server does it during
+      # job create/update, but it's still not a bad idea to verify the
+      # tag is correct before starting the job:
+      ready &&= tag_commit job, job.script_version, job.uuid
+
+      # The arvados_sdk_version doesn't support use of arbitrary
+      # remote URLs, so the requested version isn't necessarily copied
+      # into the internal repository yet.
+      if job.arvados_sdk_version
+        ready &&= get_commit @arvados_repo_path, job.arvados_sdk_version
+        ready &&= tag_commit job, job.arvados_sdk_version, "#{job.uuid}-arvados-sdk"
+      end
+
+      if not ready
+        fail_job job, "commit not present in internal repository"
+        next
+      end
+
+      cmd_args += [@crunch_job_bin,
+                   '--job-api-token', @authorizations[job.uuid].api_token,
+                   '--job', job.uuid,
+                   '--git-dir', @arvados_internal]
+
+      if @cgroup_root
+        cmd_args += ['--cgroup-root', @cgroup_root]
+      end
+
+      if @docker_bin
+        cmd_args += ['--docker-bin', @docker_bin]
+      end
+
+      if @docker_run_args
+        cmd_args += ['--docker-run-args', @docker_run_args]
+      end
+
+      if @srun_sync_timeout
+        cmd_args += ['--srun-sync-timeout', @srun_sync_timeout]
+      end
+
+      if have_job_lock?(job)
+        cmd_args << "--force-unlock"
+      end
+
+      $stderr.puts "dispatch: #{cmd_args.join ' '}"
+
+      begin
+        i, o, e, t = Open3.popen3(*cmd_args)
+      rescue
+        $stderr.puts "dispatch: popen3: #{$!}"
+        # This is a dispatch problem like "Too many open files";
+        # retrying another job right away would be futile. Just return
+        # and hope things are better next time, after (at least) a
+        # did_recently() delay.
+        return
+      end
+
+      $stderr.puts "dispatch: job #{job.uuid}"
+      start_banner = "dispatch: child #{t.pid} start #{LogTime.now}"
+      $stderr.puts start_banner
+
+      @running[job.uuid] = {
+        stdin: i,
+        stdout: o,
+        stderr: e,
+        wait_thr: t,
+        job: job,
+        buf: {stderr: '', stdout: ''},
+        started: false,
+        sent_int: 0,
+        job_auth: @authorizations[job.uuid],
+        stderr_buf_to_flush: '',
+        stderr_flushed_at: Time.new(0),
+        bytes_logged: 0,
+        events_logged: 0,
+        log_throttle_is_open: true,
+        log_throttle_reset_time: Time.now + Rails.configuration.crunch_log_throttle_period,
+        log_throttle_bytes_so_far: 0,
+        log_throttle_lines_so_far: 0,
+        log_throttle_bytes_skipped: 0,
+        log_throttle_partial_line_last_at: Time.new(0),
+        log_throttle_first_partial_line: true,
+      }
+      i.close
+      @todo_job_retries.delete(job.uuid)
+      update_node_status
+    end
+  end
+
+  # Test for hard cap on total output and for log throttling.  Returns whether
+  # the log line should go to output or not.  Modifies "line" in place to
+  # replace it with an error if a logging limit is tripped.
+  def rate_limit running_job, line
+    message = false
+    linesize = line.size
+    if running_job[:log_throttle_is_open]
+      partial_line = false
+      skip_counts = false
+      matches = line.match(/^\S+ \S+ \d+ \d+ stderr (.*)/)
+      if matches and matches[1] and matches[1].start_with?('[...]') and matches[1].end_with?('[...]')
+        partial_line = true
+        if Time.now > running_job[:log_throttle_partial_line_last_at] + Rails.configuration.crunch_log_partial_line_throttle_period
+          running_job[:log_throttle_partial_line_last_at] = Time.now
+        else
+          skip_counts = true
+        end
+      end
+
+      if !skip_counts
+        running_job[:log_throttle_lines_so_far] += 1
+        running_job[:log_throttle_bytes_so_far] += linesize
+        running_job[:bytes_logged] += linesize
+      end
+
+      if (running_job[:bytes_logged] >
+          Rails.configuration.crunch_limit_log_bytes_per_job)
+        message = "Exceeded log limit #{Rails.configuration.crunch_limit_log_bytes_per_job} bytes (crunch_limit_log_bytes_per_job). Log will be truncated."
+        running_job[:log_throttle_reset_time] = Time.now + 100.years
+        running_job[:log_throttle_is_open] = false
+
+      elsif (running_job[:log_throttle_bytes_so_far] >
+             Rails.configuration.crunch_log_throttle_bytes)
+        remaining_time = running_job[:log_throttle_reset_time] - Time.now
+        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_bytes} bytes per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_bytes). Logging will be silenced for the next #{remaining_time.round} seconds."
+        running_job[:log_throttle_is_open] = false
+
+      elsif (running_job[:log_throttle_lines_so_far] >
+             Rails.configuration.crunch_log_throttle_lines)
+        remaining_time = running_job[:log_throttle_reset_time] - Time.now
+        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_lines} lines per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_lines), logging will be silenced for the next #{remaining_time.round} seconds."
+        running_job[:log_throttle_is_open] = false
+
+      elsif partial_line and running_job[:log_throttle_first_partial_line]
+        running_job[:log_throttle_first_partial_line] = false
+        message = "Rate-limiting partial segments of long lines to one every #{Rails.configuration.crunch_log_partial_line_throttle_period} seconds."
+      end
+    end
+
+    if not running_job[:log_throttle_is_open]
+      # Don't log anything if any limit has been exceeded. Just count lossage.
+      running_job[:log_throttle_bytes_skipped] += linesize
+    end
+
+    if message
+      # Yes, write to logs, but use our "rate exceeded" message
+      # instead of the log message that exceeded the limit.
+      message += " A complete log is still being written to Keep, and will be available when the job finishes.\n"
+      line.replace message
+      true
+    elsif partial_line
+      false
+    else
+      running_job[:log_throttle_is_open]
+    end
+  end
+
+  def read_pipes
+    @running.each do |job_uuid, j|
+      now = Time.now
+      if now > j[:log_throttle_reset_time]
+        # It has been more than throttle_period seconds since the last
+        # checkpoint so reset the throttle
+        if j[:log_throttle_bytes_skipped] > 0
+          message = "#{job_uuid} ! Skipped #{j[:log_throttle_bytes_skipped]} bytes of log"
+          $stderr.puts message
+          j[:stderr_buf_to_flush] << "#{LogTime.now} #{message}\n"
+        end
+
+        j[:log_throttle_reset_time] = now + Rails.configuration.crunch_log_throttle_period
+        j[:log_throttle_bytes_so_far] = 0
+        j[:log_throttle_lines_so_far] = 0
+        j[:log_throttle_bytes_skipped] = 0
+        j[:log_throttle_is_open] = true
+        j[:log_throttle_partial_line_last_at] = Time.new(0)
+        j[:log_throttle_first_partial_line] = true
+      end
+
+      j[:buf].each do |stream, streambuf|
+        # Read some data from the child stream
+        buf = ''
+        begin
+          # It's important to use a big enough buffer here. When we're
+          # being flooded with logs, we must read and discard many
+          # bytes at once. Otherwise, we can easily peg a CPU with
+          # time-checking and other loop overhead. (Quick tests show a
+          # 1MiB buffer working 2.5x as fast as a 64 KiB buffer.)
+          #
+          # So don't reduce this buffer size!
+          buf = j[stream].read_nonblock(2**20)
+        rescue Errno::EAGAIN, EOFError
+        end
+
+        # Short circuit the counting code if we're just going to throw
+        # away the data anyway.
+        if not j[:log_throttle_is_open]
+          j[:log_throttle_bytes_skipped] += streambuf.size + buf.size
+          streambuf.replace ''
+          next
+        elsif buf == ''
+          next
+        end
+
+        # Append to incomplete line from previous read, if any
+        streambuf << buf
+
+        bufend = ''
+        streambuf.each_line do |line|
+          if not line.end_with? $/
+            if line.size > Rails.configuration.crunch_log_throttle_bytes
+              # Without a limit here, we'll use 2x an arbitrary amount
+              # of memory, and waste a lot of time copying strings
+              # around, all without providing any feedback to anyone
+              # about what's going on _or_ hitting any of our throttle
+              # limits.
+              #
+              # Here we leave "line" alone, knowing it will never be
+              # sent anywhere: rate_limit() will reach
+              # crunch_log_throttle_bytes immediately. However, we'll
+              # leave [...] in bufend: if the trailing end of the long
+              # line does end up getting sent anywhere, it will have
+              # some indication that it is incomplete.
+              bufend = "[...]"
+            else
+              # If line length is sane, we'll wait for the rest of the
+              # line to appear in the next read_pipes() call.
+              bufend = line
+              break
+            end
+          end
+          # rate_limit returns true or false as to whether to actually log
+          # the line or not.  It also modifies "line" in place to replace
+          # it with an error if a logging limit is tripped.
+          if rate_limit j, line
+            $stderr.print "#{job_uuid} ! " unless line.index(job_uuid)
+            $stderr.puts line
+            pub_msg = "#{LogTime.now} #{line.strip}\n"
+            j[:stderr_buf_to_flush] << pub_msg
+          end
+        end
+
+        # Leave the trailing incomplete line (if any) in streambuf for
+        # next time.
+        streambuf.replace bufend
+      end
+      # Flush buffered logs to the logs table, if appropriate. We have
+      # to do this even if we didn't collect any new logs this time:
+      # otherwise, buffered data older than seconds_between_events
+      # won't get flushed until new data arrives.
+      write_log j
+    end
+  end
+
+  def reap_children
+    return if 0 == @running.size
+    pid_done = nil
+    j_done = nil
+
+    @running.each do |uuid, j|
+      if !j[:wait_thr].status
+        pid_done = j[:wait_thr].pid
+        j_done = j
+        break
+      end
+    end
+
+    return if !pid_done
+
+    job_done = j_done[:job]
+
+    # Ensure every last drop of stdout and stderr is consumed.
+    read_pipes
+    # Reset flush timestamp to make sure log gets written.
+    j_done[:stderr_flushed_at] = Time.new(0)
+    # Write any remaining logs.
+    write_log j_done
+
+    j_done[:buf].each do |stream, streambuf|
+      if streambuf != ''
+        $stderr.puts streambuf + "\n"
+      end
+    end
+
+    # Wait for the child to finish; wait_thr.value returns a Process::Status.
+    exit_status = j_done[:wait_thr].value.exitstatus
+    exit_tempfail = exit_status == EXIT_TEMPFAIL
+
+    $stderr.puts "dispatch: child #{pid_done} exit #{exit_status}"
+    $stderr.puts "dispatch: job #{job_done.uuid} end"
+
+    jobrecord = Job.find_by_uuid(job_done.uuid)
+
+    if exit_status == EXIT_RETRY_UNLOCKED or (exit_tempfail and @job_retry_counts.include? jobrecord.uuid)
+      $stderr.puts("dispatch: job #{jobrecord.uuid} was interrupted by node failure")
+      # Only this crunch-dispatch process can retry the job:
+      # it's already locked, and there's no way to put it back in the
+      # Queued state.  Put it in our internal todo list unless the job
+      # has failed this way excessively.
+      @job_retry_counts[jobrecord.uuid] += 1
+      exit_tempfail = @job_retry_counts[jobrecord.uuid] <= RETRY_UNLOCKED_LIMIT
+      do_what_next = "give up now"
+      if exit_tempfail
+        @todo_job_retries[jobrecord.uuid] = jobrecord
+        do_what_next = "re-attempt"
+      end
+      $stderr.puts("dispatch: job #{jobrecord.uuid} has been interrupted " +
+                   "#{@job_retry_counts[jobrecord.uuid]}x, will #{do_what_next}")
+    end
+
+    if !exit_tempfail
+      @job_retry_counts.delete(jobrecord.uuid)
+      if jobrecord.state == "Running"
+        # Apparently there was an unhandled error.  That could potentially
+        # include "all allocated nodes failed" when we don't want to retry
+        # because the job has already been retried RETRY_UNLOCKED_LIMIT
+        # times.  Fail the job.
+        jobrecord.state = "Failed"
+        if not jobrecord.save
+          $stderr.puts "dispatch: jobrecord.save failed"
+        end
+      end
+    else
+      # If the job failed to run due to an infrastructure
+      # issue with crunch-job or slurm, we want the job to stay in the
+      # queue. If crunch-job exited after losing a race to another
+      # crunch-job process, it exits 75 and we should leave the job
+      # record alone so the winner of the race can do its thing.
+      # If crunch-job exited after all of its allocated nodes failed,
+      # it exits 93, and we want to retry it later (see the
+      # EXIT_RETRY_UNLOCKED `if` block).
+      #
+      # There is still an unhandled race condition: If our crunch-job
+      # process is about to lose a race with another crunch-job
+      # process, but crashes before getting to its "exit 75" (for
+      # example, "cannot fork" or "cannot reach API server") then we
+      # will incorrectly conclude that the failure was ours: because
+      # jobrecord.started_at is non-nil, we will mark the job as failed
+      # even though the winner of the race is probably still doing
+      # fine.
+    end
+
+    # Invalidate the per-job auth token, unless the job is still queued and we
+    # might want to try it again.
+    if jobrecord.state != "Queued" and !@todo_job_retries.include?(jobrecord.uuid)
+      j_done[:job_auth].update_attributes expires_at: Time.now
+    end
+
+    @running.delete job_done.uuid
+  end
+
+  def update_pipelines
+    expire_tokens = @pipe_auth_tokens.dup
+    @todo_pipelines.each do |p|
+      pipe_auth = (@pipe_auth_tokens[p.uuid] ||= ApiClientAuthorization.
+                   create(user: User.where('uuid=?', p.modified_by_user_uuid).first,
+                          api_client_id: 0))
+      puts `export ARVADOS_API_TOKEN=#{pipe_auth.api_token} && arv-run-pipeline-instance --run-pipeline-here --no-wait --instance #{p.uuid}`
+      expire_tokens.delete p.uuid
+    end
+
+    expire_tokens.each do |k, v|
+      v.update_attributes expires_at: Time.now
+      @pipe_auth_tokens.delete k
+    end
+  end
+
+  def parse_argv argv
+    @runoptions = {}
+    (argv.any? ? argv : ['--jobs', '--pipelines']).each do |arg|
+      case arg
+      when '--jobs'
+        @runoptions[:jobs] = true
+      when '--pipelines'
+        @runoptions[:pipelines] = true
+      else
+        abort "Unrecognized command line option '#{arg}'"
+      end
+    end
+    if not (@runoptions[:jobs] or @runoptions[:pipelines])
+      abort "Nothing to do. Please specify at least one of: --jobs, --pipelines."
+    end
+  end
+
+  def run argv
+    parse_argv argv
+
+    # We want files written by crunch-dispatch to be writable by other
+    # processes with the same GID, see bug #7228
+    File.umask(0002)
+
+    # This is how crunch-job child procs know where the "refresh"
+    # trigger file is
+    ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger
+
+    # If salloc can't allocate resources immediately, make it use our
+    # temporary failure exit code.  This ensures crunch-dispatch won't
+    # mark a job failed because of an issue with node allocation.
+    # This often happens when another dispatcher wins the race to
+    # allocate nodes.
+    ENV["SLURM_EXIT_IMMEDIATE"] = CrunchDispatch::EXIT_TEMPFAIL.to_s
+
+    if ENV["CRUNCH_DISPATCH_LOCKFILE"]
+      lockfilename = ENV.delete "CRUNCH_DISPATCH_LOCKFILE"
+      lockfile = File.open(lockfilename, File::RDWR|File::CREAT, 0644)
+      unless lockfile.flock File::LOCK_EX|File::LOCK_NB
+        abort "Lock unavailable on #{lockfilename} - exit"
+      end
+    end
+
+    @signal = {}
+    %w{TERM INT}.each do |sig|
+      signame = sig
+      Signal.trap(sig) do
+        $stderr.puts "Received #{signame} signal"
+        @signal[:term] = true
+      end
+    end
+
+    act_as_system_user
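+    # Presumably a cache warm-up: force one permission query before entering the main loop.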
+    User.first.group_permissions
+    $stderr.puts "dispatch: ready"
+    while !@signal[:term] or @running.size > 0
+      read_pipes
+      if @signal[:term]
+        @running.each do |uuid, j|
+          if !j[:started] and j[:sent_int] < 2
+            begin
+              Process.kill 'INT', j[:wait_thr].pid
+            rescue Errno::ESRCH
+              # No such pid = race condition + desired result is
+              # already achieved
+            end
+            j[:sent_int] += 1
+          end
+        end
+      else
+        refresh_todo unless did_recently(:refresh_todo, 1.0)
+        update_node_status unless did_recently(:update_node_status, 1.0)
+        unless @todo.empty? or did_recently(:start_jobs, 1.0) or @signal[:term]
+          start_jobs
+        end
+        unless (@todo_pipelines.empty? and @pipe_auth_tokens.empty?) or did_recently(:update_pipelines, 5.0)
+          update_pipelines
+        end
+        unless did_recently(:check_orphaned_slurm_jobs, 60)
+          check_orphaned_slurm_jobs
+        end
+      end
+      reap_children
+      select(@running.values.collect { |j| [j[:stdout], j[:stderr]] }.flatten,
+             [], [], 1)
+    end
+    # If there are jobs we wanted to retry, we have to mark them as failed now.
+    # Other dispatchers can't pick them up because we hold their lock.
+    @todo_job_retries.each_key do |job_uuid|
+      job = Job.find_by_uuid(job_uuid)
+      if job.state == "Running"
+        fail_job(job, "crunch-dispatch was stopped during job's tempfail retry loop")
+      end
+    end
+  end
+
+  def fail_jobs before: nil
+    act_as_system_user do
+      threshold = nil
+      if before == 'reboot'
+        boottime = nil
+        open('/proc/stat').map(&:split).each do |stat, t|
+          if stat == 'btime'
+            boottime = t
+          end
+        end
+        if not boottime
+          raise "Could not find btime in /proc/stat"
+        end
+        threshold = Time.at(boottime.to_i)
+      elsif before
+        threshold = Time.parse(before, Time.now)
+      else
+        threshold = db_current_time
+      end
+      Rails.logger.info "fail_jobs: threshold is #{threshold}"
+
+      squeue = squeue_jobs
+      Job.where('state = ? and started_at < ?', Job::Running, threshold).
+        each do |job|
+        Rails.logger.debug "fail_jobs: #{job.uuid} started #{job.started_at}"
+        squeue.each do |slurm_name|
+          if slurm_name == job.uuid
+            Rails.logger.info "fail_jobs: scancel #{job.uuid}"
+            scancel slurm_name
+          end
+        end
+        fail_job(job, "cleaned up stale job: started before #{threshold}",
+                 skip_lock: true)
+      end
+    end
+  end
+
+  def check_orphaned_slurm_jobs
+    act_as_system_user do
+      squeue_uuids = squeue_jobs.select{|uuid| uuid.match(/^[0-9a-z]{5}-8i9sb-[0-9a-z]{15}$/)}.
+                                  select{|uuid| !@running.has_key?(uuid)}
+
+      return if squeue_uuids.size == 0
+
+      scancel_uuids = squeue_uuids - Job.where('uuid in (?) and (state in (?) or modified_at>?)',
+                                               squeue_uuids,
+                                               ['Running', 'Queued'],
+                                               (Time.now - 60)).
+                                         collect(&:uuid)
+      scancel_uuids.each do |uuid|
+        Rails.logger.info "orphaned job: scancel #{uuid}"
+        scancel uuid
+      end
+    end
+  end
+
+  def sudo_preface
+    return [] if not Server::Application.config.crunch_job_user
+    ["sudo", "-E", "-u",
+     Server::Application.config.crunch_job_user,
+     "LD_LIBRARY_PATH=#{ENV['LD_LIBRARY_PATH']}",
+     "PATH=#{ENV['PATH']}",
+     "PERLLIB=#{ENV['PERLLIB']}",
+     "PYTHONPATH=#{ENV['PYTHONPATH']}",
+     "RUBYLIB=#{ENV['RUBYLIB']}",
+     "GEM_PATH=#{ENV['GEM_PATH']}"]
+  end
+
+  protected
+
+  def have_job_lock?(job)
+    # Return true if the given job is locked by this crunch-dispatch, normally
+    # because we've run crunch-job for it.
+    @todo_job_retries.include?(job.uuid)
+  end
+
+  def did_recently(thing, min_interval)
+    if !@did_recently[thing] or @did_recently[thing] < Time.now - min_interval
+      @did_recently[thing] = Time.now
+      false
+    else
+      true
+    end
+  end
+
+  # Send the message to the log table. We want these records to be transient.
+  def write_log running_job
+    return if running_job[:stderr_buf_to_flush] == ''
+
+    # Send out to log event if buffer size exceeds the bytes per event or if
+    # it has been at least crunch_log_seconds_between_events seconds since
+    # the last flush.
+    if running_job[:stderr_buf_to_flush].size > Rails.configuration.crunch_log_bytes_per_event or
+        (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.crunch_log_seconds_between_events
+      begin
+        log = Log.new(object_uuid: running_job[:job].uuid,
+                      event_type: 'stderr',
+                      owner_uuid: running_job[:job].owner_uuid,
+                      properties: {"text" => running_job[:stderr_buf_to_flush]})
+        log.save!
+        running_job[:events_logged] += 1
+      rescue => exception
+        $stderr.puts "Failed to write logs"
+        $stderr.puts exception.backtrace
+      end
+      running_job[:stderr_buf_to_flush] = ''
+      running_job[:stderr_flushed_at] = Time.now
+    end
+  end
+
+  # An array of job_uuids in squeue
+  def squeue_jobs
+    if Rails.configuration.crunch_job_wrapper == :slurm_immediate
+      p = IO.popen(['squeue', '-a', '-h', '-o', '%j'])
+      begin
+        p.readlines.map {|line| line.strip}
+      ensure
+        p.close
+      end
+    else
+      []
+    end
+  end
+
+  def scancel slurm_name
+    cmd = sudo_preface + ['scancel', '-n', slurm_name]
+    IO.popen(cmd) do |scancel_pipe|
+      puts scancel_pipe.read
+    end
+    if not $?.success?
+      Rails.logger.error "scancel #{slurm_name.shellescape}: $?"
+    end
+  end
+end
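CrunchDispatch is presumably driven by a small wrapper script; a sketch follows (the script name, path, and option handling are assumptions, not shown in this diff):

#!/usr/bin/env ruby
# Sketch of a dispatcher entry point.
require_relative '../lib/crunch_dispatch'  # path is an assumption
dispatch = CrunchDispatch.new
# With no arguments, parse_argv defaults to both --jobs and --pipelines.
dispatch.run(ARGV)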
diff --git a/services/api/lib/current_api_client.rb b/services/api/lib/current_api_client.rb
new file mode 100644 (file)
index 0000000..4963867
--- /dev/null
@@ -0,0 +1,234 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+$system_user = nil
+$system_group = nil
+$all_users_group = nil
+$anonymous_user = nil
+$anonymous_group = nil
+$anonymous_group_read_permission = nil
+$empty_collection = nil
+
+module CurrentApiClient
+  def current_user
+    Thread.current[:user]
+  end
+
+  def current_api_client
+    Thread.current[:api_client]
+  end
+
+  def current_api_client_authorization
+    Thread.current[:api_client_authorization]
+  end
+
+  def current_api_base
+    Thread.current[:api_url_base]
+  end
+
+  def current_default_owner
+    # owner_uuid for newly created objects
+    ((current_api_client_authorization &&
+      current_api_client_authorization.default_owner_uuid) ||
+     (current_user && current_user.default_owner_uuid) ||
+     (current_user && current_user.uuid) ||
+     nil)
+  end
+
+  # Where is the client connecting from?
+  def current_api_client_ip_address
+    Thread.current[:api_client_ip_address]
+  end
+
+  def system_user_uuid
+    [Server::Application.config.uuid_prefix,
+     User.uuid_prefix,
+     '000000000000000'].join('-')
+  end
+
+  def system_group_uuid
+    [Server::Application.config.uuid_prefix,
+     Group.uuid_prefix,
+     '000000000000000'].join('-')
+  end
+
+  def anonymous_group_uuid
+    [Server::Application.config.uuid_prefix,
+     Group.uuid_prefix,
+     'anonymouspublic'].join('-')
+  end
+
+  def anonymous_user_uuid
+    [Server::Application.config.uuid_prefix,
+     User.uuid_prefix,
+     'anonymouspublic'].join('-')
+  end
+
+  def system_user
+    $system_user = check_cache $system_user do
+      real_current_user = Thread.current[:user]
+      begin
+        Thread.current[:user] = User.new(is_admin: true,
+                                         is_active: true,
+                                         uuid: system_user_uuid)
+        User.where(uuid: system_user_uuid).
+          first_or_create!(is_active: true,
+                           is_admin: true,
+                           email: 'root',
+                           first_name: 'root',
+                           last_name: '')
+      ensure
+        Thread.current[:user] = real_current_user
+      end
+    end
+  end
+
+  def system_group
+    $system_group = check_cache $system_group do
+      act_as_system_user do
+        ActiveRecord::Base.transaction do
+          Group.where(uuid: system_group_uuid).
+            first_or_create!(name: "System group",
+                             description: "System group") do |g|
+            g.save!
+            User.all.collect(&:uuid).each do |user_uuid|
+              Link.create!(link_class: 'permission',
+                           name: 'can_manage',
+                           tail_uuid: system_group_uuid,
+                           head_uuid: user_uuid)
+            end
+          end
+        end
+      end
+    end
+  end
+
+  def all_users_group_uuid
+    [Server::Application.config.uuid_prefix,
+     Group.uuid_prefix,
+     'fffffffffffffff'].join('-')
+  end
+
+  def all_users_group
+    $all_users_group = check_cache $all_users_group do
+      act_as_system_user do
+        ActiveRecord::Base.transaction do
+          Group.where(uuid: all_users_group_uuid).
+            first_or_create!(name: "All users",
+                             description: "All users",
+                             group_class: "role")
+        end
+      end
+    end
+  end
+
+  def act_as_system_user
+    if block_given?
+      act_as_user system_user do
+        yield
+      end
+    else
+      Thread.current[:user] = system_user
+    end
+  end
+
+  def act_as_user user
+    user_was = Thread.current[:user]
+    Thread.current[:user] = user
+    begin
+      yield
+    ensure
+      Thread.current[:user] = user_was
+    end
+  end
+
+  def anonymous_group
+    $anonymous_group = check_cache $anonymous_group do
+      act_as_system_user do
+        ActiveRecord::Base.transaction do
+          Group.where(uuid: anonymous_group_uuid).
+            first_or_create!(group_class: "role",
+                             name: "Anonymous users",
+                             description: "Anonymous users")
+        end
+      end
+    end
+  end
+
+  def anonymous_group_read_permission
+    $anonymous_group_read_permission =
+        check_cache $anonymous_group_read_permission do
+      act_as_system_user do
+        Link.where(tail_uuid: all_users_group.uuid,
+                   head_uuid: anonymous_group.uuid,
+                   link_class: "permission",
+                   name: "can_read").first_or_create!
+      end
+    end
+  end
+
+  def anonymous_user
+    $anonymous_user = check_cache $anonymous_user do
+      act_as_system_user do
+        User.where(uuid: anonymous_user_uuid).
+          first_or_create!(is_active: false,
+                           is_admin: false,
+                           email: 'anonymous',
+                           first_name: 'Anonymous',
+                           last_name: '') do |u|
+          u.save!
+          Link.where(tail_uuid: anonymous_user_uuid,
+                     head_uuid: anonymous_group.uuid,
+                     link_class: 'permission',
+                     name: 'can_read').
+            first_or_create!
+        end
+      end
+    end
+  end
+
+  def empty_collection_uuid
+    'd41d8cd98f00b204e9800998ecf8427e+0'
+  end
+
+  def empty_collection
+    $empty_collection = check_cache $empty_collection do
+      act_as_system_user do
+        ActiveRecord::Base.transaction do
+          Collection.
+            where(portable_data_hash: empty_collection_uuid).
+            first_or_create!(manifest_text: '', owner_uuid: anonymous_group.uuid)
+        end
+      end
+    end
+  end
+
+  private
+
+  # If the given value is nil, or the cache has been cleared since it
+  # was set, yield. Otherwise, return the given value.
+  def check_cache value
+    if not Rails.env.test? and
+        ActionController::Base.cache_store.is_a? ActiveSupport::Cache::FileStore and
+        not File.owned? ActionController::Base.cache_store.cache_path
+      # If we don't own the cache dir, we're probably
+      # crunch-dispatch. Whoever we are, using this cache is likely to
+      # either fail or screw up the cache for someone else. So we'll
+      # just assume the $globals are OK to live forever.
+      #
+      # The reason for making the globals expire with the cache in the
+      # first place is to avoid leaking state between test cases: in
+      # production, we don't expect the database seeds to ever go away
+      # even when the cache is cleared, so there's no particular
+      # reason to expire our global variables.
+    else
+      Rails.cache.fetch "CurrentApiClient.$globals" do
+        value = nil
+        true
+      end
+    end
+    return value unless value.nil?
+    yield
+  end
+end
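A usage sketch for the act-as helpers above (the Link attributes and the two uuid variables are illustrative assumptions):

include CurrentApiClient

# Run one privileged write as the system user; the previous user is
# restored afterwards, even if the block raises.
act_as_system_user do
  Link.create!(link_class: 'permission',
               name: 'can_read',
               tail_uuid: reader_uuid,   # assumed to be defined elsewhere
               head_uuid: object_uuid)   # assumed to be defined elsewhere
end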
diff --git a/services/api/lib/db_current_time.rb b/services/api/lib/db_current_time.rb
new file mode 100644 (file)
index 0000000..fdb6641
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module DbCurrentTime
+  CURRENT_TIME_SQL = "SELECT clock_timestamp()"
+
+  def db_current_time
+    Time.parse(ActiveRecord::Base.connection.select_value(CURRENT_TIME_SQL)).to_time
+  end
+end
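Reading the clock from the database rather than the Rails host avoids skew when several servers share one database; a sketch (the class is illustrative):

class StaleLockSweeper
  extend DbCurrentTime  # db_current_time becomes a class method

  def self.cutoff
    # Fifteen minutes ago, by the database's clock.
    db_current_time - 15 * 60
  end
end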
diff --git a/services/api/lib/enable_jobs_api.rb b/services/api/lib/enable_jobs_api.rb
new file mode 100644 (file)
index 0000000..d99edd8
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+Disable_jobs_api_method_list = ["jobs.create",
+                                               "pipeline_instances.create",
+                                               "pipeline_templates.create",
+                                               "jobs.get",
+                                               "pipeline_instances.get",
+                                               "pipeline_templates.get",
+                                               "jobs.list",
+                                               "pipeline_instances.list",
+                                               "pipeline_templates.list",
+                                               "jobs.index",
+                                               "pipeline_instances.index",
+                                               "pipeline_templates.index",
+                                               "jobs.update",
+                                               "pipeline_instances.update",
+                                               "pipeline_templates.update",
+                                               "jobs.queue",
+                                               "jobs.queue_size",
+                                               "job_tasks.create",
+                                               "job_tasks.get",
+                                               "job_tasks.list",
+                                               "job_tasks.index",
+                                               "job_tasks.update",
+                                               "jobs.show",
+                                               "pipeline_instances.show",
+                                               "pipeline_templates.show",
+                                               "job_tasks.show"]
+
+def check_enable_legacy_jobs_api
+  if Rails.configuration.enable_legacy_jobs_api == false ||
+     (Rails.configuration.enable_legacy_jobs_api == "auto" &&
+      ActiveRecord::Base.connection.exec_query("select count(*) from jobs").first["count"] == "0")
+    Rails.configuration.disable_api_methods = Disable_jobs_api_method_list
+  end
+end
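check_enable_legacy_jobs_api presumably runs once at boot; a sketch of an initializer that would call it (the file name and hook are assumptions):

# config/initializers/legacy_jobs_api.rb (sketch)
Server::Application.configure do
  config.after_initialize do
    # Disables the jobs/pipelines API methods when the feature is off,
    # or set to "auto" with an empty jobs table.
    check_enable_legacy_jobs_api
  end
end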
diff --git a/services/api/lib/has_uuid.rb b/services/api/lib/has_uuid.rb
new file mode 100644 (file)
index 0000000..dc8bdcb
--- /dev/null
@@ -0,0 +1,86 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module HasUuid
+
+  UUID_REGEX = /^[0-9a-z]{5}-([0-9a-z]{5})-[0-9a-z]{15}$/
+
+  def self.included(base)
+    base.extend(ClassMethods)
+    base.validate :validate_uuid
+    base.before_create :assign_uuid
+    base.before_destroy :destroy_permission_links
+    base.has_many(:links_via_head,
+                  -> { where("not (link_class = 'permission')") },
+                  class_name: 'Link',
+                  foreign_key: :head_uuid,
+                  primary_key: :uuid,
+                  dependent: :destroy)
+    base.has_many(:links_via_tail,
+                  -> { where("not (link_class = 'permission')") },
+                  class_name: 'Link',
+                  foreign_key: :tail_uuid,
+                  primary_key: :uuid,
+                  dependent: :destroy)
+  end
+
+  module ClassMethods
+    def uuid_prefix
+      Digest::MD5.hexdigest(self.to_s).to_i(16).to_s(36)[-5..-1]
+    end
+    def generate_uuid
+      [Server::Application.config.uuid_prefix,
+       self.uuid_prefix,
+       rand(2**256).to_s(36)[-15..-1]].
+        join '-'
+    end
+  end
+
+  protected
+
+  def respond_to_uuid?
+    self.respond_to? :uuid
+  end
+
+  def validate_uuid
+    if self.respond_to_uuid? and self.uuid_changed?
+      if current_user.andand.is_admin and self.uuid.is_a?(String)
+        if (re = self.uuid.match HasUuid::UUID_REGEX)
+          if re[1] == self.class.uuid_prefix
+            return true
+          else
+            self.errors.add(:uuid, "type field is '#{re[1]}', expected '#{self.class.uuid_prefix}'")
+            return false
+          end
+        else
+          self.errors.add(:uuid, "not a valid Arvados uuid '#{self.uuid}'")
+          return false
+        end
+      else
+        if self.new_record?
+          self.errors.add(:uuid, "assignment not permitted")
+        else
+          self.errors.add(:uuid, "change not permitted")
+        end
+        return false
+      end
+    else
+      return true
+    end
+  end
+
+  def assign_uuid
+    if self.respond_to_uuid? and (self.uuid.nil? or self.uuid.empty?)
+      self.uuid = self.class.generate_uuid
+    end
+    true
+  end
+
+  def destroy_permission_links
+    if uuid
+      Link.destroy_all(['link_class=? and (head_uuid=? or tail_uuid=?)',
+                        'permission', uuid, uuid])
+    end
+  end
+end
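A behavior sketch for the mixin above (the model name is illustrative):

class Specimen < ActiveRecord::Base
  include HasUuid
end

Specimen.uuid_prefix    # 5 base-36 chars derived from the class name
Specimen.generate_uuid  # "<site prefix>-<class prefix>-<15 random base-36 chars>"
s = Specimen.create!    # the before_create hook assigns s.uuid automatically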
diff --git a/services/api/lib/josh_id.rb b/services/api/lib/josh_id.rb
new file mode 100644 (file)
index 0000000..bb6c1f4
--- /dev/null
@@ -0,0 +1,58 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'omniauth-oauth2'
+module OmniAuth
+  module Strategies
+    class JoshId < OmniAuth::Strategies::OAuth2
+
+      args [:client_id, :client_secret, :custom_provider_url]
+
+      option :custom_provider_url, ''
+
+      uid { raw_info['id'] }
+
+      option :client_options, {}
+
+      info do
+        {
+          :first_name => raw_info['info']['first_name'],
+          :last_name => raw_info['info']['last_name'],
+          :email => raw_info['info']['email'],
+          :identity_url => raw_info['info']['identity_url'],
+          :username => raw_info['info']['username'],
+        }
+      end
+
+      extra do
+        {
+          'raw_info' => raw_info
+        }
+      end
+
+      def authorize_params
+        options.authorize_params[:auth_provider] = request.params['auth_provider']
+        super
+      end
+
+      def client
+        options.client_options[:site] = options[:custom_provider_url]
+        options.client_options[:authorize_url] = "#{options[:custom_provider_url]}/auth/josh_id/authorize"
+        options.client_options[:access_token_url] = "#{options[:custom_provider_url]}/auth/josh_id/access_token"
+        if Rails.configuration.sso_insecure
+          options.client_options[:ssl] = {verify_mode: OpenSSL::SSL::VERIFY_NONE}
+        end
+        ::OAuth2::Client.new(options.client_id, options.client_secret, deep_symbolize(options.client_options))
+      end
+
+      def callback_url
+        full_host + script_name + callback_path + "?return_to=" + CGI.escape(request.params['return_to'] || '')
+      end
+
+      def raw_info
+        @raw_info ||= access_token.get("/auth/josh_id/user.json?oauth_token=#{access_token.token}").parsed
+      end
+    end
+  end
+end
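The strategy above would be registered with the OmniAuth middleware along these lines (the initializer file and constants are assumptions):

# config/initializers/omniauth.rb (sketch)
Rails.application.config.middleware.use OmniAuth::Builder do
  # Arguments match the `args` declaration above.
  provider :josh_id, APP_ID, APP_SECRET, CUSTOM_PROVIDER_URL
end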
diff --git a/services/api/lib/kind_and_etag.rb b/services/api/lib/kind_and_etag.rb
new file mode 100644 (file)
index 0000000..faf3ab0
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module KindAndEtag
+
+  def self.included(base)
+    base.extend(ClassMethods)
+  end
+
+  module ClassMethods
+    def kind
+      'arvados#' + self.to_s.camelcase(:lower)
+    end
+  end
+
+  def kind
+    self.class.kind
+  end
+
+  def etag attrs=nil
+    Digest::MD5.hexdigest((attrs || self.attributes).inspect).to_i(16).to_s(36)
+  end
+end
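A behavior sketch for the mixin above (the model name is illustrative):

class Collection < ActiveRecord::Base
  include KindAndEtag
end

Collection.kind      # => "arvados#collection"
Collection.new.kind  # same value via the instance method
# etag is a base-36 MD5 digest of the given (or current) attributes hash.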
diff --git a/services/api/lib/load_param.rb b/services/api/lib/load_param.rb
new file mode 100644 (file)
index 0000000..e7cb21f
--- /dev/null
@@ -0,0 +1,174 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Mixin module for reading out query parameters from request params.
+#
+# Expects:
+#   +params+ Hash
+# Sets:
+#   @where, @filters, @limit, @offset, @orders
+module LoadParam
+
+  # Default number of rows to return in a single query.
+  DEFAULT_LIMIT = 100
+
+  # Load params[:where] into @where
+  def load_where_param
+    if params[:where].nil? or params[:where] == ""
+      @where = {}
+    elsif params[:where].is_a? Hash
+      @where = params[:where]
+    elsif params[:where].is_a? String
+      begin
+        @where = SafeJSON.load(params[:where])
+        raise unless @where.is_a? Hash
+      rescue
+        raise ArgumentError.new("Could not parse \"where\" param as an object")
+      end
+    end
+    @where = @where.with_indifferent_access
+  end
+
+  # Load params[:filters] into @filters
+  def load_filters_param
+    @filters ||= []
+    if params[:filters].is_a? Array
+      @filters += params[:filters]
+    elsif params[:filters].is_a? String and !params[:filters].empty?
+      begin
+        f = SafeJSON.load(params[:filters])
+        if not f.nil?
+          raise unless f.is_a? Array
+          @filters += f
+        end
+      rescue
+        raise ArgumentError.new("Could not parse \"filters\" param as an array")
+      end
+    end
+  end
+
+  # Load params[:limit], params[:offset] and params[:order]
+  # into @limit, @offset, @orders
+  def load_limit_offset_order_params(fill_table_names: true)
+    if params[:limit]
+      unless params[:limit].to_s.match(/^\d+$/)
+        raise ArgumentError.new("Invalid value for limit parameter")
+      end
+      @limit = [params[:limit].to_i,
+                Rails.configuration.max_items_per_response].min
+    else
+      @limit = DEFAULT_LIMIT
+    end
+
+    if params[:offset]
+      unless params[:offset].to_s.match(/^\d+$/)
+        raise ArgumentError.new("Invalid value for offset parameter")
+      end
+      @offset = params[:offset].to_i
+    else
+      @offset = 0
+    end
+
+    @orders = []
+    if (params[:order].is_a?(Array) && !params[:order].empty?) || !params[:order].blank?
+      od = []
+      (case params[:order]
+       when String
+         if params[:order].starts_with? '['
+           od = SafeJSON.load(params[:order])
+           raise unless od.is_a? Array
+           od
+         else
+           params[:order].split(',')
+         end
+       when Array
+         params[:order]
+       else
+         []
+       end).each do |order|
+        order = order.to_s
+        attr, direction = order.strip.split " "
+        direction ||= 'asc'
+        # The attr can have its table unspecified if it happens to be for the current "model_class" (the first case)
+        # or fully specified with the database table name (the second case), e.g. "collections.name".
+        # NB: the table-name security check in the second case will not work if the model
+        # has used set_table_name to pick an alternate table name from the Rails standard.
+        # I could not find a perfect way to handle this well, but ActiveRecord::Base.send(:descendants)
+        # would be a place to start if this ever becomes necessary.
+        if (attr.match(/^[a-z][_a-z0-9]+$/) &&
+            model_class.columns.collect(&:name).index(attr) &&
+            ['asc','desc'].index(direction.downcase))
+          if fill_table_names
+            @orders << "#{table_name}.#{attr} #{direction.downcase}"
+          else
+            @orders << "#{attr} #{direction.downcase}"
+          end
+        elsif attr.match(/^([a-z][_a-z0-9]+)\.([a-z][_a-z0-9]+)$/) and
+            ['asc','desc'].index(direction.downcase) and
+            ActiveRecord::Base.connection.tables.include?($1) and
+            $1.classify.constantize.columns.collect(&:name).index($2)
+          # $1 in the above checks references the first match from the regular expression, which is expected to be the database table name
+          # $2 is of course the actual database column name
+          @orders << "#{attr} #{direction.downcase}"
+        end
+      end
+    end
+
+    # If the client-specified orders don't amount to a full ordering
+    # (e.g., [] or ['owner_uuid desc']), fall back on the default
+    # orders to ensure repeating the same request (possibly with
+    # different limit/offset) will return records in the same order.
+    #
+    # Clean up the resulting list of orders such that no column
+    # uselessly appears twice (Postgres might not optimize this out
+    # for us) and no columns uselessly appear after a unique column
+    # (Postgres does not optimize this out for us; as of 9.2, "order
+    # by id, modified_at desc, uuid" is slow but "order by id" is
+    # fast).
+    orders_given_and_default = @orders + model_class.default_orders
+    order_cols_used = {}
+    @orders = []
+    orders_given_and_default.each do |order|
+      otablecol = order.split(' ')[0]
+
+      next if order_cols_used[otablecol]
+      order_cols_used[otablecol] = true
+
+      @orders << order
+
+      otable, ocol = otablecol.split('.')
+      if otable == table_name and model_class.unique_columns.include?(ocol)
+        # we already have a full ordering; subsequent entries would be
+        # superfluous
+        break
+      end
+    end
+
+    case params[:select]
+    when Array
+      @select = params[:select]
+    when String
+      begin
+        @select = SafeJSON.load(params[:select])
+        raise unless @select.is_a? Array or @select.nil?
+      rescue
+        raise ArgumentError.new("Could not parse \"select\" param as an array")
+      end
+    end
+
+    if @select
+      # Any ordering columns must also be selected when "select" is
+      # specified, otherwise it is an SQL error, so filter out invalid
+      # orderings.
+      @orders.select! { |o|
+        col, _ = o.split
+        # match select column against order array entry
+        @select.select { |s| col == "#{table_name}.#{s}" }.any?
+      }
+    end
+
+    @distinct = true if (params[:distinct] == true || params[:distinct] == "true")
+    @distinct = false if (params[:distinct] == false || params[:distinct] == "false")
+  end
+
+end
diff --git a/services/api/lib/log_reuse_info.rb b/services/api/lib/log_reuse_info.rb
new file mode 100644 (file)
index 0000000..ed5cc82
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module LogReuseInfo
+  # log_reuse_info logs whatever the given block returns, if
+  # log_reuse_decisions is enabled. It accepts a block instead of a
+  # string because in some cases constructing the strings involves
+  # doing expensive things like database queries, and we want to skip
+  # those when logging is disabled.
+  def log_reuse_info(candidates=nil)
+    if Rails.configuration.log_reuse_decisions
+      msg = yield
+      if !candidates.nil?
+        msg = "have #{candidates.count} candidates " + msg
+      end
+      Rails.logger.info("find_reusable: " + msg)
+    end
+  end
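+
+  # Example (illustrative):
+  #   log_reuse_info(candidates) { "after filtering on script_version" }
+  # produces a log line like "find_reusable: have 3 candidates after
+  # filtering on script_version", and skips evaluating the block
+  # entirely when log_reuse_decisions is disabled.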
+end
diff --git a/services/api/lib/migrate_yaml_to_json.rb b/services/api/lib/migrate_yaml_to_json.rb
new file mode 100644 (file)
index 0000000..1db7ed0
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module MigrateYAMLToJSON
+  def self.migrate(table, column)
+    conn = ActiveRecord::Base.connection
+    n = conn.update(
+      "UPDATE #{table} SET #{column}=$1 WHERE #{column}=$2",
+      "#{table}.#{column} convert YAML to JSON",
+      [[nil, "{}"], [nil, "--- {}\n"]])
+    Rails.logger.info("#{table}.#{column}: #{n} rows updated using empty hash")
+    finished = false
+    while !finished
+      n = 0
+      conn.exec_query(
+        "SELECT id, #{column} FROM #{table} WHERE #{column} LIKE $1 LIMIT 100",
+        "#{table}.#{column} check for YAML",
+        [[nil, '---%']],
+      ).rows.map do |id, yaml|
+        n += 1
+        json = SafeJSON.dump(YAML.load(yaml))
+        conn.exec_query(
+          "UPDATE #{table} SET #{column}=$1 WHERE id=$2 AND #{column}=$3",
+          "#{table}.#{column} convert YAML to JSON",
+          [[nil, json], [nil, id], [nil, yaml]])
+      end
+      Rails.logger.info("#{table}.#{column}: #{n} rows updated")
+      finished = (n == 0)
+    end
+  end
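+
+  # Typical invocation from a Rails migration (illustrative; the table
+  # and column names are hypothetical):
+  #   MigrateYAMLToJSON.migrate("jobs", "components")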
+end
diff --git a/services/api/lib/record_filters.rb b/services/api/lib/record_filters.rb
new file mode 100644 (file)
index 0000000..831e357
--- /dev/null
@@ -0,0 +1,227 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Mixin module providing a method to convert filters into a list of SQL
+# fragments suitable to be fed to ActiveRecord #where.
+#
+# Expects:
+#   model_class
+# Operates on:
+#   @objects
+
+require 'safe_json'
+
+module RecordFilters
+
+  # Input:
+  # +filters+        array of conditions, each being [column, operator, operand]
+  # +model_class+    subclass of ActiveRecord being filtered
+  #
+  # Output:
+  # Hash with two keys:
+  # :cond_out  array of SQL fragments for each filter expression
+  # :param_out  array of values for parameter substitution in cond_out
+  def record_filters filters, model_class
+    conds_out = []
+    param_out = []
+
+    ar_table_name = model_class.table_name
+    filters.each do |filter|
+      attrs_in, operator, operand = filter
+      if attrs_in == 'any' && operator != '@@'
+        attrs = model_class.searchable_columns(operator)
+      elsif attrs_in.is_a? Array
+        attrs = attrs_in
+      else
+        attrs = [attrs_in]
+      end
+      if !filter.is_a? Array
+        raise ArgumentError.new("Invalid element in filters array: #{filter.inspect} is not an array")
+      elsif !operator.is_a? String
+        raise ArgumentError.new("Invalid operator '#{operator}' (#{operator.class}) in filter")
+      end
+
+      cond_out = []
+
+      if operator == '@@'
+        # Full-text search
+        if attrs_in != 'any'
+          raise ArgumentError.new("Full text search on individual columns is not supported")
+        end
+        if operand.is_a? Array
+          raise ArgumentError.new("Full text search not supported for array operands")
+        end
+
+        # Skip the generic per-column operator loop below
+        attrs = []
+        # Use to_tsquery since plainto_tsquery does not support prefix
+        # search. Split the operand into words and join them with ' & '.
+        cond_out << model_class.full_text_tsvector+" @@ to_tsquery(?)"
+        param_out << operand.split.join(' & ')
+      end
+      attrs.each do |attr|
+        subproperty = attr.split(".", 2)
+
+        col = model_class.columns.select { |c| c.name == subproperty[0] }.first
+
+        if subproperty.length == 2
+          if col.nil? or col.type != :jsonb
+            raise ArgumentError.new("Invalid attribute '#{subproperty[0]}' for subproperty filter")
+          end
+
+          if subproperty[1][0] == "<" and subproperty[1][-1] == ">"
+            subproperty[1] = subproperty[1][1..-2]
+          end
+
+          # jsonb search
+          case operator.downcase
+          when '=', '!='
+            not_in = if operator.downcase == "!=" then "NOT " else "" end
+            cond_out << "#{not_in}(#{ar_table_name}.#{subproperty[0]} @> ?::jsonb)"
+            param_out << SafeJSON.dump({subproperty[1] => operand})
+          when 'in'
+            if operand.is_a? Array
+              operand.each do |opr|
+                cond_out << "#{ar_table_name}.#{subproperty[0]} @> ?::jsonb"
+                param_out << SafeJSON.dump({subproperty[1] => opr})
+              end
+            else
+              raise ArgumentError.new("Invalid operand type '#{operand.class}' "\
+                                      "for '#{operator}' operator in filters")
+            end
+          when '<', '<=', '>', '>='
+            cond_out << "#{ar_table_name}.#{subproperty[0]}->? #{operator} ?::jsonb"
+            param_out << subproperty[1]
+            param_out << SafeJSON.dump(operand)
+          when 'like', 'ilike'
+            cond_out << "#{ar_table_name}.#{subproperty[0]}->>? #{operator} ?"
+            param_out << subproperty[1]
+            param_out << operand
+          when 'not in'
+            if operand.is_a? Array
+              cond_out << "#{ar_table_name}.#{subproperty[0]}->>? NOT IN (?) OR #{ar_table_name}.#{subproperty[0]}->>? IS NULL"
+              param_out << subproperty[1]
+              param_out << operand
+              param_out << subproperty[1]
+            else
+              raise ArgumentError.new("Invalid operand type '#{operand.class}' "\
+                                      "for '#{operator}' operator in filters")
+            end
+          when 'exists'
+            if operand == true
+              cond_out << "jsonb_exists(#{ar_table_name}.#{subproperty[0]}, ?)"
+            elsif operand == false
+              cond_out << "(NOT jsonb_exists(#{ar_table_name}.#{subproperty[0]}, ?)) OR #{ar_table_name}.#{subproperty[0]} is NULL"
+            else
+              raise ArgumentError.new("Invalid operand '#{operand}' for '#{operator}' must be true or false")
+            end
+            param_out << subproperty[1]
+          else
+            raise ArgumentError.new("Invalid operator for subproperty search '#{operator}'")
+          end
+        elsif operator.downcase == "exists"
+          if col.type != :jsonb
+            raise ArgumentError.new("Invalid attribute '#{subproperty[0]}' for operator '#{operator}' in filter")
+          end
+
+          cond_out << "jsonb_exists(#{ar_table_name}.#{subproperty[0]}, ?)"
+          param_out << operand
+        else
+          if !model_class.searchable_columns(operator).index subproperty[0]
+            raise ArgumentError.new("Invalid attribute '#{subproperty[0]}' in filter")
+          end
+
+          case operator.downcase
+          when '=', '<', '<=', '>', '>=', '!=', 'like', 'ilike'
+            attr_type = model_class.attribute_column(attr).type
+            operator = '<>' if operator == '!='
+            if operand.is_a? String
+              if attr_type == :boolean
+                if not ['=', '<>'].include?(operator)
+                  raise ArgumentError.new("Invalid operator '#{operator}' for " \
+                                          "boolean attribute '#{attr}'")
+                end
+                case operand.downcase
+                when '1', 't', 'true', 'y', 'yes'
+                  operand = true
+                when '0', 'f', 'false', 'n', 'no'
+                  operand = false
+                else
+                  raise ArgumentError("Invalid operand '#{operand}' for " \
+                                      "boolean attribute '#{attr}'")
+                end
+              end
+              if operator == '<>'
+                # explicitly allow NULL
+                cond_out << "#{ar_table_name}.#{attr} #{operator} ? OR #{ar_table_name}.#{attr} IS NULL"
+              else
+                cond_out << "#{ar_table_name}.#{attr} #{operator} ?"
+              end
+              # Any operator that compares values (rather than string
+              # representations) needs a parsed timestamp:
+              if operator.match(/[<=>]/) and (attr_type == :datetime)
+                operand = Time.parse operand
+              end
+              param_out << operand
+            elsif operand.nil? and operator == '='
+              cond_out << "#{ar_table_name}.#{attr} is null"
+            elsif operand.nil? and operator == '<>'
+              cond_out << "#{ar_table_name}.#{attr} is not null"
+            elsif (attr_type == :boolean) and ['=', '<>'].include?(operator) and
+                 [true, false].include?(operand)
+              cond_out << "#{ar_table_name}.#{attr} #{operator} ?"
+              param_out << operand
+            elsif (attr_type == :integer)
+              cond_out << "#{ar_table_name}.#{attr} #{operator} ?"
+              param_out << operand
+            else
+              raise ArgumentError.new("Invalid operand type '#{operand.class}' "\
+                                      "for '#{operator}' operator in filters")
+            end
+          when 'in', 'not in'
+            if operand.is_a? Array
+              cond_out << "#{ar_table_name}.#{attr} #{operator} (?)"
+              param_out << operand
+              if operator == 'not in' and not operand.include?(nil)
+                # explicitly allow NULL
+                cond_out[-1] = "(#{cond_out[-1]} OR #{ar_table_name}.#{attr} IS NULL)"
+              end
+            else
+              raise ArgumentError.new("Invalid operand type '#{operand.class}' "\
+                                      "for '#{operator}' operator in filters")
+            end
+          when 'is_a'
+            operand = [operand] unless operand.is_a? Array
+            cond = []
+            operand.each do |op|
+              cl = ArvadosModel::kind_class op
+              if cl
+                if attr == 'uuid'
+                  if model_class.uuid_prefix == cl.uuid_prefix
+                    cond << "1=1"
+                  else
+                    cond << "1=0"
+                  end
+                else
+                  # Use a substring query to support remote uuids
+                  cond << "substring(#{ar_table_name}.#{attr}, 7, 5) = ?"
+                  param_out << cl.uuid_prefix
+                end
+              else
+                cond << "1=0"
+              end
+            end
+            cond_out << cond.join(' OR ')
+          else
+            raise ArgumentError.new("Invalid operator '#{operator}'")
+          end
+        end
+      end
+      conds_out << cond_out.join(' OR ') if cond_out.any?
+    end
+
+    {:cond_out => conds_out, :param_out => param_out}
+  end
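+
+  # Examples (illustrative; they assume a Collection model backed by
+  # the "collections" table, with a jsonb "properties" column):
+  #
+  #   record_filters([['name', 'like', 'sample%']], Collection)
+  #   # => {:cond_out => ["collections.name like ?"],
+  #   #     :param_out => ["sample%"]}
+  #
+  #   record_filters([['properties.category', '=', 'WGS']], Collection)
+  #   # => {:cond_out => ["(collections.properties @> ?::jsonb)"],
+  #   #     :param_out => ['{"category":"WGS"}']}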
+
+end
diff --git a/services/api/lib/refresh_permission_view.rb b/services/api/lib/refresh_permission_view.rb
new file mode 100644 (file)
index 0000000..25be3c0
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+PERMISSION_VIEW = "materialized_permission_view"
+
+def do_refresh_permission_view
+  ActiveRecord::Base.transaction do
+    ActiveRecord::Base.connection.execute("LOCK TABLE permission_refresh_lock")
+    ActiveRecord::Base.connection.execute("REFRESH MATERIALIZED VIEW #{PERMISSION_VIEW}")
+  end
+end
+
+def refresh_permission_view(async=false)
+  if async and Rails.configuration.async_permissions_update_interval > 0
+    exp = Rails.configuration.async_permissions_update_interval.seconds
+    need = false
+    Rails.cache.fetch('AsyncRefreshPermissionView', expires_in: exp) do
+      need = true
+    end
+    if need
+      # Schedule a new permission update and return immediately
+      Thread.new do
+        Thread.current.abort_on_exception = false
+        begin
+          sleep(exp)
+          Rails.cache.delete('AsyncRefreshPermissionView')
+          do_refresh_permission_view
+        rescue => e
+          Rails.logger.error "Updating permission view: #{e}\n#{e.backtrace.join("\n\t")}"
+        ensure
+          ActiveRecord::Base.connection.close
+        end
+      end
+      true
+    end
+  else
+    do_refresh_permission_view
+  end
+end
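+
+# Example (illustrative):
+#   refresh_permission_view        # refresh the materialized view now
+#   refresh_permission_view(true)  # coalesce with other pending updates
+#                                  # and refresh in a background thread,
+#                                  # if async updates are configured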
diff --git a/services/api/lib/request_error.rb b/services/api/lib/request_error.rb
new file mode 100644 (file)
index 0000000..cd9f9f8
--- /dev/null
@@ -0,0 +1,6 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class RequestError < StandardError
+end
diff --git a/services/api/lib/safe_json.rb b/services/api/lib/safe_json.rb
new file mode 100644 (file)
index 0000000..f78a3d3
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SafeJSON
+  def self.dump(o)
+    return Oj.dump(o, mode: :compat)
+  end
+  def self.load(s)
+    if s.nil? or s == ''
+      # Oj 2.18.5 used to return nil for empty input; 3.6.4 does not.
+      # We upgraded Oj for performance reasons (see #13803 and
+      # https://github.com/ohler55/oj/issues/441).
+      return nil
+    end
+    Oj.strict_load(s, symbol_keys: false)
+  end
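+
+  # Example (illustrative):
+  #   SafeJSON.dump({"a" => [1, 2]})  # => '{"a":[1,2]}'
+  #   SafeJSON.load('{"a":[1,2]}')    # => {"a"=>[1, 2]}
+  #   SafeJSON.load('')               # => nil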
+end
diff --git a/services/api/lib/safer_file_store.rb b/services/api/lib/safer_file_store.rb
new file mode 100644 (file)
index 0000000..d87b980
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SaferFileStore < ActiveSupport::Cache::FileStore
+  private
+  def delete_empty_directories(dir)
+    # It is not safe to delete an empty directory. Another thread or
+    # process might be in write_entry(), having just created an empty
+    # directory via ensure_cache_path(). If we delete that empty
+    # directory, the other thread/process will crash in
+    # File.atomic_write():
+    #
+    # #<Errno::ENOENT: No such file or directory @ rb_sysopen - /.../tmp/cache/94F/070/.permissions_check.13730420.54542.801783>
+  end
+end
diff --git a/services/api/lib/salvage_collection.rb b/services/api/lib/salvage_collection.rb
new file mode 100755 (executable)
index 0000000..3813f41
--- /dev/null
@@ -0,0 +1,98 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module SalvageCollection
+  # Take two input parameters: a collection uuid and a reason
+  # Get "src_collection" with the given uuid
+  # Create a new collection with:
+  #   src_collection.manifest_text as "invalid_manifest_text.txt"
+  #   Locators from src_collection.manifest_text as "salvaged_data"
+  # Update src_collection:
+  #   Set src_collection.manifest_text to: ""
+  #   Append to src_collection.name: " (reason; salvaged data at new_collection.uuid)"
+  #   Set portable_data_hash to "d41d8cd98f00b204e9800998ecf8427e+0"
+
+  require File.dirname(__FILE__) + '/../config/environment'
+  include ApplicationHelper
+  require 'tempfile'
+  require 'shellwords'
+
+  def salvage_collection_arv_put cmd
+    new_manifest = %x(#{cmd})
+    if $?.success?
+      new_manifest
+    else
+      raise "Error during arv-put: #{$?} (cmd was #{cmd.inspect})"
+    end
+  end
+
+  # Get all the locators (and perhaps other strings that look a lot
+  # like locators) from the original manifest, even if they don't
+  # appear in the correct positions with the correct space delimiters.
+  def salvage_collection_locator_data manifest
+    locators = []
+    size = 0
+    manifest.scan(/(^|[^[:xdigit:]])([[:xdigit:]]{32})((\+\d+)(\+|\b))?/) do |_, hash, _, sizehint, _|
+      if sizehint
+        locators << hash.downcase + sizehint
+        size += sizehint.to_i
+      else
+        locators << hash.downcase
+      end
+    end
+    locators << 'd41d8cd98f00b204e9800998ecf8427e+0' if !locators.any?
+    return [locators, size]
+  end
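+
+  # Example (illustrative):
+  #   salvage_collection_locator_data("junk acbd18db4cc2f85cedef654fccc4a4d8+3 junk")
+  #   # => [["acbd18db4cc2f85cedef654fccc4a4d8+3"], 3]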
+
+  def salvage_collection uuid, reason='salvaged - see #6277, #6859'
+    act_as_system_user do
+      if !ENV['ARVADOS_API_TOKEN'].present? or !ENV['ARVADOS_API_HOST'].present?
+        raise "ARVADOS environment variables missing. Please set your admin user credentials as ARVADOS environment variables."
+      end
+
+      if !uuid.present?
+        raise "Collection UUID is required."
+      end
+
+      src_collection = Collection.find_by_uuid uuid
+      if !src_collection
+        raise "No collection found for #{uuid}."
+      end
+
+      src_manifest = src_collection.manifest_text || ''
+
+      # create new collection using 'arv-put' with original manifest_text as the data
+      temp_file = Tempfile.new('temp')
+      temp_file.write(src_manifest)
+
+      temp_file.close
+
+      new_manifest = salvage_collection_arv_put "arv-put --as-stream --use-filename invalid_manifest_text.txt #{Shellwords::shellescape(temp_file.path)}"
+
+      temp_file.unlink
+
+      # Get the locator data in the format [[locators], size] from the original manifest
+      locator_data = salvage_collection_locator_data src_manifest
+
+      new_manifest += (". #{locator_data[0].join(' ')} 0:#{locator_data[1]}:salvaged_data\n")
+
+      new_collection = Collection.new
+      new_collection.name = "salvaged from #{src_collection.uuid}, #{src_collection.portable_data_hash}"
+      new_collection.manifest_text = new_manifest
+
+      created = new_collection.save!
+      raise "New collection creation failed." if !created
+
+      $stderr.puts "Salvaged manifest and data for #{uuid} are in #{new_collection.uuid}."
+      puts "Created new collection #{new_collection.uuid}"
+
+      # update src_collection collection name, pdh, and manifest_text
+      src_collection.name = (src_collection.name || '') + ' (' + (reason || '') + '; salvaged data at ' + new_collection.uuid + ')'
+      src_collection.manifest_text = ''
+      src_collection.portable_data_hash = 'd41d8cd98f00b204e9800998ecf8427e+0'
+      src_collection.save!
+      $stderr.puts "Collection #{uuid} emptied and renamed to #{src_collection.name.inspect}."
+    end
+  end
+end
diff --git a/services/api/lib/serializers.rb b/services/api/lib/serializers.rb
new file mode 100644 (file)
index 0000000..37734e0
--- /dev/null
@@ -0,0 +1,68 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'safe_json'
+
+class Serializer
+  class TypeMismatch < ArgumentError
+  end
+
+  def self.dump(val)
+    if !val.is_a?(object_class)
+      raise TypeMismatch.new("cannot serialize #{val.class} as #{object_class}")
+    end
+    SafeJSON.dump(val)
+  end
+
+  def self.legacy_load(s)
+    val = Psych.safe_load(s)
+    if val.is_a? String
+      # If apiserver was downgraded to a YAML-only version after
+      # storing JSON in the database, the old code would have loaded
+      # the JSON document as a plain string, and then YAML-encoded
+      # it when saving it back to the database. It's too late now to
+      # make the old code behave better, but at least we can
+      # gracefully handle the mess it leaves in the database by
+      # double-decoding on the way out.
+      return SafeJSON.load(val)
+    else
+      return val
+    end
+  end
+
+  def self.load(s)
+    if s.is_a?(object_class)
+      # Rails already deserialized for us
+      s
+    elsif s.nil?
+      object_class.new()
+    elsif s[0] == first_json_char
+      SafeJSON.load(s)
+    elsif s[0..2] == "---"
+      legacy_load(s)
+    else
+      raise "invalid serialized data #{s[0..5].inspect}"
+    end
+  end
+end
+
+class HashSerializer < Serializer
+  def self.first_json_char
+    "{"
+  end
+
+  def self.object_class
+    ::Hash
+  end
+end
+
+class ArraySerializer < Serializer
+  def self.first_json_char
+    "["
+  end
+
+  def self.object_class
+    ::Array
+  end
+end
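+
+# Example (illustrative): load accepts JSON, legacy YAML, or an
+# already-deserialized object:
+#   HashSerializer.load('{"a":1}')    # => {"a"=>1}
+#   HashSerializer.load("--- {}\n")   # => {}
+#   ArraySerializer.dump([1, 2])      # => "[1,2]"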
diff --git a/services/api/lib/simulate_job_log.rb b/services/api/lib/simulate_job_log.rb
new file mode 100644 (file)
index 0000000..abcf42e
--- /dev/null
@@ -0,0 +1,62 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'current_api_client'
+
+module SimulateJobLog
+  include CurrentApiClient
+  def replay(filename, multiplier = 1, simulated_job_uuid = nil)
+    raise "Environment must be development or test" unless [ 'test', 'development' ].include? ENV['RAILS_ENV']
+
+    multiplier = multiplier.to_f
+    multiplier = 1.0 if multiplier <= 0
+
+    actual_start_time = Time.now
+    log_start_time = nil
+
+    if simulated_job_uuid and (job = Job.where(uuid: simulated_job_uuid).first)
+      job_owner_uuid = job.owner_uuid
+    else
+      job_owner_uuid = system_user_uuid
+    end
+
+    act_as_system_user do
+      File.open(filename).each.with_index do |line, index|
+        cols = {}
+        cols[:timestamp], rest_of_line = line.split(' ', 2)
+        begin
+          cols[:timestamp] = Time.strptime( cols[:timestamp], "%Y-%m-%d_%H:%M:%S" )
+        rescue ArgumentError
+          if line =~ /^((?:Sun|Mon|Tue|Wed|Thu|Fri|Sat) (?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{1,2} \d\d:\d\d:\d\d \d{4}) (.*)/
+            # Wed Nov 19 07:12:39 2014
+            cols[:timestamp] = Time.strptime( $1, "%a %b %d %H:%M:%S %Y" )
+            rest_of_line = $2
+          else
+              STDERR.puts "Ignoring log line because of unknown time format: #{line}"
+          end
+        end
+        cols[:job_uuid], cols[:pid], cols[:task], cols[:event_type], cols[:message] = rest_of_line.split(' ', 5)
+        # Override job uuid with a simulated one if specified
+        cols[:job_uuid] = simulated_job_uuid || cols[:job_uuid]
+        # determine when we want to simulate this log being created, based on the time multiplier
+        log_start_time = cols[:timestamp] if log_start_time.nil?
+        log_time = cols[:timestamp]
+        actual_elapsed_time = Time.now - actual_start_time
+        log_elapsed_time = log_time - log_start_time
+        modified_elapsed_time = log_elapsed_time / multiplier
+        pause_time = modified_elapsed_time - actual_elapsed_time
+        sleep pause_time if pause_time > 0
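+        # e.g. (illustrative) with multiplier 2.0, a 10-second gap
+        # between log entries is replayed as a 5-second pause.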
+
+        Log.new({
+          owner_uuid:  job_owner_uuid,
+          event_at:    Time.zone.local_to_utc(cols[:timestamp]),
+          object_uuid: cols[:job_uuid],
+          event_type:  cols[:event_type],
+          properties:  { 'text' => line }
+        }).save!
+      end
+    end
+
+  end
+end
diff --git a/services/api/lib/sweep_trashed_objects.rb b/services/api/lib/sweep_trashed_objects.rb
new file mode 100644 (file)
index 0000000..bedbd68
--- /dev/null
@@ -0,0 +1,77 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'current_api_client'
+
+module SweepTrashedObjects
+  extend CurrentApiClient
+
+  def self.delete_project_and_contents(p_uuid)
+    p = Group.find_by_uuid(p_uuid)
+    if !p || p.group_class != 'project'
+      raise "can't sweep group '#{p_uuid}', it may not exist or not be a project"
+    end
+    # First delete sub projects
+    Group.where({group_class: 'project', owner_uuid: p_uuid}).each do |sub_project|
+      delete_project_and_contents(sub_project.uuid)
+    end
+    # Next, iterate over all tables which have owner_uuid fields, with some
+    # exceptions, and delete records owned by this project
+    skipped_classes = ['Group', 'User']
+    ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|
+      if !skipped_classes.include?(klass.name) && klass.columns.collect(&:name).include?('owner_uuid')
+        klass.where({owner_uuid: p_uuid}).destroy_all
+      end
+    end
+    # Finally delete the project itself
+    p.destroy
+  end
+
+  def self.sweep_now
+    act_as_system_user do
+      # Sweep trashed collections
+      Collection.
+        where('delete_at is not null and delete_at < statement_timestamp()').
+        destroy_all
+      Collection.
+        where('is_trashed = false and trash_at < statement_timestamp()').
+        update_all('is_trashed = true')
+
+      # Sweep trashed projects and their contents
+      Group.
+        where({group_class: 'project'}).
+        where('delete_at is not null and delete_at < statement_timestamp()').each do |project|
+          delete_project_and_contents(project.uuid)
+      end
+      Group.
+        where({group_class: 'project'}).
+        where('is_trashed = false and trash_at < statement_timestamp()').
+        update_all('is_trashed = true')
+
+      # Sweep expired tokens
+      ActiveRecord::Base.connection.execute("DELETE from api_client_authorizations where expires_at <= statement_timestamp()")
+    end
+  end
+
+  def self.sweep_if_stale
+    return if Rails.configuration.trash_sweep_interval <= 0
+    exp = Rails.configuration.trash_sweep_interval.seconds
+    need = false
+    Rails.cache.fetch('SweepTrashedObjects', expires_in: exp) do
+      need = true
+    end
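+    # Rails.cache.fetch only runs the block (setting need=true) when no
+    # unexpired 'SweepTrashedObjects' entry exists, so a sweep is
+    # scheduled at most once per trash_sweep_interval.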
+    if need
+      Thread.new do
+        Thread.current.abort_on_exception = false
+        begin
+          sweep_now
+        rescue => e
+          Rails.logger.error "#{e.class}: #{e}\n#{e.backtrace.join("\n\t")}"
+        ensure
+          ActiveRecord::Base.connection.close
+        end
+      end
+    end
+  end
+end
diff --git a/services/api/lib/tasks/.gitkeep b/services/api/lib/tasks/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/lib/tasks/config_check.rake b/services/api/lib/tasks/config_check.rake
new file mode 100644 (file)
index 0000000..4f071f1
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+namespace :config do
+  desc 'Ensure site configuration has all required settings'
+  task check: :environment do
+    $stderr.puts "%-32s %s" % ["AppVersion (discovered)", AppVersion.hash]
+    $application_config.sort.each do |k, v|
+      if ENV.has_key?('QUIET') then
+        # Make sure we still check for the variable to exist
+        eval("Rails.configuration.#{k}")
+      else
+        if /(password|secret|signing_key)/.match(k) then
+          # Make sure we still check for the variable to exist, but don't print the value
+          eval("Rails.configuration.#{k}")
+          $stderr.puts "%-32s %s" % [k, '*********']
+        else
+          $stderr.puts "%-32s %s" % [k, eval("Rails.configuration.#{k}")]
+        end
+      end
+    end
+    # default_trash_lifetime cannot be less than 24 hours
+    if Rails.configuration.default_trash_lifetime < 86400 then
+      raise "default_trash_lifetime is %d, must be at least 86400" % Rails.configuration.default_trash_lifetime
+    end
+  end
+end
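+
+# Example (illustrative):
+#   bundle exec rake config:check          # print and check all settings
+#   QUIET=1 bundle exec rake config:check  # check without printing values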
diff --git a/services/api/lib/tasks/config_dump.rake b/services/api/lib/tasks/config_dump.rake
new file mode 100644 (file)
index 0000000..ed34960
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+namespace :config do
+  desc 'Show site configuration'
+  task dump: :environment do
+    puts $application_config.to_yaml
+  end
+end
diff --git a/services/api/lib/tasks/delete_old_container_logs.rake b/services/api/lib/tasks/delete_old_container_logs.rake
new file mode 100644 (file)
index 0000000..b45113e
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This task finds containers that have been finished for at least as long as
+# the duration specified in the `clean_container_log_rows_after` config setting,
+# and deletes their stdout, stderr, arv-mount, crunch-run, and crunchstat logs
+# from the logs table.
+
+namespace :db do
+  desc "Remove old container log entries from the logs table"
+
+  task delete_old_container_logs: :environment do
+    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.clean_container_log_rows_after} seconds')"
+
+    ActiveRecord::Base.connection.execute(delete_sql)
+  end
+end
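+
+# Example (illustrative):
+#   bundle exec rake db:delete_old_container_logs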
diff --git a/services/api/lib/tasks/delete_old_job_logs.rake b/services/api/lib/tasks/delete_old_job_logs.rake
new file mode 100644 (file)
index 0000000..dcd92b1
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This task finds jobs that have been finished for at least as long as
+# the duration specified in the `clean_job_log_rows_after`
+# configuration setting, and deletes their stderr logs from the logs table.
+
+namespace :db do
+  desc "Remove old job stderr entries from the logs table"
+  task delete_old_job_logs: :environment do
+    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.clean_job_log_rows_after} seconds')"
+
+    ActiveRecord::Base.connection.execute(delete_sql)
+  end
+end
diff --git a/services/api/lib/tasks/replay_job_log.rake b/services/api/lib/tasks/replay_job_log.rake
new file mode 100644 (file)
index 0000000..9c0f005
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'simulate_job_log'
+desc 'Simulate job logging from a file. Three arguments: log filename, time multiplier (optional), simulated job uuid (optional). E.g. (use quotation marks if there are spaces between args): rake "replay_job_log[log.txt, 2.0, qr1hi-8i9sb-nf3qk0xzwwz3lre]"'
+task :replay_job_log, [:filename, :multiplier, :uuid] => :environment do |t, args|
+  include SimulateJobLog
+  abort("No filename specified.") if args[:filename].blank?
+  replay( args[:filename], args[:multiplier].to_f, args[:uuid] )
+end
diff --git a/services/api/lib/tasks/test_tasks.rake b/services/api/lib/tasks/test_tasks.rake
new file mode 100644 (file)
index 0000000..0d5ed40
--- /dev/null
@@ -0,0 +1,10 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+namespace :test do
+  new_task = Rake::TestTask.new(tasks: "test:prepare") do |t|
+    t.libs << "test"
+    t.pattern = "test/tasks/**/*_test.rb"
+  end
+end
diff --git a/services/api/lib/trashable.rb b/services/api/lib/trashable.rb
new file mode 100644 (file)
index 0000000..9687962
--- /dev/null
@@ -0,0 +1,128 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module Trashable
+  def self.included(base)
+    base.before_validation :set_validation_timestamp
+    base.before_validation :ensure_trash_at_not_in_past
+    base.before_validation :sync_trash_state
+    base.before_validation :default_trash_interval
+    base.validate :validate_trash_and_delete_timing
+  end
+
+  # Use a single timestamp for all validations, even though each
+  # validation runs at a different time.
+  def set_validation_timestamp
+    @validation_timestamp = db_current_time
+  end
+
+  # If trash_at is being changed to a time in the past, change it to
+  # now. This allows clients to say "expires {client-current-time}"
+  # without failing due to clock skew, while avoiding odd log entries
+  # like "expiry date changed to {1 year ago}".
+  def ensure_trash_at_not_in_past
+    if trash_at_changed? && trash_at
+      self.trash_at = [@validation_timestamp, trash_at].max
+    end
+  end
+
+  # Caller can move into/out of trash by setting/clearing is_trashed
+  # -- however, if the caller also changes trash_at, then any changes
+  # to is_trashed are ignored.
+  def sync_trash_state
+    if is_trashed_changed? && !trash_at_changed?
+      if is_trashed
+        self.trash_at = @validation_timestamp
+      else
+        self.trash_at = nil
+        self.delete_at = nil
+      end
+    end
+    self.is_trashed = trash_at && trash_at <= @validation_timestamp || false
+    true
+  end
+
+  def default_trash_interval
+    if trash_at_changed? && !delete_at_changed?
+      # If trash_at is updated without touching delete_at,
+      # automatically update delete_at to a sensible value.
+      if trash_at.nil?
+        self.delete_at = nil
+      else
+        self.delete_at = trash_at + Rails.configuration.default_trash_lifetime.seconds
+      end
+    elsif !trash_at || !delete_at || trash_at > delete_at
+      # Not trash, or bogus arguments? Just validate in
+      # validate_trash_and_delete_timing.
+    elsif delete_at_changed? && delete_at >= trash_at
+      # Fix delete_at if needed, so it's not earlier than the expiry
+      # time on any permission tokens that might have been given out.
+
+      # In any case there are no signatures expiring after now+TTL.
+      # Also, if the existing trash_at time has already passed, we
+      # know we haven't given out any signatures since then.
+      earliest_delete = [
+        @validation_timestamp,
+        trash_at_was,
+      ].compact.min + Rails.configuration.blob_signature_ttl.seconds
+
+      # The previous value of delete_at is also an upper bound on the
+      # longest-lived permission token. For example, if TTL=14,
+      # trash_at_was=now-7, delete_at_was=now+7, then it is safe to
+      # set trash_at=now+6, delete_at=now+8.
+      earliest_delete = [earliest_delete, delete_at_was].compact.min
+
+      # If delete_at is too soon, use the earliest possible time.
+      if delete_at < earliest_delete
+        self.delete_at = earliest_delete
+      end
+    end
+  end
+
+  def validate_trash_and_delete_timing
+    if trash_at.nil? != delete_at.nil?
+      errors.add :delete_at, "must be set if trash_at is set, and must be nil otherwise"
+    elsif delete_at && delete_at < trash_at
+      errors.add :delete_at, "must not be earlier than trash_at"
+    end
+    true
+  end
+end
+
+module TrashableController
+  def destroy
+    if !@object.is_trashed
+      @object.update_attributes!(trash_at: db_current_time)
+    end
+    earliest_delete = (@object.trash_at +
+                       Rails.configuration.blob_signature_ttl.seconds)
+    if @object.delete_at > earliest_delete
+      @object.update_attributes!(delete_at: earliest_delete)
+    end
+    show
+  end
+
+  def trash
+    if !@object.is_trashed
+      @object.update_attributes!(trash_at: db_current_time)
+    end
+    show
+  end
+
+  def untrash
+    if @object.is_trashed
+      @object.trash_at = nil
+
+      if params[:ensure_unique_name]
+        @object.save_with_unique_name!
+      else
+        @object.save!
+      end
+    else
+      raise ArvadosModel::InvalidStateTransitionError.new("Item is not trashed, cannot untrash")
+    end
+    show
+  end
+
+end
diff --git a/services/api/lib/update_priority.rb b/services/api/lib/update_priority.rb
new file mode 100644 (file)
index 0000000..21cd74b
--- /dev/null
@@ -0,0 +1,57 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module UpdatePriority
+  extend CurrentApiClient
+
+  # Clean up after races.
+  #
+  # If container priority>0 but there are no committed container
+  # requests for it, reset priority to 0.
+  #
+  # If container priority=0 but there are committed container requests
+  # for it with priority>0, update priority.
+  def self.update_priority
+    if !File.owned?(Rails.root.join('tmp'))
+      Rails.logger.warn("UpdatePriority: not owner of #{Rails.root}/tmp, skipping")
+      return
+    end
+    lockfile = Rails.root.join('tmp', 'update_priority.lock')
+    File.open(lockfile, File::RDWR|File::CREAT, 0600) do |f|
+      return unless f.flock(File::LOCK_NB|File::LOCK_EX)
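+      # LOCK_NB makes flock return false instead of blocking when
+      # another process already holds the lock; in that case, skip this
+      # round and let the lock holder do the work.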
+
+      # priority>0 but should be 0:
+      ActiveRecord::Base.connection.
+        exec_query("UPDATE containers AS c SET priority=0 WHERE state IN ('Queued', 'Locked', 'Running') AND priority>0 AND uuid NOT IN (SELECT container_uuid FROM container_requests WHERE priority>0 AND state='Committed');", 'UpdatePriority')
+
+      # priority==0 but should be >0:
+      act_as_system_user do
+        Container.
+          joins("JOIN container_requests ON container_requests.container_uuid=containers.uuid AND container_requests.state=#{Container.sanitize(ContainerRequest::Committed)} AND container_requests.priority>0").
+          where('containers.state IN (?) AND containers.priority=0 AND container_requests.uuid IS NOT NULL',
+                [Container::Queued, Container::Locked, Container::Running]).
+          map(&:update_priority!)
+      end
+    end
+  end
+
+  def self.run_update_thread
+    need = false
+    Rails.cache.fetch('UpdatePriority', expires_in: 5.seconds) do
+      need = true
+    end
+    return if !need
+
+    Thread.new do
+      Thread.current.abort_on_exception = false
+      begin
+        update_priority
+      rescue => e
+        Rails.logger.error "#{e.class}: #{e}\n#{e.backtrace.join("\n\t")}"
+      ensure
+        ActiveRecord::Base.connection.close
+      end
+    end
+  end
+end
diff --git a/services/api/lib/whitelist_update.rb b/services/api/lib/whitelist_update.rb
new file mode 100644 (file)
index 0000000..17aed4b
--- /dev/null
@@ -0,0 +1,33 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module WhitelistUpdate
+  def check_update_whitelist permitted_fields
+    attribute_names.each do |field|
+      if !permitted_fields.include?(field.to_sym) && really_changed(field)
+        errors.add field, "cannot be modified in state '#{self.state}' (#{send(field+"_was").inspect}, #{send(field).inspect})"
+      end
+    end
+  end
+
+  def really_changed(attr)
+    return false if !send(attr+"_changed?")
+    old = send(attr+"_was")
+    new = send(attr)
+    if (old.nil? || old == [] || old == {}) && (new.nil? || new == [] || new == {})
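+      # e.g. a change from nil to {} or [] does not count as a real change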
+      false
+    else
+      old != new
+    end
+  end
+
+  def validate_state_change
+    if self.state_changed?
+      unless state_transitions[self.state_was].andand.include? self.state
+        errors.add :state, "cannot change from #{self.state_was} to #{self.state}"
+        return false
+      end
+    end
+  end
+end
diff --git a/services/api/public/404.html b/services/api/public/404.html
new file mode 100644 (file)
index 0000000..abb9f80
--- /dev/null
@@ -0,0 +1,30 @@
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 -->
+
+<!DOCTYPE html>
+<html>
+<head>
+  <title>The page you were looking for doesn't exist (404)</title>
+  <style type="text/css">
+    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }
+    div.dialog {
+      width: 25em;
+      padding: 0 4em;
+      margin: 4em auto 0 auto;
+      border: 1px solid #ccc;
+      border-right-color: #999;
+      border-bottom-color: #999;
+    }
+    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }
+  </style>
+</head>
+
+<body>
+  <!-- This file lives in public/404.html -->
+  <div class="dialog">
+    <h1>The page you were looking for doesn't exist.</h1>
+    <p>You may have mistyped the address or the page may have moved.</p>
+  </div>
+</body>
+</html>
diff --git a/services/api/public/422.html b/services/api/public/422.html
new file mode 100644 (file)
index 0000000..faa4a52
--- /dev/null
@@ -0,0 +1,30 @@
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 -->
+
+<!DOCTYPE html>
+<html>
+<head>
+  <title>The change you wanted was rejected (422)</title>
+  <style type="text/css">
+    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }
+    div.dialog {
+      width: 25em;
+      padding: 0 4em;
+      margin: 4em auto 0 auto;
+      border: 1px solid #ccc;
+      border-right-color: #999;
+      border-bottom-color: #999;
+    }
+    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }
+  </style>
+</head>
+
+<body>
+  <!-- This file lives in public/422.html -->
+  <div class="dialog">
+    <h1>The change you wanted was rejected.</h1>
+    <p>Maybe you tried to change something you didn't have access to.</p>
+  </div>
+</body>
+</html>
diff --git a/services/api/public/500.html b/services/api/public/500.html
new file mode 100644 (file)
index 0000000..c528141
--- /dev/null
@@ -0,0 +1,30 @@
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 -->
+
+<!DOCTYPE html>
+<html>
+<head>
+  <title>We're sorry, but something went wrong (500)</title>
+  <style type="text/css">
+    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }
+    div.dialog {
+      width: 25em;
+      padding: 0 4em;
+      margin: 4em auto 0 auto;
+      border: 1px solid #ccc;
+      border-right-color: #999;
+      border-bottom-color: #999;
+    }
+    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }
+  </style>
+</head>
+
+<body>
+  <!-- This file lives in public/500.html -->
+  <div class="dialog">
+    <h1>We're sorry, but something went wrong.</h1>
+    <p>We've been notified about this issue and we'll take a look at it shortly.</p>
+  </div>
+</body>
+</html>
diff --git a/services/api/public/favicon.ico b/services/api/public/favicon.ico
new file mode 100644 (file)
index 0000000..4c763b6
Binary files /dev/null and b/services/api/public/favicon.ico differ
diff --git a/services/api/public/robots.txt b/services/api/public/robots.txt
new file mode 100644 (file)
index 0000000..085187f
--- /dev/null
@@ -0,0 +1,5 @@
+# See http://www.robotstxt.org/wc/norobots.html for documentation on how to use the robots.txt file
+#
+# To ban all spiders from the entire site uncomment the next two lines:
+# User-Agent: *
+# Disallow: /
diff --git a/services/api/script/arvados-git-sync.rb b/services/api/script/arvados-git-sync.rb
new file mode 100755 (executable)
index 0000000..5e68082
--- /dev/null
@@ -0,0 +1,269 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'rubygems'
+require 'pp'
+require 'arvados'
+require 'tempfile'
+require 'yaml'
+require 'fileutils'
+
+# This script does the actual gitolite config management on disk.
+#
+# Ward Vandewege <ward@curoverse.com>
+
+# Default is development
+production = ARGV[0] == "production"
+
+ENV["RAILS_ENV"] = "development"
+ENV["RAILS_ENV"] = "production" if production
+
+DEBUG = 1
+
+# load and merge in the environment-specific application config info
+# if present, overriding base config parameters as specified
+path = File.absolute_path('../../config/arvados-clients.yml', __FILE__)
+if File.exist?(path) then
+  cp_config = YAML.load_file(path)[ENV['RAILS_ENV']]
+else
+  puts "Please create a\n #{path}\n file"
+  exit 1
+end
+
+gitolite_url = cp_config['gitolite_url']
+gitolite_arvados_git_user_key = cp_config['gitolite_arvados_git_user_key']
+
+gitolite_tmpdir = cp_config['gitolite_tmp']
+gitolite_admin = File.join(gitolite_tmpdir, 'gitolite-admin')
+gitolite_admin_keydir = File.join(gitolite_admin, 'keydir')
+gitolite_keydir = File.join(gitolite_admin, 'keydir', 'arvados')
+
+ENV['ARVADOS_API_HOST'] = cp_config['arvados_api_host']
+ENV['ARVADOS_API_TOKEN'] = cp_config['arvados_api_token']
+if cp_config['arvados_api_host_insecure']
+  ENV['ARVADOS_API_HOST_INSECURE'] = 'true'
+else
+  ENV.delete('ARVADOS_API_HOST_INSECURE')
+end
+
+def ensure_directory(path, mode)
+  begin
+    Dir.mkdir(path, mode)
+  rescue Errno::EEXIST
+  end
+end
+
+def replace_file(path, contents)
+  unlink_now = true
+  dirname, basename = File.split(path)
+  FileUtils.mkpath(dirname)
+  new_file = Tempfile.new([basename, ".tmp"], dirname)
+  begin
+    new_file.write(contents)
+    new_file.flush
+    File.rename(new_file, path)
+    unlink_now = false
+  ensure
+    new_file.close(unlink_now)
+  end
+end
+
+def file_has_contents?(path, contents)
+  begin
+    IO.read(path) == contents
+  rescue Errno::ENOENT
+    false
+  end
+end
+
+module TrackCommitState
+  module ClassMethods
+    # Note that all classes that include TrackCommitState share a
+    # single @@need_commit flag, so it is true if any of them set it.
+    # Since the flag reports a boolean state of the underlying git
+    # repository, that's OK in the current implementation.
+    @@need_commit = false
+
+    def changed?
+      @@need_commit
+    end
+
+    def ensure_in_git(path, contents)
+      unless file_has_contents?(path, contents)
+        replace_file(path, contents)
+        system("git", "add", path)
+        @@need_commit = true
+      end
+    end
+  end
+
+  def ensure_in_git(path, contents)
+    self.class.ensure_in_git(path, contents)
+  end
+
+  def self.included(base)
+    base.extend(ClassMethods)
+  end
+end
+
+class UserSSHKeys
+  include TrackCommitState
+
+  def initialize(user_keys_map, key_dir)
+    @user_keys_map = user_keys_map
+    @key_dir = key_dir
+    @installed = {}
+  end
+
+  def install(filename, pubkey)
+    unless pubkey.nil?
+      key_path = File.join(@key_dir, filename)
+      ensure_in_git(key_path, pubkey)
+    end
+    @installed[filename] = true
+  end
+
+  def ensure_keys_for_user(user_uuid)
+    return unless key_list = @user_keys_map.delete(user_uuid)
+    key_list.map { |k| k[:public_key] }.compact.each_with_index do |pubkey, ii|
+      # Handle putty-style ssh public keys
+      pubkey.sub!(/^(Comment: "r[^\n]*\n)(.*)$/m,'ssh-rsa \2 \1')
+      pubkey.sub!(/^(Comment: "d[^\n]*\n)(.*)$/m,'ssh-dss \2 \1')
+      pubkey.gsub!(/\n/,'')
+      pubkey.strip!
+      install("#{user_uuid}@#{ii}.pub", pubkey)
+    end
+  end
+
+  def installed?(filename)
+    @installed[filename]
+  end
+end
+
+class Repository
+  include TrackCommitState
+
+  @@aliases = {}
+
+  def initialize(arv_repo, user_keys)
+    @arv_repo = arv_repo
+    @user_keys = user_keys
+  end
+
+  def self.ensure_system_config(conf_root)
+    ensure_in_git(File.join(conf_root, "conf", "gitolite.conf"),
+                  %Q{include "auto/*.conf"\ninclude "admin/*.conf"\n})
+    ensure_in_git(File.join(conf_root, "arvadosaliases.pl"), alias_config)
+
+    conf_path = File.join(conf_root, "conf", "admin", "arvados.conf")
+    conf_file = %Q{
+@arvados_git_user = arvados_git_user
+
+repo gitolite-admin
+     RW           = @arvados_git_user
+
+}
+    ensure_directory(File.dirname(conf_path), 0755)
+    ensure_in_git(conf_path, conf_file)
+  end
+
+  def ensure_config(conf_root)
+    if name and (File.exist?(auto_conf_path(conf_root, name)))
+      # This gitolite installation knows the repository by name, rather than
+      # UUID.  Leave it configured that way until a separate migration is run.
+      basename = name
+    else
+      basename = uuid
+      @@aliases[name] = uuid unless name.nil?
+    end
+    conf_file = "\nrepo #{basename}\n"
+    @arv_repo[:user_permissions].sort.each do |user_uuid, perm|
+      conf_file += "\t#{perm[:gitolite_permissions]}\t= #{user_uuid}\n"
+      @user_keys.ensure_keys_for_user(user_uuid)
+    end
+    ensure_in_git(auto_conf_path(conf_root, basename), conf_file)
+  end
+
+  private
+
+  def auto_conf_path(conf_root, basename)
+    File.join(conf_root, "conf", "auto", "#{basename}.conf")
+  end
+
+  def uuid
+    @arv_repo[:uuid]
+  end
+
+  def name
+    if @arv_repo[:name].nil?
+      nil
+    else
+      @clean_name ||=
+        @arv_repo[:name].sub(/^[^A-Za-z]+/, "").gsub(/[^\w\.\/]/, "")
+    end
+  end
+
+  def self.alias_config
+    conf_s = "{\n"
+    @@aliases.sort.each do |(repo_name, repo_uuid)|
+      conf_s += "\t'#{repo_name}' \t=> '#{repo_uuid}',\n"
+    end
+    conf_s += "};\n"
+    conf_s
+  end
+end
+
+begin
+  # Get our local gitolite-admin repo up to snuff
+  if not File.exist?(gitolite_admin) then
+    ensure_directory(gitolite_tmpdir, 0700)
+    Dir.chdir(gitolite_tmpdir)
+    `git clone #{gitolite_url}`
+    Dir.chdir(gitolite_admin)
+  else
+    Dir.chdir(gitolite_admin)
+    `git pull`
+  end
+
+  arv = Arvados.new
+  permissions = arv.repository.get_all_permissions
+
+  ensure_directory(gitolite_keydir, 0700)
+  admin_user_ssh_keys = UserSSHKeys.new(permissions[:user_keys], gitolite_admin_keydir)
+  # Make sure the arvados_git_user key is installed; put it in gitolite_admin_keydir
+  # because that is where gitolite will try to put it if we do not.
+  admin_user_ssh_keys.install('arvados_git_user.pub', gitolite_arvados_git_user_key)
+
+  user_ssh_keys = UserSSHKeys.new(permissions[:user_keys], gitolite_keydir)
+  permissions[:repositories].each do |repo_record|
+    repo = Repository.new(repo_record, user_ssh_keys)
+    repo.ensure_config(gitolite_admin)
+  end
+  Repository.ensure_system_config(gitolite_admin)
+
+  # Clean up public key files that should not be present
+  Dir.chdir(gitolite_keydir)
+  stale_keys = Dir.glob('*.pub').reject do |key_file|
+    user_ssh_keys.installed?(key_file)
+  end
+  if stale_keys.any?
+    stale_keys.each { |key_file| puts "Extra file #{key_file}" }
+    system("git", "rm", "--quiet", *stale_keys)
+  end
+
+  if UserSSHKeys.changed? or Repository.changed? or stale_keys.any?
+    message = "#{Time.now().to_s}: update from API"
+    Dir.chdir(gitolite_admin)
+    `git add --all`
+    `git commit -m '#{message}'`
+    `git push`
+  end
+
+rescue => bang
+  puts "Error: " + bang.to_s
+  puts bang.backtrace.join("\n")
+  exit 1
+end
+
diff --git a/services/api/script/create_superuser_token.rb b/services/api/script/create_superuser_token.rb
new file mode 100755 (executable)
index 0000000..3d5de35
--- /dev/null
@@ -0,0 +1,18 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Install the supplied string (or a randomly generated token, if none
+# is given) as an API token that authenticates to the system user
+# account.
+#
+# Print the token on stdout.
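+#
+# Example (illustrative; run from the services/api directory):
+#   script/create_superuser_token.rb           # generate a random token
+#   script/create_superuser_token.rb mytoken   # install the given token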
+
+require './lib/create_superuser_token'
+include CreateSuperUserToken
+
+supplied_token = ARGV[0]
+
+token = CreateSuperUserToken.create_superuser_token supplied_token
+puts token
diff --git a/services/api/script/crunch-dispatch.rb b/services/api/script/crunch-dispatch.rb
new file mode 100755 (executable)
index 0000000..38bd54b
--- /dev/null
@@ -0,0 +1,16 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+dispatch_argv = []
+ARGV.reject! do |arg|
+  dispatch_argv.push(arg) if /^--/ =~ arg
+end
+
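+# Options beginning with "--" were collected above for the dispatcher;
+# the first remaining argument, if any, selects the Rails environment.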
+ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"
+require File.dirname(__FILE__) + '/../config/boot'
+require File.dirname(__FILE__) + '/../config/environment'
+require './lib/crunch_dispatch.rb'
+
+CrunchDispatch.new.run dispatch_argv
diff --git a/services/api/script/crunch_failure_report.py b/services/api/script/crunch_failure_report.py
new file mode 100755 (executable)
index 0000000..83217d8
--- /dev/null
@@ -0,0 +1,222 @@
+#! /usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import argparse
+import datetime
+import json
+import re
+import sys
+
+import arvados
+
+# Useful configuration variables:
+
+# Number of log lines to use as context in diagnosing failure.
+LOG_CONTEXT_LINES = 10
+
+# Regex that signifies a failed task.
+FAILED_TASK_REGEX = re.compile(' \d+ failure (.*permanent)')
+
+# Regular expressions used to classify failure types.
+JOB_FAILURE_TYPES = {
+    'sys/docker': 'Cannot destroy container',
+    'crunch/node': 'User not found on host',
+    'slurm/comm':  'Communication connection failure'
+}
+
+def parse_arguments(arguments):
+    arg_parser = argparse.ArgumentParser(
+        description='Produce a report of Crunch failures within a specified time range')
+
+    arg_parser.add_argument(
+        '--start',
+        help='Start date and time')
+    arg_parser.add_argument(
+        '--end',
+        help='End date and time')
+
+    args = arg_parser.parse_args(arguments)
+
+    if args.start and not is_valid_timestamp(args.start):
+        raise ValueError(args.start)
+    if args.end and not is_valid_timestamp(args.end):
+        raise ValueError(args.end)
+
+    return args
+
+
+def api_timestamp(when=None):
+    """Returns a string representing the timestamp 'when' in a format
+    suitable for delivering to the API server.  Defaults to the
+    current time.
+    """
+    if when is None:
+        when = datetime.datetime.utcnow()
+    return when.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+
+def is_valid_timestamp(ts):
+    return re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z', ts)
+
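+# Example (illustrative):
+#   ts = api_timestamp(datetime.datetime(2019, 3, 14, 10, 0))
+#   # => '2019-03-14T10:00:00Z'
+#   is_valid_timestamp(ts)  # => a truthy match object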
+
+def jobs_created_between_dates(api, start, end):
+    return arvados.util.list_all(
+        api.jobs().list,
+        filters=json.dumps([ ['created_at', '>=', start],
+                             ['created_at', '<=', end] ]))
+
+
+def job_logs(api, job):
+    # Returns the contents of the log for this job (as an array of lines).
+    if job['log']:
+        log_collection = arvados.CollectionReader(job['log'], api)
+        log_filename = "{}.log.txt".format(job['uuid'])
+        return log_collection.open(log_filename).readlines()
+    return []
+
+
+user_names = {}
+def job_user_name(api, user_uuid):
+    def _lookup_user_name(api, user_uuid):
+        try:
+            return api.users().get(uuid=user_uuid).execute()['full_name']
+        except arvados.errors.ApiError:
+            return user_uuid
+
+    if user_uuid not in user_names:
+        user_names[user_uuid] = _lookup_user_name(api, user_uuid)
+    return user_names[user_uuid]
+
+
+job_pipeline_names = {}
+def job_pipeline_name(api, job_uuid):
+    def _lookup_pipeline_name(api, job_uuid):
+        try:
+            pipelines = api.pipeline_instances().list(
+                filters='[["components", "like", "%{}%"]]'.format(job_uuid)).execute()
+            pi = pipelines['items'][0]
+            if pi['name']:
+                return pi['name']
+            else:
+                # Use the pipeline template name
+                pt = api.pipeline_templates().get(uuid=pi['pipeline_template_uuid']).execute()
+                return pt['name']
+        except (TypeError, ValueError, IndexError):
+            return ""
+
+    if job_uuid not in job_pipeline_names:
+        job_pipeline_names[job_uuid] = _lookup_pipeline_name(api, job_uuid)
+    return job_pipeline_names[job_uuid]
+
+
+def is_failed_task(logline):
+    return FAILED_TASK_REGEX.search(logline) != None
+
+
+def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
+    args = parse_arguments(arguments)
+
+    api = arvados.api('v1')
+
+    now = datetime.datetime.utcnow()
+    start_time = args.start or api_timestamp(now - datetime.timedelta(days=1))
+    end_time = args.end or api_timestamp(now)
+
+    # Find all jobs created within the specified window,
+    # and their corresponding job logs.
+    jobs_created = jobs_created_between_dates(api, start_time, end_time)
+    jobs_by_state = {}
+    for job in jobs_created:
+        jobs_by_state.setdefault(job['state'], [])
+        jobs_by_state[job['state']].append(job)
+
+    # Find failed jobs and record the job failure text.
+
+    # failure_stats maps failure types (e.g. "sys/docker") to
+    # a set of job UUIDs that failed for that reason.
+    failure_stats = {}
+    for job in jobs_by_state.get('Failed', []):
+        job_uuid = job['uuid']
+        logs = job_logs(api, job)
+        # Find the first permanent task failure, and collect the
+        # preceding log lines.
+        failure_type = None
+        for i, lg in enumerate(logs):
+            if is_failed_task(lg):
+                # Get preceding log record to provide context.
+                log_start = i - LOG_CONTEXT_LINES if i >= LOG_CONTEXT_LINES else 0
+                log_end = i + 1
+                lastlogs = ''.join(logs[log_start:log_end])
+                # Try to identify the type of failure.
+                for key, rgx in JOB_FAILURE_TYPES.iteritems():
+                    if re.search(rgx, lastlogs):
+                        failure_type = key
+                        break
+            if failure_type is not None:
+                break
+        if failure_type is None:
+            failure_type = 'unknown'
+        failure_stats.setdefault(failure_type, set())
+        failure_stats[failure_type].add(job_uuid)
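+    # At this point failure_stats might look like (illustrative):
+    #   {'sys/docker': set(['zzzzz-8i9sb-0vsrcqi7whchuil']), 'unknown': set([...])}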
+
+    # Report percentages of successful, failed and unfinished jobs.
+    print "Start: {:20s}".format(start_time)
+    print "End:   {:20s}".format(end_time)
+    print ""
+
+    print "Overview"
+    print ""
+
+    job_start_count = len(jobs_created)
+    print "  {: <25s} {:4d}".format('Started', job_start_count)
+    for state in ['Complete', 'Failed', 'Queued', 'Cancelled', 'Running']:
+        if state in jobs_by_state:
+            job_count = len(jobs_by_state[state])
+            job_percentage = job_count / float(job_start_count)
+            print "  {: <25s} {:4d} ({: >4.0%})".format(state,
+                                                        job_count,
+                                                        job_percentage)
+    print ""
+
+    # Report failure types.
+    failure_summary = ""
+    failure_detail = ""
+
+    # Generate a mapping from failed job uuids to job records, to assist
+    # in generating detailed statistics for job failures.
+    jobs_failed_map = { job['uuid']: job for job in jobs_by_state.get('Failed', []) }
+
+    # sort the failure stats in descending order by occurrence.
+    sorted_failures = sorted(failure_stats,
+                             reverse=True,
+                             key=lambda failure_type: len(failure_stats[failure_type]))
+    for failtype in sorted_failures:
+        job_uuids = failure_stats[failtype]
+        failstat = "  {: <25s} {:4d} ({: >4.0%})\n".format(
+            failtype,
+            len(job_uuids),
+            len(job_uuids) / float(len(jobs_by_state['Failed'])))
+        failure_summary += failstat
+        failure_detail += failstat
+        for j in job_uuids:
+            job_info = jobs_failed_map[j]
+            job_owner = job_user_name(api, job_info['modified_by_user_uuid'])
+            job_name = job_pipeline_name(api, job_info['uuid'])
+            failure_detail += "    {}  {: <15.15s}  {:29.29s}\n".format(j, job_owner, job_name)
+        failure_detail += "\n"
+
+    print "Failures by class"
+    print ""
+    print failure_summary
+
+    print "Failures by class (detail)"
+    print ""
+    print failure_detail
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/services/api/script/fail-jobs.rb b/services/api/script/fail-jobs.rb
new file mode 100755 (executable)
index 0000000..a24b58d
--- /dev/null
@@ -0,0 +1,21 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'trollop'
+
+opts = Trollop::options do
+  banner 'Fail jobs that have state=="Running".'
+  banner 'Options:'
+  opt(:before,
+      'fail only jobs that started before the given time (or "reboot")',
+      type: :string)
+end
+
+ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"
+require File.dirname(__FILE__) + '/../config/boot'
+require File.dirname(__FILE__) + '/../config/environment'
+require Rails.root.join('lib/crunch_dispatch.rb')
+
+CrunchDispatch.new.fail_jobs before: opts[:before]
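+
+# Example (hypothetical environment and argument):
+#   script/fail-jobs.rb production --before reboot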
diff --git a/services/api/script/get_anonymous_user_token.rb b/services/api/script/get_anonymous_user_token.rb
new file mode 100755 (executable)
index 0000000..93c8318
--- /dev/null
@@ -0,0 +1,70 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Get or create an anonymous user token.
+# With the --get option, an existing anonymous user token is returned;
+# if none exists, one is created. Without --get, a new token is always
+# created and returned.
+
+require 'trollop'
+
+opts = Trollop::options do
+  banner ''
+  banner "Usage: get_anonymous_user_token "
+  banner ''
+  opt :get, <<-eos
+Get an existing anonymous user token. If no such token exists \
+or if this option is omitted, a new token is created and returned.
+  eos
+  opt :token, "token to create (optional)", :type => :string
+end
+
+get_existing = opts[:get]
+supplied_token = opts[:token]
+
+require File.dirname(__FILE__) + '/../config/environment'
+
+include ApplicationHelper
+act_as_system_user
+
+def create_api_client_auth(supplied_token=nil)
+  # If a token was supplied, make sure it doesn't already exist;
+  # otherwise fall through and create it below.
+  if supplied_token
+    if ApiClientAuthorization.where(api_token: supplied_token).first
+      raise "Token exists, aborting!"
+    end
+  end
+
+  api_client_auth = ApiClientAuthorization.
+    new(user: anonymous_user,
+        api_client_id: 0,
+        expires_at: Time.now + 100.years,
+        scopes: ['GET /'],
+        api_token: supplied_token)
+  api_client_auth.save!
+  api_client_auth.reload
+  api_client_auth
+end
+
+if get_existing
+  api_client_auth = ApiClientAuthorization.
+    where('user_id=?', anonymous_user.id.to_i).
+    where('expires_at>?', Time.now).
+    select { |auth| auth.scopes == ['GET /'] }.
+    first
+end
+
+# Either --get was not given, or no existing token was found: create one.
+if !api_client_auth
+  api_client_auth = create_api_client_auth(supplied_token)
+end
+
+# print it to the console
+puts api_client_auth.api_token
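+
+# Examples (hypothetical):
+#   script/get_anonymous_user_token.rb --get   # reuse an existing anonymous token, if any
+#   script/get_anonymous_user_token.rb         # always create and print a new token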
diff --git a/services/api/script/migrate-gitolite-to-uuid-storage.rb b/services/api/script/migrate-gitolite-to-uuid-storage.rb
new file mode 100755 (executable)
index 0000000..91acf3e
--- /dev/null
@@ -0,0 +1,224 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+#
+# Prior to April 2015, Arvados Gitolite integration stored repositories by
+# name.  To improve user repository management, we switched to storing
+# repositories by UUID, and aliasing them to names.  This makes it easy to
+# have rich name hierarchies, and allow users to rename repositories.
+#
+# This script will migrate a name-based Gitolite configuration to a UUID-based
+# one.  To use it:
+#
+# 1. Change the value of REPOS_DIR below, if needed.
+# 2. Install this script in the same directory as `update-gitolite.rb`.
+# 3. Ensure that no *other* users can access Gitolite: edit gitolite's
+#    authorized_keys file so it only contains the arvados_git_user key,
+#    and disable the update-gitolite cron job.
+# 4. Run this script: `ruby migrate-gitolite-to-uuid-storage.rb production`.
+# 5. Undo step 3.
+
+require 'rubygems'
+require 'pp'
+require 'arvados'
+require 'tempfile'
+require 'yaml'
+
+REPOS_DIR = "/var/lib/gitolite/repositories"
+
+# Default is development
+production = ARGV[0] == "production"
+
+ENV["RAILS_ENV"] = "development"
+ENV["RAILS_ENV"] = "production" if production
+
+DEBUG = 1
+
+# load and merge in the environment-specific application config info
+# if present, overriding base config parameters as specified
+path = File.dirname(__FILE__) + '/config/arvados-clients.yml'
+if File.exist?(path) then
+  cp_config = YAML.load_file(path)[ENV['RAILS_ENV']]
+else
+  puts "Please create a\n " + File.dirname(__FILE__) + "/config/arvados-clients.yml\n file"
+  exit 1
+end
+
+gitolite_url = cp_config['gitolite_url']
+gitolite_arvados_git_user_key = cp_config['gitolite_arvados_git_user_key']
+
+gitolite_tmpdir = File.join(File.absolute_path(File.dirname(__FILE__)),
+                            cp_config['gitolite_tmp'])
+gitolite_admin = File.join(gitolite_tmpdir, 'gitolite-admin')
+gitolite_keydir = File.join(gitolite_admin, 'keydir', 'arvados')
+
+ENV['ARVADOS_API_HOST'] = cp_config['arvados_api_host']
+ENV['ARVADOS_API_TOKEN'] = cp_config['arvados_api_token']
+if cp_config['arvados_api_host_insecure']
+  ENV['ARVADOS_API_HOST_INSECURE'] = 'true'
+else
+  ENV.delete('ARVADOS_API_HOST_INSECURE')
+end
+
+def ensure_directory(path, mode)
+  begin
+    Dir.mkdir(path, mode)
+  rescue Errno::EEXIST
+  end
+end
+
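+# replace_file writes the new contents to a Tempfile in the destination
+# directory and renames it over the old path, so concurrent readers never
+# observe a partially written file.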
+def replace_file(path, contents)
+  unlink_now = true
+  dirname, basename = File.split(path)
+  new_file = Tempfile.new([basename, ".tmp"], dirname)
+  begin
+    new_file.write(contents)
+    new_file.flush
+    File.rename(new_file, path)
+    unlink_now = false
+  ensure
+    new_file.close(unlink_now)
+  end
+end
+
+def file_has_contents?(path, contents)
+  begin
+    IO.read(path) == contents
+  rescue Errno::ENOENT
+    false
+  end
+end
+
+module TrackCommitState
+  module ClassMethods
+    # Note that all classes that include TrackCommitState will have
+    # @@need_commit = true if any of them set it.  Since this flag reports
+    # a boolean state of the underlying git repository, that's OK in the
+    # current implementation.
+    @@need_commit = false
+
+    def changed?
+      @@need_commit
+    end
+
+    def ensure_in_git(path, contents)
+      unless file_has_contents?(path, contents)
+        replace_file(path, contents)
+        system("git", "add", path)
+        @@need_commit = true
+      end
+    end
+  end
+
+  def ensure_in_git(path, contents)
+    self.class.ensure_in_git(path, contents)
+  end
+
+  def self.included(base)
+    base.extend(ClassMethods)
+  end
+end
+
+class Repository
+  include TrackCommitState
+
+  @@aliases = {}
+
+  def initialize(arv_repo)
+    @arv_repo = arv_repo
+  end
+
+  def self.ensure_system_config(conf_root)
+    ensure_in_git(File.join(conf_root, "arvadosaliases.pl"), alias_config)
+  end
+
+  def self.rename_repos(repos_root)
+    @@aliases.each_pair do |uuid, name|
+      begin
+        File.rename(File.join(repos_root, "#{name}.git/"),
+                    File.join(repos_root, "#{uuid}.git"))
+      rescue Errno::ENOENT
+      end
+      if name == "arvados"
+        Dir.chdir(repos_root) { File.symlink("#{uuid}.git/", "arvados.git") }
+      end
+    end
+  end
+
+  def ensure_config(conf_root)
+    return if name.nil?
+    @@aliases[uuid] = name
+    name_conf_path = auto_conf_path(conf_root, name)
+    return unless File.exist?(name_conf_path)
+    conf_file = IO.read(name_conf_path)
+    conf_file.gsub!(/^repo #{Regexp.escape(name)}$/m, "repo #{uuid}")
+    ensure_in_git(auto_conf_path(conf_root, uuid), conf_file)
+    File.unlink(name_conf_path)
+    system("git", "rm", "--quiet", name_conf_path)
+  end
+
+  private
+
+  def auto_conf_path(conf_root, basename)
+    File.join(conf_root, "conf", "auto", "#{basename}.conf")
+  end
+
+  def uuid
+    @arv_repo[:uuid]
+  end
+
+  def name
+    if @arv_repo[:name].nil?
+      nil
+    else
+      @clean_name ||=
+        @arv_repo[:name].sub(/^[^A-Za-z]+/, "").gsub(/[^\w\.\/]/, "")
+    end
+  end
+
+  def self.alias_config
+    # @@aliases maps repository UUIDs to names (see rename_repos above);
+    # the gitolite alias file must map each name to its UUID directory.
+    conf_s = "{\n"
+    @@aliases.sort.each do |(repo_uuid, repo_name)|
+      conf_s += "\t'#{repo_name}' \t=> '#{repo_uuid}',\n"
+    end
+    conf_s += "};\n"
+    conf_s
+  end
+end
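+
+# The generated arvadosaliases.pl looks like, e.g. (illustrative):
+#   {
+#       'myname/myrepo'     => 'zzzzz-s0uqq-382brsig8rp3064',
+#   };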
+
+begin
+  # Get our local gitolite-admin repo up to snuff
+  if not File.exist?(gitolite_admin) then
+    ensure_directory(gitolite_tmpdir, 0700)
+    Dir.chdir(gitolite_tmpdir)
+    `git clone #{gitolite_url}`
+    Dir.chdir(gitolite_admin)
+  else
+    Dir.chdir(gitolite_admin)
+    `git pull`
+  end
+
+  arv = Arvados.new
+  permissions = arv.repository.get_all_permissions
+
+  permissions[:repositories].each do |repo_record|
+    repo = Repository.new(repo_record)
+    repo.ensure_config(gitolite_admin)
+  end
+  Repository.ensure_system_config(gitolite_admin)
+
+  message = "#{Time.now().to_s}: migrate to storing repositories by UUID"
+  Dir.chdir(gitolite_admin)
+  `git add --all`
+  `git commit -m '#{message}'`
+  Repository.rename_repos(REPOS_DIR)
+  `git push`
+
+rescue => bang
+  puts "Error: " + bang.to_s
+  puts bang.backtrace.join("\n")
+  exit 1
+end
+
diff --git a/services/api/script/rails b/services/api/script/rails
new file mode 100755 (executable)
index 0000000..901460c
--- /dev/null
@@ -0,0 +1,34 @@
+#!/usr/bin/env ruby
+# This command will automatically be run when you run "rails" with Rails 3 gems installed from the root of your application.
+
+
+##### SSL - ward, 2012-10-15
+require 'rubygems'
+require 'rails/commands/server'
+require 'rack'
+require 'webrick'
+require 'webrick/https'
+
+module Rails
+    class Server < ::Rack::Server
+        def default_options
+            super.merge({
+                :Port => 3030,
+                :environment => (ENV['RAILS_ENV'] || "development").dup,
+                :daemonize => false,
+                :debugger => false,
+                :pid => File.expand_path("tmp/pids/server.pid"),
+                :config => File.expand_path("config.ru"),
+                :SSLEnable => true,
+                :SSLVerifyClient => OpenSSL::SSL::VERIFY_NONE,
+                :SSLCertName => [["CN", "#{WEBrick::Utils::getservername} #{Time.now().to_s}"]]
+            })
+        end
+    end
+end
+######### /SSL
+
+
+APP_PATH = File.expand_path('../../config/application',  __FILE__)
+require File.expand_path('../../config/boot',  __FILE__)
+require 'rails/commands'
diff --git a/services/api/script/rake_test.sh b/services/api/script/rake_test.sh
new file mode 100755 (executable)
index 0000000..458ae73
--- /dev/null
@@ -0,0 +1,16 @@
+#! /bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This script invokes `rake test' in a fresh Docker instance of the
+# API server, e.g.:
+#   docker run -t -i arvados/api /usr/src/arvados/services/api/script/rake_test.sh
+
+/etc/init.d/postgresql start
+
+export RAILS_ENV=test
+cd /usr/src/arvados/services/api
+cp config/environments/test.rb.example config/environments/test.rb
+bundle exec rake db:setup
+bundle exec rake test
diff --git a/services/api/script/restart-dns-server b/services/api/script/restart-dns-server
new file mode 100755 (executable)
index 0000000..ec81fbc
--- /dev/null
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# usage:
+# "restart-dns-server <path-to-restart.txt>" (restart now if needed)
+# or
+# "restart-dns-server <path-to-restart.txt> -d" (wait for restart to be needed, restart, repeat)
+
+RESTART_TXT_PATH=$1
+
+if [[ "$RESTART_TXT_PATH" == "" ]]; then
+  echo
+  echo "Usage: "
+  echo "   $0 <path-to-restart.txt>      # restart now if needed"
+  echo "   $0 <path-to-restart.txt> -d   # wait for restart to be needed, restart, repeat"
+  echo
+  exit 1
+fi
+
+while :
+do
+  if [ -e "$RESTART_TXT_PATH" ]; then
+    RESTART_COMMAND=$(cat "$RESTART_TXT_PATH")
+    echo "restart command: $RESTART_COMMAND"
+    rm -f "$RESTART_TXT_PATH"
+    echo restarting
+    $RESTART_COMMAND
+  fi
+  if [ "-d" = "$2" ]
+  then
+    sleep 2
+  else
+    exit 0
+  fi
+done
diff --git a/services/api/script/salvage_collection.rb b/services/api/script/salvage_collection.rb
new file mode 100755 (executable)
index 0000000..75b02e5
--- /dev/null
@@ -0,0 +1,30 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Take two input parameters: a collection uuid and reason
+# Get "src_collection" with the given uuid
+# Create a new collection with:
+#   src_collection.manifest_text as "invalid_manifest_text.txt"
+#   Locators from src_collection.manifest_text as "salvaged_data"
+# Update src_collection:
+#   Set src_collection.manifest_text to: ""
+#   Append to src_collection.name: " (reason; salvaged data at new_collection.uuid)"
+#   Set portable_data_hash to "d41d8cd98f00b204e9800998ecf8427e+0"
+
+require 'trollop'
+require './lib/salvage_collection'
+include SalvageCollection
+
+opts = Trollop::options do
+  banner ''
+  banner "Usage: salvage_collection.rb " +
+    "{uuid} {reason}"
+  banner ''
+  opt :uuid, "uuid of the collection to be salvaged.", type: :string, required: true
+  opt :reason, "Reason for salvaging.", type: :string, required: false
+end
+
+# Salvage the collection with the given uuid
+SalvageCollection.salvage_collection opts[:uuid], opts[:reason]
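+
+# Example (hypothetical uuid and reason):
+#   script/salvage_collection.rb --uuid zzzzz-4zz18-znfnqtbbv4spc3w --reason "invalid manifest"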
diff --git a/services/api/script/setup-new-user.rb b/services/api/script/setup-new-user.rb
new file mode 100755 (executable)
index 0000000..9f0219e
--- /dev/null
@@ -0,0 +1,74 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+abort 'Error: Ruby >= 1.9.3 required.' if RUBY_VERSION < '1.9.3'
+
+require 'logger'
+require 'trollop'
+
+log = Logger.new STDERR
+log.progname = $0.split('/').last
+
+opts = Trollop::options do
+  banner ''
+  banner "Usage: #{log.progname} " +
+    "{user_uuid_or_email} {user_and_repo_name} {vm_uuid}"
+  banner ''
+  opt :debug, <<-eos
+Show debug messages.
+  eos
+  opt :openid_prefix, <<-eos, default: 'https://www.google.com/accounts/o8/id'
+If creating a new user record, require authentication from an OpenID \
+with this OpenID prefix *and* a matching email address in order to \
+claim the account.
+  eos
+  opt :send_notification_email, <<-eos, default: 'true'
+Send notification email after successfully setting up the user.
+  eos
+end
+
+log.level = (ENV['DEBUG'] || opts.debug) ? Logger::DEBUG : Logger::WARN
+
+if ARGV.count != 3
+  Trollop::die "required arguments are missing"
+end
+
+user_arg, user_repo_name, vm_uuid = ARGV
+
+require 'arvados'
+arv = Arvados.new(api_version: 'v1')
+
+# Look up the given user by uuid or, failing that, email address.
+begin
+  found_user = arv.user.get(uuid: user_arg)
+rescue Arvados::TransactionFailedError
+  found = arv.user.list(where: {email: user_arg})[:items]
+
+  if found.count == 0
+    if !user_arg.match(/\w\@\w+\.\w+/)
+      abort "About to create new user, but #{user_arg.inspect} " +
+               "does not look like an email address. Stop."
+    end
+  elsif found.count != 1
+    abort "Found #{found.count} users with email. Stop."
+  else
+    found_user = found.first
+  end
+end
+
+# Invoke user setup method
+if (found_user)
+  user = arv.user.setup uuid: found_user[:uuid], repo_name: user_repo_name,
+          vm_uuid: vm_uuid, openid_prefix: opts.openid_prefix,
+          send_notification_email: opts.send_notification_email
+else
+  user = arv.user.setup user: {email: user_arg}, repo_name: user_repo_name,
+          vm_uuid: vm_uuid, openid_prefix: opts.openid_prefix,
+          send_notification_email: opts.send_notification_email
+end
+
+log.info {"user uuid: " + user[:uuid]}
+
+puts user.inspect
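+
+# Example (hypothetical arguments):
+#   setup-new-user.rb user@example.com testrepo zzzzz-2x53u-382brsig8rp3064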
diff --git a/services/api/test/factories/api_client.rb b/services/api/test/factories/api_client.rb
new file mode 100644 (file)
index 0000000..ee2017b
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FactoryBot.define do
+  factory :api_client do
+    is_trusted { false }
+    to_create do |instance|
+      CurrentApiClientHelper.act_as_system_user do
+        instance.save!
+      end
+    end
+  end
+end
diff --git a/services/api/test/factories/api_client_authorization.rb b/services/api/test/factories/api_client_authorization.rb
new file mode 100644 (file)
index 0000000..af2660a
--- /dev/null
@@ -0,0 +1,23 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FactoryBot.define do
+  factory :api_client_authorization do
+    api_client
+    scopes { ['all'] }
+
+    trait :trusted do
+      association :api_client, factory: :api_client, is_trusted: true
+    end
+    factory :token do
+      # Just provides shorthand for "create :api_client_authorization"
+    end
+
+    to_create do |instance|
+      CurrentApiClientHelper.act_as_user instance.user do
+        instance.save!
+      end
+    end
+  end
+end
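+
+# Example usage in a test (hypothetical):
+#   auth = FactoryBot.create(:token, user: FactoryBot.create(:active_user))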
diff --git a/services/api/test/factories/group.rb b/services/api/test/factories/group.rb
new file mode 100644 (file)
index 0000000..7af5735
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FactoryBot.define do
+  factory :group do
+  end
+end
diff --git a/services/api/test/factories/link.rb b/services/api/test/factories/link.rb
new file mode 100644 (file)
index 0000000..291a04e
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FactoryBot.define do
+  factory :link do
+    factory :permission_link do
+      link_class { 'permission' }
+    end
+  end
+end
diff --git a/services/api/test/factories/user.rb b/services/api/test/factories/user.rb
new file mode 100644 (file)
index 0000000..91d9395
--- /dev/null
@@ -0,0 +1,50 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class CurrentApiClientHelper
+  extend CurrentApiClient
+end
+
+FactoryBot.define do
+  factory :user do
+    transient do
+      join_groups { [] }
+    end
+    after :create do |user, evaluator|
+      CurrentApiClientHelper.act_as_system_user do
+        evaluator.join_groups.each do |g|
+          Link.create!(tail_uuid: user.uuid,
+                       head_uuid: g.uuid,
+                       link_class: 'permission',
+                       name: 'can_read')
+          Link.create!(tail_uuid: g.uuid,
+                       head_uuid: user.uuid,
+                       link_class: 'permission',
+                       name: 'can_read')
+        end
+      end
+    end
+    first_name { "Factory" }
+    last_name { "Factory" }
+    identity_url do
+      "https://example.com/#{rand(2**24).to_s(36)}"
+    end
+    factory :active_user do
+      is_active { true }
+      after :create do |user|
+        CurrentApiClientHelper.act_as_system_user do
+          Link.create!(tail_uuid: user.uuid,
+                       head_uuid: Group.where('uuid ~ ?', '-f+$').first.uuid,
+                       link_class: 'permission',
+                       name: 'can_read')
+        end
+      end
+    end
+    to_create do |instance|
+      CurrentApiClientHelper.act_as_system_user do
+        instance.save!
+      end
+    end
+  end
+end
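+
+# Example usage in a test (hypothetical):
+#   user = FactoryBot.create(:active_user, join_groups: [FactoryBot.create(:group)])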
diff --git a/services/api/test/fixtures/.gitkeep b/services/api/test/fixtures/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/test/fixtures/api_client_authorizations.yml b/services/api/test/fixtures/api_client_authorizations.yml
new file mode 100644 (file)
index 0000000..d8ef631
--- /dev/null
@@ -0,0 +1,365 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/Fixtures.html
+
+system_user:
+  uuid: zzzzz-gj3su-017z32aux8dg2s1
+  api_client: untrusted
+  user: system_user
+  api_token: systemusertesttoken1234567890aoeuidhtnsqjkxbmwvzpy
+  expires_at: 2038-01-01 00:00:00
+
+admin:
+  uuid: zzzzz-gj3su-027z32aux8dg2s1
+  api_client: untrusted
+  user: admin
+  api_token: 4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h
+  expires_at: 2038-01-01 00:00:00
+
+admin_trustedclient:
+  uuid: zzzzz-gj3su-037z32aux8dg2s1
+  api_client: trusted_workbench
+  user: admin
+  api_token: 1a9ffdcga2o7cw8q12dndskomgs1ygli3ns9k2o9hgzgmktc78
+  expires_at: 2038-01-01 00:00:00
+
+data_manager:
+  uuid: zzzzz-gj3su-047z32aux8dg2s1
+  api_client: untrusted
+  user: system_user
+  api_token: 320mkve8qkswstz7ff61glpk3mhgghmg67wmic7elw4z41pke1
+  expires_at: 2038-01-01 00:00:00
+  scopes:
+    - GET /arvados/v1/collections
+    - GET /arvados/v1/keep_services
+    - GET /arvados/v1/keep_services/accessible
+    - GET /arvados/v1/users/current
+    - POST /arvados/v1/logs
+
+miniadmin:
+  uuid: zzzzz-gj3su-057z32aux8dg2s1
+  api_client: untrusted
+  user: miniadmin
+  api_token: 2zb2y9pw3e70270te7oe3ewaantea3adyxjascvkz0zob7q7xb
+  expires_at: 2038-01-01 00:00:00
+
+rominiadmin:
+  uuid: zzzzz-gj3su-067z32aux8dg2s1
+  api_client: untrusted
+  user: rominiadmin
+  api_token: 5tsb2pc3zlatn1ortl98s2tqsehpby88wmmnzmpsjmzwa6payh
+  expires_at: 2038-01-01 00:00:00
+
+active:
+  uuid: zzzzz-gj3su-077z32aux8dg2s1
+  api_client: untrusted
+  user: active
+  api_token: 3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi
+  expires_at: 2038-01-01 00:00:00
+
+active_trustedclient:
+  uuid: zzzzz-gj3su-087z32aux8dg2s1
+  api_client: trusted_workbench
+  user: active
+  api_token: 27bnddk6x2nmq00a1e3gq43n9tsl5v87a3faqar2ijj8tud5en
+  expires_at: 2038-01-01 00:00:00
+
+active_noscope:
+  uuid: zzzzz-gj3su-097z32aux8dg2s1
+  api_client: untrusted
+  user: active
+  api_token: activenoscopeabcdefghijklmnopqrstuvwxyz12345678901
+  expires_at: 2038-01-01 00:00:00
+  scopes: []
+
+project_viewer:
+  uuid: zzzzz-gj3su-107z32aux8dg2s1
+  api_client: untrusted
+  user: project_viewer
+  api_token: projectviewertoken1234567890abcdefghijklmnopqrstuv
+  expires_at: 2038-01-01 00:00:00
+
+project_viewer_trustedclient:
+  uuid: zzzzz-gj3su-117z32aux8dg2s1
+  api_client: trusted_workbench
+  user: project_viewer
+  api_token: projectviewertrustedtoken1234567890abcdefghijklmno
+  expires_at: 2038-01-01 00:00:00
+
+subproject_admin:
+  uuid: zzzzz-gj3su-127z32aux8dg2s1
+  api_client: untrusted
+  user: subproject_admin
+  api_token: subprojectadmintoken1234567890abcdefghijklmnopqrst
+  expires_at: 2038-01-01 00:00:00
+
+admin_vm:
+  uuid: zzzzz-gj3su-137z32aux8dg2s1
+  api_client: untrusted
+  user: admin
+  api_token: adminvirtualmachineabcdefghijklmnopqrstuvwxyz12345
+  expires_at: 2038-01-01 00:00:00
+  # scope refers to the testvm fixture.
+  scopes: ["GET /arvados/v1/virtual_machines/zzzzz-2x53u-382brsig8rp3064/logins"]
+
+admin_noscope:
+  uuid: zzzzz-gj3su-147z32aux8dg2s1
+  api_client: untrusted
+  user: admin
+  api_token: adminnoscopeabcdefghijklmnopqrstuvwxyz123456789012
+  expires_at: 2038-01-01 00:00:00
+  scopes: []
+
+active_all_collections:
+  uuid: zzzzz-gj3su-157z32aux8dg2s1
+  api_client: untrusted
+  user: active
+  api_token: activecollectionsabcdefghijklmnopqrstuvwxyz1234567
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /arvados/v1/collections/", "GET /arvados/v1/keep_services/accessible"]
+
+active_userlist:
+  uuid: zzzzz-gj3su-167z32aux8dg2s1
+  api_client: untrusted
+  user: active
+  api_token: activeuserlistabcdefghijklmnopqrstuvwxyz1234568900
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /arvados/v1/users"]
+
+active_specimens:
+  uuid: zzzzz-gj3su-177z32aux8dg2s1
+  api_client: untrusted
+  user: active
+  api_token: activespecimensabcdefghijklmnopqrstuvwxyz123456890
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /arvados/v1/specimens/"]
+
+active_apitokens:
+  uuid: zzzzz-gj3su-187z32aux8dg2s1
+  api_client: trusted_workbench
+  user: active
+  api_token: activeapitokensabcdefghijklmnopqrstuvwxyz123456789
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /arvados/v1/api_client_authorizations",
+           "POST /arvados/v1/api_client_authorizations"]
+
+active_readonly:
+  uuid: zzzzz-gj3su-197z32aux8dg2s1
+  api_client: untrusted
+  user: active
+  api_token: activereadonlyabcdefghijklmnopqrstuvwxyz1234568790
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /"]
+
+spectator:
+  uuid: zzzzz-gj3su-207z32aux8dg2s1
+  api_client: untrusted
+  user: spectator
+  api_token: zw2f4gwx8hw8cjre7yp6v1zylhrhn3m5gvjq73rtpwhmknrybu
+  expires_at: 2038-01-01 00:00:00
+
+spectator_specimens:
+  uuid: zzzzz-gj3su-217z32aux8dg2s1
+  api_client: untrusted
+  user: spectator
+  api_token: spectatorspecimensabcdefghijklmnopqrstuvwxyz123245
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /arvados/v1/specimens", "GET /arvados/v1/specimens/",
+           "POST /arvados/v1/specimens"]
+
+inactive:
+  uuid: zzzzz-gj3su-227z32aux8dg2s1
+  api_client: untrusted
+  user: inactive
+  api_token: 5s29oj2hzmcmpq80hx9cta0rl5wuf3xfd6r7disusaptz7h9m0
+  expires_at: 2038-01-01 00:00:00
+
+inactive_uninvited:
+  uuid: zzzzz-gj3su-237z32aux8dg2s1
+  api_client: untrusted
+  user: inactive_uninvited
+  api_token: 62mhllc0otp78v08e3rpa3nsmf8q8ogk47f7u5z4erp5gpj9al
+  expires_at: 2038-01-01 00:00:00
+
+inactive_uninvited_trustedclient:
+  uuid: zzzzz-gj3su-228z32aux8dg2s1
+  api_client: trusted_workbench
+  user: inactive_uninvited
+  api_token: 7s29oj2hzmcmpq80hx9cta0rl5wuf3xfd6r7disusaptz7h9m0
+  expires_at: 2038-01-01 00:00:00
+
+inactive_but_signed_user_agreement:
+  uuid: zzzzz-gj3su-247z32aux8dg2s1
+  api_client: untrusted
+  user: inactive_but_signed_user_agreement
+  api_token: 64k3bzw37iwpdlexczj02rw3m333rrb8ydvn2qq99ohv68so5k
+  expires_at: 2038-01-01 00:00:00
+
+expired:
+  uuid: zzzzz-gj3su-257z32aux8dg2s1
+  api_client: untrusted
+  user: active
+  api_token: 2ym314ysp27sk7h943q6vtc378srb06se3pq6ghurylyf3pdmx
+  expires_at: 1970-01-01 00:00:00
+
+expired_trustedclient:
+  uuid: zzzzz-gj3su-267z32aux8dg2s1
+  api_client: trusted_workbench
+  user: active
+  api_token: 5hpni7izokzcatku2896xxwqdbt5ptomn04r6auc7fohnli82v
+  expires_at: 1970-01-01 00:00:00
+
+valid_token_deleted_user:
+  uuid: zzzzz-gj3su-277z32aux8dg2s1
+  api_client: trusted_workbench
+  user_id: 1234567
+  api_token: tewfa58099sndckyqhlgd37za6e47o6h03r9l1vpll23hudm8b
+  expires_at: 2038-01-01 00:00:00
+
+anonymous:
+  uuid: zzzzz-gj3su-287z32aux8dg2s1
+  api_client: untrusted
+  user: anonymous
+  api_token: 4kg6k6lzmp9kj4cpkcoxie964cmvjahbt4fod9zru44k4jqdmi
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /"]
+
+job_reader:
+  uuid: zzzzz-gj3su-297z32aux8dg2s1
+  api_client: untrusted
+  user: job_reader
+  api_token: e99512cdc0f3415c2428b9758f33bdfb07bc3561b00e86e7e6
+  expires_at: 2038-01-01 00:00:00
+
+job_reader2:
+  uuid: zzzzz-gj3su-jobreader2auth1
+  api_client: untrusted
+  user: job_reader2
+  api_token: jobreader2415c2428b9758f33bdfb07bc3561b0jobreader2
+  expires_at: 2038-01-01 00:00:00
+
+active_no_prefs:
+  uuid: zzzzz-gj3su-307z32aux8dg2s1
+  api_client: untrusted
+  user: active_no_prefs
+  api_token: 3kg612cdc0f3415c2428b9758f33bdfb07bc3561b00e86qdmi
+  expires_at: 2038-01-01 00:00:00
+
+active_no_prefs_profile_no_getting_started_shown:
+  uuid: zzzzz-gj3su-317z32aux8dg2s1
+  api_client: untrusted
+  user: active_no_prefs_profile_no_getting_started_shown
+  api_token: 3kg612cdc0f3415c242856758f33bdfb07bc3561b00e86qdmi
+  expires_at: 2038-01-01 00:00:00
+
+active_no_prefs_profile_with_getting_started_shown:
+  uuid: zzzzz-gj3su-327z32aux8dg2s1
+  api_client: untrusted
+  user: active_no_prefs_profile_with_getting_started_shown
+  api_token: 3kg612cdc0f3415c245786758f33bdfb07babcd1b00e86qdmi
+  expires_at: 2038-01-01 00:00:00
+
+active_with_prefs_profile_no_getting_started_shown:
+  uuid: zzzzz-gj3su-337z32aux8dg2s1
+  api_client: untrusted
+  user: active_with_prefs_profile_no_getting_started_shown
+  api_token: 3kg612cdc0f3415c245786758f33bdfb07befgh1b00e86qdmi
+  expires_at: 2038-01-01 00:00:00
+
+user_foo_in_sharing_group:
+  uuid: zzzzz-gj3su-347z32aux8dg2s1
+  api_client: untrusted
+  user: user_foo_in_sharing_group
+  api_token: 2p1pou8p4ls208mcbedeewlotghppenobcyrmyhq8pyf51xd8u
+  expires_at: 2038-01-01 00:00:00
+
+user_bar_in_sharing_group:
+  uuid: zzzzz-gj3su-62hryf5fht531mz
+  api_client: untrusted
+  user: user_bar_in_sharing_group
+  api_token: 5vy55akwq85vghh80wc2cuxl4p8psay73lkpqf5c2cxvp6rmm6
+  expires_at: 2038-01-01 00:00:00
+
+user1_with_load:
+  uuid: zzzzz-gj3su-357z32aux8dg2s1
+  api_client: untrusted
+  user: user1_with_load
+  api_token: 1234k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi
+  expires_at: 2038-01-01 00:00:00
+
+fuse:
+  uuid: zzzzz-gj3su-367z32aux8dg2s1
+  api_client: untrusted
+  user: fuse
+  api_token: 4nagbkv8eap0uok7pxm72nossq5asihls3yn5p4xmvqx5t5e7p
+  expires_at: 2038-01-01 00:00:00
+
+dispatch1:
+  uuid: zzzzz-gj3su-k9dvestay1plssr
+  api_client: untrusted
+  user: system_user
+  api_token: kwi8oowusvbutahacwk2geulqewy5oaqmpalczfna4b6bb0hfw
+  expires_at: 2038-01-01 00:00:00
+
+dispatch2:
+  uuid: zzzzz-gj3su-jrriu629zljsnuf
+  api_client: untrusted
+  user: system_user
+  api_token: pbe3v4v5oag83tjwxjh0a551j44xdu8t7ol5ljw3ixsq8oh50q
+  expires_at: 2038-01-01 00:00:00
+
+running_container_auth:
+  uuid: zzzzz-gj3su-077z32aux8dg2s2
+  api_client: untrusted
+  user: active
+  api_token: it2gl94mgu3rbn5s2d06vzh73ns1y6cthct0tvg82qdlsxvbwk
+  expires_at: 2038-01-01 00:00:00
+
+running_to_be_deleted_container_auth:
+  uuid: zzzzz-gj3su-ty6lvu9d7u7c2sq
+  api_client: untrusted
+  user: active
+  api_token: ge1pez7dkk7nqntwcsj922g2b7a2t27xz6nsx39r15kbcqmp55
+  expires_at: 2038-01-01 00:00:00
+
+permission_perftest:
+  uuid: zzzzz-gj3su-077z32anoj93boo
+  api_client: untrusted
+  user: permission_perftest
+  api_token: 3kg6k6lzmp9kjabonentustoecn5bahbt2fod9zru30k1jqdmi
+  expires_at: 2038-01-01 00:00:00
+
+foo_collection_sharing_token:
+  uuid: zzzzz-gj3su-gf02tdm4g1z3e3u
+  api_client: untrusted
+  user: active
+  api_token: iknqgmunrhgsyfok8uzjlwun9iscwm3xacmzmg65fa1j1lpdss
+  expires_at: 2038-01-01 00:00:00
+  scopes:
+  - GET /arvados/v1/collections/zzzzz-4zz18-znfnqtbbv4spc3w
+  - GET /arvados/v1/collections/zzzzz-4zz18-znfnqtbbv4spc3w/
+  - GET /arvados/v1/keep_services/accessible
+
+container_runtime_token:
+  uuid: zzzzz-gj3su-2nj68s291f50gd9
+  api_client: untrusted
+  user: container_runtime_token_user
+  api_token: 2d19ue6ofx26o3mm7fs9u6t7hov9um0v92dzwk1o2xed3abprw
+  expires_at: 2038-01-01 00:00:00
+
+crt_user:
+  uuid: zzzzz-gj3su-3r47qqy5ja5d54v
+  api_client: untrusted
+  user: container_runtime_token_user
+  api_token: 13z1tz9deoryml3twep0vsahi4862097pe5lsmesugnkgpgpwk
+  expires_at: 2038-01-01 00:00:00
+
+runtime_token_limited_scope:
+  uuid: zzzzz-gj3su-2fljvypjrr4yr9m
+  api_client: untrusted
+  user: container_runtime_token_user
+  api_token: 1fwc3be1m13qkypix2gd01i4bq5ju483zjfc0cf4babjseirbm
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /"]
diff --git a/services/api/test/fixtures/api_clients.yml b/services/api/test/fixtures/api_clients.yml
new file mode 100644 (file)
index 0000000..7b52273
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/Fixtures.html
+
+trusted_workbench:
+  uuid: zzzzz-ozdt8-teyxzyd8qllg11h
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: Official Workbench
+  url_prefix: https://official-workbench.local/
+  is_trusted: true
+
+untrusted:
+  uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: Untrusted
+  url_prefix: https://untrusted.local/
+  is_trusted: false
diff --git a/services/api/test/fixtures/authorized_keys.yml b/services/api/test/fixtures/authorized_keys.yml
new file mode 100644 (file)
index 0000000..1c14204
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+active:
+  uuid: zzzzz-fngyi-12nc9ov4osp8nae
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  authorized_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  key_type: SSH
+  name: active
+  public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCo+8pc/xNohU3Mo2pAieLohLJcWy9OmNOnsEWlegYYoeynkczimicKRmB2iP50v2oKrtshIXwigfU26b0rGEJayFvsA7FCstz5G/tJy3YJGnQUDmrQBuB8SsQDL/O0Nnh8B8XmKSlxuv3FxLyPhUmcxxjIUIEMWVMlIKAfzmySsPby/QREJffUkFPa+luNkOVd5cyvwd6dnl0SLbrqZgcF3fbkOLDVgv3oceIYLjcy/SjqGR4wtGWHFFuna0M2/5YEvWpxD/HNO3WkFEdlAUEEWpvd/u3bmHq2p7ADbaX9ZaNDb8YbjFIOUxaJh+Vf0V6nDhEnUPylzM07F3fnvXQM53Xu5oYA6cp0Com61MBaXUDwM/w6PS2RtF8CG3ICMs5AsIy+Cnsuowj3fRlK29dgZ7K2pYRV2SlQj4vxjwpUcQCL/TFv31VnCMFKQBqmqh8iwZV3U6LLc3cwL9COXnIPF4lXjODL3geWsBNXo3hfoj6qD+2/+9/zOZUtGbQXlBmNC/wG/cK1A1L4S9docZT4QAiaSCdwcLB68hIvQMEOpffoeQhNZj0SddLLdEyjJY6rfWjbmnV68TzXoDz26hoPtagD+wvHOxz3D8BQ9RIqfNI1jNlwVkoKNVfszIPmESwJCu99+6TnyJl4923MTEXNOrJ7LgVUemWchOlkTDINuw== active-user@arvados.local
+
+admin:
+  uuid: zzzzz-fngyi-g290j3i3u701duh
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  authorized_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  key_type: SSH
+  name: admin
+  public_key: ssh-dss AAAAB3NzaC1kc3MAAACBAKy1IDMGwa7/Yjas77vLSShBE3SzpPXqXu6nRMC9zdIoMdctjhfP+GOOyQQP12rMs16NYmfdOxX+sa2t9syI/8NhDxTmNbHVw2jHimC6SL02v8WHDIw2vaBCVN+CHdeYbZsBB/8/M+2PO3uUWbr0TjoXcxrKYScS/aTTjSAWRg4ZAAAAFQDR/xAdrewj1ORNIQs+kWWdjmiO0wAAAIBC+G92r2ZeGaHLCMI0foKnfuQzg9fKp5krEvE6tvRNju7iOqtB9xe1qsAqr6GPZQjfSrNPac6T1pxMoh+an4PfNs5xgBIpvy93oqALd4maQt6483vsIyVCw6nQD7s/8IpIHpwxFEFs5/5moYxzY64eY0ldSXJwvPsrBTruhuUdugAAAIBut96rWQYTnYUdngyUK9EoJzgKn3l7gg0IQoFC4hS96D8vUm0wIdSEQHt01pSc0KR1Nnb4JrnNz/qCH45wOy5oB9msQ/2Pq2brTDZJcIPcN1LbMCps9PetUruz1OjK1NzDuLmvsrP3GBLxJrtmrCoKHLzPZ6QSefW0OymFgaDFGg==
+
+spectator:
+  uuid: zzzzz-fngyi-3uze1ipbnz2c2c2
+  owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  authorized_user_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  key_type: SSH
+  name: spectator
+  public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJK4hxmgXzg1gty+91JfkpgikAZxTvFTQoaFUJYTHIygz2V3FgU64NkK3yfwh+bhs7n8YIMftuCHfojKEJTtedbiv/mYpItetzdOwYONCGSEk1VnfipGhnFvL7FZDESTxLN9KNve3ZmZh8HvO6s8fdlTlqTTNKpsdwLiQn2s3W1TWvru/NP504MD5qPeZ4+8jZEh/uiuRaeXqPDAlE9QGPV4FRAA1xo0dBZIrRMwQC8kOttq/i2pLgHq1xW9p4J23oV68O/kkeBb7VwrX3Av/M61kvRsP8tA5gqh+HMKVO2qTP4yG6eGkAobIokQAcyZetPQIDmfVeoB0NzwPfAy4r
+
+project_viewer:
+  uuid: zzzzz-fngyi-5d3av1396niwcej
+  owner_uuid: zzzzz-tpzed-projectviewer1a
+  authorized_user_uuid: zzzzz-tpzed-projectviewer1a
+  key_type: SSH
+  name: project_viewer
+  public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPkOJMQzT9n6RousrLMU7c/KFKTI7I5JifDIEtGJJ1MMZW0GVoxtXALU90HcaRjEOwGPvQPxj7IDYqXs2N9uvm8SUWJMiz6c8NIjhGTkUoOnTFl4E9YTvkkKNs0P+3eT1Y+6zfTcFJHKP3AR4kZX+oiPHowRpCIlnLjXCFxX+E+YI554A7bS4yfOZO9lf6vtiT9I+6EqxC8a0hzZauPC1ZC3d/AFgBnrXJ2fBlAEySznru39quHN1u3v4qHTyaO2pDbG6vdI6O3JDCXCJKRv/B2FLuLTlzB0YesM1FiE6w8QgPxqb42B+uWTZb969UZliH8Pzw/mscOLAjmARDC02z
diff --git a/services/api/test/fixtures/collections.yml b/services/api/test/fixtures/collections.yml
new file mode 100644 (file)
index 0000000..8763f39
--- /dev/null
@@ -0,0 +1,1004 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+user_agreement:
+  uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  current_version_uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  portable_data_hash: b519d9cb706a29fc7ea24dbea2f05851+93
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2013-12-26T19:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2013-12-26T19:22:54Z
+  updated_at: 2013-12-26T19:22:54Z
+  manifest_text: ". 6a4ff0499484c6c79c95cd8c566bd25f+249025 0:249025:GNU_General_Public_License,_version_3.pdf\n"
+  name: user_agreement
+
+collection_owned_by_active:
+  uuid: zzzzz-4zz18-bv31uwvy3neko21
+  current_version_uuid: zzzzz-4zz18-bv31uwvy3neko21
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: owned_by_active
+  version: 2
+
+collection_owned_by_active_past_version_1:
+  uuid: zzzzz-4zz18-znfnqtbbv4spast
+  current_version_uuid: zzzzz-4zz18-bv31uwvy3neko21
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T15:22:54Z
+  updated_at: 2014-02-03T15:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: owned_by_active_version_1
+  version: 1
+
+foo_file:
+  uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  current_version_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2015-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-03T17:22:54Z
+  updated_at: 2015-02-03T17:22:54Z
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+  name: foo_file
+
+bar_file:
+  uuid: zzzzz-4zz18-ehbhgtheo8909or
+  current_version_uuid: zzzzz-4zz18-ehbhgtheo8909or
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2015-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-03T17:22:54Z
+  updated_at: 2015-02-03T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: bar_file
+
+baz_file:
+  uuid: zzzzz-4zz18-y9vne9npefyxh8g
+  current_version_uuid: zzzzz-4zz18-y9vne9npefyxh8g
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: baz_file
+
+w_a_z_file:
+  uuid: zzzzz-4zz18-25k12570yk134b3
+  current_version_uuid: zzzzz-4zz18-25k12570yk134b3
+  portable_data_hash: 8706aadd12a0ebc07d74cae88762ba9e+56
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-09T10:53:38Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-09T10:53:38Z
+  updated_at: 2015-02-09T10:53:38Z
+  manifest_text: ". 4c6c2c0ac8aa0696edd7316a3be5ca3c+5 0:5:w\\040\\141\\040z\n"
+  name: "\"w a z\" file"
+  version: 2
+
+w_a_z_file_version_1:
+  uuid: zzzzz-4zz18-25k12570yk1ver1
+  current_version_uuid: zzzzz-4zz18-25k12570yk134b3
+  portable_data_hash: ba4ba4c7b99a58806b1ed70ea1263afe+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-09T10:53:38Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-09T10:53:38Z
+  updated_at: 2015-02-09T10:53:38Z
+  manifest_text: ". 4d20280d5e516a0109768d49ab0f3318+3 0:3:waz\n"
+  name: "waz file"
+  version: 1
+
+multilevel_collection_1:
+  uuid: zzzzz-4zz18-pyw8yp9g3pr7irn
+  current_version_uuid: zzzzz-4zz18-pyw8yp9g3pr7irn
+  portable_data_hash: 1fd08fc162a5c6413070a8bd0bffc818+150
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3\n./dir1 d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3\n./dir1/subdir d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3\n./dir2 d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3\n"
+  name: multilevel_collection_1
+
+multilevel_collection_2:
+  uuid: zzzzz-4zz18-45xf9hw1sxkhl6q
+  current_version_uuid: zzzzz-4zz18-45xf9hw1sxkhl6q
+  # All of this collection's files are deep in subdirectories.
+  portable_data_hash: 80cf6dd2cf079dd13f272ec4245cb4a8+48
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: "./dir1/sub1 d41d8cd98f00b204e9800998ecf8427e+0 0:0:a 0:0:b\n./dir2/sub2 d41d8cd98f00b204e9800998ecf8427e+0 0:0:c 0:0:d\n"
+  name: multilevel_collection_2
+
+docker_image:
+  uuid: zzzzz-4zz18-1v45jub259sjjgb
+  current_version_uuid: zzzzz-4zz18-1v45jub259sjjgb
+  # This Collection has links with Docker image metadata.
+  portable_data_hash: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-06-11T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-06-11T17:22:54Z
+  updated_at: 2014-06-11T17:22:54Z
+  manifest_text: ". d21353cfe035e3e384563ee55eadbb2f+67108864 5c77a43e329b9838cbec18ff42790e57+55605760 0:122714624:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678.tar\n"
+  name: docker_image
+
+# tagged docker image with sha256:{hash}.tar filename
+docker_image_1_12:
+  uuid: zzzzz-4zz18-1g4g0vhpjn9wq7i
+  current_version_uuid: zzzzz-4zz18-1g4g0vhpjn9wq7i
+  portable_data_hash: d740a57097711e08eb9b2a93518f20ab+174
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2016-10-19 08:50:45.653552268 Z
+  modified_by_client_uuid: zzzzz-ozdt8-teyxzyd8qllg11h
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2016-10-19 08:50:45.652930000 Z
+  updated_at: 2016-10-19 08:50:45.652930000 Z
+  manifest_text: ". d21353cfe035e3e384563ee55eadbb2f+67108864 5c77a43e329b9838cbec18ff42790e57+55605760 0:122714624:sha256:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678.tar\n"
+  name: docker_image_1_12
+
+unlinked_docker_image:
+  uuid: zzzzz-4zz18-d0d8z5wofvfgwad
+  current_version_uuid: zzzzz-4zz18-d0d8z5wofvfgwad
+  # This Collection contains a file that looks like a Docker image,
+  # but has no Docker metadata links pointing to it.
+  portable_data_hash: 9ae44d5792468c58bcf85ce7353c7027+124
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-06-11T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-06-11T17:22:54Z
+  updated_at: 2014-06-11T17:22:54Z
+  manifest_text: ". fca529cfe035e3e384563ee55eadbb2f+67108863 0:67108863:bcd02158b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678.tar\n"
+  name: unlinked_docker_image
+
+empty:
+  uuid: zzzzz-4zz18-gs9ooj1h9sd5mde
+  current_version_uuid: zzzzz-4zz18-gs9ooj1h9sd5mde
+  # Empty collection owned by anonymous_group is added with rake db:seed.
+  portable_data_hash: d41d8cd98f00b204e9800998ecf8427e+0
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-06-11T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-06-11T17:22:54Z
+  updated_at: 2014-06-11T17:22:54Z
+  manifest_text: ""
+  name: empty_collection
+
+foo_collection_in_aproject:
+  uuid: zzzzz-4zz18-fy296fx3hot09f7
+  current_version_uuid: zzzzz-4zz18-fy296fx3hot09f7
+  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+  name: "zzzzz-4zz18-fy296fx3hot09f7 added sometime"
+
+user_agreement_in_anonymously_accessible_project:
+  uuid: zzzzz-4zz18-uukreo9rbgwsujr
+  current_version_uuid: zzzzz-4zz18-uukreo9rbgwsujr
+  portable_data_hash: b519d9cb706a29fc7ea24dbea2f05851+93
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  created_at: 2014-06-13 20:42:26 -0800
+  modified_at: 2014-06-13 20:42:26 -0800
+  updated_at: 2014-06-13 20:42:26 -0800
+  manifest_text: ". 6a4ff0499484c6c79c95cd8c566bd25f+249025 0:249025:GNU_General_Public_License,_version_3.pdf\n"
+  name: GNU General Public License, version 3
+
+public_text_file:
+  uuid: zzzzz-4zz18-4en62shvi99lxd4
+  current_version_uuid: zzzzz-4zz18-4en62shvi99lxd4
+  portable_data_hash: 55713e6a34081eb03609e7ad5fcad129+62
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  created_at: 2015-02-12 16:58:03 -0500
+  modified_at: 2015-02-12 16:58:03 -0500
+  updated_at: 2015-02-12 16:58:03 -0500
+  manifest_text: ". f0ef7081e1539ac00ef5b761b4fb01b3+12 0:12:Hello\\040world.txt\n"
+  name: Hello world
+
+baz_collection_name_in_asubproject:
+  uuid: zzzzz-4zz18-lsitwcf548ui4oe
+  current_version_uuid: zzzzz-4zz18-lsitwcf548ui4oe
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: "zzzzz-4zz18-lsitwcf548ui4oe added sometime"
+
+empty_collection_name_in_active_user_home_project:
+  uuid: zzzzz-4zz18-5qa38qghh1j3nvv
+  current_version_uuid: zzzzz-4zz18-5qa38qghh1j3nvv
+  portable_data_hash: d41d8cd98f00b204e9800998ecf8427e+0
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-08-06 22:11:51.242392533 Z
+  modified_at: 2014-08-06 22:11:51.242150425 Z
+  manifest_text: ""
+  name: Empty collection
+
+baz_file_in_asubproject:
+  uuid: zzzzz-4zz18-0mri2x4u7ftngez
+  current_version_uuid: zzzzz-4zz18-0mri2x4u7ftngez
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: baz_file
+
+collection_to_move_around_in_aproject:
+  uuid: zzzzz-4zz18-0mri2x4u7ft1234
+  current_version_uuid: zzzzz-4zz18-0mri2x4u7ft1234
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: collection_to_move_around
+
+# Note: collections(:expired_collection) fixture finder won't work
+# because it is not in default scope
+expired_collection:
+  uuid: zzzzz-4zz18-mto52zx1s7sn3ih
+  current_version_uuid: zzzzz-4zz18-mto52zx1s7sn3ih
+  portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  is_trashed: true
+  trash_at: 2001-01-01T00:00:00Z
+  delete_at: 2038-01-01T00:00:00Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:expired\n"
+  name: expired_collection
+  version: 2
+
+expired_collection_past_version:
+  uuid: zzzzz-4zz18-mto52zx1s7oldie
+  current_version_uuid: zzzzz-4zz18-mto52zx1s7sn3ih
+  portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:12:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:17:54Z
+  updated_at: 2014-02-03T17:17:54Z
+  is_trashed: true
+  trash_at: 2001-01-01T00:00:00Z
+  delete_at: 2038-01-01T00:00:00Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:expired\n"
+  name: expired_collection original
+  version: 1
+
+trashed_on_next_sweep:
+  uuid: zzzzz-4zz18-4guozfh77ewd2f0
+  current_version_uuid: zzzzz-4zz18-4guozfh77ewd2f0
+  portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2016-12-07T22:01:00.123456Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2016-12-27T22:01:30.123456Z
+  updated_at: 2016-12-27T22:01:30.123456Z
+  is_trashed: false
+  trash_at: 2016-12-07T22:01:30.123456Z
+  delete_at: 2112-01-01T00:00:00Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:expired\n"
+  name: trashed_on_next_sweep
+
+# Note: collections(:deleted_on_next_sweep) fixture finder won't work
+# because it is not in default scope
+deleted_on_next_sweep:
+  uuid: zzzzz-4zz18-3u1p5umicfpqszp
+  current_version_uuid: zzzzz-4zz18-3u1p5umicfpqszp
+  portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2016-12-07T22:01:00.234567Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2016-12-27T22:01:30.234567Z
+  updated_at: 2016-12-27T22:01:30.234567Z
+  is_trashed: true
+  trash_at: 2016-12-07T22:01:30.234567Z
+  delete_at: 2016-12-27T22:01:30.234567Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:expired\n"
+  name: deleted_on_next_sweep
+
+collection_expires_in_future:
+  uuid: zzzzz-4zz18-padkqo7yb8d9i3j
+  current_version_uuid: zzzzz-4zz18-padkqo7yb8d9i3j
+  portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  trash_at: 2038-01-01T00:00:00Z
+  delete_at: 2038-03-01T00:00:00Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:expired\n"
+  name: collection_expires_in_future
+
+unique_expired_collection:
+  uuid: zzzzz-4zz18-mto52zx1s7sn3jk
+  current_version_uuid: zzzzz-4zz18-mto52zx1s7sn3jk
+  portable_data_hash: 4ad199f90029935844dc3f098f4fca2a+49
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  is_trashed: true
+  trash_at: 2001-01-01T00:00:00Z
+  delete_at: 2038-01-01T00:00:00Z
+  manifest_text: ". 29d7797f1888013986899bc9083783fa+3 0:3:expired\n"
+  name: unique_expired_collection1
+
+unique_expired_collection2:
+  uuid: zzzzz-4zz18-mto52zx1s7sn3jr
+  current_version_uuid: zzzzz-4zz18-mto52zx1s7sn3jr
+  portable_data_hash: 4ad199f90029935844dc3f098f4fca2b+49
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  is_trashed: true
+  trash_at: 2001-01-01T00:00:00Z
+  delete_at: 2038-01-01T00:00:00Z
+  manifest_text: ". 29d7797f1888013986899bc9083783fa+3 0:3:expired\n"
+  name: unique_expired_collection2
+
+# a collection with a log file that can be parsed by the log viewer
+# This collection hash matches the following log text:
+#    2014-01-01_12:00:01 zzzzz-8i9sb-abcdefghijklmno 0  log message 1
+#    2014-01-01_12:00:02 zzzzz-8i9sb-abcdefghijklmno 0  log message 2
+#    2014-01-01_12:00:03 zzzzz-8i9sb-abcdefghijklmno 0  log message 3
+#
+real_log_collection:
+  uuid: zzzzz-4zz18-op4e2lbej01tcvu
+  current_version_uuid: zzzzz-4zz18-op4e2lbej01tcvu
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-09-01 12:00:00
+  modified_at: 2014-09-01 12:00:00
+  portable_data_hash: 0b9a7787660e1fce4a93f33e01376ba6+81
+  manifest_text: ". cdd549ae79fe6640fa3d5c6261d8303c+195 0:195:zzzzz-8i9sb-0vsrcqi7whchuil.log.txt\n"
+  name: real_log_collection
+
+collection_in_home_project_with_same_name_as_in_aproject:
+  uuid: zzzzz-4zz18-12342x4u7ftabcd
+  current_version_uuid: zzzzz-4zz18-12342x4u7ftabcd
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: collection_with_same_name_in_aproject_and_home_project
+
+collection_in_aproject_with_same_name_as_in_home_project:
+  uuid: zzzzz-4zz18-56782x4u7ftefgh
+  current_version_uuid: zzzzz-4zz18-56782x4u7ftefgh
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: collection_with_same_name_in_aproject_and_home_project
+
+collection_owned_by_foo:
+  uuid: zzzzz-4zz18-50surkhkbhsp31b
+  current_version_uuid: zzzzz-4zz18-50surkhkbhsp31b
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  owner_uuid: zzzzz-tpzed-81hsbo6mk8nl05c
+  created_at: 2014-02-03T17:22:54Z
+  modified_at: 2014-02-03T17:22:54Z
+  name: collection_owned_by_foo
+
+collection_to_remove_from_subproject:
+  # The Workbench tests remove this from subproject.
+  uuid: zzzzz-4zz18-subprojgonecoll
+  current_version_uuid: zzzzz-4zz18-subprojgonecoll
+  portable_data_hash: 2386ca6e3fffd4be5e197a72c6c80fb2+51
+  manifest_text: ". 8258b505536a9ab47baa2f4281cb932a+9 0:9:missingno\n"
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  created_at: 2014-10-15T10:45:00
+  modified_at: 2014-10-15T10:45:00
+  name: Collection to remove from subproject
+
+collection_with_files_in_subdir:
+  uuid: zzzzz-4zz18-filesinsubdir00
+  current_version_uuid: zzzzz-4zz18-filesinsubdir00
+  name: collection_files_in_subdir
+  portable_data_hash: 85877ca2d7e05498dd3d109baf2df106+95
+  owner_uuid: zzzzz-tpzed-user1withloadab
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-user1withloadab
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 85877ca2d7e05498dd3d109baf2df106+95 0:95:file_in_subdir1\n./subdir2/subdir3 2bbc341c702df4d8f42ec31f16c10120+64 0:32:file1_in_subdir3.txt 32:32:file2_in_subdir3.txt\n./subdir2/subdir3/subdir4 2bbc341c702df4d8f42ec31f16c10120+64 0:32:file1_in_subdir4.txt 32:32:file2_in_subdir4.txt"
+
+graph_test_collection1:
+  uuid: zzzzz-4zz18-bv31uwvy3neko22
+  current_version_uuid: zzzzz-4zz18-bv31uwvy3neko22
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: bar_file
+  created_at: 2014-02-03T17:22:54Z
+  modified_at: 2014-02-03T17:22:54Z
+
+graph_test_collection2:
+  uuid: zzzzz-4zz18-uukreo9rbgwsujx
+  current_version_uuid: zzzzz-4zz18-uukreo9rbgwsujx
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  portable_data_hash: 65b17c95fdbc9800fc48acda4e9dcd0b+93
+  manifest_text: ". 6a4ff0499484c6c79c95cd8c566bd25f+249025 0:249025:FOO_General_Public_License,_version_3.pdf\n"
+  name: "FOO General Public License, version 3"
+  created_at: 2014-02-03T17:22:54Z
+  modified_at: 2014-02-03T17:22:54Z
+
+graph_test_collection3:
+  uuid: zzzzz-4zz18-uukreo9rbgwsujj
+  current_version_uuid: zzzzz-4zz18-uukreo9rbgwsujj
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: "baz file"
+  created_at: 2014-02-03T17:22:54Z
+  modified_at: 2014-02-03T17:22:54Z
+
+collection_1_owned_by_fuse:
+  uuid: zzzzz-4zz18-ovx05bfzormx3bg
+  current_version_uuid: zzzzz-4zz18-ovx05bfzormx3bg
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-0fusedrivertest
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: "collection #1 owned by FUSE"
+
+collection_2_owned_by_fuse:
+  uuid: zzzzz-4zz18-8ubpy4w74twtwzr
+  current_version_uuid: zzzzz-4zz18-8ubpy4w74twtwzr
+  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  owner_uuid: zzzzz-tpzed-0fusedrivertest
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+  name: "collection #2 owned by FUSE"
+
+collection_in_fuse_project:
+  uuid: zzzzz-4zz18-vx4mtkjqfrb534f
+  current_version_uuid: zzzzz-4zz18-vx4mtkjqfrb534f
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-j7d0g-0000ownedbyfuse
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: "collection in FUSE project"
+
+collection_with_no_name_in_aproject:
+  uuid: zzzzz-4zz18-00000nonamecoll
+  current_version_uuid: zzzzz-4zz18-00000nonamecoll
+  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+
+collection_to_search_for_in_aproject:
+  uuid: zzzzz-4zz18-abcd6fx123409f7
+  current_version_uuid: zzzzz-4zz18-abcd6fx123409f7
+  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+  name: "zzzzz-4zz18-abcd6fx123409f7 used to search with any"
+
+upload_sandbox:
+  uuid: zzzzz-4zz18-js48y3ykkfdfjd3
+  current_version_uuid: zzzzz-4zz18-js48y3ykkfdfjd3
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-12-09 15:03:16
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-12-09 15:03:16
+  portable_data_hash: d41d8cd98f00b204e9800998ecf8427e+0
+  updated_at: 2014-12-09 15:03:16
+  manifest_text: ''
+  name: upload sandbox
+
+collection_with_unique_words_to_test_full_text_search:
+  uuid: zzzzz-4zz18-mnt690klmb51aud
+  current_version_uuid: zzzzz-4zz18-mnt690klmb51aud
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection_with_some_unique_words
+  description: The quick_brown_fox jumps over the lazy_dog
+
+replication_undesired_unconfirmed:
+  uuid: zzzzz-4zz18-wjxq7uzx2m9jj4a
+  current_version_uuid: zzzzz-4zz18-wjxq7uzx2m9jj4a
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-07 00:19:28.596506247 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2015-02-07 00:19:28.596338465 Z
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  replication_desired: ~
+  replication_confirmed_at: ~
+  replication_confirmed: ~
+  updated_at: 2015-02-07 00:19:28.596236608 Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: replication want=null have=null
+
+replication_desired_2_unconfirmed:
+  uuid: zzzzz-4zz18-3t236wrz4769h7x
+  current_version_uuid: zzzzz-4zz18-3t236wrz4769h7x
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-07 00:21:35.050333515 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2015-02-07 00:21:35.050189104 Z
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  replication_desired: 2
+  replication_confirmed_at: ~
+  replication_confirmed: ~
+  updated_at: 2015-02-07 00:21:35.050126576 Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: replication want=2 have=null
+
+replication_desired_2_confirmed_2:
+  uuid: zzzzz-4zz18-434zv1tnnf2rygp
+  current_version_uuid: zzzzz-4zz18-434zv1tnnf2rygp
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-07 00:19:28.596506247 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2015-02-07 00:19:28.596338465 Z
+  portable_data_hash: ec53808e4c23e6aeebea24d998ae5346+88
+  replication_desired: 2
+  replication_confirmed_at: 2015-02-07 00:24:52.983381227 Z
+  replication_confirmed: 2
+  updated_at: 2015-02-07 00:24:52.983381227 Z
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo 3:3:bar\n"
+  name: replication want=2 have=2
+
+storage_classes_desired_default_unconfirmed:
+  uuid: zzzzz-4zz18-3t236wrz4769tga
+  current_version_uuid: zzzzz-4zz18-3t236wrz4769tga
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-07 00:21:35.050333515 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2015-02-07 00:21:35.050189104 Z
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  storage_classes_desired: ["default"]
+  storage_classes_confirmed_at: ~
+  storage_classes_confirmed: ~
+  updated_at: 2015-02-07 00:21:35.050126576 Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: storage classes want=[default] have=[]
+
+storage_classes_desired_default_confirmed_default:
+  uuid: zzzzz-4zz18-3t236wr12769tga
+  current_version_uuid: zzzzz-4zz18-3t236wr12769tga
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-07 00:21:35.050333515 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2015-02-07 00:21:35.050189104 Z
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  storage_classes_desired: ["default"]
+  storage_classes_confirmed_at: 2015-02-07 00:21:35.050126576 Z
+  storage_classes_confirmed: ["default"]
+  updated_at: 2015-02-07 00:21:35.050126576 Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: storage classes want=[default] have=[default]
+
+storage_classes_desired_archive_confirmed_default:
+  uuid: zzzzz-4zz18-3t236wr12769qqa
+  current_version_uuid: zzzzz-4zz18-3t236wr12769qqa
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-07 00:21:35.050333515 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2015-02-07 00:21:35.050189104 Z
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  storage_classes_desired: ["archive"]
+  storage_classes_confirmed_at: ~
+  storage_classes_confirmed: ["default"]
+  updated_at: 2015-02-07 00:21:35.050126576 Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: storage classes want=[archive] have=[default]
+
+collection_with_empty_properties:
+  uuid: zzzzz-4zz18-emptyproperties
+  current_version_uuid: zzzzz-4zz18-emptyproperties
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-13T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-13T17:22:54Z
+  updated_at: 2015-02-13T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection with empty properties
+  properties: {}
+
+collection_with_one_property:
+  uuid: zzzzz-4zz18-withoneproperty
+  current_version_uuid: zzzzz-4zz18-withoneproperty
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-13T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-13T17:22:54Z
+  updated_at: 2015-02-13T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection with one property
+  properties:
+    property1: value1
+
+# The following four collections are used to test combining collections with repeated filenames
+collection_with_repeated_filenames_and_contents_in_two_dirs_1:
+  uuid: zzzzz-4zz18-duplicatenames1
+  current_version_uuid: zzzzz-4zz18-duplicatenames1
+  portable_data_hash: f3a67fad3a19c31c658982fb8158fa58+144
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  name: collection_with_repeated_filenames_and_contents_in_two_dirs_1
+  manifest_text: "./dir1 92b53930db60fe94be2a73fc771ba921+34 0:12:alice 12:12:alice.txt 24:10:bob.txt\n./dir2 56ac2557b1ded11ccab7293dc47d1e88+44 0:27:alice.txt\n"
+
+collection_with_repeated_filenames_and_contents_in_two_dirs_2:
+  uuid: zzzzz-4zz18-duplicatenames2
+  current_version_uuid: zzzzz-4zz18-duplicatenames2
+  portable_data_hash: f3a67fad3a19c31c658982fb8158fa58+144
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  name: collection_with_repeated_filenames_and_contents_in_two_dirs_2
+  manifest_text: "./dir1 92b53930db60fe94be2a73fc771ba921+34 0:12:alice 12:12:alice.txt 24:10:carol.txt\n./dir2 56ac2557b1ded11ccab7293dc47d1e88+44 0:27:alice.txt\n"
+
+foo_and_bar_files_in_dir:
+  uuid: zzzzz-4zz18-foonbarfilesdir
+  current_version_uuid: zzzzz-4zz18-foonbarfilesdir
+  portable_data_hash: 6bbac24198d09a93975f60098caf0bdf+62
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  name: foo_and_bar_files_in_dir
+  manifest_text: "./dir1 3858f62230ac3c915f300c664312c63f+6 3:3:bar 0:3:foo\n"
+
+multi_level_to_combine:
+  uuid: zzzzz-4zz18-pyw8yp9g3ujh45f
+  current_version_uuid: zzzzz-4zz18-pyw8yp9g3ujh45f
+  portable_data_hash: 7a6ef4c162a5c6413070a8bd0bffc818+150
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 85877ca2d7e05498dd3d109baf2df106+95 0:0:file1 0:0:file2 0:0:file3\n./dir1 85877ca2d7e05498dd3d109baf2df106+95 0:0:file1 0:0:file2 0:0:file3\n./dir1/subdir 85877ca2d7e05498dd3d109baf2df106+95 0:0:file1 0:0:file2 0:0:file3\n./dir2 85877ca2d7e05498dd3d109baf2df106+95 0:0:file1 0:0:file2 0:0:file3\n"
+  name: multi_level_to_combine
+
+# Collection with several file types, used to test the view icon's enabled state on the collection show page.
+collection_with_several_supported_file_types:
+  uuid: zzzzz-4zz18-supportedtypes1
+  current_version_uuid: zzzzz-4zz18-supportedtypes1
+  portable_data_hash: 020d82cf7dedb70fd2b7788b5d0634da+269
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file.csv 0:0:file.fa 0:0:file.fasta 0:0:file.gif 0:0:file.json 0:0:file.md 0:0:file.pdf 0:0:file.py 0:0:file.R 0:0:file.sam 0:0:file.sh 0:0:file.tiff 0:0:file.tsv 0:0:file.txt 0:0:file.vcf 0:0:file.xml 0:0:file.xsl 0:0:file.yml\n"
+  name: collection_with_several_supported_file_types
+
+collection_with_several_unsupported_file_types:
+  uuid: zzzzz-4zz18-supportedtypes2
+  current_version_uuid: zzzzz-4zz18-supportedtypes2
+  portable_data_hash: 71ac42f87464ee5f9fd396d560d400c3+59
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file 0:0:file.bam\n"
+  name: collection_with_several_unsupported_file_types
+
+collection_not_readable_by_active:
+  uuid: zzzzz-4zz18-cd42uwvy3neko21
+  current_version_uuid: zzzzz-4zz18-cd42uwvy3neko21
+  portable_data_hash: bb89eb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection_not_readable_by_active
+
+collection_to_remove_and_rename_files:
+  uuid: zzzzz-4zz18-a21ux3541sxa8sf
+  current_version_uuid: zzzzz-4zz18-a21ux3541sxa8sf
+  portable_data_hash: 80cf6dd2cf079dd13f272ec4245cb4a8+48
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\n"
+  name: collection to remove and rename files
+
+collection_with_tags_owned_by_active:
+  uuid: zzzzz-4zz18-taggedcolletion
+  current_version_uuid: zzzzz-4zz18-taggedcolletion
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection with tags
+  properties:
+    existing tag 1: value for existing tag 1
+    existing tag 2: value for existing tag 2
+
+trashed_collection_to_test_name_conflict_on_untrash:
+  uuid: zzzzz-4zz18-trashedcolnamec
+  current_version_uuid: zzzzz-4zz18-trashedcolnamec
+  portable_data_hash: 80cf6dd2cf079dd13f272ec4245cb4a8+48
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\n"
+  name: same name for trashed and persisted collections
+  is_trashed: true
+  trash_at: 2001-01-01T00:00:00Z
+  delete_at: 2038-01-01T00:00:00Z
+
+same_name_as_trashed_coll_to_test_name_conflict_on_untrash:
+  uuid: zzzzz-4zz18-namesameastrash
+  current_version_uuid: zzzzz-4zz18-namesameastrash
+  portable_data_hash: 80cf6dd2cf079dd13f272ec4245cb4a8+48
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\n"
+  name: same name for trashed and persisted collections
+
+collection_in_trashed_subproject:
+  uuid: zzzzz-4zz18-trashedproj2col
+  current_version_uuid: zzzzz-4zz18-trashedproj2col
+  portable_data_hash: 80cf6dd2cf079dd13f272ec4245cb4a8+48
+  owner_uuid: zzzzz-j7d0g-trashedproject2
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\n"
+  name: collection in trashed subproject
+
+collection_with_prop1_value1:
+  uuid: zzzzz-4zz18-withprop1value1
+  current_version_uuid: zzzzz-4zz18-withprop1value1
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-13T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-13T17:22:54Z
+  updated_at: 2015-02-13T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection with prop1 value1
+  properties:
+    prop1: value1
+
+collection_with_prop1_value2:
+  uuid: zzzzz-4zz18-withprop1value2
+  current_version_uuid: zzzzz-4zz18-withprop1value2
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-13T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-13T17:22:54Z
+  updated_at: 2015-02-13T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection with prop1 value2
+  properties:
+    prop1: value2
+
+collection_with_prop1_value3:
+  uuid: zzzzz-4zz18-withprop1value3
+  current_version_uuid: zzzzz-4zz18-withprop1value3
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-13T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-13T17:22:54Z
+  updated_at: 2015-02-13T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection with prop1 value3
+  properties:
+    prop1: value3
+
+collection_with_prop1_other1:
+  uuid: zzzzz-4zz18-withprop1other1
+  current_version_uuid: zzzzz-4zz18-withprop1other1
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-13T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-13T17:22:54Z
+  updated_at: 2015-02-13T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection with prop1 other1
+  properties:
+    prop1: other1
+
+collection_with_prop2_1:
+  uuid: zzzzz-4zz18-withprop2value1
+  current_version_uuid: zzzzz-4zz18-withprop2value1
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-13T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-13T17:22:54Z
+  updated_at: 2015-02-13T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection with prop2 1
+  properties:
+    prop2: 1
+
+collection_with_prop2_5:
+  uuid: zzzzz-4zz18-withprop2value5
+  current_version_uuid: zzzzz-4zz18-withprop2value5
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-13T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-13T17:22:54Z
+  updated_at: 2015-02-13T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection with prop2 5
+  properties:
+    prop2: 5
+
+collection_with_uri_prop:
+  uuid: zzzzz-4zz18-withuripropval1
+  current_version_uuid: zzzzz-4zz18-withuripropval1
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-13T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-13T17:22:54Z
+  updated_at: 2015-02-13T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection with RDF-style URI property key
+  properties:
+    "http://schema.org/example": "value1"
+
+# Test Helper trims the rest of the file
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
+
+# collections in project_with_10_collections
+<% for i in 1..10 do %>
+collection_<%= i %>_of_10:
+  name: Collection_<%= i %>
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  uuid: zzzzz-4zz18-10gneyn6brkx<%= i.to_s.rjust(3, '0') %>
+  current_version_uuid: zzzzz-4zz18-10gneyn6brkx<%= i.to_s.rjust(3, '0') %>
+  owner_uuid: zzzzz-j7d0g-0010collections
+  created_at: <%= i.minute.ago.to_s(:db) %>
+  modified_at: <%= i.minute.ago.to_s(:db) %>
+<% end %>
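+# (For reference: the loop above expands to ten fixtures, collection_1_of_10
+# through collection_10_of_10, with uuid suffixes 001..010 from rjust(3, '0');
+# the relative created_at/modified_at values are evaluated when the fixture
+# file is loaded.)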
+
+# collections in project_with_201_collections
+<% for i in 1..201 do %>
+collection_<%= i %>_of_201:
+  name: Collection_<%= i %>
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  uuid: zzzzz-4zz18-201gneyn6brd<%= i.to_s.rjust(3, '0') %>
+  current_version_uuid: zzzzz-4zz18-201gneyn6brd<%= i.to_s.rjust(3, '0') %>
+  owner_uuid: zzzzz-j7d0g-0201collections
+  created_at: <%= i.minute.ago.to_s(:db) %>
+  modified_at: <%= i.minute.ago.to_s(:db) %>
+<% end %>
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
diff --git a/services/api/test/fixtures/container_requests.yml b/services/api/test/fixtures/container_requests.yml
new file mode 100644 (file)
index 0000000..dea9888
--- /dev/null
@@ -0,0 +1,803 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+queued:
+  uuid: zzzzz-xvhdp-cr4queuedcontnr
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: queued
+  state: Committed
+  priority: 1
+  created_at: <%= 2.minute.ago.to_s(:db) %>
+  updated_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-queuedcontainer
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+running:
+  uuid: zzzzz-xvhdp-cr4runningcntnr
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: running
+  state: Committed
+  priority: 501
+  created_at: <%= 2.minute.ago.to_s(:db) %>
+  updated_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-runningcontainr
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+requester_for_running:
+  uuid: zzzzz-xvhdp-req4runningcntr
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: requester_for_running_cr
+  state: Committed
+  priority: 1
+  created_at: <%= 2.minute.ago.to_s(:db) %>
+  updated_at: <%= 2.minute.ago.to_s(:db) %>
+  modified_at: <%= 2.minute.ago.to_s(:db) %>
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-logscontainer03
+  requesting_container_uuid: zzzzz-dz642-runningcontainr
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+running_older:
+  uuid: zzzzz-xvhdp-cr4runningcntn2
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: running
+  state: Committed
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-12 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-runningcontain2
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+completed:
+  uuid: zzzzz-xvhdp-cr4completedctr
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: completed container request
+  state: Final
+  priority: 1
+  created_at: <%= 2.minute.ago.to_s(:db) %>
+  updated_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-compltcontainer
+  log_uuid: zzzzz-4zz18-y9vne9npefyxh8g
+  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+completed-older:
+  uuid: zzzzz-xvhdp-cr4completedcr2
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: completed
+  state: Final
+  priority: 1
+  created_at: <%= 30.minute.ago.to_s(:db) %>
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["arvados-cwl-runner", "echo", "hello"]
+  container_uuid: zzzzz-dz642-compltcontainr2
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+requester:
+  uuid: zzzzz-xvhdp-9zacv3o1xw6sxz5
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: requester
+  state: Committed
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: /
+  output_path: /output
+  command: ["request-another-container", "echo", "hello"]
+  container_uuid: zzzzz-dz642-requestingcntnr
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+cr_for_requester:
+  uuid: zzzzz-xvhdp-cr4requestercnt
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: requester_cr
+  state: Final
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-requestercntnr1
+  requesting_container_uuid: zzzzz-dz642-requestingcntnr
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+cr_for_requester2:
+  uuid: zzzzz-xvhdp-cr4requestercn2
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: requester_cr2
+  state: Final
+  priority: 1
+  created_at: <%= 30.minute.ago.to_s(:db) %>
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  requesting_container_uuid: zzzzz-dz642-requestercntnr1
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+running_anonymous_accessible:
+  uuid: zzzzz-xvhdp-runninganonaccs
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  name: running anonymously accessible cr
+  state: Committed
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-runningcontain2
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+cr_for_failed:
+  uuid: zzzzz-xvhdp-cr4failedcontnr
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: cr for container exit code not 0
+  state: Committed
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-failedcontainr1
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+canceled_with_queued_container:
+  uuid: zzzzz-xvhdp-canceledqueuedc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: canceled with queued container
+  state: Final
+  priority: 0
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-queuedcontainer
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+canceled_with_locked_container:
+  uuid: zzzzz-xvhdp-canceledlocekdc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: canceled with locked container
+  state: Final
+  priority: 0
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-lockedcontainer
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+canceled_with_running_container:
+  uuid: zzzzz-xvhdp-canceledrunning
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: canceled with running container
+  state: Committed
+  priority: 0
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-runningcontainr
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+running_to_be_deleted:
+  uuid: zzzzz-xvhdp-cr5runningcntnr
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: running to be deleted
+  state: Committed
+  priority: 1
+  created_at: <%= 2.days.ago.to_s(:db) %>
+  updated_at: <%= 1.days.ago.to_s(:db) %>
+  modified_at: <%= 1.days.ago.to_s(:db) %>
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-runnincntrtodel
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+completed_with_input_mounts:
+  uuid: zzzzz-xvhdp-crwithinputmnts
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: completed container request
+  state: Final
+  priority: 1
+  created_at: <%= 24.hour.ago.to_s(:db) %>
+  updated_at: <%= 24.hour.ago.to_s(:db) %>
+  modified_at: <%= 24.hour.ago.to_s(:db) %>
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello", "/bin/sh", "-c", "'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'"]
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+  container_uuid: zzzzz-dz642-compltcontainer
+  log_uuid: zzzzz-4zz18-y9vne9npefyxh8g
+  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  mounts:
+    /var/lib/cwl/cwl.input.json:
+      content:
+        input1:
+          basename: foo
+          class: File
+          location: "keep:fa7aeb5140e2848d39b416daeef4ffc5+45/foo"
+        input2:
+          basename: bar
+          class: File
+          location: "keep:fa7aeb5140e2848d39b416daeef4ffc5+45/bar"
+    /var/lib/cwl/workflow.json: "keep:1fd08fc162a5c6413070a8bd0bffc818+150"
+
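+# The CWL-style requests below share one mount layout: the workflow document at
+# /var/lib/cwl/workflow.json, the input object at /var/lib/cwl/cwl.input.json,
+# stdout captured to /var/spool/cwl/cwl.output.json, and a writable output
+# collection at /var/spool/cwl, which doubles as cwd and output_path.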
+uncommitted:
+  uuid: zzzzz-xvhdp-cr4uncommittedc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: uncommitted
+  created_at: <%= 2.minute.ago.to_s(:db) %>
+  updated_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  command: ["arvados-cwl-runner", "--local", "--api=containers",
+            "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+  output_path: "/var/spool/cwl"
+  cwd: "/var/spool/cwl"
+  priority: 1
+  state: "Uncommitted"
+  container_image: arvados/jobs
+  mounts: {
+        "/var/lib/cwl/workflow.json": {
+            "kind": "json",
+            "content": {
+                "cwlVersion": "v1.0",
+                "$graph": [{
+                "id": "#main",
+                "class": "CommandLineTool",
+                "baseCommand": ["echo"],
+                "inputs": [
+                    {
+                        "doc": "a longer documentation string for this parameter (optional)",
+                        "type": "boolean",
+                        "id": "ex_boolean",
+                        "label": "a short label for this parameter (optional)",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "boolean"],
+                        "id": "ex_boolean_opt",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "doc": "directory selection should present the workbench collection picker",
+                        "type": "Directory",
+                        "id": "ex_dir",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "double",
+                        "id": "ex_double",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "doc": "file selection should present the workbench file picker",
+                        "type": "File",
+                        "id": "ex_file",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "float",
+                        "id": "ex_float",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "int",
+                        "id": "ex_int",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "int"],
+                        "id": "ex_int_opt",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "long",
+                        "id": "ex_long",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "string",
+                        "id": "ex_string",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "string"],
+                        "id": "ex_string_opt",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": {
+                            "type": "enum",
+                            "symbols": ["a", "b", "c"]
+                        },
+                        "id": "ex_enum",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", {
+                            "type": "enum",
+                            "symbols": ["a", "b", "c"]
+                        }],
+                        "id": "ex_enum_opt",
+                        "inputBinding": {"position": 1}
+                    }
+                ],
+                "outputs": []
+            }]
+          }
+        },
+        "/var/lib/cwl/cwl.input.json": {
+            "kind": "json",
+            "content": {}
+        },
+        "stdout": {
+            "kind": "file",
+            "path": "/var/spool/cwl/cwl.output.json"
+        },
+        "/var/spool/cwl": {
+            "kind": "collection",
+            "writable": true
+        }
+    }
+  runtime_constraints:
+    vcpus: 1
+    ram: 256000000
+    API: true
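+    # API: true requests access to the Arvados API from inside the container.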
+
+uncommitted_ready_to_run:
+  uuid: zzzzz-xvhdp-cr4uncommittedd
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: uncommitted_ready_to_run
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  command: ["arvados-cwl-runner", "--local", "--api=containers",
+            "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+  output_path: "/var/spool/cwl"
+  cwd: "/var/spool/cwl"
+  priority: 1
+  state: "Uncommitted"
+  container_image: arvados/jobs
+  mounts: {
+        "/var/lib/cwl/workflow.json": {
+            "kind": "json",
+            "content": {
+                "cwlVersion": "v1.0",
+                "class": "CommandLineTool",
+                "baseCommand": ["echo"],
+                "inputs": [
+                    {
+                        "doc": "a longer documentation string for this parameter (optional)",
+                        "type": "boolean",
+                        "id": "ex_boolean",
+                        "label": "a short label for this parameter (optional)",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "boolean"],
+                        "id": "ex_boolean_opt",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "doc": "directory selection should present the workbench collection picker",
+                        "type": "Directory",
+                        "id": "ex_dir",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "double",
+                        "id": "ex_double",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "doc": "file selection should present the workbench file picker",
+                        "type": "File",
+                        "id": "ex_file",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "float",
+                        "id": "ex_float",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "int",
+                        "id": "ex_int",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "int"],
+                        "id": "ex_int_opt",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "long",
+                        "id": "ex_long",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": "string",
+                        "id": "ex_string",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "string"],
+                        "id": "ex_string_opt",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": {
+                            "type": "enum",
+                            "symbols": ["a", "b", "c"]
+                        },
+                        "id": "ex_enum",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", {
+                            "type": "enum",
+                            "symbols": ["a", "b", "c"]
+                        }],
+                        "id": "ex_enum_opt",
+                        "inputBinding": {"position": 1}
+                    }
+                ],
+                "outputs": []
+            }
+        },
+        "/var/lib/cwl/cwl.input.json": {
+            "kind": "json",
+            "content": {
+              "ex_string_opt": null,
+              "ex_int_opt": null,
+              "ex_boolean": false,
+              "ex_boolean_opt": true,
+              "ex_dir": {
+                "class": "Directory",
+                "location": "keep:1f4b0bc7583c2a7f9102c395f4ffc5e3+45",
+                "arv:collection": "zzzzz-4zz18-znfnqtbbv4spc3w"
+              },
+              "ex_double": 66.0,
+              "ex_file": {
+                "class": "File",
+                "location": "keep:1f4b0bc7583c2a7f9102c395f4ffc5e3+45/foo",
+                "arv:collection": "zzzzz-4zz18-znfnqtbbv4spc3w/foo"
+              },
+              "ex_float": 55.0,
+              "ex_int": 55,
+              "ex_long": 22,
+              "ex_string": "qq",
+              "ex_enum": "a"
+            }
+        },
+        "stdout": {
+            "kind": "file",
+            "path": "/var/spool/cwl/cwl.output.json"
+        },
+        "/var/spool/cwl": {
+            "kind": "collection",
+            "writable": true
+        }
+    }
+  runtime_constraints:
+    vcpus: 1
+    ram: 256000000
+    API: true
+
+uncommitted-with-directory-input:
+  uuid: zzzzz-xvhdp-cr4uncommitted2
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: uncommitted with directory input
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  command: ["arvados-cwl-runner", "--local", "--api=containers",
+            "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+  output_path: "/var/spool/cwl"
+  cwd: "/var/spool/cwl"
+  priority: 1
+  state: Uncommitted
+  container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  mounts: {
+        "/var/lib/cwl/workflow.json": {
+            "kind": "json",
+            "content": {
+                "cwlVersion": "v1.0",
+                "class": "CommandLineTool",
+                "baseCommand": ["echo"],
+                "inputs": [
+                    {
+                        "type": "Directory",
+                        "id": "directory_type",
+                        "inputBinding": {"position": 1}
+                    }
+                ],
+                "outputs": []
+            }
+        },
+        "/var/lib/cwl/cwl.input.json": {
+            "kind": "json",
+            "content": {}
+        },
+        "stdout": {
+            "kind": "file",
+            "path": "/var/spool/cwl/cwl.output.json"
+        },
+        "/var/spool/cwl": {
+            "kind": "collection",
+            "writable": true
+        }
+    }
+  runtime_constraints:
+    vcpus: 1
+    ram: 256000000
+    API: true
+
+uncommitted-with-file-input:
+  uuid: zzzzz-xvhdp-cr4uncommittedf
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: uncommitted with file input
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  command: ["arvados-cwl-runner", "--local", "--api=containers",
+            "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+  output_path: "/var/spool/cwl"
+  cwd: "/var/spool/cwl"
+  priority: 1
+  state: Uncommitted
+  container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  mounts: {
+        "/var/lib/cwl/workflow.json": {
+            "kind": "json",
+            "content": {
+                "cwlVersion": "v1.0",
+                "class": "CommandLineTool",
+                "baseCommand": ["echo"],
+                "inputs": [
+                    {
+                        "type": "File",
+                        "id": "file_type",
+                        "inputBinding": {"position": 1}
+                    }
+                ],
+                "outputs": []
+            }
+        },
+        "/var/lib/cwl/cwl.input.json": {
+            "kind": "json",
+            "content": {}
+        },
+        "stdout": {
+            "kind": "file",
+            "path": "/var/spool/cwl/cwl.output.json"
+        },
+        "/var/spool/cwl": {
+            "kind": "collection",
+            "writable": true
+        }
+    }
+  runtime_constraints:
+    vcpus: 1
+    ram: 256000000
+    API: true
+
+uncommitted-with-required-and-optional-inputs:
+  uuid: zzzzz-xvhdp-cr4uncommitted3
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: uncommitted with required and optional inputs
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  command: ["arvados-cwl-runner", "--local", "--api=containers",
+            "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+  output_path: "/var/spool/cwl"
+  cwd: "/var/spool/cwl"
+  priority: 1
+  state: Uncommitted
+  container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  mounts: {
+        "/var/lib/cwl/workflow.json": {
+            "kind": "json",
+            "content": {
+                "cwlVersion": "v1.0",
+                "class": "CommandLineTool",
+                "baseCommand": ["echo"],
+                "inputs": [
+                    {
+                        "type": "int",
+                        "id": "int_required",
+                        "inputBinding": {"position": 1}
+                    },
+                    {
+                        "type": ["null", "int"],
+                        "id": "int_optional",
+                        "inputBinding": {"position": 1}
+                    }
+                ],
+                "outputs": []
+            }
+        },
+        "/var/lib/cwl/cwl.input.json": {
+            "kind": "json",
+            "content": {}
+        },
+        "stdout": {
+            "kind": "file",
+            "path": "/var/spool/cwl/cwl.output.json"
+        },
+        "/var/spool/cwl": {
+            "kind": "collection",
+            "writable": true
+        }
+    }
+  runtime_constraints:
+    vcpus: 1
+    ram: 256000000
+    API: true
+
+cr_in_trashed_project:
+  uuid: zzzzz-xvhdp-cr5trashedcontr
+  owner_uuid: zzzzz-j7d0g-trashedproject1
+  name: completed container request
+  state: Final
+  priority: 1
+  created_at: <%= 2.minute.ago.to_s(:db) %>
+  updated_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-compltcontainer
+  log_uuid: zzzzz-4zz18-y9vne9npefyxh8g
+  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+runtime_token:
+  uuid: zzzzz-xvhdp-11eklkhy0n4dm86
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: queued
+  state: Committed
+  priority: 1
+  created_at: <%= 2.minute.ago.to_s(:db) %>
+  updated_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_at: <%= 1.minute.ago.to_s(:db) %>
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-20isqbkl8xwnsao
+  runtime_token: v2/zzzzz-gj3su-2nj68s291f50gd9/2d19ue6ofx26o3mm7fs9u6t7hov9um0v92dzwk1o2xed3abprw
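+  # v2 token format: v2/<api_client_authorization uuid>/<token secret>.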
+  runtime_constraints:
+    vcpus: 1
+    ram: 123
+
+
+# Test Helper trims the rest of the file
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
+
+# container requests in project_with_2_pipelines_and_60_crs
+<% for i in 1..60 do %>
+cr_<%= i %>_of_60:
+  uuid: zzzzz-xvhdp-oneof60crs<%= i.to_s.rjust(5, '0') %>
+  created_at: <%= ((i+5)/5).hour.ago.to_s(:db) %>
+  owner_uuid: zzzzz-j7d0g-nnncrspipelines
+  name: cr-<%= i.to_s %>
+  output_path: test
+  command: ["echo", "hello"]
+<% end %>
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
diff --git a/services/api/test/fixtures/containers.yml b/services/api/test/fixtures/containers.yml
new file mode 100644 (file)
index 0000000..5c5d45f
--- /dev/null
@@ -0,0 +1,287 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
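+# These container records back the container_requests fixtures above: every
+# container_uuid referenced there (e.g. zzzzz-dz642-queuedcontainer,
+# zzzzz-dz642-runningcontainr) resolves to an entry in this file.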
+queued:
+  uuid: zzzzz-dz642-queuedcontainer
+  owner_uuid: zzzzz-tpzed-000000000000000
+  state: Queued
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
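+    # ram is in bytes (12 GB here); vcpus is a CPU-core count.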
+  mounts:
+    /tmp:
+      kind: tmp
+      capacity: 24000000000
+    /var/spool/cwl:
+      kind: tmp
+      capacity: 24000000000
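+      # tmp mounts provide per-container scratch space; capacity is also in bytes (24 GB each).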
+
+running:
+  uuid: zzzzz-dz642-runningcontainr
+  owner_uuid: zzzzz-tpzed-000000000000000
+  state: Running
+  priority: 12
+  created_at: <%= 1.minute.ago.to_s(:db) %>
+  updated_at: <%= 1.minute.ago.to_s(:db) %>
+  started_at: <%= 1.minute.ago.to_s(:db) %>
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  secret_mounts:
+    /secret/6x9:
+      kind: text
+      content: "42\n"
+  secret_mounts_md5: <%= Digest::MD5.hexdigest(SafeJSON.dump({'/secret/6x9' => {'content' => "42\n", 'kind' => 'text'}})) %>
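+  # secret_mounts_md5 is the MD5 of the canonical JSON encoding of
+  # secret_mounts; the literal 99914b932bd37a50b983c5e7c90ae93b used in the
+  # fixtures below is MD5("{}"), i.e. no secret mounts.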
+  auth_uuid: zzzzz-gj3su-077z32aux8dg2s2
+
+running_older:
+  uuid: zzzzz-dz642-runningcontain2
+  owner_uuid: zzzzz-tpzed-000000000000000
+  state: Running
+  priority: 1
+  created_at: <%= 2.minute.ago.to_s(:db) %>
+  updated_at: <%= 2.minute.ago.to_s(:db) %>
+  started_at: <%= 2.minute.ago.to_s(:db) %>
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
+
+locked:
+  uuid: zzzzz-dz642-lockedcontainer
+  owner_uuid: zzzzz-tpzed-000000000000000
+  state: Locked
+  locked_by_uuid: zzzzz-gj3su-k9dvestay1plssr
+  priority: 2
+  created_at: <%= 2.minute.ago.to_s(:db) %>
+  updated_at: <%= 2.minute.ago.to_s(:db) %>
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
+
+completed:
+  uuid: zzzzz-dz642-compltcontainer
+  owner_uuid: zzzzz-tpzed-000000000000000
+  state: Complete
+  exit_code: 0
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  started_at: 2016-01-11 11:11:11.111111111 Z
+  finished_at: 2016-01-12 11:12:13.111111111 Z
+  container_image: test
+  cwd: test
+  log: ea10d51bcf88862dbcc36eb292017dfd+45
+  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  output_path: test
+  command: ["echo", "hello", "/bin/sh", "-c", "'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
+
+completed_older:
+  uuid: zzzzz-dz642-compltcontainr2
+  owner_uuid: zzzzz-tpzed-000000000000000
+  state: Complete
+  exit_code: 0
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  started_at: 2016-01-13 11:11:11.111111111 Z
+  finished_at: 2016-01-14 11:12:13.111111111 Z
+  container_image: test
+  cwd: test
+  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
+
+requester:
+  uuid: zzzzz-dz642-requestingcntnr
+  owner_uuid: zzzzz-tpzed-000000000000000
+  state: Complete
+  exit_code: 0
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  container_image: test
+  cwd: test
+  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
+
+requester_container:
+  uuid: zzzzz-dz642-requestercntnr1
+  owner_uuid: zzzzz-tpzed-000000000000000
+  state: Complete
+  exit_code: 0
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  container_image: test
+  cwd: test
+  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
+
+failed_container:
+  uuid: zzzzz-dz642-failedcontainr1
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  state: Complete
+  exit_code: 33
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  container_image: test
+  cwd: test
+  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
+
+ancient_container_with_logs:
+  uuid: zzzzz-dz642-logscontainer01
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  state: Complete
+  exit_code: 0
+  priority: 1
+  created_at: <%= 2.year.ago.to_s(:db) %>
+  updated_at: <%= 2.year.ago.to_s(:db) %>
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  finished_at: <%= 2.year.ago.to_s(:db) %>
+  log: ea10d51bcf88862dbcc36eb292017dfd+45
+  output: test
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
+
+previous_container_with_logs:
+  uuid: zzzzz-dz642-logscontainer02
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  state: Complete
+  exit_code: 0
+  priority: 1
+  created_at: <%= 1.month.ago.to_s(:db) %>
+  updated_at: <%= 1.month.ago.to_s(:db) %>
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  finished_at: <%= 1.month.ago.to_s(:db) %>
+  log: ea10d51bcf88862dbcc36eb292017dfd+45
+  output: test
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
+
+running_container_with_logs:
+  uuid: zzzzz-dz642-logscontainer03
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  state: Running
+  priority: 1
+  created_at: <%= 1.hour.ago.to_s(:db) %>
+  updated_at: <%= 1.hour.ago.to_s(:db) %>
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
+
+running_to_be_deleted:
+  uuid: zzzzz-dz642-runnincntrtodel
+  owner_uuid: zzzzz-tpzed-000000000000000
+  state: Running
+  priority: 1
+  created_at: <%= 1.minute.ago.to_s(:db) %>
+  updated_at: <%= 1.minute.ago.to_s(:db) %>
+  started_at: <%= 1.minute.ago.to_s(:db) %>
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  auth_uuid: zzzzz-gj3su-ty6lvu9d7u7c2sq
+  secret_mounts: {}
+  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
+
+runtime_token:
+  uuid: zzzzz-dz642-20isqbkl8xwnsao
+  owner_uuid: zzzzz-tpzed-000000000000000
+  state: Locked
+  locked_by_uuid: zzzzz-gj3su-jrriu629zljsnuf
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  runtime_token: v2/zzzzz-gj3su-2nj68s291f50gd9/2d19ue6ofx26o3mm7fs9u6t7hov9um0v92dzwk1o2xed3abprw
+  runtime_user_uuid: zzzzz-tpzed-l3skomkti0c4vg4
+  runtime_auth_scopes: ["all"]
+  runtime_constraints:
+    ram: 12000000000
+    vcpus: 4
+  mounts:
+    /tmp:
+      kind: tmp
+      capacity: 24000000000
+    /var/spool/cwl:
+      kind: tmp
+      capacity: 24000000000
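
A note on the secret_mounts_md5 values above: each one is the MD5 digest of the canonical JSON serialization of secret_mounts, so every container with empty secret_mounts shares the constant 99914b932bd37a50b983c5e7c90ae93b, the digest of the two-character string "{}". A minimal Ruby sketch, using the stdlib JSON module in place of Arvados' SafeJSON (both serialize an empty hash the same way):

    require 'digest/md5'
    require 'json'

    # MD5 of the canonical JSON dump of an empty hash -- the constant
    # that recurs as secret_mounts_md5 throughout the fixtures above.
    Digest::MD5.hexdigest(JSON.dump({}))
    # => "99914b932bd37a50b983c5e7c90ae93b"
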
diff --git a/services/api/test/fixtures/files/proc_stat b/services/api/test/fixtures/files/proc_stat
new file mode 100644 (file)
index 0000000..eac6c47
--- /dev/null
+++ b/services/api/test/fixtures/files/proc_stat
@@ -0,0 +1,14 @@
+cpu  1632063 14136 880034 1195938459 1041039 63 21266 52811 0 0
+cpu0 291707 2191 123004 199461836 32816 58 4488 13329 0 0
+cpu1 279247 2288 168096 199443605 20358 0 3320 7776 0 0
+cpu2 243805 1099 145178 199516577 19542 0 2656 6975 0 0
+cpu3 225772 3025 145032 199534463 21217 0 2260 6578 0 0
+cpu4 280505 2581 151177 198587478 885147 2 4446 10116 0 0
+cpu5 311025 2950 147545 199394498 61957 2 4093 8035 0 0
+intr 165887918 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8993615 9108946 8200 0 50911 0 12573182 7875376 8631 0 44633 0 10027365 7325091 8544 0 59992 0 9835855 6999541 8145 0 65576 0 9789778 8583897 8184 0 55917 0 10003804 8546910 8448 0 53484 0 463 150 3174990 11523 3836341 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ctxt 255708943
+btime 1448378837
+processes 924315
+procs_running 1
+procs_blocked 0
+softirq 105120691 0 21194262 1261637 20292759 0 0 40708 13638302 27046 48665977
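
The proc_stat fixture above is a verbatim /proc/stat snapshot. Per proc(5), the cpu lines carry cumulative tick counts in the order user, nice, system, idle, iowait, irq, softirq, steal, guest, guest_nice. A hedged sketch of how a consumer might derive utilization from the aggregate line; the parsing below is illustrative, not the code under test:

    # Parse the aggregate "cpu" line of a /proc/stat snapshot and
    # report the fraction of non-idle ticks (field order per proc(5)).
    line  = File.read('services/api/test/fixtures/files/proc_stat').lines.first
    ticks = line.split.drop(1).map(&:to_i)
    idle  = ticks[3] + ticks[4]          # idle + iowait
    busy  = ticks.sum - idle
    printf("CPU busy: %.2f%%\n", 100.0 * busy / (busy + idle))
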
diff --git a/services/api/test/fixtures/groups.yml b/services/api/test/fixtures/groups.yml
new file mode 100644 (file)
index 0000000..92a1ced
--- /dev/null
+++ b/services/api/test/fixtures/groups.yml
@@ -0,0 +1,346 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+public:
+  uuid: zzzzz-j7d0g-it30l961gq3t0oi
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  name: Public
+  description: Public Group
+  group_class: role
+
+private:
+  uuid: zzzzz-j7d0g-rew6elm53kancon
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Private
+  description: Private Group
+  group_class: role
+
+private_and_can_read_foofile:
+  uuid: zzzzz-j7d0g-22xp1wpjul508rk
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Private and Can Read Foofile
+  description: Another Private Group
+  group_class: role
+
+activeandfriends:
+  uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-08-22 14:02:18.481582707 Z
+  modified_by_client_uuid:
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-08-22 14:02:18.481319501 Z
+  name: Active User and friends
+  description:
+  updated_at: 2014-08-22 14:02:18.481166435 Z
+  group_class: role
+
+system_group:
+  uuid: zzzzz-j7d0g-000000000000000
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: System Private
+  description: System-owned Group
+  group_class: role
+
+empty_lonely_group:
+  uuid: zzzzz-j7d0g-jtp06ulmvsezgyu
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: Empty
+  description: Empty Group
+  group_class: role
+
+all_users:
+  uuid: zzzzz-j7d0g-fffffffffffffff
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: All users
+  description: All users
+  group_class: role
+
+testusergroup_admins:
+  uuid: zzzzz-j7d0g-48foin4vonvc2at
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: Administrators of a subset of users
+
+aproject:
+  uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: A Project
+  description: Test project belonging to active user
+  group_class: project
+
+asubproject:
+  uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: A Subproject
+  description: "Test project belonging to active user's first test project"
+  group_class: project
+
+future_project_viewing_group:
+  uuid: zzzzz-j7d0g-futrprojviewgrp
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: Future Project Viewing Group
+  description: "Group used to test granting Group Project viewing"
+  group_class: role
+
+bad_group_has_ownership_cycle_a:
+  uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs
+  owner_uuid: zzzzz-j7d0g-0077nzts8c178lw
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-05-03 18:50:08 -0400
+  modified_at: 2014-05-03 18:50:08 -0400
+  updated_at: 2014-05-03 18:50:08 -0400
+  name: Owned by bad group b
+
+bad_group_has_ownership_cycle_b:
+  uuid: zzzzz-j7d0g-0077nzts8c178lw
+  owner_uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-05-03 18:50:08 -0400
+  modified_at: 2014-05-03 18:50:08 -0400
+  updated_at: 2014-05-03 18:50:08 -0400
+  name: Owned by bad group a
+
+anonymous_group:
+  uuid: zzzzz-j7d0g-anonymouspublic
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: Anonymous users
+  group_class: role
+  description: Anonymous users
+
+anonymously_accessible_project:
+  uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  name: Unrestricted public data
+  group_class: project
+  description: An anonymously accessible project
+
+subproject_in_anonymous_accessible_project:
+  uuid: zzzzz-j7d0g-mhtfesvgmkolpyf
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  created_at: 2014-04-21 15:37:48 -0400
+  name: Subproject in anonymous accessible project
+  description: Description for subproject in anonymous accessible project
+  group_class: project
+
+active_user_has_can_manage:
+  uuid: zzzzz-j7d0g-ptt1ou6a9lxrv07
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  name: Active user has can_manage
+
+# Group for testing granting permission between users who share a group.
+group_for_sharing_tests:
+  uuid: zzzzz-j7d0g-t4ucgncwteul7zt
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: Group for sharing tests
+  description: Users who can share objects with each other
+  group_class: role
+
+project_owned_by_foo:
+  uuid:  zzzzz-j7d0g-lsjm0ibr0ydwpzx
+  owner_uuid: zzzzz-tpzed-81hsbo6mk8nl05c
+  created_at: 2014-02-03T17:22:54Z
+  modified_at: 2014-02-03T17:22:54Z
+  name: project_owned_by_foo
+  group_class: project
+
+empty_project:
+  uuid: zzzzz-j7d0g-9otoxmrksam74q6
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-12-16 15:56:27.967534940 Z
+  modified_by_client_uuid: ~
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-12-16 15:56:27.967358199 Z
+  name: Empty project
+  description: ~
+  updated_at: 2014-12-16 15:56:27.967242142 Z
+  group_class: project
+
+project_with_10_collections:
+  uuid: zzzzz-j7d0g-0010collections
+  owner_uuid: zzzzz-tpzed-user1withloadab
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-user1withloadab
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: project with 10 collections
+  description: This will result in one page in the display
+  group_class: project
+
+project_with_201_collections:
+  uuid: zzzzz-j7d0g-0201collections
+  owner_uuid: zzzzz-tpzed-user1withloadab
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-user1withloadab
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: project with 201 collections
+  description: This will result in two pages in the display
+  group_class: project
+
+project_with_10_pipelines:
+  uuid: zzzzz-j7d0g-000010pipelines
+  owner_uuid: zzzzz-tpzed-user1withloadab
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-user1withloadab
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: project with 10 pipelines
+  description: project with 10 pipelines
+  group_class: project
+
+project_with_2_pipelines_and_60_crs:
+  uuid: zzzzz-j7d0g-nnncrspipelines
+  owner_uuid: zzzzz-tpzed-user1withloadab
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-user1withloadab
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: project with 2 pipelines and 60 crs
+  description: This will result in two pages in the display
+  group_class: project
+
+project_with_25_pipelines:
+  uuid: zzzzz-j7d0g-000025pipelines
+  owner_uuid: zzzzz-tpzed-user1withloadab
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-user1withloadab
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: project with 25 pipelines
+  description: project with 25 pipelines
+  group_class: project
+
+fuse_owned_project:
+  uuid: zzzzz-j7d0g-0000ownedbyfuse
+  owner_uuid: zzzzz-tpzed-0fusedrivertest
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-0fusedrivertest
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: FUSE Test Project
+  description: Test project belonging to FUSE test user
+  group_class: project
+
+group_with_no_class:
+  uuid: zzzzz-j7d0g-groupwithnoclas
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: group_with_no_class
+  description: This group has no class at all. So rude!
+
+# This wouldn't pass model validation, but it enables a workbench
+# infinite-loop test. See #4389
+project_owns_itself:
+  uuid: zzzzz-j7d0g-7rqh7hdshd5yp5t
+  owner_uuid: zzzzz-j7d0g-7rqh7hdshd5yp5t
+  created_at: 2014-11-05 22:31:24.258424340 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: 6pbr1-tpzed-000000000000000
+  modified_at: 2014-11-05 22:31:24.258242890 Z
+  name: zzzzz-j7d0g-7rqh7hdshd5yp5t
+  description: ~
+  updated_at: 2014-11-05 22:31:24.258093171 Z
+  group_class: project
+
+# Used to test renaming when an object is removed from "asubproject" while
+# another object with the same name exists in the home project.
+subproject_in_active_user_home_project_to_test_unique_key_violation:
+  uuid: zzzzz-j7d0g-subprojsamenam1
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2013-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2013-04-21 15:37:48 -0400
+  updated_at: 2013-04-21 15:37:48 -0400
+  name: Subproject to test owner uuid and name unique key violation upon removal
+  description: Subproject in active user home project to test owner uuid and name unique key violation upon removal
+  group_class: project
+
+subproject_in_asubproject_with_same_name_as_one_in_active_user_home:
+  uuid: zzzzz-j7d0g-subprojsamenam2
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  created_at: 2013-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2013-04-21 15:37:48 -0400
+  updated_at: 2013-04-21 15:37:48 -0400
+  name: Subproject to test owner uuid and name unique key violation upon removal
+  description: "Removing this will result in name conflict with 'A project' in Home project and hence get renamed."
+  group_class: project
+
+starred_and_shared_active_user_project:
+  uuid: zzzzz-j7d0g-starredshared01
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: Starred and shared active user project
+  description: Starred and shared active user project
+  group_class: project
+
+trashed_project:
+  uuid: zzzzz-j7d0g-trashedproject1
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: trashed project
+  group_class: project
+  trash_at: 2001-01-01T00:00:00Z
+  delete_at: 2008-03-01T00:00:00Z
+  is_trashed: true
+  modified_at: 2001-01-01T00:00:00Z
+
+trashed_subproject:
+  uuid: zzzzz-j7d0g-trashedproject2
+  owner_uuid: zzzzz-j7d0g-trashedproject1
+  name: trashed subproject
+  group_class: project
+  is_trashed: false
+  modified_at: 2001-01-01T00:00:00Z
+
+trashed_subproject3:
+  uuid: zzzzz-j7d0g-trashedproject3
+  owner_uuid: zzzzz-j7d0g-trashedproject1
+  name: trashed subproject 3
+  group_class: project
+  trash_at: 2001-01-01T00:00:00Z
+  delete_at: 2038-03-01T00:00:00Z
+  is_trashed: true
+  modified_at: 2001-01-01T00:00:00Z
+
+trashed_on_next_sweep:
+  uuid: zzzzz-j7d0g-soontobetrashed
+  owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+  name: soon to be trashed project
+  group_class: project
+  trash_at: 2001-01-01T00:00:00Z
+  delete_at: 2038-03-01T00:00:00Z
+  is_trashed: false
+  modified_at: 2001-01-01T00:00:00Z
\ No newline at end of file
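
The trashed_* fixtures above encode the trash lifecycle: a project drops out of listings once trash_at has passed (or is_trashed is set), and becomes eligible for permanent deletion once delete_at has passed; note that trashed_project's delete_at is already in the past, while trashed_subproject3's is in 2038. A hedged sketch of the visibility rule those timestamps imply; the helper name and data shape are illustrative:

    # A project is hidden when explicitly trashed or when its trash_at
    # time has passed; it may be purged only after delete_at.
    def project_visible?(group, now = Time.now.utc)
      !group[:is_trashed] && (group[:trash_at].nil? || group[:trash_at] > now)
    end
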
diff --git a/services/api/test/fixtures/humans.yml b/services/api/test/fixtures/humans.yml
new file mode 100644 (file)
index 0000000..eee61ef
--- /dev/null
+++ b/services/api/test/fixtures/humans.yml
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This file exists to ensure the table gets cleared during DatabaseController#reset.
diff --git a/services/api/test/fixtures/job_tasks.yml b/services/api/test/fixtures/job_tasks.yml
new file mode 100644 (file)
index 0000000..7131da6
--- /dev/null
+++ b/services/api/test/fixtures/job_tasks.yml
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+running_job_task_1:
+  uuid: zzzzz-ot0gb-runningjobtask1
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: <%= 3.minute.ago.to_s(:db) %>
+  job_uuid: zzzzz-8i9sb-with2components
+
+running_job_task_2:
+  uuid: zzzzz-ot0gb-runningjobtask2
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: <%= 3.minute.ago.to_s(:db) %>
+  job_uuid: zzzzz-8i9sb-with2components
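
Expressions like <%= 3.minute.ago.to_s(:db) %> work because Rails renders fixture files through ERB before parsing them as YAML; ActiveSupport supplies the duration helpers (minute and minutes are aliases) and the :db time format. A quick illustration; the output is, of course, relative to when it runs:

    require 'active_support/all'

    # Duration arithmetic plus the :db format used by the fixtures:
    # "YYYY-MM-DD HH:MM:SS".
    puts 3.minutes.ago.to_s(:db)   # e.g. "2019-03-14 14:08:26"
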
diff --git a/services/api/test/fixtures/jobs.yml b/services/api/test/fixtures/jobs.yml
new file mode 100644 (file)
index 0000000..140f370
--- /dev/null
+++ b/services/api/test/fixtures/jobs.yml
@@ -0,0 +1,768 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+running:
+  uuid: zzzzz-8i9sb-pshmckwoma9plh7
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  created_at: <%= 3.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: ~
+  script: hash
+  repository: active/foo
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  running: true
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Running
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+
+running_cancelled:
+  uuid: zzzzz-8i9sb-4cf0nhn6xte809j
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: <%= 1.minute.ago.to_s(:db) %>
+  cancelled_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  created_at: <%= 4.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: ~
+  script: hash
+  repository: active/foo
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  running: true
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Cancelled
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+
+uses_nonexistent_script_version:
+  uuid: zzzzz-8i9sb-7m339pu0x9mla88
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
+  created_at: <%= 5.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: <%= 2.minute.ago.to_s(:db) %>
+  script: hash
+  repository: active/foo
+  running: false
+  success: true
+  output: d41d8cd98f00b204e9800998ecf8427e+0
+  priority: 0
+  log: d41d8cd98f00b204e9800998ecf8427e+0
+  is_locked_by_uuid: ~
+  tasks_summary:
+    failed: 0
+    todo: 0
+    running: 0
+    done: 1
+  runtime_constraints: {}
+  state: Complete
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+
+foobar:
+  uuid: zzzzz-8i9sb-aceg2bnq7jt7kon
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  script: hash
+  repository: active/foo
+  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
+  script_parameters:
+    input: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  created_at: <%= 4.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: <%= 2.minute.ago.to_s(:db) %>
+  running: false
+  success: true
+  output: fa7aeb5140e2848d39b416daeef4ffc5+45
+  priority: 0
+  log: ea10d51bcf88862dbcc36eb292017dfd+45
+  is_locked_by_uuid: ~
+  tasks_summary:
+    failed: 0
+    todo: 0
+    running: 0
+    done: 1
+  runtime_constraints: {}
+  state: Complete
+  script_parameters_digest: 03a43a7d84f7fb022467b876c2950acd
+
+barbaz:
+  uuid: zzzzz-8i9sb-cjs4pklxxjykyuq
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: 1
+  created_at: <%= 4.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: <%= 2.minute.ago.to_s(:db) %>
+  running: false
+  success: true
+  repository: active/foo
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  priority: 0
+  log: d41d8cd98f00b204e9800998ecf8427e+0
+  is_locked_by_uuid: ~
+  tasks_summary:
+    failed: 0
+    todo: 0
+    running: 0
+    done: 1
+  runtime_constraints: {}
+  state: Complete
+  script_parameters_digest: c3d19d3ec50ac0914baa56b149640f73
+
+runningbarbaz:
+  uuid: zzzzz-8i9sb-cjs4pklxxjykyuj
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: 1
+  created_at: <%= 4.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: <%= 2.minute.ago.to_s(:db) %>
+  running: true
+  success: ~
+  repository: active/foo
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  priority: 0
+  log: d41d8cd98f00b204e9800998ecf8427e+0
+  is_locked_by_uuid: ~
+  tasks_summary:
+    failed: 0
+    todo: 0
+    running: 1
+    done: 0
+  runtime_constraints: {}
+  state: Running
+  script_parameters_digest: c3d19d3ec50ac0914baa56b149640f73
+
+previous_job_run:
+  uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
+  created_at: <%= 14.minute.ago.to_s(:db) %>
+  finished_at: <%= 13.minutes.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: active/foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: "1"
+  success: true
+  log: d41d8cd98f00b204e9800998ecf8427e+0
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  state: Complete
+  script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
+
+previous_job_run_nil_log:
+  uuid: zzzzz-8i9sb-cjs4pklxxjykqq3
+  created_at: <%= 14.minute.ago.to_s(:db) %>
+  finished_at: <%= 13.minutes.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: active/foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: "3"
+  success: true
+  log: ~
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  state: Complete
+  script_parameters_digest: 445702df4029b8a6e7075b451ff1256a
+
+previous_ancient_job_run:
+  uuid: zzzzz-8i9sb-ahd7cie8jah9qui
+  created_at: <%= 366.days.ago.to_s(:db) %>
+  finished_at: <%= 365.days.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: active/foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: "2"
+  success: true
+  log: d41d8cd98f00b204e9800998ecf8427e+0
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  state: Complete
+  script_parameters_digest: 174dd339d44f2b259fadbab7ebdb8df9
+
+previous_docker_job_run:
+  uuid: zzzzz-8i9sb-k6emstgk4kw4yhi
+  created_at: <%= 14.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: active/foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: "1"
+  runtime_constraints:
+    docker_image: arvados/apitestfixture
+  success: true
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  docker_image_locator: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  state: Complete
+  script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
+  log: ea10d51bcf88862dbcc36eb292017dfd+45
+
+previous_ancient_docker_image_job_run:
+  uuid: zzzzz-8i9sb-t3b460aolxxuldl
+  created_at: <%= 144.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: active/foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: "2"
+  runtime_constraints:
+    docker_image: arvados/apitestfixture
+  success: true
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  docker_image_locator: b519d9cb706a29fc7ea24dbea2f05851+93
+  state: Complete
+  script_parameters_digest: 174dd339d44f2b259fadbab7ebdb8df9
+
+previous_job_run_with_arvados_sdk_version:
+  uuid: zzzzz-8i9sb-eoo0321or2dw2jg
+  created_at: <%= 14.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: active/foo
+  script: hash
+  script_version: 31ce37fe365b3dc204300a3e4c396ad333ed0556
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: "1"
+  runtime_constraints:
+    arvados_sdk_version: commit2
+    docker_image: arvados/apitestfixture
+  arvados_sdk_version: 00634b2b8a492d6f121e3cf1d6587b821136a9a7
+  docker_image_locator: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  success: true
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  state: Complete
+  script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
+  log: ea10d51bcf88862dbcc36eb292017dfd+45
+
+previous_job_run_no_output:
+  uuid: zzzzz-8i9sb-cjs4pklxxjykppp
+  created_at: <%= 14.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: active/foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: "2"
+  success: true
+  output: ~
+  state: Complete
+  script_parameters_digest: 174dd339d44f2b259fadbab7ebdb8df9
+
+previous_job_run_superseded_by_hash_branch:
+  # This supplied_script_version is a branch name with later commits.
+  uuid: zzzzz-8i9sb-aeviezu5dahph3e
+  created_at: <%= 15.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: active/shabranchnames
+  script: testscript
+  script_version: 7387838c69a21827834586cc42b467ff6c63293b
+  supplied_script_version: 738783
+  script_parameters: {}
+  success: true
+  output: d41d8cd98f00b204e9800998ecf8427e+0
+  state: Complete
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+
+nondeterminisic_job_run:
+  uuid: zzzzz-8i9sb-cjs4pklxxjykyyy
+  created_at: <%= 14.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: active/foo
+  script: hash2
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: "1"
+  success: true
+  nondeterministic: true
+  state: Complete
+  script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
+
+nearly_finished_job:
+  uuid: zzzzz-8i9sb-2gx6rz0pjl033w3
+  created_at: <%= 14.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: arvados
+  script: doesnotexist
+  script_version: 309e25a64fe994867db8459543af372f850e25b9
+  script_parameters:
+    input: b519d9cb706a29fc7ea24dbea2f05851+249025
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: ~
+  running: true
+  success: ~
+  tasks_summary:
+    failed: 0
+    todo: 0
+    running: 1
+    done: 0
+  runtime_constraints: {}
+  state: Complete
+  script_parameters_digest: 7ea26d58a79b7f5db9f90fb1e33d3006
+
+queued:
+  uuid: zzzzz-8i9sb-grx15v5mjnsyxk7
+  created_at: <%= 1.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  started_at: ~
+  finished_at: ~
+  script: foo
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  script_parameters: {}
+  running: ~
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: ~
+  tasks_summary: {}
+  runtime_constraints: {}
+  state: Queued
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+
+# A job with a log collection that can be parsed by the log viewer.
+job_with_real_log:
+  uuid: zzzzz-8i9sb-0vsrcqi7whchuil
+  created_at: 2014-09-01 12:00:00
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  log: 0b9a7787660e1fce4a93f33e01376ba6+81
+  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
+  state: Complete
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+
+cancelled:
+  uuid: zzzzz-8i9sb-4cf0abc123e809j
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: <%= 1.minute.ago.to_s(:db) %>
+  cancelled_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  created_at: <%= 4.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: ~
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  running: false
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Cancelled
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+
+job_in_subproject:
+  uuid: zzzzz-8i9sb-subprojectjob01
+  created_at: 2014-10-15 12:00:00
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  log: ~
+  repository: active/foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Complete
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+
+job_in_trashed_project:
+  uuid: zzzzz-8i9sb-subprojectjob02
+  created_at: 2014-10-15 12:00:00
+  owner_uuid: zzzzz-j7d0g-trashedproject2
+  log: ~
+  repository: active/foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Complete
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+
+running_will_be_completed:
+  uuid: zzzzz-8i9sb-rshmckwoma9pjh8
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  created_at: <%= 3.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: ~
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  running: true
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Running
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+
+graph_stage1:
+  uuid: zzzzz-8i9sb-graphstage10000
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  repository: active/foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Complete
+  output: fa7aeb5140e2848d39b416daeef4ffc5+45
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+
+graph_stage2:
+  uuid: zzzzz-8i9sb-graphstage20000
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  repository: active/foo
+  script: hash2
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Complete
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    input2: "stuff"
+  output: 65b17c95fdbc9800fc48acda4e9dcd0b+93
+  script_parameters_digest: 4900033ec5cfaf8a63566f3664aeaa70
+
+graph_stage3:
+  uuid: zzzzz-8i9sb-graphstage30000
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  repository: active/foo
+  script: hash2
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Complete
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    input2: "stuff2"
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
+
+job_with_latest_version:
+  uuid: zzzzz-8i9sb-nj8ioxnrvjtyk2b
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  script: hash
+  repository: active/foo
+  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
+  supplied_script_version: master
+  script_parameters:
+    input: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  created_at: <%= 3.minute.ago.to_s(:db) %>
+  started_at: <%= 2.minute.ago.to_s(:db) %>
+  finished_at: <%= 1.minute.ago.to_s(:db) %>
+  running: false
+  success: true
+  output: fa7aeb5140e2848d39b416daeef4ffc5+45
+  priority: 0
+  log: ea10d51bcf88862dbcc36eb292017dfd+45
+  is_locked_by_uuid: ~
+  tasks_summary:
+    failed: 0
+    todo: 0
+    running: 0
+    done: 1
+  runtime_constraints: {}
+  state: Complete
+  script_parameters_digest: 03a43a7d84f7fb022467b876c2950acd
+
+running_job_in_publicly_accessible_project:
+  uuid: zzzzz-8i9sb-n7omg50bvt0m1nf
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: active/foo
+  script: running_job_script
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Running
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    input2: "stuff2"
+  script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
+
+completed_job_in_publicly_accessible_project:
+  uuid: zzzzz-8i9sb-jyq01m7in1jlofj
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: active/foo
+  script: completed_job_script
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Complete
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    input2: "stuff2"
+  log: zzzzz-4zz18-4en62shvi99lxd4
+  output: b519d9cb706a29fc7ea24dbea2f05851+93
+  script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
+  started_at: <%= 10.minute.ago.to_s(:db) %>
+  finished_at: <%= 5.minute.ago.to_s(:db) %>
+
+job_in_publicly_accessible_project_but_other_objects_elsewhere:
+  uuid: zzzzz-8i9sb-jyq01muyhgr4ofj
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: active/foo
+  script: completed_job_script
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Complete
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    input2: "stuff2"
+  log: zzzzz-4zz18-fy296fx3hot09f7
+  output: zzzzz-4zz18-bv31uwvy3neko21
+  script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
+
+running_job_with_components:
+  uuid: zzzzz-8i9sb-with2components
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  created_at: <%= 3.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: ~
+  script: hash
+  repository: active/foo
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  running: true
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Running
+  components:
+    component1: zzzzz-8i9sb-jyq01m7in1jlofj
+    component2: zzzzz-d1hrv-partdonepipelin
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+
+# This main-level job is in the Running state, with one job and one pipeline instance as its components
+running_job_with_components_at_level_1:
+  uuid: zzzzz-8i9sb-jobcomponentsl1
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  created_at: <%= 12.hour.ago.to_s(:db) %>
+  started_at: <%= 12.hour.ago.to_s(:db) %>
+  finished_at: ~
+  repository: active/foo
+  script: hash
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+  running: true
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Running
+  components:
+    component1: zzzzz-8i9sb-jobcomponentsl2
+    component2: zzzzz-d1hrv-picomponentsl02
+
+# This running job, a child of level_1, has one child component
+running_job_with_components_at_level_2:
+  uuid: zzzzz-8i9sb-jobcomponentsl2
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  created_at: <%= 12.hour.ago.to_s(:db) %>
+  started_at: <%= 12.hour.ago.to_s(:db) %>
+  finished_at: ~
+  repository: active/foo
+  script: hash
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+  running: true
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Running
+  components:
+    component1: zzzzz-8i9sb-job1atlevel3noc
+
+# The two running jobs below, children of level_2, have no child components
+running_job_1_with_components_at_level_3:
+  uuid: zzzzz-8i9sb-job1atlevel3noc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  created_at: <%= 12.hour.ago.to_s(:db) %>
+  started_at: <%= 12.hour.ago.to_s(:db) %>
+  finished_at: ~
+  repository: active/foo
+  script: hash
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+  running: true
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Running
+
+running_job_2_with_components_at_level_3:
+  uuid: zzzzz-8i9sb-job2atlevel3noc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  created_at: <%= 12.hour.ago.to_s(:db) %>
+  started_at: <%= 12.hour.ago.to_s(:db) %>
+  finished_at: ~
+  repository: active/foo
+  script: hash
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+  running: true
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Running
+
+# The two jobs below are so confused that they have a circular component relationship
+running_job_1_with_circular_component_relationship:
+  uuid: zzzzz-8i9sb-job1withcirculr
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  created_at: <%= 12.hour.ago.to_s(:db) %>
+  started_at: <%= 12.hour.ago.to_s(:db) %>
+  finished_at: ~
+  repository: active/foo
+  script: hash
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+  running: true
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Running
+  components:
+    component1: zzzzz-8i9sb-job2withcirculr
+
+running_job_2_with_circular_component_relationship:
+  uuid: zzzzz-8i9sb-job2withcirculr
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  created_at: <%= 12.hour.ago.to_s(:db) %>
+  started_at: <%= 12.hour.ago.to_s(:db) %>
+  finished_at: ~
+  repository: active/foo
+  script: hash
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
+  running: true
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Running
+  components:
+    component1: zzzzz-8i9sb-job1withcirculr
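
The two *_with_circular_component_relationship fixtures above deliberately point at each other through their components hashes, so anything that walks the component tree has to guard against cycles. A minimal sketch of such a guard; the uuid-keyed hash of fixture rows is an illustrative data shape, not the actual Arvados traversal code:

    require 'set'

    # Walk a job's components depth-first, refusing to revisit a uuid,
    # so the circular fixtures above terminate instead of recursing.
    def each_component(jobs, uuid, seen = Set.new)
      return seen unless seen.add?(uuid)
      (jobs.dig(uuid, :components) || {}).each_value do |child|
        each_component(jobs, child, seen)
      end
      seen
    end
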
diff --git a/services/api/test/fixtures/keep_disks.yml b/services/api/test/fixtures/keep_disks.yml
new file mode 100644 (file)
index 0000000..e8424b2
--- /dev/null
+++ b/services/api/test/fixtures/keep_disks.yml
@@ -0,0 +1,33 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+nonfull:
+  uuid: zzzzz-penuu-5w2o2t1q5wy7fhn
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  node_uuid: zzzzz-7ekkf-53y36l1lu5ijveb
+  keep_service_uuid: zzzzz-bi6l4-6zhilxar6r8ey90
+  last_read_at: <%= 1.minute.ago.to_s(:db) %>
+  last_write_at: <%= 2.minute.ago.to_s(:db) %>
+  last_ping_at: <%= 3.minute.ago.to_s(:db) %>
+  ping_secret: z9xz2tc69dho51g1dmkdy5fnupdhsprahcwxdbjs0zms4eo6i
+
+full:
+  uuid: zzzzz-penuu-4kmq58ui07xuftx
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  node_uuid: zzzzz-7ekkf-53y36l1lu5ijveb
+  keep_service_uuid: zzzzz-bi6l4-6zhilxar6r8ey90
+  last_read_at: <%= 1.minute.ago.to_s(:db) %>
+  last_write_at: <%= 2.day.ago.to_s(:db) %>
+  last_ping_at: <%= 3.minute.ago.to_s(:db) %>
+  ping_secret: xx3ieejcufbjy4lli6yt5ig4e8w5l2hhgmbyzpzuq38gri6lj
+
+nonfull2:
+  uuid: zzzzz-penuu-1ydrih9k2er5j11
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  node_uuid: zzzzz-7ekkf-2z3mc76g2q73aio
+  keep_service_uuid: zzzzz-bi6l4-rsnj3c76ndxb7o0
+  last_read_at: <%= 1.minute.ago.to_s(:db) %>
+  last_write_at: <%= 2.minute.ago.to_s(:db) %>
+  last_ping_at: <%= 3.minute.ago.to_s(:db) %>
+  ping_secret: 4rs260ibhdum1d242xy23qv320rlerc0j7qg9vyqnchbgmjeek
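
In the fixtures above, "full" differs from "nonfull" only in its timestamps: it still pings and serves reads, but its last_write_at is days old. A hedged sketch of the kind of check a test might make on that pattern; the thresholds are illustrative, not Arvados constants:

    # A disk that pings recently but has not accepted writes for a
    # long time behaves like a full disk.
    def looks_full?(disk, now = Time.now)
      now - disk[:last_write_at] > 24 * 60 * 60 &&
        now - disk[:last_ping_at] < 10 * 60
    end
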
diff --git a/services/api/test/fixtures/keep_services.yml b/services/api/test/fixtures/keep_services.yml
new file mode 100644 (file)
index 0000000..b72eef6
--- /dev/null
+++ b/services/api/test/fixtures/keep_services.yml
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+keep0:
+  uuid: zzzzz-bi6l4-6zhilxar6r8ey90
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  service_host: keep0.zzzzz.arvadosapi.com
+  service_port: 25107
+  service_ssl_flag: false
+  service_type: disk
+
+keep1:
+  uuid: zzzzz-bi6l4-rsnj3c76ndxb7o0
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  service_host: keep1.zzzzz.arvadosapi.com
+  service_port: 25107
+  service_ssl_flag: false
+  service_type: disk
+
+proxy:
+  uuid: zzzzz-bi6l4-h0a0xwut9qa6g3a
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  service_host: keep.zzzzz.arvadosapi.com
+  service_port: 25333
+  service_ssl_flag: true
+  service_type: proxy
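
Each keep_services fixture carries exactly the fields a client needs to build the service's base URL: host, port, and whether to speak TLS. A minimal sketch under that assumption:

    # Derive a Keep service's base URL from fixture-style fields.
    def service_url(svc)
      scheme = svc[:service_ssl_flag] ? 'https' : 'http'
      "#{scheme}://#{svc[:service_host]}:#{svc[:service_port]}"
    end

    service_url(service_host: 'keep.zzzzz.arvadosapi.com',
                service_port: 25333, service_ssl_flag: true)
    # => "https://keep.zzzzz.arvadosapi.com:25333"
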
diff --git a/services/api/test/fixtures/links.yml b/services/api/test/fixtures/links.yml
new file mode 100644 (file)
index 0000000..e66bace
--- /dev/null
+++ b/services/api/test/fixtures/links.yml
@@ -0,0 +1,1104 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+user_agreement_required:
+  uuid: zzzzz-o0j2j-j2qe76q7s3c8aro
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2013-12-26T19:52:21Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2013-12-26T19:52:21Z
+  updated_at: 2013-12-26T19:52:21Z
+  tail_uuid: zzzzz-tpzed-000000000000000
+  link_class: signature
+  name: require
+  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  properties: {}
+
+user_agreement_readable:
+  uuid: zzzzz-o0j2j-qpf60gg4fwjlmex
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-j7d0g-fffffffffffffff
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  properties: {}
+
+all_users_can_read_anonymous_group:
+  uuid: zzzzz-o0j2j-0lhbqyjab4g0bwp
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2015-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-01-24 20:42:26 -0800
+  updated_at: 2015-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-j7d0g-fffffffffffffff
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-anonymouspublic
+  properties: {}
+
+active_user_member_of_all_users_group:
+  uuid: zzzzz-o0j2j-ctbysaduejxfrs5
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-fffffffffffffff
+  properties: {}
+
+active_user_can_manage_group:
+  uuid: zzzzz-o0j2j-3sa30nd3bqn1msh
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-02-03 15:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-02-03 15:42:26 -0800
+  updated_at: 2014-02-03 15:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_manage
+  head_uuid: zzzzz-j7d0g-ptt1ou6a9lxrv07
+  properties: {}
+
+user_agreement_signed_by_active:
+  uuid: zzzzz-o0j2j-4x85a69tqlrud1z
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2013-12-26T20:52:21Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2013-12-26T20:52:21Z
+  updated_at: 2013-12-26T20:52:21Z
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: signature
+  name: click
+  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  properties: {}
+
+user_agreement_signed_by_inactive:
+  uuid: zzzzz-o0j2j-lh7er2o3k6bmetw
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2013-12-26T20:52:21Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs
+  modified_at: 2013-12-26T20:52:21Z
+  updated_at: 2013-12-26T20:52:21Z
+  tail_uuid: zzzzz-tpzed-7sg468ezxwnodxs
+  link_class: signature
+  name: click
+  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  properties: {}
+
+spectator_user_member_of_all_users_group:
+  uuid: zzzzz-o0j2j-0s8ql1redzf8kvn
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-fffffffffffffff
+  properties: {}
+
+inactive_user_member_of_all_users_group:
+  uuid: zzzzz-o0j2j-osckxpy5hl5fjk5
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2013-12-26T20:52:21Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs
+  modified_at: 2013-12-26T20:52:21Z
+  updated_at: 2013-12-26T20:52:21Z
+  tail_uuid: zzzzz-tpzed-x9kqpd79egh49c7
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-fffffffffffffff
+  properties: {}
+
+inactive_signed_ua_user_member_of_all_users_group:
+  uuid: zzzzz-o0j2j-qkhyjcr6tidk652
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2013-12-26T20:52:21Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs
+  modified_at: 2013-12-26T20:52:21Z
+  updated_at: 2013-12-26T20:52:21Z
+  tail_uuid: zzzzz-tpzed-7sg468ezxwnodxs
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-fffffffffffffff
+  properties: {}
+
+foo_file_readable_by_active:
+  uuid: zzzzz-o0j2j-dp1d8395ldqw22r
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  properties: {}
+
+foo_file_readable_by_federated_active:
+  uuid: zzzzz-o0j2j-dp1d8395ldqw23r
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zbbbb-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  properties: {}
+
+foo_file_readable_by_active_duplicate_permission:
+  uuid: zzzzz-o0j2j-2qlmhgothiur55r
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-000000000000000
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  properties: {}
+
+foo_file_readable_by_active_redundant_permission_via_private_group:
+  uuid: zzzzz-o0j2j-5s8ry7sn6bwxb7w
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-000000000000000
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-j7d0g-22xp1wpjul508rk
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  properties: {}
+
+foo_file_readable_by_aproject:
+  uuid: zzzzz-o0j2j-fp1d8395ldqw22p
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  properties: {}
+
+bar_file_readable_by_active:
+  uuid: zzzzz-o0j2j-8hppiuduf8eqdng
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-ehbhgtheo8909or
+  properties: {}
+
+bar_file_readable_by_spectator:
+  uuid: zzzzz-o0j2j-0mhldkqozsltcli
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-ehbhgtheo8909or
+  properties: {}
+
+baz_file_publicly_readable:
+  uuid: zzzzz-o0j2j-132ne3lk954vtoc
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-j7d0g-fffffffffffffff
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-y9vne9npefyxh8g
+  properties: {}
+
+barbaz_job_readable_by_spectator:
+  uuid: zzzzz-o0j2j-cpy7p41hpk531e1
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-8i9sb-cjs4pklxxjykyuq
+  properties: {}
+
+runningbarbaz_job_readable_by_spectator:
+  uuid: zzzzz-o0j2j-cpy7p41hpk531e2
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-8i9sb-cjs4pklxxjykyuj
+  properties: {}
+
+arvados_repository_readable_by_all_users:
+  uuid: zzzzz-o0j2j-allcanreadarvrp
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-j7d0g-fffffffffffffff
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-s0uqq-arvadosrepo0123
+  properties: {}
+
+foo_repository_readable_by_spectator:
+  uuid: zzzzz-o0j2j-cpy7p41hpk5xxx
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-s0uqq-382brsig8rp3666
+  properties: {}
+
+foo_repository_manageable_by_active:
+  uuid: zzzzz-o0j2j-8tdfjd8g0s4rn1k
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_manage
+  head_uuid: zzzzz-s0uqq-382brsig8rp3666
+  properties: {}
+
+repository3_readable_by_active:
+  uuid: zzzzz-o0j2j-43iem9bdtefa76g
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-09-23 13:52:46 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-09-23 13:52:46 -0400
+  updated_at: 2014-09-23 13:52:46 -0400
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-s0uqq-38orljkqpyo1j61
+  properties: {}
+
+repository4_writable_by_active:
+  uuid: zzzzz-o0j2j-lio9debdt6yhkil
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-09-23 13:52:46 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-09-23 13:52:46 -0400
+  updated_at: 2014-09-23 13:52:46 -0400
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_write
+  head_uuid: zzzzz-s0uqq-38oru8hnk57ht34
+  properties: {}
+
+miniadmin_user_is_a_testusergroup_admin:
+  uuid: zzzzz-o0j2j-38vvkciz7qc12j9
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-04-01 13:53:33 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-04-01 13:53:33 -0400
+  updated_at: 2014-04-01 13:53:33 -0400
+  tail_uuid: zzzzz-tpzed-2bg9x0oeydcw5hm
+  link_class: permission
+  name: can_manage
+  head_uuid: zzzzz-j7d0g-48foin4vonvc2at
+  properties: {}
+
+rominiadmin_user_is_a_testusergroup_admin:
+  uuid: zzzzz-o0j2j-6b0hz5hr107mc90
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-04-01 13:53:33 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-04-01 13:53:33 -0400
+  updated_at: 2014-04-01 13:53:33 -0400
+  tail_uuid: zzzzz-tpzed-4hvxm4n25emegis
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-48foin4vonvc2at
+  properties: {}
+
+testusergroup_can_manage_active_user:
+  uuid: zzzzz-o0j2j-2vaqhxz6hsf4k1d
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-04-01 13:56:10 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-04-01 13:56:10 -0400
+  updated_at: 2014-04-01 13:56:10 -0400
+  tail_uuid: zzzzz-j7d0g-48foin4vonvc2at
+  link_class: permission
+  name: can_manage
+  head_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  properties: {}
+
+test_timestamps:
+  uuid: zzzzz-o0j2j-4abnk2w5t86x4uc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-15 13:17:14 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-15 13:17:14 -0400
+  updated_at: 2014-04-15 13:17:14 -0400
+  link_class: test
+  name: test
+  properties: {}
+
+admin_can_write_aproject:
+  # Yes, this permission is effectively redundant.
+  # We use it to test that other project admins can see
+  # all the project's sharing.
+  uuid: zzzzz-o0j2j-adminmgsproject
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  link_class: permission
+  name: can_write
+  head_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  properties: {}
+
+project_viewer_member_of_all_users_group:
+  uuid: zzzzz-o0j2j-cdnq6627g0h0r2x
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2015-07-28T21:34:41.361747000Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2015-07-28T21:34:41.361747000Z
+  updated_at: 2015-07-28T21:34:41.361747000Z
+  tail_uuid: zzzzz-tpzed-projectviewer1a
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-fffffffffffffff
+  properties: {}
+
+project_viewer_can_read_project:
+  uuid: zzzzz-o0j2j-projviewerreadp
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-projectviewer1a
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  properties: {}
+
+subproject_admin_can_manage_subproject:
+  uuid: zzzzz-o0j2j-subprojadminlnk
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-10-15 10:00:00 -0000
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-10-15 10:00:00 -0000
+  updated_at: 2014-10-15 10:00:00 -0000
+  tail_uuid: zzzzz-tpzed-subprojectadmin
+  link_class: permission
+  name: can_manage
+  head_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  properties: {}
+
+foo_collection_tag:
+  uuid: zzzzz-o0j2j-eedahfaho8aphiv
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  tail_uuid: ~
+  head_uuid: zzzzz-4zz18-fy296fx3hot09f7
+  link_class: tag
+  name: foo_tag
+  properties: {}
+
+active_user_can_manage_bad_group_cx2al9cqkmsf1hs:
+  uuid: zzzzz-o0j2j-ezv55ahzc9lvjwe
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-05-03 18:50:08 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-05-03 18:50:08 -0400
+  updated_at: 2014-05-03 18:50:08 -0400
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_manage
+  head_uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs
+  properties: {}
+
+multilevel_collection_1_readable_by_active:
+  uuid: zzzzz-o0j2j-dp1d8395ldqw22j
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-pyw8yp9g3pr7irn
+  properties: {}
+
+has_symbol_keys_in_database_somehow:
+  uuid: zzzzz-o0j2j-enl1wg58310loc6
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-05-28 16:24:02.314722162 Z
+  modified_by_client_uuid:
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-05-28 16:24:02.314484982 Z
+  tail_uuid: ~
+  link_class: test
+  name: ~
+  head_uuid: ~
+  properties:
+    :foo: "bar"
+    baz:
+      - waz
+      - :waz
+      - :waz
+      - 1
+      - ~
+      - false
+      - true
+  updated_at: 2014-05-28 16:24:02.314296411 Z
+
+bug2931_link_with_null_head_uuid:
+  uuid: zzzzz-o0j2j-uru66qok2wruasb
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-05-30 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-05-30 14:30:00.184019565 Z
+  updated_at: 2014-05-30 14:30:00.183829316 Z
+  link_class: permission
+  name: bug2931
+  tail_uuid: ~
+  head_uuid: ~
+  properties: {}
+
+anonymous_group_can_read_anonymously_accessible_project:
+  uuid: zzzzz-o0j2j-15gpzezqjg4bc4z
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-05-30 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-05-30 14:30:00.184019565 Z
+  updated_at: 2014-05-30 14:30:00.183829316 Z
+  link_class: permission
+  name: can_read
+  tail_uuid: zzzzz-j7d0g-anonymouspublic
+  head_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  properties: {}
+
+anonymous_user_can_read_anonymously_accessible_project:
+  uuid: zzzzz-o0j2j-82nbli3jptwksj1
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-05-30 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-05-30 14:30:00.184019565 Z
+  updated_at: 2014-05-30 14:30:00.183829316 Z
+  link_class: permission
+  name: can_read
+  tail_uuid: zzzzz-tpzed-anonymouspublic
+  head_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  properties: {}
+
+user_agreement_readable_by_anonymously_accessible_project:
+  uuid: zzzzz-o0j2j-o5ds5gvhkztdc8h
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  created_at: 2014-06-13 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-06-13 20:42:26 -0800
+  updated_at: 2014-06-13 20:42:26 -0800
+  link_class: permission
+  name: can_read
+
+active_user_permission_to_docker_image_collection:
+  uuid: zzzzz-o0j2j-dp1d8395ldqw33s
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-1v45jub259sjjgb
+  properties: {}
+
+active_user_permission_to_unlinked_docker_image_collection:
+  uuid: zzzzz-o0j2j-g5i0sa8cr3b1psf
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-d0d8z5wofvfgwad
+  properties: {}
+
+crt_user_permission_to_unlinked_docker_image_collection:
+  uuid: zzzzz-o0j2j-20zvdi9b4odcfz3
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-l3skomkti0c4vg4
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-d0d8z5wofvfgwad
+  properties: {}
+
+docker_image_collection_hash:
+  uuid: zzzzz-o0j2j-dockercollhasha
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-11 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-11 14:30:00.184019565 Z
+  updated_at: 2014-06-11 14:30:00.183829316 Z
+  link_class: docker_image_hash
+  name: d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678
+  tail_uuid: ~
+  head_uuid: zzzzz-4zz18-1v45jub259sjjgb
+  properties:
+    image_timestamp: "2014-06-10T14:30:00.184019565Z"
+
+docker_image_collection_tag:
+  uuid: zzzzz-o0j2j-dockercolltagbb
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-11 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-11 14:30:00.184019565 Z
+  updated_at: 2014-06-11 14:30:00.183829316 Z
+  link_class: docker_image_repo+tag
+  name: arvados/apitestfixture:latest
+  tail_uuid: ~
+  head_uuid: zzzzz-4zz18-1v45jub259sjjgb
+  properties:
+    image_timestamp: "2014-06-10T14:30:00.184019565Z"
+
+docker_image_collection_tag2:
+  uuid: zzzzz-o0j2j-dockercolltagbc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-11 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-11 14:30:00.184019565 Z
+  updated_at: 2014-06-11 14:30:00.183829316 Z
+  link_class: docker_image_repo+tag
+  name: arvados/apitestfixture:june10
+  tail_uuid: ~
+  head_uuid: zzzzz-4zz18-1v45jub259sjjgb
+  properties:
+    image_timestamp: "2014-06-10T14:30:00.184019565Z"
+
+docker_image_collection_hextag:
+  uuid: zzzzz-o0j2j-2591ao7zubhaoxh
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2017-02-13 21:41:06.769936997 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2017-02-13 21:41:06.769422000 Z
+  tail_uuid: ~
+  link_class: docker_image_repo+tag
+  name: arvados/apitestfixture:b25678748af0cac6d1180b9ca4ce3ef31f2b06602f471aad8dfd421e149b0d75
+  head_uuid: zzzzz-4zz18-1v45jub259sjjgb
+  properties: {}
+  updated_at: 2017-02-13 21:41:06.769422000 Z
+
+docker_1_12_image_hash:
+  uuid: zzzzz-o0j2j-f58l58fn65n8v6k
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2017-02-13 21:35:12.602828136 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2017-02-13 21:35:12.602309000 Z
+  tail_uuid: ~
+  link_class: docker_image_hash
+  name: sha256:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678
+  head_uuid: zzzzz-4zz18-1g4g0vhpjn9wq7i
+  properties: {}
+  updated_at: 2017-02-13 21:35:12.602309000 Z
+
+docker_1_12_image_tag:
+  uuid: zzzzz-o0j2j-dybsy0m3u96jkbv
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2017-02-13 21:37:47.441406362 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2017-02-13 21:37:47.440882000 Z
+  tail_uuid: ~
+  link_class: docker_image_repo+tag
+  name: arvados/apitestfixture:latest
+  head_uuid: zzzzz-4zz18-1g4g0vhpjn9wq7i
+  properties: {}
+  updated_at: 2017-02-13 21:37:47.440882000 Z
+
+docker_1_12_image_hextag:
+  uuid: zzzzz-o0j2j-06hzef4u1hbk1g5
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2017-02-13 21:37:47.441406362 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2017-02-13 21:37:47.440882000 Z
+  tail_uuid: ~
+  link_class: docker_image_repo+tag
+  name: arvados/apitestfixture:b25678748af0cac6d1180b9ca4ce3ef31f2b06602f471aad8dfd421e149b0d75
+  head_uuid: zzzzz-4zz18-1g4g0vhpjn9wq7i
+  properties: {}
+  updated_at: 2017-02-13 21:37:47.440882000 Z
+
+ancient_docker_image_collection_hash:
+  # This image helps test that searches for Docker images find
+  # the latest available image: the hash is the same as
+  # docker_image_collection_hash, but it points to a different
+  # Collection and has an older image timestamp.
+  uuid: zzzzz-o0j2j-dockercollhashz
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-12 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-12 14:30:00.184019565 Z
+  updated_at: 2014-06-12 14:30:00.183829316 Z
+  link_class: docker_image_hash
+  name: d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678
+  tail_uuid: ~
+  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  properties:
+    image_timestamp: "2010-06-10T14:30:00.184019565Z"
+
+ancient_docker_image_collection_tag:
+  uuid: zzzzz-o0j2j-dockercolltagzz
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-12 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-12 14:30:00.184019565 Z
+  updated_at: 2014-06-12 14:30:00.183829316 Z
+  link_class: docker_image_repo+tag
+  name: arvados/apitestfixture:latest
+  tail_uuid: ~
+  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  properties:
+    image_timestamp: "2010-06-10T14:30:00.184019565Z"
+
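+# A reader of the two "ancient" fixtures above might resolve the duplicated
+# hash/tag names like this (a sketch, not actual Arvados client code):
+# collect the matching docker_image_hash / docker_image_repo+tag links and
+# keep the one with the newest image_timestamp, e.g. in Ruby
+#   newest = matching_links.max_by { |l| l.properties['image_timestamp'] }
+# so docker_image_collection_hash (2014) shadows these 2010 entries.
+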
+docker_image_tag_like_hash:
+  uuid: zzzzz-o0j2j-dockerhashtagaa
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-11 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-11 14:30:00.184019565 Z
+  updated_at: 2014-06-11 14:30:00.183829316 Z
+  link_class: docker_image_repo+tag
+  name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:latest
+  tail_uuid: ~
+  head_uuid: zzzzz-4zz18-1v45jub259sjjgb
+  properties:
+    image_timestamp: "2014-06-10T14:30:00.184019565Z"
+
+job_reader_can_read_previous_job_run:
+  # Permission link giving job_reader permission
+  # to read previous_job_run
+  uuid: zzzzz-o0j2j-8bbd851795ebafd
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-06-13 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-tpzed-000000000000000
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-13 20:42:26 -0800
+  updated_at: 2014-06-13 20:42:26 -0800
+  link_class: permission
+  name: can_read
+  tail_uuid: zzzzz-tpzed-905b42d1dd4a354
+  head_uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
+
+job_reader_can_read_foo_repo:
+  # Permission link giving job_reader permission
+  # to read foo_repo
+  uuid: zzzzz-o0j2j-072ec05dc9487f8
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-06-13 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-tpzed-000000000000000
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-13 20:42:26 -0800
+  updated_at: 2014-06-13 20:42:26 -0800
+  link_class: permission
+  name: can_read
+  tail_uuid: zzzzz-tpzed-905b42d1dd4a354
+  head_uuid: zzzzz-s0uqq-382brsig8rp3666
+
+job_reader2_can_read_job_with_components:
+  # Permission link giving job_reader2 permission
+  # to read running_job_with_components
+  uuid: zzzzz-o0j2j-jobcomps4jobrdr
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-06-13 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-tpzed-000000000000000
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-13 20:42:26 -0800
+  updated_at: 2014-06-13 20:42:26 -0800
+  link_class: permission
+  name: can_read
+  tail_uuid: zzzzz-tpzed-readjobwithcomp
+  head_uuid: zzzzz-8i9sb-with2components
+
+job_reader2_can_read_pipeline_from_job_with_components:
+  # Permission link giving job_reader2 permission to read
+  # the pipeline referenced by running_job_with_components
+  uuid: zzzzz-o0j2j-pi4comps4jobrdr
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-06-13 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-tpzed-000000000000000
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-13 20:42:26 -0800
+  updated_at: 2014-06-13 20:42:26 -0800
+  link_class: permission
+  name: can_read
+  tail_uuid: zzzzz-tpzed-readjobwithcomp
+  head_uuid: zzzzz-d1hrv-partdonepipelin
+
+job_reader2_can_read_first_job_from_pipeline_from_job_with_components:
+  # Permission link giving job_reader2 permission to read
+  # the first job of the pipeline referenced by running_job_with_components
+  uuid: zzzzz-o0j2j-job4pi4j4jobrdr
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-06-13 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-tpzed-000000000000000
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-13 20:42:26 -0800
+  updated_at: 2014-06-13 20:42:26 -0800
+  link_class: permission
+  name: can_read
+  tail_uuid: zzzzz-tpzed-readjobwithcomp
+  head_uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
+
+baz_collection_name_in_asubproject:
+  uuid: zzzzz-o0j2j-bazprojectname2
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  tail_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  head_uuid: ea10d51bcf88862dbcc36eb292017dfd+45
+  link_class: name
+  # This should resemble the default name assigned when a
+  # Collection is added to a Project.
+  name: "ea10d51bcf88862dbcc36eb292017dfd+45 added sometime"
+  properties: {}
+
+empty_collection_name_in_active_user_home_project:
+  uuid: zzzzz-o0j2j-i3n6m552x6tmoi4
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-08-06 22:11:51.242392533 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-08-06 22:11:51.242150425 Z
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: name
+  name: Empty collection
+  head_uuid: d41d8cd98f00b204e9800998ecf8427e+0
+  properties: {}
+  updated_at: 2014-08-06 22:11:51.242010312 Z
+
+active_user_can_read_activeandfriends:
+  uuid: zzzzz-o0j2j-8184f5vk8c851ts
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-08-22 14:03:46.321059945 Z
+  modified_by_client_uuid:
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-08-22 14:03:46.320865926 Z
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7
+  properties: {}
+  updated_at: 2014-08-22 14:03:46.320743213 Z
+
+active_user_joined_activeandfriends:
+  uuid: zzzzz-o0j2j-t63rdd7vupqvnco
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-08-22 14:03:28.835064240 Z
+  modified_by_client_uuid:
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-08-22 14:03:28.834849409 Z
+  tail_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  properties: {}
+  updated_at: 2014-08-22 14:03:28.834720558 Z
+
+future_project_can_read_activeandfriends:
+  uuid: zzzzz-o0j2j-bkdtnddpmwxqiza
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-08-22 14:04:18.811622057 Z
+  modified_by_client_uuid:
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-08-22 14:04:18.811463859 Z
+  tail_uuid: zzzzz-tpzed-futureprojview2
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7
+  properties: {}
+  updated_at: 2014-08-22 14:04:18.811387314 Z
+
+future_project_user_joined_activeandfriends:
+  uuid: zzzzz-o0j2j-ksl8bo92eokv332
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-08-22 14:04:24.182103355 Z
+  modified_by_client_uuid:
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-08-22 14:04:24.181939129 Z
+  tail_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-tpzed-futureprojview2
+  properties: {}
+  updated_at: 2014-08-22 14:04:24.181799856 Z
+
+auto_setup_vm_login_username_can_login_to_test_vm:
+  uuid: zzzzz-o0j2j-i3n6m98766tmoi4
+  owner_uuid: zzzzz-tpzed-xabcdjxw79nv3jz
+  created_at: 2014-08-06 22:11:51.242392533 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xabcdjxw79nv3jz
+  modified_at: 2014-08-06 22:11:51.242150425 Z
+  tail_uuid: zzzzz-tpzed-xabcdjxw79nv3jz
+  link_class: permission
+  name: can_login
+  head_uuid: zzzzz-2x53u-382brsig8rp3064
+  properties: {username: 'auto_setup_vm_login'}
+  updated_at: 2014-08-06 22:11:51.242010312 Z
+
+admin_can_login_to_testvm2:
+  uuid: zzzzz-o0j2j-peek9mecohgh3ai
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  created_at: 2014-08-06 22:11:51.242392533 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-08-06 22:11:51.242150425 Z
+  tail_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  link_class: permission
+  name: can_login
+  head_uuid: zzzzz-2x53u-382brsig8rp3065
+  # username is not obviously related to other user data.
+  properties: {username: 'adminroot', groups: ['docker', 'admin']}
+  updated_at: 2014-08-06 22:11:51.242010312 Z
+
+active_can_login_to_testvm2:
+  uuid: zzzzz-o0j2j-rah2ya1ohx9xaev
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  created_at: 2014-08-06 22:11:51.242392533 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-08-06 22:11:51.242150425 Z
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_login
+  head_uuid: zzzzz-2x53u-382brsig8rp3065
+  # No groups.
+  properties: {username: 'active'}
+  updated_at: 2014-08-06 22:11:51.242010312 Z
+
+spectator_login_link_for_testvm2_without_username:
+  uuid: zzzzz-o0j2j-aem0eilie1jigh9
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  created_at: 2014-08-06 22:11:51.242392533 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-08-06 22:11:51.242150425 Z
+  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  link_class: permission
+  name: can_login
+  head_uuid: zzzzz-2x53u-382brsig8rp3065
+  updated_at: 2014-08-06 22:11:51.242010312 Z
+
+user_foo_can_read_sharing_group:
+  uuid: zzzzz-o0j2j-gdpvwvpj9kjs5in
+  owner_uuid: zzzzz-tpzed-000000000000000
+  tail_uuid: zzzzz-tpzed-81hsbo6mk8nl05c
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-t4ucgncwteul7zt
+
+user_foo_is_in_sharing_group:
+  uuid: zzzzz-o0j2j-bwmcf9nqwomvtny
+  owner_uuid: zzzzz-tpzed-000000000000000
+  tail_uuid: zzzzz-j7d0g-t4ucgncwteul7zt
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-tpzed-81hsbo6mk8nl05c
+
+user_bar_can_read_sharing_group:
+  uuid: zzzzz-o0j2j-23djaoza9g2zvjx
+  owner_uuid: zzzzz-tpzed-000000000000000
+  tail_uuid: zzzzz-tpzed-n3oaj4sm5fcnwib
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-t4ucgncwteul7zt
+
+user_bar_is_in_sharing_group:
+  uuid: zzzzz-o0j2j-ga7fgy3xsz4hu28
+  owner_uuid: zzzzz-tpzed-000000000000000
+  tail_uuid: zzzzz-j7d0g-t4ucgncwteul7zt
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-tpzed-n3oaj4sm5fcnwib
+
+user1-with-load_member_of_all_users_group:
+  uuid: zzzzz-o0j2j-user1-with-load
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-user1withloadab
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-fffffffffffffff
+  properties: {}
+
+empty_collection_name_in_fuse_user_home_project:
+  uuid: zzzzz-o0j2j-hw3mcg3c8pwo6ar
+  owner_uuid: zzzzz-tpzed-0fusedrivertest
+  created_at: 2014-08-06 22:11:51.242392533 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-0fusedrivertest
+  modified_at: 2014-08-06 22:11:51.242150425 Z
+  tail_uuid: zzzzz-tpzed-0fusedrivertest
+  link_class: name
+  name: Empty collection
+  head_uuid: d41d8cd98f00b204e9800998ecf8427e+0
+  properties: {}
+  updated_at: 2014-08-06 22:11:51.242010312 Z
+
+star_project_for_active_user:
+  uuid: zzzzz-o0j2j-starredbyactive
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: star
+  name: zzzzz-j7d0g-starredshared01
+  head_uuid: zzzzz-j7d0g-starredshared01
+  properties: {}
+
+share_starred_project_with_project_viewer:
+  uuid: zzzzz-o0j2j-sharewithviewer
+  owner_uuid: zzzzz-tpzed-000000000000000
+  tail_uuid: zzzzz-tpzed-projectviewer1a
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-starredshared01
+
+star_shared_project_for_project_viewer:
+  uuid: zzzzz-o0j2j-starredbyviewer
+  owner_uuid: zzzzz-tpzed-projectviewer1a
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-projectviewer1a
+  link_class: star
+  name: zzzzz-j7d0g-starredshared01
+  head_uuid: zzzzz-j7d0g-starredshared01
+  properties: {}
+
+tagged_collection_readable_by_spectator:
+  uuid: zzzzz-o0j2j-readacl4tagcoll
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-taggedcolletion
+  properties: {}
diff --git a/services/api/test/fixtures/logs.yml b/services/api/test/fixtures/logs.yml
new file mode 100644 (file)
index 0000000..0785c12
--- /dev/null
@@ -0,0 +1,265 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+noop: # nothing happened ...to the 'spectator' user
+  id: 1
+  uuid: zzzzz-xxxxx-pshmckwoma9plh7
+  owner_uuid: zzzzz-tpzed-000000000000000
+  object_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  object_owner_uuid: zzzzz-tpzed-000000000000000
+  event_at: <%= 1.minute.ago.to_s(:db) %>
+
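+# Rails runs fixture files through ERB before parsing the YAML, so helpers
+# like 1.minute.ago.to_s(:db) above are evaluated at load time, giving each
+# test run fresh relative timestamps in the database's
+# "YYYY-MM-DD HH:MM:SS" format.
+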
+admin_changes_repository2: # admin changes repository2, which is owned by active user
+  id: 2
+  uuid: zzzzz-xxxxx-pshmckwoma00002
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
+  object_uuid: zzzzz-2x53u-382brsig8rp3667 # repository foo
+  object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  event_at: <%= 2.minute.ago.to_s(:db) %>
+  event_type: update
+
+admin_changes_specimen: # admin changes specimen owned_by_spectator
+  id: 3
+  uuid: zzzzz-xxxxx-pshmckwoma00003
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
+  object_uuid: zzzzz-2x53u-3b0xxwzlbzxq5yr # specimen owned_by_spectator
+  object_owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r # spectator user
+  event_at: <%= 3.minute.ago.to_s(:db) %>
+  event_type: update
+
+system_adds_foo_file: # foo collection added, readable by active through link
+  id: 4
+  uuid: zzzzz-xxxxx-pshmckwoma00004
+  owner_uuid: zzzzz-tpzed-000000000000000 # system user
+  object_uuid: zzzzz-4zz18-znfnqtbbv4spc3w # foo file
+  object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
+  event_at: <%= 4.minute.ago.to_s(:db) %>
+  event_type: create
+
+system_adds_baz: # baz collection added, readable by active and spectator through group 'all users' group membership
+  id: 5
+  uuid: zzzzz-xxxxx-pshmckwoma00005
+  owner_uuid: zzzzz-tpzed-000000000000000 # system user
+  object_uuid: zzzzz-4zz18-y9vne9npefyxh8g # baz file
+  object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
+  event_at: <%= 5.minute.ago.to_s(:db) %>
+  event_type: create
+
+log_owned_by_active:
+  id: 6
+  uuid: zzzzz-xxxxx-pshmckwoma12345
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  object_uuid: zzzzz-2x53u-382brsig8rp3667 # repository foo
+  object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  event_at: <%= 2.minute.ago.to_s(:db) %>
+  summary: non-admin user can read own logs
+
+crunchstat_for_running_job:
+  id: 7
+  uuid: zzzzz-57u5n-tmymyrojrbtnxh1
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-8i9sb-pshmckwoma9plh7
+  event_at: 2014-11-07 23:33:42.347455000 Z
+  event_type: stderr
+  summary: ~
+  properties:
+    text: '2014-11-07_23:33:41 zzzzz-8i9sb-pshmckwoma9plh7 31708 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: 2014-11-07 23:33:42.351913000 Z
+  updated_at: 2014-11-07 23:33:42.347455000 Z
+  modified_at: 2014-11-07 23:33:42.347455000 Z
+  object_owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
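+  # (properties.text above apparently reads as: timestamp, job uuid, pid,
+  # task number, and stream name, followed by the crunchstat message itself)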
+
+log_line_for_pipeline_in_publicly_accessible_project:
+  id: 8
+  uuid: zzzzz-57u5n-tmymyrojrjyhb45
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-d1hrv-n68vc490mloy4fi
+  event_at: 2014-11-07 23:33:42.347455000 Z
+  event_type: stderr
+  summary: ~
+  properties:
+    text: '2014-11-07_23:33:41 zzzzz-d1hrv-n68vc490mloy4fi 31708 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: 2014-11-07 23:33:42.351913000 Z
+  updated_at: 2014-11-07 23:33:42.347455000 Z
+  modified_at: 2014-11-07 23:33:42.347455000 Z
+  object_owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+
+log_line_for_pipeline_in_publicly_accessible_project_but_other_objects_elsewhere:
+  id: 9
+  uuid: zzzzz-57u5n-tmyhy56k9lnhb45
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-d1hrv-pisharednotobjs
+  event_at: 2014-11-07 23:33:42.347455000 Z
+  event_type: stderr
+  summary: ~
+  properties:
+    text: '2014-11-07_23:33:41 zzzzz-d1hrv-pisharednotobjs 31708 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: 2014-11-07 23:33:42.351913000 Z
+  updated_at: 2014-11-07 23:33:42.347455000 Z
+  modified_at: 2014-11-07 23:33:42.347455000 Z
+  object_owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+
+crunchstat_for_previous_job:
+  id: 10
+  uuid: zzzzz-57u5n-eir3aesha3kaene
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
+  event_at: 2014-11-07 23:33:42.347455000 Z
+  event_type: stderr
+  summary: ~
+  properties:
+    text: '2014-11-07_23:33:41 zzzzz-8i9sb-cjs4pklxxjykqqq 11592 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: 2014-11-07 23:33:42.351913000 Z
+  updated_at: 2014-11-07 23:33:42.347455000 Z
+  modified_at: 2014-11-07 23:33:42.347455000 Z
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+
+crunchstat_for_ancient_job:
+  id: 11
+  uuid: zzzzz-57u5n-ixioph7ieb5ung8
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-8i9sb-ahd7cie8jah9qui
+  event_at: 2013-11-07 23:33:42.347455000 Z
+  event_type: stderr
+  summary: ~
+  properties:
+    text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: 2013-11-07 23:33:42.351913000 Z
+  updated_at: 2013-11-07 23:33:42.347455000 Z
+  modified_at: 2013-11-07 23:33:42.347455000 Z
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+
+stderr_for_ancient_container:
+  id: 12
+  uuid: zzzzz-57u5n-containerlog001
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-dz642-logscontainer01
+  event_at: <%= 2.year.ago.to_s(:db) %>
+  event_type: stderr
+  summary: ~
+  properties:
+    text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: <%= 2.year.ago.to_s(:db) %>
+  updated_at: <%= 2.year.ago.to_s(:db) %>
+  modified_at: <%= 2.year.ago.to_s(:db) %>
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+
+crunchstat_for_ancient_container:
+  id: 13
+  uuid: zzzzz-57u5n-containerlog002
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-dz642-logscontainer01
+  event_at: <%= 2.year.ago.to_s(:db) %>
+  event_type: crunchstat
+  summary: ~
+  properties:
+    text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: <%= 2.year.ago.to_s(:db) %>
+  updated_at: <%= 2.year.ago.to_s(:db) %>
+  modified_at: <%= 2.year.ago.to_s(:db) %>
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+
+stderr_for_previous_container:
+  id: 14
+  uuid: zzzzz-57u5n-containerlog003
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-dz642-logscontainer02
+  event_at: <%= 1.month.ago.to_s(:db) %>
+  event_type: stderr
+  summary: ~
+  properties:
+    text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: <%= 1.month.ago.to_s(:db) %>
+  updated_at: <%= 1.month.ago.to_s(:db) %>
+  modified_at: <%= 1.month.ago.to_s(:db) %>
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+
+crunchstat_for_previous_container:
+  id: 15
+  uuid: zzzzz-57u5n-containerlog004
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-dz642-logscontainer02
+  event_at: <%= 1.month.ago.to_s(:db) %>
+  event_type: crunchstat
+  summary: ~
+  properties:
+    text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: <%= 1.month.ago.to_s(:db) %>
+  updated_at: <%= 1.month.ago.to_s(:db) %>
+  modified_at: <%= 1.month.ago.to_s(:db) %>
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+
+stderr_for_running_container:
+  id: 16
+  uuid: zzzzz-57u5n-containerlog005
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-dz642-logscontainer03
+  event_at: <%= 1.hour.ago.to_s(:db) %>
+  event_type: stderr
+  summary: ~
+  properties:
+    text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: <%= 1.hour.ago.to_s(:db) %>
+  updated_at: <%= 1.hour.ago.to_s(:db) %>
+  modified_at: <%= 1.hour.ago.to_s(:db) %>
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
+
+crunchstat_for_running_container:
+  id: 17
+  uuid: zzzzz-57u5n-containerlog006
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  object_uuid: zzzzz-dz642-logscontainer03
+  event_at: <%= 1.hour.ago.to_s(:db) %>
+  event_type: crunchstat
+  summary: ~
+  properties:
+    text: '2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat:
+      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user
+      0.9900 sys'
+  created_at: <%= 1.hour.ago.to_s(:db) %>
+  updated_at: <%= 1.hour.ago.to_s(:db) %>
+  modified_at: <%= 1.hour.ago.to_s(:db) %>
+  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz
diff --git a/services/api/test/fixtures/nodes.yml b/services/api/test/fixtures/nodes.yml
new file mode 100644 (file)
index 0000000..971132f
--- /dev/null
@@ -0,0 +1,97 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+busy:
+  uuid: zzzzz-7ekkf-53y36l1lu5ijveb
+  owner_uuid: zzzzz-tpzed-000000000000000
+  hostname: compute0
+  slot_number: 0
+  domain: ""
+  ip_address: 172.17.2.172
+  last_ping_at: <%= 1.minute.ago.to_s(:db) %>
+  first_ping_at: <%= 23.hour.ago.to_s(:db) %>
+  job_uuid: zzzzz-8i9sb-2gx6rz0pjl033w3  # nearly_finished_job
+  info:
+    ping_secret: "48dpm3b8ijyj3jkr2yczxw0844dqd2752bhll7klodvgz9bg80"
+    slurm_state: "alloc"
+
+down:
+  uuid: zzzzz-7ekkf-2vbompg3ecc6e2s
+  owner_uuid: zzzzz-tpzed-000000000000000
+  hostname: compute1
+  slot_number: 1
+  domain: ""
+  ip_address: 172.17.2.173
+  last_ping_at: <%= 1.hour.ago.to_s(:db) %>
+  first_ping_at: <%= 23.hour.ago.to_s(:db) %>
+  job_uuid: ~
+  info:
+    ping_secret: "2k3i71depad36ugwmlgzilbi4e8n0illb2r8l4efg9mzkb3a1k"
+
+idle:
+  uuid: zzzzz-7ekkf-2z3mc76g2q73aio
+  owner_uuid: zzzzz-tpzed-000000000000000
+  hostname: compute2
+  slot_number: 2
+  domain: ""
+  ip_address: 172.17.2.174
+  last_ping_at: <%= 2.minute.ago.to_s(:db) %>
+  first_ping_at: <%= 23.hour.ago.to_s(:db) %>
+  job_uuid: ~
+  info:
+    ping_secret: "69udawxvn3zzj45hs8bumvndricrha4lcpi23pd69e44soanc0"
+    slurm_state: "idle"
+  properties:
+    total_cpu_cores: 16
+
+was_idle_now_down:
+  uuid: zzzzz-7ekkf-xuzpkdasl0uzwyz
+  owner_uuid: zzzzz-tpzed-000000000000000
+  hostname: compute3
+  slot_number: ~
+  domain: ""
+  ip_address: 172.17.2.174
+  last_ping_at: <%= 1.hour.ago.to_s(:db) %>
+  first_ping_at: <%= 23.hour.ago.to_s(:db) %>
+  job_uuid: ~
+  info:
+    ping_secret: "1bd1yi0x4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2"
+    slurm_state: "idle"
+  properties:
+    total_cpu_cores: 16
+
+new_with_no_hostname:
+  uuid: zzzzz-7ekkf-newnohostname00
+  owner_uuid: zzzzz-tpzed-000000000000000
+  hostname: ~
+  slot_number: ~
+  ip_address: 172.17.2.175
+  last_ping_at: ~
+  first_ping_at: ~
+  job_uuid: ~
+  info:
+    ping_secret: "abcdyi0x4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2"
+
+new_with_custom_hostname:
+  uuid: zzzzz-7ekkf-newwithhostname
+  owner_uuid: zzzzz-tpzed-000000000000000
+  hostname: custom1
+  slot_number: 23
+  ip_address: 172.17.2.176
+  last_ping_at: ~
+  first_ping_at: ~
+  job_uuid: ~
+  info:
+    ping_secret: "abcdyi0x4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2"
+
+node_with_no_ip_address_yet:
+  uuid: zzzzz-7ekkf-nodenoipaddryet
+  owner_uuid: zzzzz-tpzed-000000000000000
+  hostname: noipaddr
+  slot_number: ~
+  last_ping_at: ~
+  first_ping_at: ~
+  job_uuid: ~
+  info:
+    ping_secret: "abcdyefg4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2"
diff --git a/services/api/test/fixtures/pipeline_instances.yml b/services/api/test/fixtures/pipeline_instances.yml
new file mode 100644 (file)
index 0000000..013f03c
--- /dev/null
@@ -0,0 +1,533 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+new_pipeline:
+  state: New
+  uuid: zzzzz-d1hrv-f4gneyn6br1xize
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 1.minute.ago.to_s(:db) %>
+
+new_pipeline_in_subproject:
+  state: New
+  uuid: zzzzz-d1hrv-subprojpipeline
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  created_at: <%= 1.minute.ago.to_s(:db) %>
+
+has_component_with_no_script_parameters:
+  state: Ready
+  uuid: zzzzz-d1hrv-1xfj6xkicf2muk2
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 10.minute.ago.to_s(:db) %>
+  components:
+   foo:
+    script: foo
+    script_version: master
+    script_parameters: {}
+
+has_component_with_empty_script_parameters:
+  state: Ready
+  uuid: zzzzz-d1hrv-jq16l10gcsnyumo
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 3.minute.ago.to_s(:db) %>
+  components:
+   foo:
+    script: foo
+    script_version: master
+
+has_component_with_completed_jobs:
+  # Test that the job "started_at" and "finished_at" fields are parsed
+  # into Time fields when rendering. These jobs must *not* have their
+  # own fixtures; the point is to force the
+  # pipeline_instances_controller_test in Workbench to parse the
+  # "components" field. (The relevant code paths are also used when a
+  # user has permission to read the pipeline instance itself, but not
+  # the jobs referenced by its components hash.)
+  state: Complete
+  uuid: zzzzz-d1hrv-i3e77t9z5y8j9cc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 11.minute.ago.to_s(:db) %>
+  started_at: <%= 10.minute.ago.to_s(:db) %>
+  finished_at: <%= 9.minute.ago.to_s(:db) %>
+  components:
+   foo:
+    script: foo
+    script_version: master
+    script_parameters: {}
+    job:
+      uuid: zzzzz-8i9sb-rft1xdewxkwgxnz
+      script_version: master
+      created_at: <%= 10.minute.ago.to_s(:db) %>
+      started_at: <%= 10.minute.ago.to_s(:db) %>
+      finished_at: <%= 9.minute.ago.to_s(:db) %>
+      state: Complete
+      tasks_summary:
+        failed: 0
+        todo: 0
+        running: 0
+        done: 1
+   bar:
+    script: bar
+    script_version: master
+    script_parameters: {}
+    job:
+      uuid: zzzzz-8i9sb-r2dtbzr6bfread7
+      script_version: master
+      created_at: <%= 9.minute.ago.to_s(:db) %>
+      started_at: <%= 9.minute.ago.to_s(:db) %>
+      state: Running
+      tasks_summary:
+        failed: 0
+        todo: 1
+        running: 2
+        done: 3
+   baz:
+    script: baz
+    script_version: master
+    script_parameters: {}
+    job:
+      uuid: zzzzz-8i9sb-c7408rni11o7r6s
+      script_version: master
+      created_at: <%= 9.minute.ago.to_s(:db) %>
+      state: Queued
+      tasks_summary: {}
+
+has_job:
+  name: pipeline_with_job
+  state: Ready
+  uuid: zzzzz-d1hrv-1yfj6xkidf2muk3
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 3.1.minute.ago.to_s(:db) %>
+  components:
+   foo:
+    script: foo
+    script_version: master
+    script_parameters: {}
+    job: {
+            uuid: zzzzz-8i9sb-pshmckwoma9plh7,
+            script_version: master
+         }
+
+components_is_jobspec:
+  # Helps test that clients cope with funny-shaped components.
+  # For an example, see #3321.
+  uuid: zzzzz-d1hrv-jobspeccomponts
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  state: RunningOnServer
+  components:
+    script: foo
+    script_version: master
+    script_parameters:
+      input:
+        required: true
+        dataclass: Collection
+        title: "Foo/bar pair"
+        description: "Provide a collection containing at least two files."
+
+pipeline_with_tagged_collection_input:
+  name: pipeline_with_tagged_collection_input
+  state: Ready
+  uuid: zzzzz-d1hrv-1yfj61234abcdk3
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 3.1.minute.ago.to_s(:db) %>
+  components:
+    part-one:
+      script_parameters:
+        input:
+          value: zzzzz-4zz18-znfnqtbbv4spc3w
+
+pipeline_to_merge_params:
+  name: pipeline_to_merge_params
+  state: Ready
+  uuid: zzzzz-d1hrv-1yfj6dcba4321k3
+  pipeline_template_uuid: zzzzz-p5p6p-aox0k0ofxrystgw
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 3.1.minute.ago.to_s(:db) %>
+  components:
+    part-one:
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: "Foo/bar pair"
+          description: "Provide a collection containing at least two files."
+    part-two:
+      script_parameters:
+        input:
+          output_of: part-one
+        integer_with_default:
+          default: 123
+        integer_with_value:
+          value: 123
+        string_with_default:
+          default: baz
+        string_with_value:
+          value: baz
+        plain_string: qux
+        array_with_default:
+          default: [1,1,2,3,5]
+        array_with_value:
+          value: [1,1,2,3,5]
+
+pipeline_with_newer_template:
+  state: Complete
+  uuid: zzzzz-d1hrv-9fm8l10i9z2kqc6
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  pipeline_template_uuid: zzzzz-p5p6p-vq4wuvy84xvaq2r
+  created_at: 2014-09-15 12:00:00
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+
+pipeline_instance_owned_by_fuse:
+  state: Complete
+  uuid: zzzzz-d1hrv-ri9dvgkgqs9y09j
+  owner_uuid: zzzzz-tpzed-0fusedrivertest
+  pipeline_template_uuid: zzzzz-p5p6p-vq4wuvy84xvaq2r
+  created_at: 2014-09-15 12:00:00
+  name: "pipeline instance owned by FUSE"
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+
+pipeline_instance_in_fuse_project:
+  state: Complete
+  uuid: zzzzz-d1hrv-scarxiyajtshq3l
+  owner_uuid: zzzzz-j7d0g-0000ownedbyfuse
+  pipeline_template_uuid: zzzzz-p5p6p-vq4wuvy84xvaq2r
+  created_at: 2014-09-15 12:00:00
+  name: "pipeline instance in FUSE project"
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+
+pipeline_owned_by_active_in_aproject:
+  name: Completed pipeline in A Project
+  state: Complete
+  uuid: zzzzz-d1hrv-ju5ghi0i9z2kqc6
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-09-15 12:00:00
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+
+pipeline_owned_by_active_in_home:
+  name: Completed pipeline in active user home
+  state: Complete
+  uuid: zzzzz-d1hrv-lihrbd0i9z2kqc6
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-09-15 12:00:00
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+
+pipeline_in_publicly_accessible_project:
+  uuid: zzzzz-d1hrv-n68vc490mloy4fi
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  name: Pipeline in publicly accessible project
+  pipeline_template_uuid: zzzzz-p5p6p-tmpltpublicproj
+  state: Complete
+  created_at: <%= 30.minute.ago.to_s(:db) %>
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+      job:
+        uuid: zzzzz-8i9sb-jyq01m7in1jlofj
+        repository: active/foo
+        script: foo
+        script_version: master
+        script_parameters:
+          input: zzzzz-4zz18-4en62shvi99lxd4
+        log: zzzzz-4zz18-4en62shvi99lxd4
+        output: b519d9cb706a29fc7ea24dbea2f05851+93
+        state: Complete
+
+pipeline_in_publicly_accessible_project_but_other_objects_elsewhere:
+  uuid: zzzzz-d1hrv-pisharednotobjs
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  name: Pipeline in public project with other objects elsewhere
+  pipeline_template_uuid: zzzzz-p5p6p-aox0k0ofxrystgw
+  state: Complete
+  created_at: 2014-09-15 12:00:00
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+      job:
+        uuid: zzzzz-8i9sb-aceg2bnq7jt7kon
+        repository: active/foo
+        script: foo
+        script_version: master
+        script_parameters:
+          input: zzzzz-4zz18-bv31uwvy3neko21
+        log: zzzzz-4zz18-bv31uwvy3neko21
+        output: zzzzz-4zz18-bv31uwvy3neko21
+        state: Complete
+
+new_pipeline_in_publicly_accessible_project:
+  uuid: zzzzz-d1hrv-newpisharedobjs
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  name: Pipeline in New state in publicly accessible project
+  pipeline_template_uuid: zzzzz-p5p6p-tmpltpublicproj
+  state: New
+  created_at: 2014-09-15 12:00:00
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          value: b519d9cb706a29fc7ea24dbea2f05851+93
+
+new_pipeline_in_publicly_accessible_project_but_other_objects_elsewhere:
+  uuid: zzzzz-d1hrv-newsharenotobjs
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  name: Pipeline in New state in public project with objects elsewhere
+  pipeline_template_uuid: zzzzz-p5p6p-aox0k0ofxrystgw
+  state: New
+  created_at: 2014-09-15 12:00:00
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          value: zzzzz-4zz18-bv31uwvy3neko21
+
+new_pipeline_in_publicly_accessible_project_with_dataclass_file_and_other_objects_elsewhere:
+  uuid: zzzzz-d1hrv-newsharenotfile
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  name: Pipeline in public project in New state with file type data class with objects elsewhere
+  pipeline_template_uuid: zzzzz-p5p6p-aox0k0ofxrystgw
+  state: New
+  created_at: 2014-09-15 12:00:00
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: File
+          value: zzzzz-4zz18-bv31uwvy3neko21/bar
+
+pipeline_in_running_state:
+  name: running_with_job
+  uuid: zzzzz-d1hrv-runningpipeline
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 3.1.minute.ago.to_s(:db) %>
+  started_at: <%= 3.1.minute.ago.to_s(:db) %>
+  state: RunningOnServer
+  components:
+   foo:
+    script: foo
+    script_version: master
+    script_parameters: {}
+    job:
+      uuid: zzzzz-8i9sb-pshmckwoma9plh7
+      script_version: master
+
+running_pipeline_with_complete_job:
+  uuid: zzzzz-d1hrv-partdonepipelin
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  state: RunningOnServer
+  created_at: <%= 15.minute.ago.to_s(:db) %>
+  components:
+   previous:
+    job:
+      uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
+      log: zzzzz-4zz18-op4e2lbej01tcvu
+   running:
+    job:
+      uuid: zzzzz-8i9sb-pshmckwoma9plh7
+
+complete_pipeline_with_two_jobs:
+  uuid: zzzzz-d1hrv-twodonepipeline
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  state: Complete
+  created_at: <%= 3.minute.ago.to_s(:db) %>
+  started_at: <%= 2.minute.ago.to_s(:db) %>
+  finished_at: <%= 1.minute.ago.to_s(:db) %>
+  components:
+   ancient:
+    job:
+      uuid: zzzzz-8i9sb-ahd7cie8jah9qui
+      log: zzzzz-4zz18-op4e2lbej01tcvu
+   previous:
+    job:
+      uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
+      log: zzzzz-4zz18-op4e2lbej01tcvu
+
+failed_pipeline_with_two_jobs:
+  uuid: zzzzz-d1hrv-twofailpipeline
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 55.minute.ago.to_s(:db) %>
+  state: Failed
+  components:
+   ancient:
+    job:
+      uuid: zzzzz-8i9sb-ahd7cie8jah9qui
+      log: zzzzz-4zz18-op4e2lbej01tcvu
+   previous:
+    job:
+      uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
+      log: zzzzz-4zz18-op4e2lbej01tcvu
+
+# This pipeline is a child of another running job and has its own running children
+job_child_pipeline_with_components_at_level_2:
+  state: RunningOnServer
+  uuid: zzzzz-d1hrv-picomponentsl02
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 12.hour.ago.to_s(:db) %>
+  started_at: <%= 12.hour.ago.to_s(:db) %>
+  components:
+   foo:
+    script: foo
+    script_version: master
+    script_parameters: {}
+    job:
+      uuid: zzzzz-8i9sb-job1atlevel3noc
+      script_version: master
+      created_at: <%= 12.hour.ago.to_s(:db) %>
+      started_at: <%= 12.hour.ago.to_s(:db) %>
+      state: Running
+      tasks_summary:
+        failed: 0
+        todo: 0
+        running: 1
+        done: 1
+   bar:
+    script: bar
+    script_version: master
+    script_parameters: {}
+    job:
+      uuid: zzzzz-8i9sb-job2atlevel3noc
+      script_version: master
+      created_at: <%= 12.hour.ago.to_s(:db) %>
+      started_at: <%= 12.hour.ago.to_s(:db) %>
+      state: Running
+      tasks_summary:
+        failed: 0
+        todo: 1
+        running: 2
+        done: 3
+
+# Test Helper trims the rest of the file
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
+
+# pipelines in project_with_10_pipelines
+<% for i in 1..10 do %>
+pipeline_<%=i%>_of_10:
+  name: pipeline_<%= i %>
+  uuid: zzzzz-d1hrv-10pipelines0<%= i.to_s.rjust(3, '0') %>
+  owner_uuid: zzzzz-j7d0g-000010pipelines
+  created_at: <%= (2*(i-1)).hour.ago.to_s(:db) %>
+  started_at: <%= (2*(i-1)).hour.ago.to_s(:db) %>
+  finished_at: <%= (i-1).minute.ago.to_s(:db) %>
+  state: Failed
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+      job:
+        state: Failed
+<% end %>
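+# The ERB loop above stamps out ten near-identical fixtures;
+# i.to_s.rjust(3, '0') zero-pads the index so every generated uuid keeps a
+# fixed-width suffix (e.g. zzzzz-d1hrv-10pipelines0001 for i = 1).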
+
+# pipelines in project_with_2_pipelines_and_60_crs
+<% for i in 1..2 do %>
+pipeline_<%=i%>_of_2_pipelines_and_60_crs:
+  name: pipeline_<%= i %>
+  state: New
+  uuid: zzzzz-d1hrv-abcgneyn6brx<%= i.to_s.rjust(3, '0') %>
+  owner_uuid: zzzzz-j7d0g-nnncrspipelines
+  created_at: <%= i.minute.ago.to_s(:db) %>
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+<% end %>
+
+# pipelines in project_with_25_pipelines
+<% for i in 1..25 do %>
+pipeline_<%=i%>_of_25:
+  name: pipeline_<%=i%>
+  state: Failed
+  uuid: zzzzz-d1hrv-25pipelines0<%= i.to_s.rjust(3, '0') %>
+  owner_uuid: zzzzz-j7d0g-000025pipelines
+  created_at: <%= i.hour.ago.to_s(:db) %>
+  started_at: <%= i.hour.ago.to_s(:db) %>
+  finished_at: <%= i.minute.ago.to_s(:db) %>
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+<% end %>
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
diff --git a/services/api/test/fixtures/pipeline_templates.yml b/services/api/test/fixtures/pipeline_templates.yml
new file mode 100644 (file)
index 0000000..69f8f82
--- /dev/null
@@ -0,0 +1,271 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+two_part:
+  uuid: zzzzz-p5p6p-aox0k0ofxrystgw
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Two Part Pipeline Template
+  components:
+    part-one:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: "Foo/bar pair"
+    part-two:
+      script: bar
+      script_version: master
+      script_parameters:
+        input:
+          output_of: part-one
+        integer_with_default:
+          default: 123
+        integer_with_value:
+          value: 123
+        string_with_default:
+          default: baz
+        string_with_value:
+          value: baz
+        plain_string: qux
+        array_with_default: # important to test repeating values in the array!
+          default: [1,1,2,3,5]
+        array_with_value: # important to test repeating values in the array!
+          value: [1,1,2,3,5]
+
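+# The part-two parameters above exercise the main script_parameter shapes a
+# template can declare. A hedged summary (see the pipeline template docs for
+# the authoritative semantics): "output_of" wires an input to the named
+# component's output at run time; "default" supplies a value the user may
+# override when instantiating the template, while "value" (and a plain scalar
+# like plain_string) pins it; "required", "dataclass", and "title" describe
+# how an input is validated and presented.
+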
+components_is_jobspec:
+  # Helps test that clients cope with funny-shaped components.
+  # For an example, see #3321.
+  uuid: zzzzz-p5p6p-jobspeccomponts
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Pipeline Template with Jobspec Components
+  components:
+    script: foo
+    script_version: master
+    script_parameters:
+      input:
+        required: true
+        dataclass: Collection
+        title: "Foo/bar pair"
+        description: "Provide a collection containing at least two files."
+
+parameter_with_search:
+  uuid: zzzzz-p5p6p-paramwsearch345
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Pipeline Template with Input Parameter with Search
+  components:
+    with-search:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: "Foo/bar pair"
+          description: "Provide a collection containing at least two files."
+          search_for: sometime  # Matches baz_collection_in_asubproject
+
+new_pipeline_template:
+  # This template must include components that are not
+  # present in the pipeline instance 'pipeline_with_newer_template',
+  # at least one of which has a script_parameter that is a hash
+  # with a 'dataclass' field (ticket #4000)
+  uuid: zzzzz-p5p6p-vq4wuvy84xvaq2r
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-09-14 12:00:00
+  modified_at: 2014-09-16 12:00:00
+  name: Pipeline Template Newer Than Instance
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo template input
+    bar:
+      script: bar
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: bar template input
+
+pipeline_template_in_fuse_project:
+  uuid: zzzzz-p5p6p-templinfuseproj
+  owner_uuid: zzzzz-j7d0g-0000ownedbyfuse
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-0fusedrivertest
+  name: pipeline template in FUSE project
+  components:
+    foo_component:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: "default input"
+          description: "input collection"
+
+template_with_dataclass_file:
+  uuid: zzzzz-p5p6p-k0xoa0ofxrystgw
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Two Part Template with dataclass File
+  components:
+    part-one:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: File
+          title: "Foo/bar pair"
+          description: "Provide an input file"
+    part-two:
+      script: bar
+      script_version: master
+      script_parameters:
+        input:
+          output_of: part-one
+        integer_with_default:
+          default: 123
+        integer_with_value:
+          value: 123
+        string_with_default:
+          default: baz
+        string_with_value:
+          value: baz
+        plain_string: qux
+        array_with_default: # important to test repeating values in the array!
+          default: [1,1,2,3,5]
+        array_with_value: # important to test repeating values in the array!
+          value: [1,1,2,3,5]
+
+template_with_dataclass_number:
+  uuid: zzzzz-p5p6p-numbertemplatea
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-01-14 12:35:04 -0400
+  updated_at: 2015-01-14 12:35:04 -0400
+  modified_at: 2015-01-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Template with dataclass number
+  components:
+    work:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: number
+          title: "Input number"
+
+pipeline_template_in_publicly_accessible_project:
+  uuid: zzzzz-p5p6p-tmpltpublicproj
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Pipeline template in publicly accessible project
+  components:
+    foo_component:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: "default input"
+          description: "input collection"
+
+# Used to test renaming when removed from the "aproject" subproject
+# while another object with the same name exists in the home project.
+template_in_active_user_home_project_to_test_unique_key_violation:
+  uuid: zzzzz-p5p6p-templatsamenam1
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2013-04-14 12:35:04 -0400
+  updated_at: 2013-04-14 12:35:04 -0400
+  modified_at: 2013-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Template to test owner uuid and name unique key violation upon removal
+  components:
+    script: foo
+    script_version: master
+    script_parameters:
+      input:
+        required: true
+        dataclass: Collection
+        title: "Foo/bar pair"
+        description: "Provide a collection containing at least two files."
+
+template_in_asubproject_with_same_name_as_one_in_active_user_home:
+  uuid: zzzzz-p5p6p-templatsamenam2
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  created_at: 2013-04-14 12:35:04 -0400
+  updated_at: 2013-04-14 12:35:04 -0400
+  modified_at: 2013-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Template to test owner uuid and name unique key violation upon removal
+  components:
+    script: foo
+    script_version: master
+    script_parameters:
+      input:
+        required: true
+        dataclass: Collection
+        title: "Foo/bar pair"
+        description: "Provide a collection containing at least two files."
+
+workflow_with_input_defaults:
+  uuid: zzzzz-p5p6p-aox0k0ofxrystg2
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Pipeline with default input specifications
+  components:
+    part-one:
+      script: foo
+      script_version: master
+      script_parameters:
+        ex_string:
+          required: true
+          dataclass: string
+        ex_string_def:
+          required: true
+          dataclass: string
+          default: hello-testing-123
\ No newline at end of file
diff --git a/services/api/test/fixtures/repositories.yml b/services/api/test/fixtures/repositories.yml
new file mode 100644 (file)
index 0000000..e4fe71e
--- /dev/null
@@ -0,0 +1,52 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+crunch_dispatch_test:
+  uuid: zzzzz-s0uqq-382brsig8rp3665
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  name: active/crunchdispatchtest
+  created_at: 2015-01-01T00:00:00.123456Z
+  modified_at: 2015-01-01T00:00:00.123456Z
+
+arvados:
+  uuid: zzzzz-s0uqq-arvadosrepo0123
+  owner_uuid: zzzzz-tpzed-000000000000000 # root
+  name: arvados
+  created_at: 2015-01-01T00:00:00.123456Z
+  modified_at: 2015-01-01T00:00:00.123456Z
+
+foo:
+  uuid: zzzzz-s0uqq-382brsig8rp3666
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  name: active/foo
+  created_at: 2015-01-01T00:00:00.123456Z
+  modified_at: 2015-01-01T00:00:00.123456Z
+
+repository2:
+  uuid: zzzzz-s0uqq-382brsig8rp3667
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  name: active/foo2
+  created_at: 2015-01-01T00:00:00.123456Z
+  modified_at: 2015-01-01T00:00:00.123456Z
+
+repository3:
+  uuid: zzzzz-s0uqq-38orljkqpyo1j61
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
+  name: admin/foo3
+  created_at: 2015-01-01T00:00:00.123456Z
+  modified_at: 2015-01-01T00:00:00.123456Z
+
+repository4:
+  uuid: zzzzz-s0uqq-38oru8hnk57ht34
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
+  name: admin/foo4
+  created_at: 2015-01-01T00:00:00.123456Z
+  modified_at: 2015-01-01T00:00:00.123456Z
+
+has_branch_with_commit_hash_name:
+  uuid: zzzzz-s0uqq-382brsig8rp3668
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  name: active/shabranchnames
+  created_at: 2015-01-01T00:00:00.123456Z
+  modified_at: 2015-01-01T00:00:00.123456Z
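+
+# (Note, hedged: repository names above follow an "<owner username>/<name>"
+# convention -- e.g. active/foo belongs to the active user -- with the
+# root-owned "arvados" repository as the exception.)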
diff --git a/services/api/test/fixtures/specimens.yml b/services/api/test/fixtures/specimens.yml
new file mode 100644 (file)
index 0000000..bcae020
--- /dev/null
@@ -0,0 +1,33 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+owned_by_active_user:
+  uuid: zzzzz-j58dm-3zx463qyo0k4xrn
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+
+owned_by_private_group:
+  uuid: zzzzz-j58dm-5m3qwg45g3nlpu6
+  owner_uuid: zzzzz-j7d0g-rew6elm53kancon
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+
+owned_by_spectator:
+  uuid: zzzzz-j58dm-3b0xxwzlbzxq5yr
+  owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+
+in_aproject:
+  uuid: zzzzz-j58dm-7r18rnd5nzhg5yk
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+
+in_asubproject:
+  uuid: zzzzz-j58dm-c40lddwcqqr1ffs
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
diff --git a/services/api/test/fixtures/traits.yml b/services/api/test/fixtures/traits.yml
new file mode 100644 (file)
index 0000000..83beb70
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+owned_by_aproject_with_no_name:
+  uuid: zzzzz-q1cn2-ypsjlol9dofwijz
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-05-05 04:11:52 -0400
+  modified_at: 2014-05-05 04:11:52 -0400
diff --git a/services/api/test/fixtures/users.yml b/services/api/test/fixtures/users.yml
new file mode 100644 (file)
index 0000000..7d6b1fc
--- /dev/null
@@ -0,0 +1,417 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/Fixtures.html
+
+system_user:
+  uuid: zzzzz-tpzed-000000000000000
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-11-27 06:38:21.215463000 Z
+  modified_by_client_uuid: zzzzz-ozdt8-teyxzyd8qllg11h
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-11-27 06:38:21.208036000 Z
+  email: root
+  first_name: root
+  last_name: ''
+  identity_url:
+  is_admin: true
+  prefs: {}
+  updated_at: 2014-11-27 06:38:21.207873000 Z
+  is_active: true
+
+admin:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-d9tiejq69daie8f
+  email: admin@arvados.local
+  first_name: TestCase
+  last_name: Administrator
+  identity_url: https://admin.openid.local
+  is_active: true
+  is_admin: true
+  username: admin
+  prefs:
+    profile:
+      organization: example.com
+      role: IT
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+miniadmin:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-2bg9x0oeydcw5hm
+  email: miniadmin@arvados.local
+  first_name: TestCase
+  last_name: User Group Administrator
+  identity_url: https://miniadmin.openid.local
+  is_active: true
+  is_admin: false
+  username: miniadmin
+  prefs:
+    profile:
+      organization: example.com
+      role: IT
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+rominiadmin:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-4hvxm4n25emegis
+  email: rominiadmin@arvados.local
+  first_name: TestCase
+  last_name: Read-Only User Group Administrator
+  identity_url: https://rominiadmin.openid.local
+  is_active: true
+  is_admin: false
+  username: rominiadmin
+  prefs:
+    profile:
+      organization: example.com
+      role: IT
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+active:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  email: active-user@arvados.local
+  first_name: Active
+  last_name: User
+  identity_url: https://active-user.openid.local
+  is_active: true
+  is_admin: false
+  username: active
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+federated_active:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zbbbb-tpzed-xurymjxw79nv3jz
+  email: zbbbb-active-user@arvados.local
+  first_name: Active
+  last_name: User
+  identity_url: https://active-user.openid.local
+  is_active: true
+  is_admin: false
+  username: federatedactive
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+project_viewer:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-projectviewer1a
+  email: project-viewer@arvados.local
+  first_name: Project
+  last_name: Viewer
+  identity_url: https://project-viewer.openid.local
+  is_active: true
+  is_admin: false
+  username: projectviewer
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+future_project_user:
+  # Workbench tests give this user permission on aproject.
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-futureprojview2
+  email: future-project-user@arvados.local
+  first_name: Future Project
+  last_name: User
+  identity_url: https://future-project-user.openid.local
+  is_active: true
+  is_admin: false
+  username: futureprojectviewer
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+subproject_admin:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-subprojectadmin
+  email: subproject-admin@arvados.local
+  first_name: Subproject
+  last_name: Admin
+  identity_url: https://subproject-admin.openid.local
+  is_active: true
+  is_admin: false
+  username: subprojectadmin
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+spectator:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  email: spectator@arvados.local
+  first_name: Spect
+  last_name: Ator
+  identity_url: https://spectator.openid.local
+  is_active: true
+  is_admin: false
+  username: spectator
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+container_runtime_token_user:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-l3skomkti0c4vg4
+  email: spectator@arvados.local
+  first_name: Spect
+  last_name: Ator
+  identity_url: https://spectator.openid.local
+  is_active: true
+  is_admin: false
+  username: containerruntimetokenuser
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+inactive_uninvited:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-rf2ec3ryh4vb5ma
+  email: inactive-uninvited-user@arvados.local
+  first_name: Inactive and Uninvited
+  last_name: User
+  identity_url: https://inactive-uninvited-user.openid.local
+  is_active: false
+  is_admin: false
+  prefs: {}
+
+inactive:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-x9kqpd79egh49c7
+  email: inactive-user@arvados.local
+  first_name: Inactive
+  last_name: User
+  identity_url: https://inactive-user.openid.local
+  is_active: false
+  is_admin: false
+  username: inactiveuser
+  prefs: {}
+
+inactive_but_signed_user_agreement:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-7sg468ezxwnodxs
+  email: inactive-user-signed-ua@arvados.local
+  first_name: Inactive But Agreeable
+  last_name: User
+  identity_url: https://inactive-but-agreeable-user.openid.local
+  is_active: false
+  is_admin: false
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+anonymous:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-anonymouspublic
+  email: anonymouspublic
+  first_name: anonymouspublic
+  last_name: anonymouspublic
+  is_active: false
+  is_admin: false
+  prefs: {}
+
+job_reader:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-905b42d1dd4a354
+  email: jobber@arvados.local
+  first_name: Job
+  last_name: Er
+  identity_url: https://spectator.openid.local
+  is_active: true
+  is_admin: false
+  username: jobber
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+job_reader2:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-readjobwithcomp
+  email: job_reader2@arvados.local
+  first_name: Job
+  last_name: Reader2
+  identity_url: https://job_reader2.openid.local
+  is_active: true
+  is_admin: false
+  username: jobreader2
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+active_no_prefs:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-a46c42d1td4aoj4
+  email: active_no_prefs@arvados.local
+  first_name: NoPrefs
+  last_name: NoProfile
+  identity_url: https://active_no_prefs.openid.local
+  is_active: true
+  is_admin: false
+  prefs: {}
+
+active_no_prefs_profile_no_getting_started_shown:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-a46c98d1td4aoj4
+  email: active_no_prefs_profile@arvados.local
+  first_name: HasPrefs
+  last_name: NoProfile
+  identity_url: https://active_no_prefs_profile.openid.local
+  is_active: true
+  is_admin: false
+  prefs:
+    test: abc
+
+active_no_prefs_profile_with_getting_started_shown:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-getstartnoprofl
+  email: active_no_prefs_profile@arvados.local
+  first_name: HasPrefs
+  last_name: NoProfileWithGettingStartedShown
+  identity_url: https://active_no_prefs_profile_seen_gs.openid.local
+  is_active: true
+  is_admin: false
+  prefs:
+    test: abc
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+active_with_prefs_profile_no_getting_started_shown:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-nogettinstarted
+  email: active_nogettinstarted@arvados.local
+  first_name: HasPrefsProfile
+  last_name: NoGettingStartedShown
+  identity_url: https://active_nogettinstarted.openid.local
+  is_active: true
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+
+# Fixtures to test granting and removing permissions.
+
+user_foo_in_sharing_group:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-81hsbo6mk8nl05c
+  email: user_foo_in_sharing_group@arvados.local
+  first_name: Foo
+  last_name: Sharing
+  identity_url: https://user_foo_in_sharing_group.openid.local
+  is_active: true
+  is_admin: false
+  username: fooinsharing
+
+user_bar_in_sharing_group:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-n3oaj4sm5fcnwib
+  email: user_bar_in_sharing_group@arvados.local
+  first_name: Bar
+  last_name: Sharing
+  identity_url: https://user_bar_in_sharing_group.openid.local
+  is_active: true
+  is_admin: false
+  username: barinsharing
+
+user1_with_load:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-user1withloadab
+  email: user1_with_load@arvados.local
+  first_name: user1_with_load
+  last_name: User
+  identity_url: https://user1_with_load.openid.local
+  is_active: true
+  is_admin: false
+  username: user1withload
+  prefs:
+    profile:
+      organization: example.com
+      role: IT
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+fuse:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-0fusedrivertest
+  email: fuse@arvados.local
+  first_name: FUSE
+  last_name: User
+  identity_url: https://fuse.openid.local
+  is_active: true
+  is_admin: false
+  username: FUSE
+  prefs:
+    profile:
+      organization: example.com
+      role: IT
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+permission_perftest:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-permissionptest
+  email: fuse@arvados.local
+  first_name: FUSE
+  last_name: User
+  identity_url: https://fuse.openid.local
+  is_active: true
+  is_admin: false
+  username: perftest
+  prefs:
+    profile:
+      organization: example.com
+      role: IT
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+redirects_to_active:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-1au3is3g3chtthd
+  email: redirects-to-active-user@arvados.local
+  first_name: Active2
+  last_name: User2
+  identity_url: https://redirects-to-active-user.openid.local
+  is_active: true
+  is_admin: false
+  username: redirect_active
+  redirect_to_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
+double_redirects_to_active:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-oiusowoxoz0pk3p
+  email: double-redirects-to-active-user@arvados.local
+  first_name: Active3
+  last_name: User3
+  identity_url: https://double-redirects-to-active-user.openid.local
+  is_active: true
+  is_admin: false
+  username: double_redirect_active
+  redirect_to_user_uuid: zzzzz-tpzed-1au3is3g3chtthd
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
diff --git a/services/api/test/fixtures/virtual_machines.yml b/services/api/test/fixtures/virtual_machines.yml
new file mode 100644 (file)
index 0000000..e3f9623
--- /dev/null
@@ -0,0 +1,13 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+testvm:
+  uuid: zzzzz-2x53u-382brsig8rp3064
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  hostname: testvm.shell
+
+testvm2:
+  uuid: zzzzz-2x53u-382brsig8rp3065
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  hostname: testvm2.shell
diff --git a/services/api/test/fixtures/workflows.yml b/services/api/test/fixtures/workflows.yml
new file mode 100644 (file)
index 0000000..2859e37
--- /dev/null
@@ -0,0 +1,69 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+workflow_with_definition_yml:
+  uuid: zzzzz-7fd4e-validworkfloyml
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Valid workflow with name and desc
+  description: this workflow has a valid definition yaml
+  definition: "name: foo\ndesc: bar"
+  created_at: 2016-08-15 12:00:00
+
+workflow_with_no_definition_yml:
+  uuid: zzzzz-7fd4e-validbutnoyml00
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Valid workflow with no definition yaml
+  description: this workflow does not have a definition yaml
+  created_at: 2016-08-15 12:00:00
+
+workflow_with_no_name_and_desc:
+  uuid: zzzzz-7fd4e-validnonamedesc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  definition: this is valid yaml
+  created_at: 2016-08-15 12:00:01
+
+workflow_with_input_specifications:
+  uuid: zzzzz-7fd4e-validwithinputs
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  name: Workflow with input specifications
+  description: this workflow has inputs specified
+  created_at: <%= 1.minute.ago.to_s(:db) %>
+  definition: |
+    cwlVersion: v1.0
+    class: CommandLineTool
+    baseCommand:
+    - echo
+    inputs:
+    - doc: a longer documentation string for this parameter (optional)
+      type: boolean
+      id: ex_boolean
+      label: a short label for this parameter (optional)
+      inputBinding:
+        position: 1
+    - type:
+      - 'null'
+      - boolean
+      id: ex_boolean_opt
+      inputBinding:
+        position: 1
+    outputs: []
+
+workflow_with_input_defaults:
+  uuid: zzzzz-7fd4e-validwithinput2
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  name: Workflow with default input specifications
+  description: this workflow has inputs specified
+  created_at: <%= 1.minute.ago.to_s(:db) %>
+  definition: |
+    cwlVersion: v1.0
+    class: CommandLineTool
+    baseCommand:
+    - echo
+    inputs:
+    - type: string
+      id: ex_string
+    - type: string
+      id: ex_string_def
+      default: hello-testing-123
+    outputs: []
diff --git a/services/api/test/functional/.gitkeep b/services/api/test/functional/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/test/functional/application_controller_test.rb b/services/api/test/functional/application_controller_test.rb
new file mode 100644 (file)
index 0000000..27b046e
--- /dev/null
@@ -0,0 +1,119 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ApplicationControllerTest < ActionController::TestCase
+  BAD_UUID = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+
+  def now_timestamp
+    Time.now.utc.to_i
+  end
+
+  setup do
+    # These tests are meant to check behavior in ApplicationController.
+    # We instantiate a small concrete controller for convenience.
+    @controller = Arvados::V1::SpecimensController.new
+    @start_stamp = now_timestamp
+  end
+
+  def check_error_token
+    token = json_response['error_token']
+    assert_not_nil token
+    token_time = token.split('+', 2).first.to_i
+    assert_operator(token_time, :>=, @start_stamp, "error token too old")
+    assert_operator(token_time, :<=, now_timestamp, "error token too new")
+  end
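+
+  # (A hedged reading of the token format, inferred from the check above:
+  # the error token looks like "<unix timestamp>+<random suffix>", so
+  # splitting on the first '+' recovers when the error was generated.)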
+
+  def check_404(errmsg="Path not found")
+    assert_response 404
+    assert_equal([errmsg], json_response['errors'])
+    check_error_token
+  end
+
+  test "requesting nonexistent object returns 404 error" do
+    authorize_with :admin
+    get(:show, id: BAD_UUID)
+    check_404
+  end
+
+  test "requesting object without read permission returns 404 error" do
+    authorize_with :spectator
+    get(:show, id: specimens(:owned_by_active_user).uuid)
+    check_404
+  end
+
+  test "submitting bad object returns error" do
+    authorize_with :spectator
+    post(:create, specimen: {badattr: "badvalue"})
+    assert_response 422
+    check_error_token
+  end
+
+  test "X-Request-Id header" do
+    authorize_with :spectator
+    get(:index)
+    assert_match /^req-[0-9a-zA-Z]{20}$/, response.headers['X-Request-Id']
+  end
+
+  # The response header is the one that gets logged, so this test also
+  # ensures we log the ID supplied in the request, if any.
+  test "X-Request-Id given by client" do
+    authorize_with :spectator
+    @request.headers['X-Request-Id'] = 'abcdefG'
+    get(:index)
+    assert_equal 'abcdefG', response.headers['X-Request-Id']
+  end
+
+  test "X-Request-Id given by client is ignored if too long" do
+    authorize_with :spectator
+    @request.headers['X-Request-Id'] = 'abcdefG' * 1000
+    get(:index)
+    assert_match /^req-[0-9a-zA-Z]{20}$/, response.headers['X-Request-Id']
+  end
+
+  ['foo', '', 'FALSE', 'TRUE', nil, [true], {a:true}, '"true"'].each do |bogus|
+    test "bogus boolean parameter #{bogus.inspect} returns error" do
+      @controller = Arvados::V1::GroupsController.new
+      authorize_with :active
+      post :create, {
+        group: {},
+        ensure_unique_name: bogus
+      }
+      assert_response 422
+      assert_match(/parameter must be a boolean/, json_response['errors'].first,
+                   'Helpful error message not found')
+    end
+  end
+
+  [[true, [true, 'true', 1, '1']],
+   [false, [false, 'false', 0, '0']]].each do |bool, boolparams|
+    boolparams.each do |boolparam|
+      # Ensure boolparam is acceptable as a boolean
+      test "boolean parameter #{boolparam.inspect} acceptable" do
+        @controller = Arvados::V1::GroupsController.new
+        authorize_with :active
+        post :create, {
+          group: {},
+          ensure_unique_name: boolparam
+        }
+        assert_response :success
+      end
+
+      # Ensure boolparam is acceptable as the _intended_ boolean
+      test "boolean parameter #{boolparam.inspect} accepted as #{bool.inspect}" do
+        @controller = Arvados::V1::GroupsController.new
+        authorize_with :active
+        post :create, {
+          group: {
+            name: groups(:aproject).name,
+            owner_uuid: groups(:aproject).owner_uuid
+          },
+          ensure_unique_name: boolparam
+        }
+        assert_response (bool ? :success : 422)
+      end
+    end
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb b/services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb
new file mode 100644 (file)
index 0000000..616bd07
--- /dev/null
@@ -0,0 +1,188 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::ApiClientAuthorizationsControllerTest < ActionController::TestCase
+  test "should get index" do
+    authorize_with :active_trustedclient
+    get :index
+    assert_response :success
+  end
+
+  test "should not get index with expired auth" do
+    authorize_with :expired
+    get :index, format: :json
+    assert_response 401
+  end
+
+  test "should not get index from untrusted client" do
+    authorize_with :active
+    get :index
+    assert_response 403
+  end
+
+  test "create system auth" do
+    authorize_with :admin_trustedclient
+    post :create_system_auth, scopes: '["test"]'
+    assert_response :success
+    assert_not_nil JSON.parse(@response.body)['uuid']
+  end
+
+  test "prohibit create system auth with token from non-trusted client" do
+    authorize_with :admin
+    post :create_system_auth, scopes: '["test"]'
+    assert_response 403
+  end
+
+  test "prohibit create system auth by non-admin" do
+    authorize_with :active
+    post :create_system_auth, scopes: '["test"]'
+    assert_response 403
+  end
+
+  def assert_found_tokens(auth, search_params, expected)
+    authorize_with auth
+    expected_tokens = expected.map do |name|
+      api_client_authorizations(name).api_token
+    end
+    get :index, search_params
+    assert_response :success
+    got_tokens = JSON.parse(@response.body)['items']
+      .map { |a| a['api_token'] }
+    assert_equal(expected_tokens.sort, got_tokens.sort,
+                 "wrong results for #{search_params.inspect}")
+  end
+
+  # Three-tuples with auth to use, scopes to find, and expected tokens.
+  # Make three tests for each tuple: one searching with where, one with
+  # filters, and one checking that offset works with the same filters.
+  [[:admin_trustedclient, [], [:admin_noscope]],
+   [:active_trustedclient, ["GET /arvados/v1/users"], [:active_userlist]],
+   [:active_trustedclient,
+    ["POST /arvados/v1/api_client_authorizations",
+     "GET /arvados/v1/api_client_authorizations"],
+    [:active_apitokens]],
+  ].each do |auth, scopes, expected|
+    test "#{auth.to_s} can find auths where scopes=#{scopes.inspect}" do
+      assert_found_tokens(auth, {where: {scopes: scopes}}, expected)
+    end
+
+    test "#{auth.to_s} can find auths filtered with scopes=#{scopes.inspect}" do
+      assert_found_tokens(auth, {filters: [['scopes', '=', scopes]]}, expected)
+    end
+
+    test "#{auth.to_s} offset works with filter scopes=#{scopes.inspect}" do
+      assert_found_tokens(auth, {
+                            offset: expected.length,
+                            filters: [['scopes', '=', scopes]]
+                          }, [])
+    end
+  end
+
+  [# anyone can look up the token they're currently using
+   [:admin, :admin, 200, 200, 1],
+   [:active, :active, 200, 200, 1],
+   # cannot look up other tokens (even for the same user) if not trustedclient
+   [:admin, :active, 403, 403],
+   [:admin, :admin_vm, 403, 403],
+   [:active, :admin, 403, 403],
+   # cannot look up other tokens for other users, regardless of trustedclient
+   [:admin_trustedclient, :active, 404, 200, 0],
+   [:active_trustedclient, :admin, 404, 200, 0],
+  ].each do |user, token, expect_get_response, expect_list_response, expect_list_items|
+    test "using '#{user}', get '#{token}' by uuid" do
+      authorize_with user
+      get :show, {
+        id: api_client_authorizations(token).uuid,
+      }
+      assert_response expect_get_response
+    end
+
+    test "using '#{user}', update '#{token}' by uuid" do
+      authorize_with user
+      put :update, {
+        id: api_client_authorizations(token).uuid,
+        api_client_authorization: {},
+      }
+      assert_response expect_get_response
+    end
+
+    test "using '#{user}', delete '#{token}' by uuid" do
+      authorize_with user
+      post :destroy, {
+        id: api_client_authorizations(token).uuid,
+      }
+      assert_response expect_get_response
+    end
+
+    test "using '#{user}', list '#{token}' by uuid" do
+      authorize_with user
+      get :index, {
+        filters: [['uuid','=',api_client_authorizations(token).uuid]],
+      }
+      assert_response expect_list_response
+      if expect_list_items
+        assert_equal assigns(:objects).length, expect_list_items
+        assert_equal json_response['items_available'], expect_list_items
+      end
+    end
+
+    if expect_list_items
+      test "using '#{user}', list '#{token}' by uuid with offset" do
+        authorize_with user
+        get :index, {
+          filters: [['uuid','=',api_client_authorizations(token).uuid]],
+          offset: expect_list_items,
+        }
+        assert_response expect_list_response
+        assert_equal json_response['items_available'], expect_list_items
+        assert_equal json_response['items'].length, 0
+      end
+    end
+
+    test "using '#{user}', list '#{token}' by token" do
+      authorize_with user
+      get :index, {
+        filters: [['api_token','=',api_client_authorizations(token).api_token]],
+      }
+      assert_response expect_list_response
+      if expect_list_items
+        assert_equal assigns(:objects).length, expect_list_items
+        assert_equal json_response['items_available'], expect_list_items
+      end
+    end
+  end
+
+  test "scoped token cannot change its own scopes" do
+    authorize_with :admin_vm
+    put :update, {
+      id: api_client_authorizations(:admin_vm).uuid,
+      api_client_authorization: {scopes: ['all']},
+    }
+    assert_response 403
+  end
+
+  test "token cannot change its own uuid" do
+    authorize_with :admin
+    put :update, {
+      id: api_client_authorizations(:admin).uuid,
+      api_client_authorization: {uuid: 'zzzzz-gj3su-zzzzzzzzzzzzzzz'},
+    }
+    assert_response 403
+  end
+
+  test "get current token" do
+    authorize_with :active
+    get :current
+    assert_response :success
+    assert_equal(json_response['api_token'],
+                 api_client_authorizations(:active).api_token)
+  end
+
+  test "get current token, no auth" do
+    get :current
+    assert_response 401
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/authorized_keys_controller_test.rb b/services/api/test/functional/arvados/v1/authorized_keys_controller_test.rb
new file mode 100644 (file)
index 0000000..8945244
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::AuthorizedKeysControllerTest < ActionController::TestCase
+end
diff --git a/services/api/test/functional/arvados/v1/collections_controller_test.rb b/services/api/test/functional/arvados/v1/collections_controller_test.rb
new file mode 100644 (file)
index 0000000..997d89d
--- /dev/null
@@ -0,0 +1,1313 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::CollectionsControllerTest < ActionController::TestCase
+  include DbCurrentTime
+
+  PERM_TOKEN_RE = /\+A[[:xdigit:]]+@[[:xdigit:]]{8}\b/
+
+  def permit_unsigned_manifests isok=true
+    # Set security model for the life of a test.
+    Rails.configuration.permit_create_collection_with_unsigned_manifest = isok
+  end
+
+  def assert_signed_manifest manifest_text, label='', token: false
+    assert_not_nil manifest_text, "#{label} manifest_text was nil"
+    manifest_text.scan(/ [[:xdigit:]]{32}\S*/) do |tok|
+      assert_match(PERM_TOKEN_RE, tok,
+                   "Locator in #{label} manifest_text was not signed")
+      if token
+        bare = tok.gsub(/\+A[^\+]*/, '').sub(/^ /, '')
+        exp = tok[/\+A[[:xdigit:]]+@([[:xdigit:]]+)/, 1].to_i(16)
+        sig = Blob.sign_locator(
+          bare,
+          key: Rails.configuration.blob_signing_key,
+          expire: exp,
+          api_token: token)[/\+A[^\+]*/, 0]
+        assert_includes tok, sig
+      end
+    end
+  end
+
+  def assert_unsigned_manifest resp, label=''
+    txt = resp['unsigned_manifest_text']
+    assert_not_nil(txt, "#{label} unsigned_manifest_text was nil")
+    locs = 0
+    txt.scan(/ [[:xdigit:]]{32}\S*/) do |tok|
+      locs += 1
+      refute_match(PERM_TOKEN_RE, tok,
+                   "Locator in #{label} unsigned_manifest_text was signed: #{tok}")
+    end
+    return locs
+  end
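+
+  # For reference, a hedged sketch of the two locator shapes these helpers
+  # distinguish (hash and signature bytes below are illustrative only):
+  #
+  #   unsigned: acbd18db4cc2f85cedef654fccc4a4d8+3
+  #   signed:   acbd18db4cc2f85cedef654fccc4a4d8+3+Adeadbeefdeadbeef@12345678
+  #
+  # i.e. md5+size, optionally followed by a "+A<hex signature>@<8 hex digit
+  # expiry>" permission hint, which is exactly what PERM_TOKEN_RE matches.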
+
+  test "should get index" do
+    authorize_with :active
+    get :index
+    assert_response :success
+    assert(assigns(:objects).andand.any?, "no Collections returned in index")
+    refute(json_response["items"].any? { |c| c.has_key?("manifest_text") },
+           "basic Collections index included manifest_text")
+    refute(json_response["items"].any? { |c| c["uuid"] == collections(:collection_owned_by_active_past_version_1).uuid },
+           "basic Collections index included past version")
+  end
+
+  test "get index with include_old_versions" do
+    authorize_with :active
+    get :index, {
+      include_old_versions: true
+    }
+    assert_response :success
+    assert(assigns(:objects).andand.any?, "no Collections returned in index")
+    assert(json_response["items"].any? { |c| c["uuid"] == collections(:collection_owned_by_active_past_version_1).uuid },
+           "past version not included on index")
+  end
+
+  test "collections.get returns signed locators, and no unsigned_manifest_text" do
+    permit_unsigned_manifests
+    authorize_with :active
+    get :show, {id: collections(:foo_file).uuid}
+    assert_response :success
+    assert_signed_manifest json_response['manifest_text'], 'foo_file'
+    refute_includes json_response, 'unsigned_manifest_text'
+  end
+
+  ['v1token', 'v2token'].each do |token_method|
+    test "correct signatures are given for #{token_method}" do
+      token = api_client_authorizations(:active).send(token_method)
+      authorize_with_token token
+      get :show, {id: collections(:foo_file).uuid}
+      assert_response :success
+      assert_signed_manifest json_response['manifest_text'], 'foo_file', token: token
+    end
+
+    test "signatures with #{token_method} are accepted" do
+      token = api_client_authorizations(:active).send(token_method)
+      signed = Blob.sign_locator(
+        'acbd18db4cc2f85cedef654fccc4a4d8+3',
+        key: Rails.configuration.blob_signing_key,
+        api_token: token)
+      authorize_with_token token
+      put :update, {
+            id: collections(:collection_owned_by_active).uuid,
+            collection: {
+              manifest_text: ". #{signed} 0:3:foo.txt\n",
+            },
+          }
+      assert_response :success
+      assert_signed_manifest json_response['manifest_text'], 'updated', token: token
+    end
+  end
+
+  test "index with manifest_text selected returns signed locators" do
+    columns = %w(uuid owner_uuid manifest_text)
+    authorize_with :active
+    get :index, select: columns
+    assert_response :success
+    assert(assigns(:objects).andand.any?,
+           "no Collections returned for index with columns selected")
+    json_response["items"].each do |coll|
+      assert_equal(coll.keys - ['kind'], columns,
+                   "Collections index did not respect selected columns")
+      assert_signed_manifest coll['manifest_text'], coll['uuid']
+    end
+  end
+
+  test "index with unsigned_manifest_text selected returns only unsigned locators" do
+    authorize_with :active
+    get :index, select: ['unsigned_manifest_text']
+    assert_response :success
+    assert_operator json_response["items"].count, :>, 0
+    locs = 0
+    json_response["items"].each do |coll|
+      assert_equal(coll.keys - ['kind'], ['unsigned_manifest_text'],
+                   "Collections index did not respect selected columns")
+      locs += assert_unsigned_manifest coll, coll['uuid']
+    end
+    assert_operator locs, :>, 0, "no locators found in any manifests"
+  end
+
+  test 'index without select returns everything except manifest' do
+    authorize_with :active
+    get :index
+    assert_response :success
+    assert json_response['items'].any?
+    json_response['items'].each do |coll|
+      assert_includes(coll.keys, 'uuid')
+      assert_includes(coll.keys, 'name')
+      assert_includes(coll.keys, 'created_at')
+      refute_includes(coll.keys, 'manifest_text')
+    end
+  end
+
+  ['', nil, false, 'null'].each do |select|
+    test "index with select=#{select.inspect} returns everything except manifest" do
+      authorize_with :active
+      get :index, select: select
+      assert_response :success
+      assert json_response['items'].any?
+      json_response['items'].each do |coll|
+        assert_includes(coll.keys, 'uuid')
+        assert_includes(coll.keys, 'name')
+        assert_includes(coll.keys, 'created_at')
+        refute_includes(coll.keys, 'manifest_text')
+      end
+    end
+  end
+
+  [["uuid"],
+   ["uuid", "manifest_text"],
+   '["uuid"]',
+   '["uuid", "manifest_text"]'].each do |select|
+    test "index with select=#{select.inspect} returns no name" do
+      authorize_with :active
+      get :index, select: select
+      assert_response :success
+      assert json_response['items'].any?
+      json_response['items'].each do |coll|
+        refute_includes(coll.keys, 'name')
+      end
+    end
+  end
+
+  [0,1,2].each do |limit|
+    test "get index with limit=#{limit}" do
+      authorize_with :active
+      get :index, limit: limit
+      assert_response :success
+      assert_equal limit, assigns(:objects).count
+      resp = JSON.parse(@response.body)
+      assert_equal limit, resp['limit']
+    end
+  end
+
+  test "items.count == items_available" do
+    authorize_with :active
+    get :index, limit: 100000
+    assert_response :success
+    resp = JSON.parse(@response.body)
+    assert_equal resp['items_available'], assigns(:objects).length
+    assert_equal resp['items_available'], resp['items'].count
+    unique_uuids = resp['items'].collect { |i| i['uuid'] }.compact.uniq
+    assert_equal unique_uuids.count, resp['items'].count
+  end
+
+  test "items.count == items_available with filters" do
+    authorize_with :active
+    get :index, {
+      limit: 100,
+      filters: [['uuid','=',collections(:foo_file).uuid]]
+    }
+    assert_response :success
+    assert_equal 1, assigns(:objects).length
+    assert_equal 1, json_response['items_available']
+    assert_equal 1, json_response['items'].count
+  end
+
+  test "get index with limit=2 offset=99999" do
+    # Assume there are not that many test fixtures.
+    authorize_with :active
+    get :index, limit: 2, offset: 99999
+    assert_response :success
+    assert_equal 0, assigns(:objects).count
+    resp = JSON.parse(@response.body)
+    assert_equal 2, resp['limit']
+    assert_equal 99999, resp['offset']
+  end
+
+  def request_capped_index(params={})
+    authorize_with :user1_with_load
+    coll1 = collections(:collection_1_of_201)
+    Rails.configuration.max_index_database_read =
+      yield(coll1.manifest_text.size)
+    get :index, {
+      select: %w(uuid manifest_text),
+      filters: [["owner_uuid", "=", coll1.owner_uuid]],
+      limit: 300,
+    }.merge(params)
+  end
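+
+  # A quick sketch of the capping arithmetic the tests below rely on,
+  # assuming all 201 fixture manifests have the same size S: with
+  # max_index_database_read = 3*S + 1, reading a 4th manifest would push the
+  # total past the cap, so the page is truncated to 3 items, the response
+  # reports limit=3, and items_available stays 201. (The first test shows
+  # that at least one item is always returned, even when a single manifest
+  # exceeds the cap.)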
+
+  test "index with manifest_text limited by max_index_database_read returns non-empty" do
+    request_capped_index() { |_| 1 }
+    assert_response :success
+    assert_equal(1, json_response["items"].size)
+    assert_equal(1, json_response["limit"])
+    assert_equal(201, json_response["items_available"])
+  end
+
+  test "max_index_database_read size check follows same order as real query" do
+    authorize_with :user1_with_load
+    txt = '.' + ' d41d8cd98f00b204e9800998ecf8427e+0'*1000 + " 0:0:empty.txt\n"
+    c = Collection.create! manifest_text: txt, name: '0000000000000000000'
+    request_capped_index(select: %w(uuid manifest_text name),
+                         order: ['name asc'],
+                         filters: [['name','>=',c.name]]) do |_|
+      txt.length - 1
+    end
+    assert_response :success
+    assert_equal(1, json_response["items"].size)
+    assert_equal(1, json_response["limit"])
+    assert_equal(c.uuid, json_response["items"][0]["uuid"])
+    # The effectiveness of the test depends on >1 item matching the filters.
+    assert_operator(1, :<, json_response["items_available"])
+  end
+
+  test "index with manifest_text limited by max_index_database_read" do
+    request_capped_index() { |size| (size * 3) + 1 }
+    assert_response :success
+    assert_equal(3, json_response["items"].size)
+    assert_equal(3, json_response["limit"])
+    assert_equal(201, json_response["items_available"])
+  end
+
+  test "max_index_database_read does not interfere with limit" do
+    request_capped_index(limit: 5) { |size| size * 20 }
+    assert_response :success
+    assert_equal(5, json_response["items"].size)
+    assert_equal(5, json_response["limit"])
+    assert_equal(201, json_response["items_available"])
+  end
+
+  test "max_index_database_read does not interfere with order" do
+    request_capped_index(select: %w(uuid manifest_text name),
+                         order: "name DESC") { |size| (size * 11) + 1 }
+    assert_response :success
+    assert_equal(11, json_response["items"].size)
+    assert_empty(json_response["items"].reject do |coll|
+                   coll["name"] =~ /^Collection_9/
+                 end)
+    assert_equal(11, json_response["limit"])
+    assert_equal(201, json_response["items_available"])
+  end
+
+  test "admin can create collection with unsigned manifest" do
+    authorize_with :admin
+    test_collection = {
+      manifest_text: <<-EOS
+. d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt
+. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:bar.txt
+. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:bar.txt
+./baz acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:bar.txt
+EOS
+    }
+    test_collection[:portable_data_hash] =
+      Digest::MD5.hexdigest(test_collection[:manifest_text]) +
+      '+' +
+      test_collection[:manifest_text].length.to_s
+
+    # post :create will modify test_collection in place, so we save a copy first.
+    # Hash#deep_dup is not sufficient here because it preserves references to
+    # the original strings.
+    post_collection = Marshal.load(Marshal.dump(test_collection))
+    post :create, {
+      collection: post_collection
+    }
+
+    assert_response :success
+    assert_nil assigns(:objects)
+
+    response_collection = assigns(:object)
+
+    stored_collection = Collection.select([:uuid, :portable_data_hash, :manifest_text]).
+      where(portable_data_hash: response_collection['portable_data_hash']).first
+
+    assert_equal test_collection[:portable_data_hash], stored_collection['portable_data_hash']
+
+    # The stored manifest will have had permission hints added.
+    # Strip them before comparing it to the source manifest.
+    stripped_manifest = stored_collection['manifest_text'].gsub(/\+A[A-Za-z0-9@_-]+/, '')
+    assert_equal test_collection[:manifest_text], stripped_manifest
+
+    # TBD: create action should add permission signatures to manifest_text in the response,
+    # and we need to check those permission signatures here.
+  end
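+
+  # The portable data hash asserted above follows the pattern used throughout
+  # these tests: MD5 of the manifest text, "+", and the manifest's length in
+  # bytes. A minimal sketch mirroring the computation in this test:
+  #
+  #   require 'digest/md5'
+  #   manifest = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+  #   pdh = Digest::MD5.hexdigest(manifest) + '+' + manifest.length.to_s
+  #   # => "d30fe8ae534397864cb96c544f4cf102+47" per the create fixtures below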
+
+  [:admin, :active].each do |user|
+    test "#{user} can get collection using portable data hash" do
+      authorize_with user
+
+      foo_collection = collections(:foo_file)
+
+      # Get foo_file using its portable data hash
+      get :show, {
+        id: foo_collection[:portable_data_hash]
+      }
+      assert_response :success
+      assert_not_nil assigns(:object)
+      resp = assigns(:object)
+      assert_equal foo_collection[:portable_data_hash], resp[:portable_data_hash]
+      assert_signed_manifest resp[:manifest_text]
+
+      # The manifest in the response will have had permission hints added.
+      # Remove any permission hints in the response before comparing it to the source.
+      stripped_manifest = resp[:manifest_text].gsub(/\+A[A-Za-z0-9@_-]+/, '')
+      assert_equal foo_collection[:manifest_text], stripped_manifest
+    end
+  end
+
+  test "create with owner_uuid set to owned group" do
+    permit_unsigned_manifests
+    authorize_with :active
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        owner_uuid: 'zzzzz-j7d0g-rew6elm53kancon',
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47"
+      }
+    }
+    assert_response :success
+    resp = JSON.parse(@response.body)
+    assert_equal 'zzzzz-j7d0g-rew6elm53kancon', resp['owner_uuid']
+  end
+
+  test "create fails with duplicate name" do
+    permit_unsigned_manifests
+    authorize_with :admin
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        owner_uuid: 'zzzzz-tpzed-000000000000000',
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47",
+        name: "foo_file"
+      }
+    }
+    assert_response 422
+    response_errors = json_response['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert(response_errors.first.include?('duplicate key'),
+           "Expected 'duplicate key' error in #{response_errors.first}")
+  end
+
+  [false, true].each do |unsigned|
+    test "create with duplicate name, ensure_unique_name, unsigned=#{unsigned}" do
+      permit_unsigned_manifests unsigned
+      authorize_with :active
+      manifest_text = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:foo.txt\n"
+      if !unsigned
+        manifest_text = Collection.sign_manifest manifest_text, api_token(:active)
+      end
+      post :create, {
+        collection: {
+          owner_uuid: users(:active).uuid,
+          manifest_text: manifest_text,
+          name: "owned_by_active"
+        },
+        ensure_unique_name: true
+      }
+      assert_response :success
+      assert_match /^owned_by_active \(\d{4}-\d\d-\d\d.*?Z\)$/, json_response['name']
+    end
+  end
+
+  test "create with owner_uuid set to group i can_manage" do
+    permit_unsigned_manifests
+    authorize_with :active
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        owner_uuid: groups(:active_user_has_can_manage).uuid,
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47"
+      }
+    }
+    assert_response :success
+    resp = JSON.parse(@response.body)
+    assert_equal groups(:active_user_has_can_manage).uuid, resp['owner_uuid']
+  end
+
+  test "create with owner_uuid fails on group with only can_read permission" do
+    permit_unsigned_manifests
+    authorize_with :active
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        owner_uuid: groups(:all_users).uuid,
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47"
+      }
+    }
+    assert_response 403
+  end
+
+  test "create with owner_uuid fails on group with no permission" do
+    permit_unsigned_manifests
+    authorize_with :active
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        owner_uuid: groups(:public).uuid,
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47"
+      }
+    }
+    assert_response 422
+  end
+
+  test "admin create with owner_uuid set to group with no permission" do
+    permit_unsigned_manifests
+    authorize_with :admin
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        owner_uuid: 'zzzzz-j7d0g-it30l961gq3t0oi',
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47"
+      }
+    }
+    assert_response :success
+  end
+
+  test "should create with collection passed as json" do
+    permit_unsigned_manifests
+    authorize_with :active
+    post :create, {
+      collection: <<-EOS
+      {
+        "manifest_text":". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n",\
+        "portable_data_hash":"d30fe8ae534397864cb96c544f4cf102+47"\
+      }
+      EOS
+    }
+    assert_response :success
+  end
+
+  test "should fail to create with checksum mismatch" do
+    permit_unsigned_manifests
+    authorize_with :active
+    post :create, {
+      collection: <<-EOS
+      {
+        "manifest_text":". d41d8cd98f00b204e9800998ecf8427e 0:0:bar.txt\n",\
+        "portable_data_hash":"d30fe8ae534397864cb96c544f4cf102+47"\
+      }
+      EOS
+    }
+    assert_response 422
+  end
+
+  test "collection UUID is normalized when created" do
+    permit_unsigned_manifests
+    authorize_with :active
+    post :create, {
+      collection: {
+        manifest_text: ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n",
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47+Khint+Xhint+Zhint"
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    resp = JSON.parse(@response.body)
+    assert_equal "d30fe8ae534397864cb96c544f4cf102+47", resp['portable_data_hash']
+  end
+
+  test "get full provenance for baz file" do
+    authorize_with :active
+    get :provenance, id: 'ea10d51bcf88862dbcc36eb292017dfd+45'
+    assert_response :success
+    resp = JSON.parse(@response.body)
+    assert_not_nil resp['ea10d51bcf88862dbcc36eb292017dfd+45'] # baz
+    assert_not_nil resp['fa7aeb5140e2848d39b416daeef4ffc5+45'] # bar
+    assert_not_nil resp['1f4b0bc7583c2a7f9102c395f4ffc5e3+45'] # foo
+    assert_not_nil resp['zzzzz-8i9sb-cjs4pklxxjykyuq'] # bar->baz
+    assert_not_nil resp['zzzzz-8i9sb-aceg2bnq7jt7kon'] # foo->bar
+  end
+
+  test "get no provenance for foo file" do
+    # spectator user cannot see the foo collection at all
+    authorize_with :spectator
+    get :provenance, id: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
+    assert_response 404
+  end
+
+  test "get partial provenance for baz file" do
+    # spectator user can see bar->baz job, but not foo->bar job
+    authorize_with :spectator
+    get :provenance, id: 'ea10d51bcf88862dbcc36eb292017dfd+45'
+    assert_response :success
+    resp = JSON.parse(@response.body)
+    assert_not_nil resp['ea10d51bcf88862dbcc36eb292017dfd+45'] # baz
+    assert_not_nil resp['fa7aeb5140e2848d39b416daeef4ffc5+45'] # bar
+    assert_not_nil resp['zzzzz-8i9sb-cjs4pklxxjykyuq']     # bar->baz
+    assert_nil resp['zzzzz-8i9sb-aceg2bnq7jt7kon']         # foo->bar
+    assert_nil resp['1f4b0bc7583c2a7f9102c395f4ffc5e3+45'] # foo
+  end
+
+  test "search collections with 'any' operator" do
+    expect_pdh = collections(:docker_image).portable_data_hash
+    authorize_with :active
+    get :index, {
+      where: { any: ['contains', expect_pdh[5..25]] }
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_equal 1, found.count
+    assert_equal expect_pdh, found.first.portable_data_hash
+  end
+
+  [false, true].each do |permit_unsigned|
+    test "create collection with signed manifest, permit_unsigned=#{permit_unsigned}" do
+      permit_unsigned_manifests permit_unsigned
+      authorize_with :active
+      locators = %w(
+        d41d8cd98f00b204e9800998ecf8427e+0
+        acbd18db4cc2f85cedef654fccc4a4d8+3
+        ea10d51bcf88862dbcc36eb292017dfd+45)
+
+      unsigned_manifest = locators.map { |loc|
+        ". " + loc + " 0:0:foo.txt\n"
+      }.join()
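+      # A collection's portable data hash is the MD5 of its manifest text
+      # followed by "+<manifest length in bytes>", computed here from the
+      # unsigned manifest.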
+      manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+        '+' +
+        unsigned_manifest.length.to_s
+
+      # Build a manifest with both signed and unsigned locators.
+      signing_opts = {
+        key: Rails.configuration.blob_signing_key,
+        api_token: api_token(:active),
+      }
+      signed_locators = locators.collect do |x|
+        Blob.sign_locator x, signing_opts
+      end
+      if permit_unsigned
+        # Leave a non-empty blob unsigned.
+        signed_locators[1] = locators[1]
+      else
+        # Leave the empty blob unsigned. This should still be allowed.
+        signed_locators[0] = locators[0]
+      end
+      signed_manifest =
+        ". " + signed_locators[0] + " 0:0:foo.txt\n" +
+        ". " + signed_locators[1] + " 0:0:foo.txt\n" +
+        ". " + signed_locators[2] + " 0:0:foo.txt\n"
+
+      post :create, {
+        collection: {
+          manifest_text: signed_manifest,
+          portable_data_hash: manifest_uuid,
+        }
+      }
+      assert_response :success
+      assert_not_nil assigns(:object)
+      resp = JSON.parse(@response.body)
+      assert_equal manifest_uuid, resp['portable_data_hash']
+      # All of the locators in the output must be signed.
+      resp['manifest_text'].lines.each do |entry|
+        m = /([[:xdigit:]]{32}\+\S+)/.match(entry)
+        if m
+          assert Blob.verify_signature m[0], signing_opts
+        end
+      end
+    end
+  end
+
+  test "create collection with signed manifest and explicit TTL" do
+    authorize_with :active
+    locators = %w(
+      d41d8cd98f00b204e9800998ecf8427e+0
+      acbd18db4cc2f85cedef654fccc4a4d8+3
+      ea10d51bcf88862dbcc36eb292017dfd+45)
+
+    unsigned_manifest = locators.map { |loc|
+      ". " + loc + " 0:0:foo.txt\n"
+    }.join()
+    manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+      '+' +
+      unsigned_manifest.length.to_s
+
+    # build a manifest with both signed and unsigned locators.
+    # TODO(twp): in phase 4, all locators will need to be signed, so
+    # this test should break and will need to be rewritten. Issue #2755.
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+      ttl: 3600   # 1 hour
+    }
+    signed_manifest =
+      ". " + locators[0] + " 0:0:foo.txt\n" +
+      ". " + Blob.sign_locator(locators[1], signing_opts) + " 0:0:foo.txt\n" +
+      ". " + Blob.sign_locator(locators[2], signing_opts) + " 0:0:foo.txt\n"
+
+    post :create, {
+      collection: {
+        manifest_text: signed_manifest,
+        portable_data_hash: manifest_uuid,
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    resp = JSON.parse(@response.body)
+    assert_equal manifest_uuid, resp['portable_data_hash']
+    # All of the locators in the output must be signed.
+    resp['manifest_text'].lines.each do |entry|
+      m = /([[:xdigit:]]{32}\+\S+)/.match(entry)
+      if m
+        assert Blob.verify_signature m[0], signing_opts
+      end
+    end
+  end
+
+  test "create fails with invalid signature" do
+    authorize_with :active
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+    }
+
+    # Generate a locator with a bad signature.
+    unsigned_locator = "acbd18db4cc2f85cedef654fccc4a4d8+3"
+    bad_locator = unsigned_locator + "+Affffffffffffffffffffffffffffffffffffffff@ffffffff"
+    assert !Blob.verify_signature(bad_locator, signing_opts)
+
+    # Creating a collection with this locator should
+    # produce 403 Permission denied.
+    unsigned_manifest = ". #{unsigned_locator} 0:0:foo.txt\n"
+    manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+      '+' +
+      unsigned_manifest.length.to_s
+
+    bad_manifest = ". #{bad_locator} 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        manifest_text: bad_manifest,
+        portable_data_hash: manifest_uuid
+      }
+    }
+
+    assert_response 403
+  end
+
+  test "create fails with uuid of signed manifest" do
+    authorize_with :active
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+    }
+
+    unsigned_locator = "d41d8cd98f00b204e9800998ecf8427e+0"
+    signed_locator = Blob.sign_locator(unsigned_locator, signing_opts)
+    signed_manifest = ". #{signed_locator} 0:0:foo.txt\n"
+    manifest_uuid = Digest::MD5.hexdigest(signed_manifest) +
+      '+' +
+      signed_manifest.length.to_s
+
+    post :create, {
+      collection: {
+        manifest_text: signed_manifest,
+        portable_data_hash: manifest_uuid
+      }
+    }
+
+    assert_response 422
+  end
+
+  test "reject manifest with unsigned block as stream name" do
+    authorize_with :active
+    post :create, {
+      collection: {
+        manifest_text: "00000000000000000000000000000000+1234 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n"
+      }
+    }
+    assert_includes [422, 403], response.code.to_i
+  end
+
+  test "multiple locators per line" do
+    permit_unsigned_manifests
+    authorize_with :active
+    locators = %w(
+      d41d8cd98f00b204e9800998ecf8427e+0
+      acbd18db4cc2f85cedef654fccc4a4d8+3
+      ea10d51bcf88862dbcc36eb292017dfd+45)
+
+    manifest_text = [".", *locators, "0:0:foo.txt\n"].join(" ")
+    manifest_uuid = Digest::MD5.hexdigest(manifest_text) +
+      '+' +
+      manifest_text.length.to_s
+
+    test_collection = {
+      manifest_text: manifest_text,
+      portable_data_hash: manifest_uuid,
+    }
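+    # Marshal round-trip makes a deep copy, so the original test_collection
+    # hash is left untouched by the request below.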
+    post_collection = Marshal.load(Marshal.dump(test_collection))
+    post :create, {
+      collection: post_collection
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    resp = JSON.parse(@response.body)
+    assert_equal manifest_uuid, resp['portable_data_hash']
+
+    # The manifest in the response will have had permission hints added.
+    # Remove any permission hints in the response before comparing it to the source.
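+    # A hint has the form "+A<hex signature>@<hex expiry>", which the gsub
+    # pattern below matches and strips.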
+    stripped_manifest = resp['manifest_text'].gsub(/\+A[A-Za-z0-9@_-]+/, '')
+    assert_equal manifest_text, stripped_manifest
+  end
+
+  test "multiple signed locators per line" do
+    permit_unsigned_manifests
+    authorize_with :active
+    locators = %w(
+      d41d8cd98f00b204e9800998ecf8427e+0
+      acbd18db4cc2f85cedef654fccc4a4d8+3
+      ea10d51bcf88862dbcc36eb292017dfd+45)
+
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+    }
+
+    unsigned_manifest = [".", *locators, "0:0:foo.txt\n"].join(" ")
+    manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+      '+' +
+      unsigned_manifest.length.to_s
+
+    signed_locators = locators.map { |loc| Blob.sign_locator loc, signing_opts }
+    signed_manifest = [".", *signed_locators, "0:0:foo.txt\n"].join(" ")
+
+    post :create, {
+      collection: {
+        manifest_text: signed_manifest,
+        portable_data_hash: manifest_uuid,
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    resp = JSON.parse(@response.body)
+    assert_equal manifest_uuid, resp['portable_data_hash']
+    # All of the locators in the output must be signed.
+    # Each line is of the form "path locator locator ... 0:0:file.txt"
+    # entry.split[1..-2] will yield just the tokens in the middle of the line
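+    # e.g. ". locA locB locC 0:0:file.txt".split[1..-2] => ["locA", "locB", "locC"]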
+    returned_locator_count = 0
+    resp['manifest_text'].lines.each do |entry|
+      entry.split[1..-2].each do |tok|
+        returned_locator_count += 1
+        assert Blob.verify_signature tok, signing_opts
+      end
+    end
+    assert_equal locators.count, returned_locator_count
+  end
+
+  test 'Reject manifest with unsigned blob' do
+    permit_unsigned_manifests false
+    authorize_with :active
+    unsigned_manifest = ". 0cc175b9c0f1b6a831c399e269772661+1 0:1:a.txt\n"
+    manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest)
+    post :create, {
+      collection: {
+        manifest_text: unsigned_manifest,
+        portable_data_hash: manifest_uuid,
+      }
+    }
+    assert_response 403,
+                    "Creating a collection with unsigned blobs should respond 403"
+    assert_empty Collection.where('uuid like ?', manifest_uuid + '%'),
+                 "Collection should not exist in database after failed create"
+  end
+
+  test 'List expired collection returns empty list' do
+    authorize_with :active
+    get :index, {
+      where: {name: 'expired_collection'},
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_equal 0, found.count
+  end
+
+  test 'Show expired collection returns 404' do
+    authorize_with :active
+    get :show, {
+      id: 'zzzzz-4zz18-mto52zx1s7sn3ih',
+    }
+    assert_response 404
+  end
+
+  test 'Update expired collection returns 404' do
+    authorize_with :active
+    post :update, {
+      id: 'zzzzz-4zz18-mto52zx1s7sn3ih',
+      collection: {
+        name: "still expired"
+      }
+    }
+    assert_response 404
+  end
+
+  test 'List collection with future expiration time succeeds' do
+    authorize_with :active
+    get :index, {
+      where: {name: 'collection_expires_in_future'},
+    }
+    found = assigns(:objects)
+    assert_equal 1, found.count
+  end
+
+  test 'Show collection with future expiration time succeeds' do
+    authorize_with :active
+    get :show, {
+      id: 'zzzzz-4zz18-padkqo7yb8d9i3j',
+    }
+    assert_response :success
+  end
+
+  test 'Update collection with future expiration time succeeds' do
+    authorize_with :active
+    post :update, {
+      id: 'zzzzz-4zz18-padkqo7yb8d9i3j',
+      collection: {
+        name: "still not expired"
+      }
+    }
+    assert_response :success
+  end
+
+  test "get collection and verify that file_names is not included" do
+    authorize_with :active
+    get :show, {id: collections(:foo_file).uuid}
+    assert_response :success
+    assert_equal collections(:foo_file).uuid, json_response['uuid']
+    assert_nil json_response['file_names']
+    assert json_response['manifest_text']
+  end
+
+  [
+    [2**8, :success],
+    [2**18, 422],
+  ].each do |description_size, expected_response|
+    # Descriptions are not part of search indexes. Skip until
+    # full-text search is implemented, at which point replace with a
+    # search in description.
+    skip "create collection with description size #{description_size}
+          and expect response #{expected_response}" do
+      authorize_with :active
+
+      description = 'here is a collection with a very large description'
+      while description.length < description_size
+        description = description + description
+      end
+
+      post :create, collection: {
+        manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n",
+        description: description,
+      }
+
+      assert_response expected_response
+    end
+  end
+
+  [1, 5, nil].each do |ask|
+    test "Set replication_desired=#{ask.inspect}" do
+      Rails.configuration.default_collection_replication = 2
+      authorize_with :active
+      put :update, {
+        id: collections(:replication_undesired_unconfirmed).uuid,
+        collection: {
+          replication_desired: ask,
+        },
+      }
+      assert_response :success
+      assert_equal ask, json_response['replication_desired']
+    end
+  end
+
+  test "get collection with properties" do
+    authorize_with :active
+    get :show, {id: collections(:collection_with_one_property).uuid}
+    assert_response :success
+    assert_not_nil json_response['uuid']
+    assert_equal 'value1', json_response['properties']['property1']
+  end
+
+  test "create collection with properties" do
+    authorize_with :active
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47",
+        properties: {'property_1' => 'value_1'}
+      }
+    }
+    assert_response :success
+    assert_not_nil json_response['uuid']
+    assert_equal 'value_1', json_response['properties']['property_1']
+  end
+
+  [
+    ". 0:0:foo.txt",
+    ". d41d8cd98f00b204e9800998ecf8427e foo.txt",
+    "d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt",
+    ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt",
+  ].each do |manifest_text|
+    test "create collection with invalid manifest #{manifest_text} and expect error" do
+      authorize_with :active
+      post :create, {
+        collection: {
+          manifest_text: manifest_text,
+          portable_data_hash: "d41d8cd98f00b204e9800998ecf8427e+0"
+        }
+      }
+      assert_response 422
+      response_errors = json_response['errors']
+      assert_not_nil response_errors, 'Expected error in response'
+      assert(response_errors.first.include?('Invalid manifest'),
+             "Expected 'Invalid manifest' error in #{response_errors.first}")
+    end
+  end
+
+  [
+    [nil, "d41d8cd98f00b204e9800998ecf8427e+0"],
+    ["", "d41d8cd98f00b204e9800998ecf8427e+0"],
+    [". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n", "d30fe8ae534397864cb96c544f4cf102+47"],
+  ].each do |manifest_text, pdh|
+    test "create collection with valid manifest #{manifest_text.inspect} and expect success" do
+      authorize_with :active
+      post :create, {
+        collection: {
+          manifest_text: manifest_text,
+          portable_data_hash: pdh
+        }
+      }
+      assert_response 200
+    end
+  end
+
+  [
+    ". 0:0:foo.txt",
+    ". d41d8cd98f00b204e9800998ecf8427e foo.txt",
+    "d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt",
+    ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt",
+  ].each do |manifest_text|
+    test "update collection with invalid manifest #{manifest_text} and expect error" do
+      authorize_with :active
+      post :update, {
+        id: 'zzzzz-4zz18-bv31uwvy3neko21',
+        collection: {
+          manifest_text: manifest_text,
+        }
+      }
+      assert_response 422
+      response_errors = json_response['errors']
+      assert_not_nil response_errors, 'Expected error in response'
+      assert(response_errors.first.include?('Invalid manifest'),
+             "Expected 'Invalid manifest' error in #{response_errors.first}")
+    end
+  end
+
+  [
+    nil,
+    "",
+    ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n",
+  ].each do |manifest_text|
+    test "update collection with valid manifest #{manifest_text.inspect} and expect success" do
+      authorize_with :active
+      post :update, {
+        id: 'zzzzz-4zz18-bv31uwvy3neko21',
+        collection: {
+          manifest_text: manifest_text,
+        }
+      }
+      assert_response 200
+    end
+  end
+
+  test 'get trashed collection with include_trash' do
+    uuid = 'zzzzz-4zz18-mto52zx1s7sn3ih' # expired_collection
+    authorize_with :active
+    get :show, {
+      id: uuid,
+      include_trash: true,
+    }
+    assert_response 200
+  end
+
+  [:admin, :active].each do |user|
+    test "get trashed collection via filters and #{user} user" do
+      uuid = 'zzzzz-4zz18-mto52zx1s7sn3ih' # expired_collection
+      authorize_with user
+      get :index, {
+        filters: [["current_version_uuid", "=", uuid]],
+        include_trash: true,
+      }
+      assert_response 200
+      # Only the current version is returned
+      assert_equal 1, json_response["items"].size
+    end
+  end
+
+  [:admin, :active].each do |user|
+    test "get trashed collection via filters and #{user} user, including its past versions" do
+      uuid = 'zzzzz-4zz18-mto52zx1s7sn3ih' # expired_collection
+      authorize_with user
+      get :index, {
+        filters: [["current_version_uuid", "=", uuid]],
+        include_trash: true,
+        include_old_versions: true,
+      }
+      assert_response 200
+      # Both current & past version are returned
+      assert_equal 2, json_response["items"].size
+    end
+  end
+
+  test "trash collection also trash its past versions" do
+    uuid = collections(:collection_owned_by_active).uuid
+    authorize_with :active
+    versions = Collection.where(current_version_uuid: uuid)
+    assert_equal 2, versions.size
+    versions.each do |col|
+      refute col.is_trashed
+    end
+    post :trash, {
+      id: uuid,
+    }
+    assert_response 200
+    versions = Collection.where(current_version_uuid: uuid)
+    assert_equal 2, versions.size
+    versions.each do |col|
+      assert col.is_trashed
+    end
+  end
+
+  test 'get trashed collection without include_trash' do
+    uuid = 'zzzzz-4zz18-mto52zx1s7sn3ih' # expired_collection
+    authorize_with :active
+    get :show, {
+      id: uuid,
+    }
+    assert_response 404
+  end
+
+  test 'trash collection using http DELETE verb' do
+    uuid = collections(:collection_owned_by_active).uuid
+    authorize_with :active
+    delete :destroy, {
+      id: uuid,
+    }
+    assert_response 200
+    c = Collection.find_by_uuid(uuid)
+    assert_operator c.trash_at, :<, db_current_time
+    assert_equal c.delete_at, c.trash_at + Rails.configuration.blob_signature_ttl
+  end
+
+  test 'delete long-trashed collection immediately using http DELETE verb' do
+    uuid = 'zzzzz-4zz18-mto52zx1s7sn3ih' # expired_collection
+    authorize_with :active
+    delete :destroy, {
+      id: uuid,
+    }
+    assert_response 200
+    c = Collection.find_by_uuid(uuid)
+    assert_operator c.trash_at, :<, db_current_time
+    assert_operator c.delete_at, :<, db_current_time
+  end
+
+  ['zzzzz-4zz18-mto52zx1s7sn3ih', # expired_collection
+   :empty_collection_name_in_active_user_home_project,
+  ].each do |fixture|
+    test "trash collection #{fixture} via trash action with grace period" do
+      if fixture.is_a? String
+        uuid = fixture
+      else
+        uuid = collections(fixture).uuid
+      end
+      authorize_with :active
+      time_before_trashing = db_current_time
+      post :trash, {
+        id: uuid,
+      }
+      assert_response 200
+      c = Collection.find_by_uuid(uuid)
+      assert_operator c.trash_at, :<, db_current_time
+      assert_operator c.delete_at, :>=, time_before_trashing + Rails.configuration.default_trash_lifetime
+    end
+  end
+
+  test 'untrash a trashed collection' do
+    authorize_with :active
+    post :untrash, {
+      id: collections(:expired_collection).uuid,
+    }
+    assert_response 200
+    assert_equal false, json_response['is_trashed']
+    assert_nil json_response['trash_at']
+  end
+
+  test 'untrash error on not trashed collection' do
+    authorize_with :active
+    post :untrash, {
+      id: collections(:collection_owned_by_active).uuid,
+    }
+    assert_response 422
+  end
+
+  [:active, :admin].each do |user|
+    test "get trashed collections as #{user}" do
+      authorize_with user
+      get :index, {
+        filters: [["is_trashed", "=", true]],
+        include_trash: true,
+      }
+      assert_response :success
+
+      items = []
+      json_response["items"].each do |coll|
+        items << coll['uuid']
+      end
+
+      assert_includes(items, collections('unique_expired_collection')['uuid'])
+      if user == :admin
+        assert_includes(items, collections('unique_expired_collection2')['uuid'])
+      else
+        assert_not_includes(items, collections('unique_expired_collection2')['uuid'])
+      end
+    end
+  end
+
+  test 'untrash collection with same name as another, without ensure_unique_name' do
+    authorize_with :active
+    post :untrash, {
+      id: collections(:trashed_collection_to_test_name_conflict_on_untrash).uuid,
+    }
+    assert_response 422
+  end
+
+  test 'untrash collection with same name as another, with ensure_unique_name' do
+    authorize_with :active
+    post :untrash, {
+      id: collections(:trashed_collection_to_test_name_conflict_on_untrash).uuid,
+      ensure_unique_name: true
+    }
+    assert_response 200
+    assert_equal false, json_response['is_trashed']
+    assert_nil json_response['trash_at']
+    assert_nil json_response['delete_at']
+    assert_match(/^same name for trashed and persisted collections \(\d{4}-\d\d-\d\d.*?Z\)$/, json_response['name'])
+  end
+
+  test 'cannot show collection in trashed subproject' do
+    authorize_with :active
+    get :show, {
+      id: collections(:collection_in_trashed_subproject).uuid,
+      format: :json
+    }
+    assert_response 404
+  end
+
+  test 'can show collection in untrashed subproject' do
+    authorize_with :active
+    Group.find_by_uuid(groups(:trashed_project).uuid).update! is_trashed: false
+    get :show, {
+      id: collections(:collection_in_trashed_subproject).uuid,
+      format: :json,
+    }
+    assert_response :success
+  end
+
+  test 'cannot index collection in trashed subproject' do
+    authorize_with :active
+    get :index, { limit: 1000 }
+    assert_response :success
+    item_uuids = json_response['items'].map do |item|
+      item['uuid']
+    end
+    assert_not_includes(item_uuids, collections(:collection_in_trashed_subproject).uuid)
+  end
+
+  test 'can index collection in untrashed subproject' do
+    authorize_with :active
+    Group.find_by_uuid(groups(:trashed_project).uuid).update! is_trashed: false
+    get :index, { limit: 1000 }
+    assert_response :success
+    item_uuids = json_response['items'].map do |item|
+      item['uuid']
+    end
+    assert_includes(item_uuids, collections(:collection_in_trashed_subproject).uuid)
+  end
+
+  test 'can index trashed subproject collection with include_trash' do
+    authorize_with :active
+    get :index, {
+          include_trash: true,
+          limit: 1000
+        }
+    assert_response :success
+    item_uuids = json_response['items'].map do |item|
+      item['uuid']
+    end
+    assert_includes(item_uuids, collections(:collection_in_trashed_subproject).uuid)
+  end
+
+  test 'can get collection with past versions' do
+    authorize_with :active
+    get :index, {
+      filters: [['current_version_uuid','=',collections(:collection_owned_by_active).uuid]],
+      include_old_versions: true
+    }
+    assert_response :success
+    assert_equal 2, assigns(:objects).length
+    assert_equal 2, json_response['items_available']
+    assert_equal 2, json_response['items'].count
+    json_response['items'].each do |c|
+      assert_equal collections(:collection_owned_by_active).uuid,
+                   c['current_version_uuid'],
+                   'response includes a version from a different collection'
+    end
+  end
+
+  test 'can get old version collection by uuid' do
+    authorize_with :active
+    get :show, {
+      id: collections(:collection_owned_by_active_past_version_1).uuid,
+    }
+    assert_response :success
+    assert_equal collections(:collection_owned_by_active_past_version_1).name,
+                  json_response['name']
+  end
+
+  test 'version and current_version_uuid are ignored at creation time' do
+    permit_unsigned_manifests
+    authorize_with :active
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        name: 'Test collection',
+        version: 42,
+        current_version_uuid: collections(:collection_owned_by_active).uuid,
+        manifest_text: manifest_text,
+      }
+    }
+    assert_response :success
+    resp = JSON.parse(@response.body)
+    assert_equal 1, resp['version']
+    assert_equal resp['uuid'], resp['current_version_uuid']
+  end
+
+  test "update collection with versioning enabled" do
+    Rails.configuration.collection_versioning = true
+    Rails.configuration.preserve_version_if_idle = 1 # 1 second
+
+    col = collections(:collection_owned_by_active)
+    assert_equal 2, col.version
+    assert col.modified_at < Time.now - 1.second
+
+    token = api_client_authorizations(:active).v2token
+    signed = Blob.sign_locator(
+      'acbd18db4cc2f85cedef654fccc4a4d8+3',
+      key: Rails.configuration.blob_signing_key,
+      api_token: token)
+    authorize_with_token token
+    put :update, {
+          id: col.uuid,
+          collection: {
+            manifest_text: ". #{signed} 0:3:foo.txt\n",
+          },
+        }
+    assert_response :success
+    assert_equal 3, json_response['version']
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/commits_controller_test.rb b/services/api/test/functional/arvados/v1/commits_controller_test.rb
new file mode 100644 (file)
index 0000000..bf285b0
--- /dev/null
+++ b/services/api/test/functional/arvados/v1/commits_controller_test.rb
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::CommitsControllerTest < ActionController::TestCase
+end
diff --git a/services/api/test/functional/arvados/v1/container_requests_controller_test.rb b/services/api/test/functional/arvados/v1/container_requests_controller_test.rb
new file mode 100644 (file)
index 0000000..a3252ad
--- /dev/null
+++ b/services/api/test/functional/arvados/v1/container_requests_controller_test.rb
@@ -0,0 +1,101 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::ContainerRequestsControllerTest < ActionController::TestCase
+  def minimal_cr
+    {
+      command: ['echo', 'hello'],
+      container_image: 'test',
+      output_path: 'test',
+    }
+  end
+
+  test 'create with scheduling parameters' do
+    authorize_with :active
+
+    sp = {'partitions' => ['test1', 'test2']}
+    post :create, {
+           container_request: minimal_cr.merge(scheduling_parameters: sp.dup)
+         }
+    assert_response :success
+
+    cr = JSON.parse(@response.body)
+    assert_not_nil cr, 'Expected container request'
+    assert_equal sp, cr['scheduling_parameters']
+  end
+
+  test "secret_mounts not in #create responses" do
+    authorize_with :active
+
+    post :create, {
+           container_request: minimal_cr.merge(
+             secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}}),
+         }
+    assert_response :success
+
+    resp = JSON.parse(@response.body)
+    refute resp.has_key?('secret_mounts')
+
+    req = ContainerRequest.where(uuid: resp['uuid']).first
+    assert_equal 'bar', req.secret_mounts['/foo']['content']
+  end
+
+  test "update with secret_mounts" do
+    authorize_with :active
+    req = container_requests(:uncommitted)
+
+    patch :update, {
+            id: req.uuid,
+            container_request: {
+              secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}},
+            },
+          }
+    assert_response :success
+
+    resp = JSON.parse(@response.body)
+    refute resp.has_key?('secret_mounts')
+
+    req.reload
+    assert_equal 'bar', req.secret_mounts['/foo']['content']
+  end
+
+  test "update without deleting secret_mounts" do
+    authorize_with :active
+    req = container_requests(:uncommitted)
+    req.update_attributes!(secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}})
+
+    patch :update, {
+            id: req.uuid,
+            container_request: {
+              command: ['echo', 'test'],
+            },
+          }
+    assert_response :success
+
+    resp = JSON.parse(@response.body)
+    refute resp.has_key?('secret_mounts')
+
+    req.reload
+    assert_equal 'bar', req.secret_mounts['/foo']['content']
+  end
+
+  test "runtime_token not in #create responses" do
+    authorize_with :active
+
+    post :create, {
+           container_request: minimal_cr.merge(
+             runtime_token: api_client_authorizations(:spectator).token)
+         }
+    assert_response :success
+
+    resp = JSON.parse(@response.body)
+    refute resp.has_key?('runtime_token')
+
+    req = ContainerRequest.where(uuid: resp['uuid']).first
+    assert_equal api_client_authorizations(:spectator).token, req.runtime_token
+  end
+
+end
diff --git a/services/api/test/functional/arvados/v1/containers_controller_test.rb b/services/api/test/functional/arvados/v1/containers_controller_test.rb
new file mode 100644 (file)
index 0000000..452533b
--- /dev/null
+++ b/services/api/test/functional/arvados/v1/containers_controller_test.rb
@@ -0,0 +1,164 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::ContainersControllerTest < ActionController::TestCase
+  test 'create' do
+    authorize_with :system_user
+    post :create, {
+      container: {
+        command: ['echo', 'hello'],
+        container_image: 'test',
+        output_path: 'test',
+      },
+    }
+    assert_response :success
+  end
+
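+  # Note: the loop variable only parameterizes the test name; both cases
+  # request auth for the :queued fixture, which has not been locked.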
+  [Container::Queued, Container::Complete].each do |state|
+    test "cannot get auth in #{state} state" do
+      authorize_with :dispatch1
+      get :auth, id: containers(:queued).uuid
+      assert_response 403
+    end
+  end
+
+  test 'cannot get auth with wrong token' do
+    authorize_with :dispatch1
+    c = containers(:queued)
+    assert c.lock, show_errors(c)
+
+    authorize_with :system_user
+    get :auth, id: c.uuid
+    assert_response 403
+  end
+
+  test 'get auth' do
+    authorize_with :dispatch1
+    c = containers(:queued)
+    assert c.lock, show_errors(c)
+    get :auth, id: c.uuid
+    assert_response :success
+    assert_operator 32, :<, json_response['api_token'].length
+    assert_equal 'arvados#apiClientAuthorization', json_response['kind']
+  end
+
+  test 'no auth or secret_mounts in container response' do
+    authorize_with :dispatch1
+    c = containers(:queued)
+    assert c.lock, show_errors(c)
+    get :show, id: c.uuid
+    assert_response :success
+    assert_nil json_response['auth']
+    assert_nil json_response['secret_mounts']
+  end
+
+  test "lock container" do
+    authorize_with :dispatch1
+    uuid = containers(:queued).uuid
+    post :lock, {id: uuid}
+    assert_response :success
+    assert_nil json_response['mounts']
+    assert_nil json_response['command']
+    assert_not_nil json_response['auth_uuid']
+    assert_not_nil json_response['locked_by_uuid']
+    assert_equal containers(:queued).uuid, json_response['uuid']
+    assert_equal 'Locked', json_response['state']
+    assert_equal containers(:queued).priority, json_response['priority']
+
+    container = Container.where(uuid: uuid).first
+    assert_equal 'Locked', container.state
+    assert_not_nil container.locked_by_uuid
+    assert_not_nil container.auth_uuid
+  end
+
+  test "unlock container" do
+    authorize_with :dispatch1
+    uuid = containers(:locked).uuid
+    post :unlock, {id: uuid}
+    assert_response :success
+    assert_nil json_response['mounts']
+    assert_nil json_response['command']
+    assert_nil json_response['auth_uuid']
+    assert_nil json_response['locked_by_uuid']
+    assert_equal containers(:locked).uuid, json_response['uuid']
+    assert_equal 'Queued', json_response['state']
+    assert_equal containers(:locked).priority, json_response['priority']
+
+    container = Container.where(uuid: uuid).first
+    assert_equal 'Queued', container.state
+    assert_nil container.locked_by_uuid
+    assert_nil container.auth_uuid
+  end
+
+  test "unlock container locked by different dispatcher" do
+    authorize_with :dispatch2
+    uuid = containers(:locked).uuid
+    post :unlock, {id: uuid}
+    assert_response 422
+  end
+
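+  # [starting fixture, action, expected response, expected final state]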
+  [
+    [:queued, :lock, :success, 'Locked'],
+    [:queued, :unlock, 422, 'Queued'],
+    [:locked, :lock, 422, 'Locked'],
+    [:running, :lock, 422, 'Running'],
+    [:running, :unlock, 422, 'Running'],
+  ].each do |fixture, action, response, state|
+    test "state transitions from #{fixture} to #{action}" do
+      authorize_with :dispatch1
+      uuid = containers(fixture).uuid
+      post action, {id: uuid}
+      assert_response response
+      assert_equal state, Container.where(uuid: uuid).first.state
+    end
+  end
+
+  test 'get current container for token' do
+    authorize_with :running_container_auth
+    get :current
+    assert_response :success
+    assert_equal containers(:running).uuid, json_response['uuid']
+  end
+
+  test 'no container associated with token' do
+    authorize_with :dispatch1
+    get :current
+    assert_response 404
+  end
+
+  test 'try get current container, no token' do
+    get :current
+    assert_response 401
+  end
+
+  [
+    [true, :running_container_auth],
+    [false, :dispatch2],
+    [false, :admin],
+    [false, :active],
+  ].each do |expect_success, auth|
+    test "get secret_mounts with #{auth} token" do
+      authorize_with auth
+      get :secret_mounts, {id: containers(:running).uuid}
+      if expect_success
+        assert_response :success
+        assert_equal "42\n", json_response["secret_mounts"]["/secret/6x9"]["content"]
+      else
+        assert_response 403
+      end
+    end
+  end
+
+  test 'get runtime_token auth' do
+    authorize_with :dispatch2
+    c = containers(:runtime_token)
+    get :auth, id: c.uuid
+    assert_response :success
+    assert_equal "v2/#{json_response['uuid']}/#{json_response['api_token']}", api_client_authorizations(:container_runtime_token).token
+    assert_equal 'arvados#apiClientAuthorization', json_response['kind']
+  end
+
+end
diff --git a/services/api/test/functional/arvados/v1/filters_test.rb b/services/api/test/functional/arvados/v1/filters_test.rb
new file mode 100644 (file)
index 0000000..c76b94e
--- /dev/null
+++ b/services/api/test/functional/arvados/v1/filters_test.rb
@@ -0,0 +1,316 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::FiltersTest < ActionController::TestCase
+  test '"not in" filter passes null values' do
+    @controller = Arvados::V1::GroupsController.new
+    authorize_with :admin
+    get :index, {
+      filters: [ ['group_class', 'not in', ['project']] ],
+      controller: 'groups',
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_includes(found.collect(&:group_class), nil,
+                    "'group_class not in ['project']' filter should pass null")
+  end
+
+  test 'error message for non-array element in filters array' do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :active
+    get :index, {
+      filters: [{bogus: 'filter'}],
+    }
+    assert_response 422
+    assert_match(/Invalid element in filters array/,
+                 json_response['errors'].join(' '))
+  end
+
+  test 'error message for full text search on a specific column' do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', '@@', 'abcdef']],
+    }
+    assert_response 422
+    assert_match(/not supported/, json_response['errors'].join(' '))
+  end
+
+  test 'difficult characters in full text search' do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :active
+    get :index, {
+      filters: [['any', '@@', 'a|b"c']],
+    }
+    assert_response :success
+    # (Doesn't matter so much which results are returned.)
+  end
+
+  test 'array operand in full text search' do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :active
+    get :index, {
+      filters: [['any', '@@', ['abc', 'def']]],
+    }
+    assert_response 422
+    assert_match(/not supported/, json_response['errors'].join(' '))
+  end
+
+  test 'api responses provide timestamps with nanoseconds' do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :active
+    get :index
+    assert_response :success
+    assert_not_empty json_response['items']
+    json_response['items'].each do |item|
+      %w(created_at modified_at).each do |attr|
+        # Pass fixtures with null timestamps.
+        next if item[attr].nil?
+        assert_match(/^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d{9}Z$/, item[attr])
+      end
+    end
+  end
+
+  %w(< > <= >= =).each do |operator|
+    test "timestamp #{operator} filters work with nanosecond precision" do
+      # Python clients like Node Manager rely on this exact format.
+      # If you must change this format for some reason, make sure you
+      # coordinate the change with them.
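+      # e.g. "2019-03-14T14:11:26.000000000Z"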
+      expect_match = !!operator.index('=')
+      mine = act_as_user users(:active) do
+        Collection.create!(manifest_text: '')
+      end
+      timestamp = mine.modified_at.strftime('%Y-%m-%dT%H:%M:%S.%NZ')
+      @controller = Arvados::V1::CollectionsController.new
+      authorize_with :active
+      get :index, {
+        filters: [['modified_at', operator, timestamp],
+                  ['uuid', '=', mine.uuid]],
+      }
+      assert_response :success
+      uuids = json_response['items'].map { |item| item['uuid'] }
+      if expect_match
+        assert_includes uuids, mine.uuid
+      else
+        assert_not_includes uuids, mine.uuid
+      end
+    end
+  end
+
+  test "full text search with count='none'" do
+    @controller = Arvados::V1::GroupsController.new
+    authorize_with :admin
+
+    get :contents, {
+      format: :json,
+      count: 'none',
+      limit: 1000,
+      filters: [['any', '@@', Rails.configuration.uuid_prefix]],
+    }
+
+    assert_response :success
+
+    all_objects = Hash.new(0)
+    json_response['items'].map{|o| o['kind']}.each{|t| all_objects[t] += 1}
+
+    assert_operator all_objects['arvados#group'], :>, 0
+    assert_operator all_objects['arvados#job'], :>, 0
+    assert_operator all_objects['arvados#pipelineInstance'], :>, 0
+    assert_operator all_objects['arvados#pipelineTemplate'], :>, 0
+
+    # Perform test again mimicking a second page request with:
+    # last_object_class = PipelineInstance
+    #   and hence groups and jobs should not be included in the response
+    # offset = 5, which means first 5 pipeline instances were already received in page 1
+    #   and hence the remaining pipeline instances and all other object types should be included in the response
+
+    @test_counter = 0  # Reset executed action counter
+
+    @controller = Arvados::V1::GroupsController.new
+
+    get :contents, {
+      format: :json,
+      count: 'none',
+      limit: 1000,
+      offset: '5',
+      last_object_class: 'PipelineInstance',
+      filters: [['any', '@@', Rails.configuration.uuid_prefix]],
+    }
+
+    assert_response :success
+
+    second_page = Hash.new(0)
+    json_response['items'].map{|o| o['kind']}.each{|t| second_page[t] += 1}
+
+    refute_includes second_page, 'arvados#group'
+    refute_includes second_page, 'arvados#job'
+    assert_operator second_page['arvados#pipelineInstance'], :>, 0
+    assert_equal all_objects['arvados#pipelineInstance'], second_page['arvados#pipelineInstance'] + 5
+    assert_operator second_page['arvados#pipelineTemplate'], :>, 0
+  end
+
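+  # [property name, operator, operand,
+  #  fixtures expected in the result set, fixtures expected to be absent]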
+  [['prop1', '=', 'value1', [:collection_with_prop1_value1], [:collection_with_prop1_value2, :collection_with_prop2_1]],
+   ['prop1', '!=', 'value1', [:collection_with_prop1_value2, :collection_with_prop2_1], [:collection_with_prop1_value1]],
+   ['prop1', 'exists', true, [:collection_with_prop1_value1, :collection_with_prop1_value2, :collection_with_prop1_value3, :collection_with_prop1_other1], [:collection_with_prop2_1]],
+   ['prop1', 'exists', false, [:collection_with_prop2_1], [:collection_with_prop1_value1, :collection_with_prop1_value2, :collection_with_prop1_value3, :collection_with_prop1_other1]],
+   ['prop1', 'in', ['value1', 'value2'], [:collection_with_prop1_value1, :collection_with_prop1_value2], [:collection_with_prop1_value3, :collection_with_prop2_1]],
+   ['prop1', 'in', ['value1', 'valueX'], [:collection_with_prop1_value1], [:collection_with_prop1_value3, :collection_with_prop2_1]],
+   ['prop1', 'not in', ['value1', 'value2'], [:collection_with_prop1_value3, :collection_with_prop1_other1, :collection_with_prop2_1], [:collection_with_prop1_value1, :collection_with_prop1_value2]],
+   ['prop1', 'not in', ['value1', 'valueX'], [:collection_with_prop1_value2, :collection_with_prop1_value3, :collection_with_prop1_other1, :collection_with_prop2_1], [:collection_with_prop1_value1]],
+   ['prop1', '>', 'value2', [:collection_with_prop1_value3], [:collection_with_prop1_other1, :collection_with_prop1_value1]],
+   ['prop1', '<', 'value2', [:collection_with_prop1_other1, :collection_with_prop1_value1], [:collection_with_prop1_value2, :collection_with_prop1_value2]],
+   ['prop1', '<=', 'value2', [:collection_with_prop1_other1, :collection_with_prop1_value1, :collection_with_prop1_value2], [:collection_with_prop1_value3]],
+   ['prop1', '>=', 'value2', [:collection_with_prop1_value2, :collection_with_prop1_value3], [:collection_with_prop1_other1, :collection_with_prop1_value1]],
+   ['prop1', 'like', 'value%', [:collection_with_prop1_value1, :collection_with_prop1_value2, :collection_with_prop1_value3], [:collection_with_prop1_other1]],
+   ['prop1', 'like', '%1', [:collection_with_prop1_value1, :collection_with_prop1_other1], [:collection_with_prop1_value2, :collection_with_prop1_value3]],
+   ['prop1', 'ilike', 'VALUE%', [:collection_with_prop1_value1, :collection_with_prop1_value2, :collection_with_prop1_value3], [:collection_with_prop1_other1]],
+   ['prop2', '>',  1, [:collection_with_prop2_5], [:collection_with_prop2_1]],
+   ['prop2', '<',  5, [:collection_with_prop2_1], [:collection_with_prop2_5]],
+   ['prop2', '<=', 5, [:collection_with_prop2_1, :collection_with_prop2_5], []],
+   ['prop2', '>=', 1, [:collection_with_prop2_1, :collection_with_prop2_5], []],
+   ['<http://schema.org/example>', '=', "value1", [:collection_with_uri_prop], []],
+  ].each do |prop, op, opr, inc, ex|
+    test "jsonb filter properties.#{prop} #{op} #{opr})" do
+      @controller = Arvados::V1::CollectionsController.new
+      authorize_with :admin
+      get :index, {
+            filters: SafeJSON.dump([ ["properties.#{prop}", op, opr] ]),
+            limit: 1000
+          }
+      assert_response :success
+      found = assigns(:objects).collect(&:uuid)
+
+      inc.each do |i|
+        assert_includes(found, collections(i).uuid)
+      end
+
+      ex.each do |e|
+        assert_not_includes(found, collections(e).uuid)
+      end
+    end
+  end
+
+  test "jsonb hash 'exists' and '!=' filter" do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :admin
+    get :index, {
+      filters: [ ['properties.prop1', 'exists', true], ['properties.prop1', '!=', 'value1'] ]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal 3, found.length
+    assert_not_includes(found, collections(:collection_with_prop1_value1).uuid)
+    assert_includes(found, collections(:collection_with_prop1_value2).uuid)
+    assert_includes(found, collections(:collection_with_prop1_value3).uuid)
+    assert_includes(found, collections(:collection_with_prop1_other1).uuid)
+  end
+
+  test "jsonb array 'exists'" do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :admin
+    get :index, {
+      filters: [ ['storage_classes_confirmed.default', 'exists', true] ]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal 2, found.length
+    assert_not_includes(found,
+      collections(:storage_classes_desired_default_unconfirmed).uuid)
+    assert_includes(found,
+      collections(:storage_classes_desired_default_confirmed_default).uuid)
+    assert_includes(found,
+      collections(:storage_classes_desired_archive_confirmed_default).uuid)
+  end
+
+  test "jsonb hash alternate form 'exists' and '!=' filter" do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :admin
+    get :index, {
+      filters: [ ['properties', 'exists', 'prop1'], ['properties.prop1', '!=', 'value1'] ]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal 3, found.length
+    assert_not_includes(found, collections(:collection_with_prop1_value1).uuid)
+    assert_includes(found, collections(:collection_with_prop1_value2).uuid)
+    assert_includes(found, collections(:collection_with_prop1_value3).uuid)
+    assert_includes(found, collections(:collection_with_prop1_other1).uuid)
+  end
+
+  test "jsonb array alternate form 'exists' filter" do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :admin
+    get :index, {
+      filters: [ ['storage_classes_confirmed', 'exists', 'default'] ]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal 2, found.length
+    assert_not_includes(found,
+      collections(:storage_classes_desired_default_unconfirmed).uuid)
+    assert_includes(found,
+      collections(:storage_classes_desired_default_confirmed_default).uuid)
+    assert_includes(found,
+      collections(:storage_classes_desired_archive_confirmed_default).uuid)
+  end
+
+  test "jsonb 'exists' must be boolean" do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :admin
+    get :index, {
+      filters: [ ['properties.prop1', 'exists', nil] ]
+    }
+    assert_response 422
+    assert_match(/Invalid operand '' for 'exists' must be true or false/,
+                 json_response['errors'].join(' '))
+  end
+
+  test "jsonb checks column exists" do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :admin
+    get :index, {
+      filters: [ ['puppies.prop1', '=', 'value1'] ]
+    }
+    assert_response 422
+    assert_match(/Invalid attribute 'puppies' for subproperty filter/,
+                 json_response['errors'].join(' '))
+  end
+
+  test "jsonb checks column is valid" do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :admin
+    get :index, {
+      filters: [ ['name.prop1', '=', 'value1'] ]
+    }
+    assert_response 422
+    assert_match(/Invalid attribute 'name' for subproperty filter/,
+                 json_response['errors'].join(' '))
+  end
+
+  test "jsonb invalid operator" do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :admin
+    get :index, {
+      filters: [ ['properties.prop1', '###', 'value1'] ]
+    }
+    assert_response 422
+    assert_match(/Invalid operator for subproperty search '###'/,
+                 json_response['errors'].join(' '))
+  end
+
+  test "replication_desired = 2" do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :admin
+    get :index, {
+      filters: SafeJSON.dump([ ['replication_desired', '=', 2] ])
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_includes(found, collections(:replication_desired_2_unconfirmed).uuid)
+    assert_includes(found, collections(:replication_desired_2_confirmed_2).uuid)
+  end
+
+end
diff --git a/services/api/test/functional/arvados/v1/groups_controller_test.rb b/services/api/test/functional/arvados/v1/groups_controller_test.rb
new file mode 100644 (file)
index 0000000..5549304
--- /dev/null
+++ b/services/api/test/functional/arvados/v1/groups_controller_test.rb
@@ -0,0 +1,852 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::GroupsControllerTest < ActionController::TestCase
+
+  test "attempt to delete group without read or write access" do
+    authorize_with :active
+    post :destroy, id: groups(:empty_lonely_group).uuid
+    assert_response 404
+  end
+
+  test "attempt to delete group without write access" do
+    authorize_with :active
+    post :destroy, id: groups(:all_users).uuid
+    assert_response 403
+  end
+
+  test "get list of projects" do
+    authorize_with :active
+    get :index, filters: [['group_class', '=', 'project']], format: :json
+    assert_response :success
+    group_uuids = []
+    json_response['items'].each do |group|
+      assert_equal 'project', group['group_class']
+      group_uuids << group['uuid']
+    end
+    assert_includes group_uuids, groups(:aproject).uuid
+    assert_includes group_uuids, groups(:asubproject).uuid
+    assert_not_includes group_uuids, groups(:system_group).uuid
+    assert_not_includes group_uuids, groups(:private).uuid
+  end
+
+  test "get list of groups that are not projects" do
+    authorize_with :active
+    get :index, filters: [['group_class', '!=', 'project']], format: :json
+    assert_response :success
+    group_uuids = []
+    json_response['items'].each do |group|
+      assert_not_equal 'project', group['group_class']
+      group_uuids << group['uuid']
+    end
+    assert_not_includes group_uuids, groups(:aproject).uuid
+    assert_not_includes group_uuids, groups(:asubproject).uuid
+    assert_includes group_uuids, groups(:private).uuid
+    assert_includes group_uuids, groups(:group_with_no_class).uuid
+  end
+
+  test "get list of groups with bogus group_class" do
+    authorize_with :active
+    get :index, {
+      filters: [['group_class', '=', 'nogrouphasthislittleclass']],
+      format: :json,
+    }
+    assert_response :success
+    assert_equal [], json_response['items']
+    assert_equal 0, json_response['items_available']
+  end
+
+  def check_project_contents_response disabled_kinds=[]
+    assert_response :success
+    assert_operator 2, :<=, json_response['items_available']
+    assert_operator 2, :<=, json_response['items'].count
+    kinds = json_response['items'].collect { |i| i['kind'] }.uniq
+    expect_kinds = %w'arvados#group arvados#specimen arvados#pipelineTemplate arvados#job' - disabled_kinds
+    assert_equal expect_kinds, (expect_kinds & kinds)
+
+    json_response['items'].each do |i|
+      if i['kind'] == 'arvados#group'
+        assert(i['group_class'] == 'project',
+               "group#contents returned a non-project group")
+      end
+    end
+
+    disabled_kinds.each do |d|
+      refute_includes kinds, d
+    end
+  end
+
+  test 'get group-owned objects' do
+    authorize_with :active
+    get :contents, {
+      id: groups(:aproject).uuid,
+      format: :json,
+    }
+    check_project_contents_response
+  end
+
+  test "user with project read permission can see project objects" do
+    authorize_with :project_viewer
+    get :contents, {
+      id: groups(:aproject).uuid,
+      format: :json,
+    }
+    check_project_contents_response
+  end
+
+  test "list objects across projects" do
+    authorize_with :project_viewer
+    get :contents, {
+      format: :json,
+      filters: [['uuid', 'is_a', 'arvados#specimen']]
+    }
+    assert_response :success
+    found_uuids = json_response['items'].collect { |i| i['uuid'] }
+    [[:in_aproject, true],
+     [:in_asubproject, true],
+     [:owned_by_private_group, false]].each do |specimen_fixture, should_find|
+      if should_find
+        assert_includes found_uuids, specimens(specimen_fixture).uuid, "did not find specimen fixture '#{specimen_fixture}'"
+      else
+        refute_includes found_uuids, specimens(specimen_fixture).uuid, "found specimen fixture '#{specimen_fixture}'"
+      end
+    end
+  end
+
+  test "list trashed collections and projects" do
+    authorize_with :active
+    get(:contents, {
+          format: :json,
+          include_trash: true,
+          filters: [
+            ['uuid', 'is_a', ['arvados#collection', 'arvados#group']],
+            ['is_trashed', '=', true],
+          ],
+          limit: 10000,
+        })
+    assert_response :success
+    found_uuids = json_response['items'].collect { |i| i['uuid'] }
+    assert_includes found_uuids, groups(:trashed_project).uuid
+    refute_includes found_uuids, groups(:aproject).uuid
+    assert_includes found_uuids, collections(:expired_collection).uuid
+    refute_includes found_uuids, collections(:w_a_z_file).uuid
+  end
+
+  test "list objects in home project" do
+    authorize_with :active
+    get :contents, {
+      format: :json,
+      limit: 200,
+      id: users(:active).uuid
+    }
+    assert_response :success
+    found_uuids = json_response['items'].collect { |i| i['uuid'] }
+    assert_includes found_uuids, specimens(:owned_by_active_user).uuid, "specimen did not appear in home project"
+    refute_includes found_uuids, specimens(:in_asubproject).uuid, "specimen appeared unexpectedly in home project"
+  end
+
+  test "user with project read permission can see project collections" do
+    authorize_with :project_viewer
+    get :contents, {
+      id: groups(:asubproject).uuid,
+      format: :json,
+    }
+    ids = json_response['items'].map { |item| item["uuid"] }
+    assert_includes ids, collections(:baz_file_in_asubproject).uuid
+  end
+
+  [
+    ['collections.name', 'asc', :<=, "name"],
+    ['collections.name', 'desc', :>=, "name"],
+    ['name', 'asc', :<=, "name"],
+    ['name', 'desc', :>=, "name"],
+    ['collections.created_at', 'asc', :<=, "created_at"],
+    ['collections.created_at', 'desc', :>=, "created_at"],
+    ['created_at', 'asc', :<=, "created_at"],
+    ['created_at', 'desc', :>=, "created_at"],
+  ].each do |column, order, operator, field|
+    test "user with project read permission can sort projects on #{column} #{order}" do
+      authorize_with :project_viewer
+      get :contents, {
+        id: groups(:asubproject).uuid,
+        format: :json,
+        filters: [['uuid', 'is_a', "arvados#collection"]],
+        order: "#{column} #{order}"
+      }
+      sorted_values = json_response['items'].collect { |item| item[field] }
+      if field == "name"
+        # Here we avoid assuming too much about the database
+        # collation. Both "alice"<"Bob" and "alice">"Bob" can be
+        # correct. Hopefully it _is_ safe to assume that if "a" comes
+        # before "b" in the ascii alphabet, "aX">"bY" is never true for
+        # any strings X and Y.
+        reliably_sortable_names = sorted_values.select do |name|
+          name[0] >= 'a' && name[0] <= 'z'
+        end.uniq do |name|
+          name[0]
+        end
+        # Preserve order of sorted_values. But do not use &=. If
+        # sorted_values has out-of-order duplicates, we want to preserve
+        # them here, so we can detect them and fail the test below.
+        sorted_values.select! do |name|
+          reliably_sortable_names.include? name
+        end
+      end
+      assert_sorted(operator, sorted_values)
+    end
+  end
+
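+  # Check that each adjacent pair of entries satisfies `operator`; e.g.
+  # assert_sorted(:<=, ['a', 'b', 'b']) passes, while assert_sorted(:<, ['a', 'a'])
+  # would fail. At least two items are required so the check can't pass vacuously.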
+  def assert_sorted(operator, sorted_items)
+    actually_checked_anything = false
+    previous = nil
+    sorted_items.each do |entry|
+      if !previous.nil?
+        assert_operator(previous, operator, entry,
+                        "Entries sorted incorrectly.")
+        actually_checked_anything = true
+      end
+      previous = entry
+    end
+    assert actually_checked_anything, "Didn't even find two items to compare."
+  end
+
+  # Even though the project_viewer tests go through other controllers,
+  # I'm putting them here so they're easy to find alongside the other
+  # project tests.
+  def check_new_project_link_fails(link_attrs)
+    @controller = Arvados::V1::LinksController.new
+    post :create, link: {
+      link_class: "permission",
+      name: "can_read",
+      head_uuid: groups(:aproject).uuid,
+    }.merge(link_attrs)
+    assert_includes(403..422, response.status)
+  end
+
+  test "user with project read permission can't add users to it" do
+    authorize_with :project_viewer
+    check_new_project_link_fails(tail_uuid: users(:spectator).uuid)
+  end
+
+  test "user with project read permission can't add items to it" do
+    authorize_with :project_viewer
+    check_new_project_link_fails(tail_uuid: collections(:baz_file).uuid)
+  end
+
+  test "user with project read permission can't rename items in it" do
+    authorize_with :project_viewer
+    @controller = Arvados::V1::LinksController.new
+    post :update, {
+      id: jobs(:running).uuid,
+      name: "Denied test name",
+    }
+    assert_includes(403..404, response.status)
+  end
+
+  test "user with project read permission can't remove items from it" do
+    @controller = Arvados::V1::PipelineTemplatesController.new
+    authorize_with :project_viewer
+    post :update, {
+      id: pipeline_templates(:two_part).uuid,
+      pipeline_template: {
+        owner_uuid: users(:project_viewer).uuid,
+      }
+    }
+    assert_response 403
+  end
+
+  test "user with project read permission can't delete it" do
+    authorize_with :project_viewer
+    post :destroy, {id: groups(:aproject).uuid}
+    assert_response 403
+  end
+
+  test 'get group-owned objects with limit' do
+    authorize_with :active
+    get :contents, {
+      id: groups(:aproject).uuid,
+      limit: 1,
+      format: :json,
+    }
+    assert_response :success
+    assert_operator 1, :<, json_response['items_available']
+    assert_equal 1, json_response['items'].count
+  end
+
+  test 'get group-owned objects with limit and offset' do
+    authorize_with :active
+    get :contents, {
+      id: groups(:aproject).uuid,
+      limit: 1,
+      offset: 12345,
+      format: :json,
+    }
+    assert_response :success
+    assert_operator 1, :<, json_response['items_available']
+    assert_equal 0, json_response['items'].count
+  end
+
+  test 'get group-owned objects with additional filter matching nothing' do
+    authorize_with :active
+    get :contents, {
+      id: groups(:aproject).uuid,
+      filters: [['uuid', 'in', ['foo_not_a_uuid','bar_not_a_uuid']]],
+      format: :json,
+    }
+    assert_response :success
+    assert_equal [], json_response['items']
+    assert_equal 0, json_response['items_available']
+  end
+
+  %w(offset limit).each do |arg|
+    ['foo', '', '1234five', '0x10', '-8'].each do |val|
+      test "Raise error on bogus #{arg} parameter #{val.inspect}" do
+        authorize_with :active
+        get :contents, {
+          :id => groups(:aproject).uuid,
+          :format => :json,
+          arg => val,
+        }
+        assert_response 422
+      end
+    end
+  end
+
+  test "Collection contents don't include manifest_text" do
+    authorize_with :active
+    get :contents, {
+      id: groups(:aproject).uuid,
+      filters: [["uuid", "is_a", "arvados#collection"]],
+      format: :json,
+    }
+    assert_response :success
+    refute(json_response["items"].any? { |c| not c["portable_data_hash"] },
+           "response included an item without a portable data hash")
+    refute(json_response["items"].any? { |c| c.include?("manifest_text") },
+           "response included an item with a manifest text")
+  end
+
+  test 'get writable_by list for owned group' do
+    authorize_with :active
+    get :show, {
+      id: groups(:aproject).uuid,
+      format: :json
+    }
+    assert_response :success
+    assert_not_nil(json_response['writable_by'],
+                   "Should receive uuid list in 'writable_by' field")
+    assert_includes(json_response['writable_by'], users(:active).uuid,
+                    "owner should be included in writable_by list")
+  end
+
+  test 'no writable_by list for group with read-only access' do
+    authorize_with :rominiadmin
+    get :show, {
+      id: groups(:testusergroup_admins).uuid,
+      format: :json
+    }
+    assert_response :success
+    assert_equal([json_response['owner_uuid']],
+                 json_response['writable_by'],
+                 "Should only see owner_uuid in 'writable_by' field")
+  end
+
+  test 'get writable_by list by admin user' do
+    authorize_with :admin
+    get :show, {
+      id: groups(:testusergroup_admins).uuid,
+      format: :json
+    }
+    assert_response :success
+    assert_not_nil(json_response['writable_by'],
+                   "Should receive uuid list in 'writable_by' field")
+    assert_includes(json_response['writable_by'],
+                    users(:admin).uuid,
+                    "Current user should be included in 'writable_by' field")
+  end
+
+  test 'creating subproject with duplicate name fails' do
+    authorize_with :active
+    post :create, {
+      group: {
+        name: 'A Project',
+        owner_uuid: users(:active).uuid,
+        group_class: 'project',
+      },
+    }
+    assert_response 422
+    response_errors = json_response['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert(response_errors.first.include?('duplicate key'),
+           "Expected 'duplicate key' error in #{response_errors.first}")
+  end
+
+  test 'creating duplicate named subproject succeeds with ensure_unique_name' do
+    authorize_with :active
+    post :create, {
+      group: {
+        name: 'A Project',
+        owner_uuid: users(:active).uuid,
+        group_class: 'project',
+      },
+      ensure_unique_name: true
+    }
+    assert_response :success
+    new_project = json_response
+    assert_not_equal(new_project['uuid'],
+                     groups(:aproject).uuid,
+                     "create returned same uuid as existing project")
+    assert_match(/^A Project \(\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d{3}Z\)$/,
+                 new_project['name'])
+  end
+
+  test "unsharing a project results in hiding it from previously shared user" do
+    # remove sharing link for project
+    @controller = Arvados::V1::LinksController.new
+    authorize_with :admin
+    post :destroy, id: links(:share_starred_project_with_project_viewer).uuid
+    assert_response :success
+
+    # verify that the user can no longer see the project
+    @test_counter = 0  # Reset executed action counter
+    @controller = Arvados::V1::GroupsController.new
+    authorize_with :project_viewer
+    get :index, filters: [['group_class', '=', 'project']], format: :json
+    assert_response :success
+    found_projects = {}
+    json_response['items'].each do |g|
+      found_projects[g['uuid']] = g
+    end
+    assert_equal false, found_projects.include?(groups(:starred_and_shared_active_user_project).uuid)
+
+    # share the project
+    @test_counter = 0
+    @controller = Arvados::V1::LinksController.new
+    authorize_with :system_user
+    post :create, link: {
+      link_class: "permission",
+      name: "can_read",
+      head_uuid: groups(:starred_and_shared_active_user_project).uuid,
+      tail_uuid: users(:project_viewer).uuid,
+    }
+
+    # verify that project_viewer user can now see shared project again
+    @test_counter = 0
+    @controller = Arvados::V1::GroupsController.new
+    authorize_with :project_viewer
+    get :index, filters: [['group_class', '=', 'project']], format: :json
+    assert_response :success
+    found_projects = {}
+    json_response['items'].each do |g|
+      found_projects[g['uuid']] = g
+    end
+    assert_equal true, found_projects.include?(groups(:starred_and_shared_active_user_project).uuid)
+  end
+
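+  # Each case: [filter, expected status, uuid expected in the results,
+  #             uuid expected to be absent]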
+  [
+    [['owner_uuid', '!=', 'zzzzz-tpzed-xurymjxw79nv3jz'], 200,
+        'zzzzz-d1hrv-subprojpipeline', 'zzzzz-d1hrv-1xfj6xkicf2muk2'],
+    [["pipeline_instances.state", "not in", ["Complete", "Failed"]], 200,
+        'zzzzz-d1hrv-1xfj6xkicf2muk2', 'zzzzz-d1hrv-i3e77t9z5y8j9cc'],
+    [['container_requests.requesting_container_uuid', '=', nil], 200,
+        'zzzzz-xvhdp-cr4queuedcontnr', 'zzzzz-xvhdp-cr4requestercn2'],
+    [['container_requests.no_such_column', '=', nil], 422],
+    [['container_requests.', '=', nil], 422],
+    [['.requesting_container_uuid', '=', nil], 422],
+    [['no_such_table.uuid', '!=', 'zzzzz-tpzed-xurymjxw79nv3jz'], 422],
+  ].each do |filter, expect_code, expect_uuid, not_expect_uuid|
+    test "get contents with '#{filter}' filter" do
+      authorize_with :active
+      get :contents, filters: [filter], format: :json
+      assert_response expect_code
+      if expect_code == 200
+        assert_not_empty json_response['items']
+        item_uuids = json_response['items'].collect {|item| item['uuid']}
+        assert_includes(item_uuids, expect_uuid)
+        assert_not_includes(item_uuids, not_expect_uuid)
+      end
+    end
+  end
+
+  test 'get contents with jobs and pipeline instances disabled' do
+    Rails.configuration.disable_api_methods = ['jobs.index', 'pipeline_instances.index']
+
+    authorize_with :active
+    get :contents, {
+      id: groups(:aproject).uuid,
+      format: :json,
+    }
+    check_project_contents_response %w'arvados#pipelineInstance arvados#job'
+  end
+
+  test 'get contents with low max_index_database_read' do
+    # Some result will certainly have at least 12 bytes in a
+    # restricted column
+    Rails.configuration.max_index_database_read = 12
+    authorize_with :active
+    get :contents, {
+          id: groups(:aproject).uuid,
+          format: :json,
+        }
+    assert_response :success
+    assert_not_empty(json_response['items'])
+    assert_operator(json_response['items'].count,
+                    :<, json_response['items_available'])
+  end
+
+  test 'get contents, recursive=true' do
+    authorize_with :active
+    params = {
+      id: groups(:aproject).uuid,
+      recursive: true,
+      format: :json,
+    }
+    get :contents, params
+    owners = json_response['items'].map do |item|
+      item['owner_uuid']
+    end
+    assert_includes(owners, groups(:aproject).uuid)
+    assert_includes(owners, groups(:asubproject).uuid)
+  end
+
+  [false, nil].each do |recursive|
+    test "get contents, recursive=#{recursive.inspect}" do
+      authorize_with :active
+      params = {
+        id: groups(:aproject).uuid,
+        format: :json,
+      }
+      params[:recursive] = false if recursive == false
+      get :contents, params
+      owners = json_response['items'].map do |item|
+        item['owner_uuid']
+      end
+      assert_includes(owners, groups(:aproject).uuid)
+      refute_includes(owners, groups(:asubproject).uuid)
+    end
+  end
+
+  test 'get home project contents, recursive=true' do
+    authorize_with :active
+    get :contents, {
+          id: users(:active).uuid,
+          recursive: true,
+          format: :json,
+        }
+    owners = json_response['items'].map do |item|
+      item['owner_uuid']
+    end
+    assert_includes(owners, users(:active).uuid)
+    assert_includes(owners, groups(:aproject).uuid)
+    assert_includes(owners, groups(:asubproject).uuid)
+  end
+
+  ### trashed project tests ###
+
+  [:active, :admin].each do |auth|
+    # project: to query,    to untrash,    is visible, parent contents listing success
+    [[:trashed_project,     [],                 false, true],
+     [:trashed_project,     [:trashed_project], true,  true],
+     [:trashed_subproject,  [],                 false, false],
+     [:trashed_subproject,  [:trashed_project], true,  true],
+     [:trashed_subproject3, [:trashed_project], false, true],
+     [:trashed_subproject3, [:trashed_subproject3], false, false],
+     [:trashed_subproject3, [:trashed_project, :trashed_subproject3], true, true],
+    ].each do |project, untrash, visible, success|
+
+      test "contents listing #{project} #{untrash} as #{auth}" do
+        authorize_with auth
+        untrash.each do |pr|
+          Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false
+        end
+        get :contents, {
+              id: groups(project).owner_uuid,
+              format: :json
+            }
+        if success
+          assert_response :success
+          item_uuids = json_response['items'].map do |item|
+            item['uuid']
+          end
+          if visible
+            assert_includes(item_uuids, groups(project).uuid)
+          else
+            assert_not_includes(item_uuids, groups(project).uuid)
+          end
+        else
+          assert_response 404
+        end
+      end
+
+      test "contents of #{project} #{untrash} as #{auth}" do
+        authorize_with auth
+        untrash.each do |pr|
+          Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false
+        end
+        get :contents, {
+              id: groups(project).uuid,
+              format: :json
+            }
+        if visible
+          assert_response :success
+        else
+          assert_response 404
+        end
+      end
+
+      test "index #{project} #{untrash} as #{auth}" do
+        authorize_with auth
+        untrash.each do |pr|
+          Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false
+        end
+        get :index, {
+              format: :json,
+            }
+        assert_response :success
+        item_uuids = json_response['items'].map do |item|
+          item['uuid']
+        end
+        if visible
+          assert_includes(item_uuids, groups(project).uuid)
+        else
+          assert_not_includes(item_uuids, groups(project).uuid)
+        end
+      end
+
+      test "show #{project} #{untrash} as #{auth}" do
+        authorize_with auth
+        untrash.each do |pr|
+          Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false
+        end
+        get :show, {
+              id: groups(project).uuid,
+              format: :json
+            }
+        if visible
+          assert_response :success
+        else
+          assert_response 404
+        end
+      end
+
+      test "show include_trash #{project} #{untrash} as #{auth}" do
+        authorize_with auth
+        untrash.each do |pr|
+          Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false
+        end
+        get :show, {
+              id: groups(project).uuid,
+              format: :json,
+              include_trash: true
+            }
+        assert_response :success
+      end
+
+      test "index include_trash #{project} #{untrash} as #{auth}" do
+        authorize_with auth
+        untrash.each do |pr|
+          Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false
+        end
+        get :index, {
+              format: :json,
+              include_trash: true
+            }
+        assert_response :success
+        item_uuids = json_response['items'].map do |item|
+          item['uuid']
+        end
+        assert_includes(item_uuids, groups(project).uuid)
+      end
+    end
+
+    test "delete project #{auth}" do
+      authorize_with auth
+      [:trashed_project].each do |pr|
+        Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false
+      end
+      assert !Group.find_by_uuid(groups(:trashed_project).uuid).is_trashed
+      post :destroy, {
+            id: groups(:trashed_project).uuid,
+            format: :json,
+          }
+      assert_response :success
+      assert Group.find_by_uuid(groups(:trashed_project).uuid).is_trashed
+    end
+
+    test "untrash project #{auth}" do
+      authorize_with auth
+      assert Group.find_by_uuid(groups(:trashed_project).uuid).is_trashed
+      post :untrash, {
+            id: groups(:trashed_project).uuid,
+            format: :json,
+          }
+      assert_response :success
+      assert !Group.find_by_uuid(groups(:trashed_project).uuid).is_trashed
+    end
+
+    test "untrash project with name conflict #{auth}" do
+      authorize_with auth
+      [:trashed_project].each do |pr|
+        Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false
+      end
+      Group.create!({owner_uuid: "zzzzz-j7d0g-trashedproject1",
+                     name: "trashed subproject 3",
+                     group_class: "project"})
+      post :untrash, {
+            id: groups(:trashed_subproject3).uuid,
+            format: :json,
+            ensure_unique_name: true
+           }
+      assert_response :success
+      assert_match(/^trashed subproject 3 \(\d{4}-\d\d-\d\d.*?Z\)$/, json_response['name'])
+    end
+
+    test "move trashed subproject to new owner #{auth}" do
+      authorize_with auth
+      assert_nil Group.readable_by(users(auth)).where(uuid: groups(:trashed_subproject).uuid).first
+      put :update, {
+            id: groups(:trashed_subproject).uuid,
+            group: {
+              owner_uuid: users(:active).uuid
+            },
+            include_trash: true,
+            format: :json,
+          }
+      assert_response :success
+      assert_not_nil Group.readable_by(users(auth)).where(uuid: groups(:trashed_subproject).uuid).first
+    end
+  end
+
+  test 'get shared owned by another user' do
+    authorize_with :user_bar_in_sharing_group
+
+    act_as_system_user do
+      Link.create!(
+        tail_uuid: users(:user_bar_in_sharing_group).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:project_owned_by_foo).uuid)
+    end
+
+    get :shared, {:filters => [["group_class", "=", "project"]], :include => "owner_uuid"}
+
+    assert_equal 1, json_response['items'].length
+    assert_equal json_response['items'][0]["uuid"], groups(:project_owned_by_foo).uuid
+
+    assert_equal 1, json_response['included'].length
+    assert_equal json_response['included'][0]["uuid"], users(:user_foo_in_sharing_group).uuid
+  end
+
+  test 'get shared, owned by unreadable project' do
+    authorize_with :user_bar_in_sharing_group
+
+    act_as_system_user do
+      Group.find_by_uuid(groups(:project_owned_by_foo).uuid).update!(owner_uuid: groups(:aproject).uuid)
+      Link.create!(
+        tail_uuid: users(:user_bar_in_sharing_group).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:project_owned_by_foo).uuid)
+    end
+
+    get :shared, {:filters => [["group_class", "=", "project"]], :include => "owner_uuid"}
+
+    assert_equal 1, json_response['items'].length
+    assert_equal json_response['items'][0]["uuid"], groups(:project_owned_by_foo).uuid
+
+    assert_equal 0, json_response['included'].length
+  end
+
+  test 'get shared, owned by non-project' do
+    authorize_with :user_bar_in_sharing_group
+
+    act_as_system_user do
+      Group.find_by_uuid(groups(:project_owned_by_foo).uuid).update!(owner_uuid: groups(:group_for_sharing_tests).uuid)
+    end
+
+    get :shared, {:filters => [["group_class", "=", "project"]], :include => "owner_uuid"}
+
+    assert_equal 1, json_response['items'].length
+    assert_equal json_response['items'][0]["uuid"], groups(:project_owned_by_foo).uuid
+
+    assert_equal 1, json_response['included'].length
+    assert_equal json_response['included'][0]["uuid"], groups(:group_for_sharing_tests).uuid
+  end
+
+  ### contents with exclude_home_project
+
+  test 'contents, exclude home owned by another user' do
+    authorize_with :user_bar_in_sharing_group
+
+    act_as_system_user do
+      Link.create!(
+        tail_uuid: users(:user_bar_in_sharing_group).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:project_owned_by_foo).uuid)
+      Link.create!(
+        tail_uuid: users(:user_bar_in_sharing_group).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: collections(:collection_owned_by_foo).uuid)
+    end
+
+    get :contents, {:include => "owner_uuid", :exclude_home_project => true}
+
+    assert_equal 2, json_response['items'].length
+    assert_equal json_response['items'][0]["uuid"], groups(:project_owned_by_foo).uuid
+    assert_equal json_response['items'][1]["uuid"], collections(:collection_owned_by_foo).uuid
+
+    assert_equal 1, json_response['included'].length
+    assert_equal json_response['included'][0]["uuid"], users(:user_foo_in_sharing_group).uuid
+  end
+
+  test 'contents, exclude home, owned by unreadable project' do
+    authorize_with :user_bar_in_sharing_group
+
+    act_as_system_user do
+      Group.find_by_uuid(groups(:project_owned_by_foo).uuid).update!(owner_uuid: groups(:aproject).uuid)
+      Link.create!(
+        tail_uuid: users(:user_bar_in_sharing_group).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:project_owned_by_foo).uuid)
+    end
+
+    get :contents, {:include => "owner_uuid", :exclude_home_project => true}
+
+    assert_equal 1, json_response['items'].length
+    assert_equal json_response['items'][0]["uuid"], groups(:project_owned_by_foo).uuid
+
+    assert_equal 0, json_response['included'].length
+  end
+
+  test 'contents, exclude home, owned by non-project' do
+    authorize_with :user_bar_in_sharing_group
+
+    act_as_system_user do
+      Group.find_by_uuid(groups(:project_owned_by_foo).uuid).update!(owner_uuid: groups(:group_for_sharing_tests).uuid)
+    end
+
+    get :contents, {:include => "owner_uuid", :exclude_home_project => true}
+
+    assert_equal 1, json_response['items'].length
+    assert_equal json_response['items'][0]["uuid"], groups(:project_owned_by_foo).uuid
+
+    assert_equal 1, json_response['included'].length
+    assert_equal json_response['included'][0]["uuid"], groups(:group_for_sharing_tests).uuid
+  end
+
+
+  test 'contents, exclude home, with parent specified' do
+    authorize_with :active
+
+    get :contents, {id: groups(:aproject).uuid, :include => "owner_uuid", :exclude_home_project => true}
+
+    assert_response 422
+  end
+
+end
diff --git a/services/api/test/functional/arvados/v1/healthcheck_controller_test.rb b/services/api/test/functional/arvados/v1/healthcheck_controller_test.rb
new file mode 100644 (file)
index 0000000..551eefa
--- /dev/null
@@ -0,0 +1,30 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::HealthcheckControllerTest < ActionController::TestCase
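+  # Each case: [ManagementToken configured?, Authorization header,
+  #             expected status, expected error (or body when the status is 200)]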
+  [
+    [false, nil, 404, 'disabled'],
+    [true, nil, 401, 'authorization required'],
+    [true, 'badformatwithnoBearer', 403, 'authorization error'],
+    [true, 'Bearer wrongtoken', 403, 'authorization error'],
+    [true, 'Bearer configuredmanagementtoken', 200, '{"health":"OK"}'],
+  ].each do |enabled, header, expect_code, expect_msg|
+    test "ping when #{enabled ? 'enabled' : 'disabled'} with header '#{header}'" do
+      Rails.configuration.ManagementToken = 'configuredmanagementtoken' if enabled
+
+      @request.headers['Authorization'] = header
+      get :ping
+      assert_response expect_code
+
+      resp = JSON.parse(@response.body)
+      if expect_code == 200
+        assert_equal(JSON.load('{"health":"OK"}'), resp)
+      else
+        assert_equal(expect_msg, resp['errors'])
+      end
+    end
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/humans_controller_test.rb b/services/api/test/functional/arvados/v1/humans_controller_test.rb
new file mode 100644 (file)
index 0000000..d73fb30
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::HumansControllerTest < ActionController::TestCase
+end
diff --git a/services/api/test/functional/arvados/v1/job_reuse_controller_test.rb b/services/api/test/functional/arvados/v1/job_reuse_controller_test.rb
new file mode 100644 (file)
index 0000000..f4abf4d
--- /dev/null
@@ -0,0 +1,778 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/git_test_helper'
+
+class Arvados::V1::JobReuseControllerTest < ActionController::TestCase
+  fixtures :repositories, :users, :jobs, :links, :collections
+
+  # See git_setup.rb for the commit log for test.git.tar
+  include GitTestHelper
+
+  setup do
+    @controller = Arvados::V1::JobsController.new
+    authorize_with :active
+  end
+
+  test "reuse job with no_reuse=false" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+      repository: "active/foo",
+      script_parameters: {
+        an_integer: '1',
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "reuse job with find_or_create=true" do
+    post :create, {
+      job: {
+        script: "hash",
+        script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+        repository: "active/foo",
+        script_parameters: {
+          input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+          an_integer: '1'
+        }
+      },
+      find_or_create: true
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "no reuse job with null log" do
+    post :create, {
+      job: {
+        script: "hash",
+        script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+        repository: "active/foo",
+        script_parameters: {
+          input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+          an_integer: '3'
+        }
+      },
+      find_or_create: true
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqq3', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "reuse job with symbolic script_version" do
+    post :create, {
+      job: {
+        script: "hash",
+        script_version: "tag1",
+        repository: "active/foo",
+        script_parameters: {
+          input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+          an_integer: '1'
+        }
+      },
+      find_or_create: true
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "do not reuse job because no_reuse=true" do
+    post :create, {
+      job: {
+        no_reuse: true,
+        script: "hash",
+        script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+        repository: "active/foo",
+        script_parameters: {
+          input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+          an_integer: '1'
+        }
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  [false, "false"].each do |whichfalse|
+    test "do not reuse job because find_or_create=#{whichfalse.inspect}" do
+      post :create, {
+        job: {
+          script: "hash",
+          script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+          repository: "active/foo",
+          script_parameters: {
+            input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+            an_integer: '1'
+          }
+        },
+        find_or_create: whichfalse
+      }
+      assert_response :success
+      assert_not_nil assigns(:object)
+      new_job = JSON.parse(@response.body)
+      assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+      assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+    end
+  end
+
+  test "do not reuse job because output is not readable by user" do
+    authorize_with :job_reader
+    post :create, {
+      job: {
+        script: "hash",
+        script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+        repository: "active/foo",
+        script_parameters: {
+          input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+          an_integer: '1'
+        }
+      },
+      find_or_create: true
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "test_cannot_reuse_job_no_output" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+      repository: "active/foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '2'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykppp', new_job['uuid']
+  end
+
+  test "test_reuse_job_range" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      minimum_script_version: "tag1",
+      script_version: "master",
+      repository: "active/foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "cannot_reuse_job_no_minimum_given_so_must_use_specified_commit" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "master",
+      repository: "active/foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '077ba2ad3ea24a929091a9e6ce545c93199b8e57', new_job['script_version']
+  end
+
+  test "test_cannot_reuse_job_different_input" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+      repository: "active/foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '2'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "test_cannot_reuse_job_different_version" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "master",
+      repository: "active/foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '2'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '077ba2ad3ea24a929091a9e6ce545c93199b8e57', new_job['script_version']
+  end
+
+  test "test_can_reuse_job_submitted_nondeterministic" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+      repository: "active/foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1'
+      },
+      nondeterministic: true
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "test_cannot_reuse_job_past_nondeterministic" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash2",
+      script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+      repository: "active/foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykyyy', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "test_cannot_reuse_job_no_permission" do
+    authorize_with :spectator
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+      repository: "active/foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "test_cannot_reuse_job_excluded" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      minimum_script_version: "31ce37fe365b3dc204300a3e4c396ad333ed0556",
+      script_version: "master",
+      repository: "active/foo",
+      exclude_script_versions: ["tag1"],
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_not_equal('4fe459abe02d9b365932b8f5dc419439ab4e2577',
+                     new_job['script_version'])
+  end
+
+  test "cannot reuse job with find_or_create but excluded version" do
+    post :create, {
+      job: {
+        script: "hash",
+        script_version: "master",
+        repository: "active/foo",
+        script_parameters: {
+          input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+          an_integer: '1'
+        }
+      },
+      find_or_create: true,
+      minimum_script_version: "31ce37fe365b3dc204300a3e4c396ad333ed0556",
+      exclude_script_versions: ["tag1"],
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_not_equal('4fe459abe02d9b365932b8f5dc419439ab4e2577',
+                     new_job['script_version'])
+  end
+
+  test "cannot reuse job when hash-like branch includes newer commit" do
+    check_new_job_created_from({job: {script_version: "738783"}},
+                               :previous_job_run_superseded_by_hash_branch)
+  end
+
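+  # Baseline filters for the find_or_create reuse tests below; individual
+  # tests merge in extra entries or drop keys to tighten or loosen the match.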
+  BASE_FILTERS = {
+    'repository' => ['=', 'active/foo'],
+    'script' => ['=', 'hash'],
+    'script_version' => ['in git', 'master'],
+    'docker_image_locator' => ['=', nil],
+    'arvados_sdk_version' => ['=', nil],
+  }
+
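+  # Convert a filter hash like {'script' => ['=', 'hash']} into the
+  # list-of-lists form the API accepts: [['script', '=', 'hash']].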
+  def filters_from_hash(hash)
+    hash.each_pair.map { |name, filter| [name] + filter }
+  end
+
+  test "can reuse a Job based on filters" do
+    filters_hash = BASE_FILTERS.
+      merge('script_version' => ['in git', 'tag1'])
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "master",
+             repository: "active/foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             }
+           },
+           filters: filters_from_hash(filters_hash),
+           find_or_create: true,
+         })
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "can not reuse a Job based on filters" do
+    filters = filters_from_hash(BASE_FILTERS
+                                  .reject { |k| k == 'script_version' })
+    filters += [["script_version", "in git",
+                 "31ce37fe365b3dc204300a3e4c396ad333ed0556"],
+                ["script_version", "not in git", ["tag1"]]]
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "master",
+             repository: "active/foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             }
+           },
+           filters: filters,
+           find_or_create: true,
+         })
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '077ba2ad3ea24a929091a9e6ce545c93199b8e57', new_job['script_version']
+  end
+
+  test "can not reuse a Job based on arbitrary filters" do
+    filters_hash = BASE_FILTERS.
+      merge("created_at" => ["<", "2010-01-01T00:00:00Z"])
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+             repository: "active/foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             }
+           },
+           filters: filters_from_hash(filters_hash),
+           find_or_create: true,
+         })
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "can reuse a Job with a Docker image" do
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+             repository: "active/foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             },
+             runtime_constraints: {
+               docker_image: 'arvados/apitestfixture',
+             }
+           },
+           find_or_create: true,
+         })
+    assert_response :success
+    new_job = assigns(:object)
+    assert_not_nil new_job
+    target_job = jobs(:previous_docker_job_run)
+    [:uuid, :script_version, :docker_image_locator].each do |attr|
+      assert_equal(target_job.send(attr), new_job.send(attr))
+    end
+  end
+
+  test "can reuse a Job with a Docker image hash filter" do
+    filters_hash = BASE_FILTERS.
+      merge("script_version" =>
+              ["=", "4fe459abe02d9b365932b8f5dc419439ab4e2577"],
+            "docker_image_locator" =>
+              ["in docker", links(:docker_image_collection_hash).name])
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+             repository: "active/foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             },
+           },
+           filters: filters_from_hash(filters_hash),
+           find_or_create: true,
+         })
+    assert_response :success
+    new_job = assigns(:object)
+    assert_not_nil new_job
+    target_job = jobs(:previous_docker_job_run)
+    [:uuid, :script_version, :docker_image_locator].each do |attr|
+      assert_equal(target_job.send(attr), new_job.send(attr))
+    end
+  end
+
+  test "reuse Job with Docker image repo+tag" do
+    filters_hash = BASE_FILTERS.
+      merge("script_version" =>
+              ["=", "4fe459abe02d9b365932b8f5dc419439ab4e2577"],
+            "docker_image_locator" =>
+              ["in docker", links(:docker_image_collection_tag2).name])
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+             repository: "active/foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             },
+           },
+           filters: filters_from_hash(filters_hash),
+           find_or_create: true,
+         })
+    assert_response :success
+    new_job = assigns(:object)
+    assert_not_nil new_job
+    target_job = jobs(:previous_docker_job_run)
+    [:uuid, :script_version, :docker_image_locator].each do |attr|
+      assert_equal(target_job.send(attr), new_job.send(attr))
+    end
+  end
+
+  test "new job with unknown Docker image filter" do
+    filters_hash = BASE_FILTERS.
+      merge("docker_image_locator" => ["in docker", "_nonesuchname_"])
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+             repository: "active/foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             },
+           },
+           filters: filters_from_hash(filters_hash),
+           find_or_create: true,
+         })
+    assert_response :success
+    new_job = assigns(:object)
+    assert_not_nil new_job
+    assert_not_equal(jobs(:previous_docker_job_run).uuid, new_job.uuid)
+  end
+
+  test "don't reuse job using older Docker image of same name" do
+    jobspec = {runtime_constraints: {
+        docker_image: "arvados/apitestfixture",
+      }}
+    check_new_job_created_from({job: jobspec},
+                               :previous_ancient_docker_image_job_run)
+  end
+
+  test "reuse job with Docker image that has hash name" do
+    jobspec = {runtime_constraints: {
+        docker_image: "a" * 64,
+      }}
+    check_job_reused_from(jobspec, :previous_docker_job_run)
+  end
+
+  ["repository", "script"].each do |skip_key|
+    test "missing #{skip_key} filter raises an error" do
+      filters = filters_from_hash(BASE_FILTERS.reject { |k| k == skip_key })
+      post(:create, {
+             job: {
+               script: "hash",
+               script_version: "master",
+               repository: "active/foo",
+               script_parameters: {
+                 input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+                 an_integer: '1'
+               }
+             },
+             filters: filters,
+             find_or_create: true,
+           })
+      assert_includes(405..599, @response.code.to_i,
+                      "bad status code with missing #{skip_key} filter")
+    end
+  end
+
+  test "find Job with script version range" do
+    get :index, filters: [["repository", "=", "active/foo"],
+                          ["script", "=", "hash"],
+                          ["script_version", "in git", "tag1"]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_job_run).uuid)
+  end
+
+  test "find Job with script version range exclusions" do
+    get :index, filters: [["repository", "=", "active/foo"],
+                          ["script", "=", "hash"],
+                          ["script_version", "not in git", "tag1"]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    refute_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_job_run).uuid)
+  end
+
+  test "find Job with Docker image range" do
+    get :index, filters: [["docker_image_locator", "in docker",
+                           "arvados/apitestfixture"]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_docker_job_run).uuid)
+    refute_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_job_run).uuid)
+  end
+
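+  # reader_tokens supplies extra tokens used only to extend read access on
+  # this request: here the :inactive caller relies on :active's token to
+  # see the Docker job.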
+  test "find Job with Docker image using reader tokens" do
+    authorize_with :inactive
+    get(:index, {
+          filters: [["docker_image_locator", "in docker",
+                     "arvados/apitestfixture"]],
+          reader_tokens: [api_token(:active)],
+        })
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_docker_job_run).uuid)
+    refute_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_job_run).uuid)
+  end
+
+  test "'in docker' filter accepts arrays" do
+    get :index, filters: [["docker_image_locator", "in docker",
+                           ["_nonesuchname_", "arvados/apitestfixture"]]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_docker_job_run).uuid)
+    refute_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_job_run).uuid)
+  end
+
+  test "'not in docker' filter accepts arrays" do
+    get :index, filters: [["docker_image_locator", "not in docker",
+                           ["_nonesuchname_", "arvados/apitestfixture"]]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_job_run).uuid)
+    refute_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_docker_job_run).uuid)
+  end
+
+  JOB_SUBMIT_KEYS = [:script, :script_parameters, :script_version, :repository]
+  DEFAULT_START_JOB = :previous_job_run
+
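+  # Build :create params seeded from a job fixture: copy its submit keys,
+  # inherit its runtime constraints (arvados_sdk_version, docker_image)
+  # unless overridden, and default find_or_create to true so the reuse
+  # logic is exercised.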
+  def create_job_params(params, start_from=DEFAULT_START_JOB)
+    if not params.has_key?(:find_or_create)
+      params[:find_or_create] = true
+    end
+    job_attrs = params.delete(:job) || {}
+    start_job = jobs(start_from)
+    params[:job] = Hash[JOB_SUBMIT_KEYS.map do |key|
+                          [key, start_job.send(key)]
+                        end]
+    params[:job][:runtime_constraints] =
+      job_attrs.delete(:runtime_constraints) || {}
+    { arvados_sdk_version: :arvados_sdk_version,
+      docker_image_locator: :docker_image }.each do |method, constraint_key|
+      if constraint_value = start_job.send(method)
+        params[:job][:runtime_constraints][constraint_key] ||= constraint_value
+      end
+    end
+    params[:job].merge!(job_attrs)
+    params
+  end
+
+  def create_job_from(params, start_from)
+    post(:create, create_job_params(params, start_from))
+    assert_response :success
+    new_job = assigns(:object)
+    assert_not_nil new_job
+    new_job
+  end
+
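+  # Expect the POST to have created a brand-new job rather than reusing
+  # start_from: its created_at must be no earlier than the clock reading
+  # taken just before the request.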
+  def check_new_job_created_from(params, start_from=DEFAULT_START_JOB)
+    start_time = Time.now
+    new_job = create_job_from(params, start_from)
+    assert_operator(start_time, :<=, new_job.created_at)
+    new_job
+  end
+
+  def check_job_reused_from(params, start_from)
+    new_job = create_job_from(params, start_from)
+    assert_equal(jobs(start_from).uuid, new_job.uuid)
+  end
+
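+  # Post params expected to fail: assert a status in 405..499 and that the
+  # error messages are human-readable rather than raw exception dumps.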
+  def check_errors_from(params, start_from=DEFAULT_START_JOB)
+    post(:create, create_job_params(params, start_from))
+    assert_includes(405..499, @response.code.to_i)
+    errors = json_response.fetch("errors", [])
+    assert(errors.any?, "no errors assigned from #{params}")
+    refute(errors.any? { |msg| msg =~ /^#<[A-Za-z]+: / },
+           "errors include raw exception: #{errors.inspect}")
+    errors
+  end
+
+  # 1de84a8 is on the b1 branch, after master's tip.
+  test "new job created from unsatisfiable minimum version filter" do
+    filters_hash = BASE_FILTERS.merge("script_version" => ["in git", "1de84a8"])
+    check_new_job_created_from(filters: filters_from_hash(filters_hash))
+  end
+
+  test "new job created from unsatisfiable minimum version parameter" do
+    check_new_job_created_from(minimum_script_version: "1de84a8")
+  end
+
+  test "new job created from unsatisfiable minimum version attribute" do
+    check_new_job_created_from(job: {minimum_script_version: "1de84a8"})
+  end
+
+  test "graceful error from nonexistent minimum version filter" do
+    filters_hash = BASE_FILTERS.merge("script_version" =>
+                                      ["in git", "__nosuchbranch__"])
+    errors = check_errors_from(filters: filters_from_hash(filters_hash))
+    assert(errors.any? { |msg| msg.include? "__nosuchbranch__" },
+           "bad refspec not mentioned in error message")
+  end
+
+  test "graceful error from nonexistent minimum version parameter" do
+    errors = check_errors_from(minimum_script_version: "__nosuchbranch__")
+    assert(errors.any? { |msg| msg.include? "__nosuchbranch__" },
+           "bad refspec not mentioned in error message")
+  end
+
+  test "graceful error from nonexistent minimum version attribute" do
+    errors = check_errors_from(job: {minimum_script_version: "__nosuchbranch__"})
+    assert(errors.any? { |msg| msg.include? "__nosuchbranch__" },
+           "bad refspec not mentioned in error message")
+  end
+
+  test "don't reuse job with older Arvados SDK version specified by branch" do
+    jobspec = {runtime_constraints: {
+        arvados_sdk_version: "master",
+      }}
+    check_new_job_created_from({job: jobspec},
+                               :previous_job_run_with_arvados_sdk_version)
+  end
+
+  test "don't reuse job with older Arvados SDK version specified by commit" do
+    jobspec = {runtime_constraints: {
+        arvados_sdk_version: "ca68b24e51992e790f29df5cc4bc54ce1da4a1c2",
+      }}
+    check_new_job_created_from({job: jobspec},
+                               :previous_job_run_with_arvados_sdk_version)
+  end
+
+  test "don't reuse job with newer Arvados SDK version specified by commit" do
+    jobspec = {runtime_constraints: {
+        arvados_sdk_version: "436637c87a1d2bdbf4b624008304064b6cf0e30c",
+      }}
+    check_new_job_created_from({job: jobspec},
+                               :previous_job_run_with_arvados_sdk_version)
+  end
+
+  test "reuse job from arvados_sdk_version git filters" do
+    prev_job = jobs(:previous_job_run_with_arvados_sdk_version)
+    filters_hash = BASE_FILTERS.
+      merge("arvados_sdk_version" => ["in git", "commit2"],
+            "docker_image_locator" => ["=", prev_job.docker_image_locator])
+    filters_hash.delete("script_version")
+    params = create_job_params(filters: filters_from_hash(filters_hash))
+    post(:create, params)
+    assert_response :success
+    assert_equal(prev_job.uuid, assigns(:object).uuid)
+  end
+
+  test "create new job because of arvados_sdk_version 'not in git' filters" do
+    filters_hash = BASE_FILTERS.reject { |k| k == "script_version" }
+    filters = filters_from_hash(filters_hash)
+    # Allow anything from the root commit, but before commit 2.
+    filters += [["arvados_sdk_version", "in git", "436637c8"],
+                ["arvados_sdk_version", "not in git", "00634b2b"]]
+    check_new_job_created_from(filters: filters)
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/job_tasks_controller_test.rb b/services/api/test/functional/arvados/v1/job_tasks_controller_test.rb
new file mode 100644 (file)
index 0000000..d6f4347
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::JobTasksControllerTest < ActionController::TestCase
+end
diff --git a/services/api/test/functional/arvados/v1/jobs_controller_test.rb b/services/api/test/functional/arvados/v1/jobs_controller_test.rb
new file mode 100644 (file)
index 0000000..5e3d8e1
--- /dev/null
@@ -0,0 +1,528 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/git_test_helper'
+
+class Arvados::V1::JobsControllerTest < ActionController::TestCase
+
+  include GitTestHelper
+
+  test "submit a job" do
+    authorize_with :active
+    post :create, job: {
+      script: "hash",
+      script_version: "master",
+      repository: "active/foo",
+      script_parameters: {}
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_nil new_job['uuid']
+    assert_not_nil new_job['script_version'].match(/^[0-9a-f]{40}$/)
+    assert_equal 0, new_job['priority']
+  end
+
+  test "normalize output and log uuids when creating job" do
+    authorize_with :active
+    post :create, job: {
+      script: "hash",
+      script_version: "master",
+      script_parameters: {},
+      repository: "active/foo",
+      started_at: Time.now,
+      finished_at: Time.now,
+      running: false,
+      success: true,
+      output: 'd41d8cd98f00b204e9800998ecf8427e+0+K@xyzzy',
+      log: 'd41d8cd98f00b204e9800998ecf8427e+0+K@xyzzy'
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = assigns(:object)
+    assert_equal 'd41d8cd98f00b204e9800998ecf8427e+0', new_job['log']
+    assert_equal 'd41d8cd98f00b204e9800998ecf8427e+0', new_job['output']
+    version = new_job['script_version']
+
+    # Make sure version doesn't get mangled by normalize
+    assert_not_nil version.match(/^[0-9a-f]{40}$/)
+    assert_equal 'master', json_response['supplied_script_version']
+  end
+
+  test "normalize output and log uuids when updating job" do
+    authorize_with :active
+
+    foobar_job = jobs(:foobar)
+
+    new_output = 'd41d8cd98f00b204e9800998ecf8427e+0+K@xyzzy'
+    new_log = 'd41d8cd98f00b204e9800998ecf8427e+0+K@xyzzy'
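+    # Both locators carry a trailing +K@xyzzy hint; normalization is
+    # expected to strip it, leaving only hash+size, as asserted below.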
+    put :update, {
+      id: foobar_job['uuid'],
+      job: {
+        output: new_output,
+        log: new_log
+      }
+    }
+
+    updated_job = json_response
+    assert_not_equal foobar_job['log'], updated_job['log']
+    assert_not_equal new_log, updated_job['log']  # normalized during update
+    assert_equal new_log[0,new_log.rindex('+')], updated_job['log']
+    assert_not_equal foobar_job['output'], updated_job['output']
+    assert_not_equal new_output, updated_job['output']  # normalized during update
+    assert_equal new_output[0,new_output.rindex('+')], updated_job['output']
+  end
+
+  test "cancel a running job" do
+    # We need to verify that "cancel" creates a trigger file, so first
+    # let's make sure there is no stale trigger file.
+    begin
+      File.unlink(Rails.configuration.crunch_refresh_trigger)
+    rescue Errno::ENOENT
+    end
+
+    authorize_with :active
+    put :update, {
+      id: jobs(:running).uuid,
+      job: {
+        cancelled_at: 4.days.ago
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    job = JSON.parse(@response.body)
+    assert_not_nil job['uuid']
+    assert_not_nil job['cancelled_at']
+    assert_not_nil job['cancelled_by_user_uuid']
+    assert_not_nil job['cancelled_by_client_uuid']
+    assert_equal(true, Time.parse(job['cancelled_at']) > 1.minute.ago,
+                 'server should correct bogus cancelled_at ' +
+                 job['cancelled_at'])
+    assert_equal(true,
+                 File.exist?(Rails.configuration.crunch_refresh_trigger),
+                 'trigger file should be created when job is cancelled')
+  end
+
+  [
+   [:put, :update, {job:{cancelled_at: Time.now}}, :success],
+   [:put, :update, {job:{cancelled_at: nil}}, :unprocessable_entity],
+   [:put, :update, {job:{state: 'Cancelled'}}, :success],
+   [:put, :update, {job:{state: 'Queued'}}, :unprocessable_entity],
+   [:put, :update, {job:{state: 'Running'}}, :unprocessable_entity],
+   [:put, :update, {job:{state: 'Failed'}}, :unprocessable_entity],
+   [:put, :update, {job:{state: 'Complete'}}, :unprocessable_entity],
+   [:post, :cancel, {}, :success],
+  ].each do |http_method, action, params, expected_response|
+    test "cancelled job stays cancelled after #{[http_method, action, params].inspect}" do
+      # These tests exercise the cancel machinery, so make sure no stale
+      # trigger file is left over from an earlier test.
+      begin
+        File.unlink(Rails.configuration.crunch_refresh_trigger)
+      rescue Errno::ENOENT
+      end
+
+      authorize_with :active
+      self.send http_method, action, { id: jobs(:cancelled).uuid }.merge(params)
+      assert_response expected_response
+      if expected_response == :success
+        job = json_response
+        assert_not_nil job['cancelled_at'], "job cancelled again using #{params.inspect} did not have a cancelled_at value"
+        assert_equal 'Cancelled', job['state'], "job state changed when cancelled again using #{params.inspect}"
+      end
+      # Verify database record still says Cancelled
+      assert_equal 'Cancelled', Job.find(jobs(:cancelled).id).state, 'job was un-cancelled'
+    end
+  end
+
+  test "cancelled job updated to any other state change results in error" do
+    # We need to verify that "cancel" creates a trigger file, so first
+    # let's make sure there is no stale trigger file.
+    begin
+      File.unlink(Rails.configuration.crunch_refresh_trigger)
+    rescue Errno::ENOENT
+    end
+
+    authorize_with :active
+    put :update, {
+      id: jobs(:running_cancelled).uuid,
+      job: {
+        cancelled_at: nil
+      }
+    }
+    assert_response 422
+  end
+
+  ['abc.py', 'hash.py'].each do |script|
+    test "update job script attribute to #{script} without failing script_version check" do
+      authorize_with :admin
+      put :update, {
+        id: jobs(:uses_nonexistent_script_version).uuid,
+        job: {
+          script: script
+        }
+      }
+      assert_response :success
+      resp = assigns(:object)
+      assert_equal jobs(:uses_nonexistent_script_version).script_version, resp['script_version']
+    end
+  end
+
+  test "search jobs by uuid with >= query" do
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', '>=', 'zzzzz-8i9sb-pshmckwoma9plh7']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+    assert_equal false, !!found.index('zzzzz-8i9sb-4cf0nhn6xte809j')
+  end
+
+  test "search jobs by uuid with <= query" do
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', '<=', 'zzzzz-8i9sb-pshmckwoma9plh7']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+    assert_equal true, !!found.index('zzzzz-8i9sb-4cf0nhn6xte809j')
+  end
+
+  test "search jobs by uuid with >= and <= query" do
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', '>=', 'zzzzz-8i9sb-pshmckwoma9plh7'],
+              ['uuid', '<=', 'zzzzz-8i9sb-pshmckwoma9plh7']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal ['zzzzz-8i9sb-pshmckwoma9plh7'], found
+  end
+
+  test "search jobs by uuid with < query" do
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', '<', 'zzzzz-8i9sb-pshmckwoma9plh7']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal false, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+    assert_equal true, !!found.index('zzzzz-8i9sb-4cf0nhn6xte809j')
+  end
+
+  test "search jobs by uuid with like query" do
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', 'like', '%hmckwoma9pl%']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal ['zzzzz-8i9sb-pshmckwoma9plh7'], found
+  end
+
+  test "search jobs by uuid with 'in' query" do
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', 'in', ['zzzzz-8i9sb-4cf0nhn6xte809j',
+                                'zzzzz-8i9sb-pshmckwoma9plh7']]]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal ['zzzzz-8i9sb-4cf0nhn6xte809j',
+                  'zzzzz-8i9sb-pshmckwoma9plh7'], found.sort
+  end
+
+  test "search jobs by uuid with 'not in' query" do
+    exclude_uuids = [jobs(:running).uuid,
+                     jobs(:running_cancelled).uuid]
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', 'not in', exclude_uuids]]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_not_empty found, "'not in' query returned nothing"
+    assert_empty(found & exclude_uuids,
+                 "'not in' query returned uuids I asked not to get")
+  end
+
+  ['=', '!='].each do |operator|
+    [['uuid', 'zzzzz-8i9sb-pshmckwoma9plh7'],
+     ['output', nil]].each do |attr, operand|
+      test "search jobs with #{attr} #{operator} #{operand.inspect} query" do
+        authorize_with :active
+        get :index, {
+          filters: [[attr, operator, operand]]
+        }
+        assert_response :success
+        values = assigns(:objects).collect { |x| x.send(attr) }
+        assert_not_empty values, "query should return non-empty result"
+        if operator == '='
+          assert_empty values - [operand], "query results do not satisfy query"
+        else
+          assert_empty values & [operand], "query results do not satisfy query"
+        end
+      end
+    end
+  end
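+
+  # Taken together, the filter tests above exercise the triple form the
+  # index API expects, [attribute, operator, operand], where the operand
+  # may be a scalar, nil, or an array depending on the operator, e.g.:
+  #
+  #   filters: [['uuid', 'in', ['zzzzz-...']], ['output', '=', nil]]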
+
+  test "search jobs by started_at with < query" do
+    authorize_with :active
+    get :index, {
+      filters: [['started_at', '<', Time.now.to_s]]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+  end
+
+  test "search jobs by started_at with > query" do
+    authorize_with :active
+    get :index, {
+      filters: [['started_at', '>', Time.now.to_s]]
+    }
+    assert_response :success
+    assert_equal 0, assigns(:objects).count
+  end
+
+  test "search jobs by started_at with >= query on metric date" do
+    authorize_with :active
+    get :index, {
+      filters: [['started_at', '>=', '2014-01-01']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+  end
+
+  test "search jobs by started_at with >= query on metric date and time" do
+    authorize_with :active
+    get :index, {
+      filters: [['started_at', '>=', '2014-01-01 01:23:45']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+  end
+
+  test "search jobs with 'any' operator" do
+    authorize_with :active
+    get :index, {
+      where: { any: ['contains', 'pshmckw'] }
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal 0, found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+    assert_equal 1, found.count
+  end
+
+  test "search jobs by nonexistent column with < query" do
+    authorize_with :active
+    get :index, {
+      filters: [['is_borked', '<', 'fizzbuzz']]
+    }
+    assert_response 422
+  end
+
+  test "finish a job" do
+    authorize_with :active
+    put :update, {
+      id: jobs(:nearly_finished_job).uuid,
+      job: {
+        output: '551392cc37a317abf865b95f66f4ef94+101',
+        log: '9215de2a951a721f5f156bc08cf63ad7+93',
+        tasks_summary: {done: 1, running: 0, todo: 0, failed: 0},
+        success: true,
+        running: false,
+        finished_at: Time.now.to_s
+      }
+    }
+    assert_response :success
+  end
+
+  [:spectator, :admin].each_with_index do |which_token, i|
+    test "get job queue as #{which_token} user" do
+      authorize_with which_token
+      get :queue
+      assert_response :success
+      assert_equal i, assigns(:objects).count
+    end
+  end
+
+  test "get job queue as with a = filter" do
+    authorize_with :admin
+    get :queue, { filters: [['script','=','foo']] }
+    assert_response :success
+    assert_equal ['foo'], assigns(:objects).collect(&:script).uniq
+    assert_equal 0, assigns(:objects)[0].queue_position
+  end
+
+  test "get job queue as with a != filter" do
+    authorize_with :admin
+    get :queue, { filters: [['script','!=','foo']] }
+    assert_response :success
+    assert_equal 0, assigns(:objects).count
+  end
+
+  [:spectator, :admin].each do |which_token|
+    test "get queue_size as #{which_token} user" do
+      authorize_with which_token
+      get :queue_size
+      assert_response :success
+      assert_equal 1, JSON.parse(@response.body)["queue_size"]
+    end
+  end
+
+  test "job includes assigned nodes" do
+    authorize_with :active
+    get :show, {id: jobs(:nearly_finished_job).uuid}
+    assert_response :success
+    assert_equal([nodes(:busy).uuid], json_response["node_uuids"])
+  end
+
+  test "job lock success" do
+    authorize_with :active
+    post :lock, {id: jobs(:queued).uuid}
+    assert_response :success
+    job = Job.where(uuid: jobs(:queued).uuid).first
+    assert_equal "Running", job.state
+  end
+
+  test "job lock conflict" do
+    authorize_with :active
+    post :lock, {id: jobs(:running).uuid}
+    assert_response 422 # invalid state transition
+  end
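+
+  # A sketch of the client protocol implied by the two lock tests above
+  # (not asserted elsewhere in this file): POST .../jobs/<uuid>/lock,
+  # treat :success as ownership of the job, and treat 422 as "already
+  # locked or not Queued", i.e. move on to another queued job.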
+
+  test 'reject invalid commit in remote repository' do
+    authorize_with :active
+    url = "http://localhost:1/fake/fake.git"
+    fetch_remote_from_local_repo url, :foo
+    post :create, job: {
+      script: "hash",
+      script_version: "abc123",
+      repository: url,
+      script_parameters: {}
+    }
+    assert_response 422
+  end
+
+  test 'tag remote commit in internal repository' do
+    authorize_with :active
+    url = "http://localhost:1/fake/fake.git"
+    fetch_remote_from_local_repo url, :foo
+    post :create, job: {
+      script: "hash",
+      script_version: "master",
+      repository: url,
+      script_parameters: {}
+    }
+    assert_response :success
+    assert_equal('077ba2ad3ea24a929091a9e6ce545c93199b8e57',
+                 internal_tag(json_response['uuid']))
+  end
+
+  test 'tag local commit in internal repository' do
+    authorize_with :active
+    post :create, job: {
+      script: "hash",
+      script_version: "master",
+      repository: "active/foo",
+      script_parameters: {}
+    }
+    assert_response :success
+    assert_equal('077ba2ad3ea24a929091a9e6ce545c93199b8e57',
+                 internal_tag(json_response['uuid']))
+  end
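+
+  # Both tagging tests above expect the same outcome: whether "master" is
+  # resolved through the stub remote URL or the local fixture repo, the
+  # job's script_version is pinned to the full 40-character SHA and that
+  # commit is tagged, by job uuid, in the internal repository.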
+
+  test 'get job with components' do
+    authorize_with :active
+    get :show, {id: jobs(:running_job_with_components).uuid}
+    assert_response :success
+    assert_not_nil json_response["components"]
+    assert_equal ["component1", "component2"], json_response["components"].keys
+  end
+
+  [
+    [:active, :success],
+    [:system_user, :success],
+    [:admin, 403],
+  ].each do |user, expected|
+    test "add components to job locked by active user as #{user} user and expect #{expected}" do
+      authorize_with user
+      put :update, {
+        id: jobs(:running).uuid,
+        job: {
+          components: {"component1" => "value1", "component2" => "value2"}
+        }
+      }
+      assert_response expected
+      if expected == :success
+        assert_not_nil json_response["components"]
+        keys = json_response["components"].keys
+        assert_equal ["component1", "component2"], keys
+        assert_equal "value1", json_response["components"][keys[0]]
+      end
+    end
+  end
+
+  test 'get job with components, update components, and get again' do
+    authorize_with :active
+    get :show, {id: jobs(:running_job_with_components).uuid}
+    assert_response :success
+    assert_not_nil json_response["components"]
+    assert_equal ["component1", "component2"], json_response["components"].keys
+
+    # delete second component
+    @test_counter = 0  # Reset executed action counter
+    @controller = Arvados::V1::JobsController.new
+    put :update, {
+      id: jobs(:running_job_with_components).uuid,
+      job: {
+        components: {"component1" => "zzzzz-8i9sb-jobuuid00000001"}
+      }
+    }
+    assert_response :success
+
+    @test_counter = 0  # Reset executed action counter
+    @controller = Arvados::V1::JobsController.new
+    get :show, {id: jobs(:running_job_with_components).uuid}
+    assert_response :success
+    assert_not_nil json_response["components"]
+    assert_equal ["component1"], json_response["components"].keys
+
+    # delete all components
+    @test_counter = 0  # Reset executed action counter
+    @controller = Arvados::V1::JobsController.new
+    put :update, {
+      id: jobs(:running_job_with_components).uuid,
+      job: {
+        components: {}
+      }
+    }
+    assert_response :success
+
+    @test_counter = 0  # Reset executed action counter
+    @controller = Arvados::V1::JobsController.new
+    get :show, {id: jobs(:running_job_with_components).uuid}
+    assert_response :success
+    assert_not_nil json_response["components"]
+    assert_equal [], json_response["components"].keys
+  end
+
+  test 'jobs.create disabled in config' do
+    Rails.configuration.disable_api_methods = ["jobs.create",
+                                               "pipeline_instances.create"]
+    authorize_with :active
+    post :create, job: {
+      script: "hash",
+      script_version: "master",
+      repository: "active/foo",
+      script_parameters: {}
+    }
+    assert_response 404
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/keep_disks_controller_test.rb b/services/api/test/functional/arvados/v1/keep_disks_controller_test.rb
new file mode 100644 (file)
index 0000000..c5bff7b
--- /dev/null
@@ -0,0 +1,104 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::KeepDisksControllerTest < ActionController::TestCase
+
+  def default_ping_opts
+    {ping_secret: '', service_ssl_flag: false, service_port: 1234}
+  end
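+
+  # ping_secret is deliberately blank in the defaults; individual tests
+  # merge in a real secret, an id, or a filesystem_uuid as needed.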
+
+  test "add keep disk with admin token" do
+    authorize_with :admin
+    post :ping, default_ping_opts.
+      merge(filesystem_uuid: 'eb1e77a1-db84-4193-b6e6-ca2894f67d5f')
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_keep_disk = JSON.parse(@response.body)
+    assert_not_nil new_keep_disk['uuid']
+    assert_not_nil new_keep_disk['ping_secret']
+    assert_not_equal '', new_keep_disk['ping_secret']
+  end
+
+  [
+    {},
+    {filesystem_uuid: ''},
+  ].each do |opts|
+    test "add keep disk with[out] filesystem_uuid #{opts}" do
+      authorize_with :admin
+      post :ping, default_ping_opts.merge(opts)
+      assert_response :success
+      assert_not_nil JSON.parse(@response.body)['uuid']
+    end
+  end
+
+  test "refuse to add keep disk without admin token" do
+    post :ping, default_ping_opts
+    assert_response 404
+  end
+
+  test "ping keep disk" do
+    post :ping, default_ping_opts.
+      merge(id: keep_disks(:nonfull).uuid,
+            ping_secret: keep_disks(:nonfull).ping_secret,
+            filesystem_uuid: keep_disks(:nonfull).filesystem_uuid)
+    assert_response :success
+    assert_not_nil assigns(:object)
+    keep_disk = JSON.parse(@response.body)
+    assert_not_nil keep_disk['uuid']
+    assert_not_nil keep_disk['ping_secret']
+  end
+
+  test "admin should get index with ping_secret" do
+    authorize_with :admin
+    get :index
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    items = JSON.parse(@response.body)['items']
+    assert_not_equal 0, items.size
+    assert_not_nil items[0]['ping_secret']
+  end
+
+  # inactive user sees keep disks
+  test "inactive user should get index" do
+    authorize_with :inactive
+    get :index
+    assert_response :success
+    items = JSON.parse(@response.body)['items']
+    assert_not_equal 0, items.size
+
+    # Check these are still included
+    assert items[0]['service_host']
+    assert items[0]['service_port']
+  end
+
+  # active user sees non-secret attributes of keep disks
+  test "active user should get non-empty index with no ping_secret" do
+    authorize_with :active
+    get :index
+    assert_response :success
+    items = JSON.parse(@response.body)['items']
+    assert_not_equal 0, items.size
+    items.each do |item|
+      assert_nil item['ping_secret']
+      assert_not_nil item['is_readable']
+      assert_not_nil item['is_writable']
+      assert_not_nil item['service_host']
+      assert_not_nil item['service_port']
+    end
+  end
+
+  test "search keep_services with 'any' operator" do
+    authorize_with :active
+    get :index, {
+      where: { any: ['contains', 'o2t1q5w'] }
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal true, !!found.index('zzzzz-penuu-5w2o2t1q5wy7fhn')
+  end
+
+end
diff --git a/services/api/test/functional/arvados/v1/keep_services_controller_test.rb b/services/api/test/functional/arvados/v1/keep_services_controller_test.rb
new file mode 100644 (file)
index 0000000..d571610
--- /dev/null
@@ -0,0 +1,39 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::KeepServicesControllerTest < ActionController::TestCase
+
+  test "search by service_port with < query" do
+    authorize_with :active
+    get :index, {
+      filters: [['service_port', '<', 25107]]
+    }
+    assert_response :success
+    assert_equal false, assigns(:objects).any?
+  end
+
+  test "search by service_port with >= query" do
+    authorize_with :active
+    get :index, {
+      filters: [['service_port', '>=', 25107]]
+    }
+    assert_response :success
+    assert_equal true, assigns(:objects).any?
+  end
+
+  [:admin, :active, :inactive, :anonymous, nil].each do |u|
+    test "accessible to #{u.inspect} user" do
+      authorize_with(u) if u
+      get :accessible
+      assert_response :success
+      assert_not_empty json_response['items']
+      json_response['items'].each do |ks|
+        assert_not_equal ks['service_type'], 'proxy'
+      end
+    end
+  end
+
+end
diff --git a/services/api/test/functional/arvados/v1/links_controller_test.rb b/services/api/test/functional/arvados/v1/links_controller_test.rb
new file mode 100644 (file)
index 0000000..47e46fe
--- /dev/null
@@ -0,0 +1,385 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::LinksControllerTest < ActionController::TestCase
+
+  ['link', 'link_json'].each do |formatted_link|
+    test "no symbol keys in serialized hash #{formatted_link}" do
+      link = {
+        properties: {username: 'testusername'},
+        link_class: 'test',
+        name: 'encoding',
+        tail_uuid: users(:admin).uuid,
+        head_uuid: virtual_machines(:testvm).uuid
+      }
+      authorize_with :admin
+      if formatted_link == 'link_json'
+        post :create, link: link.to_json
+      else
+        post :create, link: link
+      end
+      assert_response :success
+      assert_not_nil assigns(:object)
+      assert_equal 'testusername', assigns(:object).properties['username']
+      assert_equal false, assigns(:object).properties.has_key?(:username)
+    end
+  end
+
+  %w(created_at modified_at).each do |attr|
+    {nil: nil, bogus: 2.days.ago}.each do |bogustype, bogusvalue|
+      test "cannot set #{bogustype} #{attr} in create" do
+        authorize_with :active
+        post :create, {
+          link: {
+            properties: {},
+            link_class: 'test',
+            name: 'test',
+          }.merge(attr => bogusvalue)
+        }
+        assert_response :success
+        resp = JSON.parse @response.body
+        assert_in_delta Time.now, Time.parse(resp[attr]), 3.0
+      end
+      test "cannot set #{bogustype} #{attr} in update" do
+        really_created_at = links(:test_timestamps).created_at
+        authorize_with :active
+        put :update, {
+          id: links(:test_timestamps).uuid,
+          link: {
+            :properties => {test: 'test'},
+            attr => bogusvalue
+          }
+        }
+        assert_response :success
+        resp = JSON.parse @response.body
+        case attr
+        when 'created_at'
+          assert_in_delta really_created_at, Time.parse(resp[attr]), 0.001
+        else
+          assert_in_delta Time.now, Time.parse(resp[attr]), 3.0
+        end
+      end
+    end
+  end
+
+  test "head must exist" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      tail_uuid: users(:active).uuid,
+      head_uuid: 'zzzzz-tpzed-xyzxyzxerrrorxx'
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response 422
+  end
+
+  test "tail must exist" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      head_uuid: users(:active).uuid,
+      tail_uuid: 'zzzzz-tpzed-xyzxyzxerrrorxx'
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response 422
+  end
+
+  test "head and tail exist, head_kind and tail_kind are returned" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      head_uuid: users(:active).uuid,
+      tail_uuid: users(:spectator).uuid,
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response :success
+    l = JSON.parse(@response.body)
+    assert_equal 'arvados#user', l['head_kind']
+    assert_equal 'arvados#user', l['tail_kind']
+  end
+
+  test "can supply head_kind and tail_kind without error" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      head_uuid: users(:active).uuid,
+      tail_uuid: users(:spectator).uuid,
+      head_kind: "arvados#user",
+      tail_kind: "arvados#user",
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response :success
+    l = JSON.parse(@response.body)
+    assert_equal 'arvados#user', l['head_kind']
+    assert_equal 'arvados#user', l['tail_kind']
+  end
+
+  test "tail must be visible by user" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      head_uuid: users(:active).uuid,
+      tail_uuid: authorized_keys(:admin).uuid,
+    }
+    authorize_with :active
+    post :create, link: link
+    assert_response 422
+  end
+
+  test "filter links with 'is_a' operator" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['tail_uuid', 'is_a', 'arvados#user'] ]
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.tail_uuid.match User.uuid_regex }).count
+  end
+
+  test "filter links with 'is_a' operator includes remote objects" do
+    authorize_with :admin
+    get :index, {
+      filters: [
+        ['tail_uuid', 'is_a', 'arvados#user'],
+        ['link_class', '=', 'permission'],
+        ['name', '=', 'can_read'],
+        ['head_uuid', '=', collections(:foo_file).uuid],
+      ]
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_includes(found.map(&:tail_uuid),
+                    users(:federated_active).uuid)
+  end
+
+  test "filter links with 'is_a' operator with more than one" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['tail_uuid', 'is_a', ['arvados#user', 'arvados#group'] ] ],
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f|
+                                 f.tail_uuid.match User.uuid_regex or
+                                 f.tail_uuid.match Group.uuid_regex
+                               }).count
+  end
+
+  test "filter links with 'is_a' operator with bogus type" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['tail_uuid', 'is_a', ['arvados#bogus'] ] ],
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_equal 0, found.count
+  end
+
+  test "filter links with 'is_a' operator with collection" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['head_uuid', 'is_a', ['arvados#collection'] ] ],
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.head_uuid.match Collection.uuid_regex}).count
+  end
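+
+  # As exercised above, 'is_a' accepts either a single kind string or an
+  # array of kinds, matching rows by uuid type (the User, Group, and
+  # Collection uuid regexes), including rows whose tail is a remote user.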
+
+  test "test can still use where tail_kind" do
+    authorize_with :admin
+    get :index, {
+      where: { tail_kind: 'arvados#user' }
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.tail_uuid.match User.uuid_regex }).count
+  end
+
+  test "test can still use where head_kind" do
+    authorize_with :admin
+    get :index, {
+      where: { head_kind: 'arvados#user' }
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.head_uuid.match User.uuid_regex }).count
+  end
+
+  test "test can still use filter tail_kind" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['tail_kind', '=', 'arvados#user'] ]
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.tail_uuid.match User.uuid_regex }).count
+  end
+
+  test "test can still use filter head_kind" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['head_kind', '=', 'arvados#user'] ]
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.head_uuid.match User.uuid_regex }).count
+  end
+
+  test "head_kind matches head_uuid" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      head_uuid: groups(:public).uuid,
+      head_kind: "arvados#user",
+      tail_uuid: users(:spectator).uuid,
+      tail_kind: "arvados#user",
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response 422
+  end
+
+  test "tail_kind matches tail_uuid" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      head_uuid: users(:active).uuid,
+      head_kind: "arvados#user",
+      tail_uuid: groups(:public).uuid,
+      tail_kind: "arvados#user",
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response 422
+  end
+
+  test "test with virtual_machine" do
+    link = {
+      tail_kind: "arvados#user",
+      tail_uuid: users(:active).uuid,
+      head_kind: "arvados#virtual_machine",
+      head_uuid: virtual_machines(:testvm).uuid,
+      link_class: "permission",
+      name: "can_login",
+      properties: {username: "repo_and_user_name"}
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response 422
+  end
+
+  test "test with virtualMachine" do
+    link = {
+      tail_kind: "arvados#user",
+      tail_uuid: users(:active).uuid,
+      head_kind: "arvados#virtualMachine",
+      head_uuid: virtual_machines(:testvm).uuid,
+      link_class: "permission",
+      name: "can_login",
+      properties: {username: "repo_and_user_name"}
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response :success
+  end
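+
+  # The pair of tests above pins down the expected spelling: *_kind
+  # values use the camelCase form ("arvados#virtualMachine"), while the
+  # snake_case variant draws the same 422 as any other kind/uuid mismatch.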
+
+  test "project owner can show a project permission" do
+    uuid = links(:project_viewer_can_read_project).uuid
+    authorize_with :active
+    get :show, id: uuid
+    assert_response :success
+    assert_equal(uuid, assigns(:object).andand.uuid)
+  end
+
+  test "admin can show a project permission" do
+    uuid = links(:project_viewer_can_read_project).uuid
+    authorize_with :admin
+    get :show, id: uuid
+    assert_response :success
+    assert_equal(uuid, assigns(:object).andand.uuid)
+  end
+
+  test "project viewer can't show others' project permissions" do
+    authorize_with :project_viewer
+    get :show, id: links(:admin_can_write_aproject).uuid
+    assert_response 404
+  end
+
+  test "requesting a nonexistent link returns 404" do
+    authorize_with :active
+    get :show, id: 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
+    assert_response 404
+  end
+
+  # not implemented
+  skip "retrieve all permissions using generic links index api" do
+    # Links.readable_by() does not return the full set of permission
+    # links that are visible to a user (i.e., all permission links
+    # whose head_uuid references an object for which the user has
+    # ownership or can_manage permission). Therefore, neither does
+    # /arvados/v1/links.
+    #
+    # It is possible to retrieve the full set of permissions for a
+    # single object via /arvados/v1/permissions.
+    authorize_with :active
+    get :index, filters: [['link_class', '=', 'permission'],
+                          ['head_uuid', '=', groups(:aproject).uuid]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map(&:uuid),
+                    links(:project_viewer_can_read_project).uuid)
+  end
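+
+  # A hedged sketch of the per-object alternative mentioned above,
+  # assuming /arvados/v1/permissions/<uuid> is routed to this
+  # controller's get_permissions action:
+  #
+  #   get :get_permissions, uuid: groups(:aproject).uuid
+  #   assert_response :success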
+
+  test "admin can index project permissions" do
+    authorize_with :admin
+    get :index, filters: [['link_class', '=', 'permission'],
+                          ['head_uuid', '=', groups(:aproject).uuid]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map(&:uuid),
+                    links(:project_viewer_can_read_project).uuid)
+  end
+
+  test "project viewer can't index others' project permissions" do
+    authorize_with :project_viewer
+    get :index, filters: [['link_class', '=', 'permission'],
+                          ['head_uuid', '=', groups(:aproject).uuid],
+                          ['tail_uuid', '!=', users(:project_viewer).uuid]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_empty assigns(:objects)
+  end
+
+  # Granting permissions.
+  test "grant can_read on project to other users in group" do
+    authorize_with :user_foo_in_sharing_group
+
+    refute users(:user_bar_in_sharing_group).can?(read: collections(:collection_owned_by_foo).uuid)
+
+    post :create, {
+      link: {
+        tail_uuid: users(:user_bar_in_sharing_group).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: collections(:collection_owned_by_foo).uuid,
+      }
+    }
+    assert_response :success
+    assert users(:user_bar_in_sharing_group).can?(read: collections(:collection_owned_by_foo).uuid)
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/logs_controller_test.rb b/services/api/test/functional/arvados/v1/logs_controller_test.rb
new file mode 100644 (file)
index 0000000..49fda47
--- /dev/null
@@ -0,0 +1,52 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::LogsControllerTest < ActionController::TestCase
+  fixtures :logs
+
+  test "non-admins can create their own logs" do
+    authorize_with :active
+    post :create, log: {summary: 'test log'}
+    assert_response :success
+    resp = assigns(:object)
+    assert_not_nil resp.uuid
+    assert_equal('test log', resp.summary, "loaded wrong log after creation")
+  end
+
+  test "non-admins can read their own logs" do
+    authorize_with :active
+    my_log = logs(:log_owned_by_active)
+    get :show, {id: my_log[:uuid]}
+    assert_response(:success, "failed to get log")
+    resp = assigns(:object)
+    assert_equal(my_log[:summary], resp.summary, "got wrong log")
+  end
+
+  test "test can still use where object_kind" do
+    authorize_with :admin
+    get :index, {
+      where: { object_kind: 'arvados#user' }
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.object_uuid.match User.uuid_regex }).count
+    l = JSON.parse(@response.body)
+    assert_equal 'arvados#user', l['items'][0]['object_kind']
+  end
+
+  test "test can still use filter object_kind" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['object_kind', '=', 'arvados#user'] ]
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.object_uuid.match User.uuid_regex }).count
+  end
+
+end
diff --git a/services/api/test/functional/arvados/v1/nodes_controller_test.rb b/services/api/test/functional/arvados/v1/nodes_controller_test.rb
new file mode 100644 (file)
index 0000000..dc8b3ac
--- /dev/null
@@ -0,0 +1,271 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::NodesControllerTest < ActionController::TestCase
+
+  test "should get index with ping_secret" do
+    authorize_with :admin
+    get :index
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    node_items = JSON.parse(@response.body)['items']
+    assert_not_equal 0, node_items.size
+    assert_not_nil node_items[0]['info'].andand['ping_secret']
+  end
+
+  # inactive user does not see any nodes
+  test "inactive user should get empty index" do
+    authorize_with :inactive
+    get :index
+    assert_response :success
+    assert_equal 0, json_response['items'].size
+    assert_equal 0, json_response['items_available']
+  end
+
+  # active user sees non-secret attributes of up and recently-up nodes
+  test "active user should get non-empty index with no ping_secret" do
+    authorize_with :active
+    get :index
+    assert_response :success
+    assert_operator 0, :<, json_response['items_available']
+    node_items = json_response['items']
+    assert_operator 0, :<, node_items.size
+    found_busy_node = false
+    node_items.each do |node|
+      assert_nil node['info'].andand['ping_secret']
+      assert_not_nil node['crunch_worker_state']
+      if node['uuid'] == nodes(:busy).uuid
+        found_busy_node = true
+        assert_equal 'busy', node['crunch_worker_state']
+      end
+    end
+    assert_equal true, found_busy_node
+  end
+
+  test "node should ping with ping_secret and no token" do
+    post :ping, {
+      id: 'zzzzz-7ekkf-2z3mc76g2q73aio',
+      instance_id: 'i-0000000',
+      local_ipv4: '172.17.2.174',
+      ping_secret: '69udawxvn3zzj45hs8bumvndricrha4lcpi23pd69e44soanc0'
+    }
+    assert_response :success
+    response = JSON.parse(@response.body)
+    assert_equal 'zzzzz-7ekkf-2z3mc76g2q73aio', response['uuid']
+    # Ensure we are getting the "superuser" attributes, too
+    assert_not_nil response['first_ping_at'], '"first_ping_at" attr missing'
+    assert_not_nil response['info'], '"info" attr missing'
+    assert_not_nil response['nameservers'], '"nameservers" attr missing'
+  end
+
+  test "node should fail ping with invalid ping_secret" do
+    post :ping, {
+      id: 'zzzzz-7ekkf-2z3mc76g2q73aio',
+      instance_id: 'i-0000000',
+      local_ipv4: '172.17.2.174',
+      ping_secret: 'dricrha4lcpi23pd69e44soanc069udawxvn3zzj45hs8bumvn'
+    }
+    assert_response 401
+  end
+
+  test "create node" do
+    authorize_with :admin
+    post :create, {node: {}}
+    assert_response :success
+    assert_not_nil json_response['uuid']
+    assert_kind_of Hash, json_response['info']
+    assert_not_nil json_response['info']['ping_secret']
+    assert_nil json_response['slot_number']
+    assert_nil json_response['hostname']
+  end
+
+  test "create node and assign slot" do
+    authorize_with :admin
+    post :create, {node: {}, assign_slot: true}
+    assert_response :success
+    assert_not_nil json_response['uuid']
+    assert_kind_of Hash, json_response['info']
+    assert_not_nil json_response['info']['ping_secret']
+    assert_operator 0, :<, json_response['slot_number']
+    n = json_response['slot_number']
+    assert_equal "compute#{n}", json_response['hostname']
+
+    node = Node.where(uuid: json_response['uuid']).first
+    assert_equal n, node.slot_number
+    assert_equal "compute#{n}", node.hostname
+  end
+
+  test "update node and assign slot" do
+    authorize_with :admin
+    node = nodes(:new_with_no_hostname)
+    post :update, {id: node.uuid, node: {}, assign_slot: true}
+    assert_response :success
+    assert_operator 0, :<, json_response['slot_number']
+    n = json_response['slot_number']
+    assert_equal "compute#{n}", json_response['hostname']
+
+    node.reload
+    assert_equal n, node.slot_number
+    assert_equal "compute#{n}", node.hostname
+  end
+
+  test "update node and assign slot, don't clobber hostname" do
+    authorize_with :admin
+    node = nodes(:new_with_custom_hostname)
+    post :update, {id: node.uuid, node: {}, assign_slot: true}
+    assert_response :success
+    assert_operator 0, :<, json_response['slot_number']
+    n = json_response['slot_number']
+    assert_equal "custom1", json_response['hostname']
+  end
+
+  test "ping adds node stats to info" do
+    authorize_with :admin
+    node = nodes(:idle)
+    post :ping, {
+      id: node.uuid,
+      ping_secret: node.info['ping_secret'],
+      total_cpu_cores: 32,
+      total_ram_mb: 1024,
+      total_scratch_mb: 2048
+    }
+    assert_response :success
+    info = JSON.parse(@response.body)['info']
+    properties = JSON.parse(@response.body)['properties']
+    assert_equal(node.info['ping_secret'], info['ping_secret'])
+    assert_equal(32, properties['total_cpu_cores'].to_i)
+    assert_equal(1024, properties['total_ram_mb'].to_i)
+    assert_equal(2048, properties['total_scratch_mb'].to_i)
+  end
+
+  test "active user can see their assigned job" do
+    authorize_with :active
+    get :show, {id: nodes(:busy).uuid}
+    assert_response :success
+    assert_equal(jobs(:nearly_finished_job).uuid, json_response["job_uuid"])
+  end
+
+  test "user without job read permission can't see job" do
+    authorize_with :spectator
+    get :show, {id: nodes(:busy).uuid}
+    assert_response :success
+    assert_nil(json_response["job"], "spectator can see node's assigned job")
+  end
+
+  [:admin, :spectator].each do |user|
+    test "select param does not break node list for #{user}" do
+      authorize_with user
+      get :index, {select: ['domain']}
+      assert_response :success
+      assert_operator 0, :<, json_response['items_available']
+    end
+  end
+
+  test "admin can associate a job with a node" do
+    changed_node = nodes(:idle)
+    assigned_job = jobs(:queued)
+    authorize_with :admin
+    post :update, {
+      id: changed_node.uuid,
+      node: {job_uuid: assigned_job.uuid},
+    }
+    assert_response :success
+    assert_equal(changed_node.hostname, json_response["hostname"],
+                 "hostname mismatch after defining job")
+    assert_equal(assigned_job.uuid, json_response["job_uuid"],
+                 "mismatch in node's assigned job UUID")
+  end
+
+  test "non-admin can't associate a job with a node" do
+    authorize_with :active
+    post :update, {
+      id: nodes(:idle).uuid,
+      node: {job_uuid: jobs(:queued).uuid},
+    }
+    assert_response 403
+  end
+
+  test "admin can unassign a job from a node" do
+    changed_node = nodes(:busy)
+    authorize_with :admin
+    post :update, {
+      id: changed_node.uuid,
+      node: {job_uuid: nil},
+    }
+    assert_response :success
+    assert_equal(changed_node.hostname, json_response["hostname"],
+                 "hostname mismatch after defining job")
+    assert_nil(json_response["job_uuid"],
+               "node still has job assignment after update")
+  end
+
+  test "non-admin can't unassign a job from a node" do
+    authorize_with :project_viewer
+    post :update, {
+      id: nodes(:busy).uuid,
+      node: {job_uuid: nil},
+    }
+    assert_response 403
+  end
+
+  test "job readable after updating other attributes" do
+    authorize_with :admin
+    post :update, {
+      id: nodes(:busy).uuid,
+      node: {last_ping_at: 1.second.ago},
+    }
+    assert_response :success
+    assert_equal(jobs(:nearly_finished_job).uuid, json_response["job_uuid"],
+                 "mismatched job UUID after ping update")
+  end
+
+  test "node should fail ping with invalid hostname config format" do
+    Rails.configuration.assign_node_hostname = 'compute%<slot_number>04'  # should end with "04d"
+    post :ping, {
+      id: nodes(:new_with_no_hostname).uuid,
+      ping_secret: nodes(:new_with_no_hostname).info['ping_secret'],
+    }
+    assert_response 422
+  end
+
+  test "first ping should set ip addr using local_ipv4 when provided" do
+    post :ping, {
+      id: 'zzzzz-7ekkf-nodenoipaddryet',
+      instance_id: 'i-0000000',
+      local_ipv4: '172.17.2.172',
+      ping_secret: 'abcdyefg4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2'
+    }
+    assert_response :success
+    response = JSON.parse(@response.body)
+    assert_equal 'zzzzz-7ekkf-nodenoipaddryet', response['uuid']
+    assert_equal '172.17.2.172', response['ip_address']
+  end
+
+  test "first ping should set ip addr using remote_ip when local_ipv4 is not provided" do
+    post :ping, {
+      id: 'zzzzz-7ekkf-nodenoipaddryet',
+      instance_id: 'i-0000000',
+      ping_secret: 'abcdyefg4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2'
+    }
+    assert_response :success
+    response = JSON.parse(@response.body)
+    assert_equal 'zzzzz-7ekkf-nodenoipaddryet', response['uuid']
+    assert_equal request.remote_ip, response['ip_address']
+  end
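+
+  # The two first-ping tests above, plus the test below, pin down the IP
+  # address policy: prefer the reported local_ipv4, fall back to the
+  # request's remote_ip, and never overwrite an address once recorded.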
+
+  test "future pings should not change previous ip address" do
+    post :ping, {
+      id: 'zzzzz-7ekkf-2z3mc76g2q73aio',
+      instance_id: 'i-0000000',
+      local_ipv4: '172.17.2.175',
+      ping_secret: '69udawxvn3zzj45hs8bumvndricrha4lcpi23pd69e44soanc0'
+    }
+    assert_response :success
+    response = JSON.parse(@response.body)
+    assert_equal 'zzzzz-7ekkf-2z3mc76g2q73aio', response['uuid']
+    assert_equal '172.17.2.174', response['ip_address']   # original ip address is not overwritten
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/pipeline_instances_controller_test.rb b/services/api/test/functional/arvados/v1/pipeline_instances_controller_test.rb
new file mode 100644 (file)
index 0000000..e6b4bfe
--- /dev/null
@@ -0,0 +1,52 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::PipelineInstancesControllerTest < ActionController::TestCase
+
+  test 'create pipeline with components copied from template' do
+    authorize_with :active
+    post :create, {
+      pipeline_instance: {
+        pipeline_template_uuid: pipeline_templates(:two_part).uuid
+      }
+    }
+    assert_response :success
+    assert_equal(pipeline_templates(:two_part).components.to_json,
+                 assigns(:object).components.to_json)
+  end
+
+  test 'create pipeline with no template' do
+    authorize_with :active
+    post :create, {
+      pipeline_instance: {
+        components: {}
+      }
+    }
+    assert_response :success
+    assert_equal({}, assigns(:object).components)
+  end
+
+  [
+    true,
+    false
+  ].each do |cascade|
+    test "cancel a pipeline instance with cascade=#{cascade}" do
+      authorize_with :active
+      pi_uuid = pipeline_instances(:job_child_pipeline_with_components_at_level_2).uuid
+
+      post :cancel, {id: pi_uuid, cascade: cascade}
+      assert_response :success
+
+      pi = PipelineInstance.where(uuid: pi_uuid).first
+      assert_equal "Paused", pi.state
+
+      children = Job.where(uuid: ['zzzzz-8i9sb-job1atlevel3noc', 'zzzzz-8i9sb-job2atlevel3noc'])
+      children.each do |child|
+        assert_equal ("Cancelled" == child.state), cascade
+      end
+    end
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/pipeline_templates_controller_test.rb b/services/api/test/functional/arvados/v1/pipeline_templates_controller_test.rb
new file mode 100644 (file)
index 0000000..992749c
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::PipelineTemplatesControllerTest < ActionController::TestCase
+end
diff --git a/services/api/test/functional/arvados/v1/query_test.rb b/services/api/test/functional/arvados/v1/query_test.rb
new file mode 100644 (file)
index 0000000..2931a38
--- /dev/null
@@ -0,0 +1,101 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::QueryTest < ActionController::TestCase
+  test 'no fallback orders when order is unambiguous' do
+    @controller = Arvados::V1::LogsController.new
+    authorize_with :active
+    get :index, {
+      order: ['id asc'],
+      controller: 'logs',
+    }
+    assert_response :success
+    assert_equal ['logs.id asc'], assigns(:objects).order_values
+  end
+
+  test 'fallback orders when order is ambiguous' do
+    @controller = Arvados::V1::LogsController.new
+    authorize_with :active
+    get :index, {
+      order: ['event_type asc'],
+      controller: 'logs',
+    }
+    assert_response :success
+    assert_equal('logs.event_type asc, logs.modified_at desc, logs.uuid',
+                 assigns(:objects).order_values.join(', '))
+  end
+
+  test 'skip fallback orders already given by client' do
+    @controller = Arvados::V1::LogsController.new
+    authorize_with :active
+    get :index, {
+      order: ['modified_at asc'],
+      controller: 'logs',
+    }
+    assert_response :success
+    assert_equal('logs.modified_at asc, logs.uuid',
+                 assigns(:objects).order_values.join(', '))
+  end
+
+  test 'eliminate superfluous orders' do
+    @controller = Arvados::V1::LogsController.new
+    authorize_with :active
+    get :index, {
+      order: ['logs.modified_at asc',
+              'modified_at desc',
+              'event_type desc',
+              'logs.event_type asc'],
+      controller: 'logs',
+    }
+    assert_response :success
+    assert_equal('logs.modified_at asc, logs.event_type desc, logs.uuid',
+                 assigns(:objects).order_values.join(', '))
+  end
+
+  test 'eliminate orders after the first unique column' do
+    @controller = Arvados::V1::LogsController.new
+    authorize_with :active
+    get :index, {
+      order: ['event_type asc',
+              'id asc',
+              'uuid asc',
+              'modified_at desc'],
+      controller: 'logs',
+    }
+    assert_response :success
+    assert_equal('logs.event_type asc, logs.id asc',
+                 assigns(:objects).order_values.join(', '))
+  end
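+
+  # The five ordering tests above pin down the normalization rules:
+  # fallback columns (modified_at desc, then uuid) are appended only when
+  # the requested order is not already unique, duplicate columns keep
+  # their first occurrence, and nothing is kept after the first unique
+  # column (id or uuid).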
+
+  test 'do not count items_available if count=none' do
+    @controller = Arvados::V1::LinksController.new
+    authorize_with :active
+    get :index, {
+      count: 'none',
+    }
+    assert_response(:success)
+    refute(json_response.has_key?('items_available'))
+  end
+
+  [{}, {count: nil}, {count: ''}, {count: 'exact'}].each do |params|
+    test "count items_available if params=#{params.inspect}" do
+      @controller = Arvados::V1::LinksController.new
+      authorize_with :active
+      get :index, params
+      assert_response(:success)
+      assert_operator(json_response['items_available'], :>, 0)
+    end
+  end
+
+  test 'error if count=bogus' do
+    @controller = Arvados::V1::LinksController.new
+    authorize_with :active
+    get :index, {
+      count: 'bogus',
+    }
+    assert_response(422)
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/repositories_controller_test.rb b/services/api/test/functional/arvados/v1/repositories_controller_test.rb
new file mode 100644 (file)
index 0000000..22548b6
--- /dev/null
@@ -0,0 +1,246 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::RepositoriesControllerTest < ActionController::TestCase
+  test "should get_all_logins with admin token" do
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+  end
+
+  test "should get_all_logins with non-admin token" do
+    authorize_with :active
+    get :get_all_permissions
+    assert_response 403
+  end
+
+  test "get_all_permissions gives RW to repository owner" do
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    ok = false
+    json_response['repositories'].each do |repo|
+      if repo['uuid'] == repositories(:repository2).uuid
+        if repo['user_permissions'][users(:active).uuid]['can_write']
+          ok = true
+        end
+      end
+    end
+    assert_equal(true, ok,
+                 "No permission on own repo '@{repositories(:repository2).uuid}'")
+  end
+
+  test "get_all_permissions takes into account is_admin flag" do
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    json_response['repositories'].each do |repo|
+      assert_not_nil(repo['user_permissions'][users(:admin).uuid],
+                     "Admin user is not listed in perms for #{repo['uuid']}")
+      assert_equal(true,
+                   repo['user_permissions'][users(:admin).uuid]['can_write'],
+                   "Admin has no perms for #{repo['uuid']}")
+    end
+  end
+
+  test "get_all_permissions takes into account is_active flag" do
+    act_as_user users(:active) do
+      Repository.create! name: 'active/testrepo'
+    end
+    act_as_system_user do
+      u = users(:active)
+      u.is_active = false
+      u.save!
+    end
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    json_response['repositories'].each do |r|
+      r['user_permissions'].each do |user_uuid, perms|
+        refute_equal user_uuid, users(:active).uuid
+      end
+    end
+  end
+
+  test "get_all_permissions does not give any access to user without permission" do
+    viewer_uuid = users(:project_viewer).uuid
+    assert_equal(authorized_keys(:project_viewer).authorized_user_uuid,
+                 viewer_uuid,
+                 "project_viewer must have an authorized_key for this test to work")
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    readable_repos = json_response["repositories"].select do |repo|
+      repo["user_permissions"].has_key?(viewer_uuid)
+    end
+    assert_equal(["arvados"], readable_repos.map { |r| r["name"] },
+                 "project_viewer should only have permissions on public repos")
+  end
+
+  test "get_all_permissions gives gitolite R to user with read-only access" do
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    found_it = false
+    assert_equal(authorized_keys(:spectator).authorized_user_uuid,
+                 users(:spectator).uuid,
+                 "spectator must have an authorized_key for this test to work")
+    json_response['repositories'].each do |repo|
+      next unless repo['uuid'] == repositories(:foo).uuid
+      assert_equal('R',
+                   repo['user_permissions'][users(:spectator).uuid]['gitolite_permissions'],
+                   "spectator user should have just R access to #{repo['uuid']}")
+      found_it = true
+    end
+    assert_equal true, found_it, "spectator user does not have R on foo repo"
+  end
+
+  test "get_all_permissions provides admin and active user keys" do
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    [:active, :admin].each do |u|
+      assert_equal(1, json_response['user_keys'][users(u).uuid].andand.count,
+                   "expected 1 key for #{u} (#{users(u).uuid})")
+      assert_equal(json_response['user_keys'][users(u).uuid][0]['public_key'],
+                   authorized_keys(u).public_key,
+                   "response public_key does not match fixture #{u}.")
+    end
+  end
+
+  test "get_all_permissions lists all repos regardless of permissions" do
+    act_as_system_user do
+      # Create repos that could potentially be left out of the
+      # permission list by accident.
+
+      # No authorized_key, no username (this can't even be done
+      # without skipping validations)
+      r = Repository.create name: 'root/testrepo'
+      assert r.save validate: false
+
+      r = Repository.create name: 'invalid username / repo name', owner_uuid: users(:inactive).uuid
+      assert r.save validate: false
+    end
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    assert_equal(Repository.count, json_response["repositories"].size)
+  end
+
+  test "get_all_permissions lists user permissions for users with no authorized keys" do
+    authorize_with :admin
+    AuthorizedKey.destroy_all
+    get :get_all_permissions
+    assert_response :success
+    assert_equal(Repository.count, json_response["repositories"].size)
+    repos_with_perms = []
+    json_response['repositories'].each do |repo|
+      if repo['user_permissions'].any?
+        repos_with_perms << repo['uuid']
+      end
+    end
+    assert_not_empty repos_with_perms, 'permissions are missing'
+  end
+
+  # Ensure get_all_permissions correctly describes what the normal
+  # permission system would do.
+  test "get_all_permissions obeys group permissions" do
+    act_as_user system_user do
+      r = Repository.create!(name: 'admin/groupcanwrite', owner_uuid: users(:admin).uuid)
+      g = Group.create!(group_class: 'group', name: 'repo-writers')
+      u1 = users(:active)
+      u2 = users(:spectator)
+      Link.create!(tail_uuid: g.uuid, head_uuid: r.uuid, link_class: 'permission', name: 'can_manage')
+      Link.create!(tail_uuid: u1.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_write')
+      Link.create!(tail_uuid: u2.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_read')
+
+      r = Repository.create!(name: 'admin/groupreadonly', owner_uuid: users(:admin).uuid)
+      g = Group.create!(group_class: 'group', name: 'repo-readers')
+      u1 = users(:active)
+      u2 = users(:spectator)
+      Link.create!(tail_uuid: g.uuid, head_uuid: r.uuid, link_class: 'permission', name: 'can_read')
+      Link.create!(tail_uuid: u1.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_write')
+      Link.create!(tail_uuid: u2.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_read')
+    end
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    json_response['repositories'].each do |repo|
+      repo['user_permissions'].each do |user_uuid, perms|
+        u = User.find_by_uuid(user_uuid)
+        if perms['can_read']
+          assert u.can? read: repo['uuid']
+          assert_match(/R/, perms['gitolite_permissions'])
+        else
+          refute_match(/R/, perms['gitolite_permissions'])
+        end
+        if perms['can_write']
+          assert u.can? write: repo['uuid']
+          assert_match(/RW\+/, perms['gitolite_permissions'])
+        else
+          refute_match(/W/, perms['gitolite_permissions'])
+        end
+        if perms['can_manage']
+          assert u.can? manage: repo['uuid']
+          assert_match(/RW\+/, perms['gitolite_permissions'])
+        end
+      end
+    end
+  end
+
+  test "default index includes fetch_url" do
+    authorize_with :active
+    get(:index)
+    assert_response :success
+    assert_includes(json_response["items"].map { |r| r["fetch_url"] },
+                    "git@git.zzzzz.arvadosapi.com:active/foo.git")
+  end
+
+  [
+    {cfg: :git_repo_ssh_base, cfgval: "git@example.com:", match: %r"^git@example.com:"},
+    {cfg: :git_repo_ssh_base, cfgval: true, match: %r"^git@git.zzzzz.arvadosapi.com:"},
+    {cfg: :git_repo_ssh_base, cfgval: false, refute: /^git@/ },
+    {cfg: :git_repo_https_base, cfgval: "https://example.com/", match: %r"^https://example.com/"},
+    {cfg: :git_repo_https_base, cfgval: true, match: %r"^https://git.zzzzz.arvadosapi.com/"},
+    {cfg: :git_repo_https_base, cfgval: false, refute: /^http/ },
+  ].each do |expect|
+    test "set #{expect[:cfg]} to #{expect[:cfgval]}" do
+      Rails.configuration.send expect[:cfg].to_s+"=", expect[:cfgval]
+      authorize_with :active
+      get :index
+      assert_response :success
+      assert_not_empty json_response['items']
+      json_response['items'].each do |r|
+        if expect[:refute]
+          r['clone_urls'].each do |u|
+            refute_match expect[:refute], u
+          end
+        else
+          assert((r['clone_urls'].any? do |u|
+                    expect[:match].match u
+                  end),
+                 "no match for #{expect[:match]} in #{r['clone_urls'].inspect}")
+        end
+      end
+    end
+  end
+
+  test "select push_url in index" do
+    authorize_with :active
+    get(:index, {select: ["uuid", "push_url"]})
+    assert_response :success
+    assert_includes(json_response["items"].map { |r| r["push_url"] },
+                    "git@git.zzzzz.arvadosapi.com:active/foo.git")
+  end
+
+  test "select clone_urls in index" do
+    authorize_with :active
+    get(:index, {select: ["uuid", "clone_urls"]})
+    assert_response :success
+    assert_includes(json_response["items"].map { |r| r["clone_urls"] }.flatten,
+                    "git@git.zzzzz.arvadosapi.com:active/foo.git")
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/schema_controller_test.rb b/services/api/test/functional/arvados/v1/schema_controller_test.rb
new file mode 100644 (file)
index 0000000..53c1ed7
--- /dev/null
@@ -0,0 +1,111 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::SchemaControllerTest < ActionController::TestCase
+
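+  # The discovery document and app version are cached; forget both
+  # around each test so a fresh document is generated every time.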
+  setup do forget end
+  teardown do forget end
+  def forget
+    Rails.cache.delete 'arvados_v1_rest_discovery'
+    AppVersion.forget
+  end
+
+  test "should get fresh discovery document" do
+    MAX_SCHEMA_AGE = 60
+    get :index
+    assert_response :success
+    discovery_doc = JSON.parse(@response.body)
+    assert_equal 'discovery#restDescription', discovery_doc['kind']
+    assert_equal(true,
+                 Time.now - MAX_SCHEMA_AGE.seconds < discovery_doc['generatedAt'],
+                 "discovery document was generated >#{MAX_SCHEMA_AGE}s ago")
+  end
+
+  test "discovery document fields" do
+    get :index
+    assert_response :success
+    discovery_doc = JSON.parse(@response.body)
+    assert_includes discovery_doc, 'defaultTrashLifetime'
+    assert_equal discovery_doc['defaultTrashLifetime'], Rails.application.config.default_trash_lifetime
+    assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['source_version'])
+    assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['sourceVersion'])
+    assert_match(/^unknown$/, discovery_doc['packageVersion'])
+    assert_equal discovery_doc['websocketUrl'], Rails.application.config.websocket_address
+    assert_equal discovery_doc['workbenchUrl'], Rails.application.config.workbench_address
+    assert_equal('zzzzz', discovery_doc['uuidPrefix'])
+  end
+
+  test "discovery document overrides source_version & sourceVersion with config" do
+    Rails.configuration.source_version = 'aaa888fff'
+    get :index
+    assert_response :success
+    discovery_doc = JSON.parse(@response.body)
+    # Key source_version will be replaced with sourceVersion
+    assert_equal 'aaa888fff', discovery_doc['source_version']
+    assert_equal 'aaa888fff', discovery_doc['sourceVersion']
+  end
+
+  test "discovery document overrides packageVersion with config" do
+    Rails.configuration.package_version = '1.0.0-stable'
+    get :index
+    assert_response :success
+    discovery_doc = JSON.parse(@response.body)
+    assert_equal '1.0.0-stable', discovery_doc['packageVersion']
+  end
+
+  test "empty disable_api_methods" do
+    get :index
+    assert_response :success
+    discovery_doc = JSON.parse(@response.body)
+    assert_equal('POST',
+                 discovery_doc['resources']['jobs']['methods']['create']['httpMethod'])
+  end
+
+  test "non-empty disable_api_methods" do
+    Rails.configuration.disable_api_methods =
+      ['jobs.create', 'pipeline_instances.create', 'pipeline_templates.create']
+    get :index
+    assert_response :success
+    discovery_doc = JSON.parse(@response.body)
+    ['jobs', 'pipeline_instances', 'pipeline_templates'].each do |r|
+      refute_includes(discovery_doc['resources'][r]['methods'].keys(), 'create')
+    end
+  end
+
+  test "groups contents parameters" do
+    get :index
+    assert_response :success
+
+    discovery_doc = JSON.parse(@response.body)
+
+    group_index_params = discovery_doc['resources']['groups']['methods']['index']['parameters']
+    group_contents_params = discovery_doc['resources']['groups']['methods']['contents']['parameters']
+
+    assert_equal group_contents_params.keys.sort, (group_index_params.keys - ['select'] + ['uuid', 'recursive']).sort
+
+    recursive_param = group_contents_params['recursive']
+    assert_equal 'boolean', recursive_param['type']
+    assert_equal false, recursive_param['required']
+    assert_equal 'query', recursive_param['location']
+  end
+
+  test "collections index parameters" do
+    get :index
+    assert_response :success
+
+    discovery_doc = JSON.parse(@response.body)
+
+    specimens_index_params = discovery_doc['resources']['specimens']['methods']['index']['parameters']  # no changes from super
+    coll_index_params = discovery_doc['resources']['collections']['methods']['index']['parameters']
+
+    assert_equal (specimens_index_params.keys + ['include_trash', 'include_old_versions']).sort, coll_index_params.keys.sort
+
+    include_trash_param = coll_index_params['include_trash']
+    assert_equal 'boolean', include_trash_param['type']
+    assert_equal false, include_trash_param['required']
+    assert_equal 'query', include_trash_param['location']
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/specimens_controller_test.rb b/services/api/test/functional/arvados/v1/specimens_controller_test.rb
new file mode 100644 (file)
index 0000000..df681e6
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::SpecimensControllerTest < ActionController::TestCase
+end
diff --git a/services/api/test/functional/arvados/v1/traits_controller_test.rb b/services/api/test/functional/arvados/v1/traits_controller_test.rb
new file mode 100644 (file)
index 0000000..3c8d097
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::TraitsControllerTest < ActionController::TestCase
+end
diff --git a/services/api/test/functional/arvados/v1/user_agreements_controller_test.rb b/services/api/test/functional/arvados/v1/user_agreements_controller_test.rb
new file mode 100644 (file)
index 0000000..c1ef667
--- /dev/null
@@ -0,0 +1,50 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::UserAgreementsControllerTest < ActionController::TestCase
+
+  test "active user get user agreements" do
+    authorize_with :active
+    get :index
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    agreements_list = JSON.parse(@response.body)
+    assert_not_nil agreements_list['items']
+    assert_not_nil agreements_list['items'][0]
+  end
+
+  test "active user get user agreement signatures" do
+    authorize_with :active
+    get :signatures
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    agreements_list = JSON.parse(@response.body)
+    assert_not_nil agreements_list['items']
+    assert_not_nil agreements_list['items'][0]
+    assert_equal 1, agreements_list['items'].count
+  end
+
+  test "inactive user get user agreements" do
+    authorize_with :inactive
+    get :index
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    agreements_list = JSON.parse(@response.body)
+    assert_not_nil agreements_list['items']
+    assert_not_nil agreements_list['items'][0]
+  end
+
+  test "uninvited user receives empty list of user agreements" do
+    authorize_with :inactive_uninvited
+    get :index
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    agreements_list = JSON.parse(@response.body)
+    assert_not_nil agreements_list['items']
+    assert_nil agreements_list['items'][0]
+  end
+
+end
diff --git a/services/api/test/functional/arvados/v1/users_controller_test.rb b/services/api/test/functional/arvados/v1/users_controller_test.rb
new file mode 100644 (file)
index 0000000..728a2a1
--- /dev/null
@@ -0,0 +1,1011 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/users_test_helper'
+
+class Arvados::V1::UsersControllerTest < ActionController::TestCase
+  include CurrentApiClient
+  include UsersTestHelper
+
+  setup do
+    @initial_link_count = Link.count
+    @vm_uuid = virtual_machines(:testvm).uuid
+    ActionMailer::Base.deliveries = []
+  end
+
+  test "activate a user after signing UA" do
+    authorize_with :inactive_but_signed_user_agreement
+    post :activate, id: users(:inactive_but_signed_user_agreement).uuid
+    assert_response :success
+    assert_not_nil assigns(:object)
+    me = JSON.parse(@response.body)
+    assert_equal true, me['is_active']
+  end
+
+  test "refuse to activate a user before signing UA" do
+    act_as_system_user do
+      required_uuids = Link.where("owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+                                  system_user_uuid,
+                                  'signature',
+                                  'require',
+                                  system_user_uuid,
+                                  Collection.uuid_like_pattern).
+        collect(&:head_uuid)
+
+      assert required_uuids.length > 0
+
+      signed_uuids = Link.where(owner_uuid: system_user_uuid,
+                                link_class: 'signature',
+                                name: 'click',
+                                tail_uuid: users(:inactive).uuid,
+                                head_uuid: required_uuids).
+                          collect(&:head_uuid)
+
+      assert_equal 0, signed_uuids.length
+    end
+
+    authorize_with :inactive
+    assert_equal false, users(:inactive).is_active
+
+    post :activate, id: users(:inactive).uuid
+    assert_response 403
+
+    resp = json_response
+    assert resp['errors'].first.include? 'Cannot activate without user agreements'
+    assert_nil resp['is_active']
+  end
+
+  test "activate an already-active user" do
+    authorize_with :active
+    post :activate, id: users(:active).uuid
+    assert_response :success
+    me = JSON.parse(@response.body)
+    assert_equal true, me['is_active']
+  end
+
+  test "respond 401 if given token exists but user record is missing" do
+    authorize_with :valid_token_deleted_user
+    get :current, {format: :json}
+    assert_response 401
+  end
+
+  test "create new user with user as input" do
+    authorize_with :admin
+    post :create, user: {
+      first_name: "test_first_name",
+      last_name: "test_last_name",
+      email: "foo@example.com"
+    }
+    assert_response :success
+    created = JSON.parse(@response.body)
+    assert_equal 'test_first_name', created['first_name']
+    assert_not_nil created['uuid'], 'expected uuid for the newly created user'
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_nil created['identity_url'], 'expected no identity_url'
+  end
+
+  test "create user with user, vm and repo as input" do
+    authorize_with :admin
+    repo_name = 'usertestrepo'
+
+    post :setup, {
+      repo_name: repo_name,
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      user: {
+        uuid: 'zzzzz-tpzed-abcdefghijklmno',
+        first_name: "in_create_test_first_name",
+        last_name: "test_last_name",
+        email: "foo@example.com"
+      }
+    }
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+
+    created = find_obj_in_resp response_items, 'User', nil
+
+    assert_equal 'in_create_test_first_name', created['first_name']
+    assert_not_nil created['uuid'], 'expected non-null uuid for the new user'
+    assert_equal 'zzzzz-tpzed-abcdefghijklmno', created['uuid']
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_nil created['identity_url'], 'expected no identity_url'
+
+    # four new links: system_group, arvados#user (login), repo, and 'All users' group
+    verify_links_added 4
+
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        created['uuid'], created['email'], 'arvados#user', false, 'User'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        "foo/#{repo_name}", created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+
+    verify_system_group_permission_link_for created['uuid']
+  end
+
+  test "setup user with bogus uuid and expect error" do
+    authorize_with :admin
+
+    post :setup, {
+      uuid: 'bogus_uuid',
+      repo_name: 'usertestrepo',
+      vm_uuid: @vm_uuid
+    }
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? 'Path not found'), 'Expected 404'
+  end
+
+  test "setup user with bogus uuid in user and expect error" do
+    authorize_with :admin
+
+    post :setup, {
+      user: {uuid: 'bogus_uuid'},
+      repo_name: 'usertestrepo',
+      vm_uuid: @vm_uuid,
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? 'ArgumentError: Require user email'),
+      'Expected ArgumentError'
+  end
+
+  test "setup user with no uuid and user, expect error" do
+    authorize_with :admin
+
+    post :setup, {
+      repo_name: 'usertestrepo',
+      vm_uuid: @vm_uuid,
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? 'Required uuid or user'),
+        'Expected ArgumentError'
+  end
+
+  test "setup user with no uuid and email, expect error" do
+    authorize_with :admin
+
+    post :setup, {
+      user: {},
+      repo_name: 'usertestrepo',
+      vm_uuid: @vm_uuid,
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? '<ArgumentError: Require user email'),
+        'Expected ArgumentError'
+  end
+
+  test "invoke setup with existing uuid, vm and repo and verify links" do
+    authorize_with :admin
+    inactive_user = users(:inactive)
+
+    post :setup, {
+      uuid: users(:inactive).uuid,
+      repo_name: 'usertestrepo',
+      vm_uuid: @vm_uuid
+    }
+
+    assert_response :success
+
+    response_items = JSON.parse(@response.body)['items']
+    resp_obj = find_obj_in_resp response_items, 'User', nil
+
+    assert_not_nil resp_obj['uuid'], 'expected uuid for the new user'
+    assert_equal inactive_user['uuid'], resp_obj['uuid']
+    assert_equal inactive_user['email'], resp_obj['email'],
+        'expecting inactive user email'
+
+    # expect repo and vm links
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'inactiveuser/usertestrepo', resp_obj['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
+        @vm_uuid, resp_obj['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "invoke setup with existing uuid but different email, expect original email" do
+    authorize_with :admin
+    inactive_user = users(:inactive)
+
+    post :setup, {
+      uuid: inactive_user['uuid'],
+      user: {email: 'junk_email'}
+    }
+
+    assert_response :success
+
+    response_items = JSON.parse(@response.body)['items']
+    resp_obj = find_obj_in_resp response_items, 'User', nil
+
+    assert_not_nil resp_obj['uuid'], 'expected uuid for the new user'
+    assert_equal inactive_user['uuid'], resp_obj['uuid']
+    assert_equal inactive_user['email'], resp_obj['email'],
+        'expecting inactive user email'
+  end
+
+  test "setup user with valid email and repo as input" do
+    authorize_with :admin
+
+    post :setup, {
+      repo_name: 'usertestrepo',
+      user: {email: 'foo@example.com'},
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    response_object = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil response_object['uuid'], 'expected uuid for the new user'
+    assert_equal response_object['email'], 'foo@example.com', 'expected given email'
+
+    # four extra links; system_group, login, group and repo perms
+    verify_links_added 4
+  end
+
+  test "setup user with fake vm and expect error" do
+    authorize_with :admin
+
+    post :setup, {
+      repo_name: 'usertestrepo',
+      vm_uuid: 'no_such_vm',
+      user: {email: 'foo@example.com'},
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? "No vm found for no_such_vm"),
+          'Expected RuntimeError: No vm found for no_such_vm'
+  end
+
+  test "setup user with valid email, repo and real vm as input" do
+    authorize_with :admin
+
+    post :setup, {
+      repo_name: 'usertestrepo',
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      vm_uuid: @vm_uuid,
+      user: {email: 'foo@example.com'}
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    response_object = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil response_object['uuid'], 'expected uuid for the new user'
+    assert_equal response_object['email'], 'foo@example.com', 'expected given email'
+
+    # five extra links; system_group, login, group, vm, repo
+    verify_links_added 5
+  end
+
+  test "setup user with valid email, no vm and no repo as input" do
+    authorize_with :admin
+
+    post :setup, {
+      user: {email: 'foo@example.com'},
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    response_object = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil response_object['uuid'], 'expected uuid for new user'
+    assert_equal response_object['email'], 'foo@example.com', 'expected given email'
+
+    # three extra links; system_group, login, and group
+    verify_links_added 3
+
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        response_object['uuid'], response_object['email'], 'arvados#user', false, 'User'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', response_object['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#repository', false, 'permission', 'can_manage',
+        'foo/usertestrepo', response_object['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, response_object['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "setup user with email, first name, repo name and vm uuid" do
+    authorize_with :admin
+
+    post :setup, {
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      repo_name: 'usertestrepo',
+      vm_uuid: @vm_uuid,
+      user: {
+        first_name: 'test_first_name',
+        email: 'foo@example.com'
+      }
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    response_object = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil response_object['uuid'], 'expected uuid for new user'
+    assert_equal response_object['email'], 'foo@example.com', 'expected given email'
+    assert_equal 'test_first_name', response_object['first_name'],
+        'expecting first name'
+
+    # five extra links; system_group, login, group, repo and vm
+    verify_links_added 5
+  end
+
+  test "setup user with an existing user email and check different object is created" do
+    authorize_with :admin
+    inactive_user = users(:inactive)
+
+    post :setup, {
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      repo_name: 'usertestrepo',
+      user: {
+        email: inactive_user['email']
+      }
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    response_object = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil response_object['uuid'], 'expected uuid for new user'
+    assert_not_equal response_object['uuid'], inactive_user['uuid'],
+        'expected different uuid after create operation'
+    assert_equal inactive_user['email'], response_object['email'], 'expected given email'
+    # system_group, openid, group, and repo. No vm link.
+    verify_links_added 4
+  end
+
+  test "setup user with openid prefix" do
+    authorize_with :admin
+
+    post :setup, {
+      repo_name: 'usertestrepo',
+      openid_prefix: 'http://www.example.com/account',
+      user: {
+        first_name: "in_create_test_first_name",
+        last_name: "test_last_name",
+        email: "foo@example.com"
+      }
+    }
+
+    assert_response :success
+
+    response_items = JSON.parse(@response.body)['items']
+    created = find_obj_in_resp response_items, 'User', nil
+
+    assert_equal 'in_create_test_first_name', created['first_name']
+    assert_not_nil created['uuid'], 'expected uuid for new user'
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_nil created['identity_url'], 'expected no identity_url'
+
+    # verify links
+    # four new links: system_group, arvados#user, repo, and 'All users' group.
+    verify_links_added 4
+
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        created['uuid'], created['email'], 'arvados#user', false, 'User'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "invoke setup with no openid prefix, expect error" do
+    authorize_with :admin
+
+    post :setup, {
+      repo_name: 'usertestrepo',
+      user: {
+        first_name: "in_create_test_first_name",
+        last_name: "test_last_name",
+        email: "foo@example.com"
+      }
+    }
+
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? 'openid_prefix parameter is missing'),
+        'Expected ArgumentError'
+  end
+
+  test "setup user with user, vm and repo and verify links" do
+    authorize_with :admin
+
+    post :setup, {
+      user: {
+        first_name: "in_create_test_first_name",
+        last_name: "test_last_name",
+        email: "foo@example.com"
+      },
+      vm_uuid: @vm_uuid,
+      repo_name: 'usertestrepo',
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+
+    assert_response :success
+
+    response_items = JSON.parse(@response.body)['items']
+    created = find_obj_in_resp response_items, 'User', nil
+
+    assert_equal 'in_create_test_first_name', created['first_name']
+    assert_not_nil created['uuid'], 'expected uuid for new user'
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_nil created['identity_url'], 'expected no identity_url'
+
+    # five new links: system_group, arvados#user, repo, vm and 'All
+    # users' group link
+    verify_links_added 5
+
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        created['uuid'], created['email'], 'arvados#user', false, 'User'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
+        @vm_uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "create user as non admin user and expect error" do
+    authorize_with :active
+
+    post :create, {
+      user: {email: 'foo@example.com'}
+    }
+
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? 'PermissionDenied'),
+          'Expected PermissionDeniedError'
+  end
+
+  test "setup user as non admin user and expect error" do
+    authorize_with :active
+
+    post :setup, {
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      user: {email: 'foo@example.com'}
+    }
+
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? 'Forbidden'),
+          'Expected Forbidden error'
+  end
+
+  test "setup active user with repo and no vm" do
+    authorize_with :admin
+    active_user = users(:active)
+
+    # invoke setup with a repository
+    post :setup, {
+      repo_name: 'usertestrepo',
+      uuid: active_user['uuid']
+    }
+
+    assert_response :success
+
+    response_items = JSON.parse(@response.body)['items']
+    created = find_obj_in_resp response_items, 'User', nil
+
+    assert_equal active_user[:email], created['email'], 'expected input email'
+
+    # verify links
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'active/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "setup active user with vm and no repo" do
+    authorize_with :admin
+    active_user = users(:active)
+    repos_query = Repository.where(owner_uuid: active_user.uuid)
+    repo_link_query = Link.where(tail_uuid: active_user.uuid,
+                                 link_class: "permission", name: "can_manage")
+    repos_count = repos_query.count
+    repo_link_count = repo_link_query.count
+
+    # invoke setup with a vm and no repo
+    post :setup, {
+      vm_uuid: @vm_uuid,
+      uuid: active_user['uuid'],
+      email: 'junk_email'
+    }
+
+    assert_response :success
+
+    response_items = JSON.parse(@response.body)['items']
+    created = find_obj_in_resp response_items, 'User', nil
+
+    assert_equal active_user['email'], created['email'], 'expected original email'
+
+    # verify links
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    assert_equal(repos_count, repos_query.count)
+    assert_equal(repo_link_count, repo_link_query.count)
+
+    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
+        @vm_uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "unsetup active user" do
+    active_user = users(:active)
+    assert_not_nil active_user['uuid'], 'expected uuid for the active user'
+    assert active_user['is_active'], 'expected is_active for active user'
+
+    verify_link_existence active_user['uuid'], active_user['email'],
+          false, true, true, true, true
+
+    authorize_with :admin
+
+    # now unsetup this user
+    post :unsetup, id: active_user['uuid']
+    assert_response :success
+
+    response_user = JSON.parse(@response.body)
+    assert_not_nil response_user['uuid'], 'expected uuid for the unsetup user'
+    assert_equal active_user['uuid'], response_user['uuid'], 'expected uuid not found'
+    assert !response_user['is_active'], 'expected user to be inactive'
+    assert !response_user['is_invited'], 'expected user to be uninvited'
+
+    verify_link_existence response_user['uuid'], response_user['email'],
+          false, false, false, false, false
+
+    active_user = User.find_by_uuid(users(:active).uuid)
+    readable_groups = active_user.groups_i_can(:read)
+    all_users_group = Group.all.collect(&:uuid).select { |g| g.match(/-f+$/) }.first
+    refute_includes(readable_groups, all_users_group,
+                    "active user can read All Users group after being deactivated")
+    assert_equal(false, active_user.is_invited,
+                 "active user is_invited after being deactivated & reloaded")
+  end
+
+  test "setup user with send notification param false and verify no email" do
+    authorize_with :admin
+
+    post :setup, {
+      openid_prefix: 'http://www.example.com/account',
+      send_notification_email: 'false',
+      user: {
+        email: "foo@example.com"
+      }
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    created = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil created['uuid'], 'expected uuid for the new user'
+    assert_equal created['email'], 'foo@example.com', 'expected given email'
+
+    setup_email = ActionMailer::Base.deliveries.last
+    assert_nil setup_email, 'expected no setup email'
+  end
+
+  test "setup user with send notification param true and verify email" do
+    authorize_with :admin
+
+    post :setup, {
+      openid_prefix: 'http://www.example.com/account',
+      send_notification_email: 'true',
+      user: {
+        email: "foo@example.com"
+      }
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    created = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil created['uuid'], 'expected uuid for the new user'
+    assert_equal created['email'], 'foo@example.com', 'expected given email'
+
+    setup_email = ActionMailer::Base.deliveries.last
+    assert_not_nil setup_email, 'Expected email after setup'
+
+    assert_equal Rails.configuration.user_notifier_email_from, setup_email.from[0]
+    assert_equal 'foo@example.com', setup_email.to[0]
+    assert_equal 'Welcome to Arvados - shell account enabled', setup_email.subject
+    assert (setup_email.body.to_s.include? 'Your Arvados shell account has been set up'),
+        'Expected Your Arvados shell account has been set up in email body'
+    assert (setup_email.body.to_s.include? "#{Rails.configuration.workbench_address}users/#{created['uuid']}/virtual_machines"), 'Expected virtual machines url in email body'
+  end
+
+  test "setup inactive user by changing is_active to true" do
+    authorize_with :admin
+    active_user = users(:active)
+
+    # update the user record, setting is_active to true
+    put :update, {
+          id: active_user['uuid'],
+          user: {
+            is_active: true,
+          }
+        }
+    assert_response :success
+    assert_equal active_user['uuid'], json_response['uuid']
+    updated = User.where(uuid: active_user['uuid']).first
+    assert_equal(true, updated.is_active)
+    assert_equal({read: true}, updated.group_permissions[all_users_group_uuid])
+  end
+
+  test "non-admin user can get basic information about readable users" do
+    authorize_with :spectator
+    get(:index)
+    check_non_admin_index
+    check_readable_users_index [:spectator], [:inactive, :active]
+  end
+
+  test "non-admin user gets only safe attributes from users#show" do
+    g = act_as_system_user do
+      create :group
+    end
+    users = create_list :active_user, 2, join_groups: [g]
+    token = create :token, user: users[0]
+    authorize_with_token token
+    get :show, id: users[1].uuid
+    check_non_admin_show
+  end
+
+  [2, 4].each do |limit|
+    test "non-admin user can limit index to #{limit}" do
+      g = act_as_system_user do
+        create :group
+      end
+      users = create_list :active_user, 4, join_groups: [g]
+      token = create :token, user: users[0]
+
+      authorize_with_token token
+      get(:index, limit: limit)
+      check_non_admin_index
+      assert_equal(limit, json_response["items"].size,
+                   "non-admin index limit was ineffective")
+    end
+  end
+
+  test "admin has full index powers" do
+    authorize_with :admin
+    check_inactive_user_findable
+  end
+
+  test "reader token can grant admin index powers" do
+    authorize_with :spectator
+    check_inactive_user_findable(reader_tokens: [api_token(:admin)])
+  end
+
+  test "admin can filter on user.is_active" do
+    authorize_with :admin
+    get(:index, filters: [["is_active", "=", "true"]])
+    assert_response :success
+    check_readable_users_index [:active, :spectator], [:inactive]
+  end
+
+  test "admin can search where user.is_active" do
+    authorize_with :admin
+    get(:index, where: {is_active: true})
+    assert_response :success
+    check_readable_users_index [:active, :spectator], [:inactive]
+  end
+
+  test "update active_no_prefs user profile and expect notification email" do
+    authorize_with :admin
+
+    put :update, {
+      id: users(:active_no_prefs).uuid,
+      user: {
+        prefs: {:profile => {'organization' => 'example.com'}}
+      }
+    }
+    assert_response :success
+
+    found_email = false
+    ActionMailer::Base.deliveries.andand.each do |email|
+      if email.subject == "Profile created by #{users(:active_no_prefs).email}"
+        found_email = true
+        break
+      end
+    end
+    assert_equal true, found_email, 'Expected email after creating profile'
+  end
+
+  test "update active_no_prefs_profile user profile and expect notification email" do
+    authorize_with :admin
+
+    user = {}
+    user[:prefs] = users(:active_no_prefs_profile_no_getting_started_shown).prefs
+    user[:prefs][:profile] = {'organization' => 'example.com'}
+    put :update, {
+      id: users(:active_no_prefs_profile_no_getting_started_shown).uuid,
+      user: user
+    }
+    assert_response :success
+
+    found_email = false
+    ActionMailer::Base.deliveries.andand.each do |email|
+      if email.subject == "Profile created by #{users(:active_no_prefs_profile_no_getting_started_shown).email}"
+        found_email = true
+        break
+      end
+    end
+    assert_equal true, found_email, 'Expected email after creating profile'
+  end
+
+  test "update active user profile and expect no notification email" do
+    authorize_with :admin
+
+    put :update, {
+      id: users(:active).uuid,
+      user: {
+        prefs: {:profile => {'organization' => 'anotherexample.com'}}
+      }
+    }
+    assert_response :success
+
+    found_email = false
+    ActionMailer::Base.deliveries.andand.each do |email|
+      if email.subject == "Profile created by #{users(:active).email}"
+        found_email = true
+        break
+      end
+    end
+    assert_equal false, found_email, 'Expected no email after updating profile'
+  end
+
+  test "user API response includes writable_by" do
+    authorize_with :active
+    get :current
+    assert_response :success
+    assert_includes(json_response["writable_by"], users(:active).uuid,
+                    "user's writable_by should include self")
+    assert_includes(json_response["writable_by"], users(:active).owner_uuid,
+                    "user's writable_by should include its owner_uuid")
+  end
+
+  [
+    [:admin, true],
+    [:active, false],
+  ].each do |auth_user, expect_success|
+    test "update_uuid as #{auth_user}" do
+      authorize_with auth_user
+      orig_uuid = users(:active).uuid
+      post :update_uuid, {
+             id: orig_uuid,
+             new_uuid: 'zbbbb-tpzed-abcde12345abcde',
+           }
+      if expect_success
+        assert_response :success
+        assert_empty User.where(uuid: orig_uuid)
+      else
+        assert_response 403
+        assert_not_empty User.where(uuid: orig_uuid)
+      end
+    end
+  end
+
+  test "refuse to merge with redirect_to_user_uuid=false (not yet supported)" do
+    authorize_with :project_viewer_trustedclient
+    post :merge, {
+           new_user_token: api_client_authorizations(:active_trustedclient).api_token,
+           new_owner_uuid: users(:active).uuid,
+           redirect_to_new_user: false,
+         }
+    assert_response(422)
+  end
+
+  test "refuse to merge user into self" do
+    authorize_with(:active_trustedclient)
+    post(:merge, {
+           new_user_token: api_client_authorizations(:active_trustedclient).api_token,
+           new_owner_uuid: users(:active).uuid,
+           redirect_to_new_user: true,
+         })
+    assert_response(422)
+  end
+
+  [[:active, :project_viewer_trustedclient],
+   [:active_trustedclient, :project_viewer]].each do |src, dst|
+    test "refuse to merge with untrusted token (#{src} -> #{dst})" do
+      authorize_with(src)
+      post(:merge, {
+             new_user_token: api_client_authorizations(dst).api_token,
+             new_owner_uuid: api_client_authorizations(dst).user.uuid,
+             redirect_to_new_user: true,
+           })
+      assert_response(403)
+    end
+  end
+
+  [[:expired_trustedclient, :project_viewer_trustedclient],
+   [:project_viewer_trustedclient, :expired_trustedclient]].each do |src, dst|
+    test "refuse to merge with expired token (#{src} -> #{dst})" do
+      authorize_with(src)
+      post(:merge, {
+             new_user_token: api_client_authorizations(dst).api_token,
+             new_owner_uuid: api_client_authorizations(dst).user.uuid,
+             redirect_to_new_user: true,
+           })
+      assert_response(401)
+    end
+  end
+
+  [['src', :active_trustedclient],
+   ['dst', :project_viewer_trustedclient]].each do |which_scoped, auth|
+    test "refuse to merge with scoped #{which_scoped} token" do
+      act_as_system_user do
+        api_client_authorizations(auth).update_attributes(scopes: ["GET /", "POST /", "PUT /"])
+      end
+      authorize_with(:active_trustedclient)
+      post(:merge, {
+             new_user_token: api_client_authorizations(:project_viewer_trustedclient).api_token,
+             new_owner_uuid: users(:project_viewer).uuid,
+             redirect_to_new_user: true,
+           })
+      assert_response(403)
+    end
+  end
+
+  test "refuse to merge if new_owner_uuid is not writable" do
+    authorize_with(:project_viewer_trustedclient)
+    post(:merge, {
+           new_user_token: api_client_authorizations(:active_trustedclient).api_token,
+           new_owner_uuid: groups(:anonymously_accessible_project).uuid,
+           redirect_to_new_user: true,
+         })
+    assert_response(403)
+  end
+
+  test "refuse to merge if new_owner_uuid is empty" do
+    authorize_with(:project_viewer_trustedclient)
+    post(:merge, {
+           new_user_token: api_client_authorizations(:active_trustedclient).api_token,
+           new_owner_uuid: "",
+           redirect_to_new_user: true,
+         })
+    assert_response(422)
+  end
+
+  test "refuse to merge if new_owner_uuid is not provided" do
+    authorize_with(:project_viewer_trustedclient)
+    post(:merge, {
+           new_user_token: api_client_authorizations(:active_trustedclient).api_token,
+           redirect_to_new_user: true,
+         })
+    assert_response(422)
+  end
+
+  test "refuse to update redirect_to_user_uuid directly" do
+    authorize_with(:active_trustedclient)
+    patch(:update, {
+            id: users(:active).uuid,
+            user: {
+              redirect_to_user_uuid: users(:active).uuid,
+            },
+          })
+    assert_response(403)
+  end
+
+  test "merge 'project_viewer' account into 'active' account" do
+    authorize_with(:project_viewer_trustedclient)
+    post(:merge, {
+           new_user_token: api_client_authorizations(:active_trustedclient).api_token,
+           new_owner_uuid: users(:active).uuid,
+           redirect_to_new_user: true,
+         })
+    assert_response(:success)
+    assert_equal(users(:project_viewer).redirect_to_user_uuid, users(:active).uuid)
+
+    auth = ApiClientAuthorization.validate(token: api_client_authorizations(:project_viewer).api_token)
+    assert_not_nil(auth)
+    assert_not_nil(auth.user)
+    assert_equal(users(:active).uuid, auth.user.uuid)
+  end
+
+  NON_ADMIN_USER_DATA = ["uuid", "kind", "is_active", "email", "first_name",
+                         "last_name", "username"].sort
+
+  def check_non_admin_index
+    assert_response :success
+    response_items = json_response["items"]
+    assert_not_nil response_items
+    response_items.each do |user_data|
+      check_non_admin_item user_data
+      assert(user_data["is_active"], "non-admin index returned inactive user")
+    end
+  end
+
+  def check_non_admin_show
+    assert_response :success
+    check_non_admin_item json_response
+  end
+
+  def check_non_admin_item user_data
+    assert_equal(NON_ADMIN_USER_DATA, user_data.keys.sort,
+                 "data in response had missing or extra attributes")
+    assert_equal("arvados#user", user_data["kind"])
+  end
+
+  def check_readable_users_index expect_present, expect_missing
+    response_uuids = json_response["items"].map { |u| u["uuid"] }
+    expect_present.each do |user_key|
+      assert_includes(response_uuids, users(user_key).uuid,
+                      "#{user_key} missing from index")
+    end
+    expect_missing.each do |user_key|
+      refute_includes(response_uuids, users(user_key).uuid,
+                      "#{user_key} included in index")
+    end
+  end
+
+  def check_inactive_user_findable(params={})
+    inactive_user = users(:inactive)
+    get(:index, params.merge(filters: [["email", "=", inactive_user.email]]))
+    assert_response :success
+    user_list = json_response["items"]
+    assert_equal(1, user_list.andand.count)
+    # This test needs to check a column non-admins have no access to,
+    # to ensure that admins see all user information.
+    assert_equal(inactive_user.identity_url, user_list.first["identity_url"],
+                 "admin's filtered index did not return inactive user")
+  end
+
+  def verify_links_added more
+    assert_equal @initial_link_count+more, Link.count,
+        "Started with #{@initial_link_count} links, expected #{more} more"
+  end
+
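+  # Return the first User object in a setup response, or (for any other
+  # object_type) the first link whose head_uuid resolves to head_kind.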
+  def find_obj_in_resp(response_items, object_type, head_kind=nil)
+    return_obj = nil
+    response_items.each { |x|
+      if !x
+        next
+      end
+
+      if object_type == 'User'
+        if ArvadosModel::resource_class_for_uuid(x['uuid']) == User
+          return_obj = x
+          break
+        end
+      else  # looking for a link
+        if x['head_uuid'] and ArvadosModel::resource_class_for_uuid(x['head_uuid']).kind == head_kind
+          return_obj = x
+          break
+        end
+      end
+    }
+    return return_obj
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/virtual_machines_controller_test.rb b/services/api/test/functional/arvados/v1/virtual_machines_controller_test.rb
new file mode 100644 (file)
index 0000000..02191ec
--- /dev/null
@@ -0,0 +1,78 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class Arvados::V1::VirtualMachinesControllerTest < ActionController::TestCase
+  def get_logins_for(vm_sym)
+    authorize_with :admin
+    get(:logins, id: virtual_machines(vm_sym).uuid)
+  end
+
+  def find_login(sshkey_sym)
+    assert_response :success
+    want_key = authorized_keys(sshkey_sym).public_key
+    logins = json_response["items"].select do |login|
+      login["public_key"] == want_key
+    end
+    assert_equal(1, logins.size, "failed to find #{sshkey_sym} login")
+    logins.first
+  end
+
+  test "username propagated from permission" do
+    get_logins_for(:testvm2)
+    admin_login = find_login(:admin)
+    perm = links(:admin_can_login_to_testvm2)
+    assert_equal(perm.properties["username"], admin_login["username"])
+  end
+
+  test "groups propagated from permission" do
+    get_logins_for(:testvm2)
+    admin_login = find_login(:admin)
+    perm = links(:admin_can_login_to_testvm2)
+    assert_equal(perm.properties["groups"], admin_login["groups"])
+  end
+
+  test "groups is an empty list by default" do
+    get_logins_for(:testvm2)
+    active_login = find_login(:active)
+    assert_equal([], active_login["groups"])
+  end
+
+  test "logins without usernames not listed" do
+    get_logins_for(:testvm2)
+    assert_response :success
+    spectator_uuid = users(:spectator).uuid
+    assert_empty(json_response["items"].
+                 select { |login| login["user_uuid"] == spectator_uuid })
+  end
+
+  test "logins without ssh keys are listed" do
+    u, vm = nil
+    act_as_system_user do
+      u = create :active_user, first_name: 'Bob', last_name: 'Blogin'
+      vm = VirtualMachine.create! hostname: 'foo.shell'
+      Link.create!(tail_uuid: u.uuid,
+                   head_uuid: vm.uuid,
+                   link_class: 'permission',
+                   name: 'can_login',
+                   properties: {'username' => 'bobblogin'})
+    end
+    authorize_with :admin
+    get :logins, id: vm.uuid
+    assert_response :success
+    assert_equal 1, json_response['items'].length
+    assert_nil json_response['items'][0]['public_key']
+    assert_nil json_response['items'][0]['authorized_key_uuid']
+    assert_equal u.uuid, json_response['items'][0]['user_uuid']
+    assert_equal 'bobblogin', json_response['items'][0]['username']
+  end
+
+  test 'get all logins' do
+    authorize_with :admin
+    get :get_all_logins
+    find_login :admin
+    find_login :active
+  end
+end
diff --git a/services/api/test/functional/database_controller_test.rb b/services/api/test/functional/database_controller_test.rb
new file mode 100644 (file)
index 0000000..ef1d0c6
--- /dev/null
@@ -0,0 +1,51 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class DatabaseControllerTest < ActionController::TestCase
+  include CurrentApiClient
+
+  test "reset fails with non-admin token" do
+    authorize_with :active
+    post :reset
+    assert_response 403
+  end
+
+  test "route not found when not in test mode" do
+    authorize_with :admin
+    env_was = Rails.env
+    begin
+      Rails.env = 'production'
+      Rails.application.reload_routes!
+      assert_raises ActionController::UrlGenerationError do
+        post :reset
+      end
+    ensure
+      Rails.env = env_was
+      Rails.application.reload_routes!
+    end
+  end
+
+  test "reset fails when a non-test-fixture user exists" do
+    act_as_system_user do
+      User.create!(uuid: 'abcde-tpzed-123451234512345', email: 'bar@example.net')
+    end
+    authorize_with :admin
+    post :reset
+    assert_response 403
+  end
+
+  test "reset succeeds with admin token" do
+    new_uuid = nil
+    act_as_system_user do
+      new_uuid = Specimen.create.uuid
+    end
+    assert_not_empty Specimen.where(uuid: new_uuid)
+    authorize_with :admin
+    post :reset
+    assert_response 200
+    assert_empty Specimen.where(uuid: new_uuid)
+  end
+end
diff --git a/services/api/test/functional/user_sessions_controller_test.rb b/services/api/test/functional/user_sessions_controller_test.rb
new file mode 100644 (file)
index 0000000..e304815
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class UserSessionsControllerTest < ActionController::TestCase
+
+  test "new user from new api client" do
+    authorize_with :inactive
+    api_client_page = 'http://client.example.com/home'
+    get :login, return_to: api_client_page
+    assert_response :redirect
+    assert_equal(0, @response.redirect_url.index(api_client_page + '?'),
+                 'Redirect url ' + @response.redirect_url +
+                 ' should start with ' + api_client_page + '?')
+    assert_not_nil assigns(:api_client)
+  end
+
+  test "login with remote param returns a salted token" do
+    authorize_with :inactive
+    api_client_page = 'http://client.example.com/home'
+    remote_prefix = 'zbbbb'
+    get :login, return_to: api_client_page, remote: remote_prefix
+    assert_response :redirect
+    api_client_auth = assigns(:api_client_auth)
+    assert_not_nil api_client_auth
+    assert_includes(@response.redirect_url, 'api_token='+api_client_auth.salted_token(remote: remote_prefix))
+  end
+
+  test "login with malformed remote param returns an error" do
+    authorize_with :inactive
+    api_client_page = 'http://client.example.com/home'
+    remote_prefix = 'invalid_cluster_id'
+    get :login, return_to: api_client_page, remote: remote_prefix
+    assert_response 400
+  end
+end
diff --git a/services/api/test/helpers/container_test_helper.rb b/services/api/test/helpers/container_test_helper.rb
new file mode 100644 (file)
index 0000000..88de724
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module ContainerTestHelper
+  def secret_string
+    'UNGU3554BL3'
+  end
+
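+  # Assert that no log entry's properties mention the secret mount
+  # path (secret/6x9) or the secret string itself.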
+  def assert_no_secrets_logged
+    Log.all.map(&:properties).each do |props|
+      refute_match(/secret\/6x9|#{secret_string}/, SafeJSON.dump(props))
+    end
+  end
+end
diff --git a/services/api/test/helpers/docker_migration_helper.rb b/services/api/test/helpers/docker_migration_helper.rb
new file mode 100644 (file)
index 0000000..b6fed3d
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module DockerMigrationHelper
+  include CurrentApiClient
+
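+  # Create the system link recording that the docker_image fixture has
+  # been migrated (1.9 -> 1.10 image format) to docker_image_1_12.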
+  def add_docker19_migration_link
+    act_as_system_user do
+      assert(Link.create!(owner_uuid: system_user_uuid,
+                          link_class: 'docker_image_migration',
+                          name: 'migrate_1.9_1.10',
+                          tail_uuid: collections(:docker_image).portable_data_hash,
+                          head_uuid: collections(:docker_image_1_12).portable_data_hash))
+    end
+  end
+end
diff --git a/services/api/test/helpers/git_test_helper.rb b/services/api/test/helpers/git_test_helper.rb
new file mode 100644 (file)
index 0000000..170b59e
--- /dev/null
@@ -0,0 +1,59 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'fileutils'
+require 'tmpdir'
+
+# Commit log for "foo" repository in test.git.tar
+# master is the main branch
+# b1 is a branch off of master
+# tag1 is a tag
+#
+# 1de84a8 * b1
+# 077ba2a * master
+# 4fe459a * tag1
+# 31ce37f * foo
+
+module GitTestHelper
+  def self.included base
+    base.setup do
+      # Extract the test repository data into the default test
+      # environment's Rails.configuration.git_repositories_dir. (We
+      # don't use that config setting here, though: it doesn't seem
+      # worth the risk of stepping on a real git repo root.)
+      @tmpdir = Rails.root.join 'tmp', 'git'
+      FileUtils.mkdir_p @tmpdir
+      system("tar", "-xC", @tmpdir.to_s, "-f", "test/test.git.tar")
+      Rails.configuration.git_repositories_dir = "#{@tmpdir}/test"
+      Rails.configuration.git_internal_dir = "#{@tmpdir}/internal.git"
+    end
+
+    base.teardown do
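+      # Clear the commit cache and restore the repositories from the
+      # tarball so one test's changes don't leak into the next.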
+      FileUtils.remove_entry Commit.cache_dir_base, true
+      FileUtils.mkdir_p @tmpdir
+      system("tar", "-xC", @tmpdir.to_s, "-f", "test/test.git.tar")
+    end
+  end
+
+  def internal_tag tag
+    IO.read "|git --git-dir #{Rails.configuration.git_internal_dir.shellescape} log --format=format:%H -n1 #{tag.shellescape}"
+  end
+
+  # Intercept fetch_remote_repository and fetch from a specified url
+  # or local fixture instead of the remote url requested. fakeurl can
+  # be a url (probably starting with file:///) or the name of a
+  # fixture (as a symbol)
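+  #
+  # Example (hypothetical url):
+  #   fetch_remote_from_local_repo 'https://example.com/foo.git', :foo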
+  def fetch_remote_from_local_repo url, fakeurl
+    if fakeurl.is_a? Symbol
+      fakeurl = 'file://' + repositories(fakeurl).server_path
+    end
+    Commit.expects(:fetch_remote_repository).once.with do |gitdir, giturl|
+      if giturl == url
+        Commit.unstub(:fetch_remote_repository)
+        Commit.fetch_remote_repository gitdir, fakeurl
+        true
+      end
+    end
+  end
+end
diff --git a/services/api/test/helpers/manifest_examples.rb b/services/api/test/helpers/manifest_examples.rb
new file mode 100644 (file)
index 0000000..396dd7f
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module ManifestExamples
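+  # Build a Keep manifest with the given dimensions. A sketch of the
+  # output for files_per_stream: 2 (with <loc> standing in for the
+  # signed block locator):
+  #   "./stream1 <loc> <loc> 0:1:file1.txt 1:1:file2.txt\n"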
+  def make_manifest opts={}
+    opts = {
+      bytes_per_block: 1,
+      blocks_per_file: 1,
+      files_per_stream: 1,
+      streams: 1,
+    }.merge(opts)
+    datablip = "x" * opts[:bytes_per_block]
+    locator = Blob.sign_locator(Digest::MD5.hexdigest(datablip) +
+                                '+' + datablip.length.to_s,
+                                api_token: opts[:api_token])
+    filesize = datablip.length * opts[:blocks_per_file]
+    txt = ''
+    (1..opts[:streams]).each do |s|
+      streamtoken = "./stream#{s}"
+      streamsize = 0
+      blocktokens = []
+      filetokens = []
+      (1..opts[:files_per_stream]).each do |f|
+        filetokens << "#{streamsize}:#{filesize}:file#{f}.txt"
+        (1..opts[:blocks_per_file]).each do |b|
+          blocktokens << locator
+        end
+        streamsize += filesize
+      end
+      txt << ([streamtoken] + blocktokens + filetokens).join(' ') + "\n"
+    end
+    txt
+  end
+end
diff --git a/services/api/test/helpers/time_block.rb b/services/api/test/helpers/time_block.rb
new file mode 100644 (file)
index 0000000..6c444e2
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class ActiveSupport::TestCase
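+  # Run the block and print its wall-clock duration to stderr as
+  # "<seconds>s <label>".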
+  def time_block label
+    t0 = Time.now
+    begin
+      yield
+    ensure
+      t1 = Time.now
+      $stderr.puts "#{t1 - t0}s #{label}"
+    end
+  end
+
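+  # Print the process's peak resident set size (the VmHWM line in
+  # /proc/self/status) before and after the block, and return the
+  # block's value. Linux-specific.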
+  def vmpeak c
+    open("/proc/self/status").each_line do |line|
+      print "Begin #{c} #{line}" if (line =~ /^VmHWM:/)
+    end
+    n = yield
+    open("/proc/self/status").each_line do |line|
+      print "End #{c} #{line}" if (line =~ /^VmHWM:/)
+    end
+    n
+  end
+
+end
diff --git a/services/api/test/helpers/users_test_helper.rb b/services/api/test/helpers/users_test_helper.rb
new file mode 100644 (file)
index 0000000..585619e
--- /dev/null
@@ -0,0 +1,104 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module UsersTestHelper
+  def verify_link(response_items, link_object_name, expect_link, link_class,
+        link_name, head_uuid, tail_uuid, head_kind, fetch_object, class_name)
+    link = find_obj_in_resp response_items, 'arvados#link', link_object_name
+
+    if !expect_link
+      assert_nil link, "Expected no link for #{link_object_name}"
+      return
+    end
+
+    assert_not_nil link, "Expected link for #{link_object_name}"
+
+    if fetch_object
+      object = Object.const_get(class_name).where(name: head_uuid)
+      assert [] != object, "expected #{class_name} with name #{head_uuid}"
+      head_uuid = object.first[:uuid]
+    end
+    assert_equal link_class, link['link_class'],
+        "did not find expected link_class for #{link_object_name}"
+
+    assert_equal link_name, link['name'],
+        "did not find expected link_name for #{link_object_name}"
+
+    assert_equal tail_uuid, link['tail_uuid'],
+        "did not find expected tail_uuid for #{link_object_name}"
+
+    assert_equal head_kind, link['head_kind'],
+        "did not find expected head_kind for #{link_object_name}"
+
+    assert_equal head_uuid, link['head_uuid'],
+        "did not find expected head_uuid for #{link_object_name}"
+  end
+
+  def verify_system_group_permission_link_for user_uuid
+    assert_equal 1, Link.where(link_class: 'permission',
+                               name: 'can_manage',
+                               tail_uuid: system_group_uuid,
+                               head_uuid: user_uuid).count
+  end
+
+  def verify_link_existence uuid, email, expect_oid_login_perms,
+      expect_repo_perms, expect_vm_perms, expect_group_perms, expect_signatures
+    # Verify that each category of links for the user exists or has
+    # been deleted, according to the expect_* arguments.
+    oid_login_perms = Link.where(tail_uuid: email,
+                                 link_class: 'permission',
+                                 name: 'can_login').where("head_uuid like ?", User.uuid_like_pattern)
+    if expect_oid_login_perms
+      assert oid_login_perms.any?, "expected oid_login_perms"
+    else
+      assert !oid_login_perms.any?, "expected all oid_login_perms deleted"
+    end
+
+    repo_perms = Link.where(tail_uuid: uuid,
+                            link_class: 'permission',
+                            name: 'can_manage').where("head_uuid like ?", Repository.uuid_like_pattern)
+    if expect_repo_perms
+      assert repo_perms.any?, "expected repo_perms"
+    else
+      assert !repo_perms.any?, "expected all repo_perms deleted"
+    end
+
+    vm_login_perms = Link.
+      where(tail_uuid: uuid,
+            link_class: 'permission',
+            name: 'can_login').
+      where("head_uuid like ?",
+            VirtualMachine.uuid_like_pattern).
+      where('uuid <> ?',
+            links(:auto_setup_vm_login_username_can_login_to_test_vm).uuid)
+    if expect_vm_perms
+      assert vm_login_perms.any?, "expected vm_login_perms"
+    else
+      assert !vm_login_perms.any?, "expected all vm_login_perms deleted"
+    end
+
+    group = Group.where(name: 'All users').select do |g|
+      g[:uuid].match(/-f+$/)
+    end.first
+    group_read_perms = Link.where(tail_uuid: uuid,
+                                  head_uuid: group[:uuid],
+                                  link_class: 'permission',
+                                  name: 'can_read')
+    if expect_group_perms
+      assert group_read_perms.any?, "expected all users group read perms"
+    else
+      assert !group_read_perms.any?, "expected all users group perm deleted"
+    end
+
+    signed_uuids = Link.where(link_class: 'signature',
+                              tail_uuid: uuid)
+
+    if expect_signatures
+      assert signed_uuids.any?, "expected signatures"
+    else
+      assert !signed_uuids.any?, "expected all signatures deleted"
+    end
+
+  end
+
+end
diff --git a/services/api/test/integration/.gitkeep b/services/api/test/integration/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/test/integration/api_client_authorizations_api_test.rb b/services/api/test/integration/api_client_authorizations_api_test.rb
new file mode 100644 (file)
index 0000000..e42a4ef
--- /dev/null
@@ -0,0 +1,51 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ApiClientAuthorizationsApiTest < ActionDispatch::IntegrationTest
+  fixtures :all
+
+  test "create system auth" do
+    post "/arvados/v1/api_client_authorizations/create_system_auth", {:format => :json, :scopes => ['test'].to_json}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin_trustedclient).api_token}"}
+    assert_response :success
+  end
+
+  test "create token for different user" do
+    post "/arvados/v1/api_client_authorizations", {
+      :format => :json,
+      :api_client_authorization => {
+        :owner_uuid => users(:spectator).uuid
+      }
+    }, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin_trustedclient).api_token}"}
+    assert_response :success
+
+    get "/arvados/v1/users/current", {
+      :format => :json
+    }, {'HTTP_AUTHORIZATION' => "OAuth2 #{json_response['api_token']}"}
+    @json_response = nil   # discard memoized response so json_response re-parses
+    assert_equal users(:spectator).uuid, json_response['uuid']
+  end
+
+  test "refuse to create token for different user if not trusted client" do
+    post "/arvados/v1/api_client_authorizations", {
+      :format => :json,
+      :api_client_authorization => {
+        :owner_uuid => users(:spectator).uuid
+      }
+    }, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin).api_token}"}
+    assert_response 403
+  end
+
+  test "refuse to create token for different user if not admin" do
+    post "/arvados/v1/api_client_authorizations", {
+      :format => :json,
+      :api_client_authorization => {
+        :owner_uuid => users(:spectator).uuid
+      }
+    }, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active_trustedclient).api_token}"}
+    assert_response 403
+  end
+
+end
diff --git a/services/api/test/integration/api_client_authorizations_scopes_test.rb b/services/api/test/integration/api_client_authorizations_scopes_test.rb
new file mode 100644 (file)
index 0000000..dfb5749
--- /dev/null
@@ -0,0 +1,105 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# The v1 API uses token scopes to control access to the REST API at the path
+# level.  This is enforced in the base ApplicationController, making it a
+# functional test that we can run against many different controllers.
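+#
+# A scope names an HTTP method and path, e.g. "GET /arvados/v1/users"
+# (the fixture tokens below are configured this way); a token whose
+# scopes list is ["all"] is unrestricted.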
+
+require 'test_helper'
+
+class ApiTokensScopeTest < ActionDispatch::IntegrationTest
+  fixtures :all
+
+  def v1_url(*parts)
+    (['', 'arvados', 'v1'] + parts).join('/')
+  end
+
+  test "user list token can only list users" do
+    get_args = [{}, auth(:active_userlist)]
+    get(v1_url('users'), *get_args)
+    assert_response :success
+    get(v1_url('users', ''), *get_args)  # Add trailing slash.
+    assert_response :success
+    get(v1_url('users', 'current'), *get_args)
+    assert_response 403
+    get(v1_url('virtual_machines'), *get_args)
+    assert_response 403
+  end
+
+  test "narrow + wide scoped tokens for different users" do
+    get_args = [{
+                  reader_tokens: [api_client_authorizations(:anonymous).api_token]
+                }, auth(:active_userlist)]
+    get(v1_url('users'), *get_args)
+    assert_response :success
+    get(v1_url('users', ''), *get_args)  # Add trailing slash.
+    assert_response :success
+    get(v1_url('users', 'current'), *get_args)
+    assert_response 403
+    get(v1_url('virtual_machines'), *get_args)
+    assert_response 403
+  end
+
+  test "specimens token can see exactly owned specimens" do
+    get_args = [{}, auth(:active_specimens)]
+    get(v1_url('specimens'), *get_args)
+    assert_response 403
+    get(v1_url('specimens', specimens(:owned_by_active_user).uuid), *get_args)
+    assert_response :success
+    head(v1_url('specimens', specimens(:owned_by_active_user).uuid), *get_args)
+    assert_response :success
+    get(v1_url('specimens', specimens(:owned_by_spectator).uuid), *get_args)
+    assert_includes(403..404, @response.status)
+  end
+
+  test "token with multiple scopes can use them all" do
+    def get_token_count
+      get(v1_url('api_client_authorizations'), {}, auth(:active_apitokens))
+      assert_response :success
+      token_count = JSON.parse(@response.body)['items_available']
+      assert_not_nil(token_count, "could not find token count")
+      token_count
+    end
+    # Test the GET scope.
+    token_count = get_token_count
+    # Test the POST scope.
+    post(v1_url('api_client_authorizations'),
+         {api_client_authorization: {user_id: users(:active).id}},
+         auth(:active_apitokens))
+    assert_response :success
+    assert_equal(token_count + 1, get_token_count,
+                 "token count suggests POST was not accepted")
+    # Test other requests are denied.
+    get(v1_url('api_client_authorizations',
+               api_client_authorizations(:active_apitokens).uuid),
+        {}, auth(:active_apitokens))
+    assert_response 403
+  end
+
+  test "token without scope has no access" do
+    # Logs are good for this test, because logs have relatively
+    # few access controls enforced at the model level.
+    req_args = [{}, auth(:admin_noscope)]
+    get(v1_url('logs'), *req_args)
+    assert_response 403
+    get(v1_url('logs', logs(:noop).uuid), *req_args)
+    assert_response 403
+    post(v1_url('logs'), *req_args)
+    assert_response 403
+  end
+
+  test "VM login scopes work" do
+    # A system administration script makes an API token with limited scope
+    # for virtual machines to let it see logins.
+    def vm_logins_url(name)
+      v1_url('virtual_machines', virtual_machines(name).uuid, 'logins')
+    end
+    get_args = [{}, auth(:admin_vm)]
+    get(vm_logins_url(:testvm), *get_args)
+    assert_response :success
+    get(vm_logins_url(:testvm2), *get_args)
+    assert_includes(400..419, @response.status,
+                    "getting testvm2 logins should have failed")
+  end
+end
diff --git a/services/api/test/integration/collections_api_test.rb b/services/api/test/integration/collections_api_test.rb
new file mode 100644 (file)
index 0000000..ac1ca66
--- /dev/null
@@ -0,0 +1,327 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class CollectionsApiTest < ActionDispatch::IntegrationTest
+  fixtures :all
+
+  test "should get index" do
+    get "/arvados/v1/collections", {:format => :json}, auth(:active)
+    assert_response :success
+    assert_equal "arvados#collectionList", json_response['kind']
+  end
+
+  test "get index with filters= (empty string)" do
+    get "/arvados/v1/collections", {:format => :json, :filters => ''}, auth(:active)
+    assert_response :success
+    assert_equal "arvados#collectionList", json_response['kind']
+  end
+
+  test "get index with invalid filters (array of strings) responds 422" do
+    get "/arvados/v1/collections", {
+      :format => :json,
+      :filters => ['uuid', '=', 'ad02e37b6a7f45bbe2ead3c29a109b8a+54'].to_json
+    }, auth(:active)
+    assert_response 422
+    assert_match(/nvalid element.*not an array/, json_response['errors'].join(' '))
+  end
+
+  test "get index with invalid filters (unsearchable column) responds 422" do
+    get "/arvados/v1/collections", {
+      :format => :json,
+      :filters => [['this_column_does_not_exist', '=', 'bogus']].to_json
+    }, auth(:active)
+    assert_response 422
+    assert_match(/nvalid attribute/, json_response['errors'].join(' '))
+  end
+
+  test "get index with invalid filters (invalid operator) responds 422" do
+    get "/arvados/v1/collections", {
+      :format => :json,
+      :filters => [['uuid', ':-(', 'displeased']].to_json
+    }, auth(:active)
+    assert_response 422
+    assert_match(/nvalid operator/, json_response['errors'].join(' '))
+  end
+
+  test "get index with invalid filters (invalid operand type) responds 422" do
+    get "/arvados/v1/collections", {
+      :format => :json,
+      :filters => [['uuid', '=', {foo: 'bar'}]].to_json
+    }, auth(:active)
+    assert_response 422
+    assert_match(/nvalid operand type/, json_response['errors'].join(' '))
+  end
+
+  test "get index with where= (empty string)" do
+    get "/arvados/v1/collections", {:format => :json, :where => ''}, auth(:active)
+    assert_response :success
+    assert_equal "arvados#collectionList", json_response['kind']
+  end
+
+  test "get index with select= (valid attribute)" do
+    get "/arvados/v1/collections", {
+          :format => :json,
+          :select => ['portable_data_hash'].to_json
+        }, auth(:active)
+    assert_response :success
+    assert_includes json_response['items'][0].keys, 'portable_data_hash'
+    refute_includes json_response['items'][0].keys, 'uuid'
+  end
+
+  test "get index with select= (invalid attribute) responds 422" do
+    get "/arvados/v1/collections", {
+          :format => :json,
+          :select => ['bogus'].to_json
+        }, auth(:active)
+    assert_response 422
+    assert_match(/Invalid attribute.*bogus/, json_response['errors'].join(' '))
+  end
+
+  test "get index with select= (invalid attribute type) responds 422" do
+    get "/arvados/v1/collections", {
+          :format => :json,
+          :select => [['bogus']].to_json
+        }, auth(:active)
+    assert_response 422
+    assert_match(/Invalid attribute.*bogus/, json_response['errors'].join(' '))
+  end
+
+  test "controller 404 response is json" do
+    get "/arvados/v1/thingsthatdonotexist", {:format => :xml}, auth(:active)
+    assert_response 404
+    assert_equal 1, json_response['errors'].length
+    assert_kind_of String, json_response['errors'][0]
+  end
+
+  test "object 404 response is json" do
+    get "/arvados/v1/groups/zzzzz-j7d0g-o5ba971173cup4f", {}, auth(:active)
+    assert_response 404
+    assert_equal 1, json_response['errors'].length
+    assert_kind_of String, json_response['errors'][0]
+  end
+
+  test "store collection as json" do
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+    }
+    signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
+                                       signing_opts)
+    post "/arvados/v1/collections", {
+      format: :json,
+      collection: "{\"manifest_text\":\". #{signed_locator} 0:44:md5sum.txt\\n\",\"portable_data_hash\":\"ad02e37b6a7f45bbe2ead3c29a109b8a+54\"}"
+    }, auth(:active)
+    assert_response 200
+    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']
+  end
+
+  test "store collection with manifest_text only" do
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+    }
+    signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
+                                       signing_opts)
+    post "/arvados/v1/collections", {
+      format: :json,
+      collection: "{\"manifest_text\":\". #{signed_locator} 0:44:md5sum.txt\\n\"}"
+    }, auth(:active)
+    assert_response 200
+    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']
+  end
+
+  test "store collection then update name" do
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+    }
+    signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
+                                       signing_opts)
+    post "/arvados/v1/collections", {
+      format: :json,
+      collection: "{\"manifest_text\":\". #{signed_locator} 0:44:md5sum.txt\\n\",\"portable_data_hash\":\"ad02e37b6a7f45bbe2ead3c29a109b8a+54\"}"
+    }, auth(:active)
+    assert_response 200
+    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']
+
+    put "/arvados/v1/collections/#{json_response['uuid']}", {
+      format: :json,
+      collection: { name: "a name" }
+    }, auth(:active)
+
+    assert_response 200
+    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']
+    assert_equal 'a name', json_response['name']
+
+    get "/arvados/v1/collections/#{json_response['uuid']}", {
+      format: :json,
+    }, auth(:active)
+
+    assert_response 200
+    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']
+    assert_equal 'a name', json_response['name']
+  end
+
+  test "update description for a collection, and search for that description" do
+    collection = collections(:multilevel_collection_1)
+
+    # update collection's description
+    put "/arvados/v1/collections/#{collection['uuid']}", {
+      format: :json,
+      collection: { description: "something specific" }
+    }, auth(:active)
+    assert_response :success
+    assert_equal 'something specific', json_response['description']
+
+    # get the collection and verify newly added description
+    get "/arvados/v1/collections/#{collection['uuid']}", {
+      format: :json,
+    }, auth(:active)
+    assert_response 200
+    assert_equal 'something specific', json_response['description']
+
+    # search
+    search_using_filter 'specific', 1
+    search_using_filter 'not specific enough', 0
+  end
+
+  test "create collection, update manifest, and search with filename" do
+    # create collection
+    signed_manifest = Collection.sign_manifest(". bad42fa702ae3ea7d888fef11b46f450+44 0:44:my_test_file.txt\n", api_token(:active))
+    post "/arvados/v1/collections", {
+      format: :json,
+      collection: {manifest_text: signed_manifest}.to_json,
+    }, auth(:active)
+    assert_response :success
+    assert_includes json_response['manifest_text'], 'my_test_file.txt'
+
+    created = json_response
+
+    # search using the filename
+    search_using_filter 'my_test_file.txt', 1
+
+    # update the collection's manifest text
+    signed_manifest = Collection.sign_manifest(". bad42fa702ae3ea7d888fef11b46f450+44 0:44:my_updated_test_file.txt\n", api_token(:active))
+    put "/arvados/v1/collections/#{created['uuid']}", {
+      format: :json,
+      collection: {manifest_text: signed_manifest}.to_json,
+    }, auth(:active)
+    assert_response :success
+    assert_equal created['uuid'], json_response['uuid']
+    assert_includes json_response['manifest_text'], 'my_updated_test_file.txt'
+    assert_not_includes json_response['manifest_text'], 'my_test_file.txt'
+
+    # search using the new filename
+    search_using_filter 'my_updated_test_file.txt', 1
+    search_using_filter 'my_test_file.txt', 0
+    search_using_filter 'there_is_no_such_file.txt', 0
+  end
+
+  def search_using_filter search_filter, expected_items
+    get '/arvados/v1/collections', {
+      :filters => [['any', 'ilike', "%#{search_filter}%"]].to_json
+    }, auth(:active)
+    assert_response :success
+    response_items = json_response['items']
+    assert_not_nil response_items
+    if expected_items == 0
+      assert_empty response_items
+    else
+      refute_empty response_items
+      first_item = response_items.first
+      assert_not_nil first_item
+    end
+  end
+
+  test "search collection using full text search" do
+    # create collection to be searched for
+    signed_manifest = Collection.sign_manifest(". 85877ca2d7e05498dd3d109baf2df106+95+A3a4e26a366ee7e4ed3e476ccf05354761be2e4ae@545a9920 0:95:file_in_subdir1\n./subdir2/subdir3 2bbc341c702df4d8f42ec31f16c10120+64+A315d7e7bad2ce937e711fc454fae2d1194d14d64@545a9920 0:32:file1_in_subdir3.txt 32:32:file2_in_subdir3.txt\n./subdir2/subdir3/subdir4 2bbc341c702df4d8f42ec31f16c10120+64+A315d7e7bad2ce937e711fc454fae2d1194d14d64@545a9920 0:32:file3_in_subdir4.txt 32:32:file4_in_subdir4.txt\n", api_token(:active))
+    post "/arvados/v1/collections", {
+      format: :json,
+      collection: {description: 'specific collection description', manifest_text: signed_manifest}.to_json,
+    }, auth(:active)
+    assert_response :success
+    assert_includes json_response['manifest_text'], 'file4_in_subdir4.txt'
+
+    # search using the filename
+    search_using_full_text_search 'subdir2', 0
+    search_using_full_text_search 'subdir2:*', 1
+    search_using_full_text_search 'subdir2/subdir3/subdir4', 1
+    search_using_full_text_search 'file4:*', 1
+    search_using_full_text_search 'file4_in_subdir4.txt', 1
+    search_using_full_text_search 'subdir2 file4:*', 0      # first word is incomplete
+    search_using_full_text_search 'subdir2/subdir3/subdir4 file4:*', 1
+    search_using_full_text_search 'subdir2/subdir3/subdir4 file4_in_subdir4.txt', 1
+    search_using_full_text_search 'ile4', 0                 # not a prefix match
+  end
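+
+  # In these filters the '@@' operator invokes PostgreSQL full text
+  # search, where the ':*' suffix is tsquery syntax for a prefix match:
+  # 'subdir2' alone finds nothing because it is only a prefix of the
+  # indexed path token, while 'subdir2:*' matches it.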
+
+  def search_using_full_text_search search_filter, expected_items
+    get '/arvados/v1/collections', {
+      :filters => [['any', '@@', search_filter]].to_json
+    }, auth(:active)
+    assert_response :success
+    response_items = json_response['items']
+    assert_not_nil response_items
+    if expected_items == 0
+      assert_empty response_items
+    else
+      refute_empty response_items
+      first_item = response_items.first
+      assert_not_nil first_item
+    end
+  end
+
+  # search for the filename in the file_names column and expect error
+  test "full text search not supported for individual columns" do
+    get '/arvados/v1/collections', {
+      :filters => [['name', '@@', 'General']].to_json
+    }, auth(:active)
+    assert_response 422
+  end
+
+  [
+    'quick fox',
+    'quick_brown fox',
+    'brown_ fox',
+    'fox dogs',
+  ].each do |search_filter|
+    test "full text search ignores special characters and finds with filter #{search_filter}" do
+      # description: The quick_brown_fox jumps over the lazy_dog
+      # PostgreSQL full text search treats '_' as a word separator, so
+      # each of these filters matches the description above.
+      get '/arvados/v1/collections', {
+        :filters => [['any', '@@', search_filter]].to_json
+      }, auth(:active)
+      assert_response 200
+      response_items = json_response['items']
+      assert_not_nil response_items
+      first_item = response_items.first
+      refute_empty first_item
+      assert_equal 'The quick_brown_fox jumps over the lazy_dog', first_item['description']
+    end
+  end
+
+  test "create and get collection with properties" do
+    # create collection to be searched for
+    signed_manifest = Collection.sign_manifest(". bad42fa702ae3ea7d888fef11b46f450+44 0:44:my_test_file.txt\n", api_token(:active))
+    post "/arvados/v1/collections", {
+      format: :json,
+      collection: {manifest_text: signed_manifest}.to_json,
+    }, auth(:active)
+    assert_response 200
+    assert_not_nil json_response['uuid']
+    assert_not_nil json_response['properties']
+    assert_empty json_response['properties']
+
+    # update collection's description
+    put "/arvados/v1/collections/#{json_response['uuid']}", {
+      format: :json,
+      collection: { properties: {'property_1' => 'value_1'} }
+    }, auth(:active)
+    assert_response :success
+    assert_equal 'value_1', json_response['properties']['property_1']
+  end
+end
diff --git a/services/api/test/integration/collections_performance_test.rb b/services/api/test/integration/collections_performance_test.rb
new file mode 100644 (file)
index 0000000..3d13d14
--- /dev/null
@@ -0,0 +1,59 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'safe_json'
+require 'test_helper'
+require 'helpers/manifest_examples'
+require 'helpers/time_block'
+
+class CollectionsApiPerformanceTest < ActionDispatch::IntegrationTest
+  include ManifestExamples
+
+  slow_test "crud cycle for a collection with a big manifest" do
+    bigmanifest = time_block 'make example' do
+      make_manifest(streams: 100,
+                    files_per_stream: 100,
+                    blocks_per_file: 20,
+                    bytes_per_block: 2**26,
+                    api_token: api_token(:active))
+    end
+    json = time_block "JSON encode #{bigmanifest.length>>20}MiB manifest" do
+      SafeJSON.dump({"manifest_text" => bigmanifest})
+    end
+    time_block 'create' do
+      post '/arvados/v1/collections', {collection: json}, auth(:active)
+      assert_response :success
+    end
+    uuid = json_response['uuid']
+    time_block 'read' do
+      get '/arvados/v1/collections/' + uuid, {}, auth(:active)
+      assert_response :success
+    end
+    time_block 'list' do
+      get '/arvados/v1/collections', {select: ['manifest_text'], filters: [['uuid', '=', uuid]].to_json}, auth(:active)
+      assert_response :success
+    end
+    time_block 'update' do
+      put '/arvados/v1/collections/' + uuid, {collection: json}, auth(:active)
+      assert_response :success
+    end
+    time_block 'delete' do
+      delete '/arvados/v1/collections/' + uuid, {}, auth(:active)
+    end
+  end
+
+  slow_test "memory usage" do
+    hugemanifest = make_manifest(streams: 1,
+                                 files_per_stream: 2000,
+                                 blocks_per_file: 200,
+                                 bytes_per_block: 2**26,
+                                 api_token: api_token(:active))
+    json = time_block "JSON encode #{hugemanifest.length>>20}MiB manifest" do
+      SafeJSON.dump({manifest_text: hugemanifest})
+    end
+    vmpeak "post" do
+      post '/arvados/v1/collections', {collection: json}, auth(:active)
+    end
+  end
+end
diff --git a/services/api/test/integration/container_auth_test.rb b/services/api/test/integration/container_auth_test.rb
new file mode 100644 (file)
index 0000000..552cce4
--- /dev/null
@@ -0,0 +1,65 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ContainerAuthTest < ActionDispatch::IntegrationTest
+  fixtures :all
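+
+  # In these tests the Authorization header has the form
+  # "Bearer <token>[/<container-uuid>]". The optional uuid suffix ties
+  # a runtime token to a particular container; the expected responses
+  # depend on the container's state and on whether the uuid matches.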
+
+  test "container token validate, Running, regular auth" do
+    get "/arvados/v1/containers/current", {
+      :format => :json
+        }, {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}"}
+    # Container is Running, token can be used
+    assert_response :success
+    assert_equal containers(:running).uuid, json_response['uuid']
+  end
+
+  test "container token validate, Locked, runtime_token" do
+    get "/arvados/v1/containers/current", {
+      :format => :json
+        }, {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:container_runtime_token).token}/#{containers(:runtime_token).uuid}"}
+    # Container is Running, token can be used
+    assert_response :success
+    assert_equal containers(:runtime_token).uuid, json_response['uuid']
+  end
+
+  test "container token validate, Cancelled, runtime_token" do
+    put "/arvados/v1/containers/#{containers(:runtime_token).uuid}", {
+          :format => :json,
+          :container => {:state => "Cancelled"}
+        }, {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:dispatch1).token}"}
+    assert_response :success
+    get "/arvados/v1/containers/current", {
+      :format => :json
+        }, {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:container_runtime_token).token}/#{containers(:runtime_token).uuid}"}
+    # Container is Queued, token cannot be used
+    assert_response 401
+  end
+
+  test "container token validate, Running, without optional portion" do
+    get "/arvados/v1/containers/current", {
+      :format => :json
+        }, {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:running_container_auth).token}"}
+    # Container is Running, token can be used
+    assert_response :success
+    assert_equal containers(:running).uuid, json_response['uuid']
+  end
+
+  test "container token validate, Locked, runtime_token, without optional portion" do
+    get "/arvados/v1/containers/current", {
+      :format => :json
+        }, {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:container_runtime_token).token}"}
+    # runtime_token without container uuid won't return 'current'
+    assert_response 404
+  end
+
+  test "container token validate, wrong container uuid" do
+    get "/arvados/v1/containers/current", {
+      :format => :json
+        }, {'HTTP_AUTHORIZATION' => "Bearer #{api_client_authorizations(:container_runtime_token).token}/#{containers(:running).uuid}"}
+    # Container uuid mismatch, token can't be used
+    assert_response 401
+  end
+end
diff --git a/services/api/test/integration/cross_origin_test.rb b/services/api/test/integration/cross_origin_test.rb
new file mode 100644 (file)
index 0000000..5109ea4
--- /dev/null
@@ -0,0 +1,80 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class CrossOriginTest < ActionDispatch::IntegrationTest
+  def options *args
+    # Rails doesn't support OPTIONS the same way as GET, POST, etc.
+    reset! unless integration_session
+    integration_session.__send__(:process, :options, *args).tap do
+      copy_session_variables!
+    end
+  end
+
+  %w(/login /logout /auth/example/callback /auth/joshid).each do |path|
+    test "OPTIONS requests are refused at #{path}" do
+      options path, {}, {}
+      assert_no_cors_headers
+    end
+
+    test "CORS headers do not exist at GET #{path}" do
+      get path, {}, {}
+      assert_no_cors_headers
+    end
+  end
+
+  %w(/discovery/v1/apis/arvados/v1/rest).each do |path|
+    test "CORS headers are set at GET #{path}" do
+      get path, {}, {}
+      assert_response :success
+      assert_cors_headers
+    end
+  end
+
+  ['/arvados/v1/collections',
+   '/arvados/v1/users',
+   '/arvados/v1/api_client_authorizations'].each do |path|
+    test "CORS headers are set and body is empty at OPTIONS #{path}" do
+      options path, {}, {}
+      assert_response :success
+      assert_cors_headers
+      assert_equal '', response.body
+    end
+
+    test "CORS headers are set at authenticated GET #{path}" do
+      get path, {}, auth(:active_trustedclient)
+      assert_response :success
+      assert_cors_headers
+    end
+
+    # CORS headers are OK only if cookies are *not* used to determine
+    # whether a transaction is allowed. The following is a (far from
+    # perfect) test that the usual Rails cookie->session mechanism
+    # does not grant access to any resources.
+    ['GET', 'POST'].each do |method|
+      test "Session does not work at #{method} #{path}" do
+        send method.downcase, path, {format: 'json'}, {user_id: 1}
+        assert_response 401
+        assert_cors_headers
+      end
+    end
+  end
+
+  protected
+  def assert_cors_headers
+    assert_equal '*', response.headers['Access-Control-Allow-Origin']
+    allowed = response.headers['Access-Control-Allow-Methods'].split(', ')
+    %w(GET HEAD POST PUT DELETE).each do |m|
+      assert_includes allowed, m, "A-C-A-Methods should include #{m}"
+    end
+    assert_equal 'Authorization, Content-Type', response.headers['Access-Control-Allow-Headers']
+  end
+
+  def assert_no_cors_headers
+    response.headers.keys.each do |h|
+      assert_no_match(/^Access-Control-/i, h)
+    end
+  end
+end
diff --git a/services/api/test/integration/crunch_dispatch_test.rb b/services/api/test/integration/crunch_dispatch_test.rb
new file mode 100644 (file)
index 0000000..dc215f2
--- /dev/null
@@ -0,0 +1,42 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/git_test_helper'
+
+class CrunchDispatchIntegrationTest < ActionDispatch::IntegrationTest
+  include GitTestHelper
+
+  fixtures :all
+
+  @@crunch_dispatch_pid = nil
+
+  def launch_crunch_dispatch
+    @@crunch_dispatch_pid = Process.fork {
+      ENV['PATH'] = ENV['HOME'] + '/arvados/services/crunch:' + ENV['PATH']
+      exec(ENV['HOME'] + '/arvados/services/api/script/crunch-dispatch.rb')
+    }
+  end
+
+  teardown do
+    if @@crunch_dispatch_pid
+      Process.kill "TERM", @@crunch_dispatch_pid
+      Process.wait
+      @@crunch_dispatch_pid = nil
+    end
+  end
+
+  test "job runs" do
+    post "/arvados/v1/jobs", {
+      format: "json",
+      job: {
+        script: "log",
+        repository: "active/crunchdispatchtest",
+        script_version: "f35f99b7d32bac257f5989df02b9f12ee1a9b0d6",
+        script_parameters: "{}"
+      }
+    }, auth(:admin)
+    assert_response :success
+  end
+end
diff --git a/services/api/test/integration/database_reset_test.rb b/services/api/test/integration/database_reset_test.rb
new file mode 100644 (file)
index 0000000..430474e
--- /dev/null
@@ -0,0 +1,77 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class DatabaseResetTest < ActionDispatch::IntegrationTest
+  slow_test "reset fails when Rails.env != 'test'" do
+    rails_env_was = Rails.env
+    begin
+      Rails.env = 'production'
+      Rails.application.reload_routes!
+      post '/database/reset', {}, auth(:admin)
+      assert_response 404
+    ensure
+      Rails.env = rails_env_was
+      Rails.application.reload_routes!
+    end
+  end
+
+  test "reset fails with non-admin token" do
+    post '/database/reset', {}, auth(:active)
+    assert_response 403
+  end
+
+  slow_test "database reset doesn't break basic CRUD operations" do
+    active_auth = auth(:active)
+    admin_auth = auth(:admin)
+
+    authorize_with :admin
+    post '/database/reset', {}, admin_auth
+    assert_response :success
+
+    post '/arvados/v1/specimens', {specimen: '{}'}, active_auth
+    assert_response :success
+    new_uuid = json_response['uuid']
+
+    get '/arvados/v1/specimens/'+new_uuid, {}, active_auth
+    assert_response :success
+
+    put('/arvados/v1/specimens/'+new_uuid,
+        {specimen: '{"properties":{}}'}, active_auth)
+    assert_response :success
+
+    delete '/arvados/v1/specimens/'+new_uuid, {}, active_auth
+    assert_response :success
+
+    get '/arvados/v1/specimens/'+new_uuid, {}, active_auth
+    assert_response 404
+  end
+
+  slow_test "roll back database change" do
+    active_auth = auth(:active)
+    admin_auth = auth(:admin)
+
+    old_uuid = specimens(:owned_by_active_user).uuid
+    authorize_with :admin
+    post '/database/reset', {}, admin_auth
+    assert_response :success
+
+    delete '/arvados/v1/specimens/' + old_uuid, {}, active_auth
+    assert_response :success
+    post '/arvados/v1/specimens', {specimen: '{}'}, active_auth
+    assert_response :success
+    new_uuid = json_response['uuid']
+
+    # Reset to fixtures.
+    post '/database/reset', {}, admin_auth
+    assert_response :success
+
+    # New specimen should disappear. Old specimen should reappear.
+    get '/arvados/v1/specimens/'+new_uuid, {}, active_auth
+    assert_response 404
+    get '/arvados/v1/specimens/'+old_uuid, {}, active_auth
+    assert_response :success
+  end
+end
diff --git a/services/api/test/integration/errors_test.rb b/services/api/test/integration/errors_test.rb
new file mode 100644 (file)
index 0000000..1424558
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ErrorsTest < ActionDispatch::IntegrationTest
+  fixtures :api_client_authorizations
+
+  %w(/arvados/v1/shoes /arvados/shoes /shoes /nodes /users).each do |path|
+    test "non-existent route #{path}" do
+      get path, {:format => :json}, auth(:active)
+      assert_nil assigns(:objects)
+      assert_nil assigns(:object)
+      assert_not_nil json_response['errors']
+      assert_response 404
+    end
+  end
+
+  n=0
+  Rails.application.routes.routes.each do |route|
+    test "route #{n += 1} '#{route.path.spec.to_s}' is not an accident" do
+      # Generally, new routes should appear under /arvados/v1/. If
+      # they appear elsewhere, that might have been caused by default
+      # rails generator behavior that we don't want.
+      assert_match(/^\/(|\*a|arvados\/v1\/.*|auth\/.*|login|logout|database\/reset|discovery\/.*|static\/.*|themes\/.*|assets|_health\/.*)(\(\.:format\))?$/,
+                   route.path.spec.to_s,
+                   "Unexpected new route: #{route.path.spec}")
+    end
+  end
+end
diff --git a/services/api/test/integration/groups_test.rb b/services/api/test/integration/groups_test.rb
new file mode 100644 (file)
index 0000000..6b1bf79
--- /dev/null
@@ -0,0 +1,157 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class GroupsTest < ActionDispatch::IntegrationTest
+  [[], ['replication_confirmed']].each do |orders|
+    test "results are consistent when provided orders #{orders} is incomplete" do
+      last = nil
+      (0..20).each do
+        get '/arvados/v1/groups/contents', {
+          id: groups(:aproject).uuid,
+          filters: [["uuid", "is_a", "arvados#collection"]].to_json,
+          orders: orders.to_json,
+          format: :json,
+        }, auth(:active)
+        assert_response :success
+        if last.nil?
+          last = json_response['items']
+        else
+          assert_equal last, json_response['items']
+        end
+      end
+    end
+  end
+
+  test "get all pages of group-owned objects" do
+    limit = 5
+    offset = 0
+    items_available = nil
+    uuid_received = {}
+    owner_received = {}
+    while true
+      get "/arvados/v1/groups/contents", {
+        id: groups(:aproject).uuid,
+        limit: limit,
+        offset: offset,
+        format: :json,
+      }, auth(:active)
+
+      assert_response :success
+      assert_operator(0, :<, json_response['items'].count,
+                      "items_available=#{items_available} but received 0 "\
+                      "items with offset=#{offset}")
+      items_available ||= json_response['items_available']
+      assert_equal(items_available, json_response['items_available'],
+                   "items_available changed between page #{offset/limit} "\
+                   "and page #{1+offset/limit}")
+      json_response['items'].each do |item|
+        uuid = item['uuid']
+        assert_equal(nil, uuid_received[uuid],
+                     "Received '#{uuid}' again on page #{1+offset/limit}")
+        uuid_received[uuid] = true
+        owner_received[item['owner_uuid']] = true
+        offset += 1
+        assert_equal groups(:aproject).uuid, item['owner_uuid']
+      end
+      break if offset >= items_available
+    end
+  end
+
+  [
+    ['Collection_', true],            # collections and pipeline templates
+    ['hash', true],                   # pipeline templates
+    ['fa7aeb5140e2848d39b', false],   # script_parameter of pipeline instances
+    ['fa7aeb5140e2848d39b:*', true],  # script_parameter of pipeline instances
+    ['project pipeline', true],       # finds "Completed pipeline in A Project"
+    ['project pipeli:*', true],       # finds "Completed pipeline in A Project"
+    ['proje pipeli:*', false],        # first word is incomplete, so no prefix match
+    ['no-such-thing', false],         # does not match anything
+  ].each do |search_filter, expect_results|
+    test "full text search of group-owned objects for #{search_filter}" do
+      get "/arvados/v1/groups/contents", {
+        id: groups(:aproject).uuid,
+        limit: 5,
+        :filters => [['any', '@@', search_filter]].to_json
+      }, auth(:active)
+      assert_response :success
+      if expect_results
+        refute_empty json_response['items']
+        json_response['items'].each do |item|
+          assert item['uuid']
+          assert_equal groups(:aproject).uuid, item['owner_uuid']
+        end
+      else
+        assert_empty json_response['items']
+      end
+    end
+  end
+
+  test "full text search is not supported for individual columns" do
+    get "/arvados/v1/groups/contents", {
+      :filters => [['name', '@@', 'Private']].to_json
+    }, auth(:active)
+    assert_response 422
+  end
+
+  test "group contents with include trash collections" do
+    get "/arvados/v1/groups/contents", {
+      include_trash: "true",
+      filters: [["uuid", "is_a", "arvados#collection"]].to_json,
+      limit: 1000
+    }, auth(:active)
+    assert_response 200
+
+    coll_uuids = []
+    json_response['items'].each { |c| coll_uuids << c['uuid'] }
+    assert_includes coll_uuids, collections(:foo_collection_in_aproject).uuid
+    assert_includes coll_uuids, collections(:expired_collection).uuid
+  end
+
+  test "group contents without trash collections" do
+    get "/arvados/v1/groups/contents", {
+      filters: [["uuid", "is_a", "arvados#collection"]].to_json,
+      limit: 1000
+    }, auth(:active)
+    assert_response 200
+
+    coll_uuids = []
+    json_response['items'].each { |c| coll_uuids << c['uuid'] }
+    assert_includes coll_uuids, collections(:foo_collection_in_aproject).uuid
+    assert_not_includes coll_uuids, collections(:expired_collection).uuid
+  end
+
+  test "create request with async=true defers permissions update" do
+    Rails.configuration.async_permissions_update_interval = 1 # seconds
+    name = "Random group #{rand(1000)}"
+    assert_nil Group.find_by_name(name)
+    post "/arvados/v1/groups", {
+      group: {
+        name: name
+      },
+      async: true
+    }, auth(:active)
+    assert_response 202
+    g = Group.find_by_name(name)
+    assert_not_nil g
+    get "/arvados/v1/groups", {
+      filters: [["name", "=", name]].to_json,
+      limit: 10
+    }, auth(:active)
+    assert_response 200
+    assert_equal 0, json_response['items_available']
+
+    # Unblock the thread doing the permissions update
+    ActiveRecord::Base.clear_active_connections!
+
+    sleep(3)  # wait out the 1-second async_permissions_update_interval set above
+    get "/arvados/v1/groups", {
+      filters: [["name", "=", name]].to_json,
+      limit: 10
+    }, auth(:active)
+    assert_response 200
+    assert_equal 1, json_response['items_available']
+  end
+end
diff --git a/services/api/test/integration/jobs_api_test.rb b/services/api/test/integration/jobs_api_test.rb
new file mode 100644 (file)
index 0000000..b8b338f
--- /dev/null
@@ -0,0 +1,46 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class JobsApiTest < ActionDispatch::IntegrationTest
+  fixtures :all
+
+  test "cancel job" do
+    post "/arvados/v1/jobs/#{jobs(:running).uuid}/cancel", {:format => :json}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active).api_token}"}
+    assert_response :success
+    assert_equal "arvados#job", json_response['kind']
+    assert_not_nil json_response['cancelled_at']
+  end
+
+  test "cancel someone else's visible job" do
+    post "/arvados/v1/jobs/#{jobs(:runningbarbaz).uuid}/cancel", {:format => :json}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:spectator).api_token}"}
+    assert_response 403
+  end
+
+  test "cancel someone else's invisible job" do
+    post "/arvados/v1/jobs/#{jobs(:running).uuid}/cancel", {:format => :json}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:spectator).api_token}"}
+    assert_response 404
+  end
+
+  test "task qsequence values automatically increase monotonically" do
+    post_args = ["/arvados/v1/job_tasks",
+                 {job_task: {
+                     job_uuid: jobs(:running).uuid,
+                     sequence: 1,
+                   }},
+                 auth(:active)]
+    last_qsequence = -1
+    (1..3).each do |task_num|
+      @response = nil
+      post(*post_args)
+      assert_response :success
+      qsequence = json_response["qsequence"]
+      assert_not_nil(qsequence, "task not assigned qsequence")
+      assert_operator(qsequence, :>, last_qsequence,
+                      "qsequence did not increase between tasks")
+      last_qsequence = qsequence
+    end
+  end
+end
diff --git a/services/api/test/integration/keep_proxy_test.rb b/services/api/test/integration/keep_proxy_test.rb
new file mode 100644 (file)
index 0000000..8c286ea
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class KeepProxyTest < ActionDispatch::IntegrationTest
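+  # keep_services/accessible returns disk services by default; when the
+  # request carries X-External-Client: 1 (a client outside the private
+  # network), it returns the keep proxy instead.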
+  test "request keep disks" do
+    get "/arvados/v1/keep_services/accessible", {:format => :json}, auth(:active)
+    assert_response :success
+    services = json_response['items']
+
+    assert_operator 2, :<=, services.length
+    services.each do |service|
+      assert_equal 'disk', service['service_type']
+    end
+  end
+
+  test "request keep proxy" do
+    get "/arvados/v1/keep_services/accessible", {:format => :json}, auth(:active).merge({'HTTP_X_EXTERNAL_CLIENT' => '1'})
+    assert_response :success
+    services = json_response['items']
+
+    assert_equal 1, services.length
+
+    assert_equal keep_services(:proxy).uuid, services[0]['uuid']
+    assert_equal keep_services(:proxy).service_host, services[0]['service_host']
+    assert_equal keep_services(:proxy).service_port, services[0]['service_port']
+    assert_equal keep_services(:proxy).service_ssl_flag, services[0]['service_ssl_flag']
+    assert_equal 'proxy', services[0]['service_type']
+  end
+end
diff --git a/services/api/test/integration/login_workflow_test.rb b/services/api/test/integration/login_workflow_test.rb
new file mode 100644 (file)
index 0000000..85b4cb7
--- /dev/null
@@ -0,0 +1,29 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class LoginWorkflowTest < ActionDispatch::IntegrationTest
+  test "default prompt to login is JSON" do
+    post('/arvados/v1/specimens', {specimen: {}},
+         {'HTTP_ACCEPT' => ''})
+    assert_response 401
+    assert_includes(json_response['errors'], "Not logged in")
+  end
+
+  test "login prompt respects JSON Accept header" do
+    post('/arvados/v1/specimens', {specimen: {}},
+         {'HTTP_ACCEPT' => 'application/json'})
+    assert_response 401
+    assert_includes(json_response['errors'], "Not logged in")
+  end
+
+  test "login prompt respects HTML Accept header" do
+    post('/arvados/v1/specimens', {specimen: {}},
+         {'HTTP_ACCEPT' => 'text/html'})
+    assert_response 302
+    assert_match(%r{/auth/joshid$}, @response.headers['Location'],
+                 "HTML login prompt did not include expected redirect")
+  end
+end
diff --git a/services/api/test/integration/noop_deep_munge_test.rb b/services/api/test/integration/noop_deep_munge_test.rb
new file mode 100644 (file)
index 0000000..13b0fa6
--- /dev/null
@@ -0,0 +1,44 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class NoopDeepMungeTest < ActionDispatch::IntegrationTest
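+  # Rails' default "deep munge" of request params rewrites empty arrays
+  # to nil and drops nils inside arrays; these tests confirm such
+  # values round-trip intact through a JSON request body.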
+  test "empty array" do
+    check({"foo" => []})
+  end
+
+  test "null in array" do
+    check({"foo" => ["foo", nil]})
+  end
+
+  test "array of nulls" do
+    check({"foo" => [nil, nil, nil]})
+  end
+
+  protected
+
+  def check(val)
+    post "/arvados/v1/container_requests",
+         {
+           :container_request => {
+             :name => "workflow",
+             :state => "Uncommitted",
+             :command => ["echo"],
+             :container_image => "arvados/jobs",
+             :output_path => "/",
+             :mounts => {
+               :foo => {
+                 :kind => "json",
+                 :content => JSON.parse(SafeJSON.dump(val)),
+               }
+             }
+           }
+         }.to_json, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin).api_token}",
+                    'CONTENT_TYPE' => 'application/json'}
+    assert_response :success
+    assert_equal "arvados#containerRequest", json_response['kind']
+    assert_equal val, json_response['mounts']['foo']['content']
+  end
+end
diff --git a/services/api/test/integration/permissions_test.rb b/services/api/test/integration/permissions_test.rb
new file mode 100644 (file)
index 0000000..49fa473
--- /dev/null
@@ -0,0 +1,374 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PermissionsTest < ActionDispatch::IntegrationTest
+  include DbCurrentTime
+  include CurrentApiClient  # for empty_collection
+  fixtures :users, :groups, :api_client_authorizations, :collections
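+
+  # A permission link grants its tail_uuid (a user or group) access to
+  # its head_uuid (the target object) at the level given by its name:
+  # can_read, can_write, or can_manage. Access is transitive through
+  # groups, which is what the link-chain tests below exercise.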
+
+  test "adding and removing direct can_read links" do
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+    # try to add permission as spectator
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:spectator).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: collections(:foo_file).uuid,
+        properties: {}
+      }
+    }, auth(:spectator)
+    assert_response 422
+
+    # add permission as admin
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:spectator).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: collections(:foo_file).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    u = json_response['uuid']
+    assert_response :success
+
+    # read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response :success
+
+    # try to delete permission as spectator
+    delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:spectator)
+    assert_response 403
+
+    # delete permission as admin
+    delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:admin)
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+  end
+
+
+  test "adding can_read links from user to group, group to collection" do
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+    # add permission for spectator to read group
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:spectator).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:private).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+    # add permission for group to read collection
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: groups(:private).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: collections(:foo_file).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    u = json_response['uuid']
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response :success
+
+    # delete permission for group to read collection
+    delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:admin)
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+  end
+
+
+  test "adding can_read links from group to collection, user to group" do
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+    # add permission for group to read collection
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: groups(:private).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: collections(:foo_file).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+    # add permission for spectator to read group
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:spectator).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:private).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    u = json_response['uuid']
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response :success
+
+    # delete permission for spectator to read group
+    delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:admin)
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+  end
+
+  test "adding can_read links from user to group, group to group, group to collection" do
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+    # add permission for user to read group
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:spectator).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:private).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+
+    # add permission for group to read group
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: groups(:private).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:empty_lonely_group).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+
+    # add permission for group to read collection
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: groups(:empty_lonely_group).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: collections(:foo_file).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    u = json_response['uuid']
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response :success
+
+    # delete permission for group to read collection
+    delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:admin)
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+  end
+
+  test "read-only group-admin cannot modify administered user" do
+    put "/arvados/v1/users/#{users(:active).uuid}", {
+      :user => {
+        first_name: 'KilroyWasHere'
+      },
+      :format => :json
+    }, auth(:rominiadmin)
+    assert_response 403
+  end
+
+  test "read-only group-admin cannot read or update non-administered user" do
+    get "/arvados/v1/users/#{users(:spectator).uuid}", {
+      :format => :json
+    }, auth(:rominiadmin)
+    assert_response 404
+
+    put "/arvados/v1/users/#{users(:spectator).uuid}", {
+      :user => {
+        first_name: 'KilroyWasHere'
+      },
+      :format => :json
+    }, auth(:rominiadmin)
+    assert_response 404
+  end
+
+  test "RO group-admin finds user's specimens, RW group-admin can update" do
+    [[:rominiadmin, false],
+     [:miniadmin, true]].each do |which_user, update_should_succeed|
+      get "/arvados/v1/specimens", {:format => :json}, auth(which_user)
+      assert_response :success
+      resp_uuids = json_response['items'].collect { |i| i['uuid'] }
+      [[true, specimens(:owned_by_active_user).uuid],
+       [true, specimens(:owned_by_private_group).uuid],
+       [false, specimens(:owned_by_spectator).uuid],
+      ].each do |should_find, uuid|
+        assert_equal(should_find, resp_uuids.include?(uuid),
+                     "%s should%s see %s in specimen list" %
+                     [which_user.to_s,
+                      should_find ? '' : ' not',
+                      uuid])
+        put "/arvados/v1/specimens/#{uuid}", {
+          :specimen => {
+            properties: {
+              miniadmin_was_here: true
+            }
+          },
+          :format => :json
+        }, auth(which_user)
+        if !should_find
+          assert_response 404
+        elsif !update_should_succeed
+          assert_response 403
+        else
+          assert_response :success
+        end
+      end
+    end
+  end
+
+  test "get_permissions returns list" do
+    # First confirm that user :active cannot get permissions on group :public
+    get "/arvados/v1/permissions/#{groups(:public).uuid}", nil, auth(:active)
+    assert_response 404
+
+    # add some permission links, including a can_manage
+    # permission for user :active
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:spectator).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:public).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+    can_read_uuid = json_response['uuid']
+
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:inactive).uuid,
+        link_class: 'permission',
+        name: 'can_write',
+        head_uuid: groups(:public).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+    can_write_uuid = json_response['uuid']
+
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:active).uuid,
+        link_class: 'permission',
+        name: 'can_manage',
+        head_uuid: groups(:public).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+    can_manage_uuid = json_response['uuid']
+
+    # Now user :active should be able to retrieve permissions
+    # on group :public.
+    get("/arvados/v1/permissions/#{groups(:public).uuid}",
+        { :format => :json },
+        auth(:active))
+    assert_response :success
+
+    perm_uuids = json_response['items'].map { |item| item['uuid'] }
+    assert_includes perm_uuids, can_read_uuid, "can_read_uuid not found"
+    assert_includes perm_uuids, can_write_uuid, "can_write_uuid not found"
+    assert_includes perm_uuids, can_manage_uuid, "can_manage_uuid not found"
+  end
+
+  test "get_permissions returns 404 for nonexistent uuid" do
+    nonexistent = Group.generate_uuid
+    # make sure it really doesn't exist
+    get "/arvados/v1/groups/#{nonexistent}", nil, auth(:admin)
+    assert_response 404
+
+    get "/arvados/v1/permissions/#{nonexistent}", nil, auth(:active)
+    assert_response 404
+  end
+
+  test "get_permissions returns 403 if user can read but not manage" do
+    post "/arvados/v1/links", {
+      :link => {
+        tail_uuid: users(:active).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:public).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+
+    get "/arvados/v1/permissions/#{groups(:public).uuid}", nil, auth(:active)
+    assert_response 403
+  end
+
+  test "active user can read the empty collection" do
+    # The active user should be able to read the empty collection.
+
+    get("/arvados/v1/collections/#{empty_collection_uuid}",
+        { :format => :json },
+        auth(:active))
+    assert_response :success
+    assert_empty json_response['manifest_text'], "empty collection manifest_text is not empty"
+  end
+end
diff --git a/services/api/test/integration/pipeline_test.rb b/services/api/test/integration/pipeline_test.rb
new file mode 100644 (file)
index 0000000..1e9a4d5
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PipelineIntegrationTest < ActionDispatch::IntegrationTest
+  # These tests simulate the workflow of arv-run-pipeline-instance
+  # and other pipeline-running code.
+
+  def check_component_match(comp_key, comp_hash)
+    assert_response :success
+    built_json = json_response
+    built_component = built_json["components"][comp_key]
+    comp_hash.each_pair do |key, expected|
+      assert_equal(expected, built_component[key.to_s],
+                   "component's #{key} field changed")
+    end
+  end
+
+  test "creating a pipeline instance preserves required component parameters" do
+    comp_name = "test_component"
+    component = {
+      repository: "test_repo",
+      script: "test_script",
+      script_version: "test_refspec",
+      script_parameters: {},
+    }
+
+    post("/arvados/v1/pipeline_instances",
+         {pipeline_instance: {components: {comp_name => component}}.to_json},
+         auth(:active))
+    check_component_match(comp_name, component)
+    pi_uuid = json_response["uuid"]
+
+    @response = nil
+    get("/arvados/v1/pipeline_instances/#{pi_uuid}", {}, auth(:active))
+    check_component_match(comp_name, component)
+  end
+end
diff --git a/services/api/test/integration/reader_tokens_test.rb b/services/api/test/integration/reader_tokens_test.rb
new file mode 100644 (file)
index 0000000..a60be09
--- /dev/null
@@ -0,0 +1,82 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ReaderTokensTest < ActionDispatch::IntegrationTest
+  fixtures :all
+
+  def spectator_specimen
+    specimens(:owned_by_spectator).uuid
+  end
+
+  def get_specimens(main_auth, read_auth, formatter=:to_a)
+    params = {}
+    params[:reader_tokens] = [api_token(read_auth)].send(formatter) if read_auth
+    headers = {}
+    headers.merge!(auth(main_auth)) if main_auth
+    get('/arvados/v1/specimens', params, headers)
+  end
+
+  def get_specimen_uuids(main_auth, read_auth, formatter=:to_a)
+    get_specimens(main_auth, read_auth, formatter)
+    assert_response :success
+    json_response['items'].map { |spec| spec['uuid'] }
+  end
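+
+  # Usage sketch: list the specimen uuids readable as :active while also
+  # presenting the :spectator token as a reader token, either as an array
+  # or as a JSON-encoded string:
+  #   get_specimen_uuids(:active, :spectator)
+  #   get_specimen_uuids(:active, :spectator, :to_json)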
+
+  def assert_post_denied(main_auth, read_auth, formatter=:to_a)
+    if main_auth
+      headers = auth(main_auth)
+      expected = 403
+    else
+      headers = {}
+      expected = 401
+    end
+    post('/arvados/v1/specimens.json',
+         {specimen: {}, reader_tokens: [api_token(read_auth)].send(formatter)},
+         headers)
+    assert_response expected
+  end
+
+  test "active user can't see spectator specimen" do
+    # Other tests in this suite assume that the active user doesn't
+    # have read permission to the owned_by_spectator specimen.
+    # This test checks that this assumption still holds.
+    refute_includes(get_specimen_uuids(:active, nil), spectator_specimen,
+                    ["active user can read the owned_by_spectator specimen",
+                     "other tests will return false positives"].join(" - "))
+  end
+
+  [nil, :active_noscope].each do |main_auth|
+    [:spectator, :spectator_specimens].each do |read_auth|
+      [:to_a, :to_json].each do |formatter|
+        test "#{main_auth.inspect} auth with #{formatter} reader token #{read_auth} can#{"'t" if main_auth} read" do
+          get_specimens(main_auth, read_auth, formatter)
+          assert_response(main_auth ? 403 : 200)
+        end
+
+        test "#{main_auth.inspect} auth with #{formatter} reader token #{read_auth} can't write" do
+          assert_post_denied(main_auth, read_auth, formatter)
+        end
+      end
+    end
+  end
+
+  test "scopes are still limited with reader tokens" do
+    get('/arvados/v1/collections',
+        {reader_tokens: [api_token(:spectator_specimens)]},
+        auth(:active_noscope))
+    assert_response 403
+  end
+
+  test "reader tokens grant no permissions when expired" do
+    get_specimens(:active_noscope, :expired)
+    assert_response 403
+  end
+
+  test "reader tokens grant no permissions outside their scope" do
+    refute_includes(get_specimen_uuids(:active, :admin_vm), spectator_specimen,
+                    "scoped reader token granted permissions out of scope")
+  end
+end
diff --git a/services/api/test/integration/remote_user_test.rb b/services/api/test/integration/remote_user_test.rb
new file mode 100644 (file)
index 0000000..4473752
--- /dev/null
@@ -0,0 +1,287 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'webrick'
+require 'webrick/https'
+require 'test_helper'
+require 'helpers/users_test_helper'
+
+class RemoteUsersTest < ActionDispatch::IntegrationTest
+  include DbCurrentTime
+
+  def salted_active_token(remote:)
+    salt_token(fixture: :active, remote: remote).sub('/zzzzz-', '/'+remote+'-')
+  end
+
+  def auth(remote:)
+    token = salted_active_token(remote: remote)
+    {"HTTP_AUTHORIZATION" => "Bearer #{token}"}
+  end
+
+  # For remote authentication tests, we bring up a simple stub server
+  # (on a port chosen by webrick) and configure the SUT so the stub is
+  # responsible for clusters "zbbbb" (a well-behaved cluster) and
+  # "zbork" (a misbehaving cluster).
+  #
+  # Test cases can override the stub's default response to
+  # .../users/current by changing @stub_status and @stub_content.
+  setup do
+    clnt = HTTPClient.new
+    clnt.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
+    HTTPClient.stubs(:new).returns clnt
+
+    @controller = Arvados::V1::UsersController.new
+    ready = Thread::Queue.new
+    srv = WEBrick::HTTPServer.new(
+      Port: 0,
+      Logger: WEBrick::Log.new(
+        Rails.root.join("log", "webrick.log").to_s,
+        WEBrick::Log::INFO),
+      AccessLog: [[File.open(Rails.root.join(
+                              "log", "webrick_access.log").to_s, 'a+'),
+                   WEBrick::AccessLog::COMBINED_LOG_FORMAT]],
+      SSLEnable: true,
+      SSLVerifyClient: OpenSSL::SSL::VERIFY_NONE,
+      SSLPrivateKey: OpenSSL::PKey::RSA.new(
+        File.open(Rails.root.join("tmp", "self-signed.key")).read),
+      SSLCertificate: OpenSSL::X509::Certificate.new(
+        File.open(Rails.root.join("tmp", "self-signed.pem")).read),
+      SSLCertName: [["CN", WEBrick::Utils::getservername]],
+      StartCallback: lambda { ready.push(true) })
+    srv.mount_proc '/discovery/v1/apis/arvados/v1/rest' do |req, res|
+      Rails.cache.delete 'arvados_v1_rest_discovery'
+      res.body = Arvados::V1::SchemaController.new.send(:discovery_doc).to_json
+    end
+    srv.mount_proc '/arvados/v1/users/current' do |req, res|
+      res.status = @stub_status
+      res.body = @stub_content.is_a?(String) ? @stub_content : @stub_content.to_json
+    end
+    Thread.new do
+      srv.start
+    end
+    ready.pop
+    @remote_server = srv
+    @remote_host = "127.0.0.1:#{srv.config[:Port]}"
+    Rails.configuration.remote_hosts = Rails.configuration.remote_hosts.merge({'zbbbb' => @remote_host,
+                                                                               'zbork' => @remote_host})
+    Arvados::V1::SchemaController.any_instance.stubs(:root_url).returns "https://#{@remote_host}"
+    @stub_status = 200
+    @stub_content = {
+      uuid: 'zbbbb-tpzed-000000000000000',
+      email: 'foo@example.com',
+      username: 'barney',
+      is_admin: true,
+      is_active: true,
+    }
+  end
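+
+  # Tests below override the stub to simulate remote-cluster behavior,
+  # e.g. a token the remote cluster no longer accepts:
+  #   @stub_status = 401
+  #   @stub_content = {error: 'not authorized'}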
+
+  teardown do
+    @remote_server.andand.stop
+  end
+
+  test 'authenticate with remote token' do
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response :success
+    assert_equal 'zbbbb-tpzed-000000000000000', json_response['uuid']
+    assert_equal false, json_response['is_admin']
+    assert_equal false, json_response['is_active']
+    assert_equal 'foo@example.com', json_response['email']
+    assert_equal 'barney', json_response['username']
+
+    # revoke original token
+    @stub_status = 401
+
+    # re-authorize before cache expires
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response :success
+
+    # simulate cache expiry
+    ApiClientAuthorization.where(
+      uuid: salted_active_token(remote: 'zbbbb').split('/')[1]).
+      update_all(expires_at: db_current_time - 1.minute)
+
+    # re-authorize after cache expires
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response 401
+
+    # simulate a cached token that indicates the wrong user (e.g., the
+    # local user entry was migrated out of the way, taking the cached
+    # token with it, or the authorizing cluster reassigned the
+    # authorization to a different user)
+    ApiClientAuthorization.where(
+      uuid: salted_active_token(remote: 'zbbbb').split('/')[1]).
+      update_all(user_id: users(:active).id)
+
+    # revive original token and re-authorize
+    @stub_status = 200
+    @stub_content[:username] = 'blarney'
+    @stub_content[:email] = 'blarney@example.com'
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response :success
+    assert_equal 'barney', json_response['username'], 'local username should not change once assigned'
+    assert_equal 'blarney@example.com', json_response['email']
+  end
+
+  test 'authenticate with remote token, remote username conflicts with local' do
+    @stub_content[:username] = 'active'
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response :success
+    assert_equal 'active2', json_response['username']
+  end
+
+  test 'authenticate with remote token, remote username is nil' do
+    @stub_content.delete :username
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response :success
+    assert_equal 'foo', json_response['username']
+  end
+
+  test 'authenticate with remote token from misbehaving remote cluster' do
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbork')
+    assert_response 401
+  end
+
+  test 'authenticate with remote token that fails validate' do
+    @stub_status = 401
+    @stub_content = {
+      error: 'not authorized',
+    }
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response 401
+  end
+
+  ['v2',
+   'v2/',
+   'v2//',
+   'v2///',
+   "v2/'; delete from users where 1=1; commit; select '/lol",
+   'v2/foo/bar',
+   'v2/zzzzz-gj3su-077z32aux8dg2s1',
+   'v2/zzzzz-gj3su-077z32aux8dg2s1/',
+   'v2/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi',
+   'v2/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi/zzzzz-gj3su-077z32aux8dg2s1',
+   'v2//3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi',
+   'v8/zzzzz-gj3su-077z32aux8dg2s1/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi',
+   '/zzzzz-gj3su-077z32aux8dg2s1/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi',
+   '"v2/zzzzz-gj3su-077z32aux8dg2s1/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"',
+   '/',
+   '//',
+   '///',
+  ].each do |token|
+    test "authenticate with malformed remote token #{token}" do
+      get '/arvados/v1/users/current', {format: 'json'}, {"HTTP_AUTHORIZATION" => "Bearer #{token}"}
+      assert_response 401
+    end
+  end
+
+  test "ignore extra fields in remote token" do
+    token = salted_active_token(remote: 'zbbbb') + '/foo/bar/baz/*'
+    get '/arvados/v1/users/current', {format: 'json'}, {"HTTP_AUTHORIZATION" => "Bearer #{token}"}
+    assert_response :success
+  end
+
+  test 'remote api server is not an api server' do
+    @stub_status = 200
+    @stub_content = '<html>bad</html>'
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response 401
+  end
+
+  ['zbbbb', 'z0000'].each do |token_valid_for|
+    test "validate #{token_valid_for}-salted token for remote cluster zbbbb" do
+      salted_token = salt_token(fixture: :active, remote: token_valid_for)
+      get '/arvados/v1/users/current', {format: 'json', remote: 'zbbbb'}, {
+            "HTTP_AUTHORIZATION" => "Bearer #{salted_token}"
+          }
+      if token_valid_for == 'zbbbb'
+        assert_response 200
+        assert_equal(users(:active).uuid, json_response['uuid'])
+      else
+        assert_response 401
+      end
+    end
+  end
+
+  test "list readable groups with salted token" do
+    salted_token = salt_token(fixture: :active, remote: 'zbbbb')
+    get '/arvados/v1/groups', {
+          format: 'json',
+          remote: 'zbbbb',
+          limit: 10000,
+        }, {
+          "HTTP_AUTHORIZATION" => "Bearer #{salted_token}"
+        }
+    assert_response 200
+    group_uuids = json_response['items'].collect { |i| i['uuid'] }
+    assert_includes(group_uuids, 'zzzzz-j7d0g-fffffffffffffff')
+    refute_includes(group_uuids, 'zzzzz-j7d0g-000000000000000')
+    assert_includes(group_uuids, groups(:aproject).uuid)
+    refute_includes(group_uuids, groups(:trashed_project).uuid)
+    refute_includes(group_uuids, groups(:testusergroup_admins).uuid)
+  end
+
+  test 'auto-activate user from trusted cluster' do
+    Rails.configuration.auto_activate_users_from = ['zbbbb']
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response :success
+    assert_equal 'zbbbb-tpzed-000000000000000', json_response['uuid']
+    assert_equal false, json_response['is_admin']
+    assert_equal true, json_response['is_active']
+    assert_equal 'foo@example.com', json_response['email']
+    assert_equal 'barney', json_response['username']
+  end
+
+  test 'pre-activate remote user' do
+    post '/arvados/v1/users', {
+           "user" => {
+             "uuid" => "zbbbb-tpzed-000000000000000",
+             "email" => 'foo@example.com',
+             "username" => 'barney',
+             "is_active" => true
+           }
+    }, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_token(:admin)}"}
+    assert_response :success
+
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response :success
+    assert_equal 'zbbbb-tpzed-000000000000000', json_response['uuid']
+    assert_equal nil, json_response['is_admin']
+    assert_equal true, json_response['is_active']
+    assert_equal 'foo@example.com', json_response['email']
+    assert_equal 'barney', json_response['username']
+  end
+
+  test "validate unsalted v2 token for remote cluster zbbbb" do
+    auth = api_client_authorizations(:active)
+    token = "v2/#{auth.uuid}/#{auth.api_token}"
+    get '/arvados/v1/users/current', {format: 'json', remote: 'zbbbb'}, {
+          "HTTP_AUTHORIZATION" => "Bearer #{token}"
+        }
+    assert_response :success
+    assert_equal(users(:active).uuid, json_response['uuid'])
+  end
+
+  test 'container request with runtime_token' do
+    [["valid local", "v2/#{api_client_authorizations(:active).uuid}/#{api_client_authorizations(:active).api_token}"],
+     ["valid remote", "v2/zbbbb-gj3su-000000000000000/abc"],
+     ["invalid local", "v2/#{api_client_authorizations(:active).uuid}/fakefakefake"],
+     ["invalid remote", "v2/zbork-gj3su-000000000000000/abc"],
+    ].each do |label, runtime_token|
+      post '/arvados/v1/container_requests', {
+             "container_request" => {
+               "command" => ["echo"],
+               "container_image" => "xyz",
+               "output_path" => "/",
+               "cwd" => "/",
+               "runtime_token" => runtime_token
+             }
+           }, {"HTTP_AUTHORIZATION" => "Bearer #{api_client_authorizations(:active).api_token}"}
+      if label.include? "invalid"
+        assert_response 422
+      else
+        assert_response :success
+      end
+    end
+  end
+
+end
diff --git a/services/api/test/integration/select_test.rb b/services/api/test/integration/select_test.rb
new file mode 100644 (file)
index 0000000..fb3f4f6
--- /dev/null
@@ -0,0 +1,104 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class SelectTest < ActionDispatch::IntegrationTest
+  test "should select just two columns" do
+    get "/arvados/v1/links", {:format => :json, :select => ['uuid', 'link_class']}, auth(:active)
+    assert_response :success
+    assert_equal json_response['items'].count, json_response['items'].select { |i|
+      i.count == 3 and i['uuid'] != nil and i['link_class'] != nil
+    }.count
+  end
+
+  test "fewer distinct than total count" do
+    get "/arvados/v1/links", {:format => :json, :select => ['link_class'], :distinct => false}, auth(:active)
+    assert_response :success
+    links = json_response['items']
+
+    get "/arvados/v1/links", {:format => :json, :select => ['link_class'], :distinct => true}, auth(:active)
+    assert_response :success
+    distinct = json_response['items']
+
+    assert_operator(distinct.count, :<, links.count,
+                    "distinct count should be less than link count")
+    assert_equal links.uniq.count, distinct.count
+  end
+
+  test "select with order" do
+    get "/arvados/v1/links", {:format => :json, :select => ['uuid'], :order => ["uuid asc"]}, auth(:active)
+    assert_response :success
+
+    assert json_response['items'].length > 0
+
+    p = ""
+    json_response['items'].each do |i|
+      assert i['uuid'] > p
+      p = i['uuid']
+    end
+  end
+
+  test "select with default order" do
+    get "/arvados/v1/links", {format: :json, select: ['uuid']}, auth(:admin)
+    assert_response :success
+    uuids = json_response['items'].collect { |i| i['uuid'] }
+    assert_equal uuids, uuids.sort
+  end
+
+  def assert_link_classes_ascend(current_class, prev_class)
+    # Databases and Ruby don't always agree about string ordering with
+    # punctuation.  If the strings aren't ascending normally, check
+    # that they're equal up to punctuation.
+    if current_class < prev_class
+      class_prefix = current_class.split(/\W/).first
+      assert prev_class.start_with?(class_prefix)
+    end
+  end
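+
+  # e.g. (hypothetical values): Ruby sorts 'job.success' before 'jobs',
+  # while a collation that ignores punctuation may not; the check then
+  # only requires that 'jobs' start with the prefix 'job'.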
+
+  test "select two columns with order" do
+    get "/arvados/v1/links", {:format => :json, :select => ['link_class', 'uuid'], :order => ['link_class asc', "uuid desc"]}, auth(:active)
+    assert_response :success
+
+    assert json_response['items'].length > 0
+
+    prev_link_class = ""
+    prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+
+    json_response['items'].each do |i|
+      if prev_link_class != i['link_class']
+        prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+      end
+
+      assert_link_classes_ascend(i['link_class'], prev_link_class)
+      assert i['uuid'] < prev_uuid
+
+      prev_link_class = i['link_class']
+      prev_uuid = i['uuid']
+    end
+  end
+
+  test "select two columns with old-style order syntax" do
+    get "/arvados/v1/links", {:format => :json, :select => ['link_class', 'uuid'], :order => 'link_class asc, uuid desc'}, auth(:active)
+    assert_response :success
+
+    assert json_response['items'].length > 0
+
+    prev_link_class = ""
+    prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+
+    json_response['items'].each do |i|
+      if prev_link_class != i['link_class']
+        prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+      end
+
+      assert_link_classes_ascend(i['link_class'], prev_link_class)
+      assert i['uuid'] < prev_uuid
+
+      prev_link_class = i['link_class']
+      prev_uuid = i['uuid']
+    end
+  end
+
+end
diff --git a/services/api/test/integration/serialized_encoding_test.rb b/services/api/test/integration/serialized_encoding_test.rb
new file mode 100644 (file)
index 0000000..d7599bc
--- /dev/null
@@ -0,0 +1,55 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/git_test_helper'
+
+class SerializedEncodingTest < ActionDispatch::IntegrationTest
+  include GitTestHelper
+
+  fixtures :all
+
+  {
+    api_client_authorization: {scopes: []},
+
+    human: {properties: {eye_color: 'gray'}},
+
+    job: {
+      repository: 'active/foo',
+      runtime_constraints: {docker_image: 'arvados/apitestfixture'},
+      script: 'hash',
+      script_version: 'master',
+      script_parameters: {pattern: 'foobar'},
+      tasks_summary: {todo: 0},
+    },
+
+    job_task: {parameters: {pattern: 'foo'}},
+
+    link: {link_class: 'test', name: 'test', properties: {foo: :bar}},
+
+    node: {info: {uptime: 1234}},
+
+    pipeline_instance: {
+      components: {"job1" => {parameters: {pattern: "xyzzy"}}},
+      components_summary: {todo: 0},
+      properties: {test: true},
+    },
+
+    pipeline_template: {
+      components: {"job1" => {parameters: {pattern: "xyzzy"}}},
+    },
+
+    specimen: {properties: {eye_color: 'meringue'}},
+
+    trait: {properties: {eye_color: 'brown'}},
+
+    user: {prefs: {cookies: 'thin mint'}},
+  }.each_pair do |resource, postdata|
+    test "create json-encoded #{resource.to_s}" do
+      post("/arvados/v1/#{resource.to_s.pluralize}",
+           {resource => postdata.to_json}, auth(:admin_trustedclient))
+      assert_response :success
+    end
+  end
+end
diff --git a/services/api/test/integration/user_sessions_test.rb b/services/api/test/integration/user_sessions_test.rb
new file mode 100644 (file)
index 0000000..f508599
--- /dev/null
@@ -0,0 +1,164 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class UserSessionsApiTest < ActionDispatch::IntegrationTest
+  # remote prefix & return url packed into the return_to param passed around
+  # between API and SSO provider.
+  def client_url(remote: nil)
+    url = ',https://wb.example.com'
+    url = "#{remote}#{url}" unless remote.nil?
+    url
+  end
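+
+  # e.g. client_url(remote: 'zbbbb') # => "zbbbb,https://wb.example.com"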
+
+  def mock_auth_with(email: nil, username: nil, identity_url: nil, remote: nil, expected_response: :redirect)
+    mock = {
+      'provider' => 'josh_id',
+      'uid' => 'https://edward.example.com',
+      'info' => {
+        'identity_url' => 'https://edward.example.com',
+        'name' => 'Edward Example',
+        'first_name' => 'Edward',
+        'last_name' => 'Example',
+      },
+    }
+    mock['info']['email'] = email unless email.nil?
+    mock['info']['username'] = username unless username.nil?
+    mock['info']['identity_url'] = identity_url unless identity_url.nil?
+    post('/auth/josh_id/callback',
+         {return_to: client_url(remote: remote)},
+         {'omniauth.auth' => mock})
+
+    errors = {
+      :redirect => 'Did not redirect to client with token',
+      400 => 'Did not return Bad Request error',
+    }
+    assert_response expected_response, errors[expected_response]
+  end
+
+  test 'assign username from sso' do
+    mock_auth_with(email: 'foo@example.com', username: 'bar')
+    u = assigns(:user)
+    assert_equal 'bar', u.username
+  end
+
+  test 'no assign username from sso' do
+    mock_auth_with(email: 'foo@example.com')
+    u = assigns(:user)
+    assert_equal 'foo', u.username
+  end
+
+  test 'existing user login' do
+    mock_auth_with(identity_url: "https://active-user.openid.local")
+    u = assigns(:user)
+    assert_equal 'zzzzz-tpzed-xurymjxw79nv3jz', u.uuid
+  end
+
+  test 'user redirect_to_user_uuid' do
+    mock_auth_with(identity_url: "https://redirects-to-active-user.openid.local")
+    u = assigns(:user)
+    assert_equal 'zzzzz-tpzed-xurymjxw79nv3jz', u.uuid
+  end
+
+  test 'user double redirect_to_user_uuid' do
+    mock_auth_with(identity_url: "https://double-redirects-to-active-user.openid.local")
+    u = assigns(:user)
+    assert_equal 'zzzzz-tpzed-xurymjxw79nv3jz', u.uuid
+  end
+
+  test 'create new user during omniauth callback' do
+    mock_auth_with(email: 'edward@example.com')
+    assert_equal(0, @response.redirect_url.index(client_url.split(',', 2)[1]),
+                 'Redirected to wrong address after successful login: was ' +
+                 @response.redirect_url + ', expected ' + client_url.split(',', 2)[1] + '[...]')
+    assert_not_nil(@response.redirect_url.index('api_token='),
+                   'Expected api_token in query string of redirect url ' +
+                   @response.redirect_url)
+  end
+
+  test 'issue salted token from omniauth callback with remote param' do
+    mock_auth_with(email: 'edward@example.com', remote: 'zbbbb')
+    api_client_auth = assigns(:api_client_auth)
+    assert_not_nil api_client_auth
+    assert_includes(@response.redirect_url, 'api_token=' + api_client_auth.salted_token(remote: 'zbbbb'))
+  end
+
+  test 'error out from omniauth callback with invalid remote param' do
+    mock_auth_with(email: 'edward@example.com', remote: 'invalid_cluster_id', expected_response: 400)
+  end
+
+  # Test various combinations of auto_setup configuration and email
+  # address provided during a new user's first session setup.
+  [{result: :nope, email: nil, cfg: {auto: true, repo: true, vm: true}},
+   {result: :yup, email: nil, cfg: {auto: true}},
+   {result: :nope, email: '@example.com', cfg: {auto: true, repo: true, vm: true}},
+   {result: :yup, email: '@example.com', cfg: {auto: true}},
+   {result: :nope, email: 'root@', cfg: {auto: true, repo: true, vm: true}},
+   {result: :nope, email: 'root@', cfg: {auto: true, repo: true}},
+   {result: :nope, email: 'root@', cfg: {auto: true, vm: true}},
+   {result: :yup, email: 'root@', cfg: {auto: true}},
+   {result: :nope, email: 'gitolite@', cfg: {auto: true, repo: true}},
+   {result: :nope, email: '*_*@', cfg: {auto: true, vm: true}},
+   {result: :yup, email: 'toor@', cfg: {auto: true, vm: true, repo: true}},
+   {result: :yup, email: 'foo@', cfg: {auto: true, vm: true},
+     uniqprefix: 'foo'},
+   {result: :yup, email: 'foo@', cfg: {auto: true, repo: true},
+     uniqprefix: 'foo'},
+   {result: :yup, email: 'auto_setup_vm_login@', cfg: {auto: true, repo: true},
+     uniqprefix: 'auto_setup_vm_login'},
+   ].each do |testcase|
+    test "user auto-activate #{testcase.inspect}" do
+      # Configure auto_setup behavior according to testcase[:cfg]
+      Rails.configuration.auto_setup_new_users = testcase[:cfg][:auto]
+      Rails.configuration.auto_setup_new_users_with_vm_uuid =
+        (testcase[:cfg][:vm] ? virtual_machines(:testvm).uuid : false)
+      Rails.configuration.auto_setup_new_users_with_repository =
+        testcase[:cfg][:repo]
+
+      mock_auth_with(email: testcase[:email])
+      u = assigns(:user)
+      vm_links = Link.where('link_class=? and tail_uuid=? and head_uuid like ?',
+                            'permission', u.uuid,
+                            '%-' + VirtualMachine.uuid_prefix + '-%')
+      repo_links = Link.where('link_class=? and tail_uuid=? and head_uuid like ?',
+                              'permission', u.uuid,
+                              '%-' + Repository.uuid_prefix + '-%')
+      repos = Repository.where('uuid in (?)', repo_links.collect(&:head_uuid))
+      case testcase[:result]
+      when :nope
+        assert_equal false, u.is_invited, "should not have been set up"
+        assert_empty vm_links, "should not have VM login permission"
+        assert_empty repo_links, "should not have repo permission"
+      when :yup
+        assert_equal true, u.is_invited
+        if testcase[:cfg][:vm]
+          assert_equal 1, vm_links.count, "wrong number of VM perm links"
+        else
+          assert_empty vm_links, "should not have VM login permission"
+        end
+        if testcase[:cfg][:repo]
+          assert_equal 1, repo_links.count, "wrong number of repo perm links"
+          assert_equal 1, repos.count, "wrong number of repos"
+          assert_equal 'can_manage', repo_links.first.name, "wrong perm type"
+        else
+          assert_empty repo_links, "should not have repo permission"
+        end
+      end
+      if (prefix = testcase[:uniqprefix])
+        # This email address conflicts with a test fixture. Make sure
+        # every VM login and repository name got digits added to make
+        # it unique.
+        (repos.collect(&:name) +
+         vm_links.collect { |link| link.properties['username'] }
+         ).each do |name|
+          r = name.match(/^(.{#{prefix.length}})(\d+)$/)
+          assert_not_nil r, "#{name.inspect} does not match {prefix}\\d+"
+          assert_equal(prefix, r[1],
+                       "#{name.inspect} was not {#{prefix.inspect} plus digits}")
+        end
+      end
+    end
+  end
+end
diff --git a/services/api/test/integration/users_test.rb b/services/api/test/integration/users_test.rb
new file mode 100644 (file)
index 0000000..28e43b8
--- /dev/null
@@ -0,0 +1,254 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/users_test_helper'
+
+class UsersTest < ActionDispatch::IntegrationTest
+  include UsersTestHelper
+
+  test "setup user multiple times" do
+    repo_name = 'usertestrepo'
+
+    post "/arvados/v1/users/setup", {
+      repo_name: repo_name,
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      user: {
+        uuid: 'zzzzz-tpzed-abcdefghijklmno',
+        first_name: "in_create_test_first_name",
+        last_name: "test_last_name",
+        email: "foo@example.com"
+      }
+    }, auth(:admin)
+
+    assert_response :success
+
+    response_items = json_response['items']
+
+    created = find_obj_in_resp response_items, 'arvados#user', nil
+
+    assert_equal 'in_create_test_first_name', created['first_name']
+    assert_not_nil created['uuid'], 'expected non-null uuid for the new user'
+    assert_equal 'zzzzz-tpzed-abcdefghijklmno', created['uuid']
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_nil created['identity_url'], 'expected no identity_url'
+
+    # arvados#user, repo link and link add user to 'All users' group
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        created['uuid'], created['email'], 'arvados#user', false, 'arvados#user'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+
+    verify_system_group_permission_link_for created['uuid']
+
+    # invoke setup again with the same data
+    post "/arvados/v1/users/setup", {
+      repo_name: repo_name,
+      vm_uuid: virtual_machines(:testvm).uuid,
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      user: {
+        uuid: 'zzzzz-tpzed-abcdefghijklmno',
+        first_name: "in_create_test_first_name",
+        last_name: "test_last_name",
+        email: "foo@example.com"
+      }
+    }, auth(:admin)
+    assert_response 422         # cannot create another user with same UUID
+
+    # invoke setup on the same user
+    post "/arvados/v1/users/setup", {
+      repo_name: repo_name,
+      vm_uuid: virtual_machines(:testvm).uuid,
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      uuid: 'zzzzz-tpzed-abcdefghijklmno',
+    }, auth(:admin)
+
+    response_items = json_response['items']
+
+    created = find_obj_in_resp response_items, 'arvados#user', nil
+    assert_equal 'in_create_test_first_name', created['first_name']
+    assert_not_nil created['uuid'], 'expected non-null uuid for the new user'
+    assert_equal 'zzzzz-tpzed-abcdefghijklmno', created['uuid']
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_nil created['identity_url'], 'expected no identity_url'
+
+    # arvados#user, repo link and link add user to 'All users' group
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
+        virtual_machines(:testvm).uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+
+    verify_system_group_permission_link_for created['uuid']
+  end
+
+  test "setup user in multiple steps and verify response" do
+    post "/arvados/v1/users/setup", {
+      openid_prefix: 'http://www.example.com/account',
+      user: {
+        email: "foo@example.com"
+      }
+    }, auth(:admin)
+
+    assert_response :success
+    response_items = json_response['items']
+    created = find_obj_in_resp response_items, 'arvados#user', nil
+
+    assert_not_nil created['uuid'], 'expected uuid for new user'
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_equal created['email'], 'foo@example.com', 'expected input email'
+
+    # three new links: system_group, arvados#user, and 'All users' group.
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        created['uuid'], created['email'], 'arvados#user', false, 'arvados#user'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+
+    # invoke setup with a repository
+    post "/arvados/v1/users/setup", {
+      openid_prefix: 'http://www.example.com/account',
+      repo_name: 'newusertestrepo',
+      uuid: created['uuid']
+    }, auth(:admin)
+
+    assert_response :success
+
+    response_items = json_response['items']
+    created = find_obj_in_resp response_items, 'arvados#user', nil
+
+    assert_equal 'foo@example.com', created['email'], 'expected input email'
+
+    # verify links
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'foo/newusertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+
+    # invoke setup with a vm_uuid
+    post "/arvados/v1/users/setup", {
+      vm_uuid: virtual_machines(:testvm).uuid,
+      openid_prefix: 'http://www.example.com/account',
+      user: {
+        email: 'junk_email'
+      },
+      uuid: created['uuid']
+    }, auth(:admin)
+
+    assert_response :success
+
+    response_items = json_response['items']
+    created = find_obj_in_resp response_items, 'arvados#user', nil
+
+    assert_equal created['email'], 'foo@example.com', 'expected original email'
+
+    # verify links
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
+        virtual_machines(:testvm).uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "setup and unsetup user" do
+    post "/arvados/v1/users/setup", {
+      repo_name: 'newusertestrepo',
+      vm_uuid: virtual_machines(:testvm).uuid,
+      user: {email: 'foo@example.com'},
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }, auth(:admin)
+
+    assert_response :success
+    response_items = json_response['items']
+    created = find_obj_in_resp response_items, 'arvados#user', nil
+    assert_not_nil created['uuid'], 'expected uuid for the new user'
+    assert_equal created['email'], 'foo@example.com', 'expected given email'
+
+    # five extra links: system_group, login, group, repo and vm
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        created['uuid'], created['email'], 'arvados#user', false, 'arvados#user'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'foo/newusertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
+        virtual_machines(:testvm).uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+
+    verify_link_existence created['uuid'], created['email'], true, true, true, true, false
+
+    post "/arvados/v1/users/#{created['uuid']}/unsetup", {}, auth(:admin)
+
+    assert_response :success
+
+    created2 = json_response
+    assert_not_nil created2['uuid'], 'expected uuid for the newly created user'
+    assert_equal created['uuid'], created2['uuid'], 'expected uuid not found'
+
+    verify_link_existence created['uuid'], created['email'], false, false, false, false, false
+  end
+
+  def find_obj_in_resp(response_items, kind, head_kind=nil)
+    response_items.each do |x|
+      if x && x['kind']
+        return x if (x['kind'] == kind && x['head_kind'] == head_kind)
+      end
+    end
+    nil
+  end
+
+  test 'merge active into project_viewer account' do
+    post('/arvados/v1/groups', {
+           group: {
+             group_class: 'project',
+             name: "active user's stuff",
+           },
+         }, auth(:project_viewer))
+    assert_response(:success)
+    project_uuid = json_response['uuid']
+
+    post('/arvados/v1/users/merge', {
+           new_user_token: api_client_authorizations(:project_viewer_trustedclient).api_token,
+           new_owner_uuid: project_uuid,
+           redirect_to_new_user: true,
+         }, auth(:active_trustedclient))
+    assert_response(:success)
+
+    get('/arvados/v1/users/current', {}, auth(:active))
+    assert_response(:success)
+    assert_equal(users(:project_viewer).uuid, json_response['uuid'])
+
+    get('/arvados/v1/authorized_keys/' + authorized_keys(:active).uuid, {}, auth(:active))
+    assert_response(:success)
+    assert_equal(users(:project_viewer).uuid, json_response['owner_uuid'])
+    assert_equal(users(:project_viewer).uuid, json_response['authorized_user_uuid'])
+
+    get('/arvados/v1/repositories/' + repositories(:foo).uuid, {}, auth(:active))
+    assert_response(:success)
+    assert_equal(users(:project_viewer).uuid, json_response['owner_uuid'])
+
+    get('/arvados/v1/groups/' + groups(:aproject).uuid, {}, auth(:active))
+    assert_response(:success)
+    assert_equal(project_uuid, json_response['owner_uuid'])
+  end
+end
diff --git a/services/api/test/integration/valid_links_test.rb b/services/api/test/integration/valid_links_test.rb
new file mode 100644 (file)
index 0000000..ed705fc
--- /dev/null
@@ -0,0 +1,43 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ValidLinksTest < ActionDispatch::IntegrationTest
+  fixtures :all
+
+  test "tail must exist on update" do
+    admin_auth = {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin).api_token}"}
+
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        link_class: 'test',
+        name: 'stuff',
+        head_uuid: users(:active).uuid,
+        tail_uuid: virtual_machines(:testvm).uuid
+      }
+    }, admin_auth
+    assert_response :success
+    u = json_response['uuid']
+
+    put "/arvados/v1/links/#{u}", {
+      :format => :json,
+      :link => {
+        tail_uuid: virtual_machines(:testvm2).uuid
+      }
+    }, admin_auth
+    assert_response :success
+    assert_equal virtual_machines(:testvm2).uuid, (ActiveSupport::JSON.decode @response.body)['tail_uuid']
+
+    put "/arvados/v1/links/#{u}", {
+      :format => :json,
+      :link => {
+        tail_uuid: 'zzzzz-tpzed-xyzxyzxerrrorxx'
+      }
+    }, admin_auth
+    assert_response 422
+  end
+
+end
diff --git a/services/api/test/job_logs/crunchstatshort.log b/services/api/test/job_logs/crunchstatshort.log
new file mode 100644 (file)
index 0000000..7b39318
--- /dev/null
@@ -0,0 +1 @@
+2014-11-07_23:33:51 qr1hi-8i9sb-nf3qk0xzwwz3lre 31708 1 stderr crunchstat: cpu 1970.8200 user 60.2700 sys 8 cpus -- interval 10.0002 seconds 35.3900 user 0.8600 sys
diff --git a/services/api/test/performance/links_index_test.rb b/services/api/test/performance/links_index_test.rb
new file mode 100644 (file)
index 0000000..b1b5385
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'rails/performance_test_help'
+
+class IndexTest < ActionDispatch::PerformanceTest
+  def test_links_index
+    get '/arvados/v1/links', {format: :json}, auth(:admin)
+  end
+  def test_links_index_with_filters
+    get '/arvados/v1/links', {format: :json, filters: [%w[head_uuid is_a arvados#collection]].to_json}, auth(:admin)
+  end
+  def test_collections_index
+    get '/arvados/v1/collections', {format: :json}, auth(:admin)
+  end
+end
diff --git a/services/api/test/performance/permission_test.rb b/services/api/test/performance/permission_test.rb
new file mode 100644 (file)
index 0000000..e4a3b0a
--- /dev/null
@@ -0,0 +1,55 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'benchmark'
+
+
+def create_eight parent
+  uuids = []
+  values = []
+  8.times do
+    uuid = Group.generate_uuid
+    values.push "('#{uuid}', '#{parent}', now(), now(), '#{uuid}')"
+    uuids.push uuid
+  end
+  ActiveRecord::Base.connection.execute("INSERT INTO groups (uuid, owner_uuid, created_at, updated_at, name) VALUES #{values.join ','}")
+  uuids
+end
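+
+# create_eight is used below to build a four-level 8-ary tree of groups
+# under a single root: 1 + 8 + 8**2 + 8**3 + 8**4 = 4681 records.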
+
+class PermissionPerfTest < ActionDispatch::IntegrationTest
+  def test_groups_index
+    n = 0
+    act_as_system_user do
+      puts("Time spent creating records:", Benchmark.measure do
+             ActiveRecord::Base.transaction do
+               root = Group.create!(owner_uuid: users(:permission_perftest).uuid)
+               n += 1
+               a = create_eight root.uuid
+               n += 8
+               a.each do |p1|
+                 b = create_eight p1
+                 n += 8
+                 b.each do |p2|
+                   c = create_eight p2
+                   n += 8
+                   c.each do |p3|
+                     d = create_eight p3
+                     n += 8
+                   end
+                 end
+               end
+             end
+           end)
+    end
+    puts "created #{n}"
+    puts "Time spent getting group index:"
+    (0..4).each do
+      puts(Benchmark.measure do
+             get '/arvados/v1/groups', {format: :json, limit: 1000}, auth(:permission_perftest)
+             assert json_response['items_available'] >= n
+           end)
+    end
+  end
+end
diff --git a/services/api/test/tasks/delete_old_container_logs_test.rb b/services/api/test/tasks/delete_old_container_logs_test.rb
new file mode 100644 (file)
index 0000000..45278ac
--- /dev/null
@@ -0,0 +1,54 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'rake'
+
+Rake.application.rake_require "tasks/delete_old_container_logs"
+Rake::Task.define_task(:environment)
+
+class DeleteOldContainerLogsTaskTest < ActiveSupport::TestCase
+  TASK_NAME = "db:delete_old_container_logs"
+
+  def log_uuids(*fixture_names)
+    fixture_names.map { |name| logs(name).uuid }
+  end
+
+  def run_with_expiry(clean_after)
+    Rails.configuration.clean_container_log_rows_after = clean_after
+    Rake::Task[TASK_NAME].reenable
+    Rake.application.invoke_task TASK_NAME
+  end
+
+  def check_log_existence(test_method, fixture_uuids)
+    uuids_now = Log.where("object_uuid LIKE :pattern AND event_type in ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat')", pattern: "%-dz642-%").map(&:uuid)
+    fixture_uuids.each do |expect_uuid|
+      send(test_method, uuids_now, expect_uuid)
+    end
+  end
+
+  test "delete all finished logs" do
+    uuids_to_keep = log_uuids(:stderr_for_running_container,
+                              :crunchstat_for_running_container)
+    uuids_to_clean = log_uuids(:stderr_for_previous_container,
+                               :crunchstat_for_previous_container,
+                               :stderr_for_ancient_container,
+                               :crunchstat_for_ancient_container)
+    run_with_expiry(1)
+    check_log_existence(:assert_includes, uuids_to_keep)
+    check_log_existence(:refute_includes, uuids_to_clean)
+  end
+
+  test "delete old finished logs" do
+    uuids_to_keep = log_uuids(:stderr_for_running_container,
+                              :crunchstat_for_running_container,
+                              :stderr_for_previous_container,
+                              :crunchstat_for_previous_container)
+    uuids_to_clean = log_uuids(:stderr_for_ancient_container,
+                               :crunchstat_for_ancient_container)
+    run_with_expiry(360.days)
+    check_log_existence(:assert_includes, uuids_to_keep)
+    check_log_existence(:refute_includes, uuids_to_clean)
+  end
+end
diff --git a/services/api/test/tasks/delete_old_job_logs_test.rb b/services/api/test/tasks/delete_old_job_logs_test.rb
new file mode 100644 (file)
index 0000000..4d4cdbc
--- /dev/null
@@ -0,0 +1,54 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'rake'
+
+Rake.application.rake_require "tasks/delete_old_job_logs"
+Rake::Task.define_task(:environment)
+
+class DeleteOldJobLogsTaskTest < ActiveSupport::TestCase
+  TASK_NAME = "db:delete_old_job_logs"
+
+  def log_uuids(*fixture_names)
+    fixture_names.map { |name| logs(name).uuid }
+  end
+
+  def run_with_expiry(clean_after)
+    Rails.configuration.clean_job_log_rows_after = clean_after
+    Rake::Task[TASK_NAME].reenable
+    Rake.application.invoke_task TASK_NAME
+  end
+
+  def job_stderr_logs
+    Log.where("object_uuid LIKE :pattern AND event_type = :etype",
+              pattern: "_____-8i9sb-_______________",
+              etype: "stderr")
+  end
+
+  def check_existence(test_method, fixture_uuids)
+    uuids_now = job_stderr_logs.map(&:uuid)
+    fixture_uuids.each do |expect_uuid|
+      send(test_method, uuids_now, expect_uuid)
+    end
+  end
+
+  test "delete all logs" do
+    uuids_to_keep = log_uuids(:crunchstat_for_running_job)
+    uuids_to_clean = log_uuids(:crunchstat_for_previous_job,
+                               :crunchstat_for_ancient_job)
+    run_with_expiry(1)
+    check_existence(:assert_includes, uuids_to_keep)
+    check_existence(:refute_includes, uuids_to_clean)
+  end
+
+  test "delete only old logs" do
+    uuids_to_keep = log_uuids(:crunchstat_for_running_job,
+                              :crunchstat_for_previous_job)
+    uuids_to_clean = log_uuids(:crunchstat_for_ancient_job)
+    run_with_expiry(360.days)
+    check_existence(:assert_includes, uuids_to_keep)
+    check_existence(:refute_includes, uuids_to_clean)
+  end
+end
diff --git a/services/api/test/test.git.tar b/services/api/test/test.git.tar
new file mode 100644 (file)
index 0000000..8f6a48d
Binary files /dev/null and b/services/api/test/test.git.tar differ
diff --git a/services/api/test/test_helper.rb b/services/api/test/test_helper.rb
new file mode 100644 (file)
index 0000000..ffd50d8
--- /dev/null
@@ -0,0 +1,199 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+ENV["RAILS_ENV"] = "test"
+unless ENV["NO_COVERAGE_TEST"]
+  begin
+    verbose_orig = $VERBOSE
+    begin
+      $VERBOSE = nil
+      require 'simplecov'
+      require 'simplecov-rcov'
+    ensure
+      $VERBOSE = verbose_orig
+    end
+    class SimpleCov::Formatter::MergedFormatter
+      def format(result)
+        SimpleCov::Formatter::HTMLFormatter.new.format(result)
+        SimpleCov::Formatter::RcovFormatter.new.format(result)
+      end
+    end
+    SimpleCov.formatter = SimpleCov::Formatter::MergedFormatter
+    SimpleCov.start do
+      add_filter '/test/'
+      add_filter 'initializers/secret_token'
+      add_filter 'initializers/omniauth'
+    end
+  rescue Exception => e
+    $stderr.puts "SimpleCov unavailable (#{e}). Proceeding without."
+  end
+end
+
+require File.expand_path('../../config/environment', __FILE__)
+require 'rails/test_help'
+require 'mocha'
+require 'mocha/minitest'
+
+module ArvadosTestSupport
+  def json_response
+    Oj.strict_load response.body
+  end
+
+  def api_token(api_client_auth_name)
+    api_client_authorizations(api_client_auth_name).token
+  end
+
+  def auth(api_client_auth_name)
+    {'HTTP_AUTHORIZATION' => "Bearer #{api_token(api_client_auth_name)}"}
+  end
+
+  def show_errors model
+    return lambda { model.errors.full_messages.inspect }
+  end
+end
+
+class ActiveSupport::TestCase
+  include FactoryBot::Syntax::Methods
+  fixtures :all
+
+  include ArvadosTestSupport
+  include CurrentApiClient
+
+  teardown do
+    Thread.current[:api_client_ip_address] = nil
+    Thread.current[:api_client_authorization] = nil
+    Thread.current[:api_client_uuid] = nil
+    Thread.current[:api_client] = nil
+    Thread.current[:token] = nil
+    Thread.current[:user] = nil
+    restore_configuration
+  end
+
+  def assert_equal(expect, *args)
+    # assert_equal(nil, ...) is deprecated in recent Minitest, so route
+    # nil expectations to assert_nil.
+    if expect.nil?
+      assert_nil(*args)
+    else
+      super
+    end
+  end
+
+  def assert_not_allowed
+    # Provide a block that calls a Rails boolean "true or false" success value,
+    # like model.save or model.destroy.  This method will test that it either
+    # returns false, or raises a Permission Denied exception.
+    begin
+      refute(yield)
+    rescue ArvadosModel::PermissionDeniedError
+    end
+  end
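+
+  # Usage sketch (some_record is a placeholder for any ArvadosModel):
+  #   assert_not_allowed { some_record.destroy }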
+
+  def add_permission_link from_who, to_what, perm_type
+    act_as_system_user do
+      Link.create!(tail_uuid: from_who.uuid,
+                   head_uuid: to_what.uuid,
+                   link_class: 'permission',
+                   name: perm_type)
+    end
+  end
+
+  def restore_configuration
+    # Restore configuration settings changed during tests
+    $application_config.each do |k,v|
+      if k.match(/^[^.]*$/)
+        Rails.configuration.send("#{k}=", v)
+      end
+    end
+  end
+
+  def set_user_from_auth(auth_name)
+    client_auth = api_client_authorizations(auth_name)
+    Thread.current[:api_client_authorization] = client_auth
+    Thread.current[:api_client] = client_auth.api_client
+    Thread.current[:user] = client_auth.user
+    Thread.current[:token] = client_auth.token
+  end
+
+  def expect_json
+    self.request.headers["Accept"] = "text/json"
+  end
+
+  def authorize_with api_client_auth_name
+    authorize_with_token api_client_authorizations(api_client_auth_name).token
+  end
+
+  def authorize_with_token token
+    t = token
+    t = t.token if t.respond_to? :token
+    ArvadosApiToken.new.call("rack.input" => "",
+                             "HTTP_AUTHORIZATION" => "Bearer #{t}")
+  end
+
+  def salt_token(fixture:, remote:)
+    auth = api_client_authorizations(fixture)
+    uuid = auth.uuid
+    token = auth.api_token
+    hmac = OpenSSL::HMAC.hexdigest('sha1', token, remote)
+    return "v2/#{uuid}/#{hmac}"
+  end
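+
+  # A bare v2 token has the form "v2/<uuid>/<secret>"; the salted form
+  # for a remote cluster replaces <secret> with hex(HMAC-SHA1(secret,
+  # remote)), e.g. "v2/zzzzz-gj3su-077z32aux8dg2s1/" + 40 hex digits.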
+
+  def self.skip_slow_tests?
+    !(ENV['RAILS_TEST_SHORT'] || '').empty?
+  end
+
+  def self.skip(*args, &block)
+  end
+
+  def self.slow_test(name, &block)
+    test(name, &block) unless skip_slow_tests?
+  end
+end
+
+class ActionController::TestCase
+  setup do
+    @test_counter = 0
+  end
+
+  def check_counter action
+    @test_counter += 1
+    if @test_counter == 2
+      assert_equal 1, 2, "Multiple actions in functional test"
+    end
+  end
+
+  [:get, :post, :put, :patch, :delete].each do |method|
+    define_method method do |action, *args|
+      check_counter action
+      super action, *args
+    end
+  end
+
+  def self.suite
+    s = super
+    def s.run(*args)
+      @test_case.startup()
+      begin
+        super
+      ensure
+        @test_case.shutdown()
+      end
+    end
+    s
+  end
+  def self.startup; end
+  def self.shutdown; end
+end
+
+class ActionDispatch::IntegrationTest
+  teardown do
+    Thread.current[:api_client_ip_address] = nil
+    Thread.current[:api_client_authorization] = nil
+    Thread.current[:api_client_uuid] = nil
+    Thread.current[:api_client] = nil
+    Thread.current[:token] = nil
+    Thread.current[:user] = nil
+  end
+end
+
+# Ensure permissions are computed from the test fixtures.
+User.invalidate_permissions_cache
diff --git a/services/api/test/unit/.gitkeep b/services/api/test/unit/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/test/unit/api_client_authorization_test.rb b/services/api/test/unit/api_client_authorization_test.rb
new file mode 100644 (file)
index 0000000..c390a02
--- /dev/null
@@ -0,0 +1,29 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'sweep_trashed_objects'
+
+class ApiClientAuthorizationTest < ActiveSupport::TestCase
+  include CurrentApiClient
+
+  [:admin_trustedclient, :active_trustedclient].each do |token|
+    test "ApiClientAuthorization can be created then deleted by #{token}" do
+      set_user_from_auth token
+      x = ApiClientAuthorization.create!(user_id: current_user.id,
+                                         api_client_id: 0,
+                                         scopes: [])
+      newtoken = x.api_token
+      assert x.destroy, "Failed to destroy new ApiClientAuth"
+      assert_empty ApiClientAuthorization.where(api_token: newtoken), "Destroyed ApiClientAuth is still in database"
+    end
+  end
+
+  test "delete expired in SweepTrashedObjects" do
+    assert_not_empty ApiClientAuthorization.where(uuid: api_client_authorizations(:expired).uuid)
+    SweepTrashedObjects.sweep_now
+    assert_empty ApiClientAuthorization.where(uuid: api_client_authorizations(:expired).uuid)
+  end
+
+end
diff --git a/services/api/test/unit/api_client_test.rb b/services/api/test/unit/api_client_test.rb
new file mode 100644 (file)
index 0000000..fc7d1ee
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ApiClientTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/app_version_test.rb b/services/api/test/unit/app_version_test.rb
new file mode 100644 (file)
index 0000000..dd88004
--- /dev/null
@@ -0,0 +1,50 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class AppVersionTest < ActiveSupport::TestCase
+
+  setup do AppVersion.forget end
+
+  teardown do AppVersion.forget end
+
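+  # The Mocha .once expectations below double as the assertion: if
+  # AppVersion.hash shelled out to git on every call instead of memoizing,
+  # Mocha would fail the test on the second invocation.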
+  test 'invoke git processes only on first call' do
+    AppVersion.expects(:git).
+      with("status", "--porcelain").once.
+      yields " M services/api/README\n"
+    AppVersion.expects(:git).
+      with("log", "-n1", "--format=%H").once.
+      yields "da39a3ee5e6b4b0d3255bfef95601890afd80709\n"
+
+    (0..4).each do
+      v = AppVersion.hash
+      assert_equal 'da39a3ee-modified', v
+    end
+  end
+
+  test 'override with configuration "foobar"' do
+    Rails.configuration.source_version = 'foobar'
+    assert_equal 'foobar', AppVersion.hash
+  end
+
+  test 'override with configuration false' do
+    Rails.configuration.source_version = false
+    assert_not_equal 'foobar', AppVersion.hash
+  end
+
+  test 'override with file' do
+    path = Rails.root.join 'git-commit.version'
+    assert(!File.exist?(path),
+           "Packaged version file found in source tree: #{path}")
+    begin
+      File.open(path, 'w') do |f|
+        f.write "0.1.abc123\n"
+      end
+      assert_equal "0.1.abc123", AppVersion.hash
+    ensure
+      File.unlink path
+    end
+  end
+end
diff --git a/services/api/test/unit/application_test.rb b/services/api/test/unit/application_test.rb
new file mode 100644 (file)
index 0000000..679dddf
--- /dev/null
@@ -0,0 +1,36 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ApplicationTest < ActiveSupport::TestCase
+  include CurrentApiClient
+
+  test "test act_as_system_user" do
+    Thread.current[:user] = users(:active)
+    assert_equal users(:active), Thread.current[:user]
+    act_as_system_user do
+      assert_not_equal users(:active), Thread.current[:user]
+      assert_equal system_user, Thread.current[:user]
+    end
+    assert_equal users(:active), Thread.current[:user]
+  end
+
+  test "test act_as_system_user is exception safe" do
+    Thread.current[:user] = users(:active)
+    assert_equal users(:active), Thread.current[:user]
+    caught = false
+    begin
+      act_as_system_user do
+        assert_not_equal users(:active), Thread.current[:user]
+        assert_equal system_user, Thread.current[:user]
+        raise "Fail"
+      end
+    rescue
+      caught = true
+    end
+    assert caught
+    assert_equal users(:active), Thread.current[:user]
+  end
+end
diff --git a/services/api/test/unit/arvados_model_test.rb b/services/api/test/unit/arvados_model_test.rb
new file mode 100644 (file)
index 0000000..d070277
--- /dev/null
@@ -0,0 +1,281 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ArvadosModelTest < ActiveSupport::TestCase
+  fixtures :all
+
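+  # Returns the created Specimen, or nil if it didn't validate, so callers
+  # can use assert_nil to check that creation was refused.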
+  def create_with_attrs attrs
+    a = Specimen.create({material: 'caloric'}.merge(attrs))
+    a if a.valid?
+  end
+
+  test 'non-admin cannot assign uuid' do
+    set_user_from_auth :active_trustedclient
+    want_uuid = Specimen.generate_uuid
+    a = create_with_attrs(uuid: want_uuid)
+    assert_nil a, "Non-admin should not assign uuid."
+  end
+
+  test 'admin can assign valid uuid' do
+    set_user_from_auth :admin_trustedclient
+    want_uuid = Specimen.generate_uuid
+    a = create_with_attrs(uuid: want_uuid)
+    assert_equal want_uuid, a.uuid, "Admin should assign valid uuid."
+    assert a.uuid.length==27, "Auto assigned uuid length is wrong."
+  end
+
+  test 'admin cannot assign uuid with wrong object type' do
+    set_user_from_auth :admin_trustedclient
+    want_uuid = Human.generate_uuid
+    a = create_with_attrs(uuid: want_uuid)
+    assert_nil a, "Admin should not be able to assign invalid uuid."
+  end
+
+  test 'admin cannot assign badly formed uuid' do
+    set_user_from_auth :admin_trustedclient
+    a = create_with_attrs(uuid: "ntoheunthaoesunhasoeuhtnsaoeunhtsth")
+    assert_nil a, "Admin should not be able to assign invalid uuid."
+  end
+
+  test 'admin cannot assign empty uuid' do
+    set_user_from_auth :admin_trustedclient
+    a = create_with_attrs(uuid: "")
+    assert_nil a, "Admin cannot assign empty uuid."
+  end
+
+  [ {:a => 'foo'},
+    {'a' => :foo},
+    {:a => ['foo', 'bar']},
+    {'a' => [:foo, 'bar']},
+    {'a' => ['foo', :bar]},
+    {:a => [:foo, :bar]},
+    {:a => {'foo' => {'bar' => 'baz'}}},
+    {'a' => {:foo => {'bar' => 'baz'}}},
+    {'a' => {'foo' => {:bar => 'baz'}}},
+    {'a' => {'foo' => {'bar' => :baz}}},
+    {'a' => {'foo' => ['bar', :baz]}},
+  ].each do |x|
+    test "prevent symbol keys in serialized db columns: #{x.inspect}" do
+      set_user_from_auth :active
+      link = Link.create!(link_class: 'test',
+                          properties: x)
+      raw = ActiveRecord::Base.connection.
+          select_value("select properties from links where uuid='#{link.uuid}'")
+      refute_match(/:[fb]/, raw)
+    end
+  end
+
+  [ {['foo'] => 'bar'},
+    {'a' => {['foo', :foo] => 'bar'}},
+    {'a' => {{'foo' => 'bar'} => 'bar'}},
+    {'a' => {['foo', :foo] => ['bar', 'baz']}},
+  ].each do |x|
+    test "refuse non-string keys in serialized db columns: #{x.inspect}" do
+      set_user_from_auth :active
+      assert_raises(ArgumentError) do
+        Link.create!(link_class: 'test',
+                     properties: x)
+      end
+    end
+  end
+
+  test "Stringify symbols coming from serialized attribute in database" do
+    set_user_from_auth :admin_trustedclient
+    fixed = Link.find_by_uuid(links(:has_symbol_keys_in_database_somehow).uuid)
+    assert_equal(["baz", "foo"], fixed.properties.keys.sort,
+                 "Hash symbol keys from DB did not get stringified.")
+    assert_equal(['waz', 'waz', 'waz', 1, nil, false, true],
+                 fixed.properties['baz'],
+                 "Array symbol values from DB did not get stringified.")
+    assert_equal true, fixed.save, "Failed to save fixed model back to db."
+  end
+
+  test "No HashWithIndifferentAccess in database" do
+    set_user_from_auth :admin_trustedclient
+    link = Link.create!(link_class: 'test',
+                        properties: {'foo' => 'bar'}.with_indifferent_access)
+    raw = ActiveRecord::Base.connection.
+      select_value("select properties from links where uuid='#{link.uuid}'")
+    assert_equal '{"foo": "bar"}', raw
+  end
+
+  test "store long string" do
+    set_user_from_auth :active
+    longstring = "a"
+    while longstring.length < 2**16
+      longstring = longstring + longstring
+    end
+    g = Group.create! name: 'Has a long description', description: longstring
+    g = Group.find_by_uuid g.uuid
+    assert_equal g.description, longstring
+  end
+
+  [['uuid', {unique: true}],
+   ['owner_uuid', {}]].each do |the_column, requires|
+    test "unique index on all models with #{the_column}" do
+      checked = 0
+      ActiveRecord::Base.connection.tables.each do |table|
+        columns = ActiveRecord::Base.connection.columns(table)
+
+        next unless columns.collect(&:name).include? the_column
+
+        indexes = ActiveRecord::Base.connection.indexes(table).reject do |index|
+          requires.map do |key, val|
+            index.send(key) == val
+          end.include? false
+        end
+        assert_includes indexes.collect(&:columns), [the_column], 'no index'
+        checked += 1
+      end
+      # Sanity check: make sure we didn't just systematically miss everything.
+      assert_operator(10, :<, checked,
+                      "Only #{checked} tables have a #{the_column}?!")
+    end
+  end
+
+  test "search index exists on models that go into projects" do
+    all_tables =  ActiveRecord::Base.connection.tables
+    all_tables.delete 'schema_migrations'
+    all_tables.delete 'permission_refresh_lock'
+
+    all_tables.each do |table|
+      table_class = table.classify.constantize
+      if table_class.respond_to?('searchable_columns')
+        search_index_columns = table_class.searchable_columns('ilike')
+        # Disappointing, but text columns aren't indexed yet.
+        search_index_columns -= table_class.columns.select { |c|
+          c.type == :text or c.name == 'description' or c.name == 'file_names'
+        }.collect(&:name)
+
+        indexes = ActiveRecord::Base.connection.indexes(table)
+        search_index_by_columns = indexes.select do |index|
+          index.columns.sort == search_index_columns.sort
+        end
+        search_index_by_name = indexes.select do |index|
+          index.name == "#{table}_search_index"
+        end
+        assert !search_index_by_columns.empty?, "#{table} has no search index with columns #{search_index_columns}. Instead found search index with columns #{search_index_by_name.first.andand.columns}"
+      end
+    end
+  end
+
+  test "full text search index exists on models" do
+    indexes = {}
+    conn = ActiveRecord::Base.connection
+    conn.exec_query("SELECT i.relname as indname,
+      i.relowner as indowner,
+      idx.indrelid::regclass::text as table,
+      am.amname as indam,
+      idx.indkey,
+      ARRAY(
+            SELECT pg_get_indexdef(idx.indexrelid, k + 1, true)
+                   FROM generate_subscripts(idx.indkey, 1) as k
+                   ORDER BY k
+                   ) as keys,
+      idx.indexprs IS NOT NULL as indexprs,
+      idx.indpred IS NOT NULL as indpred
+      FROM   pg_index as idx
+      JOIN   pg_class as i
+      ON     i.oid = idx.indexrelid
+      JOIN   pg_am as am
+      ON     i.relam = am.oid
+      JOIN   pg_namespace as ns
+      ON     ns.oid = i.relnamespace
+      AND    ns.nspname = ANY(current_schemas(false))").each do |idx|
+      if idx['keys'].match(/to_tsvector/)
+        indexes[idx['table']] ||= []
+        indexes[idx['table']] << idx
+      end
+    end
+    fts_tables =  ["collections", "container_requests", "groups", "jobs",
+                   "pipeline_instances", "pipeline_templates", "workflows"]
+    fts_tables.each do |table|
+      table_class = table.classify.constantize
+      if table_class.respond_to?('full_text_searchable_columns')
+        expect = table_class.full_text_searchable_columns
+        ok = false
+        indexes[table].andand.each do |idx|
+          if expect == idx['keys'].scan(/COALESCE\(([A-Za-z_]+)/).flatten
+            ok = true
+          end
+        end
+        assert ok, "#{table} has no full-text index\nexpect: #{expect.inspect}\nfound: #{indexes[table].inspect}"
+      end
+    end
+  end
+
+  test "selectable_attributes includes database attributes" do
+    assert_includes(Job.selectable_attributes, "success")
+  end
+
+  test "selectable_attributes includes non-database attributes" do
+    assert_includes(Job.selectable_attributes, "node_uuids")
+  end
+
+  test "selectable_attributes includes common attributes in extensions" do
+    assert_includes(Job.selectable_attributes, "uuid")
+  end
+
+  test "selectable_attributes does not include unexposed attributes" do
+    refute_includes(Job.selectable_attributes, "nodes")
+  end
+
+  test "selectable_attributes on a non-default template" do
+    attr_a = Job.selectable_attributes(:common)
+    assert_includes(attr_a, "uuid")
+    refute_includes(attr_a, "success")
+  end
+
+  test 'create and retrieve using created_at time' do
+    set_user_from_auth :active
+    group = Group.create! name: 'test create and retrieve group'
+    assert group.valid?, "group is not valid"
+
+    results = Group.where(created_at: group.created_at)
+    assert_includes results.map(&:uuid), group.uuid,
+      "Expected new group uuid in results when searched with its created_at timestamp"
+  end
+
+  test 'create and update twice and expect different update times' do
+    set_user_from_auth :active
+    group = Group.create! name: 'test create and retrieve group'
+    assert group.valid?, "group is not valid"
+
+    # update 1
+    group.update_attributes!(name: "test create and update name 1")
+    results = Group.where(uuid: group.uuid)
+    assert_equal "test create and update name 1", results.first.name, "Expected name to be updated to 1"
+    updated_at_1 = results.first.updated_at.to_f
+
+    # update 2
+    group.update_attributes!(name: "test create and update name 2")
+    results = Group.where(uuid: group.uuid)
+    assert_equal "test create and update name 2", results.first.name, "Expected name to be updated to 2"
+    updated_at_2 = results.first.updated_at.to_f
+
+    assert_equal true, (updated_at_2 > updated_at_1), "Expected updated time 2 to be newer than 1"
+  end
+
+  test 'jsonb column' do
+    set_user_from_auth :active
+
+    c = Collection.create!(properties: {})
+    assert_equal({}, c.properties)
+
+    c.update_attributes(properties: {'foo' => 'foo'})
+    c.reload
+    assert_equal({'foo' => 'foo'}, c.properties)
+
+    c.update_attributes(properties: nil)
+    c.reload
+    assert_equal({}, c.properties)
+
+    c.update_attributes(properties: {foo: 'bar'})
+    assert_equal({'foo' => 'bar'}, c.properties)
+    c.reload
+    assert_equal({'foo' => 'bar'}, c.properties)
+  end
+end
diff --git a/services/api/test/unit/authorized_key_test.rb b/services/api/test/unit/authorized_key_test.rb
new file mode 100644 (file)
index 0000000..14bca29
--- /dev/null
@@ -0,0 +1,51 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class AuthorizedKeyTest < ActiveSupport::TestCase
+  TEST_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCf5aTI55uyWr44TckP/ELUAyPsdnf5fTZDcSDN4qiMZYAL7TYV2ixwnbPObLObM0GmHSSFLV1KqsuFICUPgkyKoHbAH6XPgmtfOLU60VkGf1v5uxQ/kXCECRCJmPb3K9dIXGEw+1DXPdOV/xG7rJNvo4a9WK9iqqZr8p+VGKM6C017b8BDLk0tuEEjZ5jXcT/ka/hTScxWkKgF6auPOVQ79OA5+0VaYm4uQLzVUdgwVUPWQQecRrtnc08XYM1htpcLDIAbWfUNK7uE6XR3/OhtrJGf05FGbtGguPgi33F9W3Q3yw6saOK5Y3TfLbskgFaEdLgzqK/QSBRk2zBF49Tj test@localhost"
+
+  test 'create and update key' do
+    u1 = users(:active)
+    act_as_user u1 do
+      ak = AuthorizedKey.new(name: "foo", public_key: TEST_KEY, authorized_user_uuid: u1.uuid)
+      assert ak.save, ak.errors.full_messages.to_s
+      ak.name = "bar"
+      assert ak.valid?, ak.errors.full_messages.to_s
+      assert ak.save, ak.errors.full_messages.to_s
+    end
+  end
+
+  test 'duplicate key not permitted' do
+    u1 = users(:active)
+    act_as_user u1 do
+      ak = AuthorizedKey.new(name: "foo", public_key: TEST_KEY, authorized_user_uuid: u1.uuid)
+      assert ak.save
+    end
+    u2 = users(:spectator)
+    act_as_user u2 do
+      ak2 = AuthorizedKey.new(name: "bar", public_key: TEST_KEY, authorized_user_uuid: u2.uuid)
+      refute ak2.valid?
+      refute ak2.save
+      assert_match(/already exists/, ak2.errors.full_messages.to_s)
+    end
+  end
+
+  test 'attach key to wrong user account' do
+    act_as_user users(:active) do
+      ak = AuthorizedKey.new(name: "foo", public_key: TEST_KEY)
+      ak.authorized_user_uuid = users(:spectator).uuid
+      refute ak.save
+      ak.uuid = nil
+      ak.authorized_user_uuid = users(:admin).uuid
+      refute ak.save
+      ak.uuid = nil
+      ak.authorized_user_uuid = users(:active).uuid
+      assert ak.save, ak.errors.full_messages.to_s
+      ak.authorized_user_uuid = users(:admin).uuid
+      refute ak.save
+    end
+  end
+end
diff --git a/services/api/test/unit/blob_test.rb b/services/api/test/unit/blob_test.rb
new file mode 100644 (file)
index 0000000..429ebde
--- /dev/null
@@ -0,0 +1,144 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class BlobTest < ActiveSupport::TestCase
+  @@api_token = rand(2**512).to_s(36)[0..49]
+  @@key = rand(2**2048).to_s(36)
+  @@blob_data = 'foo'
+  @@blob_locator = Digest::MD5.hexdigest(@@blob_data) +
+    '+' + @@blob_data.size.to_s
+
+  @@known_locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3'
+  @@known_token = 'hocfupkn2pjhrpgp2vxv8rsku7tvtx49arbc9s4bvu7p7wxqvk'
+  @@known_key = '13u9fkuccnboeewr0ne3mvapk28epf68a3bhj9q8sb4l6e4e5mkk' +
+    'p6nhj2mmpscgu1zze5h5enydxfe3j215024u16ij4hjaiqs5u4pzsl3nczmaoxnc' +
+    'ljkm4875xqn4xv058koz3vkptmzhyheiy6wzevzjmdvxhvcqsvr5abhl15c2d4o4' +
+    'jhl0s91lojy1mtrzqqvprqcverls0xvy9vai9t1l1lvvazpuadafm71jl4mrwq2y' +
+    'gokee3eamvjy8qq1fvy238838enjmy5wzy2md7yvsitp5vztft6j4q866efym7e6' +
+    'vu5wm9fpnwjyxfldw3vbo01mgjs75rgo7qioh8z8ij7jpyp8508okhgbbex3ceei' +
+    '786u5rw2a9gx743dj3fgq2irk'
+  @@known_signed_locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3' +
+    '+A89118b78732c33104a4d6231e8b5a5fa1e4301e3@7fffffff'
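+  # (Format: "<md5 hash>+<size>+A<40-hex signature>@<hex expiry timestamp>";
+  # 0x7fffffff is a far-future expiry used for these known-answer tests.)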
+
+  test 'generate predictable invincible signature' do
+    signed = Blob.sign_locator @@known_locator, {
+      api_token: @@known_token,
+      key: @@known_key,
+      expire: 0x7fffffff,
+    }
+    assert_equal @@known_signed_locator, signed
+  end
+
+  test 'verify predictable invincible signature' do
+    assert_equal true, Blob.verify_signature!(@@known_signed_locator,
+                                              api_token: @@known_token,
+                                              key: @@known_key)
+  end
+
+  test 'correct' do
+    signed = Blob.sign_locator @@blob_locator, api_token: @@api_token, key: @@key
+    assert_equal true, Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+  end
+
+  test 'expired' do
+    signed = Blob.sign_locator @@blob_locator, api_token: @@api_token, key: @@key, ttl: -1
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'expired, but no raise' do
+    signed = Blob.sign_locator @@blob_locator, api_token: @@api_token, key: @@key, ttl: -1
+    assert_equal false, Blob.verify_signature(signed,
+                                              api_token: @@api_token,
+                                              key: @@key)
+  end
+
+  test 'bogus, wrong block hash' do
+    signed = Blob.sign_locator @@blob_locator, api_token: @@api_token, key: @@key
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed.sub('acbd','abcd'), api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, expired' do
+    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa@531641bf'
+    assert_raises Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, wrong key' do
+    signed = Blob.sign_locator(@@blob_locator,
+                               api_token: @@api_token,
+                               key: (@@key+'x'))
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, wrong api token' do
+    signed = Blob.sign_locator(@@blob_locator,
+                               api_token: @@api_token.reverse,
+                               key: @@key)
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, signature format 1' do
+    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa@'
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, signature format 2' do
+    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+A@531641bf'
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, signature format 3' do
+    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+Axyzzy@531641bf'
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, timestamp format' do
+    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa@xyzzy'
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'no signature at all' do
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(@@blob_locator, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'signature changes when ttl changes' do
+    signed = Blob.sign_locator @@known_locator, {
+      api_token: @@known_token,
+      key: @@known_key,
+      expire: 0x7fffffff,
+    }
+
+    original_ttl = Rails.configuration.blob_signature_ttl
+    Rails.configuration.blob_signature_ttl = original_ttl*2
+    signed2 = Blob.sign_locator @@known_locator, {
+      api_token: @@known_token,
+      key: @@known_key,
+      expire: 0x7fffffff,
+    }
+    Rails.configuration.blob_signature_ttl = original_ttl
+
+    assert_not_equal signed, signed2
+  end
+end
diff --git a/services/api/test/unit/collection_performance_test.rb b/services/api/test/unit/collection_performance_test.rb
new file mode 100644 (file)
index 0000000..4efc947
--- /dev/null
@@ -0,0 +1,65 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/manifest_examples'
+require 'helpers/time_block'
+
+class CollectionModelPerformanceTest < ActiveSupport::TestCase
+  include ManifestExamples
+
+  setup do
+    # The Collection model needs to have a current token, not just a
+    # current user, to sign & verify manifests:
+    Thread.current[:token] = api_client_authorizations(:active).token
+  end
+
+  teardown do
+    Thread.current[:token] = nil
+  end
+
+  # "crrud" == "create read render update delete", not a typo
+  slow_test "crrud cycle for a collection with a big manifest)" do
+    bigmanifest = time_block 'make example' do
+      make_manifest(streams: 100,
+                    files_per_stream: 100,
+                    blocks_per_file: 20,
+                    bytes_per_block: 2**26,
+                    api_token: api_client_authorizations(:active).token)
+    end
+    act_as_user users(:active) do
+      c = time_block "new (manifest_text is #{bigmanifest.length>>20}MiB)" do
+        Collection.new manifest_text: bigmanifest.dup
+      end
+      time_block 'check signatures' do
+        c.check_signatures
+      end
+      time_block 'check signatures + save' do
+        c.instance_eval do @signatures_checked = false end
+        c.save!
+      end
+      c = time_block 'read' do
+        Collection.find_by_uuid(c.uuid)
+      end
+      time_block 'sign' do
+        c.signed_manifest_text
+      end
+      time_block 'sign + render' do
+        c.as_api_response(nil)
+      end
+      loc = Blob.sign_locator(Digest::MD5.hexdigest('foo') + '+3',
+                              api_token: api_client_authorizations(:active).token)
+      # Note Collection's strip_manifest_text method has now removed
+      # the signatures from c.manifest_text, so we have to start from
+      # bigmanifest again here instead of just appending with "+=".
+      c.manifest_text = bigmanifest.dup + ". #{loc} 0:3:foo.txt\n"
+      time_block 'update' do
+        c.save!
+      end
+      time_block 'delete' do
+        c.destroy
+      end
+    end
+  end
+end
diff --git a/services/api/test/unit/collection_test.rb b/services/api/test/unit/collection_test.rb
new file mode 100644 (file)
index 0000000..9797ed6
--- /dev/null
@@ -0,0 +1,905 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'sweep_trashed_objects'
+
+class CollectionTest < ActiveSupport::TestCase
+  include DbCurrentTime
+
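+  # Minimal one-file manifest: ". <md5>+<size> <offset>:<length>:<name>\n"
+  # (d41d8cd9... is the md5 of the empty string, i.e. a zero-length block).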
+  def create_collection name, enc=nil
+    txt = ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:#{name}.txt\n"
+    txt.force_encoding(enc) if enc
+    return Collection.create(manifest_text: txt, name: name)
+  end
+
+  test 'accept ASCII manifest_text' do
+    act_as_system_user do
+      c = create_collection 'foo', Encoding::US_ASCII
+      assert c.valid?
+    end
+  end
+
+  test 'accept UTF-8 manifest_text' do
+    act_as_system_user do
+      c = create_collection "f\xc3\x98\xc3\x98", Encoding::UTF_8
+      assert c.valid?
+    end
+  end
+
+  test 'refuse manifest_text with invalid UTF-8 byte sequence' do
+    act_as_system_user do
+      c = create_collection "f\xc8o", Encoding::UTF_8
+      assert !c.valid?
+      assert_equal [:manifest_text], c.errors.messages.keys
+      assert_match(/UTF-8/, c.errors.messages[:manifest_text].first)
+    end
+  end
+
+  test 'refuse manifest_text with non-UTF-8 encoding' do
+    act_as_system_user do
+      c = create_collection "f\xc8o", Encoding::ASCII_8BIT
+      assert !c.valid?
+      assert_equal [:manifest_text], c.errors.messages.keys
+      assert_match(/UTF-8/, c.errors.messages[:manifest_text].first)
+    end
+  end
+
+  [
+    ". 0:0:foo.txt",
+    ". d41d8cd98f00b204e9800998ecf8427e foo.txt",
+    "d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt",
+    ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt",
+  ].each do |manifest_text|
+    test "create collection with invalid manifest text #{manifest_text} and expect error" do
+      act_as_system_user do
+        c = Collection.create(manifest_text: manifest_text)
+        assert !c.valid?
+      end
+    end
+  end
+
+  [
+    nil,
+    "",
+    ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n",
+  ].each do |manifest_text|
+    test "create collection with valid manifest text #{manifest_text.inspect} and expect success" do
+      act_as_system_user do
+        c = Collection.create(manifest_text: manifest_text)
+        assert c.valid?
+      end
+    end
+  end
+
+  [
+    ". 0:0:foo.txt",
+    ". d41d8cd98f00b204e9800998ecf8427e foo.txt",
+    "d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt",
+    ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt",
+  ].each do |manifest_text|
+    test "update collection with invalid manifest text #{manifest_text} and expect error" do
+      act_as_system_user do
+        c = create_collection 'foo', Encoding::US_ASCII
+        assert c.valid?
+
+        c.update_attribute 'manifest_text', manifest_text
+        assert !c.valid?
+      end
+    end
+  end
+
+  [
+    nil,
+    "",
+    ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n",
+  ].each do |manifest_text|
+    test "update collection with valid manifest text #{manifest_text.inspect} and expect success" do
+      act_as_system_user do
+        c = create_collection 'foo', Encoding::US_ASCII
+        assert c.valid?
+
+        c.update_attribute 'manifest_text', manifest_text
+        assert c.valid?
+      end
+    end
+  end
+
+  test "auto-create version after idle setting" do
+    Rails.configuration.collection_versioning = true
+    Rails.configuration.preserve_version_if_idle = 600 # 10 minutes
+    act_as_user users(:active) do
+      # Set up initial collection
+      c = create_collection 'foo', Encoding::US_ASCII
+      assert c.valid?
+      assert_equal 1, c.version
+      assert_equal false, c.preserve_version
+      # Make a versionable update, it shouldn't create a new version yet
+      c.update_attributes!({'name' => 'bar'})
+      c.reload
+      assert_equal 'bar', c.name
+      assert_equal 1, c.version
+      # Update modified_at to trigger a version auto-creation
+      fifteen_min_ago = Time.now - 15.minutes
+      c.update_column('modified_at', fifteen_min_ago) # Update without validations/callbacks
+      c.reload
+      assert_equal fifteen_min_ago.to_i, c.modified_at.to_i
+      c.update_attributes!({'name' => 'baz'})
+      c.reload
+      assert_equal 'baz', c.name
+      assert_equal 2, c.version
+      # Make another update, no new version should be created
+      c.update_attributes!({'name' => 'foobar'})
+      c.reload
+      assert_equal 'foobar', c.name
+      assert_equal 2, c.version
+    end
+  end
+
+  test "preserve_version=false assignment is ignored while being true and not producing a new version" do
+    Rails.configuration.collection_versioning = true
+    Rails.configuration.preserve_version_if_idle = 3600
+    act_as_user users(:active) do
+      # Set up initial collection
+      c = create_collection 'foo', Encoding::US_ASCII
+      assert c.valid?
+      assert_equal 1, c.version
+      assert_equal false, c.preserve_version
+      # This update shouldn't produce a new version, as the idle time is not up
+      c.update_attributes!({
+        'name' => 'bar',
+        'preserve_version' => true
+      })
+      c.reload
+      assert_equal 1, c.version
+      assert_equal 'bar', c.name
+      assert_equal true, c.preserve_version
+      # Make sure preserve_version is not disabled after being enabled, unless
+      # a new version is created.
+      c.update_attributes!({
+        'preserve_version' => false,
+        'replication_desired' => 2
+      })
+      c.reload
+      assert_equal 1, c.version
+      assert_equal 2, c.replication_desired
+      assert_equal true, c.preserve_version
+      c.update_attributes!({'name' => 'foobar'})
+      c.reload
+      assert_equal 2, c.version
+      assert_equal false, c.preserve_version
+      assert_equal 'foobar', c.name
+    end
+  end
+
+  [
+    ['version', 10],
+    ['current_version_uuid', 'zzzzz-4zz18-bv31uwvy3neko21'],
+  ].each do |name, new_value|
+    test "'#{name}' updates on current version collections are not allowed" do
+      act_as_user users(:active) do
+        # Set up initial collection
+        c = create_collection 'foo', Encoding::US_ASCII
+        assert c.valid?
+        assert_equal 1, c.version
+
+        assert_raises(ActiveRecord::RecordInvalid) do
+          c.update_attributes!({
+            name => new_value
+          })
+        end
+      end
+    end
+  end
+
+  test "uuid updates on current version make older versions update their pointers" do
+    Rails.configuration.collection_versioning = true
+    Rails.configuration.preserve_version_if_idle = 0
+    act_as_system_user do
+      # Set up initial collection
+      c = create_collection 'foo', Encoding::US_ASCII
+      assert c.valid?
+      assert_equal 1, c.version
+      # Make changes so that a new version is created
+      c.update_attributes!({'name' => 'bar'})
+      c.reload
+      assert_equal 2, c.version
+      assert_equal 2, Collection.where(current_version_uuid: c.uuid).count
+      new_uuid = 'zzzzz-4zz18-somefakeuuidnow'
+      assert_empty Collection.where(uuid: new_uuid)
+      # Update UUID on current version, check that both collections point to it
+      c.update_attributes!({'uuid' => new_uuid})
+      c.reload
+      assert_equal new_uuid, c.uuid
+      assert_equal 2, Collection.where(current_version_uuid: new_uuid).count
+    end
+  end
+
+  test "older versions' modified_at indicate when they're created" do
+    Rails.configuration.collection_versioning = true
+    Rails.configuration.preserve_version_if_idle = 0
+    act_as_user users(:active) do
+      # Set up initial collection
+      c = create_collection 'foo', Encoding::US_ASCII
+      assert c.valid?
+      # Make changes so that a new version is created
+      c.update_attributes!({'name' => 'bar'})
+      c.reload
+      assert_equal 2, c.version
+      # Get the old version
+      c_old = Collection.where(current_version_uuid: c.uuid, version: 1).first
+      assert_not_nil c_old
+
+      version_creation_datetime = c_old.modified_at.to_f
+      assert_equal c.created_at.to_f, c_old.created_at.to_f
+      # The current version is updated just a few milliseconds before the
+      # snapshot version is saved to the database.
+      assert_operator c.modified_at.to_f, :<, version_creation_datetime
+
+      # Make an update on the current version so the old version gets the
+      # attribute synced; its modified_at should not change.
+      new_replication = 3
+      c.update_attributes!({'replication_desired' => new_replication})
+      c.reload
+      assert_equal new_replication, c.replication_desired
+      c_old.reload
+      assert_equal new_replication, c_old.replication_desired
+      assert_equal version_creation_datetime, c_old.modified_at.to_f
+      assert_operator c.modified_at.to_f, :>, c_old.modified_at.to_f
+    end
+  end
+
+  test "past versions should not be directly updatable" do
+    Rails.configuration.collection_versioning = true
+    Rails.configuration.preserve_version_if_idle = 0
+    act_as_system_user do
+      # Set up initial collection
+      c = create_collection 'foo', Encoding::US_ASCII
+      assert c.valid?
+      # Make changes so that a new version is created
+      c.update_attributes!({'name' => 'bar'})
+      c.reload
+      assert_equal 2, c.version
+      # Get the old version
+      c_old = Collection.where(current_version_uuid: c.uuid, version: 1).first
+      assert_not_nil c_old
+      # With collection versioning still being enabled, try to update
+      c_old.name = 'this was foo'
+      assert c_old.invalid?
+      c_old.reload
+      # Try to fool the validator by making c_old look like a current
+      # version; it should also fail.
+      c_old.current_version_uuid = c_old.uuid
+      assert c_old.invalid?
+      c_old.reload
+      # Now disable collection versioning, it should behave the same way
+      Rails.configuration.collection_versioning = false
+      c_old.name = 'this was foo'
+      assert c_old.invalid?
+    end
+  end
+
+  [
+    ['owner_uuid', 'zzzzz-tpzed-d9tiejq69daie8f', 'zzzzz-tpzed-xurymjxw79nv3jz'],
+    ['replication_desired', 2, 3],
+    ['storage_classes_desired', ['hot'], ['archive']],
+    ['is_trashed', true, false],
+  ].each do |attr, first_val, second_val|
+    test "sync #{attr} with older versions" do
+      Rails.configuration.collection_versioning = true
+      Rails.configuration.preserve_version_if_idle = 0
+      act_as_system_user do
+        # Set up initial collection
+        c = create_collection 'foo', Encoding::US_ASCII
+        assert c.valid?
+        assert_equal 1, c.version
+        assert_not_equal first_val, c.attributes[attr]
+        # Make changes so that a new version is created and a synced field is
+        # updated on both
+        c.update_attributes!({'name' => 'bar', attr => first_val})
+        c.reload
+        assert_equal 2, c.version
+        assert_equal first_val, c.attributes[attr]
+        assert_equal 2, Collection.where(current_version_uuid: c.uuid).count
+        assert_equal first_val, Collection.where(current_version_uuid: c.uuid, version: 1).first.attributes[attr]
+        # Only make an update on the same synced field & check that the previously
+        # created version also gets it.
+        c.update_attributes!({attr => second_val})
+        c.reload
+        assert_equal 2, c.version
+        assert_equal second_val, c.attributes[attr]
+        assert_equal 2, Collection.where(current_version_uuid: c.uuid).count
+        assert_equal second_val, Collection.where(current_version_uuid: c.uuid, version: 1).first.attributes[attr]
+      end
+    end
+  end
+
+  [
+    [false, 'name', 'bar', false],
+    [false, 'description', 'The quick brown fox jumps over the lazy dog', false],
+    [false, 'properties', {'new_version' => true}, false],
+    [false, 'manifest_text', ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n", false],
+    [true, 'name', 'bar', true],
+    [true, 'description', 'The quick brown fox jumps over the lazy dog', true],
+    [true, 'properties', {'new_version' => true}, true],
+    [true, 'manifest_text', ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n", true],
+    # Non-versionable attribute updates shouldn't create new versions
+    [true, 'replication_desired', 5, false],
+    [false, 'replication_desired', 5, false],
+  ].each do |versioning, attr, val, new_version_expected|
+    test "update #{attr} with versioning #{versioning ? '' : 'not '}enabled should #{new_version_expected ? '' : 'not '}create a new version" do
+      Rails.configuration.collection_versioning = versioning
+      Rails.configuration.preserve_version_if_idle = 0
+      act_as_user users(:active) do
+        # Create initial collection
+        c = create_collection 'foo', Encoding::US_ASCII
+        assert c.valid?
+        assert_equal 'foo', c.name
+
+        # Check current version attributes
+        assert_equal 1, c.version
+        assert_equal c.uuid, c.current_version_uuid
+
+        # Update attribute and check if version number should be incremented
+        old_value = c.attributes[attr]
+        c.update_attributes!({attr => val})
+        assert_equal new_version_expected, c.version == 2
+        assert_equal val, c.attributes[attr]
+
+        if versioning && new_version_expected
+          # Search for the snapshot & previous value
+          assert_equal 2, Collection.where(current_version_uuid: c.uuid).count
+          s = Collection.where(current_version_uuid: c.uuid, version: 1).first
+          assert_not_nil s
+          assert_equal old_value, s.attributes[attr]
+        else
+          # If versioning is disabled or no versionable attribute was updated,
+          # only the current version should exist
+          assert_equal 1, Collection.where(current_version_uuid: c.uuid).count
+          assert_equal c, Collection.where(current_version_uuid: c.uuid).first
+        end
+      end
+    end
+  end
+
+  test 'current_version_uuid is ignored during update' do
+    Rails.configuration.collection_versioning = true
+    Rails.configuration.preserve_version_if_idle = 0
+    act_as_user users(:active) do
+      # Create 1st collection
+      col1 = create_collection 'foo', Encoding::US_ASCII
+      assert col1.valid?
+      assert_equal 1, col1.version
+
+      # Create 2nd collection, update it so it becomes version:2
+      # (to avoid unique index violation)
+      col2 = create_collection 'bar', Encoding::US_ASCII
+      assert col2.valid?
+      assert_equal 1, col2.version
+      col2.update_attributes({name: 'baz'})
+      assert_equal 2, col2.version
+
+      # Try to make col2 a past version of col1. It shouldn't be possible.
+      col2.update_attributes({current_version_uuid: col1.uuid})
+      assert col2.invalid?
+      col2.reload
+      assert_not_equal col1.uuid, col2.current_version_uuid
+    end
+  end
+
+  test 'with versioning enabled, simultaneous updates increment version correctly' do
+    Rails.configuration.collection_versioning = true
+    Rails.configuration.preserve_version_if_idle = 0
+    act_as_user users(:active) do
+      # Create initial collection
+      col = create_collection 'foo', Encoding::US_ASCII
+      assert col.valid?
+      assert_equal 1, col.version
+
+      # Simulate simultaneous updates
+      c1 = Collection.where(uuid: col.uuid).first
+      assert_equal 1, c1.version
+      c1.name = 'bar'
+      c2 = Collection.where(uuid: col.uuid).first
+      c2.description = 'foo collection'
+      c1.save!
+      assert_equal 1, c2.version
+      # with_lock forces a reload, so this shouldn't produce a unique violation error
+      c2.save!
+      assert_equal 3, c2.version
+      assert_equal 'foo collection', c2.description
+    end
+  end
+
+  test 'create and update collection and verify file_names' do
+    act_as_system_user do
+      c = create_collection 'foo', Encoding::US_ASCII
+      assert c.valid?
+      created_file_names = c.file_names
+      assert created_file_names
+      assert_match(/foo.txt/, c.file_names)
+
+      c.update_attribute 'manifest_text', ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo2.txt\n"
+      assert_not_equal created_file_names, c.file_names
+      assert_match(/foo2.txt/, c.file_names)
+    end
+  end
+
+  [
+    [2**8, false],
+    [2**18, true],
+  ].each do |manifest_size, allow_truncate|
+    test "create collection with manifest size #{manifest_size} with allow_truncate=#{allow_truncate},
+          and not expect exceptions even on very large manifest texts" do
+      # file_names has a max size, hence there will be no errors even on large manifests
+      act_as_system_user do
+        manifest_text = ''
+        index = 0
+        while manifest_text.length < manifest_size
+          manifest_text += "./blurfl d41d8cd98f00b204e9800998ecf8427e+0 0:0:veryverylongfilename000000000000#{index}.txt\n"
+          index += 1
+        end
+        manifest_text += "./laststreamname d41d8cd98f00b204e9800998ecf8427e+0 0:0:veryverylastfilename.txt\n"
+        c = Collection.create(manifest_text: manifest_text)
+
+        assert c.valid?
+        assert c.file_names
+        assert_match(/veryverylongfilename0000000000001.txt/, c.file_names)
+        assert_match(/veryverylongfilename0000000000002.txt/, c.file_names)
+        if not allow_truncate
+          assert_match(/veryverylastfilename/, c.file_names)
+          assert_match(/laststreamname/, c.file_names)
+        end
+      end
+    end
+  end
+
+  test "full text search for collections" do
+    # The file_names column is not populated when fixtures are loaded, so set up test data here.
+    act_as_system_user do
+      Collection.create(manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n")
+      Collection.create(manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n")
+      Collection.create(manifest_text: ". 85877ca2d7e05498dd3d109baf2df106+95+A3a4e26a366ee7e4ed3e476ccf05354761be2e4ae@545a9920 0:95:file_in_subdir1\n./subdir2/subdir3 2bbc341c702df4d8f42ec31f16c10120+64+A315d7e7bad2ce937e711fc454fae2d1194d14d64@545a9920 0:32:file1.txt 32:32:file2.txt\n./subdir2/subdir3/subdir4 2bbc341c702df4d8f42ec31f16c10120+64+A315d7e7bad2ce937e711fc454fae2d1194d14d64@545a9920 0:32:file3.txt 32:32:file4.txt\n")
+    end
+
+    [
+      ['foo', true],
+      ['foo bar', false],                     # no collection matching both
+      ['foo&bar', false],                     # no collection matching both
+      ['foo|bar', true],                      # works only with no spaces between the words
+      ['Gnu public', true],                   # both prefixes found, though not consecutively
+      ['Gnu&public', true],                   # both prefixes found, though not consecutively
+      ['file4', true],                        # prefix match
+      ['file4.txt', true],                    # whole string match
+      ['filex', false],                       # no such prefix
+      ['subdir', true],                       # prefix matches
+      ['subdir2', true],
+      ['subdir2/', true],
+      ['subdir2/subdir3', true],
+      ['subdir2/subdir3/subdir4', true],
+      ['subdir2 file4', true],                # look for both prefixes
+      ['subdir4', false],                     # not a prefix match
+    ].each do |search_filter, expect_results|
+      search_filters = search_filter.split.each {|s| s.concat(':*')}.join('&')
+      results = Collection.where("#{Collection.full_text_tsvector} @@ to_tsquery(?)",
+                                 "#{search_filters}")
+      if expect_results
+        refute_empty results
+      else
+        assert_empty results
+      end
+    end
+  end
+
+  test 'portable data hash with missing size hints' do
+    [[". d41d8cd98f00b204e9800998ecf8427e+0+Bar 0:0:x\n",
+      ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:x\n"],
+     [". d41d8cd98f00b204e9800998ecf8427e+Foo 0:0:x\n",
+      ". d41d8cd98f00b204e9800998ecf8427e 0:0:x\n"],
+     [". d41d8cd98f00b204e9800998ecf8427e 0:0:x\n",
+      ". d41d8cd98f00b204e9800998ecf8427e 0:0:x\n"],
+    ].each do |unportable, portable|
+      c = Collection.new(manifest_text: unportable)
+      assert c.valid?
+      assert_equal(Digest::MD5.hexdigest(portable)+"+#{portable.length}",
+                   c.portable_data_hash)
+    end
+  end
+
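+  # The cases below suggest the validator checks that the hash part of a
+  # client-supplied portable_data_hash matches the given manifest, while
+  # being lenient about the "+size" and "+hint" suffixes.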
+  pdhmanifest = ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:x\n"
+  pdhmd5 = Digest::MD5.hexdigest pdhmanifest
+  [[true, nil],
+   [true, pdhmd5],
+   [true, pdhmd5+'+12345'],
+   [true, pdhmd5+'+'+pdhmanifest.length.to_s],
+   [true, pdhmd5+'+12345+Foo'],
+   [true, pdhmd5+'+Foo'],
+   [false, Digest::MD5.hexdigest(pdhmanifest.strip)],
+   [false, Digest::MD5.hexdigest(pdhmanifest.strip)+'+'+pdhmanifest.length.to_s],
+   [false, pdhmd5[0..30]],
+   [false, pdhmd5[0..30]+'z'],
+   [false, pdhmd5[0..24]+'000000000'],
+   [false, pdhmd5[0..24]+'000000000+0']].each do |isvalid, pdh|
+    test "portable_data_hash #{pdh.inspect} valid? == #{isvalid}" do
+      c = Collection.new manifest_text: pdhmanifest, portable_data_hash: pdh
+      assert_equal isvalid, c.valid?, c.errors.full_messages.to_s
+    end
+  end
+
+  test "storage_classes_desired cannot be empty" do
+    act_as_user users(:active) do
+      c = collections(:collection_owned_by_active)
+      c.update_attributes storage_classes_desired: ["hot"]
+      assert_equal ["hot"], c.storage_classes_desired
+      assert_raise ArvadosModel::InvalidStateTransitionError do
+        c.update_attributes storage_classes_desired: []
+      end
+    end
+  end
+
+  test "storage classes lists should only contain non-empty strings" do
+    c = collections(:storage_classes_desired_default_unconfirmed)
+    act_as_user users(:admin) do
+      assert c.update_attributes(storage_classes_desired: ["default", "a_string"],
+                                 storage_classes_confirmed: ["another_string"])
+      [
+        ["storage_classes_desired", ["default", 42]],
+        ["storage_classes_confirmed", [{the_answer: 42}]],
+        ["storage_classes_desired", ["default", ""]],
+        ["storage_classes_confirmed", [""]],
+      ].each do |attr, val|
+        assert_raise ArvadosModel::InvalidStateTransitionError do
+          assert c.update_attributes({attr => val})
+        end
+      end
+    end
+  end
+
+  test "storage_classes_confirmed* can be set by admin user" do
+    c = collections(:storage_classes_desired_default_unconfirmed)
+    act_as_user users(:admin) do
+      assert c.update_attributes(storage_classes_confirmed: ["default"],
+                                 storage_classes_confirmed_at: Time.now)
+    end
+  end
+
+  test "storage_classes_confirmed* cannot be set by non-admin user" do
+    act_as_user users(:active) do
+      c = collections(:storage_classes_desired_default_unconfirmed)
+      # Cannot set just one at a time.
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes storage_classes_confirmed: ["default"]
+      end
+      c.reload
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes storage_classes_confirmed_at: Time.now
+      end
+      # Cannot set both at once, either.
+      c.reload
+      assert_raise ArvadosModel::PermissionDeniedError do
+        assert c.update_attributes(storage_classes_confirmed: ["default"],
+                                   storage_classes_confirmed_at: Time.now)
+      end
+    end
+  end
+
+  test "storage_classes_confirmed* can be cleared (but only together) by non-admin user" do
+    act_as_user users(:active) do
+      c = collections(:storage_classes_desired_default_confirmed_default)
+      # Cannot clear just one at a time.
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes storage_classes_confirmed: []
+      end
+      c.reload
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes storage_classes_confirmed_at: nil
+      end
+      # Can clear both at once.
+      c.reload
+      assert c.update_attributes(storage_classes_confirmed: [],
+                                 storage_classes_confirmed_at: nil)
+    end
+  end
+
+  [0, 2, 4, nil].each do |ask|
+    test "set replication_desired to #{ask.inspect}" do
+      Rails.configuration.default_collection_replication = 2
+      act_as_user users(:active) do
+        c = collections(:replication_undesired_unconfirmed)
+        c.update_attributes replication_desired: ask
+        assert_equal ask, c.replication_desired
+      end
+    end
+  end
+
+  test "replication_confirmed* can be set by admin user" do
+    c = collections(:replication_desired_2_unconfirmed)
+    act_as_user users(:admin) do
+      assert c.update_attributes(replication_confirmed: 2,
+                                 replication_confirmed_at: Time.now)
+    end
+  end
+
+  test "replication_confirmed* cannot be set by non-admin user" do
+    act_as_user users(:active) do
+      c = collections(:replication_desired_2_unconfirmed)
+      # Cannot set just one at a time.
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes replication_confirmed: 1
+      end
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes replication_confirmed_at: Time.now
+      end
+      # Cannot set both at once, either.
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes(replication_confirmed: 1,
+                            replication_confirmed_at: Time.now)
+      end
+    end
+  end
+
+  test "replication_confirmed* can be cleared (but only together) by non-admin user" do
+    act_as_user users(:active) do
+      c = collections(:replication_desired_2_confirmed_2)
+      # Cannot clear just one at a time.
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes replication_confirmed: nil
+      end
+      c.reload
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes replication_confirmed_at: nil
+      end
+      # Can clear both at once.
+      c.reload
+      assert c.update_attributes(replication_confirmed: nil,
+                                 replication_confirmed_at: nil)
+    end
+  end
+
+  test "clear replication_confirmed* when introducing a new block in manifest" do
+    c = collections(:replication_desired_2_confirmed_2)
+    act_as_user users(:active) do
+      assert c.update_attributes(manifest_text: collections(:user_agreement).signed_manifest_text)
+      assert_nil c.replication_confirmed
+      assert_nil c.replication_confirmed_at
+    end
+  end
+
+  test "don't clear replication_confirmed* when just renaming a file" do
+    c = collections(:replication_desired_2_confirmed_2)
+    act_as_user users(:active) do
+      new_manifest = c.signed_manifest_text.sub(':bar', ':foo')
+      assert c.update_attributes(manifest_text: new_manifest)
+      assert_equal 2, c.replication_confirmed
+      assert_not_nil c.replication_confirmed_at
+    end
+  end
+
+  test "don't clear replication_confirmed* when just deleting a data block" do
+    c = collections(:replication_desired_2_confirmed_2)
+    act_as_user users(:active) do
+      new_manifest = c.signed_manifest_text
+      new_manifest.sub!(/ \S+:bar/, '')
+      new_manifest.sub!(/ acbd\S+/, '')
+
+      # Confirm that we did just remove a block from the manifest (if
+      # not, this test would pass without testing the relevant case):
+      assert_operator new_manifest.length+40, :<, c.signed_manifest_text.length
+
+      assert c.update_attributes(manifest_text: new_manifest)
+      assert_equal 2, c.replication_confirmed
+      assert_not_nil c.replication_confirmed_at
+    end
+  end
+
+  test 'signature expiry does not exceed trash_at' do
+    act_as_user users(:active) do
+      t0 = db_current_time
+      c = Collection.create!(manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:x\n", name: 'foo')
+      c.update_attributes! trash_at: (t0 + 1.hours)
+      c.reload
+      sig_exp = /\+A[0-9a-f]{40}\@([0-9a-f]+)/.match(c.signed_manifest_text)[1].to_i(16)
+      assert_operator sig_exp, :<=, (t0 + 1.hours).to_i
+    end
+  end
+
+  test 'far-future expiry date cannot be used to circumvent configured permission ttl' do
+    act_as_user users(:active) do
+      c = Collection.create!(manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:x\n",
+                             name: 'foo',
+                             trash_at: db_current_time + 1.years)
+      sig_exp = /\+A[0-9a-f]{40}\@([0-9a-f]+)/.match(c.signed_manifest_text)[1].to_i(16)
+      expect_max_sig_exp = db_current_time.to_i + Rails.configuration.blob_signature_ttl
+      assert_operator c.trash_at.to_i, :>, expect_max_sig_exp
+      assert_operator sig_exp, :<=, expect_max_sig_exp
+    end
+  end
+
+  test "create collection with properties" do
+    act_as_system_user do
+      c = Collection.create(manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n",
+                            properties: {'property_1' => 'value_1'})
+      assert c.valid?
+      assert_equal 'value_1', c.properties['property_1']
+    end
+  end
+
+  test 'create, delete, recreate collection with same name and owner' do
+    act_as_user users(:active) do
+      # create collection with name
+      c = Collection.create(manifest_text: '',
+                            name: "test collection name")
+      assert c.valid?
+      uuid = c.uuid
+
+      c = Collection.readable_by(current_user).where(uuid: uuid)
+      assert_not_empty c, 'Should be able to find live collection'
+
+      # mark collection as expired
+      c.first.update_attributes!(trash_at: Time.new.strftime("%Y-%m-%d"))
+      c = Collection.readable_by(current_user).where(uuid: uuid)
+      assert_empty c, 'Should not be able to find expired collection'
+
+      # recreate collection with the same name
+      c = Collection.create(manifest_text: '',
+                            name: "test collection name")
+      assert c.valid?
+    end
+  end
+
+  test 'trash_at cannot be set too far in the past' do
+    act_as_user users(:active) do
+      t0 = db_current_time
+      c = Collection.create!(manifest_text: '', name: 'foo')
+      c.update_attributes! trash_at: (t0 - 2.weeks)
+      c.reload
+      assert_operator c.trash_at, :>, t0
+    end
+  end
+
+  now = Time.now
+  [['trash-to-delete interval negative',
+    :collection_owned_by_active,
+    {trash_at: now+2.weeks, delete_at: now},
+    {state: :invalid}],
+   ['now-to-delete interval short',
+    :collection_owned_by_active,
+    {trash_at: now+3.days, delete_at: now+7.days},
+    {state: :trash_future}],
+   ['now-to-delete interval short, trash=delete',
+    :collection_owned_by_active,
+    {trash_at: now+3.days, delete_at: now+3.days},
+    {state: :trash_future}],
+   ['trash-to-delete interval ok',
+    :collection_owned_by_active,
+    {trash_at: now, delete_at: now+15.days},
+    {state: :trash_now}],
+   ['trash-to-delete interval short, but far enough in future',
+    :collection_owned_by_active,
+    {trash_at: now+13.days, delete_at: now+15.days},
+    {state: :trash_future}],
+   ['trash by setting is_trashed bool',
+    :collection_owned_by_active,
+    {is_trashed: true},
+    {state: :trash_now}],
+   ['trash in future by setting just trash_at',
+    :collection_owned_by_active,
+    {trash_at: now+1.week},
+    {state: :trash_future}],
+   ['trash in future by setting trash_at and delete_at',
+    :collection_owned_by_active,
+    {trash_at: now+1.week, delete_at: now+4.weeks},
+    {state: :trash_future}],
+   ['untrash by clearing is_trashed bool',
+    :expired_collection,
+    {is_trashed: false},
+    {state: :not_trash}],
+  ].each do |test_name, fixture_name, updates, expect|
+    test test_name do
+      act_as_user users(:active) do
+        min_exp = (db_current_time +
+                   Rails.configuration.blob_signature_ttl.seconds)
+        if fixture_name == :expired_collection
+          # Fixture-finder shorthand doesn't find trashed collections
+          # because they're not in the default scope.
+          c = Collection.find_by_uuid('zzzzz-4zz18-mto52zx1s7sn3ih')
+        else
+          c = collections(fixture_name)
+        end
+        updates_ok = c.update_attributes(updates)
+        expect_valid = expect[:state] != :invalid
+        assert_equal expect_valid, updates_ok, c.errors.full_messages.to_s
+        case expect[:state]
+        when :invalid
+          refute c.valid?
+        when :trash_now
+          assert c.is_trashed
+          assert_not_nil c.trash_at
+          assert_operator c.trash_at, :<=, db_current_time
+          assert_not_nil c.delete_at
+          assert_operator c.delete_at, :>=, min_exp
+        when :trash_future
+          refute c.is_trashed
+          assert_not_nil c.trash_at
+          assert_operator c.trash_at, :>, db_current_time
+          assert_not_nil c.delete_at
+          assert_operator c.delete_at, :>=, c.trash_at
+          # Currently this minimum interval is needed to prevent early
+          # garbage collection:
+          assert_operator c.delete_at, :>=, min_exp
+        when :not_trash
+          refute c.is_trashed
+          assert_nil c.trash_at
+          assert_nil c.delete_at
+        else
+          raise "bad expect[:state]==#{expect[:state].inspect} in test case"
+        end
+      end
+    end
+  end
+
+  test 'default trash interval > blob signature ttl' do
+    Rails.configuration.default_trash_lifetime = 86400 * 21 # 3 weeks
+    start = db_current_time
+    act_as_user users(:active) do
+      c = Collection.create!(manifest_text: '', name: 'foo')
+      c.update_attributes!(trash_at: start + 86400.seconds)
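+      # delete_at should land at trash_at (1 day out) plus the 21-day
+      # trash lifetime, i.e. 22 days out, with ~30s of test-runtime slack.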
+      assert_operator c.delete_at, :>=, start + (86400*22).seconds
+      assert_operator c.delete_at, :<, start + (86400*22 + 30).seconds
+      c.destroy
+
+      c = Collection.create!(manifest_text: '', name: 'foo')
+      c.update_attributes!(is_trashed: true)
+      assert_operator c.delete_at, :>=, start + (86400*21).seconds
+    end
+  end
+
+  test "find_all_for_docker_image resolves names that look like hashes" do
+    coll_list = Collection.
+      find_all_for_docker_image('a' * 64, nil, [users(:active)])
+    coll_uuids = coll_list.map(&:uuid)
+    assert_includes(coll_uuids, collections(:docker_image).uuid)
+  end
+
+  test "move collections to trash in SweepTrashedObjects" do
+    c = collections(:trashed_on_next_sweep)
+    refute_empty Collection.where('uuid=? and is_trashed=false', c.uuid)
+    assert_raises(ActiveRecord::RecordNotUnique) do
+      act_as_user users(:active) do
+        Collection.create!(owner_uuid: c.owner_uuid,
+                           name: c.name)
+      end
+    end
+    SweepTrashedObjects.sweep_now
+    c = Collection.where('uuid=? and is_trashed=true', c.uuid).first
+    assert c
+    act_as_user users(:active) do
+      assert Collection.create!(owner_uuid: c.owner_uuid,
+                                name: c.name)
+    end
+  end
+
+  test "delete collections in SweepTrashedObjects" do
+    uuid = 'zzzzz-4zz18-3u1p5umicfpqszp' # deleted_on_next_sweep
+    assert_not_empty Collection.where(uuid: uuid)
+    SweepTrashedObjects.sweep_now
+    assert_empty Collection.where(uuid: uuid)
+  end
+
+  test "delete referring links in SweepTrashedObjects" do
+    uuid = collections(:trashed_on_next_sweep).uuid
+    act_as_system_user do
+      Link.create!(head_uuid: uuid,
+                   tail_uuid: system_user_uuid,
+                   link_class: 'whatever',
+                   name: 'something')
+    end
+    past = db_current_time
+    Collection.where(uuid: uuid).
+      update_all(is_trashed: true, trash_at: past, delete_at: past)
+    assert_not_empty Collection.where(uuid: uuid)
+    SweepTrashedObjects.sweep_now
+    assert_empty Collection.where(uuid: uuid)
+  end
+end
diff --git a/services/api/test/unit/commit_ancestor_test.rb b/services/api/test/unit/commit_ancestor_test.rb
new file mode 100644 (file)
index 0000000..4604121
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class CommitAncestorTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/commit_test.rb b/services/api/test/unit/commit_test.rb
new file mode 100644 (file)
index 0000000..af365b1
--- /dev/null
@@ -0,0 +1,270 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/git_test_helper'
+
+# NOTE: calling Commit.find_commit_range(nil, nil, 'rev')
+# produces an error message "fatal: bad object 'rev'" on stderr if
+# 'rev' does not exist in a given repository.  Many of these tests
+# report such errors; their presence does not represent a fatal
+# condition.
+
+class CommitTest < ActiveSupport::TestCase
+  # See git_setup.rb for the commit log for test.git.tar
+  include GitTestHelper
+
+  setup do
+    authorize_with :active
+  end
+
+  test 'find_commit_range does not bypass permissions' do
+    authorize_with :inactive
+    assert_raises ArgumentError do
+      Commit.find_commit_range 'foo', nil, 'master', []
+    end
+  end
+
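+  # IO.read("|cmd") runs cmd in a subshell and returns its stdout; $?
+  # then holds the child's exit status, which must indicate success.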
+  def must_pipe(cmd)
+    begin
+      return IO.read("|#{cmd}")
+    ensure
+      assert $?.success?
+    end
+  end
+
+  [
+   'https://github.com/curoverse/arvados.git',
+   'http://github.com/curoverse/arvados.git',
+   'git://github.com/curoverse/arvados.git',
+  ].each do |url|
+    test "find_commit_range uses fetch_remote_repository to get #{url}" do
+      fake_gitdir = repositories(:foo).server_path
+      Commit.expects(:cache_dir_for).once.with(url).returns fake_gitdir
+      Commit.expects(:fetch_remote_repository).once.with(fake_gitdir, url).returns true
+      c = Commit.find_commit_range url, nil, 'master', []
+      refute_empty c
+    end
+  end
+
+  [
+   'bogus/repo',
+   '/bogus/repo',
+   '/not/allowed/.git',
+   'file:///not/allowed.git',
+   'git.curoverse.com/arvados.git',
+   'github.com/curoverse/arvados.git',
+  ].each do |url|
+    test "find_commit_range skips fetch_remote_repository for #{url}" do
+      Commit.expects(:fetch_remote_repository).never
+      assert_raises ArgumentError do
+        Commit.find_commit_range url, nil, 'master', []
+      end
+    end
+  end
+
+  test 'fetch_remote_repository does not leak commits across repositories' do
+    url = "http://localhost:1/fake/fake.git"
+    fetch_remote_from_local_repo url, :foo
+    c = Commit.find_commit_range url, nil, 'master', []
+    assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57'], c
+
+    url = "http://localhost:2/fake/fake.git"
+    fetch_remote_from_local_repo url, 'file://' + File.expand_path('../../.git', Rails.root)
+    c = Commit.find_commit_range url, nil, '077ba2ad3ea24a929091a9e6ce545c93199b8e57', []
+    assert_equal [], c
+  end
+
+  test 'tag_in_internal_repository creates and updates tags in internal.git' do
+    authorize_with :active
+    gitint = "git --git-dir #{Rails.configuration.git_internal_dir}"
+    IO.read("|#{gitint} tag -d testtag 2>/dev/null") # "no such tag", fine
+    assert_match(/^fatal: /, IO.read("|#{gitint} show testtag 2>&1"))
+    refute $?.success?
+    Commit.tag_in_internal_repository 'active/foo', '31ce37fe365b3dc204300a3e4c396ad333ed0556', 'testtag'
+    assert_match(/^commit 31ce37f/, IO.read("|#{gitint} show testtag"))
+    assert $?.success?
+  end
+
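+  # Run the given block inside the "foo" test repository's working
+  # tree, starting from its master branch.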
+  def with_foo_repository
+    Dir.chdir("#{Rails.configuration.git_repositories_dir}/#{repositories(:foo).uuid}") do
+      must_pipe("git checkout master 2>&1")
+      yield
+    end
+  end
+
+  test 'tag_in_internal_repository, new non-tip sha1 in local repo' do
+    tag = "tag#{rand(10**10)}"
+    sha1 = nil
+    with_foo_repository do
+      must_pipe("git checkout -b branch-#{rand(10**10)} 2>&1")
+      must_pipe("echo -n #{tag.shellescape} >bar")
+      must_pipe("git add bar")
+      must_pipe("git -c user.email=x@x -c user.name=X commit -m -")
+      sha1 = must_pipe("git log -n1 --format=%H").strip
+      must_pipe("git rm bar")
+      must_pipe("git -c user.email=x@x -c user.name=X commit -m -")
+    end
+    Commit.tag_in_internal_repository 'active/foo', sha1, tag
+    gitint = "git --git-dir #{Rails.configuration.git_internal_dir.shellescape}"
+    assert_match(/^commit /, IO.read("|#{gitint} show #{tag.shellescape}"))
+    assert $?.success?
+  end
+
+  test 'tag_in_internal_repository, new unreferenced sha1 in local repo' do
+    tag = "tag#{rand(10**10)}"
+    sha1 = nil
+    with_foo_repository do
+      must_pipe("echo -n #{tag.shellescape} >bar")
+      must_pipe("git add bar")
+      must_pipe("git -c user.email=x@x -c user.name=X commit -m -")
+      sha1 = must_pipe("git log -n1 --format=%H").strip
+      must_pipe("git reset --hard HEAD^")
+    end
+    Commit.tag_in_internal_repository 'active/foo', sha1, tag
+    gitint = "git --git-dir #{Rails.configuration.git_internal_dir.shellescape}"
+    assert_match(/^commit /, IO.read("|#{gitint} show #{tag.shellescape}"))
+    assert $?.success?
+  end
+
+  # In active/shabranchnames, "7387838c69a21827834586cc42b467ff6c63293b" is
+  # both a commit hash, and the name of a branch that begins from that same
+  # commit.
+  COMMIT_BRANCH_NAME = "7387838c69a21827834586cc42b467ff6c63293b"
+  # A commit that appears in the branch after 7387838c.
+  COMMIT_BRANCH_COMMIT_2 = "abec49829bf1758413509b7ffcab32a771b71e81"
+  # "738783" is another branch that starts from the above commit.
+  SHORT_COMMIT_BRANCH_NAME = COMMIT_BRANCH_NAME[0, 6]
+  # A commit that appears in branch 738783 after 7387838c.
+  SHORT_BRANCH_COMMIT_2 = "77e1a93093663705a63bb4d505698047e109dedd"
+
+  test "find_commit_range min_version prefers commits over branch names" do
+    assert_equal([COMMIT_BRANCH_NAME],
+                 Commit.find_commit_range("active/shabranchnames",
+                                          COMMIT_BRANCH_NAME, nil, nil))
+  end
+
+  test "find_commit_range max_version prefers commits over branch names" do
+    assert_equal([COMMIT_BRANCH_NAME],
+                 Commit.find_commit_range("active/shabranchnames",
+                                          nil, COMMIT_BRANCH_NAME, nil))
+  end
+
+  test "find_commit_range min_version with short branch name" do
+    assert_equal([SHORT_BRANCH_COMMIT_2],
+                 Commit.find_commit_range("active/shabranchnames",
+                                          SHORT_COMMIT_BRANCH_NAME, nil, nil))
+  end
+
+  test "find_commit_range max_version with short branch name" do
+    assert_equal([SHORT_BRANCH_COMMIT_2],
+                 Commit.find_commit_range("active/shabranchnames",
+                                          nil, SHORT_COMMIT_BRANCH_NAME, nil))
+  end
+
+  test "find_commit_range min_version with disambiguated branch name" do
+    assert_equal([COMMIT_BRANCH_COMMIT_2],
+                 Commit.find_commit_range("active/shabranchnames",
+                                          "heads/#{COMMIT_BRANCH_NAME}",
+                                          nil, nil))
+  end
+
+  test "find_commit_range max_version with disambiguated branch name" do
+    assert_equal([COMMIT_BRANCH_COMMIT_2],
+                 Commit.find_commit_range("active/shabranchnames", nil,
+                                          "heads/#{COMMIT_BRANCH_NAME}", nil))
+  end
+
+  test "find_commit_range min_version with unambiguous short name" do
+    assert_equal([COMMIT_BRANCH_NAME],
+                 Commit.find_commit_range("active/shabranchnames",
+                                          COMMIT_BRANCH_NAME[0..-2], nil, nil))
+  end
+
+  test "find_commit_range max_version with unambiguous short name" do
+    assert_equal([COMMIT_BRANCH_NAME],
+                 Commit.find_commit_range("active/shabranchnames", nil,
+                                          COMMIT_BRANCH_NAME[0..-2], nil))
+  end
+
+  test "find_commit_range laundry list" do
+    authorize_with :active
+
+    # single
+    a = Commit.find_commit_range('active/foo', nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', nil)
+    assert_equal ['31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
+
+    #test "test_branch1" do
+    a = Commit.find_commit_range('active/foo', nil, 'master', nil)
+    assert_includes(a, '077ba2ad3ea24a929091a9e6ce545c93199b8e57')
+
+    #test "test_branch2" do
+    a = Commit.find_commit_range('active/foo', nil, 'b1', nil)
+    assert_equal ['1de84a854e2b440dc53bf42f8548afa4c17da332'], a
+
+    #test "test_branch3" do
+    a = Commit.find_commit_range('active/foo', nil, 'HEAD', nil)
+    assert_equal ['1de84a854e2b440dc53bf42f8548afa4c17da332'], a
+
+    #test "test_single_revision_repo" do
+    a = Commit.find_commit_range('active/foo', nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', nil)
+    assert_equal ['31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
+    a = Commit.find_commit_range('arvados', nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', nil)
+    assert_equal [], a
+
+    #test "test_multi_revision" do
+    # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
+    a = Commit.find_commit_range('active/foo', '31ce37fe365b3dc204300a3e4c396ad333ed0556', '077ba2ad3ea24a929091a9e6ce545c93199b8e57', nil)
+    assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '4fe459abe02d9b365932b8f5dc419439ab4e2577', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
+
+    #test "test_tag" do
+    # complains "fatal: ambiguous argument 'tag1': unknown revision or path
+    # not in the working tree."
+    a = Commit.find_commit_range('active/foo', 'tag1', 'master', nil)
+    assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '4fe459abe02d9b365932b8f5dc419439ab4e2577'], a
+
+    #test "test_multi_revision_exclude" do
+    a = Commit.find_commit_range('active/foo', '31ce37fe365b3dc204300a3e4c396ad333ed0556', '077ba2ad3ea24a929091a9e6ce545c93199b8e57', ['4fe459abe02d9b365932b8f5dc419439ab4e2577'])
+    assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
+
+    #test "test_multi_revision_tagged_exclude" do
+    # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
+    a = Commit.find_commit_range('active/foo', '31ce37fe365b3dc204300a3e4c396ad333ed0556', '077ba2ad3ea24a929091a9e6ce545c93199b8e57', ['tag1'])
+    assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
+
+    Dir.mktmpdir do |touchdir|
+      # invalid input to maximum
+      a = Commit.find_commit_range('active/foo', nil, "31ce37fe365b3dc204300a3e4c396ad333ed0556 ; touch #{touchdir}/uh_oh", nil)
+      assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'maximum' parameter of find_commit_range is exploitable"
+      assert_equal [], a
+
+      # invalid input to maximum
+      a = Commit.find_commit_range('active/foo', nil, "$(uname>#{touchdir}/uh_oh)", nil)
+      assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'maximum' parameter of find_commit_range is exploitable"
+      assert_equal [], a
+
+      # invalid input to minimum
+      a = Commit.find_commit_range('active/foo', "31ce37fe365b3dc204300a3e4c396ad333ed0556 ; touch #{touchdir}/uh_oh", "31ce37fe365b3dc204300a3e4c396ad333ed0556", nil)
+      assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'minimum' parameter of find_commit_range is exploitable"
+      assert_equal [], a
+
+      # invalid input to minimum
+      a = Commit.find_commit_range('active/foo', "$(uname>#{touchdir}/uh_oh)", "31ce37fe365b3dc204300a3e4c396ad333ed0556", nil)
+      assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'minimum' parameter of find_commit_range is exploitable"
+      assert_equal [], a
+
+      # invalid input to 'excludes'
+      # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
+      a = Commit.find_commit_range('active/foo', "31ce37fe365b3dc204300a3e4c396ad333ed0556", "077ba2ad3ea24a929091a9e6ce545c93199b8e57", ["4fe459abe02d9b365932b8f5dc419439ab4e2577 ; touch #{touchdir}/uh_oh"])
+      assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
+      assert_equal [], a
+
+      # invalid input to 'excludes'
+      # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
+      a = Commit.find_commit_range('active/foo', "31ce37fe365b3dc204300a3e4c396ad333ed0556", "077ba2ad3ea24a929091a9e6ce545c93199b8e57", ["$(uname>#{touchdir}/uh_oh)"])
+      assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
+      assert_equal [], a
+    end
+  end
+end
diff --git a/services/api/test/unit/container_request_test.rb b/services/api/test/unit/container_request_test.rb
new file mode 100644 (file)
index 0000000..5c4a56c
--- /dev/null
@@ -0,0 +1,1230 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/container_test_helper'
+require 'helpers/docker_migration_helper'
+require 'arvados/collection'
+
+class ContainerRequestTest < ActiveSupport::TestCase
+  include DockerMigrationHelper
+  include DbCurrentTime
+  include ContainerTestHelper
+
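+  # Temporarily install the container's API token as the current
+  # authorization, so requests made inside the block appear to come
+  # from that container; the caller's token is restored afterwards.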
+  def with_container_auth(ctr)
+    auth_was = Thread.current[:api_client_authorization]
+    Thread.current[:api_client_authorization] = ApiClientAuthorization.find_by_uuid(ctr.auth_uuid)
+    begin
+      yield
+    ensure
+      Thread.current[:api_client_authorization] = auth_was
+    end
+  end
+
+  def lock_and_run(ctr)
+    act_as_system_user do
+      ctr.update_attributes!(state: Container::Locked)
+      ctr.update_attributes!(state: Container::Running)
+    end
+  end
+
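+  # Build a valid ContainerRequest with minimal defaults; tests
+  # override individual attributes via attrs.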
+  def create_minimal_req! attrs={}
+    defaults = {
+      command: ["echo", "foo"],
+      container_image: links(:docker_image_collection_tag).name,
+      cwd: "/tmp",
+      environment: {},
+      mounts: {"/out" => {"kind" => "tmp", "capacity" => 1000000}},
+      output_path: "/out",
+      runtime_constraints: {"vcpus" => 1, "ram" => 2},
+      name: "foo",
+      description: "bar",
+    }
+    cr = ContainerRequest.create!(defaults.merge(attrs))
+    cr.reload
+    return cr
+  end
+
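+  # A nil state and an unrecognized state name must both be rejected.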
+  def check_bogus_states cr
+    [nil, "Flubber"].each do |state|
+      assert_raises(ActiveRecord::RecordInvalid) do
+        cr.state = state
+        cr.save!
+      end
+      cr.reload
+    end
+  end
+
+  test "Container request create" do
+    set_user_from_auth :active
+    cr = create_minimal_req!
+
+    assert_nil cr.container_uuid
+    assert_equal 0, cr.priority
+
+    check_bogus_states cr
+
+    # Ensure we can modify all attributes
+    cr.command = ["echo", "foo3"]
+    cr.container_image = "img3"
+    cr.cwd = "/tmp3"
+    cr.environment = {"BUP" => "BOP"}
+    cr.mounts = {"BAR" => {"kind" => "BAZ"}}
+    cr.output_path = "/tmp4"
+    cr.priority = 2
+    cr.runtime_constraints = {"vcpus" => 4}
+    cr.name = "foo3"
+    cr.description = "bar3"
+    cr.save!
+
+    assert_nil cr.container_uuid
+  end
+
+  [
+    {"runtime_constraints" => {"vcpus" => 1}},
+    {"runtime_constraints" => {"vcpus" => 1, "ram" => nil}},
+    {"runtime_constraints" => {"vcpus" => 0, "ram" => 123}},
+    {"runtime_constraints" => {"vcpus" => "1", "ram" => "123"}},
+    {"mounts" => {"FOO" => "BAR"}},
+    {"mounts" => {"FOO" => {}}},
+    {"mounts" => {"FOO" => {"kind" => "tmp", "capacity" => 42.222}}},
+    {"command" => ["echo", 55]},
+    {"environment" => {"FOO" => 55}}
+  ].each do |value|
+    test "Create with invalid #{value}" do
+      set_user_from_auth :active
+      assert_raises(ActiveRecord::RecordInvalid) do
+        cr = create_minimal_req!({state: "Committed",
+               priority: 1}.merge(value))
+        cr.save!
+      end
+    end
+
+    test "Update with invalid #{value}" do
+      set_user_from_auth :active
+      cr = create_minimal_req!(state: "Uncommitted", priority: 1)
+      cr.save!
+      assert_raises(ActiveRecord::RecordInvalid) do
+        cr = ContainerRequest.find_by_uuid cr.uuid
+        cr.update_attributes!({state: "Committed",
+                               priority: 1}.merge(value))
+      end
+    end
+  end
+
+  test "Update from fixture" do
+    set_user_from_auth :active
+    cr = ContainerRequest.find_by_uuid(container_requests(:running).uuid)
+    cr.update_attributes!(description: "New description")
+    assert_equal "New description", cr.description
+  end
+
+  test "Update with valid runtime constraints" do
+      set_user_from_auth :active
+      cr = create_minimal_req!(state: "Uncommitted", priority: 1)
+      cr.save!
+      cr = ContainerRequest.find_by_uuid cr.uuid
+      cr.update_attributes!(state: "Committed",
+                            runtime_constraints: {"vcpus" => 1, "ram" => 23})
+      assert_not_nil cr.container_uuid
+  end
+
+  test "Container request priority must be non-nil" do
+    set_user_from_auth :active
+    cr = create_minimal_req!
+    cr.priority = nil
+    cr.state = "Committed"
+    assert_raises(ActiveRecord::RecordInvalid) do
+      cr.save!
+    end
+  end
+
+  test "Container request commit" do
+    set_user_from_auth :active
+    cr = create_minimal_req!(runtime_constraints: {"vcpus" => 2, "ram" => 30})
+
+    assert_nil cr.container_uuid
+
+    cr.reload
+    cr.state = "Committed"
+    cr.priority = 1
+    cr.save!
+
+    cr.reload
+
+    assert_equal({"vcpus" => 2, "ram" => 30}, cr.runtime_constraints)
+
+    assert_not_nil cr.container_uuid
+    c = Container.find_by_uuid cr.container_uuid
+    assert_not_nil c
+    assert_equal ["echo", "foo"], c.command
+    assert_equal collections(:docker_image).portable_data_hash, c.container_image
+    assert_equal "/tmp", c.cwd
+    assert_equal({}, c.environment)
+    assert_equal({"/out" => {"kind"=>"tmp", "capacity"=>1000000}}, c.mounts)
+    assert_equal "/out", c.output_path
+    assert_equal({"keep_cache_ram"=>268435456, "vcpus" => 2, "ram" => 30}, c.runtime_constraints)
+    assert_operator 0, :<, c.priority
+
+    assert_raises(ActiveRecord::RecordInvalid) do
+      cr.priority = nil
+      cr.save!
+    end
+
+    cr.priority = 0
+    cr.save!
+
+    cr.reload
+    c.reload
+    assert_equal 0, cr.priority
+    assert_equal 0, c.priority
+  end
+
+  test "Independent container requests" do
+    set_user_from_auth :active
+    cr1 = create_minimal_req!(command: ["foo", "1"], priority: 5, state: "Committed")
+    cr2 = create_minimal_req!(command: ["foo", "2"], priority: 10, state: "Committed")
+
+    c1 = Container.find_by_uuid cr1.container_uuid
+    assert_operator 0, :<, c1.priority
+
+    c2 = Container.find_by_uuid cr2.container_uuid
+    assert_operator c1.priority, :<, c2.priority
+    c2priority_was = c2.priority
+
+    cr1.update_attributes!(priority: 0)
+
+    c1.reload
+    assert_equal 0, c1.priority
+
+    c2.reload
+    assert_equal c2priority_was, c2.priority
+  end
+
+  test "Request is finalized when its container is cancelled" do
+    set_user_from_auth :active
+    cr = create_minimal_req!(priority: 1, state: "Committed", container_count_max: 1)
+    assert_equal users(:active).uuid, cr.modified_by_user_uuid
+
+    act_as_system_user do
+      Container.find_by_uuid(cr.container_uuid).
+        update_attributes!(state: Container::Cancelled)
+    end
+
+    cr.reload
+    assert_equal "Final", cr.state
+    assert_equal users(:active).uuid, cr.modified_by_user_uuid
+  end
+
+  test "Request is finalized when its container is completed" do
+    set_user_from_auth :active
+    project = groups(:private)
+    cr = create_minimal_req!(owner_uuid: project.uuid,
+                             priority: 1,
+                             state: "Committed")
+    assert_equal users(:active).uuid, cr.modified_by_user_uuid
+
+    c = act_as_system_user do
+      c = Container.find_by_uuid(cr.container_uuid)
+      c.update_attributes!(state: Container::Locked)
+      c.update_attributes!(state: Container::Running)
+      c
+    end
+
+    cr.reload
+    assert_equal "Committed", cr.state
+
+    output_pdh = '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
+    log_pdh = 'fa7aeb5140e2848d39b416daeef4ffc5+45'
+    act_as_system_user do
+      c.update_attributes!(state: Container::Complete,
+                           output: output_pdh,
+                           log: log_pdh)
+    end
+
+    cr.reload
+    assert_equal "Final", cr.state
+    assert_equal users(:active).uuid, cr.modified_by_user_uuid
+
+    assert_not_nil cr.output_uuid
+    assert_not_nil cr.log_uuid
+    output = Collection.find_by_uuid cr.output_uuid
+    assert_equal output_pdh, output.portable_data_hash
+    assert_equal output.owner_uuid, project.uuid, "Container output should be copied to #{project.uuid}"
+
+    log = Collection.find_by_uuid cr.log_uuid
+    assert_equal log.manifest_text, ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar
+./log\\040for\\040container\\040#{cr.container_uuid} 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+
+    assert_equal log.owner_uuid, project.uuid, "Container log should be copied to #{project.uuid}"
+  end
+
+  test "Container makes container request, then is cancelled" do
+    set_user_from_auth :active
+    cr = create_minimal_req!(priority: 5, state: "Committed", container_count_max: 1)
+
+    c = Container.find_by_uuid cr.container_uuid
+    assert_operator 0, :<, c.priority
+    lock_and_run(c)
+
+    cr2 = with_container_auth(c) do
+      create_minimal_req!(priority: 10, state: "Committed", container_count_max: 1, command: ["echo", "foo2"])
+    end
+    assert_not_nil cr2.requesting_container_uuid
+    assert_equal users(:active).uuid, cr2.modified_by_user_uuid
+
+    c2 = Container.find_by_uuid cr2.container_uuid
+    assert_operator 0, :<, c2.priority
+
+    act_as_system_user do
+      c.state = "Cancelled"
+      c.save!
+    end
+
+    cr.reload
+    assert_equal "Final", cr.state
+
+    cr2.reload
+    assert_equal 0, cr2.priority
+    assert_equal users(:active).uuid, cr2.modified_by_user_uuid
+
+    c2.reload
+    assert_equal 0, c2.priority
+  end
+
+  test "child container priority follows same ordering as corresponding top-level ancestors" do
+    findctr = lambda { |cr| Container.find_by_uuid(cr.container_uuid) }
+
+    set_user_from_auth :active
+
+    toplevel_crs = [
+      create_minimal_req!(priority: 5, state: "Committed", environment: {"workflow" => "0"}),
+      create_minimal_req!(priority: 5, state: "Committed", environment: {"workflow" => "1"}),
+      create_minimal_req!(priority: 5, state: "Committed", environment: {"workflow" => "2"}),
+    ]
+    parents = toplevel_crs.map(&findctr)
+
+    children = parents.map do |parent|
+      lock_and_run(parent)
+      with_container_auth(parent) do
+        create_minimal_req!(state: "Committed",
+                            priority: 1,
+                            environment: {"child" => parent.environment["workflow"]})
+      end
+    end.map(&findctr)
+
+    grandchildren = children.reverse.map do |child|
+      lock_and_run(child)
+      with_container_auth(child) do
+        create_minimal_req!(state: "Committed",
+                            priority: 1,
+                            environment: {"grandchild" => child.environment["child"]})
+      end
+    end.reverse.map(&findctr)
+
+    shared_grandchildren = children.map do |child|
+      with_container_auth(child) do
+        create_minimal_req!(state: "Committed",
+                            priority: 1,
+                            environment: {"grandchild" => "shared"})
+      end
+    end.map(&findctr)
+
+    assert_equal shared_grandchildren[0].uuid, shared_grandchildren[1].uuid
+    assert_equal shared_grandchildren[0].uuid, shared_grandchildren[2].uuid
+    shared_grandchild = shared_grandchildren[0]
+
+    set_user_from_auth :active
+
+    # parents should be prioritized by submit time.
+    assert_operator parents[0].priority, :>, parents[1].priority
+    assert_operator parents[1].priority, :>, parents[2].priority
+
+    # children should be prioritized in same order as their respective
+    # parents.
+    assert_operator children[0].priority, :>, children[1].priority
+    assert_operator children[1].priority, :>, children[2].priority
+
+    # grandchildren should also be prioritized in the same order,
+    # despite having been submitted in the opposite order.
+    assert_operator grandchildren[0].priority, :>, grandchildren[1].priority
+    assert_operator grandchildren[1].priority, :>, grandchildren[2].priority
+
+    # shared grandchild container should be prioritized above
+    # everything that isn't needed by parents[0], but not above
+    # earlier-submitted descendants of parents[0]
+    assert_operator shared_grandchild.priority, :>, grandchildren[1].priority
+    assert_operator shared_grandchild.priority, :>, children[1].priority
+    assert_operator shared_grandchild.priority, :>, parents[1].priority
+    assert_operator shared_grandchild.priority, :<=, grandchildren[0].priority
+    assert_operator shared_grandchild.priority, :<=, children[0].priority
+    assert_operator shared_grandchild.priority, :<=, parents[0].priority
+
+    # increasing priority of the most recent toplevel container should
+    # reprioritize all of its descendants (including the shared
+    # grandchild) above everything else.
+    toplevel_crs[2].update_attributes!(priority: 72)
+    (parents + children + grandchildren + [shared_grandchild]).map(&:reload)
+    assert_operator shared_grandchild.priority, :>, grandchildren[0].priority
+    assert_operator shared_grandchild.priority, :>, children[0].priority
+    assert_operator shared_grandchild.priority, :>, parents[0].priority
+    assert_operator shared_grandchild.priority, :>, grandchildren[1].priority
+    assert_operator shared_grandchild.priority, :>, children[1].priority
+    assert_operator shared_grandchild.priority, :>, parents[1].priority
+    # ...but the shared container should not have higher priority than
+    # the earlier-submitted descendants of the high-priority workflow.
+    assert_operator shared_grandchild.priority, :<=, grandchildren[2].priority
+    assert_operator shared_grandchild.priority, :<=, children[2].priority
+    assert_operator shared_grandchild.priority, :<=, parents[2].priority
+  end
+
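+  # [auth token fixture, expected requesting_container_uuid, expected priority]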
+  [
+    ['running_container_auth', 'zzzzz-dz642-runningcontainr', 501],
+    ['active_no_prefs', nil, 0]
+  ].each do |token, expected, expected_priority|
+    test "create as #{token} and expect requesting_container_uuid to be #{expected}" do
+      set_user_from_auth token
+      cr = ContainerRequest.create(container_image: "img", output_path: "/tmp", command: ["echo", "foo"])
+      assert_not_nil cr.uuid, 'uuid should be set for newly created container_request'
+      assert_equal expected, cr.requesting_container_uuid
+      assert_equal expected_priority, cr.priority
+    end
+  end
+
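+  # A runtime token identifies its container by a trailing
+  # "/<container uuid>" component; simulate that here and expect the
+  # created request to be attributed to that container.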
+  test "create as container_runtime_token and expect requesting_container_uuid to be zzzzz-dz642-20isqbkl8xwnsao" do
+    set_user_from_auth :container_runtime_token
+    Thread.current[:token] = "#{Thread.current[:token]}/zzzzz-dz642-20isqbkl8xwnsao"
+    cr = ContainerRequest.create(container_image: "img", output_path: "/tmp", command: ["echo", "foo"])
+    assert_not_nil cr.uuid, 'uuid should be set for newly created container_request'
+    assert_equal 'zzzzz-dz642-20isqbkl8xwnsao', cr.requesting_container_uuid
+    assert_equal 1, cr.priority
+  end
+
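+  # Ranged runtime constraints resolve to the minimum of the range;
+  # scalar values pass through unchanged.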
+  [[{"vcpus" => [2, nil]},
+    lambda { |resolved| resolved["vcpus"] == 2 }],
+   [{"vcpus" => [3, 7]},
+    lambda { |resolved| resolved["vcpus"] == 3 }],
+   [{"vcpus" => 4},
+    lambda { |resolved| resolved["vcpus"] == 4 }],
+   [{"ram" => [1000000000, 2000000000]},
+    lambda { |resolved| resolved["ram"] == 1000000000 }],
+   [{"ram" => [1234234234]},
+    lambda { |resolved| resolved["ram"] == 1234234234 }],
+  ].each do |rc, okfunc|
+    test "resolve runtime constraint range #{rc} to values" do
+      resolved = Container.resolve_runtime_constraints(rc)
+      assert(okfunc.call(resolved),
+             "container runtime_constraints was #{resolved.inspect}")
+    end
+  end
+
+  [[{"/out" => {
+        "kind" => "collection",
+        "uuid" => "zzzzz-4zz18-znfnqtbbv4spc3w",
+        "path" => "/foo"}},
+    lambda do |resolved|
+      resolved["/out"] == {
+        "portable_data_hash" => "1f4b0bc7583c2a7f9102c395f4ffc5e3+45",
+        "kind" => "collection",
+        "path" => "/foo",
+      }
+    end],
+   [{"/out" => {
+        "kind" => "collection",
+        "uuid" => "zzzzz-4zz18-znfnqtbbv4spc3w",
+        "portable_data_hash" => "1f4b0bc7583c2a7f9102c395f4ffc5e3+45",
+        "path" => "/foo"}},
+    lambda do |resolved|
+      resolved["/out"] == {
+        "portable_data_hash" => "1f4b0bc7583c2a7f9102c395f4ffc5e3+45",
+        "kind" => "collection",
+        "path" => "/foo",
+      }
+    end],
+   [{"/out" => {
+      "kind" => "collection",
+      "portable_data_hash" => "1f4b0bc7583c2a7f9102c395f4ffc5e3+45",
+      "path" => "/foo"}},
+    lambda do |resolved|
+      resolved["/out"] == {
+        "portable_data_hash" => "1f4b0bc7583c2a7f9102c395f4ffc5e3+45",
+        "kind" => "collection",
+        "path" => "/foo",
+      }
+    end],
+    # Empty collection
+    [{"/out" => {
+      "kind" => "collection",
+      "path" => "/foo"}},
+    lambda do |resolved|
+      resolved["/out"] == {
+        "kind" => "collection",
+        "path" => "/foo",
+      }
+    end],
+  ].each do |mounts, okfunc|
+    test "resolve mounts #{mounts.inspect} to values" do
+      set_user_from_auth :active
+      resolved = Container.resolve_mounts(mounts)
+      assert(okfunc.call(resolved),
+             "Container.resolve_mounts returned #{resolved.inspect}")
+    end
+  end
+
+  test 'mount unreadable collection' do
+    set_user_from_auth :spectator
+    m = {
+      "/foo" => {
+        "kind" => "collection",
+        "uuid" => "zzzzz-4zz18-znfnqtbbv4spc3w",
+        "path" => "/foo",
+      },
+    }
+    assert_raises(ArvadosModel::UnresolvableContainerError) do
+      Container.resolve_mounts(m)
+    end
+  end
+
+  test 'mount collection with mismatched UUID and PDH' do
+    set_user_from_auth :active
+    m = {
+      "/foo" => {
+        "kind" => "collection",
+        "uuid" => "zzzzz-4zz18-znfnqtbbv4spc3w",
+        "portable_data_hash" => "fa7aeb5140e2848d39b416daeef4ffc5+45",
+        "path" => "/foo",
+      },
+    }
+    resolved_mounts = Container.resolve_mounts(m)
+    # Compare the nested mount entries; the top-level hashes are keyed
+    # by mount point, so indexing them directly would compare nil to nil.
+    assert_equal m['/foo']['portable_data_hash'],
+                 resolved_mounts['/foo']['portable_data_hash']
+  end
+
+  ['arvados/apitestfixture:latest',
+   'arvados/apitestfixture',
+   'd8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678',
+  ].each do |tag|
+    test "Container.resolve_container_image(#{tag.inspect})" do
+      set_user_from_auth :active
+      resolved = Container.resolve_container_image(tag)
+      assert_equal resolved, collections(:docker_image).portable_data_hash
+    end
+  end
+
+  test "Container.resolve_container_image(pdh)" do
+    set_user_from_auth :active
+    [[:docker_image, 'v1'], [:docker_image_1_12, 'v2']].each do |coll, ver|
+      Rails.configuration.docker_image_formats = [ver]
+      pdh = collections(coll).portable_data_hash
+      resolved = Container.resolve_container_image(pdh)
+      assert_equal resolved, pdh
+    end
+  end
+
+  ['acbd18db4cc2f85cedef654fccc4a4d8+3',
+   'ENOEXIST',
+   'arvados/apitestfixture:ENOEXIST',
+  ].each do |img|
+    test "container_image_for_container(#{img.inspect}) => 422" do
+      set_user_from_auth :active
+      assert_raises(ArvadosModel::UnresolvableContainerError) do
+        Container.resolve_container_image(img)
+      end
+    end
+  end
+
+  test "allow unrecognized container when there are remote_hosts" do
+    set_user_from_auth :active
+    Rails.configuration.remote_hosts = {"foooo" => "bar.com"}
+    Container.resolve_container_image('acbd18db4cc2f85cedef654fccc4a4d8+3')
+  end
+
+  test "migrated docker image" do
+    Rails.configuration.docker_image_formats = ['v2']
+    add_docker19_migration_link
+
+    # Test that it returns only v2 images even though the request is for a v1 image.
+
+    set_user_from_auth :active
+    cr = create_minimal_req!(command: ["true", "1"],
+                             container_image: collections(:docker_image).portable_data_hash)
+    assert_equal(Container.resolve_container_image(cr.container_image),
+                 collections(:docker_image_1_12).portable_data_hash)
+
+    cr = create_minimal_req!(command: ["true", "2"],
+                             container_image: links(:docker_image_collection_tag).name)
+    assert_equal(Container.resolve_container_image(cr.container_image),
+                 collections(:docker_image_1_12).portable_data_hash)
+  end
+
+  test "use unmigrated docker image" do
+    Rails.configuration.docker_image_formats = ['v1']
+    add_docker19_migration_link
+
+    # Test that it returns only supported v1 images even though there is a
+    # migration link.
+
+    set_user_from_auth :active
+    cr = create_minimal_req!(command: ["true", "1"],
+                             container_image: collections(:docker_image).portable_data_hash)
+    assert_equal(Container.resolve_container_image(cr.container_image),
+                 collections(:docker_image).portable_data_hash)
+
+    cr = create_minimal_req!(command: ["true", "2"],
+                             container_image: links(:docker_image_collection_tag).name)
+    assert_equal(Container.resolve_container_image(cr.container_image),
+                 collections(:docker_image).portable_data_hash)
+  end
+
+  test "incompatible docker image v1" do
+    Rails.configuration.docker_image_formats = ['v1']
+    add_docker19_migration_link
+
+    # Don't return unsupported v2 image even if we ask for it directly.
+    set_user_from_auth :active
+    cr = create_minimal_req!(command: ["true", "1"],
+                             container_image: collections(:docker_image_1_12).portable_data_hash)
+    assert_raises(ArvadosModel::UnresolvableContainerError) do
+      Container.resolve_container_image(cr.container_image)
+    end
+  end
+
+  test "incompatible docker image v2" do
+    Rails.configuration.docker_image_formats = ['v2']
+    # No migration link: don't return the unsupported v1 image.
+
+    set_user_from_auth :active
+    cr = create_minimal_req!(command: ["true", "1"],
+                             container_image: collections(:docker_image).portable_data_hash)
+    assert_raises(ArvadosModel::UnresolvableContainerError) do
+      Container.resolve_container_image(cr.container_image)
+    end
+    cr = create_minimal_req!(command: ["true", "2"],
+                             container_image: links(:docker_image_collection_tag).name)
+    assert_raises(ArvadosModel::UnresolvableContainerError) do
+      Container.resolve_container_image(cr.container_image)
+    end
+  end
+
+  test "requestor can retrieve container owned by dispatch" do
+    assert_not_empty Container.readable_by(users(:admin)).where(uuid: containers(:running).uuid)
+    assert_not_empty Container.readable_by(users(:active)).where(uuid: containers(:running).uuid)
+    assert_empty Container.readable_by(users(:spectator)).where(uuid: containers(:running).uuid)
+  end
+
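+  # Reuse matrix: [environment of first request, environment of second
+  # request, use_existing flag (nil = default)].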
+  [
+    [{"var" => "value1"}, {"var" => "value1"}, nil],
+    [{"var" => "value1"}, {"var" => "value1"}, true],
+    [{"var" => "value1"}, {"var" => "value1"}, false],
+    [{"var" => "value1"}, {"var" => "value2"}, nil],
+  ].each do |env1, env2, use_existing|
+    test "Container request #{((env1 == env2) and (use_existing.nil? or use_existing == true)) ? 'does' : 'does not'} reuse container when committed#{use_existing.nil? ? '' : use_existing ? ' and use_existing == true' : ' and use_existing == false'}" do
+      common_attrs = {cwd: "test",
+                      priority: 1,
+                      command: ["echo", "hello"],
+                      output_path: "test",
+                      runtime_constraints: {"vcpus" => 4,
+                                            "ram" => 12000000000},
+                      mounts: {"test" => {"kind" => "json"}}}
+      set_user_from_auth :active
+      cr1 = create_minimal_req!(common_attrs.merge({state: ContainerRequest::Committed,
+                                                    environment: env1}))
+      if use_existing.nil?
+        # Testing with use_existing default value
+        cr2 = create_minimal_req!(common_attrs.merge({state: ContainerRequest::Uncommitted,
+                                                      environment: env2}))
+      else
+        cr2 = create_minimal_req!(common_attrs.merge({state: ContainerRequest::Uncommitted,
+                                                      environment: env2,
+                                                      use_existing: use_existing}))
+      end
+      assert_not_nil cr1.container_uuid
+      assert_nil cr2.container_uuid
+
+      # Update cr2 to committed state and check for container equality in different cases:
+      # * When env1 and env2 are equal and use_existing is true, the same container
+      #   should be assigned.
+      # * When use_existing is false, a different container should be assigned.
+      # * When env1 and env2 are different, a different container should be assigned.
+      cr2.update_attributes!({state: ContainerRequest::Committed})
+      assert_equal((cr2.use_existing == true and env1 == env2),
+                   (cr1.container_uuid == cr2.container_uuid))
+    end
+  end
+
+  test "requesting_container_uuid at create is not allowed" do
+    set_user_from_auth :active
+    assert_raises(ActiveRecord::RecordInvalid) do
+      create_minimal_req!(state: "Uncommitted", priority: 1, requesting_container_uuid: 'youcantdothat')
+    end
+  end
+
+  test "Retry on container cancelled" do
+    set_user_from_auth :active
+    cr = create_minimal_req!(priority: 1, state: "Committed", container_count_max: 2)
+    cr2 = create_minimal_req!(priority: 1, state: "Committed", container_count_max: 2, command: ["echo", "baz"])
+    prev_container_uuid = cr.container_uuid
+
+    c = act_as_system_user do
+      c = Container.find_by_uuid(cr.container_uuid)
+      c.update_attributes!(state: Container::Locked)
+      c.update_attributes!(state: Container::Running)
+      c
+    end
+
+    cr.reload
+    cr2.reload
+    assert_equal "Committed", cr.state
+    assert_equal prev_container_uuid, cr.container_uuid
+    assert_not_equal cr2.container_uuid, cr.container_uuid
+    prev_container_uuid = cr.container_uuid
+
+    act_as_system_user do
+      c.update_attributes!(state: Container::Cancelled)
+    end
+
+    cr.reload
+    cr2.reload
+    assert_equal "Committed", cr.state
+    assert_not_equal prev_container_uuid, cr.container_uuid
+    assert_not_equal cr2.container_uuid, cr.container_uuid
+    prev_container_uuid = cr.container_uuid
+
+    c = act_as_system_user do
+      c = Container.find_by_uuid(cr.container_uuid)
+      c.update_attributes!(state: Container::Cancelled)
+      c
+    end
+
+    cr.reload
+    cr2.reload
+    assert_equal "Final", cr.state
+    assert_equal prev_container_uuid, cr.container_uuid
+    assert_not_equal cr2.container_uuid, cr.container_uuid
+  end
+
+  test "Retry on container cancelled with runtime_token" do
+    set_user_from_auth :spectator
+    spec = api_client_authorizations(:active)
+    cr = create_minimal_req!(priority: 1, state: "Committed",
+                             runtime_token: spec.token,
+                             container_count_max: 2)
+    prev_container_uuid = cr.container_uuid
+
+    c = act_as_system_user do
+      c = Container.find_by_uuid(cr.container_uuid)
+      assert_equal spec.token, c.runtime_token
+      c.update_attributes!(state: Container::Locked)
+      c.update_attributes!(state: Container::Running)
+      c
+    end
+
+    cr.reload
+    assert_equal "Committed", cr.state
+    assert_equal prev_container_uuid, cr.container_uuid
+    prev_container_uuid = cr.container_uuid
+
+    act_as_system_user do
+      c.update_attributes!(state: Container::Cancelled)
+    end
+
+    cr.reload
+    assert_equal "Committed", cr.state
+    assert_not_equal prev_container_uuid, cr.container_uuid
+    prev_container_uuid = cr.container_uuid
+
+    c = act_as_system_user do
+      c = Container.find_by_uuid(cr.container_uuid)
+      assert_equal spec.token, c.runtime_token
+      c.update_attributes!(state: Container::Cancelled)
+      c
+    end
+
+    cr.reload
+    assert_equal "Final", cr.state
+    assert_equal prev_container_uuid, cr.container_uuid
+  end
+
+  test "Retry saves logs from previous attempts" do
+    set_user_from_auth :active
+    cr = create_minimal_req!(priority: 1, state: "Committed", container_count_max: 3)
+
+    c = act_as_system_user do
+      c = Container.find_by_uuid(cr.container_uuid)
+      c.update_attributes!(state: Container::Locked)
+      c.update_attributes!(state: Container::Running)
+      c
+    end
+
+    container_uuids = []
+
+    [0, 1, 2].each do
+      cr.reload
+      assert_equal "Committed", cr.state
+      container_uuids << cr.container_uuid
+
+      c = act_as_system_user do
+        logc = Collection.new(manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n")
+        logc.save!
+        c = Container.find_by_uuid(cr.container_uuid)
+        c.update_attributes!(state: Container::Cancelled, log: logc.portable_data_hash)
+        c
+      end
+    end
+
+    container_uuids.sort!
+
+    cr.reload
+    assert_equal "Final", cr.state
+    assert_equal 3, cr.container_count
+    assert_equal ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar
+./log\\040for\\040container\\040#{container_uuids[0]} 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar
+./log\\040for\\040container\\040#{container_uuids[1]} 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar
+./log\\040for\\040container\\040#{container_uuids[2]} 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar
+" , Collection.find_by_uuid(cr.log_uuid).manifest_text
+
+  end
+
+  test "Output collection name setting using output_name with name collision resolution" do
+    set_user_from_auth :active
+    output_name = 'unimaginative name'
+    Collection.create!(name: output_name)
+
+    cr = create_minimal_req!(priority: 1,
+                             state: ContainerRequest::Committed,
+                             output_name: output_name)
+    run_container(cr)
+    cr.reload
+    assert_equal ContainerRequest::Final, cr.state
+    output_coll = Collection.find_by_uuid(cr.output_uuid)
+    # Make sure the resulting output collection name includes the original
+    # name plus the date
+    assert_not_equal output_name, output_coll.name,
+                     "more than one collection with the same owner and name"
+    assert output_coll.name.include?(output_name),
+           "New name should include original name"
+    assert_match(/\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z/, output_coll.name,
+                 "New name should include ISO8601 date")
+  end
+
+  [[0, :check_output_ttl_0],
+   [1, :check_output_ttl_1s],
+   [365*86400, :check_output_ttl_1y],
+  ].each do |ttl, checker|
+    test "output_ttl=#{ttl}" do
+      act_as_user users(:active) do
+        cr = create_minimal_req!(priority: 1,
+                                 state: ContainerRequest::Committed,
+                                 output_name: 'foo',
+                                 output_ttl: ttl)
+        run_container(cr)
+        cr.reload
+        output = Collection.find_by_uuid(cr.output_uuid)
+        send(checker, db_current_time, output.trash_at, output.delete_at)
+      end
+    end
+  end
+
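+  # TTL checkers: a zero TTL keeps the output indefinitely; a short TTL
+  # still keeps delete_at at least blob_signature_ttl in the future; a
+  # long TTL sets trash_at and delete_at together.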
+  def check_output_ttl_0(now, trash, delete)
+    assert_nil(trash)
+    assert_nil(delete)
+  end
+
+  def check_output_ttl_1s(now, trash, delete)
+    assert_not_nil(trash)
+    assert_not_nil(delete)
+    assert_in_delta(trash, now + 1.second, 10)
+    assert_in_delta(delete, now + Rails.configuration.blob_signature_ttl.second, 10)
+  end
+
+  def check_output_ttl_1y(now, trash, delete)
+    year = (86400*365).second
+    assert_not_nil(trash)
+    assert_not_nil(delete)
+    assert_in_delta(trash, now + year, 10)
+    assert_in_delta(delete, now + year, 10)
+  end
+
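+  # Drive the request's assigned container through a successful
+  # lifecycle (Locked -> Running -> Complete) as the system user.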
+  def run_container(cr)
+    act_as_system_user do
+      c = Container.find_by_uuid(cr.container_uuid)
+      c.update_attributes!(state: Container::Locked)
+      c.update_attributes!(state: Container::Running)
+      c.update_attributes!(state: Container::Complete,
+                           exit_code: 0,
+                           output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45',
+                           log: 'fa7aeb5140e2848d39b416daeef4ffc5+45')
+      c
+    end
+  end
+
+  test "Finalize committed request when reusing a finished container" do
+    set_user_from_auth :active
+    cr = create_minimal_req!(priority: 1, state: ContainerRequest::Committed)
+    cr.reload
+    assert_equal ContainerRequest::Committed, cr.state
+    run_container(cr)
+    cr.reload
+    assert_equal ContainerRequest::Final, cr.state
+
+    cr2 = create_minimal_req!(priority: 1, state: ContainerRequest::Committed)
+    assert_equal cr.container_uuid, cr2.container_uuid
+    assert_equal ContainerRequest::Final, cr2.state
+
+    cr3 = create_minimal_req!(priority: 1, state: ContainerRequest::Uncommitted)
+    assert_equal ContainerRequest::Uncommitted, cr3.state
+    cr3.update_attributes!(state: ContainerRequest::Committed)
+    assert_equal cr.container_uuid, cr3.container_uuid
+    assert_equal ContainerRequest::Final, cr3.state
+  end
+
+  [
+    [false, ActiveRecord::RecordInvalid],
+    [true, nil],
+  ].each do |preemptible_conf, expected|
+    test "having Rails.configuration.preemptible_instances=#{preemptible_conf}, create preemptible container request and verify #{expected}" do
+      sp = {"preemptible" => true}
+      common_attrs = {cwd: "test",
+                      priority: 1,
+                      command: ["echo", "hello"],
+                      output_path: "test",
+                      scheduling_parameters: sp,
+                      mounts: {"test" => {"kind" => "json"}}}
+      Rails.configuration.preemptible_instances = preemptible_conf
+      set_user_from_auth :active
+
+      cr = create_minimal_req!(common_attrs)
+      cr.state = ContainerRequest::Committed
+
+      if !expected.nil?
+        assert_raises(expected) do
+          cr.save!
+        end
+      else
+        cr.save!
+        assert_equal sp, cr.scheduling_parameters
+      end
+    end
+  end
+
+  [
+    'zzzzz-dz642-runningcontainr',
+    nil,
+  ].each do |requesting_c|
+    test "having preemptible instances active on the API server, a committed #{requesting_c.nil? ? 'non-':''}child CR should not ask for preemptible instance if parameter already set to false" do
+      common_attrs = {cwd: "test",
+                      priority: 1,
+                      command: ["echo", "hello"],
+                      output_path: "test",
+                      scheduling_parameters: {"preemptible" => false},
+                      mounts: {"test" => {"kind" => "json"}}}
+
+      Rails.configuration.preemptible_instances = true
+      set_user_from_auth :active
+
+      if requesting_c
+        cr = with_container_auth(Container.find_by_uuid requesting_c) do
+          create_minimal_req!(common_attrs)
+        end
+        assert_not_nil cr.requesting_container_uuid
+      else
+        cr = create_minimal_req!(common_attrs)
+      end
+
+      cr.state = ContainerRequest::Committed
+      cr.save!
+
+      assert_equal false, cr.scheduling_parameters['preemptible']
+    end
+  end
+
+  [
+    [true, 'zzzzz-dz642-runningcontainr', true],
+    [true, nil, nil],
+    [false, 'zzzzz-dz642-runningcontainr', nil],
+    [false, nil, nil],
+  ].each do |preemptible_conf, requesting_c, schedule_preemptible|
+    test "having Rails.configuration.preemptible_instances=#{preemptible_conf}, #{requesting_c.nil? ? 'non-':''}child CR should #{schedule_preemptible ? '':'not'} ask for preemptible instance by default" do
+      common_attrs = {cwd: "test",
+                      priority: 1,
+                      command: ["echo", "hello"],
+                      output_path: "test",
+                      mounts: {"test" => {"kind" => "json"}}}
+
+      Rails.configuration.preemptible_instances = preemptible_conf
+      set_user_from_auth :active
+
+      if requesting_c
+        cr = with_container_auth(Container.find_by_uuid requesting_c) do
+          create_minimal_req!(common_attrs)
+        end
+        assert_not_nil cr.requesting_container_uuid
+      else
+        cr = create_minimal_req!(common_attrs)
+      end
+
+      cr.state = ContainerRequest::Committed
+      cr.save!
+
+      assert_equal schedule_preemptible, cr.scheduling_parameters['preemptible']
+    end
+  end
+
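+  # [scheduling_parameters, request state, expected error (absent = valid)]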
+  [
+    [{"partitions" => ["fastcpu","vfastcpu", 100]}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],
+    [{"partitions" => ["fastcpu","vfastcpu", 100]}, ContainerRequest::Uncommitted],
+    [{"partitions" => "fastcpu"}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],
+    [{"partitions" => "fastcpu"}, ContainerRequest::Uncommitted],
+    [{"partitions" => ["fastcpu","vfastcpu"]}, ContainerRequest::Committed],
+    [{"max_run_time" => "one day"}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],
+    [{"max_run_time" => "one day"}, ContainerRequest::Uncommitted],
+    [{"max_run_time" => -1}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],
+    [{"max_run_time" => -1}, ContainerRequest::Uncommitted],
+    [{"max_run_time" => 86400}, ContainerRequest::Committed],
+  ].each do |sp, state, expected|
+    test "create container request with scheduling_parameters #{sp} in state #{state} and verify #{expected}" do
+      common_attrs = {cwd: "test",
+                      priority: 1,
+                      command: ["echo", "hello"],
+                      output_path: "test",
+                      scheduling_parameters: sp,
+                      mounts: {"test" => {"kind" => "json"}}}
+      set_user_from_auth :active
+
+      if expected == ActiveRecord::RecordInvalid
+        assert_raises(ActiveRecord::RecordInvalid) do
+          create_minimal_req!(common_attrs.merge({state: state}))
+        end
+      else
+        cr = create_minimal_req!(common_attrs.merge({state: state}))
+        assert_equal sp, cr.scheduling_parameters
+
+        if state == ContainerRequest::Committed
+          c = Container.find_by_uuid(cr.container_uuid)
+          assert_equal sp, c.scheduling_parameters
+        end
+      end
+    end
+  end
+
+  test "Having preemptible_instances=true create a committed child container request and verify the scheduling parameter of its container" do
+    common_attrs = {cwd: "test",
+                    priority: 1,
+                    command: ["echo", "hello"],
+                    output_path: "test",
+                    state: ContainerRequest::Committed,
+                    mounts: {"test" => {"kind" => "json"}}}
+    set_user_from_auth :active
+    Rails.configuration.preemptible_instances = true
+
+    cr = with_container_auth(Container.find_by_uuid 'zzzzz-dz642-runningcontainr') do
+      create_minimal_req!(common_attrs)
+    end
+    assert_equal 'zzzzz-dz642-runningcontainr', cr.requesting_container_uuid
+    assert_equal true, cr.scheduling_parameters["preemptible"]
+
+    c = Container.find_by_uuid(cr.container_uuid)
+    assert_equal true, c.scheduling_parameters["preemptible"]
+  end
+
+  [['Committed', true, {name: "foobar", priority: 123}],
+   ['Committed', false, {container_count: 2}],
+   ['Committed', false, {container_count: 0}],
+   ['Committed', false, {container_count: nil}],
+   ['Final', false, {state: ContainerRequest::Committed, name: "foobar"}],
+   ['Final', false, {name: "foobar", priority: 123}],
+   ['Final', false, {name: "foobar", output_uuid: "zzzzz-4zz18-znfnqtbbv4spc3w"}],
+   ['Final', false, {name: "foobar", log_uuid: "zzzzz-4zz18-znfnqtbbv4spc3w"}],
+   ['Final', false, {log_uuid: "zzzzz-4zz18-znfnqtbbv4spc3w"}],
+   ['Final', false, {priority: 123}],
+   ['Final', false, {mounts: {}}],
+   ['Final', false, {container_count: 2}],
+   ['Final', true, {name: "foobar"}],
+   ['Final', true, {name: "foobar", description: "baz"}],
+  ].each do |state, permitted, updates|
+    test "state=#{state} can#{'not' if !permitted} update #{updates.inspect}" do
+      act_as_user users(:active) do
+        cr = create_minimal_req!(priority: 1,
+                                 state: "Committed",
+                                 container_count_max: 1)
+        case state
+        when 'Committed'
+          # already done
+        when 'Final'
+          act_as_system_user do
+            Container.find_by_uuid(cr.container_uuid).
+              update_attributes!(state: Container::Cancelled)
+          end
+          cr.reload
+        else
+          raise 'broken test case'
+        end
+        assert_equal state, cr.state
+        if permitted
+          assert cr.update_attributes!(updates)
+        else
+          assert_raises(ActiveRecord::RecordInvalid) do
+            cr.update_attributes!(updates)
+          end
+        end
+      end
+    end
+  end
+
+  test "delete container_request and check its container's priority" do
+    act_as_user users(:active) do
+      cr = ContainerRequest.find_by_uuid container_requests(:running_to_be_deleted).uuid
+
+      # initially the cr's container has priority > 0
+      c = Container.find_by_uuid(cr.container_uuid)
+      assert_equal 1, c.priority
+
+      cr.destroy
+
+      # the cr's container now has priority of 0
+      c = Container.find_by_uuid(cr.container_uuid)
+      assert_equal 0, c.priority
+    end
+  end
+
+  test "delete container_request in final state and expect no error due to before_destroy callback" do
+    act_as_user users(:active) do
+      cr = ContainerRequest.find_by_uuid container_requests(:completed).uuid
+      assert_nothing_raised {cr.destroy}
+    end
+  end
+
+  test "Container request valid priority" do
+    set_user_from_auth :active
+    cr = create_minimal_req!
+
+    assert_raises(ActiveRecord::RecordInvalid) do
+      cr.priority = -1
+      cr.save!
+    end
+
+    cr.priority = 0
+    cr.save!
+
+    cr.priority = 1
+    cr.save!
+
+    cr.priority = 500
+    cr.save!
+
+    cr.priority = 999
+    cr.save!
+
+    cr.priority = 1000
+    cr.save!
+
+    assert_raises(ActiveRecord::RecordInvalid) do
+      cr.priority = 1001
+      cr.save!
+    end
+  end
+
+  # Note: some of these tests might look redundant because they test
+  # that out-of-order spellings of hashes are still considered equal
+  # regardless of whether the existing (container) or new (container
+  # request) hash needs to be re-ordered.
+  secrets = {"/foo" => {"kind" => "text", "content" => "xyzzy"}}
+  same_secrets = {"/foo" => {"content" => "xyzzy", "kind" => "text"}}
+  different_secrets = {"/foo" => {"kind" => "text", "content" => "something completely different"}}
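+  # Hedged sketch of the comparison exercised below: candidate secret_mounts
+  # are treated as equal after deep key sorting (cf. Container.deep_sort_hash
+  # in container_test.rb), so e.g.
+  #   {"kind" => "text", "content" => "xyzzy"}
+  # and
+  #   {"content" => "xyzzy", "kind" => "text"}
+  # resolve to the same container.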
+  [
+    [true, nil, nil],
+    [true, nil, {}],
+    [true, {}, nil],
+    [true, {}, {}],
+    [true, secrets, same_secrets],
+    [true, same_secrets, secrets],
+    [false, nil, secrets],
+    [false, {}, secrets],
+    [false, secrets, {}],
+    [false, secrets, nil],
+    [false, secrets, different_secrets],
+  ].each do |expect_reuse, sm1, sm2|
+    test "container reuse secret_mounts #{sm1.inspect}, #{sm2.inspect}" do
+      set_user_from_auth :active
+      cr1 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: sm1)
+      cr2 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: sm2)
+      assert_not_nil cr1.container_uuid
+      assert_not_nil cr2.container_uuid
+      if expect_reuse
+        assert_equal cr1.container_uuid, cr2.container_uuid
+      else
+        assert_not_equal cr1.container_uuid, cr2.container_uuid
+      end
+    end
+  end
+
+  test "scrub secret_mounts but reuse container for request with identical secret_mounts" do
+    set_user_from_auth :active
+    sm = {'/secret/foo' => {'kind' => 'text', 'content' => secret_string}}
+    cr1 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: sm.dup)
+    run_container(cr1)
+    cr1.reload
+
+    # secret_mounts scrubbed from db
+    c = Container.where(uuid: cr1.container_uuid).first
+    assert_equal({}, c.secret_mounts)
+    assert_equal({}, cr1.secret_mounts)
+
+    # can reuse container if secret_mounts match
+    cr2 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: sm.dup)
+    assert_equal cr1.container_uuid, cr2.container_uuid
+
+    # don't reuse container if secret_mounts don't match
+    cr3 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: {})
+    assert_not_equal cr1.container_uuid, cr3.container_uuid
+
+    assert_no_secrets_logged
+  end
+
+  test "conflicting key in mounts and secret_mounts" do
+    sm = {'/secret/foo' => {'kind' => 'text', 'content' => secret_string}}
+    set_user_from_auth :active
+    cr = create_minimal_req!
+    assert_equal false, cr.update_attributes(state: "Committed",
+                                             priority: 1,
+                                             mounts: cr.mounts.merge(sm),
+                                             secret_mounts: sm)
+    assert_equal [:secret_mounts], cr.errors.messages.keys
+  end
+
+  test "using runtime_token" do
+    set_user_from_auth :spectator
+    spec = api_client_authorizations(:active)
+    cr = create_minimal_req!(state: "Committed", runtime_token: spec.token, priority: 1)
+    cr.save!
+    c = Container.find_by_uuid cr.container_uuid
+    lock_and_run c
+    assert_nil c.auth_uuid
+    assert_equal c.runtime_token, spec.token
+
+    assert_not_nil ApiClientAuthorization.find_by_uuid(spec.uuid)
+
+    act_as_system_user do
+      c.update_attributes!(state: Container::Complete,
+                           exit_code: 0,
+                           output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45',
+                           log: 'fa7aeb5140e2848d39b416daeef4ffc5+45')
+    end
+
+    cr.reload
+    c.reload
+    assert_nil cr.runtime_token
+    assert_nil c.runtime_token
+  end
+
+  test "invalid runtime_token" do
+    set_user_from_auth :active
+    spec = api_client_authorizations(:spectator)
+    assert_raises(ArgumentError) do
+      cr = create_minimal_req!(state: "Committed", runtime_token: "#{spec.token}xx")
+      cr.save!
+    end
+  end
+end
diff --git a/services/api/test/unit/container_test.rb b/services/api/test/unit/container_test.rb
new file mode 100644 (file)
index 0000000..1a53df7
--- /dev/null
+++ b/services/api/test/unit/container_test.rb
@@ -0,0 +1,959 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/container_test_helper'
+
+class ContainerTest < ActiveSupport::TestCase
+  include DbCurrentTime
+  include ContainerTestHelper
+
+  DEFAULT_ATTRS = {
+    command: ['echo', 'foo'],
+    container_image: 'fa3c1a9cb6783f85f2ecda037e07b8c3+167',
+    output_path: '/tmp',
+    priority: 1,
+    runtime_constraints: {"vcpus" => 1, "ram" => 1},
+  }
+
+  REUSABLE_COMMON_ATTRS = {
+    container_image: "9ae44d5792468c58bcf85ce7353c7027+124",
+    cwd: "test",
+    command: ["echo", "hello"],
+    output_path: "test",
+    runtime_constraints: {
+      "ram" => 12000000000,
+      "vcpus" => 4,
+    },
+    mounts: {
+      "test" => {"kind" => "json"},
+    },
+    environment: {
+      "var" => "val",
+    },
+    secret_mounts: {},
+    runtime_user_uuid: "zzzzz-tpzed-xurymjxw79nv3jz",
+    runtime_auth_scopes: ["all"]
+  }
+
+  def request_only attrs
+    attrs.reject {|k| [:runtime_user_uuid, :runtime_auth_scopes].include? k}
+  end
+
+  def minimal_new attrs={}
+    cr = ContainerRequest.new request_only(DEFAULT_ATTRS.merge(attrs))
+    cr.state = ContainerRequest::Committed
+    cr.save!
+    c = Container.find_by_uuid cr.container_uuid
+    assert_not_nil c
+    return c, cr
+  end
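+
+  # Usage sketch (as used throughout this file): committing a minimal
+  # ContainerRequest resolves it to a Container in the Queued state, e.g.
+  #
+  #   c, cr = minimal_new(priority: 1)
+  #   c.state            # => Container::Queued
+  #   cr.container_uuid  # => c.uuid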
+
+  def check_illegal_updates c, bad_updates
+    bad_updates.each do |u|
+      refute c.update_attributes(u), u.inspect
+      refute c.valid?, u.inspect
+      c.reload
+    end
+  end
+
+  def check_illegal_modify c
+    check_illegal_updates c, [{command: ["echo", "bar"]},
+                              {container_image: "arvados/apitestfixture:june10"},
+                              {cwd: "/tmp2"},
+                              {environment: {"FOO" => "BAR"}},
+                              {mounts: {"FOO" => "BAR"}},
+                              {output_path: "/tmp3"},
+                              {locked_by_uuid: "zzzzz-gj3su-027z32aux8dg2s1"},
+                              {auth_uuid: "zzzzz-gj3su-017z32aux8dg2s1"},
+                              {runtime_constraints: {"FOO" => "BAR"}}]
+  end
+
+  def check_bogus_states c
+    check_illegal_updates c, [{state: nil},
+                              {state: "Flubber"}]
+  end
+
+  def check_no_change_from_cancelled c
+    check_illegal_modify c
+    check_bogus_states c
+    check_illegal_updates c, [{ priority: 3 },
+                              { state: Container::Queued },
+                              { state: Container::Locked },
+                              { state: Container::Running },
+                              { state: Container::Complete }]
+  end
+
+  test "Container create" do
+    act_as_system_user do
+      c, _ = minimal_new(environment: {},
+                      mounts: {"BAR" => {"kind" => "FOO"}},
+                      output_path: "/tmp",
+                      priority: 1,
+                      runtime_constraints: {"vcpus" => 1, "ram" => 1})
+
+      check_illegal_modify c
+      check_bogus_states c
+
+      c.reload
+      c.priority = 2
+      c.save!
+    end
+  end
+
+  test "Container valid priority" do
+    act_as_system_user do
+      c, _ = minimal_new(environment: {},
+                      mounts: {"BAR" => {"kind" => "FOO"}},
+                      output_path: "/tmp",
+                      priority: 1,
+                      runtime_constraints: {"vcpus" => 1, "ram" => 1})
+
+      assert_raises(ActiveRecord::RecordInvalid) do
+        c.priority = -1
+        c.save!
+      end
+
+      c.priority = 0
+      c.save!
+
+      c.priority = 1
+      c.save!
+
+      c.priority = 500
+      c.save!
+
+      c.priority = 999
+      c.save!
+
+      c.priority = 1000
+      c.save!
+
+      c.priority = 1000 << 50
+      c.save!
+    end
+  end
+
+  test "Container runtime_status data types" do
+    set_user_from_auth :active
+    attrs = {
+      environment: {},
+      mounts: {"BAR" => {"kind" => "FOO"}},
+      output_path: "/tmp",
+      priority: 1,
+      runtime_constraints: {"vcpus" => 1, "ram" => 1}
+    }
+    c, _ = minimal_new(attrs)
+    assert_equal c.runtime_status, {}
+    assert_equal Container::Queued, c.state
+
+    set_user_from_auth :dispatch1
+    c.update_attributes! state: Container::Locked
+    c.update_attributes! state: Container::Running
+
+    [
+      'error', 'errorDetail', 'warning', 'warningDetail', 'activity'
+    ].each do |k|
+      # String type is allowed
+      string_val = 'A string is accepted'
+      c.update_attributes! runtime_status: {k => string_val}
+      assert_equal string_val, c.runtime_status[k]
+
+      # Other types aren't allowed
+      [
+        42, false, [], {}, nil
+      ].each do |unallowed_val|
+        assert_raises ActiveRecord::RecordInvalid do
+          c.update_attributes! runtime_status: {k => unallowed_val}
+        end
+      end
+    end
+  end
+
+  test "Container runtime_status updates" do
+    set_user_from_auth :active
+    attrs = {
+      environment: {},
+      mounts: {"BAR" => {"kind" => "FOO"}},
+      output_path: "/tmp",
+      priority: 1,
+      runtime_constraints: {"vcpus" => 1, "ram" => 1}
+    }
+    c1, _ = minimal_new(attrs)
+    assert_equal c1.runtime_status, {}
+
+    assert_equal Container::Queued, c1.state
+    assert_raises ActiveRecord::RecordInvalid do
+      c1.update_attributes! runtime_status: {'error' => 'Oops!'}
+    end
+
+    set_user_from_auth :dispatch1
+
+    # Allow updates when state = Locked
+    c1.update_attributes! state: Container::Locked
+    c1.update_attributes! runtime_status: {'error' => 'Oops!'}
+    assert c1.runtime_status.key? 'error'
+
+    # Reset when transitioning from Locked to Queued
+    c1.update_attributes! state: Container::Queued
+    assert_equal c1.runtime_status, {}
+
+    # Allow updates when state = Running
+    c1.update_attributes! state: Container::Locked
+    c1.update_attributes! state: Container::Running
+    c1.update_attributes! runtime_status: {'error' => 'Oops!'}
+    assert c1.runtime_status.key? 'error'
+
+    # Don't allow updates on other states
+    c1.update_attributes! state: Container::Complete
+    assert_raises ActiveRecord::RecordInvalid do
+      c1.update_attributes! runtime_status: {'error' => 'Some other error'}
+    end
+
+    set_user_from_auth :active
+    c2, _ = minimal_new(attrs)
+    assert_equal c2.runtime_status, {}
+    set_user_from_auth :dispatch1
+    c2.update_attributes! state: Container::Locked
+    c2.update_attributes! state: Container::Running
+    c2.update_attributes! state: Container::Cancelled
+    assert_raises ActiveRecord::RecordInvalid do
+      c2.update_attributes! runtime_status: {'error' => 'Oops!'}
+    end
+  end
+
+  test "Container serialized hash attributes sorted before save" do
+    set_user_from_auth :active
+    env = {"C" => "3", "B" => "2", "A" => "1"}
+    m = {"F" => {"kind" => "3"}, "E" => {"kind" => "2"}, "D" => {"kind" => "1"}}
+    rc = {"vcpus" => 1, "ram" => 1, "keep_cache_ram" => 1}
+    c, _ = minimal_new(environment: env, mounts: m, runtime_constraints: rc)
+    assert_equal c.environment.to_json, Container.deep_sort_hash(env).to_json
+    assert_equal c.mounts.to_json, Container.deep_sort_hash(m).to_json
+    assert_equal c.runtime_constraints.to_json, Container.deep_sort_hash(rc).to_json
+  end
+
+  test 'deep_sort_hash on array of hashes' do
+    a = {'z' => [[{'a' => 'a', 'b' => 'b'}]]}
+    b = {'z' => [[{'b' => 'b', 'a' => 'a'}]]}
+    assert_equal Container.deep_sort_hash(a).to_json, Container.deep_sort_hash(b).to_json
+  end
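+
+  # A minimal sketch of the normalization the two tests above rely on:
+  # Container.deep_sort_hash recursively sorts hash keys, including hashes
+  # nested inside arrays, so serializations compare order-independently, e.g.
+  #
+  #   Container.deep_sort_hash({"b" => 1, "a" => {"d" => 4, "c" => 3}}).to_json
+  #   # => '{"a":{"c":3,"d":4},"b":1}'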
+
+  test "find_reusable method should select higher priority queued container" do
+    Rails.configuration.log_reuse_decisions = true
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment:{"var" => "queued"}})
+    c_low_priority, _ = minimal_new(common_attrs.merge({use_existing:false, priority:1}))
+    c_high_priority, _ = minimal_new(common_attrs.merge({use_existing:false, priority:2}))
+    assert_not_equal c_low_priority.uuid, c_high_priority.uuid
+    assert_equal Container::Queued, c_low_priority.state
+    assert_equal Container::Queued, c_high_priority.state
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal reused.uuid, c_high_priority.uuid
+  end
+
+  test "find_reusable method should select latest completed container" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "complete"}})
+    completed_attrs = {
+      state: Container::Complete,
+      exit_code: 0,
+      log: 'ea10d51bcf88862dbcc36eb292017dfd+45',
+      output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
+    }
+
+    c_older, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    c_recent, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    assert_not_equal c_older.uuid, c_recent.uuid
+
+    set_user_from_auth :dispatch1
+    c_older.update_attributes!({state: Container::Locked})
+    c_older.update_attributes!({state: Container::Running})
+    c_older.update_attributes!(completed_attrs)
+
+    c_recent.update_attributes!({state: Container::Locked})
+    c_recent.update_attributes!({state: Container::Running})
+    c_recent.update_attributes!(completed_attrs)
+
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal reused.uuid, c_older.uuid
+  end
+
+  test "find_reusable method should select oldest completed container when inconsistent outputs exist" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "complete"}, priority: 1})
+    completed_attrs = {
+      state: Container::Complete,
+      exit_code: 0,
+      log: 'ea10d51bcf88862dbcc36eb292017dfd+45',
+    }
+
+    cr = ContainerRequest.new request_only(common_attrs)
+    cr.use_existing = false
+    cr.state = ContainerRequest::Committed
+    cr.save!
+    c_output1 = Container.where(uuid: cr.container_uuid).first
+
+    cr = ContainerRequest.new request_only(common_attrs)
+    cr.use_existing = false
+    cr.state = ContainerRequest::Committed
+    cr.save!
+    c_output2 = Container.where(uuid: cr.container_uuid).first
+
+    assert_not_equal c_output1.uuid, c_output2.uuid
+
+    set_user_from_auth :dispatch1
+
+    out1 = '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
+    log1 = collections(:real_log_collection).portable_data_hash
+    c_output1.update_attributes!({state: Container::Locked})
+    c_output1.update_attributes!({state: Container::Running})
+    c_output1.update_attributes!(completed_attrs.merge({log: log1, output: out1}))
+
+    out2 = 'fa7aeb5140e2848d39b416daeef4ffc5+45'
+    c_output2.update_attributes!({state: Container::Locked})
+    c_output2.update_attributes!({state: Container::Running})
+    c_output2.update_attributes!(completed_attrs.merge({log: log1, output: out2}))
+
+    set_user_from_auth :active
+    reused = Container.resolve(ContainerRequest.new(request_only(common_attrs)))
+    assert_equal c_output1.uuid, reused.uuid
+  end
+
+  test "find_reusable method should select running container by start date" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "running"}})
+    c_slower, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    c_faster_started_first, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    c_faster_started_second, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    # Confirm the 3 container UUIDs are different.
+    assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length
+    set_user_from_auth :dispatch1
+    c_slower.update_attributes!({state: Container::Locked})
+    c_slower.update_attributes!({state: Container::Running,
+                                 progress: 0.1})
+    c_faster_started_first.update_attributes!({state: Container::Locked})
+    c_faster_started_first.update_attributes!({state: Container::Running,
+                                               progress: 0.15})
+    c_faster_started_second.update_attributes!({state: Container::Locked})
+    c_faster_started_second.update_attributes!({state: Container::Running,
+                                                progress: 0.15})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    # The selected container is the one that started first
+    assert_equal reused.uuid, c_faster_started_first.uuid
+  end
+
+  test "find_reusable method should select running container by progress" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "running2"}})
+    c_slower, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    c_faster_started_first, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    c_faster_started_second, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    # Confirm the 3 container UUIDs are different.
+    assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length
+    set_user_from_auth :dispatch1
+    c_slower.update_attributes!({state: Container::Locked})
+    c_slower.update_attributes!({state: Container::Running,
+                                 progress: 0.1})
+    c_faster_started_first.update_attributes!({state: Container::Locked})
+    c_faster_started_first.update_attributes!({state: Container::Running,
+                                               progress: 0.15})
+    c_faster_started_second.update_attributes!({state: Container::Locked})
+    c_faster_started_second.update_attributes!({state: Container::Running,
+                                                progress: 0.2})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    # The selected container is the one that has made the most progress
+    assert_equal reused.uuid, c_faster_started_second.uuid
+  end
+
+  test "find_reusable method should select non-failing running container" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "running2"}})
+    c_slower, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    c_faster_started_first, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    c_faster_started_second, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    # Confirm the 3 container UUIDs are different.
+    assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length
+    set_user_from_auth :dispatch1
+    c_slower.update_attributes!({state: Container::Locked})
+    c_slower.update_attributes!({state: Container::Running,
+                                 progress: 0.1})
+    c_faster_started_first.update_attributes!({state: Container::Locked})
+    c_faster_started_first.update_attributes!({state: Container::Running,
+                                               runtime_status: {'warning' => 'This is not an error'},
+                                               progress: 0.15})
+    c_faster_started_second.update_attributes!({state: Container::Locked})
+    c_faster_started_second.update_attributes!({state: Container::Running,
+                                                runtime_status: {'error' => 'Something bad happened'},
+                                                progress: 0.2})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    # The non-failing container is selected even though it has made less progress
+    assert_equal reused.uuid, c_faster_started_first.uuid
+  end
+
+  test "find_reusable method should select locked container most likely to start sooner" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "locked"}})
+    c_low_priority, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    c_high_priority_older, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    c_high_priority_newer, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    # Confirm the 3 container UUIDs are different.
+    assert_equal 3, [c_low_priority.uuid, c_high_priority_older.uuid, c_high_priority_newer.uuid].uniq.length
+    set_user_from_auth :dispatch1
+    c_low_priority.update_attributes!({state: Container::Locked,
+                                       priority: 1})
+    c_high_priority_older.update_attributes!({state: Container::Locked,
+                                              priority: 2})
+    c_high_priority_newer.update_attributes!({state: Container::Locked,
+                                              priority: 2})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal reused.uuid, c_high_priority_older.uuid
+  end
+
+  test "find_reusable method should select running over failed container" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "failed_vs_running"}})
+    c_failed, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    assert_not_equal c_failed.uuid, c_running.uuid
+    set_user_from_auth :dispatch1
+    c_failed.update_attributes!({state: Container::Locked})
+    c_failed.update_attributes!({state: Container::Running})
+    c_failed.update_attributes!({state: Container::Complete,
+                                 exit_code: 42,
+                                 log: 'ea10d51bcf88862dbcc36eb292017dfd+45',
+                                 output: 'ea10d51bcf88862dbcc36eb292017dfd+45'})
+    c_running.update_attributes!({state: Container::Locked})
+    c_running.update_attributes!({state: Container::Running,
+                                  progress: 0.15})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal reused.uuid, c_running.uuid
+  end
+
+  test "find_reusable method should select complete over running container" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "completed_vs_running"}})
+    c_completed, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    assert_not_equal c_completed.uuid, c_running.uuid
+    set_user_from_auth :dispatch1
+    c_completed.update_attributes!({state: Container::Locked})
+    c_completed.update_attributes!({state: Container::Running})
+    c_completed.update_attributes!({state: Container::Complete,
+                                    exit_code: 0,
+                                    log: 'ea10d51bcf88862dbcc36eb292017dfd+45',
+                                    output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'})
+    c_running.update_attributes!({state: Container::Locked})
+    c_running.update_attributes!({state: Container::Running,
+                                  progress: 0.15})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal c_completed.uuid, reused.uuid
+  end
+
+  test "find_reusable method should select running over locked container" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "running_vs_locked"}})
+    c_locked, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    assert_not_equal c_running.uuid, c_locked.uuid
+    set_user_from_auth :dispatch1
+    c_locked.update_attributes!({state: Container::Locked})
+    c_running.update_attributes!({state: Container::Locked})
+    c_running.update_attributes!({state: Container::Running,
+                                  progress: 0.15})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal reused.uuid, c_running.uuid
+  end
+
+  test "find_reusable method should select locked over queued container" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "running_vs_locked"}})
+    c_locked, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    c_queued, _ = minimal_new(common_attrs.merge({use_existing: false}))
+    assert_not_equal c_queued.uuid, c_locked.uuid
+    set_user_from_auth :dispatch1
+    c_locked.update_attributes!({state: Container::Locked})
+    reused = Container.find_reusable(common_attrs)
+    assert_not_nil reused
+    assert_equal reused.uuid, c_locked.uuid
+  end
+
+  test "find_reusable method should not select failed container" do
+    set_user_from_auth :active
+    attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "failed"}})
+    c, _ = minimal_new(attrs)
+    set_user_from_auth :dispatch1
+    c.update_attributes!({state: Container::Locked})
+    c.update_attributes!({state: Container::Running})
+    c.update_attributes!({state: Container::Complete,
+                          exit_code: 33})
+    reused = Container.find_reusable(attrs)
+    assert_nil reused
+  end
+
+  test "find_reusable with logging disabled" do
+    set_user_from_auth :active
+    Rails.logger.expects(:info).never
+    Container.find_reusable(REUSABLE_COMMON_ATTRS)
+  end
+
+  test "find_reusable with logging enabled" do
+    set_user_from_auth :active
+    Rails.configuration.log_reuse_decisions = true
+    Rails.logger.expects(:info).at_least(3)
+    Container.find_reusable(REUSABLE_COMMON_ATTRS)
+  end
+
+  def runtime_token_attr tok
+    auth = api_client_authorizations(tok)
+    {runtime_user_uuid: User.find_by_id(auth.user_id).uuid,
+     runtime_auth_scopes: auth.scopes,
+     runtime_token: auth.token}
+  end
+
+  test "find_reusable method with same runtime_token" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{"var" => "queued"}})
+    c1, _ = minimal_new(common_attrs.merge({runtime_token: api_client_authorizations(:container_runtime_token).token}))
+    assert_equal Container::Queued, c1.state
+    reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))
+    assert_not_nil reused
+    assert_equal reused.uuid, c1.uuid
+  end
+
+  test "find_reusable method with different runtime_token, same user" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{"var" => "queued"}})
+    c1, _ = minimal_new(common_attrs.merge({runtime_token: api_client_authorizations(:crt_user).token}))
+    assert_equal Container::Queued, c1.state
+    reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))
+    assert_not_nil reused
+    assert_equal reused.uuid, c1.uuid
+  end
+
+  test "find_reusable method with nil runtime_token, then runtime_token with same user" do
+    set_user_from_auth :crt_user
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{"var" => "queued"}})
+    c1, _ = minimal_new(common_attrs)
+    assert_equal Container::Queued, c1.state
+    assert_equal users(:container_runtime_token_user).uuid, c1.runtime_user_uuid
+    reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))
+    assert_not_nil reused
+    assert_equal reused.uuid, c1.uuid
+  end
+
+  test "find_reusable method with different runtime_token, different user" do
+    set_user_from_auth :crt_user
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{"var" => "queued"}})
+    c1, _ = minimal_new(common_attrs.merge({runtime_token: api_client_authorizations(:active).token}))
+    assert_equal Container::Queued, c1.state
+    reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))
+    # See #14584
+    assert_equal c1.uuid, reused.uuid
+  end
+
+  test "find_reusable method with nil runtime_token, then runtime_token with different user" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{"var" => "queued"}})
+    c1, _ = minimal_new(common_attrs.merge({runtime_token: nil}))
+    assert_equal Container::Queued, c1.state
+    reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))
+    # See #14584
+    assert_equal c1.uuid, reused.uuid
+  end
+
+  test "find_reusable method with different runtime_token, different scope, same user" do
+    set_user_from_auth :active
+    common_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{"var" => "queued"}})
+    c1, _ = minimal_new(common_attrs.merge({runtime_token: api_client_authorizations(:runtime_token_limited_scope).token}))
+    assert_equal Container::Queued, c1.state
+    reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))
+    # See #14584
+    assert_equal c1.uuid, reused.uuid
+  end
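+
+  # Taken together, the runtime_token cases above show that reuse is decided by
+  # the container's work description rather than by which token or user
+  # submitted it (see #14584).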
+
+  test "Container running" do
+    set_user_from_auth :active
+    c, _ = minimal_new priority: 1
+
+    set_user_from_auth :dispatch1
+    check_illegal_updates c, [{state: Container::Running},
+                              {state: Container::Complete}]
+
+    c.lock
+    c.update_attributes! state: Container::Running
+
+    check_illegal_modify c
+    check_bogus_states c
+
+    check_illegal_updates c, [{state: Container::Queued}]
+    c.reload
+
+    c.update_attributes! priority: 3
+  end
+
+  test "Lock and unlock" do
+    set_user_from_auth :active
+    c, cr = minimal_new priority: 0
+
+    set_user_from_auth :dispatch1
+    assert_equal Container::Queued, c.state
+
+    assert_raise(ArvadosModel::LockFailedError) do
+      # "no priority"
+      c.lock
+    end
+    c.reload
+    assert cr.update_attributes priority: 1
+
+    refute c.update_attributes(state: Container::Running), "not locked"
+    c.reload
+    refute c.update_attributes(state: Container::Complete), "not locked"
+    c.reload
+
+    assert c.lock, show_errors(c)
+    assert c.locked_by_uuid
+    assert c.auth_uuid
+
+    assert_raise(ArvadosModel::LockFailedError) {c.lock}
+    c.reload
+
+    assert c.unlock, show_errors(c)
+    refute c.locked_by_uuid
+    refute c.auth_uuid
+
+    refute c.update_attributes(state: Container::Running), "not locked"
+    c.reload
+    refute c.locked_by_uuid
+    refute c.auth_uuid
+
+    assert c.lock, show_errors(c)
+    assert c.update_attributes(state: Container::Running), show_errors(c)
+    assert c.locked_by_uuid
+    assert c.auth_uuid
+
+    auth_uuid_was = c.auth_uuid
+
+    assert_raise(ArvadosModel::LockFailedError) do
+      # Running to Locked is not allowed
+      c.lock
+    end
+    c.reload
+    assert_raise(ArvadosModel::InvalidStateTransitionError) do
+      # Running to Queued is not allowed
+      c.unlock
+    end
+    c.reload
+
+    assert c.update_attributes(state: Container::Complete), show_errors(c)
+    refute c.locked_by_uuid
+    refute c.auth_uuid
+
+    auth_exp = ApiClientAuthorization.find_by_uuid(auth_uuid_was).expires_at
+    assert_operator auth_exp, :<, db_current_time
+  end
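+
+  # State transitions exercised above, summarized from the assertions:
+  #   Queued  -> Locked  (lock; requires nonzero priority)
+  #   Locked  -> Queued  (unlock)
+  #   Locked  -> Running -> Complete
+  # Running -> Locked and Running -> Queued are rejected, and reaching
+  # Complete clears locked_by_uuid/auth_uuid and expires the container's
+  # API token.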
+
+  test "Exceed maximum lock-unlock cycles" do
+    Rails.configuration.max_container_dispatch_attempts = 3
+
+    set_user_from_auth :active
+    c, cr = minimal_new
+
+    set_user_from_auth :dispatch1
+    assert_equal Container::Queued, c.state
+    assert_equal 0, c.lock_count
+
+    c.lock
+    c.reload
+    assert_equal 1, c.lock_count
+    assert_equal Container::Locked, c.state
+
+    c.unlock
+    c.reload
+    assert_equal 1, c.lock_count
+    assert_equal Container::Queued, c.state
+
+    c.lock
+    c.reload
+    assert_equal 2, c.lock_count
+    assert_equal Container::Locked, c.state
+
+    c.unlock
+    c.reload
+    assert_equal 2, c.lock_count
+    assert_equal Container::Queued, c.state
+
+    c.lock
+    c.reload
+    assert_equal 3, c.lock_count
+    assert_equal Container::Locked, c.state
+
+    c.unlock
+    c.reload
+    assert_equal 3, c.lock_count
+    assert_equal Container::Cancelled, c.state
+
+    assert_raise(ArvadosModel::LockFailedError) do
+      # Cancelled to Locked is not allowed
+      c.lock
+    end
+  end
+
+  test "Container queued cancel" do
+    set_user_from_auth :active
+    c, cr = minimal_new({container_count_max: 1})
+    set_user_from_auth :dispatch1
+    assert c.update_attributes(state: Container::Cancelled), show_errors(c)
+    check_no_change_from_cancelled c
+    cr.reload
+    assert_equal ContainerRequest::Final, cr.state
+  end
+
+  test "Container queued count" do
+    assert_equal 1, Container.readable_by(users(:active)).where(state: "Queued").count
+  end
+
+  test "Containers with no matching request are readable by admin" do
+    uuids = Container.includes('container_requests').where(container_requests: {uuid: nil}).collect(&:uuid)
+    assert_not_empty uuids
+    assert_empty Container.readable_by(users(:active)).where(uuid: uuids)
+    assert_not_empty Container.readable_by(users(:admin)).where(uuid: uuids)
+    assert_equal uuids.count, Container.readable_by(users(:admin)).where(uuid: uuids).count
+  end
+
+  test "Container locked cancel" do
+    set_user_from_auth :active
+    c, _ = minimal_new
+    set_user_from_auth :dispatch1
+    assert c.lock, show_errors(c)
+    assert c.update_attributes(state: Container::Cancelled), show_errors(c)
+    check_no_change_from_cancelled c
+  end
+
+  test "Container locked cancel with log" do
+    set_user_from_auth :active
+    c, _ = minimal_new
+    set_user_from_auth :dispatch1
+    assert c.lock, show_errors(c)
+    assert c.update_attributes(
+             state: Container::Cancelled,
+             log: collections(:real_log_collection).portable_data_hash,
+           ), show_errors(c)
+    check_no_change_from_cancelled c
+  end
+
+  test "Container running cancel" do
+    set_user_from_auth :active
+    c, _ = minimal_new
+    set_user_from_auth :dispatch1
+    c.lock
+    c.update_attributes! state: Container::Running
+    c.update_attributes! state: Container::Cancelled
+    check_no_change_from_cancelled c
+  end
+
+  test "Container create forbidden for non-admin" do
+    set_user_from_auth :active_trustedclient
+    c = Container.new DEFAULT_ATTRS
+    c.environment = {}
+    c.mounts = {"BAR" => "FOO"}
+    c.output_path = "/tmp"
+    c.priority = 1
+    c.runtime_constraints = {}
+    assert_raises(ArvadosModel::PermissionDeniedError) do
+      c.save!
+    end
+  end
+
+  test "Container only set exit code on complete" do
+    set_user_from_auth :active
+    c, _ = minimal_new
+    set_user_from_auth :dispatch1
+    c.lock
+    c.update_attributes! state: Container::Running
+
+    check_illegal_updates c, [{exit_code: 1},
+                              {exit_code: 1, state: Container::Cancelled}]
+
+    assert c.update_attributes(exit_code: 1, state: Container::Complete)
+  end
+
+  test "locked_by_uuid can update log when locked/running, and output when running" do
+    set_user_from_auth :active
+    logcoll = collections(:real_log_collection)
+    c, cr1 = minimal_new
+    cr2 = ContainerRequest.new(DEFAULT_ATTRS)
+    cr2.state = ContainerRequest::Committed
+    act_as_user users(:active) do
+      cr2.save!
+    end
+    assert_equal cr1.container_uuid, cr2.container_uuid
+
+    logpdh_time1 = logcoll.portable_data_hash
+
+    set_user_from_auth :dispatch1
+    c.lock
+    assert_equal c.locked_by_uuid, Thread.current[:api_client_authorization].uuid
+    c.update_attributes!(log: logpdh_time1)
+    c.update_attributes!(state: Container::Running)
+    cr1.reload
+    cr2.reload
+    cr1log_uuid = cr1.log_uuid
+    cr2log_uuid = cr2.log_uuid
+    assert_not_nil cr1log_uuid
+    assert_not_nil cr2log_uuid
+    assert_not_equal logcoll.uuid, cr1log_uuid
+    assert_not_equal logcoll.uuid, cr2log_uuid
+    assert_not_equal cr1log_uuid, cr2log_uuid
+
+    logcoll.update_attributes!(manifest_text: logcoll.manifest_text + ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n")
+    logpdh_time2 = logcoll.portable_data_hash
+
+    assert c.update_attributes(output: collections(:collection_owned_by_active).portable_data_hash)
+    assert c.update_attributes(log: logpdh_time2)
+    assert c.update_attributes(state: Container::Complete, log: logcoll.portable_data_hash)
+    c.reload
+    assert_equal collections(:collection_owned_by_active).portable_data_hash, c.output
+    assert_equal logpdh_time2, c.log
+    refute c.update_attributes(output: nil)
+    refute c.update_attributes(log: nil)
+    cr1.reload
+    cr2.reload
+    assert_equal cr1log_uuid, cr1.log_uuid
+    assert_equal cr2log_uuid, cr2.log_uuid
+    assert_equal 1, Collection.where(uuid: [cr1log_uuid, cr2log_uuid]).to_a.collect(&:portable_data_hash).uniq.length
+    assert_equal ". acbd18db4cc2f85cedef654fccc4a4d8+3 cdd549ae79fe6640fa3d5c6261d8303c+195 0:3:foo.txt 3:195:zzzzz-8i9sb-0vsrcqi7whchuil.log.txt
+./log\\040for\\040container\\040#{cr1.container_uuid} acbd18db4cc2f85cedef654fccc4a4d8+3 cdd549ae79fe6640fa3d5c6261d8303c+195 0:3:foo.txt 3:195:zzzzz-8i9sb-0vsrcqi7whchuil.log.txt
+", Collection.find_by_uuid(cr1log_uuid).manifest_text
+  end
+
+  ["auth_uuid", "runtime_token"].each do |tok|
+    test "#{tok} can set output, progress, runtime_status, state on running container -- but not log" do
+      if tok == "runtime_token"
+        set_user_from_auth :spectator
+        c, _ = minimal_new(container_image: "9ae44d5792468c58bcf85ce7353c7027+124",
+                           runtime_token: api_client_authorizations(:active).token)
+      else
+        set_user_from_auth :active
+        c, _ = minimal_new
+      end
+      set_user_from_auth :dispatch1
+      c.lock
+      c.update_attributes! state: Container::Running
+
+      if tok == "runtime_token"
+        auth = ApiClientAuthorization.validate(token: c.runtime_token)
+        Thread.current[:api_client_authorization] = auth
+        Thread.current[:api_client] = auth.api_client
+        Thread.current[:token] = auth.token
+        Thread.current[:user] = auth.user
+      else
+        auth = ApiClientAuthorization.find_by_uuid(c.auth_uuid)
+        Thread.current[:api_client_authorization] = auth
+        Thread.current[:api_client] = auth.api_client
+        Thread.current[:token] = auth.token
+        Thread.current[:user] = auth.user
+      end
+
+      assert c.update_attributes(output: collections(:collection_owned_by_active).portable_data_hash)
+      assert c.update_attributes(runtime_status: {'warning' => 'something happened'})
+      assert c.update_attributes(progress: 0.5)
+      refute c.update_attributes(log: collections(:real_log_collection).portable_data_hash)
+      c.reload
+      assert c.update_attributes(state: Container::Complete, exit_code: 0)
+    end
+  end
+
+  test "not allowed to set output that is not readable by current user" do
+    set_user_from_auth :active
+    c, _ = minimal_new
+    set_user_from_auth :dispatch1
+    c.lock
+    c.update_attributes! state: Container::Running
+
+    Thread.current[:api_client_authorization] = ApiClientAuthorization.find_by_uuid(c.auth_uuid)
+    Thread.current[:user] = User.find_by_id(Thread.current[:api_client_authorization].user_id)
+
+    assert_raises ActiveRecord::RecordInvalid do
+      c.update_attributes! output: collections(:collection_not_readable_by_active).portable_data_hash
+    end
+  end
+
+  test "other token cannot set output on running container" do
+    set_user_from_auth :active
+    c, _ = minimal_new
+    set_user_from_auth :dispatch1
+    c.lock
+    c.update_attributes! state: Container::Running
+
+    set_user_from_auth :running_to_be_deleted_container_auth
+    refute c.update_attributes(output: collections(:foo_file).portable_data_hash)
+  end
+
+  test "can set trashed output on running container" do
+    set_user_from_auth :active
+    c, _ = minimal_new
+    set_user_from_auth :dispatch1
+    c.lock
+    c.update_attributes! state: Container::Running
+
+    output = Collection.find_by_uuid('zzzzz-4zz18-mto52zx1s7sn3jk')
+
+    assert output.is_trashed
+    assert c.update_attributes output: output.portable_data_hash
+    assert c.update_attributes! state: Container::Complete
+  end
+
+  test "not allowed to set trashed output that is not readable by current user" do
+    set_user_from_auth :active
+    c, _ = minimal_new
+    set_user_from_auth :dispatch1
+    c.lock
+    c.update_attributes! state: Container::Running
+
+    output = Collection.find_by_uuid('zzzzz-4zz18-mto52zx1s7sn3jr')
+
+    Thread.current[:api_client_authorization] = ApiClientAuthorization.find_by_uuid(c.auth_uuid)
+    Thread.current[:user] = User.find_by_id(Thread.current[:api_client_authorization].user_id)
+
+    assert_raises ActiveRecord::RecordInvalid do
+      c.update_attributes! output: output.portable_data_hash
+    end
+  end
+
+  [
+    {state: Container::Complete, exit_code: 0, output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'},
+    {state: Container::Cancelled},
+  ].each do |final_attrs|
+    test "secret_mounts and runtime_token are null after container is #{final_attrs[:state]}" do
+      set_user_from_auth :active
+      c, cr = minimal_new(secret_mounts: {'/secret' => {'kind' => 'text', 'content' => 'foo'}},
+                          container_count_max: 1, runtime_token: api_client_authorizations(:active).token)
+      set_user_from_auth :dispatch1
+      c.lock
+      c.update_attributes!(state: Container::Running)
+      c.reload
+      assert c.secret_mounts.has_key?('/secret')
+      assert_equal api_client_authorizations(:active).token, c.runtime_token
+
+      c.update_attributes!(final_attrs)
+      c.reload
+      assert_equal({}, c.secret_mounts)
+      assert_nil c.runtime_token
+      cr.reload
+      assert_equal({}, cr.secret_mounts)
+      assert_nil cr.runtime_token
+      assert_no_secrets_logged
+    end
+  end
+end
diff --git a/services/api/test/unit/create_superuser_token_test.rb b/services/api/test/unit/create_superuser_token_test.rb
new file mode 100644 (file)
index 0000000..e95e0f2
--- /dev/null
+++ b/services/api/test/unit/create_superuser_token_test.rb
@@ -0,0 +1,102 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'safe_json'
+require 'test_helper'
+require 'create_superuser_token'
+
+class CreateSuperUserTokenTest < ActiveSupport::TestCase
+  include CreateSuperUserToken
+
+  test "create superuser token twice and expect same resutls" do
+    # Create a token with some string
+    token1 = create_superuser_token 'atesttoken'
+    assert_not_nil token1
+    assert_equal token1, 'atesttoken'
+
+    # Create token again; this time, we should get the one created earlier
+    token2 = create_superuser_token
+    assert_not_nil token2
+    assert_equal token1, token2
+  end
+
+  test "create superuser token with two different inputs and expect the first both times" do
+    # Create a token with some string
+    token1 = create_superuser_token 'atesttoken'
+    assert_not_nil token1
+    assert_equal token1, 'atesttoken'
+
+    # Create token again with some other string and expect the existing superuser token back
+    token2 = create_superuser_token 'someothertokenstring'
+    assert_not_nil token2
+    assert_equal token1, token2
+  end
+
+  test "create superuser token twice and expect same results" do
+    # Create a token with some string
+    token1 = create_superuser_token 'atesttoken'
+    assert_not_nil token1
+    assert_equal token1, 'atesttoken'
+
+    # Create token again with that same superuser token and expect it back
+    token2 = create_superuser_token 'atesttoken'
+    assert_not_nil token2
+    assert_equal token1, token2
+  end
+
+  test "create superuser token and invoke again with some other valid token" do
+    # Create a token with some string
+    token1 = create_superuser_token 'atesttoken'
+    assert_not_nil token1
+    assert_equal token1, 'atesttoken'
+
+    su_token = api_client_authorizations("system_user").api_token
+    token2 = create_superuser_token su_token
+    assert_equal token2, su_token
+  end
+
+  test "create superuser token, expire it, and create again" do
+    # Create a token with some string
+    token1 = create_superuser_token 'atesttoken'
+    assert_not_nil token1
+    assert_equal token1, 'atesttoken'
+
+    # Expire this token and call create again; expect a new token created
+    api_client_auth = ApiClientAuthorization.where(api_token: token1).first
+    Thread.current[:user] = users(:admin)
+    api_client_auth.update_attributes expires_at: '2000-10-10'
+
+    token2 = create_superuser_token
+    assert_not_nil token2
+    assert_not_equal token1, token2
+  end
+
+  test "invoke create superuser token with an invalid non-superuser token and expect error" do
+    active_user_token = api_client_authorizations("active").api_token
+    e = assert_raises RuntimeError do
+      create_superuser_token active_user_token
+    end
+    assert_not_nil e
+    assert_equal "Token exists but is not a superuser token.", e.message
+  end
+
+  test "specified token has limited scope" do
+    active_user_token = api_client_authorizations("data_manager").api_token
+    e = assert_raises RuntimeError do
+      create_superuser_token active_user_token
+    end
+    assert_not_nil e
+    assert_match(/^Token exists but has limited scope/, e.message)
+  end
+
+  test "existing token has limited scope" do
+    active_user_token = api_client_authorizations("admin_vm").api_token
+    ApiClientAuthorization.
+      where(user_id: system_user.id).
+      update_all(scopes: ["GET /"])
+    fixture_tokens = ApiClientAuthorization.all.collect(&:api_token)
+    new_token = create_superuser_token
+    refute_includes(fixture_tokens, new_token)
+  end
+end
diff --git a/services/api/test/unit/crunch_dispatch_test.rb b/services/api/test/unit/crunch_dispatch_test.rb
new file mode 100644 (file)
index 0000000..3460abe
--- /dev/null
+++ b/services/api/test/unit/crunch_dispatch_test.rb
@@ -0,0 +1,229 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'crunch_dispatch'
+require 'helpers/git_test_helper'
+
+class CrunchDispatchTest < ActiveSupport::TestCase
+  include GitTestHelper
+
+  test 'choose cheaper nodes first' do
+    act_as_system_user do
+      # Replace test fixtures with a set suitable for testing dispatch
+      Node.destroy_all
+
+      # Idle nodes with different prices
+      [['compute1', 3.20, 32],
+       ['compute2', 1.60, 16],
+       ['compute3', 0.80, 8]].each do |hostname, price, cores|
+        Node.create!(hostname: hostname,
+                     info: {
+                       'slurm_state' => 'idle',
+                     },
+                     properties: {
+                       'cloud_node' => {
+                         'price' => price,
+                       },
+                       'total_cpu_cores' => cores,
+                       'total_ram_mb' => cores*1024,
+                       'total_scratch_mb' => cores*10000,
+                     })
+      end
+
+      # Node with no price information
+      Node.create!(hostname: 'compute4',
+                   info: {
+                     'slurm_state' => 'idle',
+                   },
+                   properties: {
+                     'total_cpu_cores' => 8,
+                     'total_ram_mb' => 8192,
+                     'total_scratch_mb' => 80000,
+                   })
+
+      # Cheap but busy node
+      Node.create!(hostname: 'compute5',
+                   info: {
+                     'slurm_state' => 'alloc',
+                   },
+                   properties: {
+                     'cloud_node' => {
+                       'price' => 0.10,
+                     },
+                     'total_cpu_cores' => 32,
+                     'total_ram_mb' => 32768,
+                     'total_scratch_mb' => 320000,
+                   })
+    end
+
+    dispatch = CrunchDispatch.new
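+    # Each triple: minimum node count, minimum RAM per node (MB), and the node
+    # list nodes_available_for_job_now is expected to return for that job.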
+    [[1, 16384, ['compute2']],
+     [2, 16384, ['compute2', 'compute1']],
+     [2, 8000, ['compute4', 'compute3']],
+    ].each do |min_nodes, min_ram, expect_nodes|
+      job = Job.new(uuid: 'zzzzz-8i9sb-382lhiizavzhqlp',
+                    runtime_constraints: {
+                      'min_nodes' => min_nodes,
+                      'min_ram_mb_per_node' => min_ram,
+                    })
+      nodes = dispatch.nodes_available_for_job_now job
+      assert_equal expect_nodes, nodes
+    end
+  end
+
+  test 'respond to TERM' do
+    lockfile = Rails.root.join 'tmp', 'dispatch.lock'
+    ENV['CRUNCH_DISPATCH_LOCKFILE'] = lockfile.to_s
+    begin
+      pid = Process.fork do
+        begin
+          # Abandon database connections inherited from parent
+          # process.  Credit to
+          # https://github.com/kstephens/rails_is_forked
+          ActiveRecord::Base.connection_handler.connection_pools.each_value do |pool|
+            pool.instance_eval do
+              @reserved_connections = {}
+              @connections = []
+            end
+          end
+          ActiveRecord::Base.establish_connection
+
+          dispatch = CrunchDispatch.new
+          dispatch.stubs(:did_recently).returns true
+          dispatch.run []
+        ensure
+          Process.exit!
+        end
+      end
+      assert_with_timeout 5, "Dispatch did not lock #{lockfile}" do
+        !can_lock(lockfile)
+      end
+    ensure
+      Process.kill("TERM", pid)
+    end
+    assert_with_timeout 20, "Dispatch did not unlock #{lockfile}" do
+      can_lock(lockfile)
+    end
+  end
+
+  test 'override --cgroup-root with CRUNCH_CGROUP_ROOT' do
+    ENV['CRUNCH_CGROUP_ROOT'] = '/path/to/cgroup'
+    Rails.configuration.crunch_job_wrapper = :none
+    act_as_system_user do
+      j = Job.create(repository: 'active/foo',
+                     script: 'hash',
+                     script_version: '4fe459abe02d9b365932b8f5dc419439ab4e2577',
+                     script_parameters: {})
+      ok = false
+      Open3.expects(:popen3).at_least_once.with do |*args|
+        if args.index(j.uuid)
+          ok = ((i = args.index '--cgroup-root') and
+                (args[i+1] == '/path/to/cgroup'))
+        end
+        true
+      end.raises(StandardError.new('all is well'))
+      dispatch = CrunchDispatch.new
+      dispatch.parse_argv ['--jobs']
+      dispatch.refresh_todo
+      dispatch.start_jobs
+      assert ok
+    end
+  end
+
+  def assert_with_timeout timeout, message
+    t = 0
+    while (t += 0.1) < timeout
+      if yield
+        return
+      end
+      sleep 0.1
+    end
+    assert false, message + " (waited #{timeout} seconds)"
+  end
+
+  def can_lock lockfile
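+    # Non-blocking exclusive flock: File#flock returns false if another
+    # process (here, the forked dispatcher) already holds the lock.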
+    lockfile.open(File::RDWR|File::CREAT, 0644) do |f|
+      return f.flock(File::LOCK_EX|File::LOCK_NB)
+    end
+  end
+
+  test 'rate limit of partial line segments' do
+    act_as_system_user do
+      Rails.configuration.crunch_log_partial_line_throttle_period = 1
+
+      job = {}
+      job[:bytes_logged] = 0
+      job[:log_throttle_bytes_so_far] = 0
+      job[:log_throttle_lines_so_far] = 0
+      job[:log_throttle_bytes_skipped] = 0
+      job[:log_throttle_is_open] = true
+      job[:log_throttle_partial_line_last_at] = Time.new(0)
+      job[:log_throttle_first_partial_line] = true
+
+      dispatch = CrunchDispatch.new
+
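+      # rate_limit returns true when the line should be emitted and false when
+      # it is suppressed; partial-line segments are marked with "[...]" at both
+      # ends, as in the samples below.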
+      line = "first log line"
+      limit = dispatch.rate_limit(job, line)
+      assert_equal true, limit
+      assert_equal "first log line", line
+      assert_equal 1, job[:log_throttle_lines_so_far]
+
+      # first partial line segment is skipped and counted towards skipped lines
+      now = Time.now.strftime('%Y-%m-%d-%H:%M:%S')
+      line = "#{now} localhost 100 0 stderr [...] this is first partial line segment [...]"
+      limit = dispatch.rate_limit(job, line)
+      assert_equal true, limit
+      assert_includes line, "Rate-limiting partial segments of long lines", line
+      assert_equal 2, job[:log_throttle_lines_so_far]
+
+      # next partial line segment within throttle interval is skipped but not counted towards skipped lines
+      line = "#{now} localhost 100 0 stderr [...] second partial line segment within the interval [...]"
+      limit = dispatch.rate_limit(job, line)
+      assert_equal false, limit
+      assert_equal 2, job[:log_throttle_lines_so_far]
+
+      # next partial line after interval is counted towards skipped lines
+      sleep(1)
+      line = "#{now} localhost 100 0 stderr [...] third partial line segment after the interval [...]"
+      limit = dispatch.rate_limit(job, line)
+      assert_equal false, limit
+      assert_equal 3, job[:log_throttle_lines_so_far]
+
+      # this is not a valid line segment
+      line = "#{now} localhost 100 0 stderr [...] does not end with [...] and is not a partial segment"
+      limit = dispatch.rate_limit(job, line)
+      assert_equal true, limit
+      assert_equal "#{now} localhost 100 0 stderr [...] does not end with [...] and is not a partial segment", line
+      assert_equal 4, job[:log_throttle_lines_so_far]
+
+      # this also is not a valid line segment
+      line = "#{now} localhost 100 0 stderr does not start correctly but ends with [...]"
+      limit = dispatch.rate_limit(job, line)
+      assert_equal true, limit
+      assert_equal "#{now} localhost 100 0 stderr does not start correctly but ends with [...]", line
+      assert_equal 5, job[:log_throttle_lines_so_far]
+    end
+  end
+
+  test 'scancel orphaned job nodes' do
+    Rails.configuration.crunch_job_wrapper = :slurm_immediate
+    act_as_system_user do
+      dispatch = CrunchDispatch.new
+
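+      # Fake squeue output: of the four names below, only
+      # zzzzz-8i9sb-4cf0abc123e809j is an orphaned crunch job, so it should
+      # be the only one scancel'ed.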
+      squeue_resp = IO.popen("echo zzzzz-8i9sb-pshmckwoma9plh7\necho thisisnotvalidjobuuid\necho zzzzz-8i9sb-4cf0abc123e809j\necho zzzzz-dz642-o04e3r651turtdr\n")
+      scancel_resp = IO.popen("true")
+
+      IO.expects(:popen).
+        with(['squeue', '-a', '-h', '-o', '%j']).
+        returns(squeue_resp)
+
+      IO.expects(:popen).
+        with(dispatch.sudo_preface + ['scancel', '-n', 'zzzzz-8i9sb-4cf0abc123e809j']).
+        returns(scancel_resp)
+
+      dispatch.check_orphaned_slurm_jobs
+    end
+  end
+end
diff --git a/services/api/test/unit/fail_jobs_test.rb b/services/api/test/unit/fail_jobs_test.rb
new file mode 100644 (file)
index 0000000..3c7f9a9
--- /dev/null
@@ -0,0 +1,83 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'crunch_dispatch'
+
+class FailJobsTest < ActiveSupport::TestCase
+  include DbCurrentTime
+
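+  # Simulated node boot time (epoch seconds): jobs started before this are
+  # expected to be failed, jobs started after it to be left alone.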
+  BOOT_TIME = 1448378837
+
+  setup do
+    @job = {}
+    act_as_user users(:admin) do
+      @job[:before_reboot] = Job.create!(state: 'Running',
+                                         running: true,
+                                         started_at: Time.at(BOOT_TIME - 300))
+      @job[:after_reboot] = Job.create!(state: 'Running',
+                                        running: true,
+                                        started_at: Time.at(BOOT_TIME + 300))
+      @job[:complete] = Job.create!(state: 'Running',
+                                    running: true,
+                                    started_at: Time.at(BOOT_TIME - 300))
+      @job[:complete].update_attributes(state: 'Complete')
+      @job[:complete].update_attributes(finished_at: Time.at(BOOT_TIME + 100))
+      @job[:queued] = jobs(:queued)
+
+      @job.values.each do |job|
+        # backdate timestamps
+        Job.where(uuid: job.uuid).
+          update_all(created_at: Time.at(BOOT_TIME - 330),
+                     modified_at: (job.finished_at ||
+                                   job.started_at ||
+                                   Time.at(BOOT_TIME - 300)))
+      end
+    end
+    @dispatch = CrunchDispatch.new
+    @test_start_time = db_current_time
+  end
+
+  test 'cancel slurm jobs' do
+    Rails.configuration.crunch_job_wrapper = :slurm_immediate
+    Rails.configuration.crunch_job_user = 'foobar'
+    fake_squeue = IO.popen("echo #{@job[:before_reboot].uuid}")
+    fake_scancel = IO.popen("true")
+    IO.expects(:popen).
+      with(['squeue', '-a', '-h', '-o', '%j']).
+      returns(fake_squeue)
+    IO.expects(:popen).
+      with(includes('sudo', '-u', 'foobar', 'scancel', '-n', @job[:before_reboot].uuid)).
+      returns(fake_scancel)
+    @dispatch.fail_jobs(before: Time.at(BOOT_TIME).to_s)
+    assert_end_states
+  end
+
+  test 'use reboot time' do
+    Rails.configuration.crunch_job_wrapper = nil
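+    # fail_jobs(before: 'reboot') reads the boot time from /proc/stat, so
+    # substitute a fixture file to make the result deterministic.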
+    @dispatch.expects(:open).once.with('/proc/stat').
+      returns open(Rails.root.join('test/fixtures/files/proc_stat'))
+    @dispatch.fail_jobs(before: 'reboot')
+    assert_end_states
+  end
+
+  test 'command line help' do
+    cmd = Rails.root.join('script/fail-jobs.rb').to_s
+    assert_match(/Options:.*--before=/m, File.popen([cmd, '--help']).read)
+  end
+
+  protected
+
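+  # After fail_jobs runs, the job started before the reboot should be
+  # Failed with a finished_at inside the test window; all other jobs
+  # should be untouched.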
+  def assert_end_states
+    @job.values.map(&:reload)
+    assert_equal 'Failed', @job[:before_reboot].state
+    assert_equal false, @job[:before_reboot].running
+    assert_equal false, @job[:before_reboot].success
+    assert_operator @job[:before_reboot].finished_at, :>=, @test_start_time
+    assert_operator @job[:before_reboot].finished_at, :<=, db_current_time
+    assert_equal 'Running', @job[:after_reboot].state
+    assert_equal 'Complete', @job[:complete].state
+    assert_equal 'Queued', @job[:queued].state
+  end
+end
diff --git a/services/api/test/unit/group_test.rb b/services/api/test/unit/group_test.rb
new file mode 100644 (file)
index 0000000..8b3052e
--- /dev/null
@@ -0,0 +1,235 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class GroupTest < ActiveSupport::TestCase
+
+  test "cannot set owner_uuid to object with existing ownership cycle" do
+    set_user_from_auth :active_trustedclient
+
+    # First make sure we have lots of permission on the bad group by
+    # renaming it to "{current name} is mine all mine"
+    g = groups(:bad_group_has_ownership_cycle_b)
+    g.name += " is mine all mine"
+    assert g.save, "active user should be able to modify group #{g.uuid}"
+
+    # Use the group as the owner of a new object
+    s = Specimen.
+      create(owner_uuid: groups(:bad_group_has_ownership_cycle_b).uuid)
+    assert s.valid?, "ownership should pass validation #{s.errors.messages}"
+    assert_equal false, s.save, "should not save object with #{g.uuid} as owner"
+
+    # Use the group as the new owner of an existing object
+    s = specimens(:in_aproject)
+    s.owner_uuid = groups(:bad_group_has_ownership_cycle_b).uuid
+    assert s.valid?, "ownership should pass validation"
+    assert_equal false, s.save, "should not save object with #{g.uuid} as owner"
+  end
+
+  test "cannot create a new ownership cycle" do
+    set_user_from_auth :active_trustedclient
+
+    g_foo = Group.create!(name: "foo")
+    g_bar = Group.create!(name: "bar")
+
+    g_foo.owner_uuid = g_bar.uuid
+    assert g_foo.save, lambda { g_foo.errors.messages }
+    g_bar.owner_uuid = g_foo.uuid
+    assert g_bar.valid?, "ownership cycle should not prevent validation"
+    assert_equal false, g_bar.save, "should not create an ownership loop"
+    assert g_bar.errors.messages[:owner_uuid].join(" ").match(/ownership cycle/)
+  end
+
+  test "cannot create a single-object ownership cycle" do
+    set_user_from_auth :active_trustedclient
+
+    g_foo = Group.create!(name: "foo")
+    assert g_foo.save
+
+    # Ensure I have permission to manage this group even when its owner changes
+    perm_link = Link.create!(tail_uuid: users(:active).uuid,
+                            head_uuid: g_foo.uuid,
+                            link_class: 'permission',
+                            name: 'can_manage')
+    assert perm_link.save
+
+    g_foo.owner_uuid = g_foo.uuid
+    assert_equal false, g_foo.save, "should not create an ownership loop"
+    assert g_foo.errors.messages[:owner_uuid].join(" ").match(/ownership cycle/)
+  end
+
+  test "trash group hides contents" do
+    set_user_from_auth :active_trustedclient
+
+    g_foo = Group.create!(name: "foo")
+    col = Collection.create!(owner_uuid: g_foo.uuid)
+
+    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).any?
+    g_foo.update! is_trashed: true
+    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).empty?
+    assert Collection.readable_by(users(:active), {:include_trash => true}).where(uuid: col.uuid).any?
+    g_foo.update! is_trashed: false
+    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).any?
+  end
+
+  test "trash group" do
+    set_user_from_auth :active_trustedclient
+
+    g_foo = Group.create!(name: "foo")
+    g_bar = Group.create!(name: "bar", owner_uuid: g_foo.uuid)
+    g_baz = Group.create!(name: "baz", owner_uuid: g_bar.uuid)
+
+    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).any?
+    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).any?
+    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).any?
+    g_foo.update! is_trashed: true
+    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).empty?
+    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).empty?
+    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).empty?
+
+    assert Group.readable_by(users(:active), {:include_trash => true}).where(uuid: g_foo.uuid).any?
+    assert Group.readable_by(users(:active), {:include_trash => true}).where(uuid: g_bar.uuid).any?
+    assert Group.readable_by(users(:active), {:include_trash => true}).where(uuid: g_baz.uuid).any?
+  end
+
+  test "trash subgroup" do
+    set_user_from_auth :active_trustedclient
+
+    g_foo = Group.create!(name: "foo")
+    g_bar = Group.create!(name: "bar", owner_uuid: g_foo.uuid)
+    g_baz = Group.create!(name: "baz", owner_uuid: g_bar.uuid)
+
+    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).any?
+    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).any?
+    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).any?
+    g_bar.update! is_trashed: true
+
+    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).any?
+    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).empty?
+    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).empty?
+
+    assert Group.readable_by(users(:active), {:include_trash => true}).where(uuid: g_bar.uuid).any?
+    assert Group.readable_by(users(:active), {:include_trash => true}).where(uuid: g_baz.uuid).any?
+  end
+
+  test "trash subsubgroup" do
+    set_user_from_auth :active_trustedclient
+
+    g_foo = Group.create!(name: "foo")
+    g_bar = Group.create!(name: "bar", owner_uuid: g_foo.uuid)
+    g_baz = Group.create!(name: "baz", owner_uuid: g_bar.uuid)
+
+    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).any?
+    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).any?
+    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).any?
+    g_baz.update! is_trashed: true
+    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).any?
+    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).any?
+    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).empty?
+    assert Group.readable_by(users(:active), {:include_trash => true}).where(uuid: g_baz.uuid).any?
+  end
+
+  test "trash group propagates to subgroups" do
+    set_user_from_auth :active_trustedclient
+
+    g_foo = groups(:trashed_project)
+    g_bar = groups(:trashed_subproject)
+    g_baz = groups(:trashed_subproject3)
+    col = collections(:collection_in_trashed_subproject)
+
+    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).empty?
+    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).empty?
+    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).empty?
+    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).empty?
+
+    set_user_from_auth :admin
+    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).empty?
+    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).empty?
+    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).empty?
+    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).empty?
+
+    set_user_from_auth :active_trustedclient
+    g_foo.update! is_trashed: false
+    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).any?
+    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).any?
+    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).any?
+
+    # this one should still be trashed.
+    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).empty?
+
+    g_baz.update! is_trashed: false
+    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).any?
+  end
+
+  test "trashed does not propagate across permission links" do
+    set_user_from_auth :admin
+
+    g_foo = Group.create!(name: "foo")
+    u_bar = User.create!(first_name: "bar")
+
+    assert Group.readable_by(users(:admin)).where(uuid: g_foo.uuid).any?
+    assert User.readable_by(users(:admin)).where(uuid:  u_bar.uuid).any?
+    g_foo.update! is_trashed: true
+
+    assert Group.readable_by(users(:admin)).where(uuid: g_foo.uuid).empty?
+    assert User.readable_by(users(:admin)).where(uuid:  u_bar.uuid).any?
+
+    g_foo.update! is_trashed: false
+    ln = Link.create!(tail_uuid: g_foo.uuid,
+                      head_uuid: u_bar.uuid,
+                      link_class: "permission",
+                      name: "can_read")
+    g_foo.update! is_trashed: true
+
+    assert Group.readable_by(users(:admin)).where(uuid: g_foo.uuid).empty?
+    assert User.readable_by(users(:admin)).where(uuid:  u_bar.uuid).any?
+  end
+
+  test "move projects to trash in SweepTrashedObjects" do
+    p = groups(:trashed_on_next_sweep)
+    assert_empty Group.where('uuid=? and is_trashed=true', p.uuid)
+    SweepTrashedObjects.sweep_now
+    assert_not_empty Group.where('uuid=? and is_trashed=true', p.uuid)
+  end
+
+  test "delete projects and their contents in SweepTrashedObjects" do
+    g_foo = groups(:trashed_project)
+    g_bar = groups(:trashed_subproject)
+    g_baz = groups(:trashed_subproject3)
+    col = collections(:collection_in_trashed_subproject)
+    job = jobs(:job_in_trashed_project)
+    cr = container_requests(:cr_in_trashed_project)
+    # Record how many objects of each type exist before the sweep
+    user_nr_was = User.all.length
+    coll_nr_was = Collection.all.length
+    group_nr_was = Group.where('group_class<>?', 'project').length
+    project_nr_was = Group.where(group_class: 'project').length
+    cr_nr_was = ContainerRequest.all.length
+    job_nr_was = Job.all.length
+    assert_not_empty Group.where(uuid: g_foo.uuid)
+    assert_not_empty Group.where(uuid: g_bar.uuid)
+    assert_not_empty Group.where(uuid: g_baz.uuid)
+    assert_not_empty Collection.where(uuid: col.uuid)
+    assert_not_empty Job.where(uuid: job.uuid)
+    assert_not_empty ContainerRequest.where(uuid: cr.uuid)
+    SweepTrashedObjects.sweep_now
+    assert_empty Group.where(uuid: g_foo.uuid)
+    assert_empty Group.where(uuid: g_bar.uuid)
+    assert_empty Group.where(uuid: g_baz.uuid)
+    assert_empty Collection.where(uuid: col.uuid)
+    assert_empty Job.where(uuid: job.uuid)
+    assert_empty ContainerRequest.where(uuid: cr.uuid)
+    # No unwanted deletions should have happened
+    assert_equal user_nr_was, User.all.length
+    assert_equal coll_nr_was-2,        # collection_in_trashed_subproject
+                 Collection.all.length # & deleted_on_next_sweep collections
+    assert_equal group_nr_was, Group.where('group_class<>?', 'project').length
+    assert_equal project_nr_was-3, Group.where(group_class: 'project').length
+    assert_equal cr_nr_was-1, ContainerRequest.all.length
+    assert_equal job_nr_was-1, Job.all.length
+  end
+end
diff --git a/services/api/test/unit/helpers/api_client_authorizations_helper_test.rb b/services/api/test/unit/helpers/api_client_authorizations_helper_test.rb
new file mode 100644 (file)
index 0000000..01ed430
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ApiClientAuthorizationsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/api_clients_helper_test.rb b/services/api/test/unit/helpers/api_clients_helper_test.rb
new file mode 100644 (file)
index 0000000..4901fb4
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class ApiClientsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/authorized_keys_helper_test.rb b/services/api/test/unit/helpers/authorized_keys_helper_test.rb
new file mode 100644 (file)
index 0000000..010a0fe
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class AuthorizedKeysHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/collections_helper_test.rb b/services/api/test/unit/helpers/collections_helper_test.rb
new file mode 100644 (file)
index 0000000..dd01ca7
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class CollectionsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/commit_ancestors_helper_test.rb b/services/api/test/unit/helpers/commit_ancestors_helper_test.rb
new file mode 100644 (file)
index 0000000..423dbf6
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class CommitAncestorsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/commits_helper_test.rb b/services/api/test/unit/helpers/commits_helper_test.rb
new file mode 100644 (file)
index 0000000..fd960a8
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class CommitsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/groups_helper_test.rb b/services/api/test/unit/helpers/groups_helper_test.rb
new file mode 100644 (file)
index 0000000..ce7a3fa
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class GroupsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/humans_helper_test.rb b/services/api/test/unit/helpers/humans_helper_test.rb
new file mode 100644 (file)
index 0000000..22f9e81
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class HumansHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/job_tasks_helper_test.rb b/services/api/test/unit/helpers/job_tasks_helper_test.rb
new file mode 100644 (file)
index 0000000..af0302c
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class JobTasksHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/jobs_helper_test.rb b/services/api/test/unit/helpers/jobs_helper_test.rb
new file mode 100644 (file)
index 0000000..9d64b7d
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class JobsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/keep_disks_helper_test.rb b/services/api/test/unit/helpers/keep_disks_helper_test.rb
new file mode 100644 (file)
index 0000000..9dcc619
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class KeepDisksHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/links_helper_test.rb b/services/api/test/unit/helpers/links_helper_test.rb
new file mode 100644 (file)
index 0000000..918f145
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class LinksHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/logs_helper_test.rb b/services/api/test/unit/helpers/logs_helper_test.rb
new file mode 100644 (file)
index 0000000..616f6e6
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class LogsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/nodes_helper_test.rb b/services/api/test/unit/helpers/nodes_helper_test.rb
new file mode 100644 (file)
index 0000000..8a92eb9
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class NodesHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/pipeline_instances_helper_test.rb b/services/api/test/unit/helpers/pipeline_instances_helper_test.rb
new file mode 100644 (file)
index 0000000..9d3b5c4
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PipelineInstancesHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/pipeline_templates_helper_test.rb b/services/api/test/unit/helpers/pipeline_templates_helper_test.rb
new file mode 100644 (file)
index 0000000..9a9a417
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PipelineTemplatesHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/repositories_helper_test.rb b/services/api/test/unit/helpers/repositories_helper_test.rb
new file mode 100644 (file)
index 0000000..33cb590
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class RepositoriesHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/specimens_helper_test.rb b/services/api/test/unit/helpers/specimens_helper_test.rb
new file mode 100644 (file)
index 0000000..3709198
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class SpecimensHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/traits_helper_test.rb b/services/api/test/unit/helpers/traits_helper_test.rb
new file mode 100644 (file)
index 0000000..03b6a97
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class TraitsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/virtual_machines_helper_test.rb b/services/api/test/unit/helpers/virtual_machines_helper_test.rb
new file mode 100644 (file)
index 0000000..99fc258
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class VirtualMachinesHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/human_test.rb b/services/api/test/unit/human_test.rb
new file mode 100644 (file)
index 0000000..83cc40e
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class HumanTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/job_task_test.rb b/services/api/test/unit/job_task_test.rb
new file mode 100644 (file)
index 0000000..05a5d21
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class JobTaskTest < ActiveSupport::TestCase
+  test "new tasks get an assigned qsequence" do
+    set_user_from_auth :active
+    task = JobTask.create
+    assert_not_nil task.qsequence
+    assert_operator(task.qsequence, :>=, 0)
+  end
+
+  test "assigned qsequence is not overwritten" do
+    set_user_from_auth :active
+    task = JobTask.create!(qsequence: 99)
+    assert_equal(99, task.qsequence)
+  end
+end
diff --git a/services/api/test/unit/job_test.rb b/services/api/test/unit/job_test.rb
new file mode 100644 (file)
index 0000000..41e2adb
--- /dev/null
@@ -0,0 +1,677 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/git_test_helper'
+require 'helpers/docker_migration_helper'
+
+class JobTest < ActiveSupport::TestCase
+  include DockerMigrationHelper
+  include GitTestHelper
+
+  BAD_COLLECTION = "#{'f' * 32}+0"
+
+  setup do
+    set_user_from_auth :active
+  end
+
+  def job_attrs merge_me={}
+    # Default (valid) set of attributes, with given overrides
+    {
+      script: "hash",
+      script_version: "master",
+      repository: "active/foo",
+    }.merge(merge_me)
+  end
+
+  test "Job without Docker image doesn't get locator" do
+    job = Job.new job_attrs
+    assert job.valid?, job.errors.full_messages.to_s
+    assert_nil job.docker_image_locator
+  end
+
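+  # Map each way of specifying a Docker image (tag name, content hash, or
+  # collection locator) to the fixture lookup that yields that spec.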
+  { 'name' => [:links, :docker_image_collection_tag, :name],
+    'hash' => [:links, :docker_image_collection_hash, :name],
+    'locator' => [:collections, :docker_image, :portable_data_hash],
+  }.each_pair do |spec_type, (fixture_type, fixture_name, fixture_attr)|
+    test "Job initialized with Docker image #{spec_type} gets locator" do
+      image_spec = send(fixture_type, fixture_name).send(fixture_attr)
+      job = Job.new job_attrs(runtime_constraints:
+                              {'docker_image' => image_spec})
+      assert job.valid?, job.errors.full_messages.to_s
+      assert_equal(collections(:docker_image).portable_data_hash, job.docker_image_locator)
+    end
+
+    test "Job modified with Docker image #{spec_type} gets locator" do
+      job = Job.new job_attrs
+      assert job.valid?, job.errors.full_messages.to_s
+      assert_nil job.docker_image_locator
+      image_spec = send(fixture_type, fixture_name).send(fixture_attr)
+      job.runtime_constraints['docker_image'] = image_spec
+      assert job.valid?, job.errors.full_messages.to_s
+      assert_equal(collections(:docker_image).portable_data_hash, job.docker_image_locator)
+    end
+  end
+
+  test "removing a Docker runtime constraint removes the locator" do
+    image_locator = collections(:docker_image).portable_data_hash
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => image_locator})
+    assert job.valid?, job.errors.full_messages.to_s
+    assert_equal(image_locator, job.docker_image_locator)
+    job.runtime_constraints = {}
+    assert job.valid?, job.errors.full_messages.to_s + " after clearing runtime constraints"
+    assert_nil job.docker_image_locator
+  end
+
+  test "locate a Docker image with a repository + tag" do
+    image_repo, image_tag =
+      links(:docker_image_collection_tag2).name.split(':', 2)
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => image_repo,
+                              'docker_image_tag' => image_tag})
+    assert job.valid?, job.errors.full_messages.to_s
+    assert_equal(collections(:docker_image).portable_data_hash, job.docker_image_locator)
+  end
+
+  test "can't locate a Docker image with a nonexistent tag" do
+    image_repo = links(:docker_image_collection_tag).name
+    image_tag = '__nonexistent tag__'
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => image_repo,
+                              'docker_image_tag' => image_tag})
+    assert(job.invalid?, "Job with bad Docker tag valid")
+  end
+
+  [
+    false,
+    true
+  ].each do |use_config|
+    test "Job with no Docker image uses default docker image when configuration is set #{use_config}" do
+      default_docker_image = collections(:docker_image)[:portable_data_hash]
+      Rails.configuration.default_docker_image_for_jobs = default_docker_image if use_config
+
+      job = Job.new job_attrs
+      assert job.valid?, job.errors.full_messages.to_s
+
+      if use_config
+        refute_nil job.docker_image_locator
+        assert_equal default_docker_image, job.docker_image_locator
+      else
+        assert_nil job.docker_image_locator
+      end
+    end
+  end
+
+  test "create a job with a disambiguated script_version branch name" do
+    job = Job.
+      new(script: "testscript",
+          script_version: "heads/7387838c69a21827834586cc42b467ff6c63293b",
+          repository: "active/shabranchnames",
+          script_parameters: {})
+    assert(job.save)
+    assert_equal("abec49829bf1758413509b7ffcab32a771b71e81", job.script_version)
+  end
+
+  test "locate a Docker image with a partial hash" do
+    image_hash = links(:docker_image_collection_hash).name[0..24]
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => image_hash})
+    assert job.valid?, job.errors.full_messages.to_s + " with partial hash #{image_hash}"
+    assert_equal(collections(:docker_image).portable_data_hash, job.docker_image_locator)
+  end
+
+  { 'name' => 'arvados_test_nonexistent',
+    'hash' => 'f' * 64,
+    'locator' => BAD_COLLECTION,
+  }.each_pair do |spec_type, image_spec|
+    test "Job validation fails with nonexistent Docker image #{spec_type}" do
+      Rails.configuration.remote_hosts = {}
+      job = Job.new job_attrs(runtime_constraints:
+                              {'docker_image' => image_spec})
+      assert(job.invalid?, "nonexistent Docker image #{spec_type} was valid")
+    end
+  end
+
+  test "Job validation fails with non-Docker Collection constraint" do
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => collections(:foo_file).uuid})
+    assert(job.invalid?, "non-Docker Collection constraint was valid")
+  end
+
+  test "can create Job with Docker image Collection without Docker links" do
+    image_uuid = collections(:unlinked_docker_image).portable_data_hash
+    job = Job.new job_attrs(runtime_constraints: {"docker_image" => image_uuid})
+    assert(job.valid?, "Job created with unlinked Docker image was invalid")
+    assert_equal(image_uuid, job.docker_image_locator)
+  end
+
+  def check_attrs_unset(job, attrs)
+    assert_empty(attrs.each_key.map { |key| job.send(key) }.compact,
+                 "job has values for #{attrs.keys}")
+  end
+
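+  # Try to create a job with the given protected attributes; accept either
+  # a mass-assignment error or the attributes being silently ignored,
+  # since both mean the creation path is protected.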
+  def check_creation_prohibited(attrs)
+    begin
+      job = Job.new(job_attrs(attrs))
+    rescue ActiveModel::MassAssignmentSecurity::Error
+      # Test passes - expected attribute protection
+    else
+      check_attrs_unset(job, attrs)
+    end
+  end
+
+  def check_modification_prohibited(attrs)
+    job = Job.new(job_attrs)
+    attrs.each_pair do |key, value|
+      assert_raises(NoMethodError) { job.send("#{key}=".to_sym, value) }
+    end
+    check_attrs_unset(job, attrs)
+  end
+
+  test "can't create Job with Docker image locator" do
+    check_creation_prohibited(docker_image_locator: BAD_COLLECTION)
+  end
+
+  test "can't assign Docker image locator to Job" do
+    check_modification_prohibited(docker_image_locator: BAD_COLLECTION)
+  end
+
+  [
+   {script_parameters: ""},
+   {script_parameters: []},
+   {script_parameters: {["foo"] => ["bar"]}},
+   {runtime_constraints: ""},
+   {runtime_constraints: []},
+   {tasks_summary: ""},
+   {tasks_summary: []},
+  ].each do |invalid_attrs|
+    test "validation failures set error messages: #{invalid_attrs.to_json}" do
+      # Ensure the default (valid) attrs don't produce errors -- otherwise we will
+      # not know whether errors reported below are actually caused by
+      # invalid_attrs.
+      Job.new(job_attrs).save!
+
+      err = assert_raises(ArgumentError) do
+        Job.new(job_attrs(invalid_attrs)).save!
+      end
+      assert_match /parameters|constraints|summary/, err.message
+    end
+  end
+
+  test "invalid script_version" do
+    invalid = {
+      script_version: "no/branch/could/ever/possibly/have/this/name",
+    }
+    err = assert_raises(ActiveRecord::RecordInvalid) do
+      Job.new(job_attrs(invalid)).save!
+    end
+    assert_match /Script version .* does not resolve to a commit/, err.message
+  end
+
+  [
+    # Each test case is of the following format
+    # Array of parameters where each parameter is of the format:
+    #  attr name to be changed, attr value, and array of expectations (where each expectation is an array)
+    [['running', false, [['state', 'Queued']]]],
+    [['state', 'Running', [['started_at', 'not_nil']]]],
+    [['is_locked_by_uuid', 'use_current_user_uuid', [['state', 'Queued']]], ['state', 'Running', [['running', true], ['started_at', 'not_nil'], ['success', 'nil']]]],
+    [['running', false, [['state', 'Queued']]], ['state', 'Complete', [['success', true]]]],
+    [['running', true, [['state', 'Running']]], ['cancelled_at', Time.now, [['state', 'Cancelled']]]],
+    [['running', true, [['state', 'Running']]], ['state', 'Cancelled', [['cancelled_at', 'not_nil']]]],
+    [['running', true, [['state', 'Running']]], ['success', true, [['state', 'Complete']]]],
+    [['running', true, [['state', 'Running']]], ['success', false, [['state', 'Failed']]]],
+    [['running', true, [['state', 'Running']]], ['state', 'Complete', [['success', true],['finished_at', 'not_nil']]]],
+    [['running', true, [['state', 'Running']]], ['state', 'Failed', [['success', false],['finished_at', 'not_nil']]]],
+    [['cancelled_at', Time.now, [['state', 'Cancelled']]], ['success', false, [['state', 'Cancelled'],['finished_at', 'nil'], ['cancelled_at', 'not_nil']]]],
+    [['cancelled_at', Time.now, [['state', 'Cancelled'],['running', false]]], ['success', true, [['state', 'Cancelled'],['running', false],['finished_at', 'nil'],['cancelled_at', 'not_nil']]]],
+    # potential migration cases
+    [['state', nil, [['state', 'Queued']]]],
+    [['state', nil, [['state', 'Queued']]], ['cancelled_at', Time.now, [['state', 'Cancelled']]]],
+    [['running', true, [['state', 'Running']]], ['state', nil, [['state', 'Running']]]],
+  ].each do |parameters|
+    test "verify job status #{parameters}" do
+      job = Job.create! job_attrs
+      assert_equal 'Queued', job.state, "job.state"
+
+      parameters.each do |parameter|
+        expectations = parameter[2]
+        if 'use_current_user_uuid' == parameter[1]
+          parameter[1] = Thread.current[:user].uuid
+        end
+
+        if expectations.instance_of? Array
+          job[parameter[0]] = parameter[1]
+          assert_equal true, job.save, job.errors.full_messages.to_s
+          expectations.each do |expectation|
+            if expectation[1] == 'not_nil'
+              assert_not_nil job[expectation[0]], expectation[0]
+            elsif expectation[1] == 'nil'
+              assert_nil job[expectation[0]], expectation[0]
+            else
+              assert_equal expectation[1], job[expectation[0]], expectation[0]
+            end
+          end
+        else
+          raise 'I do not know how to handle this expectation'
+        end
+      end
+    end
+  end
+
+  test "Test job state changes" do
+    all = ["Queued", "Running", "Complete", "Failed", "Cancelled"]
+    valid = {"Queued" => all, "Running" => ["Complete", "Failed", "Cancelled"]}
+    all.each do |start|
+      all.each do |finish|
+        if start != finish
+          job = Job.create! job_attrs(state: start)
+          assert_equal start, job.state
+          job.state = finish
+          job.save
+          job.reload
+          if valid[start] and valid[start].include? finish
+            assert_equal finish, job.state
+          else
+            assert_equal start, job.state
+          end
+        end
+      end
+    end
+  end
+
+  test "Test job locking" do
+    set_user_from_auth :active_trustedclient
+    job = Job.create! job_attrs
+
+    assert_equal "Queued", job.state
+
+    # Should be able to lock successfully
+    job.lock current_user.uuid
+    assert_equal "Running", job.state
+
+    assert_raises ArvadosModel::AlreadyLockedError do
+      # Can't lock it again
+      job.lock current_user.uuid
+    end
+    job.reload
+    assert_equal "Running", job.state
+
+    set_user_from_auth :project_viewer
+    assert_raises ArvadosModel::AlreadyLockedError do
+      # Can't lock it as a different user either
+      job.lock current_user.uuid
+    end
+    job.reload
+    assert_equal "Running", job.state
+
+    assert_raises ArvadosModel::PermissionDeniedError do
+      # Can't update fields as a different user
+      job.update_attributes(state: "Failed")
+    end
+    job.reload
+    assert_equal "Running", job.state
+
+    set_user_from_auth :active_trustedclient
+
+    # Can update fields as the locked_by user
+    job.update_attributes(state: "Failed")
+    assert_equal "Failed", job.state
+  end
+
+  test "admin user can cancel a running job despite lock" do
+    set_user_from_auth :active_trustedclient
+    job = Job.create! job_attrs
+    job.lock current_user.uuid
+    assert_equal Job::Running, job.state
+
+    set_user_from_auth :spectator
+    assert_raises do
+      job.update_attributes!(state: Job::Cancelled)
+    end
+
+    set_user_from_auth :admin
+    job.reload
+    assert_equal Job::Running, job.state
+    job.update_attributes!(state: Job::Cancelled)
+    assert_equal Job::Cancelled, job.state
+  end
+
+  test "verify job queue position" do
+    job1 = Job.create! job_attrs
+    assert_equal 'Queued', job1.state, "Incorrect job state for newly created job1"
+
+    job2 = Job.create! job_attrs
+    assert_equal 'Queued', job2.state, "Incorrect job state for newly created job2"
+
+    assert_not_nil job1.queue_position, "Expected non-nil queue position for job1"
+    assert_not_nil job2.queue_position, "Expected non-nil queue position for job2"
+  end
+
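+  # Commits in the SDK test fixture repository: 'master' resolves to
+  # SDK_MASTER and 'commit2' to SDK_TAGGED (see the mapping below).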
+  SDK_MASTER = "ca68b24e51992e790f29df5cc4bc54ce1da4a1c2"
+  SDK_TAGGED = "00634b2b8a492d6f121e3cf1d6587b821136a9a7"
+
+  def sdk_constraint(version)
+    {runtime_constraints: {
+        "arvados_sdk_version" => version,
+        "docker_image" => links(:docker_image_collection_tag).name,
+      }}
+  end
+
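+  # Build a job with the given block and assert that it resolves to the
+  # expected SDK commit; expected=nil means validation should fail.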
+  def check_job_sdk_version(expected)
+    job = yield
+    if expected.nil?
+      refute(job.valid?, "job valid with bad Arvados SDK version")
+    else
+      assert(job.valid?, "job not valid with good Arvados SDK version")
+      assert_equal(expected, job.arvados_sdk_version)
+    end
+  end
+
+  { "master" => SDK_MASTER,
+    "commit2" => SDK_TAGGED,
+    SDK_TAGGED[0, 8] => SDK_TAGGED,
+    "__nonexistent__" => nil,
+  }.each_pair do |search, commit_hash|
+    test "creating job with SDK version '#{search}'" do
+      check_job_sdk_version(commit_hash) do
+        Job.new(job_attrs(sdk_constraint(search)))
+      end
+    end
+
+    test "updating job from no SDK to version '#{search}'" do
+      job = Job.create!(job_attrs)
+      assert_nil job.arvados_sdk_version
+      check_job_sdk_version(commit_hash) do
+        job.runtime_constraints = sdk_constraint(search)[:runtime_constraints]
+        job
+      end
+    end
+
+    test "updating job from SDK version 'master' to '#{search}'" do
+      job = Job.create!(job_attrs(sdk_constraint("master")))
+      assert_equal(SDK_MASTER, job.arvados_sdk_version)
+      check_job_sdk_version(commit_hash) do
+        job.runtime_constraints = sdk_constraint(search)[:runtime_constraints]
+        job
+      end
+    end
+  end
+
+  test "clear the SDK version" do
+    job = Job.create!(job_attrs(sdk_constraint("master")))
+    assert_equal(SDK_MASTER, job.arvados_sdk_version)
+    job.runtime_constraints = {}
+    assert(job.valid?, "job invalid after clearing SDK version")
+    assert_nil(job.arvados_sdk_version)
+  end
+
+  test "job with SDK constraint, without Docker image is invalid" do
+    sdk_attrs = sdk_constraint("master")
+    sdk_attrs[:runtime_constraints].delete("docker_image")
+    job = Job.create(job_attrs(sdk_attrs))
+    refute(job.valid?, "Job valid with SDK version, without Docker image")
+    sdk_errors = job.errors.messages[:arvados_sdk_version] || []
+    refute_empty(sdk_errors.grep(/\bDocker\b/),
+                 "no Job SDK errors mention that Docker is required")
+  end
+
+  test "invalid to clear Docker image constraint when SDK constraint exists" do
+    job = Job.create!(job_attrs(sdk_constraint("master")))
+    job.runtime_constraints.delete("docker_image")
+    refute(job.valid?,
+           "Job with SDK constraint valid after clearing Docker image")
+  end
+
+  test "use migrated docker image if requesting old-format image by tag" do
+    Rails.configuration.docker_image_formats = ['v2']
+    add_docker19_migration_link
+    job = Job.create!(
+      job_attrs(
+        script: 'foo',
+        runtime_constraints: {
+          'docker_image' => links(:docker_image_collection_tag).name}))
+    assert(job.valid?)
+    assert_equal(job.docker_image_locator, collections(:docker_image_1_12).portable_data_hash)
+  end
+
+  test "use migrated docker image if requesting old-format image by pdh" do
+    Rails.configuration.docker_image_formats = ['v2']
+    add_docker19_migration_link
+    job = Job.create!(
+      job_attrs(
+        script: 'foo',
+        runtime_constraints: {
+          'docker_image' => collections(:docker_image).portable_data_hash}))
+    assert(job.valid?)
+    assert_equal(job.docker_image_locator, collections(:docker_image_1_12).portable_data_hash)
+  end
+
+  [[:docker_image, :docker_image, :docker_image_1_12],
+   [:docker_image_1_12, :docker_image, :docker_image_1_12],
+   [:docker_image, :docker_image_1_12, :docker_image_1_12],
+   [:docker_image_1_12, :docker_image_1_12, :docker_image_1_12],
+  ].each do |existing_image, request_image, expect_image|
+    test "if a #{existing_image} job exists, #{request_image} yields #{expect_image} after migration" do
+      Rails.configuration.docker_image_formats = ['v1']
+
+      if existing_image == :docker_image
+        oldjob = Job.create!(
+          job_attrs(
+            script: 'foobar1',
+            runtime_constraints: {
+              'docker_image' => collections(existing_image).portable_data_hash}))
+        oldjob.reload
+        assert_equal(oldjob.docker_image_locator,
+                     collections(existing_image).portable_data_hash)
+      elsif existing_image == :docker_image_1_12
+        assert_raises(ActiveRecord::RecordInvalid,
+                      "Should not resolve v2 image when only v1 is supported") do
+          oldjob = Job.create!(
+            job_attrs(
+              script: 'foobar1',
+              runtime_constraints: {
+                'docker_image' => collections(existing_image).portable_data_hash}))
+        end
+      end
+
+      Rails.configuration.docker_image_formats = ['v2']
+      add_docker19_migration_link
+
+      # Check that both v1 and v2 images get resolved to v2.
+      newjob = Job.create!(
+        job_attrs(
+          script: 'foobar1',
+          runtime_constraints: {
+            'docker_image' => collections(request_image).portable_data_hash}))
+      newjob.reload
+      assert_equal(newjob.docker_image_locator,
+                   collections(expect_image).portable_data_hash)
+    end
+  end
+
+  test "can't create job with SDK version assigned directly" do
+    check_creation_prohibited(arvados_sdk_version: SDK_MASTER)
+  end
+
+  test "can't modify job to assign SDK version directly" do
+    check_modification_prohibited(arvados_sdk_version: SDK_MASTER)
+  end
+
+  test "job validation fails when collection uuid found in script_parameters" do
+    bad_params = {
+      script_parameters: {
+        'input' => {
+          'param1' => 'the collection uuid zzzzz-4zz18-012345678901234'
+        }
+      }
+    }
+    assert_raises(ActiveRecord::RecordInvalid,
+                  "created job with a collection uuid in script_parameters") do
+      Job.create!(job_attrs(bad_params))
+    end
+  end
+
+  test "job validation succeeds when no collection uuid in script_parameters" do
+    good_params = {
+      script_parameters: {
+        'arg1' => 'foo',
+        'arg2' => [ 'bar', 'baz' ],
+        'arg3' => {
+          'a' => 1,
+          'b' => [2, 3, 4],
+        }
+      }
+    }
+    job = Job.create!(job_attrs(good_params))
+    assert job.valid?
+  end
+
+  test 'update job uuid tag in internal.git when version changes' do
+    authorize_with :active
+    j = jobs :queued
+    j.update_attributes repository: 'active/foo', script_version: 'b1'
+    assert_equal('1de84a854e2b440dc53bf42f8548afa4c17da332',
+                 internal_tag(j.uuid))
+    j.update_attributes repository: 'active/foo', script_version: 'master'
+    assert_equal('077ba2ad3ea24a929091a9e6ce545c93199b8e57',
+                 internal_tag(j.uuid))
+  end
+
+  test 'script_parameters_digest is independent of key order' do
+    j1 = Job.new(job_attrs(script_parameters: {'a' => 'a', 'ddee' => {'d' => 'd', 'e' => 'e'}}))
+    j2 = Job.new(job_attrs(script_parameters: {'ddee' => {'e' => 'e', 'd' => 'd'}, 'a' => 'a'}))
+    assert j1.valid?
+    assert j2.valid?
+    assert_equal(j1.script_parameters_digest, j2.script_parameters_digest)
+  end
+
+  test 'job fixtures have correct script_parameters_digest' do
+    Job.all.each do |j|
+      d = j.script_parameters_digest
+      assert_equal(j.update_script_parameters_digest, d,
+                   "wrong script_parameters_digest for #{j.uuid}")
+    end
+  end
+
+  test 'deep_sort_hash on array of hashes' do
+    a = {'z' => [[{'a' => 'a', 'b' => 'b'}]]}
+    b = {'z' => [[{'b' => 'b', 'a' => 'a'}]]}
+    assert_equal Job.deep_sort_hash(a).to_json, Job.deep_sort_hash(b).to_json
+  end
+
+  test 'find_reusable without logging' do
+    Rails.logger.expects(:info).never
+    try_find_reusable
+  end
+
+  test 'find_reusable with logging' do
+    Rails.configuration.log_reuse_decisions = true
+    Rails.logger.expects(:info).at_least(3)
+    try_find_reusable
+  end
+
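+  # Exercise Job.find_reusable three ways: two candidates with identical
+  # outputs (older one reused), conflicting outputs (no reuse), and
+  # conflicting outputs with reuse_job_if_outputs_differ enabled.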
+  def try_find_reusable
+    foobar = jobs(:foobar)
+    example_attrs = {
+      script_version: foobar.script_version,
+      script: foobar.script,
+      script_parameters: foobar.script_parameters,
+      repository: foobar.repository,
+    }
+
+    # Two matching jobs exist with identical outputs. The older one
+    # should be reused.
+    j = Job.find_reusable(example_attrs, {}, [], [users(:active)])
+    assert j
+    assert_equal foobar.uuid, j.uuid
+
+    # Two matching jobs exist with different outputs. Neither should
+    # be reused.
+    Job.where(uuid: jobs(:job_with_latest_version).uuid).
+      update_all(output: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1')
+    assert_nil Job.find_reusable(example_attrs, {}, [], [users(:active)])
+
+    # ...unless config says to reuse the earlier job in such cases.
+    Rails.configuration.reuse_job_if_outputs_differ = true
+    j = Job.find_reusable(example_attrs, {}, [], [users(:active)])
+    assert_equal foobar.uuid, j.uuid
+  end
+
+  [
+    true,
+    false,
+  ].each do |cascade|
+    test "cancel job with cascade #{cascade}" do
+      job = Job.find_by_uuid jobs(:running_job_with_components_at_level_1).uuid
+      job.cancel cascade: cascade
+      assert_equal Job::Cancelled, job.state
+
+      descendents = ['zzzzz-8i9sb-jobcomponentsl2',
+                     'zzzzz-d1hrv-picomponentsl02',
+                     'zzzzz-8i9sb-job1atlevel3noc',
+                     'zzzzz-8i9sb-job2atlevel3noc']
+
+      jobs = Job.where(uuid: descendents)
+      jobs.each do |j|
+        assert_equal ('Cancelled' == j.state), cascade
+      end
+
+      pipelines = PipelineInstance.where(uuid: descendents)
+      pipelines.each do |pi|
+        assert_equal ('Paused' == pi.state), cascade
+      end
+    end
+  end
+
+  test 'cancelling a completed job raises error' do
+    job = Job.find_by_uuid jobs(:job_with_latest_version).uuid
+    assert job
+    assert_equal 'Complete', job.state
+
+    assert_raises(ArvadosModel::InvalidStateTransitionError) do
+      job.cancel
+    end
+  end
+
+  test 'cancelling a job with circular relationship with another does not result in an infinite loop' do
+    job = Job.find_by_uuid jobs(:running_job_2_with_circular_component_relationship).uuid
+
+    job.cancel cascade: true
+
+    assert_equal Job::Cancelled, job.state
+
+    child = Job.find_by_uuid job.components.collect{|_, uuid| uuid}[0]
+    assert_equal Job::Cancelled, child.state
+  end
+
+  test 'enable legacy api configuration option = true' do
+    Rails.configuration.enable_legacy_jobs_api = true
+    check_enable_legacy_jobs_api
+    assert_equal [], Rails.configuration.disable_api_methods
+  end
+
+  test 'enable legacy api configuration option = false' do
+    Rails.configuration.enable_legacy_jobs_api = false
+    check_enable_legacy_jobs_api
+    assert_equal Disable_jobs_api_method_list, Rails.configuration.disable_api_methods
+  end
+
+  test 'enable legacy api configuration option = auto, has jobs' do
+    Rails.configuration.enable_legacy_jobs_api = "auto"
+    check_enable_legacy_jobs_api
+    assert_equal [], Rails.configuration.disable_api_methods
+  end
+
+  test 'enable legacy api configuration option = auto, no jobs' do
+    Rails.configuration.enable_legacy_jobs_api = "auto"
+    act_as_system_user do
+      Job.destroy_all
+    end
+    puts "ZZZ #{Job.count}"
+    check_enable_legacy_jobs_api
+    assert_equal Disable_jobs_api_method_list, Rails.configuration.disable_api_methods
+  end
+end
diff --git a/services/api/test/unit/keep_disk_test.rb b/services/api/test/unit/keep_disk_test.rb
new file mode 100644 (file)
index 0000000..04b89cd
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class KeepDiskTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/keep_service_test.rb b/services/api/test/unit/keep_service_test.rb
new file mode 100644 (file)
index 0000000..76e4bf6
--- /dev/null
@@ -0,0 +1,37 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class KeepServiceTest < ActiveSupport::TestCase
+  test "non-admins cannot create services" do
+    set_user_from_auth :active
+    ks = KeepService.new
+    assert_not_allowed do
+      ks.save
+    end
+  end
+
+  test "non-admins cannot update services" do
+    set_user_from_auth :active
+    ks = keep_services(:proxy)
+    ks.service_port = 64434
+    assert_not_allowed do
+      ks.save
+    end
+  end
+
+  test "admins can create services" do
+    set_user_from_auth :admin
+    ks = KeepService.new
+    assert(ks.save, "saving new service failed")
+  end
+
+  test "admins can update services" do
+    set_user_from_auth :admin
+    ks = keep_services(:proxy)
+    ks.service_port = 64434
+    assert(ks.save, "saving updated service failed")
+  end
+end
diff --git a/services/api/test/unit/link_test.rb b/services/api/test/unit/link_test.rb
new file mode 100644 (file)
index 0000000..00f3cc2
--- /dev/null
@@ -0,0 +1,88 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class LinkTest < ActiveSupport::TestCase
+  fixtures :all
+
+  setup do
+    set_user_from_auth :admin_trustedclient
+  end
+
+  test "cannot delete an object referenced by unwritable links" do
+    ob = act_as_user users(:active) do
+      Specimen.create
+    end
+    link = act_as_user users(:admin) do
+      Link.create(tail_uuid: users(:active).uuid,
+                  head_uuid: ob.uuid,
+                  link_class: 'test',
+                  name: 'test')
+    end
+    assert_equal users(:admin).uuid, link.owner_uuid
+    assert_raises(ArvadosModel::PermissionDeniedError,
+                  "should not delete #{ob.uuid} with link #{link.uuid}") do
+      act_as_user users(:active) do
+        ob.destroy
+      end
+    end
+    act_as_user users(:admin) do
+      ob.destroy
+    end
+    assert_empty Link.where(uuid: link.uuid)
+  end
+
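+  # As the active user, try to create a can_read permission link with the
+  # given overrides and return whether it validates; treat
+  # PermissionDeniedError as invalid.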
+  def new_active_link_valid?(link_attrs)
+    set_user_from_auth :active
+    begin
+      Link.
+        create({link_class: "permission",
+                 name: "can_read",
+                 head_uuid: groups(:aproject).uuid,
+               }.merge(link_attrs)).
+        valid?
+    rescue ArvadosModel::PermissionDeniedError
+      false
+    end
+  end
+
+  test "non-admin project owner can make it public" do
+    assert(new_active_link_valid?(tail_uuid: groups(:anonymous_group).uuid),
+           "non-admin project owner can't make their project public")
+  end
+
+  test "link granting permission to nonexistent user is invalid" do
+    refute new_active_link_valid?(tail_uuid:
+                                  users(:active).uuid.sub(/-\w+$/, "-#{'z' * 15}"))
+  end
+
+  test "link granting non-project permission to unreadable user is invalid" do
+    refute new_active_link_valid?(tail_uuid: users(:admin).uuid,
+                                  head_uuid: collections(:bar_file).uuid)
+  end
+
+  test "user can't add a Collection to a Project without permission" do
+    refute new_active_link_valid?(link_class: "name",
+                                  name: "Permission denied test name",
+                                  tail_uuid: collections(:bar_file).uuid)
+  end
+
+  test "user can't add a User to a Project" do
+    # Users *can* give other users permissions to projects.
+    # This test helps ensure that that exception is specific to permissions.
+    refute new_active_link_valid?(link_class: "name",
+                                  name: "Permission denied test name",
+                                  tail_uuid: users(:admin).uuid)
+  end
+
+  test "link granting project permissions to unreadable user is invalid" do
+    refute new_active_link_valid?(tail_uuid: users(:admin).uuid)
+  end
+
+  test "permission link can't exist on past collection versions" do
+    refute new_active_link_valid?(tail_uuid: groups(:public).uuid,
+                                  head_uuid: collections(:w_a_z_file_version_1).uuid)
+  end
+end
diff --git a/services/api/test/unit/log_test.rb b/services/api/test/unit/log_test.rb
new file mode 100644 (file)
index 0000000..5a78f25
--- /dev/null
@@ -0,0 +1,396 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'audit_logs'
+
+class LogTest < ActiveSupport::TestCase
+  include CurrentApiClient
+
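+  # For each event type: the timestamp attribute compared against the
+  # log's event_at, plus the assertions applied to the old and new
+  # property sets.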
+  EVENT_TEST_METHODS = {
+    :create => [:created_at, :assert_nil, :assert_not_nil],
+    :update => [:modified_at, :assert_not_nil, :assert_not_nil],
+    :delete => [nil, :assert_not_nil, :assert_nil],
+  }
+
+  setup do
+    @start_time = Time.now
+    @log_count = 1
+  end
+
+  def assert_properties(test_method, event, props, *keys)
+    verb = (test_method == :assert_nil) ? 'have nil' : 'define'
+    keys.each do |prop_name|
+      assert_includes(props, prop_name, "log properties missing #{prop_name}")
+      self.send(test_method, props[prop_name],
+                "#{event.to_s} log should #{verb} #{prop_name}")
+    end
+  end
+
+  def get_logs_about(thing)
+    Log.where(object_uuid: thing.uuid).order("created_at ASC").all
+  end
+
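+  # Assert that the newest log entry about thing records the given event
+  # type with consistent ownership, timestamps, and old/new properties;
+  # yields the properties for extra checks.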
+  def assert_logged(thing, event_type)
+    logs = get_logs_about(thing)
+    assert_equal(@log_count, logs.size, "log count mismatch")
+    @log_count += 1
+    log = logs.last
+    props = log.properties
+    assert_equal(current_user.andand.uuid, log.owner_uuid,
+                 "log is not owned by current user")
+    assert_equal(current_user.andand.uuid, log.modified_by_user_uuid,
+                 "log is not 'modified by' current user")
+    assert_equal(current_api_client.andand.uuid, log.modified_by_client_uuid,
+                 "log is not 'modified by' current client")
+    assert_equal(thing.uuid, log.object_uuid, "log UUID mismatch")
+    assert_equal(event_type.to_s, log.event_type, "log event type mismatch")
+    time_method, old_props_test, new_props_test = EVENT_TEST_METHODS[event_type]
+    if time_method.nil? or (timestamp = thing.send(time_method)).nil?
+      assert(log.event_at >= @start_time, "log timestamp too old")
+    else
+      assert_in_delta(timestamp, log.event_at, 1, "log timestamp mismatch")
+    end
+    assert_properties(old_props_test, event_type, props,
+                      'old_etag', 'old_attributes')
+    assert_properties(new_props_test, event_type, props,
+                      'new_etag', 'new_attributes')
+    ['old_attributes', 'new_attributes'].each do |logattr|
+      next if !props[logattr]
+      assert_match /"created_at":"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{9}Z"/, Oj.dump(props, mode: :compat)
+    end
+    yield props if block_given?
+  end
+
+  def assert_logged_with_clean_properties(obj, event_type, excluded_attr)
+    assert_logged(obj, event_type) do |props|
+      ['old_attributes', 'new_attributes'].map do |logattr|
+        attributes = props[logattr]
+        next if attributes.nil?
+        refute_includes(attributes, excluded_attr,
+                        "log #{logattr} includes #{excluded_attr}")
+      end
+      yield props if block_given?
+    end
+  end
+
+  test "creating a user makes a log" do
+    set_user_from_auth :admin_trustedclient
+    u = User.new(first_name: "Log", last_name: "Test")
+    u.save!
+    assert_logged(u, :create) do |props|
+      assert_equal(u.etag, props['new_etag'], "new user etag mismatch")
+      assert_equal(u.first_name, props['new_attributes']['first_name'],
+                   "new user first name mismatch")
+      assert_equal(u.last_name, props['new_attributes']['last_name'],
+                   "new user first name mismatch")
+    end
+  end
+
+  test "updating a virtual machine makes a log" do
+    set_user_from_auth :admin_trustedclient
+    vm = virtual_machines(:testvm)
+    orig_etag = vm.etag
+    vm.hostname = 'testvm.testshell'
+    vm.save!
+    assert_logged(vm, :update) do |props|
+      assert_equal(orig_etag, props['old_etag'], "updated VM old etag mismatch")
+      assert_equal(vm.etag, props['new_etag'], "updated VM new etag mismatch")
+      assert_equal('testvm.shell', props['old_attributes']['hostname'],
+                   "updated VM old name mismatch")
+      assert_equal('testvm.testshell', props['new_attributes']['hostname'],
+                   "updated VM new name mismatch")
+    end
+  end
+
+  test "old_attributes preserves values deep inside a hash" do
+    set_user_from_auth :active
+    it = specimens(:owned_by_active_user)
+    it.properties = {'foo' => {'bar' => ['baz', 'qux', {'quux' => 'bleat'}]}}
+    it.save!
+    @log_count += 1
+    it.properties['foo']['bar'][2]['quux'] = 'blert'
+    it.save!
+    assert_logged it, :update do |props|
+      assert_equal 'bleat', props['old_attributes']['properties']['foo']['bar'][2]['quux']
+      assert_equal 'blert', props['new_attributes']['properties']['foo']['bar'][2]['quux']
+    end
+  end
+
+  test "destroying an authorization makes a log" do
+    set_user_from_auth :admin_trustedclient
+    auth = api_client_authorizations(:spectator)
+    orig_etag = auth.etag
+    orig_attrs = auth.attributes
+    orig_attrs.delete 'api_token'
+    auth.destroy
+    assert_logged(auth, :delete) do |props|
+      assert_equal(orig_etag, props['old_etag'], "destroyed auth etag mismatch")
+      assert_equal(orig_attrs, props['old_attributes'],
+                   "destroyed auth attributes mismatch")
+    end
+  end
+
+  test "saving an unchanged client still makes a log" do
+    set_user_from_auth :admin_trustedclient
+    client = api_clients(:untrusted)
+    client.is_trusted = client.is_trusted
+    client.save!
+    assert_logged(client, :update) do |props|
+      ['old', 'new'].each do |age|
+        assert_equal(client.etag, props["#{age}_etag"],
+                     "unchanged client #{age} etag mismatch")
+        assert_equal(client.attributes, props["#{age}_attributes"],
+                     "unchanged client #{age} attributes mismatch")
+      end
+    end
+  end
+
+  test "updating a group twice makes two logs" do
+    set_user_from_auth :admin_trustedclient
+    group = groups(:empty_lonely_group)
+    name1 = group.name
+    name2 = "#{name1} under test"
+    group.name = name2
+    group.save!
+    assert_logged(group, :update) do |props|
+      assert_equal(name1, props['old_attributes']['name'],
+                   "group start name mismatch")
+      assert_equal(name2, props['new_attributes']['name'],
+                   "group updated name mismatch")
+    end
+    group.name = name1
+    group.save!
+    assert_logged(group, :update) do |props|
+      assert_equal(name2, props['old_attributes']['name'],
+                   "group pre-revert name mismatch")
+      assert_equal(name1, props['new_attributes']['name'],
+                   "group final name mismatch")
+    end
+  end
+
+  test "making a log doesn't get logged" do
+    set_user_from_auth :active_trustedclient
+    log = Log.new
+    log.save!
+    assert_equal(0, get_logs_about(log).size, "made a Log about a Log")
+  end
+
+  test "non-admins can't modify or delete logs" do
+    set_user_from_auth :active_trustedclient
+    log = Log.new(summary: "immutable log test")
+    assert_nothing_raised { log.save! }
+    log.summary = "log mutation test should fail"
+    assert_raise(ArvadosModel::PermissionDeniedError) { log.save! }
+    assert_raise(ArvadosModel::PermissionDeniedError) { log.destroy }
+  end
+
+  test "admins can modify and delete logs" do
+    set_user_from_auth :admin_trustedclient
+    log = Log.new(summary: "admin log mutation test")
+    assert_nothing_raised { log.save! }
+    log.summary = "admin mutated log test"
+    assert_nothing_raised { log.save! }
+    assert_nothing_raised { log.destroy }
+  end
+
+  test "failure saving log causes failure saving object" do
+    Log.class_eval do
+      alias_method :_orig_validations, :perform_validations
+      def perform_validations(options)
+        false
+      end
+    end
+    begin
+      set_user_from_auth :active_trustedclient
+      user = users(:active)
+      user.first_name = 'Test'
+      assert_raise(ActiveRecord::RecordInvalid) { user.save! }
+    ensure
+      Log.class_eval do
+        alias_method :perform_validations, :_orig_validations
+      end
+    end
+  end
+
+  test "don't log changes only to ApiClientAuthorization.last_used_*" do
+    set_user_from_auth :admin_trustedclient
+    auth = api_client_authorizations(:spectator)
+    start_log_count = get_logs_about(auth).size
+    auth.last_used_at = Time.now
+    auth.last_used_by_ip_address = '::1'
+    auth.save!
+    assert_equal(start_log_count, get_logs_about(auth).size,
+                 "log count changed after 'using' ApiClientAuthorization")
+    auth.created_by_ip_address = '::1'
+    auth.save!
+    assert_logged(auth, :update)
+  end
+
+  test "token isn't included in ApiClientAuthorization logs" do
+    set_user_from_auth :admin_trustedclient
+    auth = ApiClientAuthorization.new
+    auth.user = users(:spectator)
+    auth.api_client = api_clients(:untrusted)
+    auth.save!
+    assert_logged_with_clean_properties(auth, :create, 'api_token')
+    auth.expires_at = Time.now
+    auth.save!
+    assert_logged_with_clean_properties(auth, :update, 'api_token')
+    auth.destroy
+    assert_logged_with_clean_properties(auth, :delete, 'api_token')
+  end
+
+  test "use ownership and permission links to determine which logs a user can see" do
+    known_logs = [:noop,
+                  :admin_changes_repository2,
+                  :admin_changes_specimen,
+                  :system_adds_foo_file,
+                  :system_adds_baz,
+                  :log_owned_by_active,
+                  :crunchstat_for_running_job]
+
+    c = Log.readable_by(users(:admin)).order("id asc").each.to_a
+    assert_log_result c, known_logs, known_logs
+
+    c = Log.readable_by(users(:active)).order("id asc").each.to_a
+    assert_log_result c, known_logs, [:admin_changes_repository2, # owned by active
+                                      :system_adds_foo_file,      # readable via link
+                                      :system_adds_baz,           # readable via 'all users' group
+                                      :log_owned_by_active,       # log owned by active
+                                      :crunchstat_for_running_job] # log & job owned by active
+
+    c = Log.readable_by(users(:spectator)).order("id asc").each.to_a
+    assert_log_result c, known_logs, [:noop,                   # object_uuid is spectator
+                                      :admin_changes_specimen, # object_uuid is a specimen owned by spectator
+                                      :system_adds_baz] # readable via 'all users' group
+  end
+
+  def assert_log_result result, known_logs, expected_logs
+    # All of expected_logs must appear in result. Additional logs can
+    # appear too, but only if they are _not_ listed in known_logs
+    # (i.e., we do not make any assertions about logs not mentioned in
+    # either "known" or "expected".)
+    result_ids = result.collect(&:id)
+    expected_logs.each do |want|
+      assert_includes result_ids, logs(want).id
+    end
+    (known_logs - expected_logs).each do |notwant|
+      refute_includes result_ids, logs(notwant).id
+    end
+  end
+
+  test "non-empty configuration.unlogged_attributes" do
+    Rails.configuration.unlogged_attributes = ["manifest_text"]
+    txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+
+    act_as_system_user do
+      coll = Collection.create(manifest_text: txt)
+      assert_logged_with_clean_properties(coll, :create, 'manifest_text')
+      coll.name = "testing"
+      coll.save!
+      assert_logged_with_clean_properties(coll, :update, 'manifest_text')
+      coll.destroy
+      assert_logged_with_clean_properties(coll, :delete, 'manifest_text')
+    end
+  end
+
+  test "empty configuration.unlogged_attributes" do
+    Rails.configuration.unlogged_attributes = []
+    txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+
+    act_as_system_user do
+      coll = Collection.create(manifest_text: txt)
+      assert_logged(coll, :create) do |props|
+        assert_equal(txt, props['new_attributes']['manifest_text'])
+      end
+      coll.update_attributes!(name: "testing")
+      assert_logged(coll, :update) do |props|
+        assert_equal(txt, props['old_attributes']['manifest_text'])
+        assert_equal(txt, props['new_attributes']['manifest_text'])
+      end
+      coll.destroy
+      assert_logged(coll, :delete) do |props|
+        assert_equal(txt, props['old_attributes']['manifest_text'])
+      end
+    end
+  end
+
+  def assert_no_logs_deleted
+    logs_before = Log.unscoped.all.count
+    yield
+    assert_equal logs_before, Log.unscoped.all.count
+  end
+
+  def remaining_audit_logs
+    Log.unscoped.where('event_type in (?)', %w(create update destroy delete))
+  end
+
+  # Default settings should not delete anything -- some sites rely on
+  # the original "keep everything forever" behavior.
+  test 'retain old audit logs with default settings' do
+    assert_no_logs_deleted do
+      AuditLogs.delete_old(
+        max_age: Rails.configuration.max_audit_log_age,
+        max_batch: Rails.configuration.max_audit_log_delete_batch)
+    end
+  end
+
+  # Batch size 0 should retain all logs -- even if max_age is very
+  # short, and even if the default settings (and associated test) have
+  # changed.
+  test 'retain old audit logs with max_audit_log_delete_batch=0' do
+    assert_no_logs_deleted do
+      AuditLogs.delete_old(max_age: 1, max_batch: 0)
+    end
+  end
+
+  # We recommend a more conservative age of 5 minutes for production,
+  # but 3 minutes suits our test data better (and is still worth
+  # testing, since it is expected to work correctly in production).
+  test 'delete old audit logs with production settings' do
+    initial_log_count = Log.unscoped.all.count
+    AuditLogs.delete_old(max_age: 180, max_batch: 100000)
+    assert_operator remaining_audit_logs.count, :<, initial_log_count
+  end
+
+  test 'delete all audit logs in multiple batches' do
+    AuditLogs.delete_old(max_age: 0.00001, max_batch: 2)
+    assert_equal [], remaining_audit_logs.collect(&:uuid)
+  end
+
+  test 'delete old audit logs in thread' do
+    begin
+      Rails.configuration.max_audit_log_age = 20
+      Rails.configuration.max_audit_log_delete_batch = 100000
+      Rails.cache.delete 'AuditLogs'
+      initial_log_count = Log.unscoped.all.count + 1
+      act_as_system_user do
+        Log.create!()
+        initial_log_count += 1
+      end
+      deadline = Time.now + 10
+      while remaining_audit_logs.count == initial_log_count
+        if Time.now > deadline
+          raise "timed out"
+        end
+        sleep 0.1
+      end
+      assert_operator remaining_audit_logs.count, :<, initial_log_count
+    ensure
+      # The test framework rolls back our transactions, but that
+      # doesn't undo the deletes we did from separate threads.
+      ActiveRecord::Base.connection.exec_query 'ROLLBACK'
+      Thread.new do
+        begin
+          dc = DatabaseController.new
+          dc.define_singleton_method :render do |*args| end
+          dc.reset
+        ensure
+          ActiveRecord::Base.connection.close
+        end
+      end.join
+    end
+  end
+end
diff --git a/services/api/test/unit/node_test.rb b/services/api/test/unit/node_test.rb
new file mode 100644 (file)
index 0000000..4cb7a0a
--- /dev/null
@@ -0,0 +1,220 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'tmpdir'
+require 'tempfile'
+
+class NodeTest < ActiveSupport::TestCase
+  def ping_node(node_name, ping_data)
+    set_user_from_auth :admin
+    node = nodes(node_name)
+    node.ping({ping_secret: node.info['ping_secret'],
+                ip: node.ip_address}.merge(ping_data))
+    node
+  end
+
+  test "pinging a node can add and update stats" do
+    node = ping_node(:idle, {total_cpu_cores: '12', total_ram_mb: '512'})
+    assert_equal(12, node.properties['total_cpu_cores'])
+    assert_equal(512, node.properties['total_ram_mb'])
+  end
+
+  test "stats disappear if not in a ping" do
+    node = ping_node(:idle, {total_ram_mb: '256'})
+    refute_includes(node.properties, 'total_cpu_cores')
+    assert_equal(256, node.properties['total_ram_mb'])
+  end
+
+  test "worker state is down for node with no slot" do
+    node = nodes(:was_idle_now_down)
+    assert_nil node.slot_number, "fixture is not what I expected"
+    assert_equal 'down', node.crunch_worker_state, "wrong worker state"
+  end
+
+  test "dns_server_conf_template" do
+    Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp'
+    Rails.configuration.dns_server_conf_template = Rails.root.join 'config', 'unbound.template'
+    conffile = Rails.root.join 'tmp', 'compute65535.conf'
+    File.unlink conffile rescue nil
+    assert Node.dns_server_update 'compute65535', '127.0.0.1'
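+    # The rendered config should contain a reverse-DNS PTR record for the IP.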
+    assert_match(/\"1\.0\.0\.127\.in-addr\.arpa\. IN PTR compute65535\.zzzzz\.arvadosapi\.com\"/, IO.read(conffile))
+    File.unlink conffile
+  end
+
+  test "dns_server_restart_command" do
+    Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp'
+    Rails.configuration.dns_server_reload_command = 'foobar'
+    restartfile = Rails.root.join 'tmp', 'restart.txt'
+    File.unlink restartfile rescue nil
+    assert Node.dns_server_update 'compute65535', '127.0.0.127'
+    assert_equal "foobar\n", IO.read(restartfile)
+    File.unlink restartfile
+  end
+
+  test "dns_server_restart_command fail" do
+    Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp', 'bogusdir'
+    Rails.configuration.dns_server_reload_command = 'foobar'
+    refute Node.dns_server_update 'compute65535', '127.0.0.127'
+  end
+
+  test "dns_server_update_command with valid command" do
+    testfile = Rails.root.join('tmp', 'node_test_dns_server_update_command.txt')
+    Rails.configuration.dns_server_update_command =
+      ('echo -n "%{hostname} == %{ip_address}" >' +
+       testfile.to_s.shellescape)
+    assert Node.dns_server_update 'compute65535', '127.0.0.1'
+    assert_equal 'compute65535 == 127.0.0.1', IO.read(testfile)
+    File.unlink testfile
+  end
+
+  test "dns_server_update_command with failing command" do
+    Rails.configuration.dns_server_update_command = 'false %{hostname}'
+    refute Node.dns_server_update 'compute65535', '127.0.0.1'
+  end
+
+  test "dns update with no commands/dirs configured" do
+    Rails.configuration.dns_server_update_command = false
+    Rails.configuration.dns_server_conf_dir = false
+    Rails.configuration.dns_server_conf_template = 'ignored!'
+    Rails.configuration.dns_server_reload_command = 'ignored!'
+    assert Node.dns_server_update 'compute65535', '127.0.0.127'
+  end
+
+  test "don't leave temp files behind if there's an error writing them" do
+    Rails.configuration.dns_server_conf_template = Rails.root.join 'config', 'unbound.template'
+    Tempfile.any_instance.stubs(:puts).raises(IOError)
+    Dir.mktmpdir do |tmpdir|
+      Rails.configuration.dns_server_conf_dir = tmpdir
+      refute Node.dns_server_update 'compute65535', '127.0.0.127'
+      assert_empty Dir.entries(tmpdir).select{|f| File.file?(File.join(tmpdir, f))}
+    end
+  end
+
+  test "ping new node with no hostname and default config" do
+    node = ping_node(:new_with_no_hostname, {})
+    slot_number = node.slot_number
+    refute_nil slot_number
+    assert_equal("compute#{slot_number}", node.hostname)
+  end
+
+  test "ping new node with no hostname and no config" do
+    Rails.configuration.assign_node_hostname = false
+    node = ping_node(:new_with_no_hostname, {})
+    refute_nil node.slot_number
+    assert_nil node.hostname
+  end
+
+  test "ping new node with zero padding config" do
+    Rails.configuration.assign_node_hostname = 'compute%<slot_number>04d'
+    node = ping_node(:new_with_no_hostname, {})
+    slot_number = node.slot_number
+    refute_nil slot_number
+    assert_equal("compute000#{slot_number}", node.hostname)
+  end
+
+  test "ping node with hostname and config and expect hostname unchanged" do
+    node = ping_node(:new_with_custom_hostname, {})
+    assert_equal(23, node.slot_number)
+    assert_equal("custom1", node.hostname)
+  end
+
+  test "ping node with hostname and no config and expect hostname unchanged" do
+    Rails.configuration.assign_node_hostname = false
+    node = ping_node(:new_with_custom_hostname, {})
+    assert_equal(23, node.slot_number)
+    assert_equal("custom1", node.hostname)
+  end
+
+  # Ping two nodes: one without a hostname and the other with a hostname.
+  # Verify that the first one gets a hostname and second one is unchanged.
+  test "ping two nodes one with no hostname and one with hostname and check hostnames" do
+    # ping node with no hostname and expect it set with config format
+    node = ping_node(:new_with_no_hostname, {})
+    refute_nil node.slot_number
+    assert_equal "compute#{node.slot_number}", node.hostname
+
+    # ping node with a hostname and expect it to be unchanged
+    node2 = ping_node(:new_with_custom_hostname, {})
+    refute_nil node2.slot_number
+    assert_equal "custom1", node2.hostname
+  end
+
+  test "update dns when nodemanager clears hostname and ip_address" do
+    act_as_system_user do
+      node = ping_node(:new_with_custom_hostname, {})
+      Node.expects(:dns_server_update).with(node.hostname, Node::UNUSED_NODE_IP)
+      node.update_attributes(hostname: nil, ip_address: nil)
+    end
+  end
+
+  test "update dns when hostname changes" do
+    act_as_system_user do
+      node = ping_node(:new_with_custom_hostname, {})
+
+      Node.expects(:dns_server_update).with(node.hostname, Node::UNUSED_NODE_IP)
+      Node.expects(:dns_server_update).with('foo0', node.ip_address)
+      node.update_attributes!(hostname: 'foo0')
+
+      Node.expects(:dns_server_update).with('foo0', Node::UNUSED_NODE_IP)
+      node.update_attributes!(hostname: nil, ip_address: nil)
+
+      Node.expects(:dns_server_update).with('foo0', '10.11.12.13')
+      node.update_attributes!(hostname: 'foo0', ip_address: '10.11.12.13')
+
+      Node.expects(:dns_server_update).with('foo0', '10.11.12.14')
+      node.update_attributes!(hostname: 'foo0', ip_address: '10.11.12.14')
+    end
+  end
+
+  test 'newest ping wins IP address conflict' do
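+    # Two nodes ping with the same IP: the most recent ping takes the
+    # address, and the other node's IP and DNS entry are cleared.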
+    act_as_system_user do
+      n1, n2 = Node.create!, Node.create!
+
+      n1.ping(ip: '10.5.5.5', ping_secret: n1.info['ping_secret'])
+      n1.reload
+
+      Node.expects(:dns_server_update).with(n1.hostname, Node::UNUSED_NODE_IP)
+      Node.expects(:dns_server_update).with(Not(equals(n1.hostname)), '10.5.5.5')
+      n2.ping(ip: '10.5.5.5', ping_secret: n2.info['ping_secret'])
+
+      n1.reload
+      n2.reload
+      assert_nil n1.ip_address
+      assert_equal '10.5.5.5', n2.ip_address
+
+      Node.expects(:dns_server_update).with(n2.hostname, Node::UNUSED_NODE_IP)
+      Node.expects(:dns_server_update).with(n1.hostname, '10.5.5.5')
+      n1.ping(ip: '10.5.5.5', ping_secret: n1.info['ping_secret'])
+
+      n1.reload
+      n2.reload
+      assert_nil n2.ip_address
+      assert_equal '10.5.5.5', n1.ip_address
+    end
+  end
+
+  test 'run out of slots' do
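+    # With max_compute_nodes = 3, the first three pings get slots; the
+    # fourth cannot be assigned one and should raise.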
+    Rails.configuration.max_compute_nodes = 3
+    act_as_system_user do
+      Node.destroy_all
+      (1..4).each do |i|
+        n = Node.create!
+        args = { ip: "10.0.0.#{i}", ping_secret: n.info['ping_secret'] }
+        if i <= Rails.configuration.max_compute_nodes
+          n.ping(args)
+        else
+          assert_raises do
+            n.ping(args)
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/services/api/test/unit/owner_test.rb b/services/api/test/unit/owner_test.rb
new file mode 100644 (file)
index 0000000..528c6d2
--- /dev/null
@@ -0,0 +1,131 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+# Test referential integrity: ensure we cannot leave any object
+# without owners by deleting a user or group.
+#
+# "o" is an owner.
+# "i" is an item.
+
+class OwnerTest < ActiveSupport::TestCase
+  fixtures :users, :groups, :specimens
+
+  setup do
+    set_user_from_auth :admin_trustedclient
+  end
+
+  User.all
+  Group.all
+  [User, Group].each do |o_class|
+    test "create object with legit #{o_class} owner" do
+      o = o_class.create!
+      i = Specimen.create(owner_uuid: o.uuid)
+      assert i.valid?, "new item should pass validation"
+      assert i.uuid, "new item should have an ID"
+      assert Specimen.where(uuid: i.uuid).any?, "new item should really be in DB"
+    end
+
+    test "create object with non-existent #{o_class} owner" do
+      assert_raises(ActiveRecord::RecordInvalid,
+                    "create should fail with random owner_uuid") do
+        Specimen.create!(owner_uuid: o_class.generate_uuid)
+      end
+
+      i = Specimen.create(owner_uuid: o_class.generate_uuid)
+      assert !i.valid?, "object with random owner_uuid should not be valid?"
+
+      i = Specimen.new(owner_uuid: o_class.generate_uuid)
+      assert !i.valid?, "new item should not pass validation"
+      assert !i.uuid, "new item should not have an ID"
+    end
+
+    [User, Group].each do |new_o_class|
+      test "change owner from legit #{o_class} to legit #{new_o_class} owner" do
+        o = o_class.create!
+        i = Specimen.create!(owner_uuid: o.uuid)
+        new_o = new_o_class.create!
+        assert(Specimen.where(uuid: i.uuid).any?,
+               "new item should really be in DB")
+        assert(i.update_attributes(owner_uuid: new_o.uuid),
+               "should change owner_uuid from #{o.uuid} to #{new_o.uuid}")
+      end
+    end
+
+    test "delete #{o_class} that owns nothing" do
+      o = o_class.create!
+      assert(o_class.where(uuid: o.uuid).any?,
+             "new #{o_class} should really be in DB")
+      assert(o.destroy, "should delete #{o_class} that owns nothing")
+      assert_equal(false, o_class.where(uuid: o.uuid).any?,
+                   "#{o.uuid} should not be in DB after deleting")
+    end
+
+    test "change uuid of #{o_class} that owns nothing" do
+      # (we're relying on our admin credentials here)
+      o = o_class.create!
+      assert(o_class.where(uuid: o.uuid).any?,
+             "new #{o_class} should really be in DB")
+      old_uuid = o.uuid
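+      # Swap the last ten characters for random base-36 digits to make a
+      # syntactically similar but distinct uuid.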
+      new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
+      assert(o.update_attributes(uuid: new_uuid),
+             "should change #{o_class} uuid from #{old_uuid} to #{new_uuid}")
+      assert_equal(false, o_class.where(uuid: old_uuid).any?,
+                   "#{old_uuid} should disappear when renamed to #{new_uuid}")
+    end
+  end
+
+  ['users(:active)', 'groups(:aproject)'].each do |ofixt|
+    test "delete #{ofixt} that owns other objects" do
+      o = eval ofixt
+      assert_equal(true, Specimen.where(owner_uuid: o.uuid).any?,
+                   "need something to be owned by #{o.uuid} for this test")
+
+      assert_raises(ActiveRecord::DeleteRestrictionError,
+                    "should not delete #{ofixt} that owns objects") do
+        o.destroy
+      end
+    end
+
+    test "change uuid of #{ofixt} that owns other objects" do
+      o = eval ofixt
+      assert_equal(true, Specimen.where(owner_uuid: o.uuid).any?,
+                   "need something to be owned by #{o.uuid} for this test")
+      new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
+      assert(!o.update_attributes(uuid: new_uuid),
+             "should not change uuid of #{ofixt} that owns objects")
+    end
+  end
+
+  test "delete User that owns self" do
+    o = User.create!
+    assert User.where(uuid: o.uuid).any?, "new User should really be in DB"
+    assert_equal(true, o.update_attributes(owner_uuid: o.uuid),
+                 "setting owner to self should work")
+    assert(o.destroy, "should delete User that owns self")
+    assert_equal(false, User.where(uuid: o.uuid).any?,
+                 "#{o.uuid} should not be in DB after deleting")
+  end
+
+  test "change uuid of User that owns self" do
+    o = User.create!
+    assert User.where(uuid: o.uuid).any?, "new User should really be in DB"
+    assert_equal(true, o.update_attributes(owner_uuid: o.uuid),
+                 "setting owner to self should work")
+    old_uuid = o.uuid
+    new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
+    assert(o.update_attributes(uuid: new_uuid),
+           "should change uuid of User that owns self")
+    assert_equal(false, User.where(uuid: old_uuid).any?,
+                 "#{old_uuid} should not be in DB after deleting")
+    assert_equal(true, User.where(uuid: new_uuid).any?,
+                 "#{new_uuid} should be in DB after renaming")
+    assert_equal(new_uuid, User.where(uuid: new_uuid).first.owner_uuid,
+                 "#{new_uuid} should be its own owner in DB after renaming")
+  end
+
+end
diff --git a/services/api/test/unit/permission_test.rb b/services/api/test/unit/permission_test.rb
new file mode 100644 (file)
index 0000000..275d2a6
--- /dev/null
@@ -0,0 +1,381 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PermissionTest < ActiveSupport::TestCase
+  include CurrentApiClient
+
+  test "Grant permissions on an object I own" do
+    set_user_from_auth :active_trustedclient
+
+    ob = Specimen.create
+    assert ob.save
+
+    # Ensure I have permission to manage this group even when its owner changes
+    perm_link = Link.create(tail_uuid: users(:active).uuid,
+                            head_uuid: ob.uuid,
+                            link_class: 'permission',
+                            name: 'can_manage')
+    assert perm_link.save, "should give myself permission on my own object"
+  end
+
+  test "Delete permission links when deleting an object" do
+    set_user_from_auth :active_trustedclient
+
+    ob = Specimen.create!
+    Link.create!(tail_uuid: users(:active).uuid,
+                 head_uuid: ob.uuid,
+                 link_class: 'permission',
+                 name: 'can_manage')
+    ob_uuid = ob.uuid
+    assert ob.destroy, "Could not destroy object with 1 permission link"
+    assert_empty(Link.where(head_uuid: ob_uuid),
+                 "Permission link was not deleted when object was deleted")
+  end
+
+  test "permission links owned by root" do
+    set_user_from_auth :active_trustedclient
+    ob = Specimen.create!
+    perm_link = Link.create!(tail_uuid: users(:active).uuid,
+                             head_uuid: ob.uuid,
+                             link_class: 'permission',
+                             name: 'can_read')
+    assert_equal system_user_uuid, perm_link.owner_uuid
+  end
+
+  test "readable_by" do
+    set_user_from_auth :active_trustedclient
+
+    ob = Specimen.create!
+    Link.create!(tail_uuid: users(:active).uuid,
+                 head_uuid: ob.uuid,
+                 link_class: 'permission',
+                 name: 'can_read')
+    assert Specimen.readable_by(users(:active)).where(uuid: ob.uuid).any?, "user does not have read permission"
+  end
+
+  test "writable_by" do
+    set_user_from_auth :active_trustedclient
+
+    ob = Specimen.create!
+    Link.create!(tail_uuid: users(:active).uuid,
+                 head_uuid: ob.uuid,
+                 link_class: 'permission',
+                 name: 'can_write')
+    assert ob.writable_by.include?(users(:active).uuid), "user does not have write permission"
+  end
+
+  test "writable_by reports requesting user's own uuid for a writable project" do
+    invited_to_write = users(:project_viewer)
+    group = groups(:asubproject)
+
+    # project_viewer can read, but cannot write or see the writers list
+    set_user_from_auth :project_viewer
+    assert_equal([group.owner_uuid],
+                 group.writable_by,
+                 "writers list should just have owner_uuid")
+
+    # allow project_viewer to write for the remainder of the test
+    set_user_from_auth :admin
+    Link.create!(tail_uuid: invited_to_write.uuid,
+                 head_uuid: group.uuid,
+                 link_class: 'permission',
+                 name: 'can_write')
+    group.permissions.reload
+
+    # project_viewer should see self in writers list (but not all writers)
+    set_user_from_auth :project_viewer
+    assert_not_nil(group.writable_by,
+                    "can write but cannot see writers list")
+    assert_includes(group.writable_by, invited_to_write.uuid,
+                    "self missing from writers list")
+    assert_includes(group.writable_by, group.owner_uuid,
+                    "project owner missing from writers list")
+    refute_includes(group.writable_by, users(:active).uuid,
+                    "saw :active user in writers list")
+
+    # active user should see full writers list
+    set_user_from_auth :active
+    assert_includes(group.writable_by, invited_to_write.uuid,
+                    "permission just added, but missing from writers list")
+
+    # allow project_viewer to manage for the remainder of the test
+    set_user_from_auth :admin
+    Link.create!(tail_uuid: invited_to_write.uuid,
+                 head_uuid: group.uuid,
+                 link_class: 'permission',
+                 name: 'can_manage')
+    # invite another writer we can test for
+    Link.create!(tail_uuid: users(:spectator).uuid,
+                 head_uuid: group.uuid,
+                 link_class: 'permission',
+                 name: 'can_write')
+    group.permissions.reload
+
+    set_user_from_auth :project_viewer
+    assert_not_nil(group.writable_by,
+                    "can manage but cannot see writers list")
+    assert_includes(group.writable_by, users(:spectator).uuid,
+                    ":spectator missing from writers list")
+  end
+
+  test "user owns group, group can_manage object's group, user can add permissions" do
+    set_user_from_auth :admin
+
+    owner_grp = Group.create!(owner_uuid: users(:active).uuid)
+
+    sp_grp = Group.create!
+    sp = Specimen.create!(owner_uuid: sp_grp.uuid)
+
+    Link.create!(link_class: 'permission',
+                 name: 'can_manage',
+                 tail_uuid: owner_grp.uuid,
+                 head_uuid: sp_grp.uuid)
+
+    # active user owns owner_grp, which has can_manage permission on sp_grp
+    # user should be able to add permissions on sp.
+    set_user_from_auth :active_trustedclient
+    test_perm = Link.create(tail_uuid: users(:active).uuid,
+                            head_uuid: sp.uuid,
+                            link_class: 'permission',
+                            name: 'can_write')
+    assert test_perm.save, "could not save new permission on target object"
+    assert test_perm.destroy, "could not delete new permission on target object"
+  end
+
+  # bug #3091
+  skip "can_manage permission on a non-group object" do
+    set_user_from_auth :admin
+
+    ob = Specimen.create!
+    # grant can_manage permission to active
+    perm_link = Link.create!(tail_uuid: users(:active).uuid,
+                             head_uuid: ob.uuid,
+                             link_class: 'permission',
+                             name: 'can_manage')
+    # ob is owned by :admin, the link is owned by root
+    assert_equal users(:admin).uuid, ob.owner_uuid
+    assert_equal system_user_uuid, perm_link.owner_uuid
+
+    # user "active" can modify the permission link
+    set_user_from_auth :active_trustedclient
+    perm_link.properties["foo"] = 'bar'
+    assert perm_link.save, "could not save modified link"
+
+    assert_equal 'bar', perm_link.properties['foo'], "link properties do not include foo = bar"
+  end
+
+  test "user without can_manage permission may not modify permission link" do
+    set_user_from_auth :admin
+
+    ob = Specimen.create!
+    # grant can_manage permission to active
+    perm_link = Link.create!(tail_uuid: users(:active).uuid,
+                             head_uuid: ob.uuid,
+                             link_class: 'permission',
+                             name: 'can_read')
+    # ob is owned by :admin, the link is owned by root
+    assert_equal ob.owner_uuid, users(:admin).uuid
+    assert_equal perm_link.owner_uuid, system_user_uuid
+
+    # user "active" may not modify the permission link
+    set_user_from_auth :active_trustedclient
+    perm_link.name = 'can_manage'
+    assert_raises ArvadosModel::PermissionDeniedError do
+      perm_link.save
+    end
+  end
+
+  test "manager user gets permission to minions' articles via can_manage link" do
+    manager = create :active_user, first_name: "Manage", last_name: "Er"
+    minion = create :active_user, first_name: "Min", last_name: "Ion"
+    minions_specimen = act_as_user minion do
+      Specimen.create!
+    end
+    # Manager creates a group. (Make sure it doesn't magically give
+    # anyone any additional permissions.)
+    g = nil
+    act_as_user manager do
+      g = create :group, name: "NoBigSecret Lab"
+      assert_empty(User.readable_by(manager).where(uuid: minion.uuid),
+                   "saw a user I shouldn't see")
+      assert_raises(ArvadosModel::PermissionDeniedError,
+                    ActiveRecord::RecordInvalid,
+                    "gave can_read permission to a user I shouldn't see") do
+        create(:permission_link,
+               name: 'can_read', tail_uuid: minion.uuid, head_uuid: g.uuid)
+      end
+      %w(can_manage can_write can_read).each do |perm_type|
+        assert_raises(ArvadosModel::PermissionDeniedError,
+                      ActiveRecord::RecordInvalid,
+                      "escalated privileges") do
+          create(:permission_link,
+                 name: perm_type, tail_uuid: g.uuid, head_uuid: minion.uuid)
+        end
+      end
+      assert_empty(User.readable_by(manager).where(uuid: minion.uuid),
+                   "manager saw minion too soon")
+      assert_empty(User.readable_by(minion).where(uuid: manager.uuid),
+                   "minion saw manager too soon")
+      assert_empty(Group.readable_by(minion).where(uuid: g.uuid),
+                   "minion saw manager's new NoBigSecret Lab group too soon")
+
+      # Manager declares everybody on the system should be able to see
+      # the NoBigSecret Lab group.
+      create(:permission_link,
+             name: 'can_read',
+             tail_uuid: 'zzzzz-j7d0g-fffffffffffffff',
+             head_uuid: g.uuid)
+      # ...but nobody has joined the group yet. Manager still can't see
+      # minion.
+      assert_empty(User.readable_by(manager).where(uuid: minion.uuid),
+                   "manager saw minion too soon")
+    end
+
+    act_as_user minion do
+      # Minion can see the group.
+      assert_not_empty(Group.readable_by(minion).where(uuid: g.uuid),
+                       "minion could not see the NoBigSecret Lab group")
+      # Minion joins the group.
+      create(:permission_link,
+             name: 'can_read',
+             tail_uuid: g.uuid,
+             head_uuid: minion.uuid)
+    end
+
+    act_as_user manager do
+      # Now, manager can see minion.
+      assert_not_empty(User.readable_by(manager).where(uuid: minion.uuid),
+                       "manager could not see minion")
+      # But cannot obtain further privileges this way.
+      assert_raises(ArvadosModel::PermissionDeniedError,
+                    "escalated privileges") do
+        create(:permission_link,
+               name: 'can_manage', tail_uuid: manager.uuid, head_uuid: minion.uuid)
+      end
+      assert_empty(Specimen
+                     .readable_by(manager)
+                     .where(uuid: minions_specimen.uuid),
+                   "manager saw the minion's private stuff")
+      assert_raises(ArvadosModel::PermissionDeniedError,
+                   "manager could update minion's private stuff") do
+        minions_specimen.update_attributes(properties: {'x' => 'y'})
+      end
+    end
+
+    act_as_system_user do
+      # Root can give Manager more privileges over Minion.
+      create(:permission_link,
+             name: 'can_manage', tail_uuid: g.uuid, head_uuid: minion.uuid)
+    end
+
+    act_as_user manager do
+      # Now, manager can read and write Minion's stuff.
+      assert_not_empty(Specimen
+                         .readable_by(manager)
+                         .where(uuid: minions_specimen.uuid),
+                       "manager could not find minion's specimen by uuid")
+      assert_equal(true,
+                   minions_specimen.update_attributes(properties: {'x' => 'y'}),
+                   "manager could not update minion's specimen object")
+    end
+  end
+
+  test "users with bidirectional read permission in group can see each other, but cannot see each other's private articles" do
+    a = create :active_user, first_name: "A"
+    b = create :active_user, first_name: "B"
+    other = create :active_user, first_name: "OTHER"
+    act_as_system_user do
+      g = create :group
+      [a,b].each do |u|
+        create(:permission_link,
+               name: 'can_read', tail_uuid: u.uuid, head_uuid: g.uuid)
+        create(:permission_link,
+               name: 'can_read', head_uuid: u.uuid, tail_uuid: g.uuid)
+      end
+    end
+    a_specimen = act_as_user a do
+      Specimen.create!
+    end
+    assert_not_empty(Specimen.readable_by(a).where(uuid: a_specimen.uuid),
+                     "A cannot read own Specimen, following test probably useless.")
+    assert_empty(Specimen.readable_by(b).where(uuid: a_specimen.uuid),
+                 "B can read A's Specimen")
+    [a,b].each do |u|
+      assert_empty(User.readable_by(u).where(uuid: other.uuid),
+                   "#{u.first_name} can see OTHER in the user list")
+      assert_empty(User.readable_by(other).where(uuid: u.uuid),
+                   "OTHER can see #{u.first_name} in the user list")
+      act_as_user u do
+        assert_raises ArvadosModel::PermissionDeniedError, "wrote without perm" do
+          other.update_attributes!(prefs: {'pwned' => true})
+        end
+        assert_equal(true, u.update_attributes!(prefs: {'thisisme' => true}),
+                     "#{u.first_name} can't update its own prefs")
+      end
+      act_as_user other do
+        assert_raises(ArvadosModel::PermissionDeniedError,
+                        "OTHER wrote #{u.first_name} without perm") do
+          u.update_attributes!(prefs: {'pwned' => true})
+        end
+        assert_equal(true, other.update_attributes!(prefs: {'thisisme' => true}),
+                     "OTHER can't update its own prefs")
+      end
+    end
+  end
+
+  test "cannot create with owner = unwritable user" do
+    set_user_from_auth :rominiadmin
+    assert_raises ArvadosModel::PermissionDeniedError, "created with owner = unwritable user" do
+      Specimen.create!(owner_uuid: users(:active).uuid)
+    end
+  end
+
+  test "cannot change owner to unwritable user" do
+    set_user_from_auth :rominiadmin
+    ob = Specimen.create!
+    assert_raises ArvadosModel::PermissionDeniedError, "changed owner to unwritable user" do
+      ob.update_attributes!(owner_uuid: users(:active).uuid)
+    end
+  end
+
+  test "cannot create with owner = unwritable group" do
+    set_user_from_auth :rominiadmin
+    assert_raises ArvadosModel::PermissionDeniedError, "created with owner = unwritable group" do
+      Specimen.create!(owner_uuid: groups(:aproject).uuid)
+    end
+  end
+
+  test "cannot change owner to unwritable group" do
+    set_user_from_auth :rominiadmin
+    ob = Specimen.create!
+    assert_raises ArvadosModel::PermissionDeniedError, "changed owner to unwritable group" do
+      ob.update_attributes!(owner_uuid: groups(:aproject).uuid)
+    end
+  end
+
+  def container_logs(container, user)
+    Log.readable_by(users(user)).
+      where(object_uuid: containers(container).uuid, event_type: "test")
+  end
+
+  test "container logs created by dispatch are visible to container requestor" do
+    set_user_from_auth :dispatch1
+    Log.create!(object_uuid: containers(:running).uuid,
+                event_type: "test")
+
+    assert_not_empty container_logs(:running, :admin)
+    assert_not_empty container_logs(:running, :active)
+    assert_empty container_logs(:running, :spectator)
+  end
+
+  test "container logs created by dispatch are public if container request is public" do
+    set_user_from_auth :dispatch1
+    Log.create!(object_uuid: containers(:running_older).uuid,
+                event_type: "test")
+
+    assert_not_empty container_logs(:running_older, :anonymous)
+  end
+end
diff --git a/services/api/test/unit/pipeline_instance_test.rb b/services/api/test/unit/pipeline_instance_test.rb
new file mode 100644 (file)
index 0000000..8197dee
--- /dev/null
@@ -0,0 +1,124 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PipelineInstanceTest < ActiveSupport::TestCase
+
+  test "check active and success for a pipeline in new state" do
+    pi = pipeline_instances :new_pipeline
+
+    assert_equal 'New', pi.state, 'expected state to be New for :new_pipeline'
+
+    # save the pipeline and expect state to be New
+    Thread.current[:user] = users(:admin)
+
+    pi.save
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::New, pi.state, 'expected state to be New for new pipeline'
+  end
+
+  test "check active and success for a newly created pipeline" do
+    set_user_from_auth :active
+
+    pi = PipelineInstance.create(state: 'Ready')
+    pi.save
+
+    assert pi.valid?, 'expected newly created empty pipeline to be valid ' + pi.errors.messages.to_s
+    assert_equal 'Ready', pi.state, 'expected state to be Ready for a new empty pipeline'
+  end
+
+  test "update attributes for pipeline" do
+    Thread.current[:user] = users(:admin)
+
+    pi = pipeline_instances :new_pipeline
+
+    # add a component with a required input that is not provided; expect state to stay New
+    component = {'script_parameters' => {"input_not_provided" => {"required" => true}}}
+    pi.components['first'] = component
+    components = pi.components
+    pi.update_attribute 'components', pi.components
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::New, pi.state, 'expected state to be New after adding component with required input missing'
+    assert_equal pi.components.size, 1, 'expected one component'
+    assert_nil pi.started_at, 'expected started_at to be nil on new pipeline instance'
+    assert_nil pi.finished_at, 'expected finished_at to be nil on new pipeline instance'
+
+    # add a component whose missing input is not required; expect state to become Ready
+    component = {'script_parameters' => {"input_not_provided" => {"required" => false}}}
+    pi.components['first'] = component
+    components = pi.components
+    pi.update_attribute 'components', pi.components
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::Ready, pi.state, 'expected state to be Ready after adding component with optional input missing'
+    assert_equal pi.components.size, 1, 'expected one component'
+
+    # add a component with input and expect state to become Ready
+    component = {'script_parameters' => {"input" => "yyyad4b39ca5a924e481008009d94e32+210"}}
+    pi.components['first'] = component
+    components = pi.components
+    pi.update_attribute 'components', pi.components
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::Ready, pi.state, 'expected state to be Ready after adding component with input'
+    assert_equal pi.components.size, 1, 'expected one component'
+
+    pi.state = PipelineInstance::RunningOnServer
+    pi.save
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::RunningOnServer, pi.state, 'expected state to be RunningOnServer after updating state to RunningOnServer'
+    assert_not_nil pi.started_at, 'expected started_at to have a value on a running pipeline instance'
+    assert_nil pi.finished_at, 'expected finished_at to be nil on a running pipeline instance'
+
+    pi.state = PipelineInstance::Paused
+    pi.save
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::Paused, pi.state, 'expected state to be Paused after updating state to Paused'
+
+    pi.state = PipelineInstance::Complete
+    pi.save
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::Complete, pi.state, 'expected state to be Complete after updating state to Complete'
+    assert_not_nil pi.started_at, 'expected started_at to have a value on a completed pipeline instance'
+    assert_not_nil pi.finished_at, 'expected finished_at to have a value on a completed pipeline instance'
+
+    pi.state = 'bogus'
+    pi.save
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::Complete, pi.state, 'expected state to be unchanged when set to a bogus value'
+
+    pi.state = PipelineInstance::Failed
+    pi.save
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::Failed, pi.state, 'expected state to be Failed after updating state to Failed'
+    assert_not_nil pi.started_at, 'expected started_at to have a value on a failed pipeline instance'
+    assert_not_nil pi.finished_at, 'expected finished_at to have a value on a failed pipeline instance'
+  end
+
+  test "update attributes for pipeline with two components" do
+    pi = pipeline_instances :new_pipeline
+
+    # add two components, one with input provided and one with a required input missing; expect state to be New
+    component1 = {'script_parameters' => {"something" => "xxxad4b39ca5a924e481008009d94e32+210", "input" => "c1bad4b39ca5a924e481008009d94e32+210"}}
+    component2 = {'script_parameters' => {"something_else" => "xxxad4b39ca5a924e481008009d94e32+210", "input_missing" => {"required" => true}}}
+    pi.components['first'] = component1
+    pi.components['second'] = component2
+
+    Thread.current[:user] = users(:admin)
+    pi.update_attribute 'components', pi.components
+
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::New, pi.state, 'expected state to be New when a component is missing a required input'
+    assert_equal pi.components.size, 2, 'expected two components'
+  end
+
+  [:has_component_with_no_script_parameters,
+   :has_component_with_empty_script_parameters].each do |pi_name|
+    test "update pipeline that #{pi_name}" do
+      pi = pipeline_instances pi_name
+
+      Thread.current[:user] = users(:active)
+      assert_equal PipelineInstance::Ready, pi.state
+    end
+  end
+end
diff --git a/services/api/test/unit/pipeline_template_test.rb b/services/api/test/unit/pipeline_template_test.rb
new file mode 100644 (file)
index 0000000..8ead613
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class PipelineTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/repository_test.rb b/services/api/test/unit/repository_test.rb
new file mode 100644 (file)
index 0000000..fa4c37f
--- /dev/null
@@ -0,0 +1,286 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'helpers/git_test_helper'
+
+class RepositoryTest < ActiveSupport::TestCase
+  include GitTestHelper
+
+  def new_repo(owner_key, attrs={})
+    set_user_from_auth owner_key
+    owner = users(owner_key)
+    Repository.new({owner_uuid: owner.uuid}.merge(attrs))
+  end
+
+  def changed_repo(repo_key, changes)
+    repo = repositories(repo_key)
+    changes.each_pair { |attr, value| repo.send("#{attr}=".to_sym, value) }
+    repo
+  end
+
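+  # Expected git URL: "<user>/<repo>.git" for user-scoped repos,
+  # "<repo>.git" for system-owned repos, e.g. (with uuid_prefix "zzzzz")
+  # default_git_url("foo", "active") #=> "git@git.zzzzz.arvadosapi.com:active/foo.git"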
+  def default_git_url(repo_name, user_name=nil)
+    if user_name
+      "git@git.%s.arvadosapi.com:%s/%s.git" %
+        [Rails.configuration.uuid_prefix, user_name, repo_name]
+    else
+      "git@git.%s.arvadosapi.com:%s.git" %
+        [Rails.configuration.uuid_prefix, repo_name]
+    end
+  end
+
+  def assert_server_path(path_tail, repo_sym)
+    assert_equal(File.join(Rails.configuration.git_repositories_dir, path_tail),
+                 repositories(repo_sym).server_path)
+  end
+
+  ### name validation
+
+  {active: "active/", admin: "admin/", system_user: ""}.
+      each_pair do |user_sym, name_prefix|
+    test "valid names for #{user_sym} repo" do
+      %w(a aa a0 aA Aa AA A0).each do |name|
+        repo = new_repo(user_sym, name: name_prefix + name)
+        assert(repo.valid?)
+      end
+    end
+
+    test "name is required for #{user_sym} repo" do
+      refute(new_repo(user_sym).valid?)
+    end
+
+    test "repo name beginning with numeral is invalid for #{user_sym}" do
+      repo = new_repo(user_sym, name: "#{name_prefix}0a")
+      refute(repo.valid?)
+    end
+
+    test "name containing bad char is invalid for #{user_sym}" do
+      "\\.-_/!@#$%^&*()[]{}".each_char do |bad_char|
+        repo = new_repo(user_sym, name: "#{name_prefix}bad#{bad_char}reponame")
+        refute(repo.valid?)
+      end
+    end
+  end
+
+  test "admin can create valid repo for other user with correct name prefix" do
+    owner = users(:active)
+    repo = new_repo(:admin, name: "#{owner.username}/validnametest",
+                    owner_uuid: owner.uuid)
+    assert(repo.valid?)
+  end
+
+  test "admin can create valid system repo without name prefix" do
+    repo = new_repo(:admin, name: "validnametest",
+                    owner_uuid: users(:system_user).uuid)
+    assert(repo.valid?)
+  end
+
+  test "repo name prefix must match owner_uuid username" do
+    repo = new_repo(:admin, name: "admin/badusernametest",
+                    owner_uuid: users(:active).uuid)
+    refute(repo.valid?)
+  end
+
+  test "repo name prefix must be empty for system repo" do
+    repo = new_repo(:admin, name: "root/badprefixtest",
+                    owner_uuid: users(:system_user).uuid)
+    refute(repo.valid?)
+  end
+
+  ### owner validation
+
+  test "name must be unique per user" do
+    repo = new_repo(:active, name: repositories(:foo).name)
+    refute(repo.valid?)
+  end
+
+  test "name can be duplicated across users" do
+    repo = new_repo(:active, name: "active/#{repositories(:arvados).name}")
+    assert(repo.valid?)
+  end
+
+  test "repository cannot be owned by a group" do
+    set_user_from_auth :active
+    repo = Repository.new(owner_uuid: groups(:all_users).uuid,
+                          name: "ownedbygroup")
+    refute(repo.valid?)
+    refute_empty(repo.errors[:owner_uuid] || [])
+  end
+
+  ### URL generation
+
+  test "fetch_url" do
+    repo = new_repo(:active, name: "active/fetchtest")
+    repo.save
+    assert_equal(default_git_url("fetchtest", "active"), repo.fetch_url)
+  end
+
+  test "fetch_url owned by system user" do
+    set_user_from_auth :admin
+    repo = Repository.new(owner_uuid: users(:system_user).uuid,
+                          name: "fetchtest")
+    repo.save
+    assert_equal(default_git_url("fetchtest"), repo.fetch_url)
+  end
+
+  test "push_url" do
+    repo = new_repo(:active, name: "active/pushtest")
+    repo.save
+    assert_equal(default_git_url("pushtest", "active"), repo.push_url)
+  end
+
+  test "push_url owned by system user" do
+    set_user_from_auth :admin
+    repo = Repository.new(owner_uuid: users(:system_user).uuid,
+                          name: "pushtest")
+    repo.save
+    assert_equal(default_git_url("pushtest"), repo.push_url)
+  end
+
+  ### Path generation
+
+  test "disk path stored by UUID" do
+    assert_server_path("zzzzz-s0uqq-382brsig8rp3666/.git", :foo)
+  end
+
+  test "disk path stored by name" do
+    assert_server_path("arvados/.git", :arvados)
+  end
+
+  test "disk path for repository not on disk" do
+    assert_nil(Repository.new.server_path)
+  end
+
+  ### Repository creation
+
+  test "non-admin can create a repository for themselves" do
+    repo = new_repo(:active, name: "active/newtestrepo")
+    assert(repo.save)
+  end
+
+  test "non-admin can't create a repository for another visible user" do
+    repo = new_repo(:active, name: "repoforanon",
+                    owner_uuid: users(:anonymous).uuid)
+    assert_not_allowed { repo.save }
+  end
+
+  test "admin can create a repository for themselves" do
+    repo = new_repo(:admin, name: "admin/newtestrepo")
+    assert(repo.save)
+  end
+
+  test "admin can create a repository for others" do
+    repo = new_repo(:admin, name: "active/repoforactive",
+                    owner_uuid: users(:active).uuid)
+    assert(repo.save)
+  end
+
+  test "admin can create a system repository" do
+    repo = new_repo(:admin, name: "repoforsystem",
+                    owner_uuid: users(:system_user).uuid)
+    assert(repo.save)
+  end
+
+  ### Repository destruction
+
+  test "non-admin can destroy their own repository" do
+    set_user_from_auth :active
+    assert(repositories(:foo).destroy)
+  end
+
+  test "non-admin can't destroy others' repository" do
+    set_user_from_auth :active
+    assert_not_allowed { repositories(:repository3).destroy }
+  end
+
+  test "non-admin can't destroy system repository" do
+    set_user_from_auth :active
+    assert_not_allowed { repositories(:arvados).destroy }
+  end
+
+  test "admin can destroy their own repository" do
+    set_user_from_auth :admin
+    assert(repositories(:repository3).destroy)
+  end
+
+  test "admin can destroy others' repository" do
+    set_user_from_auth :admin
+    assert(repositories(:foo).destroy)
+  end
+
+  test "admin can destroy system repository" do
+    set_user_from_auth :admin
+    assert(repositories(:arvados).destroy)
+  end
+
+  ### Changing ownership
+
+  test "non-admin can't make their repository a system repository" do
+    set_user_from_auth :active
+    repo = changed_repo(:foo, owner_uuid: users(:system_user).uuid)
+    assert_not_allowed { repo.save }
+  end
+
+  test "admin can give their repository to someone else" do
+    set_user_from_auth :admin
+    repo = changed_repo(:repository3, owner_uuid: users(:active).uuid,
+                        name: "active/foo3")
+    assert(repo.save)
+  end
+
+  test "admin can make their repository a system repository" do
+    set_user_from_auth :admin
+    repo = changed_repo(:repository3, owner_uuid: users(:system_user).uuid,
+                        name: "foo3")
+    assert(repo.save)
+  end
+
+  test 'write permission allows changing modified_at' do
+    act_as_user users(:active) do
+      r = repositories(:foo)
+      modtime_was = r.modified_at
+      r.modified_at = Time.now
+      assert r.save
+      assert_operator modtime_was, :<, r.modified_at
+    end
+  end
+
+  test 'write permission necessary for changing modified_at' do
+    act_as_user users(:spectator) do
+      r = repositories(:foo)
+      modtime_was = r.modified_at
+      r.modified_at = Time.now
+      assert_raises ArvadosModel::PermissionDeniedError do
+        r.save!
+      end
+      r.reload
+      assert_equal modtime_was, r.modified_at
+    end
+  end
+
+  ### Renaming
+
+  test "non-admin can rename own repo" do
+    act_as_user users(:active) do
+      assert repositories(:foo).update_attributes(name: 'active/foo12345')
+    end
+  end
+
+  test "top level repo can be touched by non-admin with can_manage" do
+    add_permission_link users(:active), repositories(:arvados), 'can_manage'
+    act_as_user users(:active) do
+      assert changed_repo(:arvados, modified_at: Time.now).save
+    end
+  end
+
+  test "top level repo cannot be renamed by non-admin with can_manage" do
+    add_permission_link users(:active), repositories(:arvados), 'can_manage'
+    act_as_user users(:active) do
+      assert_not_allowed { changed_repo(:arvados, name: 'xarvados').save }
+    end
+  end
+end
diff --git a/services/api/test/unit/salvage_collection_test.rb b/services/api/test/unit/salvage_collection_test.rb
new file mode 100644 (file)
index 0000000..89d97f6
--- /dev/null
@@ -0,0 +1,169 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'salvage_collection'
+require 'shellwords'
+
+# Valid manifest_text
+TEST_MANIFEST = ". 341dabea2bd78ad0d6fc3f5b926b450e+85626+Ad391622a17f61e4a254eda85d1ca751c4f368da9@55e076ce 0:85626:brca2-hg19.fa\n. d7321a918923627c972d8f8080c07d29+82570+A22e0a1d9b9bc85c848379d98bedc64238b0b1532@55e076ce 0:82570:brca1-hg19.fa\n"
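+# TEST_MANIFEST with the +A...@... permission signatures stripped from each
+# locator; the expected salvaged manifests below are computed from this form.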
+TEST_MANIFEST_STRIPPED = ". 341dabea2bd78ad0d6fc3f5b926b450e+85626 0:85626:brca2-hg19.fa\n. d7321a918923627c972d8f8080c07d29+82570 0:82570:brca1-hg19.fa\n"
+
+# This invalid manifest_text has the following flaws:
+#   Missing stream name, with a locator in its place
+#   Invalid locators:
+#     faafaafaabd78ad0d6fc3f5b926b450e+foo
+#     bar-baabaabaabd78ad0d6fc3f5b926b450e
+#     bad12345dae58ad0d6fc3f5b926b450e+
+#     341dabea2bd78ad0d6fc3f5b926b450e+abc
+#     341dabea2bd78ad0d6fc3f5b926abcdf
+# Expectation: All these locators are preserved in salvaged_data
+BAD_MANIFEST = "faafaafaabd78ad0d6fc3f5b926b450e+foo bar-baabaabaabd78ad0d6fc3f5b926b450e_bad12345dae58ad0d6fc3f5b926b450e+ 341dabea2bd78ad0d6fc3f5b926b450e+abc 341dabea2bd78ad0d6fc3f5b926abcdf 0:85626:brca2-hg19.fa\n. abcdabea2bd78ad0d6fc3f5b926b450e+1000 0:1000:brca-hg19.fa\n. d7321a918923627c972d8f8080c07d29+2000+A22e0a1d9b9bc85c848379d98bedc64238b0b1532@55e076ce 0:2000:brca1-hg19.fa\n"
+
+class SalvageCollectionTest < ActiveSupport::TestCase
+  include SalvageCollection
+
+  setup do
+    set_user_from_auth :admin
+    # arv-put needs ARV env variables
+    ENV['ARVADOS_API_HOST'] = 'unused_by_test'
+    ENV['ARVADOS_API_TOKEN'] = 'unused_by_test'
+    @backtick_mock_failure = false
+  end
+
+  teardown do
+    ENV['ARVADOS_API_HOST'] = ''
+    ENV['ARVADOS_API_TOKEN'] = ''
+  end
+
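+  # Mock Kernel#` so the test never runs a real arv-put: check that the
+  # command looks like an arv-put invocation, make $? indicate success or
+  # failure, and return a one-file manifest built from the uploaded file.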
+  def ` cmd # mock Kernel `
+    assert_equal 'arv-put', cmd.shellsplit[0]
+    if @backtick_mock_failure
+      # run a process so $? indicates failure
+      return super 'false'
+    end
+    # run a process so $? indicates success
+    super 'true'
+    file_contents = File.read(cmd.shellsplit[-1])
+    ". " +
+      Digest::MD5.hexdigest(file_contents) + "+" + file_contents.length.to_s +
+      " 0:" + file_contents.length.to_s + ":invalid_manifest_text.txt\n"
+  end
+
+  test "salvage test collection with valid manifest text" do
+    # create a collection to test salvaging
+    src_collection = Collection.new name: "test collection", manifest_text: TEST_MANIFEST
+    src_collection.save!
+
+    # salvage this collection
+    salvage_collection src_collection.uuid, 'test salvage collection - see #6277, #6859'
+
+    # verify the updated src_collection data
+    updated_src_collection = Collection.find_by_uuid src_collection.uuid
+    updated_name = updated_src_collection.name
+    assert updated_name.include?(src_collection.name)
+
+    match = updated_name.match(/^test collection.*salvaged data at (.*)\)$/)
+    assert_not_nil match
+    assert_not_nil match[1]
+    assert_empty updated_src_collection.manifest_text
+
+    # match[1] is the uuid of the new collection created from src_collection's salvaged data
+    # use this to get the new collection and verify
+    new_collection = Collection.find_by_uuid match[1]
+    match = new_collection.name.match(/^salvaged from (.*),.*/)
+    assert_not_nil match
+    assert_equal src_collection.uuid, match[1]
+
+    # verify the new collection's manifest format
+    expected_manifest = ". " + Digest::MD5.hexdigest(TEST_MANIFEST_STRIPPED) + "+" +
+      TEST_MANIFEST_STRIPPED.length.to_s + " 0:" + TEST_MANIFEST_STRIPPED.length.to_s +
+      ":invalid_manifest_text.txt\n. 341dabea2bd78ad0d6fc3f5b926b450e+85626 d7321a918923627c972d8f8080c07d29+82570 0:168196:salvaged_data\n"
+    assert_equal expected_manifest, new_collection.manifest_text
+  end
+
+  test "salvage collection with no uuid required argument" do
+    assert_raises RuntimeError do
+      salvage_collection nil
+    end
+  end
+
+  test "salvage collection with bogus uuid" do
+    e = assert_raises RuntimeError do
+      salvage_collection 'bogus-uuid'
+    end
+    assert_equal "No collection found for bogus-uuid.", e.message
+  end
+
+  test "salvage collection with no env ARVADOS_API_HOST" do
+    e = assert_raises RuntimeError do
+      ENV['ARVADOS_API_HOST'] = ''
+      ENV['ARVADOS_API_TOKEN'] = ''
+      salvage_collection collections('user_agreement').uuid
+    end
+    assert_equal "ARVADOS environment variables missing. Please set your admin user credentials as ARVADOS environment variables.", e.message
+  end
+
+  test "salvage collection with error during arv-put" do
+    # try to salvage collection while mimicking error during arv-put
+    @backtick_mock_failure = true
+    e = assert_raises RuntimeError do
+      salvage_collection collections('user_agreement').uuid
+    end
+    assert_match(/Error during arv-put: pid \d+ exit \d+ \(cmd was \"arv-put .*\"\)/, e.message)
+  end
+
+  # This test uses BAD_MANIFEST, which has the following flaws:
+  #   Missing stream name, with a locator in its place
+  #   Invalid locators:
+  #     faafaafaabd78ad0d6fc3f5b926b450e+foo
+  #     bar-baabaabaabd78ad0d6fc3f5b926b450e
+  #     bad12345dae58ad0d6fc3f5b926b450e+
+  #     341dabea2bd78ad0d6fc3f5b926b450e+abc
+  #     341dabea2bd78ad0d6fc3f5b926abcdf
+  # Expectation: All these locators are preserved in salvaged_data
+  test "invalid locators preserved during salvaging" do
+    locator_data = salvage_collection_locator_data BAD_MANIFEST
+    assert_equal \
+    ["faafaafaabd78ad0d6fc3f5b926b450e",
+     "baabaabaabd78ad0d6fc3f5b926b450e",
+     "bad12345dae58ad0d6fc3f5b926b450e",
+     "341dabea2bd78ad0d6fc3f5b926b450e",
+     "341dabea2bd78ad0d6fc3f5b926abcdf",
+     "abcdabea2bd78ad0d6fc3f5b926b450e+1000",
+     "d7321a918923627c972d8f8080c07d29+2000",
+    ], locator_data[0]
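+    # locator_data[1] is the total size: only the two locators carrying
+    # +size hints (+1000 and +2000) contribute.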
+    assert_equal 1000+2000, locator_data[1]
+  end
+
+  test "salvage a collection with invalid manifest text" do
+    # create a collection to test salvaging
+    src_collection = Collection.new name: "test collection", manifest_text: BAD_MANIFEST, owner_uuid: 'zzzzz-tpzed-000000000000000'
+    src_collection.save!(validate: false)
+
+    # salvage this collection
+    salvage_collection src_collection.uuid, 'test salvage collection - see #6277, #6859'
+
+    # verify the updated src_collection data
+    updated_src_collection = Collection.find_by_uuid src_collection.uuid
+    updated_name = updated_src_collection.name
+    assert updated_name.include?(src_collection.name)
+
+    match = updated_name.match(/^test collection.*salvaged data at (.*)\)$/)
+    assert_not_nil match
+    assert_not_nil match[1]
+    assert_empty updated_src_collection.manifest_text
+
+    # match[1] is the uuid of the new collection created from src_collection's salvaged data
+    # use this to get the new collection and verify
+    new_collection = Collection.find_by_uuid match[1]
+    match = new_collection.name.match(/^salvaged from (.*),.*/)
+    assert_not_nil match
+    assert_equal src_collection.uuid, match[1]
+    # verify the new collection's manifest includes the bad locators
+    expected_manifest = ". " + Digest::MD5.hexdigest(BAD_MANIFEST) + "+" + BAD_MANIFEST.length.to_s +
+      " 0:" + BAD_MANIFEST.length.to_s + ":invalid_manifest_text.txt\n. faafaafaabd78ad0d6fc3f5b926b450e baabaabaabd78ad0d6fc3f5b926b450e bad12345dae58ad0d6fc3f5b926b450e 341dabea2bd78ad0d6fc3f5b926b450e 341dabea2bd78ad0d6fc3f5b926abcdf abcdabea2bd78ad0d6fc3f5b926b450e+1000 d7321a918923627c972d8f8080c07d29+2000 0:3000:salvaged_data\n"
+    assert_equal expected_manifest, new_collection.manifest_text
+  end
+end
diff --git a/services/api/test/unit/seralizer_test.rb b/services/api/test/unit/seralizer_test.rb
new file mode 100644 (file)
index 0000000..66140d5
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'serializers'
+
+class SerializerTest < ActiveSupport::TestCase
+  test 'serialize' do
+    assert_equal('{}', HashSerializer.dump({}))
+    assert_equal('{"foo":"bar"}', HashSerializer.dump(foo: 'bar'))
+    assert_equal('{"foo":"bar"}', HashSerializer.dump('foo' => 'bar'))
+    assert_equal('[]', ArraySerializer.dump([]))
+    assert_equal('["foo",{"foo":"bar"}]',
+                 ArraySerializer.dump(['foo', 'foo' => 'bar']))
+    assert_equal(['foo'],
+                 ArraySerializer.load(ArraySerializer.dump([:foo])))
+    assert_equal([1,'bar'],
+                 ArraySerializer.load(ArraySerializer.dump([1,'bar'])))
+  end
+
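+  # A column dumped as JSON and later re-dumped as YAML by an older release
+  # should still load back to the original array.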
+  test 'load array that was saved as json, then mangled by an old version' do
+    assert_equal(['foo'],
+                 ArraySerializer.load(YAML.dump(ArraySerializer.dump(['foo']))))
+  end
+end
diff --git a/services/api/test/unit/specimen_test.rb b/services/api/test/unit/specimen_test.rb
new file mode 100644 (file)
index 0000000..5b2eda2
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class SpecimenTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/trait_test.rb b/services/api/test/unit/trait_test.rb
new file mode 100644 (file)
index 0000000..fe63f16
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class TraitTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/update_priority_test.rb b/services/api/test/unit/update_priority_test.rb
new file mode 100644 (file)
index 0000000..2d28d3f
--- /dev/null
@@ -0,0 +1,30 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'update_priority'
+
+class UpdatePriorityTest < ActiveSupport::TestCase
+  test 'priority 0 but should be >0' do
+    uuid = containers(:running).uuid
+    ActiveRecord::Base.connection.exec_query('UPDATE containers SET priority=0 WHERE uuid=$1', 'test-setup', [[nil, uuid]])
+    assert_equal 0, Container.find_by_uuid(uuid).priority
+    UpdatePriority.update_priority
+    assert_operator 0, :<, Container.find_by_uuid(uuid).priority
+
+    uuid = containers(:queued).uuid
+    ActiveRecord::Base.connection.exec_query('UPDATE containers SET priority=0 WHERE uuid=$1', 'test-setup', [[nil, uuid]])
+    assert_equal 0, Container.find_by_uuid(uuid).priority
+    UpdatePriority.update_priority
+    assert_operator 0, :<, Container.find_by_uuid(uuid).priority
+  end
+
+  test 'priority>0 but should be 0' do
+    uuid = containers(:running).uuid
+    ActiveRecord::Base.connection.exec_query('DELETE FROM container_requests WHERE container_uuid=$1', 'test-setup', [[nil, uuid]])
+    assert_operator 0, :<, Container.find_by_uuid(uuid).priority
+    UpdatePriority.update_priority
+    assert_equal 0, Container.find_by_uuid(uuid).priority
+  end
+end
diff --git a/services/api/test/unit/user_notifier_test.rb b/services/api/test/unit/user_notifier_test.rb
new file mode 100644 (file)
index 0000000..008259c
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class UserNotifierTest < ActionMailer::TestCase
+
+  # Send the email, then test that it got queued
+  test "account is setup" do
+    user = users :active
+    email = UserNotifier.account_is_setup user
+
+    assert_not_nil email
+
+    # Test the body of the sent email contains what we expect it to
+    assert_equal Rails.configuration.user_notifier_email_from, email.from.first
+    assert_equal user.email, email.to.first
+    assert_equal 'Welcome to Arvados - shell account enabled', email.subject
+    assert (email.body.to_s.include? 'Your Arvados shell account has been set up'),
+        "Expected 'Your Arvados shell account has been set up' in email body"
+    assert (email.body.to_s.include? Rails.configuration.workbench_address),
+        'Expected Workbench URL in email body'
+  end
+
+end
diff --git a/services/api/test/unit/user_test.rb b/services/api/test/unit/user_test.rb
new file mode 100644 (file)
index 0000000..67c4100
--- /dev/null
@@ -0,0 +1,803 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class UserTest < ActiveSupport::TestCase
+  include CurrentApiClient
+
+  # The fixture services/api/test/fixtures/users.yml serves as the input for this test case
+  setup do
+    # Make sure system_user exists before making "pre-test users" list
+    system_user
+  end
+
+  %w(a aa a0 aA Aa AA A0).each do |username|
+    test "#{username.inspect} is a valid username" do
+      user = User.new(username: username)
+      assert(user.valid?)
+    end
+  end
+
+  test "username is not required" do
+    user = User.new(username: nil)
+    assert(user.valid?)
+  end
+
+  test "username beginning with numeral is invalid" do
+    user = User.new(username: "0a")
+    refute(user.valid?)
+  end
+
+  "\\.-_/!@#$%^&*()[]{}".each_char do |bad_char|
+    test "username containing #{bad_char.inspect} is invalid" do
+      user = User.new(username: "bad#{bad_char}username")
+      refute(user.valid?)
+    end
+  end
+
+  test "username must be unique" do
+    user = User.new(username: users(:active).username)
+    refute(user.valid?)
+  end
+
+  test "non-admin can't update username" do
+    set_user_from_auth :rominiadmin
+    user = User.find_by_uuid(users(:rominiadmin).uuid)
+    user.username = "selfupdate"
+    assert_not_allowed { user.save }
+  end
+
+  def check_admin_username_change(fixture_name)
+    set_user_from_auth :admin_trustedclient
+    user = User.find_by_uuid(users(fixture_name).uuid)
+    user.username = "newnamefromtest"
+    assert(user.save)
+  end
+
+  test "admin can set username" do
+    check_admin_username_change(:active_no_prefs)
+  end
+
+  test "admin can update username" do
+    check_admin_username_change(:active)
+  end
+
+  test "admin can update own username" do
+    check_admin_username_change(:admin)
+  end
+
+  def check_new_username_setting(email_name, expect_name)
+    set_user_from_auth :admin
+    user = User.create!(email: "#{email_name}@example.org")
+    assert_equal(expect_name, user.username)
+  end
+
+  test "new username set from e-mail" do
+    check_new_username_setting("dakota", "dakota")
+  end
+
+  test "new username set from e-mail with leading digits" do
+    check_new_username_setting("1dakota9", "dakota9")
+  end
+
+  test "new username set from e-mail with punctuation" do
+    check_new_username_setting("dakota.9", "dakota9")
+  end
+
+  test "new username set from e-mail with leading digits and punctuation" do
+    check_new_username_setting("1.dakota.z", "dakotaz")
+  end
+
+  test "new username set from e-mail with extra part" do
+    check_new_username_setting("dakota+arvados", "dakota")
+  end
+
+  test "new username set with deduplication" do
+    name = users(:active).username
+    check_new_username_setting(name, "#{name}2")
+    check_new_username_setting(name, "#{name}3")
+    # Insert some out-of-order conflicts, to ensure our "sort by
+    # username, stop when we see a hole" strategy doesn't depend on
+    # insert order.
+    check_new_username_setting("#{name}13", "#{name}13")
+    check_new_username_setting("#{name}5", "#{name}5")
+    check_new_username_setting(name, "#{name}4")
+    6.upto(12).each do |n|
+      check_new_username_setting(name, "#{name}#{n}")
+    end
+  end
+
+  test "new username set avoiding blacklist" do
+    Rails.configuration.auto_setup_name_blacklist = ["root"]
+    check_new_username_setting("root", "root2")
+  end
+
+  test "no username set when no base available" do
+    check_new_username_setting("_", nil)
+  end
+
+  test "updating username updates repository names" do
+    set_user_from_auth :admin
+    user = users(:active)
+    user.username = "newtestname"
+    assert(user.save, "username update failed")
+    {foo: "newtestname/foo", repository2: "newtestname/foo2"}.
+        each_pair do |repo_sym, expect_name|
+      assert_equal(expect_name, repositories(repo_sym).name)
+    end
+  end
+
+  test "admin can clear username when user owns no repositories" do
+    set_user_from_auth :admin
+    user = users(:spectator)
+    user.username = nil
+    assert(user.save)
+    assert_nil(user.username)
+  end
+
+  test "admin can't clear username when user owns repositories" do
+    set_user_from_auth :admin
+    user = users(:active)
+    user.username = nil
+    assert_not_allowed { user.save }
+    refute_empty(user.errors[:username])
+  end
+
+  test "failed username update doesn't change repository names" do
+    set_user_from_auth :admin
+    user = users(:active)
+    user.username = users(:fuse).username
+    assert_not_allowed { user.save }
+    assert_equal("active/foo", repositories(:foo).name)
+  end
+
+  [[false, 'foo@example.com', true, nil],
+   [false, 'bar@example.com', nil, true],
+   [true, 'foo@example.com', true, nil],
+   [true, 'bar@example.com', true, true],
+   [false, false, nil, nil],
+   [true, false, true, nil]
+  ].each do |auto_admin_first_user_config, auto_admin_user_config, foo_should_be_admin, bar_should_be_admin|
+    # In each case, 'foo' is created first, then 'bar', then 'bar2', then 'baz'.
+    test "auto admin with auto_admin_first=#{auto_admin_first_user_config} auto_admin=#{auto_admin_user_config}" do
+
+      if auto_admin_first_user_config
+        # This test requires no admin users exist (except for the system user)
+        users(:admin).delete
+        @all_users = User.where("uuid not like '%-000000000000000'").where(:is_admin => true)
+        assert_equal 0, @all_users.count, "No admin users should exist (except for the system user)"
+      end
+
+      Rails.configuration.auto_admin_first_user = auto_admin_first_user_config
+      Rails.configuration.auto_admin_user = auto_admin_user_config
+
+      # See if the foo user has is_admin
+      foo = User.new
+      foo.first_name = 'foo'
+      foo.email = 'foo@example.com'
+
+      act_as_system_user do
+        foo.save!
+      end
+
+      foo = User.find(foo.id)   # get the user back
+      assert_equal foo_should_be_admin, foo.is_admin, "is_admin is wrong for user foo"
+      assert_equal 'foo', foo.first_name
+
+      # See if the bar user has is_admin
+      bar = User.new
+      bar.first_name = 'bar'
+      bar.email = 'bar@example.com'
+
+      act_as_system_user do
+        bar.save!
+      end
+
+      bar = User.find(bar.id)   # get the user back
+      assert_equal bar_should_be_admin, bar.is_admin, "is_admin is wrong for user bar"
+      assert_equal 'bar', bar.first_name
+
+      # A subsequent user with the bar@example.com address should never be
+      # elevated to admin
+      bar2 = User.new
+      bar2.first_name = 'bar2'
+      bar2.email = 'bar@example.com'
+
+      act_as_system_user do
+        bar2.save!
+      end
+
+      bar2 = User.find(bar2.id)   # get the user back
+      assert !bar2.is_admin, "is_admin is wrong for user bar2"
+      assert_equal 'bar2', bar2.first_name
+
+      # An ordinary new user should not be elevated to admin
+      baz = User.new
+      baz.first_name = 'baz'
+      baz.email = 'baz@example.com'
+
+      act_as_system_user do
+        baz.save!
+      end
+
+      baz = User.find(baz.id)   # get the user back
+      assert !baz.is_admin
+      assert_equal 'baz', baz.first_name
+
+    end
+  end
+
+  test "check non-admin active user properties" do
+    @active_user = users(:active)     # get the active user
+    assert !@active_user.is_admin, 'is_admin should not be set for a non-admin user'
+    assert @active_user.is_active, 'user should be active'
+    assert @active_user.is_invited, 'is_invited should be set'
+    assert_not_nil @active_user.prefs, "user's preferences should be non-null, but may be size zero"
+    assert (@active_user.can? :read=>"#{@active_user.uuid}"), "user should be able to read own object"
+    assert (@active_user.can? :write=>"#{@active_user.uuid}"), "user should be able to write own object"
+    assert (@active_user.can? :manage=>"#{@active_user.uuid}"), "user should be able to manage own object"
+
+    assert @active_user.groups_i_can(:read).size > 0, "active user should be able to read at least one group"
+
+    # non-admin user cannot manage or write other user objects
+    @uninvited_user = users(:inactive_uninvited)     # get the uninvited user
+    assert !(@active_user.can? :read=>"#{@uninvited_user.uuid}")
+    assert !(@active_user.can? :write=>"#{@uninvited_user.uuid}")
+    assert !(@active_user.can? :manage=>"#{@uninvited_user.uuid}")
+  end
+
+  test "check admin user properties" do
+    @admin_user = users(:admin)     # get the admin user
+    assert @admin_user.is_admin, 'is_admin should be set for admin user'
+    assert @admin_user.is_active, 'admin user cannot be inactive'
+    assert @admin_user.is_invited, 'is_invited should be set'
+    assert_not_nil @admin_user.uuid, "user's uuid should be non-null"
+    assert_not_nil @admin_user.prefs, "user's preferences should be non-null, but may be size zero"
+    assert @admin_user.identity_url.size > 0, "user's identity url is expected"
+    assert @admin_user.can? :read=>"#{@admin_user.uuid}"
+    assert @admin_user.can? :write=>"#{@admin_user.uuid}"
+    assert @admin_user.can? :manage=>"#{@admin_user.uuid}"
+
+    assert @admin_user.groups_i_can(:read).size > 0, "admin active user should be able to read at least one group"
+    assert @admin_user.groups_i_can(:write).size > 0, "admin active user should be able to write to at least one group"
+    assert @admin_user.groups_i_can(:manage).size > 0, "admin active user should be able to manage at least one group"
+
+    # admin user can also write or manage other users
+    @uninvited_user = users(:inactive_uninvited)     # get the uninvited user
+    assert @admin_user.can? :read=>"#{@uninvited_user.uuid}"
+    assert @admin_user.can? :write=>"#{@uninvited_user.uuid}"
+    assert @admin_user.can? :manage=>"#{@uninvited_user.uuid}"
+  end
+
+  test "check inactive and uninvited user properties" do
+    @uninvited_user = users(:inactive_uninvited)     # get the uninvited user
+    assert !@uninvited_user.is_admin, 'is_admin should not be set for a non-admin user'
+    assert !@uninvited_user.is_active, 'user should be inactive'
+    assert !@uninvited_user.is_invited, 'is_invited should not be set'
+    assert @uninvited_user.can? :read=>"#{@uninvited_user.uuid}"
+    assert @uninvited_user.can? :write=>"#{@uninvited_user.uuid}"
+    assert @uninvited_user.can? :manage=>"#{@uninvited_user.uuid}"
+
+    assert_equal(@uninvited_user.groups_i_can(:read).sort,
+                 [@uninvited_user.uuid, groups(:anonymous_group).uuid].sort)
+    assert_equal(@uninvited_user.groups_i_can(:write),
+                 [@uninvited_user.uuid])
+    assert_equal(@uninvited_user.groups_i_can(:manage),
+                 [@uninvited_user.uuid])
+  end
+
+  test "find user method checks" do
+    User.all.each do |user|
+      assert_not_nil user.uuid, "non-null uuid expected for " + user.full_name
+    end
+
+    user = users(:active)     # get the active user
+
+    found_user = User.find(user.id)   # find a user by the row id
+
+    assert_equal found_user.full_name, user.first_name + ' ' + user.last_name
+    assert_equal found_user.identity_url, user.identity_url
+  end
+
+  test "full name should not contain spurious whitespace" do
+    set_user_from_auth :admin
+
+    user = User.create(uuid: 'zzzzz-tpzed-abcdefghijklmno', email: 'foo@example.com')
+
+    assert_equal '', user.full_name
+
+    user.first_name = 'John'
+    user.last_name = 'Smith'
+
+    assert_equal user.first_name + ' ' + user.last_name, user.full_name
+  end
+
+  test "create new user" do
+    set_user_from_auth :admin
+
+    @all_users = User.all.to_a
+
+    user = User.new
+    user.first_name = "first_name_for_newly_created_user"
+    user.save
+
+    # verify there is one extra user in the db now
+    assert_equal @all_users.size+1, User.all.count
+
+    user = User.find(user.id)   # get the user back
+    assert_equal(user.first_name, 'first_name_for_newly_created_user')
+    assert_not_nil user.uuid, 'uuid should be set for newly created user'
+    assert_nil user.email, 'email should be null for newly created user, because it was not passed in'
+    assert_nil user.identity_url, 'identity_url should be null for newly created user, because it was not passed in'
+
+    user.first_name = 'first_name_for_newly_created_user_updated'
+    user.save
+    user = User.find(user.id)   # get the user back
+    assert_equal(user.first_name, 'first_name_for_newly_created_user_updated')
+  end
+
+  test "create new user with notifications" do
+    set_user_from_auth :admin
+
+    create_user_and_verify_setup_and_notifications true, 'active-notify-address@example.com', 'inactive-notify-address@example.com', nil, nil
+    create_user_and_verify_setup_and_notifications true, 'active-notify-address@example.com', [], nil, nil
+    create_user_and_verify_setup_and_notifications true, [], [], nil, nil
+    create_user_and_verify_setup_and_notifications false, 'active-notify-address@example.com', 'inactive-notify-address@example.com', nil, nil
+    create_user_and_verify_setup_and_notifications false, [], 'inactive-notify-address@example.com', nil, nil
+    create_user_and_verify_setup_and_notifications false, [], [], nil, nil
+  end
+
+  [
+    # Easy inactive user tests.
+    [false, [], [], "inactive-none@example.com", false, false, "inactivenone"],
+    [false, [], [], "inactive-vm@example.com", true, false, "inactivevm"],
+    [false, [], [], "inactive-repo@example.com", false, true, "inactiverepo"],
+    [false, [], [], "inactive-both@example.com", true, true, "inactiveboth"],
+
+    # Easy active user tests.
+    [true, "active-notify@example.com", "inactive-notify@example.com", "active-none@example.com", false, false, "activenone"],
+    [true, "active-notify@example.com", "inactive-notify@example.com", "active-vm@example.com", true, false, "activevm"],
+    [true, "active-notify@example.com", "inactive-notify@example.com", "active-repo@example.com", false, true, "activerepo"],
+    [true, "active-notify@example.com", "inactive-notify@example.com", "active-both@example.com", true, true, "activeboth"],
+
+    # Test users with malformed e-mail addresses.
+    [false, [], [], nil, true, true, nil],
+    [false, [], [], "arvados", true, true, nil],
+    [false, [], [], "@example.com", true, true, nil],
+    [true, "active-notify@example.com", "inactive-notify@example.com", "*!*@example.com", true, false, nil],
+    [true, "active-notify@example.com", "inactive-notify@example.com", "*!*@example.com", false, false, nil],
+
+    # Test users with various username transformations.
+    [false, [], [], "arvados@example.com", false, false, "arvados2"],
+    [true, "active-notify@example.com", "inactive-notify@example.com", "arvados@example.com", false, false, "arvados2"],
+    [true, "active-notify@example.com", "inactive-notify@example.com", "root@example.com", true, false, "root2"],
+    [false, "active-notify@example.com", "inactive-notify@example.com", "root@example.com", true, false, "root2"],
+    [true, "active-notify@example.com", "inactive-notify@example.com", "roo_t@example.com", false, true, "root2"],
+    [false, [], [], "^^incorrect_format@example.com", true, true, "incorrectformat"],
+    [true, "active-notify@example.com", "inactive-notify@example.com", "&4a_d9.@example.com", true, true, "ad9"],
+    [true, "active-notify@example.com", "inactive-notify@example.com", "&4a_d9.@example.com", false, false, "ad9"],
+    [false, "active-notify@example.com", "inactive-notify@example.com", "&4a_d9.@example.com", true, true, "ad9"],
+    [false, "active-notify@example.com", "inactive-notify@example.com", "&4a_d9.@example.com", false, false, "ad9"],
+  ].each do |active, new_user_recipients, inactive_recipients, email, auto_setup_vm, auto_setup_repo, expect_username|
+    test "create new user with auto setup #{active} #{email} #{auto_setup_vm} #{auto_setup_repo}" do
+      set_user_from_auth :admin
+
+      Rails.configuration.auto_setup_new_users = true
+
+      if auto_setup_vm
+        Rails.configuration.auto_setup_new_users_with_vm_uuid = virtual_machines(:testvm)['uuid']
+      else
+        Rails.configuration.auto_setup_new_users_with_vm_uuid = false
+      end
+
+      Rails.configuration.auto_setup_new_users_with_repository = auto_setup_repo
+
+      create_user_and_verify_setup_and_notifications active, new_user_recipients, inactive_recipients, email, expect_username
+    end
+  end
+
+  test "update existing user" do
+    set_user_from_auth :active    # set active user as current user
+
+    @active_user = users(:active)     # get the active user
+
+    @active_user.first_name = "first_name_changed"
+    @active_user.save
+
+    @active_user = User.find(@active_user.id)   # get the user back
+    assert_equal(@active_user.first_name, 'first_name_changed')
+
+    # admin user also should be able to update the "active" user info
+    set_user_from_auth :admin # set admin user as current user
+    @active_user.first_name = "first_name_changed_by_admin_for_active_user"
+    @active_user.save
+
+    @active_user = User.find(@active_user.id)   # get the user back
+    assert_equal(@active_user.first_name, 'first_name_changed_by_admin_for_active_user')
+  end
+
+  test "delete a user and verify" do
+    @active_user = users(:active)     # get the active user
+    active_user_uuid = @active_user.uuid
+
+    set_user_from_auth :admin
+    @active_user.delete
+
+    found_deleted_user = false
+    User.all.each do |user|
+      if user.uuid == active_user_uuid
+        found_deleted_user = true
+        break
+      end
+    end
+    assert !found_deleted_user, "found deleted user: "+active_user_uuid
+
+  end
+
+  test "create new user as non-admin user" do
+    set_user_from_auth :active
+    assert_not_allowed { User.new.save }
+  end
+
+  test "setup new user" do
+    set_user_from_auth :admin
+
+    email = 'foo@example.com'
+    openid_prefix = 'http://openid/prefix'
+
+    user = User.create(uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email)
+
+    vm = VirtualMachine.create
+
+    response = user.setup(openid_prefix: openid_prefix,
+                          repo_name: 'foo/testrepo',
+                          vm_uuid: vm.uuid)
+
+    resp_user = find_obj_in_resp response, 'User'
+    verify_user resp_user, email
+
+    oid_login_perm = find_obj_in_resp response, 'Link', 'arvados#user'
+
+    verify_link oid_login_perm, 'permission', 'can_login', resp_user[:email],
+        resp_user[:uuid]
+
+    assert_equal openid_prefix, oid_login_perm[:properties]['identity_url_prefix'],
+        'expected identity_url_prefix not found for oid_login_perm'
+
+    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+
+    repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
+    verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+
+    vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
+    verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
+    assert_equal("foo", vm_perm.properties["username"])
+  end
+
+  test "setup new user with junk in database" do
+    set_user_from_auth :admin
+
+    email = 'foo@example.com'
+    openid_prefix = 'http://openid/prefix'
+
+    user = User.create(uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email)
+
+    vm = VirtualMachine.create
+
+    # Set up the bogus Link
+    bad_uuid = 'zzzzz-tpzed-xyzxyzxyzxyzxyz'
+
+    resp_link = Link.create(tail_uuid: email, link_class: 'permission',
+                            name: 'can_login', head_uuid: bad_uuid)
+    resp_link.save(validate: false)
+
+    verify_link resp_link, 'permission', 'can_login', email, bad_uuid
+
+    response = user.setup(openid_prefix: openid_prefix,
+                          repo_name: 'foo/testrepo',
+                          vm_uuid: vm.uuid)
+
+    resp_user = find_obj_in_resp response, 'User'
+    verify_user resp_user, email
+
+    oid_login_perm = find_obj_in_resp response, 'Link', 'arvados#user'
+
+    verify_link oid_login_perm, 'permission', 'can_login', resp_user[:email],
+        resp_user[:uuid]
+
+    assert_equal openid_prefix, oid_login_perm[:properties]['identity_url_prefix'],
+        'expected identity_url_prefix not found for oid_login_perm'
+
+    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+
+    repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
+    verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+
+    vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
+    verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
+    assert_equal("foo", vm_perm.properties["username"])
+  end
+
+  test "setup new user in multiple steps" do
+    set_user_from_auth :admin
+
+    email = 'foo@example.com'
+    openid_prefix = 'http://openid/prefix'
+
+    user = User.create(uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email)
+
+    response = user.setup(openid_prefix: openid_prefix)
+
+    resp_user = find_obj_in_resp response, 'User'
+    verify_user resp_user, email
+
+    oid_login_perm = find_obj_in_resp response, 'Link', 'arvados#user'
+    verify_link oid_login_perm, 'permission', 'can_login', resp_user[:email],
+        resp_user[:uuid]
+    assert_equal openid_prefix, oid_login_perm[:properties]['identity_url_prefix'],
+        'expected identity_url_prefix not found for oid_login_perm'
+
+    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+
+    # invoke setup again with repo_name
+    response = user.setup(openid_prefix: openid_prefix,
+                          repo_name: 'foo/testrepo')
+    resp_user = find_obj_in_resp response, 'User', nil
+    verify_user resp_user, email
+    assert_equal user.uuid, resp_user[:uuid], 'expected uuid not found'
+
+    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+
+    repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
+    verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+
+    # invoke setup again with a vm_uuid
+    vm = VirtualMachine.create
+
+    response = user.setup(openid_prefix: openid_prefix,
+                          repo_name: 'foo/testrepo',
+                          vm_uuid: vm.uuid)
+
+    resp_user = find_obj_in_resp response, 'User', nil
+    verify_user resp_user, email
+    assert_equal user.uuid, resp_user[:uuid], 'expected uuid not found'
+
+    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+
+    repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
+    verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+
+    vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
+    verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
+    assert_equal("foo", vm_perm.properties["username"])
+  end
+
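+  # Return the first item in response_items that is a User (when object_type
+  # is 'User') or a Link whose head object is of the given kind (otherwise).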
+  def find_obj_in_resp (response_items, object_type, head_kind=nil)
+    return_obj = nil
+    response_items.each { |x|
+      if !x
+        next
+      end
+
+      if object_type == 'User'
+        if ArvadosModel::resource_class_for_uuid(x['uuid']) == User
+          return_obj = x
+          break
+        end
+      else  # looking for a link
+        if ArvadosModel::resource_class_for_uuid(x['head_uuid']).kind == head_kind
+          return_obj = x
+          break
+        end
+      end
+    }
+    return return_obj
+  end
+
+  def verify_user (resp_user, email)
+    assert_not_nil resp_user, 'expected user object'
+    assert_not_nil resp_user['uuid'], 'expected user object'
+    assert_equal email, resp_user['email'], 'expected email not found'
+
+  end
+
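+  # Assert that link_object is a saved link with the expected class, name,
+  # tail_uuid, and (when given) head_uuid.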
+  def verify_link (link_object, link_class, link_name, tail_uuid, head_uuid)
+    assert_not_nil link_object, "expected link for #{link_class} #{link_name}"
+    assert_not_nil link_object[:uuid],
+        "expected non-nil uuid for link for #{link_class} #{link_name}"
+    assert_equal link_class, link_object[:link_class],
+        "expected link_class not found for #{link_class} #{link_name}"
+    assert_equal link_name, link_object[:name],
+        "expected link_name not found for #{link_class} #{link_name}"
+    assert_equal tail_uuid, link_object[:tail_uuid],
+        "expected tail_uuid not found for #{link_class} #{link_name}"
+    if head_uuid
+      assert_equal head_uuid, link_object[:head_uuid],
+          "expected head_uuid not found for #{link_class} #{link_name}"
+    end
+  end
+
+  def create_user_and_verify_setup_and_notifications (active, new_user_recipients, inactive_recipients, email, expect_username)
+    Rails.configuration.new_user_notification_recipients = new_user_recipients
+    Rails.configuration.new_inactive_user_notification_recipients = inactive_recipients
+
+    ActionMailer::Base.deliveries = []
+
+    can_setup = (Rails.configuration.auto_setup_new_users and
+                 (not expect_username.nil?))
+    expect_repo_name = "#{expect_username}/#{expect_username}"
+    prior_repo = Repository.where(name: expect_repo_name).first
+
+    user = User.new
+    user.first_name = "first_name_for_newly_created_user"
+    user.email = email
+    user.is_active = active
+    user.save!
+    assert_equal(expect_username, user.username)
+
+    # check user setup
+    verify_link_exists(Rails.configuration.auto_setup_new_users || active,
+                       groups(:all_users).uuid, user.uuid,
+                       "permission", "can_read")
+    # Check for OID login link.
+    verify_link_exists(Rails.configuration.auto_setup_new_users || active,
+                       user.uuid, user.email, "permission", "can_login")
+    # Check for repository.
+    if named_repo = (prior_repo or
+                     Repository.where(name: expect_repo_name).first)
+      verify_link_exists((can_setup and prior_repo.nil? and
+                          Rails.configuration.auto_setup_new_users_with_repository),
+                         named_repo.uuid, user.uuid, "permission", "can_manage")
+    end
+    # Check for VM login.
+    if auto_vm_uuid = Rails.configuration.auto_setup_new_users_with_vm_uuid
+      verify_link_exists(can_setup, auto_vm_uuid, user.uuid,
+                         "permission", "can_login", "username", expect_username)
+    end
+
+    # check email notifications
+    new_user_email = nil
+    new_inactive_user_email = nil
+
+    new_user_email_subject = "#{Rails.configuration.email_subject_prefix}New user created notification"
+    if Rails.configuration.auto_setup_new_users
+      new_user_email_subject = (expect_username or active) ?
+                                 "#{Rails.configuration.email_subject_prefix}New user created and setup notification" :
+                                 "#{Rails.configuration.email_subject_prefix}New user created, but not setup notification"
+    end
+
+    ActionMailer::Base.deliveries.each do |d|
+      if d.subject == new_user_email_subject then
+        new_user_email = d
+      elsif d.subject == "#{Rails.configuration.email_subject_prefix}New inactive user notification" then
+        new_inactive_user_email = d
+      end
+    end
+
+    # both active and inactive user creations should result in new user creation notification mails,
+    # if the new user email recipients config parameter is set
+    if not new_user_recipients.empty? then
+      assert_not_nil new_user_email, 'Expected new user email after setup'
+      assert_equal Rails.configuration.user_notifier_email_from, new_user_email.from[0]
+      assert_equal new_user_recipients, new_user_email.to[0]
+      assert_equal new_user_email_subject, new_user_email.subject
+    else
+      assert_nil new_user_email, 'Did not expect new user email after setup'
+    end
+
+    if not active
+      if not inactive_recipients.empty? then
+        assert_not_nil new_inactive_user_email, 'Expected new inactive user email after setup'
+        assert_equal Rails.configuration.user_notifier_email_from, new_inactive_user_email.from[0]
+        assert_equal inactive_recipients, new_inactive_user_email.to[0]
+        assert_equal "#{Rails.configuration.email_subject_prefix}New inactive user notification", new_inactive_user_email.subject
+      else
+        assert_nil new_inactive_user_email, 'Did not expect new inactive user email after setup'
+      end
+    else
+      assert_nil new_inactive_user_email, 'Expected no inactive user email after setting up active user'
+    end
+    ActionMailer::Base.deliveries = []
+
+  end
+
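+  # Assert that a link matching head/tail/class/name exists (or not, per
+  # link_exists); when a property is given, also check its value's prefix.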
+  def verify_link_exists link_exists, head_uuid, tail_uuid, link_class, link_name, property_name=nil, property_value=nil
+    all_links = Link.where(head_uuid: head_uuid,
+                           tail_uuid: tail_uuid,
+                           link_class: link_class,
+                           name: link_name)
+    assert_equal link_exists, all_links.any?, "Link #{'not' if link_exists} found for #{link_name} #{link_class} #{property_value}"
+    if link_exists && property_name && property_value
+      all_links.each do |link|
+        assert link.properties[property_name].start_with?(property_value), 'Property not found in link'
+      end
+    end
+  end
+
+  [
+    [:active, 'zzzzz-borkd-abcde12345abcde'],
+    [:active, 'zzzzz-j7d0g-abcde12345abcde'],
+    [:active, 'zzzzz-tpzed-borkd'],
+    [:system_user, 'zzzzz-tpzed-abcde12345abcde'],
+    [:anonymous, 'zzzzz-tpzed-abcde12345abcde'],
+  ].each do |fixture, new_uuid|
+    test "disallow update_uuid #{fixture} -> #{new_uuid}" do
+      u = users(fixture)
+      orig_uuid = u.uuid
+      act_as_system_user do
+        assert_raises do
+          u.update_uuid(new_uuid: new_uuid)
+        end
+      end
+      # "Successfully aborted orig->new" outcome looks the same as
+      # "successfully updated new->orig".
+      assert_update_success(old_uuid: new_uuid,
+                            new_uuid: orig_uuid,
+                            expect_owned_objects: fixture == :active)
+    end
+  end
+
+  [:active, :spectator, :admin].each do |target|
+    test "update_uuid on #{target} as non-admin user" do
+      act_as_user users(:active) do
+        assert_raises(ArvadosModel::PermissionDeniedError) do
+          users(target).update_uuid(new_uuid: 'zzzzz-tpzed-abcde12345abcde')
+        end
+      end
+    end
+  end
+
+  test "update_uuid to existing uuid" do
+    u = users(:active)
+    orig_uuid = u.uuid
+    new_uuid = users(:admin).uuid
+    act_as_system_user do
+      assert_raises do
+        u.update_uuid(new_uuid: new_uuid)
+      end
+    end
+    u.reload
+    assert_equal u.uuid, orig_uuid
+    assert_not_empty Collection.where(owner_uuid: orig_uuid)
+    assert_not_empty Group.where(owner_uuid: orig_uuid)
+  end
+
+  [
+    [:active, 'zbbbb-tpzed-abcde12345abcde'],
+    [:active, 'zzzzz-tpzed-abcde12345abcde'],
+    [:admin, 'zbbbb-tpzed-abcde12345abcde'],
+    [:admin, 'zzzzz-tpzed-abcde12345abcde'],
+  ].each do |fixture, new_uuid|
+    test "update_uuid #{fixture} to unused uuid #{new_uuid}" do
+      u = users(fixture)
+      orig_uuid = u.uuid
+      act_as_system_user do
+        u.update_uuid(new_uuid: new_uuid)
+      end
+      assert_update_success(old_uuid: orig_uuid,
+                            new_uuid: new_uuid,
+                            expect_owned_objects: fixture == :active)
+    end
+  end
+
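+  # After a successful update_uuid, nothing should reference old_uuid, and the
+  # user (plus owned objects, when expected) should reference new_uuid.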
+  def assert_update_success(old_uuid:, new_uuid:, expect_owned_objects: true)
+    [[User, :uuid],
+     [Link, :head_uuid],
+     [Link, :tail_uuid],
+     [Group, :owner_uuid],
+     [Collection, :owner_uuid],
+    ].each do |klass, attr|
+      assert_empty klass.where(attr => old_uuid)
+      if klass == User || expect_owned_objects
+        assert_not_empty klass.where(attr => new_uuid)
+      end
+    end
+  end
+end
diff --git a/services/api/test/unit/virtual_machine_test.rb b/services/api/test/unit/virtual_machine_test.rb
new file mode 100644 (file)
index 0000000..80ed9ff
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class VirtualMachineTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/workflow_test.rb b/services/api/test/unit/workflow_test.rb
new file mode 100644 (file)
index 0000000..26cd7f2
--- /dev/null
@@ -0,0 +1,129 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class WorkflowTest < ActiveSupport::TestCase
+  test "create workflow with no definition yaml" do
+    set_user_from_auth :active
+
+    wf = {
+      name: "test name",
+    }
+
+    w = Workflow.create!(wf)
+    assert_not_nil w.uuid
+  end
+
+  test "create workflow with valid definition yaml" do
+    set_user_from_auth :active
+
+    wf = {
+      name: "test name",
+      definition: "k1:\n v1: x\n v2: y"
+    }
+
+    w = Workflow.create!(wf)
+    assert_not_nil w.uuid
+  end
+
+  test "create workflow with simple string as definition" do
+    set_user_from_auth :active
+
+    wf = {
+      name: "test name",
+      definition: "this is valid yaml"
+    }
+
+    w = Workflow.create!(wf)
+    assert_not_nil w.uuid
+  end
+
+  test "create workflow with invalid definition yaml" do
+    set_user_from_auth :active
+
+    wf = {
+      name: "test name",
+      definition: "k1:\n v1: x\n  v2: y"
+    }
+
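+    # The second level mixes one- and two-space indents, so YAML parsing
+    # fails and the definition validation rejects the record.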
+    assert_raises(ActiveRecord::RecordInvalid) do
+      Workflow.create! wf
+    end
+  end
+
+  test "update workflow with invalid definition yaml" do
+    set_user_from_auth :active
+
+    w = Workflow.find_by_uuid(workflows(:workflow_with_definition_yml).uuid)
+    definition = "k1:\n v1: x\n  v2: y"
+
+    assert_raises(ActiveRecord::RecordInvalid) do
+      w.update_attributes!(definition: definition)
+    end
+  end
+
+  test "update workflow and verify name and description" do
+    set_user_from_auth :active
+
+    # Workflow name and desc should be set with values from definition yaml
+    # when it does not already have custom values for these fields
+    w = Workflow.find_by_uuid(workflows(:workflow_with_no_name_and_desc).uuid)
+    definition = "name: test name 1\ndescription: test desc 1\nother: some more"
+    w.update_attributes!(definition: definition)
+    w.reload
+    assert_equal "test name 1", w.name
+    assert_equal "test desc 1", w.description
+
+    # Workflow name and desc should be set with values from definition yaml
+    # when it does not already have custom values for these fields
+    definition = "name: test name 2\ndescription: test desc 2\nother: some more"
+    w.update_attributes!(definition: definition)
+    w.reload
+    assert_equal "test name 2", w.name
+    assert_equal "test desc 2", w.description
+
+    # Workflow name and desc should be set with values from definition yaml
+    # even if it means emptying them out
+    definition = "more: etc"
+    w.update_attributes!(definition: definition)
+    w.reload
+    assert_nil w.name
+    assert_nil w.description
+
+    # Workflow name and desc set using definition yaml should be cleared
+    # if definition yaml is cleared
+    definition = "name: test name 2\ndescription: test desc 2\nother: some more"
+    w.update_attributes!(definition: definition)
+    w.reload
+    definition = nil
+    w.update_attributes!(definition: definition)
+    w.reload
+    assert_nil w.name
+    assert_nil w.description
+
+    # Workflow name and desc should be set to provided custom values
+    definition = "name: test name 3\ndescription: test desc 3\nother: some more"
+    w.update_attributes!(name: "remains", description: "remains", definition: definition)
+    w.reload
+    assert_equal "remains", w.name
+    assert_equal "remains", w.description
+
+    # Workflow name and desc should retain provided custom values
+    # and should not be overwritten by values from yaml
+    definition = "name: test name 4\ndescription: test desc 4\nother: some more"
+    w.update_attributes!(definition: definition)
+    w.reload
+    assert_equal "remains", w.name
+    assert_equal "remains", w.description
+
+    # Workflow name and desc should retain provided custom values
+    # and not be affected by the clearing of the definition yaml
+    definition = nil
+    w.update_attributes!(definition: definition)
+    w.reload
+    assert_equal "remains", w.name
+    assert_equal "remains", w.description
+  end
+end
diff --git a/services/api/vendor/assets/stylesheets/.gitkeep b/services/api/vendor/assets/stylesheets/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/vendor/plugins/.gitkeep b/services/api/vendor/plugins/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/arv-git-httpd/.gitignore b/services/arv-git-httpd/.gitignore
new file mode 100644 (file)
index 0000000..1ae1045
--- /dev/null
@@ -0,0 +1 @@
+arv-git-httpd
diff --git a/services/arv-git-httpd/arvados-git-httpd.service b/services/arv-git-httpd/arvados-git-httpd.service
new file mode 100644 (file)
index 0000000..6f8cca8
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados git server
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/git-httpd/git-httpd.yml
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=notify
+ExecStart=/usr/bin/arvados-git-httpd
+Restart=always
+RestartSec=1
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/services/arv-git-httpd/auth_handler.go b/services/arv-git-httpd/auth_handler.go
new file mode 100644 (file)
index 0000000..3b3032a
--- /dev/null
@@ -0,0 +1,210 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "errors"
+       "log"
+       "net/http"
+       "os"
+       "regexp"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/auth"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+)
+
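+// authHandler wraps another http.Handler, rejecting requests whose Arvados
+// token does not grant access to the repository named in the URL path.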
+type authHandler struct {
+       handler    http.Handler
+       clientPool *arvadosclient.ClientPool
+       setupOnce  sync.Once
+}
+
+func (h *authHandler) setup() {
+       ac, err := arvadosclient.New(&theConfig.Client)
+       if err != nil {
+               log.Fatal(err)
+       }
+       h.clientPool = &arvadosclient.ClientPool{Prototype: ac}
+}
+
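+// ServeHTTP answers CORS preflight requests itself; for everything else it
+// checks the client's token against the API server, rewrites the URL path to
+// the repository's on-disk location, and delegates to the wrapped handler.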
+func (h *authHandler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
+       h.setupOnce.Do(h.setup)
+
+       var statusCode int
+       var statusText string
+       var apiToken string
+       var repoName string
+       var validApiToken bool
+
+       w := httpserver.WrapResponseWriter(wOrig)
+
+       if r.Method == "OPTIONS" {
+               method := r.Header.Get("Access-Control-Request-Method")
+               if method != "GET" && method != "POST" {
+                       w.WriteHeader(http.StatusMethodNotAllowed)
+                       return
+               }
+               w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type")
+               w.Header().Set("Access-Control-Allow-Methods", "GET, POST")
+               w.Header().Set("Access-Control-Allow-Origin", "*")
+               w.Header().Set("Access-Control-Max-Age", "86400")
+               w.WriteHeader(http.StatusOK)
+               return
+       }
+
+       if r.Header.Get("Origin") != "" {
+               // Allow simple cross-origin requests without user
+               // credentials ("user credentials" as defined by CORS,
+               // i.e., cookies, HTTP authentication, and client-side
+               // SSL certificates. See
+               // http://www.w3.org/TR/cors/#user-credentials).
+               w.Header().Set("Access-Control-Allow-Origin", "*")
+       }
+
+       defer func() {
+               if w.WroteStatus() == 0 {
+                       // Nobody has called WriteHeader yet: that
+                       // must be our job.
+                       w.WriteHeader(statusCode)
+                       if statusCode >= 400 {
+                               w.Write([]byte(statusText))
+                       }
+               }
+
+               // If the given password is a valid token, log its first 10 characters.
+               // Otherwise, log "<invalid>" if a password was given, or an empty string if not.
+               passwordToLog := ""
+               if !validApiToken {
+                       if len(apiToken) > 0 {
+                               passwordToLog = "<invalid>"
+                       }
+               } else {
+                       passwordToLog = apiToken[0:10]
+               }
+
+               httpserver.Log(r.RemoteAddr, passwordToLog, w.WroteStatus(), statusText, repoName, r.Method, r.URL.Path)
+       }()
+
+       creds := auth.CredentialsFromRequest(r)
+       if len(creds.Tokens) == 0 {
+               statusCode, statusText = http.StatusUnauthorized, "no credentials provided"
+               w.Header().Add("WWW-Authenticate", "Basic realm=\"git\"")
+               return
+       }
+       apiToken = creds.Tokens[0]
+
+       // Access to paths "/foo/bar.git/*" and "/foo/bar/.git/*" are
+       // protected by the permissions on the repository named
+       // "foo/bar".
+       pathParts := strings.SplitN(r.URL.Path[1:], ".git/", 2)
+       if len(pathParts) != 2 {
+               statusCode, statusText = http.StatusNotFound, "not found"
+               return
+       }
+       repoName = pathParts[0]
+       repoName = strings.TrimRight(repoName, "/")
+
+       arv := h.clientPool.Get()
+       if arv == nil {
+               statusCode, statusText = http.StatusInternalServerError, "connection pool failed: "+h.clientPool.Err().Error()
+               return
+       }
+       defer h.clientPool.Put(arv)
+
+       // Ask API server whether the repository is readable using
+       // this token (by trying to read it!)
+       arv.ApiToken = apiToken
+       repoUUID, err := h.lookupRepo(arv, repoName)
+       if err != nil {
+               statusCode, statusText = http.StatusInternalServerError, err.Error()
+               return
+       }
+       validApiToken = true
+       if repoUUID == "" {
+               statusCode, statusText = http.StatusNotFound, "not found"
+               return
+       }
+
+       isWrite := strings.HasSuffix(r.URL.Path, "/git-receive-pack")
+       if !isWrite {
+               statusText = "read"
+       } else {
+               err := arv.Update("repositories", repoUUID, arvadosclient.Dict{
+                       "repository": arvadosclient.Dict{
+                               "modified_at": time.Now().String(),
+                       },
+               }, &arvadosclient.Dict{})
+               if err != nil {
+                       statusCode, statusText = http.StatusForbidden, err.Error()
+                       return
+               }
+               statusText = "write"
+       }
+
+       // Regardless of whether the client asked for "/foo.git" or
+       // "/foo/.git", we choose whichever variant exists in our repo
+       // root, and we try {uuid}.git and {uuid}/.git first. If none
+       // of these exist, we 404 even though the API told us the repo
+       // _should_ exist (presumably this means the repo was just
+       // created, and gitolite sync hasn't run yet).
+       rewrittenPath := ""
+       tryDirs := []string{
+               "/" + repoUUID + ".git",
+               "/" + repoUUID + "/.git",
+               "/" + repoName + ".git",
+               "/" + repoName + "/.git",
+       }
+       for _, dir := range tryDirs {
+               if fileInfo, err := os.Stat(theConfig.RepoRoot + dir); err != nil {
+                       if !os.IsNotExist(err) {
+                               statusCode, statusText = http.StatusInternalServerError, err.Error()
+                               return
+                       }
+               } else if fileInfo.IsDir() {
+                       rewrittenPath = dir + "/" + pathParts[1]
+                       break
+               }
+       }
+       if rewrittenPath == "" {
+               log.Println("WARNING:", repoUUID,
+                       "git directory not found in", theConfig.RepoRoot, tryDirs)
+               // We say "content not found" to disambiguate from the
+               // earlier "API says that repo does not exist" error.
+               statusCode, statusText = http.StatusNotFound, "content not found"
+               return
+       }
+       r.URL.Path = rewrittenPath
+
+       h.handler.ServeHTTP(w, r)
+}
+
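+// uuidRegexp matches Arvados repository UUIDs,
+// e.g. "zzzzz-s0uqq-0123456789abcde".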
+var uuidRegexp = regexp.MustCompile(`^[0-9a-z]{5}-s0uqq-[0-9a-z]{15}$`)
+
+func (h *authHandler) lookupRepo(arv *arvadosclient.ArvadosClient, repoName string) (string, error) {
+       reposFound := arvadosclient.Dict{}
+       var column string
+       if uuidRegexp.MatchString(repoName) {
+               column = "uuid"
+       } else {
+               column = "name"
+       }
+       err := arv.List("repositories", arvadosclient.Dict{
+               "filters": [][]string{{column, "=", repoName}},
+       }, &reposFound)
+       if err != nil {
+               return "", err
+       } else if avail, ok := reposFound["items_available"].(float64); !ok {
+               return "", errors.New("bad list response from API")
+       } else if avail < 1 {
+               return "", nil
+       } else if avail > 1 {
+               return "", errors.New("name collision")
+       }
+       return reposFound["items"].([]interface{})[0].(map[string]interface{})["uuid"].(string), nil
+}
diff --git a/services/arv-git-httpd/auth_handler_test.go b/services/arv-git-httpd/auth_handler_test.go
new file mode 100644 (file)
index 0000000..05fde03
--- /dev/null
@@ -0,0 +1,170 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "io"
+       "log"
+       "net/http"
+       "net/http/httptest"
+       "net/url"
+       "path/filepath"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&AuthHandlerSuite{})
+
+type AuthHandlerSuite struct{}
+
+func (s *AuthHandlerSuite) SetUpSuite(c *check.C) {
+       arvadostest.StartAPI()
+}
+
+func (s *AuthHandlerSuite) TearDownSuite(c *check.C) {
+       arvadostest.StopAPI()
+}
+
+func (s *AuthHandlerSuite) SetUpTest(c *check.C) {
+       arvadostest.ResetEnv()
+       repoRoot, err := filepath.Abs("../api/tmp/git/test")
+       c.Assert(err, check.IsNil)
+       theConfig = &Config{
+               Client: arvados.Client{
+                       APIHost:  arvadostest.APIHost(),
+                       Insecure: true,
+               },
+               Listen:          ":0",
+               GitCommand:      "/usr/bin/git",
+               RepoRoot:        repoRoot,
+               ManagementToken: arvadostest.ManagementToken,
+       }
+}
+
+func (s *AuthHandlerSuite) TestPermission(c *check.C) {
+       h := &authHandler{handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+               log.Printf("%v", r.URL)
+               io.WriteString(w, r.URL.Path)
+       })}
+       baseURL, err := url.Parse("http://git.example/")
+       c.Assert(err, check.IsNil)
+       for _, trial := range []struct {
+               label   string
+               token   string
+               pathIn  string
+               pathOut string
+               status  int
+       }{
+               {
+                       label:   "read repo by name",
+                       token:   arvadostest.ActiveToken,
+                       pathIn:  arvadostest.Repository2Name + ".git/git-upload-pack",
+                       pathOut: arvadostest.Repository2UUID + ".git/git-upload-pack",
+               },
+               {
+                       label:   "read repo by uuid",
+                       token:   arvadostest.ActiveToken,
+                       pathIn:  arvadostest.Repository2UUID + ".git/git-upload-pack",
+                       pathOut: arvadostest.Repository2UUID + ".git/git-upload-pack",
+               },
+               {
+                       label:   "write repo by name",
+                       token:   arvadostest.ActiveToken,
+                       pathIn:  arvadostest.Repository2Name + ".git/git-receive-pack",
+                       pathOut: arvadostest.Repository2UUID + ".git/git-receive-pack",
+               },
+               {
+                       label:   "write repo by uuid",
+                       token:   arvadostest.ActiveToken,
+                       pathIn:  arvadostest.Repository2UUID + ".git/git-receive-pack",
+                       pathOut: arvadostest.Repository2UUID + ".git/git-receive-pack",
+               },
+               {
+                       label:  "uuid not found",
+                       token:  arvadostest.ActiveToken,
+                       pathIn: strings.Replace(arvadostest.Repository2UUID, "6", "z", -1) + ".git/git-upload-pack",
+                       status: http.StatusNotFound,
+               },
+               {
+                       label:  "name not found",
+                       token:  arvadostest.ActiveToken,
+                       pathIn: "nonexistent-bogus.git/git-upload-pack",
+                       status: http.StatusNotFound,
+               },
+               {
+                       label:   "read read-only repo",
+                       token:   arvadostest.SpectatorToken,
+                       pathIn:  arvadostest.FooRepoName + ".git/git-upload-pack",
+                       pathOut: arvadostest.FooRepoUUID + "/.git/git-upload-pack",
+               },
+               {
+                       label:  "write read-only repo",
+                       token:  arvadostest.SpectatorToken,
+                       pathIn: arvadostest.FooRepoName + ".git/git-receive-pack",
+                       status: http.StatusForbidden,
+               },
+       } {
+               c.Logf("trial label: %q", trial.label)
+               u, err := baseURL.Parse(trial.pathIn)
+               c.Assert(err, check.IsNil)
+               resp := httptest.NewRecorder()
+               req := &http.Request{
+                       Method: "POST",
+                       URL:    u,
+                       Header: http.Header{
+                               "Authorization": {"Bearer " + trial.token}}}
+               h.ServeHTTP(resp, req)
+               if trial.status == 0 {
+                       trial.status = http.StatusOK
+               }
+               c.Check(resp.Code, check.Equals, trial.status)
+               if trial.status < 400 {
+                       if trial.pathOut != "" && !strings.HasPrefix(trial.pathOut, "/") {
+                               trial.pathOut = "/" + trial.pathOut
+                       }
+                       c.Check(resp.Body.String(), check.Equals, trial.pathOut)
+               }
+       }
+}
+
+func (s *AuthHandlerSuite) TestCORS(c *check.C) {
+       h := &authHandler{}
+
+       // CORS preflight
+       resp := httptest.NewRecorder()
+       req := &http.Request{
+               Method: "OPTIONS",
+               Header: http.Header{
+                       "Origin":                        {"*"},
+                       "Access-Control-Request-Method": {"GET"},
+               },
+       }
+       h.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       c.Check(resp.Header().Get("Access-Control-Allow-Methods"), check.Equals, "GET, POST")
+       c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Equals, "Authorization, Content-Type")
+       c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
+       c.Check(resp.Body.String(), check.Equals, "")
+
+       // CORS actual request. Bogus token and path ensure
+       // authHandler responds 4xx without calling our wrapped (nil)
+       // handler.
+       u, err := url.Parse("git.zzzzz.arvadosapi.com/test")
+       c.Assert(err, check.Equals, nil)
+       resp = httptest.NewRecorder()
+       req = &http.Request{
+               Method: "GET",
+               URL:    u,
+               Header: http.Header{
+                       "Origin":        {"*"},
+                       "Authorization": {"OAuth2 foobar"},
+               },
+       }
+       h.ServeHTTP(resp, req)
+       c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
+}
diff --git a/services/arv-git-httpd/git_handler.go b/services/arv-git-httpd/git_handler.go
new file mode 100644 (file)
index 0000000..d9b08a9
--- /dev/null
@@ -0,0 +1,72 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "log"
+       "net"
+       "net/http"
+       "net/http/cgi"
+       "os"
+)
+
+// gitHandler is an http.Handler that invokes git-http-backend (or
+// whatever backend is configured) via CGI, with appropriate
+// environment variables in place for git-http-backend or
+// gitolite-shell.
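+//
+// For example, with the default configuration, each request is
+// handled by running "/usr/bin/git http-backend" as a CGI program,
+// with GIT_PROJECT_ROOT set to the configured RepoRoot.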
+type gitHandler struct {
+       cgi.Handler
+}
+
+func newGitHandler() http.Handler {
+       const glBypass = "GL_BYPASS_ACCESS_CHECKS"
+       const glHome = "GITOLITE_HTTP_HOME"
+       var env []string
+       path := os.Getenv("PATH")
+       if theConfig.GitoliteHome != "" {
+               env = append(env,
+                       glHome+"="+theConfig.GitoliteHome,
+                       glBypass+"=1")
+               path = path + ":" + theConfig.GitoliteHome + "/bin"
+       } else if home, bypass := os.Getenv(glHome), os.Getenv(glBypass); home != "" || bypass != "" {
+               env = append(env, glHome+"="+home, glBypass+"="+bypass)
+               log.Printf("DEPRECATED: Passing through %s and %s environment variables. Use GitoliteHome configuration instead.", glHome, glBypass)
+       }
+       env = append(env,
+               "GIT_PROJECT_ROOT="+theConfig.RepoRoot,
+               "GIT_HTTP_EXPORT_ALL=",
+               "SERVER_ADDR="+theConfig.Listen,
+               "PATH="+path)
+       return &gitHandler{
+               Handler: cgi.Handler{
+                       Path: theConfig.GitCommand,
+                       Dir:  theConfig.RepoRoot,
+                       Env:  env,
+                       Args: []string{"http-backend"},
+               },
+       }
+}
+
+func (h *gitHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+       remoteHost, remotePort, err := net.SplitHostPort(r.RemoteAddr)
+       if err != nil {
+               log.Printf("Internal error: SplitHostPort(r.RemoteAddr==%q): %s", r.RemoteAddr, err)
+               w.WriteHeader(http.StatusInternalServerError)
+               return
+       }
+
+       // Copy the wrapped cgi.Handler, so these request-specific
+       // variables don't leak into the next request.
+       handlerCopy := h.Handler
+       handlerCopy.Env = append(handlerCopy.Env,
+               // In Go 1.5 we can skip this; net/http/cgi will do it for us:
+               "REMOTE_HOST="+remoteHost,
+               "REMOTE_ADDR="+remoteHost,
+               "REMOTE_PORT="+remotePort,
+               // Ideally this would be a real username:
+               "REMOTE_USER="+r.RemoteAddr,
+       )
+       handlerCopy.ServeHTTP(w, r)
+}
diff --git a/services/arv-git-httpd/git_handler_test.go b/services/arv-git-httpd/git_handler_test.go
new file mode 100644 (file)
index 0000000..0cf7de4
--- /dev/null
@@ -0,0 +1,62 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "net/http"
+       "net/http/httptest"
+       "net/url"
+       "regexp"
+
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&GitHandlerSuite{})
+
+type GitHandlerSuite struct{}
+
+func (s *GitHandlerSuite) TestEnvVars(c *check.C) {
+       theConfig = defaultConfig()
+       theConfig.RepoRoot = "/"
+       theConfig.GitoliteHome = "/test/ghh"
+
+       u, err := url.Parse("git.zzzzz.arvadosapi.com/test")
+       c.Check(err, check.Equals, nil)
+       resp := httptest.NewRecorder()
+       req := &http.Request{
+               Method:     "GET",
+               URL:        u,
+               RemoteAddr: "[::1]:12345",
+       }
+       h := newGitHandler()
+       h.(*gitHandler).Path = "/bin/sh"
+       h.(*gitHandler).Args = []string{"-c", "printf 'Content-Type: text/plain\r\n\r\n'; env"}
+
+       h.ServeHTTP(resp, req)
+
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       body := resp.Body.String()
+       c.Check(body, check.Matches, `(?ms).*^PATH=.*:/test/ghh/bin$.*`)
+       c.Check(body, check.Matches, `(?ms).*^GITOLITE_HTTP_HOME=/test/ghh$.*`)
+       c.Check(body, check.Matches, `(?ms).*^GL_BYPASS_ACCESS_CHECKS=1$.*`)
+       c.Check(body, check.Matches, `(?ms).*^REMOTE_HOST=::1$.*`)
+       c.Check(body, check.Matches, `(?ms).*^REMOTE_PORT=12345$.*`)
+       c.Check(body, check.Matches, `(?ms).*^SERVER_ADDR=`+regexp.QuoteMeta(theConfig.Listen)+`$.*`)
+}
+
+func (s *GitHandlerSuite) TestCGIErrorOnSplitHostPortError(c *check.C) {
+       u, err := url.Parse("git.zzzzz.arvadosapi.com/test")
+       c.Check(err, check.Equals, nil)
+       resp := httptest.NewRecorder()
+       req := &http.Request{
+               Method:     "GET",
+               URL:        u,
+               RemoteAddr: "test.bad.address.missing.port",
+       }
+       h := newGitHandler()
+       h.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusInternalServerError)
+       c.Check(resp.Body.String(), check.Equals, "")
+}
diff --git a/services/arv-git-httpd/gitolite_test.go b/services/arv-git-httpd/gitolite_test.go
new file mode 100644 (file)
index 0000000..0656cbf
--- /dev/null
@@ -0,0 +1,111 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "io/ioutil"
+       "os"
+       "os/exec"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&GitoliteSuite{})
+
+// GitoliteSuite tests need an API server, an arv-git-httpd server,
+// and a repository hosted by gitolite.
+type GitoliteSuite struct {
+       IntegrationSuite
+       gitoliteHome string
+}
+
+func (s *GitoliteSuite) SetUpTest(c *check.C) {
+       var err error
+       s.gitoliteHome, err = ioutil.TempDir("", "arv-git-httpd")
+       c.Assert(err, check.Equals, nil)
+
+       runGitolite := func(prog string, args ...string) {
+               c.Log(prog, " ", args)
+               cmd := exec.Command(prog, args...)
+               cmd.Dir = s.gitoliteHome
+               cmd.Env = []string{"HOME=" + s.gitoliteHome}
+               for _, e := range os.Environ() {
+                       if !strings.HasPrefix(e, "HOME=") {
+                               cmd.Env = append(cmd.Env, e)
+                       }
+               }
+               diags, err := cmd.CombinedOutput()
+               c.Log(string(diags))
+               c.Assert(err, check.Equals, nil)
+       }
+
+       runGitolite("gitolite", "setup", "--admin", "root")
+
+       s.tmpRepoRoot = s.gitoliteHome + "/repositories"
+       s.Config = &Config{
+               Client: arvados.Client{
+                       APIHost:  arvadostest.APIHost(),
+                       Insecure: true,
+               },
+               Listen:       ":0",
+               GitCommand:   "/usr/share/gitolite3/gitolite-shell",
+               GitoliteHome: s.gitoliteHome,
+               RepoRoot:     s.tmpRepoRoot,
+       }
+       s.IntegrationSuite.SetUpTest(c)
+
+       // Install the gitolite hooks in the bare repo we made in
+       // (*IntegrationSuite)SetUpTest() -- see 2.2.4 at
+       // http://gitolite.com/gitolite/gitolite.html
+       runGitolite("gitolite", "setup")
+}
+
+func (s *GitoliteSuite) TearDownTest(c *check.C) {
+       // We really want Unsetenv here, but it's not worth forcing an
+       // upgrade to Go 1.4.
+       os.Setenv("GITOLITE_HTTP_HOME", "")
+       os.Setenv("GL_BYPASS_ACCESS_CHECKS", "")
+       if s.gitoliteHome != "" {
+               err := os.RemoveAll(s.gitoliteHome)
+               c.Check(err, check.Equals, nil)
+       }
+       s.IntegrationSuite.TearDownTest(c)
+}
+
+func (s *GitoliteSuite) TestFetch(c *check.C) {
+       err := s.RunGit(c, activeToken, "fetch", "active/foo.git")
+       c.Check(err, check.Equals, nil)
+}
+
+func (s *GitoliteSuite) TestFetchUnreadable(c *check.C) {
+       err := s.RunGit(c, anonymousToken, "fetch", "active/foo.git")
+       c.Check(err, check.ErrorMatches, `.* not found.*`)
+}
+
+func (s *GitoliteSuite) TestPush(c *check.C) {
+       err := s.RunGit(c, activeToken, "push", "active/foo.git", "master:gitolite-push")
+       c.Check(err, check.Equals, nil)
+
+       // Check that the commit hash appears in the gitolite log, as
+       // assurance that the gitolite hooks really did run.
+
+       sha1, err := exec.Command("git", "--git-dir", s.tmpWorkdir+"/.git",
+               "log", "-n1", "--format=%H").CombinedOutput()
+       c.Logf("git-log in workdir: %q", string(sha1))
+       c.Assert(err, check.Equals, nil)
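+       // Expect 40 hex digits plus the trailing newline from git-log.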
+       c.Assert(len(sha1), check.Equals, 41)
+
+       gitoliteLog, err := exec.Command("grep", "-r", string(sha1[:40]), s.gitoliteHome+"/.gitolite/logs").CombinedOutput()
+       c.Check(err, check.Equals, nil)
+       c.Logf("gitolite log message: %q", string(gitoliteLog))
+}
+
+func (s *GitoliteSuite) TestPushUnwritable(c *check.C) {
+       err := s.RunGit(c, spectatorToken, "push", "active/foo.git", "master:gitolite-push-fail")
+       c.Check(err, check.ErrorMatches, `.*HTTP (code = )?403.*`)
+}
diff --git a/services/arv-git-httpd/integration_test.go b/services/arv-git-httpd/integration_test.go
new file mode 100644 (file)
index 0000000..10c69ee
--- /dev/null
@@ -0,0 +1,149 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "errors"
+       "io/ioutil"
+       "os"
+       "os/exec"
+       "strings"
+       "testing"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+// IntegrationSuite tests need an API server and an arv-git-httpd
+// server. See GitSuite and GitoliteSuite.
+type IntegrationSuite struct {
+       tmpRepoRoot string
+       tmpWorkdir  string
+       testServer  *server
+       Config      *Config
+}
+
+func (s *IntegrationSuite) SetUpSuite(c *check.C) {
+       arvadostest.StartAPI()
+}
+
+func (s *IntegrationSuite) TearDownSuite(c *check.C) {
+       arvadostest.StopAPI()
+}
+
+func (s *IntegrationSuite) SetUpTest(c *check.C) {
+       arvadostest.ResetEnv()
+       s.testServer = &server{}
+       var err error
+       if s.tmpRepoRoot == "" {
+               s.tmpRepoRoot, err = ioutil.TempDir("", "arv-git-httpd")
+               c.Assert(err, check.Equals, nil)
+       }
+       s.tmpWorkdir, err = ioutil.TempDir("", "arv-git-httpd")
+       c.Assert(err, check.Equals, nil)
+       _, err = exec.Command("git", "init", "--bare", s.tmpRepoRoot+"/zzzzz-s0uqq-382brsig8rp3666.git").Output()
+       c.Assert(err, check.Equals, nil)
+       _, err = exec.Command("git", "init", s.tmpWorkdir).Output()
+       c.Assert(err, check.Equals, nil)
+       _, err = exec.Command("sh", "-c", "cd "+s.tmpWorkdir+" && echo initial >initial && git add initial && git -c user.name=Initial -c user.email=Initial commit -am 'foo: initial commit'").CombinedOutput()
+       c.Assert(err, check.Equals, nil)
+       _, err = exec.Command("sh", "-c", "cd "+s.tmpWorkdir+" && git push "+s.tmpRepoRoot+"/zzzzz-s0uqq-382brsig8rp3666.git master:master").CombinedOutput()
+       c.Assert(err, check.Equals, nil)
+       _, err = exec.Command("sh", "-c", "cd "+s.tmpWorkdir+" && echo work >work && git add work && git -c user.name=Foo -c user.email=Foo commit -am 'workdir: test'").CombinedOutput()
+       c.Assert(err, check.Equals, nil)
+
+       _, err = exec.Command("git", "config",
+               "--file", s.tmpWorkdir+"/.git/config",
+               "credential.http://"+s.testServer.Addr+"/.helper",
+               "!cred(){ cat >/dev/null; if [ \"$1\" = get ]; then echo password=$ARVADOS_API_TOKEN; fi; };cred").Output()
+       c.Assert(err, check.Equals, nil)
+       _, err = exec.Command("git", "config",
+               "--file", s.tmpWorkdir+"/.git/config",
+               "credential.http://"+s.testServer.Addr+"/.username",
+               "none").Output()
+       c.Assert(err, check.Equals, nil)
+
+       if s.Config == nil {
+               s.Config = &Config{
+                       Client: arvados.Client{
+                               APIHost:  arvadostest.APIHost(),
+                               Insecure: true,
+                       },
+                       Listen:          ":0",
+                       GitCommand:      "/usr/bin/git",
+                       RepoRoot:        s.tmpRepoRoot,
+                       ManagementToken: arvadostest.ManagementToken,
+               }
+       }
+
+       // Clear ARVADOS_API_* env vars before starting up the server,
+       // to make sure arv-git-httpd doesn't use them or complain
+       // about them being missing.
+       os.Unsetenv("ARVADOS_API_HOST")
+       os.Unsetenv("ARVADOS_API_HOST_INSECURE")
+       os.Unsetenv("ARVADOS_API_TOKEN")
+
+       theConfig = s.Config
+       err = s.testServer.Start()
+       c.Assert(err, check.Equals, nil)
+}
+
+func (s *IntegrationSuite) TearDownTest(c *check.C) {
+       var err error
+       if s.testServer != nil {
+               err = s.testServer.Close()
+       }
+       c.Check(err, check.Equals, nil)
+       s.testServer = nil
+
+       if s.tmpRepoRoot != "" {
+               err = os.RemoveAll(s.tmpRepoRoot)
+               c.Check(err, check.Equals, nil)
+       }
+       s.tmpRepoRoot = ""
+
+       if s.tmpWorkdir != "" {
+               err = os.RemoveAll(s.tmpWorkdir)
+               c.Check(err, check.Equals, nil)
+       }
+       s.tmpWorkdir = ""
+
+       s.Config = nil
+
+       theConfig = defaultConfig()
+}
+
+func (s *IntegrationSuite) RunGit(c *check.C, token, gitCmd, repo string, args ...string) error {
+       cwd, err := os.Getwd()
+       c.Assert(err, check.Equals, nil)
+       defer os.Chdir(cwd)
+       os.Chdir(s.tmpWorkdir)
+
+       gitargs := append([]string{
+               gitCmd, "http://" + s.testServer.Addr + "/" + repo,
+       }, args...)
+       cmd := exec.Command("git", gitargs...)
+       cmd.Env = append(os.Environ(), "ARVADOS_API_TOKEN="+token)
+       w, err := cmd.StdinPipe()
+       c.Assert(err, check.Equals, nil)
+       w.Close()
+       output, err := cmd.CombinedOutput()
+       c.Log("git ", gitargs, " => ", err)
+       c.Log(string(output))
+       if err != nil && len(output) > 0 {
+               // If messages appeared on stderr, they are more
+               // helpful than the err returned by CombinedOutput().
+               //
+               // Easier to match error strings without newlines:
+               err = errors.New(strings.Replace(string(output), "\n", " // ", -1))
+       }
+       return err
+}
diff --git a/services/arv-git-httpd/main.go b/services/arv-git-httpd/main.go
new file mode 100644 (file)
index 0000000..74ac7ae
--- /dev/null
@@ -0,0 +1,103 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "flag"
+       "fmt"
+       "log"
+       "os"
+       "regexp"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/config"
+       "github.com/coreos/go-systemd/daemon"
+)
+
+var version = "dev"
+
+// Server configuration
+type Config struct {
+       Client          arvados.Client
+       Listen          string
+       GitCommand      string
+       RepoRoot        string
+       GitoliteHome    string
+       ManagementToken string
+}
+
+var theConfig = defaultConfig()
+
+func defaultConfig() *Config {
+       return &Config{
+               Listen:     ":80",
+               GitCommand: "/usr/bin/git",
+               RepoRoot:   "/var/lib/arvados/git/repositories",
+       }
+}
+
+func main() {
+       const defaultCfgPath = "/etc/arvados/git-httpd/git-httpd.yml"
+       const deprecated = " (DEPRECATED -- use config file instead)"
+       flag.StringVar(&theConfig.Listen, "address", theConfig.Listen,
+               "Address to listen on, \"host:port\" or \":port\"."+deprecated)
+       flag.StringVar(&theConfig.GitCommand, "git-command", theConfig.GitCommand,
+               "Path to git or gitolite-shell executable. Each authenticated request will execute this program with a single argument, \"http-backend\"."+deprecated)
+       flag.StringVar(&theConfig.RepoRoot, "repo-root", theConfig.RepoRoot,
+               "Path to git repositories."+deprecated)
+       flag.StringVar(&theConfig.GitoliteHome, "gitolite-home", theConfig.GitoliteHome,
+               "Value for GITOLITE_HTTP_HOME environment variable. If not empty, GL_BYPASS_ACCESS_CHECKS=1 will also be set."+deprecated)
+
+       cfgPath := flag.String("config", defaultCfgPath, "Configuration file `path`.")
+       dumpConfig := flag.Bool("dump-config", false, "Write current configuration to stdout and exit (useful for migrating from command line flags to config file).")
+       getVersion := flag.Bool("version", false, "Print version information and exit.")
+
+       flag.StringVar(&theConfig.ManagementToken, "management-token", theConfig.ManagementToken,
+               "Authorization token to be included in all health check requests.")
+
+       flag.Usage = usage
+       flag.Parse()
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("arv-git-httpd %s\n", version)
+               return
+       }
+
+       err := config.LoadFile(theConfig, *cfgPath)
+       if err != nil {
+               h := os.Getenv("ARVADOS_API_HOST")
+               if h == "" || !os.IsNotExist(err) || *cfgPath != defaultCfgPath {
+                       log.Fatal(err)
+               }
+               log.Print("DEPRECATED: No config file found, but ARVADOS_API_HOST environment variable is set. Please use a config file instead.")
+               theConfig.Client.APIHost = h
+               if regexp.MustCompile("^(?i:1|yes|true)$").MatchString(os.Getenv("ARVADOS_API_HOST_INSECURE")) {
+                       theConfig.Client.Insecure = true
+               }
+               if j, err := json.MarshalIndent(theConfig, "", "    "); err == nil {
+                       log.Print("Current configuration:\n", string(j))
+               }
+       }
+
+       if *dumpConfig {
+               log.Fatal(config.DumpAndExit(theConfig))
+       }
+
+       srv := &server{}
+       if err := srv.Start(); err != nil {
+               log.Fatal(err)
+       }
+       if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
+               log.Printf("Error notifying init daemon: %v", err)
+       }
+       log.Printf("arv-git-httpd %s started", version)
+       log.Println("Listening at", srv.Addr)
+       log.Println("Repository root", theConfig.RepoRoot)
+       if err := srv.Wait(); err != nil {
+               log.Fatal(err)
+       }
+}
diff --git a/services/arv-git-httpd/server.go b/services/arv-git-httpd/server.go
new file mode 100644 (file)
index 0000000..8f0d90f
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "net/http"
+
+       "git.curoverse.com/arvados.git/sdk/go/health"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+)
+
+type server struct {
+       httpserver.Server
+}
+
+func (srv *server) Start() error {
+       mux := http.NewServeMux()
+       mux.Handle("/", &authHandler{handler: newGitHandler()})
+       mux.Handle("/_health/", &health.Handler{
+               Token:  theConfig.ManagementToken,
+               Prefix: "/_health/",
+       })
+       srv.Handler = mux
+       srv.Addr = theConfig.Listen
+       return srv.Server.Start()
+}
diff --git a/services/arv-git-httpd/server_test.go b/services/arv-git-httpd/server_test.go
new file mode 100644 (file)
index 0000000..77049c3
--- /dev/null
@@ -0,0 +1,123 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "net/http"
+       "net/http/httptest"
+       "os"
+       "os/exec"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&GitSuite{})
+
+const (
+       spectatorToken = "zw2f4gwx8hw8cjre7yp6v1zylhrhn3m5gvjq73rtpwhmknrybu"
+       activeToken    = "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
+       anonymousToken = "4kg6k6lzmp9kj4cpkcoxie964cmvjahbt4fod9zru44k4jqdmi"
+       expiredToken   = "2ym314ysp27sk7h943q6vtc378srb06se3pq6ghurylyf3pdmx"
+)
+
+type GitSuite struct {
+       IntegrationSuite
+}
+
+func (s *GitSuite) TestPathVariants(c *check.C) {
+       s.makeArvadosRepo(c)
+       for _, repo := range []string{"active/foo.git", "active/foo/.git", "arvados.git", "arvados/.git"} {
+               err := s.RunGit(c, spectatorToken, "fetch", repo)
+               c.Assert(err, check.Equals, nil)
+       }
+}
+
+func (s *GitSuite) TestReadonly(c *check.C) {
+       err := s.RunGit(c, spectatorToken, "fetch", "active/foo.git")
+       c.Assert(err, check.Equals, nil)
+       err = s.RunGit(c, spectatorToken, "push", "active/foo.git", "master:newbranchfail")
+       c.Assert(err, check.ErrorMatches, `.*HTTP (code = )?403.*`)
+       _, err = os.Stat(s.tmpRepoRoot + "/zzzzz-s0uqq-382brsig8rp3666.git/refs/heads/newbranchfail")
+       c.Assert(err, check.FitsTypeOf, &os.PathError{})
+}
+
+func (s *GitSuite) TestReadwrite(c *check.C) {
+       err := s.RunGit(c, activeToken, "fetch", "active/foo.git")
+       c.Assert(err, check.Equals, nil)
+       err = s.RunGit(c, activeToken, "push", "active/foo.git", "master:newbranch")
+       c.Assert(err, check.Equals, nil)
+       _, err = os.Stat(s.tmpRepoRoot + "/zzzzz-s0uqq-382brsig8rp3666.git/refs/heads/newbranch")
+       c.Assert(err, check.Equals, nil)
+}
+
+func (s *GitSuite) TestNonexistent(c *check.C) {
+       err := s.RunGit(c, spectatorToken, "fetch", "thisrepodoesnotexist.git")
+       c.Assert(err, check.ErrorMatches, `.* not found.*`)
+}
+
+func (s *GitSuite) TestMissingGitdirReadableRepository(c *check.C) {
+       err := s.RunGit(c, activeToken, "fetch", "active/foo2.git")
+       c.Assert(err, check.ErrorMatches, `.* not found.*`)
+}
+
+func (s *GitSuite) TestNoPermission(c *check.C) {
+       for _, repo := range []string{"active/foo.git", "active/foo/.git"} {
+               err := s.RunGit(c, anonymousToken, "fetch", repo)
+               c.Assert(err, check.ErrorMatches, `.* not found.*`)
+       }
+}
+
+func (s *GitSuite) TestExpiredToken(c *check.C) {
+       for _, repo := range []string{"active/foo.git", "active/foo/.git"} {
+               err := s.RunGit(c, expiredToken, "fetch", repo)
+               c.Assert(err, check.ErrorMatches, `.* (500 while accessing|requested URL returned error: 500).*`)
+       }
+}
+
+func (s *GitSuite) TestInvalidToken(c *check.C) {
+       for _, repo := range []string{"active/foo.git", "active/foo/.git"} {
+               err := s.RunGit(c, "s3cr3tp@ssw0rd", "fetch", repo)
+               c.Assert(err, check.ErrorMatches, `.* requested URL returned error.*`)
+       }
+}
+
+func (s *GitSuite) TestShortToken(c *check.C) {
+       for _, repo := range []string{"active/foo.git", "active/foo/.git"} {
+               err := s.RunGit(c, "s3cr3t", "fetch", repo)
+               c.Assert(err, check.ErrorMatches, `.* (500 while accessing|requested URL returned error: 500).*`)
+       }
+}
+
+func (s *GitSuite) TestShortTokenBadReq(c *check.C) {
+       for _, repo := range []string{"bogus"} {
+               err := s.RunGit(c, "s3cr3t", "fetch", repo)
+               c.Assert(err, check.ErrorMatches, `.*not found.*`)
+       }
+}
+
+// Make a bare arvados repo at {tmpRepoRoot}/arvados.git
+func (s *GitSuite) makeArvadosRepo(c *check.C) {
+       msg, err := exec.Command("git", "init", "--bare", s.tmpRepoRoot+"/zzzzz-s0uqq-arvadosrepo0123.git").CombinedOutput()
+       c.Log(string(msg))
+       c.Assert(err, check.Equals, nil)
+       msg, err = exec.Command("git", "--git-dir", s.tmpRepoRoot+"/zzzzz-s0uqq-arvadosrepo0123.git", "fetch", "../../.git", "HEAD:master").CombinedOutput()
+       c.Log(string(msg))
+       c.Assert(err, check.Equals, nil)
+}
+
+func (s *GitSuite) TestHealthCheckPing(c *check.C) {
+       req, err := http.NewRequest("GET",
+               "http://"+s.testServer.Addr+"/_health/ping",
+               nil)
+       c.Assert(err, check.Equals, nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ManagementToken)
+
+       resp := httptest.NewRecorder()
+       s.testServer.Handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, 200)
+       c.Check(resp.Body.String(), check.Matches, `{"health":"OK"}\n`)
+}
diff --git a/services/arv-git-httpd/usage.go b/services/arv-git-httpd/usage.go
new file mode 100644 (file)
index 0000000..8863da6
--- /dev/null
@@ -0,0 +1,81 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// arvados-git-httpd provides authenticated access to Arvados-hosted
+// git repositories.
+//
+// See http://doc.arvados.org/install/install-arv-git-httpd.html.
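+//
+// Example invocation, using the default config path:
+//
+//     arvados-git-httpd -config /etc/arvados/git-httpd/git-httpd.yml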
+package main
+
+import (
+       "flag"
+       "fmt"
+       "os"
+
+       "github.com/ghodss/yaml"
+)
+
+func usage() {
+       c := defaultConfig()
+       c.Client.APIHost = "zzzzz.arvadosapi.com:443"
+       exampleConfigFile, err := yaml.Marshal(c)
+       if err != nil {
+               panic(err)
+       }
+       fmt.Fprintf(os.Stderr, `
+
+arvados-git-httpd provides authenticated access to Arvados-hosted git
+repositories.
+
+See http://doc.arvados.org/install/install-arv-git-httpd.html.
+
+Usage: arvados-git-httpd [-config path/to/arvados/git-httpd.yml]
+
+Options:
+`)
+       flag.PrintDefaults()
+       fmt.Fprintf(os.Stderr, `
+Example config file:
+
+%s
+
+Client.APIHost:
+
+    Address (or address:port) of the Arvados API endpoint.
+
+Client.AuthToken:
+
+    Unused. Normally empty, or omitted entirely.
+
+Client.Insecure:
+
+    True if your Arvados API endpoint uses an unverifiable SSL/TLS
+    certificate.
+
+GitCommand:
+
+    Path to git or gitolite-shell executable. Each authenticated
+    request will execute this program with the single argument
+    "http-backend".
+
+GitoliteHome:
+
+    Path to Gitolite's home directory. If a non-empty path is given,
+    the CGI environment will be set up to support the use of
+    gitolite-shell as a GitCommand: for example, if GitoliteHome is
+    "/gh", then the CGI environment will have GITOLITE_HTTP_HOME=/gh,
+    PATH=$PATH:/gh/bin, and GL_BYPASS_ACCESS_CHECKS=1.
+
+Listen:
+
+    Local port to listen on. Can be "address:port" or ":port", where
+    "address" is a host IP address or name and "port" is a port number
+    or name.
+
+RepoRoot:
+
+    Path to git repositories.
+
+`, exampleConfigFile)
+}
diff --git a/services/arv-web/README b/services/arv-web/README
new file mode 100644 (file)
index 0000000..eaf7624
--- /dev/null
@@ -0,0 +1,6 @@
+arv-web enables you to run a custom web service using the contents of an
+Arvados collection.
+
+See "Using arv-web" in the Arvados user guide:
+
+http://doc.arvados.org/user/topics/arv-web.html
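+
+A typical invocation looks like this (the project UUID below is a
+placeholder; the flags are defined in arv-web.py):
+
+  arv-web.py --project-uuid zzzzz-j7d0g-0123456789abcde --port 8080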
diff --git a/services/arv-web/arv-web.py b/services/arv-web/arv-web.py
new file mode 100755 (executable)
index 0000000..55b710a
--- /dev/null
@@ -0,0 +1,256 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# arv-web enables you to run a custom web service from the contents of an Arvados collection.
+#
+# See http://doc.arvados.org/user/topics/arv-web.html
+
+import arvados
+from arvados.safeapi import ThreadSafeApiCache
+import subprocess
+from arvados_fuse import Operations, CollectionDirectory
+import tempfile
+import os
+import llfuse
+import threading
+import Queue
+import argparse
+import logging
+import signal
+import sys
+import functools
+
+logger = logging.getLogger('arvados.arv-web')
+logger.setLevel(logging.INFO)
+
+class ArvWeb(object):
+    def __init__(self, project, docker_image, port):
+        self.project = project
+        self.loop = True
+        self.cid = None
+        self.prev_docker_image = None
+        self.mountdir = None
+        self.collection = None
+        self.override_docker_image = docker_image
+        self.port = port
+        self.evqueue = Queue.Queue()
+        self.api = ThreadSafeApiCache(arvados.config.settings())
+
+        if arvados.util.group_uuid_pattern.match(project) is None:
+            raise arvados.errors.ArgumentError("Project uuid is not valid")
+
+        collections = self.api.collections().list(filters=[["owner_uuid", "=", project]],
+                        limit=1,
+                        order='modified_at desc').execute()['items']
+        self.newcollection = collections[0]['uuid'] if collections else None
+
+        self.ws = arvados.events.subscribe(self.api, [["object_uuid", "is_a", "arvados#collection"]], self.on_message)
+
+    def check_docker_running(self):
+        # It would be less hacky to use "docker events" than to poll
+        # "docker ps", but that would require writing a bigger pile of code.
+        if self.cid:
+            ps = subprocess.check_output(["docker", "ps", "--no-trunc=true", "--filter=status=running"])
+            for l in ps.splitlines():
+                if l.startswith(self.cid):
+                    return True
+        return False
+
+    # Handle messages from the Arvados event bus.
+    def on_message(self, ev):
+        if 'event_type' in ev:
+            old_attr = None
+            if 'old_attributes' in ev['properties'] and ev['properties']['old_attributes']:
+                old_attr = ev['properties']['old_attributes']
+            if self.project not in (ev['properties']['new_attributes']['owner_uuid'],
+                                    old_attr['owner_uuid'] if old_attr else None):
+                return
+
+            et = ev['event_type']
+            if ev['event_type'] == 'update':
+                if ev['properties']['new_attributes']['owner_uuid'] != ev['properties']['old_attributes']['owner_uuid']:
+                    if self.project == ev['properties']['new_attributes']['owner_uuid']:
+                        et = 'add'
+                    else:
+                        et = 'remove'
+                if ev['properties']['new_attributes']['trash_at'] is not None:
+                    et = 'remove'
+
+            self.evqueue.put((self.project, et, ev['object_uuid']))
+
+    # Run an arvados_fuse mount under the control of the local process.  This lets
+    # us switch out the contents of the directory without having to unmount and
+    # remount.
+    def run_fuse_mount(self):
+        self.mountdir = tempfile.mkdtemp()
+
+        self.operations = Operations(os.getuid(), os.getgid(), self.api, "utf-8")
+        self.cdir = CollectionDirectory(llfuse.ROOT_INODE, self.operations.inodes, self.api, 2, self.collection)
+        self.operations.inodes.add_entry(self.cdir)
+
+        # Initialize the fuse connection
+        llfuse.init(self.operations, self.mountdir, ['allow_other'])
+
+        t = threading.Thread(None, llfuse.main)
+        t.start()
+
+        # Wait until the driver is finished initializing.
+        self.operations.initlock.wait()
+
+    def mount_collection(self):
+        if self.newcollection != self.collection:
+            self.collection = self.newcollection
+            if not self.mountdir and self.collection:
+                self.run_fuse_mount()
+
+            if self.mountdir:
+                with llfuse.lock:
+                    self.cdir.clear()
+                    # Switch the FUSE directory object so that it stores
+                    # the newly selected collection
+                    if self.collection:
+                        logger.info("Mounting %s", self.collection)
+                    else:
+                        logger.info("Mount is empty")
+                    self.cdir.change_collection(self.collection)
+
+
+    def stop_docker(self):
+        if self.cid:
+            logger.info("Stopping Docker container")
+            subprocess.call(["docker", "stop", self.cid])
+            self.cid = None
+
+    def run_docker(self):
+        try:
+            if self.collection is None:
+                self.stop_docker()
+                return
+
+            docker_image = None
+            if self.override_docker_image:
+                docker_image = self.override_docker_image
+            else:
+                try:
+                    with llfuse.lock:
+                        if "docker_image" in self.cdir:
+                            docker_image = self.cdir["docker_image"].readfrom(0, 1024).strip()
+                except IOError:
+                    pass
+
+            has_reload = False
+            try:
+                with llfuse.lock:
+                    has_reload = "reload" in self.cdir
+            except IOError:
+                pass
+
+            if docker_image is None:
+                logger.error("Collection must contain a file 'docker_image', or --image must be given on the command line.")
+                self.stop_docker()
+                return
+
+            if docker_image == self.prev_docker_image and self.cid is not None and has_reload:
+                logger.info("Running container reload command")
+                subprocess.check_call(["docker", "exec", self.cid, "/mnt/reload"])
+                return
+
+            self.stop_docker()
+
+            logger.info("Starting Docker container %s", docker_image)
+            self.cid = subprocess.check_output(["docker", "run",
+                                                "--detach=true",
+                                                "--publish=%i:80" % (self.port),
+                                                "--volume=%s:/mnt:ro" % self.mountdir,
+                                                docker_image]).strip()
+
+            self.prev_docker_image = docker_image
+            logger.info("Container id %s", self.cid)
+
+        except subprocess.CalledProcessError:
+            self.cid = None
+
+    def wait_for_events(self):
+        if not self.cid:
+            logger.warning("No service running!  Will wait for a new collection to appear in the project.")
+        else:
+            logger.info("Waiting for events")
+
+        running = True
+        self.loop = True
+        while running:
+            # Main run loop.  Wait on project events, signals, or the
+            # Docker container stopping.
+
+            try:
+                # Poll the queue with a 1 second timeout; without a
+                # timeout, the Python runtime doesn't get a chance to
+                # process SIGINT or SIGTERM.
+                eq = self.evqueue.get(True, 1)
+                logger.info("%s %s", eq[1], eq[2])
+                self.newcollection = self.collection
+                if eq[1] in ('add', 'update', 'create'):
+                    self.newcollection = eq[2]
+                elif eq[1] == 'remove':
+                    collections = self.api.collections().list(filters=[["owner_uuid", "=", self.project]],
+                                                        limit=1,
+                                                        order='modified_at desc').execute()['items']
+                    self.newcollection = collections[0]['uuid'] if collections else None
+                running = False
+            except Queue.Empty:
+                pass
+
+            if self.cid and not self.check_docker_running():
+                logger.warning("Service has terminated.  Will try to restart.")
+                self.cid = None
+                running = False
+
+
+    def run(self):
+        try:
+            while self.loop:
+                self.loop = False
+                self.mount_collection()
+                try:
+                    self.run_docker()
+                    self.wait_for_events()
+                except KeyboardInterrupt:
+                    logger.info("Got keyboard interrupt")
+                    self.ws.close()
+                    self.loop = False
+                except Exception:
+                    logger.exception("Caught fatal exception, shutting down")
+                    self.ws.close()
+                    self.loop = False
+        finally:
+            self.stop_docker()
+
+            if self.mountdir:
+                logger.info("Unmounting")
+                subprocess.call(["fusermount", "-u", self.mountdir])
+                os.rmdir(self.mountdir)
+
+
+def main(argv):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--project-uuid', type=str, required=True, help="Project uuid to watch")
+    parser.add_argument('--port', type=int, default=8080, help="Host port to listen on (default 8080)")
+    parser.add_argument('--image', type=str, help="Docker image to run")
+
+    args = parser.parse_args(argv)
+
+    signal.signal(signal.SIGTERM, lambda signal, frame: sys.exit(0))
+
+    try:
+        arvweb = ArvWeb(args.project_uuid, args.image, args.port)
+        arvweb.run()
+    except arvados.errors.ArgumentError as e:
+        logger.error(e)
+        return 1
+
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
diff --git a/services/arv-web/sample-cgi-app/docker_image b/services/arv-web/sample-cgi-app/docker_image
new file mode 100644 (file)
index 0000000..57f344f
--- /dev/null
@@ -0,0 +1 @@
+arvados/arv-web
\ No newline at end of file
diff --git a/services/arv-web/sample-cgi-app/public/.htaccess b/services/arv-web/sample-cgi-app/public/.htaccess
new file mode 100644 (file)
index 0000000..e5145bd
--- /dev/null
@@ -0,0 +1,3 @@
+Options +ExecCGI
+AddHandler cgi-script .cgi
+DirectoryIndex index.cgi
diff --git a/services/arv-web/sample-cgi-app/public/index.cgi b/services/arv-web/sample-cgi-app/public/index.cgi
new file mode 100755 (executable)
index 0000000..57bc2a9
--- /dev/null
@@ -0,0 +1,4 @@
+#!/usr/bin/perl
+
+print "Content-type: text/html\n\n";
+print "Hello world from perl!";
diff --git a/services/arv-web/sample-cgi-app/tmp/.keepkeep b/services/arv-web/sample-cgi-app/tmp/.keepkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/arv-web/sample-rack-app/config.ru b/services/arv-web/sample-rack-app/config.ru
new file mode 100644 (file)
index 0000000..65f3c7c
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+app = proc do |env|
+    [200, { "Content-Type" => "text/html" }, ["hello <b>world</b> from ruby"]]
+end
+run app
diff --git a/services/arv-web/sample-rack-app/docker_image b/services/arv-web/sample-rack-app/docker_image
new file mode 100644 (file)
index 0000000..57f344f
--- /dev/null
@@ -0,0 +1 @@
+arvados/arv-web
\ No newline at end of file
diff --git a/services/arv-web/sample-rack-app/public/.keepkeep b/services/arv-web/sample-rack-app/public/.keepkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/arv-web/sample-rack-app/tmp/.keepkeep b/services/arv-web/sample-rack-app/tmp/.keepkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/arv-web/sample-static-page/docker_image b/services/arv-web/sample-static-page/docker_image
new file mode 100644 (file)
index 0000000..57f344f
--- /dev/null
@@ -0,0 +1 @@
+arvados/arv-web
\ No newline at end of file
diff --git a/services/arv-web/sample-static-page/public/index.html b/services/arv-web/sample-static-page/public/index.html
new file mode 100644 (file)
index 0000000..e8608a5
--- /dev/null
@@ -0,0 +1,10 @@
+<!-- Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 -->
+
+<html>
+  <head><title>arv-web sample</title></head>
+  <body>
+    <p>Hello world static page</p>
+  </body>
+</html>
diff --git a/services/arv-web/sample-static-page/tmp/.keepkeep b/services/arv-web/sample-static-page/tmp/.keepkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/arv-web/sample-wsgi-app/docker_image b/services/arv-web/sample-wsgi-app/docker_image
new file mode 100644 (file)
index 0000000..57f344f
--- /dev/null
@@ -0,0 +1 @@
+arvados/arv-web
\ No newline at end of file
diff --git a/services/arv-web/sample-wsgi-app/passenger_wsgi.py b/services/arv-web/sample-wsgi-app/passenger_wsgi.py
new file mode 100644 (file)
index 0000000..faec3c2
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+def application(environ, start_response):
+    start_response('200 OK', [('Content-Type', 'text/plain')])
+    return [b"hello world from python!\n"]
diff --git a/services/arv-web/sample-wsgi-app/public/.keepkeep b/services/arv-web/sample-wsgi-app/public/.keepkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/arv-web/sample-wsgi-app/tmp/.keepkeep b/services/arv-web/sample-wsgi-app/tmp/.keepkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/crunch-dispatch-local/.gitignore b/services/crunch-dispatch-local/.gitignore
new file mode 100644 (file)
index 0000000..7c1070a
--- /dev/null
@@ -0,0 +1 @@
+crunch-dispatch-local
diff --git a/services/crunch-dispatch-local/crunch-dispatch-local.go b/services/crunch-dispatch-local/crunch-dispatch-local.go
new file mode 100644 (file)
index 0000000..dcd54e8
--- /dev/null
@@ -0,0 +1,216 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+// Dispatcher service for Crunch that runs containers locally.
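+//
+// Example invocation (both flags are optional and shown with their
+// default values):
+//
+//     crunch-dispatch-local -poll-interval 10 -crunch-run-command /usr/bin/crunch-run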
+
+import (
+       "context"
+       "flag"
+       "fmt"
+       "os"
+       "os/exec"
+       "os/signal"
+       "sync"
+       "syscall"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/dispatch"
+       "github.com/sirupsen/logrus"
+)
+
+var version = "dev"
+
+func main() {
+       err := doMain()
+       if err != nil {
+               logrus.Fatalf("%q", err)
+       }
+}
+
+var (
+       runningCmds      map[string]*exec.Cmd
+       runningCmdsMutex sync.Mutex
+       waitGroup        sync.WaitGroup
+       crunchRunCommand *string
+)
+
+func doMain() error {
+       logger := logrus.StandardLogger()
+       if os.Getenv("DEBUG") != "" {
+               logger.SetLevel(logrus.DebugLevel)
+       }
+       logger.Formatter = &logrus.JSONFormatter{
+               TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
+       }
+
+       flags := flag.NewFlagSet("crunch-dispatch-local", flag.ExitOnError)
+
+       pollInterval := flags.Int(
+               "poll-interval",
+               10,
+               "Interval in seconds to poll for queued containers")
+
+       crunchRunCommand = flags.String(
+               "crunch-run-command",
+               "/usr/bin/crunch-run",
+               "Crunch command to run container")
+
+       getVersion := flags.Bool(
+               "version",
+               false,
+               "Print version information and exit.")
+
+       // Parse args; omit the first arg, which is the command name.
+       flags.Parse(os.Args[1:])
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("crunch-dispatch-local %s\n", version)
+               return nil
+       }
+
+       logger.Printf("crunch-dispatch-local %s started", version)
+
+       runningCmds = make(map[string]*exec.Cmd)
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       if err != nil {
+               logger.Errorf("error making Arvados client: %v", err)
+               return err
+       }
+       arv.Retries = 25
+
+       dispatcher := dispatch.Dispatcher{
+               Logger:       logger,
+               Arv:          arv,
+               RunContainer: run,
+               PollPeriod:   time.Duration(*pollInterval) * time.Second,
+       }
+
+       ctx, cancel := context.WithCancel(context.Background())
+       err = dispatcher.Run(ctx)
+       if err != nil {
+               return err
+       }
+
+       c := make(chan os.Signal, 1)
+       signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
+       sig := <-c
+       logger.Printf("Received %s, shutting down", sig)
+       signal.Stop(c)
+
+       cancel()
+
+       runningCmdsMutex.Lock()
+       // Finished dispatching; interrupt any crunch jobs that are still running
+       for _, cmd := range runningCmds {
+               cmd.Process.Signal(os.Interrupt)
+       }
+       runningCmdsMutex.Unlock()
+
+       // Wait for all running crunch jobs to complete / terminate
+       waitGroup.Wait()
+
+       return nil
+}
+
+func startFunc(container arvados.Container, cmd *exec.Cmd) error {
+       return cmd.Start()
+}
+
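+// startCmd is assigned to a variable so tests can replace it with a
+// stub instead of starting a real crunch-run process.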
+var startCmd = startFunc
+
+// Run a container.
+//
+// If the container is Locked, start a new crunch-run process and wait until
+// crunch-run completes.  If the priority is set to zero, send an
+// interrupt signal to the crunch-run process.
+//
+// If the container is in any other state, or is not Complete/Cancelled after
+// crunch-run terminates, mark the container as Cancelled.
+func run(dispatcher *dispatch.Dispatcher,
+       container arvados.Container,
+       status <-chan arvados.Container) {
+
+       uuid := container.UUID
+
+       if container.State == dispatch.Locked {
+               waitGroup.Add(1)
+
+               cmd := exec.Command(*crunchRunCommand, uuid)
+               cmd.Stdin = nil
+               cmd.Stderr = os.Stderr
+               cmd.Stdout = os.Stderr
+
+               dispatcher.Logger.Printf("starting container %v", uuid)
+
+               // Add this crunch job to the list of runningCmds only if we
+               // succeed in starting crunch-run.
+
+               runningCmdsMutex.Lock()
+               if err := startCmd(container, cmd); err != nil {
+                       runningCmdsMutex.Unlock()
+                       dispatcher.Logger.Warnf("error starting %q for %s: %s", *crunchRunCommand, uuid, err)
+                       dispatcher.UpdateState(uuid, dispatch.Cancelled)
+               } else {
+                       runningCmds[uuid] = cmd
+                       runningCmdsMutex.Unlock()
+
+                       // Need to wait for crunch-run to exit
+                       done := make(chan struct{})
+
+                       go func() {
+                               if _, err := cmd.Process.Wait(); err != nil {
+                                       dispatcher.Logger.Warnf("error while waiting for crunch job to finish for %v: %q", uuid, err)
+                               }
+                               dispatcher.Logger.Debugf("sending done")
+                               done <- struct{}{}
+                       }()
+
+               Loop:
+                       for {
+                               select {
+                               case <-done:
+                                       break Loop
+                               case c := <-status:
+                                       // Interrupt the child process if priority changes to 0
+                                       if (c.State == dispatch.Locked || c.State == dispatch.Running) && c.Priority == 0 {
+                                               dispatcher.Logger.Printf("sending SIGINT to pid %d to cancel container %v", cmd.Process.Pid, uuid)
+                                               cmd.Process.Signal(os.Interrupt)
+                                       }
+                               }
+                       }
+                       close(done)
+
+                       dispatcher.Logger.Printf("finished container run for %v", uuid)
+
+                       // Remove the crunch job from runningCmds
+                       runningCmdsMutex.Lock()
+                       delete(runningCmds, uuid)
+                       runningCmdsMutex.Unlock()
+               }
+               waitGroup.Done()
+       }
+
+       // If the container is not finalized, then change it to "Cancelled".
+       err := dispatcher.Arv.Get("containers", uuid, nil, &container)
+       if err != nil {
+               dispatcher.Logger.Warnf("error getting final container state: %v", err)
+       }
+       if container.State == dispatch.Locked || container.State == dispatch.Running {
+               dispatcher.Logger.Warnf("after %q process termination, container state for %v is %q; updating it to %q",
+                       *crunchRunCommand, uuid, container.State, dispatch.Cancelled)
+               dispatcher.UpdateState(uuid, dispatch.Cancelled)
+       }
+
+       // drain any subsequent status changes
+       for range status {
+       }
+
+       dispatcher.Logger.Printf("finalized container %v", uuid)
+}
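The select loop above is the heart of the local dispatcher: one goroutine waits for the child process while the loop also watches the status channel, so a priority change to zero can interrupt crunch-run. A minimal self-contained sketch of the same pattern (hypothetical names, not part of this change):

    package main

    import (
        "fmt"
        "os"
        "os/exec"
    )

    // waitOrInterrupt waits for cmd to exit, sending SIGINT if a
    // zero priority arrives on updates first.
    func waitOrInterrupt(cmd *exec.Cmd, updates <-chan int) error {
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()
        for {
            select {
            case err := <-done:
                return err
            case p := <-updates:
                if p == 0 {
                    cmd.Process.Signal(os.Interrupt)
                }
            }
        }
    }

    func main() {
        cmd := exec.Command("sleep", "60")
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        updates := make(chan int, 1)
        updates <- 0 // simulate the container's priority dropping to zero
        fmt.Println("process ended:", waitOrInterrupt(cmd, updates))
    }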
diff --git a/services/crunch-dispatch-local/crunch-dispatch-local_test.go b/services/crunch-dispatch-local/crunch-dispatch-local_test.go
new file mode 100644 (file)
index 0000000..6bae1f4
--- /dev/null
@@ -0,0 +1,205 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "io"
+       "net/http"
+       "net/http/httptest"
+       "os"
+       "os/exec"
+       "regexp"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/dispatch"
+       "github.com/sirupsen/logrus"
+       . "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       TestingT(t)
+}
+
+var _ = Suite(&TestSuite{})
+var _ = Suite(&MockArvadosServerSuite{})
+
+type TestSuite struct{}
+type MockArvadosServerSuite struct{}
+
+var initialArgs []string
+
+func (s *TestSuite) SetUpSuite(c *C) {
+       initialArgs = os.Args
+       arvadostest.StartAPI()
+       runningCmds = make(map[string]*exec.Cmd)
+       logrus.SetFormatter(&logrus.TextFormatter{DisableColors: true})
+}
+
+func (s *TestSuite) TearDownSuite(c *C) {
+       arvadostest.StopAPI()
+}
+
+func (s *TestSuite) SetUpTest(c *C) {
+       args := []string{"crunch-dispatch-local"}
+       os.Args = args
+}
+
+func (s *TestSuite) TearDownTest(c *C) {
+       arvadostest.ResetEnv()
+       os.Args = initialArgs
+}
+
+func (s *MockArvadosServerSuite) TearDownTest(c *C) {
+       arvadostest.ResetEnv()
+}
+
+func (s *TestSuite) TestIntegration(c *C) {
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, IsNil)
+
+       echo := "echo"
+       crunchRunCommand = &echo
+
+       ctx, cancel := context.WithCancel(context.Background())
+       dispatcher := dispatch.Dispatcher{
+               Arv:        arv,
+               PollPeriod: time.Second,
+               RunContainer: func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
+                       run(d, c, s)
+                       cancel()
+               },
+       }
+
+       startCmd = func(container arvados.Container, cmd *exec.Cmd) error {
+               dispatcher.UpdateState(container.UUID, "Running")
+               dispatcher.UpdateState(container.UUID, "Complete")
+               return cmd.Start()
+       }
+
+       err = dispatcher.Run(ctx)
+       c.Assert(err, Equals, context.Canceled)
+
+       // Wait for all running crunch jobs to complete / terminate
+       waitGroup.Wait()
+
+       // There should be no queued containers now
+       params := arvadosclient.Dict{
+               "filters": [][]string{{"state", "=", "Queued"}},
+       }
+       var containers arvados.ContainerList
+       err = arv.List("containers", params, &containers)
+       c.Check(err, IsNil)
+       c.Assert(len(containers.Items), Equals, 0)
+
+       // Previously "Queued" container should now be in "Complete" state
+       var container arvados.Container
+       err = arv.Get("containers", "zzzzz-dz642-queuedcontainer", nil, &container)
+       c.Check(err, IsNil)
+       c.Check(string(container.State), Equals, "Complete")
+}
+
+func (s *MockArvadosServerSuite) Test_APIErrorGettingContainers(c *C) {
+       apiStubResponses := make(map[string]arvadostest.StubResponse)
+       apiStubResponses["/arvados/v1/containers"] = arvadostest.StubResponse{500, string(`{}`)}
+
+       testWithServerStub(c, apiStubResponses, "echo", "error getting count of containers")
+}
+
+func (s *MockArvadosServerSuite) Test_APIErrorUpdatingContainerState(c *C) {
+       apiStubResponses := make(map[string]arvadostest.StubResponse)
+       apiStubResponses["/arvados/v1/containers"] =
+               arvadostest.StubResponse{200, string(`{"items_available":1, "items":[{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx1","State":"Queued","Priority":1}]}`)}
+       apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx1"] =
+               arvadostest.StubResponse{500, string(`{}`)}
+
+       testWithServerStub(c, apiStubResponses, "echo", "error locking container zzzzz-dz642-xxxxxxxxxxxxxx1")
+}
+
+func (s *MockArvadosServerSuite) Test_ContainerStillInRunningAfterRun(c *C) {
+       apiStubResponses := make(map[string]arvadostest.StubResponse)
+       apiStubResponses["/arvados/v1/containers"] =
+               arvadostest.StubResponse{200, string(`{"items_available":1, "items":[{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2","State":"Queued","Priority":1}]}`)}
+       apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx2/lock"] =
+               arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2", "state":"Locked", "priority":1, "locked_by_uuid": "` + arvadostest.Dispatch1AuthUUID + `"}`)}
+       apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx2"] =
+               arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2", "state":"Running", "priority":1, "locked_by_uuid": "` + arvadostest.Dispatch1AuthUUID + `"}`)}
+
+       testWithServerStub(c, apiStubResponses, "echo",
+               `after \\"echo\\" process termination, container state for zzzzz-dz642-xxxxxxxxxxxxxx2 is \\"Running\\"; updating it to \\"Cancelled\\"`)
+}
+
+func (s *MockArvadosServerSuite) Test_ErrorRunningContainer(c *C) {
+       apiStubResponses := make(map[string]arvadostest.StubResponse)
+       apiStubResponses["/arvados/v1/containers"] =
+               arvadostest.StubResponse{200, string(`{"items_available":1, "items":[{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx3","State":"Queued","Priority":1}]}`)}
+
+       apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx3/lock"] =
+               arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx3", "state":"Locked", "priority":1}`)}
+
+       testWithServerStub(c, apiStubResponses, "nosuchcommand", `error starting \\"nosuchcommand\\" for zzzzz-dz642-xxxxxxxxxxxxxx3`)
+}
+
+func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubResponse, crunchCmd string, expected string) {
+       apiStubResponses["/arvados/v1/api_client_authorizations/current"] =
+               arvadostest.StubResponse{200, string(`{"uuid": "` + arvadostest.Dispatch1AuthUUID + `", "api_token": "xyz"}`)}
+
+       apiStub := arvadostest.ServerStub{apiStubResponses}
+
+       api := httptest.NewServer(&apiStub)
+       defer api.Close()
+
+       arv := &arvadosclient.ArvadosClient{
+               Scheme:    "http",
+               ApiServer: api.URL[7:],
+               ApiToken:  "abc123",
+               Client:    &http.Client{Transport: &http.Transport{}},
+               Retries:   0,
+       }
+
+       buf := bytes.NewBuffer(nil)
+       logrus.SetOutput(io.MultiWriter(buf, os.Stderr))
+       defer logrus.SetOutput(os.Stderr)
+
+       *crunchRunCommand = crunchCmd
+
+       ctx, cancel := context.WithCancel(context.Background())
+       dispatcher := dispatch.Dispatcher{
+               Arv:        arv,
+               PollPeriod: time.Second / 20,
+               RunContainer: func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
+                       run(d, c, s)
+                       cancel()
+               },
+       }
+
+       startCmd = func(container arvados.Container, cmd *exec.Cmd) error {
+               dispatcher.UpdateState(container.UUID, "Running")
+               dispatcher.UpdateState(container.UUID, "Complete")
+               return cmd.Start()
+       }
+
+       re := regexp.MustCompile(`(?ms).*` + expected + `.*`)
+       go func() {
+               for i := 0; i < 80 && !re.MatchString(buf.String()); i++ {
+                       time.Sleep(100 * time.Millisecond)
+               }
+               cancel()
+       }()
+
+       err := dispatcher.Run(ctx)
+       c.Assert(err, Equals, context.Canceled)
+
+       // Wait for all running crunch jobs to complete / terminate
+       waitGroup.Wait()
+
+       c.Check(buf.String(), Matches, `(?ms).*`+expected+`.*`)
+}
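Both suites swap a fake into the package-level startCmd variable instead of mocking exec.Cmd itself. The same seam pattern in miniature (illustrative names; note these suites set the hook without restoring it, which a longer-lived test binary would want to do):

    package main

    import "fmt"

    // startProc is the production hook; a test reassigns it so nothing
    // is actually executed (mirrors startCmd above).
    var startProc = func(uuid string) error {
        fmt.Println("would exec crunch-run", uuid)
        return nil
    }

    func dispatchOne(uuid string) error { return startProc(uuid) }

    func main() {
        saved := startProc
        defer func() { startProc = saved }() // restore the real hook
        startProc = func(uuid string) error {
            fmt.Println("stubbed start for", uuid)
            return nil
        }
        _ = dispatchOne("zzzzz-dz642-000000000000000")
    }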
diff --git a/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go b/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
new file mode 100644 (file)
index 0000000..889e410
--- /dev/null
@@ -0,0 +1,390 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+// Dispatcher service for Crunch that submits containers to the slurm queue.
+
+import (
+       "bytes"
+       "context"
+       "flag"
+       "fmt"
+       "log"
+       "math"
+       "os"
+       "regexp"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/dispatchcloud"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/config"
+       "git.curoverse.com/arvados.git/sdk/go/dispatch"
+       "github.com/coreos/go-systemd/daemon"
+       "github.com/sirupsen/logrus"
+)
+
+type logger interface {
+       dispatch.Logger
+       Fatalf(string, ...interface{})
+}
+
+const initialNiceValue int64 = 10000
+
+var (
+       version           = "dev"
+       defaultConfigPath = "/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml"
+)
+
+type Dispatcher struct {
+       *dispatch.Dispatcher
+       logger  logrus.FieldLogger
+       cluster *arvados.Cluster
+       sqCheck *SqueueChecker
+       slurm   Slurm
+
+       Client arvados.Client
+
+       SbatchArguments []string
+       PollPeriod      arvados.Duration
+       PrioritySpread  int64
+
+       // crunch-run command to invoke. The container UUID will be
+       // appended. If nil, []string{"crunch-run"} will be used.
+       //
+       // Example: []string{"crunch-run", "--cgroup-parent-subsystem=memory"}
+       CrunchRunCommand []string
+
+       // Extra RAM to reserve (in Bytes) for SLURM job, in addition
+       // to the amount specified in the container's RuntimeConstraints
+       ReserveExtraRAM int64
+
+       // Minimum time between two attempts to run the same container
+       MinRetryPeriod arvados.Duration
+
+       // Batch size for container queries
+       BatchSize int64
+}
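For reference, a config file for the struct above might look like the following. Illustrative values only; the field names mirror the struct, as in the JSON configs the tests below feed to readConfig:

    {
      "Client": {
        "APIHost": "zzzzz.arvadosapi.com",
        "AuthToken": "example-token"
      },
      "SbatchArguments": ["--partition=crunch"],
      "PollPeriod": "10s",
      "PrioritySpread": 1000,
      "CrunchRunCommand": ["crunch-run", "--cgroup-parent-subsystem=memory"],
      "ReserveExtraRAM": 268435456,
      "MinRetryPeriod": "30s",
      "BatchSize": 100
    }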
+
+func main() {
+       logger := logrus.StandardLogger()
+       if os.Getenv("DEBUG") != "" {
+               logger.SetLevel(logrus.DebugLevel)
+       }
+       logger.Formatter = &logrus.JSONFormatter{
+               TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
+       }
+       disp := &Dispatcher{logger: logger}
+       err := disp.Run(os.Args[0], os.Args[1:])
+       if err != nil {
+               logrus.Fatalf("%s", err)
+       }
+}
+
+func (disp *Dispatcher) Run(prog string, args []string) error {
+       if err := disp.configure(prog, args); err != nil {
+               return err
+       }
+       disp.setup()
+       return disp.run()
+}
+
+// configure() loads config files. Tests skip this.
+func (disp *Dispatcher) configure(prog string, args []string) error {
+       flags := flag.NewFlagSet(prog, flag.ExitOnError)
+       flags.Usage = func() { usage(flags) }
+
+       configPath := flags.String(
+               "config",
+               defaultConfigPath,
+               "`path` to JSON or YAML configuration file")
+       dumpConfig := flags.Bool(
+               "dump-config",
+               false,
+               "write current configuration to stdout and exit")
+       getVersion := flags.Bool(
+               "version",
+               false,
+               "Print version information and exit.")
+       // Parse args; the caller has already stripped the command name.
+       flags.Parse(args)
+
+       // Print version information if requested. Exit immediately:
+       // returning nil here would let Run() continue into setup() and
+       // start the dispatcher anyway.
+       if *getVersion {
+               fmt.Printf("crunch-dispatch-slurm %s\n", version)
+               os.Exit(0)
+       }
+
+       disp.logger.Printf("crunch-dispatch-slurm %s started", version)
+
+       err := disp.readConfig(*configPath)
+       if err != nil {
+               return err
+       }
+
+       if disp.CrunchRunCommand == nil {
+               disp.CrunchRunCommand = []string{"crunch-run"}
+       }
+
+       if disp.PollPeriod == 0 {
+               disp.PollPeriod = arvados.Duration(10 * time.Second)
+       }
+
+       if disp.Client.APIHost != "" || disp.Client.AuthToken != "" {
+               // Copy real configs into env vars so [a]
+               // MakeArvadosClient() uses them, and [b] they get
+               // propagated to crunch-run via SLURM.
+               os.Setenv("ARVADOS_API_HOST", disp.Client.APIHost)
+               os.Setenv("ARVADOS_API_TOKEN", disp.Client.AuthToken)
+               os.Setenv("ARVADOS_API_HOST_INSECURE", "")
+               if disp.Client.Insecure {
+                       os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
+               }
+               os.Setenv("ARVADOS_KEEP_SERVICES", strings.Join(disp.Client.KeepServiceURIs, " "))
+               os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
+       } else {
+               disp.logger.Warnf("Client credentials missing from config; falling back on environment variables (deprecated).")
+       }
+
+       if *dumpConfig {
+               return config.DumpAndExit(disp)
+       }
+
+       siteConfig, err := arvados.GetConfig(arvados.DefaultConfigFile)
+       if os.IsNotExist(err) {
+               disp.logger.Warnf("no cluster config (%s), proceeding with no node types defined", err)
+       } else if err != nil {
+               return fmt.Errorf("error loading config: %s", err)
+       } else if disp.cluster, err = siteConfig.GetCluster(""); err != nil {
+               return fmt.Errorf("config error: %s", err)
+       }
+
+       return nil
+}
+
+// setup() initializes private fields after configure().
+func (disp *Dispatcher) setup() {
+       if disp.logger == nil {
+               disp.logger = logrus.StandardLogger()
+       }
+       arv, err := arvadosclient.MakeArvadosClient()
+       if err != nil {
+               disp.logger.Fatalf("Error making Arvados client: %v", err)
+       }
+       arv.Retries = 25
+
+       disp.slurm = NewSlurmCLI()
+       disp.sqCheck = &SqueueChecker{
+               Logger:         disp.logger,
+               Period:         time.Duration(disp.PollPeriod),
+               PrioritySpread: disp.PrioritySpread,
+               Slurm:          disp.slurm,
+       }
+       disp.Dispatcher = &dispatch.Dispatcher{
+               Arv:            arv,
+               Logger:         disp.logger,
+               BatchSize:      disp.BatchSize,
+               RunContainer:   disp.runContainer,
+               PollPeriod:     time.Duration(disp.PollPeriod),
+               MinRetryPeriod: time.Duration(disp.MinRetryPeriod),
+       }
+}
+
+func (disp *Dispatcher) run() error {
+       defer disp.sqCheck.Stop()
+
+       if disp.cluster != nil && len(disp.cluster.InstanceTypes) > 0 {
+               go SlurmNodeTypeFeatureKludge(disp.cluster)
+       }
+
+       if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
+               log.Printf("Error notifying init daemon: %v", err)
+       }
+       go disp.checkSqueueForOrphans()
+       return disp.Dispatcher.Run(context.Background())
+}
+
+var containerUuidPattern = regexp.MustCompile(`^[a-z0-9]{5}-dz642-[a-z0-9]{15}$`)
+
+// Check the next squeue report, and invoke TrackContainer for all the
+// containers in the report. This gives us a chance to cancel slurm
+// jobs started by a previous dispatch process that never released
+// their slurm allocations even though their container states are
+// Cancelled or Complete. See https://dev.arvados.org/issues/10979
+func (disp *Dispatcher) checkSqueueForOrphans() {
+       for _, uuid := range disp.sqCheck.All() {
+               if !containerUuidPattern.MatchString(uuid) {
+                       continue
+               }
+               err := disp.TrackContainer(uuid)
+               if err != nil {
+                       log.Printf("checkSqueueForOrphans: TrackContainer(%s): %s", uuid, err)
+               }
+       }
+}
+
+func (disp *Dispatcher) slurmConstraintArgs(container arvados.Container) []string {
+       mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+container.RuntimeConstraints.KeepCacheRAM+disp.ReserveExtraRAM) / float64(1048576)))
+
+       disk := dispatchcloud.EstimateScratchSpace(&container)
+       disk = int64(math.Ceil(float64(disk) / float64(1048576)))
+       return []string{
+               fmt.Sprintf("--mem=%d", mem),
+               fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs),
+               fmt.Sprintf("--tmp=%d", disk),
+       }
+}
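To make the rounding concrete: with ReserveExtraRAM left at 0, a container requesting 250000000 bytes of RAM and no Keep cache yields ceil(250000000/1048576) = 239, i.e. --mem=239, which is exactly the value the tests below expect. Scratch space from EstimateScratchSpace is rounded up to MiB the same way.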
+
+func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error) {
+       var args []string
+       args = append(args, disp.SbatchArguments...)
+       args = append(args, "--job-name="+container.UUID, fmt.Sprintf("--nice=%d", initialNiceValue), "--no-requeue")
+
+       if disp.cluster == nil {
+               // no instance types configured
+               args = append(args, disp.slurmConstraintArgs(container)...)
+       } else if it, err := dispatchcloud.ChooseInstanceType(disp.cluster, &container); err == dispatchcloud.ErrInstanceTypesNotConfigured {
+               // ditto
+               args = append(args, disp.slurmConstraintArgs(container)...)
+       } else if err != nil {
+               return nil, err
+       } else {
+               // use instancetype constraint instead of slurm mem/cpu/tmp specs
+               args = append(args, "--constraint=instancetype="+it.Name)
+       }
+
+       if len(container.SchedulingParameters.Partitions) > 0 {
+               args = append(args, "--partition="+strings.Join(container.SchedulingParameters.Partitions, ","))
+       }
+
+       return args, nil
+}
+
+func (disp *Dispatcher) submit(container arvados.Container, crunchRunCommand []string) error {
+       // append() here avoids modifying crunchRunCommand's
+       // underlying array, which is shared with other goroutines.
+       crArgs := append([]string(nil), crunchRunCommand...)
+       crArgs = append(crArgs, container.UUID)
+       crScript := strings.NewReader(execScript(crArgs))
+
+       sbArgs, err := disp.sbatchArgs(container)
+       if err != nil {
+               return err
+       }
+       log.Printf("running sbatch %+q", sbArgs)
+       return disp.slurm.Batch(crScript, sbArgs)
+}
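The copy-before-append above guards against a classic Go aliasing bug: appending directly to a shared slice that has spare capacity would let concurrent submits overwrite each other's container UUIDs. A self-contained illustration (not from this change):

    package main

    import "fmt"

    func main() {
        shared := make([]string, 1, 3) // spare capacity
        shared[0] = "crunch-run"

        a := append(shared, "uuid-a") // writes into shared's backing array
        b := append(shared, "uuid-b") // overwrites the same slot
        fmt.Println(a[1], b[1]) // "uuid-b uuid-b": a's element was clobbered

        safe := append([]string(nil), shared...) // copy first
        c := append(safe, "uuid-c")
        fmt.Println(c[1]) // "uuid-c", independent of any other append
    }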
+
+// Submit a container to the slurm queue (or resume monitoring if it's
+// already in the queue).  Cancel the slurm job if the container's
+// priority changes to zero or its state indicates it's no longer
+// running.
+func (disp *Dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) {
+       ctx, cancel := context.WithCancel(context.Background())
+       defer cancel()
+
+       if ctr.State == dispatch.Locked && !disp.sqCheck.HasUUID(ctr.UUID) {
+               log.Printf("Submitting container %s to slurm", ctr.UUID)
+               if err := disp.submit(ctr, disp.CrunchRunCommand); err != nil {
+                       var text string
+                       if err, ok := err.(dispatchcloud.ConstraintsNotSatisfiableError); ok {
+                               var logBuf bytes.Buffer
+                               fmt.Fprintf(&logBuf, "cannot run container %s: %s\n", ctr.UUID, err)
+                               if len(err.AvailableTypes) == 0 {
+                                       fmt.Fprint(&logBuf, "No instance types are configured.\n")
+                               } else {
+                                       fmt.Fprint(&logBuf, "Available instance types:\n")
+                                       for _, t := range err.AvailableTypes {
+                                               fmt.Fprintf(&logBuf,
+                                                       "Type %q: %d VCPUs, %d RAM, %d Scratch, %f Price\n",
+                                                       t.Name, t.VCPUs, t.RAM, t.Scratch, t.Price,
+                                               )
+                                       }
+                               }
+                               text = logBuf.String()
+                               disp.UpdateState(ctr.UUID, dispatch.Cancelled)
+                       } else {
+                               text = fmt.Sprintf("Error submitting container %s to slurm: %s", ctr.UUID, err)
+                       }
+                       log.Print(text)
+
+                       lr := arvadosclient.Dict{"log": arvadosclient.Dict{
+                               "object_uuid": ctr.UUID,
+                               "event_type":  "dispatch",
+                               "properties":  map[string]string{"text": text}}}
+                       disp.Arv.Create("logs", lr, nil)
+
+                       disp.Unlock(ctr.UUID)
+                       return
+               }
+       }
+
+       log.Printf("Start monitoring container %v in state %q", ctr.UUID, ctr.State)
+       defer log.Printf("Done monitoring container %s", ctr.UUID)
+
+       // If the container disappears from the slurm queue, there is
+       // no point in waiting for further dispatch updates: just
+       // clean up and return.
+       go func(uuid string) {
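+               // Note: HasUUID blocks until the next squeue poll, so
+               // this loop does not spin.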
+               for ctx.Err() == nil && disp.sqCheck.HasUUID(uuid) {
+               }
+               cancel()
+       }(ctr.UUID)
+
+       for {
+               select {
+               case <-ctx.Done():
+                       // Disappeared from squeue
+                       if err := disp.Arv.Get("containers", ctr.UUID, nil, &ctr); err != nil {
+                               log.Printf("error getting final container state for %s: %s", ctr.UUID, err)
+                       }
+                       switch ctr.State {
+                       case dispatch.Running:
+                               disp.UpdateState(ctr.UUID, dispatch.Cancelled)
+                       case dispatch.Locked:
+                               disp.Unlock(ctr.UUID)
+                       }
+                       return
+               case updated, ok := <-status:
+                       if !ok {
+                               log.Printf("container %s is done: cancel slurm job", ctr.UUID)
+                               disp.scancel(ctr)
+                       } else if updated.Priority == 0 {
+                               log.Printf("container %s has state %q, priority %d: cancel slurm job", ctr.UUID, updated.State, updated.Priority)
+                               disp.scancel(ctr)
+                       } else {
+                               p := int64(updated.Priority)
+                               if p <= 1000 {
+                                       // API is providing
+                                       // user-assigned priority. If
+                                       // ctrs have equal priority,
+                                       // run the older one first.
+                                       p = p<<50 - (updated.CreatedAt.UnixNano() >> 14)
+                               }
+                               disp.sqCheck.SetPriority(ctr.UUID, p)
+                       }
+               }
+       }
+}
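A quick check of the priority arithmetic above: for user-assigned priorities (p <= 1000), p<<50 keeps the priority in the high bits, while UnixNano()>>14 coarsens the creation time to roughly 16 microsecond units (around 2^46 for present-day timestamps, comfortably below 2^50). Containers with equal p therefore differ only in the subtracted age term, and the older one, having the smaller timestamp, ends up with the larger slurm priority and runs first.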
+
+func (disp *Dispatcher) scancel(ctr arvados.Container) {
+       err := disp.slurm.Cancel(ctr.UUID)
+       if err != nil {
+               log.Printf("scancel: %s", err)
+               time.Sleep(time.Second)
+       } else if disp.sqCheck.HasUUID(ctr.UUID) {
+               log.Printf("container %s is still in squeue after scancel", ctr.UUID)
+               time.Sleep(time.Second)
+       }
+}
+
+func (disp *Dispatcher) readConfig(path string) error {
+       err := config.LoadFile(disp, path)
+       if err != nil && os.IsNotExist(err) && path == defaultConfigPath {
+               log.Printf("No config file found at %s; continuing with default configuration.", path)
+               err = nil
+       }
+       return err
+}
diff --git a/services/crunch-dispatch-slurm/crunch-dispatch-slurm.service b/services/crunch-dispatch-slurm/crunch-dispatch-slurm.service
new file mode 100644 (file)
index 0000000..1509d7a
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados Crunch Dispatcher for SLURM
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=notify
+ExecStart=/usr/bin/crunch-dispatch-slurm
+Restart=always
+RestartSec=1
+LimitNOFILE=1000000
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go b/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
new file mode 100644 (file)
index 0000000..eea1020
--- /dev/null
@@ -0,0 +1,434 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "net/http/httptest"
+       "os"
+       "os/exec"
+       "strings"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/dispatchcloud"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/dispatch"
+       "github.com/sirupsen/logrus"
+       . "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       TestingT(t)
+}
+
+var _ = Suite(&IntegrationSuite{})
+var _ = Suite(&StubbedSuite{})
+
+type IntegrationSuite struct {
+       disp  Dispatcher
+       slurm slurmFake
+}
+
+func (s *IntegrationSuite) SetUpTest(c *C) {
+       arvadostest.StartAPI()
+       os.Setenv("ARVADOS_API_TOKEN", arvadostest.Dispatch1Token)
+       s.disp = Dispatcher{}
+       s.disp.setup()
+       s.slurm = slurmFake{}
+}
+
+func (s *IntegrationSuite) TearDownTest(c *C) {
+       arvadostest.ResetEnv()
+       arvadostest.StopAPI()
+}
+
+type slurmFake struct {
+       didBatch      [][]string
+       didCancel     []string
+       didRelease    []string
+       didRenice     [][]string
+       queue         string
+       rejectNice10K bool
+       // If non-nil, run this func during the 2nd+ call to Cancel()
+       onCancel func()
+       // Error returned by Batch()
+       errBatch error
+}
+
+func (sf *slurmFake) Batch(script io.Reader, args []string) error {
+       sf.didBatch = append(sf.didBatch, args)
+       return sf.errBatch
+}
+
+func (sf *slurmFake) QueueCommand(args []string) *exec.Cmd {
+       return exec.Command("echo", sf.queue)
+}
+
+func (sf *slurmFake) Release(name string) error {
+       sf.didRelease = append(sf.didRelease, name)
+       return nil
+}
+
+func (sf *slurmFake) Renice(name string, nice int64) error {
+       sf.didRenice = append(sf.didRenice, []string{name, fmt.Sprintf("%d", nice)})
+       if sf.rejectNice10K && nice > 10000 {
+               return errors.New("scontrol: error: Invalid nice value, must be between -10000 and 10000")
+       }
+       return nil
+}
+
+func (sf *slurmFake) Cancel(name string) error {
+       sf.didCancel = append(sf.didCancel, name)
+       if len(sf.didCancel) == 1 {
+               // simulate error on first attempt
+               return errors.New("something terrible happened")
+       }
+       if sf.onCancel != nil {
+               sf.onCancel()
+       }
+       return nil
+}
+
+func (s *IntegrationSuite) integrationTest(c *C,
+       expectBatch [][]string,
+       runContainer func(*dispatch.Dispatcher, arvados.Container)) arvados.Container {
+       arvadostest.ResetEnv()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, IsNil)
+
+       // There should be one queued container
+       params := arvadosclient.Dict{
+               "filters": [][]string{{"state", "=", "Queued"}},
+       }
+       var containers arvados.ContainerList
+       err = arv.List("containers", params, &containers)
+       c.Check(err, IsNil)
+       c.Assert(len(containers.Items), Equals, 1)
+
+       s.disp.CrunchRunCommand = []string{"echo"}
+
+       ctx, cancel := context.WithCancel(context.Background())
+       doneRun := make(chan struct{})
+
+       s.disp.Dispatcher = &dispatch.Dispatcher{
+               Arv:        arv,
+               PollPeriod: time.Second,
+               RunContainer: func(disp *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) {
+                       go func() {
+                               runContainer(disp, ctr)
+                               s.slurm.queue = ""
+                               doneRun <- struct{}{}
+                       }()
+                       s.disp.runContainer(disp, ctr, status)
+                       cancel()
+               },
+       }
+
+       s.disp.slurm = &s.slurm
+       s.disp.sqCheck = &SqueueChecker{
+               Logger: logrus.StandardLogger(),
+               Period: 500 * time.Millisecond,
+               Slurm:  s.disp.slurm,
+       }
+
+       err = s.disp.Dispatcher.Run(ctx)
+       <-doneRun
+       c.Assert(err, Equals, context.Canceled)
+
+       s.disp.sqCheck.Stop()
+
+       c.Check(s.slurm.didBatch, DeepEquals, expectBatch)
+
+       // There should be no queued containers now
+       err = arv.List("containers", params, &containers)
+       c.Check(err, IsNil)
+       c.Check(len(containers.Items), Equals, 0)
+
+       // Previously "Queued" container should now be in "Complete" state
+       var container arvados.Container
+       err = arv.Get("containers", "zzzzz-dz642-queuedcontainer", nil, &container)
+       c.Check(err, IsNil)
+       return container
+}
+
+func (s *IntegrationSuite) TestNormal(c *C) {
+       s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 10000 100 PENDING Resources\n"}
+       container := s.integrationTest(c,
+               nil,
+               func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
+                       dispatcher.UpdateState(container.UUID, dispatch.Running)
+                       time.Sleep(3 * time.Second)
+                       dispatcher.UpdateState(container.UUID, dispatch.Complete)
+               })
+       c.Check(container.State, Equals, arvados.ContainerStateComplete)
+}
+
+func (s *IntegrationSuite) TestCancel(c *C) {
+       s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 10000 100 PENDING Resources\n"}
+       readyToCancel := make(chan bool)
+       s.slurm.onCancel = func() { <-readyToCancel }
+       container := s.integrationTest(c,
+               nil,
+               func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
+                       dispatcher.UpdateState(container.UUID, dispatch.Running)
+                       time.Sleep(time.Second)
+                       dispatcher.Arv.Update("containers", container.UUID,
+                               arvadosclient.Dict{
+                                       "container": arvadosclient.Dict{"priority": 0}},
+                               nil)
+                       readyToCancel <- true
+                       close(readyToCancel)
+               })
+       c.Check(container.State, Equals, arvados.ContainerStateCancelled)
+       c.Check(len(s.slurm.didCancel) > 1, Equals, true)
+       c.Check(s.slurm.didCancel[:2], DeepEquals, []string{"zzzzz-dz642-queuedcontainer", "zzzzz-dz642-queuedcontainer"})
+}
+
+func (s *IntegrationSuite) TestMissingFromSqueue(c *C) {
+       container := s.integrationTest(c,
+               [][]string{{
+                       fmt.Sprintf("--job-name=%s", "zzzzz-dz642-queuedcontainer"),
+                       fmt.Sprintf("--nice=%d", 10000),
+                       "--no-requeue",
+                       fmt.Sprintf("--mem=%d", 11445),
+                       fmt.Sprintf("--cpus-per-task=%d", 4),
+                       fmt.Sprintf("--tmp=%d", 45777),
+               }},
+               func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
+                       dispatcher.UpdateState(container.UUID, dispatch.Running)
+                       time.Sleep(3 * time.Second)
+                       dispatcher.UpdateState(container.UUID, dispatch.Complete)
+               })
+       c.Check(container.State, Equals, arvados.ContainerStateCancelled)
+}
+
+func (s *IntegrationSuite) TestSbatchFail(c *C) {
+       s.slurm = slurmFake{errBatch: errors.New("something terrible happened")}
+       container := s.integrationTest(c,
+               [][]string{{"--job-name=zzzzz-dz642-queuedcontainer", "--nice=10000", "--no-requeue", "--mem=11445", "--cpus-per-task=4", "--tmp=45777"}},
+               func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
+                       dispatcher.UpdateState(container.UUID, dispatch.Running)
+                       dispatcher.UpdateState(container.UUID, dispatch.Complete)
+               })
+       c.Check(container.State, Equals, arvados.ContainerStateComplete)
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, IsNil)
+
+       var ll arvados.LogList
+       err = arv.List("logs", arvadosclient.Dict{"filters": [][]string{
+               {"object_uuid", "=", container.UUID},
+               {"event_type", "=", "dispatch"},
+       }}, &ll)
+       c.Assert(err, IsNil)
+       c.Assert(len(ll.Items), Equals, 1)
+}
+
+type StubbedSuite struct {
+       disp Dispatcher
+}
+
+func (s *StubbedSuite) SetUpTest(c *C) {
+       s.disp = Dispatcher{}
+       s.disp.setup()
+}
+
+func (s *StubbedSuite) TestAPIErrorGettingContainers(c *C) {
+       apiStubResponses := make(map[string]arvadostest.StubResponse)
+       apiStubResponses["/arvados/v1/api_client_authorizations/current"] = arvadostest.StubResponse{200, `{"uuid":"` + arvadostest.Dispatch1AuthUUID + `"}`}
+       apiStubResponses["/arvados/v1/containers"] = arvadostest.StubResponse{500, string(`{}`)}
+
+       s.testWithServerStub(c, apiStubResponses, "echo", "error getting count of containers")
+}
+
+func (s *StubbedSuite) testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubResponse, crunchCmd string, expected string) {
+       apiStub := arvadostest.ServerStub{apiStubResponses}
+
+       api := httptest.NewServer(&apiStub)
+       defer api.Close()
+
+       arv := &arvadosclient.ArvadosClient{
+               Scheme:    "http",
+               ApiServer: api.URL[7:],
+               ApiToken:  "abc123",
+               Client:    &http.Client{Transport: &http.Transport{}},
+               Retries:   0,
+       }
+
+       buf := bytes.NewBuffer(nil)
+       logrus.SetOutput(io.MultiWriter(buf, os.Stderr))
+       defer logrus.SetOutput(os.Stderr)
+
+       s.disp.CrunchRunCommand = []string{crunchCmd}
+
+       ctx, cancel := context.WithCancel(context.Background())
+       dispatcher := dispatch.Dispatcher{
+               Arv:        arv,
+               PollPeriod: time.Second,
+               RunContainer: func(disp *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) {
+                       go func() {
+                               time.Sleep(time.Second)
+                               disp.UpdateState(ctr.UUID, dispatch.Running)
+                               disp.UpdateState(ctr.UUID, dispatch.Complete)
+                       }()
+                       s.disp.runContainer(disp, ctr, status)
+                       cancel()
+               },
+       }
+
+       go func() {
+               for i := 0; i < 80 && !strings.Contains(buf.String(), expected); i++ {
+                       time.Sleep(100 * time.Millisecond)
+               }
+               cancel()
+       }()
+
+       err := dispatcher.Run(ctx)
+       c.Assert(err, Equals, context.Canceled)
+
+       c.Check(buf.String(), Matches, `(?ms).*`+expected+`.*`)
+}
+
+func (s *StubbedSuite) TestNoSuchConfigFile(c *C) {
+       err := s.disp.readConfig("/nosuchdir89j7879/8hjwr7ojgyy7")
+       c.Assert(err, NotNil)
+}
+
+func (s *StubbedSuite) TestBadSbatchArgsConfig(c *C) {
+       tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
+       c.Check(err, IsNil)
+       defer os.Remove(tmpfile.Name())
+
+       _, err = tmpfile.Write([]byte(`{"SbatchArguments": "oops this is not a string array"}`))
+       c.Check(err, IsNil)
+
+       err = s.disp.readConfig(tmpfile.Name())
+       c.Assert(err, NotNil)
+}
+
+func (s *StubbedSuite) TestNoSuchArgInConfigIgnored(c *C) {
+       tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
+       c.Check(err, IsNil)
+       defer os.Remove(tmpfile.Name())
+
+       _, err = tmpfile.Write([]byte(`{"NoSuchArg": "Nobody loves me, not one tiny hunk."}`))
+       c.Check(err, IsNil)
+
+       err = s.disp.readConfig(tmpfile.Name())
+       c.Assert(err, IsNil)
+       c.Check(len(s.disp.SbatchArguments), Equals, 0)
+}
+
+func (s *StubbedSuite) TestReadConfig(c *C) {
+       tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
+       c.Check(err, IsNil)
+       defer os.Remove(tmpfile.Name())
+
+       args := []string{"--arg1=v1", "--arg2", "--arg3=v3"}
+       argsS := `{"SbatchArguments": ["--arg1=v1",  "--arg2", "--arg3=v3"]}`
+       _, err = tmpfile.Write([]byte(argsS))
+       c.Check(err, IsNil)
+
+       err = s.disp.readConfig(tmpfile.Name())
+       c.Assert(err, IsNil)
+       c.Check(s.disp.SbatchArguments, DeepEquals, args)
+}
+
+func (s *StubbedSuite) TestSbatchArgs(c *C) {
+       container := arvados.Container{
+               UUID:               "123",
+               RuntimeConstraints: arvados.RuntimeConstraints{RAM: 250000000, VCPUs: 2},
+               Priority:           1,
+       }
+
+       for _, defaults := range [][]string{
+               nil,
+               {},
+               {"--arg1=v1", "--arg2"},
+       } {
+               c.Logf("%#v", defaults)
+               s.disp.SbatchArguments = defaults
+
+               args, err := s.disp.sbatchArgs(container)
+               c.Check(args, DeepEquals, append(defaults, "--job-name=123", "--nice=10000", "--no-requeue", "--mem=239", "--cpus-per-task=2", "--tmp=0"))
+               c.Check(err, IsNil)
+       }
+}
+
+func (s *StubbedSuite) TestSbatchInstanceTypeConstraint(c *C) {
+       container := arvados.Container{
+               UUID:               "123",
+               RuntimeConstraints: arvados.RuntimeConstraints{RAM: 250000000, VCPUs: 2},
+               Priority:           1,
+       }
+
+       for _, trial := range []struct {
+               types      map[string]arvados.InstanceType
+               sbatchArgs []string
+               err        error
+       }{
+               // Choose node type => use --constraint arg
+               {
+                       types: map[string]arvados.InstanceType{
+                               "a1.tiny":   {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
+                               "a1.small":  {Name: "a1.small", Price: 0.04, RAM: 256000000, VCPUs: 2},
+                               "a1.medium": {Name: "a1.medium", Price: 0.08, RAM: 512000000, VCPUs: 4},
+                               "a1.large":  {Name: "a1.large", Price: 0.16, RAM: 1024000000, VCPUs: 8},
+                       },
+                       sbatchArgs: []string{"--constraint=instancetype=a1.medium"},
+               },
+               // No node types configured => no slurm constraint
+               {
+                       types:      nil,
+                       sbatchArgs: []string{"--mem=239", "--cpus-per-task=2", "--tmp=0"},
+               },
+               // No node type is big enough => error
+               {
+                       types: map[string]arvados.InstanceType{
+                               "a1.tiny": {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
+                       },
+                       err: dispatchcloud.ConstraintsNotSatisfiableError{},
+               },
+       } {
+               c.Logf("%#v", trial)
+               s.disp.cluster = &arvados.Cluster{InstanceTypes: trial.types}
+
+               args, err := s.disp.sbatchArgs(container)
+               c.Check(err == nil, Equals, trial.err == nil)
+               if trial.err == nil {
+                       c.Check(args, DeepEquals, append([]string{"--job-name=123", "--nice=10000", "--no-requeue"}, trial.sbatchArgs...))
+               } else {
+                       c.Check(len(err.(dispatchcloud.ConstraintsNotSatisfiableError).AvailableTypes), Equals, len(trial.types))
+               }
+       }
+}
+
+func (s *StubbedSuite) TestSbatchPartition(c *C) {
+       container := arvados.Container{
+               UUID:                 "123",
+               RuntimeConstraints:   arvados.RuntimeConstraints{RAM: 250000000, VCPUs: 1},
+               SchedulingParameters: arvados.SchedulingParameters{Partitions: []string{"blurb", "b2"}},
+               Priority:             1,
+       }
+
+       args, err := s.disp.sbatchArgs(container)
+       c.Check(args, DeepEquals, []string{
+               "--job-name=123", "--nice=10000", "--no-requeue",
+               "--mem=239", "--cpus-per-task=1", "--tmp=0",
+               "--partition=blurb,b2",
+       })
+       c.Check(err, IsNil)
+}
diff --git a/services/crunch-dispatch-slurm/node_type.go b/services/crunch-dispatch-slurm/node_type.go
new file mode 100644 (file)
index 0000000..62a9693
--- /dev/null
@@ -0,0 +1,72 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "log"
+       "os/exec"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+// SlurmNodeTypeFeatureKludge ensures SLURM accepts every instance
+// type name as a valid feature name, even if no instances of that
+// type have appeared yet.
+//
+// It takes advantage of some SLURM peculiarities:
+//
+// (1) A feature is valid after it has been offered by a node, even if
+// it is no longer offered by any node. So, to make a feature name
+// valid, we can add it to a dummy node ("compute0"), then remove it.
+//
+// (2) sinfo reports the feature names a node currently offers, which
+// lets us check whether the dummy node already advertises every
+// instance type before reconfiguring it.
+//
+// SlurmNodeTypeFeatureKludge does a test-and-fix operation
+// immediately, and then periodically, in case slurm restarts and
+// forgets the list of valid features. It never returns (unless there
+// are no node types configured, in which case it returns
+// immediately), so it should generally be invoked with "go".
+func SlurmNodeTypeFeatureKludge(cc *arvados.Cluster) {
+       if len(cc.InstanceTypes) == 0 {
+               return
+       }
+       var features []string
+       for _, it := range cc.InstanceTypes {
+               features = append(features, "instancetype="+it.Name)
+       }
+       for {
+               slurmKludge(features)
+               time.Sleep(2 * time.Second)
+       }
+}
+
+const slurmDummyNode = "compute0"
+
+func slurmKludge(features []string) {
+       allFeatures := strings.Join(features, ",")
+
+       cmd := exec.Command("sinfo", "--nodes="+slurmDummyNode, "--format=%f", "--noheader")
+       out, err := cmd.CombinedOutput()
+       if err != nil {
+               log.Printf("running %q %q: %s (output was %q)", cmd.Path, cmd.Args, err, out)
+               return
+       }
+       if string(out) == allFeatures+"\n" {
+               // Already configured correctly, nothing to do.
+               return
+       }
+
+       log.Printf("configuring node %q with all node type features", slurmDummyNode)
+       cmd = exec.Command("scontrol", "update", "NodeName="+slurmDummyNode, "Features="+allFeatures)
+       log.Printf("running: %q %q", cmd.Path, cmd.Args)
+       out, err = cmd.CombinedOutput()
+       if err != nil {
+               log.Printf("error: scontrol: %s (output was %q)", err, out)
+       }
+}
diff --git a/services/crunch-dispatch-slurm/priority.go b/services/crunch-dispatch-slurm/priority.go
new file mode 100644 (file)
index 0000000..2312ce5
--- /dev/null
@@ -0,0 +1,56 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+const defaultSpread int64 = 10
+
+// wantNice calculates appropriate nice values for a set of SLURM
+// jobs. The returned slice will have len(jobs) elements.
+//
+// spread is a positive amount of space to leave between adjacent
+// priorities when making adjustments. Generally, increasing spread
+// reduces the total number of adjustments made. A smaller spread
+// produces lower nice values, which is useful for old SLURM versions
+// with a limited "nice" range and for sites where SLURM is also
+// running non-Arvados jobs with low nice values.
+//
+// If spread<1, a sensible default (10) is used.
+func wantNice(jobs []*slurmJob, spread int64) []int64 {
+       if len(jobs) == 0 {
+               return nil
+       }
+
+       if spread < 1 {
+               spread = defaultSpread
+       }
+       renice := make([]int64, len(jobs))
+
+       // highest usable priority (without going out of order)
+       var target int64
+       for i, job := range jobs {
+               if i == 0 {
+                       // renice[0] is always zero, so our highest
+                       // priority container gets the highest
+                       // possible slurm priority.
+                       target = job.priority + job.nice
+               } else if space := target - job.priority; space >= 0 && space < (spread-1)*10 {
+                       // Ordering is correct, and interval isn't too
+                       // large. Leave existing nice value alone.
+                       renice[i] = job.nice
+                       target = job.priority
+               } else {
+                       target -= (spread - 1)
+                       if possible := job.priority + job.nice; target > possible {
+                               // renice[i] is already 0, that's the
+                               // best we can do
+                               target = possible
+                       } else {
+                               renice[i] = possible - target
+                       }
+               }
+               target--
+       }
+       return renice
+}
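A worked example, taken from the tests that follow: with spread=10 and jobs whose (priority, nice) pairs are (4000, 0), (3000, 999), (2000, 1998), the first job keeps nice 0 and sets target to 4000. Each later job gets renice = (priority + nice) - target, i.e. 9 and 18, so the effective slurm priorities settle at 4000, 3990 and 3980: exactly spread apart and in the original order.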
diff --git a/services/crunch-dispatch-slurm/priority_test.go b/services/crunch-dispatch-slurm/priority_test.go
new file mode 100644 (file)
index 0000000..e80984c
--- /dev/null
@@ -0,0 +1,143 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&PrioritySuite{})
+
+type PrioritySuite struct{}
+
+func (s *PrioritySuite) TestReniceCorrect(c *C) {
+       for _, test := range []struct {
+               spread int64
+               in     []*slurmJob
+               out    []int64
+       }{
+               {
+                       0,
+                       nil,
+                       nil,
+               },
+               {
+                       0,
+                       []*slurmJob{},
+                       nil,
+               },
+               {
+                       10,
+                       []*slurmJob{{priority: 4294000111, nice: 10000}},
+                       []int64{0},
+               },
+               {
+                       10,
+                       []*slurmJob{
+                               {priority: 4294000111, nice: 10000},
+                               {priority: 4294000111, nice: 10000},
+                               {priority: 4294000111, nice: 10000},
+                               {priority: 4294000111, nice: 10000},
+                       },
+                       []int64{0, 10, 20, 30},
+               },
+               { // smaller spread than necessary, but correctly ordered => leave nice alone
+                       10,
+                       []*slurmJob{
+                               {priority: 4294000113, nice: 0},
+                               {priority: 4294000112, nice: 1},
+                               {priority: 4294000111, nice: 99},
+                       },
+                       []int64{0, 1, 99},
+               },
+               { // larger spread than necessary, but less than 10x => leave nice alone
+                       10,
+                       []*slurmJob{
+                               {priority: 4294000144, nice: 0},
+                               {priority: 4294000122, nice: 20},
+                               {priority: 4294000111, nice: 30},
+                       },
+                       []int64{0, 20, 30},
+               },
+               { // > 10x spread => reduce nice to achieve spread=10
+                       10,
+                       []*slurmJob{
+                               {priority: 4000, nice: 0},    // max pri 4000
+                               {priority: 3000, nice: 999},  // max pri 3999
+                               {priority: 2000, nice: 1998}, // max pri 3998
+                       },
+                       []int64{0, 9, 18},
+               },
+               { // > 10x spread, but spread=10 is impossible without negative nice
+                       10,
+                       []*slurmJob{
+                               {priority: 4000, nice: 0},    // max pri 4000
+                               {priority: 3000, nice: 500},  // max pri 3500
+                               {priority: 2000, nice: 2000}, // max pri 4000
+                       },
+                       []int64{0, 0, 510},
+               },
+               { // default spread, needs reorder
+                       0,
+                       []*slurmJob{
+                               {priority: 4000, nice: 0}, // max pri 4000
+                               {priority: 5000, nice: 0}, // max pri 5000
+                               {priority: 6000, nice: 0}, // max pri 6000
+                       },
+                       []int64{0, 1000 + defaultSpread, 2000 + defaultSpread*2},
+               },
+               { // minimum spread
+                       1,
+                       []*slurmJob{
+                               {priority: 4000, nice: 0}, // max pri 4000
+                               {priority: 5000, nice: 0}, // max pri 5000
+                               {priority: 6000, nice: 0}, // max pri 6000
+                               {priority: 3000, nice: 0}, // max pri 3000
+                       },
+                       []int64{0, 1001, 2002, 0},
+               },
+       } {
+               c.Logf("spread=%d %+v -> %+v", test.spread, test.in, test.out)
+               c.Check(wantNice(test.in, test.spread), DeepEquals, test.out)
+
+               if len(test.in) == 0 {
+                       continue
+               }
+               // After making the adjustments, calling wantNice
+               // again should return the same recommendations.
+               updated := make([]*slurmJob, len(test.in))
+               for i, in := range test.in {
+                       updated[i] = &slurmJob{
+                               nice:     test.out[i],
+                               priority: in.priority + in.nice - test.out[i],
+                       }
+               }
+               c.Check(wantNice(updated, test.spread), DeepEquals, test.out)
+       }
+}
+
+func (s *PrioritySuite) TestReniceChurn(c *C) {
+       const spread = 10
+       jobs := make([]*slurmJob, 1000)
+       for i := range jobs {
+               jobs[i] = &slurmJob{priority: 4294000000 - int64(i), nice: 10000}
+       }
+       adjustments := 0
+       queue := jobs
+       for len(queue) > 0 {
+               renice := wantNice(queue, spread)
+               for i := range queue {
+                       if renice[i] == queue[i].nice {
+                               continue
+                       }
+                       queue[i].priority += queue[i].nice - renice[i]
+                       queue[i].nice = renice[i]
+                       adjustments++
+               }
+               queue = queue[1:]
+       }
+       c.Logf("processed queue of %d with %d renice ops", len(jobs), adjustments)
+       c.Check(adjustments < len(jobs)*len(jobs)/10, Equals, true)
+}
diff --git a/services/crunch-dispatch-slurm/script.go b/services/crunch-dispatch-slurm/script.go
new file mode 100644 (file)
index 0000000..f559104
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "strings"
+)
+
+func execScript(args []string) string {
+       s := "#!/bin/sh\nexec"
+       for _, w := range args {
+               s += ` '`
+               s += strings.Replace(w, `'`, `'\''`, -1)
+               s += `'`
+       }
+       return s + "\n"
+}
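For example, execScript([]string{"crunch-run", "zzzzz-dz642-zzzzzzzzzzzzzzz"}) returns:

    #!/bin/sh
    exec 'crunch-run' 'zzzzz-dz642-zzzzzzzzzzzzzzz'

Each argument is single-quoted, and any embedded single quote becomes '\'' (close the quote, emit an escaped quote, reopen), as the tests below verify.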
diff --git a/services/crunch-dispatch-slurm/script_test.go b/services/crunch-dispatch-slurm/script_test.go
new file mode 100644 (file)
index 0000000..a21aeed
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&ScriptSuite{})
+
+type ScriptSuite struct{}
+
+func (s *ScriptSuite) TestExecScript(c *C) {
+       for _, test := range []struct {
+               args   []string
+               script string
+       }{
+               {nil, `exec`},
+               {[]string{`foo`}, `exec 'foo'`},
+               {[]string{`foo`, `bar baz`}, `exec 'foo' 'bar baz'`},
+               {[]string{`foo"`, "'waz 'qux\n"}, `exec 'foo"' ''\''waz '\''qux` + "\n" + `'`},
+       } {
+               c.Logf("%+v -> %+v", test.args, test.script)
+               c.Check(execScript(test.args), Equals, "#!/bin/sh\n"+test.script+"\n")
+       }
+}
diff --git a/services/crunch-dispatch-slurm/slurm.go b/services/crunch-dispatch-slurm/slurm.go
new file mode 100644 (file)
index 0000000..791f294
--- /dev/null
@@ -0,0 +1,88 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "fmt"
+       "io"
+       "log"
+       "os/exec"
+       "strings"
+)
+
+type Slurm interface {
+       Batch(script io.Reader, args []string) error
+       Cancel(name string) error
+       QueueCommand(args []string) *exec.Cmd
+       Release(name string) error
+       Renice(name string, nice int64) error
+}
+
+type slurmCLI struct {
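+       // runSemaphore limits the number of slurm commands running
+       // concurrently (see run()).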
+       runSemaphore chan bool
+}
+
+func NewSlurmCLI() *slurmCLI {
+       return &slurmCLI{
+               runSemaphore: make(chan bool, 3),
+       }
+}
+
+func (scli *slurmCLI) Batch(script io.Reader, args []string) error {
+       return scli.run(script, "sbatch", args)
+}
+
+func (scli *slurmCLI) Cancel(name string) error {
+       for _, args := range [][]string{
+               // If the slurm job hasn't started yet, remove it from
+               // the queue.
+               {"--state=pending"},
+               // If the slurm job has started, send SIGTERM. If we
+               // cancel a running job without a --signal argument,
+               // slurm will send SIGTERM and then (after some
+               // site-configured interval) SIGKILL. This would kill
+               // crunch-run without stopping the container, which we
+               // don't want.
+               {"--batch", "--signal=TERM", "--state=running"},
+               {"--batch", "--signal=TERM", "--state=suspended"},
+       } {
+               err := scli.run(nil, "scancel", append([]string{"--name=" + name}, args...))
+               if err != nil {
+                       // scancel exits 0 if no job matches the given
+                       // name and state. Any error from scancel here
+                       // really indicates something is wrong.
+                       return err
+               }
+       }
+       return nil
+}
+
+func (scli *slurmCLI) QueueCommand(args []string) *exec.Cmd {
+       return exec.Command("squeue", args...)
+}
+
+func (scli *slurmCLI) Release(name string) error {
+       return scli.run(nil, "scontrol", []string{"release", "Name=" + name})
+}
+
+func (scli *slurmCLI) Renice(name string, nice int64) error {
+       return scli.run(nil, "scontrol", []string{"update", "JobName=" + name, fmt.Sprintf("Nice=%d", nice)})
+}
+
+func (scli *slurmCLI) run(stdin io.Reader, prog string, args []string) error {
+       scli.runSemaphore <- true
+       defer func() { <-scli.runSemaphore }()
+       cmd := exec.Command(prog, args...)
+       cmd.Stdin = stdin
+       out, err := cmd.CombinedOutput()
+       outTrim := strings.TrimSpace(string(out))
+       if err != nil || len(out) > 0 {
+               log.Printf("%q %q: %q", cmd.Path, cmd.Args, outTrim)
+       }
+       if err != nil {
+               err = fmt.Errorf("%s: %s (%q)", cmd.Path, err, outTrim)
+       }
+       return err
+}
diff --git a/services/crunch-dispatch-slurm/squeue.go b/services/crunch-dispatch-slurm/squeue.go
new file mode 100644 (file)
index 0000000..5aee7e0
--- /dev/null
@@ -0,0 +1,248 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "fmt"
+       "sort"
+       "strings"
+       "sync"
+       "time"
+)
+
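+// slurm15NiceLimit is the maximum nice value accepted by SLURM
+// versions 15.x and earlier; newer versions accept larger values.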
+const slurm15NiceLimit int64 = 10000
+
+type slurmJob struct {
+       uuid         string
+       wantPriority int64
+       priority     int64 // current slurm priority (incorporates nice value)
+       nice         int64 // current slurm nice value
+       hitNiceLimit bool
+}
+
+// SqueueChecker implements an asynchronous polling monitor of the
+// SLURM queue, using the command 'squeue'.
+type SqueueChecker struct {
+       Logger         logger
+       Period         time.Duration
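+       // PrioritySpread is the minimum gap to leave between the
+       // effective slurm priorities of adjacent jobs when adjusting
+       // nice values (see wantNice).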
+       PrioritySpread int64
+       Slurm          Slurm
+       queue          map[string]*slurmJob
+       startOnce      sync.Once
+       done           chan struct{}
+       lock           sync.RWMutex
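+       // notify is broadcast after each successful squeue update;
+       // HasUUID, SetPriority, and All wait on it.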
+       notify         sync.Cond
+}
+
+// HasUUID checks if a given container UUID is in the slurm queue.
+// This does not run squeue directly, but instead blocks until woken
+// up by the next successful squeue update.
+func (sqc *SqueueChecker) HasUUID(uuid string) bool {
+       sqc.startOnce.Do(sqc.start)
+
+       sqc.lock.RLock()
+       defer sqc.lock.RUnlock()
+
+       // block until next squeue broadcast signaling an update.
+       sqc.notify.Wait()
+       _, exists := sqc.queue[uuid]
+       return exists
+}
+
+// SetPriority sets or updates the desired (Arvados) priority for a
+// container.
+func (sqc *SqueueChecker) SetPriority(uuid string, want int64) {
+       sqc.startOnce.Do(sqc.start)
+
+       sqc.lock.RLock()
+       job := sqc.queue[uuid]
+       if job == nil {
+               // Wait in case the slurm job was just submitted and
+               // will appear in the next squeue update.
+               sqc.notify.Wait()
+               job = sqc.queue[uuid]
+       }
+       needUpdate := job != nil && job.wantPriority != want
+       sqc.lock.RUnlock()
+
+       if needUpdate {
+               sqc.lock.Lock()
+               job.wantPriority = want
+               sqc.lock.Unlock()
+       }
+}
+
+// reniceAll adjusts slurm job nice values as needed to ensure slurm
+// priority order matches Arvados priority order.
+func (sqc *SqueueChecker) reniceAll() {
+       // This is slow (it shells out to scontrol many times) and no
+       // other goroutines update sqc.queue or any of the job fields
+       // we use here, so we don't acquire a lock.
+       jobs := make([]*slurmJob, 0, len(sqc.queue))
+       for _, j := range sqc.queue {
+               if j.wantPriority == 0 {
+                       // SLURM job with unknown Arvados priority
+                       // (perhaps it's not an Arvados job)
+                       continue
+               }
+               if j.priority <= 2*slurm15NiceLimit {
+                       // SLURM <= 15.x implements "hold" by setting
+                       // priority to 0. If we include held jobs
+                       // here, we'll end up trying to push other
+                       // jobs below them using negative priority,
+                       // which won't help anything.
+                       continue
+               }
+               jobs = append(jobs, j)
+       }
+
+       sort.Slice(jobs, func(i, j int) bool {
+               if jobs[i].wantPriority != jobs[j].wantPriority {
+                       return jobs[i].wantPriority > jobs[j].wantPriority
+               }
+               // Break ties with container uuid -- otherwise, the
+               // ordering would change from one interval to the
+               // next, and we'd do many pointless slurm queue
+               // rearrangements.
+               return jobs[i].uuid > jobs[j].uuid
+       })
+       renice := wantNice(jobs, sqc.PrioritySpread)
+       for i, job := range jobs {
+               niceNew := renice[i]
+               if job.hitNiceLimit && niceNew > slurm15NiceLimit {
+                       niceNew = slurm15NiceLimit
+               }
+               if niceNew == job.nice {
+                       continue
+               }
+               err := sqc.Slurm.Renice(job.uuid, niceNew)
+               if err != nil && niceNew > slurm15NiceLimit && strings.Contains(err.Error(), "Invalid nice value") {
+                       sqc.Logger.Warnf("container %q clamping nice values at %d, priority order will not be correct -- see https://dev.arvados.org/projects/arvados/wiki/SLURM_integration#Limited-nice-values-SLURM-15", job.uuid, slurm15NiceLimit)
+                       job.hitNiceLimit = true
+               }
+       }
+}
+
+// Stop stops the squeue monitoring goroutine. Do not call HasUUID
+// after calling Stop.
+func (sqc *SqueueChecker) Stop() {
+       if sqc.done != nil {
+               close(sqc.done)
+       }
+}
+
+// check gets the names of jobs in the SLURM queue (running and
+// queued). If it succeeds, it updates sqc.queue and wakes up any
+// goroutines that are waiting in HasUUID(), All(), or SetPriority().
+func (sqc *SqueueChecker) check() {
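+       // Format fields match the Sscan below: %j job name, %y nice
+       // value, %Q priority, %T state, %r reason.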
+       cmd := sqc.Slurm.QueueCommand([]string{"--all", "--noheader", "--format=%j %y %Q %T %r"})
+       stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
+       cmd.Stdout, cmd.Stderr = stdout, stderr
+       if err := cmd.Run(); err != nil {
+               sqc.Logger.Warnf("Error running %q %q: %s %q", cmd.Path, cmd.Args, err, stderr.String())
+               return
+       }
+
+       lines := strings.Split(stdout.String(), "\n")
+       newq := make(map[string]*slurmJob, len(lines))
+       for _, line := range lines {
+               if line == "" {
+                       continue
+               }
+               var uuid, state, reason string
+               var n, p int64
+               if _, err := fmt.Sscan(line, &uuid, &n, &p, &state, &reason); err != nil {
+                       sqc.Logger.Warnf("ignoring unparsed line in squeue output: %q", line)
+                       continue
+               }
+
+               // No other goroutines write to jobs' priority or nice
+               // fields, so we can read and write them without
+               // locks.
+               replacing, ok := sqc.queue[uuid]
+               if !ok {
+                       replacing = &slurmJob{uuid: uuid}
+               }
+               replacing.priority = p
+               replacing.nice = n
+               newq[uuid] = replacing
+
+               if state == "PENDING" && ((reason == "BadConstraints" && p <= 2*slurm15NiceLimit) || reason == "launch failed requeued held") && replacing.wantPriority > 0 {
+                       // When using SLURM 14.x or 15.x, our queued
+                       // jobs land in this state when "scontrol
+                       // reconfigure" invalidates their feature
+                       // constraints by clearing all node features.
+                       // They stay in this state even after the
+                       // features reappear, until we run "scontrol
+                       // release {jobid}". Priority is usually 0 in
+                       // this state, but sometimes (due to a race
+                       // with nice adjustments?) it's a small
+                       // positive value.
+                       //
+                       // "scontrol release" is silent and successful
+                       // regardless of whether the features have
+                       // reappeared, so rather than second-guessing
+                       // whether SLURM is ready, we just keep trying
+                       // this until it works.
+                       //
+                       // "launch failed requeued held" seems to be
+                       // another manifestation of this problem,
+                       // resolved the same way.
+                       sqc.Logger.Printf("releasing held job %q (priority=%d, state=%q, reason=%q)", uuid, p, state, reason)
+                       sqc.Slurm.Release(uuid)
+               } else if state != "RUNNING" && p <= 2*slurm15NiceLimit && replacing.wantPriority > 0 {
+                       sqc.Logger.Warnf("job %q has low priority %d, nice %d, state %q, reason %q", uuid, p, n, state, reason)
+               }
+       }
+       sqc.lock.Lock()
+       sqc.queue = newq
+       sqc.lock.Unlock()
+       sqc.notify.Broadcast()
+}
+
+// start initializes the checker and starts a goroutine that calls
+// check() once per sqc.Period until terminated by calling Stop().
+func (sqc *SqueueChecker) start() {
+       sqc.notify.L = sqc.lock.RLocker()
+       sqc.done = make(chan struct{})
+       go func() {
+               ticker := time.NewTicker(sqc.Period)
+               for {
+                       select {
+                       case <-sqc.done:
+                               ticker.Stop()
+                               return
+                       case <-ticker.C:
+                               sqc.check()
+                               sqc.reniceAll()
+                               select {
+                               case <-ticker.C:
+                                       // If this iteration took
+                                       // longer than sqc.Period,
+                                       // consume the next tick and
+                                       // wait. Otherwise we would
+                                       // starve other goroutines.
+                               default:
+                               }
+                       }
+               }
+       }()
+}
+
+// All waits for the next squeue invocation, and returns all job
+// names reported by squeue.
+func (sqc *SqueueChecker) All() []string {
+       sqc.startOnce.Do(sqc.start)
+       sqc.lock.RLock()
+       defer sqc.lock.RUnlock()
+       sqc.notify.Wait()
+       var uuids []string
+       for u := range sqc.queue {
+               uuids = append(uuids, u)
+       }
+       return uuids
+}
diff --git a/services/crunch-dispatch-slurm/squeue_test.go b/services/crunch-dispatch-slurm/squeue_test.go
new file mode 100644 (file)
index 0000000..ce74fe6
--- /dev/null
@@ -0,0 +1,217 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "time"
+
+       "github.com/sirupsen/logrus"
+       . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&SqueueSuite{})
+
+type SqueueSuite struct{}
+
+func (s *SqueueSuite) TestReleasePending(c *C) {
+       uuids := []string{
+               "zzzzz-dz642-fake0fake0fake0",
+               "zzzzz-dz642-fake1fake1fake1",
+               "zzzzz-dz642-fake2fake2fake2",
+       }
+       slurm := &slurmFake{
+               queue: uuids[0] + " 10000 4294000000 PENDING Resources\n" + uuids[1] + " 10000 4294000111 PENDING Resources\n" + uuids[2] + " 10000 0 PENDING BadConstraints\n",
+       }
+       sqc := &SqueueChecker{
+               Logger: logrus.StandardLogger(),
+               Slurm:  slurm,
+               Period: time.Hour,
+       }
+       sqc.startOnce.Do(sqc.start)
+       defer sqc.Stop()
+
+       done := make(chan struct{})
+       go func() {
+               for _, u := range uuids {
+                       sqc.SetPriority(u, 1)
+               }
+               close(done)
+       }()
+       callUntilReady(sqc.check, done)
+
+       slurm.didRelease = nil
+       sqc.check()
+       c.Check(slurm.didRelease, DeepEquals, []string{uuids[2]})
+}
+
+func (s *SqueueSuite) TestReniceAll(c *C) {
+       uuids := []string{"zzzzz-dz642-fake0fake0fake0", "zzzzz-dz642-fake1fake1fake1", "zzzzz-dz642-fake2fake2fake2"}
+       for _, test := range []struct {
+               spread int64
+               squeue string
+               want   map[string]int64
+               expect [][]string
+       }{
+               {
+                       spread: 1,
+                       squeue: uuids[0] + " 10000 4294000000 PENDING Resources\n",
+                       want:   map[string]int64{uuids[0]: 1},
+                       expect: [][]string{{uuids[0], "0"}},
+               },
+               { // fake0 priority is too high
+                       spread: 1,
+                       squeue: uuids[0] + " 10000 4294000777 PENDING Resources\n" + uuids[1] + " 10000 4294000444 PENDING Resources\n",
+                       want:   map[string]int64{uuids[0]: 1, uuids[1]: 999},
+                       expect: [][]string{{uuids[1], "0"}, {uuids[0], "334"}},
+               },
+               { // specify spread
+                       spread: 100,
+                       squeue: uuids[0] + " 10000 4294000777 PENDING Resources\n" + uuids[1] + " 10000 4294000444 PENDING Resources\n",
+                       want:   map[string]int64{uuids[0]: 1, uuids[1]: 999},
+                       expect: [][]string{{uuids[1], "0"}, {uuids[0], "433"}},
+               },
+               { // ignore fake2 because SetPriority() not called
+                       spread: 1,
+                       squeue: uuids[0] + " 10000 4294000000 PENDING Resources\n" + uuids[1] + " 10000 4294000111 PENDING Resources\n" + uuids[2] + " 10000 4294000222 PENDING Resources\n",
+                       want:   map[string]int64{uuids[0]: 999, uuids[1]: 1},
+                       expect: [][]string{{uuids[0], "0"}, {uuids[1], "112"}},
+               },
+               { // ignore fake2 because slurm priority=0
+                       spread: 1,
+                       squeue: uuids[0] + " 10000 4294000000 PENDING Resources\n" + uuids[1] + " 10000 4294000111 PENDING Resources\n" + uuids[2] + " 10000 0 PENDING Resources\n",
+                       want:   map[string]int64{uuids[0]: 999, uuids[1]: 1, uuids[2]: 997},
+                       expect: [][]string{{uuids[0], "0"}, {uuids[1], "112"}},
+               },
+       } {
+               c.Logf("spread=%d squeue=%q want=%v -> expect=%v", test.spread, test.squeue, test.want, test.expect)
+               slurm := &slurmFake{
+                       queue: test.squeue,
+               }
+               sqc := &SqueueChecker{
+                       Logger:         logrus.StandardLogger(),
+                       Slurm:          slurm,
+                       PrioritySpread: test.spread,
+                       Period:         time.Hour,
+               }
+               sqc.startOnce.Do(sqc.start)
+               sqc.check()
+               for uuid, pri := range test.want {
+                       sqc.SetPriority(uuid, pri)
+               }
+               sqc.reniceAll()
+               c.Check(slurm.didRenice, DeepEquals, test.expect)
+               sqc.Stop()
+       }
+}
+
+// If a limited nice range prevents desired priority adjustments, give
+// up and clamp nice to 10K.
+func (s *SqueueSuite) TestReniceInvalidNiceValue(c *C) {
+       uuids := []string{"zzzzz-dz642-fake0fake0fake0", "zzzzz-dz642-fake1fake1fake1", "zzzzz-dz642-fake2fake2fake2"}
+       slurm := &slurmFake{
+               queue:         uuids[0] + " 0 4294000222 PENDING Resources\n" + uuids[1] + " 0 4294555222 PENDING Resources\n",
+               rejectNice10K: true,
+       }
+       sqc := &SqueueChecker{
+               Logger:         logrus.StandardLogger(),
+               Slurm:          slurm,
+               PrioritySpread: 1,
+               Period:         time.Hour,
+       }
+       sqc.startOnce.Do(sqc.start)
+       sqc.check()
+       sqc.SetPriority(uuids[0], 2)
+       sqc.SetPriority(uuids[1], 1)
+
+       // First attempt should renice to 555001, which will fail
+       sqc.reniceAll()
+       c.Check(slurm.didRenice, DeepEquals, [][]string{{uuids[1], "555001"}})
+
+       // Next attempt should renice to 10K, which will succeed
+       sqc.reniceAll()
+       c.Check(slurm.didRenice, DeepEquals, [][]string{{uuids[1], "555001"}, {uuids[1], "10000"}})
+       // ...so we'll change the squeue response to reflect the
+       // updated priority+nice, and make sure sqc sees that...
+       slurm.queue = uuids[0] + " 0 4294000222 PENDING Resources\n" + uuids[1] + " 10000 4294545222 PENDING Resources\n"
+       sqc.check()
+
+       // Next attempt should leave nice alone because it's already
+       // at the 10K limit
+       sqc.reniceAll()
+       c.Check(slurm.didRenice, DeepEquals, [][]string{{uuids[1], "555001"}, {uuids[1], "10000"}})
+
+       // Back to normal if desired nice value falls below 10K
+       slurm.queue = uuids[0] + " 0 4294000222 PENDING Resources\n" + uuids[1] + " 10000 4294000111 PENDING Resources\n"
+       sqc.check()
+       sqc.reniceAll()
+       c.Check(slurm.didRenice, DeepEquals, [][]string{{uuids[1], "555001"}, {uuids[1], "10000"}, {uuids[1], "9890"}})
+
+       sqc.Stop()
+}
+
+// If the given UUID isn't in the slurm queue yet, SetPriority()
+// should wait for it to appear on the very next poll, and give up if
+// it still hasn't appeared.
+func (s *SqueueSuite) TestSetPriorityBeforeQueued(c *C) {
+       uuidGood := "zzzzz-dz642-fake0fake0fake0"
+       uuidBad := "zzzzz-dz642-fake1fake1fake1"
+
+       slurm := &slurmFake{}
+       sqc := &SqueueChecker{
+               Logger: logrus.StandardLogger(),
+               Slurm:  slurm,
+               Period: time.Hour,
+       }
+       sqc.startOnce.Do(sqc.start)
+       sqc.Stop()
+       sqc.check()
+
+       done := make(chan struct{})
+       go func() {
+               sqc.SetPriority(uuidGood, 123)
+               sqc.SetPriority(uuidBad, 345)
+               close(done)
+       }()
+       c.Check(sqc.queue[uuidGood], IsNil)
+       c.Check(sqc.queue[uuidBad], IsNil)
+       timeout := time.NewTimer(time.Second)
+       defer timeout.Stop()
+       tick := time.NewTicker(time.Millisecond)
+       defer tick.Stop()
+       for {
+               select {
+               case <-tick.C:
+                       slurm.queue = uuidGood + " 0 12345 PENDING Resources\n"
+                       sqc.check()
+
+                       // Avoid immediately selecting this case again
+                       // on the next iteration if check() took
+                       // longer than one tick.
+                       select {
+                       case <-tick.C:
+                       default:
+                       }
+               case <-timeout.C:
+                       c.Fatal("timed out")
+               case <-done:
+                       c.Assert(sqc.queue[uuidGood], NotNil)
+                       c.Check(sqc.queue[uuidGood].wantPriority, Equals, int64(123))
+                       c.Check(sqc.queue[uuidBad], IsNil)
+                       return
+               }
+       }
+}
+
+func callUntilReady(fn func(), done <-chan struct{}) {
+       tick := time.NewTicker(time.Millisecond)
+       defer tick.Stop()
+       for {
+               select {
+               case <-done:
+                       return
+               case <-tick.C:
+                       fn()
+               }
+       }
+}
diff --git a/services/crunch-dispatch-slurm/usage.go b/services/crunch-dispatch-slurm/usage.go
new file mode 100644 (file)
index 0000000..bcfa5b8
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "flag"
+       "fmt"
+       "os"
+)
+
+var exampleConfigFile = []byte(`
+    {
+       "Client": {
+           "APIHost": "zzzzz.arvadosapi.com",
+           "AuthToken": "xyzzy",
+           "Insecure": false
+           "KeepServiceURIs": [],
+       },
+       "CrunchRunCommand": ["crunch-run"],
+       "PollPeriod": "10s",
+       "SbatchArguments": ["--partition=foo", "--exclude=node13"],
+       "ReserveExtraRAM": 268435456,
+       "BatchSize": 10000
+    }`)
+
+func usage(fs *flag.FlagSet) {
+       fmt.Fprintf(os.Stderr, `
+crunch-dispatch-slurm runs queued Arvados containers by submitting
+SLURM batch jobs.
+
+Options:
+`)
+       fs.PrintDefaults()
+       fmt.Fprintf(os.Stderr, `
+Example config file:
+%s
+`, exampleConfigFile)
+}
diff --git a/services/crunch-run/background.go b/services/crunch-run/background.go
new file mode 100644 (file)
index 0000000..b3c530e
--- /dev/null
@@ -0,0 +1,218 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "fmt"
+       "io"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "strings"
+       "syscall"
+       "time"
+)
+
+var (
+       lockdir    = "/var/lock"
+       lockprefix = "crunch-run-"
+       locksuffix = ".lock"
+)
+
+// procinfo is saved in each process's lockfile.
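+// For example: {"UUID":"zzzzz-dz642-fake0fake0fake0","PID":12345}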
+type procinfo struct {
+       UUID string
+       PID  int
+}
+
+// Detach acquires a lock for the given uuid, and starts the current
+// program as a child process (with -no-detach prepended to the given
+// arguments so the child knows not to detach again). The lock is
+// passed along to the child process.
+//
+// Stdout and stderr in the child process are sent to the systemd
+// journal using the systemd-cat program.
+func Detach(uuid string, args []string, stdout, stderr io.Writer) int {
+       return exitcode(stderr, detach(uuid, args, stdout, stderr))
+}
+func detach(uuid string, args []string, stdout, stderr io.Writer) error {
+       lockfile, err := func() (*os.File, error) {
+               // We must hold the dir-level lock between
+               // opening/creating the lockfile and acquiring LOCK_EX
+               // on it, to avoid racing with the ListProcess's
+               // alive-checking and garbage collection.
+               dirlock, err := lockall()
+               if err != nil {
+                       return nil, err
+               }
+               defer dirlock.Close()
+               lockfilename := filepath.Join(lockdir, lockprefix+uuid+locksuffix)
+               lockfile, err := os.OpenFile(lockfilename, os.O_CREATE|os.O_RDWR, 0700)
+               if err != nil {
+                       return nil, fmt.Errorf("open %s: %s", lockfilename, err)
+               }
+               err = syscall.Flock(int(lockfile.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
+               if err != nil {
+                       lockfile.Close()
+                       return nil, fmt.Errorf("lock %s: %s", lockfilename, err)
+               }
+               return lockfile, nil
+       }()
+       if err != nil {
+               return err
+       }
+       defer lockfile.Close()
+       lockfile.Truncate(0)
+
+       cmd := exec.Command("systemd-cat", append([]string{"--identifier=crunch-run", args[0], "-no-detach"}, args[1:]...)...)
+       // Child inherits lockfile.
+       cmd.ExtraFiles = []*os.File{lockfile}
+       // Ensure child isn't interrupted even if we receive signals
+       // from parent (sshd) while sending lockfile content to
+       // caller.
+       cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
+       err = cmd.Start()
+       if err != nil {
+               return fmt.Errorf("exec %s: %s", cmd.Path, err)
+       }
+
+       w := io.MultiWriter(stdout, lockfile)
+       return json.NewEncoder(w).Encode(procinfo{
+               UUID: uuid,
+               PID:  cmd.Process.Pid,
+       })
+}
+
+// KillProcess finds the crunch-run process corresponding to the given
+// uuid, and sends the given signal to it. It then waits up to 1
+// second for the process to die. It returns 0 if the process is
+// successfully killed or didn't exist in the first place.
+func KillProcess(uuid string, signal syscall.Signal, stdout, stderr io.Writer) int {
+       return exitcode(stderr, kill(uuid, signal, stdout, stderr))
+}
+
+func kill(uuid string, signal syscall.Signal, stdout, stderr io.Writer) error {
+       path := filepath.Join(lockdir, lockprefix+uuid+locksuffix)
+       f, err := os.Open(path)
+       if os.IsNotExist(err) {
+               return nil
+       } else if err != nil {
+               return fmt.Errorf("open %s: %s", path, err)
+       }
+       defer f.Close()
+
+       var pi procinfo
+       err = json.NewDecoder(f).Decode(&pi)
+       if err != nil {
+               return fmt.Errorf("decode %s: %s\n", path, err)
+       }
+
+       if pi.UUID != uuid || pi.PID == 0 {
+               return fmt.Errorf("%s: bogus procinfo: %+v", path, pi)
+       }
+
+       proc, err := os.FindProcess(pi.PID)
+       if err != nil {
+               return fmt.Errorf("%s: find process %d: %s", uuid, pi.PID, err)
+       }
+
+       err = proc.Signal(signal)
+       for deadline := time.Now().Add(time.Second); err == nil && time.Now().Before(deadline); time.Sleep(time.Second / 100) {
+               err = proc.Signal(syscall.Signal(0))
+       }
+       if err == nil {
+               return fmt.Errorf("%s: pid %d: sent signal %d (%s) but process is still alive", uuid, pi.PID, signal, signal)
+       }
+       fmt.Fprintf(stderr, "%s: pid %d: %s\n", uuid, pi.PID, err)
+       return nil
+}
+
+// ListProcesses lists UUIDs of active crunch-run processes to
+// stdout, one per line, and removes any stale lockfiles it finds.
+func ListProcesses(stdout, stderr io.Writer) int {
+       // filepath.Walk does not follow symlinks, so we must walk
+       // lockdir+"/." in case lockdir itself is a symlink.
+       walkdir := lockdir + "/."
+       return exitcode(stderr, filepath.Walk(walkdir, func(path string, info os.FileInfo, err error) error {
+               if err != nil {
+                       return err
+               }
+               if info.IsDir() && path != walkdir {
+                       return filepath.SkipDir
+               }
+               if name := info.Name(); !strings.HasPrefix(name, lockprefix) || !strings.HasSuffix(name, locksuffix) {
+                       return nil
+               }
+               if info.Size() == 0 {
+                       // race: process has opened/locked but hasn't yet written pid/uuid
+                       return nil
+               }
+
+               f, err := os.Open(path)
+               if err != nil {
+                       return nil
+               }
+               defer f.Close()
+
+               // Ensure other processes don't acquire this lockfile
+               // after we have decided it is abandoned but before we
+               // have deleted it.
+               dirlock, err := lockall()
+               if err != nil {
+                       return err
+               }
+               err = syscall.Flock(int(f.Fd()), syscall.LOCK_SH|syscall.LOCK_NB)
+               if err == nil {
+                       // We were able to acquire the lock, so the
+                       // process that wrote this lockfile has
+                       // exited; the lockfile is stale.
+                       err := os.Remove(path)
+                       dirlock.Close()
+                       if err != nil {
+                               fmt.Fprintf(stderr, "unlink %s: %s\n", f.Name(), err)
+                       }
+                       return nil
+               }
+               dirlock.Close()
+
+               var pi procinfo
+               err = json.NewDecoder(f).Decode(&pi)
+               if err != nil {
+                       fmt.Fprintf(stderr, "%s: %s\n", path, err)
+                       return nil
+               }
+               if pi.UUID == "" || pi.PID == 0 {
+                       fmt.Fprintf(stderr, "%s: bogus procinfo: %+v", path, pi)
+                       return nil
+               }
+
+               fmt.Fprintln(stdout, pi.UUID)
+               return nil
+       }))
+}
+
+// If err is nil, return 0 ("success"); otherwise, print err to stderr
+// and return 1.
+func exitcode(stderr io.Writer, err error) int {
+       if err != nil {
+               fmt.Fprintln(stderr, err)
+               return 1
+       }
+       return 0
+}
+
+// Acquire a dir-level lock. Must be held while creating or deleting
+// container-specific lockfiles, to avoid races during the intervals
+// when those container-specific lockfiles are open but not locked.
+//
+// Caller releases the lock by closing the returned file.
+func lockall() (*os.File, error) {
+       lockfile := filepath.Join(lockdir, lockprefix+"all"+locksuffix)
+       f, err := os.OpenFile(lockfile, os.O_CREATE|os.O_RDWR, 0700)
+       if err != nil {
+               return nil, fmt.Errorf("open %s: %s", lockfile, err)
+       }
+       err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX)
+       if err != nil {
+               f.Close()
+               return nil, fmt.Errorf("lock %s: %s", lockfile, err)
+       }
+       return f, nil
+}
diff --git a/services/crunch-run/cgroup.go b/services/crunch-run/cgroup.go
new file mode 100644 (file)
index 0000000..9e52de5
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "io/ioutil"
+       "log"
+)
+
+// findCgroup returns the current process's cgroup path for the given
+// subsystem.
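+// Each line of /proc/self/cgroup has the form
+// "hierarchy-ID:subsystem-list:cgroup-path", e.g.
+// "4:cpu,cpuacct:/user.slice".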
+func findCgroup(subsystem string) string {
+       subsys := []byte(subsystem)
+       cgroups, err := ioutil.ReadFile("/proc/self/cgroup")
+       if err != nil {
+               log.Fatal(err)
+       }
+       for _, line := range bytes.Split(cgroups, []byte("\n")) {
+               toks := bytes.SplitN(line, []byte(":"), 4)
+               if len(toks) < 3 {
+                       continue
+               }
+               for _, s := range bytes.Split(toks[1], []byte(",")) {
+                       if bytes.Equal(s, subsys) {
+                               return string(toks[2])
+                       }
+               }
+       }
+       log.Fatalf("subsystem %q not found in /proc/self/cgroup", subsystem)
+       return ""
+}
diff --git a/services/crunch-run/cgroup_test.go b/services/crunch-run/cgroup_test.go
new file mode 100644 (file)
index 0000000..95adf74
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       . "gopkg.in/check.v1"
+)
+
+type CgroupSuite struct{}
+
+var _ = Suite(&CgroupSuite{})
+
+func (s *CgroupSuite) TestFindCgroup(c *C) {
+       for _, s := range []string{"devices", "cpu", "cpuset"} {
+               g := findCgroup(s)
+               c.Check(g, Not(Equals), "")
+               c.Logf("cgroup(%q) == %q", s, g)
+       }
+}
diff --git a/services/crunch-run/copier.go b/services/crunch-run/copier.go
new file mode 100644 (file)
index 0000000..3f529f6
--- /dev/null
@@ -0,0 +1,357 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "errors"
+       "fmt"
+       "io"
+       "os"
+       "path/filepath"
+       "sort"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/manifest"
+)
+
+type printfer interface {
+       Printf(string, ...interface{})
+}
+
+var errTooManySymlinks = errors.New("too many symlinks, or symlink cycle")
+
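+// limitFollowSymlinks is the maximum depth of symlink indirection
+// followed while walking the output directory; anything deeper is
+// treated as a symlink cycle.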
+const limitFollowSymlinks = 10
+
+type filetodo struct {
+       src  string
+       dst  string
+       size int64
+}
+
+// copier copies data from a finished container's output path to a new
+// Arvados collection.
+//
+// Regular files (and symlinks to regular files) in hostOutputDir are
+// copied from the local filesystem.
+//
+// Symlinks to mounted collections, and any collections mounted under
+// ctrOutputDir, are copied by transforming the relevant parts of the
+// existing manifests, without moving any data around.
+//
+// Symlinks to other parts of the container's filesystem result in
+// errors.
+//
+// Use:
+//
+//     manifest, err := (&copier{...}).Copy()
+type copier struct {
+       client        *arvados.Client
+       arvClient     IArvadosClient
+       keepClient    IKeepClient
+       hostOutputDir string
+       ctrOutputDir  string
+       binds         []string
+       mounts        map[string]arvados.Mount
+       secretMounts  map[string]arvados.Mount
+       logger        printfer
+
+       dirs     []string
+       files    []filetodo
+       manifest string
+
+       manifestCache map[string]*manifest.Manifest
+}
+
+// Copy copies data as needed, and returns a new manifest.
+func (cp *copier) Copy() (string, error) {
+       err := cp.walkMount("", cp.ctrOutputDir, limitFollowSymlinks, true)
+       if err != nil {
+               return "", fmt.Errorf("error scanning files to copy to output: %v", err)
+       }
+       fs, err := (&arvados.Collection{ManifestText: cp.manifest}).FileSystem(cp.client, cp.keepClient)
+       if err != nil {
+               return "", fmt.Errorf("error creating Collection.FileSystem: %v", err)
+       }
+       for _, d := range cp.dirs {
+               err = fs.Mkdir(d, 0777)
+               if err != nil && err != os.ErrExist {
+                       return "", fmt.Errorf("error making directory %q in output collection: %v", d, err)
+               }
+       }
+       for _, f := range cp.files {
+               err = cp.copyFile(fs, f)
+               if err != nil {
+                       return "", fmt.Errorf("error copying file %q into output collection: %v", f, err)
+               }
+       }
+       return fs.MarshalManifest(".")
+}
+
+func (cp *copier) copyFile(fs arvados.CollectionFileSystem, f filetodo) error {
+       cp.logger.Printf("copying %q (%d bytes)", f.dst, f.size)
+       dst, err := fs.OpenFile(f.dst, os.O_CREATE|os.O_WRONLY, 0666)
+       if err != nil {
+               return err
+       }
+       src, err := os.Open(f.src)
+       if err != nil {
+               dst.Close()
+               return err
+       }
+       defer src.Close()
+       _, err = io.Copy(dst, src)
+       if err != nil {
+               dst.Close()
+               return err
+       }
+       return dst.Close()
+}
+
+// Append to cp.manifest, cp.files, and cp.dirs so as to copy src (an
+// absolute path in the container's filesystem) to dest (an absolute
+// path in the output collection, or "" for output root).
+//
+// src must be (or be a descendant of) a readonly "collection" mount,
+// a writable collection mounted at ctrOutputPath, or a "tmp" mount.
+//
+// If walkMountsBelow is true, include contents of any collection
+// mounted below src as well.
+func (cp *copier) walkMount(dest, src string, maxSymlinks int, walkMountsBelow bool) error {
+       // srcRoot, srcMount indicate the innermost mount that
+       // contains src.
+       var srcRoot string
+       var srcMount arvados.Mount
+       for root, mnt := range cp.mounts {
+               if len(root) > len(srcRoot) && strings.HasPrefix(src+"/", root+"/") {
+                       srcRoot, srcMount = root, mnt
+               }
+       }
+       for root := range cp.secretMounts {
+               if len(root) > len(srcRoot) && strings.HasPrefix(src+"/", root+"/") {
+                       // Silently omit secrets, and symlinks to
+                       // secrets.
+                       return nil
+               }
+       }
+       if srcRoot == "" {
+               return fmt.Errorf("cannot output file %q: not in any mount", src)
+       }
+
+       // srcRelPath is the path to the file/dir we are trying to
+       // copy, relative to its mount point -- ".", "./foo.txt", ...
+       srcRelPath := filepath.Join(".", srcMount.Path, src[len(srcRoot):])
+
+       switch {
+       case srcMount.ExcludeFromOutput:
+       case srcMount.Kind == "tmp":
+               // Handle by walking the host filesystem.
+               return cp.walkHostFS(dest, src, maxSymlinks, walkMountsBelow)
+       case srcMount.Kind != "collection":
+               return fmt.Errorf("%q: unsupported mount %q in output (kind is %q)", src, srcRoot, srcMount.Kind)
+       case !srcMount.Writable:
+               mft, err := cp.getManifest(srcMount.PortableDataHash)
+               if err != nil {
+                       return err
+               }
+               cp.manifest += mft.Extract(srcRelPath, dest).Text
+       default:
+               hostRoot, err := cp.hostRoot(srcRoot)
+               if err != nil {
+                       return err
+               }
+               f, err := os.Open(filepath.Join(hostRoot, ".arvados#collection"))
+               if err != nil {
+                       return err
+               }
+               defer f.Close()
+               var coll arvados.Collection
+               err = json.NewDecoder(f).Decode(&coll)
+               if err != nil {
+                       return err
+               }
+               mft := manifest.Manifest{Text: coll.ManifestText}
+               cp.manifest += mft.Extract(srcRelPath, dest).Text
+       }
+       if walkMountsBelow {
+               return cp.walkMountsBelow(dest, src)
+       }
+       return nil
+}
+
+func (cp *copier) walkMountsBelow(dest, src string) error {
+       for mnt, mntinfo := range cp.mounts {
+               if !strings.HasPrefix(mnt, src+"/") {
+                       continue
+               }
+               if cp.copyRegularFiles(mntinfo) {
+                       // These got copied into the nearest parent
+                       // mount as regular files during setup, so
+                       // they get copied as regular files when we
+                       // process the parent. Output will reflect any
+                       // changes and deletions done by the
+                       // container.
+                       continue
+               }
+               // Example: we are processing dest=/foo src=/mnt1/dir1
+               // (perhaps we followed a symlink /outdir/foo ->
+               // /mnt1/dir1). Caller has already processed the
+               // collection mounted at /mnt1, but now we find that
+               // /mnt1/dir1/mnt2 is also a mount, so we need to copy
+               // src=/mnt1/dir1/mnt2 to dest=/foo/mnt2.
+               //
+               // We handle all descendants of /mnt1/dir1 in this
+               // loop instead of using recursion:
+               // /mnt1/dir1/mnt2/mnt3 is a child of both /mnt1 and
+               // /mnt1/dir1/mnt2, but we only want to walk it
+               // once. (This simplification is safe because mounted
+               // collections cannot contain symlinks.)
+               err := cp.walkMount(dest+mnt[len(src):], mnt, 0, false)
+               if err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+// Add entries to cp.dirs and cp.files so as to copy src (an absolute
+// path in the container's filesystem which corresponds to a real file
+// or directory in cp.hostOutputDir) to dest (an absolute path in the
+// output collection, or "" for output root).
+//
+// Always follow symlinks.
+//
+// If includeMounts is true, include mounts at and below src.
+// Otherwise, skip them.
+func (cp *copier) walkHostFS(dest, src string, maxSymlinks int, includeMounts bool) error {
+       if includeMounts {
+               err := cp.walkMountsBelow(dest, src)
+               if err != nil {
+                       return err
+               }
+       }
+
+       hostsrc := cp.hostOutputDir + src[len(cp.ctrOutputDir):]
+
+       // If src is a symlink, walk its target.
+       fi, err := os.Lstat(hostsrc)
+       if err != nil {
+               return fmt.Errorf("lstat %q: %s", src, err)
+       }
+       if fi.Mode()&os.ModeSymlink != 0 {
+               if maxSymlinks < 0 {
+                       return errTooManySymlinks
+               }
+               target, err := os.Readlink(hostsrc)
+               if err != nil {
+                       return fmt.Errorf("readlink %q: %s", src, err)
+               }
+               if !strings.HasPrefix(target, "/") {
+                       target = filepath.Join(filepath.Dir(src), target)
+               }
+               return cp.walkMount(dest, target, maxSymlinks-1, true)
+       }
+
+       // If src is a regular directory, append it to cp.dirs and
+       // walk each of its children. (If there are no children,
+       // create an empty file "dest/.keep".)
+       if fi.Mode().IsDir() {
+               if dest != "" {
+                       cp.dirs = append(cp.dirs, dest)
+               }
+               dir, err := os.Open(hostsrc)
+               if err != nil {
+                       return fmt.Errorf("open %q: %s", src, err)
+               }
+               names, err := dir.Readdirnames(-1)
+               dir.Close()
+               if err != nil {
+                       return fmt.Errorf("readdirnames %q: %s", src, err)
+               }
+               if len(names) == 0 {
+                       if dest != "" {
+                               cp.files = append(cp.files, filetodo{
+                                       src: os.DevNull,
+                                       dst: dest + "/.keep",
+                               })
+                       }
+                       return nil
+               }
+               sort.Strings(names)
+               for _, name := range names {
+                       dest, src := dest+"/"+name, src+"/"+name
+                       if _, isSecret := cp.secretMounts[src]; isSecret {
+                               continue
+                       }
+                       if mntinfo, isMount := cp.mounts[src]; isMount && !cp.copyRegularFiles(mntinfo) {
+                               // If a regular file/dir somehow
+                               // exists at a path that's also a
+                               // mount target, ignore the file --
+                               // the mount has already been included
+                               // with walkMountsBelow().
+                               //
+                               // (...except mount types that are
+                               // handled as regular files.)
+                               continue
+                       }
+                       err = cp.walkHostFS(dest, src, maxSymlinks, false)
+                       if err != nil {
+                               return err
+                       }
+               }
+               return nil
+       }
+
+       // If src is a regular file, append it to cp.files.
+       if fi.Mode().IsRegular() {
+               cp.files = append(cp.files, filetodo{
+                       src:  hostsrc,
+                       dst:  dest,
+                       size: fi.Size(),
+               })
+               return nil
+       }
+
+       return fmt.Errorf("Unsupported file type (mode %o) in output dir: %q", fi.Mode(), src)
+}
+
+// Return the host path that was mounted at the given path in the
+// container.
+func (cp *copier) hostRoot(ctrRoot string) (string, error) {
+       if ctrRoot == cp.ctrOutputDir {
+               return cp.hostOutputDir, nil
+       }
+       for _, bind := range cp.binds {
+               tokens := strings.Split(bind, ":")
+               if len(tokens) >= 2 && tokens[1] == ctrRoot {
+                       return tokens[0], nil
+               }
+       }
+       return "", fmt.Errorf("not bind-mounted: %q", ctrRoot)
+}
+
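+// copyRegularFiles returns true if mounts of the given type are
+// copied by reading files from the host filesystem, rather than by
+// transforming an existing collection manifest.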
+func (cp *copier) copyRegularFiles(m arvados.Mount) bool {
+       return m.Kind == "text" || m.Kind == "json" || (m.Kind == "collection" && m.Writable)
+}
+
+func (cp *copier) getManifest(pdh string) (*manifest.Manifest, error) {
+       if mft, ok := cp.manifestCache[pdh]; ok {
+               return mft, nil
+       }
+       var coll arvados.Collection
+       err := cp.arvClient.Get("collections", pdh, nil, &coll)
+       if err != nil {
+               return nil, fmt.Errorf("error retrieving collection record for %q: %s", pdh, err)
+       }
+       mft := &manifest.Manifest{Text: coll.ManifestText}
+       if cp.manifestCache == nil {
+               cp.manifestCache = map[string]*manifest.Manifest{pdh: mft}
+       } else {
+               cp.manifestCache[pdh] = mft
+       }
+       return mft, nil
+}
diff --git a/services/crunch-run/copier_test.go b/services/crunch-run/copier_test.go
new file mode 100644 (file)
index 0000000..a2b5608
--- /dev/null
@@ -0,0 +1,222 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "io"
+       "io/ioutil"
+       "os"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&copierSuite{})
+
+type copierSuite struct {
+       cp copier
+}
+
+func (s *copierSuite) SetUpTest(c *check.C) {
+       tmpdir, err := ioutil.TempDir("", "crunch-run.test.")
+       c.Assert(err, check.IsNil)
+       api, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, check.IsNil)
+       s.cp = copier{
+               client:        arvados.NewClientFromEnv(),
+               arvClient:     api,
+               hostOutputDir: tmpdir,
+               ctrOutputDir:  "/ctr/outdir",
+               mounts: map[string]arvados.Mount{
+                       "/ctr/outdir": {Kind: "tmp"},
+               },
+               secretMounts: map[string]arvados.Mount{
+                       "/secret_text": {Kind: "text", Content: "xyzzy"},
+               },
+       }
+}
+
+func (s *copierSuite) TearDownTest(c *check.C) {
+       os.RemoveAll(s.cp.hostOutputDir)
+}
+
+func (s *copierSuite) TestEmptyOutput(c *check.C) {
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(s.cp.dirs, check.DeepEquals, []string(nil))
+       c.Check(len(s.cp.files), check.Equals, 0)
+}
+
+func (s *copierSuite) TestRegularFilesAndDirs(c *check.C) {
+       err := os.MkdirAll(s.cp.hostOutputDir+"/dir1/dir2/dir3", 0755)
+       c.Assert(err, check.IsNil)
+       f, err := os.OpenFile(s.cp.hostOutputDir+"/dir1/foo", os.O_CREATE|os.O_WRONLY, 0644)
+       c.Assert(err, check.IsNil)
+       _, err = io.WriteString(f, "foo")
+       c.Assert(err, check.IsNil)
+       c.Assert(f.Close(), check.IsNil)
+
+       err = s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(s.cp.dirs, check.DeepEquals, []string{"/dir1", "/dir1/dir2", "/dir1/dir2/dir3"})
+       c.Check(s.cp.files, check.DeepEquals, []filetodo{
+               {src: os.DevNull, dst: "/dir1/dir2/dir3/.keep"},
+               {src: s.cp.hostOutputDir + "/dir1/foo", dst: "/dir1/foo", size: 3},
+       })
+}
+
+func (s *copierSuite) TestSymlinkCycle(c *check.C) {
+       c.Assert(os.Mkdir(s.cp.hostOutputDir+"/dir1", 0755), check.IsNil)
+       c.Assert(os.Mkdir(s.cp.hostOutputDir+"/dir2", 0755), check.IsNil)
+       c.Assert(os.Symlink("../dir2", s.cp.hostOutputDir+"/dir1/l_dir2"), check.IsNil)
+       c.Assert(os.Symlink("../dir1", s.cp.hostOutputDir+"/dir2/l_dir1"), check.IsNil)
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.ErrorMatches, `.*cycle.*`)
+}
+
+func (s *copierSuite) TestSymlinkTargetMissing(c *check.C) {
+       c.Assert(os.Symlink("./missing", s.cp.hostOutputDir+"/symlink"), check.IsNil)
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.ErrorMatches, `.*/ctr/outdir/missing.*`)
+}
+
+func (s *copierSuite) TestSymlinkTargetNotMounted(c *check.C) {
+       c.Assert(os.Symlink("../boop", s.cp.hostOutputDir+"/symlink"), check.IsNil)
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.ErrorMatches, `.*/ctr/boop.*`)
+}
+
+func (s *copierSuite) TestSymlinkToSecret(c *check.C) {
+       c.Assert(os.Symlink("/secret_text", s.cp.hostOutputDir+"/symlink"), check.IsNil)
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(len(s.cp.dirs), check.Equals, 0)
+       c.Check(len(s.cp.files), check.Equals, 0)
+}
+
+func (s *copierSuite) TestSecretInOutputDir(c *check.C) {
+       s.cp.secretMounts["/ctr/outdir/secret_text"] = s.cp.secretMounts["/secret_text"]
+       s.writeFileInOutputDir(c, "secret_text", "xyzzy")
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(len(s.cp.dirs), check.Equals, 0)
+       c.Check(len(s.cp.files), check.Equals, 0)
+}
+
+func (s *copierSuite) TestSymlinkToMountedCollection(c *check.C) {
+       // simulate mounted read-only collection
+       s.cp.mounts["/mnt"] = arvados.Mount{
+               Kind:             "collection",
+               PortableDataHash: arvadostest.FooPdh,
+       }
+
+       // simulate mounted writable collection
+       bindtmp, err := ioutil.TempDir("", "crunch-run.test.")
+       c.Assert(err, check.IsNil)
+       defer os.RemoveAll(bindtmp)
+       f, err := os.OpenFile(bindtmp+"/.arvados#collection", os.O_CREATE|os.O_WRONLY, 0644)
+       c.Assert(err, check.IsNil)
+       _, err = io.WriteString(f, `{"manifest_text":". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"}`)
+       c.Assert(err, check.IsNil)
+       c.Assert(f.Close(), check.IsNil)
+       s.cp.mounts["/mnt-w"] = arvados.Mount{
+               Kind:             "collection",
+               PortableDataHash: arvadostest.FooPdh,
+               Writable:         true,
+       }
+       s.cp.binds = append(s.cp.binds, bindtmp+":/mnt-w")
+
+       c.Assert(os.Symlink("../../mnt", s.cp.hostOutputDir+"/l_dir"), check.IsNil)
+       c.Assert(os.Symlink("/mnt/foo", s.cp.hostOutputDir+"/l_file"), check.IsNil)
+       c.Assert(os.Symlink("/mnt-w/bar", s.cp.hostOutputDir+"/l_file_w"), check.IsNil)
+
+       err = s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(s.cp.manifest, check.Matches, `(?ms)\./l_dir acbd\S+ 0:3:foo\n\. acbd\S+ 0:3:l_file\n\. 37b5\S+ 0:3:l_file_w\n`)
+}
+
+func (s *copierSuite) TestSymlink(c *check.C) {
+       hostfile := s.cp.hostOutputDir + "/dir1/file"
+
+       err := os.MkdirAll(s.cp.hostOutputDir+"/dir1/dir2/dir3", 0755)
+       c.Assert(err, check.IsNil)
+       s.writeFileInOutputDir(c, "dir1/file", "file")
+       for _, err := range []error{
+               os.Symlink(s.cp.ctrOutputDir+"/dir1/file", s.cp.hostOutputDir+"/l_abs_file"),
+               os.Symlink(s.cp.ctrOutputDir+"/dir1/dir2", s.cp.hostOutputDir+"/l_abs_dir2"),
+               os.Symlink("../../dir1/file", s.cp.hostOutputDir+"/dir1/dir2/l_rel_file"),
+               os.Symlink("dir1/file", s.cp.hostOutputDir+"/l_rel_file"),
+               os.MkdirAll(s.cp.hostOutputDir+"/morelinks", 0755),
+               os.Symlink("../dir1/dir2", s.cp.hostOutputDir+"/morelinks/l_rel_dir2"),
+               os.Symlink("dir1/dir2/dir3", s.cp.hostOutputDir+"/l_rel_dir3"),
+               // rel. symlink -> rel. symlink -> regular file
+               os.Symlink("../dir1/dir2/l_rel_file", s.cp.hostOutputDir+"/morelinks/l_rel_l_rel_file"),
+       } {
+               c.Assert(err, check.IsNil)
+       }
+
+       err = s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(s.cp.dirs, check.DeepEquals, []string{
+               "/dir1", "/dir1/dir2", "/dir1/dir2/dir3",
+               "/l_abs_dir2", "/l_abs_dir2/dir3",
+               "/l_rel_dir3",
+               "/morelinks", "/morelinks/l_rel_dir2", "/morelinks/l_rel_dir2/dir3",
+       })
+       c.Check(s.cp.files, check.DeepEquals, []filetodo{
+               {dst: "/dir1/dir2/dir3/.keep", src: os.DevNull},
+               {dst: "/dir1/dir2/l_rel_file", src: hostfile, size: 4},
+               {dst: "/dir1/file", src: hostfile, size: 4},
+               {dst: "/l_abs_dir2/dir3/.keep", src: os.DevNull},
+               {dst: "/l_abs_dir2/l_rel_file", src: hostfile, size: 4},
+               {dst: "/l_abs_file", src: hostfile, size: 4},
+               {dst: "/l_rel_dir3/.keep", src: os.DevNull},
+               {dst: "/l_rel_file", src: hostfile, size: 4},
+               {dst: "/morelinks/l_rel_dir2/dir3/.keep", src: os.DevNull},
+               {dst: "/morelinks/l_rel_dir2/l_rel_file", src: hostfile, size: 4},
+               {dst: "/morelinks/l_rel_l_rel_file", src: hostfile, size: 4},
+       })
+}
+
+func (s *copierSuite) TestUnsupportedOutputMount(c *check.C) {
+       s.cp.mounts["/ctr/outdir"] = arvados.Mount{Kind: "waz"}
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.NotNil)
+}
+
+func (s *copierSuite) TestUnsupportedMountKindBelow(c *check.C) {
+       s.cp.mounts["/ctr/outdir/dirk"] = arvados.Mount{Kind: "waz"}
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.NotNil)
+}
+
+func (s *copierSuite) TestWritableMountBelow(c *check.C) {
+       s.cp.mounts["/ctr/outdir/mount"] = arvados.Mount{
+               Kind:             "collection",
+               PortableDataHash: arvadostest.FooPdh,
+               Writable:         true,
+       }
+       c.Assert(os.MkdirAll(s.cp.hostOutputDir+"/mount", 0755), check.IsNil)
+       s.writeFileInOutputDir(c, "file", "file")
+       s.writeFileInOutputDir(c, "mount/foo", "foo")
+
+       err := s.cp.walkMount("", s.cp.ctrOutputDir, 10, true)
+       c.Check(err, check.IsNil)
+       c.Check(s.cp.dirs, check.DeepEquals, []string{"/mount"})
+       c.Check(s.cp.files, check.DeepEquals, []filetodo{
+               {src: s.cp.hostOutputDir + "/file", dst: "/file", size: 4},
+               {src: s.cp.hostOutputDir + "/mount/foo", dst: "/mount/foo", size: 3},
+       })
+}
+
+func (s *copierSuite) writeFileInOutputDir(c *check.C, path, data string) {
+       f, err := os.OpenFile(s.cp.hostOutputDir+"/"+path, os.O_CREATE|os.O_WRONLY, 0644)
+       c.Assert(err, check.IsNil)
+       _, err = io.WriteString(f, data)
+       c.Assert(err, check.IsNil)
+       c.Assert(f.Close(), check.IsNil)
+}
diff --git a/services/crunch-run/crunchrun.go b/services/crunch-run/crunchrun.go
new file mode 100644 (file)
index 0000000..0576337
--- /dev/null
@@ -0,0 +1,1884 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "encoding/json"
+       "errors"
+       "flag"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "os"
+       "os/exec"
+       "os/signal"
+       "path"
+       "path/filepath"
+       "regexp"
+       "runtime"
+       "runtime/pprof"
+       "sort"
+       "strings"
+       "sync"
+       "syscall"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/crunchstat"
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       "git.curoverse.com/arvados.git/sdk/go/manifest"
+       "golang.org/x/net/context"
+
+       dockertypes "github.com/docker/docker/api/types"
+       dockercontainer "github.com/docker/docker/api/types/container"
+       dockernetwork "github.com/docker/docker/api/types/network"
+       dockerclient "github.com/docker/docker/client"
+)
+
+var version = "dev"
+
+// IArvadosClient is the minimal Arvados API methods used by crunch-run.
+type IArvadosClient interface {
+       Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
+       Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
+       Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
+       Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
+       CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
+       Discovery(key string) (interface{}, error)
+}
+
+// ErrCancelled is the error returned when the container is cancelled.
+var ErrCancelled = errors.New("Cancelled")
+
+// IKeepClient is the minimal Keep API methods used by crunch-run.
+type IKeepClient interface {
+       PutB(buf []byte) (string, int, error)
+       ReadAt(locator string, p []byte, off int) (int, error)
+       ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
+       LocalLocator(locator string) (string, error)
+       ClearBlockCache()
+}
+
+// NewLogWriter is a factory function to create a new log writer.
+type NewLogWriter func(name string) (io.WriteCloser, error)
+
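+// RunArvMount is a factory function to start an arv-mount process
+// with the given arguments and API token.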
+type RunArvMount func(args []string, tok string) (*exec.Cmd, error)
+
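+// MkTempDir is a factory function to create a temporary directory;
+// it has the same signature as ioutil.TempDir.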
+type MkTempDir func(string, string) (string, error)
+
+// ThinDockerClient is the minimal Docker client interface used by crunch-run.
+type ThinDockerClient interface {
+       ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error)
+       ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
+               networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
+       ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
+       ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
+       ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
+       ContainerInspect(ctx context.Context, id string) (dockertypes.ContainerJSON, error)
+       ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
+       ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
+       ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
+}
+
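+// PsProcess is the minimal process-inspection interface used by
+// crunch-run to examine a running process's command line.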
+type PsProcess interface {
+       CmdlineSlice() ([]string, error)
+}
+
+// ContainerRunner is the main stateful struct used for a single execution of a
+// container.
+type ContainerRunner struct {
+       Docker ThinDockerClient
+
+       // Dispatcher client is initialized with the Dispatcher token.
+       // This is a privileged token used to manage container status
+       // and logs.
+       //
+       // We have both dispatcherClient and DispatcherArvClient
+       // because there are two different incompatible Arvados Go
+       // SDKs and we have to use both (hopefully this gets fixed in
+       // #14467)
+       dispatcherClient     *arvados.Client
+       DispatcherArvClient  IArvadosClient
+       DispatcherKeepClient IKeepClient
+
+       // Container client is initialized with the Container token.
+       // This token controls the permissions of the container, and
+       // must be used for operations such as reading collections.
+       //
+       // Same comment as above applies to
+       // containerClient/ContainerArvClient.
+       containerClient     *arvados.Client
+       ContainerArvClient  IArvadosClient
+       ContainerKeepClient IKeepClient
+
+       Container       arvados.Container
+       ContainerConfig dockercontainer.Config
+       HostConfig      dockercontainer.HostConfig
+       token           string
+       ContainerID     string
+       ExitCode        *int
+       NewLogWriter    NewLogWriter
+       loggingDone     chan bool
+       CrunchLog       *ThrottledLogger
+       Stdout          io.WriteCloser
+       Stderr          io.WriteCloser
+       logUUID         string
+       logMtx          sync.Mutex
+       LogCollection   arvados.CollectionFileSystem
+       LogsPDH         *string
+       RunArvMount     RunArvMount
+       MkTempDir       MkTempDir
+       ArvMount        *exec.Cmd
+       ArvMountPoint   string
+       HostOutputDir   string
+       Binds           []string
+       Volumes         map[string]struct{}
+       OutputPDH       *string
+       SigChan         chan os.Signal
+       ArvMountExit    chan error
+       SecretMounts    map[string]arvados.Mount
+       MkArvClient     func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
+       finalState      string
+       parentTemp      string
+
+       statLogger       io.WriteCloser
+       statReporter     *crunchstat.Reporter
+       hoststatLogger   io.WriteCloser
+       hoststatReporter *crunchstat.Reporter
+       statInterval     time.Duration
+       cgroupRoot       string
+       // What we expect the container's cgroup parent to be.
+       expectCgroupParent string
+       // What we tell docker to use as the container's cgroup
+       // parent. Note: Ideally we would use the same field for both
+       // expectCgroupParent and setCgroupParent, and just make it
+       // default to "docker". However, when using docker < 1.10 with
+       // systemd, specifying a non-empty cgroup parent (even the
+       // default value "docker") hits a docker bug
+       // (https://github.com/docker/docker/issues/17126). Using two
+       // separate fields makes it possible to use the "expect cgroup
+       // parent to be X" feature even on sites where the "specify
+       // cgroup parent" feature breaks.
+       setCgroupParent string
+
+       cStateLock sync.Mutex
+       cCancelled bool // StopContainer() invoked
+       cRemoved   bool // docker confirmed the container no longer exists
+
+       enableNetwork string // one of "default" or "always"
+       networkMode   string // passed through to HostConfig.NetworkMode
+       arvMountLog   *ThrottledLogger
+
+       containerWatchdogInterval time.Duration
+}
+
+// setupSignals sets up signal handling to gracefully terminate the underlying
+// Docker container and update state when receiving a TERM, INT or QUIT signal.
+func (runner *ContainerRunner) setupSignals() {
+       runner.SigChan = make(chan os.Signal, 1)
+       signal.Notify(runner.SigChan, syscall.SIGTERM)
+       signal.Notify(runner.SigChan, syscall.SIGINT)
+       signal.Notify(runner.SigChan, syscall.SIGQUIT)
+
+       go func(sig chan os.Signal) {
+               for s := range sig {
+                       runner.stop(s)
+               }
+       }(runner.SigChan)
+}
+
+// stop the underlying Docker container.
+func (runner *ContainerRunner) stop(sig os.Signal) {
+       runner.cStateLock.Lock()
+       defer runner.cStateLock.Unlock()
+       if sig != nil {
+               runner.CrunchLog.Printf("caught signal: %v", sig)
+       }
+       if runner.ContainerID == "" {
+               return
+       }
+       runner.cCancelled = true
+       runner.CrunchLog.Printf("removing container")
+       err := runner.Docker.ContainerRemove(context.TODO(), runner.ContainerID, dockertypes.ContainerRemoveOptions{Force: true})
+       if err != nil {
+               runner.CrunchLog.Printf("error removing container: %s", err)
+       }
+       if err == nil || strings.Contains(err.Error(), "No such container: "+runner.ContainerID) {
+               runner.cRemoved = true
+       }
+}
+
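+// errorBlacklist matches error messages that indicate the worker node
+// itself is unable to run containers (for example, an unreachable
+// Docker daemon), as opposed to a problem with this particular
+// container.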
+var errorBlacklist = []string{
+       "(?ms).*[Cc]annot connect to the Docker daemon.*",
+       "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
+       "(?ms).*grpc: the connection is unavailable.*",
+}
+var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
+
+func (runner *ContainerRunner) runBrokenNodeHook() {
+       if *brokenNodeHook == "" {
+               runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
+       } else {
+               runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
+               // Run the configured hook script.
+               c := exec.Command(*brokenNodeHook)
+               c.Stdout = runner.CrunchLog
+               c.Stderr = runner.CrunchLog
+               err := c.Run()
+               if err != nil {
+                       runner.CrunchLog.Printf("Error running broken node hook: %v", err)
+               }
+       }
+}
+
+func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
+       for _, d := range errorBlacklist {
+               if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
+                       runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
+                       runner.runBrokenNodeHook()
+                       return true
+               }
+       }
+       return false
+}
+
+// LoadImage determines the docker image id from the container record and
+// checks if it is available in the local Docker image store.  If not, it loads
+// the image from Keep.
+func (runner *ContainerRunner) LoadImage() (err error) {
+
+       runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)
+
+       var collection arvados.Collection
+       err = runner.ContainerArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
+       if err != nil {
+               return fmt.Errorf("While getting container image collection: %v", err)
+       }
+       manifest := manifest.Manifest{Text: collection.ManifestText}
+       var img, imageID string
+       for ms := range manifest.StreamIter() {
+               img = ms.FileStreamSegments[0].Name
+               if !strings.HasSuffix(img, ".tar") {
+                       return fmt.Errorf("First file in the container image collection does not end in .tar")
+               }
+               imageID = img[:len(img)-4]
+       }
+
+       runner.CrunchLog.Printf("Using Docker image id '%s'", imageID)
+
+       _, _, err = runner.Docker.ImageInspectWithRaw(context.TODO(), imageID)
+       if err != nil {
+               runner.CrunchLog.Print("Loading Docker image from keep")
+
+               var readCloser io.ReadCloser
+               readCloser, err = runner.ContainerKeepClient.ManifestFileReader(manifest, img)
+               if err != nil {
+                       return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
+               }
+
+               response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, true)
+               if err != nil {
+                       return fmt.Errorf("While loading container image into Docker: %v", err)
+               }
+
+               defer response.Body.Close()
+               rbody, err := ioutil.ReadAll(response.Body)
+               if err != nil {
+                       return fmt.Errorf("Reading response to image load: %v", err)
+               }
+               runner.CrunchLog.Printf("Docker response: %s", rbody)
+       } else {
+               runner.CrunchLog.Print("Docker image is available")
+       }
+
+       runner.ContainerConfig.Image = imageID
+
+       runner.ContainerKeepClient.ClearBlockCache()
+
+       return nil
+}
+
+func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (c *exec.Cmd, err error) {
+       c = exec.Command("arv-mount", arvMountCmd...)
+
+       // Copy our environment, but override ARVADOS_API_TOKEN with
+       // the container auth token.
+       c.Env = nil
+       for _, s := range os.Environ() {
+               if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
+                       c.Env = append(c.Env, s)
+               }
+       }
+       c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
+
+       w, err := runner.NewLogWriter("arv-mount")
+       if err != nil {
+               return nil, err
+       }
+       runner.arvMountLog = NewThrottledLogger(w)
+       c.Stdout = runner.arvMountLog
+       c.Stderr = runner.arvMountLog
+
+       runner.CrunchLog.Printf("Running %v", c.Args)
+
+       err = c.Start()
+       if err != nil {
+               return nil, err
+       }
+
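+       // arv-mount signals readiness by creating <mountpoint>/by_id/README.
+       // Poll for that file while also watching for the process to exit
+       // early.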
+       statReadme := make(chan bool)
+       runner.ArvMountExit = make(chan error)
+
+       keepStatting := true
+       go func() {
+               for keepStatting {
+                       time.Sleep(100 * time.Millisecond)
+                       _, err = os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
+                       if err == nil {
+                               keepStatting = false
+                               statReadme <- true
+                       }
+               }
+               close(statReadme)
+       }()
+
+       go func() {
+               mnterr := c.Wait()
+               if mnterr != nil {
+                       runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
+               }
+               runner.ArvMountExit <- mnterr
+               close(runner.ArvMountExit)
+       }()
+
+       select {
+       case <-statReadme:
+               break
+       case err := <-runner.ArvMountExit:
+               runner.ArvMount = nil
+               keepStatting = false
+               return nil, err
+       }
+
+       return c, nil
+}
+
+func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
+       if runner.ArvMountPoint == "" {
+               runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
+       }
+       return
+}
+
+func copyfile(src string, dst string) (err error) {
+       srcfile, err := os.Open(src)
+       if err != nil {
+               return
+       }
+       defer srcfile.Close()
+
+       err = os.MkdirAll(path.Dir(dst), 0777)
+       if err != nil {
+               return
+       }
+
+       dstfile, err := os.Create(dst)
+       if err != nil {
+               return
+       }
+       _, err = io.Copy(dstfile, srcfile)
+       if err != nil {
+               dstfile.Close()
+               return
+       }
+
+       return dstfile.Close()
+}
+
+func (runner *ContainerRunner) SetupMounts() (err error) {
+       err = runner.SetupArvMountPoint("keep")
+       if err != nil {
+               return fmt.Errorf("While creating keep mount temp dir: %v", err)
+       }
+
+       token, err := runner.ContainerToken()
+       if err != nil {
+               return fmt.Errorf("could not get container token: %s", err)
+       }
+
+       pdhOnly := true
+       tmpcount := 0
+       arvMountCmd := []string{
+               "--foreground",
+               "--allow-other",
+               "--read-write",
+               fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
+
+       if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
+               arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
+       }
+
+       collectionPaths := []string{}
+       runner.Binds = nil
+       runner.Volumes = make(map[string]struct{})
+       needCertMount := true
+       type copyFile struct {
+               src  string
+               bind string
+       }
+       var copyFiles []copyFile
+
+       var binds []string
+       for bind := range runner.Container.Mounts {
+               binds = append(binds, bind)
+       }
+       for bind := range runner.SecretMounts {
+               if _, ok := runner.Container.Mounts[bind]; ok {
+                       return fmt.Errorf("Secret mount %q conflicts with regular mount", bind)
+               }
+               if runner.SecretMounts[bind].Kind != "json" &&
+                       runner.SecretMounts[bind].Kind != "text" {
+                       return fmt.Errorf("Secret mount %q type is %q but only 'json' and 'text' are permitted.",
+                               bind, runner.SecretMounts[bind].Kind)
+               }
+               binds = append(binds, bind)
+       }
+       sort.Strings(binds)
+
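+       // Process mount targets in lexical order so that parent
+       // directories are handled before mounts nested inside them; in
+       // particular, the output path mount sets runner.HostOutputDir
+       // before any mounts underneath it are staged as copyFiles.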
+       for _, bind := range binds {
+               mnt, ok := runner.Container.Mounts[bind]
+               if !ok {
+                       mnt = runner.SecretMounts[bind]
+               }
+               if bind == "stdout" || bind == "stderr" {
+                       // Is it a "file" mount kind?
+                       if mnt.Kind != "file" {
+                               return fmt.Errorf("Unsupported mount kind '%s' for %s. Only 'file' is supported.", mnt.Kind, bind)
+                       }
+
+                       // Does path start with OutputPath?
+                       prefix := runner.Container.OutputPath
+                       if !strings.HasSuffix(prefix, "/") {
+                               prefix += "/"
+                       }
+                       if !strings.HasPrefix(mnt.Path, prefix) {
+                               return fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
+                       }
+               }
+
+               if bind == "stdin" {
+                       // Is it a "collection" mount kind?
+                       if mnt.Kind != "collection" && mnt.Kind != "json" {
+                               return fmt.Errorf("Unsupported mount kind '%s' for stdin. Only 'collection' or 'json' are supported.", mnt.Kind)
+                       }
+               }
+
+               if bind == "/etc/arvados/ca-certificates.crt" {
+                       needCertMount = false
+               }
+
+               if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
+                       if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
+                               return fmt.Errorf("Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
+                       }
+               }
+
+               switch {
+               case mnt.Kind == "collection" && bind != "stdin":
+                       var src string
+                       if mnt.UUID != "" && mnt.PortableDataHash != "" {
+                               return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
+                       }
+                       if mnt.UUID != "" {
+                               if mnt.Writable {
+                                       return fmt.Errorf("Writing to existing collections is currently not permitted.")
+                               }
+                               pdhOnly = false
+                               src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
+                       } else if mnt.PortableDataHash != "" {
+                               if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
+                                       return fmt.Errorf("Can never write to a collection specified by portable data hash")
+                               }
+                               idx := strings.Index(mnt.PortableDataHash, "/")
+                               if idx > 0 {
+                                       mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
+                                       mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
+                                       runner.Container.Mounts[bind] = mnt
+                               }
+                               src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
+                               if mnt.Path != "" && mnt.Path != "." {
+                                       if strings.HasPrefix(mnt.Path, "./") {
+                                               mnt.Path = mnt.Path[2:]
+                                       } else if strings.HasPrefix(mnt.Path, "/") {
+                                               mnt.Path = mnt.Path[1:]
+                                       }
+                                       src += "/" + mnt.Path
+                               }
+                       } else {
+                               src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
+                               arvMountCmd = append(arvMountCmd, "--mount-tmp")
+                               arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
+                               tmpcount++
+                       }
+                       if mnt.Writable {
+                               if bind == runner.Container.OutputPath {
+                                       runner.HostOutputDir = src
+                                       runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
+                               } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
+                                       copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
+                               } else {
+                                       runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
+                               }
+                       } else {
+                               runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
+                       }
+                       collectionPaths = append(collectionPaths, src)
+
+               case mnt.Kind == "tmp":
+                       var tmpdir string
+                       tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
+                       if err != nil {
+                               return fmt.Errorf("While creating mount temp dir: %v", err)
+                       }
+                       st, staterr := os.Stat(tmpdir)
+                       if staterr != nil {
+                               return fmt.Errorf("While Stat on temp dir: %v", staterr)
+                       }
+                       err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
+                       if err != nil {
+                               return fmt.Errorf("While Chmod temp dir: %v", err)
+                       }
+                       runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
+                       if bind == runner.Container.OutputPath {
+                               runner.HostOutputDir = tmpdir
+                       }
+
+               case mnt.Kind == "json" || mnt.Kind == "text":
+                       var filedata []byte
+                       if mnt.Kind == "json" {
+                               filedata, err = json.Marshal(mnt.Content)
+                               if err != nil {
+                                       return fmt.Errorf("encoding json data: %v", err)
+                               }
+                       } else {
+                               text, ok := mnt.Content.(string)
+                               if !ok {
+                                       return fmt.Errorf("content for mount %q must be a string", bind)
+                               }
+                               filedata = []byte(text)
+                       }
+
+                       tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
+                       if err != nil {
+                               return fmt.Errorf("creating temp dir: %v", err)
+                       }
+                       tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
+                       err = ioutil.WriteFile(tmpfn, filedata, 0444)
+                       if err != nil {
+                               return fmt.Errorf("writing temp file: %v", err)
+                       }
+                       if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
+                               copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
+                       } else {
+                               runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
+                       }
+
+               case mnt.Kind == "git_tree":
+                       tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
+                       if err != nil {
+                               return fmt.Errorf("creating temp dir: %v", err)
+                       }
+                       err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
+                       if err != nil {
+                               return err
+                       }
+                       runner.Binds = append(runner.Binds, tmpdir+":"+bind+":ro")
+               }
+       }
+
+       if runner.HostOutputDir == "" {
+               return fmt.Errorf("Output path does not correspond to a writable mount point")
+       }
+
+       if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI {
+               for _, certfile := range arvadosclient.CertFiles {
+                       _, err := os.Stat(certfile)
+                       if err == nil {
+                               runner.Binds = append(runner.Binds, fmt.Sprintf("%s:/etc/arvados/ca-certificates.crt:ro", certfile))
+                               break
+                       }
+               }
+       }
+
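+       // If every collection mount is identified by portable data
+       // hash, the mounted content is immutable and arv-mount only
+       // needs to expose by_id/<pdh> paths; otherwise it must also
+       // support mounting by UUID.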
+       if pdhOnly {
+               arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
+       } else {
+               arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
+       }
+       arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
+
+       runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
+       if err != nil {
+               return fmt.Errorf("While trying to start arv-mount: %v", err)
+       }
+
+       for _, p := range collectionPaths {
+               _, err = os.Stat(p)
+               if err != nil {
+                       return fmt.Errorf("While checking that input files exist: %v", err)
+               }
+       }
+
+       for _, cp := range copyFiles {
+               st, err := os.Stat(cp.src)
+               if err != nil {
+                       return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+               }
+               if st.IsDir() {
+                       err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
+                               if walkerr != nil {
+                                       return walkerr
+                               }
+                               target := path.Join(cp.bind, walkpath[len(cp.src):])
+                               if walkinfo.Mode().IsRegular() {
+                                       copyerr := copyfile(walkpath, target)
+                                       if copyerr != nil {
+                                               return copyerr
+                                       }
+                                       return os.Chmod(target, walkinfo.Mode()|0777)
+                               } else if walkinfo.Mode().IsDir() {
+                                       mkerr := os.MkdirAll(target, 0777)
+                                       if mkerr != nil {
+                                               return mkerr
+                                       }
+                                       return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
+                               } else {
+                                       return fmt.Errorf("Source %q is not a regular file or directory", cp.src)
+                               }
+                       })
+               } else if st.Mode().IsRegular() {
+                       err = copyfile(cp.src, cp.bind)
+                       if err == nil {
+                               err = os.Chmod(cp.bind, st.Mode()|0777)
+                       }
+               }
+               if err != nil {
+                       return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+               }
+       }
+
+       return nil
+}
+
+func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
+       // Handle docker log protocol
+       // https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
+       defer close(runner.loggingDone)
+
+       header := make([]byte, 8)
+       var err error
+       for err == nil {
+               _, err = io.ReadAtLeast(containerReader, header, 8)
+               if err != nil {
+                       if err == io.EOF {
+                               err = nil
+                       }
+                       break
+               }
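+               // Each frame of the multiplexed docker stream starts
+               // with an 8-byte header: byte 0 is the stream type
+               // (1=stdout, 2=stderr) and bytes 4-7 are the payload
+               // size as a big-endian uint32.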
+               readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
+               if header[0] == 1 {
+                       // stdout
+                       _, err = io.CopyN(runner.Stdout, containerReader, readsize)
+               } else {
+                       // stderr
+                       _, err = io.CopyN(runner.Stderr, containerReader, readsize)
+               }
+       }
+
+       if err != nil {
+               runner.CrunchLog.Printf("error reading docker logs: %v", err)
+       }
+
+       err = runner.Stdout.Close()
+       if err != nil {
+               runner.CrunchLog.Printf("error closing stdout logs: %v", err)
+       }
+
+       err = runner.Stderr.Close()
+       if err != nil {
+               runner.CrunchLog.Printf("error closing stderr logs: %v", err)
+       }
+
+       if runner.statReporter != nil {
+               runner.statReporter.Stop()
+               err = runner.statLogger.Close()
+               if err != nil {
+                       runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
+               }
+       }
+}
+
+func (runner *ContainerRunner) stopHoststat() error {
+       if runner.hoststatReporter == nil {
+               return nil
+       }
+       runner.hoststatReporter.Stop()
+       err := runner.hoststatLogger.Close()
+       if err != nil {
+               return fmt.Errorf("error closing hoststat logs: %v", err)
+       }
+       return nil
+}
+
+func (runner *ContainerRunner) startHoststat() error {
+       w, err := runner.NewLogWriter("hoststat")
+       if err != nil {
+               return err
+       }
+       runner.hoststatLogger = NewThrottledLogger(w)
+       runner.hoststatReporter = &crunchstat.Reporter{
+               Logger:     log.New(runner.hoststatLogger, "", 0),
+               CgroupRoot: runner.cgroupRoot,
+               PollPeriod: runner.statInterval,
+       }
+       runner.hoststatReporter.Start()
+       return nil
+}
+
+func (runner *ContainerRunner) startCrunchstat() error {
+       w, err := runner.NewLogWriter("crunchstat")
+       if err != nil {
+               return err
+       }
+       runner.statLogger = NewThrottledLogger(w)
+       runner.statReporter = &crunchstat.Reporter{
+               CID:          runner.ContainerID,
+               Logger:       log.New(runner.statLogger, "", 0),
+               CgroupParent: runner.expectCgroupParent,
+               CgroupRoot:   runner.cgroupRoot,
+               PollPeriod:   runner.statInterval,
+               TempDir:      runner.parentTemp,
+       }
+       runner.statReporter.Start()
+       return nil
+}
+
+type infoCommand struct {
+       label string
+       cmd   []string
+}
+
+// LogHostInfo logs info about the current host, for debugging and
+// accounting purposes. Although it's logged as "node-info", this is
+// about the environment where crunch-run is actually running, which
+// might differ from what's described in the node record (see
+// LogNodeRecord).
+func (runner *ContainerRunner) LogHostInfo() (err error) {
+       w, err := runner.NewLogWriter("node-info")
+       if err != nil {
+               return
+       }
+
+       commands := []infoCommand{
+               {
+                       label: "Host Information",
+                       cmd:   []string{"uname", "-a"},
+               },
+               {
+                       label: "CPU Information",
+                       cmd:   []string{"cat", "/proc/cpuinfo"},
+               },
+               {
+                       label: "Memory Information",
+                       cmd:   []string{"cat", "/proc/meminfo"},
+               },
+               {
+                       label: "Disk Space",
+                       cmd:   []string{"df", "-m", "/", os.TempDir()},
+               },
+               {
+                       label: "Disk INodes",
+                       cmd:   []string{"df", "-i", "/", os.TempDir()},
+               },
+       }
+
+       // Run commands with informational output to be logged.
+       for _, command := range commands {
+               fmt.Fprintln(w, command.label)
+               cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
+               cmd.Stdout = w
+               cmd.Stderr = w
+               if err := cmd.Run(); err != nil {
+                       err = fmt.Errorf("While running command %q: %v", command.cmd, err)
+                       fmt.Fprintln(w, err)
+                       return err
+               }
+               fmt.Fprintln(w, "")
+       }
+
+       err = w.Close()
+       if err != nil {
+               return fmt.Errorf("While closing node-info logs: %v", err)
+       }
+       return nil
+}
+
+// LogContainerRecord gets and saves the raw JSON container record from the API server
+func (runner *ContainerRunner) LogContainerRecord() error {
+       logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
+       if !logged && err == nil {
+               err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
+       }
+       return err
+}
+
+// LogNodeRecord logs arvados#node record corresponding to the current host.
+func (runner *ContainerRunner) LogNodeRecord() error {
+       hostname := os.Getenv("SLURMD_NODENAME")
+       if hostname == "" {
+               hostname, _ = os.Hostname()
+       }
+       _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
+               // The "info" field has admin-only info when obtained
+               // with a privileged token, and should not be logged.
+               node, ok := resp.(map[string]interface{})
+               if ok {
+                       delete(node, "info")
+               }
+       })
+       return err
+}
+
+func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
+       writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
+       if err != nil {
+               return false, err
+       }
+       w := &ArvLogWriter{
+               ArvClient:     runner.DispatcherArvClient,
+               UUID:          runner.Container.UUID,
+               loggingStream: label,
+               writeCloser:   writer,
+       }
+
+       reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
+       if err != nil {
+               return false, fmt.Errorf("error getting %s record: %v", label, err)
+       }
+       defer reader.Close()
+
+       dec := json.NewDecoder(reader)
+       dec.UseNumber()
+       var resp map[string]interface{}
+       if err = dec.Decode(&resp); err != nil {
+               return false, fmt.Errorf("error decoding %s list response: %v", label, err)
+       }
+       items, ok := resp["items"].([]interface{})
+       if !ok {
+               return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
+       } else if len(items) < 1 {
+               return false, nil
+       }
+       if munge != nil {
+               munge(items[0])
+       }
+       // Re-encode it using indentation to improve readability
+       enc := json.NewEncoder(w)
+       enc.SetIndent("", "    ")
+       if err = enc.Encode(items[0]); err != nil {
+               return false, fmt.Errorf("error logging %s record: %v", label, err)
+       }
+       err = w.Close()
+       if err != nil {
+               return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
+       }
+       return true, nil
+}
+
+// AttachStreams connects the docker container stdin, stdout and stderr streams
+// to the Arvados logger, which logs to Keep and to the API server logs table.
+func (runner *ContainerRunner) AttachStreams() (err error) {
+
+       runner.CrunchLog.Print("Attaching container streams")
+
+       // If stdin mount is provided, attach it to the docker container
+       var stdinRdr arvados.File
+       var stdinJson []byte
+       if stdinMnt, ok := runner.Container.Mounts["stdin"]; ok {
+               if stdinMnt.Kind == "collection" {
+                       var stdinColl arvados.Collection
+                       collId := stdinMnt.UUID
+                       if collId == "" {
+                               collId = stdinMnt.PortableDataHash
+                       }
+                       err = runner.ContainerArvClient.Get("collections", collId, nil, &stdinColl)
+                       if err != nil {
+                               return fmt.Errorf("While getting stdin collection: %v", err)
+                       }
+
+                       stdinRdr, err = runner.ContainerKeepClient.ManifestFileReader(
+                               manifest.Manifest{Text: stdinColl.ManifestText},
+                               stdinMnt.Path)
+                       if os.IsNotExist(err) {
+                               return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path)
+                       } else if err != nil {
+                               return fmt.Errorf("While getting stdin collection path %v: %v", stdinMnt.Path, err)
+                       }
+               } else if stdinMnt.Kind == "json" {
+                       stdinJson, err = json.Marshal(stdinMnt.Content)
+                       if err != nil {
+                               return fmt.Errorf("While encoding stdin json data: %v", err)
+                       }
+               }
+       }
+
+       stdinUsed := stdinRdr != nil || len(stdinJson) != 0
+       response, err := runner.Docker.ContainerAttach(context.TODO(), runner.ContainerID,
+               dockertypes.ContainerAttachOptions{Stream: true, Stdin: stdinUsed, Stdout: true, Stderr: true})
+       if err != nil {
+               return fmt.Errorf("While attaching container stdout/stderr streams: %v", err)
+       }
+
+       runner.loggingDone = make(chan bool)
+
+       if stdoutMnt, ok := runner.Container.Mounts["stdout"]; ok {
+               stdoutFile, err := runner.getStdoutFile(stdoutMnt.Path)
+               if err != nil {
+                       return err
+               }
+               runner.Stdout = stdoutFile
+       } else if w, err := runner.NewLogWriter("stdout"); err != nil {
+               return err
+       } else {
+               runner.Stdout = NewThrottledLogger(w)
+       }
+
+       if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok {
+               stderrFile, err := runner.getStdoutFile(stderrMnt.Path)
+               if err != nil {
+                       return err
+               }
+               runner.Stderr = stderrFile
+       } else if w, err := runner.NewLogWriter("stderr"); err != nil {
+               return err
+       } else {
+               runner.Stderr = NewThrottledLogger(w)
+       }
+
+       if stdinRdr != nil {
+               go func() {
+                       _, err := io.Copy(response.Conn, stdinRdr)
+                       if err != nil {
+                               runner.CrunchLog.Printf("While writing stdin collection to docker container: %v", err)
+                               runner.stop(nil)
+                       }
+                       stdinRdr.Close()
+                       response.CloseWrite()
+               }()
+       } else if len(stdinJson) != 0 {
+               go func() {
+                       _, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
+                       if err != nil {
+                               runner.CrunchLog.Printf("While writing stdin json to docker container: %v", err)
+                               runner.stop(nil)
+                       }
+                       response.CloseWrite()
+               }()
+       }
+
+       go runner.ProcessDockerAttach(response.Reader)
+
+       return nil
+}
+
+func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
+       stdoutPath := mntPath[len(runner.Container.OutputPath):]
+       index := strings.LastIndex(stdoutPath, "/")
+       if index > 0 {
+               subdirs := stdoutPath[:index]
+               if subdirs != "" {
+                       st, err := os.Stat(runner.HostOutputDir)
+                       if err != nil {
+                               return nil, fmt.Errorf("While Stat on temp dir: %v", err)
+                       }
+                       stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
+                       err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
+                       if err != nil {
+                               return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
+                       }
+               }
+       }
+       stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
+       if err != nil {
+               return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
+       }
+
+       return stdoutFile, nil
+}
+
+// CreateContainer creates the docker container.
+func (runner *ContainerRunner) CreateContainer() error {
+       runner.CrunchLog.Print("Creating Docker container")
+
+       runner.ContainerConfig.Cmd = runner.Container.Command
+       if runner.Container.Cwd != "." {
+               runner.ContainerConfig.WorkingDir = runner.Container.Cwd
+       }
+
+       for k, v := range runner.Container.Environment {
+               runner.ContainerConfig.Env = append(runner.ContainerConfig.Env, k+"="+v)
+       }
+
+       runner.ContainerConfig.Volumes = runner.Volumes
+
+       maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
+       if maxRAM < 4*1024*1024 {
+               // Docker daemon won't let you set a limit less than 4 MiB
+               maxRAM = 4 * 1024 * 1024
+       }
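+       // Setting MemorySwap equal to Memory disables swap use, and
+       // KernelMemory caps kernel-side allocations at the same limit.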
+       runner.HostConfig = dockercontainer.HostConfig{
+               Binds: runner.Binds,
+               LogConfig: dockercontainer.LogConfig{
+                       Type: "none",
+               },
+               Resources: dockercontainer.Resources{
+                       CgroupParent: runner.setCgroupParent,
+                       NanoCPUs:     int64(runner.Container.RuntimeConstraints.VCPUs) * 1000000000,
+                       Memory:       maxRAM, // RAM
+                       MemorySwap:   maxRAM, // RAM+swap
+                       KernelMemory: maxRAM, // kernel portion
+               },
+       }
+
+       if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
+               tok, err := runner.ContainerToken()
+               if err != nil {
+                       return err
+               }
+               runner.ContainerConfig.Env = append(runner.ContainerConfig.Env,
+                       "ARVADOS_API_TOKEN="+tok,
+                       "ARVADOS_API_HOST="+os.Getenv("ARVADOS_API_HOST"),
+                       "ARVADOS_API_HOST_INSECURE="+os.Getenv("ARVADOS_API_HOST_INSECURE"),
+               )
+               runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
+       } else {
+               if runner.enableNetwork == "always" {
+                       runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
+               } else {
+                       runner.HostConfig.NetworkMode = dockercontainer.NetworkMode("none")
+               }
+       }
+
+       _, stdinUsed := runner.Container.Mounts["stdin"]
+       runner.ContainerConfig.OpenStdin = stdinUsed
+       runner.ContainerConfig.StdinOnce = stdinUsed
+       runner.ContainerConfig.AttachStdin = stdinUsed
+       runner.ContainerConfig.AttachStdout = true
+       runner.ContainerConfig.AttachStderr = true
+
+       createdBody, err := runner.Docker.ContainerCreate(context.TODO(), &runner.ContainerConfig, &runner.HostConfig, nil, runner.Container.UUID)
+       if err != nil {
+               return fmt.Errorf("While creating container: %v", err)
+       }
+
+       runner.ContainerID = createdBody.ID
+
+       return runner.AttachStreams()
+}
+
+// StartContainer starts the docker container created by CreateContainer.
+func (runner *ContainerRunner) StartContainer() error {
+       runner.CrunchLog.Printf("Starting Docker container id '%s'", runner.ContainerID)
+       runner.cStateLock.Lock()
+       defer runner.cStateLock.Unlock()
+       if runner.cCancelled {
+               return ErrCancelled
+       }
+       err := runner.Docker.ContainerStart(context.TODO(), runner.ContainerID,
+               dockertypes.ContainerStartOptions{})
+       if err != nil {
+               var advice string
+               if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
+                       advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
+               }
+               return fmt.Errorf("could not start container: %v%s", err, advice)
+       }
+       return nil
+}
+
+// WaitFinish waits for the container to terminate, captures the exit code,
+// and closes the stdout/stderr logging.
+func (runner *ContainerRunner) WaitFinish() error {
+       var runTimeExceeded <-chan time.Time
+       runner.CrunchLog.Print("Waiting for container to finish")
+
+       waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
+       arvMountExit := runner.ArvMountExit
+       if timeout := runner.Container.SchedulingParameters.MaxRunTime; timeout > 0 {
+               runTimeExceeded = time.After(time.Duration(timeout) * time.Second)
+       }
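+       // If MaxRunTime is not set, runTimeExceeded stays nil; receiving
+       // from a nil channel blocks forever, so that select case below
+       // simply never fires.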
+
+       containerGone := make(chan struct{})
+       go func() {
+               defer close(containerGone)
+               if runner.containerWatchdogInterval < 1 {
+                       runner.containerWatchdogInterval = time.Minute
+               }
+               for range time.NewTicker(runner.containerWatchdogInterval).C {
+                       ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(runner.containerWatchdogInterval))
+                       ctr, err := runner.Docker.ContainerInspect(ctx, runner.ContainerID)
+                       cancel()
+                       runner.cStateLock.Lock()
+                       done := runner.cRemoved || runner.ExitCode != nil
+                       runner.cStateLock.Unlock()
+                       if done {
+                               return
+                       } else if err != nil {
+                               runner.CrunchLog.Printf("Error inspecting container: %s", err)
+                               runner.checkBrokenNode(err)
+                               return
+                       } else if ctr.State == nil || !(ctr.State.Running || ctr.State.Status == "created") {
+                               runner.CrunchLog.Printf("Container is not running: State=%v", ctr.State)
+                               return
+                       }
+               }
+       }()
+
+       for {
+               select {
+               case waitBody := <-waitOk:
+                       runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
+                       code := int(waitBody.StatusCode)
+                       runner.ExitCode = &code
+
+                       // wait for stdout/stderr to complete
+                       <-runner.loggingDone
+                       return nil
+
+               case err := <-waitErr:
+                       return fmt.Errorf("container wait: %v", err)
+
+               case <-arvMountExit:
+                       runner.CrunchLog.Printf("arv-mount exited while container is still running.  Stopping container.")
+                       runner.stop(nil)
+                       // arvMountExit will always be ready now that
+                       // it's closed, but that doesn't interest us.
+                       arvMountExit = nil
+
+               case <-runTimeExceeded:
+                       runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
+                       runner.stop(nil)
+                       runTimeExceeded = nil
+
+               case <-containerGone:
+                       return errors.New("docker client never returned status")
+               }
+       }
+}
+
+func (runner *ContainerRunner) updateLogs() {
+       ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
+       defer ticker.Stop()
+
+       sigusr1 := make(chan os.Signal, 1)
+       signal.Notify(sigusr1, syscall.SIGUSR1)
+       defer signal.Stop(sigusr1)
+
+       saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
+       saveAtSize := crunchLogUpdateSize
+       var savedSize int64
+       for {
+               select {
+               case <-ticker.C:
+               case <-sigusr1:
+                       saveAtTime = time.Now()
+               }
+               runner.logMtx.Lock()
+               done := runner.LogsPDH != nil
+               runner.logMtx.Unlock()
+               if done {
+                       return
+               }
+               size := runner.LogCollection.Size()
+               if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
+                       continue
+               }
+               saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
+               saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
+               saved, err := runner.saveLogCollection(false)
+               if err != nil {
+                       runner.CrunchLog.Printf("error updating log collection: %s", err)
+                       continue
+               }
+
+               var updated arvados.Container
+               err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
+                       "container": arvadosclient.Dict{"log": saved.PortableDataHash},
+               }, &updated)
+               if err != nil {
+                       runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
+                       continue
+               }
+
+               savedSize = size
+       }
+}
+
+// CaptureOutput saves data from the container's output directory if
+// needed, and updates the container output accordingly.
+func (runner *ContainerRunner) CaptureOutput() error {
+       if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
+               // Output may have been set directly by the container, so
+               // refresh the container record to check.
+               err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
+                       nil, &runner.Container)
+               if err != nil {
+                       return err
+               }
+               if runner.Container.Output != "" {
+                       // Container output is already set.
+                       runner.OutputPDH = &runner.Container.Output
+                       return nil
+               }
+       }
+
+       txt, err := (&copier{
+               client:        runner.containerClient,
+               arvClient:     runner.ContainerArvClient,
+               keepClient:    runner.ContainerKeepClient,
+               hostOutputDir: runner.HostOutputDir,
+               ctrOutputDir:  runner.Container.OutputPath,
+               binds:         runner.Binds,
+               mounts:        runner.Container.Mounts,
+               secretMounts:  runner.SecretMounts,
+               logger:        runner.CrunchLog,
+       }).Copy()
+       if err != nil {
+               return err
+       }
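+       // Locators with a "+R" hint refer to blocks stored on a remote
+       // cluster. Round-tripping the manifest through a collection
+       // filesystem rewrites them as local locators, copying the block
+       // data if necessary.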
+       if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
+               runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
+               fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
+               if err != nil {
+                       return err
+               }
+               txt, err = fs.MarshalManifest(".")
+               if err != nil {
+                       return err
+               }
+       }
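+       // The collection is created already trashed: only its portable
+       // data hash is needed here, and the user-facing output
+       // collection is created separately from that PDH.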
+       var resp arvados.Collection
+       err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
+               "ensure_unique_name": true,
+               "collection": arvadosclient.Dict{
+                       "is_trashed":    true,
+                       "name":          "output for " + runner.Container.UUID,
+                       "manifest_text": txt,
+               },
+       }, &resp)
+       if err != nil {
+               return fmt.Errorf("error creating output collection: %v", err)
+       }
+       runner.OutputPDH = &resp.PortableDataHash
+       return nil
+}
+
+func (runner *ContainerRunner) CleanupDirs() {
+       if runner.ArvMount != nil {
+               var delay int64 = 8
+               umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
+               umount.Stdout = runner.CrunchLog
+               umount.Stderr = runner.CrunchLog
+               runner.CrunchLog.Printf("Running %v", umount.Args)
+               umnterr := umount.Start()
+
+               if umnterr != nil {
+                       runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
+               } else {
+                       // If arv-mount --unmount gets stuck for any reason, we
+                       // don't want to wait for it forever.  Do Wait() in a goroutine
+                       // so it doesn't block crunch-run.
+                       umountExit := make(chan error)
+                       go func() {
+                               mnterr := umount.Wait()
+                               if mnterr != nil {
+                                       runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
+                               }
+                               umountExit <- mnterr
+                       }()
+
+                       for again := true; again; {
+                               again = false
+                               select {
+                               case <-umountExit:
+                                       umount = nil
+                                       again = true
+                               case <-runner.ArvMountExit:
+                                       break
+                               case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
+                                       runner.CrunchLog.Printf("Timed out waiting for unmount")
+                                       if umount != nil {
+                                               umount.Process.Kill()
+                                       }
+                                       runner.ArvMount.Process.Kill()
+                               }
+                       }
+               }
+       }
+
+       if runner.ArvMountPoint != "" {
+               if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
+                       runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
+               }
+       }
+
+       if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
+               runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
+       }
+}
+
+// CommitLogs posts the collection containing the final container logs.
+func (runner *ContainerRunner) CommitLogs() error {
+       func() {
+               // Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
+               runner.cStateLock.Lock()
+               defer runner.cStateLock.Unlock()
+
+               runner.CrunchLog.Print(runner.finalState)
+
+               if runner.arvMountLog != nil {
+                       runner.arvMountLog.Close()
+               }
+               runner.CrunchLog.Close()
+
+               // Closing CrunchLog above allows the log data to be
+               // committed to Keep at this point, but re-open the
+               // crunch log with ArvClient so that any further errors
+               // while shutting down (such as failing to write the log
+               // to Keep!) can still be reported.
+               runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
+                       ArvClient:     runner.DispatcherArvClient,
+                       UUID:          runner.Container.UUID,
+                       loggingStream: "crunch-run",
+                       writeCloser:   nil,
+               })
+               runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
+       }()
+
+       if runner.LogsPDH != nil {
+               // If we have already assigned something to LogsPDH,
+               // we must be closing the re-opened log, which won't
+               // end up getting attached to the container record and
+               // therefore doesn't need to be saved as a collection
+               // -- it exists only to send logs to other channels.
+               return nil
+       }
+       saved, err := runner.saveLogCollection(true)
+       if err != nil {
+               return fmt.Errorf("error saving log collection: %s", err)
+       }
+       runner.logMtx.Lock()
+       defer runner.logMtx.Unlock()
+       runner.LogsPDH = &saved.PortableDataHash
+       return nil
+}
+
+func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
+       runner.logMtx.Lock()
+       defer runner.logMtx.Unlock()
+       if runner.LogsPDH != nil {
+               // Already finalized.
+               return
+       }
+       mt, err := runner.LogCollection.MarshalManifest(".")
+       if err != nil {
+               err = fmt.Errorf("error creating log manifest: %v", err)
+               return
+       }
+       updates := arvadosclient.Dict{
+               "name":          "logs for " + runner.Container.UUID,
+               "manifest_text": mt,
+       }
+       if final {
+               updates["is_trashed"] = true
+       } else {
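+               // For periodic (non-final) saves, set an expiry so that
+               // partial logs from an abandoned run get trashed
+               // automatically.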
+               exp := time.Now().Add(crunchLogUpdatePeriod * 24)
+               updates["trash_at"] = exp
+               updates["delete_at"] = exp
+       }
+       reqBody := arvadosclient.Dict{"collection": updates}
+       if runner.logUUID == "" {
+               reqBody["ensure_unique_name"] = true
+               err = runner.DispatcherArvClient.Create("collections", reqBody, &response)
+       } else {
+               err = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
+       }
+       if err != nil {
+               return
+       }
+       runner.logUUID = response.UUID
+       return
+}
+
+// UpdateContainerRunning updates the container state to "Running"
+func (runner *ContainerRunner) UpdateContainerRunning() error {
+       runner.cStateLock.Lock()
+       defer runner.cStateLock.Unlock()
+       if runner.cCancelled {
+               return ErrCancelled
+       }
+       return runner.DispatcherArvClient.Update("containers", runner.Container.UUID,
+               arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, nil)
+}
+
+// ContainerToken returns the api_token the container (and any
+// arv-mount processes) are allowed to use.
+func (runner *ContainerRunner) ContainerToken() (string, error) {
+       if runner.token != "" {
+               return runner.token, nil
+       }
+
+       var auth arvados.APIClientAuthorization
+       err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
+       if err != nil {
+               return "", err
+       }
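+       // Assemble the "v2" format token: the API client authorization
+       // UUID, the token secret, and the UUID of the container the
+       // token is tied to.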
+       runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
+       return runner.token, nil
+}
+
+// UpdateContainerFinal updates the container record state on the API
+// server to "Complete" or "Cancelled".
+func (runner *ContainerRunner) UpdateContainerFinal() error {
+       update := arvadosclient.Dict{}
+       update["state"] = runner.finalState
+       if runner.LogsPDH != nil {
+               update["log"] = *runner.LogsPDH
+       }
+       if runner.finalState == "Complete" {
+               if runner.ExitCode != nil {
+                       update["exit_code"] = *runner.ExitCode
+               }
+               if runner.OutputPDH != nil {
+                       update["output"] = *runner.OutputPDH
+               }
+       }
+       return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
+}
+
+// IsCancelled returns the value of cCancelled, with goroutine safety.
+func (runner *ContainerRunner) IsCancelled() bool {
+       runner.cStateLock.Lock()
+       defer runner.cStateLock.Unlock()
+       return runner.cCancelled
+}
+
+// NewArvLogWriter creates an ArvLogWriter
+func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
+       writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
+       if err != nil {
+               return nil, err
+       }
+       return &ArvLogWriter{
+               ArvClient:     runner.DispatcherArvClient,
+               UUID:          runner.Container.UUID,
+               loggingStream: name,
+               writeCloser:   writer,
+       }, nil
+}
+
+// Run the full container lifecycle.
+func (runner *ContainerRunner) Run() (err error) {
+       runner.CrunchLog.Printf("crunch-run %s started", version)
+       runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)
+
+       hostname, hosterr := os.Hostname()
+       if hosterr != nil {
+               runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
+       } else {
+               runner.CrunchLog.Printf("Executing on host '%s'", hostname)
+       }
+
+       runner.finalState = "Queued"
+
+       defer func() {
+               runner.CleanupDirs()
+
+               runner.CrunchLog.Printf("crunch-run finished")
+               runner.CrunchLog.Close()
+       }()
+
+       err = runner.fetchContainerRecord()
+       if err != nil {
+               return
+       }
+       if runner.Container.State != "Locked" {
+               return fmt.Errorf("dispatch error detected: container %q has state %q", runner.Container.UUID, runner.Container.State)
+       }
+
+       defer func() {
+               // checkErr prints e (unless it's nil) and sets err to
+               // e (unless err is already non-nil). Thus, if err
+               // hasn't already been assigned when Run() returns,
+               // this cleanup func will cause Run() to return the
+               // first non-nil error that is passed to checkErr().
+               checkErr := func(errorIn string, e error) {
+                       if e == nil {
+                               return
+                       }
+                       runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
+                       if err == nil {
+                               err = e
+                       }
+                       if runner.finalState == "Complete" {
+                               // There was an error in the finalization.
+                               runner.finalState = "Cancelled"
+                       }
+               }
+
+               // Log the error encountered in Run(), if any
+               checkErr("Run", err)
+
+               if runner.finalState == "Queued" {
+                       runner.UpdateContainerFinal()
+                       return
+               }
+
+               if runner.IsCancelled() {
+                       runner.finalState = "Cancelled"
+                       // but don't return yet -- we still want to
+                       // capture partial output and write logs
+               }
+
+               checkErr("CaptureOutput", runner.CaptureOutput())
+               checkErr("stopHoststat", runner.stopHoststat())
+               checkErr("CommitLogs", runner.CommitLogs())
+               checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
+       }()
+
+       runner.setupSignals()
+       err = runner.startHoststat()
+       if err != nil {
+               return
+       }
+
+       // check for and/or load image
+       err = runner.LoadImage()
+       if err != nil {
+               if !runner.checkBrokenNode(err) {
+                       // Failed to load image but not due to a "broken node"
+                       // condition, probably user error.
+                       runner.finalState = "Cancelled"
+               }
+               err = fmt.Errorf("While loading container image: %v", err)
+               return
+       }
+
+       // set up FUSE mount and binds
+       err = runner.SetupMounts()
+       if err != nil {
+               runner.finalState = "Cancelled"
+               err = fmt.Errorf("While setting up mounts: %v", err)
+               return
+       }
+
+       err = runner.CreateContainer()
+       if err != nil {
+               return
+       }
+       err = runner.LogHostInfo()
+       if err != nil {
+               return
+       }
+       err = runner.LogNodeRecord()
+       if err != nil {
+               return
+       }
+       err = runner.LogContainerRecord()
+       if err != nil {
+               return
+       }
+
+       if runner.IsCancelled() {
+               return
+       }
+
+       err = runner.UpdateContainerRunning()
+       if err != nil {
+               return
+       }
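+       // From here on, assume "Cancelled" as the final state; it is
+       // upgraded to "Complete" below only if the container runs to
+       // completion without being cancelled.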
+       runner.finalState = "Cancelled"
+
+       err = runner.startCrunchstat()
+       if err != nil {
+               return
+       }
+
+       err = runner.StartContainer()
+       if err != nil {
+               runner.checkBrokenNode(err)
+               return
+       }
+
+       err = runner.WaitFinish()
+       if err == nil && !runner.IsCancelled() {
+               runner.finalState = "Complete"
+       }
+       return
+}
+
+// Fetch the current container record (uuid = runner.Container.UUID)
+// into runner.Container.
+func (runner *ContainerRunner) fetchContainerRecord() error {
+       reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
+       if err != nil {
+               return fmt.Errorf("error fetching container record: %v", err)
+       }
+       defer reader.Close()
+
+       dec := json.NewDecoder(reader)
+       dec.UseNumber()
+       err = dec.Decode(&runner.Container)
+       if err != nil {
+               return fmt.Errorf("error decoding container record: %v", err)
+       }
+
+       var sm struct {
+               SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
+       }
+
+       containerToken, err := runner.ContainerToken()
+       if err != nil {
+               return fmt.Errorf("error getting container token: %v", err)
+       }
+
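+       // Build API and Keep clients that authenticate with the
+       // container token (rather than the dispatcher token); these
+       // are used to fetch secret_mounts and to save output.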
+       runner.ContainerArvClient, runner.ContainerKeepClient,
+               runner.containerClient, err = runner.MkArvClient(containerToken)
+       if err != nil {
+               return fmt.Errorf("error creating container API client: %v", err)
+       }
+
+       err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
+       if err != nil {
+               if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
+                       return fmt.Errorf("error fetching secret_mounts: %v", err)
+               }
+               // ok && apierr.HttpStatusCode == 404, which means
+               // secret_mounts isn't supported by this API server.
+       }
+       runner.SecretMounts = sm.SecretMounts
+
+       return nil
+}
+
+// NewContainerRunner creates a new container runner.
+func NewContainerRunner(dispatcherClient *arvados.Client,
+       dispatcherArvClient IArvadosClient,
+       dispatcherKeepClient IKeepClient,
+       docker ThinDockerClient,
+       containerUUID string) (*ContainerRunner, error) {
+
+       cr := &ContainerRunner{
+               dispatcherClient:     dispatcherClient,
+               DispatcherArvClient:  dispatcherArvClient,
+               DispatcherKeepClient: dispatcherKeepClient,
+               Docker:               docker,
+       }
+       cr.NewLogWriter = cr.NewArvLogWriter
+       cr.RunArvMount = cr.ArvMountCmd
+       cr.MkTempDir = ioutil.TempDir
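+       // MkArvClient builds an API/Keep client set that authenticates
+       // with the given token; it is invoked later with the container
+       // token (see fetchContainerRecord).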
+       cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
+               cl, err := arvadosclient.MakeArvadosClient()
+               if err != nil {
+                       return nil, nil, nil, err
+               }
+               cl.ApiToken = token
+               kc, err := keepclient.MakeKeepClient(cl)
+               if err != nil {
+                       return nil, nil, nil, err
+               }
+               c2 := arvados.NewClientFromEnv()
+               c2.AuthToken = token
+               return cl, kc, c2, nil
+       }
+       var err error
+       cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
+       if err != nil {
+               return nil, err
+       }
+       cr.Container.UUID = containerUUID
+       w, err := cr.NewLogWriter("crunch-run")
+       if err != nil {
+               return nil, err
+       }
+       cr.CrunchLog = NewThrottledLogger(w)
+       cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
+
+       loadLogThrottleParams(dispatcherArvClient)
+       go cr.updateLogs()
+
+       return cr, nil
+}
+
+func main() {
+       statInterval := flag.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
+       cgroupRoot := flag.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
+       cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
+       cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
+       caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
+       detach := flag.Bool("detach", false, "Detach from parent process and run in the background")
+       stdinEnv := flag.Bool("stdin-env", false, "Load environment variables from JSON message on stdin")
+       sleep := flag.Duration("sleep", 0, "Delay before starting (testing use only)")
+       kill := flag.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
+       list := flag.Bool("list", false, "List UUIDs of existing crunch-run processes")
+       enableNetwork := flag.String("container-enable-networking", "default",
+               `Specify if networking should be enabled for container.  One of 'default', 'always':
+       default: only enable networking if container requests it.
+       always:  containers always have networking enabled
+       `)
+       networkMode := flag.String("container-network-mode", "default",
+               `Set networking mode for container.  Corresponds to Docker network mode (--net).
+       `)
+       memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
+       getVersion := flag.Bool("version", false, "Print version information and exit.")
+       flag.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
+
+       ignoreDetachFlag := false
+       if len(os.Args) > 1 && os.Args[1] == "-no-detach" {
+               // This process was invoked by a parent process, which
+               // has passed along its own arguments, including
+               // -detach, after the leading -no-detach flag.  Strip
+               // the leading -no-detach flag (it's not recognized by
+               // flag.Parse()) and ignore the -detach flag that
+               // comes later.
+               os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
+               ignoreDetachFlag = true
+       }
+
+       flag.Parse()
+
+       if *stdinEnv && !ignoreDetachFlag {
+               // Load env vars on stdin if asked (but not in a
+               // detached child process, in which case stdin is
+               // /dev/null).
+               loadEnv(os.Stdin)
+       }
+
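+       // Handle process-management modes before anything else:
+       // detach into the background, signal an existing crunch-run
+       // process, or list running processes.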
+       switch {
+       case *detach && !ignoreDetachFlag:
+               os.Exit(Detach(flag.Arg(0), os.Args, os.Stdout, os.Stderr))
+       case *kill >= 0:
+               os.Exit(KillProcess(flag.Arg(0), syscall.Signal(*kill), os.Stdout, os.Stderr))
+       case *list:
+               os.Exit(ListProcesses(os.Stdout, os.Stderr))
+       }
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("crunch-run %s\n", version)
+               return
+       }
+
+       log.Printf("crunch-run %s started", version)
+       time.Sleep(*sleep)
+
+       containerId := flag.Arg(0)
+
+       if *caCertsPath != "" {
+               arvadosclient.CertFiles = []string{*caCertsPath}
+       }
+
+       api, err := arvadosclient.MakeArvadosClient()
+       if err != nil {
+               log.Fatalf("%s: %v", containerId, err)
+       }
+       api.Retries = 8
+
+       kc, kcerr := keepclient.MakeKeepClient(api)
+       if kcerr != nil {
+               log.Fatalf("%s: %v", containerId, kcerr)
+       }
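+       // A small block cache should suffice here, since container
+       // image and input data are mostly streamed through once.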
+       kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
+       kc.Retries = 4
+
+       // API version 1.21 corresponds to Docker 1.9, which is currently the
+       // minimum version we want to support.
+       docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
+
+       cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, docker, containerId)
+       if err != nil {
+               log.Fatal(err)
+       }
+       if dockererr != nil {
+               cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
+               cr.checkBrokenNode(dockererr)
+               cr.CrunchLog.Close()
+               os.Exit(1)
+       }
+
+       parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerId+".")
+       if tmperr != nil {
+               log.Fatalf("%s: %v", containerId, tmperr)
+       }
+
+       cr.parentTemp = parentTemp
+       cr.statInterval = *statInterval
+       cr.cgroupRoot = *cgroupRoot
+       cr.expectCgroupParent = *cgroupParent
+       cr.enableNetwork = *enableNetwork
+       cr.networkMode = *networkMode
+       if *cgroupParentSubsystem != "" {
+               p := findCgroup(*cgroupParentSubsystem)
+               cr.setCgroupParent = p
+               cr.expectCgroupParent = p
+       }
+
+       runerr := cr.Run()
+
+       if *memprofile != "" {
+               f, err := os.Create(*memprofile)
+               if err != nil {
+                       log.Printf("could not create memory profile: %s", err)
+               } else {
+                       runtime.GC() // get up-to-date statistics
+                       if err := pprof.WriteHeapProfile(f); err != nil {
+                               log.Printf("could not write memory profile: %s", err)
+                       }
+                       if closeerr := f.Close(); closeerr != nil {
+                               log.Printf("closing memprofile file: %s", closeerr)
+                       }
+               }
+       }
+
+       if runerr != nil {
+               log.Fatalf("%s: %v", containerId, runerr)
+       }
+}
+
+func loadEnv(rdr io.Reader) {
+       buf, err := ioutil.ReadAll(rdr)
+       if err != nil {
+               log.Fatalf("read stdin: %s", err)
+       }
+       var env map[string]string
+       err = json.Unmarshal(buf, &env)
+       if err != nil {
+               log.Fatalf("decode stdin: %s", err)
+       }
+       for k, v := range env {
+               err = os.Setenv(k, v)
+               if err != nil {
+                       log.Fatalf("setenv(%q): %s", k, err)
+               }
+       }
+}
diff --git a/services/crunch-run/crunchrun_test.go b/services/crunch-run/crunchrun_test.go
new file mode 100644 (file)
index 0000000..17e5e14
--- /dev/null
@@ -0,0 +1,2213 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bufio"
+       "bytes"
+       "crypto/md5"
+       "encoding/json"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net"
+       "os"
+       "os/exec"
+       "runtime/pprof"
+       "sort"
+       "strings"
+       "sync"
+       "syscall"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/manifest"
+       "golang.org/x/net/context"
+
+       dockertypes "github.com/docker/docker/api/types"
+       dockercontainer "github.com/docker/docker/api/types/container"
+       dockernetwork "github.com/docker/docker/api/types/network"
+       . "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func TestCrunchExec(t *testing.T) {
+       TestingT(t)
+}
+
+// Gocheck boilerplate
+var _ = Suite(&TestSuite{})
+
+type TestSuite struct {
+       client *arvados.Client
+       docker *TestDockerClient
+       runner *ContainerRunner
+}
+
+func (s *TestSuite) SetUpTest(c *C) {
+       s.client = arvados.NewClientFromEnv()
+       s.docker = NewTestDockerClient()
+}
+
+type ArvTestClient struct {
+       Total   int64
+       Calls   int
+       Content []arvadosclient.Dict
+       arvados.Container
+       secretMounts []byte
+       Logs         map[string]*bytes.Buffer
+       sync.Mutex
+       WasSetRunning bool
+       callraw       bool
+}
+
+type KeepTestClient struct {
+       Called  bool
+       Content []byte
+}
+
+var hwManifest = ". 82ab40c24fc8df01798e57ba66795bb1+841216+Aa124ac75e5168396c73c0a18eda641a4f41791c0@569fa8c3 0:841216:9c31ee32b3d15268a0754e8edc74d4f815ee014b693bc5109058e431dd5caea7.tar\n"
+var hwPDH = "a45557269dcb65a6b78f9ac061c0850b+120"
+var hwImageId = "9c31ee32b3d15268a0754e8edc74d4f815ee014b693bc5109058e431dd5caea7"
+
+var otherManifest = ". 68a84f561b1d1708c6baff5e019a9ab3+46+Ae5d0af96944a3690becb1decdf60cc1c937f556d@5693216f 0:46:md5sum.txt\n"
+var otherPDH = "a3e8f74c6f101eae01fa08bfb4e49b3a+54"
+
+var normalizedManifestWithSubdirs = `. 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 0:9:file1_in_main.txt 9:18:file2_in_main.txt 0:27:zzzzz-8i9sb-bcdefghijkdhvnk.log.txt
+./subdir1 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 0:9:file1_in_subdir1.txt 9:18:file2_in_subdir1.txt
+./subdir1/subdir2 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 0:9:file1_in_subdir2.txt 9:18:file2_in_subdir2.txt
+`
+
+var normalizedWithSubdirsPDH = "a0def87f80dd594d4675809e83bd4f15+367"
+
+var denormalizedManifestWithSubdirs = ". 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 0:9:file1_in_main.txt 9:18:file2_in_main.txt 0:27:zzzzz-8i9sb-bcdefghijkdhvnk.log.txt 0:10:subdir1/file1_in_subdir1.txt 10:17:subdir1/file2_in_subdir1.txt\n"
+var denormalizedWithSubdirsPDH = "b0def87f80dd594d4675809e83bd4f15+367"
+
+var fakeAuthUUID = "zzzzz-gj3su-55pqoyepgi2glem"
+var fakeAuthToken = "a3ltuwzqcu2u4sc0q7yhpc2w7s00fdcqecg5d6e0u3pfohmbjt"
+
+type TestDockerClient struct {
+       imageLoaded string
+       logReader   io.ReadCloser
+       logWriter   io.WriteCloser
+       fn          func(t *TestDockerClient)
+       exitCode    int
+       stop        chan bool
+       cwd         string
+       env         []string
+       api         *ArvTestClient
+       realTemp    string
+       calledWait  bool
+       ctrExited   bool
+}
+
+func NewTestDockerClient() *TestDockerClient {
+       t := &TestDockerClient{}
+       t.logReader, t.logWriter = io.Pipe()
+       t.stop = make(chan bool, 1)
+       t.cwd = "/"
+       return t
+}
+
+type MockConn struct {
+       net.Conn
+}
+
+func (m *MockConn) Write(b []byte) (int, error) {
+       return len(b), nil
+}
+
+func NewMockConn() *MockConn {
+       c := &MockConn{}
+       return c
+}
+
+func (t *TestDockerClient) ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error) {
+       return dockertypes.HijackedResponse{Conn: NewMockConn(), Reader: bufio.NewReader(t.logReader)}, nil
+}
+
+func (t *TestDockerClient) ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig, networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error) {
+       if config.WorkingDir != "" {
+               t.cwd = config.WorkingDir
+       }
+       t.env = config.Env
+       return dockercontainer.ContainerCreateCreatedBody{ID: "abcde"}, nil
+}
+
+func (t *TestDockerClient) ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error {
+       if t.exitCode == 3 {
+               return errors.New(`Error response from daemon: oci runtime error: container_linux.go:247: starting container process caused "process_linux.go:359: container init caused \"rootfs_linux.go:54: mounting \\\"/tmp/keep453790790/by_id/99999999999999999999999999999999+99999/myGenome\\\" to rootfs \\\"/tmp/docker/overlay2/9999999999999999999999999999999999999999999999999999999999999999/merged\\\" at \\\"/tmp/docker/overlay2/9999999999999999999999999999999999999999999999999999999999999999/merged/keep/99999999999999999999999999999999+99999/myGenome\\\" caused \\\"no such file or directory\\\"\""`)
+       }
+       if t.exitCode == 4 {
+               return errors.New(`panic: standard_init_linux.go:175: exec user process caused "no such file or directory"`)
+       }
+       if t.exitCode == 5 {
+               return errors.New(`Error response from daemon: Cannot start container 41f26cbc43bcc1280f4323efb1830a394ba8660c9d1c2b564ba42bf7f7694845: [8] System error: no such file or directory`)
+       }
+       if t.exitCode == 6 {
+               return errors.New(`Error response from daemon: Cannot start container 58099cd76c834f3dc2a4fb76c8028f049ae6d4fdf0ec373e1f2cfea030670c2d: [8] System error: exec: "foobar": executable file not found in $PATH`)
+       }
+
+       if container == "abcde" {
+               // t.fn gets executed in ContainerWait
+               return nil
+       } else {
+               return errors.New("Invalid container id")
+       }
+}
+
+func (t *TestDockerClient) ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error {
+       t.stop <- true
+       return nil
+}
+
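+// ContainerWait runs the test hook t.fn, which simulates the container
+// process and writes its log output, then reports the configured exit
+// code on the returned channel.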
+func (t *TestDockerClient) ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error) {
+       t.calledWait = true
+       body := make(chan dockercontainer.ContainerWaitOKBody, 1)
+       err := make(chan error)
+       go func() {
+               t.fn(t)
+               body <- dockercontainer.ContainerWaitOKBody{StatusCode: int64(t.exitCode)}
+       }()
+       return body, err
+}
+
+func (t *TestDockerClient) ContainerInspect(ctx context.Context, id string) (c dockertypes.ContainerJSON, err error) {
+       c.ContainerJSONBase = &dockertypes.ContainerJSONBase{}
+       c.ID = "abcde"
+       if t.ctrExited {
+               c.State = &dockertypes.ContainerState{Status: "exited", Dead: true}
+       } else {
+               c.State = &dockertypes.ContainerState{Status: "running", Pid: 1234, Running: true}
+       }
+       return
+}
+
+func (t *TestDockerClient) ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) {
+       if t.exitCode == 2 {
+               return dockertypes.ImageInspect{}, nil, fmt.Errorf("Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?")
+       }
+
+       if t.imageLoaded == image {
+               return dockertypes.ImageInspect{}, nil, nil
+       } else {
+               return dockertypes.ImageInspect{}, nil, errors.New("")
+       }
+}
+
+func (t *TestDockerClient) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) {
+       if t.exitCode == 2 {
+               return dockertypes.ImageLoadResponse{}, fmt.Errorf("Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?")
+       }
+       _, err := io.Copy(ioutil.Discard, input)
+       if err != nil {
+               return dockertypes.ImageLoadResponse{}, err
+       } else {
+               t.imageLoaded = hwImageId
+               return dockertypes.ImageLoadResponse{Body: ioutil.NopCloser(input)}, nil
+       }
+}
+
+func (*TestDockerClient) ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
+       return nil, nil
+}
+
+func (client *ArvTestClient) Create(resourceType string,
+       parameters arvadosclient.Dict,
+       output interface{}) error {
+
+       client.Mutex.Lock()
+       defer client.Mutex.Unlock()
+
+       client.Calls++
+       client.Content = append(client.Content, parameters)
+
+       if resourceType == "logs" {
+               et := parameters["log"].(arvadosclient.Dict)["event_type"].(string)
+               if client.Logs == nil {
+                       client.Logs = make(map[string]*bytes.Buffer)
+               }
+               if client.Logs[et] == nil {
+                       client.Logs[et] = &bytes.Buffer{}
+               }
+               client.Logs[et].Write([]byte(parameters["log"].(arvadosclient.Dict)["properties"].(map[string]string)["text"]))
+       }
+
+       if resourceType == "collections" && output != nil {
+               mt := parameters["collection"].(arvadosclient.Dict)["manifest_text"].(string)
+               outmap := output.(*arvados.Collection)
+               outmap.PortableDataHash = fmt.Sprintf("%x+%d", md5.Sum([]byte(mt)), len(mt))
+               outmap.UUID = fmt.Sprintf("zzzzz-4zz18-%15.15x", md5.Sum([]byte(mt)))
+       }
+
+       return nil
+}
+
+func (client *ArvTestClient) Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error {
+       switch {
+       case method == "GET" && resourceType == "containers" && action == "auth":
+               return json.Unmarshal([]byte(`{
+                       "kind": "arvados#api_client_authorization",
+                       "uuid": "`+fakeAuthUUID+`",
+                       "api_token": "`+fakeAuthToken+`"
+                       }`), output)
+       case method == "GET" && resourceType == "containers" && action == "secret_mounts":
+               if client.secretMounts != nil {
+                       return json.Unmarshal(client.secretMounts, output)
+               } else {
+                       return json.Unmarshal([]byte(`{"secret_mounts":{}}`), output)
+               }
+       default:
+               return fmt.Errorf("Not found")
+       }
+}
+
+func (client *ArvTestClient) CallRaw(method, resourceType, uuid, action string,
+       parameters arvadosclient.Dict) (reader io.ReadCloser, err error) {
+       var j []byte
+       if method == "GET" && resourceType == "nodes" && uuid == "" && action == "" {
+               j = []byte(`{
+                       "kind": "arvados#nodeList",
+                       "items": [{
+                               "uuid": "zzzzz-7ekkf-2z3mc76g2q73aio",
+                               "hostname": "compute2",
+                               "properties": {"total_cpu_cores": 16}
+                       }]}`)
+       } else if method == "GET" && resourceType == "containers" && action == "" && !client.callraw {
+               if uuid == "" {
+                       j, err = json.Marshal(map[string]interface{}{
+                               "items": []interface{}{client.Container},
+                               "kind":  "arvados#nodeList",
+                       })
+               } else {
+                       j, err = json.Marshal(client.Container)
+               }
+       } else {
+               j = []byte(`{
+                       "command": ["sleep", "1"],
+                       "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+                       "cwd": ".",
+                       "environment": {},
+                       "mounts": {"/tmp": {"kind": "tmp"}, "/json": {"kind": "json", "content": {"number": 123456789123456789}}},
+                       "output_path": "/tmp",
+                       "priority": 1,
+                       "runtime_constraints": {}
+               }`)
+       }
+       return ioutil.NopCloser(bytes.NewReader(j)), err
+}
+
+func (client *ArvTestClient) Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error {
+       if resourceType == "collections" {
+               if uuid == hwPDH {
+                       output.(*arvados.Collection).ManifestText = hwManifest
+               } else if uuid == otherPDH {
+                       output.(*arvados.Collection).ManifestText = otherManifest
+               } else if uuid == normalizedWithSubdirsPDH {
+                       output.(*arvados.Collection).ManifestText = normalizedManifestWithSubdirs
+               } else if uuid == denormalizedWithSubdirsPDH {
+                       output.(*arvados.Collection).ManifestText = denormalizedManifestWithSubdirs
+               }
+       }
+       if resourceType == "containers" {
+               (*output.(*arvados.Container)) = client.Container
+       }
+       return nil
+}
+
+func (client *ArvTestClient) Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error) {
+       client.Mutex.Lock()
+       defer client.Mutex.Unlock()
+       client.Calls++
+       client.Content = append(client.Content, parameters)
+       if resourceType == "containers" {
+               if parameters["container"].(arvadosclient.Dict)["state"] == "Running" {
+                       client.WasSetRunning = true
+               }
+       } else if resourceType == "collections" {
+               mt := parameters["collection"].(arvadosclient.Dict)["manifest_text"].(string)
+               output.(*arvados.Collection).UUID = uuid
+               output.(*arvados.Collection).PortableDataHash = fmt.Sprintf("%x", md5.Sum([]byte(mt)))
+       }
+       return nil
+}
+
+var discoveryMap = map[string]interface{}{
+       "defaultTrashLifetime":               float64(1209600),
+       "crunchLimitLogBytesPerJob":          float64(67108864),
+       "crunchLogThrottleBytes":             float64(65536),
+       "crunchLogThrottlePeriod":            float64(60),
+       "crunchLogThrottleLines":             float64(1024),
+       "crunchLogPartialLineThrottlePeriod": float64(5),
+       "crunchLogBytesPerEvent":             float64(4096),
+       "crunchLogSecondsBetweenEvents":      float64(1),
+}
+
+func (client *ArvTestClient) Discovery(key string) (interface{}, error) {
+       return discoveryMap[key], nil
+}
+
+// CalledWith returns the parameters from the first API call whose
+// parameters match jpath/string. E.g., CalledWith(c, "foo.bar",
+// "baz") returns parameters with parameters["foo"]["bar"]=="baz". If
+// no call matches, it returns nil.
+func (client *ArvTestClient) CalledWith(jpath string, expect interface{}) arvadosclient.Dict {
+call:
+       for _, content := range client.Content {
+               var v interface{} = content
+               for _, k := range strings.Split(jpath, ".") {
+                       if dict, ok := v.(arvadosclient.Dict); !ok {
+                               continue call
+                       } else {
+                               v = dict[k]
+                       }
+               }
+               if v == expect {
+                       return content
+               }
+       }
+       return nil
+}
+
+func (client *KeepTestClient) LocalLocator(locator string) (string, error) {
+       return locator, nil
+}
+
+func (client *KeepTestClient) PutB(buf []byte) (string, int, error) {
+       client.Content = buf
+       return fmt.Sprintf("%x+%d", md5.Sum(buf), len(buf)), len(buf), nil
+}
+
+func (client *KeepTestClient) ReadAt(string, []byte, int) (int, error) {
+       return 0, errors.New("not implemented")
+}
+
+func (client *KeepTestClient) ClearBlockCache() {
+}
+
+func (client *KeepTestClient) Close() {
+       client.Content = nil
+}
+
+type FileWrapper struct {
+       io.ReadCloser
+       len int64
+}
+
+func (fw FileWrapper) Readdir(n int) ([]os.FileInfo, error) {
+       return nil, errors.New("not implemented")
+}
+
+func (fw FileWrapper) Seek(int64, int) (int64, error) {
+       return 0, errors.New("not implemented")
+}
+
+func (fw FileWrapper) Size() int64 {
+       return fw.len
+}
+
+func (fw FileWrapper) Stat() (os.FileInfo, error) {
+       return nil, errors.New("not implemented")
+}
+
+func (fw FileWrapper) Truncate(int64) error {
+       return errors.New("not implemented")
+}
+
+func (fw FileWrapper) Write([]byte) (int, error) {
+       return 0, errors.New("not implemented")
+}
+
+func (fw FileWrapper) Sync() error {
+       return errors.New("not implemented")
+}
+
+func (client *KeepTestClient) ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error) {
+       if filename == hwImageId+".tar" {
+               rdr := ioutil.NopCloser(&bytes.Buffer{})
+               client.Called = true
+               return FileWrapper{rdr, 1321984}, nil
+       } else if filename == "/file1_in_main.txt" {
+               rdr := ioutil.NopCloser(strings.NewReader("foo"))
+               client.Called = true
+               return FileWrapper{rdr, 3}, nil
+       }
+       return nil, nil
+}
+
+func (s *TestSuite) TestLoadImage(c *C) {
+       cr, err := NewContainerRunner(s.client, &ArvTestClient{},
+               &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr.ContainerArvClient = &ArvTestClient{}
+       cr.ContainerKeepClient = kc
+
+       _, err = cr.Docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+       c.Check(err, IsNil)
+
+       _, _, err = cr.Docker.ImageInspectWithRaw(nil, hwImageId)
+       c.Check(err, NotNil)
+
+       cr.Container.ContainerImage = hwPDH
+
+       // (1) Test loading image from keep
+       c.Check(kc.Called, Equals, false)
+       c.Check(cr.ContainerConfig.Image, Equals, "")
+
+       err = cr.LoadImage()
+
+       c.Check(err, IsNil)
+       defer func() {
+               cr.Docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+       }()
+
+       c.Check(kc.Called, Equals, true)
+       c.Check(cr.ContainerConfig.Image, Equals, hwImageId)
+
+       _, _, err = cr.Docker.ImageInspectWithRaw(nil, hwImageId)
+       c.Check(err, IsNil)
+
+       // (2) Test using image that's already loaded
+       kc.Called = false
+       cr.ContainerConfig.Image = ""
+
+       err = cr.LoadImage()
+       c.Check(err, IsNil)
+       c.Check(kc.Called, Equals, false)
+       c.Check(cr.ContainerConfig.Image, Equals, hwImageId)
+
+}
+
+type ArvErrorTestClient struct{}
+
+func (ArvErrorTestClient) Create(resourceType string,
+       parameters arvadosclient.Dict,
+       output interface{}) error {
+       return nil
+}
+
+func (ArvErrorTestClient) Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error {
+       if method == "GET" && resourceType == "containers" && action == "auth" {
+               return nil
+       }
+       return errors.New("ArvError")
+}
+
+func (ArvErrorTestClient) CallRaw(method, resourceType, uuid, action string,
+       parameters arvadosclient.Dict) (reader io.ReadCloser, err error) {
+       return nil, errors.New("ArvError")
+}
+
+func (ArvErrorTestClient) Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error {
+       return errors.New("ArvError")
+}
+
+func (ArvErrorTestClient) Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error) {
+       return nil
+}
+
+func (ArvErrorTestClient) Discovery(key string) (interface{}, error) {
+       return discoveryMap[key], nil
+}
+
+type KeepErrorTestClient struct {
+       KeepTestClient
+}
+
+func (*KeepErrorTestClient) ManifestFileReader(manifest.Manifest, string) (arvados.File, error) {
+       return nil, errors.New("KeepError")
+}
+
+func (*KeepErrorTestClient) PutB(buf []byte) (string, int, error) {
+       return "", 0, errors.New("KeepError")
+}
+
+func (*KeepErrorTestClient) LocalLocator(string) (string, error) {
+       return "", errors.New("KeepError")
+}
+
+type KeepReadErrorTestClient struct {
+       KeepTestClient
+}
+
+func (*KeepReadErrorTestClient) ReadAt(string, []byte, int) (int, error) {
+       return 0, errors.New("KeepError")
+}
+
+type ErrorReader struct {
+       FileWrapper
+}
+
+func (ErrorReader) Read(p []byte) (n int, err error) {
+       return 0, errors.New("ErrorReader")
+}
+
+func (ErrorReader) Seek(int64, int) (int64, error) {
+       return 0, errors.New("ErrorReader")
+}
+
+func (KeepReadErrorTestClient) ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error) {
+       return ErrorReader{}, nil
+}
+
+func (s *TestSuite) TestLoadImageArvError(c *C) {
+       // (1) Arvados error
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, &ArvErrorTestClient{}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+
+       cr.ContainerArvClient = &ArvErrorTestClient{}
+       cr.ContainerKeepClient = &KeepTestClient{}
+
+       cr.Container.ContainerImage = hwPDH
+
+       err = cr.LoadImage()
+       c.Check(err.Error(), Equals, "While getting container image collection: ArvError")
+}
+
+func (s *TestSuite) TestLoadImageKeepError(c *C) {
+       // (2) Keep error
+       kc := &KeepErrorTestClient{}
+       cr, err := NewContainerRunner(s.client, &ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+
+       cr.ContainerArvClient = &ArvTestClient{}
+       cr.ContainerKeepClient = &KeepErrorTestClient{}
+
+       cr.Container.ContainerImage = hwPDH
+
+       err = cr.LoadImage()
+       c.Assert(err, NotNil)
+       c.Check(err.Error(), Equals, "While creating ManifestFileReader for container image: KeepError")
+}
+
+func (s *TestSuite) TestLoadImageCollectionError(c *C) {
+       // (3) Collection doesn't contain image
+       kc := &KeepReadErrorTestClient{}
+       cr, err := NewContainerRunner(s.client, &ArvTestClient{}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       cr.Container.ContainerImage = otherPDH
+
+       cr.ContainerArvClient = &ArvTestClient{}
+       cr.ContainerKeepClient = &KeepReadErrorTestClient{}
+
+       err = cr.LoadImage()
+       c.Check(err.Error(), Equals, "First file in the container image collection does not end in .tar")
+}
+
+func (s *TestSuite) TestLoadImageKeepReadError(c *C) {
+       // (4) Error reading the image file data from Keep
+       kc := &KeepReadErrorTestClient{}
+       cr, err := NewContainerRunner(s.client, &ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       cr.Container.ContainerImage = hwPDH
+       cr.ContainerArvClient = &ArvTestClient{}
+       cr.ContainerKeepClient = &KeepReadErrorTestClient{}
+
+       err = cr.LoadImage()
+       c.Check(err, NotNil)
+}
+
+type ClosableBuffer struct {
+       bytes.Buffer
+}
+
+func (*ClosableBuffer) Close() error {
+       return nil
+}
+
+type TestLogs struct {
+       Stdout ClosableBuffer
+       Stderr ClosableBuffer
+}
+
+func (tl *TestLogs) NewTestLoggingWriter(logstr string) (io.WriteCloser, error) {
+       if logstr == "stdout" {
+               return &tl.Stdout, nil
+       }
+       if logstr == "stderr" {
+               return &tl.Stderr, nil
+       }
+       return nil, errors.New("unexpected log stream name: " + logstr)
+}
+
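+// dockerLog builds a Docker attach-stream frame: an 8-byte header with
+// the stream fd in byte 0 and the payload length in byte 7 (so msg must
+// be shorter than 256 bytes), followed by the message itself.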
+func dockerLog(fd byte, msg string) []byte {
+       by := []byte(msg)
+       header := make([]byte, 8+len(by))
+       header[0] = fd
+       header[7] = byte(len(by))
+       copy(header[8:], by)
+       return header
+}
+
+func (s *TestSuite) TestRunContainer(c *C) {
+       s.docker.fn = func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, "Hello world\n"))
+               t.logWriter.Close()
+       }
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, &ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+
+       cr.ContainerArvClient = &ArvTestClient{}
+       cr.ContainerKeepClient = &KeepTestClient{}
+
+       var logs TestLogs
+       cr.NewLogWriter = logs.NewTestLoggingWriter
+       cr.Container.ContainerImage = hwPDH
+       cr.Container.Command = []string{"./hw"}
+       err = cr.LoadImage()
+       c.Check(err, IsNil)
+
+       err = cr.CreateContainer()
+       c.Check(err, IsNil)
+
+       err = cr.StartContainer()
+       c.Check(err, IsNil)
+
+       err = cr.WaitFinish()
+       c.Check(err, IsNil)
+
+       c.Check(strings.HasSuffix(logs.Stdout.String(), "Hello world\n"), Equals, true)
+       c.Check(logs.Stderr.String(), Equals, "")
+}
+
+func (s *TestSuite) TestCommitLogs(c *C) {
+       api := &ArvTestClient{}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
+
+       cr.CrunchLog.Print("Hello world!")
+       cr.CrunchLog.Print("Goodbye")
+       cr.finalState = "Complete"
+
+       err = cr.CommitLogs()
+       c.Check(err, IsNil)
+
+       c.Check(api.Calls, Equals, 2)
+       c.Check(api.Content[1]["ensure_unique_name"], Equals, true)
+       c.Check(api.Content[1]["collection"].(arvadosclient.Dict)["name"], Equals, "logs for zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Check(api.Content[1]["collection"].(arvadosclient.Dict)["manifest_text"], Equals, ". 744b2e4553123b02fa7b452ec5c18993+123 0:123:crunch-run.txt\n")
+       c.Check(*cr.LogsPDH, Equals, "63da7bdacf08c40f604daad80c261e9a+60")
+}
+
+func (s *TestSuite) TestUpdateContainerRunning(c *C) {
+       api := &ArvTestClient{}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+
+       err = cr.UpdateContainerRunning()
+       c.Check(err, IsNil)
+
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Running")
+}
+
+func (s *TestSuite) TestUpdateContainerComplete(c *C) {
+       api := &ArvTestClient{}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+
+       cr.LogsPDH = new(string)
+       *cr.LogsPDH = "d3a229d2fe3690c2c3e75a71a153c6a3+60"
+
+       cr.ExitCode = new(int)
+       *cr.ExitCode = 42
+       cr.finalState = "Complete"
+
+       err = cr.UpdateContainerFinal()
+       c.Check(err, IsNil)
+
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["log"], Equals, *cr.LogsPDH)
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["exit_code"], Equals, *cr.ExitCode)
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
+}
+
+func (s *TestSuite) TestUpdateContainerCancelled(c *C) {
+       api := &ArvTestClient{}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       cr.cCancelled = true
+       cr.finalState = "Cancelled"
+
+       err = cr.UpdateContainerFinal()
+       c.Check(err, IsNil)
+
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["log"], IsNil)
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["exit_code"], IsNil)
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Cancelled")
+}
+
+// fullRunHelper is used by the TestFullRun*() tests below to DRY up the
+// boilerplate needed for a full dress rehearsal of the Run() function,
+// starting from a JSON container record.
+func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, exitCode int, fn func(t *TestDockerClient)) (api *ArvTestClient, cr *ContainerRunner, realTemp string) {
+       rec := arvados.Container{}
+       err := json.Unmarshal([]byte(record), &rec)
+       c.Check(err, IsNil)
+
+       var sm struct {
+               SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
+       }
+       err = json.Unmarshal([]byte(record), &sm)
+       c.Check(err, IsNil)
+       secretMounts, err := json.Marshal(sm)
+       c.Logf("%s %q", sm, secretMounts)
+       c.Check(err, IsNil)
+
+       s.docker.exitCode = exitCode
+       s.docker.fn = fn
+       s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+
+       api = &ArvTestClient{Container: rec}
+       s.docker.api = api
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err = NewContainerRunner(s.client, api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       s.runner = cr
+       cr.statInterval = 100 * time.Millisecond
+       cr.containerWatchdogInterval = time.Second
+       am := &ArvMountCmdLine{}
+       cr.RunArvMount = am.ArvMountTest
+
+       realTemp, err = ioutil.TempDir("", "crunchrun_test1-")
+       c.Assert(err, IsNil)
+       defer os.RemoveAll(realTemp)
+
+       s.docker.realTemp = realTemp
+
+       tempcount := 0
+       cr.MkTempDir = func(_ string, prefix string) (string, error) {
+               tempcount++
+               d := fmt.Sprintf("%s/%s%d", realTemp, prefix, tempcount)
+               err := os.Mkdir(d, os.ModePerm)
+               if err != nil && strings.Contains(err.Error(), ": file exists") {
+                       // Test case must have pre-populated the tempdir
+                       err = nil
+               }
+               return d, err
+       }
+       cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
+               return &ArvTestClient{secretMounts: secretMounts}, &KeepTestClient{}, nil, nil
+       }
+
+       if len(extraMounts) > 0 {
+               err := cr.SetupArvMountPoint("keep")
+               c.Check(err, IsNil)
+
+               for _, m := range extraMounts {
+                       os.MkdirAll(cr.ArvMountPoint+"/by_id/"+m, os.ModePerm)
+               }
+       }
+
+       err = cr.Run()
+       if api.CalledWith("container.state", "Complete") != nil {
+               c.Check(err, IsNil)
+       }
+       if exitCode != 2 {
+               c.Check(api.WasSetRunning, Equals, true)
+               var lastupdate arvadosclient.Dict
+               for _, content := range api.Content {
+                       if content["container"] != nil {
+                               lastupdate = content["container"].(arvadosclient.Dict)
+                       }
+               }
+               if lastupdate["log"] == nil {
+                       c.Errorf("no container update with non-nil log -- updates were: %v", api.Content)
+               }
+       }
+
+       if err != nil {
+               for k, v := range api.Logs {
+                       c.Log(k)
+                       c.Log(v.String())
+               }
+       }
+
+       return
+}
+
+func (s *TestSuite) TestFullRunHello(c *C) {
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["echo", "hello world"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "state": "Locked"
+}`, nil, 0, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, "hello world\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "hello world\n"), Equals, true)
+
+}
+
+func (s *TestSuite) TestRunAlreadyRunning(c *C) {
+       var ran bool
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["sleep", "3"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "scheduling_parameters":{"max_run_time": 1},
+    "state": "Running"
+}`, nil, 2, func(t *TestDockerClient) {
+               ran = true
+       })
+
+       c.Check(api.CalledWith("container.state", "Cancelled"), IsNil)
+       c.Check(api.CalledWith("container.state", "Complete"), IsNil)
+       c.Check(ran, Equals, false)
+}
+
+func (s *TestSuite) TestRunTimeExceeded(c *C) {
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["sleep", "3"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "scheduling_parameters":{"max_run_time": 1},
+    "state": "Locked"
+}`, nil, 0, func(t *TestDockerClient) {
+               time.Sleep(3 * time.Second)
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+       c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*maximum run time exceeded.*")
+}
+
+func (s *TestSuite) TestContainerWaitFails(c *C) {
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["sleep", "3"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "state": "Locked"
+}`, nil, 0, func(t *TestDockerClient) {
+               t.ctrExited = true
+               time.Sleep(10 * time.Second)
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+       c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Container is not running.*")
+}
+
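+// TestCrunchstat checks that the crunchstat reporter is started and stopped
+// even when no real container produces cgroup accounting files.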
+func (s *TestSuite) TestCrunchstat(c *C) {
+       api, _, _ := s.fullRunHelper(c, `{
+               "command": ["sleep", "1"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": ".",
+               "environment": {},
+               "mounts": {"/tmp": {"kind": "tmp"} },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {},
+               "state": "Locked"
+       }`, nil, 0, func(t *TestDockerClient) {
+               time.Sleep(time.Second)
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+
+       // We didn't actually start a container, so crunchstat didn't
+       // find accounting files and therefore didn't log any stats.
+       // It should have logged a "can't find accounting files"
+       // message after one poll interval, though, so we can confirm
+       // it's alive:
+       c.Assert(api.Logs["crunchstat"], NotNil)
+       c.Check(api.Logs["crunchstat"].String(), Matches, `(?ms).*cgroup stats files have not appeared after 100ms.*`)
+
+       // The "files never appeared" log assures us that we called
+       // (*crunchstat.Reporter)Stop(), and that we set it up with
+       // the correct container ID "abcde":
+       c.Check(api.Logs["crunchstat"].String(), Matches, `(?ms).*cgroup stats files never appeared for abcde\n`)
+}
+
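+// TestNodeInfoLog checks that the node record and host information are
+// written to the "node" and "node-info" log streams.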
+func (s *TestSuite) TestNodeInfoLog(c *C) {
+       os.Setenv("SLURMD_NODENAME", "compute2")
+       api, _, _ := s.fullRunHelper(c, `{
+               "command": ["sleep", "1"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": ".",
+               "environment": {},
+               "mounts": {"/tmp": {"kind": "tmp"} },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {},
+               "state": "Locked"
+       }`, nil, 0,
+               func(t *TestDockerClient) {
+                       time.Sleep(time.Second)
+                       t.logWriter.Close()
+               })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+
+       c.Assert(api.Logs["node"], NotNil)
+       json := api.Logs["node"].String()
+       c.Check(json, Matches, `(?ms).*"uuid": *"zzzzz-7ekkf-2z3mc76g2q73aio".*`)
+       c.Check(json, Matches, `(?ms).*"total_cpu_cores": *16.*`)
+       c.Check(json, Not(Matches), `(?ms).*"info":.*`)
+
+       c.Assert(api.Logs["node-info"], NotNil)
+       json = api.Logs["node-info"].String()
+       c.Check(json, Matches, `(?ms).*Host Information.*`)
+       c.Check(json, Matches, `(?ms).*CPU Information.*`)
+       c.Check(json, Matches, `(?ms).*Memory Information.*`)
+       c.Check(json, Matches, `(?ms).*Disk Space.*`)
+       c.Check(json, Matches, `(?ms).*Disk INodes.*`)
+}
+
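+// TestContainerRecordLog checks that a copy of the container record is
+// written to the "container" log stream.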
+func (s *TestSuite) TestContainerRecordLog(c *C) {
+       api, _, _ := s.fullRunHelper(c, `{
+               "command": ["sleep", "1"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": ".",
+               "environment": {},
+               "mounts": {"/tmp": {"kind": "tmp"} },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {},
+               "state": "Locked"
+       }`, nil, 0,
+               func(t *TestDockerClient) {
+                       time.Sleep(time.Second)
+                       t.logWriter.Close()
+               })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+
+       c.Assert(api.Logs["container"], NotNil)
+       c.Check(api.Logs["container"].String(), Matches, `(?ms).*container_image.*`)
+}
+
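+// TestFullRunStderr checks that stdout and stderr are logged to separate
+// streams, and that a nonzero exit code still yields state "Complete" with
+// a log collection.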
+func (s *TestSuite) TestFullRunStderr(c *C) {
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["/bin/sh", "-c", "echo hello ; echo world 1>&2 ; exit 1"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "state": "Locked"
+}`, nil, 1, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, "hello\n"))
+               t.logWriter.Write(dockerLog(2, "world\n"))
+               t.logWriter.Close()
+       })
+
+       final := api.CalledWith("container.state", "Complete")
+       c.Assert(final, NotNil)
+       c.Check(final["container"].(arvadosclient.Dict)["exit_code"], Equals, 1)
+       c.Check(final["container"].(arvadosclient.Dict)["log"], NotNil)
+
+       c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "hello\n"), Equals, true)
+       c.Check(strings.HasSuffix(api.Logs["stderr"].String(), "world\n"), Equals, true)
+}
+
+func (s *TestSuite) TestFullRunDefaultCwd(c *C) {
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["pwd"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "state": "Locked"
+}`, nil, 0, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, t.cwd+"\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Log(api.Logs["stdout"])
+       c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "/\n"), Equals, true)
+}
+
+func (s *TestSuite) TestFullRunSetCwd(c *C) {
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["pwd"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": "/bin",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "state": "Locked"
+}`, nil, 0, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, t.cwd+"\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "/bin\n"), Equals, true)
+}
+
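+// TestStopOnSignal checks that SIGINT cancels a running container.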
+func (s *TestSuite) TestStopOnSignal(c *C) {
+       s.testStopContainer(c, func(cr *ContainerRunner) {
+               go func() {
+                       for !s.docker.calledWait {
+                               time.Sleep(time.Millisecond)
+                       }
+                       cr.SigChan <- syscall.SIGINT
+               }()
+       })
+}
+
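+// TestStopOnArvMountDeath checks that the container run is cancelled if the
+// arv-mount process exits while the container is running.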
+func (s *TestSuite) TestStopOnArvMountDeath(c *C) {
+       s.testStopContainer(c, func(cr *ContainerRunner) {
+               cr.ArvMountExit = make(chan error)
+               go func() {
+                       cr.ArvMountExit <- exec.Command("true").Run()
+                       close(cr.ArvMountExit)
+               }()
+       })
+}
+
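+// testStopContainer runs a long-running container, lets setup() arrange a
+// stop condition, and asserts the run finishes as "Cancelled".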
+func (s *TestSuite) testStopContainer(c *C, setup func(cr *ContainerRunner)) {
+       record := `{
+    "command": ["/bin/sh", "-c", "echo foo && sleep 30 && echo bar"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "state": "Locked"
+}`
+
+       rec := arvados.Container{}
+       err := json.Unmarshal([]byte(record), &rec)
+       c.Check(err, IsNil)
+
+       s.docker.fn = func(t *TestDockerClient) {
+               <-t.stop
+               t.logWriter.Write(dockerLog(1, "foo\n"))
+               t.logWriter.Close()
+       }
+       s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+
+       api := &ArvTestClient{Container: rec}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       cr.RunArvMount = func([]string, string) (*exec.Cmd, error) { return nil, nil }
+       cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
+               return &ArvTestClient{}, &KeepTestClient{}, nil, nil
+       }
+       setup(cr)
+
+       done := make(chan error)
+       go func() {
+               done <- cr.Run()
+       }()
+       select {
+       case <-time.After(20 * time.Second):
+               pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
+               c.Fatal("timed out")
+       case err = <-done:
+               c.Check(err, IsNil)
+       }
+       for k, v := range api.Logs {
+               c.Log(k)
+               c.Log(v.String())
+       }
+
+       c.Check(api.CalledWith("container.log", nil), NotNil)
+       c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+       c.Check(api.Logs["stdout"].String(), Matches, "(?ms).*foo\n$")
+}
+
+func (s *TestSuite) TestFullRunSetEnv(c *C) {
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["/bin/sh", "-c", "echo $FROBIZ"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": "/bin",
+    "environment": {"FROBIZ": "bilbo"},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "state": "Locked"
+}`, nil, 0, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "bilbo\n"), Equals, true)
+}
+
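+// ArvMountCmdLine records the arguments and token passed to RunArvMount,
+// standing in for a real arv-mount invocation.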
+type ArvMountCmdLine struct {
+       Cmd   []string
+       token string
+}
+
+func (am *ArvMountCmdLine) ArvMountTest(c []string, token string) (*exec.Cmd, error) {
+       am.Cmd = c
+       am.token = token
+       return nil, nil
+}
+
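+// stubCert creates an empty CA bundle file under temp and configures
+// arvadosclient to use it, so tests can verify the CA bind mount.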
+func stubCert(temp string) string {
+       path := temp + "/ca-certificates.crt"
+       crt, _ := os.Create(path)
+       crt.Close()
+       arvadosclient.CertFiles = []string{path}
+       return path
+}
+
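+// TestSetupMounts exercises SetupMounts with a range of mount
+// configurations, checking the generated arv-mount command line and Docker
+// bind list in each case.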
+func (s *TestSuite) TestSetupMounts(c *C) {
+       api := &ArvTestClient{}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       am := &ArvMountCmdLine{}
+       cr.RunArvMount = am.ArvMountTest
+       cr.ContainerArvClient = &ArvTestClient{}
+       cr.ContainerKeepClient = &KeepTestClient{}
+
+       realTemp, err := ioutil.TempDir("", "crunchrun_test1-")
+       c.Assert(err, IsNil)
+       certTemp, err := ioutil.TempDir("", "crunchrun_test2-")
+       c.Assert(err, IsNil)
+       stubCertPath := stubCert(certTemp)
+
+       cr.parentTemp = realTemp
+
+       defer os.RemoveAll(realTemp)
+       defer os.RemoveAll(certTemp)
+
+       i := 0
+       cr.MkTempDir = func(_ string, prefix string) (string, error) {
+               i++
+               d := fmt.Sprintf("%s/%s%d", realTemp, prefix, i)
+               err := os.Mkdir(d, os.ModePerm)
+               if err != nil && strings.Contains(err.Error(), ": file exists") {
+                       // Test case must have pre-populated the tempdir
+                       err = nil
+               }
+               return d, err
+       }
+
+       checkEmpty := func() {
+               // Should be deleted.
+               _, err := os.Stat(realTemp)
+               c.Assert(os.IsNotExist(err), Equals, true)
+
+               // Now recreate it for the next test.
+               c.Assert(os.Mkdir(realTemp, 0777), IsNil)
+       }
+
+       {
+               i = 0
+               cr.ArvMountPoint = ""
+               cr.Container.Mounts = make(map[string]arvados.Mount)
+               cr.Container.Mounts["/tmp"] = arvados.Mount{Kind: "tmp"}
+               cr.Container.OutputPath = "/tmp"
+               cr.statInterval = 5 * time.Second
+               err := cr.SetupMounts()
+               c.Check(err, IsNil)
+               c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other",
+                       "--read-write", "--crunchstat-interval=5",
+                       "--mount-by-pdh", "by_id", realTemp + "/keep1"})
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/tmp2:/tmp"})
+               os.RemoveAll(cr.ArvMountPoint)
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+
+       {
+               i = 0
+               cr.ArvMountPoint = ""
+               cr.Container.Mounts = make(map[string]arvados.Mount)
+               cr.Container.Mounts["/out"] = arvados.Mount{Kind: "tmp"}
+               cr.Container.Mounts["/tmp"] = arvados.Mount{Kind: "tmp"}
+               cr.Container.OutputPath = "/out"
+
+               err := cr.SetupMounts()
+               c.Check(err, IsNil)
+               c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other",
+                       "--read-write", "--crunchstat-interval=5",
+                       "--mount-by-pdh", "by_id", realTemp + "/keep1"})
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/tmp2:/out", realTemp + "/tmp3:/tmp"})
+               os.RemoveAll(cr.ArvMountPoint)
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+
+       {
+               i = 0
+               cr.ArvMountPoint = ""
+               cr.Container.Mounts = make(map[string]arvados.Mount)
+               cr.Container.Mounts["/tmp"] = arvados.Mount{Kind: "tmp"}
+               cr.Container.OutputPath = "/tmp"
+
+               apiflag := true
+               cr.Container.RuntimeConstraints.API = &apiflag
+
+               err := cr.SetupMounts()
+               c.Check(err, IsNil)
+               c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other",
+                       "--read-write", "--crunchstat-interval=5",
+                       "--mount-by-pdh", "by_id", realTemp + "/keep1"})
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/tmp2:/tmp", stubCertPath + ":/etc/arvados/ca-certificates.crt:ro"})
+               os.RemoveAll(cr.ArvMountPoint)
+               cr.CleanupDirs()
+               checkEmpty()
+
+               apiflag = false
+       }
+
+       {
+               i = 0
+               cr.ArvMountPoint = ""
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "/keeptmp": {Kind: "collection", Writable: true},
+               }
+               cr.Container.OutputPath = "/keeptmp"
+
+               os.MkdirAll(realTemp+"/keep1/tmp0", os.ModePerm)
+
+               err := cr.SetupMounts()
+               c.Check(err, IsNil)
+               c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other",
+                       "--read-write", "--crunchstat-interval=5",
+                       "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", realTemp + "/keep1"})
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/keep1/tmp0:/keeptmp"})
+               os.RemoveAll(cr.ArvMountPoint)
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+
+       {
+               i = 0
+               cr.ArvMountPoint = ""
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "/keepinp": {Kind: "collection", PortableDataHash: "59389a8f9ee9d399be35462a0f92541c+53"},
+                       "/keepout": {Kind: "collection", Writable: true},
+               }
+               cr.Container.OutputPath = "/keepout"
+
+               os.MkdirAll(realTemp+"/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", os.ModePerm)
+               os.MkdirAll(realTemp+"/keep1/tmp0", os.ModePerm)
+
+               err := cr.SetupMounts()
+               c.Check(err, IsNil)
+               c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other",
+                       "--read-write", "--crunchstat-interval=5",
+                       "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", realTemp + "/keep1"})
+               sort.StringSlice(cr.Binds).Sort()
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53:/keepinp:ro",
+                       realTemp + "/keep1/tmp0:/keepout"})
+               os.RemoveAll(cr.ArvMountPoint)
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+
+       {
+               i = 0
+               cr.ArvMountPoint = ""
+               cr.Container.RuntimeConstraints.KeepCacheRAM = 512
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "/keepinp": {Kind: "collection", PortableDataHash: "59389a8f9ee9d399be35462a0f92541c+53"},
+                       "/keepout": {Kind: "collection", Writable: true},
+               }
+               cr.Container.OutputPath = "/keepout"
+
+               os.MkdirAll(realTemp+"/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", os.ModePerm)
+               os.MkdirAll(realTemp+"/keep1/tmp0", os.ModePerm)
+
+               err := cr.SetupMounts()
+               c.Check(err, IsNil)
+               c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other",
+                       "--read-write", "--crunchstat-interval=5",
+                       "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", realTemp + "/keep1"})
+               sort.StringSlice(cr.Binds).Sort()
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53:/keepinp:ro",
+                       realTemp + "/keep1/tmp0:/keepout"})
+               os.RemoveAll(cr.ArvMountPoint)
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+
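+       // "json" mounts: each content value is serialized to mountdata.json
+       // and bind-mounted read-only at the requested path.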
+       for _, test := range []struct {
+               in  interface{}
+               out string
+       }{
+               {in: "foo", out: `"foo"`},
+               {in: nil, out: `null`},
+               {in: map[string]int64{"foo": 123456789123456789}, out: `{"foo":123456789123456789}`},
+       } {
+               i = 0
+               cr.ArvMountPoint = ""
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "/mnt/test.json": {Kind: "json", Content: test.in},
+               }
+               err := cr.SetupMounts()
+               c.Check(err, IsNil)
+               sort.StringSlice(cr.Binds).Sort()
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/json2/mountdata.json:/mnt/test.json:ro"})
+               content, err := ioutil.ReadFile(realTemp + "/json2/mountdata.json")
+               c.Check(err, IsNil)
+               c.Check(content, DeepEquals, []byte(test.out))
+               os.RemoveAll(cr.ArvMountPoint)
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+
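+       // "text" mounts: only string content is accepted; other types are
+       // rejected with an error.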
+       for _, test := range []struct {
+               in  interface{}
+               out string
+       }{
+               {in: "foo", out: `foo`},
+               {in: nil, out: "error"},
+               {in: map[string]int64{"foo": 123456789123456789}, out: "error"},
+       } {
+               i = 0
+               cr.ArvMountPoint = ""
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "/mnt/test.txt": {Kind: "text", Content: test.in},
+               }
+               err := cr.SetupMounts()
+               if test.out == "error" {
+                       c.Check(err.Error(), Equals, "content for mount \"/mnt/test.txt\" must be a string")
+               } else {
+                       c.Check(err, IsNil)
+                       sort.StringSlice(cr.Binds).Sort()
+                       c.Check(cr.Binds, DeepEquals, []string{realTemp + "/text2/mountdata.text:/mnt/test.txt:ro"})
+                       content, err := ioutil.ReadFile(realTemp + "/text2/mountdata.text")
+                       c.Check(err, IsNil)
+                       c.Check(content, DeepEquals, []byte(test.out))
+               }
+               os.RemoveAll(cr.ArvMountPoint)
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+
+       // Read-only mount points are allowed underneath the output_dir mount point
+       {
+               i = 0
+               cr.ArvMountPoint = ""
+               cr.Container.Mounts = make(map[string]arvados.Mount)
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "/tmp":     {Kind: "tmp"},
+                       "/tmp/foo": {Kind: "collection"},
+               }
+               cr.Container.OutputPath = "/tmp"
+
+               os.MkdirAll(realTemp+"/keep1/tmp0", os.ModePerm)
+
+               err := cr.SetupMounts()
+               c.Check(err, IsNil)
+               c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other",
+                       "--read-write", "--crunchstat-interval=5",
+                       "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", realTemp + "/keep1"})
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/tmp2:/tmp", realTemp + "/keep1/tmp0:/tmp/foo:ro"})
+               os.RemoveAll(cr.ArvMountPoint)
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+
+       // Writable mount points underneath output_path have their contents copied into the output directory
+       {
+               i = 0
+               cr.ArvMountPoint = ""
+               cr.Container.Mounts = make(map[string]arvados.Mount)
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "/tmp": {Kind: "tmp"},
+                       "/tmp/foo": {Kind: "collection",
+                               PortableDataHash: "59389a8f9ee9d399be35462a0f92541c+53",
+                               Writable:         true},
+                       "/tmp/bar": {Kind: "collection",
+                               PortableDataHash: "59389a8f9ee9d399be35462a0f92541d+53",
+                               Path:             "baz",
+                               Writable:         true},
+               }
+               cr.Container.OutputPath = "/tmp"
+
+               os.MkdirAll(realTemp+"/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", os.ModePerm)
+               os.MkdirAll(realTemp+"/keep1/by_id/59389a8f9ee9d399be35462a0f92541d+53/baz", os.ModePerm)
+
+               rf, _ := os.Create(realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541d+53/baz/quux")
+               rf.Write([]byte("bar"))
+               rf.Close()
+
+               err := cr.SetupMounts()
+               c.Check(err, IsNil)
+               _, err = os.Stat(cr.HostOutputDir + "/foo")
+               c.Check(err, IsNil)
+               _, err = os.Stat(cr.HostOutputDir + "/bar/quux")
+               c.Check(err, IsNil)
+               os.RemoveAll(cr.ArvMountPoint)
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+
+       // Only mount points of kind 'collection', 'text' or 'json' are allowed underneath the output_path mount point
+       {
+               i = 0
+               cr.ArvMountPoint = ""
+               cr.Container.Mounts = make(map[string]arvados.Mount)
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "/tmp":     {Kind: "tmp"},
+                       "/tmp/foo": {Kind: "tmp"},
+               }
+               cr.Container.OutputPath = "/tmp"
+
+               err := cr.SetupMounts()
+               c.Check(err, NotNil)
+               c.Check(err, ErrorMatches, `Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path.*`)
+               os.RemoveAll(cr.ArvMountPoint)
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+
+       // Mount points of kind 'tmp' are not supported for stdin (only 'collection' and 'json' are)
+       {
+               i = 0
+               cr.ArvMountPoint = ""
+               cr.Container.Mounts = make(map[string]arvados.Mount)
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "stdin": {Kind: "tmp"},
+               }
+
+               err := cr.SetupMounts()
+               c.Check(err, NotNil)
+               c.Check(err, ErrorMatches, `Unsupported mount kind 'tmp' for stdin.*`)
+               os.RemoveAll(cr.ArvMountPoint)
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+
+       // git_tree mounts
+       {
+               i = 0
+               cr.ArvMountPoint = ""
+               (*GitMountSuite)(nil).useTestGitServer(c)
+               cr.token = arvadostest.ActiveToken
+               cr.Container.Mounts = make(map[string]arvados.Mount)
+               cr.Container.Mounts = map[string]arvados.Mount{
+                       "/tip": {
+                               Kind:   "git_tree",
+                               UUID:   arvadostest.Repository2UUID,
+                               Commit: "fd3531f42995344f36c30b79f55f27b502f3d344",
+                               Path:   "/",
+                       },
+                       "/non-tip": {
+                               Kind:   "git_tree",
+                               UUID:   arvadostest.Repository2UUID,
+                               Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
+                               Path:   "/",
+                       },
+               }
+               cr.Container.OutputPath = "/tmp"
+
+               err := cr.SetupMounts()
+               c.Check(err, IsNil)
+
+               // dirMap[mountpoint] == tmpdir
+               dirMap := make(map[string]string)
+               for _, bind := range cr.Binds {
+                       tokens := strings.Split(bind, ":")
+                       dirMap[tokens[1]] = tokens[0]
+
+                       if cr.Container.Mounts[tokens[1]].Writable {
+                               c.Check(len(tokens), Equals, 2)
+                       } else {
+                               c.Check(len(tokens), Equals, 3)
+                               c.Check(tokens[2], Equals, "ro")
+                       }
+               }
+
+               data, err := ioutil.ReadFile(dirMap["/tip"] + "/dir1/dir2/file with mode 0644")
+               c.Check(err, IsNil)
+               c.Check(string(data), Equals, "\000\001\002\003")
+               _, err = ioutil.ReadFile(dirMap["/tip"] + "/file only on testbranch")
+               c.Check(err, FitsTypeOf, &os.PathError{})
+               c.Check(os.IsNotExist(err), Equals, true)
+
+               data, err = ioutil.ReadFile(dirMap["/non-tip"] + "/dir1/dir2/file with mode 0644")
+               c.Check(err, IsNil)
+               c.Check(string(data), Equals, "\000\001\002\003")
+               data, err = ioutil.ReadFile(dirMap["/non-tip"] + "/file only on testbranch")
+               c.Check(err, IsNil)
+               c.Check(string(data), Equals, "testfile\n")
+
+               cr.CleanupDirs()
+               checkEmpty()
+       }
+}
+
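+// TestStdout checks that a "file" stdout mount ends up at the requested
+// path in the output collection.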
+func (s *TestSuite) TestStdout(c *C) {
+       helperRecord := `{
+               "command": ["/bin/sh", "-c", "echo $FROBIZ"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": "/bin",
+               "environment": {"FROBIZ": "bilbo"},
+               "mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"} },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {},
+               "state": "Locked"
+       }`
+
+       api, cr, _ := s.fullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", "./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out\n"), NotNil)
+}
+
+// stdoutErrorRunHelper is used by the TestStdoutWithWrong*() tests below.
+func (s *TestSuite) stdoutErrorRunHelper(c *C, record string, fn func(t *TestDockerClient)) (api *ArvTestClient, cr *ContainerRunner, err error) {
+       rec := arvados.Container{}
+       err = json.Unmarshal([]byte(record), &rec)
+       c.Check(err, IsNil)
+
+       s.docker.fn = fn
+       s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+
+       api = &ArvTestClient{Container: rec}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err = NewContainerRunner(s.client, api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       am := &ArvMountCmdLine{}
+       cr.RunArvMount = am.ArvMountTest
+       cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
+               return &ArvTestClient{}, &KeepTestClient{}, nil, nil
+       }
+
+       err = cr.Run()
+       return
+}
+
+func (s *TestSuite) TestStdoutWithWrongPath(c *C) {
+       _, _, err := s.stdoutErrorRunHelper(c, `{
+    "mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "file", "path":"/tmpa.out"} },
+    "output_path": "/tmp",
+    "state": "Locked"
+}`, func(t *TestDockerClient) {})
+
+       c.Check(err, NotNil)
+       c.Check(strings.Contains(err.Error(), "Stdout path does not start with OutputPath"), Equals, true)
+}
+
+func (s *TestSuite) TestStdoutWithWrongKindTmp(c *C) {
+       _, _, err := s.stdoutErrorRunHelper(c, `{
+    "mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "tmp", "path":"/tmp/a.out"} },
+    "output_path": "/tmp",
+    "state": "Locked"
+}`, func(t *TestDockerClient) {})
+
+       c.Check(err, NotNil)
+       c.Check(strings.Contains(err.Error(), "Unsupported mount kind 'tmp' for stdout"), Equals, true)
+}
+
+func (s *TestSuite) TestStdoutWithWrongKindCollection(c *C) {
+       _, _, err := s.stdoutErrorRunHelper(c, `{
+    "mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "collection", "path":"/tmp/a.out"} },
+    "output_path": "/tmp",
+    "state": "Locked"
+}`, func(t *TestDockerClient) {})
+
+       c.Check(err, NotNil)
+       c.Check(strings.Contains(err.Error(), "Unsupported mount kind 'collection' for stdout"), Equals, true)
+}
+
+func (s *TestSuite) TestFullRunWithAPI(c *C) {
+       defer os.Setenv("ARVADOS_API_HOST", os.Getenv("ARVADOS_API_HOST"))
+       os.Setenv("ARVADOS_API_HOST", "test.arvados.org")
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["/bin/sh", "-c", "echo $ARVADOS_API_HOST"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": "/bin",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {"API": true},
+    "state": "Locked"
+}`, nil, 0, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, t.env[1][17:]+"\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "test.arvados.org\n"), Equals, true)
+       c.Check(api.CalledWith("container.output", "d41d8cd98f00b204e9800998ecf8427e+0"), NotNil)
+}
+
+func (s *TestSuite) TestFullRunSetOutput(c *C) {
+       defer os.Setenv("ARVADOS_API_HOST", os.Getenv("ARVADOS_API_HOST"))
+       os.Setenv("ARVADOS_API_HOST", "test.arvados.org")
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["/bin/sh", "-c", "echo $ARVADOS_API_HOST"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": "/bin",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {"API": true},
+    "state": "Locked"
+}`, nil, 0, func(t *TestDockerClient) {
+               t.api.Container.Output = "d4ab34d3d4f8a72f5c4973051ae69fab+122"
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Check(api.CalledWith("container.output", "d4ab34d3d4f8a72f5c4973051ae69fab+122"), NotNil)
+}
+
+func (s *TestSuite) TestStdoutWithExcludeFromOutputMountPointUnderOutputDir(c *C) {
+       helperRecord := `{
+               "command": ["/bin/sh", "-c", "echo $FROBIZ"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": "/bin",
+               "environment": {"FROBIZ": "bilbo"},
+               "mounts": {
+        "/tmp": {"kind": "tmp"},
+        "/tmp/foo": {"kind": "collection",
+                     "portable_data_hash": "a3e8f74c6f101eae01fa08bfb4e49b3a+54",
+                     "exclude_from_output": true
+        },
+        "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"}
+    },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {},
+               "state": "Locked"
+       }`
+
+       extraMounts := []string{"a3e8f74c6f101eae01fa08bfb4e49b3a+54"}
+
+       api, cr, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", "./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out\n"), NotNil)
+}
+
+func (s *TestSuite) TestStdoutWithMultipleMountPointsUnderOutputDir(c *C) {
+       helperRecord := `{
+               "command": ["/bin/sh", "-c", "echo $FROBIZ"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": "/bin",
+               "environment": {"FROBIZ": "bilbo"},
+               "mounts": {
+        "/tmp": {"kind": "tmp"},
+        "/tmp/foo/bar": {"kind": "collection", "portable_data_hash": "a0def87f80dd594d4675809e83bd4f15+367", "path":"/file2_in_main.txt"},
+        "/tmp/foo/sub1": {"kind": "collection", "portable_data_hash": "a0def87f80dd594d4675809e83bd4f15+367", "path":"/subdir1"},
+        "/tmp/foo/sub1file2": {"kind": "collection", "portable_data_hash": "a0def87f80dd594d4675809e83bd4f15+367", "path":"/subdir1/file2_in_subdir1.txt"},
+        "/tmp/foo/baz/sub2file2": {"kind": "collection", "portable_data_hash": "a0def87f80dd594d4675809e83bd4f15+367", "path":"/subdir1/subdir2/file2_in_subdir2.txt"},
+        "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"}
+    },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {},
+               "state": "Locked"
+       }`
+
+       extraMounts := []string{
+               "a0def87f80dd594d4675809e83bd4f15+367/file2_in_main.txt",
+               "a0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt",
+               "a0def87f80dd594d4675809e83bd4f15+367/subdir1/subdir2/file2_in_subdir2.txt",
+       }
+
+       api, runner, realtemp := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(runner.Binds, DeepEquals, []string{realtemp + "/tmp2:/tmp",
+               realtemp + "/keep1/by_id/a0def87f80dd594d4675809e83bd4f15+367/file2_in_main.txt:/tmp/foo/bar:ro",
+               realtemp + "/keep1/by_id/a0def87f80dd594d4675809e83bd4f15+367/subdir1/subdir2/file2_in_subdir2.txt:/tmp/foo/baz/sub2file2:ro",
+               realtemp + "/keep1/by_id/a0def87f80dd594d4675809e83bd4f15+367/subdir1:/tmp/foo/sub1:ro",
+               realtemp + "/keep1/by_id/a0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt:/tmp/foo/sub1file2:ro",
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       for _, v := range api.Content {
+               if v["collection"] != nil {
+                       c.Check(v["ensure_unique_name"], Equals, true)
+                       collection := v["collection"].(arvadosclient.Dict)
+                       if strings.HasPrefix(collection["name"].(string), "output") {
+                               manifest := collection["manifest_text"].(string)
+
+                               c.Check(manifest, Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out
+./foo 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 9:18:bar 36:18:sub1file2
+./foo/baz 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 9:18:sub2file2
+./foo/sub1 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 0:9:file1_in_subdir1.txt 9:18:file2_in_subdir1.txt
+./foo/sub1/subdir2 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 0:9:file1_in_subdir2.txt 9:18:file2_in_subdir2.txt
+`)
+                       }
+               }
+       }
+}
+
+func (s *TestSuite) TestStdoutWithMountPointsUnderOutputDirDenormalizedManifest(c *C) {
+       helperRecord := `{
+               "command": ["/bin/sh", "-c", "echo $FROBIZ"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": "/bin",
+               "environment": {"FROBIZ": "bilbo"},
+               "mounts": {
+        "/tmp": {"kind": "tmp"},
+        "/tmp/foo/bar": {"kind": "collection", "portable_data_hash": "b0def87f80dd594d4675809e83bd4f15+367", "path": "/subdir1/file2_in_subdir1.txt"},
+        "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"}
+    },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {},
+               "state": "Locked"
+       }`
+
+       extraMounts := []string{
+               "b0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt",
+       }
+
+       api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       for _, v := range api.Content {
+               if v["collection"] != nil {
+                       collection := v["collection"].(arvadosclient.Dict)
+                       if strings.HasPrefix(collection["name"].(string), "output") {
+                               manifest := collection["manifest_text"].(string)
+
+                               c.Check(manifest, Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out
+./foo 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 10:17:bar
+`)
+                       }
+               }
+       }
+}
+
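+// TestOutputError checks that a symlink pointing outside the output
+// directory causes the container to be cancelled.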
+func (s *TestSuite) TestOutputError(c *C) {
+       helperRecord := `{
+               "command": ["/bin/sh", "-c", "echo $FROBIZ"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": "/bin",
+               "environment": {"FROBIZ": "bilbo"},
+               "mounts": {
+                       "/tmp": {"kind": "tmp"}
+               },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {},
+               "state": "Locked"
+       }`
+
+       extraMounts := []string{}
+
+       api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+               os.Symlink("/etc/hosts", t.realTemp+"/tmp2/baz")
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+}
+
+func (s *TestSuite) TestStdinCollectionMountPoint(c *C) {
+       helperRecord := `{
+               "command": ["/bin/sh", "-c", "echo $FROBIZ"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": "/bin",
+               "environment": {"FROBIZ": "bilbo"},
+               "mounts": {
+        "/tmp": {"kind": "tmp"},
+        "stdin": {"kind": "collection", "portable_data_hash": "b0def87f80dd594d4675809e83bd4f15+367", "path": "/file1_in_main.txt"},
+        "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"}
+    },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {},
+               "state": "Locked"
+       }`
+
+       extraMounts := []string{
+               "b0def87f80dd594d4675809e83bd4f15+367/file1_in_main.txt",
+       }
+
+       api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       for _, v := range api.Content {
+               if v["collection"] != nil {
+                       collection := v["collection"].(arvadosclient.Dict)
+                       if strings.HasPrefix(collection["name"].(string), "output") {
+                               manifest := collection["manifest_text"].(string)
+                               c.Check(manifest, Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out
+`)
+                       }
+               }
+       }
+}
+
+func (s *TestSuite) TestStdinJsonMountPoint(c *C) {
+       helperRecord := `{
+               "command": ["/bin/sh", "-c", "echo $FROBIZ"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": "/bin",
+               "environment": {"FROBIZ": "bilbo"},
+               "mounts": {
+        "/tmp": {"kind": "tmp"},
+        "stdin": {"kind": "json", "content": "foo"},
+        "stdout": {"kind": "file", "path": "/tmp/a/b/c.out"}
+    },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {},
+               "state": "Locked"
+       }`
+
+       api, _, _ := s.fullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       for _, v := range api.Content {
+               if v["collection"] != nil {
+                       collection := v["collection"].(arvadosclient.Dict)
+                       if strings.HasPrefix(collection["name"].(string), "output") {
+                               manifest := collection["manifest_text"].(string)
+                               c.Check(manifest, Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out
+`)
+                       }
+               }
+       }
+}
+
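+// TestStderrMount checks that "file" mounts for both stdout and stderr are
+// written into the output collection.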
+func (s *TestSuite) TestStderrMount(c *C) {
+       api, cr, _ := s.fullRunHelper(c, `{
+    "command": ["/bin/sh", "-c", "echo hello;exit 1"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"},
+               "stdout": {"kind": "file", "path": "/tmp/a/out.txt"},
+               "stderr": {"kind": "file", "path": "/tmp/b/err.txt"}},
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "state": "Locked"
+}`, nil, 1, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, "hello\n"))
+               t.logWriter.Write(dockerLog(2, "oops\n"))
+               t.logWriter.Close()
+       })
+
+       final := api.CalledWith("container.state", "Complete")
+       c.Assert(final, NotNil)
+       c.Check(final["container"].(arvadosclient.Dict)["exit_code"], Equals, 1)
+       c.Check(final["container"].(arvadosclient.Dict)["log"], NotNil)
+
+       c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", "./a b1946ac92492d2347c6235b4d2611184+6 0:6:out.txt\n./b 38af5c54926b620264ab1501150cf189+5 0:5:err.txt\n"), NotNil)
+}
+
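+// TestNumberRoundTrip checks that a large integer in JSON mount content
+// survives fetching and re-marshalling without losing precision.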
+func (s *TestSuite) TestNumberRoundTrip(c *C) {
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, &ArvTestClient{callraw: true}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       cr.fetchContainerRecord()
+
+       jsondata, err := json.Marshal(cr.Container.Mounts["/json"].Content)
+
+       c.Check(err, IsNil)
+       c.Check(string(jsondata), Equals, `{"number":123456789123456789}`)
+}
+
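+// TestFullBrokenDocker1 simulates a Docker failure with a broken-node hook
+// configured: the container is requeued and the hook is run.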
+func (s *TestSuite) TestFullBrokenDocker1(c *C) {
+       tf, err := ioutil.TempFile("", "brokenNodeHook-")
+       c.Assert(err, IsNil)
+       defer os.Remove(tf.Name())
+
+       tf.Write([]byte(`#!/bin/sh
+exec echo killme
+`))
+       tf.Close()
+       os.Chmod(tf.Name(), 0700)
+
+       ech := tf.Name()
+       brokenNodeHook = &ech
+
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["echo", "hello world"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "state": "Locked"
+}`, nil, 2, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, "hello world\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.state", "Queued"), NotNil)
+       c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*unable to run containers.*")
+       c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Running broken node hook.*")
+       c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*killme.*")
+}
+
+func (s *TestSuite) TestFullBrokenDocker2(c *C) {
+       ech := ""
+       brokenNodeHook = &ech
+
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["echo", "hello world"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "state": "Locked"
+}`, nil, 2, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, "hello world\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.state", "Queued"), NotNil)
+       c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*unable to run containers.*")
+       c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*No broken node hook.*")
+}
+
+func (s *TestSuite) TestFullBrokenDocker3(c *C) {
+       ech := ""
+       brokenNodeHook = &ech
+
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["echo", "hello world"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "state": "Locked"
+}`, nil, 3, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, "hello world\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+       c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*unable to run containers.*")
+}
+
+func (s *TestSuite) TestBadCommand1(c *C) {
+       ech := ""
+       brokenNodeHook = &ech
+
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["echo", "hello world"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "state": "Locked"
+}`, nil, 4, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, "hello world\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+       c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Possible causes:.*is missing.*")
+}
+
+func (s *TestSuite) TestBadCommand2(c *C) {
+       ech := ""
+       brokenNodeHook = &ech
+
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["echo", "hello world"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "state": "Locked"
+}`, nil, 5, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, "hello world\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+       c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Possible causes:.*is missing.*")
+}
+
+func (s *TestSuite) TestBadCommand3(c *C) {
+       ech := ""
+       brokenNodeHook = &ech
+
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["echo", "hello world"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+    "runtime_constraints": {},
+    "state": "Locked"
+}`, nil, 6, func(t *TestDockerClient) {
+               t.logWriter.Write(dockerLog(1, "hello world\n"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+       c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Possible causes:.*is missing.*")
+}
+
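+// TestSecretTextMountPoint contrasts a secret delivered via normal mounts
+// with one delivered via secret_mounts.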
+func (s *TestSuite) TestSecretTextMountPoint(c *C) {
+       // A secret mounted under normal "mounts" is captured in the output collection, which is undesirable.
+       helperRecord := `{
+               "command": ["true"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": "/bin",
+               "mounts": {
+                    "/tmp": {"kind": "tmp"},
+                    "/tmp/secret.conf": {"kind": "text", "content": "mypassword"}
+                },
+                "secret_mounts": {
+                },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {},
+               "state": "Locked"
+       }`
+
+       api, cr, _ := s.fullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) {
+               content, err := ioutil.ReadFile(t.realTemp + "/tmp2/secret.conf")
+               c.Check(err, IsNil)
+               c.Check(content, DeepEquals, []byte("mypassword"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", ". 34819d7beeabb9260a5c854bc85b3e44+10 0:10:secret.conf\n"), NotNil)
+       c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", ""), IsNil)
+
+       // The same file placed under "secret_mounts" is not captured in the output.
+       helperRecord = `{
+               "command": ["true"],
+               "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+               "cwd": "/bin",
+               "mounts": {
+                    "/tmp": {"kind": "tmp"}
+                },
+                "secret_mounts": {
+                    "/tmp/secret.conf": {"kind": "text", "content": "mypassword"}
+                },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {},
+               "state": "Locked"
+       }`
+
+       api, cr, _ = s.fullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) {
+               content, err := ioutil.ReadFile(t.realTemp + "/tmp2/secret.conf")
+               c.Check(err, IsNil)
+               c.Check(content, DeepEquals, []byte("mypassword"))
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+       c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+       c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", ". 34819d7beeabb9260a5c854bc85b3e44+10 0:10:secret.conf\n"), IsNil)
+       c.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", ""), NotNil)
+}
+
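+// FakeProcess stubs a process handle, reporting a canned command line via
+// CmdlineSlice.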
+type FakeProcess struct {
+       cmdLine []string
+}
+
+func (fp FakeProcess) CmdlineSlice() ([]string, error) {
+       return fp.cmdLine, nil
+}
diff --git a/services/crunch-run/git_mount.go b/services/crunch-run/git_mount.go
new file mode 100644 (file)
index 0000000..c312a53
--- /dev/null
@@ -0,0 +1,117 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "fmt"
+       "net/url"
+       "os"
+       "path/filepath"
+       "regexp"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "gopkg.in/src-d/go-billy.v4/osfs"
+       git "gopkg.in/src-d/go-git.v4"
+       git_config "gopkg.in/src-d/go-git.v4/config"
+       git_plumbing "gopkg.in/src-d/go-git.v4/plumbing"
+       git_http "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
+       "gopkg.in/src-d/go-git.v4/storage/memory"
+)
+
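+// gitMount is a "git_tree" mount: a read-only tree extracted from a commit
+// in an Arvados git repository.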
+type gitMount arvados.Mount
+
+var (
+       sha1re     = regexp.MustCompile(`^[0-9a-f]{40}$`)
+       repoUUIDre = regexp.MustCompile(`^[0-9a-z]{5}-s0uqq-[0-9a-z]{15}$`)
+)
+
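+// validate rejects git_tree mount configurations that extractTree does not
+// support. A minimal sketch of an accepted mount record (the UUID shown
+// here is hypothetical):
+//
+//     "/tip": {
+//         "kind": "git_tree",
+//         "uuid": "zzzzz-s0uqq-zzzzzzzzzzzzzzz",
+//         "commit": "fd3531f42995344f36c30b79f55f27b502f3d344"
+//     }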
+func (gm gitMount) validate() error {
+       if gm.Path != "" && gm.Path != "/" {
+               return fmt.Errorf("cannot mount git_tree with path %q -- only \"/\" is supported", gm.Path)
+       }
+       if !sha1re.MatchString(gm.Commit) {
+               return fmt.Errorf("cannot mount git_tree with commit %q -- must be a 40-char SHA1", gm.Commit)
+       }
+       if gm.RepositoryName != "" || gm.GitURL != "" {
+               return fmt.Errorf("cannot mount git_tree -- repository_name and git_url must be empty")
+       }
+       if !repoUUIDre.MatchString(gm.UUID) {
+               return fmt.Errorf("cannot mount git_tree with uuid %q -- must be a repository UUID", gm.UUID)
+       }
+       if gm.Writable {
+               return fmt.Errorf("writable git_tree mount is not supported")
+       }
+       return nil
+}
+
+// extractTree extracts the tree at gm.Commit into dir, which must be an
+// existing empty local directory.
+func (gm gitMount) extractTree(ac IArvadosClient, dir string, token string) error {
+       err := gm.validate()
+       if err != nil {
+               return err
+       }
+       baseURL, err := ac.Discovery("gitUrl")
+       if err != nil {
+               return fmt.Errorf("discover gitUrl from API: %s", err)
+       } else if _, ok := baseURL.(string); !ok {
+               return fmt.Errorf("discover gitUrl from API: expected string, found %T", baseURL)
+       }
+
+       u, err := url.Parse(baseURL.(string))
+       if err != nil {
+               return fmt.Errorf("parse gitUrl %q: %s", baseURL, err)
+       }
+       u, err = u.Parse("/" + gm.UUID + ".git")
+       if err != nil {
+               return fmt.Errorf("build git url from %q, %q: %s", baseURL, gm.UUID, err)
+       }
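+       // Keep git's object store in memory; only the checked-out worktree
+       // is written to dir.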
+       store := memory.NewStorage()
+       repo, err := git.Init(store, osfs.New(dir))
+       if err != nil {
+               return fmt.Errorf("init repo: %s", err)
+       }
+       _, err = repo.CreateRemote(&git_config.RemoteConfig{
+               Name: "origin",
+               URLs: []string{u.String()},
+       })
+       if err != nil {
+               return fmt.Errorf("create remote %q: %s", u.String(), err)
+       }
+       err = repo.Fetch(&git.FetchOptions{
+               RemoteName: "origin",
+               Auth: &git_http.BasicAuth{
+                       Username: "none",
+                       Password: token,
+               },
+       })
+       if err != nil {
+               return fmt.Errorf("git fetch %q: %s", u.String(), err)
+       }
+       wt, err := repo.Worktree()
+       if err != nil {
+               return fmt.Errorf("worktree failed: %s", err)
+       }
+       err = wt.Checkout(&git.CheckoutOptions{
+               Hash: git_plumbing.NewHash(gm.Commit),
+       })
+       if err != nil {
+               return fmt.Errorf("checkout failed: %s", err)
+       }
+       err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+               if err != nil {
+                       return err
+               }
+               // copy user rx bits to group and other, in case
+               // prevailing umask is more restrictive than 022
+               mode := info.Mode()
+               mode = mode | ((mode >> 3) & 050) | ((mode >> 6) & 5)
+               return os.Chmod(path, mode)
+       })
+       if err != nil {
+               return fmt.Errorf("chmod -R %q: %s", dir, err)
+       }
+       return nil
+}
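The bit arithmetic in the Walk callback above copies the owner's read and
execute bits down to group and other, and never adds write bits. A worked
example (illustration only):

    mode := os.FileMode(0700)
    mode = mode | ((mode >> 3) & 050) | ((mode >> 6) & 5)
    // 0700>>3 = 070, masked with 050 -> 050 (group r-x)
    // 0700>>6 = 007, masked with 005 -> 005 (other r-x)
    // result: 0755. Likewise 0600 becomes 0644, since there are
    // no execute bits to copy.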
diff --git a/services/crunch-run/git_mount_test.go b/services/crunch-run/git_mount_test.go
new file mode 100644 (file)
index 0000000..4dc95bc
--- /dev/null
@@ -0,0 +1,209 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "io/ioutil"
+       "os"
+       "path/filepath"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+       git_client "gopkg.in/src-d/go-git.v4/plumbing/transport/client"
+       git_http "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
+)
+
+type GitMountSuite struct {
+       tmpdir string
+}
+
+var _ = check.Suite(&GitMountSuite{})
+
+func (s *GitMountSuite) SetUpTest(c *check.C) {
+       s.useTestGitServer(c)
+
+       var err error
+       s.tmpdir, err = ioutil.TempDir("", "")
+       c.Assert(err, check.IsNil)
+}
+
+func (s *GitMountSuite) TearDownTest(c *check.C) {
+       err := os.RemoveAll(s.tmpdir)
+       c.Check(err, check.IsNil)
+}
+
+// Commit fd3531f is crunch-run-tree-test
+func (s *GitMountSuite) TestextractTree(c *check.C) {
+       gm := gitMount{
+               Path:   "/",
+               UUID:   arvadostest.Repository2UUID,
+               Commit: "fd3531f42995344f36c30b79f55f27b502f3d344",
+       }
+       err := gm.extractTree(&ArvTestClient{}, s.tmpdir, arvadostest.ActiveToken)
+       c.Check(err, check.IsNil)
+
+       fnm := filepath.Join(s.tmpdir, "dir1/dir2/file with mode 0644")
+       data, err := ioutil.ReadFile(fnm)
+       c.Check(err, check.IsNil)
+       c.Check(data, check.DeepEquals, []byte{0, 1, 2, 3})
+       fi, err := os.Stat(fnm)
+       c.Check(err, check.IsNil)
+       if err == nil {
+               c.Check(fi.Mode(), check.Equals, os.FileMode(0644))
+       }
+
+       fnm = filepath.Join(s.tmpdir, "dir1/dir2/file with mode 0755")
+       data, err = ioutil.ReadFile(fnm)
+       c.Check(err, check.IsNil)
+       c.Check(string(data), check.DeepEquals, "#!/bin/sh\nexec echo OK\n")
+       fi, err = os.Stat(fnm)
+       c.Check(err, check.IsNil)
+       if err == nil {
+               c.Check(fi.Mode(), check.Equals, os.FileMode(0755))
+       }
+
+       // Ensure there's no extra stuff like a ".git" dir
+       s.checkTmpdirContents(c, []string{"dir1"})
+
+       // Ensure tmpdir is world-readable and world-executable so the
+       // UID inside the container can use it.
+       fi, err = os.Stat(s.tmpdir)
+       c.Check(err, check.IsNil)
+       c.Check(fi.Mode()&os.ModePerm, check.Equals, os.FileMode(0755))
+}
+
+// Commit 5ebfab0 is not the tip of any branch or tag, but is
+// reachable in branch "crunch-run-non-tip-test".
+func (s *GitMountSuite) TestExtractNonTipCommit(c *check.C) {
+       gm := gitMount{
+               UUID:   arvadostest.Repository2UUID,
+               Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
+       }
+       err := gm.extractTree(&ArvTestClient{}, s.tmpdir, arvadostest.ActiveToken)
+       c.Check(err, check.IsNil)
+
+       fnm := filepath.Join(s.tmpdir, "file only on testbranch")
+       data, err := ioutil.ReadFile(fnm)
+       c.Check(err, check.IsNil)
+       c.Check(string(data), check.DeepEquals, "testfile\n")
+}
+
+func (s *GitMountSuite) TestNonexistentRepository(c *check.C) {
+       gm := gitMount{
+               Path:   "/",
+               UUID:   "zzzzz-s0uqq-nonexistentrepo",
+               Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
+       }
+       err := gm.extractTree(&ArvTestClient{}, s.tmpdir, arvadostest.ActiveToken)
+       c.Check(err, check.NotNil)
+       c.Check(err, check.ErrorMatches, ".*repository not found.*")
+
+       s.checkTmpdirContents(c, []string{})
+}
+
+func (s *GitMountSuite) TestNonexistentCommit(c *check.C) {
+       gm := gitMount{
+               Path:   "/",
+               UUID:   arvadostest.Repository2UUID,
+               Commit: "bb66b6bb6b6bbb6b6b6b66b6b6b6b6b6b6b6b66b",
+       }
+       err := gm.extractTree(&ArvTestClient{}, s.tmpdir, arvadostest.ActiveToken)
+       c.Check(err, check.NotNil)
+       c.Check(err, check.ErrorMatches, ".*object not found.*")
+
+       s.checkTmpdirContents(c, []string{})
+}
+
+func (s *GitMountSuite) TestGitUrlDiscoveryFails(c *check.C) {
+       delete(discoveryMap, "gitUrl")
+       gm := gitMount{
+               Path:   "/",
+               UUID:   arvadostest.Repository2UUID,
+               Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
+       }
+       err := gm.extractTree(&ArvTestClient{}, s.tmpdir, arvadostest.ActiveToken)
+       c.Check(err, check.ErrorMatches, ".*gitUrl.*")
+}
+
+func (s *GitMountSuite) TestInvalid(c *check.C) {
+       for _, trial := range []struct {
+               gm      gitMount
+               matcher string
+       }{
+               {
+                       gm: gitMount{
+                               Path:   "/",
+                               UUID:   arvadostest.Repository2UUID,
+                               Commit: "abc123",
+                       },
+                       matcher: ".*SHA1.*",
+               },
+               {
+                       gm: gitMount{
+                               Path:           "/",
+                               UUID:           arvadostest.Repository2UUID,
+                               RepositoryName: arvadostest.Repository2Name,
+                               Commit:         "5ebfab0522851df01fec11ec55a6d0f4877b542e",
+                       },
+                       matcher: ".*repository_name.*",
+               },
+               {
+                       gm: gitMount{
+                               Path:   "/",
+                               GitURL: "https://localhost:0/" + arvadostest.Repository2Name + ".git",
+                               Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
+                       },
+                       matcher: ".*git_url.*",
+               },
+               {
+                       gm: gitMount{
+                               Path:   "/dir1/",
+                               UUID:   arvadostest.Repository2UUID,
+                               Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
+                       },
+                       matcher: ".*path.*",
+               },
+               {
+                       gm: gitMount{
+                               Path:   "/",
+                               Commit: "5ebfab0522851df01fec11ec55a6d0f4877b542e",
+                       },
+                       matcher: ".*UUID.*",
+               },
+               {
+                       gm: gitMount{
+                               Path:     "/",
+                               UUID:     arvadostest.Repository2UUID,
+                               Commit:   "5ebfab0522851df01fec11ec55a6d0f4877b542e",
+                               Writable: true,
+                       },
+                       matcher: ".*writable.*",
+               },
+       } {
+               err := trial.gm.extractTree(&ArvTestClient{}, s.tmpdir, arvadostest.ActiveToken)
+               c.Check(err, check.NotNil)
+               s.checkTmpdirContents(c, []string{})
+
+               err = trial.gm.validate()
+               c.Check(err, check.ErrorMatches, trial.matcher)
+       }
+}
+
+func (s *GitMountSuite) checkTmpdirContents(c *check.C, expect []string) {
+       f, err := os.Open(s.tmpdir)
+       c.Check(err, check.IsNil)
+       names, err := f.Readdirnames(-1)
+       c.Check(err, check.IsNil)
+       c.Check(names, check.DeepEquals, expect)
+}
+
+func (*GitMountSuite) useTestGitServer(c *check.C) {
+       git_client.InstallProtocol("https", git_http.NewClient(arvados.InsecureHTTPClient))
+
+       port, err := ioutil.ReadFile("../../tmp/arv-git-httpd-ssl.port")
+       c.Assert(err, check.IsNil)
+       discoveryMap["gitUrl"] = "https://localhost:" + string(port)
+}
diff --git a/services/crunch-run/logging.go b/services/crunch-run/logging.go
new file mode 100644 (file)
index 0000000..f8ddd56
--- /dev/null
@@ -0,0 +1,407 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bufio"
+       "bytes"
+       "fmt"
+       "io"
+       "log"
+       "regexp"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+)
+
+// Timestamper is the signature for a function that takes a timestamp and
+// returns a formatted string value.
+type Timestamper func(t time.Time) string
+
+// Logging plumbing:
+//
+// ThrottledLogger.Logger -> ThrottledLogger.Write ->
+// ThrottledLogger.buf -> ThrottledLogger.flusher ->
+// ArvLogWriter.Write -> CollectionFileWriter.Write | Api.Create
+//
+// For stdout/stderr, ReadWriteLines additionally runs as a goroutine that
+// pulls data from the stdout/stderr Reader and sends it to the Logger.
+
+// ThrottledLogger accepts writes, prepends a timestamp to each line of the
+// write, and periodically flushes to a downstream writer.  It supports the
+// "Logger" and "WriteCloser" interfaces.
+type ThrottledLogger struct {
+       *log.Logger
+       buf *bytes.Buffer
+       sync.Mutex
+       writer   io.WriteCloser
+       flush    chan struct{}
+       stopped  chan struct{}
+       stopping chan struct{}
+       Timestamper
+       Immediate    *log.Logger
+       pendingFlush bool
+}
+
+// RFC3339NanoFixed is a fixed-width version of time.RFC3339Nano.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// RFC3339Timestamp formats t as RFC3339NanoFixed.
+func RFC3339Timestamp(t time.Time) string {
+       return t.Format(RFC3339NanoFixed)
+}
+
+// Write prepends a timestamp to each line of the input data and
+// appends to the internal buffer. Each line is also logged to
+// tl.Immediate, if tl.Immediate is not nil.
+func (tl *ThrottledLogger) Write(p []byte) (n int, err error) {
+       tl.Mutex.Lock()
+       defer tl.Mutex.Unlock()
+
+       if tl.buf == nil {
+               tl.buf = &bytes.Buffer{}
+       }
+
+       now := tl.Timestamper(time.Now().UTC())
+       sc := bufio.NewScanner(bytes.NewBuffer(p))
+       for err == nil && sc.Scan() {
+               out := fmt.Sprintf("%s %s\n", now, sc.Bytes())
+               if tl.Immediate != nil {
+                       tl.Immediate.Print(out[:len(out)-1])
+               }
+               _, err = io.WriteString(tl.buf, out)
+       }
+       if err == nil {
+               err = sc.Err()
+               if err == nil {
+                       n = len(p)
+               }
+       }
+
+       if int64(tl.buf.Len()) >= crunchLogBytesPerEvent {
+               // Non-blocking send: signal a flush if the channel is
+               // ready to accept it. Otherwise do nothing, because a
+               // flush is already pending.
+               select {
+               case tl.flush <- struct{}{}:
+               default:
+               }
+       }
+
+       return
+}
+
+// flusher periodically moves the accumulated buffer aside and, if it is
+// not empty, writes its contents to the downstream writer.
+func (tl *ThrottledLogger) flusher() {
+       ticker := time.NewTicker(crunchLogSecondsBetweenEvents)
+       defer ticker.Stop()
+       for stopping := false; !stopping; {
+               select {
+               case <-tl.stopping:
+                       // flush tl.buf and exit the loop
+                       stopping = true
+               case <-tl.flush:
+               case <-ticker.C:
+               }
+
+               var ready *bytes.Buffer
+
+               tl.Mutex.Lock()
+               ready, tl.buf = tl.buf, &bytes.Buffer{}
+               tl.Mutex.Unlock()
+
+               if ready != nil && ready.Len() > 0 {
+                       tl.writer.Write(ready.Bytes())
+               }
+       }
+       close(tl.stopped)
+}
+
+// Close the flusher goroutine and wait for it to complete, then close the
+// underlying Writer.
+func (tl *ThrottledLogger) Close() error {
+       select {
+       case <-tl.stopping:
+               // already stopped
+       default:
+               close(tl.stopping)
+       }
+       <-tl.stopped
+       return tl.writer.Close()
+}
+
+const (
+       // MaxLogLine is the maximum length of stdout/stderr lines before they are split.
+       MaxLogLine = 1 << 12
+)
+
+// ReadWriteLines reads lines from a reader and writes them to a Writer,
+// splitting lines longer than MaxLogLine.
+func ReadWriteLines(in io.Reader, writer io.Writer, done chan<- bool) {
+       reader := bufio.NewReaderSize(in, MaxLogLine)
+       var prefix string
+       for {
+               line, isPrefix, err := reader.ReadLine()
+               if err == io.EOF {
+                       break
+               } else if err != nil {
+                       writer.Write([]byte(fmt.Sprintln("error reading container log:", err)))
+               }
+               var suffix string
+               if isPrefix {
+                       suffix = "[...]\n"
+               }
+
+               if prefix == "" && suffix == "" {
+                       writer.Write(line)
+               } else {
+                       writer.Write([]byte(fmt.Sprint(prefix, string(line), suffix)))
+               }
+
+               // Set up prefix for following line
+               if isPrefix {
+                       prefix = "[...]"
+               } else {
+                       prefix = ""
+               }
+       }
+       done <- true
+}
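A sketch (not part of this diff) of the splitting behaviour: a single
9000-byte line read through the 4096-byte buffer reaches the writer as
three separate Write calls, joined by "[...]" markers:

    in := strings.NewReader(strings.Repeat("x", 9000) + "\n")
    var out bytes.Buffer
    done := make(chan bool, 1)
    go ReadWriteLines(in, &out, done)
    <-done
    // out received three Write calls:
    //   first:  4096 "x"s followed by "[...]\n"
    //   second: "[...]", 4096 more "x"s, "[...]\n"
    //   third:  "[...]" and the final 808 "x"s (no trailing
    //           newline, matching the code above)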
+
+// NewThrottledLogger creates a new throttled logger that
+// (a) prepends timestamps to each line, and
+// (b) batches log messages, calling the underlying Writer at most
+// once per crunchLogSecondsBetweenEvents interval.
+func NewThrottledLogger(writer io.WriteCloser) *ThrottledLogger {
+       tl := &ThrottledLogger{}
+       tl.flush = make(chan struct{}, 1)
+       tl.stopped = make(chan struct{})
+       tl.stopping = make(chan struct{})
+       tl.writer = writer
+       tl.Logger = log.New(tl, "", 0)
+       tl.Timestamper = RFC3339Timestamp
+       go tl.flusher()
+       return tl
+}
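A minimal sketch of the intended use. The downstream writer here is a
stand-in; in crunch-run it is typically an ArvLogWriter, per the plumbing
comment near the top of this file:

    // nopCloser adapts an io.Writer to the io.WriteCloser the logger needs.
    type nopCloser struct{ io.Writer }

    func (nopCloser) Close() error { return nil }

    func exampleUsage() {
            tl := NewThrottledLogger(nopCloser{os.Stderr})
            tl.Print("container started") // timestamped and buffered
            tl.Printf("step %d done", 1)  // flushed at most once per interval
            tl.Close()                    // final flush, then close downstream
    }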
+
+// Log throttling and rate-limiting config parameters. These are the
+// compiled-in defaults; loadLogThrottleParams overrides them from the
+// discovery document.
+var crunchLimitLogBytesPerJob int64 = 67108864
+var crunchLogThrottleBytes int64 = 65536
+var crunchLogThrottlePeriod time.Duration = time.Second * 60
+var crunchLogThrottleLines int64 = 1024
+var crunchLogPartialLineThrottlePeriod time.Duration = time.Second * 5
+var crunchLogBytesPerEvent int64 = 4096
+var crunchLogSecondsBetweenEvents = time.Second
+var crunchLogUpdatePeriod = time.Hour / 2
+var crunchLogUpdateSize = int64(1 << 25)
+
+// ArvLogWriter is an io.WriteCloser that processes each write by
+// writing it through to another io.WriteCloser (typically a
+// CollectionFileWriter) and creating an Arvados log entry.
+type ArvLogWriter struct {
+       ArvClient     IArvadosClient
+       UUID          string
+       loggingStream string
+       writeCloser   io.WriteCloser
+
+       // for rate limiting
+       bytesLogged                  int64
+       logThrottleResetTime         time.Time
+       logThrottleLinesSoFar        int64
+       logThrottleBytesSoFar        int64
+       logThrottleBytesSkipped      int64
+       logThrottleIsOpen            bool
+       logThrottlePartialLineNextAt time.Time
+       logThrottleFirstPartialLine  bool
+       bufToFlush                   bytes.Buffer
+       bufFlushedAt                 time.Time
+       closing                      bool
+}
+
+func (arvlog *ArvLogWriter) Write(p []byte) (int, error) {
+       // Write to the next writer in the chain (a file in Keep)
+       var err1 error
+       if arvlog.writeCloser != nil {
+               _, err1 = arvlog.writeCloser.Write(p)
+       }
+
+       // write to API after checking rate limit
+       now := time.Now()
+
+       if now.After(arvlog.logThrottleResetTime) {
+               // It has been more than throttle_period seconds since the last
+               // checkpoint; so reset the throttle
+               if arvlog.logThrottleBytesSkipped > 0 {
+                       arvlog.bufToFlush.WriteString(fmt.Sprintf("%s Skipped %d bytes of log\n", RFC3339Timestamp(now.UTC()), arvlog.logThrottleBytesSkipped))
+               }
+
+               arvlog.logThrottleResetTime = now.Add(crunchLogThrottlePeriod)
+               arvlog.logThrottleBytesSoFar = 0
+               arvlog.logThrottleLinesSoFar = 0
+               arvlog.logThrottleBytesSkipped = 0
+               arvlog.logThrottleIsOpen = true
+       }
+
+       lines := bytes.Split(p, []byte("\n"))
+
+       for _, line := range lines {
+               // Short circuit the counting code if we're just going to throw
+               // away the data anyway.
+               if !arvlog.logThrottleIsOpen {
+                       arvlog.logThrottleBytesSkipped += int64(len(line))
+                       continue
+               } else if len(line) == 0 {
+                       continue
+               }
+
+               // check rateLimit
+               logOpen, msg := arvlog.rateLimit(line, now)
+               if logOpen {
+                       arvlog.bufToFlush.WriteString(string(msg) + "\n")
+               }
+       }
+
+       if (int64(arvlog.bufToFlush.Len()) >= crunchLogBytesPerEvent ||
+               (now.Sub(arvlog.bufFlushedAt) >= crunchLogSecondsBetweenEvents) ||
+               arvlog.closing) && (arvlog.bufToFlush.Len() > 0) {
+               // write to API
+               lr := arvadosclient.Dict{"log": arvadosclient.Dict{
+                       "object_uuid": arvlog.UUID,
+                       "event_type":  arvlog.loggingStream,
+                       "properties":  map[string]string{"text": arvlog.bufToFlush.String()}}}
+               err2 := arvlog.ArvClient.Create("logs", lr, nil)
+
+               arvlog.bufToFlush = bytes.Buffer{}
+               arvlog.bufFlushedAt = now
+
+               if err1 != nil || err2 != nil {
+                       return 0, fmt.Errorf("%s ; %s", err1, err2)
+               }
+       }
+
+       return len(p), nil
+}
+
+// Close the underlying writer
+func (arvlog *ArvLogWriter) Close() (err error) {
+       arvlog.closing = true
+       arvlog.Write([]byte{})
+       if arvlog.writeCloser != nil {
+               err = arvlog.writeCloser.Close()
+               arvlog.writeCloser = nil
+       }
+       return err
+}
+
+var lineRegexp = regexp.MustCompile(`^\S+ (.*)`)
+
+// rateLimit enforces the hard cap on total log output and the log
+// throttles. It returns whether the line should be written, and a
+// replacement message if a limit has been exceeded.
+func (arvlog *ArvLogWriter) rateLimit(line []byte, now time.Time) (bool, []byte) {
+       message := ""
+       lineSize := int64(len(line))
+
+       if arvlog.logThrottleIsOpen {
+               matches := lineRegexp.FindStringSubmatch(string(line))
+
+               if len(matches) == 2 && strings.HasPrefix(matches[1], "[...]") && strings.HasSuffix(matches[1], "[...]") {
+                       // This is a partial line.
+
+                       if arvlog.logThrottleFirstPartialLine {
+                               // This partial line should be suppressed. It is
+                               // the first suppressed segment of this line, so
+                               // emit a rate-limit message instead.
+                               arvlog.logThrottleFirstPartialLine = false
+                               arvlog.logThrottlePartialLineNextAt = now.Add(crunchLogPartialLineThrottlePeriod)
+                               arvlog.logThrottleBytesSkipped += lineSize
+                               return true, []byte(fmt.Sprintf("%s Rate-limiting partial segments of long lines to one every %d seconds.",
+                                       RFC3339Timestamp(now.UTC()), crunchLogPartialLineThrottlePeriod/time.Second))
+                       } else if now.After(arvlog.logThrottlePartialLineNextAt) {
+                               // The throttle period has passed.  Update timestamp and let it through.
+                               arvlog.logThrottlePartialLineNextAt = now.Add(crunchLogPartialLineThrottlePeriod)
+                       } else {
+                               // Suppress line.
+                               arvlog.logThrottleBytesSkipped += lineSize
+                               return false, line
+                       }
+               } else {
+                       // Not a partial line so reset.
+                       arvlog.logThrottlePartialLineNextAt = time.Time{}
+                       arvlog.logThrottleFirstPartialLine = true
+               }
+
+               arvlog.bytesLogged += lineSize
+               arvlog.logThrottleBytesSoFar += lineSize
+               arvlog.logThrottleLinesSoFar += 1
+
+               if arvlog.bytesLogged > crunchLimitLogBytesPerJob {
+                       message = fmt.Sprintf("%s Exceeded log limit %d bytes (crunch_limit_log_bytes_per_job). Log will be truncated.",
+                               RFC3339Timestamp(now.UTC()), crunchLimitLogBytesPerJob)
+                       arvlog.logThrottleResetTime = now.Add(365 * 24 * time.Hour)
+                       arvlog.logThrottleIsOpen = false
+
+               } else if arvlog.logThrottleBytesSoFar > crunchLogThrottleBytes {
+                       remainingTime := arvlog.logThrottleResetTime.Sub(now)
+                       message = fmt.Sprintf("%s Exceeded rate %d bytes per %d seconds (crunch_log_throttle_bytes). Logging will be silenced for the next %d seconds.",
+                               RFC3339Timestamp(now.UTC()), crunchLogThrottleBytes, crunchLogThrottlePeriod/time.Second, remainingTime/time.Second)
+                       arvlog.logThrottleIsOpen = false
+
+               } else if arvlog.logThrottleLinesSoFar > crunchLogThrottleLines {
+                       remainingTime := arvlog.logThrottleResetTime.Sub(now)
+                       message = fmt.Sprintf("%s Exceeded rate %d lines per %d seconds (crunch_log_throttle_lines), logging will be silenced for the next %d seconds.",
+                               RFC3339Timestamp(now.UTC()), crunchLogThrottleLines, crunchLogThrottlePeriod/time.Second, remainingTime/time.Second)
+                       arvlog.logThrottleIsOpen = false
+
+               }
+       }
+
+       if !arvlog.logThrottleIsOpen {
+               // Don't log anything if any limit has been exceeded. Just
+               // count the skipped bytes.
+               arvlog.logThrottleBytesSkipped += lineSize
+       }
+
+       if message != "" {
+               // Yes, write to logs, but use our "rate exceeded" message
+               // instead of the log message that exceeded the limit.
+               message += " A complete log is still being written to Keep, and will be available when the job finishes."
+               return true, []byte(message)
+       } else {
+               return arvlog.logThrottleIsOpen, line
+       }
+}
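A worked example of the limits above, using the compiled-in defaults: a
job that logs a steady 2 KiB per second trips crunchLogThrottleBytes
(64 KiB) 32 seconds into each 60-second window, so its remaining output
in that window is only counted as skipped bytes; a job whose cumulative
output ever exceeds crunchLimitLogBytesPerJob (64 MiB) is silenced
permanently, because logThrottleResetTime is pushed a year into the
future.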
+
+// loadLogThrottleParams loads the rate-limit config parameters from the
+// API server's discovery document.
+func loadLogThrottleParams(clnt IArvadosClient) {
+       loadDuration := func(dst *time.Duration, key string) {
+               if param, err := clnt.Discovery(key); err != nil {
+                       return
+               } else if d, ok := param.(float64); !ok {
+                       return
+               } else {
+                       *dst = time.Duration(d) * time.Second
+               }
+       }
+       loadInt64 := func(dst *int64, key string) {
+               if param, err := clnt.Discovery(key); err != nil {
+                       return
+               } else if val, ok := param.(float64); !ok {
+                       return
+               } else {
+                       *dst = int64(val)
+               }
+       }
+
+       loadInt64(&crunchLimitLogBytesPerJob, "crunchLimitLogBytesPerJob")
+       loadInt64(&crunchLogThrottleBytes, "crunchLogThrottleBytes")
+       loadDuration(&crunchLogThrottlePeriod, "crunchLogThrottlePeriod")
+       loadInt64(&crunchLogThrottleLines, "crunchLogThrottleLines")
+       loadDuration(&crunchLogPartialLineThrottlePeriod, "crunchLogPartialLineThrottlePeriod")
+       loadInt64(&crunchLogBytesPerEvent, "crunchLogBytesPerEvent")
+       loadDuration(&crunchLogSecondsBetweenEvents, "crunchLogSecondsBetweenEvents")
+       loadInt64(&crunchLogUpdateSize, "crunchLogUpdateSize")
+       loadDuration(&crunchLogUpdatePeriod, "crunchLogUpdatePeriod")
+
+}
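The discovery document is parsed JSON, so every numeric parameter arrives
as a float64; the two helpers above do the type assertion and the unit
conversion. A sketch of the mapping, assuming a discovery response
fragment:

    // {"crunchLogThrottlePeriod": 60, "crunchLogBytesPerEvent": 4096}
    // becomes, after loadLogThrottleParams:
    //   crunchLogThrottlePeriod == 60 * time.Second
    //   crunchLogBytesPerEvent  == int64(4096)
    // Keys that are missing or non-numeric silently keep their
    // compiled-in defaults, because both helpers return early.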
diff --git a/services/crunch-run/logging_test.go b/services/crunch-run/logging_test.go
new file mode 100644 (file)
index 0000000..78f984d
--- /dev/null
@@ -0,0 +1,221 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "fmt"
+       "strings"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       . "gopkg.in/check.v1"
+)
+
+type LoggingTestSuite struct {
+       client *arvados.Client
+}
+
+type TestTimestamper struct {
+       count int
+}
+
+func (this *TestTimestamper) Timestamp(t time.Time) string {
+       this.count += 1
+       t, err := time.ParseInLocation(time.RFC3339Nano, fmt.Sprintf("2015-12-29T15:51:45.%09dZ", this.count), t.Location())
+       if err != nil {
+               panic(err)
+       }
+       return RFC3339Timestamp(t)
+}
+
+// Gocheck boilerplate
+var _ = Suite(&LoggingTestSuite{})
+
+func (s *LoggingTestSuite) SetUpTest(c *C) {
+       s.client = arvados.NewClientFromEnv()
+       crunchLogUpdatePeriod = time.Hour * 24 * 365
+       crunchLogUpdateSize = 1 << 50
+}
+
+func (s *LoggingTestSuite) TestWriteLogs(c *C) {
+       api := &ArvTestClient{}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
+
+       cr.CrunchLog.Print("Hello world!")
+       cr.CrunchLog.Print("Goodbye")
+       cr.CrunchLog.Close()
+
+       c.Check(api.Calls, Equals, 1)
+
+       mt, err := cr.LogCollection.MarshalManifest(".")
+       c.Check(err, IsNil)
+       c.Check(mt, Equals, ". 74561df9ae65ee9f35d5661d42454264+83 0:83:crunch-run.txt\n")
+
+       logtext := "2015-12-29T15:51:45.000000001Z Hello world!\n" +
+               "2015-12-29T15:51:45.000000002Z Goodbye\n"
+
+       c.Check(api.Content[0]["log"].(arvadosclient.Dict)["event_type"], Equals, "crunch-run")
+       c.Check(api.Content[0]["log"].(arvadosclient.Dict)["properties"].(map[string]string)["text"], Equals, logtext)
+       c.Check(string(kc.Content), Equals, logtext)
+}
+
+func (s *LoggingTestSuite) TestWriteLogsLarge(c *C) {
+       if testing.Short() {
+               return
+       }
+       api := &ArvTestClient{}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
+       cr.CrunchLog.Immediate = nil
+
+       for i := 0; i < 2000000; i++ {
+               cr.CrunchLog.Printf("Hello %d", i)
+       }
+       cr.CrunchLog.Print("Goodbye")
+       cr.CrunchLog.Close()
+
+       c.Check(api.Calls > 0, Equals, true)
+       c.Check(api.Calls < 2000000, Equals, true)
+
+       mt, err := cr.LogCollection.MarshalManifest(".")
+       c.Check(err, IsNil)
+       c.Check(mt, Equals, ". 9c2c05d1fae6aaa8af85113ba725716d+67108864 80b821383a07266c2a66a4566835e26e+21780065 0:88888929:crunch-run.txt\n")
+}
+
+func (s *LoggingTestSuite) TestWriteMultipleLogs(c *C) {
+       api := &ArvTestClient{}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       ts := &TestTimestamper{}
+       cr.CrunchLog.Timestamper = ts.Timestamp
+       w, err := cr.NewLogWriter("stdout")
+       c.Assert(err, IsNil)
+       stdout := NewThrottledLogger(w)
+       stdout.Timestamper = ts.Timestamp
+
+       cr.CrunchLog.Print("Hello world!")
+       stdout.Print("Doing stuff")
+       cr.CrunchLog.Print("Goodbye")
+       stdout.Print("Blurb")
+       cr.CrunchLog.Close()
+       stdout.Close()
+
+       logText := make(map[string]string)
+       for _, content := range api.Content {
+               log := content["log"].(arvadosclient.Dict)
+               logText[log["event_type"].(string)] += log["properties"].(map[string]string)["text"]
+       }
+
+       c.Check(logText["crunch-run"], Equals, `2015-12-29T15:51:45.000000001Z Hello world!
+2015-12-29T15:51:45.000000003Z Goodbye
+`)
+       c.Check(logText["stdout"], Equals, `2015-12-29T15:51:45.000000002Z Doing stuff
+2015-12-29T15:51:45.000000004Z Blurb
+`)
+
+       mt, err := cr.LogCollection.MarshalManifest(".")
+       c.Check(err, IsNil)
+       c.Check(mt, Equals, ". 48f9023dc683a850b1c9b482b14c4b97+163 0:83:crunch-run.txt 83:80:stdout.txt\n")
+}
+
+func (s *LoggingTestSuite) TestLogUpdate(c *C) {
+       for _, trial := range []struct {
+               maxBytes    int64
+               maxDuration time.Duration
+       }{
+               {1000, 10 * time.Second},
+               {1000000, time.Millisecond},
+       } {
+               c.Logf("max %d bytes, %s", trial.maxBytes, trial.maxDuration)
+               crunchLogUpdateSize = trial.maxBytes
+               crunchLogUpdatePeriod = trial.maxDuration
+
+               api := &ArvTestClient{}
+               kc := &KeepTestClient{}
+               defer kc.Close()
+               cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
+               c.Assert(err, IsNil)
+               ts := &TestTimestamper{}
+               cr.CrunchLog.Timestamper = ts.Timestamp
+               w, err := cr.NewLogWriter("stdout")
+               c.Assert(err, IsNil)
+               stdout := NewThrottledLogger(w)
+               stdout.Timestamper = ts.Timestamp
+
+               c.Check(cr.logUUID, Equals, "")
+               cr.CrunchLog.Printf("Hello %1000s", "space")
+               for i, t := 0, time.NewTicker(time.Millisecond); i < 5000 && cr.logUUID == ""; i++ {
+                       <-t.C
+               }
+               c.Check(cr.logUUID, Not(Equals), "")
+               cr.CrunchLog.Print("Goodbye")
+               fmt.Fprint(stdout, "Goodbye\n")
+               cr.CrunchLog.Close()
+               stdout.Close()
+               w.Close()
+
+               mt, err := cr.LogCollection.MarshalManifest(".")
+               c.Check(err, IsNil)
+               // Block packing depends on whether there's an update
+               // between the two Goodbyes -- either way the first
+               // block will be 4dc76.
+               c.Check(mt, Matches, `. 4dc76e0a212bfa30c39d76d8c16da0c0\+1038 (afc503bc1b9a828b4bb543cb629e936c\+78|90699dc22545cd74a0664303f70bc05a\+39 276b49339fd5203d15a93ff3de11bfb9\+39) 0:1077:crunch-run.txt 1077:39:stdout.txt\n`)
+       }
+}
+
+func (s *LoggingTestSuite) TestWriteLogsWithRateLimitThrottleBytes(c *C) {
+       s.testWriteLogsWithRateLimit(c, "crunchLogThrottleBytes", 50, 65536, "Exceeded rate 50 bytes per 60 seconds")
+}
+
+func (s *LoggingTestSuite) TestWriteLogsWithRateLimitThrottleLines(c *C) {
+       s.testWriteLogsWithRateLimit(c, "crunchLogThrottleLines", 1, 1024, "Exceeded rate 1 lines per 60 seconds")
+}
+
+func (s *LoggingTestSuite) TestWriteLogsWithRateLimitThrottleBytesPerEvent(c *C) {
+       s.testWriteLogsWithRateLimit(c, "crunchLimitLogBytesPerJob", 50, 67108864, "Exceeded log limit 50 bytes (crunch_limit_log_bytes_per_job)")
+}
+
+func (s *LoggingTestSuite) testWriteLogsWithRateLimit(c *C, throttleParam string, throttleValue int, throttleDefault int, expected string) {
+       discoveryMap[throttleParam] = float64(throttleValue)
+       defer func() {
+               discoveryMap[throttleParam] = float64(throttleDefault)
+       }()
+
+       api := &ArvTestClient{}
+       kc := &KeepTestClient{}
+       defer kc.Close()
+       cr, err := NewContainerRunner(s.client, api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
+       c.Assert(err, IsNil)
+       cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
+
+       cr.CrunchLog.Print("Hello world!")
+       cr.CrunchLog.Print("Goodbye")
+       cr.CrunchLog.Close()
+
+       c.Check(api.Calls, Equals, 1)
+
+       mt, err := cr.LogCollection.MarshalManifest(".")
+       c.Check(err, IsNil)
+       c.Check(mt, Equals, ". 74561df9ae65ee9f35d5661d42454264+83 0:83:crunch-run.txt\n")
+
+       logtext := "2015-12-29T15:51:45.000000001Z Hello world!\n" +
+               "2015-12-29T15:51:45.000000002Z Goodbye\n"
+
+       c.Check(api.Content[0]["log"].(arvadosclient.Dict)["event_type"], Equals, "crunch-run")
+       stderrLog := api.Content[0]["log"].(arvadosclient.Dict)["properties"].(map[string]string)["text"]
+       c.Check(true, Equals, strings.Contains(stderrLog, expected))
+       c.Check(string(kc.Content), Equals, logtext)
+}
diff --git a/services/crunch/crunch-job b/services/crunch/crunch-job
new file mode 120000 (symlink)
index 0000000..ff0e702
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/cli/bin/arv-crunch-job
\ No newline at end of file
diff --git a/services/crunchstat/.gitignore b/services/crunchstat/.gitignore
new file mode 100644 (file)
index 0000000..c26270a
--- /dev/null
@@ -0,0 +1 @@
+crunchstat
diff --git a/services/crunchstat/crunchstat.go b/services/crunchstat/crunchstat.go
new file mode 100644 (file)
index 0000000..7e2dc01
--- /dev/null
@@ -0,0 +1,174 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bufio"
+       "flag"
+       "fmt"
+       "io"
+       "log"
+       "os"
+       "os/exec"
+       "os/signal"
+       "syscall"
+       "time"
+
+       "git.curoverse.com/arvados.git/lib/crunchstat"
+)
+
+const MaxLogLine = 1 << 14 // Child stderr lines >16KiB will be split
+
+var (
+       signalOnDeadPPID  int = 15
+       ppidCheckInterval     = time.Second
+       version               = "dev"
+)
+
+func main() {
+       reporter := crunchstat.Reporter{
+               Logger: log.New(os.Stderr, "crunchstat: ", 0),
+       }
+
+       flag.StringVar(&reporter.CgroupRoot, "cgroup-root", "", "Root of cgroup tree")
+       flag.StringVar(&reporter.CgroupParent, "cgroup-parent", "", "Name of container parent under cgroup")
+       flag.StringVar(&reporter.CIDFile, "cgroup-cid", "", "Path to container id file")
+       flag.IntVar(&signalOnDeadPPID, "signal-on-dead-ppid", signalOnDeadPPID, "Signal to send child if crunchstat's parent process disappears (0 to disable)")
+       flag.DurationVar(&ppidCheckInterval, "ppid-check-interval", ppidCheckInterval, "Time between checks for parent process disappearance")
+       pollMsec := flag.Int64("poll", 1000, "Reporting interval, in milliseconds")
+       getVersion := flag.Bool("version", false, "Print version information and exit.")
+
+       flag.Parse()
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("crunchstat %s\n", version)
+               return
+       }
+
+       reporter.Logger.Printf("crunchstat %s started", version)
+
+       if reporter.CgroupRoot == "" {
+               reporter.Logger.Fatal("error: must provide -cgroup-root")
+       } else if signalOnDeadPPID < 0 {
+               reporter.Logger.Fatalf("-signal-on-dead-ppid=%d is invalid (use a positive signal number, or 0 to disable)", signalOnDeadPPID)
+       }
+       reporter.PollPeriod = time.Duration(*pollMsec) * time.Millisecond
+
+       reporter.Start()
+       err := runCommand(flag.Args(), reporter.Logger)
+       reporter.Stop()
+
+       if err, ok := err.(*exec.ExitError); ok {
+               // The program has exited with an exit code != 0
+
+               // This works on both Unix and Windows. Although
+               // package syscall is generally platform dependent,
+               // WaitStatus is defined for both Unix and Windows and
+               // in both cases has an ExitStatus() method with the
+               // same signature.
+               if status, ok := err.Sys().(syscall.WaitStatus); ok {
+                       os.Exit(status.ExitStatus())
+               } else {
+                       reporter.Logger.Fatalln("ExitError without WaitStatus:", err)
+               }
+       } else if err != nil {
+               reporter.Logger.Fatalln("error in cmd.Wait:", err)
+       }
+}
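Putting the flags together, a typical invocation (the paths here are
illustrative, not from this diff) wraps the real workload, polls its
cgroup once per second, and exits with the child's exit status:

    crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker \
        -cgroup-cid=/tmp/container.cid -poll=1000 \
        -signal-on-dead-ppid=15 \
        my-program arg1 arg2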
+
+func runCommand(argv []string, logger *log.Logger) error {
+       cmd := exec.Command(argv[0], argv[1:]...)
+
+       logger.Println("Running", argv)
+
+       // Child process inherits our stdin and stdout directly
+       // (we close our copies below)
+       cmd.Stdin = os.Stdin
+       cmd.Stdout = os.Stdout
+
+       // Forward SIGINT and SIGTERM to child process
+       sigChan := make(chan os.Signal, 1)
+       go func(sig <-chan os.Signal) {
+               catch := <-sig
+               if cmd.Process != nil {
+                       cmd.Process.Signal(catch)
+               }
+               logger.Println("notice: caught signal:", catch)
+       }(sigChan)
+       signal.Notify(sigChan, syscall.SIGTERM)
+       signal.Notify(sigChan, syscall.SIGINT)
+
+       // Kill our child proc if our parent process disappears
+       if signalOnDeadPPID != 0 {
+               go sendSignalOnDeadPPID(ppidCheckInterval, signalOnDeadPPID, os.Getppid(), cmd, logger)
+       }
+
+       // Funnel the child's stderr through a pipe so we can relay it
+       stderrPipe, err := cmd.StderrPipe()
+       if err != nil {
+               logger.Fatalln("error in StderrPipe:", err)
+       }
+
+       // Run subprocess
+       if err := cmd.Start(); err != nil {
+               logger.Fatalln("error in cmd.Start:", err)
+       }
+
+       // Close stdin/stdout in this (parent) process
+       os.Stdin.Close()
+       os.Stdout.Close()
+
+       copyPipeToChildLog(stderrPipe, log.New(os.Stderr, "", 0))
+
+       return cmd.Wait()
+}
+
+func sendSignalOnDeadPPID(intvl time.Duration, signum, ppidOrig int, cmd *exec.Cmd, logger *log.Logger) {
+       ticker := time.NewTicker(intvl)
+       for range ticker.C {
+               ppid := os.Getppid()
+               if ppid == ppidOrig {
+                       continue
+               }
+               if cmd.Process == nil {
+                       // Child process isn't running yet
+                       continue
+               }
+               logger.Printf("notice: crunchstat ppid changed from %d to %d -- killing child pid %d with signal %d", ppidOrig, ppid, cmd.Process.Pid, signum)
+               err := cmd.Process.Signal(syscall.Signal(signum))
+               if err != nil {
+                       logger.Printf("error: sending signal: %s", err)
+                       continue
+               }
+               ticker.Stop()
+               break
+       }
+}
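The loop above works because an orphaned process is re-parented (to PID 1,
or to the nearest subreaper on Linux), so os.Getppid() stops returning the
value captured at startup. Condensed to its essence (a sketch; the real
code above uses a stoppable Ticker):

    ppidOrig := os.Getppid()
    for range time.Tick(ppidCheckInterval) {
            if os.Getppid() != ppidOrig {
                    // parent is gone; signal the child and stop watching
                    break
            }
    }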
+
+func copyPipeToChildLog(in io.ReadCloser, logger *log.Logger) {
+       reader := bufio.NewReaderSize(in, MaxLogLine)
+       var prefix string
+       for {
+               line, isPrefix, err := reader.ReadLine()
+               if err == io.EOF {
+                       break
+               } else if err != nil {
+                       logger.Fatal("error reading child stderr:", err)
+               }
+               var suffix string
+               if isPrefix {
+                       suffix = "[...]"
+               }
+               logger.Print(prefix, string(line), suffix)
+               // Set up prefix for following line
+               if isPrefix {
+                       prefix = "[...]"
+               } else {
+                       prefix = ""
+               }
+       }
+       in.Close()
+}
diff --git a/services/crunchstat/crunchstat_test.go b/services/crunchstat/crunchstat_test.go
new file mode 100644 (file)
index 0000000..eb02395
--- /dev/null
@@ -0,0 +1,238 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bufio"
+       "bytes"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "math/rand"
+       "os"
+       "os/exec"
+       "sync"
+       "syscall"
+       "testing"
+       "time"
+)
+
+// Test that copyPipeToChildLog works even on lines longer than
+// bufio.MaxScanTokenSize.
+func TestCopyPipeToChildLogLongLines(t *testing.T) {
+       logger, logBuf := bufLogger()
+
+       pipeIn, pipeOut := io.Pipe()
+       copied := make(chan bool)
+       go func() {
+               copyPipeToChildLog(pipeIn, logger)
+               close(copied)
+       }()
+
+       sentBytes := make([]byte, bufio.MaxScanTokenSize+MaxLogLine+(1<<22))
+       go func() {
+               pipeOut.Write([]byte("before\n"))
+
+               for i := range sentBytes {
+                       // Some bytes that aren't newlines:
+                       sentBytes[i] = byte((rand.Int() & 0xff) | 0x80)
+               }
+               sentBytes[len(sentBytes)-1] = '\n'
+               pipeOut.Write(sentBytes)
+
+               pipeOut.Write([]byte("after"))
+               pipeOut.Close()
+       }()
+
+       if before, err := logBuf.ReadBytes('\n'); err != nil || string(before) != "before\n" {
+               t.Fatalf("\"before\n\" not received (got \"%s\", %s)", before, err)
+       }
+
+       var receivedBytes []byte
+       done := false
+       for !done {
+               line, err := logBuf.ReadBytes('\n')
+               if err != nil {
+                       t.Fatal(err)
+               }
+               if len(line) >= 5 && string(line[0:5]) == "[...]" {
+                       if receivedBytes == nil {
+                               t.Fatal("Beginning of line reported as continuation")
+                       }
+                       line = line[5:]
+               }
+               if len(line) >= 6 && string(line[len(line)-6:]) == "[...]\n" {
+                       line = line[:len(line)-6]
+               } else {
+                       done = true
+               }
+               receivedBytes = append(receivedBytes, line...)
+       }
+       if bytes.Compare(receivedBytes, sentBytes) != 0 {
+               t.Fatalf("sent %d bytes, got %d different bytes", len(sentBytes), len(receivedBytes))
+       }
+
+       if after, err := logBuf.ReadBytes('\n'); err != nil || string(after) != "after\n" {
+               t.Fatalf("\"after\n\" not received (got \"%s\", %s)", after, err)
+       }
+
+       select {
+       case <-time.After(time.Second):
+               t.Fatal("Timeout")
+       case <-copied:
+               // Done.
+       }
+}
+
+func bufLogger() (*log.Logger, *bufio.Reader) {
+       r, w := io.Pipe()
+       logger := log.New(w, "", 0)
+       return logger, bufio.NewReader(r)
+}
+
+func TestSignalOnDeadPPID(t *testing.T) {
+       if !testDeadParent(t, 0) {
+               t.Fatal("child should still be alive after parent dies")
+       }
+       if testDeadParent(t, 15) {
+               t.Fatal("child should have been killed when parent died")
+       }
+}
+
+// testDeadParent returns true if crunchstat's child proc is still
+// alive after its parent dies.
+func testDeadParent(t *testing.T, signum int) bool {
+       var err error
+       var bin, childlockfile, parentlockfile *os.File
+       for _, f := range []**os.File{&bin, &childlockfile, &parentlockfile} {
+               *f, err = ioutil.TempFile("", "crunchstat_")
+               if err != nil {
+                       t.Fatal(err)
+               }
+               defer (*f).Close()
+               defer os.Remove((*f).Name())
+       }
+
+       bin.Close()
+       err = exec.Command("go", "build", "-o", bin.Name()).Run()
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       err = syscall.Flock(int(parentlockfile.Fd()), syscall.LOCK_EX)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       cmd := exec.Command("bash", "-c", `
+set -e
+"$BINFILE" -cgroup-root=/none -ppid-check-interval=10ms -signal-on-dead-ppid="$SIGNUM" bash -c '
+    set -e
+    unlock() {
+        flock --unlock "$CHILDLOCKFD"
+        kill %1
+    }
+    trap unlock TERM
+    flock --exclusive "$CHILDLOCKFD"
+    echo -n "$$" > "$CHILDLOCKFILE"
+    flock --unlock "$PARENTLOCKFD"
+    sleep 20 </dev/null >/dev/null 2>/dev/null &
+    wait %1
+    unlock
+' &
+
+# wait for inner bash to start, to ensure $BINFILE has seen this bash proc as its initial PPID
+flock --exclusive "$PARENTLOCKFILE" true
+`)
+       cmd.Env = append(os.Environ(),
+               "SIGNUM="+fmt.Sprintf("%d", signum),
+               "PARENTLOCKFD=3",
+               "PARENTLOCKFILE="+parentlockfile.Name(),
+               "CHILDLOCKFD=4",
+               "CHILDLOCKFILE="+childlockfile.Name(),
+               "BINFILE="+bin.Name())
+       cmd.ExtraFiles = []*os.File{parentlockfile, childlockfile}
+       stderr, err := cmd.StderrPipe()
+       if err != nil {
+               t.Fatal(err)
+       }
+       stdout, err := cmd.StdoutPipe()
+       if err != nil {
+               t.Fatal(err)
+       }
+       cmd.Start()
+       defer cmd.Wait()
+
+       var wg sync.WaitGroup
+       wg.Add(2)
+       defer wg.Wait()
+       for _, rdr := range []io.ReadCloser{stderr, stdout} {
+               go func(rdr io.ReadCloser) {
+                       defer wg.Done()
+                       buf := make([]byte, 1024)
+                       for {
+                               n, err := rdr.Read(buf)
+                               if n > 0 {
+                                       t.Logf("%s", buf[:n])
+                               }
+                               if err != nil {
+                                       return
+                               }
+                       }
+               }(rdr)
+       }
+
+       // Wait until inner bash process releases parentlockfile
+       // (which means it has locked childlockfile and written its
+       // PID)
+       err = exec.Command("flock", "--exclusive", parentlockfile.Name(), "true").Run()
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       childDone := make(chan bool)
+       go func() {
+               // Notify the main thread when the inner bash process
+               // releases its lock on childlockfile (which means
+               // either its sleep process ended or it received a
+               // TERM signal).
+               t0 := time.Now()
+               err = exec.Command("flock", "--exclusive", childlockfile.Name(), "true").Run()
+               if err != nil {
+                       t.Fatal(err)
+               }
+               t.Logf("child done after %s", time.Since(t0))
+               close(childDone)
+       }()
+
+       select {
+       case <-time.After(500 * time.Millisecond):
+               // Inner bash process is still alive after the timeout
+               // period. Kill it now, so our stdout and stderr pipes
+               // can finish and we don't leave a mess of child procs
+               // behind.
+               buf, err := ioutil.ReadFile(childlockfile.Name())
+               if err != nil {
+                       t.Fatal(err)
+               }
+               var childPID int
+               _, err = fmt.Sscanf(string(buf), "%d", &childPID)
+               if err != nil {
+                       t.Fatal(err)
+               }
+               child, err := os.FindProcess(childPID)
+               if err != nil {
+                       t.Fatal(err)
+               }
+               child.Signal(syscall.Signal(15))
+               return true
+
+       case <-childDone:
+               // Inner bash process ended soon after its grandparent
+               // ended.
+               return false
+       }
+}
diff --git a/services/dockercleaner/.gitignore b/services/dockercleaner/.gitignore
new file mode 120000 (symlink)
index 0000000..ed3b362
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/python/.gitignore
\ No newline at end of file
diff --git a/services/dockercleaner/MANIFEST.in b/services/dockercleaner/MANIFEST.in
new file mode 100644 (file)
index 0000000..5d510b4
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+include agpl-3.0.txt
+include arvados-docker-cleaner.service
+include arvados_version.py
\ No newline at end of file
diff --git a/services/dockercleaner/README.rst b/services/dockercleaner/README.rst
new file mode 100644 (file)
index 0000000..dd2b7e9
--- /dev/null
@@ -0,0 +1,5 @@
+.. Copyright (C) The Arvados Authors. All rights reserved.
+..
+.. SPDX-License-Identifier: Apache-2.0
+
+Arvados Docker Cleaner.
diff --git a/services/dockercleaner/agpl-3.0.txt b/services/dockercleaner/agpl-3.0.txt
new file mode 100644 (file)
index 0000000..dba13ed
--- /dev/null
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/services/dockercleaner/arvados-docker-cleaner.service b/services/dockercleaner/arvados-docker-cleaner.service
new file mode 100644 (file)
index 0000000..29697e4
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados Docker Image Cleaner
+Documentation=https://doc.arvados.org/
+After=network.target
+#AssertPathExists=/etc/arvados/docker-cleaner/docker-cleaner.json
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=simple
+Restart=always
+RestartSec=10s
+RestartPreventExitStatus=2
+#
+# This unwieldy ExecStart command detects at runtime whether
+# arvados-docker-cleaner is installed with the Python 3.5 Software
+# Collection, and if so, invokes it with the "scl" wrapper.
+ExecStart=/bin/sh -c 'if [ -e /opt/rh/rh-python35/root/bin/arvados-docker-cleaner ]; then exec scl enable rh-python35 arvados-docker-cleaner; else exec arvados-docker-cleaner; fi'
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/services/dockercleaner/arvados_docker/__init__.py b/services/dockercleaner/arvados_docker/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/dockercleaner/arvados_docker/cleaner.py b/services/dockercleaner/arvados_docker/cleaner.py
new file mode 100755 (executable)
index 0000000..2a0e8b9
--- /dev/null
@@ -0,0 +1,370 @@
+#!/usr/bin/env python3
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+"""arvados_docker.cleaner - Remove unused Docker images from compute nodes
+
+Usage:
+  python3 -m arvados_docker.cleaner --quota 50G
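+  python3 -m arvados_docker.cleaner --config /etc/arvados/docker-cleaner/docker-cleaner.json
+
+Example config file (all keys optional; see default_config() below):
+
+  {
+      "Quota": "50G",
+      "RemoveStoppedContainers": "always",
+      "Verbose": 2
+  }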
+"""
+
+import argparse
+import collections
+import copy
+import functools
+import json
+import logging
+import sys
+import time
+
+import docker
+
+DEFAULT_CONFIG_FILE = '/etc/arvados/docker-cleaner/docker-cleaner.json'
+
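+# Binary multipliers for the k/m/g/t size suffixes accepted by human_size().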
+SUFFIX_SIZES = {suffix: 1024 ** exp for exp, suffix in enumerate('kmgt', 1)}
+
+logger = logging.getLogger('arvados_docker.cleaner')
+
+
+def return_when_docker_not_found(result=None):
+    # If the decorated function raises a 404 error from Docker, return
+    # `result` instead.
+    def docker_not_found_decorator(orig_func):
+        @functools.wraps(orig_func)
+        def docker_not_found_wrapper(*args, **kwargs):
+            try:
+                return orig_func(*args, **kwargs)
+            except docker.errors.APIError as error:
+                if error.response.status_code != 404:
+                    raise
+                return result
+        return docker_not_found_wrapper
+    return docker_not_found_decorator
+
+
+class DockerImage:
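+    # One Docker image: its ID, virtual size, and when it was last used.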
+
+    def __init__(self, image_hash):
+        self.docker_id = image_hash['Id']
+        self.size = image_hash['VirtualSize']
+        self.last_used = -1
+
+    def used_at(self, use_time):
+        self.last_used = max(self.last_used, use_time)
+
+
+class DockerImages:
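+    # Tracks every known image plus the containers using each one, and
+    # decides which images to delete to stay within target_size bytes.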
+
+    def __init__(self, target_size):
+        self.target_size = target_size
+        self.images = {}
+        self.container_image_map = {}
+
+    @classmethod
+    def from_daemon(cls, target_size, docker_client):
+        images = cls(target_size)
+        for image in docker_client.images():
+            images.add_image(image)
+        return images
+
+    def add_image(self, image_hash):
+        image = DockerImage(image_hash)
+        self.images[image.docker_id] = image
+        logger.debug("Registered image %s", image.docker_id)
+
+    def del_image(self, image_id):
+        if image_id in self.images:
+            del self.images[image_id]
+            self.container_image_map = {
+                cid: cid_image
+                for cid, cid_image in self.container_image_map.items()
+                if cid_image != image_id}
+            logger.debug("Unregistered image %s", image_id)
+
+    def has_image(self, image_id):
+        return image_id in self.images
+
+    def add_user(self, container_hash, use_time):
+        image_id = container_hash['Image']
+        if image_id in self.images:
+            self.container_image_map[container_hash['Id']] = image_id
+            self.images[image_id].used_at(use_time)
+            logger.debug("Registered container %s using image %s",
+                         container_hash['Id'], image_id)
+
+    def end_user(self, cid):
+        self.container_image_map.pop(cid, None)
+        logger.debug("Unregistered container %s", cid)
+
+    def should_delete(self):
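+        # Yield the IDs of images to delete, least recently used first,
+        # until the images we keep fit within self.target_size.  Images
+        # backing known containers (or, with no containers, the single
+        # most recently used image) are always kept.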
+        if not self.images:
+            return
+        # Build a list of images, ordered by use time.
+        lru_images = list(self.images.values())
+        lru_images.sort(key=lambda image: image.last_used)
+        # Make sure we don't delete any images in use, or if there are
+        # none, the most recently used image.
+        if self.container_image_map:
+            keep_ids = set(self.container_image_map.values())
+        else:
+            keep_ids = {lru_images[-1].docker_id}
+        space_left = (self.target_size - sum(self.images[image_id].size
+                                             for image_id in keep_ids))
+        # Go through the list most recently used first, and note which
+        # images can be saved with the space allotted.
+        for image in reversed(lru_images):
+            if (image.docker_id not in keep_ids) and (image.size <= space_left):
+                keep_ids.add(image.docker_id)
+                space_left -= image.size
+        # Yield the Docker IDs of any image we don't want to save, least
+        # recently used first.
+        for image in lru_images:
+            if image.docker_id not in keep_ids:
+                yield image.docker_id
+
+
+class DockerEventHandlers:
+    # This class maps Docker event types to the names of methods that should
+    # receive those events.
+
+    def __init__(self):
+        self.handler_names = collections.defaultdict(list)
+
+    def on(self, *status_names):
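+        # Decorator factory: register the decorated method as a handler
+        # for the named Docker event statuses, e.g. @event_handlers.on('create').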
+        def register_handler(handler_method):
+            for status in status_names:
+                self.handler_names[status].append(handler_method.__name__)
+            return handler_method
+        return register_handler
+
+    def for_event(self, status):
+        return iter(self.handler_names[status])
+
+    def copy(self):
+        result = self.__class__()
+        result.handler_names = copy.deepcopy(self.handler_names)
+        return result
+
+
+class DockerEventListener:
+    # To use this class, define event_handlers as an instance of
+    # DockerEventHandlers.  Call run() to iterate over events and call the
+    # handler methods as they come in.
+    ENCODING = 'utf-8'
+
+    def __init__(self, events):
+        self.events = events
+
+    def run(self):
+        for event in self.events:
+            event = json.loads(event.decode(self.ENCODING))
+            if event.get('Type', 'container') != 'container':
+                continue
+            for method_name in self.event_handlers.for_event(event.get('status')):
+                getattr(self, method_name)(event)
+
+
+class DockerImageUseRecorder(DockerEventListener):
+    event_handlers = DockerEventHandlers()
+
+    def __init__(self, images, docker_client, events):
+        self.images = images
+        self.docker_client = docker_client
+        super().__init__(events)
+
+    @event_handlers.on('create')
+    @return_when_docker_not_found()
+    def load_container(self, event):
+        container_hash = self.docker_client.inspect_container(event['id'])
+        self.new_container(event, container_hash)
+
+    def new_container(self, event, container_hash):
+        self.images.add_user(container_hash, event['time'])
+
+    @event_handlers.on('destroy')
+    def container_stopped(self, event):
+        self.images.end_user(event['id'])
+
+
+class DockerImageCleaner(DockerImageUseRecorder):
+    event_handlers = DockerImageUseRecorder.event_handlers.copy()
+
+    def __init__(self, images, docker_client, events, remove_containers_onexit=False):
+        super().__init__(images, docker_client, events)
+        self.logged_unknown = set()
+        self.remove_containers_onexit = remove_containers_onexit
+
+    def new_container(self, event, container_hash):
+        container_image_id = container_hash['Image']
+        if not self.images.has_image(container_image_id):
+            image_hash = self.docker_client.inspect_image(container_image_id)
+            self.images.add_image(image_hash)
+        return super().new_container(event, container_hash)
+
+    def _remove_container(self, cid):
+        try:
+            self.docker_client.remove_container(cid, v=True)
+        except docker.errors.APIError as error:
+            logger.warning("Failed to remove container %s: %s", cid, error)
+        else:
+            logger.info("Removed container %s", cid)
+
+    @event_handlers.on('die')
+    def clean_container(self, event=None):
+        if self.remove_containers_onexit:
+            self._remove_container(event['id'])
+
+    def check_stopped_containers(self, remove=False):
+        logger.info("Checking for stopped containers")
+        for c in self.docker_client.containers(filters={'status': 'exited'}):
+            logger.info("Container %s %s", c['Id'], c['Status'])
+            if c['Status'][:6] != 'Exited':
+                logger.error("Unexpected status %s for container %s",
+                             c['Status'], c['Id'])
+            elif remove:
+                self._remove_container(c['Id'])
+
+    @event_handlers.on('destroy')
+    def clean_images(self, event=None):
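+        # Remove whatever images the tracker says are over quota,
+        # forgetting each one only after Docker confirms removal.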
+        for image_id in self.images.should_delete():
+            try:
+                self.docker_client.remove_image(image_id)
+            except docker.errors.APIError as error:
+                logger.warning(
+                    "Failed to remove image %s: %s", image_id, error)
+            else:
+                logger.info("Removed image %s", image_id)
+                self.images.del_image(image_id)
+
+    @event_handlers.on('destroy')
+    def log_unknown_images(self, event):
+        unknown_ids = {image['Id'] for image in self.docker_client.images()
+                       if not self.images.has_image(image['Id'])}
+        for image_id in (unknown_ids - self.logged_unknown):
+            logger.info(
+                "Image %s is loaded but unused, so it won't be cleaned",
+                image_id)
+        self.logged_unknown = unknown_ids
+
+
+def human_size(size_str):
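+    # Parse a human-readable size such as '50G' or '100mb' into bytes,
+    # treating k/m/g/t as binary multiples; e.g. '50G' -> 53687091200.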
+    size_str = size_str.lower().rstrip('b')
+    multiplier = SUFFIX_SIZES.get(size_str[-1])
+    if multiplier is None:
+        multiplier = 1
+    else:
+        size_str = size_str[:-1]
+    return int(size_str) * multiplier
+
+
+def load_config(arguments):
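+    # Build the effective configuration in order of increasing precedence:
+    # built-in defaults, then the JSON config file, then command-line
+    # flags (a flag only overrides when its value is truthy).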
+    args = parse_arguments(arguments)
+
+    config = default_config()
+    try:
+        with open(args.config, 'r') as f:
+            c = json.load(f)
+            config.update(c)
+    except (FileNotFoundError, IOError, ValueError) as error:
+        if (isinstance(error, FileNotFoundError) and
+            args.config == DEFAULT_CONFIG_FILE):
+            logger.warning("DEPRECATED: default config file %s not found; "
+                           "relying on command line configuration",
+                           repr(DEFAULT_CONFIG_FILE))
+        else:
+            sys.exit('error reading config file {}: {}'.format(
+                args.config, error))
+
+    configargs = vars(args).copy()
+    configargs.pop('config')
+    config.update({k: v for k, v in configargs.items() if v})
+
+    if isinstance(config['Quota'], str):
+        config['Quota'] = human_size(config['Quota'])
+
+    return config
+
+
+def default_config():
+    return {
+        'Quota': '1G',
+        'RemoveStoppedContainers': 'always',
+        'Verbose': 0,
+    }
+
+
+def parse_arguments(arguments):
+    class Formatter(argparse.ArgumentDefaultsHelpFormatter,
+                    argparse.RawDescriptionHelpFormatter):
+        pass
+    parser = argparse.ArgumentParser(
+        prog="arvados_docker.cleaner",
+        description="clean old Docker images from Arvados compute nodes",
+        epilog="Example config file:\n\n{}".format(
+            json.dumps(default_config(), indent=4)),
+        formatter_class=Formatter,
+    )
+    parser.add_argument(
+        '--config', action='store', type=str, default=DEFAULT_CONFIG_FILE,
+        help="configuration file")
+
+    deprecated = " (DEPRECATED -- use config file instead)"
+    parser.add_argument(
+        '--quota', action='store', type=human_size, dest='Quota',
+        help="space allowance for Docker images, suffixed with K/M/G/T" + deprecated)
+    parser.add_argument(
+        '--remove-stopped-containers', type=str, default='always', dest='RemoveStoppedContainers',
+        choices=['never', 'onexit', 'always'],
+        help="""when to remove stopped containers (default: always, i.e., remove
+        stopped containers found at startup, and remove containers as
+        soon as they exit)""" + deprecated)
+    parser.add_argument(
+        '--verbose', '-v', action='count', default=0, dest='Verbose',
+        help="log more information" + deprecated)
+
+    return parser.parse_args(arguments)
+
+
+def setup_logging():
+    log_handler = logging.StreamHandler()
+    log_handler.setFormatter(logging.Formatter(
+        '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s',
+        '%Y-%m-%d %H:%M:%S'))
+    logger.addHandler(log_handler)
+
+
+def configure_logging(config):
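+    # Each -v lowers the logging threshold by one level:
+    # 0=ERROR, 1=WARNING, 2=INFO, 3=DEBUG.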
+    logger.setLevel(logging.ERROR - (10 * config['Verbose']))
+
+
+def run(config, docker_client):
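+    # Phase 1: replay past Docker events up to the start time to learn
+    # when each existing image was last used.  Phase 2: hand off to a
+    # cleaner that follows live events and prunes images over quota.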
+    start_time = int(time.time())
+    logger.debug("Loading Docker activity through present")
+    images = DockerImages.from_daemon(config['Quota'], docker_client)
+    use_recorder = DockerImageUseRecorder(
+        images, docker_client, docker_client.events(since=1, until=start_time))
+    use_recorder.run()
+    cleaner = DockerImageCleaner(
+        images, docker_client, docker_client.events(since=start_time),
+        remove_containers_onexit=config['RemoveStoppedContainers'] != 'never')
+    cleaner.check_stopped_containers(
+        remove=config['RemoveStoppedContainers'] == 'always')
+    logger.info("Checking image quota at startup")
+    cleaner.clean_images()
+    logger.info("Listening for docker events")
+    cleaner.run()
+
+
+def main(arguments=sys.argv[1:]):
+    setup_logging()
+    config = load_config(arguments)
+    configure_logging(config)
+    try:
+        run(config, docker.Client(version='1.14'))
+    except KeyboardInterrupt:
+        sys.exit(1)
+
+if __name__ == '__main__':
+    main()
diff --git a/services/dockercleaner/arvados_version.py b/services/dockercleaner/arvados_version.py
new file mode 100644 (file)
index 0000000..2e6484c
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import subprocess
+import time
+import os
+import re
+
+def git_latest_tag():
+    gittags = subprocess.check_output(['git', 'tag', '-l']).split()
+    gittags.sort(key=lambda s: [int(u) for u in s.split(b'.')], reverse=True)
+    return str(next(iter(gittags)).decode('utf-8'))
+
+def git_timestamp_tag():
+    gitinfo = subprocess.check_output(
+        ['git', 'log', '--first-parent', '--max-count=1',
+         '--format=format:%ct', '.']).strip()
+    return str(time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo))))
+
+def save_version(setup_dir, module, v):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'w') as fp:
+        return fp.write("__version__ = '%s'\n" % v)
+
+def read_version(setup_dir, module):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'r') as fp:
+        return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+
+def get_version(setup_dir, module):
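+    # Prefer ARVADOS_BUILDING_VERSION when set; otherwise derive a version
+    # from the latest git tag plus the last commit timestamp, falling back
+    # to whatever _version.py already records if the git commands fail.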
+    env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
+
+    if env_version:
+        save_version(setup_dir, module, env_version)
+    else:
+        try:
+            save_version(setup_dir, module, git_latest_tag() + git_timestamp_tag())
+        except subprocess.CalledProcessError:
+            pass
+
+    return read_version(setup_dir, module)
diff --git a/services/dockercleaner/bin/arvados-docker-cleaner b/services/dockercleaner/bin/arvados-docker-cleaner
new file mode 100755 (executable)
index 0000000..c00593f
--- /dev/null
@@ -0,0 +1,9 @@
+#!/usr/bin/env python3
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+from arvados_docker.cleaner import main
+main()
diff --git a/services/dockercleaner/gittaggers.py b/services/dockercleaner/gittaggers.py
new file mode 120000 (symlink)
index 0000000..a9ad861
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/python/gittaggers.py
\ No newline at end of file
diff --git a/services/dockercleaner/setup.py b/services/dockercleaner/setup.py
new file mode 100644 (file)
index 0000000..9d8505e
--- /dev/null
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import
+import os
+import sys
+import re
+
+from setuptools import setup, find_packages
+
+SETUP_DIR = os.path.dirname(__file__) or '.'
+README = os.path.join(SETUP_DIR, 'README.rst')
+
+import arvados_version
+version = arvados_version.get_version(SETUP_DIR, "arvados_docker")
+
+short_tests_only = False
+if '--short-tests-only' in sys.argv:
+    short_tests_only = True
+    sys.argv.remove('--short-tests-only')
+
+setup(name="arvados-docker-cleaner",
+      version=version,
+      description="Arvados Docker cleaner",
+      author="Arvados",
+      author_email="info@arvados.org",
+      url="https://arvados.org",
+      download_url="https://github.com/curoverse/arvados.git",
+      license="GNU Affero General Public License version 3.0",
+      packages=find_packages(),
+      entry_points={
+          'console_scripts': ['arvados-docker-cleaner=arvados_docker.cleaner:main'],
+      },
+      data_files=[
+          ('share/doc/arvados-docker-cleaner', ['agpl-3.0.txt', 'arvados-docker-cleaner.service']),
+      ],
+      install_requires=[
+          'docker-py==1.7.2',
+          'setuptools',
+      ],
+      tests_require=[
+          'pbr<1.7.0',
+          'mock',
+      ],
+      test_suite='tests',
+      zip_safe=False
+)
diff --git a/services/dockercleaner/tests/__init__.py b/services/dockercleaner/tests/__init__.py
new file mode 100644 (file)
index 0000000..b86e38d
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python3
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import logging
+logging.getLogger('').setLevel(logging.CRITICAL)
diff --git a/services/dockercleaner/tests/test_cleaner.py b/services/dockercleaner/tests/test_cleaner.py
new file mode 100644 (file)
index 0000000..7580b01
--- /dev/null
@@ -0,0 +1,505 @@
+#!/usr/bin/env python3
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import collections
+import itertools
+import json
+import os
+import random
+import tempfile
+import time
+import unittest
+
+import docker
+import mock
+
+from arvados_docker import cleaner
+
+MAX_DOCKER_ID = (16 ** 64) - 1
+
+
+def MockDockerId():
+    return '{:064x}'.format(random.randint(0, MAX_DOCKER_ID))
+
+
+def MockContainer(image_hash):
+    return {'Id': MockDockerId(),
+            'Image': image_hash['Id']}
+
+
+def MockImage(*, size=0, vsize=None, tags=[]):
+    if vsize is None:
+        vsize = random.randint(100, 2000000)
+    return {'Id': MockDockerId(),
+            'ParentId': MockDockerId(),
+            'RepoTags': list(tags),
+            'Size': size,
+            'VirtualSize': vsize}
+
+
+class MockEvent(dict):
+    ENCODING = 'utf-8'
+    event_seq = itertools.count(1)
+
+    def __init__(self, status, docker_id=None, **event_data):
+        if docker_id is None:
+            docker_id = MockDockerId()
+        super().__init__(self, **event_data)
+        self['status'] = status
+        self['id'] = docker_id
+        self.setdefault('time', next(self.event_seq))
+
+    def encoded(self):
+        return json.dumps(self).encode(self.ENCODING)
+
+
+class MockException(docker.errors.APIError):
+
+    def __init__(self, status_code):
+        response = mock.Mock(name='response')
+        response.status_code = status_code
+        super().__init__("mock exception", response)
+
+
+class DockerImageTestCase(unittest.TestCase):
+
+    def test_used_at_sets_last_used(self):
+        image = cleaner.DockerImage(MockImage())
+        image.used_at(5)
+        self.assertEqual(5, image.last_used)
+
+    def test_used_at_moves_forward(self):
+        image = cleaner.DockerImage(MockImage())
+        image.used_at(6)
+        image.used_at(8)
+        self.assertEqual(8, image.last_used)
+
+    def test_used_at_does_not_go_backward(self):
+        image = cleaner.DockerImage(MockImage())
+        image.used_at(4)
+        image.used_at(2)
+        self.assertEqual(4, image.last_used)
+
+
+class DockerImagesTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.mock_images = []
+
+    def setup_mock_images(self, *vsizes):
+        self.mock_images.extend(MockImage(vsize=vsize) for vsize in vsizes)
+
+    def setup_images(self, *vsizes, target_size=1000000):
+        self.setup_mock_images(*vsizes)
+        images = cleaner.DockerImages(target_size)
+        for image in self.mock_images:
+            images.add_image(image)
+        return images
+
+    def test_has_image(self):
+        images = self.setup_images(None)
+        self.assertTrue(images.has_image(self.mock_images[0]['Id']))
+        self.assertFalse(images.has_image(MockDockerId()))
+
+    def test_del_image(self):
+        images = self.setup_images(None)
+        images.del_image(self.mock_images[0]['Id'])
+        self.assertFalse(images.has_image(self.mock_images[0]['Id']))
+
+    def test_del_nonexistent_image(self):
+        images = self.setup_images(None)
+        images.del_image(MockDockerId())
+        self.assertTrue(images.has_image(self.mock_images[0]['Id']))
+
+    def test_one_image_always_kept(self):
+        # When crunch-job starts a job, it makes sure each compute node
+        # has the Docker image loaded, then it runs all the tasks with
+        # the assumption the image is on each node.  As long as that's
+        # true, the cleaner should avoid removing every installed image:
+        # crunch-job might be counting on the most recent one to be
+        # available, even if it's not currently in use.
+        images = self.setup_images(None, None, target_size=1)
+        for use_time, image in enumerate(self.mock_images, 1):
+            user = MockContainer(image)
+            images.add_user(user, use_time)
+            images.end_user(user['Id'])
+        self.assertEqual([self.mock_images[0]['Id']],
+                         list(images.should_delete()))
+
+    def test_images_under_target_not_deletable(self):
+        # The images are used in this order.  target_size is set so it
+        # could hold the largest image, but not after the most recently
+        # used image is kept; then we have to fall back to the previous one.
+        images = self.setup_images(20, 30, 40, 10, target_size=45)
+        for use_time, image in enumerate(self.mock_images, 1):
+            user = MockContainer(image)
+            images.add_user(user, use_time)
+            images.end_user(user['Id'])
+        self.assertEqual([self.mock_images[ii]['Id'] for ii in [0, 2]],
+                         list(images.should_delete()))
+
+    def test_images_in_use_not_deletable(self):
+        images = self.setup_images(None, None, target_size=1)
+        users = [MockContainer(image) for image in self.mock_images]
+        images.add_user(users[0], 1)
+        images.add_user(users[1], 2)
+        images.end_user(users[1]['Id'])
+        self.assertEqual([self.mock_images[1]['Id']],
+                         list(images.should_delete()))
+
+    def test_image_deletable_after_unused(self):
+        images = self.setup_images(None, None, target_size=1)
+        users = [MockContainer(image) for image in self.mock_images]
+        images.add_user(users[0], 1)
+        images.add_user(users[1], 2)
+        images.end_user(users[0]['Id'])
+        self.assertEqual([self.mock_images[0]['Id']],
+                         list(images.should_delete()))
+
+    def test_image_not_deletable_if_user_restarts(self):
+        images = self.setup_images(None, target_size=1)
+        user = MockContainer(self.mock_images[-1])
+        images.add_user(user, 1)
+        images.end_user(user['Id'])
+        images.add_user(user, 2)
+        self.assertEqual([], list(images.should_delete()))
+
+    def test_image_not_deletable_if_any_user_remains(self):
+        images = self.setup_images(None, target_size=1)
+        users = [MockContainer(self.mock_images[0]) for ii in range(2)]
+        images.add_user(users[0], 1)
+        images.add_user(users[1], 2)
+        images.end_user(users[0]['Id'])
+        self.assertEqual([], list(images.should_delete()))
+
+    def test_image_deletable_after_all_users_end(self):
+        images = self.setup_images(None, None, target_size=1)
+        users = [MockContainer(self.mock_images[ii]) for ii in [0, 1, 1]]
+        images.add_user(users[0], 1)
+        images.add_user(users[1], 2)
+        images.add_user(users[2], 3)
+        images.end_user(users[1]['Id'])
+        images.end_user(users[2]['Id'])
+        self.assertEqual([self.mock_images[-1]['Id']],
+                         list(images.should_delete()))
+
+    def test_images_suggested_for_deletion_by_lru(self):
+        images = self.setup_images(10, 10, 10, target_size=1)
+        users = [MockContainer(image) for image in self.mock_images]
+        images.add_user(users[0], 3)
+        images.add_user(users[1], 1)
+        images.add_user(users[2], 2)
+        for user in users:
+            images.end_user(user['Id'])
+        self.assertEqual([self.mock_images[ii]['Id'] for ii in [1, 2]],
+                         list(images.should_delete()))
+
+    def test_adding_user_without_image_does_not_implicitly_add_image(self):
+        images = self.setup_images(10)
+        images.add_user(MockContainer(MockImage()), 1)
+        self.assertEqual([], list(images.should_delete()))
+
+    def test_nonexistent_user_removed(self):
+        images = self.setup_images()
+        images.end_user('nonexistent')
+        # No exception should be raised.
+
+    def test_del_image_effective_with_users_present(self):
+        images = self.setup_images(None, target_size=1)
+        user = MockContainer(self.mock_images[0])
+        images.add_user(user, 1)
+        images.del_image(self.mock_images[0]['Id'])
+        images.end_user(user['Id'])
+        self.assertEqual([], list(images.should_delete()))
+
+    def setup_from_daemon(self, *vsizes, target_size=1500000):
+        self.setup_mock_images(*vsizes)
+        docker_client = mock.MagicMock(name='docker_client')
+        docker_client.images.return_value = iter(self.mock_images)
+        return cleaner.DockerImages.from_daemon(target_size, docker_client)
+
+    def test_images_loaded_from_daemon(self):
+        images = self.setup_from_daemon(None, None)
+        for image in self.mock_images:
+            self.assertTrue(images.has_image(image['Id']))
+
+    def test_target_size_set_from_daemon(self):
+        images = self.setup_from_daemon(20, 10, 5, target_size=15)
+        user = MockContainer(self.mock_images[-1])
+        images.add_user(user, 1)
+        self.assertEqual([self.mock_images[0]['Id']],
+                         list(images.should_delete()))
+
+
+class DockerImageUseRecorderTestCase(unittest.TestCase):
+    TEST_CLASS = cleaner.DockerImageUseRecorder
+    TEST_CLASS_INIT_KWARGS = {}
+
+    def setUp(self):
+        self.images = mock.MagicMock(name='images')
+        self.docker_client = mock.MagicMock(name='docker_client')
+        self.events = []
+        self.recorder = self.TEST_CLASS(self.images, self.docker_client,
+                                        self.encoded_events, **self.TEST_CLASS_INIT_KWARGS)
+
+    @property
+    def encoded_events(self):
+        return (event.encoded() for event in self.events)
+
+    def test_unknown_events_ignored(self):
+        self.events.append(MockEvent('mock!event'))
+        self.recorder.run()
+        # No exception should be raised.
+
+    def test_fetches_container_on_create(self):
+        self.events.append(MockEvent('create'))
+        self.recorder.run()
+        self.docker_client.inspect_container.assert_called_with(
+            self.events[0]['id'])
+
+    def test_adds_user_on_container_create(self):
+        self.events.append(MockEvent('create'))
+        self.recorder.run()
+        self.images.add_user.assert_called_with(
+            self.docker_client.inspect_container(), self.events[0]['time'])
+
+    def test_unknown_image_handling(self):
+        # The use recorder should not fetch any images.
+        self.events.append(MockEvent('create'))
+        self.recorder.run()
+        self.assertFalse(self.docker_client.inspect_image.called)
+
+    def test_unfetchable_containers_ignored(self):
+        self.events.append(MockEvent('create'))
+        self.docker_client.inspect_container.side_effect = MockException(404)
+        self.recorder.run()
+        self.assertFalse(self.images.add_user.called)
+
+    def test_ends_user_on_container_destroy(self):
+        self.events.append(MockEvent('destroy'))
+        self.recorder.run()
+        self.images.end_user.assert_called_with(self.events[0]['id'])
+
+
+class DockerImageCleanerTestCase(DockerImageUseRecorderTestCase):
+    TEST_CLASS = cleaner.DockerImageCleaner
+
+    def test_unknown_image_handling(self):
+        # The image cleaner should fetch and record new images.
+        self.images.has_image.return_value = False
+        self.events.append(MockEvent('create'))
+        self.recorder.run()
+        self.docker_client.inspect_image.assert_called_with(
+            self.docker_client.inspect_container()['Image'])
+        self.images.add_image.assert_called_with(
+            self.docker_client.inspect_image())
+
+    def test_unfetchable_images_ignored(self):
+        self.images.has_image.return_value = False
+        self.docker_client.inspect_image.side_effect = MockException(404)
+        self.events.append(MockEvent('create'))
+        self.recorder.run()
+        self.docker_client.inspect_image.assert_called_with(
+            self.docker_client.inspect_container()['Image'])
+        self.assertFalse(self.images.add_image.called)
+
+    def test_deletions_after_destroy(self):
+        delete_id = MockDockerId()
+        self.images.should_delete.return_value = [delete_id]
+        self.events.append(MockEvent('destroy'))
+        self.recorder.run()
+        self.docker_client.remove_image.assert_called_with(delete_id)
+        self.images.del_image.assert_called_with(delete_id)
+
+    def test_failed_deletion_handling(self):
+        delete_id = MockDockerId()
+        self.images.should_delete.return_value = [delete_id]
+        self.docker_client.remove_image.side_effect = MockException(500)
+        self.events.append(MockEvent('destroy'))
+        self.recorder.run()
+        self.docker_client.remove_image.assert_called_with(delete_id)
+        self.assertFalse(self.images.del_image.called)
+
+
+class DockerContainerCleanerTestCase(DockerImageUseRecorderTestCase):
+    TEST_CLASS = cleaner.DockerImageCleaner
+    TEST_CLASS_INIT_KWARGS = {'remove_containers_onexit': True}
+
+    def test_container_deletion_deletes_volumes(self):
+        cid = MockDockerId()
+        self.events.append(MockEvent('die', docker_id=cid))
+        self.recorder.run()
+        self.docker_client.remove_container.assert_called_with(cid, v=True)
+
+    @mock.patch('arvados_docker.cleaner.logger')
+    def test_failed_container_deletion_handling(self, mockLogger):
+        cid = MockDockerId()
+        self.docker_client.remove_container.side_effect = MockException(500)
+        self.events.append(MockEvent('die', docker_id=cid))
+        self.recorder.run()
+        self.docker_client.remove_container.assert_called_with(cid, v=True)
+        self.assertEqual("Failed to remove container %s: %s",
+                         mockLogger.warning.call_args[0][0])
+        self.assertEqual(cid,
+                         mockLogger.warning.call_args[0][1])
+
+
+class HumanSizeTestCase(unittest.TestCase):
+
+    def check(self, human_str, count, exp):
+        self.assertEqual(count * (1024 ** exp),
+                         cleaner.human_size(human_str))
+
+    def test_bytes(self):
+        self.check('1', 1, 0)
+        self.check('82', 82, 0)
+
+    def test_kibibytes(self):
+        self.check('2K', 2, 1)
+        self.check('3k', 3, 1)
+
+    def test_mebibytes(self):
+        self.check('4M', 4, 2)
+        self.check('5m', 5, 2)
+
+    def test_gibibytes(self):
+        self.check('6G', 6, 3)
+        self.check('7g', 7, 3)
+
+    def test_tebibytes(self):
+        self.check('8T', 8, 4)
+        self.check('9t', 9, 4)
+
+
+class RunTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.config = cleaner.default_config()
+        self.config['Quota'] = 1000000
+        self.docker_client = mock.MagicMock(name='docker_client')
+
+    def test_run(self):
+        test_start_time = int(time.time())
+        self.docker_client.events.return_value = []
+        cleaner.run(self.config, self.docker_client)
+        self.assertEqual(2, self.docker_client.events.call_count)
+        event_kwargs = [args[1] for args in
+                        self.docker_client.events.call_args_list]
+        self.assertIn('since', event_kwargs[0])
+        self.assertIn('until', event_kwargs[0])
+        self.assertLessEqual(test_start_time, event_kwargs[0]['until'])
+        self.assertIn('since', event_kwargs[1])
+        self.assertEqual(event_kwargs[0]['until'], event_kwargs[1]['since'])
+
+
+@mock.patch('docker.Client', name='docker_client')
+@mock.patch('arvados_docker.cleaner.run', name='cleaner_run')
+class MainTestCase(unittest.TestCase):
+
+    def test_client_api_version(self, run_mock, docker_client):
+        with tempfile.NamedTemporaryFile(mode='wt') as cf:
+            cf.write('{"Quota":"1000T"}')
+            cf.flush()
+            cleaner.main(['--config', cf.name])
+        self.assertEqual(1, docker_client.call_count)
+        # 1.14 is the first version that's well defined, going back to
+        # Docker 1.2, and still supported up to at least Docker 1.9.
+        # See
+        # <https://docs.docker.com/engine/reference/api/docker_remote_api/>.
+        self.assertEqual('1.14',
+                         docker_client.call_args[1].get('version'))
+        self.assertEqual(1, run_mock.call_count)
+        self.assertIs(run_mock.call_args[0][1], docker_client())
+
+
+class ConfigTestCase(unittest.TestCase):
+
+    def test_load_config(self):
+        with tempfile.NamedTemporaryFile(mode='wt') as cf:
+            cf.write(
+                '{"Quota":"1000T", "RemoveStoppedContainers":"always", "Verbose":2}')
+            cf.flush()
+            config = cleaner.load_config(['--config', cf.name])
+        self.assertEqual(1000 << 40, config['Quota'])
+        self.assertEqual("always", config['RemoveStoppedContainers'])
+        self.assertEqual(2, config['Verbose'])
+
+    def test_args_override_config(self):
+        with tempfile.NamedTemporaryFile(mode='wt') as cf:
+            cf.write(
+                '{"Quota":"1000T", "RemoveStoppedContainers":"always", "Verbose":2}')
+            cf.flush()
+            config = cleaner.load_config([
+                '--config', cf.name,
+                '--quota', '1G',
+                '--remove-stopped-containers', 'never',
+                '--verbose',
+            ])
+        self.assertEqual(1 << 30, config['Quota'])
+        self.assertEqual('never', config['RemoveStoppedContainers'])
+        self.assertEqual(1, config['Verbose'])
+
+    def test_args_no_config(self):
+        self.assertEqual(False, os.path.exists(cleaner.DEFAULT_CONFIG_FILE))
+        config = cleaner.load_config(['--quota', '1G'])
+        self.assertEqual(1 << 30, config['Quota'])
+
+
+class ContainerRemovalTestCase(unittest.TestCase):
+    LIFECYCLE = ['create', 'attach', 'start', 'resize', 'die', 'destroy']
+
+    def setUp(self):
+        self.config = cleaner.default_config()
+        self.docker_client = mock.MagicMock(name='docker_client')
+        self.existingCID = MockDockerId()
+        self.docker_client.containers.return_value = [{
+            'Id': self.existingCID,
+            'Status': 'Exited (0) 6 weeks ago',
+        }, {
+            # If docker_client.containers() returns non-exited
+            # containers for some reason, do not remove them.
+            'Id': MockDockerId(),
+            'Status': 'Running',
+        }]
+        self.newCID = MockDockerId()
+        self.docker_client.events.return_value = [
+            MockEvent(e, docker_id=self.newCID).encoded()
+            for e in self.LIFECYCLE]
+
+    def test_remove_onexit(self):
+        self.config['RemoveStoppedContainers'] = 'onexit'
+        cleaner.run(self.config, self.docker_client)
+        self.docker_client.remove_container.assert_called_once_with(
+            self.newCID, v=True)
+
+    def test_remove_always(self):
+        self.config['RemoveStoppedContainers'] = 'always'
+        cleaner.run(self.config, self.docker_client)
+        self.docker_client.remove_container.assert_any_call(
+            self.existingCID, v=True)
+        self.docker_client.remove_container.assert_any_call(
+            self.newCID, v=True)
+        self.assertEqual(2, self.docker_client.remove_container.call_count)
+
+    def test_remove_never(self):
+        self.config['RemoveStoppedContainers'] = 'never'
+        cleaner.run(self.config, self.docker_client)
+        self.assertEqual(0, self.docker_client.remove_container.call_count)
+
+    def test_container_exited_between_subscribe_events_and_check_existing(self):
+        self.config['RemoveStoppedContainers'] = 'always'
+        self.docker_client.events.return_value = [
+            MockEvent(e, docker_id=self.existingCID).encoded()
+            for e in ['die', 'destroy']]
+        cleaner.run(self.config, self.docker_client)
+        # Subscribed to events before getting the list of existing
+        # exited containers?
+        self.docker_client.assert_has_calls([
+            mock.call.events(since=mock.ANY),
+            mock.call.containers(filters={'status': 'exited'})])
+        # Asked to delete the container twice?
+        self.docker_client.remove_container.assert_has_calls(
+            [mock.call(self.existingCID, v=True)] * 2)
+        self.assertEqual(2, self.docker_client.remove_container.call_count)
diff --git a/services/fuse/.gitignore b/services/fuse/.gitignore
new file mode 120000 (symlink)
index 0000000..ed3b362
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/python/.gitignore
\ No newline at end of file
diff --git a/services/fuse/MANIFEST.in b/services/fuse/MANIFEST.in
new file mode 100644 (file)
index 0000000..7322a0a
--- /dev/null
@@ -0,0 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+include agpl-3.0.txt
+include README.rst
+include arvados_version.py
\ No newline at end of file
diff --git a/services/fuse/README.rst b/services/fuse/README.rst
new file mode 100644 (file)
index 0000000..d91ae05
--- /dev/null
@@ -0,0 +1,70 @@
+.. Copyright (C) The Arvados Authors. All rights reserved.
+..
+.. SPDX-License-Identifier: AGPL-3.0
+
+========================
+Arvados Keep FUSE Driver
+========================
+
+Overview
+--------
+
+This package provides a FUSE driver for Keep, the Arvados_ storage
+system.  It allows you to read data from your collections as if they
+were on the local filesystem.
+
+.. _Arvados: https://arvados.org/
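+
+Once the package is installed and configured (see below), mounting is
+typically a single command; this is an illustrative sketch, not part of
+the package documentation proper::
+
+  $ arv-mount ~/keep
+  $ ls ~/keep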
+
+Installation
+------------
+
+Installing under your user account
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This method lets you install the package without root access.
+However, other users on the same system won't be able to use it.
+
+1. Run ``pip install --user arvados_fuse``.
+
+2. In your shell configuration, make sure you add ``$HOME/.local/bin``
+   to your PATH environment variable.  For example, you could add the
+   command ``PATH=$PATH:$HOME/.local/bin`` to your ``.bashrc`` file.
+
+3. Reload your shell configuration.  For example, bash users could run
+   ``source ~/.bashrc``.
+
+Installing on Debian systems
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+1. Add this Arvados repository to your sources list::
+
+     deb http://apt.arvados.org/ jessie main
+
+2. Update your package list.
+
+3. Install the ``python-arvados-fuse`` package.
+
+Configuration
+-------------
+
+This driver needs two pieces of information to connect to
+Arvados: the DNS name of the API server, and an API authorization
+token.  You can set these in environment variables or in the file
+``$HOME/.config/arvados/settings.conf``.  `The Arvados user
+documentation
+<http://doc.arvados.org/user/reference/api-tokens.html>`_ describes
+how to find this information in the Arvados Workbench and how to
+install it on your system.
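+
+As a sketch (the host and token values below are placeholders, not real
+credentials), ``settings.conf`` consists of simple ``NAME=value`` lines,
+and the same names also work as environment variables::
+
+  ARVADOS_API_HOST=zzzzz.arvadosapi.com
+  ARVADOS_API_TOKEN=your_token_copied_from_workbench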
+
+Testing and Development
+-----------------------
+
+Debian packages you need to build llfuse::
+
+  $ apt-get install python-dev pkg-config libfuse-dev libattr1-dev
+
+This package is one part of the Arvados source package, and it has
+integration tests to check interoperability with other Arvados
+components.  Our `hacking guide
+<https://arvados.org/projects/arvados/wiki/Hacking_Python_SDK>`_
+describes how to set up a development environment and run tests.
diff --git a/services/fuse/agpl-3.0.txt b/services/fuse/agpl-3.0.txt
new file mode 100644 (file)
index 0000000..dba13ed
--- /dev/null
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/services/fuse/arvados_fuse/__init__.py b/services/fuse/arvados_fuse/__init__.py
new file mode 100644 (file)
index 0000000..f1e49f5
--- /dev/null
@@ -0,0 +1,772 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+"""FUSE driver for Arvados Keep
+
+Architecture:
+
+There is one `Operations` object per mount point.  It is the entry point for all
+read and write requests from the llfuse module.
+
+The operations object owns an `Inodes` object.  The inodes object stores the
+mapping from numeric inode (used throughout the file system API to uniquely
+identify files) to the Python objects that implement files and directories.
+
+The `Inodes` object owns an `InodeCache` object.  The inode cache records the
+memory footprint of file system objects and when they are last used.  When the
+cache limit is exceeded, the least recently used objects are cleared.
+
+File system objects inherit from `fresh.FreshBase`, which manages the object
+lifecycle.
+
+File objects inherit from `fusefile.File`.  Key methods are `readfrom` and `writeto`
+which implement actual reads and writes.
+
+Directory objects inherit from `fusedir.Directory`.  The directory object wraps
+a Python dict which stores the mapping from filenames to directory entries.
+Directory contents can be accessed through the Python operators such as `[]`
+and `in`.  These methods automatically check if the directory is fresh (up to
+date) or stale (needs update) and will call `update` if necessary before
+returning a result.
+
+The general FUSE operation flow is as follows:
+
+- The request handler is called with either an inode or file handle that is the
+  subject of the operation.
+
+- Look up the inode using the Inodes table or the file handle in the
+  filehandles table to get the file system object.
+
+- For methods that alter files or directories, check that the operation is
+  valid and permitted using _check_writable().
+
+- Call the relevant method on the file system object.
+
+- Return the result.
+
+The FUSE driver supports the Arvados event bus.  When an event is received for
+an object that is live in the inode cache, that object is immediately updated.
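+
+As a rough sketch of how a mount gets wired up (the `Operations`
+constructor arguments below are illustrative assumptions, not the
+definitive signature)::
+
+    operations = Operations(os.getuid(), os.getgid())
+    llfuse.init(operations, mountpoint, ['fsname=keep'])
+    llfuse.main()   # serve FUSE requests until the mount is unmounted
+    llfuse.close()  # tear down the mount point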
+
+"""
+
+import os
+import sys
+import llfuse
+import errno
+import stat
+import threading
+import arvados
+import pprint
+import arvados.events
+import re
+import apiclient
+import json
+import logging
+import time
+import _strptime
+import calendar
+import itertools
+import ciso8601
+import collections
+import functools
+import arvados.keep
+
+import Queue
+
+# The default _notify_queue has a limit of 1000 items, but it really needs to
+# be unlimited to avoid deadlocks; see https://arvados.org/issues/3198#note-43
+# for details.
+
+if hasattr(llfuse, 'capi'):
+    # llfuse < 0.42
+    llfuse.capi._notify_queue = Queue.Queue()
+else:
+    # llfuse >= 0.42
+    llfuse._notify_queue = Queue.Queue()
+
+LLFUSE_VERSION_0 = llfuse.__version__.startswith('0')
+
+from fusedir import sanitize_filename, Directory, CollectionDirectory, TmpCollectionDirectory, MagicDirectory, TagsDirectory, ProjectDirectory, SharedDirectory, CollectionDirectoryBase
+from fusefile import StringFile, FuseArvadosFile
+
+_logger = logging.getLogger('arvados.arvados_fuse')
+
+# Uncomment this to enable llfuse debug logging.
+# log_handler = logging.StreamHandler()
+# llogger = logging.getLogger('llfuse')
+# llogger.addHandler(log_handler)
+# llogger.setLevel(logging.DEBUG)
+
+class Handle(object):
+    """Connects a numeric file handle to a File or Directory object that has
+    been opened by the client."""
+
+    def __init__(self, fh, obj):
+        self.fh = fh
+        self.obj = obj
+        self.obj.inc_use()
+
+    def release(self):
+        self.obj.dec_use()
+
+    def flush(self):
+        pass
+
+
+class FileHandle(Handle):
+    """Connects a numeric file handle to a File  object that has
+    been opened by the client."""
+
+    def flush(self):
+        if self.obj.writable():
+            return self.obj.flush()
+
+
+class DirectoryHandle(Handle):
+    """Connects a numeric file handle to a Directory object that has
+    been opened by the client."""
+
+    def __init__(self, fh, dirobj, entries):
+        super(DirectoryHandle, self).__init__(fh, dirobj)
+        self.entries = entries
+
+
+class InodeCache(object):
+    """Records the memory footprint of objects and when they are last used.
+
+    When the cache limit is exceeded, the least recently used objects are
+    cleared.  Clearing the object means discarding its contents to release
+    memory.  The next time the object is accessed, it must be re-fetched from
+    the server.  Note that the inode cache limit is a soft limit; the cache
+limit may be exceeded if necessary to load very large objects, and it may
+also be exceeded if open file handles prevent objects from being cleared.
+
+    """
+
+    def __init__(self, cap, min_entries=4):
+        self._entries = collections.OrderedDict()
+        self._by_uuid = {}
+        self.cap = cap
+        self._total = 0
+        self.min_entries = min_entries
+
+    def total(self):
+        return self._total
+
+    def _remove(self, obj, clear):
+        if clear:
+            # Kernel behavior seems to be that if a file is
+            # referenced, its parents remain referenced too. This
+            # means has_ref() exits early when a collection is not a
+            # candidate for eviction.
+            #
+            # By contrast, in_use() doesn't increment references on
+            # parents, so it requires a full tree walk to determine if
+            # a collection is a candidate for eviction.  This takes
+            # .07s for 240000 files, which becomes a major drag when
+            # cap_cache is being called several times a second and
+            # there are multiple non-evictable collections in the
+            # cache.
+            #
+            # So it is important for performance that we do the
+            # has_ref() check first.
+
+            if obj.has_ref(True):
+                _logger.debug("InodeCache cannot clear inode %i, still referenced", obj.inode)
+                return
+
+            if obj.in_use():
+                _logger.debug("InodeCache cannot clear inode %i, in use", obj.inode)
+                return
+
+            obj.kernel_invalidate()
+            _logger.debug("InodeCache sent kernel invalidate inode %i", obj.inode)
+            obj.clear()
+
+        # The llfuse lock is released in del_entry(), which is called by
+        # Directory.clear().  While the llfuse lock is released, it can happen
+        # that a reentrant call removes this entry before this call gets to it.
+        # Ensure that the entry is still valid before trying to remove it.
+        if obj.inode not in self._entries:
+            return
+
+        self._total -= obj.cache_size
+        del self._entries[obj.inode]
+        if obj.cache_uuid:
+            self._by_uuid[obj.cache_uuid].remove(obj)
+            if not self._by_uuid[obj.cache_uuid]:
+                del self._by_uuid[obj.cache_uuid]
+            obj.cache_uuid = None
+        if clear:
+            _logger.debug("InodeCache cleared inode %i total now %i", obj.inode, self._total)
+
+    def cap_cache(self):
+        if self._total > self.cap:
+            for ent in self._entries.values():
+                if self._total < self.cap or len(self._entries) < self.min_entries:
+                    break
+                self._remove(ent, True)
+
+    def manage(self, obj):
+        if obj.persisted():
+            obj.cache_size = obj.objsize()
+            self._entries[obj.inode] = obj
+            obj.cache_uuid = obj.uuid()
+            if obj.cache_uuid:
+                if obj.cache_uuid not in self._by_uuid:
+                    self._by_uuid[obj.cache_uuid] = [obj]
+                else:
+                    if obj not in self._by_uuid[obj.cache_uuid]:
+                        self._by_uuid[obj.cache_uuid].append(obj)
+            self._total += obj.objsize()
+            _logger.debug("InodeCache touched inode %i (size %i) (uuid %s) total now %i (%i entries)",
+                          obj.inode, obj.objsize(), obj.cache_uuid, self._total, len(self._entries))
+            self.cap_cache()
+
+    def touch(self, obj):
+        if obj.persisted():
+            if obj.inode in self._entries:
+                self._remove(obj, False)
+            self.manage(obj)
+
+    def unmanage(self, obj):
+        if obj.persisted() and obj.inode in self._entries:
+            self._remove(obj, True)
+
+    def find_by_uuid(self, uuid):
+        return self._by_uuid.get(uuid, [])
+
+    def clear(self):
+        self._entries.clear()
+        self._by_uuid.clear()
+        self._total = 0
+
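+# A minimal sketch of how InodeCache and Inodes (below) fit together; the
+# names `cache`, `inodes`, and `some_dir` here are illustrative, not part of
+# this module:
+#
+#   cache = InodeCache(cap=256*1024*1024)
+#   inodes = Inodes(cache)
+#   ent = inodes.add_entry(some_dir)   # calls cache.manage(ent)
+#   inodes.touch(ent)                  # moves ent to the newest cache slot
+#   cache.cap_cache()                  # evicts least recently touched entries
+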
+class Inodes(object):
+    """Manage the set of inodes.  This is the mapping from a numeric id
+    to a concrete File or Directory object."""
+
+    def __init__(self, inode_cache, encoding="utf-8"):
+        self._entries = {}
+        self._counter = itertools.count(llfuse.ROOT_INODE)
+        self.inode_cache = inode_cache
+        self.encoding = encoding
+        self.deferred_invalidations = []
+
+    def __getitem__(self, item):
+        return self._entries[item]
+
+    def __setitem__(self, key, item):
+        self._entries[key] = item
+
+    def __iter__(self):
+        return self._entries.iterkeys()
+
+    def items(self):
+        return self._entries.items()
+
+    def __contains__(self, k):
+        return k in self._entries
+
+    def touch(self, entry):
+        entry._atime = time.time()
+        self.inode_cache.touch(entry)
+
+    def add_entry(self, entry):
+        entry.inode = next(self._counter)
+        if entry.inode == llfuse.ROOT_INODE:
+            entry.inc_ref()
+        self._entries[entry.inode] = entry
+        self.inode_cache.manage(entry)
+        return entry
+
+    def del_entry(self, entry):
+        if entry.ref_count == 0:
+            self.inode_cache.unmanage(entry)
+            del self._entries[entry.inode]
+            with llfuse.lock_released:
+                entry.finalize()
+            entry.inode = None
+        else:
+            entry.dead = True
+            _logger.debug("del_entry on inode %i with refcount %i", entry.inode, entry.ref_count)
+
+    def invalidate_inode(self, entry):
+        if entry.has_ref(False):
+            # Only necessary if the kernel has previously done a lookup on this
+            # inode and hasn't yet forgotten about it.
+            llfuse.invalidate_inode(entry.inode)
+
+    def invalidate_entry(self, entry, name):
+        if entry.has_ref(False):
+            # Only necessary if the kernel has previously done a lookup on this
+            # inode and hasn't yet forgotten about it.
+            llfuse.invalidate_entry(entry.inode, name.encode(self.encoding))
+
+    def clear(self):
+        self.inode_cache.clear()
+
+        for k, v in self._entries.items():
+            try:
+                v.finalize()
+            except Exception:
+                _logger.exception("Error during finalize of inode %i", k)
+
+        self._entries.clear()
+
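+# Note on entry teardown (informal sketch): Inodes.del_entry() only finalizes
+# an entry once the kernel holds no references to it.  For a hypothetical
+# entry `ent` managed by an Inodes instance `inodes`:
+#
+#   ent.inc_ref()            # kernel lookup() takes a reference
+#   inodes.del_entry(ent)    # ref_count > 0, so ent is only marked dead
+#   ent.dec_ref(1)           # kernel forget() drops the reference later
+#   inodes.del_entry(ent)    # ref_count == 0 now: finalize and remove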
+
+def catch_exceptions(orig_func):
+    """Catch uncaught exceptions and log them consistently."""
+
+    @functools.wraps(orig_func)
+    def catch_exceptions_wrapper(self, *args, **kwargs):
+        try:
+            return orig_func(self, *args, **kwargs)
+        except llfuse.FUSEError:
+            raise
+        except EnvironmentError as e:
+            raise llfuse.FUSEError(e.errno)
+        except arvados.errors.KeepWriteError as e:
+            _logger.error("Keep write error: " + str(e))
+            raise llfuse.FUSEError(errno.EIO)
+        except arvados.errors.NotFoundError as e:
+            _logger.error("Block not found error: " + str(e))
+            raise llfuse.FUSEError(errno.EIO)
+        except:
+            _logger.exception("Unhandled exception during FUSE operation")
+            raise llfuse.FUSEError(errno.EIO)
+
+    return catch_exceptions_wrapper
+
+
+class Operations(llfuse.Operations):
+    """This is the main interface with llfuse.
+
+    The methods on this object are called by llfuse threads to service FUSE
+    events to query and read from the file system.
+
+    llfuse has its own global lock which is acquired before calling a request handler,
+    so request handlers do not run concurrently unless the lock is explicitly released
+    using 'with llfuse.lock_released:'
+
+    """
+
+    def __init__(self, uid, gid, api_client, encoding="utf-8", inode_cache=None, num_retries=4, enable_write=False):
+        super(Operations, self).__init__()
+
+        self._api_client = api_client
+
+        if not inode_cache:
+            inode_cache = InodeCache(cap=256*1024*1024)
+        self.inodes = Inodes(inode_cache, encoding=encoding)
+        self.uid = uid
+        self.gid = gid
+        self.enable_write = enable_write
+
+        # dict of inode to filehandle
+        self._filehandles = {}
+        self._filehandles_counter = itertools.count(0)
+
+        # Other threads that need to wait until the fuse driver
+        # is fully initialized should wait() on this event object.
+        self.initlock = threading.Event()
+
+        # If we get overlapping shutdown events (e.g., fusermount -u
+        # -z and operations.destroy()) llfuse calls forget() on inodes
+        # that have already been deleted. To avoid this, we make
+        # forget() a no-op if called after destroy().
+        self._shutdown_started = threading.Event()
+
+        self.num_retries = num_retries
+
+        self.read_counter = arvados.keep.Counter()
+        self.write_counter = arvados.keep.Counter()
+        self.read_ops_counter = arvados.keep.Counter()
+        self.write_ops_counter = arvados.keep.Counter()
+
+        self.events = None
+
+    def init(self):
+        # Allow threads that are waiting for the driver to be finished
+        # initializing to continue
+        self.initlock.set()
+
+    @catch_exceptions
+    def destroy(self):
+        self._shutdown_started.set()
+        if self.events:
+            self.events.close()
+            self.events = None
+
+        # Different versions of llfuse require and forbid us to
+        # acquire the lock here. See #8345#note-37, #10805#note-9.
+        if LLFUSE_VERSION_0 and llfuse.lock.acquire():
+            # llfuse < 0.42
+            self.inodes.clear()
+            llfuse.lock.release()
+        else:
+            # llfuse >= 0.42
+            self.inodes.clear()
+
+    def access(self, inode, mode, ctx):
+        return True
+
+    def listen_for_events(self):
+        self.events = arvados.events.subscribe(
+            self._api_client,
+            [["event_type", "in", ["create", "update", "delete"]]],
+            self.on_event)
+
+    @catch_exceptions
+    def on_event(self, ev):
+        if 'event_type' not in ev or ev["event_type"] not in ("create", "update", "delete"):
+            return
+        with llfuse.lock:
+            properties = ev.get("properties") or {}
+            old_attrs = properties.get("old_attributes") or {}
+            new_attrs = properties.get("new_attributes") or {}
+
+            for item in self.inodes.inode_cache.find_by_uuid(ev["object_uuid"]):
+                item.invalidate()
+                if ev.get("object_kind") == "arvados#collection":
+                    pdh = new_attrs.get("portable_data_hash")
+                    # new_attributes.modified_at currently lacks
+                    # subsecond precision (see #6347) so use event_at
+                    # which should always be the same.
+                    stamp = ev.get("event_at")
+                    if (stamp and pdh and item.writable() and
+                        item.collection is not None and
+                        item.collection.modified() and
+                        new_attrs.get("is_trashed") is not True):
+                        item.update(to_record_version=(stamp, pdh))
+
+            oldowner = old_attrs.get("owner_uuid")
+            newowner = ev.get("object_owner_uuid")
+            for parent in (
+                    self.inodes.inode_cache.find_by_uuid(oldowner) +
+                    self.inodes.inode_cache.find_by_uuid(newowner)):
+                parent.child_event(ev)
+
+    @catch_exceptions
+    def getattr(self, inode, ctx=None):
+        if inode not in self.inodes:
+            raise llfuse.FUSEError(errno.ENOENT)
+
+        e = self.inodes[inode]
+
+        entry = llfuse.EntryAttributes()
+        entry.st_ino = inode
+        entry.generation = 0
+        entry.entry_timeout = 0
+        entry.attr_timeout = e.time_to_next_poll() if e.allow_attr_cache else 0
+
+        entry.st_mode = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
+        if isinstance(e, Directory):
+            entry.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_IFDIR
+        else:
+            entry.st_mode |= stat.S_IFREG
+            if isinstance(e, FuseArvadosFile):
+                entry.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+
+        if self.enable_write and e.writable():
+            entry.st_mode |= stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
+
+        entry.st_nlink = 1
+        entry.st_uid = self.uid
+        entry.st_gid = self.gid
+        entry.st_rdev = 0
+
+        entry.st_size = e.size()
+
+        entry.st_blksize = 512
+        entry.st_blocks = (entry.st_size/512)+1
+        if hasattr(entry, 'st_atime_ns'):
+            # llfuse >= 0.42
+            entry.st_atime_ns = int(e.atime() * 1000000000)
+            entry.st_mtime_ns = int(e.mtime() * 1000000000)
+            entry.st_ctime_ns = int(e.mtime() * 1000000000)
+        else:
+            # llfuse < 0.42
+            entry.st_atime = int(e.atime())
+            entry.st_mtime = int(e.mtime())
+            entry.st_ctime = int(e.mtime())
+
+        return entry
+
+    @catch_exceptions
+    def setattr(self, inode, attr, fields=None, fh=None, ctx=None):
+        entry = self.getattr(inode)
+
+        if fh is not None and fh in self._filehandles:
+            handle = self._filehandles[fh]
+            e = handle.obj
+        else:
+            e = self.inodes[inode]
+
+        if fields is None:
+            # llfuse < 0.42
+            update_size = attr.st_size is not None
+        else:
+            # llfuse >= 0.42
+            update_size = fields.update_size
+        if update_size and isinstance(e, FuseArvadosFile):
+            with llfuse.lock_released:
+                e.arvfile.truncate(attr.st_size)
+                entry.st_size = e.arvfile.size()
+
+        return entry
+
+    @catch_exceptions
+    def lookup(self, parent_inode, name, ctx=None):
+        name = unicode(name, self.inodes.encoding)
+        inode = None
+
+        if name == '.':
+            inode = parent_inode
+        else:
+            if parent_inode in self.inodes:
+                p = self.inodes[parent_inode]
+                self.inodes.touch(p)
+                if name == '..':
+                    inode = p.parent_inode
+                elif isinstance(p, Directory) and name in p:
+                    inode = p[name].inode
+
+        if inode is not None:
+            _logger.debug("arv-mount lookup: parent_inode %i name '%s' inode %i",
+                      parent_inode, name, inode)
+            self.inodes[inode].inc_ref()
+            return self.getattr(inode)
+        else:
+            _logger.debug("arv-mount lookup: parent_inode %i name '%s' not found",
+                      parent_inode, name)
+            raise llfuse.FUSEError(errno.ENOENT)
+
+    @catch_exceptions
+    def forget(self, inodes):
+        if self._shutdown_started.is_set():
+            return
+        for inode, nlookup in inodes:
+            ent = self.inodes[inode]
+            _logger.debug("arv-mount forget: inode %i nlookup %i ref_count %i", inode, nlookup, ent.ref_count)
+            if ent.dec_ref(nlookup) == 0 and ent.dead:
+                self.inodes.del_entry(ent)
+
+    @catch_exceptions
+    def open(self, inode, flags, ctx=None):
+        if inode in self.inodes:
+            p = self.inodes[inode]
+        else:
+            raise llfuse.FUSEError(errno.ENOENT)
+
+        if isinstance(p, Directory):
+            raise llfuse.FUSEError(errno.EISDIR)
+
+        if ((flags & os.O_WRONLY) or (flags & os.O_RDWR)) and not p.writable():
+            raise llfuse.FUSEError(errno.EPERM)
+
+        fh = next(self._filehandles_counter)
+        self._filehandles[fh] = FileHandle(fh, p)
+        self.inodes.touch(p)
+
+        # Normally, we will have received an "update" event if the
+        # parent collection is stale here. However, even if the parent
+        # collection hasn't changed, the manifest might have been
+        # fetched so long ago that the signatures on the data block
+        # locators have expired. Calling checkupdate() on all
+        # ancestors ensures the signatures will be refreshed if
+        # necessary.
+        while p.parent_inode in self.inodes:
+            if p == self.inodes[p.parent_inode]:
+                break
+            p = self.inodes[p.parent_inode]
+            self.inodes.touch(p)
+            p.checkupdate()
+
+        _logger.debug("arv-mount open inode %i flags %x fh %i", inode, flags, fh)
+
+        return fh
+
+    @catch_exceptions
+    def read(self, fh, off, size):
+        _logger.debug("arv-mount read fh %i off %i size %i", fh, off, size)
+        self.read_ops_counter.add(1)
+
+        if fh in self._filehandles:
+            handle = self._filehandles[fh]
+        else:
+            raise llfuse.FUSEError(errno.EBADF)
+
+        self.inodes.touch(handle.obj)
+
+        r = handle.obj.readfrom(off, size, self.num_retries)
+        if r:
+            self.read_counter.add(len(r))
+        return r
+
+    @catch_exceptions
+    def write(self, fh, off, buf):
+        _logger.debug("arv-mount write %i %i %i", fh, off, len(buf))
+        self.write_ops_counter.add(1)
+
+        if fh in self._filehandles:
+            handle = self._filehandles[fh]
+        else:
+            raise llfuse.FUSEError(errno.EBADF)
+
+        if not handle.obj.writable():
+            raise llfuse.FUSEError(errno.EPERM)
+
+        self.inodes.touch(handle.obj)
+
+        w = handle.obj.writeto(off, buf, self.num_retries)
+        if w:
+            self.write_counter.add(w)
+        return w
+
+    @catch_exceptions
+    def release(self, fh):
+        if fh in self._filehandles:
+            _logger.debug("arv-mount release fh %i", fh)
+            try:
+                self._filehandles[fh].flush()
+            finally:
+                self._filehandles[fh].release()
+                del self._filehandles[fh]
+        self.inodes.inode_cache.cap_cache()
+
+    def releasedir(self, fh):
+        self.release(fh)
+
+    @catch_exceptions
+    def opendir(self, inode, ctx=None):
+        _logger.debug("arv-mount opendir: inode %i", inode)
+
+        if inode in self.inodes:
+            p = self.inodes[inode]
+        else:
+            raise llfuse.FUSEError(errno.ENOENT)
+
+        if not isinstance(p, Directory):
+            raise llfuse.FUSEError(errno.ENOTDIR)
+
+        fh = next(self._filehandles_counter)
+        if p.parent_inode in self.inodes:
+            parent = self.inodes[p.parent_inode]
+        else:
+            raise llfuse.FUSEError(errno.EIO)
+
+        # update atime
+        self.inodes.touch(p)
+
+        self._filehandles[fh] = DirectoryHandle(fh, p, [('.', p), ('..', parent)] + list(p.items()))
+        return fh
+
+    @catch_exceptions
+    def readdir(self, fh, off):
+        _logger.debug("arv-mount readdir: fh %i off %i", fh, off)
+
+        if fh in self._filehandles:
+            handle = self._filehandles[fh]
+        else:
+            raise llfuse.FUSEError(errno.EBADF)
+
+        e = off
+        while e < len(handle.entries):
+            if handle.entries[e][1].inode in self.inodes:
+                yield (handle.entries[e][0].encode(self.inodes.encoding), self.getattr(handle.entries[e][1].inode), e+1)
+            e += 1
+
+    @catch_exceptions
+    def statfs(self, ctx=None):
+        st = llfuse.StatvfsData()
+        st.f_bsize = 128 * 1024
+        st.f_blocks = 0
+        st.f_files = 0
+
+        st.f_bfree = 0
+        st.f_bavail = 0
+
+        st.f_ffree = 0
+        st.f_favail = 0
+
+        st.f_frsize = 0
+        return st
+
+    def _check_writable(self, inode_parent):
+        if not self.enable_write:
+            raise llfuse.FUSEError(errno.EROFS)
+
+        if inode_parent in self.inodes:
+            p = self.inodes[inode_parent]
+        else:
+            raise llfuse.FUSEError(errno.ENOENT)
+
+        if not isinstance(p, Directory):
+            raise llfuse.FUSEError(errno.ENOTDIR)
+
+        if not p.writable():
+            raise llfuse.FUSEError(errno.EPERM)
+
+        return p
+
+    @catch_exceptions
+    def create(self, inode_parent, name, mode, flags, ctx=None):
+        _logger.debug("arv-mount create: parent_inode %i '%s' %o", inode_parent, name, mode)
+
+        p = self._check_writable(inode_parent)
+        p.create(name)
+
+        # The file entry should have been implicitly created by callback.
+        f = p[name]
+        fh = next(self._filehandles_counter)
+        self._filehandles[fh] = FileHandle(fh, f)
+        self.inodes.touch(p)
+
+        f.inc_ref()
+        return (fh, self.getattr(f.inode))
+
+    @catch_exceptions
+    def mkdir(self, inode_parent, name, mode, ctx=None):
+        _logger.debug("arv-mount mkdir: parent_inode %i '%s' %o", inode_parent, name, mode)
+
+        p = self._check_writable(inode_parent)
+        p.mkdir(name)
+
+        # The dir entry should have been implicitly created by callback.
+        d = p[name]
+
+        d.inc_ref()
+        return self.getattr(d.inode)
+
+    @catch_exceptions
+    def unlink(self, inode_parent, name, ctx=None):
+        _logger.debug("arv-mount unlink: parent_inode %i '%s'", inode_parent, name)
+        p = self._check_writable(inode_parent)
+        p.unlink(name)
+
+    @catch_exceptions
+    def rmdir(self, inode_parent, name, ctx=None):
+        _logger.debug("arv-mount rmdir: parent_inode %i '%s'", inode_parent, name)
+        p = self._check_writable(inode_parent)
+        p.rmdir(name)
+
+    @catch_exceptions
+    def rename(self, inode_parent_old, name_old, inode_parent_new, name_new, ctx=None):
+        _logger.debug("arv-mount rename: old_parent_inode %i '%s' new_parent_inode %i '%s'", inode_parent_old, name_old, inode_parent_new, name_new)
+        src = self._check_writable(inode_parent_old)
+        dest = self._check_writable(inode_parent_new)
+        dest.rename(name_old, name_new, src)
+
+    @catch_exceptions
+    def flush(self, fh):
+        if fh in self._filehandles:
+            self._filehandles[fh].flush()
+
+    def fsync(self, fh, datasync):
+        self.flush(fh)
+
+    def fsyncdir(self, fh, datasync):
+        self.flush(fh)
diff --git a/services/fuse/arvados_fuse/command.py b/services/fuse/arvados_fuse/command.py
new file mode 100644 (file)
index 0000000..f174d1b
--- /dev/null
@@ -0,0 +1,387 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import argparse
+import arvados
+import daemon
+import llfuse
+import logging
+import os
+import resource
+import signal
+import subprocess
+import sys
+import threading
+import time
+
+import arvados.commands._util as arv_cmd
+from arvados_fuse import crunchstat
+from arvados_fuse import *
+from arvados_fuse.unmount import unmount
+from arvados_fuse._version import __version__
+
+class ArgumentParser(argparse.ArgumentParser):
+    def __init__(self):
+        super(ArgumentParser, self).__init__(
+            parents=[arv_cmd.retry_opt],
+            description='''Mount Keep data under the local filesystem.  Default mode is --all''',
+            epilog="""
+    Note: When using the --exec feature, you must either specify the
+    mountpoint before --exec, or mark the end of your --exec arguments
+    with "--".
+            """)
+        self.add_argument('--version', action='version',
+                          version="%s %s" % (sys.argv[0], __version__),
+                          help='Print version and exit.')
+        self.add_argument('mountpoint', type=str, help="""Mount point.""")
+        self.add_argument('--allow-other', action='store_true',
+                            help="""Let other users read the mount""")
+        self.add_argument('--subtype', type=str, metavar='STRING',
+                            help="""Report mounted filesystem type as "fuse.STRING", instead of just "fuse".""")
+
+        mode = self.add_mutually_exclusive_group()
+
+        mode.add_argument('--all', action='store_const', const='all', dest='mode',
+                                help="""Mount a subdirectory for each mode: home, shared, by_tag, by_id (default if no --mount-* arguments are given).""")
+        mode.add_argument('--custom', action='store_const', const=None, dest='mode',
+                                help="""Mount a top level meta-directory with subdirectories as specified by additional --mount-* arguments (default if any --mount-* arguments are given).""")
+        mode.add_argument('--home', action='store_const', const='home', dest='mode',
+                                help="""Mount only the user's home project.""")
+        mode.add_argument('--shared', action='store_const', const='shared', dest='mode',
+                                help="""Mount only list of projects shared with the user.""")
+        mode.add_argument('--by-tag', action='store_const', const='by_tag', dest='mode',
+                                help="""Mount subdirectories listed by tag.""")
+        mode.add_argument('--by-id', action='store_const', const='by_id', dest='mode',
+                                help="""Mount subdirectories listed by portable data hash or uuid.""")
+        mode.add_argument('--by-pdh', action='store_const', const='by_pdh', dest='mode',
+                                help="""Mount subdirectories listed by portable data hash.""")
+        mode.add_argument('--project', type=str, metavar='UUID',
+                                help="""Mount the specified project.""")
+        mode.add_argument('--collection', type=str, metavar='UUID_or_PDH',
+                                help="""Mount only the specified collection.""")
+
+        mounts = self.add_argument_group('Custom mount options')
+        mounts.add_argument('--mount-by-pdh',
+                            type=str, metavar='PATH', action='append', default=[],
+                            help="Mount each readable collection at mountpoint/PATH/P where P is the collection's portable data hash.")
+        mounts.add_argument('--mount-by-id',
+                            type=str, metavar='PATH', action='append', default=[],
+                            help="Mount each readable collection at mountpoint/PATH/UUID and mountpoint/PATH/PDH where PDH is the collection's portable data hash and UUID is its UUID.")
+        mounts.add_argument('--mount-by-tag',
+                            type=str, metavar='PATH', action='append', default=[],
+                            help="Mount all collections with tag TAG at mountpoint/PATH/TAG/UUID.")
+        mounts.add_argument('--mount-home',
+                            type=str, metavar='PATH', action='append', default=[],
+                            help="Mount the current user's home project at mountpoint/PATH.")
+        mounts.add_argument('--mount-shared',
+                            type=str, metavar='PATH', action='append', default=[],
+                            help="Mount projects shared with the current user at mountpoint/PATH.")
+        mounts.add_argument('--mount-tmp',
+                            type=str, metavar='PATH', action='append', default=[],
+                            help="Create a new collection, mount it in read/write mode at mountpoint/PATH, and delete it when unmounting.")
+
+        self.add_argument('--debug', action='store_true', help="""Debug mode""")
+        self.add_argument('--logfile', help="""Write debug logs and errors to the specified file (default stderr).""")
+        self.add_argument('--foreground', action='store_true', help="""Run in foreground (default is to daemonize unless --exec specified)""", default=False)
+        self.add_argument('--encoding', type=str, help="Character encoding to use for filesystem, default is utf-8 (see Python codec registry for list of available encodings)", default="utf-8")
+
+        self.add_argument('--file-cache', type=int, help="File data cache size, in bytes (default 256MiB)", default=256*1024*1024)
+        self.add_argument('--directory-cache', type=int, help="Directory data cache size, in bytes (default 128MiB)", default=128*1024*1024)
+
+        self.add_argument('--disable-event-listening', action='store_true', help="Don't subscribe to events on the API server", dest="disable_event_listening", default=False)
+
+        self.add_argument('--read-only', action='store_false', help="Mount will be read only (default)", dest="enable_write", default=False)
+        self.add_argument('--read-write', action='store_true', help="Mount will be read-write", dest="enable_write", default=False)
+
+        self.add_argument('--crunchstat-interval', type=float, help="Write stats to stderr every N seconds (default disabled)", default=0)
+
+        unmount = self.add_mutually_exclusive_group()
+        unmount.add_argument('--unmount', action='store_true', default=False,
+                             help="Forcefully unmount the specified mountpoint (if it's a fuse mount) and exit. If --subtype is given, unmount only if the mount has the specified subtype. WARNING: This command can affect any kind of fuse mount, not just arv-mount.")
+        unmount.add_argument('--unmount-all', action='store_true', default=False,
+                             help="Forcefully unmount every fuse mount at or below the specified path and exit. If --subtype is given, unmount only mounts that have the specified subtype. Exit non-zero if any other types of mounts are found at or below the given path. WARNING: This command can affect any kind of fuse mount, not just arv-mount.")
+        unmount.add_argument('--replace', action='store_true', default=False,
+                             help="If a fuse mount is already present at mountpoint, forcefully unmount it before mounting")
+        self.add_argument('--unmount-timeout',
+                          type=float, default=2.0,
+                          help="Time to wait for graceful shutdown after --exec program exits and filesystem is unmounted")
+
+        self.add_argument('--exec', type=str, nargs=argparse.REMAINDER,
+                            dest="exec_args", metavar=('command', 'args', '...', '--'),
+                            help="""Mount, run a command, then unmount and exit""")
+
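+# Illustrative invocations (the mount points, UUID, and command below are
+# placeholders, not defaults):
+#
+#   arv-mount /mnt/arvados                       # --all layout by default
+#   arv-mount --read-write --home /mnt/home      # writable home project
+#   arv-mount --collection zzzzz-4zz18-xxxxxxxxxxxxxxx /mnt/coll
+#   arv-mount /mnt/arvados --exec python myscript.py
+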
+
+class Mount(object):
+    def __init__(self, args, logger=logging.getLogger('arvados.arv-mount')):
+        self.daemon = False
+        self.logger = logger
+        self.args = args
+        self.listen_for_events = False
+
+        self.args.mountpoint = os.path.realpath(self.args.mountpoint)
+        if self.args.logfile:
+            self.args.logfile = os.path.realpath(self.args.logfile)
+
+        try:
+            self._setup_logging()
+            self._setup_api()
+            self._setup_mount()
+        except Exception as e:
+            self.logger.exception("arv-mount: exception during setup: %s", e)
+            exit(1)
+
+    def __enter__(self):
+        if self.args.replace:
+            unmount(path=self.args.mountpoint,
+                    timeout=self.args.unmount_timeout)
+        llfuse.init(self.operations, self.args.mountpoint, self._fuse_options())
+        if self.daemon:
+            daemon.DaemonContext(
+                working_directory=os.path.dirname(self.args.mountpoint),
+                files_preserve=range(
+                    3, resource.getrlimit(resource.RLIMIT_NOFILE)[1])
+            ).open()
+        if self.listen_for_events and not self.args.disable_event_listening:
+            self.operations.listen_for_events()
+        self.llfuse_thread = threading.Thread(None, lambda: self._llfuse_main())
+        self.llfuse_thread.daemon = True
+        self.llfuse_thread.start()
+        self.operations.initlock.wait()
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if self.operations.events:
+            self.operations.events.close(timeout=self.args.unmount_timeout)
+        subprocess.call(["fusermount", "-u", "-z", self.args.mountpoint])
+        self.llfuse_thread.join(timeout=self.args.unmount_timeout)
+        if self.llfuse_thread.is_alive():
+            self.logger.warning("Mount.__exit__:"
+                                " llfuse thread still alive %fs after umount"
+                                " -- abandoning and exiting anyway",
+                                self.args.unmount_timeout)
+
+    def run(self):
+        if self.args.unmount or self.args.unmount_all:
+            unmount(path=self.args.mountpoint,
+                    subtype=self.args.subtype,
+                    timeout=self.args.unmount_timeout,
+                    recursive=self.args.unmount_all)
+        elif self.args.exec_args:
+            self._run_exec()
+        else:
+            self._run_standalone()
+
+    def _fuse_options(self):
+        """FUSE mount options; see mount.fuse(8)"""
+        opts = [optname for optname in ['allow_other', 'debug']
+                if getattr(self.args, optname)]
+        # Increase default read/write size from 4KiB to 128KiB
+        opts += ["big_writes", "max_read=131072"]
+        if self.args.subtype:
+            opts += ["subtype="+self.args.subtype]
+        return opts
+
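+    # For instance, with --allow-other and --subtype=arv this returns
+    # (illustratively): ['allow_other', 'big_writes', 'max_read=131072',
+    # 'subtype=arv'].
+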
+    def _setup_logging(self):
+        # Configure a log handler based on command-line switches.
+        if self.args.logfile:
+            log_handler = logging.FileHandler(self.args.logfile)
+            log_handler.setFormatter(logging.Formatter(
+                '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s',
+                '%Y-%m-%d %H:%M:%S'))
+        else:
+            log_handler = None
+
+        if log_handler is not None:
+            arvados.logger.removeHandler(arvados.log_handler)
+            arvados.logger.addHandler(log_handler)
+
+        if self.args.debug:
+            arvados.logger.setLevel(logging.DEBUG)
+            logging.getLogger('arvados.keep').setLevel(logging.DEBUG)
+            logging.getLogger('arvados.api').setLevel(logging.DEBUG)
+            logging.getLogger('arvados.collection').setLevel(logging.DEBUG)
+            self.logger.debug("arv-mount debugging enabled")
+
+        self.logger.info("%s %s started", sys.argv[0], __version__)
+        self.logger.info("enable write is %s", self.args.enable_write)
+
+    def _setup_api(self):
+        try:
+            self.api = arvados.safeapi.ThreadSafeApiCache(
+                apiconfig=arvados.config.settings(),
+                keep_params={
+                    'block_cache': arvados.keep.KeepBlockCache(self.args.file_cache),
+                    'num_retries': self.args.retries,
+                })
+        except KeyError as e:
+            self.logger.error("Missing environment: %s", e)
+            exit(1)
+        # Do a sanity check that we have a working arvados host + token.
+        self.api.users().current().execute()
+
+    def _setup_mount(self):
+        self.operations = Operations(
+            os.getuid(),
+            os.getgid(),
+            api_client=self.api,
+            encoding=self.args.encoding,
+            inode_cache=InodeCache(cap=self.args.directory_cache),
+            enable_write=self.args.enable_write)
+
+        if self.args.crunchstat_interval:
+            statsthread = threading.Thread(
+                target=crunchstat.statlogger,
+                args=(self.args.crunchstat_interval,
+                      self.api.keep,
+                      self.operations))
+            statsthread.daemon = True
+            statsthread.start()
+
+        usr = self.api.users().current().execute(num_retries=self.args.retries)
+        now = time.time()
+        dir_class = None
+        dir_args = [llfuse.ROOT_INODE, self.operations.inodes, self.api, self.args.retries]
+        mount_readme = False
+
+        if self.args.collection is not None:
+            # Set up the request handler with the collection at the root
+            # First check that the collection is readable
+            self.api.collections().get(uuid=self.args.collection).execute()
+            self.args.mode = 'collection'
+            dir_class = CollectionDirectory
+            dir_args.append(self.args.collection)
+        elif self.args.project is not None:
+            self.args.mode = 'project'
+            dir_class = ProjectDirectory
+            dir_args.append(
+                self.api.groups().get(uuid=self.args.project).execute(
+                    num_retries=self.args.retries))
+
+        if (self.args.mount_by_id or
+            self.args.mount_by_pdh or
+            self.args.mount_by_tag or
+            self.args.mount_home or
+            self.args.mount_shared or
+            self.args.mount_tmp):
+            if self.args.mode is not None:
+                sys.exit(
+                    "Cannot combine '{}' mode with custom --mount-* options.".
+                    format(self.args.mode))
+        elif self.args.mode is None:
+            # If no mode and no custom --mount-* arguments were given, --all is the default
+            self.args.mode = 'all'
+
+        if self.args.mode in ['by_id', 'by_pdh']:
+            # Set up the request handler with the 'magic directory' at the root
+            dir_class = MagicDirectory
+            dir_args.append(self.args.mode == 'by_pdh')
+        elif self.args.mode == 'by_tag':
+            dir_class = TagsDirectory
+        elif self.args.mode == 'shared':
+            dir_class = SharedDirectory
+            dir_args.append(usr)
+        elif self.args.mode == 'home':
+            dir_class = ProjectDirectory
+            dir_args.append(usr)
+            dir_args.append(True)
+        elif self.args.mode == 'all':
+            self.args.mount_by_id = ['by_id']
+            self.args.mount_by_tag = ['by_tag']
+            self.args.mount_home = ['home']
+            self.args.mount_shared = ['shared']
+            mount_readme = True
+
+        if dir_class is not None:
+            ent = dir_class(*dir_args)
+            self.operations.inodes.add_entry(ent)
+            self.listen_for_events = ent.want_event_subscribe()
+            return
+
+        e = self.operations.inodes.add_entry(Directory(
+            llfuse.ROOT_INODE, self.operations.inodes))
+        dir_args[0] = e.inode
+
+        for name in self.args.mount_by_id:
+            self._add_mount(e, name, MagicDirectory(*dir_args, pdh_only=False))
+        for name in self.args.mount_by_pdh:
+            self._add_mount(e, name, MagicDirectory(*dir_args, pdh_only=True))
+        for name in self.args.mount_by_tag:
+            self._add_mount(e, name, TagsDirectory(*dir_args))
+        for name in self.args.mount_home:
+            self._add_mount(e, name, ProjectDirectory(*dir_args, project_object=usr, poll=True))
+        for name in self.args.mount_shared:
+            self._add_mount(e, name, SharedDirectory(*dir_args, exclude=usr, poll=True))
+        for name in self.args.mount_tmp:
+            self._add_mount(e, name, TmpCollectionDirectory(*dir_args))
+
+        if mount_readme:
+            text = self._readme_text(
+                arvados.config.get('ARVADOS_API_HOST'),
+                usr['email'])
+            self._add_mount(e, 'README', StringFile(e.inode, text, now))
+
+    def _add_mount(self, tld, name, ent):
+        if name in ['', '.', '..'] or '/' in name:
+            sys.exit("Mount point '{}' is not supported.".format(name))
+        tld._entries[name] = self.operations.inodes.add_entry(ent)
+        self.listen_for_events = (self.listen_for_events or ent.want_event_subscribe())
+
+    def _readme_text(self, api_host, user_email):
+        return '''
+Welcome to Arvados!  This directory provides file system access to
+files and objects available on the Arvados installation located at
+'{}' using credentials for user '{}'.
+
+From here, the following directories are available:
+
+  by_id/     Access to Keep collections by uuid or portable data hash (see by_id/README for details).
+  by_tag/    Access to Keep collections organized by tag.
+  home/      The contents of your home project.
+  shared/    Projects shared with you.
+
+'''.format(api_host, user_email)
+
+    def _run_exec(self):
+        rc = 255
+        with self:
+            try:
+                sp = subprocess.Popen(self.args.exec_args, shell=False)
+
+                # forward signals to the process.
+                signal.signal(signal.SIGINT, lambda signum, frame: sp.send_signal(signum))
+                signal.signal(signal.SIGTERM, lambda signum, frame: sp.send_signal(signum))
+                signal.signal(signal.SIGQUIT, lambda signum, frame: sp.send_signal(signum))
+
+                # wait for process to complete.
+                rc = sp.wait()
+
+                # restore default signal handlers.
+                signal.signal(signal.SIGINT, signal.SIG_DFL)
+                signal.signal(signal.SIGTERM, signal.SIG_DFL)
+                signal.signal(signal.SIGQUIT, signal.SIG_DFL)
+            except Exception as e:
+                self.logger.exception(
+                    'arv-mount: exception during exec %s', self.args.exec_args)
+                try:
+                    rc = e.errno
+                except AttributeError:
+                    pass
+        exit(rc)
+
+    def _run_standalone(self):
+        try:
+            self.daemon = not self.args.foreground
+            with self:
+                self.llfuse_thread.join(timeout=None)
+        except Exception as e:
+            self.logger.exception('arv-mount: exception during mount: %s', e)
+            exit(getattr(e, 'errno', 1))
+        exit(0)
+
+    def _llfuse_main(self):
+        try:
+            llfuse.main()
+        except:
+            llfuse.close(unmount=False)
+            raise
+        llfuse.close()
diff --git a/services/fuse/arvados_fuse/crunchstat.py b/services/fuse/arvados_fuse/crunchstat.py
new file mode 100644 (file)
index 0000000..47d89d8
--- /dev/null
@@ -0,0 +1,67 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import sys
+import time
+
+class Stat(object):
+    def __init__(self, prefix, interval,
+                 egr_name, ing_name,
+                 egr_func, ing_func):
+        self.prefix = prefix
+        self.interval = interval
+        self.egr_name = egr_name
+        self.ing_name = ing_name
+        self.egress = egr_func
+        self.ingress = ing_func
+        self.egr_prev = self.egress()
+        self.ing_prev = self.ingress()
+
+    def update(self):
+        egr = self.egress()
+        ing = self.ingress()
+
+        delta = " -- interval %.4f seconds %d %s %d %s" % (self.interval,
+                                                           egr - self.egr_prev,
+                                                           self.egr_name,
+                                                           ing - self.ing_prev,
+                                                           self.ing_name)
+
+        sys.stderr.write("crunchstat: %s %d %s %d %s%s\n" % (self.prefix,
+                                                             egr,
+                                                             self.egr_name,
+                                                             ing,
+                                                             self.ing_name,
+                                                             delta))
+
+        self.egr_prev = egr
+        self.ing_prev = ing
+
+
+def statlogger(interval, keep, ops):
+    calls = Stat("keepcalls", interval, "put", "get",
+                 keep.put_counter.get,
+                 keep.get_counter.get)
+    net = Stat("net:keep0", interval, "tx", "rx",
+               keep.upload_counter.get,
+               keep.download_counter.get)
+    cache = Stat("keepcache", interval, "hit", "miss",
+               keep.hits_counter.get,
+               keep.misses_counter.get)
+    fuseops = Stat("fuseops", interval,"write", "read",
+                   ops.write_ops_counter.get,
+                   ops.read_ops_counter.get)
+    blk = Stat("blkio:0:0", interval, "write", "read",
+               ops.write_counter.get,
+               ops.read_counter.get)
+
+    while True:
+        time.sleep(interval)
+        calls.update()
+        net.update()
+        cache.update()
+        fuseops.update()
+        blk.update()
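+
+# A sample log line produced by Stat.update() (the values are illustrative):
+#
+#   crunchstat: keepcalls 10 put 100 get -- interval 10.0000 seconds 2 put 5 get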
+
+
diff --git a/services/fuse/arvados_fuse/fresh.py b/services/fuse/arvados_fuse/fresh.py
new file mode 100644 (file)
index 0000000..2e7a2a8
--- /dev/null
@@ -0,0 +1,159 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import time
+import ciso8601
+import calendar
+import functools
+
+def convertTime(t):
+    """Parse Arvados timestamp to unix time."""
+    if not t:
+        return 0
+    try:
+        return calendar.timegm(ciso8601.parse_datetime_as_naive(t).timetuple())
+    except (TypeError, ValueError):
+        return 0
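+
+# For example (the timestamp below is illustrative):
+#
+#   convertTime("2019-03-14T14:11:26Z")   # -> 1552572686 (seconds since epoch)
+#   convertTime(None)                     # -> 0
+#   convertTime("not a timestamp")        # -> 0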
+
+def use_counter(orig_func):
+    @functools.wraps(orig_func)
+    def use_counter_wrapper(self, *args, **kwargs):
+        try:
+            self.inc_use()
+            return orig_func(self, *args, **kwargs)
+        finally:
+            self.dec_use()
+    return use_counter_wrapper
+
+def check_update(orig_func):
+    @functools.wraps(orig_func)
+    def check_update_wrapper(self, *args, **kwargs):
+        self.checkupdate()
+        return orig_func(self, *args, **kwargs)
+    return check_update_wrapper
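+
+# These decorators are typically stacked, as in the Directory class later in
+# this package, e.g.:
+#
+#   @use_counter
+#   @check_update
+#   def __getitem__(self, item):
+#       return self._entries[item]
+#
+# The use count is incremented before checkupdate() runs, so the object cannot
+# be evicted from the inode cache while a refresh is in progress.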
+
+class FreshBase(object):
+    """Base class for maintaining object lifecycle.
+
+    Functions include:
+
+    * Indicate if an object is up to date (stale() == False) or needs to be
+      updated (stale() == True).  Use invalidate() to mark the object as
+      stale.  An object is also automatically stale if it has not been
+      updated in `_poll_time` seconds.
+
+    * Record access time (atime) timestamp
+
+    * Manage internal use count used by the inode cache ("inc_use" and
+      "dec_use").  An object which is in use cannot be cleared by the inode
+      cache.
+
+    * Manage the kernel reference count ("inc_ref" and "dec_ref").  An object
+      which is referenced by the kernel cannot have its inode entry deleted.
+
+    * Record cache footprint, cache priority
+
+    * Record Arvados uuid at the time the object is placed in the cache
+
+    * Clear the object contents (invalidates the object)
+
+    """
+
+    __slots__ = ("_stale", "_poll", "_last_update", "_atime", "_poll_time", "use_count",
+                 "ref_count", "dead", "cache_size", "cache_uuid", "allow_attr_cache")
+
+    def __init__(self):
+        self._stale = True
+        self._poll = False
+        self._last_update = time.time()
+        self._atime = time.time()
+        self._poll_time = 60
+        self.use_count = 0
+        self.ref_count = 0
+        self.dead = False
+        self.cache_size = 0
+        self.cache_uuid = None
+
+        # Can the kernel cache attributes?
+        self.allow_attr_cache = True
+
+    def invalidate(self):
+        """Indicate that object contents should be refreshed from source."""
+        self._stale = True
+
+    def kernel_invalidate(self):
+        """Indicate that an invalidation for this object should be sent to the kernel."""
+        pass
+
+    # Test if the entries dict is stale.
+    def stale(self):
+        if self._stale:
+            return True
+        if self._poll:
+            return (self._last_update + self._poll_time) < self._atime
+        return False
+
+    def fresh(self):
+        self._stale = False
+        self._last_update = time.time()
+
+    def atime(self):
+        return self._atime
+
+    def persisted(self):
+        return False
+
+    def clear(self):
+        pass
+
+    def in_use(self):
+        return self.use_count > 0
+
+    def inc_use(self):
+        self.use_count += 1
+
+    def dec_use(self):
+        self.use_count -= 1
+
+    def inc_ref(self):
+        self.ref_count += 1
+        return self.ref_count
+
+    def dec_ref(self, n):
+        self.ref_count -= n
+        return self.ref_count
+
+    def has_ref(self, only_children):
+        """Determine if there are any kernel references to this
+        object or its children.
+
+        If only_children is True, ignore refcount of self and only consider
+        children.
+        """
+        if only_children:
+            return False
+        else:
+            return self.ref_count > 0
+
+    def objsize(self):
+        return 0
+
+    def uuid(self):
+        return None
+
+    def finalize(self):
+        pass
+
+    def child_event(self, ev):
+        pass
+
+    def time_to_next_poll(self):
+        if self._poll:
+            t = (self._last_update + self._poll_time) - self._atime
+            if t < 0:
+                return 0
+            else:
+                return t
+        else:
+            return self._poll_time
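+
+# A rough sketch of the lifecycle (SomeSubclass and its update() method are
+# placeholders for subclass-specific refresh logic):
+#
+#   obj = SomeSubclass()   # starts stale: obj.stale() is True
+#   obj.update()           # refresh contents from the API server
+#   obj.fresh()            # now obj.stale() is False
+#   obj.invalidate()       # mark for refresh: obj.stale() is True again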
diff --git a/services/fuse/arvados_fuse/fusedir.py b/services/fuse/arvados_fuse/fusedir.py
new file mode 100644 (file)
index 0000000..2d58012
--- /dev/null
@@ -0,0 +1,1129 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import logging
+import re
+import sys
+import time
+import llfuse
+import arvados
+import apiclient
+import functools
+import threading
+from apiclient import errors as apiclient_errors
+import errno
+
+from fusefile import StringFile, ObjectFile, FuncToJSONFile, FuseArvadosFile
+from fresh import FreshBase, convertTime, use_counter, check_update
+
+import arvados.collection
+from arvados.util import portable_data_hash_pattern, uuid_pattern, collection_uuid_pattern, group_uuid_pattern, user_uuid_pattern, link_uuid_pattern
+
+_logger = logging.getLogger('arvados.arvados_fuse')
+
+
+# Match any character which FUSE or Linux cannot accommodate as part
+# of a filename. (If present in a collection filename, they will
+# appear as underscores in the fuse mount.)
+_disallowed_filename_characters = re.compile('[\x00/]')
+
+# '.' and '..' are not reachable if API server is newer than #6277
+def sanitize_filename(dirty):
+    """Replace disallowed filename characters with harmless "_"."""
+    if dirty is None:
+        return None
+    elif dirty == '':
+        return '_'
+    elif dirty == '.':
+        return '_'
+    elif dirty == '..':
+        return '__'
+    else:
+        return _disallowed_filename_characters.sub('_', dirty)
+
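+# For example:
+#
+#   sanitize_filename("foo/bar")   # -> "foo_bar"
+#   sanitize_filename("")          # -> "_"
+#   sanitize_filename(".")         # -> "_"
+#   sanitize_filename("..")        # -> "__"
+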
+
+class Directory(FreshBase):
+    """Generic directory object, backed by a dict.
+
+    Consists of a set of entries with the key representing the filename
+    and the value referencing a File or Directory object.
+    """
+
+    def __init__(self, parent_inode, inodes):
+        """parent_inode is the integer inode number"""
+
+        super(Directory, self).__init__()
+
+        self.inode = None
+        if not isinstance(parent_inode, int):
+            raise Exception("parent_inode should be an int")
+        self.parent_inode = parent_inode
+        self.inodes = inodes
+        self._entries = {}
+        self._mtime = time.time()
+
+    #  Overridden by subclasses to implement logic to update the entries dict
+    #  when the directory is stale
+    @use_counter
+    def update(self):
+        pass
+
+    # Only used when computing the size of the disk footprint of the directory
+    # (stub)
+    def size(self):
+        return 0
+
+    def persisted(self):
+        return False
+
+    def checkupdate(self):
+        if self.stale():
+            try:
+                self.update()
+            except apiclient.errors.HttpError as e:
+                _logger.warn(e)
+
+    @use_counter
+    @check_update
+    def __getitem__(self, item):
+        return self._entries[item]
+
+    @use_counter
+    @check_update
+    def items(self):
+        return list(self._entries.items())
+
+    @use_counter
+    @check_update
+    def __contains__(self, k):
+        return k in self._entries
+
+    @use_counter
+    @check_update
+    def __len__(self):
+        return len(self._entries)
+
+    def fresh(self):
+        self.inodes.touch(self)
+        super(Directory, self).fresh()
+
+    def merge(self, items, fn, same, new_entry):
+        """Helper method for updating the contents of the directory.
+
+        Takes a list describing the new contents of the directory, reuse
+        entries that are the same in both the old and new lists, create new
+        entries, and delete old entries missing from the new list.
+
+        :items: iterable with new directory contents
+
+        :fn: function to take an entry in 'items' and return the desired file or
+        directory name, or None if this entry should be skipped
+
+        :same: function to compare an existing entry (a File or Directory
+        object) with an entry in the items list to determine whether to keep
+        the existing entry.
+
+        :new_entry: function to create a new directory entry (File or Directory
+        object) from an entry in the items list.
+
+        """
+
+        oldentries = self._entries
+        self._entries = {}
+        changed = False
+        for i in items:
+            name = sanitize_filename(fn(i))
+            if name:
+                if name in oldentries and same(oldentries[name], i):
+                    # move existing directory entry over
+                    self._entries[name] = oldentries[name]
+                    del oldentries[name]
+                else:
+                    _logger.debug("Adding entry '%s' to inode %i", name, self.inode)
+                    # create new directory entry
+                    ent = new_entry(i)
+                    if ent is not None:
+                        self._entries[name] = self.inodes.add_entry(ent)
+                        changed = True
+
+        # delete any other directory entries that were not found in 'items'
+        for i in oldentries:
+            _logger.debug("Forgetting about entry '%s' on inode %i", i, self.inode)
+            self.inodes.invalidate_entry(self, i)
+            self.inodes.del_entry(oldentries[i])
+            changed = True
+
+        if changed:
+            self.inodes.invalidate_inode(self)
+            self._mtime = time.time()
+
+        self.fresh()
+
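+    # A hypothetical caller might refresh this directory from a list of API
+    # records like this (sketch; `records` and `make_entry` are placeholders):
+    #
+    #   self.merge(records,
+    #              lambda rec: rec['name'],                     # entry name
+    #              lambda ent, rec: ent.uuid() == rec['uuid'],  # keep existing?
+    #              lambda rec: make_entry(rec))                 # build new entry
+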
+    def in_use(self):
+        if super(Directory, self).in_use():
+            return True
+        for v in self._entries.itervalues():
+            if v.in_use():
+                return True
+        return False
+
+    def has_ref(self, only_children):
+        if super(Directory, self).has_ref(only_children):
+            return True
+        for v in self._entries.itervalues():
+            if v.has_ref(False):
+                return True
+        return False
+
+    def clear(self):
+        """Delete all entries"""
+        oldentries = self._entries
+        self._entries = {}
+        for n in oldentries:
+            oldentries[n].clear()
+            self.inodes.del_entry(oldentries[n])
+        self.invalidate()
+
+    def kernel_invalidate(self):
+        # Invalidating the dentry on the parent implies invalidating all paths
+        # below it as well.
+        parent = self.inodes[self.parent_inode]
+
+        # Find self on the parent in order to invalidate this path.
+        # Calling the public items() method might trigger a refresh,
+        # which we definitely don't want, so read the internal dict directly.
+        for k, v in parent._entries.items():
+            if v is self:
+                self.inodes.invalidate_entry(parent, k)
+                break
+
+    def mtime(self):
+        return self._mtime
+
+    def writable(self):
+        return False
+
+    def flush(self):
+        pass
+
+    def want_event_subscribe(self):
+        raise NotImplementedError()
+
+    def create(self, name):
+        raise NotImplementedError()
+
+    def mkdir(self, name):
+        raise NotImplementedError()
+
+    def unlink(self, name):
+        raise NotImplementedError()
+
+    def rmdir(self, name):
+        raise NotImplementedError()
+
+    def rename(self, name_old, name_new, src):
+        raise NotImplementedError()
+
+
+class CollectionDirectoryBase(Directory):
+    """Represent an Arvados Collection as a directory.
+
+    This class is used for Subcollections, and is also the base class for
+    CollectionDirectory, which implements collection loading/saving on
+    Collection records.
+
+    Most operations act only on the underlying Arvados `Collection` object.  The
+    `Collection` object signals via a notify callback to
+    `CollectionDirectoryBase.on_event` that an item was added, removed or
+    modified.  FUSE inodes and directory entries are created, deleted or
+    invalidated in response to these events.
+
+    """
+
+    def __init__(self, parent_inode, inodes, collection):
+        super(CollectionDirectoryBase, self).__init__(parent_inode, inodes)
+        self.collection = collection
+
+    def new_entry(self, name, item, mtime):
+        name = sanitize_filename(name)
+        if hasattr(item, "fuse_entry") and item.fuse_entry is not None:
+            if item.fuse_entry.dead is not True:
+                raise Exception("Can only reparent dead inode entry")
+            if item.fuse_entry.inode is None:
+                raise Exception("Reparented entry must still have valid inode")
+            item.fuse_entry.dead = False
+            self._entries[name] = item.fuse_entry
+        elif isinstance(item, arvados.collection.RichCollectionBase):
+            self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, item))
+            self._entries[name].populate(mtime)
+        else:
+            self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime))
+        item.fuse_entry = self._entries[name]
+
+    def on_event(self, event, collection, name, item):
+        if collection == self.collection:
+            name = sanitize_filename(name)
+            _logger.debug("collection notify %s %s %s %s", event, collection, name, item)
+            with llfuse.lock:
+                if event == arvados.collection.ADD:
+                    self.new_entry(name, item, self.mtime())
+                elif event == arvados.collection.DEL:
+                    ent = self._entries[name]
+                    del self._entries[name]
+                    self.inodes.invalidate_entry(self, name)
+                    self.inodes.del_entry(ent)
+                elif event == arvados.collection.MOD:
+                    if hasattr(item, "fuse_entry") and item.fuse_entry is not None:
+                        self.inodes.invalidate_inode(item.fuse_entry)
+                    elif name in self._entries:
+                        self.inodes.invalidate_inode(self._entries[name])
+
+    def populate(self, mtime):
+        self._mtime = mtime
+        self.collection.subscribe(self.on_event)
+        for entry, item in self.collection.items():
+            self.new_entry(entry, item, self.mtime())
+
+    def writable(self):
+        return self.collection.writable()
+
+    @use_counter
+    def flush(self):
+        with llfuse.lock_released:
+            self.collection.root_collection().save()
+
+    @use_counter
+    @check_update
+    def create(self, name):
+        with llfuse.lock_released:
+            self.collection.open(name, "w").close()
+
+    @use_counter
+    @check_update
+    def mkdir(self, name):
+        with llfuse.lock_released:
+            self.collection.mkdirs(name)
+
+    @use_counter
+    @check_update
+    def unlink(self, name):
+        with llfuse.lock_released:
+            self.collection.remove(name)
+        self.flush()
+
+    @use_counter
+    @check_update
+    def rmdir(self, name):
+        with llfuse.lock_released:
+            self.collection.remove(name)
+        self.flush()
+
+    @use_counter
+    @check_update
+    def rename(self, name_old, name_new, src):
+        if not isinstance(src, CollectionDirectoryBase):
+            raise llfuse.FUSEError(errno.EPERM)
+
+        if name_new in self:
+            ent = src[name_old]
+            tgt = self[name_new]
+            if isinstance(ent, FuseArvadosFile) and isinstance(tgt, FuseArvadosFile):
+                pass
+            elif isinstance(ent, CollectionDirectoryBase) and isinstance(tgt, CollectionDirectoryBase):
+                if len(tgt) > 0:
+                    raise llfuse.FUSEError(errno.ENOTEMPTY)
+            elif isinstance(ent, CollectionDirectoryBase) and isinstance(tgt, FuseArvadosFile):
+                raise llfuse.FUSEError(errno.ENOTDIR)
+            elif isinstance(ent, FuseArvadosFile) and isinstance(tgt, CollectionDirectoryBase):
+                raise llfuse.FUSEError(errno.EISDIR)
+
+        with llfuse.lock_released:
+            self.collection.rename(name_old, name_new, source_collection=src.collection, overwrite=True)
+        self.flush()
+        src.flush()
+
+    def clear(self):
+        super(CollectionDirectoryBase, self).clear()
+        self.collection = None
+
+
+class CollectionDirectory(CollectionDirectoryBase):
+    """Represents the root of a directory tree representing a collection."""
+
+    def __init__(self, parent_inode, inodes, api, num_retries, collection_record=None, explicit_collection=None):
+        super(CollectionDirectory, self).__init__(parent_inode, inodes, None)
+        self.api = api
+        self.num_retries = num_retries
+        self.collection_record_file = None
+        self.collection_record = None
+        self._poll = True
+        try:
+            self._poll_time = (api._rootDesc.get('blobSignatureTtl', 60*60*2)/2)
+        except Exception:
+            _logger.debug("Error getting blobSignatureTtl from discovery document: %s", sys.exc_info()[0])
+            self._poll_time = 60*60
+
+        if isinstance(collection_record, dict):
+            self.collection_locator = collection_record['uuid']
+            self._mtime = convertTime(collection_record.get('modified_at'))
+        else:
+            self.collection_locator = collection_record
+            self._mtime = 0
+        self._manifest_size = 0
+        if self.collection_locator:
+            self._writable = (uuid_pattern.match(self.collection_locator) is not None)
+        self._updating_lock = threading.Lock()
+
+    def same(self, i):
+        return i['uuid'] == self.collection_locator or i['portable_data_hash'] == self.collection_locator
+
+    def writable(self):
+        return self.collection.writable() if self.collection is not None else self._writable
+
+    def want_event_subscribe(self):
+        return (uuid_pattern.match(self.collection_locator) is not None)
+
+    # Used by arv-web.py to switch the contents of the CollectionDirectory
+    def change_collection(self, new_locator):
+        """Switch the contents of the CollectionDirectory.
+
+        Must be called with llfuse.lock held.
+        """
+
+        self.collection_locator = new_locator
+        self.collection_record = None
+        self.update()
+
+    def new_collection(self, new_collection_record, coll_reader):
+        if self.inode:
+            self.clear()
+
+        self.collection_record = new_collection_record
+
+        if self.collection_record:
+            self._mtime = convertTime(self.collection_record.get('modified_at'))
+            self.collection_locator = self.collection_record["uuid"]
+            if self.collection_record_file is not None:
+                self.collection_record_file.update(self.collection_record)
+
+        self.collection = coll_reader
+        self.populate(self.mtime())
+
+    def uuid(self):
+        return self.collection_locator
+
+    @use_counter
+    def update(self, to_record_version=None):
+        try:
+            if self.collection_record is not None and portable_data_hash_pattern.match(self.collection_locator):
+                return True
+
+            if self.collection_locator is None:
+                self.fresh()
+                return True
+
+            try:
+                with llfuse.lock_released:
+                    self._updating_lock.acquire()
+                    if not self.stale():
+                        return
+
+                    _logger.debug("Updating collection %s inode %s to record version %s", self.collection_locator, self.inode, to_record_version)
+                    if self.collection is not None:
+                        if self.collection.known_past_version(to_record_version):
+                            _logger.debug("%s already processed %s", self.collection_locator, to_record_version)
+                        else:
+                            self.collection.update()
+                    else:
+                        if uuid_pattern.match(self.collection_locator):
+                            coll_reader = arvados.collection.Collection(
+                                self.collection_locator, self.api, self.api.keep,
+                                num_retries=self.num_retries)
+                        else:
+                            coll_reader = arvados.collection.CollectionReader(
+                                self.collection_locator, self.api, self.api.keep,
+                                num_retries=self.num_retries)
+                        new_collection_record = coll_reader.api_response() or {}
+                        # If the Collection only exists in Keep, there will be no API
+                        # response.  Fill in the fields we need.
+                        if 'uuid' not in new_collection_record:
+                            new_collection_record['uuid'] = self.collection_locator
+                        if "portable_data_hash" not in new_collection_record:
+                            new_collection_record["portable_data_hash"] = new_collection_record["uuid"]
+                        if 'manifest_text' not in new_collection_record:
+                            new_collection_record['manifest_text'] = coll_reader.manifest_text()
+
+                        if self.collection_record is None or self.collection_record["portable_data_hash"] != new_collection_record.get("portable_data_hash"):
+                            self.new_collection(new_collection_record, coll_reader)
+
+                        self._manifest_size = len(coll_reader.manifest_text())
+                        _logger.debug("%s manifest_size %i", self, self._manifest_size)
+                # end with llfuse.lock_released, re-acquire lock
+
+                self.fresh()
+                return True
+            finally:
+                self._updating_lock.release()
+        except arvados.errors.NotFoundError as e:
+            _logger.error("Error fetching collection '%s': %s", self.collection_locator, e)
+        except arvados.errors.ArgumentError as detail:
+            _logger.warning("arv-mount %s: error %s", self.collection_locator, detail)
+            if self.collection_record is not None and "manifest_text" in self.collection_record:
+                _logger.warning("arv-mount manifest_text is: %s", self.collection_record["manifest_text"])
+        except Exception:
+            _logger.exception("arv-mount %s: error", self.collection_locator)
+            if self.collection_record is not None and "manifest_text" in self.collection_record:
+                _logger.error("arv-mount manifest_text is: %s", self.collection_record["manifest_text"])
+        self.invalidate()
+        return False
+
+    @use_counter
+    @check_update
+    def __getitem__(self, item):
+        if item == '.arvados#collection':
+            if self.collection_record_file is None:
+                self.collection_record_file = ObjectFile(self.inode, self.collection_record)
+                self.inodes.add_entry(self.collection_record_file)
+            return self.collection_record_file
+        else:
+            return super(CollectionDirectory, self).__getitem__(item)
+
+    def __contains__(self, k):
+        if k == '.arvados#collection':
+            return True
+        else:
+            return super(CollectionDirectory, self).__contains__(k)
+
+    def invalidate(self):
+        self.collection_record = None
+        self.collection_record_file = None
+        super(CollectionDirectory, self).invalidate()
+
+    def persisted(self):
+        return (self.collection_locator is not None)
+
+    def objsize(self):
+        # This is an empirically-derived heuristic to estimate the memory used
+        # to store this collection's metadata.  Calculating the memory
+        # footprint directly would be more accurate, but also more complicated.
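+        # For example, a 64 KiB manifest is accounted as 64*1024*128 bytes,
+        # i.e. roughly 8 MiB of estimated cache footprint.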
+        return self._manifest_size * 128
+
+    def finalize(self):
+        if self.collection is not None:
+            if self.writable():
+                self.collection.save()
+            self.collection.stop_threads()
+
+    def clear(self):
+        if self.collection is not None:
+            self.collection.stop_threads()
+        super(CollectionDirectory, self).clear()
+        self._manifest_size = 0
+
+
+class TmpCollectionDirectory(CollectionDirectoryBase):
+    """A directory backed by an Arvados collection that never gets saved.
+
+    This supports using Keep as scratch space. A userspace program can
+    read the .arvados#collection file to get a current manifest in
+    order to save a snapshot of the scratch data or use it as a crunch
+    job output.
+    """
+
+    class UnsaveableCollection(arvados.collection.Collection):
+        def save(self):
+            pass
+        def save_new(self):
+            pass
+
+    def __init__(self, parent_inode, inodes, api_client, num_retries):
+        collection = self.UnsaveableCollection(
+            api_client=api_client,
+            keep_client=api_client.keep,
+            num_retries=num_retries)
+        super(TmpCollectionDirectory, self).__init__(
+            parent_inode, inodes, collection)
+        self.collection_record_file = None
+        self.populate(self.mtime())
+
+    def on_event(self, *args, **kwargs):
+        super(TmpCollectionDirectory, self).on_event(*args, **kwargs)
+        if self.collection_record_file:
+            with llfuse.lock:
+                self.collection_record_file.invalidate()
+            self.inodes.invalidate_inode(self.collection_record_file)
+            _logger.debug("%s invalidated collection record", self)
+
+    def collection_record(self):
+        with llfuse.lock_released:
+            return {
+                "uuid": None,
+                "manifest_text": self.collection.manifest_text(),
+                "portable_data_hash": self.collection.portable_data_hash(),
+            }
+
+    def __contains__(self, k):
+        return (k == '.arvados#collection' or
+                super(TmpCollectionDirectory, self).__contains__(k))
+
+    @use_counter
+    def __getitem__(self, item):
+        if item == '.arvados#collection':
+            if self.collection_record_file is None:
+                self.collection_record_file = FuncToJSONFile(
+                    self.inode, self.collection_record)
+                self.inodes.add_entry(self.collection_record_file)
+            return self.collection_record_file
+        return super(TmpCollectionDirectory, self).__getitem__(item)
+
+    def persisted(self):
+        return False
+
+    def writable(self):
+        return True
+
+    def want_event_subscribe(self):
+        return False
+
+    def finalize(self):
+        self.collection.stop_threads()
+
+    def invalidate(self):
+        if self.collection_record_file:
+            self.collection_record_file.invalidate()
+        super(TmpCollectionDirectory, self).invalidate()
+
+
+class MagicDirectory(Directory):
+    """A special directory that logically contains the set of all extant keep locators.
+
+    When a file is referenced by lookup(), it is tested to see if it is a valid
+    keep locator to a manifest, and if so, loads the manifest contents as a
+    subdirectory of this directory with the locator as the directory name.
+    Since querying a list of all extant keep locators is impractical, only
+    collections that have already been accessed are visible to readdir().
+
+    """
+
+    README_TEXT = """
+This directory provides access to Arvados collections as subdirectories listed
+by uuid (in the form 'zzzzz-4zz18-1234567890abcde') or portable data hash (in
+the form '1234567890abcdef0123456789abcdef+123'), and Arvados projects by uuid
+(in the form 'zzzzz-j7d0g-1234567890abcde').
+
+Note that this directory will appear empty until you attempt to access a
+specific collection or project subdirectory (such as trying to 'cd' into it),
+at which point the collection or project will actually be looked up on the server
+and the directory will appear if it exists.
+
+""".lstrip()
+
+    def __init__(self, parent_inode, inodes, api, num_retries, pdh_only=False):
+        super(MagicDirectory, self).__init__(parent_inode, inodes)
+        self.api = api
+        self.num_retries = num_retries
+        self.pdh_only = pdh_only
+
+    def __setattr__(self, name, value):
+        super(MagicDirectory, self).__setattr__(name, value)
+        # When we're assigned an inode, add a README.
+        if ((name == 'inode') and (self.inode is not None) and
+              (not self._entries)):
+            self._entries['README'] = self.inodes.add_entry(
+                StringFile(self.inode, self.README_TEXT, time.time()))
+            # If we're the root directory, add an identical by_id subdirectory.
+            if self.inode == llfuse.ROOT_INODE:
+                self._entries['by_id'] = self.inodes.add_entry(MagicDirectory(
+                        self.inode, self.inodes, self.api, self.num_retries, self.pdh_only))
+
+    def __contains__(self, k):
+        if k in self._entries:
+            return True
+
+        if not portable_data_hash_pattern.match(k) and (self.pdh_only or not uuid_pattern.match(k)):
+            return False
+
+        try:
+            e = None
+
+            if group_uuid_pattern.match(k):
+                project = self.api.groups().list(
+                    filters=[['group_class', '=', 'project'], ["uuid", "=", k]]).execute(num_retries=self.num_retries)
+                if project[u'items_available'] == 0:
+                    return False
+                e = self.inodes.add_entry(ProjectDirectory(
+                    self.inode, self.inodes, self.api, self.num_retries, project[u'items'][0]))
+            else:
+                e = self.inodes.add_entry(CollectionDirectory(
+                        self.inode, self.inodes, self.api, self.num_retries, k))
+
+            if e.update():
+                if k not in self._entries:
+                    self._entries[k] = e
+                else:
+                    self.inodes.del_entry(e)
+                return True
+            else:
+                self.inodes.invalidate_entry(self, k)
+                self.inodes.del_entry(e)
+                return False
+        except Exception as ex:
+            _logger.exception("arv-mount lookup '%s':", k)
+            if e is not None:
+                self.inodes.del_entry(e)
+            return False
+
+    def __getitem__(self, item):
+        if item in self:
+            return self._entries[item]
+        else:
+            raise KeyError("No collection with id " + item)
+
+    def clear(self):
+        pass
+
+    def want_event_subscribe(self):
+        return not self.pdh_only
+
+
+class TagsDirectory(Directory):
+    """A special directory that contains as subdirectories all tags visible to the user."""
+
+    def __init__(self, parent_inode, inodes, api, num_retries, poll_time=60):
+        super(TagsDirectory, self).__init__(parent_inode, inodes)
+        self.api = api
+        self.num_retries = num_retries
+        self._poll = True
+        self._poll_time = poll_time
+        self._extra = set()
+
+    def want_event_subscribe(self):
+        return True
+
+    @use_counter
+    def update(self):
+        with llfuse.lock_released:
+            tags = self.api.links().list(
+                filters=[['link_class', '=', 'tag'], ["name", "!=", ""]],
+                select=['name'], distinct=True, limit=1000
+                ).execute(num_retries=self.num_retries)
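+        # merge(items, name-fn, same-fn, new-fn) reconciles this directory's
+        # entries with the tag list: matching entries are kept, new tags get
+        # a TagDirectory, and entries for vanished tags are dropped.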
+        if "items" in tags:
+            self.merge(tags['items']+[{"name": n} for n in self._extra],
+                       lambda i: i['name'],
+                       lambda a, i: a.tag == i['name'],
+                       lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, i['name'], poll=self._poll, poll_time=self._poll_time))
+
+    @use_counter
+    @check_update
+    def __getitem__(self, item):
+        if super(TagsDirectory, self).__contains__(item):
+            return super(TagsDirectory, self).__getitem__(item)
+        with llfuse.lock_released:
+            tags = self.api.links().list(
+                filters=[['link_class', '=', 'tag'], ['name', '=', item]], limit=1
+            ).execute(num_retries=self.num_retries)
+        if tags["items"]:
+            self._extra.add(item)
+            self.update()
+        return super(TagsDirectory, self).__getitem__(item)
+
+    @use_counter
+    @check_update
+    def __contains__(self, k):
+        if super(TagsDirectory, self).__contains__(k):
+            return True
+        try:
+            self[k]
+            return True
+        except KeyError:
+            pass
+        return False
+
+
+class TagDirectory(Directory):
+    """A special directory that contains as subdirectories all collections visible
+    to the user that are tagged with a particular tag.
+    """
+
+    def __init__(self, parent_inode, inodes, api, num_retries, tag,
+                 poll=False, poll_time=60):
+        super(TagDirectory, self).__init__(parent_inode, inodes)
+        self.api = api
+        self.num_retries = num_retries
+        self.tag = tag
+        self._poll = poll
+        self._poll_time = poll_time
+
+    def want_event_subscribe(self):
+        return True
+
+    @use_counter
+    def update(self):
+        with llfuse.lock_released:
+            taggedcollections = self.api.links().list(
+                filters=[['link_class', '=', 'tag'],
+                         ['name', '=', self.tag],
+                         ['head_uuid', 'is_a', 'arvados#collection']],
+                select=['head_uuid']
+                ).execute(num_retries=self.num_retries)
+        self.merge(taggedcollections['items'],
+                   lambda i: i['head_uuid'],
+                   lambda a, i: a.collection_locator == i['head_uuid'],
+                   lambda i: CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i['head_uuid']))
+
+
+class ProjectDirectory(Directory):
+    """A special directory that contains the contents of a project."""
+
+    def __init__(self, parent_inode, inodes, api, num_retries, project_object,
+                 poll=False, poll_time=60):
+        super(ProjectDirectory, self).__init__(parent_inode, inodes)
+        self.api = api
+        self.num_retries = num_retries
+        self.project_object = project_object
+        self.project_object_file = None
+        self.project_uuid = project_object['uuid']
+        self._poll = poll
+        self._poll_time = poll_time
+        self._updating_lock = threading.Lock()
+        self._current_user = None
+        self._full_listing = False
+
+    def want_event_subscribe(self):
+        return True
+
+    def createDirectory(self, i):
+        if collection_uuid_pattern.match(i['uuid']):
+            return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i)
+        elif group_uuid_pattern.match(i['uuid']):
+            return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i, self._poll, self._poll_time)
+        elif link_uuid_pattern.match(i['uuid']):
+            if i['head_kind'] == 'arvados#collection' or portable_data_hash_pattern.match(i['head_uuid']):
+                return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i['head_uuid'])
+            else:
+                return None
+        elif uuid_pattern.match(i['uuid']):
+            return ObjectFile(self.parent_inode, i)
+        else:
+            return None
+
+    def uuid(self):
+        return self.project_uuid
+
+    def items(self):
+        self._full_listing = True
+        return super(ProjectDirectory, self).items()
+
+    def namefn(self, i):
+        if 'name' in i:
+            if i['name'] is None or len(i['name']) == 0:
+                return None
+            elif "uuid" in i and (collection_uuid_pattern.match(i['uuid']) or group_uuid_pattern.match(i['uuid'])):
+                # collection or subproject
+                return i['name']
+            elif link_uuid_pattern.match(i['uuid']) and i['head_kind'] == 'arvados#collection':
+                # name link
+                return i['name']
+            elif 'kind' in i and i['kind'].startswith('arvados#'):
+                # something else
+                return "{}.{}".format(i['name'], i['kind'][8:])
+        else:
+            return None
+
+
+    @use_counter
+    def update(self):
+        if self.project_object_file is None:
+            self.project_object_file = ObjectFile(self.inode, self.project_object)
+            self.inodes.add_entry(self.project_object_file)
+
+        if not self._full_listing:
+            return True
+
+        def samefn(a, i):
+            if isinstance(a, CollectionDirectory) or isinstance(a, ProjectDirectory):
+                return a.uuid() == i['uuid']
+            elif isinstance(a, ObjectFile):
+                return a.uuid() == i['uuid'] and not a.stale()
+            return False
+
+        try:
+            with llfuse.lock_released:
+                self._updating_lock.acquire()
+                if not self.stale():
+                    return
+
+                if group_uuid_pattern.match(self.project_uuid):
+                    self.project_object = self.api.groups().get(
+                        uuid=self.project_uuid).execute(num_retries=self.num_retries)
+                elif user_uuid_pattern.match(self.project_uuid):
+                    self.project_object = self.api.users().get(
+                        uuid=self.project_uuid).execute(num_retries=self.num_retries)
+
+                contents = arvados.util.list_all(self.api.groups().list,
+                                                 self.num_retries,
+                                                 filters=[["owner_uuid", "=", self.project_uuid],
+                                                          ["group_class", "=", "project"]])
+                contents.extend(arvados.util.list_all(self.api.collections().list,
+                                                      self.num_retries,
+                                                      filters=[["owner_uuid", "=", self.project_uuid]]))
+
+            # end with llfuse.lock_released, re-acquire lock
+
+            self.merge(contents,
+                       self.namefn,
+                       samefn,
+                       self.createDirectory)
+            return True
+        finally:
+            self._updating_lock.release()
+
+    def _add_entry(self, i, name):
+        ent = self.createDirectory(i)
+        self._entries[name] = self.inodes.add_entry(ent)
+        return self._entries[name]
+
+    @use_counter
+    @check_update
+    def __getitem__(self, k):
+        if k == '.arvados#project':
+            return self.project_object_file
+        elif self._full_listing or super(ProjectDirectory, self).__contains__(k):
+            return super(ProjectDirectory, self).__getitem__(k)
+        with llfuse.lock_released:
+            contents = self.api.groups().list(filters=[["owner_uuid", "=", self.project_uuid],
+                                                       ["group_class", "=", "project"],
+                                                       ["name", "=", k]],
+                                              limit=1).execute(num_retries=self.num_retries)["items"]
+            if not contents:
+                contents = self.api.collections().list(filters=[["owner_uuid", "=", self.project_uuid],
+                                                                ["name", "=", k]],
+                                                       limit=1).execute(num_retries=self.num_retries)["items"]
+        if contents:
+            name = sanitize_filename(self.namefn(contents[0]))
+            if name != k:
+                raise KeyError(k)
+            return self._add_entry(contents[0], name)
+
+        # Didn't find item
+        raise KeyError(k)
+
+    def __contains__(self, k):
+        if k == '.arvados#project':
+            return True
+        try:
+            self[k]
+            return True
+        except KeyError:
+            pass
+        return False
+
+    @use_counter
+    @check_update
+    def writable(self):
+        with llfuse.lock_released:
+            if not self._current_user:
+                self._current_user = self.api.users().current().execute(num_retries=self.num_retries)
+            return self._current_user["uuid"] in self.project_object.get("writable_by", [])
+
+    def persisted(self):
+        return True
+
+    @use_counter
+    @check_update
+    def mkdir(self, name):
+        try:
+            with llfuse.lock_released:
+                self.api.collections().create(body={"owner_uuid": self.project_uuid,
+                                                    "name": name,
+                                                    "manifest_text": ""}).execute(num_retries=self.num_retries)
+            self.invalidate()
+        except apiclient_errors.Error as error:
+            _logger.error(error)
+            raise llfuse.FUSEError(errno.EEXIST)
+
+    @use_counter
+    @check_update
+    def rmdir(self, name):
+        if name not in self:
+            raise llfuse.FUSEError(errno.ENOENT)
+        if not isinstance(self[name], CollectionDirectory):
+            raise llfuse.FUSEError(errno.EPERM)
+        if len(self[name]) > 0:
+            raise llfuse.FUSEError(errno.ENOTEMPTY)
+        with llfuse.lock_released:
+            self.api.collections().delete(uuid=self[name].uuid()).execute(num_retries=self.num_retries)
+        self.invalidate()
+
+    @use_counter
+    @check_update
+    def rename(self, name_old, name_new, src):
+        if not isinstance(src, ProjectDirectory):
+            raise llfuse.FUSEError(errno.EPERM)
+
+        ent = src[name_old]
+
+        if not isinstance(ent, CollectionDirectory):
+            raise llfuse.FUSEError(errno.EPERM)
+
+        if name_new in self:
+            # POSIX semantics for replacing one directory with another are
+            # tricky: the target directory must be empty, and the operation
+            # must be atomic, which isn't possible with the Arvados API as of
+            # this writing. So we don't support that.
+            raise llfuse.FUSEError(errno.EPERM)
+
+        self.api.collections().update(uuid=ent.uuid(),
+                                      body={"owner_uuid": self.uuid(),
+                                            "name": name_new}).execute(num_retries=self.num_retries)
+
+        # Actually move the entry from the source directory to this directory.
+        del src._entries[name_old]
+        self._entries[name_new] = ent
+        self.inodes.invalidate_entry(src, name_old)
+
+    @use_counter
+    def child_event(self, ev):
+        properties = ev.get("properties") or {}
+        old_attrs = properties.get("old_attributes") or {}
+        new_attrs = properties.get("new_attributes") or {}
+        old_attrs["uuid"] = ev["object_uuid"]
+        new_attrs["uuid"] = ev["object_uuid"]
+        old_name = sanitize_filename(self.namefn(old_attrs))
+        new_name = sanitize_filename(self.namefn(new_attrs))
+
+        # create events will have a new name, but not an old name
+        # delete events will have an old name, but not a new name
+        # update events will have an old and new name, and they may be same or different
+        # if they are the same, an unrelated field changed and there is nothing to do.
+
+        if old_attrs.get("owner_uuid") != self.project_uuid:
+            # Was moved from somewhere else, so don't try to remove entry.
+            old_name = None
+        if ev.get("object_owner_uuid") != self.project_uuid:
+            # Was moved to somewhere else, so don't try to add entry
+            new_name = None
+
+        if old_attrs.get("is_trashed"):
+            # Was previously deleted
+            old_name = None
+        if new_attrs.get("is_trashed"):
+            # Has been deleted
+            new_name = None
+
+        if new_name != old_name:
+            ent = None
+            if old_name in self._entries:
+                ent = self._entries[old_name]
+                del self._entries[old_name]
+                self.inodes.invalidate_entry(self, old_name)
+
+            if new_name:
+                if ent is not None:
+                    self._entries[new_name] = ent
+                else:
+                    self._add_entry(new_attrs, new_name)
+            elif ent is not None:
+                self.inodes.del_entry(ent)
+
+
+class SharedDirectory(Directory):
+    """A special directory that represents users or groups who have shared projects with me."""
+
+    def __init__(self, parent_inode, inodes, api, num_retries, exclude,
+                 poll=False, poll_time=60):
+        super(SharedDirectory, self).__init__(parent_inode, inodes)
+        self.api = api
+        self.num_retries = num_retries
+        self.current_user = api.users().current().execute(num_retries=num_retries)
+        self._poll = True
+        self._poll_time = poll_time
+        self._updating_lock = threading.Lock()
+
+    @use_counter
+    def update(self):
+        try:
+            with llfuse.lock_released:
+                self._updating_lock.acquire()
+                if not self.stale():
+                    return
+
+                contents = {}
+                roots = []
+                root_owners = set()
+                objects = {}
+
+                methods = self.api._rootDesc.get('resources')["groups"]['methods']
+                if 'httpMethod' in methods.get('shared', {}):
+                    page = []
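+                    # Keyset pagination (count="none"): each request asks for
+                    # uuids greater than the last one seen, in uuid order,
+                    # until an empty page signals the end.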
+                    while True:
+                        resp = self.api.groups().shared(filters=[['group_class', '=', 'project']]+page,
+                                                        order="uuid",
+                                                        limit=10000,
+                                                        count="none",
+                                                        include="owner_uuid").execute()
+                        if not resp["items"]:
+                            break
+                        page = [["uuid", ">", resp["items"][len(resp["items"])-1]["uuid"]]]
+                        for r in resp["items"]:
+                            objects[r["uuid"]] = r
+                            roots.append(r["uuid"])
+                        for r in resp["included"]:
+                            objects[r["uuid"]] = r
+                            root_owners.add(r["uuid"])
+                else:
+                    all_projects = arvados.util.list_all(
+                        self.api.groups().list, self.num_retries,
+                        filters=[['group_class','=','project']],
+                        select=["uuid", "owner_uuid"])
+                    for ob in all_projects:
+                        objects[ob['uuid']] = ob
+
+                    current_uuid = self.current_user['uuid']
+                    for ob in all_projects:
+                        if ob['owner_uuid'] != current_uuid and ob['owner_uuid'] not in objects:
+                            roots.append(ob['uuid'])
+                            root_owners.add(ob['owner_uuid'])
+
+                    lusers = arvados.util.list_all(
+                        self.api.users().list, self.num_retries,
+                        filters=[['uuid','in', list(root_owners)]])
+                    lgroups = arvados.util.list_all(
+                        self.api.groups().list, self.num_retries,
+                        filters=[['uuid','in', list(root_owners)+roots]])
+
+                    for l in lusers:
+                        objects[l["uuid"]] = l
+                    for l in lgroups:
+                        objects[l["uuid"]] = l
+
+                for r in root_owners:
+                    if r in objects:
+                        obr = objects[r]
+                        if obr.get("name"):
+                            contents[obr["name"]] = obr
+                        #elif obr.get("username"):
+                        #    contents[obr["username"]] = obr
+                        elif "first_name" in obr:
+                            contents[u"{} {}".format(obr["first_name"], obr["last_name"])] = obr
+
+                for r in roots:
+                    if r in objects:
+                        obr = objects[r]
+                        if obr['owner_uuid'] not in objects:
+                            contents[obr["name"]] = obr
+
+            # end with llfuse.lock_released, re-acquire lock
+
+            self.merge(contents.items(),
+                       lambda i: i[0],
+                       lambda a, i: a.uuid() == i[1]['uuid'],
+                       lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i[1], poll=self._poll, poll_time=self._poll_time))
+        except Exception:
+            _logger.exception("arv-mount shared dir error")
+        finally:
+            self._updating_lock.release()
+
+    def want_event_subscribe(self):
+        return True
diff --git a/services/fuse/arvados_fuse/fusefile.py b/services/fuse/arvados_fuse/fusefile.py
new file mode 100644 (file)
index 0000000..cedb4fb
--- /dev/null
@@ -0,0 +1,149 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import json
+import llfuse
+import logging
+import re
+import time
+
+from fresh import FreshBase, convertTime
+
+_logger = logging.getLogger('arvados.arvados_fuse')
+
+class File(FreshBase):
+    """Base for file objects."""
+
+    __slots__ = ("inode", "parent_inode", "_mtime")
+
+    def __init__(self, parent_inode, _mtime=0):
+        super(File, self).__init__()
+        self.inode = None
+        self.parent_inode = parent_inode
+        self._mtime = _mtime
+
+    def size(self):
+        return 0
+
+    def readfrom(self, off, size, num_retries=0):
+        return ''
+
+    def writeto(self, off, buf, num_retries=0):
+        raise Exception("Not writable")
+
+    def mtime(self):
+        return self._mtime
+
+    def clear(self):
+        pass
+
+    def writable(self):
+        return False
+
+    def flush(self):
+        pass
+
+
+class FuseArvadosFile(File):
+    """Wraps a ArvadosFile."""
+
+    __slots__ = ('arvfile',)
+
+    def __init__(self, parent_inode, arvfile, _mtime):
+        super(FuseArvadosFile, self).__init__(parent_inode, _mtime)
+        self.arvfile = arvfile
+
+    def size(self):
+        with llfuse.lock_released:
+            return self.arvfile.size()
+
+    def readfrom(self, off, size, num_retries=0):
+        with llfuse.lock_released:
+            return self.arvfile.readfrom(off, size, num_retries, exact=True)
+
+    def writeto(self, off, buf, num_retries=0):
+        with llfuse.lock_released:
+            return self.arvfile.writeto(off, buf, num_retries)
+
+    def stale(self):
+        return False
+
+    def writable(self):
+        return self.arvfile.writable()
+
+    def flush(self):
+        with llfuse.lock_released:
+            if self.writable():
+                self.arvfile.parent.root_collection().save()
+
+
+class StringFile(File):
+    """Wrap a simple string as a file"""
+    def __init__(self, parent_inode, contents, _mtime):
+        super(StringFile, self).__init__(parent_inode, _mtime)
+        self.contents = contents
+
+    def size(self):
+        return len(self.contents)
+
+    def readfrom(self, off, size, num_retries=0):
+        return self.contents[off:(off+size)]
+
+
+class ObjectFile(StringFile):
+    """Wrap a dict as a serialized json object."""
+
+    def __init__(self, parent_inode, obj):
+        super(ObjectFile, self).__init__(parent_inode, "", 0)
+        self.object_uuid = obj['uuid']
+        self.update(obj)
+
+    def uuid(self):
+        return self.object_uuid
+
+    def update(self, obj=None):
+        if obj is None:
+            # TODO: retrieve the current record for self.object_uuid
+            # from the server. For now, at least don't crash when
+            # someone tells us it's a good time to update but doesn't
+            # pass us a fresh obj. See #8345
+            return
+        self._mtime = convertTime(obj['modified_at']) if 'modified_at' in obj else 0
+        self.contents = json.dumps(obj, indent=4, sort_keys=True) + "\n"
+
+    def persisted(self):
+        return True
+
+
+class FuncToJSONFile(StringFile):
+    """File content is the return value of a given function, encoded as JSON.
+
+    The function is called at the time the file is read. The result is
+    cached until invalidate() is called.
+    """
+    def __init__(self, parent_inode, func):
+        super(FuncToJSONFile, self).__init__(parent_inode, "", 0)
+        self.func = func
+
+        # invalidate_inode() is asynchronous with no callback to wait for. In
+        # order to guarantee userspace programs don't get stale data that was
+        # generated before the last invalidate(), we must disallow inode
+        # caching entirely.
+        self.allow_attr_cache = False
+
+    def size(self):
+        self._update()
+        return super(FuncToJSONFile, self).size()
+
+    def readfrom(self, *args, **kwargs):
+        self._update()
+        return super(FuncToJSONFile, self).readfrom(*args, **kwargs)
+
+    def _update(self):
+        if not self.stale():
+            return
+        self._mtime = time.time()
+        obj = self.func()
+        self.contents = json.dumps(obj, indent=4, sort_keys=True) + "\n"
+        self.fresh()
diff --git a/services/fuse/arvados_fuse/unmount.py b/services/fuse/arvados_fuse/unmount.py
new file mode 100644 (file)
index 0000000..a72da3a
--- /dev/null
@@ -0,0 +1,177 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import collections
+import errno
+import os
+import subprocess
+import time
+
+
+MountInfo = collections.namedtuple(
+    'MountInfo', ['is_fuse', 'major', 'minor', 'mnttype', 'path'])
+
+
+def mountinfo():
+    mi = []
+    with open('/proc/self/mountinfo') as f:
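+        # Each line has the shape documented in proc(5), e.g.:
+        # 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw
+        # i.e. "<id> <parent> <major:minor> <root> <mountpoint> <options>
+        # [optional fields] - <fstype> <source> <super options>".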
+        for m in f.readlines():
+            mntid, pmntid, dev, root, path, extra = m.split(" ", 5)
+            mnttype = extra.split(" - ")[1].split(" ", 1)[0]
+            major, minor = dev.split(":")
+            mi.append(MountInfo(
+                is_fuse=(mnttype == "fuse" or mnttype.startswith("fuse.")),
+                major=major,
+                minor=minor,
+                mnttype=mnttype,
+                path=path,
+            ))
+    return mi
+
+
+def paths_to_unmount(path, mnttype):
+    paths = []
+    for m in mountinfo():
+        if m.path == path or m.path.startswith(path+"/"):
+            paths.append(m.path)
+            if not (m.is_fuse and (mnttype is None or
+                                   mnttype == m.mnttype)):
+                raise Exception(
+                    "cannot unmount {}: mount type is {}".format(
+                        path, m.mnttype))
+    return paths
+
+
+def safer_realpath(path, loop=True):
+    """Similar to os.path.realpath(), but avoids calling lstat().
+
+    Leaves some symlinks unresolved."""
+    if path == '/':
+        return path, True
+    elif not path.startswith('/'):
+        path = os.path.abspath(path)
+    while True:
+        path = path.rstrip('/')
+        dirname, basename = os.path.split(path)
+        try:
+            path, resolved = safer_realpath(os.path.join(dirname, os.readlink(path)), loop=False)
+        except OSError as e:
+            # Path is not a symlink (EINVAL), or is unreadable, or
+            # doesn't exist. If the error was EINVAL and dirname can
+            # be resolved, we will have eliminated all symlinks and it
+            # will be safe to call normpath().
+            dirname, resolved = safer_realpath(dirname, loop=loop)
+            path = os.path.join(dirname, basename)
+            if resolved and e.errno == errno.EINVAL:
+                return os.path.normpath(path), True
+            else:
+                return path, False
+        except RuntimeError:
+            if not loop:
+                # Unwind to the point where we first started following
+                # symlinks.
+                raise
+            # Resolving the whole path landed in a symlink cycle, but
+            # we might still be able to resolve dirname.
+            dirname, _ = safer_realpath(dirname, loop=loop)
+            return os.path.join(dirname, basename), False
+
+
+def unmount(path, subtype=None, timeout=10, recursive=False):
+    """Unmount the fuse mount at path.
+
+    Unmounting is done by writing 1 to the "abort" control file in
+    sysfs to kill the fuse driver process, then executing "fusermount
+    -u -z" to detach the mount point, and repeating these steps until
+    the mount is no longer listed in /proc/self/mountinfo.
+
+    This procedure should enable a non-root user to reliably unmount
+    their own fuse filesystem without risk of deadlock.
+
+    Returns True if unmounting was successful, False if it wasn't a
+    fuse mount at all. Raises an exception if it cannot be unmounted.
+    """
+
+    path, _ = safer_realpath(path)
+
+    if subtype is None:
+        mnttype = None
+    elif subtype == '':
+        mnttype = 'fuse'
+    else:
+        mnttype = 'fuse.' + subtype
+
+    if recursive:
+        paths = paths_to_unmount(path, mnttype)
+        if not paths:
+            # We might not have found any mounts merely because path
+            # contains symlinks, so we should resolve them and try
+            # again. We didn't do this from the outset because
+            # realpath() can hang (see explanation below).
+            paths = paths_to_unmount(os.path.realpath(path), mnttype)
+        for path in sorted(paths, key=len, reverse=True):
+            unmount(path, timeout=timeout, recursive=False)
+        return len(paths) > 0
+
+    was_mounted = False
+    attempted = False
+    if timeout is None:
+        deadline = None
+    else:
+        deadline = time.time() + timeout
+
+    while True:
+        mounted = False
+        for m in mountinfo():
+            if m.is_fuse and (mnttype is None or mnttype == m.mnttype):
+                try:
+                    if m.path == path:
+                        was_mounted = True
+                        mounted = True
+                        break
+                except OSError:
+                    continue
+        if not was_mounted and path != os.path.realpath(path):
+            # If the specified path contains symlinks, it won't appear
+            # verbatim in mountinfo.
+            #
+            # It might seem like we should have called realpath() from
+            # the outset. But we can't: realpath() hangs (in lstat())
+            # if we call it on an unresponsive mount point, and this
+            # is an important and common scenario.
+            #
+            # By waiting until now to try realpath(), we avoid this
+            # problem in the most common cases, which are: (1) the
+            # specified path has no symlinks and is a mount point, in
+            # which case was_mounted==True and we can proceed without
+            # calling realpath(); and (2) the specified path is not a
+            # mount point (e.g., it was already unmounted by someone
+            # else, or it's a typo), and realpath() can determine that
+            # without hitting any other unresponsive mounts.
+            path = os.path.realpath(path)
+            continue
+        elif not mounted:
+            return was_mounted
+
+        if attempted:
+            delay = 1
+            if deadline:
+                delay = min(delay, deadline - time.time())
+                if delay <= 0:
+                    raise Exception("timed out")
+            time.sleep(delay)
+
+        try:
+            with open('/sys/fs/fuse/connections/{}/abort'.format(m.minor),
+                      'w') as f:
+                f.write("1")
+        except IOError as e:
+            if e.errno != errno.ENOENT:
+                raise
+
+        attempted = True
+        try:
+            subprocess.check_call(["fusermount", "-u", "-z", path])
+        except subprocess.CalledProcessError:
+            pass
diff --git a/services/fuse/arvados_version.py b/services/fuse/arvados_version.py
new file mode 100644 (file)
index 0000000..2e6484c
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import subprocess
+import time
+import os
+import re
+
+def git_latest_tag():
+    gittags = subprocess.check_output(['git', 'tag', '-l']).split()
+    gittags.sort(key=lambda s: [int(u) for u in s.split(b'.')], reverse=True)
+    return str(next(iter(gittags)).decode('utf-8'))
+
+def git_timestamp_tag():
+    gitinfo = subprocess.check_output(
+        ['git', 'log', '--first-parent', '--max-count=1',
+         '--format=format:%ct', '.']).strip()
+    return str(time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo))))
+
+def save_version(setup_dir, module, v):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'w') as fp:
+        return fp.write("__version__ = '%s'\n" % v)
+
+def read_version(setup_dir, module):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'r') as fp:
+        return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+
+def get_version(setup_dir, module):
+    env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
+
+    if env_version:
+        save_version(setup_dir, module, env_version)
+    else:
+        try:
+            save_version(setup_dir, module, git_latest_tag() + git_timestamp_tag())
+        except subprocess.CalledProcessError:
+            pass
+
+    return read_version(setup_dir, module)
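+
+# Illustrative outcome (assumed values, not tested): with latest git tag
+# "1.3.0" and a last commit at 2019-03-14 14:11:26 UTC, get_version()
+# returns "1.3.0.20190314141126"; if ARVADOS_BUILDING_VERSION is set, that
+# value is written to _version.py and returned verbatim.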
diff --git a/services/fuse/bin/arv-mount b/services/fuse/bin/arv-mount
new file mode 100755 (executable)
index 0000000..2663e3d
--- /dev/null
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import arvados_fuse.command
+
+if __name__ == '__main__':
+    args = arvados_fuse.command.ArgumentParser().parse_args()
+    arvados_fuse.command.Mount(args).run()
diff --git a/services/fuse/fpm-info.sh b/services/fuse/fpm-info.sh
new file mode 100644 (file)
index 0000000..fd94ef7
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+fpm_depends+=(fuse)
+
+case "$TARGET" in
+    centos*)
+        fpm_depends+=(fuse-libs)
+        ;;
+    debian* | ubuntu*)
+        fpm_depends+=(libcurl3-gnutls libpython2.7)
+        ;;
+esac
diff --git a/services/fuse/gittaggers.py b/services/fuse/gittaggers.py
new file mode 120000 (symlink)
index 0000000..a9ad861
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/python/gittaggers.py
\ No newline at end of file
diff --git a/services/fuse/setup.py b/services/fuse/setup.py
new file mode 100644 (file)
index 0000000..9b4b997
--- /dev/null
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import
+import os
+import sys
+import re
+
+from setuptools import setup, find_packages
+
+SETUP_DIR = os.path.dirname(__file__) or '.'
+README = os.path.join(SETUP_DIR, 'README.rst')
+
+import arvados_version
+version = arvados_version.get_version(SETUP_DIR, "arvados_fuse")
+
+short_tests_only = False
+if '--short-tests-only' in sys.argv:
+    short_tests_only = True
+    sys.argv.remove('--short-tests-only')
+
+setup(name='arvados_fuse',
+      version=version,
+      description='Arvados FUSE driver',
+      long_description=open(README).read(),
+      author='Arvados',
+      author_email='info@arvados.org',
+      url="https://arvados.org",
+      download_url="https://github.com/curoverse/arvados.git",
+      license='GNU Affero General Public License, version 3.0',
+      packages=['arvados_fuse'],
+      scripts=[
+        'bin/arv-mount'
+        ],
+      data_files=[
+          ('share/doc/arvados_fuse', ['agpl-3.0.txt', 'README.rst']),
+      ],
+      install_requires=[
+        'arvados-python-client >= 0.1.20151118035730',
+        # llfuse 1.3.4 fails to install via pip
+        'llfuse >=1.2, <1.3.4',
+        'python-daemon',
+        'ciso8601 >= 2.0.0',
+        'setuptools'
+        ],
+      extras_require={
+          ':python_version<"3"': ['pytz'],
+      },
+      test_suite='tests',
+      tests_require=['pbr<1.7.0', 'mock>=1.0', 'PyYAML'],
+      zip_safe=False
+      )
diff --git a/services/fuse/tests/__init__.py b/services/fuse/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/fuse/tests/fstest.py b/services/fuse/tests/fstest.py
new file mode 100644 (file)
index 0000000..2b3e85e
--- /dev/null
@@ -0,0 +1,137 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from multiprocessing import Process
+import os
+import subprocess
+import sys
+import prof
+
+def fn(n):
+    return "file%i" % n
+
+def createfiles(d, n):
+    for j in xrange(1, 5):
+        print "Starting small file %s %i, %i" % (d, n, j)
+        if d:
+            os.mkdir(d)
+            ld = os.listdir('.')
+            if d not in ld:
+                print "ERROR %s missing" % d
+            os.chdir(d)
+
+        for i in xrange(n, n+10):
+            with open(fn(i), "w") as f:
+                f.write(fn(i))
+
+        ld = os.listdir('.')
+        for i in xrange(n, n+10):
+            if fn(i) not in ld:
+                print "ERROR %s missing" % fn(i)
+
+        for i in xrange(n, n+10):
+            with open(fn(i), "r") as f:
+                if f.read() != fn(i):
+                    print "ERROR %s doesn't have expected contents" % fn(i)
+
+        for i in xrange(n, n+10):
+            os.remove(fn(i))
+
+        ld = os.listdir('.')
+        for i in xrange(n, n+10):
+            if fn(i) in ld:
+                print "ERROR %s should have been removed" % fn(i)
+
+        if d:
+            os.chdir('..')
+            os.rmdir(d)
+            ld = os.listdir('.')
+            if d in ld:
+                print "ERROR %s should have been removed" % d
+
+
+def createbigfile(d, n):
+    for j in xrange(1, 5):
+        print "Starting big file %s %i, %i" % (d, n, j)
+        i = n
+        if d:
+            os.mkdir(d)
+            ld = os.listdir('.')
+            if d not in ld:
+                print "ERROR %s missing" % d
+            os.chdir(d)
+
+        with open(fn(i), "w") as f:
+            for j in xrange(0, 1000):
+                f.write((str(j) + fn(i)) * 10000)
+
+        ld = os.listdir('.')
+        if fn(i) not in ld:
+            print "ERROR %s missing" % fn(i)
+
+        with open(fn(i), "r") as f:
+            for j in xrange(0, 1000):
+                expect = (str(j) + fn(i)) * 10000
+                if f.read(len(expect)) != expect:
+                    print "ERROR %s doesn't have expected contents" % fn(i)
+
+        os.remove(fn(i))
+
+        ld = os.listdir('.')
+        if fn(i) in ld:
+            print "ERROR %s should have been removed" % fn(i)
+
+        if d:
+            os.chdir('..')
+            os.rmdir(d)
+            ld = os.listdir('.')
+            if d in ld:
+                print "ERROR %s should have been removed" % d
+
+def do_ls():
+    with open("/dev/null", "w") as nul:
+        for j in xrange(1, 50):
+            subprocess.call(["ls", "-l"], stdout=nul, stderr=nul)
+
+def runit(target, indir):
+    procs = []
+    for n in xrange(0, 20):
+        if indir:
+            p = Process(target=target, args=("dir%i" % n, n*10,))
+        else:
+            p = Process(target=target, args=("", n*10,))
+        p.start()
+        procs.append(p)
+
+    p = Process(target=do_ls, args=())
+    p.start()
+    procs.append(p)
+
+    for p in procs:
+        p.join()
+
+    if os.listdir('.'):
+        print "ERROR there are left over files in the directory"
+
+
+if __name__ == '__main__':
+    if os.listdir('.'):
+        print "ERROR starting directory is not empty"
+        sys.exit()
+
+    print "Single directory small files"
+    with prof.CountTime():
+        runit(createfiles, False)
+
+    print "Separate directories small files"
+    with prof.CountTime():
+        runit(createfiles, True)
+
+    print "Single directory large files"
+    with prof.CountTime():
+        runit(createbigfile, False)
+
+    print "Separate directories large files"
+    with prof.CountTime():
+        runit(createbigfile, True)
diff --git a/services/fuse/tests/integration_test.py b/services/fuse/tests/integration_test.py
new file mode 100644 (file)
index 0000000..3c11fa2
--- /dev/null
@@ -0,0 +1,98 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import arvados
+import arvados_fuse
+import arvados_fuse.command
+import atexit
+import functools
+import inspect
+import logging
+import multiprocessing
+import os
+import run_test_server
+import signal
+import sys
+import tempfile
+import unittest
+
+@atexit.register
+def _pool_cleanup():
+    if _pool is None:
+        return
+    _pool.close()
+    _pool.join()
+
+
+def wrap_static_test_method(modName, clsName, funcName, args, kwargs):
+    class Test(unittest.TestCase):
+        def runTest(self, *args, **kwargs):
+            getattr(getattr(sys.modules[modName], clsName), funcName)(self, *args, **kwargs)
+    Test().runTest(*args, **kwargs)
+
+
+# To avoid Python's threading+multiprocessing=deadlock problems, we
+# use a single global pool with maxtasksperchild=None for the entire
+# test suite.
+_pool = None
+def workerPool():
+    global _pool
+    if _pool is None:
+        _pool = multiprocessing.Pool(processes=1, maxtasksperchild=None)
+    return _pool
+
+
+class IntegrationTest(unittest.TestCase):
+    def pool_test(self, *args, **kwargs):
+        """Run a static method as a unit test, in a different process.
+
+        If called by method 'foobar', the static method '_foobar' of
+        the same class will be called in the other process.
+        """
+        modName = inspect.getmodule(self).__name__
+        clsName = self.__class__.__name__
+        funcName = inspect.currentframe().f_back.f_code.co_name
+        workerPool().apply(
+            wrap_static_test_method,
+            (modName, clsName, '_'+funcName, args, kwargs))
+
+    @classmethod
+    def setUpClass(cls):
+        run_test_server.run()
+        run_test_server.run_keep(enforce_permissions=True, num_servers=2)
+
+    @classmethod
+    def tearDownClass(cls):
+        run_test_server.stop_keep(num_servers=2)
+
+    def setUp(self):
+        self.mnt = tempfile.mkdtemp()
+        run_test_server.authorize_with('active')
+
+    def tearDown(self):
+        os.rmdir(self.mnt)
+        run_test_server.reset()
+
+    @staticmethod
+    def mount(argv):
+        """Decorator. Sets up a FUSE mount at self.mnt with the given args."""
+        def decorator(func):
+            @functools.wraps(func)
+            def wrapper(self, *args, **kwargs):
+                self.mount = None
+                try:
+                    with arvados_fuse.command.Mount(
+                            arvados_fuse.command.ArgumentParser().parse_args(
+                                argv + ['--foreground',
+                                        '--unmount-timeout=2',
+                                        self.mnt])) as self.mount:
+                        return func(self, *args, **kwargs)
+                finally:
+                    if self.mount and self.mount.llfuse_thread.is_alive():
+                        logging.warning("IntegrationTest.mount:"
+                                            " llfuse thread still alive after umount"
+                                            " -- killing test suite to avoid deadlock")
+                        os.kill(os.getpid(), signal.SIGKILL)
+            return wrapper
+        return decorator
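+
+    # Hedged usage sketch inside a subclass (the argv flag is an assumption):
+    #
+    #     class FooTest(IntegrationTest):
+    #         @IntegrationTest.mount(argv=['--by-id'])
+    #         def test_mounted(self):
+    #             self.assertTrue(os.path.isdir(self.mnt))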
diff --git a/services/fuse/tests/mount_test_base.py b/services/fuse/tests/mount_test_base.py
new file mode 100644 (file)
index 0000000..d476fc7
--- /dev/null
@@ -0,0 +1,99 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import arvados
+import arvados_fuse as fuse
+import arvados.safeapi
+import llfuse
+import logging
+import multiprocessing
+import os
+import run_test_server
+import shutil
+import signal
+import subprocess
+import sys
+import tempfile
+import threading
+import time
+import unittest
+
+logger = logging.getLogger('arvados.arv-mount')
+
+from .integration_test import workerPool
+
+class MountTestBase(unittest.TestCase):
+    def setUp(self, api=None, local_store=True):
+        # The underlying C implementation of open() makes an fstat() syscall
+        # with the GIL still held.  When the GETATTR message comes back to
+        # llfuse (which in these tests is in the same interpreter process) it
+        # can't acquire the GIL, so it can't service the fstat() call, so it
+        # deadlocks.  The workaround is to run some of our test code in a
+        # separate process.  Fortunately the multiprocessing module makes this
+        # relatively easy.
+
+        self.pool = workerPool()
+        if local_store:
+            self.keeptmp = tempfile.mkdtemp()
+            os.environ['KEEP_LOCAL_STORE'] = self.keeptmp
+        else:
+            self.keeptmp = None
+        self.mounttmp = tempfile.mkdtemp()
+        run_test_server.run()
+        run_test_server.authorize_with("admin")
+        self.api = api if api else arvados.safeapi.ThreadSafeApiCache(arvados.config.settings())
+        self.llfuse_thread = None
+
+    # This is a copy of Mount's method.  TODO: Refactor MountTestBase
+    # to use a Mount instead of copying its code.
+    def _llfuse_main(self):
+        try:
+            llfuse.main()
+        except:
+            llfuse.close(unmount=False)
+            raise
+        llfuse.close()
+
+    def make_mount(self, root_class, **root_kwargs):
+        self.operations = fuse.Operations(
+            os.getuid(), os.getgid(),
+            api_client=self.api,
+            enable_write=True)
+        self.operations.inodes.add_entry(root_class(
+            llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, **root_kwargs))
+        llfuse.init(self.operations, self.mounttmp, [])
+        self.llfuse_thread = threading.Thread(None, lambda: self._llfuse_main())
+        self.llfuse_thread.daemon = True
+        self.llfuse_thread.start()
+        # wait until the driver is finished initializing
+        self.operations.initlock.wait()
+        return self.operations.inodes[llfuse.ROOT_INODE]
+
+    def tearDown(self):
+        if self.llfuse_thread:
+            if self.operations.events:
+                self.operations.events.close(timeout=10)
+            subprocess.call(["fusermount", "-u", "-z", self.mounttmp])
+            t0 = time.time()
+            self.llfuse_thread.join(timeout=10)
+            if self.llfuse_thread.is_alive():
+                logger.warning("MountTestBase.tearDown():"
+                               " llfuse thread still alive 10s after umount"
+                               " -- exiting with SIGKILL")
+                os.kill(os.getpid(), signal.SIGKILL)
+            waited = time.time() - t0
+            if waited > 0.1:
+                logger.warning("MountTestBase.tearDown(): waited %f s for llfuse thread to end", waited)
+
+        os.rmdir(self.mounttmp)
+        if self.keeptmp:
+            shutil.rmtree(self.keeptmp)
+            os.environ.pop('KEEP_LOCAL_STORE')
+        run_test_server.reset()
+
+    def assertDirContents(self, subdir, expect_content):
+        path = self.mounttmp
+        if subdir:
+            path = os.path.join(path, subdir)
+        self.assertEqual(sorted(expect_content), sorted(llfuse.listdir(path)))
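Because make_mount() runs llfuse inside the test interpreter, any test code that touches the mount point with ordinary file operations has to be shipped to the worker pool, per the GIL note in setUp(). The resulting shape, modeled on the tests added later in this commit (ExampleMountTest and its file contents are hypothetical):

    import arvados
    import arvados_fuse as fuse
    import llfuse
    import unittest
    from .mount_test_base import MountTestBase

    def _list_mount(mounttmp):
        class Test(unittest.TestCase):
            def runTest(self):
                # Runs in the pool process, safely away from the llfuse thread.
                self.assertIn('thing1.txt', llfuse.listdir(mounttmp))
        Test().runTest()

    class ExampleMountTest(MountTestBase):
        def runTest(self):
            cw = arvados.CollectionWriter()
            cw.start_new_file('thing1.txt')
            cw.write('data 1')
            collection = cw.finish()
            self.api.collections().create(
                body={"manifest_text": cw.manifest_text()}).execute()
            self.make_mount(fuse.CollectionDirectory,
                            collection_record=collection)
            self.pool.apply(_list_mount, (self.mounttmp,))
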
diff --git a/services/fuse/tests/performance/__init__.py b/services/fuse/tests/performance/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/fuse/tests/performance/performance_profiler.py b/services/fuse/tests/performance/performance_profiler.py
new file mode 120000 (symlink)
index 0000000..01a6805
--- /dev/null
@@ -0,0 +1 @@
+../../../../sdk/python/tests/performance/performance_profiler.py
\ No newline at end of file
diff --git a/services/fuse/tests/performance/test_collection_performance.py b/services/fuse/tests/performance/test_collection_performance.py
new file mode 100644 (file)
index 0000000..6772a7d
--- /dev/null
@@ -0,0 +1,488 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import arvados
+import arvados_fuse as fuse
+import llfuse
+import logging
+import os
+import sys
+import unittest
+from .. import run_test_server
+from ..mount_test_base import MountTestBase
+from ..slow_test import slow_test
+
+logger = logging.getLogger('arvados.arv-mount')
+
+from performance_profiler import profiled
+
+def fuse_createCollectionWithMultipleBlocks(mounttmp, streams=1, files_per_stream=1, data='x'):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            self.createCollectionWithMultipleBlocks()
+
+        @profiled
+        def createCollectionWithMultipleBlocks(self):
+            for i in range(0, streams):
+                os.mkdir(os.path.join(mounttmp, "./stream" + str(i)))
+
+                # Create files
+                for j in range(0, files_per_stream):
+                    with open(os.path.join(mounttmp, "./stream" + str(i), "file" + str(j) +".txt"), "w") as f:
+                        f.write(data)
+
+    Test().runTest()
+
+def fuse_readContentsFromCollectionWithMultipleBlocks(mounttmp, streams=1, files_per_stream=1, data='x'):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            self.readContentsFromCollectionWithMultipleBlocks()
+
+        @profiled
+        def readContentsFromCollectionWithMultipleBlocks(self):
+            for i in range(0, streams):
+                d1 = llfuse.listdir(os.path.join(mounttmp, 'stream'+str(i)))
+                for j in range(0, files_per_stream):
+                    with open(os.path.join(mounttmp, 'stream'+str(i), 'file'+str(j)+'.txt')) as f:
+                        self.assertEqual(data, f.read())
+
+    Test().runTest()
+
+def fuse_moveFileFromCollectionWithMultipleBlocks(mounttmp, stream, filename):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            self.moveFileFromCollectionWithMultipleBlocks()
+
+        @profiled
+        def moveFileFromCollectionWithMultipleBlocks(self):
+            d1 = llfuse.listdir(os.path.join(mounttmp, stream))
+            self.assertIn(filename, d1)
+
+            os.rename(os.path.join(mounttmp, stream, filename), os.path.join(mounttmp, 'moved_from_'+stream+'_'+filename))
+
+            d1 = llfuse.listdir(os.path.join(mounttmp))
+            self.assertIn('moved_from_'+stream+'_'+filename, d1)
+
+            d1 = llfuse.listdir(os.path.join(mounttmp, stream))
+            self.assertNotIn(filename, d1)
+
+    Test().runTest()
+
+def fuse_deleteFileFromCollectionWithMultipleBlocks(mounttmp, stream, filename):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            self.deleteFileFromCollectionWithMultipleBlocks()
+
+        @profiled
+        def deleteFileFromCollectionWithMultipleBlocks(self):
+            os.remove(os.path.join(mounttmp, stream, filename))
+
+    Test().runTest()
+
+# Create a collection with 2 streams, 3 files_per_stream, 2 blocks_per_file, 2**26 bytes_per_block
+class CreateCollectionWithMultipleBlocksAndMoveAndDeleteFile(MountTestBase):
+    def setUp(self):
+        super(CreateCollectionWithMultipleBlocksAndMoveAndDeleteFile, self).setUp()
+
+    @slow_test
+    def test_CreateCollectionWithManyBlocksAndMoveAndDeleteFile(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+        self.assertTrue(m.writable())
+
+        streams = 2
+        files_per_stream = 3
+        blocks_per_file = 2
+        bytes_per_block = 2**26
+
+        data = 'x' * blocks_per_file * bytes_per_block
+
+        self.pool.apply(fuse_createCollectionWithMultipleBlocks, (self.mounttmp, streams, files_per_stream, data,))
+
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+
+        for i in range(0, streams):
+            self.assertIn('./stream' + str(i), collection2["manifest_text"])
+
+        for i in range(0, files_per_stream):
+            self.assertIn('file' + str(i) + '.txt', collection2["manifest_text"])
+
+        # Read file contents
+        self.pool.apply(fuse_readContentsFromCollectionWithMultipleBlocks, (self.mounttmp, streams, files_per_stream, data,))
+
+        # Move file0.txt out of the streams into .
+        for i in range(0, streams):
+            self.pool.apply(fuse_moveFileFromCollectionWithMultipleBlocks, (self.mounttmp, 'stream'+str(i), 'file0.txt',))
+
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+
+        manifest_streams = collection2['manifest_text'].split('\n')
+        self.assertEqual(4, len(manifest_streams))
+
+        for i in range(0, streams):
+            self.assertIn('file0.txt', manifest_streams[0])
+
+        for i in range(0, streams):
+            self.assertNotIn('file0.txt', manifest_streams[i+1])
+
+        for i in range(0, streams):
+            for j in range(1, files_per_stream):
+                self.assertIn('file' + str(j) + '.txt', manifest_streams[i+1])
+
+        # Delete 'file1.txt' from all the streams
+        for i in range(0, streams):
+            self.pool.apply(fuse_deleteFileFromCollectionWithMultipleBlocks, (self.mounttmp, 'stream'+str(i), 'file1.txt'))
+
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+
+        manifest_streams = collection2['manifest_text'].split('\n')
+        self.assertEqual(4, len(manifest_streams))
+
+        for i in range(0, streams):
+            self.assertIn('file0.txt', manifest_streams[0])
+
+        self.assertNotIn('file1.txt', collection2['manifest_text'])
+
+        for i in range(0, streams):
+            for j in range(2, files_per_stream):
+                self.assertIn('file' + str(j) + '.txt', manifest_streams[i+1])
+
+
+def fuse_createCollectionWithManyFiles(mounttmp, streams=1, files_per_stream=1, data='x'):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            self.createCollectionWithManyFiles()
+
+        @profiled
+        def createCollectionWithManyFiles(self):
+            for i in range(0, streams):
+                os.mkdir(os.path.join(mounttmp, "./stream" + str(i)))
+
+                # Create files
+                for j in range(0, files_per_stream):
+                    with open(os.path.join(mounttmp, "./stream" + str(i), "file" + str(j) +".txt"), "w") as f:
+                        f.write(data)
+
+    Test().runTest()
+
+def fuse_readContentsFromCollectionWithManyFiles(mounttmp, streams=1, files_per_stream=1, data='x'):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            self.readContentsFromCollectionWithManyFiles()
+
+        @profiled
+        def readContentsFromCollectionWithManyFiles(self):
+            for i in range(0, streams):
+                d1 = llfuse.listdir(os.path.join(mounttmp, 'stream'+str(i)))
+                for j in range(0, files_per_stream):
+                    with open(os.path.join(mounttmp, 'stream'+str(i), 'file'+str(j)+'.txt')) as f:
+                        self.assertEqual(data, f.read())
+
+    Test().runTest()
+
+def fuse_moveFileFromCollectionWithManyFiles(mounttmp, stream, filename):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            self.moveFileFromCollectionWithManyFiles()
+
+        @profiled
+        def moveFileFromCollectionWithManyFiles(self):
+            d1 = llfuse.listdir(os.path.join(mounttmp, stream))
+            self.assertIn(filename, d1)
+
+            os.rename(os.path.join(mounttmp, stream, filename), os.path.join(mounttmp, 'moved_from_'+stream+'_'+filename))
+
+            d1 = llfuse.listdir(os.path.join(mounttmp))
+            self.assertIn('moved_from_'+stream+'_'+filename, d1)
+
+            d1 = llfuse.listdir(os.path.join(mounttmp, stream))
+            self.assertNotIn(filename, d1)
+
+    Test().runTest()
+
+def fuse_deleteFileFromCollectionWithManyFiles(mounttmp, stream, filename):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            self.deleteFileFromCollectionWithManyFiles()
+
+        @profiled
+        def deleteFileFromCollectionWithManyFiles(self):
+            os.remove(os.path.join(mounttmp, stream, filename))
+
+    Test().runTest()
+
+# Create a collection with two streams, each with 200 files
+class CreateCollectionWithManyFilesAndMoveAndDeleteFile(MountTestBase):
+    def setUp(self):
+        super(CreateCollectionWithManyFilesAndMoveAndDeleteFile, self).setUp()
+
+    @slow_test
+    def test_CreateCollectionWithManyFilesAndMoveAndDeleteFile(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+        self.assertTrue(m.writable())
+
+        streams = 2
+        files_per_stream = 200
+        data = 'x'
+
+        self.pool.apply(fuse_createCollectionWithManyFiles, (self.mounttmp, streams, files_per_stream, data,))
+
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+
+        for i in range(0, streams):
+            self.assertIn('./stream' + str(i), collection2["manifest_text"])
+
+        for i in range(0, files_per_stream):
+            self.assertIn('file' + str(i) + '.txt', collection2["manifest_text"])
+
+        # Read file contents
+        self.pool.apply(fuse_readContentsFromCollectionWithManyFiles, (self.mounttmp, streams, files_per_stream, data,))
+
+        # Move file0.txt out of the streams into .
+        for i in range(0, streams):
+            self.pool.apply(fuse_moveFileFromCollectionWithManyFiles, (self.mounttmp, 'stream'+str(i), 'file0.txt',))
+
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+
+        manifest_streams = collection2['manifest_text'].split('\n')
+        self.assertEqual(4, len(manifest_streams))
+
+        for i in range(0, streams):
+            self.assertIn('file0.txt', manifest_streams[0])
+
+        for i in range(0, streams):
+            self.assertNotIn('file0.txt', manifest_streams[i+1])
+
+        for i in range(0, streams):
+            for j in range(1, files_per_stream):
+                self.assertIn('file' + str(j) + '.txt', manifest_streams[i+1])
+
+        # Delete 'file1.txt' from all the streams
+        for i in range(0, streams):
+            self.pool.apply(fuse_deleteFileFromCollectionWithManyFiles, (self.mounttmp, 'stream'+str(i), 'file1.txt'))
+
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+
+        manifest_streams = collection2['manifest_text'].split('\n')
+        self.assertEqual(4, len(manifest_streams))
+
+        for i in range(0, streams):
+            self.assertIn('file0.txt', manifest_streams[0])
+
+        self.assertNotIn('file1.txt', collection2['manifest_text'])
+
+        for i in range(0, streams):
+            for j in range(2, files_per_stream):
+                self.assertIn('file' + str(j) + '.txt', manifest_streams[i+1])
+
+
+def magicDirTest_MoveFileFromCollection(mounttmp, collection1, collection2, stream, filename):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            self.magicDirTest_moveFileFromCollection()
+
+        @profiled
+        def magicDirTest_moveFileFromCollection(self):
+            os.rename(os.path.join(mounttmp, collection1, filename), os.path.join(mounttmp, collection2, filename))
+
+    Test().runTest()
+
+def magicDirTest_RemoveFileFromCollection(mounttmp, collection1, stream, filename):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            self.magicDirTest_removeFileFromCollection()
+
+        @profiled
+        def magicDirTest_removeFileFromCollection(self):
+            os.remove(os.path.join(mounttmp, collection1, filename))
+
+    Test().runTest()
+
+class UsingMagicDir_CreateCollectionWithManyFilesAndMoveAndDeleteFile(MountTestBase):
+    def setUp(self):
+        super(UsingMagicDir_CreateCollectionWithManyFilesAndMoveAndDeleteFile, self).setUp()
+
+    @profiled
+    def magicDirTest_createCollectionWithManyFiles(self, streams=0, files_per_stream=0, data='x'):
+        # Create collection
+        collection = arvados.collection.Collection(api_client=self.api)
+        for j in range(0, files_per_stream):
+            with collection.open("file"+str(j)+".txt", "w") as f:
+                f.write(data)
+        collection.save_new()
+        return collection
+
+    @profiled
+    def magicDirTest_readCollectionContents(self, collection, streams=1, files_per_stream=1, data='x'):
+        mount_ls = os.listdir(os.path.join(self.mounttmp, collection))
+
+        files = {}
+        for j in range(0, files_per_stream):
+            files[os.path.join(self.mounttmp, collection, 'file'+str(j)+'.txt')] = data
+
+        for k, v in files.items():
+            with open(os.path.join(self.mounttmp, collection, k)) as f:
+                self.assertEqual(v, f.read())
+
+    @slow_test
+    def test_UsingMagicDirCreateCollectionWithManyFilesAndMoveAndDeleteFile(self):
+        streams = 2
+        files_per_stream = 200
+        data = 'x'
+
+        collection1 = self.magicDirTest_createCollectionWithManyFiles()
+        # Create collection with multiple files
+        collection2 = self.magicDirTest_createCollectionWithManyFiles(streams, files_per_stream, data)
+
+        # Mount FuseMagicDir
+        self.make_mount(fuse.MagicDirectory)
+
+        self.magicDirTest_readCollectionContents(collection2.manifest_locator(), streams, files_per_stream, data)
+
+        # Move file0.txt out of the collection2 into collection1
+        self.pool.apply(magicDirTest_MoveFileFromCollection, (self.mounttmp, collection2.manifest_locator(),
+              collection1.manifest_locator(), 'stream0', 'file0.txt',))
+        updated_collection = self.api.collections().get(uuid=collection2.manifest_locator()).execute()
+        self.assertFalse('file0.txt' in updated_collection['manifest_text'])
+        self.assertTrue('file1.txt' in updated_collection['manifest_text'])
+
+        # Delete file1.txt from collection2
+        self.pool.apply(magicDirTest_RemoveFileFromCollection, (self.mounttmp, collection2.manifest_locator(), 'stream0', 'file1.txt',))
+        updated_collection = self.api.collections().get(uuid=collection2.manifest_locator()).execute()
+        self.assertFalse('file1.txt' in updated_collection['manifest_text'])
+        self.assertTrue('file2.txt' in updated_collection['manifest_text'])
+
+
+def magicDirTest_MoveAllFilesFromCollection(mounttmp, from_collection, to_collection, stream, files_per_stream):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            self.magicDirTest_moveAllFilesFromCollection()
+
+        @profiled
+        def magicDirTest_moveAllFilesFromCollection(self):
+            for j in range(0, files_per_stream):
+                os.rename(os.path.join(mounttmp, from_collection, 'file'+str(j)+'.txt'), os.path.join(mounttmp, to_collection, 'file'+str(j)+'.txt'))
+
+    Test().runTest()
+
+class UsingMagicDir_CreateCollectionWithManyFilesAndMoveAllFilesIntoAnother(MountTestBase):
+    def setUp(self):
+        super(UsingMagicDir_CreateCollectionWithManyFilesAndMoveAllFilesIntoAnother, self).setUp()
+
+    @profiled
+    def magicDirTestMoveAllFiles_createCollectionWithManyFiles(self, streams=0, files_per_stream=0,
+            blocks_per_file=0, bytes_per_block=0, data='x'):
+        # Create collection
+        collection = arvados.collection.Collection(api_client=self.api)
+        for j in range(0, files_per_stream):
+            with collection.open("file"+str(j)+".txt", "w") as f:
+                f.write(data)
+        collection.save_new()
+        return collection
+
+    @slow_test
+    def test_UsingMagicDirCreateCollectionWithManyFilesAndMoveAllFilesIntoAnother(self):
+        streams = 2
+        files_per_stream = 200
+        data = 'x'
+
+        collection1 = self.magicDirTestMoveAllFiles_createCollectionWithManyFiles()
+        # Create collection with multiple files
+        collection2 = self.magicDirTestMoveAllFiles_createCollectionWithManyFiles(streams, files_per_stream, data)
+
+        # Mount FuseMagicDir
+        self.make_mount(fuse.MagicDirectory)
+
+        # Move all files from collection2 into collection1
+        self.pool.apply(magicDirTest_MoveAllFilesFromCollection, (self.mounttmp, collection2.manifest_locator(),
+                  collection1.manifest_locator(), 'stream0', files_per_stream,))
+
+        updated_collection = self.api.collections().get(uuid=collection2.manifest_locator()).execute()
+        file_names = ["file%i.txt" % i for i in range(0, files_per_stream)]
+        for name in file_names:
+            self.assertFalse(name in updated_collection['manifest_text'])
+
+        updated_collection = self.api.collections().get(uuid=collection1.manifest_locator()).execute()
+        for name in file_names:
+            self.assertTrue(name in updated_collection['manifest_text'])
+
+
+# Move one file at a time from one collection into another
+class UsingMagicDir_CreateCollectionWithManyFilesAndMoveEachFileIntoAnother(MountTestBase):
+    def setUp(self):
+        super(UsingMagicDir_CreateCollectionWithManyFilesAndMoveEachFileIntoAnother, self).setUp()
+
+    @profiled
+    def magicDirTestMoveFiles_createCollectionWithManyFiles(self, streams=0, files_per_stream=0, data='x'):
+        # Create collection
+        collection = arvados.collection.Collection(api_client=self.api)
+        for j in range(0, files_per_stream):
+            with collection.open("file"+str(j)+".txt", "w") as f:
+                f.write(data)
+        collection.save_new()
+        return collection
+
+    def magicDirTestMoveFiles_oneEachIntoAnother(self, from_collection, to_collection, files_per_stream):
+        for j in range(0, files_per_stream):
+            self.pool.apply(magicDirTest_MoveFileFromCollection, (self.mounttmp, from_collection.manifest_locator(),
+                  to_collection.manifest_locator(), 'stream0', 'file'+str(j)+'.txt',))
+
+    @slow_test
+    def test_UsingMagicDirCreateCollectionWithManyFilesAndMoveEachFileIntoAnother(self):
+        streams = 2
+        files_per_stream = 200
+        data = 'x'
+
+        collection1 = self.magicDirTestMoveFiles_createCollectionWithManyFiles()
+        # Create collection with multiple files
+        collection2 = self.magicDirTestMoveFiles_createCollectionWithManyFiles(streams, files_per_stream, data)
+
+        # Mount FuseMagicDir
+        self.make_mount(fuse.MagicDirectory)
+
+        # Move all files from collection2 into collection1
+        self.magicDirTestMoveFiles_oneEachIntoAnother(collection2, collection1, files_per_stream)
+
+        updated_collection = self.api.collections().get(uuid=collection2.manifest_locator()).execute()
+        file_names = ["file%i.txt" % i for i in range(0, files_per_stream)]
+        for name in file_names:
+            self.assertFalse(name in updated_collection['manifest_text'])
+
+        updated_collection = self.api.collections().get(uuid=collection1.manifest_locator()).execute()
+        for name in file_names:
+            self.assertTrue(name in updated_collection['manifest_text'])
+
+class FuseListLargeProjectContents(MountTestBase):
+    @profiled
+    def getProjectWithManyCollections(self):
+        project_contents = llfuse.listdir(self.mounttmp)
+        self.assertEqual(201, len(project_contents))
+        self.assertIn('Collection_1', project_contents)
+        return project_contents
+
+    @profiled
+    def listContentsInProjectWithManyCollections(self, project_contents):
+        project_contents = llfuse.listdir(self.mounttmp)
+        self.assertEqual(201, len(project_contents))
+        self.assertIn('Collection_1', project_contents)
+
+        for collection_name in project_contents:
+            collection_contents = llfuse.listdir(os.path.join(self.mounttmp, collection_name))
+            self.assertIn('baz', collection_contents)
+
+    @slow_test
+    def test_listLargeProjectContents(self):
+        self.make_mount(fuse.ProjectDirectory,
+                        project_object=run_test_server.fixture('groups')['project_with_201_collections'])
+        project_contents = self.getProjectWithManyCollections()
+        self.listContentsInProjectWithManyCollections(project_contents)
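Every helper above wraps its timed body in @profiled, imported from the performance_profiler module symlinked in from sdk/python (its source is outside this hunk). As a rough, explicitly non-authoritative sketch, a cProfile-based decorator with the same shape might look like this:

    import cProfile
    import functools
    import pstats
    import StringIO  # Python 2, like the rest of this suite

    def profiled(func):
        """Illustrative only; the real performance_profiler may differ."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            prof = cProfile.Profile()
            prof.enable()
            try:
                return func(*args, **kwargs)
            finally:
                prof.disable()
                out = StringIO.StringIO()
                pstats.Stats(prof, stream=out).sort_stats(
                    'cumulative').print_stats(10)
                print(out.getvalue())
        return wrapper
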
diff --git a/services/fuse/tests/prof.py b/services/fuse/tests/prof.py
new file mode 100644 (file)
index 0000000..021839c
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import time
+
+class CountTime(object):
+    def __init__(self, tag="", size=None):
+        self.tag = tag
+        self.size = size
+
+    def __enter__(self):
+        self.start = time.time()
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        sec = (time.time() - self.start)
+        th = ""
+        if self.size:
+            th = "throughput %s/sec" % (self.size / sec)
+        print "%s time %s micoseconds %s" % (self.tag, sec*1000000, th)
diff --git a/services/fuse/tests/run_test_server.py b/services/fuse/tests/run_test_server.py
new file mode 120000 (symlink)
index 0000000..76bcc16
--- /dev/null
@@ -0,0 +1 @@
+../../../sdk/python/tests/run_test_server.py
\ No newline at end of file
diff --git a/services/fuse/tests/slow_test.py b/services/fuse/tests/slow_test.py
new file mode 120000 (symlink)
index 0000000..c7e1f7f
--- /dev/null
@@ -0,0 +1 @@
+../../../sdk/python/tests/slow_test.py
\ No newline at end of file
diff --git a/services/fuse/tests/test_cache.py b/services/fuse/tests/test_cache.py
new file mode 100644 (file)
index 0000000..3f6b804
--- /dev/null
@@ -0,0 +1,49 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import arvados
+import arvados.collection
+import arvados_fuse
+import arvados_fuse.command
+import json
+import logging
+import os
+import tempfile
+import unittest
+
+from .integration_test import IntegrationTest
+from .mount_test_base import MountTestBase
+
+class CacheTest(IntegrationTest):
+    mnt_args = ["--by-id", "--directory-cache=0"]
+
+    @IntegrationTest.mount(argv=mnt_args)
+    def test_cache_spill(self):
+        pdh = []
+        for i in range(0, 8):
+            cw = arvados.collection.Collection()
+            f = cw.open("blurg%i" % i, "w")
+            f.write("bloop%i" % i)
+
+            cw.mkdirs("dir%i" % i)
+            f = cw.open("dir%i/blurg" % i, "w")
+            f.write("dirbloop%i" % i)
+
+            cw.save_new()
+            pdh.append(cw.portable_data_hash())
+        self.pool_test(self.mnt, pdh)
+
+    @staticmethod
+    def _test_cache_spill(self, mnt, pdh):
+        for i,v in enumerate(pdh):
+            j = os.path.join(mnt, "by_id", v, "blurg%i" % i)
+            self.assertTrue(os.path.exists(j))
+            j = os.path.join(mnt, "by_id", v, "dir%i/blurg" % i)
+            self.assertTrue(os.path.exists(j))
+
+        for i,v in enumerate(pdh):
+            j = os.path.join(mnt, "by_id", v, "blurg%i" % i)
+            self.assertTrue(os.path.exists(j))
+            j = os.path.join(mnt, "by_id", v, "dir%i/blurg" % i)
+            self.assertTrue(os.path.exists(j))
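--directory-cache=0 caps the directory cache at zero bytes, so each collection visited under by_id spills as soon as the next one is touched; the second loop in _test_cache_spill re-reads every path to confirm nothing was lost to eviction. Reproducing the same mount outside the harness looks roughly like this (the mount point is hypothetical, and ARVADOS_API_HOST/ARVADOS_API_TOKEN must already be configured):

    from arvados_fuse.command import ArgumentParser, Mount

    with Mount(ArgumentParser().parse_args(
            ['--by-id', '--directory-cache=0',
             '--foreground', '--unmount-timeout=2',
             '/tmp/arv-mnt'])) as mount:
        pass  # exercise paths under /tmp/arv-mnt/by_id/... here
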
diff --git a/services/fuse/tests/test_command_args.py b/services/fuse/tests/test_command_args.py
new file mode 100644 (file)
index 0000000..0d85df3
--- /dev/null
@@ -0,0 +1,324 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import arvados
+import arvados_fuse
+import arvados_fuse.command
+import contextlib
+import functools
+import io
+import json
+import llfuse
+import logging
+import mock
+import os
+import run_test_server
+import sys
+import tempfile
+import unittest
+
+def noexit(func):
+    """If argparse or arvados_fuse tries to exit, fail the test instead"""
+    class SystemExitCaught(StandardError):
+        pass
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except SystemExit:
+            raise SystemExitCaught
+    return wrapper
+
+@contextlib.contextmanager
+def nostderr():
+    orig, sys.stderr = sys.stderr, open(os.devnull, 'w')
+    try:
+        yield
+    finally:
+        sys.stderr = orig
+
+
+class MountArgsTest(unittest.TestCase):
+    def setUp(self):
+        self.mntdir = tempfile.mkdtemp()
+        run_test_server.authorize_with('active')
+
+    def tearDown(self):
+        os.rmdir(self.mntdir)
+
+    def lookup(self, mnt, *path):
+        ent = mnt.operations.inodes[llfuse.ROOT_INODE]
+        for p in path:
+            ent = ent[p]
+        return ent
+
+    @contextlib.contextmanager
+    def stderrMatches(self, stderr):
+        orig, sys.stderr = sys.stderr, stderr
+        try:
+            yield
+        finally:
+            sys.stderr = orig
+
+    def check_ent_type(self, cls, *path):
+        ent = self.lookup(self.mnt, *path)
+        self.assertEqual(ent.__class__, cls)
+        return ent
+
+    @noexit
+    def test_default_all(self):
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            '--foreground', self.mntdir])
+        self.assertEqual(args.mode, None)
+        self.mnt = arvados_fuse.command.Mount(args)
+        e = self.check_ent_type(arvados_fuse.ProjectDirectory, 'home')
+        self.assertEqual(e.project_object['uuid'],
+                         run_test_server.fixture('users')['active']['uuid'])
+        e = self.check_ent_type(arvados_fuse.MagicDirectory, 'by_id')
+
+        e = self.check_ent_type(arvados_fuse.StringFile, 'README')
+        readme = e.readfrom(0, -1)
+        self.assertRegexpMatches(readme, r'active-user@arvados\.local')
+        self.assertRegexpMatches(readme, r'\n$')
+
+        e = self.check_ent_type(arvados_fuse.StringFile, 'by_id', 'README')
+        txt = e.readfrom(0, -1)
+        self.assertRegexpMatches(txt, r'portable data hash')
+        self.assertRegexpMatches(txt, r'\n$')
+
+    @noexit
+    def test_by_id(self):
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            '--by-id',
+            '--foreground', self.mntdir])
+        self.assertEqual(args.mode, 'by_id')
+        self.mnt = arvados_fuse.command.Mount(args)
+        e = self.check_ent_type(arvados_fuse.MagicDirectory)
+        self.assertEqual(e.pdh_only, False)
+        self.assertEqual(True, self.mnt.listen_for_events)
+
+    @noexit
+    def test_by_pdh(self):
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            '--by-pdh',
+            '--foreground', self.mntdir])
+        self.assertEqual(args.mode, 'by_pdh')
+        self.mnt = arvados_fuse.command.Mount(args)
+        e = self.check_ent_type(arvados_fuse.MagicDirectory)
+        self.assertEqual(e.pdh_only, True)
+        self.assertEqual(False, self.mnt.listen_for_events)
+
+    @noexit
+    def test_by_tag(self):
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            '--by-tag',
+            '--foreground', self.mntdir])
+        self.assertEqual(args.mode, 'by_tag')
+        self.mnt = arvados_fuse.command.Mount(args)
+        e = self.check_ent_type(arvados_fuse.TagsDirectory)
+        self.assertEqual(True, self.mnt.listen_for_events)
+
+    @noexit
+    def test_collection(self, id_type='uuid'):
+        c = run_test_server.fixture('collections')['public_text_file']
+        cid = c[id_type]
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            '--collection', cid,
+            '--foreground', self.mntdir])
+        self.mnt = arvados_fuse.command.Mount(args)
+        e = self.check_ent_type(arvados_fuse.CollectionDirectory)
+        self.assertEqual(e.collection_locator, cid)
+        self.assertEqual(id_type == 'uuid', self.mnt.listen_for_events)
+
+    def test_collection_pdh(self):
+        self.test_collection('portable_data_hash')
+
+    @noexit
+    def test_home(self):
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            '--home',
+            '--foreground', self.mntdir])
+        self.assertEqual(args.mode, 'home')
+        self.mnt = arvados_fuse.command.Mount(args)
+        e = self.check_ent_type(arvados_fuse.ProjectDirectory)
+        self.assertEqual(e.project_object['uuid'],
+                         run_test_server.fixture('users')['active']['uuid'])
+        self.assertEqual(True, self.mnt.listen_for_events)
+
+    def test_mutually_exclusive_args(self):
+        cid = run_test_server.fixture('collections')['public_text_file']['uuid']
+        gid = run_test_server.fixture('groups')['aproject']['uuid']
+        for badargs in [
+                ['--mount-tmp', 'foo', '--collection', cid],
+                ['--mount-tmp', 'foo', '--project', gid],
+                ['--collection', cid, '--project', gid],
+                ['--by-id', '--project', gid],
+                ['--mount-tmp', 'foo', '--by-id'],
+        ]:
+            with nostderr():
+                with self.assertRaises(SystemExit):
+                    args = arvados_fuse.command.ArgumentParser().parse_args(
+                        badargs + ['--foreground', self.mntdir])
+                    arvados_fuse.command.Mount(args)
+
+    @noexit
+    def test_project(self):
+        uuid = run_test_server.fixture('groups')['aproject']['uuid']
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            '--project', uuid,
+            '--foreground', self.mntdir])
+        self.mnt = arvados_fuse.command.Mount(args)
+        e = self.check_ent_type(arvados_fuse.ProjectDirectory)
+        self.assertEqual(e.project_object['uuid'], uuid)
+
+    @noexit
+    def test_shared(self):
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            '--shared',
+            '--foreground', self.mntdir])
+        self.assertEqual(args.mode, 'shared')
+        self.mnt = arvados_fuse.command.Mount(args)
+        e = self.check_ent_type(arvados_fuse.SharedDirectory)
+        self.assertEqual(e.current_user['uuid'],
+                         run_test_server.fixture('users')['active']['uuid'])
+        self.assertEqual(True, self.mnt.listen_for_events)
+
+    def test_version_argument(self):
+        orig, sys.stderr = sys.stderr, io.BytesIO()
+        with self.assertRaises(SystemExit):
+            args = arvados_fuse.command.ArgumentParser().parse_args(['--version'])
+        self.assertRegexpMatches(sys.stderr.getvalue(), "[0-9]+\.[0-9]+\.[0-9]+")
+        sys.stderr = orig
+
+    @noexit
+    @mock.patch('arvados.events.subscribe')
+    def test_disable_event_listening(self, mock_subscribe):
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            '--disable-event-listening',
+            '--by-id',
+            '--foreground', self.mntdir])
+        self.mnt = arvados_fuse.command.Mount(args)
+        self.assertEqual(True, self.mnt.listen_for_events)
+        self.assertEqual(True, self.mnt.args.disable_event_listening)
+        with self.mnt:
+            pass
+        self.assertEqual(0, mock_subscribe.call_count)
+
+    @noexit
+    @mock.patch('arvados.events.subscribe')
+    def test_custom(self, mock_subscribe):
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            '--mount-tmp', 'foo',
+            '--mount-tmp', 'bar',
+            '--mount-home', 'my_home',
+            '--foreground', self.mntdir])
+        self.assertEqual(args.mode, None)
+        self.mnt = arvados_fuse.command.Mount(args)
+        self.check_ent_type(arvados_fuse.Directory)
+        self.check_ent_type(arvados_fuse.TmpCollectionDirectory, 'foo')
+        self.check_ent_type(arvados_fuse.TmpCollectionDirectory, 'bar')
+        e = self.check_ent_type(arvados_fuse.ProjectDirectory, 'my_home')
+        self.assertEqual(e.project_object['uuid'],
+                         run_test_server.fixture('users')['active']['uuid'])
+        self.assertEqual(True, self.mnt.listen_for_events)
+        with self.mnt:
+            pass
+        self.assertEqual(1, mock_subscribe.call_count)
+
+    @noexit
+    @mock.patch('arvados.events.subscribe')
+    def test_custom_no_listen(self, mock_subscribe):
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            '--mount-by-pdh', 'pdh',
+            '--mount-tmp', 'foo',
+            '--mount-tmp', 'bar',
+            '--foreground', self.mntdir])
+        self.mnt = arvados_fuse.command.Mount(args)
+        self.assertEqual(False, self.mnt.listen_for_events)
+        with self.mnt:
+            pass
+        self.assertEqual(0, mock_subscribe.call_count)
+
+    def test_custom_unsupported_layouts(self):
+        for name in ['.', '..', '', 'foo/bar', '/foo']:
+            with nostderr():
+                with self.assertRaises(SystemExit):
+                    args = arvados_fuse.command.ArgumentParser().parse_args([
+                        '--mount-tmp', name,
+                        '--foreground', self.mntdir])
+                    arvados_fuse.command.Mount(args)
+
+class MountErrorTest(unittest.TestCase):
+    def setUp(self):
+        self.mntdir = tempfile.mkdtemp()
+        run_test_server.run()
+        run_test_server.authorize_with("active")
+        self.logger = logging.getLogger("null")
+        self.logger.setLevel(logging.CRITICAL+1)
+
+    def tearDown(self):
+        if os.path.exists(self.mntdir):
+            # If the directory was not unmounted, this will raise an exception.
+            os.rmdir(self.mntdir)
+        run_test_server.reset()
+
+    def test_no_token(self):
+        del arvados.config._settings["ARVADOS_API_TOKEN"]
+        arvados.config._settings = {}
+        with self.assertRaises(SystemExit) as ex:
+            args = arvados_fuse.command.ArgumentParser().parse_args([self.mntdir])
+            arvados_fuse.command.Mount(args, logger=self.logger).run()
+        self.assertEqual(1, ex.exception.code)
+
+    def test_no_host(self):
+        del arvados.config._settings["ARVADOS_API_HOST"]
+        with self.assertRaises(SystemExit) as ex:
+            args = arvados_fuse.command.ArgumentParser().parse_args([self.mntdir])
+            arvados_fuse.command.Mount(args, logger=self.logger).run()
+        self.assertEqual(1, ex.exception.code)
+
+    def test_bogus_host(self):
+        arvados.config._settings["ARVADOS_API_HOST"] = "100::"
+        with self.assertRaises(SystemExit) as ex:
+            args = arvados_fuse.command.ArgumentParser().parse_args([self.mntdir])
+            arvados_fuse.command.Mount(args, logger=self.logger).run()
+        self.assertEqual(1, ex.exception.code)
+
+    def test_bogus_token(self):
+        arvados.config._settings["ARVADOS_API_TOKEN"] = "zzzzzzzzzzzzz"
+        with self.assertRaises(SystemExit) as ex:
+            args = arvados_fuse.command.ArgumentParser().parse_args([self.mntdir])
+            arvados_fuse.command.Mount(args, logger=self.logger).run()
+        self.assertEqual(1, ex.exception.code)
+
+    def test_bogus_mount_dir(self):
+        # All FUSE errors in llfuse.init() are raised as RuntimeError
+        # An easy error to trigger is to supply a nonexistent mount point,
+        # so test that one.
+        #
+        # Other possible errors that also raise RuntimeError (but are much
+        # harder to test automatically because they depend on operating
+        # system configuration):
+        #
+        # The user doesn't have permission to use FUSE
+        # The user specified --allow-other but user_allow_other is not set
+        # in /etc/fuse.conf
+        os.rmdir(self.mntdir)
+        with self.assertRaises(SystemExit) as ex:
+            args = arvados_fuse.command.ArgumentParser().parse_args([self.mntdir])
+            arvados_fuse.command.Mount(args, logger=self.logger).run()
+        self.assertEqual(1, ex.exception.code)
+
+    def test_unreadable_collection(self):
+        with self.assertRaises(SystemExit) as ex:
+            args = arvados_fuse.command.ArgumentParser().parse_args([
+                "--collection", "zzzzz-4zz18-zzzzzzzzzzzzzzz", self.mntdir])
+            arvados_fuse.command.Mount(args, logger=self.logger).run()
+        self.assertEqual(1, ex.exception.code)
+
+    def test_unreadable_project(self):
+        with self.assertRaises(SystemExit) as ex:
+            args = arvados_fuse.command.ArgumentParser().parse_args([
+                "--project", "zzzzz-j7d0g-zzzzzzzzzzzzzzz", self.mntdir])
+            arvados_fuse.command.Mount(args, logger=self.logger).run()
+        self.assertEqual(1, ex.exception.code)
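Note that MountArgsTest never starts llfuse: constructing Mount(args) is enough to build the in-memory directory tree, which check_ent_type() then walks from the root inode. Pulled out of the harness, the same inspection is roughly as follows (mntdir is any scratch directory; valid API credentials are assumed):

    import llfuse
    import arvados_fuse.command

    mntdir = '/tmp/arv-scratch'  # hypothetical; parse_args only records it
    args = arvados_fuse.command.ArgumentParser().parse_args(
        ['--foreground', mntdir])
    mnt = arvados_fuse.command.Mount(args)
    root = mnt.operations.inodes[llfuse.ROOT_INODE]
    readme = root['by_id']['README']   # entries support dict-style lookup
    print(readme.readfrom(0, -1))
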
diff --git a/services/fuse/tests/test_crunchstat.py b/services/fuse/tests/test_crunchstat.py
new file mode 100644 (file)
index 0000000..f3bf211
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import subprocess
+
+from integration_test import IntegrationTest
+
+
+class CrunchstatTest(IntegrationTest):
+    def test_crunchstat(self):
+        output = subprocess.check_output(
+            ['./bin/arv-mount',
+             '--crunchstat-interval', '1',
+             self.mnt,
+             '--exec', 'echo', 'ok'])
+        self.assertEqual("ok\n", output)
diff --git a/services/fuse/tests/test_exec.py b/services/fuse/tests/test_exec.py
new file mode 100644 (file)
index 0000000..ab6e131
--- /dev/null
@@ -0,0 +1,63 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import arvados_fuse.command
+import json
+import multiprocessing
+import os
+import run_test_server
+import tempfile
+import unittest
+
+from .integration_test import workerPool
+
+try:
+    from shlex import quote
+except:
+    from pipes import quote
+
+def try_exec(mnt, cmd):
+    try:
+        arvados_fuse.command.Mount(
+            arvados_fuse.command.ArgumentParser().parse_args([
+                '--read-write',
+                '--mount-tmp=zzz',
+                '--unmount-timeout=0.1',
+                mnt,
+                '--exec'] + cmd)).run()
+    except SystemExit:
+        pass
+    else:
+        raise AssertionError('should have exited')
+
+
+class ExecMode(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        run_test_server.run()
+        run_test_server.run_keep(enforce_permissions=True, num_servers=2)
+        run_test_server.authorize_with('active')
+
+    @classmethod
+    def tearDownClass(cls):
+        run_test_server.stop_keep(num_servers=2)
+
+    def setUp(self):
+        self.mnt = tempfile.mkdtemp()
+        _, self.okfile = tempfile.mkstemp()
+
+    def tearDown(self):
+        os.rmdir(self.mnt)
+        os.unlink(self.okfile)
+
+    def test_exec(self):
+        workerPool().apply(try_exec, (self.mnt, [
+            'sh', '-c',
+            'echo -n foo >{}; cp {} {}'.format(
+                quote(os.path.join(self.mnt, 'zzz', 'foo.txt')),
+                quote(os.path.join(self.mnt, 'zzz', '.arvados#collection')),
+                quote(os.path.join(self.okfile)))]))
+        self.assertRegexpMatches(
+            json.load(open(self.okfile))['manifest_text'],
+            r' 0:3:foo.txt\n')
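The shell one-liner above leans on the special .arvados#collection file exposed inside a writable collection mount: reading it returns the collection record as JSON, so copying it out right after the write captures the freshly saved manifest. The same steps in plain Python (paths hypothetical):

    import json
    import os

    mnt = '/tmp/arv-mnt'   # hypothetical mount created with --mount-tmp=zzz
    with open(os.path.join(mnt, 'zzz', 'foo.txt'), 'w') as f:
        f.write('foo')
    with open(os.path.join(mnt, 'zzz', '.arvados#collection')) as f:
        record = json.load(f)   # the special file holds the record as JSON
    print(record['manifest_text'])   # ends with ' 0:3:foo.txt\n'
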
diff --git a/services/fuse/tests/test_inodes.py b/services/fuse/tests/test_inodes.py
new file mode 100644 (file)
index 0000000..07e6036
--- /dev/null
@@ -0,0 +1,152 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import arvados_fuse
+import mock
+import unittest
+import llfuse
+import logging
+
+class InodeTests(unittest.TestCase):
+    def test_inodes_basic(self):
+        cache = arvados_fuse.InodeCache(1000, 4)
+        inodes = arvados_fuse.Inodes(cache)
+
+        # Check that ent1 gets added to inodes
+        ent1 = mock.MagicMock()
+        ent1.in_use.return_value = False
+        ent1.has_ref.return_value = False
+        ent1.persisted.return_value = True
+        ent1.objsize.return_value = 500
+        inodes.add_entry(ent1)
+        self.assertIn(ent1.inode, inodes)
+        self.assertIs(inodes[ent1.inode], ent1)
+        self.assertEqual(500, cache.total())
+
+    def test_inodes_not_persisted(self):
+        cache = arvados_fuse.InodeCache(1000, 4)
+        inodes = arvados_fuse.Inodes(cache)
+
+        ent1 = mock.MagicMock()
+        ent1.in_use.return_value = False
+        ent1.has_ref.return_value = False
+        ent1.persisted.return_value = True
+        ent1.objsize.return_value = 500
+        inodes.add_entry(ent1)
+
+        # ent2 is not persisted, so it doesn't
+        # affect the cache total
+        ent2 = mock.MagicMock()
+        ent2.in_use.return_value = False
+        ent2.has_ref.return_value = False
+        ent2.persisted.return_value = False
+        ent2.objsize.return_value = 600
+        inodes.add_entry(ent2)
+        self.assertEqual(500, cache.total())
+
+    def test_inode_cleared(self):
+        cache = arvados_fuse.InodeCache(1000, 4)
+        inodes = arvados_fuse.Inodes(cache)
+
+        # Check that ent1 gets added to inodes
+        ent1 = mock.MagicMock()
+        ent1.in_use.return_value = False
+        ent1.has_ref.return_value = False
+        ent1.persisted.return_value = True
+        ent1.objsize.return_value = 500
+        inodes.add_entry(ent1)
+
+        # ent3 is persisted, adding it should cause ent1 to get cleared
+        ent3 = mock.MagicMock()
+        ent3.in_use.return_value = False
+        ent3.has_ref.return_value = False
+        ent3.persisted.return_value = True
+        ent3.objsize.return_value = 600
+
+        self.assertFalse(ent1.clear.called)
+        inodes.add_entry(ent3)
+
+        # Won't clear anything because min_entries = 4
+        self.assertEqual(2, len(cache._entries))
+        self.assertFalse(ent1.clear.called)
+        self.assertEqual(1100, cache.total())
+
+        # Change min_entries
+        cache.min_entries = 1
+        cache.cap_cache()
+        self.assertEqual(600, cache.total())
+        self.assertTrue(ent1.clear.called)
+
+        # Touching ent1 should cause ent3 to get cleared
+        self.assertFalse(ent3.clear.called)
+        cache.touch(ent1)
+        self.assertTrue(ent3.clear.called)
+        self.assertEqual(500, cache.total())
+
+    def test_clear_in_use(self):
+        cache = arvados_fuse.InodeCache(1000, 4)
+        inodes = arvados_fuse.Inodes(cache)
+
+        ent1 = mock.MagicMock()
+        ent1.in_use.return_value = True
+        ent1.has_ref.return_value = False
+        ent1.persisted.return_value = True
+        ent1.objsize.return_value = 500
+        inodes.add_entry(ent1)
+
+        ent3 = mock.MagicMock()
+        ent3.in_use.return_value = False
+        ent3.has_ref.return_value = True
+        ent3.persisted.return_value = True
+        ent3.objsize.return_value = 600
+        inodes.add_entry(ent3)
+
+        cache.min_entries = 1
+
+        # ent1, ent3 in use, has ref, can't be cleared
+        ent1.clear.called = False
+        ent3.clear.called = False
+        self.assertFalse(ent1.clear.called)
+        self.assertFalse(ent3.clear.called)
+        cache.touch(ent3)
+        self.assertFalse(ent1.clear.called)
+        self.assertFalse(ent3.clear.called)
+        self.assertFalse(ent3.kernel_invalidate.called)
+        self.assertEqual(1100, cache.total())
+
+        # ent1 still in use, ent3 doesn't have ref,
+        # so ent3 gets cleared
+        ent3.has_ref.return_value = False
+        ent1.clear.called = False
+        ent3.clear.called = False
+        cache.touch(ent3)
+        self.assertFalse(ent1.clear.called)
+        self.assertTrue(ent3.clear.called)
+        self.assertEqual(500, cache.total())
+
+    def test_delete(self):
+        cache = arvados_fuse.InodeCache(1000, 4)
+        inodes = arvados_fuse.Inodes(cache)
+
+        ent1 = mock.MagicMock()
+        ent1.in_use.return_value = False
+        ent1.has_ref.return_value = False
+        ent1.persisted.return_value = True
+        ent1.objsize.return_value = 500
+        inodes.add_entry(ent1)
+
+        ent3 = mock.MagicMock()
+        ent3.in_use.return_value = False
+        ent3.has_ref.return_value = False
+        ent3.persisted.return_value = True
+        ent3.objsize.return_value = 600
+
+        # Delete ent1
+        self.assertEqual(500, cache.total())
+        ent1.ref_count = 0
+        with llfuse.lock:
+            inodes.del_entry(ent1)
+        self.assertEqual(0, cache.total())
+        cache.touch(ent3)
+        self.assertEqual(600, cache.total())
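Taken together, these tests pin down the cache rules: only persisted() entries count against the byte cap; entries that are in_use() or has_ref() are never evicted; nothing is evicted while the entry count is at or below min_entries; and touch() both marks an entry recently used and re-runs the cap check. A condensed sketch with the same MagicMock scaffolding (this mirrors test_inode_cleared above; it is not additional coverage):

    import mock
    import arvados_fuse

    def entry(size, in_use=False, has_ref=False, persisted=True):
        e = mock.MagicMock()
        e.in_use.return_value = in_use
        e.has_ref.return_value = has_ref
        e.persisted.return_value = persisted
        e.objsize.return_value = size
        return e

    cache = arvados_fuse.InodeCache(1000, 1)   # 1000-byte cap, keep >= 1 entry
    inodes = arvados_fuse.Inodes(cache)
    a = entry(700)
    inodes.add_entry(a)
    b = entry(600)
    inodes.add_entry(b)    # 700 + 600 > 1000, so the older entry is cleared
    assert a.clear.called
    assert not b.clear.called
    assert cache.total() == 600
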
diff --git a/services/fuse/tests/test_mount.py b/services/fuse/tests/test_mount.py
new file mode 100644 (file)
index 0000000..d25ab71
--- /dev/null
@@ -0,0 +1,1187 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import json
+import llfuse
+import logging
+import mock
+import os
+import subprocess
+import time
+import unittest
+
+import arvados
+import arvados_fuse as fuse
+import run_test_server
+
+from mount_test_base import MountTestBase
+
+logger = logging.getLogger('arvados.arv-mount')
+
+
+class AssertWithTimeout(object):
+    """Allow some time for an assertion to pass."""
+
+    def __init__(self, timeout=0):
+        self.timeout = timeout
+
+    def __iter__(self):
+        self.deadline = time.time() + self.timeout
+        self.done = False
+        return self
+
+    def next(self):
+        if self.done:
+            raise StopIteration
+        return self.attempt
+
+    def attempt(self, fn, *args, **kwargs):
+        try:
+            fn(*args, **kwargs)
+        except AssertionError:
+            if time.time() > self.deadline:
+                raise
+            time.sleep(0.1)
+        else:
+            self.done = True
+
+
+class FuseMountTest(MountTestBase):
+    def setUp(self):
+        super(FuseMountTest, self).setUp()
+
+        cw = arvados.CollectionWriter()
+
+        cw.start_new_file('thing1.txt')
+        cw.write("data 1")
+        cw.start_new_file('thing2.txt')
+        cw.write("data 2")
+
+        cw.start_new_stream('dir1')
+        cw.start_new_file('thing3.txt')
+        cw.write("data 3")
+        cw.start_new_file('thing4.txt')
+        cw.write("data 4")
+
+        cw.start_new_stream('dir2')
+        cw.start_new_file('thing5.txt')
+        cw.write("data 5")
+        cw.start_new_file('thing6.txt')
+        cw.write("data 6")
+
+        cw.start_new_stream('dir2/dir3')
+        cw.start_new_file('thing7.txt')
+        cw.write("data 7")
+
+        cw.start_new_file('thing8.txt')
+        cw.write("data 8")
+
+        cw.start_new_stream('edgecases')
+        for f in ":/.../-/*/ ".split("/"):
+            cw.start_new_file(f)
+            cw.write('x')
+
+        for f in ":/.../-/*/ ".split("/"):
+            cw.start_new_stream('edgecases/dirs/' + f)
+            cw.start_new_file('x/x')
+            cw.write('x')
+
+        self.testcollection = cw.finish()
+        self.api.collections().create(body={"manifest_text":cw.manifest_text()}).execute()
+
+    def runTest(self):
+        self.make_mount(fuse.CollectionDirectory, collection_record=self.testcollection)
+
+        self.assertDirContents(None, ['thing1.txt', 'thing2.txt',
+                                      'edgecases', 'dir1', 'dir2'])
+        self.assertDirContents('dir1', ['thing3.txt', 'thing4.txt'])
+        self.assertDirContents('dir2', ['thing5.txt', 'thing6.txt', 'dir3'])
+        self.assertDirContents('dir2/dir3', ['thing7.txt', 'thing8.txt'])
+        self.assertDirContents('edgecases',
+                               "dirs/:/.../-/*/ ".split("/"))
+        self.assertDirContents('edgecases/dirs',
+                               ":/.../-/*/ ".split("/"))
+
+        files = {'thing1.txt': 'data 1',
+                 'thing2.txt': 'data 2',
+                 'dir1/thing3.txt': 'data 3',
+                 'dir1/thing4.txt': 'data 4',
+                 'dir2/thing5.txt': 'data 5',
+                 'dir2/thing6.txt': 'data 6',
+                 'dir2/dir3/thing7.txt': 'data 7',
+                 'dir2/dir3/thing8.txt': 'data 8'}
+
+        for k, v in files.items():
+            with open(os.path.join(self.mounttmp, k)) as f:
+                self.assertEqual(v, f.read())
+
+
+class FuseMagicTest(MountTestBase):
+    def setUp(self, api=None):
+        super(FuseMagicTest, self).setUp(api=api)
+
+        self.test_project = run_test_server.fixture('groups')['aproject']['uuid']
+        self.non_project_group = run_test_server.fixture('groups')['public']['uuid']
+        self.collection_in_test_project = run_test_server.fixture('collections')['foo_collection_in_aproject']['name']
+
+        cw = arvados.CollectionWriter()
+
+        cw.start_new_file('thing1.txt')
+        cw.write("data 1")
+
+        self.testcollection = cw.finish()
+        self.test_manifest = cw.manifest_text()
+        coll = self.api.collections().create(body={"manifest_text":self.test_manifest}).execute()
+        self.test_manifest_pdh = coll['portable_data_hash']
+
+    def runTest(self):
+        self.make_mount(fuse.MagicDirectory)
+
+        mount_ls = llfuse.listdir(self.mounttmp)
+        self.assertIn('README', mount_ls)
+        self.assertFalse(any(arvados.util.keep_locator_pattern.match(fn) or
+                             arvados.util.uuid_pattern.match(fn)
+                             for fn in mount_ls),
+                         "new FUSE MagicDirectory has no collections or projects")
+        self.assertDirContents(self.testcollection, ['thing1.txt'])
+        self.assertDirContents(os.path.join('by_id', self.testcollection),
+                               ['thing1.txt'])
+        self.assertIn(self.collection_in_test_project,
+                      llfuse.listdir(os.path.join(self.mounttmp, self.test_project)))
+        self.assertIn(self.collection_in_test_project,
+                      llfuse.listdir(os.path.join(self.mounttmp, 'by_id', self.test_project)))
+
+        mount_ls = llfuse.listdir(self.mounttmp)
+        self.assertIn('README', mount_ls)
+        self.assertIn(self.testcollection, mount_ls)
+        self.assertIn(self.testcollection,
+                      llfuse.listdir(os.path.join(self.mounttmp, 'by_id')))
+        self.assertIn(self.test_project, mount_ls)
+        self.assertIn(self.test_project,
+                      llfuse.listdir(os.path.join(self.mounttmp, 'by_id')))
+
+        with self.assertRaises(OSError):
+            llfuse.listdir(os.path.join(self.mounttmp, 'by_id', self.non_project_group))
+
+        files = {}
+        files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'
+
+        for k, v in files.items():
+            with open(os.path.join(self.mounttmp, k)) as f:
+                self.assertEqual(v, f.read())
+
+
+class FuseTagsTest(MountTestBase):
+    def runTest(self):
+        self.make_mount(fuse.TagsDirectory)
+
+        d1 = llfuse.listdir(self.mounttmp)
+        d1.sort()
+        self.assertEqual(['foo_tag'], d1)
+
+        d2 = llfuse.listdir(os.path.join(self.mounttmp, 'foo_tag'))
+        d2.sort()
+        self.assertEqual(['zzzzz-4zz18-fy296fx3hot09f7'], d2)
+
+        d3 = llfuse.listdir(os.path.join(self.mounttmp, 'foo_tag', 'zzzzz-4zz18-fy296fx3hot09f7'))
+        d3.sort()
+        self.assertEqual(['foo'], d3)
+
+
+class FuseTagsUpdateTest(MountTestBase):
+    def tag_collection(self, coll_uuid, tag_name):
+        return self.api.links().create(
+            body={'link': {'head_uuid': coll_uuid,
+                           'link_class': 'tag',
+                           'name': tag_name,
+        }}).execute()
+
+    def runTest(self):
+        self.make_mount(fuse.TagsDirectory, poll_time=1)
+
+        self.assertIn('foo_tag', llfuse.listdir(self.mounttmp))
+
+        bar_uuid = run_test_server.fixture('collections')['bar_file']['uuid']
+        self.tag_collection(bar_uuid, 'fuse_test_tag')
+        for attempt in AssertWithTimeout(10):
+            attempt(self.assertIn, 'fuse_test_tag', llfuse.listdir(self.mounttmp))
+        self.assertDirContents('fuse_test_tag', [bar_uuid])
+
+        baz_uuid = run_test_server.fixture('collections')['baz_file']['uuid']
+        l = self.tag_collection(baz_uuid, 'fuse_test_tag')
+        for attempt in AssertWithTimeout(10):
+            attempt(self.assertDirContents, 'fuse_test_tag', [bar_uuid, baz_uuid])
+
+        self.api.links().delete(uuid=l['uuid']).execute()
+        for attempt in AssertWithTimeout(10):
+            attempt(self.assertDirContents, 'fuse_test_tag', [bar_uuid])
+
+
+def fuseSharedTestHelper(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            # Double-check that we can open and read an object in this folder
+            # as a file, and that its contents are what we expect.
+            baz_path = os.path.join(
+                mounttmp,
+                'FUSE User',
+                'FUSE Test Project',
+                'collection in FUSE project',
+                'baz')
+            with open(baz_path) as f:
+                self.assertEqual("baz", f.read())
+
+            # check mtime on collection
+            st = os.stat(baz_path)
+            try:
+                mtime = st.st_mtime_ns / 1000000000
+            except AttributeError:
+                mtime = st.st_mtime
+            self.assertEqual(mtime, 1391448174)
+
+            # shared_dirs is a list of the directories exposed
+            # by fuse.SharedDirectory (i.e. any object visible
+            # to the current user)
+            shared_dirs = llfuse.listdir(mounttmp)
+            shared_dirs.sort()
+            self.assertIn('FUSE User', shared_dirs)
+
+            # fuse_user_objs is a list of the objects owned by the FUSE
+            # test user (which appear as directory entries in the 'FUSE
+            # User' directory)
+            fuse_user_objs = llfuse.listdir(os.path.join(mounttmp, 'FUSE User'))
+            fuse_user_objs.sort()
+            self.assertEqual(['FUSE Test Project',                    # project owned by user
+                              'collection #1 owned by FUSE',          # collection owned by user
+                              'collection #2 owned by FUSE',          # collection owned by user
+                          ], fuse_user_objs)
+
+            # test_proj_files is a list of the entries in the FUSE Test Project.
+            test_proj_files = llfuse.listdir(os.path.join(mounttmp, 'FUSE User', 'FUSE Test Project'))
+            test_proj_files.sort()
+            self.assertEqual(['collection in FUSE project'
+                          ], test_proj_files)
+
+
+    Test().runTest()
+
+class FuseSharedTest(MountTestBase):
+    def runTest(self):
+        self.make_mount(fuse.SharedDirectory,
+                        exclude=self.api.users().current().execute()['uuid'])
+        keep = arvados.keep.KeepClient()
+        keep.put("baz")
+
+        self.pool.apply(fuseSharedTestHelper, (self.mounttmp,))
+
+
+class FuseHomeTest(MountTestBase):
+    def runTest(self):
+        self.make_mount(fuse.ProjectDirectory,
+                        project_object=self.api.users().current().execute())
+
+        d1 = llfuse.listdir(self.mounttmp)
+        self.assertIn('Unrestricted public data', d1)
+
+        d2 = llfuse.listdir(os.path.join(self.mounttmp, 'Unrestricted public data'))
+        public_project = run_test_server.fixture('groups')[
+            'anonymously_accessible_project']
+        found_in = 0
+        found_not_in = 0
+        for name, item in run_test_server.fixture('collections').iteritems():
+            if 'name' not in item:
+                pass
+            elif item['owner_uuid'] == public_project['uuid']:
+                self.assertIn(item['name'], d2)
+                found_in += 1
+            else:
+                # Artificial assumption here: there is no public
+                # collection fixture with the same name as a
+                # non-public collection.
+                self.assertNotIn(item['name'], d2)
+                found_not_in += 1
+        self.assertNotEqual(0, found_in)
+        self.assertNotEqual(0, found_not_in)
+
+        d3 = llfuse.listdir(os.path.join(self.mounttmp, 'Unrestricted public data', 'GNU General Public License, version 3'))
+        self.assertEqual(["GNU_General_Public_License,_version_3.pdf"], d3)
+
+
+def fuseModifyFileTestHelperReadStartContents(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            d1 = llfuse.listdir(mounttmp)
+            self.assertEqual(["file1.txt"], d1)
+            with open(os.path.join(mounttmp, "file1.txt")) as f:
+                self.assertEqual("blub", f.read())
+    Test().runTest()
+
+def fuseModifyFileTestHelperReadEndContents(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            d1 = llfuse.listdir(mounttmp)
+            self.assertEqual(["file1.txt"], d1)
+            with open(os.path.join(mounttmp, "file1.txt")) as f:
+                self.assertEqual("plnp", f.read())
+    Test().runTest()
+
+class FuseModifyFileTest(MountTestBase):
+    def runTest(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        with collection.open("file1.txt", "w") as f:
+            f.write("blub")
+
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+
+        self.pool.apply(fuseModifyFileTestHelperReadStartContents, (self.mounttmp,))
+
+        with collection.open("file1.txt", "w") as f:
+            f.write("plnp")
+
+        self.pool.apply(fuseModifyFileTestHelperReadEndContents, (self.mounttmp,))
+
+
+class FuseAddFileToCollectionTest(MountTestBase):
+    def runTest(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        with collection.open("file1.txt", "w") as f:
+            f.write("blub")
+
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+
+        d1 = llfuse.listdir(self.mounttmp)
+        self.assertEqual(["file1.txt"], d1)
+
+        with collection.open("file2.txt", "w") as f:
+            f.write("plnp")
+
+        d1 = llfuse.listdir(self.mounttmp)
+        self.assertEqual(["file1.txt", "file2.txt"], sorted(d1))
+
+
+class FuseRemoveFileFromCollectionTest(MountTestBase):
+    def runTest(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        with collection.open("file1.txt", "w") as f:
+            f.write("blub")
+
+        with collection.open("file2.txt", "w") as f:
+            f.write("plnp")
+
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+
+        d1 = llfuse.listdir(self.mounttmp)
+        self.assertEqual(["file1.txt", "file2.txt"], sorted(d1))
+
+        collection.remove("file2.txt")
+
+        d1 = llfuse.listdir(self.mounttmp)
+        self.assertEqual(["file1.txt"], d1)
+
+
+def fuseCreateFileTestHelper(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            with open(os.path.join(mounttmp, "file1.txt"), "w") as f:
+                pass
+    Test().runTest()
+
+class FuseCreateFileTest(MountTestBase):
+    def runTest(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        collection.save_new()
+
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+        self.assertEqual(collection2["manifest_text"], "")
+
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+        self.assertTrue(m.writable())
+
+        self.assertNotIn("file1.txt", collection)
+
+        self.pool.apply(fuseCreateFileTestHelper, (self.mounttmp,))
+
+        self.assertIn("file1.txt", collection)
+
+        d1 = llfuse.listdir(self.mounttmp)
+        self.assertEqual(["file1.txt"], d1)
+
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
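+        # The expected manifest line is ". <md5>+<size>+A<signature> <pos>:<len>:<name>";
+        # d41d8cd98f00b204e9800998ecf8427e+0 is the md5 of zero-byte content.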
+        self.assertRegexpMatches(collection2["manifest_text"],
+            r'\. d41d8cd98f00b204e9800998ecf8427e\+0\+A\S+ 0:0:file1\.txt$')
+
+
+def fuseWriteFileTestHelperWriteFile(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            with open(os.path.join(mounttmp, "file1.txt"), "w") as f:
+                f.write("Hello world!")
+    Test().runTest()
+
+def fuseWriteFileTestHelperReadFile(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            with open(os.path.join(mounttmp, "file1.txt"), "r") as f:
+                self.assertEqual(f.read(), "Hello world!")
+    Test().runTest()
+
+class FuseWriteFileTest(MountTestBase):
+    def runTest(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+        self.assertTrue(m.writable())
+
+        self.assertNotIn("file1.txt", collection)
+
+        self.assertEqual(0, self.operations.write_counter.get())
+        self.pool.apply(fuseWriteFileTestHelperWriteFile, (self.mounttmp,))
+        self.assertEqual(12, self.operations.write_counter.get())
+
+        with collection.open("file1.txt") as f:
+            self.assertEqual(f.read(), "Hello world!")
+
+        self.assertEqual(0, self.operations.read_counter.get())
+        self.pool.apply(fuseWriteFileTestHelperReadFile, (self.mounttmp,))
+        self.assertEqual(12, self.operations.read_counter.get())
+
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+        self.assertRegexpMatches(collection2["manifest_text"],
+            r'\. 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
+
+
+def fuseUpdateFileTestHelper(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            with open(os.path.join(mounttmp, "file1.txt"), "w") as f:
+                f.write("Hello world!")
+
+            with open(os.path.join(mounttmp, "file1.txt"), "r+") as f:
+                fr = f.read()
+                self.assertEqual(fr, "Hello world!")
+                f.seek(0)
+                f.write("Hola mundo!")
+                f.seek(0)
+                fr = f.read()
+                self.assertEqual(fr, "Hola mundo!!")
+
+            with open(os.path.join(mounttmp, "file1.txt"), "r") as f:
+                self.assertEqual(f.read(), "Hola mundo!!")
+
+    Test().runTest()
+
+class FuseUpdateFileTest(MountTestBase):
+    def runTest(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+        self.assertTrue(m.writable())
+
+        # See note in MountTestBase.setUp
+        self.pool.apply(fuseUpdateFileTestHelper, (self.mounttmp,))
+
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
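+        # The updated file is stored as a new 11-byte block followed by the
+        # original 12-byte block; bytes 0-10 come from the new block and the
+        # final byte is the old block's trailing "!" at stream offset 22.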
+        self.assertRegexpMatches(collection2["manifest_text"],
+            r'\. daaef200ebb921e011e3ae922dd3266b\+11\+A\S+ 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:11:file1\.txt 22:1:file1\.txt$')
+
+
+def fuseMkdirTestHelper(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            with self.assertRaises(IOError):
+                with open(os.path.join(mounttmp, "testdir", "file1.txt"), "w") as f:
+                    f.write("Hello world!")
+
+            os.mkdir(os.path.join(mounttmp, "testdir"))
+
+            with self.assertRaises(OSError):
+                os.mkdir(os.path.join(mounttmp, "testdir"))
+
+            d1 = llfuse.listdir(mounttmp)
+            self.assertEqual(["testdir"], d1)
+
+            with open(os.path.join(mounttmp, "testdir", "file1.txt"), "w") as f:
+                f.write("Hello world!")
+
+            d1 = llfuse.listdir(os.path.join(mounttmp, "testdir"))
+            self.assertEqual(["file1.txt"], d1)
+
+    Test().runTest()
+
+class FuseMkdirTest(MountTestBase):
+    def runTest(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+        self.assertTrue(m.writable())
+
+        self.pool.apply(fuseMkdirTestHelper, (self.mounttmp,))
+
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+        self.assertRegexpMatches(collection2["manifest_text"],
+            r'\./testdir 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
+
+
+def fuseRmTestHelperWriteFile(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            os.mkdir(os.path.join(mounttmp, "testdir"))
+
+            with open(os.path.join(mounttmp, "testdir", "file1.txt"), "w") as f:
+                f.write("Hello world!")
+
+    Test().runTest()
+
+def fuseRmTestHelperDeleteFile(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            # Can't delete because it's not empty
+            with self.assertRaises(OSError):
+                os.rmdir(os.path.join(mounttmp, "testdir"))
+
+            d1 = llfuse.listdir(os.path.join(mounttmp, "testdir"))
+            self.assertEqual(["file1.txt"], d1)
+
+            # Delete file
+            os.remove(os.path.join(mounttmp, "testdir", "file1.txt"))
+
+            # Make sure it's empty
+            d1 = llfuse.listdir(os.path.join(mounttmp, "testdir"))
+            self.assertEqual([], d1)
+
+            # Try to delete it again
+            with self.assertRaises(OSError):
+                os.remove(os.path.join(mounttmp, "testdir", "file1.txt"))
+
+    Test().runTest()
+
+def fuseRmTestHelperRmdir(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            # Should be able to delete now that it is empty
+            os.rmdir(os.path.join(mounttmp, "testdir"))
+
+            # Make sure it's empty
+            d1 = llfuse.listdir(os.path.join(mounttmp))
+            self.assertEqual([], d1)
+
+            # Try to delete it again
+            with self.assertRaises(OSError):
+                os.rmdir(os.path.join(mounttmp, "testdir"))
+
+    Test().runTest()
+
+class FuseRmTest(MountTestBase):
+    def runTest(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+        self.assertTrue(m.writable())
+
+        self.pool.apply(fuseRmTestHelperWriteFile, (self.mounttmp,))
+
+        # Starting manifest
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+        self.assertRegexpMatches(collection2["manifest_text"],
+            r'\./testdir 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
+        self.pool.apply(fuseRmTestHelperDeleteFile, (self.mounttmp,))
+
+        # Empty directories are represented by an empty file named "."
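+        # (the regex's "\\056" matches the manifest's octal escape \056 for ".")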
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+        self.assertRegexpMatches(collection2["manifest_text"],
+                                 r'\./testdir d41d8cd98f00b204e9800998ecf8427e\+0\+A\S+ 0:0:\\056\n')
+
+        self.pool.apply(fuseRmTestHelperRmdir, (self.mounttmp,))
+
+        # manifest should be empty now.
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+        self.assertEqual(collection2["manifest_text"], "")
+
+
+def fuseMvFileTestHelperWriteFile(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            os.mkdir(os.path.join(mounttmp, "testdir"))
+
+            with open(os.path.join(mounttmp, "testdir", "file1.txt"), "w") as f:
+                f.write("Hello world!")
+
+    Test().runTest()
+
+def fuseMvFileTestHelperMoveFile(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            d1 = llfuse.listdir(os.path.join(mounttmp))
+            self.assertEqual(["testdir"], d1)
+            d1 = llfuse.listdir(os.path.join(mounttmp, "testdir"))
+            self.assertEqual(["file1.txt"], d1)
+
+            os.rename(os.path.join(mounttmp, "testdir", "file1.txt"), os.path.join(mounttmp, "file1.txt"))
+
+            d1 = llfuse.listdir(os.path.join(mounttmp))
+            self.assertEqual(["file1.txt", "testdir"], sorted(d1))
+            d1 = llfuse.listdir(os.path.join(mounttmp, "testdir"))
+            self.assertEqual([], d1)
+
+    Test().runTest()
+
+class FuseMvFileTest(MountTestBase):
+    def runTest(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+        self.assertTrue(m.writable())
+
+        self.pool.apply(fuseMvFileTestHelperWriteFile, (self.mounttmp,))
+
+        # Starting manifest
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+        self.assertRegexpMatches(collection2["manifest_text"],
+            r'\./testdir 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
+
+        self.pool.apply(fuseMvFileTestHelperMoveFile, (self.mounttmp,))
+
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+        self.assertRegexpMatches(collection2["manifest_text"],
+            r'\. 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt\n\./testdir d41d8cd98f00b204e9800998ecf8427e\+0\+A\S+ 0:0:\\056\n')
+
+
+def fuseRenameTestHelper(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            os.mkdir(os.path.join(mounttmp, "testdir"))
+
+            with open(os.path.join(mounttmp, "testdir", "file1.txt"), "w") as f:
+                f.write("Hello world!")
+
+    Test().runTest()
+
+class FuseRenameTest(MountTestBase):
+    def runTest(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+        self.assertTrue(m.writable())
+
+        self.pool.apply(fuseRenameTestHelper, (self.mounttmp,))
+
+        # Starting manifest
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+        self.assertRegexpMatches(collection2["manifest_text"],
+            r'\./testdir 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
+
+        d1 = llfuse.listdir(os.path.join(self.mounttmp))
+        self.assertEqual(["testdir"], d1)
+        d1 = llfuse.listdir(os.path.join(self.mounttmp, "testdir"))
+        self.assertEqual(["file1.txt"], d1)
+
+        os.rename(os.path.join(self.mounttmp, "testdir"), os.path.join(self.mounttmp, "testdir2"))
+
+        d1 = llfuse.listdir(os.path.join(self.mounttmp))
+        self.assertEqual(["testdir2"], sorted(d1))
+        d1 = llfuse.listdir(os.path.join(self.mounttmp, "testdir2"))
+        self.assertEqual(["file1.txt"], d1)
+
+        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+        self.assertRegexpMatches(collection2["manifest_text"],
+            r'\./testdir2 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
+
+
+class FuseUpdateFromEventTest(MountTestBase):
+    def runTest(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+
+        self.operations.listen_for_events()
+
+        d1 = llfuse.listdir(os.path.join(self.mounttmp))
+        self.assertEqual([], sorted(d1))
+
+        with arvados.collection.Collection(collection.manifest_locator(), api_client=self.api) as collection2:
+            with collection2.open("file1.txt", "w") as f:
+                f.write("foo")
+
+        for attempt in AssertWithTimeout(10):
+            attempt(self.assertEqual, ["file1.txt"], llfuse.listdir(os.path.join(self.mounttmp)))
+
+
+class FuseDeleteProjectEventTest(MountTestBase):
+    def runTest(self):
+
+        aproject = self.api.groups().create(body={
+            "name": "aproject",
+            "group_class": "project"
+        }).execute()
+
+        bproject = self.api.groups().create(body={
+            "name": "bproject",
+            "group_class": "project",
+            "owner_uuid": aproject["uuid"]
+        }).execute()
+
+        self.make_mount(fuse.ProjectDirectory,
+                        project_object=self.api.users().current().execute())
+
+        self.operations.listen_for_events()
+
+        d1 = llfuse.listdir(os.path.join(self.mounttmp, "aproject"))
+        self.assertEqual(["bproject"], sorted(d1))
+
+        self.api.groups().delete(uuid=bproject["uuid"]).execute()
+
+        for attempt in AssertWithTimeout(10):
+            attempt(self.assertEqual, [], llfuse.listdir(os.path.join(self.mounttmp, "aproject")))
+
+
+def fuseFileConflictTestHelper(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            with open(os.path.join(mounttmp, "file1.txt"), "w") as f:
+                f.write("bar")
+
+            d1 = sorted(llfuse.listdir(os.path.join(mounttmp)))
+            self.assertEqual(len(d1), 2)
+
+            with open(os.path.join(mounttmp, "file1.txt"), "r") as f:
+                self.assertEqual(f.read(), "bar")
+
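+            # The displaced local copy is renamed with a timestamp suffix:
+            # file1.txt~YYYYMMDD-HHMMSS~conflict~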
+            self.assertRegexpMatches(d1[1],
+                r'file1\.txt~\d\d\d\d\d\d\d\d-\d\d\d\d\d\d~conflict~')
+
+            with open(os.path.join(mounttmp, d1[1]), "r") as f:
+                self.assertEqual(f.read(), "foo")
+
+    Test().runTest()
+
+class FuseFileConflictTest(MountTestBase):
+    def runTest(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+
+        d1 = llfuse.listdir(os.path.join(self.mounttmp))
+        self.assertEqual([], sorted(d1))
+
+        with arvados.collection.Collection(collection.manifest_locator(), api_client=self.api) as collection2:
+            with collection2.open("file1.txt", "w") as f:
+                f.write("foo")
+
+        # See note in MountTestBase.setUp
+        self.pool.apply(fuseFileConflictTestHelper, (self.mounttmp,))
+
+
+def fuseUnlinkOpenFileTest(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            with open(os.path.join(mounttmp, "file1.txt"), "w+") as f:
+                f.write("foo")
+
+                d1 = llfuse.listdir(os.path.join(mounttmp))
+                self.assertEqual(["file1.txt"], sorted(d1))
+
+                os.remove(os.path.join(mounttmp, "file1.txt"))
+
+                d1 = llfuse.listdir(os.path.join(mounttmp))
+                self.assertEqual([], sorted(d1))
+
+                f.seek(0)
+                self.assertEqual(f.read(), "foo")
+                f.write("bar")
+
+                f.seek(0)
+                self.assertEqual(f.read(), "foobar")
+
+    Test().runTest()
+
+class FuseUnlinkOpenFileTest(MountTestBase):
+    def runTest(self):
+        collection = arvados.collection.Collection(api_client=self.api)
+        collection.save_new()
+
+        m = self.make_mount(fuse.CollectionDirectory)
+        with llfuse.lock:
+            m.new_collection(collection.api_response(), collection)
+
+        # See note in MountTestBase.setUp
+        self.pool.apply(fuseUnlinkOpenFileTest, (self.mounttmp,))
+
+        self.assertEqual(collection.manifest_text(), "")
+
+
+def fuseMvFileBetweenCollectionsTest1(mounttmp, uuid1, uuid2):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            with open(os.path.join(mounttmp, uuid1, "file1.txt"), "w") as f:
+                f.write("Hello world!")
+
+            d1 = os.listdir(os.path.join(mounttmp, uuid1))
+            self.assertEqual(["file1.txt"], sorted(d1))
+            d1 = os.listdir(os.path.join(mounttmp, uuid2))
+            self.assertEqual([], sorted(d1))
+
+    Test().runTest()
+
+def fuseMvFileBetweenCollectionsTest2(mounttmp, uuid1, uuid2):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            os.rename(os.path.join(mounttmp, uuid1, "file1.txt"), os.path.join(mounttmp, uuid2, "file2.txt"))
+
+            d1 = os.listdir(os.path.join(mounttmp, uuid1))
+            self.assertEqual([], sorted(d1))
+            d1 = os.listdir(os.path.join(mounttmp, uuid2))
+            self.assertEqual(["file2.txt"], sorted(d1))
+
+    Test().runTest()
+
+class FuseMvFileBetweenCollectionsTest(MountTestBase):
+    def runTest(self):
+        collection1 = arvados.collection.Collection(api_client=self.api)
+        collection1.save_new()
+
+        collection2 = arvados.collection.Collection(api_client=self.api)
+        collection2.save_new()
+
+        m = self.make_mount(fuse.MagicDirectory)
+
+        # See note in MountTestBase.setUp
+        self.pool.apply(fuseMvFileBetweenCollectionsTest1, (self.mounttmp,
+                                                  collection1.manifest_locator(),
+                                                  collection2.manifest_locator()))
+
+        collection1.update()
+        collection2.update()
+
+        self.assertRegexpMatches(collection1.manifest_text(), r"\. 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$")
+        self.assertEqual(collection2.manifest_text(), "")
+
+        self.pool.apply(fuseMvFileBetweenCollectionsTest2, (self.mounttmp,
+                                                  collection1.manifest_locator(),
+                                                  collection2.manifest_locator()))
+
+        collection1.update()
+        collection2.update()
+
+        self.assertEqual(collection1.manifest_text(), "")
+        self.assertRegexpMatches(collection2.manifest_text(), r"\. 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file2\.txt$")
+
+        collection1.stop_threads()
+        collection2.stop_threads()
+
+
+def fuseMvDirBetweenCollectionsTest1(mounttmp, uuid1, uuid2):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            os.mkdir(os.path.join(mounttmp, uuid1, "testdir"))
+            with open(os.path.join(mounttmp, uuid1, "testdir", "file1.txt"), "w") as f:
+                f.write("Hello world!")
+
+            d1 = os.listdir(os.path.join(mounttmp, uuid1))
+            self.assertEqual(["testdir"], sorted(d1))
+            d1 = os.listdir(os.path.join(mounttmp, uuid1, "testdir"))
+            self.assertEqual(["file1.txt"], sorted(d1))
+
+            d1 = os.listdir(os.path.join(mounttmp, uuid2))
+            self.assertEqual([], sorted(d1))
+
+    Test().runTest()
+
+
+def fuseMvDirBetweenCollectionsTest2(mounttmp, uuid1, uuid2):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            os.rename(os.path.join(mounttmp, uuid1, "testdir"), os.path.join(mounttmp, uuid2, "testdir2"))
+
+            d1 = os.listdir(os.path.join(mounttmp, uuid1))
+            self.assertEqual([], sorted(d1))
+
+            d1 = os.listdir(os.path.join(mounttmp, uuid2))
+            self.assertEqual(["testdir2"], sorted(d1))
+            d1 = os.listdir(os.path.join(mounttmp, uuid2, "testdir2"))
+            self.assertEqual(["file1.txt"], sorted(d1))
+
+            with open(os.path.join(mounttmp, uuid2, "testdir2", "file1.txt"), "r") as f:
+                self.assertEqual(f.read(), "Hello world!")
+
+    Test().runTest()
+
+class FuseMvDirBetweenCollectionsTest(MountTestBase):
+    def runTest(self):
+        collection1 = arvados.collection.Collection(api_client=self.api)
+        collection1.save_new()
+
+        collection2 = arvados.collection.Collection(api_client=self.api)
+        collection2.save_new()
+
+        m = self.make_mount(fuse.MagicDirectory)
+
+        # See note in MountTestBase.setUp
+        self.pool.apply(fuseMvDirBetweenCollectionsTest1, (self.mounttmp,
+                                                  collection1.manifest_locator(),
+                                                  collection2.manifest_locator()))
+
+        collection1.update()
+        collection2.update()
+
+        self.assertRegexpMatches(collection1.manifest_text(), r"\./testdir 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$")
+        self.assertEqual(collection2.manifest_text(), "")
+
+        self.pool.apply(fuseMvDirBetweenCollectionsTest2, (self.mounttmp,
+                                                  collection1.manifest_locator(),
+                                                  collection2.manifest_locator()))
+
+        collection1.update()
+        collection2.update()
+
+        self.assertEqual(collection1.manifest_text(), "")
+        self.assertRegexpMatches(collection2.manifest_text(), r"\./testdir2 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$")
+
+        collection1.stop_threads()
+        collection2.stop_threads()
+
+def fuseProjectMkdirTestHelper1(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            os.mkdir(os.path.join(mounttmp, "testcollection"))
+            with self.assertRaises(OSError):
+                os.mkdir(os.path.join(mounttmp, "testcollection"))
+    Test().runTest()
+
+def fuseProjectMkdirTestHelper2(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            with open(os.path.join(mounttmp, "testcollection", "file1.txt"), "w") as f:
+                f.write("Hello world!")
+            with self.assertRaises(OSError):
+                os.rmdir(os.path.join(mounttmp, "testcollection"))
+            os.remove(os.path.join(mounttmp, "testcollection", "file1.txt"))
+            with self.assertRaises(OSError):
+                os.remove(os.path.join(mounttmp, "testcollection"))
+            os.rmdir(os.path.join(mounttmp, "testcollection"))
+    Test().runTest()
+
+class FuseProjectMkdirRmdirTest(MountTestBase):
+    def runTest(self):
+        self.make_mount(fuse.ProjectDirectory,
+                        project_object=self.api.users().current().execute())
+
+        d1 = llfuse.listdir(self.mounttmp)
+        self.assertNotIn('testcollection', d1)
+
+        self.pool.apply(fuseProjectMkdirTestHelper1, (self.mounttmp,))
+
+        d1 = llfuse.listdir(self.mounttmp)
+        self.assertIn('testcollection', d1)
+
+        self.pool.apply(fuseProjectMkdirTestHelper2, (self.mounttmp,))
+
+        d1 = llfuse.listdir(self.mounttmp)
+        self.assertNotIn('testcollection', d1)
+
+
+def fuseProjectMvTestHelper1(mounttmp):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            d1 = llfuse.listdir(mounttmp)
+            self.assertNotIn('testcollection', d1)
+
+            os.mkdir(os.path.join(mounttmp, "testcollection"))
+
+            d1 = llfuse.listdir(mounttmp)
+            self.assertIn('testcollection', d1)
+
+            with self.assertRaises(OSError):
+                os.rename(os.path.join(mounttmp, "testcollection"), os.path.join(mounttmp, 'Unrestricted public data'))
+
+            os.rename(os.path.join(mounttmp, "testcollection"), os.path.join(mounttmp, 'Unrestricted public data', 'testcollection'))
+
+            d1 = llfuse.listdir(mounttmp)
+            self.assertNotIn('testcollection', d1)
+
+            d1 = llfuse.listdir(os.path.join(mounttmp, 'Unrestricted public data'))
+            self.assertIn('testcollection', d1)
+
+    Test().runTest()
+
+class FuseProjectMvTest(MountTestBase):
+    def runTest(self):
+        self.make_mount(fuse.ProjectDirectory,
+                        project_object=self.api.users().current().execute())
+
+        self.pool.apply(fuseProjectMvTestHelper1, (self.mounttmp,))
+
+
+def fuseFsyncTestHelper(mounttmp, k):
+    class Test(unittest.TestCase):
+        def runTest(self):
+            fd = os.open(os.path.join(mounttmp, k), os.O_RDONLY)
+            os.fsync(fd)
+            os.close(fd)
+
+    Test().runTest()
+
+class FuseFsyncTest(FuseMagicTest):
+    def runTest(self):
+        self.make_mount(fuse.MagicDirectory)
+        self.pool.apply(fuseFsyncTestHelper, (self.mounttmp, self.testcollection))
+
+
+class MagicDirApiError(FuseMagicTest):
+    def setUp(self):
+        api = mock.MagicMock()
+        super(MagicDirApiError, self).setUp(api=api)
+        api.collections().get().execute.side_effect = iter([
+            Exception('API fail'),
+            {
+                "manifest_text": self.test_manifest,
+                "portable_data_hash": self.test_manifest_pdh,
+            },
+        ])
+        api.keep.get.side_effect = Exception('Keep fail')
+
+    def runTest(self):
+        self.make_mount(fuse.MagicDirectory)
+
+        self.operations.inodes.inode_cache.cap = 1
+        self.operations.inodes.inode_cache.min_entries = 2
+
+        with self.assertRaises(OSError):
+            llfuse.listdir(os.path.join(self.mounttmp, self.testcollection))
+
+        llfuse.listdir(os.path.join(self.mounttmp, self.testcollection))
+
+
+class FuseUnitTest(unittest.TestCase):
+    def test_sanitize_filename(self):
+        acceptable = [
+            "foo.txt",
+            ".foo",
+            "..foo",
+            "...",
+            "foo...",
+            "foo..",
+            "foo.",
+            "-",
+            "\x01\x02\x03",
+            ]
+        unacceptable = [
+            "f\00",
+            "\00\00",
+            "/foo",
+            "foo/",
+            "//",
+            ]
+        for f in acceptable:
+            self.assertEqual(f, fuse.sanitize_filename(f))
+        for f in unacceptable:
+            self.assertNotEqual(f, fuse.sanitize_filename(f))
+            # The sanitized filename should be the same length, though.
+            self.assertEqual(len(f), len(fuse.sanitize_filename(f)))
+        # Special cases
+        self.assertEqual("_", fuse.sanitize_filename(""))
+        self.assertEqual("_", fuse.sanitize_filename("."))
+        self.assertEqual("__", fuse.sanitize_filename(".."))
+
+
+class FuseMagicTestPDHOnly(MountTestBase):
+    def setUp(self, api=None):
+        super(FuseMagicTestPDHOnly, self).setUp(api=api)
+
+        cw = arvados.CollectionWriter()
+
+        cw.start_new_file('thing1.txt')
+        cw.write("data 1")
+
+        self.testcollection = cw.finish()
+        self.test_manifest = cw.manifest_text()
+        created = self.api.collections().create(body={"manifest_text":self.test_manifest}).execute()
+        self.testcollectionuuid = str(created['uuid'])
+
+    def verify_pdh_only(self, pdh_only=False, skip_pdh_only=False):
+        if skip_pdh_only is True:
+            self.make_mount(fuse.MagicDirectory)    # in this case, the default by_id applies
+        else:
+            self.make_mount(fuse.MagicDirectory, pdh_only=pdh_only)
+
+        mount_ls = llfuse.listdir(self.mounttmp)
+        self.assertIn('README', mount_ls)
+        self.assertFalse(any(arvados.util.keep_locator_pattern.match(fn) or
+                             arvados.util.uuid_pattern.match(fn)
+                             for fn in mount_ls),
+                         "new FUSE MagicDirectory lists Collection")
+
+        # look up using pdh should succeed in all cases
+        self.assertDirContents(self.testcollection, ['thing1.txt'])
+        self.assertDirContents(os.path.join('by_id', self.testcollection),
+                               ['thing1.txt'])
+        mount_ls = llfuse.listdir(self.mounttmp)
+        self.assertIn('README', mount_ls)
+        self.assertIn(self.testcollection, mount_ls)
+        self.assertIn(self.testcollection,
+                      llfuse.listdir(os.path.join(self.mounttmp, 'by_id')))
+
+        files = {}
+        files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'
+
+        for k, v in files.items():
+            with open(k) as f:  # keys are already absolute paths
+                self.assertEqual(v, f.read())
+
+        # look up using uuid should fail when pdh_only is set
+        if pdh_only is True:
+            with self.assertRaises(OSError):
+                self.assertDirContents(os.path.join('by_id', self.testcollectionuuid),
+                               ['thing1.txt'])
+        else:
+            self.assertDirContents(os.path.join('by_id', self.testcollectionuuid),
+                               ['thing1.txt'])
+
+    def test_with_pdh_only_true(self):
+        self.verify_pdh_only(pdh_only=True)
+
+    def test_with_pdh_only_false(self):
+        self.verify_pdh_only(pdh_only=False)
+
+    def test_with_default_by_id(self):
+        self.verify_pdh_only(skip_pdh_only=True)
diff --git a/services/fuse/tests/test_mount_type.py b/services/fuse/tests/test_mount_type.py
new file mode 100644 (file)
index 0000000..79f2f32
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import logging
+import subprocess
+
+from .integration_test import IntegrationTest
+
+logger = logging.getLogger('arvados.arv-mount')
+
+
+class MountTypeTest(IntegrationTest):
+    @IntegrationTest.mount(argv=["--subtype=arv-mount-test"])
+    def test_mount_type(self):
+        self.pool_test(self.mnt)
+
+    @staticmethod
+    def _test_mount_type(self, mnt):
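+        # `mount` prints lines like "dev on /mnt type fuse.subtype (flags)";
+        # toks[2] is the mount point and toks[4] the filesystem type.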
+        self.assertEqual(["fuse.arv-mount-test"], [
+            toks[4]
+            for toks in [
+                line.split(' ')
+                for line in subprocess.check_output("mount").split("\n")
+            ]
+            if len(toks) > 4 and toks[2] == mnt
+        ])
diff --git a/services/fuse/tests/test_retry.py b/services/fuse/tests/test_retry.py
new file mode 100644 (file)
index 0000000..1c2ade2
--- /dev/null
@@ -0,0 +1,64 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import arvados
+import arvados_fuse.command
+import json
+import mock
+import os
+import pycurl
+import Queue
+import run_test_server
+import tempfile
+import unittest
+
+from .integration_test import IntegrationTest
+
+
+class KeepClientRetry(unittest.TestCase):
+    origKeepClient = arvados.keep.KeepClient
+
+    def setUp(self):
+        self.mnt = tempfile.mkdtemp()
+        run_test_server.authorize_with('active')
+
+    def tearDown(self):
+        os.rmdir(self.mnt)
+
+    @mock.patch('arvados_fuse.arvados.keep.KeepClient')
+    def _test_retry(self, num_retries, argv, kc):
+        kc.side_effect = lambda *args, **kw: self.origKeepClient(*args, **kw)
+        with arvados_fuse.command.Mount(
+                arvados_fuse.command.ArgumentParser().parse_args(
+                    argv+[self.mnt])):
+            pass
+        self.assertEqual(num_retries, kc.call_args[1].get('num_retries'))
+
+    def test_default_retry_3(self):
+        self._test_retry(3, [])
+
+    def test_retry_2(self):
+        self._test_retry(2, ['--retries=2'])
+
+    def test_no_retry(self):
+        self._test_retry(0, ['--retries=0'])
+
+class RetryPUT(IntegrationTest):
+    @mock.patch('time.sleep')
+    @IntegrationTest.mount(argv=['--read-write', '--mount-tmp=zzz'])
+    def test_retry_write(self, sleep):
+        mockedCurl = mock.Mock(spec=pycurl.Curl(), wraps=pycurl.Curl())
+        mockedCurl.perform.side_effect = Exception('mock error (ok)')
+        q = Queue.Queue()
+        q.put(mockedCurl)
+        q.put(pycurl.Curl())
+        q.put(pycurl.Curl())
+        with mock.patch('arvados.keep.KeepClient.KeepService._get_user_agent', side_effect=q.get_nowait):
+            self.pool_test(os.path.join(self.mnt, 'zzz'))
+            self.assertTrue(mockedCurl.perform.called)
+    @staticmethod
+    def _test_retry_write(self, tmp):
+        with open(os.path.join(tmp, 'foo'), 'w') as f:
+            f.write('foo')
+        json.load(open(os.path.join(tmp, '.arvados#collection')))
diff --git a/services/fuse/tests/test_tmp_collection.py b/services/fuse/tests/test_tmp_collection.py
new file mode 100644 (file)
index 0000000..b8e0646
--- /dev/null
@@ -0,0 +1,141 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import arvados
+import arvados_fuse
+import arvados_fuse.command
+import json
+import logging
+import os
+import tempfile
+import unittest
+
+from .integration_test import IntegrationTest
+from .mount_test_base import MountTestBase
+
+logger = logging.getLogger('arvados.arv-mount')
+
+
+class TmpCollectionArgsTest(unittest.TestCase):
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+
+    def tearDown(self):
+        os.rmdir(self.tmpdir)
+
+    def test_tmp_only(self):
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            '--mount-tmp', 'tmp1',
+            '--mount-tmp', 'tmp2',
+            self.tmpdir,
+        ])
+        self.assertIn(args.mode, [None, 'custom'])
+        self.assertEqual(['tmp1', 'tmp2'], args.mount_tmp)
+        for mtype in ['home', 'shared', 'by_id', 'by_pdh', 'by_tag']:
+            self.assertEqual([], getattr(args, 'mount_'+mtype))
+
+    def test_tmp_and_home(self):
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            '--mount-tmp', 'test_tmp',
+            '--mount-home', 'test_home',
+            self.tmpdir,
+        ])
+        self.assertIn(args.mode, [None, 'custom'])
+        self.assertEqual(['test_tmp'], args.mount_tmp)
+        self.assertEqual(['test_home'], args.mount_home)
+
+    def test_no_tmp(self):
+        args = arvados_fuse.command.ArgumentParser().parse_args([
+            self.tmpdir,
+        ])
+        self.assertEqual([], args.mount_tmp)
+
+
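+# Each writable tmp mount exposes its live collection record as JSON via
+# the special ".arvados#collection" file; we read manifest_text from it.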
+def current_manifest(tmpdir):
+    return json.load(open(
+        os.path.join(tmpdir, '.arvados#collection')
+    ))['manifest_text']
+
+
+class TmpCollectionTest(IntegrationTest):
+    mnt_args = [
+        '--read-write',
+        '--mount-tmp', 'zzz',
+    ]
+
+    @IntegrationTest.mount(argv=mnt_args+['--mount-tmp', 'yyy'])
+    def test_two_tmp(self):
+        self.pool_test(os.path.join(self.mnt, 'zzz'),
+                       os.path.join(self.mnt, 'yyy'))
+    @staticmethod
+    def _test_two_tmp(self, zzz, yyy):
+        self.assertEqual(current_manifest(zzz), "")
+        self.assertEqual(current_manifest(yyy), "")
+        with open(os.path.join(zzz, 'foo'), 'w') as f:
+            f.write('foo')
+        self.assertNotEqual(current_manifest(zzz), "")
+        self.assertEqual(current_manifest(yyy), "")
+        os.unlink(os.path.join(zzz, 'foo'))
+        with open(os.path.join(yyy, 'bar'), 'w') as f:
+            f.write('bar')
+        self.assertEqual(current_manifest(zzz), "")
+        self.assertNotEqual(current_manifest(yyy), "")
+
+    @IntegrationTest.mount(argv=mnt_args)
+    def test_tmp_empty(self):
+        self.pool_test(os.path.join(self.mnt, 'zzz'))
+    @staticmethod
+    def _test_tmp_empty(self, tmpdir):
+        self.assertEqual(current_manifest(tmpdir), "")
+
+    @IntegrationTest.mount(argv=mnt_args)
+    def test_tmp_onefile(self):
+        self.pool_test(os.path.join(self.mnt, 'zzz'))
+    @staticmethod
+    def _test_tmp_onefile(self, tmpdir):
+        with open(os.path.join(tmpdir, 'foo'), 'w') as f:
+            f.write('foo')
+        self.assertRegexpMatches(
+            current_manifest(tmpdir),
+            r'^\. acbd18db4cc2f85cedef654fccc4a4d8\+3(\+\S+)? 0:3:foo\n$')
+
+    @IntegrationTest.mount(argv=mnt_args)
+    def test_tmp_snapshots(self):
+        self.pool_test(os.path.join(self.mnt, 'zzz'))
+    @staticmethod
+    def _test_tmp_snapshots(self, tmpdir):
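+        # Each op is (filename, content, expected manifest regex); content
+        # of None means the file is unlinked instead of written.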
+        ops = [
+            ('foo', 'bar',
+             r'^\. 37b51d194a7513e45b56f6524f2d51f2\+3(\+\S+)? 0:3:foo\n$'),
+            ('foo', 'foo',
+             r'^\. acbd18db4cc2f85cedef654fccc4a4d8\+3(\+\S+)? 0:3:foo\n$'),
+            ('bar', 'bar',
+             r'^\. 37b51d194a7513e45b56f6524f2d51f2\+3(\+\S+)? acbd18db4cc2f85cedef654fccc4a4d8\+3(\+\S+)? 0:3:bar 3:3:foo\n$'),
+            ('foo', None,
+             r'^\. 37b51d194a7513e45b56f6524f2d51f2\+3(\+\S+)? 0:3:bar\n$'),
+            ('bar', None,
+             r'^$'),
+        ]
+        for _ in range(10):
+            for fn, content, expect in ops:
+                path = os.path.join(tmpdir, fn)
+                if content is None:
+                    os.unlink(path)
+                else:
+                    with open(path, 'w') as f:
+                        f.write(content)
+                self.assertRegexpMatches(current_manifest(tmpdir), expect)
+
+    @IntegrationTest.mount(argv=mnt_args)
+    def test_tmp_rewrite(self):
+        self.pool_test(os.path.join(self.mnt, 'zzz'))
+    @staticmethod
+    def _test_tmp_rewrite(self, tmpdir):
+        with open(os.path.join(tmpdir, "b1"), 'w') as f:
+            f.write("b1")
+        with open(os.path.join(tmpdir, "b2"), 'w') as f:
+            f.write("b2")
+        with open(os.path.join(tmpdir, "b1"), 'w') as f:
+            f.write("1b")
+        self.assertRegexpMatches(current_manifest(tmpdir), "^\. ed4f3f67c70b02b29c50ce1ea26666bd\+4(\+\S+)? 0:2:b1 2:2:b2\n$")
diff --git a/services/fuse/tests/test_token_expiry.py b/services/fuse/tests/test_token_expiry.py
new file mode 100644 (file)
index 0000000..9756b2e
--- /dev/null
@@ -0,0 +1,72 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import apiclient
+import arvados
+import arvados_fuse
+import logging
+import mock
+import multiprocessing
+import os
+import re
+import sys
+import time
+import unittest
+
+from .integration_test import IntegrationTest
+
+logger = logging.getLogger('arvados.arv-mount')
+
+class TokenExpiryTest(IntegrationTest):
+    def setUp(self):
+        super(TokenExpiryTest, self).setUp()
+        self.test_start_time = time.time()
+        self.time_now = int(time.time())+1
+
+    def fake_time(self):
+        self.time_now += 1
+        return self.time_now
+
+    orig_open = arvados_fuse.Operations.open
+    def fake_open(self, operations, *args, **kwargs):
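+        # Jump the mocked clock ahead 13 days per open() so previously
+        # issued blob signatures look stale and must be refreshed.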
+        self.time_now += 86400*13
+        logger.debug('opening file at time=%f', self.time_now)
+        return self.orig_open(operations, *args, **kwargs)
+
+    @mock.patch.object(arvados_fuse.Operations, 'open', autospec=True)
+    @mock.patch('time.time')
+    @mock.patch('arvados.keep.KeepClient.get')
+    @IntegrationTest.mount(argv=['--mount-by-id', 'zzz'])
+    def test_refresh_old_manifest(self, mocked_get, mocked_time, mocked_open):
+        # This test (and associated behavior) is still not strong
+        # enough. We should ensure old tokens are never used even if
+        # blobSignatureTtl seconds elapse between open() and
+        # read(). See https://dev.arvados.org/issues/10008
+
+        mocked_get.return_value = 'fake data'
+        mocked_time.side_effect = self.fake_time
+        mocked_open.side_effect = self.fake_open
+
+        with mock.patch.object(self.mount.api, 'collections', wraps=self.mount.api.collections) as mocked_collections:
+            mocked_collections.return_value = mocked_collections()
+            with mock.patch.object(self.mount.api.collections(), 'get', wraps=self.mount.api.collections().get) as mocked_get:
+                self.pool_test(os.path.join(self.mnt, 'zzz'))
+
+        # open() several times here to make sure we don't reach our
+        # quota of mocked_get.call_count dishonestly (e.g., the first
+        # open causes 5 mocked_get, and the rest cause none).
+        self.assertEqual(8, mocked_open.call_count)
+        self.assertGreaterEqual(
+            mocked_get.call_count, 8,
+            'Not enough calls to collections().get(): expected 8, got {!r}'.format(
+                mocked_get.mock_calls))
+
+    @staticmethod
+    def _test_refresh_old_manifest(self, zzz):
+        uuid = 'zzzzz-4zz18-op4e2lbej01tcvu'
+        fnm = 'zzzzz-8i9sb-0vsrcqi7whchuil.log.txt'
+        os.listdir(os.path.join(zzz, uuid))
+        for _ in range(8):
+            with open(os.path.join(zzz, uuid, fnm)) as f:
+                f.read()
diff --git a/services/fuse/tests/test_unmount.py b/services/fuse/tests/test_unmount.py
new file mode 100644 (file)
index 0000000..bf180be
--- /dev/null
@@ -0,0 +1,137 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import arvados_fuse.unmount
+import os
+import subprocess
+import shutil
+import tempfile
+import time
+import unittest
+
+from .integration_test import IntegrationTest
+
+class UnmountTest(IntegrationTest):
+    def setUp(self):
+        super(UnmountTest, self).setUp()
+        self.tmp = self.mnt
+        self.to_delete = []
+
+    def tearDown(self):
+        for d in self.to_delete:
+            os.rmdir(d)
+        super(UnmountTest, self).tearDown()
+
+    def test_replace(self):
+        subprocess.check_call(
+            ['./bin/arv-mount', '--subtype', 'test', '--replace',
+             self.mnt])
+        subprocess.check_call(
+            ['./bin/arv-mount', '--subtype', 'test', '--replace',
+             '--unmount-timeout', '10',
+             self.mnt])
+        subprocess.check_call(
+            ['./bin/arv-mount', '--subtype', 'test', '--replace',
+             '--unmount-timeout', '10',
+             self.mnt,
+             '--exec', 'true'])
+        for m in subprocess.check_output(['mount']).splitlines():
+            self.assertNotIn(' '+self.mnt+' ', m)
+
+    def _mounted(self, mounts):
+        all_mounts = subprocess.check_output(['mount'])
+        return [m for m in mounts
+                if ' '+m+' ' in all_mounts]
+
+    def _wait_for_mounts(self, mounts):
+        deadline = time.time() + 10
+        while self._mounted(mounts) != mounts:
+            time.sleep(0.1)
+            self.assertLess(time.time(), deadline)
+
+    def test_unmount_subtype(self):
+        mounts = []
+        for d in ['foo', 'bar']:
+            mnt = self.tmp+'/'+d
+            os.mkdir(mnt)
+            self.to_delete.insert(0, mnt)
+            mounts.append(mnt)
+            subprocess.check_call(
+                ['./bin/arv-mount', '--subtype', d, mnt])
+
+        self._wait_for_mounts(mounts)
+        self.assertEqual(mounts, self._mounted(mounts))
+        subprocess.call(['./bin/arv-mount', '--subtype', 'baz', '--unmount-all', self.tmp])
+        self.assertEqual(mounts, self._mounted(mounts))
+        subprocess.call(['./bin/arv-mount', '--subtype', 'bar', '--unmount', mounts[0]])
+        self.assertEqual(mounts, self._mounted(mounts))
+        subprocess.call(['./bin/arv-mount', '--subtype', '', '--unmount', self.tmp])
+        self.assertEqual(mounts, self._mounted(mounts))
+        subprocess.check_call(['./bin/arv-mount', '--subtype', 'foo', '--unmount', mounts[0]])
+        self.assertEqual(mounts[1:], self._mounted(mounts))
+        subprocess.check_call(['./bin/arv-mount', '--subtype', '', '--unmount-all', mounts[0]])
+        self.assertEqual(mounts[1:], self._mounted(mounts))
+        subprocess.check_call(['./bin/arv-mount', '--subtype', 'bar', '--unmount-all', self.tmp])
+        self.assertEqual([], self._mounted(mounts))
+
+    def test_unmount_children(self):
+        for d in ['foo', 'foo/bar', 'bar']:
+            mnt = self.tmp+'/'+d
+            os.mkdir(mnt)
+            self.to_delete.insert(0, mnt)
+        mounts = []
+        for d in ['bar', 'foo/bar']:
+            mnt = self.tmp+'/'+d
+            mounts.append(mnt)
+            subprocess.check_call(
+                ['./bin/arv-mount', '--subtype', 'test', mnt])
+
+        self._wait_for_mounts(mounts)
+        self.assertEqual(mounts, self._mounted(mounts))
+        subprocess.check_call(['./bin/arv-mount', '--unmount', self.tmp])
+        self.assertEqual(mounts, self._mounted(mounts))
+        subprocess.check_call(['./bin/arv-mount', '--unmount-all', self.tmp])
+        self.assertEqual([], self._mounted(mounts))
+
+
+class SaferRealpath(unittest.TestCase):
+    def setUp(self):
+        self.tmp = tempfile.mkdtemp()
+
+    def tearDown(self):
+        shutil.rmtree(self.tmp)
+
+    def test_safer_realpath(self):
+        os.mkdir(self.tmp+"/dir")
+        os.mkdir(self.tmp+"/dir/dir2")
+        os.symlink("missing", self.tmp+"/relative-missing")
+        os.symlink("dir", self.tmp+"/./relative-dir")
+        os.symlink("relative-dir", self.tmp+"/relative-indirect")
+        os.symlink(self.tmp+"/dir", self.tmp+"/absolute-dir")
+        os.symlink("./dir/../loop", self.tmp+"/loop")
+        os.symlink(".", self.tmp+"/dir/self")
+        os.symlink("..", self.tmp+"/dir/dir2/parent")
+        os.symlink("../dir3", self.tmp+"/dir/dir2/sibling")
+        os.symlink("../missing/../danger", self.tmp+"/dir/tricky")
+        os.symlink("/proc/1/fd/12345", self.tmp+"/eperm")
+        for (inpath, outpath, ok) in [
+                ("dir/self", "dir", True),
+                ("dir/dir2/parent", "dir", True),
+                ("dir/dir2/sibling", "dir/dir3", False),
+                ("dir", "dir", True),
+                ("relative-dir", "dir", True),
+                ("relative-missing", "missing", False),
+                ("relative-indirect", "dir", True),
+                ("absolute-dir", "dir", True),
+                ("loop", "loop", False),
+                # "missing" doesn't exist, so "missing/.." isn't our
+                # tmpdir; it's important not to contract this to just
+                # "danger".
+                ("dir/tricky", "missing/../danger", False),
+                ("eperm", "/proc/1/fd/12345", False),
+        ]:
+            if not outpath.startswith('/'):
+                outpath = self.tmp + '/' + outpath
+            self.assertEqual((outpath, ok), arvados_fuse.unmount.safer_realpath(self.tmp+"/"+inpath))
diff --git a/services/health/arvados-health.service b/services/health/arvados-health.service
new file mode 100644 (file)
index 0000000..dac7c3a
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados healthcheck server
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/config.yml
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/arvados-health
+Restart=always
+RestartSec=1
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/services/health/main.go b/services/health/main.go
new file mode 100644 (file)
index 0000000..21fcf4d
--- /dev/null
@@ -0,0 +1,71 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "flag"
+       "fmt"
+       "net/http"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/health"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       log "github.com/sirupsen/logrus"
+)
+
+var version = "dev"
+
+func main() {
+       configFile := flag.String("config", arvados.DefaultConfigFile, "`path` to arvados configuration file")
+       getVersion := flag.Bool("version", false, "Print version information and exit.")
+       flag.Parse()
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("arvados-health %s\n", version)
+               return
+       }
+
+       log.SetFormatter(&log.JSONFormatter{
+               TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
+       })
+       log.Printf("arvados-health %s started", version)
+
+       cfg, err := arvados.GetConfig(*configFile)
+       if err != nil {
+               log.Fatal(err)
+       }
+       clusterCfg, err := cfg.GetCluster("")
+       if err != nil {
+               log.Fatal(err)
+       }
+       nodeCfg, err := clusterCfg.GetNodeProfile("")
+       if err != nil {
+               log.Fatal(err)
+       }
+
+       log := log.WithField("Service", "Health")
+       srv := &httpserver.Server{
+               Addr: nodeCfg.Health.Listen,
+               Server: http.Server{
+                       Handler: &health.Aggregator{
+                               Config: cfg,
+                               Log: func(req *http.Request, err error) {
+                                       log.WithField("RemoteAddr", req.RemoteAddr).
+                                               WithField("Path", req.URL.Path).
+                                               WithError(err).
+                                               Info("HTTP request")
+                               },
+                       },
+               },
+       }
+       if err := srv.Start(); err != nil {
+               log.Fatal(err)
+       }
+       log.WithField("Listen", srv.Addr).Info("listening")
+       if err := srv.Wait(); err != nil {
+               log.Fatal(err)
+       }
+}
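+
+// Illustrative sketch (endpoint and port are assumptions, not shown
+// in this file): with the node profile's Health.Listen set to
+// ":9180", the aggregated status can be queried with a management
+// token, e.g.
+//
+//   curl -H "Authorization: Bearer $token" http://localhost:9180/_health/all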
diff --git a/services/keep-balance/balance.go b/services/keep-balance/balance.go
new file mode 100644 (file)
index 0000000..836be2e
--- /dev/null
@@ -0,0 +1,1075 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "crypto/md5"
+       "fmt"
+       "log"
+       "math"
+       "runtime"
+       "sort"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       "github.com/sirupsen/logrus"
+)
+
+// Balancer compares the contents of keepstore servers with the
+// collections stored in Arvados, and issues pull/trash requests
+// needed to get (closer to) the optimal data layout.
+//
+// In the optimal data layout: every data block referenced by a
+// collection is replicated at least as many times as desired by the
+// collection; there are no unreferenced data blocks older than
+// BlobSignatureTTL; and all N existing replicas of a given data block
+// are in the N best positions in rendezvous probe order.
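+//
+// For example, a block referenced only by a collection with
+// replication_desired=2 should have replicas on the first two
+// servers in that block's rendezvous probe order; any further
+// replicas older than BlobSignatureTTL become trash candidates.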
+type Balancer struct {
+       Logger  *logrus.Logger
+       Dumper  *logrus.Logger
+       Metrics *metrics
+
+       *BlockStateMap
+       KeepServices       map[string]*KeepService
+       DefaultReplication int
+       MinMtime           int64
+
+       classes       []string
+       mounts        int
+       mountsByClass map[string]map[*KeepMount]bool
+       collScanned   int
+       serviceRoots  map[string]string
+       errors        []error
+       stats         balancerStats
+       mutex         sync.Mutex
+}
+
+// Run performs a balance operation using the given config and
+// runOptions, and returns RunOptions suitable for passing to a
+// subsequent balance operation.
+//
+// Run should only be called once on a given Balancer object.
+//
+// Typical usage:
+//
+//   runOptions, err = (&Balancer{}).Run(config, runOptions)
+func (bal *Balancer) Run(config Config, runOptions RunOptions) (nextRunOptions RunOptions, err error) {
+       nextRunOptions = runOptions
+
+       defer bal.time("sweep", "wall clock time to run one full sweep")()
+
+       if len(config.KeepServiceList.Items) > 0 {
+               err = bal.SetKeepServices(config.KeepServiceList)
+       } else {
+               err = bal.DiscoverKeepServices(&config.Client, config.KeepServiceTypes)
+       }
+       if err != nil {
+               return
+       }
+
+       for _, srv := range bal.KeepServices {
+               err = srv.discoverMounts(&config.Client)
+               if err != nil {
+                       return
+               }
+       }
+       bal.cleanupMounts()
+
+       if err = bal.CheckSanityEarly(&config.Client); err != nil {
+               return
+       }
+       rs := bal.rendezvousState()
+       if runOptions.CommitTrash && rs != runOptions.SafeRendezvousState {
+               if runOptions.SafeRendezvousState != "" {
+                       bal.logf("notice: KeepServices list has changed since last run")
+               }
+               bal.logf("clearing existing trash lists, in case the new rendezvous order differs from previous run")
+               if err = bal.ClearTrashLists(&config.Client); err != nil {
+                       return
+               }
+               // The current rendezvous state becomes "safe" (i.e.,
+               // OK to compute changes for that state without
+               // clearing existing trash lists) only now, after we
+               // succeed in clearing existing trash lists.
+               nextRunOptions.SafeRendezvousState = rs
+       }
+       if err = bal.GetCurrentState(&config.Client, config.CollectionBatchSize, config.CollectionBuffers); err != nil {
+               return
+       }
+       bal.ComputeChangeSets()
+       bal.PrintStatistics()
+       if err = bal.CheckSanityLate(); err != nil {
+               return
+       }
+       if runOptions.CommitPulls {
+               err = bal.CommitPulls(&config.Client)
+               if err != nil {
+                       // Skip trash if we can't pull. (Too cautious?)
+                       return
+               }
+       }
+       if runOptions.CommitTrash {
+               err = bal.CommitTrash(&config.Client)
+       }
+       return
+}
+
+// SetKeepServices sets the list of KeepServices to operate on.
+func (bal *Balancer) SetKeepServices(srvList arvados.KeepServiceList) error {
+       bal.KeepServices = make(map[string]*KeepService)
+       for _, srv := range srvList.Items {
+               bal.KeepServices[srv.UUID] = &KeepService{
+                       KeepService: srv,
+                       ChangeSet:   &ChangeSet{},
+               }
+       }
+       return nil
+}
+
+// DiscoverKeepServices sets the list of KeepServices by calling the
+// API to get a list of all services, and selecting the ones whose
+// ServiceType is in okTypes.
+func (bal *Balancer) DiscoverKeepServices(c *arvados.Client, okTypes []string) error {
+       bal.KeepServices = make(map[string]*KeepService)
+       ok := make(map[string]bool)
+       for _, t := range okTypes {
+               ok[t] = true
+       }
+       return c.EachKeepService(func(srv arvados.KeepService) error {
+               if ok[srv.ServiceType] {
+                       bal.KeepServices[srv.UUID] = &KeepService{
+                               KeepService: srv,
+                               ChangeSet:   &ChangeSet{},
+                       }
+               } else {
+                       bal.logf("skipping %v with service type %q", srv.UUID, srv.ServiceType)
+               }
+               return nil
+       })
+}
+
+func (bal *Balancer) cleanupMounts() {
+       rwdev := map[string]*KeepService{}
+       for _, srv := range bal.KeepServices {
+               for _, mnt := range srv.mounts {
+                       if !mnt.ReadOnly && mnt.DeviceID != "" {
+                               rwdev[mnt.DeviceID] = srv
+                       }
+               }
+       }
+       // Drop the readonly mounts whose device is mounted RW
+       // elsewhere.
+       for _, srv := range bal.KeepServices {
+               var dedup []*KeepMount
+               for _, mnt := range srv.mounts {
+                       if mnt.ReadOnly && rwdev[mnt.DeviceID] != nil {
+                               bal.logf("skipping srv %s readonly mount %q because same device %q is mounted read-write on srv %s", srv, mnt.UUID, mnt.DeviceID, rwdev[mnt.DeviceID])
+                       } else {
+                               dedup = append(dedup, mnt)
+                       }
+               }
+               srv.mounts = dedup
+       }
+       for _, srv := range bal.KeepServices {
+               for _, mnt := range srv.mounts {
+                       if mnt.Replication <= 0 {
+                               log.Printf("%s: mount %s reports replication=%d, using replication=1", srv, mnt.UUID, mnt.Replication)
+                               mnt.Replication = 1
+                       }
+               }
+       }
+}
+
+// CheckSanityEarly checks for configuration and runtime errors that
+// can be detected before GetCurrentState() and ComputeChangeSets()
+// are called.
+//
+// If it returns an error, it is pointless to run GetCurrentState or
+// ComputeChangeSets: after doing so, the statistics would be
+// meaningless and it would be dangerous to run any Commit methods.
+func (bal *Balancer) CheckSanityEarly(c *arvados.Client) error {
+       u, err := c.CurrentUser()
+       if err != nil {
+               return fmt.Errorf("CurrentUser(): %v", err)
+       }
+       if !u.IsActive || !u.IsAdmin {
+               return fmt.Errorf("current user (%s) is not an active admin user", u.UUID)
+       }
+       for _, srv := range bal.KeepServices {
+               if srv.ServiceType == "proxy" {
+                       return fmt.Errorf("config error: %s: proxy servers cannot be balanced", srv)
+               }
+       }
+       return nil
+}
+
+// rendezvousState returns a fingerprint (e.g., a sorted list of
+// UUID+host+port) of the current set of keep services.
+func (bal *Balancer) rendezvousState() string {
+       srvs := make([]string, 0, len(bal.KeepServices))
+       for _, srv := range bal.KeepServices {
+               srvs = append(srvs, srv.String())
+       }
+       sort.Strings(srvs)
+       return strings.Join(srvs, "; ")
+}
+
+// ClearTrashLists sends an empty trash list to each keep
+// service. Calling this before GetCurrentState avoids races.
+//
+// When a block appears in an index, we assume that replica will still
+// exist after we delete other replicas on other servers. However,
+// it's possible that a previous rebalancing operation made different
+// decisions (e.g., servers were added/removed, and rendezvous order
+// changed). In this case, the replica might already be on that
+// server's trash list, and it might be deleted before we send a
+// replacement trash list.
+//
+// We avoid this problem if we clear all trash lists before getting
+// indexes. (We also assume there is only one rebalancing process
+// running at a time.)
+func (bal *Balancer) ClearTrashLists(c *arvados.Client) error {
+       for _, srv := range bal.KeepServices {
+               srv.ChangeSet = &ChangeSet{}
+       }
+       return bal.CommitTrash(c)
+}
+
+// GetCurrentState determines the current replication state, and the
+// desired replication level, for every block that is either
+// retrievable or referenced.
+//
+// It determines the current replication state by reading the block index
+// from every known Keep service.
+//
+// It determines the desired replication level by retrieving all
+// collection manifests in the database (API server).
+//
+// It encodes the resulting information in BlockStateMap.
+func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) error {
+       defer bal.time("get_state", "wall clock time to get current state")()
+       bal.BlockStateMap = NewBlockStateMap()
+
+       dd, err := c.DiscoveryDocument()
+       if err != nil {
+               return err
+       }
+       bal.DefaultReplication = dd.DefaultCollectionReplication
+       bal.MinMtime = time.Now().UnixNano() - dd.BlobSignatureTTL*1e9
+
+       errs := make(chan error, 1)
+       wg := sync.WaitGroup{}
+
+       // When a device is mounted more than once, we will get its
+       // index only once, and call AddReplicas on all of the mounts.
+       // equivMount keys are the mounts that will be indexed, and
+       // each value is a list of mounts to apply the received index
+       // to.
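+       // For example, if two mounts on different servers report the
+       // same DeviceID, one of them becomes an equivMount key and
+       // the other appears only in its value list; both then receive
+       // the same index entries via AddReplicas.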
+       equivMount := map[*KeepMount][]*KeepMount{}
+       // deviceMount maps each device ID to the one mount that will
+       // be indexed for that device.
+       deviceMount := map[string]*KeepMount{}
+       for _, srv := range bal.KeepServices {
+               for _, mnt := range srv.mounts {
+                       equiv := deviceMount[mnt.DeviceID]
+                       if equiv == nil {
+                               equiv = mnt
+                               if mnt.DeviceID != "" {
+                                       deviceMount[mnt.DeviceID] = equiv
+                               }
+                       }
+                       equivMount[equiv] = append(equivMount[equiv], mnt)
+               }
+       }
+
+       // Start one goroutine for each (non-redundant) mount:
+       // retrieve the index, and add the returned blocks to
+       // BlockStateMap.
+       for _, mounts := range equivMount {
+               wg.Add(1)
+               go func(mounts []*KeepMount) {
+                       defer wg.Done()
+                       bal.logf("mount %s: retrieve index from %s", mounts[0], mounts[0].KeepService)
+                       idx, err := mounts[0].KeepService.IndexMount(c, mounts[0].UUID, "")
+                       if err != nil {
+                               select {
+                               case errs <- fmt.Errorf("%s: retrieve index: %v", mounts[0], err):
+                               default:
+                               }
+                               return
+                       }
+                       if len(errs) > 0 {
+                               // Some other goroutine encountered an
+                               // error -- any further effort here
+                               // will be wasted.
+                               return
+                       }
+                       for _, mount := range mounts {
+                               bal.logf("%s: add %d entries to map", mount, len(idx))
+                               bal.BlockStateMap.AddReplicas(mount, idx)
+                               bal.logf("%s: added %d entries to map at %dx (%d replicas)", mount, len(idx), mount.Replication, len(idx)*mount.Replication)
+                       }
+                       bal.logf("mount %s: index done", mounts[0])
+               }(mounts)
+       }
+
+       // collQ buffers incoming collections so we can start fetching
+       // the next page without waiting for the current page to
+       // finish processing.
+       collQ := make(chan arvados.Collection, bufs)
+
+       // Start a goroutine to process collections. (We could use a
+       // worker pool here, but even with a single worker we already
+       // process collections much faster than we can retrieve them.)
+       wg.Add(1)
+       go func() {
+               defer wg.Done()
+               for coll := range collQ {
+                       err := bal.addCollection(coll)
+                       if err != nil {
+                               select {
+                               case errs <- err:
+                               default:
+                               }
+                               for range collQ {
+                               }
+                               return
+                       }
+                       bal.collScanned++
+               }
+       }()
+
+       // Start a goroutine to retrieve all collections from the
+       // Arvados database and send them to collQ for processing.
+       wg.Add(1)
+       go func() {
+               defer wg.Done()
+               err = EachCollection(c, pageSize,
+                       func(coll arvados.Collection) error {
+                               collQ <- coll
+                               if len(errs) > 0 {
+                                       // some other GetCurrentState
+                                       // error happened: no point
+                                       // getting any more
+                                       // collections.
+                                       return fmt.Errorf("")
+                               }
+                               return nil
+                       }, func(done, total int) {
+                               bal.logf("collections: %d/%d", done, total)
+                       })
+               close(collQ)
+               if err != nil {
+                       select {
+                       case errs <- err:
+                       default:
+                       }
+               }
+       }()
+
+       wg.Wait()
+       if len(errs) > 0 {
+               return <-errs
+       }
+       return nil
+}
+
+func (bal *Balancer) addCollection(coll arvados.Collection) error {
+       blkids, err := coll.SizedDigests()
+       if err != nil {
+               bal.mutex.Lock()
+               bal.errors = append(bal.errors, fmt.Errorf("%v: %v", coll.UUID, err))
+               bal.mutex.Unlock()
+               return nil
+       }
+       repl := bal.DefaultReplication
+       if coll.ReplicationDesired != nil {
+               repl = *coll.ReplicationDesired
+       }
+       debugf("%v: %d blocks x%d", coll.UUID, len(blkids), repl)
+       bal.BlockStateMap.IncreaseDesired(coll.StorageClassesDesired, repl, blkids)
+       return nil
+}
+
+// ComputeChangeSets compares, for each known block, the current and
+// desired replication states. If it is possible to get closer to the
+// desired state by copying or deleting blocks, it adds those changes
+// to the relevant KeepServices' ChangeSets.
+//
+// It does not actually apply any of the computed changes.
+func (bal *Balancer) ComputeChangeSets() {
+       // This just calls balanceBlock() once for each block, using a
+       // pool of worker goroutines.
+       defer bal.time("changeset_compute", "wall clock time to compute changesets")()
+       bal.setupLookupTables()
+
+       type balanceTask struct {
+               blkid arvados.SizedDigest
+               blk   *BlockState
+       }
+       workers := runtime.GOMAXPROCS(-1)
+       todo := make(chan balanceTask, workers)
+       go func() {
+               bal.BlockStateMap.Apply(func(blkid arvados.SizedDigest, blk *BlockState) {
+                       todo <- balanceTask{
+                               blkid: blkid,
+                               blk:   blk,
+                       }
+               })
+               close(todo)
+       }()
+       results := make(chan balanceResult, workers)
+       go func() {
+               var wg sync.WaitGroup
+               for i := 0; i < workers; i++ {
+                       wg.Add(1)
+                       go func() {
+                               for work := range todo {
+                                       results <- bal.balanceBlock(work.blkid, work.blk)
+                               }
+                               wg.Done()
+                       }()
+               }
+               wg.Wait()
+               close(results)
+       }()
+       bal.collectStatistics(results)
+}
+
+func (bal *Balancer) setupLookupTables() {
+       bal.serviceRoots = make(map[string]string)
+       bal.classes = []string{"default"}
+       bal.mountsByClass = map[string]map[*KeepMount]bool{"default": {}}
+       bal.mounts = 0
+       for _, srv := range bal.KeepServices {
+               bal.serviceRoots[srv.UUID] = srv.UUID
+               for _, mnt := range srv.mounts {
+                       bal.mounts++
+
+                       // All mounts on a read-only service are
+                       // effectively read-only.
+                       mnt.ReadOnly = mnt.ReadOnly || srv.ReadOnly
+
+                       if len(mnt.StorageClasses) == 0 {
+                               bal.mountsByClass["default"][mnt] = true
+                               continue
+                       }
+                       for _, class := range mnt.StorageClasses {
+                               if mbc := bal.mountsByClass[class]; mbc == nil {
+                                       bal.classes = append(bal.classes, class)
+                                       bal.mountsByClass[class] = map[*KeepMount]bool{mnt: true}
+                               } else {
+                                       mbc[mnt] = true
+                               }
+                       }
+               }
+       }
+       // Consider classes in lexicographic order to avoid flapping
+       // between balancing runs.  The outcome of the "prefer a mount
+       // we're already planning to use for a different storage
+       // class" case in balanceBlock depends on the order classes
+       // are considered.
+       sort.Strings(bal.classes)
+}
+
+const (
+       changeStay = iota
+       changePull
+       changeTrash
+       changeNone
+)
+
+var changeName = map[int]string{
+       changeStay:  "stay",
+       changePull:  "pull",
+       changeTrash: "trash",
+       changeNone:  "none",
+}
+
+type balanceResult struct {
+       blk        *BlockState
+       blkid      arvados.SizedDigest
+       have       int
+       want       int
+       classState map[string]balancedBlockState
+}
+
+// balanceBlock compares current state to desired state for a single
+// block, and makes the appropriate ChangeSet calls.
+func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) balanceResult {
+       debugf("balanceBlock: %v %+v", blkid, blk)
+
+       type slot struct {
+               mnt  *KeepMount // never nil
+               repl *Replica   // replica already stored here (or nil)
+               want bool       // we should pull/leave a replica here
+       }
+
+       // Build a list of all slots (one per mounted volume).
+       slots := make([]slot, 0, bal.mounts)
+       for _, srv := range bal.KeepServices {
+               for _, mnt := range srv.mounts {
+                       var repl *Replica
+                       for r := range blk.Replicas {
+                               if blk.Replicas[r].KeepMount == mnt {
+                                       repl = &blk.Replicas[r]
+                               }
+                       }
+                       // Initial value of "want" is "have, and can't
+                       // delete". These untrashable replicas get
+                       // prioritized when sorting slots: otherwise,
+                       // non-optimal readonly copies would cause us
+                       // to overreplicate.
+                       slots = append(slots, slot{
+                               mnt:  mnt,
+                               repl: repl,
+                               want: repl != nil && mnt.ReadOnly,
+                       })
+               }
+       }
+
+       uuids := keepclient.NewRootSorter(bal.serviceRoots, string(blkid[:32])).GetSortedRoots()
+       srvRendezvous := make(map[*KeepService]int, len(uuids))
+       for i, uuid := range uuids {
+               srv := bal.KeepServices[uuid]
+               srvRendezvous[srv] = i
+       }
+
+       // Below we set underreplicated=true if we find any storage
+       // class that's currently underreplicated -- in that case we
+       // won't want to trash any replicas.
+       underreplicated := false
+
+       classState := make(map[string]balancedBlockState, len(bal.classes))
+       unsafeToDelete := make(map[int64]bool, len(slots))
+       for _, class := range bal.classes {
+               desired := blk.Desired[class]
+
+               countedDev := map[string]bool{}
+               have := 0
+               for _, slot := range slots {
+                       if slot.repl != nil && bal.mountsByClass[class][slot.mnt] && !countedDev[slot.mnt.DeviceID] {
+                               have += slot.mnt.Replication
+                               if slot.mnt.DeviceID != "" {
+                                       countedDev[slot.mnt.DeviceID] = true
+                               }
+                       }
+               }
+               classState[class] = balancedBlockState{
+                       desired: desired,
+                       surplus: have - desired,
+               }
+
+               if desired == 0 {
+                       continue
+               }
+
+               // Sort the slots by desirability.
+               sort.Slice(slots, func(i, j int) bool {
+                       si, sj := slots[i], slots[j]
+                       if classi, classj := bal.mountsByClass[class][si.mnt], bal.mountsByClass[class][sj.mnt]; classi != classj {
+                               // Prefer a mount that satisfies the
+                               // desired class.
+                               return bal.mountsByClass[class][si.mnt]
+                       } else if si.want != sj.want {
+                               // Prefer a mount that will have a
+                               // replica no matter what we do here
+                               // -- either because it already has an
+                               // untrashable replica, or because we
+                               // already need it to satisfy a
+                               // different storage class.
+                               return si.want
+                       } else if orderi, orderj := srvRendezvous[si.mnt.KeepService], srvRendezvous[sj.mnt.KeepService]; orderi != orderj {
+                               // Prefer a better rendezvous
+                               // position.
+                               return orderi < orderj
+                       } else if repli, replj := si.repl != nil, sj.repl != nil; repli != replj {
+                               // Prefer a mount that already has a
+                               // replica.
+                               return repli
+                       } else {
+                               // If pull/trash turns out to be
+                               // needed, distribute the
+                               // new/remaining replicas uniformly
+                               // across qualifying mounts on a given
+                               // server.
+                               return rendezvousLess(si.mnt.DeviceID, sj.mnt.DeviceID, blkid)
+                       }
+               })
+
+               // Servers/mounts/devices (with or without existing
+               // replicas) that are part of the best achievable
+               // layout for this storage class.
+               wantSrv := map[*KeepService]bool{}
+               wantMnt := map[*KeepMount]bool{}
+               wantDev := map[string]bool{}
+               // Positions (with existing replicas) that have been
+               // protected (via unsafeToDelete) to ensure we don't
+               // reduce replication below desired level when
+               // trashing replicas that aren't optimal positions for
+               // any storage class.
+               protMnt := map[*KeepMount]bool{}
+               // Replication planned so far (corresponds to wantMnt).
+               replWant := 0
+               // Protected replication (corresponds to protMnt).
+               replProt := 0
+
+               // trySlot tries using a slot to meet requirements,
+               // and returns true if all requirements are met.
+               trySlot := func(i int) bool {
+                       slot := slots[i]
+                       if wantMnt[slot.mnt] || wantDev[slot.mnt.DeviceID] {
+                               // Already allocated a replica to this
+                               // backend device, possibly on a
+                               // different server.
+                               return false
+                       }
+                       if replProt < desired && slot.repl != nil && !protMnt[slot.mnt] {
+                               unsafeToDelete[slot.repl.Mtime] = true
+                               protMnt[slot.mnt] = true
+                               replProt += slot.mnt.Replication
+                       }
+                       if replWant < desired && (slot.repl != nil || !slot.mnt.ReadOnly) {
+                               slots[i].want = true
+                               wantSrv[slot.mnt.KeepService] = true
+                               wantMnt[slot.mnt] = true
+                               if slot.mnt.DeviceID != "" {
+                                       wantDev[slot.mnt.DeviceID] = true
+                               }
+                               replWant += slot.mnt.Replication
+                       }
+                       return replProt >= desired && replWant >= desired
+               }
+
+               // First try to achieve desired replication without
+               // using the same server twice.
+               done := false
+               for i := 0; i < len(slots) && !done; i++ {
+                       if !wantSrv[slots[i].mnt.KeepService] {
+                               done = trySlot(i)
+                       }
+               }
+
+               // If that didn't suffice, do another pass without the
+               // "distinct services" restriction. (Achieving the
+               // desired volume replication on fewer than the
+               // desired number of services is better than
+               // underreplicating.)
+               for i := 0; i < len(slots) && !done; i++ {
+                       done = trySlot(i)
+               }
+
+               if !underreplicated {
+                       safe := 0
+                       for _, slot := range slots {
+                               if slot.repl == nil || !bal.mountsByClass[class][slot.mnt] {
+                                       continue
+                               }
+                               if safe += slot.mnt.Replication; safe >= desired {
+                                       break
+                               }
+                       }
+                       underreplicated = safe < desired
+               }
+
+               // Set the unachievable flag if there aren't enough
+               // slots offering the relevant storage class. (This is
+               // as easy as checking slots[desired] because we
+               // already sorted the qualifying slots to the front.)
+               if desired >= len(slots) || !bal.mountsByClass[class][slots[desired].mnt] {
+                       cs := classState[class]
+                       cs.unachievable = true
+                       classState[class] = cs
+               }
+
+               // Avoid deleting wanted replicas from devices that
+               // are mounted on multiple servers -- even if they
+               // haven't already been added to unsafeToDelete
+               // because the servers report different Mtimes.
+               for _, slot := range slots {
+                       if slot.repl != nil && wantDev[slot.mnt.DeviceID] {
+                               unsafeToDelete[slot.repl.Mtime] = true
+                       }
+               }
+       }
+
+       // TODO: If multiple replicas are trashable, prefer the oldest
+       // replica that doesn't have a timestamp collision with
+       // others.
+
+       countedDev := map[string]bool{}
+       var have, want int
+       for _, slot := range slots {
+               if countedDev[slot.mnt.DeviceID] {
+                       continue
+               }
+               if slot.want {
+                       want += slot.mnt.Replication
+               }
+               if slot.repl != nil {
+                       have += slot.mnt.Replication
+               }
+               if slot.mnt.DeviceID != "" {
+                       countedDev[slot.mnt.DeviceID] = true
+               }
+       }
+
+       var changes []string
+       for _, slot := range slots {
+               // TODO: request a Touch if Mtime is duplicated.
+               var change int
+               switch {
+               case !underreplicated && !slot.want && slot.repl != nil && slot.repl.Mtime < bal.MinMtime && !unsafeToDelete[slot.repl.Mtime]:
+                       slot.mnt.KeepService.AddTrash(Trash{
+                               SizedDigest: blkid,
+                               Mtime:       slot.repl.Mtime,
+                               From:        slot.mnt,
+                       })
+                       change = changeTrash
+               case len(blk.Replicas) == 0:
+                       change = changeNone
+               case slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
+                       slot.mnt.KeepService.AddPull(Pull{
+                               SizedDigest: blkid,
+                               From:        blk.Replicas[0].KeepMount.KeepService,
+                               To:          slot.mnt,
+                       })
+                       change = changePull
+               default:
+                       change = changeStay
+               }
+               if bal.Dumper != nil {
+                       var mtime int64
+                       if slot.repl != nil {
+                               mtime = slot.repl.Mtime
+                       }
+                       srv := slot.mnt.KeepService
+                       changes = append(changes, fmt.Sprintf("%s:%d/%s=%s,%d", srv.ServiceHost, srv.ServicePort, slot.mnt.UUID, changeName[change], mtime))
+               }
+       }
+       if bal.Dumper != nil {
+               bal.Dumper.Printf("%s have=%d want=%d %s", blkid, have, want, strings.Join(changes, " "))
+       }
+       return balanceResult{
+               blk:        blk,
+               blkid:      blkid,
+               have:       have,
+               want:       want,
+               classState: classState,
+       }
+}
+
+type blocksNBytes struct {
+       replicas int
+       blocks   int
+       bytes    int64
+}
+
+func (bb blocksNBytes) String() string {
+       return fmt.Sprintf("%d replicas (%d blocks, %d bytes)", bb.replicas, bb.blocks, bb.bytes)
+}
+
+type balancerStats struct {
+       lost          blocksNBytes
+       overrep       blocksNBytes
+       unref         blocksNBytes
+       garbage       blocksNBytes
+       underrep      blocksNBytes
+       unachievable  blocksNBytes
+       justright     blocksNBytes
+       desired       blocksNBytes
+       current       blocksNBytes
+       pulls         int
+       trashes       int
+       replHistogram []int
+       classStats    map[string]replicationStats
+
+       // collectionBytes / collectionBlockBytes = deduplication ratio
+       collectionBytes      int64 // sum(bytes in referenced blocks) across all collections
+       collectionBlockBytes int64 // sum(block size) across all blocks referenced by collections
+       collectionBlockRefs  int64 // sum(number of blocks referenced) across all collections
+       collectionBlocks     int64 // number of blocks referenced by any collection
+}
+
+func (s *balancerStats) dedupByteRatio() float64 {
+       if s.collectionBlockBytes == 0 {
+               return 0
+       }
+       return float64(s.collectionBytes) / float64(s.collectionBlockBytes)
+}
+
+func (s *balancerStats) dedupBlockRatio() float64 {
+       if s.collectionBlocks == 0 {
+               return 0
+       }
+       return float64(s.collectionBlockRefs) / float64(s.collectionBlocks)
+}
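+
+// For example, if two collections each reference the same 100 MiB of
+// unique blocks, collectionBytes is 200 MiB, collectionBlockBytes is
+// 100 MiB, and dedupByteRatio reports 2.0.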
+
+type replicationStats struct {
+       desired      blocksNBytes
+       surplus      blocksNBytes
+       short        blocksNBytes
+       unachievable blocksNBytes
+}
+
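+// balancedBlockState summarizes the balance of one block for one
+// storage class: desired is the replication target, surplus is
+// have-desired (negative when underreplicated), and unachievable is
+// set when the target cannot be met with the qualifying mounts.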
+type balancedBlockState struct {
+       desired      int
+       surplus      int
+       unachievable bool
+}
+
+func (bal *Balancer) collectStatistics(results <-chan balanceResult) {
+       var s balancerStats
+       s.replHistogram = make([]int, 2)
+       s.classStats = make(map[string]replicationStats, len(bal.classes))
+       for result := range results {
+               surplus := result.have - result.want
+               bytes := result.blkid.Size()
+
+               if rc := int64(result.blk.RefCount); rc > 0 {
+                       s.collectionBytes += rc * bytes
+                       s.collectionBlockBytes += bytes
+                       s.collectionBlockRefs += rc
+                       s.collectionBlocks++
+               }
+
+               for class, state := range result.classState {
+                       cs := s.classStats[class]
+                       if state.unachievable {
+                               cs.unachievable.blocks++
+                               cs.unachievable.bytes += bytes
+                       }
+                       if state.desired > 0 {
+                               cs.desired.replicas += state.desired
+                               cs.desired.blocks++
+                               cs.desired.bytes += bytes * int64(state.desired)
+                       }
+                       if state.surplus > 0 {
+                               cs.surplus.replicas += state.surplus
+                               cs.surplus.blocks++
+                               cs.surplus.bytes += bytes * int64(state.surplus)
+                       } else if state.surplus < 0 {
+                               cs.short.replicas += -state.surplus
+                               cs.short.blocks++
+                               cs.short.bytes += bytes * int64(-state.surplus)
+                       }
+                       s.classStats[class] = cs
+               }
+
+               switch {
+               case result.have == 0 && result.want > 0:
+                       s.lost.replicas -= surplus
+                       s.lost.blocks++
+                       s.lost.bytes += bytes * int64(-surplus)
+               case surplus < 0:
+                       s.underrep.replicas -= surplus
+                       s.underrep.blocks++
+                       s.underrep.bytes += bytes * int64(-surplus)
+               case surplus > 0 && result.want == 0:
+                       counter := &s.garbage
+                       for _, r := range result.blk.Replicas {
+                               if r.Mtime >= bal.MinMtime {
+                                       counter = &s.unref
+                                       break
+                               }
+                       }
+                       counter.replicas += surplus
+                       counter.blocks++
+                       counter.bytes += bytes * int64(surplus)
+               case surplus > 0:
+                       s.overrep.replicas += surplus
+                       s.overrep.blocks++
+                       s.overrep.bytes += bytes * int64(result.have-result.want)
+               default:
+                       s.justright.replicas += result.want
+                       s.justright.blocks++
+                       s.justright.bytes += bytes * int64(result.want)
+               }
+
+               if result.want > 0 {
+                       s.desired.replicas += result.want
+                       s.desired.blocks++
+                       s.desired.bytes += bytes * int64(result.want)
+               }
+               if result.have > 0 {
+                       s.current.replicas += result.have
+                       s.current.blocks++
+                       s.current.bytes += bytes * int64(result.have)
+               }
+
+               for len(s.replHistogram) <= result.have {
+                       s.replHistogram = append(s.replHistogram, 0)
+               }
+               s.replHistogram[result.have]++
+       }
+       for _, srv := range bal.KeepServices {
+               s.pulls += len(srv.ChangeSet.Pulls)
+               s.trashes += len(srv.ChangeSet.Trashes)
+       }
+       bal.stats = s
+       bal.Metrics.UpdateStats(s)
+}
+
+// PrintStatistics writes statistics about the computed changes to
+// bal.Logger. It should not be called until ComputeChangeSets has
+// finished.
+func (bal *Balancer) PrintStatistics() {
+       bal.logf("===")
+       bal.logf("%s lost (0=have<want)", bal.stats.lost)
+       bal.logf("%s underreplicated (0<have<want)", bal.stats.underrep)
+       bal.logf("%s just right (have=want)", bal.stats.justright)
+       bal.logf("%s overreplicated (have>want>0)", bal.stats.overrep)
+       bal.logf("%s unreferenced (have>want=0, new)", bal.stats.unref)
+       bal.logf("%s garbage (have>want=0, old)", bal.stats.garbage)
+       for _, class := range bal.classes {
+               cs := bal.stats.classStats[class]
+               bal.logf("===")
+               bal.logf("storage class %q: %s desired", class, cs.desired)
+               bal.logf("storage class %q: %s short", class, cs.short)
+               bal.logf("storage class %q: %s surplus", class, cs.surplus)
+               bal.logf("storage class %q: %s unachievable", class, cs.unachievable)
+       }
+       bal.logf("===")
+       bal.logf("%s total commitment (excluding unreferenced)", bal.stats.desired)
+       bal.logf("%s total usage", bal.stats.current)
+       bal.logf("===")
+       for _, srv := range bal.KeepServices {
+               bal.logf("%s: %v\n", srv, srv.ChangeSet)
+       }
+       bal.logf("===")
+       bal.printHistogram(60)
+       bal.logf("===")
+}
+
+func (bal *Balancer) printHistogram(hashColumns int) {
+       bal.logf("Replication level distribution (counting N replicas on a single server as N):")
+       maxCount := 0
+       for _, count := range bal.stats.replHistogram {
+               if maxCount < count {
+                       maxCount = count
+               }
+       }
+       hashes := strings.Repeat("#", hashColumns)
+       countWidth := 1 + int(math.Log10(float64(maxCount+1)))
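+       // Log-scale the bars so the row for maxCount spans roughly
+       // hashColumns columns while small counts remain visible.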
+       scaleCount := 10 * float64(hashColumns) / math.Floor(1+10*math.Log10(float64(maxCount+1)))
+       for repl, count := range bal.stats.replHistogram {
+               nHashes := int(scaleCount * math.Log10(float64(count+1)))
+               bal.logf("%2d: %*d %s", repl, countWidth, count, hashes[:nHashes])
+       }
+}
+
+// CheckSanityLate checks for configuration and runtime errors after
+// GetCurrentState() and ComputeChangeSets() have finished.
+//
+// If it returns an error, it is dangerous to run any Commit methods.
+func (bal *Balancer) CheckSanityLate() error {
+       if bal.errors != nil {
+               for _, err := range bal.errors {
+                       bal.logf("deferred error: %v", err)
+               }
+               return fmt.Errorf("cannot proceed safely after deferred errors")
+       }
+
+       if bal.collScanned == 0 {
+               return fmt.Errorf("received zero collections")
+       }
+
+       anyDesired := false
+       bal.BlockStateMap.Apply(func(_ arvados.SizedDigest, blk *BlockState) {
+               for _, desired := range blk.Desired {
+                       if desired > 0 {
+                               anyDesired = true
+                               break
+                       }
+               }
+       })
+       if !anyDesired {
+               return fmt.Errorf("zero blocks have desired replication>0")
+       }
+
+       if dr := bal.DefaultReplication; dr < 1 {
+               return fmt.Errorf("Default replication (%d) is less than 1", dr)
+       }
+
+       // TODO: no two services have identical indexes
+       // TODO: no collisions (same md5, different size)
+       return nil
+}
+
+// CommitPulls sends the computed lists of pull requests to the
+// keepstore servers. This has the effect of increasing replication of
+// existing blocks that are either underreplicated or poorly
+// distributed according to rendezvous hashing.
+func (bal *Balancer) CommitPulls(c *arvados.Client) error {
+       defer bal.time("send_pull_lists", "wall clock time to send pull lists")()
+       return bal.commitAsync(c, "send pull list",
+               func(srv *KeepService) error {
+                       return srv.CommitPulls(c)
+               })
+}
+
+// CommitTrash sends the computed lists of trash requests to the
+// keepstore servers. This has the effect of deleting blocks that are
+// overreplicated or unreferenced.
+func (bal *Balancer) CommitTrash(c *arvados.Client) error {
+       defer bal.time("send_trash_lists", "wall clock time to send trash lists")()
+       return bal.commitAsync(c, "send trash list",
+               func(srv *KeepService) error {
+                       return srv.CommitTrash(c)
+               })
+}
+
+func (bal *Balancer) commitAsync(c *arvados.Client, label string, f func(srv *KeepService) error) error {
+       errs := make(chan error)
+       for _, srv := range bal.KeepServices {
+               go func(srv *KeepService) {
+                       var err error
+                       defer func() { errs <- err }()
+                       label := fmt.Sprintf("%s: %v", srv, label)
+                       err = f(srv)
+                       if err != nil {
+                               err = fmt.Errorf("%s: %v", label, err)
+                       }
+               }(srv)
+       }
+       var lastErr error
+       for range bal.KeepServices {
+               if err := <-errs; err != nil {
+                       bal.logf("%v", err)
+                       lastErr = err
+               }
+       }
+       close(errs)
+       return lastErr
+}
+
+func (bal *Balancer) logf(f string, args ...interface{}) {
+       if bal.Logger != nil {
+               bal.Logger.Printf(f, args...)
+       }
+}
+
+func (bal *Balancer) time(name, help string) func() {
+       observer := bal.Metrics.DurationObserver(name+"_seconds", help)
+       t0 := time.Now()
+       bal.Logger.Printf("%s: start", name)
+       return func() {
+               dur := time.Since(t0)
+               observer.Observe(dur.Seconds())
+               bal.Logger.Printf("%s: took %vs", name, dur.Seconds())
+       }
+}
+
+// rendezvousLess reports whether mount/device ID i sorts before j in
+// rendezvous order for blkid. Less efficient than sorting on
+// precomputed rendezvous hashes, but also rarely used.
+func rendezvousLess(i, j string, blkid arvados.SizedDigest) bool {
+       a := md5.Sum([]byte(string(blkid[:32]) + i))
+       b := md5.Sum([]byte(string(blkid[:32]) + j))
+       return bytes.Compare(a[:], b[:]) < 0
+}
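+
+// Illustrative sketch (hypothetical names, mirroring the call in
+// balanceBlock above): the same sorter orders service UUIDs for one
+// block.
+//
+//   roots := map[string]string{uuid1: uuid1, uuid2: uuid2}
+//   order := keepclient.NewRootSorter(roots, string(blkid[:32])).GetSortedRoots()
+//   // order[0] is the best rendezvous position for blkid.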
diff --git a/services/keep-balance/balance_run_test.go b/services/keep-balance/balance_run_test.go
new file mode 100644 (file)
index 0000000..7e2adcf
--- /dev/null
@@ -0,0 +1,524 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "net/http/httptest"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/sirupsen/logrus"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&runSuite{})
+
+type reqTracker struct {
+       reqs []http.Request
+       sync.Mutex
+}
+
+func (rt *reqTracker) Count() int {
+       rt.Lock()
+       defer rt.Unlock()
+       return len(rt.reqs)
+}
+
+func (rt *reqTracker) Add(req *http.Request) int {
+       rt.Lock()
+       defer rt.Unlock()
+       rt.reqs = append(rt.reqs, *req)
+       return len(rt.reqs)
+}
+
+var stubServices = []arvados.KeepService{
+       {
+               UUID:           "zzzzz-bi6l4-000000000000000",
+               ServiceHost:    "keep0.zzzzz.arvadosapi.com",
+               ServicePort:    25107,
+               ServiceSSLFlag: false,
+               ServiceType:    "disk",
+       },
+       {
+               UUID:           "zzzzz-bi6l4-000000000000001",
+               ServiceHost:    "keep1.zzzzz.arvadosapi.com",
+               ServicePort:    25107,
+               ServiceSSLFlag: false,
+               ServiceType:    "disk",
+       },
+       {
+               UUID:           "zzzzz-bi6l4-000000000000002",
+               ServiceHost:    "keep2.zzzzz.arvadosapi.com",
+               ServicePort:    25107,
+               ServiceSSLFlag: false,
+               ServiceType:    "disk",
+       },
+       {
+               UUID:           "zzzzz-bi6l4-000000000000003",
+               ServiceHost:    "keep3.zzzzz.arvadosapi.com",
+               ServicePort:    25107,
+               ServiceSSLFlag: false,
+               ServiceType:    "disk",
+       },
+       {
+               UUID:           "zzzzz-bi6l4-h0a0xwut9qa6g3a",
+               ServiceHost:    "keep.zzzzz.arvadosapi.com",
+               ServicePort:    25333,
+               ServiceSSLFlag: true,
+               ServiceType:    "proxy",
+       },
+}
+
+var stubMounts = map[string][]arvados.KeepMount{
+       "keep0.zzzzz.arvadosapi.com:25107": {{
+               UUID:     "zzzzz-ivpuk-000000000000000",
+               DeviceID: "keep0-vol0",
+       }},
+       "keep1.zzzzz.arvadosapi.com:25107": {{
+               UUID:     "zzzzz-ivpuk-100000000000000",
+               DeviceID: "keep1-vol0",
+       }},
+       "keep2.zzzzz.arvadosapi.com:25107": {{
+               UUID:     "zzzzz-ivpuk-200000000000000",
+               DeviceID: "keep2-vol0",
+       }},
+       "keep3.zzzzz.arvadosapi.com:25107": {{
+               UUID:     "zzzzz-ivpuk-300000000000000",
+               DeviceID: "keep3-vol0",
+       }},
+}
+
+// stubServer is an HTTP transport that intercepts and processes all
+// requests using its own handlers.
+type stubServer struct {
+       mux      *http.ServeMux
+       srv      *httptest.Server
+       mutex    sync.Mutex
+       Requests reqTracker
+       logf     func(string, ...interface{})
+}
+
+// Start initializes the stub server and returns an *http.Client that
+// uses the stub server to handle all requests.
+//
+// A stubServer that has been started should eventually be shut down
+// with Close().
+func (s *stubServer) Start() *http.Client {
+       // Set up a config.Client that forwards all requests to s.mux
+       // via s.srv. Test cases will attach handlers to s.mux to get
+       // the desired responses.
+       s.mux = http.NewServeMux()
+       s.srv = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+               s.mutex.Lock()
+               s.Requests.Add(r)
+               s.mutex.Unlock()
+               w.Header().Set("Content-Type", "application/json")
+               s.mux.ServeHTTP(w, r)
+       }))
+       return &http.Client{Transport: s}
+}
+
+func (s *stubServer) RoundTrip(req *http.Request) (*http.Response, error) {
+       w := httptest.NewRecorder()
+       s.mux.ServeHTTP(w, req)
+       return &http.Response{
+               StatusCode: w.Code,
+               Status:     fmt.Sprintf("%d %s", w.Code, http.StatusText(w.Code)),
+               Header:     w.HeaderMap,
+               Body:       ioutil.NopCloser(w.Body)}, nil
+}
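+
+// Because stubServer implements http.RoundTripper, installing it as
+// an http.Client's Transport routes every request through s.mux
+// in-process, with no network traffic.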
+
+// Close releases resources used by the server.
+func (s *stubServer) Close() {
+       s.srv.Close()
+}
+
+func (s *stubServer) serveStatic(path, data string) *reqTracker {
+       rt := &reqTracker{}
+       s.mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+               rt.Add(r)
+               if r.Body != nil {
+                       ioutil.ReadAll(r.Body)
+                       r.Body.Close()
+               }
+               io.WriteString(w, data)
+       })
+       return rt
+}
+
+func (s *stubServer) serveCurrentUserAdmin() *reqTracker {
+       return s.serveStatic("/arvados/v1/users/current",
+               `{"uuid":"zzzzz-tpzed-000000000000000","is_admin":true,"is_active":true}`)
+}
+
+func (s *stubServer) serveCurrentUserNotAdmin() *reqTracker {
+       return s.serveStatic("/arvados/v1/users/current",
+               `{"uuid":"zzzzz-tpzed-000000000000000","is_admin":false,"is_active":true}`)
+}
+
+func (s *stubServer) serveDiscoveryDoc() *reqTracker {
+       return s.serveStatic("/discovery/v1/apis/arvados/v1/rest",
+               `{"defaultCollectionReplication":2}`)
+}
+
+func (s *stubServer) serveZeroCollections() *reqTracker {
+       return s.serveStatic("/arvados/v1/collections",
+               `{"items":[],"items_available":0}`)
+}
+
+func (s *stubServer) serveFooBarFileCollections() *reqTracker {
+       rt := &reqTracker{}
+       s.mux.HandleFunc("/arvados/v1/collections", func(w http.ResponseWriter, r *http.Request) {
+               r.ParseForm()
+               rt.Add(r)
+               if strings.Contains(r.Form.Get("filters"), `modified_at`) {
+                       io.WriteString(w, `{"items_available":0,"items":[]}`)
+               } else {
+                       io.WriteString(w, `{"items_available":3,"items":[
+                               {"uuid":"zzzzz-4zz18-aaaaaaaaaaaaaaa","portable_data_hash":"fa7aeb5140e2848d39b416daeef4ffc5+45","manifest_text":". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n","modified_at":"2014-02-03T17:22:54Z"},
+                               {"uuid":"zzzzz-4zz18-ehbhgtheo8909or","portable_data_hash":"fa7aeb5140e2848d39b416daeef4ffc5+45","manifest_text":". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n","modified_at":"2014-02-03T17:22:54Z"},
+                               {"uuid":"zzzzz-4zz18-znfnqtbbv4spc3w","portable_data_hash":"1f4b0bc7583c2a7f9102c395f4ffc5e3+45","manifest_text":". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n","modified_at":"2014-02-03T17:22:54Z"}]}`)
+               }
+       })
+       return rt
+}
+
+func (s *stubServer) serveCollectionsButSkipOne() *reqTracker {
+       rt := &reqTracker{}
+       s.mux.HandleFunc("/arvados/v1/collections", func(w http.ResponseWriter, r *http.Request) {
+               r.ParseForm()
+               rt.Add(r)
+               if strings.Contains(r.Form.Get("filters"), `"modified_at","\u003c="`) {
+                       io.WriteString(w, `{"items_available":3,"items":[]}`)
+               } else if strings.Contains(r.Form.Get("filters"), `"modified_at","\u003e`) {
+                       io.WriteString(w, `{"items_available":0,"items":[]}`)
+               } else if strings.Contains(r.Form.Get("filters"), `"modified_at","="`) && strings.Contains(r.Form.Get("filters"), `"uuid","\u003e"`) {
+                       io.WriteString(w, `{"items_available":0,"items":[]}`)
+               } else {
+                       io.WriteString(w, `{"items_available":2,"items":[
+                               {"uuid":"zzzzz-4zz18-ehbhgtheo8909or","portable_data_hash":"fa7aeb5140e2848d39b416daeef4ffc5+45","manifest_text":". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n","modified_at":"2014-02-03T17:22:54Z"},
+                               {"uuid":"zzzzz-4zz18-znfnqtbbv4spc3w","portable_data_hash":"1f4b0bc7583c2a7f9102c395f4ffc5e3+45","manifest_text":". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n","modified_at":"2014-02-03T17:22:54Z"}]}`)
+               }
+       })
+       return rt
+}
+
+func (s *stubServer) serveZeroKeepServices() *reqTracker {
+       return s.serveJSON("/arvados/v1/keep_services", arvados.KeepServiceList{})
+}
+
+func (s *stubServer) serveKeepServices(svcs []arvados.KeepService) *reqTracker {
+       return s.serveJSON("/arvados/v1/keep_services", arvados.KeepServiceList{
+               ItemsAvailable: len(svcs),
+               Items:          svcs,
+       })
+}
+
+func (s *stubServer) serveJSON(path string, resp interface{}) *reqTracker {
+       rt := &reqTracker{}
+       s.mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+               rt.Add(r)
+               json.NewEncoder(w).Encode(resp)
+       })
+       return rt
+}
+
+func (s *stubServer) serveKeepstoreMounts() *reqTracker {
+       rt := &reqTracker{}
+       s.mux.HandleFunc("/mounts", func(w http.ResponseWriter, r *http.Request) {
+               rt.Add(r)
+               json.NewEncoder(w).Encode(stubMounts[r.Host])
+       })
+       return rt
+}
+
+func (s *stubServer) serveKeepstoreIndexFoo4Bar1() *reqTracker {
+       rt := &reqTracker{}
+       s.mux.HandleFunc("/index/", func(w http.ResponseWriter, r *http.Request) {
+               count := rt.Add(r)
+               if r.Host == "keep0.zzzzz.arvadosapi.com:25107" {
+                       io.WriteString(w, "37b51d194a7513e45b56f6524f2d51f2+3 12345678\n")
+               }
+               fmt.Fprintf(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 %d\n\n", 12345678+count)
+       })
+       for _, mounts := range stubMounts {
+               for i, mnt := range mounts {
+                       i := i
+                       s.mux.HandleFunc(fmt.Sprintf("/mounts/%s/blocks", mnt.UUID), func(w http.ResponseWriter, r *http.Request) {
+                               count := rt.Add(r)
+                               if i == 0 && r.Host == "keep0.zzzzz.arvadosapi.com:25107" {
+                                       io.WriteString(w, "37b51d194a7513e45b56f6524f2d51f2+3 12345678\n")
+                               }
+                               if i == 0 {
+                                       fmt.Fprintf(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 %d\n", 12345678+count)
+                               }
+                               fmt.Fprintf(w, "\n")
+                       })
+               }
+       }
+       return rt
+}
+
+func (s *stubServer) serveKeepstoreTrash() *reqTracker {
+       return s.serveStatic("/trash", `{}`)
+}
+
+func (s *stubServer) serveKeepstorePull() *reqTracker {
+       return s.serveStatic("/pull", `{}`)
+}
+
+type runSuite struct {
+       stub   stubServer
+       config Config
+}
+
+// logger returns a *logrus.Logger that writes to the current test's
+// c.Log().
+func (s *runSuite) logger(c *check.C) *logrus.Logger {
+       r, w := io.Pipe()
+       go func() {
+               buf := make([]byte, 10000)
+               for {
+                       n, err := r.Read(buf)
+                       if n > 0 {
+                               if buf[n-1] == '\n' {
+                                       n--
+                               }
+                               c.Log(string(buf[:n]))
+                       }
+                       if err != nil {
+                               break
+                       }
+               }
+       }()
+       logger := logrus.New()
+       logger.Out = w
+       return logger
+}
+
+func (s *runSuite) SetUpTest(c *check.C) {
+       s.config = Config{
+               Client: arvados.Client{
+                       AuthToken: "xyzzy",
+                       APIHost:   "zzzzz.arvadosapi.com",
+                       Client:    s.stub.Start()},
+               KeepServiceTypes: []string{"disk"},
+               RunPeriod:        arvados.Duration(time.Second),
+       }
+       s.stub.serveDiscoveryDoc()
+       s.stub.logf = c.Logf
+}
+
+func (s *runSuite) TearDownTest(c *check.C) {
+       s.stub.Close()
+}
+
+func (s *runSuite) TestRefuseZeroCollections(c *check.C) {
+       opts := RunOptions{
+               CommitPulls: true,
+               CommitTrash: true,
+               Logger:      s.logger(c),
+       }
+       s.stub.serveCurrentUserAdmin()
+       s.stub.serveZeroCollections()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
+       s.stub.serveKeepstoreIndexFoo4Bar1()
+       trashReqs := s.stub.serveKeepstoreTrash()
+       pullReqs := s.stub.serveKeepstorePull()
+       srv, err := NewServer(s.config, opts)
+       c.Assert(err, check.IsNil)
+       _, err = srv.Run()
+       c.Check(err, check.ErrorMatches, "received zero collections")
+       c.Check(trashReqs.Count(), check.Equals, 4)
+       c.Check(pullReqs.Count(), check.Equals, 0)
+}
+
+func (s *runSuite) TestServiceTypes(c *check.C) {
+       opts := RunOptions{
+               CommitPulls: true,
+               CommitTrash: true,
+               Logger:      s.logger(c),
+       }
+       s.config.KeepServiceTypes = []string{"unlisted-type"}
+       s.stub.serveCurrentUserAdmin()
+       s.stub.serveFooBarFileCollections()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
+       indexReqs := s.stub.serveKeepstoreIndexFoo4Bar1()
+       trashReqs := s.stub.serveKeepstoreTrash()
+       srv, err := NewServer(s.config, opts)
+       c.Assert(err, check.IsNil)
+       _, err = srv.Run()
+       c.Check(err, check.IsNil)
+       c.Check(indexReqs.Count(), check.Equals, 0)
+       c.Check(trashReqs.Count(), check.Equals, 0)
+}
+
+func (s *runSuite) TestRefuseNonAdmin(c *check.C) {
+       opts := RunOptions{
+               CommitPulls: true,
+               CommitTrash: true,
+               Logger:      s.logger(c),
+       }
+       s.stub.serveCurrentUserNotAdmin()
+       s.stub.serveZeroCollections()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
+       trashReqs := s.stub.serveKeepstoreTrash()
+       pullReqs := s.stub.serveKeepstorePull()
+       srv, err := NewServer(s.config, opts)
+       c.Assert(err, check.IsNil)
+       _, err = srv.Run()
+       c.Check(err, check.ErrorMatches, "current user .* is not .* admin user")
+       c.Check(trashReqs.Count(), check.Equals, 0)
+       c.Check(pullReqs.Count(), check.Equals, 0)
+}
+
+func (s *runSuite) TestDetectSkippedCollections(c *check.C) {
+       opts := RunOptions{
+               CommitPulls: true,
+               CommitTrash: true,
+               Logger:      s.logger(c),
+       }
+       s.stub.serveCurrentUserAdmin()
+       s.stub.serveCollectionsButSkipOne()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
+       s.stub.serveKeepstoreIndexFoo4Bar1()
+       trashReqs := s.stub.serveKeepstoreTrash()
+       pullReqs := s.stub.serveKeepstorePull()
+       srv, err := NewServer(s.config, opts)
+       c.Assert(err, check.IsNil)
+       _, err = srv.Run()
+       c.Check(err, check.ErrorMatches, `Retrieved 2 collections with modtime <= .* but server now reports there are 3 collections.*`)
+       c.Check(trashReqs.Count(), check.Equals, 4)
+       c.Check(pullReqs.Count(), check.Equals, 0)
+}
+
+func (s *runSuite) TestDryRun(c *check.C) {
+       opts := RunOptions{
+               CommitPulls: false,
+               CommitTrash: false,
+               Logger:      s.logger(c),
+       }
+       s.stub.serveCurrentUserAdmin()
+       collReqs := s.stub.serveFooBarFileCollections()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
+       s.stub.serveKeepstoreIndexFoo4Bar1()
+       trashReqs := s.stub.serveKeepstoreTrash()
+       pullReqs := s.stub.serveKeepstorePull()
+       srv, err := NewServer(s.config, opts)
+       c.Assert(err, check.IsNil)
+       bal, err := srv.Run()
+       c.Check(err, check.IsNil)
+       for _, req := range collReqs.reqs {
+               c.Check(req.Form.Get("include_trash"), check.Equals, "true")
+               c.Check(req.Form.Get("include_old_versions"), check.Equals, "true")
+       }
+       c.Check(trashReqs.Count(), check.Equals, 0)
+       c.Check(pullReqs.Count(), check.Equals, 0)
+       c.Check(bal.stats.pulls, check.Not(check.Equals), 0)
+       c.Check(bal.stats.underrep.replicas, check.Not(check.Equals), 0)
+       c.Check(bal.stats.overrep.replicas, check.Not(check.Equals), 0)
+}
+
+func (s *runSuite) TestCommit(c *check.C) {
+       s.config.Listen = ":"
+       s.config.ManagementToken = "xyzzy"
+       opts := RunOptions{
+               CommitPulls: true,
+               CommitTrash: true,
+               Logger:      s.logger(c),
+               Dumper:      s.logger(c),
+       }
+       s.stub.serveCurrentUserAdmin()
+       s.stub.serveFooBarFileCollections()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
+       s.stub.serveKeepstoreIndexFoo4Bar1()
+       trashReqs := s.stub.serveKeepstoreTrash()
+       pullReqs := s.stub.serveKeepstorePull()
+       srv, err := NewServer(s.config, opts)
+       c.Assert(err, check.IsNil)
+       bal, err := srv.Run()
+       c.Check(err, check.IsNil)
+       c.Check(trashReqs.Count(), check.Equals, 8)
+       c.Check(pullReqs.Count(), check.Equals, 4)
+       // "foo" block is overreplicated by 2
+       c.Check(bal.stats.trashes, check.Equals, 2)
+       // "bar" block is underreplicated by 1, and its only copy is
+       // in a poor rendezvous position
+       c.Check(bal.stats.pulls, check.Equals, 2)
+
+       metrics := s.getMetrics(c, srv)
+       c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_total_bytes 15\n.*`)
+       c.Check(metrics, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_sum [0-9\.]+\n.*`)
+       c.Check(metrics, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_count 1\n.*`)
+       c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_dedup_byte_ratio 1\.5\n.*`)
+       c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_dedup_block_ratio 1\.5\n.*`)
+}
+
+func (s *runSuite) TestRunForever(c *check.C) {
+       s.config.Listen = ":"
+       s.config.ManagementToken = "xyzzy"
+       opts := RunOptions{
+               CommitPulls: true,
+               CommitTrash: true,
+               Logger:      s.logger(c),
+               Dumper:      s.logger(c),
+       }
+       s.stub.serveCurrentUserAdmin()
+       s.stub.serveFooBarFileCollections()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
+       s.stub.serveKeepstoreIndexFoo4Bar1()
+       trashReqs := s.stub.serveKeepstoreTrash()
+       pullReqs := s.stub.serveKeepstorePull()
+
+       stop := make(chan interface{})
+       s.config.RunPeriod = arvados.Duration(time.Millisecond)
+       srv, err := NewServer(s.config, opts)
+       c.Assert(err, check.IsNil)
+
+       done := make(chan bool)
+       go func() {
+               srv.RunForever(stop)
+               close(done)
+       }()
+
+       // Each run should send 4 pull lists + 4 trash lists. The
+       // first run should also send 4 empty trash lists at
+       // startup. We should complete all four runs in much less than
+       // a second.
+       for t0 := time.Now(); pullReqs.Count() < 16 && time.Since(t0) < 10*time.Second; {
+               time.Sleep(time.Millisecond)
+       }
+       stop <- true
+       <-done
+       c.Check(pullReqs.Count() >= 16, check.Equals, true)
+       c.Check(trashReqs.Count(), check.Equals, pullReqs.Count()+4)
+       c.Check(s.getMetrics(c, srv), check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_count `+fmt.Sprintf("%d", pullReqs.Count()/4)+`\n.*`)
+}
+
+func (s *runSuite) getMetrics(c *check.C, srv *Server) string {
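+       // A request without the management token must be rejected.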
+       resp, err := http.Get("http://" + srv.listening + "/metrics")
+       c.Assert(err, check.IsNil)
+       c.Check(resp.StatusCode, check.Equals, http.StatusUnauthorized)
+
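+       // The same request with the configured management token must
+       // succeed.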
+       resp, err = http.Get("http://" + srv.listening + "/metrics?api_token=xyzzy")
+       c.Assert(err, check.IsNil)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       buf, err := ioutil.ReadAll(resp.Body)
+       c.Check(err, check.IsNil)
+       return string(buf)
+}
diff --git a/services/keep-balance/balance_test.go b/services/keep-balance/balance_test.go
new file mode 100644 (file)
index 0000000..37be185
--- /dev/null
@@ -0,0 +1,656 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "crypto/md5"
+       "fmt"
+       "sort"
+       "strconv"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+
+       check "gopkg.in/check.v1"
+)
+
+// Test with Gocheck
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&balancerSuite{})
+
+type balancerSuite struct {
+       Balancer
+       srvs            []*KeepService
+       blks            map[string]tester
+       knownRendezvous [][]int
+       signatureTTL    int64
+}
+
+const (
+       // known0 indexes the first entry in knownRendezvous; tests
+       // refer to the other known blocks (1, 2, 3) by literal index.
+       known0 = 0
+)
+
+type slots []int
+
+type tester struct {
+       known       int
+       desired     map[string]int
+       current     slots
+       timestamps  []int64
+       shouldPull  slots
+       shouldTrash slots
+
+       shouldPullMounts  []string
+       shouldTrashMounts []string
+
+       expectResult balanceResult
+}
+
+func (bal *balancerSuite) SetUpSuite(c *check.C) {
+       bal.knownRendezvous = nil
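+       // Each string encodes the rendezvous probe order for one known
+       // block: the Nth hex digit is the index (0-15) of the server
+       // in Nth-best position. These orders correspond to the block
+       // hashes produced by knownBlkid (see
+       // sdk/go/keepclient/root_sorter_test.go).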
+       for _, str := range []string{
+               "3eab2d5fc9681074",
+               "097dba52e648f1c3",
+               "c5b4e023f8a7d691",
+               "9d81c02e76a3bf54",
+       } {
+               var slots []int
+               for _, c := range []byte(str) {
+                       pos, _ := strconv.ParseUint(string(c), 16, 4)
+                       slots = append(slots, int(pos))
+               }
+               bal.knownRendezvous = append(bal.knownRendezvous, slots)
+       }
+
+       bal.signatureTTL = 3600
+}
+
+func (bal *balancerSuite) SetUpTest(c *check.C) {
+       bal.srvs = make([]*KeepService, 16)
+       bal.KeepServices = make(map[string]*KeepService)
+       for i := range bal.srvs {
+               srv := &KeepService{
+                       KeepService: arvados.KeepService{
+                               UUID: fmt.Sprintf("zzzzz-bi6l4-%015x", i),
+                       },
+               }
+               srv.mounts = []*KeepMount{{
+                       KeepMount: arvados.KeepMount{
+                               UUID: fmt.Sprintf("zzzzz-mount-%015x", i),
+                       },
+                       KeepService: srv,
+               }}
+               bal.srvs[i] = srv
+               bal.KeepServices[srv.UUID] = srv
+       }
+
+       bal.MinMtime = time.Now().UnixNano() - bal.signatureTTL*1e9
+       bal.cleanupMounts()
+}
+
+func (bal *balancerSuite) TestPerfect(c *check.C) {
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 2},
+               current:     slots{0, 1},
+               shouldPull:  nil,
+               shouldTrash: nil})
+}
+
+func (bal *balancerSuite) TestDecreaseRepl(c *check.C) {
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 2},
+               current:     slots{0, 2, 1},
+               shouldTrash: slots{2}})
+}
+
+func (bal *balancerSuite) TestDecreaseReplToZero(c *check.C) {
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 0},
+               current:     slots{0, 1, 3},
+               shouldTrash: slots{0, 1, 3}})
+}
+
+func (bal *balancerSuite) TestIncreaseRepl(c *check.C) {
+       bal.try(c, tester{
+               desired:    map[string]int{"default": 4},
+               current:    slots{0, 1},
+               shouldPull: slots{2, 3}})
+}
+
+func (bal *balancerSuite) TestSkipReadonly(c *check.C) {
+       bal.srvList(0, slots{3})[0].ReadOnly = true
+       bal.try(c, tester{
+               desired:    map[string]int{"default": 4},
+               current:    slots{0, 1},
+               shouldPull: slots{2, 4}})
+}
+
+func (bal *balancerSuite) TestFixUnbalanced(c *check.C) {
+       bal.try(c, tester{
+               desired:    map[string]int{"default": 2},
+               current:    slots{2, 0},
+               shouldPull: slots{1}})
+       bal.try(c, tester{
+               desired:    map[string]int{"default": 2},
+               current:    slots{2, 7},
+               shouldPull: slots{0, 1}})
+       // if only one of the pulls succeeds, we'll see this next:
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 2},
+               current:     slots{2, 1, 7},
+               shouldPull:  slots{0},
+               shouldTrash: slots{7}})
+       // if both pulls succeed, we'll see this next:
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 2},
+               current:     slots{2, 0, 1, 7},
+               shouldTrash: slots{2, 7}})
+
+       // unbalanced + excessive replication => pull + trash
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 2},
+               current:     slots{2, 5, 7},
+               shouldPull:  slots{0, 1},
+               shouldTrash: slots{7}})
+}
+
+func (bal *balancerSuite) TestMultipleReplicasPerService(c *check.C) {
+       for _, srv := range bal.srvs {
+               for i := 0; i < 3; i++ {
+                       m := *(srv.mounts[0])
+                       srv.mounts = append(srv.mounts, &m)
+               }
+       }
+       bal.try(c, tester{
+               desired:    map[string]int{"default": 2},
+               current:    slots{0, 0},
+               shouldPull: slots{1}})
+       bal.try(c, tester{
+               desired:    map[string]int{"default": 2},
+               current:    slots{2, 2},
+               shouldPull: slots{0, 1}})
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 2},
+               current:     slots{0, 0, 1},
+               shouldTrash: slots{0}})
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 2},
+               current:     slots{1, 1, 0},
+               shouldTrash: slots{1}})
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 2},
+               current:     slots{1, 0, 1, 0, 2},
+               shouldTrash: slots{0, 1, 2}})
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 2},
+               current:     slots{1, 1, 1, 0, 2},
+               shouldTrash: slots{1, 1, 2}})
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 2},
+               current:     slots{1, 1, 2},
+               shouldPull:  slots{0},
+               shouldTrash: slots{1}})
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 2},
+               current:     slots{1, 1, 0},
+               timestamps:  []int64{12345678, 12345678, 12345679},
+               shouldTrash: nil})
+       bal.try(c, tester{
+               desired:    map[string]int{"default": 2},
+               current:    slots{1, 1},
+               shouldPull: slots{0}})
+}
+
+func (bal *balancerSuite) TestIncreaseReplTimestampCollision(c *check.C) {
+       // For purposes of increasing replication, we assume identical
+       // replicas are distinct.
+       bal.try(c, tester{
+               desired:    map[string]int{"default": 4},
+               current:    slots{0, 1},
+               timestamps: []int64{12345678, 12345678},
+               shouldPull: slots{2, 3}})
+}
+
+func (bal *balancerSuite) TestDecreaseReplTimestampCollision(c *check.C) {
+       // For purposes of decreasing replication, we assume identical
+       // replicas are NOT distinct.
+       bal.try(c, tester{
+               desired:    map[string]int{"default": 2},
+               current:    slots{0, 1, 2},
+               timestamps: []int64{12345678, 12345678, 12345678}})
+       bal.try(c, tester{
+               desired:    map[string]int{"default": 2},
+               current:    slots{0, 1, 2},
+               timestamps: []int64{12345678, 10000000, 10000000}})
+}
+
+func (bal *balancerSuite) TestDecreaseReplBlockTooNew(c *check.C) {
+       oldTime := bal.MinMtime - 3600
+       newTime := bal.MinMtime + 3600
+       // The excess replica is too new to delete.
+       bal.try(c, tester{
+               desired:    map[string]int{"default": 2},
+               current:    slots{0, 1, 2},
+               timestamps: []int64{oldTime, newTime, newTime + 1},
+               expectResult: balanceResult{
+                       have: 3,
+                       want: 2,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      2,
+                               surplus:      1,
+                               unachievable: false}}}})
+       // The best replicas are too new to delete, but the excess
+       // replica is old enough.
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 2},
+               current:     slots{0, 1, 2},
+               timestamps:  []int64{newTime, newTime + 1, oldTime},
+               shouldTrash: slots{2}})
+}
+
+func (bal *balancerSuite) TestCleanupMounts(c *check.C) {
+       bal.srvs[3].mounts[0].KeepMount.ReadOnly = true
+       bal.srvs[3].mounts[0].KeepMount.DeviceID = "abcdef"
+       bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
+       c.Check(len(bal.srvs[3].mounts), check.Equals, 1)
+       bal.cleanupMounts()
+       c.Check(len(bal.srvs[3].mounts), check.Equals, 0)
+       bal.try(c, tester{
+               known:      0,
+               desired:    map[string]int{"default": 2},
+               current:    slots{1},
+               shouldPull: slots{2}})
+}
+
+func (bal *balancerSuite) TestVolumeReplication(c *check.C) {
+       bal.srvs[0].mounts[0].KeepMount.Replication = 2  // srv 0
+       bal.srvs[14].mounts[0].KeepMount.Replication = 2 // srv e
+       bal.cleanupMounts()
+       // block 0 rendezvous is 3,e,a -- so slot 1 has repl=2
+       bal.try(c, tester{
+               known:      0,
+               desired:    map[string]int{"default": 2},
+               current:    slots{1},
+               shouldPull: slots{0}})
+       bal.try(c, tester{
+               known:      0,
+               desired:    map[string]int{"default": 2},
+               current:    slots{0, 1},
+               shouldPull: nil})
+       bal.try(c, tester{
+               known:       0,
+               desired:     map[string]int{"default": 2},
+               current:     slots{0, 1, 2},
+               shouldTrash: slots{2}})
+       bal.try(c, tester{
+               known:       0,
+               desired:     map[string]int{"default": 3},
+               current:     slots{0, 2, 3, 4},
+               shouldPull:  slots{1},
+               shouldTrash: slots{4},
+               expectResult: balanceResult{
+                       have: 4,
+                       want: 3,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      3,
+                               surplus:      1,
+                               unachievable: false}}}})
+       bal.try(c, tester{
+               known:       0,
+               desired:     map[string]int{"default": 3},
+               current:     slots{0, 1, 2, 3, 4},
+               shouldTrash: slots{2, 3, 4}})
+       bal.try(c, tester{
+               known:       0,
+               desired:     map[string]int{"default": 4},
+               current:     slots{0, 1, 2, 3, 4},
+               shouldTrash: slots{3, 4},
+               expectResult: balanceResult{
+                       have: 6,
+                       want: 4,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      4,
+                               surplus:      2,
+                               unachievable: false}}}})
+       // block 1 rendezvous is 0,9,7 -- so slot 0 has repl=2
+       bal.try(c, tester{
+               known:   1,
+               desired: map[string]int{"default": 2},
+               current: slots{0},
+               expectResult: balanceResult{
+                       have: 2,
+                       want: 2,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      2,
+                               surplus:      0,
+                               unachievable: false}}}})
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 3},
+               current:    slots{0},
+               shouldPull: slots{1}})
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 4},
+               current:    slots{0},
+               shouldPull: slots{1, 2}})
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 4},
+               current:    slots{2},
+               shouldPull: slots{0, 1}})
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 4},
+               current:    slots{7},
+               shouldPull: slots{0, 1, 2},
+               expectResult: balanceResult{
+                       have: 1,
+                       want: 4,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      4,
+                               surplus:      -3,
+                               unachievable: false}}}})
+       bal.try(c, tester{
+               known:       1,
+               desired:     map[string]int{"default": 2},
+               current:     slots{1, 2, 3, 4},
+               shouldPull:  slots{0},
+               shouldTrash: slots{3, 4}})
+       bal.try(c, tester{
+               known:       1,
+               desired:     map[string]int{"default": 2},
+               current:     slots{0, 1, 2},
+               shouldTrash: slots{1, 2},
+               expectResult: balanceResult{
+                       have: 4,
+                       want: 2,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      2,
+                               surplus:      2,
+                               unachievable: false}}}})
+}
+
+func (bal *balancerSuite) TestDeviceRWMountedByMultipleServers(c *check.C) {
+       bal.srvs[0].mounts[0].KeepMount.DeviceID = "abcdef"
+       bal.srvs[9].mounts[0].KeepMount.DeviceID = "abcdef"
+       bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
+       // block 0 belongs on servers 3 and e, which have different
+       // device IDs.
+       bal.try(c, tester{
+               known:      0,
+               desired:    map[string]int{"default": 2},
+               current:    slots{1},
+               shouldPull: slots{0}})
+       // block 1 belongs on servers 0 and 9, which both report
+       // having a replica, but the replicas are on the same device
+       // ID -- so we should pull to the third position (7).
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 2},
+               current:    slots{0, 1},
+               shouldPull: slots{2}})
+       // block 1 can be pulled to the doubly-mounted device, but the
+       // pull should only be done on the first of the two servers.
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 2},
+               current:    slots{2},
+               shouldPull: slots{0}})
+       // block 0 has one replica on a single device mounted on two
+       // servers (e,9 at positions 1,9). Trashing the replica on 9
+       // would lose the block.
+       bal.try(c, tester{
+               known:      0,
+               desired:    map[string]int{"default": 2},
+               current:    slots{1, 9},
+               shouldPull: slots{0},
+               expectResult: balanceResult{
+                       have: 1,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      2,
+                               surplus:      -1,
+                               unachievable: false}}}})
+       // block 0 is overreplicated, but the second and third
+       // replicas are the same replica according to DeviceID
+       // (despite different Mtimes). Don't trash the third replica.
+       bal.try(c, tester{
+               known:   0,
+               desired: map[string]int{"default": 2},
+               current: slots{0, 1, 9},
+               expectResult: balanceResult{
+                       have: 2,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      2,
+                               surplus:      0,
+                               unachievable: false}}}})
+       // block 0 is overreplicated; the third and fifth replicas are
+       // extra, but the fourth is another view of the second and
+       // shouldn't be trashed.
+       bal.try(c, tester{
+               known:       0,
+               desired:     map[string]int{"default": 2},
+               current:     slots{0, 1, 5, 9, 12},
+               shouldTrash: slots{5, 12},
+               expectResult: balanceResult{
+                       have: 4,
+                       classState: map[string]balancedBlockState{"default": {
+                               desired:      2,
+                               surplus:      2,
+                               unachievable: false}}}})
+}
+
+func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
+       // For known blocks 0/1/2/3, server 9 is slot 9/1/14/0 in
+       // probe order. For these tests we give it two mounts, one
+       // with classes=[special], one with
+       // classes=[special,special2].
+       bal.srvs[9].mounts = []*KeepMount{{
+               KeepMount: arvados.KeepMount{
+                       Replication:    1,
+                       StorageClasses: []string{"special"},
+                       UUID:           "zzzzz-mount-special00000009",
+                       DeviceID:       "9-special",
+               },
+               KeepService: bal.srvs[9],
+       }, {
+               KeepMount: arvados.KeepMount{
+                       Replication:    1,
+                       StorageClasses: []string{"special", "special2"},
+                       UUID:           "zzzzz-mount-special20000009",
+                       DeviceID:       "9-special-and-special2",
+               },
+               KeepService: bal.srvs[9],
+       }}
+       // For known blocks 0/1/2/3, server 13 (d) is slot 5/3/11/1 in
+       // probe order. We give it two mounts, one with
+       // classes=[special2], one with classes=[default].
+       bal.srvs[13].mounts = []*KeepMount{{
+               KeepMount: arvados.KeepMount{
+                       Replication:    1,
+                       StorageClasses: []string{"special2"},
+                       UUID:           "zzzzz-mount-special2000000d",
+                       DeviceID:       "13-special2",
+               },
+               KeepService: bal.srvs[13],
+       }, {
+               KeepMount: arvados.KeepMount{
+                       Replication:    1,
+                       StorageClasses: []string{"default"},
+                       UUID:           "zzzzz-mount-00000000000000d",
+                       DeviceID:       "13-default",
+               },
+               KeepService: bal.srvs[13],
+       }}
+       // Pull to slot 9 because that's the only server with the
+       // desired class "special".
+       bal.try(c, tester{
+               known:            0,
+               desired:          map[string]int{"default": 2, "special": 1},
+               current:          slots{0, 1},
+               shouldPull:       slots{9},
+               shouldPullMounts: []string{"zzzzz-mount-special00000009"}})
+       // If some storage classes are not satisfied, don't trash any
+       // excess replicas. (E.g., if someone desires repl=1 on
+       // class=durable, and we have two copies on class=volatile, we
+       // should wait for pull to succeed before trashing anything).
+       bal.try(c, tester{
+               known:            0,
+               desired:          map[string]int{"special": 1},
+               current:          slots{0, 1},
+               shouldPull:       slots{9},
+               shouldPullMounts: []string{"zzzzz-mount-special00000009"}})
+       // Once storage classes are satisfied, trash excess replicas
+       // that appear earlier in probe order but aren't needed to
+       // satisfy the desired classes.
+       bal.try(c, tester{
+               known:       0,
+               desired:     map[string]int{"special": 1},
+               current:     slots{0, 1, 9},
+               shouldTrash: slots{0, 1}})
+       // Pull to slot 5, the best server with class "special2".
+       bal.try(c, tester{
+               known:            0,
+               desired:          map[string]int{"special2": 1},
+               current:          slots{0, 1},
+               shouldPull:       slots{5},
+               shouldPullMounts: []string{"zzzzz-mount-special2000000d"}})
+       // Pull to slot 5 and 9 to get replication 2 in desired class
+       // "special2".
+       bal.try(c, tester{
+               known:            0,
+               desired:          map[string]int{"special2": 2},
+               current:          slots{0, 1},
+               shouldPull:       slots{5, 9},
+               shouldPullMounts: []string{"zzzzz-mount-special20000009", "zzzzz-mount-special2000000d"}})
+       // Slot 0 has a replica in "default", slot 1 has a replica
+       // in "special"; we need another replica in "default", i.e.,
+       // on slot 2.
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"default": 2, "special": 1},
+               current:    slots{0, 1},
+               shouldPull: slots{2}})
+       // Pull to best probe position 0 (despite wrong storage class)
+       // if it's impossible to achieve desired replication in the
+       // desired class (only slots 1 and 3 have special2).
+       bal.try(c, tester{
+               known:      1,
+               desired:    map[string]int{"special2": 3},
+               current:    slots{3},
+               shouldPull: slots{0, 1}})
+       // Trash excess replica.
+       bal.try(c, tester{
+               known:       3,
+               desired:     map[string]int{"special": 1},
+               current:     slots{0, 1},
+               shouldTrash: slots{1}})
+       // Leave one copy on slot 1 because slot 0 (server 9) only
+       // gives us repl=1.
+       bal.try(c, tester{
+               known:   3,
+               desired: map[string]int{"special": 2},
+               current: slots{0, 1}})
+}
+
+// Clear all servers' changesets, balance a single block, and verify
+// the appropriate changes for that block have been added to the
+// changesets.
+func (bal *balancerSuite) try(c *check.C, t tester) {
+       bal.setupLookupTables()
+       blk := &BlockState{
+               Replicas: bal.replList(t.known, t.current),
+               Desired:  t.desired,
+       }
+       for i, t := range t.timestamps {
+               blk.Replicas[i].Mtime = t
+       }
+       for _, srv := range bal.srvs {
+               srv.ChangeSet = &ChangeSet{}
+       }
+       result := bal.balanceBlock(knownBlkid(t.known), blk)
+
+       var didPull, didTrash slots
+       var didPullMounts, didTrashMounts []string
+       for i, srv := range bal.srvs {
+               var slot int
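+               // Find this server's slot (probe-order position) for
+               // the block under test.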
+               for probeOrder, srvNum := range bal.knownRendezvous[t.known] {
+                       if srvNum == i {
+                               slot = probeOrder
+                       }
+               }
+               for _, pull := range srv.Pulls {
+                       didPull = append(didPull, slot)
+                       didPullMounts = append(didPullMounts, pull.To.UUID)
+                       c.Check(pull.SizedDigest, check.Equals, knownBlkid(t.known))
+               }
+               for _, trash := range srv.Trashes {
+                       didTrash = append(didTrash, slot)
+                       didTrashMounts = append(didTrashMounts, trash.From.UUID)
+                       c.Check(trash.SizedDigest, check.Equals, knownBlkid(t.known))
+               }
+       }
+
+       for _, list := range []slots{didPull, didTrash, t.shouldPull, t.shouldTrash} {
+               sort.Sort(sort.IntSlice(list))
+       }
+       c.Check(didPull, check.DeepEquals, t.shouldPull)
+       c.Check(didTrash, check.DeepEquals, t.shouldTrash)
+       if t.shouldPullMounts != nil {
+               sort.Strings(didPullMounts)
+               c.Check(didPullMounts, check.DeepEquals, t.shouldPullMounts)
+       }
+       if t.shouldTrashMounts != nil {
+               sort.Strings(didTrashMounts)
+               c.Check(didTrashMounts, check.DeepEquals, t.shouldTrashMounts)
+       }
+       if t.expectResult.have > 0 {
+               c.Check(result.have, check.Equals, t.expectResult.have)
+       }
+       if t.expectResult.want > 0 {
+               c.Check(result.want, check.Equals, t.expectResult.want)
+       }
+       if t.expectResult.classState != nil {
+               c.Check(result.classState, check.DeepEquals, t.expectResult.classState)
+       }
+}
+
+// srvList returns the KeepServices, sorted in rendezvous order and
+// then selected by order. For example, srvList(3, slots{0, 1, 4})
+// returns the first-, second-, and fifth-best servers for storing
+// knownBlkid(3).
+func (bal *balancerSuite) srvList(knownBlockID int, order slots) (srvs []*KeepService) {
+       for _, i := range order {
+               srvs = append(srvs, bal.srvs[bal.knownRendezvous[knownBlockID][i]])
+       }
+       return
+}
+
+// replList is like srvList but returns an "existing replicas" slice,
+// suitable for a BlockState test fixture.
+func (bal *balancerSuite) replList(knownBlockID int, order slots) (repls []Replica) {
+       nextMnt := map[*KeepService]int{}
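+       // Fixture replicas are a day older than the signature TTL, so
+       // they are old enough to trash unless a test overrides Mtime.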
+       mtime := time.Now().UnixNano() - (bal.signatureTTL+86400)*1e9
+       for _, srv := range bal.srvList(knownBlockID, order) {
+               // round-robin repls onto each srv's mounts
+               n := nextMnt[srv]
+               nextMnt[srv] = (n + 1) % len(srv.mounts)
+
+               repls = append(repls, Replica{srv.mounts[n], mtime})
+               mtime++
+       }
+       return
+}
+
+// knownBlkid generates the same data hashes that are tested in
+// sdk/go/keepclient/root_sorter_test.go.
+func knownBlkid(i int) arvados.SizedDigest {
+       return arvados.SizedDigest(fmt.Sprintf("%x+64", md5.Sum([]byte(fmt.Sprintf("%064x", i)))))
+}
diff --git a/services/keep-balance/block_state.go b/services/keep-balance/block_state.go
new file mode 100644 (file)
index 0000000..46e6905
--- /dev/null
@@ -0,0 +1,119 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "sync"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+// Replica is a file on disk (or object in an S3 bucket, or blob in an
+// Azure storage container, etc.) as reported in a keepstore index
+// response.
+type Replica struct {
+       *KeepMount
+       Mtime int64
+}
+
+// BlockState indicates the desired storage class and number of
+// replicas (according to the collections we know about) and the
+// replicas actually stored (according to the keepstore indexes we
+// know about).
+type BlockState struct {
+       RefCount int
+       Replicas []Replica
+       Desired  map[string]int
+       // TODO: Support combinations of classes ("private + durable")
+       // by replacing the map[string]int with a map[*[]string]int
+       // here, where the map keys come from a pool of semantically
+       // distinct class combinations.
+       //
+       // TODO: Use a pool of semantically distinct Desired maps to
+       // conserve memory (typically there are far more BlockState
+       // objects in memory than distinct Desired profiles).
+}
+
+var defaultClasses = []string{"default"}
+
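+// addReplica records a replica of this block reported by a keepstore
+// index.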
+func (bs *BlockState) addReplica(r Replica) {
+       bs.Replicas = append(bs.Replicas, r)
+}
+
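+// increaseDesired updates Desired so that each of the given classes
+// wants at least n replicas, and increments the reference count. An
+// empty class list is treated as ["default"].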
+func (bs *BlockState) increaseDesired(classes []string, n int) {
+       bs.RefCount++
+       if len(classes) == 0 {
+               classes = defaultClasses
+       }
+       for _, class := range classes {
+               if bs.Desired == nil {
+                       bs.Desired = map[string]int{class: n}
+               } else if d, ok := bs.Desired[class]; !ok || d < n {
+                       bs.Desired[class] = n
+               }
+       }
+}
+
+// BlockStateMap is a goroutine-safe wrapper around a
+// map[arvados.SizedDigest]*BlockState.
+type BlockStateMap struct {
+       entries map[arvados.SizedDigest]*BlockState
+       mutex   sync.Mutex
+}
+
+// NewBlockStateMap returns a newly allocated BlockStateMap.
+func NewBlockStateMap() *BlockStateMap {
+       return &BlockStateMap{
+               entries: make(map[arvados.SizedDigest]*BlockState),
+       }
+}
+
+// get returns the BlockState entry for blkid, allocating a new one if
+// needed. (Private method: not goroutine-safe; callers must hold
+// bsm.mutex.)
+func (bsm *BlockStateMap) get(blkid arvados.SizedDigest) *BlockState {
+       // TODO? Allocate BlockState structs a slice at a time,
+       // instead of one at a time.
+       blk := bsm.entries[blkid]
+       if blk == nil {
+               blk = &BlockState{}
+               bsm.entries[blkid] = blk
+       }
+       return blk
+}
+
+// Apply runs f on each entry in the map.
+func (bsm *BlockStateMap) Apply(f func(arvados.SizedDigest, *BlockState)) {
+       bsm.mutex.Lock()
+       defer bsm.mutex.Unlock()
+
+       for blkid, blk := range bsm.entries {
+               f(blkid, blk)
+       }
+}
+
+// AddReplicas updates the map to indicate that mnt has a replica of
+// each block in idx.
+func (bsm *BlockStateMap) AddReplicas(mnt *KeepMount, idx []arvados.KeepServiceIndexEntry) {
+       bsm.mutex.Lock()
+       defer bsm.mutex.Unlock()
+
+       for _, ent := range idx {
+               bsm.get(ent.SizedDigest).addReplica(Replica{
+                       KeepMount: mnt,
+                       Mtime:     ent.Mtime,
+               })
+       }
+}
+
+// IncreaseDesired updates the map to indicate the desired replication
+// for the given blocks in the given storage class is at least n.
+func (bsm *BlockStateMap) IncreaseDesired(classes []string, n int, blocks []arvados.SizedDigest) {
+       bsm.mutex.Lock()
+       defer bsm.mutex.Unlock()
+
+       for _, blkid := range blocks {
+               bsm.get(blkid).increaseDesired(classes, n)
+       }
+}
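+
+// Typical call sequence (an illustrative sketch; mnt, idx, and blocks
+// are assumed to come from keepstore index and collection scans):
+//
+//     bsm := NewBlockStateMap()
+//     bsm.AddReplicas(mnt, idx)
+//     bsm.IncreaseDesired([]string{"default"}, 2, blocks)
+//     bsm.Apply(func(blkid arvados.SizedDigest, blk *BlockState) {
+//             fmt.Println(blkid, len(blk.Replicas), blk.Desired)
+//     })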
diff --git a/services/keep-balance/change_set.go b/services/keep-balance/change_set.go
new file mode 100644 (file)
index 0000000..5437f76
--- /dev/null
@@ -0,0 +1,87 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "fmt"
+       "sync"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+// Pull is a request to retrieve a block from a remote server, and
+// store it locally.
+type Pull struct {
+       arvados.SizedDigest
+       From *KeepService
+       To   *KeepMount
+}
+
+// MarshalJSON formats a pull request the way keepstore wants to see
+// it: a bare locator (no +size hint), the URL of a server to pull
+// from, and the UUID of the destination mount.
+func (p Pull) MarshalJSON() ([]byte, error) {
+       type KeepstorePullRequest struct {
+               Locator   string   `json:"locator"`
+               Servers   []string `json:"servers"`
+               MountUUID string   `json:"mount_uuid"`
+       }
+       return json.Marshal(KeepstorePullRequest{
+               Locator:   string(p.SizedDigest[:32]),
+               Servers:   []string{p.From.URLBase()},
+               MountUUID: p.To.KeepMount.UUID,
+       })
+}
+
+// Trash is a request to delete a block.
+type Trash struct {
+       arvados.SizedDigest
+       Mtime int64
+       From  *KeepMount
+}
+
+// MarshalJSON formats a trash request the way keepstore wants to see
+// it, i.e., as a bare locator with no +size hint.
+func (t Trash) MarshalJSON() ([]byte, error) {
+       type KeepstoreTrashRequest struct {
+               Locator    string `json:"locator"`
+               BlockMtime int64  `json:"block_mtime"`
+               MountUUID  string `json:"mount_uuid"`
+       }
+       return json.Marshal(KeepstoreTrashRequest{
+               Locator:    string(t.SizedDigest[:32]),
+               BlockMtime: t.Mtime,
+               MountUUID:  t.From.KeepMount.UUID,
+       })
+}
+
+// ChangeSet is a set of change requests that will be sent to a
+// keepstore server.
+type ChangeSet struct {
+       Pulls   []Pull
+       Trashes []Trash
+       mutex   sync.Mutex
+}
+
+// AddPull adds a Pull operation.
+func (cs *ChangeSet) AddPull(p Pull) {
+       cs.mutex.Lock()
+       cs.Pulls = append(cs.Pulls, p)
+       cs.mutex.Unlock()
+}
+
+// AddTrash adds a Trash operation.
+func (cs *ChangeSet) AddTrash(t Trash) {
+       cs.mutex.Lock()
+       cs.Trashes = append(cs.Trashes, t)
+       cs.mutex.Unlock()
+}
+
+// String implements fmt.Stringer.
+func (cs *ChangeSet) String() string {
+       cs.mutex.Lock()
+       defer cs.mutex.Unlock()
+       return fmt.Sprintf("ChangeSet{Pulls:%d, Trashes:%d}", len(cs.Pulls), len(cs.Trashes))
+}
diff --git a/services/keep-balance/change_set_test.go b/services/keep-balance/change_set_test.go
new file mode 100644 (file)
index 0000000..6421a4d
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&changeSetSuite{})
+
+type changeSetSuite struct{}
+
+func (s *changeSetSuite) TestJSONFormat(c *check.C) {
+       mnt := &KeepMount{
+               KeepMount: arvados.KeepMount{
+                       UUID: "zzzzz-mount-abcdefghijklmno"}}
+       srv := &KeepService{
+               KeepService: arvados.KeepService{
+                       UUID:           "zzzzz-bi6l4-000000000000001",
+                       ServiceType:    "disk",
+                       ServiceSSLFlag: false,
+                       ServiceHost:    "keep1.zzzzz.arvadosapi.com",
+                       ServicePort:    25107}}
+
+       buf, err := json.Marshal([]Pull{{
+               SizedDigest: arvados.SizedDigest("acbd18db4cc2f85cedef654fccc4a4d8+3"),
+               To:          mnt,
+               From:        srv}})
+       c.Check(err, check.IsNil)
+       c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8","servers":["http://keep1.zzzzz.arvadosapi.com:25107"],"mount_uuid":"zzzzz-mount-abcdefghijklmno"}]`)
+
+       buf, err = json.Marshal([]Trash{{
+               SizedDigest: arvados.SizedDigest("acbd18db4cc2f85cedef654fccc4a4d8+3"),
+               From:        mnt,
+               Mtime:       123456789}})
+       c.Check(err, check.IsNil)
+       c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8","block_mtime":123456789,"mount_uuid":"zzzzz-mount-abcdefghijklmno"}]`)
+}
diff --git a/services/keep-balance/collection.go b/services/keep-balance/collection.go
new file mode 100644 (file)
index 0000000..1e5fa57
--- /dev/null
@@ -0,0 +1,154 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "fmt"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+func countCollections(c *arvados.Client, params arvados.ResourceListParams) (int, error) {
+       var page arvados.CollectionList
+       var zero int
+       params.Limit = &zero
+       params.Count = "exact"
+       err := c.RequestAndDecode(&page, "GET", "arvados/v1/collections", nil, params)
+       return page.ItemsAvailable, err
+}
+
+// EachCollection calls f once for every readable
+// collection. EachCollection stops if it encounters an error, such as
+// f returning a non-nil error.
+//
+// The progress function is called periodically with done (number of
+// times f has been called) and total (number of times f is expected
+// to be called).
+//
+// If pageSize > 0 it is used as the maximum page size in each API
+// call; otherwise the maximum allowed page size is requested.
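+//
+// A minimal usage sketch (illustrative only; assumes client is a
+// configured *arvados.Client):
+//
+//     err := EachCollection(client, 0, func(coll arvados.Collection) error {
+//             fmt.Println(coll.UUID, coll.PortableDataHash)
+//             return nil
+//     }, func(done, total int) {
+//             fmt.Printf("%d/%d collections\n", done, total)
+//     })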
+func EachCollection(c *arvados.Client, pageSize int, f func(arvados.Collection) error, progress func(done, total int)) error {
+       if progress == nil {
+               progress = func(_, _ int) {}
+       }
+
+       expectCount, err := countCollections(c, arvados.ResourceListParams{
+               IncludeTrash:       true,
+               IncludeOldVersions: true,
+       })
+       if err != nil {
+               return err
+       }
+
+       limit := pageSize
+       if limit <= 0 {
+               // Use the maximum page size the server allows
+               limit = 1<<31 - 1
+       }
+       params := arvados.ResourceListParams{
+               Limit:              &limit,
+               Order:              "modified_at, uuid",
+               Count:              "none",
+               Select:             []string{"uuid", "unsigned_manifest_text", "modified_at", "portable_data_hash", "replication_desired"},
+               IncludeTrash:       true,
+               IncludeOldVersions: true,
+       }
+       var last arvados.Collection
+       var filterTime time.Time
+       callCount := 0
+       gettingExactTimestamp := false
+       for {
+               progress(callCount, expectCount)
+               var page arvados.CollectionList
+               err := c.RequestAndDecode(&page, "GET", "arvados/v1/collections", nil, params)
+               if err != nil {
+                       return err
+               }
+               for _, coll := range page.Items {
+                       if last.ModifiedAt != nil && *last.ModifiedAt == *coll.ModifiedAt && last.UUID >= coll.UUID {
+                               continue
+                       }
+                       callCount++
+                       err = f(coll)
+                       if err != nil {
+                               return err
+                       }
+                       last = coll
+               }
+               if len(page.Items) == 0 && !gettingExactTimestamp {
+                       break
+               } else if last.ModifiedAt == nil {
+                       return fmt.Errorf("BUG: Last collection on the page (%s) has no modified_at timestamp; cannot make progress", last.UUID)
+               } else if len(page.Items) > 0 && *last.ModifiedAt == filterTime {
+                       // If we requested time>=X and never got a
+                       // time>X then we might not have received all
+                       // items with time==X yet. Switch to
+                       // gettingExactTimestamp mode (if we're not
+                       // there already), advancing our UUID
+                       // threshold with each request, until we get
+                       // an empty page.
+                       gettingExactTimestamp = true
+                       params.Filters = []arvados.Filter{{
+                               Attr:     "modified_at",
+                               Operator: "=",
+                               Operand:  filterTime,
+                       }, {
+                               Attr:     "uuid",
+                               Operator: ">",
+                               Operand:  last.UUID,
+                       }}
+               } else if gettingExactTimestamp {
+                       // This must be an empty page (in this mode,
+                       // an unequal timestamp is impossible) so we
+                       // can start getting pages of newer
+                       // collections.
+                       gettingExactTimestamp = false
+                       params.Filters = []arvados.Filter{{
+                               Attr:     "modified_at",
+                               Operator: ">",
+                               Operand:  filterTime,
+                       }}
+               } else {
+                       // In the normal case, we know we have seen
+                       // all collections with modtime<filterTime,
+                       // but we might not have seen all that have
+                       // modtime=filterTime. Hence we use >= instead
+                       // of > and skip the obvious overlapping item,
+                       // i.e., the last item on the previous
+                       // page. In some edge cases this can return
+                       // collections we have already seen, but
+                       // avoiding that would add overhead in the
+                       // overwhelmingly common cases, so we don't
+                       // bother.
+                       filterTime = *last.ModifiedAt
+                       params.Filters = []arvados.Filter{{
+                               Attr:     "modified_at",
+                               Operator: ">=",
+                               Operand:  filterTime,
+                       }, {
+                               Attr:     "uuid",
+                               Operator: "!=",
+                               Operand:  last.UUID,
+                       }}
+               }
+       }
+       progress(callCount, expectCount)
+
+       if checkCount, err := countCollections(c, arvados.ResourceListParams{
+               Filters: []arvados.Filter{{
+                       Attr:     "modified_at",
+                       Operator: "<=",
+                       Operand:  filterTime}},
+               IncludeTrash:       true,
+               IncludeOldVersions: true,
+       }); err != nil {
+               return err
+       } else if callCount < checkCount {
+               return fmt.Errorf("Retrieved %d collections with modtime <= T=%q, but server now reports there are %d collections with modtime <= T", callCount, filterTime, checkCount)
+       }
+
+       return nil
+}
diff --git a/services/keep-balance/collection_test.go b/services/keep-balance/collection_test.go
new file mode 100644 (file)
index 0000000..a548b1f
--- /dev/null
@@ -0,0 +1,61 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       check "gopkg.in/check.v1"
+)
+
+// TestIdenticalTimestamps ensures EachCollection returns the same
+// set of collections for various page sizes -- even page sizes so
+// small that we get entire pages full of collections with identical
+// timestamps and exercise our gettingExactTimestamp cases.
+func (s *integrationSuite) TestIdenticalTimestamps(c *check.C) {
+       // pageSize==0 uses the default (large) page size.
+       pageSizes := []int{0, 2, 3, 4, 5}
+       got := make([][]string, len(pageSizes))
+       var wg sync.WaitGroup
+       for trial, pageSize := range pageSizes {
+               wg.Add(1)
+               go func(trial, pageSize int) {
+                       defer wg.Done()
+                       streak := 0
+                       longestStreak := 0
+                       var lastMod time.Time
+                       sawUUID := make(map[string]bool)
+                       err := EachCollection(&s.config.Client, pageSize, func(c arvados.Collection) error {
+                               got[trial] = append(got[trial], c.UUID)
+                               if c.ModifiedAt == nil {
+                                       return nil
+                               }
+                               if sawUUID[c.UUID] {
+                                       // dup
+                                       return nil
+                               }
+                               sawUUID[c.UUID] = true
+                               if lastMod == *c.ModifiedAt {
+                                       streak++
+                                       if streak > longestStreak {
+                                               longestStreak = streak
+                                       }
+                               } else {
+                                       streak = 0
+                                       lastMod = *c.ModifiedAt
+                               }
+                               return nil
+                       }, nil)
+                       c.Check(err, check.IsNil)
+                       c.Check(longestStreak > 25, check.Equals, true)
+               }(trial, pageSize)
+       }
+       wg.Wait()
+       for trial := 1; trial < len(pageSizes); trial++ {
+               c.Check(got[trial], check.DeepEquals, got[0])
+       }
+}
diff --git a/services/keep-balance/integration_test.go b/services/keep-balance/integration_test.go
new file mode 100644 (file)
index 0000000..8f5d08a
--- /dev/null
@@ -0,0 +1,102 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "os"
+       "strings"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       "github.com/sirupsen/logrus"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&integrationSuite{})
+
+type integrationSuite struct {
+       config     Config
+       keepClient *keepclient.KeepClient
+}
+
+func (s *integrationSuite) SetUpSuite(c *check.C) {
+       if testing.Short() {
+               c.Skip("-short")
+       }
+       arvadostest.ResetEnv()
+       arvadostest.StartAPI()
+       arvadostest.StartKeep(4, true)
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, check.IsNil)
+       arv.ApiToken = arvadostest.DataManagerToken
+
+       s.keepClient, err = keepclient.MakeKeepClient(arv)
+       c.Assert(err, check.IsNil)
+       s.putReplicas(c, "foo", 4)
+       s.putReplicas(c, "bar", 1)
+}
+
+func (s *integrationSuite) putReplicas(c *check.C, data string, replicas int) {
+       s.keepClient.Want_replicas = replicas
+       _, _, err := s.keepClient.PutB([]byte(data))
+       c.Assert(err, check.IsNil)
+}
+
+func (s *integrationSuite) TearDownSuite(c *check.C) {
+       if testing.Short() {
+               c.Skip("-short")
+       }
+       arvadostest.StopKeep(4)
+       arvadostest.StopAPI()
+}
+
+func (s *integrationSuite) SetUpTest(c *check.C) {
+       s.config = Config{
+               Client: arvados.Client{
+                       APIHost:   os.Getenv("ARVADOS_API_HOST"),
+                       AuthToken: arvadostest.DataManagerToken,
+                       Insecure:  true,
+               },
+               KeepServiceTypes: []string{"disk"},
+               RunPeriod:        arvados.Duration(time.Second),
+       }
+}
+
+func (s *integrationSuite) TestBalanceAPIFixtures(c *check.C) {
+       var logBuf *bytes.Buffer
+       for iter := 0; iter < 20; iter++ {
+               logBuf = &bytes.Buffer{}
+               logger := logrus.New()
+               logger.Out = logBuf
+               opts := RunOptions{
+                       CommitPulls: true,
+                       CommitTrash: true,
+                       Logger:      logger,
+               }
+
+               bal := &Balancer{
+                       Logger:  logger,
+                       Metrics: newMetrics(),
+               }
+               nextOpts, err := bal.Run(s.config, opts)
+               c.Check(err, check.IsNil)
+               c.Check(nextOpts.SafeRendezvousState, check.Not(check.Equals), "")
+               c.Check(nextOpts.CommitPulls, check.Equals, true)
+               if iter == 0 {
+                       c.Check(logBuf.String(), check.Matches, `(?ms).*ChangeSet{Pulls:1.*`)
+                       c.Check(logBuf.String(), check.Not(check.Matches), `(?ms).*ChangeSet{.*Trashes:[^0].*`)
+               } else if strings.Contains(logBuf.String(), "ChangeSet{Pulls:0") {
+                       break
+               }
+               time.Sleep(200 * time.Millisecond)
+       }
+       c.Check(logBuf.String(), check.Not(check.Matches), `(?ms).*0 replicas \(0 blocks, 0 bytes\) underreplicated.*`)
+}
diff --git a/services/keep-balance/keep-balance.service b/services/keep-balance/keep-balance.service
new file mode 100644 (file)
index 0000000..5638716
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados Keep Balance
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/keep-balance/keep-balance.yml
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/keep-balance -commit-pulls -commit-trash
+Restart=always
+RestartSec=10s
+Nice=19
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/services/keep-balance/keep_service.go b/services/keep-balance/keep_service.go
new file mode 100644 (file)
index 0000000..27d0af8
--- /dev/null
@@ -0,0 +1,106 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+// KeepService represents a keepstore server that is being rebalanced.
+type KeepService struct {
+       arvados.KeepService
+       mounts []*KeepMount
+       *ChangeSet
+}
+
+// String implements fmt.Stringer.
+func (srv *KeepService) String() string {
+       return fmt.Sprintf("%s (%s:%d, %s)", srv.UUID, srv.ServiceHost, srv.ServicePort, srv.ServiceType)
+}
+
+var ksSchemes = map[bool]string{false: "http", true: "https"}
+
+// URLBase returns scheme://host:port for this server.
+func (srv *KeepService) URLBase() string {
+       return fmt.Sprintf("%s://%s:%d", ksSchemes[srv.ServiceSSLFlag], srv.ServiceHost, srv.ServicePort)
+}
+
+// CommitPulls sends the current list of pull requests to the storage
+// server (even if the list is empty).
+func (srv *KeepService) CommitPulls(c *arvados.Client) error {
+       return srv.put(c, "pull", srv.ChangeSet.Pulls)
+}
+
+// CommitTrash sends the current list of trash requests to the storage
+// server (even if the list is empty).
+func (srv *KeepService) CommitTrash(c *arvados.Client) error {
+       return srv.put(c, "trash", srv.ChangeSet.Trashes)
+}
+
+// Perform a PUT request at path, with data (as JSON) in the request
+// body.
+func (srv *KeepService) put(c *arvados.Client, path string, data interface{}) error {
+       // We'll start a goroutine to do the JSON encoding, so we can
+       // stream it to the http client through a Pipe, rather than
+       // keeping the entire encoded version in memory.
+       jsonR, jsonW := io.Pipe()
+
+       // errC communicates any encoding errors back to our main
+       // goroutine.
+       errC := make(chan error, 1)
+
+       go func() {
+               enc := json.NewEncoder(jsonW)
+               errC <- enc.Encode(data)
+               jsonW.Close()
+       }()
+
+       url := srv.URLBase() + "/" + path
+       req, err := http.NewRequest("PUT", url, ioutil.NopCloser(jsonR))
+       if err != nil {
+               return fmt.Errorf("building request for %s: %v", url, err)
+       }
+       err = c.DoAndDecode(nil, req)
+
+       // If there was an error encoding the request body, report
+       // that instead of the response: obviously we won't get a
+       // useful response if our request wasn't properly encoded.
+       if encErr := <-errC; encErr != nil {
+               return fmt.Errorf("encoding data for %s: %v", url, encErr)
+       }
+
+       return err
+}
+
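+// The same pipe-based streaming pattern, sketched in isolation (a
+// hypothetical helper, not part of this package): the encoder writes
+// to one end of an io.Pipe while the consumer reads from the other,
+// so the encoded JSON never has to fit in memory all at once.
+//
+//	func streamJSON(v interface{}) io.Reader {
+//		r, w := io.Pipe()
+//		go func() {
+//			// CloseWithError(nil) behaves like Close; a non-nil
+//			// encoding error is surfaced to the reader side.
+//			w.CloseWithError(json.NewEncoder(w).Encode(v))
+//		}()
+//		return r
+//	}
+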
+func (srv *KeepService) discoverMounts(c *arvados.Client) error {
+       mounts, err := srv.Mounts(c)
+       if err != nil {
+               return fmt.Errorf("%s: error retrieving mounts: %v", srv, err)
+       }
+       srv.mounts = nil
+       for _, m := range mounts {
+               srv.mounts = append(srv.mounts, &KeepMount{
+                       KeepMount:   m,
+                       KeepService: srv,
+               })
+       }
+       return nil
+}
+
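+// KeepMount is a storage mount reported by a keepstore server,
+// paired with the KeepService it belongs to.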
+type KeepMount struct {
+       arvados.KeepMount
+       KeepService *KeepService
+}
+
+// String implements fmt.Stringer.
+func (mnt *KeepMount) String() string {
+       return fmt.Sprintf("%s (%s) on %s", mnt.UUID, mnt.DeviceID, mnt.KeepService)
+}
diff --git a/services/keep-balance/main.go b/services/keep-balance/main.go
new file mode 100644 (file)
index 0000000..3316a17
--- /dev/null
@@ -0,0 +1,100 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "flag"
+       "fmt"
+       "log"
+       "net/http"
+       "os"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/config"
+       "github.com/sirupsen/logrus"
+)
+
+var debugf = func(string, ...interface{}) {}
+
+func main() {
+       var cfg Config
+       var runOptions RunOptions
+
+       configPath := flag.String("config", defaultConfigPath,
+               "`path` of JSON or YAML configuration file")
+       serviceListPath := flag.String("config.KeepServiceList", "",
+               "`path` of JSON or YAML file with list of keep services to balance, as given by \"arv keep_service list\" "+
+                       "(default: config[\"KeepServiceList\"], or if none given, get all available services and filter by config[\"KeepServiceTypes\"])")
+       flag.BoolVar(&runOptions.Once, "once", false,
+               "balance once and then exit")
+       flag.BoolVar(&runOptions.CommitPulls, "commit-pulls", false,
+               "send pull requests (make more replicas of blocks that are underreplicated or are not in optimal rendezvous probe order)")
+       flag.BoolVar(&runOptions.CommitTrash, "commit-trash", false,
+               "send trash requests (delete unreferenced old blocks, and excess replicas of overreplicated blocks)")
+       dumpConfig := flag.Bool("dump-config", false, "write current configuration to stdout and exit")
+       dumpFlag := flag.Bool("dump", false, "dump details for each block to stdout")
+       debugFlag := flag.Bool("debug", false, "enable debug messages")
+       getVersion := flag.Bool("version", false, "Print version information and exit.")
+       flag.Usage = usage
+       flag.Parse()
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("keep-balance %s\n", version)
+               return
+       }
+
+       mustReadConfig(&cfg, *configPath)
+       if *serviceListPath != "" {
+               mustReadConfig(&cfg.KeepServiceList, *serviceListPath)
+       }
+
+       if *dumpConfig {
+               log.Fatal(config.DumpAndExit(cfg))
+       }
+
+       to := time.Duration(cfg.RequestTimeout)
+       if to == 0 {
+               to = 30 * time.Minute
+       }
+       arvados.DefaultSecureClient.Timeout = to
+       arvados.InsecureHTTPClient.Timeout = to
+       http.DefaultClient.Timeout = to
+
+       log.Printf("keep-balance %s started", version)
+
+       if *debugFlag {
+               debugf = log.Printf
+               if j, err := json.Marshal(cfg); err != nil {
+                       log.Fatal(err)
+               } else {
+                       log.Printf("config is %s", j)
+               }
+       }
+       if *dumpFlag {
+               runOptions.Dumper = logrus.New()
+               runOptions.Dumper.Out = os.Stdout
+               runOptions.Dumper.Formatter = &logrus.TextFormatter{}
+       }
+       srv, err := NewServer(cfg, runOptions)
+       if err != nil {
+               // don't run; the error is reported via log.Fatal below
+       } else if runOptions.Once {
+               _, err = srv.Run()
+       } else {
+               err = srv.RunForever(nil)
+       }
+       if err != nil {
+               log.Fatal(err)
+       }
+}
+
+func mustReadConfig(dst interface{}, path string) {
+       if err := config.LoadFile(dst, path); err != nil {
+               log.Fatal(err)
+       }
+}
diff --git a/services/keep-balance/main_test.go b/services/keep-balance/main_test.go
new file mode 100644 (file)
index 0000000..a280434
--- /dev/null
@@ -0,0 +1,46 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "time"
+
+       "github.com/ghodss/yaml"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&mainSuite{})
+
+type mainSuite struct{}
+
+func (s *mainSuite) TestExampleJSON(c *check.C) {
+       var config Config
+       c.Check(yaml.Unmarshal(exampleConfigFile, &config), check.IsNil)
+       c.Check(config.KeepServiceTypes, check.DeepEquals, []string{"disk"})
+       c.Check(config.Client.AuthToken, check.Equals, "xyzzy")
+       c.Check(time.Duration(config.RunPeriod), check.Equals, 600*time.Second)
+}
+
+func (s *mainSuite) TestConfigJSONWithKeepServiceList(c *check.C) {
+       var config Config
+       c.Check(yaml.Unmarshal([]byte(`{
+                   "Client": {
+                       "APIHost": "zzzzz.arvadosapi.com:443",
+                       "AuthToken": "xyzzy",
+                       "Insecure": false
+                   },
+                   "KeepServiceList": {
+                       "items": [
+                           {"uuid":"zzzzz-bi64l-abcdefghijklmno", "service_type":"disk", "service_host":"a.zzzzz.arvadosapi.com", "service_port":12345},
+                           {"uuid":"zzzzz-bi64l-bcdefghijklmnop", "service_type":"blob", "service_host":"b.zzzzz.arvadosapi.com", "service_port":12345}
+                       ]
+                   },
+                   "RunPeriod": "600s"
+               }`), &config), check.IsNil)
+       c.Assert(len(config.KeepServiceList.Items), check.Equals, 2)
+       c.Check(config.KeepServiceList.Items[0].UUID, check.Equals, "zzzzz-bi64l-abcdefghijklmno")
+       c.Check(config.KeepServiceList.Items[0].ServicePort, check.Equals, 12345)
+       c.Check(config.Client.AuthToken, check.Equals, "xyzzy")
+}
diff --git a/services/keep-balance/metrics.go b/services/keep-balance/metrics.go
new file mode 100644 (file)
index 0000000..5f3c987
--- /dev/null
@@ -0,0 +1,118 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "fmt"
+       "net/http"
+       "sync"
+
+       "github.com/prometheus/client_golang/prometheus"
+       "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+type observer interface{ Observe(float64) }
+type setter interface{ Set(float64) }
+
+type metrics struct {
+       reg         *prometheus.Registry
+       statsGauges map[string]setter
+       observers   map[string]observer
+       setupOnce   sync.Once
+       mtx         sync.Mutex
+}
+
+func newMetrics() *metrics {
+       return &metrics{
+               reg:         prometheus.NewRegistry(),
+               statsGauges: map[string]setter{},
+               observers:   map[string]observer{},
+       }
+}
+
+func (m *metrics) DurationObserver(name, help string) observer {
+       m.mtx.Lock()
+       defer m.mtx.Unlock()
+       if obs, ok := m.observers[name]; ok {
+               return obs
+       }
+       summary := prometheus.NewSummary(prometheus.SummaryOpts{
+               Namespace: "arvados",
+               Name:      name,
+               Subsystem: "keepbalance",
+               Help:      help,
+       })
+       m.reg.MustRegister(summary)
+       m.observers[name] = summary
+       return summary
+}
+
+// UpdateStats updates prometheus metrics using the given
+// balancerStats. It creates and registers the needed gauges on its
+// first invocation.
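+//
+// For example, the blocksNBytes stat named "total" is exported as
+// three gauges: arvados_keep_total_blocks, arvados_keep_total_bytes,
+// and arvados_keep_total_replicas.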
+func (m *metrics) UpdateStats(s balancerStats) {
+       type gauge struct {
+               Value interface{}
+               Help  string
+       }
+       s2g := map[string]gauge{
+               "total":             {s.current, "current backend storage usage"},
+               "garbage":           {s.garbage, "garbage (unreferenced, old)"},
+               "transient":         {s.unref, "transient (unreferenced, new)"},
+               "overreplicated":    {s.overrep, "overreplicated"},
+               "underreplicated":   {s.underrep, "underreplicated"},
+               "lost":              {s.lost, "lost"},
+               "dedup_byte_ratio":  {s.dedupByteRatio(), "deduplication ratio, bytes referenced / bytes stored"},
+               "dedup_block_ratio": {s.dedupBlockRatio(), "deduplication ratio, blocks referenced / blocks stored"},
+       }
+       m.setupOnce.Do(func() {
+               // Register gauge(s) for each balancerStats field.
+               addGauge := func(name, help string) {
+                       g := prometheus.NewGauge(prometheus.GaugeOpts{
+                               Namespace: "arvados",
+                               Name:      name,
+                               Subsystem: "keep",
+                               Help:      help,
+                       })
+                       m.reg.MustRegister(g)
+                       m.statsGauges[name] = g
+               }
+               for name, gauge := range s2g {
+                       switch gauge.Value.(type) {
+                       case blocksNBytes:
+                               for _, sub := range []string{"blocks", "bytes", "replicas"} {
+                                       addGauge(name+"_"+sub, sub+" of "+gauge.Help)
+                               }
+                       case int, int64, float64:
+                               addGauge(name, gauge.Help)
+                       default:
+                               panic(fmt.Sprintf("bad gauge type %T", gauge.Value))
+                       }
+               }
+       })
+       // Set gauges to values from s.
+       for name, gauge := range s2g {
+               switch val := gauge.Value.(type) {
+               case blocksNBytes:
+                       m.statsGauges[name+"_blocks"].Set(float64(val.blocks))
+                       m.statsGauges[name+"_bytes"].Set(float64(val.bytes))
+                       m.statsGauges[name+"_replicas"].Set(float64(val.replicas))
+               case int:
+                       m.statsGauges[name].Set(float64(val))
+               case int64:
+                       m.statsGauges[name].Set(float64(val))
+               case float64:
+                       m.statsGauges[name].Set(float64(val))
+               default:
+                       panic(fmt.Sprintf("bad gauge type %T", gauge.Value))
+               }
+       }
+}
+
+func (m *metrics) Handler(log promhttp.Logger) http.Handler {
+       return promhttp.HandlerFor(m.reg, promhttp.HandlerOpts{
+               ErrorLog: log,
+       })
+}
diff --git a/services/keep-balance/server.go b/services/keep-balance/server.go
new file mode 100644 (file)
index 0000000..613a2f7
--- /dev/null
@@ -0,0 +1,197 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "fmt"
+       "net/http"
+       "os"
+       "os/signal"
+       "syscall"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/auth"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "github.com/sirupsen/logrus"
+)
+
+var version = "dev"
+
+const (
+       defaultConfigPath = "/etc/arvados/keep-balance/keep-balance.yml"
+       rfc3339NanoFixed  = "2006-01-02T15:04:05.000000000Z07:00"
+)
+
+// Config specifies site configuration, like API credentials and the
+// choice of which servers are to be balanced.
+//
+// Config is loaded from a JSON or YAML config file (see usage()).
+type Config struct {
+       // Arvados API endpoint and credentials.
+       Client arvados.Client
+
+       // List of service types (e.g., "disk") to balance.
+       KeepServiceTypes []string
+
+       KeepServiceList arvados.KeepServiceList
+
+       // address, address:port, or :port for management interface
+       Listen string
+
+       // token for management APIs
+       ManagementToken string
+
+       // How often to check
+       RunPeriod arvados.Duration
+
+       // Number of collections to request in each API call
+       CollectionBatchSize int
+
+       // Max collections to buffer in memory (bigger values consume
+       // more memory, but can reduce store-and-forward latency when
+       // fetching pages)
+       CollectionBuffers int
+
+       // Timeout for outgoing http request/response cycle.
+       RequestTimeout arvados.Duration
+}
+
+// RunOptions controls runtime behavior. The flags/options that belong
+// here are the ones that are useful for interactive use. For example,
+// "CommitTrash" is a runtime option rather than a config item because
+// it invokes a troubleshooting feature rather than expressing how
+// balancing is meant to be done at a given site.
+//
+// RunOptions fields are controlled by command line flags.
+type RunOptions struct {
+       Once        bool
+       CommitPulls bool
+       CommitTrash bool
+       Logger      *logrus.Logger
+       Dumper      *logrus.Logger
+
+       // SafeRendezvousState from the most recent balance operation,
+       // or "" if unknown. If this changes from one run to the next,
+       // we need to watch out for races. See
+       // (*Balancer)ClearTrashLists.
+       SafeRendezvousState string
+}
+
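+// Server runs Balancers on a configured schedule, and optionally
+// serves Prometheus metrics and other management APIs at the
+// configured Listen address.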
+type Server struct {
+       config     Config
+       runOptions RunOptions
+       metrics    *metrics
+       listening  string // for tests
+
+       Logger *logrus.Logger
+       Dumper *logrus.Logger
+}
+
+// NewServer returns a new Server that runs Balancers using the given
+// config and runOptions.
+func NewServer(config Config, runOptions RunOptions) (*Server, error) {
+       if len(config.KeepServiceList.Items) > 0 && config.KeepServiceTypes != nil {
+               return nil, fmt.Errorf("cannot specify both KeepServiceList and KeepServiceTypes in config")
+       }
+       if !runOptions.Once && config.RunPeriod == arvados.Duration(0) {
+               return nil, fmt.Errorf("you must either use the -once flag, or specify RunPeriod in config")
+       }
+
+       if runOptions.Logger == nil {
+               log := logrus.New()
+               log.Formatter = &logrus.JSONFormatter{
+                       TimestampFormat: rfc3339NanoFixed,
+               }
+               log.Out = os.Stderr
+               runOptions.Logger = log
+       }
+
+       srv := &Server{
+               config:     config,
+               runOptions: runOptions,
+               metrics:    newMetrics(),
+               Logger:     runOptions.Logger,
+               Dumper:     runOptions.Dumper,
+       }
+       return srv, srv.start()
+}
+
+func (srv *Server) start() error {
+       if srv.config.Listen == "" {
+               return nil
+       }
+       server := &httpserver.Server{
+               Server: http.Server{
+                       Handler: httpserver.LogRequests(srv.Logger,
+                               auth.RequireLiteralToken(srv.config.ManagementToken,
+                                       srv.metrics.Handler(srv.Logger))),
+               },
+               Addr: srv.config.Listen,
+       }
+       err := server.Start()
+       if err != nil {
+               return err
+       }
+       srv.Logger.Printf("listening at %s", server.Addr)
+       srv.listening = server.Addr
+       return nil
+}
+
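+// Run performs a single balance operation using the server's current
+// config and run options, and returns the resulting Balancer along
+// with any error.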
+func (srv *Server) Run() (*Balancer, error) {
+       bal := &Balancer{
+               Logger:  srv.Logger,
+               Dumper:  srv.Dumper,
+               Metrics: srv.metrics,
+       }
+       var err error
+       srv.runOptions, err = bal.Run(srv.config, srv.runOptions)
+       return bal, err
+}
+
+// RunForever runs forever, or (for testing purposes) until the given
+// stop channel is ready to receive.
+func (srv *Server) RunForever(stop <-chan interface{}) error {
+       logger := srv.runOptions.Logger
+
+       ticker := time.NewTicker(time.Duration(srv.config.RunPeriod))
+
+       // The unbuffered channel here means we only hear SIGUSR1 if
+       // it arrives while we're waiting in select{}.
+       sigUSR1 := make(chan os.Signal)
+       signal.Notify(sigUSR1, syscall.SIGUSR1)
+
+       logger.Printf("starting up: will scan every %v and on SIGUSR1", srv.config.RunPeriod)
+
+       for {
+               if !srv.runOptions.CommitPulls && !srv.runOptions.CommitTrash {
+                       logger.Print("WARNING: Will scan periodically, but no changes will be committed.")
+                       logger.Print("=======  Consider using -commit-pulls and -commit-trash flags.")
+               }
+
+               _, err := srv.Run()
+               if err != nil {
+                       logger.Print("run failed: ", err)
+               } else {
+                       logger.Print("run succeeded")
+               }
+
+               select {
+               case <-stop:
+                       signal.Stop(sigUSR1)
+                       return nil
+               case <-ticker.C:
+                       logger.Print("timer went off")
+               case <-sigUSR1:
+                       logger.Print("received SIGUSR1, resetting timer")
+                       // Reset the timer so we don't start the N+1st
+                       // run too soon after the Nth run is triggered
+                       // by SIGUSR1.
+                       ticker.Stop()
+                       ticker = time.NewTicker(time.Duration(srv.config.RunPeriod))
+               }
+               logger.Print("starting next run")
+       }
+}
diff --git a/services/keep-balance/usage.go b/services/keep-balance/usage.go
new file mode 100644 (file)
index 0000000..b39e839
--- /dev/null
@@ -0,0 +1,106 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "flag"
+       "fmt"
+       "os"
+)
+
+var exampleConfigFile = []byte(`
+Client:
+    APIHost: zzzzz.arvadosapi.com:443
+    AuthToken: xyzzy
+    Insecure: false
+KeepServiceTypes:
+    - disk
+Listen: ":9005"
+ManagementToken: xyzzy
+RunPeriod: 600s
+CollectionBatchSize: 100000
+CollectionBuffers: 1000
+RequestTimeout: 30m`)
+
+func usage() {
+       fmt.Fprintf(os.Stderr, `
+
+keep-balance rebalances a set of keepstore servers. It creates new
+copies of underreplicated blocks, deletes excess copies of
+overreplicated and unreferenced blocks, and moves blocks to better
+positions (according to the rendezvous hash algorithm) so clients find
+them faster.
+
+Usage: keep-balance [options]
+
+Options:
+`)
+       flag.PrintDefaults()
+       fmt.Fprintf(os.Stderr, `
+Example config file:
+%s
+
+    Client.AuthToken must be recognized by Arvados as an admin token,
+    and must be recognized by all Keep services as a "data manager
+    key".
+
+    Client.Insecure should be true if your Arvados API endpoint uses
+    an unverifiable SSL/TLS certificate.
+
+Periodic scanning:
+
+    By default, keep-balance operates periodically: it does a
+    scan/balance operation, sleeps, and repeats.
+
+    RunPeriod determines the interval between start times of
+    successive scan/balance operations. If a scan/balance operation
+    takes longer than RunPeriod, the next one will follow it
+    immediately.
+
+    If SIGUSR1 is received during an idle period between operations,
+    the next operation will start immediately.
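+    For example, running "kill -USR1 $(pidof keep-balance)" from a
+    shell requests an immediate scan.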
+
+One-time scanning:
+
+    Use the -once flag to do a single operation and then exit. The
+    exit code will be zero if the operation was successful.
+
+Committing:
+
+    By default, keep-balance computes and reports changes but does not
+    implement them by sending pull and trash lists to the Keep
+    services.
+
+    Use the -commit-pulls and -commit-trash flags to implement the
+    computed changes.
+
+Tuning resource usage:
+
+    CollectionBatchSize limits the number of collections retrieved per
+    API transaction. If this is zero or omitted, page size is
+    determined by the API server's own page size limits (see
+    max_items_per_response and max_index_database_read configs).
+
+    CollectionBuffers sets the size of an internal queue of
+    collections. Higher values use more memory, and improve throughput
+    by allowing keep-balance to fetch the next page of collections
+    while the current page is still being processed. If this is zero
+    or omitted, pages are processed serially.
+
+    RequestTimeout is the maximum time keep-balance will spend on a
+    single HTTP request (getting a page of collections, getting the
+    block index from a keepstore server, or sending a trash or pull
+    list to a keepstore server). Defaults to 30 minutes.
+
+Limitations:
+
+    keep-balance does not attempt to discover whether committed pull
+    and trash requests ever get carried out -- only that they are
+    accepted by the Keep services. If some services are full, new
+    copies of underreplicated blocks might never get made, only
+    repeatedly requested.
+
+`, exampleConfigFile)
+}
diff --git a/services/keep-web/.gitignore b/services/keep-web/.gitignore
new file mode 100644 (file)
index 0000000..53997c2
--- /dev/null
@@ -0,0 +1 @@
+keep-web
diff --git a/services/keep-web/cache.go b/services/keep-web/cache.go
new file mode 100644 (file)
index 0000000..8336b78
--- /dev/null
@@ -0,0 +1,332 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "github.com/hashicorp/golang-lru"
+       "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricsUpdateInterval = time.Second / 10
+
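+// cache sits between keep-web and the collections API, keeping LRU
+// caches of UUID-to-PDH mappings, permission checks, and collection
+// manifests, each entry with its own expiry time.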
+type cache struct {
+       TTL                  arvados.Duration
+       UUIDTTL              arvados.Duration
+       MaxCollectionEntries int
+       MaxCollectionBytes   int64
+       MaxPermissionEntries int
+       MaxUUIDEntries       int
+
+       registry    *prometheus.Registry
+       metrics     cacheMetrics
+       pdhs        *lru.TwoQueueCache
+       collections *lru.TwoQueueCache
+       permissions *lru.TwoQueueCache
+       setupOnce   sync.Once
+}
+
+type cacheMetrics struct {
+       requests          prometheus.Counter
+       collectionBytes   prometheus.Gauge
+       collectionEntries prometheus.Gauge
+       collectionHits    prometheus.Counter
+       pdhHits           prometheus.Counter
+       permissionHits    prometheus.Counter
+       apiCalls          prometheus.Counter
+}
+
+func (m *cacheMetrics) setup(reg *prometheus.Registry) {
+       m.requests = prometheus.NewCounter(prometheus.CounterOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "requests",
+               Help:      "Number of targetID-to-manifest lookups handled.",
+       })
+       reg.MustRegister(m.requests)
+       m.collectionHits = prometheus.NewCounter(prometheus.CounterOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "hits",
+               Help:      "Number of pdh-to-manifest cache hits.",
+       })
+       reg.MustRegister(m.collectionHits)
+       m.pdhHits = prometheus.NewCounter(prometheus.CounterOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "pdh_hits",
+               Help:      "Number of uuid-to-pdh cache hits.",
+       })
+       reg.MustRegister(m.pdhHits)
+       m.permissionHits = prometheus.NewCounter(prometheus.CounterOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "permission_hits",
+               Help:      "Number of targetID-to-permission cache hits.",
+       })
+       reg.MustRegister(m.permissionHits)
+       m.apiCalls = prometheus.NewCounter(prometheus.CounterOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "api_calls",
+               Help:      "Number of outgoing API calls made by cache.",
+       })
+       reg.MustRegister(m.apiCalls)
+       m.collectionBytes = prometheus.NewGauge(prometheus.GaugeOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "cached_manifest_bytes",
+               Help:      "Total size of all manifests in cache.",
+       })
+       reg.MustRegister(m.collectionBytes)
+       m.collectionEntries = prometheus.NewGauge(prometheus.GaugeOpts{
+               Namespace: "arvados",
+               Subsystem: "keepweb_collectioncache",
+               Name:      "cached_manifests",
+               Help:      "Number of manifests in cache.",
+       })
+       reg.MustRegister(m.collectionEntries)
+}
+
+type cachedPDH struct {
+       expire time.Time
+       pdh    string
+}
+
+type cachedCollection struct {
+       expire     time.Time
+       collection *arvados.Collection
+}
+
+type cachedPermission struct {
+       expire time.Time
+}
+
+func (c *cache) setup() {
+       var err error
+       c.pdhs, err = lru.New2Q(c.MaxUUIDEntries)
+       if err != nil {
+               panic(err)
+       }
+       c.collections, err = lru.New2Q(c.MaxCollectionEntries)
+       if err != nil {
+               panic(err)
+       }
+       c.permissions, err = lru.New2Q(c.MaxPermissionEntries)
+       if err != nil {
+               panic(err)
+       }
+
+       reg := c.registry
+       if reg == nil {
+               reg = prometheus.NewRegistry()
+       }
+       c.metrics.setup(reg)
+       go func() {
+               for range time.Tick(metricsUpdateInterval) {
+                       c.updateGauges()
+               }
+       }()
+}
+
+func (c *cache) updateGauges() {
+       c.metrics.collectionBytes.Set(float64(c.collectionBytes()))
+       c.metrics.collectionEntries.Set(float64(c.collections.Len()))
+}
+
+var selectPDH = map[string]interface{}{
+       "select": []string{"portable_data_hash"},
+}
+
+// Update saves a modified version (fs) to an existing collection
+// (coll) and, if successful, updates the relevant cache entries so
+// subsequent calls to Get() reflect the modifications.
+func (c *cache) Update(client *arvados.Client, coll arvados.Collection, fs arvados.CollectionFileSystem) error {
+       c.setupOnce.Do(c.setup)
+
+       if m, err := fs.MarshalManifest("."); err != nil || m == coll.ManifestText {
+               return err
+       } else {
+               coll.ManifestText = m
+       }
+       var updated arvados.Collection
+       defer c.pdhs.Remove(coll.UUID)
+       err := client.RequestAndDecode(&updated, "PATCH", "arvados/v1/collections/"+coll.UUID, client.UpdateBody(coll), nil)
+       if err == nil {
+               c.collections.Add(client.AuthToken+"\000"+coll.PortableDataHash, &cachedCollection{
+                       expire:     time.Now().Add(time.Duration(c.TTL)),
+                       collection: &updated,
+               })
+       }
+       return err
+}
+
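+// Get returns the collection with the given targetID (a UUID or
+// portable data hash). Cached permission checks, UUID-to-PDH
+// mappings, and manifests are used when still valid; forceReload
+// bypasses the permission cache.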
+func (c *cache) Get(arv *arvadosclient.ArvadosClient, targetID string, forceReload bool) (*arvados.Collection, error) {
+       c.setupOnce.Do(c.setup)
+       c.metrics.requests.Inc()
+
+       permOK := false
+       permKey := arv.ApiToken + "\000" + targetID
+       if forceReload {
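+               // Don't use the cached permission check; the API
+               // call below will re-verify this token's access.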
+       } else if ent, cached := c.permissions.Get(permKey); cached {
+               ent := ent.(*cachedPermission)
+               if ent.expire.Before(time.Now()) {
+                       c.permissions.Remove(permKey)
+               } else {
+                       permOK = true
+                       c.metrics.permissionHits.Inc()
+               }
+       }
+
+       var pdh string
+       if arvadosclient.PDHMatch(targetID) {
+               pdh = targetID
+       } else if ent, cached := c.pdhs.Get(targetID); cached {
+               ent := ent.(*cachedPDH)
+               if ent.expire.Before(time.Now()) {
+                       c.pdhs.Remove(targetID)
+               } else {
+                       pdh = ent.pdh
+                       c.metrics.pdhHits.Inc()
+               }
+       }
+
+       var collection *arvados.Collection
+       if pdh != "" {
+               collection = c.lookupCollection(arv.ApiToken + "\000" + pdh)
+       }
+
+       if collection != nil && permOK {
+               return collection, nil
+       } else if collection != nil {
+               // Ask API for current PDH for this targetID. Most
+               // likely, the cached PDH is still correct; if so,
+               // _and_ the current token has permission, we can
+               // use our cached manifest.
+               c.metrics.apiCalls.Inc()
+               var current arvados.Collection
+               err := arv.Get("collections", targetID, selectPDH, &current)
+               if err != nil {
+                       return nil, err
+               }
+               if current.PortableDataHash == pdh {
+                       c.permissions.Add(permKey, &cachedPermission{
+                               expire: time.Now().Add(time.Duration(c.TTL)),
+                       })
+                       if pdh != targetID {
+                               c.pdhs.Add(targetID, &cachedPDH{
+                                       expire: time.Now().Add(time.Duration(c.UUIDTTL)),
+                                       pdh:    pdh,
+                               })
+                       }
+                       return collection, err
+               } else {
+                       // PDH changed, but now we know we have
+                       // permission -- and maybe we already have the
+                       // new PDH in the cache.
+                       if coll := c.lookupCollection(arv.ApiToken + "\000" + current.PortableDataHash); coll != nil {
+                               return coll, nil
+                       }
+               }
+       }
+
+       // Collection manifest is not cached.
+       c.metrics.apiCalls.Inc()
+       err := arv.Get("collections", targetID, nil, &collection)
+       if err != nil {
+               return nil, err
+       }
+       exp := time.Now().Add(time.Duration(c.TTL))
+       c.permissions.Add(permKey, &cachedPermission{
+               expire: exp,
+       })
+       c.pdhs.Add(targetID, &cachedPDH{
+               expire: time.Now().Add(time.Duration(c.UUIDTTL)),
+               pdh:    collection.PortableDataHash,
+       })
+       c.collections.Add(arv.ApiToken+"\000"+collection.PortableDataHash, &cachedCollection{
+               expire:     exp,
+               collection: collection,
+       })
+       if int64(len(collection.ManifestText)) > c.MaxCollectionBytes/int64(c.MaxCollectionEntries) {
+               go c.pruneCollections()
+       }
+       return collection, nil
+}
+
+// pruneCollections checks the total bytes occupied by manifest_text
+// in the collection cache and removes old entries as needed to bring
+// the total size down to CollectionBytes. It also deletes all expired
+// entries.
+//
+// pruneCollections does not aim to be perfectly correct when there is
+// concurrent cache activity.
+func (c *cache) pruneCollections() {
+       var size int64
+       now := time.Now()
+       keys := c.collections.Keys()
+       entsize := make([]int, len(keys))
+       expired := make([]bool, len(keys))
+       for i, k := range keys {
+               v, ok := c.collections.Peek(k)
+               if !ok {
+                       continue
+               }
+               ent := v.(*cachedCollection)
+               n := len(ent.collection.ManifestText)
+               size += int64(n)
+               entsize[i] = n
+               expired[i] = ent.expire.Before(now)
+       }
+       for i, k := range keys {
+               if expired[i] {
+                       c.collections.Remove(k)
+                       size -= int64(entsize[i])
+               }
+       }
+       for i, k := range keys {
+               if size <= c.MaxCollectionBytes {
+                       break
+               }
+               if expired[i] {
+                       // already removed this entry in the previous loop
+                       continue
+               }
+               c.collections.Remove(k)
+               size -= int64(entsize[i])
+       }
+}
+
+// collectionBytes returns the approximate memory size of the
+// collection cache.
+func (c *cache) collectionBytes() uint64 {
+       var size uint64
+       for _, k := range c.collections.Keys() {
+               v, ok := c.collections.Peek(k)
+               if !ok {
+                       continue
+               }
+               size += uint64(len(v.(*cachedCollection).collection.ManifestText))
+       }
+       return size
+}
+
+func (c *cache) lookupCollection(key string) *arvados.Collection {
+       e, cached := c.collections.Get(key)
+       if !cached {
+               return nil
+       }
+       ent := e.(*cachedCollection)
+       if ent.expire.Before(time.Now()) {
+               c.collections.Remove(key)
+               return nil
+       }
+       c.metrics.collectionHits.Inc()
+       return ent.collection
+}
diff --git a/services/keep-web/cache_test.go b/services/keep-web/cache_test.go
new file mode 100644 (file)
index 0000000..d147573
--- /dev/null
@@ -0,0 +1,151 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "github.com/prometheus/client_golang/prometheus"
+       "github.com/prometheus/common/expfmt"
+       "gopkg.in/check.v1"
+)
+
+func (s *UnitSuite) checkCacheMetrics(c *check.C, reg *prometheus.Registry, regs ...string) {
+       mfs, err := reg.Gather()
+       c.Check(err, check.IsNil)
+       buf := &bytes.Buffer{}
+       enc := expfmt.NewEncoder(buf, expfmt.FmtText)
+       for _, mf := range mfs {
+               c.Check(enc.Encode(mf), check.IsNil)
+       }
+       mm := buf.String()
+       for _, reg := range regs {
+               c.Check(mm, check.Matches, `(?ms).*collectioncache_`+reg+`\n.*`)
+       }
+}
+
+func (s *UnitSuite) TestCache(c *check.C) {
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, check.Equals, nil)
+
+       cache := DefaultConfig().Cache
+       cache.registry = prometheus.NewRegistry()
+
+       // Hit the same collection 5 times using the same token. Only
+       // the first req should cause an API call; the next 4 should
+       // hit all caches.
+       arv.ApiToken = arvadostest.AdminToken
+       var coll *arvados.Collection
+       for i := 0; i < 5; i++ {
+               coll, err = cache.Get(arv, arvadostest.FooCollection, false)
+               c.Check(err, check.Equals, nil)
+               c.Assert(coll, check.NotNil)
+               c.Check(coll.PortableDataHash, check.Equals, arvadostest.FooPdh)
+               c.Check(coll.ManifestText[:2], check.Equals, ". ")
+       }
+       s.checkCacheMetrics(c, cache.registry,
+               "requests 5",
+               "hits 4",
+               "permission_hits 4",
+               "pdh_hits 4",
+               "api_calls 1")
+
+       // Hit the same collection 2 more times, this time requesting
+       // it by PDH and using a different token. The first req should
+       // miss the permission cache and fetch the new manifest; the
+       // second should hit the Collection cache and skip the API
+       // lookup.
+       arv.ApiToken = arvadostest.ActiveToken
+
+       coll2, err := cache.Get(arv, arvadostest.FooPdh, false)
+       c.Check(err, check.Equals, nil)
+       c.Assert(coll2, check.NotNil)
+       c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooPdh)
+       c.Check(coll2.ManifestText[:2], check.Equals, ". ")
+       c.Check(coll2.ManifestText, check.Not(check.Equals), coll.ManifestText)
+
+       s.checkCacheMetrics(c, cache.registry,
+               "requests 6",
+               "hits 4",
+               "permission_hits 4",
+               "pdh_hits 4",
+               "api_calls 2")
+
+       coll2, err = cache.Get(arv, arvadostest.FooPdh, false)
+       c.Check(err, check.Equals, nil)
+       c.Assert(coll2, check.NotNil)
+       c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooPdh)
+       c.Check(coll2.ManifestText[:2], check.Equals, ". ")
+
+       s.checkCacheMetrics(c, cache.registry,
+               "requests 7",
+               "hits 5",
+               "permission_hits 5",
+               "pdh_hits 4",
+               "api_calls 2")
+
+       // Alternating between two collections N times should produce
+       // only 2 more API calls.
+       arv.ApiToken = arvadostest.AdminToken
+       for i := 0; i < 20; i++ {
+               var target string
+               if i%2 == 0 {
+                       target = arvadostest.HelloWorldCollection
+               } else {
+                       target = arvadostest.FooBarDirCollection
+               }
+               _, err := cache.Get(arv, target, false)
+               c.Check(err, check.Equals, nil)
+       }
+       s.checkCacheMetrics(c, cache.registry,
+               "requests 27",
+               "hits 23",
+               "permission_hits 23",
+               "pdh_hits 22",
+               "api_calls 4")
+}
+
+func (s *UnitSuite) TestCacheForceReloadByPDH(c *check.C) {
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, check.Equals, nil)
+
+       cache := DefaultConfig().Cache
+       cache.registry = prometheus.NewRegistry()
+
+       for _, forceReload := range []bool{false, true, false, true} {
+               _, err := cache.Get(arv, arvadostest.FooPdh, forceReload)
+               c.Check(err, check.Equals, nil)
+       }
+
+       s.checkCacheMetrics(c, cache.registry,
+               "requests 4",
+               "hits 3",
+               "permission_hits 1",
+               "pdh_hits 0",
+               "api_calls 3")
+}
+
+func (s *UnitSuite) TestCacheForceReloadByUUID(c *check.C) {
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, check.Equals, nil)
+
+       cache := DefaultConfig().Cache
+       cache.registry = prometheus.NewRegistry()
+
+       for _, forceReload := range []bool{false, true, false, true} {
+               _, err := cache.Get(arv, arvadostest.FooCollection, forceReload)
+               c.Check(err, check.Equals, nil)
+       }
+
+       s.checkCacheMetrics(c, cache.registry,
+               "requests 4",
+               "hits 3",
+               "permission_hits 1",
+               "pdh_hits 3",
+               "api_calls 3")
+}
diff --git a/services/keep-web/cadaver_test.go b/services/keep-web/cadaver_test.go
new file mode 100644 (file)
index 0000000..44d0b0f
--- /dev/null
@@ -0,0 +1,365 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/url"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+func (s *IntegrationSuite) TestCadaverHTTPAuth(c *check.C) {
+       s.testCadaver(c, arvadostest.ActiveToken, func(newCollection arvados.Collection) (string, string, string) {
+               r := "/c=" + arvadostest.FooAndBarFilesInDirUUID + "/"
+               w := "/c=" + newCollection.UUID + "/"
+               pdh := "/c=" + strings.Replace(arvadostest.FooAndBarFilesInDirPDH, "+", "-", -1) + "/"
+               return r, w, pdh
+       }, nil)
+}
+
+func (s *IntegrationSuite) TestCadaverPathAuth(c *check.C) {
+       s.testCadaver(c, "", func(newCollection arvados.Collection) (string, string, string) {
+               r := "/c=" + arvadostest.FooAndBarFilesInDirUUID + "/t=" + arvadostest.ActiveToken + "/"
+               w := "/c=" + newCollection.UUID + "/t=" + arvadostest.ActiveToken + "/"
+               pdh := "/c=" + strings.Replace(arvadostest.FooAndBarFilesInDirPDH, "+", "-", -1) + "/t=" + arvadostest.ActiveToken + "/"
+               return r, w, pdh
+       }, nil)
+}
+
+func (s *IntegrationSuite) TestCadaverUserProject(c *check.C) {
+       rpath := "/users/active/foo_file_in_dir/"
+       s.testCadaver(c, arvadostest.ActiveToken, func(newCollection arvados.Collection) (string, string, string) {
+               wpath := "/users/active/" + newCollection.Name
+               pdh := "/c=" + strings.Replace(arvadostest.FooAndBarFilesInDirPDH, "+", "-", -1) + "/"
+               return rpath, wpath, pdh
+       }, func(path string) bool {
+               // Skip tests that rely on writes, because /users/
+               // tree is read-only.
+               return !strings.HasPrefix(path, rpath) || strings.HasPrefix(path, rpath+"_/")
+       })
+}
+
+func (s *IntegrationSuite) testCadaver(c *check.C, password string, pathFunc func(arvados.Collection) (string, string, string), skip func(string) bool) {
+       s.testServer.Config.AnonymousTokens = []string{arvadostest.AnonymousToken}
+
+       testdata := []byte("the human tragedy consists in the necessity of living with the consequences of actions performed under the pressure of compulsions we do not understand")
+
+       tempdir, err := ioutil.TempDir("", "keep-web-test-")
+       c.Assert(err, check.IsNil)
+       defer os.RemoveAll(tempdir)
+
+       localfile, err := ioutil.TempFile(tempdir, "localfile")
+       c.Assert(err, check.IsNil)
+       localfile.Write(testdata)
+
+       emptyfile, err := ioutil.TempFile(tempdir, "emptyfile")
+       c.Assert(err, check.IsNil)
+
+       checkfile, err := ioutil.TempFile(tempdir, "checkfile")
+       c.Assert(err, check.IsNil)
+
+       var newCollection arvados.Collection
+       arv := arvados.NewClientFromEnv()
+       arv.AuthToken = arvadostest.ActiveToken
+       err = arv.RequestAndDecode(&newCollection, "POST", "arvados/v1/collections", bytes.NewBufferString(url.Values{"collection": {"{}"}}.Encode()), nil)
+       c.Assert(err, check.IsNil)
+
+       readPath, writePath, pdhPath := pathFunc(newCollection)
+
+       matchToday := time.Now().Format("Jan +2")
+
+       type testcase struct {
+               path  string
+               cmd   string
+               match string
+               data  []byte
+       }
+       for _, trial := range []testcase{
+               {
+                       path:  readPath,
+                       cmd:   "ls\n",
+                       match: `(?ms).*dir1 *0 .*`,
+               },
+               {
+                       path:  readPath,
+                       cmd:   "ls dir1\n",
+                       match: `(?ms).*bar *3.*foo *3 .*`,
+               },
+               {
+                       path:  readPath + "_/dir1",
+                       cmd:   "ls\n",
+                       match: `(?ms).*bar *3.*foo *3 .*`,
+               },
+               {
+                       path:  readPath + "dir1/",
+                       cmd:   "ls\n",
+                       match: `(?ms).*bar *3.*foo +3 +Feb +\d+ +2014.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "get emptyfile '" + checkfile.Name() + "'\n",
+                       match: `(?ms).*Not Found.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "put '" + emptyfile.Name() + "' emptyfile\n",
+                       match: `(?ms).*Uploading .* succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "get emptyfile '" + checkfile.Name() + "'\n",
+                       match: `(?ms).*Downloading .* succeeded.*`,
+                       data:  []byte{},
+               },
+               {
+                       path:  writePath,
+                       cmd:   "put '" + localfile.Name() + "' testfile\n",
+                       match: `(?ms).*Uploading .* succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "get testfile '" + checkfile.Name() + "'\n",
+                       match: `(?ms).*succeeded.*`,
+                       data:  testdata,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "move testfile newdir0/\n",
+                       match: `(?ms).*Moving .* succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "move testfile newdir0/\n",
+                       match: `(?ms).*Moving .* failed.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "lock newdir0/testfile\n",
+                       match: `(?ms).*Locking .* succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "unlock newdir0/testfile\nasdf\n",
+                       match: `(?ms).*Unlocking .* succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "ls\n",
+                       match: `(?ms).*newdir0.* 0 +` + matchToday + ` \d+:\d+\n.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "move newdir0/testfile emptyfile/bogus/\n",
+                       match: `(?ms).*Moving .* failed.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "mkcol newdir1\n",
+                       match: `(?ms).*Creating .* succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "move newdir1/ newdir1x/\n",
+                       match: `(?ms).*Moving .* succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "move newdir1x newdir1\n",
+                       match: `(?ms).*Moving .* succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "move newdir0/testfile newdir1/\n",
+                       match: `(?ms).*Moving .* succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "move newdir1 newdir1/\n",
+                       match: `(?ms).*Moving .* failed.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "get newdir1/testfile '" + checkfile.Name() + "'\n",
+                       match: `(?ms).*succeeded.*`,
+                       data:  testdata,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "put '" + localfile.Name() + "' newdir1/testfile1\n",
+                       match: `(?ms).*Uploading .* succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "mkcol newdir2\n",
+                       match: `(?ms).*Creating .* succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "put '" + localfile.Name() + "' newdir2/testfile2\n",
+                       match: `(?ms).*Uploading .* succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "copy newdir2/testfile2 testfile3\n",
+                       match: `(?ms).*succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "get testfile3 '" + checkfile.Name() + "'\n",
+                       match: `(?ms).*succeeded.*`,
+                       data:  testdata,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "get newdir2/testfile2 '" + checkfile.Name() + "'\n",
+                       match: `(?ms).*succeeded.*`,
+                       data:  testdata,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "rmcol newdir2\n",
+                       match: `(?ms).*Deleting collection .* succeeded.*`,
+               },
+               {
+                       path:  writePath,
+                       cmd:   "get newdir2/testfile2 '" + checkfile.Name() + "'\n",
+                       match: `(?ms).*Downloading .* failed.*`,
+               },
+               {
+                       path:  "/c=" + arvadostest.UserAgreementCollection + "/t=" + arv.AuthToken + "/",
+                       cmd:   "put '" + localfile.Name() + "' foo\n",
+                       match: `(?ms).*Uploading .* failed:.*403 Forbidden.*`,
+               },
+               {
+                       path:  pdhPath,
+                       cmd:   "put '" + localfile.Name() + "' foo\n",
+                       match: `(?ms).*Uploading .* failed:.*405 Method Not Allowed.*`,
+               },
+               {
+                       path:  pdhPath,
+                       cmd:   "move foo bar\n",
+                       match: `(?ms).*Moving .* failed:.*405 Method Not Allowed.*`,
+               },
+               {
+                       path:  pdhPath,
+                       cmd:   "copy foo bar\n",
+                       match: `(?ms).*Copying .* failed:.*405 Method Not Allowed.*`,
+               },
+               {
+                       path:  pdhPath,
+                       cmd:   "delete foo\n",
+                       match: `(?ms).*Deleting .* failed:.*405 Method Not Allowed.*`,
+               },
+               {
+                       path:  pdhPath,
+                       cmd:   "lock foo\n",
+                       match: `(?ms).*Locking .* failed:.*405 Method Not Allowed.*`,
+               },
+       } {
+               c.Logf("%s %+v", "http://"+s.testServer.Addr, trial)
+               if skip != nil && skip(trial.path) {
+                       c.Log("(skip)")
+                       continue
+               }
+
+               os.Remove(checkfile.Name())
+
+               stdout := s.runCadaver(c, password, trial.path, trial.cmd)
+               c.Check(stdout, check.Matches, trial.match)
+
+               if trial.data == nil {
+                       continue
+               }
+               checkfile, err = os.Open(checkfile.Name())
+               c.Assert(err, check.IsNil)
+               _, err = checkfile.Seek(0, io.SeekStart)
+               c.Assert(err, check.IsNil)
+               got, err := ioutil.ReadAll(checkfile)
+               c.Check(got, check.DeepEquals, trial.data)
+               c.Check(err, check.IsNil)
+       }
+}
+
+func (s *IntegrationSuite) TestCadaverByID(c *check.C) {
+       for _, path := range []string{"/by_id", "/by_id/"} {
+               stdout := s.runCadaver(c, arvadostest.ActiveToken, path, "ls")
+               c.Check(stdout, check.Matches, `(?ms).*collection is empty.*`)
+       }
+       for _, path := range []string{
+               "/by_id/" + arvadostest.FooPdh,
+               "/by_id/" + arvadostest.FooPdh + "/",
+               "/by_id/" + arvadostest.FooCollection,
+               "/by_id/" + arvadostest.FooCollection + "/",
+       } {
+               stdout := s.runCadaver(c, arvadostest.ActiveToken, path, "ls")
+               c.Check(stdout, check.Matches, `(?ms).*\s+foo\s+3 .*`)
+       }
+}
+
+func (s *IntegrationSuite) TestCadaverUsersDir(c *check.C) {
+       for _, path := range []string{"/"} {
+               stdout := s.runCadaver(c, arvadostest.ActiveToken, path, "ls")
+               c.Check(stdout, check.Matches, `(?ms).*Coll:\s+by_id\s+0 .*`)
+               c.Check(stdout, check.Matches, `(?ms).*Coll:\s+users\s+0 .*`)
+       }
+       for _, path := range []string{"/users", "/users/"} {
+               stdout := s.runCadaver(c, arvadostest.ActiveToken, path, "ls")
+               c.Check(stdout, check.Matches, `(?ms).*Coll:\s+active.*`)
+       }
+       for _, path := range []string{"/users/active", "/users/active/"} {
+               stdout := s.runCadaver(c, arvadostest.ActiveToken, path, "ls")
+               c.Check(stdout, check.Matches, `(?ms).*Coll:\s+A Project\s+0 .*`)
+               c.Check(stdout, check.Matches, `(?ms).*Coll:\s+bar_file\s+0 .*`)
+       }
+       for _, path := range []string{"/users/admin", "/users/doesnotexist", "/users/doesnotexist/"} {
+               stdout := s.runCadaver(c, arvadostest.ActiveToken, path, "ls")
+               c.Check(stdout, check.Matches, `(?ms).*404 Not Found.*`)
+       }
+}
+
+func (s *IntegrationSuite) runCadaver(c *check.C, password, path, stdin string) string {
+       tempdir, err := ioutil.TempDir("", "keep-web-test-")
+       c.Assert(err, check.IsNil)
+       defer os.RemoveAll(tempdir)
+
+       cmd := exec.Command("cadaver", "http://"+s.testServer.Addr+path)
+       if password != "" {
+               // cadaver won't try username/password authentication
+               // unless the server responds 401 to an
+               // unauthenticated request, which it only does in
+               // AttachmentOnlyHost, TrustAllContent, and
+               // per-collection vhost cases.
+               s.testServer.Config.AttachmentOnlyHost = s.testServer.Addr
+
+               cmd.Env = append(os.Environ(), "HOME="+tempdir)
+               f, err := os.OpenFile(filepath.Join(tempdir, ".netrc"), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+               c.Assert(err, check.IsNil)
+               _, err = fmt.Fprintf(f, "default login none password %s\n", password)
+               c.Assert(err, check.IsNil)
+               c.Assert(f.Close(), check.IsNil)
+       }
+       cmd.Stdin = bytes.NewBufferString(stdin)
+       stdout, err := cmd.StdoutPipe()
+       c.Assert(err, check.Equals, nil)
+       cmd.Stderr = cmd.Stdout
+       c.Assert(cmd.Start(), check.IsNil)
+
+       var buf bytes.Buffer
+       _, err = io.Copy(&buf, stdout)
+       c.Check(err, check.Equals, nil)
+       err = cmd.Wait()
+       c.Check(err, check.Equals, nil)
+       return buf.String()
+}
diff --git a/services/keep-web/doc.go b/services/keep-web/doc.go
new file mode 100644 (file)
index 0000000..d65156f
--- /dev/null
@@ -0,0 +1,282 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Keep-web provides read/write HTTP (WebDAV) access to files stored
+// in Keep. It serves public data to anonymous and unauthenticated
+// clients, and serves private data to clients that supply Arvados API
+// tokens. It can be installed anywhere with access to Keep services,
+// typically behind a web proxy that supports TLS.
+//
+// See http://doc.arvados.org/install/install-keep-web.html.
+//
+// Configuration
+//
+// The default configuration file location is
+// /etc/arvados/keep-web/keep-web.yml.
+//
+// Example configuration file
+//
+//     Client:
+//       APIHost: "zzzzz.arvadosapi.com:443"
+//       AuthToken: ""
+//       Insecure: false
+//     Listen: :1234
+//     AnonymousTokens:
+//       - xxxxxxxxxxxxxxxxxxxx
+//     AttachmentOnlyHost: ""
+//     TrustAllContent: false
+//
+// Starting the server
+//
+// Start a server using the default config file
+// /etc/arvados/keep-web/keep-web.yml:
+//
+//   keep-web
+//
+// Start a server using the config file /path/to/keep-web.yml:
+//
+//   keep-web -config /path/to/keep-web.yml
+//
+// Proxy configuration
+//
+// Keep-web does not support TLS natively. Typically, it is installed
+// behind a proxy like nginx.
+//
+// Here is an example nginx configuration.
+//
+//     http {
+//       upstream keep-web {
+//         server localhost:1234;
+//       }
+//       server {
+//         listen *:443 ssl;
+//         server_name collections.example.com *.collections.example.com ~.*--collections.example.com;
+//         ssl_certificate /root/wildcard.example.com.crt;
+//         ssl_certificate_key /root/wildcard.example.com.key;
+//         location  / {
+//           proxy_pass http://keep-web;
+//           proxy_set_header Host $host;
+//           proxy_set_header X-Forwarded-For $remote_addr;
+//         }
+//       }
+//     }
+//
+// It is not necessary to run keep-web on the same host as the nginx
+// proxy. However, TLS is not used between nginx and keep-web, so
+// intervening networks must be secured by other means.
+//
+// Anonymous downloads
+//
+// The "AnonymousTokens" configuration entry is an array of tokens to
+// use when processing anonymous requests, i.e., whenever a web client
+// does not supply its own Arvados API token via path, query string,
+// cookie, or request header.
+//
+//   "AnonymousTokens":["xxxxxxxxxxxxxxxxxxxxxxx"]
+//
+// See http://doc.arvados.org/install/install-keep-web.html for examples.
+//
+// Download URLs
+//
+// The following "same origin" URL patterns are supported for public
+// collections and collections shared anonymously via secret links
+// (i.e., collections which can be served by keep-web without making
+// use of any implicit credentials like cookies). See "Same-origin
+// URLs" below.
+//
+//   http://collections.example.com/c=uuid_or_pdh/path/file.txt
+//   http://collections.example.com/c=uuid_or_pdh/t=TOKEN/path/file.txt
+//
+// The following "multiple origin" URL patterns are supported for all
+// collections:
+//
+//   http://uuid_or_pdh--collections.example.com/path/file.txt
+//   http://uuid_or_pdh--collections.example.com/t=TOKEN/path/file.txt
+//
+// In the "multiple origin" form, the string "--" can be replaced with
+// "." with identical results (assuming the downstream proxy is
+// configured accordingly). These two are equivalent:
+//
+//   http://uuid_or_pdh--collections.example.com/path/file.txt
+//   http://uuid_or_pdh.collections.example.com/path/file.txt
+//
+// The first form (with "--" instead of ".") avoids the cost and
+// effort of deploying a wildcard TLS certificate for
+// *.collections.example.com at sites that already have a wildcard
+// certificate for *.example.com. The second form is likely to be
+// easier to configure, and more efficient to run, on a downstream
+// proxy.
+//
+// In all of the above forms, the "collections.example.com" part can
+// be anything at all: keep-web itself ignores everything after the
+// first "." or "--". (Of course, in order for clients to connect at
+// all, DNS and any relevant proxies must be configured accordingly.)
+//
+// In all of the above forms, the "uuid_or_pdh" part can be either a
+// collection UUID or a portable data hash with the "+" character
+// optionally replaced by "-". (When "uuid_or_pdh" appears in the
+// domain name, replacing "+" with "-" is mandatory, because "+" is
+// not a valid character in a domain name.)
+//
+// In all of the above forms, a top-level directory called "_" is
+// skipped. In cases where the "path/file.txt" part might start with
+// "t=" or "c=" or "_/", links should be constructed with a leading
+// "_/" to ensure the top-level directory is not interpreted as a
+// token or collection ID.
+//
+// Assuming there is a collection with UUID
+// zzzzz-4zz18-znfnqtbbv4spc3w and portable data hash
+// 1f4b0bc7583c2a7f9102c395f4ffc5e3+45, the following URLs are
+// interchangeable:
+//
+//   http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/foo/bar.txt
+//   http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/_/foo/bar.txt
+//   http://zzzzz-4zz18-znfnqtbbv4spc3w--collections.example.com/_/foo/bar.txt
+//
+// The following URLs are read-only, but otherwise interchangeable
+// with the above:
+//
+//   http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--foo.example.com/foo/bar.txt
+//   http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--.invalid/foo/bar.txt
+//   http://collections.example.com/by_id/1f4b0bc7583c2a7f9102c395f4ffc5e3%2B45/foo/bar.txt
+//   http://collections.example.com/by_id/zzzzz-4zz18-znfnqtbbv4spc3w/foo/bar.txt
+//
+// If the collection is named "MyCollection" and located in a project
+// called "MyProject" which is in the home project of a user with
+// username is "bob", the following read-only URL is also available
+// when authenticating as bob:
+//
+//   http://collections.example.com/users/bob/MyProject/MyCollection/foo/bar.txt
+//
+// An additional form is supported specifically to make it more
+// convenient to maintain support for existing Workbench download
+// links:
+//
+//   http://collections.example.com/collections/download/uuid_or_pdh/TOKEN/foo/bar.txt
+//
+// A regular Workbench "download" link is also accepted, but
+// credentials passed via cookie, header, etc. are ignored. Only
+// public data can be served this way:
+//
+//   http://collections.example.com/collections/uuid_or_pdh/foo/bar.txt
+//
+// Collections can also be accessed (read-only) via "/by_id/X" where X
+// is a UUID or portable data hash.
+//
+// Authorization mechanisms
+//
+// A token can be provided in an Authorization header:
+//
+//   Authorization: OAuth2 o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
+//
+// A base64-encoded token can be provided in a cookie named
+// "arvados_api_token":
+//
+//   Cookie: arvados_api_token=bzA3ajRweDdSbEpLNEN1TVlwN0MwTERUNEN6UjFKMXFCRTVBdm83ZUNjVWpPVGlreEs=
+//
+// A token can be provided in a URL-encoded query string:
+//
+//   GET /foo/bar.txt?api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
+//
+// A suitably encoded token can be provided in a POST body if the
+// request has a content type of application/x-www-form-urlencoded or
+// multipart/form-data:
+//
+//   POST /foo/bar.txt
+//   Content-Type: application/x-www-form-urlencoded
+//   [...]
+//   api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
+//
+// If a token is provided in a query string or in a POST request, the
+// response is an HTTP 303 redirect to an equivalent GET request, with
+// the token stripped from the query string and added to a cookie
+// instead.
+//
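+// For example, a hypothetical exchange (token and cookie values
+// abbreviated):
+//
+//   GET /foo/bar.txt?api_token=o07j4px7... HTTP/1.1
+//   Host: uuid_or_pdh.collections.example.com
+//
+//   HTTP/1.1 303 See Other
+//   Location: //uuid_or_pdh.collections.example.com/foo/bar.txt
+//   Set-Cookie: arvados_api_token=bzA3aj...; Path=/; HttpOnly
+//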
+// Indexes
+//
+// Keep-web returns a generic HTML index listing when a directory is
+// requested with the GET method. It does not serve a default file
+// like "index.html". Directory listings are also returned for WebDAV
+// PROPFIND requests.
+//
+// Compatibility
+//
+// Client-provided authorization tokens are ignored if the client does
+// not provide a Host header.
+//
+// In order to use the query string or POST form authorization
+// mechanisms, the client must follow 303 redirects; the client must
+// accept cookies with a 303 response and send those cookies when
+// performing the redirect; and either the client or an intervening
+// proxy must resolve a relative URL ("//host/path") if given in a
+// response Location header.
+//
+// Intranet mode
+//
+// Normally, Keep-web accepts requests for multiple collections using
+// the same host name, provided the client's credentials are not being
+// used. This provides insufficient XSS protection in an installation
+// where the "anonymously accessible" data is not truly public, but
+// merely protected by network topology.
+//
+// In such cases -- for example, a site which is not reachable from
+// the internet, where some data is world-readable from Arvados's
+// perspective but is intended to be available only to users within
+// the local network -- the downstream proxy should be configured to
+// return 401 for all paths beginning with "/c=".
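+//
+// For example, a minimal nginx sketch (extending the proxy example
+// above):
+//
+//     location ~ ^/c= {
+//       return 401;
+//     }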
+//
+// Same-origin URLs
+//
+// Without the same-origin protection outlined above, a web page
+// stored in collection X could execute JavaScript code that uses the
+// current viewer's credentials to download additional data from
+// collection Y -- data which is accessible to the current viewer, but
+// not to the author of collection X -- from the same origin
+// ("https://collections.example.com/") and upload it to some other
+// site chosen by the author of collection X.
+//
+// Attachment-Only host
+//
+// It is possible to serve untrusted content and accept user
+// credentials at the same origin as long as the content is only
+// downloaded, never executed by browsers. A single origin (hostname
+// and port) can be designated as an "attachment-only" origin: cookies
+// will be accepted and all responses will have a
+// "Content-Disposition: attachment" header. This behavior is invoked
+// only when the designated origin matches exactly the Host header
+// provided by the client or downstream proxy.
+//
+//   "AttachmentOnlyHost":"domain.example:9999"
+//
+// Trust All Content mode
+//
+// In TrustAllContent mode, Keep-web will accept credentials (API
+// tokens) and serve any collection X at
+// "https://collections.example.com/c=X/path/file.ext".  This is
+// UNSAFE except in the special case where everyone who is able write
+// ANY data to Keep, and every JavaScript and HTML file written to
+// Keep, is also trusted to read ALL of the data in Keep.
+//
+// In such cases you can enable trust-all-content mode.
+//
+//   "TrustAllContent":true
+//
+// When TrustAllContent is enabled, the only effect of the
+// AttachmentOnlyHost flag is to add a "Content-Disposition:
+// attachment" header.
+//
+//   "AttachmentOnlyHost":"domain.example:9999",
+//   "TrustAllContent":true
+//
+// Depending on your site configuration, you might also want to enable
+// the "trust all content" setting in Workbench. Normally, Workbench
+// avoids redirecting requests to keep-web if they depend on
+// TrustAllContent being enabled.
+//
+// Metrics
+//
+// Keep-web exposes request metrics in Prometheus text-based format at
+// /metrics. The same information is also available as JSON at
+// /metrics.json.
+//
+package main
diff --git a/services/keep-web/handler.go b/services/keep-web/handler.go
new file mode 100644 (file)
index 0000000..b5c11e5
--- /dev/null
@@ -0,0 +1,798 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "fmt"
+       "html"
+       "html/template"
+       "io"
+       "net/http"
+       "net/url"
+       "os"
+       "path/filepath"
+       "sort"
+       "strconv"
+       "strings"
+       "sync"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/auth"
+       "git.curoverse.com/arvados.git/sdk/go/health"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       log "github.com/sirupsen/logrus"
+       "golang.org/x/net/webdav"
+)
+
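+// handler implements http.Handler, serving read/write WebDAV and
+// plain HTTP requests for Keep collections according to its Config.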
+type handler struct {
+       Config        *Config
+       MetricsAPI    http.Handler
+       clientPool    *arvadosclient.ClientPool
+       setupOnce     sync.Once
+       healthHandler http.Handler
+       webdavLS      webdav.LockSystem
+}
+
+// parseCollectionIDFromDNSName returns a UUID or PDH if s begins with
+// a UUID or URL-encoded PDH; otherwise "".
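+//
+// For example (illustrative IDs):
+//
+//   "zzzzz-4zz18-znfnqtbbv4spc3w--collections.example.com" yields
+//   "zzzzz-4zz18-znfnqtbbv4spc3w";
+//   "1f4b0bc7583c2a7f9102c395f4ffc5e3-45.collections.example.com"
+//   yields "1f4b0bc7583c2a7f9102c395f4ffc5e3+45".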
+func parseCollectionIDFromDNSName(s string) string {
+       // Strip domain.
+       if i := strings.IndexRune(s, '.'); i >= 0 {
+               s = s[:i]
+       }
+       // Names like {uuid}--collections.example.com serve the same
+       // purpose as {uuid}.collections.example.com but can reduce
+       // cost/effort of using [additional] wildcard certificates.
+       if i := strings.Index(s, "--"); i >= 0 {
+               s = s[:i]
+       }
+       if arvadosclient.UUIDMatch(s) {
+               return s
+       }
+       if pdh := strings.Replace(s, "-", "+", 1); arvadosclient.PDHMatch(pdh) {
+               return pdh
+       }
+       return ""
+}
+
+var urlPDHDecoder = strings.NewReplacer(" ", "+", "-", "+")
+
+// parseCollectionIDFromURL returns a UUID or PDH if s is a UUID or a
+// PDH (even if it is a PDH with "+" replaced by " " or "-");
+// otherwise "".
+func parseCollectionIDFromURL(s string) string {
+       if arvadosclient.UUIDMatch(s) {
+               return s
+       }
+       if pdh := urlPDHDecoder.Replace(s); arvadosclient.PDHMatch(pdh) {
+               return pdh
+       }
+       return ""
+}
+
+func (h *handler) setup() {
+       h.clientPool = arvadosclient.MakeClientPool()
+
+       keepclient.RefreshServiceDiscoveryOnSIGHUP()
+
+       h.healthHandler = &health.Handler{
+               Token:  h.Config.ManagementToken,
+               Prefix: "/_health/",
+       }
+
+       // Even though we don't accept LOCK requests, every webdav
+       // handler must have a non-nil LockSystem.
+       h.webdavLS = &noLockSystem{}
+}
+
+func (h *handler) serveStatus(w http.ResponseWriter, r *http.Request) {
+       json.NewEncoder(w).Encode(struct{ Version string }{version})
+}
+
+// updateOnSuccess wraps httpserver.ResponseWriter. If the handler
+// sends an HTTP header indicating success, updateOnSuccess first
+// calls the provided update func. If the update func fails, a 500
+// response is sent, and the status code and body sent by the handler
+// are ignored (all response writes return the update error).
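+//
+// For example (a sketch of its use in ServeHTTP below):
+//
+//   w = &updateOnSuccess{
+//           ResponseWriter: w,
+//           update: func() error {
+//                   return h.Config.Cache.Update(client, *collection, writefs)
+//           }}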
+type updateOnSuccess struct {
+       httpserver.ResponseWriter
+       update     func() error
+       sentHeader bool
+       err        error
+}
+
+func (uos *updateOnSuccess) Write(p []byte) (int, error) {
+       if !uos.sentHeader {
+               uos.WriteHeader(http.StatusOK)
+       }
+       if uos.err != nil {
+               return 0, uos.err
+       }
+       return uos.ResponseWriter.Write(p)
+}
+
+func (uos *updateOnSuccess) WriteHeader(code int) {
+       if !uos.sentHeader {
+               uos.sentHeader = true
+               if code >= 200 && code < 400 {
+                       if uos.err = uos.update(); uos.err != nil {
+                               code := http.StatusInternalServerError
+                               if err, ok := uos.err.(*arvados.TransactionError); ok {
+                                       code = err.StatusCode
+                               }
+                               log.Printf("update() changes response to HTTP %d: %T %q", code, uos.err, uos.err)
+                               http.Error(uos.ResponseWriter, uos.err.Error(), code)
+                               return
+                       }
+               }
+       }
+       uos.ResponseWriter.WriteHeader(code)
+}
+
+var (
+       corsAllowHeadersHeader = strings.Join([]string{
+               "Authorization", "Content-Type", "Range",
+               // WebDAV request headers:
+               "Depth", "Destination", "If", "Lock-Token", "Overwrite", "Timeout",
+       }, ", ")
+       writeMethod = map[string]bool{
+               "COPY":      true,
+               "DELETE":    true,
+               "LOCK":      true,
+               "MKCOL":     true,
+               "MOVE":      true,
+               "PROPPATCH": true,
+               "PUT":       true,
+               "RMCOL":     true,
+               "UNLOCK":    true,
+       }
+       webdavMethod = map[string]bool{
+               "COPY":      true,
+               "DELETE":    true,
+               "LOCK":      true,
+               "MKCOL":     true,
+               "MOVE":      true,
+               "OPTIONS":   true,
+               "PROPFIND":  true,
+               "PROPPATCH": true,
+               "PUT":       true,
+               "RMCOL":     true,
+               "UNLOCK":    true,
+       }
+       browserMethod = map[string]bool{
+               "GET":  true,
+               "HEAD": true,
+               "POST": true,
+       }
+       // top-level dirs to serve with siteFS
+       siteFSDir = map[string]bool{
+               "":      true, // root directory
+               "by_id": true,
+               "users": true,
+       }
+)
+
+// ServeHTTP implements http.Handler.
+func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
+       h.setupOnce.Do(h.setup)
+
+       var statusCode = 0
+       var statusText string
+
+       remoteAddr := r.RemoteAddr
+       if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
+               remoteAddr = xff + "," + remoteAddr
+       }
+       if xfp := r.Header.Get("X-Forwarded-Proto"); xfp != "" && xfp != "http" {
+               r.URL.Scheme = xfp
+       }
+
+       w := httpserver.WrapResponseWriter(wOrig)
+       defer func() {
+               if statusCode == 0 {
+                       statusCode = w.WroteStatus()
+               } else if w.WroteStatus() == 0 {
+                       w.WriteHeader(statusCode)
+               } else if w.WroteStatus() != statusCode {
+                       log.WithField("RequestID", r.Header.Get("X-Request-Id")).Warn(
+                               fmt.Sprintf("Our status changed from %d to %d after we sent headers", w.WroteStatus(), statusCode))
+               }
+               if statusText == "" {
+                       statusText = http.StatusText(statusCode)
+               }
+       }()
+
+       if strings.HasPrefix(r.URL.Path, "/_health/") && r.Method == "GET" {
+               h.healthHandler.ServeHTTP(w, r)
+               return
+       }
+
+       if method := r.Header.Get("Access-Control-Request-Method"); method != "" && r.Method == "OPTIONS" {
+               if !browserMethod[method] && !webdavMethod[method] {
+                       statusCode = http.StatusMethodNotAllowed
+                       return
+               }
+               w.Header().Set("Access-Control-Allow-Headers", corsAllowHeadersHeader)
+               w.Header().Set("Access-Control-Allow-Methods", "COPY, DELETE, GET, LOCK, MKCOL, MOVE, OPTIONS, POST, PROPFIND, PROPPATCH, PUT, RMCOL, UNLOCK")
+               w.Header().Set("Access-Control-Allow-Origin", "*")
+               w.Header().Set("Access-Control-Max-Age", "86400")
+               statusCode = http.StatusOK
+               return
+       }
+
+       if !browserMethod[r.Method] && !webdavMethod[r.Method] {
+               statusCode, statusText = http.StatusMethodNotAllowed, r.Method
+               return
+       }
+
+       if r.Header.Get("Origin") != "" {
+               // Allow simple cross-origin requests without user
+               // credentials ("user credentials" as defined by CORS,
+               // i.e., cookies, HTTP authentication, and client-side
+               // SSL certificates; see
+               // http://www.w3.org/TR/cors/#user-credentials).
+               w.Header().Set("Access-Control-Allow-Origin", "*")
+               w.Header().Set("Access-Control-Expose-Headers", "Content-Range")
+       }
+
+       pathParts := strings.Split(r.URL.Path[1:], "/")
+
+       var stripParts int
+       var collectionID string
+       var tokens []string
+       var reqTokens []string
+       var pathToken bool
+       var attachment bool
+       var useSiteFS bool
+       credentialsOK := h.Config.TrustAllContent
+
+       if r.Host != "" && r.Host == h.Config.AttachmentOnlyHost {
+               credentialsOK = true
+               attachment = true
+       } else if r.FormValue("disposition") == "attachment" {
+               attachment = true
+       }
+
+       if collectionID = parseCollectionIDFromDNSName(r.Host); collectionID != "" {
+               // http://ID.collections.example/PATH...
+               credentialsOK = true
+       } else if r.URL.Path == "/status.json" {
+               h.serveStatus(w, r)
+               return
+       } else if strings.HasPrefix(r.URL.Path, "/metrics") {
+               h.MetricsAPI.ServeHTTP(w, r)
+               return
+       } else if siteFSDir[pathParts[0]] {
+               useSiteFS = true
+       } else if len(pathParts) >= 1 && strings.HasPrefix(pathParts[0], "c=") {
+               // /c=ID[/PATH...]
+               collectionID = parseCollectionIDFromURL(pathParts[0][2:])
+               stripParts = 1
+       } else if len(pathParts) >= 2 && pathParts[0] == "collections" {
+               if len(pathParts) >= 4 && pathParts[1] == "download" {
+                       // /collections/download/ID/TOKEN/PATH...
+                       collectionID = parseCollectionIDFromURL(pathParts[2])
+                       tokens = []string{pathParts[3]}
+                       stripParts = 4
+                       pathToken = true
+               } else {
+                       // /collections/ID/PATH...
+                       collectionID = parseCollectionIDFromURL(pathParts[1])
+                       tokens = h.Config.AnonymousTokens
+                       stripParts = 2
+               }
+       }
+
+       if collectionID == "" && !useSiteFS {
+               statusCode = http.StatusNotFound
+               return
+       }
+
+       forceReload := false
+       if cc := r.Header.Get("Cache-Control"); strings.Contains(cc, "no-cache") || strings.Contains(cc, "must-revalidate") {
+               forceReload = true
+       }
+
+       formToken := r.FormValue("api_token")
+       if formToken != "" && r.Header.Get("Origin") != "" && attachment && r.URL.Query().Get("api_token") == "" {
+               // The client provided an explicit token in the POST
+               // body. The Origin header indicates this *might* be
+               // an AJAX request, in which case redirect-with-cookie
+               // won't work: we should just serve the content in the
+               // POST response. This is safe because:
+               //
+               // * We're supplying an attachment, not inline
+               //   content, so we don't need to convert the POST to
+               //   a GET and avoid the "really resubmit form?"
+               //   problem.
+               //
+               // * The token isn't embedded in the URL, so we don't
+               //   need to worry about bookmarks and copy/paste.
+               tokens = append(tokens, formToken)
+       } else if formToken != "" && browserMethod[r.Method] {
+               // The client provided an explicit token in the query
+               // string or in a POST form. We must put the
+               // token in an HttpOnly cookie, and redirect to the
+               // same URL with the query param redacted and method =
+               // GET.
+               h.seeOtherWithCookie(w, r, "", credentialsOK)
+               return
+       }
+
+       if useSiteFS {
+               if tokens == nil {
+                       tokens = auth.CredentialsFromRequest(r).Tokens
+               }
+               h.serveSiteFS(w, r, tokens, credentialsOK, attachment)
+               return
+       }
+
+       targetPath := pathParts[stripParts:]
+       if tokens == nil && len(targetPath) > 0 && strings.HasPrefix(targetPath[0], "t=") {
+               // http://ID.example/t=TOKEN/PATH...
+               // /c=ID/t=TOKEN/PATH...
+               //
+               // This form must only be used to pass scoped tokens
+               // that give permission for a single collection. See
+               // FormValue case above.
+               tokens = []string{targetPath[0][2:]}
+               pathToken = true
+               targetPath = targetPath[1:]
+               stripParts++
+       }
+
+       if tokens == nil {
+               if credentialsOK {
+                       reqTokens = auth.CredentialsFromRequest(r).Tokens
+               }
+               tokens = append(reqTokens, h.Config.AnonymousTokens...)
+       }
+
+       if len(targetPath) > 0 && targetPath[0] == "_" {
+               // If a collection has a directory called "t=foo" or
+               // "_", it can be served at
+               // //collections.example/_/t=foo/ or
+               // //collections.example/_/_/ respectively:
+               // //collections.example/t=foo/ won't work because
+               // t=foo will be interpreted as a token "foo".
+               targetPath = targetPath[1:]
+               stripParts++
+       }
+
+       arv := h.clientPool.Get()
+       if arv == nil {
+               statusCode, statusText = http.StatusInternalServerError, "Pool failed: "+h.clientPool.Err().Error()
+               return
+       }
+       defer h.clientPool.Put(arv)
+
+       var collection *arvados.Collection
+       tokenResult := make(map[string]int)
+       for _, arv.ApiToken = range tokens {
+               var err error
+               collection, err = h.Config.Cache.Get(arv, collectionID, forceReload)
+               if err == nil {
+                       // Success
+                       break
+               }
+               if srvErr, ok := err.(arvadosclient.APIServerError); ok {
+                       switch srvErr.HttpStatusCode {
+                       case 404, 401:
+                               // Token broken or insufficient to
+                               // retrieve collection
+                               tokenResult[arv.ApiToken] = srvErr.HttpStatusCode
+                               continue
+                       }
+               }
+               // Something more serious is wrong
+               statusCode, statusText = http.StatusInternalServerError, err.Error()
+               return
+       }
+       if collection == nil {
+               if pathToken || !credentialsOK {
+                       // Either the URL is a "secret sharing link"
+                       // that didn't work out (and asking the client
+                       // for additional credentials would just be
+                       // confusing), or we don't even accept
+                       // credentials at this path.
+                       statusCode = http.StatusNotFound
+                       return
+               }
+               for _, t := range reqTokens {
+                       if tokenResult[t] == 404 {
+                               // The client provided valid token(s), but the
+                               // collection was not found.
+                               statusCode = http.StatusNotFound
+                               return
+                       }
+               }
+               // The client's token was invalid (e.g., expired), or
+               // the client didn't even provide one.  Propagate the
+               // 401 to encourage the client to use a [different]
+               // token.
+               //
+               // TODO(TC): This response would be confusing to
+               // someone trying (anonymously) to download public
+               // data that has been deleted.  Allow a referrer to
+               // provide this context somehow?
+               w.Header().Add("WWW-Authenticate", "Basic realm=\"collections\"")
+               statusCode = http.StatusUnauthorized
+               return
+       }
+
+       kc, err := keepclient.MakeKeepClient(arv)
+       if err != nil {
+               statusCode, statusText = http.StatusInternalServerError, err.Error()
+               return
+       }
+       kc.RequestID = r.Header.Get("X-Request-Id")
+
+       var basename string
+       if len(targetPath) > 0 {
+               basename = targetPath[len(targetPath)-1]
+       }
+       applyContentDispositionHdr(w, r, basename, attachment)
+
+       client := (&arvados.Client{
+               APIHost:   arv.ApiServer,
+               AuthToken: arv.ApiToken,
+               Insecure:  arv.ApiInsecure,
+       }).WithRequestID(r.Header.Get("X-Request-Id"))
+
+       fs, err := collection.FileSystem(client, kc)
+       if err != nil {
+               statusCode, statusText = http.StatusInternalServerError, err.Error()
+               return
+       }
+
+       writefs, writeOK := fs.(arvados.CollectionFileSystem)
+       targetIsPDH := arvadosclient.PDHMatch(collectionID)
+       if (targetIsPDH || !writeOK) && writeMethod[r.Method] {
+               statusCode, statusText = http.StatusMethodNotAllowed, errReadOnly.Error()
+               return
+       }
+
+       if webdavMethod[r.Method] {
+               if writeMethod[r.Method] {
+                       // Save the collection only if/when all
+                       // webdav->filesystem operations succeed --
+                       // and send a 500 error if the modified
+                       // collection can't be saved.
+                       w = &updateOnSuccess{
+                               ResponseWriter: w,
+                               update: func() error {
+                                       return h.Config.Cache.Update(client, *collection, writefs)
+                               }}
+               }
+               h := webdav.Handler{
+                       Prefix: "/" + strings.Join(pathParts[:stripParts], "/"),
+                       FileSystem: &webdavFS{
+                               collfs:        fs,
+                               writing:       writeMethod[r.Method],
+                               alwaysReadEOF: r.Method == "PROPFIND",
+                       },
+                       LockSystem: h.webdavLS,
+                       Logger: func(_ *http.Request, err error) {
+                               if err != nil {
+                                       log.Printf("error from webdav handler: %q", err)
+                               }
+                       },
+               }
+               h.ServeHTTP(w, r)
+               return
+       }
+
+       openPath := "/" + strings.Join(targetPath, "/")
+       if f, err := fs.Open(openPath); os.IsNotExist(err) {
+               // Requested non-existent path
+               statusCode = http.StatusNotFound
+       } else if err != nil {
+               // Some other (unexpected) error
+               statusCode, statusText = http.StatusInternalServerError, err.Error()
+       } else if stat, err := f.Stat(); err != nil {
+               // Can't get Size/IsDir (shouldn't happen with a collectionFS!)
+               statusCode, statusText = http.StatusInternalServerError, err.Error()
+       } else if stat.IsDir() && !strings.HasSuffix(r.URL.Path, "/") {
+               // If client requests ".../dirname", redirect to
+               // ".../dirname/". This way, relative links in the
+               // listing for "dirname" can always be "fnm", never
+               // "dirname/fnm".
+               h.seeOtherWithCookie(w, r, r.URL.Path+"/", credentialsOK)
+       } else if stat.IsDir() {
+               h.serveDirectory(w, r, collection.Name, fs, openPath, true)
+       } else {
+               http.ServeContent(w, r, basename, stat.ModTime(), f)
+               if r.Header.Get("Range") == "" && int64(w.WroteBodyBytes()) != stat.Size() {
+                       // If we wrote fewer bytes than expected, it's
+                       // too late to change the real response code
+                       // or send an error message to the client, but
+                       // at least we can try to put some useful
+                       // debugging info in the logs.
+                       n, err := f.Read(make([]byte, 1024))
+                       statusCode, statusText = http.StatusInternalServerError, fmt.Sprintf("f.Size()==%d but only wrote %d bytes; read(1024) returns %d, %s", stat.Size(), w.WroteBodyBytes(), n, err)
+               }
+       }
+}
+
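+// serveSiteFS handles read-only requests for the site-wide
+// filesystem (the top-level directories listed in siteFSDir),
+// authenticating with the first of the given tokens.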
+func (h *handler) serveSiteFS(w http.ResponseWriter, r *http.Request, tokens []string, credentialsOK, attachment bool) {
+       if len(tokens) == 0 {
+               w.Header().Add("WWW-Authenticate", "Basic realm=\"collections\"")
+               http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
+               return
+       }
+       if writeMethod[r.Method] {
+               http.Error(w, errReadOnly.Error(), http.StatusMethodNotAllowed)
+               return
+       }
+       arv := h.clientPool.Get()
+       if arv == nil {
+               http.Error(w, "Pool failed: "+h.clientPool.Err().Error(), http.StatusInternalServerError)
+               return
+       }
+       defer h.clientPool.Put(arv)
+       arv.ApiToken = tokens[0]
+
+       kc, err := keepclient.MakeKeepClient(arv)
+       if err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+       kc.RequestID = r.Header.Get("X-Request-Id")
+       client := (&arvados.Client{
+               APIHost:   arv.ApiServer,
+               AuthToken: arv.ApiToken,
+               Insecure:  arv.ApiInsecure,
+       }).WithRequestID(r.Header.Get("X-Request-Id"))
+       fs := client.SiteFileSystem(kc)
+       f, err := fs.Open(r.URL.Path)
+       if os.IsNotExist(err) {
+               http.Error(w, err.Error(), http.StatusNotFound)
+               return
+       } else if err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+       defer f.Close()
+       if fi, err := f.Stat(); err == nil && fi.IsDir() && r.Method == "GET" {
+               if !strings.HasSuffix(r.URL.Path, "/") {
+                       h.seeOtherWithCookie(w, r, r.URL.Path+"/", credentialsOK)
+               } else {
+                       h.serveDirectory(w, r, fi.Name(), fs, r.URL.Path, false)
+               }
+               return
+       }
+       if r.Method == "GET" {
+               _, basename := filepath.Split(r.URL.Path)
+               applyContentDispositionHdr(w, r, basename, attachment)
+       }
+       wh := webdav.Handler{
+               Prefix: "/",
+               FileSystem: &webdavFS{
+                       collfs:        fs,
+                       writing:       writeMethod[r.Method],
+                       alwaysReadEOF: r.Method == "PROPFIND",
+               },
+               LockSystem: h.webdavLS,
+               Logger: func(_ *http.Request, err error) {
+                       if err != nil {
+                               log.Printf("error from webdav handler: %q", err)
+                       }
+               },
+       }
+       wh.ServeHTTP(w, r)
+}
+
+var dirListingTemplate = `<!DOCTYPE HTML>
+<HTML><HEAD>
+  <META name="robots" content="NOINDEX">
+  <TITLE>{{ .CollectionName }}</TITLE>
+  <STYLE type="text/css">
+    body {
+      margin: 1.5em;
+    }
+    pre {
+      background-color: #D9EDF7;
+      border-radius: .25em;
+      padding: .75em;
+      overflow: auto;
+    }
+    .footer p {
+      font-size: 82%;
+    }
+    ul {
+      padding: 0;
+    }
+    ul li {
+      font-family: monospace;
+      list-style: none;
+    }
+  </STYLE>
+</HEAD>
+<BODY>
+
+<H1>{{ .CollectionName }}</H1>
+
+<P>This collection of data files is being shared with you through
+Arvados.  You can download individual files listed below.  To download
+the entire directory tree with wget, try:</P>
+
+<PRE>$ wget --mirror --no-parent --no-host --cut-dirs={{ .StripParts }} https://{{ .Request.Host }}{{ .Request.URL.Path }}</PRE>
+
+<H2>File Listing</H2>
+
+{{if .Files}}
+<UL>
+{{range .Files}}
+{{if .IsDir }}
+  <LI>{{" " | printf "%15s  " | nbsp}}<A href="{{print "./" .Name}}/">{{.Name}}/</A></LI>
+{{else}}
+  <LI>{{.Size | printf "%15d  " | nbsp}}<A href="{{print "./" .Name}}">{{.Name}}</A></LI>
+{{end}}
+{{end}}
+</UL>
+{{else}}
+<P>(No files; this collection is empty.)</P>
+{{end}}
+
+<HR noshade>
+<DIV class="footer">
+  <P>
+    About Arvados:
+    Arvados is a free and open source software bioinformatics platform.
+    To learn more, visit arvados.org.
+    Arvados is not responsible for the files listed on this page.
+  </P>
+</DIV>
+
+</BODY>
+`
+
+type fileListEnt struct {
+       Name  string
+       Size  int64
+       IsDir bool
+}
+
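+// serveDirectory sends an HTML listing of the directory at base
+// within fs, recursing into subdirectories when recurse is true.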
+func (h *handler) serveDirectory(w http.ResponseWriter, r *http.Request, collectionName string, fs http.FileSystem, base string, recurse bool) {
+       var files []fileListEnt
+       var walk func(string) error
+       if !strings.HasSuffix(base, "/") {
+               base = base + "/"
+       }
+       walk = func(path string) error {
+               dirname := base + path
+               if dirname != "/" {
+                       dirname = strings.TrimSuffix(dirname, "/")
+               }
+               d, err := fs.Open(dirname)
+               if err != nil {
+                       return err
+               }
+               ents, err := d.Readdir(-1)
+               if err != nil {
+                       return err
+               }
+               for _, ent := range ents {
+                       if recurse && ent.IsDir() {
+                               err = walk(path + ent.Name() + "/")
+                               if err != nil {
+                                       return err
+                               }
+                       } else {
+                               files = append(files, fileListEnt{
+                                       Name:  path + ent.Name(),
+                                       Size:  ent.Size(),
+                                       IsDir: ent.IsDir(),
+                               })
+                       }
+               }
+               return nil
+       }
+       if err := walk(""); err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+
+       funcs := template.FuncMap{
+               "nbsp": func(s string) template.HTML {
+                       return template.HTML(strings.Replace(s, " ", "&nbsp;", -1))
+               },
+       }
+       tmpl, err := template.New("dir").Funcs(funcs).Parse(dirListingTemplate)
+       if err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+       sort.Slice(files, func(i, j int) bool {
+               return files[i].Name < files[j].Name
+       })
+       w.WriteHeader(http.StatusOK)
+       tmpl.Execute(w, map[string]interface{}{
+               "CollectionName": collectionName,
+               "Files":          files,
+               "Request":        r,
+               "StripParts":     strings.Count(strings.TrimRight(r.URL.Path, "/"), "/"),
+       })
+}
+
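+// applyContentDispositionHdr sets a Content-Disposition response
+// header when serving an attachment, or when the request URI
+// contains a query string that a UA might otherwise mistake for part
+// of the filename. For example, "GET /foo.txt?disposition=attachment"
+// yields: Content-Disposition: attachment; filename="foo.txt"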
+func applyContentDispositionHdr(w http.ResponseWriter, r *http.Request, filename string, isAttachment bool) {
+       disposition := "inline"
+       if isAttachment {
+               disposition = "attachment"
+       }
+       if strings.ContainsRune(r.RequestURI, '?') {
+               // Help the UA realize that the filename is just
+               // "filename.txt", not
+               // "filename.txt?disposition=attachment".
+               //
+               // TODO(TC): Follow advice at RFC 6266 appendix D
+               disposition += "; filename=" + strconv.QuoteToASCII(filename)
+       }
+       if disposition != "inline" {
+               w.Header().Set("Content-Disposition", disposition)
+       }
+}
+
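+// seeOtherWithCookie sends a 303 response redirecting to location
+// (or, if location is empty, to the original URL) with any api_token
+// query parameter removed. A token supplied via form value is moved
+// into an HttpOnly cookie first -- or, if credentialsOK is false,
+// rejected with a 400 response.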
+func (h *handler) seeOtherWithCookie(w http.ResponseWriter, r *http.Request, location string, credentialsOK bool) {
+       if formToken := r.FormValue("api_token"); formToken != "" {
+               if !credentialsOK {
+                       // It is not safe to copy the provided token
+                       // into a cookie unless the current vhost
+                       // (origin) serves only a single collection or
+                       // we are in TrustAllContent mode.
+                       w.WriteHeader(http.StatusBadRequest)
+                       return
+               }
+
+               // The HttpOnly flag is necessary to prevent
+               // JavaScript code (included in, or loaded by, a page
+               // in the collection being served) from employing the
+               // user's token beyond reading other files in the same
+               // domain, i.e., same collection.
+               //
+               // The 303 redirect is necessary in the case of a GET
+               // request to avoid exposing the token in the Location
+               // bar, and in the case of a POST request to avoid
+               // raising warnings when the user refreshes the
+               // resulting page.
+               http.SetCookie(w, &http.Cookie{
+                       Name:     "arvados_api_token",
+                       Value:    auth.EncodeTokenCookie([]byte(formToken)),
+                       Path:     "/",
+                       HttpOnly: true,
+               })
+       }
+
+       // Propagate query parameters (except api_token) from
+       // the original request.
+       redirQuery := r.URL.Query()
+       redirQuery.Del("api_token")
+
+       u := r.URL
+       if location != "" {
+               newu, err := u.Parse(location)
+               if err != nil {
+                       w.WriteHeader(http.StatusInternalServerError)
+                       return
+               }
+               u = newu
+       }
+       redir := (&url.URL{
+               Scheme:   r.URL.Scheme,
+               Host:     r.Host,
+               Path:     u.Path,
+               RawQuery: redirQuery.Encode(),
+       }).String()
+
+       w.Header().Add("Location", redir)
+       w.WriteHeader(http.StatusSeeOther)
+       io.WriteString(w, `<A href="`)
+       io.WriteString(w, html.EscapeString(redir))
+       io.WriteString(w, `">Continue</A>`)
+}
diff --git a/services/keep-web/handler_test.go b/services/keep-web/handler_test.go
new file mode 100644 (file)
index 0000000..7a015c9
--- /dev/null
@@ -0,0 +1,837 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "fmt"
+       "html"
+       "io/ioutil"
+       "net/http"
+       "net/http/httptest"
+       "net/url"
+       "os"
+       "path/filepath"
+       "regexp"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/auth"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&UnitSuite{})
+
+type UnitSuite struct{}
+
+func (s *UnitSuite) TestCORSPreflight(c *check.C) {
+       h := handler{Config: DefaultConfig()}
+       u := mustParseURL("http://keep-web.example/c=" + arvadostest.FooCollection + "/foo")
+       req := &http.Request{
+               Method:     "OPTIONS",
+               Host:       u.Host,
+               URL:        u,
+               RequestURI: u.RequestURI(),
+               Header: http.Header{
+                       "Origin":                        {"https://workbench.example"},
+                       "Access-Control-Request-Method": {"POST"},
+               },
+       }
+
+       // Check preflight for an allowed request
+       resp := httptest.NewRecorder()
+       h.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       c.Check(resp.Body.String(), check.Equals, "")
+       c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
+       c.Check(resp.Header().Get("Access-Control-Allow-Methods"), check.Equals, "COPY, DELETE, GET, LOCK, MKCOL, MOVE, OPTIONS, POST, PROPFIND, PROPPATCH, PUT, RMCOL, UNLOCK")
+       c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Equals, "Authorization, Content-Type, Range, Depth, Destination, If, Lock-Token, Overwrite, Timeout")
+
+       // Check preflight for a disallowed request
+       resp = httptest.NewRecorder()
+       req.Header.Set("Access-Control-Request-Method", "MAKE-COFFEE")
+       h.ServeHTTP(resp, req)
+       c.Check(resp.Body.String(), check.Equals, "")
+       c.Check(resp.Code, check.Equals, http.StatusMethodNotAllowed)
+}
+
+func (s *UnitSuite) TestInvalidUUID(c *check.C) {
+       bogusID := strings.Replace(arvadostest.FooPdh, "+", "-", 1) + "-"
+       token := arvadostest.ActiveToken
+       for _, trial := range []string{
+               "http://keep-web/c=" + bogusID + "/foo",
+               "http://keep-web/c=" + bogusID + "/t=" + token + "/foo",
+               "http://keep-web/collections/download/" + bogusID + "/" + token + "/foo",
+               "http://keep-web/collections/" + bogusID + "/foo",
+               "http://" + bogusID + ".keep-web/" + bogusID + "/foo",
+               "http://" + bogusID + ".keep-web/t=" + token + "/" + bogusID + "/foo",
+       } {
+               c.Log(trial)
+               u := mustParseURL(trial)
+               req := &http.Request{
+                       Method:     "GET",
+                       Host:       u.Host,
+                       URL:        u,
+                       RequestURI: u.RequestURI(),
+               }
+               resp := httptest.NewRecorder()
+               cfg := DefaultConfig()
+               cfg.AnonymousTokens = []string{arvadostest.AnonymousToken}
+               h := handler{Config: cfg}
+               h.ServeHTTP(resp, req)
+               c.Check(resp.Code, check.Equals, http.StatusNotFound)
+       }
+}
+
+func mustParseURL(s string) *url.URL {
+       r, err := url.Parse(s)
+       if err != nil {
+               panic("parse URL: " + s)
+       }
+       return r
+}
+
+func (s *IntegrationSuite) TestVhost404(c *check.C) {
+       for _, testURL := range []string{
+               arvadostest.NonexistentCollection + ".example.com/theperthcountyconspiracy",
+               arvadostest.NonexistentCollection + ".example.com/t=" + arvadostest.ActiveToken + "/theperthcountyconspiracy",
+       } {
+               resp := httptest.NewRecorder()
+               u := mustParseURL(testURL)
+               req := &http.Request{
+                       Method:     "GET",
+                       URL:        u,
+                       RequestURI: u.RequestURI(),
+               }
+               s.testServer.Handler.ServeHTTP(resp, req)
+               c.Check(resp.Code, check.Equals, http.StatusNotFound)
+               c.Check(resp.Body.String(), check.Equals, "")
+       }
+}
+
+// An authorizer modifies an HTTP request to make use of the given
+// token -- by adding it to a header, cookie, query param, or whatever
+// -- and returns the HTTP status code we should expect from keep-web if
+// the token is invalid.
+type authorizer func(*http.Request, string) int
+
+func (s *IntegrationSuite) TestVhostViaAuthzHeader(c *check.C) {
+       s.doVhostRequests(c, authzViaAuthzHeader)
+}
+func authzViaAuthzHeader(r *http.Request, tok string) int {
+       r.Header.Add("Authorization", "OAuth2 "+tok)
+       return http.StatusUnauthorized
+}
+
+func (s *IntegrationSuite) TestVhostViaCookieValue(c *check.C) {
+       s.doVhostRequests(c, authzViaCookieValue)
+}
+func authzViaCookieValue(r *http.Request, tok string) int {
+       r.AddCookie(&http.Cookie{
+               Name:  "arvados_api_token",
+               Value: auth.EncodeTokenCookie([]byte(tok)),
+       })
+       return http.StatusUnauthorized
+}
+
+func (s *IntegrationSuite) TestVhostViaPath(c *check.C) {
+       s.doVhostRequests(c, authzViaPath)
+}
+func authzViaPath(r *http.Request, tok string) int {
+       r.URL.Path = "/t=" + tok + r.URL.Path
+       return http.StatusNotFound
+}
+
+func (s *IntegrationSuite) TestVhostViaQueryString(c *check.C) {
+       s.doVhostRequests(c, authzViaQueryString)
+}
+func authzViaQueryString(r *http.Request, tok string) int {
+       r.URL.RawQuery = "api_token=" + tok
+       return http.StatusUnauthorized
+}
+
+func (s *IntegrationSuite) TestVhostViaPOST(c *check.C) {
+       s.doVhostRequests(c, authzViaPOST)
+}
+func authzViaPOST(r *http.Request, tok string) int {
+       r.Method = "POST"
+       r.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+       r.Body = ioutil.NopCloser(strings.NewReader(
+               url.Values{"api_token": {tok}}.Encode()))
+       return http.StatusUnauthorized
+}
+
+func (s *IntegrationSuite) TestVhostViaXHRPOST(c *check.C) {
+       s.doVhostRequests(c, authzViaXHRPOST)
+}
+func authzViaXHRPOST(r *http.Request, tok string) int {
+       r.Method = "POST"
+       r.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+       r.Header.Add("Origin", "https://origin.example")
+       r.Body = ioutil.NopCloser(strings.NewReader(
+               url.Values{
+                       "api_token":   {tok},
+                       "disposition": {"attachment"},
+               }.Encode()))
+       return http.StatusUnauthorized
+}
+
+// Try some combinations of {url, token} using the given authorization
+// mechanism, and verify the result is correct.
+func (s *IntegrationSuite) doVhostRequests(c *check.C, authz authorizer) {
+       for _, hostPath := range []string{
+               arvadostest.FooCollection + ".example.com/foo",
+               arvadostest.FooCollection + "--collections.example.com/foo",
+               arvadostest.FooCollection + "--collections.example.com/_/foo",
+               arvadostest.FooPdh + ".example.com/foo",
+               strings.Replace(arvadostest.FooPdh, "+", "-", -1) + "--collections.example.com/foo",
+               arvadostest.FooBarDirCollection + ".example.com/dir1/foo",
+       } {
+               c.Log("doRequests: ", hostPath)
+               s.doVhostRequestsWithHostPath(c, authz, hostPath)
+       }
+}
+
+func (s *IntegrationSuite) doVhostRequestsWithHostPath(c *check.C, authz authorizer, hostPath string) {
+       for _, tok := range []string{
+               arvadostest.ActiveToken,
+               arvadostest.ActiveToken[:15],
+               arvadostest.SpectatorToken,
+               "bogus",
+               "",
+       } {
+               u := mustParseURL("http://" + hostPath)
+               req := &http.Request{
+                       Method:     "GET",
+                       Host:       u.Host,
+                       URL:        u,
+                       RequestURI: u.RequestURI(),
+                       Header:     http.Header{},
+               }
+               failCode := authz(req, tok)
+               req, resp := s.doReq(req)
+               code, body := resp.Code, resp.Body.String()
+
+               // If the initial request had a (non-empty) token
+               // showing in the query string, we should have been
+               // redirected in order to hide it in a cookie.
+               c.Check(req.URL.String(), check.Not(check.Matches), `.*api_token=.+`)
+
+               if tok == arvadostest.ActiveToken {
+                       c.Check(code, check.Equals, http.StatusOK)
+                       c.Check(body, check.Equals, "foo")
+
+               } else {
+                       c.Check(code >= 400, check.Equals, true)
+                       c.Check(code < 500, check.Equals, true)
+                       if tok == arvadostest.SpectatorToken {
+                               // Valid token never offers to retry
+                               // with different credentials.
+                               c.Check(code, check.Equals, http.StatusNotFound)
+                       } else {
+                               // Invalid token can ask to retry
+                               // depending on the authz method.
+                               c.Check(code, check.Equals, failCode)
+                       }
+                       c.Check(body, check.Equals, "")
+               }
+       }
+}
+
+func (s *IntegrationSuite) doReq(req *http.Request) (*http.Request, *httptest.ResponseRecorder) {
+       resp := httptest.NewRecorder()
+       s.testServer.Handler.ServeHTTP(resp, req)
+       if resp.Code != http.StatusSeeOther {
+               return req, resp
+       }
+       cookies := (&http.Response{Header: resp.Header()}).Cookies()
+       u, _ := req.URL.Parse(resp.Header().Get("Location"))
+       req = &http.Request{
+               Method:     "GET",
+               Host:       u.Host,
+               URL:        u,
+               RequestURI: u.RequestURI(),
+               Header:     http.Header{},
+       }
+       for _, c := range cookies {
+               req.AddCookie(c)
+       }
+       return s.doReq(req)
+}
+
+func (s *IntegrationSuite) TestVhostRedirectQueryTokenToCookie(c *check.C) {
+       s.testVhostRedirectTokenToCookie(c, "GET",
+               arvadostest.FooCollection+".example.com/foo",
+               "?api_token="+arvadostest.ActiveToken,
+               "",
+               "",
+               http.StatusOK,
+               "foo",
+       )
+}
+
+func (s *IntegrationSuite) TestSingleOriginSecretLink(c *check.C) {
+       s.testVhostRedirectTokenToCookie(c, "GET",
+               "example.com/c="+arvadostest.FooCollection+"/t="+arvadostest.ActiveToken+"/foo",
+               "",
+               "",
+               "",
+               http.StatusOK,
+               "foo",
+       )
+}
+
+// Bad token in URL is 404 Not Found because it doesn't make sense to
+// retry the same URL with different authorization.
+func (s *IntegrationSuite) TestSingleOriginSecretLinkBadToken(c *check.C) {
+       s.testVhostRedirectTokenToCookie(c, "GET",
+               "example.com/c="+arvadostest.FooCollection+"/t=bogus/foo",
+               "",
+               "",
+               "",
+               http.StatusNotFound,
+               "",
+       )
+}
+
+// Bad token in a cookie (even if it got there via our own
+// query-string-to-cookie redirect) is, in principle, retryable at the
+// same URL so it's 401 Unauthorized.
+func (s *IntegrationSuite) TestVhostRedirectQueryTokenToBogusCookie(c *check.C) {
+       s.testVhostRedirectTokenToCookie(c, "GET",
+               arvadostest.FooCollection+".example.com/foo",
+               "?api_token=thisisabogustoken",
+               "",
+               "",
+               http.StatusUnauthorized,
+               "",
+       )
+}
+
+func (s *IntegrationSuite) TestVhostRedirectQueryTokenSingleOriginError(c *check.C) {
+       s.testVhostRedirectTokenToCookie(c, "GET",
+               "example.com/c="+arvadostest.FooCollection+"/foo",
+               "?api_token="+arvadostest.ActiveToken,
+               "",
+               "",
+               http.StatusBadRequest,
+               "",
+       )
+}
+
+// If client requests an attachment by putting ?disposition=attachment
+// in the query string, and gets redirected, the redirect target
+// should respond with an attachment.
+func (s *IntegrationSuite) TestVhostRedirectQueryTokenRequestAttachment(c *check.C) {
+       resp := s.testVhostRedirectTokenToCookie(c, "GET",
+               arvadostest.FooCollection+".example.com/foo",
+               "?disposition=attachment&api_token="+arvadostest.ActiveToken,
+               "",
+               "",
+               http.StatusOK,
+               "foo",
+       )
+       c.Check(resp.Header().Get("Content-Disposition"), check.Matches, "attachment(;.*)?")
+}
+
+func (s *IntegrationSuite) TestVhostRedirectQueryTokenSiteFS(c *check.C) {
+       s.testServer.Config.AttachmentOnlyHost = "download.example.com"
+       resp := s.testVhostRedirectTokenToCookie(c, "GET",
+               "download.example.com/by_id/"+arvadostest.FooCollection+"/foo",
+               "?api_token="+arvadostest.ActiveToken,
+               "",
+               "",
+               http.StatusOK,
+               "foo",
+       )
+       c.Check(resp.Header().Get("Content-Disposition"), check.Matches, "attachment(;.*)?")
+}
+
+func (s *IntegrationSuite) TestPastCollectionVersionFileAccess(c *check.C) {
+       s.testServer.Config.AttachmentOnlyHost = "download.example.com"
+       resp := s.testVhostRedirectTokenToCookie(c, "GET",
+               "download.example.com/c="+arvadostest.WazVersion1Collection+"/waz",
+               "?api_token="+arvadostest.ActiveToken,
+               "",
+               "",
+               http.StatusOK,
+               "waz",
+       )
+       c.Check(resp.Header().Get("Content-Disposition"), check.Matches, "attachment(;.*)?")
+       resp = s.testVhostRedirectTokenToCookie(c, "GET",
+               "download.example.com/by_id/"+arvadostest.WazVersion1Collection+"/waz",
+               "?api_token="+arvadostest.ActiveToken,
+               "",
+               "",
+               http.StatusOK,
+               "waz",
+       )
+       c.Check(resp.Header().Get("Content-Disposition"), check.Matches, "attachment(;.*)?")
+}
+
+func (s *IntegrationSuite) TestVhostRedirectQueryTokenTrustAllContent(c *check.C) {
+       s.testServer.Config.TrustAllContent = true
+       s.testVhostRedirectTokenToCookie(c, "GET",
+               "example.com/c="+arvadostest.FooCollection+"/foo",
+               "?api_token="+arvadostest.ActiveToken,
+               "",
+               "",
+               http.StatusOK,
+               "foo",
+       )
+}
+
+func (s *IntegrationSuite) TestVhostRedirectQueryTokenAttachmentOnlyHost(c *check.C) {
+       s.testServer.Config.AttachmentOnlyHost = "example.com:1234"
+
+       s.testVhostRedirectTokenToCookie(c, "GET",
+               "example.com/c="+arvadostest.FooCollection+"/foo",
+               "?api_token="+arvadostest.ActiveToken,
+               "",
+               "",
+               http.StatusBadRequest,
+               "",
+       )
+
+       resp := s.testVhostRedirectTokenToCookie(c, "GET",
+               "example.com:1234/c="+arvadostest.FooCollection+"/foo",
+               "?api_token="+arvadostest.ActiveToken,
+               "",
+               "",
+               http.StatusOK,
+               "foo",
+       )
+       c.Check(resp.Header().Get("Content-Disposition"), check.Equals, "attachment")
+}
+
+func (s *IntegrationSuite) TestVhostRedirectPOSTFormTokenToCookie(c *check.C) {
+       s.testVhostRedirectTokenToCookie(c, "POST",
+               arvadostest.FooCollection+".example.com/foo",
+               "",
+               "application/x-www-form-urlencoded",
+               url.Values{"api_token": {arvadostest.ActiveToken}}.Encode(),
+               http.StatusOK,
+               "foo",
+       )
+}
+
+func (s *IntegrationSuite) TestVhostRedirectPOSTFormTokenToCookie404(c *check.C) {
+       s.testVhostRedirectTokenToCookie(c, "POST",
+               arvadostest.FooCollection+".example.com/foo",
+               "",
+               "application/x-www-form-urlencoded",
+               url.Values{"api_token": {arvadostest.SpectatorToken}}.Encode(),
+               http.StatusNotFound,
+               "",
+       )
+}
+
+func (s *IntegrationSuite) TestAnonymousTokenOK(c *check.C) {
+       s.testServer.Config.AnonymousTokens = []string{arvadostest.AnonymousToken}
+       s.testVhostRedirectTokenToCookie(c, "GET",
+               "example.com/c="+arvadostest.HelloWorldCollection+"/Hello%20world.txt",
+               "",
+               "",
+               "",
+               http.StatusOK,
+               "Hello world\n",
+       )
+}
+
+func (s *IntegrationSuite) TestAnonymousTokenError(c *check.C) {
+       s.testServer.Config.AnonymousTokens = []string{"anonymousTokenConfiguredButInvalid"}
+       s.testVhostRedirectTokenToCookie(c, "GET",
+               "example.com/c="+arvadostest.HelloWorldCollection+"/Hello%20world.txt",
+               "",
+               "",
+               "",
+               http.StatusNotFound,
+               "",
+       )
+}
+
+func (s *IntegrationSuite) TestSpecialCharsInPath(c *check.C) {
+       s.testServer.Config.AttachmentOnlyHost = "download.example.com"
+
+       client := s.testServer.Config.Client
+       client.AuthToken = arvadostest.ActiveToken
+       fs, err := (&arvados.Collection{}).FileSystem(&client, nil)
+       c.Assert(err, check.IsNil)
+       f, err := fs.OpenFile("https:\\\"odd' path chars", os.O_CREATE, 0777)
+       c.Assert(err, check.IsNil)
+       f.Close()
+       mtxt, err := fs.MarshalManifest(".")
+       c.Assert(err, check.IsNil)
+       coll := arvados.Collection{ManifestText: mtxt}
+       err = client.RequestAndDecode(&coll, "POST", "arvados/v1/collections", client.UpdateBody(coll), nil)
+       c.Assert(err, check.IsNil)
+
+       u, _ := url.Parse("http://download.example.com/c=" + coll.UUID + "/")
+       req := &http.Request{
+               Method:     "GET",
+               Host:       u.Host,
+               URL:        u,
+               RequestURI: u.RequestURI(),
+               Header: http.Header{
+                       "Authorization": {"Bearer " + client.AuthToken},
+               },
+       }
+       resp := httptest.NewRecorder()
+       s.testServer.Handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       c.Check(resp.Body.String(), check.Matches, `(?ms).*href="./https:%5c%22odd%27%20path%20chars"\S+https:\\&#34;odd&#39; path chars.*`)
+}
+
+// XHRs can't follow redirect-with-cookie so they rely on method=POST
+// and disposition=attachment (telling us it's acceptable to respond
+// with content instead of a redirect) and an Origin header that gets
+// added automatically by the browser (telling us it's desirable to do
+// so).
+func (s *IntegrationSuite) TestXHRNoRedirect(c *check.C) {
+       u, _ := url.Parse("http://example.com/c=" + arvadostest.FooCollection + "/foo")
+       req := &http.Request{
+               Method:     "POST",
+               Host:       u.Host,
+               URL:        u,
+               RequestURI: u.RequestURI(),
+               Header: http.Header{
+                       "Origin":       {"https://origin.example"},
+                       "Content-Type": {"application/x-www-form-urlencoded"},
+               },
+               Body: ioutil.NopCloser(strings.NewReader(url.Values{
+                       "api_token":   {arvadostest.ActiveToken},
+                       "disposition": {"attachment"},
+               }.Encode())),
+       }
+       resp := httptest.NewRecorder()
+       s.testServer.Handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       c.Check(resp.Body.String(), check.Equals, "foo")
+       c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
+}
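+
+// examplePostFormDirectly is a hedged sketch, not exercised by the
+// tests: the browser behavior described above, reproduced with
+// net/http. The host, collection UUID, and token are placeholders. A
+// browser's XHR adds the Origin header automatically; a plain HTTP
+// client has to set it explicitly to be offered content instead of a
+// redirect.
+func examplePostFormDirectly() (*http.Response, error) {
+       form := url.Values{
+               "api_token":   {"placeholder-token"},
+               "disposition": {"attachment"},
+       }
+       req, err := http.NewRequest("POST",
+               "https://collections.example.com/c=placeholder-uuid/foo",
+               strings.NewReader(form.Encode()))
+       if err != nil {
+               return nil, err
+       }
+       req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+       req.Header.Set("Origin", "https://origin.example")
+       return http.DefaultClient.Do(req)
+}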
+
+func (s *IntegrationSuite) testVhostRedirectTokenToCookie(c *check.C, method, hostPath, queryString, contentType, reqBody string, expectStatus int, expectRespBody string) *httptest.ResponseRecorder {
+       u, _ := url.Parse(`http://` + hostPath + queryString)
+       req := &http.Request{
+               Method:     method,
+               Host:       u.Host,
+               URL:        u,
+               RequestURI: u.RequestURI(),
+               Header:     http.Header{"Content-Type": {contentType}},
+               Body:       ioutil.NopCloser(strings.NewReader(reqBody)),
+       }
+
+       resp := httptest.NewRecorder()
+       defer func() {
+               c.Check(resp.Code, check.Equals, expectStatus)
+               c.Check(resp.Body.String(), check.Equals, expectRespBody)
+       }()
+
+       s.testServer.Handler.ServeHTTP(resp, req)
+       if resp.Code != http.StatusSeeOther {
+               return resp
+       }
+       c.Check(resp.Body.String(), check.Matches, `.*href="http://`+regexp.QuoteMeta(html.EscapeString(hostPath))+`(\?[^"]*)?".*`)
+       cookies := (&http.Response{Header: resp.Header()}).Cookies()
+
+       u, _ = u.Parse(resp.Header().Get("Location"))
+       req = &http.Request{
+               Method:     "GET",
+               Host:       u.Host,
+               URL:        u,
+               RequestURI: u.RequestURI(),
+               Header:     http.Header{},
+       }
+       for _, c := range cookies {
+               req.AddCookie(c)
+       }
+
+       resp = httptest.NewRecorder()
+       s.testServer.Handler.ServeHTTP(resp, req)
+       c.Check(resp.Header().Get("Location"), check.Equals, "")
+       return resp
+}
+
+func (s *IntegrationSuite) TestDirectoryListing(c *check.C) {
+       s.testServer.Config.AttachmentOnlyHost = "download.example.com"
+       authHeader := http.Header{
+               "Authorization": {"OAuth2 " + arvadostest.ActiveToken},
+       }
+       for _, trial := range []struct {
+               uri      string
+               header   http.Header
+               expect   []string
+               redirect string
+               cutDirs  int
+       }{
+               {
+                       uri:     strings.Replace(arvadostest.FooAndBarFilesInDirPDH, "+", "-", -1) + ".example.com/",
+                       header:  authHeader,
+                       expect:  []string{"dir1/foo", "dir1/bar"},
+                       cutDirs: 0,
+               },
+               {
+                       uri:     strings.Replace(arvadostest.FooAndBarFilesInDirPDH, "+", "-", -1) + ".example.com/dir1/",
+                       header:  authHeader,
+                       expect:  []string{"foo", "bar"},
+                       cutDirs: 1,
+               },
+               {
+                       uri:     "download.example.com/collections/" + arvadostest.FooAndBarFilesInDirUUID + "/",
+                       header:  authHeader,
+                       expect:  []string{"dir1/foo", "dir1/bar"},
+                       cutDirs: 2,
+               },
+               {
+                       uri:     "download.example.com/users/active/foo_file_in_dir/",
+                       header:  authHeader,
+                       expect:  []string{"dir1/"},
+                       cutDirs: 3,
+               },
+               {
+                       uri:     "download.example.com/users/active/foo_file_in_dir/dir1/",
+                       header:  authHeader,
+                       expect:  []string{"bar"},
+                       cutDirs: 4,
+               },
+               {
+                       uri:     "download.example.com/",
+                       header:  authHeader,
+                       expect:  []string{"users/"},
+                       cutDirs: 0,
+               },
+               {
+                       uri:      "download.example.com/users",
+                       header:   authHeader,
+                       redirect: "/users/",
+                       expect:   []string{"active/"},
+                       cutDirs:  1,
+               },
+               {
+                       uri:     "download.example.com/users/",
+                       header:  authHeader,
+                       expect:  []string{"active/"},
+                       cutDirs: 1,
+               },
+               {
+                       uri:      "download.example.com/users/active",
+                       header:   authHeader,
+                       redirect: "/users/active/",
+                       expect:   []string{"foo_file_in_dir/"},
+                       cutDirs:  2,
+               },
+               {
+                       uri:     "download.example.com/users/active/",
+                       header:  authHeader,
+                       expect:  []string{"foo_file_in_dir/"},
+                       cutDirs: 2,
+               },
+               {
+                       uri:     "collections.example.com/collections/download/" + arvadostest.FooAndBarFilesInDirUUID + "/" + arvadostest.ActiveToken + "/",
+                       header:  nil,
+                       expect:  []string{"dir1/foo", "dir1/bar"},
+                       cutDirs: 4,
+               },
+               {
+                       uri:     "collections.example.com/c=" + arvadostest.FooAndBarFilesInDirUUID + "/t=" + arvadostest.ActiveToken + "/",
+                       header:  nil,
+                       expect:  []string{"dir1/foo", "dir1/bar"},
+                       cutDirs: 2,
+               },
+               {
+                       uri:     "collections.example.com/c=" + arvadostest.FooAndBarFilesInDirUUID + "/t=" + arvadostest.ActiveToken,
+                       header:  nil,
+                       expect:  []string{"dir1/foo", "dir1/bar"},
+                       cutDirs: 2,
+               },
+               {
+                       uri:     "download.example.com/c=" + arvadostest.FooAndBarFilesInDirUUID,
+                       header:  authHeader,
+                       expect:  []string{"dir1/foo", "dir1/bar"},
+                       cutDirs: 1,
+               },
+               {
+                       uri:      "download.example.com/c=" + arvadostest.FooAndBarFilesInDirUUID + "/dir1",
+                       header:   authHeader,
+                       redirect: "/c=" + arvadostest.FooAndBarFilesInDirUUID + "/dir1/",
+                       expect:   []string{"foo", "bar"},
+                       cutDirs:  2,
+               },
+               {
+                       uri:     "download.example.com/c=" + arvadostest.FooAndBarFilesInDirUUID + "/_/dir1/",
+                       header:  authHeader,
+                       expect:  []string{"foo", "bar"},
+                       cutDirs: 3,
+               },
+               {
+                       uri:      arvadostest.FooAndBarFilesInDirUUID + ".example.com/dir1?api_token=" + arvadostest.ActiveToken,
+                       header:   authHeader,
+                       redirect: "/dir1/",
+                       expect:   []string{"foo", "bar"},
+                       cutDirs:  1,
+               },
+               {
+                       uri:    "collections.example.com/c=" + arvadostest.FooAndBarFilesInDirUUID + "/theperthcountyconspiracydoesnotexist/",
+                       header: authHeader,
+                       expect: nil,
+               },
+               {
+                       uri:     "download.example.com/c=" + arvadostest.WazVersion1Collection,
+                       header:  authHeader,
+                       expect:  []string{"waz"},
+                       cutDirs: 1,
+               },
+               {
+                       uri:     "download.example.com/by_id/" + arvadostest.WazVersion1Collection,
+                       header:  authHeader,
+                       expect:  []string{"waz"},
+                       cutDirs: 2,
+               },
+       } {
+               c.Logf("HTML: %q => %q", trial.uri, trial.expect)
+               resp := httptest.NewRecorder()
+               u := mustParseURL("//" + trial.uri)
+               req := &http.Request{
+                       Method:     "GET",
+                       Host:       u.Host,
+                       URL:        u,
+                       RequestURI: u.RequestURI(),
+                       Header:     copyHeader(trial.header),
+               }
+               s.testServer.Handler.ServeHTTP(resp, req)
+               var cookies []*http.Cookie
+               for resp.Code == http.StatusSeeOther {
+                       u, _ := req.URL.Parse(resp.Header().Get("Location"))
+                       req = &http.Request{
+                               Method:     "GET",
+                               Host:       u.Host,
+                               URL:        u,
+                               RequestURI: u.RequestURI(),
+                               Header:     copyHeader(trial.header),
+                       }
+                       cookies = append(cookies, (&http.Response{Header: resp.Header()}).Cookies()...)
+                       for _, c := range cookies {
+                               req.AddCookie(c)
+                       }
+                       resp = httptest.NewRecorder()
+                       s.testServer.Handler.ServeHTTP(resp, req)
+               }
+               if trial.redirect != "" {
+                       c.Check(req.URL.Path, check.Equals, trial.redirect)
+               }
+               if trial.expect == nil {
+                       c.Check(resp.Code, check.Equals, http.StatusNotFound)
+               } else {
+                       c.Check(resp.Code, check.Equals, http.StatusOK)
+                       for _, e := range trial.expect {
+                               c.Check(resp.Body.String(), check.Matches, `(?ms).*href="./`+e+`".*`)
+                       }
+                       c.Check(resp.Body.String(), check.Matches, `(?ms).*--cut-dirs=`+fmt.Sprintf("%d", trial.cutDirs)+` .*`)
+               }
+
+               c.Logf("WebDAV: %q => %q", trial.uri, trial.expect)
+               req = &http.Request{
+                       Method:     "OPTIONS",
+                       Host:       u.Host,
+                       URL:        u,
+                       RequestURI: u.RequestURI(),
+                       Header:     copyHeader(trial.header),
+                       Body:       ioutil.NopCloser(&bytes.Buffer{}),
+               }
+               resp = httptest.NewRecorder()
+               s.testServer.Handler.ServeHTTP(resp, req)
+               if trial.expect == nil {
+                       c.Check(resp.Code, check.Equals, http.StatusNotFound)
+               } else {
+                       c.Check(resp.Code, check.Equals, http.StatusOK)
+               }
+
+               req = &http.Request{
+                       Method:     "PROPFIND",
+                       Host:       u.Host,
+                       URL:        u,
+                       RequestURI: u.RequestURI(),
+                       Header:     copyHeader(trial.header),
+                       Body:       ioutil.NopCloser(&bytes.Buffer{}),
+               }
+               resp = httptest.NewRecorder()
+               s.testServer.Handler.ServeHTTP(resp, req)
+               if trial.expect == nil {
+                       c.Check(resp.Code, check.Equals, http.StatusNotFound)
+               } else {
+                       c.Check(resp.Code, check.Equals, http.StatusMultiStatus)
+                       for _, e := range trial.expect {
+                               c.Check(resp.Body.String(), check.Matches, `(?ms).*<D:href>`+filepath.Join(u.Path, e)+`</D:href>.*`)
+                       }
+               }
+       }
+}
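+
+// The HTML listing checked above embeds a wget hint whose --cut-dirs
+// value is the cutDirs expectation in each trial. A hedged sketch of
+// the kind of recursive download that hint describes (host, UUID, and
+// token are placeholders):
+//
+//     wget --mirror --no-parent --no-host-directories --cut-dirs=2 \
+//       --header="Authorization: OAuth2 placeholder-token" \
+//       https://download.example.com/c=placeholder-uuid/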
+
+func (s *IntegrationSuite) TestDeleteLastFile(c *check.C) {
+       arv := arvados.NewClientFromEnv()
+       var newCollection arvados.Collection
+       err := arv.RequestAndDecode(&newCollection, "POST", "arvados/v1/collections", arv.UpdateBody(&arvados.Collection{
+               OwnerUUID:    arvadostest.ActiveUserUUID,
+               ManifestText: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt 0:3:bar.txt\n",
+               Name:         "keep-web test collection",
+       }), map[string]bool{"ensure_unique_name": true})
+       c.Assert(err, check.IsNil)
+       defer arv.RequestAndDecode(&newCollection, "DELETE", "arvados/v1/collections/"+newCollection.UUID, nil, nil)
+
+       var updated arvados.Collection
+       for _, fnm := range []string{"foo.txt", "bar.txt"} {
+               s.testServer.Config.AttachmentOnlyHost = "example.com"
+               u, _ := url.Parse("http://example.com/c=" + newCollection.UUID + "/" + fnm)
+               req := &http.Request{
+                       Method:     "DELETE",
+                       Host:       u.Host,
+                       URL:        u,
+                       RequestURI: u.RequestURI(),
+                       Header: http.Header{
+                               "Authorization": {"Bearer " + arvadostest.ActiveToken},
+                       },
+               }
+               resp := httptest.NewRecorder()
+               s.testServer.Handler.ServeHTTP(resp, req)
+               c.Check(resp.Code, check.Equals, http.StatusNoContent)
+
+               updated = arvados.Collection{}
+               err = arv.RequestAndDecode(&updated, "GET", "arvados/v1/collections/"+newCollection.UUID, nil, nil)
+               c.Check(err, check.IsNil)
+               c.Check(updated.ManifestText, check.Not(check.Matches), `(?ms).*\Q`+fnm+`\E.*`)
+               c.Logf("updated manifest_text %q", updated.ManifestText)
+       }
+       c.Check(updated.ManifestText, check.Equals, "")
+}
+
+func (s *IntegrationSuite) TestHealthCheckPing(c *check.C) {
+       s.testServer.Config.ManagementToken = arvadostest.ManagementToken
+       authHeader := http.Header{
+               "Authorization": {"Bearer " + arvadostest.ManagementToken},
+       }
+
+       resp := httptest.NewRecorder()
+       u := mustParseURL("http://download.example.com/_health/ping")
+       req := &http.Request{
+               Method:     "GET",
+               Host:       u.Host,
+               URL:        u,
+               RequestURI: u.RequestURI(),
+               Header:     authHeader,
+       }
+       s.testServer.Handler.ServeHTTP(resp, req)
+
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       c.Check(resp.Body.String(), check.Matches, `{"health":"OK"}\n`)
+}
+
+func copyHeader(h http.Header) http.Header {
+       hc := http.Header{}
+       for k, v := range h {
+               hc[k] = append([]string(nil), v...)
+       }
+       return hc
+}
diff --git a/services/keep-web/keep-web.service b/services/keep-web/keep-web.service
new file mode 100644 (file)
index 0000000..1931256
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados Keep web gateway
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/keep-web/keep-web.yml
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=notify
+ExecStart=/usr/bin/keep-web
+Restart=always
+RestartSec=1
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/services/keep-web/main.go b/services/keep-web/main.go
new file mode 100644 (file)
index 0000000..018b5a2
--- /dev/null
@@ -0,0 +1,136 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "flag"
+       "fmt"
+       "os"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/config"
+       "github.com/coreos/go-systemd/daemon"
+       log "github.com/sirupsen/logrus"
+)
+
+var (
+       defaultConfigPath = "/etc/arvados/keep-web/keep-web.yml"
+       version           = "dev"
+)
+
+// Config specifies server configuration.
+type Config struct {
+       Client arvados.Client
+
+       Listen string
+
+       AnonymousTokens    []string
+       AttachmentOnlyHost string
+       TrustAllContent    bool
+
+       Cache cache
+
+       // Hack to support old command line flag, which is a bool
+       // meaning "get actual token from environment".
+       deprecatedAllowAnonymous bool
+
+       // Authorization token to be included in all health check requests.
+       ManagementToken string
+}
+
+// DefaultConfig returns the default configuration.
+func DefaultConfig() *Config {
+       return &Config{
+               Listen: ":80",
+               Cache: cache{
+                       TTL:                  arvados.Duration(5 * time.Minute),
+                       UUIDTTL:              arvados.Duration(5 * time.Second),
+                       MaxCollectionEntries: 1000,
+                       MaxCollectionBytes:   100000000,
+                       MaxPermissionEntries: 1000,
+                       MaxUUIDEntries:       1000,
+               },
+       }
+}
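+
+// An illustrative keep-web.yml matching the fields above; every value
+// here is an example, not a recommendation:
+//
+//     Client:
+//       APIHost: zzzzz.arvadosapi.com:443
+//       Insecure: false
+//     Listen: :9002
+//     AnonymousTokens:
+//       - xxxxxxxxxxxxxxxxxxxxxxx
+//     AttachmentOnlyHost: download.example.com:9002
+//     TrustAllContent: false
+//     ManagementToken: xyzzy
+//     Cache:
+//       TTL: 5m
+//       UUIDTTL: 5s
+//       MaxCollectionEntries: 1000
+//       MaxCollectionBytes: 100000000
+//       MaxPermissionEntries: 1000
+//       MaxUUIDEntries: 1000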
+
+func init() {
+       // MakeArvadosClient returns an error if this env var isn't
+       // available as a default token (even if we explicitly set a
+       // different token before doing anything with the client). We
+       // set this dummy value during init so it doesn't clobber the
+       // one used by "run test servers".
+       if os.Getenv("ARVADOS_API_TOKEN") == "" {
+               os.Setenv("ARVADOS_API_TOKEN", "xxx")
+       }
+
+       log.SetFormatter(&log.JSONFormatter{
+               TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
+       })
+}
+
+func main() {
+       cfg := DefaultConfig()
+
+       var configPath string
+       deprecated := " (DEPRECATED -- use config file instead)"
+       flag.StringVar(&configPath, "config", defaultConfigPath,
+               "`path` to JSON or YAML configuration file")
+       flag.StringVar(&cfg.Listen, "listen", "",
+               "address:port or :port to listen on"+deprecated)
+       flag.BoolVar(&cfg.deprecatedAllowAnonymous, "allow-anonymous", false,
+               "Load an anonymous token from the ARVADOS_API_TOKEN environment variable"+deprecated)
+       flag.StringVar(&cfg.AttachmentOnlyHost, "attachment-only-host", "",
+               "Only serve attachments at the given `host:port`"+deprecated)
+       flag.BoolVar(&cfg.TrustAllContent, "trust-all-content", false,
+               "Serve non-public content from a single origin. Dangerous: read docs before using!"+deprecated)
+       flag.StringVar(&cfg.ManagementToken, "management-token", "",
+               "Authorization token to be included in all health check requests.")
+
+       dumpConfig := flag.Bool("dump-config", false,
+               "write current configuration to stdout and exit")
+       getVersion := flag.Bool("version", false,
+               "print version information and exit.")
+       flag.Usage = usage
+       flag.Parse()
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("keep-web %s\n", version)
+               return
+       }
+
+       if err := config.LoadFile(cfg, configPath); err != nil {
+               if h := os.Getenv("ARVADOS_API_HOST"); h != "" && configPath == defaultConfigPath {
+                       log.Printf("DEPRECATED: Using ARVADOS_API_HOST environment variable. Use config file instead.")
+                       cfg.Client.APIHost = h
+               } else {
+                       log.Fatal(err)
+               }
+       }
+       if cfg.deprecatedAllowAnonymous {
+               log.Printf("DEPRECATED: Using -allow-anonymous command line flag with ARVADOS_API_TOKEN environment variable. Use config file instead.")
+               cfg.AnonymousTokens = []string{os.Getenv("ARVADOS_API_TOKEN")}
+       }
+
+       if *dumpConfig {
+               log.Fatal(config.DumpAndExit(cfg))
+       }
+
+       log.Printf("keep-web %s started", version)
+
+       os.Setenv("ARVADOS_API_HOST", cfg.Client.APIHost)
+       srv := &server{Config: cfg}
+       if err := srv.Start(); err != nil {
+               log.Fatal(err)
+       }
+       if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
+               log.Printf("Error notifying init daemon: %v", err)
+       }
+       log.Println("Listening at", srv.Addr)
+       if err := srv.Wait(); err != nil {
+               log.Fatal(err)
+       }
+}
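+
+// Typical startup, as the systemd unit elsewhere in this commit does it
+// (the path shown is defaultConfigPath, so the flag is optional):
+//
+//     keep-web -config /etc/arvados/keep-web/keep-web.yml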
diff --git a/services/keep-web/ranges_test.go b/services/keep-web/ranges_test.go
new file mode 100644 (file)
index 0000000..4cef01e
--- /dev/null
@@ -0,0 +1,94 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "fmt"
+       "net/http"
+       "net/http/httptest"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       check "gopkg.in/check.v1"
+)
+
+func (s *IntegrationSuite) TestRanges(c *check.C) {
+       blocksize := 1000000
+       var uuid string
+       {
+               testdata := make([]byte, blocksize)
+               for i := 0; i < blocksize; i++ {
+                       testdata[i] = byte(' ')
+               }
+               copy(testdata[1:4], []byte("foo"))
+               arv, err := arvadosclient.MakeArvadosClient()
+               c.Assert(err, check.Equals, nil)
+               arv.ApiToken = arvadostest.ActiveToken
+               kc, err := keepclient.MakeKeepClient(arv)
+               c.Assert(err, check.Equals, nil)
+               loc, _, err := kc.PutB(testdata[:])
+               c.Assert(err, check.Equals, nil)
+               loc2, _, err := kc.PutB([]byte{'Z'})
+               c.Assert(err, check.Equals, nil)
+
+               mtext := fmt.Sprintf(". %s %s %s %s %s 1:%d:testdata.bin 0:1:space.txt\n", loc, loc, loc, loc, loc2, blocksize*4)
+               coll := map[string]interface{}{}
+               err = arv.Create("collections",
+                       map[string]interface{}{
+                               "collection": map[string]interface{}{
+                                       "name":          "test data for keep-web TestRanges",
+                                       "manifest_text": mtext,
+                               },
+                       }, &coll)
+               c.Assert(err, check.Equals, nil)
+               uuid = coll["uuid"].(string)
+               defer arv.Delete("collections", uuid, nil, nil)
+       }
+
+       url := mustParseURL("http://" + uuid + ".collections.example.com/testdata.bin")
+       for _, trial := range []struct {
+               header     string
+               expectObey bool
+               expectBody string
+       }{
+               {"0-2", true, "foo"},
+               {"-2", true, " Z"},
+               {"1-4", true, "oo  "},
+               {"z-y", false, ""},
+               {"1000000-1000003", true, "foo "},
+               {"999999-1000003", true, " foo "},
+               {"2000000-2000003", true, "foo "},
+               {"1999999-2000002", true, " foo"},
+               {"3999998-3999999", true, " Z"},
+               {"3999998-4000004", true, " Z"},
+               {"3999998-", true, " Z"},
+       } {
+               c.Logf("trial: %#v", trial)
+               resp := httptest.NewRecorder()
+               req := &http.Request{
+                       Method:     "GET",
+                       URL:        url,
+                       Host:       url.Host,
+                       RequestURI: url.RequestURI(),
+                       Header: http.Header{
+                               "Authorization": {"OAuth2 " + arvadostest.ActiveToken},
+                               "Range":         {"bytes=" + trial.header},
+                       },
+               }
+               s.testServer.Handler.ServeHTTP(resp, req)
+               if trial.expectObey {
+                       c.Check(resp.Code, check.Equals, http.StatusPartialContent)
+                       c.Check(resp.Body.Len(), check.Equals, len(trial.expectBody))
+                       if resp.Body.Len() > 1000 {
+                               c.Check(resp.Body.String()[:1000]+"[...]", check.Equals, trial.expectBody)
+                       } else {
+                               c.Check(resp.Body.String(), check.Equals, trial.expectBody)
+                       }
+               } else {
+                       c.Check(resp.Code, check.Equals, http.StatusRequestedRangeNotSatisfiable)
+               }
+       }
+}
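+
+// exampleRangeRequest is a hedged sketch, not called by the test above:
+// an ordinary ranged read against keep-web, with placeholder host and
+// token. A satisfiable Range yields 206 Partial Content with only the
+// requested bytes; an unsatisfiable one yields 416.
+func exampleRangeRequest() (*http.Response, error) {
+       req, err := http.NewRequest("GET",
+               "http://placeholder-uuid.collections.example.com/testdata.bin", nil)
+       if err != nil {
+               return nil, err
+       }
+       req.Header.Set("Authorization", "OAuth2 placeholder-token")
+       req.Header.Set("Range", "bytes=0-2") // first three bytes only
+       return http.DefaultClient.Do(req)
+}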
diff --git a/services/keep-web/server.go b/services/keep-web/server.go
new file mode 100644 (file)
index 0000000..f70dd1a
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "net/http"
+
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "github.com/prometheus/client_golang/prometheus"
+)
+
+type server struct {
+       httpserver.Server
+       Config *Config
+}
+
+func (srv *server) Start() error {
+       h := &handler{Config: srv.Config}
+       reg := prometheus.NewRegistry()
+       h.Config.Cache.registry = reg
+       mh := httpserver.Instrument(reg, nil, httpserver.AddRequestIDs(httpserver.LogRequests(nil, h)))
+       h.MetricsAPI = mh.ServeAPI(h.Config.ManagementToken, http.NotFoundHandler())
+       srv.Handler = mh
+       srv.Addr = srv.Config.Listen
+       return srv.Server.Start()
+}
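+
+// For comparison, a hedged sketch of the same instrumentation idea
+// using the stock promhttp middleware instead of the Arvados httpserver
+// package (reg as above; appHandler is assumed, and promhttp would need
+// importing):
+//
+//     dur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
+//             Name: "request_duration_seconds",
+//             Help: "Time spent serving each request.",
+//     }, []string{"code", "method"})
+//     reg.MustRegister(dur)
+//     instrumented := promhttp.InstrumentHandlerDuration(dur, appHandler)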
diff --git a/services/keep-web/server_test.go b/services/keep-web/server_test.go
new file mode 100644 (file)
index 0000000..8b689ef
--- /dev/null
@@ -0,0 +1,451 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "crypto/md5"
+       "encoding/json"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net"
+       "net/http"
+       "os"
+       "os/exec"
+       "strings"
+       "testing"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       check "gopkg.in/check.v1"
+)
+
+var testAPIHost = os.Getenv("ARVADOS_API_HOST")
+
+var _ = check.Suite(&IntegrationSuite{})
+
+// IntegrationSuite tests need an API server and a keep-web server
+type IntegrationSuite struct {
+       testServer *server
+}
+
+func (s *IntegrationSuite) TestNoToken(c *check.C) {
+       for _, token := range []string{
+               "",
+               "bogustoken",
+       } {
+               hdr, body, _ := s.runCurl(c, token, "collections.example.com", "/collections/"+arvadostest.FooCollection+"/foo")
+               c.Check(hdr, check.Matches, `(?s)HTTP/1.1 404 Not Found\r\n.*`)
+               c.Check(body, check.Equals, "")
+
+               if token != "" {
+                       hdr, body, _ = s.runCurl(c, token, "collections.example.com", "/collections/download/"+arvadostest.FooCollection+"/"+token+"/foo")
+                       c.Check(hdr, check.Matches, `(?s)HTTP/1.1 404 Not Found\r\n.*`)
+                       c.Check(body, check.Equals, "")
+               }
+
+               hdr, body, _ = s.runCurl(c, token, "collections.example.com", "/bad-route")
+               c.Check(hdr, check.Matches, `(?s)HTTP/1.1 404 Not Found\r\n.*`)
+               c.Check(body, check.Equals, "")
+       }
+}
+
+// TODO: Move most cases to functional tests -- at least use Go's own
+// http client instead of forking curl. Just leave enough of an
+// integration test to assure that the documented way of invoking curl
+// really works against the server.
+func (s *IntegrationSuite) Test404(c *check.C) {
+       for _, uri := range []string{
+               // Routing errors (always 404 regardless of what's stored in Keep)
+               "/foo",
+               "/download",
+               "/collections",
+               "/collections/",
+               // Implicit/generated index is not implemented yet;
+               // until then, return 404.
+               "/collections/" + arvadostest.FooCollection,
+               "/collections/" + arvadostest.FooCollection + "/",
+               "/collections/" + arvadostest.FooBarDirCollection + "/dir1",
+               "/collections/" + arvadostest.FooBarDirCollection + "/dir1/",
+               // Non-existent file in collection
+               "/collections/" + arvadostest.FooCollection + "/theperthcountyconspiracy",
+               "/collections/download/" + arvadostest.FooCollection + "/" + arvadostest.ActiveToken + "/theperthcountyconspiracy",
+               // Non-existent collection
+               "/collections/" + arvadostest.NonexistentCollection,
+               "/collections/" + arvadostest.NonexistentCollection + "/",
+               "/collections/" + arvadostest.NonexistentCollection + "/theperthcountyconspiracy",
+               "/collections/download/" + arvadostest.NonexistentCollection + "/" + arvadostest.ActiveToken + "/theperthcountyconspiracy",
+       } {
+               hdr, body, _ := s.runCurl(c, arvadostest.ActiveToken, "collections.example.com", uri)
+               c.Check(hdr, check.Matches, "(?s)HTTP/1.1 404 Not Found\r\n.*")
+               if len(body) > 0 {
+                       c.Check(body, check.Equals, "404 page not found\n")
+               }
+       }
+}
+
+func (s *IntegrationSuite) Test1GBFile(c *check.C) {
+       if testing.Short() {
+               c.Skip("skipping 1GB integration test in short mode")
+       }
+       s.test100BlockFile(c, 10000000)
+}
+
+func (s *IntegrationSuite) Test100BlockFile(c *check.C) {
+       if testing.Short() {
+               // 3 MB
+               s.test100BlockFile(c, 30000)
+       } else {
+               // 300 MB
+               s.test100BlockFile(c, 3000000)
+       }
+}
+
+func (s *IntegrationSuite) test100BlockFile(c *check.C, blocksize int) {
+       testdata := make([]byte, blocksize)
+       for i := 0; i < blocksize; i++ {
+               testdata[i] = byte(' ')
+       }
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, check.Equals, nil)
+       arv.ApiToken = arvadostest.ActiveToken
+       kc, err := keepclient.MakeKeepClient(arv)
+       c.Assert(err, check.Equals, nil)
+       loc, _, err := kc.PutB(testdata[:])
+       c.Assert(err, check.Equals, nil)
+       mtext := "."
+       for i := 0; i < 100; i++ {
+               mtext = mtext + " " + loc
+       }
+       mtext = mtext + fmt.Sprintf(" 0:%d00:testdata.bin\n", blocksize)
+       coll := map[string]interface{}{}
+       err = arv.Create("collections",
+               map[string]interface{}{
+                       "collection": map[string]interface{}{
+                               "name":          fmt.Sprintf("testdata blocksize=%d", blocksize),
+                               "manifest_text": mtext,
+                       },
+               }, &coll)
+       c.Assert(err, check.Equals, nil)
+       uuid := coll["uuid"].(string)
+
+       hdr, body, size := s.runCurl(c, arv.ApiToken, uuid+".collections.example.com", "/testdata.bin")
+       c.Check(hdr, check.Matches, `(?s)HTTP/1.1 200 OK\r\n.*`)
+       c.Check(hdr, check.Matches, `(?si).*Content-length: `+fmt.Sprintf("%d00", blocksize)+`\r\n.*`)
+       c.Check([]byte(body)[:1234], check.DeepEquals, testdata[:1234])
+       c.Check(size, check.Equals, int64(blocksize)*100)
+}
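+
+// For reference, the manifest assembled above reuses one stored block
+// 100 times. A smaller hedged illustration of the same trick (the
+// locator is md5("foo") with its 3-byte size hint): a one-line manifest
+// describing a 6-byte file backed by a single block referenced twice.
+//
+//     . acbd18db4cc2f85cedef654fccc4a4d8+3 acbd18db4cc2f85cedef654fccc4a4d8+3 0:6:foofoo.txt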
+
+type curlCase struct {
+       auth    string
+       host    string
+       path    string
+       dataMD5 string
+}
+
+func (s *IntegrationSuite) Test200(c *check.C) {
+       s.testServer.Config.AnonymousTokens = []string{arvadostest.AnonymousToken}
+       for _, spec := range []curlCase{
+               // My collection
+               {
+                       auth:    arvadostest.ActiveToken,
+                       host:    arvadostest.FooCollection + "--collections.example.com",
+                       path:    "/foo",
+                       dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
+               },
+               {
+                       auth:    arvadostest.ActiveToken,
+                       host:    arvadostest.FooCollection + ".collections.example.com",
+                       path:    "/foo",
+                       dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
+               },
+               {
+                       host:    strings.Replace(arvadostest.FooPdh, "+", "-", 1) + ".collections.example.com",
+                       path:    "/t=" + arvadostest.ActiveToken + "/foo",
+                       dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
+               },
+               {
+                       path:    "/c=" + arvadostest.FooPdh + "/t=" + arvadostest.ActiveToken + "/foo",
+                       dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
+               },
+               {
+                       path:    "/c=" + strings.Replace(arvadostest.FooPdh, "+", "-", 1) + "/t=" + arvadostest.ActiveToken + "/_/foo",
+                       dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
+               },
+               {
+                       path:    "/collections/download/" + arvadostest.FooCollection + "/" + arvadostest.ActiveToken + "/foo",
+                       dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
+               },
+               {
+                       auth:    "tokensobogus",
+                       path:    "/collections/download/" + arvadostest.FooCollection + "/" + arvadostest.ActiveToken + "/foo",
+                       dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
+               },
+               {
+                       auth:    arvadostest.ActiveToken,
+                       path:    "/collections/download/" + arvadostest.FooCollection + "/" + arvadostest.ActiveToken + "/foo",
+                       dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
+               },
+               {
+                       auth:    arvadostest.AnonymousToken,
+                       path:    "/collections/download/" + arvadostest.FooCollection + "/" + arvadostest.ActiveToken + "/foo",
+                       dataMD5: "acbd18db4cc2f85cedef654fccc4a4d8",
+               },
+
+               // Anonymously accessible data
+               {
+                       path:    "/c=" + arvadostest.HelloWorldCollection + "/Hello%20world.txt",
+                       dataMD5: "f0ef7081e1539ac00ef5b761b4fb01b3",
+               },
+               {
+                       host:    arvadostest.HelloWorldCollection + ".collections.example.com",
+                       path:    "/Hello%20world.txt",
+                       dataMD5: "f0ef7081e1539ac00ef5b761b4fb01b3",
+               },
+               {
+                       host:    arvadostest.HelloWorldCollection + ".collections.example.com",
+                       path:    "/_/Hello%20world.txt",
+                       dataMD5: "f0ef7081e1539ac00ef5b761b4fb01b3",
+               },
+               {
+                       path:    "/collections/" + arvadostest.HelloWorldCollection + "/Hello%20world.txt",
+                       dataMD5: "f0ef7081e1539ac00ef5b761b4fb01b3",
+               },
+               {
+                       auth:    arvadostest.ActiveToken,
+                       path:    "/collections/" + arvadostest.HelloWorldCollection + "/Hello%20world.txt",
+                       dataMD5: "f0ef7081e1539ac00ef5b761b4fb01b3",
+               },
+               {
+                       auth:    arvadostest.SpectatorToken,
+                       path:    "/collections/" + arvadostest.HelloWorldCollection + "/Hello%20world.txt",
+                       dataMD5: "f0ef7081e1539ac00ef5b761b4fb01b3",
+               },
+               {
+                       auth:    arvadostest.SpectatorToken,
+                       host:    arvadostest.HelloWorldCollection + "--collections.example.com",
+                       path:    "/Hello%20world.txt",
+                       dataMD5: "f0ef7081e1539ac00ef5b761b4fb01b3",
+               },
+               {
+                       auth:    arvadostest.SpectatorToken,
+                       path:    "/collections/download/" + arvadostest.HelloWorldCollection + "/" + arvadostest.SpectatorToken + "/Hello%20world.txt",
+                       dataMD5: "f0ef7081e1539ac00ef5b761b4fb01b3",
+               },
+       } {
+               host := spec.host
+               if host == "" {
+                       host = "collections.example.com"
+               }
+               hdr, body, _ := s.runCurl(c, spec.auth, host, spec.path)
+               c.Check(hdr, check.Matches, `(?s)HTTP/1.1 200 OK\r\n.*`)
+               if strings.HasSuffix(spec.path, ".txt") {
+                       c.Check(hdr, check.Matches, `(?s).*\r\nContent-Type: text/plain.*`)
+                       // TODO: Check some types that aren't
+                       // automatically detected by Go's http server
+                       // by sniffing the content.
+               }
+               c.Check(fmt.Sprintf("%x", md5.Sum([]byte(body))), check.Equals, spec.dataMD5)
+       }
+}
+
+// Return header block and body.
+func (s *IntegrationSuite) runCurl(c *check.C, token, host, uri string, args ...string) (hdr, bodyPart string, bodySize int64) {
+       curlArgs := []string{"--silent", "--show-error", "--include"}
+       testHost, testPort, _ := net.SplitHostPort(s.testServer.Addr)
+       curlArgs = append(curlArgs, "--resolve", host+":"+testPort+":"+testHost)
+       if token != "" {
+               curlArgs = append(curlArgs, "-H", "Authorization: OAuth2 "+token)
+       }
+       curlArgs = append(curlArgs, args...)
+       curlArgs = append(curlArgs, "http://"+host+":"+testPort+uri)
+       c.Log(fmt.Sprintf("curlArgs == %#v", curlArgs))
+       cmd := exec.Command("curl", curlArgs...)
+       stdout, err := cmd.StdoutPipe()
+       c.Assert(err, check.Equals, nil)
+       cmd.Stderr = cmd.Stdout
+       err = cmd.Start()
+       c.Assert(err, check.Equals, nil)
+       buf := make([]byte, 2<<27)
+       n, err := io.ReadFull(stdout, buf)
+       // Discard (but measure the size of) anything past the 256 MiB buffer.
+       var discarded int64
+       if err == io.ErrUnexpectedEOF {
+               buf = buf[:n]
+       } else {
+               c.Assert(err, check.Equals, nil)
+               discarded, err = io.Copy(ioutil.Discard, stdout)
+               c.Assert(err, check.Equals, nil)
+       }
+       err = cmd.Wait()
+       // Without "-f", curl exits 0 as long as it gets a valid HTTP
+       // response from the server, even if the response status
+       // indicates that the request failed. In our test suite, we
+       // always expect a valid HTTP response, and we parse the
+       // headers ourselves. If curl exits non-zero, our testing
+       // environment is broken.
+       c.Assert(err, check.Equals, nil)
+       hdrsAndBody := strings.SplitN(string(buf), "\r\n\r\n", 2)
+       c.Assert(len(hdrsAndBody), check.Equals, 2)
+       hdr = hdrsAndBody[0]
+       bodyPart = hdrsAndBody[1]
+       bodySize = int64(len(bodyPart)) + discarded
+       return
+}
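+
+// For reference, the command line these helpers assemble looks roughly
+// like this (port, token, and UUID are placeholders):
+//
+//     curl --silent --show-error --include \
+//       --resolve collections.example.com:9002:127.0.0.1 \
+//       -H "Authorization: OAuth2 placeholder-token" \
+//       http://collections.example.com:9002/collections/placeholder-uuid/foo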
+
+func (s *IntegrationSuite) TestMetrics(c *check.C) {
+       origin := "http://" + s.testServer.Addr
+       req, _ := http.NewRequest("GET", origin+"/notfound", nil)
+       _, err := http.DefaultClient.Do(req)
+       c.Assert(err, check.IsNil)
+       req, _ = http.NewRequest("GET", origin+"/by_id/", nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp, err := http.DefaultClient.Do(req)
+       c.Assert(err, check.IsNil)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       for i := 0; i < 2; i++ {
+               req, _ = http.NewRequest("GET", origin+"/foo", nil)
+               req.Host = arvadostest.FooCollection + ".example.com"
+               req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+               resp, err = http.DefaultClient.Do(req)
+               c.Assert(err, check.IsNil)
+               c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+               buf, _ := ioutil.ReadAll(resp.Body)
+               c.Check(buf, check.DeepEquals, []byte("foo"))
+               resp.Body.Close()
+       }
+
+       s.testServer.Config.Cache.updateGauges()
+
+       req, _ = http.NewRequest("GET", origin+"/metrics.json", nil)
+       resp, err = http.DefaultClient.Do(req)
+       c.Assert(err, check.IsNil)
+       c.Check(resp.StatusCode, check.Equals, http.StatusUnauthorized)
+
+       req, _ = http.NewRequest("GET", origin+"/metrics.json", nil)
+       req.Header.Set("Authorization", "Bearer badtoken")
+       resp, err = http.DefaultClient.Do(req)
+       c.Assert(err, check.IsNil)
+       c.Check(resp.StatusCode, check.Equals, http.StatusForbidden)
+
+       req, _ = http.NewRequest("GET", origin+"/metrics.json", nil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ManagementToken)
+       resp, err = http.DefaultClient.Do(req)
+       c.Assert(err, check.IsNil)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       type summary struct {
+               SampleCount string  `json:"sample_count"`
+               SampleSum   float64 `json:"sample_sum"`
+               Quantile    []struct {
+                       Quantile float64
+                       Value    float64
+               }
+       }
+       type counter struct {
+               Value int64
+       }
+       type gauge struct {
+               Value float64
+       }
+       var ents []struct {
+               Name   string
+               Help   string
+               Type   string
+               Metric []struct {
+                       Label []struct {
+                               Name  string
+                               Value string
+                       }
+                       Counter counter
+                       Gauge   gauge
+                       Summary summary
+               }
+       }
+       c.Assert(json.NewDecoder(resp.Body).Decode(&ents), check.IsNil)
+       summaries := map[string]summary{}
+       gauges := map[string]gauge{}
+       counters := map[string]counter{}
+       for _, e := range ents {
+               for _, m := range e.Metric {
+                       labels := map[string]string{}
+                       for _, lbl := range m.Label {
+                               labels[lbl.Name] = lbl.Value
+                       }
+                       summaries[e.Name+"/"+labels["method"]+"/"+labels["code"]] = m.Summary
+                       counters[e.Name+"/"+labels["method"]+"/"+labels["code"]] = m.Counter
+                       gauges[e.Name+"/"+labels["method"]+"/"+labels["code"]] = m.Gauge
+               }
+       }
+       c.Check(summaries["request_duration_seconds/get/200"].SampleSum, check.Not(check.Equals), 0)
+       c.Check(summaries["request_duration_seconds/get/200"].SampleCount, check.Equals, "3")
+       c.Check(summaries["request_duration_seconds/get/404"].SampleCount, check.Equals, "1")
+       c.Check(summaries["time_to_status_seconds/get/404"].SampleCount, check.Equals, "1")
+       c.Check(counters["arvados_keepweb_collectioncache_requests//"].Value, check.Equals, int64(2))
+       c.Check(counters["arvados_keepweb_collectioncache_api_calls//"].Value, check.Equals, int64(1))
+       c.Check(counters["arvados_keepweb_collectioncache_hits//"].Value, check.Equals, int64(1))
+       c.Check(counters["arvados_keepweb_collectioncache_pdh_hits//"].Value, check.Equals, int64(1))
+       c.Check(counters["arvados_keepweb_collectioncache_permission_hits//"].Value, check.Equals, int64(1))
+       c.Check(gauges["arvados_keepweb_collectioncache_cached_manifests//"].Value, check.Equals, float64(1))
+       // FooCollection's cached manifest size is 45 ("1f4b0....+45") plus one 51-byte blob signature
+       c.Check(gauges["arvados_keepweb_collectioncache_cached_manifest_bytes//"].Value, check.Equals, float64(45+51))
+
+       // If the Host header indicates a collection, /metrics.json
+       // refers to a file in the collection -- the metrics handler
+       // must not intercept that route.
+       req, _ = http.NewRequest("GET", origin+"/metrics.json", nil)
+       req.Host = strings.Replace(arvadostest.FooCollectionPDH, "+", "-", -1) + ".example.com"
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+       resp, err = http.DefaultClient.Do(req)
+       c.Assert(err, check.IsNil)
+       c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+}
+
+func (s *IntegrationSuite) SetUpSuite(c *check.C) {
+       arvadostest.StartAPI()
+       arvadostest.StartKeep(2, true)
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, check.Equals, nil)
+       arv.ApiToken = arvadostest.ActiveToken
+       kc, err := keepclient.MakeKeepClient(arv)
+       c.Assert(err, check.Equals, nil)
+       kc.PutB([]byte("Hello world\n"))
+       kc.PutB([]byte("foo"))
+       kc.PutB([]byte("foobar"))
+       kc.PutB([]byte("waz"))
+}
+
+func (s *IntegrationSuite) TearDownSuite(c *check.C) {
+       arvadostest.StopKeep(2)
+       arvadostest.StopAPI()
+}
+
+func (s *IntegrationSuite) SetUpTest(c *check.C) {
+       arvadostest.ResetEnv()
+       cfg := DefaultConfig()
+       cfg.Client = arvados.Client{
+               APIHost:  testAPIHost,
+               Insecure: true,
+       }
+       cfg.Listen = "127.0.0.1:0"
+       cfg.ManagementToken = arvadostest.ManagementToken
+       s.testServer = &server{Config: cfg}
+       err := s.testServer.Start()
+       c.Assert(err, check.Equals, nil)
+}
+
+func (s *IntegrationSuite) TearDownTest(c *check.C) {
+       var err error
+       if s.testServer != nil {
+               err = s.testServer.Close()
+       }
+       c.Check(err, check.Equals, nil)
+}
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
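+
+// The suite runs under the standard tool chain; -short trims the
+// large-transfer cases above to a few megabytes:
+//
+//     go test ./services/keep-web -short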
diff --git a/services/keep-web/status_test.go b/services/keep-web/status_test.go
new file mode 100644 (file)
index 0000000..62db198
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "net/http"
+       "net/http/httptest"
+       "net/url"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "gopkg.in/check.v1"
+)
+
+func (s *UnitSuite) TestStatus(c *check.C) {
+       h := handler{Config: DefaultConfig()}
+       u, _ := url.Parse("http://keep-web.example/status.json")
+       req := &http.Request{
+               Method:     "GET",
+               Host:       u.Host,
+               URL:        u,
+               RequestURI: u.RequestURI(),
+       }
+       resp := httptest.NewRecorder()
+       h.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+
+       var status map[string]interface{}
+       err := json.NewDecoder(resp.Body).Decode(&status)
+       c.Check(err, check.IsNil)
+       c.Check(status["Version"], check.Not(check.Equals), "")
+}
+
+func (s *IntegrationSuite) TestNoStatusFromVHost(c *check.C) {
+       u, _ := url.Parse("http://" + arvadostest.FooCollection + "--keep-web.example/status.json")
+       req := &http.Request{
+               Method:     "GET",
+               Host:       u.Host,
+               URL:        u,
+               RequestURI: u.RequestURI(),
+               Header: http.Header{
+                       "Authorization": {"OAuth2 " + arvadostest.ActiveToken},
+               },
+       }
+       resp := httptest.NewRecorder()
+       s.testServer.Handler.ServeHTTP(resp, req)
+       c.Check(resp.Code, check.Equals, http.StatusNotFound)
+}
diff --git a/services/keep-web/usage.go b/services/keep-web/usage.go
new file mode 100644 (file)
index 0000000..705955b
--- /dev/null
@@ -0,0 +1,99 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "flag"
+       "fmt"
+       "os"
+)
+
+func usage() {
+       c := DefaultConfig()
+       c.AnonymousTokens = []string{"xxxxxxxxxxxxxxxxxxxxxxx"}
+       c.Client.APIHost = "zzzzz.arvadosapi.com:443"
+       exampleConfigFile, err := json.MarshalIndent(c, "    ", "  ")
+       if err != nil {
+               panic(err)
+       }
+       fmt.Fprintf(os.Stderr, `
+
+Keep-web provides read-only HTTP access to files stored in Keep; see
+https://godoc.org/github.com/curoverse/arvados/services/keep-web and
+http://doc.arvados.org/install/install-keep-web.html
+
+Usage: keep-web -config path/to/keep-web.yml
+
+Options:
+`)
+       flag.PrintDefaults()
+       fmt.Fprintf(os.Stderr, `
+Example config file:
+    %s
+
+Client.APIHost:
+
+    Address (or address:port) of the Arvados API endpoint.
+
+Client.AuthToken:
+
+    Unused. Normally empty, or omitted entirely.
+
+Client.Insecure:
+
+    True if your Arvados API endpoint uses an unverifiable SSL/TLS
+    certificate.
+
+Listen:
+
+    Local port to listen on. Can be "address", "address:port", or
+    ":port", where "address" is a host IP address or name and "port"
+    is a port number or name.
+
+AnonymousTokens:
+
+    Array of tokens to try when a client does not provide a token.
+
+AttachmentOnlyHost:
+
+    Accept credentials, and add "Content-Disposition: attachment"
+    response headers, for requests at this hostname:port.
+
+    This prohibits inline display, which makes it possible to serve
+    untrusted and non-public content from a single origin, i.e.,
+    without wildcard DNS or SSL.
+
+TrustAllContent:
+
+    Serve non-public content from a single origin. Dangerous: read
+    docs before using!
+
+Cache.TTL:
+
+    Maximum time to cache manifests and permission checks.
+
+Cache.UUIDTTL:
+
+    Maximum time to cache collection state.
+
+Cache.MaxCollectionEntries:
+
+    Maximum number of collection cache entries.
+
+Cache.MaxCollectionBytes:
+
+    Approximate memory limit for collection cache.
+
+Cache.MaxPermissionEntries:
+
+    Maximum number of permission cache entries.
+
+Cache.MaxUUIDEntries:
+
+    Maximum number of UUID cache entries.
+
+`, exampleConfigFile)
+}
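+
+// Illustrative sketch (placeholder values, not part of the original
+// change): a minimal config file matching the options documented
+// above might look like
+//
+//     Client:
+//       APIHost: zzzzz.arvadosapi.com:443
+//       Insecure: false
+//     AnonymousTokens:
+//     - xxxxxxxxxxxxxxxxxxxxxxx
+//     Listen: :9002
+//
+// and is loaded with "keep-web -config path/to/keep-web.yml" as shown
+// in the usage text.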
diff --git a/services/keep-web/webdav.go b/services/keep-web/webdav.go
new file mode 100644 (file)
index 0000000..f9b7538
--- /dev/null
@@ -0,0 +1,198 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "crypto/rand"
+       "errors"
+       "fmt"
+       "io"
+       prand "math/rand"
+       "os"
+       "path"
+       "strings"
+       "sync/atomic"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+
+       "golang.org/x/net/context"
+       "golang.org/x/net/webdav"
+)
+
+var (
+       lockPrefix     string = uuid()
+       nextLockSuffix int64  = prand.Int63()
+       errReadOnly           = errors.New("read-only filesystem")
+)
+
+// webdavFS implements a webdav.FileSystem by wrapping an
+// arvados.CollectionFilesystem.
+//
+// Collections don't preserve empty directories, so Mkdir is
+// effectively a no-op, and we need to make parent dirs spring into
+// existence automatically so sequences like "mkcol foo; put foo/bar"
+// work as expected.
+type webdavFS struct {
+       collfs  arvados.FileSystem
+       writing bool
+       // webdav PROPFIND reads the first few bytes of each file
+       // whose filename extension isn't recognized, which is
+       // prohibitively expensive: we end up fetching multiple 64MiB
+       // blocks. Avoid this by returning EOF on all reads when
+       // handling a PROPFIND.
+       alwaysReadEOF bool
+}
+
+func (fs *webdavFS) makeparents(name string) {
+       if !fs.writing {
+               return
+       }
+       dir, _ := path.Split(name)
+       if dir == "" || dir == "/" {
+               return
+       }
+       dir = dir[:len(dir)-1]
+       fs.makeparents(dir)
+       fs.collfs.Mkdir(dir, 0755)
+}
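+
+// Illustrative sketch (not part of the original change): a WebDAV
+// client doing "MKCOL foo" then "PUT foo/bar/baz" reaches this
+// filesystem roughly as
+//
+//     fs.Mkdir(ctx, "/foo", 0755)
+//     f, err := fs.OpenFile(ctx, "/foo/bar/baz", os.O_RDWR|os.O_CREATE, 0644)
+//
+// and makeparents creates "/foo" and "/foo/bar" along the way, so the
+// PUT succeeds even though the collection never stored the empty
+// directories.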
+
+func (fs *webdavFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
+       if !fs.writing {
+               return errReadOnly
+       }
+       name = strings.TrimRight(name, "/")
+       fs.makeparents(name)
+       return fs.collfs.Mkdir(name, 0755)
+}
+
+func (fs *webdavFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (f webdav.File, err error) {
+       writing := flag&(os.O_WRONLY|os.O_RDWR|os.O_TRUNC) != 0
+       if writing {
+               fs.makeparents(name)
+       }
+       f, err = fs.collfs.OpenFile(name, flag, perm)
+       if !fs.writing {
+               // webdav module returns 404 on all OpenFile errors,
+               // but returns 405 Method Not Allowed if OpenFile()
+               // succeeds but Write() or Close() fails. We'd rather
+               // have 405. writeFailer ensures Close() fails if the
+               // file is opened for writing *or* Write() is called.
+               var err error
+               if writing {
+                       err = errReadOnly
+               }
+               f = &writeFailer{File: f, err: err}
+       }
+       if fs.alwaysReadEOF {
+               f = readEOF{File: f}
+       }
+       return
+}
+
+func (fs *webdavFS) RemoveAll(ctx context.Context, name string) error {
+       return fs.collfs.RemoveAll(name)
+}
+
+func (fs *webdavFS) Rename(ctx context.Context, oldName, newName string) error {
+       if !fs.writing {
+               return errReadOnly
+       }
+       if strings.HasSuffix(oldName, "/") {
+               // WebDAV "MOVE foo/ bar/" means rename foo to bar.
+               oldName = oldName[:len(oldName)-1]
+               newName = strings.TrimSuffix(newName, "/")
+       }
+       fs.makeparents(newName)
+       return fs.collfs.Rename(oldName, newName)
+}
+
+func (fs *webdavFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
+       if fs.writing {
+               fs.makeparents(name)
+       }
+       return fs.collfs.Stat(name)
+}
+
+type writeFailer struct {
+       webdav.File
+       err error
+}
+
+// Write uses a pointer receiver so the error recorded here survives
+// until Close() is called on the same wrapper.
+func (wf *writeFailer) Write([]byte) (int, error) {
+       wf.err = errReadOnly
+       return 0, wf.err
+}
+
+func (wf *writeFailer) Close() error {
+       err := wf.File.Close()
+       if err != nil {
+               wf.err = err
+       }
+       return wf.err
+}
+
+type readEOF struct {
+       webdav.File
+}
+
+func (readEOF) Read(p []byte) (int, error) {
+       return 0, io.EOF
+}
+
+// noLockSystem implements webdav.LockSystem by returning success for
+// every possible locking operation, even though it has no side
+// effects such as actually locking anything. This works for a
+// read-only webdav filesystem because webdav locks only apply to
+// writes.
+//
+// This is more suitable than webdav.NewMemLS() for two reasons:
+// First, it allows keep-web to use one locker for all collections
+// even though coll1.vhost/foo and coll2.vhost/foo have the same path
+// but represent different resources. Additionally, it returns valid
+// tokens (rfc2518 specifies that tokens are represented as URIs and
+// are unique across all resources for all time), which might improve
+// client compatibility.
+//
+// However, it does also permit impossible operations, like acquiring
+// conflicting locks and releasing non-existent locks.  This might
+// confuse some clients if they try to probe for correctness.
+//
+// Currently this is a moot point: the LOCK and UNLOCK methods are not
+// accepted by keep-web, so it suffices to implement the
+// webdav.LockSystem interface.
+type noLockSystem struct{}
+
+func (*noLockSystem) Confirm(time.Time, string, string, ...webdav.Condition) (func(), error) {
+       return noop, nil
+}
+
+func (*noLockSystem) Create(now time.Time, details webdav.LockDetails) (token string, err error) {
+       return fmt.Sprintf("opaquelocktoken:%s-%x", lockPrefix, atomic.AddInt64(&nextLockSuffix, 1)), nil
+}
+
+func (*noLockSystem) Refresh(now time.Time, token string, duration time.Duration) (webdav.LockDetails, error) {
+       return webdav.LockDetails{}, nil
+}
+
+func (*noLockSystem) Unlock(now time.Time, token string) error {
+       return nil
+}
+
+func noop() {}
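+
+// For illustration: Create (above) returns tokens like
+// "opaquelocktoken:<lockPrefix>-<counter>", which are URIs unique for
+// the life of the process, per the rfc2518 requirement mentioned
+// above.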
+
+// Return a version 4 variant 1 UUID, meaning all bits are random
+// except the ones indicating the version and variant.
+func uuid() string {
+       var data [16]byte
+       if _, err := rand.Read(data[:]); err != nil {
+               panic(err)
+       }
+       // variant 1: N=10xx
+       data[8] = data[8]&0x3f | 0x80
+       // version 4: M=0100
+       data[6] = data[6]&0x0f | 0x40
+       return fmt.Sprintf("%x-%x-%x-%x-%x", data[0:4], data[4:6], data[6:8], data[8:10], data[10:])
+}
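+
+// For illustration: the result is formatted as 8-4-4-4-12 hex digits,
+// e.g. "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx", where the version
+// nibble is always 4 and y is one of 8, 9, a, or b (variant bits
+// 10xx).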
diff --git a/services/keep-web/webdav_test.go b/services/keep-web/webdav_test.go
new file mode 100644 (file)
index 0000000..473171e
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import "golang.org/x/net/webdav"
+
+var _ webdav.FileSystem = &webdavFS{}
diff --git a/services/keep/tools/traffic_test.py b/services/keep/tools/traffic_test.py
new file mode 100755 (executable)
index 0000000..cd50a52
--- /dev/null
@@ -0,0 +1,129 @@
+#! /usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# traffic_test.py
+#
+# Launch a test Keep and API server and PUT and GET a bunch of blocks.
+# Can be used to simulate client traffic in Keep to evaluate memory usage,
+# error logging, performance, etc.
+#
+# This script is warty and is relatively environment-specific, but the
+# example run described below should execute cleanly.
+#
+# Usage:
+#   traffic_test.py start
+#       Starts the test servers.
+#   traffic_test.py put file1 file2 file3 ....
+#       Runs arv-put on each file.
+#   traffic_test.py get hash1 hash2 hash3 ....
+#       Loops forever issuing GET requests for specified blocks.
+#   traffic_test.py stop
+#       Stops the test servers.
+#
+# Example:
+#
+#   $ ./traffic_test.py start
+#   $ ./traffic_test.py put GS00253-DNA_A02_200_37.tsv.bz2 \
+#         GS00253-DNA_B01_200_37.tsv.bz2 \
+#         GS00253-DNA_B02_200_37.tsv.bz2
+#   $ ./traffic_test.py get $(find /tmp/tmp* -type f -printf "%f ")
+#     [loops forever]
+#     ^C
+#   $ ./traffic_test.py stop
+#
+# Multiple "get" runs may be run concurrently to evaluate Keep's handling
+# of additional concurrent clients.
+
+PYSDK_DIR    = "../../../sdk/python"
+PYTEST_DIR   = PYSDK_DIR + "/tests"
+ARV_PUT_PATH = PYSDK_DIR + "/bin/arv-put"
+ARV_GET_PATH = PYSDK_DIR + "/bin/arv-get"
+SECONDS_BETWEEN_GETS = 1
+
+import argparse
+import httplib2
+import os
+import random
+import subprocess
+import sys
+import time
+
+# for run_test_server.py
+sys.path.insert(0, PYSDK_DIR)
+sys.path.insert(0, PYTEST_DIR)
+import arvados
+import run_test_server
+
+def arv_cmd(*args):
+    p = subprocess.Popen([sys.executable] + list(args),
+                         stdout=subprocess.PIPE,
+                         stderr=subprocess.PIPE)
+    (arvout, arverr) = p.communicate()
+    if p.returncode != 0:
+        print "error {} from {} {}: {}".format(
+            p.returncode, sys.executable, args, arverr)
+        sys.exit(p.returncode)
+    return arvout
+
+def start():
+    run_test_server.run()
+    run_test_server.run_keep()
+
+def put(files):
+    os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3000"
+    run_test_server.authorize_with('active')
+    for v in ["ARVADOS_API_HOST",
+              "ARVADOS_API_HOST_INSECURE",
+              "ARVADOS_API_TOKEN"]:
+        os.environ[v] = arvados.config.settings()[v]
+
+    if 'PYTHONPATH' not in os.environ:
+        os.environ['PYTHONPATH'] = ''
+    os.environ['PYTHONPATH'] = "{}:{}:{}".format(
+        PYSDK_DIR, PYTEST_DIR, os.environ['PYTHONPATH'])
+
+    for c in files:
+        manifest_uuid = arv_cmd(ARV_PUT_PATH, c)
+
+def get(blocks):
+    os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3000"
+
+    run_test_server.authorize_with('active')
+    for v in ["ARVADOS_API_HOST",
+              "ARVADOS_API_HOST_INSECURE",
+              "ARVADOS_API_TOKEN"]:
+        os.environ[v] = arvados.config.settings()[v]
+
+    nqueries = 0
+    while True:
+        b = random.choice(blocks)
+        print "GET /" + b
+        body = arv_cmd(ARV_GET_PATH, b)
+        print "got {} bytes".format(len(body))
+        time.sleep(SECONDS_BETWEEN_GETS)
+        nqueries = nqueries + 1
+
+def stop():
+    run_test_server.stop_keep()
+    run_test_server.stop()
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('action',
+                        type=str,
+                        nargs='+',
+                        help='''"start", "put", "get", "stop"''')
+    args = parser.parse_args()
+
+    if args.action[0] == 'start':
+        start()
+    elif args.action[0] == 'put':
+        put(args.action[1:])
+    elif args.action[0] == 'get':
+        get(args.action[1:])
+    elif args.action[0] == 'stop':
+        stop()
+    else:
+        print('Unrecognized action "{}"'.format(args.action[0]))
+        print('actions are "start", "put", "get", "stop"')
diff --git a/services/keepproxy/.gitignore b/services/keepproxy/.gitignore
new file mode 100644 (file)
index 0000000..a4c8ad9
--- /dev/null
@@ -0,0 +1 @@
+keepproxy
diff --git a/services/keepproxy/keepproxy.go b/services/keepproxy/keepproxy.go
new file mode 100644 (file)
index 0000000..fc4783e
--- /dev/null
@@ -0,0 +1,651 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "errors"
+       "flag"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net"
+       "net/http"
+       "os"
+       "os/signal"
+       "regexp"
+       "strings"
+       "sync"
+       "syscall"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/config"
+       "git.curoverse.com/arvados.git/sdk/go/health"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       "github.com/coreos/go-systemd/daemon"
+       "github.com/ghodss/yaml"
+       "github.com/gorilla/mux"
+       log "github.com/sirupsen/logrus"
+)
+
+var version = "dev"
+
+type Config struct {
+       Client          arvados.Client
+       Listen          string
+       DisableGet      bool
+       DisablePut      bool
+       DefaultReplicas int
+       Timeout         arvados.Duration
+       PIDFile         string
+       Debug           bool
+       ManagementToken string
+}
+
+func DefaultConfig() *Config {
+       return &Config{
+               Listen:  ":25107",
+               Timeout: arvados.Duration(15 * time.Second),
+       }
+}
+
+var (
+       listener net.Listener
+       router   http.Handler
+)
+
+const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+func main() {
+       log.SetFormatter(&log.JSONFormatter{
+               TimestampFormat: rfc3339NanoFixed,
+       })
+
+       cfg := DefaultConfig()
+
+       flagset := flag.NewFlagSet("keepproxy", flag.ExitOnError)
+       flagset.Usage = usage
+
+       const deprecated = " (DEPRECATED -- use config file instead)"
+       flagset.StringVar(&cfg.Listen, "listen", cfg.Listen, "Local port to listen on."+deprecated)
+       flagset.BoolVar(&cfg.DisableGet, "no-get", cfg.DisableGet, "Disable GET operations."+deprecated)
+       flagset.BoolVar(&cfg.DisablePut, "no-put", cfg.DisablePut, "Disable PUT operations."+deprecated)
+       flagset.IntVar(&cfg.DefaultReplicas, "default-replicas", cfg.DefaultReplicas, "Default number of replicas to write if not specified by the client. If 0, use site default."+deprecated)
+       flagset.StringVar(&cfg.PIDFile, "pid", cfg.PIDFile, "Path to write pid file."+deprecated)
+       timeoutSeconds := flagset.Int("timeout", int(time.Duration(cfg.Timeout)/time.Second), "Timeout (in seconds) on requests to internal Keep services."+deprecated)
+       flagset.StringVar(&cfg.ManagementToken, "management-token", cfg.ManagementToken, "Authorization token to be included in all health check requests.")
+
+       var cfgPath string
+       const defaultCfgPath = "/etc/arvados/keepproxy/keepproxy.yml"
+       flagset.StringVar(&cfgPath, "config", defaultCfgPath, "Configuration file `path`")
+       dumpConfig := flagset.Bool("dump-config", false, "write current configuration to stdout and exit")
+       getVersion := flagset.Bool("version", false, "Print version information and exit.")
+       flagset.Parse(os.Args[1:])
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("keepproxy %s\n", version)
+               return
+       }
+
+       err := config.LoadFile(cfg, cfgPath)
+       if err != nil {
+               h := os.Getenv("ARVADOS_API_HOST")
+               t := os.Getenv("ARVADOS_API_TOKEN")
+               if h == "" || t == "" || !os.IsNotExist(err) || cfgPath != defaultCfgPath {
+                       log.Fatal(err)
+               }
+               log.Print("DEPRECATED: No config file found, but ARVADOS_API_HOST and ARVADOS_API_TOKEN environment variables are set. Please use a config file instead.")
+               cfg.Client.APIHost = h
+               cfg.Client.AuthToken = t
+               if regexp.MustCompile("^(?i:1|yes|true)$").MatchString(os.Getenv("ARVADOS_API_HOST_INSECURE")) {
+                       cfg.Client.Insecure = true
+               }
+               if y, err := yaml.Marshal(cfg); err == nil && !*dumpConfig {
+                       log.Print("Current configuration:\n", string(y))
+               }
+               cfg.Timeout = arvados.Duration(time.Duration(*timeoutSeconds) * time.Second)
+       }
+
+       if *dumpConfig {
+               log.Fatal(config.DumpAndExit(cfg))
+       }
+
+       log.Printf("keepproxy %s started", version)
+
+       arv, err := arvadosclient.New(&cfg.Client)
+       if err != nil {
+               log.Fatalf("Error setting up arvados client %s", err.Error())
+       }
+
+       if cfg.Debug {
+               keepclient.DebugPrintf = log.Printf
+       }
+       kc, err := keepclient.MakeKeepClient(arv)
+       if err != nil {
+               log.Fatalf("Error setting up keep client %s", err.Error())
+       }
+       keepclient.RefreshServiceDiscoveryOnSIGHUP()
+
+       if cfg.PIDFile != "" {
+               f, err := os.Create(cfg.PIDFile)
+               if err != nil {
+                       log.Fatal(err)
+               }
+               defer f.Close()
+               err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
+               if err != nil {
+                       log.Fatalf("flock(%s): %s", cfg.PIDFile, err)
+               }
+               defer os.Remove(cfg.PIDFile)
+               err = f.Truncate(0)
+               if err != nil {
+                       log.Fatalf("truncate(%s): %s", cfg.PIDFile, err)
+               }
+               _, err = fmt.Fprint(f, os.Getpid())
+               if err != nil {
+                       log.Fatalf("write(%s): %s", cfg.PIDFile, err)
+               }
+               err = f.Sync()
+               if err != nil {
+                       log.Fatalf("sync(%s): %s", cfg.PIDFile, err)
+               }
+       }
+
+       if cfg.DefaultReplicas > 0 {
+               kc.Want_replicas = cfg.DefaultReplicas
+       }
+
+       listener, err = net.Listen("tcp", cfg.Listen)
+       if err != nil {
+               log.Fatalf("listen(%s): %s", cfg.Listen, err)
+       }
+       if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
+               log.Printf("Error notifying init daemon: %v", err)
+       }
+       log.Println("Listening at", listener.Addr())
+
+       // Shut down the server gracefully (by closing the listener)
+       // if SIGTERM is received.
+       term := make(chan os.Signal, 1)
+       go func(sig <-chan os.Signal) {
+               s := <-sig
+               log.Println("caught signal:", s)
+               listener.Close()
+       }(term)
+       signal.Notify(term, syscall.SIGTERM)
+       signal.Notify(term, syscall.SIGINT)
+
+       // Start serving requests.
+       router = MakeRESTRouter(!cfg.DisableGet, !cfg.DisablePut, kc, time.Duration(cfg.Timeout), cfg.ManagementToken)
+       http.Serve(listener, httpserver.AddRequestIDs(httpserver.LogRequests(nil, router)))
+
+       log.Println("shutting down")
+}
+
+type ApiTokenCache struct {
+       tokens     map[string]int64
+       lock       sync.Mutex
+       expireTime int64
+}
+
+// Cache the token and set an expire time.  If we already have an expire time
+// on the token, it is not updated.
+func (this *ApiTokenCache) RememberToken(token string) {
+       this.lock.Lock()
+       defer this.lock.Unlock()
+
+       now := time.Now().Unix()
+       if this.tokens[token] == 0 {
+               this.tokens[token] = now + this.expireTime
+       }
+}
+
+// Check if the cached token is known and still believed to be valid.
+func (this *ApiTokenCache) RecallToken(token string) bool {
+       this.lock.Lock()
+       defer this.lock.Unlock()
+
+       now := time.Now().Unix()
+       if this.tokens[token] == 0 {
+               // Unknown token
+               return false
+       } else if now < this.tokens[token] {
+               // Token is known and still valid
+               return true
+       } else {
+               // Token is expired
+               this.tokens[token] = 0
+               return false
+       }
+}
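+
+// Illustrative usage sketch (mirrors CheckAuthorizationHeader below,
+// not part of the original change):
+//
+//     cache := &ApiTokenCache{tokens: make(map[string]int64), expireTime: 300}
+//     if !cache.RecallToken("read:" + tok) {
+//             // ... validate tok against the API server, then:
+//             cache.RememberToken("read:" + tok)
+//     }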
+
+func GetRemoteAddress(req *http.Request) string {
+       if xff := req.Header.Get("X-Forwarded-For"); xff != "" {
+               return xff + "," + req.RemoteAddr
+       }
+       return req.RemoteAddr
+}
+
+func CheckAuthorizationHeader(kc *keepclient.KeepClient, cache *ApiTokenCache, req *http.Request) (pass bool, tok string) {
+       parts := strings.SplitN(req.Header.Get("Authorization"), " ", 2)
+       if len(parts) < 2 || !(parts[0] == "OAuth2" || parts[0] == "Bearer") || len(parts[1]) == 0 {
+               return false, ""
+       }
+       tok = parts[1]
+
+       // Tokens are validated differently depending on what kind of
+       // operation is being performed. For example, tokens in
+       // collection-sharing links permit GET requests, but not
+       // PUT requests.
+       var op string
+       if req.Method == "GET" || req.Method == "HEAD" {
+               op = "read"
+       } else {
+               op = "write"
+       }
+
+       if cache.RecallToken(op + ":" + tok) {
+               // Valid in the cache, short circuit
+               return true, tok
+       }
+
+       var err error
+       arv := *kc.Arvados
+       arv.ApiToken = tok
+       arv.RequestID = req.Header.Get("X-Request-Id")
+       if op == "read" {
+               err = arv.Call("HEAD", "keep_services", "", "accessible", nil, nil)
+       } else {
+               err = arv.Call("HEAD", "users", "", "current", nil, nil)
+       }
+       if err != nil {
+               log.Printf("%s: CheckAuthorizationHeader error: %v", GetRemoteAddress(req), err)
+               return false, ""
+       }
+
+       // Success!  Update cache
+       cache.RememberToken(op + ":" + tok)
+
+       return true, tok
+}
+
+// We need to make a private copy of the default http transport early
+// in initialization, then make copies of our private copy later. It
+// won't be safe to copy http.DefaultTransport itself later, because
+// its private mutexes might have already been used. (Without this,
+// the test suite sometimes panics "concurrent map writes" in
+// net/http.(*Transport).removeIdleConnLocked().)
+var defaultTransport = *(http.DefaultTransport.(*http.Transport))
+
+type proxyHandler struct {
+       http.Handler
+       *keepclient.KeepClient
+       *ApiTokenCache
+       timeout   time.Duration
+       transport *http.Transport
+}
+
+// MakeRESTRouter returns an http.Handler that passes GET and PUT
+// requests to the appropriate handlers.
+func MakeRESTRouter(enable_get bool, enable_put bool, kc *keepclient.KeepClient, timeout time.Duration, mgmtToken string) http.Handler {
+       rest := mux.NewRouter()
+
+       transport := defaultTransport
+       transport.DialContext = (&net.Dialer{
+               Timeout:   keepclient.DefaultConnectTimeout,
+               KeepAlive: keepclient.DefaultKeepAlive,
+               DualStack: true,
+       }).DialContext
+       transport.TLSClientConfig = arvadosclient.MakeTLSConfig(kc.Arvados.ApiInsecure)
+       transport.TLSHandshakeTimeout = keepclient.DefaultTLSHandshakeTimeout
+
+       h := &proxyHandler{
+               Handler:    rest,
+               KeepClient: kc,
+               timeout:    timeout,
+               transport:  &transport,
+               ApiTokenCache: &ApiTokenCache{
+                       tokens:     make(map[string]int64),
+                       expireTime: 300,
+               },
+       }
+
+       if enable_get {
+               rest.HandleFunc(`/{locator:[0-9a-f]{32}\+.*}`, h.Get).Methods("GET", "HEAD")
+               rest.HandleFunc(`/{locator:[0-9a-f]{32}}`, h.Get).Methods("GET", "HEAD")
+
+               // List all blocks
+               rest.HandleFunc(`/index`, h.Index).Methods("GET")
+
+               // List blocks whose hash has the given prefix
+               rest.HandleFunc(`/index/{prefix:[0-9a-f]{0,32}}`, h.Index).Methods("GET")
+       }
+
+       if enable_put {
+               rest.HandleFunc(`/{locator:[0-9a-f]{32}\+.*}`, h.Put).Methods("PUT")
+               rest.HandleFunc(`/{locator:[0-9a-f]{32}}`, h.Put).Methods("PUT")
+               rest.HandleFunc(`/`, h.Put).Methods("POST")
+               rest.HandleFunc(`/{any}`, h.Options).Methods("OPTIONS")
+               rest.HandleFunc(`/`, h.Options).Methods("OPTIONS")
+       }
+
+       rest.Handle("/_health/{check}", &health.Handler{
+               Token:  mgmtToken,
+               Prefix: "/_health/",
+       }).Methods("GET")
+
+       rest.NotFoundHandler = InvalidPathHandler{}
+       return h
+}
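+
+// For illustration (the locator below is a placeholder): with both GET
+// and PUT enabled, the router above dispatches
+//
+//     GET  /d41d8cd98f00b204e9800998ecf8427e+0   -> h.Get
+//     PUT  /d41d8cd98f00b204e9800998ecf8427e+0   -> h.Put
+//     POST /                                     -> h.Put (server computes and returns the locator)
+//     GET  /index/d4                             -> h.Index
+//     GET  /_health/ping                         -> health.Handler
+//
+// and anything unroutable falls through to InvalidPathHandler, which
+// responds 400.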
+
+var errLoopDetected = errors.New("loop detected")
+
+func (*proxyHandler) checkLoop(resp http.ResponseWriter, req *http.Request) error {
+       if via := req.Header.Get("Via"); strings.Contains(via, " "+viaAlias) {
+               log.Printf("proxy loop detected (request has Via: %q): perhaps keepproxy is misidentified by gateway config as an external client, or its keep_services record does not have service_type=proxy?", via)
+               http.Error(resp, errLoopDetected.Error(), http.StatusInternalServerError)
+               return errLoopDetected
+       }
+       return nil
+}
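+
+// Example: a request that already passed through this proxy carries
+// "Via: HTTP/1.1 keepproxy" (set in Get and Put below), so on a second
+// pass checkLoop finds " "+viaAlias in the header and responds 500
+// instead of forwarding the request to itself again.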
+
+func SetCorsHeaders(resp http.ResponseWriter) {
+       resp.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, POST, PUT, OPTIONS")
+       resp.Header().Set("Access-Control-Allow-Origin", "*")
+       resp.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Length, Content-Type, X-Keep-Desired-Replicas")
+       resp.Header().Set("Access-Control-Max-Age", "86486400")
+}
+
+type InvalidPathHandler struct{}
+
+func (InvalidPathHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       log.Printf("%s: %s %s unroutable", GetRemoteAddress(req), req.Method, req.URL.Path)
+       http.Error(resp, "Bad request", http.StatusBadRequest)
+}
+
+func (h *proxyHandler) Options(resp http.ResponseWriter, req *http.Request) {
+       log.Printf("%s: %s %s", GetRemoteAddress(req), req.Method, req.URL.Path)
+       SetCorsHeaders(resp)
+}
+
+var BadAuthorizationHeader = errors.New("Missing or invalid Authorization header")
+var ContentLengthMismatch = errors.New("Actual length != expected content length")
+var MethodNotSupported = errors.New("Method not supported")
+
+var removeHint = regexp.MustCompile(`\+K@[a-z0-9]{5}(\+|$)`)
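+
+// For example, removeHint turns "...+A<sig>@<ts>+K@zzzzz" into
+// "...+A<sig>@<ts>" but leaves a full-UUID hint like
+// "+K@zzzzz-zzzzz-zzzzzzzzzzzzzzz" intact (see TestStripHint in
+// keepproxy_test.go).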
+
+func (h *proxyHandler) Get(resp http.ResponseWriter, req *http.Request) {
+       if err := h.checkLoop(resp, req); err != nil {
+               return
+       }
+       SetCorsHeaders(resp)
+       resp.Header().Set("Via", req.Proto+" "+viaAlias)
+
+       locator := mux.Vars(req)["locator"]
+       var err error
+       var status int
+       var expectLength, responseLength int64
+       var proxiedURI = "-"
+
+       defer func() {
+               log.Println(GetRemoteAddress(req), req.Method, req.URL.Path, status, expectLength, responseLength, proxiedURI, err)
+               if status != http.StatusOK {
+                       http.Error(resp, err.Error(), status)
+               }
+       }()
+
+       kc := h.makeKeepClient(req)
+
+       var pass bool
+       var tok string
+       if pass, tok = CheckAuthorizationHeader(kc, h.ApiTokenCache, req); !pass {
+               status, err = http.StatusForbidden, BadAuthorizationHeader
+               return
+       }
+
+       // Copy ArvadosClient struct and use the client's API token
+       arvclient := *kc.Arvados
+       arvclient.ApiToken = tok
+       kc.Arvados = &arvclient
+
+       var reader io.ReadCloser
+
+       locator = removeHint.ReplaceAllString(locator, "$1")
+
+       switch req.Method {
+       case "HEAD":
+               expectLength, proxiedURI, err = kc.Ask(locator)
+       case "GET":
+               reader, expectLength, proxiedURI, err = kc.Get(locator)
+               if reader != nil {
+                       defer reader.Close()
+               }
+       default:
+               status, err = http.StatusNotImplemented, MethodNotSupported
+               return
+       }
+
+       if expectLength == -1 {
+               log.Println("Warning:", GetRemoteAddress(req), req.Method, proxiedURI, "Content-Length not provided")
+       }
+
+       switch respErr := err.(type) {
+       case nil:
+               status = http.StatusOK
+               resp.Header().Set("Content-Length", fmt.Sprint(expectLength))
+               switch req.Method {
+               case "HEAD":
+                       responseLength = 0
+               case "GET":
+                       responseLength, err = io.Copy(resp, reader)
+                       if err == nil && expectLength > -1 && responseLength != expectLength {
+                               err = ContentLengthMismatch
+                       }
+               }
+       case keepclient.Error:
+               if respErr == keepclient.BlockNotFound {
+                       status = http.StatusNotFound
+               } else if respErr.Temporary() {
+                       status = http.StatusBadGateway
+               } else {
+                       status = 422
+               }
+       default:
+               status = http.StatusInternalServerError
+       }
+}
+
+var LengthRequiredError = errors.New(http.StatusText(http.StatusLengthRequired))
+var LengthMismatchError = errors.New("Locator size hint does not match Content-Length header")
+
+func (h *proxyHandler) Put(resp http.ResponseWriter, req *http.Request) {
+       if err := h.checkLoop(resp, req); err != nil {
+               return
+       }
+       SetCorsHeaders(resp)
+       resp.Header().Set("Via", req.Proto+" "+viaAlias)
+
+       kc := h.makeKeepClient(req)
+
+       var err error
+       var expectLength int64
+       var status = http.StatusInternalServerError
+       var wroteReplicas int
+       var locatorOut string = "-"
+
+       defer func() {
+               log.Println(GetRemoteAddress(req), req.Method, req.URL.Path, status, expectLength, kc.Want_replicas, wroteReplicas, locatorOut, err)
+               if status != http.StatusOK {
+                       http.Error(resp, err.Error(), status)
+               }
+       }()
+
+       locatorIn := mux.Vars(req)["locator"]
+
+       // Check if the client specified storage classes
+       if req.Header.Get("X-Keep-Storage-Classes") != "" {
+               var scl []string
+               for _, sc := range strings.Split(req.Header.Get("X-Keep-Storage-Classes"), ",") {
+                       scl = append(scl, strings.Trim(sc, " "))
+               }
+               kc.StorageClasses = scl
+       }
+
+       _, err = fmt.Sscanf(req.Header.Get("Content-Length"), "%d", &expectLength)
+       if err != nil || expectLength < 0 {
+               err = LengthRequiredError
+               status = http.StatusLengthRequired
+               return
+       }
+
+       if locatorIn != "" {
+               var loc *keepclient.Locator
+               if loc, err = keepclient.MakeLocator(locatorIn); err != nil {
+                       status = http.StatusBadRequest
+                       return
+               } else if loc.Size > 0 && int64(loc.Size) != expectLength {
+                       err = LengthMismatchError
+                       status = http.StatusBadRequest
+                       return
+               }
+       }
+
+       var pass bool
+       var tok string
+       if pass, tok = CheckAuthorizationHeader(kc, h.ApiTokenCache, req); !pass {
+               err = BadAuthorizationHeader
+               status = http.StatusForbidden
+               return
+       }
+
+       // Copy ArvadosClient struct and use the client's API token
+       arvclient := *kc.Arvados
+       arvclient.ApiToken = tok
+       kc.Arvados = &arvclient
+
+       // Check if the client specified the number of replicas
+       if req.Header.Get("X-Keep-Desired-Replicas") != "" {
+               var r int
+               _, err := fmt.Sscanf(req.Header.Get(keepclient.X_Keep_Desired_Replicas), "%d", &r)
+               if err == nil {
+                       kc.Want_replicas = r
+               }
+       }
+
+       // Now try to put the block through
+       if locatorIn == "" {
+               bytes, err2 := ioutil.ReadAll(req.Body)
+               if err2 != nil {
+                       err = fmt.Errorf("Error reading request body: %s", err2)
+                       status = http.StatusInternalServerError
+                       return
+               }
+               locatorOut, wroteReplicas, err = kc.PutB(bytes)
+       } else {
+               locatorOut, wroteReplicas, err = kc.PutHR(locatorIn, req.Body, expectLength)
+       }
+
+       // Tell the client how many successful PUTs we accomplished
+       resp.Header().Set(keepclient.X_Keep_Replicas_Stored, fmt.Sprintf("%d", wroteReplicas))
+
+       switch err.(type) {
+       case nil:
+               status = http.StatusOK
+               _, err = io.WriteString(resp, locatorOut)
+
+       case keepclient.OversizeBlockError:
+               // Too much data
+               status = http.StatusRequestEntityTooLarge
+
+       case keepclient.InsufficientReplicasError:
+               if wroteReplicas > 0 {
+                       // At least one write is considered success.  The
+                       // client can decide if getting less than the number of
+                       // replications it asked for is a fatal error.
+                       status = http.StatusOK
+                       _, err = io.WriteString(resp, locatorOut)
+               } else {
+                       status = http.StatusServiceUnavailable
+               }
+
+       default:
+               status = http.StatusBadGateway
+       }
+}
+
+// ServeHTTP implementation for IndexHandler
+// Supports only GET requests for /index/{prefix:[0-9a-f]{0,32}}
+// For each keep server found in LocalRoots:
+//   Invokes GetIndex using keepclient
+//   Expects "complete" response (terminating with blank new line)
+//   Aborts on any errors
+// Concatenates responses from all those keep servers and returns
+func (h *proxyHandler) Index(resp http.ResponseWriter, req *http.Request) {
+       SetCorsHeaders(resp)
+
+       prefix := mux.Vars(req)["prefix"]
+       var err error
+       var status int
+
+       defer func() {
+               if status != http.StatusOK {
+                       http.Error(resp, err.Error(), status)
+               }
+       }()
+
+       kc := h.makeKeepClient(req)
+       ok, token := CheckAuthorizationHeader(kc, h.ApiTokenCache, req)
+       if !ok {
+               status, err = http.StatusForbidden, BadAuthorizationHeader
+               return
+       }
+
+       // Copy ArvadosClient struct and use the client's API token
+       arvclient := *kc.Arvados
+       arvclient.ApiToken = token
+       kc.Arvados = &arvclient
+
+       // Only GET method is supported
+       if req.Method != "GET" {
+               status, err = http.StatusNotImplemented, MethodNotSupported
+               return
+       }
+
+       // Get index from all LocalRoots and write to resp
+       var reader io.Reader
+       for uuid := range kc.LocalRoots() {
+               reader, err = kc.GetIndex(uuid, prefix)
+               if err != nil {
+                       status = http.StatusBadGateway
+                       return
+               }
+
+               _, err = io.Copy(resp, reader)
+               if err != nil {
+                       status = http.StatusBadGateway
+                       return
+               }
+       }
+
+       // Got index from all the keep servers and wrote to resp
+       status = http.StatusOK
+       resp.Write([]byte("\n"))
+}
+
+func (h *proxyHandler) makeKeepClient(req *http.Request) *keepclient.KeepClient {
+       kc := *h.KeepClient
+       kc.RequestID = req.Header.Get("X-Request-Id")
+       kc.HTTPClient = &proxyClient{
+               client: &http.Client{
+                       Timeout:   h.timeout,
+                       Transport: h.transport,
+               },
+               proto: req.Proto,
+       }
+       return &kc
+}
diff --git a/services/keepproxy/keepproxy.service b/services/keepproxy/keepproxy.service
new file mode 100644 (file)
index 0000000..96dec25
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados Keep Proxy
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/keepproxy/keepproxy.yml
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=notify
+ExecStart=/usr/bin/keepproxy
+Restart=always
+RestartSec=1
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/services/keepproxy/keepproxy_test.go b/services/keepproxy/keepproxy_test.go
new file mode 100644 (file)
index 0000000..dc70d96
--- /dev/null
@@ -0,0 +1,681 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "crypto/md5"
+       "errors"
+       "fmt"
+       "io/ioutil"
+       "math/rand"
+       "net/http"
+       "net/http/httptest"
+       "os"
+       "strings"
+       "sync"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+
+       . "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       TestingT(t)
+}
+
+// Gocheck boilerplate
+var _ = Suite(&ServerRequiredSuite{})
+
+// Tests that require the Keep server running
+type ServerRequiredSuite struct{}
+
+// Gocheck boilerplate
+var _ = Suite(&NoKeepServerSuite{})
+
+// Test with no keepserver to simulate errors
+type NoKeepServerSuite struct{}
+
+var TestProxyUUID = "zzzzz-bi6l4-lrixqc4fxofbmzz"
+
+// Wait (up to 10 seconds) for keepproxy to listen on a port. This
+// avoids a race condition where we hit a "connection refused" error
+// because we start testing the proxy too soon.
+func waitForListener() {
+       const ms = 5
+       for i := 0; listener == nil && i < 10000; i += ms {
+               time.Sleep(ms * time.Millisecond)
+       }
+       if listener == nil {
+               panic("Timed out waiting for listener to start")
+       }
+}
+
+func closeListener() {
+       if listener != nil {
+               listener.Close()
+       }
+}
+
+func (s *ServerRequiredSuite) SetUpSuite(c *C) {
+       arvadostest.StartAPI()
+       arvadostest.StartKeep(2, false)
+}
+
+func (s *ServerRequiredSuite) SetUpTest(c *C) {
+       arvadostest.ResetEnv()
+}
+
+func (s *ServerRequiredSuite) TearDownSuite(c *C) {
+       arvadostest.StopKeep(2)
+       arvadostest.StopAPI()
+}
+
+func (s *NoKeepServerSuite) SetUpSuite(c *C) {
+       arvadostest.StartAPI()
+       // We need API to have some keep services listed, but the
+       // services themselves should be unresponsive.
+       arvadostest.StartKeep(2, false)
+       arvadostest.StopKeep(2)
+}
+
+func (s *NoKeepServerSuite) SetUpTest(c *C) {
+       arvadostest.ResetEnv()
+}
+
+func (s *NoKeepServerSuite) TearDownSuite(c *C) {
+       arvadostest.StopAPI()
+}
+
+func runProxy(c *C, args []string, bogusClientToken bool) *keepclient.KeepClient {
+       args = append([]string{"keepproxy"}, args...)
+       os.Args = append(args, "-listen=:0")
+       listener = nil
+       go main()
+       waitForListener()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, Equals, nil)
+       if bogusClientToken {
+               arv.ApiToken = "bogus-token"
+       }
+       kc := keepclient.New(arv)
+       sr := map[string]string{
+               TestProxyUUID: "http://" + listener.Addr().String(),
+       }
+       kc.SetServiceRoots(sr, sr, sr)
+       kc.Arvados.External = true
+
+       return kc
+}
+
+func (s *ServerRequiredSuite) TestResponseViaHeader(c *C) {
+       runProxy(c, nil, false)
+       defer closeListener()
+
+       req, err := http.NewRequest("POST",
+               "http://"+listener.Addr().String()+"/",
+               strings.NewReader("TestViaHeader"))
+       c.Assert(err, Equals, nil)
+       req.Header.Add("Authorization", "OAuth2 "+arvadostest.ActiveToken)
+       resp, err := (&http.Client{}).Do(req)
+       c.Assert(err, Equals, nil)
+       c.Check(resp.Header.Get("Via"), Equals, "HTTP/1.1 keepproxy")
+       locator, err := ioutil.ReadAll(resp.Body)
+       c.Assert(err, Equals, nil)
+       resp.Body.Close()
+
+       req, err = http.NewRequest("GET",
+               "http://"+listener.Addr().String()+"/"+string(locator),
+               nil)
+       c.Assert(err, Equals, nil)
+       resp, err = (&http.Client{}).Do(req)
+       c.Assert(err, Equals, nil)
+       c.Check(resp.Header.Get("Via"), Equals, "HTTP/1.1 keepproxy")
+       resp.Body.Close()
+}
+
+func (s *ServerRequiredSuite) TestLoopDetection(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+
+       sr := map[string]string{
+               TestProxyUUID: "http://" + listener.Addr().String(),
+       }
+       router.(*proxyHandler).KeepClient.SetServiceRoots(sr, sr, sr)
+
+       content := []byte("TestLoopDetection")
+       _, _, err := kc.PutB(content)
+       c.Check(err, ErrorMatches, `.*loop detected.*`)
+
+       hash := fmt.Sprintf("%x", md5.Sum(content))
+       _, _, _, err = kc.Get(hash)
+       c.Check(err, ErrorMatches, `.*loop detected.*`)
+}
+
+func (s *ServerRequiredSuite) TestStorageClassesHeader(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+
+       // Set up fake keepstore to record request headers
+       var hdr http.Header
+       ts := httptest.NewServer(http.HandlerFunc(
+               func(w http.ResponseWriter, r *http.Request) {
+                       hdr = r.Header
+                       http.Error(w, "Error", http.StatusInternalServerError)
+               }))
+       defer ts.Close()
+
+       // Point keepproxy router's keepclient to the fake keepstore
+       sr := map[string]string{
+               TestProxyUUID: ts.URL,
+       }
+       router.(*proxyHandler).KeepClient.SetServiceRoots(sr, sr, sr)
+
+       // Set up client to ask for storage classes to keepproxy
+       kc.StorageClasses = []string{"secure"}
+       content := []byte("Very important data")
+       _, _, err := kc.PutB(content)
+       c.Check(err, NotNil)
+       c.Check(hdr.Get("X-Keep-Storage-Classes"), Equals, "secure")
+}
+
+func (s *ServerRequiredSuite) TestDesiredReplicas(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+
+       content := []byte("TestDesiredReplicas")
+       hash := fmt.Sprintf("%x", md5.Sum(content))
+
+       for _, kc.Want_replicas = range []int{0, 1, 2} {
+               locator, rep, err := kc.PutB(content)
+               c.Check(err, Equals, nil)
+               c.Check(rep, Equals, kc.Want_replicas)
+               if rep > 0 {
+                       c.Check(locator, Matches, fmt.Sprintf(`^%s\+%d(\+.+)?$`, hash, len(content)))
+               }
+       }
+}
+
+func (s *ServerRequiredSuite) TestPutWrongContentLength(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+
+       content := []byte("TestPutWrongContentLength")
+       hash := fmt.Sprintf("%x", md5.Sum(content))
+
+       // If we use http.Client to send these requests to the network
+       // server we just started, the Go http library automatically
+       // fixes the invalid Content-Length header. In order to test
+       // our server behavior, we have to call the handler directly
+       // using an httptest.ResponseRecorder.
+       rtr := MakeRESTRouter(true, true, kc, 10*time.Second, "")
+
+       type testcase struct {
+               sendLength   string
+               expectStatus int
+       }
+
+       for _, t := range []testcase{
+               {"1", http.StatusBadRequest},
+               {"", http.StatusLengthRequired},
+               {"-1", http.StatusLengthRequired},
+               {"abcdef", http.StatusLengthRequired},
+       } {
+               req, err := http.NewRequest("PUT",
+                       fmt.Sprintf("http://%s/%s+%d", listener.Addr().String(), hash, len(content)),
+                       bytes.NewReader(content))
+               c.Assert(err, IsNil)
+               req.Header.Set("Content-Length", t.sendLength)
+               req.Header.Set("Authorization", "OAuth2 "+arvadostest.ActiveToken)
+               req.Header.Set("Content-Type", "application/octet-stream")
+
+               resp := httptest.NewRecorder()
+               rtr.ServeHTTP(resp, req)
+               c.Check(resp.Code, Equals, t.expectStatus)
+       }
+}
+
+func (s *ServerRequiredSuite) TestManyFailedPuts(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+       router.(*proxyHandler).timeout = time.Nanosecond
+
+       buf := make([]byte, 1<<20)
+       rand.Read(buf)
+       var wg sync.WaitGroup
+       for i := 0; i < 128; i++ {
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       kc.PutB(buf)
+               }()
+       }
+       done := make(chan bool)
+       go func() {
+               wg.Wait()
+               close(done)
+       }()
+       select {
+       case <-done:
+       case <-time.After(10 * time.Second):
+               c.Error("timeout")
+       }
+}
+
+func (s *ServerRequiredSuite) TestPutAskGet(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+       var hash2 string
+
+       {
+               _, _, err := kc.Ask(hash)
+               c.Check(err, Equals, keepclient.BlockNotFound)
+               c.Log("Finished Ask (expected BlockNotFound)")
+       }
+
+       {
+               reader, _, _, err := kc.Get(hash)
+               c.Check(reader, Equals, nil)
+               c.Check(err, Equals, keepclient.BlockNotFound)
+               c.Log("Finished Get (expected BlockNotFound)")
+       }
+
+       // Note in bug #5309 among other errors keepproxy would set
+       // Content-Length incorrectly on the 404 BlockNotFound response, this
+       // would result in a protocol violation that would prevent reuse of the
+       // connection, which would manifest by the next attempt to use the
+       // connection (in this case the PutB below) failing.  So to test for
+       // that bug it's necessary to trigger an error response (such as
+       // BlockNotFound) and then do something else with the same httpClient
+       // connection.
+
+       {
+               var rep int
+               var err error
+               hash2, rep, err = kc.PutB([]byte("foo"))
+               c.Check(hash2, Matches, fmt.Sprintf(`^%s\+3(\+.+)?$`, hash))
+               c.Check(rep, Equals, 2)
+               c.Check(err, Equals, nil)
+               c.Log("Finished PutB (expected success)")
+       }
+
+       {
+               blocklen, _, err := kc.Ask(hash2)
+               c.Assert(err, Equals, nil)
+               c.Check(blocklen, Equals, int64(3))
+               c.Log("Finished Ask (expected success)")
+       }
+
+       {
+               reader, blocklen, _, err := kc.Get(hash2)
+               c.Assert(err, Equals, nil)
+               all, err := ioutil.ReadAll(reader)
+               c.Check(err, IsNil)
+               c.Check(all, DeepEquals, []byte("foo"))
+               c.Check(blocklen, Equals, int64(3))
+               c.Log("Finished Get (expected success)")
+       }
+
+       {
+               var rep int
+               var err error
+               hash2, rep, err = kc.PutB([]byte(""))
+               c.Check(hash2, Matches, `^d41d8cd98f00b204e9800998ecf8427e\+0(\+.+)?$`)
+               c.Check(rep, Equals, 2)
+               c.Check(err, Equals, nil)
+               c.Log("Finished PutB zero block")
+       }
+
+       {
+               reader, blocklen, _, err := kc.Get("d41d8cd98f00b204e9800998ecf8427e")
+               c.Assert(err, Equals, nil)
+               all, err := ioutil.ReadAll(reader)
+               c.Check(err, IsNil)
+               c.Check(all, DeepEquals, []byte(""))
+               c.Check(blocklen, Equals, int64(0))
+               c.Log("Finished Get zero block")
+       }
+}
+
+func (s *ServerRequiredSuite) TestPutAskGetForbidden(c *C) {
+       kc := runProxy(c, nil, true)
+       defer closeListener()
+
+       hash := fmt.Sprintf("%x+3", md5.Sum([]byte("bar")))
+
+       _, _, err := kc.Ask(hash)
+       c.Check(err, FitsTypeOf, &keepclient.ErrNotFound{})
+
+       hash2, rep, err := kc.PutB([]byte("bar"))
+       c.Check(hash2, Equals, "")
+       c.Check(rep, Equals, 0)
+       c.Check(err, FitsTypeOf, keepclient.InsufficientReplicasError(errors.New("")))
+
+       blocklen, _, err := kc.Ask(hash)
+       c.Check(err, FitsTypeOf, &keepclient.ErrNotFound{})
+       c.Check(err, ErrorMatches, ".*not found.*")
+       c.Check(blocklen, Equals, int64(0))
+
+       _, blocklen, _, err = kc.Get(hash)
+       c.Check(err, FitsTypeOf, &keepclient.ErrNotFound{})
+       c.Check(err, ErrorMatches, ".*not found.*")
+       c.Check(blocklen, Equals, int64(0))
+}
+
+func (s *ServerRequiredSuite) TestGetDisabled(c *C) {
+       kc := runProxy(c, []string{"-no-get"}, false)
+       defer closeListener()
+
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("baz")))
+
+       {
+               _, _, err := kc.Ask(hash)
+               errNotFound, _ := err.(keepclient.ErrNotFound)
+               c.Check(errNotFound, NotNil)
+               c.Assert(err, ErrorMatches, `.*HTTP 405.*`)
+               c.Log("Ask 1")
+       }
+
+       {
+               hash2, rep, err := kc.PutB([]byte("baz"))
+               c.Check(hash2, Matches, fmt.Sprintf(`^%s\+3(\+.+)?$`, hash))
+               c.Check(rep, Equals, 2)
+               c.Check(err, Equals, nil)
+               c.Log("PutB")
+       }
+
+       {
+               blocklen, _, err := kc.Ask(hash)
+               errNotFound, _ := err.(keepclient.ErrNotFound)
+               c.Check(errNotFound, NotNil)
+               c.Assert(err, ErrorMatches, `.*HTTP 405.*`)
+               c.Check(blocklen, Equals, int64(0))
+               c.Log("Ask 2")
+       }
+
+       {
+               _, blocklen, _, err := kc.Get(hash)
+               errNotFound, _ := err.(keepclient.ErrNotFound)
+               c.Check(errNotFound, NotNil)
+               c.Assert(err, ErrorMatches, `.*HTTP 405.*`)
+               c.Check(blocklen, Equals, int64(0))
+               c.Log("Get")
+       }
+}
+
+func (s *ServerRequiredSuite) TestPutDisabled(c *C) {
+       kc := runProxy(c, []string{"-no-put"}, false)
+       defer closeListener()
+
+       hash2, rep, err := kc.PutB([]byte("quux"))
+       c.Check(hash2, Equals, "")
+       c.Check(rep, Equals, 0)
+       c.Check(err, FitsTypeOf, keepclient.InsufficientReplicasError(errors.New("")))
+}
+
+func (s *ServerRequiredSuite) TestCorsHeaders(c *C) {
+       runProxy(c, nil, false)
+       defer closeListener()
+
+       {
+               client := http.Client{}
+               req, err := http.NewRequest("OPTIONS",
+                       fmt.Sprintf("http://%s/%x+3", listener.Addr().String(), md5.Sum([]byte("foo"))),
+                       nil)
+               c.Assert(err, IsNil)
+               req.Header.Add("Access-Control-Request-Method", "PUT")
+               req.Header.Add("Access-Control-Request-Headers", "Authorization, X-Keep-Desired-Replicas")
+               resp, err := client.Do(req)
+               c.Check(err, Equals, nil)
+               c.Check(resp.StatusCode, Equals, 200)
+               body, err := ioutil.ReadAll(resp.Body)
+               c.Check(err, IsNil)
+               c.Check(string(body), Equals, "")
+               c.Check(resp.Header.Get("Access-Control-Allow-Methods"), Equals, "GET, HEAD, POST, PUT, OPTIONS")
+               c.Check(resp.Header.Get("Access-Control-Allow-Origin"), Equals, "*")
+       }
+
+       {
+               resp, err := http.Get(
+                       fmt.Sprintf("http://%s/%x+3", listener.Addr().String(), md5.Sum([]byte("foo"))))
+               c.Check(err, Equals, nil)
+               c.Check(resp.Header.Get("Access-Control-Allow-Headers"), Equals, "Authorization, Content-Length, Content-Type, X-Keep-Desired-Replicas")
+               c.Check(resp.Header.Get("Access-Control-Allow-Origin"), Equals, "*")
+       }
+}
+
+func (s *ServerRequiredSuite) TestPostWithoutHash(c *C) {
+       runProxy(c, nil, false)
+       defer closeListener()
+
+       {
+               client := http.Client{}
+               req, err := http.NewRequest("POST",
+                       "http://"+listener.Addr().String()+"/",
+                       strings.NewReader("qux"))
+               c.Check(err, IsNil)
+               req.Header.Add("Authorization", "OAuth2 "+arvadostest.ActiveToken)
+               req.Header.Add("Content-Type", "application/octet-stream")
+               resp, err := client.Do(req)
+               c.Check(err, Equals, nil)
+               body, err := ioutil.ReadAll(resp.Body)
+               c.Check(err, Equals, nil)
+               c.Check(string(body), Matches,
+                       fmt.Sprintf(`^%x\+3(\+.+)?$`, md5.Sum([]byte("qux"))))
+       }
+}
+
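+// TestStripHint exercises the removeHint regexp (defined elsewhere in
+// this package): a bare "+K@xxxxx" cluster hint should be stripped
+// from a locator URL, while the longer "+K@xxxxx-xxxxx-..." remote
+// block locator form should be left intact.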
+func (s *ServerRequiredSuite) TestStripHint(c *C) {
+       c.Check(removeHint.ReplaceAllString("http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73+K@zzzzz", "$1"),
+               Equals,
+               "http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73")
+       c.Check(removeHint.ReplaceAllString("http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+K@zzzzz+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73", "$1"),
+               Equals,
+               "http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73")
+       c.Check(removeHint.ReplaceAllString("http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73+K@zzzzz-zzzzz-zzzzzzzzzzzzzzz", "$1"),
+               Equals,
+               "http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73+K@zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Check(removeHint.ReplaceAllString("http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+K@zzzzz-zzzzz-zzzzzzzzzzzzzzz+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73", "$1"),
+               Equals,
+               "http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+K@zzzzz-zzzzz-zzzzzzzzzzzzzzz+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73")
+}
+
+// Test GetIndex
+//   Put one block, with 2 replicas
+//   With no prefix (expect the block locator, twice)
+//   With an existing prefix (expect the block locator, twice)
+//   With a valid but non-existing prefix (expect "\n")
+//   With an invalid prefix (expect error)
+func (s *ServerRequiredSuite) TestGetIndex(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+
+       // Put "index-data" blocks
+       data := []byte("index-data")
+       hash := fmt.Sprintf("%x", md5.Sum(data))
+
+       hash2, rep, err := kc.PutB(data)
+       c.Check(hash2, Matches, fmt.Sprintf(`^%s\+10(\+.+)?$`, hash))
+       c.Check(rep, Equals, 2)
+       c.Check(err, Equals, nil)
+
+       reader, blocklen, _, err := kc.Get(hash)
+       c.Assert(err, IsNil)
+       c.Check(blocklen, Equals, int64(10))
+       all, err := ioutil.ReadAll(reader)
+       c.Assert(err, IsNil)
+       c.Check(all, DeepEquals, data)
+
+       // Put some more blocks
+       _, _, err = kc.PutB([]byte("some-more-index-data"))
+       c.Check(err, IsNil)
+
+       kc.Arvados.ApiToken = arvadostest.DataManagerToken
+
+       // Invoke GetIndex
+       for _, spec := range []struct {
+               prefix         string
+               expectTestHash bool
+               expectOther    bool
+       }{
+               {"", true, true},         // with no prefix
+               {hash[:3], true, false},  // with matching prefix
+               {"abcdef", false, false}, // with no such prefix
+       } {
+               indexReader, err := kc.GetIndex(TestProxyUUID, spec.prefix)
+               c.Assert(err, Equals, nil)
+               indexResp, err := ioutil.ReadAll(indexReader)
+               c.Assert(err, Equals, nil)
+               locators := strings.Split(string(indexResp), "\n")
+               gotTestHash := 0
+               gotOther := 0
+               for _, locator := range locators {
+                       if locator == "" {
+                               continue
+                       }
+                       c.Check(locator[:len(spec.prefix)], Equals, spec.prefix)
+                       if locator[:32] == hash {
+                               gotTestHash++
+                       } else {
+                               gotOther++
+                       }
+               }
+               c.Check(gotTestHash == 2, Equals, spec.expectTestHash)
+               c.Check(gotOther > 0, Equals, spec.expectOther)
+       }
+
+       // GetIndex with invalid prefix
+       _, err = kc.GetIndex(TestProxyUUID, "xyz")
+       c.Assert(err, NotNil)
+}
+
+func (s *ServerRequiredSuite) TestCollectionSharingToken(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+       hash, _, err := kc.PutB([]byte("shareddata"))
+       c.Check(err, IsNil)
+       kc.Arvados.ApiToken = arvadostest.FooCollectionSharingToken
+       rdr, _, _, err := kc.Get(hash)
+       c.Assert(err, IsNil)
+       data, err := ioutil.ReadAll(rdr)
+       c.Check(err, IsNil)
+       c.Check(data, DeepEquals, []byte("shareddata"))
+}
+
+func (s *ServerRequiredSuite) TestPutAskGetInvalidToken(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+
+       // Put a test block
+       hash, rep, err := kc.PutB([]byte("foo"))
+       c.Check(err, IsNil)
+       c.Check(rep, Equals, 2)
+
+       for _, badToken := range []string{
+               "nosuchtoken",
+               "2ym314ysp27sk7h943q6vtc378srb06se3pq6ghurylyf3pdmx", // expired
+       } {
+               kc.Arvados.ApiToken = badToken
+
+               // Ask and Get will fail only if the upstream
+               // keepstore server checks for valid signatures.
+               // Without knowing the blob signing key, there is no
+               // way for keepproxy to know whether a given token is
+               // permitted to read a block.  These checks would fail,
+               // so they are disabled:
+               if false {
+                       _, _, err = kc.Ask(hash)
+                       c.Assert(err, FitsTypeOf, &keepclient.ErrNotFound{})
+                       c.Check(err.(*keepclient.ErrNotFound).Temporary(), Equals, false)
+                       c.Check(err, ErrorMatches, ".*HTTP 403.*")
+
+                       _, _, _, err = kc.Get(hash)
+                       c.Assert(err, FitsTypeOf, &keepclient.ErrNotFound{})
+                       c.Check(err.(*keepclient.ErrNotFound).Temporary(), Equals, false)
+                       c.Check(err, ErrorMatches, ".*HTTP 403 \"Missing or invalid Authorization header\".*")
+               }
+
+               _, _, err = kc.PutB([]byte("foo"))
+               c.Check(err, ErrorMatches, ".*403.*Missing or invalid Authorization header")
+       }
+}
+
+func (s *ServerRequiredSuite) TestAskGetKeepProxyConnectionError(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+
+       // Point keepproxy at a non-existent keepstore
+       locals := map[string]string{
+               TestProxyUUID: "http://localhost:12345",
+       }
+       router.(*proxyHandler).KeepClient.SetServiceRoots(locals, nil, nil)
+
+       // Ask should result in temporary bad gateway error
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+       _, _, err := kc.Ask(hash)
+       c.Check(err, NotNil)
+       errNotFound, _ := err.(*keepclient.ErrNotFound)
+       c.Check(errNotFound.Temporary(), Equals, true)
+       c.Assert(err, ErrorMatches, ".*HTTP 502.*")
+
+       // Get should result in temporary bad gateway error
+       _, _, _, err = kc.Get(hash)
+       c.Check(err, NotNil)
+       errNotFound, _ = err.(*keepclient.ErrNotFound)
+       c.Check(errNotFound.Temporary(), Equals, true)
+       c.Assert(err, ErrorMatches, ".*HTTP 502.*")
+}
+
+func (s *NoKeepServerSuite) TestAskGetNoKeepServerError(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+       for _, f := range []func() error{
+               func() error {
+                       _, _, err := kc.Ask(hash)
+                       return err
+               },
+               func() error {
+                       _, _, _, err := kc.Get(hash)
+                       return err
+               },
+       } {
+               err := f()
+               c.Assert(err, NotNil)
+               errNotFound, _ := err.(*keepclient.ErrNotFound)
+               c.Check(errNotFound.Temporary(), Equals, true)
+               c.Check(err, ErrorMatches, `.*HTTP 502.*`)
+       }
+}
+
+func (s *ServerRequiredSuite) TestPing(c *C) {
+       kc := runProxy(c, nil, false)
+       defer closeListener()
+
+       rtr := MakeRESTRouter(true, true, kc, 10*time.Second, arvadostest.ManagementToken)
+
+       req, err := http.NewRequest("GET",
+               "http://"+listener.Addr().String()+"/_health/ping",
+               nil)
+       c.Assert(err, IsNil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ManagementToken)
+
+       resp := httptest.NewRecorder()
+       rtr.ServeHTTP(resp, req)
+       c.Check(resp.Code, Equals, 200)
+       c.Assert(resp.Body.String(), Matches, `{"health":"OK"}\n?`)
+}
diff --git a/services/keepproxy/pkg-extras/etc/default/keepproxy b/services/keepproxy/pkg-extras/etc/default/keepproxy
new file mode 100644 (file)
index 0000000..ddcab10
--- /dev/null
@@ -0,0 +1,7 @@
+user="root"
+group="root"
+chroot="/"
+chdir="/"
+nice=""
+args="-listen=':9100'"
+
diff --git a/services/keepproxy/pkg-extras/etc/init.d/keepproxy b/services/keepproxy/pkg-extras/etc/init.d/keepproxy
new file mode 100755 (executable)
index 0000000..1077927
--- /dev/null
@@ -0,0 +1,160 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Init script for keepproxy
+# Maintained by 
+# Generated by pleaserun.
+# Implemented based on LSB Core 3.1:
+#   * Sections: 20.2, 20.3
+#
+### BEGIN INIT INFO
+# Provides:          keepproxy
+# Required-Start:    $remote_fs $syslog
+# Required-Stop:     $remote_fs $syslog
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: 
+# Description:       no description given
+### END INIT INFO
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+export PATH
+
+name=keepproxy
+program=/usr/bin/keepproxy
+args=''
+pidfile="/var/run/$name.pid"
+
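+# Pull in distro-provided overrides for the settings above, if any.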
+[ -r /etc/default/$name ] && . /etc/default/$name
+[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name
+
+trace() {
+  logger -t "/etc/init.d/keepproxy" "$@"
+}
+
+emit() {
+  trace "$@"
+  echo "$@"
+}
+
+start() {
+
+  # Ensure the log directory is setup correctly.
+  [ ! -d "/var/log/" ] && mkdir "/var/log/"
+  chown "$user":"$group" "/var/log/"
+  chmod 755 "/var/log/"
+
+
+  # Setup any environmental stuff beforehand
+  
+
+  # Run the program!
+  
+  chroot --userspec "$user":"$group" "$chroot" sh -c "
+    
+    cd \"$chdir\"
+    exec \"$program\" $args
+  " >> /var/log/keepproxy.stdout 2>> /var/log/keepproxy.stderr &
+
+  # Generate the pidfile from here. If we instead made the forked process
+  # generate it there will be a race condition between the pidfile writing
+  # and a process possibly asking for status.
+  echo $! > $pidfile
+
+  emit "$name started"
+  return 0
+}
+
+stop() {
+  # Try a few times to kill the program with SIGTERM
+  if status ; then
+    pid=$(cat "$pidfile")
+    trace "Killing $name (pid $pid) with SIGTERM"
+    kill -TERM $pid
+    # Wait for it to exit.
+    for i in 1 2 3 4 5 ; do
+      trace "Waiting for $name (pid $pid) to die..."
+      status || break
+      sleep 1
+    done
+    if status ; then
+      emit "$name stop failed; still running."
+    else
+      emit "$name stopped."
+    fi
+  fi
+}
+
+status() {
+  if [ -f "$pidfile" ] ; then
+    pid=$(cat "$pidfile")
+    if ps -p $pid > /dev/null 2> /dev/null ; then
+      # process by this pid is running.
+      # It may not be our pid, but that's what you get with just pidfiles.
+      # TODO(sissel): Check if this process seems to be the same as the one we
+      # expect. It'd be nice to use flock here, but flock uses fork, not exec,
+      # so it makes it quite awkward to use in this case.
+      return 0
+    else
+      return 2 # program is dead but pid file exists
+    fi
+  else
+    return 3 # program is not running
+  fi
+}
+
+force_stop() {
+  if status ; then
+    stop
+    status && kill -KILL $(cat "$pidfile")
+  fi
+}
+
+
+case "$1" in
+  force-start|start|stop|force-stop|restart)
+    trace "Attempting '$1' on keepproxy"
+    ;;
+esac
+
+case "$1" in
+  force-start)
+    PRESTART=no
+    exec "$0" start
+    ;;
+  start)
+    status
+    code=$?
+    if [ $code -eq 0 ]; then
+      emit "$name is already running"
+      exit $code
+    else
+      start
+      exit $?
+    fi
+    ;;
+  stop) stop ;;
+  force-stop) force_stop ;;
+  status) 
+    status
+    code=$?
+    if [ $code -eq 0 ] ; then
+      emit "$name is running"
+    else
+      emit "$name is not running"
+    fi
+    exit $code
+    ;;
+  restart) 
+    
+    stop && start 
+    ;;
+  *)
+    echo "Usage: $SCRIPTNAME {start|force-start|stop|force-stop|status|restart}" >&2
+    exit 3
+  ;;
+esac
+
+exit $?
diff --git a/services/keepproxy/proxy_client.go b/services/keepproxy/proxy_client.go
new file mode 100644 (file)
index 0000000..0faf4ae
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "net/http"
+
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+)
+
+var viaAlias = "keepproxy"
+
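+// proxyClient wraps a keepclient.HTTPClient so that every request
+// forwarded upstream carries a "Via" header (e.g. "HTTP/1.1 keepproxy"),
+// letting the backend keepstore servers see that the request came
+// through keepproxy.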
+type proxyClient struct {
+       client keepclient.HTTPClient
+       proto  string
+}
+
+func (pc *proxyClient) Do(req *http.Request) (*http.Response, error) {
+       req.Header.Add("Via", pc.proto+" "+viaAlias)
+       return pc.client.Do(req)
+}
diff --git a/services/keepproxy/usage.go b/services/keepproxy/usage.go
new file mode 100644 (file)
index 0000000..6d3d21e
--- /dev/null
@@ -0,0 +1,90 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "flag"
+       "fmt"
+       "os"
+)
+
+func usage() {
+       c := DefaultConfig()
+       c.Client.APIHost = "zzzzz.arvadosapi.com:443"
+       exampleConfigFile, err := json.MarshalIndent(c, "    ", "  ")
+       if err != nil {
+               panic(err)
+       }
+       fmt.Fprintf(os.Stderr, `
+
+Keepproxy forwards GET and PUT requests to keepstore servers.  See
+http://doc.arvados.org/install/install-keepproxy.html
+
+Usage: keepproxy [-config path/to/keepproxy.yml]
+
+Options:
+`)
+       flag.PrintDefaults()
+       fmt.Fprintf(os.Stderr, `
+Example config file:
+    %s
+
+Client.APIHost:
+
+    Address (or address:port) of the Arvados API endpoint.
+
+Client.AuthToken:
+
+    Anonymous API token.
+
+Client.Insecure:
+
+    True if your Arvados API endpoint uses an unverifiable SSL/TLS
+    certificate.
+
+Listen:
+
+    Local port to listen on. Can be "address:port" or ":port", where
+    "address" is a host IP address or name and "port" is a port number
+    or name.
+
+DisableGet:
+
+    Respond 404 to GET and HEAD requests.
+
+DisablePut:
+
+    Respond 404 to PUT, POST, and OPTIONS requests.
+
+DefaultReplicas:
+
+    Default number of replicas to write if not specified by the
+    client. If this is zero or omitted, the site-wide
+    defaultCollectionReplication configuration will be used.
+
+Timeout:
+
+    Timeout for requests to keep services, with units (e.g., "120s",
+    "2m").
+
+PIDFile:
+
+    Path to PID file. During startup this file will be created if
+    needed, and locked using flock() until keepproxy exits. If it is
+    already locked, or any error is encountered while writing to it,
+    keepproxy will exit immediately. If omitted or empty, no PID file
+    will be used.
+
+Debug:
+
+    Enable debug logging.
+
+ManagementToken:
+
+    Authorization token to be included in all health check requests.
+
+`, exampleConfigFile)
+}
diff --git a/services/keepstore/.gitignore b/services/keepstore/.gitignore
new file mode 100644 (file)
index 0000000..c195c4a
--- /dev/null
@@ -0,0 +1 @@
+keepstore
diff --git a/services/keepstore/azure_blob_volume.go b/services/keepstore/azure_blob_volume.go
new file mode 100644 (file)
index 0000000..6b5b233
--- /dev/null
@@ -0,0 +1,837 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "errors"
+       "flag"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "os"
+       "regexp"
+       "strconv"
+       "strings"
+       "sync"
+       "sync/atomic"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/Azure/azure-sdk-for-go/storage"
+       "github.com/prometheus/client_golang/prometheus"
+)
+
+const azureDefaultRequestTimeout = arvados.Duration(10 * time.Minute)
+
+var (
+       azureMaxGetBytes           int
+       azureStorageAccountName    string
+       azureStorageAccountKeyFile string
+       azureStorageReplication    int
+       azureWriteRaceInterval     = 15 * time.Second
+       azureWriteRacePollTime     = time.Second
+)
+
+func readKeyFromFile(file string) (string, error) {
+       buf, err := ioutil.ReadFile(file)
+       if err != nil {
+               return "", errors.New("reading key from " + file + ": " + err.Error())
+       }
+       accountKey := strings.TrimSpace(string(buf))
+       if accountKey == "" {
+               return "", errors.New("empty account key in " + file)
+       }
+       return accountKey, nil
+}
+
+type azureVolumeAdder struct {
+       *Config
+}
+
+// String implements flag.Value
+func (s *azureVolumeAdder) String() string {
+       return "-"
+}
+
+func (s *azureVolumeAdder) Set(containerName string) error {
+       s.Config.Volumes = append(s.Config.Volumes, &AzureBlobVolume{
+               ContainerName:         containerName,
+               StorageAccountName:    azureStorageAccountName,
+               StorageAccountKeyFile: azureStorageAccountKeyFile,
+               AzureReplication:      azureStorageReplication,
+               ReadOnly:              deprecated.flagReadonly,
+       })
+       return nil
+}
+
+func init() {
+       VolumeTypes = append(VolumeTypes, func() VolumeWithExamples { return &AzureBlobVolume{} })
+
+       flag.Var(&azureVolumeAdder{theConfig},
+               "azure-storage-container-volume",
+               "Use the given container as a storage volume. Can be given multiple times.")
+       flag.StringVar(
+               &azureStorageAccountName,
+               "azure-storage-account-name",
+               "",
+               "Azure storage account name used for subsequent --azure-storage-container-volume arguments.")
+       flag.StringVar(
+               &azureStorageAccountKeyFile,
+               "azure-storage-account-key-file",
+               "",
+               "`File` containing the account key used for subsequent --azure-storage-container-volume arguments.")
+       flag.IntVar(
+               &azureStorageReplication,
+               "azure-storage-replication",
+               3,
+               "Replication level to report to clients when data is stored in an Azure container.")
+       flag.IntVar(
+               &azureMaxGetBytes,
+               "azure-max-get-bytes",
+               BlockSize,
+               fmt.Sprintf("Maximum bytes to request in a single GET request. If smaller than %d, use multiple concurrent range requests to retrieve a block.", BlockSize))
+}
+
+// An AzureBlobVolume stores and retrieves blocks in an Azure Blob
+// container.
+type AzureBlobVolume struct {
+       StorageAccountName    string
+       StorageAccountKeyFile string
+       StorageBaseURL        string // "" means default, "core.windows.net"
+       ContainerName         string
+       AzureReplication      int
+       ReadOnly              bool
+       RequestTimeout        arvados.Duration
+       StorageClasses        []string
+
+       azClient  storage.Client
+       container *azureContainer
+}
+
+// singleSender is a single-attempt storage.Sender: it performs each
+// request exactly once, with no retries (unlike the Azure SDK's
+// default sender, which retries on failure).
+type singleSender struct{}
+
+// Send performs req exactly once.
+func (*singleSender) Send(c *storage.Client, req *http.Request) (resp *http.Response, err error) {
+       return c.HTTPClient.Do(req)
+}
+
+// Examples implements VolumeWithExamples.
+func (*AzureBlobVolume) Examples() []Volume {
+       return []Volume{
+               &AzureBlobVolume{
+                       StorageAccountName:    "example-account-name",
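+               // Default to a tiny page size so that the client's
+               // marker/NextMarker pagination logic gets exercised.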
+                       StorageAccountKeyFile: "/etc/azure_storage_account_key.txt",
+                       ContainerName:         "example-container-name",
+                       AzureReplication:      3,
+                       RequestTimeout:        azureDefaultRequestTimeout,
+               },
+               &AzureBlobVolume{
+                       StorageAccountName:    "cn-account-name",
+                       StorageAccountKeyFile: "/etc/azure_cn_storage_account_key.txt",
+                       StorageBaseURL:        "core.chinacloudapi.cn",
+                       ContainerName:         "cn-container-name",
+                       AzureReplication:      3,
+                       RequestTimeout:        azureDefaultRequestTimeout,
+               },
+       }
+}
+
+// Type implements Volume.
+func (v *AzureBlobVolume) Type() string {
+       return "Azure"
+}
+
+// Start implements Volume.
+func (v *AzureBlobVolume) Start(vm *volumeMetricsVecs) error {
+       if v.ContainerName == "" {
+               return errors.New("no container name given")
+       }
+       if v.StorageAccountName == "" || v.StorageAccountKeyFile == "" {
+               return errors.New("StorageAccountName and StorageAccountKeyFile must be given")
+       }
+       accountKey, err := readKeyFromFile(v.StorageAccountKeyFile)
+       if err != nil {
+               return err
+       }
+       if v.StorageBaseURL == "" {
+               v.StorageBaseURL = storage.DefaultBaseURL
+       }
+       v.azClient, err = storage.NewClient(v.StorageAccountName, accountKey, v.StorageBaseURL, storage.DefaultAPIVersion, true)
+       if err != nil {
+               return fmt.Errorf("creating Azure storage client: %s", err)
+       }
+       v.azClient.Sender = &singleSender{}
+
+       if v.RequestTimeout == 0 {
+               v.RequestTimeout = azureDefaultRequestTimeout
+       }
+       v.azClient.HTTPClient = &http.Client{
+               Timeout: time.Duration(v.RequestTimeout),
+       }
+       bs := v.azClient.GetBlobService()
+       v.container = &azureContainer{
+               ctr: bs.GetContainerReference(v.ContainerName),
+       }
+
+       if ok, err := v.container.Exists(); err != nil {
+               return err
+       } else if !ok {
+               return fmt.Errorf("Azure container %q does not exist", v.ContainerName)
+       }
+       // Set up prometheus metrics
+       lbls := prometheus.Labels{"device_id": v.DeviceID()}
+       v.container.stats.opsCounters, v.container.stats.errCounters, v.container.stats.ioBytes = vm.getCounterVecsFor(lbls)
+
+       return nil
+}
+
+// DeviceID returns a globally unique ID for the storage container.
+func (v *AzureBlobVolume) DeviceID() string {
+       return "azure://" + v.StorageBaseURL + "/" + v.StorageAccountName + "/" + v.ContainerName
+}
+
+// checkTrashed reports whether the expires_at metadata attribute is
+// set on the block (i.e., the block has been trashed), along with the
+// block's metadata.
+func (v *AzureBlobVolume) checkTrashed(loc string) (bool, map[string]string, error) {
+       metadata, err := v.container.GetBlobMetadata(loc)
+       if err != nil {
+               return false, metadata, v.translateError(err)
+       }
+       if metadata["expires_at"] != "" {
+               return true, metadata, nil
+       }
+       return false, metadata, nil
+}
+
+// Get reads a Keep block that has been stored as a block blob in the
+// container.
+//
+// If the block is younger than azureWriteRaceInterval and is
+// unexpectedly empty, assume a PutBlob operation is in progress, and
+// wait for it to finish writing.
+func (v *AzureBlobVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
+       trashed, _, err := v.checkTrashed(loc)
+       if err != nil {
+               return 0, err
+       }
+       if trashed {
+               return 0, os.ErrNotExist
+       }
+       var deadline time.Time
+       haveDeadline := false
+       size, err := v.get(ctx, loc, buf)
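+       // Note d41d8cd98f00b204e9800998ecf8427e is the MD5 of the
+       // empty string, i.e., the only block that is legitimately
+       // zero bytes long.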
+       for err == nil && size == 0 && loc != "d41d8cd98f00b204e9800998ecf8427e" {
+               // Seeing a brand new empty block probably means we're
+               // in a race with CreateBlob, which under the hood
+               // (apparently) does "CreateEmpty" and "CommitData"
+               // with no additional transaction locking.
+               if !haveDeadline {
+                       t, err := v.Mtime(loc)
+                       if err != nil {
+                               log.Print("Got empty block (possible race) but Mtime failed: ", err)
+                               break
+                       }
+                       deadline = t.Add(azureWriteRaceInterval)
+                       if time.Now().After(deadline) {
+                               break
+                       }
+                       log.Printf("Race? Block %s is 0 bytes, %s old. Polling until %s", loc, time.Since(t), deadline)
+                       haveDeadline = true
+               } else if time.Now().After(deadline) {
+                       break
+               }
+               select {
+               case <-ctx.Done():
+                       return 0, ctx.Err()
+               case <-time.After(azureWriteRacePollTime):
+               }
+               size, err = v.get(ctx, loc, buf)
+       }
+       if haveDeadline {
+               log.Printf("Race ended with size==%d", size)
+       }
+       return size, err
+}
+
+func (v *AzureBlobVolume) get(ctx context.Context, loc string, buf []byte) (int, error) {
+       ctx, cancel := context.WithCancel(ctx)
+       defer cancel()
+       expectSize := len(buf)
+       if azureMaxGetBytes < BlockSize {
+               // Unfortunately the handler doesn't tell us how long the blob
+               // is expected to be, so we have to ask Azure.
+               props, err := v.container.GetBlobProperties(loc)
+               if err != nil {
+                       return 0, v.translateError(err)
+               }
+               if props.ContentLength > int64(BlockSize) || props.ContentLength < 0 {
+                       return 0, fmt.Errorf("block %s invalid size %d (max %d)", loc, props.ContentLength, BlockSize)
+               }
+               expectSize = int(props.ContentLength)
+       }
+
+       if expectSize == 0 {
+               return 0, nil
+       }
+
+       // We'll update this actualSize if/when we get the last piece.
+       actualSize := -1
+       pieces := (expectSize + azureMaxGetBytes - 1) / azureMaxGetBytes
+       errors := make(chan error, pieces)
+       var wg sync.WaitGroup
+       wg.Add(pieces)
+       for p := 0; p < pieces; p++ {
+               // Each goroutine retrieves one piece. If we hit an
+               // error, it is sent to the errors chan so get() can
+               // return it -- but only if the error happens before
+               // ctx is done. This way, if ctx is done before we hit
+               // any other error (e.g., requesting client has hung
+               // up), we return the original ctx.Err() instead of
+               // the secondary errors from the transfers that got
+               // interrupted as a result.
+               go func(p int) {
+                       defer wg.Done()
+                       startPos := p * azureMaxGetBytes
+                       endPos := startPos + azureMaxGetBytes
+                       if endPos > expectSize {
+                               endPos = expectSize
+                       }
+                       var rdr io.ReadCloser
+                       var err error
+                       gotRdr := make(chan struct{})
+                       go func() {
+                               defer close(gotRdr)
+                               if startPos == 0 && endPos == expectSize {
+                                       rdr, err = v.container.GetBlob(loc)
+                               } else {
+                                       rdr, err = v.container.GetBlobRange(loc, startPos, endPos-1, nil)
+                               }
+                       }()
+                       select {
+                       case <-ctx.Done():
+                               go func() {
+                                       <-gotRdr
+                                       if err == nil {
+                                               rdr.Close()
+                                       }
+                               }()
+                               return
+                       case <-gotRdr:
+                       }
+                       if err != nil {
+                               errors <- err
+                               cancel()
+                               return
+                       }
+                       go func() {
+                               // Close the reader when the client
+                               // hangs up or another piece fails
+                               // (possibly interrupting ReadFull())
+                               // or when all pieces succeed and
+                               // get() returns.
+                               <-ctx.Done()
+                               rdr.Close()
+                       }()
+                       n, err := io.ReadFull(rdr, buf[startPos:endPos])
+                       if pieces == 1 && (err == io.ErrUnexpectedEOF || err == io.EOF) {
+                               // If we don't know the actual size,
+                               // and just tried reading 64 MiB, it's
+                               // normal to encounter EOF.
+                       } else if err != nil {
+                               if ctx.Err() == nil {
+                                       errors <- err
+                               }
+                               cancel()
+                               return
+                       }
+                       if p == pieces-1 {
+                               actualSize = startPos + n
+                       }
+               }(p)
+       }
+       wg.Wait()
+       close(errors)
+       if len(errors) > 0 {
+               return 0, v.translateError(<-errors)
+       }
+       if ctx.Err() != nil {
+               return 0, ctx.Err()
+       }
+       return actualSize, nil
+}
+
+// Compare the given data with existing stored data.
+func (v *AzureBlobVolume) Compare(ctx context.Context, loc string, expect []byte) error {
+       trashed, _, err := v.checkTrashed(loc)
+       if err != nil {
+               return err
+       }
+       if trashed {
+               return os.ErrNotExist
+       }
+       var rdr io.ReadCloser
+       gotRdr := make(chan struct{})
+       go func() {
+               defer close(gotRdr)
+               rdr, err = v.container.GetBlob(loc)
+       }()
+       select {
+       case <-ctx.Done():
+               go func() {
+                       <-gotRdr
+                       if err == nil {
+                               rdr.Close()
+                       }
+               }()
+               return ctx.Err()
+       case <-gotRdr:
+       }
+       if err != nil {
+               return v.translateError(err)
+       }
+       defer rdr.Close()
+       return compareReaderWithBuf(ctx, rdr, expect, loc[:32])
+}
+
+// Put stores a Keep block as a block blob in the container.
+func (v *AzureBlobVolume) Put(ctx context.Context, loc string, block []byte) error {
+       if v.ReadOnly {
+               return MethodDisabledError
+       }
+       // Send the block data through a pipe, so that (if we need to)
+       // we can close the pipe early and abandon our
+       // CreateBlockBlobFromReader() goroutine, without worrying
+       // about CreateBlockBlobFromReader() accessing our block
+       // buffer after we release it.
+       bufr, bufw := io.Pipe()
+       go func() {
+               io.Copy(bufw, bytes.NewReader(block))
+               bufw.Close()
+       }()
+       errChan := make(chan error)
+       go func() {
+               var body io.Reader = bufr
+               if len(block) == 0 {
+                       // We must send a "Content-Length: 0" header,
+                       // but the http client interprets
+                       // ContentLength==0 as "unknown" unless it can
+                       // confirm by introspection that Body will
+                       // read 0 bytes.
+                       body = http.NoBody
+                       bufr.Close()
+               }
+               errChan <- v.container.CreateBlockBlobFromReader(loc, len(block), body, nil)
+       }()
+       select {
+       case <-ctx.Done():
+               theConfig.debugLogf("%s: taking CreateBlockBlobFromReader's input away: %s", v, ctx.Err())
+               // Our pipe might be stuck in Write(), waiting for
+               // io.Copy() to read. If so, un-stick it. This means
+               // CreateBlockBlobFromReader will get corrupt data,
+               // but that's OK: the size won't match, so the write
+               // will fail.
+               go io.Copy(ioutil.Discard, bufr)
+               // CloseWithError() will return once pending I/O is done.
+               bufw.CloseWithError(ctx.Err())
+               theConfig.debugLogf("%s: abandoning CreateBlockBlobFromReader goroutine", v)
+               return ctx.Err()
+       case err := <-errChan:
+               return err
+       }
+}
+
+// Touch updates the last-modified property of a block blob.
+func (v *AzureBlobVolume) Touch(loc string) error {
+       if v.ReadOnly {
+               return MethodDisabledError
+       }
+       trashed, metadata, err := v.checkTrashed(loc)
+       if err != nil {
+               return err
+       }
+       if trashed {
+               return os.ErrNotExist
+       }
+
+       metadata["touch"] = fmt.Sprintf("%d", time.Now().Unix())
+       return v.container.SetBlobMetadata(loc, metadata, nil)
+}
+
+// Mtime returns the last-modified property of a block blob.
+func (v *AzureBlobVolume) Mtime(loc string) (time.Time, error) {
+       trashed, _, err := v.checkTrashed(loc)
+       if err != nil {
+               return time.Time{}, err
+       }
+       if trashed {
+               return time.Time{}, os.ErrNotExist
+       }
+
+       props, err := v.container.GetBlobProperties(loc)
+       if err != nil {
+               return time.Time{}, err
+       }
+       return time.Time(props.LastModified), nil
+}
+
+// IndexTo writes a list of Keep blocks that are stored in the
+// container.
+func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error {
+       params := storage.ListBlobsParameters{
+               Prefix:  prefix,
+               Include: &storage.IncludeBlobDataset{Metadata: true},
+       }
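+       // Page through the container listing until NextMarker comes
+       // back empty.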
+       for {
+               resp, err := v.container.ListBlobs(params)
+               if err != nil {
+                       return err
+               }
+               for _, b := range resp.Blobs {
+                       if !v.isKeepBlock(b.Name) {
+                               continue
+                       }
+                       modtime := time.Time(b.Properties.LastModified)
+                       if b.Properties.ContentLength == 0 && modtime.Add(azureWriteRaceInterval).After(time.Now()) {
+                               // A new zero-length blob is probably
+                               // just a new non-empty blob that
+                               // hasn't committed its data yet (see
+                               // Get()), and in any case has no
+                               // value.
+                               continue
+                       }
+                       if b.Metadata["expires_at"] != "" {
+                               // Trashed blob; exclude it from response
+                               continue
+                       }
+                       fmt.Fprintf(writer, "%s+%d %d\n", b.Name, b.Properties.ContentLength, modtime.UnixNano())
+               }
+               if resp.NextMarker == "" {
+                       return nil
+               }
+               params.Marker = resp.NextMarker
+       }
+}
+
+// Trash a Keep block.
+func (v *AzureBlobVolume) Trash(loc string) error {
+       if v.ReadOnly {
+               return MethodDisabledError
+       }
+
+       // Ideally we would use If-Unmodified-Since, but that
+       // particular condition seems to be ignored by Azure. Instead,
+       // we get the Etag before checking Mtime, and use If-Match to
+       // ensure we don't delete data if Put() or Touch() happens
+       // between our calls to Mtime() and DeleteBlob().
+       props, err := v.container.GetBlobProperties(loc)
+       if err != nil {
+               return err
+       }
+       if t, err := v.Mtime(loc); err != nil {
+               return err
+       } else if time.Since(t) < theConfig.BlobSignatureTTL.Duration() {
+               return nil
+       }
+
+       // If TrashLifetime == 0, just delete it
+       if theConfig.TrashLifetime == 0 {
+               return v.container.DeleteBlob(loc, &storage.DeleteBlobOptions{
+                       IfMatch: props.Etag,
+               })
+       }
+
+       // Otherwise, mark as trash
+       return v.container.SetBlobMetadata(loc, storage.BlobMetadata{
+               "expires_at": fmt.Sprintf("%d", time.Now().Add(theConfig.TrashLifetime.Duration()).Unix()),
+       }, &storage.SetBlobMetadataOptions{
+               IfMatch: props.Etag,
+       })
+}
+
+// Untrash a Keep block.
+// Delete the expires_at metadata attribute
+func (v *AzureBlobVolume) Untrash(loc string) error {
+       // if expires_at does not exist, return NotFoundError
+       metadata, err := v.container.GetBlobMetadata(loc)
+       if err != nil {
+               return v.translateError(err)
+       }
+       if metadata["expires_at"] == "" {
+               return os.ErrNotExist
+       }
+
+       // reset expires_at metadata attribute
+       metadata["expires_at"] = ""
+       err = v.container.SetBlobMetadata(loc, metadata, nil)
+       return v.translateError(err)
+}
+
+// Status returns a VolumeStatus struct with placeholder data.
+func (v *AzureBlobVolume) Status() *VolumeStatus {
+       return &VolumeStatus{
+               DeviceNum: 1,
+               BytesFree: BlockSize * 1000,
+               BytesUsed: 1,
+       }
+}
+
+// String returns a volume label, including the container name.
+func (v *AzureBlobVolume) String() string {
+       return fmt.Sprintf("azure-storage-container:%+q", v.ContainerName)
+}
+
+// Writable returns true, unless the -readonly flag was on when the
+// volume was added.
+func (v *AzureBlobVolume) Writable() bool {
+       return !v.ReadOnly
+}
+
+// Replication returns the replication level of the container, as
+// specified by the -azure-storage-replication argument.
+func (v *AzureBlobVolume) Replication() int {
+       return v.AzureReplication
+}
+
+// GetStorageClasses implements Volume
+func (v *AzureBlobVolume) GetStorageClasses() []string {
+       return v.StorageClasses
+}
+
+// If possible, translate an Azure SDK error to a recognizable error
+// like os.ErrNotExist.
+func (v *AzureBlobVolume) translateError(err error) error {
+       switch {
+       case err == nil:
+               return err
+       case strings.Contains(err.Error(), "StatusCode=503"):
+               // "storage: service returned error: StatusCode=503, ErrorCode=ServerBusy, ErrorMessage=The server is busy" (See #14804)
+               return VolumeBusyError
+       case strings.Contains(err.Error(), "Not Found"):
+               // "storage: service returned without a response body (404 Not Found)"
+               return os.ErrNotExist
+       default:
+               return err
+       }
+}
+
+var keepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
+
+func (v *AzureBlobVolume) isKeepBlock(s string) bool {
+       return keepBlockRegexp.MatchString(s)
+}
+
+// EmptyTrash looks for trashed blocks that exceeded TrashLifetime
+// and deletes them from the volume.
+func (v *AzureBlobVolume) EmptyTrash() {
+       var bytesDeleted, bytesInTrash int64
+       var blocksDeleted, blocksInTrash int64
+
+       doBlob := func(b storage.Blob) {
+               // Check whether the block is flagged as trash
+               if b.Metadata["expires_at"] == "" {
+                       return
+               }
+
+               atomic.AddInt64(&blocksInTrash, 1)
+               atomic.AddInt64(&bytesInTrash, b.Properties.ContentLength)
+
+               expiresAt, err := strconv.ParseInt(b.Metadata["expires_at"], 10, 64)
+               if err != nil {
+                       log.Printf("EmptyTrash: ParseInt(%v): %v", b.Metadata["expires_at"], err)
+                       return
+               }
+
+               if expiresAt > time.Now().Unix() {
+                       return
+               }
+
+               err = v.container.DeleteBlob(b.Name, &storage.DeleteBlobOptions{
+                       IfMatch: b.Properties.Etag,
+               })
+               if err != nil {
+                       log.Printf("EmptyTrash: DeleteBlob(%v): %v", b.Name, err)
+                       return
+               }
+               atomic.AddInt64(&blocksDeleted, 1)
+               atomic.AddInt64(&bytesDeleted, b.Properties.ContentLength)
+       }
+
+       var wg sync.WaitGroup
+       todo := make(chan storage.Blob, theConfig.EmptyTrashWorkers)
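+       // Start at least one worker, even if EmptyTrashWorkers is
+       // zero or negative.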
+       for i := 0; i < 1 || i < theConfig.EmptyTrashWorkers; i++ {
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       for b := range todo {
+                               doBlob(b)
+                       }
+               }()
+       }
+
+       params := storage.ListBlobsParameters{Include: &storage.IncludeBlobDataset{Metadata: true}}
+       for {
+               resp, err := v.container.ListBlobs(params)
+               if err != nil {
+                       log.Printf("EmptyTrash: ListBlobs: %v", err)
+                       break
+               }
+               for _, b := range resp.Blobs {
+                       todo <- b
+               }
+               if resp.NextMarker == "" {
+                       break
+               }
+               params.Marker = resp.NextMarker
+       }
+       close(todo)
+       wg.Wait()
+
+       log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+}
+
+// InternalStats returns bucket I/O and API call counters.
+func (v *AzureBlobVolume) InternalStats() interface{} {
+       return &v.container.stats
+}
+
+type azureBlobStats struct {
+       statsTicker
+       Ops              uint64
+       GetOps           uint64
+       GetRangeOps      uint64
+       GetMetadataOps   uint64
+       GetPropertiesOps uint64
+       CreateOps        uint64
+       SetMetadataOps   uint64
+       DelOps           uint64
+       ListOps          uint64
+}
+
+func (s *azureBlobStats) TickErr(err error) {
+       if err == nil {
+               return
+       }
+       errType := fmt.Sprintf("%T", err)
+       if err, ok := err.(storage.AzureStorageServiceError); ok {
+               errType = errType + fmt.Sprintf(" %d (%s)", err.StatusCode, err.Code)
+       }
+       log.Printf("errType %T, err %s", err, err)
+       s.statsTicker.TickErr(err, errType)
+}
+
+// azureContainer wraps storage.Container in order to count I/O and
+// API usage stats.
+type azureContainer struct {
+       ctr   *storage.Container
+       stats azureBlobStats
+}
+
+func (c *azureContainer) Exists() (bool, error) {
+       c.stats.TickOps("exists")
+       c.stats.Tick(&c.stats.Ops)
+       ok, err := c.ctr.Exists()
+       c.stats.TickErr(err)
+       return ok, err
+}
+
+func (c *azureContainer) GetBlobMetadata(bname string) (storage.BlobMetadata, error) {
+       c.stats.TickOps("get_metadata")
+       c.stats.Tick(&c.stats.Ops, &c.stats.GetMetadataOps)
+       b := c.ctr.GetBlobReference(bname)
+       err := b.GetMetadata(nil)
+       c.stats.TickErr(err)
+       return b.Metadata, err
+}
+
+func (c *azureContainer) GetBlobProperties(bname string) (*storage.BlobProperties, error) {
+       c.stats.TickOps("get_properties")
+       c.stats.Tick(&c.stats.Ops, &c.stats.GetPropertiesOps)
+       b := c.ctr.GetBlobReference(bname)
+       err := b.GetProperties(nil)
+       c.stats.TickErr(err)
+       return &b.Properties, err
+}
+
+func (c *azureContainer) GetBlob(bname string) (io.ReadCloser, error) {
+       c.stats.TickOps("get")
+       c.stats.Tick(&c.stats.Ops, &c.stats.GetOps)
+       b := c.ctr.GetBlobReference(bname)
+       rdr, err := b.Get(nil)
+       c.stats.TickErr(err)
+       return NewCountingReader(rdr, c.stats.TickInBytes), err
+}
+
+func (c *azureContainer) GetBlobRange(bname string, start, end int, opts *storage.GetBlobOptions) (io.ReadCloser, error) {
+       c.stats.TickOps("get_range")
+       c.stats.Tick(&c.stats.Ops, &c.stats.GetRangeOps)
+       b := c.ctr.GetBlobReference(bname)
+       rdr, err := b.GetRange(&storage.GetBlobRangeOptions{
+               Range: &storage.BlobRange{
+                       Start: uint64(start),
+                       End:   uint64(end),
+               },
+               GetBlobOptions: opts,
+       })
+       c.stats.TickErr(err)
+       return NewCountingReader(rdr, c.stats.TickInBytes), err
+}
+
+// If we give it an io.Reader that doesn't also have a Len() int
+// method, the Azure SDK determines data size by copying the data into
+// a new buffer, which is not a good use of memory.
+type readerWithAzureLen struct {
+       io.Reader
+       len int
+}
+
+// Len satisfies the private lener interface in azure-sdk-for-go.
+func (r *readerWithAzureLen) Len() int {
+       return r.len
+}
+
+func (c *azureContainer) CreateBlockBlobFromReader(bname string, size int, rdr io.Reader, opts *storage.PutBlobOptions) error {
+       c.stats.TickOps("create")
+       c.stats.Tick(&c.stats.Ops, &c.stats.CreateOps)
+       if size != 0 {
+               rdr = &readerWithAzureLen{
+                       Reader: NewCountingReader(rdr, c.stats.TickOutBytes),
+                       len:    size,
+               }
+       }
+       b := c.ctr.GetBlobReference(bname)
+       err := b.CreateBlockBlobFromReader(rdr, opts)
+       c.stats.TickErr(err)
+       return err
+}
+
+func (c *azureContainer) SetBlobMetadata(bname string, m storage.BlobMetadata, opts *storage.SetBlobMetadataOptions) error {
+       c.stats.TickOps("set_metadata")
+       c.stats.Tick(&c.stats.Ops, &c.stats.SetMetadataOps)
+       b := c.ctr.GetBlobReference(bname)
+       b.Metadata = m
+       err := b.SetMetadata(opts)
+       c.stats.TickErr(err)
+       return err
+}
+
+func (c *azureContainer) ListBlobs(params storage.ListBlobsParameters) (storage.BlobListResponse, error) {
+       c.stats.TickOps("list")
+       c.stats.Tick(&c.stats.Ops, &c.stats.ListOps)
+       resp, err := c.ctr.ListBlobs(params)
+       c.stats.TickErr(err)
+       return resp, err
+}
+
+func (c *azureContainer) DeleteBlob(bname string, opts *storage.DeleteBlobOptions) error {
+       c.stats.TickOps("delete")
+       c.stats.Tick(&c.stats.Ops, &c.stats.DelOps)
+       b := c.ctr.GetBlobReference(bname)
+       err := b.Delete(opts)
+       c.stats.TickErr(err)
+       return err
+}
diff --git a/services/keepstore/azure_blob_volume_test.go b/services/keepstore/azure_blob_volume_test.go
new file mode 100644 (file)
index 0000000..cfad757
--- /dev/null
@@ -0,0 +1,766 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "crypto/md5"
+       "encoding/base64"
+       "encoding/json"
+       "encoding/xml"
+       "flag"
+       "fmt"
+       "io/ioutil"
+       "math/rand"
+       "net"
+       "net/http"
+       "net/http/httptest"
+       "os"
+       "regexp"
+       "sort"
+       "strconv"
+       "strings"
+       "sync"
+       "testing"
+       "time"
+
+       "github.com/Azure/azure-sdk-for-go/storage"
+       "github.com/ghodss/yaml"
+       "github.com/prometheus/client_golang/prometheus"
+       check "gopkg.in/check.v1"
+)
+
+const (
+       // This cannot be the fake account name "devstoreaccount1"
+       // used by Microsoft's Azure emulator: the Azure SDK
+       // recognizes that magic string and changes its behavior to
+       // cater to the Azure SDK's own test suite.
+       fakeAccountName = "fakeaccountname"
+       fakeAccountKey  = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
+)
+
+var (
+       azureTestContainer string
+       azureTestDebug     = os.Getenv("ARVADOS_DEBUG") != ""
+)
+
+func init() {
+       flag.StringVar(
+               &azureTestContainer,
+               "test.azure-storage-container-volume",
+               "",
+               "Name of Azure container to use for testing. Do not use a container with real data! Use -azure-storage-account-name and -azure-storage-key-file arguments to supply credentials.")
+}
+
+type azBlob struct {
+       Data        []byte
+       Etag        string
+       Metadata    map[string]string
+       Mtime       time.Time
+       Uncommitted map[string][]byte
+}
+
+type azStubHandler struct {
+       sync.Mutex
+       blobs map[string]*azBlob
+       race  chan chan struct{}
+}
+
+func newAzStubHandler() *azStubHandler {
+       return &azStubHandler{
+               blobs: make(map[string]*azBlob),
+       }
+}
+
+func (h *azStubHandler) TouchWithDate(container, hash string, t time.Time) {
+       blob, ok := h.blobs[container+"|"+hash]
+       if !ok {
+               return
+       }
+       blob.Mtime = t
+}
+
+func (h *azStubHandler) PutRaw(container, hash string, data []byte) {
+       h.Lock()
+       defer h.Unlock()
+       h.blobs[container+"|"+hash] = &azBlob{
+               Data:        data,
+               Mtime:       time.Now(),
+               Metadata:    make(map[string]string),
+               Uncommitted: make(map[string][]byte),
+       }
+}
+
+func (h *azStubHandler) unlockAndRace() {
+       if h.race == nil {
+               return
+       }
+       h.Unlock()
+       // Signal caller that race is starting by reading from
+       // h.race. If we get a channel, block until that channel is
+       // ready to receive. If we get nil (or h.race is closed) just
+       // proceed.
+       if c := <-h.race; c != nil {
+               c <- struct{}{}
+       }
+       h.Lock()
+}
+
+var rangeRegexp = regexp.MustCompile(`^bytes=(\d+)-(\d+)$`)
+
+func (h *azStubHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
+       h.Lock()
+       defer h.Unlock()
+       if azureTestDebug {
+               defer log.Printf("azStubHandler: %+v", r)
+       }
+
+       path := strings.Split(r.URL.Path, "/")
+       container := path[1]
+       hash := ""
+       if len(path) > 2 {
+               hash = path[2]
+       }
+
+       if err := r.ParseForm(); err != nil {
+               log.Printf("azStubHandler(%+v): %s", r, err)
+               rw.WriteHeader(http.StatusBadRequest)
+               return
+       }
+
+       if (r.Method == "PUT" || r.Method == "POST") && r.Header.Get("Content-Length") == "" {
+               rw.WriteHeader(http.StatusLengthRequired)
+               return
+       }
+
+       body, err := ioutil.ReadAll(r.Body)
+       if err != nil {
+               return
+       }
+
+       type blockListRequestBody struct {
+               XMLName     xml.Name `xml:"BlockList"`
+               Uncommitted []string
+       }
+
+       blob, blobExists := h.blobs[container+"|"+hash]
+
+       switch {
+       case r.Method == "PUT" && r.Form.Get("comp") == "":
+               // "Put Blob" API
+               if _, ok := h.blobs[container+"|"+hash]; !ok {
+                       // Like the real Azure service, we offer a
+                       // race window during which other clients can
+                       // list/get the new blob before any data is
+                       // committed.
+                       h.blobs[container+"|"+hash] = &azBlob{
+                               Mtime:       time.Now(),
+                               Uncommitted: make(map[string][]byte),
+                               Metadata:    make(map[string]string),
+                               Etag:        makeEtag(),
+                       }
+                       h.unlockAndRace()
+               }
+               metadata := make(map[string]string)
+               for k, v := range r.Header {
+                       if strings.HasPrefix(strings.ToLower(k), "x-ms-meta-") {
+                               name := k[len("x-ms-meta-"):]
+                               metadata[strings.ToLower(name)] = v[0]
+                       }
+               }
+               h.blobs[container+"|"+hash] = &azBlob{
+                       Data:        body,
+                       Mtime:       time.Now(),
+                       Uncommitted: make(map[string][]byte),
+                       Metadata:    metadata,
+                       Etag:        makeEtag(),
+               }
+               rw.WriteHeader(http.StatusCreated)
+       case r.Method == "PUT" && r.Form.Get("comp") == "block":
+               // "Put Block" API
+               if !blobExists {
+                       log.Printf("Got block for nonexistent blob: %+v", r)
+                       rw.WriteHeader(http.StatusBadRequest)
+                       return
+               }
+               blockID, err := base64.StdEncoding.DecodeString(r.Form.Get("blockid"))
+               if err != nil || len(blockID) == 0 {
+                       log.Printf("Invalid blockid: %+q", r.Form.Get("blockid"))
+                       rw.WriteHeader(http.StatusBadRequest)
+                       return
+               }
+               blob.Uncommitted[string(blockID)] = body
+               rw.WriteHeader(http.StatusCreated)
+       case r.Method == "PUT" && r.Form.Get("comp") == "blocklist":
+               // "Put Block List" API
+               bl := &blockListRequestBody{}
+               if err := xml.Unmarshal(body, bl); err != nil {
+                       log.Printf("xml Unmarshal: %s", err)
+                       rw.WriteHeader(http.StatusBadRequest)
+                       return
+               }
+               for _, encBlockID := range bl.Uncommitted {
+                       blockID, err := base64.StdEncoding.DecodeString(encBlockID)
+                       if err != nil || len(blockID) == 0 || blob.Uncommitted[string(blockID)] == nil {
+                               log.Printf("Invalid blockid: %+q", encBlockID)
+                               rw.WriteHeader(http.StatusBadRequest)
+                               return
+                       }
+                       blob.Data = blob.Uncommitted[string(blockID)]
+                       blob.Etag = makeEtag()
+                       blob.Mtime = time.Now()
+                       delete(blob.Uncommitted, string(blockID))
+               }
+               rw.WriteHeader(http.StatusCreated)
+       case r.Method == "PUT" && r.Form.Get("comp") == "metadata":
+               // "Set Metadata Headers" API. We don't bother
+               // stubbing "Get Metadata Headers": AzureBlobVolume
+               // sets metadata headers only as a way to bump Etag
+               // and Last-Modified.
+               if !blobExists {
+                       log.Printf("Got metadata for nonexistent blob: %+v", r)
+                       rw.WriteHeader(http.StatusBadRequest)
+                       return
+               }
+               blob.Metadata = make(map[string]string)
+               for k, v := range r.Header {
+                       if strings.HasPrefix(strings.ToLower(k), "x-ms-meta-") {
+                               name := k[len("x-ms-meta-"):]
+                               blob.Metadata[strings.ToLower(name)] = v[0]
+                       }
+               }
+               blob.Mtime = time.Now()
+               blob.Etag = makeEtag()
+       case (r.Method == "GET" || r.Method == "HEAD") && r.Form.Get("comp") == "metadata" && hash != "":
+               // "Get Blob Metadata" API
+               if !blobExists {
+                       rw.WriteHeader(http.StatusNotFound)
+                       return
+               }
+               for k, v := range blob.Metadata {
+                       rw.Header().Set(fmt.Sprintf("x-ms-meta-%s", k), v)
+               }
+               return
+       case (r.Method == "GET" || r.Method == "HEAD") && hash != "":
+               // "Get Blob" API
+               if !blobExists {
+                       rw.WriteHeader(http.StatusNotFound)
+                       return
+               }
+               data := blob.Data
+               if rangeSpec := rangeRegexp.FindStringSubmatch(r.Header.Get("Range")); rangeSpec != nil {
+                       b0, err0 := strconv.Atoi(rangeSpec[1])
+                       b1, err1 := strconv.Atoi(rangeSpec[2])
+                       if err0 != nil || err1 != nil || b0 >= len(data) || b1 >= len(data) || b0 > b1 {
+                               rw.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", len(data)))
+                               rw.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
+                               return
+                       }
+                       rw.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", b0, b1, len(data)))
+                       rw.WriteHeader(http.StatusPartialContent)
+                       data = data[b0 : b1+1]
+               }
+               rw.Header().Set("Last-Modified", blob.Mtime.Format(time.RFC1123))
+               rw.Header().Set("Content-Length", strconv.Itoa(len(data)))
+               if r.Method == "GET" {
+                       if _, err := rw.Write(data); err != nil {
+                               log.Printf("write %+q: %s", data, err)
+                       }
+               }
+               h.unlockAndRace()
+       case r.Method == "DELETE" && hash != "":
+               // "Delete Blob" API
+               if !blobExists {
+                       rw.WriteHeader(http.StatusNotFound)
+                       return
+               }
+               delete(h.blobs, container+"|"+hash)
+               rw.WriteHeader(http.StatusAccepted)
+       case r.Method == "GET" && r.Form.Get("comp") == "list" && r.Form.Get("restype") == "container":
+               // "List Blobs" API
+               prefix := container + "|" + r.Form.Get("prefix")
+               marker := r.Form.Get("marker")
+
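+               // Default to a deliberately small page size so tests
+               // exercise the pagination (NextMarker) path.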
+               maxResults := 2
+               if n, err := strconv.Atoi(r.Form.Get("maxresults")); err == nil && n >= 1 && n <= 5000 {
+                       maxResults = n
+               }
+
+               resp := storage.BlobListResponse{
+                       Marker:     marker,
+                       NextMarker: "",
+                       MaxResults: int64(maxResults),
+               }
+               var hashes sort.StringSlice
+               for k := range h.blobs {
+                       if strings.HasPrefix(k, prefix) {
+                               hashes = append(hashes, k[len(container)+1:])
+                       }
+               }
+               hashes.Sort()
+               for _, hash := range hashes {
+                       if len(resp.Blobs) == maxResults {
+                               resp.NextMarker = hash
+                               break
+                       }
+                       if len(resp.Blobs) > 0 || marker == "" || marker == hash {
+                               blob := h.blobs[container+"|"+hash]
+                               bmeta := map[string]string(nil)
+                               if r.Form.Get("include") == "metadata" {
+                                       bmeta = blob.Metadata
+                               }
+                               b := storage.Blob{
+                                       Name: hash,
+                                       Properties: storage.BlobProperties{
+                                               LastModified:  storage.TimeRFC1123(blob.Mtime),
+                                               ContentLength: int64(len(blob.Data)),
+                                               Etag:          blob.Etag,
+                                       },
+                                       Metadata: bmeta,
+                               }
+                               resp.Blobs = append(resp.Blobs, b)
+                       }
+               }
+               buf, err := xml.Marshal(resp)
+               if err != nil {
+                       log.Print(err)
+                       rw.WriteHeader(http.StatusInternalServerError)
+                       return
+               }
+               rw.Write(buf)
+       default:
+               log.Printf("azStubHandler: not implemented: %+v Body:%+q", r, body)
+               rw.WriteHeader(http.StatusNotImplemented)
+       }
+}
+
+// azStubDialer is a net.Dialer that notices when the Azure driver
+// tries to connect to "devstoreaccount1.blob.127.0.0.1:46067", and
+// in such cases transparently dials "127.0.0.1:46067" instead.
+type azStubDialer struct {
+       net.Dialer
+}
+
+var localHostPortRe = regexp.MustCompile(`(127\.0\.0\.1|localhost|\[::1\]):\d+`)
+
+func (d *azStubDialer) Dial(network, address string) (net.Conn, error) {
+       if hp := localHostPortRe.FindString(address); hp != "" {
+               if azureTestDebug {
+                       log.Println("azStubDialer: dial", hp, "instead of", address)
+               }
+               address = hp
+       }
+       return d.Dialer.Dial(network, address)
+}
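+
+// Tests install the stub dialer by replacing http.DefaultTransport,
+// as in SetUpTest below:
+//
+//     http.DefaultTransport = &http.Transport{Dial: (&azStubDialer{}).Dial}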
+
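+// TestableAzureBlobVolume is an AzureBlobVolume that talks either to
+// the local azStubHandler stub (the default) or, when
+// azureTestContainer is set, to a real Azure storage container.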
+type TestableAzureBlobVolume struct {
+       *AzureBlobVolume
+       azHandler *azStubHandler
+       azStub    *httptest.Server
+       t         TB
+}
+
+func NewTestableAzureBlobVolume(t TB, readonly bool, replication int) *TestableAzureBlobVolume {
+       azHandler := newAzStubHandler()
+       azStub := httptest.NewServer(azHandler)
+
+       var azClient storage.Client
+
+       container := azureTestContainer
+       if container == "" {
+               // Connect to stub instead of real Azure storage service
+               stubURLBase := strings.Split(azStub.URL, "://")[1]
+               var err error
+               if azClient, err = storage.NewClient(fakeAccountName, fakeAccountKey, stubURLBase, storage.DefaultAPIVersion, false); err != nil {
+                       t.Fatal(err)
+               }
+               container = "fakecontainername"
+       } else {
+               // Connect to real Azure storage service
+               accountKey, err := readKeyFromFile(azureStorageAccountKeyFile)
+               if err != nil {
+                       t.Fatal(err)
+               }
+               azClient, err = storage.NewBasicClient(azureStorageAccountName, accountKey)
+               if err != nil {
+                       t.Fatal(err)
+               }
+       }
+
+       bs := azClient.GetBlobService()
+       v := &AzureBlobVolume{
+               ContainerName:    container,
+               ReadOnly:         readonly,
+               AzureReplication: replication,
+               azClient:         azClient,
+               container:        &azureContainer{ctr: bs.GetContainerReference(container)},
+       }
+
+       return &TestableAzureBlobVolume{
+               AzureBlobVolume: v,
+               azHandler:       azHandler,
+               azStub:          azStub,
+               t:               t,
+       }
+}
+
+var _ = check.Suite(&StubbedAzureBlobSuite{})
+
+type StubbedAzureBlobSuite struct {
+       volume            *TestableAzureBlobVolume
+       origHTTPTransport http.RoundTripper
+}
+
+func (s *StubbedAzureBlobSuite) SetUpTest(c *check.C) {
+       s.origHTTPTransport = http.DefaultTransport
+       http.DefaultTransport = &http.Transport{
+               Dial: (&azStubDialer{}).Dial,
+       }
+       azureWriteRaceInterval = time.Millisecond
+       azureWriteRacePollTime = time.Nanosecond
+
+       s.volume = NewTestableAzureBlobVolume(c, false, 3)
+}
+
+func (s *StubbedAzureBlobSuite) TearDownTest(c *check.C) {
+       s.volume.Teardown()
+       http.DefaultTransport = s.origHTTPTransport
+}
+
+func TestAzureBlobVolumeWithGeneric(t *testing.T) {
+       defer func(t http.RoundTripper) {
+               http.DefaultTransport = t
+       }(http.DefaultTransport)
+       http.DefaultTransport = &http.Transport{
+               Dial: (&azStubDialer{}).Dial,
+       }
+       azureWriteRaceInterval = time.Millisecond
+       azureWriteRacePollTime = time.Nanosecond
+       DoGenericVolumeTests(t, func(t TB) TestableVolume {
+               return NewTestableAzureBlobVolume(t, false, azureStorageReplication)
+       })
+}
+
+func TestAzureBlobVolumeConcurrentRanges(t *testing.T) {
+       defer func(b int) {
+               azureMaxGetBytes = b
+       }(azureMaxGetBytes)
+
+       defer func(t http.RoundTripper) {
+               http.DefaultTransport = t
+       }(http.DefaultTransport)
+       http.DefaultTransport = &http.Transport{
+               Dial: (&azStubDialer{}).Dial,
+       }
+       azureWriteRaceInterval = time.Millisecond
+       azureWriteRacePollTime = time.Nanosecond
+       // Test (BlockSize mod azureMaxGetBytes)==0 and !=0 cases
+       for _, azureMaxGetBytes = range []int{2 << 22, 2<<22 - 1} {
+               DoGenericVolumeTests(t, func(t TB) TestableVolume {
+                       return NewTestableAzureBlobVolume(t, false, azureStorageReplication)
+               })
+       }
+}
+
+func TestReadonlyAzureBlobVolumeWithGeneric(t *testing.T) {
+       defer func(t http.RoundTripper) {
+               http.DefaultTransport = t
+       }(http.DefaultTransport)
+       http.DefaultTransport = &http.Transport{
+               Dial: (&azStubDialer{}).Dial,
+       }
+       azureWriteRaceInterval = time.Millisecond
+       azureWriteRacePollTime = time.Nanosecond
+       DoGenericVolumeTests(t, func(t TB) TestableVolume {
+               return NewTestableAzureBlobVolume(t, true, azureStorageReplication)
+       })
+}
+
+func TestAzureBlobVolumeRangeFenceposts(t *testing.T) {
+       defer func(t http.RoundTripper) {
+               http.DefaultTransport = t
+       }(http.DefaultTransport)
+       http.DefaultTransport = &http.Transport{
+               Dial: (&azStubDialer{}).Dial,
+       }
+
+       v := NewTestableAzureBlobVolume(t, false, 3)
+       defer v.Teardown()
+
+       for _, size := range []int{
+               2<<22 - 1, // one <max read
+               2 << 22,   // one =max read
+               2<<22 + 1, // one =max read, one <max
+               2 << 23,   // two =max reads
+               BlockSize - 1,
+               BlockSize,
+       } {
+               data := make([]byte, size)
+               for i := range data {
+                       data[i] = byte((i + 7) & 0xff)
+               }
+               hash := fmt.Sprintf("%x", md5.Sum(data))
+               err := v.Put(context.Background(), hash, data)
+               if err != nil {
+                       t.Error(err)
+               }
+               gotData := make([]byte, len(data))
+               gotLen, err := v.Get(context.Background(), hash, gotData)
+               if err != nil {
+                       t.Error(err)
+               }
+               gotHash := fmt.Sprintf("%x", md5.Sum(gotData))
+               if gotLen != size {
+                       t.Errorf("length mismatch: got %d != %d", gotLen, size)
+               }
+               if gotHash != hash {
+                       t.Errorf("hash mismatch: got %s != %s", gotHash, hash)
+               }
+       }
+}
+
+func TestAzureBlobVolumeReplication(t *testing.T) {
+       for r := 1; r <= 4; r++ {
+               v := NewTestableAzureBlobVolume(t, false, r)
+               defer v.Teardown()
+               if n := v.Replication(); n != r {
+                       t.Errorf("Got replication %d, expected %d", n, r)
+               }
+       }
+}
+
+func TestAzureBlobVolumeCreateBlobRace(t *testing.T) {
+       defer func(t http.RoundTripper) {
+               http.DefaultTransport = t
+       }(http.DefaultTransport)
+       http.DefaultTransport = &http.Transport{
+               Dial: (&azStubDialer{}).Dial,
+       }
+
+       v := NewTestableAzureBlobVolume(t, false, 3)
+       defer v.Teardown()
+
+       azureWriteRaceInterval = time.Second
+       azureWriteRacePollTime = time.Millisecond
+
+       var wg sync.WaitGroup
+
+       v.azHandler.race = make(chan chan struct{})
+
+       wg.Add(1)
+       go func() {
+               defer wg.Done()
+               err := v.Put(context.Background(), TestHash, TestBlock)
+               if err != nil {
+                       t.Error(err)
+               }
+       }()
+       continuePut := make(chan struct{})
+       // Wait for the stub's Put to create the empty blob
+       v.azHandler.race <- continuePut
+       wg.Add(1)
+       go func() {
+               defer wg.Done()
+               buf := make([]byte, len(TestBlock))
+               _, err := v.Get(context.Background(), TestHash, buf)
+               if err != nil {
+                       t.Error(err)
+               }
+       }()
+       // Wait for the stub's Get to get the empty blob
+       close(v.azHandler.race)
+       // Allow stub's Put to continue, so the real data is ready
+       // when the volume's Get retries
+       <-continuePut
+       // Wait for Get() and Put() to finish
+       wg.Wait()
+}
+
+func TestAzureBlobVolumeCreateBlobRaceDeadline(t *testing.T) {
+       defer func(t http.RoundTripper) {
+               http.DefaultTransport = t
+       }(http.DefaultTransport)
+       http.DefaultTransport = &http.Transport{
+               Dial: (&azStubDialer{}).Dial,
+       }
+
+       v := NewTestableAzureBlobVolume(t, false, 3)
+       defer v.Teardown()
+
+       azureWriteRaceInterval = 2 * time.Second
+       azureWriteRacePollTime = 5 * time.Millisecond
+
+       v.PutRaw(TestHash, nil)
+
+       buf := new(bytes.Buffer)
+       v.IndexTo("", buf)
+       if buf.Len() != 0 {
+               t.Errorf("Index %+q should be empty", buf.Bytes())
+       }
+
+       v.TouchWithDate(TestHash, time.Now().Add(-1982*time.Millisecond))
+
+       allDone := make(chan struct{})
+       go func() {
+               defer close(allDone)
+               buf := make([]byte, BlockSize)
+               n, err := v.Get(context.Background(), TestHash, buf)
+               if err != nil {
+                       t.Error(err)
+                       return
+               }
+               if n != 0 {
+                       t.Errorf("Got %+q, expected empty buf", buf[:n])
+               }
+       }()
+       select {
+       case <-allDone:
+       case <-time.After(time.Second):
+               t.Error("Get should have stopped waiting for race when block was 2s old")
+       }
+
+       buf.Reset()
+       v.IndexTo("", buf)
+       if !bytes.HasPrefix(buf.Bytes(), []byte(TestHash+"+0")) {
+               t.Errorf("Index %+q should have %+q", buf.Bytes(), TestHash+"+0")
+       }
+}
+
+func TestAzureBlobVolumeContextCancelGet(t *testing.T) {
+       testAzureBlobVolumeContextCancel(t, func(ctx context.Context, v *TestableAzureBlobVolume) error {
+               v.PutRaw(TestHash, TestBlock)
+               _, err := v.Get(ctx, TestHash, make([]byte, BlockSize))
+               return err
+       })
+}
+
+func TestAzureBlobVolumeContextCancelPut(t *testing.T) {
+       testAzureBlobVolumeContextCancel(t, func(ctx context.Context, v *TestableAzureBlobVolume) error {
+               return v.Put(ctx, TestHash, make([]byte, BlockSize))
+       })
+}
+
+func TestAzureBlobVolumeContextCancelCompare(t *testing.T) {
+       testAzureBlobVolumeContextCancel(t, func(ctx context.Context, v *TestableAzureBlobVolume) error {
+               v.PutRaw(TestHash, TestBlock)
+               return v.Compare(ctx, TestHash, TestBlock2)
+       })
+}
+
+func testAzureBlobVolumeContextCancel(t *testing.T, testFunc func(context.Context, *TestableAzureBlobVolume) error) {
+       defer func(t http.RoundTripper) {
+               http.DefaultTransport = t
+       }(http.DefaultTransport)
+       http.DefaultTransport = &http.Transport{
+               Dial: (&azStubDialer{}).Dial,
+       }
+
+       v := NewTestableAzureBlobVolume(t, false, 3)
+       defer v.Teardown()
+       v.azHandler.race = make(chan chan struct{})
+
+       ctx, cancel := context.WithCancel(context.Background())
+       allDone := make(chan struct{})
+       go func() {
+               defer close(allDone)
+               err := testFunc(ctx, v)
+               if err != context.Canceled {
+                       t.Errorf("got %T %q, expected %q", err, err, context.Canceled)
+               }
+       }()
+       releaseHandler := make(chan struct{})
+       select {
+       case <-allDone:
+               t.Error("testFunc finished without waiting for v.azHandler.race")
+       case <-time.After(10 * time.Second):
+               t.Error("timed out waiting to enter handler")
+       case v.azHandler.race <- releaseHandler:
+       }
+
+       cancel()
+
+       select {
+       case <-time.After(10 * time.Second):
+               t.Error("timed out waiting to cancel")
+       case <-allDone:
+       }
+
+       go func() {
+               <-releaseHandler
+       }()
+}
+
+func (s *StubbedAzureBlobSuite) TestStats(c *check.C) {
+       stats := func() string {
+               buf, err := json.Marshal(s.volume.InternalStats())
+               c.Check(err, check.IsNil)
+               return string(buf)
+       }
+
+       c.Check(stats(), check.Matches, `.*"Ops":0,.*`)
+       c.Check(stats(), check.Matches, `.*"Errors":0,.*`)
+
+       loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+       _, err := s.volume.Get(context.Background(), loc, make([]byte, 3))
+       c.Check(err, check.NotNil)
+       c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
+       c.Check(stats(), check.Matches, `.*"Errors":[^0],.*`)
+       c.Check(stats(), check.Matches, `.*"storage\.AzureStorageServiceError 404 \(404 Not Found\)":[^0].*`)
+       c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
+
+       err = s.volume.Put(context.Background(), loc, []byte("foo"))
+       c.Check(err, check.IsNil)
+       c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
+       c.Check(stats(), check.Matches, `.*"CreateOps":1,.*`)
+
+       _, err = s.volume.Get(context.Background(), loc, make([]byte, 3))
+       c.Check(err, check.IsNil)
+       _, err = s.volume.Get(context.Background(), loc, make([]byte, 3))
+       c.Check(err, check.IsNil)
+       c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
+}
+
+func (s *StubbedAzureBlobSuite) TestConfig(c *check.C) {
+       var cfg Config
+       err := yaml.Unmarshal([]byte(`
+Volumes:
+  - Type: Azure
+    StorageClasses: ["class_a", "class_b"]
+`), &cfg)
+
+       c.Check(err, check.IsNil)
+       c.Check(cfg.Volumes[0].GetStorageClasses(), check.DeepEquals, []string{"class_a", "class_b"})
+}
+
+func (v *TestableAzureBlobVolume) PutRaw(locator string, data []byte) {
+       v.azHandler.PutRaw(v.ContainerName, locator, data)
+}
+
+func (v *TestableAzureBlobVolume) TouchWithDate(locator string, lastPut time.Time) {
+       v.azHandler.TouchWithDate(v.ContainerName, locator, lastPut)
+}
+
+func (v *TestableAzureBlobVolume) Teardown() {
+       v.azStub.Close()
+}
+
+func (v *TestableAzureBlobVolume) ReadWriteOperationLabelValues() (r, w string) {
+       return "get", "create"
+}
+
+func (v *TestableAzureBlobVolume) DeviceID() string {
+       // Dummy device id for testing purposes
+       return "azure://azure_blob_volume_test"
+}
+
+func (v *TestableAzureBlobVolume) Start(vm *volumeMetricsVecs) error {
+       // Override the original Start() so the stats CounterVecs can be assigned using a dummy DeviceID.
+       v.container.stats.opsCounters, v.container.stats.errCounters, v.container.stats.ioBytes = vm.getCounterVecsFor(prometheus.Labels{"device_id": v.DeviceID()})
+       return nil
+}
+
+func makeEtag() string {
+       return fmt.Sprintf("0x%x", rand.Int63())
+}
diff --git a/services/keepstore/bufferpool.go b/services/keepstore/bufferpool.go
new file mode 100644 (file)
index 0000000..d2e7c9e
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "sync"
+       "sync/atomic"
+       "time"
+)
+
+type bufferPool struct {
+       // limiter has a "true" placeholder for each in-use buffer.
+       limiter chan bool
+       // allocated is the number of bytes currently allocated to buffers.
+       allocated uint64
+       // Pool has unused buffers.
+       sync.Pool
+}
+
+func newBufferPool(count int, bufSize int) *bufferPool {
+       p := bufferPool{}
+       p.New = func() interface{} {
+               atomic.AddUint64(&p.allocated, uint64(bufSize))
+               return make([]byte, bufSize)
+       }
+       p.limiter = make(chan bool, count)
+       return &p
+}
+
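+// Get returns a buffer of the given size, blocking until a buffer is
+// available if all of the pool's buffers are in use. size must not
+// exceed the pool's buffer size.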
+func (p *bufferPool) Get(size int) []byte {
+       select {
+       case p.limiter <- true:
+       default:
+               t0 := time.Now()
+               log.Printf("reached max buffers (%d), waiting", cap(p.limiter))
+               p.limiter <- true
+               log.Printf("waited %v for a buffer", time.Since(t0))
+       }
+       buf := p.Pool.Get().([]byte)
+       if cap(buf) < size {
+               log.Fatalf("bufferPool Get(size=%d) but max=%d", size, cap(buf))
+       }
+       return buf[:size]
+}
+
+func (p *bufferPool) Put(buf []byte) {
+       p.Pool.Put(buf)
+       <-p.limiter
+}
+
+// Alloc returns the number of bytes allocated to buffers.
+func (p *bufferPool) Alloc() uint64 {
+       return atomic.LoadUint64(&p.allocated)
+}
+
+// Cap returns the maximum number of buffers allowed.
+func (p *bufferPool) Cap() int {
+       return cap(p.limiter)
+}
+
+// Len returns the number of buffers in use right now.
+func (p *bufferPool) Len() int {
+       return len(p.limiter)
+}
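+
+// Illustrative use of the pool (a sketch, not called anywhere in
+// this package; the size 1234 is arbitrary):
+//
+//     pool := newBufferPool(2, BlockSize) // at most 2 buffers in use at once
+//     buf := pool.Get(1234)               // blocks while both buffers are in use
+//     defer pool.Put(buf)                 // returns the buffer and frees a slot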
diff --git a/services/keepstore/bufferpool_test.go b/services/keepstore/bufferpool_test.go
new file mode 100644 (file)
index 0000000..21b03ed
--- /dev/null
@@ -0,0 +1,91 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "time"
+
+       . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&BufferPoolSuite{})
+
+type BufferPoolSuite struct{}
+
+// Initialize a default-sized buffer pool for the benefit of test
+// suites that don't run main().
+func init() {
+       bufs = newBufferPool(theConfig.MaxBuffers, BlockSize)
+}
+
+// Restore sane default after bufferpool's own tests
+func (s *BufferPoolSuite) TearDownTest(c *C) {
+       bufs = newBufferPool(theConfig.MaxBuffers, BlockSize)
+}
+
+func (s *BufferPoolSuite) TestBufferPoolBufSize(c *C) {
+       bufs := newBufferPool(2, 10)
+       b1 := bufs.Get(1)
+       bufs.Get(2)
+       bufs.Put(b1)
+       b3 := bufs.Get(3)
+       c.Check(len(b3), Equals, 3)
+}
+
+func (s *BufferPoolSuite) TestBufferPoolUnderLimit(c *C) {
+       bufs := newBufferPool(3, 10)
+       b1 := bufs.Get(10)
+       bufs.Get(10)
+       testBufferPoolRace(c, bufs, b1, "Get")
+}
+
+func (s *BufferPoolSuite) TestBufferPoolAtLimit(c *C) {
+       bufs := newBufferPool(2, 10)
+       b1 := bufs.Get(10)
+       bufs.Get(10)
+       testBufferPoolRace(c, bufs, b1, "Put")
+}
+
+func testBufferPoolRace(c *C, bufs *bufferPool, unused []byte, expectWin string) {
+       race := make(chan string)
+       go func() {
+               bufs.Get(10)
+               time.Sleep(time.Millisecond)
+               race <- "Get"
+       }()
+       go func() {
+               time.Sleep(10 * time.Millisecond)
+               bufs.Put(unused)
+               race <- "Put"
+       }()
+       c.Check(<-race, Equals, expectWin)
+       c.Check(<-race, Not(Equals), expectWin)
+       close(race)
+}
+
+func (s *BufferPoolSuite) TestBufferPoolReuse(c *C) {
+       bufs := newBufferPool(2, 10)
+       bufs.Get(10)
+       last := bufs.Get(10)
+       // The buffer pool is allowed to throw away unused buffers
+       // (e.g., during sync.Pool's garbage collection hook, in the
+       // current implementation). However, if unused buffers are
+       // getting thrown away and reallocated more than {arbitrary
+       // frequency threshold} during a busy loop, it's not acting
+       // much like a buffer pool.
+       allocs := 1000
+       reuses := 0
+       for i := 0; i < allocs; i++ {
+               bufs.Put(last)
+               next := bufs.Get(10)
+               copy(last, []byte("last"))
+               copy(next, []byte("next"))
+               if last[0] == 'n' {
+                       reuses++
+               }
+               last = next
+       }
+       c.Check(reuses > allocs*95/100, Equals, true)
+}
diff --git a/services/keepstore/collision.go b/services/keepstore/collision.go
new file mode 100644 (file)
index 0000000..4d6583b
--- /dev/null
@@ -0,0 +1,100 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "crypto/md5"
+       "fmt"
+       "io"
+)
+
+// collisionOrCorrupt computes the MD5 digest of a data block
+// (consisting of buf1 + buf2 + all bytes readable from rdr). If all
+// data is read successfully, it returns CollisionError if the digest
+// matches expectMD5, otherwise DiskHashError. If an error occurs
+// while reading, it returns that error.
+//
+// "content has expected MD5" is called a collision because this
+// function is used in cases where we have another block in hand with
+// the given MD5 but different content.
+func collisionOrCorrupt(expectMD5 string, buf1, buf2 []byte, rdr io.Reader) error {
+       outcome := make(chan error)
+       data := make(chan []byte, 1)
+       go func() {
+               h := md5.New()
+               for b := range data {
+                       h.Write(b)
+               }
+               if fmt.Sprintf("%x", h.Sum(nil)) == expectMD5 {
+                       outcome <- CollisionError
+               } else {
+                       outcome <- DiskHashError
+               }
+       }()
+       data <- buf1
+       if buf2 != nil {
+               data <- buf2
+       }
+       var err error
+       for rdr != nil && err == nil {
+               buf := make([]byte, 1<<18)
+               var n int
+               n, err = rdr.Read(buf)
+               data <- buf[:n]
+       }
+       close(data)
+       if rdr != nil && err != io.EOF {
+               <-outcome
+               return err
+       }
+       return <-outcome
+}
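+
+// Illustrative call (a sketch; the variable names are hypothetical):
+//
+//     err := collisionOrCorrupt(locatorHash, bytesAlreadyRead, nil, remainingReader)
+//
+// CollisionError means the data really does hash to locatorHash, so
+// the mismatch with the block in hand is an MD5 collision;
+// DiskHashError means the data does not hash to locatorHash, i.e.
+// the stored copy is corrupt.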
+
+func compareReaderWithBuf(ctx context.Context, rdr io.Reader, expect []byte, hash string) error {
+       bufLen := 1 << 20
+       if bufLen > len(expect) && len(expect) > 0 {
+               // No need for bufLen to be longer than
+               // expect, except that len(buf)==0 would
+               // prevent us from handling empty readers the
+               // same way as non-empty readers: reading 0
+               // bytes at a time never reaches EOF.
+               bufLen = len(expect)
+       }
+       buf := make([]byte, bufLen)
+       cmp := expect
+
+       // Loop invariants: all data read so far matched what
+       // we expected, and the first N bytes of cmp are
+       // expected to equal the next N bytes read from
+       // rdr.
+       for {
+               ready := make(chan bool)
+               var n int
+               var err error
+               go func() {
+                       n, err = rdr.Read(buf)
+                       close(ready)
+               }()
+               select {
+               case <-ready:
+               case <-ctx.Done():
+                       return ctx.Err()
+               }
+               if n > len(cmp) || !bytes.Equal(cmp[:n], buf[:n]) {
+                       return collisionOrCorrupt(hash, expect[:len(expect)-len(cmp)], buf[:n], rdr)
+               }
+               cmp = cmp[n:]
+               if err == io.EOF {
+                       if len(cmp) != 0 {
+                               return collisionOrCorrupt(hash, expect[:len(expect)-len(cmp)], nil, nil)
+                       }
+                       return nil
+               } else if err != nil {
+                       return err
+               }
+       }
+}
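+
+// Illustrative call (a sketch; the variable names are hypothetical):
+// verify that the bytes served by rdr match a copy of the block we
+// already hold.
+//
+//     err := compareReaderWithBuf(ctx, rdr, localCopy, locatorHash)
+//     // err == nil means rdr's content matched localCopy exactly.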
diff --git a/services/keepstore/collision_test.go b/services/keepstore/collision_test.go
new file mode 100644 (file)
index 0000000..0d6fd62
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "testing/iotest"
+
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&CollisionSuite{})
+
+type CollisionSuite struct{}
+
+func (s *CollisionSuite) TestCollisionOrCorrupt(c *check.C) {
+       fooMD5 := "acbd18db4cc2f85cedef654fccc4a4d8"
+
+       c.Check(collisionOrCorrupt(fooMD5, []byte{'f'}, []byte{'o'}, bytes.NewBufferString("o")),
+               check.Equals, CollisionError)
+       c.Check(collisionOrCorrupt(fooMD5, []byte{'f'}, nil, bytes.NewBufferString("oo")),
+               check.Equals, CollisionError)
+       c.Check(collisionOrCorrupt(fooMD5, []byte{'f'}, []byte{'o', 'o'}, nil),
+               check.Equals, CollisionError)
+       c.Check(collisionOrCorrupt(fooMD5, nil, []byte{}, bytes.NewBufferString("foo")),
+               check.Equals, CollisionError)
+       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o', 'o'}, nil, bytes.NewBufferString("")),
+               check.Equals, CollisionError)
+       c.Check(collisionOrCorrupt(fooMD5, nil, nil, iotest.NewReadLogger("foo: ", iotest.DataErrReader(iotest.OneByteReader(bytes.NewBufferString("foo"))))),
+               check.Equals, CollisionError)
+
+       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o', 'o'}, nil, bytes.NewBufferString("bar")),
+               check.Equals, DiskHashError)
+       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o'}, nil, nil),
+               check.Equals, DiskHashError)
+       c.Check(collisionOrCorrupt(fooMD5, []byte{}, nil, bytes.NewBufferString("")),
+               check.Equals, DiskHashError)
+       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'O'}, nil, bytes.NewBufferString("o")),
+               check.Equals, DiskHashError)
+       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'O', 'o'}, nil, nil),
+               check.Equals, DiskHashError)
+       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o'}, []byte{'O'}, nil),
+               check.Equals, DiskHashError)
+       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o'}, nil, bytes.NewBufferString("O")),
+               check.Equals, DiskHashError)
+
+       c.Check(collisionOrCorrupt(fooMD5, []byte{}, nil, iotest.TimeoutReader(iotest.OneByteReader(bytes.NewBufferString("foo")))),
+               check.Equals, iotest.ErrTimeout)
+}
diff --git a/services/keepstore/config.go b/services/keepstore/config.go
new file mode 100644 (file)
index 0000000..43a2191
--- /dev/null
@@ -0,0 +1,226 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "encoding/json"
+       "fmt"
+       "io/ioutil"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/prometheus/client_golang/prometheus"
+       "github.com/sirupsen/logrus"
+)
+
+type Config struct {
+       Debug  bool
+       Listen string
+
+       LogFormat string
+
+       PIDFile string
+
+       MaxBuffers  int
+       MaxRequests int
+
+       BlobSignatureTTL    arvados.Duration
+       BlobSigningKeyFile  string
+       RequireSignatures   bool
+       SystemAuthTokenFile string
+       EnableDelete        bool
+       TrashLifetime       arvados.Duration
+       TrashCheckInterval  arvados.Duration
+       PullWorkers         int
+       TrashWorkers        int
+       EmptyTrashWorkers   int
+       TLSCertificateFile  string
+       TLSKeyFile          string
+
+       Volumes VolumeList
+
+       blobSigningKey  []byte
+       systemAuthToken string
+       debugLogf       func(string, ...interface{})
+
+       ManagementToken string
+}
+
+var (
+       theConfig = DefaultConfig()
+       formatter = map[string]logrus.Formatter{
+               "text": &logrus.TextFormatter{
+                       FullTimestamp:   true,
+                       TimestampFormat: rfc3339NanoFixed,
+               },
+               "json": &logrus.JSONFormatter{
+                       TimestampFormat: rfc3339NanoFixed,
+               },
+       }
+       log = logrus.StandardLogger()
+)
+
+const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// DefaultConfig returns the default configuration.
+func DefaultConfig() *Config {
+       return &Config{
+               Listen:             ":25107",
+               LogFormat:          "json",
+               MaxBuffers:         128,
+               RequireSignatures:  true,
+               BlobSignatureTTL:   arvados.Duration(14 * 24 * time.Hour),
+               TrashLifetime:      arvados.Duration(14 * 24 * time.Hour),
+               TrashCheckInterval: arvados.Duration(24 * time.Hour),
+               Volumes:            []Volume{},
+       }
+}
+
+// Start should be called exactly once: after setting all public
+// fields, and before using the config.
+func (cfg *Config) Start(reg *prometheus.Registry) error {
+       if cfg.Debug {
+               log.Level = logrus.DebugLevel
+               cfg.debugLogf = log.Printf
+               cfg.debugLogf("debugging enabled")
+       } else {
+               log.Level = logrus.InfoLevel
+               cfg.debugLogf = func(string, ...interface{}) {}
+       }
+
+       f := formatter[strings.ToLower(cfg.LogFormat)]
+       if f == nil {
+               return fmt.Errorf(`unsupported log format %q (try "text" or "json")`, cfg.LogFormat)
+       }
+       log.Formatter = f
+
+       if cfg.MaxBuffers < 1 {
+               return fmt.Errorf("MaxBuffers must be greater than zero")
+       }
+       bufs = newBufferPool(cfg.MaxBuffers, BlockSize)
+
+       if cfg.MaxRequests < 1 {
+               cfg.MaxRequests = cfg.MaxBuffers * 2
+               log.Printf("MaxRequests <1 or not specified; defaulting to MaxBuffers * 2 == %d", cfg.MaxRequests)
+       }
+
+       if cfg.BlobSigningKeyFile != "" {
+               buf, err := ioutil.ReadFile(cfg.BlobSigningKeyFile)
+               if err != nil {
+                       return fmt.Errorf("reading blob signing key file: %s", err)
+               }
+               cfg.blobSigningKey = bytes.TrimSpace(buf)
+               if len(cfg.blobSigningKey) == 0 {
+                       return fmt.Errorf("blob signing key file %q is empty", cfg.BlobSigningKeyFile)
+               }
+       } else if cfg.RequireSignatures {
+               return fmt.Errorf("cannot enable RequireSignatures (-enforce-permissions) without a blob signing key")
+       } else {
+               log.Println("Running without a blob signing key. Block locators " +
+                       "returned by this server will not be signed, and will be rejected " +
+                       "by a server that enforces permissions.")
+               log.Println("To fix this, use the BlobSigningKeyFile config entry.")
+       }
+
+       if fn := cfg.SystemAuthTokenFile; fn != "" {
+               buf, err := ioutil.ReadFile(fn)
+               if err != nil {
+                       return fmt.Errorf("cannot read system auth token file %q: %s", fn, err)
+               }
+               cfg.systemAuthToken = strings.TrimSpace(string(buf))
+       }
+
+       if cfg.EnableDelete {
+               log.Print("Trash/delete features are enabled. WARNING: this has not " +
+                       "been extensively tested. You should disable this unless you can afford to lose data.")
+       }
+
+       if len(cfg.Volumes) == 0 {
+               if (&unixVolumeAdder{cfg}).Discover() == 0 {
+                       return fmt.Errorf("no volumes found")
+               }
+       }
+       vm := newVolumeMetricsVecs(reg)
+       for _, v := range cfg.Volumes {
+               if err := v.Start(vm); err != nil {
+                       return fmt.Errorf("volume %s: %s", v, err)
+               }
+               log.Printf("Using volume %v (writable=%v)", v, v.Writable())
+       }
+       return nil
+}
+
+// VolumeTypes is built up by init() funcs in the source files that
+// define the volume types.
+var VolumeTypes = []func() VolumeWithExamples{}
+
+type VolumeList []Volume
+
+// UnmarshalJSON -- given an array of objects -- deserializes each
+// object as the volume type indicated by the object's Type field.
+func (vl *VolumeList) UnmarshalJSON(data []byte) error {
+       typeMap := map[string]func() VolumeWithExamples{}
+       for _, factory := range VolumeTypes {
+               t := factory().Type()
+               if _, ok := typeMap[t]; ok {
+                       log.Fatalf("volume type %+q is claimed by multiple VolumeTypes", t)
+               }
+               typeMap[t] = factory
+       }
+
+       var mapList []map[string]interface{}
+       err := json.Unmarshal(data, &mapList)
+       if err != nil {
+               return err
+       }
+       for _, mapIn := range mapList {
+               typeIn, ok := mapIn["Type"].(string)
+               if !ok {
+                       return fmt.Errorf("invalid volume type %+v", mapIn["Type"])
+               }
+               factory, ok := typeMap[typeIn]
+               if !ok {
+                       return fmt.Errorf("unsupported volume type %+q", typeIn)
+               }
+               data, err := json.Marshal(mapIn)
+               if err != nil {
+                       return err
+               }
+               vol := factory()
+               err = json.Unmarshal(data, vol)
+               if err != nil {
+                       return err
+               }
+               *vl = append(*vl, vol)
+       }
+       return nil
+}
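+
+// For example (an illustrative sketch, with most volume fields
+// elided), unmarshaling
+//
+//     [{"Type":"Azure","ContainerName":"c1"}]
+//
+// looks up "Azure" among the registered VolumeTypes and decodes the
+// object into the corresponding concrete volume type.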
+
+// MarshalJSON adds a "Type" field to each volume corresponding to its
+// Type().
+func (vl *VolumeList) MarshalJSON() ([]byte, error) {
+       data := []byte{'['}
+       for _, vs := range *vl {
+               j, err := json.Marshal(vs)
+               if err != nil {
+                       return nil, err
+               }
+               if len(data) > 1 {
+                       data = append(data, byte(','))
+               }
+               t, err := json.Marshal(vs.Type())
+               if err != nil {
+                       panic(err)
+               }
+               data = append(data, j[0])
+               data = append(data, []byte(`"Type":`)...)
+               data = append(data, t...)
+               data = append(data, byte(','))
+               data = append(data, j[1:]...)
+       }
+       return append(data, byte(']')), nil
+}
diff --git a/services/keepstore/config_test.go b/services/keepstore/config_test.go
new file mode 100644 (file)
index 0000000..e3b0ffc
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "github.com/sirupsen/logrus"
+)
+
+func init() {
+       log.Level = logrus.DebugLevel
+       theConfig.debugLogf = log.Printf
+}
diff --git a/services/keepstore/count.go b/services/keepstore/count.go
new file mode 100644 (file)
index 0000000..ccba4f1
--- /dev/null
@@ -0,0 +1,48 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "io"
+)
+
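+// NewCountingWriter returns an io.WriteCloser that passes writes
+// through to w and reports each write's byte count to f.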
+func NewCountingWriter(w io.Writer, f func(uint64)) io.WriteCloser {
+       return &countingReadWriter{
+               writer:  w,
+               counter: f,
+       }
+}
+
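+// NewCountingReader returns an io.ReadCloser that passes reads
+// through to r and reports each read's byte count to f.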
+func NewCountingReader(r io.Reader, f func(uint64)) io.ReadCloser {
+       return &countingReadWriter{
+               reader:  r,
+               counter: f,
+       }
+}
+
+type countingReadWriter struct {
+       reader  io.Reader
+       writer  io.Writer
+       counter func(uint64)
+}
+
+func (crw *countingReadWriter) Read(buf []byte) (int, error) {
+       n, err := crw.reader.Read(buf)
+       crw.counter(uint64(n))
+       return n, err
+}
+
+func (crw *countingReadWriter) Write(buf []byte) (int, error) {
+       n, err := crw.writer.Write(buf)
+       crw.counter(uint64(n))
+       return n, err
+}
+
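+// Close closes the wrapped writer if it implements io.Closer. For a
+// countingReadWriter built by NewCountingReader the writer is nil,
+// so Close is a no-op and the underlying reader is not closed.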
+func (crw *countingReadWriter) Close() error {
+       if c, ok := crw.writer.(io.Closer); ok {
+               return c.Close()
+       }
+       return nil
+}
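+
+// Illustrative use (a sketch; the counter and reader names are
+// hypothetical):
+//
+//     var inBytes uint64
+//     countingRdr := NewCountingReader(rdr, func(n uint64) {
+//             atomic.AddUint64(&inBytes, n)
+//     })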
diff --git a/services/keepstore/deprecated.go b/services/keepstore/deprecated.go
new file mode 100644 (file)
index 0000000..d137797
--- /dev/null
@@ -0,0 +1,47 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "flag"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+type deprecatedOptions struct {
+       flagSerializeIO     bool
+       flagReadonly        bool
+       neverDelete         bool
+       signatureTTLSeconds int
+}
+
+var deprecated = deprecatedOptions{
+       neverDelete:         !theConfig.EnableDelete,
+       signatureTTLSeconds: int(theConfig.BlobSignatureTTL.Duration() / time.Second),
+}
+
+func (depr *deprecatedOptions) beforeFlagParse(cfg *Config) {
+       flag.StringVar(&cfg.Listen, "listen", cfg.Listen, "see Listen configuration")
+       flag.IntVar(&cfg.MaxBuffers, "max-buffers", cfg.MaxBuffers, "see MaxBuffers configuration")
+       flag.IntVar(&cfg.MaxRequests, "max-requests", cfg.MaxRequests, "see MaxRequests configuration")
+       flag.BoolVar(&depr.neverDelete, "never-delete", depr.neverDelete, "see EnableDelete configuration")
+       flag.BoolVar(&cfg.RequireSignatures, "enforce-permissions", cfg.RequireSignatures, "see RequireSignatures configuration")
+       flag.StringVar(&cfg.BlobSigningKeyFile, "permission-key-file", cfg.BlobSigningKeyFile, "see BlobSigningKey`File` configuration")
+       flag.StringVar(&cfg.BlobSigningKeyFile, "blob-signing-key-file", cfg.BlobSigningKeyFile, "see BlobSigningKey`File` configuration")
+       flag.StringVar(&cfg.SystemAuthTokenFile, "data-manager-token-file", cfg.SystemAuthTokenFile, "see SystemAuthToken`File` configuration")
+       flag.IntVar(&depr.signatureTTLSeconds, "permission-ttl", depr.signatureTTLSeconds, "signature TTL in seconds; see BlobSignatureTTL configuration")
+       flag.IntVar(&depr.signatureTTLSeconds, "blob-signature-ttl", depr.signatureTTLSeconds, "signature TTL in seconds; see BlobSignatureTTL configuration")
+       flag.Var(&cfg.TrashLifetime, "trash-lifetime", "see TrashLifetime configuration")
+       flag.BoolVar(&depr.flagSerializeIO, "serialize", depr.flagSerializeIO, "serialize read and write operations on the following volumes.")
+       flag.BoolVar(&depr.flagReadonly, "readonly", depr.flagReadonly, "do not write, delete, or touch anything on the following volumes.")
+       flag.StringVar(&cfg.PIDFile, "pid", cfg.PIDFile, "see `PIDFile` configuration")
+       flag.Var(&cfg.TrashCheckInterval, "trash-check-interval", "see TrashCheckInterval configuration")
+}
+
+func (depr *deprecatedOptions) afterFlagParse(cfg *Config) {
+       cfg.BlobSignatureTTL = arvados.Duration(depr.signatureTTLSeconds) * arvados.Duration(time.Second)
+       cfg.EnableDelete = !depr.neverDelete
+}
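+
+// For example (illustrative): the deprecated flag
+// -blob-signature-ttl=1209600 (seconds) is equivalent to the config
+// entry BlobSignatureTTL: 336h (14 days, the default).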
diff --git a/services/keepstore/gocheck_test.go b/services/keepstore/gocheck_test.go
new file mode 100644 (file)
index 0000000..89d680f
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "gopkg.in/check.v1"
+       "testing"
+)
+
+func TestGocheck(t *testing.T) {
+       check.TestingT(t)
+}
diff --git a/services/keepstore/handler_test.go b/services/keepstore/handler_test.go
new file mode 100644 (file)
index 0000000..ad907ef
--- /dev/null
@@ -0,0 +1,1196 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Tests for Keep HTTP handlers:
+//
+//     GetBlockHandler
+//     PutBlockHandler
+//     IndexHandler
+//
+// The HTTP handlers are responsible for enforcing permission policy,
+// so these tests must exercise all possible permission permutations.
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "encoding/json"
+       "fmt"
+       "net/http"
+       "net/http/httptest"
+       "os"
+       "regexp"
+       "strings"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "github.com/prometheus/client_golang/prometheus"
+)
+
+var testCluster = &arvados.Cluster{
+       ClusterID: "zzzzz",
+}
+
+// A RequestTester represents the parameters for an HTTP request to
+// be issued on behalf of a unit test.
+type RequestTester struct {
+       uri         string
+       apiToken    string
+       method      string
+       requestBody []byte
+}
+
+// Test GetBlockHandler on the following situations:
+//   - permissions off, unauthenticated request, unsigned locator
+//   - permissions on, authenticated request, signed locator
+//   - permissions on, authenticated request, unsigned locator
+//   - permissions on, unauthenticated request, signed locator
+//   - permissions on, authenticated request, expired locator
+//   - permissions on, authenticated request, signed locator, transient error from backend
+//
+func TestGetHandler(t *testing.T) {
+       defer teardown()
+
+       // Prepare two test Keep volumes. Our block is stored on the second volume.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       vols := KeepVM.AllWritable()
+       if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
+               t.Error(err)
+       }
+
+       // Create locators for testing.
+       // Turn on permission settings so we can generate signed locators.
+       theConfig.RequireSignatures = true
+       theConfig.blobSigningKey = []byte(knownKey)
+       theConfig.BlobSignatureTTL.Set("5m")
+
+       var (
+               unsignedLocator  = "/" + TestHash
+               validTimestamp   = time.Now().Add(theConfig.BlobSignatureTTL.Duration())
+               expiredTimestamp = time.Now().Add(-time.Hour)
+               signedLocator    = "/" + SignLocator(TestHash, knownToken, validTimestamp)
+               expiredLocator   = "/" + SignLocator(TestHash, knownToken, expiredTimestamp)
+       )
+
+       // -----------------
+       // Test unauthenticated request with permissions off.
+       theConfig.RequireSignatures = false
+
+       // Unauthenticated request, unsigned locator
+       // => OK
+       response := IssueRequest(
+               &RequestTester{
+                       method: "GET",
+                       uri:    unsignedLocator,
+               })
+       ExpectStatusCode(t,
+               "Unauthenticated request, unsigned locator", http.StatusOK, response)
+       ExpectBody(t,
+               "Unauthenticated request, unsigned locator",
+               string(TestBlock),
+               response)
+
+       receivedLen := response.Header().Get("Content-Length")
+       expectedLen := fmt.Sprintf("%d", len(TestBlock))
+       if receivedLen != expectedLen {
+               t.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
+       }
+
+       // ----------------
+       // Permissions: on.
+       theConfig.RequireSignatures = true
+
+       // Authenticated request, signed locator
+       // => OK
+       response = IssueRequest(&RequestTester{
+               method:   "GET",
+               uri:      signedLocator,
+               apiToken: knownToken,
+       })
+       ExpectStatusCode(t,
+               "Authenticated request, signed locator", http.StatusOK, response)
+       ExpectBody(t,
+               "Authenticated request, signed locator", string(TestBlock), response)
+
+       receivedLen = response.Header().Get("Content-Length")
+       expectedLen = fmt.Sprintf("%d", len(TestBlock))
+       if receivedLen != expectedLen {
+               t.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
+       }
+
+       // Authenticated request, unsigned locator
+       // => PermissionError
+       response = IssueRequest(&RequestTester{
+               method:   "GET",
+               uri:      unsignedLocator,
+               apiToken: knownToken,
+       })
+       ExpectStatusCode(t, "unsigned locator", PermissionError.HTTPCode, response)
+
+       // Unauthenticated request, signed locator
+       // => PermissionError
+       response = IssueRequest(&RequestTester{
+               method: "GET",
+               uri:    signedLocator,
+       })
+       ExpectStatusCode(t,
+               "Unauthenticated request, signed locator",
+               PermissionError.HTTPCode, response)
+
+       // Authenticated request, expired locator
+       // => ExpiredError
+       response = IssueRequest(&RequestTester{
+               method:   "GET",
+               uri:      expiredLocator,
+               apiToken: knownToken,
+       })
+       ExpectStatusCode(t,
+               "Authenticated request, expired locator",
+               ExpiredError.HTTPCode, response)
+
+       // Authenticated request, signed locator
+       // => 503 Server busy (transient error)
+
+       // Set up the block owning volume to respond with errors
+       vols[0].(*MockVolume).Bad = true
+       vols[0].(*MockVolume).BadVolumeError = VolumeBusyError
+       response = IssueRequest(&RequestTester{
+               method:   "GET",
+               uri:      signedLocator,
+               apiToken: knownToken,
+       })
+       // A transient error from one volume while the other doesn't find the block
+       // should make the service return a 503 so that clients can retry.
+       ExpectStatusCode(t,
+               "Volume backend busy",
+               503, response)
+}
+
+// Test PutBlockHandler on the following situations:
+//   - no server key
+//   - with server key, authenticated request, unsigned locator
+//   - with server key, unauthenticated request, unsigned locator
+//
+func TestPutHandler(t *testing.T) {
+       defer teardown()
+
+       // Prepare two test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       // --------------
+       // No server key.
+
+       // Unauthenticated request, no server key
+       // => OK (unsigned response)
+       unsignedLocator := "/" + TestHash
+       response := IssueRequest(
+               &RequestTester{
+                       method:      "PUT",
+                       uri:         unsignedLocator,
+                       requestBody: TestBlock,
+               })
+
+       ExpectStatusCode(t,
+               "Unauthenticated request, no server key", http.StatusOK, response)
+       ExpectBody(t,
+               "Unauthenticated request, no server key",
+               TestHashPutResp, response)
+
+       // ------------------
+       // With a server key.
+
+       theConfig.blobSigningKey = []byte(knownKey)
+       theConfig.BlobSignatureTTL.Set("5m")
+
+       // When a permission key is available, the locator returned
+       // from an authenticated PUT request will be signed.
+
+       // Authenticated PUT, signed locator
+       // => OK (signed response)
+       response = IssueRequest(
+               &RequestTester{
+                       method:      "PUT",
+                       uri:         unsignedLocator,
+                       requestBody: TestBlock,
+                       apiToken:    knownToken,
+               })
+
+       ExpectStatusCode(t,
+               "Authenticated PUT, signed locator, with server key",
+               http.StatusOK, response)
+       responseLocator := strings.TrimSpace(response.Body.String())
+       if VerifySignature(responseLocator, knownToken) != nil {
+               t.Errorf("Authenticated PUT, signed locator, with server key:\n"+
+                       "response '%s' does not contain a valid signature",
+                       responseLocator)
+       }
+
+       // Unauthenticated PUT, unsigned locator
+       // => OK
+       response = IssueRequest(
+               &RequestTester{
+                       method:      "PUT",
+                       uri:         unsignedLocator,
+                       requestBody: TestBlock,
+               })
+
+       ExpectStatusCode(t,
+               "Unauthenticated PUT, unsigned locator, with server key",
+               http.StatusOK, response)
+       ExpectBody(t,
+               "Unauthenticated PUT, unsigned locator, with server key",
+               TestHashPutResp, response)
+}
+
+func TestPutAndDeleteSkipReadonlyVolumes(t *testing.T) {
+       defer teardown()
+       theConfig.systemAuthToken = "fake-data-manager-token"
+       vols := []*MockVolume{CreateMockVolume(), CreateMockVolume()}
+       vols[0].Readonly = true
+       KeepVM = MakeRRVolumeManager([]Volume{vols[0], vols[1]})
+       defer KeepVM.Close()
+       IssueRequest(
+               &RequestTester{
+                       method:      "PUT",
+                       uri:         "/" + TestHash,
+                       requestBody: TestBlock,
+               })
+       defer func(orig bool) {
+               theConfig.EnableDelete = orig
+       }(theConfig.EnableDelete)
+       theConfig.EnableDelete = true
+       IssueRequest(
+               &RequestTester{
+                       method:      "DELETE",
+                       uri:         "/" + TestHash,
+                       requestBody: TestBlock,
+                       apiToken:    theConfig.systemAuthToken,
+               })
+       type expect struct {
+               volnum    int
+               method    string
+               callcount int
+       }
+       for _, e := range []expect{
+               {0, "Get", 0},
+               {0, "Compare", 0},
+               {0, "Touch", 0},
+               {0, "Put", 0},
+               {0, "Delete", 0},
+               {1, "Get", 0},
+               {1, "Compare", 1},
+               {1, "Touch", 1},
+               {1, "Put", 1},
+               {1, "Delete", 1},
+       } {
+               if calls := vols[e.volnum].CallCount(e.method); calls != e.callcount {
+                       t.Errorf("Got %d %s() on vol %d, expect %d", calls, e.method, e.volnum, e.callcount)
+               }
+       }
+}
+
+// Test /index requests:
+//   - unauthenticated /index request
+//   - unauthenticated /index/prefix request
+//   - authenticated   /index request        | non-superuser
+//   - authenticated   /index/prefix request | non-superuser
+//   - authenticated   /index request        | superuser
+//   - authenticated   /index/prefix request | superuser
+//
+// The only /index requests that should succeed are those issued by the
+// superuser. They should pass regardless of the value of RequireSignatures.
+//
+func TestIndexHandler(t *testing.T) {
+       defer teardown()
+
+       // Set up Keep volumes and populate them.
+       // Include multiple blocks on different volumes, and
+       // some metadata files (which should be omitted from index listings)
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       vols := KeepVM.AllWritable()
+       vols[0].Put(context.Background(), TestHash, TestBlock)
+       vols[1].Put(context.Background(), TestHash2, TestBlock2)
+       vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
+       vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))
+
+       theConfig.systemAuthToken = "DATA MANAGER TOKEN"
+
+       unauthenticatedReq := &RequestTester{
+               method: "GET",
+               uri:    "/index",
+       }
+       authenticatedReq := &RequestTester{
+               method:   "GET",
+               uri:      "/index",
+               apiToken: knownToken,
+       }
+       superuserReq := &RequestTester{
+               method:   "GET",
+               uri:      "/index",
+               apiToken: theConfig.systemAuthToken,
+       }
+       unauthPrefixReq := &RequestTester{
+               method: "GET",
+               uri:    "/index/" + TestHash[0:3],
+       }
+       authPrefixReq := &RequestTester{
+               method:   "GET",
+               uri:      "/index/" + TestHash[0:3],
+               apiToken: knownToken,
+       }
+       superuserPrefixReq := &RequestTester{
+               method:   "GET",
+               uri:      "/index/" + TestHash[0:3],
+               apiToken: theConfig.systemAuthToken,
+       }
+       superuserNoSuchPrefixReq := &RequestTester{
+               method:   "GET",
+               uri:      "/index/abcd",
+               apiToken: theConfig.systemAuthToken,
+       }
+       superuserInvalidPrefixReq := &RequestTester{
+               method:   "GET",
+               uri:      "/index/xyz",
+               apiToken: theConfig.systemAuthToken,
+       }
+
+       // -------------------------------------------------------------
+       // Only the superuser should be allowed to issue /index requests.
+
+       // ---------------------------
+       // RequireSignatures enabled
+       // This setting should not affect the outcome of these requests.
+       theConfig.RequireSignatures = true
+
+       // unauthenticated /index request
+       // => UnauthorizedError
+       response := IssueRequest(unauthenticatedReq)
+       ExpectStatusCode(t,
+               "RequireSignatures on, unauthenticated request",
+               UnauthorizedError.HTTPCode,
+               response)
+
+       // unauthenticated /index/prefix request
+       // => UnauthorizedError
+       response = IssueRequest(unauthPrefixReq)
+       ExpectStatusCode(t,
+               "permissions on, unauthenticated /index/prefix request",
+               UnauthorizedError.HTTPCode,
+               response)
+
+       // authenticated /index request, non-superuser
+       // => UnauthorizedError
+       response = IssueRequest(authenticatedReq)
+       ExpectStatusCode(t,
+               "permissions on, authenticated request, non-superuser",
+               UnauthorizedError.HTTPCode,
+               response)
+
+       // authenticated /index/prefix request, non-superuser
+       // => UnauthorizedError
+       response = IssueRequest(authPrefixReq)
+       ExpectStatusCode(t,
+               "permissions on, authenticated /index/prefix request, non-superuser",
+               UnauthorizedError.HTTPCode,
+               response)
+
+       // superuser /index request
+       // => OK
+       response = IssueRequest(superuserReq)
+       ExpectStatusCode(t,
+               "permissions on, superuser request",
+               http.StatusOK,
+               response)
+
+       // ----------------------------
+       // RequireSignatures disabled
+       // Valid requests should still succeed.
+       theConfig.RequireSignatures = false
+
+       // superuser /index request
+       // => OK
+       response = IssueRequest(superuserReq)
+       ExpectStatusCode(t,
+               "permissions off, superuser request",
+               http.StatusOK,
+               response)
+
+       expected := `^` + TestHash + `\+\d+ \d+\n` +
+               TestHash2 + `\+\d+ \d+\n\n$`
+       match, _ := regexp.MatchString(expected, response.Body.String())
+       if !match {
+               t.Errorf(
+                       "permissions off, superuser request: expected %s, got:\n%s",
+                       expected, response.Body.String())
+       }
+
+       // superuser /index/prefix request
+       // => OK
+       response = IssueRequest(superuserPrefixReq)
+       ExpectStatusCode(t,
+               "permissions off, superuser /index/prefix request",
+               http.StatusOK,
+               response)
+
+       expected = `^` + TestHash + `\+\d+ \d+\n\n$`
+       match, _ = regexp.MatchString(expected, response.Body.String())
+       if !match {
+               t.Errorf(
+                       "permissions off, superuser /index/prefix request: expected %s, got:\n%s",
+                       expected, response.Body.String())
+       }
+
+       // superuser /index/{no-such-prefix} request
+       // => OK
+       response = IssueRequest(superuserNoSuchPrefixReq)
+       ExpectStatusCode(t,
+               "permissions off, superuser /index/{no-such-prefix} request",
+               http.StatusOK,
+               response)
+
+       if response.Body.String() != "\n" {
+               t.Errorf("Expected newline-only response for %s, got %q", superuserNoSuchPrefixReq.uri, response.Body.String())
+       }
+
+       // superuser /index/{invalid-prefix} request
+       // => StatusBadRequest
+       response = IssueRequest(superuserInvalidPrefixReq)
+       ExpectStatusCode(t,
+               "permissions off, superuser /index/{invalid-prefix} request",
+               http.StatusBadRequest,
+               response)
+}
+
+// TestDeleteHandler
+//
+// Cases tested:
+//
+//   With no token and with a non-data-manager token:
+//   * Delete existing block
+//     (test for 403 Forbidden, confirm block not deleted)
+//
+//   With data manager token:
+//
+//   * Delete existing block
+//     (test for 200 OK, response counts, confirm block deleted)
+//
+//   * Delete nonexistent block
+//     (test for 404 Not Found)
+//
+//   TODO(twp):
+//
+//   * Delete block on read-only and read-write volume
+//     (test for 200 OK, response with copies_deleted=1,
+//     copies_failed=1, confirm block deleted only on r/w volume)
+//
+//   * Delete block on read-only volume only
+//     (test for 200 OK, response with copies_deleted=0, copies_failed=1,
+//     confirm block not deleted)
+//
+func TestDeleteHandler(t *testing.T) {
+       defer teardown()
+
+       // Set up two test Keep volumes and store a block on the first one.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       vols := KeepVM.AllWritable()
+       vols[0].Put(context.Background(), TestHash, TestBlock)
+
+       // Explicitly set the BlobSignatureTTL to 0 for these
+       // tests, to ensure the MockVolume deletes the blocks
+       // even though they have just been created.
+       theConfig.BlobSignatureTTL = arvados.Duration(0)
+
+       var userToken = "NOT DATA MANAGER TOKEN"
+       theConfig.systemAuthToken = "DATA MANAGER TOKEN"
+
+       theConfig.EnableDelete = true
+
+       unauthReq := &RequestTester{
+               method: "DELETE",
+               uri:    "/" + TestHash,
+       }
+
+       userReq := &RequestTester{
+               method:   "DELETE",
+               uri:      "/" + TestHash,
+               apiToken: userToken,
+       }
+
+       superuserExistingBlockReq := &RequestTester{
+               method:   "DELETE",
+               uri:      "/" + TestHash,
+               apiToken: theConfig.systemAuthToken,
+       }
+
+       superuserNonexistentBlockReq := &RequestTester{
+               method:   "DELETE",
+               uri:      "/" + TestHash2,
+               apiToken: theConfig.systemAuthToken,
+       }
+
+       // Unauthenticated request returns PermissionError.
+       var response *httptest.ResponseRecorder
+       response = IssueRequest(unauthReq)
+       ExpectStatusCode(t,
+               "unauthenticated request",
+               PermissionError.HTTPCode,
+               response)
+
+       // Authenticated non-admin request returns PermissionError.
+       response = IssueRequest(userReq)
+       ExpectStatusCode(t,
+               "authenticated non-admin request",
+               PermissionError.HTTPCode,
+               response)
+
+       // Authenticated admin request for nonexistent block.
+       type deletecounter struct {
+               Deleted int `json:"copies_deleted"`
+               Failed  int `json:"copies_failed"`
+       }
+       var responseDc, expectedDc deletecounter
+
+       response = IssueRequest(superuserNonexistentBlockReq)
+       ExpectStatusCode(t,
+               "data manager request, nonexistent block",
+               http.StatusNotFound,
+               response)
+
+       // Authenticated admin request for existing block while EnableDelete is false.
+       theConfig.EnableDelete = false
+       response = IssueRequest(superuserExistingBlockReq)
+       ExpectStatusCode(t,
+               "authenticated request, existing block, method disabled",
+               MethodDisabledError.HTTPCode,
+               response)
+       theConfig.EnableDelete = true
+
+       // Authenticated admin request for existing block.
+       response = IssueRequest(superuserExistingBlockReq)
+       ExpectStatusCode(t,
+               "data manager request, existing block",
+               http.StatusOK,
+               response)
+       // Expect response {"copies_deleted":1,"copies_failed":0}
+       expectedDc = deletecounter{1, 0}
+       json.NewDecoder(response.Body).Decode(&responseDc)
+       if responseDc != expectedDc {
+               t.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
+                       expectedDc, responseDc)
+       }
+       // Confirm the block has been deleted
+       buf := make([]byte, BlockSize)
+       _, err := vols[0].Get(context.Background(), TestHash, buf)
+       var blockDeleted = os.IsNotExist(err)
+       if !blockDeleted {
+               t.Error("superuserExistingBlockReq: block not deleted")
+       }
+
+       // A DELETE request on a block newer than BlobSignatureTTL
+       // should return success but leave the block on the volume.
+       vols[0].Put(context.Background(), TestHash, TestBlock)
+       theConfig.BlobSignatureTTL = arvados.Duration(time.Hour)
+
+       response = IssueRequest(superuserExistingBlockReq)
+       ExpectStatusCode(t,
+               "data manager request, existing block",
+               http.StatusOK,
+               response)
+       // Expect response {"copies_deleted":1,"copies_failed":0}
+       expectedDc = deletecounter{1, 0}
+       json.NewDecoder(response.Body).Decode(&responseDc)
+       if responseDc != expectedDc {
+               t.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
+                       expectedDc, responseDc)
+       }
+       // Confirm the block has NOT been deleted.
+       _, err = vols[0].Get(context.Background(), TestHash, buf)
+       if err != nil {
+               t.Errorf("testing delete on new block: %s\n", err)
+       }
+}
+
+// TestPullHandler
+//
+// Test handling of PUT /pull requests.
+//
+// Cases tested: syntactically valid and invalid pull lists, from the
+// data manager and from unprivileged users:
+//
+//   1. Valid pull list from an ordinary user
+//      (expected result: 401 Unauthorized)
+//
+//   2. Invalid pull request from an ordinary user
+//      (expected result: 401 Unauthorized)
+//
+//   3. Valid pull request from the data manager
+//      (expected result: 200 OK with response body "Received 3 pull
+//      requests")
+//
+//   4. Invalid pull request from the data manager
+//      (expected result: 400 Bad Request)
+//
+// Test that in the end, the pull manager received a good pull list with
+// the expected number of requests.
+//
+// TODO(twp): test concurrency: launch 100 goroutines to update the
+// pull list simultaneously.  Make sure that none of them return 400
+// Bad Request and that pullq.GetList() returns a valid list.
+//
+func TestPullHandler(t *testing.T) {
+       defer teardown()
+
+       var userToken = "USER TOKEN"
+       theConfig.systemAuthToken = "DATA MANAGER TOKEN"
+
+       pullq = NewWorkQueue()
+
+       goodJSON := []byte(`[
+               {
+                       "locator":"locator_with_two_servers",
+                       "servers":[
+                               "server1",
+                               "server2"
+                       ]
+               },
+               {
+                       "locator":"locator_with_no_servers",
+                       "servers":[]
+               },
+               {
+                       "locator":"",
+                       "servers":["empty_locator"]
+               }
+       ]`)
+
+       badJSON := []byte(`{ "key":"I'm a little teapot" }`)
+
+       type pullTest struct {
+               name         string
+               req          RequestTester
+               responseCode int
+               responseBody string
+       }
+       var testcases = []pullTest{
+               {
+                       "Valid pull list from an ordinary user",
+                       RequestTester{"/pull", userToken, "PUT", goodJSON},
+                       http.StatusUnauthorized,
+                       "Unauthorized\n",
+               },
+               {
+                       "Invalid pull request from an ordinary user",
+                       RequestTester{"/pull", userToken, "PUT", badJSON},
+                       http.StatusUnauthorized,
+                       "Unauthorized\n",
+               },
+               {
+                       "Valid pull request from the data manager",
+                       RequestTester{"/pull", theConfig.systemAuthToken, "PUT", goodJSON},
+                       http.StatusOK,
+                       "Received 3 pull requests\n",
+               },
+               {
+                       "Invalid pull request from the data manager",
+                       RequestTester{"/pull", theConfig.systemAuthToken, "PUT", badJSON},
+                       http.StatusBadRequest,
+                       "",
+               },
+       }
+
+       for _, tst := range testcases {
+               response := IssueRequest(&tst.req)
+               ExpectStatusCode(t, tst.name, tst.responseCode, response)
+               ExpectBody(t, tst.name, tst.responseBody, response)
+       }
+
+       // The Keep pull manager should have received one good list with 3
+       // requests on it.
+       for i := 0; i < 3; i++ {
+               item := <-pullq.NextItem
+               if _, ok := item.(PullRequest); !ok {
+                       t.Errorf("item %v could not be parsed as a PullRequest", item)
+               }
+       }
+
+       expectChannelEmpty(t, pullq.NextItem)
+}
+
+// TestTrashHandler
+//
+// Cases tested: syntactically valid and invalid trash lists, from the
+// data manager and from unprivileged users:
+//
+//   1. Valid trash list from an ordinary user
+//      (expected result: 401 Unauthorized)
+//
+//   2. Invalid trash list from an ordinary user
+//      (expected result: 401 Unauthorized)
+//
+//   3. Valid trash list from the data manager
+//      (expected result: 200 OK with response body "Received 3 trash
+//      requests")
+//
+//   4. Invalid trash list from the data manager
+//      (expected result: 400 Bad Request)
+//
+// Test that in the end, the trash collector received a good trash
+// list with the expected number of requests.
+//
+// TODO(twp): test concurrency: launch 100 goroutines to update the
+// trash list simultaneously.  Make sure that none of them return 400
+// Bad Request and that trashq.GetList() returns a valid list.
+//
+func TestTrashHandler(t *testing.T) {
+       defer teardown()
+
+       var userToken = "USER TOKEN"
+       theConfig.systemAuthToken = "DATA MANAGER TOKEN"
+
+       trashq = NewWorkQueue()
+
+       goodJSON := []byte(`[
+               {
+                       "locator":"block1",
+                       "block_mtime":1409082153
+               },
+               {
+                       "locator":"block2",
+                       "block_mtime":1409082153
+               },
+               {
+                       "locator":"block3",
+                       "block_mtime":1409082153
+               }
+       ]`)
+
+       badJSON := []byte(`I am not a valid JSON string`)
+
+       type trashTest struct {
+               name         string
+               req          RequestTester
+               responseCode int
+               responseBody string
+       }
+
+       var testcases = []trashTest{
+               {
+                       "Valid trash list from an ordinary user",
+                       RequestTester{"/trash", userToken, "PUT", goodJSON},
+                       http.StatusUnauthorized,
+                       "Unauthorized\n",
+               },
+               {
+                       "Invalid trash list from an ordinary user",
+                       RequestTester{"/trash", userToken, "PUT", badJSON},
+                       http.StatusUnauthorized,
+                       "Unauthorized\n",
+               },
+               {
+                       "Valid trash list from the data manager",
+                       RequestTester{"/trash", theConfig.systemAuthToken, "PUT", goodJSON},
+                       http.StatusOK,
+                       "Received 3 trash requests\n",
+               },
+               {
+                       "Invalid trash list from the data manager",
+                       RequestTester{"/trash", theConfig.systemAuthToken, "PUT", badJSON},
+                       http.StatusBadRequest,
+                       "",
+               },
+       }
+
+       for _, tst := range testcases {
+               response := IssueRequest(&tst.req)
+               ExpectStatusCode(t, tst.name, tst.responseCode, response)
+               ExpectBody(t, tst.name, tst.responseBody, response)
+       }
+
+       // The trash collector should have received one good list with 3
+       // requests on it.
+       for i := 0; i < 3; i++ {
+               item := <-trashq.NextItem
+               if _, ok := item.(TrashRequest); !ok {
+                       t.Errorf("item %v could not be parsed as a TrashRequest", item)
+               }
+       }
+
+       expectChannelEmpty(t, trashq.NextItem)
+}
+
+// ====================
+// Helper functions
+// ====================
+
+// IssueRequest executes an HTTP request described by rt against a
+// REST router.  It returns the recorded HTTP response.
+func IssueRequest(rt *RequestTester) *httptest.ResponseRecorder {
+       response := httptest.NewRecorder()
+       body := bytes.NewReader(rt.requestBody)
+       req, _ := http.NewRequest(rt.method, rt.uri, body)
+       if rt.apiToken != "" {
+               req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
+       }
+       loggingRouter := MakeRESTRouter(testCluster, prometheus.NewRegistry())
+       loggingRouter.ServeHTTP(response, req)
+       return response
+}
+
+// IssueHealthCheckRequest is like IssueRequest, but sends the API
+// token in a "Bearer" Authorization header as expected by the health
+// check handler.
+func IssueHealthCheckRequest(rt *RequestTester) *httptest.ResponseRecorder {
+       response := httptest.NewRecorder()
+       body := bytes.NewReader(rt.requestBody)
+       req, _ := http.NewRequest(rt.method, rt.uri, body)
+       if rt.apiToken != "" {
+               req.Header.Set("Authorization", "Bearer "+rt.apiToken)
+       }
+       loggingRouter := MakeRESTRouter(testCluster, prometheus.NewRegistry())
+       loggingRouter.ServeHTTP(response, req)
+       return response
+}
+
+// ExpectStatusCode checks whether a response has the specified status code,
+// and reports a test failure if not.
+func ExpectStatusCode(
+       t *testing.T,
+       testname string,
+       expectedStatus int,
+       response *httptest.ResponseRecorder) {
+       if response.Code != expectedStatus {
+               t.Errorf("%s: expected status %d, got %+v",
+                       testname, expectedStatus, response)
+       }
+}
+
+// ExpectBody checks whether a response body matches the expected
+// body, and reports a test failure if not.
+func ExpectBody(
+       t *testing.T,
+       testname string,
+       expectedBody string,
+       response *httptest.ResponseRecorder) {
+       if expectedBody != "" && response.Body.String() != expectedBody {
+               t.Errorf("%s: expected response body '%s', got %+v",
+                       testname, expectedBody, response)
+       }
+}
+
+// See #7121
+func TestPutNeedsOnlyOneBuffer(t *testing.T) {
+       defer teardown()
+       KeepVM = MakeTestVolumeManager(1)
+       defer KeepVM.Close()
+
+       defer func(orig *bufferPool) {
+               bufs = orig
+       }(bufs)
+       bufs = newBufferPool(1, BlockSize)
+
+       ok := make(chan struct{})
+       go func() {
+               for i := 0; i < 2; i++ {
+                       response := IssueRequest(
+                               &RequestTester{
+                                       method:      "PUT",
+                                       uri:         "/" + TestHash,
+                                       requestBody: TestBlock,
+                               })
+                       ExpectStatusCode(t,
+                               "TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
+               }
+               ok <- struct{}{}
+       }()
+
+       select {
+       case <-ok:
+       case <-time.After(time.Second):
+               t.Fatal("PUT deadlocks with MaxBuffers==1")
+       }
+}
+
+// Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
+// leak.
+func TestPutHandlerNoBufferleak(t *testing.T) {
+       defer teardown()
+
+       // Prepare two test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       ok := make(chan bool)
+       go func() {
+               for i := 0; i < theConfig.MaxBuffers+1; i++ {
+                       // Unauthenticated request, no server key
+                       // => OK (unsigned response)
+                       unsignedLocator := "/" + TestHash
+                       response := IssueRequest(
+                               &RequestTester{
+                                       method:      "PUT",
+                                       uri:         unsignedLocator,
+                                       requestBody: TestBlock,
+                               })
+                       ExpectStatusCode(t,
+                               "TestPutHandlerNoBufferleak", http.StatusOK, response)
+                       ExpectBody(t,
+                               "TestPutHandlerNoBufferleak",
+                               TestHashPutResp, response)
+               }
+               ok <- true
+       }()
+       select {
+       case <-time.After(20 * time.Second):
+               // If the buffer pool leaks, the test goroutine hangs.
+               t.Fatal("test did not finish, assuming pool leaked")
+       case <-ok:
+       }
+}
+
+type notifyingResponseRecorder struct {
+       *httptest.ResponseRecorder
+       closer chan bool
+}
+
+func (r *notifyingResponseRecorder) CloseNotify() <-chan bool {
+       return r.closer
+}
+
+func TestGetHandlerClientDisconnect(t *testing.T) {
+       defer func(was bool) {
+               theConfig.RequireSignatures = was
+       }(theConfig.RequireSignatures)
+       theConfig.RequireSignatures = false
+
+       defer func(orig *bufferPool) {
+               bufs = orig
+       }(bufs)
+       bufs = newBufferPool(1, BlockSize)
+       defer bufs.Put(bufs.Get(BlockSize))
+
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       if err := KeepVM.AllWritable()[0].Put(context.Background(), TestHash, TestBlock); err != nil {
+               t.Error(err)
+       }
+
+       resp := &notifyingResponseRecorder{
+               ResponseRecorder: httptest.NewRecorder(),
+               closer:           make(chan bool, 1),
+       }
+       if _, ok := http.ResponseWriter(resp).(http.CloseNotifier); !ok {
+               t.Fatal("notifyingResponseRecorder is broken")
+       }
+       // If anyone asks, the client has disconnected.
+       resp.closer <- true
+
+       ok := make(chan struct{})
+       go func() {
+               req, _ := http.NewRequest("GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
+               MakeRESTRouter(testCluster, prometheus.NewRegistry()).ServeHTTP(resp, req)
+               ok <- struct{}{}
+       }()
+
+       select {
+       case <-time.After(20 * time.Second):
+               t.Fatal("request took >20s, close notifier must be broken")
+       case <-ok:
+       }
+
+       ExpectStatusCode(t, "client disconnect", http.StatusServiceUnavailable, resp.ResponseRecorder)
+       for i, v := range KeepVM.AllWritable() {
+               if calls := v.(*MockVolume).called["GET"]; calls != 0 {
+                       t.Errorf("volume %d got %d calls, expected 0", i, calls)
+               }
+       }
+}
+
+// Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
+// leak.
+func TestGetHandlerNoBufferLeak(t *testing.T) {
+       defer teardown()
+
+       // Prepare two test Keep volumes. Our block is stored on the first volume.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       vols := KeepVM.AllWritable()
+       if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
+               t.Error(err)
+       }
+
+       ok := make(chan bool)
+       go func() {
+               for i := 0; i < theConfig.MaxBuffers+1; i++ {
+                       // Unauthenticated request, unsigned locator
+                       // => OK
+                       unsignedLocator := "/" + TestHash
+                       response := IssueRequest(
+                               &RequestTester{
+                                       method: "GET",
+                                       uri:    unsignedLocator,
+                               })
+                       ExpectStatusCode(t,
+                               "Unauthenticated request, unsigned locator", http.StatusOK, response)
+                       ExpectBody(t,
+                               "Unauthenticated request, unsigned locator",
+                               string(TestBlock),
+                               response)
+               }
+               ok <- true
+       }()
+       select {
+       case <-time.After(20 * time.Second):
+               // If the buffer pool leaks, the test goroutine hangs.
+               t.Fatal("test did not finish, assuming pool leaked")
+       case <-ok:
+       }
+}
+
+func TestPutReplicationHeader(t *testing.T) {
+       defer teardown()
+
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       resp := IssueRequest(&RequestTester{
+               method:      "PUT",
+               uri:         "/" + TestHash,
+               requestBody: TestBlock,
+       })
+       if r := resp.Header().Get("X-Keep-Replicas-Stored"); r != "1" {
+               t.Errorf("Got X-Keep-Replicas-Stored: %q, expected %q", r, "1")
+       }
+}
+
+func TestUntrashHandler(t *testing.T) {
+       defer teardown()
+
+       // Set up Keep volumes
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+       vols := KeepVM.AllWritable()
+       vols[0].Put(context.Background(), TestHash, TestBlock)
+
+       theConfig.systemAuthToken = "DATA MANAGER TOKEN"
+
+       // unauthenticatedReq => UnauthorizedError
+       unauthenticatedReq := &RequestTester{
+               method: "PUT",
+               uri:    "/untrash/" + TestHash,
+       }
+       response := IssueRequest(unauthenticatedReq)
+       ExpectStatusCode(t,
+               "Unauthenticated request",
+               UnauthorizedError.HTTPCode,
+               response)
+
+       // notDataManagerReq => UnauthorizedError
+       notDataManagerReq := &RequestTester{
+               method:   "PUT",
+               uri:      "/untrash/" + TestHash,
+               apiToken: knownToken,
+       }
+
+       response = IssueRequest(notDataManagerReq)
+       ExpectStatusCode(t,
+               "Non-datamanager token",
+               UnauthorizedError.HTTPCode,
+               response)
+
+       // datamanagerWithBadHashReq => StatusBadRequest
+       datamanagerWithBadHashReq := &RequestTester{
+               method:   "PUT",
+               uri:      "/untrash/thisisnotalocator",
+               apiToken: theConfig.systemAuthToken,
+       }
+       response = IssueRequest(datamanagerWithBadHashReq)
+       ExpectStatusCode(t,
+               "Bad locator in untrash request",
+               http.StatusBadRequest,
+               response)
+
+       // datamanagerWrongMethodReq => StatusBadRequest
+       datamanagerWrongMethodReq := &RequestTester{
+               method:   "GET",
+               uri:      "/untrash/" + TestHash,
+               apiToken: theConfig.systemAuthToken,
+       }
+       response = IssueRequest(datamanagerWrongMethodReq)
+       ExpectStatusCode(t,
+               "Only PUT method is supported for untrash",
+               http.StatusMethodNotAllowed,
+               response)
+
+       // datamanagerReq => StatusOK
+       datamanagerReq := &RequestTester{
+               method:   "PUT",
+               uri:      "/untrash/" + TestHash,
+               apiToken: theConfig.systemAuthToken,
+       }
+       response = IssueRequest(datamanagerReq)
+       ExpectStatusCode(t,
+               "",
+               http.StatusOK,
+               response)
+       expected := "Successfully untrashed on: [MockVolume],[MockVolume]"
+       if response.Body.String() != expected {
+               t.Errorf(
+                       "Untrash response mismatched: expected %s, got:\n%s",
+                       expected, response.Body.String())
+       }
+}
+
+func TestUntrashHandlerWithNoWritableVolumes(t *testing.T) {
+       defer teardown()
+
+       // Set up readonly Keep volumes
+       vols := []*MockVolume{CreateMockVolume(), CreateMockVolume()}
+       vols[0].Readonly = true
+       vols[1].Readonly = true
+       KeepVM = MakeRRVolumeManager([]Volume{vols[0], vols[1]})
+       defer KeepVM.Close()
+
+       theConfig.systemAuthToken = "DATA MANAGER TOKEN"
+
+       // datamanagerReq => StatusOK
+       datamanagerReq := &RequestTester{
+               method:   "PUT",
+               uri:      "/untrash/" + TestHash,
+               apiToken: theConfig.systemAuthToken,
+       }
+       response := IssueRequest(datamanagerReq)
+       ExpectStatusCode(t,
+               "No writable volumes",
+               http.StatusNotFound,
+               response)
+}
+
+func TestHealthCheckPing(t *testing.T) {
+       theConfig.ManagementToken = arvadostest.ManagementToken
+       pingReq := &RequestTester{
+               method:   "GET",
+               uri:      "/_health/ping",
+               apiToken: arvadostest.ManagementToken,
+       }
+       response := IssueHealthCheckRequest(pingReq)
+       ExpectStatusCode(t,
+               "",
+               http.StatusOK,
+               response)
+       want := `{"health":"OK"}`
+       if !strings.Contains(response.Body.String(), want) {
+               t.Errorf("expected response to include %s: got %s", want, response.Body.String())
+       }
+}
diff --git a/services/keepstore/handlers.go b/services/keepstore/handlers.go
new file mode 100644 (file)
index 0000000..51dd73a
--- /dev/null
@@ -0,0 +1,890 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "container/list"
+       "context"
+       "crypto/md5"
+       "encoding/json"
+       "fmt"
+       "io"
+       "net/http"
+       "os"
+       "regexp"
+       "runtime"
+       "strconv"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/health"
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "github.com/gorilla/mux"
+       "github.com/prometheus/client_golang/prometheus"
+)
+
+type router struct {
+       *mux.Router
+       limiter     httpserver.RequestCounter
+       cluster     *arvados.Cluster
+       remoteProxy remoteProxy
+       metrics     *nodeMetrics
+}
+
+// MakeRESTRouter returns a new router that forwards all Keep requests
+// to the appropriate handlers.
+func MakeRESTRouter(cluster *arvados.Cluster, reg *prometheus.Registry) http.Handler {
+       rtr := &router{
+               Router:  mux.NewRouter(),
+               cluster: cluster,
+               metrics: &nodeMetrics{reg: reg},
+       }
+
+       rtr.HandleFunc(
+               `/{hash:[0-9a-f]{32}}`, rtr.handleGET).Methods("GET", "HEAD")
+       rtr.HandleFunc(
+               `/{hash:[0-9a-f]{32}}+{hints}`,
+               rtr.handleGET).Methods("GET", "HEAD")
+
+       rtr.HandleFunc(`/{hash:[0-9a-f]{32}}`, rtr.handlePUT).Methods("PUT")
+       rtr.HandleFunc(`/{hash:[0-9a-f]{32}}`, DeleteHandler).Methods("DELETE")
+       // List all blocks stored here. Privileged client only.
+       rtr.HandleFunc(`/index`, rtr.IndexHandler).Methods("GET", "HEAD")
+       // List blocks stored here whose hash has the given prefix.
+       // Privileged client only.
+       rtr.HandleFunc(`/index/{prefix:[0-9a-f]{0,32}}`, rtr.IndexHandler).Methods("GET", "HEAD")
+
+       // Internals/debugging info (runtime.MemStats)
+       rtr.HandleFunc(`/debug.json`, rtr.DebugHandler).Methods("GET", "HEAD")
+
+       // List volumes: path, device number, bytes used/avail.
+       rtr.HandleFunc(`/status.json`, rtr.StatusHandler).Methods("GET", "HEAD")
+
+       // List mounts: UUID, readonly, tier, device ID, ...
+       rtr.HandleFunc(`/mounts`, rtr.MountsHandler).Methods("GET")
+       rtr.HandleFunc(`/mounts/{uuid}/blocks`, rtr.IndexHandler).Methods("GET")
+       rtr.HandleFunc(`/mounts/{uuid}/blocks/`, rtr.IndexHandler).Methods("GET")
+
+       // Replace the current pull queue.
+       rtr.HandleFunc(`/pull`, PullHandler).Methods("PUT")
+
+       // Replace the current trash queue.
+       rtr.HandleFunc(`/trash`, TrashHandler).Methods("PUT")
+
+       // Untrash moves blocks from trash back into store
+       rtr.HandleFunc(`/untrash/{hash:[0-9a-f]{32}}`, UntrashHandler).Methods("PUT")
+
+       rtr.Handle("/_health/{check}", &health.Handler{
+               Token:  theConfig.ManagementToken,
+               Prefix: "/_health/",
+       }).Methods("GET")
+
+       // Any request which does not match any of these routes gets
+       // 400 Bad Request.
+       rtr.NotFoundHandler = http.HandlerFunc(BadRequestHandler)
+
+       rtr.limiter = httpserver.NewRequestLimiter(theConfig.MaxRequests, rtr)
+       rtr.metrics.setupBufferPoolMetrics(bufs)
+       rtr.metrics.setupWorkQueueMetrics(pullq, "pull")
+       rtr.metrics.setupWorkQueueMetrics(trashq, "trash")
+       rtr.metrics.setupRequestMetrics(rtr.limiter)
+
+       instrumented := httpserver.Instrument(rtr.metrics.reg, nil,
+               httpserver.AddRequestIDs(httpserver.LogRequests(nil, rtr.limiter)))
+       return instrumented.ServeAPI(theConfig.ManagementToken, instrumented)
+}
+
+// BadRequestHandler is an http.HandlerFunc that responds with 400
+// Bad Request; the router uses it for requests matching no other route.
+func BadRequestHandler(w http.ResponseWriter, r *http.Request) {
+       http.Error(w, BadRequestError.Error(), BadRequestError.HTTPCode)
+}
+
+func (rtr *router) handleGET(resp http.ResponseWriter, req *http.Request) {
+       ctx, cancel := contextForResponse(context.TODO(), resp)
+       defer cancel()
+
+       locator := req.URL.Path[1:]
+       if strings.Contains(locator, "+R") && !strings.Contains(locator, "+A") {
+               rtr.remoteProxy.Get(ctx, resp, req, rtr.cluster)
+               return
+       }
+
+       if theConfig.RequireSignatures {
+               if err := VerifySignature(locator, GetAPIToken(req)); err != nil {
+                       http.Error(resp, err.Error(), err.(*KeepError).HTTPCode)
+                       return
+               }
+       }
+
+       // TODO: Probe volumes to check whether the block _might_
+       // exist. Some volumes/types could support a quick existence
+       // check without causing other operations to suffer. If all
+       // volumes support that, and assure us the block definitely
+       // isn't here, we can return 404 now instead of waiting for a
+       // buffer.
+
+       buf, err := getBufferWithContext(ctx, bufs, BlockSize)
+       if err != nil {
+               http.Error(resp, err.Error(), http.StatusServiceUnavailable)
+               return
+       }
+       defer bufs.Put(buf)
+
+       size, err := GetBlock(ctx, mux.Vars(req)["hash"], buf, resp)
+       if err != nil {
+               code := http.StatusInternalServerError
+               if err, ok := err.(*KeepError); ok {
+                       code = err.HTTPCode
+               }
+               http.Error(resp, err.Error(), code)
+               return
+       }
+
+       resp.Header().Set("Content-Length", strconv.Itoa(size))
+       resp.Header().Set("Content-Type", "application/octet-stream")
+       resp.Write(buf[:size])
+}
+
+// contextForResponse returns a new context that is cancelled when
+// resp's CloseNotifier fires.
+func contextForResponse(parent context.Context, resp http.ResponseWriter) (context.Context, context.CancelFunc) {
+       ctx, cancel := context.WithCancel(parent)
+       if cn, ok := resp.(http.CloseNotifier); ok {
+               go func(c <-chan bool) {
+                       select {
+                       case <-c:
+                               theConfig.debugLogf("cancel context")
+                               cancel()
+                       case <-ctx.Done():
+                       }
+               }(cn.CloseNotify())
+       }
+       return ctx, cancel
+}
+
+// getBufferWithContext gets a buffer from the pool -- but gives up
+// and returns a non-nil error if ctx ends before a buffer is ready.
+func getBufferWithContext(ctx context.Context, bufs *bufferPool, bufSize int) ([]byte, error) {
+       bufReady := make(chan []byte)
+       go func() {
+               bufReady <- bufs.Get(bufSize)
+       }()
+       select {
+       case buf := <-bufReady:
+               return buf, nil
+       case <-ctx.Done():
+               go func() {
+                       // Even if closeNotifier happened first, we
+                       // need to keep waiting for our buf so we can
+                       // return it to the pool.
+                       bufs.Put(<-bufReady)
+               }()
+               return nil, ErrClientDisconnect
+       }
+}
+
+func (rtr *router) handlePUT(resp http.ResponseWriter, req *http.Request) {
+       ctx, cancel := contextForResponse(context.TODO(), resp)
+       defer cancel()
+
+       hash := mux.Vars(req)["hash"]
+
+       // Detect as many error conditions as possible before reading
+       // the body: avoid transmitting data that will not end up
+       // being written anyway.
+
+       if req.ContentLength == -1 {
+               http.Error(resp, SizeRequiredError.Error(), SizeRequiredError.HTTPCode)
+               return
+       }
+
+       if req.ContentLength > BlockSize {
+               http.Error(resp, TooLongError.Error(), TooLongError.HTTPCode)
+               return
+       }
+
+       if len(KeepVM.AllWritable()) == 0 {
+               http.Error(resp, FullError.Error(), FullError.HTTPCode)
+               return
+       }
+
+       buf, err := getBufferWithContext(ctx, bufs, int(req.ContentLength))
+       if err != nil {
+               http.Error(resp, err.Error(), http.StatusServiceUnavailable)
+               return
+       }
+
+       _, err = io.ReadFull(req.Body, buf)
+       if err != nil {
+               http.Error(resp, err.Error(), 500)
+               bufs.Put(buf)
+               return
+       }
+
+       replication, err := PutBlock(ctx, buf, hash)
+       bufs.Put(buf)
+
+       if err != nil {
+               code := http.StatusInternalServerError
+               if err, ok := err.(*KeepError); ok {
+                       code = err.HTTPCode
+               }
+               http.Error(resp, err.Error(), code)
+               return
+       }
+
+       // Success; add a size hint, sign the locator if possible, and
+       // return it to the client.
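+       // An unsigned locator has the form "hash+size"; a signed one, as
+       // produced by SignLocator, also carries a permission hint -- an
+       // illustrative sketch only:
+       //   e4d909c290d0fb1ca068ffaddf22cbd0+4985+A<signature>@<expiry>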
+       returnHash := fmt.Sprintf("%s+%d", hash, req.ContentLength)
+       apiToken := GetAPIToken(req)
+       if theConfig.blobSigningKey != nil && apiToken != "" {
+               expiry := time.Now().Add(theConfig.BlobSignatureTTL.Duration())
+               returnHash = SignLocator(returnHash, apiToken, expiry)
+       }
+       resp.Header().Set("X-Keep-Replicas-Stored", strconv.Itoa(replication))
+       resp.Write([]byte(returnHash + "\n"))
+}
+
+// IndexHandler responds to "/index", "/index/{prefix}", and
+// "/mounts/{uuid}/blocks" requests.
+func (rtr *router) IndexHandler(resp http.ResponseWriter, req *http.Request) {
+       if !IsSystemAuth(GetAPIToken(req)) {
+               http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
+               return
+       }
+
+       prefix := mux.Vars(req)["prefix"]
+       if prefix == "" {
+               req.ParseForm()
+               prefix = req.Form.Get("prefix")
+       }
+
+       uuid := mux.Vars(req)["uuid"]
+
+       var vols []Volume
+       if uuid == "" {
+               vols = KeepVM.AllReadable()
+       } else if v := KeepVM.Lookup(uuid, false); v == nil {
+               http.Error(resp, "mount not found", http.StatusNotFound)
+               return
+       } else {
+               vols = []Volume{v}
+       }
+
+       for _, v := range vols {
+               if err := v.IndexTo(prefix, resp); err != nil {
+                       // The only errors returned by IndexTo are
+                       // write errors returned by resp.Write(),
+                       // which probably means the client has
+                       // disconnected and this error will never be
+                       // reported to the client -- but it will
+                       // appear in our own error log.
+                       http.Error(resp, err.Error(), http.StatusInternalServerError)
+                       return
+               }
+       }
+       // An empty line at EOF is the only way the client can be
+       // assured the entire index was received.
+       resp.Write([]byte{'\n'})
+}
+
+// MountsHandler responds to "GET /mounts" requests.
+func (rtr *router) MountsHandler(resp http.ResponseWriter, req *http.Request) {
+       err := json.NewEncoder(resp).Encode(KeepVM.Mounts())
+       if err != nil {
+               http.Error(resp, err.Error(), http.StatusInternalServerError)
+       }
+}
+
+// PoolStatus reports buffer pool usage statistics.
+type PoolStatus struct {
+       Alloc uint64 `json:"BytesAllocatedCumulative"`
+       Cap   int    `json:"BuffersMax"`
+       Len   int    `json:"BuffersInUse"`
+}
+
+type volumeStatusEnt struct {
+       Label         string
+       Status        *VolumeStatus `json:",omitempty"`
+       VolumeStats   *ioStats      `json:",omitempty"`
+       InternalStats interface{}   `json:",omitempty"`
+}
+
+// NodeStatus is the status report returned by /status.json requests.
+type NodeStatus struct {
+       Volumes         []*volumeStatusEnt
+       BufferPool      PoolStatus
+       PullQueue       WorkQueueStatus
+       TrashQueue      WorkQueueStatus
+       RequestsCurrent int
+       RequestsMax     int
+       Version         string
+}
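+
+// A /status.json response is the JSON encoding of NodeStatus; an
+// abridged, made-up example:
+//
+//   {"Volumes":[{"Label":"[MockVolume]","Status":{...}}],
+//    "BufferPool":{"BytesAllocatedCumulative":67108864,"BuffersMax":128,"BuffersInUse":1},
+//    "RequestsCurrent":1,"RequestsMax":40,"Version":"dev"}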
+
+var st NodeStatus
+var stLock sync.Mutex
+
+// DebugHandler addresses /debug.json requests.
+func (rtr *router) DebugHandler(resp http.ResponseWriter, req *http.Request) {
+       type debugStats struct {
+               MemStats runtime.MemStats
+       }
+       var ds debugStats
+       runtime.ReadMemStats(&ds.MemStats)
+       err := json.NewEncoder(resp).Encode(&ds)
+       if err != nil {
+               http.Error(resp, err.Error(), 500)
+       }
+}
+
+// StatusHandler addresses /status.json requests.
+func (rtr *router) StatusHandler(resp http.ResponseWriter, req *http.Request) {
+       stLock.Lock()
+       rtr.readNodeStatus(&st)
+       jstat, err := json.Marshal(&st)
+       stLock.Unlock()
+       if err == nil {
+               resp.Write(jstat)
+       } else {
+               log.Printf("json.Marshal: %s", err)
+               log.Printf("NodeStatus = %v", &st)
+               http.Error(resp, err.Error(), 500)
+       }
+}
+
+// readNodeStatus populates the given NodeStatus struct with current values.
+func (rtr *router) readNodeStatus(st *NodeStatus) {
+       st.Version = version
+       vols := KeepVM.AllReadable()
+       if cap(st.Volumes) < len(vols) {
+               st.Volumes = make([]*volumeStatusEnt, len(vols))
+       }
+       st.Volumes = st.Volumes[:0]
+       for _, vol := range vols {
+               var internalStats interface{}
+               if vol, ok := vol.(InternalStatser); ok {
+                       internalStats = vol.InternalStats()
+               }
+               st.Volumes = append(st.Volumes, &volumeStatusEnt{
+                       Label:         vol.String(),
+                       Status:        vol.Status(),
+                       InternalStats: internalStats,
+                       //VolumeStats: KeepVM.VolumeStats(vol),
+               })
+       }
+       st.BufferPool.Alloc = bufs.Alloc()
+       st.BufferPool.Cap = bufs.Cap()
+       st.BufferPool.Len = bufs.Len()
+       st.PullQueue = getWorkQueueStatus(pullq)
+       st.TrashQueue = getWorkQueueStatus(trashq)
+       if rtr.limiter != nil {
+               st.RequestsCurrent = rtr.limiter.Current()
+               st.RequestsMax = rtr.limiter.Max()
+       }
+}
+
+// getWorkQueueStatus returns a WorkQueueStatus for the given queue.
+// If q is nil (which should never happen except in test suites), it
+// returns a zero status value instead of crashing.
+func getWorkQueueStatus(q *WorkQueue) WorkQueueStatus {
+       if q == nil {
+               // This should only happen during tests.
+               return WorkQueueStatus{}
+       }
+       return q.Status()
+}
+
+// DeleteHandler processes DELETE requests.
+//
+// DELETE /{hash:[0-9a-f]{32}} will delete the block with the specified hash
+// from all connected volumes.
+//
+// Only the Data Manager, or an Arvados admin with scope "all", is
+// allowed to issue DELETE requests.  If a DELETE request is not
+// authenticated or is issued by a non-admin user, the server returns
+// a PermissionError.
+//
+// Upon receiving a valid request from an authorized user,
+// DeleteHandler deletes all copies of the specified block on local
+// writable volumes.
+//
+// Response format:
+//
+// If the requested block was not found on any volume, the response
+// code is HTTP 404 Not Found.
+//
+// Otherwise, the response code is 200 OK, with a response body
+// consisting of the JSON message
+//
+//    {"copies_deleted":d,"copies_failed":f}
+//
+// where d and f are integers representing the number of copies that
+// were deleted successfully and unsuccessfully, respectively.
+//
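+// A hypothetical exchange (made-up hash and token):
+//
+//   DELETE /e4d909c290d0fb1ca068ffaddf22cbd0
+//   Authorization: OAuth2 <systemAuthToken>
+//
+//   HTTP/1.1 200 OK
+//   {"copies_deleted":1,"copies_failed":0}
+//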
+func DeleteHandler(resp http.ResponseWriter, req *http.Request) {
+       hash := mux.Vars(req)["hash"]
+
+       // Confirm that this user is an admin and has a token with unlimited scope.
+       var tok = GetAPIToken(req)
+       if tok == "" || !CanDelete(tok) {
+               http.Error(resp, PermissionError.Error(), PermissionError.HTTPCode)
+               return
+       }
+
+       if !theConfig.EnableDelete {
+               http.Error(resp, MethodDisabledError.Error(), MethodDisabledError.HTTPCode)
+               return
+       }
+
+       // Delete copies of this block from all available volumes.
+       // Report how many blocks were successfully deleted, and how
+       // many were found on writable volumes but not deleted.
+       var result struct {
+               Deleted int `json:"copies_deleted"`
+               Failed  int `json:"copies_failed"`
+       }
+       for _, vol := range KeepVM.AllWritable() {
+               if err := vol.Trash(hash); err == nil {
+                       result.Deleted++
+               } else if os.IsNotExist(err) {
+                       continue
+               } else {
+                       result.Failed++
+                       log.Println("DeleteHandler:", err)
+               }
+       }
+
+       var st int
+
+       if result.Deleted == 0 && result.Failed == 0 {
+               st = http.StatusNotFound
+       } else {
+               st = http.StatusOK
+       }
+
+       resp.WriteHeader(st)
+
+       if st == http.StatusOK {
+               if body, err := json.Marshal(result); err == nil {
+                       resp.Write(body)
+               } else {
+                       log.Printf("json.Marshal: %s (result = %v)", err, result)
+                       http.Error(resp, err.Error(), 500)
+               }
+       }
+}
+
+/* PullHandler processes "PUT /pull" requests for the data manager.
+   The request body is a JSON message containing a list of pull
+   requests in the following format:
+
+   [
+      {
+         "locator":"e4d909c290d0fb1ca068ffaddf22cbd0+4985",
+         "servers":[
+            "keep0.qr1hi.arvadosapi.com:25107",
+            "keep1.qr1hi.arvadosapi.com:25108"
+         ]
+      },
+      {
+         "locator":"55ae4d45d2db0793d53f03e805f656e5+658395",
+         "servers":[
+            "10.0.1.5:25107",
+            "10.0.1.6:25107",
+            "10.0.1.7:25108"
+         ]
+      },
+      ...
+   ]
+
+   Each pull request in the list consists of a block locator string
+   and an ordered list of servers.  Keepstore should try to fetch the
+   block from each server in turn.
+
+   If the request has not been sent by the Data Manager, return 401
+   Unauthorized.
+
+   If the JSON unmarshalling fails, return 400 Bad Request.
+*/
+
+// PullRequest consists of a block locator and an ordered list of servers
+type PullRequest struct {
+       Locator string   `json:"locator"`
+       Servers []string `json:"servers"`
+
+       // Destination mount, or "" for "anywhere"
+       MountUUID string `json:"mount_uuid"`
+}
+
+// PullHandler processes "PUT /pull" requests for the data manager.
+func PullHandler(resp http.ResponseWriter, req *http.Request) {
+       // Reject unauthorized requests.
+       if !IsSystemAuth(GetAPIToken(req)) {
+               http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
+               return
+       }
+
+       // Parse the request body.
+       var pr []PullRequest
+       r := json.NewDecoder(req.Body)
+       if err := r.Decode(&pr); err != nil {
+               http.Error(resp, err.Error(), BadRequestError.HTTPCode)
+               return
+       }
+
+       // We have a properly formatted pull list sent from the data
+       // manager.  Report success and send the list to the pull list
+       // manager for further handling.
+       resp.WriteHeader(http.StatusOK)
+       resp.Write([]byte(
+               fmt.Sprintf("Received %d pull requests\n", len(pr))))
+
+       plist := list.New()
+       for _, p := range pr {
+               plist.PushBack(p)
+       }
+       pullq.ReplaceQueue(plist)
+}
+
+// TrashRequest consists of a block locator and its Mtime
+type TrashRequest struct {
+       Locator    string `json:"locator"`
+       BlockMtime int64  `json:"block_mtime"`
+
+       // Target mount, or "" for "everywhere"
+       MountUUID string `json:"mount_uuid"`
+}
+
+// TrashHandler processes "PUT /trash" requests for the data manager.
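+// The request body is a JSON list of TrashRequest objects; a made-up
+// example:
+//
+//   [
+//     {"locator":"e4d909c290d0fb1ca068ffaddf22cbd0","block_mtime":1409082153},
+//     {"locator":"55ae4d45d2db0793d53f03e805f656e5","block_mtime":1409082153}
+//   ]
+//
+// If the request has not been sent by the Data Manager, TrashHandler
+// returns 401 Unauthorized; if the JSON fails to unmarshal, it
+// returns 400 Bad Request.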
+func TrashHandler(resp http.ResponseWriter, req *http.Request) {
+       // Reject unauthorized requests.
+       if !IsSystemAuth(GetAPIToken(req)) {
+               http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
+               return
+       }
+
+       // Parse the request body.
+       var trash []TrashRequest
+       r := json.NewDecoder(req.Body)
+       if err := r.Decode(&trash); err != nil {
+               http.Error(resp, err.Error(), BadRequestError.HTTPCode)
+               return
+       }
+
+       // We have a properly formatted trash list sent from the data
+       // manager.  Report success and send the list to the trash work
+       // queue for further handling.
+       resp.WriteHeader(http.StatusOK)
+       resp.Write([]byte(
+               fmt.Sprintf("Received %d trash requests\n", len(trash))))
+
+       tlist := list.New()
+       for _, t := range trash {
+               tlist.PushBack(t)
+       }
+       trashq.ReplaceQueue(tlist)
+}
+
+// UntrashHandler processes "PUT /untrash/{hash:[0-9a-f]{32}}" requests for the data manager.
+func UntrashHandler(resp http.ResponseWriter, req *http.Request) {
+       // Reject unauthorized requests.
+       if !IsSystemAuth(GetAPIToken(req)) {
+               http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
+               return
+       }
+
+       hash := mux.Vars(req)["hash"]
+
+       if len(KeepVM.AllWritable()) == 0 {
+               http.Error(resp, "No writable volumes", http.StatusNotFound)
+               return
+       }
+
+       var untrashedOn, failedOn []string
+       var numNotFound int
+       for _, vol := range KeepVM.AllWritable() {
+               err := vol.Untrash(hash)
+
+               if os.IsNotExist(err) {
+                       numNotFound++
+               } else if err != nil {
+                       log.Printf("Error untrashing %v on volume %v: %v", hash, vol.String(), err)
+                       failedOn = append(failedOn, vol.String())
+               } else {
+                       log.Printf("Untrashed %v on volume %v", hash, vol.String())
+                       untrashedOn = append(untrashedOn, vol.String())
+               }
+       }
+
+       if numNotFound == len(KeepVM.AllWritable()) {
+               http.Error(resp, "Block not found on any of the writable volumes", http.StatusNotFound)
+               return
+       }
+
+       if len(failedOn) == len(KeepVM.AllWritable()) {
+               http.Error(resp, "Failed to untrash on all writable volumes", http.StatusInternalServerError)
+       } else {
+               respBody := "Successfully untrashed on: " + strings.Join(untrashedOn, ",")
+               if len(failedOn) > 0 {
+                       respBody += "; Failed to untrash on: " + strings.Join(failedOn, ",")
+               }
+               resp.Write([]byte(respBody))
+       }
+}
+
+// GetBlock and PutBlock implement lower-level code for handling
+// blocks by rooting through volumes connected to the local machine.
+// Once the handler has determined that system policy permits the
+// request, it calls these methods to perform the actual operation.
+//
+// TODO(twp): this code would probably be better located in the
+// VolumeManager interface. As an abstraction, the VolumeManager
+// should be the only part of the code that cares about which volume a
+// block is stored on, so it should be responsible for figuring out
+// which volume to check for fetching blocks, storing blocks, etc.
+
+// GetBlock fetches the block identified by "hash" into the provided
+// buf, and returns the data size.
+//
+// If the block cannot be found on any volume, returns NotFoundError.
+//
+// If the block found does not have the correct MD5 hash, returns
+// DiskHashError.
+//
+func GetBlock(ctx context.Context, hash string, buf []byte, resp http.ResponseWriter) (int, error) {
+       // Attempt to read the requested hash from a keep volume.
+       errorToCaller := NotFoundError
+
+       for _, vol := range KeepVM.AllReadable() {
+               size, err := vol.Get(ctx, hash, buf)
+               select {
+               case <-ctx.Done():
+                       return 0, ErrClientDisconnect
+               default:
+               }
+               if err != nil {
+                       // IsNotExist is an expected error and may be
+                       // ignored. All other errors are logged. In
+                       // any case we continue trying to read other
+                       // volumes. If all volumes report IsNotExist,
+                       // we return a NotFoundError.
+                       if !os.IsNotExist(err) {
+                               log.Printf("%s: Get(%s): %s", vol, hash, err)
+                       }
+                       // If some volume returns a transient error, return it to the caller
+                       // instead of "Not found" so it can retry.
+                       if err == VolumeBusyError {
+                               errorToCaller = err.(*KeepError)
+                       }
+                       continue
+               }
+               // Check the file checksum.
+               filehash := fmt.Sprintf("%x", md5.Sum(buf[:size]))
+               if filehash != hash {
+                       // TODO: Try harder to tell a sysadmin about
+                       // this.
+                       log.Printf("%s: checksum mismatch for request %s (actual %s)",
+                               vol, hash, filehash)
+                       errorToCaller = DiskHashError
+                       continue
+               }
+               if errorToCaller == DiskHashError {
+                       log.Printf("%s: checksum mismatch for request %s but a good copy was found on another volume and returned",
+                               vol, hash)
+               }
+               return size, nil
+       }
+       return 0, errorToCaller
+}
+
+// PutBlock stores the BLOCK (identified by the content id HASH) in Keep.
+//
+// PutBlock(ctx, block, hash)
+//
+//   The MD5 checksum of the block must be identical to the content id HASH.
+//   If not, an error is returned.
+//
+//   PutBlock stores the BLOCK on the first Keep volume with free space.
+//   A failure code is returned to the user only if all volumes fail.
+//
+//   On success, PutBlock returns nil.
+//   On failure, it returns a KeepError with one of the following codes:
+//
+//   500 Collision
+//          A different block with the same hash already exists on this
+//          Keep server.
+//   422 MD5Fail
+//          The MD5 hash of the BLOCK does not match the argument HASH.
+//   503 Full
+//          There was not enough space left in any Keep volume to store
+//          the object.
+//   500 Fail
+//          The object could not be stored for some other reason (e.g.
+//          all writes failed). The text of the error message should
+//          provide as much detail as possible.
+//
+func PutBlock(ctx context.Context, block []byte, hash string) (int, error) {
+       // Check that BLOCK's checksum matches HASH.
+       blockhash := fmt.Sprintf("%x", md5.Sum(block))
+       if blockhash != hash {
+               log.Printf("%s: MD5 checksum %s did not match request", hash, blockhash)
+               return 0, RequestHashError
+       }
+
+       // If we already have this data, it's intact on disk, and we
+       // can update its timestamp, return success. If we have
+       // different data with the same hash, return failure.
+       if n, err := CompareAndTouch(ctx, hash, block); err == nil || err == CollisionError {
+               return n, err
+       } else if ctx.Err() != nil {
+               return 0, ErrClientDisconnect
+       }
+
+       // Choose a Keep volume to write to.
+       // If this volume fails, try all of the volumes in order.
+       if vol := KeepVM.NextWritable(); vol != nil {
+               if err := vol.Put(ctx, hash, block); err == nil {
+                       return vol.Replication(), nil // success!
+               }
+               if ctx.Err() != nil {
+                       return 0, ErrClientDisconnect
+               }
+       }
+
+       writables := KeepVM.AllWritable()
+       if len(writables) == 0 {
+               log.Print("No writable volumes.")
+               return 0, FullError
+       }
+
+       allFull := true
+       for _, vol := range writables {
+               err := vol.Put(ctx, hash, block)
+               if ctx.Err() != nil {
+                       return 0, ErrClientDisconnect
+               }
+               if err == nil {
+                       return vol.Replication(), nil // success!
+               }
+               if err != FullError {
+                       // The volume is not full but the
+                       // write did not succeed.  Report the
+                       // error and continue trying.
+                       allFull = false
+                       log.Printf("%s: Write(%s): %s", vol, hash, err)
+               }
+       }
+
+       if allFull {
+               log.Print("All volumes are full.")
+               return 0, FullError
+       }
+       // Already logged the non-full errors.
+       return 0, GenericError
+}
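+
+// A minimal caller sketch (hypothetical; "ctx" is a stand-in): the hash
+// argument must be the MD5 digest of the data being stored.
+//
+//   block := []byte("foo")
+//   hash := fmt.Sprintf("%x", md5.Sum(block))
+//   replication, err := PutBlock(ctx, block, hash)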
+
+// CompareAndTouch returns the current replication level if one of the
+// volumes already has the given content and it successfully updates
+// the relevant block's modification time in order to protect it from
+// premature garbage collection. Otherwise, it returns a non-nil
+// error.
+func CompareAndTouch(ctx context.Context, hash string, buf []byte) (int, error) {
+       var bestErr error = NotFoundError
+       for _, vol := range KeepVM.AllWritable() {
+               err := vol.Compare(ctx, hash, buf)
+               if ctx.Err() != nil {
+                       return 0, ctx.Err()
+               } else if err == CollisionError {
+                       // Stop if we have a block with same hash but
+                       // different content. (It will be impossible
+                       // to tell which one is wanted if we have
+                       // both, so there's no point writing it even
+                       // on a different volume.)
+                       log.Printf("%s: Compare(%s): %s", vol, hash, err)
+                       return 0, err
+               } else if os.IsNotExist(err) {
+                       // Block does not exist. This is the only
+                       // "normal" error: we don't log anything.
+                       continue
+               } else if err != nil {
+                       // Couldn't open file, data is corrupt on
+                       // disk, etc.: log this abnormal condition,
+                       // and try the next volume.
+                       log.Printf("%s: Compare(%s): %s", vol, hash, err)
+                       continue
+               }
+               if err := vol.Touch(hash); err != nil {
+                       log.Printf("%s: Touch %s failed: %s", vol, hash, err)
+                       bestErr = err
+                       continue
+               }
+               // Compare and Touch both worked --> done.
+               return vol.Replication(), nil
+       }
+       return 0, bestErr
+}
+
+var validLocatorRe = regexp.MustCompile(`^[0-9a-f]{32}$`)
+
+// IsValidLocator returns true if the specified string is a valid Keep
+// locator: a bare 32-digit hexadecimal MD5 hash, with no size or
+// permission hints. When Keep is extended to support hash types other
+// than MD5, this should be updated to cover those as well.
+func IsValidLocator(loc string) bool {
+       return validLocatorRe.MatchString(loc)
+}
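+
+// For example (illustrative), only a bare digest matches:
+//
+//   IsValidLocator("d41d8cd98f00b204e9800998ecf8427e")   // true
+//   IsValidLocator("d41d8cd98f00b204e9800998ecf8427e+0") // false: has a hint
+//   IsValidLocator("not-a-locator")                      // false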
+
+var authRe = regexp.MustCompile(`^(OAuth2|Bearer)\s+(.*)`)
+
+// GetAPIToken returns the OAuth2 or Bearer token from the Authorization
+// header of an HTTP request, or an empty string if no matching
+// token is found.
+func GetAPIToken(req *http.Request) string {
+       if auth, ok := req.Header["Authorization"]; ok {
+               if match := authRe.FindStringSubmatch(auth[0]); match != nil {
+                       return match[2]
+               }
+       }
+       return ""
+}
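+
+// For example (illustrative), either scheme yields the same token:
+//
+//   Authorization: OAuth2 xyzzy   ->  "xyzzy"
+//   Authorization: Bearer xyzzy   ->  "xyzzy"
+//   (no Authorization header)     ->  ""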
+
+// IsExpired returns true if the given Unix timestamp (expressed as a
+// hexadecimal string) is in the past, or if timestampHex cannot be
+// parsed as a hexadecimal string.
+func IsExpired(timestampHex string) bool {
+       ts, err := strconv.ParseInt(timestampHex, 16, 0)
+       if err != nil {
+               log.Printf("IsExpired: %s", err)
+               return true
+       }
+       return time.Unix(ts, 0).Before(time.Now())
+}
+
+// CanDelete returns true if the user identified by apiToken is
+// allowed to delete blocks.
+func CanDelete(apiToken string) bool {
+       if apiToken == "" {
+               return false
+       }
+       // Blocks may be deleted only when Keep has been configured with a
+       // data manager.
+       if IsSystemAuth(apiToken) {
+               return true
+       }
+       // TODO(twp): look up apiToken with the API server
+       // return true if is_admin is true and if the token
+       // has unlimited scope
+       return false
+}
+
+// IsSystemAuth returns true if the given token is allowed to perform
+// system level actions like deleting data.
+func IsSystemAuth(token string) bool {
+       return token != "" && token == theConfig.systemAuthToken
+}
diff --git a/services/keepstore/handlers_with_generic_volume_test.go b/services/keepstore/handlers_with_generic_volume_test.go
new file mode 100644 (file)
index 0000000..4ffb7f8
--- /dev/null
@@ -0,0 +1,127 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+)
+
+// A TestableVolumeManagerFactory creates a volume manager with at least two TestableVolume instances.
+// The factory function, and the TestableVolume instances it returns, can use "t" to write
+// logs, fail the current test, etc.
+type TestableVolumeManagerFactory func(t TB) (*RRVolumeManager, []TestableVolume)
+
+// DoHandlersWithGenericVolumeTests runs a set of handler tests with a
+// Volume Manager composed of TestableVolume instances.
+// It calls factory to create a volume manager with TestableVolume
+// instances for each test case, to avoid leaking state between tests.
+func DoHandlersWithGenericVolumeTests(t TB, factory TestableVolumeManagerFactory) {
+       testGetBlock(t, factory, TestHash, TestBlock)
+       testGetBlock(t, factory, EmptyHash, EmptyBlock)
+       testPutRawBadDataGetBlock(t, factory, TestHash, TestBlock, []byte("baddata"))
+       testPutRawBadDataGetBlock(t, factory, EmptyHash, EmptyBlock, []byte("baddata"))
+       testPutBlock(t, factory, TestHash, TestBlock)
+       testPutBlock(t, factory, EmptyHash, EmptyBlock)
+       testPutBlockCorrupt(t, factory, TestHash, TestBlock, []byte("baddata"))
+       testPutBlockCorrupt(t, factory, EmptyHash, EmptyBlock, []byte("baddata"))
+}
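+
+// A hypothetical factory sketch (assuming MockVolume implements
+// TestableVolume and MakeRRVolumeManager returns an *RRVolumeManager):
+//
+//   factory := func(t TB) (*RRVolumeManager, []TestableVolume) {
+//           v0, v1 := CreateMockVolume(), CreateMockVolume()
+//           return MakeRRVolumeManager([]Volume{v0, v1}), []TestableVolume{v0, v1}
+//   }
+//   DoHandlersWithGenericVolumeTests(t, factory)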
+
+// setupHandlersWithGenericVolumeTest sets KeepVM to an RRVolumeManager
+// built from the factory's TestableVolumes. Note that the deferred
+// Teardown and Close calls below fire when this setup function itself
+// returns, not at the end of the calling test.
+func setupHandlersWithGenericVolumeTest(t TB, factory TestableVolumeManagerFactory) []TestableVolume {
+       vm, testableVolumes := factory(t)
+       KeepVM = vm
+
+       for _, v := range testableVolumes {
+               defer v.Teardown()
+       }
+       defer KeepVM.Close()
+
+       return testableVolumes
+}
+
+// Put a block using PutRaw in just one volume and Get it using GetBlock
+func testGetBlock(t TB, factory TestableVolumeManagerFactory, testHash string, testBlock []byte) {
+       testableVolumes := setupHandlersWithGenericVolumeTest(t, factory)
+
+       // Put testBlock in one volume
+       testableVolumes[1].PutRaw(testHash, testBlock)
+
+       // Get should pass
+       buf := make([]byte, len(testBlock))
+       n, err := GetBlock(context.Background(), testHash, buf, nil)
+       if err != nil {
+               t.Fatalf("Error while getting block %s", err)
+       }
+       if !bytes.Equal(buf[:n], testBlock) {
+               t.Errorf("Put succeeded but Get returned %+v, expected %+v", buf[:n], testBlock)
+       }
+}
+
+// Put a bad block using PutRaw and get it.
+func testPutRawBadDataGetBlock(t TB, factory TestableVolumeManagerFactory,
+       testHash string, testBlock []byte, badData []byte) {
+       testableVolumes := setupHandlersWithGenericVolumeTest(t, factory)
+
+       // Put bad data for testHash in both volumes
+       testableVolumes[0].PutRaw(testHash, badData)
+       testableVolumes[1].PutRaw(testHash, badData)
+
+       // Get should fail
+       buf := make([]byte, BlockSize)
+       size, err := GetBlock(context.Background(), testHash, buf, nil)
+       if err == nil {
+               t.Fatalf("Got %+q, expected error while getting corrupt block %v", buf[:size], testHash)
+       }
+}
+
+// Invoke PutBlock twice to ensure CompareAndTouch path is tested.
+func testPutBlock(t TB, factory TestableVolumeManagerFactory, testHash string, testBlock []byte) {
+       setupHandlersWithGenericVolumeTest(t, factory)
+
+       // PutBlock
+       if _, err := PutBlock(context.Background(), testBlock, testHash); err != nil {
+               t.Fatalf("Error during PutBlock: %s", err)
+       }
+
+       // Check that PutBlock succeeds again even after CompareAndTouch
+       if _, err := PutBlock(context.Background(), testBlock, testHash); err != nil {
+               t.Fatalf("Error during PutBlock: %s", err)
+       }
+
+       // Check that PutBlock stored the data as expected
+       buf := make([]byte, BlockSize)
+       size, err := GetBlock(context.Background(), testHash, buf, nil)
+       if err != nil {
+               t.Fatalf("Error during GetBlock for %q: %s", testHash, err)
+       } else if !bytes.Equal(buf[:size], testBlock) {
+               t.Errorf("Get response incorrect. Expected %q; found %q", testBlock, buf[:size])
+       }
+}
+
+// Put a bad block using PutRaw, overwrite it using PutBlock and get it.
+func testPutBlockCorrupt(t TB, factory TestableVolumeManagerFactory,
+       testHash string, testBlock []byte, badData []byte) {
+       testableVolumes := setupHandlersWithGenericVolumeTest(t, factory)
+
+       // Put bad data for testHash in both volumes
+       testableVolumes[0].PutRaw(testHash, badData)
+       testableVolumes[1].PutRaw(testHash, badData)
+
+       // Check that PutBlock with good data succeeds
+       if _, err := PutBlock(context.Background(), testBlock, testHash); err != nil {
+               t.Fatalf("Error during PutBlock for %q: %s", testHash, err)
+       }
+
+       // Put succeeded and overwrote the badData in one volume,
+       // and Get should return the testBlock now, ignoring the bad data.
+       buf := make([]byte, BlockSize)
+       size, err := GetBlock(context.Background(), testHash, buf, nil)
+       if err != nil {
+               t.Fatalf("Error during GetBlock for %q: %s", testHash, err)
+       } else if !bytes.Equal(buf[:size], testBlock) {
+               t.Errorf("Get response incorrect. Expected %q; found %q", testBlock, buf[:size])
+       }
+}
diff --git a/services/keepstore/keepstore.go b/services/keepstore/keepstore.go
new file mode 100644 (file)
index 0000000..fcbddda
--- /dev/null
@@ -0,0 +1,248 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "flag"
+       "fmt"
+       "net"
+       "os"
+       "os/signal"
+       "syscall"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/config"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       "github.com/coreos/go-systemd/daemon"
+       "github.com/prometheus/client_golang/prometheus"
+)
+
+var version = "dev"
+
+// A Keep "block" is 64MB.
+const BlockSize = 64 * 1024 * 1024
+
+// MinFreeKilobytes is the amount of space (in kilobytes) a Keep volume
+// must have available in order to permit writes.
+const MinFreeKilobytes = BlockSize / 1024
+
+// ProcMounts is the path of the mount table consulted when discovering
+// volumes (normally /proc/mounts); tests may override it.
+var ProcMounts = "/proc/mounts"
+
+var bufs *bufferPool
+
+// KeepError is the error type returned by keepstore operations; it
+// pairs an HTTP status code with a human-readable message.
+type KeepError struct {
+       HTTPCode int
+       ErrMsg   string
+}
+
+var (
+       BadRequestError     = &KeepError{400, "Bad Request"}
+       UnauthorizedError   = &KeepError{401, "Unauthorized"}
+       CollisionError      = &KeepError{500, "Collision"}
+       RequestHashError    = &KeepError{422, "Hash mismatch in request"}
+       PermissionError     = &KeepError{403, "Forbidden"}
+       DiskHashError       = &KeepError{500, "Hash mismatch in stored data"}
+       ExpiredError        = &KeepError{401, "Expired permission signature"}
+       NotFoundError       = &KeepError{404, "Not Found"}
+       VolumeBusyError     = &KeepError{503, "Volume backend busy"}
+       GenericError        = &KeepError{500, "Fail"}
+       FullError           = &KeepError{503, "Full"}
+       SizeRequiredError   = &KeepError{411, "Missing Content-Length"}
+       TooLongError        = &KeepError{413, "Block is too large"}
+       MethodDisabledError = &KeepError{405, "Method disabled"}
+       ErrNotImplemented   = &KeepError{500, "Unsupported configuration"}
+       ErrClientDisconnect = &KeepError{503, "Client disconnected"}
+)
+
+func (e *KeepError) Error() string {
+       return e.ErrMsg
+}
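+
+// A hypothetical handler sketch (assuming net/http is imported at the
+// point of use): the HTTPCode field maps directly onto the response
+// status.
+//
+//   if ke, ok := err.(*KeepError); ok {
+//           http.Error(resp, ke.ErrMsg, ke.HTTPCode)
+//   }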
+
+// ========================
+// Internal data structures
+//
+// These global variables are used by multiple parts of the
+// program. They are good candidates for moving into their own
+// packages.
+
+// The Keep VolumeManager maintains a list of available volumes.
+// Initialized by the --volumes flag (or by FindKeepVolumes).
+var KeepVM VolumeManager
+
+// The pull list manager and trash queue are threadsafe queues which
+// support atomic update operations. The PullHandler and TrashHandler
+// store results from Data Manager /pull and /trash requests here.
+//
+// See the Keep and Data Manager design documents for more details:
+// https://arvados.org/projects/arvados/wiki/Keep_Design_Doc
+// https://arvados.org/projects/arvados/wiki/Data_Manager_Design_Doc
+//
+var pullq *WorkQueue
+var trashq *WorkQueue
+
+func main() {
+       deprecated.beforeFlagParse(theConfig)
+
+       dumpConfig := flag.Bool("dump-config", false, "write current configuration to stdout and exit (useful for migrating from command line flags to config file)")
+       getVersion := flag.Bool("version", false, "Print version information and exit.")
+
+       defaultConfigPath := "/etc/arvados/keepstore/keepstore.yml"
+       var configPath string
+       flag.StringVar(
+               &configPath,
+               "config",
+               defaultConfigPath,
+               "YAML or JSON configuration file `path`")
+       flag.Usage = usage
+       flag.Parse()
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("keepstore %s\n", version)
+               return
+       }
+
+       deprecated.afterFlagParse(theConfig)
+
+       err := config.LoadFile(theConfig, configPath)
+       if err != nil && (!os.IsNotExist(err) || configPath != defaultConfigPath) {
+               log.Fatal(err)
+       }
+
+       if *dumpConfig {
+               log.Fatal(config.DumpAndExit(theConfig))
+       }
+
+       log.Printf("keepstore %s started", version)
+
+       metricsRegistry := prometheus.NewRegistry()
+
+       err = theConfig.Start(metricsRegistry)
+       if err != nil {
+               log.Fatal(err)
+       }
+
+       if pidfile := theConfig.PIDFile; pidfile != "" {
+               f, err := os.OpenFile(pidfile, os.O_RDWR|os.O_CREATE, 0777)
+               if err != nil {
+                       log.Fatalf("open pidfile (%s): %s", pidfile, err)
+               }
+               defer f.Close()
+               err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
+               if err != nil {
+                       log.Fatalf("flock pidfile (%s): %s", pidfile, err)
+               }
+               defer os.Remove(pidfile)
+               err = f.Truncate(0)
+               if err != nil {
+                       log.Fatalf("truncate pidfile (%s): %s", pidfile, err)
+               }
+               _, err = fmt.Fprint(f, os.Getpid())
+               if err != nil {
+                       log.Fatalf("write pidfile (%s): %s", pidfile, err)
+               }
+               err = f.Sync()
+               if err != nil {
+                       log.Fatalf("sync pidfile (%s): %s", pidfile, err)
+               }
+       }
+
+       var cluster *arvados.Cluster
+       cfg, err := arvados.GetConfig(arvados.DefaultConfigFile)
+       if err != nil && os.IsNotExist(err) {
+               log.Warnf("DEPRECATED: proceeding without cluster configuration file %q (%s)", arvados.DefaultConfigFile, err)
+               cluster = &arvados.Cluster{
+                       ClusterID: "xxxxx",
+               }
+       } else if err != nil {
+               log.Fatalf("load config %q: %s", arvados.DefaultConfigFile, err)
+       } else {
+               cluster, err = cfg.GetCluster("")
+               if err != nil {
+                       log.Fatalf("config error in %q: %s", arvados.DefaultConfigFile, err)
+               }
+       }
+
+       log.Println("keepstore starting, pid", os.Getpid())
+       defer log.Println("keepstore exiting, pid", os.Getpid())
+
+       // Start a round-robin VolumeManager with the volumes we have found.
+       KeepVM = MakeRRVolumeManager(theConfig.Volumes)
+
+       // Middleware/handler stack
+       router := MakeRESTRouter(cluster, metricsRegistry)
+
+       // Set up a TCP listener.
+       listener, err := net.Listen("tcp", theConfig.Listen)
+       if err != nil {
+               log.Fatal(err)
+       }
+
+       // Initialize keepclient for pull workers
+       keepClient := &keepclient.KeepClient{
+               Arvados:       &arvadosclient.ArvadosClient{},
+               Want_replicas: 1,
+       }
+
+       // Initialize the pullq and its workers (the loop below starts at
+       // least one worker even if theConfig.PullWorkers is zero)
+       pullq = NewWorkQueue()
+       for i := 0; i < 1 || i < theConfig.PullWorkers; i++ {
+               go RunPullWorker(pullq, keepClient)
+       }
+
+       // Initialize the trashq and its workers (the loop below starts at
+       // least one worker even if theConfig.TrashWorkers is zero)
+       trashq = NewWorkQueue()
+       for i := 0; i < 1 || i < theConfig.TrashWorkers; i++ {
+               go RunTrashWorker(trashq)
+       }
+
+       // Start emptyTrash goroutine
+       doneEmptyingTrash := make(chan bool)
+       go emptyTrash(doneEmptyingTrash, theConfig.TrashCheckInterval.Duration())
+
+       // Shut down the server gracefully (by closing the listener)
+       // if SIGTERM is received.
+       term := make(chan os.Signal, 1)
+       go func(sig <-chan os.Signal) {
+               s := <-sig
+               log.Println("caught signal:", s)
+               doneEmptyingTrash <- true
+               listener.Close()
+       }(term)
+       signal.Notify(term, syscall.SIGTERM)
+       signal.Notify(term, syscall.SIGINT)
+
+       if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
+               log.Printf("Error notifying init daemon: %v", err)
+       }
+       log.Println("listening at", listener.Addr())
+       srv := &server{}
+       srv.Handler = router
+       srv.Serve(listener)
+}
+
+// Periodically (once per interval) invoke EmptyTrash on all volumes.
+func emptyTrash(done <-chan bool, interval time.Duration) {
+       ticker := time.NewTicker(interval)
+
+       for {
+               select {
+               case <-ticker.C:
+                       for _, v := range theConfig.Volumes {
+                               if v.Writable() {
+                                       v.EmptyTrash()
+                               }
+                       }
+               case <-done:
+                       ticker.Stop()
+                       return
+               }
+       }
+}
diff --git a/services/keepstore/keepstore.service b/services/keepstore/keepstore.service
new file mode 100644 (file)
index 0000000..8b448e7
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados Keep Storage Daemon
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/keepstore/keepstore.yml
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=notify
+ExecStart=/usr/bin/keepstore
+Restart=always
+RestartSec=1
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
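+
+# Typical installation (illustrative): copy this unit into
+# /lib/systemd/system/, then:
+#   systemctl daemon-reload
+#   systemctl enable keepstore
+#   systemctl start keepstore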
diff --git a/services/keepstore/keepstore_test.go b/services/keepstore/keepstore_test.go
new file mode 100644 (file)
index 0000000..d1d3804
--- /dev/null
@@ -0,0 +1,456 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "errors"
+       "fmt"
+       "io/ioutil"
+       "os"
+       "path"
+       "regexp"
+       "sort"
+       "strings"
+       "testing"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+)
+
+var TestBlock = []byte("The quick brown fox jumps over the lazy dog.")
+var TestHash = "e4d909c290d0fb1ca068ffaddf22cbd0"
+var TestHashPutResp = "e4d909c290d0fb1ca068ffaddf22cbd0+44\n"
+
+var TestBlock2 = []byte("Pack my box with five dozen liquor jugs.")
+var TestHash2 = "f15ac516f788aec4f30932ffb6395c39"
+
+var TestBlock3 = []byte("Now is the time for all good men to come to the aid of their country.")
+var TestHash3 = "eed29bbffbc2dbe5e5ee0bb71888e61f"
+
+// BadBlock is used to test collisions and corruption.
+// It must not match any test hashes.
+var BadBlock = []byte("The magic words are squeamish ossifrage.")
+
+// Empty block
+var EmptyHash = "d41d8cd98f00b204e9800998ecf8427e"
+var EmptyBlock = []byte("")
+
+// TODO(twp): Tests still to be written
+//
+//   * TestPutBlockFull
+//       - test that PutBlock returns 503 Full if the filesystem is full.
+//         (must mock FreeDiskSpace or Statfs? use a tmpfs?)
+//
+//   * TestPutBlockWriteErr
+//       - test the behavior when Write returns an error.
+//           - Possible solutions: use a small tmpfs and a high
+//             MIN_FREE_KILOBYTES to trick PutBlock into attempting
+//             to write a block larger than the amount of space left
+//           - use an interface to mock ioutil.TempFile with a File
+//             object that always returns an error on write
+//
+// ========================================
+// GetBlock tests.
+// ========================================
+
+// TestGetBlock
+//     Test that simple block reads succeed.
+//
+func TestGetBlock(t *testing.T) {
+       defer teardown()
+
+       // Prepare two test Keep volumes. Our block is stored on the second volume.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       vols := KeepVM.AllReadable()
+       if err := vols[1].Put(context.Background(), TestHash, TestBlock); err != nil {
+               t.Error(err)
+       }
+
+       // Check that GetBlock returns success.
+       buf := make([]byte, BlockSize)
+       size, err := GetBlock(context.Background(), TestHash, buf, nil)
+       if err != nil {
+               t.Errorf("GetBlock error: %s", err)
+       }
+       if !bytes.Equal(buf[:size], TestBlock) {
+               t.Errorf("got %v, expected %v", buf[:size], TestBlock)
+       }
+}
+
+// TestGetBlockMissing
+//     GetBlock must return an error when the block is not found.
+//
+func TestGetBlockMissing(t *testing.T) {
+       defer teardown()
+
+       // Create two empty test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       // Check that GetBlock returns failure.
+       buf := make([]byte, BlockSize)
+       size, err := GetBlock(context.Background(), TestHash, buf, nil)
+       if err != NotFoundError {
+               t.Errorf("Expected NotFoundError, got %v, err %v", buf[:size], err)
+       }
+}
+
+// TestGetBlockCorrupt
+//     GetBlock must return an error when a corrupted block is requested
+//     (the contents of the file do not checksum to its hash).
+//
+func TestGetBlockCorrupt(t *testing.T) {
+       defer teardown()
+
+       // Create two test Keep volumes and store a corrupt block in one.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       vols := KeepVM.AllReadable()
+       vols[0].Put(context.Background(), TestHash, BadBlock)
+
+       // Check that GetBlock returns failure.
+       buf := make([]byte, BlockSize)
+       size, err := GetBlock(context.Background(), TestHash, buf, nil)
+       if err != DiskHashError {
+               t.Errorf("Expected DiskHashError, got %v (buf: %v)", err, buf[:size])
+       }
+}
+
+// ========================================
+// PutBlock tests
+// ========================================
+
+// TestPutBlockOK
+//     PutBlock can perform a simple block write and returns success.
+//
+func TestPutBlockOK(t *testing.T) {
+       defer teardown()
+
+       // Create two test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       // Check that PutBlock stores the data as expected.
+       if n, err := PutBlock(context.Background(), TestBlock, TestHash); err != nil || n < 1 {
+               t.Fatalf("PutBlock: n %d err %v", n, err)
+       }
+
+       vols := KeepVM.AllReadable()
+       buf := make([]byte, BlockSize)
+       n, err := vols[1].Get(context.Background(), TestHash, buf)
+       if err != nil {
+               t.Fatalf("Volume #0 Get returned error: %v", err)
+       }
+       if string(buf[:n]) != string(TestBlock) {
+               t.Fatalf("PutBlock stored '%s', Get retrieved '%s'",
+                       string(TestBlock), string(buf[:n]))
+       }
+}
+
+// TestPutBlockOneVol
+//     PutBlock still returns success even when only one of the known
+//     volumes is online.
+//
+func TestPutBlockOneVol(t *testing.T) {
+       defer teardown()
+
+       // Create two test Keep volumes, but cripple one of them.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       vols := KeepVM.AllWritable()
+       vols[0].(*MockVolume).Bad = true
+       vols[0].(*MockVolume).BadVolumeError = errors.New("Bad volume")
+
+       // Check that PutBlock stores the data as expected.
+       if n, err := PutBlock(context.Background(), TestBlock, TestHash); err != nil || n < 1 {
+               t.Fatalf("PutBlock: n %d err %v", n, err)
+       }
+
+       buf := make([]byte, BlockSize)
+       size, err := GetBlock(context.Background(), TestHash, buf, nil)
+       if err != nil {
+               t.Fatalf("GetBlock: %v", err)
+       }
+       if !bytes.Equal(buf[:size], TestBlock) {
+               t.Fatalf("PutBlock stored %+q, GetBlock retrieved %+q",
+                       TestBlock, buf[:size])
+       }
+}
+
+// TestPutBlockMD5Fail
+//     Check that PutBlock returns an error if passed a block and hash that
+//     do not match.
+//
+func TestPutBlockMD5Fail(t *testing.T) {
+       defer teardown()
+
+       // Create two test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       // Check that PutBlock returns the expected error when the hash does
+       // not match the block.
+       if _, err := PutBlock(context.Background(), BadBlock, TestHash); err != RequestHashError {
+               t.Errorf("Expected RequestHashError, got %v", err)
+       }
+
+       // Confirm that GetBlock fails to return anything.
+       if result, err := GetBlock(context.Background(), TestHash, make([]byte, BlockSize), nil); err != NotFoundError {
+               t.Errorf("GetBlock succeeded after a corrupt block store (result = %s, err = %v)",
+                       string(result), err)
+       }
+}
+
+// TestPutBlockCorrupt
+//     PutBlock should overwrite corrupt blocks on disk when given
+//     a PUT request with a good block.
+//
+func TestPutBlockCorrupt(t *testing.T) {
+       defer teardown()
+
+       // Create two test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       // Store a corrupted block under TestHash.
+       vols := KeepVM.AllWritable()
+       vols[0].Put(context.Background(), TestHash, BadBlock)
+       if n, err := PutBlock(context.Background(), TestBlock, TestHash); err != nil || n < 1 {
+               t.Errorf("PutBlock: n %d err %v", n, err)
+       }
+
+       // The block on disk should now match TestBlock.
+       buf := make([]byte, BlockSize)
+       if size, err := GetBlock(context.Background(), TestHash, buf, nil); err != nil {
+               t.Errorf("GetBlock: %v", err)
+       } else if !bytes.Equal(buf[:size], TestBlock) {
+               t.Errorf("Got %+q, expected %+q", buf[:size], TestBlock)
+       }
+}
+
+// TestPutBlockCollision
+//     PutBlock returns a 500 Collision error when attempting to
+//     store a block that collides with another block on disk.
+//
+func TestPutBlockCollision(t *testing.T) {
+       defer teardown()
+
+       // These blocks both hash to the MD5 digest cee9a457e790cf20d4bdaa6d69f01e41.
+       b1 := arvadostest.MD5CollisionData[0]
+       b2 := arvadostest.MD5CollisionData[1]
+       locator := arvadostest.MD5CollisionMD5
+
+       // Prepare two test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       // Store one block, then attempt to store the other. Confirm that
+       // PutBlock reported a CollisionError.
+       if _, err := PutBlock(context.Background(), b1, locator); err != nil {
+               t.Error(err)
+       }
+       if _, err := PutBlock(context.Background(), b2, locator); err == nil {
+               t.Error("PutBlock did not report a collision")
+       } else if err != CollisionError {
+               t.Errorf("PutBlock returned %v", err)
+       }
+}
+
+// TestPutBlockTouchFails
+//     When PutBlock is asked to PUT an existing block, but cannot
+//     modify the timestamp, it should write a second block.
+//
+func TestPutBlockTouchFails(t *testing.T) {
+       defer teardown()
+
+       // Prepare two test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+       vols := KeepVM.AllWritable()
+
+       // Store a block and then make the underlying volume bad,
+       // so a subsequent attempt to update the file timestamp
+       // will fail.
+       vols[0].Put(context.Background(), TestHash, BadBlock)
+       oldMtime, err := vols[0].Mtime(TestHash)
+       if err != nil {
+               t.Fatalf("vols[0].Mtime(%s): %s\n", TestHash, err)
+       }
+
+       // vols[0].Touch will fail on the next call, so the volume
+       // manager will store a copy on vols[1] instead.
+       vols[0].(*MockVolume).Touchable = false
+       if n, err := PutBlock(context.Background(), TestBlock, TestHash); err != nil || n < 1 {
+               t.Fatalf("PutBlock: n %d err %v", n, err)
+       }
+       vols[0].(*MockVolume).Touchable = true
+
+       // Now the mtime on the block on vols[0] should be unchanged, and
+       // there should be a copy of the block on vols[1].
+       newMtime, err := vols[0].Mtime(TestHash)
+       if err != nil {
+               t.Fatalf("vols[0].Mtime(%s): %s\n", TestHash, err)
+       }
+       if !newMtime.Equal(oldMtime) {
+               t.Errorf("mtime was changed on vols[0]:\noldMtime = %v\nnewMtime = %v\n",
+                       oldMtime, newMtime)
+       }
+       buf := make([]byte, BlockSize)
+       n, err := vols[1].Get(context.Background(), TestHash, buf)
+       if err != nil {
+               t.Fatalf("vols[1]: %v", err)
+       }
+       if !bytes.Equal(buf[:n], TestBlock) {
+               t.Errorf("new block does not match test block\nnew block = %v\n", buf[:n])
+       }
+}
+
+func TestDiscoverTmpfs(t *testing.T) {
+       var tempVols [4]string
+       var err error
+
+       // Create some directories suitable for using as keep volumes.
+       for i := range tempVols {
+               if tempVols[i], err = ioutil.TempDir("", "findvol"); err != nil {
+                       t.Fatal(err)
+               }
+               defer os.RemoveAll(tempVols[i])
+               tempVols[i] = tempVols[i] + "/keep"
+               if err = os.Mkdir(tempVols[i], 0755); err != nil {
+                       t.Fatal(err)
+               }
+       }
+
+       // Set up a bogus ProcMounts file.
+       f, err := ioutil.TempFile("", "keeptest")
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer os.Remove(f.Name())
+       for i, vol := range tempVols {
+               // Add readonly mount points at odd indexes.
+               var opts string
+               switch i % 2 {
+               case 0:
+                       opts = "rw,nosuid,nodev,noexec"
+               case 1:
+                       opts = "nosuid,nodev,noexec,ro"
+               }
+               fmt.Fprintf(f, "tmpfs %s tmpfs %s 0 0\n", path.Dir(vol), opts)
+       }
+       f.Close()
+       ProcMounts = f.Name()
+
+       cfg := &Config{}
+       added := (&unixVolumeAdder{cfg}).Discover()
+
+       if added != len(cfg.Volumes) {
+               t.Errorf("Discover returned %d, but added %d volumes",
+                       added, len(cfg.Volumes))
+       }
+       if added != len(tempVols) {
+               t.Errorf("Discover returned %d but we set up %d volumes",
+                       added, len(tempVols))
+       }
+       for i, tmpdir := range tempVols {
+               if tmpdir != cfg.Volumes[i].(*UnixVolume).Root {
+                       t.Errorf("Discover returned %s, expected %s\n",
+                               cfg.Volumes[i].(*UnixVolume).Root, tmpdir)
+               }
+               if expectReadonly := i%2 == 1; expectReadonly != cfg.Volumes[i].(*UnixVolume).ReadOnly {
+                       t.Errorf("Discover added %s with readonly=%v, should be %v",
+                               tmpdir, !expectReadonly, expectReadonly)
+               }
+       }
+}
+
+func TestDiscoverNone(t *testing.T) {
+       defer teardown()
+
+       // Set up a bogus ProcMounts file with no Keep vols.
+       f, err := ioutil.TempFile("", "keeptest")
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer os.Remove(f.Name())
+       fmt.Fprintln(f, "rootfs / rootfs opts 0 0")
+       fmt.Fprintln(f, "sysfs /sys sysfs opts 0 0")
+       fmt.Fprintln(f, "proc /proc proc opts 0 0")
+       fmt.Fprintln(f, "udev /dev devtmpfs opts 0 0")
+       fmt.Fprintln(f, "devpts /dev/pts devpts opts 0 0")
+       f.Close()
+       ProcMounts = f.Name()
+
+       cfg := &Config{}
+       added := (&unixVolumeAdder{cfg}).Discover()
+       if added != 0 || len(cfg.Volumes) != 0 {
+               t.Fatalf("got %d, %v; expected 0, []", added, cfg.Volumes)
+       }
+}
+
+// TestIndex
+//     Test an /index request.
+func TestIndex(t *testing.T) {
+       defer teardown()
+
+       // Set up Keep volumes and populate them.
+       // Include multiple blocks on different volumes, and
+       // some metadata files.
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       vols := KeepVM.AllReadable()
+       vols[0].Put(context.Background(), TestHash, TestBlock)
+       vols[1].Put(context.Background(), TestHash2, TestBlock2)
+       vols[0].Put(context.Background(), TestHash3, TestBlock3)
+       vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
+       vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))
+
+       buf := new(bytes.Buffer)
+       vols[0].IndexTo("", buf)
+       vols[1].IndexTo("", buf)
+       indexRows := strings.Split(buf.String(), "\n")
+       sort.Strings(indexRows)
+       sortedIndex := strings.Join(indexRows, "\n")
+       expected := `^\n` + TestHash + `\+\d+ \d+\n` +
+               TestHash3 + `\+\d+ \d+\n` +
+               TestHash2 + `\+\d+ \d+$`
+
+       match, err := regexp.MatchString(expected, sortedIndex)
+       if err == nil {
+               if !match {
+                       t.Errorf("IndexLocators returned:\n%s", string(buf.Bytes()))
+               }
+       } else {
+               t.Errorf("regexp.MatchString: %s", err)
+       }
+}
+
+// ========================================
+// Helper functions for unit tests.
+// ========================================
+
+// MakeTestVolumeManager returns an RRVolumeManager with the specified
+// number of MockVolumes.
+func MakeTestVolumeManager(numVolumes int) VolumeManager {
+       vols := make([]Volume, numVolumes)
+       for i := range vols {
+               vols[i] = CreateMockVolume()
+       }
+       return MakeRRVolumeManager(vols)
+}
+
+// teardown cleans up after each test.
+func teardown() {
+       theConfig.systemAuthToken = ""
+       theConfig.RequireSignatures = false
+       theConfig.blobSigningKey = nil
+       KeepVM = nil
+}
diff --git a/services/keepstore/metrics.go b/services/keepstore/metrics.go
new file mode 100644 (file)
index 0000000..235c418
--- /dev/null
@@ -0,0 +1,137 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "fmt"
+
+       "git.curoverse.com/arvados.git/sdk/go/httpserver"
+       "github.com/prometheus/client_golang/prometheus"
+)
+
+type nodeMetrics struct {
+       reg *prometheus.Registry
+}
+
+func (m *nodeMetrics) setupBufferPoolMetrics(b *bufferPool) {
+       m.reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      "bufferpool_allocated_bytes",
+                       Help:      "Number of bytes allocated to buffers",
+               },
+               func() float64 { return float64(b.Alloc()) },
+       ))
+       m.reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      "bufferpool_max_buffers",
+                       Help:      "Maximum number of buffers allowed",
+               },
+               func() float64 { return float64(b.Cap()) },
+       ))
+       m.reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      "bufferpool_inuse_buffers",
+                       Help:      "Number of buffers in use",
+               },
+               func() float64 { return float64(b.Len()) },
+       ))
+}
+
+func (m *nodeMetrics) setupWorkQueueMetrics(q *WorkQueue, qName string) {
+       m.reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      fmt.Sprintf("%s_queue_inprogress_entries", qName),
+                       Help:      fmt.Sprintf("Number of %s requests in progress", qName),
+               },
+               func() float64 { return float64(getWorkQueueStatus(q).InProgress) },
+       ))
+       m.reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      fmt.Sprintf("%s_queue_pending_entries", qName),
+                       Help:      fmt.Sprintf("Number of queued %s requests", qName),
+               },
+               func() float64 { return float64(getWorkQueueStatus(q).Queued) },
+       ))
+}
+
+func (m *nodeMetrics) setupRequestMetrics(rc httpserver.RequestCounter) {
+       m.reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      "concurrent_requests",
+                       Help:      "Number of requests in progress",
+               },
+               func() float64 { return float64(rc.Current()) },
+       ))
+       m.reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      "max_concurrent_requests",
+                       Help:      "Maximum number of concurrent requests",
+               },
+               func() float64 { return float64(rc.Max()) },
+       ))
+}
+
+type volumeMetricsVecs struct {
+       ioBytes     *prometheus.CounterVec
+       errCounters *prometheus.CounterVec
+       opsCounters *prometheus.CounterVec
+}
+
+func newVolumeMetricsVecs(reg *prometheus.Registry) *volumeMetricsVecs {
+       m := &volumeMetricsVecs{}
+       m.opsCounters = prometheus.NewCounterVec(
+               prometheus.CounterOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      "volume_operations",
+                       Help:      "Number of volume operations",
+               },
+               []string{"device_id", "operation"},
+       )
+       reg.MustRegister(m.opsCounters)
+       m.errCounters = prometheus.NewCounterVec(
+               prometheus.CounterOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      "volume_errors",
+                       Help:      "Number of volume errors",
+               },
+               []string{"device_id", "error_type"},
+       )
+       reg.MustRegister(m.errCounters)
+       m.ioBytes = prometheus.NewCounterVec(
+               prometheus.CounterOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      "volume_io_bytes",
+                       Help:      "Volume I/O traffic in bytes",
+               },
+               []string{"device_id", "direction"},
+       )
+       reg.MustRegister(m.ioBytes)
+
+       return m
+}
+
+func (vm *volumeMetricsVecs) getCounterVecsFor(lbls prometheus.Labels) (opsCV, errCV, ioCV *prometheus.CounterVec) {
+       opsCV = vm.opsCounters.MustCurryWith(lbls)
+       errCV = vm.errCounters.MustCurryWith(lbls)
+       ioCV = vm.ioBytes.MustCurryWith(lbls)
+       return
+}
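+
+// A hypothetical usage sketch ("dev1" and n are stand-ins): a volume
+// curries its device ID once, then increments counters with only the
+// remaining label.
+//
+//   opsCV, errCV, ioCV := m.getCounterVecsFor(prometheus.Labels{"device_id": "dev1"})
+//   opsCV.With(prometheus.Labels{"operation": "get"}).Inc()
+//   errCV.With(prometheus.Labels{"error_type": "notfound"}).Inc()
+//   ioCV.With(prometheus.Labels{"direction": "in"}).Add(float64(n))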
diff --git a/services/keepstore/mock_mutex_for_test.go b/services/keepstore/mock_mutex_for_test.go
new file mode 100644 (file)
index 0000000..484b177
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+type MockMutex struct {
+       AllowLock   chan struct{}
+       AllowUnlock chan struct{}
+}
+
+func NewMockMutex() *MockMutex {
+       return &MockMutex{
+               AllowLock:   make(chan struct{}),
+               AllowUnlock: make(chan struct{}),
+       }
+}
+
+// Lock waits for someone to send to AllowLock.
+func (m *MockMutex) Lock() {
+       <-m.AllowLock
+}
+
+// Unlock waits for someone to send to AllowUnlock.
+func (m *MockMutex) Unlock() {
+       <-m.AllowUnlock
+}
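+
+// A hypothetical test sketch ("codeUnderTest" is a stand-in): the test
+// decides exactly when the code under test may acquire and release the
+// mutex, making race-prone sequences deterministic.
+//
+//   m := NewMockMutex()
+//   go codeUnderTest(m)         // blocks inside m.Lock()
+//   m.AllowLock <- struct{}{}   // now it holds the lock
+//   // ... assert on intermediate state ...
+//   m.AllowUnlock <- struct{}{} // now it releases the lock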
diff --git a/services/keepstore/mounts_test.go b/services/keepstore/mounts_test.go
new file mode 100644 (file)
index 0000000..7c932ee
--- /dev/null
@@ -0,0 +1,178 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "encoding/json"
+       "net/http"
+       "net/http/httptest"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "github.com/prometheus/client_golang/prometheus"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&MountsSuite{})
+
+type MountsSuite struct {
+       vm  VolumeManager
+       rtr http.Handler
+}
+
+func (s *MountsSuite) SetUpTest(c *check.C) {
+       s.vm = MakeTestVolumeManager(2)
+       KeepVM = s.vm
+       theConfig = DefaultConfig()
+       theConfig.systemAuthToken = arvadostest.DataManagerToken
+       theConfig.ManagementToken = arvadostest.ManagementToken
+       r := prometheus.NewRegistry()
+       theConfig.Start(r)
+       s.rtr = MakeRESTRouter(testCluster, r)
+}
+
+func (s *MountsSuite) TearDownTest(c *check.C) {
+       s.vm.Close()
+       KeepVM = nil
+       theConfig = DefaultConfig()
+       theConfig.Start(prometheus.NewRegistry())
+}
+
+func (s *MountsSuite) TestMounts(c *check.C) {
+       vols := s.vm.AllWritable()
+       vols[0].Put(context.Background(), TestHash, TestBlock)
+       vols[1].Put(context.Background(), TestHash2, TestBlock2)
+
+       resp := s.call("GET", "/mounts", "", nil)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       var mntList []struct {
+               UUID           string   `json:"uuid"`
+               DeviceID       string   `json:"device_id"`
+               ReadOnly       bool     `json:"read_only"`
+               Replication    int      `json:"replication"`
+               StorageClasses []string `json:"storage_classes"`
+       }
+       err := json.Unmarshal(resp.Body.Bytes(), &mntList)
+       c.Assert(err, check.IsNil)
+       c.Assert(len(mntList), check.Equals, 2)
+       for _, m := range mntList {
+               c.Check(len(m.UUID), check.Equals, 27)
+               c.Check(m.UUID[:12], check.Equals, "zzzzz-ivpuk-")
+               c.Check(m.DeviceID, check.Equals, "mock-device-id")
+               c.Check(m.ReadOnly, check.Equals, false)
+               c.Check(m.Replication, check.Equals, 1)
+               c.Check(m.StorageClasses, check.DeepEquals, []string{"default"})
+       }
+       c.Check(mntList[0].UUID, check.Not(check.Equals), mntList[1].UUID)
+
+       // Bad auth
+       for _, tok := range []string{"", "xyzzy"} {
+               resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks", tok, nil)
+               c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
+               c.Check(resp.Body.String(), check.Equals, "Unauthorized\n")
+       }
+
+       tok := arvadostest.DataManagerToken
+
+       // Nonexistent mount UUID
+       resp = s.call("GET", "/mounts/X/blocks", tok, nil)
+       c.Check(resp.Code, check.Equals, http.StatusNotFound)
+       c.Check(resp.Body.String(), check.Equals, "mount not found\n")
+
+       // Complete index of first mount
+       resp = s.call("GET", "/mounts/"+mntList[0].UUID+"/blocks", tok, nil)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       c.Check(resp.Body.String(), check.Matches, TestHash+`\+[0-9]+ [0-9]+\n\n`)
+
+       // Partial index of first mount (one block matches prefix)
+       resp = s.call("GET", "/mounts/"+mntList[0].UUID+"/blocks?prefix="+TestHash[:2], tok, nil)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       c.Check(resp.Body.String(), check.Matches, TestHash+`\+[0-9]+ [0-9]+\n\n`)
+
+       // Complete index of second mount (note trailing slash)
+       resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks/", tok, nil)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       c.Check(resp.Body.String(), check.Matches, TestHash2+`\+[0-9]+ [0-9]+\n\n`)
+
+       // Partial index of second mount (no blocks match prefix)
+       resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks/?prefix="+TestHash[:2], tok, nil)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       c.Check(resp.Body.String(), check.Equals, "\n")
+}
+
+func (s *MountsSuite) TestMetrics(c *check.C) {
+       s.call("PUT", "/"+TestHash, "", TestBlock)
+       s.call("PUT", "/"+TestHash2, "", TestBlock2)
+       resp := s.call("GET", "/metrics.json", "", nil)
+       c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
+       resp = s.call("GET", "/metrics.json", "foobar", nil)
+       c.Check(resp.Code, check.Equals, http.StatusForbidden)
+       resp = s.call("GET", "/metrics.json", arvadostest.ManagementToken, nil)
+       c.Check(resp.Code, check.Equals, http.StatusOK)
+       var j []struct {
+               Name   string
+               Help   string
+               Type   string
+               Metric []struct {
+                       Label []struct {
+                               Name  string
+                               Value string
+                       }
+                       Summary struct {
+                               SampleCount string  `json:"sample_count"`
+                               SampleSum   float64 `json:"sample_sum"`
+                               Quantile    []struct {
+                                       Quantile float64
+                                       Value    float64
+                               }
+                       }
+               }
+       }
+       json.NewDecoder(resp.Body).Decode(&j)
+       found := make(map[string]bool)
+       names := map[string]bool{}
+       for _, g := range j {
+               names[g.Name] = true
+               for _, m := range g.Metric {
+                       if len(m.Label) == 2 && m.Label[0].Name == "code" && m.Label[0].Value == "200" && m.Label[1].Name == "method" && m.Label[1].Value == "put" {
+                               c.Check(m.Summary.SampleCount, check.Equals, "2")
+                               c.Check(len(m.Summary.Quantile), check.Not(check.Equals), 0)
+                               c.Check(m.Summary.Quantile[0].Value, check.Not(check.Equals), float64(0))
+                               found[g.Name] = true
+                       }
+               }
+       }
+       c.Check(found["request_duration_seconds"], check.Equals, true)
+       c.Check(found["time_to_status_seconds"], check.Equals, true)
+
+       metricsNames := []string{
+               "arvados_keepstore_bufferpool_inuse_buffers",
+               "arvados_keepstore_bufferpool_max_buffers",
+               "arvados_keepstore_bufferpool_allocated_bytes",
+               "arvados_keepstore_pull_queue_inprogress_entries",
+               "arvados_keepstore_pull_queue_pending_entries",
+               "arvados_keepstore_concurrent_requests",
+               "arvados_keepstore_max_concurrent_requests",
+               "arvados_keepstore_trash_queue_inprogress_entries",
+               "arvados_keepstore_trash_queue_pending_entries",
+               "request_duration_seconds",
+               "time_to_status_seconds",
+       }
+       for _, m := range metricsNames {
+               _, ok := names[m]
+               c.Check(ok, check.Equals, true)
+       }
+}
+
+func (s *MountsSuite) call(method, path, tok string, body []byte) *httptest.ResponseRecorder {
+       resp := httptest.NewRecorder()
+       req, _ := http.NewRequest(method, path, bytes.NewReader(body))
+       if tok != "" {
+               req.Header.Set("Authorization", "Bearer "+tok)
+       }
+       s.rtr.ServeHTTP(resp, req)
+       return resp
+}
diff --git a/services/keepstore/perms.go b/services/keepstore/perms.go
new file mode 100644 (file)
index 0000000..49a2316
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+)
+
+// SignLocator takes a blobLocator, an apiToken and an expiry time, and
+// returns a signed locator string.
+func SignLocator(blobLocator, apiToken string, expiry time.Time) string {
+       return keepclient.SignLocator(blobLocator, apiToken, expiry, theConfig.BlobSignatureTTL.Duration(), theConfig.blobSigningKey)
+}
+
+// VerifySignature returns nil if the signature on the signedLocator
+// can be verified using the given apiToken. Otherwise it returns
+// either ExpiredError (if the timestamp has expired, which is
+// something the client could have figured out independently) or
+// PermissionError.
+func VerifySignature(signedLocator, apiToken string) error {
+       err := keepclient.VerifySignature(signedLocator, apiToken, theConfig.BlobSignatureTTL.Duration(), theConfig.blobSigningKey)
+       if err == keepclient.ErrSignatureExpired {
+               return ExpiredError
+       } else if err != nil {
+               return PermissionError
+       }
+       return nil
+}
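+
+// A hypothetical round trip (assuming theConfig holds a signing key and
+// BlobSignatureTTL, and "token" is a valid API token):
+//
+//   expiry := time.Now().Add(time.Hour)
+//   signed := SignLocator("acbd18db4cc2f85cedef654fccc4a4d8+3", token, expiry)
+//   VerifySignature(signed, token)   // nil until expiry
+//   VerifySignature(signed, "other") // PermissionError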
diff --git a/services/keepstore/perms_test.go b/services/keepstore/perms_test.go
new file mode 100644 (file)
index 0000000..dd57faf
--- /dev/null
@@ -0,0 +1,73 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "strconv"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+const (
+       knownHash    = "acbd18db4cc2f85cedef654fccc4a4d8"
+       knownLocator = knownHash + "+3"
+       knownToken   = "hocfupkn2pjhrpgp2vxv8rsku7tvtx49arbc9s4bvu7p7wxqvk"
+       knownKey     = "13u9fkuccnboeewr0ne3mvapk28epf68a3bhj9q8sb4l6e4e5mkk" +
+               "p6nhj2mmpscgu1zze5h5enydxfe3j215024u16ij4hjaiqs5u4pzsl3nczmaoxnc" +
+               "ljkm4875xqn4xv058koz3vkptmzhyheiy6wzevzjmdvxhvcqsvr5abhl15c2d4o4" +
+               "jhl0s91lojy1mtrzqqvprqcverls0xvy9vai9t1l1lvvazpuadafm71jl4mrwq2y" +
+               "gokee3eamvjy8qq1fvy238838enjmy5wzy2md7yvsitp5vztft6j4q866efym7e6" +
+               "vu5wm9fpnwjyxfldw3vbo01mgjs75rgo7qioh8z8ij7jpyp8508okhgbbex3ceei" +
+               "786u5rw2a9gx743dj3fgq2irk"
+       knownSignatureTTL  = arvados.Duration(24 * 14 * time.Hour)
+       knownSignature     = "89118b78732c33104a4d6231e8b5a5fa1e4301e3"
+       knownTimestamp     = "7fffffff"
+       knownSigHint       = "+A" + knownSignature + "@" + knownTimestamp
+       knownSignedLocator = knownLocator + knownSigHint
+)
+
+func TestSignLocator(t *testing.T) {
+       defer func(b []byte) {
+               theConfig.blobSigningKey = b
+       }(theConfig.blobSigningKey)
+
+       tsInt, err := strconv.ParseInt(knownTimestamp, 16, 0)
+       if err != nil {
+               t.Fatal(err)
+       }
+       t0 := time.Unix(tsInt, 0)
+
+       theConfig.BlobSignatureTTL = knownSignatureTTL
+
+       theConfig.blobSigningKey = []byte(knownKey)
+       if x := SignLocator(knownLocator, knownToken, t0); x != knownSignedLocator {
+               t.Fatalf("Got %+q, expected %+q", x, knownSignedLocator)
+       }
+
+       theConfig.blobSigningKey = []byte("arbitrarykey")
+       if x := SignLocator(knownLocator, knownToken, t0); x == knownSignedLocator {
+               t.Fatalf("Got same signature %+q, even though blobSigningKey changed", x)
+       }
+}
+
+func TestVerifyLocator(t *testing.T) {
+       defer func(b []byte) {
+               theConfig.blobSigningKey = b
+       }(theConfig.blobSigningKey)
+
+       theConfig.BlobSignatureTTL = knownSignatureTTL
+
+       theConfig.blobSigningKey = []byte(knownKey)
+       if err := VerifySignature(knownSignedLocator, knownToken); err != nil {
+               t.Fatal(err)
+       }
+
+       theConfig.blobSigningKey = []byte("arbitrarykey")
+       if err := VerifySignature(knownSignedLocator, knownToken); err == nil {
+               t.Fatal("Verified signature even with wrong blobSigningKey")
+       }
+}
diff --git a/services/keepstore/pipe_adapters.go b/services/keepstore/pipe_adapters.go
new file mode 100644 (file)
index 0000000..69ed6d2
--- /dev/null
@@ -0,0 +1,93 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "io"
+       "io/ioutil"
+)
+
+// getWithPipe invokes br.ReadBlock through a pipe and copies the
+// resulting data into buf. If ctx is done before all data is copied,
+// getWithPipe closes the pipe with an error and returns early.
+func getWithPipe(ctx context.Context, loc string, buf []byte, br BlockReader) (int, error) {
+       piper, pipew := io.Pipe()
+       go func() {
+               pipew.CloseWithError(br.ReadBlock(ctx, loc, pipew))
+       }()
+       done := make(chan struct{})
+       var size int
+       var err error
+       go func() {
+               size, err = io.ReadFull(piper, buf)
+               if err == io.EOF || err == io.ErrUnexpectedEOF {
+                       err = nil
+               }
+               close(done)
+       }()
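+       // Wait for whichever happens first: the copy finishes, or the
+       // caller's context is canceled.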
+       select {
+       case <-ctx.Done():
+               piper.CloseWithError(ctx.Err())
+               return 0, ctx.Err()
+       case <-done:
+               piper.Close()
+               return size, err
+       }
+}
+
+// putWithPipe invokes bw.WriteBlock with a new pipe, and copies data
+// from buf into the pipe. If ctx is done before all data is copied,
+// putWithPipe closes the pipe with an error and returns early.
+func putWithPipe(ctx context.Context, loc string, buf []byte, bw BlockWriter) error {
+       piper, pipew := io.Pipe()
+       copyErr := make(chan error)
+       go func() {
+               _, err := io.Copy(pipew, bytes.NewReader(buf))
+               copyErr <- err
+               close(copyErr)
+       }()
+
+       putErr := make(chan error, 1)
+       go func() {
+               putErr <- bw.WriteBlock(ctx, loc, piper)
+               close(putErr)
+       }()
+
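+       // Whichever error arrives first wins: from the buf-to-pipe
+       // copy, from the block writer, or from context cancellation.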
+       var err error
+       select {
+       case err = <-copyErr:
+       case err = <-putErr:
+       case <-ctx.Done():
+               err = ctx.Err()
+       }
+
+       // Ensure the io.Copy goroutine isn't blocked writing to pipew
+       // (otherwise, io.Copy is still using buf, so it isn't safe to
+       // return). Draining piper here can feed corrupt data to
+       // bw.WriteBlock if err came from copyErr or ctx.Done() before
+       // the copy finished. That's OK, though: in that case err !=
+       // nil, and CloseWithError(err) ensures bw.WriteBlock will get
+       // an error from piper.Read() before seeing EOF.
+       go pipew.CloseWithError(err)
+       go io.Copy(ioutil.Discard, piper)
+       <-copyErr
+
+       // Note: io.Copy() is finished now, but bw.WriteBlock() might
+       // still be running. If we encountered an error before it
+       // returned, we return right away without waiting for it.
+
+       if err != nil {
+               return err
+       }
+       select {
+       case <-ctx.Done():
+               return ctx.Err()
+       case err = <-putErr:
+               return err
+       }
+}
diff --git a/services/keepstore/proxy_remote.go b/services/keepstore/proxy_remote.go
new file mode 100644 (file)
index 0000000..1f82f3f
--- /dev/null
@@ -0,0 +1,206 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "context"
+       "errors"
+       "io"
+       "net/http"
+       "regexp"
+       "strings"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/auth"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+)
+
+type remoteProxy struct {
+       clients map[string]*keepclient.KeepClient
+       mtx     sync.Mutex
+}
+
+func (rp *remoteProxy) Get(ctx context.Context, w http.ResponseWriter, r *http.Request, cluster *arvados.Cluster) {
+       // Intervening proxies must not return a cached GET response
+       // to a prior request if an X-Keep-Signature request header
+       // has been added or changed.
+       w.Header().Add("Vary", "X-Keep-Signature")
+
+       token := GetAPIToken(r)
+       if token == "" {
+               http.Error(w, "no token provided in Authorization header", http.StatusUnauthorized)
+               return
+       }
+       if strings.SplitN(r.Header.Get("X-Keep-Signature"), ",", 2)[0] == "local" {
+               buf, err := getBufferWithContext(ctx, bufs, BlockSize)
+               if err != nil {
+                       http.Error(w, err.Error(), http.StatusServiceUnavailable)
+                       return
+               }
+               defer bufs.Put(buf)
+               rrc := &remoteResponseCacher{
+                       Locator:        r.URL.Path[1:],
+                       Token:          token,
+                       Buffer:         buf[:0],
+                       ResponseWriter: w,
+                       Context:        ctx,
+               }
+               defer rrc.Close()
+               w = rrc
+       }
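+       // Scan the locator's hints: keep the hash and size, drop any
+       // local +A permission hint, and use the +R hint to pick the
+       // remote cluster, rewriting it as a +A hint for the remote.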
+       var remoteClient *keepclient.KeepClient
+       var parts []string
+       for i, part := range strings.Split(r.URL.Path[1:], "+") {
+               switch {
+               case i == 0:
+                       // don't try to parse hash part as hint
+               case strings.HasPrefix(part, "A"):
+                       // drop local permission hint
+                       continue
+               case len(part) > 7 && part[0] == 'R' && part[6] == '-':
+                       remoteID := part[1:6]
+                       remote, ok := cluster.RemoteClusters[remoteID]
+                       if !ok {
+                               http.Error(w, "remote cluster not configured", http.StatusBadRequest)
+                               return
+                       }
+                       kc, err := rp.remoteClient(remoteID, remote, token)
+                       if err == auth.ErrObsoleteToken {
+                               http.Error(w, err.Error(), http.StatusBadRequest)
+                               return
+                       } else if err != nil {
+                               http.Error(w, err.Error(), http.StatusInternalServerError)
+                               return
+                       }
+                       remoteClient = kc
+                       part = "A" + part[7:]
+               }
+               parts = append(parts, part)
+       }
+       if remoteClient == nil {
+               http.Error(w, "bad request", http.StatusBadRequest)
+               return
+       }
+       locator := strings.Join(parts, "+")
+       rdr, _, _, err := remoteClient.Get(locator)
+       switch err.(type) {
+       case nil:
+               defer rdr.Close()
+               io.Copy(w, rdr)
+       case *keepclient.ErrNotFound:
+               http.Error(w, err.Error(), http.StatusNotFound)
+       default:
+               http.Error(w, err.Error(), http.StatusBadGateway)
+       }
+}
+
+func (rp *remoteProxy) remoteClient(remoteID string, remoteCluster arvados.RemoteCluster, token string) (*keepclient.KeepClient, error) {
+       rp.mtx.Lock()
+       kc, ok := rp.clients[remoteID]
+       rp.mtx.Unlock()
+       if !ok {
+               c := &arvados.Client{
+                       APIHost:   remoteCluster.Host,
+                       AuthToken: "xxx",
+                       Insecure:  remoteCluster.Insecure,
+               }
+               ac, err := arvadosclient.New(c)
+               if err != nil {
+                       return nil, err
+               }
+               kc, err = keepclient.MakeKeepClient(ac)
+               if err != nil {
+                       return nil, err
+               }
+
+               rp.mtx.Lock()
+               if rp.clients == nil {
+                       rp.clients = map[string]*keepclient.KeepClient{remoteID: kc}
+               } else {
+                       rp.clients[remoteID] = kc
+               }
+               rp.mtx.Unlock()
+       }
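+       // Copy the cached client and substitute the caller's token
+       // (salted for the remote cluster) so the shared client is
+       // never mutated.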
+       accopy := *kc.Arvados
+       accopy.ApiToken = token
+       kccopy := *kc
+       kccopy.Arvados = &accopy
+       token, err := auth.SaltToken(token, remoteID)
+       if err != nil {
+               return nil, err
+       }
+       kccopy.Arvados.ApiToken = token
+       return &kccopy, nil
+}
+
+var localOrRemoteSignature = regexp.MustCompile(`\+[AR][^\+]*`)
+
+// remoteResponseCacher wraps http.ResponseWriter. It buffers the
+// response data in the provided buffer, writes/touches a copy on a
+// local volume, adds a response header with a locally-signed locator,
+// and finally writes the data through.
+type remoteResponseCacher struct {
+       Locator string
+       Token   string
+       Buffer  []byte
+       Context context.Context
+       http.ResponseWriter
+       statusCode int
+}
+
+func (rrc *remoteResponseCacher) Write(p []byte) (int, error) {
+       if len(rrc.Buffer)+len(p) > cap(rrc.Buffer) {
+               return 0, errors.New("buffer full")
+       }
+       rrc.Buffer = append(rrc.Buffer, p...)
+       return len(p), nil
+}
+
+func (rrc *remoteResponseCacher) WriteHeader(statusCode int) {
+       rrc.statusCode = statusCode
+}
+
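+// Close delivers the buffered response. For a 200 response it first
+// stores the block on a local volume via PutBlock, then advertises a
+// locally signed copy of the locator (with the +R hint stripped) in
+// the X-Keep-Locator response header before writing the status and
+// body through to the client.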
+func (rrc *remoteResponseCacher) Close() error {
+       if rrc.statusCode == 0 {
+               rrc.statusCode = http.StatusOK
+       } else if rrc.statusCode != http.StatusOK {
+               rrc.ResponseWriter.WriteHeader(rrc.statusCode)
+               rrc.ResponseWriter.Write(rrc.Buffer)
+               return nil
+       }
+       _, err := PutBlock(rrc.Context, rrc.Buffer, rrc.Locator[:32])
+       if rrc.Context.Err() != nil {
+               // If the caller hung up, log that instead of subsequent/misleading errors.
+               http.Error(rrc.ResponseWriter, rrc.Context.Err().Error(), http.StatusGatewayTimeout)
+               return err
+       }
+       if err == RequestHashError {
+               http.Error(rrc.ResponseWriter, "checksum mismatch in remote response", http.StatusBadGateway)
+               return err
+       }
+       if err, ok := err.(*KeepError); ok {
+               http.Error(rrc.ResponseWriter, err.Error(), err.HTTPCode)
+               return err
+       }
+       if err != nil {
+               http.Error(rrc.ResponseWriter, err.Error(), http.StatusBadGateway)
+               return err
+       }
+
+       unsigned := localOrRemoteSignature.ReplaceAllLiteralString(rrc.Locator, "")
+       signed := SignLocator(unsigned, rrc.Token, time.Now().Add(theConfig.BlobSignatureTTL.Duration()))
+       if signed == unsigned {
+               err = errors.New("could not sign locator")
+               http.Error(rrc.ResponseWriter, err.Error(), http.StatusInternalServerError)
+               return err
+       }
+       rrc.Header().Set("X-Keep-Locator", signed)
+       rrc.ResponseWriter.WriteHeader(rrc.statusCode)
+       _, err = rrc.ResponseWriter.Write(rrc.Buffer)
+       return err
+}
diff --git a/services/keepstore/proxy_remote_test.go b/services/keepstore/proxy_remote_test.go
new file mode 100644 (file)
index 0000000..6c22d1d
--- /dev/null
@@ -0,0 +1,223 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "crypto/md5"
+       "encoding/json"
+       "fmt"
+       "net"
+       "net/http"
+       "net/http/httptest"
+       "strconv"
+       "strings"
+       "sync/atomic"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/auth"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       "github.com/prometheus/client_golang/prometheus"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&ProxyRemoteSuite{})
+
+type ProxyRemoteSuite struct {
+       cluster *arvados.Cluster
+       vm      VolumeManager
+       rtr     http.Handler
+
+       remoteClusterID      string
+       remoteBlobSigningKey []byte
+       remoteKeepLocator    string
+       remoteKeepData       []byte
+       remoteKeepproxy      *httptest.Server
+       remoteKeepRequests   int64
+       remoteAPI            *httptest.Server
+}
+
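+// remoteKeepproxyHandler plays the role of the remote cluster's
+// keepproxy: it serves s.remoteKeepData for the expected locator when
+// presented with the correctly salted token, and responds 404
+// otherwise.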
+func (s *ProxyRemoteSuite) remoteKeepproxyHandler(w http.ResponseWriter, r *http.Request) {
+       expectToken, err := auth.SaltToken(arvadostest.ActiveTokenV2, s.remoteClusterID)
+       if err != nil {
+               panic(err)
+       }
+       atomic.AddInt64(&s.remoteKeepRequests, 1)
+       var token string
+       if auth := strings.Split(r.Header.Get("Authorization"), " "); len(auth) == 2 && (auth[0] == "OAuth2" || auth[0] == "Bearer") {
+               token = auth[1]
+       }
+       if r.Method == "GET" && r.URL.Path == "/"+s.remoteKeepLocator && token == expectToken {
+               w.Write(s.remoteKeepData)
+               return
+       }
+       http.Error(w, "404", 404)
+}
+
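+// remoteAPIHandler implements just enough of the remote cluster's API
+// server for keepclient: an empty discovery document, plus a
+// keep_services list pointing at the fake keepproxy above.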
+func (s *ProxyRemoteSuite) remoteAPIHandler(w http.ResponseWriter, r *http.Request) {
+       host, port, _ := net.SplitHostPort(strings.Split(s.remoteKeepproxy.URL, "//")[1])
+       portnum, _ := strconv.Atoi(port)
+       if r.URL.Path == "/arvados/v1/discovery/v1/rest" {
+               json.NewEncoder(w).Encode(arvados.DiscoveryDocument{})
+               return
+       }
+       if r.URL.Path == "/arvados/v1/keep_services/accessible" {
+               json.NewEncoder(w).Encode(arvados.KeepServiceList{
+                       Items: []arvados.KeepService{
+                               {
+                                       UUID:           s.remoteClusterID + "-bi6l4-proxyproxyproxy",
+                                       ServiceType:    "proxy",
+                                       ServiceHost:    host,
+                                       ServicePort:    portnum,
+                                       ServiceSSLFlag: false,
+                               },
+                       },
+               })
+               return
+       }
+       http.Error(w, "404", 404)
+}
+
+func (s *ProxyRemoteSuite) SetUpTest(c *check.C) {
+       s.remoteClusterID = "z0000"
+       s.remoteBlobSigningKey = []byte("3b6df6fb6518afe12922a5bc8e67bf180a358bc8")
+       s.remoteKeepproxy = httptest.NewServer(http.HandlerFunc(s.remoteKeepproxyHandler))
+       s.remoteAPI = httptest.NewUnstartedServer(http.HandlerFunc(s.remoteAPIHandler))
+       s.remoteAPI.StartTLS()
+       s.cluster = arvados.IntegrationTestCluster()
+       s.cluster.RemoteClusters = map[string]arvados.RemoteCluster{
+               s.remoteClusterID: arvados.RemoteCluster{
+                       Host:     strings.Split(s.remoteAPI.URL, "//")[1],
+                       Proxy:    true,
+                       Scheme:   "http",
+                       Insecure: true,
+               },
+       }
+       s.vm = MakeTestVolumeManager(2)
+       KeepVM = s.vm
+       theConfig = DefaultConfig()
+       theConfig.systemAuthToken = arvadostest.DataManagerToken
+       theConfig.blobSigningKey = []byte(knownKey)
+       r := prometheus.NewRegistry()
+       theConfig.Start(r)
+       s.rtr = MakeRESTRouter(s.cluster, r)
+}
+
+func (s *ProxyRemoteSuite) TearDownTest(c *check.C) {
+       s.vm.Close()
+       KeepVM = nil
+       theConfig = DefaultConfig()
+       theConfig.Start(prometheus.NewRegistry())
+       s.remoteAPI.Close()
+       s.remoteKeepproxy.Close()
+}
+
+func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
+       data := []byte("foo bar")
+       s.remoteKeepData = data
+       locator := fmt.Sprintf("%x+%d", md5.Sum(data), len(data))
+       s.remoteKeepLocator = keepclient.SignLocator(locator, arvadostest.ActiveTokenV2, time.Now().Add(time.Minute), time.Minute, s.remoteBlobSigningKey)
+
+       path := "/" + strings.Replace(s.remoteKeepLocator, "+A", "+R"+s.remoteClusterID+"-", 1)
+
+       for _, trial := range []struct {
+               label            string
+               method           string
+               token            string
+               xKeepSignature   string
+               expectRemoteReqs int64
+               expectCode       int
+               expectSignature  bool
+       }{
+               {
+                       label:            "GET only",
+                       method:           "GET",
+                       token:            arvadostest.ActiveTokenV2,
+                       expectRemoteReqs: 1,
+                       expectCode:       http.StatusOK,
+               },
+               {
+                       label:            "obsolete token",
+                       method:           "GET",
+                       token:            arvadostest.ActiveToken,
+                       expectRemoteReqs: 0,
+                       expectCode:       http.StatusBadRequest,
+               },
+               {
+                       label:            "bad token",
+                       method:           "GET",
+                       token:            arvadostest.ActiveTokenV2[:len(arvadostest.ActiveTokenV2)-3] + "xxx",
+                       expectRemoteReqs: 1,
+                       expectCode:       http.StatusNotFound,
+               },
+               {
+                       label:            "HEAD only",
+                       method:           "HEAD",
+                       token:            arvadostest.ActiveTokenV2,
+                       expectRemoteReqs: 1,
+                       expectCode:       http.StatusOK,
+               },
+               {
+                       label:            "HEAD with local signature",
+                       method:           "HEAD",
+                       xKeepSignature:   "local, time=" + time.Now().Format(time.RFC3339),
+                       token:            arvadostest.ActiveTokenV2,
+                       expectRemoteReqs: 1,
+                       expectCode:       http.StatusOK,
+                       expectSignature:  true,
+               },
+               {
+                       label:            "GET with local signature",
+                       method:           "GET",
+                       xKeepSignature:   "local, time=" + time.Now().Format(time.RFC3339),
+                       token:            arvadostest.ActiveTokenV2,
+                       expectRemoteReqs: 1,
+                       expectCode:       http.StatusOK,
+                       expectSignature:  true,
+               },
+       } {
+               c.Logf("trial: %s", trial.label)
+
+               s.remoteKeepRequests = 0
+
+               var req *http.Request
+               var resp *httptest.ResponseRecorder
+               req = httptest.NewRequest(trial.method, path, nil)
+               req.Header.Set("Authorization", "Bearer "+trial.token)
+               if trial.xKeepSignature != "" {
+                       req.Header.Set("X-Keep-Signature", trial.xKeepSignature)
+               }
+               resp = httptest.NewRecorder()
+               s.rtr.ServeHTTP(resp, req)
+               c.Check(s.remoteKeepRequests, check.Equals, trial.expectRemoteReqs)
+               c.Check(resp.Code, check.Equals, trial.expectCode)
+               if resp.Code == http.StatusOK {
+                       c.Check(resp.Body.String(), check.Equals, string(data))
+               } else {
+                       c.Check(resp.Body.String(), check.Not(check.Equals), string(data))
+               }
+
+               c.Check(resp.Header().Get("Vary"), check.Matches, `(.*, )?X-Keep-Signature(, .*)?`)
+
+               locHdr := resp.Header().Get("X-Keep-Locator")
+               if !trial.expectSignature {
+                       c.Check(locHdr, check.Equals, "")
+                       continue
+               }
+
+               c.Check(locHdr, check.Not(check.Equals), "")
+               c.Check(locHdr, check.Not(check.Matches), `.*\+R.*`)
+               c.Check(VerifySignature(locHdr, trial.token), check.IsNil)
+
+               // Ensure block can be requested using new signature
+               req = httptest.NewRequest("GET", "/"+locHdr, nil)
+               req.Header.Set("Authorization", "Bearer "+trial.token)
+               resp = httptest.NewRecorder()
+               s.rtr.ServeHTTP(resp, req)
+               c.Check(resp.Code, check.Equals, http.StatusOK)
+               c.Check(s.remoteKeepRequests, check.Equals, trial.expectRemoteReqs)
+       }
+}
diff --git a/services/keepstore/pull_worker.go b/services/keepstore/pull_worker.go
new file mode 100644 (file)
index 0000000..42b5d58
--- /dev/null
@@ -0,0 +1,110 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "context"
+       "crypto/rand"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+)
+
+// RunPullWorker receives PullRequests from pullq and invokes
+// PullItemAndProcess on each one. After each request, it logs a
+// message indicating whether the pull was successful.
+func RunPullWorker(pullq *WorkQueue, keepClient *keepclient.KeepClient) {
+       for item := range pullq.NextItem {
+               pr := item.(PullRequest)
+               err := PullItemAndProcess(pr, keepClient)
+               pullq.DoneItem <- struct{}{}
+               if err == nil {
+                       log.Printf("Pull %s success", pr)
+               } else {
+                       log.Printf("Pull %s error: %s", pr, err)
+               }
+       }
+}
+
+// PullItemAndProcess executes a pull request by retrieving the
+// specified block from one of the specified servers, and storing it
+// on a local volume.
+//
+// If the PR specifies a non-blank mount UUID, PullItemAndProcess will
+// only attempt to write the data to the corresponding
+// volume. Otherwise it writes to any local volume, as a PUT request
+// would.
+func PullItemAndProcess(pullRequest PullRequest, keepClient *keepclient.KeepClient) error {
+       var vol Volume
+       if uuid := pullRequest.MountUUID; uuid != "" {
+               vol = KeepVM.Lookup(pullRequest.MountUUID, true)
+               if vol == nil {
+                       return fmt.Errorf("pull req has nonexistent mount: %v", pullRequest)
+               }
+       }
+
+       keepClient.Arvados.ApiToken = randomToken
+
+       serviceRoots := make(map[string]string)
+       for _, addr := range pullRequest.Servers {
+               serviceRoots[addr] = addr
+       }
+       keepClient.SetServiceRoots(serviceRoots, nil, nil)
+
+       // Generate signature with a random token
+       expiresAt := time.Now().Add(60 * time.Second)
+       signedLocator := SignLocator(pullRequest.Locator, randomToken, expiresAt)
+
+       reader, contentLen, _, err := GetContent(signedLocator, keepClient)
+       if err != nil {
+               return err
+       }
+       if reader == nil {
+               return fmt.Errorf("no reader found for %s", signedLocator)
+       }
+       defer reader.Close()
+
+       readContent, err := ioutil.ReadAll(reader)
+       if err != nil {
+               return err
+       }
+
+       if (readContent == nil) || (int64(len(readContent)) != contentLen) {
+               return fmt.Errorf("content not found for %s", signedLocator)
+       }
+
+       writePulledBlock(vol, readContent, pullRequest.Locator)
+       return nil
+}
+
+// GetContent fetches the content for the given locator using
+// keepclient. It is a package-level variable so tests can replace it
+// with a stub.
+var GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (io.ReadCloser, int64, string, error) {
+       return keepClient.Get(signedLocator)
+}
+
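+// writePulledBlock stores a pulled block, either on the specified
+// volume or (if none was specified) via PutBlock, as a normal PUT
+// would. It is a package-level variable so tests can substitute a
+// mock.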
+var writePulledBlock = func(volume Volume, data []byte, locator string) {
+       var err error
+       if volume != nil {
+               err = volume.Put(context.Background(), locator, data)
+       } else {
+               _, err = PutBlock(context.Background(), data, locator)
+       }
+       if err != nil {
+               log.Printf("error writing pulled block %q: %s", locator, err)
+       }
+}
+
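+// randomToken is a fixed 36-character alphanumeric token generated
+// once at startup from crypto/rand. Pull requests are signed locally
+// with this token; since the signature is generated and presented
+// with the same token, it verifies at the source server.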
+var randomToken = func() string {
+       const alphaNumeric = "0123456789abcdefghijklmnopqrstuvwxyz"
+       var bytes = make([]byte, 36)
+       rand.Read(bytes)
+       for i, b := range bytes {
+               bytes[i] = alphaNumeric[b%byte(len(alphaNumeric))]
+       }
+       return string(bytes)
+}()
diff --git a/services/keepstore/pull_worker_integration_test.go b/services/keepstore/pull_worker_integration_test.go
new file mode 100644 (file)
index 0000000..231a4c0
--- /dev/null
@@ -0,0 +1,146 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "errors"
+       "io"
+       "io/ioutil"
+       "os"
+       "strings"
+       "testing"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+)
+
+var keepClient *keepclient.KeepClient
+
+type PullWorkIntegrationTestData struct {
+       Name     string
+       Locator  string
+       Content  string
+       GetError string
+}
+
+func SetupPullWorkerIntegrationTest(t *testing.T, testData PullWorkIntegrationTestData, wantData bool) PullRequest {
+       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+
+       // start api and keep servers
+       arvadostest.StartAPI()
+       arvadostest.StartKeep(2, false)
+
+       // make arvadosclient
+       arv, err := arvadosclient.MakeArvadosClient()
+       if err != nil {
+               t.Fatalf("Error creating arv: %s", err)
+       }
+
+       // keep client
+       keepClient, err = keepclient.MakeKeepClient(arv)
+       if err != nil {
+               t.Fatalf("error creating KeepClient: %s", err)
+       }
+       keepClient.Want_replicas = 1
+       keepClient.RefreshServiceDiscovery()
+
+       // discover keep services
+       var servers []string
+       for _, host := range keepClient.LocalRoots() {
+               servers = append(servers, host)
+       }
+
+       // Put content if the test needs it
+       if wantData {
+               locator, _, err := keepClient.PutB([]byte(testData.Content))
+               if err != nil {
+                       t.Errorf("error putting test data %q (locator %q): %v", testData.Content, locator, err)
+               }
+               if locator == "" {
+                       t.Errorf("No locator found after putting test data")
+               }
+       }
+
+       // Create pullRequest for the test
+       pullRequest := PullRequest{
+               Locator: testData.Locator,
+               Servers: servers,
+       }
+       return pullRequest
+}
+
+// Do a get on a block that does not exist on any of the keep servers.
+// Expect a "block not found" error.
+func TestPullWorkerIntegration_GetNonExistingLocator(t *testing.T) {
+       testData := PullWorkIntegrationTestData{
+               Name:     "TestPullWorkerIntegration_GetLocator",
+               Locator:  "5d41402abc4b2a76b9719d911017c592",
+               Content:  "hello",
+               GetError: "Block not found",
+       }
+
+       pullRequest := SetupPullWorkerIntegrationTest(t, testData, false)
+       defer arvadostest.StopAPI()
+       defer arvadostest.StopKeep(2)
+
+       performPullWorkerIntegrationTest(testData, pullRequest, t)
+}
+
+// Do a get on a block that exists on one of the keep servers.
+// The setup method will create this block before doing the get.
+func TestPullWorkerIntegration_GetExistingLocator(t *testing.T) {
+       testData := PullWorkIntegrationTestData{
+               Name:     "TestPullWorkerIntegration_GetLocator",
+               Locator:  "5d41402abc4b2a76b9719d911017c592",
+               Content:  "hello",
+               GetError: "",
+       }
+
+       pullRequest := SetupPullWorkerIntegrationTest(t, testData, true)
+       defer arvadostest.StopAPI()
+       defer arvadostest.StopKeep(2)
+
+       performPullWorkerIntegrationTest(testData, pullRequest, t)
+}
+
+// performPullWorkerIntegrationTest invokes PullItemAndProcess
+// directly, rather than putting an item on the pullq, so that the
+// resulting errors can be verified.
+func performPullWorkerIntegrationTest(testData PullWorkIntegrationTestData, pullRequest PullRequest, t *testing.T) {
+
+       // Override writePulledBlock to mock PutBlock functionality
+       defer func(orig func(Volume, []byte, string)) { writePulledBlock = orig }(writePulledBlock)
+       writePulledBlock = func(v Volume, content []byte, locator string) {
+               if string(content) != testData.Content {
+                       t.Errorf("writePulledBlock invoked with unexpected data. Expected: %s; Found: %s", testData.Content, content)
+               }
+       }
+
+       // Override GetContent to mock keepclient Get functionality
+       defer func(orig func(string, *keepclient.KeepClient) (io.ReadCloser, int64, string, error)) {
+               GetContent = orig
+       }(GetContent)
+       GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (reader io.ReadCloser, contentLength int64, url string, err error) {
+               if testData.GetError != "" {
+                       return nil, 0, "", errors.New(testData.GetError)
+               }
+               rdr := ioutil.NopCloser(bytes.NewBufferString(testData.Content))
+               return rdr, int64(len(testData.Content)), "", nil
+       }
+
+       err := PullItemAndProcess(pullRequest, keepClient)
+
+       if len(testData.GetError) > 0 {
+               if (err == nil) || (!strings.Contains(err.Error(), testData.GetError)) {
+                       t.Errorf("Got error %v, expected %v", err, testData.GetError)
+               }
+       } else {
+               if err != nil {
+                       t.Errorf("Got error %v, expected nil", err)
+               }
+       }
+}
diff --git a/services/keepstore/pull_worker_test.go b/services/keepstore/pull_worker_test.go
new file mode 100644 (file)
index 0000000..8e667e0
--- /dev/null
@@ -0,0 +1,317 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "errors"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       "github.com/prometheus/client_golang/prometheus"
+       . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&PullWorkerTestSuite{})
+
+type PullWorkerTestSuite struct {
+       testPullLists map[string]string
+       readContent   string
+       readError     error
+       putContent    []byte
+       putError      error
+}
+
+func (s *PullWorkerTestSuite) SetUpTest(c *C) {
+       theConfig.systemAuthToken = "arbitrary data manager token"
+       s.readContent = ""
+       s.readError = nil
+       s.putContent = []byte{}
+       s.putError = nil
+
+       // When a new pull request arrives, the old one will be overwritten.
+       // This behavior is verified using these two maps in
+       // TestPullWorkerPullList_with_two_items_latest_replacing_old.
+       s.testPullLists = make(map[string]string)
+
+       KeepVM = MakeTestVolumeManager(2)
+
+       // Normally the pull queue and workers are started by main()
+       // -- tests need to set up their own.
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, IsNil)
+       keepClient, err := keepclient.MakeKeepClient(arv)
+       c.Assert(err, IsNil)
+       pullq = NewWorkQueue()
+       go RunPullWorker(pullq, keepClient)
+}
+
+func (s *PullWorkerTestSuite) TearDownTest(c *C) {
+       KeepVM.Close()
+       KeepVM = nil
+       pullq.Close()
+       pullq = nil
+       teardown()
+       theConfig = DefaultConfig()
+       theConfig.Start(prometheus.NewRegistry())
+}
+
+var firstPullList = []byte(`[
+               {
+                       "locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
+                       "servers":[
+                               "server_1",
+                               "server_2"
+                       ]
+               },{
+                       "locator":"37b51d194a7513e45b56f6524f2d51f2+3",
+                       "servers":[
+                               "server_3"
+                       ]
+               }
+       ]`)
+
+var secondPullList = []byte(`[
+               {
+                       "locator":"73feffa4b7f6bb68e44cf984c85f6e88+3",
+                       "servers":[
+                               "server_1",
+                               "server_2"
+                       ]
+               }
+       ]`)
+
+type PullWorkerTestData struct {
+       name         string
+       req          RequestTester
+       responseCode int
+       responseBody string
+       readContent  string
+       readError    bool
+       putError     bool
+}
+
+// Ensure MountUUID in a pull list is correctly translated to a Volume
+// argument passed to writePulledBlock().
+func (s *PullWorkerTestSuite) TestSpecifyMountUUID(c *C) {
+       defer func(f func(Volume, []byte, string)) {
+               writePulledBlock = f
+       }(writePulledBlock)
+
+       for _, spec := range []struct {
+               sendUUID     string
+               expectVolume Volume
+       }{
+               {
+                       sendUUID:     "",
+                       expectVolume: nil,
+               },
+               {
+                       sendUUID:     KeepVM.Mounts()[0].UUID,
+                       expectVolume: KeepVM.Mounts()[0].volume,
+               },
+       } {
+               writePulledBlock = func(v Volume, _ []byte, _ string) {
+                       c.Check(v, Equals, spec.expectVolume)
+               }
+
+               resp := IssueRequest(&RequestTester{
+                       uri:      "/pull",
+                       apiToken: theConfig.systemAuthToken,
+                       method:   "PUT",
+                       requestBody: []byte(`[{
+                               "locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
+                               "servers":["server_1","server_2"],
+                               "mount_uuid":"` + spec.sendUUID + `"}]`),
+               })
+               c.Assert(resp.Code, Equals, http.StatusOK)
+               expectEqualWithin(c, time.Second, 0, func() interface{} {
+                       st := pullq.Status()
+                       return st.InProgress + st.Queued
+               })
+       }
+}
+
+func (s *PullWorkerTestSuite) TestPullWorkerPullList_with_two_locators(c *C) {
+       testData := PullWorkerTestData{
+               name:         "TestPullWorkerPullList_with_two_locators",
+               req:          RequestTester{"/pull", theConfig.systemAuthToken, "PUT", firstPullList},
+               responseCode: http.StatusOK,
+               responseBody: "Received 2 pull requests\n",
+               readContent:  "hello",
+               readError:    false,
+               putError:     false,
+       }
+
+       s.performTest(testData, c)
+}
+
+func (s *PullWorkerTestSuite) TestPullWorkerPullList_with_one_locator(c *C) {
+       testData := PullWorkerTestData{
+               name:         "TestPullWorkerPullList_with_one_locator",
+               req:          RequestTester{"/pull", theConfig.systemAuthToken, "PUT", secondPullList},
+               responseCode: http.StatusOK,
+               responseBody: "Received 1 pull requests\n",
+               readContent:  "hola",
+               readError:    false,
+               putError:     false,
+       }
+
+       s.performTest(testData, c)
+}
+
+func (s *PullWorkerTestSuite) TestPullWorker_error_on_get_one_locator(c *C) {
+       testData := PullWorkerTestData{
+               name:         "TestPullWorker_error_on_get_one_locator",
+               req:          RequestTester{"/pull", theConfig.systemAuthToken, "PUT", secondPullList},
+               responseCode: http.StatusOK,
+               responseBody: "Received 1 pull requests\n",
+               readContent:  "unused",
+               readError:    true,
+               putError:     false,
+       }
+
+       s.performTest(testData, c)
+}
+
+func (s *PullWorkerTestSuite) TestPullWorker_error_on_get_two_locators(c *C) {
+       testData := PullWorkerTestData{
+               name:         "TestPullWorker_error_on_get_two_locators",
+               req:          RequestTester{"/pull", theConfig.systemAuthToken, "PUT", firstPullList},
+               responseCode: http.StatusOK,
+               responseBody: "Received 2 pull requests\n",
+               readContent:  "unused",
+               readError:    true,
+               putError:     false,
+       }
+
+       s.performTest(testData, c)
+}
+
+func (s *PullWorkerTestSuite) TestPullWorker_error_on_put_one_locator(c *C) {
+       testData := PullWorkerTestData{
+               name:         "TestPullWorker_error_on_put_one_locator",
+               req:          RequestTester{"/pull", theConfig.systemAuthToken, "PUT", secondPullList},
+               responseCode: http.StatusOK,
+               responseBody: "Received 1 pull requests\n",
+               readContent:  "hello hello",
+               readError:    false,
+               putError:     true,
+       }
+
+       s.performTest(testData, c)
+}
+
+func (s *PullWorkerTestSuite) TestPullWorker_error_on_put_two_locators(c *C) {
+       testData := PullWorkerTestData{
+               name:         "TestPullWorker_error_on_put_two_locators",
+               req:          RequestTester{"/pull", theConfig.systemAuthToken, "PUT", firstPullList},
+               responseCode: http.StatusOK,
+               responseBody: "Received 2 pull requests\n",
+               readContent:  "hello again",
+               readError:    false,
+               putError:     true,
+       }
+
+       s.performTest(testData, c)
+}
+
+// In this case, the item will not be placed on the pullq
+func (s *PullWorkerTestSuite) TestPullWorker_invalidToken(c *C) {
+       testData := PullWorkerTestData{
+               name:         "TestPullWorkerPullList_with_two_locators",
+               req:          RequestTester{"/pull", "invalidToken", "PUT", firstPullList},
+               responseCode: http.StatusUnauthorized,
+               responseBody: "Unauthorized\n",
+               readContent:  "hello",
+               readError:    false,
+               putError:     false,
+       }
+
+       s.performTest(testData, c)
+}
+
+func (s *PullWorkerTestSuite) performTest(testData PullWorkerTestData, c *C) {
+       s.testPullLists[testData.name] = testData.responseBody
+
+       processedPullLists := make(map[string]string)
+
+       // Override GetContent to mock keepclient Get functionality
+       defer func(orig func(string, *keepclient.KeepClient) (io.ReadCloser, int64, string, error)) {
+               GetContent = orig
+       }(GetContent)
+       GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (reader io.ReadCloser, contentLength int64, url string, err error) {
+               c.Assert(getStatusItem("PullQueue", "InProgress"), Equals, float64(1))
+               processedPullLists[testData.name] = testData.responseBody
+               if testData.readError {
+                       err = errors.New("Error getting data")
+                       s.readError = err
+                       return
+               }
+               s.readContent = testData.readContent
+               reader = ioutil.NopCloser(bytes.NewBufferString(testData.readContent))
+               contentLength = int64(len(testData.readContent))
+               return
+       }
+
+       // Override writePulledBlock to mock PutBlock functionality
+       defer func(orig func(Volume, []byte, string)) { writePulledBlock = orig }(writePulledBlock)
+       writePulledBlock = func(v Volume, content []byte, locator string) {
+               if testData.putError {
+                       s.putError = errors.New("Error putting data")
+                       return
+               }
+               s.putContent = content
+       }
+
+       c.Check(getStatusItem("PullQueue", "InProgress"), Equals, float64(0))
+       c.Check(getStatusItem("PullQueue", "Queued"), Equals, float64(0))
+       c.Check(getStatusItem("Version"), Not(Equals), "")
+
+       response := IssueRequest(&testData.req)
+       c.Assert(response.Code, Equals, testData.responseCode)
+       c.Assert(response.Body.String(), Equals, testData.responseBody)
+
+       expectEqualWithin(c, time.Second, 0, func() interface{} {
+               st := pullq.Status()
+               return st.InProgress + st.Queued
+       })
+
+       if testData.name == "TestPullWorkerPullList_with_two_items_latest_replacing_old" {
+               c.Assert(len(s.testPullLists), Equals, 2)
+               c.Assert(len(processedPullLists), Equals, 1)
+               c.Assert(s.testPullLists["Added_before_actual_test_item"], NotNil)
+               c.Assert(s.testPullLists["TestPullWorkerPullList_with_two_items_latest_replacing_old"], NotNil)
+               c.Assert(processedPullLists["TestPullWorkerPullList_with_two_items_latest_replacing_old"], NotNil)
+       } else {
+               if testData.responseCode == http.StatusOK {
+                       c.Assert(len(s.testPullLists), Equals, 1)
+                       c.Assert(len(processedPullLists), Equals, 1)
+                       c.Assert(s.testPullLists[testData.name], NotNil)
+               } else {
+                       c.Assert(len(s.testPullLists), Equals, 1)
+                       c.Assert(len(processedPullLists), Equals, 0)
+               }
+       }
+
+       if testData.readError {
+               c.Assert(s.readError, NotNil)
+       } else if testData.responseCode == http.StatusOK {
+               c.Assert(s.readError, IsNil)
+               c.Assert(s.readContent, Equals, testData.readContent)
+               if testData.putError {
+                       c.Assert(s.putError, NotNil)
+               } else {
+                       c.Assert(s.putError, IsNil)
+                       c.Assert(string(s.putContent), Equals, testData.readContent)
+               }
+       }
+
+       expectChannelEmpty(c, pullq.NextItem)
+}
diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
new file mode 100644 (file)
index 0000000..4c39dcd
--- /dev/null
@@ -0,0 +1,1034 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "crypto/sha256"
+       "encoding/base64"
+       "encoding/hex"
+       "flag"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "os"
+       "regexp"
+       "strings"
+       "sync"
+       "sync/atomic"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/AdRoll/goamz/aws"
+       "github.com/AdRoll/goamz/s3"
+       "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+       s3DefaultReadTimeout    = arvados.Duration(10 * time.Minute)
+       s3DefaultConnectTimeout = arvados.Duration(time.Minute)
+)
+
+var (
+       // ErrS3TrashDisabled is returned by Trash if that operation
+       // is impossible with the current config.
+       ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because -trash-lifetime=0 and -s3-unsafe-delete=false")
+
+       s3AccessKeyFile string
+       s3SecretKeyFile string
+       s3RegionName    string
+       s3Endpoint      string
+       s3Replication   int
+       s3UnsafeDelete  bool
+       s3RaceWindow    time.Duration
+
+       s3ACL = s3.Private
+
+       zeroTime time.Time
+)
+
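+// nearlyRFC1123 matches the Last-Modified format in S3 responses,
+// which (unlike RFC 1123 proper) does not zero-pad the day of month;
+// maxClockSkew bounds the clock difference tolerated when comparing
+// those timestamps against local time in the race checks later in
+// this file.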
+const (
+       maxClockSkew  = 600 * time.Second
+       nearlyRFC1123 = "Mon, 2 Jan 2006 15:04:05 GMT"
+)
+
+type s3VolumeAdder struct {
+       *Config
+}
+
+// String implements flag.Value
+func (s *s3VolumeAdder) String() string {
+       return "-"
+}
+
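+// Set implements flag.Value: each -s3-bucket-volume argument appends
+// a volume configured from the s3-* flags seen so far on the command
+// line.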
+func (s *s3VolumeAdder) Set(bucketName string) error {
+       if bucketName == "" {
+               return fmt.Errorf("no bucket name given")
+       }
+       if s3AccessKeyFile == "" || s3SecretKeyFile == "" {
+               return fmt.Errorf("-s3-access-key-file and -s3-secret-key-file arguments must be given before -s3-bucket-volume")
+       }
+       if deprecated.flagSerializeIO {
+               log.Print("Notice: -serialize is not supported by s3-bucket volumes.")
+       }
+       s.Config.Volumes = append(s.Config.Volumes, &S3Volume{
+               Bucket:        bucketName,
+               AccessKeyFile: s3AccessKeyFile,
+               SecretKeyFile: s3SecretKeyFile,
+               Endpoint:      s3Endpoint,
+               Region:        s3RegionName,
+               RaceWindow:    arvados.Duration(s3RaceWindow),
+               S3Replication: s3Replication,
+               UnsafeDelete:  s3UnsafeDelete,
+               ReadOnly:      deprecated.flagReadonly,
+               IndexPageSize: 1000,
+       })
+       return nil
+}
+
+func s3regions() (okList []string) {
+       for r := range aws.Regions {
+               okList = append(okList, r)
+       }
+       return
+}
+
+func init() {
+       VolumeTypes = append(VolumeTypes, func() VolumeWithExamples { return &S3Volume{} })
+
+       flag.Var(&s3VolumeAdder{theConfig},
+               "s3-bucket-volume",
+               "Use the given bucket as a storage volume. Can be given multiple times.")
+       flag.StringVar(
+               &s3RegionName,
+               "s3-region",
+               "",
+               fmt.Sprintf("AWS region used for subsequent -s3-bucket-volume arguments. Allowed values are %+q.", s3regions()))
+       flag.StringVar(
+               &s3Endpoint,
+               "s3-endpoint",
+               "",
+               "Endpoint URL used for subsequent -s3-bucket-volume arguments. If blank, use the AWS endpoint corresponding to the -s3-region argument. For Google Storage, use \"https://storage.googleapis.com\".")
+       flag.StringVar(
+               &s3AccessKeyFile,
+               "s3-access-key-file",
+               "",
+               "`File` containing the access key used for subsequent -s3-bucket-volume arguments.")
+       flag.StringVar(
+               &s3SecretKeyFile,
+               "s3-secret-key-file",
+               "",
+               "`File` containing the secret key used for subsequent -s3-bucket-volume arguments.")
+       flag.DurationVar(
+               &s3RaceWindow,
+               "s3-race-window",
+               24*time.Hour,
+               "Maximum eventual consistency latency for subsequent -s3-bucket-volume arguments.")
+       flag.IntVar(
+               &s3Replication,
+               "s3-replication",
+               2,
+               "Replication level reported to clients for subsequent -s3-bucket-volume arguments.")
+       flag.BoolVar(
+               &s3UnsafeDelete,
+               "s3-unsafe-delete",
+               false,
+               "EXPERIMENTAL. Enable deletion (garbage collection) even when trash lifetime is zero, even though there are known race conditions that can cause data loss.")
+}
+
+// S3Volume implements Volume using an S3 bucket.
+type S3Volume struct {
+       AccessKeyFile      string
+       SecretKeyFile      string
+       Endpoint           string
+       Region             string
+       Bucket             string
+       LocationConstraint bool
+       IndexPageSize      int
+       S3Replication      int
+       ConnectTimeout     arvados.Duration
+       ReadTimeout        arvados.Duration
+       RaceWindow         arvados.Duration
+       ReadOnly           bool
+       UnsafeDelete       bool
+       StorageClasses     []string
+
+       bucket *s3bucket
+
+       startOnce sync.Once
+}
+
+// Examples implements VolumeWithExamples.
+func (*S3Volume) Examples() []Volume {
+       return []Volume{
+               &S3Volume{
+                       AccessKeyFile:  "/etc/aws_s3_access_key.txt",
+                       SecretKeyFile:  "/etc/aws_s3_secret_key.txt",
+                       Endpoint:       "",
+                       Region:         "us-east-1",
+                       Bucket:         "example-bucket-name",
+                       IndexPageSize:  1000,
+                       S3Replication:  2,
+                       RaceWindow:     arvados.Duration(24 * time.Hour),
+                       ConnectTimeout: arvados.Duration(time.Minute),
+                       ReadTimeout:    arvados.Duration(5 * time.Minute),
+               },
+               &S3Volume{
+                       AccessKeyFile:  "/etc/gce_s3_access_key.txt",
+                       SecretKeyFile:  "/etc/gce_s3_secret_key.txt",
+                       Endpoint:       "https://storage.googleapis.com",
+                       Region:         "",
+                       Bucket:         "example-bucket-name",
+                       IndexPageSize:  1000,
+                       S3Replication:  2,
+                       RaceWindow:     arvados.Duration(24 * time.Hour),
+                       ConnectTimeout: arvados.Duration(time.Minute),
+                       ReadTimeout:    arvados.Duration(5 * time.Minute),
+               },
+       }
+}
+
+// Type implements Volume.
+func (*S3Volume) Type() string {
+       return "S3"
+}
+
+// Start populates private fields and verifies the configuration is
+// valid.
+func (v *S3Volume) Start(vm *volumeMetricsVecs) error {
+       region, ok := aws.Regions[v.Region]
+       if v.Endpoint == "" {
+               if !ok {
+                       return fmt.Errorf("unrecognized region %+q; try specifying -s3-endpoint instead", v.Region)
+               }
+       } else if ok {
+               return fmt.Errorf("refusing to use AWS region name %+q with endpoint %+q; "+
+                       "specify empty endpoint (\"-s3-endpoint=\") or use a different region name", v.Region, v.Endpoint)
+       } else {
+               region = aws.Region{
+                       Name:                 v.Region,
+                       S3Endpoint:           v.Endpoint,
+                       S3LocationConstraint: v.LocationConstraint,
+               }
+       }
+
+       var err error
+       var auth aws.Auth
+       auth.AccessKey, err = readKeyFromFile(v.AccessKeyFile)
+       if err != nil {
+               return err
+       }
+       auth.SecretKey, err = readKeyFromFile(v.SecretKeyFile)
+       if err != nil {
+               return err
+       }
+
+       // Zero timeouts mean "wait forever", which is a bad
+       // default. Default to long timeouts instead.
+       if v.ConnectTimeout == 0 {
+               v.ConnectTimeout = s3DefaultConnectTimeout
+       }
+       if v.ReadTimeout == 0 {
+               v.ReadTimeout = s3DefaultReadTimeout
+       }
+
+       client := s3.New(auth, region)
+       if region.EC2Endpoint.Signer == aws.V4Signature {
+               // Currently affects only eu-central-1
+               client.Signature = aws.V4Signature
+       }
+       client.ConnectTimeout = time.Duration(v.ConnectTimeout)
+       client.ReadTimeout = time.Duration(v.ReadTimeout)
+       v.bucket = &s3bucket{
+               Bucket: &s3.Bucket{
+                       S3:   client,
+                       Name: v.Bucket,
+               },
+       }
+       // Set up prometheus metrics
+       lbls := prometheus.Labels{"device_id": v.DeviceID()}
+       v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = vm.getCounterVecsFor(lbls)
+
+       return nil
+}
+
+// DeviceID returns a globally unique ID for the storage bucket.
+func (v *S3Volume) DeviceID() string {
+       return "s3://" + v.Endpoint + "/" + v.Bucket
+}
+
+func (v *S3Volume) getReaderWithContext(ctx context.Context, loc string) (rdr io.ReadCloser, err error) {
+       ready := make(chan bool)
+       go func() {
+               rdr, err = v.getReader(loc)
+               close(ready)
+       }()
+       select {
+       case <-ready:
+               return
+       case <-ctx.Done():
+               theConfig.debugLogf("s3: abandoning getReader(): %s", ctx.Err())
+               go func() {
+                       <-ready
+                       if err == nil {
+                               rdr.Close()
+                       }
+               }()
+               return nil, ctx.Err()
+       }
+}
+
+// getReader wraps (Bucket)GetReader.
+//
+// In situations where (Bucket)GetReader would fail because the block
+// disappeared in a Trash race, getReader calls fixRace to recover the
+// data, and tries again.
+func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
+       rdr, err = v.bucket.GetReader(loc)
+       err = v.translateError(err)
+       if err == nil || !os.IsNotExist(err) {
+               return
+       }
+
+       _, err = v.bucket.Head("recent/"+loc, nil)
+       err = v.translateError(err)
+       if err != nil {
+               // If we can't read recent/X, there's no point in
+               // trying fixRace. Give up.
+               return
+       }
+       if !v.fixRace(loc) {
+               err = os.ErrNotExist
+               return
+       }
+
+       rdr, err = v.bucket.GetReader(loc)
+       if err != nil {
+               log.Printf("warning: reading %s after successful fixRace: %s", loc, err)
+               err = v.translateError(err)
+       }
+       return
+}
+
+// Get a block: copy the block data into buf, and return the number of
+// bytes copied.
+func (v *S3Volume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
+       rdr, err := v.getReaderWithContext(ctx, loc)
+       if err != nil {
+               return 0, err
+       }
+
+       var n int
+       ready := make(chan bool)
+       go func() {
+               defer close(ready)
+
+               defer rdr.Close()
+               n, err = io.ReadFull(rdr, buf)
+
+               switch err {
+               case nil, io.EOF, io.ErrUnexpectedEOF:
+                       err = nil
+               default:
+                       err = v.translateError(err)
+               }
+       }()
+       select {
+       case <-ctx.Done():
+               theConfig.debugLogf("s3: interrupting ReadFull() with Close() because %s", ctx.Err())
+               rdr.Close()
+               // Must wait for ReadFull to return, to ensure it
+               // doesn't write to buf after we return.
+               theConfig.debugLogf("s3: waiting for ReadFull() to fail")
+               <-ready
+               return 0, ctx.Err()
+       case <-ready:
+               return n, err
+       }
+}
+
+// Compare the given data with the stored data.
+func (v *S3Volume) Compare(ctx context.Context, loc string, expect []byte) error {
+       errChan := make(chan error, 1)
+       go func() {
+               _, err := v.bucket.Head("recent/"+loc, nil)
+               errChan <- err
+       }()
+       var err error
+       select {
+       case <-ctx.Done():
+               return ctx.Err()
+       case err = <-errChan:
+       }
+       if err != nil {
+               // Checking for "loc" itself here would interfere with
+               // future GET requests.
+               //
+               // On AWS, if X doesn't exist, a HEAD or GET request
+               // for X causes X's non-existence to be cached. Thus,
+               // if we test for X, then create X and return a
+               // signature to our client, the client might still get
+               // 404 from all keepstores when trying to read it.
+               //
+               // To avoid this, we avoid doing HEAD X or GET X until
+               // we know X has been written.
+               //
+               // Note that X might exist even though recent/X
+               // doesn't: for example, the response to HEAD recent/X
+               // might itself come from a stale cache. In such
+               // cases, we will return a false negative and
+               // PutHandler might needlessly create another replica
+               // on a different volume. That's not ideal, but it's
+               // better than passing the eventually-consistent
+               // problem on to our clients.
+               return v.translateError(err)
+       }
+       rdr, err := v.getReaderWithContext(ctx, loc)
+       if err != nil {
+               return err
+       }
+       defer rdr.Close()
+       return v.translateError(compareReaderWithBuf(ctx, rdr, expect, loc[:32]))
+}
+
+// Put writes a block.
+func (v *S3Volume) Put(ctx context.Context, loc string, block []byte) error {
+       if v.ReadOnly {
+               return MethodDisabledError
+       }
+       var opts s3.Options
+       size := len(block)
+       if size > 0 {
+               md5, err := hex.DecodeString(loc)
+               if err != nil {
+                       return err
+               }
+               opts.ContentMD5 = base64.StdEncoding.EncodeToString(md5)
+               // In AWS regions that use V4 signatures, we need to
+               // provide ContentSHA256 up front. Otherwise, the S3
+               // library reads the request body (from our buffer)
+               // into another new buffer in order to compute the
+               // SHA256 before sending the request -- which would
+               // mean consuming 128 MiB of memory for the duration
+               // of a 64 MiB write.
+               opts.ContentSHA256 = fmt.Sprintf("%x", sha256.Sum256(block))
+       }
+
+       // Send the block data through a pipe, so that (if we need to)
+       // we can close the pipe early and abandon our PutReader()
+       // goroutine, without worrying about PutReader() accessing our
+       // block buffer after we release it.
+       bufr, bufw := io.Pipe()
+       go func() {
+               io.Copy(bufw, bytes.NewReader(block))
+               bufw.Close()
+       }()
+
+       var err error
+       ready := make(chan bool)
+       go func() {
+               defer func() {
+                       if ctx.Err() != nil {
+                               theConfig.debugLogf("%s: abandoned PutReader goroutine finished with err: %s", v, err)
+                       }
+               }()
+               defer close(ready)
+               err = v.bucket.PutReader(loc, bufr, int64(size), "application/octet-stream", s3ACL, opts)
+               if err != nil {
+                       return
+               }
+               err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
+       }()
+       select {
+       case <-ctx.Done():
+               theConfig.debugLogf("%s: taking PutReader's input away: %s", v, ctx.Err())
+               // Our pipe might be stuck in Write(), waiting for
+               // PutReader() to read. If so, un-stick it. This means
+               // PutReader will get corrupt data, but that's OK: the
+               // size and MD5 won't match, so the write will fail.
+               go io.Copy(ioutil.Discard, bufr)
+               // CloseWithError() will return once pending I/O is done.
+               bufw.CloseWithError(ctx.Err())
+               theConfig.debugLogf("%s: abandoning PutReader goroutine", v)
+               return ctx.Err()
+       case <-ready:
+               // Unblock pipe in case PutReader did not consume it.
+               io.Copy(ioutil.Discard, bufr)
+               return v.translateError(err)
+       }
+}
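
Put's io.Pipe trick can be isolated the same way. The point of the pipe is ownership: once Put returns, nothing may touch the caller's buffer, so an abandoned uploader is only ever handed the pipe's read end. A hedged sketch with a slow stand-in for bucket.PutReader (the sleep and names are invented for illustration):

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"time"
)

func main() {
	block := []byte("data the caller will reuse as soon as Put returns")
	ctx, cancel := context.WithCancel(context.Background())

	// Feed the block through a pipe so only the pipe, never the
	// caller's buffer, is visible to the (possibly abandoned) uploader.
	bufr, bufw := io.Pipe()
	go func() {
		io.Copy(bufw, bytes.NewReader(block))
		bufw.Close()
	}()

	ready := make(chan bool)
	go func() {
		defer close(ready)
		time.Sleep(50 * time.Millisecond) // stand-in for a slow bucket.PutReader
		io.Copy(ioutil.Discard, bufr)
	}()

	cancel() // the caller gives up before the upload finishes
	select {
	case <-ctx.Done():
		// Un-stick any pending pipe Write, then poison the pipe;
		// the abandoned uploader reads an error, not stale data.
		go io.Copy(ioutil.Discard, bufr)
		bufw.CloseWithError(ctx.Err())
		fmt.Println("returned early:", ctx.Err())
	case <-ready:
		fmt.Println("upload finished")
	}
}
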
+
+// Touch sets the timestamp for the given locator to the current time.
+func (v *S3Volume) Touch(loc string) error {
+       if v.ReadOnly {
+               return MethodDisabledError
+       }
+       _, err := v.bucket.Head(loc, nil)
+       err = v.translateError(err)
+       if os.IsNotExist(err) && v.fixRace(loc) {
+               // The data object got trashed in a race, but fixRace
+               // rescued it.
+       } else if err != nil {
+               return err
+       }
+       err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
+       return v.translateError(err)
+}
+
+// Mtime returns the stored timestamp for the given locator.
+func (v *S3Volume) Mtime(loc string) (time.Time, error) {
+       _, err := v.bucket.Head(loc, nil)
+       if err != nil {
+               return zeroTime, v.translateError(err)
+       }
+       resp, err := v.bucket.Head("recent/"+loc, nil)
+       err = v.translateError(err)
+       if os.IsNotExist(err) {
+               // The data object X exists, but recent/X is missing.
+               err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
+               if err != nil {
+                       log.Printf("error: creating %q: %s", "recent/"+loc, err)
+                       return zeroTime, v.translateError(err)
+               }
+               log.Printf("info: created %q to migrate existing block to new storage scheme", "recent/"+loc)
+               resp, err = v.bucket.Head("recent/"+loc, nil)
+               if err != nil {
+                       log.Printf("error: created %q but HEAD failed: %s", "recent/"+loc, err)
+                       return zeroTime, v.translateError(err)
+               }
+       } else if err != nil {
+               // HEAD recent/X failed for some other reason.
+               return zeroTime, err
+       }
+       return v.lastModified(resp)
+}
+
+// IndexTo writes a complete list of locators with the given prefix
+// for which Get() can retrieve data.
+func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
+       // Use a merge sort to find matching sets of X and recent/X.
+       dataL := s3Lister{
+               Bucket:   v.bucket.Bucket,
+               Prefix:   prefix,
+               PageSize: v.IndexPageSize,
+               Stats:    &v.bucket.stats,
+       }
+       recentL := s3Lister{
+               Bucket:   v.bucket.Bucket,
+               Prefix:   "recent/" + prefix,
+               PageSize: v.IndexPageSize,
+               Stats:    &v.bucket.stats,
+       }
+       for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() {
+               if data.Key >= "g" {
+                       // Conveniently, "recent/*" and "trash/*" are
+                       // lexically greater than all hex-encoded data
+                       // hashes, so stopping here avoids iterating
+                       // over all of them needlessly with dataL.
+                       break
+               }
+               if !v.isKeepBlock(data.Key) {
+                       continue
+               }
+
+               // stamp is the list entry we should use to report the
+               // last-modified time for this data block: it will be
+               // the recent/X entry if one exists, otherwise the
+               // entry for the data block itself.
+               stamp := data
+
+               // Advance to the corresponding recent/X marker, if any
+               for recent != nil && recentL.Error() == nil {
+                       if cmp := strings.Compare(recent.Key[7:], data.Key); cmp < 0 {
+                               recent = recentL.Next()
+                               continue
+                       } else if cmp == 0 {
+                               stamp = recent
+                               recent = recentL.Next()
+                               break
+                       } else {
+                               // recent/X marker is missing: we'll
+                               // use the timestamp on the data
+                               // object.
+                               break
+                       }
+               }
+               if err := recentL.Error(); err != nil {
+                       return err
+               }
+               t, err := time.Parse(time.RFC3339, stamp.LastModified)
+               if err != nil {
+                       return err
+               }
+               fmt.Fprintf(writer, "%s+%d %d\n", data.Key, data.Size, t.UnixNano())
+       }
+       return dataL.Error()
+}
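
The early break on data.Key >= "g" rests on a lexical-ordering fact: block keys are 32 lowercase hex digits, every hex digit sorts before 'g', and 'g' sorts before both "recent/" and "trash/". A throwaway check of that assumption:

package main

import "fmt"

func main() {
	// The largest possible block key is all 'f's, and 'f' < 'g' < 'r' < 't'.
	fmt.Println("ffffffffffffffffffffffffffffffff" < "g") // true
	fmt.Println("g" <= "recent/")                         // true
	fmt.Println("g" <= "trash/")                          // true
}
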
+
+// Trash moves a Keep block to the trash, making it eligible for
+// deletion after TrashLifetime (or deletes it immediately when
+// TrashLifetime is zero and unsafe delete is enabled).
+func (v *S3Volume) Trash(loc string) error {
+       if v.ReadOnly {
+               return MethodDisabledError
+       }
+       if t, err := v.Mtime(loc); err != nil {
+               return err
+       } else if time.Since(t) < theConfig.BlobSignatureTTL.Duration() {
+               return nil
+       }
+       if theConfig.TrashLifetime == 0 {
+               if !s3UnsafeDelete {
+                       return ErrS3TrashDisabled
+               }
+               return v.translateError(v.bucket.Del(loc))
+       }
+       err := v.checkRaceWindow(loc)
+       if err != nil {
+               return err
+       }
+       err = v.safeCopy("trash/"+loc, loc)
+       if err != nil {
+               return err
+       }
+       return v.translateError(v.bucket.Del(loc))
+}
+
+// checkRaceWindow returns a non-nil error if trash/loc is, or might
+// be, in the race window (i.e., it's not safe to trash loc).
+func (v *S3Volume) checkRaceWindow(loc string) error {
+       resp, err := v.bucket.Head("trash/"+loc, nil)
+       err = v.translateError(err)
+       if os.IsNotExist(err) {
+               // OK, trash/X doesn't exist so we're not in the race
+               // window
+               return nil
+       } else if err != nil {
+               // Error looking up trash/X. We don't know whether
+               // we're in the race window
+               return err
+       }
+       t, err := v.lastModified(resp)
+       if err != nil {
+               // Can't parse timestamp
+               return err
+       }
+       safeWindow := t.Add(theConfig.TrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
+       if safeWindow <= 0 {
+               // We can't count on "touch trash/X" to prolong
+               // trash/X's lifetime. The new timestamp might not
+               // become visible until now+raceWindow, and EmptyTrash
+               // is allowed to delete trash/X before then.
+               return fmt.Errorf("same block is already in trash, and safe window ended %s ago", -safeWindow)
+       }
+       // trash/X exists, but it won't be eligible for deletion until
+       // after now+raceWindow, so it's safe to overwrite it.
+       return nil
+}
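
A worked instance of the safeWindow arithmetic, with invented numbers: given a TrashLifetime of one hour and a RaceWindow of ten minutes, a trash/X last modified 55 minutes ago yields safeWindow = (t + 1h) - (now + 10m) = -5m, so the overwrite is refused.

package main

import (
	"fmt"
	"time"
)

func main() {
	trashLifetime := time.Hour
	raceWindow := 10 * time.Minute
	t := time.Now().Add(-55 * time.Minute) // trash/X's Last-Modified

	safeWindow := t.Add(trashLifetime).Sub(time.Now().Add(raceWindow))
	fmt.Printf("safeWindow = %s; safe to overwrite trash/X: %v\n",
		safeWindow.Round(time.Minute), safeWindow > 0)
}
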
+
+// safeCopy calls PutCopy, and checks the response to make sure the
+// copy succeeded and updated the timestamp on the destination object
+// (PutCopy returns 200 OK if the request was received, even if the
+// copy failed).
+func (v *S3Volume) safeCopy(dst, src string) error {
+       resp, err := v.bucket.PutCopy(dst, s3ACL, s3.CopyOptions{
+               ContentType:       "application/octet-stream",
+               MetadataDirective: "REPLACE",
+       }, v.bucket.Name+"/"+src)
+       err = v.translateError(err)
+       if os.IsNotExist(err) {
+               return err
+       } else if err != nil {
+               return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.Name+"/"+src, err)
+       }
+       if t, err := time.Parse(time.RFC3339Nano, resp.LastModified); err != nil {
+               return fmt.Errorf("PutCopy succeeded but did not return a timestamp: %q: %s", resp.LastModified, err)
+       } else if time.Now().Sub(t) > maxClockSkew {
+               return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %q: %s", resp.LastModified, t)
+       }
+       return nil
+}
+
+// lastModified returns resp's Last-Modified header value, parsed as
+// RFC1123 or, if that fails, as Amazon's near-RFC1123 variant.
+func (v *S3Volume) lastModified(resp *http.Response) (t time.Time, err error) {
+       s := resp.Header.Get("Last-Modified")
+       t, err = time.Parse(time.RFC1123, s)
+       if err != nil && s != "" {
+               // AWS example is "Sun, 1 Jan 2006 12:00:00 GMT",
+               // which isn't quite "Sun, 01 Jan 2006 12:00:00 GMT"
+               // as required by HTTP spec. If it's not a valid HTTP
+               // header value, it's probably AWS (or s3test) giving
+               // us a nearly-RFC1123 timestamp.
+               t, err = time.Parse(nearlyRFC1123, s)
+       }
+       return
+}
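
The fallback can be exercised directly. The sketch below assumes nearlyRFC1123 (defined elsewhere in this file) is an RFC1123-like layout with an unpadded day-of-month; the constant's exact value here is an educated guess:

package main

import (
	"fmt"
	"time"
)

const nearlyRFC1123 = "Mon, 2 Jan 2006 15:04:05 GMT" // assumed layout

func main() {
	s := "Sun, 1 Jan 2006 12:00:00 GMT" // AWS-style: day not zero-padded
	if _, err := time.Parse(time.RFC1123, s); err != nil {
		// Strict RFC1123 rejects the unpadded day; the variant accepts it.
		t, err := time.Parse(nearlyRFC1123, s)
		fmt.Println(t.UTC(), err)
	}
}
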
+
+// Untrash moves a block from the trash back into the store.
+func (v *S3Volume) Untrash(loc string) error {
+       err := v.safeCopy(loc, "trash/"+loc)
+       if err != nil {
+               return err
+       }
+       err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
+       return v.translateError(err)
+}
+
+// Status returns a *VolumeStatus representing the current in-use
+// storage capacity and a fake available capacity that doesn't make
+// the volume seem full or nearly-full.
+func (v *S3Volume) Status() *VolumeStatus {
+       return &VolumeStatus{
+               DeviceNum: 1,
+               BytesFree: BlockSize * 1000,
+               BytesUsed: 1,
+       }
+}
+
+// InternalStats returns bucket I/O and API call counters.
+func (v *S3Volume) InternalStats() interface{} {
+       return &v.bucket.stats
+}
+
+// String implements fmt.Stringer.
+func (v *S3Volume) String() string {
+       return fmt.Sprintf("s3-bucket:%+q", v.Bucket)
+}
+
+// Writable returns false if all future Put, Mtime, and Delete calls
+// are expected to fail.
+func (v *S3Volume) Writable() bool {
+       return !v.ReadOnly
+}
+
+// Replication returns the storage redundancy of the underlying
+// device. Configured via command line flag.
+func (v *S3Volume) Replication() int {
+       return v.S3Replication
+}
+
+// GetStorageClasses implements Volume
+func (v *S3Volume) GetStorageClasses() []string {
+       return v.StorageClasses
+}
+
+var s3KeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
+
+func (v *S3Volume) isKeepBlock(s string) bool {
+       return s3KeepBlockRegexp.MatchString(s)
+}
+
+// fixRace(X) is called when "recent/X" exists but "X" doesn't
+// exist. If the timestamps on "recent/"+loc and "trash/"+loc indicate
+// there was a race between Put and Trash, fixRace recovers from the
+// race by Untrashing the block.
+func (v *S3Volume) fixRace(loc string) bool {
+       trash, err := v.bucket.Head("trash/"+loc, nil)
+       if err != nil {
+               if !os.IsNotExist(v.translateError(err)) {
+                       log.Printf("error: fixRace: HEAD %q: %s", "trash/"+loc, err)
+               }
+               return false
+       }
+       trashTime, err := v.lastModified(trash)
+       if err != nil {
+               log.Printf("error: fixRace: parse %q: %s", trash.Header.Get("Last-Modified"), err)
+               return false
+       }
+
+       recent, err := v.bucket.Head("recent/"+loc, nil)
+       if err != nil {
+               log.Printf("error: fixRace: HEAD %q: %s", "recent/"+loc, err)
+               return false
+       }
+       recentTime, err := v.lastModified(recent)
+       if err != nil {
+               log.Printf("error: fixRace: parse %q: %s", recent.Header.Get("Last-Modified"), err)
+               return false
+       }
+
+       ageWhenTrashed := trashTime.Sub(recentTime)
+       if ageWhenTrashed >= theConfig.BlobSignatureTTL.Duration() {
+               // No evidence of a race: block hasn't been written
+               // since it became eligible for Trash. No fix needed.
+               return false
+       }
+
+       log.Printf("notice: fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", loc, trashTime, recentTime, ageWhenTrashed, theConfig.BlobSignatureTTL)
+       log.Printf("notice: fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+loc, loc)
+       err = v.safeCopy(loc, "trash/"+loc)
+       if err != nil {
+               log.Printf("error: fixRace: %s", err)
+               return false
+       }
+       return true
+}
+
+func (v *S3Volume) translateError(err error) error {
+       switch err := err.(type) {
+       case *s3.Error:
+               if (err.StatusCode == http.StatusNotFound && err.Code == "NoSuchKey") ||
+                       strings.Contains(err.Error(), "Not Found") {
+                       return os.ErrNotExist
+               }
+               // Other 404 errors like NoSuchVersion and
+               // NoSuchBucket are different problems which should
+               // get called out downstream, so we don't convert them
+               // to os.ErrNotExist.
+       }
+       return err
+}
+
+// EmptyTrash looks for trashed blocks that exceeded TrashLifetime
+// and deletes them from the volume.
+func (v *S3Volume) EmptyTrash() {
+       var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
+
+       // Define "ready to delete" as "...when EmptyTrash started".
+       startT := time.Now()
+
+       emptyOneKey := func(trash *s3.Key) {
+               loc := trash.Key[6:]
+               if !v.isKeepBlock(loc) {
+                       return
+               }
+               atomic.AddInt64(&bytesInTrash, trash.Size)
+               atomic.AddInt64(&blocksInTrash, 1)
+
+               trashT, err := time.Parse(time.RFC3339, trash.LastModified)
+               if err != nil {
+                       log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, trash.Key, trash.LastModified, err)
+                       return
+               }
+               recent, err := v.bucket.Head("recent/"+loc, nil)
+               if err != nil && os.IsNotExist(v.translateError(err)) {
+                       log.Printf("warning: %s: EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", v, trash.Key, "recent/"+loc, err)
+                       err = v.Untrash(loc)
+                       if err != nil {
+                               log.Printf("error: %s: EmptyTrash: Untrash(%q): %s", v, loc, err)
+                       }
+                       return
+               } else if err != nil {
+                       log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, "recent/"+loc, err)
+                       return
+               }
+               recentT, err := v.lastModified(recent)
+               if err != nil {
+                       log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, "recent/"+loc, recent.Header.Get("Last-Modified"), err)
+                       return
+               }
+               if trashT.Sub(recentT) < theConfig.BlobSignatureTTL.Duration() {
+                       if age := startT.Sub(recentT); age >= theConfig.BlobSignatureTTL.Duration()-time.Duration(v.RaceWindow) {
+                               // recent/loc is too old to protect
+                               // loc from being Trashed again during
+                               // the raceWindow that starts if we
+                               // delete trash/X now.
+                               //
+                               // Note this means (TrashCheckInterval
+                               // < BlobSignatureTTL - raceWindow) is
+                               // necessary to avoid starvation.
+                               log.Printf("notice: %s: EmptyTrash: detected old race for %q, calling fixRace + Touch", v, loc)
+                               v.fixRace(loc)
+                               v.Touch(loc)
+                               return
+                       }
+                       _, err := v.bucket.Head(loc, nil)
+                       if os.IsNotExist(err) {
+                               log.Printf("notice: %s: EmptyTrash: detected recent race for %q, calling fixRace", v, loc)
+                               v.fixRace(loc)
+                               return
+                       } else if err != nil {
+                               log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, loc, err)
+                               return
+                       }
+               }
+               if startT.Sub(trashT) < theConfig.TrashLifetime.Duration() {
+                       return
+               }
+               err = v.bucket.Del(trash.Key)
+               if err != nil {
+                       log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, trash.Key, err)
+                       return
+               }
+               atomic.AddInt64(&bytesDeleted, trash.Size)
+               atomic.AddInt64(&blocksDeleted, 1)
+
+               _, err = v.bucket.Head(loc, nil)
+               if err == nil {
+                       log.Printf("warning: %s: EmptyTrash: HEAD %q succeeded immediately after deleting %q", v, loc, loc)
+                       return
+               }
+               if !os.IsNotExist(v.translateError(err)) {
+                       log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, loc, err)
+                       return
+               }
+               err = v.bucket.Del("recent/" + loc)
+               if err != nil {
+                       log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, "recent/"+loc, err)
+               }
+       }
+
+       var wg sync.WaitGroup
+       todo := make(chan *s3.Key, theConfig.EmptyTrashWorkers)
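+       // Start the configured number of workers, but always at least one.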
+       for i := 0; i < 1 || i < theConfig.EmptyTrashWorkers; i++ {
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       for key := range todo {
+                               emptyOneKey(key)
+                       }
+               }()
+       }
+
+       trashL := s3Lister{
+               Bucket:   v.bucket.Bucket,
+               Prefix:   "trash/",
+               PageSize: v.IndexPageSize,
+               Stats:    &v.bucket.stats,
+       }
+       for trash := trashL.First(); trash != nil; trash = trashL.Next() {
+               todo <- trash
+       }
+       close(todo)
+       wg.Wait()
+
+       if err := trashL.Error(); err != nil {
+               log.Printf("error: %s: EmptyTrash: lister: %s", v, err)
+       }
+       log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+}
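
EmptyTrash's concurrency is a standard bounded worker pool: one channel of work items, N draining goroutines, and a WaitGroup joined before the summary log line. Reduced to a standalone sketch with invented work items:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 4
	todo := make(chan string, workers)
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for key := range todo {
				fmt.Println("emptying", key) // stand-in for emptyOneKey
			}
		}()
	}
	for i := 0; i < 10; i++ {
		todo <- fmt.Sprintf("trash/%032x", i)
	}
	close(todo) // workers exit when the channel drains
	wg.Wait()
}
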
+
+type s3Lister struct {
+       Bucket     *s3.Bucket
+       Prefix     string
+       PageSize   int
+       Stats      *s3bucketStats
+       nextMarker string
+       buf        []s3.Key
+       err        error
+}
+
+// First fetches the first page and returns the first item. It returns
+// nil if the response is the empty set or an error occurs.
+func (lister *s3Lister) First() *s3.Key {
+       lister.getPage()
+       return lister.pop()
+}
+
+// Next returns the next item, fetching the next page if necessary. It
+// returns nil if the last available item has already been fetched, or
+// an error occurs.
+func (lister *s3Lister) Next() *s3.Key {
+       if len(lister.buf) == 0 && lister.nextMarker != "" {
+               lister.getPage()
+       }
+       return lister.pop()
+}
+
+// Error returns the most recent error encountered by First or Next.
+func (lister *s3Lister) Error() error {
+       return lister.err
+}
+
+func (lister *s3Lister) getPage() {
+       lister.Stats.TickOps("list")
+       lister.Stats.Tick(&lister.Stats.Ops, &lister.Stats.ListOps)
+       resp, err := lister.Bucket.List(lister.Prefix, "", lister.nextMarker, lister.PageSize)
+       lister.nextMarker = ""
+       if err != nil {
+               lister.err = err
+               return
+       }
+       if resp.IsTruncated {
+               lister.nextMarker = resp.NextMarker
+       }
+       lister.buf = make([]s3.Key, 0, len(resp.Contents))
+       for _, key := range resp.Contents {
+               if !strings.HasPrefix(key.Key, lister.Prefix) {
+                       log.Printf("warning: s3Lister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, key.Key)
+                       continue
+               }
+               lister.buf = append(lister.buf, key)
+       }
+}
+
+func (lister *s3Lister) pop() (k *s3.Key) {
+       if len(lister.buf) > 0 {
+               k = &lister.buf[0]
+               lister.buf = lister.buf[1:]
+       }
+       return
+}
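
A typical traversal of an s3Lister follows the First/Next/Error idiom used by IndexTo and EmptyTrash. The helper below is hypothetical, shown only to illustrate the calling pattern; it assumes the s3Lister type above plus "fmt" and "io" imports:

// listAllKeys is a hypothetical caller (not part of keepstore),
// shown only to illustrate the iteration idiom.
func listAllKeys(lister *s3Lister, out io.Writer) error {
	for key := lister.First(); key != nil; key = lister.Next() {
		fmt.Fprintf(out, "%s\t%d\t%s\n", key.Key, key.Size, key.LastModified)
	}
	// Error() is nil only if every page fetch succeeded.
	return lister.Error()
}
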
+
+// s3bucket wraps s3.Bucket and counts I/O and API usage stats.
+type s3bucket struct {
+       *s3.Bucket
+       stats s3bucketStats
+}
+
+func (b *s3bucket) GetReader(path string) (io.ReadCloser, error) {
+       rdr, err := b.Bucket.GetReader(path)
+       b.stats.TickOps("get")
+       b.stats.Tick(&b.stats.Ops, &b.stats.GetOps)
+       b.stats.TickErr(err)
+       return NewCountingReader(rdr, b.stats.TickInBytes), err
+}
+
+func (b *s3bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
+       resp, err := b.Bucket.Head(path, headers)
+       b.stats.TickOps("head")
+       b.stats.Tick(&b.stats.Ops, &b.stats.HeadOps)
+       b.stats.TickErr(err)
+       return resp, err
+}
+
+func (b *s3bucket) PutReader(path string, r io.Reader, length int64, contType string, perm s3.ACL, options s3.Options) error {
+       if length == 0 {
+               // goamz only sends "Content-Length: 0" when the
+               // reader is nil, a consequence of
+               // net/http.Request.ContentLength behavior. Otherwise
+               // the Content-Length header is omitted, which causes
+               // some S3 services (including AWS and Ceph RadosGW)
+               // to fail to create empty objects.
+               r = nil
+       } else {
+               r = NewCountingReader(r, b.stats.TickOutBytes)
+       }
+       err := b.Bucket.PutReader(path, r, length, contType, perm, options)
+       b.stats.TickOps("put")
+       b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
+       b.stats.TickErr(err)
+       return err
+}
+
+func (b *s3bucket) Del(path string) error {
+       err := b.Bucket.Del(path)
+       b.stats.TickOps("delete")
+       b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
+       b.stats.TickErr(err)
+       return err
+}
+
+type s3bucketStats struct {
+       statsTicker
+       Ops     uint64
+       GetOps  uint64
+       PutOps  uint64
+       HeadOps uint64
+       DelOps  uint64
+       ListOps uint64
+}
+
+func (s *s3bucketStats) TickErr(err error) {
+       if err == nil {
+               return
+       }
+       errType := fmt.Sprintf("%T", err)
+       if err, ok := err.(*s3.Error); ok {
+               errType = errType + fmt.Sprintf(" %d %s", err.StatusCode, err.Code)
+       }
+       s.statsTicker.TickErr(err, errType)
+}
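
For reference, the label TickErr builds for an S3 404 looks like "*s3.Error 404 NoSuchKey": the Go type name from %T, plus goamz's status code and error code. A small sketch constructing the same label from a hand-made s3.Error:

package main

import (
	"fmt"

	"github.com/AdRoll/goamz/s3"
)

func main() {
	var err error = &s3.Error{StatusCode: 404, Code: "NoSuchKey"}
	errType := fmt.Sprintf("%T", err)
	if err, ok := err.(*s3.Error); ok {
		errType += fmt.Sprintf(" %d %s", err.StatusCode, err.Code)
	}
	fmt.Println(errType) // "*s3.Error 404 NoSuchKey"
}
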
diff --git a/services/keepstore/s3_volume_test.go b/services/keepstore/s3_volume_test.go
new file mode 100644 (file)
index 0000000..6377420
--- /dev/null
@@ -0,0 +1,499 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "crypto/md5"
+       "encoding/json"
+       "fmt"
+       "io/ioutil"
+       "net/http"
+       "net/http/httptest"
+       "os"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/AdRoll/goamz/s3"
+       "github.com/AdRoll/goamz/s3/s3test"
+       "github.com/ghodss/yaml"
+       "github.com/prometheus/client_golang/prometheus"
+       check "gopkg.in/check.v1"
+)
+
+const (
+       TestBucketName = "testbucket"
+)
+
+type fakeClock struct {
+       now *time.Time
+}
+
+func (c *fakeClock) Now() time.Time {
+       if c.now == nil {
+               return time.Now()
+       }
+       return *c.now
+}
+
+func init() {
+       // Deleting isn't safe from races, but if it's turned on
+       // anyway we do expect it to pass the generic volume tests.
+       s3UnsafeDelete = true
+}
+
+var _ = check.Suite(&StubbedS3Suite{})
+
+type StubbedS3Suite struct {
+       volumes []*TestableS3Volume
+}
+
+func (s *StubbedS3Suite) TestGeneric(c *check.C) {
+       DoGenericVolumeTests(c, func(t TB) TestableVolume {
+               // Use a negative raceWindow so s3test's 1-second
+               // timestamp precision doesn't confuse fixRace.
+               return s.newTestableVolume(c, -2*time.Second, false, 2)
+       })
+}
+
+func (s *StubbedS3Suite) TestGenericReadOnly(c *check.C) {
+       DoGenericVolumeTests(c, func(t TB) TestableVolume {
+               return s.newTestableVolume(c, -2*time.Second, true, 2)
+       })
+}
+
+func (s *StubbedS3Suite) TestIndex(c *check.C) {
+       v := s.newTestableVolume(c, 0, false, 2)
+       v.IndexPageSize = 3
+       for i := 0; i < 256; i++ {
+               v.PutRaw(fmt.Sprintf("%02x%030x", i, i), []byte{102, 111, 111})
+       }
+       for _, spec := range []struct {
+               prefix      string
+               expectMatch int
+       }{
+               {"", 256},
+               {"c", 16},
+               {"bc", 1},
+               {"abc", 0},
+       } {
+               buf := new(bytes.Buffer)
+               err := v.IndexTo(spec.prefix, buf)
+               c.Check(err, check.IsNil)
+
+               idx := bytes.SplitAfter(buf.Bytes(), []byte{10})
+               c.Check(len(idx), check.Equals, spec.expectMatch+1)
+               c.Check(len(idx[len(idx)-1]), check.Equals, 0)
+       }
+}
+
+func (s *StubbedS3Suite) TestStats(c *check.C) {
+       v := s.newTestableVolume(c, 5*time.Minute, false, 2)
+       stats := func() string {
+               buf, err := json.Marshal(v.InternalStats())
+               c.Check(err, check.IsNil)
+               return string(buf)
+       }
+
+       c.Check(stats(), check.Matches, `.*"Ops":0,.*`)
+
+       loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+       _, err := v.Get(context.Background(), loc, make([]byte, 3))
+       c.Check(err, check.NotNil)
+       c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
+       c.Check(stats(), check.Matches, `.*"\*s3.Error 404 [^"]*":[^0].*`)
+       c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
+
+       err = v.Put(context.Background(), loc, []byte("foo"))
+       c.Check(err, check.IsNil)
+       c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
+       c.Check(stats(), check.Matches, `.*"PutOps":2,.*`)
+
+       _, err = v.Get(context.Background(), loc, make([]byte, 3))
+       c.Check(err, check.IsNil)
+       _, err = v.Get(context.Background(), loc, make([]byte, 3))
+       c.Check(err, check.IsNil)
+       c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
+}
+
+type blockingHandler struct {
+       requested chan *http.Request
+       unblock   chan struct{}
+}
+
+func (h *blockingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+       if h.requested != nil {
+               h.requested <- r
+       }
+       if h.unblock != nil {
+               <-h.unblock
+       }
+       http.Error(w, "nothing here", http.StatusNotFound)
+}
+
+func (s *StubbedS3Suite) TestGetContextCancel(c *check.C) {
+       loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+       buf := make([]byte, 3)
+
+       s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
+               _, err := v.Get(ctx, loc, buf)
+               return err
+       })
+}
+
+func (s *StubbedS3Suite) TestCompareContextCancel(c *check.C) {
+       loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+       buf := []byte("bar")
+
+       s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
+               return v.Compare(ctx, loc, buf)
+       })
+}
+
+func (s *StubbedS3Suite) TestPutContextCancel(c *check.C) {
+       loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+       buf := []byte("foo")
+
+       s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
+               return v.Put(ctx, loc, buf)
+       })
+}
+
+func (s *StubbedS3Suite) testContextCancel(c *check.C, testFunc func(context.Context, *TestableS3Volume) error) {
+       handler := &blockingHandler{}
+       srv := httptest.NewServer(handler)
+       defer srv.Close()
+
+       v := s.newTestableVolume(c, 5*time.Minute, false, 2)
+       vol := *v.S3Volume
+       vol.Endpoint = srv.URL
+       v = &TestableS3Volume{S3Volume: &vol}
+       metrics := newVolumeMetricsVecs(prometheus.NewRegistry())
+       v.Start(metrics)
+
+       ctx, cancel := context.WithCancel(context.Background())
+
+       handler.requested = make(chan *http.Request)
+       handler.unblock = make(chan struct{})
+       defer close(handler.unblock)
+
+       doneFunc := make(chan struct{})
+       go func() {
+               err := testFunc(ctx, v)
+               c.Check(err, check.Equals, context.Canceled)
+               close(doneFunc)
+       }()
+
+       timeout := time.After(10 * time.Second)
+
+       // Wait for the stub server to receive a request, meaning
+       // Get() is waiting for an s3 operation.
+       select {
+       case <-timeout:
+               c.Fatal("timed out waiting for test func to call our handler")
+       case <-doneFunc:
+               c.Fatal("test func finished without even calling our handler!")
+       case <-handler.requested:
+       }
+
+       cancel()
+
+       select {
+       case <-timeout:
+               c.Fatal("timed out")
+       case <-doneFunc:
+       }
+}
+
+func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
+       defer func(tl, bs arvados.Duration) {
+               theConfig.TrashLifetime = tl
+               theConfig.BlobSignatureTTL = bs
+       }(theConfig.TrashLifetime, theConfig.BlobSignatureTTL)
+       theConfig.TrashLifetime.Set("1h")
+       theConfig.BlobSignatureTTL.Set("1h")
+
+       v := s.newTestableVolume(c, 5*time.Minute, false, 2)
+       var none time.Time
+
+       putS3Obj := func(t time.Time, key string, data []byte) {
+               if t == none {
+                       return
+               }
+               v.serverClock.now = &t
+               v.bucket.Put(key, data, "application/octet-stream", s3ACL, s3.Options{})
+       }
+
+       t0 := time.Now()
+       nextKey := 0
+       for _, scenario := range []struct {
+               label               string
+               dataT               time.Time
+               recentT             time.Time
+               trashT              time.Time
+               canGet              bool
+               canTrash            bool
+               canGetAfterTrash    bool
+               canUntrash          bool
+               haveTrashAfterEmpty bool
+               freshAfterEmpty     bool
+       }{
+               {
+                       "No related objects",
+                       none, none, none,
+                       false, false, false, false, false, false,
+               },
+               {
+                       // Stored by older version, or there was a
+                       // race between EmptyTrash and Put: Trash is a
+                       // no-op even though the data object is very
+                       // old
+                       "No recent/X",
+                       t0.Add(-48 * time.Hour), none, none,
+                       true, true, true, false, false, false,
+               },
+               {
+                       "Not trash, but old enough to be eligible for trash",
+                       t0.Add(-24 * time.Hour), t0.Add(-2 * time.Hour), none,
+                       true, true, false, false, false, false,
+               },
+               {
+                       "Not trash, and not old enough to be eligible for trash",
+                       t0.Add(-24 * time.Hour), t0.Add(-30 * time.Minute), none,
+                       true, true, true, false, false, false,
+               },
+               {
+                       "Trashed + untrashed copies exist, due to recent race between Trash and Put",
+                       t0.Add(-24 * time.Hour), t0.Add(-3 * time.Minute), t0.Add(-2 * time.Minute),
+                       true, true, true, true, true, false,
+               },
+               {
+                       "Trashed + untrashed copies exist, trash nearly eligible for deletion: prone to Trash race",
+                       t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-59 * time.Minute),
+                       true, false, true, true, true, false,
+               },
+               {
+                       "Trashed + untrashed copies exist, trash is eligible for deletion: prone to Trash race",
+                       t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-61 * time.Minute),
+                       true, false, true, true, false, false,
+               },
+               {
+                       "Trashed + untrashed copies exist, due to old race between Put and unfinished Trash: emptying trash is unsafe",
+                       t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-12 * time.Hour),
+                       true, false, true, true, true, true,
+               },
+               {
+                       "Trashed + untrashed copies exist, used to be unsafe to empty, but since made safe by fixRace+Touch",
+                       t0.Add(-time.Second), t0.Add(-time.Second), t0.Add(-12 * time.Hour),
+                       true, true, true, true, false, false,
+               },
+               {
+                       "Trashed + untrashed copies exist because Trash operation was interrupted (no race)",
+                       t0.Add(-24 * time.Hour), t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour),
+                       true, false, true, true, false, false,
+               },
+               {
+                       "Trash, not yet eligible for deletion",
+                       none, t0.Add(-12 * time.Hour), t0.Add(-time.Minute),
+                       false, false, false, true, true, false,
+               },
+               {
+                       "Trash, not yet eligible for deletion, prone to races",
+                       none, t0.Add(-12 * time.Hour), t0.Add(-59 * time.Minute),
+                       false, false, false, true, true, false,
+               },
+               {
+                       "Trash, eligible for deletion",
+                       none, t0.Add(-12 * time.Hour), t0.Add(-2 * time.Hour),
+                       false, false, false, true, false, false,
+               },
+               {
+                       "Erroneously trashed during a race, detected before TrashLifetime",
+                       none, t0.Add(-30 * time.Minute), t0.Add(-29 * time.Minute),
+                       true, false, true, true, true, false,
+               },
+               {
+                       "Erroneously trashed during a race, rescue during EmptyTrash despite reaching TrashLifetime",
+                       none, t0.Add(-90 * time.Minute), t0.Add(-89 * time.Minute),
+                       true, false, true, true, true, false,
+               },
+               {
+                       "Trashed copy exists with no recent/* marker (cause unknown); repair by untrashing",
+                       none, none, t0.Add(-time.Minute),
+                       false, false, false, true, true, true,
+               },
+       } {
+               c.Log("Scenario: ", scenario.label)
+
+               // We have a few tests to run for each scenario, and
+               // the tests are expected to change state. By calling
+               // this setup func between tests, we (re)create the
+               // scenario as specified, using a new unique block
+               // locator to prevent interference from previous
+               // tests.
+
+               setupScenario := func() (string, []byte) {
+                       nextKey++
+                       blk := []byte(fmt.Sprintf("%d", nextKey))
+                       loc := fmt.Sprintf("%x", md5.Sum(blk))
+                       c.Log("\t", loc)
+                       putS3Obj(scenario.dataT, loc, blk)
+                       putS3Obj(scenario.recentT, "recent/"+loc, nil)
+                       putS3Obj(scenario.trashT, "trash/"+loc, blk)
+                       v.serverClock.now = &t0
+                       return loc, blk
+               }
+
+               // Check canGet
+               loc, blk := setupScenario()
+               buf := make([]byte, len(blk))
+               _, err := v.Get(context.Background(), loc, buf)
+               c.Check(err == nil, check.Equals, scenario.canGet)
+               if err != nil {
+                       c.Check(os.IsNotExist(err), check.Equals, true)
+               }
+
+               // Call Trash, then check canTrash and canGetAfterTrash
+               loc, _ = setupScenario()
+               err = v.Trash(loc)
+               c.Check(err == nil, check.Equals, scenario.canTrash)
+               _, err = v.Get(context.Background(), loc, buf)
+               c.Check(err == nil, check.Equals, scenario.canGetAfterTrash)
+               if err != nil {
+                       c.Check(os.IsNotExist(err), check.Equals, true)
+               }
+
+               // Call Untrash, then check canUntrash
+               loc, _ = setupScenario()
+               err = v.Untrash(loc)
+               c.Check(err == nil, check.Equals, scenario.canUntrash)
+               if scenario.dataT != none || scenario.trashT != none {
+                       // In all scenarios where the data exists, we
+                       // should be able to Get after Untrash --
+                       // regardless of timestamps, errors, race
+                       // conditions, etc.
+                       _, err = v.Get(context.Background(), loc, buf)
+                       c.Check(err, check.IsNil)
+               }
+
+               // Call EmptyTrash, then check haveTrashAfterEmpty and
+               // freshAfterEmpty
+               loc, _ = setupScenario()
+               v.EmptyTrash()
+               _, err = v.bucket.Head("trash/"+loc, nil)
+               c.Check(err == nil, check.Equals, scenario.haveTrashAfterEmpty)
+               if scenario.freshAfterEmpty {
+                       t, err := v.Mtime(loc)
+                       c.Check(err, check.IsNil)
+                       // new mtime must be current (with an
+                       // allowance for 1s timestamp precision)
+                       c.Check(t.After(t0.Add(-time.Second)), check.Equals, true)
+               }
+
+               // Check for current Mtime after Put (applies to all
+               // scenarios)
+               loc, blk = setupScenario()
+               err = v.Put(context.Background(), loc, blk)
+               c.Check(err, check.IsNil)
+               t, err := v.Mtime(loc)
+               c.Check(err, check.IsNil)
+               c.Check(t.After(t0.Add(-time.Second)), check.Equals, true)
+       }
+}
+
+type TestableS3Volume struct {
+       *S3Volume
+       server      *s3test.Server
+       c           *check.C
+       serverClock *fakeClock
+}
+
+func (s *StubbedS3Suite) newTestableVolume(c *check.C, raceWindow time.Duration, readonly bool, replication int) *TestableS3Volume {
+       clock := &fakeClock{}
+       srv, err := s3test.NewServer(&s3test.Config{Clock: clock})
+       c.Assert(err, check.IsNil)
+
+       v := &TestableS3Volume{
+               S3Volume: &S3Volume{
+                       Bucket:             TestBucketName,
+                       Endpoint:           srv.URL(),
+                       Region:             "test-region-1",
+                       LocationConstraint: true,
+                       RaceWindow:         arvados.Duration(raceWindow),
+                       S3Replication:      replication,
+                       UnsafeDelete:       s3UnsafeDelete,
+                       ReadOnly:           readonly,
+                       IndexPageSize:      1000,
+               },
+               c:           c,
+               server:      srv,
+               serverClock: clock,
+       }
+       metrics := newVolumeMetricsVecs(prometheus.NewRegistry())
+       v.Start(metrics)
+       err = v.bucket.PutBucket(s3.ACL("private"))
+       c.Assert(err, check.IsNil)
+       return v
+}
+
+func (s *StubbedS3Suite) TestConfig(c *check.C) {
+       var cfg Config
+       err := yaml.Unmarshal([]byte(`
+Volumes:
+  - Type: S3
+    StorageClasses: ["class_a", "class_b"]
+`), &cfg)
+
+       c.Check(err, check.IsNil)
+       c.Check(cfg.Volumes[0].GetStorageClasses(), check.DeepEquals, []string{"class_a", "class_b"})
+}
+
+func (v *TestableS3Volume) Start(vm *volumeMetricsVecs) error {
+       tmp, err := ioutil.TempFile("", "keepstore")
+       v.c.Assert(err, check.IsNil)
+       defer os.Remove(tmp.Name())
+       _, err = tmp.Write([]byte("xxx\n"))
+       v.c.Assert(err, check.IsNil)
+       v.c.Assert(tmp.Close(), check.IsNil)
+
+       v.S3Volume.AccessKeyFile = tmp.Name()
+       v.S3Volume.SecretKeyFile = tmp.Name()
+
+       v.c.Assert(v.S3Volume.Start(vm), check.IsNil)
+       return nil
+}
+
+// PutRaw writes a block directly to the bucket, skipping the ContentMD5 check.
+func (v *TestableS3Volume) PutRaw(loc string, block []byte) {
+       err := v.bucket.Put(loc, block, "application/octet-stream", s3ACL, s3.Options{})
+       if err != nil {
+               log.Printf("PutRaw: %s: %+v", loc, err)
+       }
+       err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+       if err != nil {
+               log.Printf("PutRaw: recent/%s: %+v", loc, err)
+       }
+}
+
+// TouchWithDate turns back the clock while doing a Touch(). We assume
+// there are no other operations happening on the same s3test server
+// while we do this.
+func (v *TestableS3Volume) TouchWithDate(locator string, lastPut time.Time) {
+       v.serverClock.now = &lastPut
+       err := v.bucket.Put("recent/"+locator, nil, "application/octet-stream", s3ACL, s3.Options{})
+       if err != nil {
+               panic(err)
+       }
+       v.serverClock.now = nil
+}
+
+func (v *TestableS3Volume) Teardown() {
+       v.server.Quit()
+}
+
+func (v *TestableS3Volume) ReadWriteOperationLabelValues() (r, w string) {
+       return "get", "put"
+}
diff --git a/services/keepstore/server.go b/services/keepstore/server.go
new file mode 100644 (file)
index 0000000..3f67277
--- /dev/null
@@ -0,0 +1,78 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "crypto/tls"
+       "net"
+       "net/http"
+       "os"
+       "os/signal"
+       "syscall"
+)
+
+type server struct {
+       http.Server
+
+       // channel (size=1) with the current keypair
+       currentCert chan *tls.Certificate
+}
+
+func (srv *server) Serve(l net.Listener) error {
+       if theConfig.TLSCertificateFile == "" && theConfig.TLSKeyFile == "" {
+               return srv.Server.Serve(l)
+       }
+       // https://blog.gopheracademy.com/advent-2016/exposing-go-on-the-internet/
+       srv.TLSConfig = &tls.Config{
+               GetCertificate:           srv.getCertificate,
+               PreferServerCipherSuites: true,
+               CurvePreferences: []tls.CurveID{
+                       tls.CurveP256,
+                       tls.X25519,
+               },
+               MinVersion: tls.VersionTLS12,
+               CipherSuites: []uint16{
+                       tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+                       tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+                       tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+                       tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+                       tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+                       tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+               },
+       }
+       srv.currentCert = make(chan *tls.Certificate, 1)
+       go srv.refreshCertificate(theConfig.TLSCertificateFile, theConfig.TLSKeyFile)
+       return srv.Server.ServeTLS(l, "", "")
+}
+
+func (srv *server) refreshCertificate(certfile, keyfile string) {
+       cert, err := tls.LoadX509KeyPair(certfile, keyfile)
+       if err != nil {
+               log.WithError(err).Fatal("error loading X509 key pair")
+       }
+       srv.currentCert <- &cert
+
+       reload := make(chan os.Signal, 1)
+       signal.Notify(reload, syscall.SIGHUP)
+       for range reload {
+               cert, err := tls.LoadX509KeyPair(certfile, keyfile)
+               if err != nil {
+                       log.WithError(err).Warn("error loading X509 key pair")
+                       continue
+               }
+               // Throw away old cert and start using new one
+               <-srv.currentCert
+               srv.currentCert <- &cert
+       }
+}
+
+func (srv *server) getCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
+       if srv.currentCert == nil {
+               panic("srv.currentCert not initialized")
+       }
+       cert := <-srv.currentCert
+       srv.currentCert <- cert
+       return cert, nil
+}
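
The currentCert channel is a one-slot mailbox used in place of a mutex: getCertificate receives the certificate and immediately sends it back, while the SIGHUP loop swaps in a fresh one. The same idiom in isolation, with strings standing in for certificates:

package main

import "fmt"

func main() {
	current := make(chan string, 1)
	current <- "cert-v1"

	// Reader (like getCertificate): take the value, put it back.
	c := <-current
	current <- c
	fmt.Println("serving with", c)

	// Writer (like the SIGHUP loop): drop the old value, install the new.
	<-current
	current <- "cert-v2"
	fmt.Println("reloaded")
}
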
diff --git a/services/keepstore/server_test.go b/services/keepstore/server_test.go
new file mode 100644 (file)
index 0000000..84adf36
--- /dev/null
@@ -0,0 +1,47 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "crypto/tls"
+       "io/ioutil"
+       "net"
+       "net/http"
+       "testing"
+)
+
+func TestTLS(t *testing.T) {
+       defer func() {
+               theConfig.TLSKeyFile = ""
+               theConfig.TLSCertificateFile = ""
+       }()
+       theConfig.TLSKeyFile = "../api/tmp/self-signed.key"
+       theConfig.TLSCertificateFile = "../api/tmp/self-signed.pem"
+       srv := &server{}
+       srv.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+               w.Write([]byte("OK"))
+       })
+       l, err := net.Listen("tcp", ":")
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer l.Close()
+       go srv.Serve(l)
+       defer srv.Shutdown(context.Background())
+       c := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}
+       resp, err := c.Get("https://" + l.Addr().String() + "/")
+       if err != nil {
+               t.Fatal(err)
+       }
+       body, err := ioutil.ReadAll(resp.Body)
+       if err != nil {
+               t.Error(err)
+       }
+       if !bytes.Equal(body, []byte("OK")) {
+               t.Errorf("expected OK, got %q", body)
+       }
+}
diff --git a/services/keepstore/stats_ticker.go b/services/keepstore/stats_ticker.go
new file mode 100644 (file)
index 0000000..342b9e3
--- /dev/null
@@ -0,0 +1,79 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "sync"
+       "sync/atomic"
+
+       "github.com/prometheus/client_golang/prometheus"
+)
+
+type statsTicker struct {
+       Errors   uint64
+       InBytes  uint64
+       OutBytes uint64
+
+       ErrorCodes map[string]uint64 `json:",omitempty"`
+       lock       sync.Mutex
+
+       opsCounters *prometheus.CounterVec
+       errCounters *prometheus.CounterVec
+       ioBytes     *prometheus.CounterVec
+}
+
+// Tick increments each of the given counters by 1 using
+// atomic.AddUint64.
+func (s *statsTicker) Tick(counters ...*uint64) {
+       for _, counter := range counters {
+               atomic.AddUint64(counter, 1)
+       }
+}
+
+// TickErr increments the overall error counter, as well as the
+// ErrorCodes entry for the given errType. If err is nil, TickErr is a
+// no-op.
+func (s *statsTicker) TickErr(err error, errType string) {
+       if err == nil {
+               return
+       }
+       s.Tick(&s.Errors)
+
+       s.lock.Lock()
+       if s.ErrorCodes == nil {
+               s.ErrorCodes = make(map[string]uint64)
+       }
+       s.ErrorCodes[errType]++
+       s.lock.Unlock()
+       if s.errCounters != nil {
+               s.errCounters.With(prometheus.Labels{"error_type": errType}).Inc()
+       }
+}
+
+// TickInBytes increments the incoming byte counter by n.
+func (s *statsTicker) TickInBytes(n uint64) {
+       if s.ioBytes != nil {
+               s.ioBytes.With(prometheus.Labels{"direction": "in"}).Add(float64(n))
+       }
+       atomic.AddUint64(&s.InBytes, n)
+}
+
+// TickOutBytes increments the outgoing byte counter by n.
+func (s *statsTicker) TickOutBytes(n uint64) {
+       if s.ioBytes != nil {
+               s.ioBytes.With(prometheus.Labels{"direction": "out"}).Add(float64(n))
+       }
+       atomic.AddUint64(&s.OutBytes, n)
+}
+
+// TickOps increments the counter of the listed operations by 1.
+func (s *statsTicker) TickOps(operations ...string) {
+       if s.opsCounters == nil {
+               return
+       }
+       for _, opType := range operations {
+               s.opsCounters.With(prometheus.Labels{"operation": opType}).Inc()
+       }
+}
diff --git a/services/keepstore/status_test.go b/services/keepstore/status_test.go
new file mode 100644 (file)
index 0000000..dc6efb0
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+)
+
+// We don't have isolated unit tests for /status.json yet, but we do
+// check (e.g., in pull_worker_test.go) that /status.json reports
+// specific statistics correctly at the appropriate times.
+
+// getStatusItem("foo","bar","baz") retrieves /status.json, decodes
+// the response body, and returns the value at ["foo"]["bar"]["baz"].
+func getStatusItem(keys ...string) interface{} {
+       resp := IssueRequest(&RequestTester{"/status.json", "", "GET", nil})
+       var s interface{}
+       json.NewDecoder(resp.Body).Decode(&s)
+       for _, k := range keys {
+               s = s.(map[string]interface{})[k]
+       }
+       return s
+}
diff --git a/services/keepstore/trash_worker.go b/services/keepstore/trash_worker.go
new file mode 100644 (file)
index 0000000..8a9fedf
--- /dev/null
@@ -0,0 +1,74 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "errors"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+// RunTrashWorker runs keepstore's trash worker loop: for each trash
+// request received on trashq, it deletes the block indicated by the
+// request's Locator, then waits for the next request.
+func RunTrashWorker(trashq *WorkQueue) {
+       for item := range trashq.NextItem {
+               trashRequest := item.(TrashRequest)
+               TrashItem(trashRequest)
+               trashq.DoneItem <- struct{}{}
+       }
+}
+
+// TrashItem deletes the indicated block from every writable volume.
+func TrashItem(trashRequest TrashRequest) {
+       reqMtime := time.Unix(0, trashRequest.BlockMtime)
+       if time.Since(reqMtime) < theConfig.BlobSignatureTTL.Duration() {
+               log.Printf("WARNING: data manager asked to delete a %v old block %v (BlockMtime %d = %v), but my blobSignatureTTL is %v! Skipping.",
+                       arvados.Duration(time.Since(reqMtime)),
+                       trashRequest.Locator,
+                       trashRequest.BlockMtime,
+                       reqMtime,
+                       theConfig.BlobSignatureTTL)
+               return
+       }
+
+       var volumes []Volume
+       if uuid := trashRequest.MountUUID; uuid == "" {
+               volumes = KeepVM.AllWritable()
+       } else if v := KeepVM.Lookup(uuid, true); v == nil {
+               log.Printf("warning: trash request for nonexistent mount: %v", trashRequest)
+               return
+       } else {
+               volumes = []Volume{v}
+       }
+
+       for _, volume := range volumes {
+               mtime, err := volume.Mtime(trashRequest.Locator)
+               if err != nil {
+                       log.Printf("%v Trash(%v): %v", volume, trashRequest.Locator, err)
+                       continue
+               }
+               if trashRequest.BlockMtime != mtime.UnixNano() {
+                       log.Printf("%v Trash(%v): stored mtime %v does not match trash list value %v", volume, trashRequest.Locator, mtime.UnixNano(), trashRequest.BlockMtime)
+                       continue
+               }
+
+               if !theConfig.EnableDelete {
+                       err = errors.New("skipping because EnableDelete is false")
+               } else {
+                       err = volume.Trash(trashRequest.Locator)
+               }
+
+               if err != nil {
+                       log.Printf("%v Trash(%v): %v", volume, trashRequest.Locator, err)
+               } else {
+                       log.Printf("%v Trash(%v) OK", volume, trashRequest.Locator)
+               }
+       }
+}
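
The BlockMtime comparison above is the trash protocol's safety interlock: a request is honored only if the stored timestamp still equals the one the data manager saw when it built the trash list, so a block rewritten in the meantime is left alone. The guard in isolation:

package main

import (
	"fmt"
	"time"
)

// shouldTrash honors the request only if the stored mtime still
// matches what the trash list recorded.
func shouldTrash(requestMtime int64, stored time.Time) bool {
	return requestMtime == stored.UnixNano()
}

func main() {
	stored := time.Now()
	fmt.Println(shouldTrash(stored.UnixNano(), stored))                 // true: safe to trash
	fmt.Println(shouldTrash(stored.Add(-time.Hour).UnixNano(), stored)) // false: stale request
}
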
diff --git a/services/keepstore/trash_worker_test.go b/services/keepstore/trash_worker_test.go
new file mode 100644 (file)
index 0000000..c5a410b
--- /dev/null
@@ -0,0 +1,366 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "container/list"
+       "context"
+       "testing"
+       "time"
+)
+
+type TrashWorkerTestData struct {
+       Locator1    string
+       Block1      []byte
+       BlockMtime1 int64
+
+       Locator2    string
+       Block2      []byte
+       BlockMtime2 int64
+
+       CreateData      bool
+       CreateInVolume1 bool
+
+       UseTrashLifeTime bool
+       DifferentMtimes  bool
+
+       DeleteLocator    string
+       SpecifyMountUUID bool
+
+       ExpectLocator1 bool
+       ExpectLocator2 bool
+}
+
+// Delete a block that does not exist in any of the keep volumes.
+// Expect no errors.
+func TestTrashWorkerIntegration_GetNonExistingLocator(t *testing.T) {
+       theConfig.EnableDelete = true
+       testData := TrashWorkerTestData{
+               Locator1: "5d41402abc4b2a76b9719d911017c592",
+               Block1:   []byte("hello"),
+
+               Locator2: "5d41402abc4b2a76b9719d911017c592",
+               Block2:   []byte("hello"),
+
+               CreateData: false,
+
+               DeleteLocator: "5d41402abc4b2a76b9719d911017c592",
+
+               ExpectLocator1: false,
+               ExpectLocator2: false,
+       }
+       performTrashWorkerTest(testData, t)
+}
+
+// Delete a block that exists on volume 1 of the keep servers. Expect
+// the second locator in volume 2 to be unaffected.
+func TestTrashWorkerIntegration_LocatorInVolume1(t *testing.T) {
+       theConfig.EnableDelete = true
+       testData := TrashWorkerTestData{
+               Locator1: TestHash,
+               Block1:   TestBlock,
+
+               Locator2: TestHash2,
+               Block2:   TestBlock2,
+
+               CreateData: true,
+
+               DeleteLocator: TestHash, // first locator
+
+               ExpectLocator1: false,
+               ExpectLocator2: true,
+       }
+       performTrashWorkerTest(testData, t)
+}
+
+// Delete a block that exists on volume 2 of the keep servers. Expect
+// the first locator in volume 1 to be unaffected.
+func TestTrashWorkerIntegration_LocatorInVolume2(t *testing.T) {
+       theConfig.EnableDelete = true
+       testData := TrashWorkerTestData{
+               Locator1: TestHash,
+               Block1:   TestBlock,
+
+               Locator2: TestHash2,
+               Block2:   TestBlock2,
+
+               CreateData: true,
+
+               DeleteLocator: TestHash2, // locator 2
+
+               ExpectLocator1: true,
+               ExpectLocator2: false,
+       }
+       performTrashWorkerTest(testData, t)
+}
+
+// Delete a block with matching mtime for the locator in both volumes.
+// Expect the locator to be deleted from both.
+func TestTrashWorkerIntegration_LocatorInBothVolumes(t *testing.T) {
+       theConfig.EnableDelete = true
+       testData := TrashWorkerTestData{
+               Locator1: TestHash,
+               Block1:   TestBlock,
+
+               Locator2: TestHash,
+               Block2:   TestBlock,
+
+               CreateData: true,
+
+               DeleteLocator: TestHash,
+
+               ExpectLocator1: false,
+               ExpectLocator2: false,
+       }
+       performTrashWorkerTest(testData, t)
+}
+
+// The same locator exists in both volumes, with different Mtimes.
+// Delete the second copy and expect the first to still be present.
+func TestTrashWorkerIntegration_MtimeMatchesForLocator1ButNotForLocator2(t *testing.T) {
+       theConfig.EnableDelete = true
+       testData := TrashWorkerTestData{
+               Locator1: TestHash,
+               Block1:   TestBlock,
+
+               Locator2: TestHash,
+               Block2:   TestBlock,
+
+               CreateData:      true,
+               DifferentMtimes: true,
+
+               DeleteLocator: TestHash,
+
+               ExpectLocator1: true,
+               ExpectLocator2: false,
+       }
+       performTrashWorkerTest(testData, t)
+}
+
+// Delete a block that exists on both volumes with matching mtimes,
+// but specify a MountUUID in the request so it only gets deleted from
+// the first volume.
+func TestTrashWorkerIntegration_SpecifyMountUUID(t *testing.T) {
+       theConfig.EnableDelete = true
+       testData := TrashWorkerTestData{
+               Locator1: TestHash,
+               Block1:   TestBlock,
+
+               Locator2: TestHash,
+               Block2:   TestBlock,
+
+               CreateData: true,
+
+               DeleteLocator:    TestHash,
+               SpecifyMountUUID: true,
+
+               ExpectLocator1: true,
+               ExpectLocator2: true,
+       }
+       performTrashWorkerTest(testData, t)
+}
+
+// Two different locators exist in volume 1. Delete one of them and
+// expect the other to be unaffected.
+func TestTrashWorkerIntegration_TwoDifferentLocatorsInVolume1(t *testing.T) {
+       theConfig.EnableDelete = true
+       testData := TrashWorkerTestData{
+               Locator1: TestHash,
+               Block1:   TestBlock,
+
+               Locator2: TestHash2,
+               Block2:   TestBlock2,
+
+               CreateData:      true,
+               CreateInVolume1: true,
+
+               DeleteLocator: TestHash, // locator 1
+
+               ExpectLocator1: false,
+               ExpectLocator2: true,
+       }
+       performTrashWorkerTest(testData, t)
+}
+
+// Use the default trash lifetime. The newly created block will not be
+// deleted because its Mtime is within the trash lifetime.
+func TestTrashWorkerIntegration_SameLocatorInTwoVolumesWithDefaultTrashLifeTime(t *testing.T) {
+       theConfig.EnableDelete = true
+       testData := TrashWorkerTestData{
+               Locator1: TestHash,
+               Block1:   TestBlock,
+
+               Locator2: TestHash2,
+               Block2:   TestBlock2,
+
+               CreateData:      true,
+               CreateInVolume1: true,
+
+               UseTrashLifeTime: true,
+
+               DeleteLocator: TestHash, // locator 1
+
+               // Since trash life time is in effect, block won't be deleted.
+               ExpectLocator1: true,
+               ExpectLocator2: true,
+       }
+       performTrashWorkerTest(testData, t)
+}
+
+// Delete a block with matching mtime for the locator in both volumes,
+// but with EnableDelete false, so the block won't be deleted.
+func TestTrashWorkerIntegration_DisabledDelete(t *testing.T) {
+       theConfig.EnableDelete = false
+       testData := TrashWorkerTestData{
+               Locator1: TestHash,
+               Block1:   TestBlock,
+
+               Locator2: TestHash,
+               Block2:   TestBlock,
+
+               CreateData: true,
+
+               DeleteLocator: TestHash,
+
+               ExpectLocator1: true,
+               ExpectLocator2: true,
+       }
+       performTrashWorkerTest(testData, t)
+}
+
+// performTrashWorkerTest runs a single trash worker scenario as
+// described by testData.
+func performTrashWorkerTest(testData TrashWorkerTestData, t *testing.T) {
+       // Create Keep Volumes
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Close()
+
+       // Put test content
+       vols := KeepVM.AllWritable()
+       if testData.CreateData {
+               vols[0].Put(context.Background(), testData.Locator1, testData.Block1)
+               vols[0].Put(context.Background(), testData.Locator1+".meta", []byte("metadata"))
+
+               if testData.CreateInVolume1 {
+                       vols[0].Put(context.Background(), testData.Locator2, testData.Block2)
+                       vols[0].Put(context.Background(), testData.Locator2+".meta", []byte("metadata"))
+               } else {
+                       vols[1].Put(context.Background(), testData.Locator2, testData.Block2)
+                       vols[1].Put(context.Background(), testData.Locator2+".meta", []byte("metadata"))
+               }
+       }
+
+       oldBlockTime := time.Now().Add(-theConfig.BlobSignatureTTL.Duration() - time.Minute)
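+       // oldBlockTime is older than BlobSignatureTTL, so TrashItem
+       // will not refuse the request as too new.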
+
+       // Create TrashRequest for the test
+       trashRequest := TrashRequest{
+               Locator:    testData.DeleteLocator,
+               BlockMtime: oldBlockTime.UnixNano(),
+       }
+       if testData.SpecifyMountUUID {
+               trashRequest.MountUUID = KeepVM.Mounts()[0].UUID
+       }
+
+       // Run trash worker and put the trashRequest on trashq
+       trashList := list.New()
+       trashList.PushBack(trashRequest)
+       trashq = NewWorkQueue()
+       defer trashq.Close()
+
+       if !testData.UseTrashLifeTime {
+               // The trash worker will not delete a block whose
+               // Mtime is within the trash lifetime, so back-date
+               // the block to let the deletion succeed. With
+               // DifferentMtimes, each volume gets a slightly
+               // different timestamp, so only the first copy will
+               // match the trash request.
+               for _, v := range vols {
+                       v.(*MockVolume).Timestamps[testData.DeleteLocator] = oldBlockTime
+                       if testData.DifferentMtimes {
+                               oldBlockTime = oldBlockTime.Add(time.Second)
+                       }
+               }
+       }
+       go RunTrashWorker(trashq)
+
+       // Install gate so all local operations block until we say go
+       gate := make(chan struct{})
+       for _, v := range vols {
+               v.(*MockVolume).Gate = gate
+       }
+
+       assertStatusItem := func(k string, expect float64) {
+               if v := getStatusItem("TrashQueue", k); v != expect {
+                       t.Errorf("Got %s %v, expected %v", k, v, expect)
+               }
+       }
+
+       assertStatusItem("InProgress", 0)
+       assertStatusItem("Queued", 0)
+
+       listLen := trashList.Len()
+       trashq.ReplaceQueue(trashList)
+
+       // Wait for worker to take request(s)
+       expectEqualWithin(t, time.Second, listLen, func() interface{} { return trashq.Status().InProgress })
+
+       // Ensure status.json also reports work is happening
+       assertStatusItem("InProgress", float64(1))
+       assertStatusItem("Queued", float64(listLen-1))
+
+       // Let worker proceed
+       close(gate)
+
+       // Wait for worker to finish
+       expectEqualWithin(t, time.Second, 0, func() interface{} { return trashq.Status().InProgress })
+
+       // Verify that Locator1 was deleted (or not) as expected
+       buf := make([]byte, BlockSize)
+       size, err := GetBlock(context.Background(), testData.Locator1, buf, nil)
+       if testData.ExpectLocator1 {
+               if size == 0 || err != nil {
+                       t.Errorf("Expected Locator1 to be still present: %s", testData.Locator1)
+               }
+       } else {
+               if size > 0 || err == nil {
+                       t.Errorf("Expected Locator1 to be deleted: %s", testData.Locator1)
+               }
+       }
+
+       // Verify that Locator2 was deleted (or not) as expected
+       if testData.Locator1 != testData.Locator2 {
+               size, err = GetBlock(context.Background(), testData.Locator2, buf, nil)
+               if testData.ExpectLocator2 {
+                       if size == 0 || err != nil {
+                               t.Errorf("Expected Locator2 to be still present: %s", testData.Locator2)
+                       }
+               } else {
+                       if size > 0 || err == nil {
+                               t.Errorf("Expected Locator2 to be deleted: %s", testData.Locator2)
+                       }
+               }
+       }
+
+       // The DifferentMtimes test puts the same locator in two
+       // different volumes, but only one copy has an Mtime matching
+       // the trash request.
+       if testData.DifferentMtimes {
+               locatorFoundIn := 0
+               for _, volume := range KeepVM.AllReadable() {
+                       buf := make([]byte, BlockSize)
+                       if _, err := volume.Get(context.Background(), testData.Locator1, buf); err == nil {
+                               locatorFoundIn++
+                       }
+               }
+               if locatorFoundIn != 1 {
+                       t.Errorf("Found %d copies of %s, expected 1", locatorFoundIn, testData.Locator1)
+               }
+       }
+}
diff --git a/services/keepstore/unix_volume.go b/services/keepstore/unix_volume.go
new file mode 100644 (file)
index 0000000..96f4587
--- /dev/null
@@ -0,0 +1,873 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bufio"
+       "context"
+       "flag"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "regexp"
+       "strconv"
+       "strings"
+       "sync"
+       "sync/atomic"
+       "syscall"
+       "time"
+
+       "github.com/prometheus/client_golang/prometheus"
+)
+
+type unixVolumeAdder struct {
+       *Config
+}
+
+// String implements flag.Value
+func (vs *unixVolumeAdder) String() string {
+       return "-"
+}
+
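+// Set implements flag.Value by adding a UnixVolume rooted at the
+// given path. For backward compatibility, a comma-separated list of
+// paths is also accepted.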
+func (vs *unixVolumeAdder) Set(path string) error {
+       if dirs := strings.Split(path, ","); len(dirs) > 1 {
+               log.Print("DEPRECATED: using comma-separated volume list.")
+               for _, dir := range dirs {
+                       if err := vs.Set(dir); err != nil {
+                               return err
+                       }
+               }
+               return nil
+       }
+       vs.Config.Volumes = append(vs.Config.Volumes, &UnixVolume{
+               Root:      path,
+               ReadOnly:  deprecated.flagReadonly,
+               Serialize: deprecated.flagSerializeIO,
+       })
+       return nil
+}
+
+func init() {
+       VolumeTypes = append(VolumeTypes, func() VolumeWithExamples { return &UnixVolume{} })
+
+       flag.Var(&unixVolumeAdder{theConfig}, "volumes", "see Volumes configuration")
+       flag.Var(&unixVolumeAdder{theConfig}, "volume", "see Volumes configuration")
+}
+
+// Discover adds a UnixVolume for every directory named "keep" that is
+// located at the top level of a device- or tmpfs-backed mount point
+// other than "/". It returns the number of volumes added.
+func (vs *unixVolumeAdder) Discover() int {
+       added := 0
+       f, err := os.Open(ProcMounts)
+       if err != nil {
+               log.Fatalf("opening %s: %s", ProcMounts, err)
+       }
+       scanner := bufio.NewScanner(f)
+       for scanner.Scan() {
+               args := strings.Fields(scanner.Text())
+               if err := scanner.Err(); err != nil {
+                       log.Fatalf("reading %s: %s", ProcMounts, err)
+               }
+               if len(args) < 4 {
+                       // Skip malformed lines rather than panicking
+                       // on the args[3] access below.
+                       continue
+               }
+               dev, mount := args[0], args[1]
+               if mount == "/" {
+                       continue
+               }
+               if dev != "tmpfs" && !strings.HasPrefix(dev, "/dev/") {
+                       continue
+               }
+               keepdir := mount + "/keep"
+               if st, err := os.Stat(keepdir); err != nil || !st.IsDir() {
+                       continue
+               }
+               // Set the -readonly flag (but only for this volume)
+               // if the filesystem is mounted readonly.
+               flagReadonlyWas := deprecated.flagReadonly
+               for _, fsopt := range strings.Split(args[3], ",") {
+                       if fsopt == "ro" {
+                               deprecated.flagReadonly = true
+                               break
+                       }
+                       if fsopt == "rw" {
+                               break
+                       }
+               }
+               if err := vs.Set(keepdir); err != nil {
+                       log.Printf("adding %q: %s", keepdir, err)
+               } else {
+                       added++
+               }
+               deprecated.flagReadonly = flagReadonlyWas
+       }
+       return added
+}
+
+// A UnixVolume stores and retrieves blocks in a local directory.
+type UnixVolume struct {
+       Root                 string // path to the volume's root directory
+       ReadOnly             bool
+       Serialize            bool
+       DirectoryReplication int
+       StorageClasses       []string
+
+       // something to lock during IO, typically a sync.Mutex (or nil
+       // to skip locking)
+       locker sync.Locker
+
+       os osWithStats
+}
+
+// DeviceID returns a globally unique ID for the volume's root
+// directory, consisting of the filesystem's UUID and the path from
+// filesystem root to storage directory, joined by "/". For example,
+// the DeviceID for a local directory "/mnt/xvda1/keep" might be
+// "fa0b6166-3b55-4994-bd3f-92f4e00a1bb0/keep".
+func (v *UnixVolume) DeviceID() string {
+       giveup := func(f string, args ...interface{}) string {
+               log.Printf(f+"; using blank DeviceID for volume %s", append(args, v)...)
+               return ""
+       }
+       buf, err := exec.Command("findmnt", "--noheadings", "--target", v.Root).CombinedOutput()
+       if err != nil {
+               return giveup("findmnt: %s (%q)", err, buf)
+       }
+       findmnt := strings.Fields(string(buf))
+       if len(findmnt) < 2 {
+               return giveup("could not parse findmnt output: %q", buf)
+       }
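+       // With --noheadings, findmnt prints the mount target first and
+       // the source device second (assuming its default columns).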
+       fsRoot, dev := findmnt[0], findmnt[1]
+
+       absRoot, err := filepath.Abs(v.Root)
+       if err != nil {
+               return giveup("resolving relative path %q: %s", v.Root, err)
+       }
+       realRoot, err := filepath.EvalSymlinks(absRoot)
+       if err != nil {
+               return giveup("resolving symlinks in %q: %s", absRoot, err)
+       }
+
+       // Find path from filesystem root to realRoot
+       var fsPath string
+       if strings.HasPrefix(realRoot, fsRoot+"/") {
+               fsPath = realRoot[len(fsRoot):]
+       } else if fsRoot == "/" {
+               fsPath = realRoot
+       } else if fsRoot == realRoot {
+               fsPath = ""
+       } else {
+               return giveup("findmnt reports mount point %q which is not a prefix of volume root %q", fsRoot, realRoot)
+       }
+
+       if !strings.HasPrefix(dev, "/") {
+               return giveup("mount %q device %q is not a path", fsRoot, dev)
+       }
+
+       fi, err := os.Stat(dev)
+       if err != nil {
+               return giveup("stat %q: %s", dev, err)
+       }
+       ino := fi.Sys().(*syscall.Stat_t).Ino
+
+       // Find a symlink in /dev/disk/by-uuid/ whose target is (i.e.,
+       // has the same inode as) the mounted device
+       udir := "/dev/disk/by-uuid"
+       d, err := os.Open(udir)
+       if err != nil {
+               return giveup("opening %q: %s", udir, err)
+       }
+       uuids, err := d.Readdirnames(0)
+       if err != nil {
+               return giveup("reading %q: %s", udir, err)
+       }
+       for _, uuid := range uuids {
+               link := filepath.Join(udir, uuid)
+               fi, err = os.Stat(link)
+               if err != nil {
+                       log.Printf("error: stat %q: %s", link, err)
+                       continue
+               }
+               if fi.Sys().(*syscall.Stat_t).Ino == ino {
+                       return uuid + fsPath
+               }
+       }
+       return giveup("could not find entry in %q matching %q", udir, dev)
+}
+
+// Examples implements VolumeWithExamples.
+func (*UnixVolume) Examples() []Volume {
+       return []Volume{
+               &UnixVolume{
+                       Root:                 "/mnt/local-disk",
+                       Serialize:            true,
+                       DirectoryReplication: 1,
+               },
+               &UnixVolume{
+                       Root:                 "/mnt/network-disk",
+                       Serialize:            false,
+                       DirectoryReplication: 2,
+               },
+       }
+}
+
+// Type implements Volume
+func (v *UnixVolume) Type() string {
+       return "Directory"
+}
+
+// Start implements Volume
+func (v *UnixVolume) Start(vm *volumeMetricsVecs) error {
+       if v.Serialize {
+               v.locker = &sync.Mutex{}
+       }
+       if !strings.HasPrefix(v.Root, "/") {
+               return fmt.Errorf("volume root does not start with '/': %q", v.Root)
+       }
+       if v.DirectoryReplication == 0 {
+               v.DirectoryReplication = 1
+       }
+       // Set up prometheus metrics
+       lbls := prometheus.Labels{"device_id": v.DeviceID()}
+       v.os.stats.opsCounters, v.os.stats.errCounters, v.os.stats.ioBytes = vm.getCounterVecsFor(lbls)
+
+       _, err := v.os.Stat(v.Root)
+
+       return err
+}
+
+// Touch sets the timestamp for the given locator to the current time
+func (v *UnixVolume) Touch(loc string) error {
+       if v.ReadOnly {
+               return MethodDisabledError
+       }
+       p := v.blockPath(loc)
+       f, err := v.os.OpenFile(p, os.O_RDWR|os.O_APPEND, 0644)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+       if err := v.lock(context.TODO()); err != nil {
+               return err
+       }
+       defer v.unlock()
+       if e := v.lockfile(f); e != nil {
+               return e
+       }
+       defer v.unlockfile(f)
+       ts := syscall.NsecToTimespec(time.Now().UnixNano())
+       v.os.stats.TickOps("utimes")
+       v.os.stats.Tick(&v.os.stats.UtimesOps)
+       err = syscall.UtimesNano(p, []syscall.Timespec{ts, ts})
+       v.os.stats.TickErr(err)
+       return err
+}
+
+// Mtime returns the stored timestamp for the given locator.
+func (v *UnixVolume) Mtime(loc string) (time.Time, error) {
+       p := v.blockPath(loc)
+       fi, err := v.os.Stat(p)
+       if err != nil {
+               return time.Time{}, err
+       }
+       return fi.ModTime(), nil
+}
+
+// getFunc acquires the serialize lock (if one is in use), opens the
+// file for reading, and calls fn once the file is ready to read.
+func (v *UnixVolume) getFunc(ctx context.Context, path string, fn func(io.Reader) error) error {
+       if err := v.lock(ctx); err != nil {
+               return err
+       }
+       defer v.unlock()
+       f, err := v.os.Open(path)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+       return fn(NewCountingReader(ioutil.NopCloser(f), v.os.stats.TickInBytes))
+}
+
+// stat is os.Stat() with some extra sanity checks.
+func (v *UnixVolume) stat(path string) (os.FileInfo, error) {
+       stat, err := v.os.Stat(path)
+       if err == nil {
+               if stat.Size() < 0 {
+                       err = os.ErrInvalid
+               } else if stat.Size() > BlockSize {
+                       err = TooLongError
+               }
+       }
+       return stat, err
+}
+
+// Get retrieves a block, copies it to the given slice, and returns
+// the number of bytes copied.
+func (v *UnixVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
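+       // getWithPipe streams the data through a pipe so the transfer
+       // can be interrupted if ctx is canceled.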
+       return getWithPipe(ctx, loc, buf, v)
+}
+
+// ReadBlock implements BlockReader.
+func (v *UnixVolume) ReadBlock(ctx context.Context, loc string, w io.Writer) error {
+       path := v.blockPath(loc)
+       stat, err := v.stat(path)
+       if err != nil {
+               return v.translateError(err)
+       }
+       return v.getFunc(ctx, path, func(rdr io.Reader) error {
+               n, err := io.Copy(w, rdr)
+               if err == nil && n != stat.Size() {
+                       err = io.ErrUnexpectedEOF
+               }
+               return err
+       })
+}
+
+// Compare returns nil if Get(loc) would return the same content as
+// expect. It is functionally equivalent to Get() followed by
+// bytes.Compare(), but uses less memory.
+func (v *UnixVolume) Compare(ctx context.Context, loc string, expect []byte) error {
+       path := v.blockPath(loc)
+       if _, err := v.stat(path); err != nil {
+               return v.translateError(err)
+       }
+       return v.getFunc(ctx, path, func(rdr io.Reader) error {
+               return compareReaderWithBuf(ctx, rdr, expect, loc[:32])
+       })
+}
+
+// Put stores a block of data identified by the locator string
+// "loc".  It returns nil on success.  If the volume is full, it
+// returns a FullError.  If the write fails due to some other error,
+// that error is returned.
+func (v *UnixVolume) Put(ctx context.Context, loc string, block []byte) error {
+       return putWithPipe(ctx, loc, block, v)
+}
+
+// WriteBlock implements BlockWriter.
+func (v *UnixVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader) error {
+       if v.ReadOnly {
+               return MethodDisabledError
+       }
+       if v.IsFull() {
+               return FullError
+       }
+       bdir := v.blockDir(loc)
+       if err := os.MkdirAll(bdir, 0755); err != nil {
+               log.Printf("%s: could not create directory %s: %s",
+                       loc, bdir, err)
+               return err
+       }
+
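+       // Write to a temp file in the block directory, then rename it
+       // into place, so a partial or failed write can never be
+       // mistaken for a complete block.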
+       tmpfile, tmperr := v.os.TempFile(bdir, "tmp"+loc)
+       if tmperr != nil {
+               log.Printf("ioutil.TempFile(%s, tmp%s): %s", bdir, loc, tmperr)
+               return tmperr
+       }
+
+       bpath := v.blockPath(loc)
+
+       if err := v.lock(ctx); err != nil {
+               return err
+       }
+       defer v.unlock()
+       n, err := io.Copy(tmpfile, rdr)
+       v.os.stats.TickOutBytes(uint64(n))
+       if err != nil {
+               log.Printf("%s: writing to %s: %s\n", v, bpath, err)
+               tmpfile.Close()
+               v.os.Remove(tmpfile.Name())
+               return err
+       }
+       if err := tmpfile.Close(); err != nil {
+               log.Printf("closing %s: %s\n", tmpfile.Name(), err)
+               v.os.Remove(tmpfile.Name())
+               return err
+       }
+       if err := v.os.Rename(tmpfile.Name(), bpath); err != nil {
+               log.Printf("rename %s %s: %s\n", tmpfile.Name(), bpath, err)
+               v.os.Remove(tmpfile.Name())
+               // Return the rename error, not the cleanup result, so
+               // a failed write is never reported as success.
+               return err
+       }
+       return nil
+}
+
+// Status returns a VolumeStatus struct describing the volume's
+// current state, or nil if an error occurs.
+func (v *UnixVolume) Status() *VolumeStatus {
+       fi, err := v.os.Stat(v.Root)
+       if err != nil {
+               log.Printf("%s: os.Stat: %s\n", v, err)
+               return nil
+       }
+       devnum := fi.Sys().(*syscall.Stat_t).Dev
+
+       var fs syscall.Statfs_t
+       if err := syscall.Statfs(v.Root, &fs); err != nil {
+               log.Printf("%s: statfs: %s\n", v, err)
+               return nil
+       }
+       // These calculations match the way df calculates disk usage:
+       // "free" space is measured by fs.Bavail, but "used" space
+       // uses fs.Blocks - fs.Bfree.
+       free := fs.Bavail * uint64(fs.Bsize)
+       used := (fs.Blocks - fs.Bfree) * uint64(fs.Bsize)
+       return &VolumeStatus{
+               MountPoint: v.Root,
+               DeviceNum:  devnum,
+               BytesFree:  free,
+               BytesUsed:  used,
+       }
+}
+
+var blockDirRe = regexp.MustCompile(`^[0-9a-f]+$`)
+var blockFileRe = regexp.MustCompile(`^[0-9a-f]{32}$`)
+
+// IndexTo writes (to the given Writer) a list of blocks found on this
+// volume which begin with the specified prefix. If the prefix is an
+// empty string, IndexTo writes a complete list of blocks.
+//
+// Each block is given in the format
+//
+//     locator+size modification-time {newline}
+//
+// e.g.:
+//
+//     e4df392f86be161ca6ed3773a962b8f3+67108864 1388894303
+//     e4d41e6fd68460e0e3fc18cc746959d2+67108864 1377796043
+//     e4de7a2810f5554cd39b36d8ddb132ff+67108864 1388701136
+func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
+       var lastErr error
+       rootdir, err := v.os.Open(v.Root)
+       if err != nil {
+               return err
+       }
+       defer rootdir.Close()
+       v.os.stats.TickOps("readdir")
+       v.os.stats.Tick(&v.os.stats.ReaddirOps)
+       for {
+               names, err := rootdir.Readdirnames(1)
+               if err == io.EOF {
+                       return lastErr
+               } else if err != nil {
+                       return err
+               }
+               if !strings.HasPrefix(names[0], prefix) && !strings.HasPrefix(prefix, names[0]) {
+                       // prefix excludes all blocks stored in this dir
+                       continue
+               }
+               if !blockDirRe.MatchString(names[0]) {
+                       continue
+               }
+               blockdirpath := filepath.Join(v.Root, names[0])
+               blockdir, err := v.os.Open(blockdirpath)
+               if err != nil {
+                       log.Print("Error reading ", blockdirpath, ": ", err)
+                       lastErr = err
+                       continue
+               }
+               v.os.stats.TickOps("readdir")
+               v.os.stats.Tick(&v.os.stats.ReaddirOps)
+               for {
+                       fileInfo, err := blockdir.Readdir(1)
+                       if err == io.EOF {
+                               break
+                       } else if err != nil {
+                               log.Print("Error reading ", blockdirpath, ": ", err)
+                               lastErr = err
+                               break
+                       }
+                       name := fileInfo[0].Name()
+                       if !strings.HasPrefix(name, prefix) {
+                               continue
+                       }
+                       if !blockFileRe.MatchString(name) {
+                               continue
+                       }
+                       _, err = fmt.Fprint(w,
+                               name,
+                               "+", fileInfo[0].Size(),
+                               " ", fileInfo[0].ModTime().UnixNano(),
+                               "\n")
+                       if err != nil {
+                               log.Print("Error writing: ", err)
+                               lastErr = err
+                               break
+                       }
+               }
+               blockdir.Close()
+       }
+}
+
+// Trash moves the indicated block to the trash. If TrashLifetime == 0,
+// the block is deleted outright; otherwise it is renamed to
+// path/{loc}.trash.{deadline}, where deadline = now + TrashLifetime.
+func (v *UnixVolume) Trash(loc string) error {
+       // Touch() must be called before calling Write() on a block.  Touch()
+       // also uses lockfile().  This avoids a race condition between Write()
+       // and Trash() because either (a) the file will be trashed and Touch()
+       // will signal to the caller that the file is not present (and needs to
+       // be re-written), or (b) Touch() will update the file's timestamp and
+       // Trash() will read the correct up-to-date timestamp and choose not to
+       // trash the file.
+
+       if v.ReadOnly {
+               return MethodDisabledError
+       }
+       if err := v.lock(context.TODO()); err != nil {
+               return err
+       }
+       defer v.unlock()
+       p := v.blockPath(loc)
+       f, err := v.os.OpenFile(p, os.O_RDWR|os.O_APPEND, 0644)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+       if e := v.lockfile(f); e != nil {
+               return e
+       }
+       defer v.unlockfile(f)
+
+       // If the block has been PUT in the last blobSignatureTTL
+       // seconds, return success without removing the block. This
+       // protects data from garbage collection until it is no longer
+       // possible for clients to retrieve the unreferenced blocks
+       // anyway (because the permission signatures have expired).
+       if fi, err := v.os.Stat(p); err != nil {
+               return err
+       } else if time.Since(fi.ModTime()) < time.Duration(theConfig.BlobSignatureTTL) {
+               return nil
+       }
+
+       if theConfig.TrashLifetime == 0 {
+               return v.os.Remove(p)
+       }
+       return v.os.Rename(p, fmt.Sprintf("%v.trash.%d", p, time.Now().Add(theConfig.TrashLifetime.Duration()).Unix()))
+}
+
+// Untrash moves a block from the trash back into the store: it looks
+// for path/{loc}.trash.{deadline} in storage and renames the first
+// such file to path/{loc}.
+func (v *UnixVolume) Untrash(loc string) (err error) {
+       if v.ReadOnly {
+               return MethodDisabledError
+       }
+
+       v.os.stats.TickOps("readdir")
+       v.os.stats.Tick(&v.os.stats.ReaddirOps)
+       files, err := ioutil.ReadDir(v.blockDir(loc))
+       if err != nil {
+               return err
+       }
+
+       if len(files) == 0 {
+               return os.ErrNotExist
+       }
+
+       foundTrash := false
+       prefix := fmt.Sprintf("%v.trash.", loc)
+       for _, f := range files {
+               if strings.HasPrefix(f.Name(), prefix) {
+                       foundTrash = true
+                       err = v.os.Rename(v.blockPath(f.Name()), v.blockPath(loc))
+                       if err == nil {
+                               break
+                       }
+               }
+       }
+
+       if !foundTrash {
+               return os.ErrNotExist
+       }
+
+       return
+}
+
+// blockDir returns the fully qualified directory name for the directory
+// where loc is (or would be) stored on this volume.
+func (v *UnixVolume) blockDir(loc string) string {
+       return filepath.Join(v.Root, loc[0:3])
+}
+
+// blockPath returns the fully qualified pathname for the path to loc
+// on this volume.
+func (v *UnixVolume) blockPath(loc string) string {
+       return filepath.Join(v.blockDir(loc), loc)
+}
+
+// IsFull returns true if the free space on the volume is less than
+// MinFreeKilobytes.
+func (v *UnixVolume) IsFull() (isFull bool) {
+       fullSymlink := v.Root + "/full"
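+       // The "full" marker is a symlink whose target is the Unix
+       // timestamp (as a decimal string) of the moment the volume was
+       // last found to be full.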
+
+       // Check if the volume has been marked as full in the last hour.
+       if link, err := os.Readlink(fullSymlink); err == nil {
+               if ts, err := strconv.Atoi(link); err == nil {
+                       fulltime := time.Unix(int64(ts), 0)
+                       if time.Since(fulltime).Hours() < 1.0 {
+                               return true
+                       }
+               }
+       }
+
+       if avail, err := v.FreeDiskSpace(); err == nil {
+               isFull = avail < MinFreeKilobytes
+       } else {
+               log.Printf("%s: FreeDiskSpace: %s\n", v, err)
+               isFull = false
+       }
+
+       // If the volume is full, timestamp it.
+       if isFull {
+               now := fmt.Sprintf("%d", time.Now().Unix())
+               os.Symlink(now, fullSymlink)
+       }
+       return
+}
+
+// FreeDiskSpace returns the number of unused 1k blocks available on
+// the volume.
+func (v *UnixVolume) FreeDiskSpace() (free uint64, err error) {
+       var fs syscall.Statfs_t
+       err = syscall.Statfs(v.Root, &fs)
+       if err == nil {
+               // Statfs output is not guaranteed to measure free
+               // space in terms of 1K blocks.
+               free = fs.Bavail * uint64(fs.Bsize) / 1024
+       }
+       return
+}
+
+func (v *UnixVolume) String() string {
+       return fmt.Sprintf("[UnixVolume %s]", v.Root)
+}
+
+// Writable returns false if all future Put, Mtime, and Delete calls
+// are expected to fail.
+func (v *UnixVolume) Writable() bool {
+       return !v.ReadOnly
+}
+
+// Replication returns the number of replicas promised by the
+// underlying device (as specified in configuration).
+func (v *UnixVolume) Replication() int {
+       return v.DirectoryReplication
+}
+
+// GetStorageClasses implements Volume
+func (v *UnixVolume) GetStorageClasses() []string {
+       return v.StorageClasses
+}
+
+// InternalStats returns I/O and filesystem ops counters.
+func (v *UnixVolume) InternalStats() interface{} {
+       return &v.os.stats
+}
+
+// lock acquires the serialize lock, if one is in use. If ctx is done
+// before the lock is acquired, lock returns ctx.Err() instead of
+// acquiring the lock.
+func (v *UnixVolume) lock(ctx context.Context) error {
+       if v.locker == nil {
+               return nil
+       }
+       locked := make(chan struct{})
+       go func() {
+               v.locker.Lock()
+               close(locked)
+       }()
+       select {
+       case <-ctx.Done():
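+               // The goroutine above will still acquire the lock
+               // eventually; release it as soon as that happens so
+               // the locker is not left held forever.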
+               go func() {
+                       <-locked
+                       v.locker.Unlock()
+               }()
+               return ctx.Err()
+       case <-locked:
+               return nil
+       }
+}
+
+// unlock releases the serialize lock, if one is in use.
+func (v *UnixVolume) unlock() {
+       if v.locker == nil {
+               return
+       }
+       v.locker.Unlock()
+}
+
+// lockfile and unlockfile use flock(2) to manage kernel file locks.
+func (v *UnixVolume) lockfile(f *os.File) error {
+       v.os.stats.TickOps("flock")
+       v.os.stats.Tick(&v.os.stats.FlockOps)
+       err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX)
+       v.os.stats.TickErr(err)
+       return err
+}
+
+func (v *UnixVolume) unlockfile(f *os.File) error {
+       err := syscall.Flock(int(f.Fd()), syscall.LOCK_UN)
+       v.os.stats.TickErr(err)
+       return err
+}
+
+// translateError converts a filesystem-specific error into one
+// recognized by handlers, such as os.ErrNotExist.
+func (v *UnixVolume) translateError(err error) error {
+       switch err.(type) {
+       case *os.PathError:
+               // stat() returns a PathError if the parent directory
+               // (not just the file itself) is missing
+               return os.ErrNotExist
+       default:
+               return err
+       }
+}
+
+var unixTrashLocRegexp = regexp.MustCompile(`/([0-9a-f]{32})\.trash\.(\d+)$`)
+
+// EmptyTrash walks the volume's directory hierarchy looking for
+// {hash}.trash.{deadline} files and deletes those whose deadline has
+// passed.
+func (v *UnixVolume) EmptyTrash() {
+       var bytesDeleted, bytesInTrash int64
+       var blocksDeleted, blocksInTrash int64
+
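+       // doFile examines one file: if its name matches
+       // {hash}.trash.{deadline}, it is counted as trash, and deleted
+       // if its deadline has passed.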
+       doFile := func(path string, info os.FileInfo) {
+               if info.Mode().IsDir() {
+                       return
+               }
+               matches := unixTrashLocRegexp.FindStringSubmatch(path)
+               if len(matches) != 3 {
+                       return
+               }
+               deadline, err := strconv.ParseInt(matches[2], 10, 64)
+               if err != nil {
+                       log.Printf("EmptyTrash: %v: ParseInt(%v): %v", path, matches[2], err)
+                       return
+               }
+               atomic.AddInt64(&bytesInTrash, info.Size())
+               atomic.AddInt64(&blocksInTrash, 1)
+               if deadline > time.Now().Unix() {
+                       return
+               }
+               err = v.os.Remove(path)
+               if err != nil {
+                       log.Printf("EmptyTrash: Remove %v: %v", path, err)
+                       return
+               }
+               atomic.AddInt64(&bytesDeleted, info.Size())
+               atomic.AddInt64(&blocksDeleted, 1)
+       }
+
+       type dirent struct {
+               path string
+               info os.FileInfo
+       }
+       var wg sync.WaitGroup
+       todo := make(chan dirent, theConfig.EmptyTrashWorkers)
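+       // Start at least one worker, even if EmptyTrashWorkers is zero
+       // or unset, so the Walk below always makes progress.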
+       for i := 0; i < 1 || i < theConfig.EmptyTrashWorkers; i++ {
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       for e := range todo {
+                               doFile(e.path, e.info)
+                       }
+               }()
+       }
+
+       err := filepath.Walk(v.Root, func(path string, info os.FileInfo, err error) error {
+               if err != nil {
+                       log.Printf("EmptyTrash: filepath.Walk: %v: %v", path, err)
+                       return nil
+               }
+               todo <- dirent{path, info}
+               return nil
+       })
+       close(todo)
+       wg.Wait()
+
+       if err != nil {
+               log.Printf("EmptyTrash error for %v: %v", v.String(), err)
+       }
+
+       log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+}
+
+type unixStats struct {
+       statsTicker
+       OpenOps    uint64
+       StatOps    uint64
+       FlockOps   uint64
+       UtimesOps  uint64
+       CreateOps  uint64
+       RenameOps  uint64
+       UnlinkOps  uint64
+       ReaddirOps uint64
+}
+
+func (s *unixStats) TickErr(err error) {
+       if err == nil {
+               return
+       }
+       s.statsTicker.TickErr(err, fmt.Sprintf("%T", err))
+}
+
+type osWithStats struct {
+       stats unixStats
+}
+
+func (o *osWithStats) Open(name string) (*os.File, error) {
+       o.stats.TickOps("open")
+       o.stats.Tick(&o.stats.OpenOps)
+       f, err := os.Open(name)
+       o.stats.TickErr(err)
+       return f, err
+}
+
+func (o *osWithStats) OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) {
+       o.stats.TickOps("open")
+       o.stats.Tick(&o.stats.OpenOps)
+       f, err := os.OpenFile(name, flag, perm)
+       o.stats.TickErr(err)
+       return f, err
+}
+
+func (o *osWithStats) Remove(path string) error {
+       o.stats.TickOps("unlink")
+       o.stats.Tick(&o.stats.UnlinkOps)
+       err := os.Remove(path)
+       o.stats.TickErr(err)
+       return err
+}
+
+func (o *osWithStats) Rename(a, b string) error {
+       o.stats.TickOps("rename")
+       o.stats.Tick(&o.stats.RenameOps)
+       err := os.Rename(a, b)
+       o.stats.TickErr(err)
+       return err
+}
+
+func (o *osWithStats) Stat(path string) (os.FileInfo, error) {
+       o.stats.TickOps("stat")
+       o.stats.Tick(&o.stats.StatOps)
+       fi, err := os.Stat(path)
+       o.stats.TickErr(err)
+       return fi, err
+}
+
+func (o *osWithStats) TempFile(dir, base string) (*os.File, error) {
+       o.stats.TickOps("create")
+       o.stats.Tick(&o.stats.CreateOps)
+       f, err := ioutil.TempFile(dir, base)
+       o.stats.TickErr(err)
+       return f, err
+}
diff --git a/services/keepstore/unix_volume_test.go b/services/keepstore/unix_volume_test.go
new file mode 100644 (file)
index 0000000..872f408
--- /dev/null
@@ -0,0 +1,448 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "encoding/json"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "os"
+       "strings"
+       "sync"
+       "syscall"
+       "testing"
+       "time"
+
+       "github.com/ghodss/yaml"
+       "github.com/prometheus/client_golang/prometheus"
+       check "gopkg.in/check.v1"
+)
+
+type TestableUnixVolume struct {
+       UnixVolume
+       t TB
+}
+
+func NewTestableUnixVolume(t TB, serialize bool, readonly bool) *TestableUnixVolume {
+       d, err := ioutil.TempDir("", "volume_test")
+       if err != nil {
+               t.Fatal(err)
+       }
+       var locker sync.Locker
+       if serialize {
+               locker = &sync.Mutex{}
+       }
+       return &TestableUnixVolume{
+               UnixVolume: UnixVolume{
+                       Root:     d,
+                       ReadOnly: readonly,
+                       locker:   locker,
+               },
+               t: t,
+       }
+}
+
+// PutRaw writes a Keep block directly into a UnixVolume, even if
+// the volume is readonly.
+func (v *TestableUnixVolume) PutRaw(locator string, data []byte) {
+       defer func(orig bool) {
+               v.ReadOnly = orig
+       }(v.ReadOnly)
+       v.ReadOnly = false
+       err := v.Put(context.Background(), locator, data)
+       if err != nil {
+               v.t.Fatal(err)
+       }
+}
+
+func (v *TestableUnixVolume) TouchWithDate(locator string, lastPut time.Time) {
+       err := syscall.Utime(v.blockPath(locator), &syscall.Utimbuf{Actime: lastPut.Unix(), Modtime: lastPut.Unix()})
+       if err != nil {
+               v.t.Fatal(err)
+       }
+}
+
+func (v *TestableUnixVolume) Teardown() {
+       if err := os.RemoveAll(v.Root); err != nil {
+               v.t.Fatal(err)
+       }
+}
+
+func (v *TestableUnixVolume) ReadWriteOperationLabelValues() (r, w string) {
+       return "open", "create"
+}
+
+// serialize = false; readonly = false
+func TestUnixVolumeWithGenericTests(t *testing.T) {
+       DoGenericVolumeTests(t, func(t TB) TestableVolume {
+               return NewTestableUnixVolume(t, false, false)
+       })
+}
+
+// serialize = false; readonly = true
+func TestUnixVolumeWithGenericTestsReadOnly(t *testing.T) {
+       DoGenericVolumeTests(t, func(t TB) TestableVolume {
+               return NewTestableUnixVolume(t, false, true)
+       })
+}
+
+// serialize = true; readonly = false
+func TestUnixVolumeWithGenericTestsSerialized(t *testing.T) {
+       DoGenericVolumeTests(t, func(t TB) TestableVolume {
+               return NewTestableUnixVolume(t, true, false)
+       })
+}
+
+// serialize = false; readonly = false
+func TestUnixVolumeHandlersWithGenericVolumeTests(t *testing.T) {
+       DoHandlersWithGenericVolumeTests(t, func(t TB) (*RRVolumeManager, []TestableVolume) {
+               vols := make([]Volume, 2)
+               testableUnixVols := make([]TestableVolume, 2)
+
+               for i := range vols {
+                       v := NewTestableUnixVolume(t, false, false)
+                       vols[i] = v
+                       testableUnixVols[i] = v
+               }
+
+               return MakeRRVolumeManager(vols), testableUnixVols
+       })
+}
+
+func TestReplicationDefault1(t *testing.T) {
+       v := &UnixVolume{
+               Root:     "/",
+               ReadOnly: true,
+       }
+       metrics := newVolumeMetricsVecs(prometheus.NewRegistry())
+       if err := v.Start(metrics); err != nil {
+               t.Error(err)
+       }
+       if got := v.Replication(); got != 1 {
+               t.Errorf("Replication() returned %d, expected 1 if no config given", got)
+       }
+}
+
+func TestGetNotFound(t *testing.T) {
+       v := NewTestableUnixVolume(t, false, false)
+       defer v.Teardown()
+       v.Put(context.Background(), TestHash, TestBlock)
+
+       buf := make([]byte, BlockSize)
+       n, err := v.Get(context.Background(), TestHash2, buf)
+       switch {
+       case os.IsNotExist(err):
+               break
+       case err == nil:
+               t.Errorf("Read should have failed, returned %+q", buf[:n])
+       default:
+               t.Errorf("Read expected ErrNotExist, got: %s", err)
+       }
+}
+
+func TestPut(t *testing.T) {
+       v := NewTestableUnixVolume(t, false, false)
+       defer v.Teardown()
+
+       err := v.Put(context.Background(), TestHash, TestBlock)
+       if err != nil {
+               t.Error(err)
+       }
+       p := fmt.Sprintf("%s/%s/%s", v.Root, TestHash[:3], TestHash)
+       if buf, err := ioutil.ReadFile(p); err != nil {
+               t.Error(err)
+       } else if !bytes.Equal(buf, TestBlock) {
+               t.Errorf("Write should have stored %s, did store %s",
+                       string(TestBlock), string(buf))
+       }
+}
+
+func TestPutBadVolume(t *testing.T) {
+       v := NewTestableUnixVolume(t, false, false)
+       defer v.Teardown()
+
+       os.Chmod(v.Root, 000)
+       err := v.Put(context.Background(), TestHash, TestBlock)
+       if err == nil {
+               t.Error("Write should have failed")
+       }
+}
+
+func TestUnixVolumeReadonly(t *testing.T) {
+       v := NewTestableUnixVolume(t, false, true)
+       defer v.Teardown()
+
+       v.PutRaw(TestHash, TestBlock)
+
+       buf := make([]byte, BlockSize)
+       _, err := v.Get(context.Background(), TestHash, buf)
+       if err != nil {
+               t.Errorf("got err %v, expected nil", err)
+       }
+
+       err = v.Put(context.Background(), TestHash, TestBlock)
+       if err != MethodDisabledError {
+               t.Errorf("got err %v, expected MethodDisabledError", err)
+       }
+
+       err = v.Touch(TestHash)
+       if err != MethodDisabledError {
+               t.Errorf("got err %v, expected MethodDisabledError", err)
+       }
+
+       err = v.Trash(TestHash)
+       if err != MethodDisabledError {
+               t.Errorf("got err %v, expected MethodDisabledError", err)
+       }
+}
+
+func TestIsFull(t *testing.T) {
+       v := NewTestableUnixVolume(t, false, false)
+       defer v.Teardown()
+
+       fullPath := v.Root + "/full"
+       now := fmt.Sprintf("%d", time.Now().Unix())
+       os.Symlink(now, fullPath)
+       if !v.IsFull() {
+               t.Errorf("%s: claims not to be full", v)
+       }
+       os.Remove(fullPath)
+
+       // Test with an expired /full link.
+       expired := fmt.Sprintf("%d", time.Now().Unix()-3605)
+       os.Symlink(expired, fullPath)
+       if v.IsFull() {
+               t.Errorf("%s: should no longer be full", v)
+       }
+}
+
+func TestNodeStatus(t *testing.T) {
+       v := NewTestableUnixVolume(t, false, false)
+       defer v.Teardown()
+
+       // Get node status and make a basic sanity check.
+       volinfo := v.Status()
+       if volinfo.MountPoint != v.Root {
+               t.Errorf("GetNodeStatus mount_point %s, expected %s", volinfo.MountPoint, v.Root)
+       }
+       if volinfo.DeviceNum == 0 {
+               t.Errorf("uninitialized device_num in %v", volinfo)
+       }
+       if volinfo.BytesFree == 0 {
+               t.Errorf("uninitialized bytes_free in %v", volinfo)
+       }
+       if volinfo.BytesUsed == 0 {
+               t.Errorf("uninitialized bytes_used in %v", volinfo)
+       }
+}
+
+func TestUnixVolumeGetFuncWorkerError(t *testing.T) {
+       v := NewTestableUnixVolume(t, false, false)
+       defer v.Teardown()
+
+       v.Put(context.Background(), TestHash, TestBlock)
+       mockErr := errors.New("Mock error")
+       err := v.getFunc(context.Background(), v.blockPath(TestHash), func(rdr io.Reader) error {
+               return mockErr
+       })
+       if err != mockErr {
+               t.Errorf("Got %v, expected %v", err, mockErr)
+       }
+}
+
+func TestUnixVolumeGetFuncFileError(t *testing.T) {
+       v := NewTestableUnixVolume(t, false, false)
+       defer v.Teardown()
+
+       funcCalled := false
+       err := v.getFunc(context.Background(), v.blockPath(TestHash), func(rdr io.Reader) error {
+               funcCalled = true
+               return nil
+       })
+       if err == nil {
+               t.Errorf("Expected error opening non-existent file")
+       }
+       if funcCalled {
+               t.Errorf("Worker func should not have been called")
+       }
+}
+
+func TestUnixVolumeGetFuncWorkerWaitsOnMutex(t *testing.T) {
+       v := NewTestableUnixVolume(t, false, false)
+       defer v.Teardown()
+
+       v.Put(context.Background(), TestHash, TestBlock)
+
+       mtx := NewMockMutex()
+       v.locker = mtx
+
+       funcCalled := make(chan struct{})
+       go v.getFunc(context.Background(), v.blockPath(TestHash), func(rdr io.Reader) error {
+               funcCalled <- struct{}{}
+               return nil
+       })
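+       // The mutex must be acquired before the worker func runs, and
+       // released only afterward; step the MockMutex through each
+       // stage and fail if events happen out of order.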
+       select {
+       case mtx.AllowLock <- struct{}{}:
+       case <-funcCalled:
+               t.Fatal("Function was called before mutex was acquired")
+       case <-time.After(5 * time.Second):
+               t.Fatal("Timed out before mutex was acquired")
+       }
+       select {
+       case <-funcCalled:
+       case mtx.AllowUnlock <- struct{}{}:
+               t.Fatal("Mutex was released before function was called")
+       case <-time.After(5 * time.Second):
+               t.Fatal("Timed out waiting for funcCalled")
+       }
+       select {
+       case mtx.AllowUnlock <- struct{}{}:
+       case <-time.After(5 * time.Second):
+               t.Fatal("Timed out waiting for getFunc() to release mutex")
+       }
+}
+
+func TestUnixVolumeCompare(t *testing.T) {
+       v := NewTestableUnixVolume(t, false, false)
+       defer v.Teardown()
+
+       v.Put(context.Background(), TestHash, TestBlock)
+       err := v.Compare(context.Background(), TestHash, TestBlock)
+       if err != nil {
+               t.Errorf("Got err %q, expected nil", err)
+       }
+
+       err = v.Compare(context.Background(), TestHash, []byte("baddata"))
+       if err != CollisionError {
+               t.Errorf("Got err %q, expected %q", err, CollisionError)
+       }
+
+       v.Put(context.Background(), TestHash, []byte("baddata"))
+       err = v.Compare(context.Background(), TestHash, TestBlock)
+       if err != DiskHashError {
+               t.Errorf("Got err %q, expected %q", err, DiskHashError)
+       }
+
+       p := fmt.Sprintf("%s/%s/%s", v.Root, TestHash[:3], TestHash)
+       os.Chmod(p, 000)
+       err = v.Compare(context.Background(), TestHash, TestBlock)
+       if err == nil || !strings.Contains(err.Error(), "permission denied") {
+               t.Errorf("Got err %q, expected %q", err, "permission denied")
+       }
+}
+
+func TestUnixVolumeContextCancelPut(t *testing.T) {
+       v := NewTestableUnixVolume(t, true, false)
+       defer v.Teardown()
+       v.locker.Lock()
+       ctx, cancel := context.WithCancel(context.Background())
+       go func() {
+               time.Sleep(50 * time.Millisecond)
+               cancel()
+               time.Sleep(50 * time.Millisecond)
+               v.locker.Unlock()
+       }()
+       err := v.Put(ctx, TestHash, TestBlock)
+       if err != context.Canceled {
+               t.Errorf("Put() returned %s -- expected short read / canceled", err)
+       }
+}
+
+func TestUnixVolumeContextCancelGet(t *testing.T) {
+       v := NewTestableUnixVolume(t, false, false)
+       defer v.Teardown()
+       bpath := v.blockPath(TestHash)
+       v.PutRaw(TestHash, TestBlock)
+       os.Remove(bpath)
+       err := syscall.Mkfifo(bpath, 0600)
+       if err != nil {
+               t.Fatalf("Mkfifo %s: %s", bpath, err)
+       }
+       defer os.Remove(bpath)
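+       // Nothing ever writes to the fifo, so reads block forever and
+       // Get can only return via context cancellation.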
+       ctx, cancel := context.WithCancel(context.Background())
+       go func() {
+               time.Sleep(50 * time.Millisecond)
+               cancel()
+       }()
+       buf := make([]byte, len(TestBlock))
+       n, err := v.Get(ctx, TestHash, buf)
+       if n == len(TestBlock) || err != context.Canceled {
+               t.Errorf("Get() returned %d, %s -- expected short read / canceled", n, err)
+       }
+}
+
+var _ = check.Suite(&UnixVolumeSuite{})
+
+type UnixVolumeSuite struct {
+       volume *TestableUnixVolume
+}
+
+func (s *UnixVolumeSuite) TearDownTest(c *check.C) {
+       if s.volume != nil {
+               s.volume.Teardown()
+       }
+}
+
+func (s *UnixVolumeSuite) TestStats(c *check.C) {
+       s.volume = NewTestableUnixVolume(c, false, false)
+       stats := func() string {
+               buf, err := json.Marshal(s.volume.InternalStats())
+               c.Check(err, check.IsNil)
+               return string(buf)
+       }
+
+       c.Check(stats(), check.Matches, `.*"StatOps":0,.*`)
+       c.Check(stats(), check.Matches, `.*"Errors":0,.*`)
+
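+       // loc is the md5 hash of "foo", the content written below.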
+       loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+       _, err := s.volume.Get(context.Background(), loc, make([]byte, 3))
+       c.Check(err, check.NotNil)
+       c.Check(stats(), check.Matches, `.*"StatOps":[^0],.*`)
+       c.Check(stats(), check.Matches, `.*"Errors":[^0],.*`)
+       c.Check(stats(), check.Matches, `.*"\*os\.PathError":[^0].*`)
+       c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
+       c.Check(stats(), check.Matches, `.*"OpenOps":0,.*`)
+       c.Check(stats(), check.Matches, `.*"CreateOps":0,.*`)
+
+       err = s.volume.Put(context.Background(), loc, []byte("foo"))
+       c.Check(err, check.IsNil)
+       c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
+       c.Check(stats(), check.Matches, `.*"CreateOps":1,.*`)
+       c.Check(stats(), check.Matches, `.*"OpenOps":0,.*`)
+       c.Check(stats(), check.Matches, `.*"UtimesOps":0,.*`)
+
+       err = s.volume.Touch(loc)
+       c.Check(err, check.IsNil)
+       c.Check(stats(), check.Matches, `.*"FlockOps":1,.*`)
+       c.Check(stats(), check.Matches, `.*"OpenOps":1,.*`)
+       c.Check(stats(), check.Matches, `.*"UtimesOps":1,.*`)
+
+       _, err = s.volume.Get(context.Background(), loc, make([]byte, 3))
+       c.Check(err, check.IsNil)
+       err = s.volume.Compare(context.Background(), loc, []byte("foo"))
+       c.Check(err, check.IsNil)
+       c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
+       c.Check(stats(), check.Matches, `.*"OpenOps":3,.*`)
+
+       err = s.volume.Trash(loc)
+       c.Check(err, check.IsNil)
+       c.Check(stats(), check.Matches, `.*"FlockOps":2,.*`)
+}
+
+func (s *UnixVolumeSuite) TestConfig(c *check.C) {
+       var cfg Config
+       err := yaml.Unmarshal([]byte(`
+Volumes:
+  - Type: Directory
+    StorageClasses: ["class_a", "class_b"]
+`), &cfg)
+
+       c.Check(err, check.IsNil)
+       c.Check(cfg.Volumes[0].GetStorageClasses(), check.DeepEquals, []string{"class_a", "class_b"})
+}
diff --git a/services/keepstore/usage.go b/services/keepstore/usage.go
new file mode 100644 (file)
index 0000000..8e83f6c
--- /dev/null
@@ -0,0 +1,162 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "flag"
+       "fmt"
+       "os"
+       "sort"
+       "strings"
+
+       "github.com/ghodss/yaml"
+)
+
+func usage() {
+       c := DefaultConfig()
+       knownTypes := []string{}
+       for _, vt := range VolumeTypes {
+               c.Volumes = append(c.Volumes, vt().Examples()...)
+               knownTypes = append(knownTypes, vt().Type())
+       }
+       exampleConfigFile, err := yaml.Marshal(c)
+       if err != nil {
+               panic(err)
+       }
+       sort.Strings(knownTypes)
+       knownTypeList := strings.Join(knownTypes, ", ")
+       fmt.Fprintf(os.Stderr, `
+
+keepstore provides a content-addressed data store backed by a local filesystem or networked storage.
+
+Usage: keepstore -config path/to/keepstore.yml
+       keepstore [OPTIONS] -dump-config
+
+NOTE: All options (other than -config) are deprecated in favor of YAML
+      configuration. Use -dump-config to translate existing
+      configurations to YAML format.
+
+Options:
+`)
+       flag.PrintDefaults()
+       fmt.Fprintf(os.Stderr, `
+Example config file:
+
+%s
+
+Listen:
+
+    Local port to listen on. Can be "address:port" or ":port", where
+    "address" is a host IP address or name and "port" is a port number
+    or name.
+
+LogFormat:
+
+    Format of request/response and error logs: "json" or "text".
+
+PIDFile:
+
+    Path to write PID file during startup. This file is kept open and
+    locked with LOCK_EX until keepstore exits, so "fuser -k pidfile" is
+    one way to shut down. Keepstore exits immediately if there is an
+    error opening, locking, or writing the PID file.
+
+MaxBuffers:
+
+    Maximum RAM to use for data buffers, given in multiples of block
+    size (64 MiB). When this limit is reached, HTTP requests requiring
+    buffers (like GET and PUT) will wait for buffer space to be
+    released.
+
+MaxRequests:
+
+    Maximum concurrent requests. When this limit is reached, new
+    requests will receive 503 responses. Note: this limit does not
+    include idle connections from clients using HTTP keepalive, so it
+    does not strictly limit the number of concurrent connections. If
+    omitted or zero, the default is 2 * MaxBuffers.
+
+BlobSigningKeyFile:
+
+    Local file containing the secret blob signing key (used to
+    generate and verify blob signatures).  This key should be
+    identical to the API server's blob_signing_key configuration
+    entry.
+
+RequireSignatures:
+
+    Honor read requests only if a valid signature is provided.  This
+    should be true, except for development use and when migrating from
+    a very old version.
+
+BlobSignatureTTL:
+
+    Duration for which new permission signatures (returned in PUT
+    responses) will be valid.  This should be equal to the API
+    server's blob_signature_ttl configuration entry.
+
+SystemAuthTokenFile:
+
+    Local file containing the Arvados API token used by keep-balance
+    or data manager.  Delete, trash, and index requests are honored
+    only for this token.
+
+EnableDelete:
+
+    Enable trash and delete features. If false, trash lists will be
+    accepted but blocks will not be trashed or deleted.
+
+TrashLifetime:
+
+    Time duration after a block is trashed during which it can be
+    recovered using an /untrash request.
+
+TrashCheckInterval:
+
+    How often to check for (and delete) trashed blocks whose
+    TrashLifetime has expired.
+
+TrashWorkers:
+
+    Maximum number of concurrent trash operations. Default is 1, i.e.,
+    trash lists are processed serially.
+
+EmptyTrashWorkers:
+
+    Maximum number of concurrent block deletion operations (per
+    volume) when emptying trash. Default is 1.
+
+PullWorkers:
+
+    Maximum number of concurrent pull operations. Default is 1, i.e.,
+    pull lists are processed serially.
+
+TLSCertificateFile:
+
+    Path to server certificate file in X509 format. Enables TLS mode.
+
+    Example: /var/lib/acme/live/keep0.example.com/fullchain
+
+TLSKeyFile:
+
+    Path to server key file in X509 format. Enables TLS mode.
+
+    The key pair is read from disk during startup, and whenever SIGHUP
+    is received.
+
+    Example: /var/lib/acme/live/keep0.example.com/privkey
+
+Volumes:
+
+    List of storage volumes. If omitted or empty, the default is to
+    use all directories named "keep" that exist in the top level
+    directory of a mount point at startup time.
+
+    Volume types: %s
+
+    (See volume configuration examples above.)
+
+`, exampleConfigFile, knownTypeList)
+}
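+
+// Illustration only (not emitted by usage above): a minimal
+// keepstore.yml along the lines documented in the help text. Field
+// names come from the text above; the port number, key file path, and
+// the Directory volume's Root path are assumed example values.
+//
+//   Listen: ":25107"
+//   LogFormat: json
+//   MaxBuffers: 128
+//   RequireSignatures: true
+//   BlobSigningKeyFile: /etc/arvados/keepstore/blob-signing-key
+//   Volumes:
+//   - Type: Directory
+//     Root: /mnt/keep0
+//     StorageClasses: ["default"]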
diff --git a/services/keepstore/volume.go b/services/keepstore/volume.go
new file mode 100644 (file)
index 0000000..52b9b1b
--- /dev/null
@@ -0,0 +1,417 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "context"
+       "crypto/rand"
+       "fmt"
+       "io"
+       "math/big"
+       "sync/atomic"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+type BlockWriter interface {
+       // WriteBlock reads all data from r and writes it to a
+       // backing store as "loc".
+       WriteBlock(ctx context.Context, loc string, r io.Reader) error
+}
+
+type BlockReader interface {
+       // ReadBlock retrieves data previously stored as "loc" and
+       // writes it to w.
+       ReadBlock(ctx context.Context, loc string, w io.Writer) error
+}
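+
+// A sketch (assumptions: src implements BlockReader, dst implements
+// BlockWriter, and ctx/loc are in scope) of streaming a block from
+// one backing store to another without buffering it in memory:
+//
+//   pr, pw := io.Pipe()
+//   go func() { pw.CloseWithError(src.ReadBlock(ctx, loc, pw)) }()
+//   err := dst.WriteBlock(ctx, loc, pr)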
+
+// A Volume is an interface representing a Keep back-end storage unit:
+// for example, a single mounted disk, a RAID array, an Amazon S3 volume,
+// etc.
+type Volume interface {
+       // Volume type as specified in config file. Examples: "S3",
+       // "Directory".
+       Type() string
+
+       // Do whatever private setup tasks and configuration checks
+       // are needed. Return non-nil if the volume is unusable (e.g.,
+       // invalid config).
+       Start(vm *volumeMetricsVecs) error
+
+       // Get a block: copy the block data into buf, and return the
+       // number of bytes copied.
+       //
+       // loc is guaranteed to consist of 32 or more lowercase hex
+       // digits.
+       //
+       // Get should not verify the integrity of the data: it should
+       // just return whatever was found in its backing
+       // store. (Integrity checking is the caller's responsibility.)
+       //
+       // If an error is encountered that prevents it from
+       // retrieving the data, that error should be returned so the
+       // caller can log (and send to the client) a more useful
+       // message.
+       //
+       // If the error is "not found", and there's no particular
+       // reason to expect the block to be found (other than that a
+       // caller is asking for it), the returned error should satisfy
+       // os.IsNotExist(err): this is a normal condition and will not
+       // be logged as an error (except that a 404 will appear in the
+       // access log if the block is not found on any other volumes
+       // either).
+       //
+       // If the data in the backing store is bigger than len(buf),
+       // then Get is permitted to return an error without reading
+       // any of the data.
+       //
+       // len(buf) will not exceed BlockSize.
+       Get(ctx context.Context, loc string, buf []byte) (int, error)
+
+       // Compare the given data with the stored data (i.e., what Get
+       // would return). If equal, return nil. If not, return
+       // CollisionError or DiskHashError (depending on whether the
+       // data on disk matches the expected hash), or whatever error
+       // was encountered opening/reading the stored data.
+       Compare(ctx context.Context, loc string, data []byte) error
+
+       // Put writes a block to an underlying storage device.
+       //
+       // loc is as described in Get.
+       //
+       // len(block) is guaranteed to be between 0 and BlockSize.
+       //
+       // If a block is already stored under the same name (loc) with
+       // different content, Put must either overwrite the existing
+       // data with the new data or return a non-nil error. When
+       // overwriting existing data, it must never leave the storage
+       // device in an inconsistent state: a subsequent call to Get
+       // must return either the entire old block, the entire new
+       // block, or an error. (An implementation that cannot perform
+       // atomic updates must leave the old data alone and return an
+       // error.)
+       //
+       // Put also sets the timestamp for the given locator to the
+       // current time.
+       //
+       // Put must return a non-nil error unless it can guarantee
+       // that the entire block has been written and flushed to
+       // persistent storage, and that its timestamp is current. Of
+       // course, this guarantee is only as good as the underlying
+       // storage device, but it is Put's responsibility to at least
+       // get whatever guarantee is offered by the storage device.
+       //
+       // Put should not verify that loc==hash(block): this is the
+       // caller's responsibility.
+       Put(ctx context.Context, loc string, block []byte) error
+
+       // Touch sets the timestamp for the given locator to the
+       // current time.
+       //
+       // loc is as described in Get.
+       //
+       // If invoked at time t0, Touch must guarantee that a
+       // subsequent call to Mtime will return a timestamp no older
+       // than {t0 minus one second}. For example, if Touch is called
+       // at 2015-07-07T01:23:45.67890123Z, it is acceptable for a
+       // subsequent Mtime to return any of the following:
+       //
+       //   - 2015-07-07T01:23:45.00000000Z
+       //   - 2015-07-07T01:23:45.67890123Z
+       //   - 2015-07-07T01:23:46.67890123Z
+       //   - 2015-07-08T00:00:00.00000000Z
+       //
+       // It is not acceptable for a subsequent Mtime to return
+       // either of the following:
+       //
+       //   - 2015-07-07T00:00:00.00000000Z -- ERROR
+       //   - 2015-07-07T01:23:44.00000000Z -- ERROR
+       //
+       // Touch must return a non-nil error if the timestamp cannot
+       // be updated.
+       Touch(loc string) error
+
+       // Mtime returns the stored timestamp for the given locator.
+       //
+       // loc is as described in Get.
+       //
+       // Mtime must return a non-nil error if the given block is not
+       // found or the timestamp could not be retrieved.
+       Mtime(loc string) (time.Time, error)
+
+       // IndexTo writes a complete list of locators with the given
+       // prefix for which Get() can retrieve data.
+       //
+       // prefix consists of zero or more lowercase hexadecimal
+       // digits.
+       //
+       // Each locator must be written to the given writer using the
+       // following format:
+       //
+       //   loc "+" size " " timestamp "\n"
+       //
+       // where:
+       //
+       //   - size is the number of bytes of content, given as a
+       //     decimal number with one or more digits
+       //
+       //   - timestamp is the timestamp stored for the locator,
+       //     given as a decimal number of seconds after January 1,
+       //     1970 UTC.
+       //
+       // IndexTo must not write any other data to writer: for
+       // example, it must not write any blank lines.
+       //
+       // If an error makes it impossible to provide a complete
+       // index, IndexTo must return a non-nil error. It is
+       // acceptable to return a non-nil error after writing a
+       // partial index to writer.
+       //
+       // The resulting index is not expected to be sorted in any
+       // particular order.
+       IndexTo(prefix string, writer io.Writer) error
+
+       // Trash moves the block data from the underlying storage
+       // device to trash area. The block then stays in trash for
+       // -trash-lifetime interval before it is actually deleted.
+       //
+       // loc is as described in Get.
+       //
+       // If the timestamp for the given locator is newer than
+       // BlobSignatureTTL, Trash must not trash the data.
+       //
+       // If a Trash operation overlaps with any Touch or Put
+       // operations on the same locator, the implementation must
+       // ensure one of the following outcomes:
+       //
+       //   - Touch and Put return a non-nil error, or
+       //   - Trash does not trash the block, or
+       //   - Both of the above.
+       //
+       // If it is possible for the storage device to be accessed by
+       // a different process or host, the synchronization mechanism
+       // should also guard against races with other processes and
+       // hosts. If such a mechanism is not available, there must be
+       // a mechanism for detecting unsafe configurations, alerting
+       // the operator, and aborting or falling back to a read-only
+       // state. In other words, running multiple keepstore processes
+       // with the same underlying storage device must either work
+       // reliably or fail outright.
+       //
+       // Corollary: A successful Touch or Put guarantees a block
+       // will not be trashed for at least BlobSignatureTTL
+       // seconds.
+       Trash(loc string) error
+
+       // Untrash moves a block from the trash back into the store.
+       Untrash(loc string) error
+
+       // Status returns a *VolumeStatus representing the current
+       // in-use and available storage capacity and an
+       // implementation-specific volume identifier (e.g., "mount
+       // point" for a UnixVolume).
+       Status() *VolumeStatus
+
+       // String returns an identifying label for this volume,
+       // suitable for including in log messages. It should contain
+       // enough information to uniquely identify the underlying
+       // storage device, but should not contain any credentials or
+       // secrets.
+       String() string
+
+       // Writable returns false if all future Put, Mtime, and Delete
+       // calls are expected to fail.
+       //
+       // If the volume is only temporarily unwritable -- or if Put
+       // will fail because it is full, but Mtime or Delete can
+       // succeed -- then Writable should return false.
+       Writable() bool
+
+       // Replication returns the storage redundancy of the
+       // underlying device. It will be passed on to clients in
+       // responses to PUT requests.
+       Replication() int
+
+       // EmptyTrash looks for trashed blocks that exceeded TrashLifetime
+       // and deletes them from the volume.
+       EmptyTrash()
+
+       // DeviceID returns a globally unique ID of the underlying
+       // storage device if possible, otherwise "".
+       DeviceID() string
+
+       // GetStorageClasses returns the storage classes associated
+       // with this volume.
+       GetStorageClasses() []string
+}
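+
+// Illustrative sketch (not part of the interface itself): a caller
+// honoring the Get contract described above. The locator is
+// md5("foo"), the same example the tests use; "vol" and "ctx" are
+// assumed to be in scope.
+//
+//   buf := make([]byte, BlockSize)
+//   n, err := vol.Get(ctx, "acbd18db4cc2f85cedef654fccc4a4d8", buf)
+//   if os.IsNotExist(err) {
+//           // normal miss: try the next volume
+//   } else if err != nil {
+//           // real failure: log it and report it to the client
+//   } else {
+//           data := buf[:n] // integrity checking is the caller's job
+//           _ = data
+//   }
+//
+// An IndexTo output line for that 3-byte block, stored at Unix time
+// 1552575432, would read:
+//
+//   acbd18db4cc2f85cedef654fccc4a4d8+3 1552575432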
+
+// A VolumeWithExamples provides example configs to display in the
+// -help message.
+type VolumeWithExamples interface {
+       Volume
+       Examples() []Volume
+}
+
+// A VolumeManager tells callers which volumes can read, which volumes
+// can write, and on which volume the next write should be attempted.
+type VolumeManager interface {
+       // Mounts returns all mounts (volume attachments).
+       Mounts() []*VolumeMount
+
+       // Lookup returns the volume under the given mount
+       // UUID. Returns nil if the mount does not exist. If
+       // write==true, returns nil if the volume is not writable.
+       Lookup(uuid string, write bool) Volume
+
+       // AllReadable returns all volumes.
+       AllReadable() []Volume
+
+       // AllWritable returns all volumes that aren't known to be in
+       // a read-only state. (There is no guarantee that a write to
+       // one will succeed, though.)
+       AllWritable() []Volume
+
+       // NextWritable returns the volume where the next new block
+       // should be written. A VolumeManager can select a volume in
+       // order to distribute activity across spindles, fill up disks
+       // with more free space, etc.
+       NextWritable() Volume
+
+       // VolumeStats returns the ioStats used for tracking stats for
+       // the given Volume.
+       VolumeStats(Volume) *ioStats
+
+       // Close shuts down the volume manager cleanly.
+       Close()
+}
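+
+// A minimal usage sketch, assuming volumes, ctx, loc, and block are
+// in scope; MakeRRVolumeManager below is the concrete implementation
+// keepstore uses.
+//
+//   var vm VolumeManager = MakeRRVolumeManager(volumes)
+//   if v := vm.NextWritable(); v != nil {
+//           if err := v.Put(ctx, loc, block); err != nil {
+//                   log.Printf("put %s on volume %s: %s", loc, v, err)
+//           }
+//   }
+//   vm.Close()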
+
+// A VolumeMount is an attachment of a Volume to a VolumeManager.
+type VolumeMount struct {
+       arvados.KeepMount
+       volume Volume
+}
+
+// Generate a UUID the way API server would for a "KeepVolumeMount"
+// object.
+func (*VolumeMount) generateUUID() string {
+       var max big.Int
+       _, ok := max.SetString("zzzzzzzzzzzzzzz", 36)
+       if !ok {
+               panic("big.Int parse failed")
+       }
+       r, err := rand.Int(rand.Reader, &max)
+       if err != nil {
+               panic(err)
+       }
+       return fmt.Sprintf("zzzzz-ivpuk-%015s", r.Text(36))
+}
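+
+// A generated UUID looks like "zzzzz-ivpuk-0123456789abcde"
+// (hypothetical example): the fixed "zzzzz-ivpuk-" prefix followed
+// by 15 base-36 digits, zero-padded on the left.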
+
+// RRVolumeManager is a round-robin VolumeManager: the Nth call to
+// NextWritable returns the (N % len(writables))th writable Volume
+// (where writables are all Volumes v where v.Writable()==true).
+type RRVolumeManager struct {
+       mounts    []*VolumeMount
+       mountMap  map[string]*VolumeMount
+       readables []Volume
+       writables []Volume
+       counter   uint32
+       iostats   map[Volume]*ioStats
+}
+
+// MakeRRVolumeManager initializes RRVolumeManager
+func MakeRRVolumeManager(volumes []Volume) *RRVolumeManager {
+       vm := &RRVolumeManager{
+               iostats: make(map[Volume]*ioStats),
+       }
+       vm.mountMap = make(map[string]*VolumeMount)
+       for _, v := range volumes {
+               sc := v.GetStorageClasses()
+               if len(sc) == 0 {
+                       sc = []string{"default"}
+               }
+               mnt := &VolumeMount{
+                       KeepMount: arvados.KeepMount{
+                               UUID:           (*VolumeMount)(nil).generateUUID(),
+                               DeviceID:       v.DeviceID(),
+                               ReadOnly:       !v.Writable(),
+                               Replication:    v.Replication(),
+                               StorageClasses: sc,
+                       },
+                       volume: v,
+               }
+               vm.iostats[v] = &ioStats{}
+               vm.mounts = append(vm.mounts, mnt)
+               vm.mountMap[mnt.UUID] = mnt
+               vm.readables = append(vm.readables, v)
+               if v.Writable() {
+                       vm.writables = append(vm.writables, v)
+               }
+       }
+       return vm
+}
+
+func (vm *RRVolumeManager) Mounts() []*VolumeMount {
+       return vm.mounts
+}
+
+func (vm *RRVolumeManager) Lookup(uuid string, needWrite bool) Volume {
+       if mnt, ok := vm.mountMap[uuid]; ok && (!needWrite || !mnt.ReadOnly) {
+               return mnt.volume
+       } else {
+               return nil
+       }
+}
+
+// AllReadable returns an array of all readable volumes
+func (vm *RRVolumeManager) AllReadable() []Volume {
+       return vm.readables
+}
+
+// AllWritable returns an array of all writable volumes
+func (vm *RRVolumeManager) AllWritable() []Volume {
+       return vm.writables
+}
+
+// NextWritable returns the next writable volume in round-robin
+// order, or nil if there are no writable volumes.
+func (vm *RRVolumeManager) NextWritable() Volume {
+       if len(vm.writables) == 0 {
+               return nil
+       }
+       i := atomic.AddUint32(&vm.counter, 1)
+       return vm.writables[i%uint32(len(vm.writables))]
+}
+
+// VolumeStats returns an ioStats for the given volume.
+func (vm *RRVolumeManager) VolumeStats(v Volume) *ioStats {
+       return vm.iostats[v]
+}
+
+// Close the RRVolumeManager
+func (vm *RRVolumeManager) Close() {
+}
+
+// VolumeStatus describes the current condition of a volume
+type VolumeStatus struct {
+       MountPoint string
+       DeviceNum  uint64
+       BytesFree  uint64
+       BytesUsed  uint64
+}
+
+// ioStats tracks I/O statistics for a volume or server
+type ioStats struct {
+       Errors     uint64
+       Ops        uint64
+       CompareOps uint64
+       GetOps     uint64
+       PutOps     uint64
+       TouchOps   uint64
+       InBytes    uint64
+       OutBytes   uint64
+}
+
+type InternalStatser interface {
+       InternalStats() interface{}
+}
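+
+// Sketch, assuming a hypothetical driver type myVolume with an
+// embedded counter struct: satisfying InternalStatser is just
+//
+//   func (v *myVolume) InternalStats() interface{} { return &v.stats }
+//
+// The JSON rendering of the returned value is what TestStats in
+// unix_volume_test.go matches against.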
diff --git a/services/keepstore/volume_generic_test.go b/services/keepstore/volume_generic_test.go
new file mode 100644 (file)
index 0000000..d5a4136
--- /dev/null
@@ -0,0 +1,1097 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "crypto/md5"
+       "fmt"
+       "os"
+       "regexp"
+       "sort"
+       "strconv"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "github.com/prometheus/client_golang/prometheus"
+       dto "github.com/prometheus/client_model/go"
+)
+
+type TB interface {
+       Error(args ...interface{})
+       Errorf(format string, args ...interface{})
+       Fail()
+       FailNow()
+       Failed() bool
+       Fatal(args ...interface{})
+       Fatalf(format string, args ...interface{})
+       Log(args ...interface{})
+       Logf(format string, args ...interface{})
+}
+
+// A TestableVolumeFactory returns a new TestableVolume. The factory
+// function, and the TestableVolume it returns, can use "t" to write
+// logs, fail the current test, etc.
+type TestableVolumeFactory func(t TB) TestableVolume
+
+// DoGenericVolumeTests runs a set of tests that every TestableVolume
+// is expected to pass. It calls factory to create a new TestableVolume
+// for each test case, to avoid leaking state between tests.
+func DoGenericVolumeTests(t TB, factory TestableVolumeFactory) {
+       testGet(t, factory)
+       testGetNoSuchBlock(t, factory)
+
+       testCompareNonexistent(t, factory)
+       testCompareSameContent(t, factory, TestHash, TestBlock)
+       testCompareSameContent(t, factory, EmptyHash, EmptyBlock)
+       testCompareWithCollision(t, factory, TestHash, TestBlock, []byte("baddata"))
+       testCompareWithCollision(t, factory, TestHash, TestBlock, EmptyBlock)
+       testCompareWithCollision(t, factory, EmptyHash, EmptyBlock, TestBlock)
+       testCompareWithCorruptStoredData(t, factory, TestHash, TestBlock, []byte("baddata"))
+       testCompareWithCorruptStoredData(t, factory, TestHash, TestBlock, EmptyBlock)
+       testCompareWithCorruptStoredData(t, factory, EmptyHash, EmptyBlock, []byte("baddata"))
+
+       testPutBlockWithSameContent(t, factory, TestHash, TestBlock)
+       testPutBlockWithSameContent(t, factory, EmptyHash, EmptyBlock)
+       testPutBlockWithDifferentContent(t, factory, arvadostest.MD5CollisionMD5, arvadostest.MD5CollisionData[0], arvadostest.MD5CollisionData[1])
+       testPutBlockWithDifferentContent(t, factory, arvadostest.MD5CollisionMD5, EmptyBlock, arvadostest.MD5CollisionData[0])
+       testPutBlockWithDifferentContent(t, factory, arvadostest.MD5CollisionMD5, arvadostest.MD5CollisionData[0], EmptyBlock)
+       testPutBlockWithDifferentContent(t, factory, EmptyHash, EmptyBlock, arvadostest.MD5CollisionData[0])
+       testPutMultipleBlocks(t, factory)
+
+       testPutAndTouch(t, factory)
+       testTouchNoSuchBlock(t, factory)
+
+       testMtimeNoSuchBlock(t, factory)
+
+       testIndexTo(t, factory)
+
+       testDeleteNewBlock(t, factory)
+       testDeleteOldBlock(t, factory)
+       testDeleteNoSuchBlock(t, factory)
+
+       testStatus(t, factory)
+
+       testMetrics(t, factory)
+
+       testString(t, factory)
+
+       testUpdateReadOnly(t, factory)
+
+       testGetConcurrent(t, factory)
+       testPutConcurrent(t, factory)
+
+       testPutFullBlock(t, factory)
+
+       testTrashUntrash(t, factory)
+       testTrashEmptyTrashUntrash(t, factory)
+}
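+
+// Illustration only: a volume driver wires its own factory into the
+// generic suite, e.g. with the testable unix volume defined elsewhere
+// in this package (the test name here is hypothetical):
+//
+//   func TestUnixVolumeGeneric(t *testing.T) {
+//           DoGenericVolumeTests(t, func(t TB) TestableVolume {
+//                   return NewTestableUnixVolume(t, false, false)
+//           })
+//   }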
+
+// Put a test block, get it and verify content
+// Test should pass for both writable and read-only volumes
+func testGet(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       v.PutRaw(TestHash, TestBlock)
+
+       buf := make([]byte, BlockSize)
+       n, err := v.Get(context.Background(), TestHash, buf)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       if bytes.Compare(buf[:n], TestBlock) != 0 {
+               t.Errorf("expected %s, got %s", string(TestBlock), string(buf))
+       }
+}
+
+// Invoke get on a block that does not exist in volume; should result in error
+// Test should pass for both writable and read-only volumes
+func testGetNoSuchBlock(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       buf := make([]byte, BlockSize)
+       if _, err := v.Get(context.Background(), TestHash2, buf); err == nil {
+               t.Errorf("Expected error while getting non-existing block %v", TestHash2)
+       }
+}
+
+// Compare() should return os.ErrNotExist if the block does not exist.
+// Otherwise, writing new data causes CompareAndTouch() to generate
+// error logs even though everything is working fine.
+func testCompareNonexistent(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       err := v.Compare(context.Background(), TestHash, TestBlock)
+       if err != os.ErrNotExist {
+               t.Errorf("Got err %T %q, expected os.ErrNotExist", err, err)
+       }
+}
+
+// Put a test block and compare the locator with same content
+// Test should pass for both writable and read-only volumes
+func testCompareSameContent(t TB, factory TestableVolumeFactory, testHash string, testData []byte) {
+       v := factory(t)
+       defer v.Teardown()
+
+       v.PutRaw(testHash, testData)
+
+       // Compare the block locator with same content
+       err := v.Compare(context.Background(), testHash, testData)
+       if err != nil {
+               t.Errorf("Got err %q, expected nil", err)
+       }
+}
+
+// Test behavior of Compare() when stored data matches expected
+// checksum but differs from new data we need to store. Requires
+// testHash = md5(testDataA).
+//
+// Test should pass for both writable and read-only volumes
+func testCompareWithCollision(t TB, factory TestableVolumeFactory, testHash string, testDataA, testDataB []byte) {
+       v := factory(t)
+       defer v.Teardown()
+
+       v.PutRaw(testHash, testDataA)
+
+       // Compare the block locator with different content; collision
+       err := v.Compare(context.Background(), testHash, testDataB)
+       if err == nil {
+               t.Errorf("Got err nil, expected error due to collision")
+       }
+}
+
+// Test behavior of Compare() when stored data has become
+// corrupted. Requires testHash = md5(testDataA) != md5(testDataB).
+//
+// Test should pass for both writable and read-only volumes
+func testCompareWithCorruptStoredData(t TB, factory TestableVolumeFactory, testHash string, testDataA, testDataB []byte) {
+       v := factory(t)
+       defer v.Teardown()
+
+       v.PutRaw(testHash, testDataB)
+
+       err := v.Compare(context.Background(), testHash, testDataA)
+       if err == nil || err == CollisionError {
+               t.Errorf("Got err %+v, expected non-collision error", err)
+       }
+}
+
+// Put a block and put again with same content
+// Test is intended for only writable volumes
+func testPutBlockWithSameContent(t TB, factory TestableVolumeFactory, testHash string, testData []byte) {
+       v := factory(t)
+       defer v.Teardown()
+
+       if v.Writable() == false {
+               return
+       }
+
+       err := v.Put(context.Background(), testHash, testData)
+       if err != nil {
+               t.Errorf("Got err putting block %q: %q, expected nil", TestBlock, err)
+       }
+
+       err = v.Put(context.Background(), testHash, testData)
+       if err != nil {
+               t.Errorf("Got err putting block second time %q: %q, expected nil", TestBlock, err)
+       }
+}
+
+// Put a block and put again with different content
+// Test is intended for only writable volumes
+func testPutBlockWithDifferentContent(t TB, factory TestableVolumeFactory, testHash string, testDataA, testDataB []byte) {
+       v := factory(t)
+       defer v.Teardown()
+
+       if v.Writable() == false {
+               return
+       }
+
+       v.PutRaw(testHash, testDataA)
+
+       putErr := v.Put(context.Background(), testHash, testDataB)
+       buf := make([]byte, BlockSize)
+       n, getErr := v.Get(context.Background(), testHash, buf)
+       if putErr == nil {
+               // Put must not return a nil error unless it has
+               // overwritten the existing data.
+               if bytes.Compare(buf[:n], testDataB) != 0 {
+                       t.Errorf("Put succeeded but Get returned %+q, expected %+q", buf[:n], testDataB)
+               }
+       } else {
+               // It is permissible for Put to fail, but it must
+               // leave us with either the original data, the new
+               // data, or nothing at all.
+               if getErr == nil && bytes.Compare(buf[:n], testDataA) != 0 && bytes.Compare(buf[:n], testDataB) != 0 {
+                       t.Errorf("Put failed but Get returned %+q, which is neither %+q nor %+q", buf[:n], testDataA, testDataB)
+               }
+       }
+}
+
+// Put and get multiple blocks
+// Test is intended for only writable volumes
+func testPutMultipleBlocks(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       if v.Writable() == false {
+               return
+       }
+
+       err := v.Put(context.Background(), TestHash, TestBlock)
+       if err != nil {
+               t.Errorf("Got err putting block %q: %q, expected nil", TestBlock, err)
+       }
+
+       err = v.Put(context.Background(), TestHash2, TestBlock2)
+       if err != nil {
+               t.Errorf("Got err putting block %q: %q, expected nil", TestBlock2, err)
+       }
+
+       err = v.Put(context.Background(), TestHash3, TestBlock3)
+       if err != nil {
+               t.Errorf("Got err putting block %q: %q, expected nil", TestBlock3, err)
+       }
+
+       data := make([]byte, BlockSize)
+       n, err := v.Get(context.Background(), TestHash, data)
+       if err != nil {
+               t.Error(err)
+       } else {
+               if bytes.Compare(data[:n], TestBlock) != 0 {
+                       t.Errorf("Block present, but got %+q, expected %+q", data[:n], TestBlock)
+               }
+       }
+
+       n, err = v.Get(context.Background(), TestHash2, data)
+       if err != nil {
+               t.Error(err)
+       } else {
+               if bytes.Compare(data[:n], TestBlock2) != 0 {
+                       t.Errorf("Block present, but got %+q, expected %+q", data[:n], TestBlock2)
+               }
+       }
+
+       n, err = v.Get(context.Background(), TestHash3, data)
+       if err != nil {
+               t.Error(err)
+       } else {
+               if bytes.Compare(data[:n], TestBlock3) != 0 {
+                       t.Errorf("Block present, but to %+q, expected %+q", data[:n], TestBlock3)
+               }
+       }
+}
+
+// testPutAndTouch checks that when Put is called on a block that
+// already exists, the block's modification time is updated.
+// Test is intended for only writable volumes
+func testPutAndTouch(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       if v.Writable() == false {
+               return
+       }
+
+       if err := v.Put(context.Background(), TestHash, TestBlock); err != nil {
+               t.Error(err)
+       }
+
+       // We'll verify { t0 < threshold < t1 }, where t0 is the
+       // existing block's timestamp on disk before Put() and t1 is
+       // its timestamp after Put().
+       threshold := time.Now().Add(-time.Second)
+
+       // Set the stored block's mtime far enough in the past that we
+       // can see the difference between "timestamp didn't change"
+       // and "timestamp granularity is too low".
+       v.TouchWithDate(TestHash, time.Now().Add(-20*time.Second))
+
+       // Make sure v.Mtime() agrees the above Utime really worked.
+       if t0, err := v.Mtime(TestHash); err != nil || t0.IsZero() || !t0.Before(threshold) {
+               t.Errorf("Setting mtime failed: %v, %v", t0, err)
+       }
+
+       // Write the same block again.
+       if err := v.Put(context.Background(), TestHash, TestBlock); err != nil {
+               t.Error(err)
+       }
+
+       // Verify threshold < t1
+       if t1, err := v.Mtime(TestHash); err != nil {
+               t.Error(err)
+       } else if t1.Before(threshold) {
+               t.Errorf("t1 %v should be >= threshold %v after v.Put ", t1, threshold)
+       }
+}
+
+// Touching a non-existing block should result in error.
+// Test should pass for both writable and read-only volumes
+func testTouchNoSuchBlock(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       if err := v.Touch(TestHash); err == nil {
+               t.Error("Expected error when attempted to touch a non-existing block")
+       }
+}
+
+// Invoking Mtime on a non-existing block should result in error.
+// Test should pass for both writable and read-only volumes
+func testMtimeNoSuchBlock(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       if _, err := v.Mtime("12345678901234567890123456789012"); err == nil {
+               t.Error("Expected error when updating Mtime on a non-existing block")
+       }
+}
+
+// Put a few blocks and invoke IndexTo with:
+// * no prefix
+// * with a prefix
+// * with no such prefix
+// Test should pass for both writable and read-only volumes
+func testIndexTo(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       // minMtime and maxMtime are the minimum and maximum
+       // acceptable values the index can report for our test
+       // blocks. 1-second precision is acceptable.
+       minMtime := time.Now().UTC().UnixNano()
+       minMtime -= minMtime % 1e9
+
+       v.PutRaw(TestHash, TestBlock)
+       v.PutRaw(TestHash2, TestBlock2)
+       v.PutRaw(TestHash3, TestBlock3)
+
+       maxMtime := time.Now().UTC().UnixNano()
+       if maxMtime%1e9 > 0 {
+               maxMtime -= maxMtime % 1e9
+               maxMtime += 1e9
+       }
+
+       // Blocks whose names aren't Keep hashes should be omitted from
+       // index
+       v.PutRaw("fffffffffnotreallyahashfffffffff", nil)
+       v.PutRaw("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", nil)
+       v.PutRaw("f0000000000000000000000000000000f", nil)
+       v.PutRaw("f00", nil)
+
+       buf := new(bytes.Buffer)
+       v.IndexTo("", buf)
+       indexRows := strings.Split(string(buf.Bytes()), "\n")
+       sort.Strings(indexRows)
+       sortedIndex := strings.Join(indexRows, "\n")
+       m := regexp.MustCompile(
+               `^\n` + TestHash + `\+\d+ (\d+)\n` +
+                       TestHash3 + `\+\d+ \d+\n` +
+                       TestHash2 + `\+\d+ \d+$`,
+       ).FindStringSubmatch(sortedIndex)
+       if m == nil {
+               t.Errorf("Got index %q for empty prefix", sortedIndex)
+       } else {
+               mtime, err := strconv.ParseInt(m[1], 10, 64)
+               if err != nil {
+                       t.Error(err)
+               } else if mtime < minMtime || mtime > maxMtime {
+                       t.Errorf("got %d for TestHash timestamp, expected %d <= t <= %d",
+                               mtime, minMtime, maxMtime)
+               }
+       }
+
+       for _, prefix := range []string{"f", "f15", "f15ac"} {
+               buf = new(bytes.Buffer)
+               v.IndexTo(prefix, buf)
+
+               m, err := regexp.MatchString(`^`+TestHash2+`\+\d+ \d+\n$`, string(buf.Bytes()))
+               if err != nil {
+                       t.Error(err)
+               } else if !m {
+                       t.Errorf("Got index %q for prefix %s", string(buf.Bytes()), prefix)
+               }
+       }
+
+       for _, prefix := range []string{"zero", "zip", "zilch"} {
+               buf = new(bytes.Buffer)
+               err := v.IndexTo(prefix, buf)
+               if err != nil {
+                       t.Errorf("Got error on IndexTo with no such prefix %v", err.Error())
+               } else if buf.Len() != 0 {
+                       t.Errorf("Expected empty list for IndexTo with no such prefix %s", prefix)
+               }
+       }
+}
+
+// Calling Trash() on a block immediately after writing it (i.e. not
+// old enough to trash) should neither trash the data nor return an error.
+// Test is intended for only writable volumes
+func testDeleteNewBlock(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+       theConfig.BlobSignatureTTL.Set("5m")
+
+       if v.Writable() == false {
+               return
+       }
+
+       v.Put(context.Background(), TestHash, TestBlock)
+
+       if err := v.Trash(TestHash); err != nil {
+               t.Error(err)
+       }
+       data := make([]byte, BlockSize)
+       n, err := v.Get(context.Background(), TestHash, data)
+       if err != nil {
+               t.Error(err)
+       } else if bytes.Compare(data[:n], TestBlock) != 0 {
+               t.Errorf("Got data %+q, expected %+q", data[:n], TestBlock)
+       }
+}
+
+// Calling Trash() on a block whose timestamp is more than
+// BlobSignatureTTL in the past should trash the data.
+// Test is intended for only writable volumes
+func testDeleteOldBlock(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+       theConfig.BlobSignatureTTL.Set("5m")
+
+       if v.Writable() == false {
+               return
+       }
+
+       v.Put(context.Background(), TestHash, TestBlock)
+       v.TouchWithDate(TestHash, time.Now().Add(-2*theConfig.BlobSignatureTTL.Duration()))
+
+       if err := v.Trash(TestHash); err != nil {
+               t.Error(err)
+       }
+       data := make([]byte, BlockSize)
+       if _, err := v.Get(context.Background(), TestHash, data); err == nil || !os.IsNotExist(err) {
+               t.Errorf("os.IsNotExist(%v) should have been true", err)
+       }
+
+       _, err := v.Mtime(TestHash)
+       if err == nil || !os.IsNotExist(err) {
+               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+       }
+
+       err = v.Compare(context.Background(), TestHash, TestBlock)
+       if err == nil || !os.IsNotExist(err) {
+               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+       }
+
+       indexBuf := new(bytes.Buffer)
+       v.IndexTo("", indexBuf)
+       if strings.Contains(string(indexBuf.Bytes()), TestHash) {
+               t.Fatalf("Found trashed block in IndexTo")
+       }
+
+       err = v.Touch(TestHash)
+       if err == nil || !os.IsNotExist(err) {
+               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+       }
+}
+
+// Calling Trash() on a block that does not exist should result in error.
+// Test should pass for both writable and read-only volumes
+func testDeleteNoSuchBlock(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       if err := v.Trash(TestHash2); err == nil {
+               t.Errorf("Expected error when attempting to delete a non-existing block")
+       }
+}
+
+// Invoke Status and verify that VolumeStatus is returned
+// Test should pass for both writable and read-only volumes
+func testStatus(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       // Get node status and make a basic sanity check.
+       status := v.Status()
+       if status.DeviceNum == 0 {
+               t.Errorf("uninitialized device_num in %v", status)
+       }
+
+       if status.BytesFree == 0 {
+               t.Errorf("uninitialized bytes_free in %v", status)
+       }
+
+       if status.BytesUsed == 0 {
+               t.Errorf("uninitialized bytes_used in %v", status)
+       }
+}
+
+func getValueFrom(cv *prometheus.CounterVec, lbls prometheus.Labels) float64 {
+       c, _ := cv.GetMetricWith(lbls)
+       pb := &dto.Metric{}
+       c.Write(pb)
+       return pb.GetCounter().GetValue()
+}
+
+func testMetrics(t TB, factory TestableVolumeFactory) {
+       var err error
+
+       v := factory(t)
+       defer v.Teardown()
+       reg := prometheus.NewRegistry()
+       vm := newVolumeMetricsVecs(reg)
+
+       err = v.Start(vm)
+       if err != nil {
+               t.Error("Failed Start(): ", err)
+       }
+       opsC, _, ioC := vm.getCounterVecsFor(prometheus.Labels{"device_id": v.DeviceID()})
+
+       if ioC == nil {
+               t.Error("ioBytes CounterVec is nil")
+               return
+       }
+
+       if getValueFrom(ioC, prometheus.Labels{"direction": "out"})+
+               getValueFrom(ioC, prometheus.Labels{"direction": "in"}) > 0 {
+               t.Error("ioBytes counter should be zero")
+       }
+
+       if opsC == nil {
+               t.Error("opsCounter CounterVec is nil")
+               return
+       }
+
+       var c, writeOpCounter, readOpCounter float64
+
+       readOpType, writeOpType := v.ReadWriteOperationLabelValues()
+       writeOpCounter = getValueFrom(opsC, prometheus.Labels{"operation": writeOpType})
+       readOpCounter = getValueFrom(opsC, prometheus.Labels{"operation": readOpType})
+
+       // Test Put if volume is writable
+       if v.Writable() {
+               err = v.Put(context.Background(), TestHash, TestBlock)
+               if err != nil {
+                       t.Errorf("Got err putting block %q: %q, expected nil", TestBlock, err)
+               }
+               // Check that the write operations counter increased
+               c = getValueFrom(opsC, prometheus.Labels{"operation": writeOpType})
+               if c <= writeOpCounter {
+                       t.Error("Operation(s) not counted on Put")
+               }
+               // Check that bytes counter is > 0
+               if getValueFrom(ioC, prometheus.Labels{"direction": "out"}) == 0 {
+                       t.Error("ioBytes{direction=out} counter shouldn't be zero")
+               }
+       } else {
+               v.PutRaw(TestHash, TestBlock)
+       }
+
+       buf := make([]byte, BlockSize)
+       _, err = v.Get(context.Background(), TestHash, buf)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // Check that the operations counter increased
+       c = getValueFrom(opsC, prometheus.Labels{"operation": readOpType})
+       if c <= readOpCounter {
+               t.Error("Operation(s) not counted on Get")
+       }
+       // Check that the bytes "in" counter is > 0
+       if getValueFrom(ioC, prometheus.Labels{"direction": "in"}) == 0 {
+               t.Error("ioBytes{direction=in} counter shouldn't be zero")
+       }
+}
+
+// Invoke String for the volume; expect non-empty result
+// Test should pass for both writable and read-only volumes
+func testString(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       if id := v.String(); len(id) == 0 {
+               t.Error("Got empty string for v.String()")
+       }
+}
+
+// Putting, updating, touching, and deleting blocks on a read-only
+// volume should result in errors.
+// Test is intended for only read-only volumes
+func testUpdateReadOnly(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       if v.Writable() == true {
+               return
+       }
+
+       v.PutRaw(TestHash, TestBlock)
+       buf := make([]byte, BlockSize)
+
+       // Get from read-only volume should succeed
+       _, err := v.Get(context.Background(), TestHash, buf)
+       if err != nil {
+               t.Errorf("got err %v, expected nil", err)
+       }
+
+       // Putting a new block on a read-only volume should result in error
+       err = v.Put(context.Background(), TestHash2, TestBlock2)
+       if err == nil {
+               t.Errorf("Expected error when putting block in a read-only volume")
+       }
+       _, err = v.Get(context.Background(), TestHash2, buf)
+       if err == nil {
+               t.Errorf("Expected error when getting block whose put in read-only volume failed")
+       }
+
+       // Touching a block in a read-only volume should result in error
+       err = v.Touch(TestHash)
+       if err == nil {
+               t.Errorf("Expected error when touching block in a read-only volume")
+       }
+
+       // Trashing a block on a read-only volume should result in error
+       err = v.Trash(TestHash)
+       if err == nil {
+               t.Errorf("Expected error when deleting block from a read-only volume")
+       }
+
+       // Overwriting an existing block in a read-only volume should result in error
+       err = v.Put(context.Background(), TestHash, TestBlock)
+       if err == nil {
+               t.Errorf("Expected error when putting block in a read-only volume")
+       }
+}
+
+// Launch concurrent Gets
+// Test should pass for both writable and read-only volumes
+func testGetConcurrent(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       v.PutRaw(TestHash, TestBlock)
+       v.PutRaw(TestHash2, TestBlock2)
+       v.PutRaw(TestHash3, TestBlock3)
+
+       sem := make(chan int)
+       go func() {
+               buf := make([]byte, BlockSize)
+               n, err := v.Get(context.Background(), TestHash, buf)
+               if err != nil {
+                       t.Errorf("err1: %v", err)
+               }
+               if bytes.Compare(buf[:n], TestBlock) != 0 {
+                       t.Errorf("buf should be %s, is %s", string(TestBlock), string(buf[:n]))
+               }
+               sem <- 1
+       }()
+
+       go func() {
+               buf := make([]byte, BlockSize)
+               n, err := v.Get(context.Background(), TestHash2, buf)
+               if err != nil {
+                       t.Errorf("err2: %v", err)
+               }
+               if bytes.Compare(buf[:n], TestBlock2) != 0 {
+                       t.Errorf("buf should be %s, is %s", string(TestBlock2), string(buf[:n]))
+               }
+               sem <- 1
+       }()
+
+       go func() {
+               buf := make([]byte, BlockSize)
+               n, err := v.Get(context.Background(), TestHash3, buf)
+               if err != nil {
+                       t.Errorf("err3: %v", err)
+               }
+               if bytes.Compare(buf[:n], TestBlock3) != 0 {
+                       t.Errorf("buf should be %s, is %s", string(TestBlock3), string(buf[:n]))
+               }
+               sem <- 1
+       }()
+
+       // Wait for all goroutines to finish
+       for done := 0; done < 3; done++ {
+               <-sem
+       }
+}
+
+// Launch concurrent Puts
+// Test is intended for only writable volumes
+func testPutConcurrent(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       if v.Writable() == false {
+               return
+       }
+
+       sem := make(chan int)
+       go func(sem chan int) {
+               err := v.Put(context.Background(), TestHash, TestBlock)
+               if err != nil {
+                       t.Errorf("err1: %v", err)
+               }
+               sem <- 1
+       }(sem)
+
+       go func(sem chan int) {
+               err := v.Put(context.Background(), TestHash2, TestBlock2)
+               if err != nil {
+                       t.Errorf("err2: %v", err)
+               }
+               sem <- 1
+       }(sem)
+
+       go func(sem chan int) {
+               err := v.Put(context.Background(), TestHash3, TestBlock3)
+               if err != nil {
+                       t.Errorf("err3: %v", err)
+               }
+               sem <- 1
+       }(sem)
+
+       // Wait for all goroutines to finish
+       for done := 0; done < 3; done++ {
+               <-sem
+       }
+
+       // Double check that we actually wrote the blocks we expected to write.
+       buf := make([]byte, BlockSize)
+       n, err := v.Get(context.Background(), TestHash, buf)
+       if err != nil {
+               t.Errorf("Get #1: %v", err)
+       }
+       if bytes.Compare(buf[:n], TestBlock) != 0 {
+               t.Errorf("Get #1: expected %s, got %s", string(TestBlock), string(buf[:n]))
+       }
+
+       n, err = v.Get(context.Background(), TestHash2, buf)
+       if err != nil {
+               t.Errorf("Get #2: %v", err)
+       }
+       if bytes.Compare(buf[:n], TestBlock2) != 0 {
+               t.Errorf("Get #2: expected %s, got %s", string(TestBlock2), string(buf[:n]))
+       }
+
+       n, err = v.Get(context.Background(), TestHash3, buf)
+       if err != nil {
+               t.Errorf("Get #3: %v", err)
+       }
+       if bytes.Compare(buf[:n], TestBlock3) != 0 {
+               t.Errorf("Get #3: expected %s, got %s", string(TestBlock3), string(buf[:n]))
+       }
+}
+
+// Write and read back a full size block
+func testPutFullBlock(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+
+       if !v.Writable() {
+               return
+       }
+
+       wdata := make([]byte, BlockSize)
+       wdata[0] = 'a'
+       wdata[BlockSize-1] = 'z'
+       hash := fmt.Sprintf("%x", md5.Sum(wdata))
+       err := v.Put(context.Background(), hash, wdata)
+       if err != nil {
+               t.Fatal(err)
+       }
+       buf := make([]byte, BlockSize)
+       n, err := v.Get(context.Background(), hash, buf)
+       if err != nil {
+               t.Error(err)
+       }
+       if bytes.Compare(buf[:n], wdata) != 0 {
+               t.Error("buf %+q != wdata %+q", buf[:n], wdata)
+       }
+}
+
+// With TrashLifetime != 0, perform:
+//   Trash an old block - which either returns ErrNotImplemented or succeeds
+//   Untrash - which either returns ErrNotImplemented or succeeds
+//   Get - which must succeed
+func testTrashUntrash(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+       defer func() {
+               theConfig.TrashLifetime = 0
+       }()
+
+       theConfig.TrashLifetime.Set("1h")
+
+       // put block and backdate it
+       v.PutRaw(TestHash, TestBlock)
+       v.TouchWithDate(TestHash, time.Now().Add(-2*theConfig.BlobSignatureTTL.Duration()))
+
+       buf := make([]byte, BlockSize)
+       n, err := v.Get(context.Background(), TestHash, buf)
+       if err != nil {
+               t.Fatal(err)
+       }
+       if bytes.Compare(buf[:n], TestBlock) != 0 {
+               t.Errorf("Got data %+q, expected %+q", buf[:n], TestBlock)
+       }
+
+       // Trash
+       err = v.Trash(TestHash)
+       if v.Writable() == false {
+               if err != MethodDisabledError {
+                       t.Fatal(err)
+               }
+       } else if err != nil {
+               if err != ErrNotImplemented {
+                       t.Fatal(err)
+               }
+       } else {
+               _, err = v.Get(context.Background(), TestHash, buf)
+               if err == nil || !os.IsNotExist(err) {
+                       t.Errorf("os.IsNotExist(%v) should have been true", err)
+               }
+
+               // Untrash
+               err = v.Untrash(TestHash)
+               if err != nil {
+                       t.Fatal(err)
+               }
+       }
+
+       // Get the block - after trash and untrash sequence
+       n, err = v.Get(context.Background(), TestHash, buf)
+       if err != nil {
+               t.Fatal(err)
+       }
+       if bytes.Compare(buf[:n], TestBlock) != 0 {
+               t.Errorf("Got data %+q, expected %+q", buf[:n], TestBlock)
+       }
+}
+
+func testTrashEmptyTrashUntrash(t TB, factory TestableVolumeFactory) {
+       v := factory(t)
+       defer v.Teardown()
+       defer func(orig arvados.Duration) {
+               theConfig.TrashLifetime = orig
+       }(theConfig.TrashLifetime)
+
+       checkGet := func() error {
+               buf := make([]byte, BlockSize)
+               n, err := v.Get(context.Background(), TestHash, buf)
+               if err != nil {
+                       return err
+               }
+               if bytes.Compare(buf[:n], TestBlock) != 0 {
+                       t.Fatalf("Got data %+q, expected %+q", buf[:n], TestBlock)
+               }
+
+               _, err = v.Mtime(TestHash)
+               if err != nil {
+                       return err
+               }
+
+               err = v.Compare(context.Background(), TestHash, TestBlock)
+               if err != nil {
+                       return err
+               }
+
+               indexBuf := new(bytes.Buffer)
+               v.IndexTo("", indexBuf)
+               if !strings.Contains(string(indexBuf.Bytes()), TestHash) {
+                       return os.ErrNotExist
+               }
+
+               return nil
+       }
+
+       // First set: EmptyTrash before reaching the trash deadline.
+
+       theConfig.TrashLifetime.Set("1h")
+
+       v.PutRaw(TestHash, TestBlock)
+       v.TouchWithDate(TestHash, time.Now().Add(-2*theConfig.BlobSignatureTTL.Duration()))
+
+       err := checkGet()
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // Trash the block
+       err = v.Trash(TestHash)
+       if err == MethodDisabledError || err == ErrNotImplemented {
+               // Skip the trash tests for read-only volumes, and
+               // volume types that don't support TrashLifetime>0.
+               return
+       }
+
+       err = checkGet()
+       if err == nil || !os.IsNotExist(err) {
+               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+       }
+
+       err = v.Touch(TestHash)
+       if err == nil || !os.IsNotExist(err) {
+               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+       }
+
+       v.EmptyTrash()
+
+       // Even after emptying the trash, we can untrash our block
+       // because the deadline hasn't been reached.
+       err = v.Untrash(TestHash)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       err = checkGet()
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       err = v.Touch(TestHash)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // Because we Touch'ed, need to backdate again for next set of tests
+       v.TouchWithDate(TestHash, time.Now().Add(-2*theConfig.BlobSignatureTTL.Duration()))
+
+       // If the only block in the trash has already been untrashed,
+       // most volumes will fail a subsequent Untrash with a 404, but
+       // it's also acceptable for Untrash to succeed.
+       err = v.Untrash(TestHash)
+       if err != nil && !os.IsNotExist(err) {
+               t.Fatalf("Expected success or os.IsNotExist(), but got: %v", err)
+       }
+
+       // The additional Untrash should not interfere with our
+       // already-untrashed copy.
+       err = checkGet()
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // Untrash might have updated the timestamp, so backdate again
+       v.TouchWithDate(TestHash, time.Now().Add(-2*theConfig.BlobSignatureTTL.Duration()))
+
+       // Second set: EmptyTrash after the trash deadline has passed.
+
+       theConfig.TrashLifetime.Set("1ns")
+
+       err = v.Trash(TestHash)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = checkGet()
+       if err == nil || !os.IsNotExist(err) {
+               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+       }
+
+       // Even though 1ns has passed, we can untrash because we
+       // haven't called EmptyTrash yet.
+       err = v.Untrash(TestHash)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = checkGet()
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // Trash it again, and this time call EmptyTrash so it really
+       // goes away.
+       // (In Azure volumes, un/trash changes Mtime, so first backdate again)
+       v.TouchWithDate(TestHash, time.Now().Add(-2*theConfig.BlobSignatureTTL.Duration()))
+       _ = v.Trash(TestHash)
+       err = checkGet()
+       if err == nil || !os.IsNotExist(err) {
+               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+       }
+       v.EmptyTrash()
+
+       // Untrash won't find it
+       err = v.Untrash(TestHash)
+       if err == nil || !os.IsNotExist(err) {
+               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+       }
+
+       // Get block won't find it
+       err = checkGet()
+       if err == nil || !os.IsNotExist(err) {
+               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+       }
+
+       // Third set: If the same data block gets written again after
+       // being trashed, and then the trash gets emptied, the newer
+       // un-trashed copy doesn't get deleted along with it.
+
+       v.PutRaw(TestHash, TestBlock)
+       v.TouchWithDate(TestHash, time.Now().Add(-2*theConfig.BlobSignatureTTL.Duration()))
+
+       theConfig.TrashLifetime.Set("1ns")
+       err = v.Trash(TestHash)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = checkGet()
+       if err == nil || !os.IsNotExist(err) {
+               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+       }
+
+       v.PutRaw(TestHash, TestBlock)
+       v.TouchWithDate(TestHash, time.Now().Add(-2*theConfig.BlobSignatureTTL.Duration()))
+
+       // EmptyTrash should not delete the untrashed copy.
+       v.EmptyTrash()
+       err = checkGet()
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // Fourth set: If the same data block gets trashed twice with
+       // different deadlines A and C, and then the trash is emptied
+       // at intermediate time B (A < B < C), it is still possible to
+       // untrash the block whose deadline is "C".
+
+       v.PutRaw(TestHash, TestBlock)
+       v.TouchWithDate(TestHash, time.Now().Add(-2*theConfig.BlobSignatureTTL.Duration()))
+
+       theConfig.TrashLifetime.Set("1ns")
+       err = v.Trash(TestHash)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       v.PutRaw(TestHash, TestBlock)
+       v.TouchWithDate(TestHash, time.Now().Add(-2*theConfig.BlobSignatureTTL.Duration()))
+
+       theConfig.TrashLifetime.Set("1h")
+       err = v.Trash(TestHash)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // EmptyTrash should not prevent us from recovering the
+       // time.Hour ("C") trash
+       v.EmptyTrash()
+       err = v.Untrash(TestHash)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = checkGet()
+       if err != nil {
+               t.Fatal(err)
+       }
+}
diff --git a/services/keepstore/volume_test.go b/services/keepstore/volume_test.go
new file mode 100644 (file)
index 0000000..0b8af33
--- /dev/null
@@ -0,0 +1,252 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "crypto/md5"
+       "errors"
+       "fmt"
+       "io"
+       "os"
+       "strings"
+       "sync"
+       "time"
+)
+
+// A TestableVolume allows test suites to manipulate the state of an
+// underlying Volume, in order to test behavior in cases that are
+// impractical to achieve with a sequence of normal Volume operations.
+type TestableVolume interface {
+       Volume
+
+       // [Over]write content for a locator with the given data,
+       // bypassing all constraints like readonly and serialize.
+       PutRaw(locator string, data []byte)
+
+       // Returns the strings that a driver uses to record read/write operations.
+       ReadWriteOperationLabelValues() (r, w string)
+
+       // Specify the value Mtime() should return, until the next
+       // call to Touch, TouchWithDate, or Put.
+       TouchWithDate(locator string, lastPut time.Time)
+
+       // Clean up, delete temporary files.
+       Teardown()
+}
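+
+// Typical use in a test (a sketch mirroring volume_generic_test.go
+// above): PutRaw writes a block directly, then TouchWithDate backdates
+// it so it is old enough to trash. TestHash, TestBlock, and theConfig
+// come from the surrounding test suite.
+//
+//     v.PutRaw(TestHash, TestBlock)
+//     v.TouchWithDate(TestHash, time.Now().Add(-2*theConfig.BlobSignatureTTL.Duration()))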
+
+// MockVolumes are test doubles for Volumes, used to test handlers.
+type MockVolume struct {
+       Store      map[string][]byte
+       Timestamps map[string]time.Time
+
+       // Bad volumes return an error for every operation.
+       Bad            bool
+       BadVolumeError error
+
+       // Touchable volumes' Touch() method succeeds for a locator
+       // that has been Put().
+       Touchable bool
+
+       // Readonly volumes return an error for Put, Trash, and
+       // Touch.
+       Readonly bool
+
+       // Gate is a "starting gate", allowing test cases to pause
+       // volume operations long enough to inspect state. Every
+       // operation (except Status) starts by receiving from
+       // Gate. Sending one value unblocks one operation; closing the
+       // channel unblocks all operations. By default, Gate is a
+       // closed channel, so all operations proceed without
+       // blocking. See trash_worker_test.go for an example.
+       Gate chan struct{}
+
+       called map[string]int
+       mutex  sync.Mutex
+}
+
+// CreateMockVolume returns a non-Bad, non-Readonly, Touchable mock
+// volume.
+func CreateMockVolume() *MockVolume {
+       gate := make(chan struct{})
+       close(gate)
+       return &MockVolume{
+               Store:      make(map[string][]byte),
+               Timestamps: make(map[string]time.Time),
+               Bad:        false,
+               Touchable:  true,
+               Readonly:   false,
+               called:     map[string]int{},
+               Gate:       gate,
+       }
+}
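+
+// Example (sketch) of using Gate to pause operations so a test can
+// inspect intermediate state: every gated operation begins with a
+// receive from Gate, so replacing the default closed channel makes
+// the next operation wait until the test sends one value (or closes
+// the channel). The goroutine below is illustrative.
+//
+//     v := CreateMockVolume()
+//     v.Gate = make(chan struct{}) // operations now block at the gate
+//     go v.Put(context.Background(), TestHash, TestBlock)
+//     // ... inspect state while Put is parked at <-v.Gate ...
+//     v.Gate <- struct{}{} // release exactly one operation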
+
+// CallCount returns how many times the named method has been called.
+func (v *MockVolume) CallCount(method string) int {
+       v.mutex.Lock()
+       defer v.mutex.Unlock()
+       c, ok := v.called[method]
+       if !ok {
+               return 0
+       }
+       return c
+}
+
+func (v *MockVolume) gotCall(method string) {
+       v.mutex.Lock()
+       defer v.mutex.Unlock()
+       if _, ok := v.called[method]; !ok {
+               v.called[method] = 1
+       } else {
+               v.called[method]++
+       }
+}
+
+func (v *MockVolume) Compare(ctx context.Context, loc string, buf []byte) error {
+       v.gotCall("Compare")
+       <-v.Gate
+       if v.Bad {
+               return v.BadVolumeError
+       } else if block, ok := v.Store[loc]; ok {
+               if fmt.Sprintf("%x", md5.Sum(block)) != loc {
+                       return DiskHashError
+               }
+               if !bytes.Equal(buf, block) {
+                       return CollisionError
+               }
+               return nil
+       } else {
+               return NotFoundError
+       }
+}
+
+func (v *MockVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
+       v.gotCall("Get")
+       <-v.Gate
+       if v.Bad {
+               return 0, v.BadVolumeError
+       } else if block, ok := v.Store[loc]; ok {
+               copy(buf[:len(block)], block)
+               return len(block), nil
+       }
+       return 0, os.ErrNotExist
+}
+
+func (v *MockVolume) Put(ctx context.Context, loc string, block []byte) error {
+       v.gotCall("Put")
+       <-v.Gate
+       if v.Bad {
+               return v.BadVolumeError
+       }
+       if v.Readonly {
+               return MethodDisabledError
+       }
+       v.Store[loc] = block
+       return v.Touch(loc)
+}
+
+func (v *MockVolume) Touch(loc string) error {
+       v.gotCall("Touch")
+       <-v.Gate
+       if v.Readonly {
+               return MethodDisabledError
+       }
+       if v.Touchable {
+               v.Timestamps[loc] = time.Now()
+               return nil
+       }
+       return errors.New("Touch failed")
+}
+
+func (v *MockVolume) Mtime(loc string) (time.Time, error) {
+       v.gotCall("Mtime")
+       <-v.Gate
+       var mtime time.Time
+       var err error
+       if v.Bad {
+               err = v.BadVolumeError
+       } else if t, ok := v.Timestamps[loc]; ok {
+               mtime = t
+       } else {
+               err = os.ErrNotExist
+       }
+       return mtime, err
+}
+
+func (v *MockVolume) IndexTo(prefix string, w io.Writer) error {
+       v.gotCall("IndexTo")
+       <-v.Gate
+       for loc, block := range v.Store {
+               if !IsValidLocator(loc) || !strings.HasPrefix(loc, prefix) {
+                       continue
+               }
+               _, err := fmt.Fprintf(w, "%s+%d %d\n",
+                       loc, len(block), 123456789) // fixed placeholder mtime
+               if err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+func (v *MockVolume) Trash(loc string) error {
+       // Note: Trash operations are recorded under the name "Delete",
+       // so CallCount("Delete") counts calls to Trash.
+       v.gotCall("Delete")
+       <-v.Gate
+       if v.Readonly {
+               return MethodDisabledError
+       }
+       if _, ok := v.Store[loc]; ok {
+               if time.Since(v.Timestamps[loc]) < time.Duration(theConfig.BlobSignatureTTL) {
+                       return nil
+               }
+               delete(v.Store, loc)
+               return nil
+       }
+       return os.ErrNotExist
+}
+
+func (v *MockVolume) DeviceID() string {
+       return "mock-device-id"
+}
+
+func (v *MockVolume) Type() string {
+       return "Mock"
+}
+
+func (v *MockVolume) Start(vm *volumeMetricsVecs) error {
+       return nil
+}
+
+func (v *MockVolume) Untrash(loc string) error {
+       v.gotCall("Untrash")
+       <-v.Gate
+       return nil
+}
+
+func (v *MockVolume) Status() *VolumeStatus {
+       var used uint64
+       for _, block := range v.Store {
+               used += uint64(len(block))
+       }
+       return &VolumeStatus{"/bogo", 123, 1000000 - used, used}
+}
+
+func (v *MockVolume) String() string {
+       return "[MockVolume]"
+}
+
+func (v *MockVolume) Writable() bool {
+       return !v.Readonly
+}
+
+func (v *MockVolume) Replication() int {
+       return 1
+}
+
+func (v *MockVolume) EmptyTrash() {
+}
+
+func (v *MockVolume) GetStorageClasses() []string {
+       return nil
+}
diff --git a/services/keepstore/work_queue.go b/services/keepstore/work_queue.go
new file mode 100644 (file)
index 0000000..56c6376
--- /dev/null
@@ -0,0 +1,212 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+/* A WorkQueue is an asynchronous thread-safe queue manager.  It
+   provides a channel from which items can be read off the queue, and
+   permits replacing the contents of the queue at any time.
+
+   The overall workflow for a WorkQueue is as follows:
+
+     1. A WorkQueue is created with NewWorkQueue().  This
+        function instantiates a new WorkQueue and starts a manager
+        goroutine.  The manager listens on an input channel
+        (manager.newlist) and an output channel (manager.NextItem).
+
+     2. The manager first waits for a new list of requests on the
+        newlist channel.  When another goroutine calls
+        manager.ReplaceQueue(lst), it sends lst over the newlist
+        channel to the manager.  The manager goroutine now has
+        ownership of the list.
+
+     3. Once the manager has this initial list, it listens on both the
+        input and output channels for one of the following to happen:
+
+          a. A worker attempts to read an item from the NextItem
+             channel.  The manager sends the next item from the list
+             over this channel to the worker, and loops.
+
+          b. New data is sent to the manager on the newlist channel.
+             This happens when another goroutine calls
+             manager.ReplaceQueue() with a new list.  The manager
+             discards the current list, replaces it with the new one,
+             and begins looping again.
+
+          c. The input channel is closed.  The manager closes its
+             output channel (signalling any workers to quit) and
+             terminates.
+
+   Tasks currently handled by WorkQueue:
+     * the pull list
+     * the trash list
+
+   Example usage:
+
+               // Any kind of user-defined type can be used with the
+               // WorkQueue.
+               type FrobRequest struct {
+                       frob string
+               }
+
+               // Make a work list.
+               froblist := NewWorkQueue()
+
+               // Start a concurrent worker to read items from the NextItem
+               // channel until it is closed, processing each one.
+               go func(list *WorkQueue) {
+                       for i := range list.NextItem {
+                               req := i.(FrobRequest)
+                               frob.Run(req)
+                       }
+               }(froblist)
+
+               // Set up an HTTP handler for PUT /frob
+               router.HandleFunc(`/frob`,
+                       func(w http.ResponseWriter, req *http.Request) {
+                               // Parse the request body into a list.List
+                               // of FrobRequests, and give this list to the
+                               // frob manager.
+                               newfrobs := parseBody(req.Body)
+                               froblist.ReplaceQueue(newfrobs)
+                       }).Methods("PUT")
+
+   Methods available on a WorkQueue:
+
+               ReplaceQueue(list)
+                       Replaces the current item list with a new one.  The
+                       list manager discards any unprocessed items on the
+                       existing list and replaces it with the new one.  If the
+                       worker is processing a list item when ReplaceQueue is
+                       called, it finishes processing before receiving items
+                       from the new list.
+               Close()
+                       Shuts down the manager goroutine. When Close is called,
+                       the manager closes the NextItem channel.
+*/
+
+import "container/list"
+
+// WorkQueue definition
+type WorkQueue struct {
+       getStatus chan WorkQueueStatus
+       newlist   chan *list.List
+       // Workers get work items by reading from this channel.
+       NextItem <-chan interface{}
+       // Each worker must send struct{}{} to DoneItem exactly once
+       // for each work item received from NextItem, when it stops
+       // working on that item (regardless of whether the work was
+       // successful).
+       DoneItem chan<- struct{}
+}
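+
+// A typical worker loop (sketch) honoring the DoneItem contract
+// described above; q is a *WorkQueue and process is an illustrative
+// placeholder:
+//
+//     go func(q *WorkQueue) {
+//             for item := range q.NextItem {
+//                     process(item)
+//                     q.DoneItem <- struct{}{} // exactly once per item
+//             }
+//     }(q)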
+
+// WorkQueueStatus reflects the queue status.
+type WorkQueueStatus struct {
+       InProgress int
+       Queued     int
+}
+
+// NewWorkQueue returns a new empty WorkQueue.
+//
+func NewWorkQueue() *WorkQueue {
+       nextItem := make(chan interface{})
+       reportDone := make(chan struct{})
+       newList := make(chan *list.List)
+       b := WorkQueue{
+               getStatus: make(chan WorkQueueStatus),
+               newlist:   newList,
+               NextItem:  nextItem,
+               DoneItem:  reportDone,
+       }
+       go func() {
+               // Read new work lists from the newlist channel.
+               // Reply to "status" and "get next item" queries by
+               // sending to the getStatus and nextItem channels
+               // respectively. Return when the newlist channel
+               // closes.
+
+               todo := &list.List{}
+               status := WorkQueueStatus{}
+
+               // When we're done, close the output channel; workers will
+               // shut down next time they ask for new work.
+               defer close(nextItem)
+               defer close(b.getStatus)
+
+               // nextChan and nextVal are both nil when we have
+               // nothing to send; otherwise they are, respectively,
+               // the nextItem channel and the next work item to send
+               // to it.
+               var nextChan chan interface{}
+               var nextVal interface{}
+
+               for newList != nil || status.InProgress > 0 {
+                       select {
+                       case p, ok := <-newList:
+                               if !ok {
+                                       // Closed, stop receiving
+                                       newList = nil
+                               }
+                               todo = p
+                               if todo == nil {
+                                       todo = &list.List{}
+                               }
+                               status.Queued = todo.Len()
+                               if status.Queued == 0 {
+                                       // Stop sending work
+                                       nextChan = nil
+                                       nextVal = nil
+                               } else {
+                                       nextChan = nextItem
+                                       nextVal = todo.Front().Value
+                               }
+                       case nextChan <- nextVal:
+                               todo.Remove(todo.Front())
+                               status.InProgress++
+                               status.Queued--
+                               if status.Queued == 0 {
+                                       // Stop sending work
+                                       nextChan = nil
+                                       nextVal = nil
+                               } else {
+                                       nextVal = todo.Front().Value
+                               }
+                       case <-reportDone:
+                               status.InProgress--
+                       case b.getStatus <- status:
+                       }
+               }
+       }()
+       return &b
+}
+
+// ReplaceQueue abandons any work items left in the existing queue,
+// and starts giving workers items from the given list. After giving
+// it to ReplaceQueue, the caller must not read or write the given
+// list.
+//
+func (b *WorkQueue) ReplaceQueue(list *list.List) {
+       b.newlist <- list
+}
+
+// Close shuts down the manager and terminates the goroutine, which
+// abandons any pending requests, but allows any pull request already
+// in progress to continue.
+//
+// After Close, Status will return correct values, NextItem will be
+// closed, and ReplaceQueue will panic.
+//
+func (b *WorkQueue) Close() {
+       close(b.newlist)
+}
+
+// Status returns an up-to-date WorkQueueStatus reflecting the current
+// queue status.
+//
+func (b *WorkQueue) Status() WorkQueueStatus {
+       // If the channel is closed, we get the nil value of
+       // WorkQueueStatus, which is an accurate description of a
+       // finished queue.
+       return <-b.getStatus
+}
diff --git a/services/keepstore/work_queue_test.go b/services/keepstore/work_queue_test.go
new file mode 100644 (file)
index 0000000..8a26c09
--- /dev/null
@@ -0,0 +1,244 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "container/list"
+       "runtime"
+       "testing"
+       "time"
+)
+
+type fatalfer interface {
+       Fatalf(string, ...interface{})
+}
+
+func makeTestWorkList(ary []interface{}) *list.List {
+       l := list.New()
+       for _, n := range ary {
+               l.PushBack(n)
+       }
+       return l
+}
+
+func expectChannelEmpty(t fatalfer, c <-chan interface{}) {
+       select {
+       case item, ok := <-c:
+               if ok {
+                       t.Fatalf("Received value (%+v) from channel that we expected to be empty", item)
+               }
+       default:
+       }
+}
+
+func expectChannelNotEmpty(t fatalfer, c <-chan interface{}) interface{} {
+       select {
+       case item, ok := <-c:
+               if !ok {
+                       t.Fatalf("expected data on a closed channel")
+               }
+               return item
+       case <-time.After(time.Second):
+               t.Fatalf("expected data on an empty channel")
+               return nil
+       }
+}
+
+func expectChannelClosedWithin(t fatalfer, timeout time.Duration, c <-chan interface{}) {
+       select {
+       case received, ok := <-c:
+               if ok {
+                       t.Fatalf("Expected channel to be closed, but received %+v instead", received)
+               }
+       case <-time.After(timeout):
+               t.Fatalf("Expected channel to be closed, but it is still open after %v", timeout)
+       }
+}
+
+func doWorkItems(t fatalfer, q *WorkQueue, expected []interface{}) {
+       for i := range expected {
+               actual, ok := <-q.NextItem
+               if !ok {
+                       t.Fatalf("Expected %+v but channel was closed after receiving %+v as expected.", expected, expected[:i])
+               }
+               q.DoneItem <- struct{}{}
+               if actual.(int) != expected[i] {
+                       t.Fatalf("Expected %+v but received %+v after receiving %+v as expected.", expected[i], actual, expected[:i])
+               }
+       }
+}
+
+func expectEqualWithin(t fatalfer, timeout time.Duration, expect interface{}, f func() interface{}) {
+       ok := make(chan struct{})
+       giveup := false
+       go func() {
+               for f() != expect && !giveup {
+                       time.Sleep(time.Millisecond)
+               }
+               close(ok)
+       }()
+       select {
+       case <-ok:
+       case <-time.After(timeout):
+               giveup = true
+               _, file, line, _ := runtime.Caller(1)
+               t.Fatalf("Still getting %+v, timed out waiting for %+v\n%s:%d", f(), expect, file, line)
+       }
+}
+
+func expectQueued(t fatalfer, b *WorkQueue, expectQueued int) {
+       if l := b.Status().Queued; l != expectQueued {
+               t.Fatalf("Got Queued==%d, expected %d", l, expectQueued)
+       }
+}
+
+func TestWorkQueueDoneness(t *testing.T) {
+       b := NewWorkQueue()
+       defer b.Close()
+       b.ReplaceQueue(makeTestWorkList([]interface{}{1, 2, 3}))
+       expectQueued(t, b, 3)
+       gate := make(chan struct{})
+       go func() {
+               <-gate
+               for range b.NextItem {
+                       <-gate
+                       time.Sleep(time.Millisecond)
+                       b.DoneItem <- struct{}{}
+               }
+       }()
+       expectEqualWithin(t, time.Second, 0, func() interface{} { return b.Status().InProgress })
+       b.ReplaceQueue(makeTestWorkList([]interface{}{4, 5, 6}))
+       for i := 1; i <= 3; i++ {
+               gate <- struct{}{}
+               expectEqualWithin(t, time.Second, 3-i, func() interface{} { return b.Status().Queued })
+               expectEqualWithin(t, time.Second, 1, func() interface{} { return b.Status().InProgress })
+       }
+       close(gate)
+       expectEqualWithin(t, time.Second, 0, func() interface{} { return b.Status().InProgress })
+       expectChannelEmpty(t, b.NextItem)
+}
+
+// Create a WorkQueue, generate a list for it, and instantiate a worker.
+func TestWorkQueueReadWrite(t *testing.T) {
+       var input = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}
+
+       b := NewWorkQueue()
+       expectQueued(t, b, 0)
+
+       b.ReplaceQueue(makeTestWorkList(input))
+       expectQueued(t, b, len(input))
+
+       doWorkItems(t, b, input)
+       expectChannelEmpty(t, b.NextItem)
+       b.Close()
+}
+
+// Start a worker before the list has any input.
+func TestWorkQueueEarlyRead(t *testing.T) {
+       var input = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}
+
+       b := NewWorkQueue()
+       defer b.Close()
+
+       // First, demonstrate that nothing is available on the NextItem
+       // channel.
+       expectChannelEmpty(t, b.NextItem)
+
+       // Start a reader in a goroutine. The reader will block until
+       // the work list has been initialized.
+       done := make(chan int)
+       go func() {
+               doWorkItems(t, b, input)
+               done <- 1
+       }()
+
+       // Feed the queue a new worklist, and wait for the worker to
+       // finish.
+       b.ReplaceQueue(makeTestWorkList(input))
+       <-done
+       expectQueued(t, b, 0)
+}
+
+// After Close(), NextItem closes, work finishes, then stats return zero.
+func TestWorkQueueClose(t *testing.T) {
+       b := NewWorkQueue()
+       input := []interface{}{1, 2, 3, 4, 5, 6, 7, 8}
+       mark := make(chan struct{})
+       go func() {
+               <-b.NextItem
+               mark <- struct{}{}
+               <-mark
+               b.DoneItem <- struct{}{}
+       }()
+       b.ReplaceQueue(makeTestWorkList(input))
+       // Wait for worker to take item 1
+       <-mark
+       b.Close()
+       expectEqualWithin(t, time.Second, 1, func() interface{} { return b.Status().InProgress })
+       // Tell worker to report done
+       mark <- struct{}{}
+       expectEqualWithin(t, time.Second, 0, func() interface{} { return b.Status().InProgress })
+       expectChannelClosedWithin(t, time.Second, b.NextItem)
+}
+
+// Show that a reader may block when the manager's list is exhausted,
+// and that the reader resumes automatically when new data is
+// available.
+func TestWorkQueueReaderBlocks(t *testing.T) {
+       var (
+               inputBeforeBlock = []interface{}{1, 2, 3, 4, 5}
+               inputAfterBlock  = []interface{}{6, 7, 8, 9, 10}
+       )
+
+       b := NewWorkQueue()
+       defer b.Close()
+       sendmore := make(chan int)
+       done := make(chan int)
+       go func() {
+               doWorkItems(t, b, inputBeforeBlock)
+
+               // Confirm that the channel is empty, so a subsequent read
+               // on it will block.
+               expectChannelEmpty(t, b.NextItem)
+
+               // Signal that we're ready for more input.
+               sendmore <- 1
+               doWorkItems(t, b, inputAfterBlock)
+               done <- 1
+       }()
+
+       // Write a slice of the first five elements and wait for the
+       // reader to signal that it's ready for us to send more input.
+       b.ReplaceQueue(makeTestWorkList(inputBeforeBlock))
+       <-sendmore
+
+       b.ReplaceQueue(makeTestWorkList(inputAfterBlock))
+
+       // Wait for the reader to complete.
+       <-done
+}
+
+// Replace one active work list with another.
+func TestWorkQueueReplaceQueue(t *testing.T) {
+       var firstInput = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}
+       var replaceInput = []interface{}{1, 4, 9, 16, 25, 36, 49, 64, 81}
+
+       b := NewWorkQueue()
+       b.ReplaceQueue(makeTestWorkList(firstInput))
+
+       // Read just the first five elements from the work list.
+       // Confirm that the channel is not empty.
+       doWorkItems(t, b, firstInput[0:5])
+       expectChannelNotEmpty(t, b.NextItem)
+
+       // Replace the work list and read five more elements.
+       // The old list should have been discarded and all new
+       // elements come from the new list.
+       b.ReplaceQueue(makeTestWorkList(replaceInput))
+       doWorkItems(t, b, replaceInput[0:5])
+
+       b.Close()
+}
diff --git a/services/login-sync/.gitignore b/services/login-sync/.gitignore
new file mode 100644 (file)
index 0000000..cec3cb5
--- /dev/null
@@ -0,0 +1,2 @@
+*.gem
+Gemfile.lock
diff --git a/services/login-sync/Gemfile b/services/login-sync/Gemfile
new file mode 100644 (file)
index 0000000..42d990f
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+source 'https://rubygems.org'
+gemspec
+group :test, :performance do
+  gem 'minitest', '>= 5.0.0'
+  gem 'mocha', require: false
+  gem 'rake'
+end
diff --git a/services/login-sync/Rakefile b/services/login-sync/Rakefile
new file mode 100644 (file)
index 0000000..f1a7860
--- /dev/null
@@ -0,0 +1,12 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'rake/testtask'
+
+Rake::TestTask.new do |t|
+  t.libs << 'test'
+end
+
+desc 'Run tests'
+task default: :test
diff --git a/services/login-sync/agpl-3.0.txt b/services/login-sync/agpl-3.0.txt
new file mode 100644 (file)
index 0000000..dba13ed
--- /dev/null
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/services/login-sync/arvados-login-sync.gemspec b/services/login-sync/arvados-login-sync.gemspec
new file mode 100644 (file)
index 0000000..b64aab2
--- /dev/null
@@ -0,0 +1,30 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+unless File.exist?('/usr/bin/git')
+  STDERR.puts "\nGit binary not found, aborting. Please install git and run gem build from a checked-out copy of the git repository.\n\n"
+  exit 1
+end
+
+git_latest_tag = `git tag -l |sort -V -r |head -n1`
+git_latest_tag = git_latest_tag.encode('utf-8').strip
+git_timestamp, git_hash = `git log -n1 --first-parent --format=%ct:%H .`.chomp.split(":")
+git_timestamp = Time.at(git_timestamp.to_i).utc
+
+Gem::Specification.new do |s|
+  s.name        = 'arvados-login-sync'
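+  # Version is "<latest git tag>.<UTC timestamp of the last commit touching
+  # this directory>", so builds from newer commits always sort as newer gems.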
+  s.version     = "#{git_latest_tag}.#{git_timestamp.strftime('%Y%m%d%H%M%S')}"
+  s.date        = git_timestamp.strftime("%Y-%m-%d")
+  s.summary     = "Set up local login accounts for Arvados users"
+  s.description = "Creates and updates local login accounts for Arvados users. Built from git commit #{git_hash}"
+  s.authors     = ["Arvados Authors"]
+  s.email       = 'gem-dev@curoverse.com'
+  s.licenses    = ['GNU Affero General Public License, version 3.0']
+  s.files       = ["bin/arvados-login-sync", "agpl-3.0.txt"]
+  s.executables << "arvados-login-sync"
+  s.required_ruby_version = '>= 2.1.0'
+  s.add_runtime_dependency 'arvados', '~> 1.3.0', '>= 1.3.0'
+  s.homepage    = 'https://arvados.org'
+end
diff --git a/services/login-sync/bin/arvados-login-sync b/services/login-sync/bin/arvados-login-sync
new file mode 100755 (executable)
index 0000000..eb68004
--- /dev/null
@@ -0,0 +1,162 @@
+#!/usr/bin/env ruby
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'rubygems'
+require 'pp'
+require 'arvados'
+require 'etc'
+require 'fileutils'
+require 'yaml'
+
+req_envs = %w(ARVADOS_API_HOST ARVADOS_API_TOKEN ARVADOS_VIRTUAL_MACHINE_UUID)
+req_envs.each do |k|
+  unless ENV[k]
+    abort "Fatal: These environment vars must be set: #{req_envs}"
+  end
+end
+
+exclusive_mode = ARGV.index("--exclusive")
+exclusive_banner = "#######################################################################################
+#  THIS FILE IS MANAGED BY #{$0} -- CHANGES WILL BE OVERWRITTEN  #
+#######################################################################################\n\n"
+start_banner = "### BEGIN Arvados-managed keys -- changes between markers will be overwritten\n"
+end_banner = "### END Arvados-managed keys -- changes between markers will be overwritten\n"
+
+# Don't try to create any local accounts
+skip_missing_users = ARGV.index("--skip-missing-users")
+
+keys = ''
+
+begin
+  arv = Arvados.new({ :suppress_ssl_warnings => false })
+
+  vm_uuid = ENV['ARVADOS_VIRTUAL_MACHINE_UUID']
+
+  logins = arv.virtual_machine.logins(:uuid => vm_uuid)[:items]
+  logins = [] if logins.nil?
+  logins = logins.reject { |l| l[:username].nil? or l[:hostname].nil? or l[:public_key].nil? or l[:virtual_machine_uuid] != vm_uuid }
+
+  # Never manage system accounts: only handle uids at or above UID_MIN
+  # from /etc/login.defs (default 1000 if unset or unparseable).
+  uid_min = 1000
+  open("/etc/login.defs", encoding: "utf-8") do |login_defs|
+    login_defs.each_line do |line|
+      next unless match = /^UID_MIN\s+(\S+)$/.match(line)
+      if match[1].start_with?("0x")
+        base = 16
+      elsif match[1].start_with?("0")
+        base = 8
+      else
+        base = 10
+      end
+      new_uid_min = match[1].to_i(base)
+      uid_min = new_uid_min if (new_uid_min > 0)
+    end
+  end
+
+  pwnam = Hash.new()
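+  # Prune the login list: when --skip-missing-users is given, drop logins
+  # with no local account (otherwise they are created below); always drop
+  # accounts below uid_min.  Note the begin/rescue/else: the else clause
+  # runs only when getpwnam() succeeds.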
+  logins.reject! do |l|
+    if not pwnam[l[:username]]
+      begin
+        pwnam[l[:username]] = Etc.getpwnam(l[:username])
+      rescue
+        if skip_missing_users
+          STDERR.puts "Account #{l[:username]} not found. Skipping"
+          true
+        end
+      else
+        if pwnam[l[:username]].uid < uid_min
+          STDERR.puts "Account #{l[:username]} uid #{pwnam[l[:username]].uid} < uid_min #{uid_min}. Skipping"
+          true
+        end
+      end
+    end
+  end
+  keys = Hash.new()
+
+  # Collect all keys
+  logins.each do |l|
+    keys[l[:username]] = Array.new() if not keys.has_key?(l[:username])
+    key = l[:public_key]
+    # Handle putty-style ssh public keys
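+    # (PuTTY's exported public keys start with a `Comment: "rsa-key-..."` or
+    # `Comment: "dsa-key-..."` header; rewrite them as one-line OpenSSH
+    # entries with the comment moved after the key.)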
+    key.sub!(/^(Comment: "r[^\n]*\n)(.*)$/m,'ssh-rsa \2 \1')
+    key.sub!(/^(Comment: "d[^\n]*\n)(.*)$/m,'ssh-dss \2 \1')
+    key.gsub!(/\n/,'')
+    key = key.strip
+
+    keys[l[:username]].push(key) if not keys[l[:username]].include?(key)
+  end
+
+  seen = Hash.new()
+  devnull = open("/dev/null", "w")
+
+  logins.each do |l|
+    next if seen[l[:username]]
+    seen[l[:username]] = true
+
+    unless pwnam[l[:username]]
+      STDERR.puts "Creating account #{l[:username]}"
+      groups = l[:groups] || []
+      # Adding users to the FUSE group has long been hardcoded behavior.
+      groups << "fuse"
+      groups.select! { |g| Etc.getgrnam(g) rescue false }
+      # Create new user
+      unless system("useradd", "-m",
+                "-c", l[:username],
+                "-s", "/bin/bash",
+                "-G", groups.join(","),
+                l[:username],
+                out: devnull)
+        STDERR.puts "Account creation failed for #{l[:username]}: $?"
+        next
+      end
+      begin
+        pwnam[l[:username]] = Etc.getpwnam(l[:username])
+      rescue => e
+        STDERR.puts "Created account but then getpwnam() failed for #{l[:username]}: #{e}"
+        raise
+      end
+    end
+
+    @homedir = pwnam[l[:username]].dir
+    userdotssh = File.join(@homedir, ".ssh")
+    Dir.mkdir(userdotssh) unless File.exist?(userdotssh)
+
+    newkeys = "###\n###\n" + keys[l[:username]].join("\n") + "\n###\n###\n"
+
+    keysfile = File.join(userdotssh, "authorized_keys")
+
+    oldkeys = File.exist?(keysfile) ? IO.read(keysfile) : ""
+
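+    # Merge with any existing authorized_keys file: in --exclusive mode the
+    # whole file is replaced; a file previously written in exclusive mode is
+    # replaced by a plain managed section; an existing managed section
+    # (between the BEGIN/END markers) is spliced in place; otherwise the
+    # managed section is prepended ahead of the user's own keys.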
+    if exclusive_mode
+      newkeys = exclusive_banner + newkeys
+    elsif oldkeys.start_with?(exclusive_banner)
+      newkeys = start_banner + newkeys + end_banner
+    elsif (m = /^(.*?\n|)#{start_banner}(.*?\n|)#{end_banner}(.*)/m.match(oldkeys))
+      newkeys = m[1] + start_banner + newkeys + end_banner + m[3]
+    else
+      newkeys = start_banner + newkeys + end_banner + oldkeys
+    end
+
+    if oldkeys != newkeys
+      File.open(keysfile, 'w') { |f| f.write(newkeys) }
+    end
+    FileUtils.chown_R(l[:username], nil, userdotssh)
+    File.chmod(0700, userdotssh)
+    File.chmod(0750, @homedir)
+    File.chmod(0600, keysfile)
+  end
+
+  devnull.close
+rescue Exception => bang
+  STDERR.puts "Error: " + bang.to_s
+  STDERR.puts bang.backtrace.join("\n")
+  exit 1
+end
diff --git a/services/login-sync/test/binstub_new_user/useradd b/services/login-sync/test/binstub_new_user/useradd
new file mode 100755 (executable)
index 0000000..86dc6e8
--- /dev/null
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+stub="${0##*/}"
+
+# Record what actually happened in the "spy" file
+echo "$stub $*" >> "$ARVADOS_LOGIN_SYNC_TMPDIR/spy"
+
+# Exit 0 if this command was listed in the "succeed" file
+exec fgrep -qx -- "$stub $*" "$ARVADOS_LOGIN_SYNC_TMPDIR/succeed"
diff --git a/services/login-sync/test/stubs.rb b/services/login-sync/test/stubs.rb
new file mode 100644 (file)
index 0000000..d7fab3c
--- /dev/null
@@ -0,0 +1,56 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'etc'
+require 'fileutils'
+require 'mocha/mini_test'
+require 'ostruct'
+require 'tmpdir'
+
+module Stubs
+  # These Etc mocks help only when we run arvados-login-sync in-process;
+  # the child-ENV tricks help only when we run it as a subprocess.  Ruby
+  # keeps only the last definition of a method, so both halves live in a
+  # single setup -- a second `def setup` would silently replace this one.
+
+  def setup
+    super
+    # Snapshot the environment and make a scratch dir before mutating ENV,
+    # so teardown can restore things exactly.
+    @env_was = Hash[ENV]
+    @tmpdir = Dir.mktmpdir
+    ENV['ARVADOS_VIRTUAL_MACHINE_UUID'] = 'testvm2.shell'
+    Etc.stubs(:to_enum).with(:passwd).returns stubpasswd.map { |x| OpenStruct.new x }
+    Etc.stubs(:to_enum).with(:group).returns stubgroup.map { |x| OpenStruct.new x }
+  end
+
+  def stubpasswd
+    [{name: 'root', uid: 0}]
+  end
+
+  def stubgroup
+    [{name: 'root', gid: 0}]
+  end
+
+  def teardown
+    FileUtils.remove_dir(@tmpdir)
+    ENV.select! { |k| @env_was.has_key? k }
+    @env_was.each do |k,v| ENV[k]=v end
+    super
+  end
+
+  def stubenv opts={}
+    # Use UUID of testvm2.shell fixture, unless otherwise specified by test case.
+    Hash[ENV].merge('ARVADOS_VIRTUAL_MACHINE_UUID' => 'zzzzz-2x53u-382brsig8rp3065',
+                    'ARVADOS_LOGIN_SYNC_TMPDIR' => @tmpdir)
+  end
+
+  def invoke_sync opts={}
+    env = stubenv.merge(opts[:env] || {})
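+    # Prepend each requested binstub directory to PATH so its stubs (e.g.
+    # useradd) shadow the real commands in the child process.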
+    (opts[:binstubs] || []).each do |binstub|
+      env['PATH'] = File.absolute_path('../binstub_'+binstub, __FILE__) + ':' + env['PATH']
+    end
+    login_sync_path = File.absolute_path '../../bin/arvados-login-sync', __FILE__
+    system env, login_sync_path
+  end
+end
diff --git a/services/login-sync/test/test_add_user.rb b/services/login-sync/test/test_add_user.rb
new file mode 100644 (file)
index 0000000..17942c2
--- /dev/null
@@ -0,0 +1,44 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'minitest/autorun'
+
+require 'stubs'
+
+class TestAddUser < Minitest::Test
+  include Stubs
+
+  def test_useradd_error
+    # binstub_new_user/useradd will exit non-zero because its args
+    # won't match any line in this empty file:
+    File.open(@tmpdir+'/succeed', 'w') do |f| end
+    invoke_sync binstubs: ['new_user']
+    spied = File.read(@tmpdir+'/spy')
+    assert_match %r{useradd -m -c active -s /bin/bash -G (fuse)? active}, spied
+    # BUG(TC): This assertion succeeds only if docker and fuse groups
+    # exist on the host, but is insensitive to the admin group (groups
+    # are quietly ignored by login-sync if they don't exist on the
+    # current host).
+    assert_match %r{useradd -m -c adminroot -s /bin/bash -G (docker)?(,admin)?(,fuse)? adminroot}, spied
+  end
+
+  def test_useradd_success
+    # binstub_new_user/useradd will succeed.
+    File.open(@tmpdir+'/succeed', 'w') do |f|
+      f.puts 'useradd -m -c active -s /bin/bash -G fuse active'
+      f.puts 'useradd -m -c active -s /bin/bash -G  active'
+      # Accept either form; see note about groups in test_useradd_error.
+      f.puts 'useradd -m -c adminroot -s /bin/bash -G docker,fuse adminroot'
+      f.puts 'useradd -m -c adminroot -s /bin/bash -G docker,admin,fuse adminroot'
+      f.puts 'useradd -m -c adminroot -s /bin/bash -G docker adminroot'
+      f.puts 'useradd -m -c adminroot -s /bin/bash -G docker,admin adminroot'
+    end
+    $stderr.puts "*** Expect crash after getpwnam() fails:"
+    invoke_sync binstubs: ['new_user']
+    assert !$?.success?
+    spied = File.read(@tmpdir+'/spy')
+    # Expect a crash after adding one user: the stubbed useradd creates no
+    # real account, so the follow-up getpwnam() raises (see stderr note above).
+    assert_match %r{^useradd -m -c [^\n]+\n$}s, spied
+  end
+end
diff --git a/services/nodemanager/.gitignore b/services/nodemanager/.gitignore
new file mode 120000 (symlink)
index 0000000..ed3b362
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/python/.gitignore
\ No newline at end of file
diff --git a/services/nodemanager/MANIFEST.in b/services/nodemanager/MANIFEST.in
new file mode 100644 (file)
index 0000000..8410420
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+include agpl-3.0.txt
+include README.rst
+include arvados_version.py
+include arvados-node-manager.service
diff --git a/services/nodemanager/README.rst b/services/nodemanager/README.rst
new file mode 100644 (file)
index 0000000..1d725e0
--- /dev/null
@@ -0,0 +1,43 @@
+.. Copyright (C) The Arvados Authors. All rights reserved.
+..
+.. SPDX-License-Identifier: AGPL-3.0
+
+====================
+Arvados Node Manager
+====================
+
+Overview
+--------
+
+This package provides ``arvados-node-manager``.  It dynamically starts
+and stops compute nodes on an Arvados_ cloud installation based on job
+demand.
+
+.. _Arvados: https://arvados.org/
+
+Setup
+-----
+
+1. Install the package.
+
+2. Write a configuration file.  ``doc/ec2.example.cfg`` documents all
+   of the options available, with specific tunables for EC2 clouds.
+
+3. Run ``arvados-node-manager --config YOURCONFIGFILE`` using whatever
+   supervisor you like (e.g., runit).
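+
+   A minimal runit ``run`` script might look like this (a sketch only;
+   the service directory path is an assumption, not something this
+   package installs)::
+
+     #!/bin/sh
+     # hypothetical path: /etc/service/arvados-node-manager/run
+     exec arvados-node-manager --config /etc/arvados-node-manager/config.ini 2>&1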
+
+Testing and Development
+-----------------------
+
+To run tests, just run::
+
+  python setup.py test
+
+Our `hacking guide
+<https://arvados.org/projects/arvados/wiki/Hacking_Node_Manager>`_
+provides an architectural overview of the Arvados Node Manager to help
+you find your way around the source.  The `Lifecycle of an Arvados
+compute node
+<https://arvados.org/projects/arvados/wiki/Lifecycle_of_an_Arvados_compute_node>`_
+page explains how it works in concert with other Arvados components to
+prepare a node for compute work.
diff --git a/services/nodemanager/agpl-3.0.txt b/services/nodemanager/agpl-3.0.txt
new file mode 100644 (file)
index 0000000..dba13ed
--- /dev/null
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/services/nodemanager/arvados-node-manager.service b/services/nodemanager/arvados-node-manager.service
new file mode 100644 (file)
index 0000000..38c525b
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados Node Manager Daemon
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados-node-manager/config.ini
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+EnvironmentFile=-/etc/default/arvados-node-manager
+LimitDATA=3145728K
+LimitRSS=3145728K
+LimitMEMLOCK=3145728K
+LimitNOFILE=10240
+Type=simple
+ExecStart=/usr/bin/env sh -c '/usr/bin/arvados-node-manager --foreground --config /etc/arvados-node-manager/config.ini 2>&1 | cat'
+Restart=always
+RestartSec=1
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/services/nodemanager/arvados_version.py b/services/nodemanager/arvados_version.py
new file mode 100644 (file)
index 0000000..2e6484c
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import subprocess
+import time
+import os
+import re
+
+def git_latest_tag():
+    gittags = subprocess.check_output(['git', 'tag', '-l']).split()
+    # Sort tags as dotted integer tuples (e.g. 1.3.0) so "latest" is
+    # determined numerically rather than lexically.
+    gittags.sort(key=lambda s: [int(u) for u in s.split(b'.')], reverse=True)
+    return gittags[0].decode('utf-8')
+
+def git_timestamp_tag():
+    gitinfo = subprocess.check_output(
+        ['git', 'log', '--first-parent', '--max-count=1',
+         '--format=format:%ct', '.']).strip()
+    return str(time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo))))
+
+def save_version(setup_dir, module, v):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'w') as fp:
+        return fp.write("__version__ = '%s'\n" % v)
+
+def read_version(setup_dir, module):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'r') as fp:
+        return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+
+def get_version(setup_dir, module):
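+    # Prefer an explicitly pinned ARVADOS_BUILDING_VERSION; otherwise derive
+    # "<latest git tag>.<commit timestamp>" from git.  Either way the result
+    # is written to _version.py first, so read_version() also works when
+    # building from an sdist without a git checkout.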
+    env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
+
+    if env_version:
+        save_version(setup_dir, module, env_version)
+    else:
+        try:
+            save_version(setup_dir, module, git_latest_tag() + git_timestamp_tag())
+        except subprocess.CalledProcessError:
+            pass
+
+    return read_version(setup_dir, module)
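+
+# Illustrative sketch (not part of the original change): how a setup.py
+# might consume get_version().  SETUP_DIR and the module name here are
+# assumptions for the example.
+#
+#     import os
+#     import arvados_version
+#     SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
+#     version = arvados_version.get_version(SETUP_DIR, 'arvnodeman')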
diff --git a/services/nodemanager/arvnodeman/__init__.py b/services/nodemanager/arvnodeman/__init__.py
new file mode 100644 (file)
index 0000000..3f94807
--- /dev/null
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import _strptime  # See <http://bugs.python.org/issue7980#msg221094>.
+import logging
+
+logger = logging.getLogger('arvnodeman')
+logger.addHandler(logging.NullHandler())
diff --git a/services/nodemanager/arvnodeman/baseactor.py b/services/nodemanager/arvnodeman/baseactor.py
new file mode 100644 (file)
index 0000000..bdfe5d4
--- /dev/null
@@ -0,0 +1,129 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import errno
+import logging
+import os
+import signal
+import time
+import threading
+import traceback
+
+import pykka
+
+from .status import tracker
+
+class _TellCallableProxy(object):
+    """Internal helper class for proxying callables."""
+
+    def __init__(self, ref, attr_path):
+        self.actor_ref = ref
+        self._attr_path = attr_path
+
+    def __call__(self, *args, **kwargs):
+        message = {
+            'command': 'pykka_call',
+            'attr_path': self._attr_path,
+            'args': args,
+            'kwargs': kwargs,
+        }
+        self.actor_ref.tell(message)
+
+
+class TellActorProxy(pykka.ActorProxy):
+    """ActorProxy in which all calls are implemented as using tell().
+
+    The standard pykka.ActorProxy always uses ask() and returns a Future.  If
+    the target method raises an exception, it is placed in the Future object
+    and re-raised when get() is called on the Future.  Unfortunately, most
+    messaging in Node Manager is asynchronous and the caller does not store the
+    Future object returned by the call to ActorProxy.  As a result, exceptions
+    resulting from these calls end up in limbo, neither reported in the logs
+    nor handled by on_failure().
+
+    The TellActorProxy uses tell() instead of ask() and does not return a
+    Future object.  As a result, if the target method raises an exception, it
+    will be logged and on_failure() will be called as intended.
+
+    """
+
+    def __repr__(self):
+        return '<ActorProxy for %s, attr_path=%s>' % (
+            self.actor_ref, self._attr_path)
+
+    def __getattr__(self, name):
+        """Get a callable from the actor."""
+        attr_path = self._attr_path + (name,)
+        if attr_path not in self._known_attrs:
+            self._known_attrs = self._get_attributes()
+        attr_info = self._known_attrs.get(attr_path)
+        if attr_info is None:
+            raise AttributeError('%s has no attribute "%s"' % (self, name))
+        if attr_info['callable']:
+            if attr_path not in self._callable_proxies:
+                self._callable_proxies[attr_path] = _TellCallableProxy(
+                    self.actor_ref, attr_path)
+            return self._callable_proxies[attr_path]
+        else:
+            raise AttributeError('attribute "%s" is not callable on %s' % (name, self))
+
+class TellableActorRef(pykka.ActorRef):
+    """ActorRef adding the tell_proxy() method to get TellActorProxy."""
+
+    def tell_proxy(self):
+        return TellActorProxy(self)
+
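+# Illustrative sketch (not part of the original change): obtaining and
+# using a tell-based proxy.  MyActor is a hypothetical subclass of
+# BaseNodeManagerActor (defined below).
+#
+#     ref = MyActor.start()        # returns a TellableActorRef
+#     proxy = ref.tell_proxy()     # TellActorProxy
+#     proxy.do_work('x')           # fire-and-forget: no Future is returned,
+#                                  # so exceptions surface via on_failure()
+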
+class BaseNodeManagerActor(pykka.ThreadingActor):
+    """Base class for actors in node manager, redefining actor_ref as a
+    TellableActorRef and providing a default on_failure handler.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(pykka.ThreadingActor, self).__init__(*args, **kwargs)
+        self.actor_ref = TellableActorRef(self)
+        self._killfunc = kwargs.get("killfunc", os.kill)
+
+    def on_failure(self, exception_type, exception_value, tb):
+        lg = getattr(self, "_logger", logging)
+        if (exception_type in (threading.ThreadError, MemoryError) or
+            exception_type is OSError and exception_value.errno == errno.ENOMEM):
+            lg.critical("Unhandled exception is a fatal error, killing Node Manager")
+            self._killfunc(os.getpid(), signal.SIGKILL)
+        tracker.counter_add('actor_exceptions')
+
+    def ping(self):
+        return True
+
+    def get_thread(self):
+        return threading.current_thread()
+
+class WatchdogActor(pykka.ThreadingActor):
+    def __init__(self, timeout, *args, **kwargs):
+        super(pykka.ThreadingActor, self).__init__(*args, **kwargs)
+        self.timeout = timeout
+        self.actors = [a.proxy() for a in args]
+        self.actor_ref = TellableActorRef(self)
+        self._later = self.actor_ref.tell_proxy()
+        self._killfunc = kwargs.get("killfunc", os.kill)
+
+    def kill_self(self, e, act):
+        lg = getattr(self, "_logger", logging)
+        lg.critical("Watchdog exception", exc_info=e)
+        lg.critical("Actor %s watchdog ping time out, killing Node Manager", act)
+        self._killfunc(os.getpid(), signal.SIGKILL)
+
+    def on_start(self):
+        self._later.run()
+
+    def run(self):
+        a = None
+        try:
+            for a in self.actors:
+                a.ping().get(self.timeout)
+            time.sleep(20)
+            self._later.run()
+        except Exception as e:
+            self.kill_self(e, a)
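+
+# Illustrative sketch (not part of the original change): wiring up the
+# watchdog.  The actor refs here are hypothetical; each watched actor must
+# answer ping() within the timeout or the whole daemon is killed.
+#
+#     watchdog = WatchdogActor.start(600, poller.actor_ref, daemon.actor_ref)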
diff --git a/services/nodemanager/arvnodeman/clientactor.py b/services/nodemanager/arvnodeman/clientactor.py
new file mode 100644 (file)
index 0000000..afc4f1c
--- /dev/null
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import logging
+import time
+
+import pykka
+
+from .config import actor_class
+
+def _notify_subscribers(response, subscribers):
+    """Send the response to all the subscriber methods.
+
+    If any of the subscriber actors have stopped, remove them from the
+    subscriber set.
+    """
+    dead_subscribers = set()
+    for subscriber in subscribers:
+        try:
+            subscriber(response)
+        except pykka.ActorDeadError:
+            dead_subscribers.add(subscriber)
+    subscribers.difference_update(dead_subscribers)
+
+class RemotePollLoopActor(actor_class):
+    """Abstract actor class to regularly poll a remote service.
+
+    This actor sends regular requests to a remote service, and sends each
+    response to subscribers.  It takes care of error handling, and retrying
+    requests with exponential backoff.
+
+    To use this actor, define the _send_request method.  If you also
+    define an _item_key method, this class will support subscribing to
+    a specific item by key in responses.
+    """
+    def __init__(self, client, timer_actor, poll_wait=60, max_poll_wait=180):
+        super(RemotePollLoopActor, self).__init__()
+        self._client = client
+        self._timer = timer_actor
+        self._later = self.actor_ref.tell_proxy()
+        self._polling_started = False
+        self.min_poll_wait = poll_wait
+        self.max_poll_wait = max_poll_wait
+        self.poll_wait = self.min_poll_wait
+        self.all_subscribers = set()
+        self.key_subscribers = {}
+        if hasattr(self, '_item_key'):
+            self.subscribe_to = self._subscribe_to
+
+    def on_start(self):
+        self._logger = logging.getLogger("%s.%s" % (self.__class__.__name__, id(self.actor_urn[9:])))
+
+    def _start_polling(self):
+        if not self._polling_started:
+            self._polling_started = True
+            self._later.poll()
+
+    def subscribe(self, subscriber):
+        self.all_subscribers.add(subscriber)
+        self._logger.debug("%s subscribed to all events", subscriber.actor_ref.actor_urn)
+        self._start_polling()
+
+    # __init__ exposes this method to the proxy if the subclass defines
+    # _item_key.
+    def _subscribe_to(self, key, subscriber):
+        self.key_subscribers.setdefault(key, set()).add(subscriber)
+        self._logger.debug("%s subscribed to events for '%s'", subscriber.actor_ref.actor_urn, key)
+        self._start_polling()
+
+    def _send_request(self):
+        raise NotImplementedError("subclasses must implement request method")
+
+    def _got_response(self, response):
+        self.poll_wait = self.min_poll_wait
+        _notify_subscribers(response, self.all_subscribers)
+        if hasattr(self, '_item_key'):
+            items = {self._item_key(x): x for x in response}
+            for key, subscribers in self.key_subscribers.iteritems():
+                _notify_subscribers(items.get(key), subscribers)
+
+    def _got_error(self, error):
+        self.poll_wait = min(self.poll_wait * 2, self.max_poll_wait)
+        return "got error: {} - will try again in {} seconds".format(
+            error, self.poll_wait)
+
+    def is_common_error(self, exception):
+        return False
+
+    def poll(self, scheduled_start=None):
+        self._logger.debug("sending request")
+        start_time = time.time()
+        if scheduled_start is None:
+            scheduled_start = start_time
+        try:
+            response = self._send_request()
+        except Exception as error:
+            errmsg = self._got_error(error)
+            if self.is_common_error(error):
+                self._logger.warning(errmsg)
+            else:
+                self._logger.exception(errmsg)
+            next_poll = start_time + self.poll_wait
+        else:
+            self._got_response(response)
+            next_poll = scheduled_start + self.poll_wait
+            self._logger.info("got response with %d items in %s seconds, next poll at %s",
+                              len(response), (time.time() - scheduled_start),
+                              time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(next_poll)))
+        end_time = time.time()
+        if next_poll < end_time:  # We've drifted too much; start fresh.
+            next_poll = end_time + self.poll_wait
+        self._timer.schedule(next_poll, self._later.poll, next_poll)
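+
+# Illustrative sketch (not part of the original change): a minimal
+# RemotePollLoopActor subclass.  ExamplePoller and its client are
+# assumptions for the example, not part of Node Manager.
+#
+#     class ExamplePoller(RemotePollLoopActor):
+#         def _send_request(self):
+#             return self._client.list()   # any remote call returning items
+#
+#     poller = ExamplePoller.start(client, timer_actor, poll_wait=10).tell_proxy()
+#     poller.subscribe(some_actor_proxy.got_update)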
diff --git a/services/nodemanager/arvnodeman/computenode/__init__.py b/services/nodemanager/arvnodeman/computenode/__init__.py
new file mode 100644 (file)
index 0000000..b124c66
--- /dev/null
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import calendar
+import functools
+import itertools
+import re
+import time
+
+from ..config import CLOUD_ERRORS
+from ..status import tracker
+from libcloud.common.exceptions import BaseHTTPError, RateLimitReachedError
+
+ARVADOS_TIMEFMT = '%Y-%m-%dT%H:%M:%SZ'
+ARVADOS_TIMESUBSEC_RE = re.compile(r'(\.\d+)Z$')
+
+def arvados_node_fqdn(arvados_node, default_hostname='dynamic.compute'):
+    hostname = arvados_node.get('hostname') or default_hostname
+    return '{}.{}'.format(hostname, arvados_node['domain'])
+
+def arvados_node_mtime(node):
+    return arvados_timestamp(node['modified_at'])
+
+def arvados_timestamp(timestr):
+    subsec_match = ARVADOS_TIMESUBSEC_RE.search(timestr)
+    if subsec_match is None:
+        subsecs = 0.0
+    else:
+        subsecs = float(subsec_match.group(1))
+        timestr = timestr[:subsec_match.start()] + 'Z'
+    return calendar.timegm(time.strptime(timestr + 'UTC',
+                                         ARVADOS_TIMEFMT + '%Z')) + subsecs
+
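+# Worked example (illustrative, not part of the original change):
+# arvados_timestamp('2019-03-14T14:11:26.500Z') strips the '.500' matched by
+# ARVADOS_TIMESUBSEC_RE, parses the rest as UTC, and adds back 0.5 seconds.
+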
+def timestamp_fresh(timestamp, fresh_time):
+    return (time.time() - timestamp) < fresh_time
+
+def arvados_node_missing(arvados_node, fresh_time):
+    """Indicate if cloud node corresponding to the arvados
+    node is "missing".
+
+    If True, this means the node has not pinged the API server within the timeout
+    period.  If False, the ping is up to date.  If the node has never pinged,
+    returns None.
+    """
+    if arvados_node["last_ping_at"] is None:
+        return None
+    else:
+        return not timestamp_fresh(arvados_timestamp(arvados_node["last_ping_at"]), fresh_time)
+
+class RetryMixin(object):
+    """Retry decorator for an method that makes remote requests.
+
+    Use this function to decorate method, and pass in a tuple of exceptions to
+    catch.  If the original method raises a known cloud driver error, or any of
+    the given exception types, this decorator will either go into a
+    sleep-and-retry loop with exponential backoff either by sleeping (if
+    self._timer is None) or by scheduling retries of the method (if self._timer
+    is a timer actor.)
+
+    """
+    def __init__(self, retry_wait, max_retry_wait, logger, cloud, timer=None):
+        self.min_retry_wait = max(1, retry_wait)
+        self.max_retry_wait = max(self.min_retry_wait, max_retry_wait)
+        self.retry_wait = retry_wait
+        self._logger = logger
+        self._cloud = cloud
+        self._timer = timer
+
+    @staticmethod
+    def _retry(errors=()):
+        def decorator(orig_func):
+            @functools.wraps(orig_func)
+            def retry_wrapper(self, *args, **kwargs):
+                while True:
+                    should_retry = False
+                    try:
+                        ret = orig_func(self, *args, **kwargs)
+                    except RateLimitReachedError as error:
+                        # If retry-after is zero, continue with exponential
+                        # backoff.
+                        if error.retry_after != 0:
+                            self.retry_wait = error.retry_after
+                        should_retry = True
+                    except BaseHTTPError as error:
+                        if error.headers and error.headers.get("retry-after"):
+                            try:
+                                retry_after = int(error.headers["retry-after"])
+                                # If retry-after is zero, continue with
+                                # exponential backoff.
+                                if retry_after != 0:
+                                    self.retry_wait = retry_after
+                                should_retry = True
+                            except ValueError:
+                                self._logger.warning(
+                                    "Unrecognizable Retry-After header: %r",
+                                    error.headers["retry-after"],
+                                    exc_info=error)
+                        if error.code == 429 or error.code >= 500:
+                            should_retry = True
+                    except CLOUD_ERRORS as error:
+                        tracker.counter_add('cloud_errors')
+                        should_retry = True
+                    except errors as error:
+                        should_retry = True
+                    except Exception as error:
+                        # As a libcloud workaround for drivers that don't use
+                        # typed exceptions, consider bare Exception() objects
+                        # retryable.
+                        if type(error) is Exception:
+                            tracker.counter_add('cloud_errors')
+                            should_retry = True
+                    else:
+                        # No exception
+                        self.retry_wait = self.min_retry_wait
+                        return ret
+
+                    # Only got here if an exception was caught.  Now determine what to do about it.
+                    if not should_retry:
+                        self.retry_wait = self.min_retry_wait
+                        self._logger.warning(
+                            "Re-raising error (no retry): %s",
+                            error, exc_info=error)
+                        raise
+
+                    # Retry wait out of bounds?
+                    if self.retry_wait < self.min_retry_wait:
+                        self.retry_wait = self.min_retry_wait
+                    elif self.retry_wait > self.max_retry_wait:
+                        self.retry_wait = self.max_retry_wait
+
+                    self._logger.warning(
+                        "Client error: %s - %s %s seconds",
+                        error,
+                        "scheduling retry in" if self._timer else "sleeping",
+                        self.retry_wait,
+                        exc_info=error)
+
+                    if self._timer:
+                        start_time = time.time()
+                        # reschedule to be called again
+                        self._timer.schedule(start_time + self.retry_wait,
+                                             getattr(self._later,
+                                                     orig_func.__name__),
+                                             *args, **kwargs)
+                    else:
+                        # sleep on it.
+                        time.sleep(self.retry_wait)
+
+                    self.retry_wait = min(self.retry_wait * 2,
+                                          self.max_retry_wait)
+                    if self._timer:
+                        # expect to be called again by timer so don't loop
+                        return
+
+            return retry_wrapper
+        return decorator
+
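+# Illustrative sketch (not part of the original change): how a class might
+# combine RetryMixin with the _retry decorator.  ExampleClient and the body
+# of fetch() are assumptions for the example.
+#
+#     class ExampleClient(RetryMixin):
+#         @RetryMixin._retry((IOError,))
+#         def fetch(self):
+#             return self._cloud.list_nodes()  # retried on IOError/cloud errors
+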
+class ShutdownTimer(object):
+    """Keep track of a cloud node's shutdown windows.
+
+    Instantiate this class with a timestamp of when a cloud node started,
+    and a list of durations (in minutes) of when the node must not and may
+    be shut down, alternating.  The class will tell you when a shutdown
+    window is open, and when the next open window will start.
+    """
+    def __init__(self, start_time, shutdown_windows):
+        # The implementation is easiest if we have an even number of windows,
+        # because then windows always alternate between open and closed.
+        # Rig that up: calculate the first shutdown window based on what's
+        # passed in.  Then, if we were given an odd number of windows, merge
+        # that first window into the last one, since they both represent
+        # closed state.
+        first_window = shutdown_windows[0]
+        shutdown_windows = list(shutdown_windows[1:])
+        self._next_opening = start_time + (60 * first_window)
+        if len(shutdown_windows) % 2:
+            shutdown_windows.append(first_window)
+        else:
+            shutdown_windows[-1] += first_window
+        self.shutdown_windows = itertools.cycle([60 * n
+                                                 for n in shutdown_windows])
+        self._open_start = self._next_opening
+        self._open_for = next(self.shutdown_windows)
+
+    def _advance_opening(self):
+        while self._next_opening < time.time():
+            self._open_start = self._next_opening
+            self._next_opening += self._open_for + next(self.shutdown_windows)
+            self._open_for = next(self.shutdown_windows)
+
+    def next_opening(self):
+        self._advance_opening()
+        return self._next_opening
+
+    def window_open(self):
+        self._advance_opening()
+        return 0 < (time.time() - self._open_start) < self._open_for
diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
new file mode 100644 (file)
index 0000000..77c515d
--- /dev/null
@@ -0,0 +1,536 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import functools
+import logging
+import time
+import re
+
+import libcloud.common.types as cloud_types
+from libcloud.common.exceptions import BaseHTTPError
+
+import pykka
+
+from .. import \
+    arvados_node_fqdn, arvados_node_mtime, arvados_timestamp, timestamp_fresh, \
+    arvados_node_missing, RetryMixin
+from ...clientactor import _notify_subscribers
+from ... import config
+from ... import status
+from .transitions import transitions
+
+QuotaExceeded = "QuotaExceeded"
+
+class ComputeNodeStateChangeBase(config.actor_class, RetryMixin):
+    """Base class for actors that change a compute node's state.
+
+    This base class takes care of retrying changes and notifying
+    subscribers when the change is finished.
+    """
+    def __init__(self, cloud_client, arvados_client, timer_actor,
+                 retry_wait, max_retry_wait):
+        super(ComputeNodeStateChangeBase, self).__init__()
+        RetryMixin.__init__(self, retry_wait, max_retry_wait,
+                            None, cloud_client, timer_actor)
+        self._later = self.actor_ref.tell_proxy()
+        self._arvados = arvados_client
+        self.subscribers = set()
+
+    def _set_logger(self):
+        self._logger = logging.getLogger("%s.%s" % (self.__class__.__name__, self.actor_urn[33:]))
+
+    def on_start(self):
+        self._set_logger()
+
+    def _finished(self):
+        if self.subscribers is None:
+            raise Exception("Actor tried to finish twice")
+        _notify_subscribers(self.actor_ref.proxy(), self.subscribers)
+        self.subscribers = None
+        self._logger.info("finished")
+
+    def subscribe(self, subscriber):
+        if self.subscribers is None:
+            try:
+                subscriber(self.actor_ref.proxy())
+            except pykka.ActorDeadError:
+                pass
+        else:
+            self.subscribers.add(subscriber)
+
+    def _clean_arvados_node(self, arvados_node, explanation):
+        return self._arvados.nodes().update(
+            uuid=arvados_node['uuid'],
+            body={'hostname': None,
+                  'ip_address': None,
+                  'slot_number': None,
+                  'first_ping_at': None,
+                  'last_ping_at': None,
+                  'properties': {},
+                  'info': {'ec2_instance_id': None,
+                           'last_action': explanation}},
+            ).execute()
+
+    @staticmethod
+    def _finish_on_exception(orig_func):
+        @functools.wraps(orig_func)
+        def finish_wrapper(self, *args, **kwargs):
+            try:
+                return orig_func(self, *args, **kwargs)
+            except Exception as error:
+                self._logger.error("Actor error %s", error)
+                self._finished()
+        return finish_wrapper
+
+
+class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
+    """Actor to create and set up a cloud compute node.
+
+    This actor prepares an Arvados node record for a new compute node
+    (either creating one or cleaning one passed in), then boots the
+    actual compute node.  It notifies subscribers when the cloud node
+    is successfully created (the last step in the process for Node
+    Manager to handle).
+    """
+    def __init__(self, timer_actor, arvados_client, cloud_client,
+                 cloud_size, arvados_node=None,
+                 retry_wait=1, max_retry_wait=180):
+        super(ComputeNodeSetupActor, self).__init__(
+            cloud_client, arvados_client, timer_actor,
+            retry_wait, max_retry_wait)
+        self.cloud_size = cloud_size
+        self.arvados_node = None
+        self.cloud_node = None
+        self.error = None
+        if arvados_node is None:
+            self._later.create_arvados_node()
+        else:
+            self._later.prepare_arvados_node(arvados_node)
+
+    @ComputeNodeStateChangeBase._finish_on_exception
+    @RetryMixin._retry(config.ARVADOS_ERRORS)
+    def create_arvados_node(self):
+        self.arvados_node = self._arvados.nodes().create(
+            body={}, assign_slot=True).execute()
+        self._later.create_cloud_node()
+
+    @ComputeNodeStateChangeBase._finish_on_exception
+    @RetryMixin._retry(config.ARVADOS_ERRORS)
+    def prepare_arvados_node(self, node):
+        self._clean_arvados_node(node, "Prepared by Node Manager")
+        self.arvados_node = self._arvados.nodes().update(
+            uuid=node['uuid'], body={}, assign_slot=True).execute()
+        self._later.create_cloud_node()
+
+    @ComputeNodeStateChangeBase._finish_on_exception
+    @RetryMixin._retry()
+    def create_cloud_node(self):
+        self._logger.info("Sending create_node request for node size %s.",
+                          self.cloud_size.id)
+        try:
+            self.cloud_node = self._cloud.create_node(self.cloud_size,
+                                                      self.arvados_node)
+        except BaseHTTPError as e:
+            if e.code == 429 or "RequestLimitExceeded" in e.message:
+                # Don't consider API rate limits to be quota errors.
+                # re-raise so the Retry logic applies.
+                raise
+
+            # The set of possible error codes / messages isn't documented for
+            # all clouds, so use a keyword heuristic to determine if the
+            # failure is likely due to a quota.
+            if re.search(r'(exceed|quota|limit)', e.message, re.I):
+                self.error = QuotaExceeded
+                self._logger.warning("Quota exceeded: %s", e)
+                self._finished()
+                return
+            else:
+                # Something else happened, re-raise so the Retry logic applies.
+                raise
+        except Exception as e:
+            # Anything else propagates unchanged so the Retry logic applies.
+            raise
+
+        # The information included in the node size object we get from libcloud
+        # is inconsistent between cloud drivers.  Replace the libcloud NodeSize
+        # object with a compatible CloudSizeWrapper object which merges the size
+        # info reported from the cloud with size information from the
+        # configuration file.
+        self.cloud_node.size = self.cloud_size
+
+        self._logger.info("Cloud node %s created.", self.cloud_node.id)
+        self._later.update_arvados_node_properties()
+
+    @ComputeNodeStateChangeBase._finish_on_exception
+    @RetryMixin._retry(config.ARVADOS_ERRORS)
+    def update_arvados_node_properties(self):
+        """Tell Arvados some details about the cloud node.
+
+        Currently we only include size/price from our request, which
+        we already knew before create_cloud_node(), but doing it here
+        gives us an opportunity to provide more detail from
+        self.cloud_node, too.
+        """
+        self.arvados_node['properties']['cloud_node'] = {
+            # Note this 'size' is the node size we asked the cloud
+            # driver to create -- not necessarily equal to the size
+            # reported by the cloud driver for the node that was
+            # created.
+            'size': self.cloud_size.id,
+            'price': self.cloud_size.price,
+        }
+        self.arvados_node = self._arvados.nodes().update(
+            uuid=self.arvados_node['uuid'],
+            body={'properties': self.arvados_node['properties']},
+        ).execute()
+        self._logger.info("%s updated properties.", self.arvados_node['uuid'])
+        self._later.post_create()
+
+    @RetryMixin._retry()
+    def post_create(self):
+        self._cloud.post_create_node(self.cloud_node)
+        self._logger.info("%s post-create work done.", self.cloud_node.id)
+        self._finished()
+
+    def stop_if_no_cloud_node(self):
+        if self.cloud_node is not None:
+            return False
+        self.stop()
+        return True
+
+
+class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
+    """Actor to shut down a compute node.
+
+    This actor simply destroys a cloud node, retrying as needed.
+    """
+    # Reasons for a shutdown to be cancelled.
+    WINDOW_CLOSED = "shutdown window closed"
+    DESTROY_FAILED = "destroy_node failed"
+
+    def __init__(self, timer_actor, cloud_client, arvados_client, node_monitor,
+                 cancellable=True, retry_wait=1, max_retry_wait=180):
+        # If a ShutdownActor is cancellable, it will ask the
+        # ComputeNodeMonitorActor if it's still eligible before taking each
+        # action, and stop the shutdown process if the node is no longer
+        # eligible.  Normal shutdowns based on job demand should be
+        # cancellable; shutdowns based on node misbehavior should not.
+        super(ComputeNodeShutdownActor, self).__init__(
+            cloud_client, arvados_client, timer_actor,
+            retry_wait, max_retry_wait)
+        self._monitor = node_monitor.proxy()
+        self.cloud_node = self._monitor.cloud_node.get()
+        self.cancellable = cancellable
+        self.cancel_reason = None
+        self.success = None
+
+    def _set_logger(self):
+        self._logger = logging.getLogger("%s.%s.%s" % (self.__class__.__name__, self.actor_urn[33:], self.cloud_node.name))
+
+    def on_start(self):
+        super(ComputeNodeShutdownActor, self).on_start()
+        self._later.shutdown_node()
+
+    def _arvados_node(self):
+        return self._monitor.arvados_node.get()
+
+    def _finished(self, success_flag=None):
+        if success_flag is not None:
+            self.success = success_flag
+        return super(ComputeNodeShutdownActor, self)._finished()
+
+    def cancel_shutdown(self, reason, **kwargs):
+        if not self.cancellable:
+            return False
+        if self.cancel_reason is not None:
+            # already cancelled
+            return False
+        self.cancel_reason = reason
+        self._logger.info("Shutdown cancelled: %s.", reason)
+        self._finished(success_flag=False)
+        return True
+
+    def _cancel_on_exception(orig_func):
+        @functools.wraps(orig_func)
+        def finish_wrapper(self, *args, **kwargs):
+            try:
+                return orig_func(self, *args, **kwargs)
+            except Exception as error:
+                self._logger.error("Actor error %s", error)
+                self._logger.debug("", exc_info=True)
+                self._later.cancel_shutdown("Unhandled exception %s" % error, try_resume=False)
+        return finish_wrapper
+
+    @_cancel_on_exception
+    def shutdown_node(self):
+        if self.cancel_reason is not None:
+            # already cancelled
+            return
+        if self.cancellable:
+            self._logger.info("Checking that node is still eligible for shutdown")
+            eligible, reason = self._monitor.shutdown_eligible().get()
+            if not eligible:
+                self.cancel_shutdown("No longer eligible for shut down because %s" % reason,
+                                     try_resume=True)
+                return
+        # If boot failed, count the event
+        if self._monitor.get_state().get() == 'unpaired':
+            status.tracker.counter_add('boot_failures')
+        self._destroy_node()
+
+    def _destroy_node(self):
+        self._logger.info("Starting shutdown")
+        arv_node = self._arvados_node()
+        if self._cloud.destroy_node(self.cloud_node):
+            self.cancellable = False
+            self._logger.info("Shutdown success")
+            if arv_node:
+                self._later.clean_arvados_node(arv_node)
+            else:
+                self._finished(success_flag=True)
+        else:
+            self.cancel_shutdown(self.DESTROY_FAILED, try_resume=False)
+
+    @ComputeNodeStateChangeBase._finish_on_exception
+    @RetryMixin._retry(config.ARVADOS_ERRORS)
+    def clean_arvados_node(self, arvados_node):
+        self._clean_arvados_node(arvados_node, "Shut down by Node Manager")
+        self._finished(success_flag=True)
+
+
+class ComputeNodeUpdateActor(config.actor_class, RetryMixin):
+    """Actor to dispatch one-off cloud management requests.
+
+    This actor receives requests for small cloud updates, and
+    dispatches them to a real driver.  ComputeNodeMonitorActors use
+    this to perform maintenance tasks on themselves.  Having a
+    dedicated actor for this gives us the opportunity to control the
+    flow of requests; e.g., by backing off when errors occur.
+    """
+    def __init__(self, cloud_factory, timer_actor, max_retry_wait=180):
+        super(ComputeNodeUpdateActor, self).__init__()
+        RetryMixin.__init__(self, 1, max_retry_wait,
+                            None, cloud_factory(), timer_actor)
+        self._cloud = cloud_factory()
+        self._later = self.actor_ref.tell_proxy()
+
+    def _set_logger(self):
+        self._logger = logging.getLogger("%s.%s" % (self.__class__.__name__, self.actor_urn[33:]))
+
+    def on_start(self):
+        self._set_logger()
+
+    @RetryMixin._retry()
+    def sync_node(self, cloud_node, arvados_node):
+        if self._cloud.node_fqdn(cloud_node) != arvados_node_fqdn(arvados_node):
+            return self._cloud.sync_node(cloud_node, arvados_node)
+
+
+class ComputeNodeMonitorActor(config.actor_class):
+    """Actor to manage a running compute node.
+
+    This actor gets updates about a compute node's cloud and Arvados records.
+    It uses this information to notify subscribers when the node is eligible
+    for shutdown.
+    """
+    def __init__(self, cloud_node, cloud_node_start_time, shutdown_timer,
+                 timer_actor, update_actor, cloud_client,
+                 arvados_node=None, poll_stale_after=600, node_stale_after=3600,
+                 boot_fail_after=1800, consecutive_idle_count=0):
+        super(ComputeNodeMonitorActor, self).__init__()
+        self._later = self.actor_ref.tell_proxy()
+        self._shutdowns = shutdown_timer
+        self._timer = timer_actor
+        self._update = update_actor
+        self._cloud = cloud_client
+        self.cloud_node = cloud_node
+        self.cloud_node_start_time = cloud_node_start_time
+        self.poll_stale_after = poll_stale_after
+        self.node_stale_after = node_stale_after
+        self.boot_fail_after = boot_fail_after
+        self.subscribers = set()
+        self.arvados_node = None
+        self.consecutive_idle_count = consecutive_idle_count
+        self.consecutive_idle = 0
+        self._later.update_arvados_node(arvados_node)
+        self.last_shutdown_opening = None
+        self._later.consider_shutdown()
+
+    def _set_logger(self):
+        self._logger = logging.getLogger("%s.%s.%s" % (self.__class__.__name__, self.actor_urn[33:], self.cloud_node.name))
+
+    def on_start(self):
+        self._set_logger()
+        self._timer.schedule(self.cloud_node_start_time + self.boot_fail_after, self._later.consider_shutdown)
+
+    def subscribe(self, subscriber):
+        self.subscribers.add(subscriber)
+
+    def _debug(self, msg, *args):
+        self._logger.debug(msg, *args)
+
+    def get_state(self):
+        """Get node state, one of ['unpaired', 'busy', 'idle', 'down']."""
+
+        # If this node is not associated with an Arvados node, return
+        # 'unpaired' if we're in the boot grace period, and 'down' if not,
+        # so it isn't counted towards usable nodes.
+        if self.arvados_node is None:
+            if timestamp_fresh(self.cloud_node_start_time,
+                               self.boot_fail_after):
+                return 'unpaired'
+            else:
+                return 'down'
+
+        state = self.arvados_node['crunch_worker_state']
+
+        # If state information is not available because it is missing or the
+        # record is stale, return 'down'.
+        if not state or not timestamp_fresh(arvados_node_mtime(self.arvados_node),
+                                            self.node_stale_after):
+            state = 'down'
+
+        # There's a window between when a node pings for the first time and the
+        # value of 'slurm_state' is synchronized by crunch-dispatch.  In this
+        # window, the node will still report as 'down'.  Check that
+        # first_ping_at is truthy and consider the node 'idle' during the
+        # initial boot grace period.
+        if (state == 'down' and
+            self.arvados_node['first_ping_at'] and
+            timestamp_fresh(self.cloud_node_start_time,
+                            self.boot_fail_after) and
+            not self._cloud.broken(self.cloud_node)):
+            state = 'idle'
+
+        # "missing" means last_ping_at is stale, this should be
+        # considered "down"
+        if arvados_node_missing(self.arvados_node, self.node_stale_after):
+            state = 'down'
+
+        # Turns out using 'job_uuid' this way is a bad idea.  The node record
+        # is assigned the job_uuid before the job is locked (which removes it
+        # from the queue), which means the job is double-counted: it is both
+        # in the wishlist and keeping a node busy.  The end result is excess
+        # nodes being booted.
+        #if state == 'idle' and self.arvados_node['job_uuid']:
+        #    state = 'busy'
+
+        # Update idle node times tracker
+        if state == 'idle':
+            status.tracker.idle_in(self.arvados_node['hostname'])
+        else:
+            status.tracker.idle_out(self.arvados_node['hostname'])
+
+        return state
+
+    def in_state(self, *states):
+        return self.get_state() in states
+
+    def shutdown_eligible(self):
+        """Determine if node is candidate for shut down.
+
+        Returns a tuple of (boolean, string) where the first value is whether
+        the node is candidate for shut down, and the second value is the
+        reason for the decision.
+        """
+
+        # If this node's size is invalid (because it has a stale arvados_node_size
+        # tag), return True so that it's properly shut down.
+        if self.cloud_node.size.id == 'invalid':
+            return (True, "node's size tag '%s' not recognizable" % (self.cloud_node.extra['arvados_node_size'],))
+
+        # Collect states and then consult state transition table whether we
+        # should shut down.  Possible states are:
+        # crunch_worker_state = ['unpaired', 'busy', 'idle', 'down']
+        # window = ["open", "closed"]
+        # boot_grace = ["boot wait", "boot exceeded"]
+        # idle_grace = ["not idle", "idle wait", "idle exceeded"]
+
+        if self.arvados_node and not timestamp_fresh(arvados_node_mtime(self.arvados_node), self.node_stale_after):
+            return (False, "node state is stale")
+
+        crunch_worker_state = self.get_state()
+
+        window = "open" if self._shutdowns.window_open() else "closed"
+
+        if timestamp_fresh(self.cloud_node_start_time, self.boot_fail_after):
+            boot_grace = "boot wait"
+        else:
+            boot_grace = "boot exceeded"
+
+        if crunch_worker_state == "idle":
+            # Must report as "idle" at least "consecutive_idle_count" times
+            if self.consecutive_idle < self.consecutive_idle_count:
+                idle_grace = 'idle wait'
+            else:
+                idle_grace = 'idle exceeded'
+        else:
+            idle_grace = 'not idle'
+
+        node_state = (crunch_worker_state, window, boot_grace, idle_grace)
+        t = transitions[node_state]
+        if t is not None:
+            # yes, shutdown eligible
+            return (True, "node state is %s" % (node_state,))
+        else:
+            # no, return a reason
+            return (False, "node state is %s" % (node_state,))
+
+    def consider_shutdown(self):
+        try:
+            eligible, reason = self.shutdown_eligible()
+            next_opening = self._shutdowns.next_opening()
+            if eligible:
+                self._debug("Suggesting shutdown because %s", reason)
+                _notify_subscribers(self.actor_ref.proxy(), self.subscribers)
+            else:
+                self._debug("Not eligible for shut down because %s", reason)
+
+                if self.last_shutdown_opening != next_opening:
+                    self._debug("Shutdown window closed.  Next at %s.",
+                                time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(next_opening)))
+                    self._timer.schedule(next_opening, self._later.consider_shutdown)
+                    self.last_shutdown_opening = next_opening
+        except Exception:
+            self._logger.exception("Unexpected exception")
+
+    def offer_arvados_pair(self, arvados_node):
+        first_ping_s = arvados_node.get('first_ping_at')
+        if (self.arvados_node is not None) or (not first_ping_s):
+            return None
+        elif ((arvados_node['info'].get('ec2_instance_id') == self._cloud.node_id(self.cloud_node)) and
+              (arvados_timestamp(first_ping_s) >= self.cloud_node_start_time)):
+            self._later.update_arvados_node(arvados_node)
+            return self.cloud_node.id
+        else:
+            return None
+
+    def update_cloud_node(self, cloud_node):
+        if cloud_node is not None:
+            self.cloud_node = cloud_node
+            self._later.consider_shutdown()
+
+    def update_arvados_node(self, arvados_node):
+        """Called when the latest Arvados node record is retrieved.
+
+        Calls the updater's sync_node() method.
+
+        """
+        # This method is a little unusual in the way it just fires off the
+        # request without checking the result or retrying errors.  That's
+        # because this update happens every time we reload the Arvados node
+        # list: if a previous sync attempt failed, we'll see that the names
+        # are out of sync and just try again.  ComputeNodeUpdateActor has
+        # the logic to throttle those effective retries when there's trouble.
+        if arvados_node is not None:
+            self.arvados_node = arvados_node
+            self._update.sync_node(self.cloud_node, self.arvados_node)
+            if self.arvados_node['crunch_worker_state'] == "idle":
+                self.consecutive_idle += 1
+            else:
+                self.consecutive_idle = 0
+            self._later.consider_shutdown()
diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py b/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
new file mode 100644 (file)
index 0000000..5b7785a
--- /dev/null
@@ -0,0 +1,118 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import subprocess32 as subprocess
+import time
+
+from . import ComputeNodeMonitorActor
+from . import ComputeNodeSetupActor as SetupActorBase
+from . import ComputeNodeShutdownActor as ShutdownActorBase
+from . import ComputeNodeUpdateActor as UpdateActorBase
+from .. import RetryMixin
+
+class SlurmMixin(object):
+    SLURM_END_STATES = frozenset(['down\n', 'down*\n',
+                                  'drain\n', 'drain*\n',
+                                  'fail\n', 'fail*\n'])
+    SLURM_DRAIN_STATES = frozenset(['drain\n', 'drng\n'])
+
+    def _update_slurm_node(self, nodename, updates):
+        cmd = ['scontrol', 'update', 'NodeName=' + nodename] + updates
+        try:
+            subprocess.check_output(cmd)
+        except Exception:
+            self._logger.error(
+                "SLURM update %r failed", cmd, exc_info=True)
+
+    def _update_slurm_size_attrs(self, nodename, size):
+        self._update_slurm_node(nodename, [
+            'Weight=%i' % int(size.price * 1000),
+            'Features=instancetype=' + size.id,
+        ])
+
+    def _get_slurm_state(self, nodename):
+        return subprocess.check_output(['sinfo', '--noheader', '-o', '%t', '-n', nodename])
+
+
+class ComputeNodeSetupActor(SlurmMixin, SetupActorBase):
+    def create_cloud_node(self):
+        hostname = self.arvados_node.get("hostname")
+        if hostname:
+            self._update_slurm_size_attrs(hostname, self.cloud_size)
+        return super(ComputeNodeSetupActor, self).create_cloud_node()
+
+
+class ComputeNodeShutdownActor(SlurmMixin, ShutdownActorBase):
+    def on_start(self):
+        arv_node = self._arvados_node()
+        if arv_node is None:
+            self._nodename = None
+            return super(ComputeNodeShutdownActor, self).on_start()
+        else:
+            self._set_logger()
+            self._nodename = arv_node['hostname']
+            self._logger.info("Draining SLURM node %s", self._nodename)
+            self._later.issue_slurm_drain()
+
+    @RetryMixin._retry((subprocess.CalledProcessError, OSError))
+    def cancel_shutdown(self, reason, try_resume=True):
+        if self._nodename:
+            if try_resume and self._get_slurm_state(self._nodename) in self.SLURM_DRAIN_STATES:
+                # Resume from "drng" or "drain"
+                self._update_slurm_node(self._nodename, ['State=RESUME'])
+            else:
+                # Node is in a state such as 'idle' or 'alloc' so don't
+                # try to resume it because that will just raise an error.
+                pass
+        return super(ComputeNodeShutdownActor, self).cancel_shutdown(reason)
+
+    @RetryMixin._retry((subprocess.CalledProcessError, OSError))
+    def issue_slurm_drain(self):
+        if self.cancel_reason is not None:
+            return
+        if self._nodename:
+            self._update_slurm_node(self._nodename, [
+                'State=DRAIN', 'Reason=Node Manager shutdown'])
+            self._logger.info("Waiting for SLURM node %s to drain", self._nodename)
+            self._later.await_slurm_drain()
+        else:
+            self._later.shutdown_node()
+
+    @RetryMixin._retry((subprocess.CalledProcessError, OSError))
+    def await_slurm_drain(self):
+        if self.cancel_reason is not None:
+            return
+        output = self._get_slurm_state(self._nodename)
+        if output in ("drng\n", "alloc\n", "drng*\n", "alloc*\n"):
+            self._timer.schedule(time.time() + 10,
+                                 self._later.await_slurm_drain)
+        elif output in ("idle\n",):
+            # Not in "drng" but idle, don't shut down
+            self.cancel_shutdown("slurm state is %s" % output.strip(), try_resume=False)
+        else:
+            # any other state.
+            self._later.shutdown_node()
+
+    def _destroy_node(self):
+        if self._nodename:
+            self._update_slurm_node(self._nodename, [
+                'State=DOWN', 'Reason=Node Manager shutdown'])
+        super(ComputeNodeShutdownActor, self)._destroy_node()
+
+
+class ComputeNodeUpdateActor(SlurmMixin, UpdateActorBase):
+    def sync_node(self, cloud_node, arvados_node):
+        """Keep SLURM's node properties up to date."""
+        hostname = arvados_node.get("hostname")
+        features = arvados_node.get("slurm_node_features", "").split(",")
+        sizefeature = "instancetype=" + cloud_node.size.id
+        if hostname and sizefeature not in features:
+            # This probably means SLURM has restarted and lost our
+            # dynamically configured node weights and features.
+            self._update_slurm_size_attrs(hostname, cloud_node.size)
+        return super(ComputeNodeUpdateActor, self).sync_node(
+            cloud_node, arvados_node)
diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/transitions.py b/services/nodemanager/arvnodeman/computenode/dispatch/transitions.py
new file mode 100644 (file)
index 0000000..93f50c1
--- /dev/null
@@ -0,0 +1,69 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+transitions = {
+ ('busy', 'closed', 'boot exceeded', 'idle exceeded'): None,
+ ('busy', 'closed', 'boot exceeded', 'idle wait'): None,
+ ('busy', 'closed', 'boot exceeded', 'not idle'): None,
+ ('busy', 'closed', 'boot wait', 'idle exceeded'): None,
+ ('busy', 'closed', 'boot wait', 'idle wait'): None,
+ ('busy', 'closed', 'boot wait', 'not idle'): None,
+ ('busy', 'open', 'boot exceeded', 'idle exceeded'): None,
+ ('busy', 'open', 'boot exceeded', 'idle wait'): None,
+ ('busy', 'open', 'boot exceeded', 'not idle'): None,
+ ('busy', 'open', 'boot wait', 'idle exceeded'): None,
+ ('busy', 'open', 'boot wait', 'idle wait'): None,
+ ('busy', 'open', 'boot wait', 'not idle'): None,
+
+ ('down', 'closed', 'boot exceeded', 'idle exceeded'): "START_SHUTDOWN",
+ ('down', 'closed', 'boot exceeded', 'idle wait'): "START_SHUTDOWN",
+ ('down', 'closed', 'boot exceeded', 'not idle'): "START_SHUTDOWN",
+ ('down', 'closed', 'boot wait', 'idle exceeded'): None,
+ ('down', 'closed', 'boot wait', 'idle wait'): None,
+ ('down', 'closed', 'boot wait', 'not idle'): None,
+ ('down', 'open', 'boot exceeded', 'idle exceeded'): "START_SHUTDOWN",
+ ('down', 'open', 'boot exceeded', 'idle wait'): "START_SHUTDOWN",
+ ('down', 'open', 'boot exceeded', 'not idle'): "START_SHUTDOWN",
+ ('down', 'open', 'boot wait', 'idle exceeded'): "START_SHUTDOWN",
+ ('down', 'open', 'boot wait', 'idle wait'): "START_SHUTDOWN",
+ ('down', 'open', 'boot wait', 'not idle'): "START_SHUTDOWN",
+
+ ('idle', 'closed', 'boot exceeded', 'idle exceeded'): None,
+ ('idle', 'closed', 'boot exceeded', 'idle wait'): None,
+ ('idle', 'closed', 'boot exceeded', 'not idle'): None,
+ ('idle', 'closed', 'boot wait', 'idle exceeded'): None,
+ ('idle', 'closed', 'boot wait', 'idle wait'): None,
+ ('idle', 'closed', 'boot wait', 'not idle'): None,
+ ('idle', 'open', 'boot exceeded', 'idle exceeded'): "START_DRAIN",
+ ('idle', 'open', 'boot exceeded', 'idle wait'): None,
+ ('idle', 'open', 'boot exceeded', 'not idle'): None,
+ ('idle', 'open', 'boot wait', 'idle exceeded'): "START_DRAIN",
+ ('idle', 'open', 'boot wait', 'idle wait'): None,
+ ('idle', 'open', 'boot wait', 'not idle'): None,
+
+ ('unpaired', 'closed', 'boot exceeded', 'idle exceeded'): "START_SHUTDOWN",
+ ('unpaired', 'closed', 'boot exceeded', 'idle wait'): "START_SHUTDOWN",
+ ('unpaired', 'closed', 'boot exceeded', 'not idle'): "START_SHUTDOWN",
+ ('unpaired', 'closed', 'boot wait', 'idle exceeded'): None,
+ ('unpaired', 'closed', 'boot wait', 'idle wait'): None,
+ ('unpaired', 'closed', 'boot wait', 'not idle'): None,
+ ('unpaired', 'open', 'boot exceeded', 'idle exceeded'): "START_SHUTDOWN",
+ ('unpaired', 'open', 'boot exceeded', 'idle wait'): "START_SHUTDOWN",
+ ('unpaired', 'open', 'boot exceeded', 'not idle'): "START_SHUTDOWN",
+ ('unpaired', 'open', 'boot wait', 'idle exceeded'): None,
+ ('unpaired', 'open', 'boot wait', 'idle wait'): None,
+ ('unpaired', 'open', 'boot wait', 'not idle'): None,
+
+ ('fail', 'closed', 'boot exceeded', 'idle exceeded'): "START_SHUTDOWN",
+ ('fail', 'closed', 'boot exceeded', 'idle wait'): "START_SHUTDOWN",
+ ('fail', 'closed', 'boot exceeded', 'not idle'): "START_SHUTDOWN",
+ ('fail', 'closed', 'boot wait', 'idle exceeded'): "START_SHUTDOWN",
+ ('fail', 'closed', 'boot wait', 'idle wait'): "START_SHUTDOWN",
+ ('fail', 'closed', 'boot wait', 'not idle'): "START_SHUTDOWN",
+ ('fail', 'open', 'boot exceeded', 'idle exceeded'): "START_SHUTDOWN",
+ ('fail', 'open', 'boot exceeded', 'idle wait'): "START_SHUTDOWN",
+ ('fail', 'open', 'boot exceeded', 'not idle'): "START_SHUTDOWN",
+ ('fail', 'open', 'boot wait', 'idle exceeded'): "START_SHUTDOWN",
+ ('fail', 'open', 'boot wait', 'idle wait'): "START_SHUTDOWN",
+ ('fail', 'open', 'boot wait', 'not idle'): "START_SHUTDOWN"}
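+
+# Illustrative lookup (not part of the original change): a node that is
+# 'idle', inside an open shutdown window, past the boot grace period, and
+# past the idle grace period maps to a drain request:
+#
+#     transitions[('idle', 'open', 'boot exceeded', 'idle exceeded')]
+#     # -> "START_DRAIN"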
diff --git a/services/nodemanager/arvnodeman/computenode/driver/__init__.py b/services/nodemanager/arvnodeman/computenode/driver/__init__.py
new file mode 100644 (file)
index 0000000..48d19f5
--- /dev/null
@@ -0,0 +1,253 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import logging
+from operator import attrgetter
+
+import libcloud.common.types as cloud_types
+from libcloud.compute.base import NodeDriver, NodeAuthSSHKey
+
+from ...config import CLOUD_ERRORS
+from ...status import tracker
+from .. import RetryMixin
+
+class BaseComputeNodeDriver(RetryMixin):
+    """Abstract base class for compute node drivers.
+
+    libcloud drivers abstract away many of the differences between
+    cloud providers, but managing compute nodes requires some
+    cloud-specific features (e.g., keeping track of node FQDNs and
+    boot times).  Compute node drivers are responsible for translating
+    the node manager's cloud requests to a specific cloud's
+    vocabulary.
+
+    Subclasses must implement arvados_create_kwargs, sync_node,
+    node_fqdn, and node_start_time.
+    """
+
+
+    @RetryMixin._retry()
+    def _create_driver(self, driver_class, **auth_kwargs):
+        return driver_class(**auth_kwargs)
+
+    @RetryMixin._retry()
+    def sizes(self):
+        if self._sizes is None:
+            self._sizes = {sz.id: sz for sz in self.real.list_sizes()}
+        return self._sizes
+
+    def __init__(self, auth_kwargs, list_kwargs, create_kwargs,
+                 driver_class, retry_wait=1, max_retry_wait=180):
+        """Base initializer for compute node drivers.
+
+        Arguments:
+        * auth_kwargs: A dictionary of arguments that are passed into the
+          driver_class constructor to instantiate a libcloud driver.
+        * list_kwargs: A dictionary of arguments that are passed to the
+          libcloud driver's list_nodes method to return the list of compute
+          nodes.
+        * create_kwargs: A dictionary of arguments that are passed to the
+          libcloud driver's create_node method to create a new compute node.
+        * driver_class: The class of a libcloud driver to use.
+        """
+
+        super(BaseComputeNodeDriver, self).__init__(retry_wait, max_retry_wait,
+                                         logging.getLogger(self.__class__.__name__),
+                                         type(self),
+                                         None)
+        self.real = self._create_driver(driver_class, **auth_kwargs)
+        self.list_kwargs = list_kwargs
+        self.create_kwargs = create_kwargs
+        # Transform entries in create_kwargs.  For each key K, if this class
+        # has an _init_K method, remove the entry and call _init_K with the
+        # corresponding value.  If _init_K returns None, the entry stays out
+        # of the dictionary (we expect we're holding the value somewhere
+        # else, like an instance variable).  Otherwise, _init_K returns a
+        # key-value tuple pair, and we add that entry to create_kwargs.
+        for key in self.create_kwargs.keys():
+            init_method = getattr(self, '_init_' + key, None)
+            if init_method is not None:
+                new_pair = init_method(self.create_kwargs.pop(key))
+                if new_pair is not None:
+                    self.create_kwargs[new_pair[0]] = new_pair[1]
+
+        self._sizes = None
+
+    def _init_ping_host(self, ping_host):
+        self.ping_host = ping_host
+
+    def _init_ssh_key(self, filename):
+        with open(filename) as ssh_file:
+            key = NodeAuthSSHKey(ssh_file.read())
+        return 'auth', key
+
+    def search_for_now(self, term, list_method, key=attrgetter('id'), **kwargs):
+        """Return one matching item from a list of cloud objects.
+
+        Raises ValueError if the number of matching objects is not exactly 1.
+
+        Arguments:
+        * term: The value that identifies a matching item.
+        * list_method: A string that names the method to call for a
+          list of objects.
+        * key: A function that accepts a cloud object and returns the
+          value to match against `term` for each item.  Returns the
+          object's 'id' attribute by default.
+        """
+        try:
+            list_func = getattr(self, list_method)
+        except AttributeError:
+            list_func = getattr(self.real, list_method)
+        items = list_func(**kwargs)
+        results = [item for item in items if key(item) == term]
+        count = len(results)
+        if count != 1:
+            raise ValueError("{} returned {} results for {!r}".format(
+                    list_method, count, term))
+        return results[0]
+
+    def search_for(self, term, list_method, key=attrgetter('id'), **kwargs):
+        """Return one cached matching item from a list of cloud objects.
+
+        See search_for_now() for details of arguments and exceptions.
+        This method caches results, so it's good for finding static cloud objects
+        like node sizes, regions, etc.
+        """
+        cache_key = (list_method, term)
+        if cache_key not in self.SEARCH_CACHE:
+            self.SEARCH_CACHE[cache_key] = self.search_for_now(
+                term, list_method, key, **kwargs)
+        return self.SEARCH_CACHE[cache_key]
+
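+    # Example (illustrative, not part of the original change): resolving a
+    # static cloud object once and serving later calls from SEARCH_CACHE.
+    # 'Standard_D2_v2' and 'list_sizes' are assumptions for the example.
+    #
+    #     size = driver.search_for('Standard_D2_v2', 'list_sizes')
+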
+    def list_nodes(self, **kwargs):
+        l = self.list_kwargs.copy()
+        l.update(kwargs)
+        try:
+            return self.real.list_nodes(**l)
+        except CLOUD_ERRORS:
+            tracker.counter_add('list_nodes_errors')
+            raise
+
+    def create_cloud_name(self, arvados_node):
+        """Return a cloud node name for the given Arvados node record.
+
+        Subclasses must override this method.  It should return a string
+        that can be used as the name for a newly-created cloud node,
+        based on identifying information in the Arvados node record.
+
+        Arguments:
+        * arvados_node: The Arvados node record that seeds the new cloud node.
+        """
+        raise NotImplementedError("BaseComputeNodeDriver.create_cloud_name")
+
+    def arvados_create_kwargs(self, size, arvados_node):
+        """Return dynamic keyword arguments for create_node.
+
+        Subclasses must override this method.  It should return a dictionary
+        of keyword arguments to pass to the libcloud driver's create_node
+        method.  These arguments will extend the static arguments in
+        create_kwargs.
+
+        Arguments:
+        * size: The node size that will be created (libcloud NodeSize object)
+        * arvados_node: The Arvados node record that will be associated
+          with this cloud node, as returned from the API server.
+        """
+        raise NotImplementedError("BaseComputeNodeDriver.arvados_create_kwargs")
+
+    def broken(self, cloud_node):
+        """Return true if libcloud has indicated the node is in a "broken" state."""
+        return False
+
+    def _make_ping_url(self, arvados_node):
+        return 'https://{}/arvados/v1/nodes/{}/ping?ping_secret={}'.format(
+            self.ping_host, arvados_node['uuid'],
+            arvados_node['info']['ping_secret'])
+
+    @staticmethod
+    def _name_key(cloud_object):
+        return cloud_object.name
+
+    def create_node(self, size, arvados_node):
+        try:
+            kwargs = self.create_kwargs.copy()
+            kwargs.update(self.arvados_create_kwargs(size, arvados_node))
+            kwargs['size'] = size.real
+            return self.real.create_node(**kwargs)
+        except CLOUD_ERRORS as create_error:
+            # Workaround for bug #6702: sometimes the create node request
+            # succeeds but times out and raises an exception instead of
+            # returning a result.  If this happens, we get stuck in a retry
+            # loop forever because subsequent create_node attempts will fail
+            # due to node name collision.  So check if the node we intended to
+            # create shows up in the cloud node list and return it if found.
+            try:
+                return self.search_for_now(kwargs['name'], 'list_nodes', self._name_key)
+            except ValueError:
+                tracker.counter_add('create_node_errors')
+                raise create_error
+
+    def post_create_node(self, cloud_node):
+        # ComputeNodeSetupActor calls this method after the cloud node is
+        # created.  Any setup tasks that need to happen afterward (e.g.,
+        # tagging) should be done in this method.
+        pass
+
+    def sync_node(self, cloud_node, arvados_node):
+        # When a compute node first pings the API server, the API server
+        # will automatically assign some attributes on the corresponding
+        # node record, like hostname.  This method should propagate that
+        # information back to the cloud node appropriately.
+        raise NotImplementedError("BaseComputeNodeDriver.sync_node")
+
+    @classmethod
+    def node_fqdn(cls, node):
+        # This method should return the FQDN of the node object argument.
+        # Different clouds store this in different places.
+        raise NotImplementedError("BaseComputeNodeDriver.node_fqdn")
+
+    @classmethod
+    def node_start_time(cls, node):
+        # This method should return the time the node was started, in
+        # seconds since the epoch UTC.
+        raise NotImplementedError("BaseComputeNodeDriver.node_start_time")
+
+    def destroy_node(self, cloud_node):
+        try:
+            return self.real.destroy_node(cloud_node)
+        except CLOUD_ERRORS:
+            # Sometimes the destroy node request succeeds but times out and
+            # raises an exception instead of returning success.  If this
+            # happens, we get a noisy stack trace.  Check if the node is still
+            # on the node list.  If it is gone, we can declare victory.
+            try:
+                self.search_for_now(cloud_node.id, 'list_nodes')
+            except ValueError:
+                # If we catch ValueError, that means search_for_now didn't find
+                # it, which means destroy_node actually succeeded.
+                return True
+            # The node is still on the list.  Re-raise.
+            tracker.counter_add('destroy_node_errors')
+            raise
+
+    # Now that we've defined all our own methods, delegate generic, public
+    # attributes of libcloud drivers that we haven't defined ourselves.
+    def _delegate_to_real(attr_name):
+        return property(
+            lambda self: getattr(self.real, attr_name),
+            lambda self, value: setattr(self.real, attr_name, value),
+            doc=getattr(getattr(NodeDriver, attr_name), '__doc__', None))
+
+    # This method should return the cloud node identifier for the given
+    # node object.  Different clouds use different attributes for it.
+    @classmethod
+    def node_id(cls, node):
+        raise NotImplementedError("BaseComputeNodeDriver.node_id")
+
+    _locals = locals()
+    for _attr_name in dir(NodeDriver):
+        if (not _attr_name.startswith('_')) and (_attr_name not in _locals):
+            _locals[_attr_name] = _delegate_to_real(_attr_name)
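The class body above ends with a metaprogramming trick: it generates properties
that forward reads and writes of any public libcloud driver attribute it hasn't
defined itself to the wrapped driver in self.real.  A minimal, self-contained
sketch of that delegation pattern, with illustrative names (Real, Wrapper,
greeting) that are not from the source:

    # Sketch only: expose selected attributes of a wrapped object as
    # properties generated inside the class body.
    class Real(object):
        greeting = 'hello'

    class Wrapper(object):
        def __init__(self):
            self.real = Real()

        def _delegate(attr_name):
            return property(
                lambda self: getattr(self.real, attr_name),
                lambda self, value: setattr(self.real, attr_name, value))

        _locals = locals()
        for _attr in ['greeting']:
            if _attr not in _locals:
                _locals[_attr] = _delegate(_attr)

    w = Wrapper()
    print(w.greeting)    # 'hello', read through to w.real
    w.greeting = 'hi'    # writes through to w.real as well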
diff --git a/services/nodemanager/arvnodeman/computenode/driver/azure.py b/services/nodemanager/arvnodeman/computenode/driver/azure.py
new file mode 100644 (file)
index 0000000..35c8b5a
--- /dev/null
@@ -0,0 +1,112 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import pipes
+import time
+
+import libcloud.compute.base as cloud_base
+import libcloud.compute.providers as cloud_provider
+import libcloud.compute.types as cloud_types
+from libcloud.common.exceptions import BaseHTTPError
+
+from . import BaseComputeNodeDriver
+from .. import arvados_node_fqdn, arvados_timestamp, ARVADOS_TIMEFMT
+
+class ComputeNodeDriver(BaseComputeNodeDriver):
+
+    DEFAULT_DRIVER = cloud_provider.get_driver(cloud_types.Provider.AZURE_ARM)
+    SEARCH_CACHE = {}
+
+    def __init__(self, auth_kwargs, list_kwargs, create_kwargs,
+                 driver_class=DEFAULT_DRIVER):
+
+        if not list_kwargs.get("ex_resource_group"):
+            raise Exception("Must include ex_resource_group in Cloud List configuration (list_kwargs)")
+
+        create_kwargs["ex_resource_group"] = list_kwargs["ex_resource_group"]
+
+        self.tags = {key[4:]: value
+                     for key, value in create_kwargs.iteritems()
+                     if key.startswith('tag_')}
+        # filter out tags from create_kwargs
+        create_kwargs = {key: value
+                         for key, value in create_kwargs.iteritems()
+                         if not key.startswith('tag_')}
+        super(ComputeNodeDriver, self).__init__(
+            auth_kwargs, list_kwargs, create_kwargs,
+            driver_class)
+
+    def create_cloud_name(self, arvados_node):
+        uuid_parts = arvados_node['uuid'].split('-', 2)
+        return 'compute-{parts[2]}-{parts[0]}'.format(parts=uuid_parts)
+
+    def arvados_create_kwargs(self, size, arvados_node):
+        tags = {
+            # Set up tag indicating the Arvados assigned Cloud Size id.
+            'arvados_node_size': size.id,
+            'booted_at': time.strftime(ARVADOS_TIMEFMT, time.gmtime()),
+            'arv-ping-url': self._make_ping_url(arvados_node)
+        }
+        tags.update(self.tags)
+
+        name = self.create_cloud_name(arvados_node)
+        customdata = """#!/bin/sh
+mkdir -p    /var/tmp/arv-node-data/meta-data
+echo %s > /var/tmp/arv-node-data/arv-ping-url
+echo %s > /var/tmp/arv-node-data/meta-data/instance-id
+echo %s > /var/tmp/arv-node-data/meta-data/instance-type
+""" % (pipes.quote(tags['arv-ping-url']),
+       pipes.quote(name),
+       pipes.quote(size.id))
+
+        return {
+            'name': name,
+            'ex_tags': tags,
+            'ex_customdata': customdata
+        }
+
+    def sync_node(self, cloud_node, arvados_node):
+        try:
+            self.real.ex_create_tags(cloud_node,
+                                     {'hostname': arvados_node_fqdn(arvados_node)})
+            return True
+        except BaseHTTPError:
+            return False
+
+    def _init_image(self, urn):
+        return "image", self.get_image(urn)
+
+    def list_nodes(self):
+        # Azure only supports filtering node lists by resource group.
+        # Do our own filtering based on tag.
+        nodes = [node for node in
+                super(ComputeNodeDriver, self).list_nodes(ex_fetch_nic=False, ex_fetch_power_state=False)
+                if node.extra.get("tags", {}).get("arvados-class") == self.tags["arvados-class"]]
+        for n in nodes:
+            # Need to populate Node.size
+            if not n.size:
+                n.size = self.sizes()[n.extra["properties"]["hardwareProfile"]["vmSize"]]
+            n.extra['arvados_node_size'] = n.extra.get('tags', {}).get('arvados_node_size') or n.size.id
+        return nodes
+
+    def broken(self, cloud_node):
+        """Return true if libcloud has indicated the node is in a "broken" state."""
+        # UNKNOWN means the node state is unrecognized, which in practice
+        # means the node failed in some way that the Azure libcloud driver
+        # doesn't know how to interpret.
+        return (cloud_node.state in (cloud_types.NodeState.ERROR, cloud_types.NodeState.UNKNOWN))
+
+    @classmethod
+    def node_fqdn(cls, node):
+        return node.extra["tags"].get("hostname")
+
+    @classmethod
+    def node_start_time(cls, node):
+        return arvados_timestamp(node.extra["tags"].get("booted_at"))
+
+    @classmethod
+    def node_id(cls, node):
+        return node.name
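One detail worth noting in the Azure driver above: create-time tags arrive in
create_kwargs as entries with a tag_ prefix, which __init__ strips off before
handing the remaining arguments to the base class.  A small illustration with
made-up configuration values:

    # Sketch only: the tag_ prefix convention used by ComputeNodeDriver.
    create_kwargs = {'tag_arvados-class': 'dynamic-compute',
                     'tag_cluster': 'zzzzz',
                     'ex_customdata': '...'}
    tags = {key[4:]: value for key, value in create_kwargs.items()
            if key.startswith('tag_')}
    create_kwargs = {key: value for key, value in create_kwargs.items()
                     if not key.startswith('tag_')}
    # tags == {'arvados-class': 'dynamic-compute', 'cluster': 'zzzzz'}
    # create_kwargs == {'ex_customdata': '...'}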
diff --git a/services/nodemanager/arvnodeman/computenode/driver/dummy.py b/services/nodemanager/arvnodeman/computenode/driver/dummy.py
new file mode 100644 (file)
index 0000000..14845ac
--- /dev/null
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import time
+
+import libcloud.compute.providers as cloud_provider
+import libcloud.compute.types as cloud_types
+
+from . import BaseComputeNodeDriver
+from .. import arvados_node_fqdn
+
+class ComputeNodeDriver(BaseComputeNodeDriver):
+    """Compute node driver wrapper for libcloud's dummy driver.
+
+    This class provides the glue necessary to run the node manager with a
+    dummy cloud.  It's useful for testing.
+    """
+    DEFAULT_DRIVER = cloud_provider.get_driver(cloud_types.Provider.DUMMY)
+    DEFAULT_REAL = DEFAULT_DRIVER('ComputeNodeDriver')
+    DUMMY_START_TIME = time.time()
+
+    def __init__(self, auth_kwargs, list_kwargs, create_kwargs,
+                 driver_class=DEFAULT_DRIVER):
+        super(ComputeNodeDriver, self).__init__(
+            auth_kwargs, list_kwargs, create_kwargs, driver_class)
+        if driver_class is self.DEFAULT_DRIVER:
+            self.real = self.DEFAULT_REAL
+
+    def _ensure_private_ip(self, node):
+        if not node.private_ips:
+            node.private_ips = ['10.10.0.{}'.format(node.id)]
+
+    def arvados_create_kwargs(self, size, arvados_node):
+        return {}
+
+    def list_nodes(self):
+        nodelist = super(ComputeNodeDriver, self).list_nodes()
+        for node in nodelist:
+            self._ensure_private_ip(node)
+            node.size = self.sizes()["1"]
+        return nodelist
+
+    def create_node(self, size, arvados_node):
+        node = super(ComputeNodeDriver, self).create_node(size, arvados_node)
+        self._ensure_private_ip(node)
+        return node
+
+    def sync_node(self, cloud_node, arvados_node):
+        cloud_node.name = arvados_node_fqdn(arvados_node)
+
+    @classmethod
+    def node_fqdn(cls, node):
+        return node.name
+
+    @classmethod
+    def node_start_time(cls, node):
+        return cls.DUMMY_START_TIME
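The dummy driver's only real logic is the private-IP backfill: libcloud's dummy
cloud returns nodes without private addresses, so _ensure_private_ip()
synthesizes one from the node id.  A minimal sketch, with a stand-in FakeNode
class that is not part of the source:

    # Sketch only: how _ensure_private_ip() fills in a missing address.
    class FakeNode(object):
        def __init__(self, node_id):
            self.id = node_id
            self.private_ips = []

    node = FakeNode('3')
    if not node.private_ips:
        node.private_ips = ['10.10.0.{}'.format(node.id)]
    print(node.private_ips)  # ['10.10.0.3']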
diff --git a/services/nodemanager/arvnodeman/computenode/driver/ec2.py b/services/nodemanager/arvnodeman/computenode/driver/ec2.py
new file mode 100644 (file)
index 0000000..418a9f9
--- /dev/null
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import time
+
+import libcloud.compute.base as cloud_base
+import libcloud.compute.providers as cloud_provider
+import libcloud.compute.types as cloud_types
+from libcloud.compute.drivers import ec2 as cloud_ec2
+
+from . import BaseComputeNodeDriver
+from .. import arvados_node_fqdn
+
+### Monkeypatch libcloud to support AWS' new SecurityGroup API.
+# These classes can be removed when libcloud supports specifying
+# security groups with the SecurityGroupId parameter.
+class ANMEC2Connection(cloud_ec2.EC2Connection):
+    def request(self, *args, **kwargs):
+        params = kwargs.get('params')
+        if (params is not None) and (params.get('Action') == 'RunInstances'):
+            for key in params.keys():
+                if key.startswith('SecurityGroup.'):
+                    new_key = key.replace('Group.', 'GroupId.', 1)
+                    params[new_key] = params.pop(key).id
+            kwargs['params'] = params
+        return super(ANMEC2Connection, self).request(*args, **kwargs)
+
+
+class ANMEC2NodeDriver(cloud_ec2.EC2NodeDriver):
+    connectionCls = ANMEC2Connection
+
+
+class ComputeNodeDriver(BaseComputeNodeDriver):
+    """Compute node driver wrapper for EC2.
+
+    This translates cloud driver requests to EC2's specific parameters.
+    """
+    DEFAULT_DRIVER = ANMEC2NodeDriver
+### End monkeypatch
+    SEARCH_CACHE = {}
+
+    def __init__(self, auth_kwargs, list_kwargs, create_kwargs,
+                 driver_class=DEFAULT_DRIVER):
+        # We need full lists of keys up front because these loops modify
+        # dictionaries in-place.
+        for key in list_kwargs.keys():
+            list_kwargs[key.replace('_', ':')] = list_kwargs.pop(key)
+        self.tags = {key[4:]: value
+                     for key, value in list_kwargs.iteritems()
+                     if key.startswith('tag:')}
+        # Tags are assigned at instance creation time
+        create_kwargs.setdefault('ex_metadata', {})
+        create_kwargs['ex_metadata'].update(self.tags)
+        super(ComputeNodeDriver, self).__init__(
+            auth_kwargs, {'ex_filters': list_kwargs}, create_kwargs,
+            driver_class)
+
+    def _init_image_id(self, image_id):
+        return 'image', self.search_for(image_id, 'list_images', ex_owner='self')
+
+    def _init_security_groups(self, group_names):
+        return 'ex_security_groups', [
+            self.search_for(gname.strip(), 'ex_get_security_groups')
+            for gname in group_names.split(',')]
+
+    def _init_subnet_id(self, subnet_id):
+        return 'ex_subnet', self.search_for(subnet_id, 'ex_list_subnets')
+
+    create_cloud_name = staticmethod(arvados_node_fqdn)
+
+    def arvados_create_kwargs(self, size, arvados_node):
+        kw = {'name': self.create_cloud_name(arvados_node),
+              'ex_userdata': self._make_ping_url(arvados_node)}
+        # libcloud/ec2 disk sizes are in GB, Arvados/SLURM "scratch" value is in MB
+        scratch = int(size.scratch / 1000) + 1
+        if scratch > size.disk:
+            volsize = scratch - size.disk
+            if volsize > 16384:
+                # Must be 1-16384 for General Purpose SSD (gp2) devices
+                # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html
+                self._logger.warning("Requested EBS volume size %d is too large, capping size request to 16384 GB", volsize)
+                volsize = 16384
+            kw["ex_blockdevicemappings"] = [{
+                "DeviceName": "/dev/xvdt",
+                "Ebs": {
+                    "DeleteOnTermination": True,
+                    "VolumeSize": volsize,
+                    "VolumeType": "gp2"
+                }}]
+        if size.preemptible:
+            # Request a Spot instance for this node
+            kw['ex_spot_market'] = True
+        return kw
+
+    def sync_node(self, cloud_node, arvados_node):
+        self.real.ex_create_tags(cloud_node,
+                                 {'Name': arvados_node_fqdn(arvados_node)})
+
+    def create_node(self, size, arvados_node):
+        # Set up tag indicating the Arvados assigned Cloud Size id.
+        self.create_kwargs['ex_metadata'].update({'arvados_node_size': size.id})
+        return super(ComputeNodeDriver, self).create_node(size, arvados_node)
+
+    def list_nodes(self):
+        # Need to populate Node.size
+        nodes = super(ComputeNodeDriver, self).list_nodes()
+        for n in nodes:
+            if not n.size:
+                n.size = self.sizes()[n.extra["instance_type"]]
+            n.extra['arvados_node_size'] = n.extra.get('tags', {}).get('arvados_node_size') or n.size.id
+        return nodes
+
+    @classmethod
+    def node_fqdn(cls, node):
+        return node.name
+
+    @classmethod
+    def node_start_time(cls, node):
+        time_str = node.extra['launch_time'].split('.', 2)[0] + 'UTC'
+        return time.mktime(time.strptime(
+                time_str, '%Y-%m-%dT%H:%M:%S%Z')) - time.timezone
+
+    @classmethod
+    def node_id(cls, node):
+        return node.id
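The EBS sizing arithmetic in arvados_create_kwargs() above deserves a worked
example, since it mixes units: Arvados/SLURM report "scratch" in MB, while EC2
volume sizes are in GB and gp2 volumes top out at 16384 GB.  The numbers below
are invented for illustration:

    # Sketch only: sizing the extra gp2 volume for a scratch request.
    size_scratch_mb = 2000000   # hypothetical 2 TB scratch request
    size_disk_gb = 64           # hypothetical built-in instance disk

    scratch = int(size_scratch_mb / 1000) + 1     # 2001 GB
    if scratch > size_disk_gb:
        volsize = min(scratch - size_disk_gb, 16384)
        print("attach a %d GB gp2 EBS volume" % volsize)   # 1937 GB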
diff --git a/services/nodemanager/arvnodeman/computenode/driver/gce.py b/services/nodemanager/arvnodeman/computenode/driver/gce.py
new file mode 100644 (file)
index 0000000..23a1017
--- /dev/null
@@ -0,0 +1,181 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import functools
+import json
+import time
+
+import libcloud.compute.providers as cloud_provider
+import libcloud.compute.types as cloud_types
+
+from . import BaseComputeNodeDriver
+from .. import arvados_node_fqdn, arvados_timestamp, ARVADOS_TIMEFMT
+
+class ComputeNodeDriver(BaseComputeNodeDriver):
+    """Compute node driver wrapper for GCE
+
+    This translates cloud driver requests to GCE's specific parameters.
+    """
+    DEFAULT_DRIVER = cloud_provider.get_driver(cloud_types.Provider.GCE)
+    SEARCH_CACHE = {}
+
+    def __init__(self, auth_kwargs, list_kwargs, create_kwargs,
+                 driver_class=DEFAULT_DRIVER):
+        list_kwargs = list_kwargs.copy()
+        tags_str = list_kwargs.pop('tags', '')
+        if not tags_str.strip():
+            self.node_tags = frozenset()
+        else:
+            self.node_tags = frozenset(t.strip() for t in tags_str.split(','))
+        create_kwargs = create_kwargs.copy()
+        create_kwargs.setdefault('external_ip', None)
+        create_kwargs.setdefault('ex_metadata', {})
+        self._project = auth_kwargs.get("project")
+        super(ComputeNodeDriver, self).__init__(
+            auth_kwargs, list_kwargs, create_kwargs,
+            driver_class)
+        self._disktype_links = {dt.name: self._object_link(dt)
+                                for dt in self.real.ex_list_disktypes()}
+
+    @staticmethod
+    def _object_link(cloud_object):
+        return cloud_object.extra.get('selfLink')
+
+    def _init_image(self, image_name):
+        return 'image', self.search_for(
+            image_name, 'list_images', self._name_key, ex_project=self._project)
+
+    def _init_network(self, network_name):
+        return 'ex_network', self.search_for(
+            network_name, 'ex_list_networks', self._name_key)
+
+    def _init_service_accounts(self, service_accounts_str):
+        return 'ex_service_accounts', json.loads(service_accounts_str)
+
+    def _init_ssh_key(self, filename):
+        # SSH keys are delivered to GCE nodes via ex_metadata: see
+        # http://stackoverflow.com/questions/26752617/creating-sshkeys-for-gce-instance-using-libcloud
+        with open(filename) as ssh_file:
+            self.create_kwargs['ex_metadata']['sshKeys'] = (
+                'root:' + ssh_file.read().strip())
+
+    def create_cloud_name(self, arvados_node):
+        uuid_parts = arvados_node['uuid'].split('-', 2)
+        return 'compute-{parts[2]}-{parts[0]}'.format(parts=uuid_parts)
+
+    def arvados_create_kwargs(self, size, arvados_node):
+        name = self.create_cloud_name(arvados_node)
+
+        if size.scratch > 375000:
+            self._logger.warning("Requested %d MB scratch space, but GCE driver currently only supports attaching a single 375 GB disk.", size.scratch)
+
+        disks = [
+            {'autoDelete': True,
+             'boot': True,
+             'deviceName': name,
+             'initializeParams':
+                 {'diskName': name,
+                  'diskType': self._disktype_links['pd-standard'],
+                  'sourceImage': self._object_link(self.create_kwargs['image']),
+                  },
+             'type': 'PERSISTENT',
+             },
+            {'autoDelete': True,
+             'boot': False,
+             # Boot images rely on this device name to find the SSD.
+             # Any change must be coordinated in the image.
+             'deviceName': 'tmp',
+             'initializeParams':
+                 {'diskType': self._disktype_links['local-ssd'],
+                  },
+             'type': 'SCRATCH',
+             },
+            ]
+        result = {'name': name,
+                  'ex_metadata': self.create_kwargs['ex_metadata'].copy(),
+                  'ex_tags': list(self.node_tags),
+                  'ex_disks_gce_struct': disks,
+                  }
+        result['ex_metadata'].update({
+            'arvados_node_size': size.id,
+            'arv-ping-url': self._make_ping_url(arvados_node),
+            'booted_at': time.strftime(ARVADOS_TIMEFMT, time.gmtime()),
+            'hostname': arvados_node_fqdn(arvados_node),
+        })
+        return result
+
+    def list_nodes(self):
+        # The GCE libcloud driver only supports filtering node lists by zone.
+        # Do our own filtering based on tag list.
+        nodelist = [node for node in
+                    super(ComputeNodeDriver, self).list_nodes()
+                    if self.node_tags.issubset(node.extra.get('tags', []))]
+        for node in nodelist:
+            # As of 0.18, the libcloud GCE driver sets node.size to the
+            # size's name instead of the actual size object.  Check for that
+            # case and patch in the real size object when it happens.
+            if not hasattr(node.size, 'id'):
+                node.size = self.sizes()[node.size]
+            # Get arvados-assigned cloud size id
+            node.extra['arvados_node_size'] = node.extra.get('metadata', {}).get('arvados_node_size') or node.size.id
+        return nodelist
+
+    @classmethod
+    def _find_metadata(cls, metadata_items, key):
+        # Given a list of two-item metadata dictionaries, return the one with
+        # the named key.  Raise KeyError if not found.
+        try:
+            return next(data_dict for data_dict in metadata_items
+                        if data_dict.get('key') == key)
+        except StopIteration:
+            raise KeyError(key)
+
+    @classmethod
+    def _get_metadata(cls, metadata_items, key, *default):
+        try:
+            return cls._find_metadata(metadata_items, key)['value']
+        except KeyError:
+            if default:
+                return default[0]
+            raise
+
+    def sync_node(self, cloud_node, arvados_node):
+        # Update the cloud node record to ensure we have the correct metadata
+        # fingerprint.
+        cloud_node = self.real.ex_get_node(cloud_node.name, cloud_node.extra['zone'])
+
+        # We can't store the FQDN on the name attribute or anything like it,
+        # because (a) names are static throughout the node's life (so FQDN
+        # isn't available because we don't know it at node creation time) and
+        # (b) it can't contain dots.  Instead stash it in metadata.
+        hostname = arvados_node_fqdn(arvados_node)
+        metadata_req = cloud_node.extra['metadata'].copy()
+        metadata_items = metadata_req.setdefault('items', [])
+        try:
+            self._find_metadata(metadata_items, 'hostname')['value'] = hostname
+        except KeyError:
+            metadata_items.append({'key': 'hostname', 'value': hostname})
+
+        self.real.ex_set_node_metadata(cloud_node, metadata_items)
+
+    @classmethod
+    def node_fqdn(cls, node):
+        # See sync_node comment.
+        return cls._get_metadata(node.extra['metadata'].get('items', []),
+                                 'hostname', '')
+
+    @classmethod
+    def node_start_time(cls, node):
+        try:
+            return arvados_timestamp(cls._get_metadata(
+                    node.extra['metadata']['items'], 'booted_at'))
+        except KeyError:
+            return 0
+
+    @classmethod
+    def node_id(cls, node):
+        return node.id
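GCE stores node metadata as a list of {'key': ..., 'value': ...} items rather
than a flat mapping, which is why the driver above needs the
_find_metadata()/_get_metadata() helpers.  An illustrative sketch with made-up
data:

    # Sketch only: looking up values in GCE-style metadata items.
    metadata_items = [
        {'key': 'hostname', 'value': 'compute1.zzzzz.example.com'},
        {'key': 'booted_at', 'value': '2019-03-14T14:11:26Z'},
    ]

    def get_metadata(items, key, default=None):
        for data_dict in items:
            if data_dict.get('key') == key:
                return data_dict['value']
        return default

    print(get_metadata(metadata_items, 'hostname'))     # compute1.zzzzz...
    print(get_metadata(metadata_items, 'missing', ''))  # ''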
diff --git a/services/nodemanager/arvnodeman/config.py b/services/nodemanager/arvnodeman/config.py
new file mode 100644 (file)
index 0000000..4857e89
--- /dev/null
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import ConfigParser
+import importlib
+import logging
+import sys
+
+import arvados
+import httplib2
+import pykka
+from apiclient import errors as apierror
+
+from .baseactor import BaseNodeManagerActor
+
+from functools import partial
+from libcloud.common.types import LibcloudError
+from libcloud.common.exceptions import BaseHTTPError
+
+# IOError is the base class for socket.error, ssl.SSLError, and friends.
+# It seems like it hits the sweet spot for operations we want to retry:
+# it's low-level, but unlikely to catch code bugs.
+NETWORK_ERRORS = (IOError,)
+ARVADOS_ERRORS = NETWORK_ERRORS + (apierror.Error,)
+CLOUD_ERRORS = NETWORK_ERRORS + (LibcloudError, BaseHTTPError)
+
+actor_class = BaseNodeManagerActor
+
+class NodeManagerConfig(ConfigParser.SafeConfigParser):
+    """Node Manager Configuration class.
+
+    This is a standard Python ConfigParser, with additional helper methods to
+    create objects instantiated with configuration information.
+    """
+
+    LOGGING_NONLEVELS = frozenset(['file'])
+
+    def __init__(self, *args, **kwargs):
+        # Can't use super() because SafeConfigParser is an old-style class.
+        ConfigParser.SafeConfigParser.__init__(self, *args, **kwargs)
+        for sec_name, settings in {
+            'Arvados': {'insecure': 'no',
+                        'timeout': '15',
+                        'jobs_queue': 'yes',
+                        'slurm_queue': 'yes'
+                    },
+            'Daemon': {'min_nodes': '0',
+                       'max_nodes': '1',
+                       'poll_time': '60',
+                       'cloudlist_poll_time': '0',
+                       'nodelist_poll_time': '0',
+                       'wishlist_poll_time': '0',
+                       'max_poll_time': '300',
+                       'poll_stale_after': '600',
+                       'max_total_price': '0',
+                       'boot_fail_after': str(sys.maxint),
+                       'node_stale_after': str(60 * 60 * 2),
+                       'watchdog': '600',
+                       'node_mem_scaling': '0.95',
+                       'consecutive_idle_count': '2'},
+            'Manage': {'address': '127.0.0.1',
+                       'port': '-1',
+                       'ManagementToken': ''},
+            'Logging': {'file': '/dev/stderr',
+                        'level': 'WARNING'}
+        }.iteritems():
+            if not self.has_section(sec_name):
+                self.add_section(sec_name)
+            for opt_name, value in settings.iteritems():
+                if not self.has_option(sec_name, opt_name):
+                    self.set(sec_name, opt_name, value)
+
+    def get_section(self, section, transformers={}, default_transformer=None):
+        transformer_map = {
+            str: self.get,
+            int: self.getint,
+            bool: self.getboolean,
+            float: self.getfloat,
+        }
+        result = self._dict()
+        for key, value in self.items(section):
+            transformer = None
+            if transformers.get(key) in transformer_map:
+                transformer = partial(transformer_map[transformers[key]], section)
+            elif default_transformer in transformer_map:
+                transformer = partial(transformer_map[default_transformer], section)
+            if transformer is not None:
+                try:
+                    value = transformer(key)
+                except (TypeError, ValueError):
+                    pass
+            result[key] = value
+        return result
+
+    def log_levels(self):
+        return {key: getattr(logging, self.get('Logging', key).upper())
+                for key in self.options('Logging')
+                if key not in self.LOGGING_NONLEVELS}
+
+    def dispatch_classes(self):
+        mod_name = 'arvnodeman.computenode.dispatch'
+        if self.has_option('Daemon', 'dispatcher'):
+            mod_name = '{}.{}'.format(mod_name,
+                                      self.get('Daemon', 'dispatcher'))
+        module = importlib.import_module(mod_name)
+        return (module.ComputeNodeSetupActor,
+                module.ComputeNodeShutdownActor,
+                module.ComputeNodeUpdateActor,
+                module.ComputeNodeMonitorActor)
+
+    def new_arvados_client(self):
+        if self.has_option('Daemon', 'certs_file'):
+            certs_file = self.get('Daemon', 'certs_file')
+        else:
+            certs_file = None
+        insecure = self.getboolean('Arvados', 'insecure')
+        http = httplib2.Http(timeout=self.getint('Arvados', 'timeout'),
+                             ca_certs=certs_file,
+                             disable_ssl_certificate_validation=insecure)
+        return arvados.api(version='v1',
+                           host=self.get('Arvados', 'host'),
+                           token=self.get('Arvados', 'token'),
+                           insecure=insecure,
+                           http=http)
+
+    def new_cloud_client(self):
+        module = importlib.import_module('arvnodeman.computenode.driver.' +
+                                         self.get('Cloud', 'provider'))
+        driver_class = module.ComputeNodeDriver.DEFAULT_DRIVER
+        if self.has_option('Cloud', 'driver_class'):
+            d = self.get('Cloud', 'driver_class').split('.')
+            mod = '.'.join(d[:-1])
+            cls = d[-1]
+            driver_class = importlib.import_module(mod).__dict__[cls]
+        auth_kwargs = self.get_section('Cloud Credentials')
+        if 'timeout' in auth_kwargs:
+            auth_kwargs['timeout'] = int(auth_kwargs['timeout'])
+        return module.ComputeNodeDriver(auth_kwargs,
+                                        self.get_section('Cloud List'),
+                                        self.get_section('Cloud Create'),
+                                        driver_class=driver_class)
+
+    def node_sizes(self):
+        """Finds all acceptable NodeSizes for our installation.
+
+        Returns a list of (NodeSize, kwargs) pairs for each NodeSize object
+        returned by libcloud that matches a size listed in our config file.
+        """
+        all_sizes = self.new_cloud_client().list_sizes()
+        size_kwargs = {}
+        section_types = {
+            'instance_type': str,
+            'price': float,
+            'preemptible': bool,
+        }
+        for sec_name in self.sections():
+            sec_words = sec_name.split(None, 2)
+            if sec_words[0] != 'Size':
+                continue
+            size_spec = self.get_section(sec_name, section_types, int)
+            if 'preemptible' not in size_spec:
+                size_spec['preemptible'] = False
+            if 'instance_type' not in size_spec:
+                # Assume instance type is Size name if missing
+                size_spec['instance_type'] = sec_words[1]
+            size_spec['id'] = sec_words[1]
+            size_kwargs[sec_words[1]] = size_spec
+        # EC2 node sizes are identified by id. GCE sizes are identified by name.
+        matching_sizes = []
+        for size in all_sizes:
+            matching_sizes += [
+                (size, size_kwargs[s]) for s in size_kwargs
+                if size_kwargs[s]['instance_type'] == size.id
+                or size_kwargs[s]['instance_type'] == size.name
+            ]
+        return matching_sizes
+
+    def shutdown_windows(self):
+        return [float(n)
+                for n in self.get('Cloud', 'shutdown_windows').split(',')]
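NodeManagerConfig.get_section() above coerces option values through the
transformer map, which is what lets [Size ...] sections mix strings, ints,
floats, and booleans.  A hedged usage sketch, assuming NodeManagerConfig is
importable from arvnodeman.config; the section contents are invented:

    # Sketch only (Python 2, matching the module above).
    import StringIO
    from arvnodeman.config import NodeManagerConfig

    config = NodeManagerConfig()
    config.readfp(StringIO.StringIO(
        "[Size m4.large]\n"
        "cores = 2\n"
        "price = 0.1\n"
        "preemptible = yes\n"))
    spec = config.get_section('Size m4.large',
                              {'price': float, 'preemptible': bool}, int)
    # spec == {'cores': 2, 'price': 0.1, 'preemptible': True}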
diff --git a/services/nodemanager/arvnodeman/daemon.py b/services/nodemanager/arvnodeman/daemon.py
new file mode 100644 (file)
index 0000000..1edf4dc
--- /dev/null
@@ -0,0 +1,583 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import functools
+import logging
+import time
+
+import pykka
+
+from . import computenode as cnode
+from . import status
+from .computenode import dispatch
+from .config import actor_class
+
+class _ComputeNodeRecord(object):
+    def __init__(self, actor=None, cloud_node=None, arvados_node=None,
+                 assignment_time=float('-inf')):
+        self.actor = actor
+        self.cloud_node = cloud_node
+        self.arvados_node = arvados_node
+        self.assignment_time = assignment_time
+        self.shutdown_actor = None
+
+class _BaseNodeTracker(object):
+    def __init__(self):
+        self.nodes = {}
+        self.orphans = {}
+
+    # Proxy the methods listed below to self.nodes.
+    def _proxy_method(name):
+        method = getattr(dict, name)
+        @functools.wraps(method, ('__name__', '__doc__'))
+        def wrapper(self, *args, **kwargs):
+            return method(self.nodes, *args, **kwargs)
+        return wrapper
+
+    for _method_name in ['__contains__', '__getitem__', '__len__', 'get']:
+        locals()[_method_name] = _proxy_method(_method_name)
+
+    def record_key(self, record):
+        return self.item_key(getattr(record, self.RECORD_ATTR))
+
+    def add(self, record):
+        self.nodes[self.record_key(record)] = record
+
+    def update_record(self, key, item):
+        setattr(self.nodes[key], self.RECORD_ATTR, item)
+
+    def update_from(self, response):
+        unseen = set(self.nodes.iterkeys())
+        for item in response:
+            key = self.item_key(item)
+            if key in unseen:
+                unseen.remove(key)
+                self.update_record(key, item)
+            else:
+                yield key, item
+        self.orphans = {key: self.nodes.pop(key) for key in unseen}
+
+    def unpaired(self):
+        return (record for record in self.nodes.itervalues()
+                if getattr(record, self.PAIR_ATTR) is None)
+
+
+class _CloudNodeTracker(_BaseNodeTracker):
+    RECORD_ATTR = 'cloud_node'
+    PAIR_ATTR = 'arvados_node'
+    item_key = staticmethod(lambda cloud_node: cloud_node.id)
+
+
+class _ArvadosNodeTracker(_BaseNodeTracker):
+    RECORD_ATTR = 'arvados_node'
+    PAIR_ATTR = 'cloud_node'
+    item_key = staticmethod(lambda arvados_node: arvados_node['uuid'])
+
+    def find_stale_node(self, stale_time):
+        # Try to select a stale node record that has an assigned slot first.
+        for record in sorted(self.nodes.itervalues(),
+                             key=lambda r: r.arvados_node['slot_number'],
+                             reverse=True):
+            node = record.arvados_node
+            if (not cnode.timestamp_fresh(cnode.arvados_node_mtime(node),
+                                          stale_time) and
+                  not cnode.timestamp_fresh(record.assignment_time,
+                                            stale_time)):
+                return node
+        return None
+
+
+class NodeManagerDaemonActor(actor_class):
+    """Node Manager daemon.
+
+    This actor subscribes to all information polls about cloud nodes,
+    Arvados nodes, and the job queue.  It creates a ComputeNodeMonitorActor
+    for every cloud node, subscribing them to poll updates
+    appropriately.  It creates and destroys cloud nodes based on job queue
+    demand, and stops the corresponding ComputeNode actors when their work
+    is done.
+    """
+    def __init__(self, server_wishlist_actor, arvados_nodes_actor,
+                 cloud_nodes_actor, cloud_update_actor, timer_actor,
+                 arvados_factory, cloud_factory,
+                 shutdown_windows, server_calculator,
+                 min_nodes, max_nodes,
+                 poll_stale_after=600,
+                 boot_fail_after=1800,
+                 node_stale_after=7200,
+                 node_setup_class=dispatch.ComputeNodeSetupActor,
+                 node_shutdown_class=dispatch.ComputeNodeShutdownActor,
+                 node_actor_class=dispatch.ComputeNodeMonitorActor,
+                 max_total_price=0,
+                 consecutive_idle_count=1):
+        super(NodeManagerDaemonActor, self).__init__()
+        self._node_setup = node_setup_class
+        self._node_shutdown = node_shutdown_class
+        self._node_actor = node_actor_class
+        self._cloud_updater = cloud_update_actor
+        self._timer = timer_actor
+        self._new_arvados = arvados_factory
+        self._new_cloud = cloud_factory
+        self._cloud_driver = self._new_cloud()
+        self._later = self.actor_ref.tell_proxy()
+        self.shutdown_windows = shutdown_windows
+        self.server_calculator = server_calculator
+        self.min_cloud_size = self.server_calculator.cheapest_size()
+        self.min_nodes = min_nodes
+        self.max_nodes = max_nodes
+        self.node_quota = max_nodes
+        self.max_total_price = max_total_price
+        self.poll_stale_after = poll_stale_after
+        self.boot_fail_after = boot_fail_after
+        self.node_stale_after = node_stale_after
+        self.consecutive_idle_count = consecutive_idle_count
+        self.last_polls = {}
+        for poll_name in ['server_wishlist', 'arvados_nodes', 'cloud_nodes']:
+            poll_actor = locals()[poll_name + '_actor']
+            poll_actor.subscribe(getattr(self._later, 'update_' + poll_name))
+            setattr(self, '_{}_actor'.format(poll_name), poll_actor)
+            self.last_polls[poll_name] = -self.poll_stale_after
+        self.cloud_nodes = _CloudNodeTracker()
+        self.arvados_nodes = _ArvadosNodeTracker()
+        self.booting = {}       # Actor IDs to ComputeNodeSetupActors
+        self.sizes_booting = {} # Actor IDs to node size
+
+    def on_start(self):
+        self._logger = logging.getLogger("%s.%s" % (self.__class__.__name__, self.actor_urn[33:]))
+        self._logger.debug("Daemon started")
+
+    def _update_poll_time(self, poll_key):
+        self.last_polls[poll_key] = time.time()
+
+    def _pair_nodes(self, node_record, arvados_node):
+        self._logger.info("Cloud node %s is now paired with Arvados node %s with hostname %s",
+                          node_record.cloud_node.name, arvados_node['uuid'], arvados_node['hostname'])
+        self._arvados_nodes_actor.subscribe_to(
+            arvados_node['uuid'], node_record.actor.update_arvados_node)
+        node_record.arvados_node = arvados_node
+        self.arvados_nodes.add(node_record)
+
+    def _new_node(self, cloud_node):
+        start_time = self._cloud_driver.node_start_time(cloud_node)
+        shutdown_timer = cnode.ShutdownTimer(start_time,
+                                             self.shutdown_windows)
+        actor = self._node_actor.start(
+            cloud_node=cloud_node,
+            cloud_node_start_time=start_time,
+            shutdown_timer=shutdown_timer,
+            update_actor=self._cloud_updater,
+            timer_actor=self._timer,
+            arvados_node=None,
+            poll_stale_after=self.poll_stale_after,
+            node_stale_after=self.node_stale_after,
+            cloud_client=self._cloud_driver,
+            boot_fail_after=self.boot_fail_after,
+            consecutive_idle_count=self.consecutive_idle_count)
+        actorTell = actor.tell_proxy()
+        actorTell.subscribe(self._later.node_can_shutdown)
+        self._cloud_nodes_actor.subscribe_to(cloud_node.id,
+                                             actorTell.update_cloud_node)
+        record = _ComputeNodeRecord(actor.proxy(), cloud_node)
+        return record
+
+    def _register_cloud_node(self, node):
+        rec = self.cloud_nodes.get(node.id)
+        if rec is None:
+            self._logger.info("Registering new cloud node %s", node.id)
+            record = self._new_node(node)
+            self.cloud_nodes.add(record)
+        else:
+            rec.cloud_node = node
+
+    def update_cloud_nodes(self, nodelist):
+        self._update_poll_time('cloud_nodes')
+        for _, node in self.cloud_nodes.update_from(nodelist):
+            self._register_cloud_node(node)
+
+        self.try_pairing()
+
+        for record in self.cloud_nodes.orphans.itervalues():
+            if record.shutdown_actor:
+                try:
+                    record.shutdown_actor.stop()
+                except pykka.ActorDeadError:
+                    pass
+                record.shutdown_actor = None
+
+            # A recently booted node is a node that successfully completed the
+            # setup actor but has not yet appeared in the cloud node list.
+            # This will have the tag _nodemanager_recently_booted on it, which
+            # means (if we're not shutting it down) we want to put it back into
+            # the cloud node list.  Once it really appears in the cloud list,
+            # the object in record.cloud_node will be replaced by a new one
+            # that lacks the "_nodemanager_recently_booted" tag.
+            if hasattr(record.cloud_node, "_nodemanager_recently_booted"):
+                self.cloud_nodes.add(record)
+            else:
+                # Node disappeared from the cloud node list. If it's paired,
+                # remove its idle time counter.
+                if record.arvados_node:
+                    status.tracker.idle_out(record.arvados_node.get('hostname'))
+                # Stop the monitor actor if necessary and forget about the node.
+                if record.actor:
+                    try:
+                        record.actor.stop()
+                    except pykka.ActorDeadError:
+                        pass
+                    record.actor = None
+                record.cloud_node = None
+
+    def _register_arvados_node(self, key, arv_node):
+        self._logger.info("Registering new Arvados node %s", key)
+        record = _ComputeNodeRecord(arvados_node=arv_node)
+        self.arvados_nodes.add(record)
+
+    def update_arvados_nodes(self, nodelist):
+        self._update_poll_time('arvados_nodes')
+        for key, node in self.arvados_nodes.update_from(nodelist):
+            self._register_arvados_node(key, node)
+        self.try_pairing()
+
+    def try_pairing(self):
+        for record in self.cloud_nodes.unpaired():
+            for arv_rec in self.arvados_nodes.unpaired():
+                if record.actor is not None and record.actor.offer_arvados_pair(arv_rec.arvados_node).get():
+                    self._pair_nodes(record, arv_rec.arvados_node)
+                    break
+
+    def _nodes_booting(self, size):
+        s = sum(1
+                for c in self.booting.iterkeys()
+                if size is None or self.sizes_booting[c].id == size.id)
+        return s
+
+    def _node_states(self, size):
+        proxy_states = []
+        states = []
+        for rec in self.cloud_nodes.nodes.itervalues():
+            if size is None or rec.cloud_node.size.id == size.id:
+                if rec.shutdown_actor is None and rec.actor is not None:
+                    proxy_states.append(rec.actor.get_state())
+                else:
+                    states.append("shutdown")
+        return states + pykka.get_all(proxy_states)
+
+    def _update_tracker(self):
+        updates = {
+            k: 0
+            for k in status.tracker.keys()
+            if k.startswith('nodes_')
+        }
+        for s in self._node_states(size=None):
+            updates.setdefault('nodes_'+s, 0)
+            updates['nodes_'+s] += 1
+        updates['nodes_wish'] = len(self.last_wishlist)
+        updates['node_quota'] = self.node_quota
+        status.tracker.update(updates)
+
+    def _state_counts(self, size):
+        states = self._node_states(size)
+        counts = {
+            "booting": self._nodes_booting(size),
+            "unpaired": 0,
+            "busy": 0,
+            "idle": 0,
+            "fail": 0,
+            "down": 0,
+            "shutdown": 0
+        }
+        for s in states:
+            counts[s] = counts[s] + 1
+        return counts
+
+    def _nodes_up(self, counts):
+        up = counts["booting"] + counts["unpaired"] + counts["idle"] + counts["busy"]
+        return up
+
+    def _total_price(self):
+        cost = 0
+        cost += sum(self.sizes_booting[c].price
+                    for c in self.booting.iterkeys())
+        cost += sum(c.cloud_node.size.price
+                    for c in self.cloud_nodes.nodes.itervalues())
+        return cost
+
+    def _size_wishlist(self, size):
+        return sum(1 for c in self.last_wishlist if c.id == size.id)
+
+    def _nodes_wanted(self, size):
+        total_node_count = self._nodes_booting(None) + len(self.cloud_nodes)
+        under_min = self.min_nodes - total_node_count
+        over_max = total_node_count - self.node_quota
+        total_price = self._total_price()
+
+        counts = self._state_counts(size)
+
+        up_count = self._nodes_up(counts)
+        busy_count = counts["busy"]
+        wishlist_count = self._size_wishlist(size)
+
+        self._logger.info("%s: wishlist %i, up %i (booting %i, unpaired %i, idle %i, busy %i), down %i, shutdown %i", size.id,
+                          wishlist_count,
+                          up_count,
+                          counts["booting"],
+                          counts["unpaired"],
+                          counts["idle"],
+                          busy_count,
+                          counts["down"]+counts["fail"],
+                          counts["shutdown"])
+
+        if over_max >= 0:
+            return -over_max
+        elif under_min > 0 and size.id == self.min_cloud_size.id:
+            return under_min
+
+        wanted = wishlist_count - (up_count - busy_count)
+        if wanted > 0 and self.max_total_price and ((total_price + (size.price*wanted)) > self.max_total_price):
+            can_boot = int((self.max_total_price - total_price) / size.price)
+            if can_boot == 0:
+                self._logger.info("Not booting %s (price %s) because with it would exceed max_total_price of %s (current total_price is %s)",
+                                  size.id, size.price, self.max_total_price, total_price)
+            return can_boot
+        else:
+            return wanted
+
+    def _nodes_excess(self, size):
+        counts = self._state_counts(size)
+        up_count = self._nodes_up(counts)
+        if size.id == self.min_cloud_size.id:
+            up_count -= self.min_nodes
+        return up_count - (counts["busy"] + self._size_wishlist(size))
+
+    def update_server_wishlist(self, wishlist):
+        self._update_poll_time('server_wishlist')
+        requestable_nodes = self.node_quota - (self._nodes_booting(None) + len(self.cloud_nodes))
+        self.last_wishlist = wishlist[:requestable_nodes]
+        for size in reversed(self.server_calculator.cloud_sizes):
+            try:
+                nodes_wanted = self._nodes_wanted(size)
+                if nodes_wanted > 0:
+                    self._later.start_node(size)
+                elif (nodes_wanted < 0) and self.booting:
+                    self._later.stop_booting_node(size)
+            except Exception:
+                self._logger.exception("while calculating nodes wanted for size %s", getattr(size, "id", "(id not available)"))
+        try:
+            self._update_tracker()
+        except Exception:
+            self._logger.exception("while updating tracker")
+
+    def _check_poll_freshness(orig_func):
+        """Decorator to inhibit a method when poll information is stale.
+
+        This decorator checks the timestamps of all the poll information the
+        daemon has received.  The decorated method is only called if none
+        of the timestamps are considered stale.
+        """
+        @functools.wraps(orig_func)
+        def wrapper(self, *args, **kwargs):
+            now = time.time()
+            if all(now - t < self.poll_stale_after
+                   for t in self.last_polls.itervalues()):
+                return orig_func(self, *args, **kwargs)
+            else:
+                return None
+        return wrapper
+
+    @_check_poll_freshness
+    def start_node(self, cloud_size):
+        nodes_wanted = self._nodes_wanted(cloud_size)
+        if nodes_wanted < 1:
+            return None
+
+        if not self.cancel_node_shutdown(cloud_size):
+            arvados_node = self.arvados_nodes.find_stale_node(self.node_stale_after)
+            self._logger.info("Want %i more %s nodes.  Booting a node.",
+                              nodes_wanted, cloud_size.id)
+            new_setup = self._node_setup.start(
+                timer_actor=self._timer,
+                arvados_client=self._new_arvados(),
+                arvados_node=arvados_node,
+                cloud_client=self._new_cloud(),
+                cloud_size=self.server_calculator.find_size(cloud_size.id))
+            self.booting[new_setup.actor_urn] = new_setup.proxy()
+            self.sizes_booting[new_setup.actor_urn] = cloud_size
+
+            if arvados_node is not None:
+                self.arvados_nodes[arvados_node['uuid']].assignment_time = (
+                    time.time())
+            new_setup.tell_proxy().subscribe(self._later.node_setup_finished)
+
+        if nodes_wanted > 1:
+            self._later.start_node(cloud_size)
+
+    def _get_actor_attrs(self, actor, *attr_names):
+        return pykka.get_all([getattr(actor, name) for name in attr_names])
+
+    def node_setup_finished(self, setup_proxy):
+        # Called when a SetupActor has completed.
+        cloud_node, arvados_node, error = self._get_actor_attrs(
+            setup_proxy, 'cloud_node', 'arvados_node', 'error')
+        setup_proxy.stop()
+
+        if cloud_node is None:
+            # If cloud_node is None then the node create wasn't successful.
+            if error == dispatch.QuotaExceeded:
+                # We've hit a quota limit, so adjust node_quota to stop trying to
+                # boot new nodes until the node count goes down.
+                self.node_quota = len(self.cloud_nodes)
+                self._logger.warning("After quota exceeded error setting node quota to %s", self.node_quota)
+        else:
+            # Node creation succeeded.  Update cloud node list.
+            cloud_node._nodemanager_recently_booted = True
+            self._register_cloud_node(cloud_node)
+
+            # Different quota policies may be in force depending on the cloud
+            # provider, account limits, and the specific mix of nodes sizes
+            # that are already created.  If we are right at the quota limit,
+            # we want to probe to see if the last quota still applies or if we
+            # are allowed to create more nodes.
+            #
+            # For example, if the quota is actually based on core count, the
+            # quota might be 20 single-core machines or 10 dual-core machines.
+            # If we previously set node_quota to 10 dual core machines, but are
+            # now booting single core machines (actual quota 20), we want to
+            # allow the quota to expand so we don't get stuck at 10 machines
+            # forever.
+            if len(self.cloud_nodes) >= self.node_quota:
+                self.node_quota = len(self.cloud_nodes)+1
+                self._logger.warning("After successful boot setting node quota to %s", self.node_quota)
+
+        self.node_quota = min(self.node_quota, self.max_nodes)
+        del self.booting[setup_proxy.actor_ref.actor_urn]
+        del self.sizes_booting[setup_proxy.actor_ref.actor_urn]
+
+    @_check_poll_freshness
+    def stop_booting_node(self, size):
+        nodes_excess = self._nodes_excess(size)
+        if (nodes_excess < 1) or not self.booting:
+            return None
+        for key, node in self.booting.iteritems():
+            try:
+                if node and node.cloud_size.get().id == size.id and node.stop_if_no_cloud_node().get(2):
+                    del self.booting[key]
+                    del self.sizes_booting[key]
+                    if nodes_excess > 1:
+                        self._later.stop_booting_node(size)
+                    return
+            except pykka.Timeout:
+                pass
+
+    @_check_poll_freshness
+    def cancel_node_shutdown(self, size):
+        # Go through shutdown actors and see if there are any of the appropriate size that can be cancelled
+        for record in self.cloud_nodes.nodes.itervalues():
+            try:
+                if (record.shutdown_actor is not None and
+                        record.cloud_node.size.id == size.id and
+                        record.shutdown_actor.cancel_shutdown("Node size is in wishlist").get(2)):
+                    return True
+            except (pykka.ActorDeadError, pykka.Timeout):
+                pass
+        return False
+
+    def _begin_node_shutdown(self, node_actor, cancellable):
+        cloud_node_obj = node_actor.cloud_node.get()
+        cloud_node_id = cloud_node_obj.id
+        record = self.cloud_nodes[cloud_node_id]
+        if record.shutdown_actor is not None:
+            return None
+        shutdown = self._node_shutdown.start(
+            timer_actor=self._timer, cloud_client=self._new_cloud(),
+            arvados_client=self._new_arvados(),
+            node_monitor=node_actor.actor_ref, cancellable=cancellable)
+        record.shutdown_actor = shutdown.proxy()
+        shutdown.tell_proxy().subscribe(self._later.node_finished_shutdown)
+
+    @_check_poll_freshness
+    def node_can_shutdown(self, node_actor):
+        try:
+            if self._nodes_excess(node_actor.cloud_node.get().size) > 0:
+                self._begin_node_shutdown(node_actor, cancellable=True)
+            elif self.cloud_nodes.nodes.get(node_actor.cloud_node.get().id).arvados_node is None:
+                # Node is unpaired, which means it probably exceeded its booting
+                # grace period without a ping, so shut it down so we can boot a new
+                # node in its place.
+                self._begin_node_shutdown(node_actor, cancellable=False)
+            elif node_actor.in_state('down', 'fail').get():
+                # Node is down and unlikely to come back.
+                self._begin_node_shutdown(node_actor, cancellable=False)
+        except pykka.ActorDeadError as e:
+            # The monitor actor sends shutdown suggestions every time the
+            # node's state is updated, and these go into the daemon actor's
+            # message queue.  It's possible that the node has already been shut
+            # down (which shuts down the node monitor actor).  In that case,
+            # this message is stale and we'll get ActorDeadError when we try to
+            # access node_actor.  Log the error.
+            self._logger.debug("ActorDeadError in node_can_shutdown: %s", e)
+
+    def node_finished_shutdown(self, shutdown_actor):
+        try:
+            cloud_node, success = self._get_actor_attrs(
+                shutdown_actor, 'cloud_node', 'success')
+        except pykka.ActorDeadError:
+            return
+        cloud_node_id = cloud_node.id
+
+        try:
+            shutdown_actor.stop()
+        except pykka.ActorDeadError:
+            pass
+
+        try:
+            record = self.cloud_nodes[cloud_node_id]
+        except KeyError:
+            # The cloud node was already removed from the cloud node list,
+            # presumably while the destroy_node call was finishing.
+            return
+        record.shutdown_actor = None
+
+        if not success:
+            return
+
+        # Shutdown was successful, so stop the monitor actor, otherwise it
+        # will keep offering the node as a candidate for shutdown.
+        record.actor.stop()
+        record.actor = None
+
+        # If the node went from being booted to being shut down without ever
+        # appearing in the cloud node list, it will have the
+        # _nodemanager_recently_booted tag, so get rid of it so that the node
+        # can be forgotten completely.
+        if hasattr(record.cloud_node, "_nodemanager_recently_booted"):
+            del record.cloud_node._nodemanager_recently_booted
+
+    def shutdown(self):
+        self._logger.info("Shutting down after signal.")
+        self.poll_stale_after = -1  # Inhibit starting/stopping nodes
+
+        # Shut down pollers
+        self._server_wishlist_actor.stop()
+        self._arvados_nodes_actor.stop()
+        self._cloud_nodes_actor.stop()
+
+        # Clear cloud node list
+        self.update_cloud_nodes([])
+
+        # Stop setup actors unless they are in the middle of setup.
+        setup_stops = {key: node.stop_if_no_cloud_node()
+                       for key, node in self.booting.iteritems()}
+        self.booting = {key: self.booting[key]
+                        for key in setup_stops if not setup_stops[key].get()}
+        self._later.await_shutdown()
+
+    def await_shutdown(self):
+        if self.booting:
+            self._timer.schedule(time.time() + 1, self._later.await_shutdown)
+        else:
+            self.stop()
diff --git a/services/nodemanager/arvnodeman/jobqueue.py b/services/nodemanager/arvnodeman/jobqueue.py
new file mode 100644 (file)
index 0000000..7ca9c95
--- /dev/null
@@ -0,0 +1,255 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import logging
+import re
+import subprocess32 as subprocess
+
+import arvados.util
+
+from . import clientactor
+from .config import ARVADOS_ERRORS
+
+
+class ServerCalculator(object):
+    """Generate cloud server wishlists from an Arvados job queue.
+
+    Instantiate this class with a list of cloud node sizes you're willing to
+    use, plus keyword overrides from the configuration.  Then you can pass
+    job queues to servers_for_queue.  It will return a list of node sizes
+    that would best satisfy the jobs, choosing the cheapest size that
+    satisfies each job, and ignoring jobs that can't be satisfied.
+    """
+    class InvalidCloudSize(object):
+        """
+        Dummy CloudSizeWrapper-like class, to be used when a cloud node doesn't
+        have a recognizable arvados_node_size tag.
+        """
+        def __init__(self):
+            self.id = 'invalid'
+            self.name = 'invalid'
+            self.ram = 0
+            self.disk = 0
+            self.scratch = 0
+            self.cores = 0
+            self.bandwidth = 0
+            # price is multiplied by 1000 to get the node weight
+            # the maximum node weight is                  4294967280
+            # so use invalid node weight 4294967 * 1000 = 4294967000
+            self.price = 4294967
+            self.preemptible = False
+            self.extra = {}
+
+        def meets_constraints(self, **kwargs):
+            return False
+
+
+    class CloudSizeWrapper(object):
+        def __init__(self, real_size, node_mem_scaling, **kwargs):
+            self.real = real_size
+            for name in ['id', 'name', 'ram', 'disk', 'bandwidth', 'price',
+                         'extra']:
+                setattr(self, name, getattr(self.real, name))
+            self.cores = kwargs.pop('cores')
+            # libcloud disk sizes are in GB, Arvados/SLURM are in MB
+            # multiply by 1000 instead of 1024 to err on low side
+            if self.disk is None:
+                self.disk = 0
+            self.scratch = self.disk * 1000
+            self.ram = int(self.ram * node_mem_scaling)
+            self.preemptible = False
+            for name, override in kwargs.iteritems():
+                if name == 'instance_type':
+                    continue
+                if not hasattr(self, name):
+                    raise ValueError("unrecognized size field '%s'" % (name,))
+                setattr(self, name, override)
+
+            if self.price is None:
+                raise ValueError("Required field 'price' is None")
+
+        def meets_constraints(self, **kwargs):
+            for name, want_value in kwargs.iteritems():
+                have_value = getattr(self, name)
+                if (have_value != 0) and (have_value < want_value):
+                    return False
+            return True
+
+
+    def __init__(self, server_list, max_nodes=None, max_price=None,
+                 node_mem_scaling=0.95):
+        self.cloud_sizes = [self.CloudSizeWrapper(s, node_mem_scaling, **kws)
+                            for s, kws in server_list]
+        self.cloud_sizes.sort(key=lambda s: s.price)
+        self.max_nodes = max_nodes or float('inf')
+        self.max_price = max_price or float('inf')
+        self.logger = logging.getLogger('arvnodeman.jobqueue')
+
+        self.logger.info("Using cloud node sizes:")
+        for s in self.cloud_sizes:
+            self.logger.info(str(s.__dict__))
+
+    @staticmethod
+    def coerce_int(x, fallback):
+        try:
+            return int(x)
+        except (TypeError, ValueError):
+            return fallback
+
+    def cloud_size_for_constraints(self, constraints):
+        specified_size = constraints.get('instance_type')
+        want_value = lambda key: self.coerce_int(constraints.get(key), 0)
+        wants = {'cores': want_value('min_cores_per_node'),
+                 'ram': want_value('min_ram_mb_per_node'),
+                 'scratch': want_value('min_scratch_mb_per_node')}
+        # EC2 node sizes are identified by id. GCE sizes are identified by name.
+        for size in self.cloud_sizes:
+            if (size.meets_constraints(**wants) and
+                    (specified_size is None or
+                     size.id == specified_size or
+                     size.name == specified_size)):
+                return size
+        return None
+
+    def servers_for_queue(self, queue):
+        servers = []
+        unsatisfiable_jobs = {}
+        for job in queue:
+            constraints = job['runtime_constraints']
+            want_count = max(1, self.coerce_int(constraints.get('min_nodes'), 1))
+            cloud_size = self.cloud_size_for_constraints(constraints)
+            if cloud_size is None:
+                unsatisfiable_jobs[job['uuid']] = (
+                    "Constraints cannot be satisfied by any node type")
+            elif (want_count > self.max_nodes):
+                unsatisfiable_jobs[job['uuid']] = (
+                    "Job's min_nodes constraint is greater than the configured "
+                    "max_nodes (%d)" % self.max_nodes)
+            elif (want_count*cloud_size.price <= self.max_price):
+                servers.extend([cloud_size] * want_count)
+            else:
+                unsatisfiable_jobs[job['uuid']] = (
+                    "Job's price (%d) is above system's max_price "
+                    "limit (%d)" % (want_count*cloud_size.price, self.max_price))
+        return (servers, unsatisfiable_jobs)
+
+    def cheapest_size(self):
+        return self.cloud_sizes[0]
+
+    def find_size(self, sizeid):
+        for s in self.cloud_sizes:
+            if s.id == sizeid:
+                return s
+        return self.InvalidCloudSize()
+
+
+class JobQueueMonitorActor(clientactor.RemotePollLoopActor):
+    """Actor to generate server wishlists from the job queue.
+
+    This actor regularly polls Arvados' job queue, and uses the provided
+    ServerCalculator to turn that into a list of requested node sizes.  That
+    list is sent to subscribers on every poll.
+    """
+
+    CLIENT_ERRORS = ARVADOS_ERRORS
+
+    def __init__(self, client, timer_actor, server_calc,
+                 jobs_queue, slurm_queue, *args, **kwargs):
+        super(JobQueueMonitorActor, self).__init__(
+            client, timer_actor, *args, **kwargs)
+        self.jobs_queue = jobs_queue
+        self.slurm_queue = slurm_queue
+        self._calculator = server_calc
+
+    @staticmethod
+    def coerce_to_mb(x):
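+        # Convert a SLURM memory figure such as "512M" or "16G" to megabytes.
+        # For example: "512M" -> 512, "16G" -> 16384.0, "1T" -> 1048576.0;
+        # a bare number like "1024" is already MB and is returned as an int.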
+        v, u = x[:-1], x[-1]
+        if u in ("M", "m"):
+            return int(v)
+        elif u in ("G", "g"):
+            return float(v) * 2**10
+        elif u in ("T", "t"):
+            return float(v) * 2**20
+        elif u in ("P", "p"):
+            return float(v) * 2**30
+        else:
+            return int(x)
+
+    def _send_request(self):
+        queuelist = []
+        if self.slurm_queue:
+            # cpus, memory, temporary disk space, reason, job name, feature constraints, priority
+            squeue_out = subprocess.check_output(["squeue", "--state=PENDING", "--noheader", "--format=%c|%m|%d|%r|%j|%f|%Q"])
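+            # One line of squeue output might look like (hypothetical job):
+            # "4|7168M|100G|Resources|zzzzz-dz642-xxxxxxxxxxxxxxx|instancetype=m4.xlarge|1000"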
+            for out in squeue_out.splitlines():
+                try:
+                    cpu, ram, disk, reason, jobname, features, priority = out.split("|", 6)
+                except ValueError:
+                    self._logger.warning("ignored malformed line in squeue output: %r", out)
+                    continue
+                if '-dz642-' not in jobname:
+                    continue
+                if not re.search(r'BadConstraints|ReqNodeNotAvail|Resources|Priority', reason):
+                    continue
+
+                for feature in features.split(','):
+                    m = re.match(r'instancetype=(.*)', feature)
+                    if not m:
+                        continue
+                    instance_type = m.group(1)
+                    # Ignore cpu/ram/scratch requirements, bring up
+                    # the requested node type.
+                    queuelist.append({
+                        "uuid": jobname,
+                        "runtime_constraints": {
+                            "instance_type": instance_type,
+                        },
+                        "priority": int(priority)
+                    })
+                    break
+                else:
+                    # No instance type specified. Choose a node type
+                    # to suit cpu/ram/scratch requirements.
+                    queuelist.append({
+                        "uuid": jobname,
+                        "runtime_constraints": {
+                            "min_cores_per_node": cpu,
+                            "min_ram_mb_per_node": self.coerce_to_mb(ram),
+                            "min_scratch_mb_per_node": self.coerce_to_mb(disk)
+                        },
+                        "priority": int(priority)
+                    })
+            queuelist.sort(key=lambda x: x.get('priority', 1), reverse=True)
+
+        if self.jobs_queue:
+            queuelist.extend(self._client.jobs().queue().execute()['items'])
+
+        return queuelist
+
+    def _got_response(self, queue):
+        server_list, unsatisfiable_jobs = self._calculator.servers_for_queue(queue)
+        # Cancel any job/container with unsatisfiable requirements, emitting
+        # a log explaining why.
+        for job_uuid, reason in unsatisfiable_jobs.iteritems():
+            try:
+                self._client.logs().create(body={
+                    'object_uuid': job_uuid,
+                    'event_type': 'stderr',
+                    'properties': {'text': reason},
+                }).execute()
+                # Cancel the job depending on its type
+                if arvados.util.container_uuid_pattern.match(job_uuid):
+                    subprocess.check_call(['scancel', '--name='+job_uuid])
+                elif arvados.util.job_uuid_pattern.match(job_uuid):
+                    self._client.jobs().cancel(uuid=job_uuid).execute()
+                else:
+                    raise Exception('Unknown job type')
+                self._logger.debug("Cancelled unsatisfiable job '%s'", job_uuid)
+            except Exception as error:
+                self._logger.error("Trying to cancel job '%s': %s",
+                                   job_uuid,
+                                   error)
+        self._logger.debug("Calculated wishlist: %s",
+                           ', '.join(s.id for s in server_list) or "(empty)")
+        return super(JobQueueMonitorActor, self)._got_response(server_list)
diff --git a/services/nodemanager/arvnodeman/launcher.py b/services/nodemanager/arvnodeman/launcher.py
new file mode 100644 (file)
index 0000000..34ea9ad
--- /dev/null
@@ -0,0 +1,171 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import argparse
+import logging
+import signal
+import sys
+import time
+
+import daemon
+import pykka
+import libcloud
+
+from . import config as nmconfig
+from . import status
+from .baseactor import WatchdogActor
+from .daemon import NodeManagerDaemonActor
+from .jobqueue import JobQueueMonitorActor, ServerCalculator
+from .nodelist import ArvadosNodeListMonitorActor, CloudNodeListMonitorActor
+from .timedcallback import TimedCallBackActor
+from ._version import __version__
+
+node_daemon = None
+watchdog = None
+
+def abort(msg, code=1):
+    print("arvados-node-manager: " + msg)
+    sys.exit(code)
+
+def parse_cli(args):
+    parser = argparse.ArgumentParser(
+        prog='arvados-node-manager',
+        description="Dynamically allocate Arvados cloud compute nodes")
+    parser.add_argument(
+        '--version', action='version',
+        version="%s %s" % (sys.argv[0], __version__),
+        help='Print version and exit.')
+    parser.add_argument(
+        '--foreground', action='store_true', default=False,
+        help="Run in the foreground.  Don't daemonize.")
+    parser.add_argument(
+        '--config', help="Path to configuration file")
+    return parser.parse_args(args)
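+# Typical invocation (the config path is illustrative):
+#   arvados-node-manager --foreground --config /etc/arvados-node-manager/config.ini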
+
+def load_config(path):
+    if not path:
+        abort("No --config file specified", 2)
+    config = nmconfig.NodeManagerConfig()
+    try:
+        with open(path) as config_file:
+            config.readfp(config_file)
+    except (IOError, OSError) as error:
+        abort("Error reading configuration file {}: {}".format(path, error))
+    return config
+
+def setup_logging(path, level, **sublevels):
+    handler = logging.FileHandler(path)
+    handler.setFormatter(logging.Formatter(
+            '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s',
+            '%Y-%m-%d %H:%M:%S'))
+    root_logger = logging.getLogger()
+    root_logger.addHandler(handler)
+    root_logger.setLevel(level)
+    for logger_name, sublevel in sublevels.iteritems():
+        sublogger = logging.getLogger(logger_name)
+        sublogger.setLevel(sublevel)
+    return root_logger
+
+def build_server_calculator(config):
+    cloud_size_list = config.node_sizes()
+    if not cloud_size_list:
+        abort("No valid node sizes configured")
+    return ServerCalculator(cloud_size_list,
+                            config.getint('Daemon', 'max_nodes'),
+                            config.getfloat('Daemon', 'max_total_price'),
+                            config.getfloat('Daemon', 'node_mem_scaling'))
+
+def launch_pollers(config, server_calculator):
+    poll_time = config.getfloat('Daemon', 'poll_time')
+    max_poll_time = config.getint('Daemon', 'max_poll_time')
+
+    cloudlist_poll_time = config.getfloat('Daemon', 'cloudlist_poll_time') or poll_time
+    nodelist_poll_time = config.getfloat('Daemon', 'nodelist_poll_time') or poll_time
+    wishlist_poll_time = config.getfloat('Daemon', 'wishlist_poll_time') or poll_time
+
+    timer = TimedCallBackActor.start(poll_time / 10.0).tell_proxy()
+    cloud_node_poller = CloudNodeListMonitorActor.start(
+        config.new_cloud_client(), timer, server_calculator, cloudlist_poll_time, max_poll_time).tell_proxy()
+    arvados_node_poller = ArvadosNodeListMonitorActor.start(
+        config.new_arvados_client(), timer, nodelist_poll_time, max_poll_time).tell_proxy()
+    job_queue_poller = JobQueueMonitorActor.start(
+        config.new_arvados_client(), timer, server_calculator,
+        config.getboolean('Arvados', 'jobs_queue'),
+        config.getboolean('Arvados', 'slurm_queue'),
+        wishlist_poll_time, max_poll_time
+    ).tell_proxy()
+    return timer, cloud_node_poller, arvados_node_poller, job_queue_poller
+
+_caught_signals = {}
+def shutdown_signal(signal_code, frame):
+    current_count = _caught_signals.get(signal_code, 0)
+    _caught_signals[signal_code] = current_count + 1
+    if node_daemon is None:
+        pykka.ActorRegistry.stop_all()
+        sys.exit(-signal_code)
+    elif current_count == 0:
+        watchdog.stop()
+        node_daemon.shutdown()
+    elif current_count == 1:
+        pykka.ActorRegistry.stop_all()
+    else:
+        sys.exit(-signal_code)
+
+def main(args=None):
+    global node_daemon, watchdog
+    args = parse_cli(args)
+    config = load_config(args.config)
+
+    if not args.foreground:
+        daemon.DaemonContext().open()
+    for sigcode in [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM]:
+        signal.signal(sigcode, shutdown_signal)
+
+    status.Server(config).start()
+
+    try:
+        root_logger = setup_logging(config.get('Logging', 'file'), **config.log_levels())
+        root_logger.info("%s %s started, libcloud %s", sys.argv[0], __version__, libcloud.__version__)
+        node_setup, node_shutdown, node_update, node_monitor = \
+            config.dispatch_classes()
+        server_calculator = build_server_calculator(config)
+        timer, cloud_node_poller, arvados_node_poller, job_queue_poller = \
+            launch_pollers(config, server_calculator)
+        cloud_node_updater = node_update.start(config.new_cloud_client, timer).tell_proxy()
+        node_daemon = NodeManagerDaemonActor.start(
+            job_queue_poller, arvados_node_poller, cloud_node_poller,
+            cloud_node_updater, timer,
+            config.new_arvados_client, config.new_cloud_client,
+            config.shutdown_windows(),
+            server_calculator,
+            config.getint('Daemon', 'min_nodes'),
+            config.getint('Daemon', 'max_nodes'),
+            config.getint('Daemon', 'poll_stale_after'),
+            config.getint('Daemon', 'boot_fail_after'),
+            config.getint('Daemon', 'node_stale_after'),
+            node_setup, node_shutdown, node_monitor,
+            max_total_price=config.getfloat('Daemon', 'max_total_price'),
+            consecutive_idle_count=config.getint('Daemon', 'consecutive_idle_count'),).tell_proxy()
+
+        watchdog = WatchdogActor.start(config.getint('Daemon', 'watchdog'),
+                            cloud_node_poller.actor_ref,
+                            arvados_node_poller.actor_ref,
+                            job_queue_poller.actor_ref,
+                            node_daemon.actor_ref)
+
+        signal.pause()
+        daemon_stopped = node_daemon.actor_ref.actor_stopped.is_set
+        while not daemon_stopped():
+            time.sleep(1)
+    except Exception:
+        logging.exception("Uncaught exception during setup")
+    finally:
+        pykka.ActorRegistry.stop_all()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/services/nodemanager/arvnodeman/nodelist.py b/services/nodemanager/arvnodeman/nodelist.py
new file mode 100644 (file)
index 0000000..0abb3b3
--- /dev/null
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import subprocess32 as subprocess
+
+from . import clientactor
+from . import config
+
+import arvados.util
+
+class ArvadosNodeListMonitorActor(clientactor.RemotePollLoopActor):
+    """Actor to poll the Arvados node list.
+
+    This actor regularly polls the list of Arvados node records,
+    augments it with the latest SLURM node info (`sinfo`), and sends
+    it to subscribers.
+    """
+
+    def is_common_error(self, exception):
+        return isinstance(exception, config.ARVADOS_ERRORS)
+
+    def _item_key(self, node):
+        return node['uuid']
+
+    def _send_request(self):
+        nodelist = arvados.util.list_all(self._client.nodes().list)
+
+        # node hostname, state, feature list
+        sinfo_out = subprocess.check_output(["sinfo", "--noheader", "--format=%n|%t|%f"])
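+        # Example sinfo lines (hypothetical hosts):
+        #   "compute0|idle|instancetype=m4.large"
+        #   "compute1|alloc|(null)"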
+        nodestates = {}
+        nodefeatures = {}
+        for out in sinfo_out.splitlines():
+            try:
+                nodename, state, features = out.split("|", 3)
+            except ValueError:
+                continue
+            if state in ('alloc', 'alloc*',
+                         'comp',  'comp*',
+                         'mix',   'mix*',
+                         'drng',  'drng*'):
+                nodestates[nodename] = 'busy'
+            elif state in ('idle', 'fail'):
+                nodestates[nodename] = state
+            else:
+                nodestates[nodename] = 'down'
+            if features != "(null)":
+                nodefeatures[nodename] = features
+
+        for n in nodelist:
+            if n["slot_number"] and n["hostname"] and n["hostname"] in nodestates:
+                n["crunch_worker_state"] = nodestates[n["hostname"]]
+            else:
+                n["crunch_worker_state"] = 'down'
+            n["slurm_node_features"] = nodefeatures.get(n["hostname"], "")
+
+        return nodelist
+
+class CloudNodeListMonitorActor(clientactor.RemotePollLoopActor):
+    """Actor to poll the cloud node list.
+
+    This actor regularly polls the cloud to get a list of running compute
+    nodes, and sends it to subscribers.
+    """
+
+    def __init__(self, client, timer_actor, server_calc, *args, **kwargs):
+        super(CloudNodeListMonitorActor, self).__init__(
+            client, timer_actor, *args, **kwargs)
+        self._calculator = server_calc
+
+    def is_common_error(self, exception):
+        return isinstance(exception, config.CLOUD_ERRORS)
+
+    def _item_key(self, node):
+        return node.id
+
+    def _send_request(self):
+        nodes = self._client.list_nodes()
+        for n in nodes:
+            # Replace the libcloud NodeSize object with a compatible
+            # CloudSizeWrapper object, which merges the size info reported by
+            # the cloud with size information from the configuration file.
+            n.size = self._calculator.find_size(n.extra['arvados_node_size'])
+        return nodes
diff --git a/services/nodemanager/arvnodeman/status.py b/services/nodemanager/arvnodeman/status.py
new file mode 100644 (file)
index 0000000..1e18996
--- /dev/null
@@ -0,0 +1,129 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+from future import standard_library
+
+import http.server
+import time
+import json
+import logging
+import socketserver
+import threading
+
+from ._version import __version__
+
+_logger = logging.getLogger('status.Handler')
+
+
+class Server(socketserver.ThreadingMixIn, http.server.HTTPServer, object):
+    def __init__(self, config):
+        port = config.getint('Manage', 'port')
+        self.enabled = port >= 0
+        if not self.enabled:
+            _logger.warning("Management server disabled. "+
+                            "Use [Manage] config section to enable.")
+            return
+        self._config = config
+        self._tracker = tracker
+        self._tracker.update({'config_max_nodes': config.getint('Daemon', 'max_nodes')})
+        super(Server, self).__init__(
+            (config.get('Manage', 'address'), port), Handler)
+        self._thread = threading.Thread(target=self.serve_forever)
+        self._thread.daemon = True
+
+    def start(self):
+        if self.enabled:
+            self._thread.start()
+
+
+class Handler(http.server.BaseHTTPRequestHandler, object):
+    def do_GET(self):
+        if self.path == '/status.json':
+            self.send_response(200)
+            self.send_header('Content-type', 'application/json')
+            self.end_headers()
+            self.wfile.write(tracker.get_json())
+        elif self.path == '/_health/ping':
+            code, msg = self.check_auth()
+
+            if code != 200:
+                self.send_response(code)
+                self.wfile.write(msg)
+            else:
+                self.send_response(200)
+                self.send_header('Content-type', 'application/json')
+                self.end_headers()
+                self.wfile.write(json.dumps({"health": "OK"}))
+        else:
+            self.send_response(404)
+
+    def log_message(self, fmt, *args, **kwargs):
+        _logger.info(fmt, *args, **kwargs)
+
+    def check_auth(self):
+        mgmt_token = self.server._config.get('Manage', 'ManagementToken')
+        auth_header = self.headers.get('Authorization', None)
+
+        if mgmt_token == '':
+            return 404, "disabled"
+        elif auth_header is None:
+            return 401, "authorization required"
+        elif auth_header != 'Bearer ' + mgmt_token:
+            return 403, "authorization error"
+        return 200, ""
+
+class Tracker(object):
+    def __init__(self):
+        self._mtx = threading.Lock()
+        self._latest = {
+            'list_nodes_errors': 0,
+            'create_node_errors': 0,
+            'destroy_node_errors': 0,
+            'boot_failures': 0,
+            'actor_exceptions': 0
+        }
+        self._version = {'Version' : __version__}
+        self._idle_nodes = {}
+
+    def get_json(self):
+        with self._mtx:
+            times = {'idle_times' : {}}
+            now = time.time()
+            for node, ts in self._idle_nodes.items():
+                times['idle_times'][node] = int(now - ts)
+            return json.dumps(
+                dict(dict(self._latest, **self._version), **times))
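+    # get_json() output is shaped roughly like this (values illustrative):
+    #   {"list_nodes_errors": 0, "create_node_errors": 0,
+    #    "destroy_node_errors": 0, "boot_failures": 0,
+    #    "actor_exceptions": 0, "Version": "1.1.0",
+    #    "idle_times": {"compute0": 42}}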
+
+    def keys(self):
+        with self._mtx:
+            return self._latest.keys()
+
+    def get(self, key):
+        with self._mtx:
+            return self._latest.get(key)
+
+    def update(self, updates):
+        with self._mtx:
+            self._latest.update(updates)
+
+    def counter_add(self, counter, value=1):
+        with self._mtx:
+            self._latest.setdefault(counter, 0)
+            self._latest[counter] += value
+
+    def idle_in(self, nodename):
+        with self._mtx:
+            if self._idle_nodes.get(nodename):
+                return
+            self._idle_nodes[nodename] = time.time()
+
+    def idle_out(self, nodename):
+        with self._mtx:
+            try:
+                del self._idle_nodes[nodename]
+            except KeyError:
+                pass
+
+tracker = Tracker()
diff --git a/services/nodemanager/arvnodeman/test/__init__.py b/services/nodemanager/arvnodeman/test/__init__.py
new file mode 100644 (file)
index 0000000..d3ac1c2
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+
diff --git a/services/nodemanager/arvnodeman/test/fake_driver.py b/services/nodemanager/arvnodeman/test/fake_driver.py
new file mode 100644 (file)
index 0000000..2a592f9
--- /dev/null
@@ -0,0 +1,226 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import re
+import urllib
+import ssl
+import time
+
+from arvnodeman.computenode import ARVADOS_TIMEFMT
+
+from libcloud.compute.base import NodeSize, Node, NodeDriver, NodeState, NodeImage
+from libcloud.compute.drivers.gce import GCEDiskType
+from libcloud.common.exceptions import BaseHTTPError, RateLimitReachedError
+
+all_nodes = []
+create_calls = 0
+quota = 2
+
+class FakeDriver(NodeDriver):
+    def __init__(self, *args, **kwargs):
+        self.name = "FakeDriver"
+
+    def list_sizes(self, **kwargs):
+        return [NodeSize("Standard_D3", "Standard_D3", 3500, 200, 0, 0, self),
+                NodeSize("Standard_D4", "Standard_D4", 7000, 400, 0, 0, self)]
+
+    def list_nodes(self, **kwargs):
+        return all_nodes
+
+    def create_node(self, name=None,
+                    size=None,
+                    image=None,
+                    auth=None,
+                    ex_storage_account=None,
+                    ex_customdata=None,
+                    ex_resource_group=None,
+                    ex_user_name=None,
+                    ex_tags=None,
+                    ex_metadata=None,
+                    ex_network=None,
+                    ex_userdata=None):
+        global all_nodes, create_calls
+        create_calls += 1
+        nodeid = "node%i" % create_calls
+        if ex_tags is None:
+            ex_tags = {}
+        ex_tags.update({'arvados_node_size': size.id})
+        n = Node(nodeid, nodeid, NodeState.RUNNING, [], [], self, size=size, extra={"tags": ex_tags})
+        all_nodes.append(n)
+        if ex_customdata:
+            ping_url = re.search(r"echo '(.*)' > /var/tmp/arv-node-data/arv-ping-url", ex_customdata).group(1)
+        if ex_userdata:
+            ping_url = ex_userdata
+        elif ex_metadata:
+            ping_url = ex_metadata["arv-ping-url"]
+        ping_url += "&instance_id=" + nodeid
+        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+        ctx.verify_mode = ssl.CERT_NONE
+        f = urllib.urlopen(ping_url, "", context=ctx)
+        f.close()
+        return n
+
+    def destroy_node(self, cloud_node):
+        global all_nodes
+        all_nodes = [n for n in all_nodes if n.id != cloud_node.id]
+        return True
+
+    def get_image(self, img):
+        pass
+
+    def ex_create_tags(self, cloud_node, tags):
+        pass
+
+class QuotaDriver(FakeDriver):
+    def create_node(self, name=None,
+                    size=None,
+                    image=None,
+                    auth=None,
+                    ex_storage_account=None,
+                    ex_customdata=None,
+                    ex_resource_group=None,
+                    ex_user_name=None,
+                    ex_tags=None,
+                    ex_network=None):
+        global all_nodes, create_calls, quota
+        if len(all_nodes) >= quota:
+            raise BaseHTTPError(503, "Quota exceeded")
+        else:
+            return super(QuotaDriver, self).create_node(name=name,
+                    size=size,
+                    image=image,
+                    auth=auth,
+                    ex_storage_account=ex_storage_account,
+                    ex_customdata=ex_customdata,
+                    ex_resource_group=ex_resource_group,
+                    ex_user_name=ex_user_name,
+                    ex_tags=ex_tags,
+                    ex_network=ex_network)
+
+    def destroy_node(self, cloud_node):
+        global all_nodes, quota
+        all_nodes = [n for n in all_nodes if n.id != cloud_node.id]
+        if len(all_nodes) == 0:
+            quota = 4
+        return True
+
+class FailingDriver(FakeDriver):
+    def create_node(self, name=None,
+                    size=None,
+                    image=None,
+                    auth=None,
+                    ex_storage_account=None,
+                    ex_customdata=None,
+                    ex_resource_group=None,
+                    ex_user_name=None,
+                    ex_tags=None,
+                    ex_network=None):
+        raise Exception("nope")
+
+class RetryDriver(FakeDriver):
+    def create_node(self, name=None,
+                    size=None,
+                    image=None,
+                    auth=None,
+                    ex_storage_account=None,
+                    ex_customdata=None,
+                    ex_resource_group=None,
+                    ex_user_name=None,
+                    ex_tags=None,
+                    ex_network=None):
+        global create_calls
+        create_calls += 1
+        if create_calls < 2:
+            raise RateLimitReachedError(429, "Rate limit exceeded",
+                                        headers={'retry-after': '2'})
+        elif create_calls < 3:
+            raise BaseHTTPError(429, "Rate limit exceeded",
+                                {'retry-after': '1'})
+        else:
+            return super(RetryDriver, self).create_node(name=name,
+                    size=size,
+                    image=image,
+                    auth=auth,
+                    ex_storage_account=ex_storage_account,
+                    ex_customdata=ex_customdata,
+                    ex_resource_group=ex_resource_group,
+                    ex_user_name=ex_user_name,
+                    ex_tags=ex_tags,
+                    ex_network=ex_network)
+
+class FakeAwsDriver(FakeDriver):
+
+    def create_node(self, name=None,
+                    size=None,
+                    image=None,
+                    auth=None,
+                    ex_userdata=None,
+                    ex_metadata=None,
+                    ex_blockdevicemappings=None):
+        n = super(FakeAwsDriver, self).create_node(name=name,
+                                                      size=size,
+                                                      image=image,
+                                                      auth=auth,
+                                                      ex_metadata=ex_metadata,
+                                                      ex_userdata=ex_userdata)
+        n.extra = {
+            "launch_time": time.strftime(ARVADOS_TIMEFMT, time.gmtime())[:-1],
+            "tags" : {
+                "arvados_node_size": size.id
+            }
+        }
+        return n
+
+    def list_sizes(self, **kwargs):
+        return [NodeSize("m3.xlarge", "Extra Large Instance", 3500, 80, 0, 0, self),
+                NodeSize("m4.xlarge", "Extra Large Instance", 3500, 0, 0, 0, self),
+                NodeSize("m4.2xlarge", "Double Extra Large Instance", 7000, 0, 0, 0, self)]
+
+
+class FakeGceDriver(FakeDriver):
+
+    def create_node(self, name=None,
+                    size=None,
+                    image=None,
+                    auth=None,
+                    external_ip=None,
+                    ex_metadata=None,
+                    ex_tags=None,
+                    ex_disks_gce_struct=None):
+        n = super(FakeGceDriver, self).create_node(name=name,
+                                                   size=size,
+                                                   image=image,
+                                                   auth=auth,
+                                                   ex_metadata=ex_metadata)
+        n.extra = {
+            "metadata": {
+                "items": [{"key": k, "value": v} for k,v in ex_metadata.iteritems()],
+                "arvados_node_size": size.id
+            },
+            "zone": "fake"
+        }
+        return n
+
+    def list_images(self, ex_project=None):
+        return [NodeImage("fake_image_id", "fake_image_id", self)]
+
+    def list_sizes(self, **kwargs):
+        return [NodeSize("n1-standard-1", "Standard", 3750, None, 0, 0, self),
+                NodeSize("n1-standard-2", "Double standard", 7500, None, 0, 0, self)]
+
+    def ex_list_disktypes(self, zone=None):
+        return [GCEDiskType("pd-standard", "pd-standard", zone, self,
+                            extra={"selfLink": "pd-standard"}),
+                GCEDiskType("local-ssd", "local-ssd", zone, self,
+                            extra={"selfLink": "local-ssd"})]
+
+    def ex_get_node(self, name, zone=None):
+        global all_nodes
+        for n in all_nodes:
+            if n.id == name:
+                return n
+        return None
+
+    def ex_set_node_metadata(self, n, items):
+        n.extra["metadata"]["items"] = items
diff --git a/services/nodemanager/arvnodeman/timedcallback.py b/services/nodemanager/arvnodeman/timedcallback.py
new file mode 100644 (file)
index 0000000..e7e3f25
--- /dev/null
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import heapq
+import time
+
+import pykka
+
+from .config import actor_class
+
+class TimedCallBackActor(actor_class):
+    """Send messages to other actors on a schedule.
+
+    Other actors can call the schedule() method to schedule delivery of a
+    message at a later time.  This actor runs the necessary event loop for
+    delivery.
+    """
+    def __init__(self, max_sleep=1, timefunc=None):
+        super(TimedCallBackActor, self).__init__()
+        self._proxy = self.actor_ref.tell_proxy()
+        self.messages = []
+        self.max_sleep = max_sleep
+        if timefunc is None:
+            self._timefunc = time.time
+        else:
+            self._timefunc = timefunc
+
+    def schedule(self, delivery_time, receiver, *args, **kwargs):
+        if not self.messages:
+            self._proxy.deliver()
+        heapq.heappush(self.messages, (delivery_time, receiver, args, kwargs))
+
+    def deliver(self):
+        if not self.messages:
+            return
+        til_next = self.messages[0][0] - self._timefunc()
+        if til_next <= 0:
+            t, receiver, args, kwargs = heapq.heappop(self.messages)
+            try:
+                receiver(*args, **kwargs)
+            except pykka.ActorDeadError:
+                pass
+        else:
+            time.sleep(min(til_next, self.max_sleep))
+        self._proxy.deliver()
diff --git a/services/nodemanager/bin/arvados-node-manager b/services/nodemanager/bin/arvados-node-manager
new file mode 100755 (executable)
index 0000000..72e0831
--- /dev/null
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+from arvnodeman.launcher import main
+main()
diff --git a/services/nodemanager/doc/azure.example.cfg b/services/nodemanager/doc/azure.example.cfg
new file mode 100644 (file)
index 0000000..8ba6801
--- /dev/null
@@ -0,0 +1,202 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Azure configuration for Arvados Node Manager.
+# All times are in seconds unless specified otherwise.
+
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+#address = 0.0.0.0
+
+# Management server port number (default -1, server is disabled)
+#port = 8989
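+#
+# Example (illustrative address/port): once enabled, inspect internal state
+# with: curl http://127.0.0.1:8989/status.json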
+
+[Daemon]
+# The dispatcher can customize the start and stop procedure for
+# cloud nodes.  For example, the SLURM dispatcher drains nodes
+# through SLURM before shutting them down.
+#dispatcher = slurm
+
+# Node Manager will ensure that there are at least this many nodes running at
+# all times.  If node manager needs to start new idle nodes for the purpose of
+# satisfying min_nodes, it will use the cheapest node type.  However, depending
+# on usage patterns, it may also satisfy min_nodes by keeping alive some
+# more-expensive nodes.
+min_nodes = 0
+
+# Node Manager will not start any compute nodes when at least this
+# many are running.
+max_nodes = 8
+
+# Upper limit on rate of spending (in $/hr), will not boot additional nodes
+# if total price of already running nodes meets or exceeds this threshold.
+# default 0 means no limit.
+max_total_price = 0
+
+# Poll Azure nodes and Arvados for new information every N seconds.
+poll_time = 60
+
+# Polls have exponential backoff when services fail to respond.
+# This is the longest time to wait between polls.
+max_poll_time = 300
+
+# If Node Manager can't successfully poll a service for this long,
+# it will never start or stop compute nodes, on the assumption that its
+# information is too outdated.
+poll_stale_after = 600
+
+# If Node Manager boots a cloud node, and it does not pair with an Arvados
+# node before this long, assume that there was a cloud bootstrap failure and
+# shut it down.  Note that normal shutdown windows apply (see the Cloud
+# section), so this should be shorter than the first shutdown window value.
+boot_fail_after = 1800
+
+# "Node stale time" affects two related behaviors.
+# 1. If a compute node has been running for at least this long, but it
+# isn't paired with an Arvados node, do not shut it down, but leave it alone.
+# This prevents the node manager from shutting down a node that might
+# actually be doing work, but is having temporary trouble contacting the
+# API server.
+# 2. When the Node Manager starts a new compute node, it will try to reuse
+# an Arvados node that hasn't been updated for this long.
+node_stale_after = 14400
+
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown.  Node status is checked
+# each poll period, and node can go idle at any point during a poll
+# period (meaning a node could be reported as idle that has only been
+# idle for 1 second).  With a 60 second poll period, three consecutive
+# status updates of "idle" suggests the node has been idle at least
+# 121 seconds.
+consecutive_idle_count = 3
+
+# Scaling factor to be applied to nodes' available RAM size. Usually there's a
+# variable discrepancy between the advertised RAM value on cloud nodes and the
+# actual amount available.
+# If not set, this value will be set to 0.95
+node_mem_scaling = 0.95
+
+# File path for Certificate Authorities
+certs_file = /etc/ssl/certs/ca-certificates.crt
+
+
+[Logging]
+# Log file path
+file = /var/log/arvados/node-manager.log
+
+# Log level for most Node Manager messages.
+# Choose one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.
+# WARNING lets you know when polling a service fails.
+# INFO additionally lets you know when a compute node is started or stopped.
+level = INFO
+
+# You can also set different log levels for specific libraries.
+# Pykka is the Node Manager's actor library.
+# Setting this to DEBUG will display tracebacks for uncaught
+# exceptions in the actors, but it's also very chatty.
+pykka = WARNING
+
+# Setting apiclient to INFO will log the URL of every Arvados API request.
+apiclient = WARNING
+
+[Arvados]
+host = zyxwv.arvadosapi.com
+token = ARVADOS_TOKEN
+timeout = 15
+jobs_queue = yes   # Get work requests from the Arvados jobs queue (jobs API)
+slurm_queue = yes  # Get work requests from squeue (containers API)
+
+# Accept an untrusted SSL certificate from the API server?
+insecure = no
+
+[Cloud]
+provider = azure
+
+# Shutdown windows define periods of time when a node may and may not be shut
+# down.  These are windows in full minutes, separated by commas.  Counting from
+# the time the node is booted, the node WILL NOT shut down for N1 minutes; then
+# it MAY shut down for N2 minutes; then it WILL NOT shut down for N3 minutes;
+# and so on.  For example, "20, 999999" means the node may shut down between
+# the 20th and 999999th minutes of uptime.
+# Azure bills by the minute, so it makes sense to aggressively shut down idle
+# nodes.  Specify at least two windows.  You can add as many as you need beyond
+# that.
+shutdown_windows = 20, 999999
+
+[Cloud Credentials]
+# Use "azure account list" with the azure CLI to get these values.
+tenant_id = 00000000-0000-0000-0000-000000000000
+subscription_id = 00000000-0000-0000-0000-000000000000
+
+# The following directions are based on
+# https://azure.microsoft.com/en-us/documentation/articles/resource-group-authenticate-service-principal/
+#
+# azure config mode arm
+# azure ad app create --name "<Your Application Display Name>" --home-page "<https://YourApplicationHomePage>" --identifier-uris "<https://YouApplicationUri>" --password <Your_Password>
+# azure ad sp create "<Application_Id>"
+# azure role assignment create --objectId "<Object_Id>" -o Owner -c /subscriptions/{subscriptionId}/
+#
+# Use <Application_Id> for "key" and the <Your_Password> for "secret"
+#
+key = 00000000-0000-0000-0000-000000000000
+secret = PASSWORD
+timeout = 60
+region = East US
+
+[Cloud List]
+# The resource group in which the compute node virtual machines will be created
+# and listed.
+ex_resource_group = ArvadosResourceGroup
+
+[Cloud Create]
+# The image id, in the form "Publisher:Offer:SKU:Version"
+image = Canonical:UbuntuServer:14.04.3-LTS:14.04.201508050
+
+# Path to a local ssh key file that will be used to provision new nodes.
+ssh_key = /home/arvadosuser/.ssh/id_rsa.pub
+
+# The account name for the admin user that will be provisioned on new nodes.
+ex_user_name = arvadosuser
+
+# The Azure storage account that will be used to store the node OS disk images.
+ex_storage_account = arvadosstorage
+
+# The virtual network the VMs will be associated with.
+ex_network = ArvadosNetwork
+
+# Optional subnet of the virtual network.
+#ex_subnet = default
+
+# Node tags
+tag_arvados-class = dynamic-compute
+tag_cluster = zyxwv
+
+# the API server to ping
+ping_host = hostname:port
+
+# You can define any number of Size sections to list Azure sizes you're willing
+# to use.  The Node Manager should boot the cheapest size(s) that can run jobs
+# in the queue.  You must also provide the price per hour, as the Azure
+# compute driver currently does not report prices.
+#
+# See https://azure.microsoft.com/en-us/pricing/details/virtual-machines/
+# for a list of known machine types that may be used as a Size parameter.
+#
+# Each size section MUST define the number of cores available in this
+# size class (since libcloud does not provide any consistent API for exposing
+# this setting).
+# You may also want to define the amount of scratch space (expressed
+# in GB) for Crunch jobs.  You can also override Microsoft's provided
+# data fields by setting them here.
+
+[Size Standard_D3]
+cores = 4
+price = 0.56
+
+[Size Standard_D4]
+cores = 8
+price = 1.12
diff --git a/services/nodemanager/doc/ec2.example.cfg b/services/nodemanager/doc/ec2.example.cfg
new file mode 100644 (file)
index 0000000..f5329eb
--- /dev/null
@@ -0,0 +1,202 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# EC2 configuration for Arvados Node Manager.
+# All times are in seconds unless specified otherwise.
+
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+#address = 0.0.0.0
+
+# Management server port number (default -1, server is disabled)
+#port = 8989
+
+[Daemon]
+# The dispatcher can customize the start and stop procedure for
+# cloud nodes.  For example, the SLURM dispatcher drains nodes
+# through SLURM before shutting them down.
+#dispatcher = slurm
+
+# Node Manager will ensure that there are at least this many nodes running at
+# all times.  If node manager needs to start new idle nodes for the purpose of
+# satisfying min_nodes, it will use the cheapest node type.  However, depending
+# on usage patterns, it may also satisfy min_nodes by keeping alive some
+# more-expensive nodes.
+min_nodes = 0
+
+# Node Manager will not start any compute nodes when at least this
+# many are running.
+max_nodes = 8
+
+# Upper limit on rate of spending (in $/hr), will not boot additional nodes
+# if total price of already running nodes meets or exceeds this threshold.
+# default 0 means no limit.
+max_total_price = 0
+
+# Poll EC2 nodes and Arvados for new information every N seconds.
+poll_time = 60
+
+# Polls have exponential backoff when services fail to respond.
+# This is the longest time to wait between polls.
+max_poll_time = 300
+
+# If Node Manager can't successfully poll a service for this long,
+# it will never start or stop compute nodes, on the assumption that its
+# information is too outdated.
+poll_stale_after = 600
+
+# If Node Manager boots a cloud node, and it does not pair with an Arvados
+# node before this long, assume that there was a cloud bootstrap failure and
+# shut it down.  Note that normal shutdown windows apply (see the Cloud
+# section), so this should be shorter than the first shutdown window value.
+boot_fail_after = 1800
+
+# "Node stale time" affects two related behaviors.
+# 1. If a compute node has been running for at least this long, but it
+# isn't paired with an Arvados node, do not shut it down, but leave it alone.
+# This prevents the node manager from shutting down a node that might
+# actually be doing work, but is having temporary trouble contacting the
+# API server.
+# 2. When the Node Manager starts a new compute node, it will try to reuse
+# an Arvados node that hasn't been updated for this long.
+node_stale_after = 14400
+
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown.  Node status is checked
+# each poll period, and node can go idle at any point during a poll
+# period (meaning a node could be reported as idle that has only been
+# idle for 1 second).  With a 60 second poll period, three consecutive
+# status updates of "idle" suggests the node has been idle at least
+# 121 seconds.
+consecutive_idle_count = 3
+
+# Scaling factor to be applied to nodes' available RAM size. Usually there's a
+# variable discrepancy between the advertised RAM value on cloud nodes and the
+# actual amount available.
+# If not set, this value will be set to 0.95
+node_mem_scaling = 0.95
+
+# File path for Certificate Authorities
+certs_file = /etc/ssl/certs/ca-certificates.crt
+
+[Logging]
+# Log file path
+file = /var/log/arvados/node-manager.log
+
+# Log level for most Node Manager messages.
+# Choose one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.
+# WARNING lets you know when polling a service fails.
+# INFO additionally lets you know when a compute node is started or stopped.
+level = INFO
+
+# You can also set different log levels for specific libraries.
+# Pykka is the Node Manager's actor library.
+# Setting this to DEBUG will display tracebacks for uncaught
+# exceptions in the actors, but it's also very chatty.
+pykka = WARNING
+
+# Setting apiclient to INFO will log the URL of every Arvados API request.
+apiclient = WARNING
+
+[Arvados]
+host = zyxwv.arvadosapi.com
+token = ARVADOS_TOKEN
+timeout = 15
+jobs_queue = yes   # Get work requests from the Arvados jobs queue (jobs API)
+slurm_queue = yes  # Get work requests from squeue (containers API)
+
+# Accept an untrusted SSL certificate from the API server?
+insecure = no
+
+[Cloud]
+provider = ec2
+
+# It's usually most cost-effective to shut down compute nodes during narrow
+# windows of time.  For example, EC2 bills each node by the hour, so the best
+# time to shut down a node is right before a new hour of uptime starts.
+# Shutdown windows define these periods of time.  These are windows in
+# full minutes, separated by commas.  Counting from the time the node is
+# booted, the node WILL NOT shut down for N1 minutes; then it MAY shut down
+# for N2 minutes; then it WILL NOT shut down for N3 minutes; and so on.
+# For example, "54, 5, 1" means the node may shut down from the 54th to the
+# 59th minute of each hour of uptime.
+# Specify at least two windows.  You can add as many as you need beyond that.
+shutdown_windows = 54, 5, 1
+
+[Cloud Credentials]
+key = KEY
+secret = SECRET_KEY
+region = us-east-1
+timeout = 60
+
+[Cloud List]
+# This section defines filters that find compute nodes.
+# Tags that you specify here will automatically be added to nodes you create.
+# Replace colons in Amazon filters with underscores
+# (e.g., write "tag:mytag" as "tag_mytag").
+instance-state-name = running
+tag_arvados-class = dynamic-compute
+tag_cluster = zyxwv
+
+[Cloud Create]
+# New compute nodes will send pings to Arvados at this host.
+# You may specify a port, and use brackets to disambiguate IPv6 addresses.
+ping_host = hostname:port
+
+# Give the name of an SSH key on AWS...
+ex_keyname = string
+
+# ... or a file path for an SSH key that can log in to the compute node.
+# (One or the other, not both.)
+# ssh_key = path
+
+# The EC2 IDs of the image and subnet compute nodes should use.
+image_id = idstring
+subnet_id = idstring
+
+# Comma-separated EC2 IDs for the security group(s) assigned to each
+# compute node.
+security_groups = idstring1, idstring2
+
+# Apply an Instance Profile ARN to the newly created compute nodes
+# For more info, see:
+# https://aws.amazon.com/premiumsupport/knowledge-center/iam-policy-restrict-vpc/
+# ex_iamprofile = arn:aws:iam::ACCOUNTNUMBER:instance-profile/ROLENAME
+
+
+# You can define any number of Size sections to list EC2 sizes you're
+# willing to use.  The Node Manager should boot the cheapest size(s) that
+# can run jobs in the queue.
+#
+# Each size section MUST define the number of cores available in this
+# size class (since libcloud does not provide any consistent API for exposing
+# this setting).
+# You may also want to define the amount of scratch space (expressed
+# in GB) for Crunch jobs.  You can also override Amazon's provided
+# data fields (such as price per hour) by setting them here.
+#
+# Additionally, you can ask for a preemptible instance (AWS's spot instance)
+# by adding the appropriate boolean configuration flag. If you want to have
+# both spot & reserved versions of the same size, you can do so by renaming
+# the Size section and specifying the instance type inside it.
+
+[Size m4.large]
+cores = 2
+price = 0.126
+scratch = 100
+
+[Size m4.large.spot]
+instance_type = m4.large
+preemptible = true
+cores = 2
+price = 0.126
+scratch = 100
+
+[Size m4.xlarge]
+cores = 4
+price = 0.252
+scratch = 100
diff --git a/services/nodemanager/doc/gce.example.cfg b/services/nodemanager/doc/gce.example.cfg
new file mode 100644 (file)
index 0000000..acd3fd1
--- /dev/null
@@ -0,0 +1,187 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Google Compute Engine configuration for Arvados Node Manager.
+# All times are in seconds unless specified otherwise.
+
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+#address = 0.0.0.0
+
+# Management server port number (default -1, server is disabled)
+#port = 8989
+
+[Daemon]
+# Node Manager will ensure that there are at least this many nodes running at
+# all times.  If node manager needs to start new idle nodes for the purpose of
+# satisfying min_nodes, it will use the cheapest node type.  However, depending
+# on usage patterns, it may also satisfy min_nodes by keeping alive some
+# more-expensive nodes.
+min_nodes = 0
+
+# Node Manager will not start any compute nodes when at least this
+# many are running.
+max_nodes = 8
+
+# Poll compute nodes and Arvados for new information every N seconds.
+poll_time = 60
+
+# Upper limit on rate of spending (in $/hr), will not boot additional nodes
+# if total price of already running nodes meets or exceeds this threshold.
+# default 0 means no limit.
+max_total_price = 0
+
+# Polls have exponential backoff when services fail to respond.
+# This is the longest time to wait between polls.
+max_poll_time = 300
+
+# If Node Manager can't successfully poll a service for this long,
+# it will never start or stop compute nodes, on the assumption that its
+# information is too outdated.
+poll_stale_after = 600
+
+# "Node stale time" affects two related behaviors.
+# 1. If a compute node has been running for at least this long, but it
+# isn't paired with an Arvados node, do not shut it down, but leave it alone.
+# This prevents the node manager from shutting down a node that might
+# actually be doing work, but is having temporary trouble contacting the
+# API server.
+# 2. When the Node Manager starts a new compute node, it will try to reuse
+# an Arvados node that hasn't been updated for this long.
+node_stale_after = 14400
+
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown.  Node status is checked
+# each poll period, and a node can go idle at any point during a poll
+# period (so a node reported as idle may have been idle for as little
+# as one second).  With a 60 second poll period, three consecutive
+# "idle" status updates suggest the node has been idle for at least
+# 121 seconds.
+consecutive_idle_count = 3
+
+# Scaling factor to be applied to nodes' available RAM size.  Usually there's
+# a variable discrepancy between the advertised RAM value on cloud nodes and
+# the actual amount available.
+# If not set, this value defaults to 0.95.
+node_mem_scaling = 0.95
+
+# File path for Certificate Authorities
+certs_file = /etc/ssl/certs/ca-certificates.crt
+
+[Logging]
+# Log file path
+file = /var/log/arvados/node-manager.log
+
+# Log level for most Node Manager messages.
+# Choose one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.
+# WARNING lets you know when polling a service fails.
+# INFO additionally lets you know when a compute node is started or stopped.
+level = INFO
+
+# You can also set different log levels for specific libraries.
+# Pykka is the Node Manager's actor library.
+# Setting this to DEBUG will display tracebacks for uncaught
+# exceptions in the actors, but it's also very chatty.
+pykka = WARNING
+
+# Setting apiclient to INFO will log the URL of every Arvados API request.
+apiclient = WARNING
+
+[Arvados]
+host = zyxwv.arvadosapi.com
+token = ARVADOS_TOKEN
+timeout = 15
+jobs_queue = yes   # Get work requests from the Arvados jobs queue (jobs API)
+slurm_queue = yes  # Get work requests from squeue (containers API)
+
+# Accept an untrusted SSL certificate from the API server?
+insecure = no
+
+[Cloud]
+provider = gce
+
+# Shutdown windows define periods of time when a node may and may not
+# be shut down.  These are windows in full minutes, separated by
+# commas.  Counting from the time the node is booted, the node WILL
+# NOT shut down for N1 minutes; then it MAY shut down for N2 minutes;
+# then it WILL NOT shut down for N3 minutes; and so on.  For example,
+# "54, 5, 1" means the node may shut down from the 54th to the 59th
+# minute of each hour of uptime.
+# GCE bills by the minute, and does not provide information about when
+# a node booted.  Node Manager will store this information in metadata
+# when it boots a node; if that information is not available, it will
+# assume the node booted at the epoch.  These shutdown settings are
+# very aggressive.  You may want to adjust this if you want more
+# continuity of service from a single node.
+shutdown_windows = 20, 999999
+
+[Cloud Credentials]
+user_id = client_email_address@developer.gserviceaccount.com
+key = path_to_certificate.pem
+project = project-id-from-google-cloud-dashboard
+timeout = 60
+
+# Valid location (zone) names: https://cloud.google.com/compute/docs/zones
+datacenter = us-central1-a
+
+# Optional settings. For full documentation see
+# http://libcloud.readthedocs.org/en/latest/compute/drivers/gce.html#libcloud.compute.drivers.gce.GCENodeDriver
+#
+# auth_type = SA               # SA, IA or GCE
+# scopes = https://www.googleapis.com/auth/compute
+# credential_file =
+
+[Cloud List]
+# A comma-separated list of tags that must be applied to a node for it to
+# be considered a compute node.
+# The driver will automatically apply these tags to nodes it creates.
+tags = zyxwv, compute
+
+[Cloud Create]
+# New compute nodes will send pings to Arvados at this host.
+# You may specify a port, and use brackets to disambiguate IPv6 addresses.
+ping_host = hostname:port
+
+# A file path for an SSH key that can log in to the compute node.
+# ssh_key = path
+
+# The GCE image name and network zone name to use when creating new nodes.
+image = debian-7
+# network = your_network_name
+
+# JSON string of service account authorizations for this cluster.
+# See http://libcloud.readthedocs.org/en/latest/compute/drivers/gce.html#specifying-service-account-scopes
+# service_accounts = [{'email':'account@example.com', 'scopes':['storage-ro']}]
+
+
+# You can define any number of Size sections to list node sizes you're
+# willing to use.  The Node Manager should boot the cheapest size(s) that
+# can run jobs in the queue.
+#
+# The Size fields are interpreted the same way as with a libcloud NodeSize:
+# http://libcloud.readthedocs.org/en/latest/compute/api.html#libcloud.compute.base.NodeSize
+#
+# See https://cloud.google.com/compute/docs/machine-types for a list
+# of known machine types that may be used as a Size parameter.
+#
+# Each size section MUST define the number of cores available in this
+# size class (since libcloud does not provide any consistent API for
+# exposing this setting).
+# You may also want to define the amount of scratch space (expressed
+# in GB) for Crunch jobs.
+# You can also override Google's provided data fields (such as price per hour)
+# by setting them here.
+
+[Size n1-standard-2]
+cores = 2
+price = 0.076
+scratch = 100
+
+[Size n1-standard-4]
+cores = 4
+price = 0.152
+scratch = 200
\ No newline at end of file
diff --git a/services/nodemanager/doc/local.example.cfg b/services/nodemanager/doc/local.example.cfg
new file mode 100644 (file)
index 0000000..1221775
--- /dev/null
@@ -0,0 +1,50 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# You can use this configuration to run a development Node Manager for
+# testing.  It uses libcloud's dummy driver and your own development API server.
+# When new cloud nodes are created, you'll need to simulate the ping that
+# they send to the Arvados API server.  The easiest way I've found to do that
+# is through the API server Rails console: load the Node object, set its
+# IP address to 10.10.0.N (where N is the cloud node's ID), and save.
+
+[Manage]
+address = 0.0.0.0
+port = 8989
+
+[Daemon]
+min_nodes = 0
+max_nodes = 8
+poll_time = 15
+max_poll_time = 60
+poll_stale_after = 600
+node_stale_after = 300
+certs_file = /etc/ssl/certs/ca-certificates.crt
+
+[Logging]
+level = DEBUG
+pykka = DEBUG
+apiclient = WARNING
+
+[Arvados]
+host = localhost:3030
+# This is the token for the test fixture's admin user.
+token = 4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h
+insecure = yes
+timeout = 15
+
+[Cloud]
+provider = dummy
+shutdown_windows = 1, 1
+timeout = 15
+
+[Cloud Credentials]
+creds = dummycreds
+
+[Cloud List]
+[Cloud Create]
+
+[Size 2]
+cores = 4
+scratch = 1234
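+
+# A minimal sketch of how you might launch it (assuming you run from the
+# services/nodemanager directory; paths are illustrative):
+#   bin/arvados-node-manager --foreground --config doc/local.example.cfg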
diff --git a/services/nodemanager/fpm-info.sh b/services/nodemanager/fpm-info.sh
new file mode 100644 (file)
index 0000000..c4a9dbb
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+case "$TARGET" in
+    debian* | ubuntu*)
+        fpm_depends+=(libcurl3-gnutls libpython2.7)
+        ;;
+esac
diff --git a/services/nodemanager/gittaggers.py b/services/nodemanager/gittaggers.py
new file mode 120000 (symlink)
index 0000000..a9ad861
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/python/gittaggers.py
\ No newline at end of file
diff --git a/services/nodemanager/setup.py b/services/nodemanager/setup.py
new file mode 100644 (file)
index 0000000..4f00d54
--- /dev/null
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import
+import os
+import sys
+import re
+
+from setuptools import setup, find_packages
+
+SETUP_DIR = os.path.dirname(__file__) or '.'
+README = os.path.join(SETUP_DIR, 'README.rst')
+
+import arvados_version
+version = arvados_version.get_version(SETUP_DIR, "arvnodeman")
+
+short_tests_only = False
+if '--short-tests-only' in sys.argv:
+    short_tests_only = True
+    sys.argv.remove('--short-tests-only')
+
+setup(name='arvados-node-manager',
+      version=version,
+      description='Arvados compute node manager',
+      long_description=open(README).read(),
+      author='Arvados',
+      author_email='info@arvados.org',
+      url="https://arvados.org",
+      license='GNU Affero General Public License, version 3.0',
+      packages=find_packages(),
+      scripts=['bin/arvados-node-manager'],
+      data_files=[
+          ('share/doc/arvados-node-manager', ['agpl-3.0.txt', 'README.rst', 'arvados-node-manager.service']),
+      ],
+      install_requires=[
+          'apache-libcloud>=2.3.1.dev1',
+          'arvados-python-client>=0.1.20170731145219',
+          'future',
+          'pykka',
+          'python-daemon',
+          'setuptools',
+          'subprocess32>=3.5.1',
+      ],
+      dependency_links=[
+          "https://github.com/curoverse/libcloud/archive/apache-libcloud-2.3.1.dev1.zip"
+      ],
+      test_suite='tests',
+      tests_require=[
+          'requests',
+          'pbr<1.7.0',
+          'mock>=1.0',
+          'apache-libcloud>=2.3.1.dev1',
+          'subprocess32>=3.5.1',
+      ],
+      zip_safe=False
+      )
diff --git a/services/nodemanager/tests/__init__.py b/services/nodemanager/tests/__init__.py
new file mode 100644 (file)
index 0000000..20e02f9
--- /dev/null
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import logging
+import os
+
+# Set the ANMTEST_LOGLEVEL environment variable to enable logging at that level.
+loglevel = os.environ.get('ANMTEST_LOGLEVEL', 'CRITICAL')
+logging.basicConfig(level=getattr(logging, loglevel.upper()))
+
+# Set the ANMTEST_TIMEOUT environment variable to the maximum amount of time to
+# wait for tested actors to respond to important messages.  The default value
+# is very conservative, because a small value may produce false negatives on
+# slower systems.  If you're debugging a known timeout issue, however, you may
+# want to set this lower to speed up tests.
+pykka_timeout = int(os.environ.get('ANMTEST_TIMEOUT', '10'))
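+
+# For example, a hypothetical invocation (assuming the tests are run from the
+# services/nodemanager directory):
+#   ANMTEST_LOGLEVEL=DEBUG ANMTEST_TIMEOUT=2 python -m unittest discover tests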
diff --git a/services/nodemanager/tests/fake_azure.cfg.template b/services/nodemanager/tests/fake_azure.cfg.template
new file mode 100644 (file)
index 0000000..e5deac8
--- /dev/null
@@ -0,0 +1,194 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Azure configuration for Arvados Node Manager.
+# All times are in seconds unless specified otherwise.
+
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+address = 0.0.0.0
+
+# Management server port number (default -1, server is disabled)
+port = 8989
+
+ManagementToken = xxx
+
+[Daemon]
+# The dispatcher can customize the start and stop procedure for
+# cloud nodes.  For example, the SLURM dispatcher drains nodes
+# through SLURM before shutting them down.
+#dispatcher = slurm
+
+# Node Manager will ensure that there are at least this many nodes running at
+# all times.  If Node Manager needs to start new idle nodes for the purpose of
+# satisfying min_nodes, it will use the cheapest node type.  However, depending
+# on usage patterns, it may also satisfy min_nodes by keeping alive some
+# more-expensive nodes.
+min_nodes = 0
+
+# Node Manager will not start any compute nodes when at least this
+# many are running.
+max_nodes = 8
+
+# Upper limit on the rate of spending (in $/hr): Node Manager will not boot
+# additional nodes if the total price of the nodes already running meets or
+# exceeds this threshold.  The default of 0 means no limit.
+
+# Poll Azure nodes and Arvados for new information every N seconds.
+poll_time = 0.5
+
+# Polls have exponential backoff when services fail to respond.
+# This is the longest time to wait between polls.
+max_poll_time = 1
+
+# If Node Manager can't successfully poll a service for this long,
+# it will never start or stop compute nodes, on the assumption that its
+# information is too outdated.
+poll_stale_after = 1
+
+# If Node Manager boots a cloud node, and it does not pair with an Arvados
+# node before this long, assume that there was a cloud bootstrap failure and
+# shut it down.  Note that normal shutdown windows apply (see the Cloud
+# section), so this should be shorter than the first shutdown window value.
+boot_fail_after = 45
+
+# "Node stale time" affects two related behaviors.
+# 1. If a compute node has been running for at least this long, but it
+# isn't paired with an Arvados node, do not shut it down, but leave it alone.
+# This prevents the node manager from shutting down a node that might
+# actually be doing work, but is having temporary trouble contacting the
+# API server.
+# 2. When the Node Manager starts a new compute node, it will try to reuse
+# an Arvados node that hasn't been updated for this long.
+node_stale_after = 14400
+
+# Scaling factor to be applied to nodes' available RAM size.  Usually there's
+# a variable discrepancy between the advertised RAM value on cloud nodes and
+# the actual amount available.
+# If not set, this value defaults to 0.95.
+node_mem_scaling = 0.95
+
+# File path for Certificate Authorities
+certs_file = /etc/ssl/certs/ca-certificates.crt
+
+[Logging]
+# Log file path
+#file = node-manager.log
+
+# Log level for most Node Manager messages.
+# Choose one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.
+# WARNING lets you know when polling a service fails.
+# INFO additionally lets you know when a compute node is started or stopped.
+level = DEBUG
+
+# You can also set different log levels for specific libraries.
+# Pykka is the Node Manager's actor library.
+# Setting this to DEBUG will display tracebacks for uncaught
+# exceptions in the actors, but it's also very chatty.
+pykka = WARNING
+
+# Setting apiclient to INFO will log the URL of every Arvados API request.
+apiclient = WARNING
+
+[Arvados]
+host = {host}
+token = {token}
+timeout = 15
+jobs_queue = no
+
+# Accept an untrusted SSL certificate from the API server?
+insecure = yes
+
+[Cloud]
+provider = azure
+driver_class = {driver_class}
+
+# Shutdown windows define periods of time when a node may and may not be shut
+# down.  These are windows in full minutes, separated by commas.  Counting from
+# the time the node is booted, the node WILL NOT shut down for N1 minutes; then
+# it MAY shut down for N2 minutes; then it WILL NOT shut down for N3 minutes;
+# and so on.  For example, "20, 999999" means the node may shut down between
+# the 20th and 999999th minutes of uptime.
+# Azure bills by the minute, so it makes sense to aggressively shut down idle
+# nodes.  Specify at least two windows.  You can add as many as you need
+# beyond that.
+shutdown_windows = 0.05, 999999
+
+[Cloud Credentials]
+# Use "azure account list" with the azure CLI to get these values.
+tenant_id = 00000000-0000-0000-0000-000000000000
+subscription_id = 00000000-0000-0000-0000-000000000000
+
+# The following directions are based on
+# https://azure.microsoft.com/en-us/documentation/articles/resource-group-authenticate-service-principal/
+#
+# azure config mode arm
+# azure ad app create --name "<Your Application Display Name>" --home-page "<https://YourApplicationHomePage>" --identifier-uris "<https://YouApplicationUri>" --password <Your_Password>
+# azure ad sp create "<Application_Id>"
+# azure role assignment create --objectId "<Object_Id>" -o Owner -c /subscriptions/<subscriptionId>/
+#
+# Use <Application_Id> for "key" and the <Your_Password> for "secret"
+#
+key = 00000000-0000-0000-0000-000000000000
+secret = PASSWORD
+timeout = 60
+region = East US
+
+[Cloud List]
+# The resource group in which the compute node virtual machines will be created
+# and listed.
+ex_resource_group = ArvadosResourceGroup
+
+[Cloud Create]
+# The image id, in the form "Publisher:Offer:SKU:Version"
+image = Canonical:UbuntuServer:14.04.3-LTS:14.04.201508050
+
+# Path to a local ssh key file that will be used to provision new nodes.
+ssh_key = {ssh_key}
+
+# The account name for the admin user that will be provisioned on new nodes.
+ex_user_name = arvadosuser
+
+# The Azure storage account that will be used to store the node OS disk images.
+ex_storage_account = arvadosstorage
+
+# The virtual network the VMs will be associated with.
+ex_network = ArvadosNetwork
+
+# Optional subnet of the virtual network.
+#ex_subnet = default
+
+# Node tags
+tag_arvados-class = dynamic-compute
+tag_cluster = zyxwv
+
+# the API server to ping
+ping_host = {host}
+
+# You can define any number of Size sections to list Azure sizes you're willing
+# to use.  The Node Manager should boot the cheapest size(s) that can run jobs
+# in the queue.  You must also provide the price per hour, as the Azure compute
+# driver does not currently report prices.
+#
+# See https://azure.microsoft.com/en-us/pricing/details/virtual-machines/
+# for a list of known machine types that may be used as a Size parameter.
+#
+# Each size section MUST define the number of cores available in this
+# size class (since libcloud does not provide any consistent API for exposing
+# this setting).
+# You may also want to define the amount of scratch space (expressed
+# in GB) for Crunch jobs.  You can also override Microsoft's provided
+# data fields by setting them here.
+
+[Size Standard_D3]
+cores = 4
+price = 0.56
+
+[Size Standard_D4]
+cores = 8
+price = 1.12
diff --git a/services/nodemanager/tests/fake_ec2.cfg.template b/services/nodemanager/tests/fake_ec2.cfg.template
new file mode 100644 (file)
index 0000000..2bb7d0e
--- /dev/null
@@ -0,0 +1,162 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# EC2 configuration for Arvados Node Manager.
+# All times are in seconds unless specified otherwise.
+
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+#address = 0.0.0.0
+
+# Management server port number (default -1, server is disabled)
+#port = 8989
+
+[Daemon]
+# The dispatcher can customize the start and stop procedure for
+# cloud nodes.  For example, the SLURM dispatcher drains nodes
+# through SLURM before shutting them down.
+#dispatcher = slurm
+
+# Node Manager will ensure that there are at least this many nodes running at
+# all times.  If Node Manager needs to start new idle nodes for the purpose of
+# satisfying min_nodes, it will use the cheapest node type.  However, depending
+# on usage patterns, it may also satisfy min_nodes by keeping alive some
+# more-expensive nodes.
+min_nodes = 0
+
+# Node Manager will not start any compute nodes when at least this
+# many are running.
+max_nodes = 8
+
+# Upper limit on the rate of spending (in $/hr): Node Manager will not boot
+# additional nodes if the total price of the nodes already running meets or
+# exceeds this threshold.  The default of 0 means no limit.
+
+# Poll EC2 nodes and Arvados for new information every N seconds.
+poll_time = 0.5
+
+# Polls have exponential backoff when services fail to respond.
+# This is the longest time to wait between polls.
+max_poll_time = 1
+
+# If Node Manager can't successfully poll a service for this long,
+# it will never start or stop compute nodes, on the assumption that its
+# information is too outdated.
+poll_stale_after = 1
+
+# If Node Manager boots a cloud node, and it does not pair with an Arvados
+# node before this long, assume that there was a cloud bootstrap failure and
+# shut it down.  Note that normal shutdown windows apply (see the Cloud
+# section), so this should be shorter than the first shutdown window value.
+boot_fail_after = 45
+
+# "Node stale time" affects two related behaviors.
+# 1. If a compute node has been running for at least this long, but it
+# isn't paired with an Arvados node, do not shut it down, but leave it alone.
+# This prevents the node manager from shutting down a node that might
+# actually be doing work, but is having temporary trouble contacting the
+# API server.
+# 2. When the Node Manager starts a new compute node, it will try to reuse
+# an Arvados node that hasn't been updated for this long.
+node_stale_after = 14400
+
+# Scaling factor to be applied to nodes' available RAM size.  Usually there's
+# a variable discrepancy between the advertised RAM value on cloud nodes and
+# the actual amount available.
+# If not set, this value defaults to 0.95.
+node_mem_scaling = 0.95
+
+# File path for Certificate Authorities
+certs_file = /etc/ssl/certs/ca-certificates.crt
+
+[Logging]
+# Log file path
+#file = node-manager.log
+
+# Log level for most Node Manager messages.
+# Choose one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.
+# WARNING lets you know when polling a service fails.
+# INFO additionally lets you know when a compute node is started or stopped.
+level = DEBUG
+
+# You can also set different log levels for specific libraries.
+# Pykka is the Node Manager's actor library.
+# Setting this to DEBUG will display tracebacks for uncaught
+# exceptions in the actors, but it's also very chatty.
+pykka = WARNING
+
+# Setting apiclient to INFO will log the URL of every Arvados API request.
+apiclient = WARNING
+
+[Arvados]
+host = {host}
+token = {token}
+timeout = 15
+jobs_queue = no
+
+# Accept an untrusted SSL certificate from the API server?
+insecure = yes
+
+[Cloud]
+provider = ec2
+driver_class = {driver_class}
+
+# Shutdown windows define periods of time when a node may and may not be shut
+# down.  These are windows in full minutes, separated by commas.  Counting from
+# the time the node is booted, the node WILL NOT shut down for N1 minutes; then
+# it MAY shut down for N2 minutes; then it WILL NOT shut down for N3 minutes;
+# and so on.  For example, "20, 999999" means the node may shut down between
+# the 20th and 999999th minutes of uptime.
+# It usually makes sense to aggressively shut down idle nodes.  Specify at
+# least two windows.  You can add as many as you need beyond that.
+shutdown_windows = 0.05, 999999
+
+[Cloud Credentials]
+
+key = 00000000-0000-0000-0000-000000000000
+secret = PASSWORD
+timeout = 60
+region = East US
+
+[Cloud List]
+
+[Cloud Create]
+# The image id
+image = fake_image_id
+
+# Path to a local ssh key file that will be used to provision new nodes.
+ssh_key = {ssh_key}
+
+# the API server to ping
+ping_host = {host}
+
+# You can define any number of Size sections to list EC2 sizes you're willing
+# to use.  The Node Manager should boot the cheapest size(s) that can run jobs
+# in the queue.  You must also provide the price per hour, as the test driver
+# does not report prices.
+#
+# See your cloud provider's pricing pages for a list of known machine types
+# that may be used as a Size parameter.
+#
+# Each size section MUST define the number of cores available in this
+# size class (since libcloud does not provide any consistent API for exposing
+# this setting).
+# You may also want to define the amount of scratch space (expressed
+# in GB) for Crunch jobs.  You can also override the provider's data
+# fields by setting them here.
+
+[Size m4.xlarge]
+cores = 4
+price = 0.56
+scratch = 250
+
+[Size m4.2xlarge]
+cores = 8
+price = 1.12
+scratch = 500
diff --git a/services/nodemanager/tests/fake_gce.cfg.template b/services/nodemanager/tests/fake_gce.cfg.template
new file mode 100644 (file)
index 0000000..11131ef
--- /dev/null
@@ -0,0 +1,159 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# GCE configuration for Arvados Node Manager.
+# All times are in seconds unless specified otherwise.
+
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+#address = 0.0.0.0
+
+# Management server port number (default -1, server is disabled)
+#port = 8989
+
+[Daemon]
+# The dispatcher can customize the start and stop procedure for
+# cloud nodes.  For example, the SLURM dispatcher drains nodes
+# through SLURM before shutting them down.
+#dispatcher = slurm
+
+# Node Manager will ensure that there are at least this many nodes running at
+# all times.  If Node Manager needs to start new idle nodes for the purpose of
+# satisfying min_nodes, it will use the cheapest node type.  However, depending
+# on usage patterns, it may also satisfy min_nodes by keeping alive some
+# more-expensive nodes.
+min_nodes = 0
+
+# Node Manager will not start any compute nodes when at least this
+# many are running.
+max_nodes = 8
+
+# Upper limit on the rate of spending (in $/hr): Node Manager will not boot
+# additional nodes if the total price of the nodes already running meets or
+# exceeds this threshold.  The default of 0 means no limit.
+
+# Poll GCE nodes and Arvados for new information every N seconds.
+poll_time = 0.5
+
+# Polls have exponential backoff when services fail to respond.
+# This is the longest time to wait between polls.
+max_poll_time = 1
+
+# If Node Manager can't successfully poll a service for this long,
+# it will never start or stop compute nodes, on the assumption that its
+# information is too outdated.
+poll_stale_after = 1
+
+# If Node Manager boots a cloud node, and it does not pair with an Arvados
+# node before this long, assume that there was a cloud bootstrap failure and
+# shut it down.  Note that normal shutdown windows apply (see the Cloud
+# section), so this should be shorter than the first shutdown window value.
+boot_fail_after = 45
+
+# "Node stale time" affects two related behaviors.
+# 1. If a compute node has been running for at least this long, but it
+# isn't paired with an Arvados node, do not shut it down, but leave it alone.
+# This prevents the node manager from shutting down a node that might
+# actually be doing work, but is having temporary trouble contacting the
+# API server.
+# 2. When the Node Manager starts a new compute node, it will try to reuse
+# an Arvados node that hasn't been updated for this long.
+node_stale_after = 14400
+
+# Scaling factor to be applied to nodes' available RAM size.  Usually there's
+# a variable discrepancy between the advertised RAM value on cloud nodes and
+# the actual amount available.
+# If not set, this value defaults to 0.95.
+node_mem_scaling = 0.95
+
+# File path for Certificate Authorities
+certs_file = /etc/ssl/certs/ca-certificates.crt
+
+[Logging]
+# Log file path
+#file = node-manager.log
+
+# Log level for most Node Manager messages.
+# Choose one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.
+# WARNING lets you know when polling a service fails.
+# INFO additionally lets you know when a compute node is started or stopped.
+level = DEBUG
+
+# You can also set different log levels for specific libraries.
+# Pykka is the Node Manager's actor library.
+# Setting this to DEBUG will display tracebacks for uncaught
+# exceptions in the actors, but it's also very chatty.
+pykka = WARNING
+
+# Setting apiclient to INFO will log the URL of every Arvados API request.
+apiclient = WARNING
+
+[Arvados]
+host = {host}
+token = {token}
+timeout = 15
+jobs_queue = no
+
+# Accept an untrusted SSL certificate from the API server?
+insecure = yes
+
+[Cloud]
+provider = gce
+driver_class = {driver_class}
+
+# Shutdown windows define periods of time when a node may and may not be shut
+# down.  These are windows in full minutes, separated by commas.  Counting from
+# the time the node is booted, the node WILL NOT shut down for N1 minutes; then
+# it MAY shut down for N2 minutes; then it WILL NOT shut down for N3 minutes;
+# and so on.  For example, "20, 999999" means the node may shut down between
+# the 20th and 999999th minutes of uptime.
+# It usually makes sense to aggressively shut down idle nodes.  Specify at
+# least two windows.  You can add as many as you need beyond that.
+shutdown_windows = 0.05, 999999
+
+[Cloud Credentials]
+key = 00000000-0000-0000-0000-000000000000
+secret = PASSWORD
+timeout = 60
+region = East US
+
+[Cloud List]
+
+[Cloud Create]
+# The image id
+image = fake_image_id
+
+# Path to a local ssh key file that will be used to provision new nodes.
+ssh_key = {ssh_key}
+
+# the API server to ping
+ping_host = {host}
+
+# You can define any number of Size sections to list GCE sizes you're willing
+# to use.  The Node Manager should boot the cheapest size(s) that can run jobs
+# in the queue.  You must also provide the price per hour, as the test driver
+# does not report prices.
+#
+# See https://cloud.google.com/compute/docs/machine-types for a list
+# of known machine types that may be used as a Size parameter.
+#
+# Each size section MUST define the number of cores available in this
+# size class (since libcloud does not provide any consistent API for exposing
+# this setting).
+# You may also want to define the amount of scratch space (expressed
+# in GB) for Crunch jobs.  You can also override the provider's data
+# fields by setting them here.
+
+[Size n1-standard-1]
+cores = 1
+price = 0.56
+
+[Size n1-standard-2]
+cores = 2
+price = 1.12
\ No newline at end of file
diff --git a/services/nodemanager/tests/integration_test.py b/services/nodemanager/tests/integration_test.py
new file mode 100755 (executable)
index 0000000..1ba2957
--- /dev/null
@@ -0,0 +1,494 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+"""Integration test framework for node manager.
+
+Runs full node manager with an API server (needs ARVADOS_API_HOST and
+ARVADOS_API_TOKEN).  Stubs out the cloud driver and slurm commands to mock
+specific behaviors.  Monitors the log output to verify an expected sequence of
+events or behaviors for each test.
+
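+Run with no arguments to run every test, or pass a single test name (a key
+of the "tests" dictionary in main) to run just that test.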
+"""
+
+import subprocess32 as subprocess
+import os
+import sys
+import re
+import time
+import logging
+import stat
+import tempfile
+import shutil
+import errno
+from functools import partial
+import arvados
+import StringIO
+
+formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
+
+handler = logging.StreamHandler(sys.stderr)
+handler.setFormatter(formatter)
+logger = logging.getLogger("logger")
+logger.setLevel(logging.INFO)
+logger.addHandler(handler)
+
+detail = logging.getLogger("detail")
+detail.setLevel(logging.INFO)
+if os.environ.get("ANMTEST_LOGLEVEL"):
+    detail_content = sys.stderr
+else:
+    detail_content = StringIO.StringIO()
+handler = logging.StreamHandler(detail_content)
+handler.setFormatter(formatter)
+detail.addHandler(handler)
+
+fake_slurm = None
+compute_nodes = None
+all_jobs = None
+unsatisfiable_job_scancelled = None
+
+def update_script(path, val):
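+    # Write to a sibling temp file, then rename over the target, so readers
+    # (the fake slurm commands node manager invokes) never see a partially
+    # written script; rename is atomic on POSIX filesystems.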
+    with open(path+"_", "w") as f:
+        f.write(val)
+    os.chmod(path+"_", stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
+    os.rename(path+"_", path)
+    detail.info("Update script %s: %s", path, val)
+
+def set_squeue(g):
+    global all_jobs
+    update_script(os.path.join(fake_slurm, "squeue"), "#!/bin/sh\n" +
+                  "\n".join("echo '1|100|100|%s|%s|(null)|1234567890'" % (v, k) for k,v in all_jobs.items()))
+    return 0
+
+def set_queue_unsatisfiable(g):
+    global all_jobs, unsatisfiable_job_scancelled
+    # Simulate a job requesting a 99 core node.
+    update_script(os.path.join(fake_slurm, "squeue"), "#!/bin/sh\n" +
+                  "\n".join("echo '99|100|100|%s|%s|(null)|1234567890'" % (v, k) for k,v in all_jobs.items()))
+    update_script(os.path.join(fake_slurm, "scancel"), "#!/bin/sh\n" +
+                  "\ntouch %s" % unsatisfiable_job_scancelled)
+    return 0
+
+def job_cancelled(g):
+    global unsatisfiable_job_scancelled
+    cancelled_job = g.group(1)
+    api = arvados.api('v1')
+    # Check that 'scancel' was called
+    if not os.path.isfile(unsatisfiable_job_scancelled):
+        return 1
+    # Check for the log entry
+    log_entry = api.logs().list(
+        filters=[
+            ['object_uuid', '=', cancelled_job],
+            ['event_type', '=', 'stderr'],
+        ]).execute()['items'][0]
+    if not re.match(
+            r"Constraints cannot be satisfied",
+            log_entry['properties']['text']):
+        return 1
+    return 0
+
+def node_paired(g):
+    global compute_nodes
+    compute_nodes[g.group(1)] = g.group(3)
+
+    update_script(os.path.join(fake_slurm, "sinfo"), "#!/bin/sh\n" +
+                  "\n".join("echo '%s|alloc|(null)'" % (v) for k,v in compute_nodes.items()))
+
+    for k,v in all_jobs.items():
+        if v == "ReqNodeNotAvail":
+            all_jobs[k] = "Running"
+            break
+
+    set_squeue(g)
+
+    return 0
+
+def node_busy(g):
+    update_script(os.path.join(fake_slurm, "sinfo"), "#!/bin/sh\n" +
+                  "\n".join("echo '%s|idle|(null)'" % (v) for k,v in compute_nodes.items()))
+    return 0
+
+def node_shutdown(g):
+    global compute_nodes
+    if g.group(1) in compute_nodes:
+        del compute_nodes[g.group(1)]
+        return 0
+    else:
+        return 1
+
+
+def jobs_req(g):
+    global all_jobs
+    for k,v in all_jobs.items():
+        all_jobs[k] = "ReqNodeNotAvail"
+    set_squeue(g)
+    return 0
+
+def noop(g):
+    return 0
+
+def fail(checks, pattern, g):
+    return 1
+
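+# Check callback that tolerates up to `count` matches of `pattern`; one more
+# match beyond that fails the test.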
+def expect_count(count, checks, pattern, g):
+    if count == 0:
+        return 1
+    else:
+        checks[pattern] = partial(expect_count, count-1)
+        return 0
+
+def run_test(name, actions, checks, driver_class, jobs, provider):
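+    # Run one scenario end-to-end: start a real node manager against a fake
+    # cloud driver and stubbed slurm commands, then drive and verify it by
+    # matching regexps against its log output (see the main-loop notes below).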
+    code = 0
+    global unsatisfiable_job_scancelled
+    unsatisfiable_job_scancelled = os.path.join(tempfile.mkdtemp(),
+                                                "scancel_called")
+
+    # Delete any stale node records
+    api = arvados.api('v1')
+    for n in api.nodes().list().execute()['items']:
+        api.nodes().delete(uuid=n["uuid"]).execute()
+
+    logger.info("Start %s", name)
+
+    global fake_slurm
+    fake_slurm = tempfile.mkdtemp()
+    detail.info("fake_slurm is %s", fake_slurm)
+
+    global compute_nodes
+    compute_nodes = {}
+
+    global all_jobs
+    all_jobs = jobs
+
+    env = os.environ.copy()
+    env["PATH"] = fake_slurm + ":" + env["PATH"]
+
+    # Reset fake squeue/sinfo to empty
+    update_script(os.path.join(fake_slurm, "squeue"), "#!/bin/sh\n")
+    update_script(os.path.join(fake_slurm, "sinfo"), "#!/bin/sh\n")
+
+    # Write configuration file for test
+    with open("tests/fake_%s.cfg.template" % provider) as f:
+        open(os.path.join(fake_slurm, "id_rsa.pub"), "w").close()
+        with open(os.path.join(fake_slurm, "fake.cfg"), "w") as cfg:
+            cfg.write(f.read().format(host=os.environ["ARVADOS_API_HOST"],
+                                      token=os.environ["ARVADOS_API_TOKEN"],
+                                      driver_class=driver_class,
+                                      ssh_key=os.path.join(fake_slurm, "id_rsa.pub")))
+
+    # Tests must complete in less than 30 seconds.
+    timeout = time.time() + 30
+    terminated = False
+
+    # Now start node manager
+    p = subprocess.Popen(["bin/arvados-node-manager", "--foreground", "--config", os.path.join(fake_slurm, "fake.cfg")],
+                         bufsize=0, stderr=subprocess.PIPE, env=env)
+
+    # Test main loop:
+    # - Read line
+    # - Apply negative checks (things that are not supposed to happen)
+    # - Check timeout
+    # - Check if the next action should trigger
+    # - If all actions are exhausted, terminate with test success
+    # - If it hits timeout with actions remaining, terminate with test failed
+    try:
+        # naive line iteration over pipes gets buffered, which isn't what we want,
+        # see https://bugs.python.org/issue3907
+        for line in iter(p.stderr.readline, ""):
+            detail_content.write(line)
+
+            for k,v in checks.items():
+                g = re.match(k, line)
+                if g:
+                    detail.info("Matched check %s", k)
+                    code += v(checks, k, g)
+                    if code != 0:
+                        detail.error("Check failed")
+                        if not terminated:
+                            p.kill()
+                            terminated = True
+
+            if terminated:
+                continue
+
+            if time.time() > timeout:
+                detail.error("Exceeded timeout with actions remaining: %s", actions)
+                code += 1
+                if not terminated:
+                    p.kill()
+                    terminated = True
+
+            k, v = actions[0]
+            g = re.match(k, line)
+            if g:
+                detail.info("Matched action %s", k)
+                actions.pop(0)
+                code += v(g)
+                if code != 0:
+                    detail.error("Action failed")
+                    p.kill()
+                    terminated = True
+
+            if not actions:
+                p.kill()
+                terminated = True
+    except KeyboardInterrupt:
+        p.kill()
+
+    if actions:
+        detail.error("Ended with remaining actions: %s", actions)
+        code = 1
+
+    shutil.rmtree(fake_slurm)
+    shutil.rmtree(os.path.dirname(unsatisfiable_job_scancelled))
+
+    if code == 0:
+        logger.info("%s passed", name)
+    else:
+        if isinstance(detail_content, StringIO.StringIO):
+            detail_content.seek(0)
+            chunk = detail_content.read(4096)
+            while chunk:
+                try:
+                    sys.stderr.write(chunk)
+                    chunk = detail_content.read(4096)
+                except IOError as e:
+                    if e.errno == errno.EAGAIN:
+                        # try again (probably pipe buffer full)
+                        pass
+                    else:
+                        raise
+        logger.info("%s failed", name)
+
+    return code
+
+
+def main():
+    # Test lifecycle.
+
+    tests = {
+        "test_unsatisfiable_jobs" : (
+            # Actions (pattern -> action)
+            [
+                (r".*Daemon started", set_queue_unsatisfiable),
+                (r".*Cancelled unsatisfiable job '(\S+)'", job_cancelled),
+            ],
+            # Checks (things that shouldn't happen)
+            {
+                r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)": fail,
+                r".*Trying to cancel job '(\S+)'": fail,
+            },
+            # Driver class
+            "arvnodeman.test.fake_driver.FakeDriver",
+            # Jobs
+            {"34t0i-dz642-h42bg3hq4bdfpf9": "ReqNodeNotAvail"},
+            # Provider
+            "azure"),
+        "test_single_node_azure": (
+            # Actions (pattern -> action)
+            [
+                (r".*Daemon started", set_squeue),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Not eligible for shut down because node state is \('busy', 'open', .*\)", node_busy),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Suggesting shutdown because node state is \('idle', 'open', .*\)", noop),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+            ],
+            # Checks (things that shouldn't happen)
+            {
+                r".*Suggesting shutdown because node state is \('down', .*\)": fail,
+                r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)": partial(expect_count, 1),
+                r".*Setting node quota.*": fail,
+            },
+            # Driver class
+            "arvnodeman.test.fake_driver.FakeDriver",
+            # Jobs
+            {"34t0i-dz642-h42bg3hq4bdfpf9": "ReqNodeNotAvail"},
+            # Provider
+            "azure"),
+        "test_multiple_nodes": (
+            # Actions (pattern -> action)
+            [
+                (r".*Daemon started", set_squeue),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Not eligible for shut down because node state is \('busy', 'open', .*\)", node_busy),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Suggesting shutdown because node state is \('idle', 'open', .*\)", noop),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+            ],
+            # Checks (things that shouldn't happen)
+            {
+                r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)": partial(expect_count, 4),
+                r".*Setting node quota.*": fail,
+            },
+            # Driver class
+            "arvnodeman.test.fake_driver.FakeDriver",
+            # Jobs
+            {"34t0i-dz642-h42bg3hq4bdfpf1": "ReqNodeNotAvail",
+             "34t0i-dz642-h42bg3hq4bdfpf2": "ReqNodeNotAvail",
+             "34t0i-dz642-h42bg3hq4bdfpf3": "ReqNodeNotAvail",
+             "34t0i-dz642-h42bg3hq4bdfpf4": "ReqNodeNotAvail"},
+            # Provider
+            "azure"),
+        "test_hit_quota": (
+            # Actions (pattern -> action)
+            [
+                (r".*Daemon started", set_squeue),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Not eligible for shut down because node state is \('busy', 'open', .*\)", node_busy),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Suggesting shutdown because node state is \('idle', 'open', .*\)", noop),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown)
+            ],
+            # Checks (things that shouldn't happen)
+            {
+                r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)": partial(expect_count, 2),
+                r".*Sending create_node request.*": partial(expect_count, 5)
+            },
+            # Driver class
+            "arvnodeman.test.fake_driver.QuotaDriver",
+            # Jobs
+            {"34t0i-dz642-h42bg3hq4bdfpf1": "ReqNodeNotAvail",
+             "34t0i-dz642-h42bg3hq4bdfpf2": "ReqNodeNotAvail",
+             "34t0i-dz642-h42bg3hq4bdfpf3": "ReqNodeNotAvail",
+             "34t0i-dz642-h42bg3hq4bdfpf4": "ReqNodeNotAvail"},
+            # Provider
+            "azure"),
+        "test_probe_quota": (
+            # Actions (pattern -> action)
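+# Declare extra runtime package dependencies for the fpm-built packages on
+# Debian-family targets (fpm_depends is expected to be consumed by the
+# surrounding package-build scripts).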
+            [
+                (r".*Daemon started", set_squeue),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Not eligible for shut down because node state is \('busy', 'open', .*\)", node_busy),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Suggesting shutdown because node state is \('idle', 'open', .*\)", noop),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+                (r".*sending request", jobs_req),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Not eligible for shut down because node state is \('busy', 'open', .*\)", node_busy),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Suggesting shutdown because node state is \('idle', 'open', .*\)", noop),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+            ],
+            # Checks (things that shouldn't happen)
+            {
+                r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)": partial(expect_count, 6),
+                r".*Sending create_node request.*": partial(expect_count, 9)
+            },
+            # Driver class
+            "arvnodeman.test.fake_driver.QuotaDriver",
+            # Jobs
+            {"34t0i-dz642-h42bg3hq4bdfpf1": "ReqNodeNotAvail",
+             "34t0i-dz642-h42bg3hq4bdfpf2": "ReqNodeNotAvail",
+             "34t0i-dz642-h42bg3hq4bdfpf3": "ReqNodeNotAvail",
+             "34t0i-dz642-h42bg3hq4bdfpf4": "ReqNodeNotAvail"},
+            # Provider
+            "azure"),
+        "test_no_hang_failing_node_create": (
+            # Actions (pattern -> action)
+            [
+                (r".*Daemon started", set_squeue),
+                (r".*Client error: nope", noop),
+                (r".*Client error: nope", noop),
+                (r".*Client error: nope", noop),
+                (r".*Client error: nope", noop),
+            ],
+            # Checks (things that shouldn't happen)
+            {},
+            # Driver class
+            "arvnodeman.test.fake_driver.FailingDriver",
+            # Jobs
+            {"34t0i-dz642-h42bg3hq4bdfpf1": "ReqNodeNotAvail",
+             "34t0i-dz642-h42bg3hq4bdfpf2": "ReqNodeNotAvail",
+             "34t0i-dz642-h42bg3hq4bdfpf3": "ReqNodeNotAvail",
+             "34t0i-dz642-h42bg3hq4bdfpf4": "ReqNodeNotAvail"},
+            # Provider
+            "azure"),
+        "test_retry_create": (
+            # Actions (pattern -> action)
+            [
+                (r".*Daemon started", set_squeue),
+                (r".*Rate limit exceeded - scheduling retry in 2 seconds", noop),
+                (r".*Rate limit exceeded - scheduling retry in 1 seconds", noop),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", noop),
+            ],
+            # Checks (things that shouldn't happen)
+            {},
+            # Driver class
+            "arvnodeman.test.fake_driver.RetryDriver",
+            # Jobs
+            {"34t0i-dz642-h42bg3hq4bdfpf1": "ReqNodeNotAvail"},
+            # Provider
+            "azure"),
+        "test_single_node_aws": (
+            # Actions (pattern -> action)
+            [
+                (r".*Daemon started", set_squeue),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Not eligible for shut down because node state is \('busy', 'open', .*\)", node_busy),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Suggesting shutdown because node state is \('idle', 'open', .*\)", noop),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+            ],
+            # Checks (things that shouldn't happen)
+            {
+                r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)": partial(expect_count, 1),
+                r".*Setting node quota.*": fail,
+            },
+            # Driver class
+            "arvnodeman.test.fake_driver.FakeAwsDriver",
+            # Jobs
+            {"34t0i-dz642-h42bg3hq4bdfpf9": "ReqNodeNotAvail"},
+            # Provider
+            "ec2"),
+        "test_single_node_gce": (
+            # Actions (pattern -> action)
+            [
+                (r".*Daemon started", set_squeue),
+                (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", node_paired),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Not eligible for shut down because node state is \('busy', 'open', .*\)", node_busy),
+                (r".*ComputeNodeMonitorActor\..*\.([^[]*).*Suggesting shutdown because node state is \('idle', 'open', .*\)", noop),
+                (r".*ComputeNodeShutdownActor\..*\.([^[]*).*Shutdown success", node_shutdown),
+            ],
+            # Checks (things that shouldn't happen)
+            {
+                r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)": partial(expect_count, 1),
+                r".*Setting node quota.*": fail,
+            },
+            # Driver class
+            "arvnodeman.test.fake_driver.FakeGceDriver",
+            # Jobs
+            {"34t0i-dz642-h42bg3hq4bdfpf9": "ReqNodeNotAvail"},
+            # Provider
+            "gce")
+    }
+
+    code = 0
+    if len(sys.argv) > 1:
+        code = run_test(sys.argv[1], *tests[sys.argv[1]])
+    else:
+        for t in sorted(tests.keys()):
+            code += run_test(t, *tests[t])
+
+    if code == 0:
+        logger.info("Tests passed")
+    else:
+        logger.info("Tests failed")
+
+    exit(code)
+
+if __name__ == '__main__':
+    main()
diff --git a/services/nodemanager/tests/stress_test.cwl b/services/nodemanager/tests/stress_test.cwl
new file mode 100644 (file)
index 0000000..082df64
--- /dev/null
@@ -0,0 +1,51 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+#
+#
+# Usage: arvados-cwl-runner stress_test.cwl
+#
+# Submits 100 jobs or containers, creating load on node manager and
+# scheduler.
+
+class: Workflow
+cwlVersion: v1.0
+requirements:
+  ScatterFeatureRequirement: {}
+  InlineJavascriptRequirement: {}
+inputs: []
+outputs: []
+steps:
+  step1:
+    in: []
+    out: [out]
+    run:
+      class: ExpressionTool
+      inputs: []
+      outputs:
+        out: int[]
+      expression: |
+        ${
+          var r = [];
+          for (var i = 1; i <= 100; i++) {
+            r.push(i);
+          }
+          return {out: r};
+        }
+  step2:
+    in:
+      num: step1/out
+    out: []
+    scatter: num
+    run:
+      class: CommandLineTool
+      requirements:
+        ShellCommandRequirement: {}
+      inputs:
+        num: int
+      outputs: []
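+      # Each scattered job sleeps (101 - num) * 2 seconds, so job 1 sleeps
+      # 200s and job 100 sleeps 2s, staggering completions and keeping a
+      # varying load on the scheduler.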
+      arguments: [echo, "starting",
+        {shellQuote: false, valueFrom: "&&"},
+        sleep, $((101-inputs.num)*2),
+        {shellQuote: false, valueFrom: "&&"},
+        echo, "the number of the day is", $(inputs.num)]
diff --git a/services/nodemanager/tests/test_arguments.py b/services/nodemanager/tests/test_arguments.py
new file mode 100644 (file)
index 0000000..e325e52
--- /dev/null
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import io
+import os
+import sys
+import tempfile
+import unittest
+
+import arvnodeman.launcher as nodeman
+from . import testutil
+
+class ArvNodemArgumentsTestCase(unittest.TestCase):
+    def run_nodeman(self, args):
+        return nodeman.main(args)
+
+    def test_unsupported_arg(self):
+        with self.assertRaises(SystemExit):
+            self.run_nodeman(['-x=unknown'])
+
+    def test_version_argument(self):
+        err = io.BytesIO()
+        out = io.BytesIO()
+        with testutil.redirected_streams(stdout=out, stderr=err):
+            with self.assertRaises(SystemExit):
+                self.run_nodeman(['--version'])
+        self.assertEqual(out.getvalue(), '')
+        self.assertRegexpMatches(err.getvalue(), r"[0-9]+\.[0-9]+\.[0-9]+")
diff --git a/services/nodemanager/tests/test_clientactor.py b/services/nodemanager/tests/test_clientactor.py
new file mode 100644 (file)
index 0000000..19e094d
--- /dev/null
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import unittest
+
+import mock
+import pykka
+
+import arvnodeman.clientactor as clientactor
+from . import testutil
+
+class RemotePollLoopActorTestCase(testutil.RemotePollLoopActorTestMixin,
+                                  unittest.TestCase):
+    class MockClientError(Exception):
+        pass
+
+    class TestActor(clientactor.RemotePollLoopActor):
+        LOGGER_NAME = 'arvnodeman.testpoll'
+
+        def _send_request(self):
+            return self._client()
+    TestActor.CLIENT_ERRORS = (MockClientError,)
+    TEST_CLASS = TestActor
+
+
+    def build_monitor(self, side_effect, *args, **kwargs):
+        super(RemotePollLoopActorTestCase, self).build_monitor(*args, **kwargs)
+        self.client.side_effect = side_effect
+
+    def test_poll_loop_starts_after_subscription(self):
+        self.build_monitor(['test1'])
+        self.monitor.subscribe(self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with('test1')
+        self.assertTrue(self.timer.schedule.called)
+
+    def test_poll_loop_continues_after_failure(self):
+        self.build_monitor(self.MockClientError)
+        self.monitor.subscribe(self.subscriber).get(self.TIMEOUT)
+        self.assertTrue(self.stop_proxy(self.monitor),
+                        "poll loop died after error")
+        self.assertTrue(self.timer.schedule.called,
+                        "poll loop did not reschedule after error")
+        self.assertFalse(self.subscriber.called,
+                         "poll loop notified subscribers after error")
+
+    def test_late_subscribers_get_responses(self):
+        self.build_monitor(['pre_late_test', 'late_test'])
+        mock_subscriber = mock.Mock(name='mock_subscriber')
+        self.monitor.subscribe(mock_subscriber).get(self.TIMEOUT)
+        self.monitor.subscribe(self.subscriber)
+        self.monitor.poll().get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with('late_test')
+
+    def test_survive_dead_subscriptions(self):
+        self.build_monitor(['survive1', 'survive2'])
+        dead_subscriber = mock.Mock(name='dead_subscriber')
+        dead_subscriber.side_effect = pykka.ActorDeadError
+        self.monitor.subscribe(dead_subscriber)
+        self.monitor.subscribe(self.subscriber)
+        self.monitor.poll().get(self.TIMEOUT)
+        self.assertTrue(self.stop_proxy(self.monitor),
+                        "poll loop died from dead subscriber")
+        self.subscriber.assert_called_with('survive2')
+
+    def check_poll_timers(self, *test_times):
+        schedule_mock = self.timer.schedule
+        last_expect = None
+        with mock.patch('time.time') as time_mock:
+            for fake_time, expect_next in test_times:
+                time_mock.return_value = fake_time
+                self.monitor.poll(last_expect).get(self.TIMEOUT)
+                self.assertTrue(schedule_mock.called)
+                self.assertEqual(expect_next, schedule_mock.call_args[0][0])
+                schedule_mock.reset_mock()
+                last_expect = expect_next
+
+    def test_poll_timing_on_consecutive_successes_with_drift(self):
+        self.build_monitor(['1', '2'], poll_wait=3, max_poll_wait=14)
+        self.check_poll_timers((0, 3), (4, 6))
+
+    def test_poll_backoff_on_failures(self):
+        self.build_monitor(self.MockClientError, poll_wait=3, max_poll_wait=14)
+        self.check_poll_timers((0, 6), (6, 18), (18, 32))
+
+    def test_poll_timing_after_error_recovery(self):
+        self.build_monitor(['a', self.MockClientError(), 'b'],
+                           poll_wait=3, max_poll_wait=14)
+        self.check_poll_timers((0, 3), (4, 10), (10, 13))
+
+    def test_no_subscriptions_by_key_without_support(self):
+        self.build_monitor([])
+        with self.assertRaises(AttributeError):
+            self.monitor.subscribe_to('key')
+
+
+class RemotePollLoopActorWithKeysTestCase(testutil.RemotePollLoopActorTestMixin,
+                                          unittest.TestCase):
+    class TestActor(RemotePollLoopActorTestCase.TestActor):
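+        # Overriding _item_key is what enables per-key subscriptions;
+        # without it, subscribe_to raises AttributeError (see
+        # test_no_subscriptions_by_key_without_support above).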
+        def _item_key(self, item):
+            return item['key']
+    TEST_CLASS = TestActor
+
+    def build_monitor(self, side_effect, *args, **kwargs):
+        super(RemotePollLoopActorWithKeysTestCase, self).build_monitor(
+            *args, **kwargs)
+        self.client.side_effect = side_effect
+
+    def test_key_subscription(self):
+        self.build_monitor([[{'key': 1}, {'key': 2}]])
+        self.monitor.subscribe_to(2, self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with({'key': 2})
+
+    def test_survive_dead_key_subscriptions(self):
+        item = {'key': 3}
+        self.build_monitor([[item], [item]])
+        dead_subscriber = mock.Mock(name='dead_subscriber')
+        dead_subscriber.side_effect = pykka.ActorDeadError
+        self.monitor.subscribe_to(3, dead_subscriber)
+        self.monitor.subscribe_to(3, self.subscriber)
+        self.monitor.poll().get(self.TIMEOUT)
+        self.assertTrue(self.stop_proxy(self.monitor),
+                        "poll loop died from dead key subscriber")
+        self.subscriber.assert_called_with(item)
+
+    def test_mixed_subscriptions(self):
+        item = {'key': 4}
+        self.build_monitor([[item], [item]])
+        key_subscriber = mock.Mock(name='key_subscriber')
+        self.monitor.subscribe(self.subscriber)
+        self.monitor.subscribe_to(4, key_subscriber)
+        self.monitor.poll().get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with([item])
+        key_subscriber.assert_called_with(item)
+
+    def test_subscription_to_missing_key(self):
+        self.build_monitor([[]])
+        self.monitor.subscribe_to('nonesuch', self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with(None)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/services/nodemanager/tests/test_computenode.py b/services/nodemanager/tests/test_computenode.py
new file mode 100644 (file)
index 0000000..898112b
--- /dev/null
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import unittest
+
+import mock
+
+import arvnodeman.computenode as cnode
+
+@mock.patch('time.time', return_value=1)
+class ShutdownTimerTestCase(unittest.TestCase):
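+    # ShutdownTimer takes its windows in minutes, alternating closed/open:
+    # with [8, 2] the window is closed for 480s from the start time of 1
+    # (first opening at 481), open for 120s, and repeats on a 600s cycle
+    # (next opening at 1081).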
+    def test_two_length_window(self, time_mock):
+        timer = cnode.ShutdownTimer(time_mock.return_value, [8, 2])
+        self.assertEqual(481, timer.next_opening())
+        self.assertFalse(timer.window_open())
+        time_mock.return_value += 500
+        self.assertEqual(1081, timer.next_opening())
+        self.assertTrue(timer.window_open())
+        time_mock.return_value += 200
+        self.assertEqual(1081, timer.next_opening())
+        self.assertFalse(timer.window_open())
+
+    def test_three_length_window(self, time_mock):
+        timer = cnode.ShutdownTimer(time_mock.return_value, [6, 3, 1])
+        self.assertEqual(361, timer.next_opening())
+        self.assertFalse(timer.window_open())
+        time_mock.return_value += 400
+        self.assertEqual(961, timer.next_opening())
+        self.assertTrue(timer.window_open())
+        time_mock.return_value += 200
+        self.assertEqual(961, timer.next_opening())
+        self.assertFalse(timer.window_open())
+
+
+class ArvadosTimestamp(unittest.TestCase):
+    def test_arvados_timestamp(self):
+        self.assertEqual(1527710178, cnode.arvados_timestamp('2018-05-30T19:56:18Z'))
+        self.assertEqual(1527710178.999371, cnode.arvados_timestamp('2018-05-30T19:56:18.999371Z'))
diff --git a/services/nodemanager/tests/test_computenode_dispatch.py b/services/nodemanager/tests/test_computenode_dispatch.py
new file mode 100644 (file)
index 0000000..aee3cbd
--- /dev/null
@@ -0,0 +1,562 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import time
+import unittest
+
+import arvados.errors as arverror
+import httplib2
+import mock
+import threading
+
+from libcloud.common.exceptions import BaseHTTPError
+
+import arvnodeman.computenode.dispatch as dispatch
+import arvnodeman.status as status
+from . import testutil
+
+class ComputeNodeSetupActorTestCase(testutil.ActorTestMixin, unittest.TestCase):
+    ACTOR_CLASS = dispatch.ComputeNodeSetupActor
+
+    def make_mocks(self, arvados_effect=None):
+        if arvados_effect is None:
+            arvados_effect = [testutil.arvados_node_mock(
+                slot_number=None,
+                hostname=None,
+                first_ping_at=None,
+                last_ping_at=None,
+            )]
+        self.arvados_effect = arvados_effect
+        self.timer = testutil.MockTimer()
+        self.api_client = mock.MagicMock(name='api_client')
+        self.api_client.nodes().create().execute.side_effect = arvados_effect
+        self.api_client.nodes().update().execute.side_effect = arvados_effect
+        self.cloud_client = mock.MagicMock(name='cloud_client')
+        self.cloud_client.create_node.return_value = testutil.cloud_node_mock(1)
+
+    def make_actor(self, arv_node=None):
+        if not hasattr(self, 'timer'):
+            self.make_mocks(arvados_effect=[arv_node] if arv_node else None)
+        self.setup_actor = self.ACTOR_CLASS.start(
+            self.timer, self.api_client, self.cloud_client,
+            testutil.MockSize(1), arv_node).proxy()
+
+    def assert_node_properties_updated(self, uuid=None,
+                                       size=testutil.MockSize(1)):
+        self.api_client.nodes().update.assert_any_call(
+            uuid=(uuid or self.arvados_effect[-1]['uuid']),
+            body={
+                'properties': {
+                    'cloud_node': {
+                        'size': size.id,
+                        'price': size.price}}})
+
+    def test_creation_without_arvados_node(self):
+        self.make_actor()
+        finished = threading.Event()
+        self.setup_actor.subscribe(lambda _: finished.set())
+        self.assertEqual(self.arvados_effect[-1],
+                         self.setup_actor.arvados_node.get(self.TIMEOUT))
+        self.assertTrue(finished.wait(self.TIMEOUT))
+        self.api_client.nodes().create.assert_called_with(body={}, assign_slot=True)
+        self.assertEqual(1, self.api_client.nodes().create().execute.call_count)
+        self.assertEqual(1, self.api_client.nodes().update().execute.call_count)
+        self.assert_node_properties_updated()
+        self.assertEqual(self.cloud_client.create_node(),
+                         self.setup_actor.cloud_node.get(self.TIMEOUT))
+
+    def test_creation_with_arvados_node(self):
+        self.make_mocks(arvados_effect=[testutil.arvados_node_mock()]*2)
+        self.make_actor(testutil.arvados_node_mock())
+        finished = threading.Event()
+        self.setup_actor.subscribe(lambda _: finished.set())
+        self.assertEqual(self.arvados_effect[-1],
+                         self.setup_actor.arvados_node.get(self.TIMEOUT))
+        self.assertTrue(finished.wait(self.TIMEOUT))
+        self.assert_node_properties_updated()
+        # With an existing Arvados node record, the actor reuses it via
+        # update() rather than creating a new record.
+        self.assertFalse(self.api_client.nodes().create().execute.called)
+        self.assertEqual(3, self.api_client.nodes().update().execute.call_count)
+        self.assertEqual(self.cloud_client.create_node(),
+                         self.setup_actor.cloud_node.get(self.TIMEOUT))
+
+    def test_failed_arvados_calls_retried(self):
+        self.make_mocks([
+                arverror.ApiError(httplib2.Response({'status': '500'}), ""),
+                testutil.arvados_node_mock(),
+                ])
+        self.make_actor()
+        self.wait_for_assignment(self.setup_actor, 'arvados_node')
+
+    def test_failed_cloud_calls_retried(self):
+        self.make_mocks()
+        self.cloud_client.create_node.side_effect = [
+            Exception("test cloud creation error"),
+            self.cloud_client.create_node.return_value,
+            ]
+        self.make_actor()
+        self.wait_for_assignment(self.setup_actor, 'cloud_node')
+
+    def test_basehttperror_retried(self):
+        self.make_mocks()
+        self.cloud_client.create_node.side_effect = [
+            BaseHTTPError(500, "Try again"),
+            self.cloud_client.create_node.return_value,
+            ]
+        self.make_actor()
+        self.wait_for_assignment(self.setup_actor, 'cloud_node')
+        self.setup_actor.ping().get(self.TIMEOUT)
+        self.assertEqual(1, self.cloud_client.post_create_node.call_count)
+
+    def test_instance_exceeded_not_retried(self):
+        self.make_mocks()
+        self.cloud_client.create_node.side_effect = [
+            BaseHTTPError(400, "InstanceLimitExceeded"),
+            self.cloud_client.create_node.return_value,
+            ]
+        self.make_actor()
+        done = self.FUTURE_CLASS()
+        self.setup_actor.subscribe(done.set)
+        done.get(self.TIMEOUT)
+        self.assertEqual(0, self.cloud_client.post_create_node.call_count)
+
+    def test_failed_post_create_retried(self):
+        self.make_mocks()
+        self.cloud_client.post_create_node.side_effect = [
+            Exception("test cloud post-create error"), None]
+        self.make_actor()
+        done = self.FUTURE_CLASS()
+        self.setup_actor.subscribe(done.set)
+        done.get(self.TIMEOUT)
+        self.assertEqual(2, self.cloud_client.post_create_node.call_count)
+
+    def test_stop_when_no_cloud_node(self):
+        self.make_mocks(
+            arverror.ApiError(httplib2.Response({'status': '500'}), ""))
+        self.make_actor()
+        self.assertTrue(
+            self.setup_actor.stop_if_no_cloud_node().get(self.TIMEOUT))
+        self.assertTrue(
+            self.setup_actor.actor_ref.actor_stopped.wait(self.TIMEOUT))
+
+    def test_no_stop_when_cloud_node(self):
+        self.make_actor()
+        self.wait_for_assignment(self.setup_actor, 'cloud_node')
+        self.assertFalse(
+            self.setup_actor.stop_if_no_cloud_node().get(self.TIMEOUT))
+        self.assertTrue(self.stop_proxy(self.setup_actor),
+                        "actor was stopped by stop_if_no_cloud_node")
+
+    def test_subscribe(self):
+        self.make_mocks(
+            arverror.ApiError(httplib2.Response({'status': '500'}), ""))
+        self.make_actor()
+        subscriber = mock.Mock(name='subscriber_mock')
+        self.setup_actor.subscribe(subscriber)
+        retry_resp = [testutil.arvados_node_mock()]
+        self.api_client.nodes().create().execute.side_effect = retry_resp
+        self.api_client.nodes().update().execute.side_effect = retry_resp
+        self.wait_for_assignment(self.setup_actor, 'cloud_node')
+        self.setup_actor.ping().get(self.TIMEOUT)
+        self.assertEqual(self.setup_actor.actor_ref.actor_urn,
+                         subscriber.call_args[0][0].actor_ref.actor_urn)
+
+    def test_late_subscribe(self):
+        self.make_actor()
+        subscriber = mock.Mock(name='subscriber_mock')
+        self.wait_for_assignment(self.setup_actor, 'cloud_node')
+        self.setup_actor.subscribe(subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.setup_actor)
+        self.assertEqual(self.setup_actor.actor_ref.actor_urn,
+                         subscriber.call_args[0][0].actor_ref.actor_urn)
+
+
+class ComputeNodeShutdownActorMixin(testutil.ActorTestMixin):
+    def make_mocks(self, cloud_node=None, arvados_node=None,
+                   shutdown_open=True, node_broken=False):
+        self.timer = testutil.MockTimer()
+        self.shutdowns = testutil.MockShutdownTimer()
+        self.shutdowns._set_state(shutdown_open, 300)
+        self.cloud_client = mock.MagicMock(name='cloud_client')
+        self.cloud_client.broken.return_value = node_broken
+        self.arvados_client = mock.MagicMock(name='arvados_client')
+        self.updates = mock.MagicMock(name='update_mock')
+        if cloud_node is None:
+            cloud_node = testutil.cloud_node_mock()
+        self.cloud_node = cloud_node
+        self.arvados_node = arvados_node
+
+    def make_actor(self, cancellable=True, start_time=None):
+        if not hasattr(self, 'timer'):
+            self.make_mocks()
+        if start_time is None:
+            start_time = time.time()
+        monitor_actor = dispatch.ComputeNodeMonitorActor.start(
+            self.cloud_node, start_time, self.shutdowns,
+            self.timer, self.updates, self.cloud_client,
+            self.arvados_node)
+        self.shutdown_actor = self.ACTOR_CLASS.start(
+            self.timer, self.cloud_client, self.arvados_client, monitor_actor,
+            cancellable).proxy()
+        self.monitor_actor = monitor_actor.proxy()
+
+    def check_success_flag(self, expected, allow_msg_count=1):
+        # allow_msg_count is the number of internal messages that may
+        # need to be handled for shutdown to finish.
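+        # The for/else only reaches fail() when the flag never matched
+        # (i.e. the loop finished without hitting break).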
+        for _ in range(1 + allow_msg_count):
+            last_flag = self.shutdown_actor.success.get(self.TIMEOUT)
+            if last_flag is expected:
+                break
+        else:
+            self.fail("success flag {} is not {}".format(last_flag, expected))
+
+    def test_boot_failure_counting(self, *mocks):
+        # A boot failure happens when a node transitions from unpaired to shutdown
+        status.tracker.update({'boot_failures': 0})
+        self.make_mocks(shutdown_open=True, arvados_node=testutil.arvados_node_mock(crunch_worker_state="unpaired"))
+        self.cloud_client.destroy_node.return_value = True
+        self.make_actor(cancellable=False)
+        self.check_success_flag(True, 2)
+        self.assertTrue(self.cloud_client.destroy_node.called)
+        self.assertEqual(1, status.tracker.get('boot_failures'))
+
+    def test_cancellable_shutdown(self, *mocks):
+        self.make_mocks(shutdown_open=True, arvados_node=testutil.arvados_node_mock(crunch_worker_state="busy"))
+        self.cloud_client.destroy_node.return_value = True
+        self.make_actor(cancellable=True)
+        self.check_success_flag(False, 2)
+        self.assertFalse(self.cloud_client.destroy_node.called)
+
+    def test_uncancellable_shutdown(self, *mocks):
+        status.tracker.update({'boot_failures': 0})
+        self.make_mocks(shutdown_open=True, arvados_node=testutil.arvados_node_mock(crunch_worker_state="busy"))
+        self.cloud_client.destroy_node.return_value = True
+        self.make_actor(cancellable=False)
+        self.check_success_flag(True, 4)
+        self.assertTrue(self.cloud_client.destroy_node.called)
+        # A normal shutdown shouldn't be counted as boot failure
+        self.assertEqual(0, status.tracker.get('boot_failures'))
+
+    def test_arvados_node_cleaned_after_shutdown(self, *mocks):
+        if len(mocks) == 1:
+            mocks[0].return_value = "drain\n"
+        cloud_node = testutil.cloud_node_mock(62)
+        arv_node = testutil.arvados_node_mock(62)
+        self.make_mocks(cloud_node, arv_node)
+        self.make_actor()
+        self.check_success_flag(True, 3)
+        update_mock = self.arvados_client.nodes().update
+        self.assertTrue(update_mock.called)
+        update_kwargs = update_mock.call_args_list[0][1]
+        self.assertEqual(arv_node['uuid'], update_kwargs.get('uuid'))
+        self.assertIn('body', update_kwargs)
+        for clear_key in ['slot_number', 'hostname', 'ip_address',
+                          'first_ping_at', 'last_ping_at']:
+            self.assertIn(clear_key, update_kwargs['body'])
+            self.assertIsNone(update_kwargs['body'][clear_key])
+        self.assertTrue(update_mock().execute.called)
+
+    def test_arvados_node_not_cleaned_after_shutdown_cancelled(self, *mocks):
+        if len(mocks) == 1:
+            mocks[0].return_value = "idle\n"
+        cloud_node = testutil.cloud_node_mock(61)
+        arv_node = testutil.arvados_node_mock(61)
+        self.make_mocks(cloud_node, arv_node, shutdown_open=False)
+        self.cloud_client.destroy_node.return_value = False
+        self.make_actor(cancellable=True)
+        self.shutdown_actor.cancel_shutdown("test")
+        self.shutdown_actor.ping().get(self.TIMEOUT)
+        self.check_success_flag(False, 2)
+        self.assertFalse(self.arvados_client.nodes().update.called)
+
+
+class ComputeNodeShutdownActorTestCase(ComputeNodeShutdownActorMixin,
+                                       unittest.TestCase):
+    ACTOR_CLASS = dispatch.ComputeNodeShutdownActor
+
+    def test_easy_shutdown(self):
+        self.make_actor(start_time=0)
+        self.check_success_flag(True)
+        self.assertTrue(self.cloud_client.destroy_node.called)
+
+    def test_shutdown_cancelled_when_destroy_node_fails(self):
+        self.make_mocks(node_broken=True)
+        self.cloud_client.destroy_node.return_value = False
+        self.make_actor(start_time=0)
+        self.check_success_flag(False, 2)
+        self.assertEqual(1, self.cloud_client.destroy_node.call_count)
+        self.assertEqual(self.ACTOR_CLASS.DESTROY_FAILED,
+                         self.shutdown_actor.cancel_reason.get(self.TIMEOUT))
+
+    def test_late_subscribe(self):
+        self.make_actor()
+        subscriber = mock.Mock(name='subscriber_mock')
+        self.shutdown_actor.subscribe(subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.shutdown_actor)
+        self.assertTrue(subscriber.called)
+        self.assertEqual(self.shutdown_actor.actor_ref.actor_urn,
+                         subscriber.call_args[0][0].actor_ref.actor_urn)
+
+
+class ComputeNodeUpdateActorTestCase(testutil.ActorTestMixin,
+                                     unittest.TestCase):
+    ACTOR_CLASS = dispatch.ComputeNodeUpdateActor
+
+    def make_actor(self):
+        self.driver = mock.MagicMock(name='driver_mock')
+        self.timer = mock.MagicMock(name='timer_mock')
+        self.updater = self.ACTOR_CLASS.start(self.driver, self.timer).proxy()
+
+    def test_node_sync(self, *args):
+        self.make_actor()
+        cloud_node = testutil.cloud_node_mock()
+        arv_node = testutil.arvados_node_mock()
+        self.updater.sync_node(cloud_node, arv_node).get(self.TIMEOUT)
+        self.driver().sync_node.assert_called_with(cloud_node, arv_node)
+
+    @testutil.no_sleep
+    def test_node_sync_error(self, *args):
+        self.make_actor()
+        cloud_node = testutil.cloud_node_mock()
+        arv_node = testutil.arvados_node_mock()
+        self.driver().sync_node.side_effect = (IOError, Exception, True)
+        self.updater.sync_node(cloud_node, arv_node).get(self.TIMEOUT)
+        self.updater.sync_node(cloud_node, arv_node).get(self.TIMEOUT)
+        self.updater.sync_node(cloud_node, arv_node).get(self.TIMEOUT)
+        self.driver().sync_node.assert_called_with(cloud_node, arv_node)
+
+class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
+                                      unittest.TestCase):
+    def make_mocks(self, node_num):
+        self.shutdowns = testutil.MockShutdownTimer()
+        self.shutdowns._set_state(False, 300)
+        self.timer = mock.MagicMock(name='timer_mock')
+        self.updates = mock.MagicMock(name='update_mock')
+        self.cloud_mock = testutil.cloud_node_mock(node_num)
+        self.subscriber = mock.Mock(name='subscriber_mock')
+        self.cloud_client = mock.MagicMock(name='cloud_client')
+        self.cloud_client.broken.return_value = False
+
+    def make_actor(self, node_num=1, arv_node=None, start_time=None):
+        if not hasattr(self, 'cloud_mock'):
+            self.make_mocks(node_num)
+        if start_time is None:
+            start_time = time.time()
+        self.node_actor = dispatch.ComputeNodeMonitorActor.start(
+            self.cloud_mock, start_time, self.shutdowns,
+            self.timer, self.updates, self.cloud_client,
+            arv_node, boot_fail_after=300).proxy()
+        self.node_actor.subscribe(self.subscriber).get(self.TIMEOUT)
+
+    def node_state(self, *states):
+        return self.node_actor.in_state(*states).get(self.TIMEOUT)
+
+    def test_in_state_when_unpaired(self):
+        self.make_actor()
+        self.assertTrue(self.node_state('unpaired'))
+
+    def test_in_state_when_pairing_stale(self):
+        self.make_actor(arv_node=testutil.arvados_node_mock(
+                job_uuid=None, age=90000))
+        self.assertTrue(self.node_state('down'))
+
+    def test_in_state_when_no_state_available(self):
+        self.make_actor(arv_node=testutil.arvados_node_mock(
+                crunch_worker_state=None))
+        self.assertTrue(self.node_state('idle'))
+
+    def test_in_state_when_no_state_available_old(self):
+        self.make_actor(arv_node=testutil.arvados_node_mock(
+                crunch_worker_state=None, age=90000))
+        self.assertTrue(self.node_state('down'))
+
+    def test_in_idle_state(self):
+        idle_nodes_before = status.tracker._idle_nodes.keys()
+        self.make_actor(2, arv_node=testutil.arvados_node_mock(job_uuid=None))
+        self.assertTrue(self.node_state('idle'))
+        self.assertFalse(self.node_state('busy'))
+        self.assertTrue(self.node_state('idle', 'busy'))
+        idle_nodes_after = status.tracker._idle_nodes.keys()
+        new_idle_nodes = [n for n in idle_nodes_after if n not in idle_nodes_before]
+        # There should be 1 additional idle node
+        self.assertEqual(1, len(new_idle_nodes))
+
+    def test_in_busy_state(self):
+        idle_nodes_before = status.tracker._idle_nodes.keys()
+        self.make_actor(3, arv_node=testutil.arvados_node_mock(job_uuid=True))
+        self.assertFalse(self.node_state('idle'))
+        self.assertTrue(self.node_state('busy'))
+        self.assertTrue(self.node_state('idle', 'busy'))
+        idle_nodes_after = status.tracker._idle_nodes.keys()
+        new_idle_nodes = [n for n in idle_nodes_after if n not in idle_nodes_before]
+        # There shouldn't be any additional idle node
+        self.assertEqual(0, len(new_idle_nodes))
+
+    def test_init_shutdown_scheduling(self):
+        self.make_actor()
+        self.assertTrue(self.timer.schedule.called)
+        self.assertEqual(300, self.timer.schedule.call_args[0][0])
+
+    def test_shutdown_window_close_scheduling(self):
+        self.make_actor()
+        self.shutdowns._set_state(False, 600)
+        self.timer.schedule.reset_mock()
+        self.node_actor.consider_shutdown().get(self.TIMEOUT)
+        self.stop_proxy(self.node_actor)
+        self.assertTrue(self.timer.schedule.called)
+        self.assertEqual(600, self.timer.schedule.call_args[0][0])
+        self.assertFalse(self.subscriber.called)
+
+    def test_shutdown_subscription(self):
+        self.make_actor(start_time=0)
+        self.shutdowns._set_state(True, 600)
+        self.node_actor.consider_shutdown().get(self.TIMEOUT)
+        self.assertTrue(self.subscriber.called)
+        self.assertEqual(self.node_actor.actor_ref.actor_urn,
+                         self.subscriber.call_args[0][0].actor_ref.actor_urn)
+
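+    # The state strings below render a 4-tuple: (crunch worker state,
+    # shutdown window open/closed, whether boot_fail_after has elapsed,
+    # idle-time verdict).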
+    def test_no_shutdown_booting(self):
+        self.make_actor()
+        self.shutdowns._set_state(True, 600)
+        self.assertEqual(self.node_actor.shutdown_eligible().get(self.TIMEOUT),
+                         (False, "node state is ('unpaired', 'open', 'boot wait', 'not idle')"))
+
+    def test_shutdown_when_invalid_cloud_node_size(self):
+        self.make_mocks(1)
+        self.cloud_mock.size.id = 'invalid'
+        self.cloud_mock.extra['arvados_node_size'] = 'stale.type'
+        self.make_actor()
+        self.shutdowns._set_state(True, 600)
+        self.assertEquals((True, "node's size tag 'stale.type' not recognizable"),
+                          self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_shutdown_without_arvados_node(self):
+        self.make_actor(start_time=0)
+        self.shutdowns._set_state(True, 600)
+        self.assertEquals((True, "node state is ('down', 'open', 'boot exceeded', 'not idle')"),
+                          self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_shutdown_missing(self):
+        arv_node = testutil.arvados_node_mock(10, job_uuid=None,
+                                              crunch_worker_state="down",
+                                              last_ping_at='1970-01-01T01:02:03.04050607Z')
+        self.make_actor(10, arv_node)
+        self.shutdowns._set_state(True, 600)
+        self.assertEqual((True, "node state is ('down', 'open', 'boot wait', 'not idle')"),
+                         self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_shutdown_running_broken(self):
+        arv_node = testutil.arvados_node_mock(12, job_uuid=None,
+                                              crunch_worker_state="down")
+        self.make_actor(12, arv_node)
+        self.shutdowns._set_state(True, 600)
+        self.cloud_client.broken.return_value = True
+        self.assertEqual((True, "node state is ('down', 'open', 'boot wait', 'not idle')"),
+                         self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_shutdown_missing_broken(self):
+        arv_node = testutil.arvados_node_mock(11, job_uuid=None,
+                                              crunch_worker_state="down",
+                                              last_ping_at='1970-01-01T01:02:03.04050607Z')
+        self.make_actor(11, arv_node)
+        self.shutdowns._set_state(True, 600)
+        self.cloud_client.broken.return_value = True
+        self.assertEqual(
+            (True, "node state is ('down', 'open', 'boot wait', 'not idle')"),
+            self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_no_shutdown_when_window_closed(self):
+        self.make_actor(3, testutil.arvados_node_mock(3, job_uuid=None))
+        self.assertEqual((False, "node state is ('idle', 'closed', 'boot wait', 'idle exceeded')"),
+                         self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_no_shutdown_when_node_running_job(self):
+        self.make_actor(4, testutil.arvados_node_mock(4, job_uuid=True))
+        self.shutdowns._set_state(True, 600)
+        self.assertEqual((False, "node state is ('busy', 'open', 'boot wait', 'not idle')"),
+                         self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_shutdown_when_node_state_unknown(self):
+        self.make_actor(5, testutil.arvados_node_mock(
+            5, crunch_worker_state=None))
+        self.shutdowns._set_state(True, 600)
+        self.assertEqual((True, "node state is ('idle', 'open', 'boot wait', 'idle exceeded')"),
+                         self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_shutdown_when_node_state_fail(self):
+        self.make_actor(5, testutil.arvados_node_mock(
+            5, crunch_worker_state='fail'))
+        self.shutdowns._set_state(True, 600)
+        self.assertEqual((True, "node state is ('fail', 'open', 'boot wait', 'not idle')"),
+                         self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_no_shutdown_when_node_state_stale(self):
+        self.make_actor(6, testutil.arvados_node_mock(6, age=90000))
+        self.shutdowns._set_state(True, 600)
+        self.assertEqual((False, "node state is stale"),
+                         self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_arvados_node_match(self):
+        self.make_actor(2)
+        arv_node = testutil.arvados_node_mock(
+            2, hostname='compute-two.zzzzz.arvadosapi.com')
+        self.cloud_client.node_id.return_value = '2'
+        pair_id = self.node_actor.offer_arvados_pair(arv_node).get(self.TIMEOUT)
+        self.assertEqual(self.cloud_mock.id, pair_id)
+        self.stop_proxy(self.node_actor)
+        self.updates.sync_node.assert_called_with(self.cloud_mock, arv_node)
+
+    def test_arvados_node_mismatch(self):
+        self.make_actor(3)
+        arv_node = testutil.arvados_node_mock(1)
+        self.assertIsNone(
+            self.node_actor.offer_arvados_pair(arv_node).get(self.TIMEOUT))
+
+    def test_arvados_node_mismatch_first_ping_too_early(self):
+        self.make_actor(4)
+        arv_node = testutil.arvados_node_mock(
+            4, first_ping_at='1971-03-02T14:15:16.1717282Z')
+        self.assertIsNone(
+            self.node_actor.offer_arvados_pair(arv_node).get(self.TIMEOUT))
+
+    def test_update_cloud_node(self):
+        self.make_actor(1)
+        self.make_mocks(2)
+        self.cloud_mock.id = '1'
+        self.node_actor.update_cloud_node(self.cloud_mock)
+        current_cloud = self.node_actor.cloud_node.get(self.TIMEOUT)
+        self.assertEqual([testutil.ip_address_mock(2)],
+                         current_cloud.private_ips)
+
+    def test_missing_cloud_node_update(self):
+        self.make_actor(1)
+        self.node_actor.update_cloud_node(None)
+        current_cloud = self.node_actor.cloud_node.get(self.TIMEOUT)
+        self.assertEqual([testutil.ip_address_mock(1)],
+                         current_cloud.private_ips)
+
+    def test_update_arvados_node(self):
+        self.make_actor(3)
+        job_uuid = 'zzzzz-jjjjj-updatejobnode00'
+        new_arvados = testutil.arvados_node_mock(3, job_uuid)
+        self.node_actor.update_arvados_node(new_arvados)
+        current_arvados = self.node_actor.arvados_node.get(self.TIMEOUT)
+        self.assertEqual(job_uuid, current_arvados['job_uuid'])
+
+    def test_missing_arvados_node_update(self):
+        self.make_actor(4, testutil.arvados_node_mock(4))
+        self.node_actor.update_arvados_node(None)
+        current_arvados = self.node_actor.arvados_node.get(self.TIMEOUT)
+        self.assertEqual(testutil.ip_address_mock(4),
+                         current_arvados['ip_address'])
+
+    def test_update_arvados_node_calls_sync_node(self):
+        self.make_mocks(5)
+        self.cloud_mock.extra['testname'] = 'cloudfqdn.zzzzz.arvadosapi.com'
+        self.make_actor()
+        arv_node = testutil.arvados_node_mock(5)
+        self.node_actor.update_arvados_node(arv_node).get(self.TIMEOUT)
+        self.assertEqual(1, self.updates.sync_node.call_count)
diff --git a/services/nodemanager/tests/test_computenode_dispatch_slurm.py b/services/nodemanager/tests/test_computenode_dispatch_slurm.py
new file mode 100644 (file)
index 0000000..02d8fb6
--- /dev/null
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import subprocess32 as subprocess
+import time
+import unittest
+
+import mock
+
+import arvnodeman.computenode.dispatch.slurm as slurm_dispatch
+from . import testutil
+from .test_computenode_dispatch import \
+    ComputeNodeShutdownActorMixin, \
+    ComputeNodeSetupActorTestCase, \
+    ComputeNodeUpdateActorTestCase
+
+@mock.patch('subprocess32.check_output')
+class SLURMComputeNodeShutdownActorTestCase(ComputeNodeShutdownActorMixin,
+                                            unittest.TestCase):
+    ACTOR_CLASS = slurm_dispatch.ComputeNodeShutdownActor
+
+    def check_slurm_got_args(self, proc_mock, *args):
+        self.assertTrue(proc_mock.called)
+        slurm_cmd = proc_mock.call_args[0][0]
+        for s in args:
+            self.assertIn(s, slurm_cmd)
+
+    def check_success_after_reset(self, proc_mock, end_state='drain\n', timer=False):
+        self.make_mocks(arvados_node=testutil.arvados_node_mock(63))
+        if not timer:
+            self.timer = testutil.MockTimer(False)
+        self.make_actor()
+        self.check_success_flag(None, 0)
+        # At this point, 1st try should have happened.
+
+        self.timer.deliver()
+        self.check_success_flag(None, 0)
+        # At this point, 2nd try should have happened.
+
+        # Order is critical here: if the mock gets called when no return value
+        # or side effect is set, we may invoke a real subprocess.
+        proc_mock.return_value = end_state
+        proc_mock.side_effect = None
+
+        # 3rd try
+        self.timer.deliver()
+
+        self.check_success_flag(True, 3)
+        self.check_slurm_got_args(proc_mock, 'NodeName=compute63')
+
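+    # make_wait_state_test is a factory, not a test: it runs at class
+    # definition time, and the loops below inject one generated
+    # test_wait_while_*/test_wait_until_* method per SLURM state into the
+    # class namespace via locals(), so each state fails independently.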
+    def make_wait_state_test(start_state='drng\n', end_state='drain\n'):
+        def test(self, proc_mock):
+            proc_mock.return_value = start_state
+            self.check_success_after_reset(proc_mock, end_state)
+        return test
+
+    for wait_state in ['alloc\n', 'drng\n']:
+        locals()['test_wait_while_' + wait_state.strip()
+                 ] = make_wait_state_test(start_state=wait_state)
+
+    for end_state in ['idle*\n', 'down\n', 'down*\n', 'drain\n', 'fail\n']:
+        locals()['test_wait_until_' + end_state.strip()
+                 ] = make_wait_state_test(end_state=end_state)
+
+    def test_retry_failed_slurm_calls(self, proc_mock):
+        proc_mock.side_effect = subprocess.CalledProcessError(1, ["mock"])
+        self.check_success_after_reset(proc_mock)
+
+    def test_slurm_bypassed_when_no_arvados_node(self, proc_mock):
+        # Test we correctly handle a node that failed to bootstrap.
+        proc_mock.return_value = 'down\n'
+        self.make_actor(start_time=0)
+        self.check_success_flag(True)
+        self.assertFalse(proc_mock.called)
+
+    def test_node_resumed_when_shutdown_cancelled(self, proc_mock):
+        try:
+            proc_mock.side_effect = iter(['', 'drng\n', 'drng\n', ''])
+            self.make_mocks(arvados_node=testutil.arvados_node_mock(job_uuid=True))
+            self.timer = testutil.MockTimer(False)
+            self.make_actor()
+            self.busywait(lambda: proc_mock.call_args is not None)
+            self.shutdown_actor.cancel_shutdown("test")
+            self.check_success_flag(False, 2)
+            self.assertEqual(
+                proc_mock.call_args_list[0],
+                mock.call(['scontrol', 'update', 'NodeName=compute99',
+                           'State=DRAIN', 'Reason=Node Manager shutdown']))
+            self.assertEqual(
+                proc_mock.call_args_list[-1],
+                mock.call(['scontrol', 'update', 'NodeName=compute99',
+                           'State=RESUME']))
+
+        finally:
+            self.shutdown_actor.actor_ref.stop()
+
+    def test_cancel_shutdown_retry(self, proc_mock):
+        proc_mock.side_effect = iter([OSError, 'drain\n', OSError, 'idle\n', 'idle\n'])
+        self.make_mocks(arvados_node=testutil.arvados_node_mock(job_uuid=True))
+        self.make_actor()
+        self.check_success_flag(False, 5)
+
+    def test_issue_slurm_drain_retry(self, proc_mock):
+        proc_mock.side_effect = iter([OSError, OSError, 'drng\n', 'drain\n'])
+        self.check_success_after_reset(proc_mock, timer=False)
+
+    def test_arvados_node_cleaned_after_shutdown(self, proc_mock):
+        proc_mock.return_value = 'drain\n'
+        super(SLURMComputeNodeShutdownActorTestCase,
+              self).test_arvados_node_cleaned_after_shutdown()
+
+    def test_cancellable_shutdown(self, proc_mock):
+        proc_mock.return_value = 'other\n'
+        super(SLURMComputeNodeShutdownActorTestCase,
+              self).test_cancellable_shutdown()
+
+    def test_uncancellable_shutdown(self, proc_mock):
+        proc_mock.return_value = 'other\n'
+        super(SLURMComputeNodeShutdownActorTestCase,
+              self).test_uncancellable_shutdown()
+
+@mock.patch('subprocess32.check_output')
+class SLURMComputeNodeUpdateActorTestCase(ComputeNodeUpdateActorTestCase):
+    ACTOR_CLASS = slurm_dispatch.ComputeNodeUpdateActor
+
+    def test_update_node_weight(self, check_output):
+        self.make_actor()
+        cloud_node = testutil.cloud_node_mock()
+        arv_node = testutil.arvados_node_mock()
+        self.updater.sync_node(cloud_node, arv_node).get(self.TIMEOUT)
+        check_output.assert_called_with(['scontrol', 'update', 'NodeName=compute99', 'Weight=99000', 'Features=instancetype=z99.test'])
+
+class SLURMComputeNodeSetupActorTestCase(ComputeNodeSetupActorTestCase):
+    ACTOR_CLASS = slurm_dispatch.ComputeNodeSetupActor
+
+    @mock.patch('subprocess32.check_output')
+    def test_update_node_features(self, check_output):
+        # `scontrol update` happens only if the Arvados node record
+        # has a hostname. ComputeNodeSetupActorTestCase.make_mocks
+        # uses mocks with scrubbed hostnames, so we override with the
+        # default testutil.arvados_node_mock.
+        self.make_mocks(arvados_effect=[testutil.arvados_node_mock()])
+        self.make_actor()
+        self.wait_for_assignment(self.setup_actor, 'cloud_node')
+        check_output.assert_called_with(['scontrol', 'update', 'NodeName=compute99', 'Weight=1000', 'Features=instancetype=z1.test'])
+
+    @mock.patch('subprocess32.check_output')
+    def test_failed_arvados_calls_retried(self, check_output):
+        super(SLURMComputeNodeSetupActorTestCase, self).test_failed_arvados_calls_retried()
+
+    @mock.patch('subprocess32.check_output')
+    def test_subscribe(self, check_output):
+        super(SLURMComputeNodeSetupActorTestCase, self).test_subscribe()
+
+    @mock.patch('subprocess32.check_output')
+    def test_creation_with_arvados_node(self, check_output):
+        super(SLURMComputeNodeSetupActorTestCase, self).test_creation_with_arvados_node()
diff --git a/services/nodemanager/tests/test_computenode_driver.py b/services/nodemanager/tests/test_computenode_driver.py
new file mode 100644 (file)
index 0000000..4bf4c39
--- /dev/null
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import unittest
+
+import mock
+
+import arvnodeman.computenode.driver as driver_base
+import arvnodeman.status as status
+import arvnodeman.config as config
+from . import testutil
+
+class ComputeNodeDriverTestCase(unittest.TestCase):
+    def setUp(self):
+        self.driver_mock = mock.MagicMock(name='driver_mock')
+        driver_base.BaseComputeNodeDriver.SEARCH_CACHE = {}
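+        # SEARCH_CACHE is class-level state shared by search_for(); reset
+        # it so cached results cannot leak between tests.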
+
+    def test_search_for_now_uses_public_method(self):
+        image = testutil.cloud_object_mock(1)
+        self.driver_mock().list_images.return_value = [image]
+        driver = driver_base.BaseComputeNodeDriver({}, {}, {}, self.driver_mock)
+        self.assertIs(image, driver.search_for_now('id_1', 'list_images'))
+        self.assertEqual(1, self.driver_mock().list_images.call_count)
+
+    def test_search_for_now_uses_private_method(self):
+        net = testutil.cloud_object_mock(1)
+        self.driver_mock().ex_list_networks.return_value = [net]
+        driver = driver_base.BaseComputeNodeDriver({}, {}, {}, self.driver_mock)
+        self.assertIs(net, driver.search_for_now('id_1', 'ex_list_networks'))
+        self.assertEqual(1, self.driver_mock().ex_list_networks.call_count)
+
+    def test_search_for_now_raises_ValueError_on_zero_results(self):
+        self.driver_mock().list_images.return_value = []
+        driver = driver_base.BaseComputeNodeDriver({}, {}, {}, self.driver_mock)
+        with self.assertRaises(ValueError):
+            driver.search_for_now('id_1', 'list_images')
+
+    def test_search_for_now_raises_ValueError_on_extra_results(self):
+        image = testutil.cloud_object_mock(1)
+        self.driver_mock().list_images.return_value = [image, image]
+        driver = driver_base.BaseComputeNodeDriver({}, {}, {}, self.driver_mock)
+        with self.assertRaises(ValueError):
+            driver.search_for_now('id_1', 'list_images')
+
+    def test_search_for_now_does_not_cache_results(self):
+        image1 = testutil.cloud_object_mock(1)
+        image2 = testutil.cloud_object_mock(1)
+        self.driver_mock().list_images.side_effect = [[image1], [image2]]
+        driver = driver_base.BaseComputeNodeDriver({}, {}, {}, self.driver_mock)
+        self.assertIsNot(driver.search_for_now('id_1', 'list_images'),
+                         driver.search_for_now('id_1', 'list_images'))
+        self.assertEqual(2, self.driver_mock().list_images.call_count)
+
+    def test_search_for_returns_cached_results(self):
+        image1 = testutil.cloud_object_mock(1)
+        image2 = testutil.cloud_object_mock(1)
+        self.driver_mock().list_images.side_effect = [[image1], [image2]]
+        driver = driver_base.BaseComputeNodeDriver({}, {}, {}, self.driver_mock)
+        self.assertIs(driver.search_for('id_1', 'list_images'),
+                      driver.search_for('id_1', 'list_images'))
+        self.assertEqual(1, self.driver_mock().list_images.call_count)
+
+    class TestBaseComputeNodeDriver(driver_base.BaseComputeNodeDriver):
+        def arvados_create_kwargs(self, size, arvados_node):
+            return {'name': arvados_node}
+
+    def test_create_node_only_cloud_errors_are_counted(self):
+        status.tracker.update({'create_node_errors': 0})
+        errors = [(config.CLOUD_ERRORS[0], True), (KeyError, False)]
+        self.driver_mock().list_images.return_value = []
+        driver = self.TestBaseComputeNodeDriver({}, {}, {}, self.driver_mock)
+        error_count = 0
+        for an_error, is_cloud_error in errors:
+            self.driver_mock().create_node.side_effect = an_error
+            with self.assertRaises(an_error):
+                driver.create_node(testutil.MockSize(1), 'id_1')
+            if is_cloud_error:
+                error_count += 1
+            self.assertEqual(error_count, status.tracker.get('create_node_errors'))
+
+    def test_list_nodes_only_cloud_errors_are_counted(self):
+        status.tracker.update({'list_nodes_errors': 0})
+        errors = [(config.CLOUD_ERRORS[0], True), (KeyError, False)]
+        driver = self.TestBaseComputeNodeDriver({}, {}, {}, self.driver_mock)
+        error_count = 0
+        for an_error, is_cloud_error in errors:
+            self.driver_mock().list_nodes.side_effect = an_error
+            with self.assertRaises(an_error):
+                driver.list_nodes()
+            if is_cloud_error:
+                error_count += 1
+            self.assertEqual(error_count, status.tracker.get('list_nodes_errors'))
+
+    def test_destroy_node_only_cloud_errors_are_counted(self):
+        status.tracker.update({'destroy_node_errors': 0})
+        errors = [(config.CLOUD_ERRORS[0], True), (KeyError, False)]
+        self.driver_mock().list_nodes.return_value = [testutil.MockSize(1)]
+        driver = self.TestBaseComputeNodeDriver({}, {}, {}, self.driver_mock)
+        error_count = 0
+        for an_error, is_cloud_error in errors:
+            self.driver_mock().destroy_node.side_effect = an_error
+            with self.assertRaises(an_error):
+                driver.destroy_node(testutil.MockSize(1))
+            if is_cloud_error:
+                error_count += 1
+            self.assertEqual(error_count, status.tracker.get('destroy_node_errors'))
diff --git a/services/nodemanager/tests/test_computenode_driver_azure.py b/services/nodemanager/tests/test_computenode_driver_azure.py
new file mode 100644 (file)
index 0000000..ea7a033
--- /dev/null
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import time
+import unittest
+
+import mock
+
+import arvnodeman.computenode.driver.azure as azure
+from . import testutil
+
+class AzureComputeNodeDriverTestCase(testutil.DriverTestMixin, unittest.TestCase):
+    TEST_CLASS = azure.ComputeNodeDriver
+
+    def new_driver(self, auth_kwargs={}, list_kwargs=None, create_kwargs={}):
+        # Copy before setdefault so the shared default dict is never mutated.
+        list_kwargs = dict(list_kwargs or {})
+        list_kwargs.setdefault("ex_resource_group", "TestResourceGroup")
+        return super(AzureComputeNodeDriverTestCase, self).new_driver(
+            auth_kwargs, list_kwargs, create_kwargs)
+
+    def test_driver_instantiation(self):
+        kwargs = {'key': 'testkey'}
+        driver = self.new_driver(auth_kwargs=kwargs)
+        self.assertTrue(self.driver_mock.called)
+        self.assertEqual(kwargs, self.driver_mock.call_args[1])
+
+    def test_create_image_loaded_at_initialization(self):
+        get_method = self.driver_mock().get_image
+        get_method.return_value = testutil.cloud_object_mock('id_b')
+        driver = self.new_driver(create_kwargs={'image': 'id_b'})
+        self.assertEqual(1, get_method.call_count)
+
+    def test_create_includes_ping(self):
+        arv_node = testutil.arvados_node_mock(info={'ping_secret': 'ssshh'})
+        arv_node["hostname"] = None
+        driver = self.new_driver()
+        driver.create_node(testutil.MockSize(1), arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertTrue(create_method.called)
+        self.assertIn('ping_secret=ssshh',
+                      create_method.call_args[1].get('ex_tags', {}).get('arv-ping-url', ""))
+
+    def test_create_includes_arvados_node_size(self):
+        arv_node = testutil.arvados_node_mock()
+        arv_node["hostname"] = None
+        size = testutil.MockSize(1)
+        driver = self.new_driver()
+        driver.create_node(size, arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertTrue(create_method.called)
+        self.assertIn(
+            ('arvados_node_size', size.id),
+            create_method.call_args[1].get('ex_tags', {'tags': 'missing'}).items()
+        )
+
+    def test_name_from_new_arvados_node(self):
+        arv_node = testutil.arvados_node_mock(hostname=None)
+        driver = self.new_driver()
+        self.assertEqual('compute-000000000000063-zzzzz',
+                         driver.arvados_create_kwargs(testutil.MockSize(1), arv_node)['name'])
+
+    def check_node_tagged(self, cloud_node, expected_tags):
+        tag_mock = self.driver_mock().ex_create_tags
+        self.assertTrue(tag_mock.called)
+        self.assertIs(cloud_node, tag_mock.call_args[0][0])
+        self.assertEqual(expected_tags, tag_mock.call_args[0][1])
+
+    def test_node_create_time(self):
+        refsecs = int(time.time())
+        reftuple = time.gmtime(refsecs)
+        node = testutil.cloud_node_mock()
+        node.extra = {'tags': {'booted_at': time.strftime('%Y-%m-%dT%H:%M:%S.000Z',
+                                                   reftuple)}}
+        self.assertEqual(refsecs, azure.ComputeNodeDriver.node_start_time(node))
+
+    def test_node_fqdn(self):
+        name = 'fqdntest.zzzzz.arvadosapi.com'
+        node = testutil.cloud_node_mock()
+        node.extra = {'tags': {"hostname": name}}
+        self.assertEqual(name, azure.ComputeNodeDriver.node_fqdn(node))
+
+    def test_sync_node(self):
+        arv_node = testutil.arvados_node_mock(1)
+        cloud_node = testutil.cloud_node_mock(2)
+        driver = self.new_driver()
+        driver.sync_node(cloud_node, arv_node)
+        self.check_node_tagged(cloud_node,
+                               {'hostname': 'compute1.zzzzz.arvadosapi.com'})
+
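+    # ex_customdata is the boot script handed to the VM at creation time;
+    # it records the ping URL and instance metadata under
+    # /var/tmp/arv-node-data, where the node's bootstrap scripts look for
+    # them.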
+    def test_custom_data(self):
+        arv_node = testutil.arvados_node_mock(hostname=None)
+        driver = self.new_driver()
+        self.assertEqual("""#!/bin/sh
+mkdir -p    /var/tmp/arv-node-data/meta-data
+echo 'https://100::/arvados/v1/nodes/zzzzz-yyyyy-000000000000063/ping?ping_secret=defaulttestsecret' > /var/tmp/arv-node-data/arv-ping-url
+echo compute-000000000000063-zzzzz > /var/tmp/arv-node-data/meta-data/instance-id
+echo z1.test > /var/tmp/arv-node-data/meta-data/instance-type
+""",
+                         driver.arvados_create_kwargs(testutil.MockSize(1), arv_node)['ex_customdata'])
+
+    def test_list_nodes_ignores_nodes_without_tags(self):
+        driver = self.new_driver(create_kwargs={"tag_arvados-class": "dynamic-compute"})
+        # Mock cloud node without tags
+        nodelist = [testutil.cloud_node_mock(1)]
+        self.driver_mock().list_nodes.return_value = nodelist
+        n = driver.list_nodes()
+        self.assertEqual([], n)
+
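+    # Some clouds raise even though the create request went through; the
+    # driver recovers by searching list_nodes for the name it tried to
+    # create and returning the match.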
+    def test_create_raises_but_actually_succeeded(self):
+        arv_node = testutil.arvados_node_mock(1, hostname=None)
+        driver = self.new_driver(create_kwargs={"tag_arvados-class": "dynamic-compute"})
+        nodelist = [testutil.cloud_node_mock(1, tags={"arvados-class": "dynamic-compute"})]
+        nodelist[0].name = 'compute-000000000000001-zzzzz'
+        self.driver_mock().list_nodes.return_value = nodelist
+        self.driver_mock().create_node.side_effect = IOError
+        n = driver.create_node(testutil.MockSize(1), arv_node)
+        self.assertEqual('compute-000000000000001-zzzzz', n.name)
+
+    def test_ex_fetch_nic_false(self):
+        arv_node = testutil.arvados_node_mock(1, hostname=None)
+        driver = self.new_driver(create_kwargs={"tag_arvados-class": "dynamic-compute"})
+        nodelist = [testutil.cloud_node_mock(1, tags={"arvados-class": "dynamic-compute"})]
+        nodelist[0].name = 'compute-000000000000001-zzzzz'
+        self.driver_mock().list_nodes.return_value = nodelist
+        n = driver.list_nodes()
+        self.assertEqual(nodelist, n)
+        self.driver_mock().list_nodes.assert_called_with(ex_fetch_nic=False, ex_fetch_power_state=False, ex_resource_group='TestResourceGroup')
+
+    def test_create_can_find_node_after_timeout(self):
+        super(AzureComputeNodeDriverTestCase,
+              self).test_create_can_find_node_after_timeout(
+                  create_kwargs={'tag_arvados-class': 'test'},
+                  node_extra={'tags': {'arvados-class': 'test'}})
+
+    def test_node_found_after_timeout_has_fixed_size(self):
+        size = testutil.MockSize(4)
+        node_props = {'hardwareProfile': {'vmSize': size.id}}
+        cloud_node = testutil.cloud_node_mock(tags={'arvados-class': 'test'}, properties=node_props)
+        cloud_node.size = None
+        self.check_node_found_after_timeout_has_fixed_size(
+            size, cloud_node, {'tag_arvados-class': 'test'})
diff --git a/services/nodemanager/tests/test_computenode_driver_ec2.py b/services/nodemanager/tests/test_computenode_driver_ec2.py
new file mode 100644 (file)
index 0000000..520c0dc
--- /dev/null
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import time
+import unittest
+
+import mock
+
+import arvnodeman.computenode.driver.ec2 as ec2
+from . import testutil
+
+class EC2ComputeNodeDriverTestCase(testutil.DriverTestMixin, unittest.TestCase):
+    TEST_CLASS = ec2.ComputeNodeDriver
+
+    def test_driver_instantiation(self):
+        kwargs = {'key': 'testkey'}
+        driver = self.new_driver(auth_kwargs=kwargs)
+        self.assertTrue(self.driver_mock.called)
+        self.assertEqual(kwargs, self.driver_mock.call_args[1])
+
+    def test_list_kwargs_become_filters(self):
+        # We're also testing tag name translation.
+        driver = self.new_driver(list_kwargs={'tag_test': 'true'})
+        driver.list_nodes()
+        list_method = self.driver_mock().list_nodes
+        self.assertTrue(list_method.called)
+        self.assertEqual({'tag:test': 'true'},
+                          list_method.call_args[1].get('ex_filters'))
+
+    def test_create_image_loaded_at_initialization(self):
+        list_method = self.driver_mock().list_images
+        list_method.return_value = [testutil.cloud_object_mock(c)
+                                    for c in 'abc']
+        driver = self.new_driver(create_kwargs={'image_id': 'id_b'})
+        self.assertEqual(1, list_method.call_count)
+
+    def test_create_includes_ping_secret(self):
+        arv_node = testutil.arvados_node_mock(info={'ping_secret': 'ssshh'})
+        driver = self.new_driver()
+        driver.create_node(testutil.MockSize(1), arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertTrue(create_method.called)
+        self.assertIn('ping_secret=ssshh',
+                      create_method.call_args[1].get('ex_userdata',
+                                                     'arg missing'))
+
+    def test_create_includes_metadata(self):
+        arv_node = testutil.arvados_node_mock()
+        driver = self.new_driver(list_kwargs={'tag_test': 'testvalue'})
+        driver.create_node(testutil.MockSize(1), arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertTrue(create_method.called)
+        self.assertIn(
+            ('test', 'testvalue'),
+            create_method.call_args[1].get('ex_metadata', {'arg': 'missing'}).items()
+        )
+
+    def test_create_includes_arvados_node_size(self):
+        arv_node = testutil.arvados_node_mock()
+        size = testutil.MockSize(1)
+        driver = self.new_driver()
+        driver.create_node(size, arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertTrue(create_method.called)
+        self.assertIn(
+            ('arvados_node_size', size.id),
+            create_method.call_args[1].get('ex_metadata', {'arg': 'missing'}).items()
+        )
+
+    def test_create_preemptible_instance(self):
+        arv_node = testutil.arvados_node_mock()
+        driver = self.new_driver()
+        driver.create_node(testutil.MockSize(1, preemptible=True), arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertTrue(create_method.called)
+        self.assertEqual(
+            True,
+            create_method.call_args[1].get('ex_spot_market', 'arg missing')
+        )
+
+    def test_hostname_from_arvados_node(self):
+        arv_node = testutil.arvados_node_mock(8)
+        driver = self.new_driver()
+        self.assertEqual('compute8.zzzzz.arvadosapi.com',
+                         driver.arvados_create_kwargs(testutil.MockSize(1), arv_node)['name'])
+
+    def test_default_hostname_from_new_arvados_node(self):
+        arv_node = testutil.arvados_node_mock(hostname=None)
+        driver = self.new_driver()
+        self.assertEqual('dynamic.compute.zzzzz.arvadosapi.com',
+                         driver.arvados_create_kwargs(testutil.MockSize(1), arv_node)['name'])
+
+    def check_node_tagged(self, cloud_node, expected_tags):
+        tag_mock = self.driver_mock().ex_create_tags
+        self.assertTrue(tag_mock.called)
+        self.assertIs(cloud_node, tag_mock.call_args[0][0])
+        self.assertEqual(expected_tags, tag_mock.call_args[0][1])
+
+    def test_sync_node(self):
+        arv_node = testutil.arvados_node_mock(1)
+        cloud_node = testutil.cloud_node_mock(2)
+        driver = self.new_driver()
+        driver.sync_node(cloud_node, arv_node)
+        self.check_node_tagged(cloud_node,
+                               {'Name': 'compute1.zzzzz.arvadosapi.com'})
+
+    def test_node_create_time(self):
+        refsecs = int(time.time())
+        reftuple = time.gmtime(refsecs)
+        node = testutil.cloud_node_mock()
+        node.extra = {'launch_time': time.strftime('%Y-%m-%dT%H:%M:%S.000Z',
+                                                   reftuple)}
+        self.assertEqual(refsecs, ec2.ComputeNodeDriver.node_start_time(node))
+
+    def test_node_fqdn(self):
+        name = 'fqdntest.zzzzz.arvadosapi.com'
+        node = testutil.cloud_node_mock()
+        node.name = name
+        self.assertEqual(name, ec2.ComputeNodeDriver.node_fqdn(node))
+
+    def test_create_ebs_volume(self):
+        arv_node = testutil.arvados_node_mock()
+        driver = self.new_driver()
+        # libcloud/ec2 "disk" sizes are in GB, Arvados/SLURM "scratch" value is in MB
+        size = testutil.MockSize(1)
+        size.disk=5
+        size.scratch=20000
+        driver.create_node(size, arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertTrue(create_method.called)
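+        # Expected volume size: the 20000 MB scratch request apparently
+        # rounds up to 21 GB (ceil plus 1 GB headroom); 21 minus the 5 GB
+        # instance disk leaves a 16 GB EBS volume.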
+        self.assertEqual([{
+            "DeviceName": "/dev/xvdt",
+            "Ebs": {
+                "DeleteOnTermination": True,
+                "VolumeSize": 16,
+                "VolumeType": "gp2"
+            }}],
+                         create_method.call_args[1].get('ex_blockdevicemappings'))
+
+    def test_ebs_volume_not_needed(self):
+        arv_node = testutil.arvados_node_mock()
+        driver = self.new_driver()
+        # libcloud/ec2 "disk" sizes are in GB, Arvados/SLURM "scratch" value is in MB
+        size = testutil.MockSize(1)
+        size.disk = 80
+        size.scratch = 20000
+        driver.create_node(size, arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertTrue(create_method.called)
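+        # The rounded-up scratch request (~21 GB) fits inside the 80 GB
+        # instance disk, so no EBS mapping should be requested.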
+        self.assertIsNone(create_method.call_args[1].get('ex_blockdevicemappings'))
+
+    def test_ebs_volume_too_big(self):
+        arv_node = testutil.arvados_node_mock()
+        driver = self.new_driver()
+        # libcloud/ec2 "disk" sizes are in GB, Arvados/SLURM "scratch" value is in MB
+        size = testutil.MockSize(1)
+        size.disk = 80
+        size.scratch = 20000000
+        driver.create_node(size, arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertTrue(create_method.called)
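+        # ~20 TB of scratch far exceeds the instance disk; the volume is
+        # expected to be capped at 16384 GB, presumably the EBS size ceiling.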
+        self.assertEqual([{
+            "DeviceName": "/dev/xvdt",
+            "Ebs": {
+                "DeleteOnTermination": True,
+                "VolumeSize": 16384,
+                "VolumeType": "gp2"
+            }}],
+                         create_method.call_args[1].get('ex_blockdevicemappings'))
diff --git a/services/nodemanager/tests/test_computenode_driver_gce.py b/services/nodemanager/tests/test_computenode_driver_gce.py
new file mode 100644 (file)
index 0000000..1446cd2
--- /dev/null
+++ b/services/nodemanager/tests/test_computenode_driver_gce.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import json
+import time
+import unittest
+
+import mock
+
+import arvnodeman.computenode.driver.gce as gce
+from . import testutil
+
+class GCEComputeNodeDriverTestCase(testutil.DriverTestMixin, unittest.TestCase):
+    TEST_CLASS = gce.ComputeNodeDriver
+
+    def setUp(self):
+        super(GCEComputeNodeDriverTestCase, self).setUp()
+        self.driver_mock().list_images.return_value = [
+            testutil.cloud_object_mock('testimage', selfLink='image-link')]
+        self.driver_mock().ex_list_disktypes.return_value = [
+            testutil.cloud_object_mock(name, selfLink=name + '-link')
+            for name in ['pd-standard', 'pd-ssd', 'local-ssd']]
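+        # Discard the call history recorded while configuring the mock above;
+        # reset_mock() leaves the configured return values in place.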
+        self.driver_mock.reset_mock()
+
+    def new_driver(self, auth_kwargs={}, list_kwargs={}, create_kwargs={}):
+        create_kwargs.setdefault('image', 'testimage')
+        return super(GCEComputeNodeDriverTestCase, self).new_driver(
+            auth_kwargs, list_kwargs, create_kwargs)
+
+    def test_driver_instantiation(self):
+        kwargs = {'user_id': 'foo'}
+        driver = self.new_driver(auth_kwargs=kwargs)
+        self.assertTrue(self.driver_mock.called)
+        self.assertEqual(kwargs, self.driver_mock.call_args[1])
+
+    def test_create_image_loaded_at_initialization_by_name(self):
+        image_mocks = [testutil.cloud_object_mock(c) for c in 'abc']
+        list_method = self.driver_mock().list_images
+        list_method.return_value = image_mocks
+        driver = self.new_driver(create_kwargs={'image': 'b'})
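+        # The image list should be fetched exactly once, at driver
+        # initialization.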
+        self.assertEqual(1, list_method.call_count)
+
+    def test_create_includes_ping_secret(self):
+        arv_node = testutil.arvados_node_mock(info={'ping_secret': 'ssshh'})
+        driver = self.new_driver()
+        driver.create_node(testutil.MockSize(1), arv_node)
+        metadata = self.driver_mock().create_node.call_args[1]['ex_metadata']
+        self.assertIn('ping_secret=ssshh', metadata.get('arv-ping-url'))
+
+    def test_create_includes_arvados_node_size(self):
+        arv_node = testutil.arvados_node_mock()
+        size = testutil.MockSize(1)
+        driver = self.new_driver()
+        driver.create_node(size, arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertIn(
+            ('arvados_node_size', size.id),
+            create_method.call_args[1].get('ex_metadata', {'metadata': 'missing'}).items()
+        )
+
+    def test_create_raises_but_actually_succeeded(self):
+        arv_node = testutil.arvados_node_mock(1, hostname=None)
+        driver = self.new_driver()
+        nodelist = [testutil.cloud_node_mock(1)]
+        nodelist[0].name = 'compute-000000000000001-zzzzz'
+        self.driver_mock().list_nodes.return_value = nodelist
+        self.driver_mock().create_node.side_effect = IOError
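+        # create_node raises, but the node still shows up in the cloud
+        # listing; the driver should recover by finding and returning it.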
+        n = driver.create_node(testutil.MockSize(1), arv_node)
+        self.assertEqual('compute-000000000000001-zzzzz', n.name)
+
+    def test_create_sets_default_hostname(self):
+        driver = self.new_driver()
+        driver.create_node(testutil.MockSize(1),
+                           testutil.arvados_node_mock(254, hostname=None))
+        create_kwargs = self.driver_mock().create_node.call_args[1]
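+        # Node numbers are rendered as zero-padded hex in generated names:
+        # 254 == 0xfe.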
+        self.assertEqual('compute-0000000000000fe-zzzzz',
+                         create_kwargs.get('name'))
+        self.assertEqual('dynamic.compute.zzzzz.arvadosapi.com',
+                         create_kwargs.get('ex_metadata', {}).get('hostname'))
+
+    def test_create_tags_from_list_tags(self):
+        driver = self.new_driver(list_kwargs={'tags': 'testA, testB'})
+        driver.create_node(testutil.MockSize(1), testutil.arvados_node_mock())
+        self.assertEqual(['testA', 'testB'],
+                         self.driver_mock().create_node.call_args[1]['ex_tags'])
+
+    def test_create_with_two_disks_attached(self):
+        driver = self.new_driver(create_kwargs={'image': 'testimage'})
+        driver.create_node(testutil.MockSize(1), testutil.arvados_node_mock())
+        create_disks = self.driver_mock().create_node.call_args[1].get(
+            'ex_disks_gce_struct', [])
+        self.assertEqual(2, len(create_disks))
+        self.assertTrue(create_disks[0].get('autoDelete'))
+        self.assertTrue(create_disks[0].get('boot'))
+        self.assertEqual('PERSISTENT', create_disks[0].get('type'))
+        init_params = create_disks[0].get('initializeParams', {})
+        self.assertEqual('pd-standard-link', init_params.get('diskType'))
+        self.assertEqual('image-link', init_params.get('sourceImage'))
+        # Our node images expect the SSD to be named `tmp` to find and mount it.
+        self.assertEqual('tmp', create_disks[1].get('deviceName'))
+        self.assertTrue(create_disks[1].get('autoDelete'))
+        self.assertFalse(create_disks[1].get('boot', 'unset'))
+        self.assertEqual('SCRATCH', create_disks[1].get('type'))
+        init_params = create_disks[1].get('initializeParams', {})
+        self.assertEqual('local-ssd-link', init_params.get('diskType'))
+
+    def test_list_nodes_requires_tags_match(self):
+        # A node matches if our list tags are a subset of the node's tags.
+        # Test behavior with no tags, no match, partial matches, different
+        # order, and strict supersets.
+        cloud_mocks = [
+            testutil.cloud_node_mock(node_num, tags=tag_set)
+            for node_num, tag_set in enumerate(
+                [[], ['bad'], ['good'], ['great'], ['great', 'ok'],
+                 ['great', 'good'], ['good', 'fantastic', 'great']])]
+        cloud_mocks.append(testutil.cloud_node_mock())
+        self.driver_mock().list_nodes.return_value = cloud_mocks
+        driver = self.new_driver(list_kwargs={'tags': 'good, great'})
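+        # Only nodes 5 and 6 (enumerated from 0) carry both 'good' and
+        # 'great' among their tags.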
+        self.assertItemsEqual(['5', '6'], [n.id for n in driver.list_nodes()])
+
+    def build_gce_metadata(self, metadata_dict):
+        # Convert a plain metadata dictionary to the GCE data structure.
+        return {
+            'kind': 'compute#metadata',
+            'fingerprint': 'testprint',
+            'items': [{'key': key, 'value': metadata_dict[key]}
+                      for key in metadata_dict],
+            }
+
+    def check_sync_node_updates_hostname_tag(self, plain_metadata):
+        start_metadata = self.build_gce_metadata(plain_metadata)
+        arv_node = testutil.arvados_node_mock(1)
+        cloud_node = testutil.cloud_node_mock(
+            2, metadata=start_metadata.copy(),
+            zone=testutil.cloud_object_mock('testzone'))
+        self.driver_mock().ex_get_node.return_value = cloud_node
+        driver = self.new_driver()
+        driver.sync_node(cloud_node, arv_node)
+        args, kwargs = self.driver_mock().ex_set_node_metadata.call_args
+        self.assertEqual(cloud_node, args[0])
+        plain_metadata['hostname'] = 'compute1.zzzzz.arvadosapi.com'
+        self.assertEqual(
+            plain_metadata,
+            {item['key']: item['value'] for item in args[1]})
+
+    def test_sync_node_updates_hostname_tag(self):
+        self.check_sync_node_updates_hostname_tag(
+            {'testkey': 'testvalue', 'hostname': 'startvalue'})
+
+    def test_sync_node_adds_hostname_tag(self):
+        self.check_sync_node_updates_hostname_tag({'testkey': 'testval'})
+
+    def test_sync_node_raises_exception_on_failure(self):
+        arv_node = testutil.arvados_node_mock(8)
+        cloud_node = testutil.cloud_node_mock(
+            9, metadata={}, zone=testutil.cloud_object_mock('failzone'))
+        self.driver_mock().ex_set_node_metadata.side_effect = (Exception('sync error test'),)
+        driver = self.new_driver()
+        with self.assertRaises(Exception) as err_check:
+            driver.sync_node(cloud_node, arv_node)
+        self.assertIs(err_check.exception.__class__, Exception)
+        self.assertIn('sync error test', str(err_check.exception))
+
+    def test_node_create_time_zero_for_unknown_nodes(self):
+        node = testutil.cloud_node_mock()
+        self.assertEqual(0, gce.ComputeNodeDriver.node_start_time(node))
+
+    def test_node_create_time_for_known_node(self):
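+        # 1970-01-01T00:01:05Z is 65 seconds past the epoch.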
+        node = testutil.cloud_node_mock(metadata=self.build_gce_metadata(
+                {'booted_at': '1970-01-01T00:01:05Z'}))
+        self.assertEqual(65, gce.ComputeNodeDriver.node_start_time(node))
+
+    def test_node_create_time_recorded_when_node_boots(self):
+        start_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
+        arv_node = testutil.arvados_node_mock()
+        driver = self.new_driver()
+        driver.create_node(testutil.MockSize(1), arv_node)
+        metadata = self.driver_mock().create_node.call_args[1]['ex_metadata']
+        self.assertLessEqual(start_time, metadata.get('booted_at'))
+
+    def test_known_node_fqdn(self):
+        name = 'fqdntest.zzzzz.arvadosapi.com'
+        node = testutil.cloud_node_mock(metadata=self.build_gce_metadata(
+                {'hostname': name}))
+        self.assertEqual(name, gce.ComputeNodeDriver.node_fqdn(node))
+
+    def test_unknown_node_fqdn(self):
+        # An unknown node's fqdn is the empty string.  This lets fqdn be
+        # safely compared against an expected value, and signals that
+        # ComputeNodeMonitorActor should try to update it.
+        node = testutil.cloud_node_mock(metadata=self.build_gce_metadata({}))
+        self.assertEqual('', gce.ComputeNodeDriver.node_fqdn(node))
+
+    def test_deliver_ssh_key_in_metadata(self):
+        test_ssh_key = 'ssh-rsa-foo'
+        arv_node = testutil.arvados_node_mock(1)
+        with mock.patch('__builtin__.open',
+                        mock.mock_open(read_data=test_ssh_key)) as mock_file:
+            driver = self.new_driver(create_kwargs={'ssh_key': 'ssh-key-file'})
+        mock_file.assert_called_once_with('ssh-key-file')
+        driver.create_node(testutil.MockSize(1), arv_node)
+        metadata = self.driver_mock().create_node.call_args[1]['ex_metadata']
+        self.assertEqual('root:ssh-rsa-foo', metadata.get('sshKeys'))
+
+    def test_create_driver_with_service_accounts(self):
+        service_accounts = {'email': 'foo@bar', 'scopes': ['storage-full']}
+        srv_acct_config = {'service_accounts': json.dumps(service_accounts)}
+        arv_node = testutil.arvados_node_mock(1)
+        driver = self.new_driver(create_kwargs=srv_acct_config)
+        driver.create_node(testutil.MockSize(1), arv_node)
+        self.assertEqual(
+            service_accounts,
+            self.driver_mock().create_node.call_args[1]['ex_service_accounts'])
+
+    def test_fix_string_size(self):
+        # As of 0.18, the libcloud GCE driver sets node.size to the size's name.
+        # It's supposed to be the actual size object.  Make sure our driver
+        # patches that up in listings.
+        size = testutil.MockSize(2)
+        node = testutil.cloud_node_mock(size=size)
+        node.size = size.id
+        self.driver_mock().list_sizes.return_value = [size]
+        self.driver_mock().list_nodes.return_value = [node]
+        driver = self.new_driver()
+        nodelist = driver.list_nodes()
+        self.assertEqual(1, len(nodelist))
+        self.assertIs(node, nodelist[0])
+        self.assertIs(size, nodelist[0].size)
+
+    def test_skip_fix_when_size_not_string(self):
+        # Ensure we don't monkeypatch node sizes unless we need to.
+        size = testutil.MockSize(3)
+        node = testutil.cloud_node_mock(size=size)
+        self.driver_mock().list_nodes.return_value = [node]
+        driver = self.new_driver()
+        nodelist = driver.list_nodes()
+        self.assertEqual(1, len(nodelist))
+        self.assertIs(node, nodelist[0])
+        self.assertIs(size, nodelist[0].size)
+
+    def test_node_found_after_timeout_has_fixed_size(self):
+        size = testutil.MockSize(4)
+        cloud_node = testutil.cloud_node_mock(size=size.id)
+        self.check_node_found_after_timeout_has_fixed_size(size, cloud_node)
+
+    def test_list_empty_nodes(self):
+        self.driver_mock().list_nodes.return_value = []
+        self.assertEqual([], self.new_driver().list_nodes())
diff --git a/services/nodemanager/tests/test_config.py b/services/nodemanager/tests/test_config.py
new file mode 100644 (file)
index 0000000..8002b3b
--- /dev/null
+++ b/services/nodemanager/tests/test_config.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import io
+import logging
+import unittest
+
+import arvnodeman.computenode.dispatch as dispatch
+import arvnodeman.computenode.dispatch.slurm as slurm_dispatch
+import arvnodeman.config as nmconfig
+
+class NodeManagerConfigTestCase(unittest.TestCase):
+    TEST_CONFIG = u"""
+[Cloud]
+provider = dummy
+shutdown_windows = 52, 6, 2
+
+[Cloud Credentials]
+creds = dummy_creds
+
+[Cloud List]
+[Cloud Create]
+
+[Size 1]
+cores = 1
+price = 0.8
+
+[Size 1.preemptible]
+instance_type = 1
+preemptible = true
+cores = 1
+price = 0.8
+
+[Logging]
+file = /dev/null
+level = DEBUG
+testlogger = INFO
+"""
+
+    def load_config(self, config=None, config_str=None):
+        if config is None:
+            config = nmconfig.NodeManagerConfig()
+        if config_str is None:
+            config_str = self.TEST_CONFIG
+        with io.StringIO(config_str) as config_fp:
+            config.readfp(config_fp)
+        return config
+
+    def test_seeded_defaults(self):
+        config = nmconfig.NodeManagerConfig()
+        sec_names = set(config.sections())
+        self.assertIn('Arvados', sec_names)
+        self.assertIn('Daemon', sec_names)
+        self.assertFalse(any(name.startswith('Size ') for name in sec_names))
+
+    def test_list_sizes(self):
+        config = self.load_config()
+        sizes = config.node_sizes()
+        self.assertEqual(2, len(sizes))
+        size, kwargs = sizes[0]
+        self.assertEqual('Small', size.name)
+        self.assertEqual(1, kwargs['cores'])
+        self.assertEqual(0.8, kwargs['price'])
+        # preemptible is False by default
+        self.assertEqual(False, kwargs['preemptible'])
+        # instance_type == arvados node size id by default
+        self.assertEqual(kwargs['id'], kwargs['instance_type'])
+        # Now retrieve the preemptible version
+        size, kwargs = sizes[1]
+        self.assertEqual('Small', size.name)
+        self.assertEqual('1.preemptible', kwargs['id'])
+        self.assertEqual(1, kwargs['cores'])
+        self.assertEqual(0.8, kwargs['price'])
+        self.assertEqual(True, kwargs['preemptible'])
+        self.assertEqual('1', kwargs['instance_type'])
+
+    def test_default_node_mem_scaling(self):
+        config = self.load_config()
+        self.assertEqual(0.95, config.getfloat('Daemon', 'node_mem_scaling'))
+
+    def test_shutdown_windows(self):
+        config = self.load_config()
+        self.assertEqual([52, 6, 2], config.shutdown_windows())
+
+    def test_log_levels(self):
+        config = self.load_config()
+        self.assertEqual({'level': logging.DEBUG,
+                          'testlogger': logging.INFO},
+                         config.log_levels())
+
+    def check_dispatch_classes(self, config, module):
+        setup, shutdown, update, monitor = config.dispatch_classes()
+        self.assertIs(setup, module.ComputeNodeSetupActor)
+        self.assertIs(shutdown, module.ComputeNodeShutdownActor)
+        self.assertIs(update, module.ComputeNodeUpdateActor)
+        self.assertIs(monitor, module.ComputeNodeMonitorActor)
+
+    def test_default_dispatch(self):
+        config = self.load_config()
+        self.check_dispatch_classes(config, dispatch)
+
+    def test_custom_dispatch(self):
+        config = self.load_config(
+            config_str=self.TEST_CONFIG + "[Daemon]\ndispatcher=slurm\n")
+        self.check_dispatch_classes(config, slurm_dispatch)
diff --git a/services/nodemanager/tests/test_daemon.py b/services/nodemanager/tests/test_daemon.py
new file mode 100644 (file)
index 0000000..1b6e4ca
--- /dev/null
+++ b/services/nodemanager/tests/test_daemon.py
@@ -0,0 +1,858 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import time
+import unittest
+
+import mock
+import pykka
+
+import arvnodeman.daemon as nmdaemon
+import arvnodeman.status as status
+from arvnodeman.jobqueue import ServerCalculator
+from arvnodeman.computenode.dispatch import ComputeNodeMonitorActor
+from . import testutil
+from . import test_status
+from . import pykka_timeout
+import logging
+
+class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
+                                     unittest.TestCase):
+
+    def assertwait(self, f, timeout=pykka_timeout*2):
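+        # Retry f() until it stops raising AssertionError or the deadline
+        # passes, pinging the daemon between attempts so queued messages
+        # get processed.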
+        deadline = time.time() + timeout
+        while True:
+            try:
+                return f()
+            except AssertionError:
+                if time.time() > deadline:
+                    raise
+            time.sleep(.1)
+            self.daemon.ping().get(self.TIMEOUT)
+
+    def busywait(self, f):
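+        # Poll up to 200 times for f() to return a truthy value, asserting
+        # on the final result if it never does.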
+        for n in xrange(200):
+            ok = f()
+            if ok:
+                return
+            time.sleep(.1)
+            self.daemon.ping().get(self.TIMEOUT)
+        self.assertTrue(ok)  # ok is falsy here (the loop timed out), but not necessarily the literal False
+
+    def mock_node_start(self, **kwargs):
+        # Make sure that every time the daemon starts a setup actor,
+        # it gets a new mock object back.
+        get_cloud_size = mock.MagicMock()
+        get_cloud_size.get.return_value = kwargs["cloud_size"]
+        mock_actor = mock.MagicMock()
+        mock_proxy = mock.NonCallableMock(name='setup_mock_proxy',
+                                          cloud_size=get_cloud_size,
+                                          actor_ref=mock_actor)
+        mock_actor.proxy.return_value = mock_proxy
+        mock_actor.tell_proxy.return_value = mock_proxy
+
+        self.last_setup = mock_proxy
+        return mock_actor
+
+    def mock_node_shutdown(self, **kwargs):
+        # Make sure that every time the daemon starts a shutdown actor,
+        # it gets a new mock object back.
+        get_cloud_node = mock.MagicMock()
+        if "node_monitor" in kwargs:
+            get_cloud_node.get.return_value = kwargs["node_monitor"].proxy().cloud_node.get()
+        mock_actor = mock.MagicMock()
+        mock_proxy = mock.NonCallableMock(name='shutdown_mock_proxy',
+                                          cloud_node=get_cloud_node,
+                                          actor_ref=mock_actor)
+
+        mock_actor.proxy.return_value = mock_proxy
+        self.last_shutdown = mock_proxy
+
+        return mock_actor
+
+    def make_daemon(self, cloud_nodes=[], arvados_nodes=[], want_sizes=[],
+                    avail_sizes=None,
+                    min_nodes=0, max_nodes=8,
+                    shutdown_windows=[54, 5, 1],
+                    max_total_price=None):
+        for name in ['cloud_nodes', 'arvados_nodes', 'server_wishlist']:
+            setattr(self, name + '_poller', mock.MagicMock(name=name + '_mock'))
+
+        if not avail_sizes:
+            if cloud_nodes or want_sizes:
+                avail_sizes = [(c.size, {"cores": int(c.id)}) for c in cloud_nodes] + [(s, {"cores": 1}) for s in want_sizes]
+            else:
+                avail_sizes = [(testutil.MockSize(1), {"cores": 1})]
+
+        self.arv_factory = mock.MagicMock(name='arvados_mock')
+        api_client = mock.MagicMock(name='api_client')
+        api_client.nodes().create().execute.side_effect = \
+            [testutil.arvados_node_mock(1),
+             testutil.arvados_node_mock(2)]
+        self.arv_factory.return_value = api_client
+
+        self.cloud_factory = mock.MagicMock(name='cloud_mock')
+        self.cloud_factory().node_start_time.return_value = time.time()
+        self.cloud_updates = mock.MagicMock(name='updates_mock')
+        self.timer = testutil.MockTimer(deliver_immediately=False)
+        self.cloud_factory().node_id.side_effect = lambda node: node.id
+        self.cloud_factory().broken.return_value = False
+
+        self.node_setup = mock.MagicMock(name='setup_mock')
+        self.node_setup.start.side_effect = self.mock_node_start
+        self.node_setup.reset_mock()
+
+        self.node_shutdown = mock.MagicMock(name='shutdown_mock')
+        self.node_shutdown.start.side_effect = self.mock_node_shutdown
+
+        self.daemon = nmdaemon.NodeManagerDaemonActor.start(
+            self.server_wishlist_poller, self.arvados_nodes_poller,
+            self.cloud_nodes_poller, self.cloud_updates, self.timer,
+            self.arv_factory, self.cloud_factory,
+            shutdown_windows, ServerCalculator(avail_sizes),
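+            # 600, 1800, 3600 are presumably the poll-stale, boot-fail and
+            # node-stale timeouts, in seconds.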
+            min_nodes, max_nodes, 600, 1800, 3600,
+            self.node_setup, self.node_shutdown,
+            max_total_price=max_total_price).proxy()
+        if arvados_nodes is not None:
+            self.daemon.update_arvados_nodes(arvados_nodes).get(self.TIMEOUT)
+        if cloud_nodes is not None:
+            self.daemon.update_cloud_nodes(cloud_nodes).get(self.TIMEOUT)
+        if want_sizes is not None:
+            self.daemon.update_server_wishlist(want_sizes).get(self.TIMEOUT)
+
+    def monitor_list(self):
+        return [c.actor.actor_ref for c in self.daemon.cloud_nodes.get(self.TIMEOUT).nodes.values() if c.actor]
+
+    def monitored_arvados_nodes(self, include_unpaired=True):
+        pairings = []
+        for future in [actor.proxy().arvados_node
+                       for actor in self.monitor_list()]:
+            try:
+                g = future.get(self.TIMEOUT)
+                if g or include_unpaired:
+                    pairings.append(g)
+            except pykka.ActorDeadError:
+                pass
+        return pairings
+
+    def alive_monitor_count(self):
+        return len(self.monitored_arvados_nodes())
+
+    def paired_monitor_count(self):
+        return len(self.monitored_arvados_nodes(False))
+
+    def assertShutdownCancellable(self, expected=True):
+        self.assertTrue(self.node_shutdown.start.called)
+        self.assertIs(expected,
+                      self.node_shutdown.start.call_args[1]['cancellable'],
+                      "ComputeNodeShutdownActor incorrectly cancellable")
+
+    def test_easy_node_creation(self):
+        size = testutil.MockSize(1)
+        self.make_daemon(want_sizes=[size])
+        self.busywait(lambda: self.node_setup.start.called)
+        self.assertIn('node_quota', status.tracker._latest)
+
+    def check_monitors_arvados_nodes(self, *arv_nodes):
+        self.assertwait(lambda: self.assertItemsEqual(arv_nodes, self.monitored_arvados_nodes()))
+
+    def test_node_pairing(self):
+        cloud_node = testutil.cloud_node_mock(1)
+        arv_node = testutil.arvados_node_mock(1)
+        self.make_daemon([cloud_node], [arv_node])
+        self.check_monitors_arvados_nodes(arv_node)
+
+    def test_node_pairing_after_arvados_update(self):
+        cloud_node = testutil.cloud_node_mock(2)
+        self.make_daemon([cloud_node],
+                         [testutil.arvados_node_mock(1, ip_address=None)])
+        arv_node = testutil.arvados_node_mock(2)
+        self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
+        self.check_monitors_arvados_nodes(arv_node)
+
+    def test_arvados_node_un_and_re_paired(self):
+        # We need to create the Arvados node mock after spinning up the daemon
+        # to make sure it's new enough to pair with the cloud node.
+        self.make_daemon(cloud_nodes=[testutil.cloud_node_mock(3)],
+                         arvados_nodes=None)
+        arv_node = testutil.arvados_node_mock(3)
+        self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
+        self.check_monitors_arvados_nodes(arv_node)
+        self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+        self.busywait(lambda: 0 == self.alive_monitor_count())
+        self.daemon.update_cloud_nodes([testutil.cloud_node_mock(3)])
+        self.check_monitors_arvados_nodes(arv_node)
+
+    def test_old_arvados_node_not_double_assigned(self):
+        arv_node = testutil.arvados_node_mock(3, age=9000)
+        size = testutil.MockSize(3)
+        self.make_daemon(arvados_nodes=[arv_node],
+                         avail_sizes=[(size, {"cores":1})])
+        self.daemon.update_server_wishlist([size]).get(self.TIMEOUT)
+        self.daemon.update_server_wishlist([size, size]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        used_nodes = [call[1].get('arvados_node')
+                      for call in self.node_setup.start.call_args_list]
+        self.assertEqual(2, len(used_nodes))
+        self.assertIn(arv_node, used_nodes)
+        self.assertIn(None, used_nodes)
+
+    def test_node_count_satisfied(self):
+        self.make_daemon(cloud_nodes=[testutil.cloud_node_mock(1)],
+                         want_sizes=[testutil.MockSize(1)])
+        self.busywait(lambda: not self.node_setup.start.called)
+
+    def test_select_stale_node_records_with_slot_numbers_first(self):
+        """
+        Stale node records that still have a slot_number assigned can be
+        left behind when clean_arvados_node() isn't executed after a node
+        shutdown, for various reasons.
+        NodeManagerDaemonActor should reuse these stale records first, so
+        they don't accumulate unused and reduce the slots available.
+        """
+        size = testutil.MockSize(1)
+        a_long_time_ago = '1970-01-01T01:02:03.04050607Z'
+        arvados_nodes = []
+        for n in range(9):
+            # Add several stale node records without slot_number assigned
+            arvados_nodes.append(
+                testutil.arvados_node_mock(
+                    n+1,
+                    slot_number=None,
+                    modified_at=a_long_time_ago))
+        # Add one record with a slot_number assigned; it should be the
+        # first one selected
+        arv_node = testutil.arvados_node_mock(
+            123,
+            modified_at=a_long_time_ago)
+        arvados_nodes.append(arv_node)
+        cloud_node = testutil.cloud_node_mock(125, size=size)
+        self.make_daemon(cloud_nodes=[cloud_node],
+                         arvados_nodes=arvados_nodes)
+        arvados_nodes_tracker = self.daemon.arvados_nodes.get()
+        # Here, find_stale_node() should return the node record with
+        # the slot_number assigned.
+        self.assertEqual(arv_node,
+                         arvados_nodes_tracker.find_stale_node(3601))
+
+    def test_dont_count_missing_as_busy(self):
+        size = testutil.MockSize(1)
+        self.make_daemon(cloud_nodes=[testutil.cloud_node_mock(1, size=size),
+                                      testutil.cloud_node_mock(2, size=size)],
+                         arvados_nodes=[testutil.arvados_node_mock(1),
+                                        testutil.arvados_node_mock(
+                                            2,
+                                            last_ping_at='1970-01-01T01:02:03.04050607Z')],
+                         want_sizes=[size, size])
+        self.busywait(lambda: 2 == self.alive_monitor_count())
+        self.busywait(lambda: self.node_setup.start.called)
+
+    def test_missing_counts_towards_max(self):
+        size = testutil.MockSize(1)
+        self.make_daemon(cloud_nodes=[testutil.cloud_node_mock(1, size=size),
+                                      testutil.cloud_node_mock(2, size=size)],
+                         arvados_nodes=[testutil.arvados_node_mock(1),
+                                        testutil.arvados_node_mock(2, last_ping_at='1970-01-01T01:02:03.04050607Z')],
+                         want_sizes=[size, size],
+                         max_nodes=2)
+        self.busywait(lambda: not self.node_setup.start.called)
+
+    def test_excess_counts_missing(self):
+        size = testutil.MockSize(1)
+        cloud_nodes = [testutil.cloud_node_mock(1, size=size), testutil.cloud_node_mock(2, size=size)]
+        self.make_daemon(cloud_nodes=cloud_nodes,
+                         arvados_nodes=[testutil.arvados_node_mock(1),
+                                        testutil.arvados_node_mock(2, last_ping_at='1970-01-01T01:02:03.04050607Z')],
+                         want_sizes=[size])
+        self.assertwait(lambda: self.assertEqual(2, self.paired_monitor_count()))
+        for mon_ref in self.monitor_list():
+            self.daemon.node_can_shutdown(mon_ref.proxy()).get(self.TIMEOUT)
+        self.assertEqual(1, self.node_shutdown.start.call_count)
+
+    def test_missing_shutdown_not_excess(self):
+        size = testutil.MockSize(1)
+        cloud_nodes = [testutil.cloud_node_mock(1, size=size), testutil.cloud_node_mock(2, size=size)]
+        self.make_daemon(cloud_nodes=cloud_nodes,
+                         arvados_nodes=[testutil.arvados_node_mock(1),
+                                        testutil.arvados_node_mock(2, last_ping_at='1970-01-01T01:02:03.04050607Z')],
+                         want_sizes=[size])
+        self.assertwait(lambda: self.assertEqual(2, self.paired_monitor_count()))
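+        # Simulate a shutdown already in progress for the missing node;
+        # with that shutdown pending, the daemon should not start another.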
+        get_cloud_node = mock.MagicMock(name="get_cloud_node")
+        get_cloud_node.get.return_value = cloud_nodes[1]
+        mock_node_monitor = mock.MagicMock()
+        mock_node_monitor.proxy.return_value = mock.NonCallableMock(cloud_node=get_cloud_node)
+        mock_shutdown = self.node_shutdown.start(node_monitor=mock_node_monitor)
+
+        self.daemon.cloud_nodes.get()[cloud_nodes[1].id].shutdown_actor = mock_shutdown.proxy()
+
+        self.assertwait(lambda: self.assertEqual(2, self.alive_monitor_count()))
+        for mon_ref in self.monitor_list():
+            self.daemon.node_can_shutdown(mon_ref.proxy()).get(self.TIMEOUT)
+        self.busywait(lambda: 1 == self.node_shutdown.start.call_count)
+
+    def test_booting_nodes_counted(self):
+        cloud_node = testutil.cloud_node_mock(1)
+        arv_node = testutil.arvados_node_mock(1)
+        server_wishlist = [testutil.MockSize(1)] * 2
+        self.make_daemon([cloud_node], [arv_node], server_wishlist)
+        self.daemon.max_nodes.get(self.TIMEOUT)
+        self.assertTrue(self.node_setup.start.called)
+        self.daemon.update_server_wishlist(server_wishlist).get(self.TIMEOUT)
+        self.busywait(lambda: 1 == self.node_setup.start.call_count)
+
+    def test_boot_new_node_when_all_nodes_busy(self):
+        size = testutil.MockSize(2)
+        arv_node = testutil.arvados_node_mock(2, job_uuid=True)
+        self.make_daemon([testutil.cloud_node_mock(2, size=size)], [arv_node],
+                         [size], avail_sizes=[(size, {"cores":1})])
+        self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
+        self.assertwait(lambda: self.assertEqual(1, self.node_setup.start.called))
+
+    def test_boot_new_node_below_min_nodes(self):
+        min_size = testutil.MockSize(1)
+        wish_size = testutil.MockSize(3)
+        avail_sizes = [(min_size, {"cores": 1}),
+                       (wish_size, {"cores": 3})]
+        self.make_daemon([], [], None, avail_sizes=avail_sizes, min_nodes=2)
+        self.daemon.update_server_wishlist([wish_size]).get(self.TIMEOUT)
+        self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+        self.daemon.update_server_wishlist([wish_size]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertEqual([wish_size, min_size],
+                         [call[1].get('cloud_size')
+                          for call in self.node_setup.start.call_args_list])
+
+    def test_no_new_node_when_ge_min_nodes_busy(self):
+        size = testutil.MockSize(2)
+        cloud_nodes = [testutil.cloud_node_mock(n, size=size) for n in range(1, 4)]
+        arv_nodes = [testutil.arvados_node_mock(n, job_uuid=True)
+                     for n in range(1, 4)]
+        self.make_daemon(cloud_nodes, arv_nodes, [], min_nodes=2)
+        self.stop_proxy(self.daemon)
+        self.assertEqual(0, self.node_setup.start.call_count)
+
+    def test_no_new_node_when_max_nodes_busy(self):
+        size = testutil.MockSize(3)
+        self.make_daemon(cloud_nodes=[testutil.cloud_node_mock(3)],
+                         arvados_nodes=[testutil.arvados_node_mock(3, job_uuid=True)],
+                         want_sizes=[size],
+                         max_nodes=1)
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_setup.start.called)
+
+    def start_node_boot(self, cloud_node=None, arv_node=None, id_num=1):
+        if cloud_node is None:
+            cloud_node = testutil.cloud_node_mock(id_num)
+        id_num = int(cloud_node.id)
+        if arv_node is None:
+            arv_node = testutil.arvados_node_mock(id_num)
+        self.make_daemon(want_sizes=[testutil.MockSize(id_num)],
+                         avail_sizes=[(testutil.MockSize(id_num), {"cores":1})])
+        self.daemon.max_nodes.get(self.TIMEOUT)
+        self.assertEqual(1, self.node_setup.start.call_count)
+        self.last_setup.cloud_node.get.return_value = cloud_node
+        self.last_setup.arvados_node.get.return_value = arv_node
+        return self.last_setup
+
+    def test_new_node_when_booted_node_not_usable(self):
+        cloud_node = testutil.cloud_node_mock(4)
+        arv_node = testutil.arvados_node_mock(4, crunch_worker_state='down')
+        setup = self.start_node_boot(cloud_node, arv_node)
+        self.daemon.node_setup_finished(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.daemon.update_arvados_nodes([arv_node])
+        self.daemon.update_cloud_nodes([cloud_node])
+        self.monitor_list()[0].proxy().cloud_node_start_time = time.time()-1801
+        self.daemon.update_server_wishlist(
+            [testutil.MockSize(4)]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertEqual(2, self.node_setup.start.call_count)
+
+    def test_no_duplication_when_booting_node_listed_fast(self):
+        # Test that we don't start two ComputeNodeMonitorActors when
+        # we learn about a booting node through a listing before we
+        # get the "node up" message from CloudNodeSetupActor.
+        cloud_node = testutil.cloud_node_mock(1)
+        setup = self.start_node_boot(cloud_node)
+        self.daemon.update_cloud_nodes([cloud_node])
+        self.daemon.node_setup_finished(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+
+    def test_no_duplication_when_booted_node_listed(self):
+        cloud_node = testutil.cloud_node_mock(2)
+        setup = self.start_node_boot(cloud_node, id_num=2)
+        self.daemon.node_setup_finished(setup)
+        self.daemon.update_cloud_nodes([cloud_node]).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+
+    def test_node_counted_after_boot_with_slow_listing(self):
+        # Test that, after we boot a compute node, we assume it exists
+        # even if it doesn't appear in the listing (e.g., because of
+        # delays propagating tags).
+        setup = self.start_node_boot()
+        self.daemon.node_setup_finished(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+
+    def test_booted_unlisted_node_counted(self):
+        setup = self.start_node_boot(id_num=1)
+        self.daemon.node_setup_finished(setup)
+        self.daemon.update_server_wishlist(
+            [testutil.MockSize(1)]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertEqual(1, self.node_setup.start.call_count)
+
+    def test_booted_node_can_shutdown(self):
+        setup = self.start_node_boot()
+        self.daemon.node_setup_finished(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.update_server_wishlist([])
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.daemon.update_server_wishlist([]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertTrue(self.node_shutdown.start.called,
+                        "daemon did not shut down booted node on offer")
+
+        with test_status.TestServer() as srv:
+            self.assertEqual(0, srv.get_status().get('nodes_unpaired', None))
+            self.assertEqual(1, srv.get_status().get('nodes_shutdown', None))
+            self.assertEqual(0, srv.get_status().get('nodes_wish', None))
+
+    def test_booted_node_lifecycle(self):
+        cloud_node = testutil.cloud_node_mock(6)
+        setup = self.start_node_boot(cloud_node, id_num=6)
+        self.daemon.node_setup_finished(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.update_server_wishlist([])
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.assertShutdownCancellable(True)
+        shutdown = self.node_shutdown.start().proxy()
+        shutdown.cloud_node.get.return_value = cloud_node
+        self.daemon.node_finished_shutdown(shutdown).get(self.TIMEOUT)
+        self.daemon.update_cloud_nodes([])
+        self.assertTrue(shutdown.stop.called,
+                        "shutdown actor not stopped after finishing")
+        self.assertTrue(monitor.actor_ref.actor_stopped.wait(self.TIMEOUT),
+                        "monitor for booted node not stopped after shutdown")
+        self.daemon.update_server_wishlist(
+            [testutil.MockSize(2)]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertTrue(self.node_setup.start.called,
+                        "second node not started after booted node stopped")
+
+    def test_node_disappearing_during_shutdown(self):
+        cloud_node = testutil.cloud_node_mock(6)
+        setup = self.start_node_boot(cloud_node, id_num=6)
+        self.daemon.node_setup_finished(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.update_server_wishlist([])
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.assertShutdownCancellable(True)
+        shutdown = self.node_shutdown.start().proxy()
+        shutdown.cloud_node.get.return_value = cloud_node
+        # Simulate a successful but slow node destroy call: the cloud node
+        # list gets updated before the ShutdownActor finishes.
+        record = self.daemon.cloud_nodes.get().nodes.values()[0]
+        self.assertTrue(record.shutdown_actor is not None)
+        self.daemon.cloud_nodes.get().nodes.clear()
+        self.daemon.node_finished_shutdown(shutdown).get(self.TIMEOUT)
+        self.assertTrue(
+            record.shutdown_actor is not None,
+            "test was ineffective -- failed to simulate the race condition")
+
+    def test_booted_node_shut_down_when_never_listed(self):
+        setup = self.start_node_boot()
+        self.cloud_factory().node_start_time.return_value = time.time() - 3601
+        self.daemon.node_setup_finished(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.assertFalse(self.node_shutdown.start.called)
+        now = time.time()
+        self.monitor_list()[0].tell_proxy().consider_shutdown()
+        self.busywait(lambda: self.node_shutdown.start.called)
+        self.assertShutdownCancellable(False)
+
+    def test_booted_node_shut_down_when_never_paired(self):
+        cloud_node = testutil.cloud_node_mock(2)
+        setup = self.start_node_boot(cloud_node)
+        self.cloud_factory().node_start_time.return_value = time.time() - 3601
+        self.daemon.node_setup_finished(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.daemon.update_cloud_nodes([cloud_node])
+        self.monitor_list()[0].tell_proxy().consider_shutdown()
+        self.busywait(lambda: self.node_shutdown.start.called)
+        self.assertShutdownCancellable(False)
+
+    def test_booted_node_shut_down_when_never_working(self):
+        cloud_node = testutil.cloud_node_mock(4)
+        arv_node = testutil.arvados_node_mock(4, crunch_worker_state='down')
+        setup = self.start_node_boot(cloud_node, arv_node)
+        self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
+        self.daemon.node_setup_finished(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.monitor_list()[0].proxy().cloud_node_start_time = time.time()-3601
+        self.daemon.update_cloud_nodes([cloud_node])
+        self.busywait(lambda: self.node_shutdown.start.called)
+        self.assertShutdownCancellable(False)
+
+    def test_node_that_pairs_not_considered_failed_boot(self):
+        cloud_node = testutil.cloud_node_mock(3)
+        arv_node = testutil.arvados_node_mock(3)
+        setup = self.start_node_boot(cloud_node, arv_node)
+        self.daemon.node_setup_finished(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.daemon.update_cloud_nodes([cloud_node])
+        self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
+        self.timer.deliver()
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_shutdown.start.called)
+
+    def test_node_that_pairs_busy_not_considered_failed_boot(self):
+        cloud_node = testutil.cloud_node_mock(5)
+        arv_node = testutil.arvados_node_mock(5, job_uuid=True)
+        setup = self.start_node_boot(cloud_node, arv_node)
+        self.daemon.node_setup_finished(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.daemon.update_cloud_nodes([cloud_node])
+        self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
+        self.timer.deliver()
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_shutdown.start.called)
+
+    def test_booting_nodes_shut_down(self):
+        self.make_daemon(want_sizes=[testutil.MockSize(1)])
+        self.daemon.update_server_wishlist([]).get(self.TIMEOUT)
+        self.busywait(lambda: self.last_setup.stop_if_no_cloud_node.called)
+
+    def test_all_booting_nodes_tried_to_shut_down(self):
+        size = testutil.MockSize(2)
+        self.make_daemon(want_sizes=[size], avail_sizes=[(size, {"cores":1})])
+        self.daemon.max_nodes.get(self.TIMEOUT)
+        setup1 = self.last_setup
+        setup1.stop_if_no_cloud_node().get.return_value = False
+        setup1.stop_if_no_cloud_node.reset_mock()
+        self.daemon.update_server_wishlist([size, size]).get(self.TIMEOUT)
+        self.daemon.max_nodes.get(self.TIMEOUT)
+        self.assertIsNot(setup1, self.last_setup)
+        self.last_setup.stop_if_no_cloud_node().get.return_value = True
+        self.last_setup.stop_if_no_cloud_node.reset_mock()
+        self.daemon.update_server_wishlist([]).get(self.TIMEOUT)
+        self.daemon.max_nodes.get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertEqual(1, self.last_setup.stop_if_no_cloud_node.call_count)
+        self.assertTrue(setup1.stop_if_no_cloud_node.called)
+
+    def test_shutdown_declined_at_wishlist_capacity(self):
+        cloud_node = testutil.cloud_node_mock(1)
+        arv_node = testutil.arvados_node_mock(1)
+        size = testutil.MockSize(1)
+        self.make_daemon(cloud_nodes=[cloud_node], arvados_nodes=[arv_node], want_sizes=[size])
+        self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_shutdown.start.called)
+
+    def test_shutdown_declined_below_min_nodes(self):
+        cloud_node = testutil.cloud_node_mock(1)
+        arv_node = testutil.arvados_node_mock(1)
+        self.make_daemon(cloud_nodes=[cloud_node], arvados_nodes=[arv_node], min_nodes=1)
+        self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_shutdown.start.called)
+
+    def test_shutdown_accepted_below_capacity(self):
+        self.make_daemon(cloud_nodes=[testutil.cloud_node_mock()])
+        self.busywait(lambda: 1 == self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.busywait(lambda: self.node_shutdown.start.called)
+
+    def test_shutdown_declined_when_idle_and_job_queued(self):
+        size = testutil.MockSize(1)
+        cloud_nodes = [testutil.cloud_node_mock(n, size=size) for n in [3, 4]]
+        arv_nodes = [testutil.arvados_node_mock(3, job_uuid=True),
+                     testutil.arvados_node_mock(4, job_uuid=None)]
+        self.make_daemon(cloud_nodes, arv_nodes, [size])
+        self.assertwait(lambda: self.assertEqual(2, self.paired_monitor_count()))
+        for mon_ref in self.monitor_list():
+            monitor = mon_ref.proxy()
+            if monitor.cloud_node.get(self.TIMEOUT) is cloud_nodes[-1]:
+                break
+        else:
+            self.fail("monitor for idle node not found")
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_shutdown.start.called)
+
+    def test_node_shutdown_after_cancelled_shutdown(self):
+        cloud_node = testutil.cloud_node_mock(5)
+        self.make_daemon([cloud_node], [testutil.arvados_node_mock(5)])
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.last_shutdown.success.get.return_value = False
+        self.daemon.node_finished_shutdown(self.last_shutdown).get(self.TIMEOUT)
+        self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
+
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.last_shutdown.success.get.return_value = True
+        self.last_shutdown.stop.side_effect = lambda: monitor.stop()
+        self.daemon.node_finished_shutdown(self.last_shutdown).get(self.TIMEOUT)
+        self.assertwait(lambda: self.assertEqual(0, self.paired_monitor_count()))
+
+    def test_nodes_shutting_down_replaced_below_max_nodes(self):
+        size = testutil.MockSize(6)
+        cloud_node = testutil.cloud_node_mock(6, size=size)
+        self.make_daemon([cloud_node], [testutil.arvados_node_mock(6, crunch_worker_state='down')],
+                         avail_sizes=[(size, {"cores":1})])
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.assertTrue(self.node_shutdown.start.called)
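+        # Cancelling the shutdown fails (cancel_shutdown returns False), so
+        # the daemon is expected to boot a replacement node instead.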
+        getmock = mock.MagicMock()
+        getmock.get.return_value = False
+        self.last_shutdown.cancel_shutdown.return_value = getmock
+        self.daemon.update_server_wishlist(
+            [testutil.MockSize(6)]).get(self.TIMEOUT)
+        self.busywait(lambda: self.node_setup.start.called)
+
+    def test_nodes_shutting_down_cancelled(self):
+        size = testutil.MockSize(6)
+        cloud_node = testutil.cloud_node_mock(6, size=size)
+        self.make_daemon([cloud_node], [testutil.arvados_node_mock(6, crunch_worker_state='down')],
+                         avail_sizes=[(size, {"cores":1})])
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.assertTrue(self.node_shutdown.start.called)
+        self.daemon.update_server_wishlist(
+            [testutil.MockSize(6)]).get(self.TIMEOUT)
+        self.busywait(lambda: self.last_shutdown.cancel_shutdown.called)
+
+    def test_nodes_shutting_down_not_replaced_at_max_nodes(self):
+        cloud_node = testutil.cloud_node_mock(7)
+        self.make_daemon([cloud_node], [testutil.arvados_node_mock(7)],
+                         max_nodes=1)
+        self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.assertTrue(self.node_shutdown.start.called)
+        self.daemon.update_server_wishlist(
+            [testutil.MockSize(7)]).get(self.TIMEOUT)
+        self.busywait(lambda: not self.node_setup.start.called)
+
+    def test_nodes_shutting_down_count_against_excess(self):
+        size = testutil.MockSize(8)
+        cloud_nodes = [testutil.cloud_node_mock(n, size=size) for n in [8, 9]]
+        arv_nodes = [testutil.arvados_node_mock(n, size=size) for n in [8, 9]]
+        self.make_daemon(cloud_nodes, arv_nodes, [size],
+                         avail_sizes=[(size, {"cores":1})])
+        self.assertwait(lambda: self.assertEqual(2, self.paired_monitor_count()))
+        for mon_ref in self.monitor_list():
+            self.daemon.node_can_shutdown(mon_ref.proxy()).get(self.TIMEOUT)
+        self.assertEqual(1, self.node_shutdown.start.call_count)
+
+    def test_clean_shutdown_waits_for_node_setup_finish(self):
+        new_node = self.start_node_boot()
+        new_node.stop_if_no_cloud_node().get.return_value = False
+        new_node.stop_if_no_cloud_node.reset_mock()
+        self.daemon.shutdown().get(self.TIMEOUT)
+        self.assertTrue(new_node.stop_if_no_cloud_node.called)
+        self.daemon.node_setup_finished(new_node).get(self.TIMEOUT)
+        self.assertTrue(new_node.stop.called)
+        self.timer.deliver()
+        self.assertTrue(
+            self.daemon.actor_ref.actor_stopped.wait(self.TIMEOUT))
+
+    def test_wishlist_ignored_after_shutdown(self):
+        new_node = self.start_node_boot()
+        new_node.stop_if_no_cloud_node().get.return_value = False
+        new_node.stop_if_no_cloud_node.reset_mock()
+        self.daemon.shutdown().get(self.TIMEOUT)
+        size = testutil.MockSize(2)
+        self.daemon.update_server_wishlist([size] * 2).get(self.TIMEOUT)
+        self.timer.deliver()
+        self.busywait(lambda: 1 == self.node_setup.start.call_count)
+
+    def test_shutdown_actor_stopped_when_cloud_node_delisted(self):
+        self.make_daemon(cloud_nodes=[testutil.cloud_node_mock()])
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+        self.busywait(lambda: 1 == self.last_shutdown.stop.call_count)
+
+    def test_idle_node_disappearing_clears_status_idle_time_counter(self):
+        size = testutil.MockSize(1)
+        status.tracker._idle_nodes = {}
+        cloud_nodes = [testutil.cloud_node_mock(1, size=size)]
+        arv_nodes = [testutil.arvados_node_mock(1, job_uuid=None)]
+        self.make_daemon(cloud_nodes, arv_nodes, [size])
+        self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
+        for mon_ref in self.monitor_list():
+            monitor = mon_ref.proxy()
+            if monitor.cloud_node.get(self.TIMEOUT) is cloud_nodes[-1]:
+                break
+        else:
+            self.fail("monitor for idle node not found")
+        self.assertEqual(1, status.tracker.get('nodes_idle'))
+        hostname = monitor.arvados_node.get()['hostname']
+        self.assertIn(hostname, status.tracker._idle_nodes)
+        # Simulate the node disappearing from the cloud node list
+        self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+        self.busywait(lambda: 0 == self.alive_monitor_count())
+        self.assertNotIn(hostname, status.tracker._idle_nodes)
+
+    def test_shutdown_actor_cleanup_copes_with_dead_actors(self):
+        self.make_daemon(cloud_nodes=[testutil.cloud_node_mock()])
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        # We're mainly testing that update_cloud_nodes catches and handles
+        # the ActorDeadError.
+        self.last_shutdown.stop.side_effect = pykka.ActorDeadError
+        self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+        self.busywait(lambda: 1 == self.last_shutdown.stop.call_count)
+
+    def test_node_create_two_sizes(self):
+        small = testutil.MockSize(1)
+        big = testutil.MockSize(2)
+        avail_sizes = [(testutil.MockSize(1), {"cores":1}),
+                        (testutil.MockSize(2), {"cores":2})]
+        self.make_daemon(want_sizes=[small, small, small, big],
+                         avail_sizes=avail_sizes, max_nodes=4)
+
+        # the daemon runs in another thread, so we need to wait and see
+        # if it does all the work we're expecting it to do before stopping it.
+        self.busywait(lambda: self.node_setup.start.call_count == 4)
+        booting = self.daemon.booting.get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        sizecounts = {a[0].id: 0 for a in avail_sizes}
+        for b in booting.itervalues():
+            sizecounts[b.cloud_size.get().id] += 1
+        logging.info(sizecounts)
+        self.assertEqual(3, sizecounts[small.id])
+        self.assertEqual(1, sizecounts[big.id])
+
+    def test_node_max_nodes_two_sizes(self):
+        small = testutil.MockSize(1)
+        big = testutil.MockSize(2)
+        avail_sizes = [(testutil.MockSize(1), {"cores":1}),
+                        (testutil.MockSize(2), {"cores":2})]
+        self.make_daemon(want_sizes=[small, small, big, small],
+                         avail_sizes=avail_sizes, max_nodes=3)
+
+        # the daemon runs in another thread, so we need to wait and see
+        # if it does all the work we're expecting it to do before stopping it.
+        self.busywait(lambda: self.node_setup.start.call_count == 3)
+        booting = self.daemon.booting.get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        sizecounts = {a[0].id: 0 for a in avail_sizes}
+        for b in booting.itervalues():
+            sizecounts[b.cloud_size.get().id] += 1
+        self.assertEqual(2, sizecounts[small.id])
+        self.assertEqual(1, sizecounts[big.id])
+
+    def test_wishlist_ordering(self):
+        # Check that big nodes aren't prioritized; since #12199 containers are
+        # scheduled on specific node sizes.
+        small = testutil.MockSize(1)
+        big = testutil.MockSize(2)
+        avail_sizes = [(testutil.MockSize(1), {"cores":1}),
+                       (testutil.MockSize(2), {"cores":2})]
+        self.make_daemon(want_sizes=[small, small, small, big],
+                         avail_sizes=avail_sizes, max_nodes=3)
+
+        # the daemon runs in another thread, so we need to wait and see
+        # if it does all the work we're expecting it to do before stopping it.
+        self.busywait(lambda: self.node_setup.start.call_count == 3)
+        booting = self.daemon.booting.get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        sizecounts = {a[0].id: 0 for a in avail_sizes}
+        for b in booting.itervalues():
+            sizecounts[b.cloud_size.get().id] += 1
+        self.assertEqual(3, sizecounts[small.id])
+        self.assertEqual(0, sizecounts[big.id])
+
+    def test_wishlist_reconfigure(self):
+        small = testutil.MockSize(1)
+        big = testutil.MockSize(2)
+        avail_sizes = [(small, {"cores":1}), (big, {"cores":2})]
+
+        self.make_daemon(cloud_nodes=[testutil.cloud_node_mock(1, small),
+                                      testutil.cloud_node_mock(2, small),
+                                      testutil.cloud_node_mock(3, big)],
+                         arvados_nodes=[testutil.arvados_node_mock(1),
+                                        testutil.arvados_node_mock(2),
+                                        testutil.arvados_node_mock(3)],
+                         want_sizes=[small, small, big],
+                         avail_sizes=avail_sizes)
+        self.assertwait(lambda: self.assertEqual(3, self.paired_monitor_count()))
+        self.daemon.update_server_wishlist([small, big, big]).get(self.TIMEOUT)
+
+        self.assertEqual(0, self.node_shutdown.start.call_count)
+
+        for c in self.daemon.cloud_nodes.get().nodes.itervalues():
+            self.daemon.node_can_shutdown(c.actor)
+
+        booting = self.daemon.booting.get()
+        cloud_nodes = self.daemon.cloud_nodes.get()
+
+        self.busywait(lambda: 1 == self.node_setup.start.call_count)
+        self.busywait(lambda: 1 == self.node_shutdown.start.call_count)
+
+        self.stop_proxy(self.daemon)
+
+        # booting a new big node
+        sizecounts = {a[0].id: 0 for a in avail_sizes}
+        for b in booting.itervalues():
+            sizecounts[b.cloud_size.get().id] += 1
+        self.assertEqual(0, sizecounts[small.id])
+        self.assertEqual(1, sizecounts[big.id])
+
+        # shutting down a small node
+        sizecounts = {a[0].id: 0 for a in avail_sizes}
+        for b in cloud_nodes.nodes.itervalues():
+            if b.shutdown_actor is not None:
+                sizecounts[b.cloud_node.size.id] += 1
+        self.assertEqual(1, sizecounts[small.id])
+        self.assertEqual(0, sizecounts[big.id])
+
+    def test_node_max_price(self):
+        small = testutil.MockSize(1)
+        big = testutil.MockSize(2)
+        avail_sizes = [(testutil.MockSize(1), {"cores":1, "price":1}),
+                       (testutil.MockSize(2), {"cores":2, "price":2})]
+        self.make_daemon(want_sizes=[small, small, small, big],
+                         avail_sizes=avail_sizes,
+                         max_nodes=4,
+                         max_total_price=4)
+        # the daemon runs in another thread, so we need to wait and see
+        # if it does all the work we're expecting it to do before stopping it.
+        self.busywait(lambda: self.node_setup.start.call_count == 3)
+        booting = self.daemon.booting.get()
+        self.stop_proxy(self.daemon)
+
+        sizecounts = {a[0].id: 0 for a in avail_sizes}
+        for b in booting.itervalues():
+            sizecounts[b.cloud_size.get().id] += 1
+        logging.info(sizecounts)
+
+        # Booting 3 small nodes and not booting a big node would also partially
+        # satisfy the wishlist and come in under the price cap, however the way
+        # the update_server_wishlist() currently works effectively results in a
+        # round-robin creation of one node of each size in the wishlist, so
+        # test for that.
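+        # With prices small=1 and big=2, that order is small + big +
+        # small = 4, which exhausts max_total_price before a fourth
+        # node can boot.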
+        self.assertEqual(2, sizecounts[small.id])
+        self.assertEqual(1, sizecounts[big.id])
diff --git a/services/nodemanager/tests/test_failure.py b/services/nodemanager/tests/test_failure.py
new file mode 100644 (file)
index 0000000..8bf3ea8
--- /dev/null
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import errno
+import logging
+import time
+import threading
+import unittest
+
+import mock
+import pykka
+
+from . import testutil
+
+import arvnodeman.baseactor
+import arvnodeman.status as status
+
+class BogusActor(arvnodeman.baseactor.BaseNodeManagerActor):
+    def __init__(self, e, killfunc=None):
+        super(BogusActor, self).__init__(killfunc=killfunc)
+        self.exp = e
+
+    def doStuff(self):
+        raise self.exp
+
+    def ping(self):
+        # Called by WatchdogActorTest. This delay is longer than the
+        # watchdog's 1-second timeout, so the ping should fail.
+        time.sleep(2)
+        return True
+
+class ActorUnhandledExceptionTest(testutil.ActorTestMixin, unittest.TestCase):
+    def test_fatal_error(self):
+        for e in (MemoryError(), threading.ThreadError(), OSError(errno.ENOMEM, "")):
+            kill_mock = mock.Mock('os.kill')
+            bgact = BogusActor.start(e, killfunc=kill_mock)
+            act_thread = bgact.proxy().get_thread().get()
+            act = bgact.tell_proxy()
+            act.doStuff()
+            act.actor_ref.stop(block=True)
+            act_thread.join()
+            self.assertTrue(kill_mock.called)
+
+    def test_nonfatal_error(self):
+        status.tracker.update({'actor_exceptions': 0})
+        kill_mock = mock.Mock('os.kill')
+        bgact = BogusActor.start(OSError(errno.ENOENT, ""), killfunc=kill_mock)
+        act_thread = bgact.proxy().get_thread().get()
+        act = bgact.tell_proxy()
+        act.doStuff()
+        act.actor_ref.stop(block=True)
+        act_thread.join()
+        self.assertFalse(kill_mock.called)
+        self.assertEqual(1, status.tracker.get('actor_exceptions'))
+
+class WatchdogActorTest(testutil.ActorTestMixin, unittest.TestCase):
+
+    def test_time_timeout(self):
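+        # WatchdogActor pings the target actor; BogusActor.ping() sleeps
+        # for 2 seconds, longer than the 1-second timeout given here, so
+        # the watchdog should invoke killfunc.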
+        kill_mock = mock.Mock('os.kill')
+        act = BogusActor.start(OSError(errno.ENOENT, ""))
+        watch = arvnodeman.baseactor.WatchdogActor.start(1, act, killfunc=kill_mock)
+        time.sleep(1)
+        watch.stop(block=True)
+        act.stop(block=True)
+        self.assertTrue(kill_mock.called)
diff --git a/services/nodemanager/tests/test_jobqueue.py b/services/nodemanager/tests/test_jobqueue.py
new file mode 100644 (file)
index 0000000..de83b68
--- /dev/null
@@ -0,0 +1,239 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import unittest
+import mock
+
+import arvnodeman.jobqueue as jobqueue
+from . import testutil
+
+class ServerCalculatorTestCase(unittest.TestCase):
+    def make_calculator(self, factors, **kwargs):
+        return jobqueue.ServerCalculator(
+            [(testutil.MockSize(n), {'cores': n}) for n in factors], **kwargs)
+
+    def calculate(self, servcalc, *constraints):
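+        # Build a fake job queue: one job per constraint dict, with
+        # uuids zzzzz-jjjjj-000... numbered by position so tests can
+        # identify jobs by index.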
+        return servcalc.servers_for_queue(
+            [{'uuid': 'zzzzz-jjjjj-{:015x}'.format(index),
+              'runtime_constraints': cdict}
+             for index, cdict in enumerate(constraints)])
+
+    def test_empty_queue_needs_no_servers(self):
+        servcalc = self.make_calculator([1])
+        self.assertEqual(([], {}), servcalc.servers_for_queue([]))
+
+    def test_easy_server_count(self):
+        servcalc = self.make_calculator([1])
+        servlist, _ = self.calculate(servcalc, {'min_nodes': 3})
+        self.assertEqual(3, len(servlist))
+
+    def test_default_5pct_ram_value_decrease(self):
+        servcalc = self.make_calculator([1])
+        servlist, _ = self.calculate(servcalc, {'min_ram_mb_per_node': 128})
+        self.assertEqual(0, len(servlist))
+        servlist, _ = self.calculate(servcalc, {'min_ram_mb_per_node': 121})
+        self.assertEqual(1, len(servlist))
+
+    def test_custom_node_mem_scaling_factor(self):
+        # Simulate a custom 'node_mem_scaling' config parameter by passing
+        # the value to ServerCalculator
+        servcalc = self.make_calculator([1], node_mem_scaling=0.5)
+        servlist, _ = self.calculate(servcalc, {'min_ram_mb_per_node': 128})
+        self.assertEqual(0, len(servlist))
+        servlist, _ = self.calculate(servcalc, {'min_ram_mb_per_node': 64})
+        self.assertEqual(1, len(servlist))
+
+    def test_implicit_server_count(self):
+        servcalc = self.make_calculator([1])
+        servlist, _ = self.calculate(servcalc, {}, {'min_nodes': 3})
+        self.assertEqual(4, len(servlist))
+
+    def test_bad_min_nodes_override(self):
+        servcalc = self.make_calculator([1])
+        servlist, _ = self.calculate(servcalc,
+                                     {'min_nodes': -2}, {'min_nodes': 'foo'})
+        self.assertEqual(2, len(servlist))
+
+    def test_ignore_and_return_unsatisfiable_jobs(self):
+        servcalc = self.make_calculator([1], max_nodes=9)
+        servlist, u_jobs = self.calculate(servcalc,
+                                          {'min_cores_per_node': 2},
+                                          {'min_ram_mb_per_node': 256},
+                                          {'min_nodes': 6},
+                                          {'min_nodes': 12},
+                                          {'min_scratch_mb_per_node': 300000})
+        self.assertEqual(6, len(servlist))
+        # Only unsatisfiable jobs are returned in u_jobs
+        self.assertIn('zzzzz-jjjjj-000000000000000', u_jobs.keys())
+        self.assertIn('zzzzz-jjjjj-000000000000001', u_jobs.keys())
+        self.assertNotIn('zzzzz-jjjjj-000000000000002', u_jobs.keys())
+        self.assertIn('zzzzz-jjjjj-000000000000003', u_jobs.keys())
+        self.assertIn('zzzzz-jjjjj-000000000000004', u_jobs.keys())
+
+    def test_ignore_too_expensive_jobs(self):
+        servcalc = self.make_calculator([1, 2], max_nodes=12, max_price=6)
+        servlist, _ = self.calculate(servcalc,
+                                     {'min_cores_per_node': 1, 'min_nodes': 6})
+        self.assertEqual(6, len(servlist))
+
+        servlist, _ = self.calculate(servcalc,
+                                     {'min_cores_per_node': 2, 'min_nodes': 6})
+        self.assertEqual(0, len(servlist))
+
+    def test_job_requesting_max_nodes_accepted(self):
+        servcalc = self.make_calculator([1], max_nodes=4)
+        servlist, _ = self.calculate(servcalc, {'min_nodes': 4})
+        self.assertEqual(4, len(servlist))
+
+    def test_cheapest_size(self):
+        servcalc = self.make_calculator([2, 4, 1, 3])
+        self.assertEqual(testutil.MockSize(1), servcalc.cheapest_size())
+
+    def test_next_biggest(self):
+        servcalc = self.make_calculator([1, 2, 4, 8])
+        servlist, _ = self.calculate(servcalc,
+                                     {'min_cores_per_node': 3},
+                                     {'min_cores_per_node': 6})
+        self.assertEqual([servcalc.cloud_sizes[2].id,
+                          servcalc.cloud_sizes[3].id],
+                         [s.id for s in servlist])
+
+    def test_multiple_sizes(self):
+        servcalc = self.make_calculator([1, 2])
+        servlist, _ = self.calculate(servcalc,
+                                     {'min_cores_per_node': 2},
+                                     {'min_cores_per_node': 1},
+                                     {'min_cores_per_node': 1})
+        self.assertEqual([servcalc.cloud_sizes[1].id,
+                          servcalc.cloud_sizes[0].id,
+                          servcalc.cloud_sizes[0].id],
+                         [s.id for s in servlist])
+
+        servlist, _ = self.calculate(servcalc,
+                                     {'min_cores_per_node': 1},
+                                     {'min_cores_per_node': 2},
+                                     {'min_cores_per_node': 1})
+        self.assertEqual([servcalc.cloud_sizes[0].id,
+                          servcalc.cloud_sizes[1].id,
+                          servcalc.cloud_sizes[0].id],
+                         [s.id for s in servlist])
+
+        servlist, _ = self.calculate(servcalc,
+                                     {'min_cores_per_node': 1},
+                                     {'min_cores_per_node': 1},
+                                     {'min_cores_per_node': 2})
+        self.assertEqual([servcalc.cloud_sizes[0].id,
+                          servcalc.cloud_sizes[0].id,
+                          servcalc.cloud_sizes[1].id],
+                         [s.id for s in servlist])
+
+
+
+class JobQueueMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
+                                   unittest.TestCase):
+    TEST_CLASS = jobqueue.JobQueueMonitorActor
+
+
+    class MockCalculator(object):
+        @staticmethod
+        def servers_for_queue(queue):
+            return ([testutil.MockSize(n) for n in queue], {})
+
+
+    class MockCalculatorUnsatisfiableJobs(object):
+        @staticmethod
+        def servers_for_queue(queue):
+            return ([], {k["uuid"]: "Unsatisfiable job mock" for k in queue})
+
+
+    def build_monitor(self, side_effect, *args, **kwargs):
+        super(JobQueueMonitorActorTestCase, self).build_monitor(*args, **kwargs)
+        self.client.jobs().queue().execute.side_effect = side_effect
+
+    @mock.patch("subprocess32.check_call")
+    @mock.patch("subprocess32.check_output")
+    def test_unsatisfiable_jobs(self, mock_squeue, mock_scancel):
+        job_uuid = 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'
+        container_uuid = 'yyyyy-dz642-yyyyyyyyyyyyyyy'
+        mock_squeue.return_value = "1|1024|0|(Resources)|" + container_uuid + "||1234567890\n"
+
+        self.build_monitor([{'items': [{'uuid': job_uuid}]}],
+                           self.MockCalculatorUnsatisfiableJobs(), True, True)
+        self.monitor.subscribe(self.subscriber).get(self.TIMEOUT)
+        self.monitor.ping().get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.client.jobs().cancel.assert_called_with(uuid=job_uuid)
+        mock_scancel.assert_called_with(['scancel', '--name='+container_uuid])
+
+    @mock.patch("subprocess32.check_output")
+    def test_subscribers_get_server_lists(self, mock_squeue):
+        mock_squeue.return_value = ""
+
+        self.build_monitor([{'items': [1, 2]}], self.MockCalculator(), True, True)
+        self.monitor.subscribe(self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with([testutil.MockSize(1),
+                                            testutil.MockSize(2)])
+
+    @mock.patch("subprocess32.check_output")
+    def test_squeue_server_list(self, mock_squeue):
+        mock_squeue.return_value = """1|1024|0|(Resources)|zzzzz-dz642-zzzzzzzzzzzzzzy|(null)|1234567890
+2|1024|0|(Resources)|zzzzz-dz642-zzzzzzzzzzzzzzz|(null)|1234567890
+"""
+
+        super(JobQueueMonitorActorTestCase, self).build_monitor(jobqueue.ServerCalculator(
+            [(testutil.MockSize(n), {'cores': n, 'ram': n*1024, 'scratch': n}) for n in range(1, 3)]),
+                                                                True, True)
+        self.monitor.subscribe(self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with([testutil.MockSize(1),
+                                            testutil.MockSize(2)])
+
+    @mock.patch("subprocess32.check_output")
+    def test_squeue_server_list_suffix(self, mock_squeue):
+        mock_squeue.return_value = """1|1024M|0|(ReqNodeNotAvail, UnavailableNodes:compute123)|zzzzz-dz642-zzzzzzzzzzzzzzy|(null)|1234567890
+1|2G|0|(ReqNodeNotAvail)|zzzzz-dz642-zzzzzzzzzzzzzzz|(null)|1234567890
+"""
+
+        super(JobQueueMonitorActorTestCase, self).build_monitor(jobqueue.ServerCalculator(
+            [(testutil.MockSize(n), {'cores': n, 'ram': n*1024, 'scratch': n}) for n in range(1, 3)]),
+                                                                True, True)
+        self.monitor.subscribe(self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with([testutil.MockSize(1),
+                                            testutil.MockSize(2)])
+
+    @mock.patch("subprocess32.check_output")
+    def test_squeue_server_list_instancetype_constraint(self, mock_squeue):
+        mock_squeue.return_value = """1|1024|0|(Resources)|zzzzz-dz642-zzzzzzzzzzzzzzy|instancetype=z2.test|1234567890\n"""
+        super(JobQueueMonitorActorTestCase, self).build_monitor(jobqueue.ServerCalculator(
+            [(testutil.MockSize(n), {'cores': n, 'ram': n*1024, 'scratch': n}) for n in range(1, 3)]),
+                                                                True, True)
+        self.monitor.subscribe(self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with([testutil.MockSize(2)])
+
+    def test_coerce_to_mb(self):
+        self.assertEqual(1, jobqueue.JobQueueMonitorActor.coerce_to_mb("1"))
+        self.assertEqual(512, jobqueue.JobQueueMonitorActor.coerce_to_mb("512"))
+        self.assertEqual(512, jobqueue.JobQueueMonitorActor.coerce_to_mb("512M"))
+        self.assertEqual(1024, jobqueue.JobQueueMonitorActor.coerce_to_mb("1024M"))
+        self.assertEqual(1024, jobqueue.JobQueueMonitorActor.coerce_to_mb("1G"))
+        self.assertEqual(1536, jobqueue.JobQueueMonitorActor.coerce_to_mb("1.5G"))
+        self.assertEqual(2048, jobqueue.JobQueueMonitorActor.coerce_to_mb("2G"))
+        self.assertEqual(1025, jobqueue.JobQueueMonitorActor.coerce_to_mb("1025M"))
+        self.assertEqual(1048576, jobqueue.JobQueueMonitorActor.coerce_to_mb("1T"))
+        self.assertEqual(1572864, jobqueue.JobQueueMonitorActor.coerce_to_mb("1.5T"))
+        self.assertEqual(1073741824, jobqueue.JobQueueMonitorActor.coerce_to_mb("1P"))
+        self.assertEqual(1610612736, jobqueue.JobQueueMonitorActor.coerce_to_mb("1.5P"))
+        self.assertEqual(0, jobqueue.JobQueueMonitorActor.coerce_to_mb("0"))
+        self.assertEqual(0, jobqueue.JobQueueMonitorActor.coerce_to_mb("0M"))
+        self.assertEqual(0, jobqueue.JobQueueMonitorActor.coerce_to_mb("0G"))
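+        # A minimal sketch of the conversion these cases pin down
+        # (hypothetical, not the actual implementation): bare numbers
+        # and an "M" suffix mean megabytes; G/T/P multiply by powers
+        # of 1024.
+        #
+        #   multipliers = {'M': 1, 'G': 1024, 'T': 1024**2, 'P': 1024**3}
+        #   def coerce_to_mb(s):
+        #       if s[-1] in multipliers:
+        #           return int(float(s[:-1]) * multipliers[s[-1]])
+        #       return int(s)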
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/services/nodemanager/tests/test_nodelist.py b/services/nodemanager/tests/test_nodelist.py
new file mode 100644 (file)
index 0000000..df31a12
--- /dev/null
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import unittest
+import mock
+
+import arvnodeman.nodelist as nodelist
+from libcloud.compute.base import NodeSize
+from . import testutil
+
+class ArvadosNodeListMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
+                                          unittest.TestCase):
+    TEST_CLASS = nodelist.ArvadosNodeListMonitorActor
+
+    def build_monitor(self, side_effect, *args, **kwargs):
+        super(ArvadosNodeListMonitorActorTestCase, self).build_monitor(
+            *args, **kwargs)
+        self.client.nodes().list().execute.side_effect = side_effect
+
+    @mock.patch("subprocess32.check_output")
+    def test_uuid_is_subscription_key(self, sinfo_mock):
+        sinfo_mock.return_value = ""
+        node = testutil.arvados_node_mock()
+        self.build_monitor([{
+            'items': [node],
+            'items_available': 1,
+            'offset': 0
+        }, {
+            'items': [],
+            'items_available': 1,
+            'offset': 1
+        }])
+        self.monitor.subscribe_to(node['uuid'],
+                                  self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with(node)
+        self.assertEqual("down", node["crunch_worker_state"])
+
+    @mock.patch("subprocess32.check_output")
+    def test_update_from_sinfo(self, sinfo_mock):
+        sinfo_mock.return_value = """compute1|idle|instancetype=a1.test
+compute2|alloc|(null)
+notarvados12345|idle|(null)
+"""
+        nodeIdle = testutil.arvados_node_mock(node_num=1)
+        nodeBusy = testutil.arvados_node_mock(node_num=2)
+        nodeMissing = testutil.arvados_node_mock(node_num=99)
+        self.build_monitor([{
+            'items': [nodeIdle, nodeBusy, nodeMissing],
+            'items_available': 1,
+            'offset': 0
+        }, {
+            'items': [],
+            'items_available': 1,
+            'offset': 1
+        }])
+        self.monitor.subscribe_to(nodeMissing['uuid'],
+                                  self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with(nodeMissing)
+
+        self.assertEqual("idle", nodeIdle["crunch_worker_state"])
+        self.assertEqual("busy", nodeBusy["crunch_worker_state"])
+        self.assertEqual("down", nodeMissing["crunch_worker_state"])
+
+        self.assertEqual("instancetype=a1.test", nodeIdle["slurm_node_features"])
+        self.assertEqual("", nodeBusy["slurm_node_features"])
+        self.assertEqual("", nodeMissing["slurm_node_features"])
+
+
+class CloudNodeListMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
+                                        unittest.TestCase):
+    TEST_CLASS = nodelist.CloudNodeListMonitorActor
+
+    class MockNode(object):
+        def __init__(self, count):
+            self.id = str(count)
+            self.name = 'test{}.example.com'.format(count)
+            self.private_ips = ['10.0.0.{}'.format(count)]
+            self.public_ips = []
+            self.size = testutil.MockSize(1)
+            self.state = 0
+            self.extra = {'arvados_node_size': self.size.id}
+
+
+    def build_monitor(self, side_effect, *args, **kwargs):
+        super(CloudNodeListMonitorActorTestCase, self).build_monitor(
+            *args, **kwargs)
+        self.client.list_nodes.side_effect = side_effect
+
+    def test_id_is_subscription_key(self):
+        node = self.MockNode(1)
+        mock_calc = mock.MagicMock()
+        mock_calc.find_size.return_value = testutil.MockSize(2)
+        self.build_monitor([[node]], mock_calc)
+        self.monitor.subscribe_to('1', self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with(node)
+        self.assertEqual(testutil.MockSize(2), node.size)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/services/nodemanager/tests/test_status.py b/services/nodemanager/tests/test_status.py
new file mode 100644 (file)
index 0000000..2a1c0fc
--- /dev/null
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+from future import standard_library
+
+import json
+import mock
+import random
+import requests
+import unittest
+
+import arvnodeman.status as status
+import arvnodeman.config as config
+
+
+class TestServer(object):
+    def __init__(self, management_token=None):
+        self.mgmt_token = management_token
+
+    def __enter__(self):
+        cfg = config.NodeManagerConfig()
+        cfg.set('Manage', 'port', '0')
+        cfg.set('Manage', 'address', '127.0.0.1')
+        if self.mgmt_token is not None:
+            cfg.set('Manage', 'ManagementToken', self.mgmt_token)
+        self.srv = status.Server(cfg)
+        self.srv.start()
+        addr, port = self.srv.server_address
+        self.srv_base = 'http://127.0.0.1:'+str(port)
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.srv.shutdown()
+
+    def get_status_response(self):
+        return requests.get(self.srv_base+'/status.json')
+
+    def get_status(self):
+        return self.get_status_response().json()
+
+    def get_healthcheck_ping(self, auth_header=None):
+        headers = {}
+        if auth_header is not None:
+            headers['Authorization'] = auth_header
+        return requests.get(self.srv_base+'/_health/ping', headers=headers)
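+
+# TestServer is used as a context manager:
+#
+#   with TestServer('sometoken') as srv:
+#       srv.get_status()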
+
+class StatusServerUpdates(unittest.TestCase):
+    def test_updates(self):
+        with TestServer() as srv:
+            for n in [1, 2, 3]:
+                status.tracker.update({'nodes_'+str(n): n})
+                r = srv.get_status_response()
+                self.assertEqual(200, r.status_code)
+                self.assertEqual('application/json', r.headers['content-type'])
+                resp = r.json()
+                self.assertEqual(n, resp['nodes_'+str(n)])
+            self.assertEqual(1, resp['nodes_1'])
+            self.assertIn('Version', resp)
+            self.assertIn('config_max_nodes', resp)
+
+    def test_counters(self):
+        with TestServer() as srv:
+            resp = srv.get_status()
+            # Test that the counters exist
+            for counter in ['list_nodes_errors', 'create_node_errors',
+                'destroy_node_errors', 'boot_failures', 'actor_exceptions']:
+                self.assertIn(counter, resp)
+            # Test counter increment
+            for count in range(1, 3):
+                status.tracker.counter_add('a_counter')
+                resp = srv.get_status()
+                self.assertEqual(count, resp['a_counter'])
+
+    @mock.patch('time.time')
+    def test_idle_times(self, time_mock):
+        with TestServer() as srv:
+            resp = srv.get_status()
+            node_name = 'idle_compute{}'.format(random.randint(1, 1024))
+            self.assertIn('idle_times', resp)
+            # Test adding an idle node
+            time_mock.return_value = 10
+            status.tracker.idle_in(node_name)
+            time_mock.return_value += 10
+            resp = srv.get_status()
+            self.assertEqual(10, resp['idle_times'][node_name])
+            # Test adding the same idle node a 2nd time
+            time_mock.return_value += 10
+            status.tracker.idle_in(node_name)
+            time_mock.return_value += 10
+            resp = srv.get_status()
+            # The idle timestamp doesn't get reset if it already exists
+            self.assertEqual(30, resp['idle_times'][node_name])
+            # Test removing an idle node
+            status.tracker.idle_out(node_name)
+            resp = srv.get_status()
+            self.assertNotIn(node_name, resp['idle_times'])
+
+
+class StatusServerDisabled(unittest.TestCase):
+    def test_config_disabled(self):
+        cfg = config.NodeManagerConfig()
+        cfg.set('Manage', 'port', '-1')
+        cfg.set('Manage', 'address', '127.0.0.1')
+        self.srv = status.Server(cfg)
+        self.srv.start()
+        self.assertFalse(self.srv.enabled)
+        self.assertFalse(getattr(self.srv, '_thread', False))
+
+class HealthcheckPing(unittest.TestCase):
+    def test_ping_disabled(self):
+        with TestServer() as srv:
+            r = srv.get_healthcheck_ping()
+            self.assertEqual(404, r.status_code)
+
+    def test_ping_no_auth(self):
+        with TestServer('configuredmanagementtoken') as srv:
+            r = srv.get_healthcheck_ping()
+            self.assertEqual(401, r.status_code)
+
+    def test_ping_bad_auth_format(self):
+        with TestServer('configuredmanagementtoken') as srv:
+            r = srv.get_healthcheck_ping('noBearer')
+            self.assertEqual(403, r.status_code)
+
+    def test_ping_bad_auth_token(self):
+        with TestServer('configuredmanagementtoken') as srv:
+            r = srv.get_healthcheck_ping('Bearer badtoken')
+            self.assertEqual(403, r.status_code)
+
+    def test_ping_success(self):
+        with TestServer('configuredmanagementtoken') as srv:
+            r = srv.get_healthcheck_ping('Bearer configuredmanagementtoken')
+            self.assertEqual(200, r.status_code)
+            self.assertEqual('application/json', r.headers['content-type'])
+            resp = r.json()
+            self.assertEqual('{"health": "OK"}', json.dumps(resp))
diff --git a/services/nodemanager/tests/test_timedcallback.py b/services/nodemanager/tests/test_timedcallback.py
new file mode 100644 (file)
index 0000000..21a9b5a
--- /dev/null
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import time
+import unittest
+
+import mock
+import pykka
+
+import arvnodeman.timedcallback as timedcallback
+from . import testutil
+
+@testutil.no_sleep
+class TimedCallBackActorTestCase(testutil.ActorTestMixin, unittest.TestCase):
+    def test_immediate_turnaround(self):
+        receiver = mock.Mock()
+        deliverer = timedcallback.TimedCallBackActor.start().proxy()
+        deliverer.schedule(time.time() - 1, receiver,
+                           'immediate').get(self.TIMEOUT)
+        self.stop_proxy(deliverer)
+        receiver.assert_called_with('immediate')
+
+    def test_delayed_turnaround(self):
+        receiver = mock.Mock()
+        mock_now = mock.Mock()
+        mock_now.return_value = 0
+        deliverer = timedcallback.TimedCallBackActor.start(timefunc=mock_now).proxy()
+        deliverer.schedule(1, receiver, 'delayed')
+        deliverer.schedule(3, receiver, 'failure').get(self.TIMEOUT)
+        self.assertFalse(receiver.called)
+        mock_now.return_value = 2
+        deliverer.schedule(3, receiver, 'failure').get(self.TIMEOUT)
+        self.stop_proxy(deliverer)
+        receiver.assert_called_with('delayed')
+
+    def test_out_of_order_scheduling(self):
+        receiver = mock.Mock()
+        mock_now = mock.Mock()
+        mock_now.return_value = 1.5
+        deliverer = timedcallback.TimedCallBackActor.start(timefunc=mock_now).proxy()
+        deliverer.schedule(2, receiver, 'second')
+        deliverer.schedule(1, receiver, 'first')
+        deliverer.schedule(3, receiver, 'failure').get(self.TIMEOUT)
+        receiver.assert_called_with('first')
+        mock_now.return_value = 2.5
+        deliverer.schedule(3, receiver, 'failure').get(self.TIMEOUT)
+        self.stop_proxy(deliverer)
+        receiver.assert_called_with('second')
+
+    def test_dead_actors_ignored(self):
+        receiver = mock.Mock(name='dead_actor', spec=pykka.ActorRef)
+        receiver.tell.side_effect = pykka.ActorDeadError
+        deliverer = timedcallback.TimedCallBackActor.start().proxy()
+        deliverer.schedule(time.time() - 1, receiver.tell,
+                           'error').get(self.TIMEOUT)
+        self.assertTrue(self.stop_proxy(deliverer), "deliverer died")
+        receiver.tell.assert_called_with('error')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/services/nodemanager/tests/testutil.py b/services/nodemanager/tests/testutil.py
new file mode 100644 (file)
index 0000000..ee475ef
--- /dev/null
@@ -0,0 +1,236 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import, print_function
+
+import contextlib
+import datetime
+import mock
+import pykka
+import sys
+import threading
+import time
+
+import libcloud.common.types as cloud_types
+
+from . import pykka_timeout
+
+no_sleep = mock.patch('time.sleep', lambda n: None)
+
+def arvados_node_mock(node_num=99, job_uuid=None, age=-1, **kwargs):
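+    # Build the dict the API server would return for a compute node:
+    # age makes modified_at/ping times look that many seconds old,
+    # job_uuid=True substitutes a placeholder job UUID, and extra
+    # kwargs override fields directly.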
+    mod_time = datetime.datetime.utcnow() - datetime.timedelta(seconds=age)
+    mod_time_s = mod_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
+    if job_uuid is True:
+        job_uuid = 'zzzzz-jjjjj-jobjobjobjobjob'
+    crunch_worker_state = 'idle' if (job_uuid is None) else 'busy'
+    node = {'uuid': 'zzzzz-yyyyy-{:015x}'.format(node_num),
+            'created_at': '2014-01-01T01:02:03.04050607Z',
+            'modified_at': mod_time_s,
+            'first_ping_at': kwargs.pop('first_ping_at', mod_time_s),
+            'last_ping_at': mod_time_s,
+            'slot_number': node_num,
+            'hostname': 'compute{}'.format(node_num),
+            'domain': 'zzzzz.arvadosapi.com',
+            'ip_address': ip_address_mock(node_num),
+            'job_uuid': job_uuid,
+            'crunch_worker_state': crunch_worker_state,
+            'properties': {},
+            'info': {'ping_secret': 'defaulttestsecret', 'ec2_instance_id': str(node_num)}}
+    node.update(kwargs)
+    return node
+
+def cloud_object_mock(name_id, **extra):
+    # A very generic mock, useful for stubbing libcloud objects we
+    # only search for and pass around, like locations, subnets, etc.
+    cloud_object = mock.NonCallableMagicMock(['id', 'name'],
+                                             name='cloud_object')
+    cloud_object.name = str(name_id)
+    cloud_object.id = 'id_' + cloud_object.name
+    cloud_object.extra = extra
+    return cloud_object
+
+
+def cloud_node_fqdn(node):
+    # We intentionally put the FQDN somewhere goofy to make sure tested code is
+    # using this function for lookups.
+    return node.extra.get('testname', node.name+'.NoTestName.invalid')
+
+def ip_address_mock(last_octet):
+    return '10.20.30.{}'.format(last_octet)
+
+@contextlib.contextmanager
+def redirected_streams(stdout=None, stderr=None):
+    orig_stdout, sys.stdout = sys.stdout, stdout or sys.stdout
+    orig_stderr, sys.stderr = sys.stderr, stderr or sys.stderr
+    try:
+        yield
+    finally:
+        sys.stdout = orig_stdout
+        sys.stderr = orig_stderr
+
+
+class MockShutdownTimer(object):
+    def _set_state(self, is_open, next_opening):
+        self.window_open = lambda: is_open
+        self.next_opening = lambda: next_opening
+
+
+class MockSize(object):
+    def __init__(self, factor, preemptible=False):
+        self.id = 'z{}.test'.format(factor)
+        self.name = 'test size '+self.id
+        self.ram = 128 * factor
+        self.disk = factor   # GB
+        self.scratch = 1000 * factor # MB
+        self.bandwidth = 16 * factor
+        self.price = float(factor)
+        self.extra = {}
+        self.real = self
+        self.preemptible = preemptible
+
+    def __eq__(self, other):
+        return self.id == other.id
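+
+# MockSize(2), for example, models size id 'z2.test' with 256 MB RAM,
+# 2 GB disk, 2000 MB scratch and price 2.0; equality is by id only.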
+
+
+class MockTimer(object):
+    def __init__(self, deliver_immediately=True):
+        self.deliver_immediately = deliver_immediately
+        self.messages = []
+        self.lock = threading.Lock()
+
+    def deliver(self):
+        with self.lock:
+            to_deliver = self.messages
+            self.messages = []
+        for callback, args, kwargs in to_deliver:
+            try:
+                callback(*args, **kwargs)
+            except pykka.ActorDeadError:
+                pass
+
+    def schedule(self, want_time, callback, *args, **kwargs):
+        with self.lock:
+            self.messages.append((callback, args, kwargs))
+        if self.deliver_immediately:
+            self.deliver()
+
+
+class ActorTestMixin(object):
+    FUTURE_CLASS = pykka.ThreadingFuture
+    TIMEOUT = pykka_timeout
+
+    def tearDown(self):
+        pykka.ActorRegistry.stop_all()
+
+    def stop_proxy(self, proxy):
+        th = proxy.get_thread().get()
+        t = proxy.actor_ref.stop(timeout=self.TIMEOUT)
+        th.join()
+        return t
+
+    def wait_for_assignment(self, proxy, attr_name, unassigned=None,
+                            timeout=TIMEOUT):
+        deadline = time.time() + timeout
+        while True:
+            loop_timeout = deadline - time.time()
+            if loop_timeout <= 0:
+                self.fail("actor did not assign {} in time".format(attr_name))
+            result = getattr(proxy, attr_name).get(loop_timeout)
+            if result is not unassigned:
+                return result
+
+    def busywait(self, f, finalize=None):
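+        # Poll f() every 0.1s, up to 20 times (~2 seconds); finalize()
+        # runs once before the final assertion either way.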
+        n = 0
+        while not f() and n < 20:
+            time.sleep(.1)
+            n += 1
+        if finalize is not None:
+            finalize()
+        self.assertTrue(f())
+
+
+class DriverTestMixin(object):
+    def setUp(self):
+        self.driver_mock = mock.MagicMock(name='driver_mock')
+        super(DriverTestMixin, self).setUp()
+
+    def new_driver(self, auth_kwargs={}, list_kwargs={}, create_kwargs=None):
+        # Copy to avoid mutating a shared default dict via setdefault().
+        create_kwargs = dict(create_kwargs or {})
+        create_kwargs.setdefault('ping_host', '100::')
+        return self.TEST_CLASS(
+            auth_kwargs, list_kwargs, create_kwargs,
+            driver_class=self.driver_mock)
+
+    def driver_method_args(self, method_name):
+        return getattr(self.driver_mock(), method_name).call_args
+
+    def test_driver_create_retry(self):
+        with mock.patch('time.sleep'):
+            driver_mock2 = mock.MagicMock(name='driver_mock2')
+            self.driver_mock.side_effect = (Exception("oops"), driver_mock2)
+            kwargs = {'user_id': 'foo'}
+            driver = self.new_driver(auth_kwargs=kwargs)
+            self.assertTrue(self.driver_mock.called)
+            self.assertIs(driver.real, driver_mock2)
+
+    def test_create_can_find_node_after_timeout(self, create_kwargs={}, node_extra={}):
+        driver = self.new_driver(create_kwargs=create_kwargs)
+        arv_node = arvados_node_mock()
+        cloud_node = cloud_node_mock(**node_extra)
+        cloud_node.name = driver.create_cloud_name(arv_node)
+        create_method = self.driver_mock().create_node
+        create_method.side_effect = cloud_types.LibcloudError("fake timeout")
+        list_method = self.driver_mock().list_nodes
+        list_method.return_value = [cloud_node]
+        actual = driver.create_node(MockSize(1), arv_node)
+        self.assertIs(cloud_node, actual)
+
+    def test_create_can_raise_exception_after_timeout(self):
+        driver = self.new_driver()
+        arv_node = arvados_node_mock()
+        create_method = self.driver_mock().create_node
+        create_method.side_effect = cloud_types.LibcloudError("fake timeout")
+        list_method = self.driver_mock().list_nodes
+        list_method.return_value = []
+        with self.assertRaises(cloud_types.LibcloudError) as exc_test:
+            driver.create_node(MockSize(1), arv_node)
+        self.assertIs(create_method.side_effect, exc_test.exception)
+
+    def check_node_found_after_timeout_has_fixed_size(self, size, cloud_node,
+                                                      create_kwargs={}):
+        # This method needs to be called explicitly by driver test suites
+        # that need it.
+        self.driver_mock().list_sizes.return_value = [size]
+        driver = self.new_driver(create_kwargs=create_kwargs)
+        arv_node = arvados_node_mock()
+        cloud_node.name = driver.create_cloud_name(arv_node)
+        create_method = self.driver_mock().create_node
+        create_method.side_effect = cloud_types.LibcloudError("fake timeout")
+        self.driver_mock().list_nodes.return_value = [cloud_node]
+        actual = driver.create_node(size, arv_node)
+        self.assertIs(size, actual.size)
+
+
+class RemotePollLoopActorTestMixin(ActorTestMixin):
+    def build_monitor(self, *args, **kwargs):
+        self.timer = mock.MagicMock(name='timer_mock')
+        self.client = mock.MagicMock(name='client_mock')
+        self.subscriber = mock.Mock(name='subscriber_mock')
+        self.monitor = self.TEST_CLASS.start(
+            self.client, self.timer, *args, **kwargs).proxy()
+
+def cloud_node_mock(node_num=99, size=None, **extra):
+    if size is None:
+        size = MockSize(node_num)
+    node = mock.NonCallableMagicMock(
+        ['id', 'name', 'state', 'public_ips', 'private_ips', 'driver', 'size',
+         'image', 'extra'],
+        name='cloud_node')
+    node.id = str(node_num)
+    node.name = node.id
+    node.size = size
+    node.public_ips = []
+    node.private_ips = [ip_address_mock(node_num)]
+    node.extra = extra
+    return node
diff --git a/services/ws/arvados-ws.service b/services/ws/arvados-ws.service
new file mode 100644 (file)
index 0000000..9e02d41
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados websocket server
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/ws/ws.yml
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=notify
+ExecStart=/usr/bin/arvados-ws
+Restart=always
+RestartSec=1
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
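+
+# After installation (assumed usage, not part of this unit):
+#   systemctl enable arvados-ws
+#   systemctl start arvados-ws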
diff --git a/services/ws/config.go b/services/ws/config.go
new file mode 100644 (file)
index 0000000..ead1ec2
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+type wsConfig struct {
+       Client       arvados.Client
+       Postgres     arvados.PostgreSQLConnection
+       PostgresPool int
+       Listen       string
+       LogLevel     string
+       LogFormat    string
+
+       PingTimeout      arvados.Duration
+       ClientEventQueue int
+       ServerEventQueue int
+
+       ManagementToken string
+}
+
+func defaultConfig() wsConfig {
+       return wsConfig{
+               Client: arvados.Client{
+                       APIHost: "localhost:443",
+               },
+               Postgres: arvados.PostgreSQLConnection{
+                       "dbname":                    "arvados_production",
+                       "user":                      "arvados",
+                       "password":                  "xyzzy",
+                       "host":                      "localhost",
+                       "connect_timeout":           "30",
+                       "sslmode":                   "require",
+                       "fallback_application_name": "arvados-ws",
+               },
+               PostgresPool:     64,
+               LogLevel:         "info",
+               LogFormat:        "json",
+               PingTimeout:      arvados.Duration(time.Minute),
+               ClientEventQueue: 64,
+               ServerEventQueue: 4,
+       }
+}
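+
+// These defaults are overridden by whatever is set in the configuration
+// file (/etc/arvados/ws/ws.yml by default; see doc.go).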
diff --git a/services/ws/doc.go b/services/ws/doc.go
new file mode 100644 (file)
index 0000000..0925397
--- /dev/null
@@ -0,0 +1,59 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Arvados-ws exposes Arvados APIs (currently just one, the
+// cache-invalidation event feed at "ws://.../websocket") to
+// websocket clients.
+//
+// Installation
+//
+// See https://doc.arvados.org/install/install-ws.html.
+//
+// Developer info
+//
+// See https://dev.arvados.org/projects/arvados/wiki/Hacking_websocket_server.
+//
+// Usage
+//
+//     arvados-ws [-config /etc/arvados/ws/ws.yml] [-dump-config]
+//
+// Minimal configuration
+//
+//     Client:
+//       APIHost: localhost:443
+//     Listen: ":1234"
+//     Postgres:
+//       dbname: arvados_production
+//       host: localhost
+//       password: xyzzy
+//       user: arvados
+//
+// Options
+//
+// -config path
+//
+// Load configuration from the given file instead of the default
+// /etc/arvados/ws/ws.yml
+//
+// -dump-config
+//
+// Print the loaded configuration to stdout and exit.
+//
+// Logs
+//
+// Logs are printed to stderr, formatted as JSON.
+//
+// A log is printed each time a client connects or disconnects.
+//
+// Enable additional logs by configuring:
+//
+//     LogLevel: debug
+//
+// Runtime status
+//
+// GET /debug.json responds with debug stats.
+//
+// GET /status.json responds with health check results and
+// activity/usage metrics.
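+//
+// For example, with the Listen address from the minimal configuration
+// above:
+//
+//     curl http://localhost:1234/status.json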
+package main
diff --git a/services/ws/event.go b/services/ws/event.go
new file mode 100644 (file)
index 0000000..0e414a3
--- /dev/null
@@ -0,0 +1,70 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "database/sql"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/ghodss/yaml"
+)
+
+type eventSink interface {
+       Channel() <-chan *event
+       Stop()
+}
+
+type eventSource interface {
+       NewSink() eventSink
+       DB() *sql.DB
+       DBHealth() error
+}
+
+type event struct {
+       LogID    uint64
+       Received time.Time
+       Ready    time.Time
+       Serial   uint64
+
+       db     *sql.DB
+       logRow *arvados.Log
+       err    error
+       mtx    sync.Mutex
+}
+
+// Detail returns the database row corresponding to the event. It can
+// be called safely from multiple goroutines. Only one attempt will be
+// made. If the database row cannot be retrieved, Detail returns nil.
+func (e *event) Detail() *arvados.Log {
+       e.mtx.Lock()
+       defer e.mtx.Unlock()
+       if e.logRow != nil || e.err != nil {
+               return e.logRow
+       }
+       var logRow arvados.Log
+       var propYAML []byte
+       e.err = e.db.QueryRow(`SELECT id, uuid, object_uuid, COALESCE(object_owner_uuid,''), COALESCE(event_type,''), event_at, created_at, properties FROM logs WHERE id = $1`, e.LogID).Scan(
+               &logRow.ID,
+               &logRow.UUID,
+               &logRow.ObjectUUID,
+               &logRow.ObjectOwnerUUID,
+               &logRow.EventType,
+               &logRow.EventAt,
+               &logRow.CreatedAt,
+               &propYAML)
+       if e.err != nil {
+               logger(nil).WithField("LogID", e.LogID).WithError(e.err).Error("QueryRow failed")
+               return nil
+       }
+       e.err = yaml.Unmarshal(propYAML, &logRow.Properties)
+       if e.err != nil {
+               logger(nil).WithField("LogID", e.LogID).WithError(e.err).Error("yaml decode failed")
+               return nil
+       }
+       e.logRow = &logRow
+       return e.logRow
+}
diff --git a/services/ws/event_source.go b/services/ws/event_source.go
new file mode 100644 (file)
index 0000000..309dab7
--- /dev/null
@@ -0,0 +1,284 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "context"
+       "database/sql"
+       "strconv"
+       "sync"
+       "sync/atomic"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/stats"
+       "github.com/lib/pq"
+)
+
+type pgEventSource struct {
+       DataSource   string
+       MaxOpenConns int
+       QueueSize    int
+
+       db         *sql.DB
+       pqListener *pq.Listener
+       queue      chan *event
+       sinks      map[*pgEventSink]bool
+       mtx        sync.Mutex
+
+       lastQDelay time.Duration
+       eventsIn   uint64
+       eventsOut  uint64
+
+       cancel func()
+
+       setupOnce sync.Once
+       ready     chan bool
+}
+
+var _ debugStatuser = (*pgEventSource)(nil)
+
+func (ps *pgEventSource) listenerProblem(et pq.ListenerEventType, err error) {
+       if et == pq.ListenerEventConnected {
+               logger(nil).Debug("pgEventSource connected")
+               return
+       }
+
+       // Until we have a mechanism for catching up on missed events,
+       // we cannot recover from a dropped connection without
+       // breaking our promises to clients.
+       logger(nil).
+               WithField("eventType", et).
+               WithError(err).
+               Error("listener problem")
+       ps.cancel()
+}
+
+func (ps *pgEventSource) setup() {
+       ps.ready = make(chan bool)
+}
+
+// Close stops listening for new events and disconnects all clients.
+func (ps *pgEventSource) Close() {
+       ps.WaitReady()
+       ps.cancel()
+}
+
+// WaitReady returns when the event listener is connected.
+func (ps *pgEventSource) WaitReady() {
+       ps.setupOnce.Do(ps.setup)
+       <-ps.ready
+}
+
+// Run listens for event notifications on the "logs" channel and sends
+// them to all subscribers.
+func (ps *pgEventSource) Run() {
+       logger(nil).Debug("pgEventSource Run starting")
+       defer logger(nil).Debug("pgEventSource Run finished")
+
+       ps.setupOnce.Do(ps.setup)
+       ready := ps.ready
+       defer func() {
+               if ready != nil {
+                       close(ready)
+               }
+       }()
+
+       ctx, cancel := context.WithCancel(context.Background())
+       ps.cancel = cancel
+       defer cancel()
+
+       defer func() {
+               // Disconnect all clients
+               ps.mtx.Lock()
+               for sink := range ps.sinks {
+                       close(sink.channel)
+               }
+               ps.sinks = nil
+               ps.mtx.Unlock()
+       }()
+
+       db, err := sql.Open("postgres", ps.DataSource)
+       if err != nil {
+               logger(nil).WithError(err).Error("sql.Open failed")
+               return
+       }
+       if ps.MaxOpenConns <= 0 {
+               logger(nil).Warn("no database connection limit configured -- consider setting PostgresPool>0 in arvados-ws configuration file")
+       }
+       db.SetMaxOpenConns(ps.MaxOpenConns)
+       if err = db.Ping(); err != nil {
+               logger(nil).WithError(err).Error("db.Ping failed")
+               return
+       }
+       ps.db = db
+
+       ps.pqListener = pq.NewListener(ps.DataSource, time.Second, time.Minute, ps.listenerProblem)
+       err = ps.pqListener.Listen("logs")
+       if err != nil {
+               logger(nil).WithError(err).Error("pq Listen failed")
+               return
+       }
+       defer ps.pqListener.Close()
+       logger(nil).Debug("pq Listen setup done")
+
+       close(ready)
+       // Avoid double-close in deferred func
+       ready = nil
+
+       ps.queue = make(chan *event, ps.QueueSize)
+       defer close(ps.queue)
+
+       go func() {
+               for e := range ps.queue {
+                       // Wait for the "select ... from logs" call to
+                       // finish. This limits max concurrent queries
+                       // to ps.QueueSize. Without this, max
+                       // concurrent queries would be bounded by
+                       // client_count X client_queue_size.
+                       e.Detail()
+
+                       logger(nil).
+                               WithField("serial", e.Serial).
+                               WithField("detail", e.Detail()).
+                               Debug("event ready")
+                       e.Ready = time.Now()
+                       ps.lastQDelay = e.Ready.Sub(e.Received)
+
+                       ps.mtx.Lock()
+                       atomic.AddUint64(&ps.eventsOut, uint64(len(ps.sinks)))
+                       for sink := range ps.sinks {
+                               sink.channel <- e
+                       }
+                       ps.mtx.Unlock()
+               }
+       }()
+
+       var serial uint64
+       ticker := time.NewTicker(time.Minute)
+       defer ticker.Stop()
+       for {
+               select {
+               case <-ctx.Done():
+                       logger(nil).Debug("ctx done")
+                       return
+
+               case <-ticker.C:
+                       logger(nil).Debug("listener ping")
+                       ps.pqListener.Ping()
+
+               case pqEvent, ok := <-ps.pqListener.Notify:
+                       if !ok {
+                               logger(nil).Debug("pqListener Notify chan closed")
+                               return
+                       }
+                       if pqEvent == nil {
+                               // pq should call listenerProblem
+                               // itself in addition to sending us a
+                               // nil event, so this might be
+                               // superfluous:
+                               ps.listenerProblem(-1, nil)
+                               continue
+                       }
+                       if pqEvent.Channel != "logs" {
+                               logger(nil).WithField("pqEvent", pqEvent).Error("unexpected notify from wrong channel")
+                               continue
+                       }
+                       logID, err := strconv.ParseUint(pqEvent.Extra, 10, 64)
+                       if err != nil {
+                               logger(nil).WithField("pqEvent", pqEvent).Error("bad notify payload")
+                               continue
+                       }
+                       serial++
+                       e := &event{
+                               LogID:    logID,
+                               Received: time.Now(),
+                               Serial:   serial,
+                               db:       ps.db,
+                       }
+                       logger(nil).WithField("event", e).Debug("incoming")
+                       atomic.AddUint64(&ps.eventsIn, 1)
+                       ps.queue <- e
+                       go e.Detail()
+               }
+       }
+}
+
+// NewSink subscribes to the event source. NewSink returns an
+// eventSink, whose Channel() method returns a channel: a pointer to
+// each subsequent event will be sent to that channel.
+//
+// The caller must ensure events are received from the sink channel as
+// quickly as possible because when one sink stops being ready, all
+// other sinks block.
+func (ps *pgEventSource) NewSink() eventSink {
+       sink := &pgEventSink{
+               channel: make(chan *event, 1),
+               source:  ps,
+       }
+       ps.mtx.Lock()
+       if ps.sinks == nil {
+               ps.sinks = make(map[*pgEventSink]bool)
+       }
+       ps.sinks[sink] = true
+       ps.mtx.Unlock()
+       return sink
+}
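+
+// A typical consumer loop, as a sketch (names assumed):
+//
+//     sink := ps.NewSink()
+//     defer sink.Stop()
+//     for e := range sink.Channel() {
+//             // handle *event
+//     }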
+
+func (ps *pgEventSource) DB() *sql.DB {
+       ps.WaitReady()
+       return ps.db
+}
+
+func (ps *pgEventSource) DBHealth() error {
+       ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second))
+       defer cancel()
+       var i int
+       return ps.db.QueryRowContext(ctx, "SELECT 1").Scan(&i)
+}
+
+func (ps *pgEventSource) DebugStatus() interface{} {
+       ps.mtx.Lock()
+       defer ps.mtx.Unlock()
+       blocked := 0
+       for sink := range ps.sinks {
+               blocked += len(sink.channel)
+       }
+       return map[string]interface{}{
+               "EventsIn":     atomic.LoadUint64(&ps.eventsIn),
+               "EventsOut":    atomic.LoadUint64(&ps.eventsOut),
+               "Queue":        len(ps.queue),
+               "QueueLimit":   cap(ps.queue),
+               "QueueDelay":   stats.Duration(ps.lastQDelay),
+               "Sinks":        len(ps.sinks),
+               "SinksBlocked": blocked,
+               "DBStats":      ps.db.Stats(),
+       }
+}
+
+type pgEventSink struct {
+       channel chan *event
+       source  *pgEventSource
+}
+
+func (sink *pgEventSink) Channel() <-chan *event {
+       return sink.channel
+}
+
+// Stop stops sending events to the sink's channel and removes the
+// sink from its event source.
+func (sink *pgEventSink) Stop() {
+       go func() {
+               // Ensure this sink cannot fill up and block the
+               // server-side queue (which otherwise could in turn
+               // block our mtx.Lock() here)
+               for range sink.channel {
+               }
+       }()
+       sink.source.mtx.Lock()
+       if _, ok := sink.source.sinks[sink]; ok {
+               delete(sink.source.sinks, sink)
+               close(sink.channel)
+       }
+       sink.source.mtx.Unlock()
+}
diff --git a/services/ws/event_source_test.go b/services/ws/event_source_test.go
new file mode 100644 (file)
index 0000000..ac5d130
--- /dev/null
@@ -0,0 +1,106 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "database/sql"
+       "fmt"
+       "os"
+       "path/filepath"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&eventSourceSuite{})
+
+type eventSourceSuite struct{}
+
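+// testDBConfig loads the PostgreSQL connection settings for the
+// "zzzzz" test cluster from $WORKSPACE/tmp/arvados.yml; these tests
+// assume a development environment where that file exists (e.g., one
+// prepared by arvados' build/run-tests.sh).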
+func testDBConfig() arvados.PostgreSQLConnection {
+       cfg, err := arvados.GetConfig(filepath.Join(os.Getenv("WORKSPACE"), "tmp", "arvados.yml"))
+       if err != nil {
+               panic(err)
+       }
+       cc, err := cfg.GetCluster("zzzzz")
+       if err != nil {
+               panic(err)
+       }
+       return cc.PostgreSQL.Connection
+}
+
+func testDB() *sql.DB {
+       db, err := sql.Open("postgres", testDBConfig().String())
+       if err != nil {
+               panic(err)
+       }
+       return db
+}
+
+func (*eventSourceSuite) TestEventSource(c *check.C) {
+       cfg := testDBConfig()
+       db := testDB()
+       pges := &pgEventSource{
+               DataSource: cfg.String(),
+               QueueSize:  4,
+       }
+       go pges.Run()
+       sinks := make([]eventSink, 18)
+       for i := range sinks {
+               sinks[i] = pges.NewSink()
+       }
+
+       pges.WaitReady()
+       defer pges.cancel()
+
+       done := make(chan bool, 1)
+
+       go func() {
+               for i := range sinks {
+                       _, err := db.Exec(fmt.Sprintf(`NOTIFY logs, '%d'`, i))
+                       if err != nil {
+                               c.Error(err) // c.Fatal is unsafe outside the test goroutine
+                               done <- true
+                               return
+                       }
+               }
+       }()
+
+       var wg sync.WaitGroup
+       wg.Add(len(sinks))
+       for si, s := range sinks {
+               go func(si int, s eventSink) {
+                       defer wg.Done()
+                       defer sinks[si].Stop()
+                       for i := 0; i <= si; i++ {
+                               ev := <-sinks[si].Channel()
+                               c.Logf("sink %d received event %d", si, i)
+                               c.Check(ev.LogID, check.Equals, uint64(i))
+                               row := ev.Detail()
+                               if i == 0 {
+                                       // no matching row, null event
+                                       c.Check(row, check.IsNil)
+                               } else {
+                                       c.Check(row, check.NotNil)
+                                       c.Check(row.ID, check.Equals, uint64(i))
+                                       c.Check(row.UUID, check.Not(check.Equals), "")
+                               }
+                       }
+               }(si, s)
+       }
+       go func() {
+               wg.Wait()
+               done <- true
+       }()
+
+       select {
+       case <-done:
+       case <-time.After(10 * time.Second):
+               c.Fatal("timed out")
+       }
+
+       c.Check(pges.DBHealth(), check.IsNil)
+}
diff --git a/services/ws/event_test.go b/services/ws/event_test.go
new file mode 100644 (file)
index 0000000..dc32446
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import check "gopkg.in/check.v1"
+
+var _ = check.Suite(&eventSuite{})
+
+type eventSuite struct{}
+
+func (*eventSuite) TestDetail(c *check.C) {
+       e := &event{
+               LogID: 17,
+               db:    testDB(),
+       }
+       logRow := e.Detail()
+       c.Assert(logRow, check.NotNil)
+       c.Check(logRow, check.Equals, e.logRow)
+       c.Check(logRow.UUID, check.Equals, "zzzzz-57u5n-containerlog006")
+       c.Check(logRow.ObjectUUID, check.Equals, "zzzzz-dz642-logscontainer03")
+       c.Check(logRow.EventType, check.Equals, "crunchstat")
+       c.Check(logRow.Properties["text"], check.Equals, "2013-11-07_23:33:41 zzzzz-8i9sb-ahd7cie8jah9qui 29610 1 stderr crunchstat: cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user 0.9900 sys")
+}
diff --git a/services/ws/gocheck_test.go b/services/ws/gocheck_test.go
new file mode 100644 (file)
index 0000000..ea8dfc3
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "testing"
+
+       check "gopkg.in/check.v1"
+)
+
+func TestGocheck(t *testing.T) {
+       check.TestingT(t)
+}
diff --git a/services/ws/handler.go b/services/ws/handler.go
new file mode 100644 (file)
index 0000000..d527c39
--- /dev/null
@@ -0,0 +1,237 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "context"
+       "io"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/stats"
+)
+
+type handler struct {
+       Client      arvados.Client
+       PingTimeout time.Duration
+       QueueSize   int
+
+       mtx       sync.Mutex
+       lastDelay map[chan interface{}]stats.Duration
+       setupOnce sync.Once
+}
+
+type handlerStats struct {
+       QueueDelayNs time.Duration
+       WriteDelayNs time.Duration
+       EventBytes   uint64
+       EventCount   uint64
+}
+
+func (h *handler) Handle(ws wsConn, eventSource eventSource, newSession func(wsConn, chan<- interface{}) (session, error)) (hStats handlerStats) {
+       h.setupOnce.Do(h.setup)
+
+       ctx, cancel := context.WithCancel(ws.Request().Context())
+       defer cancel()
+       log := logger(ctx)
+
+       incoming := eventSource.NewSink()
+       defer incoming.Stop()
+
+       queue := make(chan interface{}, h.QueueSize)
+       h.mtx.Lock()
+       h.lastDelay[queue] = 0
+       h.mtx.Unlock()
+       defer func() {
+               h.mtx.Lock()
+               delete(h.lastDelay, queue)
+               h.mtx.Unlock()
+       }()
+
+       sess, err := newSession(ws, queue)
+       if err != nil {
+               log.WithError(err).Error("newSession failed")
+               return
+       }
+
+       // Receive websocket frames from the client and pass them to
+       // sess.Receive().
+       go func() {
+               defer cancel()
+               buf := make([]byte, 2<<20)
+               for {
+                       select {
+                       case <-ctx.Done():
+                               return
+                       default:
+                       }
+                       ws.SetReadDeadline(time.Now().Add(24 * 365 * time.Hour))
+                       n, err := ws.Read(buf)
+                       frame := buf[:n]
+                       log.WithField("frame", string(frame)).Debug("received frame")
+                       if err == nil && n == cap(buf) {
+                               err = errFrameTooBig
+                       }
+                       if err != nil {
+                               if err != io.EOF && ctx.Err() == nil {
+                                       log.WithError(err).Info("read error")
+                               }
+                               return
+                       }
+                       err = sess.Receive(frame)
+                       if err != nil {
+                               log.WithError(err).Error("sess.Receive() failed")
+                               return
+                       }
+               }
+       }()
+
+       // Take items from the outgoing queue, serialize them using
+       // sess.EventMessage() as needed, and send them to the client
+       // as websocket frames.
+       go func() {
+               defer cancel()
+               for {
+                       var ok bool
+                       var data interface{}
+                       select {
+                       case <-ctx.Done():
+                               return
+                       case data, ok = <-queue:
+                               if !ok {
+                                       return
+                               }
+                       }
+                       var e *event
+                       var buf []byte
+                       var err error
+                       log := log
+
+                       switch data := data.(type) {
+                       case []byte:
+                               buf = data
+                       case *event:
+                               e = data
+                               log = log.WithField("serial", e.Serial)
+                               buf, err = sess.EventMessage(e)
+                               if err != nil {
+                                       log.WithError(err).Error("EventMessage failed")
+                                       return
+                               } else if len(buf) == 0 {
+                                       log.Debug("skip")
+                                       continue
+                               }
+                       default:
+                               log.WithField("data", data).Error("bad object in client queue")
+                               continue
+                       }
+
+                       log.WithField("frame", string(buf)).Debug("send event")
+                       ws.SetWriteDeadline(time.Now().Add(h.PingTimeout))
+                       t0 := time.Now()
+                       _, err = ws.Write(buf)
+                       if err != nil {
+                               if ctx.Err() == nil {
+                                       log.WithError(err).Error("write failed")
+                               }
+                               return
+                       }
+                       log.Debug("sent")
+
+                       if e != nil {
+                               hStats.QueueDelayNs += t0.Sub(e.Ready)
+                               h.mtx.Lock()
+                               h.lastDelay[queue] = stats.Duration(time.Since(e.Ready))
+                               h.mtx.Unlock()
+                       }
+                       hStats.WriteDelayNs += time.Since(t0)
+                       hStats.EventBytes += uint64(len(buf))
+                       hStats.EventCount++
+               }
+       }()
+
+       // Filter incoming events against the current subscription
+       // list, and forward matching events to the outgoing message
+       // queue. Close the queue and return when the request context
+       // is done/cancelled or the incoming event stream ends. Shut
+       // down the handler if the outgoing queue fills up.
+       go func() {
+               defer cancel()
+               ticker := time.NewTicker(h.PingTimeout)
+               defer ticker.Stop()
+
+               for {
+                       select {
+                       case <-ctx.Done():
+                               return
+                       case <-ticker.C:
+                               // If the outgoing queue is empty,
+                               // send an empty message. This can
+                               // help detect a disconnected network
+                               // socket, and prevent an idle socket
+                               // from being closed.
+                               if len(queue) == 0 {
+                                       select {
+                                       case queue <- []byte(`{}`):
+                                       default:
+                                       }
+                               }
+                       case e, ok := <-incoming.Channel():
+                               if !ok {
+                                       return
+                               }
+                               if !sess.Filter(e) {
+                                       continue
+                               }
+                               select {
+                               case queue <- e:
+                               default:
+                                       log.WithError(errQueueFull).Error("terminate")
+                                       return
+                               }
+                       }
+               }
+       }()
+
+       <-ctx.Done()
+       return
+}
+
+func (h *handler) DebugStatus() interface{} {
+       h.mtx.Lock()
+       defer h.mtx.Unlock()
+
+       var s struct {
+               QueueCount    int
+               QueueMin      int
+               QueueMax      int
+               QueueTotal    uint64
+               QueueDelayMin stats.Duration
+               QueueDelayMax stats.Duration
+       }
+       for q, lastDelay := range h.lastDelay {
+               s.QueueCount++
+               n := len(q)
+               s.QueueTotal += uint64(n)
+               if s.QueueMax < n {
+                       s.QueueMax = n
+               }
+               if s.QueueMin > n || s.QueueCount == 1 {
+                       s.QueueMin = n
+               }
+               if (s.QueueDelayMin > lastDelay || s.QueueDelayMin == 0) && lastDelay > 0 {
+                       s.QueueDelayMin = lastDelay
+               }
+               if s.QueueDelayMax < lastDelay {
+                       s.QueueDelayMax = lastDelay
+               }
+       }
+       return &s
+}
+
+func (h *handler) setup() {
+       h.lastDelay = make(map[chan interface{}]stats.Duration)
+}
diff --git a/services/ws/main.go b/services/ws/main.go
new file mode 100644 (file)
index 0000000..a0006a4
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "flag"
+       "fmt"
+
+       "git.curoverse.com/arvados.git/sdk/go/config"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+)
+
+var logger = ctxlog.FromContext
+var version = "dev"
+
+func main() {
+       log := logger(nil)
+
+       configPath := flag.String("config", "/etc/arvados/ws/ws.yml", "`path` to config file")
+       dumpConfig := flag.Bool("dump-config", false, "show current configuration and exit")
+       getVersion := flag.Bool("version", false, "print version information and exit")
+       cfg := defaultConfig()
+       flag.Parse()
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("arvados-ws %s\n", version)
+               return
+       }
+
+       err := config.LoadFile(&cfg, *configPath)
+       if err != nil {
+               log.Fatal(err)
+       }
+
+       ctxlog.SetLevel(cfg.LogLevel)
+       ctxlog.SetFormat(cfg.LogFormat)
+
+       if *dumpConfig {
+               txt, err := config.Dump(&cfg)
+               if err != nil {
+                       log.Fatal(err)
+               }
+               fmt.Print(string(txt))
+               return
+       }
+
+       log.Printf("arvados-ws %s started", version)
+       srv := &server{wsConfig: &cfg}
+       log.Fatal(srv.Run())
+}
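+
+// A minimal ws.yml sketch (keys mirror the wsConfig fields used in
+// this package; values are illustrative assumptions, not documented
+// defaults):
+//
+//     Listen: ":9003"
+//     LogLevel: info
+//     LogFormat: json
+//     ManagementToken: example-token
+//     Postgres:
+//       dbname: arvados_production
+//       host: localhost
+//       user: arvados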
diff --git a/services/ws/permission.go b/services/ws/permission.go
new file mode 100644 (file)
index 0000000..a39a959
--- /dev/null
@@ -0,0 +1,122 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "net/http"
+       "net/url"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+const (
+       maxPermCacheAge = time.Hour
+       minPermCacheAge = 5 * time.Minute
+)
+
+type permChecker interface {
+       SetToken(token string)
+       Check(uuid string) (bool, error)
+}
+
+func newPermChecker(ac arvados.Client) permChecker {
+       ac.AuthToken = ""
+       return &cachingPermChecker{
+               Client:     &ac,
+               cache:      make(map[string]cacheEnt),
+               maxCurrent: 16,
+       }
+}
+
+type cacheEnt struct {
+       time.Time
+       allowed bool
+}
+
+type cachingPermChecker struct {
+       *arvados.Client
+       cache      map[string]cacheEnt
+       maxCurrent int
+
+       nChecks  uint64
+       nMisses  uint64
+       nInvalid uint64
+}
+
+func (pc *cachingPermChecker) SetToken(token string) {
+       if pc.Client.AuthToken == token {
+               return
+       }
+       pc.Client.AuthToken = token
+       pc.cache = make(map[string]cacheEnt)
+}
+
+func (pc *cachingPermChecker) Check(uuid string) (bool, error) {
+       pc.nChecks++
+       logger := logger(nil).
+               WithField("token", pc.Client.AuthToken).
+               WithField("uuid", uuid)
+       pc.tidy()
+       now := time.Now()
+       if perm, ok := pc.cache[uuid]; ok && now.Sub(perm.Time) < maxPermCacheAge {
+               logger.WithField("allowed", perm.allowed).Debug("cache hit")
+               return perm.allowed, nil
+       }
+       var buf map[string]interface{}
+       path, err := pc.PathForUUID("get", uuid)
+       if err != nil {
+               pc.nInvalid++
+               return false, err
+       }
+
+       pc.nMisses++
+       err = pc.RequestAndDecode(&buf, "GET", path, nil, url.Values{
+               "include_trash": {"true"},
+               "select":        {`["uuid"]`},
+       })
+
+       var allowed bool
+       if err == nil {
+               allowed = true
+       } else if txErr, ok := err.(*arvados.TransactionError); ok && pc.isNotAllowed(txErr.StatusCode) {
+               allowed = false
+       } else {
+               logger.WithError(err).Error("lookup error")
+               return false, err
+       }
+       logger.WithField("allowed", allowed).Debug("cache miss")
+       pc.cache[uuid] = cacheEnt{Time: now, allowed: allowed}
+       return allowed, nil
+}
+
+func (pc *cachingPermChecker) isNotAllowed(status int) bool {
+       switch status {
+       case http.StatusForbidden, http.StatusUnauthorized, http.StatusNotFound:
+               return true
+       default:
+               return false
+       }
+}
+
+func (pc *cachingPermChecker) tidy() {
+       if len(pc.cache) <= pc.maxCurrent*2 {
+               return
+       }
+       tooOld := time.Now().Add(-minPermCacheAge)
+       for uuid, t := range pc.cache {
+               if t.Before(tooOld) {
+                       delete(pc.cache, uuid)
+               }
+       }
+       pc.maxCurrent = len(pc.cache)
+}
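+
+// Illustrative use (a sketch; clientToken and objectUUID are
+// placeholders -- permission_test.go exercises the real behavior):
+//
+//     pc := newPermChecker(*arvados.NewClientFromEnv())
+//     pc.SetToken(clientToken)
+//     ok, err := pc.Check(objectUUID)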
diff --git a/services/ws/permission_test.go b/services/ws/permission_test.go
new file mode 100644 (file)
index 0000000..3ddde6f
--- /dev/null
@@ -0,0 +1,75 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&permSuite{})
+
+type permSuite struct{}
+
+func (s *permSuite) TestCheck(c *check.C) {
+       pc := newPermChecker(*(arvados.NewClientFromEnv())).(*cachingPermChecker)
+       setToken := func(label, token string) {
+               c.Logf("...%s token %q", label, token)
+               pc.SetToken(token)
+       }
+       wantError := func(uuid string) {
+               c.Log(uuid)
+               ok, err := pc.Check(uuid)
+               c.Check(ok, check.Equals, false)
+               c.Check(err, check.NotNil)
+       }
+       wantYes := func(uuid string) {
+               c.Log(uuid)
+               ok, err := pc.Check(uuid)
+               c.Check(ok, check.Equals, true)
+               c.Check(err, check.IsNil)
+       }
+       wantNo := func(uuid string) {
+               c.Log(uuid)
+               ok, err := pc.Check(uuid)
+               c.Check(ok, check.Equals, false)
+               c.Check(err, check.IsNil)
+       }
+
+       setToken("no", "")
+       wantNo(arvadostest.UserAgreementCollection)
+       wantNo(arvadostest.UserAgreementPDH)
+       wantNo(arvadostest.FooBarDirCollection)
+
+       setToken("anonymous", arvadostest.AnonymousToken)
+       wantYes(arvadostest.UserAgreementCollection)
+       wantYes(arvadostest.UserAgreementPDH)
+       wantNo(arvadostest.FooBarDirCollection)
+       wantNo(arvadostest.FooCollection)
+
+       setToken("active", arvadostest.ActiveToken)
+       wantYes(arvadostest.UserAgreementCollection)
+       wantYes(arvadostest.UserAgreementPDH)
+       wantYes(arvadostest.FooBarDirCollection)
+       wantYes(arvadostest.FooCollection)
+
+       setToken("admin", arvadostest.AdminToken)
+       wantYes(arvadostest.UserAgreementCollection)
+       wantYes(arvadostest.UserAgreementPDH)
+       wantYes(arvadostest.FooBarDirCollection)
+       wantYes(arvadostest.FooCollection)
+
+       // hack to empty the cache
+       pc.SetToken("")
+       pc.SetToken(arvadostest.ActiveToken)
+
+       c.Log("...network error")
+       pc.Client.APIHost = "127.0.0.1:discard"
+       wantError(arvadostest.UserAgreementCollection)
+       wantError(arvadostest.FooBarDirCollection)
+
+       c.Logf("%d checks, %d misses, %d invalid, %d cached", pc.nChecks, pc.nMisses, pc.nInvalid, len(pc.cache))
+}
diff --git a/services/ws/router.go b/services/ws/router.go
new file mode 100644 (file)
index 0000000..a408b58
--- /dev/null
@@ -0,0 +1,164 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "io"
+       "net/http"
+       "strconv"
+       "sync"
+       "sync/atomic"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "git.curoverse.com/arvados.git/sdk/go/health"
+       "github.com/sirupsen/logrus"
+       "golang.org/x/net/websocket"
+)
+
+type wsConn interface {
+       io.ReadWriter
+       Request() *http.Request
+       SetReadDeadline(time.Time) error
+       SetWriteDeadline(time.Time) error
+}
+
+type router struct {
+       Config         *wsConfig
+       eventSource    eventSource
+       newPermChecker func() permChecker
+
+       handler   *handler
+       mux       *http.ServeMux
+       setupOnce sync.Once
+
+       lastReqID  int64
+       lastReqMtx sync.Mutex
+
+       status routerDebugStatus
+}
+
+type routerDebugStatus struct {
+       ReqsReceived int64
+       ReqsActive   int64
+}
+
+type debugStatuser interface {
+       DebugStatus() interface{}
+}
+
+func (rtr *router) setup() {
+       rtr.handler = &handler{
+               PingTimeout: rtr.Config.PingTimeout.Duration(),
+               QueueSize:   rtr.Config.ClientEventQueue,
+       }
+       rtr.mux = http.NewServeMux()
+       rtr.mux.Handle("/websocket", rtr.makeServer(newSessionV0))
+       rtr.mux.Handle("/arvados/v1/events.ws", rtr.makeServer(newSessionV1))
+       rtr.mux.Handle("/debug.json", rtr.jsonHandler(rtr.DebugStatus))
+       rtr.mux.Handle("/status.json", rtr.jsonHandler(rtr.Status))
+
+       rtr.mux.Handle("/_health/", &health.Handler{
+               Token:  rtr.Config.ManagementToken,
+               Prefix: "/_health/",
+               Routes: health.Routes{
+                       "db": rtr.eventSource.DBHealth,
+               },
+               Log: func(r *http.Request, err error) {
+                       if err != nil {
+                               logger(r.Context()).WithError(err).Error("error")
+                       }
+               },
+       })
+}
+
+func (rtr *router) makeServer(newSession sessionFactory) *websocket.Server {
+       return &websocket.Server{
+               Handshake: func(c *websocket.Config, r *http.Request) error {
+                       return nil
+               },
+               Handler: websocket.Handler(func(ws *websocket.Conn) {
+                       t0 := time.Now()
+                       log := logger(ws.Request().Context())
+                       log.Info("connected")
+
+                       stats := rtr.handler.Handle(ws, rtr.eventSource,
+                               func(ws wsConn, sendq chan<- interface{}) (session, error) {
+                                       return newSession(ws, sendq, rtr.eventSource.DB(), rtr.newPermChecker(), &rtr.Config.Client)
+                               })
+
+                       log.WithFields(logrus.Fields{
+                               "elapsed": time.Since(t0).Seconds(),
+                               "stats":   stats,
+                       }).Info("disconnect")
+                       ws.Close()
+               }),
+       }
+}
+
+func (rtr *router) newReqID() string {
+       rtr.lastReqMtx.Lock()
+       defer rtr.lastReqMtx.Unlock()
+       id := time.Now().UnixNano()
+       if id <= rtr.lastReqID {
+               id = rtr.lastReqID + 1
+       }
+       return strconv.FormatInt(id, 36)
+}
+
+func (rtr *router) DebugStatus() interface{} {
+       s := map[string]interface{}{
+               "HTTP":     rtr.status,
+               "Outgoing": rtr.handler.DebugStatus(),
+       }
+       if es, ok := rtr.eventSource.(debugStatuser); ok {
+               s["EventSource"] = es.DebugStatus()
+       }
+       return s
+}
+
+func (rtr *router) Status() interface{} {
+       return map[string]interface{}{
+               "Clients": atomic.LoadInt64(&rtr.status.ReqsActive),
+               "Version": version,
+       }
+}
+
+func (rtr *router) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       rtr.setupOnce.Do(rtr.setup)
+       atomic.AddInt64(&rtr.status.ReqsReceived, 1)
+       atomic.AddInt64(&rtr.status.ReqsActive, 1)
+       defer atomic.AddInt64(&rtr.status.ReqsActive, -1)
+
+       logger := logger(req.Context()).
+               WithField("RequestID", rtr.newReqID())
+       ctx := ctxlog.Context(req.Context(), logger)
+       req = req.WithContext(ctx)
+       logger.WithFields(logrus.Fields{
+               "remoteAddr":      req.RemoteAddr,
+               "reqForwardedFor": req.Header.Get("X-Forwarded-For"),
+       }).Info("accept request")
+       rtr.mux.ServeHTTP(resp, req)
+}
+
+func (rtr *router) jsonHandler(fn func() interface{}) http.Handler {
+       return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+               logger := logger(r.Context())
+               w.Header().Set("Content-Type", "application/json")
+               enc := json.NewEncoder(w)
+               err := enc.Encode(fn())
+               if err != nil {
+                       msg := "encode failed"
+                       logger.WithError(err).Error(msg)
+                       http.Error(w, msg, http.StatusInternalServerError)
+               }
+       })
+}
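+
+// For example, assuming the server is listening on localhost:9003:
+//
+//     $ curl http://localhost:9003/status.json
+//     {"Clients":1,"Version":"dev"}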
diff --git a/services/ws/server.go b/services/ws/server.go
new file mode 100644 (file)
index 0000000..eda7ff2
--- /dev/null
@@ -0,0 +1,76 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "net"
+       "net/http"
+       "sync"
+       "time"
+
+       "github.com/coreos/go-systemd/daemon"
+)
+
+type server struct {
+       httpServer  *http.Server
+       listener    net.Listener
+       wsConfig    *wsConfig
+       eventSource *pgEventSource
+       setupOnce   sync.Once
+}
+
+func (srv *server) Close() {
+       srv.WaitReady()
+       srv.eventSource.Close()
+       srv.listener.Close()
+}
+
+func (srv *server) WaitReady() {
+       srv.setupOnce.Do(srv.setup)
+       srv.eventSource.WaitReady()
+}
+
+func (srv *server) Run() error {
+       srv.setupOnce.Do(srv.setup)
+       return srv.httpServer.Serve(srv.listener)
+}
+
+func (srv *server) setup() {
+       log := logger(nil)
+
+       ln, err := net.Listen("tcp", srv.wsConfig.Listen)
+       if err != nil {
+               log.WithField("Listen", srv.wsConfig.Listen).Fatal(err)
+       }
+       log.WithField("Listen", ln.Addr().String()).Info("listening")
+
+       srv.listener = ln
+       srv.eventSource = &pgEventSource{
+               DataSource:   srv.wsConfig.Postgres.String(),
+               MaxOpenConns: srv.wsConfig.PostgresPool,
+               QueueSize:    srv.wsConfig.ServerEventQueue,
+       }
+       srv.httpServer = &http.Server{
+               Addr:           srv.wsConfig.Listen,
+               ReadTimeout:    time.Minute,
+               WriteTimeout:   time.Minute,
+               MaxHeaderBytes: 1 << 20,
+               Handler: &router{
+                       Config:         srv.wsConfig,
+                       eventSource:    srv.eventSource,
+                       newPermChecker: func() permChecker { return newPermChecker(srv.wsConfig.Client) },
+               },
+       }
+
+       go func() {
+               srv.eventSource.Run()
+               log.Info("event source stopped")
+               srv.Close()
+       }()
+
+       if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
+               log.WithError(err).Warn("error notifying init daemon")
+       }
+}
diff --git a/services/ws/server_test.go b/services/ws/server_test.go
new file mode 100644 (file)
index 0000000..b1f9438
--- /dev/null
@@ -0,0 +1,124 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "encoding/json"
+       "io/ioutil"
+       "net/http"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&serverSuite{})
+
+type serverSuite struct {
+       cfg *wsConfig
+       srv *server
+       wg  sync.WaitGroup
+}
+
+func (s *serverSuite) SetUpTest(c *check.C) {
+       s.cfg = s.testConfig()
+       s.srv = &server{wsConfig: s.cfg}
+}
+
+func (*serverSuite) testConfig() *wsConfig {
+       cfg := defaultConfig()
+       cfg.Client = *(arvados.NewClientFromEnv())
+       cfg.Postgres = testDBConfig()
+       cfg.Listen = ":"
+       cfg.ManagementToken = arvadostest.ManagementToken
+       return &cfg
+}
+
+// TestBadDB ensures Run() returns an error (instead of panicking or
+// deadlocking) if it can't connect to the database server at startup.
+func (s *serverSuite) TestBadDB(c *check.C) {
+       s.cfg.Postgres["password"] = "1234"
+
+       var wg sync.WaitGroup
+       wg.Add(1)
+       go func() {
+               err := s.srv.Run()
+               c.Check(err, check.NotNil)
+               wg.Done()
+       }()
+       wg.Add(1)
+       go func() {
+               s.srv.WaitReady()
+               wg.Done()
+       }()
+
+       done := make(chan bool)
+       go func() {
+               wg.Wait()
+               close(done)
+       }()
+       select {
+       case <-done:
+       case <-time.After(10 * time.Second):
+               c.Fatal("timeout")
+       }
+}
+
+func (s *serverSuite) TestHealth(c *check.C) {
+       go s.srv.Run()
+       defer s.srv.Close()
+       s.srv.WaitReady()
+       for _, token := range []string{"", "foo", s.cfg.ManagementToken} {
+               req, err := http.NewRequest("GET", "http://"+s.srv.listener.Addr().String()+"/_health/ping", nil)
+               c.Assert(err, check.IsNil)
+               if token != "" {
+                       req.Header.Add("Authorization", "Bearer "+token)
+               }
+               resp, err := http.DefaultClient.Do(req)
+               c.Check(err, check.IsNil)
+               if token == s.cfg.ManagementToken {
+                       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+                       buf, err := ioutil.ReadAll(resp.Body)
+                       c.Check(err, check.IsNil)
+                       c.Check(string(buf), check.Equals, `{"health":"OK"}`+"\n")
+               } else {
+                       c.Check(resp.StatusCode, check.Not(check.Equals), http.StatusOK)
+               }
+       }
+}
+
+func (s *serverSuite) TestStatus(c *check.C) {
+       go s.srv.Run()
+       defer s.srv.Close()
+       s.srv.WaitReady()
+       req, err := http.NewRequest("GET", "http://"+s.srv.listener.Addr().String()+"/status.json", nil)
+       c.Assert(err, check.IsNil)
+       resp, err := http.DefaultClient.Do(req)
+       c.Check(err, check.IsNil)
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var status map[string]interface{}
+       err = json.NewDecoder(resp.Body).Decode(&status)
+       c.Check(err, check.IsNil)
+       c.Check(status["Version"], check.Not(check.Equals), "")
+}
+
+func (s *serverSuite) TestHealthDisabled(c *check.C) {
+       s.cfg.ManagementToken = ""
+
+       go s.srv.Run()
+       defer s.srv.Close()
+       s.srv.WaitReady()
+
+       for _, token := range []string{"", "foo", arvadostest.ManagementToken} {
+               req, err := http.NewRequest("GET", "http://"+s.srv.listener.Addr().String()+"/_health/ping", nil)
+               c.Assert(err, check.IsNil)
+               req.Header.Add("Authorization", "Bearer "+token)
+               resp, err := http.DefaultClient.Do(req)
+               c.Check(err, check.IsNil)
+               c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
+       }
+}
diff --git a/services/ws/session.go b/services/ws/session.go
new file mode 100644 (file)
index 0000000..d41e745
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "database/sql"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+type session interface {
+       // Receive processes a message received from the client. If a
+       // non-nil error is returned, the connection will be
+       // terminated.
+       Receive([]byte) error
+
+       // Filter returns true if the event should be queued for
+       // sending to the client. It should return as fast as
+       // possible, and must not block.
+       Filter(*event) bool
+
+       // EventMessage encodes the given event (from the front of the
+       // queue) into a form suitable to send to the client. If a
+       // non-nil error is returned, the connection is terminated. If
+       // the returned buffer is empty, nothing is sent to the client
+       // and the event is not counted in statistics.
+       //
+       // Unlike Filter, EventMessage can block without affecting
+       // other connections. If EventMessage is slow, additional
+       // incoming events will be queued. If the event queue fills
+       // up, the connection will be dropped.
+       EventMessage(*event) ([]byte, error)
+}
+
+type sessionFactory func(wsConn, chan<- interface{}, *sql.DB, permChecker, *arvados.Client) (session, error)
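+
+// A minimal session sketch (hypothetical, for illustration): it
+// accepts every client message, matches every event, and relays the
+// raw log ID.
+//
+//     type echoSession struct{}
+//
+//     func (echoSession) Receive([]byte) error { return nil }
+//     func (echoSession) Filter(*event) bool   { return true }
+//     func (echoSession) EventMessage(e *event) ([]byte, error) {
+//             return json.Marshal(map[string]uint64{"log_id": e.LogID})
+//     }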
diff --git a/services/ws/session_v0.go b/services/ws/session_v0.go
new file mode 100644 (file)
index 0000000..63bdb49
--- /dev/null
@@ -0,0 +1,365 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "database/sql"
+       "encoding/json"
+       "errors"
+       "reflect"
+       "sync"
+       "sync/atomic"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "github.com/sirupsen/logrus"
+)
+
+var (
+       errQueueFull   = errors.New("client queue full")
+       errFrameTooBig = errors.New("frame too big")
+
+       // Send clients only these keys from the
+       // log.properties.old_attributes and
+       // log.properties.new_attributes hashes.
+       sendObjectAttributes = []string{
+               "is_trashed",
+               "name",
+               "owner_uuid",
+               "portable_data_hash",
+               "state",
+       }
+
+       v0subscribeOK   = []byte(`{"status":200}`)
+       v0subscribeFail = []byte(`{"status":400}`)
+)
+
+type v0session struct {
+       ac            *arvados.Client
+       ws            wsConn
+       sendq         chan<- interface{}
+       db            *sql.DB
+       permChecker   permChecker
+       subscriptions []v0subscribe
+       lastMsgID     uint64
+       log           logrus.FieldLogger
+       mtx           sync.Mutex
+       setupOnce     sync.Once
+}
+
+// newSessionV0 returns a v0 session: a partial port of the Rails/puma
+// implementation, with just enough functionality to support Workbench
+// and arv-mount.
+func newSessionV0(ws wsConn, sendq chan<- interface{}, db *sql.DB, pc permChecker, ac *arvados.Client) (session, error) {
+       sess := &v0session{
+               sendq:       sendq,
+               ws:          ws,
+               db:          db,
+               ac:          ac,
+               permChecker: pc,
+               log:         logger(ws.Request().Context()),
+       }
+
+       err := ws.Request().ParseForm()
+       if err != nil {
+               sess.log.WithError(err).Error("ParseForm failed")
+               return nil, err
+       }
+       token := ws.Request().Form.Get("api_token")
+       sess.permChecker.SetToken(token)
+       sess.log.WithField("token", token).Debug("set token")
+
+       return sess, nil
+}
+
+func (sess *v0session) Receive(buf []byte) error {
+       var sub v0subscribe
+       if err := json.Unmarshal(buf, &sub); err != nil {
+               sess.log.WithError(err).Info("invalid message from client")
+       } else if sub.Method == "subscribe" {
+               sub.prepare(sess)
+               sess.log.WithField("sub", sub).Debug("sub prepared")
+               sess.sendq <- v0subscribeOK
+               sess.mtx.Lock()
+               sess.subscriptions = append(sess.subscriptions, sub)
+               sess.mtx.Unlock()
+               sub.sendOldEvents(sess)
+               return nil
+       } else if sub.Method == "unsubscribe" {
+               sess.mtx.Lock()
+               found := false
+               for i, s := range sess.subscriptions {
+                       if !reflect.DeepEqual(s.Filters, sub.Filters) {
+                               continue
+                       }
+                       copy(sess.subscriptions[i:], sess.subscriptions[i+1:])
+                       sess.subscriptions = sess.subscriptions[:len(sess.subscriptions)-1]
+                       found = true
+                       break
+               }
+               sess.mtx.Unlock()
+               sess.log.WithField("sub", sub).WithField("found", found).Debug("unsubscribe")
+               if found {
+                       sess.sendq <- v0subscribeOK
+                       return nil
+               }
+       } else {
+               sess.log.WithField("Method", sub.Method).Info("unknown method")
+       }
+       sess.sendq <- v0subscribeFail
+       return nil
+}
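+
+// The messages Receive expects are JSON objects like these (cf.
+// session_v0_test.go):
+//
+//     {"method":"subscribe","filters":[["event_type","in",["update"]]]}
+//     {"method":"subscribe","last_log_id":123}
+//     {"method":"unsubscribe","filters":[["event_type","in",["update"]]]}
+//
+// Each message is acknowledged with {"status":200} or {"status":400}.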
+
+func (sess *v0session) EventMessage(e *event) ([]byte, error) {
+       detail := e.Detail()
+       if detail == nil {
+               return nil, nil
+       }
+
+       var permTarget string
+       if detail.EventType == "delete" {
+               // It's pointless to check permission by reading
+               // ObjectUUID if it has just been deleted, but if the
+               // client has permission on the parent project then
+               // it's OK to send the event.
+               permTarget = detail.ObjectOwnerUUID
+       } else {
+               permTarget = detail.ObjectUUID
+       }
+       ok, err := sess.permChecker.Check(permTarget)
+       if err != nil || !ok {
+               return nil, err
+       }
+
+       kind, _ := sess.ac.KindForUUID(detail.ObjectUUID)
+       msg := map[string]interface{}{
+               "msgID":             atomic.AddUint64(&sess.lastMsgID, 1),
+               "id":                detail.ID,
+               "uuid":              detail.UUID,
+               "object_uuid":       detail.ObjectUUID,
+               "object_owner_uuid": detail.ObjectOwnerUUID,
+               "object_kind":       kind,
+               "event_type":        detail.EventType,
+               "event_at":          detail.EventAt,
+       }
+       if detail.Properties != nil && detail.Properties["text"] != nil {
+               msg["properties"] = detail.Properties
+       } else {
+               msgProps := map[string]map[string]interface{}{}
+               for _, ak := range []string{"old_attributes", "new_attributes"} {
+                       eventAttrs, ok := detail.Properties[ak].(map[string]interface{})
+                       if !ok {
+                               continue
+                       }
+                       msgAttrs := map[string]interface{}{}
+                       for _, k := range sendObjectAttributes {
+                               if v, ok := eventAttrs[k]; ok {
+                                       msgAttrs[k] = v
+                               }
+                       }
+                       msgProps[ak] = msgAttrs
+               }
+               msg["properties"] = msgProps
+       }
+       return json.Marshal(msg)
+}
+
+func (sess *v0session) Filter(e *event) bool {
+       sess.mtx.Lock()
+       defer sess.mtx.Unlock()
+       for _, sub := range sess.subscriptions {
+               if sub.match(sess, e) {
+                       return true
+               }
+       }
+       return false
+}
+
+func (sub *v0subscribe) sendOldEvents(sess *v0session) {
+       if sub.LastLogID == 0 {
+               return
+       }
+       sess.log.WithField("LastLogID", sub.LastLogID).Debug("sendOldEvents")
+       // Here we do a "select id" query and queue an event for every
+       // log since the given ID, then use (*event)Detail() to
+       // retrieve the whole row and decide whether to send it. This
+       // approach is very inefficient if the subscriber asks for
+       // last_log_id==1, even if the filters end up matching very
+       // few events.
+       //
+       // To mitigate this, filter on "created > 10 minutes ago" when
+       // retrieving the list of old event IDs to consider.
+       rows, err := sess.db.Query(
+               `SELECT id FROM logs WHERE id > $1 AND created_at > $2 ORDER BY id`,
+               sub.LastLogID,
+               time.Now().UTC().Add(-10*time.Minute).Format(time.RFC3339Nano))
+       if err != nil {
+               sess.log.WithError(err).Error("sendOldEvents db.Query failed")
+               return
+       }
+
+       var ids []uint64
+       for rows.Next() {
+               var id uint64
+               err := rows.Scan(&id)
+               if err != nil {
+                       sess.log.WithError(err).Error("sendOldEvents row Scan failed")
+                       continue
+               }
+               ids = append(ids, id)
+       }
+       if err := rows.Err(); err != nil {
+               sess.log.WithError(err).Error("sendOldEvents db.Query failed")
+       }
+       rows.Close()
+
+       for _, id := range ids {
+               for len(sess.sendq)*2 > cap(sess.sendq) {
+                       // Ugly... but if we fill up the whole client
+                       // queue with a backlog of old events, a
+                       // single new event will overflow it and
+                       // terminate the connection, and then the
+                       // client will probably reconnect and do the
+                       // same thing all over again.
+                       time.Sleep(100 * time.Millisecond)
+                       if sess.ws.Request().Context().Err() != nil {
+                               // Session terminated while we were sleeping
+                               return
+                       }
+               }
+               now := time.Now()
+               e := &event{
+                       LogID:    id,
+                       Received: now,
+                       Ready:    now,
+                       db:       sess.db,
+               }
+               if sub.match(sess, e) {
+                       select {
+                       case sess.sendq <- e:
+                       case <-sess.ws.Request().Context().Done():
+                               return
+                       }
+               }
+       }
+}
+
+type v0subscribe struct {
+       Method    string
+       Filters   []v0filter
+       LastLogID int64 `json:"last_log_id"`
+
+       funcs []func(*event) bool
+}
+
+type v0filter [3]interface{}
+
+func (sub *v0subscribe) match(sess *v0session, e *event) bool {
+       log := sess.log.WithField("LogID", e.LogID)
+       detail := e.Detail()
+       if detail == nil {
+               log.Error("match failed, no detail")
+               return false
+       }
+       log = log.WithField("funcs", len(sub.funcs))
+       for i, f := range sub.funcs {
+               if !f(e) {
+                       log.WithField("func", i).Debug("match failed")
+                       return false
+               }
+       }
+       log.Debug("match passed")
+       return true
+}
+
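+// prepare compiles sub.Filters into match functions. Only two filter
+// forms are recognized (anything else is ignored, so a subscription
+// with no recognized filters matches all events):
+//
+//     ["event_type", "in", ["create", "update", ...]]
+//     ["created_at", op, "2006-01-02T15:04:05.999999999Z"]
+//
+// where op is one of ">=", "<=", ">", "<", "=".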
+func (sub *v0subscribe) prepare(sess *v0session) {
+       for _, f := range sub.Filters {
+               if len(f) != 3 {
+                       continue
+               }
+               if col, ok := f[0].(string); ok && col == "event_type" {
+                       op, ok := f[1].(string)
+                       if !ok || op != "in" {
+                               continue
+                       }
+                       arr, ok := f[2].([]interface{})
+                       if !ok {
+                               continue
+                       }
+                       var strs []string
+                       for _, s := range arr {
+                               if s, ok := s.(string); ok {
+                                       strs = append(strs, s)
+                               }
+                       }
+                       sub.funcs = append(sub.funcs, func(e *event) bool {
+                               for _, s := range strs {
+                                       if s == e.Detail().EventType {
+                                               return true
+                                       }
+                               }
+                               return false
+                       })
+               } else if ok && col == "created_at" {
+                       op, ok := f[1].(string)
+                       if !ok {
+                               continue
+                       }
+                       tstr, ok := f[2].(string)
+                       if !ok {
+                               continue
+                       }
+                       t, err := time.Parse(time.RFC3339Nano, tstr)
+                       if err != nil {
+                               sess.log.WithField("data", tstr).WithError(err).Info("time.Parse failed")
+                               continue
+                       }
+                       var fn func(*event) bool
+                       switch op {
+                       case ">=":
+                               fn = func(e *event) bool {
+                                       return !e.Detail().CreatedAt.Before(t)
+                               }
+                       case "<=":
+                               fn = func(e *event) bool {
+                                       return !e.Detail().CreatedAt.After(t)
+                               }
+                       case ">":
+                               fn = func(e *event) bool {
+                                       return e.Detail().CreatedAt.After(t)
+                               }
+                       case "<":
+                               fn = func(e *event) bool {
+                                       return e.Detail().CreatedAt.Before(t)
+                               }
+                       case "=":
+                               fn = func(e *event) bool {
+                                       return e.Detail().CreatedAt.Equal(t)
+                               }
+                       default:
+                               sess.log.WithField("operator", op).Info("bogus operator")
+                               continue
+                       }
+                       sub.funcs = append(sub.funcs, fn)
+               }
+       }
+}
diff --git a/services/ws/session_v0_test.go b/services/ws/session_v0_test.go
new file mode 100644 (file)
index 0000000..7585bc5
--- /dev/null
@@ -0,0 +1,366 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "encoding/json"
+       "fmt"
+       "io"
+       "net/url"
+       "os"
+       "sync"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+       "golang.org/x/net/websocket"
+       check "gopkg.in/check.v1"
+)
+
+func init() {
+       if os.Getenv("ARVADOS_DEBUG") != "" {
+               ctxlog.SetLevel("debug")
+       }
+}
+
+var _ = check.Suite(&v0Suite{})
+
+type v0Suite struct {
+       serverSuite serverSuite
+       token       string
+       toDelete    []string
+       wg          sync.WaitGroup
+       ignoreLogID uint64
+}
+
+func (s *v0Suite) SetUpTest(c *check.C) {
+       s.serverSuite.SetUpTest(c)
+       go s.serverSuite.srv.Run()
+       s.serverSuite.srv.WaitReady()
+
+       s.token = arvadostest.ActiveToken
+       s.ignoreLogID = s.lastLogID(c)
+}
+
+func (s *v0Suite) TearDownTest(c *check.C) {
+       s.wg.Wait()
+       s.serverSuite.srv.Close()
+}
+
+func (s *v0Suite) TearDownSuite(c *check.C) {
+       s.deleteTestObjects(c)
+}
+
+func (s *v0Suite) deleteTestObjects(c *check.C) {
+       ac := arvados.NewClientFromEnv()
+       ac.AuthToken = arvadostest.AdminToken
+       for _, path := range s.toDelete {
+               err := ac.RequestAndDecode(nil, "DELETE", path, nil, nil)
+               if err != nil {
+                       panic(err)
+               }
+       }
+       s.toDelete = nil
+}
+
+func (s *v0Suite) TestFilters(c *check.C) {
+       conn, r, w := s.testClient()
+       defer conn.Close()
+
+       cmd := func(method, eventType string, status int) {
+               c.Check(w.Encode(map[string]interface{}{
+                       "method":  method,
+                       "filters": [][]interface{}{{"event_type", "in", []string{eventType}}},
+               }), check.IsNil)
+               s.expectStatus(c, r, status)
+       }
+       cmd("subscribe", "update", 200)
+       cmd("subscribe", "update", 200)
+       cmd("subscribe", "create", 200)
+       cmd("subscribe", "update", 200)
+       cmd("unsubscribe", "blip", 400)
+       cmd("unsubscribe", "create", 200)
+       cmd("unsubscribe", "update", 200)
+
+       go s.emitEvents(nil)
+       lg := s.expectLog(c, r)
+       c.Check(lg.EventType, check.Equals, "update")
+
+       cmd("unsubscribe", "update", 200)
+       cmd("unsubscribe", "update", 200)
+       cmd("unsubscribe", "update", 400)
+}
+
+func (s *v0Suite) TestLastLogID(c *check.C) {
+       lastID := s.lastLogID(c)
+
+       checkLogs := func(r *json.Decoder, uuid string) {
+               for _, etype := range []string{"create", "blip", "update"} {
+                       lg := s.expectLog(c, r)
+                       for lg.ObjectUUID != uuid {
+                               lg = s.expectLog(c, r)
+                       }
+                       c.Check(lg.EventType, check.Equals, etype)
+               }
+       }
+
+       // Connecting connEarly (before sending the early events) lets
+       // us confirm all of the "early" events have already passed
+       // through the server.
+       connEarly, rEarly, wEarly := s.testClient()
+       defer connEarly.Close()
+       c.Check(wEarly.Encode(map[string]interface{}{
+               "method": "subscribe",
+       }), check.IsNil)
+       s.expectStatus(c, rEarly, 200)
+
+       // Send the early events.
+       uuidChan := make(chan string, 1)
+       s.emitEvents(uuidChan)
+       uuidEarly := <-uuidChan
+
+       // Wait for the early events to pass through.
+       checkLogs(rEarly, uuidEarly)
+
+       // Connect the client that wants to get old events via
+       // last_log_id.
+       conn, r, w := s.testClient()
+       defer conn.Close()
+
+       c.Check(w.Encode(map[string]interface{}{
+               "method":      "subscribe",
+               "last_log_id": lastID,
+       }), check.IsNil)
+       s.expectStatus(c, r, 200)
+
+       checkLogs(r, uuidEarly)
+       s.emitEvents(uuidChan)
+       checkLogs(r, <-uuidChan)
+}
+
+func (s *v0Suite) TestPermission(c *check.C) {
+       conn, r, w := s.testClient()
+       defer conn.Close()
+
+       c.Check(w.Encode(map[string]interface{}{
+               "method": "subscribe",
+       }), check.IsNil)
+       s.expectStatus(c, r, 200)
+
+       uuidChan := make(chan string, 2)
+       go func() {
+               s.token = arvadostest.AdminToken
+               s.emitEvents(uuidChan)
+               s.token = arvadostest.ActiveToken
+               s.emitEvents(uuidChan)
+       }()
+
+       wrongUUID := <-uuidChan
+       rightUUID := <-uuidChan
+       lg := s.expectLog(c, r)
+       for lg.ObjectUUID != rightUUID {
+               c.Check(lg.ObjectUUID, check.Not(check.Equals), wrongUUID)
+               lg = s.expectLog(c, r)
+       }
+}
+
+// Two users create private objects; admin deletes both objects; each
+// user receives a "delete" event for their own object (not for the
+// other user's object).
+func (s *v0Suite) TestEventTypeDelete(c *check.C) {
+       clients := []struct {
+               token string
+               uuid  string
+               conn  *websocket.Conn
+               r     *json.Decoder
+               w     *json.Encoder
+       }{{token: arvadostest.ActiveToken}, {token: arvadostest.SpectatorToken}}
+       for i := range clients {
+               uuidChan := make(chan string, 1)
+               s.token = clients[i].token
+               s.emitEvents(uuidChan)
+               clients[i].uuid = <-uuidChan
+               clients[i].conn, clients[i].r, clients[i].w = s.testClient()
+
+               c.Check(clients[i].w.Encode(map[string]interface{}{
+                       "method": "subscribe",
+               }), check.IsNil)
+               s.expectStatus(c, clients[i].r, 200)
+       }
+
+       s.ignoreLogID = s.lastLogID(c)
+       s.deleteTestObjects(c)
+
+       for _, client := range clients {
+               lg := s.expectLog(c, client.r)
+               c.Check(lg.ObjectUUID, check.Equals, client.uuid)
+               c.Check(lg.EventType, check.Equals, "delete")
+       }
+}
+
+// Trashing/deleting a collection produces an "update" event with
+// properties["new_attributes"]["is_trashed"] == true.
+func (s *v0Suite) TestTrashedCollection(c *check.C) {
+       ac := arvados.NewClientFromEnv()
+       ac.AuthToken = s.token
+
+       coll := &arvados.Collection{ManifestText: ""}
+       err := ac.RequestAndDecode(coll, "POST", "arvados/v1/collections", s.jsonBody("collection", coll), map[string]interface{}{"ensure_unique_name": true})
+       c.Assert(err, check.IsNil)
+       s.ignoreLogID = s.lastLogID(c)
+
+       conn, r, w := s.testClient()
+       defer conn.Close()
+
+       c.Check(w.Encode(map[string]interface{}{
+               "method": "subscribe",
+       }), check.IsNil)
+       s.expectStatus(c, r, 200)
+
+       err = ac.RequestAndDecode(nil, "DELETE", "arvados/v1/collections/"+coll.UUID, nil, nil)
+       c.Assert(err, check.IsNil)
+
+       lg := s.expectLog(c, r)
+       c.Check(lg.ObjectUUID, check.Equals, coll.UUID)
+       c.Check(lg.EventType, check.Equals, "update")
+       c.Check(lg.Properties["old_attributes"].(map[string]interface{})["is_trashed"], check.Equals, false)
+       c.Check(lg.Properties["new_attributes"].(map[string]interface{})["is_trashed"], check.Equals, true)
+}
+
+func (s *v0Suite) TestSendBadJSON(c *check.C) {
+       conn, r, w := s.testClient()
+       defer conn.Close()
+
+       c.Check(w.Encode(map[string]interface{}{
+               "method": "subscribe",
+       }), check.IsNil)
+       s.expectStatus(c, r, 200)
+
+       _, err := fmt.Fprint(conn, "^]beep\n")
+       c.Check(err, check.IsNil)
+       s.expectStatus(c, r, 400)
+
+       c.Check(w.Encode(map[string]interface{}{
+               "method": "subscribe",
+       }), check.IsNil)
+       s.expectStatus(c, r, 200)
+}
+
+func (s *v0Suite) TestSubscribe(c *check.C) {
+       conn, r, w := s.testClient()
+       defer conn.Close()
+
+       s.emitEvents(nil)
+
+       err := w.Encode(map[string]interface{}{"21": 12})
+       c.Check(err, check.IsNil)
+       s.expectStatus(c, r, 400)
+
+       err = w.Encode(map[string]interface{}{"method": "subscribe", "filters": []string{}})
+       c.Check(err, check.IsNil)
+       s.expectStatus(c, r, 200)
+
+       uuidChan := make(chan string, 1)
+       go s.emitEvents(uuidChan)
+       uuid := <-uuidChan
+
+       for _, etype := range []string{"create", "blip", "update"} {
+               lg := s.expectLog(c, r)
+               for lg.ObjectUUID != uuid {
+                       lg = s.expectLog(c, r)
+               }
+               c.Check(lg.EventType, check.Equals, etype)
+       }
+}
+
+// Generate some events by creating and updating a workflow object,
+// and creating a custom log entry (event_type="blip") about the newly
+// created workflow. If uuidChan is not nil, send the new workflow
+// UUID to uuidChan as soon as it's known.
+func (s *v0Suite) emitEvents(uuidChan chan<- string) {
+       s.wg.Add(1)
+       defer s.wg.Done()
+
+       ac := arvados.NewClientFromEnv()
+       ac.AuthToken = s.token
+       wf := &arvados.Workflow{
+               Name: "ws_test",
+       }
+       err := ac.RequestAndDecode(wf, "POST", "arvados/v1/workflows", s.jsonBody("workflow", wf), map[string]interface{}{"ensure_unique_name": true})
+       if err != nil {
+               panic(err)
+       }
+       if uuidChan != nil {
+               uuidChan <- wf.UUID
+       }
+       lg := &arvados.Log{}
+       err = ac.RequestAndDecode(lg, "POST", "arvados/v1/logs", s.jsonBody("log", &arvados.Log{
+               ObjectUUID: wf.UUID,
+               EventType:  "blip",
+               Properties: map[string]interface{}{
+                       "beep": "boop",
+               },
+       }), nil)
+       if err != nil {
+               panic(err)
+       }
+       err = ac.RequestAndDecode(wf, "PUT", "arvados/v1/workflows/"+wf.UUID, s.jsonBody("workflow", wf), nil)
+       if err != nil {
+               panic(err)
+       }
+       s.toDelete = append(s.toDelete, "arvados/v1/workflows/"+wf.UUID, "arvados/v1/logs/"+lg.UUID)
+}
+
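+// jsonBody marshals ob to JSON and wraps it in a form-encoded request
+// body under the given resource name, as the Rails API expects.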
+func (s *v0Suite) jsonBody(rscName string, ob interface{}) io.Reader {
+       j, err := json.Marshal(ob)
+       if err != nil {
+               panic(err)
+       }
+       v := url.Values{}
+       v[rscName] = []string{string(j)}
+       return bytes.NewBufferString(v.Encode())
+}
+
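+// expectStatus reads the next server response and checks that its
+// "status" field matches the expected code.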
+func (s *v0Suite) expectStatus(c *check.C, r *json.Decoder, status int) {
+       msg := map[string]interface{}{}
+       c.Check(r.Decode(&msg), check.IsNil)
+       c.Check(int(msg["status"].(float64)), check.Equals, status)
+}
+
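+// expectLog decodes incoming events, skipping any with ID <=
+// s.ignoreLogID, and returns the first interesting one; it panics if
+// nothing arrives within 10 seconds.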
+func (s *v0Suite) expectLog(c *check.C, r *json.Decoder) *arvados.Log {
+       lg := &arvados.Log{}
+       ok := make(chan struct{})
+       go func() {
+               for lg.ID <= s.ignoreLogID {
+                       c.Check(r.Decode(lg), check.IsNil)
+               }
+               close(ok)
+       }()
+       select {
+       case <-time.After(10 * time.Second):
+               panic("timed out")
+       case <-ok:
+               return lg
+       }
+}
+
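+// testClient dials the test server's /websocket endpoint and returns
+// the connection along with JSON decoder/encoder wrappers for it.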
+func (s *v0Suite) testClient() (*websocket.Conn, *json.Decoder, *json.Encoder) {
+       srv := s.serverSuite.srv
+       conn, err := websocket.Dial("ws://"+srv.listener.Addr().String()+"/websocket?api_token="+s.token, "", "http://"+srv.listener.Addr().String())
+       if err != nil {
+               panic(err)
+       }
+       w := json.NewEncoder(conn)
+       r := json.NewDecoder(conn)
+       return conn, r, w
+}
+
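+// lastLogID returns the highest log ID currently in the test
+// database, so tests can ignore events emitted before they started.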
+func (s *v0Suite) lastLogID(c *check.C) uint64 {
+       var lastID uint64
+       c.Assert(testDB().QueryRow(`SELECT MAX(id) FROM logs`).Scan(&lastID), check.IsNil)
+       return lastID
+}
diff --git a/services/ws/session_v1.go b/services/ws/session_v1.go
new file mode 100644 (file)
index 0000000..9f46332
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "database/sql"
+       "errors"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+// newSessionV1 returns a v1 session -- see
+// https://dev.arvados.org/projects/arvados/wiki/Websocket_server
+func newSessionV1(ws wsConn, sendq chan<- interface{}, db *sql.DB, pc permChecker, ac *arvados.Client) (session, error) {
+       return nil, errors.New("not implemented")
+}
diff --git a/tools/arvbash/arvbash.sh b/tools/arvbash/arvbash.sh
new file mode 100755 (executable)
index 0000000..b47e3b8
--- /dev/null
@@ -0,0 +1,128 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# bash functions for managing Arvados tokens and other conveniences.
+
+read -rd "\000" helpmessage <<EOF
+$(basename $0): bash functions for managing Arvados tokens and other shortcuts.
+
+Syntax:
+        . $0            # activate for current shell
+        $0 --install    # install into .bashrc
+
+arvswitch <name>
+  Set ARVADOS_API_HOST and ARVADOS_API_TOKEN in the current environment based on
+  $HOME/.config/arvados/<name>.conf
+  With no arguments, list available Arvados configurations.
+
+arvsave <name>
+  Save values of ARVADOS_API_HOST and ARVADOS_API_TOKEN in the current environment to
+  $HOME/.config/arvados/<name>.conf
+
+arvrm <name>
+  Delete $HOME/.config/arvados/<name>.conf
+
+arvboxswitch <name>
+  Set ARVBOX_CONTAINER to <name>
+  With no arguments, list available arvboxes.
+
+arvopen <uuid>
+  Open an Arvados uuid in a web browser (https://curover.se)
+
+arvissue <number>
+  Open an Arvados ticket in a web browser (https://dev.arvados.org)
+
+EOF
+
+if [[ "$1" = "--install" ]] ; then
+    this=$(readlink -f $0)
+    if ! grep ". $this" ~/.bashrc >/dev/null ; then
+        echo ". $this" >> ~/.bashrc
+        echo "Installed into ~/.bashrc"
+    else
+        echo "Already installed in ~/.bashrc"
+    fi
+elif ! [[ $0 =~ bash$ ]] ; then
+    echo "$helpmessage"
+fi
+
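+# Keep commands that export an API token out of shell history.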
+HISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'
+
+arvswitch() {
+    if [[ -n "$1" ]] ; then
+        if [[ -f $HOME/.config/arvados/$1.conf ]] ; then
+            unset ARVADOS_API_HOST_INSECURE
+            for a in $(cat $HOME/.config/arvados/$1.conf) ; do export $a ; done
+            echo "Switched to $1"
+        else
+            echo "$1 unknown"
+        fi
+    else
+        echo "Switch Arvados environment conf"
+        echo "Usage: arvswitch name"
+        echo "Available confs:" $((cd $HOME/.config/arvados && ls --indicator-style=none *.conf) | rev | cut -c6- | rev)
+    fi
+}
+
+arvsave() {
+    if [[ -n "$1" ]] ; then
+        touch $HOME/.config/arvados/$1.conf
+        chmod 0600 $HOME/.config/arvados/$1.conf
+        env | grep ARVADOS_ > $HOME/.config/arvados/$1.conf
+    else
+        echo "Save current Arvados environment variables to conf file"
+        echo "Usage: arvsave name"
+    fi
+}
+
+arvrm() {
+    if [[ -n "$1" ]] ; then
+        if [[ -f $HOME/.config/arvados/$1.conf ]] ; then
+            rm $HOME/.config/arvados/$1.conf
+        else
+            echo "$1 unknown"
+        fi
+    else
+        echo "Delete Arvados environment conf"
+        echo "Usage: arvrm name"
+    fi
+}
+
+arvboxswitch() {
+    if [[ -n "$1" ]] ; then
+        if [[ -d $HOME/.arvbox/$1 ]] ; then
+            export ARVBOX_CONTAINER=$1
+            echo "Arvbox switched to $1"
+        else
+            echo "$1 unknown"
+        fi
+    else
+        if test -z "$ARVBOX_CONTAINER" ; then
+            ARVBOX_CONTAINER=arvbox
+        fi
+        echo "Switch Arvbox environment conf"
+        echo "Usage: arvboxswitch name"
+        echo "Your current container is: $ARVBOX_CONTAINER"
+        echo "Available confs:" $(cd $HOME/.arvbox && ls --indicator-style=none)
+    fi
+}
+
+arvopen() {
+    if [[ -n "$1" ]] ; then
+        xdg-open https://curover.se/$1
+    else
+        echo "Open Arvados uuid in browser"
+        echo "Usage: arvopen uuid"
+    fi
+}
+
+arvissue() {
+    if [[ -n "$1" ]] ; then
+        xdg-open https://dev.arvados.org/issues/$1
+    else
+        echo "Open Arvados issue in browser"
+        echo "Usage: arvissue uuid"
+    fi
+}
diff --git a/tools/arvbox/bin/arvbox b/tools/arvbox/bin/arvbox
new file mode 100755 (executable)
index 0000000..66aebf8
--- /dev/null
@@ -0,0 +1,520 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+if ! test -d /sys/fs/cgroup ; then
+     echo "Arvbox requires cgroups to be mounted at /sys/fs/cgroup in order to use"
+     echo "Docker-in-Docker.  Older operating systems that put cgroups in other"
+     echo "places (such as /cgroup) are not supported."
+     exit 1
+fi
+
+if ! which docker >/dev/null 2>/dev/null ; then
+  echo "Arvbox requires Docker.  To install, run the following command as root:"
+  echo "curl -sSL https://get.docker.com/ | sh"
+  exit 1
+fi
+
+if test -z "$ARVBOX_DOCKER" ; then
+    if which greadlink >/dev/null 2>/dev/null ; then
+        ARVBOX_DOCKER=$(greadlink -f $(dirname $0)/../lib/arvbox/docker)
+    else
+        ARVBOX_DOCKER=$(readlink -f $(dirname $0)/../lib/arvbox/docker)
+    fi
+fi
+
+if test -z "$ARVBOX_CONTAINER" ; then
+    ARVBOX_CONTAINER=arvbox
+fi
+
+if test -z "$ARVBOX_BASE" ; then
+    ARVBOX_BASE="$HOME/.arvbox"
+fi
+
+if test -z "$ARVBOX_DATA" ; then
+    ARVBOX_DATA="$ARVBOX_BASE/$ARVBOX_CONTAINER"
+fi
+
+if test -z "$ARVADOS_ROOT" ; then
+    ARVADOS_ROOT="$ARVBOX_DATA/arvados"
+fi
+
+if test -z "$SSO_ROOT" ; then
+    SSO_ROOT="$ARVBOX_DATA/sso-devise-omniauth-provider"
+fi
+
+if test -z "$COMPOSER_ROOT" ; then
+    COMPOSER_ROOT="$ARVBOX_DATA/composer"
+fi
+
+if test -z "$WORKBENCH2_ROOT" ; then
+    WORKBENCH2_ROOT="$ARVBOX_DATA/workbench2"
+fi
+
+PG_DATA="$ARVBOX_DATA/postgres"
+VAR_DATA="$ARVBOX_DATA/var"
+PASSENGER="$ARVBOX_DATA/passenger"
+GEMS="$ARVBOX_DATA/gems"
+PIPCACHE="$ARVBOX_DATA/pip"
+NPMCACHE="$ARVBOX_DATA/npm"
+GOSTUFF="$ARVBOX_DATA/gopath"
+RLIBS="$ARVBOX_DATA/Rlibs"
+
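+# Print the arvbox container's IP address from "docker inspect" output.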
+getip() {
+    docker inspect $ARVBOX_CONTAINER | grep \"IPAddress\" | head -n1 | tr -d ' ":,\n' | cut -c10-
+}
+
+gethost() {
+    set +e
+    OVERRIDE=$(docker exec -i $ARVBOX_CONTAINER cat /var/run/localip_override 2>/dev/null)
+    CODE=$?
+    set -e
+    if test "$CODE" = 0 ; then
+        echo $OVERRIDE
+    else
+        getip
+    fi
+}
+
+getclusterid() {
+    docker exec $ARVBOX_CONTAINER cat /var/lib/arvados/api_uuid_prefix
+}
+
+updateconf() {
+    if test -f ~/.config/arvados/$ARVBOX_CONTAINER.conf ; then
+        sed "s/ARVADOS_API_HOST=.*/ARVADOS_API_HOST=$(gethost):8000/" <$HOME/.config/arvados/$ARVBOX_CONTAINER.conf >$HOME/.config/arvados/$ARVBOX_CONTAINER.conf.tmp
+        mv ~/.config/arvados/$ARVBOX_CONTAINER.conf.tmp ~/.config/arvados/$ARVBOX_CONTAINER.conf
+    else
+        mkdir -p $HOME/.config/arvados
+        cat >$HOME/.config/arvados/$ARVBOX_CONTAINER.conf <<EOF
+ARVADOS_API_HOST=$(gethost):8000
+ARVADOS_API_TOKEN=
+ARVADOS_API_HOST_INSECURE=true
+EOF
+    fi
+}
+
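+# Follow the container log until the runit "ready" marker appears, then
+# print the ARVADOS_API_HOST setting the user should export.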
+wait_for_arvbox() {
+    FF=/tmp/arvbox-fifo-$$
+    mkfifo $FF
+    docker logs -f $ARVBOX_CONTAINER > $FF &
+    LOGPID=$!
+    while read line ; do
+        if echo $line | grep "ok: down: ready:" >/dev/null ; then
+            kill $LOGPID
+        else
+            echo $line
+        fi
+    done < $FF
+    rm $FF
+    echo
+    if test -n "$localip" ; then
+        echo "export ARVADOS_API_HOST=$localip:8000"
+    else
+        echo "export ARVADOS_API_HOST=$(gethost):8000"
+    fi
+}
+
+run() {
+    CONFIG=$1
+    TAG=$2
+
+    shift
+
+    need_setup=1
+
+    if docker ps -a --filter "status=running" | grep -E "$ARVBOX_CONTAINER$" -q ; then
+        if test "$CONFIG" = test ; then
+            need_setup=0
+        else
+            echo "Container $ARVBOX_CONTAINER is already running"
+            exit 0
+        fi
+    fi
+
+    if test $need_setup = 1 ; then
+        if docker ps -a | grep -E "$ARVBOX_CONTAINER$" -q ; then
+            echo "Container $ARVBOX_CONTAINER already exists but is not running; use restart or reboot"
+            exit 1
+        fi
+    fi
+
+    if test -n "$TAG"
+    then
+        if test $(echo $TAG | cut -c1-1) != '-' ; then
+            TAG=":$TAG"
+            shift
+        else
+            unset TAG
+        fi
+    fi
+
+    if echo "$CONFIG" | grep '^public' ; then
+        if test -n "$ARVBOX_PUBLISH_IP" ; then
+            localip=$ARVBOX_PUBLISH_IP
+        else
+            defaultdev=$(/sbin/ip route|awk '/default/ { print $5 }')
+            localip=$(ip addr show $defaultdev | grep 'inet ' | sed 's/ *inet \(.*\)\/.*/\1/')
+        fi
+        iptemp=$(tempfile)
+        echo $localip > $iptemp
+        chmod og+r $iptemp
+        PUBLIC="--volume=$iptemp:/var/run/localip_override
+              --publish=443:443
+              --publish=3001:3001
+              --publish=8000:8000
+              --publish=8900:8900
+              --publish=9001:9001
+              --publish=9002:9002
+              --publish=25100:25100
+              --publish=25107:25107
+              --publish=25108:25108
+              --publish=8001:8001
+              --publish=8002:8002"
+    else
+        PUBLIC=""
+    fi
+
+    if echo "$CONFIG" | grep 'demo$' ; then
+        if test -d "$ARVBOX_DATA" ; then
+            echo "It looks like you already have a development container named $ARVBOX_CONTAINER."
+            echo "Set ARVBOX_CONTAINER to set a different name for your demo container"
+            exit 1
+        fi
+
+        if ! (docker ps -a | grep -E "$ARVBOX_CONTAINER-data$" -q) ; then
+            docker create -v /var/lib/postgresql -v /var/lib/arvados --name $ARVBOX_CONTAINER-data arvados/arvbox-demo /bin/true
+        fi
+
+        docker run \
+               --detach \
+               --name=$ARVBOX_CONTAINER \
+               --privileged \
+               --volumes-from $ARVBOX_CONTAINER-data \
+               $PUBLIC \
+               arvados/arvbox-demo$TAG
+        updateconf
+        wait_for_arvbox
+    else
+        mkdir -p "$PG_DATA" "$VAR_DATA" "$PASSENGER" "$GEMS" "$PIPCACHE" "$NPMCACHE" "$GOSTUFF" "$RLIBS"
+
+        if ! test -d "$ARVADOS_ROOT" ; then
+            git clone https://github.com/curoverse/arvados.git "$ARVADOS_ROOT"
+        fi
+        if ! test -d "$SSO_ROOT" ; then
+            git clone https://github.com/curoverse/sso-devise-omniauth-provider.git "$SSO_ROOT"
+        fi
+        if ! test -d "$COMPOSER_ROOT" ; then
+            git clone https://github.com/curoverse/composer.git "$COMPOSER_ROOT"
+        fi
+        if ! test -d "$WORKBENCH2_ROOT" ; then
+            git clone https://github.com/curoverse/arvados-workbench2.git "$WORKBENCH2_ROOT"
+        fi
+
+        if test "$CONFIG" = test ; then
+
+            mkdir -p $VAR_DATA/test
+
+            if test "$need_setup" = 1 ; then
+                docker run \
+                       --detach \
+                       --name=$ARVBOX_CONTAINER \
+                       --privileged \
+                       "--volume=$ARVADOS_ROOT:/usr/src/arvados:rw" \
+                       "--volume=$SSO_ROOT:/usr/src/sso:rw" \
+                       "--volume=$COMPOSER_ROOT:/usr/src/composer:rw" \
+                       "--volume=$WORKBENCH2_ROOT:/usr/src/workbench2:rw" \
+                       "--volume=$PG_DATA:/var/lib/postgresql:rw" \
+                       "--volume=$VAR_DATA:/var/lib/arvados:rw" \
+                       "--volume=$PASSENGER:/var/lib/passenger:rw" \
+                       "--volume=$GEMS:/var/lib/gems:rw" \
+                       "--volume=$PIPCACHE:/var/lib/pip:rw" \
+                       "--volume=$NPMCACHE:/var/lib/npm:rw" \
+                       "--volume=$GOSTUFF:/var/lib/gopath:rw" \
+                       "--volume=$RLIBS:/var/lib/Rlibs:rw" \
+                      "--env=SVDIR=/etc/test-service" \
+                       arvados/arvbox-dev$TAG
+
+                docker exec -ti \
+                       $ARVBOX_CONTAINER \
+                       /usr/local/lib/arvbox/runsu.sh \
+                       /usr/local/lib/arvbox/waitforpostgres.sh
+
+                docker exec -ti \
+                       $ARVBOX_CONTAINER \
+                       /usr/local/lib/arvbox/runsu.sh \
+                       /var/lib/arvbox/service/sso/run-service --only-setup
+
+                docker exec -ti \
+                       $ARVBOX_CONTAINER \
+                       /usr/local/lib/arvbox/runsu.sh \
+                       /var/lib/arvbox/service/api/run-service --only-setup
+            fi
+
+            docker exec -ti \
+                   $ARVBOX_CONTAINER \
+                   /usr/local/lib/arvbox/runsu.sh \
+                   /usr/src/arvados/build/run-tests.sh \
+                   --temp /var/lib/arvados/test \
+                   WORKSPACE=/usr/src/arvados \
+                   GEM_HOME=/var/lib/gems \
+                   "$@"
+        elif echo "$CONFIG" | grep 'dev$' ; then
+            docker run \
+                   --detach \
+                   --name=$ARVBOX_CONTAINER \
+                   --privileged \
+                   "--volume=$ARVADOS_ROOT:/usr/src/arvados:rw" \
+                   "--volume=$SSO_ROOT:/usr/src/sso:rw" \
+                   "--volume=$COMPOSER_ROOT:/usr/src/composer:rw" \
+                   "--volume=$WORKBENCH2_ROOT:/usr/src/workbench2:rw" \
+                   "--volume=$PG_DATA:/var/lib/postgresql:rw" \
+                   "--volume=$VAR_DATA:/var/lib/arvados:rw" \
+                   "--volume=$PASSENGER:/var/lib/passenger:rw" \
+                   "--volume=$GEMS:/var/lib/gems:rw" \
+                   "--volume=$PIPCACHE:/var/lib/pip:rw" \
+                   "--volume=$NPMCACHE:/var/lib/npm:rw" \
+                   "--volume=$GOSTUFF:/var/lib/gopath:rw" \
+                   "--volume=$RLIBS:/var/lib/Rlibs:rw" \
+                   $PUBLIC \
+                   arvados/arvbox-dev$TAG
+            updateconf
+            wait_for_arvbox
+            echo "The Arvados source code is checked out at: $ARVADOS_ROOT"
+           echo "The Arvados testing root certificate is $VAR_DATA/root-cert.pem"
+        else
+            echo "Unknown configuration '$CONFIG'"
+        fi
+    fi
+}
+
+stop() {
+    if docker ps -a --filter "status=running" | grep -E "$ARVBOX_CONTAINER$" -q ; then
+        docker stop $ARVBOX_CONTAINER
+    fi
+
+    VOLUMES=--volumes=true
+    if docker ps -a --filter "status=created" | grep -E "$ARVBOX_CONTAINER$" -q ; then
+        docker rm $VOLUMES $ARVBOX_CONTAINER
+    fi
+    if docker ps -a --filter "status=exited" | grep -E "$ARVBOX_CONTAINER$" -q ; then
+        docker rm $VOLUMES $ARVBOX_CONTAINER
+    fi
+}
+
+build() {
+    if ! test -f "$ARVBOX_DOCKER/Dockerfile.base" ;  then
+        echo "Could not find Dockerfile (expected it at $ARVBOX_DOCKER/Dockerfile.base)"
+        exit 1
+    fi
+    if docker --version | grep " 1\.[0-9]\." ; then
+        # Docker versions prior to 1.10 require the -f flag;
+        # the flag was removed in Docker 1.12.
+        FORCE=-f
+    fi
+    GITHEAD=$(cd $ARVBOX_DOCKER && git log --format=%H -n1 HEAD)
+    docker build --build-arg=arvados_version=$GITHEAD $NO_CACHE -t arvados/arvbox-base:$GITHEAD -f "$ARVBOX_DOCKER/Dockerfile.base" "$ARVBOX_DOCKER"
+    docker tag $FORCE arvados/arvbox-base:$GITHEAD arvados/arvbox-base:latest
+    if test "$1" = localdemo -o "$1" = publicdemo ; then
+        docker build $NO_CACHE -t arvados/arvbox-demo:$GITHEAD -f "$ARVBOX_DOCKER/Dockerfile.demo" "$ARVBOX_DOCKER"
+        docker tag $FORCE arvados/arvbox-demo:$GITHEAD arvados/arvbox-demo:latest
+    else
+        docker build $NO_CACHE -t arvados/arvbox-dev:$GITHEAD -f "$ARVBOX_DOCKER/Dockerfile.dev" "$ARVBOX_DOCKER"
+        docker tag $FORCE arvados/arvbox-dev:$GITHEAD arvados/arvbox-dev:latest
+    fi
+}
+
+check() {
+    case "$1" in
+        localdemo|publicdemo|dev|publicdev|test)
+            true
+            ;;
+        *)
+            echo "Argument to $subcmd must be one of localdemo, publicdemo, dev, publicdev, test"
+            exit 1
+        ;;
+    esac
+}
+
+subcmd="$1"
+if test -n "$subcmd" ; then
+    shift
+fi
+case "$subcmd" in
+    build)
+        check $@
+        build $@
+        ;;
+
+    rebuild)
+        check $@
+        NO_CACHE=--no-cache build $@
+        ;;
+
+    start|run)
+        check $@
+        run $@
+        ;;
+
+    sh*)
+        exec docker exec -ti \
+              -e LINES=$(tput lines) \
+              -e COLUMNS=$(tput cols) \
+              -e TERM=$TERM \
+              -e GEM_HOME=/var/lib/gems \
+              $ARVBOX_CONTAINER /bin/bash
+        ;;
+
+    pipe)
+        exec docker exec -i $ARVBOX_CONTAINER /usr/bin/env GEM_HOME=/var/lib/gems /bin/bash -
+        ;;
+
+    stop)
+        stop
+        ;;
+
+    restart)
+        check $@
+        stop
+        run $@
+        ;;
+
+    reboot)
+        check $@
+        stop
+        build $@
+        run $@
+        ;;
+
+    ip)
+        getip
+        ;;
+
+    host)
+        gethost
+        ;;
+
+    open)
+        exec xdg-open http://$(gethost)
+        ;;
+
+    status)
+        echo "Container: $ARVBOX_CONTAINER"
+        if docker ps -a --filter "status=running" | grep -E "$ARVBOX_CONTAINER$" -q ; then
+           echo "Cluster id: $(getclusterid)"
+            echo "Status: running"
+            echo "Container IP: $(getip)"
+            echo "Published host: $(gethost)"
+        else
+            echo "Status: not running"
+        fi
+        if test -d "$ARVBOX_DATA" ; then
+            echo "Data: $ARVBOX_DATA"
+        elif docker ps -a | grep -E "$ARVBOX_CONTAINER-data$" -q ; then
+            echo "Data: $ARVBOX_CONTAINER-data"
+        else
+            echo "Data: none"
+        fi
+        ;;
+
+    reset|destroy)
+        stop
+        if test -d "$ARVBOX_DATA" ; then
+            if test "$subcmd" = destroy ; then
+                if test "$1" != -f ; then
+                    echo "WARNING!  This will delete your entire arvbox ($ARVBOX_DATA)."
+                    echo "Use destroy -f if you really mean it."
+                    exit 1
+                fi
+                set -x
+                rm -rf "$ARVBOX_DATA"
+            else
+                if test "$1" != -f ; then
+                    echo "WARNING!  This will delete your arvbox data ($ARVBOX_DATA)."
+                    echo "Code and downloaded packages will be preserved."
+                    echo "Use reset -f if you really mean it."
+                    exit 1
+                fi
+                set -x
+                rm -rf "$ARVBOX_DATA/postgres"
+                rm -rf "$ARVBOX_DATA/var"
+            fi
+        else
+            if test "$1" != -f ; then
+                echo "WARNING!  This will delete your data container $ARVBOX_CONTAINER-data.  Use -f if you really mean it."
+                exit 1
+            fi
+            set -x
+            docker rm "$ARVBOX_CONTAINER-data"
+        fi
+        ;;
+
+    log)
+        if test -n "$1" ; then
+            exec docker exec -ti -e LINES=$(tput lines) -e COLUMNS=$(tput cols) -e TERM=$TERM $ARVBOX_CONTAINER less --follow-name -R +GF "/etc/service/$1/log/main/current"
+        else
+            exec docker exec -ti $ARVBOX_CONTAINER tail $(docker exec -ti $ARVBOX_CONTAINER find -L /etc -path '/etc/service/*/log/main/current' -printf " %p")
+        fi
+        ;;
+
+    cat)
+        if test -n "$1" ; then
+            exec docker exec $ARVBOX_CONTAINER cat "$@"
+        else
+            echo "Usage: $0 $subcmd <files>"
+        fi
+        ;;
+
+    ls)
+        exec docker exec -ti $ARVBOX_CONTAINER /usr/bin/env TERM=$TERM ls "$@"
+        ;;
+
+    sv)
+        if test -n "$1" -a -n "$2" ; then
+            exec docker exec $ARVBOX_CONTAINER sv "$@"
+        else
+            echo "Usage: $0 $subcmd <start|stop|restart> <service>"
+            echo "Available services:"
+            exec docker exec $ARVBOX_CONTAINER ls /etc/service
+        fi
+        ;;
+
+    clone)
+        if test -n "$2" ; then
+            cp -r "$ARVBOX_BASE/$1" "$ARVBOX_BASE/$2"
+            echo "Created new arvbox $2"
+            echo "export ARVBOX_CONTAINER=$2"
+        else
+            echo "clone <from> <to>   clone an arvbox"
+            echo "available arvboxes: $(ls $ARVBOX_BASE)"
+        fi
+        ;;
+
+    *)
+        echo "Arvados-in-a-box                      http://arvados.org"
+        echo
+        echo "build   <config>      build arvbox Docker image"
+        echo "rebuild <config>      build arvbox Docker image, no layer cache"
+        echo "start|run <config> [tag]  start $ARVBOX_CONTAINER container"
+        echo "open       open arvbox workbench in a web browser"
+        echo "shell      enter arvbox shell"
+        echo "ip         print arvbox docker container ip address"
+        echo "host       print arvbox published host"
+        echo "status     print some information about current arvbox"
+        echo "stop       stop arvbox container"
+        echo "restart <config>  stop, then run again"
+        echo "reboot  <config>  stop, build arvbox Docker image, run"
+        echo "reset      delete arvbox arvados data (be careful!)"
+        echo "destroy    delete all arvbox code and data (be careful!)"
+        echo "log <service> tail log of specified service"
+        echo "ls <options>  list directories inside arvbox"
+        echo "cat <files>   get contents of files inside arvbox"
+        echo "pipe       run a bash script piped in from stdin"
+        echo "sv <start|stop|restart> <service> change state of service inside arvbox"
+        echo "clone <from> <to>   clone an arvbox"
+        ;;
+esac
diff --git a/tools/arvbox/lib/arvbox/docker/58118E89F3A912897C070ADBF76221572C52609D.asc b/tools/arvbox/lib/arvbox/docker/58118E89F3A912897C070ADBF76221572C52609D.asc
new file mode 100644 (file)
index 0000000..086bab3
--- /dev/null
@@ -0,0 +1,106 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFWln24BEADrBl5p99uKh8+rpvqJ48u4eTtjeXAWbslJotmC/CakbNSqOb9o
+ddfzRvGVeJVERt/Q/mlvEqgnyTQy+e6oEYN2Y2kqXceUhXagThnqCoxcEJ3+KM4R
+mYdoe/BJ/J/6rHOjq7Omk24z2qB3RU1uAv57iY5VGw5p45uZB4C4pNNsBJXoCvPn
+TGAs/7IrekFZDDgVraPx/hdiwopQ8NltSfZCyu/jPpWFK28TR8yfVlzYFwibj5WK
+dHM7ZTqlA1tHIG+agyPf3Rae0jPMsHR6q+arXVwMccyOi+ULU0z8mHUJ3iEMIrpT
+X+80KaN/ZjibfsBOCjcfiJSB/acn4nxQQgNZigna32velafhQivsNREFeJpzENiG
+HOoyC6qVeOgKrRiKxzymj0FIMLru/iFF5pSWcBQB7PYlt8J0G80lAcPr6VCiN+4c
+NKv03SdvA69dCOj79PuO9IIvQsJXsSq96HB+TeEmmL+xSdpGtGdCJHHM1fDeCqkZ
+hT+RtBGQL2SEdWjxbF43oQopocT8cHvyX6Zaltn0svoGs+wX3Z/H6/8P5anog43U
+65c0A+64Jj00rNDr8j31izhtQMRo892kGeQAaaxg4Pz6HnS7hRC+cOMHUU4HA7iM
+zHrouAdYeTZeZEQOA7SxtCME9ZnGwe2grxPXh/U/80WJGkzLFNcTKdv+rwARAQAB
+tDdEb2NrZXIgUmVsZWFzZSBUb29sIChyZWxlYXNlZG9ja2VyKSA8ZG9ja2VyQGRv
+Y2tlci5jb20+iQGcBBABCgAGBQJaJYMKAAoJENNu5NUL+WcWfQML/RjicnhN0G28
++Hj3icn/SHYXg8VTHMX7aAuuClZh7GoXlvVlyN0cfRHTcFPkhv1LJ5/zFVwJxlIc
+xX0DlWbv5zlPQQQfNYH7mGCt3OS0QJGDpCM9Q6iw1EqC0CdtBDIZMGn7s9pnuq5C
+3kzer097BltvuXWI+BRMvVad2dhzuOQi76jyxhprTUL6Xwm7ytNSja5Xyigfc8HF
+rXhlQxnMEpWpTttY+En1SaTgGg7/4yB9jG7UqtdaVuAvWI69V+qzJcvgW6do5XwH
+b/5waezxOU033stXcRCYkhEenm+mXzcJYXt2avg1BYIQsZuubCBlpPtZkgWWLOf+
+eQR1Qcy9IdWQsfpH8DX6cEbeiC0xMImcuufI5KDHZQk7E7q8SDbDbk5Dam+2tRef
+eTB2A+MybVQnpsgCvEBNQ2TfcWsZ6uLHMBhesx/+rmyOnpJDTvvCLlkOMTUNPISf
+GJI0IHZFHUJ/+/uRfgIzG6dSqxQ0zHXOwGg4GbhjpQ5I+5Eg2BNRkYkCHAQQAQoA
+BgUCVsO73QAKCRBcs2HlUvsNEB8rD/4t+5uEsqDglXJ8m5dfL88ARHKeFQkW17x7
+zl7ctYHHFSFfP2iajSoAVfe5WN766TsoiHgfBE0HoLK8RRO7fxs9K7Czm6nyxB3Z
+p+YgSUZIS3wqc43jp8gd2dCCQelKIDv5rEFWHuQlyZersK9AJqIggS61ZQwJLcVY
+fUVnIdJdCmUV9haR7vIfrjNP88kqiInZWHy2t8uaB7HFPpxlNYuiJsA0w98rGQuY
+6fWlX71JnBEsgG+L73XAB0fm14QP0VvEB3njBZYlsO2do2B8rh5g51htslK5wqgC
+U61lfjnykSM8yRQbOHvPK7uYdmSF3UXqcP/gjmI9+C8s8UdnMa9rv8b8cFwpEjHu
+xeCmQKYQ/tcLOtRYZ1DIvzxETGH0xbrz6wpKuIMgY7d3xaWdjUf3ylvO0DnlXJ9Y
+r15fYndzDLPSlybIO0GrE+5grHntlSBbMa5BUNozaQ/iQBEUZ/RY+AKxy+U28JJB
+W2Wb0oun6+YdhmwgFyBoSFyp446Kz2P2A1+l/AGhzltc25Vsvwha+lRZfet464yY
+GoNBurTbQWS63JWYFoTkKXmWeS2789mQOQqka3wFXMDzVtXzmxSEbaler7lZbhTj
+wjAAJzp6kdNsPbde4lUIzt6FTdJm0Ivb47hMV4dWKEnFXrYjui0ppUH1RFUU6hyz
+IF8kfxDKO4kCHAQQAQoABgUCV0lgZQAKCRBcs2HlUvsNEHh9EACOm7QH2MGD7gI3
+0VMvapZz4Wfsbda58LFM7G5qPCt10zYfpf0dPJ7tHbHM8N9ENcI7tvH4dTfGsttt
+/uvX9PsiAml6kdfAGxoBRil+76NIHxFWsXSLVDd3hzcnRhc5njimwJa8SDBAp0kx
+v05BVWDvTbZb/b0jdgbqZk2oE0RK8S2Sp1bFkc6fl3pcJYFOQQmelOmXvPmyHOhd
+W2bLX9e1/IulzVf6zgi8dsj9IZ9aLKJY6Cz6VvJ85ML6mLGGwgNvJTLdWqntFFr0
+QqkdM8ZSp9ezWUKo28XGoxDAmo6ENNTLIZjuRlnj1Yr9mmwmf4mgucyqlU93XjCR
+y6u5bpuqoQONRPYCR/UKKk/qoGnYXnhX6AtUD+3JHvrV5mINkd/ad5eR5pviUGz+
+H/VeZqVhMbxxgkm3Gra9+bZ2pCCWboKtqIM7JtXYwks/dttkV5fTqBarJtWzcwO/
+Pv3DreTdnMoVNGzNk/84IeNmGww/iQ1Px0psVCKVPsKxr2RjNhVP7qdA0cTguFNX
+y+hx5Y/JYjSVnxIN74aLoDoeuoBhfYpOY+HiJTaM+pbLfoJr5WUPf/YUQ3qBvgG4
+WXiJUOAgsPmNY//n1MSMyhz1SvmhSXfqCVTb26IyVv0oA3UjLRcKjr18mHB5d9Fr
+NIGVHg8gJjRmXid5BZJZwKQ5niivjokCIgQQAQoADAUCV3uc0wWDB4YfgAAKCRAx
+uBWjAQZ0qe2DEACaq16AaJ2QKtOweqlGk92gQoJ2OCbIW15hW/1660u+X+2CQz8d
+nySXaq22AyBx4Do88b6d54D6TqScyObGJpGroHqAjvyh7v/t/V6oEwe34Ls2qUX2
+77lqfqsz3B0nW/aKZ2oH8ygM3tw0J5y4sAj5bMrxqcwuCs14Fds3v+K2mjsntZCu
+ztHB8mqZp/6v00d0vGGqcl6uVaS04cCQMNUkQ7tGMXlyAEIiH2ksU+/RJLaIqFtg
+klfP3Y7foAY15ymCSQPD9c81+xjbf0XNmBtDreL+rQVtesahU4Pp+Sc23iuXGdY2
+yF13wnGmScojNjM2BoUiffhFeyWBdOTgCFhOEhk0Y1zKrkNqDC0sDAj0B5vhQg/T
+10NLR2MerSk9+MJLHZqFrHXo5f59zUvte/JhtViP5TdO/Yd4ptoEcDspDKLv0FrN
+7xsP8Q6DmBz1doCe06PQS1Z1Sv4UToHRS2RXskUnDc8Cpuex5mDBQO+LV+tNToh4
+ZNcpj9lFHNuaA1qS15X3EVCySZaPyn2WRd6ZisCKtwopRmshVItTTcLmrxu+hHAF
+bVRVFRRSCE8JIZLkWwRyMrcxB2KLBYA+f2nCtD2rqiZ8K8Cr9J1qt2iu5yogCwA/
+ombzzYxWWrt/wD6ixJr5kZwBJZroHB7FkRBcTDIzDFYGBYmClACTvLuOnokCIgQS
+AQoADAUCWKy8/gWDB4YfgAAKCRAkW0txwCm5FmrGD/9lL31LQtn5wxwoZvfEKuMh
+KRw0FDUq59lQpqyMxp7lrZozFUqlH4MLTeEWbFle+R+UbUoVkBnZ/cSvVGwtRVaH
+wUeP9NAqBLtIqt4S0T2T0MW6Ug0DVH7V7uYuFktpv1xmIzcC4gV+LHhp95SPYbWr
+uVMi6ENIMZoEqW9uHOy6n2/nh76dR2NVJiZHt5LbG8YXM/Y+z3XsIenwKQ97YO7x
+yEaM7UdsQSqKVB0isTQXT2wxoA/pDvSyu7jpElD5dOtPPz3r0fQpcQKrq0IMjgcB
+u5X5tQ5uktmmdaAvIwLibUB9A+htFiFP4irSx//Lkn66RLjrSqwtMCsv7wbPvTfc
+fdpcmkR767t1VvjQWj9DBfOMjGJk9eiLkUSHYyQst6ELyVdutAIHRV2GQqfEKJzc
+cD3wKdbaOoABqRVr/ok5Oj0YKSrvk0lW3l8vS/TZXvQppSMdJuaTR8JDy6dGuoKt
+uyFDb0fKf1JU3+Gj3Yy2YEfqX0MjNQsck9pDV647UXXdzF9uh3cYVfPbl+xBYOU9
+d9qRcqMut50AVIxpUepGa4Iw7yOSRPCnPAMNAPSmAdJTaQcRWcUd9LOaZH+ZFLJZ
+mpbvS//jQpoBt++Ir8wl9ZJXICRJcvrQuhCjOSNLFzsNr/wyVLnGwmTjLWoJEA0p
+c0cYtLW6fSGknkvNA7e8LYkCMwQQAQgAHRYhBFI9KC2HD6c70cN9svEo88fgKodF
+BQJZ76NPAAoJEPEo88fgKodFYXwP+wW6F7UpNmKXaddu+aamLTe3uv8OSKUHQbRh
+By1oxfINI7iC+BZl9ycJip0S08JH0F+RZsi1H24+GcP9vGTDgu3z0NcOOD4mPpzM
+jSi2/hbGzh9C84pxRJVLAKrbqCz7YQ6JdNG4RUHW/r0QgKTnTlvikVx7n9QaPrVl
+PsVFU3xv5oQxUHpwNWyvpPGTDiycuaGKekodYhZ0vKzJzfyyaUTgfxvTVVj10jyi
+f+mSfY8YBHhDesgYF1d2CUEPth9z5KC/eDgY7KoWs8ZK6sVL3+tGrnqK/s6jqcsk
+J7Kt4c3k0jU56rUo8+jnu9yUHcBXAjtr1Vz/nwVfqmPzukIF1ZkMqdQqIRtvDyEC
+16yGngMpWEVM3/vIsi2/uUMuGvjEkEmqs2oLK1hf+Y0W6Avq+9fZUQUEk0e4wbpu
+RCqX5OjeQTEEXmAzoMsdAiwFvr1ul+eI/BPy+29OQ77hz3/dotdYYfs1JVkiFUhf
+PJwvpoUOXiA5V56wl3i5tkbRSLRSkLmiLTlCEfClHEK/wwLU4ZKuD5UpW8xL438l
+/Ycnsl7aumnofWoaEREBc1Xbnx9SZbrTT8VctW8XpMVIPxCwJCp/LqHtyEbnptnD
+7QoHtdWexFmQFUIlGaDiaL7nv0BD6RA/HwhVSxU3b3deKDYNpG9QnAzte8KXA9/s
+ejP18gCKiQI4BBMBAgAiBQJVpZ9uAhsvBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIX
+gAAKCRD3YiFXLFJgnbRfEAC9Uai7Rv20QIDlDogRzd+Vebg4ahyoUdj0CH+nAk40
+RIoq6G26u1e+sdgjpCa8jF6vrx+smpgd1HeJdmpahUX0XN3X9f9qU9oj9A4I1WDa
+lRWJh+tP5WNv2ySy6AwcP9QnjuBMRTnTK27pk1sEMg9oJHK5p+ts8hlSC4SluyMK
+H5NMVy9c+A9yqq9NF6M6d6/ehKfBFFLG9BX+XLBATvf1ZemGVHQusCQebTGv0C0V
+9yqtdPdRWVIEhHxyNHATaVYOafTj/EF0lDxLl6zDT6trRV5n9F1VCEh4Aal8L5Mx
+VPcIZVO7NHT2EkQgn8CvWjV3oKl2GopZF8V4XdJRl90U/WDv/6cmfI08GkzDYBHh
+S8ULWRFwGKobsSTyIvnbk4NtKdnTGyTJCQ8+6i52s+C54PiNgfj2ieNn6oOR7d+b
+NCcG1CdOYY+ZXVOcsjl73UYvtJrO0Rl/NpYERkZ5d/tzw4jZ6FCXgggA/Zxcjk6Y
+1ZvIm8Mt8wLRFH9Nww+FVsCtaCXJLP8DlJLASMD9rl5QS9Ku3u7ZNrr5HWXPHXIT
+X660jglyshch6CWeiUATqjIAzkEQom/kEnOrvJAtkypRJ59vYQOedZ1sFVELMXg2
+UCkD/FwojfnVtjzYaTCeGwFQeqzHmM241iuOmBYPeyTY5veF49aBJA1gEJOQTvBR
+8YkCOQQRAQgAIxYhBDlHZ/sRadXUayJzU3Es9wyw8WURBQJaajQrBYMHhh+AAAoJ
+EHEs9wyw8WURDyEP/iD903EcaiZP68IqUBsdHMxOaxnKZD9H2RTBaTjR6r9UjCOf
+bomXpVzL0dMZw1nHIE7u2VT++5wk+QvcN7epBgOWUb6tNcv3nI3vqMGRR+fKW15V
+J1sUwMOKGC4vlbLRVRWd2bb+oPZWeteOxNIqu/8DHDFHg3LtoYxWbrMYHhvd0ben
+B9GvwoqeBaqAeERKYCEoPZRB5O6ZHccX2HacjwFs4uYvIoRg4WI+ODXVHXCgOVZq
+yRuVAuQUjwkLbKL1vxJ01EWzWwRI6cY9mngFXNTHEkoxNyjzlfpn/YWheRiwpwg+
+ymDL4oj1KHNq06zNl38dZCd0rde3OFNuF904H6D+reYL50YA9lkL9mRtlaiYyo1J
+SOOjdr+qxuelfbLgDSeM75YVSiYiZZO8DWr2Cq/SNp47z4T4Il/yhQ6eAstZOIkF
+KQlBjr+ZtLdUu67sPdgPoT842IwSrRTrirEUd6cyADbRggPHrOoYEooBCrCgDYCM
+K1xxG9f6Q42yvL1zWKollibsvJF8MVwgkWfJJyhLYylmJ8osvX9LNdCJZErVrRTz
+wAM00crp/KIiIDCREEgE+5BiuGdM70gSuy3JXSs78JHA4l2tu1mDBrMxNR+C8lpj
+1pnLFHTfGYwHQSwKm42/JZqbePh6LKblUdS5Np1dl0tk5DDHBluRzhx16H7E
+=lwu7
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/tools/arvbox/lib/arvbox/docker/Dockerfile.base b/tools/arvbox/lib/arvbox/docker/Dockerfile.base
new file mode 100644 (file)
index 0000000..162edc9
--- /dev/null
@@ -0,0 +1,111 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM debian:9
+
+ENV DEBIAN_FRONTEND noninteractive
+
+RUN apt-get update && \
+    apt-get -yq --no-install-recommends -o Acquire::Retries=6 install \
+    postgresql-9.6 git build-essential runit curl libpq-dev \
+    libcurl4-openssl-dev libssl1.0-dev zlib1g-dev libpcre3-dev \
+    openssh-server python-setuptools netcat-traditional \
+    python-epydoc graphviz bzip2 less sudo virtualenv \
+    libpython-dev fuse libfuse-dev python-pip python-yaml \
+    pkg-config libattr1-dev python-llfuse python-pycurl \
+    libwww-perl libio-socket-ssl-perl libcrypt-ssleay-perl \
+    libjson-perl nginx gitolite3 lsof libreadline-dev \
+    apt-transport-https ca-certificates slurm-wlm \
+    linkchecker python3-virtualenv python-virtualenv xvfb iceweasel \
+    libgnutls28-dev python3-dev vim cadaver cython gnupg dirmngr \
+    libsecret-1-dev r-base r-cran-testthat libxml2-dev pandoc \
+    python3-setuptools python3-pip && \
+    apt-get clean
+
+ENV RUBYVERSION_MINOR 2.3
+ENV RUBYVERSION 2.3.5
+
+# Install Ruby from source
+RUN cd /tmp && \
+ curl -f http://cache.ruby-lang.org/pub/ruby/${RUBYVERSION_MINOR}/ruby-${RUBYVERSION}.tar.gz | tar -xzf - && \
+ cd ruby-${RUBYVERSION} && \
+ ./configure --disable-install-doc && \
+ make && \
+ make install && \
+ cd /tmp && \
+ rm -rf ruby-${RUBYVERSION}
+
+ENV GEM_HOME /var/lib/gems
+ENV GEM_PATH /var/lib/gems
+ENV PATH $PATH:/var/lib/gems/bin
+
+ENV GOVERSION 1.10.1
+
+# Install golang binary
+RUN curl -f http://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz | \
+    tar -C /usr/local -xzf -
+
+ENV PATH ${PATH}:/usr/local/go/bin
+
+VOLUME /var/lib/docker
+VOLUME /var/log/nginx
+VOLUME /etc/ssl/private
+
+ADD 58118E89F3A912897C070ADBF76221572C52609D.asc /tmp/
+RUN apt-key add --no-tty /tmp/58118E89F3A912897C070ADBF76221572C52609D.asc && \
+    rm -f /tmp/58118E89F3A912897C070ADBF76221572C52609D.asc
+
+RUN mkdir -p /etc/apt/sources.list.d && \
+    echo deb https://apt.dockerproject.org/repo debian-stretch main > /etc/apt/sources.list.d/docker.list && \
+    apt-get update && \
+    apt-get -yq --no-install-recommends install docker-engine=17.05.0~ce-0~debian-stretch && \
+    apt-get clean
+
+RUN rm -rf /var/lib/postgresql && mkdir -p /var/lib/postgresql
+
+ENV PJSVERSION=1.9.8
+# bitbucket is the origin, but downloads fail sometimes, so use our own mirror instead.
+#ENV PJSURL=https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-${PJSVERSION}-linux-x86_64.tar.bz2
+ENV PJSURL=http://cache.arvados.org/phantomjs-${PJSVERSION}-linux-x86_64.tar.bz2
+
+RUN set -e && \
+ curl -L -f ${PJSURL} | tar -C /usr/local -xjf - && \
+ ln -s ../phantomjs-${PJSVERSION}-linux-x86_64/bin/phantomjs /usr/local/bin
+
+ENV GDVERSION=v0.23.0
+ENV GDURL=https://github.com/mozilla/geckodriver/releases/download/$GDVERSION/geckodriver-$GDVERSION-linux64.tar.gz
+RUN set -e && curl -L -f ${GDURL} | tar -C /usr/local/bin -xzf - geckodriver
+
+RUN pip install -U setuptools
+
+ENV NODEVERSION v6.11.4
+
+# Install nodejs binary
+RUN curl -L -f https://nodejs.org/dist/${NODEVERSION}/node-${NODEVERSION}-linux-x64.tar.xz | tar -C /usr/local -xJf - && \
+    ln -s ../node-${NODEVERSION}-linux-x64/bin/node ../node-${NODEVERSION}-linux-x64/bin/npm /usr/local/bin
+
+# Set UTF-8 locale
+RUN echo en_US.UTF-8 UTF-8 > /etc/locale.gen && locale-gen
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US:en
+ENV LC_ALL en_US.UTF-8
+
+ARG arvados_version
+RUN echo arvados_version is git commit $arvados_version
+
+ADD fuse.conf /etc/
+
+ADD crunch-setup.sh gitolite.rc \
+    keep-setup.sh common.sh createusers.sh \
+    logger runsu.sh waitforpostgres.sh \
+    yml_override.py api-setup.sh \
+    go-setup.sh \
+    /usr/local/lib/arvbox/
+
+ADD runit /etc/runit
+
+# Start the supervisor.
+ENV SVDIR /etc/service
+STOPSIGNAL SIGINT
+CMD ["/sbin/runit"]
diff --git a/tools/arvbox/lib/arvbox/docker/Dockerfile.demo b/tools/arvbox/lib/arvbox/docker/Dockerfile.demo
new file mode 100644 (file)
index 0000000..dbfa3f1
--- /dev/null
@@ -0,0 +1,43 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM arvados/arvbox-base
+ARG arvados_version
+ARG sso_version=master
+ARG composer_version=master
+ARG workbench2_version=master
+
+RUN cd /usr/src && \
+    git clone --no-checkout https://github.com/curoverse/arvados.git && \
+    git -C arvados checkout ${arvados_version} && \
+    git clone --no-checkout https://github.com/curoverse/sso-devise-omniauth-provider.git sso && \
+    git -C sso checkout ${sso_version} && \
+    git clone --no-checkout https://github.com/curoverse/composer.git && \
+    git -C composer checkout ${composer_version} && \
+    git clone --no-checkout https://github.com/curoverse/arvados-workbench2.git workbench2 && \
+    git -C workbench2 checkout ${workbench2_version}
+
+ADD service/ /var/lib/arvbox/service
+RUN ln -sf /var/lib/arvbox/service /etc
+RUN mkdir -p /var/lib/arvados
+RUN echo "production" > /var/lib/arvados/api_rails_env
+RUN echo "production" > /var/lib/arvados/sso_rails_env
+RUN echo "production" > /var/lib/arvados/workbench_rails_env
+
+RUN chown -R 1000:1000 /usr/src && /usr/local/lib/arvbox/createusers.sh
+
+RUN sudo -u arvbox /var/lib/arvbox/service/composer/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/workbench2/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/keep-web/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/sso/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/api/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/workbench/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/doc/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/vm/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/keepproxy/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/arv-git-httpd/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/crunch-dispatch-local/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/websockets/run-service --only-deps
+RUN sudo -u arvbox /usr/local/lib/arvbox/keep-setup.sh --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/sdk/run-service
diff --git a/tools/arvbox/lib/arvbox/docker/Dockerfile.dev b/tools/arvbox/lib/arvbox/docker/Dockerfile.dev
new file mode 100644 (file)
index 0000000..e6e0397
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM arvados/arvbox-base
+ARG arvados_version
+
+ADD service/ /var/lib/arvbox/service
+RUN ln -sf /var/lib/arvbox/service /etc
+RUN mkdir -p /var/lib/arvados
+RUN echo "development" > /var/lib/arvados/api_rails_env
+RUN echo "development" > /var/lib/arvados/sso_rails_env
+RUN echo "development" > /var/lib/arvados/workbench_rails_env
+
+RUN mkdir /etc/test-service && ln -sf /var/lib/arvbox/service/postgres /etc/test-service
diff --git a/tools/arvbox/lib/arvbox/docker/api-setup.sh b/tools/arvbox/lib/arvbox/docker/api-setup.sh
new file mode 100755 (executable)
index 0000000..0f28383
--- /dev/null
@@ -0,0 +1,101 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+cd /usr/src/arvados/services/api
+
+if test -s /var/lib/arvados/api_rails_env ; then
+  export RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
+else
+  export RAILS_ENV=development
+fi
+
+set -u
+
+if ! test -s /var/lib/arvados/api_uuid_prefix ; then
+    ruby -e 'puts "#{rand(2**64).to_s(36)[0,5]}"' > /var/lib/arvados/api_uuid_prefix
+fi
+uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
+
+if ! test -s /var/lib/arvados/api_secret_token ; then
+    ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/api_secret_token
+fi
+secret_token=$(cat /var/lib/arvados/api_secret_token)
+
+if ! test -s /var/lib/arvados/blob_signing_key ; then
+    ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/blob_signing_key
+fi
+blob_signing_key=$(cat /var/lib/arvados/blob_signing_key)
+
+if ! test -s /var/lib/arvados/management_token ; then
+    ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/management_token
+fi
+management_token=$(cat /var/lib/arvados/management_token)
+
+sso_app_secret=$(cat /var/lib/arvados/sso_app_secret)
+
+if test -s /var/lib/arvados/vm-uuid ; then
+    vm_uuid=$(cat /var/lib/arvados/vm-uuid)
+else
+    vm_uuid=$uuid_prefix-2x53u-$(ruby -e 'puts rand(2**400).to_s(36)[0,15]')
+    echo $vm_uuid > /var/lib/arvados/vm-uuid
+fi
+
+cat >config/application.yml <<EOF
+$RAILS_ENV:
+  uuid_prefix: $uuid_prefix
+  secret_token: $secret_token
+  blob_signing_key: $blob_signing_key
+  sso_app_secret: $sso_app_secret
+  sso_app_id: arvados-server
+  sso_provider_url: "https://$localip:${services[sso]}"
+  sso_insecure: false
+  workbench_address: "https://$localip/"
+  websocket_address: "wss://$localip:${services[websockets-ssl]}/websocket"
+  git_repo_ssh_base: "git@$localip:"
+  git_repo_https_base: "http://$localip:${services[arv-git-httpd]}/"
+  new_users_are_active: true
+  auto_admin_first_user: true
+  auto_setup_new_users: true
+  auto_setup_new_users_with_vm_uuid: $vm_uuid
+  auto_setup_new_users_with_repository: true
+  default_collection_replication: 1
+  docker_image_formats: ["v2"]
+  keep_web_service_url: https://$localip:${services[keep-web-ssl]}/
+  ManagementToken: $management_token
+EOF
+
+(cd config && /usr/local/lib/arvbox/yml_override.py application.yml)
+
+if ! test -f /var/lib/arvados/api_database_pw ; then
+    ruby -e 'puts rand(2**128).to_s(36)' > /var/lib/arvados/api_database_pw
+fi
+database_pw=$(cat /var/lib/arvados/api_database_pw)
+
+if ! (psql postgres -c "\du" | grep "^ arvados ") >/dev/null ; then
+    psql postgres -c "create user arvados with password '$database_pw'"
+    psql postgres -c "ALTER USER arvados CREATEDB;"
+fi
+
+sed "s/password:.*/password: $database_pw/" <config/database.yml.example >config/database.yml
+
+if ! test -f /var/lib/arvados/api_database_setup ; then
+   bundle exec rake db:setup
+   touch /var/lib/arvados/api_database_setup
+fi
+
+if ! test -s /var/lib/arvados/superuser_token ; then
+    superuser_tok=$(bundle exec ./script/create_superuser_token.rb)
+    echo "$superuser_tok" > /var/lib/arvados/superuser_token
+fi
+
+rm -rf tmp
+mkdir -p tmp/cache
+
+bundle exec rake db:migrate
diff --git a/tools/arvbox/lib/arvbox/docker/common.sh b/tools/arvbox/lib/arvbox/docker/common.sh
new file mode 100644 (file)
index 0000000..bbd11f0
--- /dev/null
@@ -0,0 +1,88 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+
+export PATH=${PATH}:/usr/local/go/bin:/var/lib/gems/bin
+export GEM_HOME=/var/lib/gems
+export GEM_PATH=/var/lib/gems
+export npm_config_cache=/var/lib/npm
+export npm_config_cache_min=Infinity
+export R_LIBS=/var/lib/Rlibs
+
+if test -s /var/run/localip_override ; then
+    localip=$(cat /var/run/localip_override)
+else
+    defaultdev=$(/sbin/ip route|awk '/default/ { print $5 }')
+    localip=$(ip addr show $defaultdev | grep 'inet ' | sed 's/ *inet \(.*\)\/.*/\1/')
+fi
+
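+# Map each arvbox service name to the port it listens on.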
+declare -A services
+services=(
+  [workbench]=443
+  [workbench2]=3000
+  [workbench2-ssl]=3001
+  [api]=8004
+  [controller]=8003
+  [controller-ssl]=8000
+  [sso]=8900
+  [composer]=4200
+  [arv-git-httpd]=9001
+  [keep-web]=9003
+  [keep-web-ssl]=9002
+  [keepproxy]=25100
+  [keepstore0]=25107
+  [keepstore1]=25108
+  [ssh]=22
+  [doc]=8001
+  [websockets]=8005
+  [websockets-ssl]=8002
+)
+
+if test "$(id arvbox -u 2>/dev/null)" = 0 ; then
+    PGUSER=postgres
+    PGGROUP=postgres
+else
+    PGUSER=arvbox
+    PGGROUP=arvbox
+fi
+
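+# Run "bundler install", installing bundler itself first (from the
+# local gem cache when available).  All gem operations are serialized
+# with flock to avoid concurrent installs into the shared GEM_HOME.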
+run_bundler() {
+    if test -f Gemfile.lock ; then
+        frozen=--frozen
+    else
+        frozen=""
+    fi
+    if ! test -x /var/lib/gems/bin/bundler ; then
+        bundlergem=$(ls -r $GEM_HOME/cache/bundler-*.gem 2>/dev/null | head -n1 || true)
+        if test -n "$bundlergem" ; then
+            flock /var/lib/gems/gems.lock gem install --local --no-document $bundlergem
+        else
+            flock /var/lib/gems/gems.lock gem install --no-document bundler
+        fi
+    fi
+    if ! flock /var/lib/gems/gems.lock bundler install --path $GEM_HOME --local --no-deployment $frozen "$@" ; then
+        flock /var/lib/gems/gems.lock bundler install --path $GEM_HOME --no-deployment $frozen "$@"
+    fi
+}
+
+PYCMD=""
+pip_install() {
+    pushd /var/lib/pip
+    for p in $(ls http*.tar.gz) $(ls http*.tar.bz2) $(ls http*.whl) $(ls http*.zip) ; do
+        if test -f $p ; then
+            ln -sf $p $(echo $p | sed 's/.*%2F\(.*\)/\1/')
+        fi
+    done
+    popd
+
+    if [ "$PYCMD" = "python3" ]; then
+       if ! pip3 install --no-index --find-links /var/lib/pip $1 ; then
+            pip3 install $1
+       fi
+    else
+       if ! pip install --no-index --find-links /var/lib/pip $1 ; then
+            pip install $1
+       fi
+    fi
+}
diff --git a/tools/arvbox/lib/arvbox/docker/createusers.sh b/tools/arvbox/lib/arvbox/docker/createusers.sh
new file mode 100755 (executable)
index 0000000..a4689f0
--- /dev/null
@@ -0,0 +1,48 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e -o pipefail
+
+if ! grep "^arvbox:" /etc/passwd >/dev/null 2>/dev/null ; then
+    HOSTUID=$(ls -nd /usr/src/arvados | sed 's/ */ /' | cut -d' ' -f4)
+    HOSTGID=$(ls -nd /usr/src/arvados | sed 's/ */ /' | cut -d' ' -f5)
+
+    mkdir -p /var/lib/arvados/git /var/lib/gems \
+          /var/lib/passenger /var/lib/gopath \
+          /var/lib/pip /var/lib/npm
+
+    groupadd --gid $HOSTGID --non-unique arvbox
+    groupadd --gid $HOSTGID --non-unique git
+    useradd --home-dir /var/lib/arvados \
+            --uid $HOSTUID --gid $HOSTGID \
+            --non-unique \
+            --groups docker \
+            --shell /bin/bash \
+            arvbox
+    useradd --home-dir /var/lib/arvados/git --uid $HOSTUID --gid $HOSTGID --non-unique git
+    useradd --groups docker crunch
+
+    chown arvbox:arvbox -R /usr/local /var/lib/arvados /var/lib/gems \
+          /var/lib/passenger /var/lib/postgresql \
+          /var/lib/nginx /var/log/nginx /etc/ssl/private \
+          /var/lib/gopath /var/lib/pip /var/lib/npm
+
+    mkdir -p /var/lib/gems/ruby
+    chown arvbox:arvbox -R /var/lib/gems/ruby
+
+    mkdir -p /tmp/crunch0 /tmp/crunch1
+    chown crunch:crunch -R /tmp/crunch0 /tmp/crunch1
+
+    echo "arvbox    ALL=(crunch) NOPASSWD: ALL" >> /etc/sudoers
+fi
+
+if ! grep "^fuse:" /etc/group >/dev/null 2>/dev/null ; then
+    if test -c /dev/fuse ; then
+        FUSEGID=$(ls -nd /dev/fuse | sed 's/ */ /' | cut -d' ' -f5)
+        groupadd --gid $FUSEGID --non-unique fuse
+        adduser arvbox fuse
+        adduser crunch fuse
+    fi
+fi
diff --git a/tools/arvbox/lib/arvbox/docker/crunch-setup.sh b/tools/arvbox/lib/arvbox/docker/crunch-setup.sh
new file mode 100755 (executable)
index 0000000..a36e589
--- /dev/null
@@ -0,0 +1,40 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -eux -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
+
+flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/crunchstat"
+flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/sdk/go/crunchrunner"
+install $GOPATH/bin/crunchstat $GOPATH/bin/crunchrunner /usr/local/bin
+
+if test -s /var/lib/arvados/api_rails_env ; then
+  RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
+else
+  RAILS_ENV=development
+fi
+
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
+export ARVADOS_API_HOST_INSECURE=1
+export ARVADOS_API_TOKEN=$(cat /usr/src/arvados/services/api/superuser_token)
+export CRUNCH_JOB_BIN=/usr/src/arvados/sdk/cli/bin/crunch-job
+export PERLLIB=/usr/src/arvados/sdk/perl/lib
+export CRUNCH_TMP=/tmp/$1
+export CRUNCH_DISPATCH_LOCKFILE=/var/lock/$1-dispatch
+export CRUNCH_JOB_DOCKER_BIN=docker
+export HOME=/tmp/$1
+export CRUNCH_JOB_DOCKER_RUN_ARGS=--net=host
+# Avoid excessive stat() calls on /etc/localtime by pinning TZ.
+export TZ='America/New_York'
+
+cd /usr/src/arvados/services/api
+if test "$1" = "crunch0" ; then
+    exec bundle exec ./script/crunch-dispatch.rb $RAILS_ENV --jobs --pipelines
+else
+    exec bundle exec ./script/crunch-dispatch.rb $RAILS_ENV --jobs
+fi
diff --git a/tools/arvbox/lib/arvbox/docker/daemon.json b/tools/arvbox/lib/arvbox/docker/daemon.json
new file mode 100644 (file)
index 0000000..a19cbdb
--- /dev/null
@@ -0,0 +1,3 @@
+{
+    "storage-driver": "overlay2"
+}
diff --git a/tools/arvbox/lib/arvbox/docker/fuse.conf b/tools/arvbox/lib/arvbox/docker/fuse.conf
new file mode 100644 (file)
index 0000000..4bfe73d
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+user_allow_other
diff --git a/tools/arvbox/lib/arvbox/docker/gitolite.rc b/tools/arvbox/lib/arvbox/docker/gitolite.rc
new file mode 100644 (file)
index 0000000..07a9ce0
--- /dev/null
@@ -0,0 +1,217 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This is based on the default Gitolite configuration file with the following
+# changes applied as described here:
+# http://doc.arvados.org/install/install-arv-git-httpd.html
+
+# configuration variables for gitolite
+
+# This file is in perl syntax.  But you do NOT need to know perl to edit it --
+# just mind the commas, use single quotes unless you know what you're doing,
+# and make sure the brackets and braces stay matched up!
+
+# (Tip: perl allows a comma after the last item in a list also!)
+
+# HELP for commands can be had by running the command with "-h".
+
+# HELP for all the other FEATURES can be found in the documentation (look for
+# "list of non-core programs shipped with gitolite" in the master index) or
+# directly in the corresponding source file.
+
+my $repo_aliases;
+my $aliases_src = "$ENV{HOME}/.gitolite/arvadosaliases.pl";
+if ($ENV{HOME} && (-e $aliases_src)) {
+    $repo_aliases = do $aliases_src;
+}
+$repo_aliases ||= {};
+
+%RC = (
+
+    REPO_ALIASES => $repo_aliases,
+
+    # ------------------------------------------------------------------
+
+    # default umask gives you perms of '0700'; see the rc file docs for
+    # how/why you might change this
+    UMASK                           =>  0022,
+
+    # look for "git-config" in the documentation
+    GIT_CONFIG_KEYS                 =>  '',
+
+    # comment out if you don't need all the extra detail in the logfile
+    LOG_EXTRA                       =>  1,
+    # logging options
+    # 1. leave this section as is for 'normal' gitolite logging (default)
+    # 2. uncomment this line to log ONLY to syslog:
+    # LOG_DEST                      => 'syslog',
+    # 3. uncomment this line to log to syslog and the normal gitolite log:
+    # LOG_DEST                      => 'syslog,normal',
+    # 4. prefixing "repo-log," to any of the above will **also** log just the
+    #    update records to "gl-log" in the bare repo directory:
+    # LOG_DEST                      => 'repo-log,normal',
+    # LOG_DEST                      => 'repo-log,syslog',
+    # LOG_DEST                      => 'repo-log,syslog,normal',
+
+    # roles.  add more roles (like MANAGER, TESTER, ...) here.
+    #   WARNING: if you make changes to this hash, you MUST run 'gitolite
+    #   compile' afterward, and possibly also 'gitolite trigger POST_COMPILE'
+    ROLES => {
+        READERS                     =>  1,
+        WRITERS                     =>  1,
+    },
+
+    # enable caching (currently only Redis).  PLEASE RTFM BEFORE USING!!!
+    # CACHE                         =>  'Redis',
+
+    # ------------------------------------------------------------------
+
+    # rc variables used by various features
+
+    # the 'info' command prints this as additional info, if it is set
+        # SITE_INFO                 =>  'Please see http://blahblah/gitolite for more help',
+
+    # the CpuTime feature uses these
+        # display user, system, and elapsed times to user after each git operation
+        # DISPLAY_CPU_TIME          =>  1,
+        # display a warning if total CPU times (u, s, cu, cs) crosses this limit
+        # CPU_TIME_WARN_LIMIT       =>  0.1,
+
+    # the Mirroring feature needs this
+        # HOSTNAME                  =>  "foo",
+
+    # TTL for redis cache; PLEASE SEE DOCUMENTATION BEFORE UNCOMMENTING!
+        # CACHE_TTL                 =>  600,
+
+    # ------------------------------------------------------------------
+
+    # suggested locations for site-local gitolite code (see cust.html)
+
+        # this one is managed directly on the server
+        # LOCAL_CODE                =>  "$ENV{HOME}/local",
+
+        # or you can use this, which lets you put everything in a subdirectory
+        # called "local" in your gitolite-admin repo.  For a SECURITY WARNING
+        # on this, see http://gitolite.com/gitolite/non-core.html#pushcode
+        # LOCAL_CODE                =>  "$rc{GL_ADMIN_BASE}/local",
+
+    # ------------------------------------------------------------------
+
+    # List of commands and features to enable
+
+    ENABLE => [
+
+        # COMMANDS
+
+            # These are the commands enabled by default
+            'help',
+            'desc',
+            'info',
+            'perms',
+            'writable',
+
+            # Uncomment or add new commands here.
+            # 'create',
+            # 'fork',
+            # 'mirror',
+            # 'readme',
+            # 'sskm',
+            # 'D',
+
+        # These FEATURES are enabled by default.
+
+            # essential (unless you're using smart-http mode)
+            'ssh-authkeys',
+
+            # creates git-config entries from gitolite.conf file entries like 'config foo.bar = baz'
+            'git-config',
+
+            # creates git-daemon-export-ok files; if you don't use git-daemon, comment this out
+            'daemon',
+
+            # creates projects.list file; if you don't use gitweb, comment this out
+            'gitweb',
+
+        # These FEATURES are disabled by default; uncomment to enable.  If you
+        # need to add new ones, ask on the mailing list :-)
+
+        # user-visible behaviour
+
+            # prevent wild repos auto-create on fetch/clone
+            # 'no-create-on-read',
+            # no auto-create at all (don't forget to enable the 'create' command!)
+            # 'no-auto-create',
+
+            # access a repo by another (possibly legacy) name
+            'Alias',
+
+            # give some users direct shell access.  See documentation in
+            # sts.html for details on the following two choices.
+            # "Shell $ENV{HOME}/.gitolite.shell-users",
+            # 'Shell alice bob',
+
+            # set default roles from lines like 'option default.roles-1 = ...', etc.
+            # 'set-default-roles',
+
+            # show more detailed messages on deny
+            # 'expand-deny-messages',
+
+            # show a message of the day
+            # 'Motd',
+
+        # system admin stuff
+
+            # enable mirroring (don't forget to set the HOSTNAME too!)
+            # 'Mirroring',
+
+            # allow people to submit pub files with more than one key in them
+            # 'ssh-authkeys-split',
+
+            # selective read control hack
+            # 'partial-copy',
+
+            # manage local, gitolite-controlled, copies of read-only upstream repos
+            # 'upstream',
+
+            # updates 'description' file instead of 'gitweb.description' config item
+            # 'cgit',
+
+            # allow repo-specific hooks to be added
+            # 'repo-specific-hooks',
+
+        # performance, logging, monitoring...
+
+            # be nice
+            # 'renice 10',
+
+            # log CPU times (user, system, cumulative user, cumulative system)
+            # 'CpuTime',
+
+        # syntactic_sugar for gitolite.conf and included files
+
+            # allow backslash-escaped continuation lines in gitolite.conf
+            # 'continuation-lines',
+
+            # create implicit user groups from directory names in keydir/
+            # 'keysubdirs-as-groups',
+
+            # allow simple line-oriented macros
+            # 'macros',
+
+        # Kindergarten mode
+
+            # disallow various things that sensible people shouldn't be doing anyway
+            # 'Kindergarten',
+    ],
+
+);
+
+# ------------------------------------------------------------------------------
+# per perl rules, this should be the last line in such a file:
+1;
+
+# Local variables:
+# mode: perl
+# End:
+# vim: set syn=perl:
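The ENABLE list above is ordinary Perl data, so turning a feature on is just uncommenting its entry (and setting any variable its comment mentions, e.g. HOSTNAME for 'Mirroring'). A hedged sketch of checking what gitolite actually picked up, using gitolite 3's query-rc subcommand:

# as the git user, print the effective ENABLE list from ~/.gitolite.rc
su - git -c 'gitolite query-rc ENABLE'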
diff --git a/tools/arvbox/lib/arvbox/docker/go-setup.sh b/tools/arvbox/lib/arvbox/docker/go-setup.sh
new file mode 100644 (file)
index 0000000..f068ce6
--- /dev/null
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+mkdir -p /var/lib/gopath
+cd /var/lib/gopath
+
+export GOPATH=$PWD
+mkdir -p "$GOPATH/src/git.curoverse.com"
+ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
+
+flock /var/lib/gopath/gopath.lock go get -t github.com/kardianos/govendor
+cd "$GOPATH/src/git.curoverse.com/arvados.git"
+flock /var/lib/gopath/gopath.lock go get -v -d ...
+flock /var/lib/gopath/gopath.lock "$GOPATH/bin/govendor" sync
diff --git a/tools/arvbox/lib/arvbox/docker/keep-setup.sh b/tools/arvbox/lib/arvbox/docker/keep-setup.sh
new file mode 100755 (executable)
index 0000000..f16cb44
--- /dev/null
@@ -0,0 +1,61 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+sleep 2
+set -eux -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
+
+flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/keepstore"
+install $GOPATH/bin/keepstore /usr/local/bin
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+mkdir -p /var/lib/arvados/$1
+
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
+export ARVADOS_API_HOST_INSECURE=1
+export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
+
+set +e
+read -rd $'\000' keepservice <<EOF
+{
+ "service_host":"$localip",
+ "service_port":$2,
+ "service_ssl_flag":false,
+ "service_type":"disk"
+}
+EOF
+set -e
+
+if test -s /var/lib/arvados/$1-uuid ; then
+    keep_uuid=$(cat /var/lib/arvados/$1-uuid)
+    arv keep_service update --uuid $keep_uuid --keep-service "$keepservice"
+else
+    UUID=$(arv --format=uuid keep_service create --keep-service "$keepservice")
+    echo $UUID > /var/lib/arvados/$1-uuid
+fi
+
+management_token=$(cat /var/lib/arvados/management_token)
+
+set +e
+killall -HUP keepproxy
+
+cat >/var/lib/arvados/$1.yml <<EOF
+Listen: ":$2"
+BlobSigningKeyFile: /var/lib/arvados/blob_signing_key
+SystemAuthTokenFile: /var/lib/arvados/superuser_token
+ManagementToken: $management_token
+MaxBuffers: 20
+Volumes:
+  - Type: Directory
+    Root: /var/lib/arvados/$1
+EOF
+
+exec /usr/local/bin/keepstore -config=/var/lib/arvados/$1.yml
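Two idioms above recur in the keepproxy and gitolite scripts as well: read -rd $'\000' slurps an entire heredoc into one variable (read returns non-zero at EOF because no NUL delimiter ever arrives, which is why it sits inside a set +e / set -e bracket), and the resulting JSON is then registered with arv keep_service create/update. A standalone sketch of just the read idiom:

set +e
read -rd $'\000' payload <<EOF
{"example": true}
EOF
set -e
echo "$payload"   # prints the heredoc body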
diff --git a/tools/arvbox/lib/arvbox/docker/logger b/tools/arvbox/lib/arvbox/docker/logger
new file mode 100755 (executable)
index 0000000..f2f2433
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec svlogd -tt ./main
diff --git a/tools/arvbox/lib/arvbox/docker/runit-docker/.gitignore b/tools/arvbox/lib/arvbox/docker/runit-docker/.gitignore
new file mode 100644 (file)
index 0000000..bbf313b
--- /dev/null
@@ -0,0 +1,32 @@
+# Object files
+*.o
+*.ko
+*.obj
+*.elf
+
+# Precompiled Headers
+*.gch
+*.pch
+
+# Libraries
+*.lib
+*.a
+*.la
+*.lo
+
+# Shared objects (inc. Windows DLLs)
+*.dll
+*.so
+*.so.*
+*.dylib
+
+# Executables
+*.exe
+*.out
+*.app
+*.i*86
+*.x86_64
+*.hex
+
+# Debug files
+*.dSYM/
diff --git a/tools/arvbox/lib/arvbox/docker/runit-docker/LICENSE b/tools/arvbox/lib/arvbox/docker/runit-docker/LICENSE
new file mode 100644 (file)
index 0000000..d158667
--- /dev/null
@@ -0,0 +1,28 @@
+Copyright (c) 2015, Kosma Moczek
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of runit-docker nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/tools/arvbox/lib/arvbox/docker/runit-docker/Makefile b/tools/arvbox/lib/arvbox/docker/runit-docker/Makefile
new file mode 100644 (file)
index 0000000..9a28963
--- /dev/null
@@ -0,0 +1,18 @@
+CFLAGS=-std=c99 -Wall -O2 -fPIC -D_POSIX_SOURCE -D_GNU_SOURCE
+LDLIBS=-ldl
+
+PROGNAME=runit-docker
+
+all: $(PROGNAME).so
+
+%.so: %.c
+       gcc -shared $(CFLAGS) $(LDLIBS) -o $@ $^
+
+install: runit-docker.so
+       mkdir -p $(DESTDIR)/sbin
+       mkdir -p $(DESTDIR)/lib
+       install -m 755 $(PROGNAME) $(DESTDIR)/sbin/
+       install -m 755 $(PROGNAME).so $(DESTDIR)/lib/
+
+clean:
+       $(RM) $(PROGNAME).so
diff --git a/tools/arvbox/lib/arvbox/docker/runit-docker/README.md b/tools/arvbox/lib/arvbox/docker/runit-docker/README.md
new file mode 100644 (file)
index 0000000..1bcb8cc
--- /dev/null
@@ -0,0 +1,24 @@
+# runit-docker
+
+Docker and `runsvdir` don't quite agree on what each signal means, causing
+TONS of frustration when attempting to use `runsvdir` as init under Docker.
+`runit-docker` is a plug'n'play adapter library which does signal translation
+without the overhead and nuisance of running a nanny process.
+
+## Features
+
+* Pressing Ctrl-C does a clean shutdown.
+* `docker stop` does a clean shutdown.
+
+Under the hood, `runit-docker` translates `SIGTERM` and `SIGINT` to `SIGHUP`.
+
+## Usage
+
+* Build with `make`, install with `make install`.
+* Add `CMD ["/sbin/runit-docker"]` to your `Dockerfile`.
+* Run `debian/rules clean build binary` to build a Debian package.
+
+## Author
+
+runit-docker was written by Kosma Moczek &lt;kosma.moczek@pixers.pl&gt; during a single Scrum
+planning meeting. Damn meetings.
diff --git a/tools/arvbox/lib/arvbox/docker/runit-docker/debian/changelog b/tools/arvbox/lib/arvbox/docker/runit-docker/debian/changelog
new file mode 100644 (file)
index 0000000..7d8689f
--- /dev/null
@@ -0,0 +1,12 @@
+runit-docker (1.1) unstable; urgency=low
+
+  * Simplify logic.
+  * Install for SIGINT as well.
+
+ -- Kosma Moczek <kosma@kosma.pl>  Mon, 11 May 2015 12:23:59 +0000
+
+runit-docker (1.0) unstable; urgency=low
+
+  * Initial release
+
+ -- Kosma Moczek <kosma@kosma.pl>  Mon, 11 May 2015 12:23:59 +0000
diff --git a/tools/arvbox/lib/arvbox/docker/runit-docker/debian/compat b/tools/arvbox/lib/arvbox/docker/runit-docker/debian/compat
new file mode 100644 (file)
index 0000000..ec63514
--- /dev/null
@@ -0,0 +1 @@
+9
diff --git a/tools/arvbox/lib/arvbox/docker/runit-docker/debian/control b/tools/arvbox/lib/arvbox/docker/runit-docker/debian/control
new file mode 100644 (file)
index 0000000..4060915
--- /dev/null
@@ -0,0 +1,14 @@
+Source: runit-docker
+Section: contrib/admin
+Priority: optional
+Maintainer: Kosma Moczek <kosma@kosma.pl>
+Build-Depends: debhelper (>= 9)
+Standards-Version: 3.9.5
+Homepage: https://github.com/kosma/runit-docker
+#Vcs-Git: git://anonscm.debian.org/collab-maint/runit-docker.git
+#Vcs-Browser: http://anonscm.debian.org/?p=collab-maint/runit-docker.git;a=summary
+
+Package: runit-docker
+Architecture: any
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: painlessly use runit in Docker containers
diff --git a/tools/arvbox/lib/arvbox/docker/runit-docker/debian/copyright b/tools/arvbox/lib/arvbox/docker/runit-docker/debian/copyright
new file mode 100644 (file)
index 0000000..8679a6a
--- /dev/null
@@ -0,0 +1,31 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: runit-docker
+Source: https://github.com/kosma/runit-docker
+
+Files: *
+Copyright: 2015 Kosma Moczek <kosma@kosma.pl>
+License: MIT
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of runit-docker nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/tools/arvbox/lib/arvbox/docker/runit-docker/debian/docs b/tools/arvbox/lib/arvbox/docker/runit-docker/debian/docs
new file mode 100644 (file)
index 0000000..b43bf86
--- /dev/null
@@ -0,0 +1 @@
+README.md
diff --git a/tools/arvbox/lib/arvbox/docker/runit-docker/debian/rules b/tools/arvbox/lib/arvbox/docker/runit-docker/debian/rules
new file mode 100755 (executable)
index 0000000..ce15cce
--- /dev/null
@@ -0,0 +1,32 @@
+#!/usr/bin/make -f
+# See debhelper(7) (uncomment to enable)
+# output every command that modifies files on the build system.
+#DH_VERBOSE = 1
+
+# see EXAMPLES in dpkg-buildflags(1) and read /usr/share/dpkg/*
+DPKG_EXPORT_BUILDFLAGS = 1
+include /usr/share/dpkg/default.mk
+
+# see FEATURE AREAS in dpkg-buildflags(1)
+#export DEB_BUILD_MAINT_OPTIONS = hardening=+all
+
+# see ENVIRONMENT in dpkg-buildflags(1)
+# package maintainers to append CFLAGS
+#export DEB_CFLAGS_MAINT_APPEND  = -Wall -pedantic
+# package maintainers to append LDFLAGS
+#export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed
+
+
+# main packaging script based on dh7 syntax
+%:
+       dh $@ 
+
+# debmake generated override targets
+# This is example for Cmake (See http://bugs.debian.org/641051 )
+#override_dh_auto_configure:
+#      dh_auto_configure -- \
+#      -DCMAKE_LIBRARY_PATH=$(DEB_HOST_MULTIARCH)
+
+
+
+
diff --git a/tools/arvbox/lib/arvbox/docker/runit-docker/debian/source/format b/tools/arvbox/lib/arvbox/docker/runit-docker/debian/source/format
new file mode 100644 (file)
index 0000000..163aaf8
--- /dev/null
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/tools/arvbox/lib/arvbox/docker/runit-docker/runit-docker b/tools/arvbox/lib/arvbox/docker/runit-docker/runit-docker
new file mode 100755 (executable)
index 0000000..fdbaad5
--- /dev/null
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+export LD_PRELOAD=/lib/runit-docker.so
+exec runsvdir /etc/service
diff --git a/tools/arvbox/lib/arvbox/docker/runit-docker/runit-docker.c b/tools/arvbox/lib/arvbox/docker/runit-docker/runit-docker.c
new file mode 100644 (file)
index 0000000..825a35f
--- /dev/null
@@ -0,0 +1,32 @@
+#include <signal.h>
+#include <dlfcn.h>
+#include <stdlib.h>
+
+
+int sigaction(int signum, const struct sigaction *act, struct sigaction *oldact)
+{
+  static int (*real_sigaction)(int signum, const struct sigaction *act, struct sigaction *oldact) = NULL;
+
+  // Retrieve the real sigaction we just shadowed.
+  if (real_sigaction == NULL) {
+    real_sigaction = (void *) dlsym(RTLD_NEXT, "sigaction");
+    // Prevent further shadowing in children.
+    unsetenv("LD_PRELOAD");
+  }
+
+  if (signum == SIGTERM) {
+    // Skip this handler, it doesn't do what we want.
+    return 0;
+  }
+
+  if (signum == SIGHUP) {
+    // Install this handler for others as well.
+    real_sigaction(SIGTERM, act, oldact);
+    real_sigaction(SIGINT, act, oldact);
+  }
+
+  // Forward the call to the real sigaction.
+  return real_sigaction(signum, act, oldact);
+}
+
+// vim: ts=2 sw=2 et
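The shim relies entirely on LD_PRELOAD: the first intercepted sigaction() call resolves the real libc function via dlsym(RTLD_NEXT, ...), SIGTERM registrations are dropped, and whatever handler a program installs for SIGHUP is also installed for SIGTERM and SIGINT. A hedged smoke test, assuming the library was installed to /lib as in the Makefile above:

# run runsvdir under the shim, then send the signal 'docker stop' would;
# the shim routes it to runsvdir's SIGHUP handler, i.e. a clean shutdown
LD_PRELOAD=/lib/runit-docker.so runsvdir /etc/service &
pid=$!
kill -TERM "$pid"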
diff --git a/tools/arvbox/lib/arvbox/docker/runit/1 b/tools/arvbox/lib/arvbox/docker/runit/1
new file mode 100755 (executable)
index 0000000..35a8b15
--- /dev/null
@@ -0,0 +1,11 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# system one time tasks
+
+PATH=/command:/sbin:/bin:/usr/sbin:/usr/bin
+
+touch /run/runit.stopit
+chmod 0 /run/runit.stopit
diff --git a/tools/arvbox/lib/arvbox/docker/runit/2 b/tools/arvbox/lib/arvbox/docker/runit/2
new file mode 100755 (executable)
index 0000000..5812f3d
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+PATH=/usr/local/bin:/usr/local/sbin:/bin:/sbin:/usr/bin:/usr/sbin:/usr/X11R6/bin
+
+echo
+echo "Arvados-in-a-box starting"
+echo
+
+exec env - PATH=$PATH \
+runsvdir -P $SVDIR
diff --git a/tools/arvbox/lib/arvbox/docker/runit/3 b/tools/arvbox/lib/arvbox/docker/runit/3
new file mode 100755 (executable)
index 0000000..242c035
--- /dev/null
@@ -0,0 +1,18 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+
+PATH=/command:/sbin:/bin:/usr/sbin:/usr/bin
+
+LAST=0
+test -x /run/runit.reboot && LAST=6
+
+echo 'Waiting for services to stop...'
+sv -w196 force-stop /service/*
+sv exit /service/*
+
+echo 'Shutdown...'
+/etc/init.d/rc $LAST
diff --git a/tools/arvbox/lib/arvbox/docker/runit/ctrlaltdel b/tools/arvbox/lib/arvbox/docker/runit/ctrlaltdel
new file mode 100755 (executable)
index 0000000..d4d2190
--- /dev/null
@@ -0,0 +1,17 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+
+PATH=/command:/sbin:/bin:/usr/sbin:/usr/bin
+
+LAST=0
+test -x /run/runit.reboot && LAST=6
+
+echo 'Waiting for services to stop...'
+sv -w196 force-stop $SVDIR/*
+sv exit $SVDIR/*
+
+echo 'Shutdown...'
diff --git a/tools/arvbox/lib/arvbox/docker/runsu.sh b/tools/arvbox/lib/arvbox/docker/runsu.sh
new file mode 100755 (executable)
index 0000000..88d832f
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+HOSTUID=$(ls -nd /usr/src/arvados | sed 's/ */ /' | cut -d' ' -f4)
+HOSTGID=$(ls -nd /usr/src/arvados | sed 's/ */ /' | cut -d' ' -f5)
+
+flock /var/lib/arvados/createusers.lock /usr/local/lib/arvbox/createusers.sh
+
+export HOME=/var/lib/arvados
+
+chown arvbox /dev/stderr
+
+if test -z "$1" ; then
+    exec chpst -u arvbox:arvbox:docker $0-service
+else
+    exec chpst -u arvbox:arvbox:docker $@
+fi
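chpst(8) is runit's process-setup helper; -u arvbox:arvbox:docker switches to the arvbox user and primary group and adds docker as a supplementary group, which is what lets the services reach the Docker socket. A minimal illustration of the flag:

# show the identity a command would run under after the switch
chpst -u arvbox:arvbox:docker id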
diff --git a/tools/arvbox/lib/arvbox/docker/service/api/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/api/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/api/log/run b/tools/arvbox/lib/arvbox/docker/service/api/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/api/run b/tools/arvbox/lib/arvbox/docker/service/api/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/api/run-service b/tools/arvbox/lib/arvbox/docker/service/api/run-service
new file mode 100755 (executable)
index 0000000..f052b5d
--- /dev/null
@@ -0,0 +1,34 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+cd /usr/src/arvados/services/api
+
+if test -s /var/lib/arvados/api_rails_env ; then
+  export RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
+else
+  export RAILS_ENV=development
+fi
+
+run_bundler --without=development
+bundle exec passenger-config build-native-support
+bundle exec passenger-config install-standalone-runtime
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+flock /var/lib/arvados/api.lock /usr/local/lib/arvbox/api-setup.sh
+
+set +u
+if test "$1" = "--only-setup" ; then
+    exit
+fi
+
+exec bundle exec passenger start --port=${services[api]}
diff --git a/tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/log/run b/tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/run b/tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/run-service b/tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/run-service
new file mode 100755 (executable)
index 0000000..9339f23
--- /dev/null
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
+
+flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/arv-git-httpd"
+install $GOPATH/bin/arv-git-httpd /usr/local/bin
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
+export ARVADOS_API_HOST_INSECURE=1
+export PATH="$PATH:/var/lib/arvados/git/bin"
+cd ~git
+
+exec /usr/local/bin/arv-git-httpd \
+    -address=:${services[arv-git-httpd]} \
+    -git-command=/usr/share/gitolite3/gitolite-shell \
+    -gitolite-home=/var/lib/arvados/git \
+    -repo-root=/var/lib/arvados/git/repositories
diff --git a/tools/arvbox/lib/arvbox/docker/service/certificate/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/certificate/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/certificate/log/run b/tools/arvbox/lib/arvbox/docker/service/certificate/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/certificate/run b/tools/arvbox/lib/arvbox/docker/service/certificate/run
new file mode 100755 (executable)
index 0000000..1b062ad
--- /dev/null
@@ -0,0 +1,88 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+if test ! -s /var/lib/arvados/root-cert.pem ; then
+    # req           signing request sub-command
+    # -new          new certificate request
+    # -nodes        "no des" don't encrypt key
+    # -sha256       sign with the SHA-256 digest
+    # -x509         generate self-signed certificate
+    # -subj         certificate subject
+    # -reqexts      certificate request extension for subjectAltName
+    # -extensions   certificate request extension for subjectAltName
+    # -config       certificate generation configuration plus subjectAltName
+    # -out          certificate output
+    # -keyout       private key output
+    # -days         certificate lifetime
+    openssl req \
+           -new \
+           -nodes \
+           -sha256 \
+           -x509 \
+           -subj "/C=US/ST=MA/O=Arvados testing/OU=arvbox/CN=arvbox testing root CA for ${uuid_prefix}" \
+           -extensions x509_ext \
+           -config <(cat /etc/ssl/openssl.cnf \
+                         <(printf "\n[x509_ext]\nbasicConstraints=critical,CA:true,pathlen:0\nkeyUsage=critical,keyCertSign,cRLSign")) \
+            -out /var/lib/arvados/root-cert.pem \
+            -keyout /var/lib/arvados/root-cert.key \
+            -days 365
+    chown arvbox:arvbox /var/lib/arvados/root-cert.*
+fi
+
+if test ! -s /var/lib/arvados/server-cert-${localip}.pem ; then
+
+    if [[ $localip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
+       san=IP:$localip
+    else
+       san=DNS:$localip
+    fi
+
+    # req           signing request sub-command
+    # -new          new certificate request
+    # -nodes        "no des" don't encrypt key
+    # -sha256       sign with the SHA-256 digest
+    # -subj         certificate subject
+    # -reqexts      certificate request extension for subjectAltName
+    # -extensions   certificate request extension for subjectAltName
+    # -config       certificate generation configuration plus subjectAltName
+    # -out          certificate output
+    # -keyout       private key output
+    # -days         certificate lifetime
+    openssl req \
+           -new \
+           -nodes \
+           -sha256 \
+           -subj "/C=US/ST=MA/O=Arvados testing for ${uuid_prefix}/OU=arvbox/CN=localhost" \
+           -reqexts x509_ext \
+           -extensions x509_ext \
+           -config <(cat /etc/ssl/openssl.cnf \
+                         <(printf "\n[x509_ext]\nkeyUsage=critical,digitalSignature,keyEncipherment\nsubjectAltName=DNS:localhost,$san")) \
+            -out /var/lib/arvados/server-cert-${localip}.csr \
+            -keyout /var/lib/arvados/server-cert-${localip}.key \
+            -days 365
+
+    openssl x509 \
+           -req \
+           -in /var/lib/arvados/server-cert-${localip}.csr \
+           -CA /var/lib/arvados/root-cert.pem \
+           -CAkey /var/lib/arvados/root-cert.key \
+           -out /var/lib/arvados/server-cert-${localip}.pem \
+           -set_serial $RANDOM$RANDOM \
+           -extfile <(cat /etc/ssl/openssl.cnf \
+                         <(printf "\n[x509_ext]\nkeyUsage=critical,digitalSignature,keyEncipherment\nsubjectAltName=DNS:localhost,$san")) \
+           -extensions x509_ext
+
+    chown arvbox:arvbox /var/lib/arvados/server-cert-${localip}.*
+fi
+
+cp /var/lib/arvados/root-cert.pem /usr/local/share/ca-certificates/arvados-testing-cert.crt
+update-ca-certificates
+
+sv stop certificate
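The script mints a one-year self-signed root CA, then signs a server certificate whose subjectAltName carries an IP: or DNS: entry for $localip alongside DNS:localhost. A hedged pair of checks on the generated files, using stock openssl subcommands:

# does the server certificate chain back to the arvbox root CA?
openssl verify -CAfile /var/lib/arvados/root-cert.pem \
    "/var/lib/arvados/server-cert-${localip}.pem"
# and does it carry the expected subjectAltName?
openssl x509 -in "/var/lib/arvados/server-cert-${localip}.pem" -noout -text \
    | grep -A1 'Subject Alternative Name'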
diff --git a/tools/arvbox/lib/arvbox/docker/service/composer/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/composer/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/composer/log/run b/tools/arvbox/lib/arvbox/docker/service/composer/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/composer/run b/tools/arvbox/lib/arvbox/docker/service/composer/run
new file mode 100755 (executable)
index 0000000..50a8ce1
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+exec /usr/local/lib/arvbox/runsu.sh $0-service $1
diff --git a/tools/arvbox/lib/arvbox/docker/service/composer/run-service b/tools/arvbox/lib/arvbox/docker/service/composer/run-service
new file mode 100755 (executable)
index 0000000..f00b7f7
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+.  /usr/local/lib/arvbox/common.sh
+
+cd /usr/src/composer
+
+npm -d install --prefix /usr/local --global yarn
+
+yarn install
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+echo "apiEndPoint: https://${localip}:${services[controller-ssl]}" > /usr/src/composer/src/composer.yml
+exec node_modules/.bin/ng serve --host 0.0.0.0 --port 4200 --env=webdev
diff --git a/tools/arvbox/lib/arvbox/docker/service/controller/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/controller/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/controller/log/run b/tools/arvbox/lib/arvbox/docker/service/controller/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/controller/run b/tools/arvbox/lib/arvbox/docker/service/controller/run
new file mode 100755 (executable)
index 0000000..06a9ba7
--- /dev/null
@@ -0,0 +1,56 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
+
+flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/cmd/arvados-server"
+install $GOPATH/bin/arvados-server /usr/local/bin
+(cd /usr/local/bin && ln -sf arvados-server arvados-controller)
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
+database_pw=$(cat /var/lib/arvados/api_database_pw)
+
+if test -s /var/lib/arvados/api_rails_env ; then
+  database_env=$(cat /var/lib/arvados/api_rails_env)
+else
+  database_env=development
+fi
+
+mkdir -p /etc/arvados
+
+cat >/var/lib/arvados/cluster_config.yml <<EOF
+Clusters:
+  ${uuid_prefix}:
+    NodeProfiles:
+      "*":
+        arvados-controller:
+          Listen: ":${services[controller]}" # choose a port
+        arvados-api-server:
+          Listen: ":${services[api]}" # must match Rails server port in your Nginx config
+    PostgreSQL:
+      ConnectionPool: 32 # max concurrent connections per arvados server daemon
+      Connection:
+        # All parameters here are passed to the PG client library in a connection string;
+        # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+        Host: localhost
+        User: arvados
+        Password: ${database_pw}
+        DBName: arvados_${database_env}
+        client_encoding: utf8
+EOF
+
+/usr/local/lib/arvbox/yml_override.py /var/lib/arvados/cluster_config.yml
+
+cp /var/lib/arvados/cluster_config.yml /etc/arvados/config.yml
+
+exec /usr/local/lib/arvbox/runsu.sh /usr/local/bin/arvados-controller
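The generated cluster_config.yml ties the controller's listen port to the Rails API port and reuses the database credentials created during API setup. A hedged way to confirm the controller answers once it is running, assuming the port map from common.sh (-k because the certificate is self-signed):

. /usr/local/lib/arvbox/common.sh
curl -sk "https://$localip:${services[controller-ssl]}/discovery/v1/apis/arvados/v1/rest" | head -c 200; echo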
diff --git a/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/log/run b/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/run b/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/run-service b/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/run-service
new file mode 100755 (executable)
index 0000000..87c427c
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
+
+flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/crunch-run"
+flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/crunch-dispatch-local"
+install $GOPATH/bin/crunch-run $GOPATH/bin/crunch-dispatch-local /usr/local/bin
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+cat > /usr/local/bin/crunch-run.sh <<EOF
+#!/bin/sh
+exec /usr/local/bin/crunch-run -container-enable-networking=always -container-network-mode=host "\$@"
+EOF
+chmod +x /usr/local/bin/crunch-run.sh
+
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
+export ARVADOS_API_HOST_INSECURE=1
+export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
+
+exec /usr/local/bin/crunch-dispatch-local -crunch-run-command=/usr/local/bin/crunch-run.sh -poll-interval=1
diff --git a/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch0/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch0/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch0/log/run b/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch0/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch0/run b/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch0/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch0/run-service b/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch0/run-service
new file mode 100755 (executable)
index 0000000..2b482ec
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec /usr/local/lib/arvbox/crunch-setup.sh crunch0
diff --git a/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch1/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch1/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch1/log/run b/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch1/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch1/run b/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch1/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch1/run-service b/tools/arvbox/lib/arvbox/docker/service/crunch-dispatch1/run-service
new file mode 100755 (executable)
index 0000000..0407fb8
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+sleep 1
+exec /usr/local/lib/arvbox/crunch-setup.sh crunch1
diff --git a/tools/arvbox/lib/arvbox/docker/service/doc/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/doc/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/doc/log/run b/tools/arvbox/lib/arvbox/docker/service/doc/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/doc/run b/tools/arvbox/lib/arvbox/docker/service/doc/run
new file mode 100755 (executable)
index 0000000..e83db3f
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+/usr/local/lib/arvbox/runsu.sh $0-service
+sv stop doc
diff --git a/tools/arvbox/lib/arvbox/docker/service/doc/run-service b/tools/arvbox/lib/arvbox/docker/service/doc/run-service
new file mode 100755 (executable)
index 0000000..ea66cfd
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+cd /usr/src/arvados/doc
+run_bundler --without=development
+
+cd /usr/src/arvados/sdk/R
+R --quiet --vanilla --file=install_deps.R
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+cd /usr/src/arvados/doc
+bundle exec rake generate baseurl=http://$localip:${services[doc]} arvados_api_host=$localip:${services[controller-ssl]} arvados_workbench_host=http://$localip
diff --git a/tools/arvbox/lib/arvbox/docker/service/docker/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/docker/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/docker/log/run b/tools/arvbox/lib/arvbox/docker/service/docker/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/docker/run b/tools/arvbox/lib/arvbox/docker/service/docker/run
new file mode 100755 (executable)
index 0000000..b6dbaf1
--- /dev/null
@@ -0,0 +1,111 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Taken from https://github.com/jpetazzo/dind
+
+exec 2>&1
+
+# Ensure that all nodes in /dev/mapper correspond to mapped devices currently loaded by the device-mapper kernel driver
+dmsetup mknodes
+
+: ${LOG:=stdio}
+
+# First, make sure that cgroups are mounted correctly.
+CGROUP=/sys/fs/cgroup
+[ -d $CGROUP ] || mkdir $CGROUP
+
+if ! mountpoint -q $CGROUP ; then
+    mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP
+fi
+
+if ! mountpoint -q $CGROUP ; then
+    echo "Could not find or mount cgroups. Tried /sys/fs/cgroup and /cgroup.  Did you use --privileged?"
+    exit 1
+fi
+
+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
+then
+    mount -t securityfs none /sys/kernel/security || {
+        echo "Could not mount /sys/kernel/security."
+        echo "AppArmor detection and --privileged mode might break."
+    }
+fi
+
+# Mount the cgroup hierarchies exactly as they are in the parent system.
+for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
+do
+        [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
+        mountpoint -q $CGROUP/$SUBSYS ||
+                mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
+
+        # The two following sections address a bug which manifests itself
+        # by a cryptic "lxc-start: no ns_cgroup option specified" when
+        # trying to start containers within a container.
+        # The bug seems to appear when the cgroup hierarchies are not
+        # mounted on the exact same directories in the host, and in the
+        # container.
+
+        # Named, control-less cgroups are mounted with "-o name=foo"
+        # (and appear as such under /proc/<pid>/cgroup) but are usually
+        # mounted on a directory named "foo" (without the "name=" prefix).
+        # Systemd and OpenRC (and possibly others) both create such a
+        # cgroup. To avoid the aforementioned bug, we symlink "foo" to
+        # "name=foo". This shouldn't have any adverse effect.
+        echo $SUBSYS | grep -q ^name= && {
+                NAME=$(echo $SUBSYS | sed s/^name=//)
+                ln -s $SUBSYS $CGROUP/$NAME
+        }
+
+        # Likewise, on at least one system, it has been reported that
+        # systemd would mount the CPU and CPU accounting controllers
+        # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
+        # but on a directory called "cpu,cpuacct" (note the inversion
+        # in the order of the groups). This tries to work around it.
+        [ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
+done
+
+# Note: as I write those lines, the LXC userland tools cannot setup
+# a "sub-container" properly if the "devices" cgroup is not in its
+# own hierarchy. Let's detect this and issue a warning.
+grep -q :devices: /proc/1/cgroup ||
+       echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
+grep -qw devices /proc/1/cgroup ||
+       echo "WARNING: it looks like the 'devices' cgroup is not mounted."
+
+# Now, close extraneous file descriptors.
+pushd /proc/self/fd >/dev/null
+for FD in *
+do
+       case "$FD" in
+       # Keep stdin/stdout/stderr
+       [012])
+               ;;
+       # Nuke everything else
+       *)
+               eval exec "$FD>&-"
+               ;;
+       esac
+done
+popd >/dev/null
+
+
+# If a pidfile is still around (for example after a container restart),
+# delete it so that docker can start.
+rm -rf /var/run/docker.pid
+
+read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
+trap "kill -TERM -$pgrp; exit" EXIT TERM KILL SIGKILL SIGTERM SIGQUIT
+
+mkdir /etc/docker
+# Prefer overlay2
+echo '{"storage-driver": "overlay2"}' > /etc/docker/daemon.json
+
+if ! dockerd ; then
+    # Oops overlay2 didn't work, let docker choose a default.
+    echo '{}' > /etc/docker/daemon.json
+    dockerd
+fi
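After the mount loop, every cgroup subsystem listed for PID 1 should be a mountpoint under /sys/fs/cgroup, with the name= variants reachable through the symlinks created above. A hedged one-liner to verify from inside the container:

cut -d: -f2 /proc/1/cgroup | while read SUBSYS; do
    mountpoint -q "/sys/fs/cgroup/$SUBSYS" && echo "$SUBSYS mounted"
done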
diff --git a/tools/arvbox/lib/arvbox/docker/service/gitolite/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/gitolite/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/gitolite/log/run b/tools/arvbox/lib/arvbox/docker/service/gitolite/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/gitolite/run b/tools/arvbox/lib/arvbox/docker/service/gitolite/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/gitolite/run-service b/tools/arvbox/lib/arvbox/docker/service/gitolite/run-service
new file mode 100755 (executable)
index 0000000..6055efc
--- /dev/null
@@ -0,0 +1,124 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -eux -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+mkdir -p /var/lib/arvados/git
+
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
+export ARVADOS_API_HOST_INSECURE=1
+export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
+
+export USER=git
+export USERNAME=git
+export LOGNAME=git
+export HOME=/var/lib/arvados/git
+
+cd ~arvbox
+
+mkdir -p ~arvbox/.ssh ~git/.ssh
+chmod 0700 ~arvbox/.ssh ~git/.ssh
+
+if ! test -s ~arvbox/.ssh/id_rsa ; then
+    ssh-keygen -t rsa -P '' -f .ssh/id_rsa
+    cp ~arvbox/.ssh/id_rsa ~arvbox/.ssh/id_rsa.pub ~git/.ssh
+fi
+
+if test -s ~arvbox/.ssh/known_hosts ; then
+    ssh-keygen -f ".ssh/known_hosts" -R localhost
+fi
+
+if ! test -f /var/lib/arvados/gitolite-setup ; then
+    cd ~git
+
+    # Do a no-op login to populate known_hosts
+    # with the hostkey, so it won't try to ask
+    # about it later.
+    cp .ssh/id_rsa.pub .ssh/authorized_keys
+    ssh -o stricthostkeychecking=no git@localhost true
+    rm .ssh/authorized_keys
+
+    cp /usr/local/lib/arvbox/gitolite.rc .gitolite.rc
+
+    gitolite setup -pk .ssh/id_rsa.pub
+
+    if ! test -d gitolite-admin ; then
+        git clone git@localhost:gitolite-admin
+    fi
+
+    cd gitolite-admin
+    git config user.email arvados
+    git config user.name arvados
+    git config push.default simple
+    git push
+
+    touch /var/lib/arvados/gitolite-setup
+else
+    # Do a no-op login to populate known_hosts
+    # with the hostkey, so it won't try to ask
+    # about it later.  Don't run anything,
+    # get the default gitolite behavior.
+    ssh -o stricthostkeychecking=no git@localhost
+fi
+
+prefix=$(arv --format=uuid user current | cut -d- -f1)
+
+if ! test -s /var/lib/arvados/arvados-git-uuid ; then
+    repo_uuid=$(arv --format=uuid repository create --repository "{\"owner_uuid\":\"$prefix-tpzed-000000000000000\", \"name\":\"arvados\"}")
+    echo $repo_uuid > /var/lib/arvados/arvados-git-uuid
+fi
+
+repo_uuid=$(cat /var/lib/arvados/arvados-git-uuid)
+
+if ! test -s /var/lib/arvados/arvados-git-link-uuid ; then
+    all_users_group_uuid="$prefix-j7d0g-fffffffffffffff"
+
+    set +e
+    read -rd $'\000' newlink <<EOF
+{
+ "tail_uuid":"$all_users_group_uuid",
+ "head_uuid":"$repo_uuid",
+ "link_class":"permission",
+ "name":"can_read"
+}
+EOF
+    set -e
+    link_uuid=$(arv --format=uuid link create --link "$newlink")
+    echo $link_uuid > /var/lib/arvados/arvados-git-link-uuid
+fi
+
+if ! test -d /var/lib/arvados/git/repositories/$repo_uuid.git ; then
+    git clone --bare /usr/src/arvados /var/lib/arvados/git/repositories/$repo_uuid.git
+else
+    git --git-dir=/var/lib/arvados/git/repositories/$repo_uuid.git fetch -f /usr/src/arvados master:master
+fi
+
+cd /usr/src/arvados/services/api
+
+if test -s /var/lib/arvados/api_rails_env ; then
+  RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
+else
+  RAILS_ENV=development
+fi
+
+git_user_key=$(cat ~git/.ssh/id_rsa.pub)
+
+cat > config/arvados-clients.yml <<EOF
+$RAILS_ENV:
+  gitolite_url: /var/lib/arvados/git/repositories/gitolite-admin.git
+  gitolite_tmp: /var/lib/arvados/git
+  arvados_api_host: $localip:${services[controller-ssl]}
+  arvados_api_token: "$ARVADOS_API_TOKEN"
+  arvados_api_host_insecure: false
+  gitolite_arvados_git_user_key: "$git_user_key"
+EOF
+
+while true ; do
+    bundle exec script/arvados-git-sync.rb $RAILS_ENV
+    sleep 120
+done
diff --git a/tools/arvbox/lib/arvbox/docker/service/keep-web/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/keep-web/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/keep-web/log/run b/tools/arvbox/lib/arvbox/docker/service/keep-web/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/keep-web/run b/tools/arvbox/lib/arvbox/docker/service/keep-web/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/keep-web/run-service b/tools/arvbox/lib/arvbox/docker/service/keep-web/run-service
new file mode 100755 (executable)
index 0000000..b539b6a
--- /dev/null
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
+
+flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/keep-web"
+install $GOPATH/bin/keep-web /usr/local/bin
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
+export ARVADOS_API_HOST_INSECURE=1
+export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
+
+exec /usr/local/bin/keep-web -trust-all-content -listen=:${services[keep-web]}
diff --git a/tools/arvbox/lib/arvbox/docker/service/keepproxy/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/keepproxy/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/keepproxy/log/run b/tools/arvbox/lib/arvbox/docker/service/keepproxy/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/keepproxy/run b/tools/arvbox/lib/arvbox/docker/service/keepproxy/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/keepproxy/run-service b/tools/arvbox/lib/arvbox/docker/service/keepproxy/run-service
new file mode 100755 (executable)
index 0000000..bf802d4
--- /dev/null
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+sleep 2
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
+
+flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/keepproxy"
+install $GOPATH/bin/keepproxy /usr/local/bin
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
+export ARVADOS_API_HOST_INSECURE=1
+export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
+
+set +e
+read -rd $'\000' keepservice <<EOF
+{
+ "service_host":"$localip",
+ "service_port":${services[keepproxy]},
+ "service_ssl_flag":false,
+ "service_type":"proxy"
+}
+EOF
+set -e
+
+if test -s /var/lib/arvados/keepproxy-uuid ; then
+    keep_uuid=$(cat /var/lib/arvados/keepproxy-uuid)
+    arv keep_service update --uuid $keep_uuid --keep-service "$keepservice"
+else
+    UUID=$(arv --format=uuid keep_service create --keep-service "$keepservice")
+    echo $UUID > /var/lib/arvados/keepproxy-uuid
+fi
+
+exec /usr/local/bin/keepproxy -listen=:${services[keepproxy]}
diff --git a/tools/arvbox/lib/arvbox/docker/service/keepstore0/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/keepstore0/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/keepstore0/log/run b/tools/arvbox/lib/arvbox/docker/service/keepstore0/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/keepstore0/run b/tools/arvbox/lib/arvbox/docker/service/keepstore0/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/keepstore0/run-service b/tools/arvbox/lib/arvbox/docker/service/keepstore0/run-service
new file mode 100755 (executable)
index 0000000..c7cb7c9
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+. /usr/local/lib/arvbox/common.sh
+exec /usr/local/lib/arvbox/keep-setup.sh keep0 ${services[keepstore0]}
diff --git a/tools/arvbox/lib/arvbox/docker/service/keepstore1/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/keepstore1/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/keepstore1/log/run b/tools/arvbox/lib/arvbox/docker/service/keepstore1/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/keepstore1/run b/tools/arvbox/lib/arvbox/docker/service/keepstore1/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/keepstore1/run-service b/tools/arvbox/lib/arvbox/docker/service/keepstore1/run-service
new file mode 100755 (executable)
index 0000000..3511a91
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+. /usr/local/lib/arvbox/common.sh
+exec /usr/local/lib/arvbox/keep-setup.sh keep1 ${services[keepstore1]}
diff --git a/tools/arvbox/lib/arvbox/docker/service/nginx/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/nginx/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/nginx/log/run b/tools/arvbox/lib/arvbox/docker/service/nginx/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/nginx/run b/tools/arvbox/lib/arvbox/docker/service/nginx/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/nginx/run-service b/tools/arvbox/lib/arvbox/docker/service/nginx/run-service
new file mode 100755 (executable)
index 0000000..cf72ed2
--- /dev/null
@@ -0,0 +1,119 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+cat <<EOF >/var/lib/arvados/nginx.conf
+worker_processes auto;
+pid /var/lib/arvados/nginx.pid;
+
+error_log stderr;
+daemon off;
+
+events {
+       worker_connections 64;
+}
+
+http {
+     access_log off;
+     include /etc/nginx/mime.types;
+     default_type application/octet-stream;
+     server {
+            listen ${services[doc]} default_server;
+            listen [::]:${services[doc]} default_server;
+            root /usr/src/arvados/doc/.site;
+            index index.html;
+            server_name _;
+     }
+
+  upstream controller {
+    server localhost:${services[controller]};
+  }
+  server {
+    listen *:${services[controller-ssl]} ssl default_server;
+    server_name controller;
+    ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+    ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+    location  / {
+      proxy_pass http://controller;
+      proxy_set_header Host \$http_host;
+      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+    }
+  }
+
+upstream arvados-ws {
+  server localhost:${services[websockets]};
+}
+server {
+  listen *:${services[websockets-ssl]} ssl default_server;
+  server_name           websockets;
+
+  proxy_connect_timeout 90s;
+  proxy_read_timeout    300s;
+
+  ssl                   on;
+  ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+  ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+
+  location / {
+    proxy_pass          http://arvados-ws;
+    proxy_set_header    Upgrade         \$http_upgrade;
+    proxy_set_header    Connection      "upgrade";
+    proxy_set_header Host \$http_host;
+    proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+  }
+}
+
+  upstream workbench2 {
+    server localhost:${services[workbench2]};
+  }
+  server {
+    listen *:${services[workbench2-ssl]} ssl default_server;
+    server_name workbench2;
+    ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+    ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+    location  / {
+      proxy_pass http://workbench2;
+      proxy_set_header Host \$http_host;
+      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+    }
+    location  /sockjs-node {
+      proxy_pass http://workbench2;
+      proxy_set_header    Upgrade         \$http_upgrade;
+      proxy_set_header    Connection      "upgrade";
+      proxy_set_header Host \$http_host;
+      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+    }
+  }
+
+  upstream keep-web {
+    server localhost:${services[keep-web]};
+  }
+  server {
+    listen *:${services[keep-web-ssl]} ssl default_server;
+    server_name keep-web;
+    ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+    ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+    location  / {
+      proxy_pass http://keep-web;
+      proxy_set_header Host \$http_host;
+      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+    }
+  }
+
+}
+
+EOF
+
+exec nginx -c /var/lib/arvados/nginx.conf
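Each server block terminates TLS with the arvbox certificate and proxies to a localhost upstream; the websockets (and /sockjs-node) blocks additionally forward the Upgrade and Connection headers, without which the handshake would die at the proxy. A hedged probe of the websocket endpoint (expect a 101, or an auth error without a token; -k because the certificate is self-signed):

. /usr/local/lib/arvbox/common.sh
curl -ski "https://$localip:${services[websockets-ssl]}/websocket" \
    -H 'Connection: Upgrade' -H 'Upgrade: websocket' \
    -H 'Sec-WebSocket-Version: 13' \
    -H "Sec-WebSocket-Key: $(head -c16 /dev/urandom | base64)" | head -n1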
diff --git a/tools/arvbox/lib/arvbox/docker/service/postgres/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/postgres/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/postgres/log/run b/tools/arvbox/lib/arvbox/docker/service/postgres/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/postgres/run b/tools/arvbox/lib/arvbox/docker/service/postgres/run
new file mode 100755 (executable)
index 0000000..3ef78ee
--- /dev/null
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+flock /var/lib/arvados/createusers.lock /usr/local/lib/arvbox/createusers.sh
+
+make-ssl-cert generate-default-snakeoil --force-overwrite
+
+. /usr/local/lib/arvbox/common.sh
+
+chown -R $PGUSER:$PGGROUP /var/lib/postgresql
+chown -R $PGUSER:$PGGROUP /var/run/postgresql
+chown -R $PGUSER:$PGGROUP /etc/postgresql
+chown -R $PGUSER:$PGGROUP /etc/ssl/private
+chmod -R g-r /etc/ssl/private
+
+exec chpst -u $PGUSER:$PGGROUP $0-service
diff --git a/tools/arvbox/lib/arvbox/docker/service/postgres/run-service b/tools/arvbox/lib/arvbox/docker/service/postgres/run-service
new file mode 100755 (executable)
index 0000000..a0771aa
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -eux -o pipefail
+
+PGVERSION=9.6
+
+if ! test -d /var/lib/postgresql/$PGVERSION/main ; then
+    /usr/lib/postgresql/$PGVERSION/bin/initdb --locale=en_US.UTF-8 -D /var/lib/postgresql/$PGVERSION/main
+    sh -c "while ! (psql postgres -c'\du' | grep '^ arvbox ') >/dev/null ; do createuser -s arvbox ; sleep 1 ; done" &
+fi
+mkdir -p /var/run/postgresql/$PGVERSION-main.pg_stat_tmp
+
+rm -f /var/lib/postgresql/$PGVERSION/main/postmaster.pid
+
+exec /usr/lib/postgresql/$PGVERSION/bin/postgres -D /var/lib/postgresql/$PGVERSION/main -c config_file=/etc/postgresql/$PGVERSION/main/postgresql.conf
diff --git a/tools/arvbox/lib/arvbox/docker/service/ready/run b/tools/arvbox/lib/arvbox/docker/service/ready/run
new file mode 100755 (executable)
index 0000000..904476a
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+/usr/local/lib/arvbox/runsu.sh $0-service
+sv stop ready
diff --git a/tools/arvbox/lib/arvbox/docker/service/ready/run-service b/tools/arvbox/lib/arvbox/docker/service/ready/run-service
new file mode 100755 (executable)
index 0000000..470d105
--- /dev/null
@@ -0,0 +1,96 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+. /usr/local/lib/arvbox/common.sh
+
+set -eu -o pipefail
+
+if ! [[ -d /tmp/arvbox-ready ]] ; then
+   echo "Note: if this is a fresh arvbox installation, it may take 10-15 minutes (or longer) to download and"
+   echo "install dependencies.  Use \"arvbox log\" to monitor the progress of specific services."
+   echo
+   mkdir -p /tmp/arvbox-ready
+fi
+
+sleep 3
+
+waiting=""
+
+for s in "${!services[@]}"
+do
+  if ! [[ -f /tmp/arvbox-ready/$s ]] ; then
+    if nc -z localhost ${services[$s]} ; then
+      echo "$s is ready at $localip:${services[$s]}"
+      touch /tmp/arvbox-ready/$s
+    else
+      waiting="$waiting $s"
+    fi
+  fi
+done
+
+if ! docker version >/dev/null 2>/dev/null ; then
+  waiting="$waiting docker"
+fi
+
+for sdk_app in arv arv-get cwl-runner arv-mount ; do
+    if ! which $sdk_app >/dev/null ; then
+        waiting="$waiting sdk"
+        break
+    fi
+done
+
+if ! (ps x | grep -v grep | grep "crunch-dispatch") > /dev/null ; then
+    waiting="$waiting crunch-dispatch"
+fi
+
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
+export ARVADOS_API_HOST_INSECURE=1
+
+vm_ok=0
+if test -s /var/lib/arvados/vm-uuid -a -s /var/lib/arvados/superuser_token; then
+    vm_uuid=$(cat /var/lib/arvados/vm-uuid)
+    export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
+    if (which arv && arv virtual_machine get --uuid $vm_uuid) >/dev/null 2>/dev/null ; then
+        vm_ok=1
+    fi
+fi
+
+if test $vm_ok = 0 ; then
+    waiting="$waiting vm"
+fi
+
+if ! [[ -z "$waiting" ]] ; then
+    if ps x | grep -v grep | grep "bundle install" > /dev/null; then
+        gemcount=$(ls /var/lib/gems/ruby/2.1.0/gems 2>/dev/null | wc -l)
+
+        gemlockcount=0
+        for l in /usr/src/arvados/services/api/Gemfile.lock \
+                     /usr/src/arvados/apps/workbench/Gemfile.lock \
+                     /usr/src/sso/Gemfile.lock ; do
+            gc=$(cat $l \
+                        | grep -vE "(GEM|PLATFORMS|DEPENDENCIES|BUNDLED|GIT|^$|remote:|specs:|revision:)" \
+                        | sed 's/^ *//' | sed 's/(.*)//' | sed 's/ *$//' | sort | uniq | wc -l)
+            gemlockcount=$(($gemlockcount + $gc))
+        done
+        waiting="$waiting (installing ruby gems $gemcount of about $gemlockcount)"
+    fi
+
+    if ps x | grep -v grep | grep "c++.*/var/lib/passenger" > /dev/null ; then
+        waiting="$waiting (compiling passenger)"
+    fi
+
+    if ps x | grep -v grep | grep "pip install" > /dev/null; then
+        waiting="$waiting (installing python packages)"
+    fi
+    echo "    Waiting for$waiting ..."
+    exit 1
+fi
+
+echo
+echo "Your Arvados-in-a-box is ready!"
+echo "Workbench is running at https://$localip"
+echo "Workbench2 is running at https://$localip:${services[workbench2-ssl]}"
+
+rm -r /tmp/arvbox-ready
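The loop above relies on common.sh (added earlier in this commit) to define an associative array `services` mapping service names to ports, plus `$localip`. A minimal sketch of that contract, with hypothetical port numbers:

    declare -A services=([controller-ssl]=8000 [workbench]=3001 [websockets]=8002)
    localip=192.168.5.2
    # `nc -z` exits 0 only if something is listening on the port
    for s in "${!services[@]}"; do
        nc -z localhost "${services[$s]}" && echo "$s is up at $localip:${services[$s]}"
    done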
diff --git a/tools/arvbox/lib/arvbox/docker/service/sdk/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/sdk/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/sdk/log/run b/tools/arvbox/lib/arvbox/docker/service/sdk/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/sdk/run b/tools/arvbox/lib/arvbox/docker/service/sdk/run
new file mode 100755 (executable)
index 0000000..a3cd1d3
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+/usr/local/lib/arvbox/runsu.sh $0-service
+sv stop sdk
diff --git a/tools/arvbox/lib/arvbox/docker/service/sdk/run-service b/tools/arvbox/lib/arvbox/docker/service/sdk/run-service
new file mode 100755 (executable)
index 0000000..da6db36
--- /dev/null
@@ -0,0 +1,45 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -eux -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+mkdir -p ~/.pip /var/lib/pip
+cat > ~/.pip/pip.conf <<EOF
+[global]
+download_cache = /var/lib/pip
+EOF
+
+cd /usr/src/arvados/sdk/cli
+run_bundler --binstubs=$PWD/binstubs
+ln -sf /usr/src/arvados/sdk/cli/binstubs/arv /usr/local/bin/arv
+
+# Need to install the upstream version of pip because the python-pip package
+# shipped with Debian 9 is patched to change behavior in a way that breaks our
+# use case.
+# See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=876145
+# When a non-root user attempts to install system packages, it makes the
+# --ignore-installed flag the default (and there is no way to turn it off).
+# This makes it very hard to share dependencies among multiple packages,
+# because pip blindly installs the latest version of each dependency
+# requested by each package, even if a compatible version is already
+# installed.
+pip_install pip==9.0.3
+
+pip_install wheel
+
+cd /usr/src/arvados/sdk/python
+python setup.py sdist
+pip_install $(ls dist/arvados-python-client-*.tar.gz | tail -n1)
+
+cd /usr/src/arvados/services/fuse
+python setup.py sdist
+pip_install $(ls dist/arvados_fuse-*.tar.gz | tail -n1)
+
+cd /usr/src/arvados/sdk/cwl
+python setup.py sdist
+pip_install $(ls dist/arvados-cwl-runner-*.tar.gz | tail -n1)
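To make the comment above concrete, here is a hypothetical before/after (package names illustrative; `pip_install` is assumed to be a wrapper defined in common.sh):

    # Debian 9's patched pip, run as non-root, acts as if --ignore-installed
    # were always set, so shared dependencies are reinstalled on every call:
    pip install packageA   # pulls in, say, requests
    pip install packageB   # reinstalls requests from scratch regardless
    # After pinning upstream pip, satisfied dependencies are left alone:
    pip_install pip==9.0.3
    pip install packageB   # sees the existing requests and skips it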
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmctld/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/slurmctld/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmctld/log/run b/tools/arvbox/lib/arvbox/docker/service/slurmctld/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmctld/run b/tools/arvbox/lib/arvbox/docker/service/slurmctld/run
new file mode 100755 (executable)
index 0000000..bb500a5
--- /dev/null
@@ -0,0 +1,34 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -eux -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+cat > /etc/slurm-llnl/slurm.conf  <<EOF
+ControlMachine=$HOSTNAME
+ControlAddr=$HOSTNAME
+AuthType=auth/munge
+DefaultStorageLoc=/var/log/slurm-llnl
+SelectType=select/cons_res
+SelectTypeParameters=CR_CPU_Memory
+SlurmUser=arvbox
+SlurmdUser=arvbox
+SlurmctldPort=7002
+SlurmctldTimeout=300
+SlurmdPort=7003
+SlurmdSpoolDir=/var/tmp/slurmd.spool
+SlurmdTimeout=300
+StateSaveLocation=/var/tmp/slurm.state
+NodeName=$HOSTNAME
+PartitionName=compute State=UP Default=YES Nodes=$HOSTNAME
+EOF
+
+mkdir -p /var/run/munge
+
+/usr/sbin/munged -f
+
+exec /usr/sbin/slurmctld -v -D
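Once slurmctld and slurmd are both up against the generated slurm.conf, a quick smoke test (hypothetical; not part of this commit) would be:

    sinfo              # the "compute" partition should report its node as idle
    srun -N1 hostname  # runs a one-node job; should print the container's $HOSTNAME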
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmd/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/slurmd/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmd/log/run b/tools/arvbox/lib/arvbox/docker/service/slurmd/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmd/run b/tools/arvbox/lib/arvbox/docker/service/slurmd/run
new file mode 100755 (executable)
index 0000000..8656b27
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -eux -o pipefail
+
+exec /usr/local/lib/arvbox/runsu.sh /usr/sbin/slurmd -v -D
diff --git a/tools/arvbox/lib/arvbox/docker/service/ssh/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/ssh/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/ssh/log/run b/tools/arvbox/lib/arvbox/docker/service/ssh/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/ssh/run b/tools/arvbox/lib/arvbox/docker/service/ssh/run
new file mode 100755 (executable)
index 0000000..b1aedaa
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -eux -o pipefail
+
+if ! test -d /var/run/sshd ; then
+   mkdir /var/run/sshd
+   chmod 0755 /var/run/sshd
+fi
+exec /usr/sbin/sshd -D
diff --git a/tools/arvbox/lib/arvbox/docker/service/sso/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/sso/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/sso/log/run b/tools/arvbox/lib/arvbox/docker/service/sso/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/sso/run b/tools/arvbox/lib/arvbox/docker/service/sso/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/sso/run-service b/tools/arvbox/lib/arvbox/docker/service/sso/run-service
new file mode 100755 (executable)
index 0000000..af49d4b
--- /dev/null
@@ -0,0 +1,94 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+cd /usr/src/sso
+if test -s /var/lib/arvados/sso_rails_env ; then
+  export RAILS_ENV=$(cat /var/lib/arvados/sso_rails_env)
+else
+  export RAILS_ENV=development
+fi
+
+run_bundler --without=development
+bundle exec passenger-config build-native-support
+bundle exec passenger-config install-standalone-runtime
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+set -u
+
+if ! test -s /var/lib/arvados/sso_uuid_prefix ; then
+  ruby -e 'puts "#{rand(2**64).to_s(36)[0,5]}"' > /var/lib/arvados/sso_uuid_prefix
+fi
+uuid_prefix=$(cat /var/lib/arvados/sso_uuid_prefix)
+
+if ! test -s /var/lib/arvados/sso_secret_token ; then
+  ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/sso_secret_token
+fi
+secret_token=$(cat /var/lib/arvados/sso_secret_token)
+
+test -s /var/lib/arvados/server-cert-${localip}.pem
+
+cat >config/application.yml <<EOF
+$RAILS_ENV:
+  uuid_prefix: $uuid_prefix
+  secret_token: $secret_token
+  default_link_url: "http://$localip"
+  allow_account_registration: true
+EOF
+
+(cd config && /usr/local/lib/arvbox/yml_override.py application.yml)
+
+if ! test -f /var/lib/arvados/sso_database_pw ; then
+    ruby -e 'puts rand(2**128).to_s(36)' > /var/lib/arvados/sso_database_pw
+fi
+database_pw=$(cat /var/lib/arvados/sso_database_pw)
+
+if ! (psql postgres -c "\du" | grep "^ arvados_sso ") >/dev/null ; then
+    psql postgres -c "create user arvados_sso with password '$database_pw'"
+    psql postgres -c "ALTER USER arvados_sso CREATEDB;"
+fi
+
+sed "s/password:.*/password: $database_pw/" <config/database.yml.example >config/database.yml
+
+if ! test -f /var/lib/arvados/sso_database_setup ; then
+   bundle exec rake db:setup
+
+   if ! test -s /var/lib/arvados/sso_app_secret ; then
+       ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/sso_app_secret
+   fi
+   app_secret=$(cat /var/lib/arvados/sso_app_secret)
+
+   bundle exec rails console <<EOF
+c = Client.new
+c.name = "joshid"
+c.app_id = "arvados-server"
+c.app_secret = "$app_secret"
+c.save!
+EOF
+
+   touch /var/lib/arvados/sso_database_setup
+fi
+
+rm -rf tmp
+mkdir -p tmp/cache
+
+bundle exec rake assets:precompile
+bundle exec rake db:migrate
+
+set +u
+if test "$1" = "--only-setup" ; then
+    exit
+fi
+
+exec bundle exec passenger start --port=${services[sso]} \
+     --ssl --ssl-certificate=/var/lib/arvados/server-cert-${localip}.pem \
+     --ssl-certificate-key=/var/lib/arvados/server-cert-${localip}.key
diff --git a/tools/arvbox/lib/arvbox/docker/service/vm/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/vm/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/vm/log/run b/tools/arvbox/lib/arvbox/docker/service/vm/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/vm/run b/tools/arvbox/lib/arvbox/docker/service/vm/run
new file mode 100755 (executable)
index 0000000..863de73
--- /dev/null
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+. /usr/local/lib/arvbox/common.sh
+
+git config --system "credential.http://$localip:${services[arv-git-httpd]}/.username" none
+git config --system "credential.http://$localip:${services[arv-git-httpd]}/.helper" '!cred(){ cat >/dev/null; if [ "$1" = get ]; then echo password=$ARVADOS_API_TOKEN; fi; };cred'
+
+/usr/local/lib/arvbox/runsu.sh $0-service
+
+cd /usr/src/arvados/services/login-sync
+
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
+export ARVADOS_API_HOST_INSECURE=1
+export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
+export ARVADOS_VIRTUAL_MACHINE_UUID=$(cat /var/lib/arvados/vm-uuid)
+
+while true ; do
+      bundle exec arvados-login-sync
+      sleep 120
+done
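The inline credential helper above discards git's request on stdin and answers every `get` with the current API token; the username is pinned separately by the `credential.*.username` setting. A hypothetical exchange:

    # git invokes the helper roughly as:
    #   cred get <<REQUEST
    #   protocol=http
    #   host=<localip>:<arv-git-httpd port>
    #   REQUEST
    # `cat >/dev/null` swallows the request, then the helper emits:
    #   password=$ARVADOS_API_TOKEN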
diff --git a/tools/arvbox/lib/arvbox/docker/service/vm/run-service b/tools/arvbox/lib/arvbox/docker/service/vm/run-service
new file mode 100755 (executable)
index 0000000..065c557
--- /dev/null
@@ -0,0 +1,39 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+sleep 2
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+cd /usr/src/arvados/services/login-sync
+run_bundler
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+set -u
+
+export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
+export ARVADOS_API_HOST_INSECURE=1
+export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
+export ARVADOS_VIRTUAL_MACHINE_UUID=$(cat /var/lib/arvados/vm-uuid)
+
+set +e
+read -rd $'\000' vm <<EOF
+{
+ "uuid": "$ARVADOS_VIRTUAL_MACHINE_UUID",
+ "hostname":"$localip"
+}
+EOF
+set -e
+
+if arv virtual_machine get --uuid $ARVADOS_VIRTUAL_MACHINE_UUID ; then
+    arv virtual_machine update --uuid $ARVADOS_VIRTUAL_MACHINE_UUID --virtual-machine "$vm"
+else
+    arv virtual_machine create --virtual-machine "$vm"
+fi
diff --git a/tools/arvbox/lib/arvbox/docker/service/websockets/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/websockets/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/websockets/log/run b/tools/arvbox/lib/arvbox/docker/service/websockets/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/websockets/run b/tools/arvbox/lib/arvbox/docker/service/websockets/run
new file mode 120000 (symlink)
index 0000000..a388c8b
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/websockets/run-service b/tools/arvbox/lib/arvbox/docker/service/websockets/run-service
new file mode 100755 (executable)
index 0000000..cc33032
--- /dev/null
@@ -0,0 +1,40 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+if test -s /var/lib/arvados/api_rails_env ; then
+  RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
+else
+  RAILS_ENV=development
+fi
+
+. /usr/local/lib/arvbox/go-setup.sh
+
+flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/ws"
+install $GOPATH/bin/ws /usr/local/bin/arvados-ws
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+database_pw=$(cat /var/lib/arvados/api_database_pw)
+
+cat >/var/lib/arvados/arvados-ws.yml <<EOF
+Client:
+  APIHost: $localip:${services[controller-ssl]}
+  Insecure: false
+Postgres:
+  dbname: arvados_$RAILS_ENV
+  user: arvados
+  password: $database_pw
+  host: localhost
+Listen: localhost:${services[websockets]}
+EOF
+
+exec /usr/local/bin/arvados-ws -config /var/lib/arvados/arvados-ws.yml
diff --git a/tools/arvbox/lib/arvbox/docker/service/workbench/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/workbench/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/workbench/log/run b/tools/arvbox/lib/arvbox/docker/service/workbench/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/workbench/run b/tools/arvbox/lib/arvbox/docker/service/workbench/run
new file mode 100755 (executable)
index 0000000..e65801b
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+.  /usr/local/lib/arvbox/common.sh
+
+/usr/local/lib/arvbox/runsu.sh $0-service $1
+
+cd /usr/src/arvados/apps/workbench
+
+rm -rf tmp
+mkdir tmp
+chown arvbox:arvbox tmp
+
+if test -s /var/lib/arvados/workbench_rails_env ; then
+  export RAILS_ENV=$(cat /var/lib/arvados/workbench_rails_env)
+else
+  export RAILS_ENV=development
+fi
+
+if test "$1" != "--only-deps" ; then
+    exec bundle exec passenger start --port=${services[workbench]} \
+        --ssl --ssl-certificate=/var/lib/arvados/server-cert-${localip}.pem \
+        --ssl-certificate-key=/var/lib/arvados/server-cert-${localip}.key \
+         --user arvbox
+fi
diff --git a/tools/arvbox/lib/arvbox/docker/service/workbench/run-service b/tools/arvbox/lib/arvbox/docker/service/workbench/run-service
new file mode 100755 (executable)
index 0000000..6f13ee0
--- /dev/null
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+.  /usr/local/lib/arvbox/common.sh
+
+cd /usr/src/arvados/apps/workbench
+
+if test -s /var/lib/arvados/workbench_rails_env ; then
+  export RAILS_ENV=$(cat /var/lib/arvados/workbench_rails_env)
+else
+  export RAILS_ENV=development
+fi
+
+run_bundler --without=development
+bundle exec passenger-config build-native-support
+bundle exec passenger-config install-standalone-runtime
+mkdir -p /usr/src/arvados/apps/workbench/tmp
+RAILS_GROUPS=assets bundle exec rake npm:install
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+set -u
+
+if ! test -s /var/lib/arvados/workbench_secret_token ; then
+  ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/workbench_secret_token
+fi
+secret_token=$(cat /var/lib/arvados/workbench_secret_token)
+
+cat >config/application.yml <<EOF
+$RAILS_ENV:
+  secret_token: $secret_token
+  arvados_login_base: https://$localip:${services[controller-ssl]}/login
+  arvados_v1_base: https://$localip:${services[controller-ssl]}/arvados/v1
+  arvados_insecure_https: false
+  keep_web_download_url: https://$localip:${services[keep-web-ssl]}/c=%{uuid_or_pdh}
+  keep_web_url: https://$localip:${services[keep-web-ssl]}/c=%{uuid_or_pdh}
+  arvados_docsite: http://$localip:${services[doc]}/
+  force_ssl: false
+  composer_url: http://$localip:${services[composer]}
+  workbench2_url: https://$localip:${services[workbench2-ssl]}
+EOF
+
+bundle exec rake assets:precompile
+
+(cd config && /usr/local/lib/arvbox/yml_override.py application.yml)
diff --git a/tools/arvbox/lib/arvbox/docker/service/workbench2/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/workbench2/log/main/.gitstub
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/arvbox/lib/arvbox/docker/service/workbench2/log/run b/tools/arvbox/lib/arvbox/docker/service/workbench2/log/run
new file mode 120000 (symlink)
index 0000000..d6aef4a
--- /dev/null
@@ -0,0 +1 @@
+/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/workbench2/run b/tools/arvbox/lib/arvbox/docker/service/workbench2/run
new file mode 100755 (executable)
index 0000000..50a8ce1
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+exec /usr/local/lib/arvbox/runsu.sh $0-service $1
diff --git a/tools/arvbox/lib/arvbox/docker/service/workbench2/run-service b/tools/arvbox/lib/arvbox/docker/service/workbench2/run-service
new file mode 100755 (executable)
index 0000000..2dbef4a
--- /dev/null
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+.  /usr/local/lib/arvbox/common.sh
+
+cd /usr/src/workbench2
+
+npm -d install --prefix /usr/local --global yarn
+
+yarn install
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
+
+cat <<EOF > /usr/src/workbench2/public/config.json
+{
+  "API_HOST": "${localip}:${services[controller-ssl]}",
+  "VOCABULARY_URL": "vocabulary-example.json",
+  "FILE_VIEWERS_CONFIG_URL": "file-viewers-example.json"
+}
+EOF
+
+export HTTPS=false
+# Can't use "yarn start", need to run the dev server script
+# directly so that the TERM signal from "sv restart" gets to the
+# right process.
+exec node node_modules/react-scripts-ts/scripts/start.js
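The final `exec` matters for supervision: `sv restart` signals the PID runit recorded for the service, so the dev server must be that PID rather than a grandchild of yarn. A hypothetical contrast:

    # Without exec, TERM stops the wrapper and can orphan the real server:
    yarn start                 # node dev server runs as a grandchild of yarn
    # With exec, node replaces this shell and receives TERM directly:
    exec node node_modules/react-scripts-ts/scripts/start.js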
diff --git a/tools/arvbox/lib/arvbox/docker/waitforpostgres.sh b/tools/arvbox/lib/arvbox/docker/waitforpostgres.sh
new file mode 100755 (executable)
index 0000000..58f156c
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+while ! psql postgres -c\\du >/dev/null 2>/dev/null ; do
+    sleep 1
+done
diff --git a/tools/arvbox/lib/arvbox/docker/yml_override.py b/tools/arvbox/lib/arvbox/docker/yml_override.py
new file mode 100755 (executable)
index 0000000..b44acf4
--- /dev/null
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import yaml
+import sys
+
+fn = sys.argv[1]
+
+try:
+    with open(fn+".override") as f:
+        b = yaml.load(f)
+except IOError:
+    exit()
+
+with open(fn) as f:
+    a = yaml.load(f)
+
+def recursiveMerge(a, b):
+    if isinstance(a, dict) and isinstance(b, dict):
+        for k in b:
+            print(k)  # log each key being overridden
+            a[k] = recursiveMerge(a.get(k), b[k])
+        return a
+    else:
+        return b
+
+with open(fn, "w") as f:
+    yaml.dump(recursiveMerge(a, b), f)
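A hypothetical run of yml_override.py, showing how the recursive merge replaces individual keys while leaving the rest of the file intact (values illustrative):

    cat > application.yml <<EOF
    development:
      secret_token: abc
      force_ssl: false
    EOF
    cat > application.yml.override <<EOF
    development:
      secret_token: xyz
    EOF
    ./yml_override.py application.yml
    # application.yml now has secret_token: xyz and still has force_ssl: false;
    # each merged key ("development", "secret_token") is printed along the way.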
diff --git a/tools/crunchstat-summary/.gitignore b/tools/crunchstat-summary/.gitignore
new file mode 100644 (file)
index 0000000..2247d5f
--- /dev/null
@@ -0,0 +1,2 @@
+/build
+/dist
diff --git a/tools/crunchstat-summary/MANIFEST.in b/tools/crunchstat-summary/MANIFEST.in
new file mode 100644 (file)
index 0000000..764a473
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+include agpl-3.0.txt
+include crunchstat_summary/dygraphs.js
+include crunchstat_summary/synchronizer.js
+include arvados_version.py
\ No newline at end of file
diff --git a/tools/crunchstat-summary/README.rst b/tools/crunchstat-summary/README.rst
new file mode 100644 (file)
index 0000000..fa8e1bd
--- /dev/null
@@ -0,0 +1,5 @@
+.. Copyright (C) The Arvados Authors. All rights reserved.
+..
+.. SPDX-License-Identifier: Apache-2.0
+
+Arvados Crunchstat Summary.
diff --git a/tools/crunchstat-summary/agpl-3.0.txt b/tools/crunchstat-summary/agpl-3.0.txt
new file mode 100644 (file)
index 0000000..dba13ed
--- /dev/null
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/tools/crunchstat-summary/arvados_version.py b/tools/crunchstat-summary/arvados_version.py
new file mode 100644 (file)
index 0000000..2e6484c
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import subprocess
+import time
+import os
+import re
+
+def git_latest_tag():
+    gittags = subprocess.check_output(['git', 'tag', '-l']).split()
+    gittags.sort(key=lambda s: [int(u) for u in s.split(b'.')], reverse=True)
+    return str(next(iter(gittags)).decode('utf-8'))
+
+def git_timestamp_tag():
+    gitinfo = subprocess.check_output(
+        ['git', 'log', '--first-parent', '--max-count=1',
+         '--format=format:%ct', '.']).strip()
+    return str(time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo))))
+
+def save_version(setup_dir, module, v):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'w') as fp:
+        return fp.write("__version__ = '%s'\n" % v)
+
+def read_version(setup_dir, module):
+    with open(os.path.join(setup_dir, module, "_version.py"), 'r') as fp:
+        return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+
+def get_version(setup_dir, module):
+    env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
+
+    if env_version:
+        save_version(setup_dir, module, env_version)
+    else:
+        try:
+            save_version(setup_dir, module, git_latest_tag() + git_timestamp_tag())
+        except subprocess.CalledProcessError:
+            pass
+
+    return read_version(setup_dir, module)
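get_version() resolves the package version in two steps: an explicit ARVADOS_BUILDING_VERSION in the environment wins; otherwise the version is derived from git as <latest tag>.<first-parent commit timestamp>, and either way the result is round-tripped through the module's _version.py. A minimal sketch of the calling convention, mirroring the setup.py further below (the printed value is illustrative):

    import os
    import arvados_version

    SETUP_DIR = os.path.dirname(__file__) or '.'
    print(arvados_version.get_version(SETUP_DIR, 'crunchstat_summary'))
    # e.g. 1.3.1.20190314141126 (latest tag + UTC commit timestamp)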
diff --git a/tools/crunchstat-summary/bin/crunchstat-summary b/tools/crunchstat-summary/bin/crunchstat-summary
new file mode 100755 (executable)
index 0000000..0ccb898
--- /dev/null
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import print_function
+
+import crunchstat_summary.command
+import crunchstat_summary.summarizer
+import logging
+import sys
+
+logging.getLogger().addHandler(logging.StreamHandler())
+
+args = crunchstat_summary.command.ArgumentParser().parse_args(sys.argv[1:])
+cmd = crunchstat_summary.command.Command(args)
+cmd.run()
+print(cmd.report(), end='')
diff --git a/tools/crunchstat-summary/crunchstat_summary/__init__.py b/tools/crunchstat-summary/crunchstat_summary/__init__.py
new file mode 100644 (file)
index 0000000..9bdf358
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import logging
+
+logger = logging.getLogger(__name__)
+logger.addHandler(logging.NullHandler())
diff --git a/tools/crunchstat-summary/crunchstat_summary/command.py b/tools/crunchstat-summary/crunchstat_summary/command.py
new file mode 100644 (file)
index 0000000..71bf383
--- /dev/null
@@ -0,0 +1,72 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import argparse
+import gzip
+import logging
+import sys
+
+from crunchstat_summary import logger, summarizer
+
+
+class ArgumentParser(argparse.ArgumentParser):
+    def __init__(self):
+        super(ArgumentParser, self).__init__(
+            description='Summarize resource usage of an Arvados Crunch job')
+        src = self.add_mutually_exclusive_group()
+        src.add_argument(
+            '--job', '--container', '--container-request',
+            type=str, metavar='UUID',
+            help='Look up the specified job, container, or container request '
+            'and read its log data from Keep (or from the Arvados event log, '
+            'if the job is still running)')
+        src.add_argument(
+            '--pipeline-instance', type=str, metavar='UUID',
+            help='Summarize each component of the given pipeline instance')
+        src.add_argument(
+            '--log-file', type=str,
+            help='Read log data from a regular file')
+        self.add_argument(
+            '--skip-child-jobs', action='store_true',
+            help='Do not include stats from child jobs/containers')
+        self.add_argument(
+            '--format', type=str, choices=('html', 'text'), default='text',
+            help='Report format')
+        self.add_argument(
+            '--threads', type=int, default=8,
+            help='Maximum worker threads to run')
+        self.add_argument(
+            '--verbose', '-v', action='count', default=0,
+            help='Log more information (once for progress, twice for debug)')
+
+
+class Command(object):
+    def __init__(self, args):
+        self.args = args
+        logger.setLevel(logging.WARNING - 10 * args.verbose)
+
+    def run(self):
+        kwargs = {
+            'skip_child_jobs': self.args.skip_child_jobs,
+            'threads': self.args.threads,
+        }
+        if self.args.pipeline_instance:
+            self.summer = summarizer.NewSummarizer(self.args.pipeline_instance, **kwargs)
+        elif self.args.job:
+            self.summer = summarizer.NewSummarizer(self.args.job, **kwargs)
+        elif self.args.log_file:
+            if self.args.log_file.endswith('.gz'):
+                fh = gzip.open(self.args.log_file)
+            else:
+                fh = open(self.args.log_file)
+            self.summer = summarizer.Summarizer(fh, **kwargs)
+        else:
+            self.summer = summarizer.Summarizer(sys.stdin, **kwargs)
+        return self.summer.run()
+
+    def report(self):
+        if self.args.format == 'html':
+            return self.summer.html_report()
+        elif self.args.format == 'text':
+            return self.summer.text_report()
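The same ArgumentParser/Command pair that backs bin/crunchstat-summary can be driven programmatically. A short sketch, assuming a local gzipped crunchstat log named container_log.txt.gz:

    import sys
    import crunchstat_summary.command

    # Any plain or gzipped crunchstat log works here.
    args = crunchstat_summary.command.ArgumentParser().parse_args(
        ['--log-file', 'container_log.txt.gz', '--format', 'text'])
    cmd = crunchstat_summary.command.Command(args)
    cmd.run()
    sys.stdout.write(cmd.report())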
diff --git a/tools/crunchstat-summary/crunchstat_summary/dygraphs.js b/tools/crunchstat-summary/crunchstat_summary/dygraphs.js
new file mode 100644 (file)
index 0000000..52e5534
--- /dev/null
@@ -0,0 +1,80 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+window.onload = function() {
+    var charts = {};
+    var fmt = {
+        iso: function(y) {
+            var s='';
+            if (y > 1000000000) { y=y/1000000000; s='G'; }
+            else if (y > 1000000) { y=y/1000000; s='M'; }
+            else if (y > 1000) { y=y/1000; s='K'; }
+            return y.toFixed(2).replace(/\.0+$/, '')+s;
+        },
+        time: function(s) {
+            var ret = ''
+            if (s >= 86400) ret += Math.floor(s/86400) + 'd'
+            if (s >= 3600) ret += Math.floor(s/3600)%24 + 'h'
+            if (s >= 60) ret += Math.floor(s/60)%60 + 'm'
+            ret += Math.floor(s)%60 + 's'
+            // finally, strip trailing zeroes: 1d0m0s -> 1d
+            return ret.replace(/(\D)(0\D)*$/, '$1')
+        },
+    }
+    var ticker = {
+        time: function(min, max, pixels, opts, dg) {
+            var max_ticks = Math.floor(pixels / opts('pixelsPerLabel'))
+            var natural = [1, 5, 10, 30, 60,
+                           120, 300, 600, 1800, 3600,
+                           7200, 14400, 43200, 86400]
+            var interval = natural.shift()
+            while (max>min && (max-min)/interval > max_ticks) {
+                interval = natural.shift() || (interval * 2)
+            }
+            var ticks = []
+            for (var i=Math.ceil(min/interval)*interval; i<=max; i+=interval) {
+                ticks.push({v: i, label: fmt.time(i)})
+            }
+            return ticks
+        },
+    }
+    chartdata.forEach(function(section, section_idx) {
+        var h1 = document.createElement('h1');
+        h1.appendChild(document.createTextNode(section.label));
+        document.body.appendChild(h1);
+        section.charts.forEach(function(chart, chart_idx) {
+            // Skip chart if every series has zero data points
+            if (0 == chart.data.reduce(function(len, series) {
+                return len + series.length;
+            }, 0)) {
+                return;
+            }
+            var id = 'chart-'+section_idx+'-'+chart_idx;
+            var div = document.createElement('div');
+            div.setAttribute('id', id);
+            div.setAttribute('style', 'width: 100%; height: 150px');
+            document.body.appendChild(div);
+            // Axis-specific valueFormatters are assigned below; a blanket
+            // chart-level one would override them and return undefined.
+            chart.options.axes = {
+                x: {
+                    axisLabelFormatter: fmt.time,
+                    valueFormatter: fmt.time,
+                    ticker: ticker.time,
+                },
+                y: {
+                    axisLabelFormatter: fmt.iso,
+                    valueFormatter: fmt.iso,
+                },
+            }
+            charts[id] = new Dygraph(div, chart.data, chart.options);
+        });
+    });
+
+    var sync = Dygraph.synchronize(Object.values(charts), {range: false});
+
+    if (typeof window.debug === 'undefined')
+        window.debug = {};
+    window.debug.charts = charts;
+};
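The x-axis ticker searches a list of natural intervals (1 second up to 1 day) for the smallest one that keeps the label count within the pixel budget, doubling past a day if none fits. The same search, re-expressed in Python purely for illustration:

    def pick_interval(min_s, max_s, max_ticks):
        # Smallest natural interval yielding at most max_ticks labels.
        natural = [1, 5, 10, 30, 60, 120, 300, 600, 1800, 3600,
                   7200, 14400, 43200, 86400]
        interval = natural.pop(0)
        while max_s > min_s and (max_s - min_s) / interval > max_ticks:
            interval = natural.pop(0) if natural else interval * 2
        return interval

    print(pick_interval(0, 7200, 10))  # -> 1800: one label per 30 minutes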
diff --git a/tools/crunchstat-summary/crunchstat_summary/dygraphs.py b/tools/crunchstat-summary/crunchstat_summary/dygraphs.py
new file mode 100644 (file)
index 0000000..1314e9d
--- /dev/null
@@ -0,0 +1,33 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import crunchstat_summary.webchart
+
+
+class DygraphsChart(crunchstat_summary.webchart.WebChart):
+    CSS = 'https://cdnjs.cloudflare.com/ajax/libs/dygraph/2.0.0/dygraph.min.css'
+    JSLIB = 'https://cdnjs.cloudflare.com/ajax/libs/dygraph/2.0.0/dygraph.min.js'
+    JSASSETS = ['synchronizer.js','dygraphs.js']
+
+    def headHTML(self):
+        return '<link rel="stylesheet" href="{}">\n'.format(self.CSS)
+
+    def chartdata(self, label, tasks, stat):
+        return {
+            'data': self._collate_data(tasks, stat),
+            'options': {
+                'connectSeparatedPoints': True,
+                'labels': ['elapsed']+[uuid for uuid, _ in tasks.iteritems()],
+                'title': '{}: {} {}'.format(label, stat[0], stat[1]),
+            },
+        }
+
+    def _collate_data(self, tasks, stat):
+        data = []
+        nulls = []
+        for uuid, task in tasks.iteritems():
+            for pt in task.series[stat]:
+                data.append([pt[0].total_seconds()] + nulls + [pt[1]])
+            nulls.append(None)
+        return sorted(data)
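chartdata() returns one Dygraph-ready matrix per (label, stat) pair: _collate_data() gives each task its own column, left-padding later tasks' rows with None so columns stay aligned, and connectSeparatedPoints bridges the resulting gaps. An invented example for two tasks:

    # Shape of chartdata('label', tasks, ('cpu', 'user+sys__rate')):
    {
        'data': [
            [0.0, 1.95],          # task 1 rows: [elapsed seconds, value]
            [10.0, 2.01],
            [12.0, None, 0.88],   # task 2 rows are left-padded with None
        ],
        'options': {
            'connectSeparatedPoints': True,
            'labels': ['elapsed', 'zzzzz-ot0gb-task1uuid0000000',
                       'zzzzz-ot0gb-task2uuid0000000'],
            'title': 'label: cpu user+sys__rate',
        },
    }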
diff --git a/tools/crunchstat-summary/crunchstat_summary/reader.py b/tools/crunchstat-summary/crunchstat_summary/reader.py
new file mode 100644 (file)
index 0000000..311c006
--- /dev/null
@@ -0,0 +1,107 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import print_function
+
+import arvados
+import itertools
+import Queue
+import threading
+
+from crunchstat_summary import logger
+
+
+class CollectionReader(object):
+    def __init__(self, collection_id):
+        self._collection_id = collection_id
+        self._label = collection_id
+        self._readers = []
+
+    def __str__(self):
+        return self._label
+
+    def __iter__(self):
+        logger.debug('load collection %s', self._collection_id)
+        collection = arvados.collection.CollectionReader(self._collection_id)
+        filenames = [filename for filename in collection]
+        # Crunch2 has multiple stats files
+        if len(filenames) > 1:
+            filenames = ['crunchstat.txt', 'arv-mount.txt']
+        for filename in filenames:
+            try:
+                self._readers.append(collection.open(filename))
+            except IOError:
+                logger.warning('Unable to open %s', filename)
+        self._label = "{}/{}".format(self._collection_id, filenames[0])
+        return itertools.chain(*[iter(reader) for reader in self._readers])
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self._readers:
+            for reader in self._readers:
+                reader.close()
+            self._readers = []
+
+
+class LiveLogReader(object):
+    EOF = None
+
+    def __init__(self, job_uuid):
+        self.job_uuid = job_uuid
+        self.event_types = (['stderr'] if '-8i9sb-' in job_uuid else ['crunchstat', 'arv-mount'])
+        logger.debug('load %s events for job %s', self.event_types, self.job_uuid)
+
+    def __str__(self):
+        return self.job_uuid
+
+    def _get_all_pages(self):
+        got = 0
+        last_id = 0
+        filters = [
+            ['object_uuid', '=', self.job_uuid],
+            ['event_type', 'in', self.event_types]]
+        try:
+            while True:
+                page = arvados.api().logs().index(
+                    limit=1000,
+                    order=['id asc'],
+                    filters=filters + [['id','>',str(last_id)]],
+                    select=['id', 'properties'],
+                ).execute(num_retries=2)
+                got += len(page['items'])
+                logger.debug(
+                    '%s: received %d of %d log events',
+                    self.job_uuid, got,
+                    got + page['items_available'] - len(page['items']))
+                for i in page['items']:
+                    for line in i['properties']['text'].split('\n'):
+                        self._queue.put(line+'\n')
+                    last_id = i['id']
+                if (len(page['items']) == 0 or
+                    len(page['items']) >= page['items_available']):
+                    break
+        finally:
+            self._queue.put(self.EOF)
+
+    def __iter__(self):
+        self._queue = Queue.Queue()
+        self._thread = threading.Thread(target=self._get_all_pages)
+        self._thread.daemon = True
+        self._thread.start()
+        return self
+
+    def next(self):
+        line = self._queue.get()
+        if line is self.EOF:
+            self._thread.join()
+            raise StopIteration
+        return line
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        pass
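Both readers present the same minimal contract to the summarizer: a context manager that iterates log lines. CollectionReader serves finished logs out of Keep; LiveLogReader pages the event log on a background thread for jobs that are still running. A sketch with placeholder identifiers:

    import crunchstat_summary.reader

    # Placeholder uuids: a log collection and a still-running job.
    rdr = crunchstat_summary.reader.CollectionReader('zzzzz-4zz18-012345678901234')
    with rdr as logdata:
        for line in logdata:
            pass  # each line is one crunchstat log line

    live = crunchstat_summary.reader.LiveLogReader('zzzzz-8i9sb-012345678901234')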
diff --git a/tools/crunchstat-summary/crunchstat_summary/summarizer.py b/tools/crunchstat-summary/crunchstat_summary/summarizer.py
new file mode 100644 (file)
index 0000000..b2f6f1b
--- /dev/null
@@ -0,0 +1,671 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import print_function
+
+import arvados
+import collections
+import crunchstat_summary.dygraphs
+import crunchstat_summary.reader
+import datetime
+import functools
+import itertools
+import math
+import re
+import sys
+import threading
+import _strptime
+
+from arvados.api import OrderedJsonModel
+from crunchstat_summary import logger
+
+# Recommend memory constraints that are this multiple of an integral
+# number of GiB. (Actual nodes tend to be sold in sizes like 8 GiB
+# that have amounts like 7.5 GiB according to the kernel.)
+AVAILABLE_RAM_RATIO = 0.95
+
+
+# Workaround datetime.datetime.strptime() thread-safety bug by calling
+# it once before starting threads.  https://bugs.python.org/issue7980
+datetime.datetime.strptime('1999-12-31_23:59:59', '%Y-%m-%d_%H:%M:%S')
+
+
+WEBCHART_CLASS = crunchstat_summary.dygraphs.DygraphsChart
+
+
+class Task(object):
+    def __init__(self):
+        self.starttime = None
+        self.finishtime = None
+        self.series = collections.defaultdict(list)
+
+
+class Summarizer(object):
+    def __init__(self, logdata, label=None, skip_child_jobs=False, uuid=None, **kwargs):
+        self._logdata = logdata
+
+        self.uuid = uuid
+        self.label = label
+        self.starttime = None
+        self.finishtime = None
+        self._skip_child_jobs = skip_child_jobs
+
+        # stats_max: {category: {stat: val}}
+        self.stats_max = collections.defaultdict(
+            functools.partial(collections.defaultdict, lambda: 0))
+        # task_stats: {task_id: {category: {stat: val}}}
+        self.task_stats = collections.defaultdict(
+            functools.partial(collections.defaultdict, dict))
+
+        self.seq_to_uuid = {}
+        self.tasks = collections.defaultdict(Task)
+
+        # We won't bother recommending new runtime constraints if the
+        # constraints given when running the job are known to us and
+        # are already suitable.  If applicable, the subclass
+        # constructor will overwrite this with something useful.
+        self.existing_constraints = {}
+
+        logger.debug("%s: logdata %s", self.label, logdata)
+
+    def run(self):
+        logger.debug("%s: parsing logdata %s", self.label, self._logdata)
+        with self._logdata as logdata:
+            self._run(logdata)
+
+    def _run(self, logdata):
+        self.detected_crunch1 = False
+        for line in logdata:
+            if not self.detected_crunch1 and '-8i9sb-' in line:
+                self.detected_crunch1 = True
+
+            if self.detected_crunch1:
+                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) job_task (?P<task_uuid>\S+)$', line)
+                if m:
+                    seq = int(m.group('seq'))
+                    uuid = m.group('task_uuid')
+                    self.seq_to_uuid[seq] = uuid
+                    logger.debug('%s: seq %d is task %s', self.label, seq, uuid)
+                    continue
+
+                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) (success in|failure \(#., permanent\) after) (?P<elapsed>\d+) seconds', line)
+                if m:
+                    task_id = self.seq_to_uuid[int(m.group('seq'))]
+                    elapsed = int(m.group('elapsed'))
+                    self.task_stats[task_id]['time'] = {'elapsed': elapsed}
+                    if elapsed > self.stats_max['time']['elapsed']:
+                        self.stats_max['time']['elapsed'] = elapsed
+                    continue
+
+                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) stderr Queued job (?P<uuid>\S+)$', line)
+                if m:
+                    uuid = m.group('uuid')
+                    if self._skip_child_jobs:
+                        logger.warning('%s: omitting stats from child job %s'
+                                       ' because --skip-child-jobs flag is on',
+                                       self.label, uuid)
+                        continue
+                    logger.debug('%s: follow %s', self.label, uuid)
+                    child_summarizer = ProcessSummarizer(uuid)
+                    child_summarizer.stats_max = self.stats_max
+                    child_summarizer.task_stats = self.task_stats
+                    child_summarizer.tasks = self.tasks
+                    child_summarizer.starttime = self.starttime
+                    child_summarizer.run()
+                    logger.debug('%s: done %s', self.label, uuid)
+                    continue
+
+                # 2017-12-02_17:15:08 e51c5-8i9sb-mfp68stkxnqdd6m 63676 0 stderr crunchstat: keepcalls 0 put 2576 get -- interval 10.0000 seconds 0 put 2576 get
+                m = re.search(r'^(?P<timestamp>[^\s.]+)(\.\d+)? (?P<job_uuid>\S+) \d+ (?P<seq>\d+) stderr (?P<crunchstat>crunchstat: )(?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
+                if not m:
+                    continue
+            else:
+                # crunch2
+                # 2017-12-01T16:56:24.723509200Z crunchstat: keepcalls 0 put 3 get -- interval 10.0000 seconds 0 put 3 get
+                m = re.search(r'^(?P<timestamp>\S+) (?P<crunchstat>crunchstat: )?(?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
+                if not m:
+                    continue
+
+            if self.label is None:
+                try:
+                    self.label = m.group('job_uuid')
+                except IndexError:
+                    self.label = 'container'
+            if m.group('category').endswith(':'):
+                # "stderr crunchstat: notice: ..."
+                continue
+            elif m.group('category') in ('error', 'caught'):
+                continue
+            elif m.group('category') in ('read', 'open', 'cgroup', 'CID', 'Running'):
+                # "stderr crunchstat: read /proc/1234/net/dev: ..."
+                # (old logs are less careful with unprefixed error messages)
+                continue
+
+            if self.detected_crunch1:
+                task_id = self.seq_to_uuid[int(m.group('seq'))]
+            else:
+                task_id = 'container'
+            task = self.tasks[task_id]
+
+            # Use the first and last crunchstat timestamps as
+            # approximations of starttime and finishtime.
+            timestamp = m.group('timestamp')
+            if timestamp[10:11] == '_':
+                timestamp = datetime.datetime.strptime(
+                    timestamp, '%Y-%m-%d_%H:%M:%S')
+            elif timestamp[10:11] == 'T':
+                timestamp = datetime.datetime.strptime(
+                    timestamp[:19], '%Y-%m-%dT%H:%M:%S')
+            else:
+                raise ValueError("Cannot parse timestamp {!r}".format(
+                    timestamp))
+
+            if task.starttime is None:
+                logger.debug('%s: task %s starttime %s',
+                             self.label, task_id, timestamp)
+            if task.starttime is None or timestamp < task.starttime:
+                task.starttime = timestamp
+            if task.finishtime is None or timestamp > task.finishtime:
+                task.finishtime = timestamp
+
+            if self.starttime is None or timestamp < self.starttime:
+                self.starttime = timestamp
+            if self.finishtime is None or timestamp > self.finishtime:
+                self.finishtime = timestamp
+
+            if (not self.detected_crunch1) and task.starttime is not None and task.finishtime is not None:
+                elapsed = int((task.finishtime - task.starttime).total_seconds())
+                self.task_stats[task_id]['time'] = {'elapsed': elapsed}
+                if elapsed > self.stats_max['time']['elapsed']:
+                    self.stats_max['time']['elapsed'] = elapsed
+
+            this_interval_s = None
+            for group in ['current', 'interval']:
+                if not m.group(group):
+                    continue
+                category = m.group('category')
+                words = m.group(group).split(' ')
+                stats = {}
+                try:
+                    for val, stat in zip(words[::2], words[1::2]):
+                        if '.' in val:
+                            stats[stat] = float(val)
+                        else:
+                            stats[stat] = int(val)
+                except ValueError as e:
+                    # If the line doesn't start with 'crunchstat:' we
+                    # might have mistaken an error message for a
+                    # structured crunchstat line.
+                    if m.group("crunchstat") is None or m.group("category") == "crunchstat":
+                        logger.warning("%s: log contains message\n  %s", self.label, line)
+                    else:
+                        logger.warning(
+                            '%s: Error parsing value %r (stat %r, category %r): %r',
+                            self.label, val, stat, category, e)
+                        logger.warning('%s', line)
+                    continue
+                if 'user' in stats or 'sys' in stats:
+                    stats['user+sys'] = stats.get('user', 0) + stats.get('sys', 0)
+                if 'tx' in stats or 'rx' in stats:
+                    stats['tx+rx'] = stats.get('tx', 0) + stats.get('rx', 0)
+                for stat, val in stats.iteritems():
+                    if group == 'interval':
+                        if stat == 'seconds':
+                            this_interval_s = val
+                            continue
+                        elif not (this_interval_s > 0):
+                            logger.error(
+                                "BUG? interval stat given with duration {!r}".
+                                format(this_interval_s))
+                            continue
+                        else:
+                            stat = stat + '__rate'
+                            val = val / this_interval_s
+                            if stat in ['user+sys__rate', 'tx+rx__rate']:
+                                task.series[category, stat].append(
+                                    (timestamp - self.starttime, val))
+                    else:
+                        if stat in ['rss']:
+                            task.series[category, stat].append(
+                                (timestamp - self.starttime, val))
+                        self.task_stats[task_id][category][stat] = val
+                    if val > self.stats_max[category][stat]:
+                        self.stats_max[category][stat] = val
+        logger.debug('%s: done parsing', self.label)
+
+        self.job_tot = collections.defaultdict(
+            functools.partial(collections.defaultdict, int))
+        for task_id, task_stat in self.task_stats.iteritems():
+            for category, stat_last in task_stat.iteritems():
+                for stat, val in stat_last.iteritems():
+                    if stat in ['cpus', 'cache', 'swap', 'rss']:
+                        # meaningless stats like 16 cpu cores x 5 tasks = 80
+                        continue
+                    self.job_tot[category][stat] += val
+        logger.debug('%s: done totals', self.label)
+
+    def long_label(self):
+        label = self.label
+        if hasattr(self, 'process') and self.process['uuid'] not in label:
+            label = '{} ({})'.format(label, self.process['uuid'])
+        if self.finishtime:
+            label += ' -- elapsed time '
+            s = (self.finishtime - self.starttime).total_seconds()
+            if s > 86400:
+                label += '{}d'.format(int(s/86400))
+            if s > 3600:
+                label += '{}h'.format(int(s/3600) % 24)
+            if s > 60:
+                label += '{}m'.format(int(s/60) % 60)
+            label += '{}s'.format(int(s) % 60)
+        return label
+
+    def text_report(self):
+        if not self.tasks:
+            return "(no report generated)\n"
+        return "\n".join(itertools.chain(
+            self._text_report_gen(),
+            self._recommend_gen())) + "\n"
+
+    def html_report(self):
+        return WEBCHART_CLASS(self.label, [self]).html()
+
+    def _text_report_gen(self):
+        yield "\t".join(['category', 'metric', 'task_max', 'task_max_rate', 'job_total'])
+        for category, stat_max in sorted(self.stats_max.iteritems()):
+            for stat, val in sorted(stat_max.iteritems()):
+                if stat.endswith('__rate'):
+                    continue
+                max_rate = self._format(stat_max.get(stat+'__rate', '-'))
+                val = self._format(val)
+                tot = self._format(self.job_tot[category].get(stat, '-'))
+                yield "\t".join([category, stat, str(val), max_rate, tot])
+        for args in (
+                ('Number of tasks: {}',
+                 len(self.tasks),
+                 None),
+                ('Max CPU time spent by a single task: {}s',
+                 self.stats_max['cpu']['user+sys'],
+                 None),
+                ('Max CPU usage in a single interval: {}%',
+                 self.stats_max['cpu']['user+sys__rate'],
+                 lambda x: x * 100),
+                ('Overall CPU usage: {}%',
+                 self.job_tot['cpu']['user+sys'] /
+                 self.job_tot['time']['elapsed']
+                 if self.job_tot['time']['elapsed'] > 0 else 0,
+                 lambda x: x * 100),
+                ('Max memory used by a single task: {}GB',
+                 self.stats_max['mem']['rss'],
+                 lambda x: x / 1e9),
+                ('Max network traffic in a single task: {}GB',
+                 self.stats_max['net:eth0']['tx+rx'] +
+                 self.stats_max['net:keep0']['tx+rx'],
+                 lambda x: x / 1e9),
+                ('Max network speed in a single interval: {}MB/s',
+                 self.stats_max['net:eth0']['tx+rx__rate'] +
+                 self.stats_max['net:keep0']['tx+rx__rate'],
+                 lambda x: x / 1e6),
+                ('Keep cache miss rate {}%',
+                 (float(self.job_tot['keepcache']['miss']) /
+                 float(self.job_tot['keepcalls']['get']))
+                 if self.job_tot['keepcalls']['get'] > 0 else 0,
+                 lambda x: x * 100.0),
+                ('Keep cache utilization {}%',
+                 (float(self.job_tot['blkio:0:0']['read']) /
+                 float(self.job_tot['net:keep0']['rx']))
+                 if self.job_tot['net:keep0']['rx'] > 0 else 0,
+                 lambda x: x * 100.0)):
+            format_string, val, transform = args
+            if val == float('-Inf'):
+                continue
+            if transform:
+                val = transform(val)
+            yield "# "+format_string.format(self._format(val))
+
+    def _recommend_gen(self):
+        return itertools.chain(
+            self._recommend_cpu(),
+            self._recommend_ram(),
+            self._recommend_keep_cache())
+
+    def _recommend_cpu(self):
+        """Recommend asking for 4 cores if max CPU usage was 333%"""
+
+        constraint_key = self._map_runtime_constraint('vcpus')
+        cpu_max_rate = self.stats_max['cpu']['user+sys__rate']
+        if cpu_max_rate == float('-Inf'):
+            logger.warning('%s: no CPU usage data', self.label)
+            return
+        used_cores = max(1, int(math.ceil(cpu_max_rate)))
+        asked_cores = self.existing_constraints.get(constraint_key)
+        if asked_cores is None or used_cores < asked_cores:
+            yield (
+                '#!! {} max CPU usage was {}% -- '
+                'try runtime_constraints "{}":{}'
+            ).format(
+                self.label,
+                int(math.ceil(cpu_max_rate*100)),
+                constraint_key,
+                int(used_cores))
+
+    def _recommend_ram(self):
+        """Recommend an economical RAM constraint for this job.
+
+        Nodes that are advertised as "8 gibibytes" actually have what
+        we might call "8 nearlygibs" of memory available for jobs.
+        Here, we calculate a whole number of nearlygibs that would
+        have sufficed to run the job, then recommend requesting a node
+        with that number of nearlygibs (expressed as mebibytes).
+
+        Requesting a node with "nearly 8 gibibytes" is our best hope
+        of getting a node that actually has nearly 8 gibibytes
+        available.  If the node manager is smart enough to account for
+        the discrepancy itself when choosing/creating a node, we'll
+        get an 8 GiB node with nearly 8 GiB available.  Otherwise, the
+        advertised size of the next-size-smaller node (say, 6 GiB)
+        will be too low to satisfy our request, so we will effectively
+        get rounded up to 8 GiB.
+
+        For example, if we need 7500 MiB, we can ask for 7500 MiB, and
+        we will generally get a node that is advertised as "8 GiB" and
+        has at least 7500 MiB available.  However, asking for 8192 MiB
+        would either result in an unnecessarily expensive 12 GiB node
+        (if node manager knows about the discrepancy), or an 8 GiB
+        node which has less than 8192 MiB available and is therefore
+        considered by crunch-dispatch to be too small to meet our
+        constraint.
+
+        When node manager learns how to predict the available memory
+        for each node type such that crunch-dispatch always agrees
+        that a node is big enough to run the job it was brought up
+        for, all this will be unnecessary.  We'll just ask for exactly
+        the memory we want -- even if that happens to be 8192 MiB.
+        """
+
+        constraint_key = self._map_runtime_constraint('ram')
+        used_bytes = self.stats_max['mem']['rss']
+        if used_bytes == float('-Inf'):
+            logger.warning('%s: no memory usage data', self.label)
+            return
+        used_mib = math.ceil(float(used_bytes) / 1048576)
+        asked_mib = self.existing_constraints.get(constraint_key)
+
+        nearlygibs = lambda mebibytes: mebibytes/AVAILABLE_RAM_RATIO/1024
+        if asked_mib is None or (
+                math.ceil(nearlygibs(used_mib)) < nearlygibs(asked_mib)):
+            yield (
+                '#!! {} max RSS was {} MiB -- '
+                'try runtime_constraints "{}":{}'
+            ).format(
+                self.label,
+                int(used_mib),
+                constraint_key,
+                int(math.ceil(nearlygibs(used_mib))*AVAILABLE_RAM_RATIO*1024*(2**20)/self._runtime_constraint_mem_unit()))
+
+    def _recommend_keep_cache(self):
+        """Recommend increasing keep cache if utilization < 80%"""
+        constraint_key = self._map_runtime_constraint('keep_cache_ram')
+        if self.job_tot['net:keep0']['rx'] == 0:
+            return
+        utilization = (float(self.job_tot['blkio:0:0']['read']) /
+                       float(self.job_tot['net:keep0']['rx']))
+        asked_mib = self.existing_constraints.get(constraint_key, 256)
+
+        if utilization < 0.8:
+            yield (
+                '#!! {} Keep cache utilization was {:.2f}% -- '
+                'try runtime_constraints "{}":{} (or more)'
+            ).format(
+                self.label,
+                utilization * 100.0,
+                constraint_key,
+                asked_mib*2*(2**20)/self._runtime_constraint_mem_unit())
+
+
+    def _format(self, val):
+        """Return a string representation of a stat.
+
+        {:.2f} for floats, default format for everything else."""
+        if isinstance(val, float):
+            return '{:.2f}'.format(val)
+        else:
+            return '{}'.format(val)
+
+    def _runtime_constraint_mem_unit(self):
+        if hasattr(self, 'runtime_constraint_mem_unit'):
+            return self.runtime_constraint_mem_unit
+        elif self.detected_crunch1:
+            return JobSummarizer.runtime_constraint_mem_unit
+        else:
+            return ContainerSummarizer.runtime_constraint_mem_unit
+
+    def _map_runtime_constraint(self, key):
+        if hasattr(self, 'map_runtime_constraint'):
+            return self.map_runtime_constraint[key]
+        elif self.detected_crunch1:
+            return JobSummarizer.map_runtime_constraint[key]
+        else:
+            return key
+
+
+class CollectionSummarizer(Summarizer):
+    def __init__(self, collection_id, **kwargs):
+        super(CollectionSummarizer, self).__init__(
+            crunchstat_summary.reader.CollectionReader(collection_id), **kwargs)
+        self.label = collection_id
+
+
+def NewSummarizer(process_or_uuid, **kwargs):
+    """Construct with the appropriate subclass for this uuid/object."""
+
+    if isinstance(process_or_uuid, dict):
+        process = process_or_uuid
+        uuid = process['uuid']
+    else:
+        uuid = process_or_uuid
+        process = None
+        arv = arvados.api('v1', model=OrderedJsonModel())
+
+    if '-dz642-' in uuid:
+        if process is None:
+            process = arv.containers().get(uuid=uuid).execute()
+        klass = ContainerTreeSummarizer
+    elif '-xvhdp-' in uuid:
+        if process is None:
+            process = arv.container_requests().get(uuid=uuid).execute()
+        klass = ContainerTreeSummarizer
+    elif '-8i9sb-' in uuid:
+        if process is None:
+            process = arv.jobs().get(uuid=uuid).execute()
+        klass = JobTreeSummarizer
+    elif '-d1hrv-' in uuid:
+        if process is None:
+            process = arv.pipeline_instances().get(uuid=uuid).execute()
+        klass = PipelineSummarizer
+    elif '-4zz18-' in uuid:
+        return CollectionSummarizer(collection_id=uuid)
+    else:
+        raise ValueError("Unrecognized uuid %s" % uuid)
+    return klass(process, uuid=uuid, **kwargs)
+
+
+class ProcessSummarizer(Summarizer):
+    """Process is a job, pipeline, container, or container request."""
+
+    def __init__(self, process, label=None, **kwargs):
+        rdr = None
+        self.process = process
+        if label is None:
+            label = self.process.get('name', self.process['uuid'])
+        if self.process.get('log'):
+            try:
+                rdr = crunchstat_summary.reader.CollectionReader(self.process['log'])
+            except arvados.errors.NotFoundError as e:
+                logger.warning("Trying event logs after failing to read "
+                               "log collection %s: %s", self.process['log'], e)
+        if rdr is None:
+            rdr = crunchstat_summary.reader.LiveLogReader(self.process['uuid'])
+            label = label + ' (partial)'
+        super(ProcessSummarizer, self).__init__(rdr, label=label, **kwargs)
+        self.existing_constraints = self.process.get('runtime_constraints', {})
+
+
+class JobSummarizer(ProcessSummarizer):
+    runtime_constraint_mem_unit = 1048576
+    map_runtime_constraint = {
+        'keep_cache_ram': 'keep_cache_mb_per_task',
+        'ram': 'min_ram_mb_per_node',
+        'vcpus': 'min_cores_per_node',
+    }
+
+
+class ContainerSummarizer(ProcessSummarizer):
+    runtime_constraint_mem_unit = 1
+
+
+class MultiSummarizer(object):
+    def __init__(self, children={}, label=None, threads=1, **kwargs):
+        self.throttle = threading.Semaphore(threads)
+        self.children = children
+        self.label = label
+
+    def run_and_release(self, target, *args, **kwargs):
+        try:
+            return target(*args, **kwargs)
+        finally:
+            self.throttle.release()
+
+    def run(self):
+        threads = []
+        for child in self.children.itervalues():
+            self.throttle.acquire()
+            t = threading.Thread(target=self.run_and_release, args=(child.run, ))
+            t.daemon = True
+            t.start()
+            threads.append(t)
+        for t in threads:
+            t.join()
+
+    def text_report(self):
+        txt = ''
+        d = self._descendants()
+        for child in d.itervalues():
+            if len(d) > 1:
+                txt += '### Summary for {} ({})\n'.format(
+                    child.label, child.process['uuid'])
+            txt += child.text_report()
+            txt += '\n'
+        return txt
+
+    def _descendants(self):
+        """Dict of self and all descendants.
+
+        Nodes with nothing of their own to report (like
+        MultiSummarizers) are omitted.
+        """
+        d = collections.OrderedDict()
+        for key, child in self.children.iteritems():
+            if isinstance(child, Summarizer):
+                d[key] = child
+            if isinstance(child, MultiSummarizer):
+                d.update(child._descendants())
+        return d
+
+    def html_report(self):
+        return WEBCHART_CLASS(self.label, self._descendants().itervalues()).html()
+
+
+class JobTreeSummarizer(MultiSummarizer):
+    """Summarizes a job and all children listed in its components field."""
+    def __init__(self, job, label=None, **kwargs):
+        arv = arvados.api('v1', model=OrderedJsonModel())
+        label = label or job.get('name', job['uuid'])
+        children = collections.OrderedDict()
+        children[job['uuid']] = JobSummarizer(job, label=label, **kwargs)
+        if job.get('components', None):
+            preloaded = {}
+            for j in arv.jobs().index(
+                    limit=len(job['components']),
+                    filters=[['uuid','in',job['components'].values()]]).execute()['items']:
+                preloaded[j['uuid']] = j
+            for cname in sorted(job['components'].keys()):
+                child_uuid = job['components'][cname]
+                j = (preloaded.get(child_uuid) or
+                     arv.jobs().get(uuid=child_uuid).execute())
+                children[child_uuid] = JobTreeSummarizer(job=j, label=cname, **kwargs)
+
+        super(JobTreeSummarizer, self).__init__(
+            children=children,
+            label=label,
+            **kwargs)
+
+
+class PipelineSummarizer(MultiSummarizer):
+    def __init__(self, instance, **kwargs):
+        children = collections.OrderedDict()
+        for cname, component in instance['components'].iteritems():
+            if 'job' not in component:
+                logger.warning(
+                    "%s: skipping component with no job assigned", cname)
+            else:
+                logger.info(
+                    "%s: job %s", cname, component['job']['uuid'])
+                summarizer = JobTreeSummarizer(component['job'], label=cname, **kwargs)
+                summarizer.label = '{} {}'.format(
+                    cname, component['job']['uuid'])
+                children[cname] = summarizer
+        super(PipelineSummarizer, self).__init__(
+            children=children,
+            label=instance['uuid'],
+            **kwargs)
+
+
+class ContainerTreeSummarizer(MultiSummarizer):
+    def __init__(self, root, skip_child_jobs=False, **kwargs):
+        arv = arvados.api('v1', model=OrderedJsonModel())
+
+        label = kwargs.pop('label', None) or root.get('name') or root['uuid']
+        root['name'] = label
+
+        children = collections.OrderedDict()
+        todo = collections.deque((root, ))
+        while len(todo) > 0:
+            current = todo.popleft()
+            label = current['name']
+            sort_key = current['created_at']
+            if current['uuid'].find('-xvhdp-') > 0:
+                current = arv.containers().get(uuid=current['container_uuid']).execute()
+
+            summer = ContainerSummarizer(current, label=label, **kwargs)
+            summer.sort_key = sort_key
+            children[current['uuid']] = summer
+
+            page_filters = []
+            while True:
+                child_crs = arv.container_requests().index(
+                    order=['uuid asc'],
+                    filters=page_filters+[
+                        ['requesting_container_uuid', '=', current['uuid']]],
+                ).execute()
+                if not child_crs['items']:
+                    break
+                elif skip_child_jobs:
+                    logger.warning('%s: omitting stats from %d child containers'
+                                   ' because --skip-child-jobs flag is on',
+                                   label, child_crs['items_available'])
+                    break
+                page_filters = [['uuid', '>', child_crs['items'][-1]['uuid']]]
+                for cr in child_crs['items']:
+                    if cr['container_uuid']:
+                        logger.debug('%s: container req %s', current['uuid'], cr['uuid'])
+                        cr['name'] = cr.get('name') or cr['uuid']
+                        todo.append(cr)
+        sorted_children = collections.OrderedDict()
+        for uuid in sorted(children.keys(), key=lambda uuid: children[uuid].sort_key):
+            sorted_children[uuid] = children[uuid]
+        super(ContainerTreeSummarizer, self).__init__(
+            children=sorted_children,
+            label=root['name'],
+            **kwargs)
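NewSummarizer() dispatches on the uuid infix: -dz642- (container) and -xvhdp- (container request) yield a ContainerTreeSummarizer, -8i9sb- (job) a JobTreeSummarizer, -d1hrv- (pipeline instance) a PipelineSummarizer, and -4zz18- (collection) a plain CollectionSummarizer. A sketch with a placeholder uuid, assuming a configured Arvados client (ARVADOS_API_HOST and ARVADOS_API_TOKEN set):

    from crunchstat_summary import summarizer

    s = summarizer.NewSummarizer('zzzzz-xvhdp-012345678901234')
    s.run()
    print(s.text_report())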
diff --git a/tools/crunchstat-summary/crunchstat_summary/synchronizer.js b/tools/crunchstat-summary/crunchstat_summary/synchronizer.js
new file mode 100644 (file)
index 0000000..562ee83
--- /dev/null
@@ -0,0 +1,276 @@
+// Copyright (c) 2009 Dan Vanderkam. All rights reserved.
+//
+// SPDX-License-Identifier: MIT
+
+/**
+ * Synchronize zooming and/or selections between a set of dygraphs.
+ *
+ * Usage:
+ *
+ *   var g1 = new Dygraph(...),
+ *       g2 = new Dygraph(...),
+ *       ...;
+ *   var sync = Dygraph.synchronize(g1, g2, ...);
+ *   // charts are now synchronized
+ *   sync.detach();
+ *   // charts are no longer synchronized
+ *
+ * You can set options using the last parameter, for example:
+ *
+ *   var sync = Dygraph.synchronize(g1, g2, g3, {
+ *      selection: true,
+ *      zoom: true
+ *   });
+ *
+ * The default is to synchronize both of these.
+ *
+ * Instead of passing one Dygraph object as each parameter, you may also pass an
+ * array of dygraphs:
+ *
+ *   var sync = Dygraph.synchronize([g1, g2, g3], {
+ *      selection: false,
+ *      zoom: true
+ *   });
+ *
+ * You may also set `range: false` if you wish to only sync the x-axis.
+ * The `range` option has no effect unless `zoom` is true (the default).
+ *
+ * Original source: https://github.com/danvk/dygraphs/blob/master/src/extras/synchronizer.js
+ * at commit b55a71d768d2f8de62877c32b3aec9e9975ac389
+ *
+ * Copyright (c) 2009 Dan Vanderkam
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+(function() {
+/* global Dygraph:false */
+'use strict';
+
+var Dygraph;
+if (window.Dygraph) {
+  Dygraph = window.Dygraph;
+} else if (typeof(module) !== 'undefined') {
+  Dygraph = require('../dygraph');
+}
+
+var synchronize = function(/* dygraphs..., opts */) {
+  if (arguments.length === 0) {
+    throw 'Invalid invocation of Dygraph.synchronize(). Need >= 1 argument.';
+  }
+
+  var OPTIONS = ['selection', 'zoom', 'range'];
+  var opts = {
+    selection: true,
+    zoom: true,
+    range: true
+  };
+  var dygraphs = [];
+  var prevCallbacks = [];
+
+  var parseOpts = function(obj) {
+    if (!(obj instanceof Object)) {
+      throw 'Last argument must be either Dygraph or Object.';
+    } else {
+      for (var i = 0; i < OPTIONS.length; i++) {
+        var optName = OPTIONS[i];
+        if (obj.hasOwnProperty(optName)) opts[optName] = obj[optName];
+      }
+    }
+  };
+
+  if (arguments[0] instanceof Dygraph) {
+    // Arguments are Dygraph objects.
+    for (var i = 0; i < arguments.length; i++) {
+      if (arguments[i] instanceof Dygraph) {
+        dygraphs.push(arguments[i]);
+      } else {
+        break;
+      }
+    }
+    if (i < arguments.length - 1) {
+      throw 'Invalid invocation of Dygraph.synchronize(). ' +
+            'All but the last argument must be Dygraph objects.';
+    } else if (i == arguments.length - 1) {
+      parseOpts(arguments[arguments.length - 1]);
+    }
+  } else if (arguments[0].length) {
+    // Invoked w/ list of dygraphs, options
+    for (var i = 0; i < arguments[0].length; i++) {
+      dygraphs.push(arguments[0][i]);
+    }
+    if (arguments.length == 2) {
+      parseOpts(arguments[1]);
+    } else if (arguments.length > 2) {
+      throw 'Invalid invocation of Dygraph.synchronize(). ' +
+            'Expected two arguments: array and optional options argument.';
+    }  // otherwise arguments.length == 1, which is fine.
+  } else {
+    throw 'Invalid invocation of Dygraph.synchronize(). ' +
+          'First parameter must be either Dygraph or list of Dygraphs.';
+  }
+
+  if (dygraphs.length < 2) {
+    throw 'Invalid invocation of Dygraph.synchronize(). ' +
+          'Need two or more dygraphs to synchronize.';
+  }
+
+  var readycount = dygraphs.length;
+  for (var i = 0; i < dygraphs.length; i++) {
+    var g = dygraphs[i];
+    g.ready( function() {
+      if (--readycount == 0) {
+        // store original callbacks
+        var callBackTypes = ['drawCallback', 'highlightCallback', 'unhighlightCallback'];
+        for (var j = 0; j < dygraphs.length; j++) {
+          if (!prevCallbacks[j]) {
+            prevCallbacks[j] = {};
+          }
+          for (var k = callBackTypes.length - 1; k >= 0; k--) {
+            prevCallbacks[j][callBackTypes[k]] = dygraphs[j].getFunctionOption(callBackTypes[k]);
+          }
+        }
+
+        // Listen for draw, highlight, unhighlight callbacks.
+        if (opts.zoom) {
+          attachZoomHandlers(dygraphs, opts, prevCallbacks);
+        }
+
+        if (opts.selection) {
+          attachSelectionHandlers(dygraphs, prevCallbacks);
+        }
+      }
+    });
+  }
+
+  return {
+    detach: function() {
+      for (var i = 0; i < dygraphs.length; i++) {
+        var g = dygraphs[i];
+        if (opts.zoom) {
+          g.updateOptions({drawCallback: prevCallbacks[i].drawCallback});
+        }
+        if (opts.selection) {
+          g.updateOptions({
+            highlightCallback: prevCallbacks[i].highlightCallback,
+            unhighlightCallback: prevCallbacks[i].unhighlightCallback
+          });
+        }
+      }
+      // release references & make subsequent calls throw.
+      dygraphs = null;
+      opts = null;
+      prevCallbacks = null;
+    }
+  };
+};
+
+function arraysAreEqual(a, b) {
+  if (!Array.isArray(a) || !Array.isArray(b)) return false;
+  var i = a.length;
+  if (i !== b.length) return false;
+  while (i--) {
+    if (a[i] !== b[i]) return false;
+  }
+  return true;
+}
+
+function attachZoomHandlers(gs, syncOpts, prevCallbacks) {
+  var block = false;
+  for (var i = 0; i < gs.length; i++) {
+    var g = gs[i];
+    g.updateOptions({
+      drawCallback: function(me, initial) {
+        if (block || initial) return;
+        block = true;
+        var opts = {
+          dateWindow: me.xAxisRange()
+        };
+        if (syncOpts.range) opts.valueRange = me.yAxisRange();
+
+        for (var j = 0; j < gs.length; j++) {
+          if (gs[j] == me) {
+            if (prevCallbacks[j] && prevCallbacks[j].drawCallback) {
+              prevCallbacks[j].drawCallback.apply(this, arguments);
+            }
+            continue;
+          }
+
+          // Only redraw if there are new options
+          if (arraysAreEqual(opts.dateWindow, gs[j].getOption('dateWindow')) && 
+              arraysAreEqual(opts.valueRange, gs[j].getOption('valueRange'))) {
+            continue;
+          }
+
+          gs[j].updateOptions(opts);
+        }
+        block = false;
+      }
+    }, true /* no need to redraw */);
+  }
+}
+
+function attachSelectionHandlers(gs, prevCallbacks) {
+  var block = false;
+  for (var i = 0; i < gs.length; i++) {
+    var g = gs[i];
+
+    g.updateOptions({
+      highlightCallback: function(event, x, points, row, seriesName) {
+        if (block) return;
+        block = true;
+        var me = this;
+        for (var i = 0; i < gs.length; i++) {
+          if (me == gs[i]) {
+            if (prevCallbacks[i] && prevCallbacks[i].highlightCallback) {
+              prevCallbacks[i].highlightCallback.apply(this, arguments);
+            }
+            continue;
+          }
+          var idx = gs[i].getRowForX(x);
+          if (idx !== null) {
+            gs[i].setSelection(idx, seriesName);
+          }
+        }
+        block = false;
+      },
+      unhighlightCallback: function(event) {
+        if (block) return;
+        block = true;
+        var me = this;
+        for (var i = 0; i < gs.length; i++) {
+          if (me == gs[i]) {
+            if (prevCallbacks[i] && prevCallbacks[i].unhighlightCallback) {
+              prevCallbacks[i].unhighlightCallback.apply(this, arguments);
+            }
+            continue;
+          }
+          gs[i].clearSelection();
+        }
+        block = false;
+      }
+    }, true /* no need to redraw */);
+  }
+}
+
+Dygraph.synchronize = synchronize;
+
+})();
diff --git a/tools/crunchstat-summary/crunchstat_summary/webchart.py b/tools/crunchstat-summary/crunchstat_summary/webchart.py
new file mode 100644 (file)
index 0000000..9d18883
--- /dev/null
@@ -0,0 +1,61 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import cgi
+import json
+import pkg_resources
+
+
+class WebChart(object):
+    """Base class for a web chart.
+
+    Subclasses must assign JSLIB and JSASSETS, and override the
+    chartdata() method.
+    """
+    JSLIB = None
+    JSASSETS = None
+
+    def __init__(self, label, summarizers):
+        self.label = label
+        self.summarizers = summarizers
+
+    def html(self):
+        return '''<!doctype html><html><head>
+        <title>{} stats</title>
+        <script type="text/javascript" src="{}"></script>
+        <script type="text/javascript">{}</script>
+        {}
+        </head><body></body></html>
+        '''.format(cgi.escape(self.label),
+                   self.JSLIB, self.js(), self.headHTML())
+
+    def js(self):
+        return 'var chartdata = {};\n{}'.format(
+            json.dumps(self.sections()),
+            '\n'.join([pkg_resources.resource_string('crunchstat_summary', jsa) for jsa in self.JSASSETS]))
+
+    def sections(self):
+        return [
+            {
+                'label': s.long_label(),
+                'charts': [
+                    self.chartdata(s.label, s.tasks, stat)
+                    for stat in (('cpu', 'user+sys__rate'),
+                                 ('mem', 'rss'),
+                                 ('net:eth0', 'tx+rx__rate'),
+                                 ('net:keep0', 'tx+rx__rate'))],
+            }
+            for s in self.summarizers]
+
+    def chartdata(self, label, tasks, stat):
+        """Return chart data for the given tasks.
+
+        The returned value will be available on the client side as an
+        element of the "chartdata" array.
+        """
+        raise NotImplementedError()
+
+    def headHTML(self):
+        """Return extra HTML text to include in HEAD."""
+        return ''
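A subclass only needs JSLIB, JSASSETS, and chartdata(); html() then assembles a self-contained report page around the serialized chartdata array. A minimal sketch with an invented chart-library URL:

    import crunchstat_summary.webchart

    class ExampleChart(crunchstat_summary.webchart.WebChart):
        JSLIB = 'https://example.org/chartlib.min.js'  # invented URL
        JSASSETS = []  # names of JS files shipped inside crunchstat_summary

        def chartdata(self, label, tasks, stat):
            # Anything JSON-serializable; it becomes one element of the
            # client-side "chartdata" array.
            return {'title': '{}: {} {}'.format(label, stat[0], stat[1])}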
diff --git a/tools/crunchstat-summary/fpm-info.sh b/tools/crunchstat-summary/fpm-info.sh
new file mode 100644 (file)
index 0000000..0abc6a0
--- /dev/null
@@ -0,0 +1,9 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+case "$TARGET" in
+    debian* | ubuntu*)
+        fpm_depends+=(libcurl3-gnutls)
+        ;;
+esac
diff --git a/tools/crunchstat-summary/gittaggers.py b/tools/crunchstat-summary/gittaggers.py
new file mode 120000 (symlink)
index 0000000..a9ad861
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/python/gittaggers.py
\ No newline at end of file
diff --git a/tools/crunchstat-summary/setup.py b/tools/crunchstat-summary/setup.py
new file mode 100755 (executable)
index 0000000..6424281
--- /dev/null
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+from __future__ import absolute_import
+import os
+import sys
+import re
+
+from setuptools import setup, find_packages
+
+SETUP_DIR = os.path.dirname(__file__) or '.'
+README = os.path.join(SETUP_DIR, 'README.rst')
+
+import arvados_version
+version = arvados_version.get_version(SETUP_DIR, "crunchstat_summary")
+
+short_tests_only = False
+if '--short-tests-only' in sys.argv:
+    short_tests_only = True
+    sys.argv.remove('--short-tests-only')
+
+setup(name='crunchstat_summary',
+      version=version,
+      description='Arvados crunchstat-summary reads crunch log files and summarizes resource usage',
+      author='Arvados',
+      author_email='info@arvados.org',
+      url="https://arvados.org",
+      download_url="https://github.com/curoverse/arvados.git",
+      license='GNU Affero General Public License, version 3.0',
+      packages=['crunchstat_summary'],
+      include_package_data=True,
+      scripts=[
+          'bin/crunchstat-summary'
+      ],
+      data_files=[
+          ('share/doc/crunchstat_summary', ['agpl-3.0.txt']),
+      ],
+      install_requires=[
+          'arvados-python-client',
+      ],
+      test_suite='tests',
+      tests_require=['pbr<1.7.0', 'mock>=1.0'],
+      zip_safe=False
+      )
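
After installing with this setup.py (for example, pip install . from tools/crunchstat-summary), one quick sanity check is to read back the installed version string via the standard pkg_resources API; a sketch:

    # Sketch: confirm the installed crunchstat_summary version,
    # assuming the package has already been installed.
    import pkg_resources
    print(pkg_resources.get_distribution('crunchstat_summary').version)
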
diff --git a/tools/crunchstat-summary/tests/__init__.py b/tools/crunchstat-summary/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-arv-mount.txt.gz b/tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-arv-mount.txt.gz
new file mode 100644 (file)
index 0000000..ff7dd30
Binary files /dev/null and b/tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-arv-mount.txt.gz differ
diff --git a/tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-arv-mount.txt.gz.report b/tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-arv-mount.txt.gz.report
new file mode 100644 (file)
index 0000000..9819461
--- /dev/null
@@ -0,0 +1,24 @@
+category       metric  task_max        task_max_rate   job_total
+blkio:0:0      read    0       0       0
+blkio:0:0      write   0       0       0
+fuseops        read    0       0       0
+fuseops        write   0       0       0
+keepcache      hit     0       0       0
+keepcache      miss    0       0       0
+keepcalls      get     0       0       0
+keepcalls      put     0       0       0
+net:keep0      rx      0       0       0
+net:keep0      tx      0       0       0
+net:keep0      tx+rx   0       0       0
+time   elapsed 10      -       10
+# Number of tasks: 1
+# Max CPU time spent by a single task: 0s
+# Max CPU usage in a single interval: 0%
+# Overall CPU usage: 0%
+# Max memory used by a single task: 0.00GB
+# Max network traffic in a single task: 0.00GB
+# Max network speed in a single interval: 0.00MB/s
+# Keep cache miss rate 0.00%
+# Keep cache utilization 0.00%
+#!! container max CPU usage was 0% -- try runtime_constraints "vcpus":1
+#!! container max RSS was 0 MiB -- try runtime_constraints "ram":0
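
These .report fixtures are tab-separated up to the '#' summary lines, so they are easy to post-process. A sketch (the file name is taken from the fixture above) that loads the metric rows into dicts:

    # Sketch: load the tab-separated metric rows of a .report file,
    # skipping the '#' summary and '#!!' recommendation lines.
    import csv

    with open('container_9tee4-dz642-lymtndkpy39eibk-arv-mount.txt.gz.report') as f:
        rows = [r for r in csv.reader(f, delimiter='\t')
                if r and not r[0].startswith('#')]
    header, metrics = rows[0], rows[1:]
    for row in metrics:
        print(dict(zip(header, row)))
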
diff --git a/tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-crunchstat.txt.gz b/tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-crunchstat.txt.gz
new file mode 100644 (file)
index 0000000..249ad22
Binary files /dev/null and b/tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-crunchstat.txt.gz differ
diff --git a/tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-crunchstat.txt.gz.report b/tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk-crunchstat.txt.gz.report
new file mode 100644 (file)
index 0000000..b61da15
--- /dev/null
@@ -0,0 +1,27 @@
+category       metric  task_max        task_max_rate   job_total
+cpu    cpus    20      -       -
+cpu    sys     0.39    0.04    0.39
+cpu    user    2.06    0.20    2.06
+cpu    user+sys        2.45    0.24    2.45
+mem    cache   172032  -       -
+mem    pgmajfault      0       -       0
+mem    rss     69525504        -       -
+mem    swap    0       -       -
+net:eth0       rx      859480  1478.97 859480
+net:eth0       tx      55888   395.71  55888
+net:eth0       tx+rx   915368  1874.69 915368
+statfs available       397744787456    -       397744787456
+statfs total   402611240960    -       402611240960
+statfs used    4870303744      52426.18        4866453504
+time   elapsed 20      -       20
+# Number of tasks: 1
+# Max CPU time spent by a single task: 2.45s
+# Max CPU usage in a single interval: 23.70%
+# Overall CPU usage: 12.25%
+# Max memory used by a single task: 0.07GB
+# Max network traffic in a single task: 0.00GB
+# Max network speed in a single interval: 0.00MB/s
+# Keep cache miss rate 0.00%
+# Keep cache utilization 0.00%
+#!! container max CPU usage was 24% -- try runtime_constraints "vcpus":1
+#!! container max RSS was 67 MiB -- try runtime_constraints "ram":1020054732
diff --git a/tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk.txt.gz.report b/tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk.txt.gz.report
new file mode 100644 (file)
index 0000000..9d3cd78
--- /dev/null
@@ -0,0 +1,38 @@
+category       metric  task_max        task_max_rate   job_total
+blkio:0:0      read    0       0       0
+blkio:0:0      write   0       0       0
+cpu    cpus    20      -       -
+cpu    sys     0.39    0.04    0.39
+cpu    user    2.06    0.20    2.06
+cpu    user+sys        2.45    0.24    2.45
+fuseops        read    0       0       0
+fuseops        write   0       0       0
+keepcache      hit     0       0       0
+keepcache      miss    0       0       0
+keepcalls      get     0       0       0
+keepcalls      put     0       0       0
+mem    cache   172032  -       -
+mem    pgmajfault      0       -       0
+mem    rss     69525504        -       -
+mem    swap    0       -       -
+net:eth0       rx      859480  1478.97 859480
+net:eth0       tx      55888   395.71  55888
+net:eth0       tx+rx   915368  1874.69 915368
+net:keep0      rx      0       0       0
+net:keep0      tx      0       0       0
+net:keep0      tx+rx   0       0       0
+statfs available       397744787456    -       397744787456
+statfs total   402611240960    -       402611240960
+statfs used    4870303744      52426.18        4866453504
+time   elapsed 20      -       20
+# Number of tasks: 1
+# Max CPU time spent by a single task: 2.45s
+# Max CPU usage in a single interval: 23.70%
+# Overall CPU usage: 12.25%
+# Max memory used by a single task: 0.07GB
+# Max network traffic in a single task: 0.00GB
+# Max network speed in a single interval: 0.00MB/s
+# Keep cache miss rate 0.00%
+# Keep cache utilization 0.00%
+#!! container max CPU usage was 24% -- try runtime_constraints "vcpus":1
+#!! container max RSS was 67 MiB -- try runtime_constraints "ram":1020054732
diff --git a/tools/crunchstat-summary/tests/crunchstat_error_messages.txt b/tools/crunchstat-summary/tests/crunchstat_error_messages.txt
new file mode 100644 (file)
index 0000000..bf6dd5c
--- /dev/null
@@ -0,0 +1,9 @@
+2016-01-07_00:15:33 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr 
+2016-01-07_00:15:33 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr old error message:
+2016-01-07_00:15:33 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr crunchstat: read /proc/3305/net/dev: open /proc/3305/net/dev: no such file or directory
+2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr 
+2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr new error message:
+2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr crunchstat: error reading /proc/3305/net/dev: open /proc/3305/net/dev: no such file or directory
+2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr
+2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr cancelled job:
+2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr crunchstat: caught signal: interrupt
diff --git a/tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz b/tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz
new file mode 100644 (file)
index 0000000..0042cc5
Binary files /dev/null and b/tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz differ
diff --git a/tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz.report b/tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz.report
new file mode 100644 (file)
index 0000000..f0a6095
--- /dev/null
@@ -0,0 +1,35 @@
+category       metric  task_max        task_max_rate   job_total
+blkio:0:0      read    0       0       0
+blkio:0:0      write   0       0       0
+cpu    cpus    8       -       -
+cpu    sys     1.92    0.04    1.92
+cpu    user    3.83    0.09    3.83
+cpu    user+sys        5.75    0.13    5.75
+fuseops        read    0       0       0
+fuseops        write   0       0       0
+keepcache      hit     0       0       0
+keepcache      miss    0       0       0
+keepcalls      get     0       0       0
+keepcalls      put     0       0       0
+mem    cache   1678139392      -       -
+mem    pgmajfault      0       -       0
+mem    rss     349814784       -       -
+mem    swap    0       -       -
+net:eth0       rx      1754364530      41658344.87     1754364530
+net:eth0       tx      38837956        920817.97       38837956
+net:eth0       tx+rx   1793202486      42579162.83     1793202486
+net:keep0      rx      0       0       0
+net:keep0      tx      0       0       0
+net:keep0      tx+rx   0       0       0
+time   elapsed 80      -       80
+# Number of tasks: 1
+# Max CPU time spent by a single task: 5.75s
+# Max CPU usage in a single interval: 13.00%
+# Overall CPU usage: 7.19%
+# Max memory used by a single task: 0.35GB
+# Max network traffic in a single task: 1.79GB
+# Max network speed in a single interval: 42.58MB/s
+# Keep cache miss rate 0.00%
+# Keep cache utilization 0.00%
+#!! 4xphq-8i9sb-jq0ekny1xou3zoh max CPU usage was 13% -- try runtime_constraints "min_cores_per_node":1
+#!! 4xphq-8i9sb-jq0ekny1xou3zoh max RSS was 334 MiB -- try runtime_constraints "min_ram_mb_per_node":972
diff --git a/tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz b/tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz
new file mode 100644 (file)
index 0000000..78afb98
Binary files /dev/null and b/tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz differ
diff --git a/tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz.report b/tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz.report
new file mode 100644 (file)
index 0000000..f9a34cf
--- /dev/null
@@ -0,0 +1,24 @@
+category       metric  task_max        task_max_rate   job_total
+cpu    cpus    8       -       -
+cpu    sys     0       -       0.00
+cpu    user    0       -       0.00
+cpu    user+sys        0       -       0.00
+mem    cache   12288   -       -
+mem    pgmajfault      0       -       0
+mem    rss     856064  -       -
+mem    swap    0       -       -
+net:eth0       rx      90      -       90
+net:eth0       tx      90      -       90
+net:eth0       tx+rx   180     -       180
+time   elapsed 2       -       4
+# Number of tasks: 2
+# Max CPU time spent by a single task: 0s
+# Max CPU usage in a single interval: 0%
+# Overall CPU usage: 0.00%
+# Max memory used by a single task: 0.00GB
+# Max network traffic in a single task: 0.00GB
+# Max network speed in a single interval: 0.00MB/s
+# Keep cache miss rate 0.00%
+# Keep cache utilization 0.00%
+#!! 4xphq-8i9sb-zvb2ocfycpomrup max CPU usage was 0% -- try runtime_constraints "min_cores_per_node":1
+#!! 4xphq-8i9sb-zvb2ocfycpomrup max RSS was 1 MiB -- try runtime_constraints "min_ram_mb_per_node":972
diff --git a/tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz b/tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz
new file mode 100644 (file)
index 0000000..49018f7
Binary files /dev/null and b/tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz differ
diff --git a/tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz.report b/tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz.report
new file mode 100644 (file)
index 0000000..c54102d
--- /dev/null
@@ -0,0 +1,24 @@
+category       metric  task_max        task_max_rate   job_total
+cpu    cpus    8       -       -
+cpu    sys     0       -       0.00
+cpu    user    0       -       0.00
+cpu    user+sys        0       -       0.00
+mem    cache   8192    -       -
+mem    pgmajfault      0       -       0
+mem    rss     450560  -       -
+mem    swap    0       -       -
+net:eth0       rx      90      -       90
+net:eth0       tx      90      -       90
+net:eth0       tx+rx   180     -       180
+time   elapsed 2       -       3
+# Number of tasks: 2
+# Max CPU time spent by a single task: 0s
+# Max CPU usage in a single interval: 0%
+# Overall CPU usage: 0.00%
+# Max memory used by a single task: 0.00GB
+# Max network traffic in a single task: 0.00GB
+# Max network speed in a single interval: 0.00MB/s
+# Keep cache miss rate 0.00%
+# Keep cache utilization 0.00%
+#!! 4xphq-8i9sb-v831jm2uq0g2g9x max CPU usage was 0% -- try runtime_constraints "min_cores_per_node":1
+#!! 4xphq-8i9sb-v831jm2uq0g2g9x max RSS was 1 MiB -- try runtime_constraints "min_ram_mb_per_node":972
diff --git a/tools/crunchstat-summary/tests/test_examples.py b/tools/crunchstat-summary/tests/test_examples.py
new file mode 100644 (file)
index 0000000..af92bec
--- /dev/null
@@ -0,0 +1,283 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import arvados
+import collections
+import crunchstat_summary.command
+import crunchstat_summary.summarizer
+import difflib
+import glob
+import gzip
+import mock
+import os
+import unittest
+
+TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+class ReportDiff(unittest.TestCase):
+    def diff_known_report(self, logfile, cmd):
+        expectfile = logfile+'.report'
+        expect = open(expectfile).readlines()
+        self.diff_report(cmd, expect, expectfile=expectfile)
+
+    def diff_report(self, cmd, expect, expectfile=None):
+        got = [x+"\n" for x in cmd.report().strip("\n").split("\n")]
+        self.assertEqual(got, expect, "\n"+"".join(difflib.context_diff(
+            expect, got, fromfile=expectfile, tofile="(generated)")))
+
+
+class SummarizeFile(ReportDiff):
+    def test_example_files(self):
+        for fnm in glob.glob(os.path.join(TESTS_DIR, '*.txt.gz')):
+            logfile = os.path.join(TESTS_DIR, fnm)
+            args = crunchstat_summary.command.ArgumentParser().parse_args(
+                ['--log-file', logfile])
+            cmd = crunchstat_summary.command.Command(args)
+            cmd.run()
+            self.diff_known_report(logfile, cmd)
+
+
+class HTMLFromFile(ReportDiff):
+    def test_example_files(self):
+        # Note we don't test the output content at all yet; we're
+        # mainly just verifying the --format=html option isn't ignored
+        # and the HTML code path doesn't crash.
+        for fnm in glob.glob(os.path.join(TESTS_DIR, '*.txt.gz')):
+            logfile = os.path.join(TESTS_DIR, fnm)
+            args = crunchstat_summary.command.ArgumentParser().parse_args(
+                ['--format=html', '--log-file', logfile])
+            cmd = crunchstat_summary.command.Command(args)
+            cmd.run()
+            self.assertRegexpMatches(cmd.report(), r'(?is)<html>.*</html>\s*$')
+
+
+class SummarizeEdgeCases(unittest.TestCase):
+    def test_error_messages(self):
+        logfile = open(os.path.join(TESTS_DIR, 'crunchstat_error_messages.txt'))
+        s = crunchstat_summary.summarizer.Summarizer(logfile)
+        s.run()
+
+
+class SummarizeContainer(ReportDiff):
+    fake_container = {
+        'uuid': '9tee4-dz642-lymtndkpy39eibk',
+        'created_at': '2017-08-18T14:27:25.371388141',
+        'log': '9tee4-4zz18-ihyzym9tcwjwg4r',
+    }
+    fake_request = {
+        'uuid': '9tee4-xvhdp-uper95jktm10d3w',
+        'name': 'container',
+        'created_at': '2017-08-18T14:27:25.242339223Z',
+        'container_uuid': fake_container['uuid'],
+    }
+    reportfile = os.path.join(
+        TESTS_DIR, 'container_9tee4-dz642-lymtndkpy39eibk.txt.gz')
+    logfile = os.path.join(
+        TESTS_DIR, 'container_9tee4-dz642-lymtndkpy39eibk-crunchstat.txt.gz')
+    arvmountlog = os.path.join(
+        TESTS_DIR, 'container_9tee4-dz642-lymtndkpy39eibk-arv-mount.txt.gz')
+
+    @mock.patch('arvados.collection.CollectionReader')
+    @mock.patch('arvados.api')
+    def test_container(self, mock_api, mock_cr):
+        mock_api().container_requests().index().execute.return_value = {'items':[]}
+        mock_api().container_requests().get().execute.return_value = self.fake_request
+        mock_api().containers().get().execute.return_value = self.fake_container
+        mock_cr().__iter__.return_value = [
+            'crunch-run.txt', 'stderr.txt', 'node-info.txt',
+            'container.json', 'crunchstat.txt', 'arv-mount.txt']
+        def _open(n):
+            if n == "crunchstat.txt":
+                return gzip.open(self.logfile)
+            elif n == "arv-mount.txt":
+                return gzip.open(self.arvmountlog)
+        mock_cr().open.side_effect = _open
+        args = crunchstat_summary.command.ArgumentParser().parse_args(
+            ['--job', self.fake_request['uuid']])
+        cmd = crunchstat_summary.command.Command(args)
+        cmd.run()
+        self.diff_known_report(self.reportfile, cmd)
+
+
+class SummarizeJob(ReportDiff):
+    fake_job_uuid = '4xphq-8i9sb-jq0ekny1xou3zoh'
+    fake_log_id = 'fake-log-collection-id'
+    fake_job = {
+        'uuid': fake_job_uuid,
+        'log': fake_log_id,
+    }
+    logfile = os.path.join(TESTS_DIR, 'logfile_20151204190335.txt.gz')
+
+    @mock.patch('arvados.collection.CollectionReader')
+    @mock.patch('arvados.api')
+    def test_job_report(self, mock_api, mock_cr):
+        mock_api().jobs().get().execute.return_value = self.fake_job
+        mock_cr().__iter__.return_value = ['fake-logfile.txt']
+        mock_cr().open.return_value = gzip.open(self.logfile)
+        args = crunchstat_summary.command.ArgumentParser().parse_args(
+            ['--job', self.fake_job_uuid])
+        cmd = crunchstat_summary.command.Command(args)
+        cmd.run()
+        self.diff_known_report(self.logfile, cmd)
+        mock_api().jobs().get.assert_called_with(uuid=self.fake_job_uuid)
+        mock_cr.assert_called_with(self.fake_log_id)
+        mock_cr().open.assert_called_with('fake-logfile.txt')
+
+
+class SummarizePipeline(ReportDiff):
+    fake_instance = {
+        'uuid': 'zzzzz-d1hrv-i3e77t9z5y8j9cc',
+        'owner_uuid': 'zzzzz-tpzed-xurymjxw79nv3jz',
+        'components': collections.OrderedDict([
+            ['foo', {
+                'job': {
+                    'uuid': 'zzzzz-8i9sb-000000000000000',
+                    'log': 'fake-log-pdh-0',
+                    'runtime_constraints': {
+                        'min_ram_mb_per_node': 900,
+                        'min_cores_per_node': 1,
+                    },
+                },
+            }],
+            ['bar', {
+                'job': {
+                    'uuid': 'zzzzz-8i9sb-000000000000001',
+                    'log': 'fake-log-pdh-1',
+                    'runtime_constraints': {
+                        'min_ram_mb_per_node': 900,
+                        'min_cores_per_node': 1,
+                    },
+                },
+            }],
+            ['no-job-assigned', {}],
+            ['unfinished-job', {
+                'job': {
+                    'uuid': 'zzzzz-8i9sb-xxxxxxxxxxxxxxx',
+                },
+            }],
+            ['baz', {
+                'job': {
+                    'uuid': 'zzzzz-8i9sb-000000000000002',
+                    'log': 'fake-log-pdh-2',
+                    'runtime_constraints': {
+                        'min_ram_mb_per_node': 900,
+                        'min_cores_per_node': 1,
+                    },
+                },
+            }]]),
+    }
+
+    @mock.patch('arvados.collection.CollectionReader')
+    @mock.patch('arvados.api')
+    def test_pipeline(self, mock_api, mock_cr):
+        logfile = os.path.join(TESTS_DIR, 'logfile_20151204190335.txt.gz')
+        mock_api().pipeline_instances().get().execute. \
+            return_value = self.fake_instance
+        mock_cr().__iter__.return_value = ['fake-logfile.txt']
+        mock_cr().open.side_effect = [gzip.open(logfile) for _ in range(3)]
+        args = crunchstat_summary.command.ArgumentParser().parse_args(
+            ['--pipeline-instance', self.fake_instance['uuid']])
+        cmd = crunchstat_summary.command.Command(args)
+        cmd.run()
+
+        job_report = [
+            line for line in open(logfile+'.report').readlines()
+            if not line.startswith('#!! ')]
+        expect = (
+            ['### Summary for foo (zzzzz-8i9sb-000000000000000)\n'] +
+            job_report + ['\n'] +
+            ['### Summary for bar (zzzzz-8i9sb-000000000000001)\n'] +
+            job_report + ['\n'] +
+            ['### Summary for unfinished-job (partial) (zzzzz-8i9sb-xxxxxxxxxxxxxxx)\n',
+             '(no report generated)\n',
+             '\n'] +
+            ['### Summary for baz (zzzzz-8i9sb-000000000000002)\n'] +
+            job_report)
+        self.diff_report(cmd, expect)
+        mock_cr.assert_has_calls(
+            [
+                mock.call('fake-log-pdh-0'),
+                mock.call('fake-log-pdh-1'),
+                mock.call('fake-log-pdh-2'),
+            ], any_order=True)
+        mock_cr().open.assert_called_with('fake-logfile.txt')
+
+
+class SummarizeACRJob(ReportDiff):
+    fake_job = {
+        'uuid': 'zzzzz-8i9sb-i3e77t9z5y8j9cc',
+        'owner_uuid': 'zzzzz-tpzed-xurymjxw79nv3jz',
+        'components': {
+            'foo': 'zzzzz-8i9sb-000000000000000',
+            'bar': 'zzzzz-8i9sb-000000000000001',
+            'unfinished-job': 'zzzzz-8i9sb-xxxxxxxxxxxxxxx',
+            'baz': 'zzzzz-8i9sb-000000000000002',
+        }
+    }
+    fake_jobs_index = { 'items': [
+        {
+            'uuid': 'zzzzz-8i9sb-000000000000000',
+            'log': 'fake-log-pdh-0',
+            'runtime_constraints': {
+                'min_ram_mb_per_node': 900,
+                'min_cores_per_node': 1,
+            },
+        },
+        {
+            'uuid': 'zzzzz-8i9sb-000000000000001',
+            'log': 'fake-log-pdh-1',
+            'runtime_constraints': {
+                'min_ram_mb_per_node': 900,
+                'min_cores_per_node': 1,
+            },
+        },
+        {
+            'uuid': 'zzzzz-8i9sb-xxxxxxxxxxxxxxx',
+        },
+        {
+            'uuid': 'zzzzz-8i9sb-000000000000002',
+            'log': 'fake-log-pdh-2',
+            'runtime_constraints': {
+                'min_ram_mb_per_node': 900,
+                'min_cores_per_node': 1,
+            },
+        },
+    ]}
+
+    @mock.patch('arvados.collection.CollectionReader')
+    @mock.patch('arvados.api')
+    def test_acr_job(self, mock_api, mock_cr):
+        logfile = os.path.join(TESTS_DIR, 'logfile_20151204190335.txt.gz')
+        mock_api().jobs().index().execute.return_value = self.fake_jobs_index
+        mock_api().jobs().get().execute.return_value = self.fake_job
+        mock_cr().__iter__.return_value = ['fake-logfile.txt']
+        mock_cr().open.side_effect = [gzip.open(logfile) for _ in range(3)]
+        args = crunchstat_summary.command.ArgumentParser().parse_args(
+            ['--job', self.fake_job['uuid']])
+        cmd = crunchstat_summary.command.Command(args)
+        cmd.run()
+
+        job_report = [
+            line for line in open(logfile+'.report').readlines()
+            if not line.startswith('#!! ')]
+        expect = (
+            ['### Summary for zzzzz-8i9sb-i3e77t9z5y8j9cc (partial) (zzzzz-8i9sb-i3e77t9z5y8j9cc)\n',
+             '(no report generated)\n',
+             '\n'] +
+            ['### Summary for bar (zzzzz-8i9sb-000000000000001)\n'] +
+            job_report + ['\n'] +
+            ['### Summary for baz (zzzzz-8i9sb-000000000000002)\n'] +
+            job_report + ['\n'] +
+            ['### Summary for foo (zzzzz-8i9sb-000000000000000)\n'] +
+            job_report + ['\n'] +
+            ['### Summary for unfinished-job (partial) (zzzzz-8i9sb-xxxxxxxxxxxxxxx)\n',
+             '(no report generated)\n']
+        )
+        self.diff_report(cmd, expect)
+        mock_cr.assert_has_calls(
+            [
+                mock.call('fake-log-pdh-0'),
+                mock.call('fake-log-pdh-1'),
+                mock.call('fake-log-pdh-2'),
+            ], any_order=True)
+        mock_cr().open.assert_called_with('fake-logfile.txt')
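
The side_effect routing in SummarizeContainer above is worth calling out: when a mock's side_effect is a function, calling the mock invokes that function and returns its result, which lets a single fake open() serve a different stream per file name. A standalone sketch (the file names and contents are hypothetical):

    # Standalone sketch of per-filename mock routing via side_effect.
    import io
    import mock

    fake_open = mock.Mock(
        side_effect=lambda name: io.BytesIO(b'contents of ' + name.encode()))
    print(fake_open('crunchstat.txt').read())   # b'contents of crunchstat.txt'
    print(fake_open('arv-mount.txt').read())    # b'contents of arv-mount.txt'
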
diff --git a/tools/keep-block-check/.gitignore b/tools/keep-block-check/.gitignore
new file mode 100644 (file)
index 0000000..97eb5da
--- /dev/null
@@ -0,0 +1 @@
+keep-block-check
diff --git a/tools/keep-block-check/keep-block-check.go b/tools/keep-block-check/keep-block-check.go
new file mode 100644 (file)
index 0000000..2de7a96
--- /dev/null
@@ -0,0 +1,254 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "crypto/tls"
+       "errors"
+       "flag"
+       "fmt"
+       "io/ioutil"
+       "log"
+       "net/http"
+       "os"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+)
+
+var version = "dev"
+
+func main() {
+       err := doMain(os.Args[1:])
+       if err != nil {
+               log.Fatalf("%v", err)
+       }
+}
+
+func doMain(args []string) error {
+       flags := flag.NewFlagSet("keep-block-check", flag.ExitOnError)
+
+       configFile := flags.String(
+               "config",
+               "",
+               "Configuration filename. May be either a pathname to a config file, or (for example) 'foo' as shorthand for $HOME/.config/arvados/foo.conf file. This file is expected to specify the values for ARVADOS_API_TOKEN, ARVADOS_API_HOST, ARVADOS_API_HOST_INSECURE, and ARVADOS_BLOB_SIGNING_KEY for the source.")
+
+       keepServicesJSON := flags.String(
+               "keep-services-json",
+               "",
+               "An optional list of available keepservices. "+
+                       "If not provided, this list is obtained from api server configured in config-file.")
+
+       locatorFile := flags.String(
+               "block-hash-file",
+               "",
+               "Filename containing the block hashes to be checked. This is required. "+
+                       "This file contains the block hashes one per line.")
+
+       prefix := flags.String(
+               "prefix",
+               "",
+               "Block hash prefix. When a prefix is specified, only hashes listed in the file with this prefix will be checked.")
+
+       blobSignatureTTLFlag := flags.Duration(
+               "blob-signature-ttl",
+               0,
+               "Lifetime of blob permission signatures on the keepservers. If not provided, this will be retrieved from the API server's discovery document.")
+
+       verbose := flags.Bool(
+               "v",
+               false,
+               "Log progress of each block verification")
+
+       getVersion := flags.Bool(
+               "version",
+               false,
+               "Print version information and exit.")
+
+       // Parse the flag args (main already stripped off the command name)
+       flags.Parse(args)
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("keep-block-check %s\n", version)
+               os.Exit(0)
+       }
+
+       config, blobSigningKey, err := loadConfig(*configFile)
+       if err != nil {
+               return fmt.Errorf("Error loading configuration from file: %s", err.Error())
+       }
+
+       // get list of block locators to be checked
+       blockLocators, err := getBlockLocators(*locatorFile, *prefix)
+       if err != nil {
+               return fmt.Errorf("Error reading block hashes to be checked from file: %s", err.Error())
+       }
+
+       // setup keepclient
+       kc, blobSignatureTTL, err := setupKeepClient(config, *keepServicesJSON, *blobSignatureTTLFlag)
+       if err != nil {
+               return fmt.Errorf("Error configuring keepclient: %s", err.Error())
+       }
+
+       return performKeepBlockCheck(kc, blobSignatureTTL, blobSigningKey, blockLocators, *verbose)
+}
+
+type apiConfig struct {
+       APIToken        string
+       APIHost         string
+       APIHostInsecure bool
+       ExternalClient  bool
+}
+
+// Load config from given file
+func loadConfig(configFile string) (config apiConfig, blobSigningKey string, err error) {
+       if configFile == "" {
+               err = errors.New("Client config file not specified")
+               return
+       }
+
+       config, blobSigningKey, err = readConfigFromFile(configFile)
+       return
+}
+
+// Read config from file
+func readConfigFromFile(filename string) (config apiConfig, blobSigningKey string, err error) {
+       if !strings.Contains(filename, "/") {
+               filename = os.Getenv("HOME") + "/.config/arvados/" + filename + ".conf"
+       }
+
+       content, err := ioutil.ReadFile(filename)
+
+       if err != nil {
+               return
+       }
+
+       lines := strings.Split(string(content), "\n")
+       for _, line := range lines {
+               if line == "" {
+                       continue
+               }
+
+               kv := strings.SplitN(line, "=", 2)
+               if len(kv) == 2 {
+                       key := strings.TrimSpace(kv[0])
+                       value := strings.TrimSpace(kv[1])
+
+                       switch key {
+                       case "ARVADOS_API_TOKEN":
+                               config.APIToken = value
+                       case "ARVADOS_API_HOST":
+                               config.APIHost = value
+                       case "ARVADOS_API_HOST_INSECURE":
+                               config.APIHostInsecure = arvadosclient.StringBool(value)
+                       case "ARVADOS_EXTERNAL_CLIENT":
+                               config.ExternalClient = arvadosclient.StringBool(value)
+                       case "ARVADOS_BLOB_SIGNING_KEY":
+                               blobSigningKey = value
+                       }
+               }
+       }
+
+       return
+}
+
+// setup keepclient using the config provided
+func setupKeepClient(config apiConfig, keepServicesJSON string, blobSignatureTTL time.Duration) (kc *keepclient.KeepClient, ttl time.Duration, err error) {
+       arv := arvadosclient.ArvadosClient{
+               ApiToken:    config.APIToken,
+               ApiServer:   config.APIHost,
+               ApiInsecure: config.APIHostInsecure,
+               Client: &http.Client{Transport: &http.Transport{
+                       TLSClientConfig: &tls.Config{InsecureSkipVerify: config.APIHostInsecure}}},
+               External: config.ExternalClient,
+       }
+
+       // If keepServicesJSON is provided, use it instead of service discovery
+       if keepServicesJSON == "" {
+               kc, err = keepclient.MakeKeepClient(&arv)
+               if err != nil {
+                       return
+               }
+       } else {
+               kc = keepclient.New(&arv)
+               err = kc.LoadKeepServicesFromJSON(keepServicesJSON)
+               if err != nil {
+                       return
+               }
+       }
+
+       // Fetch blobSignatureTTL from the API discovery document if it was not provided
+       ttl = blobSignatureTTL
+       if blobSignatureTTL == 0 {
+               value, err := arv.Discovery("blobSignatureTtl")
+               if err == nil {
+                       ttl = time.Duration(int(value.(float64))) * time.Second
+               } else {
+                       return nil, 0, err
+               }
+       }
+
+       return
+}
+
+// Get list of unique block locators from the given file
+func getBlockLocators(locatorFile, prefix string) (locators []string, err error) {
+       if locatorFile == "" {
+               err = errors.New("block-hash-file not specified")
+               return
+       }
+
+       content, err := ioutil.ReadFile(locatorFile)
+       if err != nil {
+               return
+       }
+
+       locatorMap := make(map[string]bool)
+       for _, line := range strings.Split(string(content), "\n") {
+               line = strings.TrimSpace(line)
+               if line == "" || !strings.HasPrefix(line, prefix) || locatorMap[line] {
+                       continue
+               }
+               locators = append(locators, line)
+               locatorMap[line] = true
+       }
+
+       return
+}
+
+// Get block headers from keep. Log any errors.
+func performKeepBlockCheck(kc *keepclient.KeepClient, blobSignatureTTL time.Duration, blobSigningKey string, blockLocators []string, verbose bool) error {
+       totalBlocks := len(blockLocators)
+       notFoundBlocks := 0
+       current := 0
+       for _, locator := range blockLocators {
+               current++
+               if verbose {
+                       log.Printf("Verifying block %d of %d: %v", current, totalBlocks, locator)
+               }
+               getLocator := locator
+               if blobSigningKey != "" {
+                       expiresAt := time.Now().AddDate(0, 0, 1)
+                       getLocator = keepclient.SignLocator(locator, kc.Arvados.ApiToken, expiresAt, blobSignatureTTL, []byte(blobSigningKey))
+               }
+
+               _, _, err := kc.Ask(getLocator)
+               if err != nil {
+                       notFoundBlocks++
+                       log.Printf("Error verifying block %v: %v", locator, err)
+               }
+       }
+
+       log.Printf("Verify block totals: %d attempts, %d successes, %d errors", totalBlocks, totalBlocks-notFoundBlocks, notFoundBlocks)
+
+       if notFoundBlocks > 0 {
+               return fmt.Errorf("Block verification failed for %d out of %d blocks with matching prefix.", notFoundBlocks, totalBlocks)
+       }
+
+       return nil
+}
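
readConfigFromFile above accepts plain KEY=VALUE lines and tolerates blank lines and surrounding whitespace. A sketch that writes such a file from Python (every value below is a placeholder, not a real host, token, or key):

    # Sketch: emit the KEY=VALUE config file that keep-block-check's
    # -config flag reads; all values are placeholders.
    settings = {
        'ARVADOS_API_HOST': 'zzzzz.arvadosapi.com',
        'ARVADOS_API_TOKEN': 'placeholder-token',
        'ARVADOS_API_HOST_INSECURE': 'false',
        'ARVADOS_BLOB_SIGNING_KEY': 'abcdefg',
    }
    with open('keep-block-check.conf', 'w') as f:
        for key, value in settings.items():
            f.write('{}={}\n'.format(key, value))

Because the resulting path contains a '/', passing -config ./keep-block-check.conf uses it directly instead of expanding it under $HOME/.config/arvados/.
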
diff --git a/tools/keep-block-check/keep-block-check_test.go b/tools/keep-block-check/keep-block-check_test.go
new file mode 100644 (file)
index 0000000..a2000ba
--- /dev/null
@@ -0,0 +1,359 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "os"
+       "regexp"
+       "strings"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+
+       . "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       TestingT(t)
+}
+
+// Gocheck boilerplate
+var _ = Suite(&ServerRequiredSuite{})
+var _ = Suite(&DoMainTestSuite{})
+
+type ServerRequiredSuite struct{}
+type DoMainTestSuite struct{}
+
+var kc *keepclient.KeepClient
+var logBuffer bytes.Buffer
+
+var TestHash = "aaaa09c290d0fb1ca068ffaddf22cbd0"
+var TestHash2 = "aaaac516f788aec4f30932ffb6395c39"
+
+var blobSignatureTTL = time.Duration(2*7*24) * time.Hour
+
+func (s *ServerRequiredSuite) SetUpSuite(c *C) {
+       arvadostest.StartAPI()
+}
+
+func (s *ServerRequiredSuite) TearDownSuite(c *C) {
+       arvadostest.StopAPI()
+       arvadostest.ResetEnv()
+}
+
+func (s *ServerRequiredSuite) SetUpTest(c *C) {
+       logOutput := io.MultiWriter(&logBuffer)
+       log.SetOutput(logOutput)
+}
+
+func (s *ServerRequiredSuite) TearDownTest(c *C) {
+       arvadostest.StopKeep(2)
+       log.SetOutput(os.Stdout)
+       log.Printf("%v", logBuffer.String())
+}
+
+func (s *DoMainTestSuite) SetUpSuite(c *C) {
+}
+
+func (s *DoMainTestSuite) SetUpTest(c *C) {
+       logOutput := io.MultiWriter(&logBuffer)
+       log.SetOutput(logOutput)
+       keepclient.RefreshServiceDiscovery()
+}
+
+func (s *DoMainTestSuite) TearDownTest(c *C) {
+       log.SetOutput(os.Stdout)
+       log.Printf("%v", logBuffer.String())
+}
+
+func setupKeepBlockCheck(c *C, enforcePermissions bool, keepServicesJSON string) {
+       setupKeepBlockCheckWithTTL(c, enforcePermissions, keepServicesJSON, blobSignatureTTL)
+}
+
+func setupKeepBlockCheckWithTTL(c *C, enforcePermissions bool, keepServicesJSON string, ttl time.Duration) {
+       var config apiConfig
+       config.APIHost = os.Getenv("ARVADOS_API_HOST")
+       config.APIToken = arvadostest.DataManagerToken
+       config.APIHostInsecure = arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE"))
+
+       // Start Keep servers
+       arvadostest.StartKeep(2, enforcePermissions)
+
+       // setup keepclients
+       var err error
+       kc, ttl, err = setupKeepClient(config, keepServicesJSON, ttl)
+       c.Assert(ttl, Equals, blobSignatureTTL)
+       c.Check(err, IsNil)
+
+       keepclient.RefreshServiceDiscovery()
+}
+
+// Setup test data
+func setupTestData(c *C) []string {
+       allLocators := []string{}
+
+       // Put a few blocks
+       for i := 0; i < 5; i++ {
+               hash, _, err := kc.PutB([]byte(fmt.Sprintf("keep-block-check-test-data-%d", i)))
+               c.Check(err, IsNil)
+               allLocators = append(allLocators, strings.Split(hash, "+A")[0])
+       }
+
+       return allLocators
+}
+
+func setupConfigFile(c *C, fileName string) string {
+       // Setup a config file
+       file, err := ioutil.TempFile(os.TempDir(), fileName)
+       c.Check(err, IsNil)
+
+       // Add config to the file. While at it, throw in some extra white space
+       fileContent := "ARVADOS_API_HOST=" + os.Getenv("ARVADOS_API_HOST") + "\n"
+       fileContent += "ARVADOS_API_TOKEN=" + arvadostest.DataManagerToken + "\n"
+       fileContent += "\n"
+       fileContent += "ARVADOS_API_HOST_INSECURE=" + os.Getenv("ARVADOS_API_HOST_INSECURE") + "\n"
+       fileContent += " ARVADOS_EXTERNAL_CLIENT = false \n"
+       fileContent += " NotANameValuePairAndShouldGetIgnored \n"
+       fileContent += "ARVADOS_BLOB_SIGNING_KEY=abcdefg\n"
+
+       _, err = file.Write([]byte(fileContent))
+       c.Check(err, IsNil)
+
+       return file.Name()
+}
+
+func setupBlockHashFile(c *C, name string, blocks []string) string {
+       // Setup a block hash file
+       file, err := ioutil.TempFile(os.TempDir(), name)
+       c.Check(err, IsNil)
+
+       // Add the hashes to the file. While at it, throw in some extra white space
+       fileContent := ""
+       for _, hash := range blocks {
+               fileContent += fmt.Sprintf(" %s \n", hash)
+       }
+       fileContent += "\n"
+       _, err = file.Write([]byte(fileContent))
+       c.Check(err, IsNil)
+
+       return file.Name()
+}
+
+func checkErrorLog(c *C, blocks []string, prefix, suffix string) {
+       for _, hash := range blocks {
+               expected := `(?ms).*` + prefix + `.*` + hash + `.*` + suffix + `.*`
+               c.Check(logBuffer.String(), Matches, expected)
+       }
+}
+
+func checkNoErrorsLogged(c *C, prefix, suffix string) {
+       expected := prefix + `.*` + suffix
+       match, _ := regexp.MatchString(expected, logBuffer.String())
+       c.Assert(match, Equals, false)
+}
+
+func (s *ServerRequiredSuite) TestBlockCheck(c *C) {
+       setupKeepBlockCheck(c, false, "")
+       allLocators := setupTestData(c)
+       err := performKeepBlockCheck(kc, blobSignatureTTL, "", allLocators, true)
+       c.Check(err, IsNil)
+       checkNoErrorsLogged(c, "Error verifying block", "Block not found")
+}
+
+func (s *ServerRequiredSuite) TestBlockCheckWithBlobSigning(c *C) {
+       setupKeepBlockCheck(c, true, "")
+       allLocators := setupTestData(c)
+       err := performKeepBlockCheck(kc, blobSignatureTTL, arvadostest.BlobSigningKey, allLocators, true)
+       c.Check(err, IsNil)
+       checkNoErrorsLogged(c, "Error verifying block", "Block not found")
+}
+
+func (s *ServerRequiredSuite) TestBlockCheckWithBlobSigningAndTTLFromDiscovery(c *C) {
+       setupKeepBlockCheckWithTTL(c, true, "", 0)
+       allLocators := setupTestData(c)
+       err := performKeepBlockCheck(kc, blobSignatureTTL, arvadostest.BlobSigningKey, allLocators, true)
+       c.Check(err, IsNil)
+       checkNoErrorsLogged(c, "Error verifying block", "Block not found")
+}
+
+func (s *ServerRequiredSuite) TestBlockCheck_NoSuchBlock(c *C) {
+       setupKeepBlockCheck(c, false, "")
+       allLocators := setupTestData(c)
+       allLocators = append(allLocators, TestHash)
+       allLocators = append(allLocators, TestHash2)
+       err := performKeepBlockCheck(kc, blobSignatureTTL, "", allLocators, true)
+       c.Check(err, NotNil)
+       c.Assert(err.Error(), Equals, "Block verification failed for 2 out of 7 blocks with matching prefix.")
+       checkErrorLog(c, []string{TestHash, TestHash2}, "Error verifying block", "Block not found")
+}
+
+func (s *ServerRequiredSuite) TestBlockCheck_NoSuchBlock_WithMatchingPrefix(c *C) {
+       setupKeepBlockCheck(c, false, "")
+       allLocators := setupTestData(c)
+       allLocators = append(allLocators, TestHash)
+       allLocators = append(allLocators, TestHash2)
+       locatorFile := setupBlockHashFile(c, "block-hash", allLocators)
+       defer os.Remove(locatorFile)
+       locators, err := getBlockLocators(locatorFile, "aaa")
+       c.Check(err, IsNil)
+       err = performKeepBlockCheck(kc, blobSignatureTTL, "", locators, true)
+       c.Check(err, NotNil)
+       // Of the 7 blocks in allLocators, only two match the prefix and hence only those are checked
+       c.Assert(err.Error(), Equals, "Block verification failed for 2 out of 2 blocks with matching prefix.")
+       checkErrorLog(c, []string{TestHash, TestHash2}, "Error verifying block", "Block not found")
+}
+
+func (s *ServerRequiredSuite) TestBlockCheck_NoSuchBlock_WithPrefixMismatch(c *C) {
+       setupKeepBlockCheck(c, false, "")
+       allLocators := setupTestData(c)
+       allLocators = append(allLocators, TestHash)
+       allLocators = append(allLocators, TestHash2)
+       locatorFile := setupBlockHashFile(c, "block-hash", allLocators)
+       defer os.Remove(locatorFile)
+       locators, err := getBlockLocators(locatorFile, "999")
+       c.Check(err, IsNil)
+       err = performKeepBlockCheck(kc, blobSignatureTTL, "", locators, true)
+       c.Check(err, IsNil) // there were no matching locators in file and hence nothing was checked
+}
+
+func (s *ServerRequiredSuite) TestBlockCheck_BadSignature(c *C) {
+       setupKeepBlockCheck(c, true, "")
+       setupTestData(c)
+       err := performKeepBlockCheck(kc, blobSignatureTTL, "badblobsigningkey", []string{TestHash, TestHash2}, false)
+       c.Assert(err.Error(), Equals, "Block verification failed for 2 out of 2 blocks with matching prefix.")
+       checkErrorLog(c, []string{TestHash, TestHash2}, "Error verifying block", "HTTP 403")
+       // verbose logging not requested
+       c.Assert(strings.Contains(logBuffer.String(), "Verifying block 1 of 2"), Equals, false)
+}
+
+var testKeepServicesJSON = `{
+  "kind":"arvados#keepServiceList",
+  "etag":"",
+  "self_link":"",
+  "offset":null, "limit":null,
+  "items":[
+    {"href":"/keep_services/zzzzz-bi6l4-123456789012340",
+     "kind":"arvados#keepService",
+     "uuid":"zzzzz-bi6l4-123456789012340",
+     "service_host":"keep0.zzzzz.arvadosapi.com",
+     "service_port":25107,
+     "service_ssl_flag":false,
+     "service_type":"disk",
+     "read_only":false },
+    {"href":"/keep_services/zzzzz-bi6l4-123456789012341",
+     "kind":"arvados#keepService",
+     "uuid":"zzzzz-bi6l4-123456789012341",
+     "service_host":"keep0.zzzzz.arvadosapi.com",
+     "service_port":25108,
+     "service_ssl_flag":false,
+     "service_type":"disk",
+     "read_only":false }
+    ],
+  "items_available":2 }`
+
+// Setup block-check using keepServicesJSON with fake keepservers.
+// Expect error during performKeepBlockCheck due to unreachable keepservers.
+func (s *ServerRequiredSuite) TestErrorDuringKeepBlockCheck_FakeKeepservers(c *C) {
+       setupKeepBlockCheck(c, false, testKeepServicesJSON)
+       err := performKeepBlockCheck(kc, blobSignatureTTL, "", []string{TestHash, TestHash2}, true)
+       c.Assert(err.Error(), Equals, "Block verification failed for 2 out of 2 blocks with matching prefix.")
+       checkErrorLog(c, []string{TestHash, TestHash2}, "Error verifying block", "")
+}
+
+// Test keep-block-check initialization with keepServicesJSON
+func (s *ServerRequiredSuite) TestKeepBlockCheck_InitializeWithKeepServicesJSON(c *C) {
+       setupKeepBlockCheck(c, false, testKeepServicesJSON)
+       found := 0
+       for k := range kc.LocalRoots() {
+               if k == "zzzzz-bi6l4-123456789012340" || k == "zzzzz-bi6l4-123456789012341" {
+                       found++
+               }
+       }
+       c.Check(found, Equals, 2)
+}
+
+// Test loadConfig func
+func (s *ServerRequiredSuite) TestLoadConfig(c *C) {
+       // Setup config file
+       configFile := setupConfigFile(c, "config")
+       defer os.Remove(configFile)
+
+       // load configuration from the file
+       config, blobSigningKey, err := loadConfig(configFile)
+       c.Check(err, IsNil)
+
+       c.Assert(config.APIHost, Equals, os.Getenv("ARVADOS_API_HOST"))
+       c.Assert(config.APIToken, Equals, arvadostest.DataManagerToken)
+       c.Assert(config.APIHostInsecure, Equals, arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE")))
+       c.Assert(config.ExternalClient, Equals, false)
+       c.Assert(blobSigningKey, Equals, "abcdefg")
+}
+
+func (s *DoMainTestSuite) Test_doMain_WithNoConfig(c *C) {
+       args := []string{"-prefix", "a"}
+       err := doMain(args)
+       c.Check(err, NotNil)
+       c.Assert(strings.Contains(err.Error(), "config file not specified"), Equals, true)
+}
+
+func (s *DoMainTestSuite) Test_doMain_WithNoSuchConfigFile(c *C) {
+       args := []string{"-config", "no-such-file"}
+       err := doMain(args)
+       c.Check(err, NotNil)
+       c.Assert(strings.Contains(err.Error(), "no such file or directory"), Equals, true)
+}
+
+func (s *DoMainTestSuite) Test_doMain_WithNoBlockHashFile(c *C) {
+       config := setupConfigFile(c, "config")
+       defer os.Remove(config)
+
+       // Start keepservers.
+       arvadostest.StartKeep(2, false)
+       defer arvadostest.StopKeep(2)
+
+       args := []string{"-config", config}
+       err := doMain(args)
+       c.Assert(strings.Contains(err.Error(), "block-hash-file not specified"), Equals, true)
+}
+
+func (s *DoMainTestSuite) Test_doMain_WithNoSuchBlockHashFile(c *C) {
+       config := setupConfigFile(c, "config")
+       defer os.Remove(config)
+
+       arvadostest.StartKeep(2, false)
+       defer arvadostest.StopKeep(2)
+
+       args := []string{"-config", config, "-block-hash-file", "no-such-file"}
+       err := doMain(args)
+       c.Assert(strings.Contains(err.Error(), "no such file or directory"), Equals, true)
+}
+
+func (s *DoMainTestSuite) Test_doMain(c *C) {
+       // Start keepservers.
+       arvadostest.StartKeep(2, false)
+       defer arvadostest.StopKeep(2)
+
+       config := setupConfigFile(c, "config")
+       defer os.Remove(config)
+
+       locatorFile := setupBlockHashFile(c, "block-hash", []string{TestHash, TestHash2})
+       defer os.Remove(locatorFile)
+
+       args := []string{"-config", config, "-block-hash-file", locatorFile, "-v"}
+       err := doMain(args)
+       c.Check(err, NotNil)
+       c.Assert(err.Error(), Equals, "Block verification failed for 2 out of 2 blocks with matching prefix.")
+       checkErrorLog(c, []string{TestHash, TestHash2}, "Error verifying block", "Block not found")
+       c.Assert(strings.Contains(logBuffer.String(), "Verifying block 1 of 2"), Equals, true)
+}
diff --git a/tools/keep-exercise/.gitignore b/tools/keep-exercise/.gitignore
new file mode 100644 (file)
index 0000000..6a1d10c
--- /dev/null
@@ -0,0 +1 @@
+keep-exercise
diff --git a/tools/keep-exercise/keep-exercise.go b/tools/keep-exercise/keep-exercise.go
new file mode 100644 (file)
index 0000000..6bf1abb
--- /dev/null
@@ -0,0 +1,211 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Testing tool for Keep services.
+//
+// keep-exercise helps measure throughput and test reliability under
+// various usage patterns.
+//
+// By default, it reads and writes blocks containing 2^26 NUL
+// bytes. This generates network traffic without consuming much disk
+// space.
+//
+// For a more realistic test, enable -vary-request. Warning: this will
+// fill your storage volumes with random data if you leave it running,
+// which can cost you money or leave you with too little room for
+// useful data.
+//
+package main
+
+import (
+       "crypto/rand"
+       "encoding/binary"
+       "flag"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "net/http"
+       "os"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+)
+
+var version = "dev"
+
+// Command line config knobs
+var (
+       BlockSize     = flag.Int("block-size", keepclient.BLOCKSIZE, "bytes per read/write op")
+       ReadThreads   = flag.Int("rthreads", 1, "number of concurrent readers")
+       WriteThreads  = flag.Int("wthreads", 1, "number of concurrent writers")
+       VaryRequest   = flag.Bool("vary-request", false, "vary the data for each request: consumes disk space, exercises write behavior")
+       VaryThread    = flag.Bool("vary-thread", false, "use -wthreads different data blocks")
+       Replicas      = flag.Int("replicas", 1, "replication level for writing")
+       StatsInterval = flag.Duration("stats-interval", time.Second, "time interval between IO stats reports, or 0 to disable")
+       ServiceURL    = flag.String("url", "", "specify scheme://host of a single keep service to exercise (instead of using all advertised services like normal clients)")
+       ServiceUUID   = flag.String("uuid", "", "specify UUID of a single advertised keep service to exercise")
+       getVersion    = flag.Bool("version", false, "Print version information and exit.")
+)
+
+func main() {
+       flag.Parse()
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("keep-exercise %s\n", version)
+               os.Exit(0)
+       }
+
+       log.Printf("keep-exercise %s started", version)
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       if err != nil {
+               log.Fatal(err)
+       }
+       kc, err := keepclient.MakeKeepClient(arv)
+       if err != nil {
+               log.Fatal(err)
+       }
+       kc.Want_replicas = *Replicas
+
+       transport := *(http.DefaultTransport.(*http.Transport))
+       transport.TLSClientConfig = arvadosclient.MakeTLSConfig(arv.ApiInsecure)
+       kc.HTTPClient = &http.Client{
+               Timeout:   10 * time.Minute,
+               Transport: &transport,
+       }
+
+       overrideServices(kc)
+
+       nextLocator := make(chan string, *ReadThreads+*WriteThreads)
+
+       go countBeans(nextLocator)
+       for i := 0; i < *WriteThreads; i++ {
+               nextBuf := make(chan []byte, 1)
+               go makeBufs(nextBuf, i)
+               go doWrites(kc, nextBuf, nextLocator)
+       }
+       for i := 0; i < *ReadThreads; i++ {
+               go doReads(kc, nextLocator)
+       }
+       <-make(chan struct{})
+}
+
+// Send N to bytesInChan when we receive N bytes from keepstore.
+var bytesInChan = make(chan uint64)
+var bytesOutChan = make(chan uint64)
+
+// Send struct{}{} to errorsChan when an error happens.
+var errorsChan = make(chan struct{})
+
+func countBeans(nextLocator chan string) {
+       t0 := time.Now()
+       var tickChan <-chan time.Time
+       if *StatsInterval > 0 {
+               tickChan = time.NewTicker(*StatsInterval).C
+       }
+       var bytesIn uint64
+       var bytesOut uint64
+       var errors uint64
+       for {
+               select {
+               case <-tickChan:
+                       elapsed := time.Since(t0)
+                       log.Printf("%v elapsed: read %v bytes (%.1f MiB/s), wrote %v bytes (%.1f MiB/s), errors %d",
+                               elapsed,
+                               bytesIn, (float64(bytesIn) / elapsed.Seconds() / 1048576),
+                               bytesOut, (float64(bytesOut) / elapsed.Seconds() / 1048576),
+                               errors,
+                       )
+               case i := <-bytesInChan:
+                       bytesIn += i
+               case o := <-bytesOutChan:
+                       bytesOut += o
+               case <-errorsChan:
+                       errors++
+               }
+       }
+}
+
+func makeBufs(nextBuf chan<- []byte, threadID int) {
+       buf := make([]byte, *BlockSize)
+       if *VaryThread {
+               binary.PutVarint(buf, int64(threadID))
+       }
+       randSize := 524288
+       if randSize > *BlockSize {
+               randSize = *BlockSize
+       }
+       for {
+               if *VaryRequest {
+                       rnd := make([]byte, randSize)
+                       if _, err := io.ReadFull(rand.Reader, rnd); err != nil {
+                               log.Fatal(err)
+                       }
+                       buf = append(rnd, buf[randSize:]...)
+               }
+               nextBuf <- buf
+       }
+}
+
+func doWrites(kc *keepclient.KeepClient, nextBuf <-chan []byte, nextLocator chan<- string) {
+       for buf := range nextBuf {
+               locator, _, err := kc.PutB(buf)
+               if err != nil {
+                       log.Print(err)
+                       errorsChan <- struct{}{}
+                       continue
+               }
+               bytesOutChan <- uint64(len(buf))
+               for cap(nextLocator) > len(nextLocator)+*WriteThreads {
+                       // Give the readers something to do, unless
+                       // they have lots queued up already.
+                       nextLocator <- locator
+               }
+       }
+}
+
+func doReads(kc *keepclient.KeepClient, nextLocator <-chan string) {
+       for locator := range nextLocator {
+               rdr, size, url, err := kc.Get(locator)
+               if err != nil {
+                       log.Print(err)
+                       errorsChan <- struct{}{}
+                       continue
+               }
+               n, err := io.Copy(ioutil.Discard, rdr)
+               rdr.Close()
+               if n != size || err != nil {
+                       log.Printf("Got %d bytes (expected %d) from %s: %v", n, size, url, err)
+                       errorsChan <- struct{}{}
+                       // Note we don't count the bytes received in
+                       // partial/corrupt responses: we are measuring
+                       // throughput, not resource consumption.
+                       continue
+               bytesInChan <- uint64(n)
+       }
+}
+
+func overrideServices(kc *keepclient.KeepClient) {
+       roots := make(map[string]string)
+       if *ServiceURL != "" {
+               roots["zzzzz-bi6l4-000000000000000"] = *ServiceURL
+       } else if *ServiceUUID != "" {
+               for uuid, url := range kc.GatewayRoots() {
+                       if uuid == *ServiceUUID {
+                               roots[uuid] = url
+                               break
+                       }
+               }
+               if len(roots) == 0 {
+                       log.Fatalf("Service %q was not in list advertised by API %+q", *ServiceUUID, kc.GatewayRoots())
+               }
+       } else {
+               return
+       }
+       kc.SetServiceRoots(roots, roots, roots)
+}
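
The MiB/s figures that countBeans logs reduce to simple arithmetic over the accumulated byte counters; a sketch with made-up numbers:

    # Sketch: the throughput arithmetic behind countBeans' log line,
    # using made-up byte and elapsed-time values.
    bytes_in = 1793202486
    elapsed_seconds = 42.1
    print('read {:.1f} MiB/s'.format(bytes_in / elapsed_seconds / 1048576))
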
diff --git a/tools/keep-rsync/.gitignore b/tools/keep-rsync/.gitignore
new file mode 100644 (file)
index 0000000..5ee7f3b
--- /dev/null
@@ -0,0 +1 @@
+keep-rsync
diff --git a/tools/keep-rsync/keep-rsync.go b/tools/keep-rsync/keep-rsync.go
new file mode 100644 (file)
index 0000000..303f71f
--- /dev/null
@@ -0,0 +1,321 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bufio"
+       "crypto/tls"
+       "errors"
+       "flag"
+       "fmt"
+       "io/ioutil"
+       "log"
+       "net/http"
+       "os"
+       "strings"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+)
+
+var version = "dev"
+
+func main() {
+       err := doMain()
+       if err != nil {
+               log.Fatalf("%v", err)
+       }
+}
+
+func doMain() error {
+       flags := flag.NewFlagSet("keep-rsync", flag.ExitOnError)
+
+       srcConfigFile := flags.String(
+               "src",
+               "",
+               "Source configuration filename. May be either a pathname to a config file, or (for example) 'foo' as shorthand for the $HOME/.config/arvados/foo.conf file. This file is expected to specify the values of ARVADOS_API_TOKEN, ARVADOS_API_HOST, ARVADOS_API_HOST_INSECURE, and ARVADOS_BLOB_SIGNING_KEY for the source.")
+
+       dstConfigFile := flags.String(
+               "dst",
+               "",
+               "Destination configuration filename. May be either a pathname to a config file, or (for example) 'foo' as shorthand for the $HOME/.config/arvados/foo.conf file. This file is expected to specify the values of ARVADOS_API_TOKEN, ARVADOS_API_HOST, and ARVADOS_API_HOST_INSECURE for the destination.")
+
+       srcKeepServicesJSON := flags.String(
+               "src-keep-services-json",
+               "",
+               "An optional list of available source keepservices. "+
+                       "If not provided, this list is obtained from the API server configured in -src.")
+
+       dstKeepServicesJSON := flags.String(
+               "dst-keep-services-json",
+               "",
+               "An optional list of available destination keepservices. "+
+                       "If not provided, this list is obtained from the API server configured in -dst.")
+
+       replications := flags.Int(
+               "replications",
+               0,
+               "Number of replications to write to the destination. If not specified, the default "+
+                       "replication level configured on the destination server will be used.")
+
+       prefix := flags.String(
+               "prefix",
+               "",
+               "Index prefix")
+
+       srcBlobSignatureTTLFlag := flags.Duration(
+               "src-blob-signature-ttl",
+               0,
+               "Lifetime of blob permission signatures on source keepservers. If not provided, this will be retrieved from the API server's discovery document.")
+
+       getVersion := flags.Bool(
+               "version",
+               false,
+               "Print version information and exit.")
+
+       // Parse args; omit the first arg which is the command name
+       flags.Parse(os.Args[1:])
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("keep-rsync %s\n", version)
+               os.Exit(0)
+       }
+
+       srcConfig, srcBlobSigningKey, err := loadConfig(*srcConfigFile)
+       if err != nil {
+               return fmt.Errorf("Error loading src configuration from file: %s", err.Error())
+       }
+
+       dstConfig, _, err := loadConfig(*dstConfigFile)
+       if err != nil {
+               return fmt.Errorf("Error loading dst configuration from file: %s", err.Error())
+       }
+
+       // setup src and dst keepclients
+       kcSrc, srcBlobSignatureTTL, err := setupKeepClient(srcConfig, *srcKeepServicesJSON, false, 0, *srcBlobSignatureTTLFlag)
+       if err != nil {
+               return fmt.Errorf("Error configuring src keepclient: %s", err.Error())
+       }
+
+       kcDst, _, err := setupKeepClient(dstConfig, *dstKeepServicesJSON, true, *replications, 0)
+       if err != nil {
+               return fmt.Errorf("Error configuring dst keepclient: %s", err.Error())
+       }
+
+       // Copy blocks not found in dst from src
+       err = performKeepRsync(kcSrc, kcDst, srcBlobSignatureTTL, srcBlobSigningKey, *prefix)
+       if err != nil {
+               return fmt.Errorf("Error while syncing data: %s", err.Error())
+       }
+
+       return nil
+}
+
+type apiConfig struct {
+       APIToken        string
+       APIHost         string
+       APIHostInsecure bool
+       ExternalClient  bool
+}
+
+// loadConfig loads API configuration and the blob signing key from the given file
+func loadConfig(configFile string) (config apiConfig, blobSigningKey string, err error) {
+       if configFile == "" {
+               return config, blobSigningKey, errors.New("config file not specified")
+       }
+
+       config, blobSigningKey, err = readConfigFromFile(configFile)
+       if err != nil {
+               return config, blobSigningKey, fmt.Errorf("Error reading config file: %v", err)
+       }
+
+       return
+}
+
+// readConfigFromFile reads KEY=VALUE pairs from the given file; a bare name
+// (one without '/') is resolved under $HOME/.config/arvados/<name>.conf
+func readConfigFromFile(filename string) (config apiConfig, blobSigningKey string, err error) {
+       if !strings.Contains(filename, "/") {
+               filename = os.Getenv("HOME") + "/.config/arvados/" + filename + ".conf"
+       }
+
+       content, err := ioutil.ReadFile(filename)
+       if err != nil {
+               return config, "", err
+       }
+
+       lines := strings.Split(string(content), "\n")
+       for _, line := range lines {
+               if line == "" {
+                       continue
+               }
+
+               kv := strings.SplitN(line, "=", 2)
+               if len(kv) != 2 {
+                       continue // not a KEY=VALUE line
+               }
+               key := strings.TrimSpace(kv[0])
+               value := strings.TrimSpace(kv[1])
+
+               switch key {
+               case "ARVADOS_API_TOKEN":
+                       config.APIToken = value
+               case "ARVADOS_API_HOST":
+                       config.APIHost = value
+               case "ARVADOS_API_HOST_INSECURE":
+                       config.APIHostInsecure = arvadosclient.StringBool(value)
+               case "ARVADOS_EXTERNAL_CLIENT":
+                       config.ExternalClient = arvadosclient.StringBool(value)
+               case "ARVADOS_BLOB_SIGNING_KEY":
+                       blobSigningKey = value
+               }
+       }
+       return
+}
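For reference, the files parsed by readConfigFromFile are flat KEY=VALUE lists, matching what the test helper setupConfigFile writes later in this diff. A hypothetical example (all values are placeholders):

    ARVADOS_API_HOST=zzzzz.arvadosapi.com
    ARVADOS_API_TOKEN=0123456789abcdef0123456789abcdef01234567
    ARVADOS_API_HOST_INSECURE=false
    ARVADOS_EXTERNAL_CLIENT=false
    ARVADOS_BLOB_SIGNING_KEY=secretsigningkey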
+
+// setupKeepClient sets up a keepclient using the given config, and resolves
+// the blob signature TTL for the source if it was not provided
+func setupKeepClient(config apiConfig, keepServicesJSON string, isDst bool, replications int, srcBlobSignatureTTL time.Duration) (kc *keepclient.KeepClient, blobSignatureTTL time.Duration, err error) {
+       arv := arvadosclient.ArvadosClient{
+               ApiToken:    config.APIToken,
+               ApiServer:   config.APIHost,
+               ApiInsecure: config.APIHostInsecure,
+               Client: &http.Client{Transport: &http.Transport{
+                       TLSClientConfig: &tls.Config{InsecureSkipVerify: config.APIHostInsecure}}},
+               External: config.ExternalClient,
+       }
+
+       // If keepServicesJSON is provided, use it instead of service discovery
+       if keepServicesJSON == "" {
+               kc, err = keepclient.MakeKeepClient(&arv)
+               if err != nil {
+                       return nil, 0, err
+               }
+       } else {
+               kc = keepclient.New(&arv)
+               err = kc.LoadKeepServicesFromJSON(keepServicesJSON)
+               if err != nil {
+                       return kc, 0, err
+               }
+       }
+
+       if isDst {
+               // Get default replications value from destination, if it is not already provided
+               if replications == 0 {
+                       value, err := arv.Discovery("defaultCollectionReplication")
+                       if err == nil {
+                               replications = int(value.(float64))
+                       } else {
+                               return nil, 0, err
+                       }
+               }
+
+               kc.Want_replicas = replications
+       }
+
+       // If srcBlobSignatureTTL is not provided, get it from API server discovery doc
+       blobSignatureTTL = srcBlobSignatureTTL
+       if !isDst && srcBlobSignatureTTL == 0 {
+               value, err := arv.Discovery("blobSignatureTtl")
+               if err == nil {
+                       blobSignatureTTL = time.Duration(int(value.(float64))) * time.Second
+               } else {
+                       return nil, 0, err
+               }
+       }
+
+       return kc, blobSignatureTTL, nil
+}
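The double conversion time.Duration(int(value.(float64))) above reflects how encoding/json decodes untyped JSON numbers: arv.Discovery returns an interface{} holding a float64. A standalone sketch of the same conversion, using a hypothetical discovery-document fragment:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    func main() {
        var doc map[string]interface{}
        // Hypothetical discovery snippet; the TTL value is in seconds.
        raw := []byte(`{"blobSignatureTtl": 1209600}`)
        if err := json.Unmarshal(raw, &doc); err != nil {
            panic(err)
        }
        ttl := time.Duration(int(doc["blobSignatureTtl"].(float64))) * time.Second
        fmt.Println(ttl) // 336h0m0s, i.e. two weeks
    }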
+
+// performKeepRsync gets the unique block locators from src and dst,
+// and copies from src any blocks that are missing in dst
+func performKeepRsync(kcSrc, kcDst *keepclient.KeepClient, srcBlobSignatureTTL time.Duration, blobSigningKey, prefix string) error {
+       // Get unique locators from src
+       srcIndex, err := getUniqueLocators(kcSrc, prefix)
+       if err != nil {
+               return err
+       }
+
+       // Get unique locators from dst
+       dstIndex, err := getUniqueLocators(kcDst, prefix)
+       if err != nil {
+               return err
+       }
+
+       // Get list of locators found in src, but missing in dst
+       toBeCopied := getMissingLocators(srcIndex, dstIndex)
+
+       // Copy each missing block to dst
+       log.Printf("Before keep-rsync, there are %d blocks in src and %d blocks in dst. Copying %d blocks that are in src but not in dst.",
+               len(srcIndex), len(dstIndex), len(toBeCopied))
+
+       err = copyBlocksToDst(toBeCopied, kcSrc, kcDst, srcBlobSignatureTTL, blobSigningKey)
+
+       return err
+}
+
+// Get list of unique locators from the specified cluster
+func getUniqueLocators(kc *keepclient.KeepClient, prefix string) (map[string]bool, error) {
+       uniqueLocators := map[string]bool{}
+
+       // Get index and dedup
+       for uuid := range kc.LocalRoots() {
+               reader, err := kc.GetIndex(uuid, prefix)
+               if err != nil {
+                       return uniqueLocators, err
+               }
+               scanner := bufio.NewScanner(reader)
+               for scanner.Scan() {
+                       uniqueLocators[strings.Split(scanner.Text(), " ")[0]] = true
+               }
+               if err := scanner.Err(); err != nil {
+                       return uniqueLocators, err
+               }
+       }
+
+       return uniqueLocators, nil
+}
+
+// Get list of locators that are in src but not in dst
+func getMissingLocators(srcLocators, dstLocators map[string]bool) []string {
+       var missingLocators []string
+       for locator := range srcLocators {
+               if _, ok := dstLocators[locator]; !ok {
+                       missingLocators = append(missingLocators, locator)
+               }
+       }
+       return missingLocators
+}
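The core of the sync is just this set difference over index results. A tiny self-contained illustration with made-up hashes:

    package main

    import "fmt"

    func main() {
        src := map[string]bool{"aaa111": true, "bbb222": true, "ccc333": true}
        dst := map[string]bool{"bbb222": true}

        // Same logic as getMissingLocators: keep what src has and dst lacks.
        var missing []string
        for locator := range src {
            if !dst[locator] {
                missing = append(missing, locator)
            }
        }
        fmt.Println(missing) // [aaa111 ccc333], in map-iteration order
    }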
+
+// Copy blocks from src to dst; only those that are missing in dst are copied
+func copyBlocksToDst(toBeCopied []string, kcSrc, kcDst *keepclient.KeepClient, srcBlobSignatureTTL time.Duration, blobSigningKey string) error {
+       total := len(toBeCopied)
+
+       startedAt := time.Now()
+       for done, locator := range toBeCopied {
+               if done == 0 {
+                       log.Printf("Copying data block %d of %d (%.2f%% done): %v", done+1, total,
+                               float64(done)/float64(total)*100, locator)
+               } else {
+                       timePerBlock := time.Since(startedAt) / time.Duration(done)
+                       log.Printf("Copying data block %d of %d (%.2f%% done, %v est. time remaining): %v", done+1, total,
+                               float64(done)/float64(total)*100, timePerBlock*time.Duration(total-done), locator)
+               }
+
+               getLocator := locator
+               expiresAt := time.Now().AddDate(0, 0, 1)
+               if blobSigningKey != "" {
+                       getLocator = keepclient.SignLocator(getLocator, kcSrc.Arvados.ApiToken, expiresAt, srcBlobSignatureTTL, []byte(blobSigningKey))
+               }
+
+               reader, size, _, err := kcSrc.Get(getLocator)
+               if err != nil {
+                       return fmt.Errorf("Error getting block: %v %v", locator, err)
+               }
+
+               _, _, err = kcDst.PutHR(getLocator[:32], reader, size)
+               if err != nil {
+                       return fmt.Errorf("Error copying data block: %v %v", locator, err)
+               }
+       }
+
+       log.Printf("Successfully copied %d blocks to destination.", total)
+       return nil
+}
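Note how copyBlocksToDst recovers the bare hash with getLocator[:32]: an MD5 hex digest is exactly 32 characters, and signing only appends hints after it. A stdlib-only sketch of the locator shapes involved (the "+A<signature>@<expiry>" hint layout is an assumption here, suggested by the tests splitting locators on "+A", not derived from this file):

    package main

    import (
        "crypto/md5"
        "fmt"
    )

    func main() {
        data := []byte("test-data-1")
        hash := fmt.Sprintf("%x", md5.Sum(data)) // 32 hex characters
        locator := fmt.Sprintf("%s+%d", hash, len(data))
        signed := locator + "+Adeadbeef@12345678" // illustrative signature hint
        fmt.Println(locator)
        fmt.Println(signed[:32] == hash) // true: the hash prefix survives signing
    }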
diff --git a/tools/keep-rsync/keep-rsync_test.go b/tools/keep-rsync/keep-rsync_test.go
new file mode 100644 (file)
index 0000000..9c37e38
--- /dev/null
@@ -0,0 +1,475 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "crypto/md5"
+       "fmt"
+       "io/ioutil"
+       "os"
+       "strings"
+       "testing"
+       "time"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+
+       . "gopkg.in/check.v1"
+)
+
+var kcSrc, kcDst *keepclient.KeepClient
+var srcKeepServicesJSON, dstKeepServicesJSON, blobSigningKey string
+var blobSignatureTTL = time.Duration(2*7*24) * time.Hour
+
+func resetGlobals() {
+       blobSigningKey = ""
+       srcKeepServicesJSON = ""
+       dstKeepServicesJSON = ""
+       kcSrc = nil
+       kcDst = nil
+}
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       TestingT(t)
+}
+
+var _ = Suite(&ServerRequiredSuite{})
+var _ = Suite(&ServerNotRequiredSuite{})
+var _ = Suite(&DoMainTestSuite{})
+
+type ServerRequiredSuite struct{}
+
+func (s *ServerRequiredSuite) SetUpSuite(c *C) {
+       arvadostest.StartAPI()
+}
+
+func (s *ServerRequiredSuite) TearDownSuite(c *C) {
+       arvadostest.StopAPI()
+       arvadostest.ResetEnv()
+}
+
+func (s *ServerRequiredSuite) SetUpTest(c *C) {
+       resetGlobals()
+}
+
+func (s *ServerRequiredSuite) TearDownTest(c *C) {
+       arvadostest.StopKeep(3)
+}
+
+type ServerNotRequiredSuite struct{}
+
+func (s *ServerNotRequiredSuite) SetUpTest(c *C) {
+       resetGlobals()
+}
+
+type DoMainTestSuite struct {
+       initialArgs []string
+}
+
+func (s *DoMainTestSuite) SetUpTest(c *C) {
+       s.initialArgs = os.Args
+       os.Args = []string{"keep-rsync"}
+       resetGlobals()
+}
+
+func (s *DoMainTestSuite) TearDownTest(c *C) {
+       os.Args = s.initialArgs
+}
+
+var testKeepServicesJSON = `{ "kind":"arvados#keepServiceList", "etag":"", "self_link":"", "offset":null, "limit":null, "items":[ { "href":"/keep_services/zzzzz-bi6l4-123456789012340", "kind":"arvados#keepService", "etag":"641234567890enhj7hzx432e5", "uuid":"zzzzz-bi6l4-123456789012340", "owner_uuid":"zzzzz-tpzed-123456789012345", "service_host":"keep0.zzzzz.arvadosapi.com", "service_port":25107, "service_ssl_flag":false, "service_type":"disk", "read_only":false }, { "href":"/keep_services/zzzzz-bi6l4-123456789012341", "kind":"arvados#keepService", "etag":"641234567890enhj7hzx432e5", "uuid":"zzzzz-bi6l4-123456789012341", "owner_uuid":"zzzzz-tpzed-123456789012345", "service_host":"keep0.zzzzz.arvadosapi.com", "service_port":25108, "service_ssl_flag":false, "service_type":"disk", "read_only":false } ], "items_available":2 }`
+
+// Testing keep-rsync needs two sets of keep services: src and dst.
+// The test setup hence creates 3 servers instead of the default 2,
+// and uses the first 2 as src and the 3rd as dst keep servers.
+func setupRsync(c *C, enforcePermissions bool, replications int) {
+       // srcConfig
+       var srcConfig apiConfig
+       srcConfig.APIHost = os.Getenv("ARVADOS_API_HOST")
+       srcConfig.APIToken = arvadostest.DataManagerToken
+       srcConfig.APIHostInsecure = arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE"))
+
+       // dstConfig
+       var dstConfig apiConfig
+       dstConfig.APIHost = os.Getenv("ARVADOS_API_HOST")
+       dstConfig.APIToken = arvadostest.DataManagerToken
+       dstConfig.APIHostInsecure = arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE"))
+
+       if enforcePermissions {
+               blobSigningKey = arvadostest.BlobSigningKey
+       }
+
+       // Start Keep servers
+       arvadostest.StartKeep(3, enforcePermissions)
+       keepclient.RefreshServiceDiscovery()
+
+       // setup keepclients
+       var err error
+       kcSrc, _, err = setupKeepClient(srcConfig, srcKeepServicesJSON, false, 0, blobSignatureTTL)
+       c.Assert(err, IsNil)
+
+       kcDst, _, err = setupKeepClient(dstConfig, dstKeepServicesJSON, true, replications, 0)
+       c.Assert(err, IsNil)
+
+       srcRoots := map[string]string{}
+       dstRoots := map[string]string{}
+       for uuid, root := range kcSrc.LocalRoots() {
+               if strings.HasSuffix(uuid, "02") {
+                       dstRoots[uuid] = root
+               } else {
+                       srcRoots[uuid] = root
+               }
+       }
+       if srcKeepServicesJSON == "" {
+               kcSrc.SetServiceRoots(srcRoots, srcRoots, srcRoots)
+       }
+       if dstKeepServicesJSON == "" {
+               kcDst.SetServiceRoots(dstRoots, dstRoots, dstRoots)
+       }
+
+       if replications == 0 {
+               // Must have gotten the default replication level of 2 from the dst discovery document
+               c.Assert(kcDst.Want_replicas, Equals, 2)
+       } else {
+               // The explicitly provided replications value takes precedence
+               c.Assert(kcDst.Want_replicas, Equals, replications)
+       }
+}
+
+func (s *ServerRequiredSuite) TestRsyncPutInOne_GetFromOtherShouldFail(c *C) {
+       setupRsync(c, false, 1)
+
+       // Put a block in src and verify that it is not found in dst
+       testNoCrosstalk(c, "test-data-1", kcSrc, kcDst)
+
+       // Put a block in dst and verify that it is not found in src
+       testNoCrosstalk(c, "test-data-2", kcDst, kcSrc)
+}
+
+func (s *ServerRequiredSuite) TestRsyncWithBlobSigning_PutInOne_GetFromOtherShouldFail(c *C) {
+       setupRsync(c, true, 1)
+
+       // Put a block in src and verify that it is not found in dst
+       testNoCrosstalk(c, "test-data-1", kcSrc, kcDst)
+
+       // Put a block in dst and verify that it is not found in src
+       testNoCrosstalk(c, "test-data-2", kcDst, kcSrc)
+}
+
+// testNoCrosstalk puts a block using kc1 and tries to Get it using kc2,
+// which should fail with a block-not-found error.
+func testNoCrosstalk(c *C, testData string, kc1, kc2 *keepclient.KeepClient) {
+       // Put a block using kc1
+       locator, _, err := kc1.PutB([]byte(testData))
+       c.Assert(err, Equals, nil)
+
+       locator = strings.Split(locator, "+")[0]
+       _, _, _, err = kc2.Get(keepclient.SignLocator(locator, kc2.Arvados.ApiToken, time.Now().AddDate(0, 0, 1), blobSignatureTTL, []byte(blobSigningKey)))
+       c.Assert(err, NotNil)
+       c.Check(err.Error(), Equals, "Block not found")
+}
+
+// Test keep-rsync initialization, with srcKeepServicesJSON
+func (s *ServerRequiredSuite) TestRsyncInitializeWithKeepServicesJSON(c *C) {
+       srcKeepServicesJSON = testKeepServicesJSON
+
+       setupRsync(c, false, 1)
+
+       localRoots := kcSrc.LocalRoots()
+       c.Check(localRoots, NotNil)
+       c.Check(localRoots["zzzzz-bi6l4-123456789012340"], Not(Equals), "")
+       c.Check(localRoots["zzzzz-bi6l4-123456789012341"], Not(Equals), "")
+}
+
+// Test keep-rsync initialization with default replications count
+func (s *ServerRequiredSuite) TestInitializeRsyncDefaultReplicationsCount(c *C) {
+       setupRsync(c, false, 0)
+}
+
+// Test keep-rsync initialization with replications count argument
+func (s *ServerRequiredSuite) TestInitializeRsyncReplicationsCount(c *C) {
+       setupRsync(c, false, 3)
+}
+
+// Put some blocks in Src and some more in Dst
+// And copy missing blocks from Src to Dst
+func (s *ServerRequiredSuite) TestKeepRsync(c *C) {
+       testKeepRsync(c, false, "")
+}
+
+// Put some blocks in Src and some more in Dst with blob signing enabled.
+// And copy missing blocks from Src to Dst
+func (s *ServerRequiredSuite) TestKeepRsync_WithBlobSigning(c *C) {
+       testKeepRsync(c, true, "")
+}
+
+// Put some blocks in Src and some more in Dst
+// Use prefix while doing rsync
+// And copy missing blocks from Src to Dst
+func (s *ServerRequiredSuite) TestKeepRsync_WithPrefix(c *C) {
+       data := []byte("test-data-4")
+       hash := fmt.Sprintf("%x", md5.Sum(data))
+
+       testKeepRsync(c, false, hash[0:3])
+       c.Check(len(dstIndex) > len(dstLocators), Equals, true)
+}
+
+// Put some blocks in Src and some more in Dst
+// Use prefix not in src while doing rsync
+// And copy missing blocks from Src to Dst
+func (s *ServerRequiredSuite) TestKeepRsync_WithNoSuchPrefixInSrc(c *C) {
+       testKeepRsync(c, false, "999")
+       c.Check(len(dstIndex), Equals, len(dstLocators))
+}
+
+// Put 5 blocks in src. Put 2 of those blocks in dst
+// Hence there are 3 additional blocks in src
+// Also, put 2 extra blocks in dst; they are hence only in dst
+// Run rsync and verify that those 7 blocks are now available in dst
+func testKeepRsync(c *C, enforcePermissions bool, prefix string) {
+       setupRsync(c, enforcePermissions, 1)
+
+       // setupTestData
+       setupTestData(c, prefix)
+
+       err := performKeepRsync(kcSrc, kcDst, blobSignatureTTL, blobSigningKey, prefix)
+       c.Check(err, IsNil)
+
+       // Now GetIndex from dst and verify that all 5 from src and the 2 extra blocks are found
+       dstIndex, err = getUniqueLocators(kcDst, "")
+       c.Check(err, IsNil)
+
+       for _, locator := range srcLocatorsMatchingPrefix {
+               _, ok := dstIndex[locator]
+               c.Assert(ok, Equals, true)
+       }
+
+       for _, locator := range extraDstLocators {
+               _, ok := dstIndex[locator]
+               c.Assert(ok, Equals, true)
+       }
+
+       if prefix == "" {
+               // all blocks from src and the two extra blocks
+               c.Assert(len(dstIndex), Equals, len(srcLocators)+len(extraDstLocators))
+       } else {
+               // 1 block matching the prefix copied over, the 2 blocks dst shared with src from the start, and the 2 extra dst-only blocks
+               c.Assert(len(dstIndex), Equals, len(srcLocatorsMatchingPrefix)+len(extraDstLocators)+2)
+       }
+}
+
+// Setup test data in src and dst.
+var srcLocators, srcLocatorsMatchingPrefix, dstLocators, extraDstLocators []string
+var dstIndex map[string]bool
+
+func setupTestData(c *C, indexPrefix string) {
+       srcLocators = []string{}
+       srcLocatorsMatchingPrefix = []string{}
+       dstLocators = []string{}
+       extraDstLocators = []string{}
+       dstIndex = make(map[string]bool)
+
+       // Put a few blocks in src using kcSrc
+       for i := 0; i < 5; i++ {
+               hash, _, err := kcSrc.PutB([]byte(fmt.Sprintf("test-data-%d", i)))
+               c.Check(err, IsNil)
+
+               srcLocators = append(srcLocators, strings.Split(hash, "+A")[0])
+               if strings.HasPrefix(hash, indexPrefix) {
+                       srcLocatorsMatchingPrefix = append(srcLocatorsMatchingPrefix, strings.Split(hash, "+A")[0])
+               }
+       }
+
+       // Put first two of those src blocks in dst using kcDst
+       for i := 0; i < 2; i++ {
+               hash, _, err := kcDst.PutB([]byte(fmt.Sprintf("test-data-%d", i)))
+               c.Check(err, IsNil)
+               dstLocators = append(dstLocators, strings.Split(hash, "+A")[0])
+       }
+
+       // Put two more blocks in dst; they are not in src at all
+       for i := 0; i < 2; i++ {
+               hash, _, err := kcDst.PutB([]byte(fmt.Sprintf("other-data-%d", i)))
+               c.Check(err, IsNil)
+               dstLocators = append(dstLocators, strings.Split(hash, "+A")[0])
+               extraDstLocators = append(extraDstLocators, strings.Split(hash, "+A")[0])
+       }
+}
+
+// Setup rsync using srcKeepServicesJSON with fake keepservers.
+// Expect error during performKeepRsync due to unreachable src keepservers.
+func (s *ServerRequiredSuite) TestErrorDuringRsync_FakeSrcKeepservers(c *C) {
+       srcKeepServicesJSON = testKeepServicesJSON
+
+       setupRsync(c, false, 1)
+
+       err := performKeepRsync(kcSrc, kcDst, blobSignatureTTL, "", "")
+       c.Assert(err, NotNil)
+       c.Check(err.Error(), Matches, ".*no such host.*")
+}
+
+// Setup rsync using dstKeepServicesJSON with fake keepservers.
+// Expect error during performKeepRsync due to unreachable dst keepservers.
+func (s *ServerRequiredSuite) TestErrorDuringRsync_FakeDstKeepservers(c *C) {
+       dstKeepServicesJSON = testKeepServicesJSON
+
+       setupRsync(c, false, 1)
+
+       err := performKeepRsync(kcSrc, kcDst, blobSignatureTTL, "", "")
+       c.Assert(err, NotNil)
+       c.Check(err.Error(), Matches, ".*no such host.*")
+}
+
+// Test rsync with signature error during Get from src.
+func (s *ServerRequiredSuite) TestErrorDuringRsync_ErrorGettingBlockFromSrc(c *C) {
+       setupRsync(c, true, 1)
+
+       // put some blocks in src and dst
+       setupTestData(c, "")
+
+       // Change blob signing key to a fake key, so that Get from src fails
+       blobSigningKey = "thisisfakeblobsigningkey"
+
+       err := performKeepRsync(kcSrc, kcDst, blobSignatureTTL, blobSigningKey, "")
+       c.Assert(err, NotNil)
+       c.Check(err.Error(), Matches, ".*HTTP 403 \"Forbidden\".*")
+}
+
+// Test rsync with error during Put to src.
+func (s *ServerRequiredSuite) TestErrorDuringRsync_ErrorPuttingBlockInDst(c *C) {
+       setupRsync(c, false, 1)
+
+       // put some blocks in src and dst
+       setupTestData(c, "")
+
+       // Increase Want_replicas on dst to result in insufficient replicas error during Put
+       kcDst.Want_replicas = 2
+
+       err := performKeepRsync(kcSrc, kcDst, blobSignatureTTL, blobSigningKey, "")
+       c.Assert(err, NotNil)
+       c.Check(err.Error(), Matches, ".*Could not write sufficient replicas.*")
+}
+
+// Test loadConfig func
+func (s *ServerNotRequiredSuite) TestLoadConfig(c *C) {
+       // Setup a src config file
+       srcFile := setupConfigFile(c, "src-config")
+       defer os.Remove(srcFile.Name())
+       srcConfigFile := srcFile.Name()
+
+       // Setup a dst config file
+       dstFile := setupConfigFile(c, "dst-config")
+       defer os.Remove(dstFile.Name())
+       dstConfigFile := dstFile.Name()
+
+       // load configuration from those files
+       srcConfig, srcBlobSigningKey, err := loadConfig(srcConfigFile)
+       c.Check(err, IsNil)
+
+       c.Assert(srcConfig.APIHost, Equals, os.Getenv("ARVADOS_API_HOST"))
+       c.Assert(srcConfig.APIToken, Equals, arvadostest.DataManagerToken)
+       c.Assert(srcConfig.APIHostInsecure, Equals, arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE")))
+       c.Assert(srcConfig.ExternalClient, Equals, false)
+
+       dstConfig, _, err := loadConfig(dstConfigFile)
+       c.Check(err, IsNil)
+
+       c.Assert(dstConfig.APIHost, Equals, os.Getenv("ARVADOS_API_HOST"))
+       c.Assert(dstConfig.APIToken, Equals, arvadostest.DataManagerToken)
+       c.Assert(dstConfig.APIHostInsecure, Equals, arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE")))
+       c.Assert(dstConfig.ExternalClient, Equals, false)
+
+       c.Assert(srcBlobSigningKey, Equals, "abcdefg")
+}
+
+// Test loadConfig func without setting up the config files
+func (s *ServerNotRequiredSuite) TestLoadConfig_MissingSrcConfig(c *C) {
+       _, _, err := loadConfig("")
+       c.Assert(err.Error(), Equals, "config file not specified")
+}
+
+// Test loadConfig func - error reading config
+func (s *ServerNotRequiredSuite) TestLoadConfig_ErrorLoadingSrcConfig(c *C) {
+       _, _, err := loadConfig("no-such-config-file")
+       c.Assert(err, NotNil)
+       c.Check(err.Error(), Matches, ".*no such file or directory.*")
+}
+
+func (s *ServerNotRequiredSuite) TestSetupKeepClient_NoBlobSignatureTTL(c *C) {
+       var srcConfig apiConfig
+       srcConfig.APIHost = os.Getenv("ARVADOS_API_HOST")
+       srcConfig.APIToken = arvadostest.DataManagerToken
+       srcConfig.APIHostInsecure = arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE"))
+
+       _, ttl, err := setupKeepClient(srcConfig, srcKeepServicesJSON, false, 0, 0)
+       c.Check(err, IsNil)
+       c.Assert(ttl, Equals, blobSignatureTTL)
+}
+
+func setupConfigFile(c *C, name string) *os.File {
+       // Setup a config file
+       file, err := ioutil.TempFile(os.TempDir(), name)
+       c.Check(err, IsNil)
+
+       fileContent := "ARVADOS_API_HOST=" + os.Getenv("ARVADOS_API_HOST") + "\n"
+       fileContent += "ARVADOS_API_TOKEN=" + arvadostest.DataManagerToken + "\n"
+       fileContent += "ARVADOS_API_HOST_INSECURE=" + os.Getenv("ARVADOS_API_HOST_INSECURE") + "\n"
+       fileContent += "ARVADOS_EXTERNAL_CLIENT=false\n"
+       fileContent += "ARVADOS_BLOB_SIGNING_KEY=abcdefg"
+
+       _, err = file.Write([]byte(fileContent))
+       c.Check(err, IsNil)
+
+       return file
+}
+
+func (s *DoMainTestSuite) Test_doMain_NoSrcConfig(c *C) {
+       err := doMain()
+       c.Assert(err, NotNil)
+       c.Assert(err.Error(), Equals, "Error loading src configuration from file: config file not specified")
+}
+
+func (s *DoMainTestSuite) Test_doMain_SrcButNoDstConfig(c *C) {
+       srcConfig := setupConfigFile(c, "src")
+       args := []string{"-replications", "3", "-src", srcConfig.Name()}
+       os.Args = append(os.Args, args...)
+       err := doMain()
+       c.Assert(err, NotNil)
+       c.Assert(err.Error(), Equals, "Error loading dst configuration from file: config file not specified")
+}
+
+func (s *DoMainTestSuite) Test_doMain_BadSrcConfig(c *C) {
+       args := []string{"-src", "abcd"}
+       os.Args = append(os.Args, args...)
+       err := doMain()
+       c.Assert(err, NotNil)
+       c.Assert(err.Error(), Matches, "Error loading src configuration from file: Error reading config file.*")
+}
+
+func (s *DoMainTestSuite) Test_doMain_WithReplicationsButNoSrcConfig(c *C) {
+       args := []string{"-replications", "3"}
+       os.Args = append(os.Args, args...)
+       err := doMain()
+       c.Check(err, NotNil)
+       c.Assert(err.Error(), Equals, "Error loading src configuration from file: config file not specified")
+}
+
+func (s *DoMainTestSuite) Test_doMainWithSrcAndDstConfig(c *C) {
+       srcConfig := setupConfigFile(c, "src")
+       dstConfig := setupConfigFile(c, "dst")
+       args := []string{"-src", srcConfig.Name(), "-dst", dstConfig.Name()}
+       os.Args = append(os.Args, args...)
+
+       // Start keepservers. Since we are not doing any tweaking as
+       // in setupRsync func, kcSrc and kcDst will be the same and no
+       // actual copying to dst will happen, but that's ok.
+       arvadostest.StartKeep(2, false)
+       defer arvadostest.StopKeep(2)
+       keepclient.RefreshServiceDiscovery()
+
+       err := doMain()
+       c.Check(err, IsNil)
+}
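These suites start their own API and Keep servers via arvadostest, so they need a source tree with the test fixtures installed. Assuming a GOPATH-era checkout of the arvados repository, a typical invocation would be:

    go test git.curoverse.com/arvados.git/tools/keep-rsync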
diff --git a/tools/sync-groups/.gitignore b/tools/sync-groups/.gitignore
new file mode 100644 (file)
index 0000000..a06aa76
--- /dev/null
@@ -0,0 +1 @@
+sync-groups
diff --git a/tools/sync-groups/sync-groups.go b/tools/sync-groups/sync-groups.go
new file mode 100644 (file)
index 0000000..93e0dd5
--- /dev/null
@@ -0,0 +1,697 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "encoding/csv"
+       "encoding/json"
+       "flag"
+       "fmt"
+       "io"
+       "log"
+       "net/url"
+       "os"
+       "strings"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+)
+
+var version = "dev"
+
+type resourceList interface {
+       Len() int
+       GetItems() []interface{}
+}
+
+// GroupInfo tracks previous and current members of a particular Group
+type GroupInfo struct {
+       Group           arvados.Group
+       PreviousMembers map[string]bool
+       CurrentMembers  map[string]bool
+}
+
+// GetUserID returns the correct user id value depending on the selector
+func GetUserID(u arvados.User, idSelector string) (string, error) {
+       switch idSelector {
+       case "email":
+               return u.Email, nil
+       case "username":
+               return u.Username, nil
+       default:
+               return "", fmt.Errorf("cannot identify user by %q selector", idSelector)
+       }
+}
+
+// UserList implements resourceList interface
+type UserList struct {
+       arvados.UserList
+}
+
+// Len returns the number of items this list holds
+func (l UserList) Len() int {
+       return len(l.Items)
+}
+
+// GetItems returns the list of items
+func (l UserList) GetItems() (out []interface{}) {
+       for _, item := range l.Items {
+               out = append(out, item)
+       }
+       return
+}
+
+// GroupList implements resourceList interface
+type GroupList struct {
+       arvados.GroupList
+}
+
+// Len returns the number of items this list holds
+func (l GroupList) Len() int {
+       return len(l.Items)
+}
+
+// GetItems returns the list of items
+func (l GroupList) GetItems() (out []interface{}) {
+       for _, item := range l.Items {
+               out = append(out, item)
+       }
+       return
+}
+
+// LinkList implements resourceList interface
+type LinkList struct {
+       arvados.LinkList
+}
+
+// Len returns the number of items this list holds
+func (l LinkList) Len() int {
+       return len(l.Items)
+}
+
+// GetItems returns the list of items
+func (l LinkList) GetItems() (out []interface{}) {
+       for _, item := range l.Items {
+               out = append(out, item)
+       }
+       return
+}
+
+func main() {
+       // Parse & validate arguments, set up arvados client.
+       cfg, err := GetConfig()
+       if err != nil {
+               log.Fatalf("%v", err)
+       }
+
+       if err := doMain(&cfg); err != nil {
+               log.Fatalf("%v", err)
+       }
+}
+
+// ConfigParams holds configuration data for this tool
+type ConfigParams struct {
+       Path            string
+       UserID          string
+       Verbose         bool
+       ParentGroupUUID string
+       ParentGroupName string
+       SysUserUUID     string
+       Client          *arvados.Client
+}
+
+// ParseFlags parses and validates command line arguments
+func ParseFlags(config *ConfigParams) error {
+       // Acceptable attributes to identify a user on the CSV file
+       userIDOpts := map[string]bool{
+               "email":    true, // default
+               "username": true,
+       }
+
+       flags := flag.NewFlagSet(os.Args[0], flag.ExitOnError)
+
+       // Set up usage message
+       flags.Usage = func() {
+               usageStr := `Synchronize remote groups into Arvados from a CSV format file with 2 columns:
+  * 1st column: Group name
+  * 2nd column: User identifier`
+               fmt.Fprintf(os.Stderr, "%s\n\n", usageStr)
+               fmt.Fprintf(os.Stderr, "Usage:\n%s [OPTIONS] <input-file.csv>\n\n", os.Args[0])
+               fmt.Fprintf(os.Stderr, "Options:\n")
+               flags.PrintDefaults()
+       }
+
+       // Set up option flags
+       userID := flags.String(
+               "user-id",
+               "email",
+               "Attribute by which every user is identified. Valid values are: email and username.")
+       verbose := flags.Bool(
+               "verbose",
+               false,
+               "Log informational messages. Off by default.")
+       getVersion := flags.Bool(
+               "version",
+               false,
+               "Print version information and exit.")
+       parentGroupUUID := flags.String(
+               "parent-group-uuid",
+               "",
+               "Use given group UUID as a parent for the remote groups. Should be owned by the system user. If not specified, a group named '"+config.ParentGroupName+"' will be used (and created if nonexistent).")
+
+       // Parse args; omit the first arg which is the command name
+       flags.Parse(os.Args[1:])
+
+       // Print version information if requested
+       if *getVersion {
+               fmt.Printf("%s %s\n", os.Args[0], version)
+               os.Exit(0)
+       }
+
+       // Input file as a required positional argument
+       if flags.NArg() == 0 {
+               return fmt.Errorf("please provide a path to an input file")
+       }
+       srcPath := flags.Arg(0)
+
+       // Validations
+       if srcPath == "" {
+               return fmt.Errorf("input file path invalid")
+       }
+       if !userIDOpts[*userID] {
+               var options []string
+               for opt := range userIDOpts {
+                       options = append(options, opt)
+               }
+               return fmt.Errorf("user ID must be one of: %s", strings.Join(options, ", "))
+       }
+
+       config.Path = srcPath
+       config.ParentGroupUUID = *parentGroupUUID
+       config.UserID = *userID
+       config.Verbose = *verbose
+
+       return nil
+}
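Putting the flags together, a hypothetical invocation that syncs groups keyed by username with verbose logging:

    sync-groups -user-id username -verbose /path/to/groups.csv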
+
+// SetParentGroup finds or creates the parent group for all remote groups
+func SetParentGroup(cfg *ConfigParams) error {
+       var parentGroup arvados.Group
+       if cfg.ParentGroupUUID == "" {
+               // UUID not provided, search for preexisting parent group
+               var gl GroupList
+               params := arvados.ResourceListParams{
+                       Filters: []arvados.Filter{{
+                               Attr:     "name",
+                               Operator: "=",
+                               Operand:  cfg.ParentGroupName,
+                       }, {
+                               Attr:     "owner_uuid",
+                               Operator: "=",
+                               Operand:  cfg.SysUserUUID,
+                       }},
+               }
+               if err := cfg.Client.RequestAndDecode(&gl, "GET", "/arvados/v1/groups", nil, params); err != nil {
+                       return fmt.Errorf("error searching for parent group: %s", err)
+               }
+               if len(gl.Items) == 0 {
+                       // Default parent group does not exist, create it.
+                       if cfg.Verbose {
+                               log.Println("Default parent group not found, creating...")
+                       }
+                       groupData := map[string]string{
+                               "name":       cfg.ParentGroupName,
+                               "owner_uuid": cfg.SysUserUUID,
+                       }
+                       if err := CreateGroup(cfg, &parentGroup, groupData); err != nil {
+                               return fmt.Errorf("error creating system user owned group named %q: %s", groupData["name"], err)
+                       }
+               } else if len(gl.Items) == 1 {
+                       // Default parent group found.
+                       parentGroup = gl.Items[0]
+               } else {
+                       // This should never happen, as there's a unique index
+                       // on (owner_uuid, name) for groups.
+                       return fmt.Errorf("bug: found %d groups owned by system user and named %q", len(gl.Items), cfg.ParentGroupName)
+               }
+               cfg.ParentGroupUUID = parentGroup.UUID
+       } else {
+               // UUID provided. Check if exists and if it's owned by system user
+               if err := GetGroup(cfg, &parentGroup, cfg.ParentGroupUUID); err != nil {
+                       return fmt.Errorf("error searching for parent group with UUID %q: %s", cfg.ParentGroupUUID, err)
+               }
+               if parentGroup.OwnerUUID != cfg.SysUserUUID {
+                       return fmt.Errorf("parent group %q (%s) must be owned by system user", parentGroup.Name, cfg.ParentGroupUUID)
+               }
+       }
+       return nil
+}
+
+// GetConfig sets up a ConfigParams struct
+func GetConfig() (config ConfigParams, err error) {
+       config.ParentGroupName = "Externally synchronized groups"
+
+       // Command arguments
+       err = ParseFlags(&config)
+       if err != nil {
+               return config, err
+       }
+
+       // Arvados Client setup
+       config.Client = arvados.NewClientFromEnv()
+
+       // Check current user permissions & get System user's UUID
+       u, err := config.Client.CurrentUser()
+       if err != nil {
+               return config, fmt.Errorf("error getting the current user: %s", err)
+       }
+       if !u.IsActive || !u.IsAdmin {
+               return config, fmt.Errorf("current user (%s) is not an active admin user", u.UUID)
+       }
+       config.SysUserUUID = u.UUID[:12] + "000000000000000"
+
+       // Set up remote groups' parent
+       if err = SetParentGroup(&config); err != nil {
+               return config, err
+       }
+
+       return config, nil
+}
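The sysUserUUID derivation above works because every Arvados UUID begins with a 12-character cluster-and-type prefix, and the system user's trailing 15 digits are all zeros. A sketch with a hypothetical current-user UUID:

    package main

    import "fmt"

    func main() {
        currentUserUUID := "zzzzz-tpzed-abcdefghijklmno" // hypothetical
        // Keep the "zzzzz-tpzed-" prefix, zero out the remainder.
        sysUserUUID := currentUserUUID[:12] + "000000000000000"
        fmt.Println(sysUserUUID) // zzzzz-tpzed-000000000000000
    }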
+
+func doMain(cfg *ConfigParams) error {
+       // Try opening the input file early, just in case there's a problem.
+       f, err := os.Open(cfg.Path)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+
+       log.Printf("%s %s started. Using %q as user ID and parent group UUID %q", os.Args[0], version, cfg.UserID, cfg.ParentGroupUUID)
+
+       // Get the complete user list to minimize API Server requests
+       allUsers := make(map[string]arvados.User)
+       userIDToUUID := make(map[string]string) // Index by email or username
+       results, err := GetAll(cfg.Client, "users", arvados.ResourceListParams{}, &UserList{})
+       if err != nil {
+               return fmt.Errorf("error getting user list: %s", err)
+       }
+       log.Printf("Found %d users", len(results))
+       for _, item := range results {
+               u := item.(arvados.User)
+               allUsers[u.UUID] = u
+               uID, err := GetUserID(u, cfg.UserID)
+               if err != nil {
+                       return err
+               }
+               userIDToUUID[uID] = u.UUID
+               if cfg.Verbose {
+                       log.Printf("Seen user %q (%s)", u.Username, u.UUID)
+               }
+       }
+
+       // Get remote groups and their members
+       remoteGroups, groupNameToUUID, err := GetRemoteGroups(cfg, allUsers)
+       if err != nil {
+               return err
+       }
+       log.Printf("Found %d remote groups", len(remoteGroups))
+       if cfg.Verbose {
+               for groupUUID := range remoteGroups {
+                       log.Printf("- Group %q: %d users", remoteGroups[groupUUID].Group.Name, len(remoteGroups[groupUUID].PreviousMembers))
+               }
+       }
+
+       membershipsRemoved := 0
+
+       // Read the CSV file
+       groupsCreated, membershipsAdded, membershipsSkipped, err := ProcessFile(cfg, f, userIDToUUID, groupNameToUUID, remoteGroups, allUsers)
+       if err != nil {
+               return err
+       }
+
+       // Remove previous members not listed on this run
+       for groupUUID := range remoteGroups {
+               gi := remoteGroups[groupUUID]
+               evictedMembers := subtract(gi.PreviousMembers, gi.CurrentMembers)
+               groupName := gi.Group.Name
+               if len(evictedMembers) > 0 {
+                       log.Printf("Removing %d users from group %q", len(evictedMembers), groupName)
+               }
+               for evictedUser := range evictedMembers {
+                       if err := RemoveMemberFromGroup(cfg, allUsers[userIDToUUID[evictedUser]], gi.Group); err != nil {
+                               return err
+                       }
+                       membershipsRemoved++
+               }
+       }
+       log.Printf("Groups created: %d. Memberships added: %d, removed: %d, skipped: %d", groupsCreated, membershipsAdded, membershipsRemoved, membershipsSkipped)
+
+       return nil
+}
+
+// ProcessFile reads the CSV file and processes every record
+func ProcessFile(
+       cfg *ConfigParams,
+       f *os.File,
+       userIDToUUID map[string]string,
+       groupNameToUUID map[string]string,
+       remoteGroups map[string]*GroupInfo,
+       allUsers map[string]arvados.User,
+) (groupsCreated, membersAdded, membersSkipped int, err error) {
+       lineNo := 0
+       csvReader := csv.NewReader(f)
+       csvReader.FieldsPerRecord = 2
+       for {
+               record, e := csvReader.Read()
+               if e == io.EOF {
+                       break
+               }
+               lineNo++
+               if e != nil {
+                       err = fmt.Errorf("error parsing %q, line %d: %s", cfg.Path, lineNo, e)
+                       return
+               }
+               groupName := strings.TrimSpace(record[0])
+               groupMember := strings.TrimSpace(record[1]) // User ID (username or email)
+               if groupName == "" || groupMember == "" {
+                       log.Printf("Warning: CSV record has at least one empty field (%s, %s). Skipping", groupName, groupMember)
+                       membersSkipped++
+                       continue
+               }
+               if _, found := userIDToUUID[groupMember]; !found {
+                       // User not present on the system, skip.
+                       log.Printf("Warning: there's no user with %s %q on the system, skipping.", cfg.UserID, groupMember)
+                       membersSkipped++
+                       continue
+               }
+               if _, found := groupNameToUUID[groupName]; !found {
+                       // Group doesn't exist, create it before continuing
+                       if cfg.Verbose {
+                               log.Printf("Remote group %q not found, creating...", groupName)
+                       }
+                       var newGroup arvados.Group
+                       groupData := map[string]string{
+                               "name":        groupName,
+                               "owner_uuid":  cfg.ParentGroupUUID,
+                               "group_class": "role",
+                       }
+                       if e := CreateGroup(cfg, &newGroup, groupData); e != nil {
+                               err = fmt.Errorf("error creating group named %q: %s", groupName, e)
+                               return
+                       }
+                       // Update cached group data
+                       groupNameToUUID[groupName] = newGroup.UUID
+                       remoteGroups[newGroup.UUID] = &GroupInfo{
+                               Group:           newGroup,
+                               PreviousMembers: make(map[string]bool), // Empty set
+                               CurrentMembers:  make(map[string]bool), // Empty set
+                       }
+                       groupsCreated++
+               }
+               // Both group & user exist, check if user is a member
+               groupUUID := groupNameToUUID[groupName]
+               gi := remoteGroups[groupUUID]
+               if !gi.PreviousMembers[groupMember] && !gi.CurrentMembers[groupMember] {
+                       if cfg.Verbose {
+                               log.Printf("Adding %q to group %q", groupMember, groupName)
+                       }
+                       // User wasn't a member, but should be.
+                       if e := AddMemberToGroup(cfg, allUsers[userIDToUUID[groupMember]], gi.Group); e != nil {
+                               err = e
+                               return
+                       }
+                       membersAdded++
+               }
+               gi.CurrentMembers[groupMember] = true
+       }
+       return
+}
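An example of the two-column input that ProcessFile expects, with hypothetical group names and user emails (assuming the default -user-id email):

    Biology,alice@example.com
    Biology,bob@example.com
    Physics,carol@example.com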
+
+// GetAll fetches every page of the given resource type and returns all items in a single list
+func GetAll(c *arvados.Client, res string, params arvados.ResourceListParams, page resourceList) (allItems []interface{}, err error) {
+       // Use the maximum page size the server allows
+       limit := 1<<31 - 1
+       params.Limit = &limit
+       params.Offset = 0
+       params.Order = "uuid"
+       for {
+               if err = GetResourceList(c, &page, res, params); err != nil {
+                       return allItems, err
+               }
+               // Have we finished paging?
+               if page.Len() == 0 {
+                       break
+               }
+               allItems = append(allItems, page.GetItems()...)
+               params.Offset += page.Len()
+       }
+       return allItems, nil
+}
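The offset-based paging loop in GetAll can be seen in isolation with a fake page fetcher; everything below is illustrative, not part of the tool:

    package main

    import "fmt"

    // fetchPage stands in for GetResourceList: it returns up to limit
    // items starting at offset, out of a fixed total of 7.
    func fetchPage(offset, limit int) []int {
        const total = 7
        var page []int
        for i := offset; i < total && i < offset+limit; i++ {
            page = append(page, i)
        }
        return page
    }

    func main() {
        var all []int
        limit, offset := 3, 0
        for {
            page := fetchPage(offset, limit)
            if len(page) == 0 {
                break // finished paging
            }
            all = append(all, page...)
            offset += len(page)
        }
        fmt.Println(all) // [0 1 2 3 4 5 6]
    }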
+
+func subtract(setA map[string]bool, setB map[string]bool) map[string]bool {
+       result := make(map[string]bool)
+       for element := range setA {
+               if !setB[element] {
+                       result[element] = true
+               }
+       }
+       return result
+}
+
+func jsonReader(rscName string, ob interface{}) io.Reader {
+       j, err := json.Marshal(ob)
+       if err != nil {
+               panic(err)
+       }
+       v := url.Values{}
+       v[rscName] = []string{string(j)}
+       return bytes.NewBufferString(v.Encode())
+}
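jsonReader produces a form-encoded body whose single field holds the JSON-serialized resource, which is how the helpers below submit resources to the API. A standalone sketch showing the resulting wire format:

    package main

    import (
        "encoding/json"
        "fmt"
        "net/url"
    )

    func main() {
        j, err := json.Marshal(map[string]string{"name": "Example group"})
        if err != nil {
            panic(err)
        }
        v := url.Values{"group": []string{string(j)}}
        fmt.Println(v.Encode()) // group=%7B%22name%22%3A%22Example+group%22%7D
    }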
+
+// GetRemoteGroups fetches all remote groups with their members
+func GetRemoteGroups(cfg *ConfigParams, allUsers map[string]arvados.User) (remoteGroups map[string]*GroupInfo, groupNameToUUID map[string]string, err error) {
+       remoteGroups = make(map[string]*GroupInfo)
+       groupNameToUUID = make(map[string]string) // Index by group name
+
+       params := arvados.ResourceListParams{
+               Filters: []arvados.Filter{{
+                       Attr:     "owner_uuid",
+                       Operator: "=",
+                       Operand:  cfg.ParentGroupUUID,
+               }},
+       }
+       results, err := GetAll(cfg.Client, "groups", params, &GroupList{})
+       if err != nil {
+               return remoteGroups, groupNameToUUID, fmt.Errorf("error getting remote groups: %s", err)
+       }
+       for _, item := range results {
+               group := item.(arvados.Group)
+               // Group -> User filter
+               g2uFilter := arvados.ResourceListParams{
+                       Filters: []arvados.Filter{{
+                               Attr:     "owner_uuid",
+                               Operator: "=",
+                               Operand:  cfg.SysUserUUID,
+                       }, {
+                               Attr:     "link_class",
+                               Operator: "=",
+                               Operand:  "permission",
+                       }, {
+                               Attr:     "name",
+                               Operator: "=",
+                               Operand:  "can_read",
+                       }, {
+                               Attr:     "tail_uuid",
+                               Operator: "=",
+                               Operand:  group.UUID,
+                       }, {
+                               Attr:     "head_uuid",
+                               Operator: "is_a",
+                               Operand:  "arvados#user",
+                       }},
+               }
+               // User -> Group filter
+               u2gFilter := arvados.ResourceListParams{
+                       Filters: []arvados.Filter{{
+                               Attr:     "owner_uuid",
+                               Operator: "=",
+                               Operand:  cfg.SysUserUUID,
+                       }, {
+                               Attr:     "link_class",
+                               Operator: "=",
+                               Operand:  "permission",
+                       }, {
+                               Attr:     "name",
+                               Operator: "=",
+                               Operand:  "can_write",
+                       }, {
+                               Attr:     "head_uuid",
+                               Operator: "=",
+                               Operand:  group.UUID,
+                       }, {
+                               Attr:     "tail_uuid",
+                               Operator: "is_a",
+                               Operand:  "arvados#user",
+                       }},
+               }
+               g2uLinks, err := GetAll(cfg.Client, "links", g2uFilter, &LinkList{})
+               if err != nil {
+                       return remoteGroups, groupNameToUUID, fmt.Errorf("error getting member (can_read) links for group %q: %s", group.Name, err)
+               }
+               u2gLinks, err := GetAll(cfg.Client, "links", u2gFilter, &LinkList{})
+               if err != nil {
+                       return remoteGroups, groupNameToUUID, fmt.Errorf("error getting member (can_write) links for group %q: %s", group.Name, err)
+               }
+               // Build a list of user ids (email or username) belonging to this group
+               membersSet := make(map[string]bool)
+               u2gLinkSet := make(map[string]bool)
+               for _, l := range u2gLinks {
+                       linkedMemberUUID := l.(arvados.Link).TailUUID
+                       u2gLinkSet[linkedMemberUUID] = true
+               }
+               for _, item := range g2uLinks {
+                       link := item.(arvados.Link)
+                       // We may have received an old link pointing to a removed account.
+                       if _, found := allUsers[link.HeadUUID]; !found {
+                               continue
+                       }
+                       // The matching User -> Group link may not exist if the link
+                       // creation failed on a previous run. If that's the case, don't
+                       // include this account on the "previous members" list.
+                       if _, found := u2gLinkSet[link.HeadUUID]; !found {
+                               continue
+                       }
+                       memberID, err := GetUserID(allUsers[link.HeadUUID], cfg.UserID)
+                       if err != nil {
+                               return remoteGroups, groupNameToUUID, err
+                       }
+                       membersSet[memberID] = true
+               }
+               remoteGroups[group.UUID] = &GroupInfo{
+                       Group:           group,
+                       PreviousMembers: membersSet,
+                       CurrentMembers:  make(map[string]bool), // Empty set
+               }
+               groupNameToUUID[group.Name] = group.UUID
+       }
+       return remoteGroups, groupNameToUUID, nil
+}
+
+// RemoveMemberFromGroup removes all permission links related to the membership
+func RemoveMemberFromGroup(cfg *ConfigParams, user arvados.User, group arvados.Group) error {
+       if cfg.Verbose {
+               log.Printf("Getting group membership links for user %q (%s) on group %q (%s)", user.Username, user.UUID, group.Name, group.UUID)
+       }
+       var links []interface{}
+       // Search for all group<->user links (both ways)
+       for _, filterset := range [][]arvados.Filter{
+               // Group -> User
+               {{
+                       Attr:     "link_class",
+                       Operator: "=",
+                       Operand:  "permission",
+               }, {
+                       Attr:     "tail_uuid",
+                       Operator: "=",
+                       Operand:  group.UUID,
+               }, {
+                       Attr:     "head_uuid",
+                       Operator: "=",
+                       Operand:  user.UUID,
+               }},
+               // Group <- User
+               {{
+                       Attr:     "link_class",
+                       Operator: "=",
+                       Operand:  "permission",
+               }, {
+                       Attr:     "tail_uuid",
+                       Operator: "=",
+                       Operand:  user.UUID,
+               }, {
+                       Attr:     "head_uuid",
+                       Operator: "=",
+                       Operand:  group.UUID,
+               }},
+       } {
+               l, err := GetAll(cfg.Client, "links", arvados.ResourceListParams{Filters: filterset}, &LinkList{})
+               if err != nil {
+                       userID, _ := GetUserID(user, cfg.UserID)
+                       return fmt.Errorf("error getting links needed to remove user %q from group %q: %s", userID, group.Name, err)
+               }
+               for _, link := range l {
+                       links = append(links, link)
+               }
+       }
+       for _, item := range links {
+               link := item.(arvados.Link)
+               userID, _ := GetUserID(user, cfg.UserID)
+               if cfg.Verbose {
+                       log.Printf("Removing %q permission link for %q on group %q", link.Name, userID, group.Name)
+               }
+               if err := DeleteLink(cfg, link.UUID); err != nil {
+                       return fmt.Errorf("error removing user %q from group %q: %s", userID, group.Name, err)
+               }
+       }
+       return nil
+}
+
+// AddMemberToGroup creates the pair of permission links that represents a membership
+func AddMemberToGroup(cfg *ConfigParams, user arvados.User, group arvados.Group) error {
+       var newLink arvados.Link
+       linkData := map[string]string{
+               "owner_uuid": cfg.SysUserUUID,
+               "link_class": "permission",
+               "name":       "can_read",
+               "tail_uuid":  group.UUID,
+               "head_uuid":  user.UUID,
+       }
+       if err := CreateLink(cfg, &newLink, linkData); err != nil {
+               userID, _ := GetUserID(user, cfg.UserID)
+               return fmt.Errorf("error adding group %q -> user %q read permission: %s", group.Name, userID, err)
+       }
+       linkData = map[string]string{
+               "owner_uuid": cfg.SysUserUUID,
+               "link_class": "permission",
+               "name":       "can_write",
+               "tail_uuid":  user.UUID,
+               "head_uuid":  group.UUID,
+       }
+       if err := CreateLink(cfg, &newLink, linkData); err != nil {
+               userID, _ := GetUserID(user, cfg.UserID)
+               return fmt.Errorf("error adding user %q -> group %q write permission: %s", userID, group.Name, err)
+       }
+       return nil
+}
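+
+// A minimal usage sketch (illustrative only, not part of the tool's
+// flow), assuming cfg, user and group are already populated:
+//
+//	if err := AddMemberToGroup(cfg, user, group); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := RemoveMemberFromGroup(cfg, user, group); err != nil {
+//		log.Fatal(err)
+//	}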
+
+// CreateGroup creates a group with groupData parameters and assigns the result to dst
+func CreateGroup(cfg *ConfigParams, dst *arvados.Group, groupData map[string]string) error {
+       return cfg.Client.RequestAndDecode(dst, "POST", "/arvados/v1/groups", jsonReader("group", groupData), nil)
+}
+
+// GetGroup fetches a group by its UUID
+func GetGroup(cfg *ConfigParams, dst *arvados.Group, groupUUID string) error {
+       return cfg.Client.RequestAndDecode(dst, "GET", "/arvados/v1/groups/"+groupUUID, nil, nil)
+}
+
+// CreateLink creates a link with linkData parameters and assigns the result to dst
+func CreateLink(cfg *ConfigParams, dst *arvados.Link, linkData map[string]string) error {
+       return cfg.Client.RequestAndDecode(dst, "POST", "/arvados/v1/links", jsonReader("link", linkData), nil)
+}
+
+// DeleteLink deletes a link by its UUID
+func DeleteLink(cfg *ConfigParams, linkUUID string) error {
+       if linkUUID == "" {
+               return fmt.Errorf("cannot delete link with invalid UUID: %q", linkUUID)
+       }
+       return cfg.Client.RequestAndDecode(&arvados.Link{}, "DELETE", "/arvados/v1/links/"+linkUUID, nil, nil)
+}
+
+// GetResourceList fetches a list of the resource type res using params
+func GetResourceList(c *arvados.Client, dst *resourceList, res string, params interface{}) error {
+       return c.RequestAndDecode(dst, "GET", "/arvados/v1/"+res, nil, params)
+}
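+
+// Illustrative sketch (an assumed typical call, mirroring how GetAll
+// above passes a concrete resourceList implementation such as
+// *LinkList): fetching one page of permission links:
+//
+//	var page resourceList = &LinkList{}
+//	params := arvados.ResourceListParams{Filters: []arvados.Filter{{
+//		Attr: "link_class", Operator: "=", Operand: "permission",
+//	}}}
+//	err := GetResourceList(cfg.Client, &page, "links", params)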
diff --git a/tools/sync-groups/sync-groups_test.go b/tools/sync-groups/sync-groups_test.go
new file mode 100644 (file)
index 0000000..4a3e470
--- /dev/null
@@ -0,0 +1,420 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "fmt"
+       "io/ioutil"
+       "os"
+       "strings"
+       "testing"
+
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+       . "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       TestingT(t)
+}
+
+type TestSuite struct {
+       cfg   *ConfigParams
+       users map[string]arvados.User
+}
+
+func (s *TestSuite) SetUpSuite(c *C) {
+       arvadostest.StartAPI()
+}
+
+func (s *TestSuite) TearDownSuite(c *C) {
+       arvadostest.StopAPI()
+}
+
+func (s *TestSuite) SetUpTest(c *C) {
+       ac := arvados.NewClientFromEnv()
+       u, err := ac.CurrentUser()
+       c.Assert(err, IsNil)
+       // Check that the parent group doesn't exist
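+       // (Assumption behind the derivation below: the system user UUID
+       // is the current user's "zzzzz-tpzed-" prefix plus fifteen zeros.)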
+       sysUserUUID := u.UUID[:12] + "000000000000000"
+       gl := arvados.GroupList{}
+       params := arvados.ResourceListParams{
+               Filters: []arvados.Filter{{
+                       Attr:     "owner_uuid",
+                       Operator: "=",
+                       Operand:  sysUserUUID,
+               }, {
+                       Attr:     "name",
+                       Operator: "=",
+                       Operand:  "Externally synchronized groups",
+               }},
+       }
+       ac.RequestAndDecode(&gl, "GET", "/arvados/v1/groups", nil, params)
+       c.Assert(gl.ItemsAvailable, Equals, 0)
+       // Set up config
+       os.Args = []string{"cmd", "somefile.csv"}
+       config, err := GetConfig()
+       c.Assert(err, IsNil)
+       // Confirm that the parent group was created
+       gl = arvados.GroupList{}
+       ac.RequestAndDecode(&gl, "GET", "/arvados/v1/groups", nil, params)
+       c.Assert(gl.ItemsAvailable, Equals, 1)
+       // Config setup complete; save it for further testing
+       s.cfg = &config
+
+       // Fetch current user list
+       ul := arvados.UserList{}
+       params = arvados.ResourceListParams{
+               Filters: []arvados.Filter{{
+                       Attr:     "uuid",
+                       Operator: "!=",
+                       Operand:  s.cfg.SysUserUUID,
+               }},
+       }
+       ac.RequestAndDecode(&ul, "GET", "/arvados/v1/users", nil, params)
+       c.Assert(ul.ItemsAvailable, Not(Equals), 0)
+       s.users = make(map[string]arvados.User)
+       for _, u := range ul.Items {
+               s.users[u.UUID] = u
+       }
+       c.Assert(len(s.users), Not(Equals), 0)
+}
+
+func (s *TestSuite) TearDownTest(c *C) {
+       var dst interface{}
+       // Reset database to fixture state after every test run.
+       err := s.cfg.Client.RequestAndDecode(&dst, "POST", "/database/reset", nil, nil)
+       c.Assert(err, IsNil)
+}
+
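+// Register the suite so gocheck's TestingT runner (Test, above) picks it up.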
+var _ = Suite(&TestSuite{})
+
+// MakeTempCSVFile creates a temp file with data as comma-separated values
+func MakeTempCSVFile(data [][]string) (f *os.File, err error) {
+       f, err = ioutil.TempFile("", "test_sync_remote_groups")
+       if err != nil {
+               return
+       }
+       for _, line := range data {
+               fmt.Fprintf(f, "%s\n", strings.Join(line, ","))
+       }
+       err = f.Close()
+       return
+}
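+
+// Example (illustrative): building the two-column group,user CSV the
+// tool consumes, removing it when done:
+//
+//	f, err := MakeTempCSVFile([][]string{{"TestGroup1", "user@example.com"}})
+//	// (handle err)
+//	defer os.Remove(f.Name())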
+
+// GroupMembershipExists checks that both permission links required for membership exist between user and group
+func GroupMembershipExists(ac *arvados.Client, userUUID string, groupUUID string) bool {
+       ll := LinkList{}
+       // Check Group -> User can_read permission
+       params := arvados.ResourceListParams{
+               Filters: []arvados.Filter{{
+                       Attr:     "link_class",
+                       Operator: "=",
+                       Operand:  "permission",
+               }, {
+                       Attr:     "tail_uuid",
+                       Operator: "=",
+                       Operand:  groupUUID,
+               }, {
+                       Attr:     "name",
+                       Operator: "=",
+                       Operand:  "can_read",
+               }, {
+                       Attr:     "head_uuid",
+                       Operator: "=",
+                       Operand:  userUUID,
+               }},
+       }
+       ac.RequestAndDecode(&ll, "GET", "/arvados/v1/links", nil, params)
+       if ll.Len() != 1 {
+               return false
+       }
+       // Check User -> Group can_write permission
+       params = arvados.ResourceListParams{
+               Filters: []arvados.Filter{{
+                       Attr:     "link_class",
+                       Operator: "=",
+                       Operand:  "permission",
+               }, {
+                       Attr:     "head_uuid",
+                       Operator: "=",
+                       Operand:  groupUUID,
+               }, {
+                       Attr:     "name",
+                       Operator: "=",
+                       Operand:  "can_write",
+               }, {
+                       Attr:     "tail_uuid",
+                       Operator: "=",
+                       Operand:  userUUID,
+               }},
+       }
+       ac.RequestAndDecode(&ll, "GET", "/arvados/v1/links", nil, params)
+       return ll.Len() == 1
+}
+
+// RemoteGroupExists returns the named group's UUID if it exists, or an empty string otherwise
+func RemoteGroupExists(cfg *ConfigParams, groupName string) (uuid string, err error) {
+       gl := arvados.GroupList{}
+       params := arvados.ResourceListParams{
+               Filters: []arvados.Filter{{
+                       Attr:     "name",
+                       Operator: "=",
+                       Operand:  groupName,
+               }, {
+                       Attr:     "owner_uuid",
+                       Operator: "=",
+                       Operand:  cfg.ParentGroupUUID,
+               }, {
+                       Attr:     "group_class",
+                       Operator: "=",
+                       Operand:  "role",
+               }},
+       }
+       err = cfg.Client.RequestAndDecode(&gl, "GET", "/arvados/v1/groups", nil, params)
+       if err != nil {
+               return "", err
+       }
+       if gl.ItemsAvailable == 0 {
+               // No group with this name
+               uuid = ""
+       } else if gl.ItemsAvailable == 1 {
+               // Group found
+               uuid = gl.Items[0].UUID
+       } else {
+               // This should never happen
+               uuid = ""
+               err = fmt.Errorf("more than 1 group found with the same name and parent")
+       }
+       return
+}
+
+func (s *TestSuite) TestParseFlagsWithPositionalArgument(c *C) {
+       cfg := ConfigParams{}
+       os.Args = []string{"cmd", "-verbose", "/tmp/somefile.csv"}
+       err := ParseFlags(&cfg)
+       c.Assert(err, IsNil)
+       c.Check(cfg.Path, Equals, "/tmp/somefile.csv")
+       c.Check(cfg.Verbose, Equals, true)
+}
+
+func (s *TestSuite) TestParseFlagsWithoutPositionalArgument(c *C) {
+       os.Args = []string{"cmd", "-verbose"}
+       err := ParseFlags(&ConfigParams{})
+       c.Assert(err, NotNil)
+}
+
+func (s *TestSuite) TestGetUserID(c *C) {
+       u := arvados.User{
+               Email:    "testuser@example.com",
+               Username: "Testuser",
+       }
+       email, err := GetUserID(u, "email")
+       c.Assert(err, IsNil)
+       c.Check(email, Equals, "testuser@example.com")
+       _, err = GetUserID(u, "bogus")
+       c.Assert(err, NotNil)
+}
+
+func (s *TestSuite) TestGetConfig(c *C) {
+       os.Args = []string{"cmd", "/tmp/somefile.csv"}
+       cfg, err := GetConfig()
+       c.Assert(err, IsNil)
+       c.Check(cfg.SysUserUUID, NotNil)
+       c.Check(cfg.Client, NotNil)
+       c.Check(cfg.ParentGroupUUID, NotNil)
+       c.Check(cfg.ParentGroupName, Equals, "Externally synchronized groups")
+}
+
+// Ignore leading & trailing spaces on group & user names
+func (s *TestSuite) TestIgnoreSpaces(c *C) {
+       activeUserEmail := s.users[arvadostest.ActiveUserUUID].Email
+       activeUserUUID := s.users[arvadostest.ActiveUserUUID].UUID
+       // Confirm that the groups don't exist
+       for _, groupName := range []string{"TestGroup1", "TestGroup2", "Test Group 3"} {
+               groupUUID, err := RemoteGroupExists(s.cfg, groupName)
+               c.Assert(err, IsNil)
+               c.Assert(groupUUID, Equals, "")
+       }
+       data := [][]string{
+               {" TestGroup1", activeUserEmail},
+               {"TestGroup2 ", " " + activeUserEmail},
+               {" Test Group 3 ", activeUserEmail + " "},
+       }
+       tmpfile, err := MakeTempCSVFile(data)
+       c.Assert(err, IsNil)
+       defer os.Remove(tmpfile.Name()) // clean up
+       s.cfg.Path = tmpfile.Name()
+       err = doMain(s.cfg)
+       c.Assert(err, IsNil)
+       // Check that 3 groups were created correctly, and have the active user as
+       // a member.
+       for _, groupName := range []string{"TestGroup1", "TestGroup2", "Test Group 3"} {
+               groupUUID, err := RemoteGroupExists(s.cfg, groupName)
+               c.Assert(err, IsNil)
+               c.Assert(groupUUID, Not(Equals), "")
+               c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, true)
+       }
+}
+
+// The absence of a user membership in the CSV file implies its removal
+func (s *TestSuite) TestMembershipRemoval(c *C) {
+       localUserEmail := s.users[arvadostest.ActiveUserUUID].Email
+       localUserUUID := s.users[arvadostest.ActiveUserUUID].UUID
+       remoteUserEmail := s.users[arvadostest.FederatedActiveUserUUID].Email
+       remoteUserUUID := s.users[arvadostest.FederatedActiveUserUUID].UUID
+       data := [][]string{
+               {"TestGroup1", localUserEmail},
+               {"TestGroup1", remoteUserEmail},
+               {"TestGroup2", localUserEmail},
+               {"TestGroup2", remoteUserEmail},
+       }
+       tmpfile, err := MakeTempCSVFile(data)
+       c.Assert(err, IsNil)
+       defer os.Remove(tmpfile.Name()) // clean up
+       s.cfg.Path = tmpfile.Name()
+       err = doMain(s.cfg)
+       c.Assert(err, IsNil)
+       // Confirm that memberships exist
+       for _, groupName := range []string{"TestGroup1", "TestGroup2"} {
+               groupUUID, err := RemoteGroupExists(s.cfg, groupName)
+               c.Assert(err, IsNil)
+               c.Assert(groupUUID, Not(Equals), "")
+               c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID), Equals, true)
+               c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID), Equals, true)
+       }
+       // New CSV with some previous membership missing
+       data = [][]string{
+               {"TestGroup1", localUserEmail},
+               {"TestGroup2", remoteUserEmail},
+       }
+       tmpfile2, err := MakeTempCSVFile(data)
+       c.Assert(err, IsNil)
+       defer os.Remove(tmpfile2.Name()) // clean up
+       s.cfg.Path = tmpfile2.Name()
+       err = doMain(s.cfg)
+       c.Assert(err, IsNil)
+       // Confirm TestGroup1 memberships
+       groupUUID, err := RemoteGroupExists(s.cfg, "TestGroup1")
+       c.Assert(err, IsNil)
+       c.Assert(groupUUID, Not(Equals), "")
+       c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID), Equals, true)
+       c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID), Equals, false)
+       // Confirm TestGroup2 memberships
+       groupUUID, err = RemoteGroupExists(s.cfg, "TestGroup2")
+       c.Assert(err, IsNil)
+       c.Assert(groupUUID, Not(Equals), "")
+       c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID), Equals, false)
+       c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID), Equals, true)
+}
+
+// If a group doesn't exist on the system, create it before adding users
+func (s *TestSuite) TestAutoCreateGroupWhenNotExisting(c *C) {
+       groupName := "Testers"
+       // Confirm that group doesn't exist
+       groupUUID, err := RemoteGroupExists(s.cfg, groupName)
+       c.Assert(err, IsNil)
+       c.Assert(groupUUID, Equals, "")
+       // Make a tmp CSV file
+       data := [][]string{
+               {groupName, s.users[arvadostest.ActiveUserUUID].Email},
+       }
+       tmpfile, err := MakeTempCSVFile(data)
+       c.Assert(err, IsNil)
+       defer os.Remove(tmpfile.Name()) // clean up
+       s.cfg.Path = tmpfile.Name()
+       err = doMain(s.cfg)
+       c.Assert(err, IsNil)
+       // "Testers" group should now exist
+       groupUUID, err = RemoteGroupExists(s.cfg, groupName)
+       c.Assert(err, IsNil)
+       c.Assert(groupUUID, Not(Equals), "")
+       // active user should be a member
+       c.Assert(GroupMembershipExists(s.cfg.Client, arvadostest.ActiveUserUUID, groupUUID), Equals, true)
+}
+
+// Users listed in the file that don't exist on the system are ignored
+func (s *TestSuite) TestIgnoreNonexistantUsers(c *C) {
+       activeUserEmail := s.users[arvadostest.ActiveUserUUID].Email
+       activeUserUUID := s.users[arvadostest.ActiveUserUUID].UUID
+       // Confirm that group doesn't exist
+       groupUUID, err := RemoteGroupExists(s.cfg, "TestGroup4")
+       c.Assert(err, IsNil)
+       c.Assert(groupUUID, Equals, "")
+       // Create file & run command
+       data := [][]string{
+               {"TestGroup4", "nonexistantuser@unknowndomain.com"}, // Processed first
+               {"TestGroup4", activeUserEmail},
+       }
+       tmpfile, err := MakeTempCSVFile(data)
+       c.Assert(err, IsNil)
+       defer os.Remove(tmpfile.Name()) // clean up
+       s.cfg.Path = tmpfile.Name()
+       err = doMain(s.cfg)
+       c.Assert(err, IsNil)
+       // Confirm that memberships exist
+       groupUUID, err = RemoteGroupExists(s.cfg, "TestGroup4")
+       c.Assert(err, IsNil)
+       c.Assert(groupUUID, Not(Equals), "")
+       c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, true)
+}
+
+// Lines with an empty group or user field are ignored
+func (s *TestSuite) TestIgnoreEmptyFields(c *C) {
+       activeUserEmail := s.users[arvadostest.ActiveUserUUID].Email
+       activeUserUUID := s.users[arvadostest.ActiveUserUUID].UUID
+       // Confirm that group doesn't exist
+       groupUUID, err := RemoteGroupExists(s.cfg, "TestGroup4")
+       c.Assert(err, IsNil)
+       c.Assert(groupUUID, Equals, "")
+       // Create file & run command
+       data := [][]string{
+               {"", activeUserEmail}, // Empty field
+               {"TestGroup5", ""},    // Empty field
+               {"TestGroup4", activeUserEmail},
+       }
+       tmpfile, err := MakeTempCSVFile(data)
+       c.Assert(err, IsNil)
+       defer os.Remove(tmpfile.Name()) // clean up
+       s.cfg.Path = tmpfile.Name()
+       err = doMain(s.cfg)
+       c.Assert(err, IsNil)
+       // Confirm that memberships exist
+       groupUUID, err = RemoteGroupExists(s.cfg, "TestGroup4")
+       c.Assert(err, IsNil)
+       c.Assert(groupUUID, Not(Equals), "")
+       c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, true)
+}
+
+// Instead of emails, use usernames as identifiers
+func (s *TestSuite) TestUseUsernames(c *C) {
+       activeUserName := s.users[arvadostest.ActiveUserUUID].Username
+       activeUserUUID := s.users[arvadostest.ActiveUserUUID].UUID
+       // Confirm that group doesn't exist
+       groupUUID, err := RemoteGroupExists(s.cfg, "TestGroup1")
+       c.Assert(err, IsNil)
+       c.Assert(groupUUID, Equals, "")
+       // Create file & run command
+       data := [][]string{
+               {"TestGroup1", activeUserName},
+       }
+       tmpfile, err := MakeTempCSVFile(data)
+       c.Assert(err, IsNil)
+       defer os.Remove(tmpfile.Name()) // clean up
+       s.cfg.Path = tmpfile.Name()
+       s.cfg.UserID = "username"
+       err = doMain(s.cfg)
+       s.cfg.UserID = "email"
+       c.Assert(err, IsNil)
+       // Confirm that memberships exist
+       groupUUID, err = RemoteGroupExists(s.cfg, "TestGroup1")
+       c.Assert(err, IsNil)
+       c.Assert(groupUUID, Not(Equals), "")
+       c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, true)
+}
diff --git a/vendor/.gitignore b/vendor/.gitignore
new file mode 100644 (file)
index 0000000..f902f86
--- /dev/null
@@ -0,0 +1,3 @@
+*
+!vendor.json
+!.gitignore
diff --git a/vendor/vendor.json b/vendor/vendor.json
new file mode 100644 (file)
index 0000000..5e2ed2e
--- /dev/null
@@ -0,0 +1,1179 @@
+{
+       "comment": "",
+       "ignore": "test",
+       "package": [
+               {
+                       "checksumSHA1": "j4je0EzPGzjb6INLY1BHZ+hyMjc=",
+                       "origin": "github.com/curoverse/goamz/aws",
+                       "path": "github.com/AdRoll/goamz/aws",
+                       "revision": "888b4804f2653cd35ebcc95f046079e63b5b2799",
+                       "revisionTime": "2017-07-27T13:52:37Z"
+               },
+               {
+                       "checksumSHA1": "0+n3cT6e7sQCCbBAH8zg6neiHTk=",
+                       "origin": "github.com/curoverse/goamz/s3",
+                       "path": "github.com/AdRoll/goamz/s3",
+                       "revision": "888b4804f2653cd35ebcc95f046079e63b5b2799",
+                       "revisionTime": "2017-07-27T13:52:37Z"
+               },
+               {
+                       "checksumSHA1": "tvxbsTkdjB0C/uxEglqD6JfVnMg=",
+                       "origin": "github.com/curoverse/goamz/s3/s3test",
+                       "path": "github.com/AdRoll/goamz/s3/s3test",
+                       "revision": "888b4804f2653cd35ebcc95f046079e63b5b2799",
+                       "revisionTime": "2017-07-27T13:52:37Z"
+               },
+               {
+                       "checksumSHA1": "KF4DsRUpZ+h+qRQ/umRAQZfVvw0=",
+                       "path": "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute",
+                       "revision": "4e8cbbfb1aeab140cd0fa97fd16b64ee18c3ca6a",
+                       "revisionTime": "2018-07-27T22:05:59Z"
+               },
+               {
+                       "checksumSHA1": "IZNzp1cYx+xYHd4gzosKpG6Jr/k=",
+                       "path": "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-06-01/network",
+                       "revision": "4e8cbbfb1aeab140cd0fa97fd16b64ee18c3ca6a",
+                       "revisionTime": "2018-07-27T22:05:59Z"
+               },
+               {
+                       "checksumSHA1": "W4c2uTDJlwhfryWg9esshmJANo0=",
+                       "path": "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-02-01/storage",
+                       "revision": "4e8cbbfb1aeab140cd0fa97fd16b64ee18c3ca6a",
+                       "revisionTime": "2018-07-27T22:05:59Z"
+               },
+               {
+                       "checksumSHA1": "xHZe/h/tyrqmS9qiR03bLfRv5FI=",
+                       "path": "github.com/Azure/azure-sdk-for-go/storage",
+                       "revision": "f8eeb65a1a1f969696b49aada9d24073f2c2acd1",
+                       "revisionTime": "2018-02-15T19:19:13Z"
+               },
+               {
+                       "checksumSHA1": "PfyfOXsPbGEWmdh54cguqzdwloY=",
+                       "path": "github.com/Azure/azure-sdk-for-go/version",
+                       "revision": "471256ff7c6c93b96131845cef5309d20edd313d",
+                       "revisionTime": "2018-02-14T01:17:07Z"
+               },
+               {
+                       "checksumSHA1": "1Y2+bSzYrdPHQqRjR1OrBMHAvxY=",
+                       "path": "github.com/Azure/go-autorest/autorest",
+                       "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+                       "revisionTime": "2018-08-09T20:19:59Z"
+               },
+               {
+                       "checksumSHA1": "GxL0HHpZDj2milPhR3SPV6MWLPc=",
+                       "path": "github.com/Azure/go-autorest/autorest/adal",
+                       "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+                       "revisionTime": "2018-08-09T20:19:59Z"
+               },
+               {
+                       "checksumSHA1": "ZNgwJOdHZmm4k/HJIbT1L5giO6M=",
+                       "path": "github.com/Azure/go-autorest/autorest/azure",
+                       "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+                       "revisionTime": "2018-08-09T20:19:59Z"
+               },
+               {
+                       "checksumSHA1": "6i7kwcXGTn55WqfubQs21swgr34=",
+                       "path": "github.com/Azure/go-autorest/autorest/azure/auth",
+                       "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+                       "revisionTime": "2018-08-09T20:19:59Z"
+               },
+               {
+                       "checksumSHA1": "9nXCi9qQsYjxCeajJKWttxgEt0I=",
+                       "path": "github.com/Azure/go-autorest/autorest/date",
+                       "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+                       "revisionTime": "2018-08-09T20:19:59Z"
+               },
+               {
+                       "checksumSHA1": "SbBb2GcJNm5GjuPKGL2777QywR4=",
+                       "path": "github.com/Azure/go-autorest/autorest/to",
+                       "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+                       "revisionTime": "2018-08-09T20:19:59Z"
+               },
+               {
+                       "checksumSHA1": "HjdLfAF3oA2In8F3FKh/Y+BPyXk=",
+                       "path": "github.com/Azure/go-autorest/autorest/validation",
+                       "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+                       "revisionTime": "2018-08-09T20:19:59Z"
+               },
+               {
+                       "checksumSHA1": "b2lrPJRxf+MEfmMafN40wepi5WM=",
+                       "path": "github.com/Azure/go-autorest/logger",
+                       "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+                       "revisionTime": "2018-08-09T20:19:59Z"
+               },
+               {
+                       "checksumSHA1": "UtAIMAsMWLBJ6yO1qZ0soFnb0sI=",
+                       "path": "github.com/Azure/go-autorest/version",
+                       "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+                       "revisionTime": "2018-08-09T20:19:59Z"
+               },
+               {
+                       "checksumSHA1": "o/3cn04KAiwC7NqNVvmfVTD+hgA=",
+                       "path": "github.com/Microsoft/go-winio",
+                       "revision": "78439966b38d69bf38227fbf57ac8a6fee70f69a",
+                       "revisionTime": "2017-08-04T20:09:54Z"
+               },
+               {
+                       "checksumSHA1": "k59wLJfyqGB04o238WhKSAzSz9M=",
+                       "path": "github.com/aws/aws-sdk-go/aws",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=",
+                       "path": "github.com/aws/aws-sdk-go/aws/awserr",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "PEDqMAEPxlh9Y8/dIbHlE6A7LEA=",
+                       "path": "github.com/aws/aws-sdk-go/aws/awsutil",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "KpW2B6W3J1yB/7QJWjjtsKz1Xbc=",
+                       "path": "github.com/aws/aws-sdk-go/aws/client",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "uEJU4I6dTKaraQKvrljlYKUZwoc=",
+                       "path": "github.com/aws/aws-sdk-go/aws/client/metadata",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "GvmthjOyNZGOKmXK4XVrbT5+K9I=",
+                       "path": "github.com/aws/aws-sdk-go/aws/corehandlers",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "QHizt8XKUpuslIZv6EH6ENiGpGA=",
+                       "path": "github.com/aws/aws-sdk-go/aws/credentials",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "JTilCBYWVAfhbKSnrxCNhE8IFns=",
+                       "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "1pENtl2K9hG7qoB7R6J7dAHa82g=",
+                       "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "sPtOSV32SZr2xN7vZlF4FXo43/o=",
+                       "path": "github.com/aws/aws-sdk-go/aws/credentials/processcreds",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=",
+                       "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "3pJft1H34eTYK6s6p3ijj3mGtc4=",
+                       "path": "github.com/aws/aws-sdk-go/aws/csm",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "7AmyyJXVkMdmy8dphC3Nalx5XkI=",
+                       "path": "github.com/aws/aws-sdk-go/aws/defaults",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "47hnR1KYqZDBT3xmHuS7cNtqHP8=",
+                       "path": "github.com/aws/aws-sdk-go/aws/ec2metadata",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "pcWH1AkR7sUs84cN/XTD9Jexf2Q=",
+                       "path": "github.com/aws/aws-sdk-go/aws/endpoints",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "nhavXPspOdqm5iAvIGgmZmXk4aI=",
+                       "path": "github.com/aws/aws-sdk-go/aws/request",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "w4tSwNFNJ4cGgjYEdAgsDnikqec=",
+                       "path": "github.com/aws/aws-sdk-go/aws/session",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "C9uAu9gsLIpJGIX6/5P+n3s9wQo=",
+                       "path": "github.com/aws/aws-sdk-go/aws/signer/v4",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "Fe2TPw9X2UvlkRaOS7LPJlpkuTo=",
+                       "path": "github.com/aws/aws-sdk-go/internal/ini",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "wjxQlU1PYxrDRFoL1Vek8Wch7jk=",
+                       "path": "github.com/aws/aws-sdk-go/internal/sdkio",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "MYLldFRnsZh21TfCkgkXCT3maPU=",
+                       "path": "github.com/aws/aws-sdk-go/internal/sdkrand",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "tQVg7Sz2zv+KkhbiXxPH0mh9spg=",
+                       "path": "github.com/aws/aws-sdk-go/internal/sdkuri",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "sXiZ5x6j2FvlIO57pboVnRTm7QA=",
+                       "path": "github.com/aws/aws-sdk-go/internal/shareddefaults",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "NtXXi501Kou3laVAsJfcbKSkNI8=",
+                       "path": "github.com/aws/aws-sdk-go/private/protocol",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "0cZnOaE1EcFUuiu4bdHV2k7slQg=",
+                       "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "lj56XJFI2OSp+hEOrFZ+eiEi/yM=",
+                       "path": "github.com/aws/aws-sdk-go/private/protocol/query",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "+O6A945eTP9plLpkEMZB0lwBAcg=",
+                       "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "RDOk9se2S83/HAYmWnpoW3bgQfQ=",
+                       "path": "github.com/aws/aws-sdk-go/private/protocol/rest",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "B8unEuOlpQfnig4cMyZtXLZVVOs=",
+                       "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "uvEbLM/ZodhtEUVTEoC+Lbc9PHg=",
+                       "path": "github.com/aws/aws-sdk-go/service/ec2",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "HMY+b4YBLVvWoKm5vB+H7tpKiTI=",
+                       "path": "github.com/aws/aws-sdk-go/service/sts",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "spyv5/YFBjYyZLZa1U2LBfDR8PM=",
+                       "path": "github.com/beorn7/perks/quantile",
+                       "revision": "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9",
+                       "revisionTime": "2016-08-04T10:47:26Z"
+               },
+               {
+                       "checksumSHA1": "+Zz+leZHHC9C0rx8DoRuffSRPso=",
+                       "path": "github.com/coreos/go-systemd/daemon",
+                       "revision": "cc4f39464dc797b91c8025330de585294c2a6950",
+                       "revisionTime": "2018-01-08T08:51:32Z"
+               },
+               {
+                       "checksumSHA1": "+TKtBzv23ywvmmqRiGEjUba4YmI=",
+                       "path": "github.com/dgrijalva/jwt-go",
+                       "revision": "dbeaa9332f19a944acb5736b4456cfcc02140e29",
+                       "revisionTime": "2017-10-19T21:57:19Z"
+               },
+               {
+                       "checksumSHA1": "7EjxkAUND/QY/sN+2fNKJ52v1Rc=",
+                       "path": "github.com/dimchansky/utfbom",
+                       "revision": "5448fe645cb1964ba70ac8f9f2ffe975e61a536c",
+                       "revisionTime": "2018-07-13T13:37:17Z"
+               },
+               {
+                       "checksumSHA1": "Gj+xR1VgFKKmFXYOJMnAczC3Znk=",
+                       "path": "github.com/docker/distribution/digestset",
+                       "revision": "277ed486c948042cab91ad367c379524f3b25e18",
+                       "revisionTime": "2018-01-05T23:27:52Z"
+               },
+               {
+                       "checksumSHA1": "2Fe4D6PGaVE2he4fUeenLmhC1lE=",
+                       "path": "github.com/docker/distribution/reference",
+                       "revision": "277ed486c948042cab91ad367c379524f3b25e18",
+                       "revisionTime": "2018-01-05T23:27:52Z"
+               },
+               {
+                       "checksumSHA1": "QKCQfrTv4wTL0KBDMHpWM/jHl9I=",
+                       "path": "github.com/docker/docker/api",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "b91BIyJbqy05pXpEh1eGCJkdjYc=",
+                       "path": "github.com/docker/docker/api/types",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "jVJDbe0IcyjoKc2xbohwzQr+FF0=",
+                       "path": "github.com/docker/docker/api/types/blkiodev",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "DuOqFTQ95vKSuSE/Va88yRN/wb8=",
+                       "path": "github.com/docker/docker/api/types/container",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "XDP7i6sMYGnUKeFzgt+mFBJwjjw=",
+                       "path": "github.com/docker/docker/api/types/events",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "S4SWOa0XduRd8ene8Alwih2Nwcw=",
+                       "path": "github.com/docker/docker/api/types/filters",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "KuC0C6jo1t7tlvIqb7G3u1FIaZU=",
+                       "path": "github.com/docker/docker/api/types/image",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "uJeLBKpHZXP+bWhXP4HhpyUTWYI=",
+                       "path": "github.com/docker/docker/api/types/mount",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "Gskp+nvbVe8Gk1xPLHylZvNmqTg=",
+                       "path": "github.com/docker/docker/api/types/network",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "r2vWq7Uc3ExKzMqYgH0b4AKjLKY=",
+                       "path": "github.com/docker/docker/api/types/registry",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "VTxWyFud/RedrpllGdQonVtGM/A=",
+                       "path": "github.com/docker/docker/api/types/strslice",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "Q0U3queMsCw+rPPztXnRHwAxQEc=",
+                       "path": "github.com/docker/docker/api/types/swarm",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "kVfD1e4Gak7k6tqDX5nrgQ57EYY=",
+                       "path": "github.com/docker/docker/api/types/swarm/runtime",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "77axKFOjRx1nGrzIggGXfTxUYVQ=",
+                       "path": "github.com/docker/docker/api/types/time",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "uDPQ3nHsrvGQc9tg/J9OSC4N5dQ=",
+                       "path": "github.com/docker/docker/api/types/versions",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "IBJy2zPEnYmcFJ3lM1eiRWnCxTA=",
+                       "path": "github.com/docker/docker/api/types/volume",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "zQvx3WYTAwbPZEaVPjAsrmW7V00=",
+                       "path": "github.com/docker/docker/client",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "JbiWTzH699Sqz25XmDlsARpMN9w=",
+                       "path": "github.com/docker/go-connections/nat",
+                       "revision": "3ede32e2033de7505e6500d6c868c2b9ed9f169d",
+                       "revisionTime": "2017-06-23T20:36:43Z"
+               },
+               {
+                       "checksumSHA1": "jUfDG3VQsA2UZHvvIXncgiddpYA=",
+                       "path": "github.com/docker/go-connections/sockets",
+                       "revision": "3ede32e2033de7505e6500d6c868c2b9ed9f169d",
+                       "revisionTime": "2017-06-23T20:36:43Z"
+               },
+               {
+                       "checksumSHA1": "c6lDGNwTm5mYq18IHP+lqYpk8xU=",
+                       "path": "github.com/docker/go-connections/tlsconfig",
+                       "revision": "3ede32e2033de7505e6500d6c868c2b9ed9f169d",
+                       "revisionTime": "2017-06-23T20:36:43Z"
+               },
+               {
+                       "checksumSHA1": "kP4hqQGUNNXhgYxgB4AMWfNvmnA=",
+                       "path": "github.com/docker/go-units",
+                       "revision": "d59758554a3d3911fa25c0269de1ebe2f1912c39",
+                       "revisionTime": "2017-12-21T20:03:56Z"
+               },
+               {
+                       "checksumSHA1": "ImX1uv6O09ggFeBPUJJ2nu7MPSA=",
+                       "path": "github.com/ghodss/yaml",
+                       "revision": "0ca9ea5df5451ffdf184b4428c902747c2c11cd7",
+                       "revisionTime": "2017-03-27T23:54:44Z"
+               },
+               {
+                       "checksumSHA1": "8UEp6v0Dczw/SlasE0DivB0mAHA=",
+                       "path": "github.com/gogo/protobuf/jsonpb",
+                       "revision": "30cf7ac33676b5786e78c746683f0d4cd64fa75b",
+                       "revisionTime": "2018-05-09T16:24:41Z"
+               },
+               {
+                       "checksumSHA1": "wn2shNJMwRZpvuvkf1s7h0wvqHI=",
+                       "path": "github.com/gogo/protobuf/proto",
+                       "revision": "160de10b2537169b5ae3e7e221d28269ef40d311",
+                       "revisionTime": "2018-01-04T10:21:28Z"
+               },
+               {
+                       "checksumSHA1": "HPVQZu059/Rfw2bAWM538bVTcUc=",
+                       "path": "github.com/gogo/protobuf/sortkeys",
+                       "revision": "30cf7ac33676b5786e78c746683f0d4cd64fa75b",
+                       "revisionTime": "2018-05-09T16:24:41Z"
+               },
+               {
+                       "checksumSHA1": "SkxU1+wPGUJyLyQENrZtr2/OUBs=",
+                       "path": "github.com/gogo/protobuf/types",
+                       "revision": "30cf7ac33676b5786e78c746683f0d4cd64fa75b",
+                       "revisionTime": "2018-05-09T16:24:41Z"
+               },
+               {
+                       "checksumSHA1": "yqF125xVSkmfLpIVGrLlfE05IUk=",
+                       "path": "github.com/golang/protobuf/proto",
+                       "revision": "1e59b77b52bf8e4b449a57e6f79f21226d571845",
+                       "revisionTime": "2017-11-13T18:07:20Z"
+               },
+               {
+                       "checksumSHA1": "iIUYZyoanCQQTUaWsu8b+iOSPt4=",
+                       "origin": "github.com/docker/docker/vendor/github.com/gorilla/context",
+                       "path": "github.com/gorilla/context",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "fSs1WcPh2F5JJtxqYC+Jt8yCkYc=",
+                       "path": "github.com/gorilla/mux",
+                       "revision": "5bbbb5b2b5729b132181cc7f4aa3b3c973e9a0ed",
+                       "revisionTime": "2018-01-07T15:57:08Z"
+               },
+               {
+                       "checksumSHA1": "d9PxF1XQGLMJZRct2R8qVM/eYlE=",
+                       "path": "github.com/hashicorp/golang-lru",
+                       "revision": "0a025b7e63adc15a622f29b0b2c4c3848243bbf6",
+                       "revisionTime": "2016-08-13T22:13:03Z"
+               },
+               {
+                       "checksumSHA1": "9hffs0bAIU6CquiRhKQdzjHnKt0=",
+                       "path": "github.com/hashicorp/golang-lru/simplelru",
+                       "revision": "0a025b7e63adc15a622f29b0b2c4c3848243bbf6",
+                       "revisionTime": "2016-08-13T22:13:03Z"
+               },
+               {
+                       "checksumSHA1": "iCsyavJDnXC9OY//p52IWJWy7PY=",
+                       "path": "github.com/jbenet/go-context/io",
+                       "revision": "d14ea06fba99483203c19d92cfcd13ebe73135f4",
+                       "revisionTime": "2015-07-11T00:45:18Z"
+               },
+               {
+                       "checksumSHA1": "khL6oKjx81rAZKW+36050b7f5As=",
+                       "path": "github.com/jmcvetta/randutil",
+                       "revision": "2bb1b664bcff821e02b2a0644cd29c7e824d54f8",
+                       "revisionTime": "2015-08-17T12:26:01Z"
+               },
+               {
+                       "checksumSHA1": "blwbl9vPvRLtL5QlZgfpLvsFiZ4=",
+                       "origin": "github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath",
+                       "path": "github.com/jmespath/go-jmespath",
+                       "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+                       "revisionTime": "2019-03-06T20:18:39Z"
+               },
+               {
+                       "checksumSHA1": "X7g98YfLr+zM7aN76AZvAfpZyfk=",
+                       "path": "github.com/julienschmidt/httprouter",
+                       "revision": "adbc77eec0d91467376ca515bc3a14b8434d0f18",
+                       "revisionTime": "2018-04-11T15:45:01Z"
+               },
+               {
+                       "checksumSHA1": "oX6jFQD74oOApvDIhOzW2dXpg5Q=",
+                       "path": "github.com/kevinburke/ssh_config",
+                       "revision": "802051befeb51da415c46972b5caf36e7c33c53d",
+                       "revisionTime": "2017-10-13T21:14:58Z"
+               },
+               {
+                       "checksumSHA1": "IfZcD4U1dtllJKlPNeD2aU4Jn98=",
+                       "path": "github.com/lib/pq",
+                       "revision": "83612a56d3dd153a94a629cd64925371c9adad78",
+                       "revisionTime": "2017-11-26T05:04:59Z"
+               },
+               {
+                       "checksumSHA1": "AU3fA8Sm33Vj9PBoRPSeYfxLRuE=",
+                       "path": "github.com/lib/pq/oid",
+                       "revision": "83612a56d3dd153a94a629cd64925371c9adad78",
+                       "revisionTime": "2017-11-26T05:04:59Z"
+               },
+               {
+                       "checksumSHA1": "T9E+5mKBQ/BX4wlNxgaPfetxdeI=",
+                       "path": "github.com/marstr/guid",
+                       "revision": "8bdf7d1a087ccc975cf37dd6507da50698fd19ca",
+                       "revisionTime": "2017-04-27T23:51:15Z"
+               },
+               {
+                       "checksumSHA1": "bKMZjd2wPw13VwoE7mBeSv5djFA=",
+                       "path": "github.com/matttproud/golang_protobuf_extensions/pbutil",
+                       "revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c",
+                       "revisionTime": "2016-04-24T11:30:07Z"
+               },
+               {
+                       "checksumSHA1": "V/quM7+em2ByJbWBLOsEwnY3j/Q=",
+                       "path": "github.com/mitchellh/go-homedir",
+                       "revision": "b8bc1bf767474819792c23f32d8286a45736f1c6",
+                       "revisionTime": "2016-12-03T19:45:07Z"
+               },
+               {
+                       "checksumSHA1": "OFNit1Qx2DdWhotfREKodDNUwCM=",
+                       "path": "github.com/opencontainers/go-digest",
+                       "revision": "279bed98673dd5bef374d3b6e4b09e2af76183bf",
+                       "revisionTime": "2017-06-07T19:53:33Z"
+               },
+               {
+                       "checksumSHA1": "ZGlIwSRjdLYCUII7JLE++N4w7Xc=",
+                       "path": "github.com/opencontainers/image-spec/specs-go",
+                       "revision": "577479e4dc273d3779f00c223c7e0dba4cd6b8b0",
+                       "revisionTime": "2017-11-25T02:40:18Z"
+               },
+               {
+                       "checksumSHA1": "jdbXRRzeu0njLE9/nCEZG+Yg/Jk=",
+                       "path": "github.com/opencontainers/image-spec/specs-go/v1",
+                       "revision": "577479e4dc273d3779f00c223c7e0dba4cd6b8b0",
+                       "revisionTime": "2017-11-25T02:40:18Z"
+               },
+               {
+                       "checksumSHA1": "F1IYMLBLAZaTOWnmXsgaxTGvrWI=",
+                       "path": "github.com/pelletier/go-buffruneio",
+                       "revision": "c37440a7cf42ac63b919c752ca73a85067e05992",
+                       "revisionTime": "2017-02-27T22:03:11Z"
+               },
+               {
+                       "checksumSHA1": "xCv4GBFyw07vZkVtKF/XrUnkHRk=",
+                       "path": "github.com/pkg/errors",
+                       "revision": "e881fd58d78e04cf6d0de1217f8707c8cc2249bc",
+                       "revisionTime": "2017-12-16T07:03:16Z"
+               },
+               {
+                       "checksumSHA1": "Ajt29IHVbX99PUvzn8Gc/lMCXBY=",
+                       "path": "github.com/prometheus/client_golang/prometheus",
+                       "revision": "9bb6ab929dcbe1c8393cd9ef70387cb69811bd1c",
+                       "revisionTime": "2018-02-03T14:28:15Z"
+               },
+               {
+                       "checksumSHA1": "c3Ui7nnLiJ4CAGWZ8dGuEgqHd8s=",
+                       "path": "github.com/prometheus/client_golang/prometheus/promhttp",
+                       "revision": "9bb6ab929dcbe1c8393cd9ef70387cb69811bd1c",
+                       "revisionTime": "2018-02-03T14:28:15Z"
+               },
+               {
+                       "checksumSHA1": "DvwvOlPNAgRntBzt3b3OSRMS2N4=",
+                       "path": "github.com/prometheus/client_model/go",
+                       "revision": "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c",
+                       "revisionTime": "2017-11-17T10:05:41Z"
+               },
+               {
+                       "checksumSHA1": "xfnn0THnqNwjwimeTClsxahYrIo=",
+                       "path": "github.com/prometheus/common/expfmt",
+                       "revision": "89604d197083d4781071d3c65855d24ecfb0a563",
+                       "revisionTime": "2018-01-10T21:49:58Z"
+               },
+               {
+                       "checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=",
+                       "path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
+                       "revision": "89604d197083d4781071d3c65855d24ecfb0a563",
+                       "revisionTime": "2018-01-10T21:49:58Z"
+               },
+               {
+                       "checksumSHA1": "YU+/K48IMawQnToO4ETE6a+hhj4=",
+                       "path": "github.com/prometheus/common/model",
+                       "revision": "89604d197083d4781071d3c65855d24ecfb0a563",
+                       "revisionTime": "2018-01-10T21:49:58Z"
+               },
+               {
+                       "checksumSHA1": "lolK0h7LSVERIX8zLyVQ/+7wEyA=",
+                       "path": "github.com/prometheus/procfs",
+                       "revision": "cb4147076ac75738c9a7d279075a253c0cc5acbd",
+                       "revisionTime": "2018-01-25T13:30:57Z"
+               },
+               {
+                       "checksumSHA1": "lv9rIcjbVEGo8AT1UCUZXhXrfQc=",
+                       "path": "github.com/prometheus/procfs/internal/util",
+                       "revision": "cb4147076ac75738c9a7d279075a253c0cc5acbd",
+                       "revisionTime": "2018-01-25T13:30:57Z"
+               },
+               {
+                       "checksumSHA1": "BXJH5h2ri8SU5qC6kkDvTIGCky4=",
+                       "path": "github.com/prometheus/procfs/nfs",
+                       "revision": "cb4147076ac75738c9a7d279075a253c0cc5acbd",
+                       "revisionTime": "2018-01-25T13:30:57Z"
+               },
+               {
+                       "checksumSHA1": "yItvTQLUVqm/ArLEbvEhqG0T5a0=",
+                       "path": "github.com/prometheus/procfs/xfs",
+                       "revision": "cb4147076ac75738c9a7d279075a253c0cc5acbd",
+                       "revisionTime": "2018-01-25T13:30:57Z"
+               },
+               {
+                       "checksumSHA1": "eDQ6f1EsNf+frcRO/9XukSEchm8=",
+                       "path": "github.com/satori/go.uuid",
+                       "revision": "36e9d2ebbde5e3f13ab2e25625fd453271d6522e",
+                       "revisionTime": "2018-01-03T17:44:51Z"
+               },
+               {
+                       "checksumSHA1": "UwtyqB7CaUWPlw0DVJQvw0IFQZs=",
+                       "path": "github.com/sergi/go-diff/diffmatchpatch",
+                       "revision": "1744e2970ca51c86172c8190fadad617561ed6e7",
+                       "revisionTime": "2017-11-10T11:01:46Z"
+               },
+               {
+                       "checksumSHA1": "ySaT8G3I3y4MmnoXOYAAX0rC+p8=",
+                       "path": "github.com/sirupsen/logrus",
+                       "revision": "d682213848ed68c0a260ca37d6dd5ace8423f5ba",
+                       "revisionTime": "2017-12-05T20:32:29Z"
+               },
+               {
+                       "checksumSHA1": "8QeSG127zQqbA+YfkO1WkKx/iUI=",
+                       "path": "github.com/src-d/gcfg",
+                       "revision": "f187355171c936ac84a82793659ebb4936bc1c23",
+                       "revisionTime": "2016-10-26T10:01:55Z"
+               },
+               {
+                       "checksumSHA1": "yf5NBT8BofPfGYCXoLnj7BIA1wo=",
+                       "path": "github.com/src-d/gcfg/scanner",
+                       "revision": "f187355171c936ac84a82793659ebb4936bc1c23",
+                       "revisionTime": "2016-10-26T10:01:55Z"
+               },
+               {
+                       "checksumSHA1": "C5Z8YVyNTuvupM9AUr9KbPlps4Q=",
+                       "path": "github.com/src-d/gcfg/token",
+                       "revision": "f187355171c936ac84a82793659ebb4936bc1c23",
+                       "revisionTime": "2016-10-26T10:01:55Z"
+               },
+               {
+                       "checksumSHA1": "mDkN3UpR7auuFbwUuIwExz4DZgY=",
+                       "path": "github.com/src-d/gcfg/types",
+                       "revision": "f187355171c936ac84a82793659ebb4936bc1c23",
+                       "revisionTime": "2016-10-26T10:01:55Z"
+               },
+               {
+                       "checksumSHA1": "iHiMTBffQvWYlOLu3130JXuQpgQ=",
+                       "path": "github.com/xanzy/ssh-agent",
+                       "revision": "ba9c9e33906f58169366275e3450db66139a31a9",
+                       "revisionTime": "2015-12-15T15:34:51Z"
+               },
+               {
+                       "checksumSHA1": "TT1rac6kpQp2vz24m5yDGUNQ/QQ=",
+                       "path": "golang.org/x/crypto/cast5",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "IQkUIOnvlf0tYloFx9mLaXSvXWQ=",
+                       "path": "golang.org/x/crypto/curve25519",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "1hwn8cgg4EVXhCpJIqmMbzqnUo0=",
+                       "path": "golang.org/x/crypto/ed25519",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "LXFcVx8I587SnWmKycSDEq9yvK8=",
+                       "path": "golang.org/x/crypto/ed25519/internal/edwards25519",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "ooU7jaiYSUKlg5BVllI8lsq+5Qk=",
+                       "path": "golang.org/x/crypto/openpgp",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "olOKkhrdkYQHZ0lf1orrFQPQrv4=",
+                       "path": "golang.org/x/crypto/openpgp/armor",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "eo/KtdjieJQXH7Qy+faXFcF70ME=",
+                       "path": "golang.org/x/crypto/openpgp/elgamal",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "rlxVSaGgqdAgwblsErxTxIfuGfg=",
+                       "path": "golang.org/x/crypto/openpgp/errors",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "Pq88+Dgh04UdXWZN6P+bLgYnbRc=",
+                       "path": "golang.org/x/crypto/openpgp/packet",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "s2qT4UwvzBSkzXuiuMkowif1Olw=",
+                       "path": "golang.org/x/crypto/openpgp/s2k",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "PJY7uCr3UnX4/Mf/RoWnbieSZ8o=",
+                       "path": "golang.org/x/crypto/pkcs12",
+                       "revision": "614d502a4dac94afa3a6ce146bd1736da82514c6",
+                       "revisionTime": "2018-07-28T08:01:47Z"
+               },
+               {
+                       "checksumSHA1": "p0GC51McIdA7JygoP223twJ1s0E=",
+                       "path": "golang.org/x/crypto/pkcs12/internal/rc2",
+                       "revision": "614d502a4dac94afa3a6ce146bd1736da82514c6",
+                       "revisionTime": "2018-07-28T08:01:47Z"
+               },
+               {
+                       "checksumSHA1": "NHjGg73p5iGZ+7tflJ4cVABNmKE=",
+                       "path": "golang.org/x/crypto/ssh",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "NMRX0onGReaL9IfLr0XQ3kl5Id0=",
+                       "path": "golang.org/x/crypto/ssh/agent",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "zBHtHvMj+MXa1qa4aglBt46uUck=",
+                       "path": "golang.org/x/crypto/ssh/knownhosts",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "X1NTlfcau2XcV6WtAHF6b/DECOA=",
+                       "path": "golang.org/x/crypto/ssh/terminal",
+                       "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
+                       "revisionTime": "2017-11-25T19:00:56Z"
+               },
+               {
+                       "checksumSHA1": "Y+HGqEkYM15ir+J93MEaHdyFy0c=",
+                       "origin": "github.com/docker/docker/vendor/golang.org/x/net/context",
+                       "path": "golang.org/x/net/context",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "WHc3uByvGaMcnSoI21fhzYgbOgg=",
+                       "path": "golang.org/x/net/context/ctxhttp",
+                       "revision": "434ec0c7fe3742c984919a691b2018a6e9694425",
+                       "revisionTime": "2017-09-25T09:26:47Z"
+               },
+               {
+                       "checksumSHA1": "r9l4r3H6FOLQ0c2JaoXpopFjpnw=",
+                       "path": "golang.org/x/net/proxy",
+                       "revision": "434ec0c7fe3742c984919a691b2018a6e9694425",
+                       "revisionTime": "2017-09-25T09:26:47Z"
+               },
+               {
+                       "checksumSHA1": "TBlnCuZUOzJHLu5DNY7XEj8TvbU=",
+                       "path": "golang.org/x/net/webdav",
+                       "revision": "434ec0c7fe3742c984919a691b2018a6e9694425",
+                       "revisionTime": "2017-09-25T09:26:47Z"
+               },
+               {
+                       "checksumSHA1": "XgtZlzd39qIkBHs6XYrq9dhTCog=",
+                       "path": "golang.org/x/net/webdav/internal/xml",
+                       "revision": "434ec0c7fe3742c984919a691b2018a6e9694425",
+                       "revisionTime": "2017-09-25T09:26:47Z"
+               },
+               {
+                       "checksumSHA1": "7EZyXN0EmZLgGxZxK01IJua4c8o=",
+                       "path": "golang.org/x/net/websocket",
+                       "revision": "434ec0c7fe3742c984919a691b2018a6e9694425",
+                       "revisionTime": "2017-09-25T09:26:47Z"
+               },
+               {
+                       "checksumSHA1": "znPq37/LZ4pJh7B4Lbu0ZuoMhNk=",
+                       "origin": "github.com/docker/docker/vendor/golang.org/x/sys/unix",
+                       "path": "golang.org/x/sys/unix",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "8BcMOi8XTSigDtV2npDc8vMrS60=",
+                       "origin": "github.com/docker/docker/vendor/golang.org/x/sys/windows",
+                       "path": "golang.org/x/sys/windows",
+                       "revision": "94b8a116fbf1cd90e68d8f5361b520d326a66f9b",
+                       "revisionTime": "2018-01-09T01:38:17Z"
+               },
+               {
+                       "checksumSHA1": "ziMb9+ANGRJSSIuxYdRbA+cDRBQ=",
+                       "path": "golang.org/x/text/transform",
+                       "revision": "e19ae1496984b1c655b8044a65c0300a3c878dd3",
+                       "revisionTime": "2017-12-24T20:31:28Z"
+               },
+               {
+                       "checksumSHA1": "BCNYmf4Ek93G4lk5x3ucNi/lTwA=",
+                       "path": "golang.org/x/text/unicode/norm",
+                       "revision": "e19ae1496984b1c655b8044a65c0300a3c878dd3",
+                       "revisionTime": "2017-12-24T20:31:28Z"
+               },
+               {
+                       "checksumSHA1": "CEFTYXtWmgSh+3Ik1NmDaJcz4E0=",
+                       "path": "gopkg.in/check.v1",
+                       "revision": "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec",
+                       "revisionTime": "2016-12-08T18:13:25Z"
+               },
+               {
+                       "checksumSHA1": "GdsHg+yOsZtdMvD9HJFovPsqKec=",
+                       "path": "gopkg.in/src-d/go-billy.v4",
+                       "revision": "053dbd006f81a230434f712314aacfb540b52cc5",
+                       "revisionTime": "2017-11-27T19:20:57Z"
+               },
+               {
+                       "checksumSHA1": "yscejfasrttJfPq91pn7gArFb5o=",
+                       "path": "gopkg.in/src-d/go-billy.v4/helper/chroot",
+                       "revision": "053dbd006f81a230434f712314aacfb540b52cc5",
+                       "revisionTime": "2017-11-27T19:20:57Z"
+               },
+               {
+                       "checksumSHA1": "B7HAyGfl+ONIAvlHzbvSsLisx9o=",
+                       "path": "gopkg.in/src-d/go-billy.v4/helper/polyfill",
+                       "revision": "053dbd006f81a230434f712314aacfb540b52cc5",
+                       "revisionTime": "2017-11-27T19:20:57Z"
+               },
+               {
+                       "checksumSHA1": "1CnG3JdmIQoa6mE0O98BfymLmuM=",
+                       "path": "gopkg.in/src-d/go-billy.v4/osfs",
+                       "revision": "053dbd006f81a230434f712314aacfb540b52cc5",
+                       "revisionTime": "2017-11-27T19:20:57Z"
+               },
+               {
+                       "checksumSHA1": "lo42NuhQJppy2ne/uwPR2T9BSPY=",
+                       "path": "gopkg.in/src-d/go-billy.v4/util",
+                       "revision": "053dbd006f81a230434f712314aacfb540b52cc5",
+                       "revisionTime": "2017-11-27T19:20:57Z"
+               },
+               {
+                       "checksumSHA1": "ydjzL2seh3M8h9svrSDV5y/KQJU=",
+                       "path": "gopkg.in/src-d/go-git.v4",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "TSoIlaADKlw3Zx0ysCCBn6kyXNE=",
+                       "path": "gopkg.in/src-d/go-git.v4/config",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "B2OLPJ4wnJIM2TMjTyzusYluUeI=",
+                       "path": "gopkg.in/src-d/go-git.v4/internal/revision",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "o9YH41kQMefVGUS7d3WWSLLhIRk=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "BrsKLhmB0BtaMY+ol1oglnHhvrs=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/cache",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "pHPMiAzXG/TJqTLEKj2SHjxX4zs=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/filemode",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "UGIM9BX7w3MhiadsuN6f8Bx0VZU=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/format/config",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "L1H7nPf65//6nQGt3Lzq16vLD8w=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/format/diff",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "87WhYdropmGA4peZOembY5hEgq8=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/format/gitignore",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "G0TX3efLdk7noo/n1Dt9Tzempig=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "q7HtzrSzVE9qN5N3QOxkLFcZI1U=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/format/index",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "0IxJpGMfdnr3cuuVE59u+1B5n9o=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/format/objfile",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "LJnyldAM69WmMXW5avaEeSScKTU=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/format/packfile",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "T8efjPxCKp23RvSBI51qugHzgxw=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/format/pktline",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "97LEL3gxgDWPP/UlRHMfKb5I0RA=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/object",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "PQmY1mHiPdNBNrh3lESZe3QH36c=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "JjHHYoWDYf0H//nP2FIS05ZLgj8=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "wVfbzV5BNhjW/HFFJuTCjkPSJ5M=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "m8nTTRFD7kmX9nT5Yfr9lqabR4s=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/revlist",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "Xito+BwVCMpKrhcvgz5wU+MRmEo=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/storer",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "AVSX04sTj3cBv1muAmIbPE9D9FY=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/transport",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "cmOntUALmiRvvblEXAQXNO4Oous=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/client",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "gaKy+c/OjPQFLhENnSAFEZUngok=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/file",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "chcAwbm6J5uXXn6IV58+G6RKCjU=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/git",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "m9TNeIIGUBdZ0qdSl5Xa/0TIvfo=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/http",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "6asrmcjb98FpRr83ICCODXdGWdE=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "MGiWWrsy8iQ5ZdCXEN2Oc4oprCk=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/server",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "vat8YhxXGXNcg8HvCDfHAR6BcL0=",
+                       "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "FlVLBdu4cjlXj9zjRRNDurRLABU=",
+                       "path": "gopkg.in/src-d/go-git.v4/storage",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "IpSxC31PynwJBajOaHR7gtnVc7I=",
+                       "path": "gopkg.in/src-d/go-git.v4/storage/filesystem",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "OaZO6dgvn6PMvezw0bYQUGLSrF0=",
+                       "path": "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "jPRm9YqpcJzx4oasd6PBdD33Dgo=",
+                       "path": "gopkg.in/src-d/go-git.v4/storage/memory",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "AzdUpuGqSNnNK6DgdNjWrn99i3o=",
+                       "path": "gopkg.in/src-d/go-git.v4/utils/binary",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "vniUxB6bbDYazl21cOfmhdZZiY8=",
+                       "path": "gopkg.in/src-d/go-git.v4/utils/diff",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "cspCXRxvzvoNOEUB7wRgOKYrVjQ=",
+                       "path": "gopkg.in/src-d/go-git.v4/utils/ioutil",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "shsY2I1OFbnjopNWF21Tkfx+tac=",
+                       "path": "gopkg.in/src-d/go-git.v4/utils/merkletrie",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "QiHHx1Qb/Vv4W6uQb+mJU2zMqLo=",
+                       "path": "gopkg.in/src-d/go-git.v4/utils/merkletrie/filesystem",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "M+6y9mdBFksksEGBceBh9Se3W7Y=",
+                       "path": "gopkg.in/src-d/go-git.v4/utils/merkletrie/index",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "7eEw/xsSrFLfSppRf/JIt9u7lbU=",
+                       "path": "gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/frame",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "qCb9d3cwnPHVLqS/U9NAzK+1Ptg=",
+                       "path": "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder",
+                       "revision": "bf3b1f1fb9e0a04d0f87511a7ded2562b48a19d8",
+                       "revisionTime": "2018-01-08T13:05:52Z"
+               },
+               {
+                       "checksumSHA1": "I4c3qsEX8KAUTeB9+2pwVX/2ojU=",
+                       "path": "gopkg.in/warnings.v0",
+                       "revision": "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b",
+                       "revisionTime": "2017-11-15T19:30:34Z"
+               },
+               {
+                       "checksumSHA1": "qOmvuDm+F+2nQQecUZBVkZrTn6Y=",
+                       "path": "gopkg.in/yaml.v2",
+                       "revision": "d670f9405373e636a5a2765eea47fac0c9bc91a4",
+                       "revisionTime": "2018-01-09T11:43:31Z"
+               },
+               {
+                       "checksumSHA1": "rBIcwbUjE9w1aV0qh7lAL1hcxCQ=",
+                       "path": "rsc.io/getopt",
+                       "revision": "20be20937449f18bb9967c10d732849fb4401e63",
+                       "revisionTime": "2017-08-11T00:05:52Z"
+               }
+       ],
+       "rootPath": "git.curoverse.com/arvados.git"
+}